blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3dc12c601157700e9f5f878c498e2d1cba6b605e | Shell | Rochet2/mobile-tech-performance | /testrunscripts/slow.bash | UTF-8 | 175 | 2.921875 | 3 | [
"Unlicense"
] | permissive | #!/bin/bash
echo "Press [CTRL+C] to stop.."
# Replay the recorded swipe gesture five times, two seconds apart.
i=0
while [ "$i" -lt 5 ]
do
echo "Swiping more..."
xmacroplay "$DISPLAY" < normal.file
sleep 2
# $((...)) replaces the deprecated $[...] arithmetic form
i=$((i+1))
done | true |
7ad4b9d5b2b8edb19aa1e9965752cb81ac84c77a | Shell | gabrielrussoc/mac422 | /ep3/roda.sh | UTF-8 | 284 | 2.71875 | 3 | [] | no_license | #!/bin/zsh
# Parametros da linha de comando
# Caso de teste $1
# Algoritmo de gerencia de memoria $2
# Algoritmo de substituicao de paginas $3
# Intervalo de prints na tela $4
#
all='carrega tests/'$1'.in\nespaco '$2'\nsubstitui '$3'\nexecuta '$4'\nsai'
echo $all | python3 ep3.py
| true |
c13b53554b68695d18fad909512fdcb3bdacb90f | Shell | delkyd/alfheim_linux-PKGBUILDS | /python-fonttools-git/PKGBUILD | UTF-8 | 1,934 | 3.0625 | 3 | [] | no_license | _name=fonttools
pkgbase=python-"$_name"-git
pkgname=("python-$_name-git" "python2-$_name-git")
pkgver=3.0.r1920
pkgrel=1
pkgdesc='Modify OpenType and TrueType fonts and convert them to and from XML'
arch=('any')
url="https://github.com/behdad/$_name"
license=('MIT')
depends=('python')
makedepends=("python2-setuptools" "python-numpy" "python2-numpy")
#provides=("python-$_name" "python2-$_name")
#conflicts=("python-$_name" "python2-$_name")
source=("git+https://github.com/behdad/$_name.git")
sha256sums=('SKIP')
# makepkg pkgver(): derive the VCS package version from the git checkout
# as "<latest tag, leading v stripped>.r<total commit count>".
pkgver() {
cd "$srcdir/$_name"
printf "%s.r%s" \
"$(git describe --abbrev=0 | sed 's/^v//')" \
"$(git rev-list --count HEAD)"
}
# makepkg prepare(): make a separate python2 copy of the source tree and
# rewrite every script shebang in that copy to point at python2.
prepare() {
cd "$srcdir"
cp -a fonttools fonttools-py2
cd fonttools-py2
# Rewrite "/usr/bin/python" and "env python" shebangs to their python2
# equivalents in every .py file of the copy.
sed -e "s|#![ ]*/usr/bin/python$|#!/usr/bin/python2|" \
-e "s|#![ ]*/usr/bin/env python$|#!/usr/bin/env python2|" \
-e "s|#![ ]*/bin/env python$|#!/usr/bin/env python2|" \
-i $(find . -name '*.py')
}
# makepkg build(): build the python2 copy first, then the python3 tree.
build() {
msg "Building Python2"
cd "$srcdir"/fonttools-py2
python2 setup.py build
msg "Building Python3"
cd "$srcdir"/fonttools
python setup.py build
}
# Package the python2 build. Executables and the man page get a "2"
# suffix so they do not collide with the python3 package's files.
package_python2-fonttools-git() {
depends=("python2-numpy")
cd "$srcdir"/fonttools-py2
python2 setup.py install --skip-build --root="$pkgdir" --optimize=1
# fix conflicts with python-fonttools
mv "$pkgdir"/usr/bin/pyftinspect{,2}
mv "$pkgdir"/usr/bin/pyftmerge{,2}
mv "$pkgdir"/usr/bin/pyftsubset{,2}
mv "$pkgdir"/usr/bin/ttx{,2}
mv "$pkgdir"/usr/share/man/man1/ttx{,2}.1
install -D -m755 LICENSE.txt "$pkgdir"/usr/share/licenses/$pkgname/LICENSE
# keep the renamed man page world-readable
chmod oga+r "$pkgdir"/usr/share/man/man1/ttx2.1
}
# Package the python3 build under its standard (unsuffixed) file names.
package_python-fonttools-git() {
depends=("python-numpy")
cd "$srcdir"/fonttools
python setup.py install --skip-build --root="$pkgdir" --optimize=1
install -D -m755 LICENSE.txt "$pkgdir"/usr/share/licenses/$pkgname/LICENSE
# keep the man page world-readable
chmod oga+r "$pkgdir"/usr/share/man/man1/ttx.1
}
| true |
d9d7583f5f32cf1bfb9946ae8e71d90415996c05 | Shell | aaron-cole/STIG_SCAN_RHEL6 | /STIGS/RHEL6/V-218111.sh | UTF-8 | 885 | 3.25 | 3 | [] | no_license | #!/bin/sh
##Automatically defined items##
#Vulnerability Discussion
#Virus scanning software can be used to protect a system from penetration from computer viruses and to limit their spread through intermediate systems.
#STIG Identification
GrpID="V-218111"
GrpTitle="SRG-OS-000480"
RuleID="SV-218111r603264_rule"
STIGID="RHEL-06-000533"
Results="./Results/$GrpID"
#Remove File if already there
[ -e $Results ] && rm -rf $Results
#Setup Results File
echo $GrpID >> $Results
echo $GrpTitle >> $Results
echo $RuleID >> $Results
echo $STIGID >> $Results
##END of Automatic Items##
###Check###
# If the McAfee ENSL (ISecTP) package is installed, record its service
# state and version and mark Pass; otherwise mark Fail.
if rpm -q ISecTP >> $Results; then
# NOTE(review): "systemctl state <unit>" is not a valid systemctl
# subcommand -- "systemctl is-active isectpd" was probably intended.
# Also, RHEL 6 uses SysV init ("service"), not systemd; confirm target OS.
echo "McAfee ENSL Status - $(systemctl state isectpd 2>> $Results)" >> $Results
/opt/isec/ens/threatprevention/bin/isecav --version >> $Results
echo "Pass" >> $Results
else
echo "McAfee ENSL is not installed" >> $Results
echo "Fail" >> $Results
fi
| true |
1a861803cfcc085f124f4622ebe9a442442c6035 | Shell | dd00f/ibm-b2b-monitoring | /com.ibm.b2b.monitoring/src/site/repository/graphitenmonservice.sh | UTF-8 | 638 | 3.71875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Service control dispatch: start|stop|restart|status.
# Fixes: backtick command substitution replaced with $(...), and all
# expansions quoted so PID-file contents with unexpected whitespace
# cannot word-split.
case "$1" in
start)
# Launch the collector in the background and record its PID.
/etc/monitoring/nmon/graphite_nmon.sh &
echo $! > /var/run/graphite_nmon.pid
;;
stop)
# Kill the wrapper and the nmon process it spawned, then remove PID files.
kill "$(cat /var/run/graphite_nmon.pid)"
kill "$(cat /etc/monitoring/nmon/nmon.pid)"
rm /etc/monitoring/nmon/nmon.pid
rm /var/run/graphite_nmon.pid
;;
restart)
"$0" stop
"$0" start
;;
status)
NMONPID=$(cat /var/run/graphite_nmon.pid)
if ps --pid "$NMONPID" &>/dev/null
then
echo "graphite_nmon.sh is running, pid=${NMONPID}"
else
echo "graphite_nmon.sh is NOT running"
exit 1
fi
;;
*)
echo "Usage: $0 {start|stop|status|restart}"
esac
exit 0 | true |
3b8b1468edfd5724d573a6545c6266354c0fba89 | Shell | blackbox-platform/blackbox-sdk-ios | /scripts/build.sh | UTF-8 | 1,631 | 3.703125 | 4 | [
"MIT"
] | permissive | set -eo pipefail
# Abort the build unless the current git branch is master.
check_master() {
  local current_branch
  current_branch=$(git rev-parse --abbrev-ref HEAD)
  [[ "$current_branch" == "master" ]] && return 0
  echo 'fatal: build.sh must be run from master'
  exit 1
}
# Abort the build if the git working tree has any uncommitted changes.
check_clean() {
  local status_output
  status_output=$(git status --porcelain)
  if [[ -z "$status_output" ]]; then
    return 0
  fi
  echo 'fatal: build.sh must be run with clean working directory'
  exit 1
}
run_tests() {
xcodebuild -scheme Production test
}
# Build device and simulator Release slices of BlackboxSDK, then merge
# them with lipo into one fat binary under build/BlackboxSDK.
build_release() {
## Build release versions
rm -rf build
# NOTE(review): the device xcodebuild line below ends without its
# CONFIGURATION_BUILD_DIR, and the bare assignment on the next line is a
# no-op shell variable -- this looks like a broken line continuation;
# confirm the device build actually lands in build/.
xcodebuild -target BlackboxSDK -configuration Release -arch arm64 -arch armv7 -arch armv7s only_active_arch=no defines_module=yes -sdk "iphoneos"
CONFIGURATION_BUILD_DIR=build
xcodebuild -target BlackboxSDK -configuration Release -arch x86_64 -arch i386 only_active_arch=no defines_module=yes -sdk "iphonesimulator" CONFIGURATION_BUILD_DIR=build
# Merge device/simulatior binaries and create package
mkdir -p build/BlackboxSDK
cp -r build/BlackboxSDK.framework build/BlackboxSDK
cp LICENSE README.md build/BlackboxSDK
# Combine the per-SDK binaries into a single multi-architecture binary.
lipo -create \
-output build/BlackboxSDK/BlackboxSDK.framework/BlackboxSDK \
build/Release-iphoneos/BlackboxSDK.framework/BlackboxSDK \
build/BlackboxSDK.framework/BlackboxSDK
}
publish_github_release() {
## Zip up the package and tag a new version
cd build
zip -r \
BlackboxSDK.zip \
BlackboxSDK
cd -
hub release create \
-a build/BlackboxSDK.zip \
$(cat version)
}
publish_cocoapods_release() {
pod trunk push BlackboxSDK.podspec
}
## Prepare
check_clean
check_master
run_tests
git push origin master
# Deploy
build_release
publish_github_release
publish_cocoapods_release
| true |
801964dbb70a3005eaad9fc9be9232a47dfd94f2 | Shell | aadarshadhakalg/conduit | /ci/archive/script.sh | UTF-8 | 624 | 2.8125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
set -e
psql -c 'create user conduit_test_user with createdb;' -U postgres
psql -c "alter user conduit_test_user with password 'conduit!';" -U postgres
psql -c 'create database conduit_test_db;' -U postgres
psql -c 'grant all on database conduit_test_db to conduit_test_user;' -U postgres
cd "$TEST_DIR"
pub get
$RUNNER_CMD $RUNNER_ARGS
#if [[ "$TRAVIS_BUILD_STAGE_NAME" == "coverage" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_PULL_REQUEST" == false ]]; then
# pub global activate -sgit https://github.com/stablekernel/conduit-coverage-tool.git
# pub global run conduit_coverage_tool:main
#fi
cd ..
| true |
c660723fef2ef4196ff11ea681d097fbae0faaab | Shell | Homeronius/ASL | /helper_scripts/turbo_boost.sh | UTF-8 | 961 | 4 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Script to enable / disable turbo boost on all cores of an Intel CPU
# Important when running benchmarks
if [[ -z $(which rdmsr) ]]; then
echo "msr-tools is not installed. Run 'sudo apt-get install msr-tools' to install it." >&2
exit 1
fi
if [[ ! -z $1 && $1 != "enable" && $1 != "disable" ]]; then
echo "Invalid argument: $1" >&2
echo ""
echo "Usage: $(basename $0) [disable|enable]"
exit 1
fi
if [ $# -eq 0 ]; then
echo "Usage: $(basename $0) [disable|enable]"
exit 1
fi
cores=$(cat /proc/cpuinfo | grep processor | awk '{print $3}')
for core in $cores; do
if [[ $1 == "disable" ]]; then
sudo wrmsr -p${core} 0x1a0 0x4000850089
fi
if [[ $1 == "enable" ]]; then
sudo wrmsr -p${core} 0x1a0 0x850089
fi
state=$(sudo rdmsr -p${core} 0x1a0 -f 38:38)
if [[ $state -eq 1 ]]; then
echo "core ${core}: disabled"
else
echo "core ${core}: enabled"
fi
done
| true |
50742b65b690073050e3c10e266d120bb48fa02e | Shell | agibalov/java-library-experiments | /ffmpeg-experiment/dummy/demo.sh | UTF-8 | 450 | 2.828125 | 3 | [] | no_license | rm *.jpg
rm *.png
# generate images using ImageMagik
for i in {001..010};
do
convert -size 320x240 -pointsize 70 label:frame${i} ${i}.png
done
# take all images and generate a video
# force overwrite
# take 2 images per second
# make output stream have 30 FPS
ffmpeg -y -framerate 2 -i %03d.png -r 30 out.mp4
# take video and extract 1 frame
# time = 0.1s
# thumbnail size 160x120
ffmpeg -i out.mp4 -ss 0.1 -s 160x120 -vframes 1 thumbnail.jpg
| true |
b3f9b74e77b200437328adb3a1c41d3dfd047d2f | Shell | Shashankreddysunkara/stratos | /deploy/docker-compose/build.sh | UTF-8 | 2,965 | 3.875 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
set -eu
# set defaults
DOCKER_REGISTRY=docker.io
DOCKER_ORG=splatform
TAG=$(date -u +"%Y%m%dT%H%M%SZ")
NO_PUSH="false"
TAG_LATEST="false"
# Parse command-line flags:
#   -h           show usage and exit
#   -r REGISTRY  docker registry (default docker.io)
#   -o ORG       docker organisation (default splatform)
#   -t TAG       image tag (default: current UTC timestamp)
#   -l           also tag images as "latest"
#   -n           build only, do not push images
# Fix: the original had two identical "o)" case arms; the second was
# unreachable dead code and has been removed.
while getopts ":ho:r:t:ln" opt; do
  case $opt in
    h)
      echo
      echo "--- To build images of Stratos: "
      echo
      echo " ./build.sh -t 1.0.13"
      echo
      echo "--- To build images locally of Stratos: "
      echo
      echo " ./build.sh -l -n"
      echo
      exit 0
      ;;
    r)
      DOCKER_REGISTRY="${OPTARG}"
      ;;
    o)
      DOCKER_ORG="${OPTARG}"
      ;;
    t)
      TAG="${OPTARG}"
      ;;
    l)
      TAG_LATEST="true"
      ;;
    n)
      NO_PUSH="true"
      ;;
    \?)
      echo "Invalid option: -${OPTARG}" >&2
      exit 1
      ;;
    :)
      echo "Option -$OPTARG requires an argument." >&2
      exit 1
      ;;
  esac
done
echo
echo "=============================================================================="
echo "Stratos Docker Compose Build"
echo "=============================================================================="
echo
echo "TAG: ${TAG}"
if [ "${NO_PUSH}" != "false" ]; then
echo "Images will NOT be pushed"
else
echo "Images will be pushed"
echo " REGISTRY: ${DOCKER_REGISTRY}"
echo " ORG: ${DOCKER_ORG}"
fi
echo
echo "Starting build"
# Copy values template
STRATOS_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd )"
source "${STRATOS_PATH}/deploy/common-build.sh"
function buildProxy {
echo
echo "-- Building the Stratos Backend"
echo
echo "-- Build & publish the runtime container image for the Console Proxy"
buildAndPublishImage stratos-dc-jetstream deploy/Dockerfile.bk "${STRATOS_PATH}" dev-build
}
function buildDbMigratorJob {
# Build the db-migrator container
echo
echo "-- Build & publish the runtime container image for the db-migrator job"
buildAndPublishImage stratos-dc-db-migrator deploy/Dockerfile.bk "${STRATOS_PATH}" postflight-job
}
function buildMariaDb {
echo
echo "-- Building/publishing MariaDB"
# Download and retag image to save bandwidth
buildAndPublishImage stratos-dc-mariadb Dockerfile.mariadb "${STRATOS_PATH}/deploy/db"
}
function buildUI {
# Build and push an image based on the nginx container
echo
echo "-- Building/publishing the runtime container image for the Console web server"
# Download and retag image to save bandwidth
buildAndPublishImage stratos-dc-console deploy/Dockerfile.ui "${STRATOS_PATH}" prod-build
}
# MAIN ------------------------------------------------------
#
# Set the path to the portal proxy
STRATOS_PATH="${STRATOS_PATH}"
# cleanup output, intermediate artifacts
cleanup
# Build all of the components that make up the Console
buildProxy
buildDbMigratorJob
buildUI
buildMariaDb
# Done
echo
echo "Build complete...."
if [ "${NO_PUSH}" == "false" ]; then
echo "Registry: ${DOCKER_REGISTRY}"
echo "Org: ${DOCKER_ORG}"
fi
echo "Tag: ${TAG}"
| true |
04fd809acd2f8e0575b6bf296421f108f14e306b | Shell | biconou/recorder | /record.sh | UTF-8 | 1,217 | 3.046875 | 3 | [] | no_license | #!/bin/sh
. ./env.sh
rm ${BUFFER_DIR}/*
./Record_from_lineIn.sh
CMUS_PID=`cat /biconou/cmus.pid`
kill -9 ${CMUS_PID}
mkfifo pipe_to_aplay
aplay <pipe_to_aplay &
arecord -Dhw:0 -r 44100 -c 2 -f S16_LE 2>${OUTPUT_DIR}/arecord.err \
| tee pipe_to_aplay \
| split --suffix-length=4 -b 2M - ${BUFFER_DIR}/OUTPUT > ${OUTPUT_DIR}/split.out 2>&1 &
# Shovel completed 2 MiB chunks from the split(1) buffer into OUT.WAV;
# when arecord exits, archive the output, verify the copy by checksum,
# and remove the working directory only if the checksums match.
while true; do
filename=$(ls ${BUFFER_DIR}/OUTPUT* | head -n 1)
# Fix: the source contained placeholder text ("$(unknown)") here; the
# size check and the append/remove clearly operate on the oldest chunk.
filesize=$(ls -l "${filename}" | cut -f5 -d' ')
if [ "${filesize}" = "2097152" ]; then
# Chunk is complete (exactly 2 MiB): append it to OUT.WAV and drop it.
cat "${filename}" >> ${OUTPUT_DIR}/OUT.WAV
rm "${filename}"
else
# Chunk still being written: check whether arecord is still alive.
arecord_pid=$(ps -ef | grep arecord | grep -v grep | awk '{print $2}')
if [ "${arecord_pid}" = "" ]
then
mdbefore=$(md5sum ${OUTPUT_DIR}/OUT.WAV | cut -f1 -d' ')
mkdir "${ARCHIVE_DIR}"
cp ${OUTPUT_DIR}/* "${ARCHIVE_DIR}"
mdafter=$(md5sum "${ARCHIVE_DIR}"/OUT.WAV | cut -f1 -d' ')
if [ "${mdbefore}" = "${mdafter}" ]
then
rm -rf ${OUTPUT_DIR}
fi
exit 0
fi
fi
now=$(date | cut -f4 -d' ')
# NOTE(review): this measures the size of the script itself ("record.sh"),
# not the WAV being written -- looks wrong; confirm intended target.
tailleWAV=$(ls -l record.sh | cut -f5 -d' ')
echo "${now} ${tailleWAV}" >> ${OUTPUT_DIR}/RECORD_DIR.stat
df -k /mnt/recorder_buffer >> ${OUTPUT_DIR}/RECORD_DIR.stat
done
| true |
812ddd3decde81381f64ca249633d5c0f077a776 | Shell | beards/dotfiles | /scripts/installer/mac/package_manager | UTF-8 | 476 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env bash
# Install Homebrew if it is not already present, then register its shell
# environment in ~/.zprofile via the project's add_config_line helper.
# Fixes: 'command -v' replaces non-portable 'which'; dropped the
# intermediate variable named 'exit' (shadows the builtin's name); the
# sourced path is quoted.
if command -v brew >/dev/null 2>&1; then
  echo "brew was already installed"
  exit 0
fi
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
eval "$(/opt/homebrew/bin/brew shellenv)"
brew update
SCRIPT_DIR="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && cd ../.. && pwd )"
source "$SCRIPT_DIR/util_funcs.sh"
add_config_line 'eval "$(/opt/homebrew/bin/brew shellenv)"' "$HOME/.zprofile"
| true |
3b36b1928deae40835cd688ed735aede88415f70 | Shell | nebukzza/sigma-converter | /converter-sigma2elastalert.sh | UTF-8 | 2,096 | 3.625 | 4 | [] | no_license | #!/bin/bash
#author kalle.eriksson@pm.me
## Debug properties
#set -x
VERBOSE="NO"
[ "$1x" = "-vx" ] && VERBOSE=YES
[ ${VERBOSE} = YES ] && echo "Converter for sigma to elastic 6.8"
#working in if .. of sigma.
[ ! -d sigma ] && echo "Cant find sigma folder - git clone https://github.com/Neo23x0/sigma" && exit 1
[ ${VERBOSE} = YES ] && echo "Found sigma pull"
SIGMAC_BASE_PATH="sigma"
SIGMAC_BIN="$SIGMAC_BASE_PATH/tools/sigmac"
SIGMAC_CONF="SIGMAC_BASE_PATH/tools/config/generic/windows-audit.yml"
ELASTALERT_OUTPUT_FOLDER=elastalert
ELASTALERT_ELASTIC_COMPAT=winlogbeat-old
# enable below for 7.x
#ELASTALERT_ELASTIC_COMPAT=winlogbeat
# Convert every sigma rule under sigma/$1 into an elastalert rule,
# mirroring the directory layout under ${ELASTALERT_OUTPUT_FOLDER}.
#   $1 - rule directory relative to the sigma checkout (e.g. "rules")
# Quirk preserved from the original: once a folder whose name ends in
# "windows" is seen, the base path switches to sigma/rules/windows for
# the remaining iterations.
function worker () {
SIGMAC_RULES_PATH="sigma/$1"
for entry in "${SIGMAC_RULES_PATH}"/*
do
folder=${entry##*/}
# BUGFIX: the pattern was quoted ("*windows"), which [[ ]] compares as a
# literal string, so this branch never fired; unquoted it glob-matches
# any folder name ending in "windows".
[[ "$folder" == *windows ]] && echo Changing BASE to windows && SIGMAC_RULES_PATH="sigma/rules/windows"
[ ${VERBOSE} == YES ] && echo " - Working with ${folder} in $SIGMAC_RULES_PATH"
[ ${VERBOSE} = YES ] && echo " - Creating output ${ELASTALERT_OUTPUT_FOLDER}/${SIGMAC_RULES_PATH}/${folder}"
mkdir -p "${ELASTALERT_OUTPUT_FOLDER}/${SIGMAC_RULES_PATH}/${folder}"
for alert in "${SIGMAC_RULES_PATH}/${folder}"/*yml
do
[ -e "$alert" ] || continue   # glob matched nothing (replaces 'ls ... 2>/dev/null')
[ ${VERBOSE} == YES ] && echo " -- Working with ${alert}"
#[[ $alert == sysmon* ]] && SIGMAC_CONF="sigma/tools/config/generic/sysmon.yml"
#[[ $alert == powershell* ]] && SIGMAC_CONF="sigma/tools/config/powershell.yml"
#[[ $alert == windows* ]] && SIGMAC_CONF="sigma/tools/config/generic/windows-audit.yml"
[ ${VERBOSE} == YES ] && echo " -- Processing $alert"
${SIGMAC_BIN} -t elastalert -c ${ELASTALERT_ELASTIC_COMPAT} "${alert}" -o "${ELASTALERT_OUTPUT_FOLDER}/${alert}" 2>/dev/null
done
done
}
# List the rule files that use the (backend-unsupported) 'near'
# aggregation, so the user knows why they were not converted.
disclaimer () {
# BUGFIX: 'find -type file' is invalid syntax (-type takes a single
# letter); '-type f' selects regular files.
NOT_SuP=( $(find sigma/rules -type f -exec grep -Hn near {} \; | cut -f1 -d:) )
[ ${VERBOSE} = YES ] && echo "- DISCLAIMER"
[ ${VERBOSE} = YES ] && echo "-- These files alerts were not processed - not supported by backend"
[ ${VERBOSE} = YES ] && echo "--- ${NOT_SuP[*]}"
}
main () {
worker rules
worker rules/windows
disclaimer
}
main
| true |
ec0e963315fda9f7249c9d23bc324affb95b7f77 | Shell | zonoskar/Goodwe2PVoutput | /goodwe2pvoutput.sh | UTF-8 | 252 | 2.796875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/bash
cd /home/pi/Projects
FILE=/home/pi/.goodwelock
LOGFILE=/home/pi/.goodwelog
if [ -f $FILE ]; then
echo "Already running, remove $FILE if not"
else
touch $FILE
python -m Goodwe2PVoutput > $LOGFILE
fi
lxterminal -e tail -f $LOGFILE
| true |
81856712af93ef50b9088016bcddf06725e4d90a | Shell | justintoo/rose-sh | /dependencies/yajl.sh | UTF-8 | 2,460 | 3.578125 | 4 | [] | no_license | : ${YAJL_DEPENDENCIES:=}
: ${YAJL_CONFIGURE_OPTIONS:=
}
: ${YAJL_TARBALL:="lloyd-yajl-66cb08c.tar.gz"}
: ${YAJL_INSTALLED_FILE:="${ROSE_SH_DEPS_PREFIX}/include/yajl/yajl_version.h"}
#-------------------------------------------------------------------------------
install_yajl()
#-------------------------------------------------------------------------------
{
info "Installing application"
#-----------------------------------------------------------------------------
rosesh__install_dep_setup || exit 1
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dependencies
#-----------------------------------------------------------------------------
install_deps ${YAJL_DEPENDENCIES} || exit 1
#-----------------------------------------------------------------------------
# Installation
#-----------------------------------------------------------------------------
set -x
#-----------------------------------------------------------------------------
if [ ! -f "${YAJL_INSTALLED_FILE}" ]; then
rm -rf "./yajl" || fail "Unable to remove application workspace"
mkdir -p "yajl" || fail "Unable to create application workspace"
cd "yajl/" || fail "Unable to change into the application workspace"
download_tarball "${YAJL_TARBALL}" || fail "Unable to download application tarball"
tar xzvf "${YAJL_TARBALL}" || fail "Unable to unpack application tarball"
cd "$(basename ${YAJL_TARBALL%.tar.gz})" || fail "Unable to change into application source directory"
./configure -p "${ROSE_SH_DEPS_PREFIX}" || fail "Unable to configure application"
make -j1 || fail "An error occurred during application compilation"
make -j1 install || fail "An error occurred during application installation"
else
info "[SKIP] yajl is already installed"
fi
#-----------------------------------------------------------------------------
set +x
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
rosesh__install_dep_teardown || exit 1
#-----------------------------------------------------------------------------
}
| true |
75f0b45380066dbe1a57415b8729fc2bdceb4ca6 | Shell | bombsimon/v-hashids | /script/install-hooks | UTF-8 | 1,092 | 3.796875 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
set -eu
# The directory in the project where project specific hooks are stored.
SCRIPT_DIR="script"
# All known hooks that might be symlinked.
HOOK_NAMES="applypatch-msg pre-applypatch post-applypatch \
pre-commit prepare-commit-msg commit-msg post-commit \
pre-rebase post-checkout post-merge pre-receive update \
post-receive post-update pre-auto-gc"
# Get locations of project hooks.
HOOK_DIR=$(git rev-parse --show-toplevel)/.git/hooks
echo "Installing hooks to your project!"
for hook in $HOOK_NAMES; do
# If a hook with the same name exists and is executable but not a symlink,
# back it up with the suffix `local`.
if [ ! -h "$HOOK_DIR/$hook" ] && [ -x "$HOOK_DIR/$hook" ]; then
mv "$HOOK_DIR/$hook" "$HOOK_DIR/$hook.local"
fi
# Create symlink, overwriting the file if it exists probably the only way
# this would happen is if you're using an old version of git -- back when
# the sample hooks were not executable, instead of being named ____.sample
ln -s -f "../../$SCRIPT_DIR/hook-wrapper" "$HOOK_DIR/$hook"
done
# vim set ts=4 sw=4 et:
| true |
b77428ddba3c284b27dca9c23c7ed841bdc58ce6 | Shell | Raja51/azure-around-the-world | /admin/templates/aks_get_credentials_all_sh.txt | UTF-8 | 530 | 2.796875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Generated bash script to get the kubernetes/kubectl configuration info
# for the AKS clusters in all n-regions.
# Chris Joakim, Microsoft
#
# Usage:
# ./{{ outfile }}
source ../app-config.sh
{% for region_info in region_list %}
echo "getting credentials for aks cluster {{ region_info.az_aks_res }}"
az aks get-credentials \
--resource-group {{ region_info.az_rg }} \
--name {{ region_info.az_aks_res }} \
--overwrite-existing
{% endfor %}
echo 'kubectl config get-contexts'
kubectl config get-contexts
| true |
3378999d935eb4ec8381671834a984f19c355b68 | Shell | dogweather/pdx-ri-map | /proxy/deploy.sh | UTF-8 | 339 | 2.625 | 3 | [] | no_license | # Get the directory of this script
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Copy rackup file to remote host
scp "${DIR}/config.ru" "${SSH_LINODE_USER}@${SSH_LINODE_HOST}:${PDX_RI_PROXY_REMOTE_DIR}/config.ru"
# Restart Passenger
ssh -l $SSH_LINODE_USER $SSH_LINODE_HOST "touch ${PDX_RI_PROXY_REMOTE_DIR}tmp/restart.txt"
| true |
70c944524040373dcb051a0eaf5ff37e0d0be8f0 | Shell | jeffkole/tossittome | /init.d/tossittome | UTF-8 | 4,036 | 3.65625 | 4 | [] | no_license | #!/bin/bash
#
# Copied from https://www.exratione.com/2013/02/nodejs-and-forever-as-a-service-simple-upstart-and-init-scripts-for-ubuntu/
#
# An example init script for running a Node.js process as a service
# using Forever as the process monitor. For more configuration options
# associated with Forever, see: https://github.com/nodejitsu/forever
#
# You will need to set the environment variables noted below to conform to
# your use case, and change the init info comment block.
#
# This was written for Debian distributions such as Ubuntu, but should still
# work on RedHat, Fedora, or other RPM-based distributions, since none
# of the built-in service functions are used. If you do adapt it to a RPM-based
# system, you'll need to replace the init info comment block with a chkconfig
# comment block.
#
### BEGIN INIT INFO
# Provides: tossittome
# Required-Start: $syslog $remote_fs
# Required-Stop: $syslog $remote_fs
# Should-Start: $local_fs
# Should-Stop: $local_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Toss It To Me server
# Description: Toss It To Me server
### END INIT INFO
#
# Based on:
# https://gist.github.com/3748766
# https://github.com/hectorcorrea/hectorcorrea.com/blob/master/etc/forever-initd-hectorcorrea.sh
# https://www.exratione.com/2011/07/running-a-nodejs-server-as-a-service-using-forever/
# Source function library. Note that this isn't used here, but remains to be
# uncommented by those who want to edit this script to add more functionality.
# Note that this is Ubuntu-specific. The scripts and script location are different on
# RPM-based distributions.
. /etc/rc.d/init.d/functions
# The example environment variables below assume that Node.js is
# installed into /home/node/local/node by building from source as outlined
# here:
# https://www.exratione.com/2011/07/running-a-nodejs-server-as-a-service-using-forever/
#
# It should be easy enough to adapt to the paths to be appropriate to a
# package installation, but note that the packages available for Ubuntu in
# the default repositories are far behind the times. Most users will be
# building from source to get a more recent Node.js version.
#
# An application name to display in echo text.
# NAME="My Application"
# The full path to the directory containing the node and forever binaries.
# NODE_BIN_DIR=/home/node/local/node/bin
# Set the NODE_PATH to the Node.js main node_modules directory.
# NODE_PATH=/home/node/local/node/lib/node_modules
# The directory containing the application start Javascript file.
# APPLICATION_DIRECTORY=/home/node/my-application
# The application start Javascript filename.
# APPLICATION_START=start-my-application.js
# Process ID file path.
# PIDFILE=/var/run/my-application.pid
# Log file directory.
# LOGDIR=/var/log/my-application
#
NAME="Toss It To Me"
NODE_BIN_DIR=/usr/bin
NODE_PATH=/usr/lib/node_modules
APPLICATION_DIRECTORY=/var/www/tossittome/current
USER=
start() {
daemon --user $USER $APPLICATION_DIRECTORY/bin/start
RETVAL=$?
}
stop() {
runuser -s /bin/bash $USER -c "$APPLICATION_DIRECTORY/bin/stop"
RETVAL=$?
}
restart() {
echo "Restarting $NAME"
stop
start
}
status() {
echo "Status for $NAME:"
# This is taking the lazy way out on status, as it will return a list of
# all running Forever processes. You get to figure out what you want to
# know from that information.
#
# On Ubuntu, this isn't even necessary. To find out whether the service is
# running, use "service my-application status" which bypasses this script
# entirely provided you used the service utility to start the process.
runuser -s /bin/bash $USER -c "forever list"
RETVAL=$?
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status
;;
restart)
restart
;;
*)
echo "Usage: {start|stop|status|restart}"
exit 1
;;
esac
exit $RETVAL
| true |
fe66866719f38e9a060fee10b328c503ac5e7461 | Shell | msys2/MINGW-packages | /mingw-w64-dcmtk/PKGBUILD | UTF-8 | 3,442 | 3.0625 | 3 | [
"BSD-3-Clause"
] | permissive | # Contributor: Mehdi Chinoune <mehdi.chinoune@hotmail.com>
_realname=dcmtk
pkgbase=mingw-w64-${_realname}
pkgname=("${MINGW_PACKAGE_PREFIX}-${_realname}")
pkgver=3.6.7
pkgrel=5
pkgdesc="A collection of libraries and applications implementing large parts of the DICOM standard (mingw-w64)"
arch=('any')
mingw_arch=('mingw32' 'mingw64' 'ucrt64' 'clang64' 'clang32' 'clangarm64')
url='http://dicom.offis.de/dcmtk'
license=('spdx:BSD-3-Clause')
depends=("${MINGW_PACKAGE_PREFIX}-icu"
"${MINGW_PACKAGE_PREFIX}-libiconv"
"${MINGW_PACKAGE_PREFIX}-libpng"
"${MINGW_PACKAGE_PREFIX}-libsndfile"
"${MINGW_PACKAGE_PREFIX}-libtiff"
"${MINGW_PACKAGE_PREFIX}-libxml2"
"${MINGW_PACKAGE_PREFIX}-openjpeg2"
"${MINGW_PACKAGE_PREFIX}-openssl"
"${MINGW_PACKAGE_PREFIX}-zlib")
makedepends=("${MINGW_PACKAGE_PREFIX}-cc"
"${MINGW_PACKAGE_PREFIX}-cmake"
"${MINGW_PACKAGE_PREFIX}-ninja"
"${MINGW_PACKAGE_PREFIX}-autotools")
source=("https://dicom.offis.de/download/dcmtk/dcmtk${pkgver//./}/${_realname}-${pkgver}.tar.gz"
"001-fix-cmake-config-install-destination.patch"
"002-properly-detect-canonical-host.patch")
sha256sums=('7c58298e3e8d60232ee6fc8408cfadd14463cc11a3c4ca4c59af5988c7e9710a'
'ff124b6f26c5a5982268597c6285e2e320a248a090046b45618e79cb8a6fc099'
'b9cd641267f9aeb055608f045cc91ad5b99b071897aa849ed3282aa03b5ae925')
prepare() {
cd "${srcdir}"/${_realname}-${pkgver}
patch -p1 -i "${srcdir}"/001-fix-cmake-config-install-destination.patch
patch -p1 -i "${srcdir}"/002-properly-detect-canonical-host.patch
cd config
./rootconf
}
build() {
# this is just to get arith.h
cd "${srcdir}"/${_realname}-${pkgver}
./configure --ignore-deprecation
make config-tests-all
install -D config/include/dcmtk/config/arith.h "${srcdir}"/build-${MSYSTEM}/config/include/dcmtk/config/arith.h
# continue the build using cmake
mkdir -p "${srcdir}"/build-${MSYSTEM} && cd "${srcdir}"/build-${MSYSTEM}
declare -a extra_config
if check_option "debug" "n"; then
extra_config+=("-DCMAKE_BUILD_TYPE=Release")
else
extra_config+=("-DCMAKE_BUILD_TYPE=Debug")
fi
MSYS2_ARG_CONV_EXCL="-DCMAKE_INSTALL_PREFIX=" \
"${MINGW_PREFIX}"/bin/cmake.exe \
-GNinja \
-DCMAKE_INSTALL_PREFIX="${MINGW_PREFIX}" \
"${extra_config[@]}" \
-DBUILD_SHARED_LIBS=ON \
-DDCMTK_WITH_ICONV=ON \
-DDCMTK_WITH_ICU=ON \
-DDCMTK_WITH_LIBPNG=ON \
-DDCMTK_WITH_LIBXML=ON \
-DDCMTK_WITH_OPENJPEG=ON \
-DDCMTK_WITH_OPENSSL=ON \
-DDCMTK_WITH_SNDFILE=ON \
-DDCMTK_WITH_TIFF=ON \
-DDCMTK_WITH_ZLIB=ON \
../${_realname}-${pkgver}
"${MINGW_PREFIX}"/bin/cmake.exe --build .
}
check() {
cd "${srcdir}/build-${MSYSTEM}"
"${MINGW_PREFIX}"/bin/cmake.exe --build . --target test
}
package() {
cd "${srcdir}/build-${MSYSTEM}"
DESTDIR="${pkgdir}" "${MINGW_PREFIX}"/bin/cmake.exe --install .
local PREFIX_WIN=$(cygpath -wm ${MINGW_PREFIX})
# Fix pkgconfig file
sed -e "s|${PREFIX_WIN}|\"\$\{prefix\}\"|g" -i "${pkgdir}"${MINGW_PREFIX}/lib/pkgconfig/dcmtk.pc
# Fix cmake-config files
find "${pkgdir}${MINGW_PREFIX}/lib/cmake" -type f -name '*.cmake' \
-exec sed -i -e "s|${PREFIX_WIN}|\$\{_IMPORT_PREFIX\}|g" {} \;
install -Dm644 "${srcdir}/${_realname}-${pkgver}/COPYRIGHT" "${pkgdir}${MINGW_PREFIX}/share/licenses/${_realname}/LICENSE"
}
| true |
97e09833b80956609cb127cf12c707500b3b7595 | Shell | scortum/nginx-proxy-stuff | /systemd/install-nginx-proxy.sh | UTF-8 | 1,669 | 2.59375 | 3 | [] | no_license | #!/bin/bash -e
IMAGE_NAME=jwilder/nginx-proxy
VERSION=latest
DOCKER_CONTAINER_NAME=nginx-proxy
cat > /etc/default/${DOCKER_CONTAINER_NAME} << EOF
DOCKER_IMAGE=${IMAGE_NAME}:${VERSION}
DOCKER_CONTAINER_NAME=${DOCKER_CONTAINER_NAME}
EOF
cat > /lib/systemd/system/${DOCKER_CONTAINER_NAME}.service << EOF
[Unit]
Description=Nginx Proxy
Requires=docker.service
After=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/default/${DOCKER_CONTAINER_NAME}
ExecStartPre=-/usr/bin/docker kill \${DOCKER_CONTAINER_NAME}
ExecStartPre=-/usr/bin/docker rm \${DOCKER_CONTAINER_NAME}
ExecStartPre=/usr/bin/docker pull \${DOCKER_IMAGE}
ExecStart=/usr/bin/docker run --name \${DOCKER_CONTAINER_NAME} \
-h \${DOCKER_HOSTNAME} \
-p 80:80 \
-p 443:443 \
-e DEFAULT_HOST=default.phon.name \
-v /data/scortum-letsencrypt:/data/scortum-letsencrypt:ro \
-v /data/nginx-proxy/certs:/etc/nginx/certs \
-v /var/run/docker.sock:/tmp/docker.sock:ro \
-v /etc/localtime:/etc/localtime:ro \
\${DOCKER_IMAGE}
ExecStop=/usr/bin/docker stop --time=10 \${DOCKER_CONTAINER_NAME}
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
| true |
0dd3ee26f27ef164eaeed11bf45dc399075bb952 | Shell | bitlum/viabtc_exchange_server | /docker/redis/build.sh | UTF-8 | 127 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
WD=`dirname $0`
if [ $# -lt 1 ]; then
N=1
else
N=$1
shift
fi
cd $WD && docker build -t bitlum/exchange-redis-$N .
| true |
0dfb0f3efab81d02c4e4c77437d258f41096d9a8 | Shell | wesparish/PlexVMwareHAWatcher | /PlexVMwareHAWatcher.sh | UTF-8 | 1,122 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# Watch Plex health and report "application alive" heartbeats to VMware
# HA application monitoring via vmware-appmonitor.
export LD_LIBRARY_PATH=/root/GuestSDK/lib/lib64/
# Usage: /root/GuestSDK/bin/bin64/vmware-appmonitor {enable | disable | markActive | isEnabled | getAppStatus | postAppState [appStateOk|appStateNeedReset]}
VMW_BINARY=/root/GuestSDK/bin/bin64/vmware-appmonitor
# Optional $1 overrides the log file location.
LOG_FILE=${1-/var/log/PlexVMwareHAWatcher.log}
echo "Logging to: $LOG_FILE"
#echo "" > $LOG_FILE
# Turn on VMware application monitoring for this guest.
$VMW_BINARY enable
while true ; do
echo "$(date) Checking Plex status" >> $LOG_FILE
# Grab the ratingKey of the first item in library section 1 ...
movieId=$(curl 'http://127.0.0.1:32400/library/sections/1/all/' 2> /dev/null | grep -m 1 ratingKey | grep -o '\bratingKey="[^"]*"' | cut -d '"' -f 2)
# ... then ask Plex to verify that the item's media file is accessible.
if [ "$(curl 'http://127.0.0.1:32400/library/metadata/'$movieId'?checkFiles=1' 2> /dev/null | grep -m 1 'Part accessible' | grep -o '\baccessible="[^"]*"' | cut -d '"' -f 2)" == "1" ] ; then
echo "$(date) Plex status shows UP for movie ID: $movieId" >> $LOG_FILE
echo "$(date) Sending up status" >> $LOG_FILE
# Heartbeat: tells VMware HA the application is healthy.
$VMW_BINARY markActive
sleep 15
else
# No heartbeat is sent on failure; HA will act after its timeout.
echo "$(date) Plex status shows DOWN for movie ID: $movieId" >> $LOG_FILE
# Sleep 5s on failure to keep trying
sleep 5
fi
done
| true |
12e94f04da972f218fe67ff0ce26520bf62f0b30 | Shell | Bjay1435/capstone | /rootfs/usr/share/bash-completion/completions/iperf | UTF-8 | 1,722 | 2.921875 | 3 | [
"MIT"
] | permissive | # iperf(1) completion -*- shell-script -*-
# Bash programmable-completion function for iperf(1).
# Completes option arguments for flags that take one, then falls back to
# parsing `iperf --help` output, filtered to client/server mode as needed.
_iperf()
{
    local cur prev words cword split
    _init_completion -s || return
    case $prev in
        # Options whose argument is free-form (numbers, durations, ...):
        # nothing sensible to complete.
        -h|--help|-v|--version|-i|--interval|-l|--len|-p|--port|-w|--window|\
        -M|--mss|-b|--bandwidth|-n|--num|-t|--time|-L|--listenport|-P|\
        --parallel|-T|--ttl|-Z|--linux-congestion)
            return
            ;;
        -f|--format)
            COMPREPLY=( $( compgen -W 'k m K M' -- "$cur" ) )
            return
            ;;
        -o|--output|-F|--fileinput)
            _filedir
            return
            ;;
        -B|--bind)
            _available_interfaces -a
            _ip_addresses
            return
            ;;
        -c|--client)
            _known_hosts_real "$cur"
            return
            ;;
        -x|--reportexclude)
            COMPREPLY=( $( compgen -W 'C D M S V' -- "$cur" ) )
            return
            ;;
        -y|--reportstyle)
            COMPREPLY=( $( compgen -W 'C' -- "$cur" ) )
            return
            ;;
    esac
    $split && return
    # Filter mode specific options
    # If -s/-c already appears on the line, strip the other mode's option
    # section from the --help text before harvesting flags.
    local i filter=cat
    for i in ${words[@]}; do
        case $i in
            -s|--server)
                filter='sed -e /^Client.specific/,/^$/d'
                ;;
            -c|--client)
                filter='sed -e /^Server.specific/,/^$/d'
                ;;
        esac
    done
    [[ $filter != cat ]] && filter+=' -e /--client/d -e /--server/d'
    # $1 is the command being completed; harvest its flags via _parse_help.
    COMPREPLY=( $( compgen -W \
        '$( "$1" --help 2>&1 | $filter | _parse_help - )' -- "$cur" ) )
    # Options ending in '=' expect an attached value: suppress trailing space.
    [[ $COMPREPLY == *= ]] && compopt -o nospace
} &&
complete -F _iperf iperf
# ex: ts=4 sw=4 et filetype=sh
5ac55b3aa4ebf28df123e4bed1293de63876eeca | Shell | bwt-dev/libbwt-nodejs | /scripts/build.sh | UTF-8 | 684 | 3.703125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Package libbwt-nodejs: read the version from the libbwt submodule,
# record checksums of the prebuilt libbwt dist tarballs, and npm-pack the
# result into dist/. Requires $LIBBWT_DIST pointing at the libbwt dist dir.
set -xeo pipefail
# NOTE: the (echo && exit 1) subshell only works as a guard because set -e
# aborts on the resulting non-zero status of the || list.
[ -f libbwt/Cargo.toml ] || (echo >&2 "Missing libbwt submodule, run 'git submodule update --init --recursive'" && exit 1)
version=$(grep -E '^version =' libbwt/Cargo.toml | cut -d'"' -f2)
echo Building libbwt-nodejs v$version
if [ -z "$LIBBWT_DIST" ] || [ ! -d "$LIBBWT_DIST" ]; then
  echo >&2 LIBBWT_DIST is missing
  exit 1
fi
mkdir -p dist && rm -rf dist/*
# Update LIBBWT-SHA256SUMS
(cd $LIBBWT_DIST && sha256sum *.tar.gz) | sort > LIBBWT-SHA256SUMS
chmod 664 LIBBWT-SHA256SUMS
# Update version
npm version --allow-same-version --no-git-tag-version $version
# Prepare package
npm pack
mv libbwt-$version.tgz dist/libbwt-nodejs-$version.tgz
| true |
84018bd1622e1dafd7c739ad4332f491d1f2213a | Shell | ahmedelhilali/dotfiles-3 | /.scripts/Shell/ytdl.sh | UTF-8 | 412 | 3.5 | 4 | [] | no_license | #!/bin/sh
# Download a video with youtube-dl into ~/downloads, reporting progress via
# desktop notifications. $1 - URL to download.
# The URL is parked in a temp file so an interrupted download can be
# identified later; the file is removed only on success.
[ -z "$1" ] && { notify-send 'ytdl.sh: nothing to do' ; exit 1 ; }
cd ~/downloads
tmpfile=$(mktemp -p ~/downloads)
echo "$1" >> "$tmpfile"
notify-send "$0: $1"
# Set the terminal title to the URL (xterm OSC 2 escape).
printf '\033]2;%s\007' "$@"
if youtube-dl -o '%(title)s.%(ext)s' --add-metadata "$1";then
    notify-send "$0 - Download done" "$1"
    rm -f "$tmpfile"
    exit 0
else
    notify-send "$0 - Download finished with erros" "$1"
    exit 1
fi
| true |
54a9517571e8f7a85ff89e66e32d07e793a45cf9 | Shell | Derek0428/ucl_nlp | /pull_data.sh | UTF-8 | 343 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Fetch and unpack the CoNaLa corpus, then create the per-dataset output
# directory skeleton (saved_models/, logs/, decodes/).
echo "download original Conala json dataset"
data_file="conala-corpus-v1.1.zip"
# -c resumes a partial download; -P places the file under data/.
wget -cP data/ http://www.phontron.com/download/${data_file}
unzip data/${data_file} -d data/
rm data/${data_file}
# Single-element loop kept so more datasets can be appended later.
for dataset in conala;
do
    mkdir -p saved_models/${dataset}
    mkdir -p logs/${dataset}
    mkdir -p decodes/${dataset}
done
echo "Done!"
| true |
30a1fac9276bb3ffa6f0d64c22913e4e0c724f91 | Shell | j2moreno/quick_refs | /bashrc | UTF-8 | 7,991 | 3.921875 | 4 | [] | no_license | # bashrc template
#------------------------------------------------------------------------
# Helper functions
#------------------------------------------------------------------------
is_interactive_shell() {
  # True when the current shell is interactive, i.e. $- contains "i".
  case "$-" in
    *i*) return 0 ;;
    *)   return 1 ;;
  esac
}
#------------------------------------------------------------------------
# Color/Terminal Escape Codes
#------------------------------------------------------------------------
# ps_ variables use special \[ and \] to tell bash PS1 not to count these characters
# and prevents line-wrapping issue
# Prompt-safe variants (for use inside PS1 only).
ps_blue="\[\033[0;34m\]"
ps_cyan="\[\033[1;96m\]"
ps_green="\[\033[0;32m\]"
ps_white="\[\033[1;97m\]"
ps_yellow="\[\033[1;33m\]"
ps_reset="\[\033[0m\]"
# Plain variants for printf/echo -e output (e.g. helpbio).
blue='\033[0;34m'
cyan='\033[1;96m'
green='\033[0;32m'
white='\033[1;97m'
yellow='\033[1;33m'
reset='\033[0m'
#------------------------------------------------------------------------
# Function aliases
#------------------------------------------------------------------------
run_cmd() {
  # Announce the command on stderr (blank line, then "cmd> ..."), then run it,
  # passing through its exit status.
  printf '\n' >&2
  printf 'cmd> %s\n' "$*" >&2
  "$@"
}
# Requires fzf on PATH; both helpers are interactive-only.
cdf() {
    # alias: cdf [pattern] - cd into directory with fuzzy finder
    local dir=$(find . -type d | egrep -v '\.git|\.snakemake' | fzf +m -q "$1") && cd "$dir"
}
cdp() {
    # alias: cdp [query] - cd into $PROJECTS_DIR project using fzf
    # NOTE(review): 'repo' is declared but never used below.
    local repo fzf_options
    local projects_dir=${PROJECTS_DIR:-~/projects}
    # --select-1 auto-picks when the query matches exactly one project.
    [[ -n $1 ]] && fzf_options="--select-1 --query=$1"
    cd ${projects_dir}/$(for project in $(ls ${projects_dir}); do echo ${project}; done | fzf ${fzf_options})
    clear
    # If the chosen project is a git repo, show a quick status summary.
    if [[ -d .git ]]; then
        printf '> git fetch --quiet (%s)\n' "$(git config --get remote.origin.url)"
        git fetch --quiet
        git branch --verbose
        git status --short
    fi
}
clone-hpc() {
  # alias: clone-hpc - Copies/updates from https://github.com/usf-hii/template-hpc to current directory
  # Looks for the first existing base directory, keeps a clone of
  # template-hpc there (cloning/pulling as needed), then runs its Clone.sh
  # against the current directory.
  # Fix: the original declared 'found_dir' but set/tested 'found', leaking
  # an unintended global; the flag is now one consistently-named local.
  local found='false'
  local dirname
  for dirname in ~/dev/usf-hii ~/projects ~/src; do
    if [[ -d ${dirname} ]]; then
      found='true'
      break
    fi
  done
  # Fall back to ~/projects when none of the candidates exist.
  [[ $found = 'true' ]] || dirname=~/projects
  if [[ ! -d ${dirname}/template-hpc ]]; then
    git clone --quiet git@github.com:usf-hii/template-hpc $dirname/template-hpc
  fi
  git -C ${dirname}/template-hpc pull --quiet
  ${dirname}/template-hpc/Clone.sh $(pwd) | bash -x
}
cls() {
  # alias: cls - clear screen
  clear
}
# Request an exclusive interactive Slurm node (60G, up to 7 days).
# Extra srun options may be passed as arguments.
getnode() {
  srun \
    --pty \
    --mem=60G \
    --partition=hii-interactive \
    --exclusive \
    --nodes=1 \
    --ntasks-per-node=1 \
    --time=7-0 \
    "$@" \
    /bin/bash
}
helpbio() {
  # alias: helpbio - print out help for bioinformatcs bash environment
  # Self-documenting: scrapes the '# alias:' comment lines out of this very
  # file (${BASH_SOURCE[0]}) and pretty-prints name/description columns.
  local IFS=$'\n'
  echo "-----------------------------------------------------------------------------"
  echo "Help for bioinfo aliases and tools (https://github.com/usf-hii/bioinfo-home/)"
  echo "-----------------------------------------------------------------------------"
  echo
  echo "Aliases"
  for line in $(grep '# alias: ' ${BASH_SOURCE[0]} | grep -v "grep '# alias:'" | sed -r -e 's/# alias://' -e 's/^\s+//'); do
    printf "${green}%-32s${reset} %s\n" \
      $(echo ${line} | sed -r 's/^(.*)\s+-\s+.*/\1/') \
      $(echo ${line} | sed -r 's/^.*\s+-\s+(.*)/\1/')
  done
  echo
  echo "Commands"
  printf "${green}%-32s${reset} %s\n" "github-repos" "List all repos for USF-HII"
  printf "${green}%-32s${reset} %s\n" "gpfs-quota" "List all total,free,used quotas on HPC for HII GPFS"
  printf "${green}%-32s${reset} %s\n" "<command> <path>/**<TAB>" "Path completion using FZF"
  printf "${green}%-32s${reset} %s\n" "<command> <CTRL-T>" "Use FZF to select target file for <command>"
  printf "${green}%-32s${reset} %s\n" "<CTRL-R>" "Find command line history using FZF"
}
sa() {
  # alias: sa [hours] [sacct_opts...] - detailed slurm sacct info for user (default past 48 hours)
  # A leading non-option argument is taken as the lookback window in hours.
  # NOTE(review): calling sa with no arguments makes 'shift' fail silently
  # inside the first branch -- harmless here, but worth confirming.
  if [[ $1 != -* ]]; then
    local hours=${1:-48}; shift
  else
    local hours=48
  fi
  local start_time=$(date -d "${hours} hours ago" '+%FT%T')
  run_cmd \
    /usr/bin/sacct \
    --format=user,node%-20,jobid%-16,jobname%-72,start,elapsed,ncpus,reqm,ntasks,avecpu,maxrss,state \
    --user=${USER} \
    -S ${start_time} \
    "$@"
}
sb() {
  # alias: sb - source your ~/.bashrc
  source ~/.bashrc
}
si() {
  # alias: si [sinfo_opts...] - detailed slurm sinfo command
  sinfo --partition=hii02,hii-test,hii-interactive --format="%20P %8D %8c %12m %12a %12T %10l %8t %12A" "$@";
}
sq() {
  # alias: sq [squeue_opts...] - squeue with fancing formatting for current user
  squeue \
    --user=$USER \
    --partition=hii02,hii-test,hii-interactive \
    --format='%24i %8P %10T %42j %12a %12u %20V %8M %8l %8c %10m %N %E' \
    "$@";
}
sq-grep() {
  # alias: sq-grep [-q] <regex> - print slurm job ids and job names matching <regex>, '-q' prints just job id
  # NOTE(review): "${1:-''}" defaults to the two-character string '' rather
  # than empty; the comparison against '-q' still behaves as intended.
  if [[ "${1:-''}" == '-q' ]]; then
    shift
    squeue --user $USER --noheader --format='%A %j %T %M' | grep --perl-regex "^\\d+\\s+.*$1" | awk '{print $1}'
  else
    squeue --user $USER --noheader --format='%A %j %T %M' | grep --perl-regex "^\\d+\\s+.*$1"
  fi
}
termbin() {
  # alias: termbin - pastes STDIN to https://termbin.com and returns URL
  nc termbin.com 9999
}
# Wrapper: always run the site-installed tmux with 256-color support (-2).
tmux() {
  /shares/hii/sw/tmux/latest/bin/tmux -2 "$@"
}
vb() {
  # alias: vb - edit ~/.bashrc and source it after exiting
  ${EDITOR:-vim} ~/.bashrc
  source ~/.bashrc
}
#------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------
# One-time interactive-shell setup: PATH additions, optional fzf/git-prompt
# sourcing, shell options, readline tweaks, and the PS1 prompt.
main_bashrc() {
  # Repo root: parent of the directory containing this rc file.
  MYDIR=$(readlink -f $(dirname "${BASH_SOURCE[0]}")/..)
  # Prepend each directory only if not already on PATH (idempotent on re-source).
  [[ $PATH =~ :?${MYDIR}/bin:? ]] || PATH=${MYDIR}/bin:${PATH}
  [[ $PATH =~ :?/shares/hii/sw/git/latest/bin:? ]] || PATH=/shares/hii/sw/git/latest/bin:$PATH
  [[ $PATH =~ :?/shares/hii/sw/tmux/latest/bin:? ]] || PATH=/shares/hii/sw/tmux/latest/bin:$PATH
  # On the login node, pin git to the system binary.
  if [[ $(uname -n) == 'hii.rc.usf.edu' ]]; then
    git() {
      /usr/bin/git "$@";
    }
  fi
  [[ -f ${MYDIR}/etc/bashrc.fzf.bindings ]] && source ${MYDIR}/etc/bashrc.fzf.bindings
  [[ -f ${MYDIR}/etc/bashrc.fzf.completion ]] && source ${MYDIR}/etc/bashrc.fzf.completion
  [[ -f ${MYDIR}/etc/bashrc.git.prompt ]] && source ${MYDIR}/etc/bashrc.git.prompt
  export EDITOR="vim"
  export LESS="--hilite-search --ignore-case --quit-if-one-screen --RAW-CONTROL-CHARS --no-init --tabs=2"
  shopt -s cmdhist # store multiline commands as single entries
  shopt -s histappend # append to history rather than overwrite
  shopt -s histreedit # puts failed hist sub back on cli
  shopt -s cdable_vars
  shopt -s direxpand 2>/dev/null
  bind 'set bell-style none' &>/dev/null
  bind 'set show-all-if-ambiguous on' &>/dev/null
  bind 'set completion-query-items 100' &>/dev/null
  bind 'set mark-directories on' &>/dev/null # append slash to completed dir name
  bind 'set mark-modified-lines off' &>/dev/null # show * for modified history lines
  #------------------------------------------------------------------------
  # Directory variables
  #------------------------------------------------------------------------
  # Shortcuts usable with 'cd $var' thanks to cdable_vars above.
  bio=/shares/hii/bioinfo
  fpdata=/shares/hii/fac/parikhh/data
  #------------------------------------------------------------------------
  # Bash Prompt
  #------------------------------------------------------------------------
  # user@host:cwd (git status) built from the ps_* color variables.
  PS1="${ps_yellow}\u${ps_reset}"
  PS1="${PS1}${ps_white}@${ps_reset}"
  PS1="${PS1}${hostname:-$(uname -n)}"
  PS1="${PS1}${ps_yellow}:${ps_reset}"
  PS1="${PS1}${ps_cyan}\w${ps_reset}"
  if type -t __git_ps1 &>/dev/null; then
    GIT_PS1_SHOWDIRTYSTATE=yes
    GIT_PS1_SHOWUPSTREAM=verbose
    GIT_PS1_SHOWCOLORHINTS=true
    PS1="${PS1} ${ps_green}\$(__git_ps1 \(%s\))${ps_reset}"
  fi
  # Tag the prompt when running inside a Singularity container.
  if [[ -n ${SINGULARITY_NAME} ]]; then
    PS1="${PS1}\n[${ps_white}SINGULARITY${ps_reset}]\$ "
  else
    PS1="${PS1}\n\$ "
  fi
  # Flush history and update the terminal title before each prompt.
  export PROMPT_COMMAND='history -a; echo -ne "\033]0;$USER@$(hostname -s):$PWD\007"; printf "\n";'
  export PS1
}
# Only configure the prompt/aliases for interactive shells; non-interactive
# invocations (scp, cron, scripts) skip the setup entirely.
if is_interactive_shell; then
    main_bashrc
fi
| true |
c316eb72e78e27c31ed319a18078c2f8d130e9e3 | Shell | monsieur7/GameCrypTBash | /server.sh | UTF-8 | 1,866 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Chat "server": performs a Diffie-Hellman key exchange over raw netcat,
# then loops decrypting incoming AES-256 messages. Peer output accumulates
# in the file 'output' via a background `nc -l -k` listener.
# Args: $1 host, $2 outbound port, $3 inbound port (defaults below).
if (($# != 3))
then
host="localhost"
port_in="1301"
port_out="1302"
else
host=$1
port_out=$2
port_in=$3
fi
nc -l -k $port_in >output 2>>log_server.txt & # launch daemon
# Generate DH parameters once, then a fresh keypair for this session.
if [ ! -s dhb2.pem ]
then
openssl genpkey -genparam -algorithm DH -out dhb2.pem
fi
openssl genpkey -paramfile dhb2.pem -out dhkey2.pem
openssl pkey -in dhkey2.pem -pubout -out dhpub2.pem
sleep 2
# Busy-wait until the client side is listening on $port_out.
nc -z $host $port_out
while (( $? != 0 ))
do
echo "" >/dev/null
nc -z $host $port_out
done
echo "ready to send : a client is listening"
echo "sending parameters"
cat dhb2.pem
cat dhb2.pem | netcat -w 1 $host $port_out
sleep 2
echo "listening for public key"
#nc -l -p 1301 > /tmp/output
# Wait until the peer's public key has landed in 'output'.
while [ ! -s output ]
do
# debug
sleep 1
done
echo "received public key"
sleep 2
cat output
echo "sending public key"
cat dhpub2.pem
sleep 2
cat dhpub2.pem | nc -w 1 $host $port_out
# Derive the shared secret; NULs are stripped from the received key data.
openssl pkeyutl -derive -inkey dhkey2.pem -peerkey <(cat output | tr -d "\000") -out alice_shared_secret.bin
base64 alice_shared_secret.bin
#echo "" > output #erase buffer
echo "secret"
base64 alice_shared_secret.bin #debug
#secure exchange begin here
sleep 5
# Encrypt $1 with AES-256 and send it to the peer. Unused in this script.
# NOTE(review): this encrypts with bob_shared_secret.bin while decryption
# below uses alice_shared_secret.bin -- presumably the client creates the
# former; verify against the client-side script.
send() {
echo "$1" | openssl enc -aes256 -base64 -kfile bob_shared_secret.bin -e 2>/dev/null | nc -w 1 $host $port_out
}
echo "" >output
echo "beginning receiving"
# Main receive loop: poll 'output' for base64 payloads, decrypt, and act.
while true
do
#echo "waiting"
cat output | tr -d '\000'| grep -q -E '[A-Za-z/\\=]+'
while (( $? > 0))
do
#printf "."
sleep 1
cat output | tr -d '\000' | grep -q -E '^[A-Za-z/\\=]+'
done
printf "\n"
#echo "decoding"
result=$(cat output |tr -d '\000' | grep -E '^[A-Za-z/\\=]+' | openssl enc -aes256 -base64 -kfile alice_shared_secret.bin -d 2>/dev/null )
# "q" terminates the session; "clear" clears the screen; anything else prints.
if echo "$result" | tr -d " " | grep "^q$"
then
rm -rf output
#./clean.sh
echo $!
echo "pid to kill " $!
# NOTE(review): pkill matches by name pattern, not PID -- 'kill $!' is
# probably what was intended for the background nc listener.
pkill $!
exit
elif echo "$result" | tr -d " " | grep "^clear$"
then
clear
else
echo $result
fi
echo "" >output
done
| true |
99965fdc3890f8a86868a6f7c2f901fed68b7d5a | Shell | root-project/roottest | /scripts/run-from-buildbot.sh | UTF-8 | 1,428 | 3.828125 | 4 | [] | no_license | #!/bin/sh
# roottest needs several variables to be defined.
# Pushing them through the buildbot config is more difficult than just
# calculating them here. This script will thus invoke roottest for buildbot.
# Axel, 2010-03-25
# PWD on cygwin is garbled, need to adjust \cygwin/home to /home
if uname -a | grep -i cygwin > /dev/null; then
    PWD=${PWD##\\cygwin}
    cd $PWD
    export ROOTTEST_HOME="`cygpath -m $PWD`/"
else
    export ROOTTEST_HOME="$PWD/"
fi
STARTPWD=$PWD
# We might be building roottest for roottest-Ubuntu1004-64bit-nightly
# That wants to test ROOT-Ubuntu1004-64bit-nightly, so that's the ROOT
# version we need to set up.
# pwd is ..../ROOT-Ubuntu1004-64bit-nightly/build, so cd up:
cd ..
# and this is the slave that runs us:
BBARCH=`basename $PWD`
# this is the corresponding ROOT slave's location
BBARCH=../ROOT-${BBARCH#*-}/build
# we cd into its build directory and set ROOT up
# NOTE(review): the (echo ...; exit 1) guards exit only the subshell, so a
# failed cd/source does NOT abort this script -- likely unintended.
cd $BBARCH || (echo Cannot find directory $BBARCH from `pwd`; exit 1)
. bin/thisroot.sh || (echo Cannot find ROOT setup script in `pwd`; exit 1)
echo Set up ROOT in $ROOTSYS, SVN revision:
echo === svninfo.txt ===
cat $ROOTSYS/etc/svninfo.txt
echo === svninfo.txt ===
# cd back to where we started
cd $STARTPWD
# Make clean before making roottest, to not depend on dependencies:
#make clean "$@"
# -k: keep going past individual test failures; capture make's status so it
# is still propagated after the svninfo printout below.
make -k FAST=1 "$@"
ret=$?
echo === svninfo.txt ===
cat $ROOTSYS/etc/svninfo.txt
echo === svninfo.txt ===
exit $ret
| true |
05084fae4b41934430cdf043bff5040ba5612a8d | Shell | belltan/UpperComputerProgramingGuideSrc | /12_5_qt_breakpad_demo/dump_tools_win/process_dump.sh | UTF-8 | 1,363 | 3.671875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Symbolicate a Breakpad minidump: dump symbols from the executable, place
# the .sym file in the layout minidump_stackwalk expects
# (symbols/<exe>/<module-id>/), then print the stack trace.
if [ $# != 2 ] ; then
    echo "USAGE: $0 EXE_NAME DMP_NAME"
    echo "  e.g.: $0 test 3872B2CF-983B-4963-AFA9-C8534DFD4C44.dmp"
    exit 1;
fi
#get input param
exe_file_name=$1
dmp_file_name=$2
# Produce <exe-basename>.sym from the executable via Breakpad's dump_syms.
getSymbol() {
    echo "@getSymbol: start get symbol"
    input_file_name=$exe_file_name
    pdb_file_name=${input_file_name%.*}
    ./dump\_syms ./$exe_file_name > $pdb_file_name'.sym'
}
# Parse the module id from the .sym header, move the file into the standard
# symbols/ tree, and run minidump_stackwalk against the dump.
getStackTrace() {
    echo "@getStackTrace: start get StackTrace"
    input_file_name=$exe_file_name
    pdb_file_name=${input_file_name%.*}
    sym_file_name=$pdb_file_name'.sym'
    #get first line of $sym_file_name
    line1=`head -n1 $sym_file_name`
    #echo $line1
    #get version number form string of first line
    # Sym header is "MODULE <os> <arch> <id> <name>"; field 4 is the id.
    OIFS=$IFS; IFS=" "; set -- $line1; aa=$1;bb=$2;cc=$3;dd=$4; IFS=$OIFS
    #echo $dd
    version_number=$dd
    #make standard dir and move *.sym in it
    mkdir -p ./symbols/$exe_file_name/$version_number
    mv $sym_file_name ./symbols/$exe_file_name/$version_number
    #print stack trace at std output
    ./minidump_stackwalk $dmp_file_name ./symbols 2> /dev/null
    #print stack trace at a file
    #./minidump_stackwalk $dmp_file_name ./symbols 2>/dev/null >result.txt
}
main() {
    getSymbol
    if [ $? == 0 ]
    then
        getStackTrace
    fi
}
# run main
main
| true |
2790f6e89da48678552bf91c81d798d4a79aeb7f | Shell | amitagarwlgit/Rainbow | /shell/txb-jdk-checker-onprem.sh | UTF-8 | 506 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# For every host listed in MachineIP.txt, report whether each running Java
# process is OpenJDK or Oracle JDK (by running `<java-binary> -version`
# remotely over ssh).
# Fixes vs. original:
#   - a stray trailing `done` made the script a syntax error (it could
#     never run);
#   - hosts are read with `while read` instead of `for i in $(cat ...)`;
#   - `ssh -n` prevents ssh from draining the remaining host list off the
#     loop's redirected stdin.
filename='MachineIP.txt'
while IFS= read -r host; do
  echo "$host"
  ssh -n "amiagarwal@$host" \
    'for k in $(ps -ef | grep java | grep -v grep | awk '\''{print $8}'\''); do
       if [[ $($k -version 2>&1) == *"OpenJDK"* ]]; then
         echo "Process ==>"$k" ==> OpenJDK"
       elif [[ $($k -version 2>&1) == *"Java(TM)"* ]]; then
         echo "Process ==>" $k " ==> OracleJDK"
       else
         echo "No Process with JDK found"
       fi
     done'
done < "$filename"
echo "I am done"
| true |
abf94721b48b8d3b7282b1da97d0c3ce32c14d4b | Shell | harvard-edge/TinyMLPerf | /benchmarks/src/L3/mnist_fc/train/train_and_generate.sh | UTF-8 | 1,083 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Train the MNIST FC model, convert it with utensor-cli, copy generated
# model sources into ../src/, then delete the intermediate artifacts.
# $1/$2 are forwarded to deep_mlp.py.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
echo "Mnist FC train_and_generate.sh running deep_mlp.py"
python3 ${DIR}/deep_mlp.py $1 $2 2>/dev/null
echo "Mnist FC train_and_generate.sh utensor-cli convert"
# Once for extracting the final graph transform, again for generating cpp files
utensor-cli convert ${DIR}/mnist_model/deep_mlp.pb --output-nodes=y_pred --save-graph --transform-methods dropout,quantize,biasAdd,remove_id_op,refcnt 2>/dev/null
mv quant_deep_mlp.pkl quant_deep_mlp_target.pkl
utensor-cli convert ${DIR}/mnist_model/deep_mlp.pb --output-nodes=y_pred --save-graph 2>/dev/null
# Echo the copy commands for visibility, then perform them.
echo "cp -f ${DIR}/models/* ${DIR}/../src/"
echo "cp -f ${DIR}/mnist_model/deep_mlp.pb ${DIR}/../src/"
#echo "cp -f ${DIR}/mnist_model/deep_mlp_final.pb ${DIR}/../src/"
cp -f ${DIR}/models/* ${DIR}/../src/
cp -f ${DIR}/mnist_model/deep_mlp.pb ${DIR}/../src/
#cp -f ${DIR}/mnist_model/deep_mlp_final.pb ${DIR}/../src/
# Cleanup
rm -rf ${DIR}/models
rm -rf ${DIR}/mnist_model
rm -rf ${DIR}/mnist_data
rm -rf ${DIR}/constants
rm -rf ${DIR}/chkps | true |
37abc789faebea95ecb8af2da0ba94c551655d4c | Shell | junmipan/vpp | /vagrant/vagrant-start | UTF-8 | 1,948 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Interactively collect the vagrant provider, Kubernetes environment, and
# deployment scenario (exported as env vars the Vagrantfile reads), then
# bring the VMs up.
set -euo pipefail
export K8S_NODE_OS=${K8S_NODE_OS:-ubuntu}
export K8S_NODES=${K8S_NODES:-1}
echo 'Please choose vagrant provider: '
PS3='--> '
options=("VirtualBox" "VMWare_Fusion" "Quit")
select opt in "${options[@]}"
do
    case $opt in
        "VirtualBox")
            echo "You chose VirtualBox"
            export VAGRANT_DEFAULT_PROVIDER="virtualbox"
            break
            ;;
        "VMWare_Fusion")
            echo "You chose VMWare_Fusion"
            export VAGRANT_DEFAULT_PROVIDER="vmware_fusion"
            break
            ;;
        "Quit")
            echo "Exiting..."
            exit 0
            ;;
        *) echo invalid option;;
    esac
done
echo
echo 'Please choose Kubernetes environment: '
PS3='--> '
options=("Production" "Development" "Quit")
select opt in "${options[@]}"
do
    case $opt in
        "Production")
            echo "You chose Production environment"
            export K8S_DEPLOYMENT_ENV="prod"
            break
            ;;
        "Development")
            echo "You chose Development environment"
            export K8S_DEPLOYMENT_ENV="dev"
            break
            ;;
        "Quit")
            # NOTE(review): unlike the first menu, Quit here only breaks out
            # of the select, so `vagrant up` still runs -- confirm intended.
            echo "Exiting..."
            break
            ;;
        *) echo invalid option;;
    esac
done
echo
echo 'Please choose deployment scenario: '
PS3='--> '
options=("Without StealFirstNIC" "With StealFirstNIC" "Quit")
select opt in "${options[@]}"
do
    case $opt in
        "Without StealFirstNIC")
            echo "You chose deployment without StealFirstNIC"
            export K8S_DEPLOYMENT_SCENARIO="nostn"
            break
            ;;
        "With StealFirstNIC")
            echo "You chose deployment with StealFirstNIC"
            export K8S_DEPLOYMENT_SCENARIO="stn"
            break
            ;;
        "Quit")
            echo "Exiting..."
            break
            ;;
        *) echo invalid option;;
    esac
done
echo
vagrant up
| true |
a4db7e75e1145cc19c8c4864603c72cc4a07d5b2 | Shell | jgruber/icontrollx-fsi | /payload/installer | UTF-8 | 3,610 | 4 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# WK_DIR: directory containing this installer (payload root).
WK_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Well-known TMOS archive-verification public key shipped on-box.
TMOS_KEY_FILE='archive.pubkey.20160210.pem'
TMOS_KEY_DIR='/usr/lib/install'
# Tag used in every log line emitted by log().
MODULE_NAME='f5_secure_installer'
function log() {
  # Emit one timestamped log line: "<ts> - <module> - <level> - <message>".
  #   $1 - log level (DEBUG/INFO/ERROR), $2 - message text.
  # Fix: minutes are %M -- the original used %m (month), producing
  # timestamps like "HH:<month>:SS". ts is also now local to the function.
  local ts
  ts=$(date +'%Y-%m-%d %H:%M:%S,%3N')
  echo "$ts - $MODULE_NAME - $1 - $2"
}
# Verify an RPM's SHA-512 signature.
#   $1 - path to the package file, $2 - path to the detached signature.
# Prefers an embedded public_key.pem next to the installer; otherwise falls
# back to the TMOS on-box key. Returns 0 when the signature verifies.
function validate_signature() {
  # Use the TMOS onbox public PEM key in a well know directory
  key_path="${TMOS_KEY_DIR}/${TMOS_KEY_FILE}"
  # Use embedded public PEM key if it exists
  if [ -f "${WK_DIR}/public_key.pem" ]; then
    log "DEBUG" "validating with embedded public key"
    key_path="${WK_DIR}/public_key.pem"
  else
    log "DEBUG" "validating against TMOS public key ${TMOS_KEY_FILE}"
  fi
  package_name=$(basename $1)
  if [ -f $key_path ]; then
    openssl dgst -sha512 -verify $key_path -signature $2 $1 > /dev/null
    if [ "$?" -ne "0" ]; then
      log "ERROR" "${package_name} is invalid for F5 support"
      return 1
    else
      log "INFO" "${package_name} validated as F5 supported"
      return 0
    fi
  else
    log "ERROR" "public key file ${TMOS_KEY_FILE} is not on this system"
    return 1
  fi
}
# Stage a verified RPM under /var/lib/cloud/fsiverified/, show/record any
# LICENSE/EULA/support-contact files shipped alongside it, then hand off to
# package_installer.py.
#   $1 - package path, $3 - "quiet" suppresses interactive license prompts.
# NOTE(review): $2 is accepted but never referenced here -- confirm against
# the caller's argument contract.
function install_package() {
  package_name=$(basename $1)
  package_dir_name=$(basename ${package_name} .rpm)
  log "INFO" "installing ${package_name}"
  mkdir -p "/var/lib/cloud/fsiverified/${package_dir_name}"
  cp "${WK_DIR}/${package_name}" "/var/lib/cloud/fsiverified/${package_dir_name}/${package_name}"
  if [ -f "${WK_DIR}/LICENSE.txt" ]; then
    cp "${WK_DIR}/LICENSE.txt" "/var/lib/cloud/fsiverified/${package_dir_name}/LICENSE.txt"
    if [ "$3" == "quiet" ]; then
      log "INFO" "software license details in /var/lib/cloud/fsiverified/${package_dir_name}/LICENSE.txt"
    else
      echo "PLEASE READ THE LICENSE"
      more "${WK_DIR}/LICENSE.txt"
    fi
  fi
  if [ -f "${WK_DIR}/EULA.txt" ]; then
    cp "${WK_DIR}/EULA.txt" "/var/lib/cloud/fsiverified/${package_dir_name}/EULA.txt"
    if [ "$3" == "quiet" ]; then
      log "INFO" "end user license details in /var/lib/cloud/fsiverified/${package_dir_name}/EULA.txt"
    else
      # Block until the user explicitly accepts the EULA.
      echo "PLEASE ACCEPT THE END USER LICENSE AGREEMENT"
      more "${WK_DIR}/EULA.txt"
      read -n1 -p "Please type Y or y to agree to this license: "
      while [[ ! $REPLY =~ ^[Yy]$ ]]
      do
        echo ""
        read -n1 -p "Please type Y or y to agree to this license: "
      done
      echo ""
    fi
  fi
  if [ -f "${WK_DIR}/SUPPORT_CONTACT.txt" ]; then
    cp "${WK_DIR}/SUPPORT_CONTACT.txt" "/var/lib/cloud/fsiverified/${package_dir_name}/SUPPORT_CONTACT.txt"
    support_contact=$(cat ${WK_DIR}/SUPPORT_CONTACT.txt)
    log "INFO" "for support of this software contact: ${support_contact}"
  fi
  # Delegate the actual RPM installation to the Python helper.
  python "${WK_DIR}/package_installer.py" "${package_name}"
  if [ "$?" -ne "0" ]; then
    return 1
  else
    return 0
  fi
}
# Main flow: validate and install every *.rpm that has a matching
# .sha512.sig, tracking an overall exit status.
installer_exit_status=0
# NOTE(review): `[ -z $3 ]` is true when $3 is UNSET (and the message then
# prints an empty argument) -- the condition looks inverted; confirm intent.
if [ -z $3 ]; then
  log "DEBUG" "running installer with argument $3"
fi
for pkg in *.rpm; do
  # Bail out of the loop when the glob matched nothing or the sig is absent.
  [ -f "${pkg}" ] && [ -f "${pkg}.sha512.sig" ] || break
  validate_signature ${pkg} ${pkg}.sha512.sig
  if [ "$?" -ne "0" ]; then
    installer_exit_status=1
  else
    # NOTE(review): installs using the script's $1/$2/$3, not ${pkg} --
    # verify this matches install_package's expectations.
    install_package $1 $2 $3
    installer_exit_status=$?
  fi
done
if [ "$installer_exit_status" -ne "0" ]; then
  log "ERROR" "installation failed"
  exit 1
else
  log "INFO" "installation complete"
  exit 0
fi
| true |
0190d9a22684f5af4d0ad3f5f13bcc2e08bae0e2 | Shell | jfalzoi-thales/qual | /3rdParty/setup.sh | UTF-8 | 4,731 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Install all third-party packages/tools required by the qual suite.
# Each section checks whether the component is already present (installed
# binary, rpm database entry, or pip listing) and installs it from the
# bundled files if not. Must be run as root from this directory.
clear
echo
echo "****************************************************************************************"
echo "* This scrpits installs and copies all external packages, tool and features required   *"
echo "****************************************************************************************"
echo
## Directories
MAIN_WD=$(pwd)
BIN_WD="/usr/local/bin"
LIB_WD="/usr/local/lib"
## Variables
STATUS="Status:\n\t"
PACKAGE="Package status: "
INSTALLED="installed."
NOTINSTALLED="not installed."
MISSING="missing"
###############################################
## Install: Parallel Memory Bandwidth (PMBW) ##
PMBW="pmbw"
if [ -a $BIN_WD"/"$PMBW ]
then
	echo -e $STATUS $PMBW $INSTALLED
elif [ -a $MAIN_WD"/"$PMBW ]
then
	cp $MAIN_WD"/"$PMBW $BIN_WD
	echo -e $STATUS $PMBW $INSTALLED
else
	## if for some reason the executable do not exits, compile the source code and install
	cd $MAIN_WD"/"$PMBW"/src"
	./configure
	make
	make install
	echo -e $STATUS $PMBW $INSTALLED
fi
cd $MAIN_WD
echo "-----------------------------------------------------------------------------------"
##############################################
## Install: FIO                             ##
FIO="fio"
FIO_PKG="fio-2.1.10-1.el7.rf.x86_64.rpm"
cd $MAIN_WD"/"$FIO
# Empty grep result means the rpm is not in the package database yet.
INST=$(rpm -qa | grep "fio-2.1.10-1.el7.rf.x86_64")
if [[ $INST == "" ]]
then
	rpm -iv $FIO_PKG
	echo -e $STATUS $FIO $INSTALLED
else
	echo -e $STATUS $FIO $INSTALLED
fi
cd $MAIN_WD
echo "-----------------------------------------------------------------------------------"
###############################################
## Install: Google Protocol Buffers library, ##
GPB="gpb"
GPB_PKG="protobuf-2.6.1-py2.7.egg"
INST=$(pip list | grep "protobuf")
if [[ $INST == "" ]]
then
	cd $MAIN_WD"/"$GPB
	easy_install -iv $GPB_PKG
	cd $MAIN_WD
	echo -e $STATUS "ProtoBuf" $INSTALLED
else
	echo -e $STATUS "ProtoBuf" $INSTALLED
fi
echo "-----------------------------------------------------------------------------------"
###############################################
## Install: iPerf                            ##
IPERF="iperf3"
IPERF_RPM="iperf3-3.1.3-1.el7.x86_64.rpm"
INST=$(rpm -qa | grep "iperf3-3.1.3-1.el7.x86_64")
if [[ $INST == "" ]]
then
	cd $MAIN_WD"/"$IPERF
	rpm -iv $IPERF_RPM
	cd $MAIN_WD
	echo -e $STATUS $IPERF $INSTALLED
else
	echo -e $STATUS $IPERF $INSTALLED
fi
echo "-----------------------------------------------------------------------------------"
###############################################
## Install: Lookbusy                         ##
LOOKBUSY="lookbusy"
LOOKBUSY_DIR="lookbusy/lookbusy-1.4"
# Lookbusy ships prebuilt; just copy the binary into place if absent.
if [ -a $BIN_WD"/"$LOOKBUSY ]
then
	echo -e $STATUS $LOOKBUSY $INSTALLED
else
	cd $MAIN_WD"/"$LOOKBUSY_DIR
	cp $LOOKBUSY $BIN_WD
	cd $MAIN_WD
	echo -e $STATUS $LOOKBUSY $INSTALLED
fi
echo "-----------------------------------------------------------------------------------"
###############################################
## Install: pySerial                         ##
PYSERIAL="pyserial"
PYSERIAL_PKG="pyserial-2.6-5.el7.noarch.rpm"
INST=$(rpm -qa | grep "pyserial-2.6-5.el7.noarch")
if [[ $INST == "" ]]
then
	cd $MAIN_WD"/pyserial"
	rpm -iv $PYSERIAL_PKG
	cd $MAIN_WD
	echo -e $STATUS $PYSERIAL $INSTALLED
else
	echo -e $STATUS $PYSERIAL $INSTALLED
fi
echo "-----------------------------------------------------------------------------------"
###############################################
## Install: ZMQ                              ##
# ZeroMQ stack: OpenPGM transport, the zeromq3 library, then the Python
# bindings (pyzmq).
ZMQ="ZMQ"
OPENPGM_PKG="openpgm-5.2.122-2.el7.x86_64.rpm"
ZMQ_PKG="zeromq3-3.2.5-1.el7.x86_64.rpm"
PYZMQ_PKG="pyzmq-15.2.0-py2.7-linux-x86_64.egg"
cd $MAIN_WD"/zmq"
INST=$(rpm -qa | grep "openpgm-5.2.122-2.el7.x86_64")
if [[ $INST == "" ]]
then
	sudo rpm -Uv $OPENPGM_PKG
	echo -e $STATUS "OpenGM" $INSTALLED
else
	echo -e $STATUS "OpenGM" $INSTALLED
fi
INST=$(rpm -qa | grep "zeromq3-3.2.5-1.el7.x86_64")
if [[ $INST == "" ]]
then
	sudo rpm -Uv $ZMQ_PKG
	echo -e $STATUS "ZeroMQ" $INSTALLED
else
	echo -e $STATUS "ZeroMQ" $INSTALLED
fi
INST=$(pip list | grep "pyzmq")
if [[ $INST == "" ]]
then
	easy_install $PYZMQ_PKG 2> /dev/null
	echo -e $STATUS "PyZMQ" $INSTALLED
else
	echo -e $STATUS "PyZMQ" $INSTALLED
fi
cd $MAIN_WD
echo "-----------------------------------------------------------------------------------"
echo
echo "****************************************************************************************"
echo "* DONE                                                                                 *"
echo "****************************************************************************************"
echo
###############################################
fa22d689708b5a2ffeef6bfb245df0ea596e57f8 | Shell | vinicius-pirees/git-to-s3 | /start.sh | UTF-8 | 1,058 | 3.8125 | 4 | [] | no_license | #!/bin/bash
# Container entrypoint: clone a git repo (over ssh or http with basic-auth
# credentials), strip its .git metadata, and exec the S3 sync script.
# $1 selects the mode: 'git-ssh' or 'git-http'.
set -e
# Required configuration; ':?' aborts with the message when unset/empty.
: ${ACCESS_KEY:?"ACCESS_KEY env variable is required"}
: ${SECRET_KEY:?"SECRET_KEY env variable is required"}
: ${S3_PATH:?"S3_PATH env variable is required"}
export DATA_PATH=${DATA_PATH:-/data/}
# Append the AWS credentials to the s3cmd config used by sync.sh.
echo "access_key=$ACCESS_KEY" >> /root/.s3cfg
echo "secret_key=$SECRET_KEY" >> /root/.s3cfg
echo "Parameter is $1"
if [[ "$1" == 'git-ssh' ]]; then
  echo "Backing up git repos..."
  : ${GIT_REPO:?"GIT_REPO env variable is required"}
  rm -rf $DATA_PATH
  git clone $GIT_REPO $DATA_PATH
  rm -rf $DATA_PATH/.git
  exec /sync.sh
elif [[ "$1" == 'git-http' ]]; then
  echo "Backing up git repos..."
  : ${GIT_REPO:?"GIT_REPO env variable is required"}
  : ${GIT_USER:?"GIT_USER env variable is required"}
  : ${GIT_PASSWORD:?"GIT_PASSWORD env variable is required"}
  # Inject user:password into the repo URL (https://user:pass@host/...).
  # NOTE(review): credentials in the URL are visible to `ps` while git
  # runs -- consider a git credential helper instead.
  credentials=https://$GIT_USER:$GIT_PASSWORD@
  GIT_URL=$(echo ${GIT_REPO//https:\/\//$credentials})
  rm -rf $DATA_PATH
  git clone $GIT_URL $DATA_PATH
  rm -rf $DATA_PATH/.git
  exec /sync.sh
else
  echo "Not a valid argument!"
fi
| true |
4c71269d326db44ef96d05151becfb616682b9d3 | Shell | grokthis/mu | /translate_subx_emulated | UTF-8 | 1,484 | 3.125 | 3 | [] | no_license | #!/bin/sh
# Translate SubX by running the self-hosted translator in emulated mode on
# Linux or BSD or Mac.
#
# Possible knobs:
#   Whether to run a phase natively or in emulated mode.
#     Just always emulate for now since we debug on non-Linux.
#   Whether to stop after a phase.
#     Just always run all phases, but print out phases so it's clear where an
#     error happens.
#   Whether to trace a phase. Whether to always trace or rerun with tracing
#   enabled after an error.
#     Leave tracing to other scripts. We save intermediate files so it's easy
#     to rerun a single phase afterwards.
#   Whether to run a phase with debug information. (Need to juggle multiple
#   sets of debug files.)
#     Again, that's for subsequent scripts.
set -e
./build
# Each phase consumes the previous phase's output file (a.*), so any single
# phase can be re-run in isolation for debugging.
echo "  braces"
cat $* |./bootstrap_bin run apps/braces > a.braces
echo "  calls"
cat a.braces |./bootstrap_bin run apps/calls > a.calls
echo "  sigils"
cat a.calls |./bootstrap_bin run apps/sigils > a.sigils
echo "  tests"
cat a.sigils |./bootstrap_bin run apps/tests > a.tests
echo "  dquotes"
cat a.tests |./bootstrap_bin run apps/dquotes > a.dquotes
echo "  assort"
cat a.dquotes |./bootstrap_bin run apps/assort > a.assort
echo "  pack"
cat a.assort |./bootstrap_bin run apps/pack > a.pack
echo "  survey"
cat a.pack |./bootstrap_bin run apps/survey > a.survey
echo "  hex"
cat a.survey |./bootstrap_bin run apps/hex > a.elf
# Final phase emits a runnable ELF binary.
chmod +x a.elf
| true |
9a9ba59459b80a909c67cdcf70b351d86b8dd854 | Shell | smile121621/shell | /example/get_array_all_part.sh | UTF-8 | 496 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# get_array_all_part.sh
function getArr1 {
    # Demo: receive an array's elements as positional parameters and print them.
    # Fix: copy "$@" into a real local array instead of flattening all
    # arguments into a single space-joined string.
    local newarray=("$@")
    echo "the new array value is: ${newarray[*]}"
}
# Demo driver: pass every element of myarray to getArr1 as separate words.
myarray=(1 2 3 4 5 6)
echo "the original array is ${myarray[*]}"
getArr1 ${myarray[*]}
function func1 {
local sum=0
local newarray
newarray=($(echo "$@"))
for num in ${newarray[*]}
do
sum=$[ $sum + $num ]
done
echo $sum
}
arg1=${myarray[*]}
result=$( func1 $arg1 )
echo "result: "$result
a=${myarray[*]}
echo $a
| true |
9456c2ae9511d79f732375d62895a55f74c10db1 | Shell | aaivu/aaivu-machine-trans-eng-sin | /src/Transformer-baseline/scripts/preprocess-ensi.sh | UTF-8 | 472 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Binarize the en-si parallel corpus with fairseq-preprocess.
# All paths are resolved relative to this script so it can be run from anywhere.
SRC=en
TGT=si
ROOT=$(dirname "$0")
DATA=$ROOT/data
DATABIN=$ROOT/data-bin/${SRC}_${TGT}
mkdir -p "$DATABIN"   # quoted: survives a checkout path containing spaces
TRAIN_SET="train-dataset/train"
VALID_SET="valid-dataset/valid"
TEST_SET="test-dataset/test"
# binarize data (joined source/target dictionary, 4 worker processes)
fairseq-preprocess \
    --source-lang "$SRC" --target-lang "$TGT" \
    --trainpref "$DATA/${TRAIN_SET}" --validpref "$DATA/${VALID_SET}" --testpref "$DATA/${TEST_SET}" \
    --destdir "$DATABIN" \
    --joined-dictionary \
    --workers 4
| true |
fd4a979b20b4e18680ef5d8a3f37411f0f1d3b12 | Shell | abhishekshree/mookit-scraping | /run.sh | UTF-8 | 281 | 2.890625 | 3 | [] | no_license | #!/usr/bin/bash
# Rename the downloaded HTML page, extract the course title from it, run the
# scraper ($1 is forwarded to it), and file the outputs under a directory
# named after the course.
# NOTE(review): this assumes exactly one *.html file is present in the
# current directory; with several matches the rename below fails.
mv *.html ./index.html
# Pull the text between <title id="courseTitle"> and </title>; a single sed
# process strips both tags (the old cat | grep | sed | sed spawned four).
test=$(grep -oE '<title id="courseTitle">.*</title>' index.html | sed -e 's/<title id="courseTitle">//' -e 's/<\/title>//')
mkdir -pv "$test"
python3 scraper.py "$1"
mv data.csv ./"$test"
mv index.html ./"$test"
echo "Task Completed." | true |
1e2b01c97c4bfadd1098b748d77b94e06cedb29a | Shell | sjswuitchik/duck_comp_gen | /05_CompPopGen/03_popGen/hyphy/05_analyses.sh | UTF-8 | 1,036 | 2.65625 | 3 | [] | no_license | # currently a janky mix of manipulation in R and awk
# Activate the conda environment providing R, then tidy the aBSREL output.
conda activate r
Rscript clean_absrel.R
# Re-header the cleaned CSV and pull the first hetAtr_* token of each row
# out into its own hetAtr_gene column.
awk 'BEGIN { FS = OFS = ","} NR == 1 {print "file,total_branches,sig_branches,hetAtr_gene"; next} { match($0, /hetAtr_[^,]+/); hetAtr_gene = substr($0, RSTART, RLENGTH); print $1, $2, $3, hetAtr_gene}' abs_cleanR.csv > abs_clean_final.csv
Rscript absrel_ortho_wrangling.R
# download Entrez ID records from https://www.ncbi.nlm.nih.gov/sites/batchentrez using galGal_protIDs.tsv, and 'Send To' File > GenPept with 'Show GI' option
./parse_gp.awk sequence.gp > entrezIDs.tsv
# download the background records from NCBI datasets in split batches
split -l 3000 all_galGal_protIDs.tsv all_galGal_prot
# Convert every downloaded GenPept file, then pool all per-batch tables
# into one background Entrez-ID table.
for file in *.gp;
do
./parse_gp.awk $file > ${file}.tsv
done
for file in *.tsv;
do
cat $file >> bg_entrezIDs.tsv
done
Rscript go_analyses.R
#### GSE approach misc - clean later
# Strip trailing version suffixes (".1", ".2", ...) from column 2.
awk 'BEGIN {FS = OFS = "\t"} {gsub(/\.[0-9]+/, "", $2); print}' absrel_ogs.tsv > absrel_ogs_stripped.tsv
split -l 1000 galGal_protIDs_all.tsv galGal_prot
| true |
9b6150e1abebfbb18af08941b9edb69dad2bb095 | Shell | indigo-dc/udocker | /utils/make_udockertar.sh | UTF-8 | 1,245 | 3.140625 | 3 | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | #!/bin/bash
# ##################################################################
#
# Make udocker tarball for release
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ##################################################################

# Work from the repository root (this script lives in utils/).
cd .. || { echo "cannot cd to repository root" >&2; exit 1; }

# Release version comes from udocker/__init__.py: __version__ = "x.y.z"
VER=$(grep "__version__" udocker/__init__.py | cut -d'"' -f 2)
echo "==========================================================="
echo "* This script produces udocker-${VER}.tar.gz, for release *"
echo "=========================================================="

# Drop bytecode caches; find -exec/-delete handles paths with spaces, which
# the old `rm -rf $(find ...)` form did not.
find . -name '*pycache*' -prune -exec rm -rf {} +
find . -name '*.pyc' -delete

mkdir -p "udocker-${VER}"
cp -prv udocker "udocker-${VER}/"
cd "udocker-${VER}/udocker/" || exit 1
# Convenience symlink so the package directory doubles as the CLI entry point.
ln -s maincmd.py udocker
cd ../../ || exit 1
tar zcvf "udocker-${VER}.tar.gz" "udocker-${VER}"
rm -rf "udocker-${VER}"
| true |
cb38cd497edabe79bb8b52b679660f58cd0d802f | Shell | talshadow/helpfull_scripts | /new_gcc_ubuntu.sh | UTF-8 | 439 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Install GCC ${version_hi} from the ubuntu-toolchain-r PPA and register it
# with update-alternatives at priority ${version_hi}${version_lo}.
version_hi=7
version_lo=3
cd /tmp || exit 1
sudo add-apt-repository ppa:ubuntu-toolchain-r/test
sudo apt-get update
sudo apt-get install -y "gcc-$version_hi" "g++-$version_hi"
# Register gcc as an alternative, dragging g++ along as a slave link so the
# compiler pair always switches together.
sudo update-alternatives --install /usr/bin/gcc gcc "/usr/bin/gcc-$version_hi" "$version_hi$version_lo" --slave /usr/bin/g++ g++ "/usr/bin/g++-$version_hi"
sudo update-alternatives --config gcc
sudo update-alternatives --config g++
| true |
42d469ca06e1faa7c87dcbb87758086043706859 | Shell | continuum-media/tools | /ssht | UTF-8 | 2,147 | 3.9375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
### START OF CODE GENERATED BY Argbash v2.6.1 ###
# Argbash is a bash code generator used to get arguments parsing right.
# Argbash is FREE SOFTWARE, see https://argbash.io for more info
# Generated online by https://argbash.io/generate
# Print $1 to stderr (plus usage when _PRINT_HELP=yes) and exit with $2 (default 1).
die()
{
local _ret=$2
test -n "$_ret" || _ret=1
test "$_PRINT_HELP" = yes && print_help >&2
echo "$1" >&2
exit ${_ret}
}
_positionals=()
print_help ()
{
printf 'Usage: %s <pem> <tunnel-user> <tunnel-ip> <destination-user> <destination-ip>\n' "$0"
printf '\t%s\n' "<pem>: Path to pem"
printf '\t%s\n' "<tunnel-user>: Tunnel User"
printf '\t%s\n' "<tunnel-ip>: Tunnel IP"
printf '\t%s\n' "<destination-user>: Destination User"
printf '\t%s\n' "<destination-ip>: Destination IP"
}
# Collect every argument as a positional parameter (no flags are defined).
parse_commandline ()
{
while test $# -gt 0
do
_positionals+=("$1")
shift
done
}
# Require exactly five positional arguments.
handle_passed_args_count ()
{
_required_args_string="'pem', 'tunnel-user', 'tunnel-ip', 'destination-user' and 'destination-ip'"
test ${#_positionals[@]} -ge 5 || _PRINT_HELP=yes die "FATAL ERROR: Not enough positional arguments - we require exactly 5 (namely: $_required_args_string), but got only ${#_positionals[@]}." 1
test ${#_positionals[@]} -le 5 || _PRINT_HELP=yes die "FATAL ERROR: There were spurious positional arguments --- we expect exactly 5 (namely: $_required_args_string), but got ${#_positionals[@]} (the last one was: '${_positionals[*]: -1}')." 1
}
# Copy the collected positionals into the named _arg_* variables used below.
assign_positional_args ()
{
_positional_names=('_arg_pem' '_arg_tunnel_user' '_arg_tunnel_ip' '_arg_destination_user' '_arg_destination_ip' )
for (( ii = 0; ii < ${#_positionals[@]}; ii++))
do
eval "${_positional_names[ii]}=\${_positionals[ii]}" || die "Error during argument parsing, possibly an Argbash bug." 1
done
}
parse_commandline "$@"
handle_passed_args_count
assign_positional_args
### END OF CODE GENERATED BY Argbash ###
# Open a background tunnel through the jump host: local port 2222 forwards to
# port 22 on the destination host.
# NOTE(review): wrapping the backgrounded ssh in $(...) substitutes the
# subshell's (empty) output as a command; the tunnel still starts, but the
# construct looks unintentional and deserves a second look.
$(ssh -o "StrictHostKeyChecking no" -i $_arg_pem -L 2222:$_arg_destination_ip:22 $_arg_tunnel_user@$_arg_tunnel_ip -N &)
# Interactive session to the destination through the forwarded local port.
ssh -o "StrictHostKeyChecking no" -p 2222 -i $_arg_pem $_arg_destination_user@localhost
# Tear the tunnel down. NOTE(review): matching via `ps | grep` can kill any
# process whose command line contains the user@host string; pkill -f with a
# tighter pattern would be safer.
kill $(ps aux | grep $_arg_tunnel_user@$_arg_tunnel_ip | grep -v grep | awk '{print $2}')
| true |
576cb2c4f9c9098e097638a69ff22d65b6daf55e | Shell | mateoflorido/Vaccines-COVID19 | /scripts/run_test.sh | UTF-8 | 669 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Our custom function
# Pipe the file `sample` into the EPSClient Java application.
# NOTE(review): the JVM path, the IntelliJ javaagent jar and the classpath
# are hard-coded to one developer's machine; this script is not portable.
cust_func(){
cat sample | /usr/lib/jvm/java-1.11.0-openjdk-amd64/bin/java -javaagent:/home/mateo/.local/share/JetBrains/Toolbox/apps/IDEA-U/ch-0/202.7660.26/lib/idea_rt.jar=35725:/home/mateo/.local/share/JetBrains/Toolbox/apps/IDEA-U/ch-0/202.7660.26/bin -Dfile.encoding=UTF-8 -classpath /home/mateo/Vaccines-COVID19/src/EPSClient/target/classes org.flosan.EPSClient.view.EPSClientExec
}
# Launch 100 concurrent clients (the loop bound below, not 5 as an older
# comment claimed).
for i in {1..100}
do
cust_func & # Put a function in the background
done
## Put all cust_func in the background and bash
## would wait until those are completed
## before displaying all done message
wait
echo "All done"
| true |
5a986d26ed3d3d7436f4c1be6b53fb5040095e1e | Shell | basherpm/basher | /libexec/basher-_unlink-bins | UTF-8 | 669 | 3.515625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Remove the installed bin links for the given basher package.
package="$1"
# Prefer the package's own manifest: package.sh may define a colon-separated
# BINS list naming the executables it installed.
if [ -e "$BASHER_PACKAGES_PATH/$package/package.sh" ]; then
source "$BASHER_PACKAGES_PATH/$package/package.sh"
IFS=: read -ra bins <<< "$BINS"
fi
# No manifest list: fall back to the package's bin/ directory, or, failing
# that, to any executable file or symlink at the package root.
# ([ -z "$bins" ] inspects the first array element, which is empty exactly
# when nothing was read above.)
if [ -z "$bins" ]; then
if [ -e "$BASHER_PACKAGES_PATH/$package/bin" ]; then
bins=("$BASHER_PACKAGES_PATH/$package"/bin/*)
bins=("${bins[@]##*/}")
bins=("${bins[@]/#/bin/}")
else
bins=($(find "$BASHER_PACKAGES_PATH/$package" -maxdepth 1 -perm -u+x -type f -or -type l))
bins=("${bins[@]##*/}")
fi
fi
# Delete each link from the install dir; REMOVE_EXTENSION=true means the
# links were created without their file extension.
for bin in "${bins[@]}"
do
name="${bin##*/}"
if ${REMOVE_EXTENSION:-false}; then
name="${name%%.*}"
fi
rm -f "$BASHER_INSTALL_BIN/${name}"
done
| true |
e130d44af804a728356c57bbd4f10e5002ed6e20 | Shell | maxpark/YOLOv4-Vitis-AI | /workflow/7_run_graph.sh | UTF-8 | 846 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
###############################################################################################################
# Run the tensorFlow graph to check its performances.
###############################################################################################################
# Parameters
# NOTE(review): OUTPUT_FILE is exported but not referenced by the command
# below (which uses ${GRAPH_OUTPUT}/output.txt) -- confirm run_graph.py
# actually reads it from the environment.
export OUTPUT_FILE=./output/graph_eval/output.txt
###############################################################################################################
# Run the graph
# All remaining parameters (model location, node names, thresholds, dataset
# folder) come from environment variables exported by the workflow's
# configuration step.
python ${SRC_EVAL}/run_graph.py \
--graph ${TF_MODEL_DIR}/${TF_FROZEN_GRAPH} \
--input ${INPUT_NODE_NAME} \
--outputs ${OUTPUT_NODE_NAMES} \
--anchors ${ANCHORS} \
--classes ${CLASSES} \
--det_thresh ${CONF_THRESHOLD} \
--nms_thresh ${NMS_THRESHOLD} \
--dataset ${INPUT_FOLDER} \
--img_format ${IMG_FORMAT} \
--results ${GRAPH_OUTPUT}/output.txt
| true |
3da2349223ecb3fadd5daae10261b52247ed1481 | Shell | cha63506/castorbox | /script/meta/core/files/source/rootfs/etc/rc.d/init.d/video | UTF-8 | 7,188 | 3.109375 | 3 | [] | no_license | #!/bin/sh
################################################################################
# video
#
# The script configures the video.
################################################################################
. /etc/rc.d/functions
# Configure video: load the X kernel modules, then patch the mplayer/xine/
# XvMC config templates and the MythTV settings DB according to the detected
# driver and the MM_* variables sourced from /etc/rc.d/functions.
start() {
local kernel_module
local PREFERRED_MPEG2_DECODER
mm_message_output info 'configuring video ...'
# Load X kernel modules.
for kernel_module in ${MM_X_KERNEL_MODULES} ; do
/sbin/modprobe ${kernel_module}
if /usr/bin/test $? -ne 0 ; then
mm_message_output err "error: failed to load kernel module: ${kernel_module}"
exit 1
fi
done
if /usr/bin/test -z "${MM_X_DRIVER}" ; then
mm_message_output err "error: no supported video hardware found."
exit 1
fi
# Substitute the monitor aspect ratio into the mplayer config template.
if /usr/bin/test "${MM_VIDEO_ASPECT_RATIO}" = "4:3" ; then
/bin/sed -i "s%@MONITORASPECT@%4:3%" /root/.mplayer/config
elif /usr/bin/test "${MM_VIDEO_ASPECT_RATIO}" = "16:9" ; then
/bin/sed -i "s%@MONITORASPECT@%16:9%" /root/.mplayer/config
else
mm_message_output err 'error: invalid value for MM_VIDEO_ASPECT_RATIO.'
exit 1
fi
# Enable/disable deinterlacing consistently in MythTV, xine and mplayer.
if /usr/bin/test "${MM_VIDEO_DEINTERLACE_ENABLED}" = "no" ; then
mm_mythdb_setting_update Deinterlace 0
/bin/sed -i "s%@DEINTERLACE_BY_DEFAULT@%0%" /root/.xine/config
/bin/sed -i "s%@XVMC_BOB_DEINTERLACING@%0%" /root/.xine/config
/bin/sed -i "s%@DEINT_BOB@%%" /root/.mplayer/config
elif /usr/bin/test "${MM_VIDEO_DEINTERLACE_ENABLED}" = "yes" ; then
mm_mythdb_setting_update Deinterlace 1
mm_mythdb_setting_update DeinterlaceFilter bobdeint
/bin/sed -i "s%@DEINTERLACE_BY_DEFAULT@%1%" /root/.xine/config
/bin/sed -i "s%@XVMC_BOB_DEINTERLACING@%1%" /root/.xine/config
/bin/sed -i "s%@DEINT_BOB@%:deint-bob%" /root/.mplayer/config
else
mm_message_output err 'error: invalid value for MM_VIDEO_DEINTERLACE_ENABLED.'
exit 1
fi
# Per-driver setup: select the XvMC library, the preferred MPEG2 decoder,
# and comment/uncomment (@XVMC_TRUE@/@XVMC_FALSE@) the XvMC lines in the
# xine and mplayer config templates.
PREFERRED_MPEG2_DECODER=''
case "${MM_X_DRIVER}" in
intel_i810)
/bin/sed -i "s%@MM_XVMC_LIB@%I810XvMC%" /etc/X11/XvMCConfig
PREFERRED_MPEG2_DECODER='xvmc'
/bin/sed -i "s%@VIDEO_DRIVER@%xvmc%" /root/.xine/config
/bin/sed -i "s%@XVMC_TRUE@%%" /root/.xine/config
/bin/sed -i "s%@XVMC_FALSE@%\#%" /root/.xine/config
/bin/sed -i "s%@XVMC_TRUE@%%" /root/.mplayer/config
/bin/sed -i "s%@XVMC_FALSE@%\#%" /root/.mplayer/config
;;
intel_i830)
PREFERRED_MPEG2_DECODER='ffmpeg'
/bin/sed -i "s%@VIDEO_DRIVER@%xv%" /root/.xine/config
/bin/sed -i "s%@XVMC_TRUE@%\#%" /root/.xine/config
/bin/sed -i "s%@XVMC_FALSE@%%" /root/.xine/config
/bin/sed -i "s%@XVMC_TRUE@%\#%" /root/.mplayer/config
/bin/sed -i "s%@XVMC_FALSE@%%" /root/.mplayer/config
;;
intel_i915)
PREFERRED_MPEG2_DECODER='ffmpeg'
/bin/sed -i "s%@VIDEO_DRIVER@%xv%" /root/.xine/config
/bin/sed -i "s%@XVMC_TRUE@%\#%" /root/.xine/config
/bin/sed -i "s%@XVMC_FALSE@%%" /root/.xine/config
/bin/sed -i "s%@XVMC_TRUE@%\#%" /root/.mplayer/config
/bin/sed -i "s%@XVMC_FALSE@%%" /root/.mplayer/config
;;
nvidia)
/bin/sed -i "s%@MM_XVMC_LIB@%XvMCNVIDIA_dynamic%" /etc/X11/XvMCConfig
# this should really be the more efficient 'libmpeg2',
# but it causes stalls during when fast forwarding and rewinding.
PREFERRED_MPEG2_DECODER='ffmpeg'
/bin/sed -i "s%@VIDEO_DRIVER@%xv%" /root/.xine/config
/bin/sed -i "s%@XVMC_TRUE@%\#%" /root/.xine/config
/bin/sed -i "s%@XVMC_FALSE@%%" /root/.xine/config
/bin/sed -i "s%@XVMC_TRUE@%\#%" /root/.mplayer/config
/bin/sed -i "s%@XVMC_FALSE@%%" /root/.mplayer/config
;;
via)
/bin/sed -i "s%@MM_XVMC_LIB@%viaXvMC%" /etc/X11/XvMCConfig
PREFERRED_MPEG2_DECODER='xvmc-vld'
/bin/sed -i "s%@VIDEO_DRIVER@%xxmc%" /root/.xine/config
/bin/sed -i "s%@XVMC_TRUE@%%" /root/.xine/config
/bin/sed -i "s%@XVMC_FALSE@%\#%" /root/.xine/config
/bin/sed -i "s%@XVMC_TRUE@%%" /root/.mplayer/config
/bin/sed -i "s%@XVMC_FALSE@%\#%" /root/.mplayer/config
;;
via_pro)
/bin/sed -i "s%@MM_XVMC_LIB@%viaXvMCPro%" /etc/X11/XvMCConfig
PREFERRED_MPEG2_DECODER='xvmc-vld'
/bin/sed -i "s%@VIDEO_DRIVER@%xxmc%" /root/.xine/config
/bin/sed -i "s%@XVMC_TRUE@%%" /root/.xine/config
/bin/sed -i "s%@XVMC_FALSE@%\#%" /root/.xine/config
/bin/sed -i "s%@XVMC_TRUE@%%" /root/.mplayer/config
/bin/sed -i "s%@XVMC_FALSE@%\#%" /root/.mplayer/config
;;
esac
# Store the decoder choice in the MythTV DB.  MythTV 0.18.2 used three
# separate boolean settings; newer versions take a single
# PreferredMPEG2Decoder value.
case "${PREFERRED_MPEG2_DECODER}" in
ffmpeg)
if /usr/bin/test "${MM_VERSION_MYTH}" = "0.18.2" ; then
mm_mythdb_setting_update UseMPEG2Dec 0
mm_mythdb_setting_update UseXVMC 0
mm_mythdb_setting_update UseXvMcVld 0
else
mm_mythdb_setting_update PreferredMPEG2Decoder ${PREFERRED_MPEG2_DECODER}
fi
;;
libmpeg2)
if /usr/bin/test "${MM_VERSION_MYTH}" = "0.18.2" ; then
mm_mythdb_setting_update UseMPEG2Dec 1
mm_mythdb_setting_update UseXVMC 0
mm_mythdb_setting_update UseXvMcVld 0
else
mm_mythdb_setting_update PreferredMPEG2Decoder ${PREFERRED_MPEG2_DECODER}
fi
;;
xvmc)
if /usr/bin/test "${MM_VERSION_MYTH}" = "0.18.2" ; then
mm_mythdb_setting_update UseMPEG2Dec 0
mm_mythdb_setting_update UseXVMC 1
mm_mythdb_setting_update UseXvMcVld 0
else
mm_mythdb_setting_update PreferredMPEG2Decoder ${PREFERRED_MPEG2_DECODER}
fi
;;
xvmc-vld)
if /usr/bin/test "${MM_VERSION_MYTH}" = "0.18.2" ; then
mm_mythdb_setting_update UseMPEG2Dec 0
mm_mythdb_setting_update UseXVMC 0
mm_mythdb_setting_update UseXvMcVld 1
else
mm_mythdb_setting_update PreferredMPEG2Decoder ${PREFERRED_MPEG2_DECODER}
fi
;;
esac
return 0
}
# Nothing to undo at shutdown; present so the init framework can call stop.
stop() {
return 0
}
# Init-script entry point: dispatch on the start/stop action argument.
case $1 in
start) start ;;
stop) stop ;;
esac
exit 0
| true |
0cf879e589325fed4cb5c119c849c6e1b1932c35 | Shell | cms-sw/cms-bot | /utils/cmssw-change2branch.sh | UTF-8 | 1,803 | 3.9375 | 4 | [] | no_license | #!/bin/bash
#This is a utility script which can go through your uncommited cmssw changes and separate them
#for each category. After running this script you will have cmssw/src-category directory which
#should only contain changes for that category. If a package belong to multiple cmssw categories
# your you will have src-catA-catB directory.
# your original changes will be copied in to src.orig directory.
# Print usage with an example invocation and abort.
function usage_and_exit(){
echo "Usage: $0 '<commit-msg>' '<branch-suffix>'\n"
echo "For example:"
echo "$0 '[GCC12] Fix build warnings' 'gcc12-warn1'"
exit 1
}
# $1: commit message (the category tag is prepended per branch)
# $2: suffix used for the per-category branch names
COMMIT="$1"
BRANCH="$2"
[ "${COMMIT}" = "" ] && usage_and_exit
[ "${BRANCH}" = "" ] && usage_and_exit
SCRIPT_DIR=$(realpath $(dirname $0))
# Keep the user's uncommitted tree as src.orig and a pristine checkout as
# src.init; both are reused on later runs.
cd $CMSSW_BASE
scram b clean >/dev/null 2>&1
if [ ! -d src.orig ] ; then mv src src.orig ; fi
rm -rf src
if [ ! -d src.init ] ; then
mkdir src
git cms-init
mv src src.init
fi
cd src.orig
# package2category.py maps the changed files to lines of the form
# "category pkg [pkg ...]"; each line drives one iteration below.
git diff --name-only | ${SCRIPT_DIR}/../package2category.py | while read -r line ; do
cat=$(echo $line | awk '{print $1}')
# Skip categories that already have a src-<cat> tree from a previous run.
[ ! -d $CMSSW_BASE/src-${cat} ] || continue
ucat=$(echo $cat | tr '[a-z]' '[A-Z]')
pkgs=$(echo $line | sed 's|^[^ ][^ ]* ||')
# Fresh working tree containing only this category's packages.
pushd $CMSSW_BASE
scram b clean >/dev/null 2>&1
rm -rf src
cp -r src.init src
git cms-addpkg $pkgs
popd
# Copy the user's modifications (or deletions) into the new tree.
for f in $(git diff --name-only) ; do
[ -e ../src/$f ] || continue
if [ -e $f ] ; then
cp $f ../src/$f
else
rm -f ../src/$f
fi
done
# Commit, run the code formatter, and push a per-category branch.
pushd $CMSSW_BASE/src
git commit -a -m "[${ucat}] $COMMIT"
scram build -j 10 code-format
if [ $(git diff --name-only | wc -l) -gt 0 ] ; then
git commit -a -m 'apply code format'
fi
git checkout -b "${cat}-${BRANCH}"
git push my-cmssw "${cat}-${BRANCH}"
popd
mv $CMSSW_BASE/src $CMSSW_BASE/src-${cat}
done
| true |
0505cfef959bf8eb927e9a3b6fed0149691f6f32 | Shell | florin1288/maven-sample | /site/src/main/resources/bin/startup.sh | UTF-8 | 426 | 3.71875 | 4 | [] | no_license | #!/usr/bin/env bash
# Name of the systemd unit to manage; override through the sdc_service
# environment variable (defaults to sdc.service).
SDC_SERVICE=${sdc_service-"sdc.service"}

# Refuse to continue when systemd does not list the unit at all.
if ! systemctl list-units | grep -Fq ${SDC_SERVICE}; then
echo "Service ${SDC_SERVICE} doesn't exist"
exit 1
fi

# Refuse to continue when the unit is already in the "running" sub-state.
sub_state=$(systemctl show -p SubState ${SDC_SERVICE} | sed 's/SubState=//g')
if [[ "${sub_state}" == "running" ]]; then
echo "Service ${SDC_SERVICE} is already running"
exit 1
fi
systemctl start ${SDC_SERVICE} | true |
56e1d4c3a250d46f44535a2fd2496a0e91c9cc31 | Shell | opennetworkinglab/spring-open | /old-scripts/Sprint-4/cleanup.sh | UTF-8 | 840 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/sh
# NOTE(review): CLUSTER is assigned but never referenced in this script.
CLUSTER=/home/masayosi/bin/hosts-3x3.txt
# Restore the virtual link on host $1 / device $2 by setting netem packet
# loss back to 0%.  `tc qdisc change` is used when a netem qdisc already
# exists, `add` otherwise; each remote command is echoed before it runs.
function ilink_up {
echo "add link at $2"
n=`dsh -w $1 "sudo tc qdisc show dev $2" | grep netem | wc -l`
if [ $n -eq 1 ]; then
echo "dsh -w $1 sudo tc qdisc change dev $2 root netem loss 0%"
dsh -w $1 "sudo tc qdisc change dev $2 root netem loss 0%"
else
echo "dsh -w $1 sudo tc qdisc add dev $2 root netem loss 0%"
dsh -w $1 "sudo tc qdisc add dev $2 root netem loss 0%"
fi
echo "done"
}
# Bring the emulated inter-node links back up.
ilink_up onos9vpc tapa0
ilink_up onos10vpc tapb0
ilink_up onos10vpc tapb1
ilink_up onos11vpc tapc0
#echo "stopping mininet"
#dsh -g onos 'sudo mn -c'
# Stop ONOS and Cassandra on every node in the "onos" dsh group, then wipe
# the Cassandra data directory.
echo "stopping ONOS"
dsh -g onos 'cd ONOS; ./start-onos.sh stop'
echo "stopping Cassandra"
dsh -g onos 'cd ONOS; ./start-cassandra.sh stop'
echo "Removing Cassandra DB"
dsh -g onos 'sudo rm -rf /var/lib/cassandra/*'
| true |
3e8e5545102707c6873e4c9eec528d9b17b993d8 | Shell | LaiWang2020/SWE266 | /github-add-user.sh | UTF-8 | 3,197 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Collaborator list, add, remove from a repository
# (c) 2015 miraculixx
# Author: github.com/miraculixx
# MIT License, see below
#
# Adds, removes or lists collaborators on one or more GitHub repositories via
# the REST API.  Missing inputs (-H token, -r repos) are prompted for.

# Print usage information.
function help {
echo "Add collaborators to one or more repositories on github"
echo ""
echo "Syntax: $0 -H GITHUB_OAUTH_TOKEN [-l] [-D] -r repo1,repo2 <collaborator id1>,<collaborator id2>"
echo ""
echo " -H OAUTH toekn"
echo " -l list collaborators"
echo " -r repositories, list as owner/repo[,owner/repo,...]"
echo " -D remove"
echo " id the collaborator id to add or remove"
}

# NOTE: -D is a flag, so it must not be declared with a trailing colon.  The
# original optstring used "D:", which made getopts swallow the next word
# (often the -r option itself) as OPTARG.
while getopts "h?H:r:Dl?" opt; do
  case $opt in
  h|\?)
    help
    exit 0
    ;;
  H)
    GITHUB_OAUTH_TOKEN=$OPTARG
    ;;
  D)
    METHOD=DELETE
    ;;
  r)
    REPOS=$OPTARG
    ;;
  l)
    LIST=yes
    ;;
  esac
done
shift $((OPTIND-1))
COL_USER=$1

# Prompt interactively for anything not supplied on the command line
# (read -r keeps backslashes in the input intact).
if [[ -z "$GITHUB_OAUTH_TOKEN" ]]; then
  echo Enter your github api oauth token
  read -r GITHUB_OAUTH_TOKEN
fi
if [[ -z "$REPOS" ]]; then
  echo Enter the repositories as user/repo. Multiple repos comma separated.
  read -r REPOS
fi
# Without a collaborator id there is nothing to add/remove; just list.
if [[ -z "$COL_USER" ]]; then
  LIST=yes
fi
# Default action is to add (HTTP PUT) when a collaborator was given.
if [[ -z "$METHOD" ]] && [[ ! -z "$COL_USER" ]]; then
  echo "[WARN] Assuming you want to add user $COL_USER. Use the -D option to delete"
  METHOD=PUT
fi

# Split the comma-separated lists into arrays (GitHub names contain no spaces).
repos=(${REPOS//,/ })
collaborators=(${COL_USER//,/ })

# Apply $METHOD (PUT = add, DELETE = remove) to every repo/collaborator pair.
if [[ ! -z "$COL_USER" ]]; then
  for repo in "${repos[@]}"; do
    for collaborator in "${collaborators[@]}"; do
      echo "[INFO] $METHOD $collaborator to $repo"
      curl -i -H "Authorization: token $GITHUB_OAUTH_TOKEN" -X $METHOD -d '' "https://api.github.com/repos/$repo/collaborators/$collaborator" 2>&1 | grep message || echo "OK, done."
    done
  done
fi
if [[ ! -z "$LIST" ]]; then
  for repo in "${repos[@]}"; do
    echo "[INFO] Current list of collaborators in $repo:"
    curl -i -H "Authorization: token $GITHUB_OAUTH_TOKEN" -X GET -d '' "https://api.github.com/repos/$repo/collaborators" 2>&1 | grep login
  done
fi
exit 0

# License text kept inert in a quoted here-document.  The original used the
# here-string operator (: <<< 'EOF'), which does not consume the lines below
# at all; << with a quoted delimiter keeps the text harmless even if the
# `exit 0` above is ever removed.
: << 'EOF'
The MIT License (MIT)
Copyright (c) <year> <copyright holders>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
EOF
| true |
4b86f693cd4044f14b189584756a54b2415eb988 | Shell | rushioda/PIXELVALID_athena | /athena/Reconstruction/MissingETPerformance/macros/merge.sh | UTF-8 | 404 | 3.03125 | 3 | [] | no_license | #!/bin/sh
# Expose our PID so generated helper files get a unique suffix.
processIdent="$$"
##Arguments for script should be new file name and folder where individual files are stored. New file will be created in directory where originals are stored.
python haddMaker.py $1 $2 $processIdent
echo "Merging: Depending on the number of files and their sizes, this could take for-fracking-ever"
# haddMaker.py is expected to have written a ROOT macro named after our PID.
# NOTE(review): assumed from the matching $$ suffix -- confirm against haddMaker.py.
runner="susyAdd$$.C"
root -l $runner
rm $runner
echo "Merged"
exit 0
| true |
1f61cddb52c54f7d713422a310bcba19b0637723 | Shell | Katpalacio/Linux-Project-1 | /Ch4.sh | UTF-8 | 828 | 3.640625 | 4 | [] | no_license | #!/bin/bash
clear
amount=0
#ask the user the amount of students
read -p "How many student would you like to register >> " amount

nameArray=()
gradeArray=()
GPAArray=()
averageArray=()

#ask for student information
# Index $x of each array holds the data for student number x.
# (The original script could not even be parsed: both validation blocks were
# closed with `if` instead of `fi` and used malformed `[ a -gt b || ... ]`
# tests with bad array subscripts; the second block also checked the grade
# instead of the GPA.)
for x in $(seq "$amount")
do
  echo "STUDENT #$x"
  #store information in $x postion of each array
  read -p "Name >>" "nameArray[$x]"
  read -p "subject 1 grade >> " "gradeArray[$x]"
  #check for vaild entry: grades must be between 0 and 100
  if [ "${gradeArray[$x]}" -gt 100 ] || [ "${gradeArray[$x]}" -lt 0 ]
  then
    echo "Please, only grades 0 to 100"
    exit 1
  fi
  read -p "GPA >> " "GPAArray[$x]"
  #check for vaild entry: GPA must be between 0 and 5
  if [ "${GPAArray[$x]}" -gt 5 ] || [ "${GPAArray[$x]}" -lt 0 ]
  then
    echo "Please, only GPA 0 to 5"
    exit 1
  fi
  read -p "average score " "averageArray[$x]"
  echo " "
done
| true |
c924e3897a898a1622181d55601a9ad6c2892a51 | Shell | amitkumar114/classfiles | /classfiles/foreven.sh | UTF-8 | 103 | 3.203125 | 3 | [] | no_license | for a in $@
do
# Reduce the argument modulo 2; 0 means the original number was even.
# NOTE: this clobbers the loop variable, which is fine since nothing after
# the test uses the original value.
a=$((a%2))
if [ $a -eq 0 ]
then
echo "number is even"
else
echo "number is odd"
fi
done
| true |
7c5ff6689c966ab574d87600d45e603c809b310d | Shell | riccardobl/githubmirror | /automirror.sh | UTF-8 | 547 | 3.515625 | 4 | [
"Unlicense"
] | permissive | #!/bin/bash
# Endless mirroring daemon: walks the repo list forever, mirroring one
# repository per iteration, with configurable pauses between clones and
# between full passes.  The *_REPOLIST paths and TIME_* delays come from the
# environment.  set +e: a failed mirror must not kill the daemon.
set +e
i=0
while true;
do
#Reload repolist
# Rebuild the effective list from the base list on every iteration so edits
# to BASE_REPOLIST are picked up without restarting the daemon.
echo "" > "$GENERATED_REPOLIST"
cat "$BASE_REPOLIST" > "$GENERATED_REPOLIST"
## Todo: pull rest from the store
####
readarray -t repolist < "$GENERATED_REPOLIST"
l=${#repolist[@]}
# Wrap around after the last repo and sleep out the inter-pass delay.
if [ $i -ge $l ];
then
i=0
echo "Mirroring completed for all the repos. Sleep for $TIME_BETWEEN_EXECUTIONS seconds."
sleep $TIME_BETWEEN_EXECUTIONS
fi
./mirror.sh "${repolist[$i]}"
i=`expr $i + 1`
sleep $TIME_BETWEEN_CLONES
done
| true |
ee2bda0d852f0cba1a577872a8dade5b88fb8d6a | Shell | fmlorg/netbsd-modular-userland | /nbpkg-build/sbin/nbpkg-build.sh | UTF-8 | 5,225 | 3.421875 | 3 | [] | no_license | #!/bin/sh
#
# Copyright (C) 2018 Ken'ichi Fukamachi
# All rights reserved. This program is free software; you can
# redistribute it and/or modify it under 2-Clause BSD License.
# https://opensource.org/licenses/BSD-2-Clause
#
# mailto: fukachan@fml.org
# web: http://www.fml.org/
#
# $FML$
# $Revision$
# NAME: nbpkg-build.sh
# DESCRIPTION:
# CODINGSTYLE: POSIX compliant (checked by running "bash --posix" this script)
#
############################################################
#################### CONFIGURATIONS ####################
############################################################
. $(dirname $0)/../etc/defaults/config.sh
. $(dirname $0)/../etc/config.sh
############################################################
#################### FUNCTIONS ####################
############################################################
. $(dirname $0)/../lib/libutil.sh
. $(dirname $0)/../lib/libqueue.sh
. $(dirname $0)/../lib/libnbpkg.sh
. $(dirname $0)/../lib/libnbdist.sh
############################################################
#################### MAIN ####################
############################################################
set -u
# Pin a minimal, predictable PATH for every tool invocation below.
PATH=/usr/sbin:/usr/bin:/sbin:/bin
export PATH
nbpkg_build_assert
# global flags
is_debug=${DEBUG:-""}
is_require_download_and_extract=""
# parse options
while getopts dvhb: _opt
do
case $_opt in
h | \?) echo "usage: $0 [-hdv] -b BRANCH [ARCH ...]" 1>&2; exit 1;;
d | v) is_debug=1;;
b) branch=$OPTARG;;
esac
done
shift $(expr $OPTIND - 1)
# Optional architecture whitelist; empty means "every arch on the server".
list=${1:-}
# determine target arch to build
# url_base = http://nycdn.netbsd.org/pub/NetBSD-daily/netbsd-8/
# build_nyid = 201811180430Z
# build_date = 20181118
url_base=$(nbdist_get_url_base $branch)
build_nyid=$(nbdist_get_build_id $branch $url_base)
build_date=$(nbdist_get_build_date $branch $build_nyid)
build_url=$(nbdist_get_url $branch $url_base $build_nyid)
list_all=$(nbdist_get_list $build_url |
tr ' ' '\n' |
grep '^[a-z]' )
# One build session per architecture.
for arch in ${list:-$list_all}
do
is_ignore=$(nbdist_check_ignore $arch)
if [ $is_ignore = 1 ];then continue;fi
nbpkg_dir_init $arch $branch $build_date
nbpkg_log_init $arch $branch $build_date
t_start=$(unixtime)
# Per-branch/arch lock file so overlapping cron runs never build the same
# target twice.
__lockf=/tmp/.lock.nbpkg.build.$branch.$arch
if shlock -f $__lockf -p $$
then
# The session runs in a subshell: each `exit` below ends only the session,
# and its status is collected into $exit afterwards.
(
logit "session: start $arch $branch $build_nyid"
queue_add active $arch $branch $build_date
nbpkg_build_run_session_start_hook
is_already_done=$(queue_find done $arch $branch $build_date)
if [ ${is_already_done} -eq 1 ];then
logit "session: skip $arch $branch $build_nyid"
exit 0
fi
# 1. preparation
# 1.1 download and extract the latest daily build
nbdist_download $arch $build_url/$arch/binary/sets/
nbdist_extract $arch
# 1.2 official release exception
# Release branches always rebuild everything; no ident comparison needed.
expr $branch : release >/dev/null
if [ $? -eq 0 ];then
logit "session: run $arch $branch $build_nyid"
_new="build_target=release"
nbpkg_build_gen_basepkg_conf $arch $branch $build_date $_new
nbpkg_build_run_basepkg $arch $branch "all"
nbpkg_release_basepkg_packages $arch $branch "all"
queue_add done $arch $branch $build_date
queue_del retry $arch $branch $build_date
queue_del active $arch $branch $build_date
exit 0
fi
# 1.2 ident based check
# extract ident data, compare it with the saved one and
# generate the list of basepkg to re-build as a file $basepkg_new.
basepkg_new=$junk_dir/list.basepkg.changed
nbdist_check_ident_changes $arch $branch $build_date $basepkg_new
if [ -s $basepkg_new ];then
logit "session: ident changes found, go forward"
else
logit "session: no ident changes, do nothing"
exit 0
fi
# 2. go if not already done
is_already_done=$(queue_find done $arch $branch $build_date)
if [ ${is_already_done} -eq 1 ];then
logit "session: skip $arch $branch $build_nyid"
else
logit "session: run $arch $branch $build_nyid"
nbpkg_build_gen_basepkg_conf $arch $branch $build_date \
$basepkg_new
nbpkg_build_run_basepkg $arch $branch "maint"
nbpkg_release_basepkg_packages $arch $branch "maint"
nbpkg_build_run_basepkg $arch $branch "all"
nbpkg_release_basepkg_packages $arch $branch "all"
queue_add done $arch $branch $build_date
queue_del retry $arch $branch $build_date # clear flag if exists
fi
nbpkg_build_run_session_end_hook
queue_del active $arch $branch $build_date
)
exit=$? # session exit status
rm $__lockf
else
exit=1 # session exit status
logit "session: ***error*** arch=$arch locked."
fi
t_end=$(unixtime)
t_diff=$(($t_end - $t_start))
logit "session: end($exit) $arch $branch $build_nyid total: $t_diff sec."
# On failure: move the target to the retry queue and do a "failed" cleanup;
# on success just clean up normally.
if [ $exit != 0 ];then
queue_del active $arch $branch $build_date
queue_add retry $arch $branch $build_date
nbpkg_dir_clean 1
logit "session: ***error*** arch=$arch ended abnormally($exit)."
else
nbpkg_dir_clean 0
fi
nbpkg_build_check_suicide
done
exit 0
| true |
4e0d54ecaa506a344d9ef06482aba2303d1c4011 | Shell | gurugecl/train-db | /bin/configuredb.sh | UTF-8 | 246 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Recreate the train database from scratch and load its schema.
# NOTE: the password is hard-coded for local development only; do not reuse
# this pattern with production credentials.
export PGPASSWORD='train_password'
database="traindb"
echo "Configuring database: $database"
# Use $database consistently (the name was previously duplicated as a
# literal); --if-exists keeps the very first run from reporting an error.
dropdb -U train_user --if-exists "$database"
createdb -U train_user "$database"
psql -U train_user "$database" < ./bin/sql/train.sql
echo "$database configured"
| true |
74c089cef045fe54f6318a4ce89b25b815b27907 | Shell | deanstoddard/bin | /album_list | UTF-8 | 895 | 3.5 | 4 | [] | no_license | #!/bin/bash
# Temp files are keyed by our PID.
# NOTE(review): none of them are ever deleted -- the cleanup at the bottom is
# commented out and omits tmpfile3 anyway.
tmpfile=/tmp/fileout.$$
tmpfile2=/tmp/fileout2.$$
tmpfile3=/tmp/fileout3.$$
basedir="/home/dean/Music"
# One artist directory name per line.
ls $basedir > ${tmpfile}
# Build "sortkey#artist" pairs, moving a leading "The " to the end of the
# sort key so "The Who" sorts as "Who, The".
while read artist; do
first_chars=$( echo $artist | cut -c1-4 | tr '[:upper:]' '[:lower:]' )
if [ "${first_chars}" = "the " ]; then
tmp_artist=$( echo $artist | cut -d " " -f 2- )
tmp_artist="${tmp_artist}, The"
echo "${tmp_artist}#${artist}" >> $tmpfile2
else
echo "${artist}#${artist}" >> $tmpfile2
fi
done < ${tmpfile}
# Dictionary-order sort on the key before the '#' separator.
sort -t# -d $tmpfile2 >> $tmpfile3
# Newline-only word splitting: artist/album names below may contain spaces.
IFS=$'\n'
while read line; do
sort_name=$( echo $line | cut -d '#' -f1 )
artist_name=$( echo $line | cut -d '#' -f2 )
artist_albums=$( ls "${basedir}/${artist_name}" )
# Relies on IFS=$'\n' above so each album line stays a single word.
for album in $artist_albums; do
printf "%-40s%60s\n" "${sort_name}:" $album
done
#echo "$sort_name $artist_albums"
done < ${tmpfile3}
#rm $tmpfile $tmpfile2
| true |
9b5a23b16c9eee99668951ad8814c01295521e48 | Shell | AysadKozanoglu/scripts | /mariadb-install.sh | UTF-8 | 3,617 | 3.71875 | 4 | [] | no_license | # author: Aysad Kozanoglu
# email: aysadx@gmail.com
#
# * RHEL/CentOS 6 & 7
# * Ubuntu 14.04 LTS (trusty), 16.04 LTS (xenial), & 18.04 LTS (bionic)
# * Debian 8 (jessie) & 9 (stretch)
# * SLES 12 & 15"
#apt-get -y -qq install software-properties-common
# install repo mariadb
# see source
# https://github.com/AysadKozanoglu/scripts/blob/master/mariadb_install_repo.sh
#wget -O - https://git.io/fjWjZ | bash
#echo "installing mariaDB"
#apt-get update && apt-get -qq -y install mariadb-server mariadb-client
#wget -O /etc/mysql/my.cnf "https://git.io/fpuxX"
#systemctl restart mysql; systemctl status mysql
# MariaDB release series and mirror used by all the per-distro installers.
repo="10.5"
mirror="http://mirror.i3d.net/pub/mariadb/repo"
# Bail out early when a mysql client binary already exists.
if [ -e /usr/bin/mysql ] ; then
echo "MySQL or MariaDB is already installed"
exit;
fi
# Abort unless the script is running as root (EUID 0).
amiroot () {
if [[ "$EUID" -ne 0 ]]; then
echo " Sorry, you need to run this as root"
exit;
fi
}
# Detect OS
# Dispatch to the matching installer based on distro marker files; the SuSE
# and Arch cases are simple enough to be handled inline here.
detectOS () {
OS=`uname`
if [ "$OS" = "Linux" ] ; then
if [ -f /etc/redhat-release ] ; then
centOS;
elif [ -f /etc/debian_version ] ; then
debian;
elif [ -f /etc/SuSE-release ] ; then
zypper install mariadb
exit;
elif [ -f /etc/arch-release ] ; then
pacman -S mariadb
systemctl enable mysqld.service
systemctl start mysqld.service
exit;
fi
else
echo "unknown os"
exit;
fi
}
# Install on Debian/Ubuntu
# Add the MariaDB apt repository for Debian/Ubuntu and install the server.
# Exits the script when done.
debian () {
# Check if sudo exists
if [ ! -e /usr/bin/sudo ] ; then
apt-get install sudo -y
fi
dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
# Check if Ubuntu
if [ "$dist" == "Ubuntu" ]; then
debOS=ubuntu
arch="arch=amd64,i386,ppc64el"
else
debOS=debian
arch="arch=amd64,i386"
fi
# Find debian codename
codename=`lsb_release -c | cut -f2`
if [ "$codename" == "precise" ]; then
arch="arch=amd64,i386"
fi
# Install MariaDB
sudo apt-get install python-software-properties -y
sudo apt-key adv --recv-keys --keyserver keyserver.ubuntu.com 0xcbcb082a1bb943db
sudo add-apt-repository "deb [$arch] $mirror/$repo/$debOS $codename main"
sudo apt-get update -y
sudo apt-get install mariadb-server -y --allow-unauthenticated
exit;
}
# Install on CentOS
# Write a yum repo definition for MariaDB on CentOS/RHEL and install it.
# Fedora systems detected via /etc/*release* are handed off to fedora(),
# which also reuses the $bits value computed here.
centOS () {
# Check if sudo exists
if [ ! -e /usr/bin/sudo ] ; then
yum install sudo -y
fi
rm -f /etc/yum.repos.d/MariaDB.repo
# OS bits information
cpubits=`uname -m`
if [ "$cpubits" == 'x86_64' ]; then
bits=amd64
else
bits=x86
fi
# Check what version is CentOS
isFedora=`cat /etc/*release* | grep ^ID= | cut -d "=" -f 2`
# Check if Fedora
if [ "$isFedora" == "fedora" ]; then
fedora;
fi
osversion=`rpm -q --queryformat '%{VERSION}\n' centos-release`
sudo yum update -y
echo "[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/$repo/centos$osversion-$bits
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1" >> /etc/yum.repos.d/MariaDB.repo
# Install MariaDB
sudo yum install MariaDB-server MariaDB-client -y
exit;
}
# Install on Fedora
# Write a yum repo definition for MariaDB on Fedora (>= 22) and install via
# dnf.  Called from centOS(); $bits is expected to be set by the caller.
fedora () {
# Check what version is Fedora
osversion=`rpm -q --queryformat '%{VERSION}\n' fedora-release`
# Check if Fedora version is lower than 22
if [ $osversion -lt 22 ]; then
echo "unsupported fedora version"
echo "fedora 22 and above is supported"
exit;
fi
sudo dnf update -y
echo "[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/$repo/fedora$osversion-$bits
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1" >> /etc/yum.repos.d/MariaDB.repo
# Install MariaDB
sudo dnf install MariaDB-server -y
exit;
}
# See how we were called.
# NOTE(review): the catch-all pattern means any argument is ignored and
# amiroot/detectOS always run.
case $1 in
*)
amiroot; detectOS;;
esac
exit 1
# NOTE(review): unreachable -- every install path exits inside its function
# and the `exit 1` above runs otherwise, so this hint is never printed.
echo -e "\n execute mysql_secure_installation to set your root password, actually blank!! \n"
| true |
05a33b45123c64573d906b895e41b22c6b150ca6 | Shell | non-conformances-research/tse2020 | /technique/scripts/run_experiment.sh | UTF-8 | 1,315 | 2.625 | 3 | [] | no_license | #!/bin/bash
#Compile project
mvn clean compile
#Run test cases for JVM
# Each *_PATH variable must point at a JDK install and be exported by the
# caller's environment -- TODO confirm.
export JAVA_HOME=$ORACLE_PATH
mvn exec:java -Dexec.mainClass="mining.NonConformancesStudy" -Dexec.args="oracle"
export JAVA_HOME=$ECLIPSE_OPENJ9_PATH
mvn exec:java -Dexec.mainClass="mining.NonConformancesStudy" -Dexec.args="eclipse-openj9"
export JAVA_HOME=$OPENJDK_PATH
mvn exec:java -Dexec.mainClass="mining.NonConformancesStudy" -Dexec.args="openjdk"
export JAVA_HOME=$IBM_J9_PATH
mvn exec:java -Dexec.mainClass="mining.NonConformancesStudy" -Dexec.args="ibm-j9"
#Run oracle
# Pairwise comparison of the per-JVM results produced above.
export JAVA_HOME=$ORACLE_PATH
mvn exec:java -Dexec.mainClass="oracles.ReflectionOracle" -Dexec.args="oracle eclipse-openj9"
mvn exec:java -Dexec.mainClass="oracles.ReflectionOracle" -Dexec.args="oracle openjdk"
mvn exec:java -Dexec.mainClass="oracles.ReflectionOracle" -Dexec.args="oracle ibm-j9"
mvn exec:java -Dexec.mainClass="oracles.ReflectionOracle" -Dexec.args="eclipse-openj9 openjdk"
mvn exec:java -Dexec.mainClass="oracles.ReflectionOracle" -Dexec.args="eclipse-openj9 ibm-j9"
mvn exec:java -Dexec.mainClass="oracles.ReflectionOracle" -Dexec.args="openjdk ibm-j9"
# Run the bug classifier inside its own virtualenv. Abort if the
# classifier directory is missing so later commands don't run elsewhere.
cd classifier || exit 1
sh filter_results.sh
virtualenv env
source env/bin/activate
# One non-conformance name per line; while/read avoids the word-splitting
# and globbing pitfalls of `for nc in $(cat ...)`.
while IFS= read -r nc; do
    python bugs_classifier.py "results/${nc}.txt"
done < non-conformances.txt
deactivate
| true |
8d251c5df4d696abe1890c56ffa79446dfab4976 | Shell | jason0x43/dotfiles | /bin/direnv-info | UTF-8 | 493 | 3.015625 | 3 | [] | no_license | #!/bin/zsh
# Needs qpdf (for zlib-flate) and jq
# Show what direnv loaded for the current directory by decoding its
# DIRENV_* bookkeeping variables (base64-wrapped zlib-compressed JSON).
if [[ -z $DIRENV_FILE ]]; then
echo "Not using direnv"
exit
fi
echo "Using config: $DIRENV_FILE"
# Decode in two steps: base64 -> raw zlib stream -> JSON text.
# NOTE(review): holding raw zlib bytes in a shell variable relies on zsh
# tolerating binary data in parameters -- TODO confirm.
direnv_diff_z=$(echo $DIRENV_DIFF | base64 -d)
direnv_diff_text=$(echo $direnv_diff_z | zlib-flate -uncompress)
echo "New/updated variables:"
# ".n" selects the new/updated entries of the diff object (see the echo above).
echo $direnv_diff_text | jq ".n"
# direnv_watches_z=$(echo $DIRENV_WATCHES | base64 -d)
# direnv_watches_text=$(echo $direnv_watches_z | zlib-flate -uncompress)
# echo $direnv_watches_text | json_pp
| true |
5a2bda63e46704115959bd260e176d6fb8626f99 | Shell | amennen/rtAttenPenn_analysis | /pni_sgeconf.sh | UTF-8 | 492 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# Dump the configuration of an SGE (Sun Grid Engine) cluster.
# "lists" are qconf object types that have enumerable members; "nonlists"
# are singleton configurations dumped directly.
lists="q p hgrp prj e"
nonlists="conf sconf rqs c"
# Probe qconf; if it fails we are not on an SGE cluster.
qconf -sconf >/dev/null 2>&1
(( ! $? == 0 )) && echo "not an SGE cluster, quitting." && exit 1
# configurations that are singletons, not lists
for i in $nonlists ; do
qconf -s${i}
done
# get list members and iterate over them
# (the -s${i}l command runs twice: once to print, once to capture)
for i in $lists ; do
qconf -s${i}l
m=$(qconf -s${i}l)
for j in $m ; do
echo "info for $j ..."
qconf -s${i} ${j}
done
done
# get some info about currently running stuff
qstat -F
| true |
f53dc4d1cfadd83162cdcd295ee03bfc141bda7a | Shell | indigos33k3r/LiveClone | /install.sh | UTF-8 | 1,589 | 3.140625 | 3 | [] | no_license | #!/bin/sh
# Install LiveClone into the $DESTDIR staging root (empty means /).
# NOTE(review): $0 and $DESTDIR are unquoted throughout; paths containing
# spaces would break -- kept as-is here.
cd $(dirname $0)
# Extract the version from the `version = '...'` line of the source file.
VER=$(grep 'version =' src/liveclone.py | head -n 1 | sed "s/.*'\(.*\)'/\1/")
install -d -m 755 $DESTDIR/usr/doc/liveclone-$VER
install -d -m 755 $DESTDIR/install
install -d -m 755 $DESTDIR/usr/sbin
install -d -m 755 $DESTDIR/usr/share/applications
install -d -m 755 $DESTDIR/usr/share/icons/hicolor/24x24/apps
install -d -m 755 $DESTDIR/usr/share/icons/hicolor/64x64/apps
install -d -m 755 $DESTDIR/usr/share/icons/hicolor/128x128/apps
install -d -m 755 $DESTDIR/usr/share/icons/hicolor/scalable/apps
install -d -m 755 $DESTDIR/usr/share/liveclone
install -m 755 src/liveclone.py \
	$DESTDIR/usr/sbin/
install -m 644 src/liveclone.glade \
	$DESTDIR/usr/share/liveclone
install -m 644 src/liveclone.desktop \
	$DESTDIR/usr/share/applications/
install -m 644 src/liveclone-kde.desktop \
	$DESTDIR/usr/share/applications/
install -m 644 src/liveclone.png \
	$DESTDIR/usr/share/liveclone/
install -m 644 icons/liveclone-24.png \
	$DESTDIR/usr/share/icons/hicolor/24x24/apps/liveclone.png
install -m 644 icons/liveclone-64.png \
	$DESTDIR/usr/share/icons/hicolor/64x64/apps/liveclone.png
install -m 644 icons/liveclone-128.png \
	$DESTDIR/usr/share/icons/hicolor/128x128/apps/liveclone.png
install -m 644 icons/liveclone.svg \
	$DESTDIR/usr/share/icons/hicolor/scalable/apps/
# Install gettext catalogs: derive each locale name from po/<locale>.mo
# with shell globbing and parameter expansion instead of parsing `ls`.
for mo in po/*.mo; do
	[ -e "$mo" ] || continue
	i=${mo#po/}
	i=${i%.mo}
	install -d -m 755 $DESTDIR/usr/share/locale/${i}/LC_MESSAGES
	install -m 644 po/${i}.mo \
		$DESTDIR/usr/share/locale/${i}/LC_MESSAGES/liveclone.mo
done
# Install documentation files; glob instead of parsing `ls` output.
for doc in docs/*; do
	[ -e "$doc" ] || continue
	install -m 644 "$doc" \
		$DESTDIR/usr/doc/liveclone-$VER/
done
| true |
7fc78116564f523e7968d72644f5ea7c56344c77 | Shell | timwata/dotfiles | /zsh/peco.zsh | UTF-8 | 1,019 | 3.4375 | 3 | [] | no_license | function peco-select-history() {
    # Fuzzy-pick a command from shell history (newest first) into BUFFER.
    local tac
    # GNU systems have tac; BSD/macOS reverses lines with `tail -r` instead.
    if which tac > /dev/null; then
        tac="tac"
    else
        tac="tail -r"
    fi
    # fc -l -n 1: full history without line numbers; peco filters it,
    # pre-seeded with whatever is already typed ($LBUFFER).
    BUFFER=$(fc -l -n 1 | eval $tac | peco --query "$LBUFFER" 2>/dev/null)
    # Put the cursor at the end of the selected command.
    CURSOR=$#BUFFER
    zle clear-screen
}
# Register as a ZLE widget (bind it to a key to use it).
zle -N peco-select-history
# Fuzzy-attach to a GNU screen session: list sessions, pick one with peco.
function peco-select-screen() {
    # screen -ls output: skip up to the "There is/are screens on:" header,
    # stop at the trailing "N Sockets in ..." line, keep the session rows;
    # zsh ${(r:30:)...} right-pads the session id to 30 columns for peco.
    local selected_session="$(screen -ls | \
        awk 'NR==1,/^There (is a|are) screens? on:/ { next } /^[0-9]+ Sockets? in/ { exit } 1' | \
        while read session state; do echo "${(r:30:::::)session} ${state}"; done | peco 2>/dev/null)"
    # Session ids start with a PID digit; anything else means no selection.
    if [[ "${selected_session}" =~ "^[0-9]" ]]
    then
        BUFFER="screen -rx $(echo ${selected_session} | cut -d ' ' -f1)"
        zle accept-line
        zle clear-screen
    fi
}
zle -N peco-select-screen
# Jump to a ghq-managed repository: pick a path with peco and cd into it.
function peco-select-ghq() {
    local chosen
    chosen=$(ghq list -p | peco --query "$LBUFFER")
    if [ -n "$chosen" ]; then
        BUFFER="cd ${chosen}"
        zle accept-line
    fi
    zle clear-screen
}
zle -N peco-select-ghq
| true |
e8419aeccc205c065a7aee3f19805c145deb6c3d | Shell | aur-archive/corsix-th-git | /PKGBUILD | UTF-8 | 1,827 | 2.703125 | 3 | [] | no_license | # Maintainer: Jaroslav Supolik <kadzi@centrum.cz>
# Contributor: Jon Gjengset <jon@tsp.io>
# Contributor: Gaetan Bisson <bisson@archlinux.org>
# Based on Gaetan's corsix-th package in the AUR
pkgname=corsix-th-git
pkgver=0.40.r209.g582fee0
pkgrel=1
pkgdesc='Reimplementation of the game engine of Theme Hospital'
url='https://github.com/CorsixTH/CorsixTH/'
arch=('i686' 'x86_64' 'armv7h')
license=('MIT')
makedepends=('cmake' 'git')
conflicts=('corsix-th')
depends=('sdl2_mixer' 'ffmpeg' 'timidity++' 'luajit' 'lua51-filesystem' 'lua51-lpeg')
optdepends=("freetype2: font support beyond the original game's bitmap fonts"
            "timidity-freepats: original background music playback")
# 'bin' is a local launcher script next to this PKGBUILD; package()
# installs it as /usr/bin/CorsixTH.
source=("git+https://github.com/CorsixTH/CorsixTH.git"
        'bin')
sha1sums=('SKIP'
          '7fd6ae8db366b7f9c4671708e8ea7beb48f1bea3')
# If you do not have a copy of Theme Hospital,
# you can download the data files of the demo:
# http://th.corsix.org/Demo.zip
pkgver() {
  # cd "$pkgname"
  cd "$srcdir/CorsixTH"
  # Turn `git describe` output (e.g. v0.40-209-g582fee0) into a pacman
  # version: strip the "v", prefix the rev count with "r", dots for dashes.
  git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
}
prepare() {
  cd "${srcdir}/CorsixTH"
  # work around broken 3.0 cmake config: the cmake rules expect a
  # LICENSE.txt inside the CorsixTH subdirectory; symlink the real one.
  ln -sfn ../LICENSE.txt CorsixTH/LICENSE.txt
}
build() {
  cd "${srcdir}/CorsixTH"
  # Point cmake at luajit's headers/library; install under /usr/share.
  cmake \
    -D LUA_INCLUDE_DIR=/usr/include/luajit-2.0 \
    -D LUA_LIBRARY=/usr/lib/libluajit-5.1.so \
    -D CMAKE_INSTALL_PREFIX=/usr/share/ \
    -D CMAKE_BUILD_TYPE=Release \
    -Wno-dev .
  # The game engine proper lives in the CorsixTH subdirectory.
  cd CorsixTH
  make
}
package() {
  cd "${srcdir}/CorsixTH/CorsixTH"
  make DESTDIR="${pkgdir}" install
  # Launcher script (the 'bin' source entry) plus license and .desktop file.
  install -Dm755 ../../bin "${pkgdir}/usr/bin/CorsixTH"
  install -Dm644 LICENSE.txt "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
  install -Dm644 ../DebianPackage/usr/share/applications/CorsixTH.desktop "${pkgdir}/usr/share/applications/CorsixTH.desktop"
  # Rewrite 'games' path components in the Debian .desktop file to 'share'
  # to match the CMAKE_INSTALL_PREFIX above -- TODO confirm intent.
  sed -e 's/games/share/g' -i "${pkgdir}/usr/share/applications/CorsixTH.desktop"
}
| true |
5f31e6eb0e2e28f4251ff0954ebf4563a7a01e9d | Shell | bufbuild/buf | /make/buf/scripts/brew.sh | UTF-8 | 1,435 | 3.5625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -e
# Resolve the repository root (three levels above this script) without
# CDPATH interference, and work from there.
DIR="$(CDPATH= cd "$(dirname "${0}")/../../.." && pwd)"
cd "${DIR}"
if [ -z "${1}" ]; then
echo "usage: ${0} out_dir" >&2
exit 1
fi
OUT_DIR="${1}"
# Start from a clean output tree laid out Homebrew-style.
rm -rf "${OUT_DIR}"
mkdir -p "${OUT_DIR}/bin"
mkdir -p "${OUT_DIR}/etc/bash_completion.d"
mkdir -p "${OUT_DIR}/share/fish/vendor_completions.d"
mkdir -p "${OUT_DIR}/share/zsh/site-functions"
mkdir -p "${OUT_DIR}/share/man/man1"
# Build each CLI binary; the echo mirrors the exact command for the log.
for binary in buf protoc-gen-buf-breaking protoc-gen-buf-lint; do
echo CGO_ENABLED=0 go build -ldflags \"-s -w\" -trimpath -o \"${OUT_DIR}/bin/${binary}\" \"./cmd/${binary}\"
CGO_ENABLED=0 go build -ldflags "-s -w" -trimpath -buildvcs=false -o "${OUT_DIR}/bin/${binary}" "./cmd/${binary}"
done
# Generate shell completions and man pages with the freshly built buf.
echo \"${OUT_DIR}/bin/buf\" completion bash \> \"${OUT_DIR}/etc/bash_completion.d/buf\"
"${OUT_DIR}/bin/buf" completion bash > "${OUT_DIR}/etc/bash_completion.d/buf"
echo \"${OUT_DIR}/bin/buf\" completion fish \> \"${OUT_DIR}/share/fish/vendor_completions.d/buf.fish\"
"${OUT_DIR}/bin/buf" completion fish > "${OUT_DIR}/share/fish/vendor_completions.d/buf.fish"
echo \"${OUT_DIR}/bin/buf\" completion zsh \> \"${OUT_DIR}/share/zsh/site-functions/_buf\"
"${OUT_DIR}/bin/buf" completion zsh > "${OUT_DIR}/share/zsh/site-functions/_buf"
echo \"${OUT_DIR}/bin/buf\" manpages \"${OUT_DIR}/share/man/man1\"
"${OUT_DIR}/bin/buf" manpages "${OUT_DIR}/share/man/man1"
echo cp \"LICENSE\" \"${OUT_DIR}/LICENSE\"
cp "LICENSE" "${OUT_DIR}/LICENSE"
| true |
711a085411d82390bf4909f5f3cd563a5e94131e | Shell | pintori/dotfiles | /zsh/exports | UTF-8 | 2,470 | 3.125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env zsh
# vi: ft=zsh
# Make vim the default editor
export EDITOR="vim";
# Prefer US English and use UTF-8
export LANG="en_US.UTF-8";
export LC_ALL="en_US.UTF-8";
# For Docker!
# export DOCKER_HOST=tcp://192.168.23.2:2375
# All the colorful logs
export LOG_FORMAT=colored
# Use 'less' as the pager
export PAGER='less'
# Options for less are read from the $LESS environment variable
# --IGNORE-CASE: Ignore case in searches, even if the search contains a capital letter
# --quit-if-one-screen: Automatically exit if entire file can be
# displayed on one screen
# --hilite-search: When searching, only highlight last matched string
# instead of all matches
# --RAW-CONTROL-CHARS: Output ANSI color escape sequences in "raw" form
# --jump-target=.4: Jumps (including search) end at the specified line or
#   percentage of the screen
# --status-column: Displays a status column at the left edge of the screen which
# shows the lines that matched the current search.
# --HILITE-UNREAD: Temporarily highlights the first "new" line after a forward
# movement larger than one page.
# local less_opts=''
# NOTE: Git sets "-FRX" by default, so if we want to preserve git behavior we need those
# NOTE(review): `local` outside a function is zsh-specific behavior;
# verify it is accepted when this file is sourced -- TODO confirm.
local less_opts='--IGNORE-CASE'
less_opts+=' --quit-if-one-screen'
less_opts+=' --hilite-search'
less_opts+=' --RAW-CONTROL-CHARS'
less_opts+=' --jump-target=.4'
less_opts+=' --status-column'
less_opts+=' --HILITE-UNREAD'
# Hand the assembled option string to less.
export LESS="$less_opts"
# Ruby settings for a faster test!
# NOTE(review): these RUBY_GC_*/RUBY_HEAP_* names look like the pre-2.1
# MRI tuning variables; newer Rubies may ignore some of them -- TODO confirm.
export RUBY_GC_HEAP_INIT_SLOTS=1000000
# export RUBY_GC_HEAP_FREE_SLOTS=1000000 # Another good one to check out
export RUBY_GC_MALLOC_LIMIT=1000000000
# export RUBY_GC_MALLOC_LIMIT_MAX=1000000000 # Another good one to investigate
export RUBY_HEAP_SLOTS_INCREMENT=1000000
export RUBY_HEAP_SLOTS_GROWTH_FACTOR=1
export RUBY_HEAP_FREE_MIN=500000
# --- Colorful LS ---
# Good examples:
# https://github.com/Bash-it/bash-it/blob/master/lib/appearance.bash
# http://altoidnerd.com/2015/10/06/using-ls_colors-to-colorize-your-shell-by-filename-filetype/
# LS_COLORS for non-GNU systems (BSD) (from: http://geoff.greer.fm/lscolors/)
export CLICOLOR=1
# BSD ls color map (see the generator linked above): foreground/background
# code pairs, one pair per file type.
export LSCOLORS=ExFxdxdxcxacaccxxxExEh
# TODO: Grep colors too
# if echo hello | grep --color=auto l >/dev/null 2>&1; then
# export GREP_OPTIONS='--color=auto'
# export GREP_COLOR='0;32'
# export GREP_COLORS="sl=0;37:cx=1;32:mt=1;35:fn=0;32:ln=1;34:se=1;33"
# fi
# --- PATH ---
# prepend_to_path is a helper defined elsewhere in these dotfiles -- TODO confirm.
prepend_to_path "$HOME/bin"
| true |
2cf192ef3ff142ded67a441302077bbb64e32c60 | Shell | Chownie/quickup | /quickup.sh | UTF-8 | 1,662 | 4.0625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
## Pacman
# Run using sudo
function pac_sudo {
sudo pacman -Syu
}
# Run as root/admin
function pac_su {
pacman -Syu
}
## APT
# Run using sudo
# Full apt cycle: refresh indexes, clean caches, upgrade, remove orphans.
function apt_sudo {
sudo apt-get update && sudo apt-get autoclean && sudo apt-get upgrade && sudo apt-get autoremove && sudo apt-get clean
}
# Run as root/admin
function apt_su {
apt-get update && apt-get autoclean && apt-get upgrade && apt-get autoremove && apt-get clean
}
## YUM
# Run using sudo
function yum_sudo {
sudo yum update && sudo yum clean all
}
# Run as root/admin
function yum_su {
yum update && yum clean all
}
### Operation
# Check for 'sudo' package, else run using su
# Records availability of sudo in the global sudo_status flag
# (1 = sudo present, 0 = fall back to running as root).
function sudo_check {
# Discard command -v's path output; printing it was accidental noise.
if command -v sudo >/dev/null 2>&1
then sudo_status=1 && echo "Package 'sudo' is installed, continuing using 'sudo'" && sleep 1
else sudo_status=0 && echo "Package 'sudo' is not installed, using 'su' instead." && sleep 1
fi
}
# Check for package manager, then run
# Detects the system package manager and dispatches to the matching update
# routine. Reads the global sudo_status flag set by sudo_check (1 = sudo).
function pkg_check {
if command -v apt-get >/dev/null 2>&1
then
pkg=APT && echo "$pkg package manager is installed, Using $pkg to continue." && sleep 1
# BUGFIX: the original tested `if sudo_status=1`, which is an assignment
# (always true), so the su variants could never run.
if [ "$sudo_status" = "1" ]
then apt_sudo
else apt_su
fi
exit
elif command -v pacman >/dev/null 2>&1
then
pkg=pacman && echo "$pkg package manager is installed, Using $pkg to continue." && sleep 1
if [ "$sudo_status" = "1" ]
then pac_sudo
else pac_su
fi
exit
elif command -v yum >/dev/null 2>&1
then
pkg=yum && echo "$pkg package manager is installed, Using $pkg to continue." && sleep 1
if [ "$sudo_status" = "1" ]
then yum_sudo
else yum_su
fi
exit
else
# help_text is expected to be defined elsewhere -- TODO confirm.
help_text
exit
fi
}
### Execution order: detect sudo availability first, then dispatch.
sudo_check ; pkg_check
| true |
d543f679d3fd64e053cb6333d7f9302bd34f282e | Shell | lukaszx0/rootnode-legacy | /lxc/lxc-manager/chroot-scripts/chroot.sh | UTF-8 | 3,548 | 3.15625 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
#
# LXC chroot script
# Rootnode, http://rootnode.net
#
# Copyright (C) 2012 Marcin Hlybin
# All rights reserved.
#
# C locale and non-interactive apt/debconf keep package maintainer
# scripts from prompting while the guest tree is being prepared.
export LC_ALL=C
export DEBIAN_FRONTEND=noninteractive
cat > /etc/apt/sources.list<<EOF
deb http://mirror.ovh.net/debian/ squeeze main contrib non-free
deb-src http://mirror.ovh.net/debian/ squeeze main contrib non-free
deb http://security.debian.org/ squeeze/updates main contrib non-free
deb-src http://security.debian.org/ squeeze/updates main contrib non-free
# Percona
#deb http://repo.percona.com/apt squeeze main
#deb-src http://repo.percona.com/apt squeeze main
EOF
# Import the repository signing key (matches the commented Percona entry
# in sources.list above) -- TODO confirm.
gpg --keyserver hkp://keys.gnupg.net --recv-keys 1C4CBDCDCD2EFD2A
gpg -a --export CD2EFD2A | apt-key add -
aptitude -q -y update
aptitude -q -y upgrade
# init.d
# Drop boot/hardware-oriented init scripts that make no sense in a container.
for FILE in bootlogd bootlogs bootmisc.sh checkfs.sh checkroot.sh halt hostname.sh hwclockfirst.sh hwclock.sh ifupdown ifupdown-clean killprocs module-init-tools mountall-bootclean.sh mountall.sh mountdevsubfs.sh mountkernfs.sh mountnfs-bootclean.sh mountnfs.sh mountoverflowtmp mtab.sh networking procps README reboot rmnologin rsyslog sendsigs single skeleton stop-bootlogd stop-bootlogd-single umountfs umountnfs.sh umountroot urandom
do
update-rc.d -f $FILE remove
rm -f -- /etc/init.d/$FILE
done
# /dev
# udev cannot manage devices inside a container; create the consoles
# (char major 4, minors 1-4) statically instead.
aptitude -q -y purge udev
rm -rf /dev/.udev /etc/udev
mknod /dev/tty1 c 4 1
mknod /dev/tty2 c 4 2
mknod /dev/tty3 c 4 3
mknod /dev/tty4 c 4 4
# inittab
cat > /etc/inittab<<EOF
# The default runlevel.
id:2:initdefault:
# Boot-time system configuration/initialization script.
# This is run first except when booting in emergency (-b) mode.
si::sysinit:/etc/init.d/rcS
# What to do in single-user mode.
~:S:wait:/sbin/sulogin
# /etc/init.d executes the S and K scripts upon change
# of runlevel.
#
# Runlevel 0 is halt.
# Runlevel 1 is single-user.
# Runlevels 2-5 are multi-user.
# Runlevel 6 is reboot.
l0:0:wait:/etc/init.d/rc 0
l1:1:wait:/etc/init.d/rc 1
l2:2:wait:/etc/init.d/rc 2
l3:3:wait:/etc/init.d/rc 3
l4:4:wait:/etc/init.d/rc 4
l5:5:wait:/etc/init.d/rc 5
l6:6:wait:/etc/init.d/rc 6
# Normally not reached, but fallthrough in case of emergency.
z6:6:respawn:/sbin/sulogin
# /sbin/getty invocations for the runlevels.
#
# The "id" field MUST be the same as the last
# characters of the device (after "tty").
#
# Format:
# <id>:<runlevels>:<action>:<process>
#
# Note that on most Debian systems tty7 is used by the X Window System,
# so if you want to add more getty's go ahead but skip tty7 if you run X.
#
0:2345:respawn:/sbin/getty 38400 console
1:2345:respawn:/sbin/getty 38400 tty1
2:23:respawn:/sbin/getty 38400 tty2
3:23:respawn:/sbin/getty 38400 tty3
4:23:respawn:/sbin/getty 38400 tty4
EOF
# /etc/profile
# NOTE(review): the heredoc delimiter is unquoted, so ${LANG:=en_US.UTF-8}
# is expanded (and assigned!) by THIS script at build time, writing the
# resulting value rather than the literal expansion into /etc/profile.
# Quote the delimiter (<<'EOF') to write the literal line -- TODO confirm.
cat >> /etc/profile <<EOF
# Set default locale
: ${LANG:=en_US.UTF-8}; export LANG
EOF
# /etc/rc.local
# Default route for guests on the 10.1.0.0 network.
cat > /etc/rc.local<<EOF
#!/bin/bash -e
route add default gw 10.1.0.1
EOF
# /etc/resolv.conf
# Site resolver plus Google public DNS (8.8.8.8) as fallback.
cat > /etc/resolv.conf<<EOF
nameserver 213.186.33.99
nameserver 8.8.8.8
EOF
# Remove unused files and directories
rm -rf /home /media /mnt /opt /srv
rm -rf /var/log/*
rm -rf /etc/network
rm /etc/fstab /etc/hostname /etc/debian_version
# Install system packages
aptitude -q -y install locales vim htop less ssh screen dstat ifstat iotop apg dnsutils sysv-rc-conf
# Set locale
# Uncomment the selected locales in /etc/locale.gen, then generate them.
perl -e 's/^# (en_US.UTF-8 UTF-8|pl_PL ISO-8859-2|pl_PL.UTF-8 UTF-8|de_DE.UTF-8 UTF-8)/$1/g' -p -i /etc/locale.gen
locale-gen
# NOTE(review): LANG=en_US lacks a charset suffix, and LANGUAGE=$LANG
# expands to whatever LANG is at this point (possibly set as a side effect
# of the unquoted /etc/profile heredoc above) -- TODO confirm intended.
update-locale LANG=en_US LANGUAGE=$LANG LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
# Cleanup
aptitude clean
aptitude autoclean
| true |
c44cb25677226fddeeec0b7a3ce962a86a0b47f4 | Shell | ziyi666666/shell | /auto_mysql_backup.sh | UTF-8 | 1,024 | 3.5 | 4 | [
"MIT"
] | permissive | #!/usr/bin/bash
#Title Mysql Backup
#Author by the UNSG(聯合國秘書長[台灣])
#From The Chinese Taipei
#Version 3.0
#Date 2020/4/8 16:47
# Timestamp used to name the new backup archive.
now=$(date +"%Y-%m-%d_%H-%M-%S")
# Timestamp from seven days ago, used to locate the expired backup.
old=$(date -d "-7 days" +"%Y-%m-%d_%H-%M-%S")
# Backup destination directory.
path=/back
# Database credentials.
# SECURITY NOTE: the password is hard-coded; prefer ~/.my.cnf or an
# environment variable so it does not leak via `ps` or this file.
user='root'
pass='2ayijq8888'
db='demo'
# Create the backup directory on first run.
if [ ! -d "$path" ]
then
mkdir "$path"
echo "created successfully"
fi
# Work from the backup directory; abort if it is unavailable.
cd "$path" || exit 1
# Dump and gzip the database, named after the current timestamp.
/usr/bin/mysqldump -u $user -p$pass -B $db -E -F -R --triggers --single-transaction --master-data=1 --flush-privileges |gzip >$path/$now.demo.sql.gz
# Success check. Plain $? only reflects gzip (the last pipeline stage);
# PIPESTATUS lets us verify mysqldump itself as well.
rc=("${PIPESTATUS[@]}")
if [ "${rc[0]}" -eq 0 ] && [ "${rc[1]}" -eq 0 ]
then
echo "backup successfully"
else
echo "error,please retry"
fi
# Remove the backup from seven days ago, if present (plain file: -r not needed).
if [ -f "$path/$old.demo.sql.gz" ]
then
rm -f "$path/$old.demo.sql.gz"
echo "delete the old files successfully"
fi
| true |
947c7ae4865c3b5358790fa68ce7366e6f55d282 | Shell | chocobearz/faisal-cids | /databasemaintenance/databasedump.sh | UTF-8 | 951 | 3 | 3 | [] | no_license | path=$1faisal-cids/databasemaintenance/
# $path (set above) is built from $1, which must be the parent directory
# of the faisal-cids checkout including a trailing slash -- TODO confirm.
cd $path
now=$(date +"%m_%d_%Y-%H.%M")
# Timestamped full dump of the faisalcids database; --clean adds DROP
# statements so a restore replaces existing objects.
pg_dump --username=postgres --clean --port=5432 faisalcids > $path/backups/faisaldatabase_backup_$now.sql
#-- jobs=njobs Run the dump in parallel by dumping njobs tables simultaneously.
# This option reduces the time of the dump but it also increases the load on the
# database server. You can only use this option with the directory output format
# because this is the only output format where multiple processes can write
#their data at the same time.pg_dump will open njobs + 1 connections to the
#database, so make sure your max_connections setting is high enough to
#accommodate all connections.
#see https://www.postgresql.org/docs/9.3/app-pgdump.html
# Record a checksum alongside the dump, then verify it with the helper.
md5sum $path/backups/faisaldatabase_backup_$now.sql > $path/backups/faisaldatabase_backup_$now.sql.md5sum
$1faisal-cids/venv/bin/python checkmd5sum.py $path/backups/ faisaldatabase_backup_$now.sql.md5sum
| true |
273abf66e96ed62b286f92ee2a01442a2092dfcb | Shell | sitedata/da-ds | /scripts/git_trailers.sh | UTF-8 | 176 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Make sure a git log dump exists; generate it once via the helper script.
if [ ! -f "git.log" ]
then
./scripts/git_log.sh > git.log || exit 1
fi
# Extract unique "Key: Name <email>" trailer lines from the log.
# BUGFIX: the original class [a-zA-z] spans punctuation between 'Z' and
# 'a' ('[', '\', ']', '^', '_', '`'); a-zA-Z is the intended range.
# sort -u replaces the equivalent sort | uniq.
grep -E "^[[:space:]]+[a-zA-Z0-9-]+:.+[[:space:]]+<.+>[[:space:]]*$" git.log | sort -u
| true |
bbd39647c2525c5acec1615afd977aca62ee8b4e | Shell | timborn/ddd | /scripts/no_vnc.sh | UTF-8 | 879 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env bash
### every exit != 0 fails the script
set -e
echo "### executing $0"
# Jump from v0.6.2 to v1.1.0 (noVNC)
# No need to use a different version of websockify any more.
# Websockify v0.8.0 is current with NoVNC v1.1.0
# TODO: Verify env variables are set, or die.
# Pinned release tarballs fetched straight from GitHub.
TARBALL=https://github.com/novnc/noVNC/archive/v1.1.0.tar.gz
SOCKBALL=https://github.com/novnc/websockify/archive/v0.8.0.tar.gz
echo "Install noVNC - HTML5 based VNC viewer"
# NO_VNC_HOME must be provided by the environment (see TODO above).
mkdir -p $NO_VNC_HOME/utils/websockify
# Unpack each tarball in place, dropping the archive's top-level directory.
wget -qO- $TARBALL | tar xz --strip 1 -C $NO_VNC_HOME
# pre-pull websockify
wget -qO- $SOCKBALL | tar xz --strip 1 -C $NO_VNC_HOME/utils/websockify
chmod +x -v $NO_VNC_HOME/utils/*.sh
## create index.html to forward automatically to `vnc_lite.html`
## USED TO link to vnc_auto.html but that disappeared in version upgrade
ln -s $NO_VNC_HOME/vnc_lite.html $NO_VNC_HOME/index.html
| true |
2998d241418b7741a64f4e91fe4bffd491df2cf3 | Shell | m0re4u/turbo-goggles | /train_diagnostic/confusion_matrix.sh | UTF-8 | 807 | 2.546875 | 3 | [] | no_license | #! /bin/bash
set -eu
# Number of evaluation episodes; defaults to 100, override via $1.
EPISODES="${1:-100}"
MODEL_DIR="../../machine/models/"
# Run eval_rl.py to build a confusion matrix for one trained checkpoint.
function confusion {
# $1 jobid
# $2 checkno
# $3 LevelName
# $4 Dataset dir
# The model directory is located by grepping the models folder for the
# job id; assumes exactly one directory name matches -- TODO confirm.
python3 ../../machine/eval_rl.py \
  --env $3 \
  --model ../../machine/models/$(ls $MODEL_DIR | grep $1)/$2_check.pt \
  --vocab ../../machine/models/$(ls $MODEL_DIR | grep $1)/vocab.json \
  --reasoning model \
  --confusion $4 \
  --episodes $EPISODES
}
confusion 2651250 010800 BabyAI-CustomGoToObjSmall-v0 small_new_seed1
confusion 2691623 010000 BabyAI-CustomGoToObjAnd-v0 and_new_seed1
confusion 2689034 025000 BabyAI-CustomGoToObjMedium-v0 beforeafter_new_seed1
confusion 2812454 021900 BabyAI-CustomGoToObjMultiple-v0 multiple_new_seed1
confusion 2812451 024900 BabyAI-CustomGoToObjThrees-v0 threes_new_seed1
| true |
761325eb8c7ba371c1b9120471e52317e11852fc | Shell | andock-ci/pipeline | /bin/acp.sh | UTF-8 | 26,131 | 3.234375 | 3 | [] | no_license | #!/bin/bash
ANSIBLE_VERSION="2.4.4"
ANDOCK_CI_VERSION=0.3.8
# Pinned versions of the ansible-galaxy roles installed by install_configuration.
REQUIREMENTS_ANDOCK_CI_BUILD='0.1.0'
REQUIREMENTS_ANDOCK_CI_FIN='0.2.1'
REQUIREMENTS_ANDOCK_CI_SERVER='0.1.0'
REQUIREMENTS_SSH_KEYS='0.3'
DEFAULT_CONNECTION_NAME="default"
# Installed location of this script; the .updated path is a staging file
# used by self_update before the atomic mv over the real binary.
ANDOCK_CI_PATH="/usr/local/bin/acp"
ANDOCK_CI_PATH_UPDATED="/usr/local/bin/acp.updated"
# Per-user state: inventories and generated ansible playbooks.
ANDOCK_CI_HOME="$HOME/.andock-ci"
ANDOCK_CI_INVENTORY="./.andock-ci/connections"
ANDOCK_CI_INVENTORY_GLOBAL="$ANDOCK_CI_HOME/connections"
ANDOCK_CI_PLAYBOOK="$ANDOCK_CI_HOME/playbooks"
ANDOCK_CI_PROJECT_NAME=""
# Upstream location of this script, used by self_update.
URL_REPO="https://raw.githubusercontent.com/andock-ci/pipeline"
URL_ANDOCK_CI="${URL_REPO}/master/bin/acp.sh"
DEFAULT_ERROR_MESSAGE="Oops. There is probably something wrong. Check the logs."
export ANSIBLE_ROLES_PATH="${ANDOCK_CI_HOME}/roles"
export ANSIBLE_HOST_KEY_CHECKING=False
# Per-project configuration values, populated at runtime -- TODO confirm source.
config_git_target_repository_path=""
config_domain=""
config_project_name=""
config_git_repository_path=""
config_git_source_repository_path=""
# @author Leonid Makarov
# Console colors
red='\033[0;91m'
red_bg='\033[101m'
green='\033[0;32m'
yellow='\033[1;33m'
NC='\033[0m'
#------------------------------ Help functions --------------------------------
# parse yml file:
# See https://gist.github.com/pkuczynski/8665367
# Usage: _parse_yaml <file> <prefix>
# Emits one shell assignment per leaf value, e.g. `prefixsection_key="value"`,
# with nesting levels (2-space YAML indents) joined by underscores.
_parse_yaml() {
   local prefix=$2
   local s='[[:space:]]*' w='[a-zA-Z0-9_]*'
   local fs
   fs=$(echo @|tr @ '\034')
   # Normalize each `key: value` line to <indent>FS<key>FS<value> (the
   # first expression strips surrounding double quotes), then let awk
   # rebuild the nesting path from the indent depth.
   # "$1" is quoted so filenames containing spaces work.
   sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
       -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" |
   awk -F"$fs" '{
   indent = length($1)/2;
   vname[indent] = $2;
   for (i in vname) {if (i > indent) {delete vname[i]}}
   if (length($3) > 0) {
      vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
      printf("%s%s%s=\"%s\"\n", "'$prefix'",vn, $2, $3);
   }
   }'
}
# Yes/no confirmation dialog with an optional message
# @param $1 confirmation message
# @author Leonid Makarov
# Loops until the user answers yes (returns 0) or no (exits the script
# with status 1). -r keeps backslashes in the reply literal; EOF on stdin
# now aborts instead of looping forever on an unanswerable prompt.
_confirm ()
{
  while true; do
    read -r -p "$1 [y/n]: " answer || exit 1
    case "$answer" in
      [Yy]|[Yy][Ee][Ss] )
        break
        ;;
      [Nn]|[Nn][Oo] )
        exit 1
        ;;
      * )
        echo 'Please answer yes or no.'
    esac
  done
}
# Yes/no confirmation dialog with an optional message
# @param $1 confirmation message
# Prints "1" for yes and "0" for no on stdout (intended for command
# substitution). -r keeps backslashes literal; EOF aborts the prompt loop.
_confirmAndReturn ()
{
  while true; do
    read -r -p "$1 [y/n]: " answer || exit 1
    case "$answer" in
      [Yy]|[Yy][Ee][Ss] )
        echo 1
        break
        ;;
      [Nn]|[Nn][Oo] )
        echo 0
        break
        ;;
      * )
        echo 'Please answer yes or no.'
    esac
  done
}
# Nicely prints command help
# @param $1 command name
# @param $2 description
# @param $3 [optional] command color (yellow | green | anything else = plain)
# @author Oleksii Chekulaiev
printh ()
{
  local width=25
  local fmt="  %-${width}s"
  # Wrap the padded command column in a color when one was requested.
  case "$3" in
    yellow) fmt="  ${yellow}%-${width}s${NC}" ;;
    green)  fmt="  ${green}%-${width}s${NC}" ;;
  esac
  printf "$fmt" "$1"
  echo -e "  $2"
}
# @author Leonid Makarov
# Colored one-line printers. printf's %b interprets the ANSI escape
# sequences held in the color variables, same as `echo -e`.
echo-red () { printf '%b\n' "${red}$1${NC}"; }
echo-green () { printf '%b\n' "${green}$1${NC}"; }
echo-yellow () { printf '%b\n' "${yellow}$1${NC}"; }
# @author Leonid Makarov
# Print an ERROR banner for $1, then each remaining argument on its own
# indented line. Stops at the first empty argument.
echo-error () {
  printf '%b\n' "${red_bg} ERROR: ${NC} ${red}$1${NC}"
  shift
  # Echo other parameters indented. Can be used for error description or suggestions.
  while [[ "$1" != "" ]]; do
    printf '%b\n' "         $1"
    shift
  done
}
# @author Leonid Makarov
# Move the cursor up one line, erase it, and print $1 in its place
# (cursor-up, erase-to-EOL and carriage-return in a single printf).
echo-rewrite ()
{
  printf '\033[1A\033[0K\r%b\n' "$1"
}
# Rewrite the previous line with a green [OK] marker appended.
echo-rewrite-ok ()
{
  echo-rewrite "$1 ${green}[OK]${NC}"
}
# Like if_failed but with more strict error
# @author Leonid Makarov
# If the previous command exited non-zero, print the arguments via
# echo-error and terminate the whole script with status 1.
if_failed_error ()
{
  local rc=$?
  if [ "$rc" -ne 0 ]; then
    echo-error "$@"
    exit 1
  fi
}
# Ask for input
# @param $1 Question
# Prints the raw reply on stdout. -r keeps backslashes literal, and the
# quoted echo preserves inner whitespace and avoids glob expansion of the
# reply (the original unquoted `echo $answer` mangled both).
_ask ()
{
  read -r -p "$1: " answer
  echo "$answer"
}
# Ask for password
# @param $1 Question
# Reads without terminal echo (-s) and prints the raw reply on stdout so
# it can be captured via command substitution. -r keeps backslashes
# literal; the quoted echo preserves whitespace and avoids globbing.
_ask_pw ()
{
  read -r -s -p "$1 : " answer
  echo "$answer"
}
#------------------------------ SETUP --------------------------------
# Generate playbook files
# Writes the four static ansible playbooks (build, fin/deploy, ssh-key
# provisioning, server install) into ${ANDOCK_CI_PLAYBOOK}.
generate_playbooks()
{
  mkdir -p ${ANDOCK_CI_PLAYBOOK}
  echo "---
- hosts: andock-ci-build-server
  roles:
    - { role: andock-ci.build }
" > "${ANDOCK_CI_PLAYBOOK}/build.yml"
  # Deployment playbook: runs the fin role against the docksal host.
  echo "---
- hosts: andock-ci-docksal-server
  gather_facts: true
  roles:
    - { role: andock-ci.fin, git_repository_path: \"{{ git_target_repository_path }}\" }
" > "${ANDOCK_CI_PLAYBOOK}/fin.yml"
  # Adds the given ssh_key to the andock-ci user without removing others
  # (ssh_keys_clean: False).
  echo "---
- hosts: andock-ci-docksal-server
  roles:
  - role: andock-ci.ansible_role_ssh_keys
    ssh_keys_clean: False
    ssh_keys_user:
      andock-ci:
        - \"{{ ssh_key }}\"
" > "${ANDOCK_CI_PLAYBOOK}/server_ssh_add.yml"
  echo "---
- hosts: andock-ci-docksal-server
  roles:
    - { role: andock-ci.server }
" > "${ANDOCK_CI_PLAYBOOK}/server_install.yml"
}
# Install ansible
# and ansible galaxy roles
# Debian/Ubuntu bootstrap: build deps via apt, pip + a pinned ansible,
# TLS helper packages, then the galaxy roles via install_configuration.
install_pipeline()
{
  echo-green ""
  echo-green "Installing andock-ci pipeline version: ${ANDOCK_CI_VERSION} ..."
  echo-green ""
  echo-green "Installing ansible:"
  sudo apt-get update
  sudo apt-get install whois sudo build-essential libssl-dev libffi-dev python-dev -y
  set -e
  # Don't install own pip inside travis.
  if [ "${TRAVIS}" = "true" ]; then
    sudo pip install ansible=="${ANSIBLE_VERSION}"
  else
    wget https://bootstrap.pypa.io/get-pip.py
    sudo python get-pip.py
    sudo pip install ansible=="${ANSIBLE_VERSION}"
    rm get-pip.py
  fi
  # TLS/SNI support packages for older python requests stacks.
  sudo pip install urllib3 pyOpenSSL ndg-httpsclient pyasn1
  # Make sure an ssh client is available for ansible connections.
  which ssh-agent || ( sudo apt-get update -y && sudo apt-get install openssh-client -y )
  install_configuration
  echo-green ""
  echo-green "andock-ci pipeline was installed successfully"
}
# Install ansible galaxy roles.
# Also (re)generates the playbooks and writes the local build inventory.
install_configuration ()
{
  mkdir -p $ANDOCK_CI_INVENTORY_GLOBAL
  #export ANSIBLE_RETRY_FILES_ENABLED="False"
  generate_playbooks
  echo-green "Installing roles:"
  # Pinned role versions come from the REQUIREMENTS_* constants above.
  ansible-galaxy install andock-ci.build,v${REQUIREMENTS_ANDOCK_CI_BUILD} --force
  ansible-galaxy install andock-ci.fin,v${REQUIREMENTS_ANDOCK_CI_FIN} --force
  ansible-galaxy install andock-ci.ansible_role_ssh_keys,v${REQUIREMENTS_SSH_KEYS} --force
  ansible-galaxy install andock-ci.server,v${REQUIREMENTS_ANDOCK_CI_SERVER} --force
  # Build inventory: builds always run on localhost.
  echo "
[andock-ci-build-server]
localhost ansible_connection=local
" > "${ANDOCK_CI_INVENTORY_GLOBAL}/build"
}
# Based on docksal update script
# @author Leonid Makarov
# Downloads the latest acp script, compares the embedded ANDOCK_CI_VERSION
# with the running one, asks for confirmation on a major-version jump,
# then atomically replaces $ANDOCK_CI_PATH and re-runs "acp cup".
self_update()
{
echo-green "Updating andock-ci pipeline..."
local new_andock_ci
# ?r=$RANDOM appends a random query string — presumably a cache-buster;
# confirm against the hosting setup.
new_andock_ci=$(curl -kfsSL "$URL_ANDOCK_CI?r=$RANDOM")
if_failed_error "andock_ci download failed."
# Check if fin update is required and whether it is a major version
local new_version
new_version=$(echo "$new_andock_ci" | grep "^ANDOCK_CI_VERSION=" | cut -f 2 -d "=")
if [[ "$new_version" != "$ANDOCK_CI_VERSION" ]]; then
local current_major_version
current_major_version=$(echo "$ANDOCK_CI_VERSION" | cut -d "." -f 1)
local new_major_version
new_major_version=$(echo "$new_version" | cut -d "." -f 1)
# A differing major version may be backward incompatible: warn and ask.
if [[ "$current_major_version" != "$new_major_version" ]]; then
echo -e "${red_bg} WARNING ${NC} ${red}Non-backwards compatible version update${NC}"
echo -e "Updating from ${yellow}$ANDOCK_CI_VERSION${NC} to ${yellow}$new_version${NC} is not backward compatible."
_confirm "Continue with the update?"
fi
# saving to file
echo "$new_andock_ci" | sudo tee "$ANDOCK_CI_PATH_UPDATED" > /dev/null
if_failed_error "Could not write $ANDOCK_CI_PATH_UPDATED"
sudo chmod +x "$ANDOCK_CI_PATH_UPDATED"
echo-green "andock-ci pipeline $new_version downloaded..."
# overwrite old fin
sudo mv "$ANDOCK_CI_PATH_UPDATED" "$ANDOCK_CI_PATH"
# Refresh playbooks/roles with the new script, then stop this process.
acp cup
exit
else
echo-rewrite "Updating andock-ci pipeline... $ANDOCK_CI_VERSION ${green}[OK]${NC}"
fi
}
#------------------------------ HELP --------------------------------
# Show help.
# Prints the full command reference to stdout using the printh helper
# (defined elsewhere in this script); the third printh argument is a color.
show_help ()
{
echo
printh "andock-ci pipeline command reference" "${ANDOCK_CI_VERSION}" "green"
echo
printh "connect" "Connect andock-ci pipeline to andock-ci server"
printh "(.) ssh-add <ssh-key>" "Add private SSH key <ssh-key> variable to the agent store."
echo
printh "Server management:" "" "yellow"
printh "server:install [root_user, default=root] [andock_ci_pass, default=keygen]" "Install andock-ci server."
printh "server:update [root_user, default=root]" "Update andock-ci server."
printh "server:ssh-add [root_user, default=root]" "Add public ssh key to andock-ci server."
echo
printh "Project configuration:" "" "yellow"
printh "generate:config" "Generate andock-ci project configuration."
echo
printh "Project build management:" "" "yellow"
printh "build" "Build project and push it to target branch."
echo
printh "Control remote docksal:" "" "yellow"
printh "fin init" "Clone git repository and init tasks."
printh "fin up" "Start services."
printh "fin update" "Pull changes from repository and run update tasks."
printh "fin test" "Run tests."
printh "fin stop" "Stop services."
printh "fin rm" "Remove environment."
echo
printh "fin-run <command> <path>" "Run any fin command."
echo
printh "Drush:" "" "yellow"
printh "drush:generate-alias" "Generate drush alias."
echo
printh "version (v, -v)" "Print andock-ci version. [v, -v] - prints short version"
printh "alias" "Print andock-ci alias."
echo
printh "self-update" "${yellow}Update andock-ci${NC}" "yellow"
}
# Print version information for acp.
# @option --short  print only the bare version number
version ()
{
case "${1-}" in
--short)
printf '%s\n' "$ANDOCK_CI_VERSION"
;;
*)
# Full report: tool version plus the pinned role versions.
printf '%s\n' "andock-ci pipeline (acp) version: $ANDOCK_CI_VERSION"
printf '%s\n' "Roles:"
printf '%s\n' "andock-ci.build: $REQUIREMENTS_ANDOCK_CI_BUILD"
printf '%s\n' "andock-ci.fin: $REQUIREMENTS_ANDOCK_CI_FIN"
printf '%s\n' "andock-ci.server: $REQUIREMENTS_ANDOCK_CI_SERVER"
;;
esac
}
#----------------------- ENVIRONMENT HELPER FUNCTIONS ------------------------
# Print the URL of this repository's "origin" remote (empty if none).
get_git_origin_url ()
{
local origin_url
origin_url=$(git config --get remote.origin.url)
echo "$origin_url"
}
# Print the default project name.
# Uses $ANDOCK_CI_PROJECT_NAME when set and non-empty; otherwise falls
# back to the basename of the current working directory.
# NOTE: the previous condition was inverted — when the override variable
# was set it returned the directory name, and an empty string otherwise.
# The branches are swapped here so the explicit override wins.
get_default_project_name ()
{
if [ "${ANDOCK_CI_PROJECT_NAME}" != "" ]; then
echo "${ANDOCK_CI_PROJECT_NAME}"
else
basename "${PWD}"
fi
}
# Walk upward from the current directory and print the nearest ancestor
# that contains a ".andock-ci" entry; prints an empty string when no
# such ancestor exists.
find_root_path () {
local candidate
candidate=$(pwd)
until [[ -z "$candidate" || -e "$candidate/.andock-ci" ]]; do
candidate=${candidate%/*}
done
echo "$candidate"
}
# Abort unless an inventory file for connection "$1" exists under
# ${ANDOCK_CI_INVENTORY}; on a missing alias an error is printed and the
# whole script exits with status 1.
check_connect()
{
local inventory_file="${ANDOCK_CI_INVENTORY}/$1"
[ -f "$inventory_file" ] && return 0
echo-red "No alias \"${1}\" exists. Please run acp connect."
exit 1
}
# Ensure the project settings file .andock-ci/andock-ci.yml exists in
# the current directory; otherwise print an error and exit 1.
check_settings_path ()
{
local path="$PWD/.andock-ci/andock-ci.yml"
# $path is quoted: unquoted, a working directory containing whitespace
# split into several words and the [ -f ] test silently misbehaved.
if [ ! -f "$path" ]; then
echo-error "Settings not found. Run acp generate:config"
exit 1
fi
}
# Print the absolute path of the project settings file
# (.andock-ci/andock-ci.yml under the current directory).
get_settings_path ()
{
# Quoted so whitespace in $PWD is preserved verbatim (the previous
# unquoted echo collapsed runs of spaces in the path).
echo "$PWD/.andock-ci/andock-ci.yml"
}
# Print the path of the branch-specific settings file
# (.andock-ci/andock-ci.<branch>.yml) when it exists; print nothing
# otherwise.
get_branch_settings_path ()
{
local branch
branch=$(get_current_branch)
local path="$PWD/.andock-ci/andock-ci.${branch}.yml"
# Quoted: branch names or working directories with unusual characters
# previously broke the unquoted [ -f ] test and echo.
if [ -f "$path" ]; then
echo "$path"
fi
}
# Load the project settings file and expose every YAML key in the
# caller's scope as a shell variable prefixed with "config_"
# (via _parse_yaml, which emits shell assignments that are eval'd here).
get_settings()
{
local parsed_vars
parsed_vars=$(_parse_yaml $(get_settings_path) 'config_')
eval "$parsed_vars"
}
# Print the branch name of the current working directory.
# CI systems (Travis, GitLab) export the branch directly; otherwise the
# name is derived from git's symbolic HEAD ref, with a placeholder for
# a detached HEAD.
get_current_branch ()
{
if [ "${TRAVIS}" = "true" ]; then
echo $TRAVIS_BRANCH
return
fi
if [ "${GITLAB_CI}" = "true" ]; then
echo $CI_COMMIT_REF_NAME
return
fi
local ref
ref="$(git symbolic-ref HEAD 2>/dev/null)" || ref="(unnamed branch)" # detached HEAD
# Strip the "refs/heads/" prefix to keep only the short branch name.
echo ${ref##refs/heads/}
}
#----------------------- ANSIBLE PLAYBOOK WRAPPERS ------------------------
# Generate ansible inventory files inside .andock-ci/connections folder.
# @param $1 The Connection name (prompted for when omitted; falls back to
#           $DEFAULT_CONNECTION_NAME when the answer is empty).
# @param $2 The andock-ci host name (domain or IP; prompted for when omitted).
run_connect ()
{
if [ "$1" = "" ]; then
local connection_name
connection_name=$(_ask "Please enter connection name [$DEFAULT_CONNECTION_NAME]")
else
local connection_name=$1
shift
fi
if [ "$1" = "" ]; then
local host=
host=$(_ask "Please enter andock-ci server domain or ip")
else
local host=$1
shift
fi
# An empty interactive answer means "use the default connection name".
if [ "$connection_name" = "" ]; then
local connection_name=$DEFAULT_CONNECTION_NAME
fi
mkdir -p ".andock-ci/connections"
# Write a minimal ansible inventory: one docksal host reached over SSH
# as the andock-ci user.
echo "
[andock-ci-docksal-server]
$host ansible_connection=ssh ansible_user=andock-ci
" > "${ANDOCK_CI_INVENTORY}/${connection_name}"
echo-green "Connection configuration was created successfully."
}
# Ansible playbook wrapper for andock-ci.build role.
# Builds the current branch locally via the build.yml playbook; any extra
# CLI arguments ("$@") are forwarded to ansible-playbook. Exits 1 on failure.
run_build ()
{
check_settings_path
local settings_path
settings_path=$(get_settings_path)
local branch_name
branch_name=$(get_current_branch)
echo-green "Building branch <${branch_name}>..."
local skip_tags=""
# On Travis the checkout/setup steps are already done by the CI itself.
if [ "${TRAVIS}" = "true" ]; then
skip_tags="--skip-tags=\"setup,checkout\""
fi
# Settings are passed as an extra-vars file (@file); paths and branch
# are injected as inline extra vars.
ansible-playbook -i "${ANDOCK_CI_INVENTORY_GLOBAL}/build" -e "@${settings_path}" -e "project_path=$PWD build_path=$PWD branch=$branch_name" $skip_tags "$@" ${ANDOCK_CI_PLAYBOOK}/build.yml
if [[ $? == 0 ]]; then
echo-green "Branch ${branch_name} was builded successfully"
else
echo-error ${DEFAULT_ERROR_MESSAGE}
exit 1;
fi
}
# Ansible playbook wrapper for role andock-ci.fin
# Runs an arbitrary fin command on the remote docksal host ("exec" tag).
# @param $1 The Connection.
# @param $2 The fin command.
# @param $3 The exec path.
run_fin_run ()
{
# Check if connection exists
check_settings_path
# Load configuration.
local settings_path
settings_path=$(get_settings_path)
# Get the current branch name.
local branch_name
branch_name=$(get_current_branch)
# Set parameters.
local connection=$1 && shift
local exec_command=$1 && shift
local exec_path=$1 && shift
# Run the playbook.
# NOTE(review): ${branch_settings_config} is never assigned in this
# function (unlike run_fin), so it expands to nothing unless inherited
# from the environment — confirm whether branch settings were intended
# to be loaded here as well.
ansible-playbook -i "${ANDOCK_CI_INVENTORY}/${connection}" --tags "exec" -e "@${settings_path}" ${branch_settings_config} -e "exec_command='$exec_command' exec_path='$exec_path' project_path=$PWD branch=${branch_name}" ${ANDOCK_CI_PLAYBOOK}/fin.yml
if [[ $? == 0 ]]; then
echo-green "fin exec was finished successfully."
else
echo-error $DEFAULT_ERROR_MESSAGE
exit 1;
fi
}
# Ansible playbook wrapper for role andock-ci.fin
# Runs one lifecycle action (init/up/update/test/stop/rm/exec) against the
# remote docksal environment for the current branch; prints the resulting
# environment URL(s) on success and exits 1 on failure.
# @param $1 Connection
# @param $2 Tag
run_fin ()
{
# Check if connection exists
check_settings_path
# Load settings.
local settings_path
settings_path="$(get_settings_path)"
get_settings
# Load branch specific {branch}.andock-ci.yml file if exist.
local branch_settings_path
branch_settings_path="$(get_branch_settings_path)"
local branch_settings_config=""
if [ "${branch_settings_path}" != "" ]; then
local branch_settings_config="-e @${branch_settings_path}"
fi
# If no target repository path is configured no build process is expected.
# Use source git repository as target repository path.
# AND set target_branch_suffix='' to checkout the standard repository.
local repository_config=""
if [ "${config_git_target_repository_path}" == "" ]; then
local repository_config="git_target_repository_path='${config_git_source_repository_path}' target_branch_suffix=''"
fi
# An explicit git_repository_path setting overrides both of the above.
if [ "${config_git_repository_path}" != "" ]; then
local repository_config="git_target_repository_path='${config_git_repository_path}' target_branch_suffix=''"
fi
# Get the current branch.
local branch_name
branch_name=$(get_current_branch)
# Set parameters.
local connection=$1 && shift
local tag=$1 && shift
# Validate tag name. Show help if needed.
case $tag in
init|up|update|test|stop|rm|exec)
echo-green "Start fin ${tag}..."
;;
*)
echo-yellow "Unknown tag '$tag'. See 'acp help' for list of available commands" && \
exit 1
;;
esac
# Run the playbook.
# Remaining CLI arguments ("$@") are forwarded to ansible-playbook.
ansible-playbook -i "${ANDOCK_CI_INVENTORY}/${connection}" --tags $tag -e "${repository_config}" -e "@${settings_path}" ${branch_settings_config} -e "project_path=$PWD branch=${branch_name}" "$@" ${ANDOCK_CI_PLAYBOOK}/fin.yml
# Handling playbook results.
if [[ $? == 0 ]]; then
echo-green "fin ${tag} was finished successfully."
# config_domain may hold several space-separated domains; print the
# per-branch URL for each one.
local domains
domains=$(echo $config_domain | tr " " "\n")
for domain in $domains
do
local url="http://${branch_name}.${domain}"
echo-green "See [$url]"
done
else
echo-error $DEFAULT_ERROR_MESSAGE
exit 1;
fi
}
#---------------------------------- GENERATE ---------------------------------
# Generate fin hooks.
# Writes .andock-ci/hooks/<name>_tasks.yml containing a single ansible
# task that runs "fin <name>" in the docroot, but only on the very first
# provisioning (environment_exists_before == false).
# @param $1 The hook name.
generate_config_fin_hook()
{
echo "- name: Init andock-ci environment
command: \"fin $1\"
args:
chdir: \"{{ docroot_path }}\"
when: environment_exists_before == false
" > ".andock-ci/hooks/$1_tasks.yml"
}
# Generate composer hook.
# Writes .andock-ci/hooks/<name>_tasks.yml with a task that runs
# "composer install" in the checkout path during the build.
# (The function name's "compser" typo is kept: generate_config calls it
# under that spelling.)
generate_config_compser_hook()
{
echo "- name: composer install
command: \"composer install\"
args:
chdir: \"{{ checkout_path }}\"
" > ".andock-ci/hooks/$1_tasks.yml"
}
# Write a stub hook task file containing only the YAML document marker.
# @param $1 hook name; creates .andock-ci/hooks/<name>_tasks.yml
generate_config_empty_hook()
{
local hook_file=".andock-ci/hooks/$1_tasks.yml"
printf -- '---\n' > "$hook_file"
}
# Generate configuration.
# Interactively creates .andock-ci/andock-ci.yml plus the build/init/
# update/test hook files for the current project. Requires a git origin
# remote; exits early when none is found.
generate_config ()
{
if [[ -f ".andock-ci/andock-ci.yml" ]]; then
echo-yellow ".andock-ci/andock-ci.yml already exists"
_confirm "Do you want to proceed and overwrite it?"
fi
local project_name
project_name=$(get_default_project_name)
local git_source_repository_path
git_source_repository_path=$(get_git_origin_url)
if [ "$git_source_repository_path" = "" ]; then
echo-red "No git repository found."
exit
fi
local domain && domain=$(_ask "Please enter project dev domain. [Like: dev.project.com. Url is: branch.dev.project.com]")
# build is "1" when the user wants a build step pushing to a target repo.
local build && build=$(_confirmAndReturn "Do you want to build the project and push the result to a target repository?")
local git_target=""
if [ "$build" = 1 ]; then
local git_target_repository_path
git_target_repository_path=$(_ask "Please enter git target repository path. [Leave empty to use ${git_source_repository_path}]")
# Set to source repository if empty.
if [ "${git_target_repository_path}" = "" ]; then
git_target_repository_path=${git_source_repository_path}
fi
local git_target="git_target_repository_path: ${git_target_repository_path}"
fi
mkdir -p ".andock-ci"
mkdir -p ".andock-ci/hooks"
# Write the main settings file; hook paths point at the files generated
# below and are resolved relative to {{project_path}} at playbook time.
echo "project_name: \"${project_name}\"
domain: \"${domain}\"
git_source_repository_path: ${git_source_repository_path}
${git_target}
hook_build_tasks: \"{{project_path}}/.andock-ci/hooks/build_tasks.yml\"
hook_init_tasks: \"{{project_path}}/.andock-ci/hooks/init_tasks.yml\"
hook_update_tasks: \"{{project_path}}/.andock-ci/hooks/update_tasks.yml\"
hook_test_tasks: \"{{project_path}}/.andock-ci/hooks/test_tasks.yml\"
" > .andock-ci/andock-ci.yml
# Composer projects get a real build hook; everything else gets stubs.
if [[ "$build" = 1 && $(_confirmAndReturn "Do you use composer to build your project?") == 1 ]]; then
generate_config_compser_hook "build"
else
generate_config_empty_hook "build"
fi
generate_config_fin_hook "init"
generate_config_empty_hook "update"
generate_config_empty_hook "test"
if [[ $? == 0 ]]; then
if [ "$build" = 1 ]; then
echo-green "Configuration was generated. Configure your hooks and start the pipeline with ${yellow}acp build${NC}"
else
echo-green "Configuration was generated. Configure your hooks and start the pipeline with ${yellow}acp fin init${NC}"
fi
else
echo-error ${DEFAULT_ERROR_MESSAGE}
fi
}
# Add ssh key.
# Starts an ssh-agent in this shell, loads the private key passed as the
# arguments via stdin (stripping carriage returns from keys pasted out of
# CI variables/Windows editors), and ensures ~/.ssh exists with mode 700.
ssh_add ()
{
eval "$(ssh-agent -s)"
echo "$*" | tr -d '\r' | ssh-add - > /dev/null
mkdir -p ~/.ssh
chmod 700 ~/.ssh
echo-green "SSH key was added to keystore."
}
#----------------------------------- DRUSH -----------------------------------
# Print the andock environment alias "<project>.<branch>" for the
# current project settings and git branch.
run_alias ()
{
set -e
check_settings_path
get_settings
local current_branch
current_branch=$(get_current_branch)
echo "${config_project_name}.${current_branch}"
}
# Generate a drush alias file (drush/<project>.aliases.drushrc.php) with
# one alias per configured domain for the current branch. The stub header
# exports the alias name to LC_ANDOCK_CI_ENV so the remote andock server
# can select the matching cli container.
run_drush_generate ()
{
set -e
# Abort if andock is not configured for this project.
check_settings_path
# Load settings.
get_settings
local branch_name
# Read current branch.
branch_name=$(get_current_branch)
# Generate drush folder if not exists
mkdir -p drush
# The local drush file.
local drush_file="drush/${config_project_name}.aliases.drushrc.php"
# Check if a drush file already exists. If not generate a stub which export
# the alias name to LC_ANDOCK_CI_ENV.
# Based on LC_ANDOCK_CI_ENV andock server jumps into the correct cli container
if [ ! -f ${drush_file} ]; then
echo "<?php
\$_drush_context = drush_get_context();
if (isset(\$_drush_context['DRUSH_TARGET_SITE_ALIAS'])) {
putenv ('LC_ANDOCK_CI_ENV=' . substr(\$_drush_context['DRUSH_TARGET_SITE_ALIAS'], 1));
}" > ${drush_file}
fi
# source .docksal/docksal.env for DOCROOT.
source .docksal/docksal.env
# Generate one alias for each configured domain.
local domains
domains=$(echo $config_domain | tr " " "\n")
# Loop through each domain to generate one alias for each subsite.
# Note: aliases are appended, so re-running adds duplicate entries.
for domain in $domains
do
local url="http://${branch_name}.${domain}"
echo "
\$aliases['${branch_name}'] = array (
'root' => '/var/www/${DOCROOT}',
'uri' => '${url}',
'remote-host' => '${domain}',
'remote-user' => 'andock-ci',
'ssh-options' => '-o SendEnv=LC_ANDOCK_CI_ENV'
);
" >> $drush_file
done
echo-green "Drush alias for branch \"${branch_name}\" was generated successfully."
echo-green "See ${drush_file}"
}
#----------------------------------- SERVER -----------------------------------
# Add ssh key to andock-ci user.
# @param $1 connection name
# @param $2 SSH public key (wrapped in a forced "acs _bridge" command so
#           the key can only run the andock bridge on the server)
# @param $3 optional remote admin user (default "root")
run_server_ssh_add ()
{
set -e
local connection=$1
shift
local ssh_key="command=\"acs _bridge \$SSH_ORIGINAL_COMMAND\" $1"
shift
if [ "$1" = "" ]; then
local root_user="root"
else
local root_user=$1
shift
fi
ansible-playbook -e "ansible_ssh_user=$root_user" -i "${ANDOCK_CI_INVENTORY}/${connection}" -e "ssh_key='$ssh_key'" "${ANDOCK_CI_PLAYBOOK}/server_ssh_add.yml"
echo-green "SSH key was added."
}
# Install andock-ci.
# Installs or updates the andock-ci server over ansible.
# @param $1 connection name
# @param $2 tag: "install" or "update"
# @param $3 optional andock-ci password (default: random via openssl)
# @param $4 optional remote admin user (default "root")
# NOTE(review): the help text advertises [root_user] [andock_ci_pass] in
# the opposite order to what is parsed here — confirm which is intended.
run_server_install ()
{
local connection=$1
shift
local tag=$1
shift
set -e
if [ "$1" = "" ]; then
local andock_ci_pw
andock_ci_pw=$(openssl rand -base64 32)
else
local andock_ci_pw=$1
shift
fi
if [ "$1" = "" ]; then
local root_user="root"
else
local root_user=$1
shift
fi
# The playbook receives the password pre-hashed (sha-512 crypt).
local andock_ci_pw_enc
andock_ci_pw_enc=$(mkpasswd --method=sha-512 $andock_ci_pw)
# Bootstrap python on the target first: ansible modules need it.
ansible andock-ci-docksal-server -e "ansible_ssh_user=$root_user" -i "${ANDOCK_CI_INVENTORY}/${connection}" -m raw -a "test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)"
ansible-playbook -e "ansible_ssh_user=$root_user" --tags $tag -i "${ANDOCK_CI_INVENTORY}/${connection}" -e "pw='$andock_ci_pw_enc'" "${ANDOCK_CI_PLAYBOOK}/server_install.yml"
if [ "$tag" == "install" ]; then
echo-green "andock-ci server was installed successfully."
echo-green "andock-ci password is: $andock_ci_pw"
else
echo-green "andock-ci server was updated successfully."
fi
}
#----------------------------------- MAIN -------------------------------------
# Check for connection alias.
# A leading "@name" argument selects a named connection; otherwise the
# default connection is used.
int_connection="$1"
add="${int_connection:0:1}"
if [ "$add" = "@" ]; then
# Connection alias found.
connection="${int_connection:1}"
shift
else
# No alias found. Use the "default"
connection=${DEFAULT_CONNECTION_NAME}
fi
# Then we check if the command needs a connection.
# And if yes we check if the connection exists.
# NOTE(review): "fin-run" also uses $connection below but is not
# validated here — confirm whether that is intentional.
case "$1" in
server:install|server:update|server:info|server:ssh-add|fin)
check_connect $connection
echo-green "Use connection: $connection"
;;
esac
org_path=${PWD}
# ansible playbooks needs to be called from project_root.
# So cd to root path
root_path=$(find_root_path)
cd "$root_path"
# Store the command.
command=$1
shift
# Finally. Run the command.
case "$command" in
_install-pipeline)
install_pipeline "$@"
;;
_update-pipeline)
install_configuration "$@"
;;
cup)
install_configuration "$@"
;;
self-update)
self_update "$@"
;;
ssh-add)
ssh_add "$@"
;;
generate-playbooks)
generate_playbooks
;;
generate:config)
# Config generation must run where the user invoked acp, not the root.
cd $org_path
generate_config
;;
connect)
run_connect "$@"
;;
build)
run_build "$@"
;;
fin)
run_fin "$connection" "$@"
;;
fin-run)
run_fin_run "$connection" "$1" "$2"
;;
alias)
run_alias
;;
drush:generate-alias)
run_drush_generate
;;
server:install)
run_server_install "$connection" "install" "$@"
;;
server:update)
run_server_install "$connection" "update" "$@"
;;
server:info)
# NOTE(review): run_server_info is not defined in this part of the
# script — presumably defined elsewhere; verify.
run_server_info "$connection" "$@"
;;
server:ssh-add)
run_server_ssh_add "$connection" "$1" "$2"
;;
help|"")
show_help
;;
-v | v)
version --short
;;
version)
version
;;
*)
echo-yellow "Unknown command '$command'. See 'acp help' for list of available commands" && \
exit 1
esac
| true |
df221df1ce5782cbb9cf0b127d8a6ca0c6417df5 | Shell | mmantho/bash-fsl-pipeline | /examples/sbfc/rsfc_2_subjects_1multiroi_feat.sh~ | UTF-8 | 2,400 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Driver script: runs the multi-ROI FEAT pipeline for all subjects using
# four left-hemisphere ROIs, once on the standard series and once on the
# denoised series. Relies on helper scripts sourced from the FSL project
# layout (init_vars.sh defines arr_subj, PROJ_SCRIPT_DIR, etc.).
# Toggle: 0 = workstation paths, 1 = CAB-lab machine paths.
WORK_IN_CAB=0
# ====== init params ===========================
if [ $WORK_IN_CAB -eq 0 ]
then
GLOBAL_SCRIPT_DIR=/media/data/MRI/scripts
PROJ_DIR=/media/data/MRI/projects/CAB/fsl_belgrade_early_pd # <<<<@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
export FSLDIR=/usr/local/fsl # change according to used PC
else
GLOBAL_SCRIPT_DIR=/homer/home/dati/fsl_global_scripts
PROJ_DIR=/media/Iomega_HDD/MRI/projects/CAB/fsl_belgrade_early_pd # <<<<@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
export FSLDIR=/usr/share/fsl/4.1 # change according to used PC
fi
#===============================================
# Sources project-wide variables (subject list, script dirs, ...).
. $GLOBAL_SCRIPT_DIR/init_vars.sh $PROJ_DIR
#===============================================
SESS_ID=1
NUM_CPU=2
EXECUTE_SH=$GLOBAL_SCRIPT_DIR/process_subject/rsfc_multiple_subject_1multiroi_feat.sh
# base name of ROI: final name used by the script will be $ROI_DIR/reg_epi/mask_ROINAME_epi.nii.gz
declare -a arr_roi=(l_caudate_hos_fsl l_pallidum_hos_fsl l_putamen_hos_fsl l_thalamus_hos_fsl)
TEMPL_FSF=$PROJ_SCRIPT_DIR/glm/templates/template_feat_4roi_ortho
# standard call: define output dir name
OUTPUT_DIR_NAME=roi_left_caud_pall_put_thal_ortho
INPUT_ROI_DIR="reg_epi"
# alternative call: define output dir name, input file name and output series postfix name
OUTPUT_SERIES_POSTFIX_NAME="denoised"
ALTERNATIVE_OUTPUT_DIR_NAME=roi_left_caud_pall_put_thal_ortho_denoised
ALTERNATIVE_INPUT_NUISANCE_FILE="nuisance_denoised_10000.nii.gz"
ALTERNATIVE_INPUT_ROI_DIR="reg_epi"
# NOTE(review): duplicate assignment — OUTPUT_SERIES_POSTFIX_NAME is
# already set to "denoised" a few lines above.
OUTPUT_SERIES_POSTFIX_NAME="denoised"
#====================================================================================
# Build the full ROI mask filenames expected on disk.
declare -a final_roi=()
declare -i cnt=0
for roi in ${arr_roi[@]}; do
final_roi[cnt]=mask_$roi"_epi.nii.gz"
cnt=$cnt+1
done
# default call: read $RSFC_DIR/nuisance_10000.nii.gz
. $MULTICORE_SCRIPT_DIR/define_thread_processes.sh $NUM_CPU $EXECUTE_SH "$arr_subj" $PROJ_DIR -model $TEMPL_FSF -odn $OUTPUT_DIR_NAME -ridn $INPUT_ROI_DIR ${final_roi[@]}
# alternative call: read $RSFC_DIR/nuisance_denoised_10000.nii.gz
. $MULTICORE_SCRIPT_DIR/define_thread_processes.sh $NUM_CPU $EXECUTE_SH "$arr_subj" $PROJ_DIR -ifn $ALTERNATIVE_INPUT_NUISANCE_FILE -model $TEMPL_FSF -odn $ALTERNATIVE_OUTPUT_DIR_NAME -son $OUTPUT_SERIES_POSTFIX_NAME -ridn $ALTERNATIVE_INPUT_ROI_DIR ${final_roi[@]}
# Wait for any background worker processes spawned by the helpers.
wait
echo "=====================> finished processing $0"
| true |
a379ab8ba7d058f3b5a1178e04d89cebd9e11427 | Shell | acb17ssp/Linux-Stage1 | /Day-5/PDF-4/problem4.sh | UTF-8 | 1,314 | 3.484375 | 3 | [] | no_license | #!/bin/bash -x
# Read three integers, derive four arithmetic combinations of them, and
# report the largest and smallest results.
read -r -p "Enter first number: " a
read -r -p "Enter second number: " b
read -r -p "Enter third number: " c
# The four candidate expressions (integer arithmetic: / truncates and
# % is the remainder). b must be non-zero or the division/modulo fail.
value1=$(($a+$b*$c))
value2=$(($a%$b+$c))
value3=$(($c+$a/$b))
value4=$(($a*$b+$c))
# Scan the candidates once instead of comparing every pair. This also
# fixes the tie handling: the previous strict-comparison chains printed
# "value not found" whenever two candidates were equal.
max=$value1
min=$value1
for v in $value2 $value3 $value4
do
if [ $v -gt $max ]
then
max=$v
fi
if [ $v -lt $min ]
then
min=$v
fi
done
echo "Maximum value: $max"
echo "Minimum value: $min"
| true |
f9eec86c6ee91e87e93d98b385b50e6c0f85b8aa | Shell | LSSTDESC/NaMaster | /scripts/install_libchealpix.sh | UTF-8 | 1,510 | 3.390625 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
#Javi Sanchez is the dude
# Downloads, configures and builds a static libchealpix 3.11.4 into the
# local _deps prefix. Extra CLI arguments ($@) are forwarded to configure.
# Requires CFITSIO to be installed and discoverable.
DEPDIR=_deps
[ -e $DEPDIR ] || mkdir $DEPDIR
[ -e $DEPDIR/bin ] || mkdir $DEPDIR/bin
[ -e $DEPDIR/lib ] || mkdir $DEPDIR/lib
[ -e $DEPDIR/include ] || mkdir $DEPDIR/include
# Absolute install prefix, captured before cd'ing into the dep dir.
ADEPDIR=$PWD/$DEPDIR
cd $DEPDIR
unameOut="$(uname -s)"
echo ${unameOut}
# Linux boxes get wget; macOS ships curl but not wget.
# NOTE(review): "[ -e dir ] || download && tar ..." groups left-to-right,
# so tar re-runs even when the source directory already exists — confirm
# this is intended.
if [ "${unameOut}" = "Linux" ]
then
[ -e chealpix-3.11.4 ] || wget https://sourceforge.net/projects/healpix/files/Healpix_3.11/autotools_packages/chealpix-3.11.4.tar.gz && tar xzf chealpix-3.11.4.tar.gz
elif [ "${unameOut}" = "Darwin" ]
then
[ -e chealpix-3.11.4 ] || curl https://sourceforge.net/projects/healpix/files/Healpix_3.11/autotools_packages/chealpix-3.11.4.tar.gz -L --output chealpix-3.11.4.tar.gz && tar xzf chealpix-3.11.4.tar.gz
fi
cd chealpix-3.11.4
./configure --enable-static --disable-shared --with-pic --prefix=${ADEPDIR} $@
if [ $? -eq 0 ]; then
echo "Successful configure."
else
echo "ERROR: failed to configure HEALPix. Check CFITSIO is installed and reachable."
exit 127
fi
make
if [ $? -eq 0 ]; then
echo "Successful make."
else
echo "ERROR: couldn't compile libsharp. Make sure CFITSIO is installed."
echo " You may need to add the correct path to CPPFLAGS, LDFLAGS and LD_LIBRARY_PATHS."
echo " E.g.:"
echo " >$ export CPPFLAGS+=\" -I/path/to/cfitsio/include\""
echo " >$ export LDFLAGS+=\" -L/path/to/cfitsio/lib\""
echo " >$ export LD_LIBRARY_PATHS=\$LD_LIBRARY_PATH:/path/to/cfitsio/lib"
exit 127
fi
make install
| true |
6d9a351d55a1818ffc1a97301bc74a52e0f4ddad | Shell | sgnconnects/JOTPOT-OS | /installer/installer.sh | UTF-8 | 4,845 | 3.609375 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
#
# JOTPOT OS
#
# Copyright (c) 2017 Jacob O'Toole
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Interactive preamble: requires root, shows warnings, and waits for an
# explicit Y/y keypress before installing (n/N aborts).
clear
echo "Welcome to the JOTPOT OS (JPOS) installer..."
if [ "$(whoami)" != "root" ]
then
echo "The installer should be run as root. To run as root, either be logged in as root or put 'sudo' in front of the command to start the installation."
exit
fi
echo "JOTPOT OS is a very experomental OS with it's GUI based on electron and is designed to be increadably open to modification. This is somthing that has never been done before and this is being developed mainly as a very eirly test for what is to come."
echo "Because of this it can be unstable. It should not have massive problems however small issues are very likely to occur, and larger ones may happen."
echo "JPOS is licenced under the Apache License 2.0 - view at http://www.apache.org/licenses/LICENSE-2.0"
echo "NONE OF YOUR FILES SHOULD BE LOST during this installation, however it is still highly recomended you make backups of all files on this computer before installing."
echo ""
echo "!!! Please do not power off this computer while JPOS is installing, this may lead to data curruption or an incomplete installation, thus leaving your computer in an unstable state. !!!"
echo ""
conf=""
echo "Start installation of JPOS now? [Y/n]"
# Read one silent keystroke at a time until Y/y (continue) or N/n (abort).
while [ "$conf" != "y" ] && [ "$conf" != "Y" ]
do
read -s -n 1 conf
if [ "$conf" == "n" ] || [ "$conf" == "N" ]
then
exit
fi
done
echo "OK, starting JPOS installation now..."
# Map the machine architecture to electron's release naming (x64/ia32).
a="$(arch)"
if [ "$a" == "x86_64" ]
then
a="x64"
elif [ "$a" == "x86" ]
then
a="ia32"
fi
# Move an existing file or directory out of the way by renaming it to
# "<name>.old", recursively shifting any pre-existing "<name>.old" aside
# first so nothing is overwritten. No-op when the path does not exist.
file() {
local target=$1
if [[ -f "$target" || -d "$target" ]]
then
echo "'$target' already exists, renaming it to $target.old"
sleep 2
# Recurse first so an existing "$target.old" is itself shifted away.
file "$target.old"
mv "$target" "$target.old"
fi
}
# Clear previous install locations (renamed to *.old, not deleted).
file "/server"
file "/JPOS"
electron_version="1.6.5"
electron_file="electron-v$electron_version-linux-$a"
echo ""
echo "Downloading precompiled version of electron v$electron_version for linux on $a from 'https://github.com/electron/electron/releases/download/v$electron_version/$electron_file.zip'..."
wget -O "electron.zip" "https://github.com/electron/electron/releases/download/v$electron_version/$electron_file.zip" >/dev/null 2>/dev/null
echo "Extracting electron..."
unzip -o "electron.zip" -d "installer" >/dev/null
# Rename the electron binary to "installer" and drop the bundled demo app.
mv ./installer/electron ./installer/installer
rm ./installer/resources/default_app.asar
echo ""
echo "Downloading JPOS installer scripts from 'https://www.jotpot.co.uk/experimental/JPOS/installer.asar'..."
wget -O "./installer/resources/app.asar" "https://www.jotpot.co.uk/experimental/JPOS/installer.asar" >/dev/null 2>/dev/null
echo ""
echo "Installing/Updating the X server..."
echo " This will either be really quick or take a VERY long time depending on the state of your system..."
echo " This process is very unlikely to hang, please give it time..."
echo "Package 1/3 - 'xorg'"
apt-get -y install xorg >/dev/null 2>/dev/null
echo "Package 2/3 - 'openbox'"
apt-get -y install openbox >/dev/null 2>/dev/null
echo "Package 3/3 - 'xinit'"
apt-get -y install xinit >/dev/null 2>/dev/null
# Shared libraries required by the electron runtime.
echo "Installing/Updating required libraries."
echo "Library 1/3 - 'libxss1'"
apt-get -y install libxss1 >/dev/null 2>/dev/null
echo "Library 2/3 - 'libnss3'"
apt-get -y install libnss3 >/dev/null 2>/dev/null
echo "Library 3/3 - 'libgconfmm-2.6-1c2'"
apt-get -y install libgconfmm-2.6-1c2 >/dev/null 2>/dev/null
echo ""
# Replace /etc/rc.local (old one renamed to .old) so the JPOS installer
# UI starts on tty1 at boot, and switch systemd to the non-GUI target.
echo "Changing boot target..."
file /etc/rc.local
echo "#!/bin/bash" >/etc/rc.local
echo "exec < /dev/tty1" >>/etc/rc.local
echo "exec >/dev/tty1" >>/etc/rc.local
echo "cd /JPOS-install" >>/etc/rc.local
echo "startx /JPOS-install/installer/installer >/dev/null" >>/etc/rc.local
echo "exit 0" >>/etc/rc.local
chmod +x /etc/rc.local
systemctl set-default -f multi-user.target >/dev/null
echo "Ready."
echo "Rebooting in 5 seconds..."
sleep 5
reboot
exit 0 | true |
c1ddb354a025b9d5e002bdc6bed28afb9d7555e1 | Shell | mjrider/addon-pi-hole | /pi-hole/rootfs/etc/cont-init.d/32-nginx.sh | UTF-8 | 922 | 2.875 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/usr/bin/with-contenv bash
# ==============================================================================
# Community Hass.io Add-ons: Pi-hole
# Configures NGINX for use with Pi-hole
# ==============================================================================
# Substitutes the %%...%% placeholders in the shipped nginx.conf from the
# add-on configuration, swaps in the SSL variant when requested, and
# strips IPv6 listen directives when IPv6 is disabled.
# shellcheck disable=SC1091
source /usr/lib/hassio-addons/base.sh
declare admin_port
declare certfile
declare keyfile
admin_port=$(hass.config.get 'admin_port')
if hass.config.true 'ssl'; then
# Replace the plain config with the SSL template, then fill in the
# certificate/key placeholders.
rm /etc/nginx/nginx.conf
mv /etc/nginx/nginx-ssl.conf /etc/nginx/nginx.conf
certfile=$(hass.config.get 'certfile')
keyfile=$(hass.config.get 'keyfile')
sed -i "s/%%certfile%%/${certfile}/g" /etc/nginx/nginx.conf
sed -i "s/%%keyfile%%/${keyfile}/g" /etc/nginx/nginx.conf
fi
sed -i "s/%%admin_port%%/${admin_port}/g" /etc/nginx/nginx.conf
# Drop the "listen [::]..." lines when IPv6 support is turned off.
if ! hass.config.true 'ipv6'; then
sed -i '/listen \[::\].*/ d' /etc/nginx/nginx.conf
fi
| true |
0901a4b6bd664a4a508f2043e9cfdb19ff9ce1f5 | Shell | vigo/testi | /testi.sh | UTF-8 | 2,673 | 3.953125 | 4 | [] | no_license | #!/usr/bin/env bash
# return 0 => True
# Abort the whole test run on the first unhandled command failure.
set -e
# Global counters maintained by the te_* assertion helpers below.
TE_EXECUTED_COUNT=0
TE_PASSED_COUNT=0
TE_FAILED_COUNT=0
# Print a failure line ("Fail: <message>") to stdout; the %b format
# mirrors the backslash-escape handling of `echo -e`.
te_print_error ()
{
printf 'Fail: %b\n' "$1"
}
# Assert that two strings are exactly equal.
# $1 actual value, $2 expected value, $3 optional custom failure message.
# Increments TE_EXECUTED_COUNT and either TE_PASSED_COUNT or
# TE_FAILED_COUNT; failures are reported via te_print_error.
function te_assert_equal
{
((TE_EXECUTED_COUNT+=1))
# The right-hand side is quoted: unquoted, [[ == ]] performs glob
# matching, so te_assert_equal "abc" "a*" would wrongly pass.
if [[ $1 == "$2" ]]; then
((TE_PASSED_COUNT+=1))
else
local error_msg=""
if [[ $3 ]]; then
error_msg="${1}, ${2} : ${3}"
else
error_msg="${1} is not equal to ${2}"
fi
te_print_error "${error_msg}"
((TE_FAILED_COUNT+=1))
fi
}
# Assert that two strings are NOT equal.
# $1 actual value, $2 comparison value, $3 optional custom failure message.
# Increments TE_EXECUTED_COUNT and either TE_PASSED_COUNT or
# TE_FAILED_COUNT; failures are reported via te_print_error.
function te_assert_not_equal
{
((TE_EXECUTED_COUNT+=1))
# The right-hand side is quoted: unquoted, [[ != ]] performs glob
# matching, so te_assert_not_equal "abc" "a*" would wrongly fail.
if [[ $1 != "$2" ]]; then
((TE_PASSED_COUNT+=1))
else
local error_msg=""
if [[ $3 ]]; then
error_msg="${1}, ${2} : ${3}"
else
error_msg="${1} is equal to ${2}"
fi
te_print_error "${error_msg}"
((TE_FAILED_COUNT+=1))
fi
}
# Evaluate a unary file test (e.g. -f, -d, -r) against a path and echo
# "true" or "false".
# $1 test operator, $2 path.
function te_file_system_operations
{
# Both operands are quoted: unquoted, a path containing whitespace
# expanded into several words and the [ ] test always errored to "false".
if [ "$1" "$2" ]; then
echo "true"
else
echo "false"
fi
}
# function myfunc()
# {
# local myresult='some value'
# echo "$myresult"
# }
#
# result=$(myfunc) # or result=`myfunc`
# echo $result
# Dispatch a named file-system assertion against a path.
# $1 method name (file_exists, folder_exists, file_readable, file_writable,
#    file_executable, file_symlink, file_empty), $2 path,
# $3 optional custom failure message. Updates the TE_* counters.
# NOTE: "file_empty" maps to [ -s ], which is true for NON-empty files;
# the name is kept as-is for backward compatibility.
function te_file_operation
{
((TE_EXECUTED_COUNT+=1))
# Local and pre-cleared: previously a stale global "result" from an
# earlier call leaked into the unknown-method branch below and was
# miscounted as a pass/fail instead of reporting the bad method name.
local result=""
case $1 in
file_exists)
result=$(te_file_system_operations "-f" "$2");;
folder_exists)
result=$(te_file_system_operations "-d" "$2");;
file_readable)
result=$(te_file_system_operations "-r" "$2");;
file_writable)
result=$(te_file_system_operations "-w" "$2");;
file_executable)
result=$(te_file_system_operations "-x" "$2");;
file_symlink)
result=$(te_file_system_operations "-h" "$2");;
file_empty)
result=$(te_file_system_operations "-s" "$2");;
*)
# "|| true" guards the decrement: when the counter drops back to 0
# the (( )) command returns status 1, which would abort the whole
# script under the file-level "set -e".
((TE_EXECUTED_COUNT-=1)) || true;;
esac
if [[ $result == "true" ]]; then
((TE_PASSED_COUNT+=1))
elif [[ $result == "false" ]]; then
local error_msg=""
if [[ $3 ]]; then
error_msg="${3}"
else
error_msg="Not $1: ${2}"
fi
te_print_error "${error_msg}"
((TE_FAILED_COUNT+=1))
else
echo "Unknown test method: $1"
fi
}
# report
# Print a summary report of all assertions run so far.
# Reads the global TE_EXECUTED_COUNT / TE_PASSED_COUNT / TE_FAILED_COUNT.
function te_result
{
    echo "-------------------------------------------------------------------"
    echo "Runned: ${TE_EXECUTED_COUNT}"
    # Use numeric comparisons: inside [[ ]], ">" and "==" compare strings
    # lexicographically, which breaks for multi-digit counts.
    if [[ $TE_FAILED_COUNT -gt 0 ]]; then
        echo "Failed: ${TE_FAILED_COUNT}"
    fi
    if [[ $TE_PASSED_COUNT -gt 0 ]]; then
        echo "Passed: ${TE_PASSED_COUNT}"
    fi
    if [[ $TE_PASSED_COUNT -eq $TE_EXECUTED_COUNT ]]; then
        echo "All passed..."
    fi
}
| true |
856f39386ca7306f105d43080f3d8048e4e40a09 | Shell | cuongdn/Altairis.RazorPages.EditorTemplates | /get.sh | UTF-8 | 377 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Inspired by IdentityServer QuickStart UI installer script
# Downloads the editor-templates archive from GitHub and installs its
# Pages/ content into the local Pages/ directory.
set -e
SOURCE="https://github.com/ridercz/Altairis.RazorPages.EditorTemplates/archive/master.zip"
curl -L -o templates.zip "$SOURCE"
# Extract into ./templates so the copy and cleanup steps below find it
# (the original extracted into ./ui, a directory the rest of the script
# never referenced, so the copy could not succeed).
unzip -d templates templates.zip
[[ -d Pages ]] || mkdir Pages
cp -r ./templates/Altairis.RazorPages.EditorTemplates-master/Pages/* Pages
rm -rf templates templates.zip
| true |
919d972ac69cba39261ae613e713aec7586a88e5 | Shell | pierremolinaro/ElCanari-documentation | /algo-geometriques-latex/-build.command | UTF-8 | 1,531 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Build algos-geometriques.pdf with pdflatex/makeindex, re-running LaTeX
# until the auxiliary files (.idx/.lof/.lot/.toc) reach a fixed point.
# Every step is chained with && so the build stops at the first failure.
set -x
DIR=`dirname $0` &&
cd $DIR &&
# Remove all artifacts of a previous build before the first pass.
rm -f algos-geometriques.pdf ref.* algos-geometriques.ilg algos-geometriques.ind &&
rm -f algos-geometriques.aux algos-geometriques.idx algos-geometriques.lof algos-geometriques.lot algos-geometriques.toc &&
rm -f algos-geometriques.log algos-geometriques.out algos-geometriques.synctex.gz &&
#--- First pass
PDF_LATEX=`which pdflatex` &&
MAKE_INDEX=`which makeindex` &&
$PDF_LATEX --file-line-error --shell-escape algos-geometriques.tex &&
# Seed empty reference copies; the loop below compares the fresh auxiliary
# files against these and reruns LaTeX as long as they keep changing.
touch ref.idx &&
touch ref.lof &&
touch ref.lot &&
touch ref.toc &&
iteration=0 &&
while [ `cmp -s ref.lof algos-geometriques.lof ; echo $?` -ne 0 ] \
|| [ `cmp -s ref.lot algos-geometriques.lot ; echo $?` -ne 0 ] \
|| [ `cmp -s ref.toc algos-geometriques.toc ; echo $?` -ne 0 ] \
|| [ `cmp -s ref.idx algos-geometriques.idx ; echo $?` -ne 0 ]
do
# Snapshot the current auxiliary files, rebuild the index, rerun LaTeX.
cp algos-geometriques.idx ref.idx &&
cp algos-geometriques.lof ref.lof &&
cp algos-geometriques.lot ref.lot &&
cp algos-geometriques.toc ref.toc &&
$MAKE_INDEX -s $DIR/inclusions/style-indexes.ist algos-geometriques.idx &&
$PDF_LATEX --file-line-error --shell-escape algos-geometriques.tex &&
iteration=$((iteration+=1))
done &&
# Final cleanup of the intermediate files.
rm -f algos-geometriques.aux algos-geometriques.idx algos-geometriques.lof algos-geometriques.lot algos-geometriques.toc &&
rm -f algos-geometriques.log algos-geometriques.ilg algos-geometriques.ind algos-geometriques.out algos-geometriques.synctex.gz &&
rm -f ref.* temp.eb temp.eb.tex &&
echo "---------------- SUCCES $iteration iterations"
| true |
512babfdfc297bc825465cf188e20625b3827891 | Shell | KaDock/s6-postfix | /gen.sh | UTF-8 | 279 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env sh
# render a template configuration file
# preserve formatting
# expand variables
#
# Usage: gen.sh <config-file>
# Reads <config-file>.tmpl, replaces the ${SASLDB_PATH}, ${SASLDB_DIR} and
# ${SSL_DIR} placeholders with the values of the like-named shell variables,
# and writes the result to <config-file>.
render_template() {
    # Single sed invocation instead of "cat | sed | sed | sed"; expansions
    # are quoted so file names containing spaces do not break the command.
    # NOTE(review): values containing '#' or '&' would still confuse the
    # sed replacement text - confirm the *_DIR/*_PATH values never do.
    sed -e "s#\${SASLDB_PATH}#${SASLDB_PATH}#g" \
        -e "s#\${SASLDB_DIR}#${SASLDB_DIR}#g" \
        -e "s#\${SSL_DIR}#${SSL_DIR}#g" \
        "$1.tmpl" > "$1"
}
render_template "$1"
| true |
73a21f58e6d9ba5fe4b99d5519da822a6b072224 | Shell | snit-ram/git-svn-syncer | /first-sync | UTF-8 | 809 | 3.796875 | 4 | [] | no_license | #!/usr/bin/env bash
# Bootstrap a combined git/git-svn mirror:
#  1. clone the git repository,
#  2. fetch the SVN history into the same clone,
#  3. graft the first git commit onto the SVN head,
#  4. push (dcommit) the git commits into SVN.
usage_and_exit() {
  echo "usage: first-sync <svn-url> <git-url> <gitsvn-clone-path>"
  echo ""
  exit 0
}

# All three arguments are required.  The original script only validated the
# first one and would then clone into an empty path (its initial
# CLONE_PATH=`pwd` default was unconditionally clobbered by $3 anyway).
if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
  usage_and_exit
fi

SVN_URL=$1
GIT_URL=$2
CLONE_PATH=$3

echo "cloning git repository..."
git clone "$GIT_URL" "$CLONE_PATH" || exit 1

echo "entering cloned repository..."
# Abort on a failed cd: the git-svn commands below must never run in the
# wrong directory.
cd "$CLONE_PATH" || exit 1

echo "fetching svn commits..."
git svn init -T trunk "$SVN_URL"
git svn fetch

echo "creating git graft..."
# Retrieve the SVN head and the git root commit and graft them together so
# the two histories appear as one line.
SVN_COMMIT=$(git show-ref trunk | cut -d " " -f 1)
GIT_COMMIT=$(git log --pretty=oneline master | tail -n1 | cut -d " " -f 1)
echo "$GIT_COMMIT $SVN_COMMIT" >> .git/info/grafts

# Sync commits with svn.
echo "syncing commits into svn..."
git svn dcommit --add-author-from --rmdir --find-copies-harder
echo "DONE"
137d5f1d3260ac14756e3b8c430a14a96e29f0bd | Shell | mr-junior/research | /scripts/plot_spectra.sh | UTF-8 | 811 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Plot the eigenvalue spectrum contained in a text file as a 1-D scatter
# plot (PNG) using gnuplot.
input=
output=

# Print command-line help to stdout.
function usage()
{
cat << EOF
Usage: $0 [OPTIONS]
OPTIONS:
-i [file] Input file containing eigenvalues.
-o [file] Output image file.
EOF
}

while getopts "i:o:" opt
do
    case "$opt" in
        i) input=$OPTARG;;
        o) output=$OPTARG;;
        \:) usage; exit;;
        \?) usage; exit;;
    esac
done

# The input file is mandatory; bail out early instead of handing an empty
# file name to gnuplot.
if [ -z "$input" ]; then
    usage
    exit 1
fi

# Default output name: the input name with a .png extension.
if [ -z "$output" ]; then
    output=${input%.*}.png
fi

# Generate the gnuplot script with a single here-document instead of a
# chain of "echo >>" lines; $input/$output are still expanded.
cat > spectra_plotter.gnu << EOF
set terminal png enhanced size 1280, 200
set output "$output"
unset ytics
stats "$input"
set xrange [STATS_min-0.5:STATS_max+0.5]
plot "$input" using 1:(0) notitle
EOF

gnuplot spectra_plotter.gnu
rm -f spectra_plotter.gnu
| true |
1dac280a4319d148e152613abdb100b8fcc8a3e3 | Shell | legacy-codedigger/Solaris-2.6-Source-Code | /Solaris_2.6/os_net/src_ws/usr/src/pkgdefs/SUNWpmu/postinstall | UTF-8 | 681 | 2.921875 | 3 | [] | no_license | #!/bin/sh
#
# Copyright (c) 1993 - 1996, by Sun Microsystems, Inc.
# All rights reserved.
#
#ident "@(#)postinstall 1.1 96/09/13 SMI"
#
# SUNWpmu postinstall script
#
# adds sysidpm to the list of applications run out of sysidconfig.
#
# If the sysidconfig tool is available, use it to register sysidpm (only
# when not already registered); otherwise fall back to appending the entry
# to ${BASEDIR}/etc/.sysidconfig.apps directly.
if [ -x /usr/sbin/sysidconfig ]; then {
# grep -s is silent; a non-zero status means sysidpm is not yet listed.
/usr/sbin/sysidconfig -b "${BASEDIR}" -l | /usr/bin/grep -s sysidpm >/dev/null
if [ $? -ne 0 ]; then
/usr/sbin/sysidconfig -b "${BASEDIR}" -a /usr/sbin/sysidpm
fi
} else {
/usr/bin/grep -s sysidpm ${BASEDIR}/etc/.sysidconfig.apps >/dev/null
if [ $? -ne 0 ]; then
echo /usr/sbin/sysidpm >> ${BASEDIR}/etc/.sysidconfig.apps
fi
} fi
# Package scripts must exit 0 on success.
exit 0
| true |
65abd80919c85acd49df282310a93cbdd4bb8bad | Shell | chomjun/mydemo-blue-green | /init.bash | UTF-8 | 362 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Deploy the blue/green demo stack: tear down any existing "demo-web"
# stack, then deploy the green, blue and proxy-lb compose files into a
# fresh stack of the same name.
BASE_DIR=$(pwd)
GW_DIR=${BASE_DIR}/proxy-lb
BLUE_DIR=${BASE_DIR}/web-blue
GREEN_DIR=${BASE_DIR}/web-green

docker stack rm demo-web
# Give the swarm time to finish tearing the old stack down.
sleep 10

# Abort when a cd fails: otherwise the following deploy would silently
# reuse the docker-compose.yml of the previous directory.
cd "${GREEN_DIR}/" || exit 1
docker stack deploy -c docker-compose.yml demo-web
cd "${BLUE_DIR}/" || exit 1
docker stack deploy -c docker-compose.yml demo-web
cd "${GW_DIR}/" || exit 1
docker stack deploy -c docker-compose.yml demo-web
| true |
1b9f21c915167c9d20f29e00fa43a62e979719ba | Shell | LorenzoChavez/Master-Data-Science | /shell/script/top-words-1.sh | UTF-8 | 224 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env bash
#1.The command below will extract a list of the most used words in a .txt file
# Usage: top-words-1.sh <file> <number-of-lines>
top_words() {
    local file=$1
    local lines=$2
    # Lowercase, split into words, then count and rank them.  Reading via
    # redirection (instead of "cat $FILE | ...") and quoting every
    # expansion makes this safe for file names with spaces and prevents
    # cat from silently falling back to stdin when no file is given.
    tr '[:upper:]' '[:lower:]' < "$file" \
        | grep -oE '\w+' | sort | uniq -c | sort -nr | head -n "$lines"
}
top_words "$1" "$2"
| true |
37eefc90f48700a9e78e12a8cef44250a690bdec | Shell | looztra/dockerfiles | /centos-ncdigloop/ncdig-loop.sh | UTF-8 | 1,672 | 3.953125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Deal with defaults and overrides
#
# Connectivity test loop: repeatedly runs dig / nc / ping against a list of
# host:port targets taken from NCDIG_TARGETS (comma separated).  Every knob
# below can be overridden through the corresponding environment variable.
sleep_time=${SLEEP_TIME:-15}
dig_options=${DIG_OPTIONS:-"+noall +answer"}
nc_options=${NC_OPTIONS:-"-v -w1 -i1"}
ping_options=${PING_OPTIONS:-"-c 1"}
# A value of 1 enables the corresponding probe; anything else disables it.
do_dig=${DO_DIG:-1}
do_nc=${DO_NC:-1}
do_ping=${DO_PING:-1}
#
echo "Found NCDIG_TARGETS=${NCDIG_TARGETS}"
echo
echo "sleep_time [$sleep_time]"
echo "dig_options [$dig_options]"
echo "nc_options [$nc_options]"
echo "ping_options [$ping_options]"
echo "do_dig [$do_dig] do_nc [$do_nc] do_ping [$do_ping]"
# Exit cleanly on Ctrl+C (signal 2 = SIGINT).
function get_me_out() {
echo "Ctrl+c trapped, exiting because you asked"
exit 0
}
trap get_me_out 2
if [ -z "${NCDIG_TARGETS}" ]; then
echo "Doing nothing, NCDIG_TARGETS is not set"
exit 1
fi
# Split the comma separated target list into an array of host:port entries.
IFS=',' read -a targets <<< "${NCDIG_TARGETS}"
for target in "${targets[@]}"
do
echo "found target [$target]"
done
echo "Let's start testing targets!"
echo "============================>"
echo
# Main loop: never returns; each round probes every target, then sleeps.
while true; do
date
echo
for target in "${targets[@]}"
do
# host is the part before the first ':', port the part after it.
host=$(echo ${target} | cut -d":" -f1)
port=$(echo ${target} | cut -d":" -f2)
echo "Checking [$target/host=$host,port=$port]"
if [ $do_dig == 1 ]; then
echo "dig start --------------------->"
dig $host $dig_options
echo "<--------------------- dig end"
fi
if [ $do_nc == 1 ]; then
echo "nc start --------------------->"
nc $nc_options $host $port
echo "<---------------------nc end"
fi
if [ $do_ping == 1 ]; then
echo "ping start --------------------->"
ping $ping_options $host
echo "<--------------------- ping end"
fi
echo
done
echo "Sleeping ${sleep_time} seconds"
echo
sleep ${sleep_time}
done | true |
96990badb104bb65f568bf841805b78a56ccc076 | Shell | idiap/idiaptts_egs_blizzard08_roger | /s1/01_setup.sh | UTF-8 | 8,434 | 3.984375 | 4 | [] | no_license | #!/usr/bin/env bash
#
# Copyright (c) 2019 Idiap Research Institute, http://www.idiap.ch/
# Written by Bastian Schnell <bastian.schnell@idiap.ch>
#
#######################################
# Print the command-line help text.
# Globals:   PROGNAME (read)
# Outputs:   usage/help text to stdout
#######################################
usage() {
cat <<- EOF
usage: ${PROGNAME} [OPTIONS] <Path to roger db> <num_workers=1>
This program loads the samples from the roger database.
It requires the path to roger database as parameter.
Ensure that bc and soxi packages are installed.
It creates a file_id_list_all.txt with all ids,
a file_id_list_full.txt with the ids from carroll, arctic, and theherald1-3,
and a file_id_list_demo.txt with the ids from theherald1.
OPTIONS:
-h show this help
--no_silence_removal skips the silence removal step
--max_length_sec maximum length of audio files used
EOF
}
# Print "ERROR <message...>" on stderr and terminate the script with
# status 1.
die() {
  echo -e "ERROR" "$@" >&2
  exit 1
}
###############################
# Default options and functions
#
# set -o xtrace # Prints every command before running it, same as "set -x".
# set -o errexit # Exit when a command fails, same as "set -e".
# # Use "|| true" for those who are allowed to fail.
# # Disable (set +e) this mode if you want to know a nonzero return value.
# set -o pipefail # Catch mysqldump fails.
# set -o nounset # Exit when using undeclared variables, same as "set -u".
# set -o noclobber # Prevents the bash shell from overwriting files, but you can force it with ">|".
export SHELLOPTS # Used to pass above shell options to any called subscripts.
readonly PROGNAME=$(basename $0)
readonly PROGDIR=$(readlink -m $(dirname $0))
readonly ARGS="$@"
# The IdiapTTS source tree location must be provided by the environment.
if [ -z "${IDIAPTTS_ROOT}" ]; then
echo "IDIAPTTS_ROOT variable not set. Please run 'source cmd.sh' first."
exit 1
fi
# Fixed paths.
dir_src="${IDIAPTTS_ROOT}/src/"
# Parameter extraction.
silence_removal=true # Default parameter.
max_length_sec=-1
# Long options (--name value) are emulated through the "-" getopts case.
while getopts ":h-:" flag; do # If a character is followed by a colon (e.g. f:), that option is expected to have an argument.
case "${flag}" in
-) case "${OPTARG}" in
no_silence_removal) silence_removal=false ;;
max_length_sec) val="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 ))
max_length_sec=${val} ;;
*) die "Invalid option: --${OPTARG}" ;;
esac;;
h) usage; exit ;;
\?) die "Invalid option: -$OPTARG" ;;
:) die "Option -$OPTARG requires an argument." ;;
esac
done
shift $(($OPTIND - 1)) # Skip the already processed arguments.
minArgs=1
if [[ $# -lt "${minArgs}" ]]; then
usage # Function call.
die "Wrong number of parameters, expected at least ${minArgs} but got $#."
fi
# Positional arguments: database path and (optional) number of workers.
db_path=${1:-}
num_workers=${2:-"1"} # Default number of workers is one.
# Output directory layout, relative to this script's location.
dir_exp="${PROGDIR}/experiments"
dir_data="${PROGDIR}/database"
dir_audio="${dir_data}/wav"
dir_logs="${dir_data}/log/"
dir_txt="${dir_data}/txt"
mkdir -p ${dir_exp}
mkdir -p ${dir_data}
mkdir -p ${dir_audio}
mkdir -p ${dir_txt}
file_id_list_demo="${dir_data}/file_id_list_demo.txt"
file_id_list="${dir_data}/file_id_list_full.txt"
file_id_list_all="${dir_data}/file_id_list_all.txt"
# Collect utterance ids of audio files.
echo "Collect utterance ids of audio files..."
# Demo subset: ids listed in the theherald1 list file.
utt_lists_demo=("theherald1")
utts_demo=()
for utt_list in "${utt_lists_demo[@]}"; do
mapfile -t -O ${#utts_demo[@]} utts_demo < ${db_path}/stp/${utt_list} # -t remove trailing newline, -O start index to add entries.
done
# Remove duplicates.
utts_demo=($(printf "%s\n" "${utts_demo[@]}" | sort -u))
printf "%s\n" "${utts_demo[@]}" >| ${file_id_list_demo}
# Full subset: carroll + arctic + theherald list files.
utt_lists_full=("carroll" "arctic" "theherald")
utts_full=()
for utt_list in "${utt_lists_full[@]}"; do
mapfile -t -O ${#utts_full[@]} utts_full < ${db_path}/stp/${utt_list} # -t remove trailing newline, -O start index to add entries.
done
# Remove duplicates.
utts_full=($(printf "%s\n" "${utts_full[@]}" | sort -u))
printf "%s\n" "${utts_full[@]}" >| ${file_id_list}
# All ids, from the "all" list file.
utt_list_all=("all")
utts_all=()
for utt_list in "${utt_list_all[@]}"; do
mapfile -t -O ${#utts_all[@]} utts_all < ${db_path}/stp/${utt_list} # -t remove trailing newline, -O start index to add entries.
done
# Remove duplicates.
utts_all=($(printf "%s\n" "${utts_all[@]}" | sort -u))
printf "%s\n" "${utts_all[@]}" >| ${file_id_list_all}
# Create links to audio files.
echo "Create links to audio files..."
# The first 7 characters of an utterance id name its database subdirectory.
for utt in "${utts_all[@]}"; do
# cp ${db_path}/wav/${utt:0:7}/${utt}.wav $dir_audio/${utt}.wav # Copy files instead of symbolic link.
ln -sf ${db_path}/wav/${utt:0:7}/${utt}.wav ${dir_audio}/${utt}.wav
done
# Optionally strip silence from all wav files, split across num_workers
# grid jobs, then merge untouched files back and clean up.
if [ "$silence_removal" = true ]; then
    echo "Remove silence..."
    num_utts=${#utts_all[@]}
    block_size=$(expr ${num_utts} / ${num_workers} + 1)
    num_blocks=$(expr ${num_utts} / ${block_size} + 1)
    # Split into working blocks.
    if [ "$num_blocks" -gt "99" ]; then
        suffix_length=3
    elif [ "$num_blocks" -gt "9" ]; then
        suffix_length=2
    else
        suffix_length=1
    fi
    # Split file_id_list in the same way as utts_selected.data.
    name_file_id_list=$(basename ${file_id_list_all})
    split --numeric=1 --suffix-length ${suffix_length} -l ${block_size} ${file_id_list_all} ${dir_data}/${name_file_id_list}_block
    # Remove leading zeros in block numbering.
    for FILE in $(ls ${dir_data}/${name_file_id_list}_block*); do
        mv "${FILE}" "$(echo ${FILE} | sed -e 's:_block0*:_block:')" 2>/dev/null
    done
    rm -r -f "${dir_data}"/wav_org_silence/
    mv "${dir_data}"/wav "${dir_data}"/wav_org_silence
    mkdir -p "${dir_data}"/wav
    # python ${dir_src}/data_preparation/audio/silence_remove.py --dir_wav "database/wav_org_silence/" --dir_out "database/wav/" --file_id_list "database/file_id_list_full.txt"
    # NOTE(review): cpu_1d_cmd is presumably defined by the sourced cmd.sh
    # grid configuration - confirm.
    ./${cpu_1d_cmd} JOB=1:${num_blocks} ${dir_logs}/${name_file_id_list}_silence_removal_blockJOB.log \
        ${dir_src}/data_preparation/audio/silence_remove.py \
        --dir_wav ${dir_data}/wav_org_silence/ \
        --dir_out ${dir_data}/wav/ \
        --file_id_list ${dir_data}/${name_file_id_list}_blockJOB
    # Copy files not touched in this remove silence step.  The glob must be
    # outside the quotes: the original quoted "${dir_data}/wav_org_silence/*",
    # which makes cp look for a file literally named '*'.
    cp -R -u -p "${dir_data}/wav_org_silence/"* "${dir_data}/wav" # -u copy only when source is newer than destination file or if it is missing, -p preserve mode, ownership, timestamps etc.
    # Remove intermediate files.
    rm -r -f "${dir_data}"/wav_org_silence/
    eval rm -f ${dir_data}/${name_file_id_list}_block{0..${num_blocks}} # eval command required because otherwise brace expansion { .. } happens before $ expansion
fi
# Optionally remove audio files longer than max_length_sec seconds and
# rebuild the id lists from the files that survived.  bc is used because
# the length and the threshold are floating point values.
if (( $(echo "$max_length_sec > 0" | bc -l) )); then
    echo "Removing audio files longer than ${max_length_sec} seconds..."
    # Remove too long files.
    # TODO: Call it on the grid. Move this into an explicit script?
    num_removed=0
    for filename in "${utts_all[@]}"; do
        # soxi -D prints the duration in seconds.  ${filename} is the loop
        # variable (the original text contained a corrupted placeholder
        # here).
        length=$(soxi -D "${dir_audio}/${filename}.wav")
        if (( $(echo "${length} > ${max_length_sec}" | bc -l) )); then
            rm "${dir_audio}/${filename}.wav"
            # "|| true": the post-increment returns the old value, so the
            # first increment (0) would read as a failure under errexit.
            ((num_removed++)) || true
        fi
    done
    echo "Removed ${num_removed} files."
    # Update file id lists to keep only ids that still have a wav file.
    comm -12 <(printf '%s\n' "${utts_full[@]}") <(ls "${dir_audio}"/ | sed -e 's/\..*$//' | sort) > "${file_id_list}"
    comm -12 <(printf '%s\n' "${utts_demo[@]}") <(ls "${dir_audio}"/ | sed -e 's/\..*$//' | sort) > "${file_id_list_demo}"
    comm -12 <(printf '%s\n' "${utts_all[@]}") <(ls "${dir_audio}"/ | sed -e 's/\..*$//' | sort) > "${file_id_list_all}"
fi
# Copy labels file to data directory.
cp ${db_path}/utts.data ${dir_data}/
# The remaining steps are currently disabled: they would build a
# utts_selected.data restricted to the selected utterances and split it
# into one txt label file per utterance id.
#echo "Create utts_selected.data file containing only the utterance of the subset."
## Combine the selected utterances to a regex pattern.
#utts_pat=$(echo ${utts_full[@]}|tr " " "|")
## Select those labes of utts.data which belong to the selected utterances.
#cat ${dir_data}/utts.data | grep -wE "${utts_pat}" >| ${dir_txt}/utts_selected.data
## Turn every line of utts.data into a txt file using the utterance id as file name.
#echo "Create txt file with label for each utterance in database/txt/..."
#awk -F' ' -v outDir=${dir_txt} '{print substr($0,length($1)+2,length($0)) > outDir"/"substr($1,2,length($1)-1)".txt"}' ${dir_txt}/utts_selected.data
# Remove intermediate files.
#rm ${dir_txt}/utts_selected.data
| true |
95e14e791f7246ed100f8bab9cd0b7c12225eafb | Shell | jllopis/despertaferro | /install.sh | UTF-8 | 1,967 | 3.96875 | 4 | [] | no_license | #!/usr/bin/env bash
# install.sh
# Will install the default environment on a computer with either OSX or
# Ubuntu Linux: installs the required packages, then hands control over to
# ansible-pull, which applies the despertaferro playbook.
set -e

USERNAME=$USER
# NOTE(review): the REQUIRED_GO_* variables are not referenced in this
# script - presumably consumed by the pulled playbook; confirm.
REQUIRED_GO_VERSION="go1.11.7"
REQUIRED_GO_ARCH="amd64"
REQUIRED_GO_OS="darwin"

# Global vars
TIMESTAMP=$(date -u +%Y%m%dT%H%M%SZ)
LOGFILE=${PWD}/install-$TIMESTAMP.log

# This is the list of packages that should be in your system or installed
# Note that in OSX Homebrew will always be installed if not present
PACKAGES=('git' 'ansible')
PACMAN=""

function main() {
    # Select OS
    case $(uname) in
        'Darwin')
            THIS_OS="darwin"
            ;;
        'Linux')
            THIS_OS="linux"
            ;;
        *)
            echo "${failure}Unrecognized OS. Aborting${text_reset}"
            exit 1
            ;;
    esac

    if [[ ${THIS_OS} == "linux" ]]; then
        LOCATOR="which -s"
        case $(lsb_release -is) in
            "Ubuntu")
                PACMAN="apt"
                ;;
            *)
                # The original interpolated ${`lsb_release -is`}, which is
                # a "bad substitution" error and aborted the script here.
                echo "[\033[31mERROR\033[0m] Cannot determine your Linux distro. lsb_release -is = $(lsb_release -is)"
                ;;
        esac
    else
        LOCATOR="type -p"
    fi

    printf "Installation started at %s\n" $TIMESTAMP
    printf "\tOS: %s\n" $THIS_OS
    printf "\tUSERNAME: %s\n" $USERNAME
    printf "\tHOME DIR: %s\n" $HOME

    # Prepare Ansible install
    sudo apt update
    sudo apt install software-properties-common
    sudo add-apt-repository --yes --update ppa:ansible/ansible

    # Install required packages
    for p in "${PACKAGES[@]}"
    do
        echo "[EXEC] sudo ${PACMAN} install -y ${p}"
        sudo $PACMAN install -y --no-install-recommends ${p}
    done
    echo ""
    printf "[\033[32mOK\033[0m] System requirements installed!"

    # Launch ansible-pull
    ansible-pull -U https://github.com/jllopis/despertaferro.git
}

# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until the script has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &

# Start
main "$@"
| true |
65abf8627f8ddd3377602552ea3afada9d5a2ef4 | Shell | binarylHq/ruby_lepton | /bin/setup | UTF-8 | 122 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Project setup: install the gem dependencies and make sure the output
# directory exists.
set -euo pipefail
IFS=$'\n\t'
set -vx

bundle install

# mkdir -p is a no-op when the directory already exists, so the original
# "if [ ! -d output ]" guard was redundant.
mkdir -p output
| true |
b03ef42def843913a25474fc4ae9251208589f01 | Shell | krishna221097/C-C- | /scripting/script2 | UTF-8 | 1,189 | 3.0625 | 3 | [] | no_license | #!/bin/bash
#To store information
#Created by Sri Krishna V
#created on 01/10/2019
# Interactive student-record entry: repeatedly reads a record from stdin
# while the user answers 'y', echoes it back, and finally appends a graded
# line for the record to the file "data".
echo "If you want to enter record press 'y' else 'n' "
ans='y'
while [ $ans = 'y' ]
do
echo "Enter your roll no"
read rollno
echo "Enter your name"
read name
echo "Enter company name"
read compname
echo "subject 1 mark"
read s1
echo "subject 2 mark"
read s2
echo "subject 3 mark"
read s3
# Total and integer percentage of the three subject marks.
tot=`expr $s1 + $s2 + $s3`
per=`expr $tot / 3`
echo "Rollno :$rollno , Name :$name , Company :$compname , Subject1 mark:$s1 ,Subject2 mark:$s2 ,Subject3 mark:$s3 Total :$tot Percentage :$per"
read ans
done
#sleep 10
# Grade bands: 80-99 distinction, 70-79 first class, otherwise pass.
# NOTE(review): only the record of the *final* loop iteration is written,
# because every variable is overwritten on each pass - confirm intended.
if [ $per -ge 80 -a $per -lt 100 ]
then
echo "Rollno :$rollno , Name :$name , Company :$compname , Subject1 mark:$s1 ,Subject2 mark:$s2 , Subject3 mark:$s3 , Total :$tot , Percentage :$per , grade :destinction" >>data
elif [ $per -ge 70 -a $per -lt 80 ]
then
echo "Rollno :$rollno , Name :$name , Company :$compname , Subject1 mark:$s1 ,Subject2 mark:$s2 , Subject3 mark:$s3 Total :$tot Percentage :$per grade :first-class" >>data
else
echo "Rollno :$rollno , Name :$name , Company :$compname , Subject1 mark:$s1 ,Subject2 mark:$s2 ,Subject3 mark:$s3 Total :$tot Percentage :$per grade :pass" >>data
fi
| true |
f2bdba8a1d42699b66483212a8cf932e8846ee18 | Shell | analogeryuta/dotfiles | /dot.xinitrc-urxvt-kinput2 | UTF-8 | 770 | 2.90625 | 3 | [] | no_license | #!/bin/sh
## Miscellaneous environment settings (comments translated from Japanese)
export LANG=ja_JP.UTF-8
export LC_CTYPE=C
export LC_TIME=C
export LC_MESSAGES=C
#export DISPLAY="localhost:0.0"
#export PATH="$PATH:/usr/local/bin:/usr/X11/bin"
## Keyboard mapping and window (X resources) settings
userresources=$HOME/.Xresources
usermodmap=$HOME/.Xmodmap
if [ -f "$usermodmap" ]; then
xmodmap "$usermodmap"
fi
if [ -f "$userresources" ]; then
xrdb -nocpp -merge "$userresources"
fi
sleep 1
## Start urxvtd (rxvt-unicode daemon mode)
urxvtd -q -f -o &
sleep 1
## kinput2 (Japanese input method) settings
export XMODIFIERS="@im=kinput2"
kinput2 -canna -cannaserver localhost &
sleep 2
## Start the window manager
if [ -f /usr/bin/quartz-wm ]; then
## Start quartz-wm
exec quartz-wm
else
exec twm
fi
| true |
047fcbaf0272422d94c0e1288df38183d9bc1843 | Shell | udbhav-chugh/SystemSoftwareLab-CS241 | /postmidsem/awkANDshell/shell/q6.sh | UTF-8 | 179 | 3.390625 | 3 | [] | no_license | #! /bin/bash
# Palindrome check: read one line from stdin, print "yes" if it reads the
# same reversed, otherwise "no" (both without a newline).
read str
# NOTE(review): $str is unquoted here, so word splitting collapses internal
# whitespace before reversing, and read without -r mangles backslashes -
# confirm inputs never contain either.
str2=$(echo $str | rev )
# Character count of the input (-n suppresses echo's trailing newline).
len=$(echo -n "$str" | wc -m)
if [ "$str2" == "$str" ]
then
echo -n yes
else
echo -n no
fi
echo " length of string is $len" | true |
fc0321c2a52cd2ea1b689d723467ec3048e5ee2d | Shell | beneficio/benefweb | /shell script/root/interPolizas.bat | UTF-8 | 1,373 | 2.6875 | 3 | [] | no_license | #!/bin/bash
# Policy-novelty interface job.  A lock file (/root/bloq.flag) prevents
# overlapping runs; it is created on entry and removed on completion.
# (Section comments translated from Spanish.)
if [ -e /root/bloq.flag ]; then
echo "INTERFACE BLOQUEADA"
else
echo "INTERFACE ACTIVA"
# NOTE(review): this writes the literal string "date" into the flag file,
# not a timestamp - presumably $(date) was intended; confirm.
echo date > /root/bloq.flag
#---------------------------------------------------------------
# RUN THE GENERATION OF POLICY NOVELTIES
#---------------------------------------------------------------
/usr/bin/dcclient /root/dactweb.txt dc_server
#---------------------------------------------------------------
# FTP OF ACT*.TXT FILES
#---------------------------------------------------------------
cd /opt/tomcat/webapps/benef/files/ftppino/
chmod 666 ACT*.TXT
###cd /home/adrian
###/root/pinoftpact.sh
#---------------------------------------------------------------
# COPY THE FILES TO THE PROJECT DIRECTORY
#---------------------------------------------------------------
###cd /home/adrian
###cp ACT*.TXT /opt/tomcat/webapps/benef/files/as400/
#---------------------------------------------------------------
# TABLE INSERTION
#---------------------------------------------------------------
su - postgres -c /var/lib/pgsql/pinoact.sh
#---------------------------------------------------------------
# CREATE A BACKUP DIRECTORY
#---------------------------------------------------------------
###cd /home/adrian
###mkdir `date +%Y%m%d`
###mv ACT*.TXT `date +%Y%m%d`
cp ACT*.TXT /home/adrian/`date +%Y%m%d`
cd /root
rm bloq.flag
fi
| true |
7d8f699edbb01752054389f78c08330f1968e3a4 | Shell | de13/riak-cs | /scripts/riak.sh | UTF-8 | 564 | 3.0625 | 3 | [] | no_license | #! /bin/sh
# Container entrypoint for Riak: fix volume permissions, patch the node
# name with the container's IP, clear stale ring state and exec Riak.
# Ensure correct ownership and permissions on volumes
chown riak:riak /var/lib/riak /var/log/riak
chmod 755 /var/lib/riak /var/log/riak
# Open file descriptor limit
ulimit -n 4096
# IPv4 address of eth0.
# NOTE(review): "sed -n 2p" selects the *second* output line of
# "ip -o -4 addr list eth0" - confirm this matches the image's interface
# layout (a single-address eth0 would make this empty).
IP_ADDRESS=$(ip -o -4 addr list eth0 | awk '{print $4}' | cut -d/ -f1 | sed -n 2p)
# Ensure the Erlang node name is set correctly
sed -i.bak "s/riak@127.0.0.1/riak@${IP_ADDRESS}/" /etc/riak/riak.conf
# Drop any stale ring state left over from a previous container run.
rm -rf /var/lib/riak/ring/*
# Start Riak
# run_erl comes from the bundled Erlang runtime (erts*); the console output
# is logged under /var/log/riak.
exec /sbin/setuser riak "$(ls -d /usr/lib/riak/erts*)/bin/run_erl" "/tmp/riak" \
"/var/log/riak" "exec /usr/sbin/riak console"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.