blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b32104b591bf08745e15a41fe416fdaa54ed9bad
|
Shell
|
steelalive/dot
|
/bin/exemple_skeleton_script.sh
|
UTF-8
| 1,681
| 4.125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# ---------------------------------------------------------------------------
# this - test
# Copyright 2018, <root@PC>
# All rights reserved.
#
# Usage: this [-h|--help] [-q|--quit]
#
# Revision history:
# 2018-03-20 Created by script_gen ver. 3.3
# (Fixed: the generator emitted a duplicate, unreachable "-h|--help" case
# arm and listed the option twice in the usage/help text.)
# ---------------------------------------------------------------------------

PROGNAME=${0##*/}
VERSION="0.1"

clean_up() { # Perform pre-exit housekeeping (placeholder)
  return
}

error_exit() { # Print "$PROGNAME: message" to stderr, clean up, exit 1
  echo -e "${PROGNAME}: ${1:-"Unknown Error"}" >&2
  clean_up
  exit 1
}

graceful_exit() { # Normal exit path: clean up, then exit with last status
  clean_up
  exit
}

signal_exit() { # Handle trapped signals; $1 is the signal name
  case $1 in
    INT)
      error_exit "Program interrupted by user"
      ;;
    TERM)
      echo -e "\n$PROGNAME: Program terminated" >&2
      graceful_exit
      ;;
    *)
      error_exit "$PROGNAME: Terminating on unknown signal"
      ;;
  esac
}

usage() {
  echo -e "Usage: $PROGNAME [-h|--help] [-q|--quit]"
}

help_message() {
  # Heredoc is user-visible output; keep content flush-left.
  cat <<-_EOF_
$PROGNAME ver. $VERSION
test
$(usage)
Options:
-h, --help Display this help message and exit.
-q, --quit quit
NOTE: You must be the superuser to run this script.
_EOF_
  return
}

# Trap signals
trap "signal_exit TERM" TERM HUP
trap "signal_exit INT" INT

# Check for root UID
if [[ $(id -u) != 0 ]]; then
  error_exit "You must be the superuser to run this script."
fi

# Parse command-line
while [[ -n $1 ]]; do
  case $1 in
    -h | --help)
      help_message
      graceful_exit
      ;;
    -q | --quit)
      echo "quit"
      ;;
    -*)
      # "-*" already matches "--*"; the old "--*" alternative was redundant.
      usage
      error_exit "Unknown option $1"
      ;;
    *)
      echo "Argument $1 to process..."
      ;;
  esac
  shift
done

# Main logic
graceful_exit
| true
|
cc1efdb9df3369b0351acee1c7dfbe78a59f5796
|
Shell
|
tarmiste/lfs-custom-configs
|
/lfs81/blfsrawscripts/138-z-openldap
|
UTF-8
| 4,734
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# BLFS 8.1 build script: fetch, patch, build and install OpenLDAP 2.4.45
# (first pass: client libs only, second pass: full slapd build), then
# install and start the BLFS slapd bootscript.
# Expects SRC_ARCHIVE and BUILD_ROOT in the environment; SRC_SUBDIRS,
# BUILD_SUBDIRS, FTP_SERVER and KEEP_FILES are optional.
set -e
PKG_DIR=openldap
SRC_DIR=${SRC_ARCHIVE}${SRC_SUBDIRS:+/${PKG_DIR}}
BUILD_DIR=${BUILD_ROOT}${BUILD_SUBDIRS:+/${PKG_DIR}}
mkdir -p $SRC_DIR
mkdir -p $BUILD_DIR
cd $SRC_DIR
PACKAGE=openldap-2.4.45.tgz
# Use a locally archived tarball if present, otherwise download it.
if [[ ! -f $PACKAGE ]] ; then
if [[ -f $SRC_ARCHIVE/$PACKAGE ]] ; then
cp $SRC_ARCHIVE/$PACKAGE $PACKAGE
else
wget -T 30 -t 5 ftp://ftp.openldap.org/pub/OpenLDAP/openldap-release/openldap-2.4.45.tgz ||
wget -T 30 -t 5 ${FTP_SERVER}svn/o/$PACKAGE
fi
fi
# NOTE(review): MD5 is assigned but never verified (no `md5sum -c "$MD5"`).
MD5="00ff8301277cdfd0af728a6927042a13 $PACKAGE"
PATCH=openldap-2.4.45-consolidated-1.patch
if [[ ! -f $PATCH ]] ; then
if [[ -f $SRC_ARCHIVE/$PATCH ]] ; then
cp $SRC_ARCHIVE/$PATCH $PATCH
else
wget -T 30 -t 5 http://www.linuxfromscratch.org/patches/blfs/8.1/openldap-2.4.45-consolidated-1.patch ||
wget -T 30 -t 5 ${FTP_SERVER}svn/o/$PATCH
fi
fi
# NOTE(review): under `set -e`, if SRC_DIR == BUILD_DIR this AND-list fails
# and aborts the script -- confirm the two are always distinct here.
[[ "$SRC_DIR" != "$BUILD_DIR" ]] && ln -sf $SRC_DIR/$PATCH $BUILD_DIR
############# Unpacking source
cd $BUILD_DIR
# Remove any previous unpack directories, then detect the fresh one from the
# archiver's verbose listing.
find . -maxdepth 1 -mindepth 1 -type d | xargs rm -rf
case $PACKAGE in
*.tar.gz|*.tar.bz2|*.tar.xz|*.tgz|*.tar.lzma)
tar -xvf $SRC_DIR/$PACKAGE > unpacked
UNPACKDIR=`grep '[^./]\+' unpacked | head -n1 | sed 's@^\./@@;s@/.*@@'`
;;
*.tar.lz)
bsdtar -xvf $SRC_DIR/$PACKAGE 2> unpacked
UNPACKDIR=`head -n1 unpacked | cut -d" " -f2 | sed 's@^\./@@;s@/.*@@'`
;;
*.zip)
zipinfo -1 $SRC_DIR/$PACKAGE > unpacked
UNPACKDIR="$(sed 's@/.*@@' unpacked | uniq )"
# Zip archives without a single top-level directory get one created.
if test $(wc -w <<< $UNPACKDIR) -eq 1; then
unzip $SRC_DIR/$PACKAGE
else
UNPACKDIR=${PACKAGE%.zip}
unzip -d $UNPACKDIR $SRC_DIR/$PACKAGE
fi
;;
*)
# Not an archive: stage the file (plus any symlinked patches) in a build dir.
UNPACKDIR=$PKG_DIR-build
mkdir $UNPACKDIR
cp $SRC_DIR/$PACKAGE $UNPACKDIR
cp $(find . -mindepth 1 -maxdepth 1 -type l) $UNPACKDIR
;;
esac
############# Building package
cd $UNPACKDIR
# First pass: build/install without slapd (libraries and client tools).
patch -Np1 -i ../openldap-2.4.45-consolidated-1.patch &&
autoconf &&
./configure --prefix=/usr \
--sysconfdir=/etc \
--disable-static \
--enable-dynamic \
--disable-debug \
--disable-slapd &&
make depend &&
make
make -j1 install
# Create the ldap daemon user/group (ids per the BLFS book).
groupadd -g 83 ldap &&
useradd -c "OpenLDAP Daemon Owner" \
-d /var/lib/openldap -u 83 \
-g ldap -s /bin/false ldap
# Second pass: reconfigure and build the full server (slapd + modules).
# NOTE(review): re-applying the same patch to an already-patched tree will
# fail; `set -e` does not abort here because patch is in an AND-list.
patch -Np1 -i ../openldap-2.4.45-consolidated-1.patch &&
autoconf &&
./configure --prefix=/usr \
--sysconfdir=/etc \
--localstatedir=/var \
--libexecdir=/usr/lib \
--disable-static \
--disable-debug \
--with-tls=openssl \
--with-cyrus-sasl \
--enable-dynamic \
--enable-crypt \
--enable-spasswd \
--enable-slapd \
--enable-modules \
--enable-rlookups \
--enable-backends=mod \
--disable-ndb \
--disable-sql \
--disable-shell \
--disable-bdb \
--disable-hdb \
--enable-overlays=mod &&
make depend &&
make
#make -k test || true
make -j1 install &&
install -v -dm700 -o ldap -g ldap /var/lib/openldap &&
install -v -dm700 -o ldap -g ldap /etc/openldap/slapd.d &&
chmod -v 640 /etc/openldap/slapd.{conf,ldif} &&
chown -v root:ldap /etc/openldap/slapd.{conf,ldif} &&
install -v -dm755 /usr/share/doc/openldap-2.4.45 &&
cp -vfr doc/{drafts,rfc,guide} \
/usr/share/doc/openldap-2.4.45
ldconfig
# Fetch and unpack the BLFS bootscripts, caching them in SRC_DIR.
[[ ! -d $SRC_DIR/blfs-bootscripts ]] && mkdir $SRC_DIR/blfs-bootscripts
pushd $SRC_DIR/blfs-bootscripts
URL=http://anduin.linuxfromscratch.org/BLFS/blfs-bootscripts/blfs-bootscripts-20170731.tar.xz
BOOTPACKG=$(basename $URL)
if [[ ! -f $BOOTPACKG ]] ; then
if [[ -f $SRC_ARCHIVE/$PKG_DIR/$BOOTPACKG ]] ; then
cp $SRC_ARCHIVE/$PKG_DIR/$BOOTPACKG $BOOTPACKG
elif [[ -f $SRC_ARCHIVE/$BOOTPACKG ]] ; then
cp $SRC_ARCHIVE/$BOOTPACKG $BOOTPACKG
else
wget -T 30 -t 5 $URL
cp $BOOTPACKG $SRC_ARCHIVE
fi
rm -f unpacked
fi
# Re-use a previous unpack of the bootscripts if its directory still exists.
if [[ -e unpacked ]] ; then
BOOTUNPACKDIR=`head -n1 unpacked | sed 's@^./@@;s@/.*@@'`
if ! [[ -d $BOOTUNPACKDIR ]]; then
rm unpacked
tar -xvf $BOOTPACKG > unpacked
BOOTUNPACKDIR=`head -n1 unpacked | sed 's@^./@@;s@/.*@@'`
fi
else
tar -xvf $BOOTPACKG > unpacked
BOOTUNPACKDIR=`head -n1 unpacked | sed 's@^./@@;s@/.*@@'`
fi
######## BLFS BOOT SCRIPTS: REMOVE FOR CUSTOM CONFIG
cd $BOOTUNPACKDIR
make -j1 install-slapd
popd
# Smoke-test: start slapd and issue an anonymous base search.
/etc/rc.d/init.d/slapd start
ldapsearch -x -b '' -s base '(objectclass=*)' namingContexts
cd $BUILD_DIR
# Keep the build tree only when KEEP_FILES is set.
[[ -n "$KEEP_FILES" ]] || rm -rf $UNPACKDIR unpacked
exit
| true
|
a72a5b08360b33fbc3143f8c9946245b5c6e0af0
|
Shell
|
EstefrancoUSB/bash_learning
|
/13.Functions/function_exm_2.sh
|
UTF-8
| 594
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
# -*- coding: utf-8 -*-
# Created on Thu Nov 26 23:44:11 2020
# Second example of a bash function
# @author: esteban
# (Fixed: the original header was a Python-style """docstring""", which is
# not shell syntax -- the shell tried to run the quoted text as a command
# and printed a "command not found" error on every execution.)

# Echo the arguments the function was called with and set x.
myfunc()
{
echo "myfunc was called as : $@"
x=2
}

### Main script starts here
echo "Script was called with $@"
x=1
echo "x is $x"
myfunc 1 2 3 | tee out.log
echo "x is $x"
echo "Script was called with $@"

# The $@ parameters are changed within the function to reflect how the
# function was called.  NOTE: because myfunc runs on the left side of the
# `| tee` pipeline above, it executes in a subshell, so its assignment
# x=2 is NOT visible afterwards -- "x is 1" is printed both times.
| true
|
71e5055a10476c284ad0d39000a41024e91c44d5
|
Shell
|
leeren/dotfiles
|
/bash/profile.d/install/gcloud.sh
|
UTF-8
| 4,468
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Install gcloud SDK and related components.
# Requires the log.sh and util.sh helper libraries under $UTIL_DIR.
UTIL_DIR="${UTIL_DIR:-${HOME}/profile.d/util}"
# NOTE(review): GCLOUD_CONFIG_PATH is not referenced in this file -- it may
# be consumed by a script that sources this one; confirm before removing.
GCLOUD_CONFIG_PATH="${HOME}/profile.d/config/gcloud.sh"
GCLOUD_SDK_DIR="${GCLOUD_SDK_DIR:-${HOME}/google-cloud-sdk}"
# NOTE(review): the tarball name says darwin but is also downloaded on the
# linux-gnu branch below -- verify this is intentional.
SDK="google-cloud-sdk-290.0.1-darwin-x86_64.tar.gz"
# shellcheck source=./profile.d/util/log.sh
. "${UTIL_DIR}/log.sh" || { echo ". ${UTIL_DIR}/log.sh failed!" >&2; exit 1; }
# shellcheck source=./profile.d/util/util.sh
. "${UTIL_DIR}/util.sh" || util::exit ". ${UTIL_DIR}/util.sh failed!"
function help() {
# Print the usage message (the heredoc below is user-visible output, so it
# must stay flush-left and unmodified) and exit successfully.
cat <<EOF
usage: ${0} [-h] [-f]
Installs the gcloud SDK under the directory given by \$GCLOUD_SDK_DIR, which is
currently set to ${GCLOUD_SDK_DIR}.
-h Print out the help message.
-f Force a reinstall.
EOF
exit 0
}
function main() {
  # Install (or force-reinstall with -f) the gcloud SDK under
  # $GCLOUD_SDK_DIR, update its components, and ensure credentials exist.
  local force=0
  # `-h` is now an explicit option (suppresses getopts' own error message);
  # any other flag also falls through to help.  The unused `help=0` local
  # from the original was removed -- `help` here is the function above.
  while getopts "hf" opt; do
    case "${opt}" in
      f) force=1 ;;
      h|*) help ;;
    esac
  done
  shift $((OPTIND-1))
  # Check whether gcloud binary already exists and uninstall if passing `-f`.
  local install_dir usr_config
  if util::binary_exists gcloud; then
    install_dir="$(gcloud info --format='value(installation.sdk_root)')"
    usr_config="$(gcloud info --format='value(config.paths.global_config_dir)')"
    util::info "gcloud already exists with SDK installed in ${install_dir}."
    if (( force == 0 )); then
      if [[ "${install_dir}" != "${GCLOUD_SDK_DIR}" ]]; then
        util::warning "Current install dir ${install_dir} != \$GCLOUD_SDK_DIR!"
      fi
      util::info "To force a reinstall of gcloud, run ${0} -f."
    else
      util::info "Force option -f provided, gcloud will be reinstalled."
      util::info "Deleting current cloud SDK install dir ${install_dir}..."
      rm -rf "${install_dir}"
      util::info "Deleting current user config directory ${usr_config}..."
      rm -rf "${usr_config}"
      # NOTE(review): util::warn vs util::warning -- confirm both helpers
      # exist in log.sh.
      util::warn "Review .boto file to remove additional unwanted gcloud configs."
    fi
  fi
  # Check if $GCLOUD_SDK_DIR already exists and delete it if `-f` is passed in.
  if [[ -d "${GCLOUD_SDK_DIR}" ]] && (( force == 1 )); then
    util::info "Removing existing \$GCLOUD_SDK_DIR since '-f' passed in..."
    rm -rf "${GCLOUD_SDK_DIR}"
    util::info "Existing directory \$GCLOUD_SDK_DIR deleted!"
  fi
  # Start the installation process.
  if [[ ! -d "${GCLOUD_SDK_DIR}" ]]; then
    util::info "Creating SDK directory in ${GCLOUD_SDK_DIR}..."
    if ! mkdir -p "${GCLOUD_SDK_DIR}"; then
      util::exit "Creation of ${GCLOUD_SDK_DIR} failed!"
    fi
    util::info "Directory ${GCLOUD_SDK_DIR} created!"
  fi
  if [[ ! -f "${GCLOUD_SDK_DIR}/install.sh" ]]; then
    util::info "Extracting gcloud SDK to ${GCLOUD_SDK_DIR}..."
    # BUG FIX: the darwin branch was `tar -x - -C ...` (missing -f), so tar
    # never read the archive from stdin.  Both supported platforms now use
    # the same `tar -zxf -` pipeline.
    if [[ "${OSTYPE}" == "darwin"* || "${OSTYPE}" == "linux-gnu"* ]]; then
      curl --progress-bar -SL \
        "https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/${SDK}" | \
        tar -zxf - -C "${GCLOUD_SDK_DIR}" --strip-components 1
    else
      util::error "OS platform ${OSTYPE} not supported for gcloud installation."
    fi
    # PIPESTATUS reflects the curl|tar pipeline above (the last pipeline run).
    if (( PIPESTATUS[0] != 0 || PIPESTATUS[1] != 0 )); then
      util::exit "gcloud SDK extraction to ${GCLOUD_SDK_DIR} failed!"
    fi
    util::info "gcloud SDK successfully extracted to ${GCLOUD_SDK_DIR}!"
    util::info "Installing gcloud SDK..."
    if ! "${GCLOUD_SDK_DIR}/install.sh" \
      --quiet --bash-completion false \
      --path-update false \
      --additional-components alpha beta kubectl; then
      util::exit "gcloud SDK failed to install!"
    fi
    util::info "gcloud SDK installation successful!"
  else
    util::info "Install script already exists, extraction skipped!"
    util::info "To force a fresh install extraction, run ${0} -f."
  fi
  local gcloud_bin="${GCLOUD_SDK_DIR}/bin/gcloud"
  util::info "Updating gcloud components to latest version..."
  if ! "${gcloud_bin}" components update -q &>/dev/null; then
    util::exit "gcloud SDK update failed!"
  fi
  util::info "Update successful!"
  # Prompt for login only when no account has credentials yet.
  if [[ -z "$("${gcloud_bin}" auth list --format="value(ACCOUNT)")" ]]; then
    util::warn "gcloud account has no valid credentials, obtaining new creds..."
    if ! "${gcloud_bin}" auth login; then
      util::exit "gcloud authentication failed!"
    fi
  fi
  util::info "Installation of gcloud complete!"
}
main "$@"
| true
|
f5be228c0c4208b7e99567c722e8b2ff6fcb7f4c
|
Shell
|
panta97/funnel-bash
|
/script.sh
|
UTF-8
| 1,750
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/zsh
# Filter files in the current directory whose creation weekday and time fall
# inside a given window, writing matching names to filtered.txt.
# NOTE(review): parses `ls -ltU` output -- fragile for names with spaces.
myArray=()
dateArray=()
timeArray=()
filenameArray=()
# comand running example ./.script.sh "Friday" "16:00" "19:00"
# $1 $2 $3
# then you can move the filtered.txt file to a specific folder
# mv `cat filtered.txt` bunchy
# Collect date+time+name columns from ls (zsh arrays are 1-indexed; element
# 1 is the "total N" line, which is why loops below start at index 2).
while IFS= read -r line; do
myArray+=("$line")
done < <(ls -ltU | awk '{print $6$7, $8, $9}')
while IFS= read -r line; do
dateArray+=("$line")
done < <(ls -ltU | awk '{print $6$7}')
while IFS= read -r line; do
timeArray+=("$line")
done < <(ls -ltU | awk '{print $8}')
while IFS= read -r line; do
filenameArray+=("$line")
done < <(ls -ltU | awk '{print $9}')
# Transform add zeros to months
# e.g. "Mar5" (4 chars) -> "Mar05" so it matches the %b%d format later.
for ((i = 2; i <= ${#dateArray[@]}; ++i)); do
if [ ${#dateArray[$i]} -eq 4 ];then
singleDigit=${dateArray[$i]: -1}
dateArray[$i]=${dateArray[$i]: :-1}
dateArray[$i]="${dateArray[$i]}0${singleDigit}"
fi
done
get_weekday () {
# $1 ls date format
# Converts "MonDD" to the weekday name via BSD/macOS `date -j`.
# NOTE(review): "${1}18" hardcodes the year to 2018 -- wrong for files
# from any other year; confirm and parameterize if needed.
aux=$(date -j -f '%m-%d-%Y' "$(date -j -f "%b%d%y" "${1}18" +"%m-%d-%Y")" +'%A')
echo $aux
}
is_in_day_in_hour() {
# $1 weekday
# $2 min hour
# $3 max hour
# $4 weekday of current item
# $5 hour day of current item
# Echoes "true" when the item matches the weekday and lies strictly inside
# the hour window (string comparison works because times are HH:MM).
result=""
if [ "$1" = "$4" ] ; then
if [[ $5 > $2 ]] && [[ $5 < $3 ]] ; then
result="true"
echo $result
fi
else
result="false"
echo $result
fi
}
# Main loop: emit matching filenames (appends -- filtered.txt is not reset).
for ((i = 2; i <= ${#dateArray[@]}; ++i)); do
weekday=$(get_weekday "${dateArray[$i]}")
hours=${timeArray[$i]}
filename=${filenameArray[$i]}
result=$(is_in_day_in_hour $1 $2 $3 $weekday $hours)
if [ "$result" = "true" ] ; then
echo "${filenameArray[$i]}" >> filtered.txt
fi
done
| true
|
53a25dc64afcbd38fcf2ddafa67b1903cbc70c7a
|
Shell
|
jason-morsley/walking-skeleton
|
/Pipelines/Scripts/unit-tests.sh
|
UTF-8
| 1,097
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run every unit-test project found under built/Tests/Unit with
# `dotnet vstest`, collecting .trx results in built/TestsResults/Unit,
# then delete the unit-test binaries.  Uses header/sub-header from header.sh.
PARENT_PATH=$(cd "$(dirname "${BASH_SOURCE[0]}")";pwd -P)
echo "PARENT_PATH:" "${PARENT_PATH}"
source "${PARENT_PATH}/header.sh"
WD=$(pwd)
echo "WD:" "${WD}"
# Fail fast instead of running the loop in the wrong directory.
cd built/Tests/Unit || exit 1
###############################################################################
header 'UNIT TESTS STARTED'
for d in */; do
  cd "$d" || exit 1
  # assumes at most one .csproj per test directory -- TODO confirm
  test_project=$(find . -type f -name "*.csproj")
  if [[ $test_project ]]; then
    sub-header 'TESTS INITIATED'
    filename=$(basename "$test_project")
    filename=${filename%.*}.dll
    # Glob is intentional: match any */bin/Release* output directory.
    test_dll=$(find *bin/Release* -type f -name "$filename")
    dotnet vstest "$test_dll" --logger:trx \
      --ResultsDirectory:"${WD}/built/TestsResults/Unit"
    sub-header 'TESTS FINISHED'
  fi
  cd ..
done
header 'UNIT TESTS COMPLETED'
###############################################################################
header 'DELETING UNIT TESTS'
cd ..
rm --recursive --force Unit
header 'UNIT TESTS DELETED'
###############################################################################
| true
|
1c590da73ebd4290d72695e6ecc619a9827b818a
|
Shell
|
petronny/aur3-mirror
|
/inescapable/PKGBUILD
|
UTF-8
| 1,266
| 2.5625
| 3
|
[] |
no_license
|
# Arch Linux PKGBUILD for the commercial game "Inescapable" (Humble Bundle
# .deb repackaged; the hib:// source scheme is resolved by a local helper).
pkgname=inescapable
pkgver="1.1"
pkgrel=1
license=('Commercial')
url="http://www.magneticrealms.com/"
pkgdesc="Unravel the mystery uncovered by a remote interplanetary mining operation."
arch=('x86_64')
_archive='inescapable_linux_1.1-amd64.deb'
source=("hib://${_archive}")
md5sums=('7bb8c6194dccac5a476bb288f3ae0f67')
build() {
# Unpack the .deb (an ar archive) and its data tarball into $srcdir.
ar -xv ${_archive}
tar xvf data.tar.gz
}
package() {
# Copy the extracted Debian file layout into the package root.
install -D -m0755 "${srcdir}/usr/bin/inescapable" "${pkgdir}/usr/bin/inescapable"
install -D -m0755 "${srcdir}/usr/bin/inescapable.bin" "${pkgdir}/usr/bin/inescapable.bin"
install -D -m0644 "${srcdir}/usr/share/icons/hicolor/48x48/apps/inescapable.png" "${pkgdir}/usr/share/icons/hicolor/48x48/apps/inescapable.png"
install -D "${srcdir}/usr/share/inescapable/game0.dat" "${pkgdir}/usr/share/inescapable/game0.dat"
install -D "${srcdir}/usr/share/inescapable/game1.dat" "${pkgdir}/usr/share/inescapable/game1.dat"
install -D "${srcdir}/usr/share/applications/inescapable.desktop" "${pkgdir}/usr/share/applications/inescapable.desktop"
install -D "${srcdir}/usr/share/doc/inescapable/README" "${pkgdir}/usr/share/doc/inescapable/README"
install -D "${srcdir}/usr/share/doc/inescapable/LICENSE" "${pkgdir}/usr/share/doc/inescapable/LICENSE"
}
| true
|
8977c0513c58cf9eedad5cf373a5cba1a355d953
|
Shell
|
raymondchen625/bash-music
|
/bash-music.sh
|
UTF-8
| 495
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Sourceable snippet: play a sound before each command while "music mode"
# is on, implemented with a bash DEBUG trap.
# NOTE(review): "MUSCI" is a typo for "MUSIC", but the name is used
# consistently here and is exported, so renaming it could break anything
# else that reads the variable -- left as-is, flagged for a follow-up.
export BASH_MUSIC_FILE=~/bin/example.mp3
export BASH_MUSCI_PLAY_SCRIPT=~/bin/bash-music-play.sh
function playMusic ()
{
# Fired by the DEBUG trap before each command; passes the first word of the
# most recent history entry to the player script.
if [ "on" = "$MUSIC_ON_COMMAND" ]; then
$BASH_MUSCI_PLAY_SCRIPT `history 1 | head -1 | awk '{print $2}'`
fi
}
# Toggle aliases: install/remove the DEBUG trap and the on/off flag.
alias musicon='export MUSIC_ON_COMMAND=on;trap playMusic DEBUG'
alias 🎶🎶='export MUSIC_ON_COMMAND=on;trap playMusic DEBUG'
alias musicoff='export MUSIC_ON_COMMAND=off;trap - DEBUG'
alias 🔇🔇='export MUSIC_ON_COMMAND=off;trap - DEBUG'
| true
|
b7e691c1c8dc160b900a540b23afacd0ca98abab
|
Shell
|
jonheese/poltergeist
|
/misc_setup.sh
|
UTF-8
| 191
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Seed the "hostile" web app's level file and hand /var/www to the web user.

# Create the level file with an initial level of 1 if it does not exist.
# (The redirection creates the file itself, so the old `touch` was redundant.)
if [ ! -f /var/www/hostile/level.txt ] ; then
  echo "1" > /var/www/hostile/level.txt
fi

# Recursive chown; errors (e.g. files owned elsewhere) are deliberately
# ignored, matching the original best-effort behavior.
chown -R www-data:www-data /var/www/* 2>/dev/null
| true
|
bc7d19445275ee1ac605fa4b1d90d64da6ac0f36
|
Shell
|
damianwernert-ecu/cyb6004
|
/portfolio/week 2/menu.sh
|
UTF-8
| 375
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Simple folder-management menu, gated behind a password check.
# Exit codes: 1 = password check failed, 2 = invalid menu choice.

# Run the password check first; bail out if it fails.
# (Replaces the old `$?` test -- checking the command directly is clearer.)
if ! ./PasswordCheck.sh; then
  exit 1
fi

echo "Enter an option:"
echo "1. Create a folder"
echo "2. Copy a folder"
echo "3. Set a password"
# -r so a backslash in the input is taken literally.
read -rp "Enter your choice: " choice

# Dispatch to the helper script for the chosen action.
case "$choice" in
  1) ./foldermaker.sh
     ;;
  2) ./foldercopier.sh
     ;;
  3) ./setPassword.sh
     ;;
  *) echo "Invalid choice ($choice)" >&2
     exit 2
     ;;
esac
| true
|
a08177b3d7dddfefdb84cd6b688cba4db35d5b83
|
Shell
|
FauxFaux/debian-control
|
/d/didiwiki/didiwiki_0.5-13_amd64/postinst
|
UTF-8
| 920
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Debian maintainer script (postinst) for didiwiki: create the system user,
# fix ownership, link the stylesheet, and register/start the init script.
set -e
umask 0022
do_didiwiki_adduser () {
# Create the didiwiki system user (idempotent) and own its state dir.
if ! getent passwd didiwiki >/dev/null; then
adduser --quiet --system --no-create-home --home /var/lib/didiwiki \
--gecos "DidiWiki" --group didiwiki
fi
chown -R didiwiki:didiwiki /var/lib/didiwiki
}
do_didiwiki_css () {
# Symlink the packaged stylesheet into the data dir; ignore "already
# exists" so re-configuration stays idempotent.
ln -s /etc/didiwiki/styles.css /var/lib/didiwiki/styles.css \
2>/dev/null || true
}
# $1 is the dpkg maintainer-script action.
case "$1" in
configure)
do_didiwiki_adduser
do_didiwiki_css
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
# Automatically added by dh_installinit
if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ]; then
if [ -x "/etc/init.d/didiwiki" ]; then
update-rc.d didiwiki defaults >/dev/null
invoke-rc.d didiwiki start || exit $?
fi
fi
# End automatically added section
exit 0
| true
|
178a1e670dcc1d920d7facc2ad5f236bd61918a7
|
Shell
|
raminderj/pst-extraction
|
/bin/run_spark_translation.sh
|
UTF-8
| 578
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Submit the Spark job that translates e-mails (output of the topics step)
# into pst-extract/spark-emails-translation.
# Usage: run_spark_translation.sh [FORCE_LANGUAGE]
FORCE_LANGUAGE=$1   # optional language override (fixed typo: was FORCE_LANGUGE)
set +x
set -e
echo "===========================================$0 $@"
OUTPUT_DIR=spark-emails-translation
# Start from a clean output directory -- Spark will not overwrite one.
if [[ -d "pst-extract/$OUTPUT_DIR" ]]; then
    rm -rf "pst-extract/$OUTPUT_DIR"
fi
# $FORCE_LANGUAGE is deliberately left unquoted so that, with no argument,
# it expands to nothing.  NOTE(review): in that case --force_language will
# consume the next flag as its value -- confirm translation.py tolerates it.
spark-submit --master local[*] --driver-memory 8g --files spark/moses_translator.py,spark/filters.py --conf spark.storage.memoryFraction=.8 spark/translation.py pst-extract/spark-emails-with-topics pst-extract/$OUTPUT_DIR --force_language $FORCE_LANGUAGE --translation_mode apertium --moses_server localhost:8080
./bin/validate_lfs.sh $OUTPUT_DIR
| true
|
302779748726c5fea6f00dd49a87a913e7073067
|
Shell
|
LeoWare/BLFS-RPM
|
/ALSA.sh
|
UTF-8
| 4,868
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
#################################################
# Title: ALSA.sh #
# Date: 2018-02-10 #
# Version: 1.1 #
# Author: baho-utot@columbus.rr.com #
# Options: #
#################################################
# Builds ALSA-related RPMs for a BLFS system: fetch sources, verify
# checksums, rpmbuild each spec, record info, and install the packages.
set -o errexit # exit if error...insurance ;)
set -o nounset # exit if variable not initalized
set +h # disable hashall
PRGNAME=${0##*/} # script name minus the path
TOPDIR=${PWD}
# Build variables
LC_ALL=POSIX
PATH=/bin:/usr/bin:/sbin:/usr/sbin:/tools/bin
export LC_ALL PATH
#
# TITLE is the script name without its ".sh" suffix (last 3 chars dropped).
TITLE=${PRGNAME::-3}
PARENT=/usr/src/Octothorpe
# Per-stage output directories, all rooted at the invocation directory.
LOGPATH=${TOPDIR}/LOGS/BLFS
INFOPATH=${TOPDIR}/INFO/BLFS
SPECPATH=${TOPDIR}/SPECS/BLFS
PROVIDESPATH=${TOPDIR}/PROVIDES/BLFS
REQUIRESPATH=${TOPDIR}/REQUIRES/BLFS
RPMPATH=${TOPDIR}/RPMS
# Build functions
#
die() {
  # Print an optional error message in red, then abort with status 1.
  local _red="\\033[1;31m"
  local _normal="\\033[0;39m"
  # FIX: keep the message out of the printf FORMAT string (a message
  # containing '%' used to break printf); %b expands the colour escapes.
  [ -n "$*" ] && printf "%b%s%b\n" "${_red}" "$*" "${_normal}"
  exit 1
}
msg() {
  # Print a single line of status text.
  printf '%s\n' "$1"
}
msg_line() {
  # Print status text without a trailing newline (caller appends a result).
  printf '%s' "$1"
}
msg_failure() {
  # Print "FAILURE" in red, then abort the whole script with status 2.
  local red_on="\\033[1;31m"
  local color_off="\\033[0;39m"
  printf "${red_on}%s${color_off}\n" "FAILURE"
  exit 2
}
msg_success() {
  # Print "SUCCESS" in green and report success to the caller.
  local green_on="\\033[1;32m"
  local color_off="\\033[0;39m"
  printf "${green_on}%s${color_off}\n" "SUCCESS"
  return 0
}
end-run() {
  # Announce completion of the whole run in green; never fails.
  local green_on="\\033[1;32m"
  local color_off="\\033[0;39m"
  printf "${green_on}%s${color_off}\n" "Run Complete"
  return
}
maker(){ # $1: name of package
# Build the RPM for package $1 from its spec file unless a built package
# already exists under RPMPATH.
# NOTE(review): `local _pkg=$(...)` masks find's exit status (SC2155), and
# the unquoted `[ -z ${_pkg} ]` below breaks if several RPMs match.
local _log="${LOGPATH}/${1}"
local _pkg=$(find ${RPMPATH} -name "${1}-[0-9]*.rpm" -print 2>/dev/null)
local _filespec=${SPECPATH}/${1}.spec
#
# Build
#
msg_line " Building: ${1}: "
if [ -z ${_pkg} ]; then
# Clear previous state; the redirections truncate the log file first.
rm ${_log}.installed > ${_log} 2>&1 || true
rm ${INFOPATH}/${1} > ${_log} 2>&1 || true
rpmbuild -ba \
${_filespec} >> ${_log} 2>&1 && msg_success || msg_failure
_pkg=$(find ${RPMPATH} -name "${1}-[0-9]*.rpm" -print)
else
msg "Skipped"
# return
fi
}
info(){ # $1: Name of package
# Record the built RPM's file list, provides and requires under the
# INFO/PROVIDES/REQUIRES directories, once per package.
local _log="${LOGPATH}/${1}"
local _pkg=$(find ${RPMPATH} -name "${1}-[0-9]*.rpm" -print 2>/dev/null)
#
# Info
#
msg_line " Info: ${1}: "
[ -z ${_pkg} ] && die "ERROR: rpm package not found"
if [ ! -e ${INFOPATH}/${1} ]; then
# `|| true` keeps errexit from aborting on rpm query failures.
rpm -qilp \
${_pkg} > ${INFOPATH}/${1} 2>&1 || true
rpm -qp --provides \
${_pkg} > ${PROVIDESPATH}/${1} 2>&1 || true
rpm -qp --requires \
${_pkg} > ${REQUIRESPATH}/${1} 2>&1 || true
msg_success
else
msg "Skipped"
fi
}
installer(){ # $1: name of package
# Install the built RPM (as root via su), then mark it installed by
# renaming the build log to <log>.installed.
local _log="${LOGPATH}/${1}"
local _pkg=$(find ${RPMPATH} -name "${1}-[0-9]*.rpm" -print 2>/dev/null)
#
# Install
#
msg_line " Installing: ${1}: "
[ -z ${_pkg} ] && die "ERROR: rpm package not found"
if [ ! -e ${_log}.installed ]; then
# --nodeps: dependency ordering is managed by the LIST in the main line.
su -c "rpm -Uvh --nodeps ${_pkg}" >> "${_log}" 2>&1 && msg_success || msg_failure
mv ${_log} ${_log}.installed
else
msg "Skipped"
fi
}
_prepare() {
# Stage the build: generate wget/md5 lists, sync specs from the LFS home,
# download sources into SOURCES, verify checksums, and copy kernel patches.
local _log="${LOGPATH}/${1}"
_wget_list # Create wget list
_md5sum_list # Create md5sum list
rsync -va /home/lfs/BLFS-RPM/${TITLE}.sh .
rsync -var /home/lfs/BLFS-RPM/SPECS/* SPECS/
# Fetch source packages
local DESTDIR=""
local INPUTFILE=""
msg_line " Fetching source: "
# NOTE(review): DESTDIR is still empty here, so this install call has no
# target -- presumably it should create SOURCES; confirm.
[ -d SOURCES ] || install -vdm 755 ${DESTDIR}
# LFS sources
DESTDIR=${TOPDIR}/SOURCES
INPUTFILE=${TOPDIR}/SOURCES/${TITLE}.wget
wget --no-clobber --no-check-certificate --input-file=${INPUTFILE} --directory-prefix=${DESTDIR} > /dev/null 2>&1
msg_success
msg_line " Checking source: "
md5sum -c ${TOPDIR}/SOURCES/${TITLE}.md5sum >> ${_log}
msg_success
cp config-4.12.7.graphics.patch SOURCES
cp config-4.12.7.sound.patch SOURCES
cp config-4.12.7.powersave.patch SOURCES
return
}
_post() {
# Post-processing hook.  The immediate `return` below disables the body --
# presumably an intentional placeholder; remove it to re-enable.
return
local _log="${LOGPATH}/${1}"
msg " Post processing:"
return
}
_wget_list() {
# Write the list of source URLs for this package set to
# ${PARENT}/SOURCES/${TITLE}.wget (consumed by wget in _prepare).
msg_line " Creating wget-list: "
cat > ${PARENT}/SOURCES/${TITLE}.wget <<- EOF
ftp://ftp.alsa-project.org/pub/lib/alsa-lib-1.1.4.1.tar.bz2
ftp://ftp.alsa-project.org/pub/plugins/alsa-plugins-1.1.4.tar.bz2
http://www.mega-nerd.com/SRC/libsamplerate-0.1.9.tar.gz
EOF
msg_success
return
}
_md5sum_list(){
# Write the md5 checksum list for this package set to
# ${PARENT}/SOURCES/${TITLE}.md5sum; _prepare verifies it with `md5sum -c`.
# (Fixed copy-paste bug: the progress message said "Creating wget-list".)
msg_line " Creating md5sum-list: "
cat > ${PARENT}/SOURCES/${TITLE}.md5sum <<- EOF
29fa3e69122d3cf3e8f0e01a0cb1d183 SOURCES/alsa-lib-1.1.4.1.tar.bz2
de51130a7444b79b2dd3c25e28420754 SOURCES/alsa-plugins-1.1.4.tar.bz2
2b78ae9fe63b36b9fbb6267fad93f259 SOURCES/libsamplerate-0.1.9.tar.gz
EOF
msg_success
return
}
#
# Main line
#
[ -z ${PARENT} ] && die "${PRGNAME}: Variable: PARENT not set: FAILURE"
#
# BLFS Desktop system
#
msg "Building KDE"
# Ordered build list.  Entries of plain spaces contribute no words after
# word-splitting; "prepare" and "post" are handled specially below.
LIST=""
LIST+="prepare linux "
LIST+="Python2 " #
LIST+="alsa-lib " # Python2
LIST+="libsamplerate " #
LIST+=" " #
LIST+=" " #
LIST+=" " #
LIST+=" " #
LIST+=" " #
#LIST+="alsa-plugins " #
LIST+="post "
# Build/record/install each package with a clean rpmbuild work area.
for i in ${LIST};do
rm -rf BUILD BUILDROOT
case ${i} in
prepare) _prepare "kde.${i}" ;;
post) _post ${i} ;;
*) maker ${i}
info ${i}
installer ${i} ;;
esac
done
end-run
| true
|
f275c8f2770be15def9d5aaf512c3ba4c94e8588
|
Shell
|
vedmaka/mediawiki-backup-bash
|
/mediawiki_regular_install_backup.sh
|
UTF-8
| 3,163
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/sh
# MWBACKUP: back up a MediaWiki installation (files + MySQL dump) into
# BACKUP_DORECTORY/<yyyy-mm-dd>.  See help_display for usage.
# NOTE(review): "DORECTORY" is a consistent internal misspelling of
# "DIRECTORY"; several `[ -z $1 ]`-style tests are unquoted and would
# misbehave on arguments containing spaces.
help_display() {
# Print usage text and exit; the literal text below is user-visible output.
echo ""
echo "===================================================================================="
echo " MWBACKUP is a bash script that helps you to create and rotate backups of Mediawiki site."
echo " It will create backup of files and database and place it into specified directory"
echo " under a folder named after as a timestamp in yyyy-mm-dd format."
echo " Example usage:"
echo " ./mwbackup.sh WIKI_ROOT BACKUP_DORECTORY [PATH_TO_LOCALSETTINGS]"
echo " WIKI_ROOT - absolute path to wiki root directory"
echo " BACKUP_DORECTORY - absolute path to folder where backups will be stored"
echo " PATH_TO_LOCALSETTINGS - (optional) path to directory with LocalSettings.php file"
echo " if stored in different from default location"
echo "===================================================================================="
exit 1
}
# Both positional arguments are required.
if [ -z $1 ] || [ -z $2 ]; then
help_display
fi
STAMP=$(date -u +%Y-%m-%d)
WIKI_ROOT=$1
BACKUP_DORECTORY=$2
# LS_FILE is the directory holding LocalSettings.php (defaults to WIKI_ROOT).
LS_FILE=$WIKI_ROOT
if [ ! -z $3 ]; then
LS_FILE=$3
fi
if [ ! -d $WIKI_ROOT ]; then
echo " Wiki directory does not exits, check parameters supplied to the script!"
exit 1
fi
if [ ! -d $LS_FILE ]; then
echo " LocalSettings.php directory does not exits, check parameters supplied to the script!"
exit 1
fi
if [ ! -d $BACKUP_DORECTORY ]; then
echo " Backups directory does not exits, check parameters supplied to the script!"
exit 1
fi
TARGET_DIRECTORY=$BACKUP_DORECTORY/$STAMP
echo " Creating backup direcotory.."
install -d $TARGET_DIRECTORY
if [ ! -d $TARGET_DIRECTORY ]; then
echo "Unable to create backup directory, check permissions!"
exit 1
fi
echo " Backing up wiki files into $TARGET_DIRECTORY.."
tar czf $TARGET_DIRECTORY/wiki-files.tar.gz -C $WIKI_ROOT .
# tar may fail due permissions error on some files, uncomment if you'd like
# to fail whole task when this happens
#if [ $? -ne 0 ]; then
# echo "Backup has failed!"
# exit 1
#fi
echo " Files backup completed. $(du -m $TARGET_DIRECTORY/wiki-files.tar.gz | cut -f1)MB."
echo " Looking for farm database credentials..."
# Scrape the DB credentials out of LocalSettings.php (expects values in
# double quotes, e.g. $wgDBuser = "name";).
DBPASS=$(cat $LS_FILE/LocalSettings.php | grep '^$wgDBpassword' | sed -e 's/$.*"\(.*\)".*/\1/')
DBUSER=$(cat $LS_FILE/LocalSettings.php | grep '^$wgDBuser' | sed -e 's/$.*"\(.*\)".*/\1/')
DBNAME=$(cat $LS_FILE/LocalSettings.php | grep '^$wgDBname' | sed -e 's/$.*"\(.*\)".*/\1/')
DBHOST=$(cat $LS_FILE/LocalSettings.php | grep '^$wgDBserver' | sed -e 's/$.*"\(.*\)".*/\1/')
if [ -z $DBPASS ] || [ -z $DBUSER ] || [ -z $DBNAME ] || [ -z $DBHOST ]; then
echo "Unable to find database credentials in $LS_FILE/LocalSettings.php!"
exit 1
fi
echo " Backing up database into $TARGET_DIRECTORY/$DBNAME-mysql.gz"
# NOTE(review): DBHOST is extracted but never passed to mysqldump (-h).
mysqldump -u $DBUSER -p"$DBPASS" $DBNAME | gzip -c > $TARGET_DIRECTORY/$DBNAME-mysql.gz
if [ $? -ne 0 ]; then
echo " Database backup has failed!"
exit 1
fi
echo " MySQL backup completed. $(du -m $TARGET_DIRECTORY/$DBNAME-mysql.gz | cut -f1)MB."
echo " Backup procedure has been finished!"
| true
|
b7c6971b95c2d9099da64e9b81a0e9b047f81acd
|
Shell
|
bioconda/bioconda-recipes
|
/recipes/clove/build.sh
|
UTF-8
| 271
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Conda build script for clove: stage the sources under share/ and expose
# the launcher on PATH.  PREFIX, PKG_NAME, PKG_VERSION, PKG_BUILDNUM,
# SRC_DIR and RECIPE_DIR are provided by conda-build.
set -eu -o pipefail
outdir=${PREFIX}/share/${PKG_NAME}-${PKG_VERSION}-${PKG_BUILDNUM}
mkdir -p "$outdir"
mkdir -p "${PREFIX}/bin"
# Glob intentionally unquoted so all source files are copied.
cp "${SRC_DIR}"/* "$outdir/"
cp "${RECIPE_DIR}/clove.py" "$outdir/clove"
# -f so a rebuild over an existing prefix does not fail on the old link.
ln -sf "$outdir/clove" "${PREFIX}/bin"
chmod 0755 "${PREFIX}/bin/clove"
| true
|
80de8625d45933d083be4a76ad9f9352d6914ac0
|
Shell
|
mehrdad-shokri/kraken-lib
|
/build-scripts/docker-update.sh
|
UTF-8
| 507
| 4.09375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# using sh instead of bash because the target container, `docker:latest` does
# not contain /bin/bash
#
# this script will update the checked in dockerfile to use a passed in parameter
# for the tag on the source image
# expects first argument to be source tag
# expects second argument to be Dockerfile to update
set -x
source_tag=$1
dockerfile=$2
# BUG FIX: the original test was `[ -n ${source_tag} && -n ${dockerfile} ]`.
# `&&` is not valid inside single brackets -- the shell split it into two
# broken commands, the test always failed, and the sed never ran.  The
# portable form is two tests joined by the shell's && operator.
if [ -n "${source_tag}" ] && [ -n "${dockerfile}" ] ; then
  sed -i -e "s/latest/${source_tag}/" "${dockerfile}"
else
  echo "missing parameters, make no changes"
fi
| true
|
dbe1df06531625fd7040860469622c0246823620
|
Shell
|
ccs2014/submission243
|
/Simulation Code/taint_tracking/sim.sh
|
UTF-8
| 638
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
#******************************************************
# This script builds all the trace simulators and runs
# all the traces through them.
#*****************************************************
# Usage: sim.sh <benchmark> <Ncyc> <l1> <l2>
bm=$1
Ncyc=$2
l1=$3
l2=$4
trace_dir="../traces"
# Output directories expected by the simulator.
mkdir -p logs
mkdir -p cfg
mkdir -p results
mkdir -p rules
mkdir -p tags
mkdir -p l2misses
mkdir -p m-vectors
# Build the simulator binary only if it is not already present.
if [ ! -f "simulate" ]
then
make
fi
taint_file=${trace_dir}/${bm}/init_taints.libraries.gz
# Log the host and exact command line, then run the simulation.
echo on host `hostname` >> logs/${bm}.${l1}.${l2}
cmd="./simulate ${trace_dir} ${bm} ${taint_file} ${Ncyc} ${l1} ${l2}"
echo running $cmd >> logs/${bm}.${l1}.${l2}
$cmd
| true
|
3f87aa4c15dcc0f90e44a2bf402af8b7aed1d9e6
|
Shell
|
jeromedecoster/black-white-terraform-imagemagick-lambda-invoke
|
/scripts/s3-put-payload.sh
|
UTF-8
| 2,587
| 3.234375
| 3
|
[] |
no_license
|
# Build a fake S3 "ObjectCreated:Put" event payload for the object named by
# $1, caching the object's metadata in <root>/<key>.json, and print the
# event JSON on stdout.  NOTE(review): no shebang -- relies on the caller's
# shell; the [[ ]] tests below require bash.
# the project root directory, parent directory of this script file
dir="$(cd "$(dirname "$0")/.."; pwd)"
if [[ $# -eq 0 ]]; then
echo "Usage: $0 <object.key>" >&2
exit 1
fi
# Build the metadata cache once: pull region/bucket/function from terraform
# outputs and the object record from S3, then merge them into one JSON file.
if [[ ! -f "$dir/$1.json" ]]; then
cd "$dir/terraform"
OUTPUT=$(terraform output)
REGION=$(echo "$OUTPUT" | grep ^region | tr ' ' '\n' | tail -1)
BUCKET=$(echo "$OUTPUT" | grep ^bucket | tr ' ' '\n' | tail -1)
FUNCTION=$(echo "$OUTPUT" | grep ^convert_function | tr ' ' '\n' | tail -1)
OBJECT=$(aws s3api list-objects-v2 \
--region $REGION \
--bucket $BUCKET \
--query "Contents[?Key == '$1']" \
--output json)
# An empty result is "[]" (one line); a real match spans several lines.
if [[ $(echo "$OBJECT" | wc --lines) -lt 2 ]]; then
echo "abort: object not found. region:$REGION bucket:$BUCKET key:$1"
exit 1
fi
# Strip escaped quotes (the ETag field) and append the terraform values.
echo "$OBJECT" \
| jq '.[0]' \
| sed --expression 's|\\\"||g' \
| jq --arg region $REGION '. + {Region: $region}' \
| jq --arg bucket $BUCKET '. + {Bucket: $bucket}' \
| jq --arg "function" $FUNCTION '. + {Function: $function}' --monochrome-output \
> "$dir/$1.json"
fi
# Read the cached metadata back out field by field.
JSON=$(cat "$dir/$1.json")
REGION=$(echo "$JSON" | jq '.Region' --raw-output)
BUCKET=$(echo "$JSON" | jq '.Bucket' --raw-output)
KEY=$(echo "$JSON" | jq '.Key' --raw-output)
SIZE=$(echo "$JSON" | jq '.Size' --raw-output)
ETAG=$(echo "$JSON" | jq '.ETag' --raw-output)
EVENT_TIME=$(echo "$JSON" | jq '.LastModified' --raw-output)
# Template of the S3 event; the uppercase placeholders are substituted by
# the sed pipeline at the end (content must stay byte-exact).
EVENT=$(cat <<EOF
{
"Records": [
{
"eventVersion": "2.0",
"eventSource": "aws:s3",
"awsRegion": "REGION",
"eventTime": "EVENT_TIME",
"eventName": "ObjectCreated:Put",
"userIdentity": {
"principalId": "EXAMPLE"
},
"requestParameters": {
"sourceIPAddress": "127.0.0.1"
},
"responseElements": {
"x-amz-request-qid": "EXAMPLE123456789",
"x-amz-id-2": "EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH"
},
"s3": {
"s3SchemaVersion": "1.0",
"configurationId": "testConfigRule",
"bucket": {
"name": "BUCKET",
"ownerIdentity": {
"principalId": "EXAMPLE"
},
"arn": "arn:aws:s3:::BUCKET"
},
"object": {
"key": "KEY",
"size": "SIZE",
"eTag": "ETAG",
"sequencer": "0A1B2C3D4E5F678901"
}
}
}
]
}
EOF
)
# Substitute the real values into the template and print the event.
echo "$EVENT" | sed --expression "s|REGION|$REGION|" \
--expression "s|BUCKET|$BUCKET|" \
--expression "s|KEY|$KEY|" \
--expression "s|SIZE|$SIZE|" \
--expression "s|ETAG|$ETAG|" \
--expression "s|EVENT_TIME|$EVENT_TIME|"
| true
|
018272249162586da533fefd79cb02ab1d97dcb5
|
Shell
|
laurenclarke/braddunbar
|
/scss
|
UTF-8
| 414
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh -e
# Concatenate the given .scss files (blank line between them) and compile
# the stream with Sass.  Options: -m minified output, -r LIB require LIB.
# Requires sass >= 3.1; the `false` under `sh -e` aborts when too old.
v="$(sass -v | sed 's/[^0-9.]//g')"
if [ -z "$(semver -v "$v" -r ">=3.1")" ]
then
  echo "sass@>=3.1 required"
  false
fi
sass="sass -C --scss --compass --trace"
while getopts ":mr:" opt; do
  case $opt in
    m) sass="$sass -t compressed";;
    r) sass="$sass -r $OPTARG";;
    \?) echo "invalid option: -$OPTARG"; exit;;
  esac
done
shift $((OPTIND-1))
# awk prints a blank line at the start of every file after the first.
# BUG FIX: "$@" is now quoted so file names containing spaces survive;
# $sass stays unquoted on purpose -- it is a command line built above
# that relies on word-splitting.
awk 'FNR==1 && NR!=1{print "\n"}{print}' "$@" | $sass
| true
|
1f608830550d82dfa38fbd17c3b511dab0f72a68
|
Shell
|
apps4av/avare
|
/extra/minimums/mins.sh
|
UTF-8
| 1,851
| 2.984375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
#Copyright (c) 2015 Apps4Av Inc.
# Author Zubair Khan (governer@gmail.com)
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Download and run on ALTERNATE and TAKEOFF minimums
# e.g. mins.sh NE1TO
# e.g. mins.sh NEIALT
# Use mogirfy to convert PDF to images with names like NE1TO-0.jpeg ...
# Page count of the input PDF ($1.PDF).
export PG=`pdfinfo $1.PDF | grep Pages | sed "s/Pages:\s*//"`
# For each page, extract the left and right text columns (two 194pt-wide
# strips) and hand the combined text to the per-chart parser mins.pl.
# NOTE(review): requires MODULE_DIR to be set in the environment.
for (( c=1; c<=${PG}; c++ ))
do
pdftotext -f $c -l $c -nopgbrk -W 194 -x 0 -H 594 -y 0 $1.PDF tmp1.txt
pdftotext -f $c -l $c -nopgbrk -W 194 -x 194 -H 594 -y 0 $1.PDF tmp2.txt
cat tmp1.txt > tmp.txt
cat tmp2.txt >> tmp.txt
${MODULE_DIR}/mins.pl $1 `expr $c - 1`
done
| true
|
0dbd71c902969a489752b1213d7502c4455b1c58
|
Shell
|
corford/netatmo
|
/devops/hack/docker-tools.sh
|
UTF-8
| 645
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
DOCKER_COMPOSE="1.23.2"
DRY="0.9-beta.9"
# Exit immediately on error or undefined variable
set -e
set -u
echo "Downloading & installing Docker Compose ${DOCKER_COMPOSE} (to /usr/local/bin/docker-compose)"
curl -fsSL https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE}/docker-compose-`uname -s`-x86_64 -o /usr/local/bin/docker-compose
chmod 755 /usr/local/bin/docker-compose
echo "Downloading & installing Dry ${DRY} (to /usr/local/bin/dry)"
curl -fsSL https://github.com/moncho/dry/releases/download/v${DRY}/dry-`uname -s | sed -e 's/\(.*\)/\L\1/'`-amd64 -o /usr/local/bin/dry
chmod 755 /usr/local/bin/dry
exit 0
| true
|
7266543b5d37261189f8669cf785b5210a9b6b5c
|
Shell
|
GovernoRegionalAcores/DSpace
|
/entrypoint.sh
|
UTF-8
| 1,235
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
if [ "${1}" = "run" ];then
set -- catalina.sh "$@"
fi
mkdir -p $CATALINA_HOME/conf/Catalina/localhost
for webapp in $(ls /dspace/webapps/); do
# Exclue jspui parce qu'il n'est pas compilable avec tomcat8
if [ "$webapp" != "jspui" ]; then
if [ "$webapp" == "solr" ]; then
{
echo "<Context docBase=\"/dspace/webapps/$webapp\" reloadable=\"true\">"
echo "<Valve className=\"org.apache.catalina.valves.RemoteAddrValve\" allow=\"127\.0\.0\.1|172\.17\.0\.1|172\.16\.0\.57|111\.222\.233\.d+\"/>"
echo "<Parameter name=\"LocalHostRestrictionFilter.localhost\" value=\"false\" override=\"false\" />"
echo "</Context>"
} > $CATALINA_HOME/conf/Catalina/localhost/$webapp.xml
else
{
echo "<?xml version='1.0'?>"
echo "<Context"
echo docBase=\"/dspace/webapps/$webapp\"
echo 'reloadable="true"'
echo 'cachingAllowed="false"/>'
} > $CATALINA_HOME/conf/Catalina/localhost/$webapp.xml
fi
fi
done
cp $CATALINA_HOME/conf/Catalina/localhost/{xmlui,ROOT}.xml
sed -i "s/localhost:5432/db:5432/" /dspace/config/dspace.cfg
chown dspace:dspace /dspace/assetstore
#service cron start
sh /sbin/create-admin.sh
exec "$@"
| true
|
af24cf4a7bb6c847bf187e203810c77239cecc31
|
Shell
|
tongr/coheel
|
/bin/download-training-data.sh
|
UTF-8
| 757
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
RESULTS_FOLDER=${1:-hdfs://tenemhead2/home/stefan.bunk/results}
echo "This script is to be run after the training data program and before the"
echo "actual training."
echo "It downloads the training and test set files typed-training-data-3786-{12345,678910}.wiki"
echo "and typed-training-data-632-{12345,678910}.wiki and merges them."
echo ""
echo "Will work on the folder: ${RESULTS_FOLDER}. Abort in next five seconds if wrong."
sleep 5
echo "Downloading .."
$HADOOP_HOME/bin/hdfs dfs -getmerge $RESULTS_FOLDER/typed-training-data-3786-{12345,678910}.wiki ./typed-training-data-3786-2015-11.wiki
$HADOOP_HOME/bin/hdfs dfs -getmerge $RESULTS_FOLDER/typed-training-data-632-{12345,678910}.wiki ./typed-training-data-632-2015-11.wiki
| true
|
45db56f0f8642d0af1bbdc986cbafefb49d7a5e1
|
Shell
|
Sandlayth/docker-armv7l-images
|
/update_images/update_alpine.sh
|
UTF-8
| 383
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# This script is intented to update alpine image
MAJOR_VERSION="3.5"
MINOR_VERSION="0"
pushd ../docker-alpine
curl https://nl.alpinelinux.org/alpine/v3.6/releases/armhf/alpine-minirootfs-3.6.0-armhf.tar.gz > rootfs.tar.gz
docker build -t sandlayth/armv7l-alpine . && docker push sandlayth/armv7l-alpine && echo "Alpine successfully updated and pushed."
popd
| true
|
0ef6ad9678c97d6cfb0dc6ecced7d07bb28ab88a
|
Shell
|
621Alice/Fed-Brane-V6
|
/Brane/V6_server_node_deployment/v6_node/entrypoint.sh
|
UTF-8
| 732
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -euo pipefail
export LC_ALL=C.UTF-8
export LANG=C.UTF-8
sed -i "s/PLC_API_KEY/$API_KEY/g" configuration.yml
#sed -i "s/PLC_DATA_PATH/$DATA_PATH/g" configuration.yml
sed -i "s/PLC_DATA_PATH/${DATA_PATH//\//\\/}/g" configuration.yml
sed -i "s/PLC_SERVER_HOST/$SERVER_HOST/g" configuration.yml
sed -i "s/PLC_SERVER_PORT/$SERVER_PORT/g" configuration.yml
# Generate random port number for the proxy.
export PROXY_SERVER_PORT=$(shuf -i 2000-65000 -n 1)
#set the data volume to be the same for brane and v6
export DATA_VOLUME_NAME=brane_data
#set v6 proxy server to be the same as host IP
export PROXY_SERVER_HOST='192.168.0.157'
vnode-local start --config "./configuration.yml"
echo "~~>output: done"
exit 0
| true
|
e72efd5c9c5265d90b3b8c4bfbe27ffa3f443f1f
|
Shell
|
einverne/droolsjbpm-build-bootstrap
|
/script/release/kie-wbSmokeTestsMatrix.sh
|
UTF-8
| 491
| 3.203125
| 3
|
[
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"AGPL-3.0-or-later"
] |
permissive
|
#!/bin/bash -e
echo "kieVersion:" $kieVersion
echo "target" : $target
if [ "$target" == "community" ]; then
stagingRep=kie-group
else
stagingRep=kie-internal-group
fi
# wget the tar.gz sources
wget -q https://repository.jboss.org/nexus/content/groups/$stagingRep/org/kie/kie-wb-distributions/$kieVersion/kie-wb-distributions-$kieVersion-project-sources.tar.gz -O sources.tar.gz
tar xzf sources.tar.gz
mv kie-wb-distributions-$kieVersion/* .
rmdir kie-wb-distributions-$kieVersion
| true
|
e092db26dc9b3ec5cdb81f48111beb3966ed3890
|
Shell
|
mwiede/heroku-buildpack-oc
|
/bin/compile
|
UTF-8
| 657
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# bin/compile <build-dir> <cache-dir> <env-dir>
# fail fast
set -e
BP_DIR=$(cd $(dirname $0)/..; pwd) # absolute path
BIN_DIR=$BP_DIR/bin
# parse args
BUILD_DIR=$1
CACHE_DIR=$2
ENV_DIR=$3
cd $BUILD_DIR
OC_HOME=.oc-cli
mkdir -p .oc-cli
echo -n "-----> Installing openshift CLI... "
curl -O https://mirror.openshift.com/pub/openshift-v3/clients/3.6.173.0.7/linux/oc.tar.gz
echo "downloaded."
tar xf oc.tar.gz
echo "extracted."
mv oc $OC_HOME
echo "moved."
rm oc.tar.gz
echo "removed archive."
echo "-----> Adding oc script"
[ ! -d $BUILD_DIR/.profile.d ] && mkdir $BUILD_DIR/.profile.d
cp -a $BP_DIR/oc.sh $BUILD_DIR/.profile.d/
| true
|
396b118512e5cae40bf97faa3e0f4ec2c8f64031
|
Shell
|
rtcn2/tm351vm
|
/build/jupyter-custom/jupyter_nbextensions.sh
|
UTF-8
| 2,352
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
if [ ! -f /opt/jupyter_nbextensions.done ]; then
for PYTHONVER in 3 ; do
PYTHON="python$PYTHONVER"
PIP="pip$PYTHONVER"
#https://stackoverflow.com/questions/49836676/python-pip3-cannot-import-name-main
PIP="python3 -m pip"
#Go for the easy option and src all the jupyter_contrib_nbextensions
$PIP install jupyter_contrib_nbextensions
$PIP install RISE
$PIP install jupyter-wysiwyg
#Install nbgrader
$PIP install nbgrader
done
#The service runs under oustudent user but we're root here...
# So if we install as --user, thats wrong...
#su $NB_USER <<-EOF
jupyter contrib nbextension install --sys-prefix
#Enable certain extensions from the start
jupyter nbextension enable freeze/main --sys-prefix
jupyter nbextension enable highlighter/highlighter --sys-prefix
jupyter nbextension enable spellchecker/main --sys-prefix
jupyter nbextension enable collapsible_headings/main --sys-prefix
jupyter nbextension enable codefolding/main --sys-prefix
jupyter nbextension enable rubberband/main --sys-prefix
jupyter nbextension enable exercise2/main --sys-prefix
jupyter nbextension enable python-markdown/main --sys-prefix
jupyter nbextension enable export_embedded/main --sys-prefix
jupyter nbextension enable skip-traceback/main --sys-prefix
jupyter nbextension enable hide_input/main --sys-prefix
jupyter nbextension enable init_cell/main --sys-prefix
#Slideshow
jupyter nbextension install rise --py --sys-prefix
jupyter nbextension enable rise --py --sys-prefix
#WYSIWYG editor
jupyter nbextension install jupyter_wysiwyg --py --sys-prefix
jupyter nbextension enable jupyter_wysiwyg --py --sys-prefix
#nbgrader - do not enable by default
#jupyter nbextension install --sys-prefix --py nbgrader --overwrite
#jupyter nbextension enable --sys-prefix --py nbgrader
#jupyter serverextension enable --sys-prefix --py nbgrader
#EOF
touch /opt/jupyter_nbextensions.done
fi
#If not the Docker build, set up the services
if [[ -z "${DOCKERBUILD}" ]]; then
#restart the service
systemctl restart jupyter.service
fi
| true
|
b83709e91fac4b423f6ee722e08deab44d43af38
|
Shell
|
bodii/test-code
|
/shell/test1/block_subshell.sh
|
UTF-8
| 337
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
echo
echo "Block output is redirected to file 'block1.out;"
# 重定向若干个命令序列的标准输出
# 显示子shell的当前工作目录
( date; cd /etc; echo -n "Current Working dir: ";pwd; ) > block_subshell.out
# 打印父shell的当前工作目录
echo "Current Working dir:$PWD"
echo
exit 0
| true
|
d339e4d598c92ba4321b51b19a0352ae309d9ec7
|
Shell
|
tipsotto/archlinux-setup
|
/Initial
|
UTF-8
| 6,519
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Created by: TIPSotto
# Free to use, distribute and do whatever the hell you want with it.
# Last Updated: May 13, 2012
#####################################################################
clear
echo "*****************************************************************"
echo "| Initial: Initial Arch Setup. |"
echo "*****************************************************************"
#Run script using: /path/to/Initial
scriptpath="`dirname \"$0\"`"
#Root Check
#***********
if [ $USER != root ]; then
echo; echo "This script must be run as root. You are running script as: $USER."
echo "Either login as root or run script with the \"sudo\" command like: sudo /path/to/Initial"
echo "Will now exit. Goodbye! "
exit 0
fi
#Intro
#******
echo; echo -n "Hello, this will complete an initial setup of your Arch install You will need internet. Continue? [Y/n]"
read prompt
if [ $prompt = n ]; then
echo "GOODBYE! "
echo
exit 0
fi
#Setup Username and Password
#There is a check to ask user if username is satisfactory.
#*********************************************************************************************************
echo; echo "#Setup a USERNAME and PASSWORD"
satisfied=0
while [ $satisfied -eq 0 ]
do
echo "Please type desired USERNAME in lowercase letters ONLY, followed by [ENTER]"
echo -n "Leave blank to skip this step: "
read user
if [ -z "$user" ]; then
satisfied=1
echo;echo "--- Username setup skipped"
echo "Your USERNAME will be \"$USER\"."
else
echo -n "Are you satisfied with \"$user\" as your USERNAME? [Y/n]: "
read prompt
if [ -z "$prompt" ] || [ $prompt = y ]; then
satisfied=1
useradd -m -g users -G audio,lp,optical,storage,video,wheel,games,power,scanner -s /bin/bash $user
echo; echo "--- Your USERNAME: $user was added."
echo "Will now setup your PASSWORD. Note as you are typing no charachters will be shown."
passwd $user
elif [ $prompt = n ]; then
echo
fi
fi
done
echo
#Internet Connection
#This is to check if there is an interent connection and, if not, to offer a chance to setup wireless.
#************************************************************************
satisfied=0
while [ $satisfied -eq 0 ]
do
echo;echo "Are you connected to the interenet?"
if [ -z "$prompt" ] || [ $prompt = y ]; then
echo "Great!"
else
echo "If you are not connected, you can open another console (ALT+F2) and connect then come back WHEN YOU ARE DONE."
echo "If you are not connected to the internet script will finish as there is nothing more we can do."
echo "Connect then come back. Waiting..."
echo -n "Continue? [Y/n]: "
read prompt
if [ -z "$prompt" ] || [ $prompt = y ]; then
echo "Great!"
else
echo;echo "Script has nothing more to do. All consequent actions require an internet connection."
echo "GOODBYE!";echo; exit 0
fi
done
connected=1 #After wireless setup, supposed to be connected, otherwise script would have exited.
#System Upgrade
#***************
echo "Initiating System Upgrade"
pacman-db-upgrade
pacman -Syyu
#Install SUDO
#*************
echo; echo "#Install SUDO"
echo -n "Now we will install the \"sudo\" command, hit [ENTER] to continue or type 's' to skip: "
read prompt
if [ -z "$prompt" ]; then
pacman -S sudo
echo;echo "Now you will configure 'sudo' by uncommenting (removing the '#' in front of) this line:"
echo "%wheel ALL=(ALL) ALL"
echo -n "It is located towards the end of the file. Use Ctrl-x to save and exit. Hit [ENTER] to contnue: "
read prompt
if [ -z "$prompt" ]; then
EDITOR=nano visudo
echo "--- Editing of sudo config file DONE! "
fi
else
echo;echo "--- Installing SUDO skipped."
fi
#Install X and Dependancies
#***************************
echo; echo "#Install X"
echo -n "Now we will install \"X\", hit [Enter] to continue or type 's' to skip: "
read prompt
if [ -z "$prompt" ]; then
pacman -S xorg-server xorg-xinit xorg-utils xorg-server-utils mesa
pacman -S xorg-twm xorg-xclock xterm dbus
echo;echo "Now you will need to add dbus to the DAEMONS line in /etc/rc.conf, add this word: dbus
to the last line of the file that will show now.Then use Ctrl-x to save and exit. Add to line like this:
DAEMONS=(hwclock syslog-ng dbus network crond)"
echo -n "Ready? type 'y' when ready: "
read prompt
nano /etc/rc.conf
echo "--- Editing of /etc/rc.conf DONE! ";
else
echo;echo "--- Installing X skipped."
fi
#Install Video Drivers
#**********************
echo; echo "#Install Video Drivers"
echo -n "Now we will install the video drivers, hit [Enter] to continue or type 's' to skip: "
read prompt
if [ -z "$prompt" ]; then
lspci
echo; echo "Guess: Your card might be:"
lspci | grep VGA
echo; echo "Above is a list of hardware you have on this machine. To install the correct drivers, identify what
kind of video card you own. There is a guess on the bottom of the list that might help you."
echo "Once you have identified your video card please type in number corresponding to card chipset brand.
Note that these are not proprietary drivers."
echo "Possible (Open Source) driver choices:"
echo "1 : ATI"
echo "2 : Nvidia"
echo "3 : Nvidia with 3D support"
echo "4 : Intel"
echo -n "Your choice, type in number only. Leave blank to skip: "
read prompt
if [ -z "$prompt" ]; then
echo;echo "--- Installing Video Drivers skipped."
elif [ $prompt == 1 ]; then
pacman -S xf86-video-ati
elif [ $prompt == 2 ]; then
pacman -S xf86-video-nouveau
cp $scriptpath/Support/NOUVEAU-DRI.CONF /etc/X11/xorg.conf.d/20-nouveau.conf
elif [ $prompt == 3 ]; then
pacman -S nouveau-dri
cp $scriptpath/Support/NOUVEAU-DRI.CONF /etc/X11/xorg.conf.d/20-nouveau.conf
elif [ $prompt == 4 ]; then
pacman -S xf86-video-intel
fi
else
echo;echo "--- Installing Video Drivers skipped."
fi
#Synaptics Drivers
#******************
echo; echo "Is this computer a laptop? If so, we can install synaptics drivers for the touchpad"
echo -n "Hit [Enter] to continue or type 's' to skip: "
read prompt
if [ -z "$prompt" ]; then
pacman -S xf86-input-synaptics
else
echo;echo "--- Installing Synaptics Drivers skipped."
fi
#End of Script
#**************
echo; echo "Initial script will end. Initial setup is complete! "
echo "You should reboot computer now. Then login with the username and password you setup."
echo "Your username: $user "
echo; echo "Reboot now? (recommended) [Y/n]: "
read prompt
if [ -z "$prompt" ] || [ $prompt == y ] || [ $prompt == Y ]; then
reboot
else
echo;echo "GOODBYE!"
fi
| true
|
a3e844578ad2a48a8ab476182573336d4fb7bd9a
|
Shell
|
harryi3t/base
|
/migrate.sh
|
UTF-8
| 7,947
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
readonly ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
readonly USR_DIR="$ROOT_DIR/usr"
OLD_STATE_FILE="$ROOT_DIR/data/state.json"
STATE_FILE_TEMPLATE="$USR_DIR/state.json.example"
STATE_FILE_MIGRATE="$USR_DIR/state.json.migrate"
update_release() {
# update the release version in migrate from versions file
echo "updating release version and main metadata"
echo "updating install mode to production"
local update=$(cat $STATE_FILE_MIGRATE \
| jq '.installMode="production"')
update=$(echo $update \
| jq '.' \
| tee $STATE_FILE_MIGRATE)
}
update_machines() {
# copy machines from old state to migrate file
echo "updating machines"
cp -vr $ROOT_DIR/data/machines.json $USR_DIR/machines.json
local machines=$(cat $OLD_STATE_FILE \
| jq -c '[ .machines[] ]')
local update=$(cat $STATE_FILE_MIGRATE \
| jq '.machines='$machines'' )
update=$(echo $update \
| jq '.' \
| tee $STATE_FILE_MIGRATE)
}
update_install_status() {
# update install status of all settings to true
echo "updating install status"
local update=$(cat $STATE_FILE_MIGRATE \
| jq '.installStatus.dockerInstalled=true')
update=$(echo $update \
| jq '.installStatus.dockerInitialized=true')
update=$(echo $update \
| jq '.installStatus.redisInstalled=true')
update=$(echo $update \
| jq '.installStatus.redisInitialized=true')
update=$(echo $update \
| jq '.installStatus.databaseInstalled=true')
update=$(echo $update \
| jq '.installStatus.databaseInitialized=true')
update=$(echo $update \
| jq '.installStatus.rabbitmqInstalled=true')
update=$(echo $update \
| jq '.installStatus.rabbitmqInitialized=true')
update=$(echo $update \
| jq '.installStatus.vaultInstalled=true')
update=$(echo $update \
| jq '.installStatus.vaultInitialized=true')
update=$(echo $update \
| jq '.installStatus.serviceuserTokenGenerated=true')
update=$(echo $update \
| jq '.installStatus.systemConfigUpdated=true')
update=$(echo $update \
| jq '.installStatus.machinesBootstrapped=true')
update=$(echo $update \
| jq '.installStatus.machinesSSHSuccessful=true')
update=$(echo $update \
| jq '.installStatus.gitlabInstalled=true')
update=$(echo $update \
| jq '.installStatus.gitlabInitialized=true')
update=$(echo $update \
| jq '.installStatus.composeInstalled=true')
update=$(echo $update \
| jq '.installStatus.swarmInstalled=true')
update=$(echo $update \
| jq '.installStatus.swarmInitialized=true')
update=$(echo $update \
| jq '.installStatus.ecrInitialized=true')
update=$(echo $update \
| jq '.installStatus.ecrInstalled=true')
update=$(echo $update \
| jq '.' \
| tee $STATE_FILE_MIGRATE)
}
migrate() {
echo "migrating integrations"
if [ -f $OLD_STATE_FILE ]; then
#cp $OLD_STATE_FILE $STATE_FILE_MIGRATE
local sys_ints=$(cat $OLD_STATE_FILE | jq -c '[ .systemIntegrations[] ]')
local sys_ints_length=$(echo $sys_ints | jq ' . | length')
local system_settings=$(cat $OLD_STATE_FILE | jq -c '.systemSettings')
local master_ints="[]"
for i in $(seq 1 $sys_ints_length); do
local master_type=$(echo $sys_ints | jq '.['"$i-1"'] | .masterType')
local master_name=$(echo $sys_ints | jq '.['"$i-1"'] | .masterName')
if [ "$master_name" == "\"ECR\"" ]; then
master_type="\"cloudproviders\""
master_name="\"AWS\""
local access_key=$(echo $sys_ints | jq -r '.['"$i-1"'] | .formJSONValues[] | select (.label=="aws_access_key_id") | .value')
local secret_key=$(echo $sys_ints | jq -r '.['"$i-1"'] | .formJSONValues[] | select (.label=="aws_secret_access_key") | .value')
local formJSONValues="[
{
\"label\":\"accessKey\",
\"value\":\"$access_key\"
},
{
\"label\":\"secretKey\",
\"value\":\"$secret_key\"
}]"
formJSONValues=$(echo $formJSONValues | jq -c '.')
sys_ints=$(echo $sys_ints | jq 'map((select(.masterName == "ECR") | .masterName) |= "AWS")')
sys_ints=$(echo $sys_ints | jq 'map((select(.masterName == "AWS") | .masterType) |= "cloudproviders")')
sys_ints=$(echo $sys_ints | jq 'map((select(.masterName == "AWS") | .formJSONValues) |= '$formJSONValues')')
fi
if [ "$master_name" == "\"hub\"" ]; then
master_name="\"Docker\""
sys_ints=$(echo $sys_ints | jq 'map((select(.masterName == "hub") | .masterName) |= "Docker")')
fi
local master_int=$(echo $master_ints | jq '.[] | select (.name=='$master_name') | .name')
if [ -z "$master_int" ]; then
master_ints=$(echo $master_ints | jq '
. |= . + [{
"name": '"$master_name"',
"type": '"$master_type"'
}]')
fi
done
system_settings=$(echo $system_settings | jq '.systemImagesRegistry ="374168611083.dkr.ecr.us-east-1.amazonaws.com"')
system_settings=$(echo $system_settings | jq '.stepExecImage ="shipimg/micro50:stepExec"')
system_settings=$(echo $system_settings | jq '.customHostDockerVersion ="1.12.1"')
local state_migrate="{
\"masterIntegrations\": $master_ints,
\"systemIntegrations\": $sys_ints,
\"systemSettings\": $system_settings,
\"release\": \"\",
\"services\":[]
}"
local pretty_state=$(echo $state_migrate \
| jq '.' \
| tee $STATE_FILE_MIGRATE)
else
echo "The old state.json file doesn't exist"
fi
}
update_db_creds() {
echo "updating db credentials"
local db_host=$(cat $STATE_FILE_MIGRATE \
| jq '.machines[] | select (.group=="core" and .name=="db")')
local host=$(echo $db_host | jq -r '.ip')
local update=$(cat $STATE_FILE_MIGRATE \
| jq '.systemSettings.dbHost="'$host'"')
update=$(echo $update \
| jq '.systemSettings.dbPort=5432')
update=$(echo $update \
| jq '.systemSettings.dbUsername="apiuser"')
update=$(echo $update \
| jq '.systemSettings.dbPassword="testing1234"')
update=$(echo $update \
| jq '.systemSettings.dbname="shipdb"')
update=$(echo $update \
| jq '.systemSettings.dbDialect="postgres"')
local db_url="$host:5432"
update=$(echo $update \
| jq '.systemSettings.dbUrl="'$db_url'"')
update=$(echo $update \
| jq '.' \
| tee $STATE_FILE_MIGRATE)
}
update_amqp_vars() {
echo "updating amqp vars"
local amqp_user="SHIPPABLETESTUSER"
local amqp_pass="SHIPPABLETESTPASS"
local amqp_protocol=$(cat $STATE_FILE_MIGRATE \
| jq -r '.systemSettings.amqpProtocol')
local amqp_host=$(cat $STATE_FILE_MIGRATE \
| jq -r '.systemSettings.amqpHost')
local amqp_port=$(cat $STATE_FILE_MIGRATE \
| jq -r '.systemSettings.amqpPort')
local amqp_admin_protocol=$(cat $STATE_FILE_MIGRATE \
| jq -r '.systemSettings.amqpAdminProtocol')
local amqp_admin_port=$(cat $STATE_FILE_MIGRATE \
| jq -r '.systemSettings.amqpAdminPort')
local amqp_url_updated="$amqp_protocol://$amqp_user:$amqp_pass@$amqp_host/shippable"
local amqp_url_root="$amqp_protocol://$amqp_user:$amqp_pass@$amqp_host/shippableRoot"
local amqp_url_admin="$amqp_admin_protocol://$amqp_user:$amqp_pass@$amqp_host:$amqp_admin_port"
local update=$(cat $STATE_FILE_MIGRATE \
| jq '.systemSettings.amqpUrl="'$amqp_url_updated'"')
update=$(echo $update \
| jq '.systemSettings.amqpUrlRoot="'$amqp_url_root'"')
update=$(echo $update \
| jq '.systemSettings.amqpUrlAdmin="'$amqp_url_admin'"')
update=$(echo $update \
| jq '.systemSettings.amqpDefaultExchange="shippableEx"')
update=$(echo $update \
| jq '.' \
| tee $STATE_FILE_MIGRATE)
}
copy_keys() {
echo "copying key files"
sudo cp -vr $ROOT_DIR/data/machinekey $USR_DIR/machinekey
sudo cp -vr $ROOT_DIR/data/machinekey.pub $USR_DIR/machinekey.pub
}
main() {
migrate
update_release
update_machines
update_install_status
update_db_creds
copy_keys
update_amqp_vars
}
main
| true
|
0e25e6fb3e0aab07161f69faa9242ca91a4e0f05
|
Shell
|
nelse003/pandda
|
/scripts/pandda.run_tests
|
UTF-8
| 1,072
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
err_file="pandda-errors.log"
if [ -n "$1" ]; then
n_cpus="$1"
else
n_cpus="1"
fi
echo "Running with $n_cpus CPUS"
if [ ! -d pandda-test ]; then
mkdir pandda-test
fi
cd pandda-test
if [ ! -f data.zip ]; then
echo "downloading test data"
wget https://zenodo.org/record/48768/files/data.zip
fi
if [ ! -d data ]; then
echo "unpacking test data"
unzip data.zip
fi
if [ -d pandda ]; then
rm -rf pandda
fi
echo "Running 1ST pandda analysis"
pandda.analyse data_dirs="data/*" pdb_style="*.dimple.pdb" cpus=$n_cpus \
high_res_lower_limit=1.8 \
ignore_datasets=BAZ2BA-x434 \
exclude_from_zmap_analysis=BAZ2BA-x559 \
1> pandda-1.log 2>> $err_file
echo "Running 2ND pandda analysis"
pandda.analyse data_dirs="data/*" pdb_style="*.dimple.pdb" cpus=$n_cpus \
reprocess_datasets=BAZ2BA-x529 \
high_res_lower_limit=1.8
1> pandda-2.log 2>> $err_file
pandda.analyse data_dirs="data/*" pdb_style="*.dimple.pdb" cpus=$n_cpus \
reprocess_datasets=BAZ2BA-x559 \
high_res_lower_limit=1.8
1> pandda-3.log 2>> $err_file
exit
| true
|
fb44e63c0c477311affc41efb9396d39dcc4e63a
|
Shell
|
rogamoore/sulu
|
/bin/travis/before_script_php.sh
|
UTF-8
| 863
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
if [ ! -d downloads ]; then mkdir downloads; fi
if [[ $SYMFONY__PHPCR__TRANSPORT = jackrabbit ]]; then
if [ ! -f downloads/jackrabbit-standalone-$JACKRABBIT_VERSION.jar ]; then
cd downloads
wget http://archive.apache.org/dist/jackrabbit/$JACKRABBIT_VERSION/jackrabbit-standalone-$JACKRABBIT_VERSION.jar
cd -
fi
java -jar downloads/jackrabbit-standalone-$JACKRABBIT_VERSION.jar > /dev/null &
fi
# the content tests are intensive and there are memory leaks, this is more pronounced with the Jackalope DBAL PHPCR implementation.
echo "memory_limit=2048M" >> ~/.phpenv/versions/$(phpenv version-name)/etc/conf.d/travis.ini
phpenv config-rm xdebug.ini
composer self-update
composer update $COMPOSER_FLAGS
if [[ $SYMFONY__PHPCR__TRANSPORT = jackrabbit ]]; then composer require jackalope/jackalope-jackrabbit:~1.2 ; fi
| true
|
c32693292e1acc98f6f229b5a5493d5319f8b168
|
Shell
|
MyRequiem/SlackBuilds
|
/flashplayer-plugin-11/flashplayer-plugin-11.SlackBuild
|
UTF-8
| 2,110
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
PKGNAME="flashplayer-plugin-11"
INSTALLED_VER="$(find /var/log/packages/ -type f -name "${PKGNAME}-*" | \
cut -f 5 -d / | rev | cut -f 3 -d - | rev)"
echo "Installed version: ${INSTALLED_VER}"
URL="https://helpx.adobe.com/ru/flash-player/kb/"
URL="${URL}archived-flash-player-versions.html"
VERSION=$(wget -q -O - "${URL}" | grep "<a href=" | \
grep "Flash Player 11.2." | head -n 5 | cut -d ">" -f 3 | \
cut -d "<" -f 1 | cut -d " " -f 3 | sort -V | tail -n 1)
echo "Latest version: ${VERSION}"
echo -ne "\nContinue? (y/N): "
read -r YESNO
[[ "${YESNO}" != "y" ]] && exit 0
SRCARCH="fp_${VERSION}_archive.zip"
TAG="myreq"
PKGTYPE="txz"
CWD=$(pwd)
URL="https://fpdownload.macromedia.com/pub/flashplayer/installers/archive/"
if ! [ -r "${CWD}/${SRCARCH}" ]; then
wget "${URL}${SRCARCH}"
fi
ARCH="i386"
BITS="32"
LIBDIRSUFFIX=""
[[ "$(uname -m)" == "x86_64" ]] && \
LIBDIRSUFFIX="64" && \
ARCH="x86_64" && \
BITS="64"
TMP="/tmp/${PKGNAME}-build"
PKG="${TMP}/package-${PKGNAME}"
OUTPUT="/root/src"
rm -rf "${TMP}"
mkdir -p "${PKG}" "${OUTPUT}"
cd "${TMP}" || exit 1
unzip "${CWD}/${SRCARCH}"
tar xvf "11"_*_"${BITS}bit/flashplayer11"_*_"linux.${ARCH}.tar.gz" -C ${PKG}
mkdir -p "${PKG}/usr/lib${LIBDIRSUFFIX}/mozilla/plugins"
PLUGIN="${PKG}/usr/lib${LIBDIRSUFFIX}/mozilla/plugins/libflashplayer-11.so"
mv ${PKG}/libflashplayer.so "${PLUGIN}"
chmod 755 "${PLUGIN}"
[[ "${ARCH}" == "x86_64" ]] && rm -rf "${PKG}/usr/lib"
rm -rf "${PKG:?}/usr/bin"
rm -rf "${PKG:?}/usr/share"
rm -rf "${PKG:?}/usr/lib${LIBDIRSUFFIX}/kde4"
mkdir -p "${PKG}/usr/doc/${PKGNAME}-${VERSION}"
mv "${PKG}/readme.txt" "${PKG}/LGPL" "${PKG}/usr/doc/${PKGNAME}-${VERSION}"
cat "${CWD}/${PKGNAME}.SlackBuild" > \
"${PKG}/usr/doc/${PKGNAME}-${VERSION}/${PKGNAME}.SlackBuild"
mkdir -p "${PKG}/install"
cat "${CWD}/slack-desc" > "${PKG}/install/slack-desc"
cat "${CWD}/doinst.sh" > "${PKG}/install/doinst.sh"
cd "${PKG}" || exit 1
chown -R root:root "${PKG}"
PKGBIN="${OUTPUT}/${PKGNAME}-${VERSION}-${ARCH}-${TAG}.${PKGTYPE}"
rm -f "${PKGBIN}"
/sbin/makepkg -l y -c n -p "${PKGBIN}"
| true
|
818cc7945107377e77c90fd579fb77251609ae83
|
Shell
|
shizonic/packages
|
/net/squid/files/squid.init
|
UTF-8
| 1,995
| 3.328125
| 3
|
[
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh /etc/rc.common
# Copyright (C) 2015 OpenWrt.org
START=90
STOP=10
USE_PROCD=1
PROG=/usr/sbin/squid
CONFIGFILE="/tmp/squid/squid.conf"
MIMETABLE="/tmp/squid/mime.conf"
validate_squid_section() {
uci_load_validate squid squid "$1" "$2" \
'config_file:string' \
'http_port:port:3128' \
'http_port_options:string' \
'ssldb:string' \
'ssldb_options:string' \
'coredump_dir:string' \
'visible_hostname:string:OpenWrt' \
'pinger_enable:string:off' \
'mime_table:string:/etc/squid/mime.conf'
}
create_squid_user() {
user_exists squid || user_add squid $USERID
group_exists squid || group_add squid $USERID && group_add_user squid squid
}
start_squid_instance() {
local config_dir
[ "$2" = 0 ] || {
echo "validation failed"
return 1
}
config_dir=$(dirname $CONFIGFILE)
[ -d $config_dir ] || mkdir -p $config_dir && chown nobody:nogroup $config_dir
[ -d $coredump_dir ] || mkdir -p $coredump_dir && chown nobody:nogroup $coredump_dir
[ "$ssldb" ] && ( [ -f "$ssldb"/size ] || /usr/lib/squid/security_file_certgen -c -s $ssldb $ssldb_options && chown -R nobody:nogroup $ssldb )
cat $config_file > $CONFIGFILE
echo http_port $http_port $http_port_options >> $CONFIGFILE
echo coredump_dir $coredump_dir >> $CONFIGFILE
echo visible_hostname $visible_hostname >> $CONFIGFILE
echo pinger_enable $pinger_enable >> $CONFIGFILE
cat $mime_table > $MIMETABLE
echo mime_table $MIMETABLE >> $CONFIGFILE
[ "$ssldb" ] && echo sslcrtd_program /usr/lib/squid/security_file_certgen -s $ssldb $ssldb_options >> $CONFIGFILE
$PROG -s -f $CONFIGFILE -N -z 2>/dev/null
procd_open_instance
procd_set_param command $PROG -s -f $CONFIGFILE -N
procd_set_param file $CONFIGFILE
procd_set_param respawn
procd_close_instance
}
start_service()
{
validate_squid_section squid start_squid_instance
}
stop_service()
{
$PROG -f $CONFIGFILE -N -k shutdown 2>/dev/null
}
service_triggers()
{
procd_add_reload_trigger "squid"
procd_add_validation validate_squid_section
}
| true
|
ea48e923ee9fce09a6b7ee2f6a84828cf3c42367
|
Shell
|
pambros/qLibs
|
/common/util.sh
|
UTF-8
| 127
| 3.25
| 3
|
[] |
no_license
|
pathWindowsToUnix () {
_TMP_PATH=$(echo "$1" | sed 's,\\,/,g')
_TMP_PATH=/${_TMP_PATH:0:1}${_TMP_PATH:2}
echo ${_TMP_PATH}
}
| true
|
1bcb2245f368af04669b5aec68348a3dfbd9a897
|
Shell
|
reinaldo-z/tiniba
|
/utils/infiniband-quad.sh
|
UTF-8
| 4,532
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
## please keep this history.
##
## LAST MODIFICATION : Febrero 18 2010 by Cabellos a 16:52
## LAST MODIFICATION : Febrero 18 2010 by Cabellos a 18:06
## LAST MODIFICATION : Septiembre 28 2010 by Cabellos a 16:03
RED='\e[0;31m'
BLUE='\e[0;34m'
BLU='\e[1;34m'
CYAN='\e[0;36m'
GREEN='\e[0;32m'
GRE='\e[1;32m'
YELLOW='\e[1;33m'
NC='\e[0m' # No Color
WORKZPACE="workspace"
BASEDIR=`dirname $PWD`
PARENT=`basename $BASEDIR`
CASO=`basename $PWD`
ANFIBIO=`hostname`
declare -a FALSEMACHINES
declare -a VIVOS
declare -a MUERTOS
### hha this all the cluster jl
MAQ501[1]="quad01"
MAQ501[2]="quad02"
MAQ501[3]="quad03"
MAQ501[4]="quad04"
MAQ501[5]="quad05"
MAQ501[6]="quad06"
MAQ501[7]="quad07"
MAQ501[8]="quad08"
MAQ501[9]="quad09"
MAQ501[10]="quad10"
MAQ501[11]="quad11"
MAQ501[12]="quad12"
MAQ501[13]="quad13"
MAQ501[14]="quad14"
IPES[1]="172.17.1.37"
IPES[2]="172.17.1.38"
IPES[3]="172.17.1.39"
IPES[4]="172.17.1.40"
IPES[5]="172.17.1.41"
IPES[6]="172.17.1.42"
IPES[7]="172.17.1.43"
IPES[8]="172.17.1.44"
IPES[9]="172.17.1.45"
IPES[10]="172.17.1.46"
IPES[11]="172.17.1.47"
IPES[12]="172.17.1.48"
IPES[13]="172.17.1.49"
IPES[14]="172.17.1.50"
function findMaq {
ALLOWED="0"
SALIDA="1000"
local kk=1
local NOMAQ501a=`echo ${#MAQ501[@]}`
for ((kk=1;kk<=($NOMAQ501a); kk++));do
if [ "${MAQ501[$kk]}" == "$1" ];then
SALIDA="$kk"
ALLOWED=1
fi
done
}
# Find the index of hostname $1 in the MAQ501[] table.
# Sets global INDES (1000 when $1 is not in the table).
function findIndex {
    local kk
    INDES=1000
    # Iterate the whole table; the last match wins (indices are unique in
    # practice, so this is equivalent to the first match).
    for ((kk = 1; kk <= ${#MAQ501[@]}; kk++)); do
        if [[ "${MAQ501[$kk]}" == "$1" ]]; then
            INDES=$kk
        fi
    done
}
# Print a tab-indented horizontal separator line in blue.
Line() {
    # %b expands the escape sequences stored in $BLUE/$NC, matching the
    # original behavior of interpolating them into the printf format.
    printf '\t%b=============================%b\n' "$BLUE" "$NC"
}
# --- main: filter the node list down to alive nodes, then boot the mpd
# ring, dropping nodes without a working infiniband connection. ---
# NOTE(review): $1 is consumed here (via the temp file "dog") BEFORE the
# $# check below, so running with no argument still creates/removes files.
echo $1 > dog
name=`awk -F. '{print $2}' dog`
rm dog
cp $1 .$name.original
IN=.$name.original
if [ $# -eq 0 ];then
printf "\t ${RED}Hold on !${NC}\n"
printf "\t I need a input file with the machines QUADxx\n"
printf "\t ${RED}Stop right now ...${NC}\n"
exit 0
fi
if [ ! -e $IN ];then
printf "\t ${RED}Hold on !, There is not FILE:${NC} $IN ...create one.\n"
printf "\t ${RED}Stop right now ...${NC}\n"
exit 0
else
FALSEMACHINES=(`cat $IN`)
NOFALSEMACHINES=`echo ${#FALSEMACHINES[@]}`
jj=0
mm=0
nn=0
rm -f $IN
touch $IN
# Keep only known nodes whose nmap probe on port 514 answers from the
# expected IP (IPES[]); dead nodes are collected in MUERTOS.
for ((hh=0;hh<=($NOFALSEMACHINES-1); hh++));do
findMaq ${FALSEMACHINES[$hh]}
if [ "$ALLOWED" == "1" ];then
IPT=`nmap --max_rtt_timeout 20 -oG - -p 514 ${FALSEMACHINES[$hh]} | grep open | cut -d" " -f2`
findIndex ${FALSEMACHINES[$hh]}
if [ "$IPT" == "${IPES[$INDES]}" ];then
let jj++
let nn++
echo ${FALSEMACHINES[$hh]} >> $IN
else
let jj++
let mm++
MUERTOS[$mm]=${FALSEMACHINES[$hh]}
fi
fi
done
# Report the nodes that were dropped as dead.
NOMUERTOS=`echo ${#MUERTOS[@]}`
if [ $NOMUERTOS -gt 0 ];then
printf "\tYour original $IN has $NOMUERTOS nodes dead that have been eliminated\n"
for ((hh=1;hh<=($NOMUERTOS); hh++));do
printf "\t%4d%12s${RED}%7s${NC}\n" "$hh" "${MUERTOS[$hh]}" "Dead"
done
fi
fi
# The mpd ring (infiniband launch) can only be started from quad01.
if [ "$ANFIBIO" == "quad01" ];then
if [ ! -e $IN ];then
printf "\t ${RED}There is not file${NC} $IN\n"
exit 0
else
MACHINESinf=(`cat $IN`)
NOMACHINESinf=`echo ${#MACHINESinf[@]}`
fi
else
# printf "\t ${RED}Hold on !${NC}\n"
printf "\tTo run with infiniband\n"
printf "\tyou need to be in quad01\n"
# printf "\t ${RED}Stop right now ...${NC}\n"
exit 0
fi
######
# Retry loop: boot the mpd ring over the current node list; any node that
# "failed to connect to mpd" is removed from $IN and the boot is retried.
# SAL=20 marks success and terminates the loop.
SAL=0
while [ "$SAL" -lt "10" ];do
MACHINESinf=(`cat $IN`)
NOMACHINESinf=`echo ${#MACHINESinf[@]}`
# Line
# echo "mpdboot -v -r ssh -f $IN -n $NOMACHINESinf > INFI"
# Line
mpdboot -v -r ssh -f $IN -n $NOMACHINESinf > INFI
QUEPEX=`grep "failed to connect to mpd" INFI`
rm -f $IN
touch $IN
if [ -z "$QUEPEX" ];then
printf "\t Infiniband working in alive nodes is ok and your final list is:\n"
for ((hh=0;hh<=($NOMACHINESinf-1); hh++));do
let "PP=hh+1"
printf "\t[$PP] ${MACHINESinf[$hh]}\n"
echo ${MACHINESinf[$hh]} >> $IN
done
Line
printf "\tin file ${RED}$IN${NC}\n"
Line
SAL=20
else
# Last 6 characters of the grep match hold the failing node's name.
NODE=`echo ${QUEPEX: -6}`
for ((hh=0;hh<=($NOMACHINESinf-1); hh++));do
if [ ${MACHINESinf[$hh]} != $NODE ];then
echo ${MACHINESinf[$hh]} >> $IN
else
echo -e ${BLUE}******************${NC}
echo -e ${RED} node ${MACHINESinf[$hh]} does not have infiniband connection${NC}
echo -e ${BLUE}******************${NC}
fi
done
fi
done
rm -f INFI
| true
|
e834b81ce1f51efbeae856b5c2fa73838a78cee3
|
Shell
|
anshulguleria/dotfiles
|
/install-scripts/asdf-setup.sh
|
UTF-8
| 855
| 3.5625
| 4
|
[] |
no_license
|
# Install asdf (version manager) and wire it into ~/.zshrc.
# -x Print commands getting executed
# -e exit on error
set -xe
# Prerequisites:
# * oh-my-zsh setup
# * presence of ~/.zshrc file
# Install asdf by git clone of master branch
git clone https://github.com/asdf-vm/asdf.git ~/.asdf
# Add path to zsh
# ./configurations/zsh-asdf-setup file contains necessary configuration. We need
# to add this file as source in ~/.zshrc file
echo "" >> ~/.zshrc
echo "# load asdf paths and autocompletions" >> ~/.zshrc
# Directory this script resides in; both substitutions are quoted so a
# path containing spaces cannot word-split (the original left them bare).
dir_path=$(realpath "$(dirname "$0")")
# TODO: don't append again if already present
echo "source $dir_path/configurations/zsh-asdf-setup" >> ~/.zshrc
# Source your zshrc file so that we get asdf command
# NOTE(review): this script runs under bash but sources a zsh rc file --
# confirm the sourced lines are bash-compatible.
source ~/.zshrc
# TODO: prompt user if he wants to setup node
# sh $dir_path/node-setup.sh
# TODO: Prompt user if he wants to setup deno
# sh $dir_path/deno-setup.sh
| true
|
c5c0734e6b7f2f2c14852b013dc2ac37a4384059
|
Shell
|
holysatan007/myrepo
|
/ecs_status.sh
|
UTF-8
| 840
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Poll an ECS service until its newest event reports a steady state, then
# exit 0; give up with exit 1 after 200 polls (~20 minutes at 6 s apart).
# Usage: ecs_status.sh <cluster-name> <service-name>
clustername=$1
servicename=$2
# Timestamp of the newest service event when we started watching.
prevtime=$(sudo aws ecs describe-services --cluster "$clustername" --service "$servicename" | jq '.services[].events[0].createdAt')
pattern='(.*has reached a steady state.*)'
count=0
while true
do
  getcurrstatus=$(sudo aws ecs describe-services --cluster "$clustername" --service "$servicename" | jq '.services[].events[0].message')
  eventtime=$(sudo aws ecs describe-services --cluster "$clustername" --service "$servicename" | jq '.services[].events[0].createdAt')
  echo "eventtime -- $eventtime "
  echo "getcurrstatus -- $getcurrstatus "
  # NOTE(review): '>' compares the jq output lexicographically; this is
  # correct for equal-width numeric epoch timestamps -- confirm the
  # createdAt format.  A newer steady-state event means success.
  if [[ ( $eventtime > $prevtime && $getcurrstatus =~ $pattern ) ]]
  then
    echo "Job is sucessful"
    # (the unreachable `break` after this exit has been removed)
    exit 0
  fi
  # $((...)) replaces the deprecated $[...] arithmetic form.
  count=$((count + 1))
  if [ "$count" -gt 200 ]
  then
    echo " please check the service its it not in steady state "
    exit 1
  fi
  sleep 6
done
| true
|
9dcacd9291dcda8eb46922c3d36832ebce46e7d8
|
Shell
|
wanelo/postgres-scripts
|
/restore_wal_archives.sh
|
UTF-8
| 576
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Restore WAL archives from a master's backup directory onto this Postgres
# replica: stop the service, replace wal_archive, fix ownership, restart.
# $1 - FQDN of the master whose backups to copy.
export MASTER=$1
export SERVICE=postgres924
function usage {
echo "USAGE: $0 <master_fqdn>"
}
# Quoted test: the original bare $MASTER made [ -z ] misbehave on values
# with spaces.
if [ -z "$MASTER" ]; then
usage
exit 1
fi
# recovery.conf only exists on replicas; refuse to run on a master.
if [ ! -f /var/pgsql/data92/recovery.conf ]; then
echo "Script can only be run on a Postgres replica. Missing recovery.conf."
echo
usage
exit 3
fi
svcadm disable -s $SERVICE
rm -rf /var/pgsql/data92/wal_archive
mkdir -p /var/pgsql/data92/wal_archive
cp -v -r "/backups/$MASTER/pgsql/wal_archive/"* /var/pgsql/data92/wal_archive/
chown -R postgres:postgres /var/pgsql/data92/wal_archive
svcadm enable -s $SERVICE
| true
|
903c7004fc25adda57823658967ae9c838bb6bf6
|
Shell
|
cirque-bts/cirque-on-cloud
|
/webapp.postinstall
|
UTF-8
| 657
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# Post-install hook: set up the database, then create the JSON-RPC
# credentials file and servicer account on first run only.
echo "****** POSTINSTALL STARTED ******"
DEPLOY_CURRENT=$HOME/current
JSONRPC_CREDENTIALS=$DEPLOY_CURRENT/etc/jsonrpc_credentials.pl
### create database
$DEPLOY_CURRENT/misc/dotcloud_db_setup.pl
$DEPLOY_CURRENT/bin/cirqued --setup
echo " Database is ready."
### create jsonrpc-credentials file and servicer-account
# Skipped when the credentials file already exists (idempotent re-deploys).
if [ ! -e $JSONRPC_CREDENTIALS ]; then
$DEPLOY_CURRENT/bin/cirqueadmin servicer create --id=cirque --name=cirque &&
$DEPLOY_CURRENT/bin/cirqueadmin servicer info cirque --format=perl > $JSONRPC_CREDENTIALS &&
echo ' JSON-RPC credentials file was created.'
fi
echo "****** POSTINSTALL WAS FINISHED ******"
| true
|
d6bf334494d52d44764f2c80475e6b09a470d251
|
Shell
|
axray/dataware.dreamplug
|
/usr/lib/pm-utils/sleep.d/98smart-kernel-video
|
UTF-8
| 2,150
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Copyright 2008 Victor Lowther <victor.lowther@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
. "${PM_FUNCTIONS}"
# Test to see if the kernel has a video driver that is smart enough to
# handle quirks without external assistance. If it is, remove the quirks.
# Returns 1 when the nvidia module is not loaded; otherwise strips all
# video quirk parameters (remove_parameters comes from PM_FUNCTIONS).
# NOTE(review): --quirk-vbe-post is listed twice in each function below.
smart_kernel_nvidia()
{
# despite the bad rep the nvidia driver has, it is miles better than
# any other video driver when it comes to handling power managment and
# suspend/resume in a quirk-free manner.
[ -d /sys/module/nvidia ] || return 1
remove_parameters --quirk-dpms-on \
--quirk-dpms-suspend \
--quirk-s3-mode \
--quirk-s3-bios \
--quirk-vbe-post \
--quirk-vbe-post \
--quirk-vga-mode3 \
--quirk-vbemode-restore \
--quirk-vbestate-restore \
--quirk-reset-brightness \
--quirk-radeon-off
}
# Same treatment for the proprietary ATI (fglrx) driver.
smart_kernel_fglrx()
{
# the ATI driver is pretty good about it, too.
[ -d /sys/module/fglrx ] || return 1
remove_parameters --quirk-dpms-on \
--quirk-dpms-suspend \
--quirk-s3-mode \
--quirk-s3-bios \
--quirk-vbe-post \
--quirk-vbe-post \
--quirk-vga-mode3 \
--quirk-vbemode-restore \
--quirk-vbestate-restore \
--quirk-reset-brightness \
--quirk-radeon-off
}
# Intel i915: only kernels >= 2.6.26 qualify, and the S3 mode/bios quirks
# are deliberately kept (kernel modesetting still needs them).
smart_kernel_intel()
{
# currently, intel kernel modesetting is not quite smart enough
# we still need acpi s3 kernel modesetting hooks, so don't remove those
# options if they were passed.
[ -d /sys/module/i915 ] || return 1
local kernel_rev="$(uname -r |awk -F '[_-]' '{print $1}')"
[ "$kernel_rev" \> "2.6.26" -o "$kernel_rev" = "2.6.26" ] || return 1
remove_parameters --quirk-dpms-on \
--quirk-dpms-suspend \
--quirk-vbe-post \
--quirk-vbe-post \
--quirk-vga-mode3 \
--quirk-vbemode-restore \
--quirk-vbestate-restore \
--quirk-reset-brightness \
--quirk-radeon-off
}
# Try each known-smart driver in turn; $NA (from PM_FUNCTIONS) signals
# "not applicable" to pm-utils when none matched.
smart_kernel_video()
{
smart_kernel_nvidia || smart_kernel_fglrx || smart_kernel_intel || \
return $NA
}
# Only act on suspend/hibernate events; everything else is a no-op.
case $1 in
suspend|hibernate)
smart_kernel_video ;;
*) exit 0 ;;
esac
| true
|
57b0b91769902b2c867214278435fd7ddf5cedfd
|
Shell
|
robotstreamer/robotstreamer
|
/scripts/install_python
|
UTF-8
| 460
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Build and install CPython $RELEASE from source, then remove the build tree.
RELEASE=3.6.3
sudo apt-get update
# install dependencies
# NOTE(review): no -y flag, so apt-get will prompt interactively -- this
# script cannot run unattended as written.
sudo apt-get install libbz2-dev liblzma-dev libsqlite3-dev libncurses5-dev libgdbm-dev zlib1g-dev libreadline-dev libssl-dev tk-dev
# download and build Python
mkdir ~/python3
cd ~/python3
wget https://www.python.org/ftp/python/$RELEASE/Python-$RELEASE.tar.xz
tar xvf Python-$RELEASE.tar.xz
cd Python-$RELEASE
./configure
make
sudo make install
# Clean up the extracted source tree (the tarball is kept in ~/python3).
sudo rm -rf ~/python3/Python-$RELEASE
cd ~
| true
|
5e195001e0355711f5127e385b5fe0a278acdad7
|
Shell
|
ilventu/aur-mirror
|
/prebuild-svn/PKGBUILD
|
UTF-8
| 1,078
| 3.25
| 3
|
[] |
no_license
|
# PKGBUILD metadata for the prebuild svn snapshot package.
pkgname=prebuild-svn
pkgver=324
pkgrel=1
pkgdesc="A cross-platform XML-driven pre-build tool which allows developers to easily generate project files for major IDE's and .NET development tools (svn)"
arch=(any)
url="http://mono-project.com/Prebuild"
license=("BSD")
depends=(mono)
makedepends=(mono subversion)
conflicts=(prebuild)
provides=(prebuild)
# Upstream subversion trunk and the local checkout directory name.
_svntrunk="https://dnpb.svn.sourceforge.net/svnroot/dnpb/trunk"
_svnmod="dnpb"
# Check out (or update) the pinned svn revision, build Prebuild itself,
# then use it to generate a makefile and compile.
build() {
cd "${srcdir}"
# Reuse an existing working copy when present; otherwise fresh checkout.
if [ -d "${_svnmod}/.svn" ]; then
(cd "$_svnmod" && svn up -r $pkgver)
else
svn co "$_svntrunk" --config-dir ./ -r $pkgver $_svnmod
fi
msg 'SVN checkout done or server timeout'
# Build in a throwaway copy so the pristine checkout stays clean.
rm -rf "${_svnmod}-build"
cp -r "$_svnmod" "${_svnmod}-build"
cd "${_svnmod}-build/Prebuild"
mono Prebuild.exe /target makefile
make
}
# Install Prebuild.exe and a /usr/bin wrapper that forwards its arguments
# to mono.
package() {
cd "${_svnmod}-build/Prebuild"
install -Dm755 "Prebuild.exe" "$pkgdir/usr/lib/prebuild/prebuild.exe"
# Write the wrapper with single-quoted text so "$@" lands in the script
# LITERALLY.  The original `echo -e "... \"$@\""` expanded $@ at package
# time, so the installed wrapper never forwarded its arguments.
printf '%s\n' '#!/bin/sh' '' 'mono /usr/lib/prebuild/prebuild.exe "$@"' > "$srcdir/prebuild.sh"
install -Dm755 "$srcdir/prebuild.sh" "$pkgdir/usr/bin/prebuild"
}
| true
|
a3f5ea4bbb1d40da1df9980456f3f64a7ae446e4
|
Shell
|
benjamincjackson/mice_LD
|
/10_get_mut_matrix/get_mut_matrix.sh
|
UTF-8
| 334
| 2.5625
| 3
|
[] |
no_license
|
# Build a mutation matrix from est-sfs input/output files.
# $1 - est-sfs input file; its basename up to the first '.' names the chromosome
# $2 - est-sfs output file; the first 8 header lines are discarded
EST_SFS_INPUT=$1
EST_SFS_OUTPUT=$2
# Chromosome name = input basename up to the first dot.  basename + the
# ${var%%.*} expansion replace the old echo|rev|cut|rev|cut pipeline
# (fewer forks, same result), and all uses below are quoted.
CHR=$(basename "$EST_SFS_INPUT")
CHR=${CHR%%.*}
# Drop the est-sfs header, then pair each input row with its output row.
tail -n+9 "$EST_SFS_OUTPUT" > "${CHR}.temp"
paste "$EST_SFS_INPUT" "${CHR}.temp" > "${CHR}.in_out.txt"
python3 ~/github_repos/wild_mice/est-sfs_2_mut_mat.py "${CHR}.in_out.txt" "${CHR}.mut_mat.txt" 48
rm "${CHR}.in_out.txt" "${CHR}.temp"
| true
|
04e6df26a240a38cdaa22c59bd4423fe3f03a9f4
|
Shell
|
rmwu/config
|
/.zshrc
|
UTF-8
| 2,512
| 2.78125
| 3
|
[] |
no_license
|
####################
# .ZSHRC
# rachel's preferences
# 2019-10-10
####################
# # # # # # # # # #
# oh-my-zsh
# # # # # # # # # #
# Path to your oh-my-zsh installation.
export ZSH="/afs/csail.mit.edu/u/r/rmwu/.oh-my-zsh"
ZSH_THEME="agnoster"
DISABLE_UNTRACKED_FILES_DIRTY="true"
HIST_STAMPS="yyyy-mm-dd"
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
plugins=(git fzf)
source $ZSH/oh-my-zsh.sh
export EDITOR=vim
# # # # # # # # # #
# General
# # # # # # # # # #
# Listing alias
alias lls='ls -FGltr'
# return to previous dir
alias back='cd $OLDPWD'
# an ssh-suitable hostname
alias scpme='echo $(whoami)@$(hostname):$(pwd)'
alias weather='curl http://wttr.in/Boston?m'
# # # # # # # # # #
# Web Development
# # # # # # # # # #
# setup http server
alias localserv='python -m SimpleHTTPServer'
# sass watch with style
# usage: first arg file, second style
alias sasswatch='function _sasswatch(){ sass --watch $1.sass:$1.css --style $2; };_sasswatch'
# # # # # # # # # #
# Remote Machines
# # # # # # # # # #
# mosh to rosetta<N>.csail.mit.edu; $1 selects the machine number suffix.
rosetta () {
mosh rmwu@rosetta$1.csail.mit.edu --experimental-remote-ip=remote
}
athena='rmwu@athena.dialup.mit.edu'
alias sshmit='ssh $athena'
# # # # # # # # # #
# CLANG
# # # # # # # # # #
alias gcc-4.2='gcc'
# # # # # # # # # #
# PYTHON
# # # # # # # # # #
# conda
# export PATH="/data/rsg/nlp/rmwu/miniconda3/bin:$PATH" # commented out by conda initialize
# Environment-switching shortcuts: deactivate the current env first.
alias py37='conda deactivate; conda activate py37'
alias mgh='conda deactivate; conda activate mgh'
# updated to cuda 10
export CUDA_HOME=/usr/local/cuda-10.1
export PATH=/usr/local/cuda-10.1/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda-10.1/lib64${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}
# gpustat with color
alias gpu="watch -n0.5 --color gpustat --color"
# use python 3
alias python='python3'
# profiling
alias pyprof='python -m cProfile -s time'
# # # # # # # # # #
# GIT
# # # # # # # # # #
alias gts='git status'
alias gtd='git diff'
alias gtl='git log'
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/data/rsg/nlp/rmwu/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "/data/rsg/nlp/rmwu/miniconda3/etc/profile.d/conda.sh" ]; then
. "/data/rsg/nlp/rmwu/miniconda3/etc/profile.d/conda.sh"
else
export PATH="/data/rsg/nlp/rmwu/miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
# <<< conda initialize <<<
| true
|
618dd058d6fef8c11397805e354411244cb0927b
|
Shell
|
arschles/alpine-builds
|
/kubectl/go-build.sh
|
UTF-8
| 602
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# this script is part of a set that should be run inside a container. run build.sh to run the entire build
# Fetch godep, download the pinned kubernetes source, and build kubectl.
echo "go get github.com/tools/godep"
go get github.com/tools/godep
export PATH=$PATH:$GOPATH/bin
echo "downloading k8s code"
# Create only the PARENT directory.  The original pre-created
# .../k8s.io/kubernetes, so the later mv nested the checkout one level
# too deep (.../kubernetes/kubernetes-1.1.1) and the build ran in an
# empty directory.
mkdir -p "$GOPATH/src/k8s.io"
mkdir -p /kubedl
cd /kubedl
curl -s -L https://github.com/kubernetes/kubernetes/archive/v1.1.1.tar.gz > k8s-1.1.1.tar.gz
tar -xzf k8s-1.1.1.tar.gz
mv kubernetes-1.1.1 "$GOPATH/src/k8s.io/kubernetes"
cd "$GOPATH/src/k8s.io/kubernetes"
echo "go build"
# go's flag parser stops at the first non-flag argument, so -o must come
# BEFORE the .go file (the original trailing -o was rejected).
CGO_ENABLED=0 godep go build -o /pwd/kubectl cmd/kubectl/kubectl.go
|
c20af8309393ec61f7c9aa5cbf29a4800c4462ae
|
Shell
|
damnops/homework
|
/zhuye/auto/start
|
UTF-8
| 165
| 2.6875
| 3
|
[] |
no_license
|
#! /bin/bash -e
#
# Start each docker-compose service listed in RUNLIST for this project.
# NOTE(review): the -e in the shebang is lost when invoked as `bash start`.
cd $(dirname $0)/..
# PROJECTNAME is expected to be set by the sourced functions file --
# TODO confirm.
source ./auto/functions
RUNLIST="app"
for r in ${RUNLIST}; do
docker-compose --project-name ${PROJECTNAME} start ${r}
done
| true
|
779ba52b340987c72bd2d56e94541a197d01e404
|
Shell
|
padresmurfa/minetest-development-with-osx-and-docker
|
/repositories/clone
|
UTF-8
| 744
| 3.421875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Clone the minetest engine and minetest_game repos into ../source on
# first run (the helper scripts are no-ops when already cloned).
# see e.g. https://gist.github.com/mohanpedala/1e2ff5661761d3abd0385e8223e16425 for details
set -e
set -u
set -o pipefail
# determine the directory this script resides in
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
############################################################################################
# GIT REPOS
# install an initial version of the git repos, if this has not yet been done
############################################################################################
pushd ../source >> /dev/null
"$SCRIPT_DIR/.maybe_clone_minetest_git_repo"
"$SCRIPT_DIR/.maybe_install_game_from_git_repo" minetest_game https://github.com/minetest/minetest_game.git
popd >> /dev/null
| true
|
f717b0ae8c005fa3973b5d5f99ce5935a6e20556
|
Shell
|
dbfun/tools
|
/mymount
|
UTF-8
| 2,751
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# Mount/unmount project directories over sshfs or cifs.
# Usage: mymount [<project> [-u]]   (no args: show status)
PROJECT=$1
OPTION=$2
cd $(dirname $(readlink -f $0))
# PROFILE (sshfs) and SAMBA_PROFILE (cifs) maps are populated by the
# sourced per-project .conf files.
declare -A PROFILE
declare -A SAMBA_PROFILE
source <(cat etc/projects.d/*.conf)
PROJECT_MNT_PATH=~/mnt/$PROJECT
# Abort (exit 3) when the project is already mounted; announce a
# reconnect when the directory exists but is not a mount point.
function checkMountPoint {
if [ -d "$PROJECT_MNT_PATH" ]; then
mountpoint "$PROJECT_MNT_PATH"
if [ "$?" == "0" ]; then
echo "Project already mounted $PROJECT_MNT_PATH"
exit 3
else
echo "Reconnect..."
fi
fi
}
# Create the mount point directory.
# NOTE(review): _UID/_GID are computed here but not used anywhere visible
# in this script -- possibly leftovers.
function prepareMount {
_UID=`id -u`
_GID=`id -g`
mkdir -p $PROJECT_MNT_PATH
}
# Print the configured project names (sshfs and samba) and the currently
# mounted sshfs/cifs filesystems.
function showStatus {
echo ===== PROJECTS =====
for i in "${!PROFILE[@]}"; do
echo $i
done
echo
echo ===== SAMBA PROJECTS =====
for i in "${!SAMBA_PROFILE[@]}"; do
echo $i
done
echo
echo ===== MOUNTED =====
mount -t fuse.sshfs
mount -t cifs
echo
}
# Require at least one project profile to exist.
CONF=`ls etc/projects.d/*.conf > /dev/null 2>&1`
if [ $? -ne "0" ]; then
echo "No projects profiles"
exit 2
fi
# No project argument: just report status.
if [ -z "$PROJECT" ]; then
showStatus
exit 0
fi
# Resolve the project to a mount type and connection string, verifying
# the matching mount tool is installed.
if [ -n "${PROFILE[$PROJECT]}" ]; then
RESP=`which sshfs`
if [ $? -ne "0" ]; then
echo 'No sshfs! Use: sudo apt-get install sshfs'
exit 10
fi
TYPE='sshfs'
SSH_SERVER_STR=${PROFILE[$PROJECT]}
elif [ -n "${SAMBA_PROFILE[$PROJECT]}" ]; then
RESP=`which mount.cifs`
if [ $? -ne "0" ]; then
echo 'No mount.cifs! Use: sudo apt-get install cifs-utils'
exit 10
fi
TYPE='mount.cifs'
SSH_SERVER_STR=${SAMBA_PROFILE[$PROJECT]}
else
echo "No such project profile"
exit 4
fi
# Mount or unmount (-u) according to the resolved type.
if [ "$TYPE" == "sshfs" ]; then
case "$OPTION" in
-u)
fusermount -u -z $PROJECT_MNT_PATH
rmdir $PROJECT_MNT_PATH
exit 0
;;
*)
checkMountPoint
prepareMount
sshfs -o allow_other,reconnect,cache_timeout=5,ServerAliveInterval=15,ServerAliveCountMax=3 "$SSH_SERVER_STR" $PROJECT_MNT_PATH
if [ 0 != "$?" ]; then
echo 'Error'
rmdir $PROJECT_MNT_PATH
exit 5
fi
exit 0
;;
esac
elif [ "$TYPE" == "mount.cifs" ]; then
case "$OPTION" in
-u)
umount $PROJECT_MNT_PATH
rmdir $PROJECT_MNT_PATH
exit 0
;;
*)
checkMountPoint
prepareMount
# VAR_MOUNTPOINT inside the profile string is substituted with the
# actual mount path.
/sbin/mount.cifs -o allow_other,reconnect,ServerAliveInterval=15,ServerAliveCountMax=3,iocharset=utf8 ${SSH_SERVER_STR/VAR_MOUNTPOINT/$PROJECT_MNT_PATH}
if [ 0 != "$?" ]; then
echo 'Error'
rmdir $PROJECT_MNT_PATH
exit 5
fi
exit 0
;;
esac
else
echo "No such type"
exit 6
fi
| true
|
9b5e19d6fde107befa5b65ad4ce579cde354f6b9
|
Shell
|
raygomez/bash-exercise
|
/file-attr.sh
|
UTF-8
| 552
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Report basic attributes of one file: existence, read/write/execute
# permission for the current user, and the owner.
# Usage: file-attr.sh <path>
if [ $# -ne 1 ]
then
echo 'It needs one argument.'
# Exit codes must be 0-255; the original `exit -1` is out of range and
# showed up to callers as 255.
exit 1
fi
arg=$1
echo
echo "File:$arg"
if [ ! -e "$arg" ]
then
echo -e '\tFile does not exist.'
exit
fi
echo -e '\tFile exists.'
if [ -r "$arg" ]
then
echo -e '\tFile is readable.'
else
echo -e '\tFile is not readable. '
fi
if [ -w "$arg" ]
then
echo -e '\tFile is writable.'
else
echo -e '\tFile is not writable. '
fi
if [ -x "$arg" ]
then
echo -e '\tFile is executable.'
else
echo -e '\tFile is not executable. '
fi
# Quoted argument and $( ) instead of backticks; the original bare $arg
# broke on filenames with spaces.
echo -e '\tOwner:'"$(stat -c %U "$arg")"
echo
| true
|
89b5a6837731d0ad57f5cd92e8546e6a6aa244e6
|
Shell
|
ifremer-bioinformatics/FLORA
|
/bin/remove_rrna.sh
|
UTF-8
| 816
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
###############################################################################
## ##
## Purpose of script: Remove rRNA from RNAseq raw reads using Bowtie2 and ##
## the SILVA database ##
## ##
###############################################################################
# var settings
# Positional arguments: CPUs, bowtie2 index of the rRNA DB, paired reads
# R1/R2, output prefix for the non-rRNA reads, and a log file for the
# command line that was run.
args=("$@")
CPUS=${args[0]}
RRNA_DB=${args[1]}
R1=${args[2]}
R2=${args[3]}
FILTERED_READS=${args[4]}
LOGCMD=${args[5]}
#Run Bowtie2
# --un-conc-gz keeps only read pairs that did NOT align to the rRNA DB.
# NOTE(review): CMD is built as a string and eval'd so it can be logged;
# paths containing spaces would break -- confirm inputs are space-free.
CMD="bowtie2 --quiet --nofw --very-sensitive --phred33 -x ${RRNA_DB} -p ${CPUS} -1 ${R1} -2 ${R2} --un-conc-gz ${FILTERED_READS}"
echo ${CMD} > ${LOGCMD}
eval ${CMD}
| true
|
95f64a4647e760efcaf569eaa4647b513c6db182
|
Shell
|
uofis/blackarch
|
/archtrack/packages/ecryptfs-utils/PKGBUILD
|
UTF-8
| 1,101
| 2.640625
| 3
|
[] |
no_license
|
# TODO: Add to package groups.
# old groups:
# PKGBUILD for the blackarch rebuild of ecryptfs-utils.
pkgname=ecryptfs-utils-blackarch
_realname=ecryptfs-utils
pkgver=103
pkgrel=1
arch=('i686' 'x86_64')
pkgdesc="Enterprise-class stacked cryptographic filesystem for Linux"
url="https://launchpad.net/ecryptfs"
license=('GPL')
makedepends=('swig' 'intltool' 'gettext' 'python2')
depends=('keyutils' 'nss' 'openssl')
optdepends=('python2: for python module')
source=("http://launchpad.net/ecryptfs/trunk/${pkgver}/+download/${_realname}_${pkgver}.orig.tar.gz"
"${_realname}_${pkgver}.orig.tar.gz.sig::http://launchpad.net/ecryptfs/trunk/${pkgver}/+download/..-${_realname}_${pkgver}.orig.tar.gz.asc")
options=(!libtool)
md5sums=('39929d850edd24b175ff0c82722e0de1'
'1fc46fb18d662315c8d4cb13b0e618c0')
groups=(blackarch)
conflicts=(ecryptfs-utils)
provides=(ecryptfs-utils)
build() {
cd "$srcdir/${_realname}-${pkgver}"
./configure --prefix=/usr --with-pamdir=/usr/lib/security PYTHON=python2
make
}
package() {
cd "$srcdir/${_realname}-${pkgver}"
make DESTDIR="$pkgdir/" install
# mount.ecryptfs_private must be setuid to let users mount their own dirs.
chmod +s "$pkgdir"/sbin/mount.ecryptfs_private
}
| true
|
0500b4626adb63cb66a12a55c963f405102ad4aa
|
Shell
|
rolandoquiroz/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/3-until_holberton_school
|
UTF-8
| 143
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Print the string "Holberton School" on ten separate lines using `until`.
count=0
until [ "$count" -ge 10 ]; do
echo "Holberton School"
count=$((count + 1))
done
| true
|
460a6df193dcd57cbaf4c3098c5ca459e53416c7
|
Shell
|
ojalaquellueva/gnrs
|
/gnrs_export.sh
|
UTF-8
| 3,208
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#########################################################################
# Purpose: Exports GNRS results
#
# Usage: ./gnrs_export.sh
#
# Authors: Brad Boyle (bboyle@email.arizona.edu)
# Date created: 12 June 2017
#########################################################################
: <<'COMMENT_BLOCK_x'
COMMENT_BLOCK_x
######################################################
# Set basic parameters, functions and options
######################################################
# Get local working directory
#DIR_LOCAL="${BASH_SOURCE%/*}"
DIR_LOCAL="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
if [[ ! -d "$DIR_LOCAL" ]]; then DIR_LOCAL="$PWD"; fi
# $local = name of this file
# $local_basename = name of this file minus '.sh' extension
# $local_basename should be same as containing directory, as
# well as local data subdirectory within main data directory,
local=`basename "${BASH_SOURCE[0]}"`
local_basename="${local/.sh/}"
# Set parent directory if running independently
if [ -z ${master+x} ]; then
DIR=$DIR_LOCAL
fi
# Set includes directory path, relative to $DIR
includes_dir=$DIR"/includes"
# Load startup script for local files
# Sets remaining parameters and options, and issues confirmation
# and startup messages
source "$includes_dir/startup_local.sh"
# Pseudo error log, to absorb screen echo during import
tmplog="/tmp/tmplog.txt"
echo "Error log
" > $tmplog
# Set current script as master if not already source by another file
# master = name of this file.
# Tells sourced scripts not to reload general parameters and command line
# options as they are being called by another script. Allows component
# scripts to be called individually if needed
if [ -z ${master+x} ]; then
master=`basename "$0"`
fi
#########################################################################
# Main
#########################################################################
: <<'COMMENT_BLOCK_1'
COMMENT_BLOCK_1
############################################
# Export results from user_data to data
# directory sa CSV file
############################################
echoi $e -n "Dumping gnrs results to data directory as file '$results_filename'..."
gnrs_results_file=$data_dir_local"/"$results_filename
# NOTE(review): `rm` without -f errors when the file does not yet exist
# (first run) -- consider rm -f.
rm "${gnrs_results_file}"
sql_results="SELECT * FROM user_data WHERE job='${job}' ORDER BY user_id, poldiv_full"
# NOTE(review): psql's \copy normally takes a parenthesized query, not a
# quoted string -- verify this form works on the target psql version.
PGOPTIONS='--client-min-messages=warning' psql -d gnrs -q << EOF
\set ON_ERROR_STOP on
\copy '${sql_results}' TO '${gnrs_results_file}' csv header
EOF
echoi $i "done"
############################################
# Clear user data tables
############################################
echoi $e -n "Clearing user data for this job from database..."
PGOPTIONS='--client-min-messages=warning' psql -d gnrs --set ON_ERROR_STOP=1 -q -v job=$job -f $DIR_LOCAL/sql/clear_user_data.sql
echoi $e "done"
######################################################
# Report total elapsed time and exit if running solo
######################################################
if [ -z ${master+x} ]; then source "$DIR/../includes/finish.sh"; fi
######################################################
# End script
######################################################
| true
|
eb36b9d0029a03122681ffda484fde5965d3719b
|
Shell
|
swoogles/Personal
|
/UbuntuTomcat.sh
|
UTF-8
| 1,156
| 2.734375
| 3
|
[] |
no_license
|
# One-off workstation setup: cgdb deps, Java, Tomcat, Grails, Postgres
# source, and a couple of test jars.
#cgdb Setup
sudo apt-get install libreadline-dev
sudo apt-get install ncurses-dev
# Install Java
sudo apt-get install default-jdk
# Set JAVA_HOME variable
export JAVA_HOME=/usr/lib/jvm/default-java
# Get tomcat (extracted tree ends up under /usr/share/tomcat7/apache-tomcat-7.0.40)
wget http://mirror.cc.columbia.edu/pub/software/apache/tomcat/tomcat-7/v7.0.40/bin/apache-tomcat-7.0.40.tar.gz
tar xvzf apache-tomcat-7.0.40.tar.gz
mkdir /usr/share/tomcat7
mv apache-tomcat-7.0.40 /usr/share/tomcat7
# wget http://download.oracle.com/otn-pub/java/jdk/7u21-b11/jdk-7u21-linux-x64.tar.gz
# tar xvzf jdk-7u21-linux-x64.tar.gz
# Installing Grails (Optional)
# From: http://grails.org/Installation
# NOTE(review): the clone lands in the CURRENT directory, but GRAILS_HOME
# points under /usr/share/grails -- the two do not line up as written.
git clone git://github.com/grails/grails-core.git
mkdir /usr/share/grails
cd /usr/share/grails
export GRAILS_HOME=/usr/share/grails/grails-core
export PATH=$PATH:${GRAILS_HOME}/bin
# `chdir` is not a bash builtin (it silently failed); `cd` is the fix.
cd "$GRAILS_HOME"
wget http://ftp.postgresql.org/pub/source/v9.2.3/postgresql-9.2.3.tar.gz
tar xvzf postgresql-9.2.3.tar.gz
useradd -d /home/willy -m willy
wget http://search.maven.org/remotecontent?filepath=junit/junit/4.11/junit-4.11.jar
wget http://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar
| true
|
e49c158e2a2221496bd8e823259ecf5418c1bbb0
|
Shell
|
leasual/third_party
|
/ffmpeg/android/build_android_arm.sh
|
UTF-8
| 933
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Cross-compile ffmpeg (shared libs, with libx264) for Android ARM using
# the NDK toolchain.  Requires NDKROOT to point at the Android NDK.
SYSROOT=${NDKROOT}/platforms/android-19/arch-arm/
TOOLCHAIN=${NDKROOT}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64
# Prebuilt x264 install prefix (headers + libs) for the same ABI.
X264=/home/bronze/Documents/android/build/x264/arm
# Configure, build and install ffmpeg into $PREFIX for one ABI.
function build_one {
./configure \
--prefix=$PREFIX \
--enable-shared \
--disable-static \
--disable-doc \
--disable-avdevice \
--disable-doc \
--disable-symver \
--enable-pthreads \
--enable-gpl \
--enable-libx264 \
--enable-encoder=libx264 \
--cross-prefix=$TOOLCHAIN/bin/arm-linux-androideabi- \
--target-os=linux \
--arch=arm \
--enable-cross-compile \
--sysroot=$SYSROOT \
--extra-cflags="-I$X264/include -Os -fpic $ADDI_CFLAGS" \
--extra-ldflags="$ADDI_LDFLAGS -L$X264/lib" \
$ADDITIONAL_CONFIGURE_FLAG
make clean
make
make install
}
CPU=arm
PREFIX=/home/bronze/Documents/android/build/ffmpeg/$CPU
ADDI_CFLAGS="-marm"
build_one
| true
|
722aa949319961fc633bcf969853a12692f30a0a
|
Shell
|
TomasDSD/kibana-datasweet-formula
|
/build.sh
|
UTF-8
| 583
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the datasweet_formula Kibana plugin for one or more Kibana
# versions and collect the renamed zips under releases/.
set -e
rm -Rf releases
mkdir -p releases
# Build the plugin zip for Kibana version $1 and move it into releases/,
# tagging the filename with the target Kibana version.
function build {
npm run build -- -k "$1"
for old in build/datasweet_formula-*; do
# Skip the literal pattern when the glob matched nothing (the original
# would try to mv the unexpanded pattern).
[ -e "$old" ] || continue
new=$(echo "$old" | sed -E "s/(^build)(\/datasweet_formula-.+)\.zip$/releases\2_kibana-$1.zip/")
mv -v "$old" "$new"
done
}
# Optional extra version from the command line, then the pinned defaults.
if [ -n "$1" ]; then
build "$1"
fi
build 6.2.4
build 6.2.3
# build 6.2.2
# build 6.2.1
# build 6.2.0
# build 6.1.3
# build 6.1.2
# build 6.1.1
# build 6.1.0
# build 6.0.1
# build 6.0.0
# build 5.6.7
# build 5.6.6
# build 5.6.5
# build 5.6.4
# build 5.6.3
# build 5.6.2
# build 5.6.1
# build 5.6.0
| true
|
f07a1e055fd2bc72f173de19d1322ff8db0c40d4
|
Shell
|
BackupTheBerlios/packware
|
/pwbuild/xap/psi/psi.pwbuild
|
UTF-8
| 923
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/sh
# pwbuild recipe for psi 0.9.2; the _* helpers come from the pwbuilder
# framework loaded by `_pwbuilder`.
name="psi"
version="0.9.2"
build="1"
_pwbuilder 0.6.3
_doc -d libpsi/iconset/ICONSET-HOWTO
_source -m e29f90aea7d839f2a70e4a6e77e95a70 -s 1154392 -f
_patch -p 0 translations
_patch no_online_status
_patch no_default_status_text
_patch status_history
_patch status_indicator
_patch offline_status
# MNG icon support only when libmng is available on the build host.
_have_libmng && _patch enable-mng
_c_opt --prefix=/usr
_configure
( cd ${src1}/src ; qmake -o Makefile )
_qconf_fix ${src1}/src/Makefile
# If you get a 'double const' error, uncomment these lines:
#
# sed "s}\$(QTDIR)/bin/uic}${src}/uic-wrapper}" ${src1}/src/Makefile > ${tmp}/uicfix
# cat ${tmp}/uicfix > ${src1}/src/Makefile
_patch opt
_make
_install
# Ship COPYING/README from the shared doc dir instead of duplicating them.
rm ${pkg}/usr/share/psi/{COPYING,README}
ln -s ${docdir}/COPYING ${pkg}/usr/share/psi
ln -s ${docdir}/README ${pkg}/usr/share/psi
# $Log: psi.pwbuild,v $
# Revision 1.1 2004/07/30 20:34:54 swiergot
# - Initial commit.
# - Version 0.9.2/20040730.
#
| true
|
b25fc40c209785810bd6117846c6b6ef33004fa6
|
Shell
|
EmilyShepherd/dotfiles
|
/services/user
|
UTF-8
| 432
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
# runit-style service: set up the XDG environment for user $USER, ensure
# the runtime dir exists, then run the user's runsvdir supervisor.
export USER=emily
export HOME=$(eval echo ~$USER)
# NOTE(review): /run/user/ is conventionally keyed by numeric UID, not
# username -- confirm this deliberate use of $USER.
export XDG_RUNTIME_DIR=/run/user/$USER
export XDG_CONFIG_HOME=$HOME/.config
export XDG_DATA_HOME=$HOME/.local/share
export XDG_CACHE_HOME=$HOME/.cache
export SVDIR=$HOME/.local/services
if ! [ -d $XDG_RUNTIME_DIR ]
then
mkdir -m 700 $XDG_RUNTIME_DIR
chown $USER:$USER $XDG_RUNTIME_DIR
fi
. /etc/profile.d/locale.sh
cd $HOME
# Replace this shell with the user's service supervisor.
exec su -c "runsvdir $SVDIR" emily
| true
|
36641d8d419cc34309f5d0995723d560d044acd1
|
Shell
|
kif/edna
|
/bioSaxsv1/bin/stop-tango-server
|
UTF-8
| 177
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop the tango-EdnaDS (DAU instance) server if it is running.
# Find the PID(s) of the matching process; the grep -v grep drops our own
# pipeline from the ps listing.
testit=$(ps aux | grep tango-EdnaDS | grep DAU | grep -v grep | awk '{print $2}')
if [ -z "$testit" ]; then
echo "edna is already stopped"
else
# Intentionally unquoted: $testit may hold several whitespace-separated PIDs.
kill $testit
fi
| true
|
e0af6e98ca6f3baaa6fa063717872a267c11e126
|
Shell
|
sk4zuzu/tf-k8s-libvirt
|
/terraform/k8s-libvirt/k8s-master/remote-exec/03-haproxy.sh
|
UTF-8
| 900
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Generate an haproxy config that load-balances the k8s apiservers of
# nodes ${_PREFIX}1..${_PREFIX}${_COUNT}, start haproxy, and alias the
# local balancer as "local-lb" in /etc/hosts.  Idempotent via $0.done.
set -o errexit -o nounset -o pipefail
set -x
[ -f "$0.done" ] && exit 0
# One "server" line per node, resolved to its IPv4 address; the backtick
# command substitution captures the whole loop's output.
BACKEND_SERVERS=`
for (( k = 1; k <= ${_COUNT}; k++ )); do
IPV4="$(getent ahostsv4 ${_PREFIX}$k | head -n1 | cut -d' ' -f1)"
echo " server ${_PREFIX}$k $IPV4:6443 check port 6443"
done
`
mkdir -p /etc/haproxy/ && cat >/etc/haproxy/haproxy.cfg <<EOF
global
log /dev/log local0
log /dev/log local1 notice
daemon
defaults
log global
retries 3
maxconn 2000
timeout connect 5s
timeout client 50s
timeout server 50s
frontend k8s
mode tcp
bind 0.0.0.0:7878
default_backend k8s
backend k8s
mode tcp
balance roundrobin
option tcp-check
$BACKEND_SERVERS
EOF
systemctl daemon-reload && systemctl start haproxy
# NOTE(review): `hostname -i` can return several addresses -- confirm a
# single-address setup here.
if ! grep local-lb /etc/hosts; then
echo "$(hostname -i) local-lb" >>/etc/hosts
fi
touch "$0.done"
# vim:ts=4:sw=4:et:syn=sh:
| true
|
d62a96798ed29b4c3ff1ba6f86650999e5670d8c
|
Shell
|
BofengDuke/shell_script
|
/chapter7/network.sh
|
UTF-8
| 372
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# ifconfig
# Extract the IP address of eth0.
ifconfig eth0 | egrep -o "inet [^ ]*" | grep -o "[0-9.]*"
# Show the name servers currently assigned to this system.
cat /etc/resolv.conf
# List the open ports on the system and the services behind them.
lsof -i
# Extract that information in two steps (port numbers only, deduplicated).
lsof -i | grep ":[0-9]\+->" -o | grep "[0-9]\+" -o | sort | uniq
# netstat -tnp also lists open ports and their services.
| true
|
22bd0b3efb8380ecde56b86b9b3df936de68ed2c
|
Shell
|
Ram-Z/PKGBUILDs
|
/connman-git/PKGBUILD
|
UTF-8
| 1,248
| 2.609375
| 3
|
[] |
no_license
|
# Maintainer: Samir Benmendil <ram-z[at]chakra-project[dot]org>
# Contributor: aur2ccr (http://ddg.gg/?q=!ccr+aur2ccr&t=chakra)
# Contributor: tailinchu <use_my_id at gmail dot com>
# VCS PKGBUILD: builds connman from the upstream git HEAD.
pkgname=connman-git
pkgver=1.24.89.gd60623c
pkgrel=1
pkgdesc="Wireless LAN network manager (git version)"
arch=('i686' 'x86_64')
url="http://connman.net/"
license=('GPL2')
depends=('dbus-core' 'iptables' 'glib2' 'wpa_supplicant' 'gnutls' 'pptpclient')
makedepends=('git')
conflicts=('connman')
provides=('connman')
source=("git://git.kernel.org/pub/scm/network/connman/connman.git")
md5sums=('SKIP')
_gitroot="connman"
# Derive the package version from `git describe` (dashes are not allowed
# in pkgver, so they become dots).
pkgver () {
cd "$srcdir/$_gitroot"
git describe --always | sed 's|-|.|g'
}
build() {
cd "$srcdir/$_gitroot"
./bootstrap
./configure \
--prefix=/usr \
--sysconfdir=/etc \
--localstatedir=/var \
--bindir=/usr/bin \
--sbindir=/usr/bin \
--with-systemdunitdir=/usr/lib/systemd/system \
--enable-threads \
--enable-pptp \
--enable-polkit \
--enable-client
make
}
package() {
cd "$srcdir/$_gitroot"
make PREFIX=/usr DESTDIR="$pkgdir" install
install -Dm755 "$srcdir/$_gitroot/client/connmanctl" "$pkgdir/usr/bin/connmanctl"
# Rewrite sbin paths to bin inside installed systemd units.
find "$pkgdir/usr" -name \*.service -exec sed -i 's/s\(bin\)/\1/' {} +
}
| true
|
ba2d1620ef5e12108eb78565ac893d64df38b905
|
Shell
|
hiram-labs/startup-in-a-box
|
/service-000-gcloud/src/scripts/shell/gcloud/init.sh
|
UTF-8
| 785
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# set -x
# set -euo pipefail
# Authenticate gcloud with a service-account key, point it at the target
# project/region/zone, and enable the APIs this stack needs.
# parse_general_flags is provided by the calling framework.
parse_general_flags "$@"
PROJECT_ZONE=${PROJECT_ZONE:-"europe-west2-b"}
PROJECT_REGION=${PROJECT_REGION:-"europe-west2"}
# Default the project id to the one embedded in the key file.
PROJECT_ID=${PROJECT_ID:-$(jq -r '.project_id' <"${GCLOUD_SECRETS}"/gcloud-key.json)}
gcloud auth activate-service-account --key-file="${GCLOUD_SECRETS}"/gcloud-key.json
gcloud config set disable_prompts true
gcloud config set project "$PROJECT_ID"
gcloud config set compute/region "$PROJECT_REGION"
gcloud config set compute/zone "$PROJECT_ZONE"
gcloud services enable compute.googleapis.com
gcloud services enable container.googleapis.com
# gcloud services enable cloudbuild.googleapis.com
gcloud services enable cloudresourcemanager.googleapis.com
# Show the resulting configuration and any existing clusters.
gcloud config configurations list
gcloud container clusters list
| true
|
7f78b31f69bdbfd160380e846dd66a4f76be6106
|
Shell
|
RoJoHub/lightsocks
|
/docker/entrypoint.sh
|
UTF-8
| 128
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint: run the given command verbatim, or fall back to
# the module named by $LIGHT_MODULE (default: lightsocks-server).
LIGHT_MODULE=${LIGHT_MODULE:-"lightsocks-server"}
if [ "$1" != "" ]; then
    exec "$@"
else
    # exec so the service replaces this shell as PID 1 and receives
    # container stop signals directly (the original left the shell in
    # between, swallowing SIGTERM).  Intentionally unquoted so
    # LIGHT_MODULE may carry arguments.
    exec $LIGHT_MODULE
fi
| true
|
1b48c57477420cd082b3bae970cf54aae4b56ed7
|
Shell
|
mrosata/kleos
|
/errno-vars.conf
|
UTF-8
| 421
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
######################################################################
#### Error Numbers for Kleos Scripts
#### - It should be safe to run this from any of the scripts even
#### if for some reason they set their own values for the various
#### error numbers.
####
# Each variable defaults against ITS OWN name.  The original expanded
# ERRNO_NOSUDO in every default, so once NOSUDO was set all four codes
# collapsed to 101.
ERRNO_NOSUDO=${ERRNO_NOSUDO:-101}
ERRNO_BADKEY=${ERRNO_BADKEY:-102}
ERRNO_USER=${ERRNO_USER:-103}
ERRNO_ROOT=${ERRNO_ROOT:-104}
| true
|
e5ca63d51defa3500d98d714ccebe5a8530ef003
|
Shell
|
bcgov/gwells
|
/app/scripts/gwells-deploy.sh
|
UTF-8
| 1,976
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Mon Nov 6 15:03:49 2017 GW Shell script run by 'oc exec' on OpenShift
# initiated by Jenkins job, which connects to the application server
# pod (gwells-nn-xxxxx which is STATUS = 'Running'):
# oc exec gwells-nnn-xxxx $VIRTUAL_ENV/src/openshift/scripts/gwells-deploy.sh
#
# This deploy is triggered on all three envionments (moe-gwells-dev,
# moe-gwells-test, moe-gwells-prod)
#
# Example: oc exec gwells-97-69b7z /opt/app-root/src/openshift/scripts/gwells-deploy.sh
#
# If run on local Developer workstation, ensure that you have Environment variables set
# for $DATABASE_SERVICE_NAME, $DATABASE_PASSWORD, $DATABASE_NAME, $DATABASE_USER
#
# Optionally, set $DB_REPLICATE (None|Subset|Full).
#
# Example: ./gwells-deploy.sh
# Sensitive, keep before 'set -x'
#
export PGPASSWORD=$DATABASE_PASSWORD
APP_SOURCE_DIR=${APP_SOURCE_DIR:-"${APP_ROOT}/src"}
# Halt conditions, verbosity and field separator
#
set -xeuo pipefail
IFS=$'\n\t'
# NOTE(review): these two `ls` calls look like leftover debugging; with
# set -e a missing /app would abort the deploy here.
ls $APP_SOURCE_DIR
ls /app
# Python migrate table changes
#
echo "Post-Deploy: Python migration"
cd $APP_SOURCE_DIR/backend/
python manage.py migrate
# Create additional DB objects (e.g. spatial indices, stored procedures)
#
echo "Post-Deploy: SQL imports"
# 2018-SEP-25 GW Aquifers CodeWithUs
# Load the aquifer mapping-year crosswalk into an unlogged staging table.
cd $APP_SOURCE_DIR/database/scripts/aquifers/
psql -X --set ON_ERROR_STOP=on -h $DATABASE_SERVICE_NAME -d $DATABASE_NAME -U $DATABASE_USER << EOF
DROP TABLE IF EXISTS xform_aquifers;
CREATE unlogged TABLE IF NOT EXISTS xform_aquifers (
aquifer_id integer,mapping_year integer);
\copy xform_aquifers FROM 'xforms-aquifers.csv' HEADER DELIMITER ',' CSV
EOF
cd $APP_SOURCE_DIR/database/scripts/wellsearch/
psql -X --set ON_ERROR_STOP=on -h $DATABASE_SERVICE_NAME -d $DATABASE_NAME -U $DATABASE_USER -f \
post-deploy.sql
# Python related portion of post-deploy
#
echo "Post-Deploy: Python tasks"
cd $APP_SOURCE_DIR/backend/
python manage.py post-deploy
# Success!
#
echo "Post-Deploy: completed successfully"
| true
|
927a0cfd75ba1da8970f35d80152329b507276e0
|
Shell
|
usr42/docker-selenium
|
/xvfb.init
|
UTF-8
| 2,032
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
### BEGIN INIT INFO
# Provides: xvfb
# Required-Start:
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts xvfb
# Description: Starts X Virtual Framebuffer server
### END INIT INFO
DESC="Starts xvfb"
DAEMON=/usr/bin/Xvfb
NAME=xvfb
# Display :10, access control disabled (-ac).
DAEMON_OPTIONS=":10 -ac"
PIDFILE=/tmp/$NAME.pid
case "$1" in
start)
# Refuse to start twice based solely on the pidfile's existence.
if [ -f "$PIDFILE" ];then
echo "${NAME} Service already running" >&2
exit 1
fi
printf "%-50s" "Starting $NAME..."
# Launch in the background and capture the PID in one subshell.
PID=`$DAEMON $DAEMON_OPTIONS > /dev/null 2>&1 & echo $!`
if [ -z $PID ]; then
printf "%s\n" "Fail"
else
echo $PID > $PIDFILE
printf "%s\n" "Ok"
fi
# Detach all background jobs so they survive this shell's exit.
disown -ar
;;
stop)
printf "%-50s" "Stopping $NAME"
if [ -f $PIDFILE ]; then
PID=`cat $PIDFILE`
# NOTE(review): `ps axf | grep ${PID}` matches the PID anywhere on a
# line (substrings, other columns); `ps -p "$PID"` or `kill -0` would
# be exact - confirm before changing an init script in production.
if [ -z "`ps axf | grep ${PID} | grep -v grep`" ]; then
printf "%s\n" "Process already dead."
rm -f $PIDFILE
exit 0
fi
# Polite TERM first; escalate to KILL if still alive half a second later.
kill $PID
sleep 0.5
if [ -n "`ps axf | grep ${PID} | grep -v grep`" ]; then
printf "%s\n" "Fail"
printf "%-50s" "Stopping (kill -9) $NAME"
kill -9 $PID
sleep 0.5
if [ -n "`ps axf | grep ${PID} | grep -v grep`" ]; then
printf "%s\n" "Fail"
echo "$NAME still running. Giving up..."
exit 1
else
printf "%s\n" "Ok"
fi
else
printf "%s\n" "Ok"
fi
rm -f $PIDFILE
else
echo "pidfile not found"
fi
;;
restart|reload)
# Re-invoke this script rather than calling the functions directly.
$0 stop
$0 start
RETVAL=$?
;;
status)
printf "%-50s" "Checking $NAME..."
if [ -f $PIDFILE ]; then
PID=`cat $PIDFILE`
if [ -z "`ps axf | grep ${PID} | grep -v grep`" ]; then
printf "%s\n" "Process dead but pidfile exists"
else
echo "Running"
fi
else
printf "%s\n" "Service not running"
fi
RETVAL=$?
;;
*)
echo $"Usage: $0 (start|stop|restart|reload|status)"
exit 1
esac
# NOTE(review): RETVAL is only assigned in the restart/status arms; after
# start/stop this expands empty and `exit` returns the last command's
# status instead - presumably benign, but confirm intended.
exit $RETVAL
| true
|
0cff8eb58692f20b938d8b642ec0e0fb8469c5d2
|
Shell
|
ulises-jeremias/aa2020unlp-covid-19-xray
|
/bin/docker
|
UTF-8
| 2,277
| 4.09375
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
## Copyright (C) 2020 Ulises Jeremias Cornejo Fandos
## Licensed under MIT
##
## @script.name [OPTION] ARGUMENTS...
##
## Options:
##     -h, --help                      Help.
##     -b, --build                     Build docker image
##
##         --tag=TAG_NAME
##         --log-file=LOG_FILE_PATH    Logs file path, is /tmp/install_progress_log_$(date +'%m-%d-%y_%H:%M:%S').txt by default.
##
# NOTE(review): shebang says sh, but the script uses bash-only features
# ([[ ]], arrays, `type -p`); it presumably runs under bash in practice -
# confirm, or change the shebang.
ROOT=$(dirname $0)
# opts.sh parses the ## header above into --flag variables (e.g. $build, $tag).
source "${ROOT}/../scripts/opts/opts.sh" || exit
source "${ROOT}/../scripts/logs.sh" || exit
#==========================================
# Default argument values and preprocessing
#==========================================
time_str=$(date +'%m-%d-%y_%H:%M:%S')
log_file=${log_file:-"/tmp/install_progress_log_${time_str}.txt"}
# Run containers as the invoking user so created files aren't root-owned.
USER_FLAG="-u $(id -u):$(id -g)"
APP_NAME="aap2020"
[ ! -f "${log_file}" ] && touch ${log_file}
tag=${tag:-"latest"}
# creates docker image if it doesn't exists
docker_ini() {
if ! type -p docker > /dev/null; then
log_failed "Docker is not installed in this system" ${log_file}
exit -1
fi
# Hash requirements.txt so a dependency change forces a rebuild
# (macOS ships `md5`, Linux ships `md5sum`).
if type -p md5 > /dev/null; then
lockSum=$(md5 -r requirements.txt | awk '{ print $1 }')
else
lockSum=$(md5sum -t requirements.txt | awk '{ print $1 }')
fi
IMAGE_NAME="${APP_NAME}-${lockSum}:${tag}"
# Split "name:tag" into an array for the existence check below.
IMAGE_NAME_ARRAY=(${IMAGE_NAME//:/ })
# Build when --build was passed or the image/tag is not present locally.
if [[ -n "${build}" ]] || [[ "$(docker images ${IMAGE_NAME_ARRAY[0]} | grep ${IMAGE_NAME_ARRAY[1]} 2> /dev/null)" = "" ]]; then
docker build -f docker/tf-py3-jupyter.Dockerfile -t "${IMAGE_NAME}" \
--build-arg USERNAME=${USER} \
--build-arg DOCKER_ENV=${tag} \
.
fi
}
# docker run
docker_run() {
runtime=""
# GPU tags get the NVIDIA runtime.
if [[ $tag == *"gpu"* ]]; then
runtime="--gpus all"
fi
docker run --rm -it -e DISPLAY=:${XPORT} \
-v "$(pwd)":"/home/${USER}/${APP_NAME}" \
-v "$(pwd)/data":"/tf/data" \
-v "$(pwd)/src/notebooks":"/tf/notebooks" \
-w "/home/${USER}" \
-p 6006:6006 -p 8888:8888 \
${runtime} \
${USER_FLAG} \
"${IMAGE_NAME}"
exit
}
# test process
docker_ini
docker_run
# Help shown by default
# NOTE(review): docker_run ends with `exit`, so the two lines below are
# unreachable dead code - presumably leftover from the opts.sh template.
[[ -z "$documentation" ]] && parse_documentation
echo "$documentation"
| true
|
b8d158b24494418f92a65e47d84f025406bd2b65
|
Shell
|
xprazak2/foreman-infra
|
/puppet/modules/jenkins_job_builder/files/theforeman.org/scripts/gemset_cleanup.sh
|
UTF-8
| 227
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Delete the per-executor RVM gemset used by a Jenkins job.
# Jenkins provides $JOB_NAME and $EXECUTOR_NUMBER; $ruby may override the
# Ruby version (defaults to 2.0.0).
[ -z "$ruby" ] && ruby=2.0.0

# Clean gemset and database
# Load RVM into this shell.
. /etc/profile.d/rvm.sh

# Gemset name: first path component of the job name plus the executor number.
# All expansions quoted so job names with spaces/globs can't word-split.
gemset=$(echo "${JOB_NAME}" | cut -d/ -f1)-${EXECUTOR_NUMBER}
rvm use "ruby-${ruby}@${gemset}"
rvm gemset delete "${gemset}" --force
exit 0
| true
|
cd026bba0d408771574cd4d709bd726ea30372ec
|
Shell
|
dauer/dotfiles
|
/bin/show-no-merge.sh
|
UTF-8
| 305
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Shows Git branches not merged into master for projects in current folder
#
# For each immediate subdirectory that is a git repo, print its unmerged
# branches followed by the directory name.
for dir in ./*; do (
    # Subshell: the `cd` below cannot leak into the next iteration.
    # FIX: "$dir" is quoted - the original `[ -d $dir ]` broke (test error
    # or wrong result) on directory names containing spaces.
    if [ -d "$dir" ]; then
        cd "$dir" &&
        if [ -d '.git' ]; then
            # $() instead of backticks; same "print even if empty" behavior.
            branch=$(git branch --no-merge) &&
            echo -e "$branch \t $dir";
        fi
    fi
) done
| true
|
0f45f5753f0b1a4e99dbf55207608387ce9c5e8d
|
Shell
|
skellet0r/fzf-eip
|
/eip.sh
|
UTF-8
| 461
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
# Fuzzy-select an Ethereum Improvement Proposal and open it on GitHub.
# Requires fzf, a browser command in $FZF_BROWSER, and a cached index at
# $FZF_EIP_HOME/.cache/eip-index whose first column is the EIP number.
function eip () {
    URL="https://github.com/ethereum/EIPs/blob/master/EIPS/eip"
    # Let the user pick an index line; awk keeps only the EIP number.
    eip_number=$(
        fzf --no-sort \
            --prompt 'Ethereum Improvement Proposal (EIP) >' \
            --preview='echo {}' --preview-window=down:1:wrap \
            --height=80% <"$FZF_EIP_HOME/.cache/eip-index" \
            | awk '{ print $1 }'
    )
    # do nothing if nothing selected
    [[ -z $eip_number ]] && return
    # Launch the browser in a backgrounded subshell so the shell prompt
    # returns immediately and job-control messages are suppressed.
    (
        "${FZF_BROWSER}" "${URL}-${eip_number}.md" > /dev/null 2>&1 &
    )
}
| true
|
4849a192c7736a3b85d783666760f1933756dd6b
|
Shell
|
yrrodriguezb/bash-shell
|
/script/conceptos/16_case.sh
|
UTF-8
| 230
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
# Print the season matching the current month's abbreviation, then print
# today's date as dd/mm/yyyy. (Non-English locales may match no season.)
declare -l m=$(date +%b)   # -l folds the value to lowercase on assignment

case "$m" in
    dec | jan | feb) echo "Winter" ;;
    mar | apr | may) echo "Spring" ;;
    jun | jul | aug) echo "Summer" ;;
    sep | oct | nov) echo "Autum" ;;
esac

date +%d/%m/%Y
| true
|
ebddf74d5d18d0af373dbeebc8232728caf3bfb9
|
Shell
|
Interns14/EVAL-A-Group-Peer-Based-Quizzing-System
|
/Server/server/Files/standard6/Marathi_while.sh
|
UTF-8
| 92
| 3.109375
| 3
|
[] |
no_license
|
# Print "hello" 101 times (counter runs from 0 through 100 inclusive).
counter=0
while [ $counter -le 100 ]
do
    echo "hello"
    # FIX: arithmetic expansion instead of forking `expr` on every
    # iteration; same result, no external process.
    counter=$((counter + 1))
done
| true
|
4bd5d9c0c32abcef879adcfceca35c472ce7dce5
|
Shell
|
ZEDOSPROJECT/ZED-UI-WEB
|
/Backend/SERVER/API/SYSTEM/SETTINGS/USER/OOBEFinish.sh
|
UTF-8
| 724
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Out-of-box-experience finish: create a user account, seed its home
# directory from the template `zed` user, install the openbox autostart,
# and reboot. Must run as root.
#   $1 - username   $2 - password
username=$1
password=$2
# NOTE(review): `useradd -p` expects an already-encrypted hash; passing the
# plaintext here produces an unusable hash. The `passwd` pipe below re-sets
# the password interactively, so this is presumably redundant - confirm.
useradd -m $username -p $password -U
usermod -s /bin/bash $username
echo -e "${password}\n${password}\n" | passwd $username
# Clone the standard profile folders from the template user.
cp -a /home/zed/Desktop /home/$username/Desktop
cp -a /home/zed/Documents /home/$username/Documents
cp -a /home/zed/Downloads /home/$username/Downloads
cp -a /home/zed/Music /home/$username/Music
cp -a /home/zed/Pictures /home/$username/Pictures
cp -a /home/zed/Public /home/$username/Public
cp -a /home/zed/Templates /home/$username/Templates
cp -a /home/zed/Videos /home/$username/Videos
cp -a /home/zed/ZED-UI-WEB /home/$username/ZED-UI-WEB
# Hand ownership of everything copied to the new user.
chown -R $username:$username /home/$username/
cp /home/zed/.config/openbox/autostart /etc/xdg/openbox/autostart
reboot
| true
|
4800d47a212b2a65b79e0248cd55b2d4ae0f8435
|
Shell
|
rorlab/dotfiles
|
/aliases/editor.sh
|
UTF-8
| 323
| 3.5625
| 4
|
[] |
no_license
|
# `editlast` opens the last modified file in the editor
#
# Usage: editlast [DIR]   (defaults to the current directory)
# Skips dotfiles and anything under ./tmp or ./log, then opens the most
# recently modified remaining file in $EDITOR (vi by default).
# NOTE: `stat -c` is GNU coreutils - on macOS use `gstat` or `stat -f`.
editlast() {
    FILE="$(
        /usr/bin/find "${1:-.}" -type f \
            -not -regex '\./\..*' \
            -not -regex '\./tmp/.*' \
            -not -regex '\./log/.*' \
            -exec stat -c '%Y %n' {} + |
        sort -n | tail -1 | cut -d ' ' -f 2-
    )"
    # FIX: the original terminated -exec with `{} +\;`, which is invalid
    # find syntax ("missing argument to -exec"), so FILE was always empty
    # and the editor opened a blank buffer. `{} +` batches files correctly.
    "${EDITOR:-vi}" "$FILE"
}
| true
|
a04091bd8aaae6b35e26e72a1f63c0353db2658c
|
Shell
|
wandersoncferreira/dotfiles-1
|
/.functions
|
UTF-8
| 1,568
| 3.421875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
# Personal helper functions: nix-built Android emulator, Unity APK install
# over adb, Mi Band watchface building under wine, and Raspberry Pi MIDI
# transfer. Expects $DOTFILES, $DEVICE_IP to be set by the environment.
ANDROID_EMULATOR_NIX_FOLDER="$DOTFILES/nix/android"
ANDROID_EMULATOR_CONFIG_FILE="$HOME/.android/avd/device.avd/config.ini"
# Build (first run) and launch the nix-managed Android emulator.
function android-emulator() {
    cd $ANDROID_EMULATOR_NIX_FOLDER
    if [ ! -f "$ANDROID_EMULATOR_CONFIG_FILE" ]; then
        # First run: boot the emulator briefly so it creates the AVD,
        # then link in the checked-in config file.
        nix-build "$ANDROID_EMULATOR_NIX_FOLDER/emulator.nix"
        $DOTFILES/nix/android/result/bin/run-test-emulator & pidsave=$!
        sleep 10
        kill $pidsave
        ln -sf "$DOTFILES/nix/android/emulator-config.ini" "$ANDROID_EMULATOR_CONFIG_FILE"
    fi
    nix-build "$ANDROID_EMULATOR_NIX_FOLDER/emulator.nix"
    $DOTFILES/nix/android/result/bin/run-test-emulator
}
# Reinstall and start a Unity game on the adb device at $DEVICE_IP.
#   $1 - APK base name (package com.gregcodes.$1, file ~/games-dev/apks/$1.apk)
function unity-apk-install() {
    adb connect $DEVICE_IP &&
    sleep 2 &&
    adb -s $DEVICE_IP uninstall com.gregcodes.$1 &&
    adb -s $DEVICE_IP install ~/games-dev/apks/$1.apk &&
    adb -s $DEVICE_IP shell am start -n com.gregcodes.$1/com.unity3d.player.UnityPlayerActivity
}
# Miband
# Build a watchface ($1 = project file) with the Windows tool under wine.
function miband-build() {
    TZ=Europe/Berlin wine ~/Documents/MiBand/MiBandWFTool_1.4.1/PaletteImageMode/WatchFace.exe $1
}
# Watch PNGs (and $1) for saves and rebuild the watchface on each change.
function miband-watch() {
    inotifywait -m -e close_write *.png $1 |
    while read -r filename event; do
        # Rebuild for 8-char filenames (presumably the numbered frame
        # images - confirm) or when the watched file itself changed.
        if [ ${#filename} -eq 8 ] || [ "$filename" = "$1" ]; then
            miband-build *.json
        fi
    done
}
# Raspberry
PI_IP="192.168.1.29"
# Copy a MIDI file to/from the Piano-LED-Visualizer songs folder on the Pi.
function midi-to-pi() {
    scp "$1" pi@$PI_IP:/home/pi/Piano-LED-Visualizer/Songs/"$2"
}
function midi-from-pi() {
    scp pi@$PI_IP:/home/pi/Piano-LED-Visualizer/Songs/"$1" "$2"
}
# Optional extra functions; silently skipped when the file is absent.
source $HOME/.gregflix-functions 2> /dev/null
| true
|
e643967973d3e70ac661f4a6452d9bb5268e03da
|
Shell
|
novemberde/node-awscli
|
/docker/docker-entrypoint.sh
|
UTF-8
| 928
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Entrypoint for a docker-CLI image: rewrite the container arguments to go
# through the `docker` client where that is clearly intended, then exec.
set -e

# first arg is `-f` or `--some-option`
# (a leading option means the user meant `docker <options...>`)
if [ "${1#-}" != "$1" ]; then
	set -- docker "$@"
fi

# if our command is a valid Docker subcommand, let's invoke it through Docker instead
# (this allows for "docker run docker ps", etc)
if docker help "$1" > /dev/null 2>&1; then
	set -- docker "$@"
fi

# if we have "--link some-docker:docker" and not DOCKER_HOST, let's set DOCKER_HOST automatically
if [ -z "$DOCKER_HOST" -a "$DOCKER_PORT_2375_TCP" ]; then
	export DOCKER_HOST='tcp://docker:2375'
fi

# Friendly warning (not a hard error) when a daemon is started in the
# client image; <<-'EOW' strips leading tabs and suppresses expansion.
if [ "$1" = 'dockerd' ]; then
	cat >&2 <<-'EOW'
	📎 Hey there!  It looks like you're trying to run a Docker daemon.
	You probably should use the "dind" image variant instead, something like:
	docker run --privileged --name some-overlay-docker -d docker:stable-dind --storage-driver=overlay
	See https://hub.docker.com/_/docker/ for more documentation and usage examples.
	EOW
	sleep 3
fi

# Replace this shell with the final command (keeps PID 1 semantics).
exec "$@"
| true
|
3e78ae005b4da3002db4ab4ad7c99ada7cdb22f7
|
Shell
|
soccin/ATAC-seq
|
/getGenomeBuildBAM.sh
|
UTF-8
| 1,678
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Identify the reference genome build of a BAM file by hashing its @SQ
# header lines (name/length columns) and matching against known builds.
#
# Usage: getGenomeBuild.sh BAM
# Prints the build name, or "unknown <md5>" for an unrecognized header.
if [ "$#" != "1" ]; then
    echo usage getGenomeBuild.sh BAM
    exit
fi

SAMTOOLS=$(which samtools)
# FIX: the original `[ $SAMTOOLS == "" ]` raised a test error when samtools
# was absent ($SAMTOOLS expanded to nothing) and the script blundered on;
# -z with quoting detects the missing binary reliably. `exit 1` replaces
# the non-portable `exit -1`.
if [ -z "$SAMTOOLS" ]; then
    echo samtools not in current path
    exit 1
fi

# Hash of the sorted @SQ records (first three columns) uniquely fingerprints
# the reference the BAM was aligned to. "$1" quoted for paths with spaces.
GENOME_MD5=$($SAMTOOLS view -H "$1" | egrep "^@SQ" | cut -f-3 | sort | md5sum - | awk '{print $1}')

case $GENOME_MD5 in
    b879c678e7fd80718cf20d10c6b846e4)
        # b37 gatk /ifs/depot/assemblies/H.sapiens/b37/b37.dict
        echo "b37"
        ;;
    117fce86b797081e0af6d69cbd94dcde)
        # b37 version used by DMP pipeline
        echo "b37_dmp"
        ;;
    5b4e380a6b4fc3494cfc66c917d41b37)
        # UCSC hg19 /ifs/depot/assemblies/H.sapiens/hg19/hg19.dict
        echo "hg19"
        ;;
    3d72c6961689390556ed2d5a33e66e17)
        # Main chromosomes only (used by cfDNA collaboration)
        echo "hg19-mainOnly"
        ;;
    933b376d936c265fc6b44c8bd19fc66d)
        # TCGA BAMs UR:ftp://ftp.ncbi.nih.gov/genbank/genomes/Eukaryotes/vertebrates_mammals/Homo_sapiens/GRCh37/special_requests/GRCh37-lite.fa.gz
        # AS:GRCh37-lite (b37-ish)
        echo "GRCh37-lite"
        ;;
    7f8c5323ff7e0ff6b5d40efe53eaf050)
        # BIC Xeno-graph genome
        echo "b37+mm10"
        ;;
    d660fd17a979374182d3ba8b6d76cac0)
        # UCSC mm10 /ifs/depot/assemblies/M.musculus/mm10/mm10.dict
        echo "mm10"
        ;;
    34839afd79d8b772037ed7a0e0a4f9c3)
        # UCSC mm10
        echo "mm10_hBRAF_V600E"
        ;;
    f9cd233a3d5c9540eece434c65f84f1c)
        # mm9 Full
        echo "mm9Full"
        ;;
    0835b244e0adb20253e5fa1c4ee58ec4)
        # mouse_GRCm38
        echo "GRC_m38"
        ;;
    8a300152df87118834c4268ab4b713aa)
        # Yeast hybrid sCer+sMik_IFO1815
        echo "sCer+sMik_IFO1815"
        ;;
    *)
        echo "unknown" $GENOME_MD5
        ;;
esac
| true
|
b5c9396eb9142ab0f0bae28bf36904972974633e
|
Shell
|
zk-st/genetics-sumstat-data
|
/ingest/eqtl_db_v1/1_transfer_to_gcs.sh
|
UTF-8
| 1,040
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Transfer eQTL summary-stat files from the EBI private FTP to GCS,
# splitting the gzipped tables and copying gene metadata alongside.
set -euo pipefail

# NOTE(review): `lftp ftp-private.ebi.ac.uk` opens an *interactive* session;
# the `set ...`/`login`/`mirror` lines that follow are lftp commands, not
# shell commands, and only take effect when typed at the lftp prompt. This
# file reads as a manual runbook rather than an unattended script - confirm
# before wiring it into automation.
lftp ftp-private.ebi.ac.uk
set ftp:ssl-force yes
set ftp:ssl-protect-data no
set ssl:verify-certificate false
login <redacted>
# Password: <redacted>
# Download to local
mirror upload/eqtls
# Remove permuted datasets
rm -f eqtls/*/*/*.permuted.txt.gz
# Split gzip (untested)
# Emits one split command per file; GNU parallel executes them 8 at a time.
for inf in raw/*/*/*.txt.gz; do
  echo pypy3 scripts/gzip_split.py --inf $inf --chunks 300 --header no_header --delete
done | parallel -j 8
# Copy to GCS
# NOTE(review): rsync's `-n` flag makes this a dry run - confirm whether the
# real sync is intended here.
gsutil -m rsync -rn eqtls/ gs://genetics-portal-raw/eqtl_db_v1/raw
# Copy gene meta data
wget -O - https://github.com/kauralasoo/RNAseq_pipeline/raw/master/metadata/gene_metadata/featureCounts_Ensembl_92_gene_metadata.txt.gz | zcat | gsutil cp - gs://genetics-portal-raw/eqtl_db_v1/raw/featureCounts_Ensembl_92_gene_metadata.txt
wget -O - https://github.com/kauralasoo/RNAseq_pipeline/raw/master/metadata/gene_metadata/HumanHT-12_V4_gene_metadata.txt.gz | zcat | gsutil cp - gs://genetics-portal-raw/eqtl_db_v1/raw/HumanHT-12_V4_gene_metadata.txt
echo COMPLETE
| true
|
20e92b86dee3f9367df6fee7518920b1123267a0
|
Shell
|
qwee123/miniupnpd-sdn
|
/scripts/dnat_ex.sh
|
UTF-8
| 4,544
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
onos_nfv_network=onosnfv
auth_database_network=databases
controller_address=
controller_port=
controller_igd_app_port=40000
controller_container_name=
client_gateway=172.16.0.1
miniupnpd_addr=172.16.0.100
miniupnpd_version=v10
auth_server_addr=172.16.0.150
auth_server_port=50000
auth_db_address=
auth_db_root_pass=$(cat db_pass.txt)
wan2_mac=c2:67:18:3d:bc:ca
wan2_ip=192.168.1.11/24
ConnectClient() {
client=$1
ovs=$2
client_ip=$3
docker run -itd --name ${client} --net none --cap-add NET_ADMIN -e auth_server_address=${auth_server_addr}":"${auth_server_port} py_test_client
ovs-docker add-port ${ovs} eth0 ${client} --ipaddress=${client_ip}
docker exec ${client} ip route add default via ${client_gateway}
PrintIfaceInfo ${client} eth0 ${ovs}
}
PrintIfaceInfo() {
container=$1
ifacename=$2
ovs=$3
iface_num=$(docker exec ${container} ip addr show | sed -n 's/\([0-9]*\): \('${ifacename}'@\).*/\1/p')
iface=$(ip addr show | sed -n 's/\([0-9]*\): \([a-z0-9]*_l\)@if'${iface_num}'.*/\2/p')
echo "Interface "${ovs}"-"${container}": "${iface}
}
if [ -z "$1" ]; then
echo "Please Specify an experiment situation, it's either 'pytest' or 'onos'"
exit 1
fi
case "$1" in
pytest )
if [ ! "$(docker ps -q -f name='^py_test_server$')" ]; then
if [ "$(docker ps -aq -f status=exited -f name='^py_test_server$')" ]; then
docker start py_test_server
else
docker run -td --name py_test_server --net ${onos_nfv_network} py_test_server
fi
fi
controller_container_name=py_test_server
controller_port=40000
;;
onos )
if [ ! "$(docker ps -q -f name='^onos$')" ]; then
if [ "$(docker ps -aq -f status=exited -f name='^onos$')" ]; then
docker start onos
else
docker run -td --name onos --net ${onos_nfv_network} onosproject/onos:2.5.1 #no interactive mode
fi
fi
controller_container_name=onos
controller_port=6653
;;
-h|--help )
echo "Usage : $0 [pytest|onos]"
echo "pytest: Use pytest server instead of a real sdn controller."
echo "onos: Use onos controller."
exit 1
;;
* )
echo "Unknown mode"
exit 1
;;
esac
controller_address=$(docker inspect ${controller_container_name} -f '{{ .NetworkSettings.Networks.'${onos_nfv_network}'.IPAddress }}')
if [ "$(docker ps -aq -f name='^miniupnpd-sdn$')" ]; then
docker stop miniupnpd-sdn && docker rm miniupnpd-sdn
fi
#db initialization may take up to 15 secs. No connection could be made until the process is complete.
docker run --name auth_db -e MARIADB_ROOT_PASSWORD=${auth_db_root_pass} --network ${auth_database_network} -td mariadb
auth_db_address=$(docker inspect auth_db -f '{{ .NetworkSettings.Networks.'${auth_database_network}'.IPAddress }}')
docker run -itd --name miniupnpd-sdn --cap-add NET_ADMIN --cap-add NET_BROADCAST \
-e CONTROLLER_ADDRESS=${controller_address}":"${controller_igd_app_port} \
--network ${onos_nfv_network} miniupnpd-sdn:${miniupnpd_version}
ovs-docker add-port ovs-s3 eth1 miniupnpd-sdn --ipaddress=${miniupnpd_addr}/24
PrintIfaceInfo miniupnpd-sdn eth1 ovs-s3
ConnectClient client1 ovs-s11 172.16.0.2/24
if [ "$1" == onos ]; then
echo "set ovs-controller connection"
ovs-vsctl set-controller ovs-s11 tcp:${controller_address}:${controller_port}
ovs-vsctl set-controller ovs-s1 tcp:${controller_address}:${controller_port}
ovs-vsctl set-controller ovs-s2 tcp:${controller_address}:${controller_port}
ovs-vsctl set-controller ovs-s3 tcp:${controller_address}:${controller_port}
ovs-vsctl set-controller ovs-r1 tcp:${controller_address}:${controller_port}
fi
ip netns add demo
ip link add name wan2 type veth peer name wan3
ip link set wan2 netns demo
ovs-vsctl add-port ovs-r1 wan3
ip netns exec demo ifconfig wan2 hw ether ${wan2_mac}
ip netns exec demo ip addr add ${wan2_ip} dev wan2
ip netns exec demo ip link set wan2 up
ip link set wan3 up
sysctl -w net.ipv4.conf.wan3.forwarding=1
#Disable stderr during testing database connectivity
exec 3>&2
exec 2> /dev/null
echo "Waiting for database to complete initialization..."
for i in {1..20}
do
result=$(mysqladmin ping -h ${auth_db_address} -uroot -p${auth_db_root_pass} | grep "mysqld is alive")
if [ -n "${result}" ]; then
break
fi
sleep 2
done
exec 2>&3
mysql -h ${auth_db_address} -uroot -p${auth_db_root_pass} < ./init_db.sql
docker run -td --name auth_server --network ${auth_database_network} -e port=${auth_server_port} -e db_addr=${auth_db_address} -e db_port=3306 demo_auth_server
ovs-docker add-port ovs-s3 eth1 auth_server --ipaddress=${auth_server_addr}/24
| true
|
7a42d5e99a63bc123b9a0876e7b22f6d643dd1f2
|
Shell
|
erickformaggio/sophie
|
/blogproof.sh
|
UTF-8
| 546
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# blogproof.sh
# Created: 25/03/2019
# Purpose: Run a wpscan WordPress test against the blog
# Author: Erick Formaggio
clear
echo "=================================================="
echo "ANALISANDO O BLOG"
echo "=================================================="
echo "Analisando... O teste pode demorar alguns minutos"
echo "Se quiser acompanhar os detalhes, em outra aba digite: tcpdump host ip-do-site -v"
# Update wpscan's vulnerability database before scanning.
wpscan --update
# NOTE(review): $host, $nomeempresablog and $DATE are not set in this
# script - presumably exported by a wrapper before sourcing/running it;
# confirm. Also `y |` runs a command named `y` (not `yes`), so wpscan
# receives an empty stdin - likely intended to auto-confirm; verify.
y | wpscan --url "$host" --random-agent > Testes/blog_"$nomeempresablog"/"$nomeempresablog"_blog-$DATE.txt
| true
|
a5638a72667e8ad973edbf817d3692beb7701a2e
|
Shell
|
ronalabraham/dotfiles
|
/bashrc
|
UTF-8
| 5,865
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# adding per https://stackoverflow.com/questions/12373586
# (disable XON/XOFF flow control so Ctrl-S works, e.g. for forward i-search)
stty -ixon
# adding per https://stackoverflow.com/questions/34253579
# (force colored googletest output even when piped)
export GTEST_COLOR=1
# If not running interactively, don't do anything
case $- in
    *i*) ;;
      *) return;;
esac
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
    debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
    xterm-color|*-256color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
    if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
        # We have color support; assume it's compliant with Ecma-48
        # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
        # a case would tend to support setf rather than setaf.)
        color_prompt=yes
    else
        color_prompt=
    fi
fi
if [ "$color_prompt" = yes ]; then
    PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
    PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
    PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
    ;;
*)
    ;;
esac
# define gnu utilities on different OSes
# (macOS ships BSD tools; the GNU versions installed via Homebrew/MacPorts
# carry a `g` prefix, e.g. gls, ggrep)
os=`uname -s`
case $os in
"Darwin")
    gnu_dircolors=/usr/local/bin/gdircolors
    gnu_ls=gls
    gnu_dir=gdir
    gnu_vdir=gvdir
    gnu_grep=ggrep
    gnu_fgrep=gfgrep
    gnu_egrep=gegrep
    ;;
* )
    gnu_dircolors=/usr/bin/dircolors
    gnu_ls=ls
    gnu_dir=dir
    gnu_vdir=vdir
    gnu_grep=grep
    gnu_fgrep=fgrep
    gnu_egrep=egrep
    ;;
esac
# enable color support of ls and also add handy aliases
# NOTE: the aliases are single-quoted, so $gnu_* expands when the alias is
# *used*; this relies on the gnu_* variables staying set in the shell.
if [ -x $gnu_dircolors ]; then
    test -r ~/.dircolors && eval "$($gnu_dircolors -b ~/.dircolors)" || eval "$($gnu_dircolors -b)"
    alias ls='$gnu_ls --color=auto'
    alias dir='$gnu_dir --color=auto'
    alias vdir='$gnu_vdir --color=auto'
fi
alias grep='$gnu_grep --color=auto'
alias fgrep='$gnu_fgrep --color=auto'
alias egrep='$gnu_egrep --color=auto'
# colored GCC warnings and errors
#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# Add an "alert" alias for long running commands.  Use like so:
#   sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
    . ~/.bash_aliases
fi
# Bash completion definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_completion, instead of adding them here directly.
[ -f ~/.bash_completion ] && source ~/.bash_completion
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
  if [ -f /usr/share/bash-completion/bash_completion ]; then
    . /usr/share/bash-completion/bash_completion
  elif [ -f /etc/bash_completion ]; then
    . /etc/bash_completion
  fi
fi
# Tmux hooks.
# This script contains any tmux-related setup for new bash shells.
if [ -f ~/.bash_tmux ]; then
    . ~/.bash_tmux
fi
# Enable fzf, but only on Linux and Mac; we will assume the fzf executable is
# not supported on other architectures. See https://github.com/junegunn/fzf for
# more information about the fzf executable.
case $os in
"Linux" ) enable_fzf=yes;;
"Darwin") enable_fzf=yes;;
esac
if [ "$enable_fzf" = yes ]; then
    [ -f ~/.fzf.bash ] && source ~/.fzf.bash
    [ -f ~/.fzf.bash_completion ] && source ~/.fzf.bash_completion
fi
# Enable bash_preexec. Also define a 'preexec_bash_tmux()' function that
# executes just before every bash command is processed. For more details, see
# https://github.com/rcaloras/bash-preexec.
[[ -f ~/.bash_preexec ]] && source ~/.bash_preexec
preexec_bash_tmux()
{
    # Execute ~/.bash_tmux with the current command being run (truncated).
    # The command is truncated to 32 chars (with "...") before being passed.
    local bash_cmd="$1"
    local MAX_TITLE_LEN=32
    if [ ${#bash_cmd} -gt $MAX_TITLE_LEN ]; then
        bash_cmd="${bash_cmd:0:$(expr $MAX_TITLE_LEN - 3)}..."
    fi
    . ~/.bash_tmux "$bash_cmd"
}
preexec_functions+=(preexec_bash_tmux)
precmd_bash_tmux()
{
    # Execute ~/.bash_tmux.
    . ~/.bash_tmux
}
precmd_functions+=(precmd_bash_tmux)
# Machine-local overrides, kept out of version control.
[[ -r ~/.bashrc_local ]] && . ~/.bashrc_local
| true
|
07999c3f20f8b7cc2cbdec0371693b7c73012bd4
|
Shell
|
iconicfuture-cookbooks/kafka
|
/templates/default/zookeeper.init.erb
|
UTF-8
| 1,402
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/sh
### BEGIN INIT INFO
# Provides:          zookeeper
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Kafka Zookeper initscript
### END INIT INFO
# Author: Thomas Liebscher <thomas.liebscher@iconicfuture.com>
# ERB template: <%= @kafka_dir %> is substituted by Chef at render time.
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="zookeeper"
NAME=zookeeper
DAEMON=<%= @kafka_dir %>/bin/zookeeper-server-start.sh
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
# Bail out quietly if the daemon script is not installed.
test -x $DAEMON || exit 0
start_server() {
    echo -n "Starting $DESC... "
    /usr/local/sbin/start-stop-daemon --start --make-pidfile --pidfile $PIDFILE --startas $DAEMON -- "<%= @kafka_dir %>/config/zookeeper.properties"
    echo "done."
}
stop_server() {
    echo -n "Stopping $DESC... "
    # NOTE(review): $DAEMON appears twice after --exec; the second copy is
    # passed as an extra argument - presumably a leftover typo, confirm.
    /usr/local/sbin/start-stop-daemon --stop --pidfile $PIDFILE --exec $DAEMON $DAEMON -- "<%= @kafka_dir %>/config/zookeeper.properties"
    echo "done."
}
status_server() {
    # Exits the whole script: 0 if a zookeeper process is found, 1 otherwise.
    # ([z]ookeeper keeps the grep itself out of the ps listing.)
    if [ "`ps -A -o command | grep [z]ookeeper`" != "" ]; then
        exit 0
    else
        exit 1
    fi
}
# NOTE(review): defined but never called from the case below.
rh_status_q() {
    status_server >/dev/null 2>&1
}
case "$1" in
  start)
    start_server
    ;;
  stop)
    stop_server
    ;;
  restart)
    stop_server
    sleep 3
    start_server
    ;;
  status)
    status_server
    ;;
  *)
    N=/etc/init.d/$NAME
    echo "Usage: $N {start|stop|restart|status}" >&2
    exit 1
    ;;
esac
exit 0
| true
|
5bf7a4686e0328c885389aae1c7c6ddd7383973e
|
Shell
|
hubertwwong/debScriptz
|
/dotFiles/.bashrc
|
UTF-8
| 4,955
| 3.34375
| 3
|
[] |
no_license
|
# DEBIAN
################################################################################
# If not running interactively, don't do anything
case $- in
    *i*) ;;
      *) return;;
esac
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar
# make less more friendly for non-text input files, see lesspipe(1)
#[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
    debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
    xterm-color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
force_color_prompt=yes
# NOTE(review): color_prompt is computed below but never referenced again -
# the custom PS1 further down always uses colors. Leftover from the stock
# Debian bashrc, presumably.
if [ -n "$force_color_prompt" ]; then
    if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
        # We have color support; assume it's compliant with Ecma-48
        # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
        # a case would tend to support setf rather than setaf.)
        color_prompt=yes
    else
        color_prompt=
    fi
fi
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
    test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
    alias ls='ls --color=auto'
    #alias dir='dir --color=auto'
    #alias vdir='vdir --color=auto'
    #alias grep='grep --color=auto'
    #alias fgrep='fgrep --color=auto'
    #alias egrep='egrep --color=auto'
fi
# Alias definitions.
# Defer to bash_alises
if [ -f ~/.bash_aliases ]; then
    . ~/.bash_aliases
fi
# DO YOU NEED THIS?
# source ~/.profile
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
  if [ -f /usr/share/bash-completion/bash_completion ]; then
    . /usr/share/bash-completion/bash_completion
  elif [ -f /etc/bash_completion ]; then
    . /etc/bash_completion
  fi
fi
# GIT
################################################################################
# displays what git branch you are.
# (prints " (branch)" or nothing when not inside a repo)
parse_git_branch() {
    git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}
# PROMPTS
################################################################################
# ANSI color codes
RS="\[\033[0m\]"    # reset
HC="\[\033[1m\]"    # hicolor
UL="\[\033[4m\]"    # underline
INV="\[\033[7m\]"   # inverse background and foreground
FBLK="\[\033[30m\]" # foreground black
FRED="\[\033[31m\]" # foreground red
FGRN="\[\033[32m\]" # foreground green
FYEL="\[\033[33m\]" # foreground yellow
FBLE="\[\033[34m\]" # foreground blue
FMAG="\[\033[35m\]" # foreground magenta
FCYN="\[\033[36m\]" # foreground cyan
FWHT="\[\033[37m\]" # foreground white
BBLK="\[\033[40m\]" # background black
BRED="\[\033[41m\]" # background red
BGRN="\[\033[42m\]" # background green
BYEL="\[\033[43m\]" # background yellow
BBLE="\[\033[44m\]" # background blue
BMAG="\[\033[45m\]" # background magenta
BCYN="\[\033[46m\]" # background cyan
BWHT="\[\033[47m\]" # background white
# debian chroot var
DEBCHROOT="${debian_chroot:+($debian_chroot)}"
# TERMINAL
# Prompt: user | cwd | date | time | git branch, then input on its own line
# (the embedded newline before the closing quote).
PS1="$HC$FRED$FBLE$DEBCHROOT\u$FRED | $FGRN\w$FRED | $FCYN\d$FRED | $FMAG\T$FRED |$FYEL\$(parse_git_branch)$FRED $RS
"
PS2="$HC$FRED> $RS
"
# PATHS
################################################################################
# FOR OSX
# MACPORTS
#export PATHPORT=/opt/local/bin:/opt/local/sbin
# HEROKU
#export PATHHEROKU=/usr/local/heroku/bin
# FOR DEBIAN
export PATHDEFAULT=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
export PATHNVM=$HOME/.nvm
export PATHRVM=$HOME/.rvm/bin
# SCRIPTS DIR
export SCRIPTZ_HOME=$HOME/zzz/debScriptz
export SCRIPTZ_SCRIPTZ=$SCRIPTZ_HOME/scripts
export SCRIPTZ_ANSIBLE=$SCRIPTZ_HOME/ansible
# FINAL PATH
# export PATH=$PATHRVM:$PATHNVM:$PATHDEFAULT:$SCRIPTZ_SCRIPTZ
export PATH=$PATHDEFAULT:$SCRIPTZ_SCRIPTZ
# RVM, NVM
################################################################################
# LOAD NVM
[[ -s "$HOME/.nvm/nvm.sh" ]] && source "$HOME/.nvm/nvm.sh"
# Load RVM into a shell session *as a function*
# this needs to come last.
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"
| true
|
49e46b2b055a5a4b0f0a3e9c3a79bec879ac8843
|
Shell
|
artsy/force
|
/scripts/build_review_app.sh
|
UTF-8
| 3,087
| 3.765625
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Description: Run this script to automate the process of building Hokusai
# review application for Force. It draws upon Artsy's existing review app
# documentation:
# https://github.com/artsy/hokusai/blob/master/docs/Review_Apps.md and
# experiences trying to set up review apps!
# USAGE: $ ./scripts/build_review_app.sh review-app-name
echo "[build_review_app.sh] START"
# Bail out of script on first expression failure and echo the commands as
# they are being run.
set -ev
NAME="$1"
if test -z "$NAME"; then
echo "You didn't provide a shell argument, so NAME isn't meaningful, exiting."
exit 1
fi
# Generate the Kubernetes YAML needed to provision the application.
hokusai review_app setup "$NAME"
review_app_file_path="hokusai/$NAME.yml"
# Create the Docker image of your current working direct of Force, and push
# it to Artsy's docker registry.
#
# --force is needed as the current working directory is dirty with (at least)
# the YAML file generated above.
# --skip-latest as we're making no claim that this is the "latest" build of the
# service.
# --tag to name the image.
# WARNING: This is likely going to take ~10 mins on your MBP.
# Be patient and grab some baby carrots.
hokusai registry push --force --skip-latest --overwrite --verbose --tag "$NAME"
# Edit the K8S YAML to reference the proper Docker image.
# (sed -i.bak + rm keeps the edit portable between GNU and BSD sed.)
sed -i.bak "s/:staging/:$NAME/g" "$review_app_file_path" && rm "$review_app_file_path.bak"
# Edit the K8S YAML Ingress resource to use the Review App's name as the host.
sed -i.bak "s/host: staging.artsy.net/host: $NAME.artsy.net/g" "$review_app_file_path" && rm "$review_app_file_path.bak"
# Provision the review app
hokusai review_app create "$NAME" --verbose
# Copy Force staging's ConfigMap to your review app
hokusai review_app env copy "$NAME" --verbose
# To enable authentication via Force's server, we need to allow XHR requests
# from Force's client to server. As such, Force's server needs to have the
# proper name of the domain that the requests are coming from. Otherwise,
# authentication requests won't work!
hokusai review_app env set "$NAME" \
  APP_URL="https://$NAME.artsy.net" \
  APPLICATION_NAME="$NAME" \
  COOKIE_DOMAIN="$NAME.artsy.net" \
  FORCE_URL="https://$NAME.artsy.net"
# Publish Force assets to S3
hokusai review_app run "$NAME" 'yarn publish-assets'
# Refresh ENV (restarts pods so the env vars set above take effect)
hokusai review_app refresh "$NAME"
# Now you need to create a CNAME for your review app and wait. This is required
# as Gravity only allows authentication requests from requests of originating
# from the artsy.net domain
# Hey you human!
# ...
# ...
# ...
# ...
# ...
# ...
# ...
# ...
# ...
# yeah, I'm talking to you
# you need to configure a CNAME record so that $NAME.artsy.net points to nginx-staging.artsy.net
#
echo "Create a CNAME record of $NAME.artsy.net pointing to nginx-staging.artsy.net"
#
# you may do this in the Cloudflare interface. Credentials are in 1pass.
#
# Step-by-step instructions:
# https://github.com/artsy/force/blob/main/docs/creating_review_app.md#dns-setup
echo "[build_review_app.sh] SUCCESS"
exit 0
| true
|
cc9598aa32ee2a63a23e6c1648a39291f6d516ae
|
Shell
|
delebedev/dotfiles
|
/macos/mac-install.sh
|
UTF-8
| 540
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# This is a script for bootstrapping macOS setup
set -e

# The companion manage script must sit in the current directory.
if [[ ! -e "manage.sh" ]]; then
  echo "Make sure you have the manage script nearby"
  exit 1
fi

# 'command -v' is the builtin, reliable way to probe for a tool;
# 'which' is an external program with inconsistent exit codes.
if ! command -v brew >/dev/null 2>&1; then
  echo "You need to install homebrew"
  exit 1
fi

echo "Installing dependencies from Brewfile..."
brew tap homebrew/cask-fonts
brew bundle --file="macos/Brewfile"
brew bundle --file="macos/Brewfile.cask"

### defaults
echo "Setting up defaults..."
defaults write com.googlecode.iterm2 HotkeyTermAnimationDuration -float 0.00001
| true
|
6ccc1557a743e8bbdbc55650980c6b9c2d4b7852
|
Shell
|
samrocketman/jenkins-bootstrap-shared
|
/tests/test.sh
|
UTF-8
| 1,551
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Integration test for the Jenkins bootstrap scripts.  Must run from the
# repository root; exits early otherwise.
export LC_ALL=C
export PS4='$ '
export DEBIAN_FRONTEND=noninteractive
if [ ! -d ".git" ]; then
  echo 'ERROR: must be run from the root of the repository. e.g.'
  echo './tests/test.sh'
  exit 1
fi
# GITHUB_TOKEN is optional but avoids API rate limiting during the run.
if [ -z "${GITHUB_TOKEN}" ]; then
  echo 'WARNING: Tests may fail without GITHUB_TOKEN environment variable set.'
fi
echo 'Last command to exit has the non-zero exit status.'
#configure error exit trap
# ERR-trap handler: stop tracing, point at the failing command, and quietly
# reset the workspace with gradle.
on_err() {
  set +x
  printf '%s\n\n%s\n' \
    "^^ command above has non-zero exit code." \
    "Cleaning up test environment: ./gradlew clean"
  ./gradlew clean &> /dev/null
}
trap on_err ERR
set -x
# Sanity-check helper scripts (syntax + executable bit) before using them.
bash -n ./tests/random_port.sh
test -x ./tests/random_port.sh
export RANDOM_PORT="$(./tests/random_port.sh)"
bash -n ./jenkins_bootstrap.sh
test -x ./jenkins_bootstrap.sh
# Environment consumed by jenkins_bootstrap.sh / provision_jenkins.sh.
export JENKINS_START="java -jar jenkins.war --httpPort=${RANDOM_PORT} --httpListenAddress=127.0.0.1"
export JENKINS_WEB="http://127.0.0.1:${RANDOM_PORT}"
export JENKINS_CLI="java -jar ./jenkins-cli.jar -s http://127.0.0.1:${RANDOM_PORT} -noKeyAuth"
export JENKINS_HOME="$(mktemp -d /tmp/my_jenkins_homeXXX)"
set +x
# Boot Jenkins, wait until the CLI jar is served, then stop it and verify
# the expected artifacts appear / disappear.
./jenkins_bootstrap.sh
test -e jenkins.pid
./scripts/provision_jenkins.sh url-ready "${JENKINS_WEB}/jnlpJars/jenkins-cli.jar"
bash -x ./scripts/provision_jenkins.sh stop
test ! -e jenkins.pid
test -e console.log
test -e jenkins.war
test -e "$JENKINS_HOME"
test -e plugins
# 'gradlew clean' must remove everything the bootstrap created.
./gradlew clean
test ! -e console.log
test ! -e jenkins.war
test ! -e "$JENKINS_HOME"
test ! -e plugins
# check for bash syntax
find . -type f -name '*.sh' | xargs -n1 bash -n
| true
|
ce97fda8c8d1d263c0ed757418782ceb9ba3397e
|
Shell
|
awohns/selection_v4
|
/anc.modern.eachchr.sh
|
UTF-8
| 4,382
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
#
#BATCH --job-name=haplo_plink
# NOTE(review): the directive above is missing the 'S' of '#SBATCH', so Slurm
# ignores the job name — confirm whether that is intentional.
#SBATCH --output=haplo_plink.txt
#
#SBATCH --ntasks=1
#SBATCH --time=60:00:00
#SBATCH --mem-per-cpu=50g
# Per-chromosome pipeline: haploid-call ancient BAMs with ANGSD, convert to
# plink format, intersect with the PoBI SNP panel, then merge/flip until the
# ancient and modern datasets are strand-consistent.
# Args: $1 chromosome, $2 value for angsd -maxMis, $3 BAM list file name,
#       $4 working folder containing the numbered step directories.
cur_chr=$1
maxmis=$2
bams_list=$3
folder=$4
module load angsd-0.913-22
module load plink-1.9.0
module load R
#Perform haplocall
angsd -b references/${bams_list} -doHaploCall 1 -doCounts 1 -out ${folder}/1.haplo/${cur_chr} -r ${cur_chr} -maxMis ${maxmis} -sites references/pobi.snps.by.chr/${cur_chr}.se.pobi.updated.bim.chr.pos.txt
wait
#Convert to tped
/storage/software/angsd-0.913-22/misc/haploToPlink ${folder}/1.haplo/${cur_chr}.haplo.gz ${folder}/2.tped/${cur_chr}
wait
#Replace N's with 0's.  (The original wrapped this and the cp below in a
#useless 'eval' with broken nested quoting; the plain commands are identical.)
sed 's/N/0/g' ${folder}/2.tped/${cur_chr}.tped > ${folder}/3.tped.nto0/temp.${cur_chr}.tped
#Copy the tfam file
cp ${folder}/2.tped/${cur_chr}.tfam ${folder}/3.tped.nto0/temp.${cur_chr}.tfam
#Convert tped to ped
plink --tfile ${folder}/3.tped.nto0/temp.${cur_chr} --recode --out ${folder}/4.ped/${cur_chr}_ped
#Add the rsids
plink --file ${folder}/4.ped/${cur_chr}_ped --update-map references/pobi.snps.by.chr/${cur_chr}.se.pobi.updated.bim.txt.fixed --update-name --make-bed --out ${folder}/5.add.rsid/${cur_chr}.with.rsid
#Find intersected SNPS between ancient and pobi
plink --bfile ${folder}/5.add.rsid/${cur_chr}.with.rsid --extract references/pobi.snps/${cur_chr}.se.pobi.bim.snps.only.txt --make-bed --out ${folder}/6.intersect/${cur_chr}.intersected.anc.pobi
#Change the phenotypes (force case status "2" into the 6th .fam column)
awk '{$6 = "2";print $0 }' ${folder}/6.intersect/${cur_chr}.intersected.anc.pobi.fam > ${folder}/7.intersect.pheno/${cur_chr}.intersected.anc.pobi.fam
#Add phenotypes to intersected list
cp ${folder}/6.intersect/*.intersected.anc.pobi.bim ${folder}/7.intersect.pheno/
cp ${folder}/6.intersect/*.intersected.anc.pobi.log ${folder}/7.intersect.pheno/
cp ${folder}/6.intersect/*.intersected.anc.pobi.nosex ${folder}/7.intersect.pheno/
cp ${folder}/6.intersect/*.intersected.anc.pobi.bed ${folder}/7.intersect.pheno/
#Get list of SNPs intersected from the ancient dataset
cut -f2 ${folder}/7.intersect.pheno/${cur_chr}.intersected.anc.pobi.bim > ${folder}/8.anc.snps/${cur_chr}.anc.snps.txt
#Extract the overlapping SNPs from POBI
plink --bfile references/se.england.pobi/9.10.11.updated.map --extract ${folder}/8.anc.snps/${cur_chr}.anc.snps.txt --make-bed --out ${folder}/9.pobi.with.anc.snps/${cur_chr}.pobi.with.anc.snps
#Merge pobi and ancient
plink --bfile ${folder}/7.intersect.pheno/${cur_chr}.intersected.anc.pobi --bmerge ${folder}/9.pobi.with.anc.snps/${cur_chr}.pobi.with.anc.snps --make-bed --out ${folder}/10.merged/${cur_chr}.merge
#Flip the missnps
plink --bfile ${folder}/7.intersect.pheno/${cur_chr}.intersected.anc.pobi --flip ${folder}/10.merged/${cur_chr}.merge-merge.missnp --make-bed --out ${folder}/11.merged.flipped/${cur_chr}.merge.flipped
#Remerge after the flip
plink --bfile ${folder}/11.merged.flipped/${cur_chr}.merge.flipped --bmerge ${folder}/9.pobi.with.anc.snps/${cur_chr}.pobi.with.anc.snps --make-bed --allow-no-sex --out ${folder}/12.remerged/${cur_chr}.pobi.anc
#Final exclude and merge
plink --bfile ${folder}/11.merged.flipped/${cur_chr}.merge.flipped --exclude ${folder}/12.remerged/${cur_chr}.pobi.anc-merge.missnp --make-bed --out ${folder}/13.notriallelic/${cur_chr}.anc.notri_tmp
plink --bfile ${folder}/9.pobi.with.anc.snps/${cur_chr}.pobi.with.anc.snps --exclude ${folder}/12.remerged/${cur_chr}.pobi.anc-merge.missnp --make-bed --out ${folder}/13.notriallelic/${cur_chr}.pobi.notri_tmp
plink --bfile ${folder}/13.notriallelic/${cur_chr}.anc.notri_tmp --bmerge ${folder}/13.notriallelic/${cur_chr}.pobi.notri_tmp --make-bed --allow-no-sex --out ${folder}/13.notriallelic/${cur_chr}.anc.pobi.notri
#Find Monomorphic Problems
Rscript lib/flip.mono.problems.R ${cur_chr} ${folder}
#Flip the monomorphic SNPs
plink --bfile ${folder}/13.notriallelic/${cur_chr}.anc.notri_tmp --flip ${folder}/14.snps.to.flip/${cur_chr}results.txt --make-bed --out ${folder}/15.flipped.snps/${cur_chr}.flipped.monomorphic
#Remerge the flipped sites with the pobi file
plink --bfile ${folder}/15.flipped.snps/${cur_chr}.flipped.monomorphic --bmerge ${folder}/13.notriallelic/${cur_chr}.pobi.notri_tmp --make-bed --allow-no-sex --out ${folder}/16.flipped.merged/${cur_chr}.anc.pobi.no.tri.no.monomorphic
#**********NEED STEP HERE TO FIND AMBIGUOUS SNPS*******************************
| true
|
2c204f94764e43fd64757285d774d4f80496799b
|
Shell
|
BurningBright/repository
|
/shell-study/20151114_01.sh
|
UTF-8
| 557
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
#test menu manually
# Simple interactive sys-admin menu: each helper clears the screen and shows
# one report; the loop redraws the menu until a key other than 1-3 is pressed.
function diskspace {
  clear
  df -k
}
function whoseon {
  clear
  who
}
function memusage {
  clear
  cat /proc/meminfo
}
# Draw the menu and read a single keypress into $option.
function menu {
  clear
  echo -e "\tSys Admin Menu\n"
  echo -e "\t1) Display disk space\n"
  echo -e "\t2) Display user info\n"
  echo -e "\t3) Display memory usage\n"
  echo -e "\t0) Exit\n"
  echo -en "\tEnter option"
  # -r keeps a literal backslash keypress from being treated as a
  # line-continuation; -n1 returns after one character.
  read -rn1 option
}
# 'while true' replaces the non-idiomatic 'while [ 1 ]' (which only worked
# because any non-empty string is truthy).
while true
do
  menu
  case $option in
    1)
      diskspace;;
    2)
      whoseon;;
    3)
      memusage;;
    *)
      # 0 and any other key fall through here and exit the loop.
      break;;
  esac
  echo -en "\n\n\t Any key to continue"
  read -rn1 line
done
clear
| true
|
1aee404b7524ec8dee75810fa3639982d82b160b
|
Shell
|
sdeseille/Act_Vagrant_Env
|
/Install_Perl_via_plenv.sh
|
UTF-8
| 517
| 2.921875
| 3
|
[] |
no_license
|
# Provision Perl 5.12.4 for the Act environment via plenv.
# Guard the cd so nothing below runs from an unexpected directory.
cd /home/vagrant || exit 1
echo "Install perl-build in order to simplify the process of installing Perl versions"
# BUG FIX: GitHub no longer serves the unauthenticated git:// protocol, so the
# original clone URL always fails; use https instead.
git clone https://github.com/tokuhirom/Perl-Build.git /home/vagrant/.plenv/plugins/perl-build/
echo "Build Perl 5.12.4 version needed by Act"
plenv install -j 9 -D usethreads -D man1dir=none -D man3dir=none 5.12.4
echo "Rebuild the shim executables. To do anytime you install Perl executable"
plenv rehash
echo "Set Perl version globaly"
plenv global 5.12.4
echo "Install cpanm utility"
plenv install-cpanm
| true
|
d783f6c5fad28447c18938f9b05a20efbf77910b
|
Shell
|
shashivish/ShellScript
|
/RunHbaseLoad_Debug
|
UTF-8
| 1,082
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
# Build the clickstream tools with Maven and, on success, launch the HBase
# bulk-load job.  Logs go under /home/mapr/logs.
echo "Packaging Project"
cd /home/mapr/Components/clickstream/tools/
mvn package > /home/mapr/logs/mvnRun
grep "BUILD SUCCESS" /home/mapr/logs/mvnRun > /home/mapr/logs/mvnResponse
# BUG FIX: the original tested "$mvnResponse" — a variable that was never
# assigned — with an unquoted [ -n ... ], which is always true, so the load
# job ran even when the build failed.  The grep output file is non-empty
# exactly when "BUILD SUCCESS" appeared, so test that instead.
if [ -s /home/mapr/logs/mvnResponse ]
then
  echo "Congratulations! mvn packaging successful. Proceeding further.."
  cd /home/mapr/Components/clickstream/dist/target/clickstream-1.3-SNAPSHOT
  pwd
  echo "Copying Jar"
  cp /home/mapr/Components/clickstream/tools/target/clickstream-tools-1.3-SNAPSHOT.jar ./lib/
  cp /home/mapr/Components/clickstream/common/target/clickstream-common-1.3-SNAPSHOT.jar /home/mapr/Components/clickstream/dist/target/clickstream-1.3-SNAPSHOT/lib/
  echo "Starting Hbase Bulk Load Job"
  bin/cs hbasebulk -conf /home/mapr/Components/clickstream/dist/target/clickstream-1.3-SNAPSHOT/conf -database flightdemo -D clickstream.map.tasks.max=1900 1> /home/mapr/logs/hbaseBulkLoad.out 2> /home/mapr/logs/hbaseBulkLoad.err
  echo "Hbase Bulk Load Complated Successfully..Relax Now..:D"
else
  echo "mvn build failed!! Please check the cloned code."
  exit 1
fi
| true
|
4be3afa04a54bdd0e388d387f2423d892cbfdc8d
|
Shell
|
wuyongzheng/wgetflv
|
/gettudou-iid.sh
|
UTF-8
| 1,214
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download a Tudou video given an item id and an output basename:
# fetch the item XML, pick the largest flv/mp4 URL, then wget it.
# Usage: gettudou-iid.sh 37789103 outbase
if [ $# -ne 2 ] ; then
  echo gettudou-iid.sh 37789103 outbase
  # BUG FIX: every failure path below previously used a bare 'exit', which
  # returns status 0; use a non-zero status so callers can detect failure.
  exit 1
fi
iid=$1
outbase=$2
rm -f /tmp/downtudou1.xml
if ! wget -q -O /tmp/downtudou1.xml "http://v2.tudou.com/v?it=$iid" ; then
  echo wget "http://v2.tudou.com/v?it=$iid" failed
  exit 1
fi
# Normalize encoding: convert GBK pages to UTF-8, otherwise just fix line ends.
if grep -q -i '<meta .*charset *= *"*gb' /tmp/downtudou1.xml ; then
  dos2unix </tmp/downtudou1.xml | iconv -c -f gbk -t utf-8 >/tmp/downtudoux
  mv /tmp/downtudoux /tmp/downtudou1.xml
else
  dos2unix -q /tmp/downtudou1.xml
fi
# Split <f> records one per line, keep flv/mp4 entries with a size attribute,
# sort by size descending and keep the single largest URL.
cat /tmp/downtudou1.xml | \
binreplace -r '\n' ' ' -r '<f ' '\n<f ' -r '</f>' '</f>\n' | \
grep size= | \
grep 'http://.*[fm][l4]v.key=' | \
sed -e 's/.*size="//g' -e 's/">/\t/g' -e 's/<.*//g' -e 's/&/\&/g' | \
sort -n -r | \
cut -f 2 | \
head -n 1 > /tmp/downtudou2.txt
if ! grep -q '^http://.*[fm][l4]v.key=' /tmp/downtudou2.txt ; then
  echo "unexpected content of http://v2.tudou.com/v?it=$iid"
  exit 1
fi
url3=$(cat /tmp/downtudou2.txt)
if grep -q 'f[l4]v.key=' /tmp/downtudou2.txt ; then
  outfile="$outbase.flv"
else
  outfile="$outbase.mp4"
fi
if ! wget --retry-connrefused -t 0 --progress=dot:mega -O "$outfile" "$url3" ; then
  echo wget "$url3" failed
  exit 1
fi
rm /tmp/downtudou1.xml /tmp/downtudou2.txt
| true
|
752f1df95e506eb46f70224f1439c13b443916fc
|
Shell
|
syzxy/Shrug
|
/src/shrug-commit.sh
|
UTF-8
| 3,032
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/dash
# shrug-commit --- saves a copy of all files in the index to the repository.
# shrug-commit [-a] -m message
# commands:
# -m msg: a message describing the commit
# message: assumed to be legal, i.e. ASCII and not starting with '-'
# -a : optional, causes all files in the index to have their contents from the current directory
# added to the index before commit
# directories
REPO='.shrug'
OBJ="${REPO}/objects"
HEADS="${REPO}/logs/refs/heads"
INDECIES="${REPO}/indecies" # backups of index files per branch tip
# files
HEAD="$REPO/HEAD"
INDEX="${REPO}/index"
# TIP: turn the "ref: refs/heads/<branch>" line of HEAD into a repo-relative
# path.  NOTE(review): if HEAD does not exist yet, cat fails and TIP/BRANCH
# end up empty — confirm the intended first-commit behaviour.
TIP=`cat "$HEAD" | sed -r -e "s@.*: (.*)@$REPO/\1@"`
#BRANCH="${HEADS}/master"
# BRANCH: the per-branch log file derived from the tip path.
BRANCH=`echo "$TIP" | sed -r -e "s@.*/.*/(.*)@$HEADS/\1@"`
HEAD_LOG="${REPO}/logs/HEAD"
REFS="${REPO}/info/refs"
# Report usage on stderr and terminate the script with a failure status.
abort() {
  printf '%s\n' "usage: shrug-commit [-a] -m commit-message" >&2
  exit 1
}
# ---------- check if argument to -m is legal ----------- #
# Validate the commit message in $message: reject an empty message or one
# that starts with '-' (it would look like an option).
# BUG FIX: the original piped 'echo "$message"' into egrep, so messages that
# ARE echo options (e.g. "-n", "-e") were swallowed by echo and slipped past
# the guard; a case pattern inspects the string directly.
check_msg() {
  case "$message" in
    '' | -*) abort ;;
  esac
}
# ---------- update index file if -a provided ----------- #
# Re-stage every file currently listed in the index (first space-separated
# field of each line) by handing the list to shrug-add.  No-op when there is
# no readable index file.
update_index() {
  if [ -r "$INDEX" ]
  then
    # read the index directly instead of the original useless 'cat |'
    cut -d' ' -f1 "$INDEX" | xargs shrug-add
  fi
}
# ------------------- parse options --------------------- #
if ! [ -d $REPO ]; then # repo does not exist
echo "$(basename $0): error: no $REPO directory containing shrug repository exists" 1>&2
exit 1
elif [ $# -lt 2 -o $# -gt 3 ]; then abort
fi
# Consume "-m msg" (required) and "-a" (optional, in either position).
while true
do
# echo "now arguments are $@ and \$1 is $1"
case $1 in
-m) message="$2"; check_msg; shift 2
[ $1 ] || break # break when all arguments have been parsed
# echo "$1 is last"
;;
-a) update_index; shift 1; [ $1 ] || break
# echo "-a is first, because $1 = $2"
;;
*) abort ;;
esac
done
# ---------------- commit files in index ----------------- #
# 1. check if there are anything to commit
if ! [ -r $INDEX ] # index file does not exist
then
echo "nothing to commit"
exit 0
elif ! egrep '^.* (initial|staged|deleted) .*$' "$INDEX" > /dev/null # no staged or deleted files in index
then
echo "nothing to commit"
exit 0
fi
# 2. create commit object
# NOTE(review): in the first branch (fresh HEAD) 'serial' is never assigned,
# so the commit file below would be written to "${OBJ}/" — likely a bug on
# the very first commit; confirm intended behaviour.
if ! [ -f "$HEAD" ]
then
echo 'ref: refs/heads/master' > "$HEAD"
elif ! [ -f "$BRANCH" ]
then
mkdir -p "$HEADS"
serial=0
else
# next serial = last logged serial + 1
serial=`tail -1 "$HEAD_LOG" | cut -d' ' -f1`
serial=$(( serial + 1 ))
fi
egrep -v '^.* deleted .*$' "$INDEX" | # files except deleted ones in index
cut -d' ' -f1,2 >> "${OBJ}/${serial}" # record these files and their sha1 in a commit file
# 3. change status of commited files in index; remove deleted files from index
sed -r -i "s/^(.* )(staged|initial) .*$/\1same $serial/g; s/.* deleted .*//g" "$INDEX"
# 4. LOG: record the new tip serial and append to branch + HEAD logs,
# then snapshot the index for this commit.
echo "$serial" > "$TIP"
echo "$serial commit $message" >> "$BRANCH"
echo "$serial commit $message" >> "$HEAD_LOG"
mkdir -p "$INDECIES"
cp "$INDEX" "$INDECIES/$serial"
echo "Committed as commit $serial"
| true
|
c70296f7c72e47200b4119194cf3919bce6f852d
|
Shell
|
xielei552/Programming_3
|
/a12/a12p3
|
UTF-8
| 806
| 3.65625
| 4
|
[] |
no_license
|
#! /bin/sh
# File: a12p3.sh
# Author: Lei Xie
# Purpose: count the files under /usr/bin and how many of them are text
# files, then print both totals and the percentage of non-text files.
# Date: Dec 3, 2014
# Version: 1.0
# Note: the counting loop runs inside ( ) on the right of the pipe, so the
# counters live and are printed inside that same subshell.
total=0
text=0
find /usr/bin | \
(
  # IFS= and -r preserve leading whitespace and backslashes in path names;
  # quoting "$line" keeps paths with spaces intact (the original word-split).
  while IFS= read -r line
  do
    # POSIX arithmetic expansion replaces the external 'expr' calls
    total=$((total + 1))
    if [ "$(file "$line" | grep -c text)" = 1 ]
    then
      text=$((text + 1))
    fi
  done
  echo I am using GNU/Linux 2.6.32, 64-bit version
  echo There are $total file in /usr/bin
  echo There are $text text file in /usr/bin
  script=$((total - text))
  # integer percentage via shell arithmetic instead of spawning 'bc'
  percent=$((script * 100 / total))
  echo $percent % file is script file
)
| true
|
e17a907d10e4a1a6068652875064eb1146645347
|
Shell
|
roldanpau/RTBP
|
/prtbp_del/periodicpoints_SECg2.sh
|
UTF-8
| 316
| 2.65625
| 3
|
[] |
no_license
|
# Iterate the periodic point on section SECg2 (Delaunay coordinates) once
# through the prtbpdel map.  Builds the input file, runs the program, then
# removes the input.
NAMEROOT=periodicpoints_SECg2
datfile="$NAMEROOT.dat"
resfile="$NAMEROOT.res"
# NOTE(review): errfile is defined but never used below — confirm whether
# stderr was meant to be redirected to it.
errfile="$NAMEROOT.err"
echo "0.95387536e-3 SECg2 1" > "$datfile" # mu, sec, num of iterates
# periodic point p (in Delaunay coords).
cut -d ' ' -f 1-4 ../cardel/periodicorbits_del.res >> "$datfile"
./prtbpdel < "$datfile" > "$resfile"
rm -- "$datfile"
| true
|
28aadf399b2ebcbaf71e098cd22988eff6f68b2c
|
Shell
|
Zedeau/bootstrap-wrapper
|
/boostrap-wrapper.sh
|
UTF-8
| 1,788
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# BUG FIX: the original first line was '!/bin/bash' (missing '#'), so the
# file had no shebang and the stray line ran as a command.
# Usage:
# From the system to register, as root
# [root@somehost]# curl -s http://satellite.fqdn/pub/bootstrap-wrapper.sh | bash
#
######## VARS ########
SATELLITE=satellite.local
ORG_LABEL=Startx
LOCATION="Europe/Paris"
AK=AK_RHEL7
HG=HG_RHEL7
REX_USER=remote_user
# Release number (e.g. "7.6") read straight from /etc/redhat-release;
# the original piped 'cat' into grep for no reason.
OS=$(grep -oE '[0-9].[0-9]{1,2}' /etc/redhat-release)
TIMESTAMP=$(date +%Y%m%d%H%M)
EXT=bak.$TIMESTAMP
SUDOERS=/etc/sudoers.d/nopasswd
######## MAIN ########
((EUID == 0)) || {
  printf 'FATAL: You must be the super-user\n'
  exit 1
}
rpm -q subscription-manager &> /dev/null || {
  printf 'FATAL: Package subscription-manager is not installed\n'
  exit 2
}
# nullglob: if no *.repo files exist the loop simply runs zero times.
shopt -s nullglob
printf '==> Backing up repos configuration\n'
for repo in /etc/yum.repos.d/*.repo;
do
  mv "$repo" "$repo"."$EXT"
done
printf '==> Disabling subscription-manager plugin for yum\n'
sed -ri '/^enabled\>/ s/=.*/ = 0/' /etc/yum/pluginconf.d/subscription-manager.conf
printf '==> Remove any previous registration data\n'
rm -f /etc/pki/consumer/cert.pem
rm -f /etc/sysconfig/rhn/systemid
printf '==> Clean the subscription manager config\n'
subscription-manager clean
printf '==> Create remote execution user\n'
if id "$REX_USER" &>/dev/null;
then
  printf '==> Remote execution user already exists\n'
else
  # -e '' clears any account expiry date for the remote-execution user
  useradd -m -e '' "$REX_USER"
  echo "$REX_USER ALL = (root) NOPASSWD : ALL" >> "$SUDOERS"
fi
printf '==> Registering to Satellite\n'
curl -s http://$SATELLITE/pub/bootstrap.py | python - --server "$SATELLITE" \
  --organization "$ORG_LABEL" \
  --location "$LOCATION" \
  --activationkey "$AK" \
  --download-method http \
  --rex --rex-user "$REX_USER" \
  --enablerepos "*" \
  --hostgroup "$HG" \
  --skip katello-agent \
  --skip puppet \
  --operatingsystem "RHEL Server $OS" \
  --fqdn $(hostname -f) \
  --force \
  --login admin
| true
|
8fcfe73bf2170b0c64e2def722642874e8221f19
|
Shell
|
pelias/interpolation
|
/dev/addresses.sh
|
UTF-8
| 571
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# dump address table records for a specific street
ADDRESS_DB="/data/address.db";
STREET_DB="/data/street.db";
# P1/P2: a longitude/latitude point that must fall inside the street's
# r-tree bounding box (here: a point on Glasgow Street, Wellington NZ —
# presumably; confirm against the data set).
P1="174.766843";
P2="-41.288788";
NAME="glasgow street";
# Attach the street DB, find street ids whose bbox contains (P1,P2) and whose
# name matches, then list their address rows ordered by house number.
sqlite3 $ADDRESS_DB "ATTACH DATABASE '$STREET_DB' as 'street'; \
SELECT address.* FROM street.rtree \
JOIN street.names ON street.names.id = street.rtree.id \
JOIN address ON address.id = street.rtree.id \
WHERE ( street.rtree.minX<$P1 AND street.rtree.maxX>$P1 AND street.rtree.minY<$P2 AND street.rtree.maxY>$P2) \
AND ( names.name='$NAME' ) \
ORDER BY address.housenumber ASC;";
| true
|
8f4046b30d7c2ebf68ce4f2b005853fe2475a9e9
|
Shell
|
nurettin/odd
|
/service/candle/stop.sh
|
UTF-8
| 403
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Stop the candle service whose process id is recorded in ./pid, retrying
# TERM once per second until the process is gone, then remove the pid file.
printf "Stopping\n"
if [ -f ./pid ]
then
  PID=$(cat pid)
  # %s instead of interpolating into the format string keeps printf safe
  # even if the pid file contains '%' characters.
  printf 'Killing process %s\n' "$PID"
  while true
  do
    kill "$PID"
    sleep 1
    # kill -0 only probes for existence; success means it is still alive.
    if kill -0 "$PID" > /dev/null 2>&1
    then
      echo "Trying to kill $PID again"
    else
      echo "Process $PID killed"
      rm pid
      exit 0
    fi
  done
else
  printf "Process id file not found\n"
  exit 1
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.