blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
382dd546886721a3f10e3a7669e8337e8f64ef60 | Shell | mrhubbs/forge | /install-forge | UTF-8 | 504 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Print a bold blue heading surrounded by blank lines.
# $1 - message text
say () {
  # printf instead of echo: POSIX sh does not guarantee that echo
  # interprets backslash escape sequences.
  printf '\n\033[1;34m# %s\033[0;37m\n\n' "$1"
}

# any subsequent commands that fail will cause the script to stop
set -e

say "Installing starter script"

# install forge bootstrap script: bake the current directory into it
CMD="s+THE_INSTALL_PATH+${PWD}+"
# install location: either the first argument or ~/.bin
USER_BIN=${1:-${HOME}/.bin}
FORGE_FILE=${USER_BIN}/forge
# Quote every expansion so paths containing spaces survive; feed the file
# to sed directly instead of the useless-use-of-cat pipeline.
mkdir -p "${USER_BIN}"
sed "${CMD}" ./forge > "${FORGE_FILE}"
chmod +x "${FORGE_FILE}"
echo "...done"

say "Forge installed"
echo "make sure ${USER_BIN} is in your PATH"
| true |
7e5c9148e0f2b0837af6c8eb330dae4a9ea19f6b | Shell | FlorianFritz/spksrc | /spk/rutorrent/src/service-setup.sh | UTF-8 | 10,461 | 3.6875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# Package identity used throughout the SynoCommunity service scripts.
# Package
PACKAGE="rutorrent"
DNAME="ruTorrent"
PACKAGE_NAME="com.synocommunity.packages.${PACKAGE}"
# Others
WEB_DIR="/var/services/web"
PATH="${SYNOPKG_PKGDEST}/bin:${SYNOPKG_PKGDEST}/usr/bin:${PATH}"
# DSM build 4418+ runs the web stack as "http"; older builds used "nobody".
APACHE_USER="$([ $(grep buildnumber /etc.defaults/VERSION | cut -d"\"" -f2) -ge 4418 ] && echo -n http || echo -n nobody)"
BUILDNUMBER="$(/bin/get_key_value /etc.defaults/VERSION buildnumber)"
GROUP="sc-download"
GROUP_DESC="SynoCommunity's download related group"
LEGACY_USER="rutorrent"
LEGACY_GROUP="users"
PYTHON_DIR="/usr/local/python3"
VIRTUALENV="${PYTHON_DIR}/bin/virtualenv"
# SVC_*/PID_FILE/LOG_FILE: presumably consumed by the SynoCommunity
# service-runner framework that sources this file — TODO confirm.
SVC_BACKGROUND=y
PID_FILE="${SYNOPKG_PKGDEST}/var/rtorrent.pid"
LOG_FILE="${SYNOPKG_PKGDEST}/var/rtorrent.log"
SVC_WRITE_PID=y
# Pre-install hook: on a fresh install, verify that the directories the
# user entered in the install wizard actually exist before proceeding.
# Exits 1 with a message on stdout when a required directory is missing.
service_preinst ()
{
    # Only a fresh INSTALL carries wizard variables worth validating.
    if [ "${SYNOPKG_PKG_STATUS}" != "INSTALL" ]; then
        return 0
    fi

    # The download directory is mandatory.
    if [ ! -d "${wizard_download_dir}" ]; then
        echo "Download directory ${wizard_download_dir} does not exist."
        exit 1
    fi

    # The watch directory is optional; validate it only when provided.
    if [ -n "${wizard_watch_dir}" ] && [ ! -d "${wizard_watch_dir}" ]; then
        echo "Watch directory ${wizard_watch_dir} does not exist."
        exit 1
    fi

    return 0
}
# Grant the effective user and the web-server user access to a shared
# folder via Synology ACLs, then force subdirectories to inherit them.
# $1 - absolute path of the folder to fix
# Side effects: chown -R on the folder; appends command output to INST_LOG.
fix_shared_folders_rights()
{
    local folder=$1
    echo "Fixing shared folder rights for ${folder}" >> "${INST_LOG}"
    chown -R "${EFF_USER}:${APACHE_USER}" "${folder}" >> "${INST_LOG}" 2>&1
    # Read-only for everyone; full access for the service user/group;
    # read-write-delete for the web server user/group.
    synoacltool -add "${folder}" "everyone::allow:r-x----------:fd--" >> "${INST_LOG}" 2>&1
    synoacltool -add "${folder}" "user:${EFF_USER}:allow:rwxpdDaARWc--:fd" >> "${INST_LOG}" 2>&1
    synoacltool -add "${folder}" "group:${USER}:allow:rwxpdDaARWc--:fd" >> "${INST_LOG}" 2>&1
    synoacltool -add "${folder}" "user:${APACHE_USER}:allow:rwxp-D------:fd" >> "${INST_LOG}" 2>&1
    synoacltool -add "${folder}" "group:${APACHE_USER}:allow:rwxp-D------:fd--" >> "${INST_LOG}" 2>&1
    # Propagate the ACLs to every existing subdirectory.
    find "${folder}" -mindepth 1 -type d -exec synoacltool -enforce-inherit "{}" \; >> ${INST_LOG} 2>&1
}
# Post-install hook: install busybox helpers and the web UI, relax PHP
# open_basedir for the UI, patch the shipped config files with the
# wizard's answers (fresh installs only), set filesystem permissions,
# and build an optional Python virtualenv for cloudscraper.
service_postinst ()
{
    # Install busybox stuff
    ${SYNOPKG_PKGDEST}/bin/busybox --install ${SYNOPKG_PKGDEST}/bin
    syno_user_add_to_legacy_group "${EFF_USER}" "${LEGACY_USER}" "${LEGACY_GROUP}"
    # Install the web interface
    cp -pR ${SYNOPKG_PKGDEST}/share/${PACKAGE} ${WEB_DIR} >>"${INST_LOG}" 2>&1
    # Configure open_basedir — the mechanism differs between old DSM
    # (APACHE_USER=nobody, per-site Apache conf) and newer DSM (php-fpm ini).
    if [ "${APACHE_USER}" == "nobody" ]; then
        echo -e "<Directory \"${WEB_DIR}/${PACKAGE}\">\nphp_admin_value open_basedir none\n</Directory>" > /usr/syno/etc/sites-enabled-user/${PACKAGE}.conf
    else
        if [ -d "/etc/php/conf.d/" ]; then
            echo -e "[PATH=${WEB_DIR}/${PACKAGE}]\nopen_basedir = Null" > /etc/php/conf.d/${PACKAGE_NAME}.ini
        fi
    fi
    # Configure files — only on a fresh install; upgrades keep the saved config.
    if [ "${SYNOPKG_PKG_STATUS}" == "INSTALL" ]; then
        # First path component of the download dir, e.g. "volume1".
        TOP_DIR=`echo "${wizard_download_dir:=/volume1/downloads}" | cut -d "/" -f 2`
        # 25% of physical RAM, capped at 512 MiB, in bytes.
        MAX_MEMORY=`awk '/MemTotal/{memory=$2*1024*0.25; if (memory > 512*1024*1024) memory=512*1024*1024; printf "%0.f", memory}' /proc/meminfo`
        # Point ruTorrent's config.php at the bundled external tools.
        sed -i -e "s|scgi_port = 5000;|scgi_port = 8050;|g" \
            -e "s|topDirectory = '/';|topDirectory = '/${TOP_DIR}/';|g" \
            -e "s|tempDirectory = null;|tempDirectory = '${SYNOPKG_PKGDEST}/tmp/';|g" \
            -e "s|\"python\"\(\\s*\)=>\(\\s*\)'.*'\(\\s*\),\(\\s*\)|\"python\"\1=>\2'${SYNOPKG_PKGDEST}/env/bin/python3'\3,\4|g" \
            -e "s|\"pgrep\"\(\\s*\)=>\(\\s*\)'.*'\(\\s*\),\(\\s*\)|\"pgrep\"\1=>\2'${SYNOPKG_PKGDEST}/bin/pgrep'\3,\4|g" \
            -e "s|\"sox\"\(\\s*\)=>\(\\s*\)'.*'\(\\s*\),\(\\s*\)|\"sox\"\1=>\2'${SYNOPKG_PKGDEST}/bin/sox'\3,\4|g" \
            -e "s|\"mediainfo\"\(\\s*\)=>\(\\s*\)'.*'\(\\s*\),\(\\s*\)|\"mediainfo\"\1=>\2'${SYNOPKG_PKGDEST}/bin/mediainfo'\3,\4|g" \
            -e "s|\"stat\"\(\\s*\)=>\(\\s*\)'.*'\(\\s*\),\(\\s*\)|\"stat\"\1=>\2'/bin/stat'\3,\4|g" \
            -e "s|\"curl\"\(\\s*\)=>\(\\s*\)'.*'\(\\s*\),\(\\s*\)|\"curl\"\1=>\2'${SYNOPKG_PKGDEST}/bin/curl'\3,\4|g" \
            -e "s|\"id\"\(\\s*\)=>\(\\s*\)'.*'\(\\s*\),\(\\s*\)|\"id\"\1=>\2'/bin/id'\3,\4|g" \
            -e "s|\"gzip\"\(\\s*\)=>\(\\s*\)'.*'\(\\s*\),\(\\s*\)|\"gzip\"\1=>\2'/bin/gzip'\3,\4|g" \
            -e "s|\"php\"\(\\s*\)=>\(\\s*\)'.*'\(\\s*\),\(\\s*\)|\"php\"\1=>\2'/bin/php'\3,\4|g" \
            ${WEB_DIR}/${PACKAGE}/conf/config.php >>"${INST_LOG}" 2>&1
        # Fill the @placeholders@ in the shipped .rtorrent.rc template.
        sed -i -e "s|@download_dir@|${wizard_download_dir:=/volume1/downloads}|g" \
            -e "s|@max_memory@|$MAX_MEMORY|g" \
            -e "s|@port_range@|${wizard_port_range:=6881-6999}|g" \
            ${SYNOPKG_PKGDEST}/var/.rtorrent.rc >>"${INST_LOG}" 2>&1
        # Enable or strip the optional watch-directory line.
        if [ -d "${wizard_watch_dir}" ]; then
            sed -i -e "s|@watch_dir@|${wizard_watch_dir}|g" ${SYNOPKG_PKGDEST}/var/.rtorrent.rc >>"${INST_LOG}" 2>&1
        else
            sed -i -e "/@watch_dir@/d" ${SYNOPKG_PKGDEST}/var/.rtorrent.rc >>"${INST_LOG}" 2>&1
        fi
        # Optionally disable open_basedir globally (user's explicit choice).
        if [ "${wizard_disable_openbasedir}" == "true" ] && [ "${APACHE_USER}" == "http" ]; then
            if [ -f "/etc/php/conf.d/user-settings.ini" ]; then
                sed -i -e "s|^open_basedir.*|open_basedir = none|g" /etc/php/conf.d/user-settings.ini >>"${INST_LOG}" 2>&1
                initctl restart php-fpm > /dev/null 2>&1
            fi
        fi
        # Permissions handling — ACL-based permissions exist on DSM 4418+.
        if [ "${BUILDNUMBER}" -ge "4418" ]; then
            set_syno_permissions "${wizard_download_dir:=/volume1/downloads}" "${GROUP}"
            if [ -d "${wizard_watch_dir}" ]; then
                set_syno_permissions "${wizard_watch_dir}" "${GROUP}"
            fi
        fi
    fi
    #If python3 is available setup a virtual environment with cloudscraper
    if [ -f "${PYTHON_DIR}/bin/python3" ]; then
        # Create a Python virtualenv
        ${VIRTUALENV} --system-site-packages ${SYNOPKG_PKGDEST}/env >> "${INST_LOG}" 2>&1
        # Install the cloudscraper wheels
        ${SYNOPKG_PKGDEST}/env/bin/pip install -U cloudscraper==1.2.48 >> "${INST_LOG}" 2>&1
    fi
    fix_shared_folders_rights "${SYNOPKG_PKGDEST}/tmp"
    fix_shared_folders_rights "${WEB_DIR}/${PACKAGE}/share"
    return 0
}
# Post-uninstall hook: tear down the web UI that service_postinst copied
# into the DSM web root. Everything else lives under the package dir and
# is removed by the package manager itself.
service_postuninst ()
{
    log_step "Removing web interface"
    rm -fr -- "${WEB_DIR}/${PACKAGE}" >>"${INST_LOG}" 2>&1
    return 0
}
# Upgrade hook (save phase): stash the user's config, web customisations
# and rtorrent state in TMP_DIR so service_restore can put them back
# after the package contents are replaced.
service_save ()
{
    # Revision 8 introduces backward incompatible changes
    # (rewrite the cacert path so old configs keep working).
    if [ `echo ${SYNOPKG_OLD_PKGVER} | sed -r "s/^.*-([0-9]+)$/\1/"` -le 8 ]; then
        sed -i -e "s|http_cacert = .*|http_cacert = /etc/ssl/certs/ca-certificates.crt|g" ${SYNOPKG_PKGDEST}/var/.rtorrent.rc
    fi
    # Save the configuration file
    mv ${WEB_DIR}/${PACKAGE}/conf/config.php ${TMP_DIR}/ >>"${INST_LOG}" 2>&1
    if [ -f "${WEB_DIR}/${PACKAGE}/.htaccess" ]; then
        mv "${WEB_DIR}/${PACKAGE}/.htaccess" "${TMP_DIR}/" >>"${INST_LOG}" 2>&1
    fi
    # Plugin/theme data, rtorrent config and the torrent session state.
    cp -pr ${WEB_DIR}/${PACKAGE}/share/ ${TMP_DIR}/ >>"${INST_LOG}" 2>&1
    mv ${SYNOPKG_PKGDEST}/var/.rtorrent.rc ${TMP_DIR}/ >>"${INST_LOG}" 2>&1
    mv ${SYNOPKG_PKGDEST}/var/.session ${TMP_DIR}/ >>"${INST_LOG}" 2>&1
    return 0
}
# Ask PHP whether config.php already defines $pathToExternals for a program.
# $1 - program key (e.g. 'curl')
# Returns 0 when the entry is MISSING/empty, 1 when it is already defined
# — note the deliberately inverted semantics implied by the function name.
is_not_defined_external_program()
{
    program=$1
    php -r "require_once('${WEB_DIR}/${PACKAGE}/conf/config.php'); if (isset(\$pathToExternals['${program}']) && !empty(\$pathToExternals['${program}'])) { exit(1); } else { exit(0); }" >>"${INST_LOG}" 2>&1
    return $?
}
# Append a $pathToExternals entry to ruTorrent's config.php.
# $1 - program key, $2 - absolute path to record,
# $3 - example path shown in the trailing comment
define_external_program()
{
    local prog=$1 path=$2 example=$3
    printf "\$pathToExternals['%s'] = '%s'; // Something like %s. If empty, will be found in PATH\n" \
        "${prog}" "${path}" "${example}" \
        >> "${WEB_DIR}/${PACKAGE}/conf/config.php"
}
# Upgrade hook (restore phase): put back everything service_save stashed
# in TMP_DIR, backfill config entries that older package revisions left
# undefined, and migrate renamed rtorrent settings.
service_restore ()
{
    # Restore the configuration file
    mv -f "${TMP_DIR}/config.php" "${WEB_DIR}/${PACKAGE}/conf/" >>"${INST_LOG}" 2>&1
    if [ -f "${TMP_DIR}/.htaccess" ]; then
        mv -f "${TMP_DIR}/.htaccess" "${WEB_DIR}/${PACKAGE}/" >>"${INST_LOG}" 2>&1
        set_syno_permissions "${WEB_DIR}/${PACKAGE}/.htaccess" "${APACHE_USER}"
    fi
    # In previous versions the python entry had nothing defined,
    # here we define it if, and only if, python3 is actually installed
    if [ -f "${PYTHON_DIR}/bin/python3" ] && `is_not_defined_external_program 'python'`; then
        define_external_program 'python' "${SYNOPKG_PKGDEST}/env/bin/python3" '/usr/bin/python3'
    fi
    # In previous versions the pgrep entry had nothing defined
    if `is_not_defined_external_program 'pgrep'`; then
        define_external_program 'pgrep' "${SYNOPKG_PKGDEST}/bin/pgrep" '/usr/bin/pgrep'
    fi
    # In previous versions the sox entry had nothing defined
    if `is_not_defined_external_program 'sox'`; then
        define_external_program 'sox' "${SYNOPKG_PKGDEST}/bin/sox" '/usr/bin/sox'
    fi
    # In previous versions the mediainfo entry had nothing defined
    if `is_not_defined_external_program 'mediainfo'`; then
        define_external_program 'mediainfo' "${SYNOPKG_PKGDEST}/bin/mediainfo" '/usr/bin/mediainfo'
    fi
    # In previous versions the stat entry had nothing defined
    if `is_not_defined_external_program 'stat'`; then
        define_external_program 'stat' '/bin/stat' '/usr/bin/stat'
    fi
    if `is_not_defined_external_program 'id'`; then
        define_external_program 'id' '/bin/id' '/usr/bin/id'
    fi
    if `is_not_defined_external_program 'gzip'`; then
        define_external_program 'gzip' '/bin/gzip' '/usr/bin/gzip'
    fi
    if `is_not_defined_external_program 'curl'`; then
        define_external_program 'curl' "${SYNOPKG_PKGDEST}/bin/curl" '/usr/bin/curl'
    fi
    if `is_not_defined_external_program 'php'`; then
        define_external_program 'php' '/bin/php' '/usr/bin/php'
    fi
    set_syno_permissions "${WEB_DIR}/${PACKAGE}/conf/config.php" "${APACHE_USER}"
    # Restore saved plugin/theme data on top of the freshly installed share/.
    cp -pr ${TMP_DIR}/share/*/ ${WEB_DIR}/${PACKAGE}/share/ >>"${INST_LOG}" 2>&1
    set_syno_permissions "${WEB_DIR}/${PACKAGE}/share/" "${APACHE_USER}"
    mv ${TMP_DIR}/.rtorrent.rc ${SYNOPKG_PKGDEST}/var/ >>"${INST_LOG}" 2>&1
    if [ ! `grep 'http_cacert = ' "${SYNOPKG_PKGDEST}/var/.rtorrent.rc" | wc -l` -eq 0 ]; then
        # http_cacert command has been moved to network.http.cacert
        sed -i -e 's|http_cacert = \(.*\)|network.http.cacert = \1|g' ${SYNOPKG_PKGDEST}/var/.rtorrent.rc >>"${INST_LOG}" 2>&1
    fi
    mv ${TMP_DIR}/.session ${SYNOPKG_PKGDEST}/var/ >>"${INST_LOG}" 2>&1
    # Restore appropriate rights on the var directory
    set_unix_permissions "${SYNOPKG_PKGDEST}/var/"
    return 0
}
| true |
92d73f8cdd69fefec7db51c0325b3679e707adf1 | Shell | INOS-soft/pbi | /modules/net-p2p/retroshare/pbi.conf | UTF-8 | 1,031 | 2.875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# PBI Build Configuration
# Place over-rides and settings here
#
# XDG Desktop Menu Spec:
# http://standards.freedesktop.org/menu-spec/menu-spec-1.0.html
##############################################################################
# This file is sourced by the PBI build system; it only sets and exports
# the variables the builder expects — no commands are run here.
# Program Name
PBI_PROGNAME="RetroShare"
# Program Website
PBI_PROGWEB="http://retroshare.sourceforge.net/"
# Program Author / Vendor
PBI_PROGAUTHOR="RetroShare developers"
# Default Icon (Relative to %%PBI_APPDIR%% or resources/)
PBI_PROGICON="retroshare.png"
# The target port we are building
PBI_MAKEPORT="net-p2p/retroshare"
# Additional options for make.conf
PBI_MAKEOPTS=""
# Increment to trigger rebuild of PBI on build servers
PBI_BUILDKEY="00"
# This app needs to install as root
PBI_REQUIRESROOT="NO"
# Set the priority of this build
PBI_AB_PRIORITY="00"
# Set the use of tmpfs during autobuild
PBI_AB_NOTMPFS="NO"
export PBI_PROGNAME PBI_PROGWEB PBI_PROGAUTHOR PBI_PROGICON PBI_MAKEPORT PBI_MAKEOPTS PBI_BUILDKEY PBI_REQUIRESROOT PBI_AB_PRIORITY PBI_AB_NOTMPFS
| true |
4f1b547b5b940f4d8f5cb6ce78fa94679b349180 | Shell | Gustavroot/SP2118 | /ProyectoFinal/programa/transformer.sh | UTF-8 | 1,573 | 3.296875 | 3 | [] | no_license | #!/bin/bash
#----------------------------------------------------------------
# Some configuration: fetch/refresh the LEEDcicima repo in $HOME,
# then return to the directory the script was started from.
userD=`whoami`
currentDir=`pwd`
cd /home/$userD/
git clone https://github.com/Gustavroot/LEEDcicima.git
#cd LEEDcicima
#git pull origin master
cd $currentDir
#-------------------------------------------------------------------------
# Recommended resolution: 1280x710, for a 14" Acer Aspire
#640x480
resolucion=1280x710
# Path to a locally built ffmpeg binary
ffmpegPATH=/home/$userD/Downloads/src0/ffmpeg-0.11.1/ffmpeg
directVideos=../videos/
transformedPath=../videos/transf/
# Recreate the output directory from scratch on every run.
# NOTE(review): `rm -R` (without -f) errors when transf/ does not exist yet,
# and there is no `set -e`, so the script simply continues — confirm intended.
rm -R $transformedPath
mkdir $transformedPath
#echo "cosa"
# Build an array of file names from the directory listing.
# NOTE(review): parsing `ls` breaks on file names containing spaces;
# the counter below assumes `ls` ordering matches the loop's ordering.
stringList=`ls $directVideos`
arrayList=(${stringList//\ / })
#echo ${#arrayList[@]}
counter=0
for i in $(ls $directVideos)
do
  fileToProcess=${arrayList[$counter]}
  #echo $fileToProcess
  # Skip subdirectories (e.g. transf/ itself); transcode regular files.
  if [[ -d $directVideos$fileToProcess ]]; then
    continue
    #echo "Procesando archivo "${arrayList[$counter]}"..."
  else
    echo ""
    echo "Procesando archivo "${arrayList[$counter]}"..."
    # Re-encode video to mpeg1 at the target resolution; keep audio as-is.
    $ffmpegPATH -i $directVideos$fileToProcess -s $resolucion -b:v 512k -vcodec mpeg1video -acodec copy "TRANSFORMED"$fileToProcess
    mv ./TRANSFORMED$fileToProcess $transformedPath
    #transformedPath
  fi
  counter=`expr $counter + 1`
done
echo ""
echo "Transformaciones finalizadas..."
#echo ${arrayList[0]}
#----------------------------------------------------------------
# Some configuration
sudo aptitude install gnuplot
#-------------------------------------------------------------------------
| true |
83874f5f048aa7a893d7acf4532e95d6d72ca7b4 | Shell | kmdouglass/mm-docker | /libMMCore-ubuntu-16.04-x86_64/build.sh | UTF-8 | 594 | 2.796875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
# Abort on the first failed command: a broken configure or make must not
# be silently ignored. (The original said `set +o errexit`, which
# *disables* errexit — a no-op since errexit is off by default, and almost
# certainly a typo for `-o`.)
set -o errexit

cd /micro-manager/micro-manager
./autogen.sh
./configure \
	--prefix="/micro-manager/build" \
	--with-boost-libdir="/usr/lib/x86_64-linux-gnu" \
	--with-boost="/usr/include/boost" \
	--disable-java-app \
	--disable-install-dependency-jars \
	--with-java="no"

# BUILD_DEVICE_ADAPTERS selects a full tree build + install; otherwise only
# the MMDevice layer is compiled (enough to link libMMCore below).
if [ -n "${BUILD_DEVICE_ADAPTERS}" ];
then
    make -j "${NUM_CORES}"
    make install
else
    (cd MMDevice; make -j "${NUM_CORES}")
fi

cd MMCore
make -j "${NUM_CORES}"
# Stage the static core library where downstream images expect it.
mkdir -p /micro-manager/build/lib/micro-manager
cp .libs/libMMCore.a /micro-manager/build/lib/micro-manager
| true |
8c15e9b263e7ccf6bdb96c716fc4a680e5c8c2e0 | Shell | ShubraChowdhury/DeepReinforcementLearning | /RoboND-DeepRL-Project/gazebo/gazebo-arm.sh | UTF-8 | 492 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
echo " "
echo "configuring gazebo7 plugin paths"
echo "previous GAZEBO_PLUGIN_PATH=$GAZEBO_PLUGIN_PATH"

# Resolve the directory containing this script.
# $0 replaces the original ${BASH_SOURCE[0]}: the interpreter is /bin/sh,
# where the bash-only BASH_SOURCE array is not guaranteed to exist.
SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )"
echo "script directory $SCRIPT_DIR"

MY_PLUGIN_PATH=$SCRIPT_DIR/../lib
echo "plugin path $MY_PLUGIN_PATH"

# Prepend our lib dir so Gazebo finds the arm plugin first.
export GAZEBO_PLUGIN_PATH=$MY_PLUGIN_PATH:$GAZEBO_PLUGIN_PATH

echo "new GAZEBO_PLUGIN_PATH=$GAZEBO_PLUGIN_PATH"
echo " "
echo "starting gazebo7 simulator"
gazebo gazebo-arm.world --verbose
| true |
9e45d6d1361da0fe0f3cc01efa00a6f1a16b784d | Shell | shiznix/unity-gentoo | /profiles/unity_mirrors_update.sh | UTF-8 | 804 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# This script is needed as Ubuntu mirrors in Gentoo are unmaintained at present (refer b.g.o #494882)
## Usage: ./unity_mirrors_update.sh > thirdpartymirrors
# Grab list of working mirrors #
# lynx renders launchpad's mirror table to plain text with numbered links;
# everything below scrapes that dump.
lynx -width 500 -dump https://launchpad.net/ubuntu/+archivemirrors | sed 's/^ *//g' > /tmp/ubuntu_mirrors.txt || exit 1
# Extract link IDs and load into '${http_linkid_array[@]}'
# Only mirrors marked "up to date" are kept; the sed pulls the numeric
# link ID out of lynx's "[NN]http..." notation.
for each in $(grep -i "up to date" /tmp/ubuntu_mirrors.txt | sed 's/.*\[\([^]]*\)\]http.*/\1/g' | grep ^[0-9]); do
	http_linkid_array+=( "${each}" )
done
# Grep for link IDs and load matching URLs into '${http_link_array[@]}'
# Each ID maps to a "NN. URL" line in lynx's reference list at the bottom.
for each in $(echo "${http_linkid_array[@]}"); do
	http_link_array+=( $(grep "^${each}\\." /tmp/ubuntu_mirrors.txt | awk '{print $2}') )
done
# Final output # — a Gentoo thirdpartymirrors line: "unity URL URL ..."
echo "unity ${http_link_array[@]}"
| true |
0b7e59ffa8d0badbed561aedd60a1ae5ac6df6ff | Shell | ilventu/aur-mirror | /libblocksruntime/PKGBUILD | UTF-8 | 630 | 2.515625 | 3 | [] | no_license | # Maintainer: SJ_UnderWater
# Arch Linux PKGBUILD for libBlocksRuntime (compiler-rt Blocks runtime).
pkgname=libblocksruntime
pkgver=0.1
pkgrel=0
pkgdesc='Development headers for building software that uses blocks'
url='http://mark.heily.com/project/libblocksruntime'
arch=('i686' 'x86_64')
depends=('glibc')
license=('APACHE')
options=('!libtool')
source=(http://mark.heily.com/sites/mark.heily.com/files/$pkgname-$pkgver.tar.gz)
md5sums=('32b292d9898db21d1ccfc00920a08a2e')
# Configure and compile; -fPIC so the objects can land in a shared library.
build() {
	cd libBlocksRuntime-$pkgver
	msg2 "Configuring..."
	CFLAGS="-fPIC" ./configure --prefix=/usr
	msg2 "Making..."
	make
}
# Install into the staging dir makepkg packages up.
package() {
	cd libBlocksRuntime-$pkgver
	msg2 "Building..."
	make DESTDIR="$pkgdir" install
}
| true |
7a4ca9f314c8799361ca13f8d9ab3e50cd3bf994 | Shell | delkyd/alfheim_linux-PKGBUILDS | /libfusion-git/PKGBUILD | UTF-8 | 762 | 2.65625 | 3 | [] | no_license | # Maintainer: Kamil Krzyżanowski <kamnxt@kamnxt.com>
# Arch Linux PKGBUILD for the git snapshot of LibFusion (Qt5 game library).
_pkgname=libfusion
_reponame=LibFusion
pkgname=${_pkgname}-git
pkgver=20151023.633cb0e
pkgrel=1
pkgdesc="A game management library used by the Fusion Launcher"
arch=('i386' 'x86_64')
url="https://github.com/FusionLauncher/LibFusion"
license=('GPL2')
depends=('qt5-base')
makedepends=('git')
provides=("${_pkgname}")
conflicts=("${_pkgname}")
options=(!emptydirs)
source=("${_pkgname}::git+https://github.com/FusionLauncher/LibFusion")
md5sums=('SKIP')
# Derive a date.hash version from the latest commit, e.g. "20151023.633cb0e"
# (tr strips the dashes out of the short date).
pkgver() {
	cd "${srcdir}/${_pkgname}"
	git log -1 --format='%cd.%h' --date=short | tr -d -
}
build() {
	cd "${srcdir}/${_pkgname}"
	qmake-qt5 CONFIG+=release
	make
}
package() {
	cd "${srcdir}/${_pkgname}"
	make INSTALL_ROOT="$pkgdir/" install
}
# vim:set ts=2 sw=2 et:
ae4aed50ed885c94175d21cf8cdac020c974df2a | Shell | zoumingyin/font-glyphs | /deploy.sh | UTF-8 | 647 | 3.453125 | 3 | [] | no_license | #!/bin/bash
set -e
# Refuse to deploy from a dirty working tree.
if [[ -n $(git status -s) ]]; then
	>&2 echo "Error: Working directory not clean"
	exit 1
fi
>&2 echo "Generating glyphs"
./generate.sh
# Record the source commit so the deploy commit message can reference it.
SHA=`git rev-parse --verify HEAD`
git config --global user.name "lzxue"
git config --global user.email "lzx199065@gmail.com"
# Park the freshly generated file under a temp name so the branch switch
# below doesn't clobber it, then restore it on gh-pages.
mv glyphs.json glyphs.json.new
git checkout -b gh-pages origin/gh-pages || git checkout --orphan gh-pages
mv glyphs.json.new glyphs.json
# Drop the index so only the explicitly re-added artifacts are staged.
rm .git/index
git add -f glyphs
git add -f glyphs.json
# Commit/push only when the generated output actually changed.
if [ -n "`git diff --staged`" ]; then
	git commit -m "Deploy to GitHub Pages: ${SHA}"
	git push origin gh-pages
else
	>&2 echo "Nothing to deploy"
fi
git checkout master -f
674f0964014e55b74710470c8284c78d739f001c | Shell | richardimaoka/aws-ping-cross-region | /create-vpc-stacks.sh | UTF-8 | 2,814 | 4.15625 | 4 | [] | no_license | #!/bin/sh
# cd to the current directory as it runs other shell scripts
cd "$(dirname "$0")" || exit
# Minimal option parsing: only --stack-name VALUE is recognised.
# NOTE(review): the error message below also mentions "-f", but no -f
# option is handled here — confirm whether -f support was dropped.
for OPT in "$@"
do
    case "$OPT" in
        '--stack-name' )
            if [ -z "$2" ]; then
                echo "option -f or --stack-name requires an argument -- $1" 1>&2
                exit 1
            fi
            STACK_NAME="$2"
            shift 2
            ;;
    esac
done
if [ -z "${STACK_NAME}" ] ; then
    echo "ERROR: Option --stack-name needs to be specified"
    exit 1
fi
# Restrict SSH ingress to the machine running this script (/32 of its
# public IP); pass the account ID into the template.
AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)"
SSH_LOCATION="$(curl ifconfig.co 2> /dev/null)/32"
REGIONS=$(aws ec2 describe-regions --query "Regions[].RegionName" --output text)
################################
# Step 1: Create the VPCs
################################
for REGION in ${REGIONS}
do
    ################################
    # Step 1.1: Create if not exist
    ################################
    # describe-stacks fails when the stack does not exist yet — that
    # failure is the "needs creating" signal.
    if ! aws cloudformation describe-stacks --stack-name "${STACK_NAME}" --region "${REGION}" > /dev/null 2>&1; then
        echo "Creating a CloudFormation stack=${STACK_NAME} for region=${REGION}"
        # If it fails, an error message is displayed and it continues to the next REGION
        aws cloudformation create-stack \
            --stack-name "${STACK_NAME}" \
            --template-body file://cloudformation-vpc.yaml \
            --capabilities CAPABILITY_NAMED_IAM \
            --parameters ParameterKey=SSHLocation,ParameterValue="${SSH_LOCATION}" \
                ParameterKey=AWSAccountId,ParameterValue="${AWS_ACCOUNT_ID}" \
            --region "${REGION}" \
            --output text
    fi
    #################################
    # Step 1.2: Wait until it's ready
    #################################
    echo "Waiting until the CloudFormation stack is CREATE_COMPLETE or UPDATE_COMPLETE for ${REGION}"
    if ! aws cloudformation wait stack-create-complete --stack-name "${STACK_NAME}" --region "${REGION}"; then
        >&2 echo "ERROR: CloudFormation wait failed for ${REGION}"
        exit 1
    fi
done
##################################################################################
# Step 2: Create VPC Peering in all the regions
#
# The CloudFormation template of this repository does not define VPC Peering,
# because of its complex requester-accepter dependencies. Instead, VPC Peering is
# created by AWS CLI calls in create-vpc-peering.sh. See README.md for more detail.
###################################################################################
AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)"
# Peer every ordered pair of distinct regions (the helper script is
# expected to handle the requester/accepter sides).
for REGION1 in ${REGIONS}
do
    for REGION2 in ${REGIONS}
    do
        if [ "${REGION1}" != "${REGION2}" ] ; then
            ./create-vpc-peering.sh \
                --aws-account "${AWS_ACCOUNT_ID}" \
                --stack-name "${STACK_NAME}" \
                --region1 "${REGION1}" \
                --region2 "${REGION2}"
        fi
    done
done
| true |
9f32c2d567de49b96a62237bb0cdee8086d045c9 | Shell | aceleradora-TW/easy-beasy-v2 | /webservice/banco | UTF-8 | 777 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Helper for managing the easy-beasy PostgreSQL dev container via docker.
# Usage: ./banco {run-deprecated|run|create|console}
readonly CONTAINER_NAME=easy-beasy-db
readonly POSTGRES_PASSWORD=123456
readonly POSTGRES_USER=postgres
readonly DATABASE=easybeasy

case "$1" in
  run-deprecated)
    # Ephemeral container — data is lost when it stops. Note that
    # ${POSTGRES_USER} ("postgres") doubles as the docker image name here.
    docker run --rm --name "${CONTAINER_NAME}" -p 5432:5432 -e POSTGRES_PASSWORD="${POSTGRES_PASSWORD}" -d "${POSTGRES_USER}"
  ;;
  run)
    # Persistent container: data is kept under ~/.pgsql/<container>.
    mkdir -p ~/.pgsql/${CONTAINER_NAME}; docker run --name "${CONTAINER_NAME}" -v ~/.pgsql/${CONTAINER_NAME}:/var/lib/postgresql/data -p 5432:5432 -e POSTGRES_PASSWORD="${POSTGRES_PASSWORD}" -d "${POSTGRES_USER}";
  ;;
  create)
    # Create the application database inside the running container.
    docker exec -it "${CONTAINER_NAME}" psql -U "${POSTGRES_USER}" -c "CREATE DATABASE ${DATABASE}"
  ;;
  console)
    # BUG FIX: psql's -d flag takes a *database name*; the original passed
    # ${POSTGRES_PASSWORD}, i.e. tried to connect to a database literally
    # named "123456". Connect to the application database instead.
    docker exec -it "${CONTAINER_NAME}" psql -U "${POSTGRES_USER}" -d "${DATABASE}"
  ;;
  *)
    echo "nope"
  ;;
esac | true |
c30226ce03e025daae0905f616a846a77a5800af | Shell | michalwitwicki/.dotfiles | /.bashrc | UTF-8 | 5,457 | 3.5 | 4 | [] | no_license | #
#
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
# --- Launch tmux session on bash startup ---
# Attach to (or create) the "default" session, but only when tmux exists,
# the shell is interactive, and we are not already inside tmux/screen.
# NOTE(review): `|| exec tmux new ... && exit` means the shell exits after
# the new session ends — confirm that is the intended behaviour.
if command -v tmux &> /dev/null && [ -n "$PS1" ] && [[ ! "$TERM" =~ screen ]] && [[ ! "$TERM" =~ tmux ]] && [ -z "$TMUX" ]; then
  tmux a -t default || exec tmux new -s default && exit;
fi
# --- Settings ---
# user specific environment
if ! [[ "$PATH" =~ "$HOME/.local/bin:$HOME/bin:" ]]
then
    PATH="$HOME/.local/bin:$HOME/bin:$PATH"
fi
export PATH
# set default editor
export EDITOR=nvim
# don't put duplicate lines or lines starting with space in the history.
HISTCONTROL=ignoreboth
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=10000
HISTFILESIZE=20000
# append to the history file, don't overwrite it
shopt -s histappend
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# --- FZF settings ---
# export FZF_DEFAULT_COMMAND='fd --type file --hidden --no-ignore'
export FZF_DEFAULT_COMMAND='fd --type file --hidden'
export FZF_DEFAULT_OPTS=''
# --- Various aliases ---
alias v='nvim'
alias ls='ls --color=auto'
alias ll='ls -alF'
alias grep='grep --color=auto'
alias rs='rsync --exclude=.git --info=progress2 --stats -H -azr'
# warning to use trash-cli instead of rm
alias rm=' echo "This is not the command you are looking for."
          echo "Use trash-cli instead: https://github.com/andreafrancia/trash-cli"
          echo "If you in desperate need of rm use this -> \rm"; false'
# --- "Rsync makes life easier" the script ---
# rsync wrapper that authenticates over ssh with the password stored in
# ~/pass (via sshpass). All arguments are forwarded to rsync unchanged.
rsync_pass_file()
{
    local pass_file=~/pass
    if [ ! -f "$pass_file" ]; then
        echo "$pass_file does not exist." >&2
        # BUG FIX: the original plain `return` propagated the status of the
        # echo above (0), so a missing password file looked like success.
        return 1
    fi
    sshpass -f "$pass_file" rsync --exclude=.git --exclude='*cscope*' --info=progress2 -azvh "$@"
}
alias rsp='rsync_pass_file'
# --- GIT related functions and aliases ---
# Print the current git branch as "(branch)", or nothing when the current
# directory is not inside a git repository. Used by PS1 below.
git-parse-branch()
{
    git branch 2> /dev/null | sed -n 's/^\* \(.*\)/(\1)/p'
}
# Interactively pick a branch with fzf (local + remote, newest commits
# first) and print the selected branch name; prints nothing outside a repo.
git-fzf-branch()
{
    git rev-parse HEAD > /dev/null 2>&1 || return
    git branch --color=always --all --sort=-committerdate |
        grep -v HEAD |
        fzf --height 50% --ansi --no-multi --preview-window right:65% \
            --preview 'git log -n 50 --color=always --date=short --pretty="format:%C(auto)%cd %h%d %s" $(sed "s/.* //" <<< {})' |
        sed "s/.* //"
}
# Check out a branch chosen via git-fzf-branch; remote branches are
# checked out with --track so a matching local branch is created.
git-fzf-checkout()
{
    git rev-parse HEAD > /dev/null 2>&1 || return
    local branch
    branch=$(git-fzf-branch)
    if [[ "$branch" = "" ]]; then
        echo "No branch selected."
        return
    fi
    # If branch name starts with 'remotes/' then it is a remote branch. By
    # using --track and a remote branch name, it is the same as:
    # git checkout -b branchName --track origin/branchName
    if [[ "$branch" = 'remotes/'* ]]; then
        git checkout --track $branch
    else
        git checkout $branch;
    fi
}
# Browse `git log --graph` in fzf with a rich preview (commit header,
# dimmed diffstat, then a delta side-by-side diff); prints the selected
# short hash. The preview shell function extracts the hash from the
# highlighted graph line.
git-fzf-log ()
{
    PREVIEW_COMMAND='f() {
        set -- $(echo -- "$@" | grep -o "[a-f0-9]\{7\}")
        [ $# -eq 0 ] || (
            git show --no-patch --color=always $1
            echo
            git show --stat --format="" --color=always $1 |
            while read line; do
                tput dim
                echo " $line" | sed "s/\x1B\[m/\x1B\[2m/g"
                tput sgr0
            done |
            tac | sed "1 a \ " | tac
            echo
            git show $1 --format="" --color=always | delta --side-by-side -w ${FZF_PREVIEW_COLUMNS:-$COLUMNS}
        )
    }; f {}'
    ENTER_COMMAND='git show'
    git log --graph --color=always --format="%C(auto)%h %s%d " | \
        fzf --ansi --reverse --height 100% --no-sort --tiebreak=index \
            --preview-window=top:50 --preview "${PREVIEW_COMMAND}" | awk '{print $2}' #\
            # --bind "enter:execute:${ENTER_COMMAND}"
}
# Short git aliases; gas/gap walk every repo under the current directory.
alias gs='git status'
alias gd='git diff'
alias gll="git log --graph --abbrev-commit --decorate --format=format:'%C(bold blue)%h%C(reset) - %C(bold
green)(%ar)%C(reset) %C(white)%s%C(reset) %C(dim white)- %an%C(reset)%C(bold yellow)%d%C(reset)'"
alias gas='find . -name ".git" -type d | while read dir ; do sh -c "cd $dir/../ && echo "-----------------" && pwd && git status" ; done'
alias gap='find . -name ".git" -type d | while read dir ; do sh -c "cd $dir/../ && echo "-----------------" && pwd && git pull" ; done'
alias gc='git-fzf-checkout'
alias gl='git-fzf-log'
# --- FZF enhanced finding functions ---
# Fuzzy-pick a file (fzf uses FZF_DEFAULT_COMMAND, i.e. fd, as its source)
# with a bat preview; Enter replaces fzf with nvim on the selection.
fzf_find_file() {
    # nvim `fd --type file --hidden | fzf --preview='bat --color=always {}'`
    # fd --type file --hidden | \
    fzf --preview='bat --color=always {}' \
        --preview-window 'up:60%:border-bottom:~3' \
        --bind 'enter:become(nvim {})'
}
# Live ripgrep search: fzf re-runs rg on every keystroke (--disabled turns
# off fzf's own filtering) and Enter opens nvim at file:line.
fzf_find_grep() {
    RG_PREFIX="rg --column --line-number --no-heading --color=always --smart-case "
    INITIAL_QUERY="${*:-}"
    $RG_PREFIX $(printf %q "$INITIAL_QUERY") | \
        fzf --ansi \
            --disabled --query "$INITIAL_QUERY" \
            --bind "change:reload:sleep 0.1; $RG_PREFIX {q} || true" \
            --delimiter : \
            --preview 'bat --color=always {1} --highlight-line {2}' \
            --preview-window 'up:60%:border-bottom:+{2}+3/3:~3' \
            --bind 'enter:become(nvim {1} +{2})'
}
alias ff='fzf_find_file'
# NOTE(review): `fg` shadows the shell's job-control builtin of the same
# name — confirm that is acceptable.
alias fg='fzf_find_grep'
# --- Set prompt ---
# Bold prompt: [HH:MM:SS cwd](branch)$ — the branch comes from
# git-parse-branch above, evaluated at display time via \$(...).
export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][\[$(tput setaf 2)\]\t \[$(tput setaf 3)\]\W\[$(tput setaf 1)\]]\[$(tput setaf 5)\]\$(git-parse-branch)\[$(tput setaf 7)\]\\$ \[$(tput sgr0)\]"
# --- FFF cd on exit function ---
# Run the fff file manager, then cd into the directory it recorded on exit.
f() {
    fff "$@"
    cd "$(cat "${XDG_CACHE_HOME:=${HOME}/.cache}/fff/.fff_d")"
}
| true |
301c596e0a310321a5cc7ebd5cfa0bf84102f1a9 | Shell | edisonlil/k8s-install | /ubuntu/install-k8sadm-3.sh | UTF-8 | 940 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env bash
#sudo apt-cache madison $1
# Register the Aliyun Kubernetes apt mirror (key + sources list) and
# refresh the package index.
function add_sources() {
  curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
  cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
  apt-get update -y
}
# Install kubeadm/kubectl/kubelet. When K8S_VERSION is unset, take the
# newest version apt knows about and export K8S_VERSION as "vX.Y.Z";
# otherwise install "${K8S_VERSION}-00" as pinned by the caller.
function install_k8s() {
  version=$K8S_VERSION
  if test -z $version; then
    # Empty: fall back to the default (newest available) version.
    version=`apt-cache madison kubelet kubeadm kubectl | grep kubeadm | awk 'NR==1{print $3}'`
    apt-get install -y kubeadm=$version kubectl=$version kubelet=$version && systemctl enable --now kubelet
    export K8S_VERSION="v$(echo $version | sed "s/-00//g")"
  else
    version="${version}-00"
    apt-get install -y kubeadm=$version kubectl=$version kubelet=$version && systemctl enable --now kubelet
  fi
}
echo "--------------------------------開始安裝k8s-------------------------------------"
add_sources
install_k8s
| true |
2f5c153a25a5c417cba4fd015488ede586adb7d2 | Shell | MasuodSamarin/config-files | /bin/backupDropbox | UTF-8 | 584 | 3.515625 | 4 | [] | no_license | #!/bin/bash
set -e

# Abort with a message when a required directory is missing. The message
# goes to stdout and the script exits 1, exactly as the original did.
# $1 - label ("Source"/"Target"), $2 - path to check
require_dir() {
    if [[ ! -e $2 ]]; then
        echo "$1 directory for backup does not exist: '$2'"
        exit 1
    fi
}

# Differential Dropbox backup: mirror into $current, moving files that
# rsync deletes/overwrites into a dated differential-* directory.
date=$(date +"%y%m%d")

src="/home/shark/Dropbox/"
base="/media/extern/dropbox-backup/"
current="$base/Dropbox"
incremental="$base/differential-$date"

require_dir "Source" "$src"
require_dir "Target" "$base"

mkdir -p "$current"

rsync \
    --verbose \
    --archive \
    --delete \
    --backup \
    --progress \
    --backup-dir="$incremental" \
    --checksum \
    --one-file-system \
    "$src" "$current"
| true |
22a4b452614adbd1818e2e7ff67f837991ea397c | Shell | gauntface/dotfiles | /bootstrap.sh | UTF-8 | 5,145 | 3.96875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -eo pipefail
# Catch and log errors — any failing command triggers uncaughtError below.
trap uncaughtError ERR
# OS is "Darwin" on macOS, or e.g. "Linux - Ubuntu" on Linux (the distro
# name is appended from /etc/os-release).
OS="$(uname -s)"
case "${OS}" in
    Linux*)
        OS="${OS} - $(awk -F= '/^NAME/{print $2}' /etc/os-release | xargs)"
        ;;
esac
# ERR-trap handler: print a failure banner (plus the captured error log,
# when one was set up) and exit with the failing command's status.
function uncaughtError {
  # Capture $? immediately — the original read it *after* several echo
  # commands, by which point it had been reset to 0, so the script could
  # exit "successfully" from its own error handler.
  local status=$?
  echo -e "\n\t❌ Error\n"
  if [[ -n "${ERROR_LOG:-}" ]]; then
    echo -e "\t$(<"${ERROR_LOG}")"
  fi
  echo -e "\n\t😞 Sorry\n"
  # Guarantee a non-zero exit even if invoked when $? happened to be 0.
  if [[ ${status} -eq 0 ]]; then
    status=1
  fi
  exit "${status}"
}
# Interactively ask whether this is a corp machine; sets the global
# IS_CORP_INSTALL to "true"/"false" for later steps (Chrome skip, clone URL).
function isCorpInstall() {
  echo "💼 Is this a corp install? (Please enter a number)"
  select yn in "Yes" "No"; do
    case $yn in
      Yes )
        IS_CORP_INSTALL=true
        break;;
      No )
        IS_CORP_INSTALL=false
        break;;
    esac
  done
  echo ""
}
# Create the ~/Projects tree and define the global path variables
# (DOTFILES_DIR, TEMP_DIR, ERROR_LOG) the rest of the script relies on.
function setupDirectories() {
  PROJECTS_DIR="${HOME}/Projects"
  TOOLS_DIR="${HOME}/Projects/Tools"
  CODE_DIR="${HOME}/Projects/Code"
  DOTFILES_DIR="${HOME}/Projects/Tools/dotfiles"
  # Scratch space; command output is captured here for uncaughtError.
  TEMP_DIR="$(mktemp -d)"
  ERROR_LOG="${TEMP_DIR}/dotfile-install-err.log"
  echo -e "📂 Setting up directories..."
  echo -e "\tProjects:\t${PROJECTS_DIR}"
  echo -e "\tTools:\t\t${TOOLS_DIR}"
  echo -e "\tCode:\t\t${CODE_DIR}"
  echo -e "\tTemp:\t\t${TEMP_DIR}"
  mkdir -p ${PROJECTS_DIR}
  mkdir -p ${TOOLS_DIR}
  mkdir -p ${CODE_DIR}
  echo -e "\n\t✔️ Done\n"
}
# Install Google Chrome from the vendor repo (skipped on corp installs and
# a no-op on macOS), then pause so the user can sign in before continuing.
function installChrome() {
  if [[ "${IS_CORP_INSTALL}" = true ]]; then
    return
  fi
  echo -e "🌎 Installing Chrome..."
  chrome_version="google-chrome-stable"
  case "${OS}" in
    "Linux - Ubuntu"* | "Linux - Debian"*)
      # Add Google's signing key and apt source only once.
      if [ ! -f /etc/apt/sources.list.d/google-chrome.list ]; then
        wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add - &> ${ERROR_LOG}
        sudo sh -c 'echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list' &> ${ERROR_LOG}
      fi
      sudo apt-get update &> ${ERROR_LOG}
      sudo apt-get install -y $chrome_version &> ${ERROR_LOG}
      ;;
    "Linux - Fedora"*)
      sudo dnf install fedora-workstation-repositories &> ${ERROR_LOG}
      sudo dnf config-manager --set-enabled google-chrome &> ${ERROR_LOG}
      sudo dnf install -y $chrome_version &> ${ERROR_LOG}
      ;;
    Darwin*)
      # NOOP
      ;;
    *)
      echo "Running on unknown OS: ${OS}" > "$ERROR_LOG"
      uncaughtError
      exit 1
      ;;
  esac
  # Try and open chrome since it may have been install in the previous step but do not error if it fails
  (google-chrome gauntface.com || true) &> /dev/null &
  echo -e "\t👷 Please setup Chrome and press enter to continue\n"
  read -p ""
  echo -e "\n\t✅ Done\n"
}
# Install git plus xclip (needed later to copy the SSH key to the
# clipboard) via the distro's package manager. macOS falls through — no
# case arm matches — on the assumption git is already available.
function installGit() {
  echo -e "📦 Installing git + deps..."
  # Intentionally unquoted below so the list word-splits into packages.
  deps="git xclip"
  case "${OS}" in
    "Linux - Ubuntu"* | "Linux - Debian"*)
      sudo apt-get install -y $deps &> ${ERROR_LOG}
      ;;
    "Linux - Fedora"*)
      sudo dnf install -y $deps &> ${ERROR_LOG}
      ;;
  esac
  echo -e "\n\t✅ Done\n"
}
# Generate an ed25519 SSH key if one doesn't exist, load it into the
# agent, copy the public key to the clipboard, and pause while the user
# registers it on GitHub.
function setupSSHKeys() {
  echo -e "🔑 Setting up SSH Key..."
  expected_ssh_file=".ssh/id_ed25519"
  if [ ! -f "${HOME}/${expected_ssh_file}" ] ; then
    ssh-keygen -t ed25519 -C "hello@gaunt.dev"
    eval "$(ssh-agent -s)" &> "${ERROR_LOG}"
    # BUG FIX: the original ran `ssh-add "~/${expected_ssh_file}"` — tilde
    # does NOT expand inside double quotes, so ssh-add received a literal
    # "~/..." path and always failed.
    ssh-add "${HOME}/${expected_ssh_file}"
  fi
  case "${OS}" in
    Linux*)
      xclip -selection clipboard < "${HOME}/${expected_ssh_file}.pub"
      ;;
    Darwin*)
      pbcopy < "${HOME}/${expected_ssh_file}.pub"
      ;;
    *)
      echo "Running on unknown OS: ${OS}" > "$ERROR_LOG"
      uncaughtError
      exit 1
      ;;
  esac
  echo -e "📋 Your SSH key has been copied to your clipboard, please add it to https://github.com/settings/keys"
  # Try and open chrome since it may have been install in the previous step but do not error if it fails
  google-chrome github.com/settings/keys || true
  read -p "Press enter to continue"
  echo -e "\n\t✅ Done\n"
}
function cloneDotfiles() {
  # Clone the dotfiles repository — over HTTPS on corp installs (no SSH key),
  # over SSH otherwise — then hard-sync the checkout to origin/main.
  echo -e "🖥 Cloning dotfiles..."
  repo_url="git@github.com:gauntface/dotfiles.git"
  if [[ "${IS_CORP_INSTALL}" = true ]]; then
    repo_url="https://github.com/gauntface/dotfiles.git"
  fi
  git clone ${repo_url} ${DOTFILES_DIR} &> ${ERROR_LOG}
  (
    cd $DOTFILES_DIR
    git fetch origin
    git reset origin/main --hard
  )
  echo -e "\n\t✅ Done\n"
}
function runSetup() {
  # Hand off to the dotfiles repo's own setup script. `source` is used so the
  # script inherits this shell's environment variables.
  echo -e "👢 Bootstrap complete...\n"
  case "${OS}" in
    Linux* | Darwin*)
      # The Linux and Darwin arms were identical — merged into one.
      source "${DOTFILES_DIR}/setup.sh"
      ;;
    *)
      # Fix: the original interpolated ${unameOut}, which is never defined in
      # this script; every other branch reports ${OS}.
      echo "Running on unknown environment: ${OS}" > "$ERROR_LOG"
      uncaughtError
      exit 1
      ;;
  esac
}
# ---- main driver -------------------------------------------------------
# Runs the bootstrap steps in dependency order using the functions above.
# -e means 'enable interpretation of backslash escapes'
echo -e "\n👢 Bootstrapping @gauntface's Dotfiles"
echo -e "👟 Running on '${OS}'\n"
# NOTE(review): isCorpInstall and setupDirectories are defined earlier in
# this file (outside this excerpt) — presumably they set IS_CORP_INSTALL and
# create DOTFILES_DIR; confirm before reordering these calls.
isCorpInstall
setupDirectories
# Install Chrome first so we can set up GitHub with passwords etc next
installChrome
installGit
setupSSHKeys
cloneDotfiles
runSetup
| true |
d68900d1fb6decbc55a6af215f6a65d5cf292749 | Shell | weeble/ohos | /image-builder/profiles/skel/hooks.sh | UTF-8 | 222 | 2.65625 | 3 | [
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause",
"MIT",
"zlib-acknowledgement"
] | permissive | function hook_skel()
{
    # Skeleton hook: create the base directories every image profile needs,
    # then delegate to the selected profile's own hook script.
    echo --running skeleton hook ...
    mkdir -p ${IMG_ROOT}/var/ohos
    mkdir -p ${IMG_ROOT}/opt/update
    echo --running \'${PROFILE}\' hook
    # ${PROFILE_PATH}/hooks.sh is expected to define hook_prof — confirm.
    source ${PROFILE_PATH}/hooks.sh
    hook_prof
}
| true |
d51584a3cce2c19635b8c13e80a9459d269ac201 | Shell | imaginois/dotfiles-old | /scripts/zsh.sh | UTF-8 | 2,266 | 3.75 | 4 | [] | no_license | #!/usr/bin/env bash
# Configure zsh as the login shell, install Oh-My-Zsh and three plugins,
# then symlink this repo's zshrc/alias files into $HOME.
# NOTE(review): command_exists and DOT_HOME are defined by the caller —
# presumably the top-level dotfiles installer; confirm before running
# this script standalone.
if ! command_exists zsh; then
    echo "\n### zsh not found. Please install and then re-run installation scripts"
    exit 1
elif ! [[ $SHELL =~ .*zsh.* ]]; then
    echo "### Configuring zsh as default shell"
    chsh -s $(which zsh)
fi
if [ -d ~/.oh-my-zsh ]; then
    echo "### Oh-My-Zsh seems to be already isntalled. Skipping..."
else
    echo "### Installing Oh-My-Zsh"
    # NOTE(review): the installer is backgrounded (&) and nothing below waits
    # for it; the plugin clones that follow may race it — confirm intentional.
    sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)" &
fi
if [ ! -d ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting/ ]; then
    echo "### Insalling zsh-syntax-highlighting plugin"
    git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting
else
    echo "### zsh-syntax-highlighting already installed. Skipping..."
fi
if [ ! -d ~/.oh-my-zsh/custom/plugins/zsh-completions/ ]; then
    echo "### Insalling zsh-completions plugin"
    git clone https://github.com/zsh-users/zsh-completions ~/.oh-my-zsh/custom/plugins/zsh-completions
else
    echo "### zsh-completions already installed. Skipping..."
fi
if [ ! -d ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions/ ]; then
    echo "### Insalling zsh-autosuggestions plugin"
    git clone https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions
else
    echo "### zsh-zsh-autosuggestions already installed. Skipping..."
fi
# Back up an existing ~/.zshrc before symlinking ours over it.
if [ -L ~/.zshrc ] || [ -f ~/.zshrc ]; then
    echo "### ~/.zshrc already exists. Making backup to ~/.zshrc.backup ..."
    mv ~/.zshrc ~/.zshrc.backup
fi
if [ -L ~/.alias ] || [ -f ~/.alias ]; then
    echo "### ~/.alias already exists. Removing..."
    echo "#### Local aliases should be in ~/.zsh_alias and ~/.bash_alias respectively"
    rm -f ~/.alias
fi
echo "### Symlink .zshrc file from repo"
ln -s $DOT_HOME/shell/zshrc.symlink ~/.zshrc
# Use the .alias file to set all the needed aliases.
# if .zsh_alias or .bash_alias exists they wiil be sourced
# after the .alias file to override possible conflicts
echo "### Symlink .alias file from repo"
ln -s $DOT_HOME/shell/alias.symlink ~/.alias
# if ! command_exists zplug; then
#     echo "### installing zplug, a plugin manager for zsh - http://zplug.sh"
#     git clone https://github.com/zplug/zplug ~/.zplug
# fi
| true |
fcf22cc230434e90c936850ad9700484d6d725da | Shell | jaysh/dotfiles | /bootstrap.sh | UTF-8 | 262 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Exit as soon as any one of these commands fails
set -e
# Print commands as they're being run
set -x
# Shallow-clone the dotfiles repo into ~/dotfiles and run its setup script.
function setup_dotfiles() {
  git clone --depth=1 https://github.com/jaysh/dotfiles ~/dotfiles
  cd ~/dotfiles
  ./setup.sh
}
setup_dotfiles
| true |
9c254fde94307e99aba70fb07a16a14ef4b9b971 | Shell | AlbertoGP/boo2pdf | /convert.sh | UTF-8 | 809 | 2.984375 | 3 | [] | no_license | #!/bin/bash -e
# Converts a BookManager ebook to PDF via an intermediate HTML dump.
# $1 is the input book name
# $2 is the root name (w/o extension)
BOOKMANAGER=/usr/local/BookManager
export LD_LIBRARY_PATH=$BOOKMANAGER/sys
BOOTMP=/tmp/boo2pdf
FILENAME=$1
BASENAME=$2
# Run the Java boo2pdf converter under a virtual X server (it needs a display);
# -f keeps the temporary Xauthority cookie under $BOOTMP so we can delete it.
/usr/local/bin/xvfb-run.sh -f $BOOTMP/booX -s '-screen 0 1024x768x24' java -cp $BOOKMANAGER/bin:$BOOKMANAGER/sys/hlccommon.jar:$BOOKMANAGER/sys/XKS.jar boo2pdf -d $BOOKMANAGER/sys/ $FILENAME $BOOTMP/$BASENAME
# Fix image paths
sed -i s"|file:///||" $BOOTMP/$BASENAME.html
# Remove large indent that throws images off screen
sed -i 's| <A HREF="REF:PIC|<A HREF="REF:PIC|g' $BOOTMP/$BASENAME.html
# Convert to PDF
htmldoc --compression=9 --left 36 --webpage --outfile $BOOTMP/$BASENAME.pdf $BOOTMP/$BASENAME.html
# Remove temporary Xauthority from Xvfb
rm -f $BOOTMP/booX
| true |
e1f1ee3067699edbb81c2c8e8e13133f321ca356 | Shell | sinasab/cse5914 | /vagrant/web.sh | UTF-8 | 2,911 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Vagrant provisioner: installs sqlite3/redis, builds a python3 virtualenv
# with the brutus services, registers them as upstart jobs, then seeds the
# module registry over the local HTTP API.
# install the sqlite3 command line utility
apt-get -y install sqlite3
# install redis
add-apt-repository -y ppa:chris-lea/redis-server
apt-get update
apt-get -y install redis-server
# configure redis to bind to all interfaces (so we can reach it outside the VM)
sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/redis/redis.conf
# create a virtualenv for the application
apt-get -y install python-virtualenv python3-dev
virtualenv --python=/usr/bin/python3 /home/vagrant/env
# install python requirements in the virtualenv
source /home/vagrant/env/bin/activate
pip install -r /vagrant/src/brutus-api/requirements.txt
pip install -r /vagrant/src/brutus-module-math/requirements.txt
pip install -r /vagrant/src/brutus-module-weather/requirements.txt
pip install -r /vagrant/src/brutus-module-search/requirements.txt
pip install -r /vagrant/src/brutus-module-jokes/requirements.txt
# install the web app in-place (you can update the code without reinstalling)
pip install -e /vagrant/src/brutus-api
pip install -e /vagrant/src/brutus-module-math
pip install -e /vagrant/src/brutus-module-weather
pip install -e /vagrant/src/brutus-module-search
pip install -e /vagrant/src/brutus-module-jokes
# fix virtualenv permissions (because we're running as root)
chown -R vagrant:vagrant /home/vagrant/env
# setup the brutus-api web app and worker
install -o root -g root -m 0644 \
    /vagrant/vagrant/web-upstart-brutus-api.conf \
    /etc/init/brutus-api.conf
install -o root -g root -m 0644 \
    /vagrant/vagrant/web-upstart-brutus-module-math.conf \
    /etc/init/brutus-module-math.conf
install -o root -g root -m 0644 \
    /vagrant/vagrant/web-upstart-brutus-module-weather.conf \
    /etc/init/brutus-module-weather.conf
install -o root -g root -m 0644 \
    /vagrant/vagrant/web-upstart-brutus-module-search.conf \
    /etc/init/brutus-module-search.conf
install -o root -g root -m 0644 \
    /vagrant/vagrant/web-upstart-brutus-module-jokes.conf \
    /etc/init/brutus-module-jokes.conf
install -o root -g root -m 0644 \
    /vagrant/vagrant/web-upstart-brutus-api-worker.conf \
    /etc/init/brutus-api-worker.conf
initctl reload-configuration
start brutus-api
start brutus-module-math
start brutus-module-weather
start brutus-module-search
start brutus-module-jokes
start brutus-api-worker
# wait for services to start
sleep 3
# configure the vagrant user's profile
echo 'source /home/vagrant/env/bin/activate' >> /home/vagrant/.profile
# initialize the brutus-api backend database
# NOTE(review): brutus-api.sh presumably defines DATABASE (used below) —
# confirm against /vagrant/conf/brutus-api.sh.
source "/vagrant/conf/brutus-api.sh"
export LOCAL="http://127.0.0.1"
rm -f "${DATABASE}"
curl -L -s "$LOCAL:5000" > /dev/null
# Register each module (name,url pairs) with the API.
for i in math,$LOCAL:5010 weather,$LOCAL:5020 search,$LOCAL:5030 joke,$LOCAL:5040; do
    IFS=',' read name url <<< "${i}"
    curl -s -X POST -H "Content-Type: application/json" \
        -d "{\"name\":\"${name}\",\"url\":\"${url}\"}" \
        http://127.0.0.1:5000/api/module > /dev/null
done
| true |
6afbda1529eb18f9946283571101248c85633cb9 | Shell | SteveSatterfield/HEVf | /idea/src/ivutilities/ivtext3 | UTF-8 | 1,072 | 3.640625 | 4 | [] | no_license | #! /bin/sh
# ivtext3, ivutext3 - output an iv file of 3D text
b=${0##*/}
usage()
{
echo "Usage: $b string [spacing justification] [parts]"
echo
echo " $b is a special case. ivtext3 fully implements Text3"
echo
echo " string test string"
echo " spacing float; default is 1.0"
echo " justification 'LEFT','RIGHT','CENTER'; default is 'CENTER'"
echo " parts 'SIDES','FRONT','BACK','ALL'; defualt is 'FRONT'"
echo
exit
}
string=""
spacing="1"
justification="LEFT"
parts="FRONT"
case $# in
1)
string="$1"
;;
2)
string="$1"
parts="$2"
;;
3)
string="$1"
spacing="$2"
justification="$3"
;;
4)
string="$1"
spacing="$2"
justification="$3"
parts="$4"
;;
*)
usage
;;
esac
echo "#Inventor V2.0 ascii"
if [ "$b" == "ivutext3" ]
then
echo "Rotation { rotation 1 0 0 1.57079632679}"
fi
/bin/cat <<!end
Separator {
# Font {
# name "Times-Roman"
# size 2
# }
Text3 {
string ["$string"]
spacing ${spacing}
justification ${justification}
parts ${parts}
}
}
!end
| true |
673518dc348376e5e1ede34b077b46c547c7389d | Shell | osp/osp.work.annak | /nltkavan/scripts/index.sh | UTF-8 | 269 | 3.296875 | 3 | [] | no_license | #! /usr/bin/env bash
# Outputs a list of words in the text and the number of times they appear
# Ordered from the most frequent word to the least one.
#
# USAGE
#
# ./index.sh /path/to/file.txt
cat $1 | tr -d "[:punct:]" | tr " " "\n" | sort | uniq -ci | sort -nr
| true |
af8bda94623e2fecf652ad35285b5fd5beeff86d | Shell | resistr/hassio-addons | /pentair-screenlogic/run.sh | UTF-8 | 3,347 | 3.40625 | 3 | [] | no_license | #!/bin/bash
#set -e
# Home Assistant add-on bridge between MQTT and a Pentair ScreenLogic pool
# controller: subscribes to pentair/# commands, dispatches them to the
# node-screenlogic helper binaries, then pushes current state back to HA.
CONFIG_PATH=/data/options.json
# Pull add-on options out of the HA options file with jq.
export MQTTIP=$(jq --raw-output ".MQTT_server" $CONFIG_PATH) #"$(bashio::config 'MQTT_server')"
export MQTTPORT=$(jq --raw-output ".MQTT_port" $CONFIG_PATH) #"$(bashio::config 'MQTT_port')"
export MQTTUSER=$(jq --raw-output ".MQTT_user" $CONFIG_PATH) #"$(bashio::config 'MQTT_user')"
export MQTTPASS=$(jq --raw-output ".MQTT_password" $CONFIG_PATH) #"$(bashio::config 'MQTT_password')"
export SCREENLOGICIP=$(jq --raw-output ".ScreenLogic_server" $CONFIG_PATH) #"$(bashio::config 'ScreenLogic_server')"
export POOLCIRCUIT=$(jq --raw-output ".pool_circuit" $CONFIG_PATH) #"$(bashio::config 'pool_circuit')"
export SPACIRCUIT=$(jq --raw-output ".spa_circuit" $CONFIG_PATH) #"$(bashio::config 'spa_circuit')"
export POOLLIGHTCIRCUIT=$(jq --raw-output ".pool_light_circuit" $CONFIG_PATH) #"$(bashio::config 'pool_light_circuit')"
export SPALIGHTCIRCUIT=$(jq --raw-output ".spa_light_circuit" $CONFIG_PATH) #"$(bashio::config 'spa_light_circuit')"
export JETSCIRCUIT=$(jq --raw-output ".jets_circuit" $CONFIG_PATH) #"$(bashio::config 'jets_circuit')"
export CLEANERCIRCUIT=$(jq --raw-output ".cleaner_circuit" $CONFIG_PATH) #"$(bashio::config 'cleaner_circuit')"
# Map MQTT payload words to the numeric codes the set_* helpers expect.
declare -A MESSAGELOOKUP
MESSAGELOOKUP=( ["ON"]="1" ["OFF"]="0" ["spa"]="1" ["pool"]="0" ["heat"]="1")
cd /node_modules/node-screenlogic
node initialize.js
while [ 1 ]; do
  # change IP address (-h) port (-p) username (-u) and password (-P) to match your MQTT broker settings
  # Wait up to 10s (-W) for exactly one message (-C 1); a non-zero exit means
  # timeout or broker failure, and $? is tested on the very next line.
  PAYLOAD=`mosquitto_sub -h $MQTTIP -p $MQTTPORT -u $MQTTUSER -P $MQTTPASS -v -t pentair/# -W 10 -C 1`
  if [ $? -gt 0 ]; then
    echo "MQTT Client exited with non-zero status"
    sleep 10
  else
    echo "$PAYLOAD"
    # -v output is "topic message"; split the two fields with awk.
    TOPIC=`echo $PAYLOAD | awk '{print $1}'`
    MESSAGE=`echo $PAYLOAD | awk '{print $2}'`
    # Split the topic on "/" into TOPICPARTS, e.g. pentair/circuit/3/command.
    IFS="/"
    read -ra TOPICPARTS <<< $TOPIC
    TOPICROOT="${TOPICPARTS[0]}"
    echo "$TOPICROOT"
    if [[ $TOPICROOT == "pentair" ]]; then
      TOPICACTION="${TOPICPARTS[1]}"
      echo "$TOPICACTION"
      case $TOPICACTION in
        "circuit")
          # pentair/circuit/<number>/command  payload ON|OFF
          CIRCUITNUMBER="${TOPICPARTS[2]}"
          CIRCUITACTION="${TOPICPARTS[3]}"
          CIRCUITCOMMAND="${MESSAGELOOKUP[$MESSAGE]}"
          if [[ $CIRCUITACTION == "command" ]]; then
            echo "set_circuit $CIRCUITNUMBER $CIRCUITCOMMAND"
            ./set_circuit $CIRCUITNUMBER $CIRCUITCOMMAND
          fi
          ;;
        "heater")
          # pentair/heater/<pool|spa>/<mode|temperature>/set
          POOLSYSTEM="${MESSAGELOOKUP[${TOPICPARTS[2]}]}"
          HEATERACTION="${TOPICPARTS[3]}"
          HEATERCOMMAND="${TOPICPARTS[4]}"
          if [[ $HEATERACTION = "mode" ]] && [[ $HEATERCOMMAND = "set" ]]; then
            HEATERMESSAGE="${MESSAGELOOKUP[$MESSAGE]}"
            echo "set_heater $POOLSYSTEM $HEATERMESSAGE"
            ./set_heater $POOLSYSTEM $HEATERMESSAGE
          fi
          if [[ $HEATERACTION = "temperature" ]] && [[ $HEATERCOMMAND = "set" ]]; then
            echo "set_temp $POOLSYSTEM $MESSAGE"
            ./set_temp $POOLSYSTEM "$MESSAGE"
          fi
          ;;
        "light")
          # pentair/light/command  payload = light mode string
          LIGHTACTION="${TOPICPARTS[2]}"
          if [[ $LIGHTACTION = "command" ]]; then
            echo "set_light $MESSAGE"
            ./set_light "$MESSAGE"
          fi
      esac
    fi
  fi
  # change IP address (-h) port (-p) username (-u) and password (-P) to match your MQTT broker settings
  # send_state_to_ha.js emits "topic,message" lines; awk turns each into a
  # mosquitto_pub command which is executed by the trailing `bash -s`.
  node send_state_to_ha.js | awk -F, '{print "mosquitto_pub -h '"$MQTTIP"' -p '"$MQTTPORT"' -u '"$MQTTUSER"' -P '"$MQTTPASS"' -t " $1 " -m " $2}' | bash -s
done | true
cd706ec1b33710661a037ff272e3863c523ff3a1 | Shell | irepan/MythicalMisfits | /scripts/dockerOperations.sh | UTF-8 | 1,825 | 3.09375 | 3 | [] | no_license | #!/bin/bash
scriptDir=$(cd `dirname $0` ; pwd)
. $scriptDir/stack-commons.sh
STACK_NAME='MysfitsMicroServiceStack'
CLUSTER_STACK_NAME='MysfitsClusterStack'
TASK_FILE="$scriptDir/../TASK_PROPERTIES.outputs.json"
ECR_Container=$(getTaskOutputsValue $STACK_NAME MonoRepoUrl)
MONO_TASK_DEFINITION=$(getTaskOutputsValue $STACK_NAME MythicalMonolithTaskDefinition)
TASK_CLUSTER=$(getTaskOutputsValue $STACK_NAME MythicalEcsCluster)
MYTHICAL_TARGET_GROUP=$(getTaskOutputsValue $STACK_NAME MythicalMysfitsTargetGroup)
LOAD_BALANCER_NAME=$(getTaskOutputsValue $STACK_NAME MythicalLoadBalancer)
VPC_TASK_CLUSTER=$(getTaskOutputsValue $CLUSTER_STACK_NAME VPC)
PRIVATE_SUBNET_ONE=$(getTaskOutputsValue $CLUSTER_STACK_NAME PrivateSubnetOne)
PRIVATE_SUBNET_TWO=$(getTaskOutputsValue $CLUSTER_STACK_NAME PrivateSubnetTwo)
TASK_SECURITY_GROUP=$(getTaskOutputsValue $CLUSTER_STACK_NAME FargateContainerSecurityGroup)
# Login to docker
docker_login=$(aws ecr get-login --no-include-email --region $REGION)
$docker_login
CURDIR=$(pwd)
cd $scriptDir/../backend/app
build docker service container
docker build -t mythicalmysfits/service:latest .
#tag docker created
docker tag mythicalmysfits/service:latest $ECR_Container
#push docker container to ECR
docker push $ECR_Container
cd $CURDIR
aws ecs create-service --cluster $TASK_CLUSTER --service-name 'mysfits-service' --task-definition $MONO_TASK_DEFINITION \
--launch-type FARGATE --desired-count 1 \
--deployment-configuration "maximumPercent=200,minimumHealthyPercent=0" \
--network-configuration "awsvpcConfiguration={subnets=[\"$PRIVATE_SUBNET_ONE\",\"$PRIVATE_SUBNET_TWO\"],securityGroups=[\"$TASK_SECURITY_GROUP\"],assignPublicIp=\"DISABLED\"}" \
--load-balancers "targetGroupArn=$MYTHICAL_TARGET_GROUP,containerName=MythicalMysfits-Service,containerPort=8080"
| true |
d96273db8b3de5f340e315cb475f47eb0b47b888 | Shell | google-code-export/tokland | /dicts/maria_moliner/generate_dictd.sh | UTF-8 | 929 | 3.6875 | 4 | [] | no_license | #!/bin/bash
set -e
# Small stream/diagnostic helpers used by the dictionary build below.
debug() {
    # Print diagnostics to stderr so they never pollute the dictd stream.
    echo "$@" >&2
}
remove_html_tags() {
    # Strip every <...> tag from stdin.
    sed -e 's/<[^>]*>//g'
}
remove_leading_blank_lines() {
    # Drop blank lines that appear before the first non-empty line of stdin.
    sed -e '/./,$!d'
}
html2text() {
  # Render HTML from stdin to plain UTF-8 text with lynx, then trim the
  # blank lines lynx emits at the top of its dump.
  lynx -stdin -dump -pseudo_inlines -display_charset=utf-8 -assume_charset=utf-8 | \
    remove_leading_blank_lines
}
generate_jargon_input() {
  # Emit one ":word:definition" jargon-format entry per HTML file found
  # under $1, converting each file to text via process.rb + html2text.
  local DIRECTORY=$1
  FILES=$(find "$DIRECTORY" -type f -name '*.html')
  NFILES=$(echo "$FILES" | wc -l)
  # NOTE(review): `head -n10` limits processing to the first 10 files —
  # this looks like a leftover debugging cap; confirm before a full build.
  echo "$FILES" | sort | head -n10 | cat -n | while read INDEX HTMLFILE; do
    WORD=$(basename "$HTMLFILE" ".html")
    DEFINITION=$(ruby process.rb "$HTMLFILE" | html2text) || return 1
    debug "[$INDEX/$NFILES] $WORD"
    echo ":$WORD:$DEFINITION"
  done
}
generate_dict() {
  # Build a dictd dictionary named $2 from the HTML files in $1:
  # jargon input -> dictfmt (index + dict) -> dictzip compression.
  local DIRECTORY=$1
  local NAME=$2
  generate_jargon_input "$DIRECTORY" |
    dictfmt -j --utf8 --without-headword -s "$NAME" "$NAME"
  dictzip $NAME.dict
  # Report the two artifacts dictd needs.
  echo $NAME.index $NAME.dict.dz
}
generate_dict "html" "mariamoliner"
| true |
177856b8f287f04aca4889870c8686515a8e1fb1 | Shell | gpabloandres/siep-developer | /devlib/services/service_auth_api.sh | UTF-8 | 697 | 3.328125 | 3 | [] | no_license |
servicio_siep_auth_api() {
  # Launch the siep-auth-api container; when SIEP_AUTH_API_MOUNT == 1 the
  # local fork is bind-mounted into the container and composer deps are
  # installed afterwards.
  # NOTE(review): all referenced vars (SIEP_AUTH_API*, DB*, NETWORK,
  # CURRENT_DIR) and the `title` helper come from the caller's environment —
  # confirm against the dev-lib config that sources this file.
  MOUNT_VOLUME=$SIEP_AUTH_API_MOUNT
  title "Ejecutando $SIEP_AUTH_API - Montar volumen: $MOUNT_VOLUME"
  MOUNT_COMMAND=""
  if [ $MOUNT_VOLUME == 1 ]
  then
    echo -e "MONTANDO VOLUMEN"
    MOUNT_COMMAND="-v ${CURRENT_DIR}/forks/siep-auth-api:/var/www/html"
    # debug leftover? prints the working directory only
    pwd
  fi
  sudo docker run -itd --name $SIEP_AUTH_API \
    -e DB_HOST=$DB \
    -e DB_DATABASE=$DB_DATABASE \
    -e DB_USERNAME=$DB_USERNAME \
    -e DB_PASSWORD=$DB_PASSWORD \
    --network=$NETWORK \
    -p $SIEP_AUTH_API_PORT \
    $MOUNT_COMMAND \
    $SIEP_AUTH_API_IMG
  if [ $MOUNT_VOLUME == 1 ]
  then
    # Install PHP dependencies inside the container when using the mount.
    sudo docker exec -it $SIEP_AUTH_API sh -c "composer install && chmod 777 storage/ -R"
    pwd
  fi
}
| true |
0387e7278944519c7c2f42561501b01123b9106e | Shell | m081429/PanMutsRx | /1.1.1/PREPARE_REF_FILES.sh | UTF-8 | 14,435 | 3.796875 | 4 | [] | no_license | #! /usr/bin/env bash
#trap "exit 100; exit" ERR
####################
## Script Options ##
####################
#set -x
# usage — print the option summary below (heredoc) and terminate the script.
# NOTE(review): exits with status 0 even when invoked on bad arguments.
usage ()
{
cat << EOF
######################################################################
## script to install tools
## Script Options:
## -r <ref file directory> - (REQUIRED) required path to directory for ref files download
## -t <tool install config file> - (REQUIRED) required path to tool info config file
## -h - Display this usage/help text (No arg)
#############################################################################
EOF
exit
}
echo "Options specified: $@"
while getopts "r:t:h" OPTION; do
case $OPTION in
r) reffile_dir=$OPTARG ;;
t) toolinfo_file=$OPTARG ;;
h) usage
exit ;;
esac
done
shift $((OPTIND-1))
if [ -z "$reffile_dir" ] || [ -z "$toolinfo_file" ]; then
usage
fi
echo "******************"
echo "Ref file Directory: $reffile_dir"
echo "Tool info File: $toolinfo_file"
echo "******************"
echo "checking tool info tools"
source $toolinfo_file
#Toolinfo file
if [[ -w $toolinfo_file ]]
then
echo "Tool info file $toolinfo_file is found"
else
echo "Tool info file $toolinfo_file is not found"
exit
fi
#FTP links
LINK_GTF='ftp://ftp.ensembl.org/pub/release-75/gtf/homo_sapiens/Homo_sapiens.GRCh37.75.gtf.gz'
LINK_STAR_FUSION='https://data.broadinstitute.org/Trinity/CTAT_RESOURCE_LIB/GRCh37_gencode_v19_CTAT_lib_July272016.tar.gz'
LINK_1KG='ftp://gsapubftp-anonymous@ftp.broadinstitute.org/bundle/hg19/1000G_phase1.snps.high_confidence.hg19.sites.vcf.gz'
LINK_KG_MILLS='ftp://gsapubftp-anonymous@ftp.broadinstitute.org/bundle/hg19/Mills_and_1000G_gold_standard.indels.hg19.sites.vcf.gz'
LINK_DB_SNP='ftp://gsapubftp-anonymous@ftp.broadinstitute.org/bundle/hg19/dbsnp_138.hg19.excluding_sites_after_129.vcf.gz'
LINK_GENCODE='ftp://ftp.sanger.ac.uk/pub/gencode/Gencode_human/release_19/gencode.v19.annotation.gtf.gz'
#testing links
# Verify that every download URL still resolves before doing any real work.
mkdir -p $reffile_dir
links_not_worked=" "
# check_link <name> <url> — probe <url> with `wget --spider`; if the remote
# file no longer exists, report it and append <name> to links_not_worked.
# (Refactor: the original repeated this stanza verbatim for all six links.)
check_link() {
    local name=$1 url=$2
    wget --spider -v "$url" > $reffile_dir/testlink 2>&1
    # wget prints "... exists." when the remote file is present.
    testp=`grep -c "exists." $reffile_dir/testlink`
    if [ $testp -eq 0 ]
    then
        echo "$name ftp LINK $url expired. Please replace the old link with new one in this script"
        links_not_worked=$links_not_worked" $name"
    fi
    rm $reffile_dir/testlink
}
check_link GTF "$LINK_GTF"
check_link STAR_FUSION "$LINK_STAR_FUSION"
check_link 1KG "$LINK_1KG"
check_link KG_MILLS "$LINK_KG_MILLS"
check_link DB_SNP "$LINK_DB_SNP"
check_link GENCODE "$LINK_GENCODE"
# links_not_worked is still the initial single space iff every probe passed.
if [[ $links_not_worked == " " ]]; then
    echo "All links are properly working"
else
    echo "All links are not working properly" $links_not_worked
    exit 1
fi
#GTF file
echo "downloading gtf file from encode"
cd $reffile_dir
#wget ftp://ftp.ensembl.org/pub/release-75/gtf/homo_sapiens/Homo_sapiens.GRCh37.75.gtf.gz
wget $LINK_GTF
FILE="Homo_sapiens.GRCh37.75.gtf.gz"
if [[ -r "$FILE" ]]
then
gunzip $FILE
else
echo "File '$FILE' is not downloaded properly"
exit
fi
#creating Coding file
FILE="$WORKFLOW_PATH/perp_codingfile.pl";
if [[ -x $FILE ]]
then
echo "Workflow script $FILE is found"
else
echo "Workflow script $FILE is not found"
exit
fi
#bedtools
FILE="$BEDTOOLS/mergeBed";
if [[ -x $FILE ]]
then
echo "Mergebed tool $FILE is found"
else
echo "Mergebed tools $FILE is not found"
exit
fi
echo " creating coding file from annovare ref file"
FILE="$ANNOVAR/humandb/hg19_refGene.txt"
if [[ -r "$FILE" ]]
then
perl $WORKFLOW_PATH/perp_codingfile.pl $ANNOVAR/humandb/hg19_refGene.txt coding.bed
sort -T $ANNOVAR/humandb/ -k1,1 -k2,2n -k3,3n coding.bed|uniq> coding1.bed
$BEDTOOLS/mergeBed -i coding1.bed > coding.bed
else
echo "File '$FILE' is not found in ANNOVAR directory.First run the tool install script and then run the ref file script"
exit
fi
#star fusion files
#wget https://data.broadinstitute.org/Trinity/CTAT_RESOURCE_LIB/GRCh37_gencode_v19_CTAT_lib_July272016.tar.gz
wget $LINK_STAR_FUSION
FILE="GRCh37_gencode_v19_CTAT_lib_July272016.tar.gz"
if [[ -r "$FILE" ]]
then
tar -zxvf $FILE
else
echo "File '$FILE' is not downloaded properly"
exit
fi
cd GRCh37_gencode_v19_CTAT_lib_July272016
if [[ -x "$SAMTOOLS" ]]
then
echo "SAMTOOLS $SAMTOOLS found or is executable"
else
echo "SAMTOOLS $SAMTOOLS nor found or not executable"
exit
fi
#Ref file
$SAMTOOLS faidx ref_genome.fa
if [[ -x $PICARD ]]
then
$JAVA -jar $PICARD CreateSequenceDictionary R=ref_genome.fa O=ref_genome.dict
else
echo "PICARD tool $GATK is not found"
exit
fi
cd $reffile_dir
##GATK recalibration sites
#wget ftp://gsapubftp-anonymous@ftp.broadinstitute.org/bundle/hg19/1000G_phase1.snps.high_confidence.hg19.sites.vcf.gz
wget $LINK_1KG
FILE="1000G_phase1.snps.high_confidence.hg19.sites.vcf.gz"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not downloaded properly"
exit
fi
#wget ftp://gsapubftp-anonymous@ftp.broadinstitute.org/bundle/hg19/Mills_and_1000G_gold_standard.indels.hg19.sites.vcf.gz
wget $LINK_KG_MILLS
FILE="Mills_and_1000G_gold_standard.indels.hg19.sites.vcf.gz"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not downloaded properly"
exit
fi
#wget ftp://gsapubftp-anonymous@ftp.broadinstitute.org/bundle/hg19/dbsnp_138.hg19.excluding_sites_after_129.vcf.gz
wget $LINK_DB_SNP
FILE="dbsnp_138.hg19.excluding_sites_after_129.vcf.gz"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not downloaded properly"
exit
fi
BGZIP="$HTSDIR/bgzip"
TABIX="$HTSDIR/tabix"
if [[ -x $TABIX ]]
then
gunzip 1000G_phase1.snps.high_confidence.hg19.sites.vcf.gz Mills_and_1000G_gold_standard.indels.hg19.sites.vcf.gz dbsnp_138.hg19.excluding_sites_after_129.vcf.gz
$BGZIP 1000G_phase1.snps.high_confidence.hg19.sites.vcf
$BGZIP Mills_and_1000G_gold_standard.indels.hg19.sites.vcf
$BGZIP dbsnp_138.hg19.excluding_sites_after_129.vcf
$TABIX -f -p vcf 1000G_phase1.snps.high_confidence.hg19.sites.vcf.gz
$TABIX -f -p vcf Mills_and_1000G_gold_standard.indels.hg19.sites.vcf.gz
$TABIX -f -p vcf dbsnp_138.hg19.excluding_sites_after_129.vcf.gz
else
echo "tabix $TABIX is not found"
exit
fi
#annovar
cd $reffile_dir
mkdir ANNOVAR_humandb/
FILE="$ANNOVAR/annotate_variation.pl";
if [[ -x $FILE ]]
then
$PERL $ANNOVAR/annotate_variation.pl -downdb -buildver hg19 -webfrom annovar refGene ANNOVAR_humandb/
cd ANNOVAR_humandb
FILE="hg19_refGene.txt"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not downloaded properly"
exit
fi
FILE="hg19_refGeneMrna.fa"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not downloaded properly"
exit
fi
#$PERL $ANNOVAR/annotate_variation.pl -downdb -buildver hg19 -webfrom annovar ensGene ANNOVAR_humandb/
#$PERL $ANNOVAR/annotate_variation.pl -downdb -buildver hg19 -webfrom annovar knownGene ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -build hg19 -downdb tfbsConsSites ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -build hg19 -downdb cytoBand ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -build hg19 -downdb targetScanS ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -build hg19 -downdb genomicSuperDups ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -build hg19 -downdb dgvMerged ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -build hg19 -downdb gwasCatalog ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -build hg19 -downdb wgEncodeBroadHmmGm12878HMM ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -downdb 1000g2012apr ANNOVAR_humandb/ -buildver hg19
# $PERL $ANNOVAR/annotate_variation.pl -downdb -buildver hg19 -webfrom annovar snp138 ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -build hg19 -downdb -webfrom annovar ljb23_sift ANNOVAR_humandb/ -buildver hg19
# $PERL $ANNOVAR/annotate_variation.pl -downdb -webfrom annovar -build hg19 esp6500si_all ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -downdb -webfrom annovar -build hg19 exac03 ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -downdb -buildver hg19 -webfrom annovar gerp++gt2 ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -downdb -buildver hg19 -webfrom annovar clinvar_20140211 ANNOVAR_humandb/
# $PERL $ANNOVAR/annotate_variation.pl -downdb -buildver hg19 -webfrom annovar cosmic68 ANNOVAR_humandb/
else
echo "ANNOVAR $FILE is not found or not executable"
exit
fi
#star reference files
cd $reffile_dir
mkdir STAR/
if [[ -x $STAR ]]
then
$STAR --runMode genomeGenerate --genomeDir $reffile_dir/STAR --genomeFastaFiles $reffile_dir/GRCh37_gencode_v19_CTAT_lib_July272016/ref_genome.fa
cd STAR
FILE="SAindex"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not processed properly"
exit
fi
FILE="SA"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not processed properly"
exit
fi
FILE="Genome"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not processed properly"
exit
fi
FILE="chrNameLength.txt"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not processed properly"
exit
fi
else
echo "STAR tool $STAR is not found"
exit
fi
#gsnap reference files
cd $reffile_dir
#wget ftp://ftp.sanger.ac.uk/pub/gencode/Gencode_human/release_19/gencode.v19.annotation.gtf.gz
wget $LINK_GENCODE
FILE="gencode.v19.annotation.gtf.gz"
if [[ -r "$FILE" ]]
then
gunzip $FILE
FILE="gencode.v19.annotation.gtf"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not processed properly"
exit
fi
else
echo "File $FILE is not downloaded properly"
exit
fi
mkdir GSNAP/
cd GSNAP
fasta=$reffile_dir/GRCh37_gencode_v19_CTAT_lib_July272016/ref_genome.fa
genomeDir=$reffile_dir/GSNAP
if [[ -x $GSNAP ]]
then
gsnapdir=`echo $GSNAP|sed -e 's/gsnap$//g'`
else
echo "GSNAP tool $GSNAP is not found"
exit
fi
$gsnapdir/gmap_build -d $genomeDir $fasta
FILE="GSNAP.sarray"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not processed properly"
exit
fi
##create junction files using gencode annotation v19
cat $reffile_dir/gencode.v19.annotation.gtf | $gsnapdir/gtf_splicesites >$genomeDir/GSNAP.maps/gencode.v19.splicesites
FILE="$genomeDir/GSNAP.maps/gencode.v19.splicesites"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not processed properly"
exit
fi
cat $reffile_dir/gencode.v19.annotation.gtf | $gsnapdir/gtf_introns >$genomeDir/GSNAP.maps/gencode.v19.introns
FILE="$genomeDir/GSNAP.maps/gencode.v19.introns"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not processed properly"
exit
fi
cd $genomeDir/GSNAP.maps/
cat gencode.v19.splicesites | $gsnapdir/iit_store -o gencode.v19.splicesites.iit
FILE="gencode.v19.splicesites.iit"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not processed properly"
exit
fi
cat gencode.v19.introns | $gsnapdir/iit_store -o gencode.v19.introns.iit
FILE="gencode.v19.introns.iit"
if [[ ! -r "$FILE" ]]
then
echo "File '$FILE' is not processed properly"
exit
fi
cd $reffile_dir
#processing the star fusion reference files
if [[ -x $STAR ]]
then
stardir=`echo $STAR|sed -e 's/STAR$//g'`
else
echo "STAR tool $STAR is not found"
exit
fi
if [[ -x $GSNAP ]]
then
gsnapdir=`echo $GSNAP|sed -e 's/gsnap$//g'`
else
echo "GSNAP tool $GSNAP is not found"
exit
fi
#bowtie
FILE="$BOWTIEDIR/bowtie-build";
if [[ -x $FILE ]]
then
echo "bowtie build tool $FILE is found"
else
echo "bowtie build tools $FILE is not found"
exit
fi
export PATH=$PATH:$stardir:$gsnapdir:$BOWTIEDIR
cd GRCh37_gencode_v19_CTAT_lib_July272016
if [[ -x $PERL ]]
then
if [[ -x $STAR_FUSION ]]
then
STAR_FUSION_HOME=`echo $STAR_FUSION|sed -e 's/STAR-Fusion$//g'`
$STAR_FUSION_HOME/FusionFilter/prep_genome_lib.pl --genome_fa ref_genome.fa --gtf ref_annot.gtf --blast_pairs blast_pairs.outfmt6.gz
else
echo "STAR FUSION $STAR_FUSION is not found"
exit
fi
else
echo "PERL $PERL is not found"
exit
fi
echo "REF_GENOME=$reffile_dir/GRCh37_gencode_v19_CTAT_lib_July272016/ref_genome.fa" >> $toolinfo_file
echo 'GATK_BASE_RECALIBRATION_KNOWNSITES="-knownSites '"$reffile_dir/1000G_phase1.snps.high_confidence.hg19.sites.vcf.gz"' -knownSites '"$reffile_dir/Mills_and_1000G_gold_standard.indels.hg19.sites.vcf.gz"' -knownSites '"$reffile_dir/dbsnp_138.hg19.excluding_sites_after_129.vcf.gz"'"'>> $toolinfo_file
echo "STAR_REF=$reffile_dir/STAR">> $toolinfo_file
echo 'GSNAP_OPTION="-t 4 -A sam -D '"$reffile_dir/GSNAP"' -d GSNAP --use-splicing='"$reffile_dir/GSNAP/GSNAP.maps/gencode.v19.splicesites.iit"' -N 1 --read-group-id=group1 --read-group-name=sample1 --read-group-library=lib1 --read-group-platform=illumina"'>> $toolinfo_file
echo 'GATK_HAPLOTYPE_CALLER_OPTION=" -stand_call_conf 20.0 -ERCIS 50 -pcrModel HOSTILE -stand_emit_conf 20.0 -mmq 20 -L '"$reffile_dir/coding.bed"' "'>> $toolinfo_file
echo 'ANNOVAR_OPTION="'"$reffile_dir/ANNOVAR_humandb/"' -buildver hg19 -remove -protocol refGene -operation g -nastring ."'>> $toolinfo_file
echo "STAR_FUSION_CTAT_LIB=$reffile_dir/GRCh37_gencode_v19_CTAT_lib_July272016">> $toolinfo_file
echo 'FEATURECOUNTS_OPTION="-t exon -g gene_name -a '"$reffile_dir/Homo_sapiens.GRCh37.75.gtf"'"'>> $toolinfo_file
| true |
cad77ca7aec5dd597b8a7d20f7888bba139ee9b2 | Shell | hcw-00/Beginning_Linux_Shell_Script_Programming | /chapter7_bash쉘프로그래밍/loops/while/say.sh | UTF-8 | 237 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# Interactive demo loop: keeps prompting until the user types q or Q.
# (User-facing strings are Korean; the first line says "type q to quit".)
echo q를 입력하면 종료햡니다.
go=start
# Loop until `go` is emptied by the q/Q branch below.
while [[ -n $go ]]
do
    echo -n 종료하려면 q를 입력하세요 :
    read word
    # [Qq] is a one-character glob pattern: matches exactly "q" or "Q".
    if [[ $word == [Qq] ]]
    then
        echo q를 입력하셨네요. 종료합니다!
        go=
    fi
done
| true |
dbad069db371e31498bd23ffc36690d6c2276375 | Shell | Bwen/docker-gui-apps | /clion/clion | UTF-8 | 1,151 | 2.84375 | 3 | [] | no_license | #!/bin/sh
# Collect -v bind-mount flags for whichever git config profiles exist on
# the host, so the containerized CLion sees the same git identity setup.
# NOTE(review): this file starts with #!/bin/sh but uses [[ ]], a bashism —
# confirm /bin/sh is bash on the target systems or switch the shebang.
GIT_PROFILES=""
if [[ -f "${HOME}/.gitconfig" ]]; then
    GIT_PROFILES="-v ${HOME}/.gitconfig:/home/gui/.gitconfig"
fi
if [[ -f "${HOME}/.gitconfig-work" ]]; then
    GIT_PROFILES="${GIT_PROFILES} -v ${HOME}/.gitconfig-work:/home/gui/.gitconfig-work"
fi
if [[ -f "${HOME}/.gitconfig-personal" ]]; then
    GIT_PROFILES="${GIT_PROFILES} -v ${HOME}/.gitconfig-personal:/home/gui/.gitconfig-personal"
fi
# Run the CLion GUI container: host networking + X11 socket/Xauthority for
# display, named volumes to persist caches/config/licenses across runs.
# ($GIT_PROFILES is intentionally unquoted so it expands to multiple flags.)
docker run \
    --rm \
    --net=host \
    --memory 2048mb \
    --name clion \
    -e DISPLAY=$DISPLAY \
    -u $(id -u):1000 \
    -v /etc/localtime:/etc/localtime:ro \
    -v /tmp/.X11-unix:/tmp/.X11-unix \
    -v ${HOME}/.Xauthority:/home/gui/.Xauthority \
    -v ${HOME}/Workspace:/home/gui/Workspace \
    $GIT_PROFILES \
    -v clion_java:/home/gui/.java \
    -v clion_cache:/home/gui/.cache \
    -v clion_local:/home/gui/.local \
    -v clion_config:/home/gui/.config \
    -v clion_plugins:/gui-app/plugins \
    -v clion_license:/gui-app/license \
    -v clion_perfs:/home/gui/.Clion2020.1 \
    -v clion_cargo:/home/gui/.cargo \
    -v clion_xcargo:/home/gui/.xcargo \
    -v clion_rustup:/home/gui/.rustup \
    -v /etc/hosts:/etc/hosts \
    -v /dev/shm:/dev/shm \
    apps/clion
| true |
fe74438558940eebdfec993de67e3fabd4b25d19 | Shell | VE3RD/Nextion | /timetest.sh | UTF-8 | 594 | 3.640625 | 4 | [] | no_license | #!/bin/bash
#########################################################
# Routine to determin the run time of a scipt #
# $1 = script Name #
# $2 - parameter #
# #
# VE3RD 2020-02-08 #
#########################################################
if [ ! "$1" ]; then
exit
fi
start=$(date +%s.%N)
# Here you can place your function
sudo $1 $2
duration=$(echo "$(date +%s.%N) - $start" | bc)
execution_time=`printf "%.2f seconds" $duration`
echo "Script Completed: $execution_time"
| true |
efba19cd31e4427dcbd52339171d7d95415122e8 | Shell | justin-taylor/server_conf | /gunicorn.sh | UTF-8 | 512 | 2.84375 | 3 | [] | no_license | #!/bin/bash
set -e
# Launcher for the Django app under gunicorn; paths are relative to the
# deployed release under /srv/example.com.
PIDFILE=/srv/example.com/gunicorn.pid
LOGFILE=/srv/example.com/logs/gunicorn.log
LOGDIR=$(dirname $LOGFILE)
NUM_WORKERS=3
# user/group to run as
USER=root
GROUP=root
#DJANGO_SETTINGS_MODULE='production.settings'
cd /srv/example.com/application/current/project
#use virtualenv
source ../../../bin/activate
# Make sure the log directory exists before gunicorn tries to write to it.
test -d $LOGDIR || mkdir -p $LOGDIR
# exec replaces this shell so the process manager supervises gunicorn itself.
exec ../../../bin/gunicorn_django -w $NUM_WORKERS \
    --user=$USER --group=$GROUP --log-level=debug \
    --log-file=$LOGFILE --pid=$PIDFILE 2>>$LOGFILE
| true |
fc5f6580ef506d10215c3ffd1ea79b1e73f42ce9 | Shell | JBetoReyes/Learning-Ext-4 | /provisionAsVagrant.sh | UTF-8 | 994 | 3.828125 | 4 | [] | no_license | export isNvmInstalled=$false
export NVM_DIR="$HOME/.nvm" || exit $?
# Install nvm when its activation script is missing.
# (-s tests that the file exists and is non-empty.)
if [ ! -s "$NVM_DIR/nvm.sh" ]; then
    echo "Installing nvm..."
    wget -qO- https://raw.githubusercontent.com/creationix/nvm/v0.33.8/install.sh | bash || exit $?
    [ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" || exit $? # This loads nvm
    # Install node 8 when node is not on PATH yet.
    # NOTE: the original condition had a stray `; >/dev/null;` after the
    # test, which made it unconditionally true.
    if ! [ -x "$(command -v node)" ]; then
        nvm install 8 && nvm use 8 || exit $?
    fi
fi
# Record whether nvm is now installed (the echo deliberately runs before
# the flag is updated, matching the original ordering).
if [ -s "$NVM_DIR/nvm.sh" ]; then
    echo "==========nvm installed ${isNvmInstalled} =========="
    isNvmInstalled=true
fi
# Install gulp globally when missing.
if ! [ -x "$(command -v gulp)" ]; then
    echo "Installing gulp"
    npm install -g gulp || exit $?
fi
# Link the shared project folder into ~/Dev.
# NOTE: the original tested `[ ! -d "~/Dev" ]` — tilde does not expand
# inside quotes, so the check was always true and mkdir failed (and
# aborted the script) on every re-provision.
if [ ! -d "$HOME/Dev" ]; then
    mkdir "$HOME/Dev" || exit $?
    ln -s /vagrant/Learning-Ext/ "$HOME/Dev/Learning-Ext" || exit $?
fi
| true |
b18d040a7f92ebb744b0b5253d02b1a89f4fc80c | Shell | DaCoolX/sheepit-docker | /runrenderer.sh | UTF-8 | 1,220 | 3.703125 | 4 | [
"CC0-1.0"
] | permissive | #!/bin/bash
echo "Checking for client updates..."
# All client jars live in the cache volume; abort if it is missing.
cd /sheep-cache || exit 1

# Ask the server for the current client version; the version string is
# parsed out of the Content-Disposition filename header.
latestVersion=$(curl --silent --head https://www.sheepit-renderfarm.com/media/applet/client-latest.php | \
    grep -Po '(?i)content-disposition:.*filename="?(?-i)\Ksheepit-client-[\d\.]+\d')

if [ -z "$latestVersion" ]; then
    # Empty latestVersion is a good indicator of a critical failure
    echo "!!! Failed parsing version information! Aborting !!!"
    echo "Possible causes and troubleshooting steps:"
    echo "1. Check for internet connectivity \(Routes, DNS, Proxy\)"
    echo "2. Check the status of SheepIt via the SheepIt website: https://www.sheepit-renderfarm.com/"
    echo "3. Open an issue on Github if problems persists after 1. and 2."
    exit 1
elif [ ! -e "$latestVersion.jar" ]; then
    echo "Updating client to version $latestVersion..."
    # Remove stale client jars before downloading the new one.
    rm -f sheepit-client*.jar
    curl https://www.sheepit-renderfarm.com/media/applet/client-latest.php -o "$latestVersion.jar"
    echo "Update finished"
else
    echo "No updates found"
fi

echo "Starting client:"
# $extra_opt is intentionally left unquoted so it may expand to several
# additional command-line options.
java -jar "/sheep-cache/$latestVersion.jar" \
    -ui text \
    -login "$user_name" \
    -password "$user_password" \
    -cores "$(nproc)" \
    -cache-dir /sheep-cache \
    $extra_opt
| true |
f5553a5641d1157e14c64ea120e5a0d11548b85a | Shell | AZobec/ATFM | /Honeypot_Raspberry/bin/get_IOC.sh | UTF-8 | 883 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Collect host forensic indicators (IOCs) into ~/ATFM/Honeypot_Raspberry/datas,
# then truncate shell histories and the login record so the next collection
# only reflects new activity.

# Drop the artifacts of the previous run.
sudo rm -rf ~/ATFM/Honeypot_Raspberry/datas/*.ioc*
echo ">>> Get all IOC Files"
# Login history, active sessions, root-dir listing, process, open-file
# and network snapshots — one .ioc file each.
sudo last > ~/ATFM/Honeypot_Raspberry/datas/last.ioc
sudo w > ~/ATFM/Honeypot_Raspberry/datas/w.ioc
sudo ls / -lat > ~/ATFM/Honeypot_Raspberry/datas/ls.ioc
sudo ps aux > ~/ATFM/Honeypot_Raspberry/datas/ps_aux.ioc
sudo ps elf > ~/ATFM/Honeypot_Raspberry/datas/ps_elf.ioc
sudo lsof > ~/ATFM/Honeypot_Raspberry/datas/lsof.ioc
sudo date > ~/ATFM/Honeypot_Raspberry/datas/date.ioc
sudo netstat > ~/ATFM/Honeypot_Raspberry/datas/netstat.ioc
# Shell histories for the invoking user and for root.
cat ~/.bash_history > ~/ATFM/Honeypot_Raspberry/datas/bash_history.ioc
sudo cat /root/.bash_history > ~/ATFM/Honeypot_Raspberry/datas/bash_history_root.ioc
# Hand the collected files to the pi user.
sudo chown pi:pi ~/ATFM/Honeypot_Raspberry/datas/*
echo ">>> Clean some important files"
# Empty the histories and wtmp so future snapshots start from a clean slate.
sudo truncate -s 0 /root/.bash_history
sudo truncate -s 0 ~/.bash_history
sudo truncate -s 0 /var/log/wtmp
| true |
639019558eab3095c8eeec5e660d9ae2fdeb8069 | Shell | delkyd/alfheim_linux-PKGBUILDS | /rapidminer-studio/PKGBUILD | UTF-8 | 1,510 | 2.65625 | 3 | [] | no_license | # Maintainer: Emanuel Fernandes <efernandes@tektorque.com>
pkgname=rapidminer-studio
pkgver=7.6.0
pkgrel=1
pkgdesc="Empowers data scientists and business analysts to effortlessly design predictive analytics from mashup to modeling to deployment"
arch=('any')
url="https://rapidminer.com/products/studio/"
license=('AGPL3')
depends=('java-environment')
makedepends=('unzip' 'gendesk')
source=(https://s3.amazonaws.com/rapidminer.releases/rapidminer-studio/$pkgver/rapidminer-studio-$pkgver.zip \
rapidminer-studio.sh)
sha256sums=('4bfe9d8e46fcd9a92b274dbdce2dd0ea2f2f8079371beed58b162ac8603c2dfe'
'd3d76353c1ae15eec44c2fc638bbde98a192b70447bd467763f6d41cf24b6e5a')
prepare() {
gendesk -f -n \
--name "RapidMiner Studio" \
--pkgname "$pkgname" \
--pkgdesc "$pkgdesc" \
--categories="Science;Education;Development"
}
package() {
cd "$srcdir"
mkdir -p "$pkgdir/opt/$pkgname"
unzip -p "$srcdir/$pkgname/lib/rapidminer-studio-core-$pkgver.jar" \
com/rapidminer/resources/rapidminer_frame_icon_128.png > "$pkgname.png"
install -Dm644 "$pkgname.png" "$pkgdir/usr/share/pixmaps/$pkgname.png"
cp -R "$srcdir/$pkgname/" "$pkgdir/opt/"
mkdir -p "$pkgdir/usr/bin/"
mkdir -p "$pkgdir/usr/share/applications/"
mkdir -p "$pkgdir/usr/share/licenses/$pkgname"
install -Dm655 "$srcdir/$pkgname.sh" "$pkgdir/usr/bin/$pkgname"
install -Dm644 "$pkgname.desktop" "$pkgdir/usr/share/applications/$pkgname.desktop"
}
| true |
9fd502d85020052d37a3542a904fb5c4baf104e4 | Shell | wildcard/colorful-avatar | /generate-all-avatars.sh | UTF-8 | 303 | 2.875 | 3 | [] | no_license | #!/usr/bin/env bash
echo "Brands colors"
# Emit one "<brand> <color>" line per file under colors/ and keep a copy
# in /tmp/brands-colors. A NUL-delimited read loop replaces the original
# `xargs -I % sh -c '... % ...'`, which substituted raw filenames into a
# shell command string (fragile and injection-prone).
find colors -mindepth 1 -type f -print0 |
    while IFS= read -r -d '' file; do
        printf '%s %s\n' "$(basename "$file")" "$(cat "$file")"
    done | tee /tmp/brands-colors
echo
echo "Index Avatar"
./create-brand-avatar.sh '#000000' 'index'
# Generate one avatar per brand: field 2 = color (quoted), field 1 = name.
awk '{system( "./create-brand-avatar.sh " "\"" $2 "\" " $1)}' /tmp/brands-colors
| true |
8e9031a19e14cb61880b513e6f84cbeb3752188a | Shell | liujiangyi/openmoko-svn | /src/host/envedit/xtests/Common | UTF-8 | 1,241 | 3.53125 | 4 | [] | no_license | #!/bin/sh
# Report a failed test run: print FAILED (with the current test script's
# name from $SCRIPT) and dump the captured stdout/stderr of implementation
# $1 (1 = reference Perl version, 2 = version under test), then abort.
fail()
{
    echo FAILED "($SCRIPT)" 1>&2
    cat _out_$1 _err_$1 1>&2
    exit 1
}
# Announce on stderr which test is about to run; no trailing newline so
# the later PASSED/FAILED verdict lands on the same line.
setup()
{
    printf '%s: ' "$1" 1>&2
}
# Run both envedit implementations with identical arguments, writing their
# results via -o into _out_1/_out_2 and diagnostics into _err_1/_err_2;
# abort through fail on the first implementation that exits non-zero.
edit_out()
{
    ../../devirginator/envedit.pl "$@" -o _out_1 >_err_1 2>&1 || fail 1
    ../envedit -P ../envcpp.pl "$@" -o _out_2 >_err_2 2>&1 || fail 2
}
# Run both envedit implementations with identical arguments, capturing
# stdout and stderr separately for later comparison; abort through fail
# on the first implementation that exits non-zero.
edit()
{
    if ! ../../devirginator/envedit.pl "$@" >_out_1 2>_err_1; then
        fail 1
    fi
    if ! ../envedit -P ../envcpp.pl "$@" >_out_2 2>_err_2; then
        fail 2
    fi
}
# Preprocess input file $1 with envcpp.pl, using any remaining arguments
# as preprocessor options. The options are remembered in $pp_opts and the
# input name in $in_file for a later edit_processed call; the preprocessed
# text is left in _in.
process()
{
    in_file=$1
    shift
    pp_opts="$*"
    ../envcpp.pl "$@" "$in_file" >_out_1 2>_err_1 || fail 1
    mv _out_1 _in
}
# Run the two implementations on the output of a previous process() call:
# the Perl reference re-runs the preprocessor itself (via $pp_opts) on the
# raw $in_file, while the C version (-n: no preprocessing) is fed the
# already-preprocessed _in file. Outputs land in _out_1/_out_2.
edit_processed()
{
    ../../devirginator/envedit.pl $pp_opts \
        "$@" -f "$in_file" >_out_1 2>_err_1 || fail 1
    ../envedit -n \
        "$@" -f _in >_out_2 2>_err_2 || fail 2
}
# Require both implementations to have produced identical stdout, and
# their concatenated stderr to match the expected text piped into this
# function's stdin. On success remove the scratch files, report PASSED
# and bump the $passed counter.
expect()
{
    if ! diff -u _out_1 _out_2 >_tmp; then
        echo FAILED "($SCRIPT)" 1>&2
        cat _tmp
        exit
    fi
    cat _err_1 _err_2 >_err
    # "-" makes diff read the expected diagnostics from our stdin.
    if ! diff -u - _err >_tmp; then
        echo FAILED "($SCRIPT)" 1>&2
        cat _tmp
        exit
    fi
    rm -f _in _out_1 _out_2 _err_1 _err_2 _err _tmp
    echo PASSED 1>&2
    passed=`expr ${passed:-0} + 1`
}
# Require the two implementations' stdout to differ (for tests where
# divergence is the expected result); then copy _out_1 over _out_2 so the
# regular expect() checks can finish the comparison and cleanup.
differ()
{
    if diff -u _out_1 _out_2 >/dev/null; then
        echo FAILED "($SCRIPT)" 1>&2
        exit
    fi
    cp _out_1 _out_2
    expect
}
| true |
dd753c4f0ec2d6bb6dea4f19b430d4f3f6b6ce81 | Shell | arvin580/scripts_by_sofia | /sync_push.sh | UTF-8 | 265 | 2.515625 | 3 | [] | no_license | #!/bin/bash
## this is to automate the committing of any new changes to my git repository
# Abort when the repository directory is missing instead of running the
# rsync/git commands against whatever the current directory happens to be.
cd ~/src/scripts_by_sofia || exit 1
# Pull newer copies of the scripts from ~/bin into the repo (-u: only
# overwrite files that are older here).
rsync -u ~/bin/* ~/src/scripts_by_sofia/.
git add *
git status >> sync_push.log
git commit -m 'daily commit of any changes to all scritps'
git push
| true |
ed57f7ccd4d29c43e628d719f6b183f054549fa0 | Shell | dcurca/CS2211 | /Assignment2/path.sh | UTF-8 | 2,064 | 4 | 4 | [] | no_license | #!/bin/bash
# This shell script gets an infinite number of inputs, it checks for various
# cases and in the end computes the calculations including distance, max
# distance, and total distance
# Dana Curca; dcurca; 250976773
# First coordinate pair (x1,y1) and second pair (x2,y2); the pairs slide
# forward over the argument list two at a time as the loop shifts.
x1=$1
y1=$2
x2=$3
y2=$4
# makes a variable equal to whether or not the number of arguments is even or odd
even=$(( $# % 2 ))
if [ $# -lt 4 ]; then
    echo "Error: need at least 4 arguments"
    exit 1
fi
# checks to make sure number of arguments is even
if [ ! $even -eq 0 ]; then
    echo "Error: need an even number of arguments"
    exit 2
fi
# checks to make sure that arguments are only integers
if ! [[ $x1 =~ ^[0-9]+$ && $x2 =~ ^[0-9]+$ && $y1 =~ ^[0-9]+$ && $y2 =~ ^[0-9]+$ ]]; then
    echo "Error: integers only"
    exit 3
fi
#variable initialization
total=0
longestD=0
count=0
# Remember the starting point so we can later test whether the path closes.
firstX=$x1
firstY=$y1
# checks to make sure number of arguments supports base case
# (loop runs while a full second pair remains; bc does the float math).
while [ $# -gt "2" ]; do
    #compute calculations
    pair1=`echo "scale=2; ($x1-$x2)^2" | bc`
    pair2=`echo "scale=2; ($y1-$y2)^2" | bc`
    distance=`echo "scale=2; sqrt($pair1 + $pair2)" | bc`
    total=`echo "scale=2; $total + $distance" | bc`
    let "count++"
    if (( $(echo "$distance > $longestD" | bc -l))); then
        longestD=$distance
    fi
    # Remember the most recent endpoint before shifting.
    SecondX=$x2
    SecondY=$y2
    # shift to get next (x,y) pair
    shift
    shift
    x1=$1
    x2=$3
    y1=$2
    y2=$4
done
# echos the total, longest distance and average distance
echo "Total path length: "$total""
echo "Longest distance between two points: "$longestD""
average=`echo "scale=2; $total / $count" | bc`
echo "Average distance between two points: "$average""
# checks to see if last (x,y) pair is equal to first (x,y) pair
if [[ $firstX -eq $SecondX && $firstY -eq $SecondY ]]; then
    echo "Path leads to start."
else
    # if they do not equal then calculate and echo distance between first and last (x,y) pair
    pair1=`echo "scale=2; ($firstX - $SecondX)^2" | bc`
    pair2=`echo "scale=2; ($firstY - $SecondY)^2" | bc`
    Distance=`echo "scale=2; sqrt($pair1 + $pair2)" | bc`
    echo "Path leads "$Distance" distance from start."
fi
| true |
93aa3096f7ed9f16c07b9e06c19536f3d6429928 | Shell | funtoo/corenetwork | /netif.d/wireguard | UTF-8 | 539 | 3.765625 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
WG_QUICK="/usr/bin/wg-quick"

# Bring interface $interface (set by the corenetwork framework) up via
# wg-quick using /etc/wireguard/<interface>.conf.
start() {
	if [ ! -f "${WG_QUICK}" ]; then
		echo "wg-quick binary not found. Did you emerge net-vpn/wireguard-tools?"
		exit 1
	fi
	if [ ! -f /etc/wireguard/${interface}.conf ]; then
		echo "Can't find /etc/wireguard/${interface}.conf. Please ensure this file exists."
		# Abort here: the original fell through and ran wg-quick anyway,
		# which could only fail on the missing config.
		return 1
	fi
	ebegin "Configuring wireguard interface $interface"
	$WG_QUICK up $interface 2>/dev/null
	eend $?
}

# Tear the wireguard interface back down.
stop() {
	ebegin "Stopping wireguard interface $interface"
	$WG_QUICK down $interface 2>/dev/null
	eend $?
}
| true |
b4e8cb13a591373a5ded7b4098698b3f78d60905 | Shell | TheDarkula/habitat | /.expeditor/scripts/release_habitat/build_mac_hab_binary.sh | UTF-8 | 1,422 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/local/bin/bash
set -euo pipefail

# Buildkite release step: build the macOS `hab` binary and upload it to
# Builder in the current release channel.
source .expeditor/scripts/release_habitat/shared.sh

# Get secrets! (our auth token and aws creds should be auto-injected but there's a bug:
# https://github.com/chef/ci-studio-common/issues/200)
eval "$(vault-util fetch-secret-env)"

export HAB_AUTH_TOKEN="${PIPELINE_HAB_AUTH_TOKEN}"
export HAB_BLDR_URL="${PIPELINE_HAB_BLDR_URL}"

channel=$(get_release_channel)

echo "--- Channel: $channel - bldr url: $HAB_BLDR_URL"

macos_install_bootstrap_package

# curlbash_hab populates hab_binary (declared here at global scope).
declare -g hab_binary
curlbash_hab "$BUILD_PKG_TARGET"

import_keys

macos_use_cert_file_from_linux_cacerts_package
macos_sync_cache_signing_keys

install_rustup

# set the rust toolchain
rust_toolchain="$(cat rust-toolchain)"
echo "--- :rust: Using Rust toolchain ${rust_toolchain}"
rustc --version # just 'cause I'm paranoid and I want to double check

echo "--- :habicat: Building components/hab"
HAB_BLDR_CHANNEL="${channel}" macos_build components/hab
# last_build.env provides pkg_ident, pkg_artifact and pkg_target.
source results/last_build.env

echo "--- :habicat: Uploading ${pkg_ident:?} to ${HAB_BLDR_URL} in the '${channel}' channel"
${hab_binary} pkg upload \
  --channel="${channel}" \
  --auth="${HAB_AUTH_TOKEN}" \
  --no-build \
  "results/${pkg_artifact:?}"

# Append this package to the release-manifest annotation in Buildkite.
echo "<br>* ${pkg_ident} (${BUILD_PKG_TARGET})" | buildkite-agent annotate --append --context "release-manifest"

set_target_metadata "${pkg_ident}" "${pkg_target}"
| true |
349108ce20ab86b120e86ba7fe984e452862bfe4 | Shell | vmalguy/pedagogic_ctf | /init.sh | UTF-8 | 2,179 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# Recreate the unprivileged service account the CTF backend runs as.
userdel ctf_interne
groupdel ctf_interne
useradd ctf_interne
mkdir /home/ctf_interne && chown ctf_interne:ctf_interne /home/ctf_interne -R
# Rebuild the Go workspace under /srv/ctf_go from this checkout.
rm -rf /srv/ctf_go && mkdir /srv/ctf_go
export GOPATH=`pwd`
export PATH=$PATH:${GOROOT}/bin:${GOPATH}/bin
echo "Fetching golang requirements.."
go get ctf/main
cp . -R /srv/ctf_go
# World-writable (but not readable/listable) scratch areas: mode 733.
rm -rf /srv/writable && mkdir /srv/writable && chmod 733 /srv/writable
chmod 733 /tmp
cp /srv/ctf_go/src/ctf/utils/config.json.example /srv/ctf_go/src/ctf/utils/config.json
touch /srv/ctf_go/database.db
# Lock the tree down to ctf_interne; only the top dir and challs/ stay
# traversable by others.
chown ctf_interne /srv/ctf_go -R
chmod o-rwx /srv/ctf_go -R
chmod o+rx /srv/ctf_go/
chmod o+rx /srv/ctf_go/challs/
chown :www-data /srv/ctf_go/frontend-angular/ -R
# Build app that check if user has well corrected the script
# (setuid-root binary, mode 4750, executable only by group ctf_interne).
gcc /srv/ctf_go/check_challenge_corrected.c -o /srv/ctf_go/check_challenge_corrected
chown root:ctf_interne /srv/ctf_go/check_challenge_corrected
chmod 4750 /srv/ctf_go/check_challenge_corrected
chown root:root /srv/ctf_go/check_challenge_corrected.py
chmod 500 /srv/ctf_go/check_challenge_corrected.py
# Init challenges: one dedicated user per *.dir challenge directory.
for chall_name in `ls challs|grep dir|sed "s/.dir$//"`
do
	userdel $chall_name
	groupdel $chall_name
	printf "thesecret" > /srv/ctf_go/challs/${chall_name}.dir/secret
	(cd /srv/ctf_go/ && ./load_challenges.py $chall_name)
done
# Selenium based challs specific
# TODO: add to init challenges
cd /usr/local/bin
wget "https://github.com/mozilla/geckodriver/releases/download/v0.15.0/geckodriver-v0.15.0-linux64.tar.gz"
tar xvzf geckodriver-v0.15.0-linux64.tar.gz
chmod +x geckodriver
chown root:stored_xss /srv/ctf_go/challs/stored_xss.dir/victim_browser.py
chmod +x /srv/ctf_go/challs/stored_xss.dir/victim_browser.py
touch /tmp/api.log
chmod 666 /tmp/api.log
touch /srv/ctf_go/challs/data_exposure.dir/key
chown root:data_exposure /srv/ctf_go/challs/data_exposure.dir/key
chown ctf_interne /srv/ctf_go/challenges.json
# configure nginx
cp /srv/ctf_go/nginx.conf /etc/nginx/sites-available/pedagogictf
ln -s /etc/nginx/sites-available/pedagogictf /etc/nginx/sites-enabled/
rm /etc/nginx/sites-enabled/default
service nginx restart
echo
echo "Check src/ctf/utils/config.json !"
| true |
a0c489ce642c15fe52829e0a468aa6b1e037d175 | Shell | dkns/dotfiles | /bin/i3_volume | UTF-8 | 326 | 3.03125 | 3 | [] | no_license | #!/bin/bash
set -euo pipefail

# Adjust the volume of the currently RUNNING PulseAudio sink.
# $1 - volume argument forwarded to `pactl set-sink-volume` (e.g. "+5%").

# Major version of pactl (second field of "pactl X.Y ...").
pactl_version=$(pactl --version | grep pactl | awk -F ' ' '{print $2}' | awk -F '.' '{print $1}')
sink=$(pactl list short sinks | grep RUNNING | cut -f1)

# NOTE(review): the original tested `[ $pactl_version=8 ]` (no spaces around
# "="), which is always true, so *both* branches ran; it also passed $0 (the
# script's own name) instead of the volume argument $1.
if [ "$pactl_version" -ge 8 ]; then
    pactl set-sink-volume "$sink" "$1"
else
    # Older pactl needs "--" before a value that may start with "-".
    pactl set-sink-volume "$sink" -- "$1"
fi
| true |
ed95b29ef20bb75a2d2015e24aebaee2cd1a56bc | Shell | xeraa/mongodb-monitoring | /elastic-stack/templates/filebeat-restart.sh | UTF-8 | 138 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
sleep 30
# Start filebeat when the service reports it is not running.
# `service ... status` signals the state through its *exit code*; the
# original captured the command's output text and compared that string
# against 0, which was always unequal, so start ran unconditionally.
if ! service filebeat status > /dev/null 2>&1
then
    service filebeat start
fi
| true |
cac7fc6659d569ddd1dbcc3beb04b19cff3da948 | Shell | GregTheMadMonk/dotfiles | /home_scripts/scripts/set-brightness.sh | UTF-8 | 278 | 3.265625 | 3 | [] | no_license | #!/bin/sh
BL_DIR=/sys/class/backlight/intel_backlight

# Read the requested change from stdin: a percentage of the maximum
# brightness, may be negative. -r keeps backslashes literal.
read -r NEW

MAX=$(cat "$BL_DIR/max_brightness")
NOW=$(cat "$BL_DIR/brightness")

# New raw value = current + NEW% of max, clamped to [0, MAX].
NVAL=$((NOW + NEW * MAX / 100))
if [ "$NVAL" -le 0 ]; then
	NVAL=0
fi
if [ "$NVAL" -ge "$MAX" ]; then
	NVAL=$MAX
fi
echo "$NVAL" > "$BL_DIR/brightness"
| true |
77886c1ef6c2d23351a22087a7bf2391588b6077 | Shell | craig-m/rpi_cluster | /ansible/roles/rpilog/files/test-rpilog.sh | UTF-8 | 565 | 3.609375 | 4 | [] | no_license | #!/bin/bash
# check rpilog ansible role has setup the host properly.

# Log a message to stdout and to syslog (tag: rpicluster), prefixed with
# this script's name.
rpilogit () {
	echo -e "rpicluster: $script_name $1 \n";
	logger -t rpicluster "$script_name $1";
}

# NOTE(review): the original set `scriptname` from "$1" while rpilogit read
# the never-assigned `$script_name`; the script is invoked without
# arguments, so derive the name from "$0" and use one consistent variable.
script_name=$(basename -- "$0")
hostname=$(hostname)
test_token=$(uuidgen)

rpilogit "checking rpilog on ${hostname}";
rpilogit "looking for token: ${test_token}";

# The rpilogit call above should have landed in the cluster log; verify
# the unique token actually made it there.
if grep --silent "${test_token}" /var/log/rpicluster.log; then
	rpilogit "test passed"
	exit 0
else
	rpilogit "ERROR test failed"
	exit 1
fi
| true |
2ba66eb5261569bd54f518aa2aa0f15aa19b6fa9 | Shell | andreas-mausch/archive | /archive.sh | UTF-8 | 1,952 | 4.25 | 4 | [] | no_license | #!/usr/bin/env bash
set -e

# archive.sh — mirror a media/document tree into archival formats.
# Usage: ./archive.sh ./source/ ./output/
if [ "$#" -ne 2 ]; then
  echo "Usage: ${0} ./source/ ./output/"
  exit 2
fi

# Both converters are required by the image/audio/video helper scripts.
if ! [ -x "$(command -v ffmpeg)" ]; then
  echo "Error: ffmpeg is not installed." >&2
  exit 1
fi

if ! [ -x "$(command -v magick)" ]; then
  echo "Error: magick is not installed." >&2
  exit 1
fi

ARCHIVE_SCRIPT_DIRECTORY="${PWD}"
SOURCE_FOLDER=$(realpath "$1")
TARGET_FOLDER=$(realpath "$2")

echo "Source folder: ${SOURCE_FOLDER}"
echo "Target folder: ${TARGET_FOLDER}"
echo

# convertFile <relative-path> <converter-script> <target-extension>
# Convert one source file into the mirrored target tree; skip files whose
# target already exists, and copy the source mtime onto the new target.
function convertFile {
  SOURCE_FILENAME="${SOURCE_FOLDER}/${1}"
  TARGET_FILENAME="${TARGET_FOLDER}/${1%.*}.${3}"
  TARGET_DIRNAME=$(dirname "${1}")
  TARGET_FILENAME_FOLDER="${TARGET_FOLDER}/${TARGET_DIRNAME}"
  mkdir -p "${TARGET_FILENAME_FOLDER}"
  if [ ! -f "${TARGET_FILENAME}" ]; then
    echo "Archiving ${SOURCE_FILENAME}"
    "${ARCHIVE_SCRIPT_DIRECTORY}/${2}" "${SOURCE_FILENAME}" "${TARGET_FILENAME}"
    touch -r "${SOURCE_FILENAME}" "${TARGET_FILENAME}"
  else
    echo "Skipping (already exists): ${SOURCE_FILENAME}"
  fi
}

# copyFile — dispatch one file to the right converter by its lower-cased
# extension. The filename arrives as $0: the find|xargs pipelines below run
# `bash -c 'copyFile "$0"' <file>`, and inside a function $0 is still the
# shell's $0, i.e. that filename.
function copyFile {
  LOWERCASE_FILENAME="${0,,}"
  if [[ "${LOWERCASE_FILENAME}" =~ ^.*\.(bmp|jpg|jpeg|png|gif)$ ]]; then
    convertFile "$0" ./image.sh heic
  elif [[ "${LOWERCASE_FILENAME}" =~ ^.*\.(mp3|m4a|opus|wav)$ ]]; then
    convertFile "$0" ./audio.sh opus
  elif [[ "${LOWERCASE_FILENAME}" =~ ^.*\.(avi|mp4|mpg|mpeg|mov)$ ]]; then
    # Already handled by the dedicated video pass below.
    :
  elif [[ "${LOWERCASE_FILENAME}" =~ ^.*\.(txt|html|mhtml|xhtml|pdf|doc|docx|xls|xlsx|odt|ods)$ ]]; then
    convertFile "$0" ./copy.sh "${0##*.}"
  else
    echo "File not copied (unknown type): ${0}"
  fi
}

# Export everything the xargs-spawned child shells need.
export ARCHIVE_SCRIPT_DIRECTORY
export SOURCE_FOLDER
export TARGET_FOLDER
export -f convertFile
export -f copyFile

cd "${SOURCE_FOLDER}"
# Videos first, one at a time; then everything else with 8 parallel workers.
find . -iregex ".*\.\(avi\|mp4\|mpg\|mpeg\|mov\)$" -print0 | xargs --no-run-if-empty -0 -n1 bash -c 'convertFile "$0" ./video.sh mp4'
find . -type f -print0 | xargs -0 -n1 -P 8 bash -c 'copyFile "$0"'
| true |
aec211781b50584bf3eb646f9a41dbd512331ae3 | Shell | maoshuai/simpleLog4sh | /examples/duplicate_stdout_stderr_to_log_file/test.sh | UTF-8 | 635 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#-------------
# Example
# In this example, any stdout or stderr will duplicate to a log file
# It is helpful when you want to collect output of other commands
#-------------
# Load the logging library with this example's configuration file; the
# duplication described above happens from this point on.
. ../../src/simpleLog4sh.source ./simpleLog4sh.cfg

# stdout of this command will be also collected in log file: /tmp/simpleLog4sh/${date}/simpleLog4sh_root.out
uptime

# stderr of this command will be also collected in log file: /tmp/simpleLog4sh/${date}/simpleLog4sh_root.err
# (intentionally a nonexistent command, to produce stderr output)
not_fund_command

# logEcho logEchoError is also duplicated
logEcho "logEcho something"

# logEcho logEchoError is also duplicated
logEchoError "logEchoError something" | true |
0b4867abeb1fb9c1dcb5389f1d86dcd576282e99 | Shell | urban-1/git-puller | /puller-config.sh.sample | UTF-8 | 633 | 2.875 | 3 | [] | no_license | #!/bin/bash
#
# General configuration for puller utility:
#
# Mailer command. May be a plain path or, as below, an array holding the
# command plus its arguments (here: ssmtp-compatible sendmail with an
# explicit config file).
# MAILER=/usr/sbin/sendmail
MAILER=(/usr/sbin/sendmail "-C" ~/.ssmtp.conf)

# Which "from" name/address to use when sending out emails
MAIL_FROM="git-puller <Do.Not.Reply@gitpuller.com>"

# False to stop sending merge emails
SEND_MERGE_MAILS=1

# False to stop sending error/warning mails
SEND_ERROR_MAILS=1

# The team maintaining this script to receive usage/success/failure reports
# This will be used in CC if defined
DEV_TEAM_MAIL=""

# Date format used in log lines (strftime-style).
DATE_FORMAT="%d/%m/%Y %H:%M:%S"

# Default logging level
LOG_LEVEL=10

# SSH key to use when cloning
KEY_FILE=~/.ssh/id_rsa
| true |
9afe4c251afa73126072c41ffb1f4adafdfceb2c | Shell | Xvaria/holberton-system_engineering-devops | /0x08-networking_basics_2/1-show_attached_IPs | UTF-8 | 142 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env bash
#displays all active IPv4 IPs on the machine it's executed on
ip -4 addr | grep "inet" | awk '{print $2}' | cut -d "/" -f 1 | true |
60b21e57cc9baf8f7881dc38a78ee264c50ca12d | Shell | NGenetzky/shlib | /tool/etckeeper/etckeeper_here.bash | UTF-8 | 349 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Resolve the directory this script resides in (CDPATH cleared so `cd`
# prints nothing; pwd -P resolves symlinks).
SCRIPTDIR="$(CDPATH='' cd -- "$(dirname -- "$0")" && pwd -P)"

# Run an etckeeper subcommand against SCRIPTDIR.
#   $1 - etckeeper command (required; ${1?} aborts when missing)
#   remaining arguments are forwarded to etckeeper unchanged
etckeeper_here(){
    local command
    command="${1?}"
    shift
    # Forward the remaining arguments as separate words. The original used
    # "$*", which collapsed them into a single argument (and passed one
    # empty argument when there were none).
    etckeeper "${command}" \
        -d "${SCRIPTDIR}" \
        "$@"
}

if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    # Bash Strict Mode
    set -eu -o pipefail
    # set -x
    etckeeper_here "$@"
fi
| true |
5a81d66879b57cb74ba2fd2c6d9b0b00d9be5298 | Shell | gdgly/bxchongdian | /buildTools/ProtocolBuffer/build.sh | UTF-8 | 712 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Regenerate protobuf bindings for the .proto files in this directory and
# copy the Java output into the Android app module.
SRC_DIR=./
DST_DIR=./gen
rm -R -f $DST_DIR
#android
mkdir -p $DST_DIR/java
protoc -I=$SRC_DIR --java_out=$DST_DIR/java/ $SRC_DIR/*.proto
#objctc
mkdir -p $DST_DIR/objectc
protoc -I=$SRC_DIR --objc_out=$DST_DIR/objectc/ $SRC_DIR/*.proto
#/charging/model/src/main/java/com/sojoline/model/protobufdata
APP_PKG=com/sojoline/model/protobufdata
APP_DIR=../../model/src/main/java/$APP_PKG
# $? here reflects only the immediately preceding command, i.e. the
# Objective-C protoc run (the Java run above is not re-checked).
if ((!$?)); then
	echo 'Compile success...'
	echo 'Copy file to Android proto data file'
	rm -f $APP_DIR/*.java
	cp -f ./gen/java/$APP_PKG/* $APP_DIR/
#	echo 'Copy file to IOS protp data file'
#	rm -f ../iOS/SKDMC_IOS/SKDMC_IOS/ProtoBufData/*
#	cp -r ./gen/objectc/* ../iOS/SKDMC_IOS/SKDMC_IOS/ProtoBufData/
fi
| true |
150c43ba0678a843bfeea377647edd3b4eaa958d | Shell | rajuprade/ALl_MCM | /start_deviceclient.sh | UTF-8 | 1,079 | 2.90625 | 3 | [] | no_license | #! /bin/bash
# Restart every MCM device client: stop whatever is running, then relaunch
# each client in the background from its own directory.

# launch <dir> <label> <command...>
# Pause two seconds, change into <dir> (the cwd change persists, exactly
# like the original inline sequence), start <command...> detached, and
# report the outcome under <label>. Note: $? directly after `&` only says
# whether the job could be spawned — same semantics as the original.
launch() {
	local dir="$1"
	local label="$2"
	shift 2
	sleep 2
	cd "$dir"
	"$@" >> /dev/null 2>&1 &
	if [ $? -eq 0 ]
	then
		echo "$label ${HOSTNAME} started successfully"
	else
		echo "### ERROR $label not started"
	fi
}

cd /home/lmcuser/ALL_MCM/
./stop_deviceclient.sh
sleep 2
# Kill any leftover mcmtest process before restarting everything.
id=$(ps aux | grep mcmtest | grep -v grep | awk '{ print $2 }')
sudo kill -9 $id

launch /home/lmcuser/ALL_MCM/MCM     "USB BASED MCMTEST PROGRAM" sudo ./mcmtest
launch /home/lmcuser/ALL_MCM/fecb    "FIFO BASED FECB Client"    ./deviceClientfecb
launch /home/lmcuser/ALL_MCM/FPS_MCM "FIFO BASED FPS Client"     ./deviceClientfps
launch /home/lmcuser/ALL_MCM/IFLO    "FIFO BASED IFLO Client"    ./deviceClientiflo
| true |
1a17bce35148c7c020e116332b5f6c9e586fb80f | Shell | hanifann/dotfiles | /.config/polybar/modules/time.sh | UTF-8 | 344 | 3.484375 | 3 | [] | no_license | #!/bin/sh
# So this script will make the clock work in 12-hour format
HOUR=$(date +%H)
MIN=$(date +%M)
ELEVEN=11
TWELVE=12
ICON=
if [ $HOUR -gt $ELEVEN ];then
	if [ $HOUR -eq $TWELVE ];
	then
		NEW_HOUR=$HOUR
	else
		# 13..23 -> 1..11 (no leading zeros in this range, so the
		# arithmetic expansion cannot misparse octal).
		NEW_HOUR=$(($HOUR-$TWELVE))
	fi
	echo "$NEW_HOUR:$MIN P.M."
elif [ $HOUR -eq 0 ]; then
	# Midnight: hour 00 must display as 12 A.M. (the original printed
	# "00:MM A.M.").
	echo "$TWELVE:$MIN A.M."
else
	echo "$HOUR:$MIN A.M."
fi
| true |
b84eaa73d2a1fb64a5b0365452d7edb4c030674a | Shell | mic90/lede-packages | /yun-scripts/files/etc/init.d/avahi-update-config | UTF-8 | 317 | 3.125 | 3 | [] | no_license | #!/bin/sh /etc/rc.common
# Update /etc/avahi/services/arduino.service to announce
# correct hardware flavor
START=60

# OpenWrt init `start`: detect the ar71xx board and, on a Yún Shield,
# rewrite the avahi service description accordingly.
start() {
	. /lib/ar71xx.sh
	ar71xx_board_detect
	case $AR71XX_BOARD_NAME in
	arduino-yun-shield)
		sed -i 's/board=yun/description=Yún Shield/g' /etc/avahi/services/arduino.service
		;;
	esac
}
| true |
b5f2945b0df661b24ee2073722b2864e9283b4bb | Shell | acuas/sin | /client.sh | UTF-8 | 679 | 3.78125 | 4 | [] | no_license | #!/usr/bin/env bash
# Examples:
# sin hello.txt world.py # paste files.
# echo Hello world. | sin # read from STDIN.
# sin # Paste in terminal.

# Upload the given files — or, with no files / non-tty stdin, stream
# stdin — to the paste service.
sin() {
    local sin_HOST=http://localhost:8081
    # Interactive terminal on stdin?
    [ -t 0 ] && {
        [ $# -gt 0 ] && {
            local filename
            for filename in "$@"
            do
                if [ -f "$filename" ]
                then
                    curl -F f:1=@"$filename" "$sin_HOST"
                else
                    echo "file '$filename' does not exist!"
                fi
            done
            return
        }
        echo "^C to cancel, ^D to send."
    }
    curl -F f:1='<-' "$sin_HOST"
}

# Forward the arguments verbatim: the original `sin $*` word-split and
# glob-expanded them, breaking filenames with spaces or wildcards.
sin "$@"
| true |
7be461e8f31e47b9662d82077dc12a6a7e9915f3 | Shell | xuan103/SRE_Class | /Class_ppt/1119.sh/coco.kuo_adduser/user_add_all2/chmodsudos.sh | UTF-8 | 517 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Add a numbered range of accounts (e.g. head "stu", range 1..5 ->
# stu1..stu5) to the sudo group.

# Crude privilege check: for a privileged caller, `sudo -l` output starts
# with "Matching ...".
check=$(sudo -l | head -n 1 | fmt -u | cut -d' ' -f1)
if [ "$check" != 'Matching' ]
then
	echo 'You Do Not A Root!'
else
	read -r -p "Enter head name To Chmod: " ans
	read -r -p "Enter Start value To Chmod: " start
	read -r -p "Enter End Value To Chmod: " end
	for ((no=start; no<=end; no=no+1))
	do
		# Exact-match lookup of the account in /etc/passwd.
		account=$(cut -d ':' -f1 /etc/passwd | grep -x -- "$ans$no")
		if [ ! -n "$account" ]
		then
			echo "$ans$no Account Doesn't Exist"
		else
			sudo usermod -aG sudo "$ans$no"
			echo "$ans$no Chmod!"
		fi
	done
fi
| true |
b3f8a970ad39e00af015499d2c41615c0f013a23 | Shell | circuithub/ch-hs-imports | /nix/pkgs/update-to-nixpkgs-channels.sh | UTF-8 | 814 | 3.75 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env nix-shell
#!nix-shell -i bash --pure -p nix gnused gawk git cacert

# update fetch.nix to be the same revision as the specified nixos channel's revision
#   $1 - channel name (e.g. nixos-unstable)
if [ -z "$1" ]; then
  echo "Please specify the nixos channel to update to"
  exit 1
fi

echo "updating to latest revision for channel $1"

# script working directory (quoted: paths may contain spaces)
SWD=$(dirname "$(realpath "$0")")

# resolve the channel branch head to a commit hash
REV=$(git ls-remote https://github.com/NixOS/nixpkgs "refs/heads/$1" | awk '{print $1}')

# prefetch the revision tarball and get its store hash
HASH=$(nix-prefetch-url --unpack "https://github.com/NixOS/nixpkgs/archive/$REV.tar.gz")

# update the revision in fetch.nix
sed -i "/rev = \"/s/\".*/\"$REV\";/" "$SWD/fetch.nix"

# update the sha in fetch.nix
sed -i "/hash = \"/s/\".*/\"$HASH\";/" "$SWD/fetch.nix"
| true |
21b79e3c0094cc1999305665b90a139aa9cba8be | Shell | kagurazakayashi/CodeNotebook | /ShellScript_Linux/RedHat8安装EPEL和RPMf.sh | UTF-8 | 754 | 2.640625 | 3 | [] | no_license | # Installing EPEL Repository on RHEL 8.x
# Command notebook — these lines are meant to be run individually, not as
# one script.
dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm -y
# Install a Package from the EPEL Repository on RHEL 8
# (<package_name> below is a placeholder, replace it before running)
dnf --enablerepo="epel" install <package_name>
# rpmfusion (free + nonfree repos; --nogpgcheck skips signature checks)
dnf -y install https://download.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
dnf localinstall --nogpgcheck https://download1.rpmfusion.org/free/el/rpmfusion-free-release-8.noarch.rpm
dnf install --nogpgcheck https://download1.rpmfusion.org/nonfree/el/rpmfusion-nonfree-release-8.noarch.rpm
# aliyun mirror alternative
dnf install https://mirrors.aliyun.com/rpmfusion/free/el/rpmfusion-free-release-8.noarch.rpm -y
dnf install https://mirrors.aliyun.com/rpmfusion/nonfree/el/rpmfusion-nonfree-release-8.noarch.rpm -y | true |
fdbb7f923e38cbe00b6730f0aa484c94de2573f5 | Shell | lucapaga/kubeflow-install | /gke-by-google/02.ksonnet-setup.sh | UTF-8 | 324 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Pulls KSONNET_VERSION / KSONNET_FULL_VERSION into scope.
. 00.variables.sh
#download tar of ksonnet
# NOTE(review): --no-check-certificate disables TLS verification — confirm
# this is still required before keeping it.
wget --no-check-certificate https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/${KSONNET_FULL_VERSION}.tar.gz
#unpack file
tar -xvf ${KSONNET_FULL_VERSION}.tar.gz
#add ks command to path
# NOTE(review): this PATH change only outlives the script if the script is
# sourced rather than executed — verify how callers invoke it.
export PATH=$PATH:$(pwd)/${KSONNET_FULL_VERSION}
cd .. | true |
a59c57d7f97fb01314b377848273fa587b69b2ef | Shell | maybe-william/AirBnB_clone_v2 | /0-setup_web_static.sh | UTF-8 | 1,343 | 3.25 | 3 | [] | no_license | #!/usr/bin/env bash
# set up web_static deployment
# install nginx if not installed
sudo apt-get update
sudo apt-get install -y nginx
sudo service nginx start
# create all directories and basic files if nonexistant
sudo mkdir -p /data/web_static/releases/test/
sudo mkdir -p /data/web_static/shared
sudo touch /data/web_static/releases/test/index.html
echo "(web_static deployment placeholder)" | sudo tee /data/web_static/releases/test/index.html
# recreate current symlink
sudo rm /data/web_static/current
sudo ln -s /data/web_static/releases/test/ /data/web_static/current
# change ownership
sudo chown -R ubuntu:ubuntu /data
# set the nginx config file with an alias
echo $'server {\n\tadd_header X-Served-By '"$HOSTNAME"$';\n\tlisten 80 default_server;\n\tlisten [::]:80 default_server;\n\troot /var/www/html;\n\tindex index.html index.htm index.nginx-debian.html;\n\tserver_name _;\n\terror_page 404 /404.html;\n\tlocation /hbnb_static {\n\t\talias /data/web_static/current;\n\t}\n\tlocation /404.html {\n\t\troot /var/www/err/html;\n\t\tinternal;\n\t}\n\tlocation /redirect_me {\n\t\treturn 301 https://www.youtube.com/watch?v=QH2-TGUlwu4;\n\t}\n\tlocation / {\n\t\ttry_files $uri $uri/ =404;\n\t}\n}\n' | sudo tee /etc/nginx/sites-available/default
# reload and restart nginx
sudo service nginx reload
sudo service nginx restart
| true |
547a35ec15d8641ada312eb74186d1c4aa27b8d6 | Shell | HackerDom/ructfe-2019 | /ansible/roles/cloud_node/files/scripts/attach_vm_gui.sh | UTF-8 | 331 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Open the VirtualBox GUI for one team's test VM.
#   $1 - numeric team id (required)
#   $2 - optional literal "fix" to reset the keyboard layout first
TEAM=${1?Syntax: ./attach_vm_gui.sh <team_id> [fix]}
FIX=${2}

# Accept only a purely numeric team id.
if ! [[ $TEAM =~ ^[0-9]+$ ]]; then
	echo "team number validation error"
	exit 1
fi

vm="test_team${TEAM}"

# fix keyboard layout
if [ "$FIX" == fix ]; then
	echo fixing
	setxkbmap us -print | xkbcomp - "$DISPLAY"
fi

VirtualBox --startvm "$vm" --separate
| true |
98f5a799101ba314f17d8e9f41039ce69d7d5db4 | Shell | paalth/kless | /build-tools/build.sh | UTF-8 | 780 | 3.28125 | 3 | [] | no_license | #!/bin/bash
set -o errexit
set -o pipefail

# Build the klessserver Linux binary, bake it into a docker image tagged
# with $BUILD_ID, and push it to $KLESS_DEST_REGISTRY (when set).
KLESS_ROOT=$(dirname "${BASH_SOURCE}")/..
IMGNAME=klessv1/klessserver
TAG=$IMGNAME:$BUILD_ID
if [[ ! -z "$KLESS_DEST_REGISTRY" ]]; then
  TAG=$KLESS_DEST_REGISTRY/$TAG
fi
echo "Building image with tag $TAG"
echo $KLESS_ROOT
# Static cross-build for linux/amd64 so the binary runs in the container.
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "-s" -a -installsuffix cgo -o klessserver cmd/klessserver/klessserver.go
mv klessserver cmd/klessserver
cd cmd/klessserver
echo "Logging into docker registry $KLESS_DEST_REGISTRY"
sudo docker login -u $KLESS_DEST_REGISTRY_USERNAME -p "$KLESS_DEST_REGISTRY_PASSWORD" $KLESS_DEST_REGISTRY
sudo docker build -f Dockerfile --build-arg KLESS_VERSION=0.0.1 --build-arg KLESS_MAINTAINER=paal@thorstensen.org -t $TAG .
sudo docker push $TAG
# Remove the binary copied into the docker context and return to the root.
rm klessserver
cd ../..
| true |
4519d05db46ead6187bfaafdde977bd6687f133c | Shell | Sarath-Molathoti/Bash-Scripting | /until.sh | UTF-8 | 96 | 3.21875 | 3 | [] | no_license | ######UNTIL LOOP
#! /bin/bash
# Print the integers 1 through 9, one per line, using an `until` loop
# that stops once n has been incremented to 10.
n=1
until [ "$n" -ge 10 ]
do
	printf '%s\n' "$n"
	n=$((n + 1))
done
| true |
36aa3a41844ccb60e2443d51e46edad4bde3ff24 | Shell | ldo/blender_pythify | /pythify_mesh | UTF-8 | 7,471 | 3.515625 | 4 | [] | no_license | #!/bin/bash
#+
# This script runs Blender in batch mode on the specified .blend file,
# and extracts the geometry of a specified mesh object in Python form,
# such that it can be used, for example with the from_py_data API call
# to recreate the mesh geometry. Invoke it as follows:
#
# pythify_mesh [options...] «blendfile»
#
# where «blendfile» is the .blend file from which to extract the mesh.
# The extracted data is written to standard output. Valid options are
# as follows:
#
# --blender=blender
# specifies the path to the Blender executable. Defaults to
# searching for the name “blender” in your PATH.
# --mesh=«meshname»
# specifies the name of the mesh datablock to extract.
# Cannot be specified together with --object.
# --noweights
# indicates that vertex group memberships are to be specified
# as a set (omitting the weights), rather than as a dict
# mapping vertices to their assigned weights.
# --object=«objectname»
# specifies the name of the object to extract. It must be
# of mesh type.
# Cannot be specified together with --mesh.
#
# Note Blender attaches vertex group information to the object datablock,
# not the mesh datablock, so it cannot be extracted if --mesh is specified.
# If neither --mesh nor --object is specified, then the mesh is extracted
# from the active object in the active scene in the .blend file.
#-
# Report a bad command-line invocation on stderr, prefixed with the
# script name, and abort with status 3.
opterror()
{
    printf '%s: %s\n' "$0" "$1" 1>&2
    exit 3
} # opterror
# Defaults for the command-line options.
blender=blender
do_obj=
do_mesh=
do_weights=1
# Consume --name / --name=value options until the first non-option
# argument (or an explicit "--").
for ((;;)); do
    if [ "${1:0:2}" != "--" ]; then
        break
    fi
    if [ "$1" == "--" ]; then
        shift
        break
    fi
    # Strip the leading "--", then split into option name and value
    # (for valueless options, val ends up equal to the name).
    opt="${1:2:${#1}}"
    shift
    val="${opt#*=}"
    opt="${opt%%=*}"
    if [ "$opt" = "blender" ]; then
        blender="$val"
    elif [ "$opt" = "mesh" ]; then
        do_mesh="$val"
    elif [ "$opt" = "noweights" ]; then
        do_weights=
    elif [ "$opt" = "object" ]; then
        do_obj="$val"
    else
        opterror "bad option $opt"
    fi
done
# The Blender executable must be findable, and exactly one .blend file
# must remain as the positional argument.
if [ -z "$(type -p "$blender")" ]; then
    opterror "no such executable “$blender”"
fi
if [ $# != 1 ]; then
    opterror $'Usage:\n\t'"$0 "$'<blendfile>'
fi
blendfile="$1"
# Hand the parsed settings to the embedded Python script via environment
# variables (read back with os.getenv inside the heredoc below).
export RENDER_blendfile="$blendfile"
export RENDER_do_obj="$do_obj"
export RENDER_do_mesh="$do_mesh"
export RENDER_do_weights="$do_weights"
exec "$blender" -noaudio 5>&1 1>/dev/null -b -P <(cat <<'EOD'
import sys
import os
import getopt
import bpy
try :
os.wait() # gobble zombie child of shell which was previously in this process slot
except ChildProcessError :
# can happen intermittently?
pass
#end try
out = os.fdopen(5, "w")
# use a different fd from stdout, only way it seems to avoid
# output being polluted by Blender’s messages
#+
# Mainline
#-
blendfile = os.getenv("RENDER_blendfile")
do_obj = os.getenv("RENDER_do_obj", "")
do_mesh = os.getenv("RENDER_do_mesh", "")
do_weights = os.getenv("RENDER_do_weights", "")
nr_digits = 7
bpy.ops.wm.open_mainfile(filepath = blendfile)
if do_obj != "" and do_mesh != "" :
raise getopt.GetoptError("specify at most one of --object or --mesh")
#end if
if do_obj == "" and do_mesh == "" :
the_obj = bpy.context.scene.objects.active
if the_obj == None :
raise getopt.GetoptError("no object specified, and no active object")
#end if
elif do_obj != "" :
the_obj = bpy.data.objects.get(do_obj)
if the_obj == None :
raise getopt.GetoptError("no such object “%s”" % do_obj)
#end if
else :
the_obj = None
#end if
if the_obj != None :
the_mesh = the_obj.data
if type(the_mesh) != bpy.types.Mesh :
raise getopt.GetoptError("object “%s(%s)” is not a mesh" % (the_obj.name, the_obj.type))
#end if
elif do_mesh != "" :
the_mesh = bpy.data.meshes.get(do_mesh)
if the_mesh == None :
raise getopt.GetoptError("no such mesh “%s”" % do_mesh)
#end if
else :
assert False
#end if
num_format = "%%.%dg" % nr_digits
out.write \
(
"vertices = \\\n"
" [\n"
)
verts_format = "(" + ", ".join([num_format] * 3) + ")"
for v in the_mesh.vertices :
out.write(" " + verts_format % tuple(v.co) + ",\n")
#end for
out.write \
(
" ]\n"
"\n"
)
if len(the_mesh.vertices) != 0 :
out.write \
(
"bounds = \\\n"
" (\n"
)
for axis in range(3) :
out.write \
(
" (%s, %s),\n"
%
(num_format, num_format)
%
(
min(v.co[axis] for v in the_mesh.vertices),
max(v.co[axis] for v in the_mesh.vertices)
)
)
#end for
out.write \
(
" )\n"
"\n"
)
#end if
out.write \
(
"faces = \\\n"
" [\n"
)
for f in the_mesh.polygons :
out.write \
(
" [" + ", ".join("%d" % i for i in f.vertices) + "],\n"
)
#end for
out.write \
(
" ]\n"
)
out.write("smooth_faces = [")
first = True
for i, f in enumerate(the_mesh.polygons) :
if f.use_smooth :
if first :
first = False
else :
out.write(", ")
#end if
out.write("%d" % i)
#end if
#end for
out.write("]\n")
if the_obj != None and len(the_obj.vertex_groups) != 0 :
group_indices = dict \
(
(the_obj.vertex_groups[i].name, i)
for i in range(len(the_obj.vertex_groups))
)
groups = {}
for v in the_mesh.vertices :
for vg in v.groups :
if vg.group in groups :
group_entry = groups[vg.group]
else :
group_entry = {}
groups[vg.group] = group_entry
#end if
group_entry[v.index] = vg.weight
#end for
#end for
out.write \
(
"vertex_groups = \\\n"
" {\n"
)
for group_name in sorted(group_indices.keys()) :
group_index = group_indices[group_name]
group_vertices = groups.get(group_index, {})
out.write(" %s :\n" % repr(group_name))
out.write(" {\n")
for v in sorted(group_vertices.keys()) :
if do_weights :
out.write(" %%d : %s,\n" % num_format % (v, group_vertices[v]))
else :
if group_vertices[v] != 0 :
out.write(" %d,\n" % v)
#end if
#end if
#end for
out.write(" },\n")
#end for
out.write \
(
" }\n"
)
#end if
materials = {}
for f in the_mesh.polygons :
if f.material_index in materials :
slot = materials[f.material_index]
else :
slot = set()
materials[f.material_index] = slot
#end if
slot.add(f.index)
#end for
if len(materials) != 0 :
out.write \
(
"face_materials = \\\n"
" {\n"
)
for slotindex in sorted(materials.keys()) :
out.write(" %d :\n" % slotindex)
out.write(" {\n")
slot = materials[slotindex]
for f in sorted(slot) :
out.write(" %d,\n" % f)
#end for
out.write(" },\n")
#end for
out.write \
(
" }\n"
)
else :
out.write \
(
"face_materials = {}\n"
)
#end if
EOD
)
| true |
f8d6b3480cdec070d0b58dbfe30f69fff5ae6289 | Shell | scwang18/docker_env | /a6test/import.sh | UTF-8 | 745 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -e
set -x
# mkdir -p tmp
# config.sh is expected to define $VAR: one "<image> <archive>" pair per
# line (whitespace-separated fields) — TODO confirm against config.sh.
source config.sh
# for i in "${files[@]}"; do
# zcat tmp/$i | docker load
# done
# For each line of $VAR, decompress the gzipped image tarball named by the
# 2nd field (images[1]) from tmp/ and load it into the docker daemon.
while read -r line; do
  read -ra images <<<"$line"
  echo "zcat ${images[1]} | docker load"
  zcat tmp/${images[1]} | docker load
done <<< "$VAR"
# zcat tmp/elasticsearch.wzh.tgz | docker load
# zcat tmp/centos.wzh.tgz | docker load
# zcat tmp/hadoop.bases.tgz | docker load
# zcat tmp/hadoop.wzh.tgz | docker load
# zcat tmp/cp-kafka-connect.wzh.tgz | docker load
# zcat tmp/hue.wzh.tgz | docker load
# zcat tmp/flume.build.tgz | docker load
# zcat tmp/flume.wzh.tgz | docker load
# zcat tmp/kite.build.tgz | docker load
# zcat tmp/dbz.wzh.tgz | docker load
# zcat tmp/kudu.wzh.tgz | docker load
| true |
93ec15df550bc2de2301b4ad0c90a6782ffa3b56 | Shell | waterlink/ShellTools | /test/makeSureDirIsPresent.sh | UTF-8 | 597 | 3.0625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env ../libs/bin/bats
load ../src/makeSureDirIsPresent
ARG0=
ARG1=
ARG2=
function fakeExecute() {
  # Test double for the injected "execute" callback: records its first
  # three arguments in the ARG0/ARG1/ARG2 globals for later assertions.
  ARG0="${1-}"
  ARG1="${2-}"
  ARG2="${3-}"
}
# Bats test: the generic helper must delegate to the injected executor
# with exactly `mkdir -p <dir>`.
# NOTE(review): `${status}` is normally populated by bats' `run` helper,
# which is not used here, so the -eq 0 check may be vacuous — confirm.
@test "it creates directory" {
  makeSureDirIsPresentGeneric fakeExecute "some/dir"
  [[ "${status}" -eq 0 ]]
  [[ "${ARG0}" == "mkdir" ]]
  [[ "${ARG1}" == "-p" ]]
  [[ "${ARG2}" == "some/dir" ]]
}
# Same contract for a different path, ruling out hard-coded arguments.
@test "it creates different directory" {
  makeSureDirIsPresentGeneric fakeExecute "some/other/dir"
  [[ "${status}" -eq 0 ]]
  [[ "${ARG0}" == "mkdir" ]]
  [[ "${ARG1}" == "-p" ]]
  [[ "${ARG2}" == "some/other/dir" ]]
}
20feed7414a4d7b60439d7de72af9aad6f1c3410 | Shell | UnderNotic/dotfiles | /dotnet/install.sh | UTF-8 | 1,176 | 3.71875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# dotnet core install
echo ''
echo 'Installing dotnet core'
echo ''
wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb
sudo dpkg -i packages-microsoft-prod.deb
sudo rm packages-microsoft-prod.deb
sudo apt update
sudo apt-get install -y dotnet-sdk-3.1
echo ''
echo "Now installing azure-functions-core-tools"
echo ''
sudo apt-get install azure-functions-core-tools -y
# Setup and configure az cli
echo ''
read -p "Do you want to install Azure CLI? y/n (This will take some time...)" -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
echo ''
echo "Now installing az cli..."
echo ''
# curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
curl -sL https://packages.microsoft.com/keys/microsoft.asc |
gpg --dearmor |
sudo tee /etc/apt/trusted.gpg.d/microsoft.gpg > /dev/null
AZ_REPO=$(lsb_release -cs)
echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ $AZ_REPO main" |
sudo tee /etc/apt/sources.list.d/azure-cli.list
sudo apt-get update
sudo apt-get install azure-cli
else
echo "You chose not to install Azure CLI. Exiting now."
fi | true |
0eff05dd817ddb3a84267e2b12b93116530bda88 | Shell | ghnocchi/tools | /bin/cron_env.sh | UTF-8 | 482 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# 参考
# cronジョブを作るのにいつものやり方でいいんですか?
# https://qiita.com/jmatsu/items/0a5d80abe188b09644c1
# 使いかた
# edit crontab -e
# * * * * * env > share/cron_env
# $ cron_env.sh hoge.sh
CUR=$(cd $(dirname $0); pwd)
SHERE=$(dirname $CUR)/share
# reset env
while read line; do
eval "$line"
done < <(diff <(env) $SHERE/cron_env |grep "^[><]"|grep -v " _="|sed -e 's/^< \([^=]*\)=.*/unset \1/' -e 's/^>/export/'|sort -r)
$1
| true |
30e9191cc814df755d46862f4f6011ade15c8535 | Shell | cloudfoundry/cnb2cf | /scripts/integration.sh | UTF-8 | 588 | 3.4375 | 3 | [] | no_license | #!/usr/bin/env bash
set -euo pipefail
function main() {
pushd "$( dirname "${BASH_SOURCE[0]}" )/.." > /dev/null || return
./scripts/build.sh
set +e
local exit_code
go test -timeout 0 ./integration/... -v -run Integration
exit_code="${?}"
if [[ "${exit_code}" != "0" ]]; then
echo -e "\n\033[0;31m** GO Test Failed **\033[0m"
else
echo -e "\n\033[0;32m** GO Test Succeeded **\033[0m"
fi
set -e
popd > /dev/null || return
exit $exit_code
}
main
| true |
5f93e0488edad156680fc44687bd6e1984e93f8b | Shell | evenh/git-commit-autoprefix | /hooks/commit-msg | UTF-8 | 1,945 | 3.984375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# To enable this hook, copy this file under name "commit-msg"
# to your project folder, into .git/hooks directory and chmod +x.
#
# note: if the hook fails with a non-zero status, the commit is aborted
jira_project_key="FOO|BAR|BAZ" # specify JIRA project keys here
commitMessage=$(cat $1) # $1 means input parameter - which is file with the commit message
branch="$(git rev-parse --abbrev-ref HEAD)" # current branch name
if [ -n "${branch}" ] ; then # branch name is not an empty string
  issueName=`echo $branch | grep -Eo "(${jira_project_key})-[[:digit:]]+"` # project keys of your JIRA projects, parsed from GIT branch name;
  if [ -n "${issueName}" ] && [[ "${issueName}" != " " ]] ; then # an issue name has been parsed
    # see "project key" at https://confluence.atlassian.com/display/JIRA/Defining+a+Project#DefiningaProject-Creatingaproject
    if [ -n "${commitMessage}" ] && [[ "${commitMessage}" != "$issueName"* ]] ; then # original commit message does not start with issue name yet
      echo "[$issueName] $commitMessage" > $1 # rewriting original commit message with prefixed one
    fi
  else
    # If we are not in a feature branch, fetch the issue from the commit message
    issueName=`echo $commitMessage | grep -Eo "(${jira_project_key})-[[:digit:]]+"`
    if [ -n "${issueName}" ] && [[ "${issueName}" != " " ]] ; then
      # A key already appears somewhere in the message: rewrite the file
      # with the unchanged text (effectively a no-op).
      if [ -n "${commitMessage}" ] && [[ "${commitMessage}" != "${issueName}"* ]] ; then # original commit message does not start with issue name yet
        echo "$commitMessage" > $1 # rewriting original commit message with prefixed one
      fi
    else
      # No key in branch name or message: abort the commit (a non-zero
      # exit from a commit-msg hook rejects the commit).
      echo -e >&2 "\n\033[31mREJECTED:\033[0m\t\033[1mHold on cowboy! I can't see your JIRA issue number? Allowed project keys are: ${jira_project_key}"
      echo -e >&2 "\xF0\x9F\x9A\xAB\xF0\x9F\x91\xAE\t\tSpecify issue number in your commits like this: '[FOO-123] Added a bug fix'\033[0m\n"
      exit 1
    fi
  fi
fi
| true |
c12d6f057cb266d3cadd868635206d8307b33968 | Shell | nateurope/eldk | /ppc_85xx/usr/lib/ltp/testcases/network/stress/ns-tools/initialize_if | UTF-8 | 3,382 | 3.640625 | 4 | [] | no_license | #!/bin/sh
################################################################################
## ##
## Copyright (c) International Business Machines Corp., 2005 ##
## ##
## This program is free software; you can redistribute it and#or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation; either version 2 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ##
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ##
## for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program; if not, write to the Free Software ##
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ##
## ##
## ##
################################################################################
#
# File:
# initialize_if
#
# Description:
# Initialize the interface which belongs to the specified test link
#
# Author:
# Mitsuru Chinen <mitch@jp.ibm.com>
#
# Arguments:
# $1: Set the host type (lhost - local host | rhost - remote host)
# $2: The number of the test link
#
# Exit Value:
# 0: Exit normally
# >0: Exit abnormally
#
# History:
# Oct 19 2005 - Created (Mitsuru Chinen)
#
#-----------------------------------------------------------------------
#Uncomment line below for debug output.
$trace_logic
# Make sure the value of LTPROOT
LTPROOT=${LTPROOT:-$(cd ../../../../ && pwd)}
export LTPROOT
# Check the environment variables for the test
# (`.` instead of the bash-only `source`: this script runs under /bin/sh)
. check_envval || exit 1
# Arguments
if [ $# -ne 2 ]; then
    echo "Usage: $0 host_type link_num" >&2
    exit 1
fi
host_type=$1
link_num=$2
# Check the host type.
# BUGFIX: the error used to be redirected with `>$2`, which clobbered a
# file named after the 2nd argument instead of writing to stderr (`>&2`).
# The obsolescent `[ ... -a ... ]` form is replaced with two tests.
if [ "$host_type" != lhost ] && [ "$host_type" != rhost ]; then
    echo "$0: 1st argument is lhost or rhost" >&2
    exit 1
fi
# Define the interface name
ifname=$(get_ifname "$host_type" "$link_num") || exit 1
# Initialize the specified interface.
# NOTE(review): `command` only documents the remote command line and is
# never executed directly — confirm before removing it.
command="ifconfig $ifname down mtu 1500 ; ip route flush dev $ifname ; ip addr flush dev $ifname ; ifconfig $ifname up"
if [ "$host_type" = lhost ]; then
    ( ifconfig $ifname down && \
      ip link set mtu 1500 dev $ifname && \
      ip route flush dev $ifname && \
      ip addr flush dev $ifname && \
      ifconfig $ifname up ) >/dev/null 2>&1
    ret=$?
else
    ret=$($LTP_RSH $RHOST '( PATH=/sbin:/usr/sbin:$PATH ; ifconfig '$ifname' down && ip link set mtu 1500 dev '$ifname' && ip route flush dev '$ifname' && ip addr flush dev '$ifname' && ifconfig '$ifname' up ) >/dev/null 2>&1 ; echo $?')
fi
if [ "$ret" -gt 0 ]; then
    echo "Failed to initialize $ifname" >&2
    exit 1
fi
| true |
0370bd6d3f005f24b14bb018900b3c2fcf93ddf1 | Shell | guangchen/hadoop-deployer | /bin/configure_hadoop_cluster.sh | UTF-8 | 1,445 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
if [ $# != 1 ]; then
    echo "Script Usage: ./configure_hadoop_cluster.sh <path/to/node/file>"
    exit 1
fi
PBS_NODEFILE=$1
NODEFILE="$HADOOP_HOME/conf/nodefile"
# Probe for a free namenode IPC port, starting from the default 8020.
NAMENODE_IPC_PORT=8020
NAMENODE_IPC_PORT=$("$HADOOP_HOME/contrib/hadoop-deployer/bin/find_avail_port.sh" "$NAMENODE_IPC_PORT")
echo "namenode IPC server port is $NAMENODE_IPC_PORT"
# The jobtracker probes from the port right after the namenode's.
JOBTRACKER_IPC_PORT=$((NAMENODE_IPC_PORT + 1))
JOBTRACKER_IPC_PORT=$("$HADOOP_HOME/contrib/hadoop-deployer/bin/find_avail_port.sh" "$JOBTRACKER_IPC_PORT")
echo "jobtracker IPC server port is $JOBTRACKER_IPC_PORT"
# generate node file that lists each node, one per line
sort -u "$PBS_NODEFILE" > "$NODEFILE"
# the first node is used as master node
MASTER_NODE=$(head -n 1 "$NODEFILE")
echo "Uses $MASTER_NODE as master node"
SLAVE_NODE_FILE="$HADOOP_HOME/conf/slaves"
rm -rf "$SLAVE_NODE_FILE"
# Every node except the master becomes a worker. The original echoed
# each word of `tail`'s output in a loop; a direct redirection produces
# the same one-hostname-per-line file.
NUM_WORKERS=$(wc -l < "$NODEFILE")
NUM_WORKERS=$((NUM_WORKERS - 1))
tail -n "$NUM_WORKERS" "$NODEFILE" > "$SLAVE_NODE_FILE"
# generate core-site.xml
sed -e 's|__hostname__|'"$MASTER_NODE"'|' -e 's|__port__|'"$NAMENODE_IPC_PORT"'|' "$HADOOP_HOME/contrib/hadoop-deployer/etc/core-site-template.xml" > "$HADOOP_HOME/conf/core-site.xml"
# generate mapred-site.xml
sed -e 's|__hostname__|'"$MASTER_NODE"'|' -e 's|__port__|'"$JOBTRACKER_IPC_PORT"'|' "$HADOOP_HOME/contrib/hadoop-deployer/etc/mapred-site-template.xml" > "$HADOOP_HOME/conf/mapred-site.xml"
| true |
5300cb848ed4f895c8c51a30576ea299e900ced4 | Shell | lattera/my3status | /modules/kver.zsh | UTF-8 | 290 | 3.5 | 4 | [] | no_license | # Copyright (c) 2015 Shawn Webb
# License: 2-clause BSD
# Cache for the kernel version string used by kver(); initialise it
# (empty) so later `[ -z "$myver" ]` checks behave even under `set -u`.
if [ -z "${myver}" ]; then
    myver=""
fi
function kver() {
    # Report the kernel version as an i3bar/i3status JSON blob.
    # The value is computed once and cached in the global $myver.
    if [ -z "${myver}" ]; then
        # 4th whitespace-separated field of `uname -v`, with any trailing
        # "(...)"-style suffix stripped.
        myver="$(uname -v | awk '{ print $4 }')"
        myver="${myver%\(*}"
    fi
    cat <<EOF
{
    "name": "kernel_version",
    "full_text": "KVER: ${myver}"
}
EOF
}
| true |
02e6546e8018545a427ebc1851b3b77fe6f65eef | Shell | danpechi/nlu-test-sets | /irt_scripts/estimate_irt_params.sh | UTF-8 | 1,382 | 2.671875 | 3 | [] | no_license | # BASE_DIR=/Users/phumon/Documents/Research/
BASE_DIR=$(pwd)
SCRIPT_DIR=${BASE_DIR}/irt_scripts
IN_DIR=${BASE_DIR}/data
OUT_DIR=${BASE_DIR}/params
SEED=101
# adjust the following parameters according to chosen setup
# here we use our best parameters used in the paper.
# supported distributions: 'lognormal', 'beta', 'normal'
# NOTE(review): DISTS is never referenced below (the --discr/--ability
# values are hard-coded) — confirm whether it should drive them.
DISTS=('lognormal')
ALPH_STDS=('0.3')
PARAM_STDS=('1.0')
# list of tasks to analyze
TASKS="boolq,cb,commonsenseqa,copa,cosmosqa,hellaswag,rte,snli,wic,qamr,arct,mcscript,mctaco,mutual,mutual-plus,quoref,socialiqa,squad_v2,wsc,mnli,mrqa-nq,newsqa,abductive-nli,arc-easy,arc-challenge,piqa,quail,winogrande,adversarial-nli"
# if want to use sampling instead of all examples
# not used if --no_subsample is specified
sample_size=1500
# Grid over the prior standard deviations; each (alpha_std, item_std)
# combination runs one variational IRT fit over all datasets.
for alpha_std in "${ALPH_STDS[@]}"
do
  for item_std in "${PARAM_STDS[@]}"
  do
    echo Alpha Std $alpha_std, Diff Guess Std $item_std
    ALPHA_TRANS=identity
    THETA_TRANS=identity
    python \
    $SCRIPT_DIR/variational_irt.py \
    --response_dir $IN_DIR \
    --out_dir $OUT_DIR \
    --seed $SEED \
    --discr 'lognormal' \
    --ability 'normal' \
    --discr_transform $ALPHA_TRANS \
    --ability_transform $THETA_TRANS \
    --datasets $TASKS \
    --sample_size $sample_size \
    --no_subsample \
    --alpha_std $alpha_std \
    --item_param_std $item_std \
    --verbose
  done
done
| true |
d1142da709f05ec4ee9adf67dc44fecb5cb484f9 | Shell | william0wang/drone-android | /init.sh | UTF-8 | 1,242 | 3.203125 | 3 | [] | no_license | #! /bin/bash
# Incremental provisioning for the Android SDK build box.
# /data/workspace/last_build stores the number of the last completed
# step; each block below runs only if that step was not reached yet.
ver_file=/data/workspace/last_build
var=$(cat "$ver_file" 2>/dev/null)
if [ -z "$var" ]; then
	var=0
fi
if [ "$var" -lt 1 ]; then
	# Guard every cd: the original continued blindly on failure, which
	# could run later (destructive) commands in the wrong directory.
	cd /data/android-sdk-linux || exit 1
	axel --output=android-sdk.zip https://dl.google.com/android/repository/tools_r25.2.2-linux.zip
	unzip android-sdk.zip
	rm -f android-sdk.zip
	chown -R root.root /data/android-sdk-linux
	# Pre-accept the SDK/preview licenses so `android update sdk`
	# stays non-interactive.
	mkdir -p licenses
	cd licenses || exit 1
	echo -e "\n8933bad161af4178b1185d1a37fbf41ea5269c55" > "android-sdk-license"
	echo -e "\n84831b9409646a918e30573bab4c9c91346d8abd" > "android-sdk-preview-license"
	cd /data/android-sdk-linux || exit 1
	echo y | android update sdk --all --no-ui --filter \
		"tools-r26.0.0,platform-tools-25.0.4,build-tools-25.0.2,android-25,android-24,android-23,extra-android-m2repository,extra-google-google_play_services,extra-google-m2repository"
	echo 1 > "$ver_file"
fi
if [ "$var" -lt 9 ]; then
	echo y | android update sdk --all --no-ui --filter "tools-r26.0.0,platform-tools-25.0.4"
	cd /data/workspace || exit 1
	# Build initapp once and delete it — presumably to warm the
	# Gradle/SDK caches; confirm intent before changing.
	git clone https://github.com/william0wang/initapp.git
	chmod 755 initapp/gradlew
	cd initapp && ./gradlew assembleRelease
	cd /data/workspace || exit 1
	rm -rf initapp
	echo 9 > "$ver_file"
fi
| true |
49bb03697354ee4f850032d569b6d2b7c8945a69 | Shell | anisotropi4/goldfinch | /ogrrailway/bin/creategeoindex.sh | UTF-8 | 541 | 2.796875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#NOTE only creates a GeoIndex on the ['lat', 'lon'] fields
. ar-env.sh
COLLECTION=`echo $@`
if [ $# = 0 ]; then
COLLECTION=`cat -`
fi
${NODE} <<- @EOF
arangojs = require('arangojs');
db = new arangojs.Database({url: 'http://${ARSVR}:8529'});
db.useBasicAuth("${ARUSR}", "${ARPWD}");
db.useDatabase("${ARDBN}");
db.collection('${COLLECTION}').createGeoIndex(['lat','lon'])
.then(index => console.log("create geoindex ${COLLECTION}: ",index.id, index.fields),
err => console.error("No GeoIndex ${COLLECITON}"));
@EOF
| true |
7d0f47b86513058a2ab0713f3b2d73b7e220a703 | Shell | akegiraldo/Miscellaneous | /mysql_5.7_setup | UTF-8 | 1,222 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env bash
#Script that delete any current version of mysql and its data and install mysql_5.7
#Write by: Kevin Giraldo
service mysql stop
killall -KILL mysql mysqld_safe mysqld
apt-get purge mysql-server mysql-client
apt-get autoremove --purge
sudo apt-get remove --purge *mysql\*
sudo apt-get autoremove
deluser --remove-home mysql
delgroup mysql
rm -rf /etc/apparmor.d/abstractions/mysql /etc/apparmor.d/cache/usr.sbin.mysqld /etc/mysql /var/lib/mysql /var/log/mysql* /var/log/upstart/mysql.log* /var/run/mysqld
rm ~/.mysql_history
sudo apt-get autoclean
mkdir mysql_setup
wget https://dev.mysql.com/get/Downloads/MySQL-5.7/mysql-server_5.7.28-1ubuntu19.04_amd64.deb-bundle.tar
mv mysql-server_5.7.28-1ubuntu19.04_amd64.deb-bundle.tar mysql_setup/
cd mysql_setup/
tar -xvf mysql-server_5.7.28-1ubuntu19.04_amd64.deb-bundle.tar
rm -rf mysql-testsuite_5.7.28-1ubuntu19.04_amd64.deb mysql-community-test_5.7.28-1ubuntu19.04_amd64.deb
dpkg -i mysql-*.deb
cd ..
rm -rf mysql_setup
apt --fix-broken install
apt-get install python3-dev
apt-get install libmysqlclient-dev
apt-get install zlib1g-dev
pip3 install mysqlclient
pip3 install SQLAlchemy==1.2.5
/etc/init.d/mysql start
/etc/init.d/mysql status
mysql --version
exit
| true |
ff6bc3b3e7d1a465a8a8f7dfd42f51331b61a205 | Shell | ravindasenarath/bash-scripts | /bin/install_dhall | UTF-8 | 6,616 | 3.625 | 4 | [] | no_license | #!/bin/bash -eu
function __install_dhall_activator() {
local self=$(readlink -f "${BASH_SOURCE[0]}")
local dir=$(dirname $self)
local activator="${1}"
local version="${2}"
[[ ! -d $(dirname "${config}") ]] && mkdir -p $(dirname "${config}")
cat <<EOD > "${config}"
#!/bin/bash
export DHALL_VERSION=${version}
export DHALL_ARCH=\${DHALL_ARCH:-x86_64-linux}
export DHALL_HOME=\${TOOLS_HOME:=\$HOME/tools}/dhall-v\${DHALL_VERSION}-\${DHALL_ARCH}/bin
export PATH=\${DHALL_HOME}:\${PATH}
EOD
chmod ugo+x "${config}"
echo "${config}"
}
function __install_dhall_compiler_binaries() {
local self=$(readlink -f "${BASH_SOURCE[0]}")
local dir=$(dirname $self)
which wget >/dev/null 2>&1 || apt+ install wget
local latest=https://github.com/dhall-lang/dhall-haskell/releases/latest
local version=$(wget -q -SO- -T 5 -t 1 "${latest}" 2>/dev/null | fgrep location: | cut -d' ' -f2 | sed -E 's|.*\/v(.*)|\1|')
local version=${version:-"1.38.0"}
local version=${1:-"${version}"}
##FIXME: local arch=${2:-"$DHALL_ARCH"}
local arch=${arch:-"x86_64-linux"}
local file=dhall-${version}-${arch}.tar.bz2
local url=https://github.com/dhall-lang/dhall-haskell/releases/download/${version}/${file}
local folder=dhall-v${version}-${arch}
local symlink=dhall
local config="${VIRTUAL_ENV:-${HOME}/.local/share/bash-scripts}"/postactivate/postactivate.d/271-dhall.sh
__install_dhall_activator "${config}" "${version}"
"${dir}"/bash_install_tar "${url}" "${file}" "${folder}" "${symlink}"
}
function __install_dhall_lsp_binaries() {
local self=$(readlink -f "${BASH_SOURCE[0]}")
local dir=$(dirname $self)
which wget >/dev/null 2>&1 || apt+ install wget
local latest=https://github.com/dhall-lang/dhall-haskell/releases/latest
local version=$(wget -q -SO- -T 5 -t 1 "${latest}" 2>/dev/null | fgrep location: | cut -d' ' -f2 | sed -E 's|.*\/v(.*)|\1|')
local version=${version:-"1.38.0"}
local version=${1:-"${version}"}
##FIXME: local vtool=${2:-"$DHALL_LSP_VERSION"}
local vtool=${vtool:-"1.0.13"}
##FIXME: local arch=${3:-"$DHALL_ARCH"}
local arch=${arch:-"x86_64-linux"}
local file=dhall-lsp-server-${vtool}-${arch}.tar.bz2
local url=https://github.com/dhall-lang/dhall-haskell/releases/download/${version}/${file}
local folder=dhall-v${version}-${arch}
local symlink=dhall
"${dir}"/bash_install_tar "${url}" "${file}" "${folder}" "${symlink}"
}
function __install_dhall_json_binaries() {
local self=$(readlink -f "${BASH_SOURCE[0]}")
local dir=$(dirname $self)
which wget >/dev/null 2>&1 || apt+ install wget
local latest=https://github.com/dhall-lang/dhall-haskell/releases/latest
local version=$(wget -q -SO- -T 5 -t 1 "${latest}" 2>/dev/null | fgrep location: | cut -d' ' -f2 | sed -E 's|.*\/v(.*)|\1|')
local version=${version:-"1.38.0"}
local version=${1:-"${version}"}
##FIXME: local vtool=${2:-"$DHALL_JSON_VERSION"}
local vtool=${vtool:-"1.7.5"}
##FIXME: local arch=${3:-"$DHALL_ARCH"}
local arch=${arch:-"x86_64-linux"}
local file=dhall-json-${vtool}-${arch}.tar.bz2
local url=https://github.com/dhall-lang/dhall-haskell/releases/download/${version}/${file}
local folder=dhall-v${version}-${arch}
local symlink=dhall
"${dir}"/bash_install_tar "${url}" "${file}" "${folder}" "${symlink}"
}
function __install_dhall_yaml_binaries() {
local self=$(readlink -f "${BASH_SOURCE[0]}")
local dir=$(dirname $self)
which wget >/dev/null 2>&1 || apt+ install wget
local latest=https://github.com/dhall-lang/dhall-haskell/releases/latest
local version=$(wget -q -SO- -T 5 -t 1 "${latest}" 2>/dev/null | fgrep location: | cut -d' ' -f2 | sed -E 's|.*\/v(.*)|\1|')
local version=${version:-"1.38.0"}
local version=${1:-"${version}"}
##FIXME: local vtool=${2:-"$DHALL_YAML_VERSION"}
local vtool=${vtool:-"1.2.5"}
##FIXME: local arch=${3:-"$DHALL_ARCH"}
local arch=${arch:-"x86_64-linux"}
local file=dhall-yaml-${vtool}-${arch}.tar.bz2
local url=https://github.com/dhall-lang/dhall-haskell/releases/download/${version}/${file}
local folder=dhall-v${version}-${arch}
local symlink=dhall
"${dir}"/bash_install_tar "${url}" "${file}" "${folder}" "${symlink}"
}
function __install_dhall_bash_binaries() {
local self=$(readlink -f "${BASH_SOURCE[0]}")
local dir=$(dirname $self)
which wget >/dev/null 2>&1 || apt+ install wget
local latest=https://github.com/dhall-lang/dhall-haskell/releases/latest
local version=$(wget -q -SO- -T 5 -t 1 "${latest}" 2>/dev/null | fgrep location: | cut -d' ' -f2 | sed -E 's|.*\/v(.*)|\1|')
local version=${version:-"1.38.0"}
local version=${1:-"${version}"}
##FIXME: local vtool=${2:-"$DHALL_BASH_VERSION"}
local vtool=${vtool:-"1.0.36"}
##FIXME: local arch=${3:-"$DHALL_ARCH"}
local arch=${arch:-"x86_64-linux"}
local file=dhall-bash-${vtool}-${arch}.tar.bz2
local url=https://github.com/dhall-lang/dhall-haskell/releases/download/${version}/${file}
local folder=dhall-v${version}-${arch}
local symlink=dhall
"${dir}"/bash_install_tar "${url}" "${file}" "${folder}" "${symlink}"
}
function __install_dhall_nix_binaries() {
local self=$(readlink -f "${BASH_SOURCE[0]}")
local dir=$(dirname $self)
which wget >/dev/null 2>&1 || apt+ install wget
local latest=https://github.com/dhall-lang/dhall-haskell/releases/latest
local version=$(wget -q -SO- -T 5 -t 1 "${latest}" 2>/dev/null | fgrep location: | cut -d' ' -f2 | sed -E 's|.*\/v(.*)|\1|')
local version=${version:-"1.38.0"}
local version=${1:-"${version}"}
##FIXME: local vtool=${2:-"$DHALL_NIX_VERSION"}
local vtool=${vtool:-"1.1.20"}
##FIXME: local arch=${3:-"$DHALL_ARCH"}
local arch=${arch:-"x86_64-linux"}
local file=dhall-nix-${vtool}-${arch}.tar.bz2
local url=https://github.com/dhall-lang/dhall-haskell/releases/download/${version}/${file}
local folder=dhall-v${version}-${arch}
local symlink=dhall
"${dir}"/bash_install_tar "${url}" "${file}" "${folder}" "${symlink}"
}
function __install_dhall() {
__install_dhall_compiler_binaries "$@" && \
__install_dhall_lsp_binaries "$@" && \
__install_dhall_json_binaries "$@" && \
__install_dhall_yaml_binaries "$@" && \
__install_dhall_bash_binaries "$@" && \
__install_dhall_nix_binaries "$@"
}
if [ $_ != $0 ]; then
# echo "Script is being sourced: list all functions"
grep -E "^function __" $(readlink -f "${BASH_SOURCE[0]}") | cut -d' ' -f2 | sed 's/()//'
else
# echo "Script is a subshell: execute last function"
$(grep -E "^function __" $(readlink -f "${BASH_SOURCE[0]}") | cut -d' ' -f2 | sed 's/()//' | tail -1) "$@"
fi
| true |
e34ce24e84062124ee311849a2bb131e8d621793 | Shell | afrovita/OpenWrt-Rpi | /files/bin/clashcs | UTF-8 | 4,629 | 3.671875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#--------------------------------------------------------
# Don't remove this section for credits
# Don't rename this file
#--------------------------------------------------------
# This script function is to switch between clash_core
# and clash_premium_core
# by Helmi Amirudin a.k.a helmiau
# my profile page https://wwww.helmiau.com
# my github https://github.com/helmiau
#--------------------------------------------------------
CORE="/etc/openclash/core/clash"
ORI="/etc/openclash/core/clash_original"
PREM="/etc/openclash/core/clash_premium"
ARCH=$(grep core_version /etc/config/openclash | awk -F"'" '$0=$2')
#ORILINK=$(curl -sL http://api.github.com/repos/Dreamacro/clash/releases/tags/premium | grep /clash-linux-$1 | sed 's/.*url\": \"//g' | sed 's/\"//g')
#PREMLINK=$(curl -sL http://api.github.com/repos/vernesong/OpenClash/releases/tags/Clash | grep /clash-linux-$1 | sed 's/.*url\": \"//g' | sed 's/\"//g')
ORIEXT=".tar.gz -o /tmp/clash.tar.gz"
PREMEXT=".gz -o /tmp/clash.gz"
echo -e "\033[1;32m"
echo "==============================="
echo " Clash Core Switcher Script "
echo " Created by helmiau "
echo "==============================="
echo -e " Architecture : $ARCH"
function helmi_clash_cemod() {
chmod +x /etc/openclash/core/clash*
}
function helmi_clash_start() {
/etc/init.d/openclash start
uci set openclash.config.enable=1
uci commit openclash
}
function helmi_clash_stop() {
/etc/init.d/openclash stop
uci set openclash.config.enable=0
uci commit openclash
}
echo "==============================="
if [[ -f $CORE ]] && [[ -f $PREM ]];then
echo " Current Clash Core : Original"
echo " Switch to Premium Core? (y/n)"
echo "==============================="
echo -n " "
read -r jawab
if [[ $jawab == "y" ]]; then
helmi_clash_stop \ &&
mv /etc/openclash/core/clash /etc/openclash/core/clash_original
mv /etc/openclash/core/clash_premium /etc/openclash/core/clash
helmi_clash_cemod \ &&
helmi_clash_start
else
exit
fi
elif [[ -f $CORE ]] && [[ -f $ORI ]];then
echo " Current Clash Core : Premium"
echo " Switch to Original Core? (y/n)"
echo "==============================="
echo -n " "
read -r jawab
if [[ $jawab == "y" ]]; then
helmi_clash_stop \ &&
mv /etc/openclash/core/clash /etc/openclash/core/clash_premium
mv /etc/openclash/core/clash_original /etc/openclash/core/clash
helmi_clash_cemod \ &&
helmi_clash_start
else
exit
fi
elif [[ -f $CORE ]] && [[ ! -f $PREM ]];then
echo " Current Clash Core = Unknown"
echo " Download Premium Core Now? (y/n)"
echo "==============================="
echo -n " "
read -r jawab
if [[ $jawab == "y" ]]; then
helmi_clash_stop \ &&
wget -qO- $(curl -sL http://api.github.com/repos/Dreamacro/clash/releases/tags/premium | grep /clash-$ARCH | sed 's/.*url\": \"//g' | sed 's/\"//g') | gunzip -c > /etc/openclash/core/clash
wget -qO- $(curl -sL http://api.github.com/repos/vernesong/OpenClash/releases/tags/Clash | grep /clash-$ARCH | sed 's/.*url\": \"//g' | sed 's/\"//g') | tar xOvz > /etc/openclash/core/clash_original
helmi_clash_cemod \ &&
helmi_clash_start
else
exit
fi
elif [[ -f $CORE ]] && [[ ! -f $ORI ]];then
echo " Current Clash Core = Unknown"
echo " Download Premium Core Now? (y/n)"
echo "==============================="
echo -n " "
read -r jawab
if [[ $jawab == "y" ]]; then
helmi_clash_stop \ &&
wget -qO- $(curl -sL http://api.github.com/repos/Dreamacro/clash/releases/tags/premium | grep /clash-$ARCH | sed 's/.*url\": \"//g' | sed 's/\"//g') | gunzip -c > /etc/openclash/core/clash
wget -qO- $(curl -sL http://api.github.com/repos/vernesong/OpenClash/releases/tags/Clash | grep /clash-$ARCH | sed 's/.*url\": \"//g' | sed 's/\"//g') | tar xOvz > /etc/openclash/core/clash_original
helmi_clash_cemod \ &&
helmi_clash_start
else
exit
fi
elif [[ ! -f $ORI ]] && [[ ! -f $PREM ]] && [[ ! -f $CORE ]];then
echo " Current Clash Core = Unknown"
echo " Download Premium Core Now? (y/n)"
echo "==============================="
echo -n " "
read -r jawab
if [[ $jawab == "y" ]]; then
helmi_clash_stop \ &&
wget -qO- $(curl -sL http://api.github.com/repos/Dreamacro/clash/releases/tags/premium | grep /clash-$ARCH | sed 's/.*url\": \"//g' | sed 's/\"//g') | gunzip -c > /etc/openclash/core/clash
wget -qO- $(curl -sL http://api.github.com/repos/vernesong/OpenClash/releases/tags/Clash | grep /clash-$ARCH | sed 's/.*url\": \"//g' | sed 's/\"//g') | tar xOvz > /etc/openclash/core/clash_original
helmi_clash_cemod \ &&
helmi_clash_start
else
exit
fi
else
echo " Exiting bro. see yu suun !"
exit
fi
| true |
e3d98a373446b59b958122f305640fe9608c1a47 | Shell | KriwkA/GeekBrainsAI | /LinuxWorkstation/lesson2/task2-5.sh | UTF-8 | 1,339 | 3.921875 | 4 | [] | no_license | #!/bin/bash
function pause() {
read -p "Press enter to continue..."
}
# task 2
echo "Creating task2_dir directory for second task files"
pause
mkdir task2_dir
echo "Enter the file1.txt's text:"
cat > task2_dir/file1.txt
echo "Enter the file2.txt's text:"
cat > task2_dir/file2.txt
echo "Concatenate task2_dir/file1.txt task2_dir/file2.txt to ask2_dir/file3.txt"
pause
cat task2_dir/file1.txt task2_dir/file2.txt > task2_dir/file3.txt
echo "Renaming task2_dir/file3.txt to task2_dir/file3_renamed.txt"
pause
mv task2_dir/file3.txt task2_dir/file3_renamed.txt
# task 3
echo "Creating task3_dir directory for third task files"
pause
mkdir task3_dir
echo "Creating files: task3_dir/task3_file{1..5}.txt"
pause
touch task3_dir/task3_file{1..5}.txt
echo "Creating task3_dir/temp directory."
pause
mkdir task3_dir/temp
echo "Moving task3_dir/task3_file1.txt to task3_dir/temp/task3_file1.txt"
pause
mv task3_dir/task3_file1.txt task3_dir/temp/task3_file1.txt
echo "All created files at second and third tasks will be removed"
pause
mv task3_dir task2_dir/task_3dir
rm -rf task2_dir
#task 4
echo "Count of hiden files in '/home' is:"
ls -a /home | grep ^'\.' | wc -l
#task 5
echo "Printing all files from /etc:"
pause
for file in "/etc"/*
do
cat $file 2> errors.txt
done
echo "Count of read file error is:"
cat errors.txt | wc -l
| true |
7d95c212bfe6cd11ccf7bdbaafc2d66472c87e47 | Shell | marinkarin/linux_sysinfo | /src/linux_sysinfo.sh | UTF-8 | 8,124 | 4.25 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# v.2.0
# mjk 2018.08.07
#### Bash script to extract useful info from a Linux box ####
#### Tested to run on the following distros: ####
#### Ubuntu 16.04 LTS "Xenial Xerus" ####
#### This script builds on, and improves grabsysinfo.sh ####
#### attributed to Vivek Gite circa 2007.09.12. ####
#### PRELIMS ####
function pv_check(){
# `pv` is used in `disk_hogs` for progress bar
# `which` to check for `pv` --> output to /dev/null
# exit program if not installed
if ! which pv &> /dev/null; then
printf "%s\\n" "ERROR: PLEASE INSTALL PV OR ADD IT TO YOUR PATH."
exit 1
fi
}
#### HEADERS ####
write_header() {
# print header
local name=$1; shift;
printf "%s""--------------------\\n$name%s\\n--------------------\\n"
printf "%s" "$@"
}
write_info() {
# print info
local name=$1; shift;
printf "%s""$name%s"
printf "%s\\n" "$@"
}
#### MENU ####
show_menu() {
# display on-screen menu
date
printf "%s\n" "------------------------------"
printf "%s\n" " LINUX SYSTEM INFO "
printf "%s\n" " MAIN MENU "
printf "%s\n" "------------------------------"
printf "%s\n" " 1. OS Info"
printf "%s\n" " 2. Hostname & DNS Info"
printf "%s\n" " 3. Network Info"
printf "%s\n" " 4. Who is Online"
printf "%s\n" " 5. Last Logged in Users"
printf "%s\n" " 6. CPU Info"
printf "%s\n" " 7. Free & Used Memory Info"
printf "%s\n" " 8. Disk Usage"
printf "%s\n" " 9. Exit"
}
read_input(){
# get user input via keyboard and make a decision using case...esac
local c
read -p "Enter your choice [ 1-9 ]: " c
case $c in
1) os_info ;;
2) host_info ;;
3) net_info ;;
4) current_users ;;
5) recent_users ;;
6) cpu_info ;;
7) mem_info ;;
8) disk_space;;
9) printf "%s\n" "Ciao!"; exit 0 ;;
*)
printf "%s\n" "Select an Option (1 to 9): "
pause
esac
}
pause() {
# pause prompt
# suspend processing of script; display message prompting user to press [Enter] key to continue
# $1-> Message (optional)
local message="$@"
[ -z $message ] && message="Press [Enter] key to continue: "
read -p "$message" readEnterKey
}
#### SYS INFO ####
#### OS INFO ####
# kernel and operating system info
kernel_name() {
# kernel name
local kern=$(uname --kernel-name)
write_info "Kernel Name: ${kern}"
}
kernel_release () {
# kernel release
local kernr=$(uname --kernel-release)
write_info "Kernel Release: ${kernr}"
}
os_name() {
# relase name
local name=$(awk '/^NAME=/' /etc/*-release |cut --delimiter=\" --field=2)
write_info "OS Name: ${name}"
}
os_version() {
# release version
local version=$(awk '/^VERSION=/' /etc/*-release |cut --delimiter=\" --field=2)
write_info "OS Version: ${version}"
}
os_info() {
# wrapper
write_header "SYSTEM INFO"
kernel_name
kernel_release
os_name
os_version
pause
}
#### HOST INFO ####
# host & DNS info
host_name() {
local host=$(hostname --short)
write_info "Hostname: ${host}"
}
dns_domain() {
local dns=$(hostname --domain)
write_info "DNS Domain: ${dns}"
}
fully_qualified() {
local fqdn=$(hostname --fqdn)
write_info "Fully-qualified Domain Name: ${fqdn}"
}
ip_address() {
local ip=$(hostname --ip-address)
write_info "Network Address (IP): ${ip}"
}
dns_name() {
local search=$(awk '/^search/ {print $2}' /etc/resolv.conf)
write_info "Domain Name Servers (DNS name): ${search}"
}
dns_ips() {
local name_serv=$(awk '/^nameserver/ {print $2}' /etc/resolv.conf)
write_info "Domain Name Servers (DNS IPs): ${name_serv}"
}
host_info() {
# wrapper
write_header "HOSTNAME & DNS INFO"
host_name
dns_domain
fully_qualified
ip_address
dns_name
dns_ips
pause
}
#### NET INFO ####
# network interfaces and routing
network_interfaces() {
# number of network interfaces
local devices=$(ip -oneline link show |wc --lines)
printf "%s\\n" "${devices}"
}
ip_add_info() {
# IP protocol addresses
local ip_add=$(ip --family inet address show)
printf "%s\\n" "${ip_add}"
}
network_routing() {
# IP routing table
local route=$(netstat --route --numeric)
printf "%s\\n" "${route}"
}
interface_traffic() {
# status of interfaces on the network
local traffic=$(netstat --interfaces)
printf "%s\\n" "${traffic}"
}
net_info() {
# wrapper
write_header "NETWORK INFO"
write_header "TOTAL NETWORK INTERFACES"
network_interfaces
write_header "IP ADDRESS INFO"
ip_add_info
write_header "NETWORK ROUTING"
network_routing
write_header "INTERFACE TRAFFIC INFO"
interface_traffic
pause
}
#### CURRENT USERS ####
# logged in users
who_is_on() {
# `who` built in
local whoo=$(who --heading)
printf "%s\\n" "${whoo}"
}
current_users() {
# wrapper function
write_header "WHO IS ONLINE?"
who_is_on
pause
}
#### RECENT USERS ####
# list of recent logins
ten_last() {
# `last` built-in; last 10 incl. host & dns
local lasty=$(last -n 10 -a -d)
printf "%s\\n" "${lasty}"
}
recent_users() {
# wrapper
write_header "LAST 10 LOGINS" #"$user_info"
ten_last
pause
}
### CPU INFO ###
# info about CPU
cpu_model() {
# query lscpu for: `Model name`
local model=$(lscpu |grep --word-regexp 'Model name:')
write_info "${model}"
}
cpu_socket() {
# query lscpu for: `Sockets`
local socket=$(lscpu |grep --word-regexp 'Socket(s):')
write_info "${socket}"
}
cpu_cores() {
# query lscpu for: `Cores`
local cores=$(lscpu |grep --word-regexp 'Core(s) per socket:')
write_info "${cores}"
}
cpu_info() {
# wrapper
write_header "CPU INFO" "${cpu_info}"
cpu_model
cpu_socket
cpu_cores
pause
}
#### MEM INFO ####
# used and free memory
ram_stats() {
# display free & used memory
local ram=$(free --giga --human)
printf "%s\\n" "${ram}"
}
vram_stats() {
# display virtual memory
local vram=$(vmstat)
printf "%s\\n" "${vram}"
}
top_ram_eaters() {
# regex ps to define, extract, and sort top memory (then cpu) consuming processes
local hungry_ram=$(ps -Ao user,pid,pcpu,pmem,stat,command --sort=-%mem,-%cpu |\
head -11 |awk '{print $1, $2, $3, $4, $5, $6, $7}')
printf "%s\\n" "${hungry_ram}"
}
mem_info() {
# wrapper
write_header "MEMORY INFO"
write_header "FREE & USED MEMORY"
ram_stats
write_header "VIRTUAL MEMORY STATISTICS"
vram_stats
write_header "TOP 10 MEMORY EATING PROCESS"
top_ram_eaters
pause
}
#### DISK INFO ####
# info on free & used disk space
# NOTE: this script was designed to run w/out eleveated privilieges,
# BUT: if run-as a non-admin user, the results from `disk hogs`
# will be limited
disk_usage() {
# Retrieve file system info re: disk space
local disk=$(df --human-readable --total |awk 'NR==1; END{print}')
printf "%s\\n" "${disk}"
}
file_hogs() {
# top 10 disk-eating files
printf "%s\\n" "Searching..."
# scan file system from / for files; output background noise to /dev/null; pv for progress
local largestfiles=$(find / -type f -exec du --separate-dirs --human-readable {} + 2>/dev/null |pv)
printf "%s\n" "${largestfiles}" |sort --reverse --human | head --lines=10
}
dir_hogs() {
# Retrieve top 10 disk-eating directories
printf "%s\\n" "Searching..."
# scan file system from / for directories; output background noise to: /dev/null; pv for progress
local largestdirs=$(find / -type d -exec du --separate-dirs --human-readable {} + 2>/dev/null |pv)
printf "%s\\n" "${largestdirs}" |sort --reverse --human |uniq |head --lines=10
}
disk_space() {
# wrapper
write_header "DISK INFO"
write_header "DISK USAGE"
disk_usage
write_header "TOP 10 DISK-EATING FILES"
file_hogs
write_header "TOP 10 DISK-EATING DIRECTORIES"
dir_hogs
pause
}
#### Main ####
pv_check
while true
do
clear
show_menu
read_input
done
| true |
661aec466bad7666005fe61bcb660f98bebcb482 | Shell | blavigne13/git-migration | /migrate/load.sh | UTF-8 | 4,804 | 3.671875 | 4 | [] | no_license | #!/bin/bash
load() {
local name="${1%.migration}.migration"
if [[ -z "$1" || ! -f "${name}" ]]; then
err "load: ${name}: file not found"
list
return 2
fi
drop_a_load \
&& msg "loading: ${name}" \
&& migration[file]="${name}" \
&& read_migration_file \
&& set_optional \
&& check_required \
&& migration[default-domain]="${migration[default-domain]/'='/' = '}" \
&& migration[trunk]=$(format_map "${migration[trunk]}") \
&& migration[branches]=$(format_map "${migration[branches]}") \
&& migration[tags]=$(format_map "${migration[tags]}")
if [[ ! "$?" = "0" ]]; then
# drop_a_load
err "load: ${name}: fail"
return 1
fi
msg "${name} is loaded"
}
# Clear migration vars
drop_a_load() {
unset migration
declare -g -A migration
msg "load dropped"
}
check_required() {
declare ret
if [[ ! "${migration[svn-url]}" == https://svn.schoolspecialty.com/svn/* ]]; then
err "load: invalid svn-url: ${migration[svn-url]}"
ret="${ret:=1}"
fi
if [[ ! "${migration[git-url]}" == *@bitbucket.schoolspecialty.com:*.git ]]; then
err "load: invalid git-url: ${migration[git-url]}"
# ret="${ret:=1}"
fi
if [[ -z "${migration[authors-file]}" ]]; then
err "load: authors-file: missing required item"
ret="${ret:=1}"
fi
if [[ ! -f "${migration[authors-file]}" ]]; then
err "load: ${migration[authors-file]}: file not found or not a regular file"
ret="${ret:=1}"
fi
if [[ ! "${migration[trunk]}" == *trunk*:*refs/heads/master ]]; then
err "load: invalid trunk map: ${migration[trunk]}"
ret="${ret:=1}"
fi
if [[ -n "${migration[branches]}" && ! "${migration[branches]}" == *branches*:*refs/heads/* ]]; then
err "load: invalid branches map: ${migration[branches]}"
ret="${ret:=1}"
fi
if [[ -n "${migration[tags]}" && ! "${migration[tags]}" == *tags*:*refs/tags/* ]]; then
err "load: invalid tags map: ${migration[tags]}"
ret="${ret:=1}"
fi
return "${ret:-0}"
}
set_optional() {
declare ret
if [[ -z "${migration[svn-dir]}" ]]; then
msg "svn-dir not provided, SVN queries disabled."
svn_queries+="no-svn-dir!"
fi
if [[ -z "${migration[git-dir]}" ]]; then
migration[git-dir]="${migration[git-url]##*'/'}"
fi
if [[ -z "${migration[default-domain]}" ]]; then
migration[default-domain]="${default[default-domain]}"
fi
if [[ -z "${migration[authors-file]}" || ! -f "${migration[authors-file]}" ]]; then
err "authors-file not found: ${migration[authors-file]}"
info "Checking for" "
${project_path}/${default[authors-file]}
${project_path}/../${default[authors-file]}
${scripts_path}/${default[authors-file]}"
if [[ -f "${project_path}/${default[authors-file]}" ]]; then
migration[authors-file]=$(readlink -f "${project_path}/${default[authors-file]}")
info "Found" "${migration[authors-file]}\n"
elif [[ -f "${project_path}/../${default[authors-file]}" ]]; then
migration[authors-file]=$(readlink -f "${project_path}/../${default[authors-file]}")
info "Found" "${migration[authors-file]}\n"
elif [[ -f "${scripts_path}/${default[authors-file]}" ]]; then
migration[authors-file]=$(readlink -f "${scripts_path}/${default[authors-file]}")
info "Found" "${migration[authors-file]}\n"
else
migration[authors-file]=""
err "
If you REALLY don't want to map commit authors, make an empty authors.txt.
After all, it shouldn't be easy to shoot yourself in the foot...
"
ret="${ret:=1}"
fi
fi
return "${ret:-0}"
}
read_migration_file() {
declare ret
(( lnum = 0 ))
while read line || [ -n "${line}" ]; do
(( ++lnum ))
line="${line/'#'*/}" # strip comments
line="${line//[[:space:]]/}" # strip whitespace
case "${line}" in
svn-url*)
migration[svn-url]="${line#*=}"
;;
svn-dir*)
migration[svn-dir]="${line#*=}"
;;
git-url*)
migration[git-url]="${line#*=}"
;;
git-dir*)
migration[git-dir]="${project_path}/${line#*=}"
;;
authors-file*)
migration[authors-file]="${line#*=}"
;;
default-domain*)
migration[default-domain]="\tdefaultDomain=${line#*=}"
;;
trunk*)
migration[trunk]="\t${line}"
;;
branches*)
migration[branches]+="\t${line}\n"
;;
tags*)
migration[tags]+="\t${line}\n"
;;
"")
;;
*)
err "load: ${migration[file]} (${lnum}): ${line}: invalid entry"
ret="${ret:=1}"
;;
esac
done < "${migration[file]}"
return "${ret:=0}"
}
# meant to be used w/ command substitution
format_map() {
local map="${1%'\n'}" # strip trailing newline
map="${map%'/'}" # strip trailing slash
map="${map/'='/' = '}" # replace first '=' by ' = '
map="${map//'/'/'\/'}" # escape slashes
map="${map//'*'/'\*'}" # escape asterisks
echo "${map}"
}
list() {
header "Repository migration files"
for f in *.migration; do
info " ${f}"
done
}
| true |
fe6d150280637da764d05a1660ac215db0c71b10 | Shell | Smathuki/deepform | /init_sweep.sh | UTF-8 | 387 | 3.375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Source this script in order to invoke wandb sweep sweep.yaml and set the var WANDB_SWEEP_ID
export SED_REGEX_EXTRACT='s/^.*Created sweep with ID: \([[:alnum:]]*\).*$/\1/p'
init=$(wandb sweep sweep.yaml 2>&1 | sed -n "$SED_REGEX_EXTRACT")
if [ -z "$init" ]
then
exit 1
else
echo $init
export WANDB_SWEEP_ID="$init"
wandb agent deepform/deepform/$WANDB_SWEEP_ID
fi
| true |
8354d0319092cd9559930b1e19ac1446deea5ce9 | Shell | ploykwan/dotfiles | /setup-remote.sh | UTF-8 | 862 | 3.796875 | 4 | [] | no_license | RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # Reset (No Color)
MODULE=$0
# Check for existance of Xcode Command Line Interface
if [ ! -d "$(xcode-select -p)" ]; then
printf "##### ${RED}Xcode Command Line Tools is required.${NC}\n"
printf "##### ${GREEN}Opening installation...${NC}"
xcode-select --install &> /dev/null
while ! xcode-select -p &> /dev/null; do
sleep 5
done
printf "##### ${GREEN}Done installing Xcode Command Line Tools.${NC}"
fi
rm -rf ~/dotfiles
git clone https://github.com/kykungz/dotfiles.git ~/dotfiles
echo;echo;
if [ "$MODULE" ] && [ "$MODULE" != "bash" ]
then
bash -c "
source ~/dotfiles/utils.sh;
echo_green 'Installing module: ${NC}${MODULE}'
prompt_install;
source ~/dotfiles/${MODULE}/setup.sh
echo;echo;
echo_green Done!"
else
bash ~/dotfiles/setup-mac.sh
fi
| true |
bfbd19f2cab48f2851a84fea6d937134053985ba | Shell | Riyaz-creator/python | /start.sh | UTF-8 | 111 | 2.59375 | 3 | [] | no_license | #!/bin/bash
echo "shows the uptime"
uptime
echo "enter your name"
read -p "enter your name": name
echo $name
| true |
009bb6d292d56d499027ba7e25a7827bae744826 | Shell | carlosfbh/Master-Data-Science | /Shell/Ejercicios/Master_class_03v7_pg25/exercise1.sh | UTF-8 | 171 | 2.84375 | 3 | [] | no_license | #! /bin/bash
#Use Text_example.txt
#Ejercicio 1.
#Replace every “line” with new line character (“\n”)
cd ~/Data/shell
cat Text_example.txt | sed "s/line/\n/g"
| true |
16b04c9c7a0c8d33ed880493b6e08c2b50241876 | Shell | Rivhan64/hardening-geniso | /scripts/hardening.sh | UTF-8 | 794 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -eux
export HISTSIZE=0
export HISTFILESIZE=0
apt-get --assume-yes install net-tools procps --no-install-recommends;
git clone https://github.com/konstruktoid/hardening;
cd ./hardening || exit 1
sed -i.bak -e "s/SSH_GRPS=.*/SSH_GRPS='vagrant'/" -e "s/^CHANGEME=.*/CHANGEME='changed'/" ./ubuntu.cfg;
sed -i.bak 's/.*f_aide_/# f_aide_/g' ./ubuntu.sh;
bash ./ubuntu.sh
cd .. || exit 1
rm -rf ./hardening
sed -i.bak 's/^/# /g' /etc/default/grub.d/99-hardening-lockdown.cfg
sed -i.bak "s/myhostname =.*/myhostname = hardened.local/g" /etc/postfix/main.cf;
sed -i.bak '/fat/d' /etc/modprobe.d/disable*;
ufw allow ssh;
update-grub
systemctl restart sshd
find /etc -name '*.bak' -exec rm -f {} \;
if id vagrant; then
chage --maxdays 365 vagrant
chage --mindays 1 vagrant
fi
| true |
73620fb05806a18b3e8d52c09398e07645d8a076 | Shell | modsim/FluxML | /config/mkautotools.sh | UTF-8 | 1,128 | 3.125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-SA-4.0",
"CC-BY-4.0"
] | permissive | #!/bin/sh
AFILES="aclocal.m4 autom4te.cache config/config.guess config.log \
config.status config/config.sub configure config/depcomp \
ThreadsPP-0.1 INSTALL config/install-sh libtool config/ltmain.sh \
Makefile Makefile.in config/missing FLUX-1.0.tar.gz config.h \
config/compile config.h.in config.h.in~ stamp-h1"
bootstrap_autotools()
{
autoreconf -i -I m4
}
cleanup_autotools()
{
if [ -f Makefile ]; then
make distclean
fi
for i in $AFILES; do
if [ -d "$i" ]; then
echo "D[$i]"
rm -r $i
else
if [ -f "$i" ]; then
echo "F[$i]"
rm $i
fi
fi
done
# for i in $(find . -regex '.*Makefile\(\|\.in\)'); do
# if [ -f "$i" ]; then
# echo "F[$i]"
# rm $i
# fi
# done
for i in $(find . -name Makefile); do
if [ -f "$i" -a -f "$i.in" ]; then
echo "F[$i, $i.in]"
rm $i
rm $i.in
fi
done
for i in $(find . -type d -name .deps); do
if [ -d "$i" ]; then
echo "D[$i]"
rm -r $i
fi
done
}
if [ ! -d ./config ]; then
cd ..
fi
case "$1" in
clean)
cleanup_autotools
;;
boot)
bootstrap_autotools
;;
*)
echo "Usage: $0 {clean|boot}"
exit 1
esac
exit 0
| true |
92b77a328f70b954f3657b044cf47fc774bfd460 | Shell | oskar1233/m2-nginx | /nginx-envsubst.sh | UTF-8 | 218 | 3.0625 | 3 | [] | no_license | #!/bin/sh
ENV_VARS_TO_REPLACE="\
\$DOMAIN \
\$PORT \
\$FPM_SERVER
"
for f in $(find /nginx-config/ -type f -regex '.*\.conf'); do
envsubst "$ENV_VARS_TO_REPLACE" < $f > /etc/nginx/conf.d/$(basename $f);
done
| true |
6c563c576e97d1ed4c9e266afece701c118dfb20 | Shell | idaholab/raven | /developer_tools/packaging/mavericks_build.sh | UTF-8 | 2,111 | 2.890625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | export https_proxy=$http_proxy
export INSTALL_DIR=/opt/raven_libs
source /opt/moose/environments/moose_profile
rm -Rvf $INSTALL_DIR
../../backend_raven_libs_script.sh
mkdir -p $HOME/raven_libs/root/opt
mv $INSTALL_DIR $HOME/raven_libs/root/opt
mkdir -p $HOME/raven_libs/root/opt/raven_libs/environments
PROFILE_FILE=$HOME/raven_libs/root/opt/raven_libs/environments/raven_libs_profile
echo 'export PYTHONPATH=/opt/raven_libs/lib/python2.7/site-packages/:$PYTHONPATH' > $PROFILE_FILE
chmod +x $PROFILE_FILE
mkdir -p $HOME/raven_libs/scripts
cat - > $HOME/raven_libs/scripts/preflight <<PREFLIGHT
#!/bin/bash
rm -Rf /opt/raven_libs/
PREFLIGHT
chmod +x $HOME/raven_libs/scripts/preflight
cat - > $HOME/raven_libs/scripts/postinstall <<POSTINSTALL
#!/bin/bash
echo Running Raven libs postinstall
echo HOME = \$HOME
if grep '. /opt/raven_libs/environments/raven_libs_profile' \$HOME/.bash_profile; then echo Already sourcing /opt/raven_libs/environments/raven_libs_profile; else
cat - >> \$HOME/.bash_profile <<EOF
#source raven libs environment
if [ -f /opt/raven_libs/environments/raven_libs_profile ]; then
. /opt/raven_libs/environments/raven_libs_profile
fi
EOF
fi
if which python3-config; then echo Python3 already installed; else
installer -pkg /Volumes/Raven\ Libraries/Python.mpkg -target /
fi
POSTINSTALL
chmod +x $HOME/raven_libs/scripts/postinstall
rm -Rf raven_libs.pkg
pkgbuild --root $HOME/raven_libs/root --identifier raven_libs --scripts $HOME/raven_libs/scripts raven_libs.pkg
#Get Python
curl -C - -L -O http://www.python.org/ftp/python/3.3.5/python-3.3.5-macosx10.6.dmg
hdiutil attach python-3.3.5-macosx10.6.dmg
cp -a /Volumes/Python\ 3.3.5/Python.mpkg .
hdiutil detach /Volumes/Python\ 3.3.5/
#Create dmg file.
rm -f raven_libs_base.dmg raven_libs.dmg
hdiutil create -size 200m -fs HFS+ -volname "Raven Libraries" raven_libs_base.dmg
hdiutil attach raven_libs_base.dmg
cp -a raven_libs.pkg /Volumes/Raven\ Libraries
cp -a Python.mpkg /Volumes/Raven\ Libraries
hdiutil detach /Volumes/Raven\ Libraries/
hdiutil convert raven_libs_base.dmg -format UDZO -o raven_libs.dmg
| true |
a5773bdeab25d01bbf126522b70ae9df9f2e702f | Shell | nyanyehtun-simon/VVV-test | /www/phpcs/CodeSniffer/Standards/VIP-Coding-Standards/bin/phpcs | UTF-8 | 275 | 2.59375 | 3 | [
"MIT",
"LicenseRef-scancode-other-copyleft",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-generic-exception",
"GPL-2.0-only"
] | permissive | #!/usr/bin/env bash
#
# Run PHPCS against VIP Coding Standards.
#
# This ensures that the code in the Sniffs and Tests follow the rules
# defined in the custom ruleset for this repo, `.phpcs.xml.dist`.
#
# EXAMPLE TO RUN LOCALLY:
#
# ./bin/phpcs
"$(pwd)/vendor/bin/phpcs"
| true |
26418cf02a610696ca85d8c3a992a45251f1e530 | Shell | huantingwei/fyp | /backend/login/loginAPI.sh | UTF-8 | 2,283 | 3.90625 | 4 | [] | no_license | #!/bin/bash
#-------Part 0: revoke all gcloud accounts-------------
gcloud auth revoke --all
#-------Part 1: automated authentication, connect gcloud to user's cloud project--------
#-------as well as connect kubectl to the GKE cluster in that project--------
#all stdout will be written to output.txt too
stdout="./output.txt"
#The OAuth URL for Google Cloud authentication will be written into url.txt
url="./url.txt"
#token.txt will store the access token generated from the OAuth authentication process
token="./token.txt"
#for string matching in output.txt, to detect the appearance of the OAuth URL
string="oauth2"
#projindex.txt will contain the index number which matches the project name above
projindex="./projindex.txt"
# parameters
clustername=$1
projectname=$2
zonename=$3
function loop {
#reinitialise gcloud [default] configuration
echo "1"
#enter 'Y' to login
echo "Y"
#loop until the OAuth URL appears
while true
do
#True if the OAuth URL appears in stdout
if [ -e $stdout ] && [ ! -z $(grep "$string" "$stdout") ]; then
#extract the URL from stdout, write it to url.txt
grep $string $stdout | xargs > $url
break
else
sleep 1
fi
done
#loop until the access token is passed by from user and written into token.txt
while true
do
# true if token.txt exists
if [ -s $token ]; then
while read line; do
echo $line
done < $token
break
else
sleep 1
fi
done
while true
do
# true if the project options appears in stdout
if [ -e $stdout ] && [[ ! -z $(grep "$projectname" "$stdout") ]]; then
# extract the desired project option from stdout, write it to projindex.txt
grep $projectname $stdout | xargs > $projindex
# extract the index number which is the third char
while read line; do
# echo the third char of line
echo ${line:1:1}
done < $projindex
echo 'n'
break
else
sleep 1
fi
done
}
# 2>&1 | tee will write a copy of stdout to output.txt
loop | gcloud init --console-only --skip-diagnostics 2>&1 | tee ./output.txt
#connect kubectl to the target GKE cluster
gcloud container clusters get-credentials $clustername --zone $zonename --project $projectname
echo "Finish authentication"
#cleanup
rm $stdout $url $token $projindex | true |
2d41f1178690c1edf4bba239a12f368d2a213347 | Shell | 0xspig/config | /.bashrc | UTF-8 | 663 | 2.6875 | 3 | [] | no_license | #
# ~/.bashrc
#
export PATH=/home/tyler/.bin:$PATH
. ~/.bash_aliases
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
alias ls='ls --color=auto'
PS1='[\u@\h \W]\$ '
if [ "$TERM" = "linux" ]; then
setfont Lat2-TerminusBold22x11.psf.gz
echo -en "\e]P0eec8a8"
echo -en "\e]P1c92e2e"
echo -en "\e]P268b374"
echo -en "\e]P39e9c95"
echo -en "\e]P45a6f9e"
echo -en "\e]P5a077a8"
echo -en "\e]P6266870"
echo -en "\e]P7ad003a"
echo -en "\e]P8878377"
echo -en "\e]P9ff0d4d"
echo -en "\e]Pa62704c"
echo -en "\e]Pbba9163"
echo -en "\e]Pc739deb"
echo -en "\e]Pdd068ed"
echo -en "\e]Pe5eb8ad"
echo -en "\e]Pfa9acc2"
clear
fi
| true |
bb82b9b6ecee47702cd576af6e37a903288fd1c7 | Shell | lsblakk/tools | /buildfarm/mobile/create_dirs.sh.in | UTF-8 | 950 | 3.34375 | 3 | [] | no_license | #!/bin/bash
tegras="sedTEGRALISTsed"
echo "setting up tegra directories"
for TNUM in ${tegras} ; do
TEGRA=tegra-$TNUM
if [ -d /builds/offline-$TNUM ]; then
echo "Skipping offline $TEGRA"
else
echo processing $TEGRA
if [ ! -d /builds/$TEGRA ]; then
echo creating dir for $TEGRA
mkdir /builds/$TEGRA
fi
cd /builds/$TEGRA
if [ ! -f /builds/$TEGRA/clientproxy.py ]; then
echo symlinking clientproxy
ln -s /builds/tools/sut_tools/clientproxy.py .
fi
if [ ! -f /builds/$TEGRA/buildbot.tac ]; then
echo creating buildbot.tac
sed "s/tegra-###/${TEGRA}/" /builds/buildbot.tac.tegras > /builds/$TEGRA/buildbot.tac
mv /builds/$TEGRA/buildbot.tac /builds/$TEGRA/buildbot.tac.sed
sed "s/bm-foopy.build.mozilla.org/test-master01.build.mozilla.org/" /builds/$TEGRA/buildbot.tac.sed > /builds/$TEGRA/buildbot.tac
rm -f /builds/$TEGRA/buildbot.tac.sed
fi
fi
done
| true |
5a067d4edcfff3b0f7345a65e7028a7fa7b9db1f | Shell | patrickacole/xray-view-classifier | /jobs/train.sh | UTF-8 | 783 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Not sure what this does?
nvidia-smi
# Activate desired environment
# Need to source bashrc for some reason
source ~/.bashrc
conda activate pytorch-env
# Check to see if data is in the right spot
if [[ ! -d "/data/pacole2" ]]
then
mkdir /data/pacole2
fi
if [[ ! -d "/data/pacole2/CheXpert-v1.0-small/" ]]
then
echo "Data is not on gpu storage"
echo "Copying over data from shared storage"
FILE="CheXpert-v1.0-small.zip"
cp /shared/rsaas/pacole2/${FILE} /data/pacole2/
cd /data/pacole2/
unzip -qq ${FILE}
rm ${FILE}
cd /home/pacole2/
fi
# Data is ready now run python file
cd ~/Projects/xray-view-classifier/
echo "Running python script now"
python main.py --data /data/pacole2/CheXpert-v1.0-small/ --lr 2e-4 --epoch 10 --train
| true |
59e6f3d601bb1874ae8e0159eed71dde8d2c2692 | Shell | omakoto/misc | /md | UTF-8 | 183 | 3.078125 | 3 | [] | no_license | #!/bin/bash
set -e
. mutil.sh
dir="$1"
if [[ "$dir" == "" ]] ; then
cat <<'EOF'
md: mkdir -p && cd
usage: md PATH
EOF
exit 1
fi
ee mkdir -p "$dir" && schedule-cd "$dir"
| true |
1179b8a7b1ba0220a474d61e13fd1278044f5aab | Shell | yuvaldori/DevOps | /releasemanagement/scripts/upload_release/copy_release_packages_to_website.sh | UTF-8 | 878 | 3.203125 | 3 | [] | no_license | #!/bin/bash
source params.sh
pushd ${BUILD_DIR}
#create the lite packages
mkdir xap-lite
pushd xap-lite
cp -rp ../xap-premium/1.5/gigaspaces-xap-premium-* ../xap-premium/dotnet/GigaSpaces-XAP.NET-Premium-* .
#rename 's/premium/lite/' *.zip (ubuntu syntax)
#rename 's/Premium/Lite/' *.msi (ubuntu syntax)
rename premium lite *premium*.zip
rename Premium Lite *Premium*.msi
popd
popd
ssh -i ~/.ssh/website tempfiles@www.gigaspaces.com "mkdir -p ~/download_files/${MAJOR}/${VERSION}/"
for file in `find ${BUILD_DIR} -name "*.zip" -o -name "*.tar.gz" -o -name "*.msi" -o -name "*.rpm" | grep -v license | grep -v testsuite`; do
echo Uploading $file to ~/download_files/${MAJOR}/${VERSION}
scp -i ~/.ssh/website $file tempfiles@www.gigaspaces.com:~/download_files/${MAJOR}/${VERSION}
done
| true |
b228a13bf358e049b40e7864a9180d12555521c9 | Shell | cbdevnet/midimonster | /installer.sh | UTF-8 | 9,963 | 3.75 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# shellcheck disable=SC1117
################################################ SETUP ################################################
dependencies=(
libasound2-dev
libevdev-dev
liblua5.3-dev
libjack-jackd2-dev
pkg-config
libssl-dev
python3-dev
gcc
make
wget
git
)
# Replace this with 'root' to bypass the user check
user="$(whoami)"
# Temporary directory used for repository clone
tmp_path="$(mktemp -d)"
# Installer/updater install directory
updater_dir="/etc/midimonster-updater"
latest_version="$(curl --silent "https://api.github.com/repos/cbdevnet/midimonster/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')"
# make invocation arguments
makeargs="all"
normal="$(tput sgr0)"
dim="$(tput dim)"
bold="$(tput bold)"
uline="$(tput smul)"
c_red="$(tput setaf 1)"
c_green="$(tput setaf 2)"
c_mag="$(tput setaf 5)"
DEFAULT_PREFIX="/usr"
DEFAULT_PLUGINPATH="/lib/midimonster"
DEFAULT_CFGPATH="/etc/midimonster/midimonster.cfg"
DEFAULT_EXAMPLES="/share/midimonster"
############################################## FUNCTIONS ##############################################
assign_defaults(){
VAR_PREFIX="${VAR_PREFIX:-$DEFAULT_PREFIX}"
VAR_PLUGINS="${VAR_PLUGINS:-$VAR_PREFIX$DEFAULT_PLUGINPATH}"
VAR_DEFAULT_CFG="${VAR_DEFAULT_CFG:-$DEFAULT_CFGPATH}"
VAR_EXAMPLE_CFGS="${VAR_EXAMPLE_CFGS:-$VAR_PREFIX$DEFAULT_EXAMPLES}"
}
ARGS(){
for i in "$@"; do
case "$i" in
--prefix=*)
VAR_PREFIX="${i#*=}"
;;
--plugins=*)
VAR_PLUGINS="${i#*=}"
;;
--defcfg=*)
VAR_DEFAULT_CFG="${i#*=}"
;;
--examples=*)
VAR_EXAMPLE_CFGS="${i#*=}"
;;
--dev)
NIGHTLY=1
;;
-d|--default)
assign_defaults
;;
-fu|--forceupdate)
UPDATER_FORCE="1"
;;
--install-updater|--selfupdate)
NIGHTLY=1 prepare_repo
install_script
exit 0
;;
--install-dependencies)
install_dependencies "${dependencies[@]}"
exit 0
;;
-h|--help|*)
assign_defaults
printf "%sUsage: %s[OPTIONS]%s" "${bold}" "${normal} ${0} ${c_green}" "${normal}"
printf "\n\t%s--prefix=%s<path>%s\t\tSet the installation prefix.\t\t%sDefault:%s" "${c_green}" "${normal}${c_red}" "${normal}" "${c_mag}" "${normal} ${dim}$VAR_PREFIX${normal}"
printf "\n\t${c_green}--plugins=${normal}${c_red}<path>${normal}\tSet the plugin install path.\t\t${c_mag}Default:${normal} ${dim}%s${normal}" "$VAR_PLUGINS"
printf "\n\t${c_green}--defcfg=${normal}${c_red}<path>${normal}\t\tSet the default configuration path.\t${c_mag}Default:${normal} ${dim}%s${normal}" "$VAR_DEFAULT_CFG"
printf "\n\t${c_green}--examples=${normal}${c_red}<path>${normal}\tSet the path for example configurations.\t${c_mag}Default:${normal} ${dim}%s${normal}\n" "$VAR_EXAMPLE_CFGS"
printf "\n\t%s--dev%s\t\t\tInstall nightly version." "${c_green}" "${normal}"
printf "\n\t%s-d,\t--default%s\tUse default values to install." "${c_green}" "${normal}"
printf "\n\t%s-fu,\t--forceupdate%s\tForce the updater to update without a version check." "${c_green}" "${normal}"
printf "\n\t%s--selfupdate%s\t\tUpdates this script to the newest version and exit." "${c_green}" "${normal}"
printf "\n\t%s--install-updater%s\tInstall the updater (Run with midimonster-updater) and exit." "${c_green}" "${normal}"
printf "\n\t%s--install-dependencies%s\tInstall dependencies and exit" "${c_green}" "${normal}"
printf "\n\t%s-h,\t--help%s\t\tShow this message and exit." "${c_green}" "${normal}"
printf "\n\t%sEach argument can be overwritten by another, the last one is used!.%s\n" "${uline}${bold}${c_mag}" "${normal}"
rmdir "$tmp_path"
exit 0
;;
esac
shift
done
}
# Install unmatched dependencies
install_dependencies(){
DEBIAN_FRONTEND=noninteractive apt-get update -y -qq > /dev/null || error_handler "There was an error doing apt update."
# unset "$deps"
for dependency in "$@"; do
if [ "$(dpkg-query -W -f='${Status}' "$dependency" 2>/dev/null | grep -c "ok installed")" -eq 0 ]; then
deps+=("$dependency") # Add not installed dependency to the "to be installed array".
else
printf "%s already installed!\n" "$dependency" # If the dependency is already installed print it.
fi
done
if [ ! "${#deps[@]}" -ge "1" ]; then # If nothing needs to get installed don't start apt.
printf "\nAll dependencies are fulfilled!\n" # Dependency array empty! Not running apt!
else
printf "\nThen following dependencies are going to be installed:\n" # Dependency array contains items. Running apt.
printf "\n%s\n" "${deps[@]}" | sed 's/ /, /g'
DEBIAN_FRONTEND=noninteractive apt-get install -y -qq --no-install-suggests --no-install-recommends "${deps[@]}" > /dev/null || error_handler "There was an error doing dependency installation."
printf "\nAll dependencies are installed now!\n" # Dependency array empty! Not running apt!
fi
printf "\n"
}
ask_questions(){
# Only say if necessary
if [ -z "$VAR_PREFIX" ] || [ -z "$VAR_PLUGINS" ] || [ -z "$VAR_DEFAULT_CFG" ] || [ -z "$VAR_EXAMPLE_CFGS" ]; then
printf "%sIf you don't know what you're doing, just hit enter a few times.%s\n\n" "${bold}" "${normal}"
fi
if [ -z "$VAR_PREFIX" ]; then
read -r -e -i "$DEFAULT_PREFIX" -p "PREFIX (Install root directory): " input
VAR_PREFIX="${input:-$VAR_PREFIX}"
fi
if [ -z "$VAR_PLUGINS" ]; then
read -r -e -i "$VAR_PREFIX$DEFAULT_PLUGINPATH" -p "PLUGINS (Plugin directory): " input
VAR_PLUGINS="${input:-$VAR_PLUGINS}"
fi
if [ -z "$VAR_DEFAULT_CFG" ]; then
read -r -e -i "$DEFAULT_CFGPATH" -p "Default config path: " input
VAR_DEFAULT_CFG="${input:-$VAR_DEFAULT_CFG}"
fi
if [ -z "$VAR_EXAMPLE_CFGS" ]; then
read -r -e -i "$VAR_PREFIX$DEFAULT_EXAMPLES" -p "Example config directory: " input
VAR_EXAMPLE_CFGS="${input:-$VAR_EXAMPLE_CFGS}"
fi
}
# Clone the repository and select the correct version
prepare_repo(){
printf "Cloning the repository\n"
git clone "https://github.com/cbdevnet/midimonster.git" "$tmp_path"
printf "\n"
# If not set via argument, ask whether to install development build
if [ -z "$NIGHTLY" ]; then
read -r -p "Do you want to install the latest development version? (y/n)? " magic
case "$magic" in
y|Y)
printf "OK! You´re a risky person ;D\n\n"
NIGHTLY=1
;;
n|N)
printf "That´s OK - installing the latest stable version for you ;-)\n\n"
NIGHTLY=0
;;
*)
printf "%sInvalid input -- INSTALLING LATEST STABLE VERSION!%s\n\n" "${bold}" "${normal}"
NIGHTLY=0
;;
esac
fi
# Roll back to last tag if a stable version was requested
if [ "$NIGHTLY" != 1 ]; then
cd "$tmp_path" || error_handler "Error doing cd to $tmp_path"
printf "Finding latest stable version...\n"
last_tag=$(git describe --abbrev=0)
printf "Checking out %s...\n" "$last_tag"
git checkout -f -q "$last_tag"
fi
printf "\n"
}
# Build and install the software from the checkout in $tmp_path.
# Reads globals VAR_PREFIX, VAR_PLUGINS, VAR_DEFAULT_CFG, VAR_EXAMPLE_CFGS
# and optional extra make arguments in $makeargs (set elsewhere).
build(){
	# Export variables so the Makefile picks up the configured paths
	export PREFIX="$VAR_PREFIX"
	export PLUGINS="$VAR_PLUGINS"
	export DEFAULT_CFG="$VAR_DEFAULT_CFG"
	export EXAMPLES="$VAR_EXAMPLE_CFGS"

	cd "$tmp_path" || error_handler "Error doing cd to $tmp_path"
	make clean
	# Only pass $makeargs when non-empty: quoting an empty value would hand
	# make a literal empty-string target and fail the build.
	if [ -n "$makeargs" ]; then
		make "$makeargs" || error_handler "Building the midimonster failed"
	else
		make || error_handler "Building the midimonster failed"
	fi
	make install || error_handler "Installing the midimonster failed"
}
# Persist the chosen install paths to $updater_dir/updater.conf so a
# later updater run can re-import them. Any previous config is replaced.
save_config(){
	mkdir -p "$updater_dir"
	rm -f "$updater_dir/updater.conf"
	printf "Exporting updater config\n"
	# Write one KEY=VALUE line per setting, sourceable by the updater
	{
		printf "VAR_PREFIX=%s\n" "$VAR_PREFIX"
		printf "VAR_PLUGINS=%s\n" "$VAR_PLUGINS"
		printf "VAR_DEFAULT_CFG=%s\n" "$VAR_DEFAULT_CFG"
		printf "VAR_DESTDIR=%s\n" "$VAR_DESTDIR"
		printf "VAR_EXAMPLE_CFGS=%s\n" "$VAR_EXAMPLE_CFGS"
	} > "$updater_dir/updater.conf"
}
# Install the installer script (from the checked-out repo, i.e. the
# requested version) as the system-wide updater, and link it into PATH.
install_script(){
	mkdir -p "$updater_dir"
	printf "Copying updater to %s/updater.sh\n" "$updater_dir"
	cp "$tmp_path/installer.sh" "$updater_dir/updater.sh" || error_handler "Failed to copy the updater script"
	chmod +x "$updater_dir/updater.sh"
	printf "Creating symlink /usr/bin/midimonster-updater\n"
	# -f: replace any existing link so repeated installs/updates don't fail
	ln -sf "$updater_dir/updater.sh" "/usr/bin/midimonster-updater"
}
# Print an optional error message, show a short animated "Aborting..."
# indicator, then terminate the script with exit status 1.
# Arguments: $1 - optional message to print before aborting
error_handler(){
	if [[ -n "$1" ]]; then
		printf "\n%s\n" "$1"
	fi
	printf "\nAborting"
	# Three dots, ~0.5s apart, purely cosmetic
	for _dot in 1 2 3; do
		sleep 0.3s
		printf "."
		sleep 0.2s
	done
	printf "\n"
	exit 1
}
# Remove the temporary checkout directory if it exists; safe to call
# multiple times (no-op when $tmp_path is already gone or unset).
cleanup(){
	[ -d "$tmp_path" ] || return 0
	printf "Cleaning up temporary files...\n"
	rm -rf "$tmp_path"
}
################################################ Main #################################################
# Ensure a clean abort on interrupt and removal of the checkout on any exit
trap error_handler SIGINT SIGTERM
trap cleanup EXIT

# Parse arguments
ARGS "$@"
clear

# Check whether we have the privileges to install stuff
if [ "$user" != "root" ]; then
	printf "The installer/updater requires root privileges to install the midimonster system-wide\n"
	exit 1
fi

# Check if we can download the sources.
# NOTE: wget --spider -q prints nothing on stdout, so its output can never
# be tested — the original [ "$(wget ...)" ] check was always false. Test
# the exit status instead.
if ! wget -q --spider http://github.com; then
	printf "The installer/updater requires internet connectivity to download the midimonster sources and dependencies\n"
	exit 1
fi

# Check whether the updater needs to run
if [ -f "$updater_dir/updater.conf" ] || [ "$UPDATER_FORCE" = "1" ]; then
	if [ -f "$updater_dir/updater.conf" ]; then
		# shellcheck source=/dev/null
		. "$updater_dir/updater.conf"
		# Parse arguments again to compensate overwrite from source
		ARGS "$@"
		printf "Imported settings from %s/updater.conf\n" "$updater_dir"
	fi

	if [ -n "$UPDATER_FORCE" ]; then
		printf "Forcing the updater to start...\n\n"
	elif [ -x "$VAR_PREFIX/bin/midimonster" ]; then
		# Query the installed binary by its checked path — the install
		# prefix is not necessarily in PATH
		installed_version="$("$VAR_PREFIX/bin/midimonster" --version)"
		if [[ "$installed_version" =~ $latest_version ]]; then
			printf "The installed version %s seems to be up to date\nDoing nothing\n\n" "${bold}$installed_version${normal}"
			exit 0
		else
			printf "The installed version %s does not match the latest version %s\nMaybe you are running a development version?\n\n" "${bold}$installed_version${normal}" "${bold}$latest_version${normal}"
		fi
	fi

	# Run updater steps
	prepare_repo
	install_script
	save_config
	build
else
	# Run installer steps
	install_dependencies "${dependencies[@]}"
	prepare_repo
	ask_questions
	install_script
	save_config
	build
fi

exit 0
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.