blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7a679e16ad96053aca622fa7f546f7b17c35d11f | Shell | JoseJARN/shellASO | /relacion3/e1.sh | UTF-8 | 259 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# Expect exactly one argument: the file to page through.
if [ $# -ne 1 ];then
    echo "Debes pasarme un parámetro, solo uno"
else
    # Quote "$1" so paths with spaces/globs work; only page the file if it
    # exists as a regular file and is readable.
    if [ -f "$1" ] && [ -r "$1" ];then
        # more can read the file directly; `cat file | more` was a useless cat.
        more "$1"
    else
        echo "El parámetro que me has pasado o no es un fichero o no tiene permisos de lectura"
    fi
fi | true |
09afd35a3ad8aa8dc57377334ecc1210c605e248 | Shell | gsm-masum/ShellOJ | /source/judge.sh~ | UTF-8 | 829 | 3.1875 | 3 | [
"MIT"
] | permissive | #! /bin/sh
# Refresh sudo credentials up front (password is piped via -S).
echo "'" | sudo -S -v
# Directories holding the judge's test inputs and the expected outputs.
INP=test/input
OUTP=test/output
# J indexes the current test case: input0$J.txt / output0$J.txt / result0$J.txt.
J=0
#FC=$(cd $INP/ && ls -1 | wc -l)
# Compile the submission and make the binary runnable by everyone.
g++ solution.cpp
chmod 777 a.out
#gcc add.c
# Run one iteration per input file found.
for i in $INP/*.txt
do
# Normalize CRLF line endings in the expected output before diffing.
dos2unix -q $OUTP/output0$J.txt
#printf 'output0%d.txt modified \n' "$J"
#START=$(($(date +%s%N)/1000000))
# Measure wall-clock run time in milliseconds from nanosecond timestamps.
ts=$(date +%s%N)
./a.out < $INP/input0$J.txt > res/result0$J.txt
TIME=$((($(date +%s%N) - $ts)/1000000))
chmod 777 res/result0$J.txt
# Exact byte match -> accepted.
if diff -q res/result0$J.txt $OUTP/output0$J.txt
then
printf 'Test case %d accepted \t Running Time %d MS \n' "$J" "$TIME" >>log.txt
else
# If a whitespace/blank-line-insensitive diff matches, classify as a
# presentation error; otherwise it is a wrong answer.
if diff -b res/result0$J.txt $OUTP/output0$J.txt | diff -B res/result0$J.txt $OUTP/output0$J.txt
then
printf 'presentation %d error\n' "$J" >>log.txt
else
printf 'Wrong Answer %d \n' "$J" >>log.txt
fi
fi
J=$((J + 1))
done
# Refresh sudo again and make the verdict log world-writable.
echo "'" | sudo -S -v
chmod 777 log.txt
| true |
e9535a2b45f1f5984a402c633d695fce752c710d | Shell | SINBADconsortium/SLIM-release-comp | /ibin/install_FFTW3 | UTF-8 | 1,463 | 3.78125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# FFTW version to download, build and install.
fftwv=3.3.1
# fail STAGE - report a fatal error in the given build stage and abort.
function fail()
{
echo "FATAL ERROR in $1 stage"
echo "... check output log"
exit 1
}
# SLIM_COMP is the installation root; abort early if it is not set.
if [ -n "$SLIM_COMP" ]; then
echo Installing FFTW3 $fftwv in $SLIM_COMP
else
echo FATAL ERROR: undefined environment SLIM_COMP
exit 1
fi
# TMPDIR
# Per-user scratch area for the unpack/configure/build steps.
export TMPDIR=/tmp/`whoami`/fftw3-${fftwv}
# set temporary installation directories
srcdir=${TMPDIR}/fftw-${fftwv}
instdir=$SLIM_COMP/external
# set prompt
PS4="\n%%%%% "
#set -x
# delete old temporary directories
test -e $TMPDIR && rm -rf $TMPDIR
test -e $srcdir && rm -rf $srcdir
# create new temporary directories
mkdir -p $TMPDIR || exit 1
cd $TMPDIR || exit 1
# Unpack the vendored tarball (no network access needed).
tar -xzf $SLIM_COMP/external/tarballs/fftw-${fftwv}.tgz || exit 1
cd $srcdir || exit 1
# CONFIGURE
echo CONFIGURATION stage
echo ... output in ${TMPDIR}/configure.fftw3-${fftwv}
# Shared library, PIC, OpenMP-threaded build installed under $instdir.
$srcdir/configure \
	--prefix=$instdir \
	--enable-shared \
	--with-pic \
	--with-openmp --enable-threads \
	1>${TMPDIR}/configure.fftw3-${fftwv} 2>&1 || fail "CONFIGURATION"
echo CONFIGURATION done
# MAKE
echo BUILD stage
echo ... output in ${TMPDIR}/make_build.fftw3-${fftwv}
make -j 2 1>${TMPDIR}/make_build.fftw3-${fftwv} 2>&1 || fail "BUILD"
echo BUILD done
# INSTALL
echo INSTALLATION stage
echo ... output in ${TMPDIR}/make_install.fftw3-${fftwv}
make install 1>${TMPDIR}/make_install.fftw3-${fftwv} 2>&1 || fail "INSTALLATION"
echo INSTALL done
# clean temporary directories
echo CLEANUP stage
rm -rf $TMPDIR
echo FFTW3 installation completed
| true |
f8fc16b2a72970b1d6eaadde16d280b85d608bc8 | Shell | jibundeyare/sitex.example.com | /websitesrm.sh | UTF-8 | 931 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Load shared settings: $php_version, $website_prefix, $websites_start,
# $websites_end and $domain all come from this file.
source websitesconf.sh
# Stop the web stack while accounts/vhosts are being torn down.
sudo systemctl stop apache2
sudo systemctl stop php$php_version-fpm
# Zero-padded site ids (seq -w) in the configured range.
ids="$(seq -w $websites_start $websites_end)"
for id in $ids
do
username="$website_prefix$id"
echo "deleting $username"
# account
sudo userdel $username
# folder
sudo rm -fr /home/$username
# vhost
sudo a2dissite $username
sudo rm -f /etc/apache2/sites-available/$username.conf
sudo rm -f /var/log/apache2/$username.$domain.access.log*
sudo rm -f /var/log/apache2/$username.$domain.error.log*
# database
# Drop the per-site MySQL user and database via the root socket login.
echo "DROP USER IF EXISTS '$username'@'localhost';" | sudo mysql
echo "DROP DATABASE IF EXISTS $username;" | sudo mysql
echo "FLUSH PRIVILEGES;" | sudo mysql
# php fpm
sudo rm -f /etc/php/$php_version/fpm/pool.d/$username.conf
# remove the dedicated php session directory
sudo rm -fr /var/lib/php/sessions/$username
done
# Bring the web stack back up once all sites are removed.
sudo systemctl start apache2
sudo systemctl start php$php_version-fpm
| true |
327a6eec250062f5adf11c2951da24b2fc510fac | Shell | femanov/CXv4 | /4cx/src/doc/_etc_init.d_vcas-servers | UTF-8 | 534 | 3.6875 | 4 | [] | no_license | #!/bin/bash
#
# /etc/rc.d/init.d/vcas-servers
#
# Starts/stops VCAS-servers
#
# chkconfig: 345 90 10
# description: starts VCAS servers on boot and stops them upon shutdown
# Source function library.
# Provides SysV init helpers such as `success`.
. /etc/init.d/functions
#
# See how we were called.
#
case "$1" in
  start)
	# Launch the CAN server daemon as user "work"; the subsys lock file
	# marks the service as running for the init system.
	su -c '~/bin/can_serv_qt -can0 -d' work
	touch /var/lock/subsys/vcas-servers
	;;
  stop)
	echo -n $"Shutting down VCAS servers: "
	success
	echo
	# NOTE(review): SIGKILL is used directly; no graceful TERM first.
	killall -9 can_serv_qt
	rm -f /var/lock/subsys/vcas-servers
	;;
  *)
	echo $"Usage: $0 stop"
	exit 1
esac
exit 0
| true |
76c9afae4f7f8f73b7af01c28f4f3107d62d6f0a | Shell | chxzqw/shell-dotfiles | /.bash_aliases | UTF-8 | 1,488 | 2.625 | 3 | [
"MIT"
] | permissive | if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias grep='grep --color=auto'
fi
# Common ls shortcuts.
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
# Bare-repo dotfile management: each alias points git at a dedicated
# --git-dir while keeping the home directory (or config dir) as work tree.
alias git-vimrc='/usr/bin/git --git-dir=/home/sid/.vim/.git/ --work-tree=/home/sid/.vim/'
alias git-shell-dotfiles='/usr/bin/git --git-dir=/home/sid/.shell-dotfiles.git/ --work-tree=/home/sid/'
alias git-x-dotfiles='/usr/bin/git --git-dir=/home/sid/.x-dotfiles.git/ --work-tree=/home/sid/'
alias git-awesomerc='/usr/bin/git --git-dir=/home/sid/.config/awesome/.git/ --work-tree=/home/sid/.config/awesome/'
alias git-openbox='/usr/bin/git --git-dir=/home/sid/.config/openbox/.git/ --work-tree=/home/sid/.config/openbox/'
alias git-i3='/usr/bin/git --git-dir=/home/sid/.config/i3/.git/ --work-tree=/home/sid/.config/i3/'
alias git-docker='/usr/bin/git --git-dir=/home/sid/docker/.git/ --work-tree=/home/sid/docker/'
# Certbot in Docker: first variant binds ports 80/443 for standalone
# challenges, second runs without ports (e.g. webroot/DNS challenges).
alias docker-certbot-with-ports='docker run -it --rm -p 443:443 -p 80:80 --name certbot \
            -v "/etc/letsencrypt:/etc/letsencrypt" \
            -v "/var/lib/letsencrypt:/var/lib/letsencrypt" \
            certbot/certbot'
alias docker-certbot-no-port='docker run -it --rm --name certbot \
            -v "/etc/letsencrypt:/etc/letsencrypt" \
            -v "/var/lib/letsencrypt:/var/lib/letsencrypt" \
            certbot/certbot'
| true |
39fe4d48f9c20514aebaacdd9c315b0c2d354a40 | Shell | umireon/dotfiles | /.bash_profile | UTF-8 | 239 | 2.6875 | 3 | [] | no_license | load_password() {
# Body of load_password(): look up a generic password item in the macOS
# Keychain by service name ($1) and print its password field.
# NOTE(review): assumes `security ... -g` output parses as YAML with a
# 'password' key — verify on current macOS versions.
security find-generic-password -s "$1" -g 2>&1 | ruby -ryaml -e "puts YAML.load(ARGF)['password']"
}
# Homebrew
# Enable Homebrew developer mode and pull the GitHub API token from Keychain.
export HOMEBREW_DEVELOPER=1
export HOMEBREW_GITHUB_API_TOKEN=$(load_password HOMEBREW_GITHUB_API_TOKEN)
| true |
b5686fdd3c9d939088ec0b3dc9db674ed7fcf577 | Shell | mafobiyeye/devops | /firstscript4.sh | UTF-8 | 244 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Sample first script from Lolubyte IT Computer Training Center
#Author:
#Date:
#Contact Details
#Script Usage
#Change history: (Who, when, why)
# Current login name, taken from the environment.
name=$USER
echo
# printf is preferred over the non-portable `echo -e`; prints the logon
# name followed by a blank line (same output as the original echo -e).
printf 'Hi %s\n\n' "$name"
| true |
97310e0f9f4913f99821b92358dd7a8f9642d0c3 | Shell | fakegit/bash | /misc/distro.sh | UTF-8 | 1,656 | 4.34375 | 4 | [] | no_license | #!/bin/bash
# Detects Linux Distribution
#
# Many Distributions have lsb_release or use /etc/lsb_release
# For those that do not we have some fallback cases
#
# The goal is to report the Distribution and Version that is being used
# An icon will be display based on the first word of the distro's name.
# Example: Scientific Linux 6.1 will be Scientific.png or Scientific.gif
OS=$(uname -s)
VER=$(uname -r)
# Look for lsb_release in PATH (command -v is the portable form of which)
LSB_REL=$(command -v lsb_release 2>/dev/null)
if [[ $OS == Linux ]]; then
    # If lsb_release is executable set the DIST and VER
    if [[ -x $LSB_REL ]]; then
        DIST=$(lsb_release -is)
        VER=$(lsb_release -rs)
    elif [[ -f /etc/lsb-release ]]; then
        DIST=$(grep 'DISTRIB_ID' /etc/lsb-release | cut -d"=" -f2)
        VER=$(grep 'DISTRIB_RELEASE' /etc/lsb-release | cut -d"=" -f2)
    elif [[ -f /etc/redhat-release ]]; then
        DIST=$(sed 's/ release.*//' /etc/redhat-release)
        VER=$(sed 's/.*release\ //' /etc/redhat-release | sed 's/\ .*//')
    elif [[ -f /etc/SuSE-release ]]; then
        # BUG FIX: read /etc/SuSE-release, not a file literally named "suse"
        DIST=$(head -1 /etc/SuSE-release | awk '{print $1}')
        VER=$(awk '/VERSION/ {print $3}' /etc/SuSE-release)
    elif [[ -f /etc/debian_version ]]; then
        DIST="Debian"
        # -r keeps any backslashes in the version string literal
        read -r VER < /etc/debian_version
    elif [[ -f /etc/arch-release ]]; then
        DIST="Arch Linux"
        VER=""
    else
        DIST="${OS}"
    fi
    # Exceptions
    # RHEL uses multiple strings RedHatEnterpriseWS (4.x), RedHatEnterpriseServer (5.x, 6.x)
    if [[ $DIST =~ RedHatEnterprise ]]; then
        DIST="RedHatEnterprise"
    fi
    OSSTR="${DIST} ${VER}"
else
    OSSTR="$OS $VER"
fi
echo "${OSSTR}"
| true |
e1ffa338feb693f747d45e0603f01d1b86db07b9 | Shell | tailscale/tailscale | /release/deb/debian.postinst.sh | UTF-8 | 583 | 2.703125 | 3 | [
"BSD-3-Clause"
] | permissive | if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ] || [ "$1" = "abort-deconfigure" ] || [ "$1" = "abort-remove" ] ; then
	# Standard Debian maintainer-script dance for a systemd unit: unmask,
	# then enable (or just refresh state if the admin disabled it).
	deb-systemd-helper unmask 'tailscaled.service' >/dev/null || true
	if deb-systemd-helper --quiet was-enabled 'tailscaled.service'; then
		deb-systemd-helper enable 'tailscaled.service' >/dev/null || true
	else
		deb-systemd-helper update-state 'tailscaled.service' >/dev/null || true
	fi
	# Only talk to systemd when it is actually running (not in a chroot).
	if [ -d /run/systemd/system ]; then
		systemctl --system daemon-reload >/dev/null || true
		deb-systemd-invoke restart 'tailscaled.service' >/dev/null || true
	fi
fi
| true |
2411e97a64c635c3486d920fe5683971a72f748b | Shell | austincunningham/bash-addresbook | /FindCust | UTF-8 | 4,061 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Author : Austin Cunningham student id:20073379
#find an existing customers from the file CustomerDetails
#Enter a customer detail to find a contact
# ANSI colour an formats moved to more readable variables names, can change echo colours
# the NONE is to turn off the format, echo -e enables the colour, may not use all of these
NONE='\033[00m'
RED='\033[01;31m'
GREEN='\033[01;32m'
YELLOW='\033[01;33m'
BLUE='\033[01;34m'
PURPLE='\033[01;35m'
CYAN='\033[01;36m'
WHITE='\033[01;37m'
BOLD='\033[1m'
UNDERLINE='\033[4m'
# check to see if CustomerDetails exists and if not then print warning and recommended action
clear
if grep -q "Address" CustomerDetails
then
echo -e ${YELLOW}CustomerDetails already exits ${NONE}
sleep 1
clear
echo
echo -e ${BLUE}__________________________________________________________________${NONE}
echo -e "${BLUE} _____ _ _ ____ _ ${NONE}"
echo -e "${BLUE}| ___(_)_ __ __| |/ ___| _ ___| |_ ___ _ __ ___ ___ _ __ ${NONE}"
echo -e "${BLUE}| |_ | | '_ \ / _' | | | | | / __| __/ _ \| '_ ' _ \ / _ \ '__|${NONE}"
echo -e "${BLUE}| _| | | | | | (_| | |__| |_| \__ \ || (_) | | | | | | __/ | ${NONE}"
echo -e "${BLUE}|_| |_|_| |_|\__,_|\____\__,_|___/\__\___/|_| |_| |_|\___|_| ${NONE}"
echo -e ${BLUE}__________________________________________________________________${NONE}
echo -e ${BLUE}__________________________________________________________________${NONE}
echo
echo -e ${BLUE}"What do you want to search for:"${NONE}
read search
# -z checks variable Name to see if it is blank, if it is blank a message is printed and the script restarted
while [ -z "$search" ]
do
echo
echo -e ${BLUE}"Invalid input blank spaces are not accepted"${NONE}
echo
echo -e ${BLUE}"What do you want to search for:"${NONE}
read search
done
echo
# grep does a count of the successful matches to the string search in CustomerDetails
# Which is then applied to a varalable count, we then print out the result
count=`grep -c $search CustomerDetails`
echo -e ${BLUE}"There are "$count" contacts found matching your search"${NONE}
echo
# grep searches for a string in the varable search in CustomerDetails file the -i ignors the case
echo -e ${BLUE}================================================================================${NONE}
echo
grep -i $search CustomerDetails
echo
echo -e ${BLUE}================================================================================${NONE}
echo
if [[ $count -gt 1 ]]
then
echo -e ${BLUE}"Enter exact Email Address to narrow the search"${NONE}
read Name
# use -z again to check for blank input
while [ -z "$Name" ]
do
echo
echo -e ${BLUE}"Invalid input blank spaces are not accepted"${NONE}
echo
echo -e ${BLUE}Enter the Name or Alias of the customer delete :${NONE}
read Name
done
echo
# new grep search on new input
echo -e ${BLUE}"Refined search results"${NONE}
echo
count=`grep -c $Name CustomerDetails`
echo -e ${BLUE}"There are "$count" contacts found matching your refined search"${NONE}
echo -e ${BLUE}================================================================================${NONE}
echo
grep -i $Name CustomerDetails
echo
echo -e ${BLUE}================================================================================${NONE}
fi
else
clear
echo
echo -e ${YELLOW}"Find Customer script can't find any customers as CustomerDetails file is empty"${NONE}
echo -e ${YELLOW}"Go to Add Customer to create some contacts"${NONE}
fi
# return to the Menu, anything entered other that y-Y including blank spaces will go to the else option and end the script
# entering y-Y and you meet the in condition and call the script for Menu
echo
echo -e "${RED}Enter 'y' to return to the Menu ${NONE}"
echo -e "${RED}Enter any other key to exit ${NONE}"
read yesno
if [[ $yesno = "y" || $yesno = "Y" ]]
then
./Menu
else
echo Goodbye
exit 113
fi | true |
a7b156fce75c8f2d9f3fe9ee5a13fa4d67c5c4c0 | Shell | qingyingliu/youdao_linux | /query.sh | UTF-8 | 605 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# No arguments or --help: print usage and exit.
if [ $# -eq 0 ] || test "$1" == "--help"
then
echo "<<<<<<<<< youdao_linux <<<<<<<<<<<<<<<"
echo ""
echo "Usage: query [option] [word or phrase]"
echo "option are as follows: "
echo "--help: show the manual of youdao_linux."
echo "--analysis: show the frequency words that you have queried."
echo ""
echo "<<<<<<<<< youdao_linux <<<<<<<<<<<<<<<"
exit 0
fi
# --analysis: run the query-frequency report from the install directory.
if [ "$1" == "--analysis" ]
then
cd ~/youdao_linux/
python3 ~/youdao_linux/analysis.py
cd - > /dev/null
exit 0
fi
# Default path: run a dictionary lookup on all remaining arguments.
cd ~/youdao_linux
# 查询 (query)
python3 query.py "$*" #2> /dev/null
cd - > /dev/null
| true |
1c60825efe3f9ed42c3cca526be26ce04393c0f0 | Shell | yuvalturg/kmod-builder | /kmod-builder.sh | UTF-8 | 5,127 | 3.625 | 4 | [] | no_license | #!/bin/bash -e
# build_kernel_module TMPDIR KVER KCONF
# Build the out-of-tree module selected by kernel config symbol KCONF
# against the prepared kernel source tree under TMPDIR/build.
build_kernel_module() {
	local tmpdir="$1"
	local kver="$2"
	local kconf="$3"
	builddir=$(ls -d1 ${tmpdir}/build/kernel*/linux*)
	# Copy stock config and symvers to builddir
	find ${tmpdir}/dev -name Module.symvers -exec cp {} ${builddir} \;
	find ${tmpdir}/dev -name .config -exec cp {} ${builddir} \;
	# Find the kmod path and build
	pushd ${builddir} > /dev/null
	# Locate the Makefile that references the KCONF object to find the
	# module's subdirectory within the kernel tree.
	makefile=$(find -name Makefile -exec grep -H "${kconf}.*\.o" {} \; | cut -d: -f1)
	[[ -z ${makefile} ]] && echo "Missing Makefile for ${kconf}, exiting" && exit 1
	kmodpath=$(dirname ${makefile})
	kmodpath=${kmodpath/.\//}
	./scripts/config --module ${kconf}
	# Match EXTRAVERSION so the module's vermagic equals the target kernel.
	sed -i "s/^EXTRAVERSION.*/EXTRAVERSION=-${kver#*-}/" Makefile
	make olddefconfig
	make modules_prepare
	make M=${kmodpath}
	popd > /dev/null
}
# build_from_source_rpms TMPDIR KVER KCONF
# Extract the downloaded kernel src/devel RPMs, run %prep via rpmbuild,
# then hand off to build_kernel_module.
build_from_source_rpms() {
	local tmpdir="$1"
	local kver="$2"
	local kconf="$3"
	for rpm in src dev; do
		rpmdev-extract -qfC ${tmpdir}/${rpm} ${tmpdir}/kernel-${rpm}.rpm
	done
	kdir=$(basename ${tmpdir}/src/kernel*)
	rpmbuild -D "_sourcedir ${tmpdir}/src/${kdir}" \
		 -D "_builddir ${tmpdir}/build" \
		 -bp ${tmpdir}/src/${kdir}/kernel.spec --nodeps
	build_kernel_module "${tmpdir}" "${kver}" "${kconf}"
}
# download_el_package ELVER
# Fetch kernel-devel for the given CentOS/EL major version from the
# public mirrors ($kver is read from the caller's scope).
download_el_package() {
	local elver="$1"
	case ${elver} in
	7)
		dnf --repofrompath=r1,http://mirror.centos.org/centos/${elver}/os/x86_64/ \
		    --repofrompath=r2,http://mirror.centos.org/centos/${elver}/updates/x86_64/ \
		    --repoid=r1 --repoid=r2 \
		    download kernel-devel-${kver}
		;;
	8*)
		dnf --repofrompath=r1,http://mirror.centos.org/centos/8/BaseOS/x86_64/os \
		    --repoid=r1 \
		    download kernel-devel-${kver}
		;;
	esac
}
# build_el_module TMPDIR KVER KCONF
# CentOS/EL path: find the matching dist-git tag, clone the kernel spec
# repo, download sources, run %prep and build the module.
build_el_module() {
	local tmpdir="$1"
	local kver="$2"
	local kconf="$3"
	tag=$(git ls-remote https://git.centos.org/rpms/kernel.git | \
	      grep ${kver%.*}$ | awk '{print $2}')
	IFS=/ read -ra arr <<< "$tag"
	printf -v branch "/%s" "${arr[@]:2}"
	branch=${branch:1}
	pushd ${tmpdir} > /dev/null
	git clone --depth 1 https://git.centos.org/git/centos-git-common.git
	git clone --branch ${branch} --depth 1 https://git.centos.org/rpms/kernel.git
	download_el_package ${arr[3]:1} kernel-devel-${kver}
	rpmdev-extract -qfC ${tmpdir}/dev kernel-devel-${kver}.rpm
	pushd kernel > /dev/null
	../centos-git-common/get_sources.sh -b ${arr[3]}
	rpmbuild -D "_topdir $(pwd)" -bp SPECS/kernel.spec --nodeps
	popd > /dev/null
	mv kernel/BUILD/kernel* ${tmpdir}/build
	popd > /dev/null
	build_kernel_module "${tmpdir}" "${kver}" "${kconf}"
}
# build_koji_module TMPDIR KVER KCONF PROFILE
# Download kernel devel + src RPMs from a koji instance, then build from
# the source RPMs.
build_koji_module() {
	local tmpdir="$1"
	local kver="$2"
	local kconf="$3"
	local profile="$4"
	pushd ${tmpdir} > /dev/null
	koji -p ${profile} download-build --noprogress --rpm kernel-devel-${kver}
	koji -p ${profile} download-build --noprogress --rpm kernel-${kver%.*}.src
	mv kernel-devel-${kver}.rpm kernel-dev.rpm
	mv kernel-${kver%.*}.src.rpm kernel-src.rpm
	popd > /dev/null
	build_from_source_rpms "${tmpdir}" "${kver}" "${kconf}"
}
# build_fc_module TMPDIR KVER KCONF
# Fedora path: same as koji but with the "fedora" profile.
build_fc_module() {
	local tmpdir="$1"
	local kver="$2"
	local kconf="$3"
	build_koji_module "${tmpdir}" "${kver}" "${kconf}" "fedora"
}
# build_vanilla_module TMPDIR KVER KCONF DOTCONFIG
# Upstream path: download the vanilla tarball from kernel.org and use the
# caller-provided .config.
build_vanilla_module() {
	local tmpdir="$1"
	local kver="$2"
	local kconf="$3"
	local dotconfig="$4"
	pushd ${tmpdir} > /dev/null
	tarball="linux-${kver%%-*}.tar.xz"
	url="https://cdn.kernel.org/pub/linux/kernel/v${kver:0:1}.x/${tarball}"
	echo "Downloading ${url}"
	curl -LO# "${url}"
	mkdir -p build/kernel
	tar -C build/kernel -xf ${tarball}
	cp ${dotconfig} dev/.config
	popd > /dev/null
	build_kernel_module "${tmpdir}" "${kver}" "${kconf}"
}
# Entry point: parse options, pick a build path (vanilla / koji / distro
# auto-detected from the kernel version's dist tag) and collect the
# resulting .ko files into the output directory.
main() {
	local kver
	local kconf
	local outdir
	local koji_profile
	local dotconfig
	while getopts "d:v:c:j:o:" OPTION
	do
		case ${OPTION} in
			d)
				dotconfig=${OPTARG}
				;;
			v)
				kver=${OPTARG}
				;;
			c)
				kconf=${OPTARG}
				;;
			j)
				koji_profile=${OPTARG}
				;;
			o)
				outdir=${OPTARG}
				;;
		esac
	done
	kver=${kver:-$(uname -r)} # default kernel version
	echo "Building ${kconf} for kernel ${kver}"
	tmpdir=$(mktemp -d)
	mkdir -p ${tmpdir}/{src,dev,build} ${outdir}
	if [[ -n ${dotconfig} ]]; then
		build_vanilla_module "${tmpdir}" "${kver}" "${kconf}" "${dotconfig}"
	else
		if [[ -n ${koji_profile} ]]; then
			build_koji_module "${tmpdir}" "${kver}" "${kconf}" "${koji_profile}"
		else
			# e.g. 5.8.0-200.fc32 -> dist "fc" -> build_fc_module
			dist=$(awk -F. '{print $(NF-1)}' <<< ${kver})
			build_${dist%%[0-9]*}_module "${tmpdir}" "${kver}" "${kconf}"
		fi
	fi
	find ${tmpdir}/build -name "*.ko" -exec cp -v {} ${outdir} \;
	rm -rf ${tmpdir}
}
main "$@"
| true |
f57fa0e1bc2550dea8f5056f5bad5d434d86727a | Shell | sleepred/oss-httpd-build | /experimental/validate-depends.sh | UTF-8 | 1,004 | 3.1875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/bash
#
# Pivotal OSS httpd webserver build schema
# Copyright (c) 2017 Pivotal Software, Inc.
# Licensed under the Apache Software License 2.0
#
# validate-depends.sh : Review required -dev[el] dependencies
if test -f /etc/redhat-release; then
echo RedHat development package dependencies:
if which dnf >/dev/null 2>&1; then
dnf list expat-devel libxml2-devel lua-devel pcre-devel zlib-devel
else
yum list expat-devel libxml2-devel lua-devel pcre-devel zlib-devel
fi
elif test -f /etc/debian_version; then
echo Ubuntu development package dependencies:
dpkg-query --list libexpat1-dev libxml2-dev zlib1g-dev liblua5.2-0 libpcre3-dev
echo '[Note pcre2(10.x) and lua(5.3) are not presently supported]'
else
echo Unrecognized architecture.
echo Verify the following development dependency packages are installed:
echo 'Expat(2.x) libxml2(2.9) lua(5.2+) pcre(8.x) zlib(1.2)'
echo '[Note pcre2(10.x) is not presently supported]'
fi
| true |
f7922a72cbc220b0e2aaeb094e29f46a4c6a93fa | Shell | espiralpy/pythontesting | /TMPtest_hook_sendlog.sh | UTF-8 | 1,805 | 3.65625 | 4 | [] | no_license | #!/bin/bash
#Read info.txt and extract line build number
# Globals filled/used by the helpers below.
BUILD=''
KEY=''
# Print every line of info.txt matching "VM Name".
# NOTE(review): the loop reprints the last match for every non-matching
# line too — presumably only the matching line matters; verify.
function getBuild(){
while read line; do
BUILD=$(echo "$line" | grep 'VM Name*')
echo $BUILD
done < info.txt
}
# Print every line of info.txt matching "UUID: ".
function getKey(){
while read line; do
KEY=$(echo "$line" | grep 'UUID: ')
echo $KEY
done < info.txt
}
#Extract last updated build number from file swupd_output.txt
# Falls back to the "already up-to-date" message, then to the latest
# build published by the sandbox server.
function getBuildTo(){
FILE=`find /root/swupd_output.txt -type f | xargs grep "Update complete. System updated from version"`
NUMBERS=$(echo "$FILE" | grep -o '[0-9]*')
UPDATE_VERSION=$(echo ${NUMBERS:${#NUMBERS}-4:${#NUMBERS}})
if [[ $UPDATE_VERSION == '' ]]
then
FILE=`find /root/swupd_output.txt -type f | xargs grep "Update complete. System already up-to-date at version"`
NUMBERS=$(echo "$FILE" | grep -o '[0-9]*')
UPDATE_VERSION=$(echo ${NUMBERS:${#NUMBERS}-4:${#NUMBERS}})
echo $UPDATE_VERSION
else
echo $UPDATE_VERSION
fi
}
# Compose the result-file name: "<build-from> <build-to> <uuid>".
function getNameFile(){
BUILD=$(getBuild)
BUILD_NUMBER=$(echo "$BUILD" | grep -o '[0-9]*')
#echo $BUILD_NUMBER
BUILDTO=$(getBuildTo)
KEY=$(getKey)
cleaned=${KEY//[UUID: ]}
if [[ $BUILDTO == '' ]]
then
LATEST=$(curl http://clearlinux-sandbox.jf.intel.com/update/version/formatstaging/latest)
echo $BUILD_NUMBER $LATEST $cleaned
else
echo $BUILD_NUMBER $BUILDTO $cleaned
fi
}
# Generate an SSH key, rename the TAP log after the build info and upload
# it to the results server via scp.
function sendFile(){
##GENERATE KEY SSH KEYGEN
echo -e 'y' | ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
expect run_password.expect
getNameFile
FILE=$(getNameFile)
cp taplogs/swupd/quick-swupd.t /root/
sleep 2
mv /root/quick-swupd.t "/root/$FILE.log"
##PASS RESULT SSH
scp "/root/$FILE.log" swupd@10.219.106.192:/var/taas/results/
}
sendFile
| true |
90a359923eca55df71a8dea66f48ff1c6544666d | Shell | fayson/cdhproject | /ScriptFiles/gendata.sh | UTF-8 | 457 | 2.890625 | 3 | [] | no_license | function rand(){
# Emit a pseudo-random integer in [$1, $2] inclusive.
# local keeps min/max/num from leaking into the caller's namespace.
local min=$1
local max=$(($2-min+1))
local num=$((RANDOM+1000000000))
echo $((num%max+min))
}
# Emit $1 CSV rows into each of data1.txt..data3.txt; i is the file
# index, n the row index. Dates/hours are derived from n modulo
# month/day/hour ranges; rnd is a random value in [10000, 10100].
let i=1
while [ $i -le 3 ];
do
let n=1
while [ $n -le $1 ];
do
let month=$n%12+1
# February gets at most 28 days, other months up to 30.
if [ $month -eq 2 ];then
let day=$n%28+1
else
let day=$n%30+1
fi
let hour=$n%24
rnd=$(rand 10000 10100)
echo "$i$n,$i$n,$i$n,2017-$month-$day $hour:20:00,${rnd},$n,$n,$n" >> data$i.txt
let n=n+1
done
let i=i+1
done
| true |
fda87bb6f183837df1cb42f75bfeb8d59ef921f5 | Shell | splichte/mochi | /script/prepare_image.sh | UTF-8 | 839 | 2.9375 | 3 | [] | no_license | # booting code gets up to 16K.
cat boot/boot_sect.bin boot/switch_to_pm.bin > os.img
b=$(wc -c os.img | awk '{ print $1 }')
# pad to 4K + 512 bytes (4608) so bootloader starts on 0x2000,
# after we load starting on sector 2 to 0x1000 in boot_sect.asm
dd if=/dev/zero bs=1 count=$((4608-$b)) >> os.img
cat boot/bootloader.bin >> os.img
b=$(wc -c os.img | awk '{ print $1 }')
# pad to 16K (so bootloader should not use more than about 12K)
dd if=/dev/zero bs=1 count=$((16384-$b)) >> os.img
# OS code starts at byte 16384
# and we guarantee it can have 8Mb.
cat kernel.bin >> os.img
rm kernel.bin
# pad to at least 8M. (4096 * 2048)
# if many blocks are needed, bs=1 is really slow.
# hence we don't shoot for exactly 1M, and just
# make sure we have enough.
dd if=/dev/zero bs=4096 count=2048 >> os.img
cat drive.img >> os.img
| true |
fa19134630a61d76bf01e59d99905f74f670e45e | Shell | bmc/docker | /awscli/build.sh | UTF-8 | 358 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env bash
#
#
# Convenient script to build the image.
#
# Usage: ./build.sh [tag]
#
# where <tag> is "latest", "snapshot", whatever. Defaults to "latest".
IMAGE=bclapper/awscli
# Accept zero or one argument: the image tag (default "latest").
case "$#" in
    0)
        tag=latest
        ;;
    1)
        tag=$1
        ;;
    *)
        echo "Usage: $0 [tag]" >&2
        exit 1
        ;;
esac
# Echo commands from here on, then build without the layer cache.
set -x
docker build --no-cache -t $IMAGE:$tag .
| true |
2fbba66c47d3e16b017a786db3eb19894ca4ef50 | Shell | CStichbury/pixelscan | /docs/build.sh | UTF-8 | 3,549 | 3.78125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# ----------------------------------------------------------------------
# FILE: build.sh
# DESCRIPTION: Script to build pixelscan documentation web pages.
# REQUIREMENTS: python 2.7, sphinx 1.3.5
# ----------------------------------------------------------------------
# Default directories, all derived from this script's location; each can
# be overridden via the -o/-p/-r options parsed below.
CURDIR=$(readlink -f $(dirname ${BASH_SOURCE[0]}))
PARENTDIR=$(readlink -f $(dirname $CURDIR))
OUTPUTDIR=$CURDIR/api
PIXELSCANDIR=$PARENTDIR
ROOTDIR=$CURDIR
__usage()
{
echo "Usage: $0 [-h] [-o <directory>] [-p <directory>] [-r <directory>]"
echo " -h = Display this usage"
echo " -o = output directory"
echo " -p = pixelscan directory"
echo " -r = root directory"
}
# ==================================================
# Parse arguments
# --------------------------------------------------
# -o output dir, -p pixelscan source dir, -r sphinx root dir.
while getopts :ho:p:r: flag ; do
    case $flag in
        h) __usage; exit 0; ;;
        o) OUTPUTDIR=$OPTARG; ;;
        p) PIXELSCANDIR=$OPTARG; ;;
        r) ROOTDIR=$OPTARG; ;;
        *) echo "Invalid option: -$OPTARG" >&2; exit 1; ;;
    esac
done
shift $((OPTIND-1))
SOURCEDIR=$ROOTDIR/source/
# ==================================================
# Generate configuration files in source directory
# --------------------------------------------------
sphinx-quickstart \
    --author="Daniel Pulido" \
    --dot=_ \
    --ext-autodoc \
    --ext-ifconfig \
    --ext-intersphinx \
    --language=en \
    --master=index \
    --no-batchfile \
    --no-makefile \
    --no-use-make-mode \
    --project=pixelscan \
    --quiet \
    --release=0.3.2 \
    --sep \
    --suffix=.rst \
    -v 0.3.2 \
    $ROOTDIR
# ==================================================
# Build API documents
# --------------------------------------------------
PYTHONPATH=$PIXELSCANDIR sphinx-apidoc -e -f -o $SOURCEDIR $PIXELSCANDIR/pixelscan
# ==================================================
# Update conf.py with bootstrap theme
# --------------------------------------------------
echo "" >> $SOURCEDIR/conf.py
echo "import sphinx_bootstrap_theme" >> $SOURCEDIR/conf.py
echo "autoclass_content = 'both'" >> $SOURCEDIR/conf.py
echo "html_theme = 'bootstrap'" >> $SOURCEDIR/conf.py
echo "html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()" >> $SOURCEDIR/conf.py
echo "html_show_sourcelink = False" >> $SOURCEDIR/conf.py
# ==================================================
# Update index.rst with content
# --------------------------------------------------
# Splice the intro text and module toctree entries into the generated
# index.rst (GNU sed -i; the multi-line i\ bodies below are intentional).
cp $CURDIR/intro.txt $SOURCEDIR
sed -i "s/Welcome to pixelscan's documentation!/**pixelscan**/g" $SOURCEDIR/index.rst
sed -i "/Content/i\\
.. include:: intro.txt\n" $SOURCEDIR/index.rst
sed -i "s/Contents:/Contents\n==================/g" $SOURCEDIR/index.rst
sed -i "s/:maxdepth: 2/:maxdepth: 1/g" $SOURCEDIR/index.rst
sed -i "/:maxdepth: 1/a\\
\\
   modules.rst\\
   pixelscan.pixelscan.rst" $SOURCEDIR/index.rst
# ==================================================
# Build html documents
# --------------------------------------------------
PYTHONPATH=$PIXELSCANDIR sphinx-build $SOURCEDIR $OUTPUTDIR
# ==================================================
# Add file to disable github jekyll processing
# --------------------------------------------------
touch $OUTPUTDIR/.nojekyll
# ==================================================
# Clean up build directories
# --------------------------------------------------
rm -rf $SOURCEDIR
rm -rf $ROOTDIR/build/
44cd04d134aeefcc8200cecf21c1e2b9dbe1aa54 | Shell | zones-managed-services/CrowdstikeEAsforJamfPro | /CrowdstrikeFalconv6LastEstablishedEA.sh | UTF-8 | 1,390 | 3.484375 | 3 | [] | no_license | #!/bin/sh
# falconctl stats, present in will return a lot of diagnostic information.
# stats is only available in v4.18.8013 and higher
# Get when the cloud state was last established.
# Interval should be every 24 hours
# Location for Crowdstrike Falcon Sensor v3, v4 and v5 installs is /Library/CS/
# Location for v6.10+ installs is /Applications/Falcon.app and /Library/Application Support/CrowdStrike/Falcon
# Last Edit: 20201102 - jkb
pkgCount=$(pkgutil --pkgs | grep crowdstrike | wc -l)
if [ $pkgCount -lt 1 ];
# Check to see if packages are even installed
then
lastCom="Crowdstrike Not Installed"
elif [ -e /Applications/Falcon.app/Contents/MacOS/Falcon ];
# New Falcon install location
then
lastCom=$(/Applications/Falcon.app/Contents/Resources/falconctl stats | awk '/Cloud Activity | Last Established At/ {print $4,$5,$6,$8,$9}')
elif [ -e /Library/CS/ ];
# Old Falcon Location
then
verCheck=$(/Library/CS/falconctl stats | awk '/version/ {print $2}' | sed 's/\.//g' | cut -c 1-3)
# versions older than 15.36 will report via sysctl
if [ $verCheck -ge 418 ];
then
lastCom=$(/Library/CS/falconctl stats | awk '/Cloud Activity | Last Established At/ {print $4,$5,$6,$8,$9}')
else
lastCom="Crowdstrike version is too old to support query"
fi
else
lastCom="Agent Likely Installed But Not Running"
fi
echo "<result>$lastCom</result>" | true |
4e2864317d95781a2ace9518c67b1376b95a16e4 | Shell | Uterok/lms-docker | /export_env.sh | UTF-8 | 576 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# set host ip to environment variable
# export DOCKERHOST=$(ifconfig | grep -E "([0-9]{1,3}\.){3}[0-9]{1,3}" | grep -v 127.0.0.1 | awk '{ print $2 }' | cut -f2 -d: | head -n1)
export DOCKERHOST=$(ifconfig | grep -E "([0-9]{1,3}\.){3}[0-9]{1,3}" | grep -v 127.0.0.1 | awk '{ print $2 }' | cut -f2 -d: | head -n1)
echo "DOCKERHOST - $DOCKERHOST"
# check cpu architecture and choose correspond image
# check architecture name on regexp
if [[ $(arch) =~ "arm" ]];
then
export PGSQL_DOCKER_IMAGE="tobi312/rpi-postgresql"
else
export PGSQL_DOCKER_IMAGE="postgres"
fi | true |
b6cb1c634c0ad783271a25925abc07756eaf8aea | Shell | riddl0rd/SyncTimeWithNTP | /SyncTimeWithNTP.sh | UTF-8 | 900 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/bin/sh
####################################################################################################
#
# More information: http://macmule.com/2013/11/30/how-to-sync-time-with-ntp-via-script/
#
# GitRepo: https://github.com/macmule/SyncTimeWithNTP/
#
# License: http://macmule.com/license/
#
####################################################################################################
# HARDCODED VALUES ARE SET HERE
# FQDN of NTP
NTPServer=""
# CHECK TO SEE IF A VALUE WAS PASSED IN PARAMETER 4 AND, IF SO, ASSIGN TO "NTPServer"
# NOTE: the script runs under /bin/sh, so use the POSIX `=` operator —
# `==` inside [ ] is a bashism that fails on dash and other sh variants.
if [ "$4" != "" ] && [ "$NTPServer" = "" ];then
	NTPServer=$4
fi
##
# Error if variable NTPServer is empty
##
if [ "$NTPServer" = "" ]; then
	echo "Error: No value was specified for the NTPServer variable..."
	exit 1
fi
###
# Sync the time with the NTP specified as $NTPServer
###
echo "NTP Server: $NTPServer"
sudo ntpdate -u "$NTPServer"
| true |
8e183d8c20a96dbeda6444840393223633a2efd6 | Shell | sdruskat/swh-save-action | /entrypoint.sh | UTF-8 | 436 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/bin/sh -l
echo "Saving https://github.com/$GITHUB_REPOSITORY to Software Heritage"
result=$(curl -X POST https://archive.softwareheritage.org/api/1/origin/save/git/url/https://github.com/${GITHUB_REPOSITORY}/)
# In this version, the result var isn't used further
echo "Save request sent to the Software Heritage API."
echo "To check for the progress of the save process, go to https://archive.softwareheritage.org/save/#requests ."
| true |
90535e06dc4c8040f66ea1de062087db36119f09 | Shell | mj1618/deployment-examples-gpu | /bin/tf-pg-create | UTF-8 | 224 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Require the target workspace (dev or prod) as the first argument.
if [ "dev" != "$1" ] && [ "prod" != "$1" ]; then
    echo "Please specify dev or prod as first argument"
    exit 1
fi
export TF_WORKSPACE=$1
# Abort if the module directory is missing rather than running terraform
# against whatever the current directory happens to be.
cd infra/pg || exit 1
terraform init
terraform apply -auto-approve -refresh=true
| true |
9db4bde7d0db424af2325e34d93d69bc080e00ae | Shell | jiama843/cs246_review | /MidtermReview/Shell/conditions.sh | UTF-8 | 393 | 4 | 4 | [] | no_license | #!/bin/bash
# Path to test, taken from the first argument.
file=${1}
# Each test is quoted: the original unquoted `[ -e $file ]` collapsed to a
# one-argument test when $file was empty, which is always true.
if [ -e "$file" ]; then
    echo "file exists"
fi
if [ -f "$file" ]; then
    echo "file exists and is regular"
fi
if [ -r "$file" ]; then
    echo "file exists and is readable"
fi
if [ -w "$file" ]; then
    echo "file exists and is writable"
fi
if [ -x "$file" ]; then
    echo "file exists and is executable"
fi
# `-o` inside [ ] is deprecated and ambiguous; join two tests with ||.
if [ -e "$file" ] || [ "1" = "2" ]; then
    echo "file exists or 1 = 2"
fi
| true |
2daac9290424307ecd0cae451a198362d383d11f | Shell | lynix/journalcheck | /journalcheck.sh | UTF-8 | 1,742 | 3.875 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# journalcheck - Simple 'logcheck' replacement for journald
# (C) Alexander Koch
# This software is released under the terms of the MIT License, see LICENSE.
# to have filters work in foreign languages (french)
export LANG=POSIX
# All settings are overridable via JC_* environment variables.
FILTERS_GLOBAL=${JC_FILTERS_GLOBAL:-"/usr/lib/journalcheck"}
FILTERS_LOCAL=${JC_FILTERS_USER:-~/".journalcheck.d"}
CURSOR_FILE=${JC_CURSOR_FILE:-~/".journalcheck.cursor"}
NUM_THREADS=${JC_NUM_THREADS:-$(grep -c '^processor' "/proc/cpuinfo")}
LOGLEVEL=${JC_LOGLEVEL:-"0..5"}
# merge filters to single file
FILTER_FILE="$(mktemp)"
cat "$FILTERS_GLOBAL"/*.ignore > "$FILTER_FILE"
if [ -d "$FILTERS_LOCAL" ]; then
	cat "$FILTERS_LOCAL"/*.ignore >> "$FILTER_FILE" 2>/dev/null
fi
# fetch journal entries since last run (or system bootup)
LOG="$(mktemp)"
ARGS="--no-pager --show-cursor -l -p $LOGLEVEL"
if [ -r "$CURSOR_FILE" ]; then
	ARGS+=" --after-cursor=$(cat "$CURSOR_FILE")"
else
	ARGS+=" -b"
fi
journalctl $ARGS &> "$LOG"
if [ $? -ne 0 ]; then
	echo "Error: failed to dump system journal" >&2
	exit 1
fi
# save cursor for next iteration
# --show-cursor appends "-- cursor: <id>" as the final line of the dump.
CURSOR="$(tail -n 1 "$LOG")"
if [[ $CURSOR =~ ^--\ cursor:\ ]]; then
	echo "${CURSOR:11}" > "$CURSOR_FILE"
else
	echo "Error: unable to save journal cursor" >&2
fi
# split journal into NUM_THREADS parts, spawn worker for each part
split -a 3 -n l/$NUM_THREADS -d "$LOG" "${LOG}_"
rm "$LOG"
for I in $(seq 0 $(($NUM_THREADS - 1))); do
	F="${LOG}_$(printf "%03d" "$I")"
	# Each worker filters its chunk in place against the merged patterns.
	{ grep -Evf "$FILTER_FILE" "$F" > "${F}_"; mv "${F}_" "$F"; } &
done
# wait for all worker threads to finish
wait
rm "$FILTER_FILE"
# re-assemble filtered output to stdout, remove parts
for I in $(seq 0 $(($NUM_THREADS - 1))); do
	cat "${LOG}_$(printf "%03d" "$I")"
	rm "$_"
done
exit 0
| true |
6a0e9f320d43dd8ff5f60636488a059228601792 | Shell | alexshangin/otus | /lesson04/parser.sh | UTF-8 | 3,674 | 3.8125 | 4 | [] | no_license | #!/usr/bin/env bash
# переменные
## лог последнего запуска скрипта
ll=last.log
## общий лог запуска, ведется добавлением последнего лога в этот
al=all.log
## путь к логу nginx
log=access.log
sorter(){
sort \
| uniq -c \
| sort -nr
}
top10(){
head -10
}
top_10_ip(){
awk '{print $1}' $log | sorter | top10
}
top_request(){
cut -d '"' -f3 $log | cut -d ' ' -f2 | sorter | top10
}
url_err(){
awk '{print $9}' $log | awk -F '.-' '$1 <= 599 && $1 >= 400' | sorter
}
top_10_domain(){
awk '{print $13}' $log | grep http | awk 'BEGIN { FS = "/" } ; { print $3 }' | awk 'BEGIN { FS = "\"" } ; { print $1 }' | sorter | top10
}
date_end_str(){
awk '{print $4}' $log | tail -1 | sed 's/\[//g'
}
get_date_end(){
#cat $ll | grep timeend | sed 's/timeend//g'
grep timeend $ll | sed 's/timeend//g'
}
start_str(){
awk '{print $4}' $log | grep -nr get_date_end | cut -d : -f 2
}
end_str(){
wc -l $log | awk '{print $1}'
}
range(){
sed -n '$start_str,$end_str_file' $log
}
## основная функция
main(){
### получим номер последней строки
end_str_file=$(end_str)
### извлекаем из нее дату и добавим в память для записи в лог как time_end
time_end=$(date_end_str)
### обнуляем лог скрипта
:> $ll
### стартуем обработку
echo "top 10 ip adresses" >> $ll
echo range | top_10_ip >> $ll
echo "top 10 requests" >> $ll
echo range | top_request >> $ll
echo "top 10 domains" >> $ll
echo range | top_10_domain >> $ll
echo "all errors" >> $ll
echo range | url_err >> $ll
### добавляем последнюю дату как первую для запуска в лог
echo "timestart$time_start" >> $ll
### дописываем дату окончания
echo "timeend $time_end" >> $ll
### дописываем контрольный END
echo "END" >> $ll
}
### проверяем на наличие старого файла лога работы скрипта, если есть
if [ -e $ll ]
then
### если последняя стока лога скрипта END
if [[ $( tail -1 $ll) == END ]]
then
### скрипт завершился корректно
echo "it's ok!"
### дописываем лог в общий
cat $ll >> $al
### проверяем строкe timeend, если есть
if [[ $( grep timeend $ll | awk '{print $1}' ) == timeend ]]
then
### берем дату последнего запуска и добавим в память для записи в лог как time_start
time_start=$(get_date_end)
### получаем номер строки для начала обработки
start_str_file=$(start_str)
### работа основной функции
main
else
### присваиваем начальный номер строки 1 с которого ведется обработка лога nginx
start_str_file=1
### работа основной функции
main
fi
else
### сообщение - скрипт не завершился
echo "last run script isn't end!"
fi
else
### сообщение
echo "create new file log"
### создаем файл лога скрипта
touch $ll
### присваиваем начальный номер строки 1 с которого ведется обработка лога nginx
start_str_file=1
### работа основной функции
main
fi
## отправим письмо
cat $ll | mail -s "Last hour log" master@otus.ru | true |
27b51f24f0320d1d6f5b0ba51d3691ec7afd6243 | Shell | neg-serg/dotfiles_old | /.zsh/99-misc.zsh | UTF-8 | 8,585 | 3.421875 | 3 | [] | no_license | function arc-fix(){
find -type f -exec sed -i 's/5294e2/00465F/g' {} \;
find -type f -exec sed -i 's/5294E2/00465F/g' {} \;
}
function chrome_history(){
export CONF_COLS=$[ COLUMNS/2 ]
export CONF_SEP='{::}'
cp -f ${XDG_CONFIG_HOME}/chromium/Default/History /tmp/h
sqlite3 -separator $CONF_SEP /tmp/h 'select title, url from urls order by last_visit_time desc' \
| ruby -ne '
cols = ENV["CONF_COLS"].to_i
title, url = $_.split(ENV["CONF_SEP"])
puts "\x1b[33m#{title.ljust(cols)}\x1b[0m #{url}"' \
| fzf --ansi --multi --no-hscroll --tiebreak=index \
| grep --color=never -o 'https\?://.*'
unset CONF_COLS CONF_SEP
}
function firefox_history(){
(($# == 1)) || { echo "Usage: ${FUNCNAME} path/to/places.sqlite"; exit 1; }
# http://unfocusedbrain.com/site/2010/03/09/dumping-firefoxs-places-sqlite/
sqlite3 "$@" <<-EOF
SELECT datetime(moz_historyvisits.visit_date/1000000, 'unixepoch'), moz_places.url, moz_places.title, moz_places.visit_count
FROM moz_places, moz_historyvisits
WHERE moz_places.id = moz_historyvisits.place_id
ORDER BY moz_historyvisits.visit_date DESC;
EOF
}
rodir() { sudo mount --bind "$@" && sudo mount -o remount,ro,bind "$@" }
# unrpm: unpack an rpm into a build dir
#
# Copyright 2007 Aron Griffis <agriffis@n01se.net>
# Released under the GNU General Public License v2
unrpm() {
declare cmd=${0##*/}
declare dum version
read dum version dum <<<'$Revision: 4036 $'
case $cmd in
unrpm_|rpmbuild) true ;;
*) die "unrpm: I don't know how to be $cmd" ;;
esac
$cmd "$@"
exit $?
}
unrpm_() {
declare args usage
read -d '' usage <<EOT
usage: unrpm pkg-1.0.src.rpm...
-f --force Unpack into an existing dir
-l --list List contents rather than unpack
-p --prep Prep sources after unpack
-v --verbose Be louder
--help Show this help message
--version Show version information
EOT
# Use /usr/bin/getopt which supports GNU-style long options
declare opt_force=false
declare opt_list=false
declare opt_prep=false
declare opt_verbose=false
args=$(getopt -n "$0" \
-o flpv --long force,help,list,prep,verbose,version -- "$@") || exit
eval set -- "$args"
while true; do
case $1 in
-f|--force) opt_force=true ; shift ;;
-l|--list) opt_list=true ; shift ;;
-p|--prep) opt_prep=true ; shift ;;
-v|--verbose) opt_verbose=true ; shift ;;
--help) echo_unrpm_ "$usage"; exit 0 ;;
--version) echo_unrpm_ "$cmd $version"; exit 0 ;;
--) shift; break ;;
*) die "failed to process cmdline args" ;;
esac
done
if [[ -z $1 ]]; then
die "missing argument, try --help"
elif [[ ! -r $1 ]]; then
die "can't read: $1"
fi
set -e
declare dirs rpm repo v
$opt_verbose && v=v ||:
for rpm in "$@"; do
repo=$(rpm -qp --qf '%{N}-%{V}-%{R}' "$rpm")
dirs=( "$repo/"{BUILD,RPMS,SOURCES,SPECS,SRPMS} )
if $opt_list; then
rpm2cpio $rpm | cpio --quiet -it$v | \
sed "s|^[./]*/*|$repo/SOURCES/|;/\\.spec/s/SOURCES/SPECS/"
continue
fi
if $opt_force; then
mkdir -p$v "${dirs[@]}"
else
mkdir ${v:+-v} $repo "${dirs[@]}"
fi
rm -f$v $repo/SOURCES/* $repo/SPECS/*
rpm2cpio $rpm | ( cd $repo/SOURCES; cpio --quiet -imd$v; )
mv ${v:+-v} $repo/SOURCES/*.spec $repo/SPECS
if $opt_prep; then
rpmbuild -bp $repo/SPECS/*.spec
fi
done
}
echo_unrpm_() {
printf '%s\n' "$*"
}
die() {
declare status=1
if [[ $1 == ?* && $1 != *[!0-9]* ]]; then
status=$1
shift
fi
echo_unrpm_ "$cmd: ${*:-error}" >&2
exit $status
}
rpmbuild() {
declare x topdir
for x; do
if [[ $x == *.spec ]]; then
topdir=$(cd $(dirname $x)/..; pwd)
break
elif [[ $1 == -t* ]]; then
case $x in
*.tar.gz|*.tar.bz2) topdir=${x%.*.*}; break ;;
*.tgz|*.tbz2) topdir=${x%.*}; break ;;
esac
fi
done
set -e
declare cmd status=0
# it sucks when rpmbuild bombs because of missing dirs
[[ -z $topdir ]] || topdir=$(readlink -f $topdir)
[[ -z $topdir ]] || mkdir -p "$topdir/"{SPECS,SOURCES,BUILD,RPMS,SRPMS}
# can't use simple "wrapped $0" because we might have been called as unrpm
cmd=(
"$(wrapped "$(dirname "$(type -P "$0")")"/rpmbuild)"
${topdir:+--define="_topdir $topdir"}
"$@"
)
printf "%q " "${cmd[@]}"; echo_unrpm_
# log rpmbuild output
[[ -z $topdir ]] || exec 3>&1 4>&2 1> >(tee $topdir/rpmbuild-$$.out) 2>&1
"${cmd[@]}" || status=$?
[[ -z $topdir ]] || exec 1>&3- 2>&4-
set +e
return $status
}
function gfshow(){
git log --graph --color=always \
--format="%C(auto)%h%d %s %C(bold black)(%cr) %C(bold blue)<%an>" "$@" |
SHELL="/bin/bash" fzf --ansi --no-sort --reverse --tiebreak=index --toggle-sort=ctrl-s \
--exact --cycle --inline-info --prompt="Commits> " \
--bind "ctrl-m:execute:
(grep -o '[0-9a-f]\{7\}' | head -1 |
xargs -I % sh -c 'git show --color=always % | less -R') << 'FZF_EOF'
{}
FZF_EOF"
}
function allip(){
netstat -lantp \
| grep ESTABLISHED \
| awk '{print }' \
| awk -F: '{print }' \
| sort -u
}
function flac2alac() {
local infile="$1"
local outfile="${infile/%flac/m4a}"
local album_artist=$(metaflac --show-tag='ALBUM ARTIST' "$infile" | sed 's/ALBUM ARTIST=//g')
echo "Converting ${infile} to ${outfile} ..."
ffmpeg -i "$infile" -acodec flac -metadata album_artist="$album_artist" "$outfile"
}
function flac2mp3(){
for f in "$@"; do
[[ "$f" != *.flac ]] && continue
album="$(metaflac --show-tag=album "$f" | sed 's/[^=]*=//')"
artist="$(metaflac --show-tag=artist "$f" | sed 's/[^=]*=//')"
date="$(metaflac --show-tag=date "$f" | sed 's/[^=]*=//')"
title="$(metaflac --show-tag=title "$f" | sed 's/[^=]*=//')"
year="$(metaflac --show-tag=date "$f" | sed 's/[^=]*=//')"
genre="$(metaflac --show-tag=genre "$f" | sed 's/[^=]*=//')"
tracknumber="$(metaflac --show-tag=tracknumber "$f" | sed 's/[^=]*=//')"
flac --decode --stdout "$f" | lame -b 320 --add-id3v2 --tt "$title" --ta "$artist" --tl "$album" --ty "$year" --tn "$tracknumber" --tg "$genre" - "${f%.flac}.mp3"
done
}
# tmux-neww-in-cwd - open a new shell with same cwd as calling pane
# http://chneukirchen.org/dotfiles/bin/tmux-neww-in-cwd
tmux-neww-in-cwd() {
SIP=$(tmux display-message -p "#S:#I:#P")
PTY=$(tmux server-info |
egrep flags=\|bytes |
awk '/windows/ { s = $2 }
/references/ { i = $1 }
/bytes/ { print s i $1 $2 } ' |
grep "$SIP" |
cut -d: -f4)
PTS=${PTY#/dev/}
PID=$(ps -eao pid,tty,command --forest | awk '$2 == "'$PTS'" {print $1; exit}')
DIR=$(readlink /proc/$PID/cwd)
tmux neww "cd '$DIR'; $SHELL"
}
function print_hooks() {
print -C 1 ":::pwd_functions:" ${chpwd_functions}
print -C 1 ":::periodic_functions:" ${periodic_functions}
print -C 1 ":::precmd_functions:" ${precmd_functions}
print -C 1 ":::preexec_functions:" ${preexec_functions}
print -C 1 ":::zshaddhistory_functions:" ${zshaddhistory_functions}
print -C 1 ":::zshexit_functions:" ${zshexit_functions}
}
function fun::fonts(){
alias 2023='toilet -f future'
alias gaym='toilet --gay -f mono9 -t'
alias gayf='toilet --gay -f future -t'
alias gayt='toilet --gay -f term -t'
alias gayp='toilet --gay -f pagga -t'
alias metm='toilet --metal -f mono9 -t'
alias metf='toilet --metal -f future -t'
alias mett='toilet --metal -f term -t'
alias metp='toilet --metal -f pagga -t'
alias 3d='figlet -f 3d'
}
function sh_lsof(){
pushd
cd /proc
for a in * ; do
test "${a}" -gt 0 2> /dev/null
[[ ! $? = 0 ]] && continue
pid_="${a}"
name="$(readlink ${a}/exe)"
[[ -z "${name}" ]] && continue
name="$(basename ${name})"
( cd ${a}/fd
for b in * ; do
link="$(readlink ${b})"
[[ -z "${link}" ]] && continue
printf "${pid_}\t${name}\t${link}\n"
done
)
done
popd
}
| true |
f3be694984cfa7c06575f802796684103d1297df | Shell | daKmoR/ycli | /ycli-scripts/multiple/resume.sh | UTF-8 | 355 | 3.125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
# Bash Autocomplete
#
if [ "1" == "ycliCommands" ]; then
return;
fi
if [ -z "${ycliMultipleResumeElements[@]}" ]; then
echo '[ERROR] You can only resume if a "ycli multiple <your command>" did not finish before';
echo "";
return;
fi
ycliMultipleElements=(${ycliMultipleResumeElements[@]});
ycli multiple "$ycliMultipleResumeCommand"
| true |
5d2174a1ab3407a26740f2e8782fac635f98e68e | Shell | yolmant/Servers | /Installer/NFS-client.bash | UTF-8 | 334 | 3.1875 | 3 | [] | no_license | !#/bin/bash
#configuration of NFS client in ubuntu
#install the NFS
apt-get -y install nfs-common
#creat a directory for the mounts
mkdir -p /NFS/home
#modify the fstab file to mount our directory from the server
sh -c 'echo "10.128.0.4:/NFS/sharedfiles /NFS/home nfs defaults 0 0" >> /etc/fstab
#mount the directory
mount -a
| true |
add2b4142ea5ea3a4d3c18dccfa1160cd61f0170 | Shell | lpf7551321/script_shell | /Nightly-range-partition-crud.sh | UTF-8 | 515 | 3.1875 | 3 | [] | no_license | #!/bin/sh
basedir=`dirname $0`
#### Include the library
. $basedir/sh2ju.sh
#### Clean old reports
juLogClean
cd $basedir/crud
pwd
COMMAND="transwarp -t -h localhost"
$COMMAND -f external_table.sql
for dir in mc_date mc_int mc_string sc_date sc_int sc_string
do
#create full/part range table
$COMMAND -f $dir/${dir}_fullRange.sql
$COMMAND -f $dir/${dir}_partRange.sql
#start exec sql
for file in query1 query2 query3 query4 query5
do
juLog -name=$file "sh run_sql.sh $dir $file"
done
done
| true |
1f2d7f06314e6dd65f8fb5ff5f317bab61d3785b | Shell | edvillan15/code-server | /ci/build/code-server.sh | UTF-8 | 891 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# This script is intended to be bundled into the standalone releases.
# Runs code-server with the bundled node binary.
# More complicated than readlink -f or realpath to support macOS.
# See https://github.com/cdr/code-server/issues/1537
bin_dir() {
# We read the symlink, which may be relative from $0.
dst="$(readlink "$0")"
# We cd into the $0 directory.
cd "$(dirname "$0")" || exit 1
# Now we can cd into the dst directory.
cd "$(dirname "$dst")" || exit 1
# Finally we use pwd -P to print the absolute path of the directory of $dst.
pwd -P || exit 1
}
BIN_DIR=$(bin_dir)
if [ "$(uname)" = "Linux" ]; then
export LD_LIBRARY_PATH="$BIN_DIR/../lib${LD_LIBRARY_PATH+:$LD_LIBRARY_PATH}"
elif [ "$(uname)" = "Darwin" ]; then
export DYLD_LIBRARY_PATH="$BIN_DIR/../lib${DYLD_LIBRARY_PATH+:$DYLD_LIBRARY_PATH}"
fi
exec "$BIN_DIR/../lib/node" "$BIN_DIR/.." "$@"
| true |
ef5212409b630deb9fba561065f6c7319fb9db68 | Shell | Lt0/task-flow | /tflow | UTF-8 | 641 | 3.703125 | 4 | [] | no_license | #!/bin/bash
FLOW_ID=$$
APP_PATH=$(which $0)
show_help(){
echo "
tflow [flow config] read flow.conf from current directory if [flow config] was not specified
"
}
# get report from targets
get_report(){
while true
do
cat /tmp/flow_${FLOW_ID} 2>/dev/null
done
}
flow_conf=$1
[[ -n $FLOW_CONF ]] || FLOW_CONF="flow.conf"
if [[ ! -f $FLOW_CONF ]]; then
echo "${FLOW_CONF} not found"
show_help
exit 1
fi
tflow-parser $FLOW_CONF $FLOW_ID
DISPATCHER=/tmp/${FLOW_ID}_dispatcher.sh
echo running dispatcher
$DISPATCHER &
DISPATCHER_PID=$!
get_report &
wait $DISPATCHER_PID
rm -f $DISPATCHER
echo Please check log in /var/log/tflow/
| true |
c1bd61913ee8f9f988fa1b79d535f6bb48a121f5 | Shell | Robbie1977/NRRDtools | /image2NRRD.sh | UTF-8 | 1,080 | 3.859375 | 4 | [
"MIT"
] | permissive | echo 'before running specify the fiji executable and macro (located in this dir):'
echo 'export FIJI=path/fiji'
echo 'export MACRO=ThisDir/image2NRRD.ijm'
echo 'export EXT=h5j'
echo 'export TIMEOUT="gtimeout 15m "'
echo 'run in the directory above the volume.nrrd files'
echo '-f forces recreation'
echo '-h runs in headless mode using xvfb-run'
for file in $(pwd)/*/*.${EXT}
do
echo $file
if [ -f $file ]
output=${file/.${EXT}/*.nrrd}
output=$(echo $output|sed 's|\(.*\)/|\1/C1-|')
then
if [ -e ${output} ] && [ "$1" != "-f" ]
then
echo recent nrrd file already exists! Skipping..
else
echo processing $(pwd)${file/.\//\/}...
# if forcing overwite then delete the old copy
if [ "$1" == "-f" ]
then
rm ${output/C1-/C*-}
fi
# convert n5j into nrrd
if [[ $1 == *"h"* ]]
then
$TIMEOUT xvfb-run -w 10 $FIJI -macro $MACRO $file
pkill Xvfb
else
$TIMEOUT $FIJI -macro $MACRO $file
fi
sleep 5s
fi
else
echo Broken file ${file}! Skipping...
fi
done
| true |
89b144250ed769733b515a296cbca671ea3acf50 | Shell | moparisthebest/arch-ppa | /src/crispy-doom/PKGBUILD | UTF-8 | 1,456 | 2.5625 | 3 | [
"MIT"
] | permissive | # Maintainer: Mike Swanson <mikeonthecomputer@gmail.com>
pkgname=crispy-doom
pkgdesc="Vanilla-compatible enhanced Doom engine"
pkgver=5.9.2
pkgrel=2
arch=('i686' 'x86_64')
url="http://fabiangreffrath.github.io/crispy-doom"
license=('GPL2')
depends=('hicolor-icon-theme' 'libpng' 'libsamplerate' 'sdl2_mixer' 'sdl2_net')
makedepends=('python')
optdepends=('freedm: Free deathmatch game'
'freedoom1: Free Ultimate Doom-compatible game'
'freedoom2: Free Doom II-compatible game')
install=crispy-doom.install
source=(https://github.com/fabiangreffrath/$pkgname/archive/$pkgname-$pkgver.tar.gz
0001-prevent-crashes-with-simultaneous-use-of-record-and-.patch)
b2sums=('63d9a89d6099485c79ff4cad69975de790b32a4ece4bded172ebf771a2bf1b4dcd21ab58383ca3c5eea32a4eff72e65ec0e3a283c85f1bc62be680de04f88e52'
'b1e77adf37f22f1ef47b614e27e6158ac7c19bf5b7adfa97a434b04f514a1e5cb7f1f77024a373392c836c5456c87b5bb6f7240566389574392a2e5f05d63d5d')
prepare() {
cd "$pkgname-$pkgname-$pkgver"
for patch in ../*.patch; do
if [ ! -f "$patch" ]; then
break;
else
patch -p1 -i "$patch"
fi
done
}
build() {
cd "$pkgname-$pkgname-$pkgver"
./autogen.sh --prefix=/usr
make
}
package() {
cd "$pkgname-$pkgname-$pkgver"
make DESTDIR="$pkgdir" install
cd "$pkgdir"/usr
rm -rf share/man/man5/default.cfg.5 \
share/man/man5/heretic.cfg.5 \
share/man/man6/chocolate-{server,setup}.6
}
| true |
389b35b9687749329043c890b3493e6270123b93 | Shell | arron9/dockerfiles | /php/7.1-dev/usr/local/bin/docker-php-entrypoint | UTF-8 | 494 | 3.71875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
set -o errexit # set -e : exit the script if any statement returns a non-true return value
# Enable core dumps
ulimit -c unlimited
echo "Set: ulimit -c unlimited"
# Specify filename and path for core dumps
echo "/tmp/cores/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
mkdir -p /tmp/cores
# Set folder permissions
chmod a+rwx /tmp/cores
chmod +x /usr/local/bin/core_dump
# first arg is `-f` or `--some-option`
if [ "${1#-}" != "$1" ]; then
set -- php "$@"
fi
exec "$@"
| true |
17814d3edadc84fc2b2eabb38b89cc4726140104 | Shell | vith/archlinux-packages-community | /lib32-openal/repos/multilib-x86_64/PKGBUILD | UTF-8 | 1,175 | 2.609375 | 3 | [] | no_license | # Maintainer: Jan Alexander Steffens (heftig) <jan.steffens@gmail.com>
# Contributor: Allan McRae <allan@archlinux.org>
# Contributor: Jason Chu <jchu@xentac.net>
pkgname=lib32-openal
pkgver=1.20.1
pkgrel=1
pkgdesc="Cross-platform 3D audio library, software implementation (32-bit)"
arch=(x86_64)
url="https://github.com/kcat/openal-soft"
license=(LGPL)
depends=(lib32-gcc-libs openal)
makedepends=(lib32-alsa-lib lib32-libpulse lib32-fluidsynth lib32-portaudio
lib32-jack git cmake)
optdepends=('lib32-fluidsynth: MIDI rendering')
_commit=f5e0eef34db3a3ab94b61a2f99f84f078ba947e7 # tags/openal-soft-1.20.1
source=("git+https://github.com/kcat/openal-soft#commit=$_commit")
sha256sums=('SKIP')
pkgver() {
cd openal-soft
git describe --tags | sed 's/^openal-soft-//;s/-/+/g'
}
prepare() {
cd openal-soft
}
build() {
export CC="gcc -m32 -mstackrealign"
export CXX="g++ -m32 -mstackrealign"
export PKG_CONFIG=i686-pc-linux-gnu-pkg-config
cmake -Hopenal-soft -Bbuild \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_BUILD_TYPE=None \
-DCMAKE_INSTALL_LIBDIR=lib32
cmake --build build
}
package() {
DESTDIR="$pkgdir" cmake --build build --target install
rm -rv "$pkgdir"/usr/{include,share,bin}
}
| true |
f51a4aff751d8839005c94a8bb29414139c9a8e4 | Shell | xabufr/meteor-falls | /Editor/makeUi.sh | UTF-8 | 167 | 2.953125 | 3 | [] | no_license | #!/bin/bash
for fic in UI/*.ui
do
base=$(basename $fic)
fileBase=${base%.*}
echo "processing $fic to UI/${fileBase}.py"
pyside-uic $fic -o UI/${fileBase}.py
done
| true |
b07ecc5165620993898ffc1c59c6ca1b18bf00df | Shell | loloazz/szt | /dwd/dwd_fact_szt_in_out_detail.sh | UTF-8 | 1,535 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env bash
# ** 文件名称: dwd_fact_szt_in_out_detail.sh
# ** 创建日期: 2020年8月22日
# ** 编写人员: qinxiao
# ** 输入信息:
# ** 输出信息:
# **
# ** 功能描述:地铁出战入站数据
# ** 处理过程:
# ** Copyright(c) 2016 TianYi Cloud Technologies (China), Inc.
# ** All Rights Reserved.
#***********************************************************************************
#***********************************************************************************
#==修改日期==|===修改人=====|======================================================|
#
#***********************************************************************************
#获取脚本所在目录
shell_home="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
#进入脚本目录
cd $shell_home
day=$1
spark-sql \
--master yarn-client \
--num-executors 1 \
--executor-memory 4G \
--executor-cores 2 \
--conf spark.sql.shuffle.partitions=4 \
-e "
INSERT OVERWRITE TABLE dwd.dwd_fact_szt_in_out_detail partition(DAY = '${day}')
SELECT car_no,
card_no,
close_date,
company_name,
conn_mark,
deal_date,
deal_money,
deal_type,
deal_value,
equ_no,
station
FROM ods.ods_szt_data
WHERE deal_type != '巴士'
AND unix_timestamp(deal_date, 'yyyy-MM-dd HH:mm:ss') > unix_timestamp('${day} 06:14:00', 'yyyy-MM-dd HH:mm:ss')
AND unix_timestamp(deal_date, 'yyyy-MM-dd HH:mm:ss') < unix_timestamp('${day} 23:59:00', 'yyyy-MM-dd HH:mm:ss')
AND DAY = '${day}'
ORDER BY deal_date
"
| true |
9ed3f5b93d403a596a8c51567eb66cbea9181db2 | Shell | delkyd/alfheim_linux-PKGBUILDS | /mspgcc-ti/PKGBUILD | UTF-8 | 1,479 | 3.015625 | 3 | [] | no_license | # Maintainer: Peter Ivanov <ivanovp@gmail.com>
pkgname=mspgcc-ti
pkgver=5.00.00.00
pkgrel=1
pkgdesc="GNU toolchain (as, gcc, g++, ld, gdb) for the TI MSP430 processor"
arch=('i686' 'x86_64')
url="http://software-dl.ti.com/msp430/msp430_public_sw/mcu/msp430/MSPGCC/latest/index_FDS.html"
license=('GPL')
depends_i686=('elfutils' 'libmpc' 'zlib')
depends_x86_64=('elfutils' 'libmpc' 'zlib' 'lib32-gcc-libs' 'lib32-glibc' 'lib32-libstdc++5' 'lib32-zlib' 'lib32-fakeroot')
options=(!strip !emptydirs !libtool staticlibs !upx)
PKGEXT=".pkg.tar"
install=mspgcc-ti.install
_installer=msp430-gcc-full-linux-installer-5.0.0.25.run
source=("http://software-dl.ti.com/msp430/msp430_public_sw/mcu/msp430/MSPGCC/5_00_00_00/exports/$_installer" "${pkgname}.sh")
sha1sums=('551f11fd8e4469cf8c279e0db4b7ba23d30facb3'
'a4a81f1b041bf39c3f9c75d94c22d149d1ceee9e')
_install_dir=/opt/ti/mspgcc
build() {
chmod +x $_installer
}
package() {
msg "Running TI's installer..."
${srcdir}/$_installer --mode unattended --prefix $pkgdir$_install_dir
mkdir -p $pkgdir$_install_dir/msp430-elf/lib
msg "Moving linker scripts to their place..."
mv $pkgdir$_install_dir/include/*.ld $pkgdir$_install_dir/msp430-elf/lib
mkdir -p $pkgdir$_install_dir/msp430-elf/include
msg "Moving header files to their place..."
mv $pkgdir$_install_dir/include/*.h $pkgdir$_install_dir/msp430-elf/include
install -Dm755 "${srcdir}/${pkgname}.sh" "${pkgdir}/etc/profile.d/${pkgname}.sh"
}
# vim:set sts=2 ts=2 sw=2 et:
| true |
3a920dacb470490086bbad3a703e09ed2d4a6a53 | Shell | pcdas/test-mutualauth | /gen-myservice-certs.sh | UTF-8 | 1,422 | 3.515625 | 4 | [] | no_license | #!/bin/bash
OUTDIR=${PWD}/certs
mkdir -p ${OUTDIR}
# We are generating certificates signed by mycloudca and importing it
# into ${DOMAIN}.jks
for service in myserviceA myserviceB
do
for i in ${service} ${service}01 ${service}02
do
DOMAIN=${i}.mycloud.io
if [[ ${i} == ${service} ]]
then EXT_SAN=""
else EXT_SAN="-ext SAN=dns:${service}.mycloud.io"
fi
# import the public certificate to a serverside java keystore file
keytool -storetype JKS -keystore ${OUTDIR}/${DOMAIN}.jks -storepass password -importcert -noprompt -alias mycloudca -file ${OUTDIR}/mycloudca.cer
keytool -genkeypair -dname "cn=$DOMAIN, ou=R&D, o=Mycloud LLC, c=US" -storetype JKS -keystore ${OUTDIR}/${DOMAIN}.jks -storepass password -keypass password -alias $DOMAIN -keyalg RSA
keytool -storetype JKS -keystore ${OUTDIR}/${DOMAIN}.jks -storepass password -alias $DOMAIN -certreq -file ${OUTDIR}/$DOMAIN.csr
keytool -gencert -infile ${OUTDIR}/$DOMAIN.csr -outfile ${OUTDIR}/$DOMAIN.cer -alias mycloudca -storetype JKS -keystore ${OUTDIR}/mycloudca.jks -storepass password -ext KU=digitalSignature,nonRepudiation -ext EKU=clientAuth,serverAuth,codeSigning ${EXT_SAN} -validity 365
rm ${OUTDIR}/$DOMAIN.csr
keytool -storetype JKS -keystore ${OUTDIR}/${DOMAIN}.jks -storepass password -importcert -noprompt -alias $DOMAIN -file ${OUTDIR}/$DOMAIN.cer
done
done
| true |
baf996d9cc7c059a3a8c806566c13a29fa976bee | Shell | AndriiNikitin/mariadb-environs-maxscale | /_template/cluster/maxscale_print_cnf.sh | UTF-8 | 1,071 | 3.578125 | 4 | [] | no_license | #!/bin/bash
set -e
function enum_nodes() {
local first=1
for eid in $(cat __clusterdir/nodes.lst) ; do
[ "$first" == 1 ] || echo -n ', '
echo -n "$eid"
first=0
done
}
function enum_nodes_detailed() {
for eid in $(cat __clusterdir/nodes.lst) ; do
cat << EON
[$eid]
type=server
address=127.0.0.1
port=$(__clusterdir/../$eid*/print_port.sh)
protocol=MySQLBackend
EON
done
}
function write_router_options() {
if [ -z "$1" ] ; then
echo master
else
echo $1
fi
}
function read_router_options() {
if [ -z "$1" ] ; then
echo slave
else
echo $1
fi
}
cat << EOF
[Write-Service]
type=service
router=readconnroute
router_options=$(write_router_options $1)
user=maxscale
passwd=maxscale
servers=$(enum_nodes)
[Read-Service]
type=service
router=readconnroute
router_options=$(read_router_options $1)
user=maxscale
passwd=maxscale
servers=$(enum_nodes)
$(enum_nodes_detailed)
[Replication-Monitor]
type=monitor
module=mysqlmon
servers=$(enum_nodes)
user=maxscale
passwd=maxscale
monitor_interval=10000
EOF
| true |
dfe1dd7f2998ab346b676eccf338363f33a73817 | Shell | xuusheng/system-config | /bin/git-remote-url | UTF-8 | 202 | 2.65625 | 3 | [] | no_license | #!/bin/bash
set -e
test $# == 0 -o -z "$1" && set -- origin
git config remote.$1.url | (
if test "$BPE_ON_APSE"; then
perl -npe 's,.*?:,bibler:,; s,:/git/android/,:shgit/,'
else
cat
fi
)
| true |
6c09e84268cc410f667dd8c4884cb778b866b99c | Shell | dbernheisel/exgradebook | /bin/deploy | UTF-8 | 614 | 3.484375 | 3 | [] | no_license | #!/bin/sh
# Run this script to deploy the app to Heroku.
set -e
app_name="exgradebook-${1}"
target="${1:-staging}"
branch="$(git symbolic-ref HEAD --short)"
if [ "$target" = "production" ] && [ "$branch" != "master" ]; then
echo "You are not on the master branch and trying to deploy to production"
echo "If you really want to deploy to production with a different branch"
echo "you will need to do it manually"
exit 1
fi
git push "$target" "$branch:master"
heroku maintenance:on --app "$app_name"
bin/post_deploy "$target"
heroku restart --app "$app_name"
heroku maintenance:off --app "$app_name"
| true |
b2c2856993f7435e001e7f68d5c2c65c5f7e34f0 | Shell | AquariusPower/CppDebugMessages | /.devsPrefs/AquariusPower/git tools/gitToolsCommonCode.sh | UTF-8 | 468 | 3.3125 | 3 | [
"BSD-3-Clause"
] | permissive | set -u #so when unset variables are expanded, gives fatal error
echo "HELP: ${strHelp-}"
egrep "[#]help" $0
echo "-------------------"
echo
pwd;if [[ ! -f ./.git/config ]];then echo "PROBLEM: not at valid VCS path";exit 1;fi
function FUNCexecEcho() { echo; echo " >>> EXEC: $@"; "$@"; return $?; }
function FUNCexecEchoW() { FUNCexecEcho "$@"; local nRet=$?; if((nRet!=0));then echo "error $nRet";return $nRet;fi; read -p ">>> press ENTER/RETURN to continue..."; }
| true |
a94e72478e901bd393edfd84d76e445e2bc08a89 | Shell | piotras/raki | /Ragnaroek/travis_midgard.sh | UTF-8 | 946 | 3.34375 | 3 | [] | no_license | #!/bin/bash
if [ -n "$MIDGARD_EXT_VERSION:" ] ; then
MIDGARD_EXT_VERSION="ratatoskr"
fi
# Install Midgard2 library dependencies
sudo apt-get install -y dbus libglib2.0-dev libgda-4.0-4 libgda-4.0-dev libxml2-dev libdbus-1-dev libdbus-glib-1-dev libgda-4.0-mysql
# Build Midgard2 core from recent tarball
wget -q https://github.com/midgardproject/midgard-core/tarball/${MIDGARD_EXT_VERSION} -O ${MIDGARD_EXT_VERSION}
tar -xzf ${MIDGARD_EXT_VERSION}
sh -c "cd midgardproject-midgard-core-*&&./autogen.sh --prefix=/usr; make; sudo make install"
rm -f ${MIDGARD_EXT_VERSION}
# Build and install Midgard2 PHP extension
wget -q https://github.com/midgardproject/midgard-php5/tarball/${MIDGARD_EXT_VERSION} -O ${MIDGARD_EXT_VERSION}
tar zxf ${MIDGARD_EXT_VERSION}
sh -c "cd midgardproject-midgard-php5-* && phpize && ./configure && sudo make install"
echo "extension=midgard2.so" >> `php --ini | grep "Loaded Configuration" | sed -e "s|.*:\s*||"`
| true |
9a75da27e6c4b6d0813125766215c3329cff79c4 | Shell | paulpas/pixie_farm | /Ubuntu1804/install.sh | UTF-8 | 2,325 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env bash
# Installs DRBL and configures it
# Push new config
#### Install DRBL
cat > /etc/apt/sources.list.d/drbl.list << EOF
deb http://archive.ubuntu.com/ubuntu bionic main restricted universe multiverse
deb http://free.nchc.org.tw/drbl-core drbl stable
EOF
wget -q http://drbl.org/GPG-KEY-DRBL -O- | sudo apt-key add -
apt update
apt install -y drbl
#### Setup networking
apt -y purge netplan.io # Doesn't support aliased interfaces, so single NIC DRBL support isn't there
apt install -y ifupdown
systemctl disable ufw
systemctl stop ufw
#### Setup DNS resolution
apt install -y resolvconf
#### Packages to build xmrig
apt-get install -y git build-essential cmake libuv1-dev libmicrohttpd-dev libssl-dev libhwloc-dev
# Disable sleep, and laptop lid closes
echo 'HandleLidSwitchDocked=ignore' | tee --append /etc/systemd/logind.conf
echo 'HandleLidSwitch=ignore' | tee --append /etc/systemd/logind.conf
# Install xmrig.service
cp xmrig.service /etc/systemd/system/multi-user.target.wants/
systemctl enable xmrig.service
# Clear out new configs
rm -rf /tftpboot/nodes/10.255.254.*
# Populate drbl configuration
cp -r drbl/*.conf /etc/drbl/
# Push DRBL config non-interactively
yes "y" | drblpush -c /etc/drbl/drblpush.conf
# Enable resolvconf
systemctl enable resolvconf
drbl-client-service resolvconf on
# Enable xmrig
drbl-client-service xmrig on
# I noticed dhcpd doesn't get restarted after a push, so this is a DRBL work-around
systemctl enable isc-dhcp-server
systemctl restart isc-dhcp-server
# Copy resolv.conf to clients
drbl-cp-host /etc/resolvconf/resolv.conf /etc/resolv.conf
# Handle the the ssh keys manually
####################################
# Generate keys if they do not exist, and under all circumstances, populate authorized_keys
if [[ -f /root/.ssh/id_rsa.pub ]]
then
cat /root/.ssh/id_rsa.pub > /root/.ssh/id_rsa.pub/authorized_keys
chmod 600 /root/.ssh/id_rsa.pub/authorized_keys
else
ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa
cat /root/.ssh/id_rsa.pub > /root/.ssh/id_rsa.pub/authorized_keys
chmod 600 /root/.ssh/id_rsa.pub/authorized_keys
fi
# Deploy keys
for i in $(ls /tftpboot/nodes/); do
mkdir /tftpboot/nodes/$i/root/.ssh 2>/dev/null
cp /root/.ssh/authorized_keys /tftpboot/nodes/$i/root/.ssh/authorized_keys
chmod -R 600 /tftpboot/nodes/$i/root/.ssh
done
| true |
fb59d15dbc4e47b672d346426a20628098fd7936 | Shell | scribblemaniac/_ebDev | /scripts/imgAndVideo/svgo_optimize.sh | UTF-8 | 2,231 | 3.546875 | 4 | [] | no_license | # DESCRIPTION
# Optimizes an svg input file (writing the result to <originalFileName>_opt.svg) including color code conversion suited for random recoloring via BWsvgRandomColorFill.sh.
# DEPENDENCIES
# A nodejs install with the svgo (svgomg) package installed.
# USAGE
# First examine and if you wish to copy the file .svgo.yml in this distribution over the .svgo.yml file that comes with svgo. Among other things it preserves path IDs and long hex color form.
# Invoke this script with one parameter $1 (required), being the name of the svg file for which you want an ~_opt.svg file produced in the same directory; e.g.:
# thisScript.sh inputFile.svg
# NOTE that the CLIopts variables, if you uncomment them, override the .svgo.yml config file.
# ALSO NOTE that this may misbehave when invoked via cygwin. I've at times found that if I copy and paste the printed command to a cmd prompt, it works ok . . . except the result displays wonky in Internet Explorer and inkscape. :(
fileNameNoExt=${1%.}
# CLIopts="--enable=convertColors --enable=collapseGroups --disable=convertPathData"
SVGOcommand="svgo -i $1 --pretty $CLIopts -o "$fileNameNoExt"_opt.svg"
echo Running command\:
echo $SVGOcommand
echo . . .
$SVGOcommand
# OPTIONAL and DANGER: will toast original file--comment out if you do not want that! :
rm $1 && mv "$fileNameNoExt"_opt.svg $1
# SVGO CLI OPTIONS NOTES
# -i input.svg -o output.svg
# -p precision; want 3 decimal points
# --pretty
# ? --multipass
# --config=CONFIG : Config file or JSON string to extend or replace default
# SVGO USAGE
# svgo [OPTIONS] [ARGS]
# Options:
# -h, --help : Help
# -v, --version : Version
# -i INPUT, --input=INPUT : Input file, "-" for STDIN
# -s STRING, --string=STRING : Input SVG data string
# -f FOLDER, --folder=FOLDER : Input folder, optimize and rewrite all *.svg files
# -o OUTPUT, --output=OUTPUT : Output file or folder (by default the same as the input), "-" for STDOUT
# -p PRECISION, --precision=PRECISION : Set number of digits in the fractional part, overrides plugins params
# --config=CONFIG : Config file or JSON string to extend or replace default
# --disable=DISABLE : Disable plugin by name
# --enable=ENABLE : Enable plugin by name | true |
184a72915c6a1ee5cce3273e35ad04bbce3cdbbb | Shell | jasonwryan/surfraw | /elvi/woffle | UTF-8 | 1,950 | 3.6875 | 4 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/sh
# $Id$
# elvis: woffle -- Search the web using Woffle (localhost:8080)
# Load the shared surfraw library (provides def, setopt, w3_* helpers);
# the elvis cannot run without it.
. surfraw || exit 1
# Declare the default values for this elvis' configuration options.
w3_config_hook () {
def SURFRAW_woffle_search search/htdig/htsearch
def SURFRAW_woffle_format 'builtin-long'
def SURFRAW_woffle_method and
def SURFRAW_woffle_sort score
}
# Print the elvis-specific usage text (the here-doc is user-visible output).
w3_usage_hook () {
cat <<EOF
Usage: $w3_argv0 [options] [search words]...
Description:
Surfraw search the web using Woffle (localhost:8080/)
Local options:
-method= Method used for searching...
and | ALL
or | ANY
bool | BOOLEAN
-format= Format of results
short | just a link and title
long | description please
-sort= Sort method
score | best result
revscore | worst result!
time | newest
revtime | oldest
title | abc by title
revtitle | zyx by title
EOF
w3_global_usage
}
# Translate the local -format=/-method=/-sort= options into SURFRAW_woffle_*
# settings; returns 1 for unrecognised options so the generic surfraw
# parser handles them.
w3_parse_option_hook () {
opt="$1"
optarg="$2"
case "$opt" in
-format=*) if [ "$optarg" = "short" ] ; then
optarg="builtin-short"
else
optarg="builtin-long"
fi
setopt SURFRAW_woffle_format $optarg
;;
-method=*) if [ "$optarg" = "bool" ] ; then
optarg="boolean"
fi
setopt SURFRAW_woffle_method $optarg
;;
-sort=*) setopt SURFRAW_woffle_sort $optarg ;;
*) return 1 ;;
esac
return 0
}
w3_config
w3_parse_args "$@"
# w3_args now contains a list of arguments
# No search words: open the search form; otherwise URL-escape the words and
# build the htsearch query string from the configured options.
if test -z "$w3_args"; then
w3_browse_url "http://localhost:8080/search/htdig/search.html"
else
escaped_args=`w3_url_of_arg $w3_args`
w3_browse_url "http://localhost:8080/${SURFRAW_woffle_search}?words=${escaped_args}&method=${SURFRAW_woffle_method}&format=${SURFRAW_woffle_format}&sort=${SURFRAW_woffle_sort}"
fi
| true |
a4f16a80087de2147fecf4eb1fe6a04876704d81 | Shell | scriptum/notebook | /bash/aliases/nim.sh | UTF-8 | 743 | 2.796875 | 3 | [
"MIT"
] | permissive | if hash nim 2>/dev/null; then # nim
alias nbi='nimble install'
alias nbs='nimble search'
alias nimr='nim c -d:release --embedsrc --cc:clang'
alias nimc='nim c -d:release --embedsrc'
if hash cilly 2>/dev/null; then
nim.cilly() {
nim cc -c --genScript -d:release --embedsrc "$@" && {
local script=$(echo compile_*.sh)
sed -i 's/^gcc/cilly --merge --keepmerged/' $script
cp -f $HOME/git/Nim/lib/nimbase.h nimcache/
# sed -i 's@define N_NIMCALL([^)]*)@& static inline@' nimcache/nimbase.h
mv -f $script nimcache/
pushd nimcache >/dev/null
bash $script
[[ -f ${1%.nim} ]] && mv -f ${1%.nim} ../${1%.nim}
popd >/dev/null
}
}
fi
fi # end of nim
| true |
efe8b4b9e8c722f07832587f1f7214747d59858a | Shell | qus-jiawei/cdh3to4 | /bin/depoly_cdh3.sh | UTF-8 | 1,681 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Resolve the directory this script lives in and load shared helpers
# (var_die/file_die/scp_file_and_check/myscp are defined in head.sh).
UP_BIN=$(cd $(dirname $0);pwd)
. $UP_BIN/head.sh
var_die UP_ROOT
echo "**********分发CDH3的包和配置文件*********"
echo "**********不修改软连接和环境配置文件*********"
file_die $CDH3_HADOOP_JAR
# Fixed: previously checked the misspelled $CDH3_HABASE_JAR (always empty),
# so a missing HBase tarball went undetected; the deploy below ships
# $CDH3_HBASE_JAR.
file_die $CDH3_HBASE_JAR
file_die $CDH3_HIVE_JAR
file_die $CDH3_ZK_JAR
echo $CDH3_DIR/$CDH3_HADOOP_JAR
echo $CDH3_HADOOP_DIR
# Copy, distribute, remove the old dir and untar.
# $1 host list  $2 local tarball  $3 remote tarball path  $4 remote dir to replace
send_tar(){
for node in $1
do
scp_file_and_check $2 $node $3
if [ "$FORCE_UNTAR" == "true" ];then
ssh -p $SSH_PORT $node "
rm -rf $4;
echo 'remove $4 and untar $3';
tar -zxf $3
"
else
ssh -p $SSH_PORT $node "
if [ -d $4 ];then
echo 'dir find and skip untar ...';
else
tar -zxf $3
echo 'untar finish ... ';
fi;
"
fi;
done
}
send_tar "$NODES" "$CDH3_DIR/$CDH3_HADOOP_JAR" "~/$CDH3_HADOOP_JAR" "~/$CDH3_HADOOP_DIR"
send_tar "$NODES" "$CDH3_DIR/$CDH3_HBASE_JAR" "~/$CDH3_HBASE_JAR" "~/$CDH3_HBASE_DIR"
send_tar "$HIVE_NODES" "$CDH3_DIR/$CDH3_HIVE_JAR" "~/$CDH3_HIVE_JAR" "~/$CDH3_HIVE_DIR"
send_tar "$ZK_NODES" "$CDH3_DIR/$CDH3_ZK_JAR" "~/$CDH3_ZK_JAR" "~/$CDH3_ZK_DIR"
# Push the LZO native libs and the MySQL JDBC driver to every node.
for node in $NODES
do
myscp "$UP_DATA/cdh3lzo/lib/hadoop-lzo-0.4.12.jar" "$node:~/$CDH3_HADOOP_DIR/lib"
myscp "$UP_DATA/cdh3lzo/lib/native/Linux-amd64-64/*" "$node:~/$CDH3_HADOOP_DIR/lib/native/Linux-amd64-64"
myscp "$UP_DATA/mysql-connector-java-5.1.16-bin.jar" "$node:~/$CDH3_HIVE_DIR/lib/"
done
| true |
bdf3a1989e7e3c493e763977b13e4c4fa0a1cc1d | Shell | Lacrymology/mbc-monit | /scripts/caspa.sh | UTF-8 | 464 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Start/stop wrapper for the mbc-caspa node service.
case $1 in
start)
cd /home/malbec/mbc-caspa/;
# Record this shell's PID so 'stop' can kill the whole process group.
echo $$ > /home/malbec/caspa.pid;
exec 2>&1 make NODE=/usr/local/bin/node NODE_CONFIG_DIR=/home/malbec/mbc-caspa/node_modules/mbc-common/config MBC_SCRAPE=1 serve_noweb 1>/home/malbec/caspa.out ;
;;
stop)
# The negative PID targets the process group recorded at start time.
kill -9 -`cat /home/malbec/caspa.pid`;
rm /home/malbec/caspa.pid;
;;
*)
echo "usage: caspa.sh {start|stop}" ;;
esac
exit 0
| true |
6c766601903fa9924e94b6e3557915f04232d99a | Shell | shagu/pfUI-toolbox | /tools/load_dbcs.sh | UTF-8 | 1,337 | 3.671875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Available game locales
LOCALES="enUS koKR frFR deDE zhCN zhTW esES esMX ruRU"
# PATH_MPQ contains folders for each language (eg. enUS, deDE, ..).
# Each language folder contains the corresponding dbc.MPQ and patch-*.MPQ.
PATH_MPQ="$HOME/games/files/wow/1.12.1/patches"
PATH_OUTPUT="DBC"
# Required DBCs are:
# pfUI:
# ChrClasses.dbc
# ItemSubClass.dbc
# ItemClass.dbc
# Spell.dbc
#
# pfQuest:
# AreaTable.dbc
# WorldMapArea.dbc
#
LIST_DBC=" \
DBFilesClient\ChrClasses.dbc \
DBFilesClient\ItemSubClass.dbc \
DBFilesClient\ItemClass.dbc \
DBFilesClient\Spell.dbc \
DBFilesClient\AreaTable.dbc \
DBFilesClient\WorldMapArea.dbc \
"
# Start from a clean output directory.
rm -rf $PATH_OUTPUT && mkdir $PATH_OUTPUT
for loc in $LOCALES; do
if [ -d ${PATH_MPQ}/${loc} ]; then
echo ":: Building $loc"
rm -rf $loc && mkdir $loc
for dbc in $LIST_DBC; do
echo " - $dbc"
# -p lists the patch archives so later patches override base dbc.MPQ entries.
MPQExtractor \
-p ${PATH_MPQ}/${loc}/patch.MPQ ${PATH_MPQ}/${loc}/patch-1.MPQ ${PATH_MPQ}/${loc}/patch-2.MPQ ${PATH_MPQ}/${loc}/patch-3.MPQ \
-e "$dbc" \
-o $loc \
"${PATH_MPQ}/${loc}/dbc.MPQ"
done
echo ":: Moving results to $PATH_OUTPUT"
# Rename each extracted file to <Name>_<locale>.dbc inside $PATH_OUTPUT.
for file in $loc/*; do
mv $file $PATH_OUTPUT/$(basename $file .dbc)_$loc.dbc
done
rmdir $loc
else
echo ":: Skipping $loc"
fi
echo
done
| true |
90a5a03425171c1da043ab3cd198e24b22d78ccf | Shell | rpitonak/postgresql | /root/usr/share/container-scripts/pre-init/set-config.sh | UTF-8 | 219 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Destination of the PostgreSQL config inside the data directory.
SERVICE_CONFIG_PATH=/var/lib/pgsql/data/userdata/postgresql.conf
# If the application ships its own postgresql.conf, install it over the default.
[ -r "${APP_DATA}/src/postgresql-config/postgresql.conf" ] && cp "${APP_DATA}/src/postgresql-config/postgresql.conf" ${SERVICE_CONFIG_PATH}
| true |
c043a91539041094c3d76a8cf5e12f713f0fb043 | Shell | lemgandi/raspi_blinky | /usr/local/bin/set_gpio_pins.sh | UTF-8 | 1,354 | 4.5625 | 5 | [] | no_license | #!/bin/bash
#
# Set GPIO pins to be accessible to webserver.
# In fact, make them accessible to the web-server (www-data) and
# the 'typical' user (pi). The 'chmod' allows user 'pi' to write
# Script originally from https://dissectionbydavid.wordpress.com/,
# but modified to pass pin numbers on the command line.
#
# Set up a given pin
#
# $1 = board pin number, $2 = owning user. The group on the sysfs node is
# hard-coded to 'pi'; NOTE(review): the -g option only affects /dev/gpiomem
# below -- confirm whether it was meant to apply here too.
set_a_pin() {
/usr/bin/env gpio export $1 out
chown -R ${2}:pi /sys/class/gpio/gpio${1}
chmod -R g+w /sys/class/gpio/gpio${1}
}
usage() {
echo Usage: $1 -p "pin list" -g group -u user
cat <<EOU
Make GPIO pins accessible to user user and group group. Pin list is a
list of pin numbers as counted on the board (e.g. from pin 1). The quotes
are mandatory if you are setting more than one pin:
set_gpio_pins.sh -p "11 12 14 18"
Group and user default to www-data.
EOU
}
# Main Line
GROUP=www-data
USER=www-data
OPTSTRING="p:hg:u:"
while getopts ${OPTSTRING} O
do
case ${O} in
p)
PIN_LIST=${OPTARG}
;;
g) GROUP=${OPTARG}
;;
u) USER=${OPTARG}
;;
h)
usage $0
exit 1
;;
*)
usage $0
exit 2
;;
esac
done
shift $((OPTIND-1))
# Quoting the expansion is required: with several pins ("11 12 14") the old
# unquoted test expanded to multiple words and '[' failed with "too many
# arguments", so a missing -p was never reliably detected.
if [ "${PIN_LIST:-NULL}" = "NULL" ]
then
usage $0
exit 1
fi
for pin in ${PIN_LIST}; do
set_a_pin ${pin} $USER
done
# Karl's solution, slightly modified
chown root:${GROUP} /dev/gpiomem
chmod g+rw /dev/gpiomem
| true |
d0436588d612178afca7394c3a5e0d6193b87473 | Shell | dmalyuta/config-public | /install/build-youcompleteme | UTF-8 | 1,964 | 4 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#
# Script for installing youcompleteme. Needed because different arguments are
# used for the youcompleteme command depending on the Linux distro.
# See https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -o errexit -o errtrace -o nounset -o pipefail
# shellcheck disable=SC2155
readonly REPO_ROOT="$([[ ${CONFIG_GET_ROOT:-0} == 1 ]] && config-repo-root "${BASH_SOURCE[0]}" || echo "${HOME}")"
readonly YCM_DIR="${REPO_ROOT}/submodules/vim/YouCompleteMe"
# Build YCM with the clangd completer; extra completer flags arrive in "$@".
install_with_clangd() {
echo 'Installing YCM with clangd'
# This is needed to fix an issue with seemingly old .so files which cause ycm
# to check if they can be loaded, which will fail.
find "${YCM_DIR}/third_party/ycmd/third_party/clang/lib" \
\( -name '*.so' -o -name '*.so.*' \) -exec rm {} \;
python3 ./install.py --clangd-completer "$@"
}
# Legacy path: build YCM against libclang (not called by main below).
install_with_libclang() {
echo 'Installing YCM with libclang'
extra_args=()
# case "${DISTRO}" in
# arch)
# extra_args=('--system-libclang')
# ;;
# esac
python3 ./install.py --clang-completer "${extra_args[@]}" "$@"
# https://github.com/ycm-core/YouCompleteMe/issues/3584
patchelf --set-rpath "${YCM_DIR}/third_party/ycmd/third_party/clang/lib" "${YCM_DIR}/third_party/ycmd/ycm_core.so"
}
main() {
# Detect distro- see https://unix.stackexchange.com/a/6348
source /etc/os-release
DISTRO="${ID}"
printf 'Running on Linux distribution: %s\n' "${DISTRO}"
if [[ "$(realpath "$(pwd)")" != "${YCM_DIR}" ]]; then
printf 'Not in YCM directory, switching to: %s\n' "${YCM_DIR}"
cd "${YCM_DIR}" || exit 1
fi
# NOTE: As of 2020-02-10, I switched to using clangd instead of libclang,
# since one of the YCM developers said it's now stable (although the docs say
# it's experimental). See:
# https://github.com/ycm-core/YouCompleteMe/issues/3584#issuecomment-584142715
install_with_clangd --go-completer --ts-completer --rust-completer "$@"
}
main "$@"
| true |
753cb14cff40d7eda70cc55ddeadf4887f3aa810 | Shell | mic90/lede-packages | /yun-scripts/files/usr/bin/wifi-reset-button-released | UTF-8 | 459 | 3.515625 | 4 | [] | no_license | #!/bin/sh
logger -t network "wifi reset button released"
if [ ! -f /tmp/wifi.reset ]
then
logger -t network "no previous /tmp/wifi.reset file found. exiting"
exit 0
fi
released_at=`date +%s`
pressed_at=`cat /tmp/wifi.reset`
pressed_for=`expr $released_at - $pressed_at`
logger -t network "wifi reset button pressed for $pressed_for"
if [ $pressed_for -gt 3 ]
then
logger -t network "resetting wifi"
sleep 2
wifi-sta
exit 0
fi
blink-stop system
| true |
085ce7b6045dfca2a84df97362e684c369865b1e | Shell | nishuihanqiu/manuscript | /redis/cmd/cluster.sh | UTF-8 | 4,102 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env bash
## Redis 集群教程
# 1.Redis集群介绍
# Redis 集群是一个提供在多个Redis间节点间共享数据的程序集。
# Redis集群并不支持处理多个keys的命令,因为这需要在不同的节点间移动数据,从而达不到像Redis那样的性能,在高负载的情况下可能会导致不可预料的错误.
# Redis 集群通过分区来提供一定程度的可用性,在实际环境中当某个节点宕机或者不可达的情况下继续处理命令. Redis 集群的优势:
# 自动分割数据到不同的节点上。
# 整个集群的部分节点失败或者不可达的情况下能够继续处理命令。
# 2.Redis 集群的数据分片
# Redis 集群没有使用一致性hash, 而是引入了 哈希槽的概念.
# Redis 集群有16384个哈希槽,每个key通过CRC16校验后对16384取模来决定放置哪个槽.集群的每个节点负责一部分hash槽,举个例子,比如当前集群有3
# 个节点,那么:
# 节点 A 包含 0 到 5500号哈希槽.
# 节点 B 包含5501 到 11000 号哈希槽.
# 节点 C 包含11001 到 16384号哈希槽.
# 这种结构很容易添加或者删除节点. 比如如果我想新添加个节点D, 我需要从节点 A, B, C中得部分槽到D上. 如果我想移除节点A,需要将A中的槽移到B和
# C节点上,然后将没有任何槽的A节点从集群中移除即可. 由于从一个节点将哈希槽移动到另一个节点并不会停止服务,所以无论添加删除或者改变某个节点的
# 哈希槽的数量都不会造成集群不可用的状态.
# 3.Redis 集群的主从复制模型
# 为了使在部分节点失败或者大部分节点无法通信的情况下集群仍然可用,所以集群使用了主从复制模型,每个节点都会有N-1个复制品.
# 在我们例子中具有A,B,C三个节点的集群,在没有复制模型的情况下,如果节点B失败了,那么整个集群就会以为缺少5501-11000这个范围的槽而不可用.
# 然而如果在集群创建的时候(或者过一段时间)我们为每个节点添加一个从节点A1,B1,C1,那么整个集群便有三个master节点和三个slave节点组成,
# 这样在节点B失败后,集群便会选举B1为新的主节点继续服务,整个集群便不会因为槽找不到而不可用了.
# 不过当B和B1 都失败后,集群是不可用的.
# 4.Redis 一致性保证
# Redis 并不能保证数据的强一致性. 这意味这在实际中集群在特定的条件下可能会丢失写操作.
# 第一个原因是因为集群是用了异步复制. 写操作过程:
# 客户端向主节点B写入一条命令.
# 主节点B向客户端回复命令状态.
# 主节点将写操作复制给他得从节点 B1, B2 和 B3.
# 主节点对命令的复制工作发生在返回命令回复之后, 因为如果每次处理命令请求都需要等待复制操作完成的话, 那么主节点处理命令请求的速度将极大地
# 降低 —— 我们必须在性能和一致性之间做出权衡。 注意:Redis 集群可能会在将来提供同步写的方法。 Redis 集群另外一种可能会丢失命令的情况是集
# 群出现了网络分区, 并且一个客户端与至少包括一个主节点在内的少数实例被孤立。
# 举个例子 假设集群包含 A 、 B 、 C 、 A1 、 B1 、 C1 六个节点, 其中 A 、B 、C 为主节点, A1 、B1 、C1 为A,B,C的从节点, 还有一个
# 客户端 Z1 假设集群中发生网络分区,那么集群可能会分为两方,大部分的一方包含节点 A 、C 、A1 、B1 和 C1 ,小部分的一方则包含节点 B 和客户
# 端 Z1 .
# Z1仍然能够向主节点B中写入, 如果网络分区发生时间较短,那么集群将会继续正常运作,如果分区的时间足够让大部分的一方将B1选举为新的master,那么
# Z1写入B中得数据便丢失了.
# 注意, 在网络分裂出现期间, 客户端 Z1 可以向主节点 B 发送写命令的最大时间是有限制的, 这一时间限制称为节点超时时间(node timeout),
# 是 Redis 集群的一个重要的配置选项
| true |
7db9426912ee7b11d830df5029c6a098ba0a275e | Shell | Ambitiont109/FastSFC | /ssh | UTF-8 | 789 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# First CLI argument selects the target host ("web" or "compute").
argument="$1"
# Print the accepted invocations.
display_usage() {
echo
echo "Usage: $0"
echo
echo " compute SSH into compute"
echo " web SSH into web"
echo
}
# Open an interactive root shell in /webapps on the web EC2 instance.
ssh_web() {
ssh -t -i ~/.ssh/fastsfc.pem ubuntu@ec2-3-1-117-36.ap-southeast-1.compute.amazonaws.com "cd /webapps && sudo su ; bash"
}
# Open an interactive root shell in /webapps on the compute EC2 instance.
ssh_compute() {
ssh -t -i ~/.ssh/fastsfc.pem ubuntu@ec2-13-250-105-132.ap-southeast-1.compute.amazonaws.com "cd /webapps && sudo su ; bash"
}
# Write an error message to stderr (exit status is left untouched).
raise_error() {
local error_message="$@"
echo "${error_message}" 1>&2;
}
if [[ -z $argument ]] ; then
raise_error "Missing argument"
display_usage
else
case $argument in
web)
ssh_web
;;
compute)
ssh_compute
;;
*)
raise_error "Unknown argument: ${argument}"
display_usage
;;
esac
fi
1d711ab24ec5213775754ee43e4320573e987d64 | Shell | ivilab/kjb | /lib/qd_cpp/test/interactive/varying_spq | UTF-8 | 1,002 | 3.03125 | 3 | [] | no_license | #!/bin/bash -ex
# $Id: varying_spq 20310 2016-02-01 11:32:25Z predoehl $
#for eee in 101 102 103 105 108 110 115
#EXTRA_CXX_FLAGS = -DSPQ_EXP=$eee -DSPQ_ROOT=100
# For each exponent value: rebuild test_spq with SPQ_EXP injected into the
# makefile, then run it twice -- a small 20-trajectory run for visualization
# and a large 1000-trajectory run for histogram statistics.
for eee in -2 -3 -10 -20 -30 -40 -50
do sed -i -e '/^EXTRA_CXX_FLAGS/d' -e "1i\
EXTRA_CXX_FLAGS = -DSPQ_EXP=$eee
" Makefile-2
touch test_spq.cpp
env PRODUCTION=1 make
# make a 20-trajectory one for visualization
fnd=det_$eee.svg
fns=sto_$eee.svg
./test_spq -w2 -i20 $fnd $fns
# make a big one: many samples. poor visualization but better histogram quality
# big one lacks .svg filename suffix, instead has .xxx suffix.
fndx=${fnd%.svg}.xxx
fnsx=${fns%.svg}.xxx
./test_spq -w2 -i1000 $fndx $fnsx
# Pull the per-sink distance samples out of each big run's output.
for fff in $fndx $fnsx
do for ((n=0; n < 11; ++n))
do sed -n "/sink_${n}_distance_/s/.*distance_\(.*\).>/\1/p" <$fff >dist_${fff%.xxx}_sink_$n.txt
done
done
done
# Restore the makefile to its original state.
sed -i -e '/^EXTRA_CXX_FLAGS/d' Makefile-2
# Generate histogram plots, to see empirical distribution of path lengths.
./spq_histo.bash
bb6779c3aa4e8641af1a3f3b9eb07f05f51bf648 | Shell | porpoiseless/elamxhi | /dev-quickstart.sh | UTF-8 | 468 | 2.875 | 3 | [] | no_license | #!/usr/bin/env bash
# npm packages to install, one per element (installed one-by-one below so a
# single failure is easy to spot in the output).
DEPENDENCIES=(webpack
webpack-cli
webpack-dev-server
style-loader
css-loader
lit-element
ramda)
echo "initializing npm..."
npm init -y
echo "installing dependencies"
for f in "${DEPENDENCIES[@]}"
do
echo "Installing ${f}"
npm install "${f}"
done
echo "initializing git..."
git init
echo "creating .gitignore"
# Keep node_modules out of version control.
echo "node_modules" >> .gitignore
mkdir src dist
| true |
a08b7696816ef50a1d503aa0400c4ef7354d5753 | Shell | ssyamoako/FADU | /benchmark_scripts/kallisto.sh | UTF-8 | 531 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# kallisto.sh - Script to execute 'kallisto index' and 'kallisto quant'
# $1 is one of the three organism input files
org_inputs=$1
# The sourced file is expected to define out_dir, nuc_cds_fna, threads,
# kallisto_stranded, fastq1 and fastq2 (all used below).
source $org_inputs
mkdir -p $out_dir
script_dir=`dirname $0`
# Quantification output goes into a fresh temp dir under $out_dir.
working_dir=$(mktemp -d -p "$out_dir")
/usr/local/packages/kallisto-0.44.0/kallisto index -i "${nuc_cds_fna}.kallisto.index" --make-unique "$nuc_cds_fna"
/usr/local/packages/kallisto-0.44.0/kallisto quant $kallisto_stranded -t "$threads" -i "${nuc_cds_fna}.kallisto.index" -o "${working_dir}" "$fastq1" "$fastq2"
exit 0
| true |
ba3dee06c41de3961d49694bd67ef11c28615876 | Shell | hf/riak-kv | /riak-up.sh | UTF-8 | 683 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -ex
# Hook scripts run before start, after start, and before stop, in sorted order.
PRESTART=$(find /etc/riak/prestart.d -name '*.sh' -print | sort)
POSTSTART=$(find /etc/riak/poststart.d -name '*.sh' -print | sort)
PRESTOP=$(find /etc/riak/prestop.d -name '*.sh' -print | sort)
for s in $PRESTART; do
. $s
done
# Echo the effective configuration into the container log.
cat /etc/riak/riak.conf
if [ -r "/etc/riak/advanced.config" ]
then
cat /etc/riak/advanced.config
fi
riak chkconfig
riak start
riak-admin wait-for-service riak_kv
riak ping
riak-admin test
for s in $POSTSTART; do
. $s
done
# Stream the console log as this script's foreground output.
tail -n 1024 -f /var/log/riak/console.log &
TAIL_PID=$!
# On SIGTERM/SIGINT run the prestop hooks, then stop the log tail so the
# 'wait' below returns and the script can exit.
function graceful_death {
for s in $PRESTOP; do
. $s
done
kill $TAIL_PID
}
trap graceful_death SIGTERM SIGINT
wait $TAIL_PID
| true |
2b0a8379aa78e9f88c80a7940edae9c748a4347f | Shell | Nozza-VS/misc-code | /NAS4Free/Other/notes.sh | UTF-8 | 4,681 | 2.546875 | 3 | [] | no_license | ################################################################################
##### Completed / Almost Complete but fully functional
################################################################################
# Emby
# OwnCloud
# NextCloud
################################################################################
##### In Progress
################################################################################
# Sooo much stuff
################################################################################
##### To-Do's / Future Changes / Planned Additions / etc.
################################################################################
#------------------------------------------------------------------------------#
### General
# FUTURE: Allow users to select owncloud/nextcloud version/ip/port via script
# without the need to edit the script manually.
#------------------------------------------------------------------------------#
### Voice Servers
# LOW-PRIORITY: Finish adding Teamspeak 3 Server & JTS3ServerMod (Server Bot)
# FUTURE: Add "Ventrilo"
# FUTURE: Add "Murmur" (Mumble)
#------------------------------------------------------------------------------#
### Media Download / Search / Management
# FUTURE: Add "Mylar" (Comic Books)
# FUTURE: Add "LazyLibrarian" (Books)
# FUTURE: Add "Sickbeard" (TV/Anime)
# FUTURE: Add "XDM"
# FUTURE: Finish adding "Calibre" (Books)
# FUTURE: Add "Radarr"
# FUTURE: Add "Watcher" (Movies - CouchPotato Alternative)
# FUTURE: Add "HTPC Manager" (Combines many services in one interface)
# FUTURE: Add "NZBHydra" (Meta search for NZB indexers)
# FUTURE: Add "Jackett" (Meta search for torrents)
# LOW-PRIORITY: Finish "Deluge" scripts (Lots of issues with it)
#------------------------------------------------------------------------------#
### Media Server
# FUTURE: Add "Plex"
# Maybe utilize ezPlex Portable Addon by JoseMR? (With permission of course)
# INSTALL
# cd $(myappsdir)
# fetch https://raw.githubusercontent.com/JRGTH/nas4free-plex-extension/master/plex-install.sh && chmod +x plex-install.sh && ./plex-install.sh
# UPDATE
# fetch https://raw.githubusercontent.com/JRGTH/nas4free-plex-extension/master/plex/plexinit && chmod +x plexinit && ./plexinit
# Or make use of OneButtonInstaller by "Crest"
# If not, use ports tree or whatever, will decide later.
# FUTURE: Add "Serviio"
# FUTURE: Add "SqueezeBox"
# FUTURE: Add "UMS (Universal Media Server)"
# FUTURE: If this script has no issues then I may remove the standalone scripts from GitHub
# FUTURE: IF & when jail creation via shell is possible for thebrig, will add that option to script.
#------------------------------------------------------------------------------#
### Web Server / Cloud Server
# FUTURE: Add "Pydio"
#------------------------------------------------------------------------------#
### Databases
# FUTURE: Add "MariaDB"
#------------------------------------------------------------------------------#
### System Monitoring
# LOW-PRIORITY: Finish adding "Munin"
# FUTURE: Add "Monit" (Free) & "M/Monit" (Free Trial but requires purchase)
# "M/Monit" is NOT required to be able to use "Monit"
#pkg install monit
#echo 'monit_enable="YES"' >> /etc/rc.conf
#cp /usr/local/etc/monitrc.sample /usr/local/etc/monitrc
#chmod 600 /usr/local/etc/monitrc
#service monit start
# FUTURE: Add "Zabbix"
# FUTURE: Add "Pandora"
# FUTURE: Add "Icinga"
# FUTURE: Add "Observium"
# FUTURE: Add "Cacti"
# FUTURE: Add "Nagios"
# FUTURE: Add "nTop"
# FUTURE: Add "Grafana"
#------------------------------------------------------------------------------#
### XMPP Server
# FUTURE: Add "Jabber" Server (Or Prosody as i'm pretty sure that is easier to set up)
#pkg install ejabberd
#echo 'ejabberd_enable="YES"' >> /etc/rc.conf
#cp /usr/local/etc/ejabberd/ejabberd.yml.example /usr/local/etc/ejabberd/ejabberd.yml
#chown 543:543 /usr/local/etc/ejabberd/ejabberd.yml
#service ejabberd start
#------------------------------------------------------------------------------#
### Other
# FUTURE: Add "Mail Server"
# FUTURE: Add OneButtonInstaller
# http://www.nas4free.org/forums/viewtopic.php?f=71&t=11189
# fetch https://raw.github.com/crestAT/nas4free-onebuttoninstaller/master/OBI.php && mkdir -p ext/OBI && echo '<a href="OBI.php">OneButtonInstaller</a>' > ext/OBI/menu.inc && echo -e "\nDONE"
################################################################################
# By Ashley Townsend (Nozza) Copyright: Beerware License
################################################################################
| true |
f80bbd27c32032fa5c0731f6ca077ad145b18b85 | Shell | aranw/updateghost.sh | /updateghost.sh | UTF-8 | 836 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# Written by Andy Boutte and David Balderston of howtoinstallghost.com and allaboutghost.com
# updateghost.sh will update your current ghost install to the latest version without you losing any content
#Check to make sure script is being run as root
if [[ `whoami` != root ]]; then
echo "This script must be run as root"
exit 1
fi
#Make a temporary directory and download the latest Ghost release
mkdir temp
cd temp
curl -L -O https://ghost.org/zip/ghost-latest.zip
unzip *.zip
cd ..
#Back up the content database before overwriting anything
cp content/data/ghost.db content/data/ghost_backup.db
#Copy the new files over ('yes' auto-answers cp's overwrite prompts)
yes | cp temp/*.md temp/*.js temp/*.json .
rm -R core
yes | cp -R temp/core .
yes | cp -R temp/content/themes/casper content/themes
npm install --production
#Delete temp folder
rm -R temp
echo "You can now start ghost with npm, forever or whatever else you use"
| true |
5fad3352e25a5b46fc3ea0b82e1f91b39549c83a | Shell | varmalx/jenkins | /helm-deploy.sh | UTF-8 | 411 | 2.953125 | 3 | [] | no_license |
# Redeploy the jenkins-nodeapp Helm chart: uninstall any existing release
# first, then install a freshly generated release name.
LastRelease=$(helm list | grep jenkins-nodeapp | cut -f1)
# -n with a quoted expansion replaces the old `[ $LastRelease ]`, which
# failed ("too many arguments") whenever grep matched more than one release.
if [ -n "$LastRelease" ] ; then
echo " App Previous version $LastRelease"
helm uninstall $LastRelease
helm inspect jenkins-nodeapp
helm list | grep jenkins-nodeapp
else
echo "App Fresh Install"
helm inspect jenkins-nodeapp
helm install jenkins-nodeapp --namespace default --generate-name
helm list | grep jenkins-nodeapp
fi
4666af0861463838468758ead7889a0229f3f2dc | Shell | icanccwhite/tgirt-dna-seq | /bisufile-seq/methyl_qc/picard.sh | UTF-8 | 504 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
PROJECT_PATH=${WORK}/cdw2854/bisufite_seq
BAMPATH=${PROJECT_PATH}/rmdupBAM
METRIC_PATH=${PROJECT_PATH}/methyl_metric
REF_PATH=${REF}/GRCh38/hg38_rDNA
REF_FASTA=${REF_PATH}/genome_rDNA.fa
mkdir -p ${METRIC_PATH}
# For each deduplicated BAM, print (note the echo -- the command is emitted,
# not executed) the Picard alignment-metrics invocation, suitable for piping
# into a job runner. The '.metrc' output suffix is kept as-is.
for BAM in ${BAMPATH}/*bam
do
SAMPLENAME=$(basename ${BAM%.bam})
echo picard CollectAlignmentSummaryMetrics \
INPUT=${BAM} \
REFERENCE_SEQUENCE=${REF_FASTA} \
IS_BISULFITE_SEQUENCED=true \
ASSUME_SORTED=true \
OUTPUT=${METRIC_PATH}/${SAMPLENAME}.alignment.metrc
done
| true |
d7609c03fd169e41ef5dd30bfb52fb8c7ca2819f | Shell | raidenawkward/design_pattern | /tools/pattern_common.sh | UTF-8 | 5,667 | 3.5625 | 4 | [] | no_license | #! /bin/bash
# functions for design pattern
# created by tome.huang
# NEEDS env TOOLS_ROOT
# Name of the per-project cmake config listing modules (not referenced
# elsewhere in this file; presumably used by callers that source it).
MODULE_LIST_FILE_NAME='pattern_config.cmake'
# $1 - dir path
# returns [echo]
# 0 - exists
# 1 - not exists
function check_dir_exists()
{
    # Quoting "$1" matters: the old unquoted form collapsed to `[ -d ]` for
    # an empty argument, which is true, so missing args reported "exists".
    if [ -d "$1" ];
    then
        echo 0
    else
        echo 1
    fi
}
# $1 - source path
# returns [echo]
# 0 - exists
# 1 - not exists
function check_source_exists()
{
    source_file=$1
    # Empty argument: report "not exists". Quoting keeps the tests valid for
    # empty values and for paths containing spaces.
    if [ -z "$source_file" ];
    then
        echo 1
        return
    fi
    if [ -f "$source_file" ];
    then
        echo 0
    else
        echo 1
    fi
}
# $1 pattern
# $2 file
# returns [echo]
# 0/1 - exits/not exists
# 2 - empty pattern
# 3 - no file passed in
function check_pattern_exists_in_file()
{
    pattern=$1
    file=$2
    if [ -z "$pattern" ];
    then
        # Fixed: without the return the function fell through and echoed a
        # second status value (e.g. "2" followed by "0"/"1"), so callers
        # comparing the captured output never matched.
        echo 2
        return
    fi
    if [ -z "$file" ];
    then
        echo 3
        return
    fi
    grep -q "^[[:space:]]*$pattern[[:space:]]*$" "$file"
    if [ $? == 0 ];
    then
        echo 0
    else
        echo 1
    fi
}
# $1 file path
# $2 backup suffix
# returns
# 0 - succeed
function make_backup_file()
{
    suffix=".old"
    # Refuse to back up a path that is not an existing regular file.
    if [ "$(check_source_exists "$1")" != "0" ];
    then
        echo "[WARNING] file not exists: $1"
        return 1
    fi
    if [ -n "$2" ];
    then
        suffix=$2
    fi
    # Spell out both operands (instead of the unquoted `$1{,$suffix}` brace
    # expansion) so the copy still works for paths containing spaces.
    cp -f "$1" "$1$suffix"
    return $?
}
# $1 pattern
# $2 file
# returns
# 0 - succeed
# Deletes every line consisting only of $pattern (optionally surrounded by
# whitespace) by rewriting the file through a temp copy.
# NOTE(review): $pattern is spliced into the sed address unescaped, so regex
# metacharacters in it change the match -- confirm callers pass plain text.
function pattern_remove_from_file()
{
pattern=$1
file=$2
if [ -z $pattern ];
then
return 1
fi
if [ "`check_source_exists $file`" != "0" ];
then
return 1
fi
temp_file=$file.tmp____
sed "/^[[:space:]]*$pattern[[:space:]]*$/d" $file > $temp_file
if [ $? != 0 ];
then
rm -f $temp_file
return 1
fi
if [ ! -f $temp_file ];
then
return 1
fi
# Replace the original only after sed succeeded.
mv $temp_file $file
return $?
}
# $1 pattern from
# $2 pattern to
# $3 file
# returns
# 0 - succeed
# Replaces whole lines matching $pattern_from (whitespace-tolerant) with
# $pattern_to, via a temp file. NOTE(review): both patterns are spliced into
# the sed script unescaped, so regex/replacement metacharacters leak through.
function pattern_rename_in_file()
{
pattern_from=$1
pattern_to=$2
file=$3
if [ -z $pattern_from ] || [ -z $pattern_to ] || [ -z $file ];
then
return 1
fi
if [ "`check_source_exists $file`" != "0" ];
then
return 1
fi
temp_file=$file.tmp____
sed "s/^[[:space:]]*$pattern_from[[:space:]]*$/$pattern_to/g" $file > $temp_file
if [ $? != 0 ];
then
rm -f $temp_file
return 1
fi
if [ ! -f $temp_file ];
then
return 1
fi
# Replace the original only after sed succeeded.
mv $temp_file $file
return $?
}
# get user's y/n choice
# $1 tip context
# returns (exit status, not echo):
# 0 - yes
# 1 - no
# Prints the prompt, reads one line from stdin; any answer other than a y/n
# variant prints an error and exits the WHOLE script with status 1.
function users_choice_get_YN()
{
echo "$*"
read s
res=1
case $s in
Y|y|YES|yes|Yes) res=0;;
N|n|NO|no|No) res=1;;
*)
echo [ERROR]invalid input
exit 1
;;
esac
if [ "$res" == "0" ];
then
return 0
else
return 1
fi
}
# $1 pattern
# $2 file
# returns
# 0 - succeed
# Inserts $pattern as a new line immediately before every line that consists
# solely of ")" -- presumably the closing paren of a cmake list; confirm the
# target files only contain one such line. Pattern is used unescaped in sed.
function pattern_add_to_file()
{
pattern=$1
file=$2
if [ -z $pattern ];
then
return 1
fi
if [ "`check_source_exists $file`" != "0" ];
then
return 1
fi
temp_file=$file.tmp____
sed "/^[[:space:]]*[)][[:space:]]*$/i $pattern" $file > $temp_file
if [ $? != 0 ];
then
rm -f $temp_file
return 1
fi
if [ ! -f $temp_file ];
then
return 1
fi
# Replace the original only after sed succeeded.
mv $temp_file $file
return $?
}
# $1 pattern
# $2 dir
# returns
# 0 - succeed
# Writes $dir/CMakeLists.txt for an executable target named after the
# pattern, with sources taken from ./src (template expanded via echo -e).
function __pattern_cmake_file_generate()
{
pattern=$1
dir=$2
if [ -z $pattern ] || [ -z $dir ];
then
return 1
fi
cmake_file=$dir/CMakeLists.txt
PATTERN_CMAKE_TEMPLETE="\
cmake_minimum_required(VERSION 2.8)\n\
\n\
PROJECT(`echo $pattern | tr [:lower:] [:upper:]`)\n\
\n\
SET(TAR $pattern)\n\
SET(SRC_DIR src)\n\
\n\
SET(INSTALLED_DIR \${PROJECT_BINARY_DIR}/../bin)\n\
\n\
AUX_SOURCE_DIRECTORY(\${SRC_DIR} SRC)\n\
\n\
ADD_EXECUTABLE(\${TAR} \${SRC})\n\
"
echo -e $PATTERN_CMAKE_TEMPLETE > $cmake_file
return $?
}
# $1 class
# [$2] dir - source code dir
# returns
# 0 - succeed
# Emits <class>.h (include-guarded skeleton with ctor/dtor) and <class>.cpp
# into $dir (default '.'); file names are the lower-cased class name.
function class_source_code_generate()
{
class=$1
dir=.
if [ -z $class ];
then
return 1
fi
if [ ! -z $2 ];
then
dir=$2
fi
header_file_name=`echo $class | tr [:upper:] [:lower:]`.h
cpp_file_name=`echo $class | tr [:upper:] [:lower:]`.cpp
PATTERN_HEADER_TEMPLETE="\
#ifndef _`echo $header_file_name | tr [:lower:] [:upper:] | tr . _`\n\
#define _`echo $header_file_name | tr [:lower:] [:upper:] | tr . _`\n\
\n\
class $class {\n\
\n\
public:\n\
\t$class();\n\
\tvirtual ~$class();\n\
};\n\
\n\
#endif // _`echo $header_file_name | tr [:lower:] [:upper:] | tr . _`\n\
"
echo -e $PATTERN_HEADER_TEMPLETE > $dir/$header_file_name
PATTERN_CPP_TEMPLETE="\
#include \"$header_file_name\"\n\
\n\
$class::$class()\n\
{\n\
}\n\
\n\
$class::~$class()\n\
{\n\
}\
"
echo -e $PATTERN_CPP_TEMPLETE > $dir/$cpp_file_name
return 0
}
# $1 pattern
# $2 dir - source code dir
# $3 class name
# returns
# 0 - succeed
# Writes $dir/main.cpp including the class header, then generates the class
# skeleton files; the function's status is that of class_source_code_generate
# (there is no explicit return).
function __pattern_source_code_generate()
{
pattern=$1
dir=$2
class=$3
if [ -z $pattern ] || [ -z $dir ] || [ -z $class ];
then
return 1
fi
header_file_name=`echo $class | tr [:upper:] [:lower:]`.h
cpp_file_name=`echo $class | tr [:upper:] [:lower:]`.cpp
PATTERN_MAIN_TEMPLETE="\
#include <stdio.h>\n\
#include <stdlib.h>\n\
\n\
#include \"$header_file_name\"\n\
\n\
\n\
int main(int argc, char** argv)\n\
{\n\
\treturn 0;\n\
}\
"
echo -e $PATTERN_MAIN_TEMPLETE > $dir/main.cpp
class_source_code_generate $class $dir
}
# $1 pattern
# $2 dir
# returns
# 0 - succeed
# Creates the pattern's directory with a CMakeLists.txt and a src/ subdir,
# then interactively asks for a class name and generates skeleton sources.
# A "no" answer to the confirmation terminates the whole script via exit 0.
function pattern_add_dir_source()
{
pattern=$1
dir=$2
if [ -z $pattern ];
then
return 1
fi
mkdir -p $dir
if [ $? != 0 ];
then
return 1
fi
__pattern_cmake_file_generate $pattern $dir
if [ $? != 0 ];
then
return 1
fi
mkdir -p $dir/src
if [ $? != 0 ];
then
return 1
fi
# Interactive: prompt the user for the class to generate.
echo "input class name for pattern $pattern"
read class_name
users_choice_get_YN "Create default code for \"$class_name\"?(y/n)"
if [ $? != 0 ];
then
echo "[USER STOPPED]"
exit 0
fi
__pattern_source_code_generate $pattern $dir/src $class_name
if [ $? != 0 ];
then
return 1
fi
return 0
}
| true |
7b7c9a106c4cd62b9a8f904e37cfa53e58e4fa1e | Shell | zerefdev/My-IT-Scripts | /bash/updating_roundcube.sh | UTF-8 | 1,552 | 4.03125 | 4 | [] | no_license | #!/bin/bash
#
# Description: script for updating Roundcube.
# The script runs as a regular user; any passwords that are
# required are prompted for at runtime.
# Author: Tronde (e-mail address: tronde(at)my-it-brain(dot)de)
# Date: 2016-12-11
# License: GPLv3
# Variables
INSTALL_PATH="" # path to the Roundcube installation
RC_DB_NAME=""
PACKAGE_URL="https://github.com/roundcube/roundcubemail/releases/download/1.2.3/roundcubemail-1.2.3.tar.gz" # download URL of the current Roundcube release
MYSQL_ROOT_USER=""
# Functions
# Abort the script when the previous command's status ($1) is non-zero.
check()
{
if [ $1 -gt 0 ]; then
echo "Uuups, hier ist was schiefgegangen"
echo "exit $1"
exit 1
fi
}
# Archive the Roundcube document root and dump its database into $HOME.
do_backup()
{
cd $HOME
echo "Backup des Roundcube-Wurzelverzeichnis"
tar cjf roundcube_rootdir_`date +"%Y-%m-%d"`.tar.bz2 $INSTALL_PATH/*
echo "Backup der Roundcube-Datenbank. Sie werden zur Eingabe des Passworts für den MySQL-Root-Benutzer aufgefordert."
mysqldump -u $MYSQL_ROOT_USER -p $RC_DB_NAME > roundcubedb_`date +"%Y-%m-%d"`.sql
}
# Download and unpack the new release, then run its in-place installer.
do_upgrade()
{
echo "Das Archiv mit der aktuellen Roundcube-Version wird heruntergeladen und entpackt."
wget $PACKAGE_URL
tar xf roundcubemail-*-complete.tar.gz
cd `basename roundcubemail-*.tar.gz -complete.tar.gz`
echo "Bitte geben Sie das sudo-Passwort des angemeldeten Benutzers ein, wenn Sie dazu aufgefordert werden. Folgen Sie den Anweisungen des Installationsscripts."
sudo ./bin/installto.sh $INSTALL_PATH
}
# Main flow: back up first, verify, then upgrade and verify again.
do_backup
check $?
do_upgrade
check $?
exit 0
| true |
4ea1b1821cd4054e9c5d0b8eed75e6eef70e36e7 | Shell | Quoin/qiip-lint | /test/unitTest.sh | UTF-8 | 324 | 2.953125 | 3 | [
"MIT"
] | permissive | #! /bin/bash
### unitTest.sh ###
# Rebuild the div-by-zero noncompliant object and count how many times the
# -Wdiv-by-zero warning appears in the captured compiler stderr.
function testForBitShift () {
cd ..
rm -f check/"div-by-zero"/noncompliant.o && make check/"div-by-zero"/noncompliant.o 2>temp.txt
occurance=$(grep -c "\-Wdiv-by-zero" temp.txt)
printf "Occurance: $occurance\n"
}
## Call and Run all Tests
# shunit2 discovers and runs the test* functions defined above.
. "../shunit2/shunit2"
| true |
e35ade8a1c7c278d24409a0488b3425dc4b3797c | Shell | jdgraefe/PBIS-scripts | /add_to_group-a.sh | UTF-8 | 764 | 4.34375 | 4 | [] | no_license | #! /bin/sh
#
# Add a list of servers to a group in AD.
# Create a file with the contents being a list of the servers you wish to add to the group
# Name the file/list with the name of the AD group
# FILENAME=ADGROUPNAME
# Contents = server names
# Usage: add_to_group-a.sh <FILENAME>
# check for the groupname arg and see if it's valid
if [ $# -ne 1 ]
then echo "one argument for groupname required"
exit 1
fi
# The single argument doubles as the AD group name and the host-list filename.
GNAME=$1
LIST=$1
# Number of AD groups matching the name (0 means the group does not exist).
NGROUPS=`/opt/pbis/bin/adtool -a search-group --name $GNAME |grep Total |awk '{print $3}' -`
if [ $NGROUPS -eq 0 ]
then echo "$1 may not be a proper AD group"
exit 1
fi
# Add the servers in the list to the named group
for host in `cat $LIST`
do
echo $host
# The trailing \$ targets the machine account (hostname$) rather than a user.
/opt/pbis/bin/adtool -a add-to-group --to-group=$1 --user=$host\$
done
| true |
9b0d4f52ab9adf6009d3a99755397930659b8a03 | Shell | 1000007201/Bootcamp | /Day-5-sequences-selections/2-if-else/grade.sh | UTF-8 | 224 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Prompt for an exam mark and print the letter-grade band it falls in:
# 80+ -> A, 70-79 -> B, 60-69 -> C, anything lower -> Fail.
read -p "Enter your exam Marks: " marks

if [ $marks -ge 80 ]; then
    echo "You got A"
elif [ $marks -ge 70 ]; then
    echo "You got B"
elif [ $marks -ge 60 ]; then
    echo "You got C"
else
    echo "Fail"
fi
0520050812347682727d7b761ccf7c94c903af01 | Shell | mamigot/sterling | /storage/start.sh | UTF-8 | 1,568 | 3.671875 | 4 | [] | no_license | #!/bin/bash
#
# Run on the server machine
#
#############################
# System Requirements (!!!) #
#############################
# Version 4.9 or greater of g++
echo "CHECK THAT YOU ARE USING VERSION 4.9 OF g++ OR GREATER USING 'g++ -version'."
echo "IF YOU ARE USING AN OLDER VERSION, THE APPLICATION WILL NOT COMPILE."
########################################################################
# Parameters to communicate with the client servers (change as needed) #
########################################################################
export DATASERVER_PORT=13002
export DATASERVER_BUFFSIZE=8192
########################################
# Application configuration parameters #
########################################
# The root of the project is the root of this script
# http://stackoverflow.com/a/246128/2708484
PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Make all root-level modules of the app accessible
export PATH=$PATH:$PROJECT_ROOT/main
# Application configuration variables
export CONFIG_PATH=$PROJECT_ROOT/config.txt
# Directory wherein the application data is stored
export STORAGE_FILES_PATH=$PROJECT_ROOT/volumes/
# Create it if it doesn't exist
# (an empty or missing directory makes the ls output empty, triggering mkdir)
if [ -z "$(ls $STORAGE_FILES_PATH 2>/dev/null)" ]; then
echo "Creating directory to hold the files... $STORAGE_FILES_PATH"
mkdir $STORAGE_FILES_PATH
fi
#########################################################
# Launch the server and start listening for connections #
#########################################################
make clean
make
./bin/runner
807af24ca038bb5d71b6f6d5b01d50a4f61af3f6 | Shell | smartao/shellscript_exemplos | /variaveis/variaveis.sh | UTF-8 | 1,159 | 3.46875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Load the variables file; it contains the line:
#TESTE="ABCD"
. /etc/scripts/variaveis/variaveis
# Check that the TESTE variable was loaded
# Expected value: ABCD
echo -e "TESTE = $TESTE \n"
# Iterate over the words of the file (each word is assigned to $i in turn)
for i in `cat /etc/scripts/variaveis/variaveis3`
do
# Show the value of variable i
# prints the value TESTE
echo "i = $i"
# Assign the value of i to the variable NOME
NOME=$i
# print the value of NOME, which is TESTE
echo -e "NOME = $NOME\n"
# Dereference $i as a variable name: with i=TESTE this prints the value
# of the TESTE variable, i.e. ABCD; same idea with $NOME
eval "echo i = \$$i"
eval "echo NOME = \$$NOME"
echo ""
# Assign the value of the variable named by $i (i.e. $TESTE) to NOMEOK.
# In other words, the string held in $i is used as a variable name, so this yields ABCD
NOMEOK=$(eval "echo \$$i")
# Print NOMEOK; its value is ABCD, the same as TESTE
echo "NOMEOK = $NOMEOK"
# Same explanation as above, but using the NOME variable
NOMEOK=$(eval "echo \$$NOME")
echo "NOMEOK = $NOMEOK "
done
| true |
a7ee27cdb1444ec2733732353a7d14835cc68638 | Shell | tfiska/xenserveroperation | /opt/xsoper/bootslaves.sh | UTF-8 | 2,792 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Bootscript
#
# Made by Trygve Gunnar Fiskaa, Evry
# Please ask me for help. :-)
# Mail/SIP: trygve.fiska@evry.com
# Phone: +47 41612477
# Rolling reboot of every slave in a XenServer pool: each slave is drained,
# rebooted, re-enabled, and its "home" (affinity) VMs are migrated back.
#Getting locals
SCRIPTNAME="${0##*/}"
PROCSTARTED=`date`
HOSTNAME=`hostname`
POOLMASTERUUID=`xe pool-list params=master --minimal`
#Defining standard variables
TMPDIR="/tmp"
#Defining Other variables
#Check pool master (pool.conf contains "master" only on the pool master)
if ! grep master /etc/xensource/pool.conf 1> /dev/null; then
echo "Please run on pool master to boot all the slaves"
exit 1;
fi
POOLMASTERNAME=`xe host-list uuid=$POOLMASTERUUID params=name-label --minimal`
# Slave hosts = pool members minus the master (stripped from the
# comma-separated --minimal output). Variable keeps the original
# "OTERHOSTNAMES" typo since nothing else references a corrected name.
OTHERHOSTSUUIDS=`xe host-list --minimal|sed "s/"$POOLMASTERUUID"//g"|sed "s/,,/,/g"|tr "," "\n"`
OTERHOSTNAMES=` xe host-list params=name-label --minimal|sed "s/"$POOLMASTERNAME"//g"|sed "s/,,/,/g"|tr "," "\n"|sort`
for TARGETHOSTNAME in $OTERHOSTNAMES
do
HOSTUUID=`xe host-list name-label=$TARGETHOSTNAME --minimal`
#TARGETHOSTNAME=`xe host-list uuid=$HOSTUUID params=name-label --minimal`
xe host-disable host=$TARGETHOSTNAME
echo "Rebooting : $TARGETHOSTNAME (UUID=$HOSTUUID)"
# Drain the host: keep evacuating until no VMs remain resident on it.
NUMBVMONHOST=`xe vm-list resident-on=$HOSTUUID --minimal|sed -e 's/,/\n,/g' |grep -c ","`
while [[ $NUMBVMONHOST != 0 ]]
do
echo "Trying Evacuate $NUMBVMONHOST vm's left on $TARGETHOSTNAME"
xe host-evacuate host=$TARGETHOSTNAME
sleep 5
NUMBVMONHOST=`xe vm-list resident-on=$HOSTUUID --minimal|sed -e 's/,/\n,/g' |grep -c ","`
done
xe host-reboot host=$TARGETHOSTNAME
date
# Wait for the host to actually go down (it appears in the
# host-metrics-live=false listing)...
HOSTINACTIVE=`xe host-list host-metrics-live=false uuid=$HOSTUUID |grep -c "$HOSTUUID"`
while [[ $HOSTINACTIVE != 1 ]]
do
HOSTINACTIVE=`xe host-list host-metrics-live=false uuid=$HOSTUUID |grep -c "$HOSTUUID"`
sleep 5
done
# ...and then wait for it to come back up (disappear from that listing).
while [[ $HOSTINACTIVE != 0 ]]
do
HOSTINACTIVE=`xe host-list host-metrics-live=false uuid=$HOSTUUID |grep -c "$HOSTUUID"`
sleep 5
done
xe host-enable host=$TARGETHOSTNAME
HOSTDISABLED=`xe host-list enabled=false uuid=$HOSTUUID |grep -c "$HOSTUUID"`
echo "Waiting for $TARGETHOSTNAME to become enabled"
while [[ $HOSTDISABLED != 0 ]]
do
HOSTDISABLED=`xe host-list enabled=false uuid=$HOSTUUID |grep -c "$HOSTUUID"`
sleep 5
done
# Migrate live VMs whose home server (affinity) is this host back onto it.
VMGOINGHOME=`xe vm-list affinity=$HOSTUUID is-control-domain=false live=true --minimal | tr "," "\n"`
for VMUUID in $VMGOINGHOME
do
printf "Moving \"`xe vm-list uuid=$VMUUID params=name-label --minimal`\" to it's home server :...."
xe vm-migrate uuid=$VMUUID host-uuid=$HOSTUUID
printf "done\n"
done
done
# Finally evacuate the master and, after a settling delay, migrate the VMs
# whose home is the master back onto it.
# NOTE(review): the master itself is never rebooted here — confirm intended.
xe host-evacuate host=$POOLMASTERUUID
sleep 30
HOSTUUID=$POOLMASTERUUID
VMGOINGHOME=`xe vm-list affinity=$HOSTUUID is-control-domain=false live=true --minimal | tr "," "\n"`
for VMUUID in $VMGOINGHOME
do
printf "Moving \"`xe vm-list uuid=$VMUUID params=name-label --minimal`\" to it's home server :...."
xe vm-migrate uuid=$VMUUID host-uuid=$HOSTUUID
printf "done\n"
done
| true |
b0fc65ada3eafd240a24f10abb3381154a9f4f30 | Shell | Winxton/Oanda-Api-Performance | /non-streaming/FIX/perf_analysis.sh | UTF-8 | 9,315 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Parse a FIX message log (arg1) and report per-request round-trip latency.
# Each section below extracts client requests and OANDA responses for one
# message type, pairs them by an id tag (11=ClOrdID, 790=OrdStatusReqID,
# 262=MDReqID) and prints "<Kind>,<id>,<diff>" rows.
# timesec() reduces an HH:MM:SS.micros timestamp to sec*1e6 + micros within
# the current minute (its initial total-seconds loop is overwritten by the
# final assignment), so diffs are only meaningful for request/response pairs
# logged within the same minute.
cmd=cat
awk=mawk
# A second argument of "tail" follows a growing log instead of reading a file.
if [ X$2 == "Xtail" ]; then
cmd="tail -F"
fi

# 35=D (NewOrderSingle), limit orders: ClOrdID (tag 11) matches test_limit_*
# arg1 is file to follow
(($cmd $1 | \
grep "35=D" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=D[^O]*49=\([A-Za-z_0-9]*\).*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*11=\(test_limit_[0-9a-zA-Z\_\-]*\).*60=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*/request \1 \3-\4 \2 \5 \6-\7/g' |grep "^request"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^request.*/ {
print "REQUEST: "$5 " " timesec($2) $1
}'); ($cmd $1 | \
grep "35=8" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=8[^O]*49=OANDA.*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*56=\([A-Za-z_0-9]*\).*11=\(test_limit_[0-9a-zA-Z\_\-]*\).*60=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*/response \1 \2-\3 \4 \5 \6-\7/g' | grep "^response"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^response.*/ {
print "RESPONSE: "$5 " " timesec($2) $1
}')) | \
awk 'BEGIN {print "Limit orders:"} \
/REQUEST/ { request[$2]=$3; }
/RESPONSE/{ response[$2]=$3; }
END {for (a in request){ diff=response[a]-request[a];if (diff) printf("Limit,%s,%s\n", a, diff);} }'

# 35=D open orders: ClOrdID matches test_open_*; responses must also carry
# 17=T (ExecID prefix — purpose unclear from here, kept as-is).
# arg1 is file to follow
(($cmd $1 | \
grep "35=D" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=D[^O]*49=\([A-Za-z_0-9]*\).*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*11=\(test_open_[0-9a-zA-Z\_\-]*\).*60=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*/request \1 \3-\4 \2 \5 \6-\7/g' |grep "^request"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^request.*/ {
print "REQUEST: "$5 " " timesec($2) $1
}'); ($cmd $1 | \
grep "35=8" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=8[^O]*49=OANDA.*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*56=\([A-Za-z_0-9]*\).*11=\(test_open_[0-9a-zA-Z\_\-]*\).*17=T.*60=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*/response \1 \2-\3 \4 \5 \6-\7/g' | grep "^response"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^response.*/ {
print "RESPONSE: "$5 " " timesec($2) $1
}')) | \
awk 'BEGIN {print "Open orders:"} \
/REQUEST/ { request[$2]=$3; }
/RESPONSE/{ response[$2]=$3; }
END {for (a in request){ diff=response[a]-request[a];if (diff) printf("Open,%s,%s\n", a, diff);} }'

# 35=D close orders: ClOrdID matches test_close_*
# arg1 is file to follow
(($cmd $1 | \
grep "35=D" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=D[^O]*49=\([A-Za-z_0-9]*\).*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*11=\(test_close_[0-9a-zA-Z\_\-]*\).*60=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*/request \1 \3-\4 \2 \5 \6-\7/g' |grep "^request"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^request.*/ {
print "REQUEST: "$5 " " timesec($2) $1
}'); ($cmd $1 | \
grep "35=8" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=8[^O]*49=OANDA.*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*56=\([A-Za-z_0-9]*\).*11=\(test_close_[0-9a-zA-Z\_\-]*\).*60=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*/response \1 \2-\3 \4 \5 \6-\7/g' | grep "^response"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^response.*/ {
print "RESPONSE: "$5 " " timesec($2) $1
}')) | \
awk 'BEGIN {print "Close orders:"} \
/REQUEST/ { request[$2]=$3; }
/RESPONSE/{ response[$2]=$3; }
END {for (a in request){ diff=response[a]-request[a];if (diff) printf("Close,%s,%s\n", a, diff);} }'

# 35=G (OrderCancel/ReplaceRequest): any ClOrdID
# arg1 is file to follow
(($cmd $1 | \
grep "35=G" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=G[^O]*49=\([A-Za-z_0-9]*\).*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*11=\([0-9a-zA-Z\_\-]*\).*60=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*/request \1 \3-\4 \2 \5 \6-\7/g' |grep "^request"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^request.*/ {
print "REQUEST: "$5 " " timesec($2) $1
}'); ($cmd $1 | \
grep "35=8" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=8[^O]*49=OANDA.*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*56=\([A-Za-z_0-9]*\).*11=\([0-9a-zA-Z\_\-]*\).*60=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*/response \1 \2-\3 \4 \5 \6-\7/g' | grep "^response"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^response.*/ {
print "RESPONSE: "$5 " " timesec($2) $1
}')) | \
awk 'BEGIN {print "Changed orders:"} \
/REQUEST/ { request[$2]=$3; }
/RESPONSE/{ response[$2]=$3; }
END {for (a in request){ diff=response[a]-request[a];if (diff) printf("Change,%s,%s\n", a, diff);} }'

# 35=F (OrderCancelRequest): any ClOrdID
# arg1 is file to follow
(($cmd $1 | \
grep "35=F" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=F[^O]*49=\([A-Za-z_0-9]*\).*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*11=\([0-9a-zA-Z\_\-]*\).*60=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*/request \1 \3-\4 \2 \5 \6-\7/g' |grep "^request"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^request.*/ {
print "REQUEST: "$5 " " timesec($2) $1
}'); ($cmd $1 | \
grep "35=8" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=8[^O]*49=OANDA.*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*56=\([A-Za-z_0-9]*\).*11=\([0-9a-zA-Z\_\-]*\).*60=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*/response \1 \2-\3 \4 \5 \6-\7/g' | grep "^response"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^response.*/ {
print "RESPONSE: "$5 " " timesec($2) $1
}')) | \
awk 'BEGIN {print "Cancelled orders:"} \
/REQUEST/ { request[$2]=$3; }
/RESPONSE/{ response[$2]=$3; }
END {for (a in request){ diff=response[a]-request[a];if (diff) printf("Cancel,%s,%s\n", a, diff);} }'

# 35=H (OrderStatusRequest), paired with 35=8 replies by tag 790
# arg1 is file to follow
(($cmd $1 | \
grep "35=H" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=H[^O]*49=\([A-Za-z_0-9]*\).*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*790=\([0-9a-zA-Z\_\-]*\).*/request \1 \3-\4 \2 \5/g' |grep "^request"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^request.*/ {
print "REQUEST: "$5 " " timesec($2) $1
}'); ($cmd $1 | \
grep "35=8" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=8[^O]*49=OANDA.*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*56=\([A-Za-z_0-9]*\).*790=\([0-9a-zA-Z\_\-]*\).*/response \1 \2-\3 \4 \5/g' | grep "^response"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^response.*/ {
print "RESPONSE: "$5 " " timesec($2) $1
}')) | \
awk 'BEGIN {print "Status request:"} \
/REQUEST/ { request[$2]=$3; }
/RESPONSE/{ response[$2]=$3; }
END {for (a in request){ diff=response[a]-request[a];if (diff) printf("Status,%s,%s\n", a, diff);} }'

# 35=V (MarketDataRequest) paired with 35=W snapshots by tag 262 (MDReqID).
# NOTE(review): the header below still prints "Status request:" while the
# rows are labelled "Rate," — looks like a copy-paste leftover.
# arg1 is file to follow
(($cmd $1 | \
grep "35=V" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=V[^O]*49=\([A-Za-z_0-9]*\).*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*262=\([0-9a-zA-Z\_\-]*\).*/request \1 \3-\4 \2 \5/g' |grep "^request"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^request.*/ {
print "REQUEST: "$5 " " timesec($2) $1
}'); ($cmd $1 | \
grep "35=W" | \
sed -e 's/^[0-9]*-\([0-9]*:[0-9]*:[0-9]*.[0-9]*\)[^X]*FIX[^X]*35=W[^O]*49=OANDA.*52=\(........\)-\([0-9]*:[0-9]*:[0-9]*\).*56=\([A-Za-z_0-9]*\).*262=\([0-9a-zA-Z\_\-]*\).*/response \1 \2-\3 \4 \5/g' | grep "^response"| \
$awk 'function timesec(str) {
split(str,arr1,".");
split(arr1[1],arr2,/:/);
sum = 0;
for (i in arr2) { sum = sum*60 + arr2[i]; }
sum = arr2[3]*1000000 + arr1[2];
return sum;
}
/^response.*/ {
print "RESPONSE: "$5 " " timesec($2) $1
}')) | \
awk 'BEGIN {print "Status request:"} \
/REQUEST/ { request[$2]=$3; }
/RESPONSE/{ response[$2]=$3; }
END {for (a in request){ diff=response[a]-request[a];if (diff) printf("Rate,%s,%s\n", a, diff);} }'
| true |
15a79a1e2cf6c84155fc51eb028ab9fc98727337 | Shell | mfkiwl/SDRReceiver | /ci-windows-build.sh | UTF-8 | 3,939 | 2.796875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#The MIT License (MIT)
#Copyright (c) 2015-2019 Jonathan Olds
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#windows build (for github "windows-latest")
#this is for 64bit mingw and msys2 install. all is done on the command line.
#fail on first error
set -e
#install build dependencies: mingw64 toolchain, Qt5, cmake, zeromq, libusb
#and packaging tools, all via the msys2 package manager
pacman -S --needed --noconfirm git mingw-w64-x86_64-toolchain autoconf libtool mingw-w64-x86_64-cpputest mingw-w64-x86_64-qt5 mingw-w64-x86_64-cmake mingw-w64-x86_64-libvorbis zip p7zip unzip mingw-w64-x86_64-zeromq mingw-w64-x86_64-libusb
#get script path
SCRIPT=$(realpath $0)
SCRIPTPATH=$(dirname $SCRIPT)
cd $SCRIPTPATH/..
#rtl-sdr: clone (or update an existing checkout), then build and install the
#library into /mingw64 with cmake + mingw32-make
FOLDER="rtl-sdr"
URL="https://github.com/osmocom/rtl-sdr"
if [ ! -d "$FOLDER" ] ; then
git clone $URL $FOLDER
cd "$FOLDER"
else
cd "$FOLDER"
git pull $URL
fi
mkdir -p build && cd build
cmake -G "MinGW Makefiles" -DCMAKE_INSTALL_PREFIX:PATH=/mingw64/ ..
mingw32-make
mingw32-make DESTDIR=/../ install
#SDRReceiver: build the Qt application itself
cd $SCRIPTPATH
#needed for github actions
git fetch --prune --unshallow --tags || true
git status > /dev/null 2>&1
PACKAGE_VERSION=1.0
PACKAGE_NAME=SDRReceiver
MAINTAINER=https://github.com/jeroenbeijer
PACKAGE_SOURCE=https://github.com/jeroenbeijer/SDRReceiver
echo "PACKAGE_NAME="$PACKAGE_NAME
echo "PACKAGE_VERSION="$PACKAGE_VERSION
echo "MAINTAINER="$MAINTAINER
echo "PACKAGE_SOURCE="$PACKAGE_SOURCE
qmake
mingw32-make
#package: deploy the Qt runtime next to the exe, then hand-copy the
#remaining mingw64 runtime DLL dependencies
mkdir release/sdrreceiver
cp release/SDRReceiver.exe release/SDRReceiver/
cd release/SDRReceiver
windeployqt.exe --force SDRReceiver.exe
cp /mingw64/bin/libstdc++-6.dll $PWD
cp /mingw64/bin/libgcc_s_seh-1.dll $PWD
cp /mingw64/bin/libwinpthread-1.dll $PWD
cp /mingw64/bin/zlib1.dll $PWD
cp /mingw64/bin/Qt5PrintSupport.dll $PWD
cp /mingw64/bin/libdouble-conversion.dll $PWD
cp /mingw64/bin/libicuin68.dll $PWD
cp /mingw64/bin/libicuuc68.dll $PWD
cp /mingw64/bin/libpcre2-16-0.dll $PWD
cp /mingw64/bin/libzstd.dll $PWD
cp /mingw64/bin/libharfbuzz-0.dll $PWD
cp /mingw64/bin/libpng16-16.dll $PWD
cp /mingw64/bin/libfreetype-6.dll $PWD
cp /mingw64/bin/libgraphite2.dll $PWD
cp /mingw64/bin/libglib-2.0-0.dll $PWD
cp /mingw64/bin/libicudt68.dll $PWD
cp /mingw64/bin/libbz2-1.dll $PWD
cp /mingw64/bin/libbrotlidec.dll $PWD
cp /mingw64/bin/libintl-8.dll $PWD
cp /mingw64/bin/libpcre-1.dll $PWD
cp /mingw64/bin/libbrotlicommon.dll $PWD
cp /mingw64/bin/libiconv-2.dll $PWD
cp /mingw64/bin/libzmq.dll $PWD
cp /mingw64/bin/librtlsdr.dll $PWD
cp /mingw64/bin/libsodium-23.dll $PWD
cp /mingw64/bin/libusb-1.0.dll $PWD
#add readme
#NOTE(review): the readme title still says "JAERO" — looks inherited from the
#upstream JAERO build script this was adapted from; confirm and fix upstream.
cat <<EOT > readme.md
# JAERO ${PACKAGE_VERSION}
### OS Name: $(systeminfo | sed -n -e 's/^OS Name://p' | awk '{$1=$1;print}')
### OS Version: $(systeminfo | sed -n -e 's/^OS Version://p' | awk '{$1=$1;print}')
### System Type: $(systeminfo | sed -n -e 's/^System Type://p' | awk '{$1=$1;print}')
### Build Date: $(date -u)
Cheers,<br>
ci-windows-build.sh
EOT
#compress the deployable folder into the release zip
cd ..
zip -r ${PACKAGE_NAME}_${PACKAGE_VERSION%_*}-1_win_$(uname -m).zip SDRReceiver
| true |
3ed088f4333144c713e856158f54f1dfaefec524 | Shell | zeta709/dotfiles | /zsh/dot.zsh | UTF-8 | 3,514 | 2.984375 | 3 | [] | no_license | ## temporary variables
## zsh startup fragment: env vars, aliases, completion, history, prompt.
## DOTDIR resolves to the repository root that contains this file.
readonly DOTDIR="${${${(%):-%x}:A:h}%/*}" # see self-path repo
## environment variables (not zsh-specific)
export PATH="$HOME/bin:$PATH"
export LANG="en_US.UTF-8"
export GDBHISTFILE="$HOME/.gdb_history"
## use ~/.dircolors when present, otherwise dircolors' built-in defaults
[[ -r "$HOME/.dircolors" ]] && eval "$(dircolors -b "$HOME/.dircolors")" || eval "$(dircolors -b)"
## common aliases (not zsh-specific)
alias grep='grep --color=auto --exclude-dir={.bzr,.git,.hg,.svn,CVS}'
alias vi="vim"
alias ls="ls --color=auto"
alias l="ls -l"
alias ll='ls -lah'
alias rm="rm -i"
alias cp="cp -i"
alias mv="mv -i"
alias cal="cal -mw"
alias server="python3 -m http.server" # simple server
alias viml='vim -R -n -u "NONE"'
alias date-iso='date --iso-8601=seconds'
alias vminfo='for file in /proc/*/status ; do awk '"'"'/Tgid|VmSwap|Name/{printf $2 " " $3}END{print ""}'"'"' $file; done | grep kB | sort -k 3 -n -r | less'
## zsh command aliases
alias history='history -i'
## suffix aliases: open these file types with $EDITOR
## (anonymous function scopes the fts/ft locals)
function {
local fts ft
fts=(c cc cpp h hpp md txt)
for ft in "${fts[@]}"; do
alias -s "$ft"='$EDITOR'
done
}
## global aliases
alias -g ...='../..'
alias -g ....='../../..'
alias -g .....='../../../..'
alias -g ......='../../../../..'
## zsh variables
## typeset -U array: keep unique elements
typeset -Ug fpath precmd_functions preexec_functions
fpath=("$HOME/.zfunc" "$DOTDIR/zsh/functions" "${fpath[@]}")
## directory options
unsetopt auto_cd
setopt auto_pushd
#setopt pushd_minus # FIXME: what's this?
## directory plugins
source "$DOTDIR/z/z.sh"
## completion modules
zmodload -i zsh/complist
## completion options
#setopt always_to_end # FIXME: what's this?
unsetopt menu_complete
unsetopt rec_exact
setopt complete_in_word
## completion zstyles
## zstyle ':completion:function:completer:command:argument:tag'
zstyle ':completion:*' menu select
zstyle ':completion:*' matcher-list \
'' 'm:{a-z-}={A-Z_}' 'm:{A-Z_}={a-z-}' 'm:{a-zA-Z-_}={A-Za-z_-}' \
'r:|.=*' 'l:|=* r:|.=*'
zstyle ':completion:*' use-cache on
zstyle ':completion:*:default' list-colors "${(s.:.)LS_COLORS}"
zstyle ':completion:*:descriptions' format '%U%B%d%b%u'
## completion init
autoload -Uz compinit
## anonymous function: if a per-host/per-version compdump modified within the
## last 24h exists (Nm-24 glob qualifier), reuse it without re-checking (-C);
## otherwise rebuild it with insecure-dir prompting (-i)
function {
local _compdump_files COMPDUMP_FILE
COMPDUMP_FILE="${HOME}/.zcompdump-${HOST/.*/}-${ZSH_VERSION}"
_compdump_files=("${COMPDUMP_FILE}"(Nm-24))
if (( $#_compdump_files )); then
compinit -C -d "${COMPDUMP_FILE}"
else
compinit -i -d "${COMPDUMP_FILE}"
fi
}
## expansion and globbing options
setopt extended_glob
## history options
unsetopt hist_expire_dups_first
unsetopt hist_ignore_all_dups
unsetopt hist_ignore_dups
unsetopt hist_ignore_space
unsetopt hist_save_no_dups
setopt extended_history
setopt hist_fcntl_lock
setopt hist_find_no_dups
setopt hist_no_store
setopt hist_reduce_blanks
setopt hist_verify
setopt share_history
## history parameters
## HISTORY_IGNORE: don't record trivial ls invocations
HISTFILE="$HOME/.zsh_history"
HISTORY_IGNORE="(ls|ll|ls -[laAh1]#)"
HISTSIZE=10000
SAVEHIST=10000
## input/output options
setopt interactive_comments
## job control options
unsetopt bg_nice
setopt long_list_jobs
## prompting options
setopt prompt_subst
## prompting parameters
## my_git_info (autoloaded, run before each prompt) sets $MY_GIT_INFO,
## which prompt_subst expands in PS1 below
autoload -Uz my_git_info
precmd_functions+=(my_git_info)
PS1='%(?..%F{red}%? )%(!.%F{red}.%F{green}%n@)%m %F{blue}%~ $MY_GIT_INFO'
PS1+=$'%($(($COLUMNS/2))l.\n.)%F{blue}%(!.#.$)%f '
RPS1="%F{green}%T%f"
## scripts and functions options
## bindkey settings
source "$DOTDIR/zsh/bindkey.zsh"
## misc
autoload -Uz colors && colors
autoload -Uz update_ssh_agent_env
## unset temporary variables
typeset +r DOTDIR
unset DOTDIR
| true |
2d56d9139af87c19b6da42ef00e1b6a65f42b96d | Shell | amoljagadambe/python_studycases | /apptware/synthetic_data/scripts/configure_mysql.sh | UTF-8 | 827 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Default credentials/names; all overridable via the environment.
ROOT_PASSWORD=${ROOT_PASSWORD:-root}
DB_NAME=${DB_NAME:-pieces_common}
DB_USER=${DB_USER:-pieces_dev}
DB_PASS=${DB_PASS:-pieces_dev}
# Start mysqld, create the application database and user, and load the schema.
__setup_credentials() {
echo "Setting up new DB and user credentials."
mkdir -p /var/run/mysqld
chown mysql:mysql /var/run/mysqld
# Launch the server in the background; the fixed sleep assumes it accepts
# connections within 10s — NOTE(review): polling would be more robust.
/usr/sbin/mysqld & sleep 10
mysql --user=root --password=$ROOT_PASSWORD -e "CREATE DATABASE $DB_NAME"
# Grant access both from localhost and from any remote host ('%').
mysql --user=root --password=$ROOT_PASSWORD -e "GRANT ALL PRIVILEGES ON $DB_NAME.* TO '$DB_USER'@'localhost' IDENTIFIED BY '$DB_PASS'; FLUSH PRIVILEGES;"
mysql --user=root --password=$ROOT_PASSWORD -e "GRANT ALL PRIVILEGES ON $DB_NAME.* TO '$DB_USER'@'%' IDENTIFIED BY '$DB_PASS'; FLUSH PRIVILEGES;"
# Load every schema file shipped in the image into the new database.
cat /app/schema/*.sql | mysql --user=root --password=$ROOT_PASSWORD $DB_NAME
sleep 10
}
__setup_credentials
| true |
50444b2752c6fb7ae50d38a3c787bc0e1a9d42f4 | Shell | MastersAcademy/devops-course-2020 | /lesson 3/michael.pereverov/project.sh | UTF-8 | 1,005 | 2.828125 | 3 | [] | no_license | #!/bin/bash
# Dev-stack launcher: one postgres, one redis and one ruby (sinatra) app
# container on a shared user-defined network so they resolve by name.
POSTGRES_HOST=postgres-ma
POSTGRES_DB=db
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgrespasswd
REDIS_HOST=redis-ma
REDIS_PASSWORD=p4ssw0rd
# Connection URLs handed to the app container below.
DB="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}/${POSTGRES_DB}"
CACHE="redis://:${REDIS_PASSWORD}@${REDIS_HOST}:6379/0"
# || true: ignore the error when the network already exists.
docker network create ma-net || true
# Postgres with data persisted in a named volume.
docker run -d --rm \
--name ${POSTGRES_HOST} \
--network=ma-net \
-e POSTGRES_DB=${POSTGRES_DB} \
-e POSTGRES_USER=${POSTGRES_USER} \
-e POSTGRES_PASSWORD=${POSTGRES_PASSWORD} \
-v postgresql-data:/var/lib/postgresql/data/ \
postgres:9.6.5-alpine
# Redis, password-protected via --requirepass.
docker run -d --rm \
--name redis-ma \
--network=ma-net \
redis:6.0.9-alpine \
redis-server --requirepass ${REDIS_PASSWORD}
# Ruby app: host port 9000 -> container 4567 (sinatra default).
# NOTE(review): the bind-mount path is hard-coded to one developer's home
# directory — confirm before running elsewhere.
docker run -d --rm \
--name ruby-ma \
-p 9000:4567 \
--network=ma-net \
-e DB=${DB} \
-e CACHE=${CACHE} \
-v /home/mike/Documents/MA/devops-course-2020/lesson\ 3/:/usr/src/app/ \
-w /usr/src/app \
ruby:2.6.0 \
ruby server.rb
# docker stop $(docker container ls -aq)
| true |
6ef01bd6aa120b5b67cbae880821615d97feca24 | Shell | mradcliffe/migrate_pantheon | /setup-global-vars.sh | UTF-8 | 338 | 3.1875 | 3 | [] | no_license | #!/bin/bash
set -ex
# CI container has no known_hosts: accept all SSH host keys non-interactively.
mkdir -p $HOME/.ssh && echo "StrictHostKeyChecking no" >> "$HOME/.ssh/config"
# Set up BASH_ENV if it was not set for us.
BASH_ENV=${BASH_ENV:-$HOME/.bashrc}
touch $BASH_ENV
# Append (single-quoted, so expanded at source time, every shell) exports:
# TERMINUS_ENV is derived from the CircleCI build number and SITE_ENV
# combines it with TERMINUS_SITE for Pantheon/terminus commands.
(
echo 'export TERMINUS_ENV=${CIRCLE_BUILD_NUM}'
echo 'export SITE_ENV=${TERMINUS_SITE}.${TERMINUS_ENV}'
) >> $BASH_ENV
source $BASH_ENV
| true |
39574876bd05a8ee972674c41dd5db838a4d1116 | Shell | 250394/build | /setup/update-local-arm-git.sh | UTF-8 | 612 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# ARM build hosts outside the ansible inventory that also get the mirrors.
extrahosts="iojs-ns-xgene-1 iojs-ns-xgene-2 iojs-ns-xgene-3 iojs-ns-xu3-1"
# Per-run scratch directory holding the bare --mirror clones.
tgit=/tmp/git.$$
rm -rf $tgit
mkdir $tgit
git clone https://github.com/nodejs/node.git ${tgit}/io.js.reference --mirror # --reference ~/git/iojs/io.js
git clone git@github.com-iojs:janeasystems/node_binary_tmp.git ${tgit}/node_binary_tmp.reference --mirror
# Target hosts: every iojs-ns-pi entry from the inventory plus the extras.
list="$(cat ansible-inventory | grep ^iojs-ns-pi) $extrahosts"
for host in $list; do
echo "Updating ${host}..."
# NOTE(review): rsync is intentionally run twice per host, presumably as a
# second convergence pass over slow ARM targets — confirm.
rsync -avz --times --perms --delete --links $tgit/ iojs@${host}:git/
rsync -avz --times --perms --delete --links $tgit/ iojs@${host}:git/
done
rm -rf $tgit
| true |
40c36a4a9f2c001e491ba463ef0b936185b1080d | Shell | tomhoule/prisma-query | /.buildkite/docker.sh | UTF-8 | 1,095 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
MYSQL_ROOT_PASSWORD=prisma
# Private network so the build container reaches the databases by name.
docker network create test-net
# Throwaway postgres for the test run.
docker run --name test-postgres --network test-net \
-e POSTGRES_PASSWORD=prisma \
-e POSTGRES_USER=prisma \
-e POSTGRES_DB=prisma -d postgres
# Throwaway mysql for the test run.
docker run --name test-mysql --network test-net \
-e MYSQL_USER=prisma \
-e MYSQL_DATABASE=prisma \
-e MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD \
-e MYSQL_PASSWORD=prisma -d mysql
# Run the test suite with the buildkite checkout mounted at /build; the two
# databases are reachable by container name over test-net.
docker run -w /build --network test-net -v $BUILDKITE_BUILD_CHECKOUT_PATH:/build \
-e TEST_PG_HOST=test-postgres \
-e TEST_PG_PORT=5432 \
-e TEST_PG_DB=prisma \
-e TEST_PG_USER=prisma \
-e TEST_PG_PASSWORD=prisma \
-e TEST_MYSQL_HOST=test-mysql \
-e TEST_MYSQL_PORT=3306 \
-e TEST_MYSQL_DB=prisma \
-e TEST_MYSQL_USER=prisma \
-e TEST_MYSQL_PASSWORD=prisma \
-e TEST_MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD \
prismagraphql/rust-build:latest cargo test
# Preserve cargo test's status across the cleanup below.
exit_code=$?
docker stop test-postgres
docker rm test-postgres
docker stop test-mysql
docker rm test-mysql
docker network rm test-net
exit $exit_code
| true |
654760542d46b29932bf15f5f4eac76270c2b46d | Shell | boschrexroth/ctrlx-automation-sdk | /scripts/environment/scripts/install-ctrlx-datalayer-deb.sh | UTF-8 | 966 | 3.8125 | 4 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/usr/bin/env bash
# Glob for the package(s) to install; left unquoted below so it expands.
DEP_PACKAGE=*.deb
# Local apt repository directory the package is published into.
DEP_DIR=/etc/apt/ctrlx-datalayer
echo " "
echo "=================================================================================="
echo Installing ${DEP_PACKAGE} into ${DEP_DIR}
echo "=================================================================================="
echo " "
# Proceed only if at least one .deb matches the glob in the current dir.
if compgen -G ${DEP_PACKAGE} > /dev/null; then
# Exists
# Install package containing required component dpkg-scanpackages
sudo apt install -y dpkg-dev
# Build the apt Packages index for the local debs
sudo dpkg-scanpackages -m . > Packages
sudo mkdir -p ${DEP_DIR}
sudo cp ${DEP_PACKAGE} ${DEP_DIR}
sudo mv Packages ${DEP_DIR}
# Register the directory as a trusted local apt source
sudo sh -c "echo 'deb [trusted=yes] file:${DEP_DIR} ./' > /etc/apt/sources.list.d/ctrlx-automation.list"
# Use newest sources list
sudo apt update
# Install newest ctrlx-datalayer package
sudo apt-get install -y ctrlx-datalayer
else
echo "ERROR ${DEP_PACKAGE} not found!"
fi
| true |
6065413af80037771b5329da36a45fd027e7b20d | Shell | jesa7955/context-translation | /experiments/fairseq_configs/fairseq_benchmark.zsh | UTF-8 | 2,801 | 3.171875 | 3 | [
"MIT"
] | permissive | SPM_MODEL_PATH="/data/10/litong/NICT-MT/all-4-sentencepiece-en_ja-32000.model"
BASE_PATH="/data/temp/litong/context_nmt/fairseq_temp/"
CUDA_VISIBLE_DEVICES=${1}
BASE_SAVE_PATH=${2}
DATA_BASE=${BASE_PATH}/data-bin/
RESULT_FILE=${BASE_SAVE_PATH}/results.txt
mkdir -p ${BASE_SAVE_PATH}
for LANG_PAIR in $(echo "en_ja ja_en" | tr " " "\n")
do
echo ${LANG_PAIR} >> ${RESULT_FILE}
echo "------------------------------" >> ${RESULT_FILE}
SOURCE_LANG=$(echo ${LANG_PAIR} | cut -d"_" -f 1)
TARGET_LANG=$(echo ${LANG_PAIR} | cut -d"_" -f 2)
SAVE_PATH=${BASE_SAVE_PATH}/${LANG_PAIR}
mkdir -p ${SAVE_PATH}
# for MODEL in $(find ${BASE_PATH} -maxdepth 1 -name "*${LANG_PAIR}*"| grep -v factored | grep -v "1-to-1" | sort -u | rev | cut -d"/" -f 1 | rev)
for MODEL in $(find ${BASE_PATH} -maxdepth 1 -name "*${LANG_PAIR}*"| grep -v factored | sort -u | rev | cut -d"/" -f 1 | rev)
do
echo "Model -> ${MODEL}" >> ${RESULT_FILE}
for DATA in $(ls -tl resources/test*${SOURCE_LANG} | grep "Jan 24\|Jan 25" | tail -10 | sort -k 5 --reverse | cut -d" " -f 9 | head -5)
do
DATA_NAME="Previous_$(head -5 ${DATA} | grep -c "^@@CONCAT@@")"
SCORE=""
for SPLIT in $(echo "valid\ntest")
do
REFERENCE_DATA=$(echo ${DATA} | sed "s/test/${SPLIT}/g"| cut -d"." -f 1).${TARGET_LANG}
TARGET=${SAVE_PATH}/${MODEL}_${DATA_NAME}_${SPLIT}
REFERENCE=${TARGET}.ref
MODEL_PATH=${BASE_PATH}/${MODEL}
DATA_PATH=${DATA_BASE}/${MODEL}
cat $(echo ${DATA} | sed "s/test/${SPLIT}/g") | fairseq-interactive --path ${MODEL_PATH}/checkpoint_best.pt \
--beam 6 --user-dir context_nmt --buffer-size 512 --batch-size 256 \
--source-lang ${SOURCE_LANG} --target-lang ${TARGET_LANG} ${DATA_PATH} | tee /tmp/gen.out
grep ^H /tmp/gen.out | cut -f3- > /tmp/gen.out.sys
spm_decode --model=${SPM_MODEL_PATH} --input_format=piece < /tmp/gen.out.sys > /tmp/gen.out.sys.retok
spm_decode --model=${SPM_MODEL_PATH} --input_format=piece < ${REFERENCE_DATA} > /tmp/gen.out.ref.retok
if [ "$LANG_PAIR" = "en_ja" ]; then
mecab -O wakati < /tmp/gen.out.sys.retok > ${TARGET}
mecab -O wakati < /tmp/gen.out.ref.retok > ${REFERENCE}
else
cp /tmp/gen.out.sys.retok ${TARGET}
cp /tmp/gen.out.ref.retok ${REFERENCE}
fi
SCORE="${SCORE}\t${SPLIT}: $(sacrebleu -b -w 2 ${REFERENCE} < ${TARGET})"
done
echo "\tDATA -> ${DATA_NAME}: ${SCORE}" | tee -a ${RESULT_FILE}
done
done
echo >> ${RESULT_FILE}
done
| true |
597627b11cc7bc34fb156ad323becdedb767f50e | Shell | BRMcLaren/test-infra | /config/jenkins/master/install-plugins.sh | UTF-8 | 226 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -eox pipefail
#
# Install the list of saved plugins
#
# Resolve this script's directory so plugins.txt is found regardless of CWD.
DIR=$(dirname $0)
# Run Jenkins' bundled installer inside the running container against the
# live Jenkins home.
docker exec -e REF=/var/jenkins_home jenkins /usr/local/bin/install-plugins.sh $(cat $DIR/plugins.txt)
# Restart so the newly installed plugins are loaded.
systemctl restart jenkins
| true |
f5d975b52dd349f96048605c57feffeb297ca858 | Shell | summis/.dotfiles | /.scripts/gsearch.sh | UTF-8 | 239 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Google search from rofi: prompt for a query and open the results page.
url="https://www.google.com/search?q="

# Feed rofi a single blank line so it shows an empty dmenu prompt.
query=$(printf '\n' | rofi -dmenu -matching fuzzy -location 0 -p "Google search")

# Nothing entered (or rofi dismissed): bail out, propagating the failed
# test's status exactly as the original else-branch `exit` did.
[[ -n "$query" ]] || exit

xdg-open "$url$query"
exit 0
| true |
e4a8cfd7150f619d0f1ebbbcba6f8ee610596549 | Shell | jdonenine/k8ssandra-api-service | /scripts/1-node-cluster-gcp-existing/setup.sh | UTF-8 | 991 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Remove any previous release first (first run just reports "not found").
helm uninstall k8ssandra-release
## Cluster environment bootstrapping
# Export the current (flattened, minified) kube context for other tooling.
kubectl config view --flatten --minify > ../../.kubeconfig
echo "Bootstrapping initial cluster environment complete, kubeconfig temporarily exported to ../../.kubeconfig"
echo "Deploying k8ssandra..."
helm repo add k8ssandra-stable https://helm.k8ssandra.io/stable
helm repo update
helm install k8ssandra-release k8ssandra-stable/k8ssandra -f k8ssandra.values.yaml
echo "Deploying k8ssandra complete."
echo "Deploying k8ssandra-app-user secret..."
# NOTE(review): hard-coded demo credentials — suitable for a dev cluster only.
kubectl create secret generic k8ssandra-api-service-user --from-literal=username='admin' --from-literal=password='admin123'
# Echo the credentials back, decoded from the stored secret.
echo "Application username:"
echo $(kubectl get secret k8ssandra-api-service-user -o=jsonpath='{.data.username}' | base64 --decode)
echo "Application password:"
echo $(kubectl get secret k8ssandra-api-service-user -o=jsonpath='{.data.password}' | base64 --decode)
echo "Deploying k8ssandra-app secret complete." | true |
d4cc29f836b5cd40505e737a09688b93aced0dae | Shell | burkejasonj/arch-install-scripts | /install.sh | UTF-8 | 1,661 | 3.234375 | 3 | [] | no_license | #!/bin/sh
# Unattended Arch Linux (BIOS) installer for "ARCHPad".
# DANGER: repartitions and formats /dev/sda without confirmation.
echo Install Script V0.0.1 for ARCHPad
echo \(C\) 2021 CaptnJason.
echo
echo KEYBOARD LAYOUT: US
echo
# Listing efivars only shows whether the system booted via UEFI; the script
# itself expects (and later installs GRUB for) legacy BIOS boot.
ls /sys/firmware/efi/efivars
echo Boot mode should be BIOS. ^C to exit script.
echo
echo Setting up DCHPCD. Assuming wired connection.
# Connectivity check; result is not acted on (see "Assuming successful ping").
ping -c 5 rebelgaming.org | grep -v time=
echo done. Assuming successful ping.
echo
echo Setting time.
timedatectl set-ntp true
echo done.
echo
echo Partitioning /dev/sda
fdisk -l
# Scripted fdisk input: sda1 = 512M (type 82, swap), sda2 = rest of disk.
echo -e "n\np\n\n\n+512M\nt\n82\nn\np\n\n\n\nw\n" | fdisk /dev/sda
echo done.
echo
echo Making root file system
mkfs.ext4 /dev/sda2
echo done.
echo
echo Making swap file system
mkswap /dev/sda1
echo done.
echo
echo Mounting root file system at /mnt
mount /dev/sda2 /mnt
echo done.
echo
echo Enabling swap file system.
swapon /dev/sda1
echo done.
echo
echo Running pacstrap to install base packages.
pacstrap /mnt base linux linux-firmware
echo done.
echo
echo Running pacstrap to install features.
pacstrap /mnt grub vim nano dhcpcd man-db man-pages texinfo sudo
echo done.
echo
echo Generating fstab.
genfstab -U /mnt >> /mnt/etc/fstab
echo done.
echo
echo Changing root. Completing install.
# The quoted block below is a script fed to arch-chroot's shell: timezone,
# locale, hostname/hosts, initramfs and BIOS GRUB install on /dev/sda.
echo "ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
hwclock --systohc
echo -e \"177G\nx:wq\" | vim /etc/locale.gen
locale-gen
echo LANG=en_US.UTF-8 > /etc/locale.conf
echo archpad > /etc/hostname
echo \"127.0.0.1 localhost
::1 localhost
127.0.1.1 archpad.localdomain archpad\" > /etc/hosts
mkinitcpio -P
grub-install --target=i386-pc /dev/sda
grub-mkconfig -o /boot/grub/grub.cfg
exit
" | arch-chroot /mnt
echo done. Root Password is archpad. Have fun!
echo
echo Rebooting in 5 seconds.
sleep 5
reboot now | true |
0521824d12a303ae5d72c5f324fbcca32d490b19 | Shell | jwilk/fuzzing | /tools/afl-varcheck | UTF-8 | 942 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Copyright © 2017-2018 Jakub Wilk <jwilk@jwilk.net>
# SPDX-License-Identifier: MIT
# Run a target command under afl-showmap repeatedly and abort (SIGABRT) if
# the coverage map ever differs between runs, i.e. the target exhibits
# variable behavior that would hurt AFL fuzzing stability.
set -e -u
# Print usage on stderr and exit non-zero.
usage()
{
printf 'Usage: %s <command> ...\n' "${0##*/}" >&2
exit 1
}
[ $# -gt 0 ] || usage
tmpdir=$(mktemp -d -t afl-varcheck.XXXXXX)
# Save stdin to a file so the exact same input can be replayed on every
# iteration; with a tty on stdin, feed no input at all.
if [ -t 0 ]
then
input=/dev/null
else
input="$tmpdir/input"
cat > "$input"
fi
# run_afl_showmap SLOT CMD...: record CMD's coverage map into $tmpdir/SLOT;
# die if afl-showmap fails or the target produced no instrumentation output.
run_afl_showmap()
{
j="$1"
shift
afl-showmap -m none -q -o "$tmpdir/$j" -- "$@" < "$input" || {
printf 'afl-varcheck: afl-showmap failed\n' >&2
exit 1
}
[ -s "$tmpdir/$j" ] || {
printf 'afl-varcheck: no instrumentation detected\n' >&2
exit 1
}
}
# Baseline map, then up to 24 comparison runs against it.
run_afl_showmap 1 "$@"
i=1
while [ $i -lt 25 ]
do
run_afl_showmap 2 "$@"
if ! cmp -s "$tmpdir/1" "$tmpdir/2"
then
rm -rf "$tmpdir"
printf 'afl-varcheck: variable behavior detected\n' >&2
# SIGABRT so callers (and shells) see an abnormal termination.
kill -s ABRT $$
fi
i=$((i + 1))
done
rm -rf "$tmpdir"
# vim:ts=4 sts=4 sw=4 et
| true |
9dbd990b455e7c3ed30e934344f04d73078edf07 | Shell | robinrob/zsh | /practice/exec_args.zsh | UTF-8 | 155 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env zsh
# Example: ./exec_args.zsh git status
# NOT ./exec_args.zsh "git status"

# Execute the script's arguments directly as a command line.
$@

# doit: stash the arguments in $cmd, then run them as a command —
# demonstrates that the unquoted $@ expands back into separate words.
doit() {
	cmd=$@
	$@
}

# ls /usr/bin/git
doit $@
| true |
9e6f70183d684e37b6ab2193e4498f656a879997 | Shell | basha48/empstatus | /wagespermonth.sh | UTF-8 | 414 | 3.140625 | 3 | [] | no_license | #! /bin/bash
totaldays=20;
wageperhour=20
fullday=8
count=0
calculatewage=$((wageperhour * fullday))
for (( i=1; i<$totaldays; i++ ))
do
empstatus[$i]=$((RANDOM%2))
if [ ${empstatus[$i]} -eq 1 ]
then
count=$(($count+1));
fi
done
totalwage=$(($calculatewage *$count))
echo "out of 20 working days no of present days are: $count"
echo "salary for $count days are : $totalwage "
#echo ${empstatus[@]}
| true |
d6864b2ee3a41d6364722311ef14c48cca6a45b8 | Shell | shyiko/dotfiles | /bash.d/mvnw.bash | UTF-8 | 242 | 2.65625 | 3 | [] | no_license | function mvnw-init() {
curl -sL https://github.com/shyiko/mvnw/releases/download/0.1.0/mvnw.tar.gz | tar xvz
(MAVEN_VERSION=3.2.5 &&
sed -iEe "s/[0-9]\+[.][0-9]\+[.][0-9]\+/${MAVEN_VERSION}/g" .mvn/wrapper/maven-wrapper.properties)
}
| true |
1b417443dce3a9d4c9b845cc089f5dc0586de746 | Shell | christiam/bdb4pq | /gcp-utils/run-conversion-to-parquet.sh | UTF-8 | 512 | 2.734375 | 3 | [
"LicenseRef-scancode-us-govt-public-domain"
] | permissive | #!/bin/bash
# gcp-utils/run.sh: What this script does
#
# Author: Christiam Camacho (christiam.camacho@gmail.com)
# Created: Fri Mar 1 04:13:23 2019
SCRIPT_DIR=$(dirname $0)
source ${SCRIPT_DIR}/common.sh
set -euo pipefail
CLUSTER_ID=$($SCRIPT_DIR/get-my-active-cluster-id.sh)
SRC=$SCRIPT_DIR/../src/migrate2pq.py
if [ ! -z "$CLUSTER_ID" ] ; then
gcloud dataproc jobs submit pyspark $SRC \
--cluster ${CLUSTER_ID} \
--region ${GCP_REGION} \
-- gs://camacho-test/nr/nr.*-meta.csv gs://camacho-test/nr/nr.parquet
fi
| true |
62b4c915756f232b72494d84f55b7d75f2dbbb1f | Shell | frgfm/Holocron | /docs/build.sh | UTF-8 | 1,641 | 3.15625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | function deploy_doc(){
if [ ! -z "$1" ]
then
git checkout $1
fi
COMMIT=$(git rev-parse --short HEAD)
echo "Creating doc at commit" $COMMIT "and pushing to folder $2"
# Hotfix
if [ -d ../requirements.txt ]; then
sed -i "s/^torchvision.*/&,<0.11.0/" ../requirements.txt
fi
sed -i "s/torchvision>=.*',/&,<0.11.0',/" ../setup.py
sed -i "s/',,/,/" ../setup.py
pip install -U ..
git checkout ../setup.py
if [ -d ../requirements.txt ]; then
git checkout ../requirements.txt
fi
if [ ! -z "$2" ]
then
if [ "$2" == "latest" ]; then
echo "Pushing main"
sphinx-build source build/$2 -a
elif [ -d build/$2 ]; then
echo "Directory" $2 "already exists"
else
echo "Pushing version" $2
cp -r _static source/ && cp _conf.py source/conf.py
sphinx-build source build/$2 -a
fi
else
echo "Pushing stable"
cp -r _static source/ && cp _conf.py source/conf.py
sphinx-build source build -a
fi
git checkout source/ && git clean -f source/
}
# exit when any command fails
set -e
# You can find the commit for each tag on https://github.com/frgfm/holocron/tags
if [ -d build ]; then rm -Rf build; fi
mkdir build
cp -r source/_static .
cp source/conf.py _conf.py
git fetch --all --tags --unshallow
deploy_doc "" latest
deploy_doc "e9ca768" v0.1.0
deploy_doc "9b3f927" v0.1.1
deploy_doc "59c3124" v0.1.2
deploy_doc "d41610b" v0.1.3
deploy_doc "67a50c7" v0.2.0
deploy_doc "bc0d972" # v0.2.1 Latest stable release
rm -rf _build _static _conf.py
| true |
bdc707164e7fc638def0e1883c81a3718c51ff1c | Shell | joelfnogueira/genomic-data-analysis | /scripts/run_local_pca.sh | UTF-8 | 2,583 | 3.671875 | 4 | [] | no_license | #!/bin/bash
## This script is used to run local_pca based on genotype likelihood data. See https://github.com/petrelharp/local_pca for details.
## This script will use a separate thread for each LG. So you will need to first run /workdir/genomic-data-analysis/scripts/subset_beagle_by_lg.sh
BEAGLE=$1 # This should be the path to a beagle.gz file that you have used for subset_beagle_by_lg.sh. An example is /workdir/cod/greenland-cod/angsd/bam_list_realigned_mindp161_maxdp768_minind97_minq20.beagle.gz
LGLIST=$2 # This should be the path to a list of LGs or chromosomes that you want to subset by. An example is /workdir/cod/greenland-cod/sample_lists/lg_list.txt
SNP=$3 ## Number of SNPs to include in each window
PC=$4 ## Number of PCs to keep for each window
## The following is for debugging purposes. Delete this when the script is finalized.
# BEAGLE=/workdir/cod/greenland-cod/angsd/bam_list_realigned_mindp161_maxdp768_minind97_minq20.beagle.gz
# LGLIST=/workdir/cod/greenland-cod/sample_lists/lg_list.txt
# SNP=10000
# PC=2
# LG=LG01
# INPUT=/workdir/cod/greenland-cod/angsd/local_pca/bam_list_realigned_mindp161_maxdp768_minind97_minq20_LG01.beagle.x00.gz
## Extract prefix and directory from the beagle path
PREFIX=`echo $BEAGLE | sed 's/\..*//' | sed -e 's#.*/\(\)#\1#'`
BEAGLEDIR=`echo $BEAGLE | sed 's:/[^/]*$::' | awk '$1=$1"/"'`
## Split beagle files into smaller windows, each containing a header and the desired number of SNPs
for LG in `cat $LGLIST`; do
echo "Splitting "$LG
zcat $BEAGLEDIR$PREFIX"_"$LG".beagle.gz" | tail -n +2 | split -d --lines $SNP - --filter='bash -c "{ zcat ${FILE%.*} | head -n1; cat; } > $FILE"' $BEAGLEDIR$PREFIX"_"$LG".beagle.x" &
done
wait
## Gzip these beagle files
for LG in `cat $LGLIST`; do
echo "Zipping "$LG
gzip $BEAGLEDIR$PREFIX"_"$LG".beagle.x"* &
done
wait
## Move the beagle files to local_pca directory
for LG in `cat $LGLIST`; do
echo "Moving "$LG
mv $BEAGLEDIR$PREFIX"_"$LG".beagle.x"* $BEAGLEDIR"local_pca/" &
done
wait
## Run pcangsd and and prepare the local_pca input. The dependencies are /workdir/genomic-data-analysis/scripts/local_pca_1.sh and /workdir/genomic-data-analysis/scripts/local_pca_2.R
for LG in `cat $LGLIST`; do
if [ -f $BEAGLEDIR"local_pca/snp_position_"$SNP"snp_"$LG".tsv" ]; then
rm $BEAGLEDIR"local_pca/snp_position_"$SNP"snp_"$LG".tsv"
fi
if [ -f $BEAGLEDIR"local_pca/pca_summary_"$SNP"snp_"$LG".tsv" ]; then
rm $BEAGLEDIR"local_pca/pca_summary_"$SNP"snp_"$LG".tsv"
fi
bash /workdir/genomic-data-analysis/scripts/local_pca_1.sh $BEAGLEDIR $PREFIX $LG $PC $SNP &
done | true |
578224bd696f6d5299e95d3dd50527801113a67e | Shell | Habiba-Mahmoud/radiucal | /tools/reports.sh | UTF-8 | 3,410 | 3.171875 | 3 | [
"BSD-3-Clause"
] | permissive | BIN=bin/
AUDITS=${BIN}audit.csv
if [ ! -e $AUDITS ]; then
exit 0
fi
source /etc/environment
if [ -z "$RPT_HOST" ]; then
echo "missing RPT_HOST var"
exit 1
fi
if [ -z "$RPT_TOKEN" ]; then
echo "missing RPT_TOKEN var"
exit 1
fi
_post() {
for f in $(ls $BIN | grep "\.md"); do
content=$(cat $BIN/$f | python -c "import sys, urllib.parse; print(urllib.parse.quote(sys.stdin.read()))")
name=$(echo "$f" | cut -d "." -f 1)
curl -s -k -X POST -d "name=$name&content=$content" "$RPT_HOST/reports/upload?session=$RPT_TOKEN"
done
}
DAILY=1
if [ ! -z "$1" ]; then
DAILY=$1
fi
# VLAN->User membership
MEMBERSHIP=${BIN}membership.md
echo "| vlan | user |
| --- | --- |" > $MEMBERSHIP
cat $AUDITS | sed "s/,/ /g" | awk '{print "| " $2, "|", $1 " |"}' | sort -u >> $MEMBERSHIP
# User.VLAN macs assigned
ASSIGNED=${BIN}assigned.md
echo "| user | vlan | mac |
| --- | --- | --- |" > $ASSIGNED
cat $AUDITS | sed "s/,/ | /g;s/^/| /g;s/$/ |/g" | sort -u >> $ASSIGNED
if [ $DAILY -ne 1 ]; then
_post
exit 0
fi
# Auth information
AUTHS=${BIN}auths.md
echo "| user | mac | last |
| --- | --- | --- |" > $AUTHS
dates=$(date +%Y-%m-%d)
for i in $(seq 1 10); do
dates="$dates "$(date -d "$i days ago" +%Y-%m-%d)
done
files=""
for d in $(echo "$dates"); do
f="/var/lib/radiucal/log/radiucal.audit.$d"
if [ -e $f ]; then
files="$files $f"
fi
done
if [ ! -z "$files" ]; then
notcruft=""
users=$(cat $files \
| cut -d " " -f 3,4 \
| sed "s/ /,/g" | sort -u)
for u in $(echo "$users"); do
for f in $(echo "$files"); do
has=$(tac $f | sed "s/ /,/g" | grep "$u" | head -n 1)
if [ ! -z "$has" ]; then
day=$(basename $f | cut -d "." -f 3)
stat=$(echo $has | cut -d "," -f 2 | sed "s/\[//g;s/\]//g")
usr=$(echo $u | cut -d "," -f 1)
notcruft="$notcruft|$usr"
mac=$(echo $u | cut -d "," -f 2 | sed "s/(//g;s/)//g;s/mac://g")
echo "| $usr | $mac | $stat ($day) |" >> $AUTHS
break
fi
done
done
notcruft=$(echo "$notcruft" | sed "s/^|//g")
cat $AUDITS | sed "s/,/ /g" | awk '{print $2,".",$1}' | sed "s/ //g" | grep -v -E "($notcruft)" | sed "s/^/drop: /g" | sort -u | smirc
fi
# Leases
LEASES_KNOWN=${BIN}known_leases
rm -f $LEASES_KNOWN
LEASES=${BIN}leases.md
echo "| status | mac | ip |
| --- | --- | --- |" > $LEASES
unknowns=""
leases=$(curl -s -k "$RPT_HOST/reports/view/dns?raw=true")
for l in $(echo "$leases" | sed "s/ /,/g"); do
t=$(echo $l | cut -d "," -f 1)
ip=$(echo $l | cut -d "," -f 3)
mac=$(echo $l | cut -d "," -f 2 | tr '[:upper:]' '[:lower:]' | sed "s/://g")
line="| $mac | $ip |"
if [[ "$t" == "static" ]]; then
echo "| mapped $line" >> $LEASES_KNOWN
continue
fi
if [ ! -z "$LEASE_MGMT" ]; then
echo "$ip" | grep -q "$LEASE_MGMT"
if [ $? -eq 0 ]; then
echo "| mgmt $line" >> $LEASES_KNOWN
continue
fi
fi
cat $AUDITS | grep -q "$mac"
if [ $? -eq 0 ]; then
echo "| dhcp $line" >> $LEASES_KNOWN
continue
fi
unknowns="$unknowns $mac ($ip)"
echo "| unknown $line" >> $LEASES
done
if [ ! -z "$unknowns" ]; then
echo "unknown leases: $unknowns" | smirc
fi
cat $LEASES_KNOWN | sort -u >> $LEASES
_post
| true |
57eff47b96abead617e617e12373a65cd07c95c5 | Shell | cloudfoundry-incubator/bits-service-ci | /tasks/deploy-bits-cert.sh | UTF-8 | 2,148 | 3.25 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -xe
set -eo pipefail
IFS=$'\n\t'
source ci-resources/scripts/ibmcloud-functions
ibmcloud-login
export-kubeconfig "$CLUSTER_NAME"
export IP=$(kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="ExternalIP")].address}')
export REGISTRY=registry.$IP.nip.io
cat > /tmp/ssl.conf << EOF
[ req ]
distinguished_name = req_distinguished_name
prompt = no
[ req_distinguished_name ]
O = Local Secure Registry for Kubernetes
CN = $REGISTRY
emailAddress = eirini@cloudfoundry.org
EOF
mkdir -p /tmp/certs
openssl req -config /tmp/ssl.conf \
-newkey rsa:4096 \
-nodes \
-sha256 \
-x509 \
-days 265 \
-keyout /tmp/certs/key_file \
-out /tmp/certs/cert_file
set +e
kubectl delete secret bits-cert -n scf
while [ "$(kubectl get secrets -n scf | grep bits-cert)" != "" ]; do
sleep 1;
done
set -e
kubectl create secret generic bits-cert -n scf --from-file=/tmp/certs/cert_file --from-file=/tmp/certs/key_file
while [ "$(kubectl get secrets -n scf | grep bits-cert)" == "" ]; do
sleep 1;
done
set +e
kubectl delete job bits-cert-copy -n scf
set -e
while [ "$(kubectl get jobs -n scf | grep bits-cert-copy)" != "" ]; do
sleep 1;
done
cat > /tmp/bits-cert-copy.yml << EOF
apiVersion: batch/v1
kind: Job
metadata:
name: bits-cert-copy
spec:
template:
spec:
serviceAccountName: "opi"
restartPolicy: OnFailure
volumes:
- name: host-docker
hostPath:
path: /etc/docker
type: Directory
containers:
- name: copy-certs
env:
- name: BITS_REGISTRY
value: registry.$IP.nip.io:6666
image: pego/bits-cert-copy:latest
volumeMounts:
- name: host-docker
mountPath: /workspace/docker
EOF
kubectl apply -f /tmp/bits-cert-copy.yml -n scf
while [ "$(kubectl get jobs -n scf | grep bits-cert-copy)" == "" ]; do
sleep 1;
done
kubectl delete pod $(kubectl get pod -n scf | grep eirini | cut -f1 -d' ') -n scf
kubectl delete pod $(kubectl get pod -n scf | grep bits | cut -f1 -d' ') -n scf
| true |
c86215ff2ab8c57e87a455dda961b4ffcf07123b | Shell | simlun/dotfiles | /sway/bin/dmenu_actions | UTF-8 | 2,053 | 3.390625 | 3 | [] | no_license | #!/bin/sh
actions=$(cat <<END
lock
move_workspace
enable_output
disable_output
screenshot
screenshot_full_screen
screenshot_full_screen_in_3s
suspend
brightness
END
)
# /etc/udev/rules.d/90-brightnessctl.rules
#
# ACTION=="add", SUBSYSTEM=="backlight", RUN+="/bin/chgrp video /sys/class/backlight/%k/brightness"
# ACTION=="add", SUBSYSTEM=="backlight", RUN+="/bin/chmod g+w /sys/class/backlight/%k/brightness"
# ACTION=="add", SUBSYSTEM=="leds", RUN+="/bin/chgrp input /sys/class/leds/%k/brightness"
# ACTION=="add", SUBSYSTEM=="leds", RUN+="/bin/chmod g+w /sys/class/leds/%k/brightness"
#
# # usermod -aG video simlun
brightnesses=$(cat <<END
100%
75%
50%
25%
10%
5%
1%
END
)
choice=$(echo "$actions" | dmenu)
case "$choice" in
lock)
swaylock -f -c 333333
;;
move_workspace)
outputs=$(swaymsg --raw -t get_outputs | jq '.[].name' | tr -d '"')
output=$(echo "$outputs" | dmenu)
swaymsg move workspace to "$output" current
;;
enable_output)
outputs=$(swaymsg --raw -t get_outputs | jq '.[].name' | tr -d '"')
output=$(echo "$outputs" | dmenu)
swaymsg output "$output" enable
;;
disable_output)
outputs=$(swaymsg --raw -t get_outputs | jq '.[].name' | tr -d '"')
output=$(echo "$outputs" | dmenu)
swaymsg output "$output" disable
;;
screenshot)
GRIM_DEFAULT_DIR=$HOME/Pictures grim -g "$(slurp)"
;;
screenshot_full_screen)
sleep 3
GRIM_DEFAULT_DIR=$HOME/Pictures grim
;;
screenshot_full_screen_in_3s)
sleep 3
GRIM_DEFAULT_DIR=$HOME/Pictures grim
;;
suspend)
swaylock -f -c 333333
sleep 1
systemctl suspend
;;
brightness)
brightness=$(echo "$brightnesses" | dmenu)
brightnessctl s "$brightness"
;;
*)
exit 1
esac
| true |
83808b2a288cc3652d6287142eb06d8983be7673 | Shell | utalo/ontologyUpdate | /OntologyUpdate/Ruby/arb | UTF-8 | 345 | 3.5625 | 4 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Run ARQ through a jRuby script
S=":"
if [ "$OSTYPE" == "cygwin" ]; then S=";"; fi
CP="$(arq_path)"
if [ "$JRUBY_HOME" = "" ]
then
echo "JRUBY_HOME not defined" 2>&1
exit 1
fi
if [ $# = 0 ]
then
echo "No scripts to execute" 2>&1
exit 2
fi
java -cp "$JRUBY_HOME/lib/jruby.jar${S}$CP" org.jruby.Main "$@"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.