blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4fb639dff5e669103b7afc923ef0ae3002025f06 | Shell | dycw/dotfiles | /htop/aliases.sh | UTF-8 | 92 | 2.765625 | 3 | [] | permissive | #!/usr/bin/env bash
# Define the htop alias only when htop is installed and executable on PATH.
if [ -x "$(command -v htop)" ]; then
# htopu: run htop filtered to the current user's processes.
alias htopu='htop -u "$USER"'
fi
| true |
99fe8403b8cd8a8a3b08af5c8296f76281d77984 | Shell | yut23/homeshick | /test/fixtures/rc-files.sh | UTF-8 | 1,173 | 3.25 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# shellcheck disable=2164
# Build the "rc-files" fixture repository for the homeshick test suite:
# a git repo whose home/ directory contains a .bashrc and .gitignore, plus
# a regular symlink, a directory symlink, a dead symlink, and a branch
# whose name contains slashes.
# Relies on $REPO_FIXTURES and $NOTHOME being exported by the test harness.
function fixture_rc_files() {
local git_username="Homeshick user"
local git_useremail="homeshick@example.com"
local rc_files="$REPO_FIXTURES/rc-files"
git init "$rc_files"
cd "$rc_files"
git config user.name "$git_username"
git config user.email "$git_useremail"
mkdir home
cd home
# NOTE(review): the PS1 below appears to have lost its \033 escape
# sequences (\[33[ instead of \[\033[) — confirm against the upstream fixture.
cat > .bashrc <<EOF
#!/bin/bash
PS1='\[33[01;32m\]\u@\h\[33[00m\]:\[33[01;34m\]\w\'
EOF
git add .bashrc
git commit -m '.bashrc file for my new rc-files repo'
# A tracked symlink whose target lives outside the fixture home directory.
cat > "$NOTHOME/some-file" <<EOF
File with some content.
EOF
ln -s "$NOTHOME/some-file" symlinked-file
git add symlinked-file
git commit -m 'Added a symlinked file'
mkdir "$NOTHOME/some-directory"
ln -s "$NOTHOME/some-directory" symlinked-directory
git add symlinked-directory
git commit -m 'Added a symlinked directory'
# A symlink pointing at a path that does not exist.
ln -s "$NOTHOME/nonexistent" dead-symlink
git add dead-symlink
git commit -m 'Added a dead symlink'
# Create a branch with a slash in it.
# Used for list suite unit test testSlashInBranch()
git branch branch/with/slash
cat > .gitignore <<EOF
.DS_Store
*.swp
EOF
git add .gitignore
git commit -m 'Added .gitignore file'
}
# Run the fixture builder, discarding its noisy git output.
fixture_rc_files > /dev/null
| true |
4a292513374c7f749a2eb529e97ae74f62be87e2 | Shell | dv-rain/DHL | /scripts/bind_nics.sh | UTF-8 | 1,701 | 3.84375 | 4 | [] | no_license | #!/bin/bash
# Confirm environment variables
# Abort early unless the DPDK build/install variables are exported.
if [ -z "$RTE_TARGET" ]; then
echo "Please export \$RTE_TARGET"
exit 1
fi
if [ -z "$RTE_SDK" ]; then
echo "Please export \$RTE_SDK"
exit 1
fi
if [ -z "$DPDK_INSTALL_DIR" ]; then
echo "Please export \$DPDK_INSTALL_DIR"
exit 1
fi
# Path to the dpdk-devbind helper shipped with the DPDK installation.
DPDK_DEVBIND=$DPDK_INSTALL_DIR/sbin/dpdk-devbind
#DPDK_DEVBIND=$RTE_SDK/usertools/dpdk-devbind.py # for DPDK 17 and up
# Load uio kernel modules
# PIPESTATUS[0] is grep's status: non-zero means igb_uio is not loaded yet
# (the trailing 'cat' keeps grep's -m 1 early exit from breaking the pipe).
grep -m 1 "igb_uio" /proc/modules | cat
if [ ${PIPESTATUS[0]} != 0 ]; then
echo "Loading uio kernel modules"
sleep 1
kernel_version=$(uname -r)
sudo modprobe uio
sudo insmod ${DPDK_INSTALL_DIR}/kmod/igb_uio.ko
else
echo "IGB UIO module already loaded."
fi
#echo "Checking NIC status"
#sleep 1
#$DPDK_DEVBIND --status
echo "Binding NIC status"
# Without $DHL_NIC_PCI: interactively offer each unbound 10G/40G NIC.
if [ -z "$DHL_NIC_PCI" ];then
tenG_and_40G_nics=$($DPDK_DEVBIND --status | grep -v Active | grep -e "10G" -e "10G-Gigabit" -e "40G" | grep unused=igb_uio | cut -f 1 -d " " | wc -l)
if [ ${tenG_and_40G_nics} == 0 ];then
echo "There is no NICs that can be binded to igb_uio driver"
exit 1
else
for id in $($DPDK_DEVBIND --status | grep -v Active | grep -e "10G" -e "10G-Gigabit" -e "40G" | grep unused=igb_uio | cut -f 1 -d " ")
do
read -r -p "Bind interface $id to DPDK? [Y/N] " response
if [[ $response =~ ^([yY][eE][sS]|[yY])$ ]];then
echo "Binding $id to dpdk"
$DPDK_DEVBIND -b igb_uio $id
fi
done
fi
else
# Auto binding example format: export DPDK_NIC_PCI=" 07:00.0 07:00.1 "
# With $DHL_NIC_PCI set: bind each listed PCI address non-interactively.
for nic_id in $DHL_NIC_PCI
do
echo "Binding $nic_id to DPDK"
sudo $DPDK_DEVBIND -b igb_uio $nic_id
done
fi
echo "Finished Binding"
$DPDK_DEVBIND --status | true |
d8f9d97a4cd6e94fd101b0db4020d31dab8be424 | Shell | oldweb-today/wine-browsers | /ie4/run.sh | UTF-8 | 515 | 2.53125 | 3 | [] | no_license | #!/bin/bash
# Configure the Wine prefix for the containerized IE4 session and launch it.
#export WINEDLLOVERRIDES="mshtml="
export WINEPREFIX="/home/browser/.wine"
#sudo chown -R browser /home/browser/.wine
USER_REG=/home/browser/.wine/user.reg
# Substitute the $DIMENSION placeholder in Wine's user registry with the
# requested geometry, e.g. 1024x768 (from $SCREEN_WIDTH/$SCREEN_HEIGHT).
sed -i s/'$DIMENSION'/$SCREEN_WIDTH"x"$SCREEN_HEIGHT/g $USER_REG
#wine regedit $USER_REG
# When a proxy is configured, patch proxy.reg and import it into the registry.
if [[ -n "$PROXY_HOST" ]]; then
sed -i s/'$PROXY_HOST'/$PROXY_HOST/g proxy.reg
sed -i s/'$PROXY_PORT'/$PROXY_PORT/g proxy.reg
wine regedit proxy.reg
fi
# Launch Internet Explorer maximized on $URL (/W presumably makes 'start'
# wait for the program to exit — confirm against Wine's start docs).
run_browser wine start /max /W 'C:/Program Files/Internet Explorer/IEXPLORE.exe' $URL
| true |
9d3d31f9b68a79a04091dfa6503d3988c9ae7621 | Shell | MartinIngesen/blocklistbuilder | /runner.sh | UTF-8 | 230 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Rebuild the dnsmasq ad-block list and activate it: run the builder,
# install its output into dnsmasq's config directory, restart the service.
echo "running builder"
echo "==============="
sh blocklistbuilder.sh
echo "==============="
echo "copying over file"
# ./adblock.conf is presumably produced by blocklistbuilder.sh — confirm.
cp ./adblock.conf /etc/dnsmasq.d/
echo "restarting dnsmasq"
service dnsmasq restart
echo "done!"
| true |
7feb094644cf1ead5ef3f256f21892c04aef5a40 | Shell | renyiwu/bioseq | /extract_nrf2_15_CpG.sh | UTF-8 | 1,165 | 3.171875 | 3 | [] | no_license | #! /usr/bin/bash
# extract raw count for Nrf2 15 CpG sites Species: Mouse
# R Wu. Oct 2018
# Usage: bash extract_nrf2_15_CpG.sh combined.csv
# replaced combined.csv with the file generated by combined_CpG.py -- a script from the DMRfinder tool set.
# A file named Nrf2_15_CpG.csv will be generated after execution of this script.
# single CpG for Nrf2, the 15 CpGs. Note CpG 1-5 are not coverd by the SureSelect Kit.
# chr start dist. CpG
# chr2 75705582 -287 1
# chr2 75705564 -269 2
# chr2 75705554 -259 3
# chr2 75705551 -256 4
# chr2 75705540 -245 5
# chr2 75705467 -172 6
# chr2 75705431 -136 7
# chr2 75705407 -112 8
# chr2 75705366 -71 9
# chr2 75705355 -60 10
# chr2 75705342 -47 11
# chr2 75705320 -25 12
# chr2 75705312 -17 13
# chr2 75705306 -11 14
# chr2 75705295 0 15
# The range for Nrf2 is chr2 75705295 - 75705582
# 75705:
# 295 - 299
# 300 - 499
# 500 - 579
# 580 - 582
# Regexp is 75705(29[5-9]|[3-4][0-9][0-9]|5[0-7][0-9]|58[0-2])
# CMD is grep -P "chr2\t75705(29[5-9]|[3-4][0-9][0-9]|5[0-7][0-9]|58[0-2])" file.csv
# Copy the header row, then append every chr2 row whose position falls in
# 75705295-75705582 (the regexp above enumerates that numeric range).
# NOTE(review): $1 is unquoted; input paths containing spaces will break.
head -1 $1 > Nrf2_15_CpG.csv
grep -P "chr2\t75705(29[5-9]|[3-4][0-9][0-9]|5[0-7][0-9]|58[0-2])" $1 >> Nrf2_15_CpG.csv
| true |
43889628c599feff8202bc40e793850b148183d6 | Shell | sicz/docker-simple-ca | /rootfs/docker-entrypoint.d/60-server-userdb.sh | UTF-8 | 749 | 3.53125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -e
### SERVER_USERDB ##############################################################
# Create CA user database
# On first run, seed the server user database with a single CA user entry.
if [ ! -e ${SERVER_USERDB_FILE} ]; then
info "Adding user ${CA_USER_NAME} to CA user database ${SERVER_USERDB_FILE}"
# Digest is md5("user:realm:password") truncated to the 32-char hash
# (presumably the htdigest-style format lighttpd expects — confirm).
CA_USER_NAME_DIGEST=$(echo -n "${CA_USER_NAME}:${CA_USER_REALM}:${CA_USER_NAME_PWD}" | md5sum | cut -b -32)
echo "${CA_USER_NAME}:${CA_USER_REALM}:${CA_USER_NAME_DIGEST}" > ${SERVER_USERDB_FILE}
chown ${LIGHTTPD_FILE_OWNER} ${SERVER_USERDB_FILE}
# Owner read-only: the file holds an authentication credential digest.
chmod 400 ${SERVER_USERDB_FILE}
else
info "Using CA user database ${SERVER_USERDB_FILE}"
fi
# Export variables for /etc/lighttpd/server.conf
export SERVER_USERDB_FILE
################################################################################
| true |
537e606a6ecc5bfb85d5eca63acdbdbabc5b7269 | Shell | amir-berkani/aur | /pantheon/pantheon-polkit-agent-git/PKGBUILD | UTF-8 | 784 | 2.640625 | 3 | [] | no_license | # Maintainer: Maxime Gauduin <alucryd@archlinux.org>
pkgname=pantheon-polkit-agent-git
pkgver=1.0.0.r1.e9586c5
pkgrel=1
pkgdesc='Pantheon Polkit Agent'
arch=(x86_64)
url=https://github.com/elementary/pantheon-agent-polkit
license=(GPL3)
groups=(pantheon-unstable)
depends=(
glib2
glibc
gtk3
libgranite.so
polkit
)
makedepends=(
git
granite-git
intltool
meson
vala
)
provides=(pantheon-polkit-agent)
conflicts=(pantheon-polkit-agent)
source=(pantheon-polkit-agent::git+https://github.com/elementary/pantheon-agent-polkit.git)
sha256sums=(SKIP)
# Derive the version from git: latest tag, revision count and short hash,
# e.g. "1.0.0.r1.e9586c5".
pkgver() {
cd pantheon-polkit-agent
git describe --tags | sed 's/-/.r/; s/-g/./'
}
# Configure and compile with meson/ninja via Arch's arch-meson wrapper.
build() {
arch-meson pantheon-polkit-agent build
ninja -C build
}
# Install the built tree into the package staging directory.
package() {
DESTDIR="${pkgdir}" ninja -C build install
}
# vim: ts=2 sw=2 et:
| true |
5c083bcb42c6ddf0927a2fe68159ca56dcaa5ae2 | Shell | petronny/aur3-mirror | /emacs-mediawiki-bzr/PKGBUILD | UTF-8 | 1,228 | 2.8125 | 3 | [] | no_license | # $Id: pkgbuild-mode.el,v 1.23 2007/10/20 16:02:14 juergen Exp $
# Maintainer: Philanecros Heliostein <philanecros@gmail.com>
pkgname=emacs-mediawiki-bzr
pkgver=34
pkgrel=3
pkgdesc="Use the power of Emacs to edit MediaWiki sites."
url="http://www.emacswiki.org/emacs/MediaWikiMode"
arch=('any')
# NOTE(review): 'GPLv3' is not the usual Arch license identifier ('GPL3' is)
# — confirm against the Arch packaging guidelines.
license=('GPLv3')
depends=('emacs')
makedepends=('bzr')
provides=('emacs-mediawiki')
conflicts=('emacs-mediawiki')
_bzrtrunk="https://code.launchpad.net/~hexmode/mediawiki-el/trunk"
_bzrmod=${pkgname}
# Fetch (or update) the Bazaar checkout pinned at revision $pkgver, then
# byte-compile mediawiki.el in a scratch copy of the tree.
build() {
cd "$srcdir"
msg "Connecting to Bazaar server...."
if [ -d $_bzrmod ] ; then
cd ${_bzrmod} && bzr --no-plugins pull ${_bzrtrunk} -r ${pkgver}
msg "The local files are updated."
else
bzr --no-plugins branch ${_bzrtrunk} ${_bzrmod} -q -r ${pkgver}
fi
msg "Bazaar checkout done or server timeout"
msg "Starting make..."
rm -rf "$srcdir/$_bzrmod-build"
cp -r "$srcdir/$_bzrmod" "$srcdir/$_bzrmod-build"
cd "$srcdir/$_bzrmod-build"
emacs --batch --eval '(progn
(push "." load-path)
(byte-compile-file "mediawiki.el"))' || return 1
}
# Install the source and compiled elisp into site-lisp.
# (The '|| return 1' suffixes are a legacy makepkg idiom.)
package() {
cd "$srcdir/$_bzrmod-build"
mkdir -p ${pkgdir}/usr/share/emacs/site-lisp || return 1
cp mediawiki.el{,c} ${pkgdir}/usr/share/emacs/site-lisp || return 1
}
| true |
dc4c3ace87da576ca21948226d640da8121ecfb5 | Shell | rkablukov/onec_download_distr | /download.sh | UTF-8 | 2,210 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Interactively collect releases.1c.ru credentials.
read -p "login on login.1c.ru:" USERNAME
read -p "password:" PASSWORD
if [[ -z "$USERNAME" ]];then
echo "USERNAME not set"
exit 1
fi
if [[ -z "$PASSWORD" ]];then
echo "PASSWORD not set"
exit 1
fi
echo "Getting versions, please wait."
# Fetch the login page (saving cookies) and scrape the CAS form's
# action URL and hidden "execution" token needed to submit credentials.
SRC=$(curl -c /tmp/cookies.txt -s -L https://releases.1c.ru)
ACTION=$(echo "$SRC" | grep -oP '(?<=form method="post" id="loginForm" action=")[^"]+(?=")')
EXECUTION=$(echo "$SRC" | grep -oP '(?<=input type="hidden" name="execution" value=")[^"]+(?=")')
# Submit the login form; the session cookie jar is updated in place.
curl -s -L \
-o /dev/null \
-b /tmp/cookies.txt \
-c /tmp/cookies.txt \
--data-urlencode "inviteCode=" \
--data-urlencode "execution=$EXECUTION" \
--data-urlencode "_eventId=submit" \
--data-urlencode "username=$USERNAME" \
--data-urlencode "password=$PASSWORD" \
https://login.1c.ru"$ACTION"
# A TGC cookie in the jar indicates the CAS login succeeded.
if ! grep -q "TGC" /tmp/cookies.txt ;then
echo "Auth failed"
exit 1
fi
clear
# List available Platform83 versions in five columns, newest first.
curl -s -b /tmp/cookies.txt https://releases.1c.ru/project/Platform83 |
grep 'a href="/version_files?nick=Platform83' |
tr -s '=" ' ' ' |
awk -F ' ' '{print $5}' |
sort -Vr | pr -T -5
read -i "8.3." -p "Input version for download: " -e VER
if [[ -z "$VER" ]];then
echo "VERSION not set"
exit 1
fi
if [[ "8.3." = "$VER" ]];then
echo "Need full VERSION number"
exit 1
fi
# Distribution paths use underscores instead of dots, e.g. 8_3_20_1234.
VER1=${VER//./_}
# Resolve the direct download links for the client and server tarballs.
CLIENTLINK=$(curl -s -G \
-b /tmp/cookies.txt \
--data-urlencode "nick=Platform83" \
--data-urlencode "ver=$VER" \
--data-urlencode "path=Platform\\$VER1\\client_$VER1.deb64.tar.gz" \
https://releases.1c.ru/version_file | grep -oP '(?<=a href=")[^"]+(?=">Скачать дистрибутив<)')
SERVERLINK=$(curl -s -G \
-b /tmp/cookies.txt \
--data-urlencode "nick=Platform83" \
--data-urlencode "ver=$VER" \
--data-urlencode "path=Platform\\$VER1\\deb64_$VER1.tar.gz" \
https://releases.1c.ru/version_file | grep -oP '(?<=a href=")[^"]+(?=">Скачать дистрибутив<)')
# Download both archives into ./dist and record the chosen version.
mkdir -p dist
curl --fail -b /tmp/cookies.txt -o dist/client64.tar.gz -L "$CLIENTLINK"
curl --fail -b /tmp/cookies.txt -o dist/server64.tar.gz -L "$SERVERLINK"
echo ${VER} > dist/version.txt
rm /tmp/cookies.txt
| true |
480186ca8ac384b2f565d0cbb559270387e4a281 | Shell | SpiderOak/CS | /Log_Retrieval_Bash_Script_One_Mac.command | UTF-8 | 400 | 3.109375 | 3 | [] | no_license | #! /bin/bash
# Collect SpiderOak ONE log files from the macOS Application Support
# directory into a zip archive on the Desktop, then remove the temp folder.
echo Creating Folder for Logs...
cd ~/Desktop
mkdir SpiderOak_One_Log_Files
echo Now Gathering Logs...
cd ~/Library/Application\ Support/SpiderOakONE
cp *.log ~/Desktop/SpiderOak_One_Log_Files
echo Log Retrieval Complete - Compressing
cd ~/Desktop
zip -r SpiderOak_One_Log_Files.zip SpiderOak_One_Log_Files
# Remove the uncompressed copy now that the zip exists.
rm -R SpiderOak_One_Log_Files
echo Compression Complete. Have a nice day!
exit
| true |
7e744e10caefcc945b703b2f1b856b48f7a79c46 | Shell | Alexpux/MINGW-packages | /mingw-w64-python-wheel/PKGBUILD | UTF-8 | 1,159 | 2.59375 | 3 | [
"BSD-3-Clause"
] | permissive | # Maintainer: J. Peter Mugaas <jpmugaas@suddenlink.net>
_pyname=wheel
_realname=${_pyname}
pkgbase=mingw-w64-python-${_realname}
pkgname=("${MINGW_PACKAGE_PREFIX}-python-${_realname}")
# Keep the old python3-prefixed package name working as an alias.
provides=("${MINGW_PACKAGE_PREFIX}-python3-${_realname}")
conflicts=("${MINGW_PACKAGE_PREFIX}-python3-${_realname}")
replaces=("${MINGW_PACKAGE_PREFIX}-python3-${_realname}")
pkgver=0.37.1
pkgrel=1
pkgdesc="A built-package format for Python (mingw-w64)"
arch=('any')
mingw_arch=('mingw32' 'mingw64' 'ucrt64' 'clang64' 'clang32' 'clangarm64')
url="https://pypi.python.org/pypi/wheel"
license=('spdx:MIT')
depends=("${MINGW_PACKAGE_PREFIX}-python")
makedepends=("${MINGW_PACKAGE_PREFIX}-python-installer")
options=('!strip')
# Upstream ships a universal wheel; no build() step is needed.
source=("https://files.pythonhosted.org/packages/27/d6/003e593296a85fd6ed616ed962795b2f87709c3eee2bca4f6d0fe55c6d00/wheel-${pkgver}-py2.py3-none-any.whl")
sha256sums=('4bdcd7d840138086126cd09254dc6195fb4fc6f01c050a1d7236f2630db1d22a')
# Install the wheel with python-installer; MSYS2_ARG_CONV_EXCL stops MSYS2
# from rewriting the --prefix= path as a Windows path.
package() {
MSYS2_ARG_CONV_EXCL="--prefix=" \
python -m installer --prefix=${MINGW_PREFIX} --destdir="${pkgdir}" *.whl
install -Dm644 *.dist-info/LICENSE.txt "${pkgdir}${MINGW_PREFIX}/share/licenses/python-${_realname}/COPYING"
}
| true |
e9e242620f34bedf083b3a9dd870f1d0319a3448 | Shell | davidbeermann/dotdotdot | /scripts/get_insomnia.sh | UTF-8 | 646 | 3.328125 | 3 | [] | no_license | echo "Install Insomnia"
echo "More info: https://insomnia.rest/"
echo "---------------------------------"
# https://stackoverflow.com/a/26759734
# Require wget before attempting any downloads.
if ! [ -x "$(command -v wget)" ]; then
echo 'Error: wget is not installed.' >&2
exit 1
fi
# https://support.insomnia.rest/article/23-installation#linux
# Register Insomnia's signing key and APT repository, then install it.
echo "1. Adding Debian package repository"
wget --quiet -O - https://insomnia.rest/keys/debian-public.key.asc \
| sudo apt-key add -
echo "deb https://dl.bintray.com/getinsomnia/Insomnia /" \
| sudo tee /etc/apt/sources.list.d/insomnia.list
echo "2. Installing Insomnia"
sudo apt update --quiet && sudo apt install --yes insomnia
exit 0
| true |
752a4a69a3a417315549ff48cf49c0e52c39d1cb | Shell | mfsalama/lab-work | /exercise-6/ebird_summarizer.sh | UTF-8 | 426 | 3.09375 | 3 | [] | no_license | #! /bin/bash
# the next line will create a new file called formatted_eBird_data.csv
# with all \rs replaced with \ns.
#cat $1 | tr "\r" "\n" > formatted_$1
# NOTE(review): the two "eeb177-student@..." lines below are pasted
# terminal prompts, not valid shell commands; running this file as a
# script would fail on them. They record the commands that were run
# (replace_newlines.sh, then the sed reformat) — confirm intent.
#Step 1:
eeb177-student@eeb177-VirtualBox:~/Desktop/eeb-177/lab-work/exercise-6$ replace_newlines.sh eBird_data.csv
#Step 2:
eeb177-student@eeb177-VirtualBox:~/Desktop/eeb-177/lab-work/exercise-6$ sed 's/,\s/ /g' formatted_eBird_data.csv > reformatted_eBird_data.csv
| true |
adc8a3bd428c8be6cf808485d48e04fe83905f9f | Shell | dsmic/oakfoam | /build-linux.sh | UTF-8 | 1,995 | 3.5 | 4 | [
"FSFAP",
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Boost for compiling 32-bit binaries on 64-bit:
# ./bootstrap.sh
# ./b2 link=static address-model=32 stage
# Build oakfoam i386 (and, on x86_64 hosts, amd64) .deb packages and
# release tarballs, reusing the configure flags from the previous build.
set -eu
# Rewrite the Makefile link line so Boost libraries are linked statically
# while everything after $(HOARD_LIB) stays dynamic.
function boost-static
{
sed -i 's/^\(oakfoam_LDADD =\) \(.*\) \($(HOARD_LIB).*\)$/\1 -Wl,-Bstatic \2 -Wl,-Bdynamic -pthread \3/' Makefile
}
# Recover version and the previous ./configure invocation from build artifacts.
VER=`cat config.h | sed -n 's/.*PACKAGE_VERSION \"\(.*\)\".*/\1/p'`
PREV_CONFIGURE=`cat config.log | head | sed -n 's/\s*$ //p'`
echo "configure was: $PREV_CONFIGURE"
# Canned answers piped into checkinstall's interactive prompts
# (maintainer email, license, group, architecture).
DEBINPUT="0
oakfoam@gmail.com
5
BSD
6
games
7
i386
"
BOOST_ROOT=/data/opt/boost_1_47_0 $PREV_CONFIGURE --with-web 'CPPFLAGS=-m32' 'LDFLAGS=-m32 -pthread'
boost-static
echo "$DEBINPUT" | sudo checkinstall --nodoc --install=no make install
sudo chmod a+rw oakfoam oakfoam_*.deb
# Assemble the flat i386 release tarball: install into a staging dir,
# flatten files (except menu/applications/www), and repackage the
# oakfoam-web launcher as run.sh that works from the current directory.
NAME=oakfoam_${VER}_i386
rm -f ${NAME}.tar.gz
mkdir ${NAME}
# BOOST_ROOT=/data/opt/boost_1_47_0 $PREV_CONFIGURE --with-web 'CPPFLAGS=-m32' 'LDFLAGS=-m32 -pthread'
# boost-static
make install DESTDIR=`pwd`/${NAME}
find ${NAME}/ -type f | grep -v 'menu\|applications\|www' | xargs -n1 -I{} mv {} $NAME/
find ${NAME}/ -type d -name www | xargs -n1 -I{} mv {} $NAME/
sed -i '/^cd \.\./d;/^bin=".*/d;s/$bin/\./' ${NAME}/oakfoam-web
mv ${NAME}/oakfoam-web ${NAME}/run.sh
tar -czf ${NAME}.tar.gz ${NAME}/
rm -r ${NAME}/
# On 64-bit hosts, repeat the whole build natively for amd64.
if [ "`uname -m`" == "x86_64" ]; then
DEBINPUT="0
oakfoam@gmail.com
5
BSD
6
games
"
$PREV_CONFIGURE --with-web
boost-static
make clean
echo "$DEBINPUT" | sudo checkinstall --nodoc --install=no make install
sudo chmod a+rw oakfoam oakfoam_*.deb
NAME=oakfoam_${VER}_amd64
rm -f ${NAME}.tar.gz
mkdir ${NAME}
# $PREV_CONFIGURE --with-web
# boost-static
make install DESTDIR=`pwd`/${NAME}
find ${NAME}/ -type f | grep -v 'menu\|applications\|www' | xargs -n1 -I{} mv {} $NAME/
find ${NAME}/ -type d -name www | xargs -n1 -I{} mv {} $NAME/
sed -i '/^cd \.\./d;/^bin=".*/d;s/$bin/\./' ${NAME}/oakfoam-web
mv ${NAME}/oakfoam-web ${NAME}/run.sh
tar -czf ${NAME}.tar.gz ${NAME}/
rm -r ${NAME}/
make clean
fi
# Leave the tree configured as it was before this script ran.
$PREV_CONFIGURE
| true |
2c48ca49324e825005c7eefc2dc0fc973d2d2040 | Shell | jeffknupp/crunchy-containers | /bin/install-deps.sh | UTF-8 | 2,438 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive |
#
# next set is only for setting up enterprise crunchy postgres repo
# not required if you build on centos
#
sudo mkdir /opt/crunchy
sudo cp $BUILDBASE/conf/crunchypg95.repo /etc/yum.repos.d
sudo cp $BUILDBASE/conf/CRUNCHY* /opt/crunchy
sudo yum -y install postgresql95-server
sudo yum -y install net-tools bind-utils wget unzip git golang
#
# download the pgadmin4 python wheel distro
#
wget https://ftp.postgresql.org/pub/pgadmin3/pgadmin4/v1.1/pip/pgadmin4-1.1-py2-none-any.whl
#
# download the metrics products, only required to build the containers
#
wget -O $BUILDBASE/prometheus-pushgateway.tar.gz https://github.com/prometheus/pushgateway/releases/download/0.3.0/pushgateway-0.3.0.linux-amd64.tar.gz
wget -O $BUILDBASE/prometheus.tar.gz https://github.com/prometheus/prometheus/releases/download/v1.1.2/prometheus-1.1.2.linux-amd64.tar.gz
wget -O $BUILDBASE/grafana.tar.gz https://grafanarel.s3.amazonaws.com/builds/grafana-3.1.1-1470047149.linux-x64.tar.gz
# Consul ships as a zip containing a single binary; drop it into $GOBIN.
wget -O /tmp/consul_0.6.4_linux_amd64.zip https://releases.hashicorp.com/consul/0.6.4/consul_0.6.4_linux_amd64.zip
unzip /tmp/consul*.zip -d /tmp
rm /tmp/consul*.zip
mv /tmp/consul $GOBIN
#
# this set is required to build the docs
#
sudo yum -y install asciidoc ruby
gem install --pre asciidoctor-pdf
# Install the asciidoc bootstrap backend and add the project bootstrap.js.
wget -O $HOME/bootstrap-4.5.0.zip http://laurent-laville.org/asciidoc/bootstrap/bootstrap-4.5.0.zip
asciidoc --backend install $HOME/bootstrap-4.5.0.zip
mkdir -p $HOME/.asciidoc/backends/bootstrap/js
cp $GOPATH/src/github.com/crunchydata/crunchy-containers/docs/bootstrap.js \
$HOME/.asciidoc/backends/bootstrap/js/
unzip $HOME/bootstrap-4.5.0.zip $HOME/.asciidoc/backends/bootstrap/
# Only fetch oc/kubectl when the OpenShift client RPM is not installed.
rpm -qa | grep atomic-openshift-client
if [ $? -ne 0 ]; then
#
# install oc binary into /usr/bin
#
wget -O /tmp/openshift-origin-client-tools-v1.1.3-cffae05-linux-64bit.tar.gz \
https://github.com/openshift/origin/releases/download/v1.1.3/openshift-origin-client-tools-v1.1.3-cffae05-linux-64bit.tar.gz
tar xvzf /tmp/openshift-origin-client-tools-v1.1.3-cffae05-linux-64bit.tar.gz -C /tmp
sudo cp /tmp/openshift-origin-client-tools-v1.1.3-cffae05-linux-64bit/oc /usr/bin/oc
sudo yum -y install postgresql-server
#
# install kubectl binary into /usr/bin
#
wget -O /tmp/kubernetes.tar.gz https://github.com/kubernetes/kubernetes/releases/download/v1.2.4/kubernetes.tar.gz
tar xvzf /tmp/kubernetes.tar.gz -C /tmp
sudo cp /tmp/kubernetes/platforms/linux/amd64/kubectl /usr/bin
fi
| true |
c763bd40cd6d51ec237ace4295bbc559be2b0557 | Shell | dockstore/dockstore-documentation | /add-discourse-topic.sh | UTF-8 | 4,345 | 3.890625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright 2022 OICR and UCSC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script determines if a specified RST references a discourse topic, and if not, it attempts to
# extract some information from the RST, create a Discourse topic, and modify the RST to reference the new topic.
set -e
set -u
# helpers.sh provides containsDiscourseTopic (used below).
source helpers.sh
DOCS_URL="https://docs.dockstore.org/en/stable"
DISCOURSE_URL="https://discuss.dockstore.org/posts.json"
DISCOURSE_CATEGORY=8
# $1: path to the RST file, relative to the repository root.
file=$1
# Check if the file exists.
if ! [ -f "$file" ]; then
echo "${file} does not exist."
echo "No action taken."
exit 1
fi
# Make sure this is a file with suffix 'rst' that is referenced from the root of the repo.
if ! [[ "$file" =~ ^docs/.*\.rst$ ]]; then
echo "${file} should have suffix 'rst' and be referenced relative to the root of the documentation repo."
echo "Example: docs/some-category/some-documentation.rst"
echo "No action taken."
exit 1
fi
# Check if the file already contains a discourse topic.
if containsDiscourseTopic "$file"; then
echo "${file} has a discourse topic."
echo "No action taken."
exit 1
fi
# Check if the file contains a top-level RST header.
# If not, abort, because the file is probably meant to be included in another file.
if ! grep -q '^[#*=]' "$file"; then
echo "${file} does not contain a top-level RST header."
echo "No action taken."
exit 1
fi
# Extract some information from the file.
echo "Extracting information from ${file}."
# Title is calculated as the first non-blank line that does not begin with a space directly precedes a line starting with one of '#*=-~'.
# (The tac/grep -A1/tac trick scans the file in reverse so "line before an
# underline" becomes "line after an underline".)
title=$(cat "$file" | tac | grep -A1 '^[^#*=~-]' | grep '^[^#*=~-]' | tac | grep -v '^\.\. ' | grep -v '^ ' | grep '.' | head -1 )
# Summary is calculated as the first block of regular non-indented text starting with a letter or backquote, with newlines converted to spaces, some common RST markup stripped out, and consecutive spaces condensed to one.
summary=$(cat "$file" | tac | sed '/^[#*=~-]\{2\}/,/^/d' | grep -v '^\.\.' | tac | \
sed -n '/^[`A-Za-z]/,$p' | sed '/^\s*$/,$d' | tr '\n' ' ' | \
sed 's/:[^:]*:`/`/g' | sed 's/`\([^<]*\) <[^>]*>`/\1/g' | tr -d '_' | tr -d '`' | sed 's/ */ /g' )
# Compute the documentation site URL
html_path="$(echo "$file" | sed 's/^docs\///' | sed 's/\.rst$//').html"
docs_url="${DOCS_URL}/${html_path}"
# Echo the inputs to the Discouse topic creation request.
echo "Title: ${title}"
echo "Summary: ${summary}"
echo "Embed URL: ${docs_url}"
echo "Discourse URL: ${DISCOURSE_URL}"
echo "Discourse Category: ${DISCOURSE_CATEGORY}"
# Check that all computed values are reasonable
if [ -z "$title" ] || [ -z "$summary" ]; then
echo "Empty title or summary."
echo "No action taken."
exit 1
fi
# Create a new discourse topic.
# Requires $DISCOURSE_API_KEY in the environment (set -u aborts otherwise).
echo "Creating a discourse topic."
response=$(curl -s -X POST "${DISCOURSE_URL}" \
-H "Api-Key: ${DISCOURSE_API_KEY}" \
-H "Api-Username: system" \
-H "cache-control: no-cache" \
-F "title=${title}" \
-F "raw=${summary}" \
-F "embed_url=${docs_url}" \
-F "category=${DISCOURSE_CATEGORY}")
echo "Response: ${response}"
# Process the response.
topic_id=$(echo "$response" | jq .topic_id)
echo "Topic ID: ${topic_id}"
# Make sure that the extracted topic ID is a number.
# (jq emits "null" when the key is absent, which this regex rejects.)
if ! [[ "$topic_id" =~ ^[0-9]+$ ]]; then
echo "Missing or non-numeric topic ID in response."
echo "Aborting.";
exit 1
fi
# Print a confirmation and the topic ID.
echo "Created discourse topic."
echo "Topic ID: ${topic_id}"
# Add the topic ID to the RST file.
# The sed ensures the file ends with a newline before appending.
echo "Adding reference to new topic to ${file}."
sed -i- -e '/.$/a\' "$file"
echo "" >> "$file"
echo ".. discourse::" >> "$file"
echo " :topic_identifier: ${topic_id}" >> "$file"
# Signal success.
echo "Success."
| true |
daeaff359f8ac7863e9fa753e13cce91e3f242a4 | Shell | colindean/hejmo | /scripts/routes | UTF-8 | 184 | 3.171875 | 3 | [] | no_license | #!/bin/sh
# Print the kernel routing table using whichever tool this platform provides.
kernel_name=$(uname -s)
case "$kernel_name" in
  Linux)
    # iproute2 replaces the legacy route/netstat commands on Linux.
    ip route
    ;;
  Darwin)
    netstat -rn
    ;;
  *)
    echo "$0 doesn't know how to look up routes on ${kernel_name}…" >&2
    ;;
esac
| true |
0177aa9dacc75011b730f0f169a1ed23cededf33 | Shell | atul2512/dgraph | /contrib/load-test.sh | UTF-8 | 338 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
# Simple end to end test run for all commits.
# $1 is forwarded unchanged to each contrib test script.
bash contrib/simple-e2e.sh $1
# We run the assigner and the loader only when a commit is made on master/release
# branches.
# (Travis CI variables: branch name and event type must match.)
if [[ $TRAVIS_BRANCH =~ master|release\/ ]] && [ $TRAVIS_EVENT_TYPE = "push" ] ; then
bash contrib/loader.sh $1
bash contrib/queries.sh $1
fi
| true |
ce865061f31558580765134a7111d7391ad9044f | Shell | msys2/MINGW-packages | /mingw-w64-libaec/PKGBUILD | UTF-8 | 1,885 | 2.890625 | 3 | [
"BSD-3-Clause"
] | permissive | # Maintainer: Miloš Komarčević <miloskomarcevic@aim.com>
_realname=libaec
pkgbase=mingw-w64-${_realname}
pkgname="${MINGW_PACKAGE_PREFIX}-${_realname}"
pkgver=1.0.6
pkgrel=2
pkgdesc="Adaptive Entropy Coding library (mingw-w64)"
arch=('any')
mingw_arch=('mingw32' 'mingw64' 'ucrt64' 'clang64' 'clang32' 'clangarm64')
url="https://gitlab.dkrz.de/k202009/libaec"
license=('spdx:BSD-2-Clause')
makedepends=("${MINGW_PACKAGE_PREFIX}-cmake"
             "${MINGW_PACKAGE_PREFIX}-ninja"
             "${MINGW_PACKAGE_PREFIX}-cc")
# libaec supersedes the old szip package.
provides=("${MINGW_PACKAGE_PREFIX}-szip")
replaces=("${MINGW_PACKAGE_PREFIX}-szip")
options=('staticlibs' 'strip')
# Upload-path component of the GitLab release tarball URL.
_md5=45b10e42123edd26ab7b3ad92bcf7be2
source=("${url}/uploads/${_md5}/${_realname}-${pkgver}.tar.gz"
        "0005-cmake-fix-cmake-install.patch")
sha256sums=('032961877231113bb094ef224085e6d66fd670f85a3e17f53d0f131abf24f2fd'
            '9acc599a0f809b1f196c2ff0db6990901e812e659a594041e7540c4980b3fc91')
# Apply the local cmake-install fix on top of the release tarball.
prepare() {
  cd "${srcdir}/${_realname}-${pkgver}"
  patch -p1 -i ${srcdir}/0005-cmake-fix-cmake-install.patch
}
# Out-of-tree cmake/ninja build; Release unless the 'debug' option is set.
build() {
  [[ -d ${srcdir}/build-${MSYSTEM} ]] && rm -rf ${srcdir}/build-${MSYSTEM}
  mkdir -p ${srcdir}/build-${MSYSTEM} && cd ${srcdir}/build-${MSYSTEM}
  declare -a extra_config
  if check_option "debug" "n"; then
    extra_config+=("-DCMAKE_BUILD_TYPE=Release")
  else
    extra_config+=("-DCMAKE_BUILD_TYPE=Debug")
  fi
  MSYS2_ARG_CONV_EXCL="-DCMAKE_INSTALL_PREFIX=" \
    ${MINGW_PREFIX}/bin/cmake \
      -GNinja \
      -DCMAKE_INSTALL_PREFIX=${MINGW_PREFIX} \
      "${extra_config[@]}" \
      -DBUILD_TESTING=OFF \
      ../${_realname}-${pkgver}
  ${MINGW_PREFIX}/bin/cmake --build .
}
# Install into the staging dir and ship the upstream license text.
package() {
  cd ${srcdir}/build-${MSYSTEM}
  DESTDIR="${pkgdir}" ${MINGW_PREFIX}/bin/cmake --build . --target install
  install -Dm644 ${srcdir}/${_realname}-${pkgver}/LICENSE.txt ${pkgdir}${MINGW_PREFIX}/share/licenses/${_realname}/LICENSE
}
| true |
e6527e230f17f66150927e1f2e35827a7b4e6023 | Shell | X3vikan/DarkMessages | /layout/DEBIAN/postinst | UTF-8 | 232 | 3 | 3 | [] | no_license | #!/bin/bash
# Remove stale user preferences left behind by a previous install of the tweak.
old_prefs=/private/var/mobile/Library/Preferences/com.sticktron.darkmessages.plist
if [[ -f "$old_prefs" ]]; then
  rm -f "$old_prefs"
  echo "Removed old prefs"
fi
exit 0
| true |
176ada596badb555439e78ead70efe7305a8da20 | Shell | cehparitosh/Shell-Scripting | /directly_read | UTF-8 | 145 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Prompt for a name (echoed) and a password (read silently via -s),
# then print both back.
# Fix: the prompts contain apostrophes, so they must not be wrapped in
# single quotes — the original 'What's ...' left an unbalanced quote
# that broke the script's parsing.
read -p "Hello! What's Your Name " NAME
read -sp "What's Your Password: " PASS
echo "My Name is $NAME"
echo "My Password is $PASS"
| true |
56ccaf25bd7cc28e3d4827e0095e390f7063bf3a | Shell | honkiko/test-vpc-vs-vxlan | /bw-lat/run-bw-lat-test.sh | UTF-8 | 817 | 2.75 | 3 | [] | no_license |
# Run $1 rounds of qperf TCP bandwidth/latency measurements, pausing $2
# seconds between rounds. Each round measures four container paths (VPC and
# VXLAN overlays, container-to-container and container-to-node, entered via
# nsenter into the pod network namespaces) plus a node-to-node baseline,
# writing raw qperf output to <path>-<round>.raw files.
rounds=$1
internal=$2
# Network-namespace PIDs and peer addresses for the two overlay setups.
VPC_LOCAL_NS=6512
VPC_REMOTE_IP='10.0.1.11'
VXLAN_LOCAL_NS=18172
VXLAN_REMOTE_IP='192.168.24.2'
REMOTE_NODE_IP='172.31.0.5'
n=0; while [[ $n -lt $rounds ]]; do
nsenter -t $VPC_LOCAL_NS -n qperf $VPC_REMOTE_IP -oo msg_size:64:64K:*2 -vu -vvc tcp_bw tcp_lat > vpc-c2c-$n.raw
sleep 1
nsenter -t $VXLAN_LOCAL_NS -n qperf $VXLAN_REMOTE_IP -oo msg_size:64:64K:*2 -vu -vvc tcp_bw tcp_lat > vxlan-c2c-$n.raw
sleep 1
nsenter -t $VPC_LOCAL_NS -n qperf $VPC_REMOTE_IP -oo msg_size:64:64K:*2 -vu -vvc tcp_bw tcp_lat > vpc-c2n-$n.raw
sleep 1
nsenter -t $VXLAN_LOCAL_NS -n qperf $VXLAN_REMOTE_IP -oo msg_size:64:64K:*2 -vu -vvc tcp_bw tcp_lat > vxlan-c2n-$n.raw
sleep 1
qperf $REMOTE_NODE_IP -oo msg_size:64:64K:*2 -vu -vvc tcp_bw tcp_lat > n2n-$n.raw
sleep 1
n=$((n+1));
sleep $internal
done
| true |
37b0901bc6f7a9a6ebb557bf989e487233736998 | Shell | sinkingpoint/plex-server | /bootstrap-salt.sh | UTF-8 | 999 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Install salt-minion in masterless mode and apply the local highstate:
# bootstrap salt, point the minion at local file roots, copy states/files/
# pillar into /srv, fetch the required formulas, then run state.highstate.
curl -L https://bootstrap.saltstack.com -o bootstrap_salt.sh
sudo sh bootstrap_salt.sh
rm bootstrap_salt.sh
# Masterless configuration: states come from the local /srv trees.
sudo tee /etc/salt/minion << EOF
file_client: local
file_roots:
base:
- /srv/salt
- /srv/formulas/docker-formula-master
- /srv/files
EOF
sudo systemctl restart salt-minion
# Init states
sudo mkdir -p /srv/salt
sudo cp ./salt/* /srv/salt
# Init files
sudo mkdir -p /srv/files
sudo cp ./files/* /srv/files
# Init Pillar
sudo mkdir -p /srv/pillar
sudo cp ./pillar/* /srv/pillar
# Init Docker Compose stuff
sudo mkdir -p /opt/media-compose
sudo chown colin:colin /opt/media-compose
cp docker-compose.yml .env /opt/media-compose
# Init all the formulas
sudo mkdir -p /srv/formulas
for formula in docker; do
wget https://github.com/saltstack-formulas/${formula}-formula/archive/master.tar.gz -O ${formula}-formula.tar.gz
sudo tar -xf ${formula}-formula.tar.gz -C /srv/formulas
rm ${formula}-formula.tar.gz
done
# Apply all configured states locally (no master).
sudo salt-call --local state.highstate
| true |
aa50770e7e03165a6b3b3dcfed1bb76d70b23423 | Shell | syfchao/quick-start-app | /proto/service/compile.sh | UTF-8 | 633 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Generate C++ protobuf sources for a service .proto via the project code
# generator, compile them with the bundled protoc, and distribute the
# results into the application tree.
# Usage: compile.sh xxx.proto
echo "$#"
if [ "$#" -lt 1 ]; then
    echo "$0 xxx.proto"
    # The original used 'exit -1', which is out of range and becomes 255.
    exit 1
fi

filename=$1
dir=$(basename "$1" .proto)
echo "$filename" "$dir"

# Step 1: run the DSL code generator; it writes into ./$dir.
if ! ../../build/bin/codegen -f "$filename" -o "$dir"; then
    echo "failed to gen code"
    exit 1
fi

# Stage the generated .proto and its blink.proto dependency next to protoc.
cp "$dir/gen_proto/gen_$filename" .
cp ../idl/blink.proto .
echo "$dir/gen_proto/gen_$filename"

# Step 2: compile the generated .proto to C++ with the bundled protoc.
if ! ./protoc --cpp_out=. "gen_$filename"; then
    echo "failed to gen proto"
    exit 1
fi

# Publish the generated sources into the shared bo/ tree, keep a copy in
# the per-service gen_proto/ directory, and install the client stubs.
cp ./gen_"$dir".pb.* ../../apps/common/bo/
cp ./gen_"$dir".pb.* "./$dir/gen_proto/"
cp "$dir"/gen_cli/* ../../apps/common/stub/
# Clean up the staged intermediates.
rm -f "./gen_$filename" ./blink.proto *.pb.h *.pb.cc
| true |
5a46f4f14100dbf87f2d0747210515db14007029 | Shell | inducer/thesis-experiments | /hedge/poisson/run-benchmark.sh | UTF-8 | 492 | 2.625 | 3 | [] | no_license | #! /bin/sh
set -e
# Clear previous visualization output (project helper on PATH).
rm-vis
SUM_FILE=run-summary.txt
rm -f $SUM_FILE
# Sweep element order 1-9, GPU vs CPU, double vs single precision, and
# append every run's results to $SUM_FILE.
for o in `seq 1 9`; do
for where in "" "--cpu"; do
for prec in "" "--single --tol=1e-4"; do
echo "-------------------------------------------------"
echo "ORDER $o $where $prec"
echo "-------------------------------------------------"
python poisson-cuda.py --write-summary=$SUM_FILE \
--order=$o $where $prec \
--no-vis --no-cg-progress --max-volume=5e-5
done
done
done
| true |
3d7cf741182dd8022240061efb12d88fe0865fb9 | Shell | Babar/EternalTCP | /launcher/ET | UTF-8 | 533 | 3.265625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Launch an EternalTCP session: generate a random passkey on the remote
# host over ssh, start ETServer there as a daemon, then connect ETClient
# locally with the same passkey.
# Usage: ET <ssh-target> <hostname> <port>
SSH_COMMAND=$1
HOSTNAME=$2
PORT=$3
SERVER_BINARY="ETServer"
CLIENT_BINARY="ETClient"
# Command text (expanded remotely) producing 32 random alphanumeric chars.
PASSWORD_GENERATOR="env LC_CTYPE=C tr -dc \"a-zA-Z0-9\" < /dev/urandom | head -c 32"
SSH_PASSWORD_COMMAND="
export PASSWD=\`$PASSWORD_GENERATOR\` &&
echo PASSWORD:\$PASSWD &&
$SERVER_BINARY --v=9 --alsologtostderr --passkey=\$PASSWD --port=$PORT --daemon=true"
# Run the snippet on the remote side and scrape the PASSWORD: line it prints.
# NOTE(review): the passkey travels back over the ssh channel's stdout —
# confirm this is acceptable for the threat model.
PASSWD=`echo "$SSH_PASSWORD_COMMAND" | ssh "$SSH_COMMAND" | grep PASSWORD: | cut -d: -f2`
$CLIENT_BINARY --passkey="$PASSWD" --v=9 --host="$HOSTNAME" --port="$PORT" --log_dir=$PWD
| true |
fe43f6b4aec35c128ae626b20dabf22f8f92b561 | Shell | TonyLCC/MAIKF | /GetWhitelistofProcess/getPids.sh | UTF-8 | 735 | 2.78125 | 3 | [] | no_license | echo "Pid of xrdp is : \c" && pidof xrdp
# Report PIDs of the remote-desktop/FTP daemons, record them in
# tracePidList.txt, and reset the kernel ftrace state under
# /sys/kernel/debug/tracing (tracing off, 'nop' tracer, cleared buffers).
# NOTE(review): "\c" is an SysV-echo/printf continuation escape; bash's
# builtin echo prints it literally — confirm the intended shell.
echo "Pid of xrdp-sesman is : \c" && pidof xrdp-sesman
echo "Pid of Xvnc is : \c" && pidof Xvnc
echo "Pid of vsftpd is : \c" && pidof vsftpd
pidof xrdp > tracePidList.txt
pidof xrdp-sesman >> tracePidList.txt
#pidof Xvnc >> tracePidList.txt
pidof vsftpd >> tracePidList.txt
#echo "Turn off the switch of trace..."
echo "Intialize tracing..."
# Disable tracing before reconfiguring.
echo 0 > /sys/kernel/debug/tracing/tracing_on
echo "Value of tracing_on is : \c" && cat /sys/kernel/debug/tracing/tracing_on
echo nop > /sys/kernel/debug/tracing/current_tracer
#sed 's/ /\n/g' XvncList.txt > traceXvncPidsList.txt
# Clear any previously-set traced PIDs and the trace ring buffer.
echo "" > /sys/kernel/debug/tracing/set_ftrace_pid
echo "" > /sys/kernel/debug/tracing/trace
| true |
db8c6807f6df3891ea82dad39580b295a57d4d85 | Shell | Avis20/sandbox | /log_level.sh | UTF-8 | 477 | 3.921875 | 4 | [] | no_license | #!/usr/bin/env bash
# Default log level: 1 (Warning) unless the caller exported LOG_LEVEL.
if [ -z "$LOG_LEVEL" ]; then LOG_LEVEL=1; fi
# echo "$LOG_LEVEL"

# log_msg LEVEL MSG...
# Prints "Label: MSG" on stdout when LEVEL <= LOG_LEVEL.
# Levels: 0=Error 1=Warning 2=Info 3=Debug, anything else "Other".
# All words after LEVEL form the message; the original kept only $2,
# silently truncating multi-word messages.
function log_msg {
    local level=$1
    shift
    local msg="$*"
    local level_text
    if [ "$level" -le "$LOG_LEVEL" ]; then
        case "$level" in
            0) level_text="Error" ;;
            1) level_text="Warning" ;;
            2) level_text="Info" ;;
            3) level_text="Debug" ;;
            *) level_text="Other"
        esac
        printf '%s: %s\n' "$level_text" "$msg"
    fi
}

# CLI mode: forward all arguments so multi-word messages survive
# (the original passed only $1 $2, unquoted).
if [ "$#" -ge 2 ]; then
    log_msg "$@"
fi
| true |
e394be7274ff0b1425339506259048ed1f880da7 | Shell | dwdraju/docker-user | /entrypoint.sh | UTF-8 | 443 | 3.6875 | 4 | [] | no_license | #!/bin/bash
set -u

# Create the group/user for the configured GROUP_ID/USER_ID/USERNAME env
# vars if they do not exist yet.  The original probed the hard-coded id
# 1000, so any other configured id was never detected as missing.
# NOTE(review): `id` takes a user name/uid; probing a GROUP id with
# `id -g` is approximate -- `getent group "$GROUP_ID"` would be the
# precise check.  Kept the original probe style.
if ! id -g "$GROUP_ID" > /dev/null 2>&1; then
  echo "Create group with id: $GROUP_ID"
  groupadd --gid "$GROUP_ID" "$USERNAME"
fi

if ! id -u "$USER_ID" > /dev/null 2>&1; then
  echo "Create user with id: $USER_ID and $USERNAME"
  useradd -m -d "/home/$USERNAME" -s /bin/bash -u "$USER_ID" -g "$GROUP_ID" "$USERNAME"
fi
# NOTE(review): the following `tail -f /dev/null` (outside this block)
# never returns, so the `exec su` after it is unreachable -- confirm intent.
tail -f /dev/null
exec su $USERNAME "$@" | true |
025ba5144d0a77e7ed33389d6638c223ffb91597 | Shell | georgevarghese8815/DataScience---DataQuest | /Command-Line-Beginner/Challenge_ Working with the Command Line-185.sh | UTF-8 | 453 | 2.890625 | 3 | [] | no_license | ## 1. Command Line Python ##
# NOTE(review): this file is a recorded DataQuest exercise transcript, not
# an executable script -- lines keep their shell prompts ("~$", "(script) ~$")
# and several commands appear garbled by the capture (e.g. "script.p",
# "activate ript", "printer kdir printer").  Preserved verbatim.
~$ pwd ..
## 2. Creating a script ##
~$ echo -e 'import sys \nif __name__ = "__main__":\n    print(sys.argv[1])' > script.py
## 3. Change file permissions ##
~$ chmod 700 script.p
## 4. Create a virtualenv ##
~$ source script/bin/activate ript
## 5. Move the script ##
(script) ~$ mv script.py printer kdir printer
## 6. Execute the script ##
(script) ~/printer$ python script.py "I'm so good at challenges!" printer | true |
d44b92edbf692916bbe8a4a7bb86da4f8527f56a | Shell | whchoi98/useful-shell | /Node_IP.sh | UTF-8 | 1,587 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Prompt interactively for each node-group IP address ...
read -p "Enter ng_public01 ip address: " ng_public01
read -p "Enter ng_public02 ip address: " ng_public02
read -p "Enter ng_public03 ip address: " ng_public03
read -p "Enter ng_private01 ip address: " ng_private01
read -p "Enter ng_private02 ip address: " ng_private02
read -p "Enter ng_private03 ip address: " ng_private03
read -p "Enter mgmd_ng_public01 ip address: " mgmd_ng_public01
read -p "Enter mgmd_ng_public02 ip address: " mgmd_ng_public02
read -p "Enter mgmd_ng_public03 ip address: " mgmd_ng_public03
read -p "Enter mgmd_ng_private01 ip address: " mgmd_ng_private01
read -p "Enter mgmd_ng_private02 ip address: " mgmd_ng_private02
read -p "Enter mgmd_ng_private03 ip address: " mgmd_ng_private03
# ... then persist each as an export in ~/.bash_profile (tee echoes it too).
# Fixed two copy/paste bugs from the original: the *_public03 values were
# written under the name *_public02, clobbering it and never exporting 03.
echo "export ng_public01=${ng_public01}" | tee -a ~/.bash_profile
echo "export ng_public02=${ng_public02}" | tee -a ~/.bash_profile
echo "export ng_public03=${ng_public03}" | tee -a ~/.bash_profile
echo "export ng_private01=${ng_private01}" | tee -a ~/.bash_profile
echo "export ng_private02=${ng_private02}" | tee -a ~/.bash_profile
echo "export ng_private03=${ng_private03}" | tee -a ~/.bash_profile
echo "export mgmd_ng_public01=${mgmd_ng_public01}" | tee -a ~/.bash_profile
echo "export mgmd_ng_public02=${mgmd_ng_public02}" | tee -a ~/.bash_profile
echo "export mgmd_ng_public03=${mgmd_ng_public03}" | tee -a ~/.bash_profile
echo "export mgmd_ng_private01=${mgmd_ng_private01}" | tee -a ~/.bash_profile
echo "export mgmd_ng_private02=${mgmd_ng_private02}" | tee -a ~/.bash_profile
echo "export mgmd_ng_private03=${mgmd_ng_private03}" | tee -a ~/.bash_profile
| true |
11e1ab90bcf98ae7620d0efeebeb4b0cd81db352 | Shell | giovannidiana/Information | /R-plot/FigureS2_paper/prepare_centrals.sh | UTF-8 | 450 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Runs the information-capacity/mutual-information tools from $DIR for each
# genotype, writing one "<GT>_kNN_group0.dat" result file per genotype.
DIR=/home/diana/workspace/Analysis/Information
for GT in QL196 QL404 QL402 QL435
do
outfile=$GT"_kNN_group0.dat"
# CCap3D writes a per-genotype input pdf, which is then copied to the
# fixed name "inpdf.dat" that the GetMI* tools appear to read.
$DIR/CCap3D $GT kNN 3NN 30 0 > $DIR/"inpdf_"$GT".dat"
cat $DIR/inpdf_$GT".dat" > $DIR/"inpdf.dat"
# 3D MI first, then the three 1D marginals appended to the same file.
$DIR/GetMI3D $GT 0 kNN 3NN 30 > $outfile
$DIR/GetMI1D $GT 1 kNN 3NN 30 0 >> $outfile
$DIR/GetMI1D $GT 2 kNN 3NN 30 0 >> $outfile
$DIR/GetMI1D $GT 3 kNN 3NN 30 0 >> $outfile
done
| true |
ee9bbc43b35bcf1c1cc6ff26255444b4bb747463 | Shell | lcdwiki/LCD-show-ubuntu | /system_backup.sh | UTF-8 | 2,826 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# Back up the display/touch configuration the LCD installer is about to
# replace into ./.system_backup, then remove the originals.  Order matters:
# each file is copied before it is deleted.
if [ ! -d "./.system_backup" ]; then
sudo mkdir ./.system_backup
fi
sudo rm -rf ./.system_backup/*
if [ -f /etc/X11/xorg.conf.d/99-calibration.conf ]; then
sudo cp -rf /etc/X11/xorg.conf.d/99-calibration.conf ./.system_backup
sudo rm -rf /etc/X11/xorg.conf.d/99-calibration.conf
fi
if [ -f /etc/X11/xorg.conf.d/40-libinput.conf ]; then
sudo cp -rf /etc/X11/xorg.conf.d/40-libinput.conf ./.system_backup
sudo rm -rf /etc/X11/xorg.conf.d/40-libinput.conf
fi
if [ -d /etc/X11/xorg.conf.d ]; then
sudo mkdir -p ./.system_backup/xorg.conf.d
sudo rm -rf /etc/X11/xorg.conf.d
fi
# Find the most recent "dtoverlay=...:rotate=" line in config.txt and back
# up the matching overlay blob(s).  The awk chain extracts the overlay name
# between ':' and '='.
result=`grep -rn "^dtoverlay=" /boot/firmware/config.txt | grep ":rotate=" | tail -n 1`
if [ $? -eq 0 ]; then
str=`echo -n $result | awk -F: '{printf $2}' | awk -F= '{printf $NF}'`
if [ -f /boot/firmware/overlays/$str-overlay.dtb ]; then
sudo cp -rf /boot/firmware/overlays/$str-overlay.dtb ./.system_backup
sudo rm -rf /boot/firmware/overlays/$str-overlay.dtb
fi
if [ -f /boot/firmware/overlays/$str.dtbo ]; then
sudo cp -rf /boot/firmware/overlays/$str.dtbo ./.system_backup
sudo rm -rf /boot/firmware/overlays/$str.dtbo
fi
fi
# Replace config.txt with the bundled default after saving the current one.
sudo cp -rf /boot/firmware/config.txt ./.system_backup
sudo cp -rf ./boot/config-nomal.txt /boot/firmware/config.txt
#sudo cp -rf /usr/share/X11/xorg.conf.d/99-fbturbo.conf ./.system_backup/
#sudo cp -rf ./usr/99-fbturbo.conf-original /usr/share/X11/xorg.conf.d/99-fbturbo.conf
#sudo cp -rf /boot/firmware/cmdline.txt ./.system_backup/
#sudo cp -rf ./usr/cmdline.txt-original /boot/firmware/cmdline.txt
if [ -f /usr/share/X11/xorg.conf.d/99-fbturbo.conf ];then
sudo cp -rf /usr/share/X11/xorg.conf.d/99-fbturbo.conf ./.system_backup/
sudo rm -rf /usr/share/X11/xorg.conf.d/99-fbturbo.conf
fi
if [ -f /etc/rc.local ]; then
sudo cp -rf /etc/rc.local ./.system_backup/
sudo rm -rf /etc/rc.local
fi
if [ -f /etc/inittab ]; then
sudo cp -rf /etc/inittab ./.system_backup
sudo rm -rf /etc/inittab
fi
# Record whether fbcp was installed (marker file) before removing it.
type fbcp > /dev/null 2>&1
if [ $? -eq 0 ]; then
sudo touch ./.system_backup/have_fbcp
sudo rm -rf /usr/local/bin/fbcp
fi
#type cmake > /dev/null 2>&1
#if [ $? -eq 0 ]; then
#sudo touch ./.system_backup/have_cmake
#sudo apt-get purge cmake -y 2> error_output.txt
#result=`cat ./error_output.txt`
#echo -e "\033[31m$result\033[0m"
#fi
if [ -f /usr/share/X11/xorg.conf.d/10-evdev.conf ]; then
sudo cp -rf /usr/share/X11/xorg.conf.d/10-evdev.conf ./.system_backup
sudo dpkg -P xserver-xorg-input-evdev
#sudo apt-get purge xserver-xorg-input-evdev -y 2> error_output.txt
#result=`cat ./error_output.txt`
#echo -e "\033[31m$result\033[0m"
fi
if [ -f /usr/share/X11/xorg.conf.d/45-evdev.conf ]; then
sudo cp -rf /usr/share/X11/xorg.conf.d/45-evdev.conf ./.system_backup
sudo rm -rf /usr/share/X11/xorg.conf.d/45-evdev.conf
fi
if [ -f ./.have_installed ]; then
sudo cp -rf ./.have_installed ./.system_backup
fi
| true |
095f318f5c8a219ae6d7285d96755d62d0fb23e0 | Shell | ikeikeikeike/exantenna | /prod-do | UTF-8 | 4,568 | 3.390625 | 3 | [] | no_license | #!/bin/bash -eu
# Deployment settings read from plain files in the repo root: package
# version, remote login user, and build/production/inner host names.
# NOTE(review): this overwrites the shell's own USER variable by design.
VERSION=`cat VERSION`
USER=`cat USER`
BUILDING=`cat BUILDING`
PRODUCTION=`cat PRODUCTION`
INNER1=`cat INNER1`
# Full release pipeline: build remotely, pull the release down, push it to
# every host, then hot-upgrade to the current VERSION.
deploy() {
build
download $VERSION
upload
upgrade $VERSION
}
# Print the release version that was read from the VERSION file at startup.
version() {
    printf '%s\n' "$VERSION"
}
# Commit and push the version bump (mix.exs + VERSION) with Mercurial.
fixversion() {
hg commit -m "fix version $VERSION" mix.exs VERSION
hg push
}
# Build the release on the build host after clearing old static assets.
build() {
fixversion
ssh ${USER}@${BUILDING} "rm -rf src/exantenna/priv/static/css/*"
ssh ${USER}@${BUILDING} "rm -rf src/exantenna/priv/static/js/*"
ssh ${USER}@${BUILDING} "cd src/exantenna && bash build"
}
# Pull the built release locally; arg $1 is the version used for the
# static-assets symlink inside rel/exantenna.
download() {
rsync -avr --stats ${USER}@${BUILDING}:~/src/exantenna/rel .
(cd rel/exantenna
rm -rf static
ln -fs lib/exantenna-${VERSION}/priv/static
)
}
# Push the local rel/ tree to the production and inner hosts.
upload() {
# upload staging
rsync -avr --stats rel ${USER}@${PRODUCTION}:~/
# TODO: rsync to be background process if additional more INNER server.
bash mix run for_inners.exs
rsync -avr --stats rel ${USER}@${INNER1}:~/
}
# Hot-upgrade both hosts to version $1, then bounce services and report.
upgrade() {
# upgrade
ssh ${USER}@${PRODUCTION} "(cd src/exantenna; hg pull ; hg up -C)"
ssh ${USER}@${PRODUCTION} "sh rel/exantenna/bin/exantenna upgrade $1"
ssh ${USER}@${INNER1} "(cd src/exantenna; hg pull ; hg up -C)"
ssh ${USER}@${INNER1} "sh rel/exantenna/bin/exantenna upgrade $1"
restartsrv
echo;deployed
}
# Restart the phoenix service on both hosts, printing the running release
# process before/after.  Errors are tolerated (set +e) because the repeated
# `start` calls are expected to fail once the service is already up; the
# script itself runs under `bash -eu`, hence the explicit set -e at the end.
restartsrv() {
set +e
OUT=`ssh ${USER}@${INNER1} "ps aux | grep '/rel/exantenna/bin/exantenna' | grep -v 'grep'"`
echo "INNER1 OUT: $OUT"
ssh ${USER}@${INNER1} "sudo systemctl restart phoenix"
ssh ${USER}@${INNER1} "sudo systemctl start phoenix"
ssh ${USER}@${INNER1} "sudo systemctl start phoenix"
ssh ${USER}@${INNER1} "sudo systemctl start phoenix"
OUT=`ssh ${USER}@${INNER1} "ps aux | grep '/rel/exantenna/bin/exantenna' | grep -v 'grep'"`
echo "INNER1 OUT: $OUT"
OUT=`ssh ${USER}@${PRODUCTION} "ps aux | grep '/rel/exantenna/bin/exantenna' | grep -v 'grep'"`
echo;echo "PRODUCTION OUT: $OUT"
ssh ${USER}@${PRODUCTION} "sudo systemctl restart phoenix"
ssh ${USER}@${PRODUCTION} "sudo systemctl start phoenix"
ssh ${USER}@${PRODUCTION} "sudo systemctl start phoenix"
ssh ${USER}@${PRODUCTION} "sudo systemctl start phoenix"
OUT=`ssh ${USER}@${PRODUCTION} "ps aux | grep '/rel/exantenna/bin/exantenna' | grep -v 'grep'"`
echo "PRODUCTION OUT: $OUT"
set -e
}
# Roll both hosts back to release $1.
downgrade() {
# upgrade
ssh ${USER}@${PRODUCTION} "sh rel/exantenna/bin/exantenna downgrade $1"
ssh ${USER}@${INNER1} "sh rel/exantenna/bin/exantenna downgrade $1"
echo;deployed
}
# Delete release $1 locally and mirror the pruned lib/releases trees to the
# build, production and inner hosts (--delete propagates the removal).
remove() {
rm -fr rel/exantenna/lib/exantenna-$1
rm -fr rel/exantenna/releases/$1
rsync -avr --delete --stats rel/exantenna/lib ${USER}@${BUILDING}:~/src/exantenna/rel/exantenna/
rsync -avr --delete --stats rel/exantenna/releases ${USER}@${BUILDING}:~/src/exantenna/rel/exantenna/
rsync -avr --delete --stats rel/exantenna/lib ${USER}@${PRODUCTION}:~/rel/exantenna/
rsync -avr --delete --stats rel/exantenna/releases ${USER}@${PRODUCTION}:~/rel/exantenna/
rsync -avr --delete --stats rel/exantenna/lib ${USER}@${INNER1}:~/rel/exantenna/
rsync -avr --delete --stats rel/exantenna/releases ${USER}@${INNER1}:~/rel/exantenna/
}
# List the releases currently present on each host, newest first.
deployed() {
# upgrade
echo "PRODUCTION"
ssh ${USER}@${PRODUCTION} "ls -t rel/exantenna/releases"
echo "INNER1"
ssh ${USER}@${INNER1} "ls -t rel/exantenna/releases"
}
# Command dispatch.  Runs under `bash -eu`, so a missing command word
# aborts with an unbound-variable error before reaching the usage text.
case "$1" in
deploy) deploy ;;
version) version ;;
fixversion) fixversion ;;
build) build ;;
upload) upload ;;
download) download ;;
upgrade) upgrade $2 ;;
downgrade) downgrade $2 ;;
deployed) deployed ;;
remove) remove $2 ;;
restart) restartsrv ;;
*) echo "
Usage:
$0 <command> [options]
Commands:
deploy Deployment all
version Get package version.
fixversion <version> Fix package version.
build [options] Build packages.
upload UploadDeplo packages.
download <version> Download rel.
upgrade <version> Upload rel.
downgrade <version> Hot deploy server.
deployed Deployed versions.
remove Removing before versions.
restart Restarting servers
" >&2
exit 1
;;
esac
| true |
fe479647a9f3df26f69ff770b2f90f3e067e93c4 | Shell | datomusic/csv-timestamper | /listserials | UTF-8 | 476 | 3.75 | 4 | [] | no_license | #!/bin/bash
echo "Listserials 0.9.1"
shopt -s nullglob
# Collect the serial-number CSVs once; nullglob makes this an empty array
# when nothing matches.  The original counted only single-digit [0-9].csv
# while listing and moving *.csv -- unified to *.csv so the list, count,
# and move all agree (the commented-out `ls -1q *.csv | wc -l` confirms
# *.csv was the intended population).
csvfiles=(*.csv)
echo "The following serial number csv's are present"
# List the basenames without parsing `ls` output; ${f%.csv} strips exactly
# the suffix (the original sed 's/.csv//' treated the dot as a wildcard).
for f in "${csvfiles[@]}"; do
    printf '%s\n' "${f%.csv}"
done
echo
echo "Total number of files: ${#csvfiles[@]}"
echo
while true; do
    # `|| exit 1` aborts on EOF; the original looped forever when run
    # non-interactively because a failed read left yn empty.
    read -p "Do you wish to move these files to the entered directory? y/n" yn || exit 1
    case $yn in
        [Yy]* ) mv *.csv entered/; break;;
        [Nn]* ) exit;;
        * ) echo "Please answer yes or no.";;
    esac
done
| true |
f0a1356d5cfb143c5d764e001ae9cfedd7d8d9bf | Shell | bymavis/3-Phish-Page-Detection | /install.sh | UTF-8 | 1,187 | 3.625 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/bash
# Interactive installer for the phishing-page-detection toolchain:
# tesseract OCR, Python NLP/feature-extraction libraries, and a pinned
# scikit-learn.  Assumes a Debian/Ubuntu host with sudo.
echo "check your python and pip version first"
python3 --version
pip3 --version
echo "Install dependences"
echo "Install tesseract OCR, package name:tesseract or tesseract-OCR"
#https://github.com/tesseract-ocr/tesseract/wiki
sudo apt-get update
sudo apt-get install tesseract-ocr
echo "Your installed tesseract is located at:"
which tesseract
echo "Please make sure it should be in /usr/bin/tesseract!"
echo "Did you check this?"
# The answer is informational only -- installation continues either way.
read -p "Continue (y/n)?" choice
case "$choice" in
y|Y ) echo "yes, bingo! :)";;
n|N ) echo "no -ehhh some bad thing might happen";;
* ) echo "invalid - wow";;
esac
sleep 3
sudo apt-get install python-tk
sudo apt-get clean
echo "Clean ......"
sleep 3
echo "Install your python dependences"
sudo pip3 install -U pytesseract
echo "Install libraries needed for feature extraction"
sudo pip3 install -U beautifulsoup4
sudo pip3 install -U autocorrect
sudo pip3 install -U nltk
echo "Begin to download nltk data"
sleep 5
python3 -m nltk.downloader all
echo "Install libraries needed for machine learning"
sleep 3
sudo pip3 install -U numpy
sudo pip3 install -U scipy
sudo pip3 install -U scikit-learn==0.18.2
echo "Done!" | true |
7c5ced5dfeb5d0f73b23ad5a878e8b80b9faa9c0 | Shell | memes/home | /bin/go_refresh.sh | UTF-8 | 1,363 | 3.359375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#
# Install common go packages to local library
set -e

# local_lib_path is a profile-provided helper; source it on demand.
command -v local_lib_path >/dev/null 2>/dev/null || . ~/.profile.d/functions/local_lib_path
_GOPATH=$(local_lib_path go)
mkdir -p "${_GOPATH}"

# Resolve a concrete echo binary, falling back to the builtin name.
# (The original `ECHO="$(which echo)" || "echo"` executed `echo` as a
# command on failure instead of assigning the fallback.)
ECHO=$(command -v echo) || ECHO="echo"

# Install/update packages.  The heredoc is the package list; the awk
# filter drops blank lines and comments before the read loop consumes
# "package [flag]" pairs.  ${f} is intentionally unquoted: it is usually
# empty and must then disappear from the go command line.
awk '!/^($|#)/ {print}' <<EOF |
# Format of list
#
# package [flag]
#
# VS Code required extensions that aren't installed by ~/.Brewfile
# See https://github.com/golang/vscode-go/blob/master/docs/tools.md
golang.org/x/tools/gopls@latest
github.com/go-delve/delve/cmd/dlv@latest
honnef.co/go/tools/cmd/staticcheck@latest
github.com/uudashr/gopkgs/v2/cmd/gopkgs@latest
github.com/ramya-rao-a/go-outline@latest
github.com/haya14busa/goplay/cmd/goplay@latest
github.com/fatih/gomodifytags@latest
github.com/josharian/impl@latest
github.com/cweill/gotests/gotests@master
# Still need to use this on occasion
golang.org/x/tools/cmd/godoc@latest
# crane and gcrane
github.com/google/go-containerregistry/cmd/crane@latest
github.com/google/go-containerregistry/cmd/gcrane@latest
# gofumpt
mvdan.cc/gofumpt@latest
# Codelabs/claat
github.com/googlecodelabs/tools/claat@latest
# Vulnerability checker
golang.org/x/vuln/cmd/govulncheck@latest
EOF
while read p f; do
${ECHO} "Fetching/updating ${p}"
env GOPATH=${_GOPATH} go install ${f} ${p} && \
${ECHO} "${p} done"
done

unset _GOPATH
| true |
1131fe8227b780577961060a0e3198313e87172e | Shell | omega-icl/mcpp | /src/3rdparty/cpplapack-2015.05.11-1/benchmark/do | UTF-8 | 870 | 3.328125 | 3 | [] | no_license | #!/bin/sh
# Walk every subdirectory (skipping .svn) and, unless a SUCCEEDED marker is
# already present, build and run its main.cpp benchmark; a failure anywhere
# in the make/run chain aborts the whole sweep.
rootdir=`pwd`
MAKEFILE=$HOME/local/cpplapack/makefiles/Makefile
for i in `find * -type d | grep -v .svn`; do
if [ -d $i ]; then
echo "################ Enter into $i/ ################"
cd $i
if [ -f SUCCEEDED ]; then
echo "======== Skipping cause already succeeded ========"
elif [ -f main.cpp ]; then
echo "======== Making ========" &&\
make -f $MAKEFILE &&\
echo "======== Executing ./A.OUT ========" &&\
./A.OUT &&\
make -f $MAKEFILE fullclean
if [ $? != 0 ]; then exit 1; fi
echo "======== Succeeded ========"
touch SUCCEEDED
else
echo "======== No main.cpp ========"
fi
cd $rootdir
echo "################ Exit from $i/ ################"
fi
done
| true |
b3c67b40f64dc7bbbcf6b8ca801a185e32ef7446 | Shell | lianfeng30/work-scripts | /script/check_eth_traffic.sh | UTF-8 | 2,209 | 3.765625 | 4 | [] | no_license | #!/bin/sh
###################################################
#
# Type : Performance Monitor Plugin
# Function : net traffic monitor
# Usage : ./check_eth_traffic.sh
# Creator : Date :
# Modifier : Date :
#
###################################################
# Nagios-style check: computes bytes/sec in and out for an interface by
# diffing /proc/net/dev counters against the previous run's snapshot kept
# in /tmp/c_traffic<iface>.  Exit codes: 0 OK, 1 warning, 2 critical.
export LANG=en_US.UTF-8
OSNAME=`uname -a |awk '{print $1}' `
ethname=$1
WARNING_PARAMS=$2
CRITICAL_PARAMS=$3
TIMEOUT=$4
IP=$5
# Mandatory arguments: interface name plus both thresholds (bytes/sec).
if [ -z "$WARNING_PARAMS" -o -z "$CRITICAL_PARAMS" -o -z "$ethname" ]
then
echo "ERROR-parameter is error ";
echo "$BASENAME ethname threshold_warning threshold_critical "" "" ";
exit 0;
fi
# First run for this interface: no snapshot file yet, so rates cannot be
# computed; report OK with zeroes after writing the initial snapshot.
is_exit=0
###########begin ################
if [ ! -f /tmp/c_traffic$ethname ]; then
is_exit=1
else
last_time=`cat /tmp/c_traffic$ethname|awk '{print $1}'`
last_recv=`cat /tmp/c_traffic$ethname|awk '{print $2}'`
last_tran=`cat /tmp/c_traffic$ethname|awk '{print $3}'`
fi
# /proc/net/dev fields after the "iface:" prefix: $1 = rx bytes, $9 = tx bytes.
update_time=`date +%s`
update_recv=`cat /proc/net/dev|sed 's/^ *//g'|grep ^$ethname |cut -d ':' -f 2|tr -s ' '|sed 's/^ //g'|awk '{print $1}'`
update_tran=`cat /proc/net/dev|sed 's/^ *//g'|grep ^$ethname |cut -d ':' -f 2|tr -s ' '|sed 's/^ //g'|awk '{print $9}'`
echo "$update_time $update_recv $update_tran" > /tmp/c_traffic$ethname
if [ $is_exit -eq 1 ]; then
echo "OK - $ethname in traffic 0;out traffic 0"
exit 0
fi
# Rate = (counter delta) / (time delta), truncated to an integer.
# NOTE(review): two runs within the same second divide by zero in awk.
RECV_BYTES=`echo $update_recv $last_recv $update_time $last_time | awk '{total=($1-$2) / ($3-$4)}{printf "%d\n", total}'`
TRAN_BYTES=`echo $update_tran $last_tran $update_time $last_time | awk '{total=($1-$2) / ($3-$4)}{printf "%d\n", total}'`
if [ -z "$RECV_BYTES" -o -z "$TRAN_BYTES" ]
then
echo "ERROR-$ethname is not exists"
exit 1
elif [ $RECV_BYTES -gt $CRITICAL_PARAMS -o $TRAN_BYTES -gt $CRITICAL_PARAMS ]
then
echo "Critical - $ethname in traffic $RECV_BYTES;out traffic $TRAN_BYTES"
exit 2
elif [ $RECV_BYTES -gt $WARNING_PARAMS -o $TRAN_BYTES -gt $WARNING_PARAMS ]
then
echo "Warning - $ethname in traffic $RECV_BYTES;out traffic $TRAN_BYTES"
exit 1
else
echo "OK - $ethname in traffic $RECV_BYTES;out traffic $TRAN_BYTES"
exit 0
fi
| true |
73747b246fc2181c905f8f2f82f2730de612d5f4 | Shell | andrewDDC/HomeConfig | /scripts/pmicrorun | UTF-8 | 750 | 2.890625 | 3 | [] | no_license | #!/bin/sh
# Run the Hexagon simulator over every *.exe in the cwd; $1 is a free-form
# mode string matched by substring (cache/debug/valgrind/hexgdb/notiming).
export ARCHSTRING=" "
MOREARGS=" "
SIM_PATH=/local/ssd/dev/arch/bin/sim
REV="--core V60A_512"
if [[ "$1" == *cache* ]]; then
# NOTE(review): the leading ':' after '=' looks like a typo -- it embeds a
# literal colon at the start of ARCHSTRING; confirm against the sim's
# expected format before changing.
export ARCHSTRING=:"$ARCHSTRING --cachedebugfile cache.log"
fi
# Choose the simulator wrapper: gdb, valgrind/cachegrind, or bare.
if [[ "$1" == *debug* ]]; then
SIM_BIN="gdb --args $SIM_PATH"
elif [[ "$1" == *valgrind* ]]; then
SIM_BIN="valgrind --tool=cachegrind $SIM_PATH"
else
SIM_BIN="$SIM_PATH"
fi
if [[ "$1" == *hexgdb* ]]; then
MOREARGS="--interactive $MOREARGS"
fi
# Timing is ON by default; pass "notiming" to disable it.
if [[ "$1" != *notiming* ]]; then
MOREARGS="--timing $MOREARGS"
fi
$SIM_BIN $MOREARGS $REV --quiet --uarchtrace utrace.log *.exe
#$SIM_BIN --timing --quiet *.exe
# Do some common analysis every time
/prj/dsp/qdsp6/arch/v60/mtools/v60_latest/gnu/bin/hexagon-objdump -d *.exe > asm.s &
| true |
7dbcbfc4ae6bf0a76cf58c4e4fe2a036d3eb050f | Shell | rafalsz98/PwsL | /LAB7/script.sh | UTF-8 | 122 | 2.546875 | 3 | [] | no_license | #!/usr/bin/bash
# Interval (seconds) between kill attempts, taken from the first CLI arg.
sleepTime=$1
# Repeatedly run the kill helper against cw1_p1_v1 forever.  The original
# condition `[ -$x -eq 0 ]` (with x=0) was a typo that evaluated "-0 == 0"
# and so always looped; the intent -- an endless loop -- is made explicit.
while true; do
./cmake-build-debug/kill cw1_p1_v1
sleep "${sleepTime}"
done
done | true |
8133579a8e6ca5e6e254cacec42c334acc26003d | Shell | demorenoc/setup | /setup.sh | UTF-8 | 4,377 | 2.890625 | 3 | [] | no_license | # Usual update/upgrade
sudo apt-get update
sudo apt-get upgrade
# Set the Ubunutu RELEASE and CODENAME
CODENAME=$(lsb_release -c -s)
RELEASE=$(lsb_release -r -s | cut -f1 -d.)
# Add some repos
## Canonical partners
sudo sed -i "/^# deb .*partner/ s/^# //" /etc/apt/sources.list
## R - CRAN. See: https://cran.r-project.org/bin/linux/ubuntu/
## (fixed: the original passed "deb deb https://..." with a doubled keyword)
sudo add-apt-repository "deb https://cloud.r-project.org/bin/linux/ubuntu $CODENAME-cran40/"
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9
## sublime
sudo add-apt-repository ppa:webupd8team/sublime-text-3
## Texstudio
# sudo apt-add-repository ppa:blahota/texstudio
# Update
sudo apt-get update
# Installing everything:
## First things first
### Git
sudo apt-get install git
git config --global user.name $(whoami)
git config --global user.email "jhondoe@nowhere.who"
### vim
sudo apt-get install vim
### zsh and oh-my-zsh
### (fixed: the original ran "sudo apt-get zsh", which is not a command)
sudo apt-get install zsh
chsh -s $(which zsh)
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
## To mount and read exfat
sudo apt-get install exfat-fuse exfat-utils
## Build tools
sudo apt-get install build-essential
## JDK
sudo apt-get install default-jdk
sed -i '/export PATH=.*$/a\ \ export JAVA_HOME="/usr/bin/java"' ~/.zshrc
# The importatn stuff
## R
sudo apt-get install r-base r-base-dev
## Revolution R Open: https://mran.revolutionanalytics.com/documents/rro/installation/#revorinst-lin
wget https://mran.revolutionanalytics.com/install/RRO-3.2.2-Ubuntu-$RELEASE.4.x86_64.deb
sudo dpkg -i RRO-3.2.2-Ubuntu-$RELEASE.4.x86_64.deb &&
rm RRO-3.2.2-Ubuntu-$RELEASE.4.x86_64.deb
## Math Kernel Library - Enhances RRO
wget https://mran.revolutionanalytics.com/install/RevoMath-3.2.2.tar.gz &&
tar -xzf RevoMath-3.2.2.tar.gz &&
cd RevoMath &&
sudo ./RevoMath.sh &&
cd .. &&
rm RevoMath-3.2.2.tar.gz &&
rm -rf RevoMath
## Set R alternatives by default CRAN R
## To change then just do:
# sudo update-alternatives --config R
sudo update-alternatives --install /usr/bin/R R /usr/lib/R/bin/R 0
sudo update-alternatives --install /usr/bin/R R /usr/lib64/RRO-3.2.2/R-3.2.2/bin/R -1
## RStudio
## (fixed: the cleanup removed "...deb.deb", a filename that never existed)
wget https://download1.rstudio.org/desktop/bionic/amd64/rstudio-1.4.1106-amd64.deb &&
sudo dpkg -i rstudio-1.4.1106-amd64.deb &&
rm rstudio-1.4.1106-amd64.deb
## LaTeX
# sudo apt-get install texlive-full
# sudo apt-get texstudio
## Pandoc (from source)
# sudo apt-get install haskell-platform
# cabal update
# cabal install pandoc
## Add path to profile. Restart the machine to get the PATH loaded
# echo 'export PATH="$PATH:$HOME/.cabal/bin"' >> ~/.profile
## pandoc - symbolic link to RStudio's precompiled binaries
## see: https://github.com/rstudio/rmarkdown/blob/master/PANDOC.md
sudo ln -s /usr/lib/rstudio/bin/pandoc/pandoc /usr/local/bin
sudo ln -s /usr/lib/rstudio/bin/pandoc/pandoc-citeproc /usr/local/bin
## Python stuff - PyCharm IDE
cd /opt &&
sudo wget https://download.jetbrains.com/python/pycharm-community-5.0.2.tar.gz &&
sudo tar xfz pycharm-community-5.0.2.tar.gz &&
sudo rm pycharm-community-5.0.2.tar.gz
# To run and configure: /opt/pycharm-community-4.5.3/bin/pycharm.sh
## ruby stuff
wget -O ruby-install-0.5.0.tar.gz https://github.com/postmodern/ruby-install/archive/v0.5.0.tar.gz &&
tar -xzvf ruby-install-0.5.0.tar.gz &&
cd ruby-install-0.5.0/ &&
sudo make install &&
cd .. &&
rm ruby-install-0.5.0.tar.gz
ruby-install -V
ruby-install ruby 2.2.0
wget -O chruby-0.3.9.tar.gz https://github.com/postmodern/chruby/archive/v0.3.9.tar.gz &&
tar -xzvf chruby-0.3.9.tar.gz &&
cd chruby-0.3.9/ &&
sudo make install &&
cd .. &&
rm chruby-0.3.9.tar.gz
echo 'source /usr/local/share/chruby/chruby.sh' >> ~/.bashrc
echo 'source /usr/local/share/chruby/auto.sh' >> ~/.bashrc
# (fixed: the original sourced ".bashrc" relative to the cwd, which at this
# point is not $HOME)
source ~/.bashrc
touch ~/.ruby-version
echo 'ruby-2.2.0' >> ~/.ruby-version
chruby
ruby -v
gem install jekyll
# Other stuff
## Dropbox
# sudo apt-get install nautilus-dropbox
## sublime and okular
sudo apt-get install sublime-text-installer okular
## Tabula: a program to extract tables from PDFs http://tabula.technology/
## https://github.com/tabulapdf/tabula
# cd /opt
# sudo wget https://github.com/tabulapdf/tabula/releases/download/v0.9.7/tabula-jar-0.9.7.zip
# sudo unzip tabula-jar-0.9.7.zip
## some other stuff
sudo apt-get install adobe-flashplugin
# sudo vlc skype
sudo apt-get update
sudo apt-get upgrade
sudo apt-get autoremove
| true |
3913740832b6aa45f6e544c7e5acbe27873451f7 | Shell | jbw/dotfiles | /install_configs.sh | UTF-8 | 2,069 | 3.765625 | 4 | [] | no_license | #! /bin/bash
{
set -euo pipefail
# Lifted from https://github.com/kalbasit/shabka/blob/8f6ba74a9670cc3aad384abb53698f9d4cea9233/os-specific/darwin/setup.sh#L22
# Authenticate sudo once, then keep the timestamp fresh from a background
# loop that exits when this script's process ($$) goes away.
sudo_prompt() {
echo "Please enter your password for sudo authentication"
sudo -k
sudo echo "sudo authentication successful!"
while true; do
sudo -n true
sleep 60
kill -0 "$$" || exit
done 2>/dev/null &
}
# Install nix-darwin unless `darwin-rebuild` is already on PATH; the piped
# "n/y/y/y/y" answers the installer's interactive prompts.
install_nix_darwin() {
if command -v darwin-rebuild &>/dev/null; then
echo "Already installed Nix Darwin."
else
echo "Installing Nix Darwin..."
nix-build https://github.com/LnL7/nix-darwin/archive/master.tar.gz -A installer --out-link /tmp/nix-darwin
sudo mv /etc/nix/nix.conf /etc/nix/nix.conf.backup-before-nix-darwin
printf "n\ny\ny\ny\ny" | /tmp/nix-darwin/bin/darwin-installer
fi
}
# Clone the dotfiles repo into ~/.nixpkgs.  Setting
# JBW_DOTFILES_FRESH_CONFIG_INSTALL=true forces a wipe-and-reclone;
# otherwise an existing checkout (detected via .git/config) is kept.
install_my_configs() {
repository="https://github.com/jbw/dotfiles.git"
target="$HOME/.nixpkgs"
if [ ! -z ${JBW_DOTFILES_FRESH_CONFIG_INSTALL+x} ]; then
if [ "$JBW_DOTFILES_FRESH_CONFIG_INSTALL" == "true" ]; then
echo "Installing configs from fresh..."
rm -rf "$target"
mkdir -p "$target"
git clone -b "main" "$repository" "$target"
return
fi
fi
if cat "$target/.git/config" &>/dev/null; then
echo "Already installed JBW's configs."
else
echo "Installing JBW's configs..."
rm -rf "$target"
git clone -b "main" "$repository" "$target"
fi
}
# Back up any pre-existing /etc/{shells,bashrc,zshrc} (real files only, not
# symlinks nix-darwin already owns), refresh this shell's environment, and
# run darwin-rebuild against the cloned configuration.
build() {
echo "Building..."
for filename in shells bashrc zshrc; do
# Fixed: the original used `filepath="/etc/$(unknown)"`, which runs a
# nonexistent command (aborting under set -e) and ignored the loop var.
filepath="/etc/${filename}"
if [ -f "${filepath}" ] && [ ! -L "${filepath}" ]; then
sudo mv "${filepath}" "${filepath}.backup-before-nix-darwin"
fi
done
# Update local shell (the sourced file may reference unset vars, so
# temporarily relax `set -u`).
set +u
source /etc/static/bashrc
set -u
# Rebuild
export NIX_PATH=$HOME/.nix-defexpr/channels:$NIX_PATH
darwin-rebuild switch -I "darwin-config=$HOME/.nixpkgs/darwin-configuration.nix"
}
# Main sequence: keep sudo alive, install nix-darwin, fetch configs, build.
sudo_prompt
install_nix_darwin
install_my_configs
build
}
| true |
5bef50ba6632da3841c89b222fa5f068519d4d3a | Shell | kmgreen2/local-systems | /old/ethereum/start_miner.sh | UTF-8 | 680 | 3.46875 | 3 | [] | no_license | #!/bin/bash
set -e

# Poll the bootstrap service for the boot-node address.  A failing curl
# must not kill the script under `set -e` -- the original aborted on the
# first connection error, defeating this retry loop -- hence `|| true`.
# Give up after a handful of attempts.
ETH_BOOT_NODE=""
NUM_TRIES=0
while [[ -z "$ETH_BOOT_NODE" ]]; do
ETH_BOOT_NODE=$(curl http://ethereum-boot.ethereum.svc.cluster.local:80 || true)
if (( NUM_TRIES > 3 )); then
echo "Could not connect to the bootstrap server: ethereum-boot.ethereum.svc.cluster.local:80"
exit 1
fi
NUM_TRIES=$(( NUM_TRIES + 1 ))
done

# Create a throwaway miner account protected by a static password and
# extract its 40-hex-char address for --etherbase.
echo "changeme" > "$ETH_DATA_DIR/.miner.pwd"
export ETHERBASE=$(geth account new --password "$ETH_DATA_DIR/.miner.pwd" | egrep -o '[0-9a-z]{40}')

# The command line is built as a string so the spawned sh parses the
# log redirection; variables are expanded here.
CMD="geth --datadir="$ETH_DATA_DIR" --networkid 1337 --bootnodes $ETH_BOOT_NODE --mine --minerthreads=1 --etherbase=${ETHERBASE} > $ETH_LOG_DIR/miner.log 2>&1"
exec sh -c "$CMD"
| true |
1ddf0f178badb37a8eebe4addeadf61d7d48b132 | Shell | stephenshizl/scripts | /install/server/init.sh | UTF-8 | 763 | 2.875 | 3 | [] | no_license | #!/bin/sh
#update first
yum update -y
# 32- vs 64-bit decides which pam_limits.so path to register below.
bit=`getconf LONG_BIT`
#file descriptor limits
echo "* soft nofile 655360" >> /etc/security/limits.conf
echo "* hard nofile 655360" >> /etc/security/limits.conf
# NOTE(review): `==` inside [ ] is a bashism; fine under bash, not dash,
# despite the #!/bin/sh shebang.
if [ $bit == 32 ]; then
echo "session required /lib/security/pam_limits.so" >> /etc/pam.d/login
else
echo "session required /lib64/security/pam_limits.so" >> /etc/pam.d/login
fi
# Disable transparent hugepages via an init script plus a tuned profile
# (both files are expected in the current directory).
cp disable-transparent-hugepages /etc/init.d/
chmod 755 /etc/init.d/disable-transparent-hugepages
chkconfig --add disable-transparent-hugepages
mkdir /etc/tuned/no-thp
cp tuned.conf /etc/tuned/no-thp/
tuned-adm profile no-thp
#never is right or try to reboot the machine
#cat /sys/kernel/mm/transparent_hugepage/enabled
#cat /sys/kernel/mm/transparent_hugepage/defrag
| true |
b346140a2f09541baeb0f4f40d9bfe531dc51850 | Shell | sashraja/nightwatch-docker-grid | /bin/test | UTF-8 | 546 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Starts nightwatch execution of all specs.
#
# See the 'nightwatch' service config in docker-compose.yml to see which script
# the container runs.
set -e
container=nightwatch_grid_results
# Cleanup runs on exit/INT/TERM: copy test artifacts out of the container,
# remove it, and point at the screenshots when the run failed (exit 1).
# $? is captured first so later commands don't clobber the test status.
function cleanup() {
exit_code=$?
docker cp $container:/home/docker/app/tests_output .
docker rm -fv $container > /dev/null
if [[ "$exit_code" == "1" ]]; then
printf "\n Test failure! View screenshots in tests_output/screenshots. \n"
fi
}
trap cleanup INT TERM EXIT
docker-compose run --name $container nightwatch
| true |
39fc5d742df8ceeda2169a35e3b3d5c7e5f2d7a6 | Shell | felipegerard/metodos-analiticos | /process/txt_2_json.sh | UTF-8 | 911 | 2.65625 | 3 | [] | no_license | #! /bin/bash
# Convert plain text on stdin into a JSON object: keys are ALL-CAPS section
# headings, values are the section body with newlines encoded as <br> and
# punctuation replaced by named tokens (<punto>, <coma>, ... -- Spanish
# names) so the value is safe inside a JSON string.
# The whole program is one continued pipeline, so comments cannot be
# interleaved between stages; stage overview:
#   1. strip CRs, flatten newlines to '|', blank lines become <br> <br>
#   2. re-split so each ALL-CAPS heading starts its own record
#   3. keep only records beginning with an ALL-CAPS heading
#   4. tokenise punctuation and drop any other non-alphanumeric character
#   5. rewrite 'HEADING <br> body' as '"HEADING":"body"', add commas on
#      all but the last member, and wrap the members in { }
tr -d '\r' \
| tr '\n' '|' \
| sed -e 's/||/ <br> <br> /g' \
-e 's/|/ <br> /g' \
| sed -E -e 's/ <br> <br> ([A-Z]+;? )/||\1/g' \
| tr '|' '\n' \
| grep -E "^[A-Z]+;?" \
| sed -e 's/\./<punto>/g' \
-e 's/,/<coma>/g' \
-e 's/:/<dos_puntos>/g' \
-e 's/;/<punto_coma>/g' \
-e 's/\*/<asterisco>/g' \
-e 's/"/<comillas_dobles>/g' \
-e "s/'/<comillas_simples>/g" \
-e 's/`/<backtick>/g' \
-e 's/#/<gato>/g' \
-e 's/\[/<abre_corchetes>/g' \
-e 's/\]/<cierra_corchetes>/g' \
-e 's/(/<abre_parent>/g' \
-e 's/)/<cierra_parent>/g' \
-e 's/-/<guion>/g' \
-e 's/&/<ampersand>/g' \
-e 's/\//<diagonal>/g' \
-e 's/[^0-9a-zA-Z_<> ]//g' \
| sed -E -e 's/^([A-Z]+)(<punto_coma> [A-Z]+)? <br> /"\1\2":"/' \
-e 's/$| $/"/' \
| grep '^"' \
| sed -e '$ ! s/$/,/' \
| awk 'BEGIN {print "{"} {print} END {print "}"}'
| true |
506fa9e7dec27c15d5b4b660d2340d5e3c95564c | Shell | fr34k8/prowler | /checks/check_extra7152 | UTF-8 | 2,561 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Prowler - the handy cloud security tool (copyright 2019) by Toni de la Fuente
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# Remediation:
#
# https://docs.aws.amazon.com/cli/latest/reference/route53domains/update-domain-contact-privacy.html
#
# update-domain-contact-privacy \
# --region us-east-1 \
# --domain-name example.com \
# --admin-privacy \
# --registrant-privacy \
# --tech-privacy
# Prowler check metadata consumed by the framework (ID, title, severity,
# ASFF resource type, remediation/doc links).
CHECK_ID_extra7152="7.152"
CHECK_TITLE_extra7152="[extra7152] Enable Privacy Protection for for a Route53 Domain (us-east-1 only)"
CHECK_SCORED_extra7152="NOT_SCORED"
CHECK_CIS_LEVEL_extra7152="EXTRA"
CHECK_SEVERITY_extra7152="Medium"
CHECK_ASFF_RESOURCE_TYPE_extra7152="AwsRoute53Domain"
CHECK_ALTERNATE_check7152="extra7152"
CHECK_SERVICENAME_extra7152="route53"
CHECK_RISK_extra7152='Without privacy protection enabled; ones personal information is published to the public WHOIS database'
CHECK_REMEDIATION_extra7152='Ensure default Privacy is enabled'
CHECK_DOC_extra7152='https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-privacy-protection.html'
CHECK_CAF_EPIC_extra7152='Data Protection'
# For each registered Route53 domain, fail when AdminPrivacy is false.
# $AWSCLI/$PROFILE_OPT and textPass/textFail/textInfo come from the
# prowler framework that sources this file.
extra7152(){
# Route53 is a global service, looking for domains in US-EAST-1
# this is also valid for GovCloud https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/setting-up-route53.html
DOMAIN_NAMES=$($AWSCLI route53domains list-domains $PROFILE_OPT --region us-east-1 --query 'Domains[*].DomainName' --output text )
if [[ $DOMAIN_NAMES ]];then
for domain_name in $DOMAIN_NAMES;do
DOMAIN_DETAIL=$($AWSCLI route53domains get-domain-detail $PROFILE_OPT --region us-east-1 --query 'AdminPrivacy' --domain-name $domain_name)
if [[ $DOMAIN_DETAIL == false ]]; then
textFail "us-east-1: Contact information public for: $domain_name" "us-east-1" "$domain_name"
else
textPass "us-east-1: All contact information is private for: $domain_name" "us-east-1" "$domain_name"
fi
done
else
textInfo "us-east-1: No Domain Names found" "us-east-1"
fi
}
0bccbca8d0f45e9ef87c41dec0a3a452c5a88e72 | Shell | marios/tripleo-ci | /scripts/te-broker/destroy-env | UTF-8 | 2,893 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# Tear down one OVB test environment (ENVNUM defaults to the epoch time):
# remove its generated env files, then delete its neutron ports and heat
# stacks below.
set -xe
ENVNUM=${1:-$(date +%s)}
PROVISIONNET=provision-${ENVNUM}
PUBLICNET=public-${ENVNUM}
ENVFILE=env-${ENVNUM}-base.yaml
COMPUTE_ENVFILE=env-${ENVNUM}-compute.yaml
EXTRA_ENVFILE=env-${ENVNUM}-extra.yaml
rm -f /opt/stack/openstack-virtual-baremetal/$ENVFILE
rm -f /opt/stack/openstack-virtual-baremetal/env-${ENVNUM}.yaml
rm -f /opt/stack/openstack-virtual-baremetal/$COMPUTE_ENVFILE
rm -f /opt/stack/openstack-virtual-baremetal/$EXTRA_ENVFILE
rm -f /opt/stack/openstack-virtual-baremetal/temp-key-$ENVNUM.pub
# Credentials file may contain secrets: suppress tracing while sourcing.
set +x
source /etc/nodepoolrc
set -x
# NOTE(bnemec): This function starts the port deletions in the background.
# To ensure they complete before you proceed, you must call "wait" after
# calling this function.
function delete_ports {
local subnetid=${1:-}
if [ -z "$subnetid" ]; then
return
fi
for PORT in $(neutron port-list | grep $subnetid | awk '{print $2}') ; do
neutron port-delete $PORT &
done
}
# Save the end of the bmc log for debugging IPMI connectivity problems
PYTHONIOENCODING='utf-8'
CONSOLE_LOG_PATH=/var/www/html/tebroker/console-logs/
nova console-log bmc-${ENVNUM} | tail -n 100 | awk -v envnum="$ENVNUM" '$0=envnum ": " $0' >> /var/log/bmc-console-logs
# Save all the consoles in the stack to a dedicated directory, stripping out ANSI color codes.
for server in $(openstack server list -f value -c Name | grep baremetal-${ENVNUM}) bmc-$ENVNUM ; do
    openstack console log show $server | sed 's/\[[0-9;]*[a-zA-Z]//g' | gzip > $CONSOLE_LOG_PATH/$server-console.log.gz || true
done
# Delete the ports that have been attached to the undercloud
# (the public subnet may not exist, hence the "|| echo ''" fallback).
SUBNETID=$(neutron subnet-show $PUBLICNET | awk '$2=="id" {print $4}' || echo '')
delete_ports $SUBNETID
SUBNETID=$(neutron subnet-show $PROVISIONNET | awk '$2=="id" {print $4}')
delete_ports $SUBNETID
# Needed to ensure all ports have been deleted before we delete the heat stack
# (delete_ports runs each deletion in the background).
wait
# If there was a keypair for this specific run, delete it.
openstack keypair delete "tripleo-ci-key-$ENVNUM" || true
# delete_stack STACKNAME: delete a heat stack and block until it is gone,
# retrying both the initial delete and any DELETE_FAILED state.
function delete_stack {
    local stackname=$1
    # Nothing to do if the specified stack doesn't exist
    if ! heat stack-show "$stackname"; then
        return 0
    fi
    # NOTE(bnemec): I'm periodically seeing the stack-delete fail to connect to
    # Heat.  It looks like a transient network issue, so let's just retry when it happens.
    for i in $(seq 10); do
        heat stack-delete -y "$stackname" && break
        sleep 5
    done
    # Poll until the stack no longer exists.  BUGFIX: the original used
    # "2>&1 > /dev/null", which redirects stderr to the terminal and only
    # stdout to /dev/null; "> /dev/null 2>&1" silences both as intended.
    while heat stack-show "$stackname" > /dev/null 2>&1 ; do
        # If the delete failed, try again
        if heat stack-show "$stackname" | grep DELETE_FAILED ; then
            heat stack-delete -y "$stackname" || true
        fi
        sleep 20
    done
}
# Extra role stacks must be deleted first
# (they hold resources that depend on the base baremetal stack).
delete_stack baremetal_${ENVNUM}-extra
delete_stack baremetal_${ENVNUM}-compute
delete_stack baremetal_${ENVNUM}
| true |
3624f6e9cca28b4d8ba6dd36d2b70c30b30d6e14 | Shell | DataDog/dd-trace-java | /.circleci/collect_reports.sh | UTF-8 | 2,188 | 4.15625 | 4 | [
"Apache-2.0",
"UPL-1.0",
"MIT",
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
# Save all important reports into (project-root)/reports
# This folder will be saved by circleci and available after test runs.
# Abort on the first failing command.
set -e
#Enable '**' support
shopt -s globstar
# Option parsing: --destination DIR, --move, --delete.
# Defaults: copy mode into ./reports.
REPORTS_DIR=./reports
MOVE=false
DELETE=false
while [[ $# -gt 0 ]]; do
  opt=$1
  shift # consume the option itself
  case "$opt" in
    --destination)
      # the next word is the target directory
      REPORTS_DIR=$1
      shift # consume the value
      ;;
    --move)
      MOVE=true
      ;;
    --delete)
      DELETE=true
      ;;
    *)
      echo "Unknown option $opt"
      exit 1
      ;;
  esac
done
mkdir -p $REPORTS_DIR >/dev/null 2>&1
# Collect JVM crash/diagnostic artifacts from /tmp; "|| true" keeps the
# script going under "set -e" when a given artifact type is absent.
cp /tmp/hs_err_pid*.log $REPORTS_DIR || true
cp /tmp/java_pid*.hprof $REPORTS_DIR || true
cp /tmp/javacore.* $REPORTS_DIR || true
cp /tmp/*.trc $REPORTS_DIR || true
cp /tmp/*.dmp $REPORTS_DIR || true
cp /tmp/dd-profiler/*.jfr $REPORTS_DIR || true
# process_reports PROJECT: depending on the global DELETE/MOVE flags,
# delete, move, or (default) copy the project's build reports and JVM
# crash files from workspace/PROJECT/build into $REPORTS_DIR/PROJECT.
function process_reports () {
  project_to_save=$1
  report_path=$REPORTS_DIR/$project_to_save
  if [ "$DELETE" = true ]; then
    echo "deleting reports for $project_to_save"
    rm -rf workspace/$project_to_save/build/reports/* || true
    rm -rf workspace/$project_to_save/build/hs_err_pid*.log || true
    rm -rf workspace/$project_to_save/build/javacore*.txt || true
  elif [ "$MOVE" = true ]; then
    echo "moving reports for $project_to_save"
    mkdir -p $report_path
    mv -f workspace/$project_to_save/build/reports/* $report_path/ || true
    mv -f workspace/$project_to_save/build/hs_err_pid*.log $report_path/ || true
    mv -f workspace/$project_to_save/build/javacore*.txt $report_path/ || true
  else
    echo "copying reports for $project_to_save"
    mkdir -p $report_path
    cp -r workspace/$project_to_save/build/reports/* $report_path/ || true
    cp workspace/$project_to_save/build/hs_err_pid*.log $report_path/ || true
    cp workspace/$project_to_save/build/javacore*.txt $report_path/ || true
  fi
}
# Find every workspace/<project>/build/reports dir (globstar '**') and strip
# the path back to the bare <project> name before handing it off.
shopt -s globstar
for report_path in workspace/**/build/reports; do
  report_path=${report_path//workspace\//}
  report_path=${report_path//\/build\/reports/}
  process_reports $report_path
done
tar -cvzf reports.tar $REPORTS_DIR
| true |
8be7c337dba86619d8928fa090ee746020b5724c | Shell | ChrisCummins/t4 | /Documentation/mkxml.sh | UTF-8 | 3,706 | 3.984375 | 4 | [
"MIT-0",
"MIT"
] | permissive | #!/bin/bash
# mkxml.sh - run `./mkxml.sh --help` for usage information.
# Lines carrying this comment prefix mark m4 definitions to export.
export_prefix="dnl EXPORT"
# print_usage: write a short usage summary to stdout.
print_usage() {
  echo "Usage: $(basename $0) <xml> <m4-file ...>"
  echo ""
  echo "Generate a file <xml> containing an xml tree of the"
  echo "set of m4 input files <m4-file ...>."
}
# to_xml TEXT...: emit TEXT with embedded newlines collapsed to <br /> tags.
# NOTE(review): the sed replacement sides below look like they should emit
# XML entities (&amp; &lt; &gt; &quot;) but appear to have lost that text in
# this copy - verify against the upstream source before relying on them.
to_xml() {
  echo -e "$@" | sed 's/\&/\&/g' | sed ':a;N;$!ba;s/\n/\<br \/\>/g' | sed 's/</\</g' | sed 's/>/\>/g' | sed 's/["]/\"/g'
}
# xml_cat FILE: emit FILE's contents with newlines collapsed to <br /> tags.
# NOTE(review): the sed replacement sides appear to have lost their XML
# entity text in this copy - verify against the upstream source.
xml_cat() {
  cat "$1" | sed 's/\&/\&/g' | sed ':a;N;$!ba;s/\n/\<br \/\>/g' | sed 's/</\</g' | sed 's/>/\>/g' | sed 's/["]/\"/g'
}
# Require at least the output file and one input file.
if [ -z "$1" ] || [ -z "$2" ]; then
  print_usage
  exit 1
fi
case "$1" in
  "-h" | "--help" )
    print_usage
    exit 0
    ;;
esac
# First argument is the output XML path; the rest are m4 input files.
xml="$1"
shift
echo '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>' > $xml
for file in $@; do
  test ! -f "$file" && { echo "file '$file' does not exist!" >&2; exit 1; }
  # Line numbers of every "dnl EXPORT"-marked definition in this file.
  export_lines="$(grep -s -n -E "$export_prefix" "$file" | cut -d ':' -f1)"
  if [ -z "$export_lines" ]; then
    echo "warning: no exported definitions found in '$file'"
  fi
  pushd $(dirname "$file") > /dev/null
  path=`pwd`
  popd > /dev/null
  echo "<m4 file=\"$path/$(basename $file)\">" >> $xml
  for l in $export_lines; do
    linenum=$((l+1))
    line="$(sed -n "$linenum"p $file)"
    # "@var" on the EXPORT line marks a value; anything else is a proceedure.
    if [[ "$(sed -n "$l"p $file | grep '@var')" != "" ]]; then
      type="value"
    else
      type="proceedure"
    fi
    sed -n "$l"p $file | sed -r 's/\s*dnl\s+T4_EXPORT(\s+@var\s*)?//' > docs.comment.tmp
    # Accumulate the following "dnl" comment lines as documentation.
    while [[ "$(echo "$line" | grep -E '^\s*dnl')" != "" ]]; do
      echo $line | sed -r 's/\s*(dnl\s+)?//' >> docs.comment.tmp
      linenum=$((linenum+1))
      line="$(sed -n "$linenum"p $file)"
    done
    # Split the comment block into description and @param lines.
    cat docs.comment.tmp | sed '/^$/d' | sed -r 's/^\s+//' | grep -v -E '^\s*@param\s+' > docs.comment.tmp.tmp
    mv -f docs.comment.tmp.tmp docs.desc.tmp
    cat docs.comment.tmp | grep -E '^\s*@param\s+' | sed -r 's/^\s*@param\s+//' > docs.param.tmp
    name="$(echo "$line" | sed -r 's/^\s*define\(\[//' | sed -r "s/\].+//")"
    sed -n "$linenum"p $file | sed -r 's/^\s*define\(\[//' | sed -r "s/[A-Za-z_0-9]+\]\s*,\s*//" > read.tmp
    cat "$file" | tail -n+$((linenum+1)) >> read.tmp
    echo '' > docs.exp.tmp
    depth=0
    # Scan character-by-character, tracking m4 [ ] quote depth, to find the
    # line on which the exported macro's expansion ends.
    while IFS= read l; do
      while IFS= read -n1 c; do
        if [[ $c == "[" ]]; then
          depth=$((depth+1))
        fi
        if [[ $c == "]" ]]; then
          depth=$((depth-1))
          if [[ $depth = 0 ]]; then
            break
          fi
        fi
      done <<EOF
$l
EOF
      if [[ $depth = 0 ]]; then
        echo "$l" | sed -r 's/\]\),\s*\[\s*(dnl.*)?//' >> docs.exp.tmp
        break
      else
        echo "$l" >> docs.exp.tmp
      fi
    done < read.tmp
    echo "  <macro name=\"$(to_xml $name)\" type=\"$(to_xml $type)\" linum=\"$(to_xml $linenum)\">" >> $xml
    if [[ "$(cat docs.desc.tmp)" != "" ]]; then
      echo "    <description>\"$(xml_cat docs.desc.tmp)\"</description>" >> $xml
    fi
    while read p; do
      echo "    <parameter>$(to_xml $p)</parameter>" >> $xml
    done < docs.param.tmp
    echo "    <expansion>\"$(xml_cat docs.exp.tmp)\"</expansion>" >> $xml
    echo "  </macro>" >> $xml
  done
  echo "</m4>" >> $xml
  rm -f docs.exp.tmp
  rm -f docs.param.tmp
  rm -f docs.desc.tmp
  rm -f docs.comment.tmp
  rm -f read.tmp
done
| true |
8b5d80ee04991a47abd86fe3bf8431a0b02a323c | Shell | larsks/openldap | /contrib/slapd-modules/datamorph/tests/scripts/test001-config | UTF-8 | 6,361 | 2.921875 | 3 | [
"OLDAP-2.8",
"LicenseRef-scancode-proprietary-license",
"BSD-4.3RENO",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-free-unknown"
] | permissive | #! /bin/sh
## $OpenLDAP$
## This work is part of OpenLDAP Software <http://www.openldap.org/>.
##
## Copyright 2016-2022 The OpenLDAP Foundation.
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted only as authorized by the OpenLDAP
## Public License.
##
## A copy of this license is available in the file LICENSE in the
## top-level directory of the distribution or, alternatively, at
## <http://www.OpenLDAP.org/license.html>.
##
## ACKNOWLEDGEMENTS:
## This module was written in 2016 by Ondřej Kuzník for Symas Corp.
echo "running defines.sh"
. $SRCDIR/scripts/defines.sh
. ${SCRIPTDIR}/common.sh
# Each test001-*.ldif holds an intentionally broken overlay configuration;
# ldapmodify must reject it (80 = server reports "other" error).
echo "Applying invalid changes to config (should fail)..."
for CHANGE in data/test001-*.ldif; do
	echo "... $CHANGE"
	. $CONFFILTER $BACKEND $MONITORDB < $CHANGE | \
		$LDAPMODIFY -D cn=config -H $URI1 -y $CONFIGPWF \
		>> $TESTOUT 2>&1
	RC=$?
	case $RC in
	0)
		echo "ldapmodify should have failed ($RC)!"
		test $KILLSERVERS != no && kill -HUP $KILLPIDS
		exit 1
		;;
	80)
		echo "ldapmodify failed ($RC)"
		;;
	*)
		echo "ldapmodify failed ($RC)!"
		test $KILLSERVERS != no && kill -HUP $KILLPIDS
		exit $RC
		;;
	esac
done
# We run this search after the changes above and before restart so we can also
# check the reconfiguration attempts actually had no side effects
echo "Saving search output before server restart..."
echo "# search output from dynamically configured server..." >> $SERVER6OUT
$LDAPSEARCH -b "$BASEDN" -H $URI1 \
	>> $SERVER6OUT 2>&1
RC=$?
if test $RC != 0 ; then
	echo "ldapsearch failed ($RC)!"
	test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit $RC
fi
# Restart the cn=config server to verify the overlay survives a restart.
echo "Stopping slapd on TCP/IP port $PORT1..."
kill -HUP $KILLPIDS
KILLPIDS=""
sleep $SLEEP0
echo "Starting slapd on TCP/IP port $PORT1..."
$SLAPD -F $TESTDIR/confdir -h $URI1 -d $LVL >> $LOG1 2>&1 &
PID=$!
if test $WAIT != 0 ; then
	echo PID $PID
	read foo
fi
KILLPIDS="$PID"
sleep $SLEEP0
# Poll the monitor backend until the restarted server answers.
for i in 0 1 2 3 4 5; do
	$LDAPSEARCH -s base -b "$MONITOR" -H $URI1 \
		'objectclass=*' > /dev/null 2>&1
	RC=$?
	if test $RC = 0 ; then
		break
	fi
	echo "Waiting ${SLEEP1} seconds for slapd to start..."
	sleep ${SLEEP1}
done
echo "Testing slapd.conf support..."
mkdir $TESTDIR/conftest $DBDIR2
# Build a slapd.conf that loads the datamorph module and includes its
# overlay configuration, then append a config database with a root password.
. $CONFFILTER $BACKEND $MONITORDB < $CONFTWO \
	| sed -e '/^argsfile.*/a\
moduleload ../datamorph.la' \
	-e '/database.*monitor/i\
include data/datamorph.conf' \
	> $CONF2
echo "database config" >>$CONF2
echo "rootpw `$SLAPPASSWD -T $CONFIGPWF`" >>$CONF2
$SLAPADD -f $CONF2 -l data/test.ldif
RC=$?
if test $RC != 0 ; then
	echo "slapadd failed ($RC)!"
	test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit $RC
fi
echo "Starting slapd on TCP/IP port $PORT2..."
$SLAPD -f $CONF2 -h $URI2 -d $LVL >> $LOG2 2>&1 &
PID=$!
if test $WAIT != 0 ; then
	echo PID $PID
	read foo
fi
sleep $SLEEP0
# Poll the second server until it answers.
for i in 0 1 2 3 4 5; do
	$LDAPSEARCH -s base -b "$MONITOR" -H $URI2 \
		'objectclass=*' > /dev/null 2>&1
	RC=$?
	if test $RC = 0 ; then
		break
	fi
	echo "Waiting ${SLEEP1} seconds for slapd to start..."
	sleep ${SLEEP1}
done
echo "# search output from server running from slapd.conf..." >> $SERVER2OUT
$LDAPSEARCH -b "$BASEDN" -H $URI2 \
	>> $SERVER2OUT 2>&1
RC=$?
if test $RC != 0 ; then
	echo "ldapsearch failed ($RC)!"
	test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit $RC
fi
# Convert the slapd.conf setup to cn=config (-Tt) and restart from it.
echo "Stopping slapd on TCP/IP port $PORT2..."
kill -HUP $PID
$SLAPD -Tt -f $CONF2 -F $TESTDIR/conftest -d $LVL >> $LOG3 2>&1
echo "Starting slapd on TCP/IP port $PORT2..."
$SLAPD -F $TESTDIR/conftest -h $URI2 -d $LVL >> $LOG3 2>&1 &
PID=$!
if test $WAIT != 0 ; then
	echo PID $PID
	read foo
fi
KILLPIDS="$KILLPIDS $PID"
sleep $SLEEP0
# Poll the converted server until it answers.
for i in 0 1 2 3 4 5; do
	$LDAPSEARCH -s base -b "$MONITOR" -H $URI2 \
		'objectclass=*' > /dev/null 2>&1
	RC=$?
	if test $RC = 0 ; then
		break
	fi
	echo "Waiting ${SLEEP1} seconds for slapd to start..."
	sleep ${SLEEP1}
done
echo "Gathering overlay configuration from both servers..."
# The sed expressions strip the "{n}" ordering markers so the two dumps
# can be compared independently of attribute ordering.
echo "# overlay configuration from dynamically configured server..." >> $SERVER1OUT
$LDAPSEARCH -D cn=config -H $URI1 -y $CONFIGPWF \
	-b "olcOverlay={0}datamorph,olcDatabase={1}$BACKEND,cn=config" \
	| sed -e "s/ {[0-9]*}/ /" -e "s/={[0-9]*}/=/g" \
	>> $SERVER1OUT 2>&1
RC=$?
if test $RC != 0 ; then
	echo "ldapsearch failed ($RC)!"
	test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit $RC
fi
echo "# overlay configuration from server configured from slapd.conf..." >> $SERVER3OUT
$LDAPSEARCH -D cn=config -H $URI2 -y $CONFIGPWF \
	-b "olcOverlay={0}datamorph,olcDatabase={1}$BACKEND,cn=config" \
	| sed -e "s/ {[0-9]*}/ /" -e "s/={[0-9]*}/=/g" \
	>> $SERVER3OUT 2>&1
RC=$?
if test $RC != 0 ; then
	echo "ldapsearch failed ($RC)!"
	test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit $RC
fi
# We've already filtered out the ordering markers, now sort the entries
echo "Filtering ldapsearch results..."
$LDIFFILTER -s e < $SERVER3OUT > $SERVER3FLT
echo "Filtering expected entries..."
$LDIFFILTER -s e < $SERVER1OUT > $SERVER1FLT
echo "Comparing filter output..."
$CMP $SERVER3FLT $SERVER1FLT > $CMPOUT
if test $? != 0 ; then
	echo "Comparison failed"
	test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit 1
fi
rm $SERVER1OUT $SERVER3OUT
echo "Comparing search output on both servers..."
echo "# search output from dynamically configured server..." >> $SERVER1OUT
$LDAPSEARCH -b "$BASEDN" -H $URI1 \
	>> $SERVER1OUT 2>&1
RC=$?
if test $RC != 0 ; then
	echo "ldapsearch failed ($RC)!"
	test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit $RC
fi
echo "# search output from server configured from slapd.conf..." >> $SERVER3OUT
$LDAPSEARCH -b "$BASEDN" -H $URI2 \
	>> $SERVER3OUT 2>&1
RC=$?
if test $RC != 0 ; then
	echo "ldapsearch failed ($RC)!"
	test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit $RC
fi
test $KILLSERVERS != no && kill -HUP $KILLPIDS
# All three search dumps must match the pre-restart snapshot ($SERVER6OUT).
echo "Filtering ldapsearch results..."
$LDIFFILTER -s e < $SERVER1OUT > $SERVER1FLT
$LDIFFILTER -s e < $SERVER2OUT > $SERVER2FLT
$LDIFFILTER -s e < $SERVER3OUT > $SERVER3FLT
echo "Filtering expected entries..."
$LDIFFILTER -s e < $SERVER6OUT > $SERVER6FLT
echo "Comparing filter output..."
$CMP $SERVER6FLT $SERVER1FLT > $CMPOUT && \
$CMP $SERVER6FLT $SERVER2FLT > $CMPOUT && \
$CMP $SERVER6FLT $SERVER3FLT > $CMPOUT
if test $? != 0 ; then
	echo "Comparison failed"
	exit 1
fi
echo ">>>>> Test succeeded"
test $KILLSERVERS != no && wait
exit 0
| true |
ff019fd56b1747cbfd28214d19d6eee2de2634e5 | Shell | mrhmouse/dots | /.bin/e | UTF-8 | 234 | 3.125 | 3 | [] | no_license | #!/usr/bin/env rc
# Start the Emacs daemon unless one is already running.
pgrep emacs || emacs --daemon
# "e -" edits stdin: stage it in a temp file, open an Emacs frame on it,
# then print the edited result and clean up.
if (~ $1 -) {
	shift
	# NOTE(review): rc-style scoped assignment - $buffer holds the mktemp
	# path for the brace block that follows; confirm against the installed
	# rc's semantics.
	buffer=`{mktemp} {
	cat > $buffer
	emacsclient -c $buffer $*
	cat $buffer
	rm $buffer >/dev/null >[2=1]
	}
	return
} else {
	emacsclient -c $*
} | true |
36ba6ecc44169b55ee3815c546671c2fbcd812f2 | Shell | finiteautomata/PLN-UBA2018 | /eval.sh | UTF-8 | 260 | 2.5625 | 3 | [] | no_license | #! /bin/sh
#! /bin/sh
# Print a Markdown table of every saved model and its evaluation output.
echo "| Modelo |  |"
echo "|------------------|:----------------------|"
for model in models/*.pkl; do
    # $(...) instead of backticks; quote $model so paths with spaces work.
    sent=$(python languagemodeling/scripts/eval.py -i "$model")
    echo "|$model | $sent |"
done
| true |
57b26afe9eba78fd9851a4b45dcb57b335c6e367 | Shell | naevatec/openvidu-fiware-integration | /fiware-integration/build-pre.sh | UTF-8 | 699 | 3.28125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Pre-build step: package the frontend under src/main/resources/static and
# switch the app to serve the minified JS assets.
echo "PRE_BUILD - INIT"
rm -rf ./src/main/resources/static/
mkdir -p ./src/main/resources/static/
cp -r ./src/frontend/* ./src/main/resources/static/
echo "PRE_BUILD - resources copied to resources/static"
# Comment out the dev-only static-locations override (BSD sed "-i ''" form).
sed -i '' -e 's/spring\.resources\.static-locations/# spring\.resources\.static-locations/g' ./src/main/resources/application.properties
echo "PRE_BUILD - application.properties modified"
# Set the minified versions to production.
# read -r and quoting keep filenames with spaces/backslashes intact; the
# parameter expansion removes the literal ".min" (the previous sed pattern
# ".min" would also have matched any character before "min").
find ./src/main/resources/static -name "*.min.js" | while read -r oldFileName; do
    newFileName=${oldFileName//.min/}
    mv "$oldFileName" "$newFileName"
done
echo "POST_BUILD - js adapted to use minified versions"
echo "PRE_BUILD - END"
| true |
8e39231404c52b7d89a62db4acafaebf9a0aac92 | Shell | olneyhymn/reformed-diaconate | /archive_pdfs.sh | UTF-8 | 289 | 3.171875 | 3 | [
"Unlicense"
] | permissive | for url in $(ag --markdown "\"ht.*.pdf" --nofilename --nobreak | cut -d'=' -f2,3,4,5,6); do
url="${url%\"}"
url="${url#\"}"
echo $url
outFile=$(echo "$url" | cut -d / -f 5- | tr / _)
cd "$HOME/repos/reformed-diaconate/static/pdfs/archive" && curl -o $outFile $url
done | true |
e8719b975ce2bd1011d86cccf08fdaf2916e154b | Shell | Korvinet/Hass.io | /testing/run.sh | UTF-8 | 220 | 2.75 | 3 | [
"MIT"
] | permissive | echo 'Starting plugin...'
echo 'Starting testing...'
# Add-on options are mounted by the Hass.io supervisor at this path.
CONFIG_PATH=/data/options.json
echo "Config path: $CONFIG_PATH"
echo 'Getting variable host...'
# Read the "host" option; --raw-output strips the surrounding JSON quotes.
HOST="$(jq --raw-output '.host' $CONFIG_PATH)"
echo "Host value: $HOST" | true |
ad784914296c23b0af1b4c812e21bdf46cc257cd | Shell | get-ready/get-ready | /get-ready.sh | UTF-8 | 828 | 3.90625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -e
# Per-directory unique Docker names: "get-ready" + cwd with "/" flattened
# to "__" and lowercased (Docker image names must be lowercase).
SLASHED_PREFIX="get-ready$(pwd)"
UPPER_PREFIX=${SLASHED_PREFIX//\//__}
PREFIX=$(echo $UPPER_PREFIX | tr '[:upper:]' '[:lower:]')
IMAGE_NAME=${PREFIX}__image
CONTAINER_NAME=${PREFIX}__container
RUNNING_CONTAINERS=$(docker ps)
# "stop" subcommand: stop this directory's container and exit.
if [ "$1" == "stop" ]; then
  docker stop $CONTAINER_NAME
  echo "Environment stopped."
  exit
fi
# Start the environment if its container is not already running.
if [[ $RUNNING_CONTAINERS != *$CONTAINER_NAME* ]]; then
  docker build -t get-ready/base -f ~/.config/get-ready/default/Dockerfile ~/.config/get-ready/default
  docker build -t $IMAGE_NAME .
  # NOTE(review): $(cat dockeropts) is word-split on purpose to inject extra
  # docker options; options containing spaces will not survive this.
  docker run -d -it --rm -v $(pwd):/src --name $CONTAINER_NAME $(cat dockeropts) $(cat ~/.config/get-ready/default/dockeropts) $IMAGE_NAME
  echo "Environment started."
fi
# Any remaining arguments are executed inside the container.
if [ ! -z "$1" ]; then
  echo "Running ${@:1}"
  docker exec -it $CONTAINER_NAME ${@:1}
fi | true |
42b34ee3235f4fe6a8ef3b951cd800ea55580e12 | Shell | floscr/minimal-dotfiles | /home/.bashrc | UTF-8 | 624 | 2.828125 | 3 | [] | no_license | # If not running interactively, don't do anything
# Stop here for non-interactive shells (PS1 is only set when interactive).
[ -z "$PS1" ] && return
# Enable VI mode
set -o vi
# ReEnable CTRL-L as clearscreen
bind -m vi-insert "\C-l":clear-screen
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# Green user@host, blue working directory.
PS1='\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
# Listing and navigation shortcuts.
alias ll='ls -al'
alias la='ls -A'
alias l='ls -alF'
alias dir='ls -alh'
alias vi='vim'
alias grep='grep --color=auto'
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
# Prefer vim everywhere.
export VISUAL=vim
export EDITOR="$VISUAL"
export SHELL="/bin/bash"
| true |
bec6ec8bc73f2f587249461fe79d5e1d7c58d7f4 | Shell | dwxie/salt | /state/base/files/update_center_web.sh | UTF-8 | 4,752 | 3.421875 | 3 | [] | no_license | #!/bin/bash
# ---------------------------------------------------------------------------
# Interactive rollout of a "center_web" update across the g_list_platweb
# Salt nodegroup: back up /var/www and /etc/supervisor first, then for each
# host listed in ./plat_web confirm and run the update steps one by one.
# Exit codes: 12 bad usage, 2 operator aborted, 13 host list missing.
# The repeated prompt/confirm/salt blocks of the original are factored into
# the is_yes/confirm/salt_step helpers below.
# ---------------------------------------------------------------------------

# is_yes ANSWER: succeeds when ANSWER means yes ("yes", "y", or just Enter).
is_yes() {
    [ "$1" = "yes" ] || [ "$1" = "" ] || [ "$1" = "y" ]
}

# confirm PROMPT: ask the operator; returns 0 on a yes/default answer.
confirm() {
    local answer
    read -p "$1" answer
    is_yes "$answer"
}

# salt_step TARGET DESC CMD [DONE_LABEL]
# Show DESC, ask "Need this step?", then run CMD through salt cmd.run.
# TARGET is the literal word "group" (whole g_list_platweb nodegroup) or a
# single minion id.  DONE_LABEL defaults to DESC for the completion banner.
salt_step() {
    local target=$1 desc=$2 cmd=$3 done_label=${4:-$2}
    echo
    echo -e "$desc"
    if confirm "Need this step? [y/n]: "; then
        if [ "$target" = "group" ]; then
            salt -N "g_list_platweb" cmd.run "$cmd"
        else
            salt "$target" cmd.run "$cmd"
        fi
        echo -e "[ $done_label done ]"
    else
        echo -e "Skip this step. and We Continue..------"
    fi
}

# update_host HOST SRC_PATH: run the per-host update steps interactively.
update_host() {
    local host=$1 src_path=$2
    salt "$host" grains.item ipv4
    echo -e "【 I am dealling with server [$host] 】"
    read -p "Let's GO? [y/n]: " answer
    if ! is_yes "$answer"; then
        echo -e "--- Continue to Next One ---"
        sleep 1
        return
    fi
    echo -e "---- OK GO ----"
    sleep 1
    salt_step "$host" "cp -a /tmp/camera.env ---> /var/www/camera.env" \
        "cp -a /tmp/camera.env /var/www/camera.env" \
        "cp -a /tmp/camera.env --> /var/www/camera.env"
    salt_step "$host" "cd $src_path && make update" "cd $src_path && make update"
    salt_step "$host" "delete /tmp/camera.env" "rm /tmp/camera.env"
    salt_step "$host" "Do I need to delete this dir [$src_path] for you ? [y/n]: " \
        "rm -rf $src_path" "rm -rf $src_path"
    salt_step "$host" "supervisorctl -c /etc/supervisor/supervisord.conf restart camera" \
        "supervisorctl -c /etc/supervisor/supervisord.conf restart camera"
    salt_step "$host" "supervisorctl -c /etc/supervisor/supervisord.conf status" \
        "supervisorctl -c /etc/supervisor/supervisord.conf status"
}

main() {
    if [ $# -ne 1 ]; then
        echo -e "Usage:\n\t$0 \"path_to_index_web\""
        exit 12
    fi
    local center_src_path="$1"

    echo -e "This script is only used to update center_web not for first init install center_web"
    if confirm "Sure you know ? [y/n]: "; then
        echo -e "OK..I understand your choice and continue.."
    else
        echo -e "Oh year..EXIT now.."
        exit 2
    fi

    # Timestamp used to name the per-run backup directory.
    local the_time
    the_time=$(date +%F_%H:%M)

    if confirm "Ready to go ? [y/n]: "; then
        echo -e "OK..Let's go"
    else
        echo -e "Oh year..EXIT now.."
        exit 2
    fi

    # --- backup phase ------------------------------------------------------
    echo -e "OK.. 我先来备份相关文件"
    sleep 1
    echo -e "创建备份目录/backup/center_web/$the_time"
    salt -N "g_list_platweb" cmd.run "mkdir -p /backup/center_web/$the_time"
    salt -N "g_list_platweb" cmd.run "du -sh /var/www/ |column -t"
    salt_step group "cp -a /var/www --> /backup/center_web/$the_time/" \
        "cp -a /var/www /backup/center_web/$the_time/"
    salt_step group "cp -a /etc/supervisor --> /backup/center_web/$the_time/" \
        "cp -a /etc/supervisor /backup/center_web/$the_time/"

    echo -e "备份完成了。现在开始更新了"
    sleep 1

    # --- per-host update phase --------------------------------------------
    if [ ! -f "./plat_web" ]; then
        echo -e "Sorry. plat_web not found"
        exit 13
    fi

    # One host per whitespace-separated token.  A for-loop (rather than
    # "while read < plat_web") keeps stdin free for the interactive reads.
    for host in $(cat ./plat_web); do
        update_host "$host" "$center_src_path"
    done
}

main "$@"
| true |
75d9c35dc6bc7d2a73af743a26d3d927a219cb6e | Shell | rsuite/rsuite | /docs/script.sh | UTF-8 | 144 | 2.75 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if [[ "$BRANCH" == "next" || "$BRANCH" == "staging" ]]; then
# Proceed with the build
exit 1
else
# Don't build
exit 0
fi
| true |
57d5cfa16edfbbd8138e3d4e3b0e59b354dd7ba0 | Shell | bkmgit/mlpack.org | /irc/scripts/regen-html.sh | UTF-8 | 1,506 | 4.28125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# regen-html.sh <irssi-logdir> <output-html-dir>
#
# A ridiculous bash script to regenerate HTML IRC log files. This is really
# getting close to the wrongest way to run a public logging system. I kinda
# like it because it's so terrible.
#
# This expects that header.html and footer.html are in
# output-html-dir/templates/ and those will be used to generate the header and
# footer.
logdir=$1;
htmldir=$2;
scriptdir=$3;
if [ "a$logdir" = "a" ]; then
echo "Must specify log directory as first parameter."
exit
fi
if [ "a$htmldir" = "a" ]; then
echo "Must specify html directory as second parameter."
exit
fi
if [ "a$scriptdir" = "a" ]; then
echo "Assuming that scriptdir is scripts/."
scriptdir="scripts/";
fi
for i in $logdir/*; do
# Turn the irssi log into something that's kind of like HTML.
filename=`basename $i .log | sed 's/#//'`;
$scriptdir/process-log.sh $i > $htmldir/$filename.tmp;
# Generate the calendar.
$scriptdir/create-calendar.sh $i $logdir $htmldir/cal.tmp;
# Assemble the file.
cat $htmldir/templates/header.html $htmldir/cal.tmp $htmldir/$filename.tmp $htmldir/templates/footer.html > $htmldir/$filename.html;
chmod 644 $htmldir/$filename.html;
rm $htmldir/cal.tmp;
rm $htmldir/$filename.tmp;
# Modify some unset information in the templates, after deriving what the day
# is.
date=`echo $filename | sed -E 's/.*([0-9]{4})([0-9]{2})([0-9]{2}).*/\1-\2-\3/'`;
sed -i -E 's/%%DAY%%/'$date'/g' $htmldir/$filename.html;
done
| true |
91214d2e7e4d9b85b993ce25e9019b81af3596c0 | Shell | k8scenario/k8scenario | /scripts/switch_context.sh | UTF-8 | 906 | 3.953125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
SAVED=~/.k8scenario.saved.context
# USAGE: switch_context.sh
# - if current context is 'k8scenario' AND file $SAVED (above) is present, put back the saved context
# - else
# - create k8scenario context using namespace k8scenario (assumes KIND cluster)
# - set context to k8scenario
use_k8scenario_context() {
kubectl config set-context k8scenario --cluster kind-kind --user kind-kind --namespace=k8scenario
kubectl config use-context k8scenario
}
CONTEXT=$(kubectl config get-contexts | awk '/^* / { print $2; }')
echo "Current context is <$CONTEXT>"
if [ ! -z "$CONTEXT" ]; then
if [ "$CONTEXT" = "k8scenario" ]; then
[ -f $SAVED ] && kubectl config use-context $(cat $SAVED)
else
echo $CONTEXT > $SAVED
use_k8scenario_context
fi
fi
CONTEXT=$(kubectl config get-contexts | awk '/^* / { print $2; }')
echo "Current context is <$CONTEXT>"
| true |
f3512ea55d9d9635b9984a9299aa56da96da150a | Shell | renzhenghang/FATE | /cluster-deploy/scripts/packaging.sh | UTF-8 | 7,731 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
cwd=$(cd `dirname $0`; pwd)
cd ${cwd}
source_code_dir=$(cd `dirname ${cwd}`; cd ../; pwd)
echo "[INFO] Source code dir is ${source_code_dir}"
packages_dir=${source_code_dir}/cluster-deploy/packages
mkdir -p ${packages_dir}
cd ${source_code_dir}
eggroll_git_url=`grep -A 3 '"eggroll"' .gitmodules | grep 'url' | awk -F '= ' '{print $2}'`
eggroll_git_branch=`grep -A 3 '"eggroll"' .gitmodules | grep 'branch' | awk -F '= ' '{print $2}'`
echo "[INFO] Git clone eggroll submodule source code from ${eggroll_git_url} branch ${eggroll_git_branch}"
if [[ -e "eggroll" ]];then
while [[ true ]];do
read -p "The eggroll directory already exists, delete and re-download? [y/n] " input
case ${input} in
[yY]*)
echo "[INFO] Delete the original eggroll"
rm -rf eggroll
git clone ${eggroll_git_url} -b ${eggroll_git_branch} eggroll
break
;;
[nN]*)
echo "[INFO] Use the original eggroll"
break
;;
*)
echo "Just enter y or n, please."
;;
esac
done
else
git clone ${eggroll_git_url} -b ${eggroll_git_branch} eggroll
fi
cd ${source_code_dir}
fateboard_git_url=`grep -A 3 '"fateboard"' .gitmodules | grep 'url' | awk -F '= ' '{print $2}'`
fateboard_git_branch=`grep -A 3 '"fateboard"' .gitmodules | grep 'branch' | awk -F '= ' '{print $2}'`
echo "[INFO] Git clone fateboard submodule source code from ${fateboard_git_url} branch ${fateboard_git_branch}"
if [[ -e "fateboard" ]];then
while [[ true ]];do
read -p "The fateboard directory already exists, delete and re-download? [y/n] " input
case ${input} in
[yY]*)
echo "[INFO] Delete the original fateboard"
rm -rf fateboard
git clone ${fateboard_git_url} -b ${fateboard_git_branch} fateboard
break
;;
[nN]*)
echo "[INFO] Use the original fateboard"
break
;;
*)
echo "Just enter y or n, please."
;;
esac
done
else
git clone ${fateboard_git_url} -b ${fateboard_git_branch} fateboard
fi
egg_version=$(grep -E -m 1 -o "<eggroll.version>(.*)</eggroll.version>" ${source_code_dir}/eggroll/pom.xml| tr -d '[\\-a-z<>//]' | awk -F "eggroll.version" '{print $2}')
meta_service_version=$(grep -E -m 1 -o "<eggroll.version>(.*)</eggroll.version>" ${source_code_dir}/eggroll/pom.xml| tr -d '[\\-a-z<>//]' | awk -F "eggroll.version" '{print $2}')
roll_version=$(grep -E -m 1 -o "<eggroll.version>(.*)</eggroll.version>" ${source_code_dir}/eggroll/pom.xml| tr -d '[\\-a-z<>//]' | awk -F "eggroll.version" '{print $2}')
federation_version=$(grep -E -m 1 -o "<fate.version>(.*)</fate.version>" ${source_code_dir}/arch/pom.xml| tr -d '[\\-a-z<>//]' | awk -F "fte.version" '{print $2}')
proxy_version=$(grep -E -m 1 -o "<fate.version>(.*)</fate.version>" ${source_code_dir}/arch/pom.xml| tr -d '[\\-a-z<>//]' | awk -F "fte.version" '{print $2}')
fateboard_version=$(grep -E -m 1 -o "<version>(.*)</version>" ${source_code_dir}/fateboard/pom.xml| tr -d '[\\-a-z<>//]' | awk -F "version" '{print $2}')
sed -i "s/egg_version=.*/egg_version=${egg_version}/g" ${source_code_dir}/cluster-deploy/scripts/default_configurations.sh
sed -i "s/meta_service_version=.*/meta_service_version=${meta_service_version}/g" ${source_code_dir}/cluster-deploy/scripts/default_configurations.sh
sed -i "s/roll_version=.*/roll_version=${roll_version}/g" ${source_code_dir}/cluster-deploy/scripts/default_configurations.sh
sed -i "s/federation_version=.*/federation_version=${federation_version}/g" ${source_code_dir}/cluster-deploy/scripts/default_configurations.sh
sed -i "s/proxy_version=.*/proxy_version=${proxy_version}/g" ${source_code_dir}/cluster-deploy/scripts/default_configurations.sh
sed -i "s/fateboard_version=.*/fateboard_version=${fateboard_version}/g" ${source_code_dir}/cluster-deploy/scripts/default_configurations.sh
source ${source_code_dir}/cluster-deploy/scripts/default_configurations.sh
eggroll_source_code_dir=${source_code_dir}/eggroll
cd ${eggroll_source_code_dir}
echo "[INFO] Compiling eggroll"
mvn clean package -DskipTests
echo "[INFO] Compile eggroll done"
echo "[INFO] Packaging eggroll"
cd ${eggroll_source_code_dir}
cd api
tar czf eggroll-api-${version}.tar.gz *
mv eggroll-api-${version}.tar.gz ${packages_dir}/
cd ${eggroll_source_code_dir}
cd computing
tar czf eggroll-computing-${version}.tar.gz *
mv eggroll-computing-${version}.tar.gz ${packages_dir}/
cd ${eggroll_source_code_dir}
cd conf
tar czf eggroll-conf-${version}.tar.gz *
mv eggroll-conf-${version}.tar.gz ${packages_dir}/
cd ${eggroll_source_code_dir}
cd framework/egg/target
tar czf eggroll-egg-${version}.tar.gz eggroll-egg-${egg_version}.jar lib/
mv eggroll-egg-${version}.tar.gz ${packages_dir}/
cd ${eggroll_source_code_dir}
cd framework/meta-service/target
tar czf eggroll-meta-service-${version}.tar.gz eggroll-meta-service-${meta_service_version}.jar lib/
mv eggroll-meta-service-${version}.tar.gz ${packages_dir}/
cd ${eggroll_source_code_dir}
cd framework/roll/target
tar czf eggroll-roll-${version}.tar.gz eggroll-roll-${roll_version}.jar lib/
mv eggroll-roll-${version}.tar.gz ${packages_dir}/
cd ${eggroll_source_code_dir}
cd storage/storage-service-cxx
tar czf eggroll-storage-service-cxx-${version}.tar.gz *
mv eggroll-storage-service-cxx-${version}.tar.gz ${packages_dir}/
echo "[INFO] Package eggroll done"
echo "[INFO] Compiling fate"
cd ${source_code_dir}/fateboard/
mvn clean package -DskipTests
cd ${source_code_dir}/arch/
mvn clean package -DskipTests
echo "[INFO] Compile fate done"
echo "[INFO] Packaging fate"
cp ${source_code_dir}/fateboard/target/fateboard-${fateboard_version}.jar ${packages_dir}/
cd ${source_code_dir}/arch/driver/federation/target
tar czf fate-federation-${version}.tar.gz fate-federation-${federation_version}.jar lib/
mv fate-federation-${version}.tar.gz ${packages_dir}/
cd ${source_code_dir}/arch/networking/proxy/target
tar czf fate-proxy-${version}.tar.gz fate-proxy-${proxy_version}.jar lib/
mv fate-proxy-${version}.tar.gz ${packages_dir}/
echo "[INFO] Packaging base module"
get_module_package ${source_code_dir} "python" pip-packages-fate-${python_version}.tar.gz
get_module_package ${source_code_dir} "python" Miniconda3-4.5.4-Linux-x86_64.sh
get_module_package ${source_code_dir} "jdk" jdk-${jdk_version}-linux-x64.tar.gz
get_module_package ${source_code_dir} "mysql" mysql-${mysql_version}-linux-glibc2.12-x86_64.tar.xz
get_module_package ${source_code_dir} "redis" redis-${redis_version}.tar.gz
get_module_package ${source_code_dir} "storage-service-cxx third-party" third_party_eggrollv1.tar.gz
get_module_package ${source_code_dir} "storage-service-cxx third-party" third_party_eggrollv1_ubuntu.tar.gz
echo "[INFO] Package base module done"
echo "[INFO] Package fate done"
echo "[INFO] A total of `ls ${packages_dir} | wc -l | awk '{print $1}'` packages:"
ls -lrt ${packages_dir}
| true |
2009a80777caa65e04ef3d488752779b66d0a7c6 | Shell | technologiclee/gnulinux_support | /GNULinux/Archlinux/PKGBUILDs/hybris-device-sony-nozomi/generate-android-users.sh | UTF-8 | 2,313 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# usage: print the command-line help for this script to stdout.
usage (){
cat << EOF
Usage for $0 : $0 <android product output>
	<android product output> :
		Path to Android product output folder (eg: /home/aosp/out/target/product/nozomi)
EOF
}
die () { echo "ERROR: ${1-UNKNOWN}"; exit 1; }
# Exactly one argument: the Android product output directory.
if [ $# -ne 1 ]; then
    usage
    exit 1
fi

# UID/GID table shipped in the AOSP product output headers.
CONFIG_H=$1/headers/private/android_filesystem_config.h
[ -f $CONFIG_H ] || exit -1

# Everything from here to EOGEN is emitted to stdout: shell snippets (PKGBUILD
# install hooks) that create the Android users/groups.  The $(...) blocks run
# now, extracting AID #defines and user/group table rows from CONFIG_H.
cat << EOGEN
# generate during makepkg by generate-android-users.sh

####
# Variables

$(egrep '^#define AID' $CONFIG_H | sed 's/#define //;s/ \/\*.*//' | egrep -v 'AID_ROOT|AID_NOBODY' | awk '{print $1"="$2}')

####
# Hidden Post install

_post_install(){

	# Create groups
$(egrep '^ { "' $CONFIG_H | sed 's/^ { "//;s/",//;s/, },.*//' | egrep -v 'AID_ROOT|AID_NOBODY' | awk '{ print "\tgroupadd -g $"$2" "$1" &>/dev/null"}')

	# Create users
$(egrep '^ { "' $CONFIG_H | sed 's/^ { "//;s/",//;s/, },.*//' | egrep -v 'AID_ROOT|AID_NOBODY' | awk '{ print "\tuseradd -M -s /usr/bin/nologin -c \"Android ("$2")\" -g $"$2" -u $"$2" "$1" &>/dev/null"}')

} #end post_install()

####
# Hidden Post upgrade

_post_upgrade(){

	# IMPORTANT: we will not take care of uid/gid change and users to delete here.
	# you need to take that into account into hybris-device.install script

	# Create missing groups
$(egrep '^ { "' $CONFIG_H | sed 's/^ { "//;s/",//;s/, },.*//' | egrep -v 'AID_ROOT|AID_NOBODY' | awk '{ print "\tgetent group "$1" >/dev/null 2>&1 || groupadd -g $"$2" "$1" &>/dev/null"}')

	# Create missing users
$(egrep '^ { "' $CONFIG_H | sed 's/^ { "//;s/",//;s/, },.*//' | egrep -v 'AID_ROOT|AID_NOBODY' | awk '{ print "\tgetent passwd "$1" >/dev/null 2>&1 || useradd -M -s /usr/bin/nologin -c \"Android ("$2")\" -g $"$2" -u $"$2" "$1" &>/dev/null"}')

} #end post_upgrade()

####
# Dump
#
#dump(){
#
#	# dump what is needed
#
#	cat << EOF
#
#	####
#	# Groups
#
$(egrep '^ { "' $CONFIG_H | sed 's/^ { "//;s/",//;s/, },.*//' | egrep -v 'AID_ROOT|AID_NOBODY' | awk '{ print "#\t\tgroupadd -g \\$"$2" "$1}')
#
#	####
#	# Users
#
$(egrep '^ { "' $CONFIG_H | sed 's/^ { "//;s/",//;s/, },.*//' | egrep -v 'AID_ROOT|AID_NOBODY' | awk '{ print "#\t\tuseradd -M -s /usr/bin/nologin -c \"Android ("$2")\" -g \\$"$2" -u \\$"$2" "$1}')
#
#EOF
#
#} # end dump

EOGEN
| true |
4ad8031edf66185c1cf0c32dd8f4a9e0edd1f3b6 | Shell | msDekova/Linux-Shell | /FMI/Shell/task16.sh | UTF-8 | 624 | 3.8125 | 4 | [] | no_license | #!/bin/bash
if [ $# -ne 3 ]
then
echo "Invalid number of arguments"
exit 1
fi
if [ ! -f ${1} ]
then
echo "First argument must be regular file"
exit 2
fi
FILE=$1
STRING1=$2
STRING2=$3
TERM1=$(cat ${FILE} | grep "${STRING1}=" | cut -d '=' -f2 | tr ' ' '\n' | sort | uniq | sed 1d )
TERM2=$(cat ${FILE} | grep "${STRING2}=" | cut -d '=' -f2 | tr ' ' '\n' | sort | uniq )
echo "${TERM1}"
echo "${TERM2}"
RES=$(echo "${TERM2}" | grep -v "${TERM1}" | tr '\n' ' ')
_TERM1=$(echo "${TERM1}" | tr '\n' ' ' )
_TERM2=$(echo "${TERM2}" | tr '\n' ' ' )
#echo "${RES}"
sed -i -e "s/^${STRING2}=.*/${STRING2}=${RES}/" "${FILE}"
| true |
f3e9c5c5c2083e6630595776fa534e6c34c966bc | Shell | vkutas/DevOps-home-tasks | /task1/whose.sh | UTF-8 | 1,868 | 4.03125 | 4 | [] | no_license | EXAMPLE_OF_USAGE="Example of usage: 'whose.sh firefox' or 'whose.sh 1287'. See README.md for more details.";
PROCESS="$1";
shift;
while [ -n "${1}" ]; do
case "$1" in
-v ) OUTPUT_DATA='/Organization\|Country/p'; n=2;;
-vv ) OUTPUT_DATA='/Organization\|Country\|City/p'; n=3;;
-vvv ) OUTPUT_DATA='/Organization\|Country\|City\|Address\|PostalCode/p';;
-e ) CONNECTIONS_DETAILS=$(netstat -tnp | grep -w 'ESTABLISHED');;
-eu ) CONNECTIONS_DETAILS=$(netstat -tunp | awk '$6 =="ESTABLISHED" || $1 == "udp" { print $0 }');;
-a ) CONNECTIONS_DETAILS=$(netstat -tnap);;
-n ) NUMBER_OF_CONNECTIONS="$2";
shift;
;;
* ) echo "Option ${1} is unknown";
echo "$EXAMPLE_OF_USAGE";
exit 1;
;;
esac
shift;
done
if [ ! -n "${PROCESS}" ]; then
printf "Process name or PID must be specified\n\r%s\n\r" "${EXAMPLE_OF_USAGE}";
exit 1;
fi
if [ ! -n "${CONNECTIONS_DETAILS}" ]; then
CONNECTIONS_DETAILS=$(netstat -tunap)
fi
if [ ! -n "${OUTPUT_DATA}" ]; then
OUTPUT_DATA='/Organization/p'
fi
if [ ! -n "${NUMBER_OF_CONNECTIONS}" ]; then
NUMBER_OF_CONNECTIONS=5
fi
PROCESS_CONNECTIONS=$(echo "${CONNECTIONS_DETAILS}" | awk -v pat="$PROCESS" '$7~pat { print $5 }');
if [ ! -n "${PROCESS_CONNECTIONS}" ]; then
printf "Connections for process \"%s\" not found.\n\r" "${PROCESS}";
exit 1;
fi
echo "${PROCESS_CONNECTIONS}" | cut -d: -f1 | sort | uniq -c | sort | tail -n"${NUMBER_OF_CONNECTIONS}" |
while read ADDRES_LINE; do
CON_PER_IP=$(echo "$ADDRES_LINE" | cut -d' ' -f1);
IP=$(echo "$ADDRES_LINE" | cut -d' ' -f2);
DATA=$(whois "$IP" | sed -n $OUTPUT_DATA | tail -"$n");
echo "Remote IP Address: ${IP}";
echo "Number of Connections ${CON_PER_IP}"
echo "$DATA";
echo;
done | true |
739e4f2fe4c0a9b44d21054d745c88cbdd6d4266 | Shell | Ramkumar47/arch_linux_configuration | /i3-gaps/i3/brightctl.sh | UTF-8 | 1,031 | 3.796875 | 4 | [] | no_license | #!/usr/bin/zsh
# brightness control for i3
# developed by ramkumar
#######################################################################
# NOTE: for bash terminal, this script will not work and #
# throw errors like "invalid arithmetic operator ...". This is #
# due to the fact that the bash doent hande floating point operations #
# wheras zsh does. #
#######################################################################
# getting current device name
deviceName=`xrandr -q | grep ' connected' | head -n 1 | cut -d ' ' -f1`
# getting current brightness
curr_brightness=`xrandr --current --verbose | grep "Brightness" | cut -d" " -f2`
case $1 in
increase)
brightness=$((curr_brightness + 0.1))
if [[ $brightness > 1.0 ]]
then
brightness=1.0
fi
echo $brightness
;;
decrease)
brightness=$((curr_brightness - 0.1))
if [[ $brightness < 0.1 ]]
then
brightness=0.1
fi
echo $brightness
;;
reset)
brightness=0.7
esac
xrandr --output $deviceName --brightness $brightness
| true |
9b73e45b56acde4c7d09dcb42e6b886746b2eeaa | Shell | darkroam/dotfiles | /.config/x11/xprofile | UTF-8 | 2,155 | 3.203125 | 3 | [] | no_license | #!/bin/sh
# This file runs when a DM logs you into a graphical session.
# If you use startx/xinit like a Chad, this file will also be sourced.
# This file's true location is in ~/.config/xprofile and a link exists to it in
# ~/.xprofile. If you do not use a DM, you may remove the link to it to have a
# cleaner home.
# Fix Gnome Apps Slow Start due to failing services
# Add this when you include flatpak in your system
dbus-update-activation-environment --systemd DBUS_SESSION_BUS_ADDRESS DISPLAY XAUTHORITY
eval "$(dbus-launch --sh-syntax --exit-with-session)"
export GTK_IM_MODULE=fcitx
export QT_IM_MODULE=fcitx
export XMODIFIERS="@im=fcitx"
alias setproxy="export ALL_PROXY=http://127.0.0.1:10080"
#alias setproxy="export ALL_PROXY=socks5://127.0.0.1:1080"
alias unsetproxy="unset ALL_PROXY"
alias ip="curl -i http://ip.cn"
export GO111MODULE=on
export GOPROXY=https://goproxy.cn,direct
xrandr --dpi 96 # Set DPI. User may want to use a larger number for larger screens.
setbg & # set the background with the `setbg` script
#xrdb ${XDG_CONFIG_HOME:-$HOME/.config}/x11/xresources & xrdbpid=$! # Uncomment to use Xresources colors/settings on startup
remaps & # run the remaps script, switching caps/esc and more; check it for more info
xset r rate 300 50 & # Speed xrate up
fcitx &
if [ -f /usr/bin/synergy ]; then
synergyc &
fi
autostart="mpd xcompmgr dunst unclutter pipewire"
for program in $autostart; do
pidof -s "$program" || "$program" &
done >/dev/null 2>&1
# Ensure that xrdb has finished running before moving on to start the WM/DE.
[ -n "$xrdbpid" ] && wait "$xrdbpid"
# mpd & # music player daemon-you might prefer it as a service though
# xcompmgr & # xcompmgr for transparency
# dunst & # dunst for notifications
# unclutter & # Remove mouse when idle
# This line autostarts an instance of Pulseaudio that does not exit on idle.
# This is "necessary" on Artix due to a current bug between PA and
# Chromium-based browsers where they fail to start PA and use dummy output.
#pidof -s runit &&
# ! pidof -s pulseaudio >/dev/null 2>&1 &&
# setsid -f pulseaudio --start --exit-idle-time=-1 >/dev/null 2>&1
| true |
b3c90d4e192fc4b67194c0ba410ae9328bbc1466 | Shell | rrshah/nodeman | /metrics/available_bandwidth.sh | UTF-8 | 445 | 3.578125 | 4 | [] | no_license |
#Gets the interface IP Address
#echo $1
/sbin/ifconfig $1 | grep "inet addr" | awk -F: '{print $2}' | awk '{print $1}'
INTERVAL="1" # update interval in seconds
IF=$1
while true
do
R1=`cat /sys/class/net/$1/statistics/rx_bytes`
sleep $INTERVAL
R2=`cat /sys/class/net/$1/statistics/rx_bytes`
RBPS=`expr $R2 - $R1`
RKBPS=`expr $RBPS / 1024`
echo "Current bandwidth usage: $RKBPS kB/s"
done
| true |
fb77bf1f205fe53742a3734e6ad0b8dfc2d02805 | Shell | aponsero/functional_annotation_parralel | /scripts/run_phanotate.sh | UTF-8 | 770 | 3.3125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#PBS -l select=1:ncpus=16:mem=6gb
#PBS -l walltime=01:00:00
#PBS -l place=free:shared
conda activate bio
HOST=`hostname`
LOG="$STDOUT_DIR/${HOST}.log"
ERRORLOG="$STDERR_DIR/${HOST}.log"
if [ ! -f "$LOG" ] ; then
touch "$LOG"
fi
echo "Started `date`">>"$LOG"
echo "Host `hostname`">>"$LOG"
SAMPLE=`head -n +${PBS_ARRAY_INDEX} $FILE_LIST | tail -n 1`
echo "processing $SAMPLE"
i="$(basename -- $SAMPLE)"
echo $i
OUT_FILE="$OUT_DIR/${i}_phanotate.txt"
PROT_FILE="$OUT_DIR/${i}_prot.faa"
GENE_FILE="$OUT_DIR/${i}_genes.fna"
echo "$PHANOTATE/phanotate.py $SAMPLE >> $OUT_FILE"
$PHANOTATE/phanotate.py $SAMPLE >> $OUT_FILE
echo "$WORKER_DIR/parse_phanotate.py"
python $WORKER_DIR/parse_phanotate.py -i $OUT_FILE -f $SAMPLE -p $PROT_FILE -g $GENE_FILE
| true |
d58db61e84baf8c7588c3ceb31366e3bf79240dc | Shell | kevindang88/edu | /cs260/sample-scripts/todebug | UTF-8 | 196 | 3.234375 | 3 | [] | no_license | #!/bin/sh
# Sript name: todebug
name="Joe Blow"
if [ "$name" = "Joe Blow" ]
then
echo "Hi $name"
fi
num=1
while [ $num -lt 5 ]
do
num=`expr $num + 1`
done
echo The grand total is $num
| true |
9cd8204f6082e1988713530494d6ace48d948f51 | Shell | illingwo/sam-web-client | /release.sh | UTF-8 | 369 | 3.328125 | 3 | [
"BSD-3-Clause"
] | permissive | #! /bin/bash
version=${1:?Need to specify release version}
# check upd is available
type -t upd > /dev/null || { echo "upd is not available"; exit 1; }
git tag -a -m "Release $version" $version || exit 1
make dist || exit 1
upd addproduct sam_web_client -T dist.tar.gz -m ups/sam_web_client.table -0 $version
# Try to push tags to main repository
git push --tags
| true |
bb98bd339e00931326b3e8a6deebe0139e883f37 | Shell | devnull-cz/c-prog-lang | /getting-credits/2021/tests/test-005.sh | UTF-8 | 859 | 3.828125 | 4 | [] | no_license | #/bin/bash
#
# Listing test with specific file arguments. Follow the same ordering in argv
# as in the archive for the files from the archive. Put a few non-existent
# files in the argument list. Those are expected to be printed to stderr after
# all present files were listed.
source $configvar
cd $tmpdir
first=$(echo "$inputfiles" | head -1)
third=$(echo "$inputfiles" | head -3 | tail -1)
seventh=$(echo "$inputfiles" | head -7 | tail -1)
last=$(echo "$inputfiles" | tail -1)
base=$(basename $MYTAR)
# The warning messages start with "$argv0: ". In case $MYTAR is not
# "mytar", fix the output.
output=$($MYTAR -t -f $tarfile nonexistent1 $first nonexistent2 \
$third $seventh $last nonexistent3 2>&1)
ret=$?
((ret == 2)) || { echo "Wrong return value $ret." >&2 && exit 1; }
echo "$output" | sed -e "s/^[a-zA-Z0-9/.]*$base: /mytar: /"
exit 0
| true |
10ba3c3c15802b402a8261620ab85474e18052c8 | Shell | ilventu/aur-mirror | /ies4linux/PKGBUILD | UTF-8 | 1,588 | 2.90625 | 3 | [] | no_license | # Maintainer: Limao Luo <luolimao+AUR@gmail.com>
# Contributor: Jarek Sedlacek <jareksedlacek@gmail.com>
pkgname=ies4linux
pkgver=2.99.0.1
pkgrel=5
pkgdesc="The simpler way to run Microsoft Internet Explorer on Wine"
arch=(any)
url=http://www.tatanka.com.br/ies4linux/page/Main_Page
license=(GPL)
depends=(cabextract pygtk unzip wine)
options=(!emptydirs)
source=(${url/pa*}/downloads/$pkgname-$pkgver.tar.gz
remove-wineprefixcreate.patch
wine-version.patch)
sha256sums=('f12685793b8978b1cf418b823724382aaac238adbb6bb4721e6d001ab8afa88e'
'123ce8ff236ccac20ed22d577fc3fca81d684e4adf78bd6223571f0163426f94'
'39c592a8200ee7dc94072bad2f742dbfa7aab0fe98ca4adcfcfe5272da298adf')
sha512sums=('beae6f1d03cd6a004eddd7426bcc655d468aa95e3a2e7588f676cac74b94a7a4c19c27cdc64695b869e560790aaa1947bb78f3954e3bc24ef07ca1bdd43b5c21'
'067072dca40745376d7237f928aa781a2907a4c1fd9919372f8e285423d034f28e02cb260e438d5df5f6eadcb41da4b9454fdc38b773b23f0da051d64bce43db'
'6a05bb9ca31dd4d9718be2fce8465b9d264ebac395d541433006e9a73779e463d9de8bd7ea7e8158b9940c4ead77023286a6853eb0eb36efaae49a08702920e0')
build() {
cd "$srcdir"/$pkgname-$pkgver/
patch -Np1 -i ../remove-wineprefixcreate.patch
patch -Np1 -i ../wine-version.patch
sed -i -e '/IES4LINUX=/s|`dirname "$0"`|/usr/lib/ies4linux|' -e 's/python -c/python2 -c/' $pkgname
sed -i 's/python /python2 /' ui/pygtk/python-gtk.sh
}
package(){
cd "$srcdir"
install -Dm755 $pkgname-$pkgver/$pkgname "$pkgdir"/usr/bin/$pkgname
rm $pkgname-$pkgver/$pkgname
install -d "$pkgdir"/usr/lib/
cp -a $pkgname-$pkgver "$pkgdir"/usr/lib/$pkgname
}
| true |
502c67d57152b90aa5cfbcc87d6fd44776fc84b6 | Shell | DataDog/integrations-core | /.ddev/ci/scripts/ibm_ace/linux/55_install_client.sh | UTF-8 | 1,331 | 3.515625 | 4 | [
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] | permissive | #!/bin/bash
# This script installs IBM MQ development version on the CI machines to be able to
# * Compile pymqi image
# * Run integration tests on the machine
set -ex
TMP_DIR=/tmp/mq
MQ_URL=https://ddintegrations.blob.core.windows.net/ibm-mq/mqadv_dev90_linux_x86-64.tar.gz
MQ_PACKAGES="MQSeriesRuntime-*.rpm MQSeriesServer-*.rpm MQSeriesMsg*.rpm MQSeriesJava*.rpm MQSeriesJRE*.rpm MQSeriesGSKit*.rpm"
if [ -e /opt/mqm/inc/cmqc.h ]; then
echo "cmqc.h already exists, exiting"
set +ex
exit 0
fi
sudo apt-get update
sudo apt-get install -y --no-install-recommends \
bash \
bc \
coreutils \
curl \
debianutils \
findutils \
gawk \
gcc \
grep \
libc-bin \
mount \
passwd \
procps \
rpm \
sed \
tar \
util-linux
mkdir -p $TMP_DIR
pushd $TMP_DIR
# Retry necessary due to flaky download that might trigger:
# curl: (56) OpenSSL SSL_read: SSL_ERROR_SYSCALL, errno 110
for i in 2 4 8 16 32; do
curl --verbose -LO $MQ_URL && break
echo "[INFO] Wait $i seconds and retry curl download"
sleep $i
done
tar -zxvf ./*.tar.gz
pushd MQServer
for i in 2 4 8 16 32; do sudo ./mqlicense.sh -text_only -accept && break || sleep $i; done
sudo rpm -ivh --force-debian *.rpm
sudo /opt/mqm/bin/setmqinst -p /opt/mqm -i
popd
popd
ls /opt/mqm
ls /opt/mqm/inc/
set +ex
| true |
abec8bab6452f8da7c0cce76b884b0c10e6832cb | Shell | jo-m/wortprotokolle | /run_nogpu.sh | UTF-8 | 278 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
run="$1"
[ -z "$1" ] && run='train.py'
if [ "$run" = 'repl' ]; then
run=''
fi
if [ -x "$HOME/anaconda/bin/python" ]; then
"$HOME/anaconda/bin/python" $run
else
python $run
fi
if [ $? -eq 0 ]; then
say 'run succeeded'
else
say 'run failed'
fi
| true |
eae56dd1d774142cb54e2e4577dffc585e85cc7e | Shell | unisparks/unisparks | /extras/scripts/setup-arduino.sh | UTF-8 | 852 | 3.421875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
ARDUINO_DIST=https://downloads.arduino.cc/arduino-1.8.5-linux64.tar.xz
UNISPARKS_HOME="${UNISPARKS_HOME:-$(dirname $(dirname $(dirname "$0")))}"
set -e
apt-get update
apt-get install -y zip curl xz-utils build-essential default-jre python
echo "Installing Arduino"
mkdir -p /opt && curl -s ${ARDUINO_DIST} | tar xvJC /opt
ln -s /opt/arduino-1.8.5/arduino /usr/bin/arduino
ln -s /opt/arduino-1.8.5/arduino-builder /usr/bin/arduino-builder
echo "Installing Arduino libraries"
arduino --install-library "FastLED"
arduino --install-library "Adafruit NeoPixel"
echo "Installing boards"
arduino --pref "boardsmanager.additional.urls=http://arduino.esp8266.com/stable/package_esp8266com_index.json" --save-prefs
arduino --install-boards esp8266:esp8266
echo "Installing our library"
ln -s ${UNISPARKS_HOME} /root/Arduino/libraries/Unisparks
| true |
d28725fa5782ab1d15e0a50948ecc2ecf4eb8b5a | Shell | jonco3/mozutils | /bin/copyPatches | UTF-8 | 1,687 | 4.15625 | 4 | [] | no_license | #!/bin/bash
set -euo pipefail
SOURCE=~/work
DEST=~/shared/patches
HOSTALIAS=~/.hostalias
VERBOSE=
if [[ ! -d $SOURCE ]]; then
echo "Can't find source dir: $SOURCE"
exit 1
fi
if [[ ! -d $DEST ]]; then
echo "Can't find dest dir: $DEST"
exit 1
fi
if [[ ! -f $HOSTALIAS ]]; then
echo "Can't find host alias in: $HOSTALIAS"
exit 1
fi
if [[ $# -gt 0 && $1 == "-v" ]]; then
VERBOSE=-i
shift
fi
if [[ $# -ne 0 ]]; then
echo "Usage: copyPatches [-v]"
exit 1
fi
# check for modified files in current repo
if [[ -n "$(hg status -m 2> /dev/null)" ]]; then
echo "Modified files exist, aborting"
exit 1
fi
HOST=`cat $HOSTALIAS`
if [[ -n $VERBOSE ]]; then
echo "copyPatches $SOURCE -> $DEST/$HOST"
fi
cd $SOURCE
for dir in `ls`; do
if [[ ! -d $dir ]]; then
continue
fi
name=${dir##*/}
repo=$dir/.hg
patches=$repo/patches
if [[ ! -e "$patches/series" ]]; then
continue
fi
if [[ -n $VERBOSE ]]; then
echo " repo $dir"
fi
backup=$DEST/$HOST/$name
# Delete old patches. rsync --delete doesn't work without --dirs or
# --recursive.
for file in `ls $backup`; do
path=$patches/$file
if [[ ! -e $patches/$file ]]; then
if [[ -n $VERBOSE ]]; then
echo " deleting $file"
fi
rm $backup/$file
fi
done
if [[ ! -d $backup ]]; then
if ! mkdir -p $backup; then
echo "Failed to create directory: $backup"
exit 1
fi
fi
if ! rsync $VERBOSE --checksum --times $patches/* $backup; then
echo "rsync failed!"
exit 1
fi
done
| true |
bc214eed6dd77c301c2feafc545f1282956ffeeb | Shell | valdoonicanlives/config-files-backup | /scripts/dmenu-scripts/new-ones/dmenu2-menu | UTF-8 | 1,589 | 3.5625 | 4 | [] | no_license | #!/bin/sh
# Define your battery device. Look up in '/sys/class/power_supply/' for a directory named 'BAT0' ( it also can be 'BAT1 or something else )
#device='BAT1'
#battery="$(cat /sys/class/power_supply/$device/capacity)%"
# Volume Status for alsa users
#volume="$(amixer get Master | tail -1 | sed 's/.*\[\([0-9]*%\)\].*/\1/')"
if [[ -f $HOME/.dmenurc ]]; then
. $HOME/.dmenurc
DMENU="dmenu $OPTIONS -p "
else
DMENU="dmenu -i -p "
fi
# Define your preferred terminal
terminal='urxvt -e'
# How many spaces do you want before the battery status ?
spaces=10
# Automating the number of spaces
function auto_space
{
for ((i = 0; i <= $spaces; i++)); do
printf ' '
done
}
# Menu Order.
menu_list="File\nEdit\nWeb\nTerm\nEmacs\nMusic\nWifi\nHtop\nRanger\nScrot\nScrot-s\"
# menu_list="File\nEdit\nWeb\nTerm\nEmacs\nMusic\nWifi\nHtop\nRanger\nScrot\nScrot-s\n$(eval auto_space)Batt: $battery\n Vol: $volume"
cmd=$(echo -e "$menu_list" | eval $Dmenu)
case $cmd in
Edit)
$terminal vim ;;
Web)
chromium --incognito ;;
Ranger)
$terminal ranger ;;
Htop)
$terminal htop ;;
Term)
$terminal bash -c "tmux -q has-session && exec tmux attach-session -d || exec tmux new-session -nmain -s$USER@$HOSTNAME" ;;
File)
thunar ;;
Emacs)
emacs ;;
Music)
$terminal ncmpcpp ;;
Scrot)
scrot '%F--%I:%M:%S:%p--$wx$h--scrot.png' -e 'mv $f ~/.scrots/' && notify-send 'Scrot Saved !' ;;
Scrot-s)
scrot '%F--%I:%M:%S:%p--$wx$h--scrot.png' -s -e 'mv $f ~/.scrots/' && notify-send 'Scrot Saved !' ;;
Wifi)
gksudo ~/.scripts/dmenu-scripts/connman_dmenu ;;
esac
exit
| true |
7a860e3164ea07070274d8d385baaf84dd246742 | Shell | ulno/ulnoiot | /examples/scripts/ulnoiot | UTF-8 | 318 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Example script for starting ulnoiot
# Modify ULNOIOT_ROOT accordingly to your setup
#
# If you run in termux, apply termux-fix shebang onto this script
# if this should run as root make sure to fix $HOME like this:
# HOME=/home/pi
export ULNOIOT_ROOT="$HOME/ulnoiot"
exec bash "$ULNOIOT_ROOT/run" "$@"
| true |
48b9a8c0e1b581ea880c3884a54a401cfe77ab45 | Shell | gittygitgit/bash-sandbox | /bash_scripting_guide_exercises/easy/primes.sh | UTF-8 | 557 | 4.0625 | 4 | [] | no_license | #!/usr/bin/env sh
# Print (to stdout) all prime numbers between 60000 and 63000. The output should be nicely formatted in columns (hint: use printf).
:<< X
Prime number is divisible by 1 and itself
X
i=60000
function check_prime() {
num=$1
newnum=$(( $num-1 ))
j=1
while [ $(( j*j )) -lt $newnum ]; do
(( j++ ))
if [ $(( $num % $j )) -eq 0 ]; then
return 1
fi
done
return 0
}
primes=()
while (( $i <= 63000 )); do
check_prime $i
if [ $? -eq 0 ]; then
printf "%d is a prime number\n" $i
fi
(( i++ ))
done
| true |
56736ef10efb582b8fe149e17781af95697a8861 | Shell | tapaswenipathak/linux-kernel-stats | /scripts/dma.sh | UTF-8 | 2,084 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Obtains git logs files for DMA driver and its related keywords, for different linux kernel versions.
# Contributor: patelmadhu06@gmail.com
cd ~/kbd
myArray=("dma_pool_alloc" "dma_pool_free" "dma_pool_destroy" "dma_pool_create" "dma_map_*" "dma_addr_t" "dma_set_mask_and_coherent" "dma_set_mask" "dma_set_coherent_mask" "DMA_TO_DEVICE" "DMA_FROM_DEVICE" "dma_mapping_error" "dma_map_page" "dma_api" "dma-api" "DMA_API" "DMA-API" "DMADEVICES" "dma_buf" "dma_buffer" "DMA_ENGINE" "DMA_VIRTUAL_CHANNELS")
git checkout 2.0.0
for string in ${myArray[@]}; do
if [ -n "$(git log --all --grep="$string")" ]; then
echo -e "\e[6;35m \n version 2.0.0 \n \e[0m"
echo -e "\e[6;35m \n ${string} \n \e[0m"
git log --all --grep="$string"
else
echo -e "\e[6;35m \n version 2.0.0 \n \e[0m"
echo "No such string exists in version 2.0.0 in the git log."
continue
fi
done
cd ..
cd ~/archive
git checkout v1.0
for string in ${myArray[@]}; do
if [ -n "$(git log --all --grep="$string")" ]; then
echo -e "\e[6;35m \n v$i.0 \n \e[0m"
echo -e "\e[6;35m \n ${string} \n \e[0m"
git log --all --grep="$string"
else
echo -e "\e[6;35m \n v$i.0 \n \e[0m"
echo "No such string exists in version v$i.0 in the git log."
continue
fi
done
cd ..
SRCDIR_e=~/linux-stable/linux-stable
cd $SRCDIR_e
for ((i=3; i<=6; i++)); do
git checkout -fq v$i.0
if [[ $? -eq 0 ]]; then
for string in ${myArray[@]}; do
if [ -n "$(git log --all --grep="$string")" ]; then
echo -e "\e[6;35m \n v$i.0 \n \e[0m"
echo -e "\e[6;35m \n ${string} \n \e[0m"
git log --all --grep="$string"
else
echo -e "\e[6;35m \n v$i.0 \n \e[0m"
echo "No such string exists in version v$i.0 in the git log."
continue
fi
done
else
continue
fi
done | true |
6c060f770d806effad7ad15ac70783009cd12f99 | Shell | godali/terraform-raspberrypi-bootstrap | /k8s_prep.sh | UTF-8 | 1,213 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# This installs the base instructions up to the point of joining / creating a cluster
# Based off of https://gist.github.com/alexellis/fdbc90de7691a1b9edb545c17da2d975#gistcomment-2228114
echo Adding " cgroup_enable=cpuset cgroup_memory=1" to /boot/cmdline.txt
sudo cp /boot/cmdline.txt /boot/cmdline_backup.txt
orig="$(head -n1 /boot/cmdline.txt) cgroup_enable=cpuset cgroup_enable=memory cgroup_memory=1"
echo $orig | sudo tee /boot/cmdline.txt
curl -s https://download.docker.com/linux/raspbian/gpg | sudo apt-key add -
echo "deb [arch=armhf] https://download.docker.com/linux/raspbian stretch edge" | sudo tee /etc/apt/sources.list.d/socker.list
apt-get update -q
apt-get install -y docker-ce=18.06.0~ce~3-0~raspbian --allow-downgrades
echo "docker-ce hold" | sudo dpkg --set-selections
usermod pi -aG docker
dphys-swapfile swapoff
dphys-swapfile uninstall
update-rc.d dphys-swapfile remove
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
apt-get update -q
apt-get install -y kubeadm=1.13.1-00 kubectl=1.13.1-00 kubelet=1.13.1-00
echo reboot....
| true |
71a4f92b939510229cfa06eac50b0f5967f99b5c | Shell | seiji/docker-images | /redis-cluster/docker-entrypoint.sh | UTF-8 | 1,031 | 3.796875 | 4 | [] | no_license | #!/bin/sh
if [ "$1" = 'redis-cluster' ]; then
sysctl -w vm.overcommit_memory=1
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo 511 > /proc/sys/net/core/somaxconn
# Allow passing in cluster IP by argument or environmental variable
IP="${2:-$IP}"
max_port=7002
for port in `seq 7000 $max_port`; do
mkdir -p /redis-conf/${port}
mkdir -p /redis-data/${port}
if [ -e /redis-data/${port}/nodes.conf ]; then
rm /redis-data/${port}/nodes.conf
fi
PORT=${port} envsubst < /redis-conf/redis-cluster.tmpl > /redis-conf/${port}/redis.conf
done
gen-supervisord-conf.sh $max_port > /etc/supervisor/supervisord.conf
supervisord -c /etc/supervisor/supervisord.conf
sleep 3
if [ -z "$IP" ]; then # If IP is unset then discover it
IP=$(hostname -i)
fi
IP=$(echo ${IP}) # trim whitespaces
echo "Using redis-cli to create the cluster"
echo "yes" | redis-cli --cluster create ${IP}:7000 ${IP}:7001 ${IP}:7002
tail -f /var/log/supervisor/redis*.log
else
exec "$@"
fi
| true |
d97896848dbd6271f905f4b7080bd1ad43337aee | Shell | jasonracey/audio-file-scripts | /scripts/iso-to-dsf.sh | UTF-8 | 165 | 2.515625 | 3 | [
"MIT"
] | permissive | #! /bin/sh
find . -name '*.iso' -exec sh -c 'cp -n sacd_extract "$(dirname "${1}")"' _ {} \;
find . -name '*.iso' -execdir sh -c './sacd_extract -s -i"$1"' _ {} \; | true |
d7bb2843108338a69f1ef014c9b481a6ff16bcbe | Shell | mariusroets/scripts | /bin/vpn | UTF-8 | 463 | 3.359375 | 3 | [] | no_license | #!/bin/bash
vpnpid=`pidof openconnect`
group="Eskom"
server="ssl"
if [ "$1" == "status" ]; then
if [ -n "$vpnpid" ]; then
echo "VPN is running with pid $vpnpid"
else
echo "VPN is not running"
fi
exit 0
elif [ "$1" == "vpn" ]; then
group="ESKOM-SECURE-VPN"
server="vpn"
fi
if [ -n "$vpnpid" ]; then
sudo kill -2 $vpnpid
else
sudo openconnect -b -u roetsm --authgroup $group --passwd-on-stdin $server.eskom.co.za
fi
| true |
0224f8a9ac9e0ff6c57687f3d0230c821b9b5ef5 | Shell | illepic/downfall-guild | /project/scripts/drupalvm.sh | UTF-8 | 459 | 2.625 | 3 | [] | no_license | #!/usr/bin/env bash
# Assuming starting from root
cd drupal-vm
git checkout tags/3.1.2 # Change me to update
# Symlink `config.yml` and `drupal.make.yml` from `config/` into `drupal-vm/`
ln -sf ../config/config.yml
ln -sf ../config/drupal.composer.json
# DELETE the D8 folder and kick off Vagrant.
echo "You'll probably be asked for your local admin password here."
sudo rm -rf ../project/web/d8
vagrant halt
vagrant up --provision
# Back to root
cd ../
| true |
e1b059a7acfeed5085eb8f3ea7b650525d0ddf38 | Shell | y-mory/dotfiles | /bk/settings_export.sh | UTF-8 | 245 | 2.6875 | 3 | [] | no_license | # 当該ファイルのパスを取得
this_dir=$(cd $(dirname path); pwd)
# bundleファイルを作成(上書き)
brew bundle dump --force > ${this_dir}
# vscodeエクステンション出力
code --list-extensions > ${this_dir}/extensions
| true |
486c01b9e4e2695e89e3785d76e0cfc358ee9626 | Shell | walkes/forego | /bin/release | UTF-8 | 1,257 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -eu
alias errcho='>&2 echo'
cleanup () {
# cleanup
rm -rf release_response upload_response
}
version=v0.16.9
release_creation_status=$(curl \
--header "Authorization: Bearer ${GITHUB_API_KEY}" \
--header "Content-Type: application/json" \
-w "%{http_code}\n" --output release_response -X POST -d "{
\"target_commitish\": \"master\",
\"name\": \"${version}\",
\"tag_name\": \"${version}\",
\"draft\": false,
\"prerelease\": false
}" https://api.github.com/repos/walkes/forego/releases)
if [ "${release_creation_status}" -ne "201" ]; then
errcho "Release creation failed!"
cat release_response && cleanup
exit 22
fi
release_id=$(cat release_response | jq '.id')
echo "Created release id=${release_id}"
upload_status=$(curl --header "Authorization: Bearer ${GITHUB_API_KEY}" \
--header "Content-Type: $(file -b --mime-type forego)" \
-w "%{http_code}\n" --output upload_response \
-X POST --data-binary @forego \
https://uploads.github.com/repos/walkes/forego/releases/${release_id}/assets?name=forego-armhf-${version})
if [ "${upload_status}" -ne "201" ]; then
errcho "Upload failed!"
cat upload_response && cleanup
exit 22
fi
cleanup | true |
9eb2ae6fce0d9cc191e0f987b8f122d836419986 | Shell | elifarley/shell-lib | /lib/file.sh | UTF-8 | 2,420 | 4.125 | 4 | [
"MIT"
] | permissive |
some_file() { local base="$1"; shift; find -H "$base" ! -path "$base" "$@" -print -quit; }
dir_full() { local base="$1"; shift; test "$(cd "$base" &>/dev/null && find -H . -maxdepth 1 ! -path . "$@" -print -quit)"; }
dir_not_empty() { test "$(\ls -A "$@" 2>/dev/null)" ;}
# -empty: not supported in BusyBox v1.29.3
dir_empty() { find -H "$1" -maxdepth 0 -empty | read v ;}
dir_count() { test -d "$1" && echo $(( $(\ls -afq "$1" 2>/dev/null | wc -l ) -2 )) ;}
existing_path() {
local path="$1"
until [[ $path == '.' || -e $path ]]; do
path=$(dirname $path)
done
echo $path
}
path_owner() {
local path="$(existing_path "$1")"
echo $(stat -c '%U' $path)
}
safe_rm() {
local suffix=".saferm"
for i in "$@"; do
test '/' = "$i" && echo "Invalid path: '$i'" && return 1
test -e "$i" || continue
echo "Removing '$i'..." && mv "$i" "$i$suffix" && rm -rf "$i$suffix" || return
done
return 0
}
# returns next-to-last path element
# default separator is /
ntl() { local separator="${1:-/}"; awk -F"$separator" 'NF>1 {print $(NF-1)}'; }
parentname_awk() { echo $1 | ntl "$2"; }
parentname() {
local path root 2>/dev/null # ignore error in shells without `local`
path="${1:-$PWD}"; path="${path%/}"; path="${path%/*}"
root="${2:-/}"; test "$root" != "/" && root="${root%/}"
test "$path" != "$root" && echo $(basename "$path")
}
rmdir_if_exists() {
local p; for p in "$@"; do
test -e "$p" || continue
echo "Removing '$p'"
rm -r "$p" || return
done
}; test "$BASH_VERSION" && export -f rmdir_if_exists
rmexp_if_exists() {
# TODO make it work with paths that include spaces
while test $# -gt 0; do
for i in $(eval echo "$1"); do
shift
rmdir_if_exists "$i" || return 1
done
done
}
# Syntax is similar to rsync
cpdir() {
test $# -eq 2 || { echo "Usage: cpdir <SRC> <DEST>"; return 1; }
local src="$1"; shift
local dest="$1"; shift
local include_all=''; strendswith "$src" / && include_all='*'
test -e "$dest" || { mkdir "$dest" || return; }
cp -dr "$src"$include_all "$dest"
}
# Syntax is similar to rsync
cpdirm() {
test $# -ge 2 || { echo "Usage: cpdirm [OPTION...]"; return 1; }
local cpargs='' dest=''
for p in "$@"; do
test ! "${p##-*}" || {
dest="$p"
strendswith "$p" / && p="$p"'*'
}
cpargs="$cpargs '$p'"
done
test -e "$dest" || { mkdir "$dest" || return; }
eval cp -dr $cpargs
}
| true |
67ed4284952b775a84757a99439023cfbf626967 | Shell | mtintstfc/desktop-scripts | /power-source/udev-power-source.sh | UTF-8 | 1,394 | 3.71875 | 4 | [] | no_license | #!/bin/bash
source "$(dirname $0)/shared.sh"
####
# Configuration
####
SOUND_POWERSAVE_BATTERY=1
SOUND_POWERSAVE_AC=0
WIFI_IFNAME="wlp4s0"
WIFI_POWERSAVE_BATTERY="on"
WIFI_POWERSAVE_AC="off"
# ondemand Dynamically switch between CPU(s) available if at 95% cpu load
# performance Run the cpu at max frequency
# conservative Dynamically switch between CPU(s) available if at 75% load
# powersave Run the cpu at the minimum frequency
# userspace Run the cpu at user specified frequencies
CPU_GOVERNOR_BATTERY=powersave
CPU_GOVERNOR_AC=ondemand
_usage() {
echo "Usage: $0 [ac|battery]"
exit 1
}
_getvalue() {
eval "echo \$${1}_${mode}"
}
case "$1" in
ac) mode="AC" ;;
battery) mode="BATTERY" ;;
*) _usage ;;
esac
if [ $mode == "AC" ]; then
title="Laptop on the main"
body="Your laptop is now connected to the main."
urgency="normal"
else
title="Laptop on battery"
body="Your laptop is now disconnected from the main."
urgency="critical"
fi
exec_all_session "notify-send --urgency=$urgency \"$title\" \"$body\""
# Don't do anything at all
exit
# SOUND
echo $(_getvalue SOUND_POWERSAVE) > /sys/module/snd_hda_intel/parameters/power_save
# CPU
echo $(_getvalue CPU_GOVERNOR) | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor >/dev/null
# WIFI
/usr/sbin/iw dev "$WIFI_IFNAME" set power_save "$(_getvalue WIFI_POWERSAVE)"
| true |
0c8b822d908f15590f1eefffd4bb933d06b52dbb | Shell | kitpages/docker-glusterfs | /bin/start_volume.sh | UTF-8 | 340 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
source $(dirname $0)/common.sh
PARAMETER_LIST=`for i in "${GLUSTER_PEER_BASH_ARRAY[@]}"; do echo -n "${i}:${GLUSTER_BRICK_PATH} ";done `
echo "PARAMETER_LIST = ${PARAMETER_LIST}"
gluster volume info gv0
if [ $? -ne 0 ]; then
gluster volume create gv0 replica ${GLUSTER_REPLICA} ${PARAMETER_LIST}
fi
gluster volume start gv0
| true |
41615217c44a4dcc63e459a67c01f3a07d2ce3da | Shell | couchbase/perfrunner | /scripts/upload_info.sh | UTF-8 | 456 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash +x
for FILE in *.zip; do
[ -f "$FILE" ] || continue
env/bin/aws s3 cp --quiet ${FILE} s3://perf-artifacts/${BUILD_TAG}/${FILE}
echo "https://s3-us-west-2.amazonaws.com/perf-artifacts/${BUILD_TAG}/${FILE}"
done
for FILE in *.prof; do
[ -f "$FILE" ] || continue
env/bin/aws s3 cp --quiet ${FILE} s3://perf-artifacts/${BUILD_TAG}/${FILE}
echo "https://s3-us-west-2.amazonaws.com/perf-artifacts/${BUILD_TAG}/${FILE}"
done
| true |
9b13bbdb80c1f41067fdb51db2f5fb52201a938d | Shell | dva-re/docker-minidlna | /start.sh | UTF-8 | 254 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# if minidlna config file doesnt exist then copy default to host config volume
if [ ! -f "/config/minidlna.conf" ]; then
# copy over customised config
cp /etc/minidlna.conf /config/
fi
/etc/init.d/minidlna force-reload
tail -f /dev/null
| true |
642cd02ddd380dd99626c8741b38da69cc007cd6 | Shell | yults/OS_labs | /lab2/7.sh | UTF-8 | 969 | 3.171875 | 3 | [] | no_license | #!/bin/bash
for x in $(ps -Ao pid,command | tail -n +2 | awk '{print $1":"$2}')
do
pid=$(echo $x | awk -F ":" '{print $1}')
commmand=$(echo $x | awk -F ":" '{print $2}')
if [ -f "/proc/"$pid/io ]
then
rbytes=$(sudo grep -h "read_bytes:" "/proc/"$pid/io | sed "s/[^0-9]*//")
echo "$pid $cmd $rbytes"
fi
done | sort -nk1 > 7beforesleep.txt
sleep 1m
for x in $(ps -Ao pid,command | tail -n +2 | awk '{print $1":"$2}')
do
pid=$(echo $x | awk -F ":" '{print $1}')
commmand=$(echo $x | awk -F ":" '{print $2}')
if [ -f "/proc/"$pid/io ]
then
rbytes=$(sudo grep -h "read_bytes:" "/proc/"$pid/io | sed "s/[^0-9]*//")
echo "$pid $cmd $rbytes"
fi
done | sort -nk1 > 7aftersleep.txt
cat 7aftersleep.txt | while read s
do
pid=$(echo "$s" | awk '{print $1}')
m0=$(echo "$s" | awk '{print $3}')
cmd=$(echo "$s" |awk '{print $2}')
m1=$(cat 7beforesleep.txt | awk -v id="$pid" '{if ($1 == id) print $3}')
mdiff=$((m1 - m0))
echo $pid":"$cmd":"$mdiff
done | sort -t ':' -nrk3 | head -3
| true |
24cece501366c350967e0aad95dc4f3ddb684aea | Shell | jakobant/lxc-dc | /roles/lxc/files/lxcmon.sh | UTF-8 | 716 | 2.71875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# mon to graphite...
# Simple mon
GSERVER="127.0.0.1"
GPORT="2003"
MM=/tmp/stuff
X=/tmp/x
DD=`date +%s`
PRE="stats"
rm $MM
for a in `lxc-ls`
do
lxc-info -H --name $a > $X
NAME=`cat $X|grep Name|awk '{print $2}'`
CPU=`cat $X|grep CPU|awk '{print $3}'`
IOPS=`cat $X|grep BlkIO|awk '{print $3}'`
MEM=`cat $X|grep Memory|awk '{print $3}'`
NI=`cat $X|grep "TX b"|awk '{print $3}'`
NO=`cat $X|grep "RX b"|awk '{print $3}'`
echo "$PRE.$NAME.CPU $CPU $DD" >> $MM
echo "$PRE.$NAME.IOPS $IOPS $DD" >> $MM
echo "$PRE.$NAME.MEM $MEM $DD" >> $MM
echo "$PRE.$NAME.NETIN $NI $DD" >> $MM
echo "$PRE.$NAME.NETOUT $NO $DD" >> $MM
done
cat $MM | nc $GSERVER $GPORT
/usr/bin/lxc-fancy -j > /var/www/html/lxc.json
exit 0
| true |
63fe0508901c8c5ca8567e03b98ab27208043dfe | Shell | vhalbert/teiid-operator | /build/bin/user_setup | UTF-8 | 198 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -x
chmod ug+rwx ${HOME}
# runtime user will need to be able to self-insert in /etc/passwd
chmod g=u /etc/passwd
# no need for this script to remain in the image after running
rm $0
| true |
cac4f15c8777ccdb0fbe8242ff0c4a34fcbee2bb | Shell | shwetha-mc/Data-Integration | /DB/Oracle/oracle_exporter.sh | UTF-8 | 1,079 | 3.390625 | 3 | [] | no_license | #!/bin/bash
arch=$(arch)
if [ $arch == "x86_64" ]
then
sqlcmd="sqlplus64"
else
sqlcmd="sqlplus"
fi
if [ $# -ge 4 ]
then
if [ -t 0 ]
then
filename=$1
user=$2
pass=$3
host=$4
service=$5
loc=$6
tablename=$7
else
read filename user pw host service loc tablename
fi
if [ $loc == "nolocal" ]
then
scp hpccdemo@192.168.13.130:/var/lib/HPCCSystems/mydropzone/$filename $HPCC_ORA/gen/data/downloads/
else
cp /var/lib/HPCCSystems/mydropzone/$filename $HPCC_ORA/gen/data/downloads/
fi
#generate control file
sh $HPCC_ORA/./gen/getcolumns.sh $tablename
$sqlcmd $user/$pass@$host/$service @$HPCC_ORA/gen/tmp/exportscript.sql
sh $HPCC_ORA/./gen/ctlgen.sh $filename $tablename
#execute sqlldr command but not before setting ORACLE_HOME and ORACLE_BASE
$ORACLE_HOME/bin/./sqlldr $user/$pass@$host/$service control=$HPCC_ORA/gen/tmp/$tablename.ctl log=$HPCC_ORA/gen/logs/$tablename.log bad=$HPCC_ORA/gen/logs/$tablename.bad
else
echo "./oracle_exporter.sh filename username password hostname:port serviceid local/nolocal tablename"
fi
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.