blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
2060ff56eba2e0ee3f63e2e986737cb3e1602062
|
Shell
|
msys2/MINGW-packages
|
/mingw-w64-metis/PKGBUILD
|
UTF-8
| 3,242
| 2.734375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: Ray Donnelly <mingw.android@gmail.com>
# PKGBUILD for METIS (serial graph partitioning) under mingw-w64.
_realname=metis
pkgbase=mingw-w64-${_realname}
pkgname=("${MINGW_PACKAGE_PREFIX}-${_realname}")
pkgver=5.1.0
pkgrel=4
pkgdesc="Serial Graph Partitioning and Fill-reducing Matrix Ordering (mingw-w64)"
arch=('any')
mingw_arch=('mingw32' 'mingw64' 'ucrt64' 'clang64' 'clang32' 'clangarm64')
url='http://glaros.dtc.umn.edu/gkhome/views/metis'
license=('Apache')
depends=("${MINGW_PACKAGE_PREFIX}-gcc-libs")
makedepends=("${MINGW_PACKAGE_PREFIX}-cc"
"${MINGW_PACKAGE_PREFIX}-cmake"
"${MINGW_PACKAGE_PREFIX}-ninja")
options=('strip' 'staticlibs')
source=("http://glaros.dtc.umn.edu/gkhome/fetch/sw/${_realname}/metis-${pkgver}.tar.gz"
"0001-mingw-w64-does-not-have-sys-resource-h.patch"
"0002-mingw-w64-do-not-use-reserved-double-underscored-names.patch"
"0003-WIN32-Install-RUNTIME-to-bin.patch"
"0004-Fix-GKLIB_PATH-default-for-out-of-tree-builds.patch")
sha256sums=('76faebe03f6c963127dbb73c13eab58c9a3faeae48779f049066a21c087c5db2'
'2a18b7083c004d2b03fb06c4cda0344002e273daa9d24512da310a7cec350629'
'779ab9b0fe563a27b78ce2670668fbce4d675e70edb75e1020d299d927e58bf9'
'a7ad7b4bad7a83c9c72cf5281830ad6e0d50fe6fd07c73734e8c86e762533421'
'b509590283beb06db544dc76345a3635c8f1eba02d69fcba09f5222add303b9c')
# Apply the bundled mingw-w64 compatibility patches to the unpacked source.
prepare() {
cd "${srcdir}"/${_realname}-${pkgver}
patch -p1 -i "${srcdir}"/0001-mingw-w64-does-not-have-sys-resource-h.patch
patch -p1 -i "${srcdir}"/0002-mingw-w64-do-not-use-reserved-double-underscored-names.patch
patch -p1 -i "${srcdir}"/0003-WIN32-Install-RUNTIME-to-bin.patch
patch -p1 -i "${srcdir}"/0004-Fix-GKLIB_PATH-default-for-out-of-tree-builds.patch
}
# Configure and build twice: static libraries (SHARED=OFF) and shared
# libraries (SHARED=ON), each in its own out-of-tree build directory.
build() {
local _shared
for _shared in OFF ON; do
# Start from a clean build tree for this variant ('rm -rf' is a no-op
# when the directory does not exist, so no pre-check is needed).
rm -rf "${srcdir}"/build-shared-${_shared}-${MSYSTEM}
mkdir -p "${srcdir}"/build-shared-${_shared}-${MSYSTEM} && cd "${srcdir}"/build-shared-${_shared}-${MSYSTEM}
# Reset the array on every iteration: the original used a bare
# 'declare -a extra_config', which does NOT clear an existing array,
# so the second pass accumulated a duplicate -DCMAKE_BUILD_TYPE flag.
local -a extra_config=()
if check_option "debug" "n"; then
extra_config+=("-DCMAKE_BUILD_TYPE=Release")
else
extra_config+=("-DCMAKE_BUILD_TYPE=Debug")
fi
MSYS2_ARG_CONV_EXCL="-DCMAKE_INSTALL_PREFIX=" \
${MINGW_PREFIX}/bin/cmake \
-G'Ninja' \
-DCMAKE_INSTALL_PREFIX=${MINGW_PREFIX} \
-DCMAKE_C_FLAGS="${CPPFLAGS} ${CFLAGS}" \
"${extra_config[@]}" \
-DSHARED=${_shared} \
../${_realname}-${pkgver}
${MINGW_PREFIX}/bin/cmake --build .
done
}
# Install both build variants into the package root and hand-write a
# pkg-config file (upstream does not ship one).
package() {
local _shared
for _shared in OFF ON; do
cd "${srcdir}"/build-shared-${_shared}-${MSYSTEM}
DESTDIR="${pkgdir}" ${MINGW_PREFIX}/bin/cmake --install .
mkdir -p "${pkgdir}${MINGW_PREFIX}/lib/pkgconfig"
# The sed strips blank lines and leading whitespace from the template.
echo "
prefix=${MINGW_PREFIX}
libdir=\${prefix}/lib
includedir=\${prefix}/include
Name: ${_realname}
URL: ${url}
Version: ${pkgver}
Description: ${pkgdesc}
Cflags: -I\${includedir}
Libs: -L\${libdir} -l${_realname}
" | sed '/^\s*$/d;s/^\s*//' > "${pkgdir}${MINGW_PREFIX}/lib/pkgconfig/${_realname}.pc"
done
}
| true
|
8d3148528f89d62a8df964a0b1f87116df2beab8
|
Shell
|
digo-smithh/Simple-Shell-Calculator
|
/calculator.sh
|
UTF-8
| 718
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Simple interactive calculator: reads two numbers and an operation
# choice (1-4) from stdin and evaluates the result with bc.
# NOTE: the original first line was '# !/bin/bash' (space after '#'),
# which is a plain comment, not a shebang — fixed here.
# Take user Input
echo "Enter two numbers: "
read -r a
read -r b
# Input type of operation
echo "Enter the type of operation:"
echo "1. Addition"
echo "2. Subtraction"
echo "3. Multiplication"
echo "4. Division"
read -r ch
# Verificating
if [ "$ch" -eq 4 ] && [ "$b" -eq 0 ]
then
echo "Division by zero is impossible"
# Exit non-zero: this is an error path (the original exited 0).
exit 1
else
if [ "$ch" -ne 1 ] && [ "$ch" -ne 2 ] && [ "$ch" -ne 3 ] && [ "$ch" -ne 4 ]
then
echo "Invalid Option"
else
# Switch Case to perform calulator operations
case $ch in
1)res=$(echo "$a + $b" | bc)
;;
2)res=$(echo "$a - $b" | bc)
;;
3)res=$(echo "$a * $b" | bc)
;;
4)res=$(echo "scale=2; $a / $b" | bc)
;;
esac
echo "Result: $res"
fi
fi
| true
|
d3c25bf1179454e6abe5369408290bd73b5450e2
|
Shell
|
hmit/udd
|
/scripts/fetch_ddtp_translations.sh
|
UTF-8
| 982
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/sh
# Translation files will be taken from the mirror available on the local machine
# However, we are doing some housekeeping to register what translations did really
# changed and need to be importet and which one are not touched by translators
# and thus can be ignored.
#
# Usage: fetch_ddtp_translations.sh TARGETPATH MIRROR
#   TARGETPATH - directory where per-suite checksum lists are written
#   MIRROR     - root of the local Debian mirror to scan
set -e
TARGETPATH=$1
MIRROR=$2
# Find every */i18n directory under the mirror and strip the mirror prefix
# and trailing /i18n, leaving the relative suite/component path.
for indexdir in `find $MIRROR -name i18n -type d | sed "s?$MIRROR/\(.\+\)/i18n?\1?"` ; do
# rel=`echo $index | sed "s?$MIRROR/*\([^/]\+\)/.*?\1?"`
targetfile="${TARGETPATH}/${indexdir}"
mkdir -p `dirname $targetfile`
index=${MIRROR}/$indexdir/i18n/Index
if [ -f $index ] ; then
# An Index file exists: extract its .bz2 checksum lines, normalising
# leading spaces and runs of spaces to single separators.
grep "\.bz2" $index | sed -e 's/^ //' -e 's/ \+/ /g' > $targetfile
else
# No Index: synthesise one line per .bz2 translation file as
# "<sha1><size> <basename>".  The size is scraped out of 'ls -l'
# output with sed (NOTE(review): sha1 and size are concatenated with
# no separator between them — presumably matches the Index format;
# confirm against a real Index file).
rm -f $targetfile
for trans in `find ${MIRROR}/$indexdir/i18n -mindepth 1 -maxdepth 1 -name "*.bz2"` ; do
echo "`sha1sum $trans | cut -d' ' -f1``ls -l $trans | sed 's/^[-rwlx]\+ [0-9]\+ [^ ]\+ [^ ]\+\([ 0-9]\+[0-9]\) .*/\1/'` `basename $trans`" >> $targetfile
done
fi
done
exit 0
| true
|
f764316229edfeb7b722999c8c048fec4cc315f6
|
Shell
|
Mattlk13/bsd
|
/script/freebsd/minimize.sh
|
UTF-8
| 639
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh -ue
# Zero-fill the free space of zroot/ROOT/default so the exported disk
# image compresses well, then restore the dataset's compression setting.
echo "==> Zero out the free space to save space in the final image";
# Save the compression setting of the SAME dataset we modify below.
# (The original read it from the pool root 'zroot' but restored it onto
# 'zroot/ROOT/default', which could clobber that dataset's setting.)
COMPRESSION=$(zfs get -H compression zroot/ROOT/default | cut -f3);
# Compression must be off while zero-filling, otherwise the zeros are
# compressed away and the free space is never actually overwritten.
zfs set compression=off zroot/ROOT/default;
dd if=/dev/zero of=/EMPTY bs=1m &
PID=$!;
# Poll until the dataset reports no available space, i.e. dd has filled it.
avail=$(zfs get -pH avail zroot/ROOT/default | cut -f3);
while [ "$avail" -ne 0 ]; do
sleep 15;
avail=$(zfs get -pH avail zroot/ROOT/default | cut -f3);
done
kill $PID || echo "dd already exited";
rm -f /EMPTY;
# Block until the empty file has been removed, otherwise, Packer
# will try to kill the box while the disk is still full and that's bad
sync;
zfs set compression=$COMPRESSION zroot/ROOT/default;
| true
|
bcd3801114150044e08863fbb77d4292f96d16e2
|
Shell
|
dinojr/configuration
|
/bin/rubber_png
|
UTF-8
| 172
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Render a LaTeX source to a trimmed PNG: compile to PostScript with
# rubber, convert/trim with ImageMagick, then clean rubber's artifacts.
# Usage: rubber_png FILE[.tex]
if [ -z "${1:-}" ]; then
echo "Usage: ${0##*/} FILE[.tex]" >&2
exit 1
fi
NAME=${1%.tex}
NAMETEX="$NAME.tex"
NAMEPS="$NAME.ps"
NAMEPNG="$NAME.png"
# Quote all path expansions so filenames with spaces survive.
rubber --ps "$NAMETEX"
convert -trim "$NAMEPS" "$NAMEPNG"
rubber --ps --pdf --clean "$NAMETEX"
| true
|
217eeeee9d039b97b16825ebf2aef4759f826f50
|
Shell
|
kevincolyar/ruby_oracle_libs
|
/linux_x86_32/instantclient_11_1/sdk/ott
|
UTF-8
| 529
| 2.921875
| 3
|
[] |
no_license
|
#! /bin/sh
# Script for running ott in instant client.
# Before running this script:
# set jdk1.5/bin in your PATH
# set LD_LIBRARY_PATH to include the directory containing the instant client
# libraries
# set ottclasses.zip, ojdbc5.jar, orai18n.jar as part of your CLASSPATH
#
# Arguments beginning with "user" are collected into OTTUSER (exported
# for the JVM); every other argument is forwarded to CMain verbatim.
while [ $# -gt 0 ]
do
case $1 in
user*)
OTTUSER="$OTTUSER $1"
export OTTUSER ;;
*)
args="$args $1";;
esac
shift
done
# $args is deliberately unquoted so the accumulated string word-splits
# back into individual arguments (POSIX sh has no arrays).  This breaks
# for arguments containing spaces — a known limitation of the pattern.
exec java oracle.ott.c.CMain nlslang=${NLS_LANG} $args
| true
|
33add58eae1a0c3f09597e0ec23d1cdd453f631e
|
Shell
|
adamtaylorFR/advent2020
|
/day3/treesTwo.sh
|
UTF-8
| 682
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Advent of Code 2020 day 3 part 2: count trees ('#') hit while moving
# right by sideStep and down by downStep across a cyclically repeating
# grid of width $width.  This variant uses $skip to process every other
# line, i.e. a "down 2" slope.
input="day3/input"
width=31
sideStep=1
side=1
downStep=1
downs=0
opens=0
trees=0
skip=0
# ......##.###..##.#.###......#.#
while IFS= read -r line
do
echo "$line"
# NOTE(review): $square is printed before it is assigned on the first
# iteration, so the first echo shows an empty value — confirm intended.
echo "$square and $downs, $side "
if [ $skip -eq 0 ]; then
skip=1
# Column for this row: 1 + sideStep * rows-descended, wrapped to the
# grid width because the pattern repeats horizontally.
side=$(( (1 + $sideStep * $downs) % $width))
# Extract the single character at that (1-based) column.
square="${line:$(("$side" - 1)):1}"
downs=$((downs + $downStep))
if [[ $square == "#" ]];then
trees=$(($trees + 1))
else
opens=$(($opens + 1))
fi
else
# Toggle back so the next line is processed (every-other-line = down 2).
skip=0
fi
done < "$input"
echo "$trees trees and $opens opens"
# Right 1, down 1. 72
# Right 3, down 1. 207
# Right 5, down 1. 90
# Right 7, down 1. 60
# Right 1, down 2. 33
# 2655892800
| true
|
bf13ee0e34a439efb7e52b1446a418f9f09fae38
|
Shell
|
satai-work/cassandra
|
/!run.sh
|
UTF-8
| 420
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Ensure each Cassandra node has a config directory containing a
# cassandra.yaml seeded from the template, then start the cluster.
for node in cassandra1 cassandra2; do
cfg_dir="${node}.config"
[ -d "$cfg_dir" ] || mkdir "$cfg_dir"
[ -e "$cfg_dir/cassandra.yaml" ] || cp ./cassandra.yaml.template "./$cfg_dir/cassandra.yaml"
done
docker-compose up -d
| true
|
bd9d4f01b6fe12c2fa9e0cce0c816a21c4a11b94
|
Shell
|
hubtype/botonic
|
/scripts/ci/old/prepare-packages.sh
|
UTF-8
| 611
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# For every botonic-* package under packages/: wipe build artifacts,
# reinstall dev dependencies, and rebuild.  Run from anywhere; paths are
# resolved relative to this script's location.
BIN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Scripts live two levels below the repo root.
cd "$BIN_DIR/../.." || exit 1
cd packages || exit 1
# clean package, clean install, build
for package in botonic-*; do
cd "$package" || exit
echo "Preparing $package..."
echo "===================================="
echo "Cleaning..."
# 'nice' lowers priority so long builds don't starve the machine.
nice rm -rf node_modules lib dist
echo "Installing deps..."
nice npm i -D > /dev/null
echo "Building..."
nice npm run build > /dev/null
echo ""
cd ..
done
# restart eslint_d in case any eslint plugin has been upgraded
killall eslint_d 2> /dev/null
killall -9 eslint_d 2> /dev/null
| true
|
b37d720368c25dc53128fe50c45537b839301f53
|
Shell
|
Mabahe/typo3-testing-docker-containers
|
/php56/enable_repos.sh
|
UTF-8
| 418
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Enable the ondrej PPA (PHP 5.6) inside the build container and upgrade.
set -e
source /pd_build/buildconfig
set -x
apt-get update
# ondrej with php 5.6
echo deb http://ppa.launchpad.net/ondrej/php/ubuntu bionic main > /etc/apt/sources.list.d/php.list
# ondrej key - the recv-keys part takes a bit of time, so it's faster to receive multiple keys at once.
# (The original command ended with a dangling backslash that continued
# the line onto a bare '#' comment line — it happened to work only
# because '#' starts a comment at a word boundary.  Removed.)
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E5267A6C
apt-get update
apt-get -y dist-upgrade
|
0b38282dbe60bb7556278866ca34c54e7a18f312
|
Shell
|
AlxBouras/Compute-Canada-Guidelines
|
/generic_array_launcher.sh
|
UTF-8
| 1,432
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH --array=1-10 # Launch an array of 10 jobs
#SBATCH --account=def-someuser # Account with resources
#SBATCH --cpus-per-task=6 # Number of CPUs
#SBATCH --gres=gpu:1 # Number of GPUs (per node)
#SBATCH --mem=5G # memory (per node)
#SBATCH --time=0-01:30 # time (DD-HH:MM)
#SBATCH --mail-user=user@domain # Where to email
#SBATCH --mail-type=FAIL # Email when a job fails
#SBATCH --output=/scratch/username/some/folder/%A_%a.out # Default write output on scratch, to jobID_arrayID.out file
# Build an isolated virtualenv on node-local storage and install the
# project's requirements from the cluster's offline wheel cache.
module load python/3.7
virtualenv --no-download "$SLURM_TMPDIR/env"
source "$SLURM_TMPDIR/env/bin/activate"
pip install --no-index --upgrade pip
pip install --no-index -r requirements.txt
date
SECONDS=0
# You can access the array ID via $SLURM_ARRAY_TASK_ID
# The "$@" transfers all args passed to this bash file to the Python script
# i.e. a call to 'sbatch $sbatch_args this_launcher.sh --arg1=0 --arg2=True'
# will call 'python my_script.py --arg1=0 --arg2=True'
# Quoted "$@" so arguments containing spaces are forwarded intact
# (the original unquoted $@ re-split them).
python my_script.py "$@"
# Utility to show job duration in output file
diff=$SECONDS
echo "$(($diff / 60)) minutes and $(($diff % 60)) seconds elapsed."
date
| true
|
7d016910899d192e3e8ac3fbc6eac8a04e253fde
|
Shell
|
tochofr/dotfiles
|
/bashrc
|
UTF-8
| 1,813
| 3.265625
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
# Interactive bash configuration: platform-aware colors, history tuning,
# locales, aliases, and helper functions (colored man pages; macOS cdf).
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# ====== Set-up variables ======
curPlatform=$(uname)
lang="en_US.UTF-8"
curEditor=$(which vim)
# ==== End Set-up variables ====
# macOS uses LSCOLORS/CLICOLOR instead of GNU dircolors.
if [[ $curPlatform == "Darwin" ]]; then
export CLICOLOR=1
export LSCOLORS=gxBxhxDxfxhxhxhxhxcxcx
fi
export EDITOR=${curEditor}
# ====== History section ======
export HISTCONTROL=ignoredups
export HISTIGNORE="ls*:ll*:l*:clear:history:exit"
# ==== End History section ====
# ====== Locales section ======
# Only set locale vars that are not already defined by the environment.
if [ -z ${LANGUAGE+x} ]; then export LANGUAGE=${lang}; fi
if [ -z ${LC_ALL+x} ]; then export LC_ALL="${lang}"; fi
# ==== End Locales section ====
# ====== Aliases section ======
if [[ $curPlatform == "Darwin" ]]; then
export LS_OPTIONS='-G'
else
export LS_OPTIONS='--color=auto'
eval "`dircolors`"
fi
alias ls='ls $LS_OPTIONS'
alias ll='ls $LS_OPTIONS -l -h'
alias l='ls $LS_OPTIONS -lA'
alias youtube-dl-mp3='youtube-dl --extract-audio --audio-format mp3 '
# ==== End Aliases section ====
# ====== Custom functions section ======
# Colors in man pages
# Wraps man(1) with LESS_TERMCAP_* overrides so less renders bold,
# underline and standout sequences in color.
man() {
env \
LESS_TERMCAP_mb=$(printf "\e[1;31m") \
LESS_TERMCAP_md=$(printf "\e[1;31m") \
LESS_TERMCAP_me=$(printf "\e[0m") \
LESS_TERMCAP_se=$(printf "\e[0m") \
LESS_TERMCAP_so=$(printf "\e[1;44;33m") \
LESS_TERMCAP_ue=$(printf "\e[0m") \
LESS_TERMCAP_us=$(printf "\e[1;32m") \
man "$@"
}
# cdf: cd to the folder shown in the frontmost macOS Finder window.
if [[ $curPlatform == "Darwin" ]]; then
cdf() {
target=`osascript -e 'tell application "Finder" to if (count of Finder windows) > 0 then get POSIX path of (target of front Finder window as text)'`
if [ "$target" != "" ]; then
cd "$target"; pwd
else
echo 'No Finder window found' >&2
fi
}
fi
# ==== End Custom functions section ====
| true
|
507726da65c967dd9c58faf062e30b02dfff150a
|
Shell
|
jenkinsci/tm4j-automation-plugin
|
/setup.sh
|
UTF-8
| 2,379
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Bootstrap a local Jenkins instance with the plugins and example jobs
# needed to exercise the Zephyr Scale (TM4J) plugin.
set -e
server=http://localhost:8080/jenkins
echo "[==== SETUP ====] Starting Zephyr Scale Jenkins plugin setup..."
# True when Jenkins answers HTTP 200 on the CLI jar endpoint.
# The substitution is quoted: with the original unquoted form,
# '[ 200 == ]' was a test syntax error whenever curl produced no output.
is_running() {
[ "$(curl -o /dev/null -s -w "%{http_code}" "$server/jnlpJars/jenkins-cli.jar")" = "200" ]
}
# Block until the server starts responding.
wait_start() {
echo "[==== SETUP ====] Starting Jenkins..."
while ! is_running
do
sleep 1
done
}
# Block until the server stops responding.
wait_stop() {
echo "[==== SETUP ====] Stopping Jenkins..."
while is_running
do
sleep 1
done
}
# If an instance is already up, shut it down cleanly before wiping state.
if is_running
then
curl "$server/jnlpJars/jenkins-cli.jar" --output jenkins-cli.jar
sleep 2
java -jar jenkins-cli.jar -s "$server" safe-shutdown
sleep 2
fi
rm -rf work/
sh ./run.sh &
wait_start
curl "$server/jnlpJars/jenkins-cli.jar" --output jenkins-cli.jar
java -jar jenkins-cli.jar -s "$server" safe-restart
wait_stop
wait_start
sleep 5
java -jar jenkins-cli.jar -s "$server" install-plugin workflow-aggregator
java -jar jenkins-cli.jar -s "$server" install-plugin git
sleep 2
echo "[==== SETUP ====] Stopping Jenkins..."
java -jar jenkins-cli.jar -s "$server" safe-restart
wait_start
echo "[==== SETUP ====] Setting Jenkins configurations"
echo "[==== SETUP ====] Creating jobs"
java -jar jenkins-cli.jar -s "$server" create-job zephyr-scale-junit-integration-example-legacy-version < setup/zephyr-scale-junit-integration-example-legacy-version.xml
java -jar jenkins-cli.jar -s "$server" create-job zephyr-scale-cucumber-calculator-example < setup/zephyr-scale-cucumber-calculator-example.xml
java -jar jenkins-cli.jar -s "$server" create-job zephyr-scale-cucumber-integration-example < setup/zephyr-scale-cucumber-integration-example.xml
java -jar jenkins-cli.jar -s "$server" create-job zephyr-scale-cucumber-integration-example-pipeline < setup/zephyr-scale-cucumber-integration-example-pipeline.xml
java -jar jenkins-cli.jar -s "$server" create-job zephyr-scale-junit-integration-example < setup/zephyr-scale-junit-integration-example.xml
cp setup/com.adaptavist.tm4j.jenkins.extensions.configuration.Tm4jGlobalConfiguration.xml work/
echo "[==== SETUP ====] Restarting Jenkins..."
java -jar jenkins-cli.jar -s "$server" safe-restart
wait_start
echo "[==== SETUP ====] Shutdown Jenkins"
java -jar jenkins-cli.jar -s "$server" safe-shutdown
sleep 2
echo "[==== SETUP ====] Jenkins stopped"
echo "[==== SETUP ====] Setup finished"
echo "[==== SETUP ====] Execute run.sh to run Jenkins";
rm jenkins-cli.jar
| true
|
c69746e47b10fd5cb9894de03b1bb2fc60c63ea2
|
Shell
|
nasa/astrobee
|
/scripts/git/configure_isort_paths.sh
|
UTF-8
| 507
| 3.6875
| 4
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"MIT",
"MPL-2.0",
"MPL-1.0",
"LGPL-2.1-or-later",
"Apache-2.0",
"LGPL-2.1-only",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-generic-cla",
"GPL-3.0-only",
"LGPL-3.0-only",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-4.0",
"GPL-2.0-only",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Updates the src_paths setting in the .isort.cfg file at the top
# level. See that file for more details.
thisdir=$(dirname "$0")
srcdir=$(cd "$thisdir"/../.. && pwd)
# Abort if the repo root cannot be entered (the original continued and
# would have rewritten whatever .isort.cfg happened to be in the cwd).
cd "$srcdir" || exit 1
# Generate a comma-separated list of folders containing *.py files
# ('sort -u' replaces the redundant 'sort | uniq' pipeline).
pydirs=$(find . -name "*.py" -print0 | xargs -0 dirname | cut -c3- | sort -u | paste -sd "," -)
# Overwrite the src_paths line in the config file to use the list
perl -i -ple "if (/^src_paths = /) { \$_ = 'src_paths = $pydirs'; }" .isort.cfg
| true
|
0e219614a967d0025d02ddcfb318918fbd1e9f20
|
Shell
|
dmytro/dotfiles
|
/zsh/prompt.zsh
|
UTF-8
| 837
| 2.921875
| 3
|
[] |
no_license
|
# Zsh prompt configuration: a plain [user@host dir]$ left prompt plus a
# right prompt showing git status (via vcs_info) and the current time.
# Set up the prompt
#autoload -Uz promptinit
#promptinit
#prompt redhat
PS1="[%n@%m %1~]$ "
GIT_PROMPT_SHOW_CHANGES=1
autoload -Uz vcs_info
zstyle ':vcs_info:*' enable git
# Refresh VCS info before every prompt.
precmd() {
vcs_info
}
# Needed so ${vcs_info_msg_*} expands each time the prompt is drawn.
setopt prompt_subst
# check-for-changes is relatively expensive; enabled on Linux or when
# explicitly requested via GIT_PROMPT_SHOW_CHANGES.
if [ $(uname -s) = Linux -o ${GIT_PROMPT_SHOW_CHANGES} = 1 ]; then
zstyle ':vcs_info:git:*' check-for-changes true
fi
# Colors
#zstyle ':vcs_info:*' formats "%f[%%n@%%m %1~] $ " "%F{5}%a %m%u%c %F{6}%b:%F{3}%r/%S"
zstyle ':vcs_info:*' formats "%f[%%n@%%m %1/] $ " "%f%a %F{3}%m%u%c %f%b:%r/%S"
zstyle ':vcs_info:*' nvcsformats "%f[%%n@%%m %1/]$ " ""
zstyle ':vcs_info:*' actionformats '%F{5}(%f%s%F{5})%F{3}-%F{5}[%F{2}%b%F{3}|%F{1}%a%F{5}]%f '
# PROMPT='${vcs_info_msg_0_}' # This prompt is broken in zsh 5.2
PROMPT='%f[%n@%m %1/]$ '
RPROMPT='${vcs_info_msg_1_} %F{cyan}$(date +%H:%M:%S)'
| true
|
f4ee471cf174da748dd77ab957c7bf28f1a84571
|
Shell
|
anantoni/doop-tools
|
/jvmti-tracer/trace2gxl
|
UTF-8
| 212
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Convert a JVMTI trace (piped on stdin) to GXL using the probe library.
if [ $# -ne 0 ]; then
echo "Usage: cat /path/to/trace | $0" >&2; exit 1
fi
# Run from the script's own directory so the relative PROBEDIR and
# ./build classpath entries resolve; fail loudly if that cd fails.
pushd "$(dirname "$0")" > /dev/null || exit 1
PROBEDIR=../probe
java -cp ${PROBEDIR}/probe.jar:${PROBEDIR}/gxl.jar:./build Trace2GXL
| true
|
9961d944b4539b56d3f0094e46c51c8be4259f5a
|
Shell
|
raychorn/analytics_library
|
/sample_data/bashtest.sh
|
UTF-8
| 1,289
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Test/scratch script: reads a dates-and-events file line by line on fd 3,
# splits each line on '}', collects lines into JOBS, then creates a
# timestamped HDFS request directory.  Most downstream steps are
# commented out.
TIME=`date +%Y-%m-%d-%H:%M`
declare -a JOBS
let count=0
# Open the input file on fd 3 so stdin stays free.
exec 3< ${ANALYTICS_LIB}/python_streaming/tmp_dates_and_events.txt
while read <&3
do
JOBS[$count]="$REPLY""\n"
# NOTE(review): ${REPLY// /t} replaces spaces with the literal letter
# 't' — possibly '\t' (tab) was intended; confirm.
echo "start:${REPLY// /t}:end"
# Split the line on '}' into Array (unquoted expansion is what makes
# IFS-splitting happen here).
IFS="}"; declare -a Array=($REPLY)
echo "start:${Array[0]}:end"
echo "start:${Array[1]}:end"
((count++))
done
# Close fd 3.
exec 3>&-
echo ${TIME}
echo '$ANALYTICS_LIB'
echo ${HADOOP_HOME}
echo ${HIVE_HOME}
echo ${HOSTNAME}
#RESULT=$(hadoop dfs -ls /user/hadoop/sprintcm/`date +%Y%m%d`*) || { echo "command failed"; exit 1; }
#RESULT=$(hadoop dfs -ls /user/hadoop/sprintcm/test*) || { echo "command failed"; exit 1; }
#echo $RESULT
S_ARRAY="${JOBS[@]}"
REQUESTID="hdfs-time-hdfs--`date +%Y%m%d%H%M%S`"
hadoop dfs -mkdir /user/hadoop/sprintcm/${REQUESTID}
#python ${ANALYTICS_LIB}/scripts/hadoop_mailer.py "bashtest" complete hive3 lramos@smithmicro.com " Processed the following Log Files:\n${RESULT}\n\n ${S_ARRAY}"
#hive --config ${HIVE_HOME}/conf -e "CREATE TABLE IF NOT EXISTS InstallationInfo (server_date STRING, ip STRING, log_format STRING, client_date STRING, project_id STRING, version STRING, uuid STRING, xvi STRING, xvii STRING, ii STRING, xlii STRING, xliii STRING, xliv STRING) PARTITIONED BY(ds STRING, ts STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;"
|
12218b652c84bb207e6c8ff488c2f7f07b8ab0bb
|
Shell
|
agilderdale/pks-env
|
/ref_scripts/download-pivotal.sh
|
UTF-8
| 1,260
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Download Pivotal Network packages (PKS, Harbor, stemcell) via the
# PivNet API using an auth token.  Each product is described by a pair
# of variables <PREFIX>_PKG_API / <PREFIX>_PKG_NAME resolved with
# bash indirect expansion (${!var}).
PIVOTAL_TOKEN=
PKS_PKG_API="https://network.pivotal.io/api/v2/products/pivotal-container-service/releases/191865/product_files/222375/download"
PKS_PKG_NAME="pivotal-container-service-1.2.0-build.47.pivotal"
HARBOR_PKG_API="https://network.pivotal.io/api/v2/products/harbor-container-registry/releases/190421/product_files/220843/download"
HARBOR_PKG_NAME="harbor-container-registry-1.6.0-build.35.pivotal"
STEMCELL_PKG_API="https://network.pivotal.io/api/v2/products/stemcells-ubuntu-xenial/releases/264505/product_files/279631/download"
STEMCELL_PKG_NAME="bosh-stemcell-170.15-vsphere-esxi-ubuntu-xenial-go_agent.tgz"
#for PREFIX in PKS HARBOR STEMCELL
for PREFIX in STEMCELL
do
PKG_API="${PREFIX}_PKG_API"
PKG_NAME="${PREFIX}_PKG_NAME"
#wget --post-data="" --header="Authorization: Token hss2WKxn86fEL8W4VaJk" https://network.pivotal.io/api/v2/products/pivotal-container-service/releases/191865/product_files/222375/download -O "pivotal-container-service-1.2.0-build.47.pivotal"
echo "Downloading ${!PKG_NAME}..."
echo ${!PKG_NAME}
# Check wget directly instead of $?; also note the original wrote the
# output to " <name>" — a filename with a leading space — fixed here.
if ! wget --post-data="" --header="Authorization: Token $PIVOTAL_TOKEN" ${!PKG_API} -O "${!PKG_NAME}"; then
echo "Cannot connect to the download page or file is not correct"; break
fi
done
| true
|
9b079baa57df0f3572a5d5a01542dad591410d65
|
Shell
|
rylberg/lbann
|
/experiments/run_lbann_dnn_multi_imagenet.sh
|
UTF-8
| 8,901
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Launcher for the LBANN multi-node ImageNet DNN experiment on LLNL
# clusters: parses tuning flags via getopts, stages the dataset tarballs
# from Lustre to node-local SSD (unless -u), then srun's the binary.
# NOTE(review): the shebang is /bin/sh but the script uses bashisms
# ('function', arrays) — presumably always run under bash; confirm.
DIRNAME=`dirname $0`
#Set Script Name variable
SCRIPT=`basename ${0}`
# Figure out which cluster we are on
CLUSTER=`hostname | sed 's/\([a-zA-Z][a-zA-Z]*\)[0-9]*/\1/g'`
# Look for the binary in the cluster specific build directory
BINDIR="${DIRNAME}/../build/${CLUSTER}.llnl.gov/model_zoo"
#Initialize variables to default values.
TRAINING_SAMPLES=-1
VALIDATION_SAMPLES=-1
EPOCHS=20
NETWORK="1000"
PARIO=0
BLOCK_SIZE=256
MODE="false"
MB_SIZE=192
LR=0.1
ACT=1
LRM=1
TEST_W_TRAIN_DATA=0
LR_DECAY=0.5
RUN="srun"
ROOT_DATASET_DIR="/l/ssd"
DATASET_DIR="datasets/ILSVRC2012"
OUTPUT_DIR="/l/ssd/lbann/outputs"
PARAM_DIR="/l/ssd/lbann/models"
SAVE_MODEL=false
LOAD_MODEL=false
CKPT=10
# need this in an mxterm
export SLURM_NNODES=$SLURM_JOB_NUM_NODES
TASKS_PER_NODE=12
NNODES=${SLURM_NNODES}
# Per-cluster Lustre scratch path, hyperthreading flag and core count.
if [ "${CLUSTER}" = "catalyst" ]; then
LUSTRE_FILEPATH="/p/lscratchf/brainusr"
ENABLE_HT=
CORES_PER_NODE=48
elif [ "${CLUSTER}" = "sierra" ]; then
LUSTRE_FILEPATH="/p/lscratche/brainusr"
#ENABLE_HT=--enable-hyperthreads
#CORES_PER_NODE=24
ENABLE_HT=
CORES_PER_NODE=12
else
LUSTRE_FILEPATH="/p/lscratche/brainusr"
ENABLE_HT=
CORES_PER_NODE=12
fi
USE_LUSTRE_DIRECT=0
SHUFFLE_TRAINING=0
#Set fonts for Help.
NORM=`tput sgr0`
BOLD=`tput bold`
REV=`tput smso`
#Help function
# Prints the usage text (one line per option) and exits non-zero.
function HELP {
echo -e \\n"Help documentation for ${BOLD}${SCRIPT}.${NORM}"\\n
echo -e "${REV}Basic usage:${NORM} ${BOLD}$SCRIPT -t <training set size> -e <epochs> -v <validation set size>${NORM}"\\n
echo "Command line switches are optional. The following switches are recognized."
echo "${REV}-a${NORM} <val> --Sets the ${BOLD}activation type${NORM}. Default is ${BOLD}${ACT}${NORM}."
echo "${REV}-b${NORM} <val> --Sets the ${BOLD}mini-batch size${NORM}. Default is ${BOLD}${MB_SIZE}${NORM}."
echo "${REV}-c${NORM} --(CHEAT) Test / validate with the ${BOLD}training data${NORM}. Default is ${BOLD}${TEST_W_TRAIN_DATA}${NORM}."
echo "${REV}-d${NORM} --Sets the ${BOLD}debug mode${NORM}."
echo "${REV}-e${NORM} <val> --Sets the ${BOLD}number of epochs${NORM}. Default is ${BOLD}${EPOCHS}${NORM}."
echo "${REV}-f${NORM} <val> --Path to the ${BOLD}datasets${NORM}. Default is ${BOLD}${ROOT_DATASET_DIR}${NORM}."
echo "${REV}-i${NORM} <val> --Sets the ${BOLD}parallel I/O limit${NORM}. Default is ${BOLD}${PARIO}${NORM}."
echo "${REV}-j${NORM} <val> --Sets the ${BOLD}learning rate decay${NORM}. Default is ${BOLD}${LR_DECAY}${NORM}."
echo "${REV}-k${NORM} <val> --Checkpoint after every ${BOLD}N${NORM} epochs. Default is ${BOLD}${CKPT}${NORM}."
echo "${REV}-l${NORM} <val> --Determines if the model is ${BOLD}loaded${NORM}. Default is ${BOLD}${LOAD_MODEL}${NORM}."
echo "${REV}-m${NORM} <val> --Sets the ${BOLD}mode${NORM}. Default is ${BOLD}${MODE}${NORM}."
echo "${REV}-n${NORM} <val> --Sets the ${BOLD}network topology${NORM}. Default is ${BOLD}${NETWORK}${NORM}."
echo "${REV}-o${NORM} <val> --Sets the ${BOLD}output directory${NORM}. Default is ${BOLD}${OUTPUT_DIR}${NORM}."
echo "${REV}-p${NORM} <val> --Sets the ${BOLD}input parameter directory${NORM}. Default is ${BOLD}${PARAM_DIR}${NORM}."
echo "${REV}-q${NORM} <val> --Sets the ${BOLD}learning rate method${NORM}. Default is ${BOLD}${LRM}${NORM}."
echo "${REV}-r${NORM} <val> --Sets the ${BOLD}inital learning rate${NORM}. Default is ${BOLD}${LR}${NORM}."
echo "${REV}-s${NORM} <val> --Determines if the model is ${BOLD}saved${NORM}. Default is ${BOLD}${SAVE_MODEL}${NORM}."
echo "${REV}-t${NORM} <val> --Sets the number of ${BOLD}training samples${NORM}. Default is ${BOLD}${TRAINING_SAMPLES}${NORM}."
echo "${REV}-u${NORM} --Use the ${BOLD}Lustre filesystem${NORM} directly. Default is ${BOLD}${USE_LUSTRE_DIRECT}${NORM}."
echo "${REV}-v${NORM} <val> --Sets the number of ${BOLD}validation samples${NORM}. Default is ${BOLD}${VALIDATION_SAMPLES}${NORM}."
echo "${REV}-w${NORM} <val> -- ${BOLD}Order N${NORM} or ${BOLD}Pick N${NORM} training samples. Default is ${BOLD}${SHUFFLE_TRAINING}${NORM}."
echo "${REV}-x${NORM} <val> --Sets the ${BOLD}lib Elemental block size${NORM}. Default is ${BOLD}${BLOCK_SIZE}${NORM}."
echo "${REV}-y${NORM} <val> --Sets the ${BOLD}number of nodes allowed in the allocation${NORM}. Default is ${BOLD}${SLURM_NNODES}${NORM}."
echo "${REV}-z${NORM} <val> --Sets the ${BOLD}tasks per node${NORM}. Default is ${BOLD}${TASKS_PER_NODE}${NORM}."
echo -e "${REV}-h${NORM} --Displays this help message. No further functions are performed."\\n
exit 1
}
# Parse command-line options; each flag overrides one default above.
while getopts ":a:b:cde:f:hi:j:k:l:m:n:o:p:q:r:s:t:uv:w:x:y:z:" opt; do
case $opt in
a)
ACT=$OPTARG
;;
b)
MB_SIZE=$OPTARG
;;
c)
TEST_W_TRAIN_DATA=1
;;
d)
RUN="totalview srun -a"
;;
e)
EPOCHS=$OPTARG
;;
f)
ROOT_DATASET_DIR=$OPTARG
;;
h)
HELP
exit 1
;;
i)
PARIO=$OPTARG
;;
j)
LR_DECAY=$OPTARG
;;
k)
CKPT=$OPTARG
;;
l)
LOAD_MODEL=$OPTARG
;;
m)
MODE=$OPTARG
;;
n)
NETWORK=$OPTARG
;;
o)
OUTPUT_DIR=$OPTARG
;;
p)
PARAM_DIR=$OPTARG
;;
q)
LRM=$OPTARG
;;
r)
LR=$OPTARG
;;
s)
SAVE_MODEL=$OPTARG
;;
t)
TRAINING_SAMPLES=$OPTARG
;;
u)
USE_LUSTRE_DIRECT=1
;;
v)
VALIDATION_SAMPLES=$OPTARG
;;
w)
SHUFFLE_TRAINING=$OPTARG
;;
x)
BLOCK_SIZE=$OPTARG
;;
y)
NNODES=$OPTARG
;;
z)
TASKS_PER_NODE=$OPTARG
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
esac
done
shift $((OPTIND-1))
# now do something with $@
# Once all of the options are parsed, you can setup the environment
#source ${DIRNAME}/setup_brain_lbann_env.sh -m mvapich2 -v 0.86
#source ${DIRNAME}/setup_brain_lbann_env.sh -m debug_mvapich2 -v 0.86
#source ${DIRNAME}/setup_brain_lbann_env.sh -m openmpi -v 0.86
#source ${DIRNAME}/setup_brain_lbann_env.sh -m debug_openmpi -v 0.86
source ${DIRNAME}/setup_brain_lbann_env.sh -m mvapich2 -v El_0.86/v86-6ec56a
# Cap broadcast task count at 384 (reason not recorded in the source).
TASKS=$((${SLURM_NNODES} * ${SLURM_CPUS_ON_NODE}))
if [ ${TASKS} -gt 384 ]; then
TASKS=384
fi
LBANN_TASKS=$((${NNODES} * ${TASKS_PER_NODE}))
export PATH=/collab/usr/global/tools/stat/file_bcast/${SYS_TYPE}/fbcast:${PATH}
if [ ${USE_LUSTRE_DIRECT} -eq 1 ]; then
# Read the dataset straight from Lustre; no staging needed.
ROOT_DATASET_DIR=${LUSTRE_FILEPATH}
else
# Stage tarballs from Lustre to node-local storage with file_bcast,
# then untar them on every node via pdsh.  Each step is skipped when
# its target already exists, so re-runs are cheap.
FILES=(labels.tar resized_256x256/train.tar resized_256x256/val.tar resized_256x256/test.tar)
for tarball in "${FILES[@]}"
do
FILE=`basename $tarball`
if [ ! -e ${ROOT_DATASET_DIR}/${FILE} ]; then
# CMD="pdcp /p/lscratchf/brainusr/datasets/ILSVRC2012/${tarball} /l/ssd/"
CMD="srun -n${TASKS} -N${SLURM_NNODES} file_bcast_par13 1MB ${LUSTRE_FILEPATH}/${DATASET_DIR}/${tarball} ${ROOT_DATASET_DIR}/${FILE}"
echo "${CMD}"
${CMD}
fi
done
if [ ! -d ${ROOT_DATASET_DIR}/${DATASET_DIR}/resized_256x256 ]; then
CMD="pdsh mkdir -p ${ROOT_DATASET_DIR}/${DATASET_DIR}/resized_256x256"
echo "${CMD}"
${CMD}
fi
FILES=(labels)
for tarball in "${FILES[@]}"
do
if [ ! -e ${ROOT_DATASET_DIR}/${DATASET_DIR}/${tarball} ]; then
CMD="pdsh /usr/bin/time tar xf ${ROOT_DATASET_DIR}/${tarball}.tar -C ${ROOT_DATASET_DIR}/${DATASET_DIR}/"
echo "${CMD}"
${CMD}
fi
done
FILES=(train val test)
for tarball in "${FILES[@]}"
do
if [ ! -e ${ROOT_DATASET_DIR}/${DATASET_DIR}/resized_256x256/${tarball} ]; then
CMD="pdsh /usr/bin/time tar xf ${ROOT_DATASET_DIR}/${tarball}.tar -C ${ROOT_DATASET_DIR}/${DATASET_DIR}/resized_256x256/"
echo "${CMD}"
${CMD}
fi
done
if [ ! -d ${PARAM_DIR} ]; then
CMD="mkdir -p ${PARAM_DIR}"
echo ${CMD}
${CMD}
fi
if [ ! -d ${OUTPUT_DIR} ]; then
CMD="mkdir -p ${OUTPUT_DIR}"
echo ${CMD}
${CMD}
fi
fi
echo ${CORES_PER_NODE}
# Assemble and run the final srun command line with all resolved options.
CMD="${RUN} -N${NNODES} -n${LBANN_TASKS} ${ENABLE_HT} --ntasks-per-node=${TASKS_PER_NODE} --distribution=block --drop-caches=pagecache ${BINDIR}/lbann_dnn_multi_imagenet --hostname ${CLUSTER} --num-nodes ${NNODES} --num-cores $((${NNODES}*${CORES_PER_NODE})) --tasks-per-node ${TASKS_PER_NODE} --par-IO ${PARIO} --dataset ${ROOT_DATASET_DIR}/${DATASET_DIR}/ --max-validation-samples ${VALIDATION_SAMPLES} --profiling true --max-training-samples ${TRAINING_SAMPLES} --block-size ${BLOCK_SIZE} --output ${OUTPUT_DIR} --mode ${MODE} --num-epochs ${EPOCHS} --params ${PARAM_DIR} --save-model ${SAVE_MODEL} --load-model ${LOAD_MODEL} --mb-size ${MB_SIZE} --learning-rate ${LR} --activation-type ${ACT} --network ${NETWORK} --learning-rate-method ${LRM} --test-with-train-data ${TEST_W_TRAIN_DATA} --checkpoint ${CKPT} --lr-decay-rate ${LR_DECAY} --random-training-samples ${SHUFFLE_TRAINING}"
echo ${CMD}
${CMD}
| true
|
d70913c34ecc598d75b3064f0b3f0940c0d9a50e
|
Shell
|
Sir-Boops/scripts
|
/port-forward.sh
|
UTF-8
| 875
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactively collect a destination IP, the public IP, and lists of
# TCP/UDP ports, then install iptables DNAT rules forwarding each port.
# Get addr to forward to
echo "What IP are you forwarding to?"
echo ""
read -r -p "IP address: " IP_ADDRESS
clear
# Get public IP
echo "What is your public IP?"
echo ""
read -r -p "Public IP: " PUBLIC_IP
# Get TCP ports to forward
echo "What TCP ports do you wish to forward"
echo "Example 80 53 500"
read -r -p "TCP ports: " TCP_PORTS
clear
# Get UDP Ports to forward
echo "What UDP ports do you wish to forward"
echo "Example 53 54 55"
read -r -p "UDP ports: " UDP_PORTS
clear
# Loop over and forward all the tcp ports.
# $TCP_PORTS stays unquoted on purpose so the space-separated list
# word-splits into individual ports; each port is quoted when used.
for PORT in $TCP_PORTS; do
iptables -t nat -A PREROUTING -p tcp --dport "$PORT" -j DNAT --destination "$PUBLIC_IP" --to-destination "$IP_ADDRESS:$PORT"
done
# Loop over and forward all the UDP ports
for PORT in $UDP_PORTS; do
iptables -t nat -A PREROUTING -p udp --dport "$PORT" -j DNAT --destination "$PUBLIC_IP" --to-destination "$IP_ADDRESS:$PORT"
done
| true
|
93e137eeacb5a1968597940359626fd67e7361d2
|
Shell
|
sb1975/iot-gcp
|
/setup.sh
|
UTF-8
| 1,569
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a GCP IoT data-streaming pipeline: enable APIs, create a
# Pub/Sub topic + subscription, an IoT Core registry, an RS256 key pair,
# and register one device.
# NOTE(review): resource names and region (us-central1) are hard-coded;
# run once per project — re-running fails on already-existing resources.
echo "Data Streaming Pipeline setup will begin..."
echo ""
#Enable Cloud PubSub & Cloud IoT Core APIs
gcloud services enable compute.googleapis.com pubsub.googleapis.com cloudiot.googleapis.com
echo "Cloud PubSub & Cloud IoT Core APIs are enabled..."
echo ""
#Create a PubSub topic
gcloud pubsub topics create data-streaming-topic
echo "data-streaming-topic pubsub topic created..."
echo ""
#Create a Subscription for this PubSub topic created above for testing purpose
gcloud pubsub subscriptions create data-streaming-sub --topic=data-streaming-topic
echo "data-streaming-sub pubsub subscription created for testing purpose..."
echo ""
#Create the Cloud IoT Core Device Registry
gcloud iot registries create data-streaming-iot-core-registry --event-notification-config=topic=data-streaming-topic --region=us-central1 --no-enable-http-config
echo "data-streaming-iot-core-registry Cloud IoT Core Device Registry created..."
echo ""
#Generate a device key pair prefer RS256
openssl req -x509 -newkey rsa:2048 -keyout rsa_private.pem -nodes -out rsa_cert.pem -subj "/CN=unused"
openssl pkcs8 -topk8 -inform PEM -outform DER -in rsa_private.pem -nocrypt > rsa_private_pkcs8
echo "RS256 key paid generated..."
echo ""
#Add a device to the registry using the keys you generated
gcloud iot devices create device-1 --registry=data-streaming-iot-core-registry --region=us-central1 --public-key=path=./rsa_cert.pem,type=RS256
echo "device-1 is added to data-streaming-iot-core-registry registry"
echo ""
echo "Data Streaming Pipeline setup is completed..."
| true
|
0162b20962ee505cec02a96498841cd7471899cf
|
Shell
|
cbcrg/phecomp
|
/lib/bash/callHmmOneOutVal.sh
|
UTF-8
| 3,263
| 3.015625
| 3
|
[] |
no_license
|
#! /bin/bash
#################################################################################
### Jose Espinosa-Carrasco. CB/CSN-CRG. January 2013                           ##
### Code : 28.01                                                               ##
### Runs hmmOneOutVal.sh for each leave-one-out round (30 runs) over each     ##
### intake (.int) file, collecting one summary score table per file.          ##
#################################################################################
## Export the environment (SGE directive)
#$ -V
## Setting source files paths
bashGitDir=/users/cn/jespinosa/workspaceEclipse/phecomp/lib/bash/
## Some generic functions (provides checkCreateDir)
source "${bashGitDir}generalFunctions.sh"

# Queue variables (used only by the commented-out qsub submission below)
typeQ="short"
nameQ="cn-el6"
# time format HH:MM:SS
timeQ="-l h_rt=04:00:00"
amount_of_memoryG=8

## Variables
scriptName=$(basename "$0")
wDir=$PWD
defDumpDir="/users/cn/jespinosa/phecomp/20130610_HMM/"
binMode=four
resDir="${defDumpDir}20130610_HMM${binMode}Signal/"
checkCreateDir "$resDir"
resDir="${resDir}hmmOneOutValSecond/"
checkCreateDir "$resDir"

## Experiment HF May 2012
intFile2Val="/users/cn/jespinosa/phecomp/20130610_HMM/data/intFiles/20120502_FDF_CRG_hab_filt.int"
# Setting a folder for each int file to be analyzed
fileAndExt=${intFile2Val##*/}
fileName=${fileAndExt%%.*}
resDirFile="${resDir}${fileName}/"
checkCreateDir "$resDirFile"
summaryTableAllRuns="${resDirFile}summaryTableAllRuns.tbl"
echo -e "cage\tscore\trun" > "$summaryTableAllRuns"

###### FROM HERE
# for run in {1..30}
for run in {1..30}
do
    # printf instead of echo: the original "\n" inside echo was printed literally
    printf 'INFO: Execution of one out routine %s\n' "$run" 1>&2
    resDirRun="${resDirFile}run${run}/"
    checkCreateDir "$resDirRun"
    # hmmOneOutVal.sh reads its inputs from these exported variables
    export path2intFileFilt=${intFile2Val}
    export par2int2browser="value"
    export binMode=${binMode}
    export resDir=${resDirRun}
    export evalTbl=${summaryTableAllRuns}
    export run=${run}
    "${bashGitDir}hmmOneOutVal.sh"
done

# ${bashGitDir}hmmOneOutVal.sh
# > ${resDir}"hmmOneOutVal.stdout" 2> ${resDir}"hmmOneOutVal.err"
#
# qsub -q $typeQ,$nameQ $timeQ -cwd -o ${resDir} -e ${resDir} -v path2intFileFilt=${intFile2Val},par2int2browser="value",binMode=${binMode} ${bashGitDir}hmmOneOutVal.sh 1>&2
# -v iniLight=6 -v path2intFileFiltered=${intFile}

# Second experiment: reset the result root (the loop above exported resDir)
resDir="${defDumpDir}20130610_HMM${binMode}Signal/"
checkCreateDir "$resDir"
resDir="${resDir}hmmOneOutValSecond/"
checkCreateDir "$resDir"

## Experiment Free Choice CD SC January 2013
intFile2Val="/users/cn/jespinosa/phecomp/20130610_HMM/data/intFiles/20130130_FCSC_CRG_hab_filt.int"
# Setting a folder for each int file to be analyzed
fileAndExt=${intFile2Val##*/}
fileName=${fileAndExt%%.*}
resDirFile="${resDir}${fileName}/"
checkCreateDir "$resDirFile"
summaryTableAllRuns="${resDirFile}summaryTableAllRuns.tbl"
echo -e "cage\tscore\trun" > "$summaryTableAllRuns"

####### FROM HERE
for run in {1..30}
# for run in {1..2}
do
    printf 'INFO: Execution of one out routine %s\n' "$run" 1>&2
    resDirRun="${resDirFile}run${run}/"
    checkCreateDir "$resDirRun"
    export path2intFileFilt=${intFile2Val}
    export par2int2browser="value"
    export binMode=${binMode}
    export resDir=${resDirRun}
    export evalTbl=${summaryTableAllRuns}
    export run=${run}
    "${bashGitDir}hmmOneOutVal.sh"
done
| true
|
646f38be6cb428c4b2670586fffdbdc7d9e81910
|
Shell
|
j123b567/xc32
|
/common-source/build-scripts/bamboo/full-v2/xc32-cyclone-rc-buildall.sh
|
UTF-8
| 517
| 3.03125
| 3
|
[] |
no_license
|
# Bamboo CI driver: build the full XC32 toolchain from a clean tree and
# package the install images.

# Default the source branch/tag when Bamboo does not provide one.
# (Quoted: the original unquoted test broke if the value contained spaces.)
if [ -z "$bamboo_XC32_SOURCE_BRATAG" ]; then
  bamboo_XC32_SOURCE_BRATAG=trunk
fi

# Start from a clean build tree and log (-f: do not fail if the log is absent).
if [ -e builddir ]; then
  echo "Deleting existing builddir and log"
  rm -rf builddir
  rm -f xc32-build-all.log
fi

./build-scripts/xc32-build-all.sh --jobs=4 --branch="$bamboo_XC32_SOURCE_BRATAG"

cd "$bamboo_build_working_directory"
cd builddir
echo "Compressing images.. PWD is $(pwd)"
tar -czf install-Linux.tar.gz install-Linux
tar -czf install-Linux-nolm.tar.gz install-Linux-nolm
tar -czf install-mingw.tar.gz install-mingw
cd ..
| true
|
583b7464b2f857bfda36ce48397175081395e9a7
|
Shell
|
Ishenko-Ilia/qsin
|
/script
|
UTF-8
| 213
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Archive /home, ensure /dev/sda1 is mounted on /mnt (detected via the
# /mnt/check_mount marker file), remove the stale archive from the device
# and unmount it.
tar -cvf /root/sda1.tar /home
mount /dev/sda1 /mnt
# Fix: the original "[-e ...]" lacked spaces (ran a nonexistent command
# "[-e") and "mount /dev/sda1/mnt" was missing the space before /mnt.
if [ -e /mnt/check_mount ]
then
echo "File Already"
else
echo "Mounting device /dev/sda1 - /mnt"
mount /dev/sda1 /mnt
fi
rm -R /mnt/sda1.tar
umount /mnt
| true
|
21a52b4e30b2e3c6cb7072e33c61ab474dce4bf8
|
Shell
|
Alumniminium/PinePhone
|
/home/.local/bin/audioctl.sh
|
UTF-8
| 761
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# PinePhone audio router: select the output (s = speaker, h = headphones,
# anything else = earpiece) at the given volume.
# Usage: audioctl.sh <s|h|other> <volume 0-100>
input=$1
volume=$2
# The earpiece is quieter than the loudspeaker, so double its level.
earpiecevol=$((volume * 2))

# Common mixer setup shared by all outputs.
amixer sset "AIF1 DA0" 115
amixer sset "AIF1 DA0 Stereo" "Stereo"
amixer sset "Line Out Source" 'Mono Differential'
amixer sset "DAC" 100%
amixer sset "DAC" unmute
amixer sset "DAC Reversed" unmute
amixer sset "AIF1 Slot 0 Digital DAC" unmute
amixer sset "Earpiece Source" 'Left Mix'

# [[ ]] with a quoted operand: the original unquoted [ $input = "s" ]
# produced a test error whenever $1 was empty or contained spaces.
if [[ "$input" = "s" ]]; then
amixer sset "Line Out" unmute
amixer sset "Earpiece" unmute
amixer sset "Earpiece" $earpiecevol%
amixer sset "Line Out" $volume%
elif [[ "$input" = "h" ]]; then
amixer sset "Earpiece" mute
amixer sset "Line Out" mute
amixer sset "Headphone" unmute
amixer sset "Headphone" $volume%
else
amixer sset "Line Out" mute
amixer sset "Earpiece" unmute
amixer sset "Earpiece" $volume%
fi
| true
|
87a8b002cdc2ebd3864aa3cbe492566d2bb7dcb6
|
Shell
|
sunshine69/ansible-role-openvpn-client
|
/templates/vpn-watcher.sh
|
UTF-8
| 246
| 2.703125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Restart the OpenVPN client when its process has died.
# (Ansible template: the {{ ... }} placeholders are filled in on deploy.)
# Fix: the original wrapped the pipeline in backticks, executing grep's
# (empty) output as a command — it worked only by accident; test the
# pipeline's exit status directly instead.
if ! ps -ef | grep openvpn | grep -v grep | grep 'openvpn {{ vpn_client_profile_name }}.ovpn' >/dev/null 2>&1; then
  echo "VPN has stopped. Trying to re-connect ..."
  exec {{ vpn_remote_dir }}/{{ vpn_client_profile_name }}.py
fi
| true
|
e5bd3102cefc8f0248b5ddc7b21b2f9d85f62cce
|
Shell
|
jinga-lala/Secure-Personal-Cloud
|
/Secure_Personal_Cloud/scheduler.sh
|
UTF-8
| 321
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
# Cron helper: run the Secure-Personal-Cloud auto-sync and surface its
# diagnostics as a desktop notification.
# Import DBUS_SESSION_BUS_ADDRESS from the user's running gnome-session so
# notify-send can reach the session bus from a non-graphical context.
eval "export $(egrep -z DBUS_SESSION_BUS_ADDRESS /proc/$(pgrep -u $LOGNAME gnome-session)/environ)";
# $1 is the project root; main.py lives under Secure_Personal_Cloud/linux/.
path="$1""/Secure_Personal_Cloud/linux/main.py"
#Code:
# Capture only stderr from the sync run (stdout is discarded).
var=$(python3 $path auto_sync 2>&1 1>/dev/null)
# DISPLAY=:0 notify-send 2
# echo "ehr""$var""there"
DISPLAY=:0 notify-send "Secure-Personal-Cloud" "$var"
| true
|
1380f9a2a3b61d224c6580ec118529674aaf32a3
|
Shell
|
ushulau/open-trader
|
/redeploy.sh
|
UTF-8
| 581
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
################################################
# Example of build and deployment script for   #
# open-trader into AWS VM                      #
################################################
HOST_IP="IP address of your host"

cd ~/work/open-trader/
mvn clean install
echo "build is complete"
scp ~/work/open-trader/target/open-trader.jar ubuntu@"$HOST_IP":~/
# Fix: the original wrote "ssh ubuntu@HOST_IP" (literal, missing the $),
# so the deploy step never reached the target VM.
ssh ubuntu@"$HOST_IP" << 'ENDSSH'
echo "Stopping open trader client!"
for pid in $(sudo ps -ef | grep "java" | awk '{print $2}'); do sudo kill -9 $pid; done
nohup sudo java -jar open-trader.jar &
ENDSSH
| true
|
9bfac5b44271e790e2e3534ccce17c9abae9d723
|
Shell
|
autoantwort/Lichtsteuerung
|
/src/lib/aubio/build_aubio.sh
|
UTF-8
| 747
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch and build the aubio library with waf, installing it next to this dir.
source ../scripts/set_env.sh
# instructions from: https://aubio.org/manual/latest/installing.html#git-repository
GIT_DIR="aubio.git"
# add or update git
# NOTE(review): exits early when clone_or_pull.sh returns success —
# presumably 0 means "already up to date, nothing to rebuild"; confirm
# against that helper's contract.
../scripts/clone_or_pull.sh $GIT_DIR https://github.com/aubio/aubio.git && exit 0
cd $GIT_DIR
# we are in the "$GIT_DIR" now
# get waf to build
./scripts/get_waf.sh
# build
if [[ "$OSTYPE" == "msys" ]]; then
# we are on windows
# NOTE(review): '--prefix= ""' passes an empty prefix plus an empty
# positional argument — possibly meant --prefix="" ; verify with waf.
./waf configure --disable-wavwrite --disable-wavread -v --prefix= ""
else
if ! [[ -z "$GITLAB_CI" ]]; then
# we are on the ci: cross-compile for 64-bit Windows
TARGET="--with-target-platform=win64"
fi
./waf configure --disable-wavwrite --disable-wavread $TARGET --prefix= ""
fi
./waf install --disable-tests --disable-examples --destdir "$PWD/../"
| true
|
a8e498d52ea209f8b8edaeebbe046ec0006fa3a6
|
Shell
|
ISUgenomics/common_scripts
|
/runGmap.sh
|
UTF-8
| 764
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a GMAP database (gmap_build) and align query sequences against it,
# writing PSL output named <db-stem>.<query-stem>.psl.
#sh runGmap.sh <database name> <folder of database file ending with a "/"> <Fasta file> <query file>
#examples
#sh run_gmap.sh red_abalone_02Jun2017_5fUJu /work/GIF/remkv6/Serb/03_DavideGMAP/ red_abalone_02Jun2017_5fUJu.fasta DavideQuerydna.fasta
#sh run_gmap.sh m.yessoensisGenome /work/GIF/remkv6/Serb/03_DavideGMAP DavideQuerydna.fasta
#sh run_gmap.sh Crassostreagigasgenome /work/GIF/remkv6/Serb/03_DavideGMAP Crassostreagigasgenome.fa DavideQuerydna.fasta
module load gsnap
dbname=$1
dbloc=$2
dbfasta=$3
query=$4
# Quoted so database locations/files containing spaces survive word splitting.
gmap_build -d "$dbname" -D "$dbloc" "$dbfasta"
gmap -D "$dbloc" -d "$dbname" -B 5 -t 16 --input-buffer-size=1000000 --output-buffer-size=1000000 -f psl "$query" > "${dbname%.*}.${query%.*}.psl"
| true
|
c5d4d72e3d9294099199e840d5170b972af5c25d
|
Shell
|
HenriquesLab/NanoPyx
|
/runNoxTools.py
|
UTF-8
| 4,931
| 3.15625
| 3
|
[
"GPL-2.0-only",
"GPL-1.0-or-later"
] |
permissive
|
#!/bin/bash -c python3
import os
import subprocess
import sys
from inspect import isfunction
def run_cmd(command: str):
    """Execute *command* through the shell, raising on a non-zero exit.

    :param command: shell command line to run
    :raises subprocess.CalledProcessError: if the command fails (check=True)
    """
    print("Running command: " + command)
    subprocess.run(command, shell=True, check=True)
def get_version():
    """Return the package version string as reported by versioneer.

    The earlier pyproject.toml parsing approach is kept below for reference.
    """
    # # get the version from the pyproject.toml file
    # with open("pyproject.toml", "r") as f:
    #     txt = f.read()
    #     start = txt.find('version = "') + 11
    #     end = txt.find('"', start)
    #     version = txt[start:end]
    #     return version
    import versioneer  # project-local module; imported lazily on purpose

    return versioneer.get_version()
def find_files(root_dir: str, extension: str, partner_extension: str = None) -> list:
    """
    Collect all files under *root_dir* whose name ends with *extension*.

    :param root_dir: root directory to search
    :param extension: required file suffix (e.g. ".pyx")
    :param partner_extension: if given, only keep files that also have a
        sibling with the same stem and this suffix (e.g. ".pyx"/".pxd" pairs)
    :return: list of matching paths

    Side effect: any directory found empty during the walk is removed.
    """
    matches = []
    for current, subdirs, names in os.walk(root_dir):
        for name in names:
            if not name.endswith(extension):
                continue
            stem = os.path.splitext(name)[0]
            partner_ok = (
                partner_extension is None
                or os.path.exists(os.path.join(current, stem + partner_extension))
            )
            if partner_ok:
                matches.append(os.path.join(current, name))
        # auto remove empty directories
        for subdir in subdirs:
            dir_path = os.path.join(current, subdir)
            if os.listdir(dir_path) == []:
                print("Removing empty directory: ", dir_path)
                os.rmdir(dir_path)
    return matches
def change_cython_profiler_flag(base_path: str, flag: bool):
    """
    Set the cython ``profile=`` directive in every .pyx file under *base_path*.

    Only the first "# cython:" header line carrying the opposite flag is
    rewritten per file; every file is then written back, joining lines
    with "\n" (no trailing newline).

    :param base_path: directory tree to scan for .pyx files
    :param flag: desired value of the cython ``profile`` directive
    """
    old_marker = f"profile={not flag}"
    new_marker = f"profile={flag}"
    for path in find_files(base_path, ".pyx"):
        with open(path, "r") as handle:
            content = handle.read().splitlines()
        for index, text in enumerate(content):
            if text.startswith("# cython:") and old_marker in text:
                print(f"Changing profile flag to {flag}: {path}")
                content[index] = text.replace(old_marker, new_marker)
                break
        with open(path, "w") as handle:
            handle.write("\n".join(content))
def main(mode=None):
    """Show the tool menu and run the chosen nox command.

    :param mode: optional 0-based index into the options table; when None,
        the user is prompted interactively.
    """
    # Menu label -> shell command string (or callable) to execute.
    options = {
        "List nox sessions": "nox -l",
        "Run nox with test source": "NPX_PYTEST_ARGS='-n=auto' nox --session test_source",
        "Run nox with build wheels": "nox --session clear_wheelhouse build_wheel build_sdist",
        "Run nox with test wheels": "nox --session test_wheel",
        "Run nox with build and test wheels": "nox --session clear_wheelhouse build_wheel build_sdist test_wheel",
        "Run nox with test on test_pypi":"nox --session test_testpypi",
        "Run nox with test on PyPi":"nox --session test_pypi",
        "Run nox with generate docs": "nox --session generate_docs",
        "Run nox with lint": "nox --session lint",
        # "Run nox with all sessions": "pipx run nox",
    }

    # Show the logo (reads logo_ascii.txt from the current directory)
    with open("logo_ascii.txt", "r") as f:
        print(f.read())

    if mode is not None:
        selection = mode
    else:
        # print the options
        print("Version: ", get_version())
        print("(⌐⊙_⊙) what do you want to do?")
        for i, option in enumerate(options.keys()):
            cmd = options[option]
            if type(cmd) == str:
                # long command lines are truncated to 100 chars for display
                print(f"{i+1}) {option}: [CMD]> {cmd if len(cmd)< 100 else cmd[:100]+'...'}")
            elif isfunction(cmd):
                print(f"{i+1}) {option}: [FUNCTION]> {repr(cmd)}")
        # get the user's selection (menu is 1-based, the table is 0-based)
        selection = int(input("Enter your selection: ")) - 1

    # print the selected option
    cmd = list(options.values())[selection]
    print(
        r'''
           ,~-.
          ( ' )-.          ,~'`-.
       ,~' `  ' ) )       _( _) )
      ( ( .--.===.--.    ( `    ' )
       `.%%.;::|888.#`.   `-'`~~=~'
       /%%/::::|8888\##\
      |%%/:::::|88888\##|
      |%%|:::::|88888|##|.,-.
      \%%|:::::|88888|##/    )_
       \%\:::::|88888/#/ ( `'  )
        \%\::::|8888/#/(  ,  -'`-.
    ,~-. `%\:::|888/#'(  (     ') )
   (  ) )_ `\__|__/'   `~-~=--~~='
  (   ` ')  ) [VVVVV]
 (_(_.~~~'   \|_|/   off we go...
              [XXX]
              `"""'
    '''
    )
    # Dispatch: command strings go through the shell, callables run directly.
    if type(cmd) == str:
        run_cmd(cmd)
    elif isfunction(cmd):
        cmd()
if __name__ == "__main__":
    # CLI: each argv entry is a 1-based menu option to run non-interactively;
    # with no arguments, show the interactive menu instead.
    if len(sys.argv) > 1:
        modes = sys.argv[1:]
        for mode in modes:
            main(int(mode) - 1)
    else:
        main()
    print("\n(•_•) ( •_•)>⌐■-■\n(⌐■_■) Ready to rock!!")
| true
|
0e036b5011bd05e6800911ab24b0af73f306cffa
|
Shell
|
ozgurgul/hcp-demo-env-aws-terraform
|
/run_ide.sh
|
UTF-8
| 204
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Launch the HPE CP IDE container, building the image first if it is absent.
IMG=hpecp/hpecp-ide:latest

# An empty `docker images -q` result means the image is not present locally.
if [[ -z "$(docker images -q "$IMG" 2>/dev/null)" ]]; then
  ./build_ide.sh
fi

# Interactive container with the current directory mounted as the project.
docker run -it --init -p 3000:3000 -v "$(pwd):/home/project:cached" "$IMG"
| true
|
53ded913aa4d98666e22a0f15d9a96ae316a77f2
|
Shell
|
xkortex/panamax
|
/scripts/sccache.d/inject-sccache-docker.sh
|
UTF-8
| 2,284
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
## This will inject sccache into the system and create hooks
## Currently this is root-only, for containers

# Platform triple pieces used to pick the right release tarball.
OSNAME=$(uname | tr '[:upper:]' '[:lower:]')
ARCH=$(uname -p)

# Print an error message in red to stderr.
errcho() {
    (>&2 echo -e "\e[31m$1\e[0m")
}

# Abort unless the named command is available on PATH.
check_dep() {
    command -v ${1} >/dev/null 2>&1 || { errcho "Script requires '${1}' but it's not installed. Aborting."; exit 1; }
}

check_dep wget

if [[ -z "$SCCACHE_REDIS" ]]; then
    errcho "Variable 'SCCACHE_REDIS' must be set"
    exit 1
fi

## https://stackoverflow.com/questions/2497215/how-to-extract-domain-name-from-url
## I have no idea how it works
## Extract host in separate step because it is a bit safer and easier to debug
_SCCACHE_HOST=$(echo $SCCACHE_REDIS | sed -e "s/[^/]*\/\/\([^@]*@\)\?\([^:/]*\).*/\2/" )
_SCCACHE_HOSTPORT=$(echo "$SCCACHE_REDIS" | sed -e "s/[^/]*\/\/\([^@]*@\)\?\([^/]*\).*/\2/" )
_SCCACHE_PORT=$(echo "${_SCCACHE_HOSTPORT}" | grep -Po -e '(?<=:)(\d+$)')
if [[ -z "$_SCCACHE_HOST" ]] || [[ -z "$_SCCACHE_PORT" ]]; then
    errcho "Variable 'SCCACHE_REDIS' must be of format 'redis://[:<passwd>@]<hostname>[:port][/<db>]'"
fi

## download and enable sccache
if [[ -f /usr/local/bin/sccache ]]; then
    echo "<> <> sccache already exists <> <>"
else
    wget --no-check-certificate -qO- \
    "https://github.com/mozilla/sccache/releases/download/0.2.8/sccache-0.2.8-${ARCH}-unknown-${OSNAME}-musl.tar.gz" \
    | tar xvz \
    && mv "sccache-0.2.8-${ARCH}-unknown-${OSNAME}-musl/sccache" /usr/local/bin/sccache \
    && rm -r "sccache-0.2.8-${ARCH}-unknown-${OSNAME}-musl/" \
    && chmod +x /usr/local/bin/sccache
fi

## Check if server is reachable
# (sccache -s starts/queries the stats server; failure means the configured
# redis backend could not be reached)
sccache -s
status=$?
if [[ $status -ne 0 ]]; then
    errcho "Failed to contact sccache server at ${_SCCACHE_HOST}:${_SCCACHE_PORT}"
    exit 1
fi

# Write compiler wrapper shims so builds can point CC/CXX at these paths.
SCC_DIR=/usr/local/bin/sccache.d
mkdir -p "${SCC_DIR}"
printf '#!/bin/bash
## Wrap our compiler with sccache to allow setting CC=/path/to/sccache.d/gxx
exec sccache /usr/bin/gcc "$@"
' > "$SCC_DIR/gcc"
printf '#!/bin/bash
## Wrap our compiler with sccache to allow setting CC=/path/to/sccache.d/gxx
exec sccache /usr/bin/g++ "$@"
' > "$SCC_DIR/g++"
chmod +x "$SCC_DIR/gcc"
chmod +x "$SCC_DIR/g++"
# NOTE(review): these exports only affect the current process; callers must
# source this script for SCCACHE_CC/SCCACHE_CXX to persist — confirm usage.
export SCCACHE_CC="$SCC_DIR/gcc"
export SCCACHE_CXX="$SCC_DIR/g++"
| true
|
f0a562ef7a3770abcfa5f9ece42c1a42fe2fb938
|
Shell
|
jotak/scripts
|
/gitclean.sh
|
UTF-8
| 1,044
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactively prune git branches: for each branch show its recent commits,
# warn when the tip's subject does not appear in main, then ask whether to
# delete it.

if [[ "$1" = "-h" || "$1" = "help" || "$1" = "--help" ]]; then
  echo "Syntax: gitclean [-a]"
  echo ""
  echo "Fetch all branches one by one, show log and decide wether to keep or delete"
  echo ""
  echo "OPTIONS:"
  echo " -a include remote branches"
  echo ""
  exit
fi

BR_OPTS=""
for arg in "$@"
do
  if [[ "$arg" == "-a" ]]; then
    BR_OPTS="-a"
  fi
done

# Strip the "* "/"  " prefix and the "-> target" of symbolic refs.
# ($(...) replaces the original backticks; branch names never contain spaces,
# so relying on word splitting below is safe.)
BRANCHES=$(git branch $BR_OPTS | sed 's/^..//' | sed 's/->.*//')
SPLIT=($BRANCHES)
TOTAL=${#SPLIT[@]}
COUNTER=0
for k in $BRANCHES
do
  COUNTER=$((COUNTER+1))
  # Tip commit subject, and whether an identically-titled commit is in main.
  DESC=$(git log -1 --pretty=format:"%s" "$k")
  IN_MASTER=$(git log --pretty=oneline --abbrev-commit main | grep "$DESC")
  echo "BRANCH: $k ($COUNTER/$TOTAL)"
  git log -3 --pretty=format:"%C(blue)%s %C(yellow)(%an) %Cgreen(%cr)%Creset" "$k" --
  if [[ "$IN_MASTER" == "" ]]; then
    echo -e "\e[1m\e[33mWARN: last commit doesn't seem to exist in main!\e[0m"
  fi
  read -p "Delete? [y/N] " yn
  case $yn in
    [Yy]* ) git branch -D "$k";;
    * ) echo "Keeping.";;
  esac
  echo "-----"
done
| true
|
9163242e66ec1dc38756f7a98efd766036ce016d
|
Shell
|
Wildog/dotfiles
|
/.tmuxinator/safekill.sh
|
UTF-8
| 1,502
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Gracefully close every pane in the current tmux session by sending each
# foreground program its idiomatic quit sequence (falling back to Ctrl-C).
set -e

function safe_end_procs {
    # Split only on newlines so each "pane_id command" row stays intact.
    old_ifs="$IFS"
    IFS=$'\n'
    panes=$(tmux list-panes -s -F "#{pane_id} #{pane_current_command}")
    for pane_set in $panes; do
        pane_id=$(echo "$pane_set" | awk -F " " '{print $1}')
        pane_proc=$(echo "$pane_set" | awk -F " " '{print tolower($2)}')
        # Default quit sequence; overridden per program below.
        cmd="C-c"
        if [[ "$pane_proc" == "vim" ]] || [[ "$pane_proc" == "vi" ]]; then
            cmd='Escape ":qa" Enter'
        elif [[ "$pane_proc" == "mc" ]]; then
            cmd='F10 "exit" Enter'
        elif [[ "$pane_proc" == "htop" ]]; then
            cmd='"q" "exit" Enter'
        elif [[ "$pane_proc" == "man" ]] || [[ "$pane_proc" == "less" ]]; then
            cmd='"q"'
        elif [[ "$pane_proc" == "bash" ]] || [[ "$pane_proc" == "zsh" ]] || [[ "$pane_proc" == "fish" ]]; then
            cmd='C-c C-u "exit" Enter'
        elif [[ "$pane_proc" == "ssh" ]]; then
            cmd='Enter "~."'
        elif [[ "$pane_proc" == "psql" ]]; then
            cmd='Enter "\q"'
        fi
        # xargs re-splits the quoted key sequence into tmux send-keys args.
        echo $cmd | xargs tmux send-keys -t "$pane_id"
        # vim needs a second step: after quitting vim, exit its shell too.
        if [[ "$pane_proc" == "vim" ]] || [[ "$pane_proc" == "vi" ]]; then
            sleep 0.1
            echo '"exit" Enter' | xargs tmux send-keys -t "$pane_id"
        fi
    done
    IFS="$old_ifs"
}

# Currently a single pass (loop bound is 1); raise the bound for retries.
safe_end_tries=0
while [ $safe_end_tries -lt 1 ]; do
    safe_end_procs
    # NOTE(review): $[ ... ] is deprecated arithmetic syntax; $(( ... )) is
    # the modern equivalent.
    safe_end_tries=$[$safe_end_tries+1]
    sleep 0.75
done
# NOTE(review): "send-message" is not a standard tmux subcommand
# (display-message is) — confirm this works on the targeted tmux version.
tmux send-message "Could not end all processes, you're on your own now!"
|
cd039b835e1d7e86eb686a1efa0e98d1ebfe56c5
|
Shell
|
MestreLion/conquerclub
|
/web/cgi-bin/league_scoreboard.cgi
|
UTF-8
| 861
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# CGI endpoint rendering the clan-league scoreboard. Relies on the sourced
# "common" helper to populate the GET[] query-parameter array and to provide
# the league_scoreboard and htmltable commands.
if [[ -f common ]]; then
source common
fi

# mode=matches: proxy Conquer Club's clan-league match listing, keeping only
# the listing <table> and absolutizing its relative links.
if [[ "${GET[mode]}" == "matches" ]]; then
urlcc="https://www.conquerclub.com"
url="$urlcc/public.php?mode=showclans3"
league="${GET[league]:-44}" # Clan League 7 - Second Division
clanid="${GET[clanid]}"
post="search_clanleague=${league}&search_clan1=${clanid}" #&search_status=Active"
wget -qO- --post-data "$post" -- "$url" |
awk -v q="'" -v host="$urlcc" -v s='[ \n\t]*' '
/<table class="listing">/ { ok=1 }
ok { gsub("href" s "=" s q "/", "href=" q host "/"); print }
ok && /<\/table>/ { exit }
'
exit
fi

# Default mode: render the (optionally filtered) scoreboard as an HTML table.
htmltableparams=()
scoreboardparams=()
if [[ "${GET[clan]}" ]]; then
# Highlight the requested clan's row (match on column 2).
htmltableparams+=(-v mark=2,"${GET[clan]}")
fi
if [[ "${GET[mode]}" == "ongoing" ]]; then
scoreboardparams+=(--ongoing)
fi
league_scoreboard "${scoreboardparams[@]}" | htmltable "${htmltableparams[@]}"
| true
|
f4593f2bdd1e4f45db56d87fac6087055bccdc2d
|
Shell
|
Vikash84/Metagenomics
|
/vk_reference_based_assembly.sh
|
UTF-8
| 748
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# reference_base_assembly_pipeline
# Map trimmed reads to a reference with bwa, call a consensus with
# samtools/bcftools and emit it as FASTA.
sample_prefix=SP0001
# Fix: the original assignments had unbalanced single quotes
# ('$sample_prefix' stayed literal and the stray trailing quote swallowed
# the next line); build the file names with plain expansion instead.
read1=${sample_prefix}_R1_val_1.fq.gz
read2=${sample_prefix}_R2_val_2.fq.gz
fasta_file=1045684451.fasta
#bwa mapping
bwa mem $fasta_file $read1 $read2 > ${sample_prefix}.sam
#samtools sort
samtools sort -O bam -T temp1 ${sample_prefix}.sam >| ${sample_prefix}.bam
#samtools index
samtools index ${sample_prefix}.bam
#samtools mpileup (reference taken from $fasta_file instead of a second
#hard-coded copy of the same path)
samtools mpileup -f $fasta_file -gu ${sample_prefix}.bam | bcftools call -c -O b -o ${sample_prefix}.raw.bcf
#convert file to fastq format
bcftools view -O v ${sample_prefix}.raw.bcf | vcfutils.pl vcf2fq > ${sample_prefix}.fastq
#convert fastq to fasta
python3 convert_fastq_to_fasta.py -q ${sample_prefix}.fastq -a ${sample_prefix}.fasta
| true
|
ee4f32f9f3c0b2f125f0446d7384e043100c070d
|
Shell
|
lucmir/bdata_scripts
|
/gnuplot_scripts/runGenerateCCDFNormalDist.sh
|
UTF-8
| 918
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate normal-distribution (mode 1), CCDF (mode 2) and normal-CCDF
# (mode 3) variants for every distribution data file.
BASE_DIR='../../outputs/distributions'

# Flat data files: run the three transform modes on each.
for i in sections_by_users.data \
    sections_by_videos.data \
    sections_by_section_time.data \
    sections_by_retention.data \
    sections_by_days_after_publishing.data \
    sections_by_hours_after_publishing.data \
    sections_by_day_hour.data; do
    ./generateCCDFNormalDist.sh "$BASE_DIR/$i" "$BASE_DIR/$i.normal" 1
    ./generateCCDFNormalDist.sh "$BASE_DIR/$i" "$BASE_DIR/$i.ccdf" 2
    ./generateCCDFNormalDist.sh "$BASE_DIR/$i" "$BASE_DIR/$i.normal.ccdf" 3
done;

# Per-genre directories: iterate the files with a glob instead of parsing
# `ls` output (which breaks on names with whitespace).
for i in sections_by_genre_and_retention \
    sections_by_genre_and_hours_after_publishing \
    sections_by_genre_and_section_time \
    sections_by_genre_and_day_hour; do
    for path in "$BASE_DIR/$i"/*; do
        j=${path##*/}
        ./generateCCDFNormalDist.sh "$BASE_DIR/$i/$j" "$BASE_DIR/$i/$j.normal" 1
        ./generateCCDFNormalDist.sh "$BASE_DIR/$i/$j" "$BASE_DIR/$i/$j.ccdf" 2
        ./generateCCDFNormalDist.sh "$BASE_DIR/$i/$j" "$BASE_DIR/$i/$j.normal.ccdf" 3
    done;
done;
| true
|
743963d381dd9a3e6722472bb3caee58e334ee60
|
Shell
|
weswalker125/ww-vpn
|
/container/setup.sh
|
UTF-8
| 1,850
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# First-run OpenVPN server bootstrap: build a CA, server and five client
# identities with easyrsa, render server/client configs from templates,
# then NAT traffic and start the daemon.

# Build the PKI and render all config files (runs only when no
# /etc/openvpn/server.conf exists yet).
function init() {
    easyrsa init-pki
    echo "ww-vpn-ca" | easyrsa build-ca nopass
    easyrsa build-server-full server nopass
    for i in {1..5}; do easyrsa build-client-full ww-vpn-${i} nopass; done
    easyrsa gen-dh
    openvpn --genkey --secret /pki/private/pfs.key
    # The CA key is no longer needed once everything is signed.
    rm /pki/private/ca.key

    # Generate server config file
    # Each sed pair substitutes a %PLACEHOLDER% line with the contents of
    # the matching key/cert file ("r" reads the file in, "d" drops the
    # placeholder line itself).
    sed -e '/%PFS_KEY%/{r /pki/private/pfs.key' \
        -e 'd}' \
        -e '/%DH_PEM%/{r /pki/dh.pem' \
        -e 'd}' \
        -e '/%CA_CRT%/{r /pki/ca.crt' \
        -e 'd}' \
        -e '/%CRT%/{r /pki/issued/server.crt' \
        -e 'd}' \
        -e '/%KEY%/{r /pki/private/server.key' \
        -e 'd}' /init/openvpn-server.conf >/clients/ww-vpn-server.conf
    cp /clients/ww-vpn-server.conf /etc/openvpn/server.conf

    # Generate client config files
    # Rename each client's cert/key to a fixed name so the same sed
    # template works for every client.
    for i in {1..5}; do
        mv /pki/issued/ww-vpn-${i}.crt /pki/issued/current.crt
        mv /pki/private/ww-vpn-${i}.key /pki/private/current.key
        sed -e "s/%SERVER_NAME%/vpn.dubyatoo.com/g" \
            -e "s/%CLIENT%/ww-vpn-${i}/g" \
            -e '/%PFS_KEY%/{r /pki/private/pfs.key' \
            -e 'd}' \
            -e '/%DH_PEM%/{r /pki/dh.pem' \
            -e 'd}' \
            -e '/%CA_CRT%/{r /pki/ca.crt' \
            -e 'd}' \
            -e '/%CRT%/{r /pki/issued/current.crt' \
            -e 'd}' \
            -e '/%KEY%/{r /pki/private/current.key' \
            -e 'd}' /init/openvpn-client.conf >/clients/ww-vpn-${i}.conf
        rm /pki/issued/current.crt /pki/private/current.key
    done
}

# Create config if not provided
if [ ! -e /etc/openvpn/server.conf ]; then
    init
fi

# Masquerade VPN (10.8.0.0/16) and docker bridge traffic out through eth0.
iptables -t nat -A POSTROUTING -s 10.8.0.0/16 -o eth0 -j MASQUERADE
iptables -t nat -A POSTROUTING -s 172.17.0.2/16 -o eth0 -j MASQUERADE

echo "Starting openvpn server..."
openvpn --config /etc/openvpn/server.conf
| true
|
4f58b39ac738494fc51d2f4bd5f0a6a4ef889004
|
Shell
|
mbonto/fewshot_neuroimaging_classification
|
/MAML_plus_plus/script_generation_tools/local_run_template_script.sh
|
UTF-8
| 278
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Template for a local (non-cluster) experiment launch; the $...$ tokens
# ($execution_script$, $experiment_config$) are substituted by the
# script-generation tooling before this runs.
# export GPU_ID=$1
# echo $GPU_ID
cd ..
export DATASET_DIR="datasets/"
# export CUDA_VISIBLE_DEVICES=$GPU_ID
# Activate the relevant virtual environment:
python $execution_script$ --name_of_args_json_file experiment_config/$experiment_config$ # --gpu_to_use $GPU_ID
| true
|
22ade337bad8ffc7fc583523db25f30e9eefcac7
|
Shell
|
HKCaesar/wradlib
|
/scripts/run_tests.sh
|
UTF-8
| 1,209
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2017, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
# Run the wradlib test suites (-u unit, -d doctest, -e examples,
# -n notebooks), optionally under coverage; exit non-zero if any failed.

exit_status=0

# export location of .coveragerc
if [[ "$COVERAGE" == "true" ]]; then
    export COVERAGE_PROCESS_START=$WRADLIB_BUILD_DIR/.coveragerc
    # run tests, retrieve exit status (any failing suite flips exit_status)
    ./testrunner.py -u -c -s
    (( exit_status = ($? || $exit_status) ))
    ./testrunner.py -d -c -s
    (( exit_status = ($? || $exit_status) ))
    ./testrunner.py -e -c -s
    (( exit_status = ($? || $exit_status) ))
    ./testrunner.py -n -c -s
    (( exit_status = ($? || $exit_status) ))
    # copy .coverage files to cov-folder
    # Fix: quote the -name pattern so the shell cannot glob-expand it in the
    # current directory before find sees it.
    cov=$(find . -name '*.coverage.*' -print)
    echo $cov
    mkdir -p cov
    for cv in $cov; do
        mv "$cv" cov/.
    done
    # combine coverage, remove cov-folder
    coverage combine cov
    rm -rf cov
else
    # run tests, retrieve exit status
    ./testrunner.py -u -s
    (( exit_status = ($? || $exit_status) ))
    ./testrunner.py -d -s
    (( exit_status = ($? || $exit_status) ))
    ./testrunner.py -e -s
    (( exit_status = ($? || $exit_status) ))
    ./testrunner.py -n -s
    (( exit_status = ($? || $exit_status) ))
fi
exit $exit_status
| true
|
3c9569c987bde586b6423380df429027556fc6d2
|
Shell
|
shubhamdalal95/BashScript
|
/ICGPhotographSize.sh
|
UTF-8
| 733
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# For every exam centre (101..102) and slot (A..M), descend into the
# candidate-photograph folder and list photographs smaller than 2 KB
# (presumably flagged as corrupt/incomplete — confirm threshold with owner).
for f in /home/cdac/Documents/{101..102}/Slot" "[A-M]/
do
cd "$f" #changing directory to /home/cdac/Documents/{101..102}/Slot" "[A-M]/
# NOTE(review): parses `ls -l` output — only safe if the layout guarantees
# space-free names; the biometric folder matches "NNN_Bio...".
var1=`ls -l | awk -F" " '{print $9}' | grep -E '[0-9]{3}_Bio*'`
cd "$f""$var1" #changing directory to /home/cdac/Documents/{101..102}/Slot" "[A-M]/[0-9]{3}_Bio*/
var2=`ls -l "$f""$var1" | awk -F" " '{print $9}'` #getting CandidatePhotographs file
var3=`echo $var2 | awk -F" " '{print $1}'`
cd "$f""$var1"/"$var3" #changing directory to /home/cdac/Documents/{101..102}/Slot" "[A-M]/[0-9]{3}_Bio*/CandidatPhotographs/
list=`ls -l | awk -F" " '{print $9}'` #show all Centre wise slot wise candiate photographs
# $list intentionally unquoted: each photograph name becomes a find start path
find $list -type f -size -2k
# unzip -o "$var2"
done
| true
|
9a7f6039495c45e263d7f68a2cdcd9d70558ccd7
|
Shell
|
Ascend/ModelZoo-PyTorch
|
/PyTorch/built-in/cv/detection/DB_ID0706_for_PyTorch/test/train_no_TQE_performance_RT2_8p.sh
|
UTF-8
| 7,075
| 3.265625
| 3
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash

############### Resolve the training-script execution path ###############
# cd to the directory at the same level as the "test" folder before running,
# for compatibility; test_path_dir is the path containing the test folder.
cur_path=`pwd`
cur_path_last_dirname=${cur_path##*/}
if [ x"${cur_path_last_dirname}" == x"test" ];then
    test_path_dir=${cur_path}
    cd ..
    cur_path=`pwd`
else
    test_path_dir=${cur_path}/test
fi

################ Basic configuration; review and adjust per model ##################
# Required fields (must be defined here): Network batch_size RANK_SIZE WORLD_SIZE MASTER_ADDR MASTER_PORT
# Network name, same as the directory name
Network="DB_ID0706_for_PyTorch"
# Training batch_size
batch_size=128
# Number of NPU cards used for training
export RANK_SIZE=8
export ENABLE_RUNTIME_V2=1
export WORLD_SIZE=8
export MASTER_ADDR='127.0.0.1'
export MASTER_PORT='18888'
export TASK_QUEUE_ENABLE=1
export DYNAMIC_OP="ADD"
# Dataset path; keep empty here (filled in from the CLI), do not modify
data_path=""
# Path used to check the pretrained model
model_path=$cur_path/path-to-model-directory
# Number of training epochs
train_epochs=1
# NPU device id used for training
device_id=6
bin=True
profiling=''
start_step=-1
stop_step=-1
# Parameter validation; data_path is mandatory, the rest are model-specific.
# Any parameter added here must be defined and assigned a default above.
for para in $*
do
    if [[ $para == --device_id* ]];then
        device_id=`echo ${para#*=}`
    elif [[ $para == --data_path* ]];then
        data_path=`echo ${para#*=}`
    elif [[ $para == --model_path* ]];then
        model_path=`echo ${para#*=}`
    elif [[ $para == --rt1 ]];then
        rt1=True
    elif [[ $para == --bin ]];then
        bin=True
    elif [[ $para == --profiling* ]];then
        profiling=`echo ${para#*=}`
    elif [[ $para == --start_step* ]];then
        start_step=`echo ${para#*=}`
    elif [[ $para == --stop_step* ]];then
        stop_step=`echo ${para#*=}`
    fi
done

# Verify that data_path was passed in; no modification needed
if [[ $data_path == "" ]];then
    echo "[Error] para \"data_path\" must be confing"
    exit 1
fi

# Verify that model_path was passed in; no modification needed
if [[ $model_path == "" ]];then
    echo "[Error] para \"model_path\" must be confing"
    exit 1
fi

# Determine the device id: either dynamically assigned via ASCEND_DEVICE_ID
# or taken from the --device_id default above; no modification needed
if [ $ASCEND_DEVICE_ID ];then
    echo "device id is ${ASCEND_DEVICE_ID}"
elif [ ${device_id} ];then
    export ASCEND_DEVICE_ID=${device_id}
    echo "device id is ${ASCEND_DEVICE_ID}"
else
    # NOTE(review): missing "echo" — this line tries to run the message
    # text as a command instead of printing it.
    "[Error] device id must be config"
    exit 1
fi

# Enable runtime v1 when requested
if [ $rt1 ];then
    export ENABLE_RUNTIME_V2=0
    echo "use rt1 runtime"
fi

if [[ $profiling == "GE" ]];then
    export GE_PROFILING_TO_STD_OUT=1
fi

################# Create the log output directory; no modification needed #################
if [ -d ${test_path_dir}/output/${ASCEND_DEVICE_ID} ];then
    rm -rf ${test_path_dir}/output/${ASCEND_DEVICE_ID}
    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
else
    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
fi
################# Launch the training script #################
# Training start time; no modification needed
start_time=$(date +%s)
# # When not on the CI platform, source the NPU environment variables
# check_etp_flag=`env | grep etp_running_flag`
# etp_flag=`echo ${check_etp_flag#*=}`
# if [ x"${etp_flag}" != x"true" ];then
#     source ${test_path_dir}/env_npu.sh
# fi
# Clear environment toggles that would interfere with this run
# (DYNAMIC_OP and TASK_QUEUE_ENABLE were exported above and are
# deliberately unset again here).
unset PTCOPY_ENABLE
unset SCALAR_TO_HOST_MEM
unset COMBINED_ENABLE
unset HCCL_CONNECT_TIMEOUT
unset MOTD_SHOWN
unset DYNAMIC_OP
unset TASK_QUEUE_ENABLE
unset HCCL_WHITELIST_DISABLE
# Point the experiment config at the real dataset location
sed -i "s|./datasets|$data_path|g" experiments/seg_detector/base_ic15.yaml
# Pin the process to at most 95 CPU cores
kernel_num=$(nproc)
if [ ${kernel_num} -lt 95 ];then
    cpu_number=${kernel_num}
else
    cpu_number=95
fi
# NOTE(review): --amp and --epochs are passed twice below — harmless with
# most arg parsers (last wins) but worth deduplicating.
taskset -c 0-${cpu_number} nohup python3 -W ignore train.py experiments/seg_detector/ic15_resnet50_deform_thre.yaml \
    --data_path ${data_path}/icdar2015 \
    --resume ${data_path}/db_ckpt/MLT-Pretrain-ResNet50 \
    --seed=515 \
    --distributed \
    --amp \
    --device_list "0,1,2,3,4,5,6,7" \
    --num_gpus 8 \
    --local_rank 0 \
    --dist_backend 'hccl' \
    --world_size 1 \
    --epochs ${train_epochs} \
    --batch_size ${batch_size} \
    --lr 0.056 \
    --addr $(hostname -I |awk '{print $1}') \
    --amp \
    --epochs ${train_epochs} \
    --Port 2950 \
    --bin ${bin} \
    --profiling "${profiling}" \
    --start_step ${start_step} \
    --stop_step ${stop_step} > ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
wait
# Training end time; no modification needed
end_time=$(date +%s)
e2e_time=$(( $end_time - $start_time ))

# Result reporting; no modification needed
echo "------------------ Final result ------------------"
# Extract throughput (FPS) from the training log; review per model
FPS=`grep -a 'FPS@all' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END {print}' | awk -F '[#@all]' '{print $NF}'`
FPS=${FPS#* } # strip the leading space character
# Print; no modification needed
echo "Final Performance images/sec : $FPS"
# Print; no modification needed
echo "E2E Training Duration sec : $e2e_time"

# Performance-monitoring result summary
# Training case information; no modification needed
BatchSize=${batch_size}
DeviceType=`uname -m`
CaseName=${Network}_RT2_bs${BatchSize}_${RANK_SIZE}'p'_noeditor_'perf'

# Collect performance data; no modification needed
# Throughput
ActualFPS=${FPS}
# Duration of a single training iteration (ms)
TrainingTime=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'*1000/'${FPS}'}'`

# Extract Loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
grep -a 'Epoch:' ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk -F 'Loss' '{print $NF}' | awk '{print $1}' > ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt

# Loss value of the last iteration; no modification needed
ActualLoss=`awk 'END {print}' ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`

# Write the key information into ${CaseName}.log; no modification needed
echo "Network = ${Network}" > ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "RankSize = ${RANK_SIZE}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "BatchSize = ${BatchSize}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "DeviceType = ${DeviceType}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "CaseName = ${CaseName}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "ActualFPS = ${ActualFPS}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "TrainingTime = ${TrainingTime}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "ActualLoss = ${ActualLoss}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "E2ETrainingTime = ${e2e_time}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
| true
|
844d6cb770b99e06db366948221fde849f253e93
|
Shell
|
ravem/BatchScripts
|
/Telegram_certbot_notifier.sh
|
UTF-8
| 853
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# Run certbot renewal (stopping/starting nginx around it) and send the run
# log to a Telegram chat through the bot API.
KEY="INSERT_YOUR_KEY_HERE"
URL="https://api.telegram.org/bot$KEY/sendMessage"
CERTBOT_LOG_FILE="/var/log/certbot_telegram.log"
TARGET="INSERT_CHAT_ID_HERE" # Telegram ID of the conversation with the bot, get it from /getUpdates API
#this is the actual renew script, assuming you are using, as me, nginx :-)
certbot renew -w /var/appdata/ --pre-hook "systemctl stop nginx" --post-hook "systemctl start nginx" | tee ${CERTBOT_LOG_FILE}
# Use the configured log path rather than a second hard-coded copy of it.
TEXT="certbot renewal task run log:
+++++++++++++++++++++++++++++++++++
$(cat "$CERTBOT_LOG_FILE")
+++++++++++++++++++++++++++++++++++
Check for errors."
#Here you build the message to send
MESSAGE="chat_id=$TARGET&text=$TEXT&disable_web_page_preview=true"
#Here you send the message
# Fix: the original posted "$MESSAGGIO" (leftover Italian name, never set),
# so the notification body was always empty.
curl -s --max-time 10 --retry 5 --retry-delay 2 --retry-max-time 10 -d "$MESSAGE" $URL > /dev/null 2>&1 &
| true
|
ad7a0b0be8f3e929064ecf7e404ca223731135c2
|
Shell
|
Cairnarvon/kopipe
|
/kopipe.sh
|
UTF-8
| 4,868
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# kopipe — a tiny copypasta (snippet) manager. Snippets are stored as
# plain files inside $KOPIPEDIR (default: ~/.kopipe).
if [ -z "$KOPIPEDIR" ]; then
    KOPIPEDIR=~/.kopipe
fi

# If it doesn't exist already, make a new kopipe directory
if [ ! -d "$KOPIPEDIR" ]; then
    mkdir "$KOPIPEDIR" && \
    echo -e "New kopipe directory created at \033[1m$KOPIPEDIR\033[0m" >&2 || \
    echo -e "\033[1;31mERROR\033[0m Couldn't create kopipe directory at"\
    "\033[1m$KOPIPEDIR\033[0m" >&2
fi
# Functions
# -h, --help, or no arguments: display help and exit
usage()
{
    # Prints a man-page-like synopsis (bold/underline via ANSI escapes)
    # to stderr and terminates the script with status 1.
    echo -e "\033[1mNAME\033[0m\n" \
            "\tkopipe - \033[1mXarn\033[0m's kopipe manager.\n" \
            "\n\033[1mUSAGE\033[0m\n" \
            "\t$0 \033[4mNAME\033[0m...\n" \
            "\t\tDisplay the named kopipe.\n\n" \
            "\t$0 [\033[1m-h\033[0m|\033[1m--help\033[0m]\n" \
            "\t\tShow this message and exit.\n\n" \
            "\t$0 \033[1m-l\033[0m|\033[1m--list\033[0m" \
            "['\033[4mGLOB\033[0m']\n" \
            "\t\tList the available kopipe.\n\n" \
            "\t$0 \033[1m-n\033[0m|\033[1m--new\033[0m" \
            "\033[4mNAME\033[0m\n" \
            "\t\tCreate new kopipe from stdin or in an editor and save" \
            "as \033[4mNAME\033[0m.\n\n" \
            "\t$0 \033[1m-e\033[0m|\033[1m--edit\033[0m" \
            "\033[4mNAME\033[0m...\n" \
            "\t\tEdit existing kopipe \033[4mNAME\033[0m in your favourite" \
            "editor (\$EDITOR\n\t\tenvironment variable or" \
            "\033[1mvi\033[0m), or append from stdin.\n\n" \
            "\t$0 \033[1m-d\033[0m|\033[1m--delete\033[0m" \
            "\033[4mNAME\033[0m...\n" \
            "\t\tDelete kopipe \033[4mNAME\033[0m.\n\n" \
            "\t$0 \033[1m-s\033[0m|\033[1m--search\033[0m" \
            "\033[4mPATTERN\033[0m ['\033[4mGLOB\033[0m']\n" \
            "\t\tFind kopipe matching \033[4mPATTERN\033[0m," \
            "possibly just in \033[4mGLOB\033[0m.\n" >&2
    exit 1
}
# -l, --list: show current kopipe collection
list()
{
    # $* is intentionally left unquoted so a glob supplied by the user
    # (e.g. 'foo*') expands here, inside $KOPIPEDIR.
    ls $* 2>/dev/null || echo -e "\033[2m(Nothing here.)\033[0m" >&2
    exit 0
}
# -n, --new: create new kopipe from stdin or an editor.
# $1 is the snippet name; any existing snippet of that name is wiped first.
new()
{
    # Clear file if it exists already; -- guards names starting with '-'.
    rm -f -- "$1"
    # Quote "$@" so snippet names containing whitespace survive the
    # hand-off to edit (the old unquoted $@ word-split them).
    edit "$@"
    exit 0
}
# -e, --edit: edit kopipe in an editor, or append lines from stdin.
# $1 is the snippet file; extra args are passed straight to $EDITOR.
edit()
{
    # Determine if stdin is a tty: a tty fd resolves to /dev/pts/N,
    # a pipe or redirected file does not.
    stdin="$(ls -l /proc/self/fd/0)"
    stdin="${stdin/*-> /}"

    if [[ "$stdin" =~ ^/dev/pts/[0-9] ]]; then
        # Yes. Open an editor
        if [ -z "$EDITOR" ]; then
            EDITOR=vi
        fi
        $EDITOR $*
    else
        # No. Append kopipe from stdin.
        # FIXES: IFS= keeps leading/trailing whitespace, the quoted
        # "$line" stops glob expansion (the old `echo $line` mangled
        # both), and the `|| [[ -n "$line" ]]` clause keeps a final
        # line that lacks a trailing newline.
        while IFS= read -r line || [[ -n "$line" ]]; do
            printf '%s\n' "$line" >> "$1"
        done
        echo -e "\033[1m$1\033[0m saved." >&2
    fi
    exit 0
}
# -d, --del, --delete: delete existing kopipe
delete()
{
    # rm -i prompts per file; -- guards names that start with a dash.
    # $* stays unquoted on purpose so user-supplied globs expand.
    rm -i -- $*
    exit 0
}
# -s, --search: search existing kopipe
# $1 is the grep pattern; remaining args (optional) restrict the set of
# snippet files to search.
search()
{
    local PATTERN
    local FILES
    PATTERN="$1"
    shift
    FILES="$*"
    # Default to every snippet; FILES is used unquoted below so the
    # '*' glob expands inside $KOPIPEDIR.
    [ -z "$FILES" ] && FILES='*'
    grep -l "$PATTERN" $FILES || echo -e "\033[2mNo match.\033[0m" >&2
    exit 0
}
# No options: just display the named kopipe, one after another.
# Prints each snippet's name (dimmed) to stderr and its contents to
# stdout; missing snippets produce a notice on stderr.
display()
{
    if [ -f "$1" ]; then
        echo -e "\033[2m$1\033[0m" >&2
        cat "$1"
        echo >&2
    else
        echo -e "\033[2mKopipe \033[1m$1\033[2m does not seem to exist.\033[0m" >&2
    fi

    shift
    # Recurse over the remaining names. FIX: "$@" (quoted) replaces the
    # old unquoted $*, which word-split snippet names containing spaces.
    if [ ! -z "$1" ]; then
        display "$@"
    fi
    exit 0
}
# Process arguments
# Each handler function above exits the script itself, so control only
# falls through a case when nothing matched.
arg=$1
shift
cd "$KOPIPEDIR"

# If there's a second argument, we're probably doing something
# NOTE(review): $* is passed unquoted below, so snippet names containing
# whitespace will word-split — confirm before relying on such names.
if [ ! -z "$1" ]; then
    case $arg in
        -n|--new)
            new $*
            ;;
        -e|--edit)
            edit $*
            ;;
        -d|--del|--delete)
            delete $*
            ;;
        -s|--search)
            search $*
            ;;
    esac
fi

# Maybe there's only one argument
case $arg in
    -n|--new|-e|--edit|-d|--del|--delete|-s|--search)
        echo "That command needs an argument, comrade." >&2
        exit 2
        ;;
    -h|--help)
        usage
        ;;
    -l|--list)
        list $*
        ;;
esac

# However many arguments there are, they don't add up to a valid command, so
# they must be a request for kopipe
if [ ! -z "$arg" ]; then
    display "$arg" $*
fi

# If there aren't any arguments at all, let's remind the user what's possible
usage
| true
|
42b50c84157e815c5401a6574f122f2ac6086fcb
|
Shell
|
andreadesalve/DART
|
/script
|
UTF-8
| 1,403
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the DART benchmark matrix: for every (eligibles, universities)
# pair, start a ganache-cli chain, migrate the contracts with truffle,
# run the Python test, then kill the chain. All output is appended to a
# per-combination log file.
echo "Running test.."
for n_eligibles in 3 6 10 16 20
do
    echo "N_eligibles $n_eligibles"
    for n_universities in $(seq 1 $n_eligibles)
    do
        echo "N_universities $n_universities"
        # One log file per (eligibles, universities) combination.
        log="testEligibles${n_eligibles}universities${n_universities}.txt"
        echo "N_eligibles $n_eligibles" >> $log
        echo "N_universities $n_universities" >> $log
        echo "Create blockchain " >> $log
        # universities + eligibles + 3 service accounts.
        n_accounts=$((n_universities + n_eligibles + 3))
        echo "Numero accounts: $n_accounts"
        echo "Numero accounts: $n_accounts" >> $log
        ganache-cli --accounts $n_accounts --port 8545 --gasLimit 0xb71b00 --networkId 5777 &
        ganache_cli_PID=$!
        echo "Create blockchain with pid $ganache_cli_PID" >> $log
        sleep 10
        truffle migrate --network ganache >> $log
        sleep 10
        python3 test/TestRS.py --build build/contracts/RT.json --netid 5777 $n_eligibles $n_universities >> $log
        sleep 5
        echo "kill $ganache_cli_PID" >> $log
        kill $ganache_cli_PID >> $log
    done
done
exit 0
| true
|
da899d3a458bcfd46ca6888eaec82a2e187435b3
|
Shell
|
gladkih/dotfiles
|
/zsh/zshrc
|
UTF-8
| 3,166
| 2.90625
| 3
|
[] |
no_license
|
# Path to your oh-my-zsh installation.
export ZSH=/Users/max/.oh-my-zsh

# NOTE(review): "set guifont=..." is Vim syntax; in zsh, `set` replaces
# the positional parameters, so this line is almost certainly a stray
# paste — confirm before keeping it.
set guifont=Menlo\ for\ Powerline

export SASS_LIBSASS_PATH=/Users/max/work/libsass

#Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="meritt"

# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"

# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"

# Uncomment the following line to disable bi-weekly auto-update checks.
DISABLE_AUTO_UPDATE="true"

plugins=(git cloudapp node npm bower brew osx extract z mercurial)

# User configuration
export PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/opt/ImageMagick/bin"

source $ZSH/oh-my-zsh.sh

# You may need to manually set your language environment
# export LANG=en_US.UTF-8

# ssh
export SSH_KEY_PATH="~/.ssh/dsa_id"

#Aliases
alias nw="/Applications/nwjs.app/Contents/MacOS/nwjs"
alias py="ping 8.8.8.8"    # quick connectivity check (public DNS)
alias pr="ping 192.168.1.1"    # quick check of the local router

#Unused dir
alias hidedir="chflags hidden ~/Applications ~/Desktop ~/Public"

#Show/hide hidden files in Finder
alias show='defaults write com.apple.finder AppleShowAllFiles -bool true && killall Finder && chflags nohidden ~/Public/ ~/Desktop/ ~/Applications/'
alias hide='defaults write com.apple.finder AppleShowAllFiles -bool false && killall Finder && hidedir'

#Hide/show all desktop icons (useful when presenting)
alias hidedesktop='defaults write com.apple.finder CreateDesktop -bool false && killall Finder'
alias showdesktop='defaults write com.apple.finder CreateDesktop -bool true && killall Finder'

#Clean
alias cleanup='find . -name "*.DS_Store" -type f -delete'
alias removepart='find . -name "*.part" -type f -delete'
alias emptytrash="sudo rm -rfv /Volumes/*/.Trashes; sudo rm -rfv ~/.Trash; sudo rm -rfv /private/var/log/asl/*.asl"

#Jump back n directories at a time
alias ..='cd ..'
alias ...='cd ../../'
alias ....='cd ../../../'
alias .....='cd ../../../../'
alias ......='cd ../../../../../'
alias ~='cd ~'

#Kill app
alias quit='killall -9'

#Link
alias dwl='cd ~/Downloads/'
alias drp='cd ~/Dropbox/'
alias wrk='cd ~/Documents/Work/'
alias rmt='cd ~/Documents/Work/remote'

# List files in human readable format with color and without implied directories
alias ls='ls -lGFh -G'

#Show hidden files only
alias ls.='ls -dAh .*'

#Concat image
alias resize='mogrify -resize'

#Img optimization
alias imgopt='imageoptim -a -j -q -d'

#Create sprite (need glue)
function sprite-g() {
    glue "$1" "$1"-res --namespace '' --scss --padding=0 --margin=3
}
function sprite-g-retina() {
    glue "$1" "$1"-res --namespace '' --scss --padding=0 --margin=3 --retina
}
function sprite() {
    montage -background none ./$1/*.png -tile 1 -geometry +0+0 $1.png
}
function crop() {
    convert "$1" -crop 150x150 parts-%02d.jpg
}

#Delete files matching the given extension mask
function rm-mask() {
    find . -name "*.$1" -type f -delete
}

#Find shorthand
function f() {
    find . -name "$1" 2>/dev/null
}
| true
|
a40a24b0179f98ba483a743cb9d24aab44ff518a
|
Shell
|
Calliari/shell-script
|
/linux-ubuntu-shell/nginx-config/nginx_test_curl_opensssl.sh
|
UTF-8
| 1,599
| 2.734375
| 3
|
[] |
no_license
|
# Testing request
# Send a request to the nginx server with HOSTNAME, going through localhost.
curl --verbose --header 'Host: HOSTNAME WHERE THE NGINX IS CONFIGURED' -k 'https://127.0.0.1:8201'

# Testing with the client certs against the endpoint (http or https).
# FIX: the original placed comments after the "\" line continuations
# (which terminates the command mid-way) and had a stray "\." on the
# first line, so the example could not run as written. The per-option
# notes now live here — note the --cert/--key labels were also swapped:
#   --cacert  CA certificate of the CA that signed the server's
#             certificate (if it is not already in your OS trust store)
#   --cert    your client certificate
#   --key     your client private key
curl --cacert ./ca.crt \
     --cert ./client.crt \
     --key ./client.key \
     'https://EXAMPLE_URL'

# Testing with the client certs to the endpoint (http or https) using the localhost
curl -v --cert ./certificate.crt --key ./key-certificate.key --header 'Host: example.com' http://127.0.0.1:443
curl -v --cert ./certificate.crt --key ./key-certificate.key --cacert cert-authority-certificate.crt --header 'Host: example.com' http://127.0.0.1:443

# Testing with the client certs to the endpoint (http or https) using the localhost POST request with credentials
curl -vv --location --request POST 'example.com/token' \
--header 'Content-Type: application/x-www-form-urlencoded' \
--data-urlencode 'grant_type=client_credentials' \
--data-urlencode 'client_id=CLIENT-ID' \
--data-urlencode 'client_secret=<CLIENT SECRET>'

# Testing with openssl; type the request interactively after the
# handshake completes (FIX: these were bare lines that the shell would
# try to execute):
#   GET / HTTP/1.0
#   Host: example.com
openssl s_client -connect localhost:443 -cert ./certificate.crt -key ./certificate-key.key -servername example.com

# check the certificate used by the "servername"
openssl s_client -connect localhost:443 -servername example.com 2>&1 | openssl x509 -text
| true
|
cb7ccc8e32523dd24df1f73170500d086aee72d8
|
Shell
|
icapps/ios-crash-reporter
|
/Pod/Frameworks/SplunkMint.framework/Resource/splunkmint_postbuild_dsym_upload_script.sh
|
UTF-8
| 2,893
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright 2015 Splunk, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Xcode post-build step: zip every dSYM bundle produced by the build and
# upload the archives plus their UUIDs to the Splunk Mint symbolication
# service. Skipped for simulator and Debug builds.

# Newline-only IFS so the find loop below iterates per line of output.
IFS='
'

if [[ -z $2 ]]; then
    echo "Usage: $0 <API KEY> <API TOKEN>"
    exit -1
fi

if [[ ! "${DWARF_DSYM_FOLDER_PATH}" ]]; then
    echo "$0 Not in XCode build"
    exit -2
fi

if [[ "${EFFECTIVE_PLATFORM_NAME}" == "-iphonesimulator" ]] || [[ "${CONFIGURATION}" == "Debug" ]]; then
    echo "Splunk Mint: Skipping upload, simulator or Debug build symbols found"
    exit 0
fi

API_KEY="$1"
API_TOKEN="$2"
TEMP_ZIP_PATH="/tmp/splunk-mint-dsyms"

# Comma-separated list of every dSYM UUID in this build.
DSYM_UUIDS=$(xcrun dwarfdump --uuid "${DWARF_DSYM_FOLDER_PATH}/${DWARF_DSYM_FILE_NAME}" | awk '{print $2}' | tr -d '[()]' | tr "\n" "," | sed 's/,$//')
if [[ "${DSYM_UUIDS}" =~ "unsupported" ]]; then
    echo "Splunk Mint: Unsupported UUID found. Exiting..."
    exit -2
fi;

mkdir -p "${TEMP_ZIP_PATH}"
/bin/rm -f "${TEMP_ZIP_PATH}/"*.zip

# Add one more field for primary file ${DWARF_DSYM_FILE_NAME}
cd "${DWARF_DSYM_FOLDER_PATH}"
for DIR in $(find . -name "*.dSYM" -maxdepth 1 -type d); do
    APPNAME=$(ls -1 "${DIR}/Contents/Resources/DWARF/"* | awk -F/ '{print $NF}')
    echo "Splunk Mint: Archiving \"${APPNAME}\" to \"${TEMP_ZIP_PATH}/${APPNAME}.zip\""
    /usr/bin/zip -j "${TEMP_ZIP_PATH}/${APPNAME}.zip" "${DWARF_DSYM_FOLDER_PATH}/${DIR}/Contents/Resources/DWARF/"*
    if [[ ! -f "${TEMP_ZIP_PATH}/${APPNAME}.zip" ]]; then
        echo "Splunk Mint: Failed to archive dSYMs for \"${APPNAME}\" to \"${TEMP_ZIP_PATH}\""
        # BUG FIX: the glob must sit outside the quotes (as in the other
        # three cleanup calls); the quoted "*.zip" matched nothing, so
        # stale archives were never removed on this error path.
        /bin/rm -f "${TEMP_ZIP_PATH}/"*.zip
        exit -3
    fi
    CURL_OPTIONS=( $CURL_OPTIONS "-F${APPNAME}-file=@${TEMP_ZIP_PATH}/${APPNAME}.zip" )
done

APPNAME=$(ls -1 "${DWARF_DSYM_FILE_NAME}/Contents/Resources/DWARF/"* | awk -F/ '{print $NF}')
CURL_OPTIONS=( $CURL_OPTIONS "-F${APPNAME}-uuids=${DSYM_UUIDS}" )

HTTP_RESPONSE=$(curl -w %{http_code} -s --output /dev/null ${CURL_OPTIONS[@]} -H "X-Splunk-Mint-apikey: ${API_KEY}" -H "X-Splunk-Mint-Auth-Token: ${API_TOKEN}" -XPOST https://ios.splkmobile.com/api/v1/upload/symbols)
HTTP_RESPONSE=$(( ${HTTP_RESPONSE} + 0 ))

# Any 2xx/3xx status counts as success.
if (( ${HTTP_RESPONSE} > 199 )) && (( $HTTP_RESPONSE < 400 )); then
    echo "Splunk Mint: Successfully uploaded debug symbols"
else
    echo "Splunk Mint: ERROR \"${HTTP_RESPONSE}\" while uploading \"${TEMP_ZIP_PATH}/${APPNAME}.zip\""
    /bin/rm -f "${TEMP_ZIP_PATH}/"*.zip
    exit -4
fi

/bin/rm -f "${TEMP_ZIP_PATH}/"*.zip
exit 0
| true
|
6b83553e1707a1520c9075663d9ab723d545ca2b
|
Shell
|
sncodeGit/MyScripts
|
/clippings_backup_autoremove.sh
|
UTF-8
| 659
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Prune old clipping backups: in the first backup folder whose init
# marker is "/ClippingsBackup", keep only the 10 newest files (by
# change time) besides the marker file itself, then exit.
source ${HOME}/MyScripts/tmp/vars.env
cd "$GOOGLE_DISK_BACKUP_DIR"
DIRS=$(find . -type d | grep /)
for DIR in ${DIRS}
do
    FILES=$(find ${DIR} -type f)
    for FILE in ${FILES}
    do
        # Marker line looks like "__auto__sncodegit__init__:/ClippingsBackup".
        INIT_FILE=$(cat ${FILE} | grep '__auto__sncodegit__init__:' | cut -d ':' -f 2)
        if [[ "$INIT_FILE" == "/ClippingsBackup" ]]
        then
            cd ${DIR}
            # Number of entries, excluding the "total" header and the marker.
            CLIPPINGS_COUNT=$(ls -l -c -t | grep -v 'total ' | grep -v $FILE | wc -l)
            if (( "$CLIPPINGS_COUNT" <= "10" ))
            then
                exit 0
            fi
            # ls -c -t sorts newest-first, so the tail beyond the first 10
            # entries is the oldest surplus; field 9 is the filename.
            # NOTE(review): parsing ls output breaks on filenames containing
            # whitespace — verify backup names are whitespace-free.
            rm -f $(ls -l -c -t | grep -v 'total ' | grep -v $FILE | tail -n $(($CLIPPINGS_COUNT - 10)) | awk {'print $9'})
            exit 0
        fi
    done
done
| true
|
dc34eed4cee55d40c780efd672c8573dc21540b4
|
Shell
|
LudvigHz/dotfiles
|
/prompt.zsh
|
UTF-8
| 2,924
| 3.359375
| 3
|
[] |
no_license
|
# zsh prompt
autoload -Uz add-zsh-hook

# Powerline/Nerd-font glyphs used as VCS status indicators.
VCS_SYMBOL_BRANCH=""
VCS_SYMBOL_AHEAD=""
VCS_SYMBOL_BEHIND=""
VCS_SYMBOL_STAGED="●"
VCS_SYMBOL_CONFLICTS=""
VCS_SYMBOL_UNSTAGED=""
VCS_SYMBOL_UNTRACKED=""
VCS_SYMBOL_STASHED=""
VCS_SYMBOL_CLEAN='✔'

# git info
# Echo a prompt fragment describing the current git work tree (branch,
# ahead/behind counts, staged/unstaged/untracked/stashed/conflicted
# indicators) or an empty string outside a repository.
vcs_prompt_info() {
    # check if in git directory
    if git rev-parse --is-inside-work-tree 2>/dev/null | grep -q 'true'; then
        # set all variables
        VCS_INFO_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null)
        # push:track yields "[ahead N, behind M]"; strip brackets/commas.
        _VCS_INFO_COMMIT_STATUS=$(git for-each-ref --format="%(push:track)" refs/heads/$VCS_INFO_BRANCH | awk '{gsub(/\[|]|,/,""); print}')
        _VCS_INFO_STATUS=$(git status --porcelain 2>/dev/null)
        VCS_INFO_AHEAD=$(echo $_VCS_INFO_COMMIT_STATUS | awk '{for(i=1;i<=NF;i++) if ($i=="ahead") print $(i+1)}')
        VCS_INFO_BEHIND=$(echo $_VCS_INFO_COMMIT_STATUS | awk '{for(i=1;i<=NF;i++) if ($i=="behind") print $(i+1)}')
        VCS_INFO_STAGED=$(git diff --cached --numstat 2>/dev/null | wc -l | tr -d ' ')
        VCS_INFO_UNSTAGED=$(git diff --name-status 2>/dev/null | wc -l | tr -d ' ')
        VCS_INFO_UNTRACKED=$(echo "${_VCS_INFO_STATUS}" | grep "^??" | wc -l | tr -d ' ')
        VCS_INFO_CONFLICTS=$(git ls-files --unmerged 2>/dev/null | wc -l | tr -d ' ')
        VCS_INFO_STASHED=$(git rev-list --walk-reflogs --count refs/stash 2>/dev/null)
        # add indicators to prompt
        VCS_INFO="%F{green}${VCS_SYMBOL_BRANCH} %F{cyan}${VCS_INFO_BRANCH}%f "
        if [ ! -z "$VCS_INFO_AHEAD" ]; then
            VCS_INFO+="${VCS_SYMBOL_AHEAD} ${VCS_INFO_AHEAD} "
        fi
        if [ ! -z "$VCS_INFO_BEHIND" ]; then
            VCS_INFO+="${VCS_SYMBOL_BEHIND} ${VCS_INFO_BEHIND} "
        fi
        # NOTE(review): the "+0" comparisons below rely on the shell
        # accepting a signed integer literal in [ -ne ]; plain 0 would be
        # clearer — verify before changing.
        if [ "$VCS_INFO_STAGED" -ne "+0" ]; then
            VCS_INFO+="%F{green}${VCS_SYMBOL_STAGED} ${VCS_INFO_STAGED}%f "
        fi
        if [ "$VCS_INFO_UNSTAGED" -ne "+0" ]; then
            VCS_INFO+="%F{yellow}${VCS_SYMBOL_UNSTAGED} ${VCS_INFO_UNSTAGED}%f "
        fi
        if [ "$VCS_INFO_UNTRACKED" -ne "+0" ]; then
            VCS_INFO+="%F{red}${VCS_SYMBOL_UNTRACKED} ${VCS_INFO_UNTRACKED}%f "
        fi
        if [ "$VCS_INFO_STASHED" -ne "0" ]; then
            VCS_INFO+="%F{blue}${VCS_SYMBOL_STASHED} ${VCS_INFO_STASHED}%f "
        fi
        if [ "$VCS_INFO_CONFLICTS" -ne "+0" ]; then
            VCS_INFO+="%F{red}${VCS_SYMBOL_CONFLICTS} ${VCS_INFO_CONFLICTS}%f "
        fi
        if [ -z "${_VCS_INFO_STATUS}" ]; then
            VCS_INFO+="%F{green}${VCS_SYMBOL_CLEAN}%f"
        fi
        echo $VCS_INFO
    else
        echo ""
    fi
}
# Virtual env indicator
export VIRTUAL_ENV_DISABLE_PROMPT=yes

# Store the active virtualenv's basename in psvar[1] (rendered by the
# %(1V...) / %1v prompt escapes below); clear it when no venv is active.
function virtenv_indicator() {
    if [[ -z $VIRTUAL_ENV ]]; then
        psvar[1]=''
    else
        psvar[1]=${VIRTUAL_ENV##*/}
    fi
}

add-zsh-hook precmd virtenv_indicator

# Finalized prompt
# Rebuilt before each prompt: venv name, background-job count, cwd,
# git fragment, then an input line coloured by the last exit status.
precmd() {
    PS1="%(1V.(%1v).)%f%(1j.%F{cyan}[%j]%f .)%F{blue}ﬦ %~%f " # start of promt: ﬦ ~
    PS1+="$(vcs_prompt_info)" # git info
    PS1+=$'\n%(?.%F{green}.%F{red}) %f' # input line
}
| true
|
a5f460b469fa170bd1266136ca21af837d7243ad
|
Shell
|
jibijose/gatling_vm_cluster
|
/runAllLocalRun.sh
|
UTF-8
| 1,055
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Stage a Gatling simulation into the local bundle, clear old results,
# and run it. Usage: runAllLocalRun.sh local SimulationClass
if [ "$#" -ne 2 ]
then
    echo "Usage: $0 local SimulationClass"
    exit 1
fi

CLOUD=$1    # always "local" for this script; not referenced below
SIMULATION_NAME=$2
GATLING_HOME_DIR_NAME=gatling-charts-highcharts-bundle-3.3.1

echo "******************************************************************************************************************"
echo "*********************************************** Ulimit values ****************************************************"
ulimit -a

echo "Local setup "
# Copy the scenario sources and their JSON resources into the bundle.
cp -rf $SIMULATION_NAME/$SIMULATION_NAME*.scala $GATLING_HOME_DIR_NAME/user-files/simulations/
cp -rf $SIMULATION_NAME/$SIMULATION_NAME*.json $GATLING_HOME_DIR_NAME/user-files/resources/
rm -rf $GATLING_HOME_DIR_NAME/results/*

echo "******************************************************************************************************************"
echo "*********************************************** Gatling execution ************************************************"
JAVA_OPTS="-Xms4096m -Xmx4096m" $GATLING_HOME_DIR_NAME/bin/gatling.sh -s $SIMULATION_NAME
echo "Local execution completed"
| true
|
8d7d72f0497b5f11e2571a67292ee474bfa5fce6
|
Shell
|
klabit87/multidisabler-samsung
|
/META-INF/com/google/android/update-binary
|
UTF-8
| 6,665
| 3.34375
| 3
|
[] |
no_license
|
#!/sbin/sh
#
# Flashable services disabler for G97[035][FN0], G977[BN] and N97[05][FN0],
# N976[BN0], N971N, A[1245]05[FN], A105M, A[25]05([YG]N|G), A405FM, T51[05],
# T72[05], T86[05], F900[FN] and F907[BN].
#
# by Ian Macdonald.
#
# Use this to prime your device after installing TWRP.

# update-binary arguments: $2 = output fd for ui_print, $3 = zip path.
ZIPFILE=$3
ZIPNAME=${ZIPFILE##*/}
OUTFD=$2
scr_wdth=50

# Detect real $OUTFD
#
# If the fd we were given points at /tmp it is not the recovery pipe;
# scan our open fds for the pipe the recovery process is reading.
if readlink /proc/$$/fd/$OUTFD 2>/dev/null | grep /tmp >/dev/null; then
    OUTFD=0

    for FD in $( ls /proc/$$/fd ); do
        if readlink /proc/$$/fd/$FD 2>/dev/null | grep pipe >/dev/null; then
            if ps | grep " 3 $FD " | grep -v grep >/dev/null; then
                OUTFD=$FD
                break
            fi
        fi
    done
fi

# Print one line to the recovery UI using the ui_print protocol.
ui_print() {
    echo -ne "ui_print $1\n" >> /proc/self/fd/$OUTFD
    echo -ne "ui_print\n" >> /proc/self/fd/$OUTFD
}

# Print a full-width "=" separator.
print_full_bar() {
    ui_print "$(printf '%*s\n' $scr_wdth | tr ' ' '=')"
}

# Print $1 centred between "=" margins within scr_wdth columns.
print_justified() {
    local str="$1"
    local str_len=${#str}
    local padding_len=$(( ($scr_wdth - $str_len - 2) / 2))
    local ljust="$(printf '%*s' $padding_len)"
    local rjust="$(printf '%*s' $(($padding_len + $str_len % 2)))"

    ui_print "=$ljust$str$rjust="
}

# Delete the <hal> stanza of the given Samsung security service from
# /vendor/etc/vintf/manifest.xml so init no longer expects it.
rm_from_manifest() {
    local service=$1
    # Package path is different on Android 10. Check and adapt.
    #
    [ $major -eq 10 ] && local path_extra='\.hardware'

    sed -i -e '/<hal format="hidl">/{N;/<name>vendor\.samsung'"$path_extra"'\.security\.'"$service"'<\/name>/{:loop;N;/<\/hal>/!bloop;d}}' /vendor/etc/vintf/manifest.xml
}
# Rewrite every vendor fstab so /data is mounted without file-based
# encryption (lets TWRP read userdata).
disable_fbe() {
    ui_print " - Disabling file-based encryption (FBE) for /data..."

    # S10 range = fstab.exynos9820.
    # Note 10 range = fstab.exynos9825.
    # A[124]0 range = fstab.exynos7885.
    # A50 = fstab.exynos9610.
    # Tab A 10.1 range = fstab.exynos7885.
    #
    for i in /vendor/etc/fstab.exynos[0-9][0-9][0-9][0-9] \
             /vendor/etc/fstab.qcom; do
        # Option 1: This replaces the offending line.
        #sed -i -e 's/fileencryption=[^,]*/encryptable/' $i

        # Option 2: This comments out the offending line and adds an edited one.
        sed -i -e 's/^\([^#].*\)fileencryption=[^,]*\(.*\)$/# &\n\1encryptable\2/g' $i
    done
}

# Stop the Vaultkeeper daemon from starting and strip it from the HAL
# manifest; otherwise it spams the log on custom binaries.
disable_vaultkeeper() {
    ui_print " - Disabling vaultkeeperd..."

    if [ -f $ANDROID_ROOT/system/etc/init/vk*.rc ]; then
        # This is Android 10: Vaultkeeper has its own init file.
        #
        sed -i -e 's/^[^#].*$/# &/' $ANDROID_ROOT/system/etc/init/vk*.rc
    else
        # This is Android 9: Vaultkeeper is started from init.rc.
        #
        sed -i -e 's/^[^#].*vaultkeeper.*$/# &/' \
            -re '/\/system\/bin\/vaultkeeperd/,/^#?$/s/^[^#]*$/#&/' $ANDROID_ROOT/init.rc
    fi

    # Qualcomm devices such as the T860 and T865 need this, otherwise the log
    # will be spammed with messages about failed connections to the Vaultkeeper
    # service.
    rm_from_manifest vaultkeeper

    # Option 1: Unknown whether it even works.
    # sed -i -e 's/\(ro\.security\.vaultkeeper\.native=\)1/\10/' /vendor/build.prop
    #
    # Option 2: This works and apparently won't spam logcat, but is drastic.
    # rm $ANDROID_ROOT/system/bin/vaultkeeperd
    #
    # Option 3: This works and is and is the least invasive choice.
    for i in $ANDROID_ROOT/system /vendor; do
        [ -f $i/bin/vaultkeeperd ] && chmod 0 $i/bin/vaultkeeperd
    done
}

# Comment the cass service out of init.rc (T860 only).
disable_cass() {
    # The T860 needs this. Otherwise, the log will fill with messages like this:
    #
    # 10-20 03:23:20.501 27757 27757 E CASS: Failed to connect(4)
    # 10-20 03:23:20.501 27757 27757 E CASS: Failed to connect ril daemon(2). Retry cnt(6)
    ui_print " - Disabling cass..."
    sed -i -e 's/^[^#].*cass.*$/# &/' -re '/\/system\/bin\/cass/,/^#?$/s/^[^#]*$/#&/' $ANDROID_ROOT/init.rc
}

# Disable the proca (process authentication) daemon and drop it from
# the HAL manifest.
disable_proca() {
    ui_print " - Disabling process authentication..."

    # G97[035]F = pa_daemon.rc on Android 9; pa_daemon_teegris.rc on Android 10.
    # G977B, N97[05]F, A105F, A505F = pa_daemon_teegris.rc
    # T510 + T515 = pa_daemon_kinibi.rc
    # T860 + T865 = pa_daemon_qsee.rc
    #
    sed -i -e 's/^[^#]/# &/' /vendor/etc/init/pa_daemon*.rc

    # Option 1: Works only if operable XML stanza is not the last in file.
    #sed -i -e '/<name>vendor\.samsung\.security\.proca<\/name>/,/<hal format="hidl">/d' /vendor/etc/vintf/manifest.xml
    #
    # Option 2: This is safer, but more complex.
    # Package path is different on Android 10. Check and adapt.
    #
    rm_from_manifest proca
}

# Drop the wsm HAL stanza from the manifest (Android 10 only; see main).
disable_wsm() {
    ui_print " - Disabling wsm..."
    rm_from_manifest wsm
}

# Rename the patch file so stock recovery is not restored on boot.
disable_recovery_restoration() {
    ui_print " - Disabling restoration of stock recovery..."
    mv $ANDROID_ROOT/system/recovery-from-boot.p $ANDROID_ROOT/system/recovery-from-boot.p~
}
# Byte-patch libbluetooth.so in place (Android 10 only); requested by
# renaming the zip to contain "_btfix" (see main flow below).
patch_libbluetooth() {
    [ $major -ne 10 ] && return

    local f=$ANDROID_ROOT/system/lib64/libbluetooth.so
    local tf=/tmp/f

    ui_print " - Bluetooth fix requested via renamed zip."
    ui_print " - Attempting to patch $f..."

    # Hex round-trip: dump, substitute the instruction sequence, rebuild.
    xxd -p $f | tr -d '\n ' |
    sed -e 's/c8000034f4031f2af3031f2ae8030032/c8000035f4031f2af3031f2ae8031f2a/' |
    xxd -rp > $tf

    # cmp differing => the substitution matched and changed something.
    if ! cmp $tf $f >/dev/null; then
        ui_print " - Patching succeeded."
        touch -r $f $tf    # preserve the original timestamp
        mv $tf $f
    else
        ui_print " - Patching failed. No change made."
        rm -f $tf
    fi
}
# --- main flow: banner, device gate, mount, disable, unmount ---
# NOTE(review): the device list below repeats several model patterns and
# contains "A105M,," — looks like a copy/paste slip in the banner text;
# verify before editing (it is user-visible output).
ui_print " "
print_full_bar
print_justified "Multi-disabler v2.2 for"
print_justified "G97[035][FN0], G977[BN], N97[05][FN0],"
print_justified "N971N, N976[BN0], A[1245]05[FN], A105M,,"
print_justified "A[25]05G, A[1245]05[FN], A105M, A[25]05G,"
print_justified "A[25]05[YG]N, A405FM, T51[05], T72[05],"
print_justified "T86[05], F900[FN], F907[BN],"
print_justified "G96[50][00], N9600."
print_justified "by Ian Macdonald"
print_full_bar
ui_print " "

os=$(getprop ro.build.version.release)
major=${os%%.*}    # Android major version (9, 10, ...)
bl=$(getprop ro.boot.bootloader)

# Firmware version starts at either 8th or 9th character, depending on length
# of bootloader string (12 or 13).
#
fw=${bl:$((${#bl} - 4)):4}

# Device is first 5 characters of bootloader string.
#
device=${bl:0:$((${#bl} - 8))}

# Abort unless the bootloader reports a supported model.
if echo $device | grep -Ev 'G97([035][FN0]|7[BN])|N97([05][FN0]|6[BN0]|1N)|A([1245]05[FN]|105M|[25]05([YG]N|G)|405FM)|T(51|72|86)[05]|F90(0[FN]|7[BN])|G96[50][00]|N9600' >/dev/null; then
    ui_print " - Unsupported device detected. Installation aborted."
    ui_print " "
    exit 1
fi

ui_print " - Detected a $device device on $fw firmware."
ui_print " - Mounting $ANDROID_ROOT..."
mount $ANDROID_ROOT
ui_print " - Mounting /vendor..."
mount /vendor

disable_fbe
disable_vaultkeeper
disable_proca
disable_recovery_restoration
[ $major -eq 10 ] && disable_wsm
[ $device = T860 ] && disable_cass
# A "_btfix" suffix in the zip name requests the libbluetooth patch.
[ $ZIPNAME != ${ZIPNAME/_btfix//} ] && [ $major -eq 10 ] && patch_libbluetooth

ui_print " - Unmounting /vendor..."
umount /vendor
ui_print " - Unmounting $ANDROID_ROOT..."
umount $ANDROID_ROOT

ui_print " "
ui_print " - Finished."
ui_print " "
| true
|
05d5c1d46e4f19ed982e8f78caef6961840919d9
|
Shell
|
rp1703/myprojectdir
|
/script/logicaltest.sh
|
UTF-8
| 533
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
###################
#Purpose : To learn shell scripting
#Owner : Rasmita Pradhan
#Version : 1.0
#Input : Marks (integer 1-100) as the first argument
#Output : Division classification on stdout
###################
#Number Test
MARKS=$1
if [ ! "$MARKS" ]; then
    echo "Please enter the mark"
    exit
fi
# FIX: reject non-numeric input up front; previously `[ -gt ]` printed
# an "integer expression expected" error for garbage input.
case $MARKS in
    *[!0-9]*)
        echo "Please enter the marks between 0 to 100"
        exit
        ;;
esac
# FIX: the deprecated/ambiguous `-o` inside [ ] is replaced by two
# tests joined with ||.
if [ "$MARKS" -gt "100" ] || [ "$MARKS" -le "0" ]; then
    echo "Please enter the marks between 0 to 100"
    exit
fi
if [ "$MARKS" -gt "70" ]; then
    echo "First division"
elif [ "$MARKS" -gt "55" ]; then
    echo "Second Division"
elif [ "$MARKS" -gt "45" ]; then
    echo "Third Division"
else
    echo "fail"
fi
| true
|
dc7b0964e0f81206e12b22205152cdddf936b3bb
|
Shell
|
zoeyingdi/website-l10n
|
/groups/Almost_There/directory/99_bottles.sh
|
UTF-8
| 424
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# "99 Bottles of Beer": count down from 99 and print each verse.
# FIX: the shebang was /bin/sh, but the C-style for loop and (( ))
# arithmetic are bash-only; on systems where sh is dash the script
# fails with a syntax error.
for ((i=99; i>=1; i--)); do
    if ((i==1)); then
        echo "1 bottle of beer on the wall
1 bottle of beer!
You take one down and pass it around,
No more bottles of beer on the wall :-("
    else
        echo "$i bottles of beer on the wall
$i bottles of beer!
You take one down and pass it around,
$((i-1)) bottles of beer on the wall! "
    fi
done
| true
|
a74b548dd0add43243d91936c4c118f2a50bfc16
|
Shell
|
uprush/druid-satori-demo
|
/05hive_external_table.sh
|
UTF-8
| 407
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/sh
# Create a Hive external table backed by the Druid datasource
# "cryptocurrency-market-data", executed through beeline.
cat<<EOF>/tmp/create_table.sql
SET hive.druid.broker.address.default=druid.example.com:8082;
CREATE EXTERNAL TABLE if not exists cryptocurrency_market_data
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.datasource" = "cryptocurrency-market-data");
EOF

# HiveServer2 host:port; the first argument overrides the default.
HS2=${1:-localhost:10000}
BEELINE="beeline -u jdbc:hive2://$HS2/default"

$BEELINE -f /tmp/create_table.sql
| true
|
c278ba3eb0eda4ef4033bc8619192e1ccab20934
|
Shell
|
celogeek/git-redmine-suite
|
/share/Git-Redmine-Suite/helpers/scripts/help.sh
|
UTF-8
| 1,901
| 3.375
| 3
|
[] |
no_license
|
#export params vars
# Parse the flags shared by all Git-Redmine-Suite commands and export
# them for the calling scripts; leftover args stay in $@.
while getopts frcav:t:hnm:peHT: opt; do
    case $opt in
        f) export REDMINE_FORCE=1 ;;
        r) export REDMINE_REBASE=1 ;;
        c) export REDMINE_CHAIN_FINISH=1 ;;
        a) export REDMINE_AUTO_REASSIGN=1 ;;
        v) export VERSION=$OPTARG ;;
        t) export REDMINE_TIME="$OPTARG" ;;
        h) export HELP=1 ;;
        n) export NO_MESSAGE=1 ;;
        p) export REDMINE_TASK_IS_PARENT=1 ;;
        e) export REDMINE_TASK_WITHOUT_PARENT=1 ;;
        H) export REDMINE_HIGHEST_PRIO_ONLY=1 ;;
        T) export HOTFIX_PREFIX_TAG="$OPTARG" ;;
    esac
done
shift $((OPTIND-1))

# FIX: quote the expansion — the old unquoted [ -z $HOTFIX_PREFIX_TAG ]
# broke ("too many arguments") when the tag contained whitespace.
[ -z "$HOTFIX_PREFIX_TAG" ] && HOTFIX_PREFIX_TAG=hotfix
# If -h was given (HELP set by the getopts block), print the usage line
# "* <command> <args>" — plus the generic option list unless
# HELP_NO_OPTION is set — then exit 1. $* is the usage text appended
# after the reconstructed command name.
function help_command {
    if [ -n "$HELP" ]; then
        # Rebuild the user-facing command from the script name:
        # "git-redmine-foo" -> "git redmine foo".
        CMD=$(basename $0 | /usr/bin/perl -pe 's/\-/ /g')
        if [ -n "$POSSIBLE_ENV" ]; then
            CMD="$POSSIBLE_ENV $CMD"
        fi
        if [ -z "$HELP_NO_OPTION" ]; then
            echo ""
        fi
        echo " * $CMD $*"
        if [ -z "$HELP_NO_OPTION" ]; then
            help_option_command
        fi
        exit 1
    fi
}
# Print the option reference shared by every suite command.
function help_option_command {
    cat <<__EOF__

[OPTIONS]
    * -h => help : display the help message
    * -f => force : don't ask question to start / finish / clear
    * -r => rebase : before starting the review, rebase on origin devel
    * -c => chain : chain review when finish a task, or finish the release after a start
    * -a => auto reassign : reassign automatically the task
    * -t => hours spent : hours spend on a task in decimal format (ex: 2.5, or 1)
    * -n => no message : skip message edition
    * -p => parent : indicate that the task is a parent task
    * -e => no parent : display task without parent
    * -H => highest prio only : display the highest priority of each project only
    * -T => hotfix tag prefix : hotfix tag prefix (default: hotfix)
__EOF__
}
| true
|
78d68a148e7da02b61429cfb5c4b7ba9f2d0a6f0
|
Shell
|
whit98c/Experiments
|
/fswebcam-run.sh
|
UTF-8
| 363
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Reset the USB webcam twice (with pauses) and capture a timestamped
# 1080p snapshot with fswebcam.
USBNAME=webcam
LSUSB=$(lsusb | grep --ignore-case $USBNAME)
# Build /dev/bus/usb/BBB/DDD from the lsusb line "Bus BBB Device DDD: ...":
# field 2 = bus number, field 4 = device number (trailing ':' stripped).
FOLD="/dev/bus/usb/"$(echo $LSUSB | cut --delimiter=' ' --fields='2')"/"$(echo $LSUSB | cut --delimiter=' ' --fields='4' | tr --delete ":")
echo $LSUSB
echo $FOLD
sudo ./usbreset $FOLD
sleep 5
sudo ./usbreset $FOLD
sleep 5
fswebcam -r 1920x1080 kitchen-`date +"%Y-%m-%d-%H-%M-%S"`.jpg
| true
|
605e6ffd792d08394b13a93de1186276b9f63a84
|
Shell
|
Helioviewer-Project/hvJP2K
|
/hvJP2K/jpx/test/test
|
UTF-8
| 704
| 3.46875
| 3
|
[] |
no_license
|
#! /bin/sh
function file_size {
    # Byte count of the file named by $1; awk strips wc's padding and
    # the echoed filename, leaving just the number.
    wc -c "$1" | awk '{ print $1 }'
}
# Split the reference JPX for dataset $1, re-merge it (with and without
# links), and compare every produced file against ../$1-ref by size and
# byte content, printing "pass:"/"fail:" per file. Works in a throwaway
# $1-test directory that is removed afterwards.
function do_test {
    mkdir -p $1-test
    cd $1-test
    PRE=~/hvJP2K/bin
    # Comma-joined list of the reference JP2 inputs.
    IN=$(echo ../"$1"-ref/*.jp2 | tr ' ' ,)
    $PRE/hv_jpx_split -i ../$1.jpx
    $PRE/hv_jpx_merge -i *.jp2 -o $1-hv.jpx
    $PRE/hv_jpx_merge -i $IN -o $1-hv-links.jpx -links
    for i in *; do
        TST=$i
        STST=$(file_size $TST)
        REF=../$1-ref/$i
        SREF=$(file_size $REF)
        cmp -s "$TST" "$REF"
        local status=$?
        RES=fail
        # Pass requires both equal size and identical bytes.
        if [ $STST == $SREF ] && [ $status -eq 0 ]; then RES=pass; fi
        echo $RES: $i
    done
    cd ..
    rm -fr $1-test
}

do_test aia
do_test swap
do_test hetero
| true
|
b867b63f719bff68afc09aaa00f457810bed182c
|
Shell
|
shizonic/majestic
|
/bin/automount.sh
|
UTF-8
| 302
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# Mount the data partition (label "dados") under /media/dados via
# udisksctl if it is not mounted already; a no-op otherwise.
volume="/media/dados"
device="/dev/disk/by-label/dados"
if mount | grep $volume 1> /dev/null; then
    #already mounted
    exit 0
else
    #not mounted
    if [ -b $device ]; then
        udisksctl mount -b /dev/disk/by-label/dados
    else
        #$device is not a block device
        exit 0
    fi
fi
| true
|
0e720bc2fe07a2fa07db5f5d8826fbc84fe9bcca
|
Shell
|
trojanh/testing-git-rebase
|
/cordova/deploy/testfairy-android-upload.sh
|
UTF-8
| 2,684
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/sh
# Upload an Android APK to TestFairy via its HTTP API and print the resulting
# instrumented-build URL. Usage: testfairy-android-upload.sh APK_FILENAME
# Expects TESTFAIRY_API_KEY in the environment (set by CI).
# https://github.com/testfairy/command-line-uploader/blob/master/testfairy-upload.sh
UPLOADER_VERSION=1.09
# Put your TestFairy API_KEY here. Find it in your TestFairy account settings.
# TESTFAIRY_API_KEY= # added as environment variable on circleci
# Tester Groups that will be notified when the app is ready. Setup groups in your TestFairy account testers page.
# This parameter is optional, leave empty if not required
TESTER_GROUPS=
# Should email testers about new version. Set to "off" to disable email notifications.
NOTIFY="off"
# If AUTO_UPDATE is "on" all users will be prompt to update to this build next time they run the app
AUTO_UPDATE="on"
# The maximum recording duration for every test.
MAX_DURATION="10m"
# Is video recording enabled for this build
VIDEO="on"
# Add a TestFairy watermark to the application icon?
ICON_WATERMARK="on"
# Comment text will be included in the email sent to testers
COMMENT="Uploaded on `date`"
# locations of various tools
CURL=curl
ZIPALIGN=zipalign
SERVER_ENDPOINT=http://app.testfairy.com
# Print usage to stdout.
usage() {
echo "Usage: testfairy-android-upload.sh APK_FILENAME"
echo
}
# Fail fast (exit 1) if curl or zipalign cannot be executed.
verify_tools() {
# Windows users: this script requires zip, curl and sed. If not installed please get from http://cygwin.com/
# Check 'curl' tool
${CURL} --help >/dev/null
if [ $? -ne 0 ]; then
echo "Could not run curl tool, please check settings"
exit 1
fi
# Check 'zipalign' tool
OUTPUT=$( ${ZIPALIGN} 2>&1 | grep -i "Zip alignment" )
if [ $? -ne 0 ]; then
echo "Could not run zipalign tool, please check settings"
exit 1
fi
}
if [ $# -ne 1 ]; then
usage
exit 1
fi
# before even going on, make sure all tools work
verify_tools
APK_FILENAME=$1
if [ ! -f "${APK_FILENAME}" ]; then
usage
echo "Can't find file: ${APK_FILENAME}"
exit 2
fi
# temporary file paths
DATE=`date`
TMP_FILENAME=.testfairy.upload.apk
ZIPALIGNED_FILENAME=.testfairy.zipalign.apk
rm -f "${TMP_FILENAME}" "${ZIPALIGNED_FILENAME}"
/bin/echo -n "Uploading ${APK_FILENAME} to TestFairy.. "
JSON=$( ${CURL} -s ${SERVER_ENDPOINT}/api/upload -F api_key=${TESTFAIRY_API_KEY} -F apk_file="@${APK_FILENAME}" -F icon-watermark="${ICON_WATERMARK}" -F video="${VIDEO}" -F max-duration="${MAX_DURATION}" -F auto-update="${AUTO_UPDATE}" -F comment="${COMMENT}" -A "TestFairy Command Line Uploader ${UPLOADER_VERSION}" )
# Extract "instrumented_url" from the JSON response with sed (no jq
# dependency); an empty result means the upload failed.
URL=$( echo ${JSON} | sed 's/\\\//\//g' | sed -n 's/.*"instrumented_url"\s*:\s*"\([^"]*\)".*/\1/p' )
if [ -z "${URL}" ]; then
echo "FAILED!"
echo
echo "Upload failed, please check your settings"
exit 1
fi
echo "OK!"
echo
echo "Build was successfully uploaded to TestFairy and is available at:"
echo ${URL}
| true
|
aad6c34707379d3a0241228c0293e665394cb53f
|
Shell
|
wkwiatkowski/kurs-scripts
|
/la/disp-env.sh
|
UTF-8
| 257
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Show a handful of environment variables for the current user, one per
# "label: value" line separated by blank lines.
clear
printf '%s\n' "Display a few variables of current user" ""
printf '%s\n' "username is: $USER" ""
printf '%s\n' "a home directory is: $HOME" ""
printf '%s\n' "Hist control: $HISTCONTROL" ""
printf '%s\n' "a terminal is: $TERM"
| true
|
bd742c6f4fe09dbda000323b3cec2e56d58997cf
|
Shell
|
grahammitchell/music-ripping-tools
|
/prep.sh
|
UTF-8
| 365
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh
# Post-rip cleanup for a music directory: delete temporary mp3/playlist
# files, move the "tracks" metadata file and a generated "name-foo" file
# (artist + album derived from the current path) into /home/music/$1.
# Usage: prep.sh <dest-subdir-under-/home/music>
if [ ! -z "$1" ]
then
rm -f *.mp3 full_album.m3u
if [ -s tracks ]
then
rm -f album.m3u
mv tracks /home/music/$1
else
echo Warning: no \"tracks\", you\'ll need to modify album.m3u.
fi
# Parent dir name with its first 7 characters stripped -- presumably removes
# a fixed path prefix such as "/home/m"; TODO confirm against actual layout.
dirname `pwd` | cut -b 8- > name-foo
basename `pwd` >> name-foo
mv name-foo /home/music/$1
else
echo You must supply a directory to move things to.
fi
| true
|
deb7dfbe8b530cd14fe1662fcd9fc30c2407e26a
|
Shell
|
Doctor-wu/WhatIWanna
|
/webHooks/deploy.sh
|
UTF-8
| 354
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Webhook deploy script: pull the latest code for the site named by $1,
# reinstall dependencies, restart the node processes and fix ownership.
# Usage: deploy.sh <site-dir-name>
WEB_PATH='/www/wwwroot/root/'$1
WEB_USER='root'
WEB_USERGROUP='root'
echo "Start deployment"
# Abort early if the target directory is missing instead of running
# git/cnpm in whatever directory we happened to start from.
cd "$WEB_PATH" || { echo "cannot cd to $WEB_PATH" >&2; exit 1; }
echo "pulling source code..."
git pull
git checkout master
# BUGFIX: the progress messages were offset by one from the commands they
# describe ("changing permissions" printed before cnpm, etc.); each message
# now precedes its matching action.
echo "installing dependency..."
cnpm i
echo "restarting serves.."
forever restartall
echo "changing permissions..."
chown -R "$WEB_USER:$WEB_USERGROUP" "$WEB_PATH"
echo "Finished."
| true
|
591da226c0f710797797ac60f39014ae46cb55a8
|
Shell
|
ar18-linux/ar18_lib_bash
|
/ar18_lib_bash/script/import.sh
|
UTF-8
| 2,244
| 3.640625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# ar18
# Defines ar18.script.import, a memoized loader for ar18_lib_bash modules:
# each "ar18.x.y" name is translated to a library-relative path and sourced
# exactly once per shell, either from a local install (path recorded in
# ~/.config/ar18/ar18_lib_bash/INSTALL_DIR) or downloaded from GitHub into
# /tmp. The outer _import wrapper only (re)defines the function if missing.
function ar18.script._import(){
function ar18.script.import() {
# Prepare script environment
{
# Function template version 2021-07-14_00:22:16
# Get old shell option values to restore later
local shell_options
shopt -s inherit_errexit
IFS=$'\n' shell_options=($(shopt -op))
# Set shell options for this script
set +x
set -o pipefail
set -e
local LD_PRELOAD_old
LD_PRELOAD_old="${LD_PRELOAD}"
set -u
LD_PRELOAD=
local ret
ret=0
}
##############################FUNCTION_START#################################
local to_import
to_import="${1}"
# Global associative array used to memoize already-imported module names.
if [ ! -v import_map ]; then
declare -Ag import_map
fi
if [ ! -v import_map["${to_import}"] ]; then
import_map["${to_import}"]=1
local old_cwd="${PWD}"
local to_import_transformed
# "ar18.a.b" -> "a/b" (strip the "ar18." prefix, dot -> slash).
to_import_transformed="${to_import/ar18./}"
to_import_transformed="${to_import_transformed/./\/}"
local target_path
# Check if lib is installed locally
if [ ! -f "/home/$(whoami)/.config/ar18/ar18_lib_bash/INSTALL_DIR" ]; then
# Not installed: fetch the module from GitHub into a per-process /tmp dir.
# NOTE(review): relies on ar18_parent_process being set by the caller.
target_path="/tmp/${ar18_parent_process}/ar18_lib_bash/${to_import_transformed}.sh"
mkdir -p "$(dirname "${target_path}")"
cd "$(dirname "${target_path}")"
curl -O "https://raw.githubusercontent.com/ar18-linux/ar18_lib_bash/master/ar18_lib_bash/${to_import_transformed}.sh" > /dev/null 2>&1
cd "${old_cwd}"
. "${target_path}"
else
target_path="$(cat "/home/$(whoami)/.config/ar18/ar18_lib_bash/INSTALL_DIR")/ar18_lib_bash/${to_import_transformed}.sh"
. "${target_path}"
fi
echo "${to_import} imported"
fi
###############################FUNCTION_END##################################
# Restore environment
{
set +x
LD_PRELOAD="${LD_PRELOAD_old}"
# Restore old shell values
for option in "${shell_options[@]}"; do
eval "${option}"
done
}
return "${ret}"
}
export -f "ar18.script.import"
}
# Only define the importer if it is not already present in this shell.
type ar18.script.import > /dev/null 2>&1 || ar18.script._import
| true
|
8cc8d3f6032eaa3643d2eda14d1f68b7eecaaeeb
|
Shell
|
davidbeermann/dotdotdot
|
/bin/nlight
|
UTF-8
| 402
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Toggle GNOME's night-light setting: read the current value, print it,
# then flip it to the opposite state (announcing the transition).
schema=org.gnome.settings-daemon.plugins.color
key=night-light-enabled
NLIGHT_STATUS=$(gsettings get "$schema" "$key")
echo $NLIGHT_STATUS
case "$NLIGHT_STATUS" in
  false)
    echo "Turn night light on"
    gsettings set "$schema" "$key" true
    ;;
  *)
    echo "Turn night light off"
    gsettings set "$schema" "$key" false
    ;;
esac
| true
|
28c34ab73bace55050db6c0fbe499523bd44496f
|
Shell
|
gokulchandrap/contrail-test
|
/solution_scripts/tools/check_test_discovery.sh
|
UTF-8
| 406
| 2.515625
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Sanity-check that testr can discover tests in both scripts/ and
# serial_scripts/; exits 1 on the first discovery failure.
echo "Validating if test discovery passes in scripts/ and serial_scripts"
echo ""
# Honour a caller-provided OS_TEST_PATH; otherwise default per tree below.
GIVEN_TEST_PATH=$OS_TEST_PATH
# NOTE(review): this seeds PYTHONPATH with $PATH (not $PYTHONPATH) -- looks
# like a typo, though the extra entries are harmless to Python; confirm.
export PYTHONPATH=$PATH:$PWD/scripts:$PWD/fixtures
export OS_TEST_PATH=${GIVEN_TEST_PATH:-./scripts}; testr list-tests || exit 1
export PYTHONPATH=$PATH:$PWD/serial_scripts:$PWD/fixtures
export OS_TEST_PATH=${GIVEN_TEST_PATH:-./serial_scripts}; testr list-tests || exit 1
| true
|
351c355874922fbaf88b5ae412f1daa4eb51a36c
|
Shell
|
QuasarApp/sdkmanager-android
|
/scripts/start.sh
|
UTF-8
| 1,698
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Snap wrapper around the Android sdkmanager: migrates an SDK installed in
# the legacy $HOME/AndroidSDK location into the user's real home directory
# (leaving a compatibility symlink), then execs the bundled sdkmanager with
# the chosen --sdk_root. All original arguments are forwarded.
EXTERNAL_HOME=/home/$USER
if [ "$USER" = "root" ]
then
  EXTERNAL_HOME=/root
fi
OLD_SDK_ROOT=$HOME/AndroidSDK
SDK_ROOT=$EXTERNAL_HOME/AndroidSDK
export JAVA_HOME="$SNAP/usr/lib/jvm/java-11-openjdk-amd64"
export ANDROID_SDK_HOME="$HOME/.Android"
export _JAVA_OPTIONS=-Duser.home=$HOME
# Honour an explicit --sdk_root=<path> argument if the caller passed one
# (the value starts after the 11-character "--sdk_root=" prefix).
for var in "$@"
do
  if [[ $var == *"--sdk_root"* ]]; then
    SDK_ROOT=${var:11}
  fi
done
# Migrate only when the old location is a real, non-empty directory
# (not the compatibility symlink created by a previous run).
if [ -d "$OLD_SDK_ROOT" ] && [ ! -L "$OLD_SDK_ROOT" ] && [ "$(ls -A $OLD_SDK_ROOT)" ]
then
  echo "The default sdk folder has been changed from $OLD_SDK_ROOT to $SDK_ROOT ."
  echo "The Old location of sdk root have big problems with update the androidsdk package."
  echo "For more information about this problem see the https://github.com/QuasarApp/sdkmanager-android/issues/2 issue."
  echo ""
  if [ ! -d $SDK_ROOT ]
  then
    mkdir $SDK_ROOT
  else
    if [ "$(ls -A $SDK_ROOT)" ]; then
      echo "The $SDK_ROOT dir is not empty. Please move all data to an another location and run the androdisdk again"
      exit 1
    else
      # The target exists but is empty: remove it so 'mv' recreates it below.
      rm -d $SDK_ROOT
    fi
  fi
  mv $OLD_SDK_ROOT $EXTERNAL_HOME
  # BUGFIX: capture mv's exit code; 'status' was never assigned before, so
  # the failure branch executed a bare 'exit $status' with an empty value.
  status=$?
  if [ "$status" = "0" ]; then
    echo "All your installed sdk files has been moved to new location."
    echo "If you want change a sdk location please use the --sdk_root option."
    # Leave a symlink behind so tools using the old path keep working.
    ln -s $SDK_ROOT $OLD_SDK_ROOT
  else
    echo "Unknown error occurred."
    echo "Please report this issue to us. You can report a problem here:"
    echo "https://github.com/QuasarApp/sdkmanager-android/issues/new"
    exit $status
  fi
fi
echo "SDK_ROOT=$SDK_ROOT"
exec "$SNAP/cmdline-tools/bin/sdkmanager" "--sdk_root=$SDK_ROOT" "$@"
| true
|
6425c929f989a00e88101c72c9f1b8fae0eae6d6
|
Shell
|
oleg-alexandrov/olegmisc
|
/bin/show_matchfile.sh
|
UTF-8
| 961
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Open a stereo match file in stereo_gui together with the left/right images
# it refers to. The image paths are recovered by parsing the most recent
# bundle_adjust/stereo_pprc log next to the match file; if that fails, the
# names embedded in the match filename get a guessed ".tif" extension.
if [ "$#" -lt 1 ]; then echo Usage: $0 match-file.match; exit; fi
f=$1
# Newest log file in the match file's directory that mentions
# bundle_adjust or stereo_pprc (first column = its path).
log=$(ls -alh -rtd $(dirname $f)/*log* |grep -E "bundle_adjust|stereo_pprc" | tail -n 1 | ~/bin/print_col.pl 0)
# Left/right image stems embedded in the match filename
# ("<prefix>-<left>__<right>[-clean].match").
l=$(echo $f | perl -p -e "s#^.*?\/##g" | perl -p -e "s#^.*?-(.*?)__.*?\$#\$1#g" | perl -p -e "s#disp-##g")
r=$(echo $f | perl -p -e "s#^.*?\/##g" | perl -p -e "s#^.*?-.*?__(.*?)(|-clean).match\$#\$1#g")
#echo 1 $l $r
l1=$l
r1=$r
#echo $log
# Resolve the stems to full image paths recorded in the log.
l=$(grep -E "bundle_adjust|stereo_pprc" $log | perl -p -e "s#\s#\n#g" | grep $l | grep -i -E ".cub|.ntf|.tif")
r=$(grep -E "bundle_adjust|stereo_pprc" $log | perl -p -e "s#\s#\n#g" | grep $r | grep -i -E ".cub|.ntf.|.tif")
# This is a guess, just append a .tif extension. Ideally we should do
# ls ${l}* and pick the earliest matching pattern.
if [ "$l" = "" ]; then l="$l1.tif"; fi
if [ "$r" = "" ]; then r="$r1.tif"; fi
#echo 2 $l $r
echo stereo_gui $l $r $f
# Filter the harmless fontconfig warnings from the GUI output.
stereo_gui $l $r $f 2>&1 |grep -i -v fontconfig
| true
|
203d14b200f77ca62e00e9be9a9a09733a5a61bc
|
Shell
|
MiT-HEP/MonoX
|
/monophoton/fakemet/injection_test.sh
|
UTF-8
| 1,076
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run 20 fake-MET injection fits: build an injected dataset, create the fit
# workspace, run combine's FitDiagnostics, plot the result and accumulate
# the fitted normalizations into norms.dat.
# Args: $1 source suffix, $2 signal strength, $3 fake-MET count.
# edit parameters_fakemet.py to point sourcename = injection.root and outname = 'workspace.root' cardname = 'datacard.dat'
SOURCE="$1"
SIGS="$2"
FAKEN="$3"
# NOTE(review): IJOB reuses $3 (same value as FAKEN); presumably this was
# meant to be $4 -- confirm against the submission wrapper.
IJOB=$3
HIST=/data/t3home000/$USER/monophoton
FAKEMET=$MONOPHOTON/fakemet
# NOTE(review): $THISDIR is never set here, so this expands to /norms.dat.
rm -f $THISDIR/norms.dat
echo $HIST
echo $FAKEMET
echo $PWD
i=0
while [ $i -lt 20 ]
do
python $FAKEMET/injection_test.py $HIST/plots/gghg${SOURCE}.root $HIST/plots/gghg.root $SIGS $FAKEN $PWD/injection.root
python $FAKEMET/../fit/workspace.py $FAKEMET/../fit/parameters_fakemet.py
combine $PWD/datacard.dat -M FitDiagnostics --saveNormalizations --saveShapes --saveWithUncertainties
mkdir -p $HIST/fakemet/${SOURCE}/fits_${SIGS}_${FAKEN}_${IJOB}
python $FAKEMET/plotfit.py fitDiagnostics.root $HIST/plots/gghg${SOURCE}.root ${SOURCE}/fits_${SIGS}_${FAKEN}_${IJOB}/${i} $SIGS $FAKEN
# Retry the iteration (without incrementing i) when plotting failed.
[ $? -eq 0 ] || continue
echo $SIGS $FAKEN $(python $FAKEMET/fetch_norm.py $PWD/fitDiagnostics.root) >> $PWD/norms.dat
i=$(($i+1))
done
mv $PWD/norms.dat $HIST/fakemet/${SOURCE}/norms_${SIGS}_${FAKEN}_${IJOB}.dat
| true
|
1aa1d3ee36bd5abc483b2d822cba6376453f74a4
|
Shell
|
ahheckel/RaspiCloud
|
/install/runscrpt.sh
|
UTF-8
| 1,023
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Watchdog wrapper: (re)start the script given as $1 as a single instance,
# using a $HOME/.<name>.lock file plus pgrep to detect running copies.
# For getgps.sh specifically, a lock older than 15 minutes is treated as
# stale: the stuck processes are killed and the lock removed.
scrpt="$1"
scrptname=$(basename "$scrpt")
if ! pgrep -f "^bash ${scrpt}" > /dev/null ; then
# No process found: clean up any leftover lock and start the script.
echo "[${scrptname} : no instance is running (0)]"
rm -f $HOME/.${scrptname}.lock
"${scrpt}"
elif [ ! -f $HOME/.${scrptname}.lock ] ; then
# Processes exist but the lock is gone: kill the orphans and restart.
echo "[${scrptname} : no instance is running (1)]"
for i in $(pgrep -f "^bash ${scrpt}") ; do
echo "killing pid $i ..."
kill -9 $i
done
"${scrpt}"
else
echo "[${scrptname} : an instance is running]"
if [ ${scrptname} = "getgps.sh" ] ; then
if [ -f $HOME/.${scrptname}.lock ] ; then
# Age of the lock file in seconds (mtime vs now).
t0=$(stat -c %Y $HOME/.${scrptname}.lock);
t1=$(date +%s);
dt=$(echo "$t1 - $t0" | bc -l);
echo "[${scrptname} : ...running since $dt seconds.]"
if [ $dt -gt 900 ] ; then
# Stale lock (>15 min): kill the GPS reader and the script, drop the lock.
echo "[${scrptname} : $HOME/.${scrptname}.lock file is old... deleting it.]"
killall -9 termux-location
killall -9 ${scrptname}
for i in $(pgrep -f "^bash ${scrpt}") ; do
kill -9 $i
done
rm -f $HOME/.${scrptname}.lock
fi
fi
exit 1
fi
fi
| true
|
dab52d422397f9570062f464840e838cc183461b
|
Shell
|
tttapa/EAGLE-ZYBO-Linux
|
/scripts/install-python-module.sh
|
UTF-8
| 1,224
| 4.15625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -ex
# This scripts builds the Python module in setup.py, and then installs or
# upgrades to the latest version using Pip (at user level).
#
# The first command line argument should be the Python executable.
#
# Example usage:
# Install-Python.sh /usr/bin/python3
# Install-Python.sh python3.7
python_bin=$1
# Interpreter version as "3.7" and "37", plus machine architecture, used to
# select the matching wheel filename below.
python_version_dot=$(${python_bin} -c 'import sys; print(sys.version_info[0], sys.version_info[1], sep=".")')
python_version_no_dot=$(${python_bin} -c 'import sys; print(sys.version_info[0], sys.version_info[1], sep="")')
python_arch=$(${python_bin} -c 'import platform; print(platform.machine())')
echo "Using Python $python_version_dot (${python_bin})"
# Install the Wheel module
$python_bin -m pip install --user --quiet wheel
# Build the Wheel package
$python_bin setup.py bdist_wheel
# List all Wheels compatible with this Python version
# NOTE(review): the pattern hardcodes the "cp3Xm" ABI tag, which only exists
# up to Python 3.7 -- confirm for newer interpreters.
wheels=($(ls dist/*-cp${python_version_no_dot}-cp${python_version_no_dot}m-linux_${python_arch}.whl))
# Sort them by version number
wheels=($(sort --version-sort <<< ${wheels[*]}))
echo "${wheels[*]}"
# Select the last element (newest version)
wheel=${wheels[-1]}
# Install the package
$python_bin -m pip install --user --upgrade "$wheel"
| true
|
dc74c3e90058fe7f342927822a79c82d8717bc89
|
Shell
|
it-gro/dckr-hdp-sn
|
/hdp-hadoop/build/users.sh
|
UTF-8
| 430
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
## ##################################################
# Create the hadoop and hdfs system groups/users with fixed IDs and home
# directories under $HDP_USER_HOMES. Expects HADOOP_USER, HADOOP_USER_ID,
# HDFS_USER, HDFS_USER_ID and HDP_USER_HOMES in the environment (set by
# the Docker build).
#
test -d ${HDP_USER_HOMES} || mkdir -p ${HDP_USER_HOMES}
groupadd -g $HADOOP_USER_ID $HADOOP_USER
useradd -u $HADOOP_USER_ID -g $HADOOP_USER_ID -s '/bin/bash' -m -d ${HDP_USER_HOMES}/$HADOOP_USER $HADOOP_USER
groupadd -g $HDFS_USER_ID $HDFS_USER
useradd -u $HDFS_USER_ID -g $HDFS_USER_ID -s '/bin/bash' -m -d ${HDP_USER_HOMES}/$HDFS_USER $HDFS_USER
| true
|
f6bd6a8ab667bfb272d8c2a8029e9850e7eba8fc
|
Shell
|
maitesin/dot-files
|
/scripts/install_polybar.sh
|
UTF-8
| 775
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build and install polybar 3.5.4 from source on Debian/Ubuntu: install the
# build dependencies via apt, download and unpack the release tarball into a
# temp dir, then cmake/make/make-install.
set -e
sudo apt install build-essential git cmake cmake-data pkg-config python3-sphinx libcairo2-dev libxcb1-dev libxcb-util0-dev libxcb-randr0-dev libxcb-composite0-dev python3-xcbgen xcb-proto libxcb-image0-dev libxcb-ewmh-dev libxcb-icccm4-dev
sudo apt install libxcb-xkb-dev libxcb-xrm-dev libxcb-cursor-dev libasound2-dev libpulse-dev i3 libjsoncpp-dev libmpdclient-dev libcurl4-openssl-dev libnl-genl-3-dev
# Download the latest version of the polybar (check if a newest version is available)
folder=$(mktemp -d)
# NOTE(review): pushd has no matching popd; the temp dir is never removed.
pushd "${folder}" || exit
wget https://github.com/polybar/polybar/releases/download/3.5.4/polybar-3.5.4.tar.gz
tar vxzf polybar* && rm polybar*.tar.gz
cd polybar*
mkdir build && cd build && cmake .. && make && sudo make install
| true
|
f71d3f0e87064a910af0743e2824406c7d01547b
|
Shell
|
pratyush-1997/linux
|
/cpu.sh
|
UTF-8
| 414
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a one-shot system snapshot: memory/disk/CPU usage, network
# interface statistics, and the list of active logged-in users.
printf "\n\n Memory\t\t Disk\t\t CPU \n"
# BUGFIX: 'VAR = value' (spaces around '=') is not an assignment in shell --
# it tried to execute commands named MEMORY/DISK/CPU. No spaces below.
MEMORY=$(free -m | awk 'NR==2{printf "%.2f%%\t\t",$3*100/$2 }')
DISK=$(df -h | awk '$NF == "/"{printf "%s\t\t",$5}')
CPU=$(top -bn1 | grep load | awk '{printf "%.2f%%\t\t\n", $(NF-2)}')
echo "$MEMORY $DISK $CPU"
printf "\n\nNetwork Usage\n"
ip -s -h link
printf "\n\nActive Logged-in Users\n"
# BUGFIX: cut was missing the space between -d' ' and -f1.
who | cut -d' ' -f1 | sort | uniq
| true
|
f40fed2014de33549918331d55586f28b08cdf80
|
Shell
|
hoanghaivnn/thuctap012017
|
/XuanSon/OpenStack/Install OpenStack/Ocata/Install_OPS_with_Linuxbridge/scripts/ctl-1-environment.sh
|
UTF-8
| 1,776
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
#Author Son Do Xuan
# Prepare an OpenStack Ocata controller node: install crudini, NTP (chrony),
# the OpenStack client, MariaDB, RabbitMQ and Memcached, configuring each
# for the management IP/credentials defined in config.sh.
source function.sh
source config.sh
# Install crudini
echocolor "Install crudini"
sleep 3
apt-get install -y crudini
# Update and upgrade for controller
echocolor "Update and Update controller"
sleep 3
apt-get update -y&& apt-get upgrade -y
# Install and config NTP
echocolor "Install NTP"
sleep 3
apt-get install chrony -y
ntpfile=/etc/chrony/chrony.conf
# Replace the Debian NTP pool with Asian pool servers.
sed -i 's/pool 2.debian.pool.ntp.org offline iburst/ \
server 0.asia.pool.ntp.org iburst \
server 1.asia.pool.ntp.org iburst \
server 2.asia.pool.ntp.org iburst/g' $ntpfile
# Allow the management subnet to sync time from this node.
echo "allow 10.10.10.0/24" >> $ntpfile
service chrony restart
# OpenStack packages (python-openstackclient)
echocolor "Install OpenStack client"
sleep 3
apt-get install software-properties-common -y
add-apt-repository cloud-archive:ocata -y
apt-get update -y && apt-get dist-upgrade -y
apt-get install python-openstackclient -y
# Install SQL database (Mariadb)
echocolor "Install SQL database - Mariadb"
sleep 3
apt-get install mariadb-server python-pymysql -y
sqlfile=/etc/mysql/mariadb.conf.d/99-openstack.cnf
touch $sqlfile
# MariaDB settings recommended by the OpenStack install guide, bound to the
# controller's management IP from config.sh.
cat << EOF >$sqlfile
[mysqld]
bind-address = $CTL_MGNT_IP
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
EOF
service mysql restart
# Install Message queue (rabbitmq)
echocolor "Install Message queue (rabbitmq)"
sleep 3
apt-get install rabbitmq-server -y
rabbitmqctl add_user openstack $RABBIT_PASS
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
# Install Memcached
echocolor "Install Memcached"
sleep 3
apt-get install memcached python-memcache -y
memcachefile=/etc/memcached.conf
# Bind memcached to the management IP instead of localhost.
sed -i 's|-l 127.0.0.1|'"-l $CTL_MGNT_IP"'|g' $memcachefile
service memcached restart
| true
|
35abb0974aab9329e2039638a86472e8e48896de
|
Shell
|
mycaule/bash-scripts
|
/update_statuspage.sh
|
UTF-8
| 990
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Helpers for the statuspage.io v1 API, driven by HTTPie (the 'http'
# command): push fake historical metric data, list components, and set a
# component's status. Placeholder "..." values must be filled in first.
api_key="..."
page_id="..."
api_base="api.statuspage.io"
# Post 24h of random metric samples (one every 5 minutes, 288 points)
# for metric $1, working backwards from now.
function send_fake_metric {
metric_id=$1
for i in $(seq 1 288)
do
ts=$(($(date +%s)-i*300))
value=$(((RANDOM%99)+0))
http -h POST "https://$api_base/v1/pages/$page_id/metrics/$metric_id/data.json" data:="{\"timestamp\": $ts, \"value\": $value }" Authorization:"OAuth $api_key"
echo "$i/288"
# Throttle to one request per second.
sleep 1
done
}
## Update a component
## List components
function list_components {
http "https://$api_base/v1/pages/$page_id/components" Authorization:"OAuth $api_key"
}
# Set component $2 to status $1.
function set_status {
status=$1
# operational, under_maintenance, degraded_performance, partial_outage, major_outage
component_id=$2
http PATCH "https://$api_base/v1/pages/$page_id/components/$component_id" component:="{\"status\": \"$status\"}" Authorization:"OAuth $api_key"
}
# list_components
set_status "degraded_performance" "..."
set_status "operational" "..."
send_fake_metric "..."
send_fake_metric "..."
| true
|
dfcaef5c7d5e30d48b28123d60be86327e186d04
|
Shell
|
ponylang/rfcs
|
/.ci-scripts/pr-label.bash
|
UTF-8
| 2,218
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Announces a change in RFC status to based on label to LWIP
#
# Tools required in the environment that runs this:
#
# - bash
# - curl
# - jq
set -o errexit
# Verify ENV is set up correctly
# We validate all that need to be set in case, in an absolute emergency,
# we need to run this by hand. Otherwise the GitHub actions environment should
# provide all of these if properly configured
if [[ -z "${API_CREDENTIALS}" ]]; then
  echo -e "\e[31mAPI_CREDENTIALS needs to be set in env. Exiting.\e[0m"
  exit 1
fi
# no unset variables allowed from here on out
# allow above so we can display nice error messages for expected unset variables
set -o nounset
#
# Get label and see if it is a status label
# If it isn't a changelog label, let's exit.
#
LABEL=$(jq -r '.label.name' "${GITHUB_EVENT_PATH}")
# Keep only the recognised status suffixes; '|| true' keeps set -e happy
# when grep matches nothing.
STATUS_LABEL=$(
  echo "${LABEL}" |
  grep 'status - ' |
  grep -o -E 'new|final comment period|ready for vote' ||
  true
)
if [ -z "${STATUS_LABEL}" ];
then
  echo -e "\e[34m'${LABEL}' isn't a status label. Exiting.\e[0m"
  exit 0
fi
PR_TITLE=$(jq -r '.pull_request.title' "${GITHUB_EVENT_PATH}")
PR_HTML_URL=$(jq -r '.pull_request.html_url' "${GITHUB_EVENT_PATH}")
# Update Last Week in Pony
echo -e "\e[34mAdding RFC status change to Last Week in Pony...\e[0m"
# Find the currently-open LWIP issue by its label.
result=$(curl https://api.github.com/repos/ponylang/ponylang-website/issues?labels=last-week-in-pony)
lwip_url=$(echo "${result}" | jq -r '.[].url')
if [ "$lwip_url" != "" ]; then
  body="
The '${PR_TITLE}' RFC has been updated to '${STATUS_LABEL}'
See the [RFC](${PR_HTML_URL}) for more details.
"
  # Build the JSON payload with jq so the body is safely escaped.
  jsontemplate="
  {
    \"body\":\$body
  }
  "
  json=$(jq -n \
    --arg body "$body" \
    "${jsontemplate}")
  result=$(curl -s -X POST "$lwip_url/comments" \
    -H "Content-Type: application/x-www-form-urlencoded" \
    -u "${API_CREDENTIALS}" \
    --data "${json}")
  # A comment id in the response means the POST succeeded.
  rslt_scan=$(echo "${result}" | jq -r '.id')
  if [ "$rslt_scan" != null ]; then
    echo -e "\e[34mRFC status update posted to LWIP\e[0m"
  else
    echo -e "\e[31mUnable to post to LWIP, here's the curl output..."
    echo -e "\e[31m${result}\e[0m"
  fi
else
  echo -e "\e[31mUnable to post to Last Week in Pony."
  echo -e "Can't find the issue.\e[0m"
fi
| true
|
2860b3210959fa99e650b8c99d6fc07d07667993
|
Shell
|
kw90/dotfiles
|
/polybar/install.debian.sh
|
UTF-8
| 837
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Symlink the dotfiles polybar configuration and helper scripts into
# ~/.config/polybar, install the Perl FreeType font bindings, and launch
# polybar. Uses echo_info/echo_done helpers from ../helpers.sh.
. ../helpers.sh
echo_info "SymLinking polybar config and spells"
mkdir -p ~/.config/polybar
# -sfT: force-replace the link target itself, never descend into it.
ln -sfT ~/source/dotfiles/polybar/polybar.conf ~/.config/polybar/polybar.conf
ln -sfT ~/source/dotfiles/polybar/launch-polybar.debian.sh ~/.config/polybar/launch-polybar.sh
ln -sfT ~/source/dotfiles/polybar/spotify-artist.sh ~/.config/polybar/spotify-artist.sh
ln -sfT ~/source/dotfiles/polybar/spotify-track.sh ~/.config/polybar/spotify-track.sh
ln -sfT ~/source/dotfiles/polybar/spotify-status.py ~/.config/polybar/spotify-status.py
echo_done "polybar config applied"
echo_info "Installing fonts in local lib"
sudo cpan CPAN
perl -MCPAN -e 'install Font::FreeType'
echo_done "fonts installed successfully"
echo_info "Launching polybar"
bash ~/.config/polybar/launch-polybar.sh
echo_done "polybar launched successfully"
| true
|
69ee053fcd480ea6da597f680bbac073840fcc48
|
Shell
|
daiwa233/blog
|
/deploy.sh
|
UTF-8
| 728
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
###
# @Author: your name
# @Date: 2020-02-13 08:46:03
# @LastEditTime : 2020-02-14 15:02:43
# @LastEditors : Please set LastEditors
# @Description: In User Settings Edit
# @FilePath: \vuepress-theme-dva-devloping\deploy.sh
###
# BUGFIX: the shebang must be the very first line of the file; it was
# previously buried below the header block and therefore ignored.
#
# Build the VuePress site and force-push the generated output to the
# GitHub Pages repository.

# Abort on any error.
set -e

# Build the static site.
npm run docs:build

# Enter the generated output directory.
cd docs/.vuepress/dist

# If deploying to a custom domain:
# echo 'www.example.com' > CNAME

git init
git add -A
git commit -m 'deploy'

# Deploy to https://<USERNAME>.github.io
git push -f git@github.com:lhj233/lhj233.github.io.git master

# Deploy to https://<USERNAME>.github.io/<REPO>
# git push -f git@github.com:<USERNAME>/<REPO>.git master:gh-pages

cd -
| true
|
6e9c85be697b4111dc28ec0b98c4dee7c384ee84
|
Shell
|
mephi42/deluged-via-proxy
|
/debug
|
UTF-8
| 421
| 2.8125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/sh
# Build the base, deluged and deluged-debug images in sequence, then run an
# interactive privileged debug container on the "deluge" network with the
# persistent state directory mounted in.
set -e -u -x
cd "$(dirname "$0")"
# Each ./docker-build prints the resulting image id, which seeds the next build.
base="$(./docker-build images/base)"
deluge="$(./docker-build images/deluged --build-arg=base="$base")"
exec docker run \
	--interactive \
	--tty \
	--privileged \
	--volume="$(realpath ~/var-lib-deluged):/var/lib/deluged" \
	--dns=8.8.8.8 \
	--network=deluge \
	--init \
	"$(./docker-build images/deluged-debug --build-arg=base="$deluge")" \
	bash
| true
|
784b489c56240b7422ca04d8f9f5d1a067250f9a
|
Shell
|
lianfeng30/work-scripts
|
/script/check_houyi_tdc.sh
|
UTF-8
| 3,702
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/sh
###################################################
#
# Type : State Monitor Plugin
# Function : check houyi tdc status
# Usage : ./check_houyi_tdc.sh
# Creator : shikun.tiansk Date : 2011-10-30
# Modifier : Date :
#
###################################################
# Nagios-style check: compares the number of running td_connector processes
# against the number of tapdisk2 processes (expected: one tdc per tapdisk2
# plus the main tdc from /var/run/tdc/tdc.pid) and checks each tdc's RSS
# against the memory limits. Samples up to 5 times, 1s apart, before
# reporting the last observed error state.
# Memory limits are RSS in KB as reported by ps.
# NOTE(review): messages below say 300MB/200MB but the limits are 800M/600M
# -- the text appears stale; confirm which is intended.
critical_mem_limit=800000 # 800M
warning_mem_limit=600000 # 600M
# $i starts unset; inside [[ ]] an empty operand of -lt evaluates as 0.
while [[ $i -lt 5 ]];do
tapdisk2_list=`ps auxf | grep tapdisk2 | grep "storage_.*:{conf" | awk {'print $13'}`
tapdisk2_num=`echo "$tapdisk2_list" | wc -w`
tdc_list_all=`ps auxf | grep td_connector | grep -v grep`
tdc_list=`echo "$tdc_list_all" | awk {'print $2'}`
tdc_mem_list=`echo "$tdc_list_all" | awk {'print $6'}`
tdc_list_pid_mem=`echo "$tdc_list_all" | awk {'print $2, $6'}`
tdc_num=`echo "$tdc_list" | wc -w`
# PID of the main tdc process, verified to actually be running.
tdc_main_var=`cat /var/run/tdc/tdc.pid`
tdc_main_pid=`ps auxf | grep td_connector | grep -v grep | awk {'print $2'} | grep $tdc_main_var `
if [ "$tapdisk2_list" = "" ]; then
if [ "$tdc_list" = "" ]; then
echo "OK - There is no tapdisk2 and td_connector."
exit 0;
else
if [ "$tdc_main_pid" = "" ]; then
error_state=1
else
echo "OK - There is no tapdisk2, td_connector main process exists."
exit 0;
fi
fi
else
if [ "$tdc_main_pid" = "" ]; then
# Main tdc missing: classify by worker-count mismatch.
if [ $((tdc_num)) -eq $((tapdisk2_num)) ]; then
error_state=2
elif [ $((tdc_num)) -lt $((tapdisk2_num)) ]; then
error_state=3
else
error_state=4
fi
else
# Main tdc present: expect tapdisk2_num workers + 1 main process.
if [ $((tdc_num)) -lt $((tapdisk2_num+1)) ]; then
error_state=5
elif [ $((tdc_num)) -gt $((tapdisk2_num+1)) ]; then
error_state=6
else
# Counts are right; check per-process memory against the limits.
for tdc_mem in $tdc_mem_list
do
if [ $((tdc_mem)) -gt $((warning_mem_limit)) ]; then
pid_tdc=`echo "$tdc_list_pid_mem" | grep "$tdc_mem" | awk {'print $1'}`
if [ $((tdc_mem)) -gt $((critical_mem_limit)) ]; then
echo "Critical - td_connector main process exists, and number of tapdisk2 and td_connector are right, but memory of pid[$pid_tdc] is more than 300MB"
else
echo "Warning - td_connector main process exists, and number of tapdisk2 and td_connector are right, but memory of pid[$pid_tdc] is more than 200MB"
fi
exit 0; # no need to retry when memory is too much, and only deal with the first pid with too much memory
fi
done
echo "OK - td_connector main process exists, and number of tapdisk2 and td_connector are right."
exit 0;
fi
fi
fi
sleep 1
((i++));
done;
# Five consecutive samples failed; report the last recorded error state.
if [ $error_state -eq 1 ]; then
echo "Warning - There is no tapdisk2, but td_connector main process does not exists."
exit 1;
elif [ $error_state -eq 2 ]; then
echo "Warning - td_connector main process does not exist, and number of tapdisk2 and td_connector are right."
exit 1;
elif [ $error_state -eq 3 ]; then
echo "Critical - td_connector main process does not exist, and td_connector is too few."
exit 2;
elif [ $error_state -eq 4 ]; then
echo "Warning - td_connector main process does not exist, and td_connector is too much."
exit 1;
elif [ $error_state -eq 5 ]; then
echo "Critical - td_connector main process exists, but td_connector is too few."
exit 2;
elif [ $error_state -eq 6 ]; then
echo "Warning - td_connector main process exists, and td_connector is too much."
exit 1;
else
echo "Warning - unknow error."
exit 1;
fi
exit 0;
| true
|
30e4f8c220f6e43ca38b3229d76cec9608b402c9
|
Shell
|
flomotlik/StudentLife-grails
|
/install-grails.sh
|
UTF-8
| 332
| 2.578125
| 3
|
[] |
no_license
|
# Download Grails 1.2.1, install it under /opt with a /opt/grails symlink,
# and register GRAILS_HOME/PATH in /etc/environment. Must run as root.
# NOTE(review): no shebang -- relies on being invoked via an explicit shell.
export DOWNLOAD="target/download"
mkdir -p $DOWNLOAD
cd $DOWNLOAD
# -nc: skip the download if the archive is already present.
wget -nc http://dist.codehaus.org/grails/grails-1.2.1.zip
unzip -u grails-1.2.1.zip
mv grails-1.2.1 /opt
# NOTE(review): rm -i prompts interactively, which will hang in a
# non-interactive run -- confirm -f was intended.
rm -i /opt/grails
ln -s /opt/grails-1.2.1 /opt/grails
# NOTE(review): this bakes the *current* $PATH into /etc/environment.
echo "GRAILS_HOME=/opt/grails" >> /etc/environment
echo "PATH=$PATH:/opt/grails/bin" >> /etc/environment
| true
|
4422aca4864d1e221805ecc2fe8f94be1f9a60a6
|
Shell
|
jeromebaudot/SiTrInEo
|
/Mimosa-private-setup.sh
|
UTF-8
| 1,160
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Environment setup for the Mimosa/SiTrInEo Geant4 simulation: selects
# compilers, sources Geant4 and ROOT 5.34 environments, and exports the
# Qt3/CLHEP/digitizer paths used by the build.
# NOTE(review): contains user-specific absolute paths (/home/jongho) --
# must be adapted per machine.
### Basic environment setup ####
export CXX=`which g++`
export CC=`which gcc`
export GEANT4DIR="/usr/local/Geant4"
#### Geant4 environment setup ####
source $GEANT4DIR/share/Geant4-10.4.2/geant4make/geant4make.sh
##source $GEANT4DIR/../setup_g4datasets.sh
#### Library path setup ####
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib64
export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$LIBRARY_PATH:/usr/lib64
#### Root 5.34 version setup ####
source /home/jongho/Software/root_version5/root/bin/thisroot.sh
#### Qt environments setup ####
export QTDIR="/usr/lib64/qt-3.3"
export QTINC="/usr/lib64/qt-3.3/include"
export QTLIB="/usr/lib64/qt-3.3/lib"
#### CLHEP environment setup ####
export CLHEP_BASE_DIR="/usr/local"
export CLHEP_INCLUDE_DIR="/usr/local/include"
export CLHEP_LIB_DIR="/usr/local/lib"
### Set environments for complie ###
export DIGI_DIR="/home/jongho/Analysis/SiTrInEo/MIMOSA_DIGITIZER/trunk"
export GEANT4_BUILD_DIR="/home/jongho/Software/geant4/build"
source /home/jongho/Analysis/SiTrInEo/TestBeam_Geant4Simu_MagField/trunk/geant4make.sh
echo "Path to build directory $GEANT4_BUILD_DIR "
| true
|
7c2e89402e60f93109687063b266c0647500c08f
|
Shell
|
podioss/Weka_jobs
|
/parser/make_report.sh
|
UTF-8
| 3,290
| 3.125
| 3
|
[] |
no_license
|
# Aggregate a per-job report for Weka k-means runs: for every combination of
# dataset size / dimensions / clusters / iterations, parse the recorded
# memory/iostat/time metrics and emit four CSV data points (time, average
# memory, disk reads, disk writes) under the job's model_data directory.
# NOTE(review): no shebang -- relies on being invoked via an explicit shell.
#This script is made to sum up a report for a kmeans job submitted to Weka
#It depends on metricsparser.py and memparser.py scripts
METRICSPARSER="./metricsparser.py"
MEMPARSER="./memparser.py"
for ds in 75 #dataset sizes
do
for d in 10 50 100 250 500 750 1000 #dimensions
do
for c in 10 100 200 300 400 500 600 700 800 900 1000 1200 1400 1600 #clusters
do
for i in 10 #iterations
do
BASE_DIR="/opt/Weka_jobs/4cores_4GBram/ds${ds}/dim${d}/clus${c}/iter${i}"
PARSED="$BASE_DIR/parsed"
MODEL_DATA="$BASE_DIR/model_data"
mkdir -p $PARSED #folder to store the parsed metrics
#parse all the memory metrics at first
$METRICSPARSER $BASE_DIR/mem_cached $PARSED/mem_cached
$METRICSPARSER $BASE_DIR/mem_free $PARSED/mem_free
$METRICSPARSER $BASE_DIR/mem_buffers $PARSED/mem_buffers
#copy the iostat and time metric to the parsed folder
cp $BASE_DIR/iostat_csv $PARSED/iostat_csv
tail -1 $BASE_DIR/time > $PARSED/time
#cut to take only the mem fields
cut -d',' -f2 $PARSED/mem_free > $PARSED/mem_free_tmp
cut -d',' -f2 $PARSED/mem_cached > $PARSED/mem_cached_tmp
cut -d',' -f2 $PARSED/mem_buffers > $PARSED/mem_buffers_tmp
#chop the first 3 lines and the last two from the file
head -n -2 $PARSED/mem_free_tmp | tail -n +3 > $PARSED/mem_free_tmp1
head -n -2 $PARSED/mem_cached_tmp | tail -n +3 > $PARSED/mem_cached_tmp1
head -n -2 $PARSED/mem_buffers_tmp | tail -n +3 > $PARSED/mem_buffers_tmp1
# Join the three columns into one CSV for the averaging script.
paste -d"," $PARSED/mem_free_tmp1 $PARSED/mem_cached_tmp1 $PARSED/mem_buffers_tmp1 > $PARSED/mem_all
rm -f $PARSED/{mem_buffers_tmp,mem_cached_tmp,mem_free_tmp}
rm -f $PARSED/{mem_buffers_tmp1,mem_cached_tmp1,mem_free_tmp1}
$MEMPARSER $PARSED/mem_all $PARSED/average_mem_used
#create the 4 final data point for the job
mkdir -p $MODEL_DATA
POINTS=75000 #$(echo "10^$ds" | bc)
# Sum the per-device read (field 5) and write (field 6) columns of iostat.
DISK_IO_IN=`cut -d',' -f5 $PARSED/iostat_csv | paste -sd+ | bc`
DISK_IO_OUT=`cut -d',' -f6 $PARSED/iostat_csv | paste -sd+ | bc`
echo "data_points,dimensions,k,time"> $MODEL_DATA/time_data
echo "data_points,dimensions,k,mem_used_kb"> $MODEL_DATA/average_mem_data
echo "data_points,dimensions,k,diskio_in"> $MODEL_DATA/diskio_in_data
echo "data_points,dimensions,k,diskio_out"> $MODEL_DATA/diskio_out_data
#echo $DISK_IO_IN
#echo $DISK_IO_OUT
echo "$POINTS,$d,$c,`cat $PARSED/time`" >> $MODEL_DATA/time_data
echo "$POINTS,$d,$c,`cat $PARSED/average_mem_used`" >> $MODEL_DATA/average_mem_data
echo "$POINTS,$d,$c,$DISK_IO_IN" >> $MODEL_DATA/diskio_in_data
echo "$POINTS,$d,$c,$DISK_IO_OUT" >> $MODEL_DATA/diskio_out_data
done
done
done
done
| true
|
7106c42f2d0db5cdf78b477d5f231281e9c846a5
|
Shell
|
silviodonato/PisaHmm
|
/workspace/fitAll.sh
|
UTF-8
| 342
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/sh
# Launch fitMass.sh in the background for every mass point in ALLMASS,
# each inside its own MassScan<m> directory, logging to fitmass<YEAR>.
# Usage: fitAll.sh <year>
YEAR=$1
MASS=`seq 1251 1 1254`
MASS1=`seq 1256 1 1259`
MASS2=`seq 1200 5 1300`
ALLMASS=$MASS" "$MASS1" "$MASS2
#ALLMASS=`seq 1250 1 1260`
# NOTE(review): the lists above are overwritten here -- only mass point
# 12538 actually runs; remove this line to scan the full set.
ALLMASS=12538
#ALLMASS="1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260"
echo $ALLMASS
for m in $ALLMASS ; do
cd MassScan$m
../fitMass.sh $YEAR $m >& fitmass$YEAR &
cd -
done
| true
|
078f13fb9b347c8985af81ea66c6f586c2e71c0d
|
Shell
|
babithar47/my_first_project
|
/add.sh
|
UTF-8
| 66
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Add two integers given as the first two arguments and print the sum.
# Usage: add.sh <a> <b>
add() {
  # Use bash arithmetic instead of the external 'expr' command; default
  # missing operands to 0 so the script does not abort on absent arguments.
  echo $(( ${1:-0} + ${2:-0} ))
}
add "$@"
| true
|
128e3e8a355276c01d88761fc508e11b598629eb
|
Shell
|
EgorLu/dev-scripts
|
/run_ci.sh
|
UTF-8
| 7,003
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# CI entry point for dev-scripts: prepares the Jenkins build host, runs the
# deployment via `make`, and waits for the first worker node to become Ready.
set -ex

# grabs files and puts them into $LOGDIR to be saved as jenkins artifacts
# (installed as an EXIT trap further down, so logs are collected on every
# exit path, success or failure)
function getlogs(){
  LOGDIR=/home/notstack/dev-scripts/logs
  # Grab the host journal
  sudo journalctl > $LOGDIR/bootstrap-host-system.journal
  # Logs of the provisioning containers running on the bootstrap host
  for c in httpd-${PROVISIONING_NETWORK_NAME} machine-os-downloader ipa-downloader ; do
    sudo podman logs $c > $LOGDIR/$c.log || true
  done
  # And the VM journals and staticpod container logs
  # BM_SUB is the baremetal subnet prefix (trailing 0 stripped) used to pick
  # VM addresses out of the libvirt DHCP leases table.
  BM_SUB=""
  if [[ -z "${EXTERNAL_SUBNET_V4}" ]]; then
    BM_SUB=$(echo "${EXTERNAL_SUBNET_V6}" | cut -d"/" -f1 | sed "s/0$//")
  else
    BM_SUB=$(echo "${EXTERNAL_SUBNET_V4}" | cut -d"/" -f1 | sed "s/0$//")
  fi
  for HOST in $(sudo virsh net-dhcp-leases ${BAREMETAL_NETWORK_NAME} | grep -o "${BM_SUB}.*/" | cut -d"/" -f1) ; do
    # NOTE(review): "notworking" looks like a fixed throwaway CI password;
    # $SSH is presumably defined by the sourced environment — confirm.
    sshpass -p notworking $SSH core@$HOST sudo journalctl > $LOGDIR/$HOST-system.journal || true
    sshpass -p notworking $SSH core@$HOST sudo journalctl -u ironic.service > $LOGDIR/$HOST-ironic.journal || true
    for c in $(sshpass -p notworking $SSH core@$HOST sudo podman ps -a | grep -e ironic -e downloader -e httpd -e dnsmasq -e mariadb | awk '{print $NF}'); do
      sshpass -p notworking $SSH core@$HOST sudo podman logs $c > $LOGDIR/${HOST}-${c}-container.log || true
    done
  done
  # openshift info
  export KUBECONFIG=ocp/$CLUSTER_NAME/auth/kubeconfig
  oc --request-timeout=5s get clusterversion/version > $LOGDIR/cluster_version.log || true
  oc --request-timeout=5s get clusteroperators > $LOGDIR/cluster_operators.log || true
  oc --request-timeout=5s get pods --all-namespaces | grep -v Running | grep -v Completed > $LOGDIR/failing_pods.log || true
  # Baremetal Operator info: one log file per container/initContainer of the
  # metal3 pod.
  mkdir -p $LOGDIR/baremetal-operator
  BMO_POD=$(oc --request-timeout=5s get pods --namespace openshift-machine-api | grep metal3 | awk '{print $1}')
  BMO_CONTAINERS=$(oc --request-timeout=5s get pods ${BMO_POD} -n openshift-machine-api -o jsonpath="{.spec['containers','initContainers'][*].name}")
  for c in ${BMO_CONTAINERS}; do
    oc --request-timeout=5s logs ${BMO_POD} -c ${c} --namespace openshift-machine-api > $LOGDIR/baremetal-operator/${c}.log
  done
}
# Collect artifacts no matter how the job ends.
trap getlogs EXIT

# This is CI, no need to be cautious about data
# nosync LD_PRELOADs away sync calls to speed up package installs.
sudo dnf install -y /opt/data/nosync-1.0-2.el7.x86_64.rpm
echo /usr/lib64/nosync/nosync.so | sudo tee -a /etc/ld.so.preload

# Use /opt for data we want to keep between runs
# TODO: /opt has 1.1T but we'll eventually need something to clean up old data
sudo mkdir -p /opt/data/dnfcache /opt/data/imagecache /home/dev-scripts/ironic/html/images /opt/data/occache /home/dev-scripts/oc

# Make dnf store its cache on /opt so packages don't need to be downloaded for each job
echo keepcache=True | sudo tee -a /etc/dnf/dnf.conf
sudo mount -o bind /opt/data/dnfcache /var/cache/dnf

# Save the images directory between jobs
sudo mount -o bind /opt/data/imagecache /home/dev-scripts/ironic/html/images

# Save the oc client directory between jobs
sudo mount -o bind /opt/data/occache /home/dev-scripts/oc

sudo chown -R notstack /home/dev-scripts

# Point at our CI custom config file (contains the PULL_SECRET)
export CONFIG=/opt/data/config_notstack.sh

sudo yum install -y jq golang make unzip

# Clone the project being tested, "dev-scripts" will have been cloned in the jenkins
# job definition, for all others we do it here
if [ -n "$REPO" -a -n "$BRANCH" ] ; then
  pushd ~
  if [ ! -d ${BASE_REPO#*/} ] ; then
    git clone https://github.com/$BASE_REPO -b ${BASE_BRANCH:-master}
    cd ${BASE_REPO#*/}
    # Merge the PR branch under test on top of the base branch.
    git pull --no-edit https://github.com/$REPO $BRANCH
    git log --oneline -10 --graph
  fi
  popd
fi

# Project-specific actions. If these directories exist in $HOME, move
# them to the correct $GOPATH locations.
for PROJ in installer ; do
  [ ! -d /home/notstack/$PROJ ] && continue
  if [ "$PROJ" == "installer" ]; then
    export KNI_INSTALL_FROM_GIT=true
  fi
  # Set origin so that sync_repo_and_patch is rebasing against the correct source
  cd /home/notstack/$PROJ
  git branch -M master
  git remote set-url origin https://github.com/$BASE_REPO
  cd -
  # ${BASE_REPO/\/*} strips from the first '/' onward, leaving the org name.
  mkdir -p $HOME/go/src/github.com/${BASE_REPO/\/*}
  mv /home/notstack/$PROJ $HOME/go/src/github.com/$BASE_REPO
done

# If directories for the containers exists then we build the images (as they are what triggered the job)
if [ -d "/home/notstack/ironic-image" ] ; then
  export IRONIC_LOCAL_IMAGE=https://github.com/metal3-io/ironic-image
  export UPSTREAM_IRONIC=true
fi
if [ -d "/home/notstack/ironic-inspector-image" ] ; then
  export IRONIC_INSPECTOR_LOCAL_IMAGE=https://github.com/metal3-io/ironic-inspector-image
  export UPSTREAM_IRONIC=true
fi
if [ -d "/home/notstack/baremetal-runtimecfg" ] ; then
  export BAREMETAL_RUNTIMECFG_LOCAL_IMAGE=https://github.com/openshift/baremetal-runtimecfg
fi
if [ -d "/home/notstack/mdns-publisher" ] ; then
  export MDNS_PUBLISHER_LOCAL_IMAGE=https://github.com/openshift/mdns-publisher
fi

# coredns-mdns is unique because it is vendored into the openshift/coredns project
# and that is where the image gets built.
if [ -d "/home/notstack/coredns-mdns" ] ; then
  pushd /home/notstack
  git clone https://github.com/openshift/coredns
  cd coredns
  # Update the vendoring with our local changes
  GO111MODULE=on go mod edit -replace github.com/openshift/coredns-mdns=/home/notstack/coredns-mdns
  GO111MODULE=on go mod vendor
  popd
  export COREDNS_LOCAL_IMAGE=https://github.com/openshift/coredns
  export COREDNS_DOCKERFILE=Dockerfile.openshift
fi

# Some of the setup done above needs to be done before we source common.sh
# in order for correct defaults to be set
source common.sh

# Refuse to run interactively ($PS1 is only set for interactive shells).
if [ -n "$PS1" ]; then
  echo "This script is for running dev-script in our CI env, it is tailored to a"
  echo "very specific setup and unlikely to be usefull outside of CI"
  exit 1
fi

# Display the "/" filesystem mounted incase we need artifacts from it after the job
mount | grep root-

# Install terraform
if [ ! -f /usr/local/bin/terraform ]; then
  curl -O https://releases.hashicorp.com/terraform/0.12.2/terraform_0.12.2_linux_amd64.zip
  unzip terraform_*.zip
  sudo install terraform /usr/local/bin
  rm -f terraform_*.zip terraform
fi

# Run dev-scripts
set -o pipefail
# Hard-kill after 120 minutes; scrub pull-secret auth blobs from the output.
timeout -s 9 120m make |& sed -e 's/.*auths.*/*** PULL_SECRET ***/g'

# Deployment is complete, but now wait to ensure the worker node comes up.
export KUBECONFIG=ocp/$CLUSTER_NAME/auth/kubeconfig
# wait_for_worker PREFIX — block until a node whose name matches PREFIX
# registers with the cluster, then wait for it to reach the Ready condition.
# Uses `oc` via the $KUBECONFIG exported above.
wait_for_worker() {
  local worker_prefix=$1
  # Bug fix: the original message interpolated $worker, which is not
  # assigned until after the polling loop.
  echo "Waiting for worker $worker_prefix to appear ..."
  # Poll until a matching node shows up in `oc get nodes`.
  while [ "$(oc get nodes | grep "$worker_prefix")" = "" ]; do sleep 5; done
  local worker
  worker=$(oc get nodes | grep "$worker_prefix" | awk '{print $1}')
  local TIMEOUT_MINUTES=15
  echo "$worker registered, waiting $TIMEOUT_MINUTES minutes for Ready condition ..."
  # $(( )) replaces the deprecated $[ ] arithmetic syntax.
  oc wait node/"$worker" --for=condition=Ready --timeout=$(( TIMEOUT_MINUTES * 60 ))s
}

wait_for_worker worker-0
| true
|
47c8e3ec1d97464c36df89787bf464178e59e3e7
|
Shell
|
q0015300153/service-hive
|
/init.sh
|
UTF-8
| 1,530
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# One-time setup for the Hive/Hadoop service: fetch the required tarballs
# and JDBC drivers (skipping anything already downloaded), then create the
# data directories named in .env.

# tw: 載入 dotenv
# en: Load dotenv (helper script located next to this script)
. $(dirname "$0")/dotenv

# tw: 下載 Hadoop 與 Hive 檔案
# en: Download Hadoop and Hive files
if ! [ -f "./hadoop/hadoop.tar.gz" ]; then
  version=3.2.0
  wget -O ./hadoop/hadoop.tar.gz \
    https://www.apache.org/dist/hadoop/common/hadoop-${version}/hadoop-${version}.tar.gz
fi

if ! [ -f "./hive/apache-hive-bin.tar.gz" ]; then
  version=3.1.1
  wget -O ./hive/apache-hive-bin.tar.gz \
    https://www.apache.org/dist/hive/hive-${version}/apache-hive-${version}-bin.tar.gz
fi

# MariaDB JDBC driver is published as a plain jar.
if ! [ -f "./hive/mariadb-java-client.jar" ]; then
  version=2.4.3
  wget -O ./hive/mariadb-java-client.jar \
    https://downloads.mariadb.com/Connectors/java/connector-java-${version}/mariadb-java-client-${version}.jar
fi

# MySQL JDBC driver ships inside a tarball: download, extract the jar,
# then remove the archive and the unpacked directory.
if ! [ -f "./hive/mysql-connector-java.jar" ]; then
  version=8.0.17
  wget -O ./hive/mysql-connector-java.tar.gz \
    https://cdn.mysql.com//Downloads/Connector-J/mysql-connector-java-${version}.tar.gz
  tar zxvf ./hive/mysql-connector-java.tar.gz -C ./hive
  cp ./hive/mysql-connector-java-${version}/mysql-connector-java-${version}.jar \
    ./hive/mysql-connector-java.jar
  rm -r ./hive/mysql-connector-java.tar.gz
  rm -rf ./hive/mysql-connector-java-${version}
fi

# tw: 建立資料夾
# en: Create folder
# NOTE(review): `.env` appears to be a command supplied by the sourced
# dotenv helper that fills $REPLY with NAME=VALUE strings — confirm.
.env -f ./.env parse NAME_NODE DATA_NODE HIVE_DATA
for path in "${REPLY[@]}"; do
  # Split NAME=VALUE; skip entries whose value part is empty.
  IFS='=' read -ra param <<< "$path"
  if ! [[ -n "${param[1]}" ]]; then
    continue
  fi
  # Create the directory (relative to this script) if it does not exist.
  if ! [ -d "$(dirname "$0")/${param[1]}" ]; then
    mkdir -p "$(dirname "$0")/${param[1]}"
  fi
done
| true
|
357c64fa5f9dce00b81b1ba48c139cd926e24a2e
|
Shell
|
lijitha4/EVB-KSZ9477
|
/KSZ/ptp/bin/power.sh
|
UTF-8
| 904
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
# This script setups the VLAN for use with power profile.
#
# Usage: power.sh [VLAN_ID [ETH_INDEX [PRIORITY]]]
# Creates eth$ETH.$VLAN, maps egress priority 0 -> $PRIO, assigns an IPv4
# address inside the VLAN subnet, and records the VLAN id for the PTP
# driver when the sysfs knob exists.

# Defaults: eth0, VLAN 0, egress priority 4.
ETH=0
VLAN=0
PRIO=4

# Positional overrides: $1=VLAN id, $2=ethernet index, $3=priority.
if [ ! -z "$1" ]; then
  VLAN=$1
  if [ ! -z "$2" ]; then
    ETH=$2
    if [ ! -z "$3" ]; then
      PRIO=$3
    fi
  fi
fi

# Nothing to do if the VLAN interface already exists.
if [ -e "/sys/class/net/eth$ETH.$VLAN" ]; then
  exit 0
fi

vconfig add eth$ETH $VLAN
vconfig set_egress_map eth$ETH.$VLAN 0 $PRIO

# Last octet of the MAC (hex) and the first three octets of the base
# interface's IPv4 address.  NOTE(review): parsing ifconfig output is
# brittle — the field positions assume the older net-tools output format
# ("HWaddr", "inet addr:") — confirm on the target system.
MAC=$(ifconfig eth$ETH | grep HWaddr | cut -d":" -f7)
SUBNET=$(ifconfig eth$ETH | grep "inet addr" | cut -d":" -f2 | cut -d" " -f1)
if [ ! -z "$SUBNET" ]; then
  SUBNET1=$(echo "$SUBNET" | cut -d"." -f1)
  # SUBNET3 is extracted but unused below: the VLAN id takes the place of
  # the third octet in the address that gets assigned.
  SUBNET2=$(echo "$SUBNET" | cut -d"." -f2)
  SUBNET3=$(echo "$SUBNET" | cut -d"." -f3)
else
  # Fallback subnet when the base interface has no IPv4 address.
  SUBNET1="10"
  SUBNET2="1"
  SUBNET3="157"
fi

# Convert the MAC octet from hex to decimal and use it as the host octet.
# NOTE(review): values > 254 would yield an invalid host octet — confirm
# the deployment guarantees small final MAC octets.
MAC=$((0x$MAC))
ifconfig eth$ETH.$VLAN $SUBNET1.$SUBNET2.$VLAN.$MAC

# Tell the PTP driver which VLAN id to use, when supported.
if [ -e "/sys/class/net/eth$ETH/ptp/vid" ]; then
  echo "$VLAN" > "/sys/class/net/eth$ETH/ptp/vid"
fi
| true
|
222374191226a808c7af8cf0f86fa26c8a42e1e7
|
Shell
|
ryan-williams/file-helpers
|
/rtf2md
|
UTF-8
| 309
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# rtf2md — convert an RTF file to Markdown via textutil (macOS) and pandoc.
# Usage: rtf2md <in> [out]
# When no output name is given, the input's extension is replaced with .md.
set -e

if [ $# -eq 1 ]; then
  in="$1"; shift
  out="${in%.*}.md"
elif [ $# -eq 2 ]; then
  in="$1"; shift
  out="$1"; shift
else
  echo "Usage: $0 <in> [out]" >&2
  exit 1
fi

# textutil reads the RTF on stdin and emits HTML; pandoc turns the HTML
# into Markdown.  (Redirecting the file replaces the original
# `cat "$in" |` — one fewer process, identical stream.)
textutil -stdin -convert html -stdout < "$in" | \
  pandoc --from=html --to=markdown > "$out"
| true
|
64afa08d4bc793f4caa296b86504dba5153fa76b
|
Shell
|
nelbren/npres
|
/bin/enable/hola.bash
|
UTF-8
| 574
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Login greeting: colorized man pages for everyone, then a fortune/cowsay
# banner (normal users) or neofetch (root), followed by a cmatrix display.

# Greeting for regular users: Spanish fortunes piped through cowsay and
# lolcat, then a green cmatrix.
hola_user() {
  /usr/games/fortune /usr/share/games/fortunes/es /usr/share/games/fortunes/es/off | /usr/games/cowsay -d | /usr/games/lolcat
  cmatrix -sb -C green
}

# Greeting for root: system summary via neofetch, then a red cmatrix.
hola_root() {
  neofetch
  cmatrix -sb -C red
}

# Colorize man/less output by overriding the LESS_TERMCAP escape codes.
man_colors() {
  export LESS_TERMCAP_mb=$'\e[1;32m'
  export LESS_TERMCAP_md=$'\e[1;32m'
  export LESS_TERMCAP_me=$'\e[0m'
  export LESS_TERMCAP_se=$'\e[0m'
  export LESS_TERMCAP_so=$'\e[01;33m'
  export LESS_TERMCAP_ue=$'\e[0m'
  export LESS_TERMCAP_us=$'\e[1;4;31m'
}

man_colors
if [ "$USER" == "root" ]; then
  hola_root
else
  hola_user
fi
| true
|
d0623cc87620c11b41eb2980db770473b2f41237
|
Shell
|
qianchu/ALaCarte
|
/install.sh
|
UTF-8
| 1,098
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Provision an A La Carte worker node: system packages, Miniconda with the
# Python dependencies, GloVe embeddings, Common Crawl WET paths, and the
# A La Carte scripts/transforms.

# disp MESSAGE... — print a highlighted (white-on-blue) status banner.
disp() { echo -e "\e[44m\e[97m$@\e[49m\e[39m" ; }

disp "Installing GCC-C++ and OpenMPI"
sudo yum install gcc-c++ openmpi -y
echo 'export PATH="/usr/lib64/openmpi/bin:$PATH"' >> $HOME/.bashrc

disp "Downloading and Installing Miniconda"
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
sh Miniconda3-latest-Linux-x86_64.sh -b -p miniconda
echo 'export PATH="$HOME/miniconda/bin:$PATH"' >> $HOME/.bashrc
# NOTE(review): `source` is a bashism under #!/bin/sh — works where sh is
# bash (typical on the yum-based targets here), may fail on dash; confirm.
source ~/.bashrc

disp "Installing NumPy, MPI4Py, Scikit-Learn, Boto3, H5Py, CLD2, and NLTK"
conda install -y numpy mpi4py scikit-learn boto3 h5py nltk
pip install cld2-cffi

# GloVe embedding flavor to fetch (swap the comment for the smaller set).
VERSION=840B.300d
#VERSION=42B.300d
disp "Downloading GloVe "$VERSION" Embeddings"
wget http://nlp.stanford.edu/data/glove.$VERSION.zip
unzip glove.$VERSION.zip

# Common Crawl snapshot whose WET file paths should be downloaded.
CRAWL=2014-52
disp "Downloading WET Paths for "$CRAWL" Crawl"
wget https://commoncrawl.s3.amazonaws.com/crawl-data/CC-MAIN-$CRAWL/wet.paths.gz
gunzip wet.paths.gz

# Fetch the A La Carte driver script and the pretrained transform matrix.
GIT=https://raw.githubusercontent.com/NLPrinceton/ALaCarte/master
disp "Downloading A La Carte Files"
wget $GIT/alacarte.py
wget $GIT/transform/$VERSION.bin
| true
|
b8b53c965b4e5fa5994104f3163839cdb26e21b1
|
Shell
|
mattkingston/dotfiles
|
/bash/ubuntu/autocomplete.sh
|
UTF-8
| 180
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Completion setup for interactive bash sessions.

# Pull in the distro-wide bash completion definitions, but only when the
# shell is not in POSIX-compatibility mode and the file actually exists.
if ! shopt -oq posix && [[ -f /etc/bash_completion ]]; then
  source /etc/bash_completion
fi

# Register AWS CLI completion whenever the aws command is available.
if type -t 'aws' > /dev/null; then
  complete -C aws_completer aws
fi
| true
|
e5597461a14fa5667db680ce681aa1dd6ade69e2
|
Shell
|
pyslackers/sirbot-pyslackers
|
/scripts/test.sh
|
UTF-8
| 310
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/sh
# Run every lint/format/test tool; keep going after failures and exit
# non-zero at the end if any of them failed.
status=0

echo "TEST: black"
black --check --diff . || status=$?

echo "TEST: isort"
isort --recursive --check-only . || status=$?

# Silence Python warnings while flake8 imports project code.
export PYTHONWARNINGS="ignore"
echo "TEST: flake8"
flake8 . || status=$?
export PYTHONWARNINGS="default"

echo "TEST: pytest"
python -m pytest ./tests/ || status=$?

exit $status
| true
|
7da14ead6f1244272da22de8601fda143f8997de
|
Shell
|
NICTA/clouddb-replication
|
/scripts/analysis-kits/mysqldatabase-analysis/mysqldatabase-datasize.sh
|
UTF-8
| 2,516
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Copyright 2011 National ICT Australia Limited
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Requires exactly one argument: the address of the MySQL host to analyse.
if [ "${#}" -ne "1" ]
then
  echo "This script takes address of MySQL instance as a parameter to generate data "
  echo "size of workload archived."
  echo ""
  echo "Usage:"
  echo " ${0} [MySQL address]"
  exit 0;
fi

# Directory on the remote host holding the per-workload snapshots.
DIST_FOLDER=/root/mysql-data

# deploy_database HOST WORKLOAD — stop mysqld on HOST and restore the
# snapshot mysql-WORKLOAD.tar.bz2 into its data directory.
deploy_database()
{
  # Copy a snapshot to slave
  ssh root@$1 "cp $DIST_FOLDER/mysql-$2.tar.bz2 /var/tmp/mysql.tar.bz2"
  ssh root@$1 "killall -w mysqld"
  # Restore databases
  ssh root@$1 "cd /usr/local/mysql/data && rm -rf * \
  && tar jxf /var/tmp/mysql.tar.bz2 \
  && rm /var/tmp/mysql.tar.bz2"
}

# deploy_master_database HOST WORKLOAD — restore a snapshot, restart the
# server and release any table locks the snapshot was taken with.
deploy_master_database()
{
  deploy_database $1 $2
  ssh root@$1 "/etc/init.d/mysql.server start"
  ssh root@$1 "mysql -u root -e \"UNLOCK TABLES;\""
}

# Tab-separated output file for the collected measurements.
log_path_name=distill/mysqldatabase-datasize.csv

# Workload numbers, parsed out of the snapshot filenames on the remote host.
workload_list=(`ssh root@$1 "ls mysql-data/mysql-*.tar.bz2 | grep -o -E '[0-9][0-9][0-9]*' | sort -n -k 1 | uniq"`)

COLUMN_1="\"# of Workloads\""
COLUMN_2="\"Database size\""
COLUMN_3="\"Data size\""
COLUMN_4="\"Index size\""
COLUMN_5="\"# of Users\""
printf "%s\t%s\t%s\t%s\t%s\n" "$COLUMN_1" "$COLUMN_2" "$COLUMN_3" "$COLUMN_4" "$COLUMN_5" > $log_path_name

# NOTE(review): iteration starts at index 1, so workload_list[0] is never
# measured — presumably the baseline snapshot is deliberately skipped;
# confirm.
for ((windex=1; windex < ${#workload_list[*]}; windex++)) do
  deploy_master_database $1 ${workload_list[$windex]} > /dev/null 2>&1
  printf "%s\t" "${workload_list[$windex]}" >> $log_path_name
  # Total / data / index sizes (MB) for the olio schema plus the user count.
  echo -e \
  `ssh root@$1 "mysql -N -e \"SELECT sum( data_length + index_length ) / 1024 / 1024, sum( data_length )/ 1024 / 1024, sum( index_length )/ 1024 / 1024 FROM information_schema.TABLES WHERE table_schema like '%olio%' GROUP BY table_schema;\""` \
  `ssh root@$1 "mysql -N -e \"SELECT count(*) FROM olio.users;\""`>> $log_path_name
done
| true
|
90dab0efc7176cf1aab3c5f5b091aafa834e8979
|
Shell
|
msilmser/tower-test
|
/roles/weblogic-ans/files/cgscripts/ap/status.sh
|
UTF-8
| 2,064
| 4.1875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/ksh +x
#
# Check status of servers based on the standard Covalent directory structure
#
# Usage: status.sh [server1 server2....|ALL]
#
#
# History
# =======
#
# 12-22-2005 vsl created
# 05-26-2011 btt Modified for the new MWE Apache standard
#
####################################################################
# NOTE(review): two arguments are used below ($1=application, $2=server
# list) but only $1 is validated; with a single argument the loop body
# never runs and the script exits 0 — confirm that is intended.
if [ $# -lt 1 ]; then
  echo "Usage: $0 appl [server1 | server2 | ... | ALL]" && exit 22
fi

# Root of the per-application Apache instance tree.
SERVER_DIR="/users/domains/apache/$1"
# Accumulated return code; 0 means every checked instance is healthy.
RC=0

####################################################################
# Get the list of servers to find status on
####################################################################
if [ "$2" = "ALL" ]; then
  cd $SERVER_DIR
  SERVER=`ls -A -1`
else
  SERVER="$2"
fi

####################################################################
# Process each server in the server list
####################################################################
for i in $SERVER
do
  LOG_DIR=$SERVER_DIR/$i/logs
  CONF_DIR=$SERVER_DIR/$i/conf
  if [[ -d $CONF_DIR && -d $LOG_DIR ]]; then
    cd $LOG_DIR
    ############################################################
    # If apache is configured for this instance, check the
    # existence of the process against the PID file in the log
    # directory.
    # NOTE: apache erases the PID file when it shuts down
    ############################################################
    if [ -f $CONF_DIR/httpd.conf ]; then
      if [ -f httpd.pid ]; then
        PID=$(cat httpd.pid)
        # grep exit status (0 = process found) feeds directly into RC.
        ps -e | grep $PID > /dev/null
        N=$?
        RC=$(($RC + $N))
        if [ $N -eq 0 ]; then
          echo " Apache($PID): OK"
        else
          echo " Apache: NOT RUNNING!"
        fi
      else
        echo " Apache: NOT RUNNING or PID file removed abnormally!"
        RC=$(($RC + 1))
      fi
    fi
  else
    echo "---------"
    echo " ERROR: Server $i does not exist"
    RC=$(($RC + 10))
  fi
  echo " "
done

# Non-zero when any instance was down or missing.
exit $RC
| true
|
5a0d7a088d7cfdbcb79e30f32c5b5524912c6f15
|
Shell
|
ajb129/KeyakiTreebank
|
/scripts/collect_MAI_data
|
UTF-8
| 938
| 3.953125
| 4
|
[
"CC-BY-4.0"
] |
permissive
|
#!/bin/sh
# Build the Keyaki MAI (Mainichi 1995) data: convert MAI95.TXT from the
# corpus CD-ROM to SGML, then produce the per-month Kyoto Corpus files.

# Print usage and exit with status 1.
usage () {
  echo "Usage: $0 [-d cdrom_dir]"
  exit 1
}

# make_dir DIR — create DIR (and parents) unless it already exists.
make_dir () {
  if [ ! -d "$1" ]; then
    mkdir -p $1
  fi
}

# Defaults and tool/source locations (relative to this script).
CDROM_DIR=/mnt/cdrom
PERL=perl
BASE_DIR=`dirname $0`
SRC_DIR=$BASE_DIR/KyotoCorpus4.01-src
DEST_DIR=$BASE_DIR/../mainichi

# -d overrides the CD-ROM mount point; -h (or anything else) prints usage.
while getopts d:h OPT
do
  case $OPT in
    d) CDROM_DIR=$OPTARG
      ;;
    h) usage
      ;;
    *) usage
      ;;
  esac
done
shift `expr $OPTIND - 1`

# The raw Mainichi 1995 text must be present on the CD-ROM.
if [ ! -f $CDROM_DIR/MAI95.TXT ]; then
  echo "Not found: $CDROM_DIR/MAI95.TXT"
  usage
  exit
fi

make_dir $DEST_DIR

echo "converting MAI95.TXT to SGML ..."
$PERL $SRC_DIR/trans.pl < $CDROM_DIR/MAI95.TXT 2> /dev/null | $PERL $SRC_DIR/mainichi.pl $DEST_DIR

echo "making KyotoCorpus (syn) ..."
# Only a subset of the months is converted here; the full month list is
# preserved in the commented line below.
#for i in 01 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17
for i in 01 03
do
  echo "converting 9501$i ..."
  $PERL $SRC_DIR/format.pl -exed 9501$i < $DEST_DIR/9501.all | $PERL $SRC_DIR/dupli.pl > $DEST_DIR/9501$i.org
done
| true
|
8c241f7406baf1f8fbdff4298daa07907b0e9613
|
Shell
|
biwcream123/ArachneExecutionEngine
|
/src/main/dist/run_build.sh
|
UTF-8
| 2,488
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build a chroot-based R distribution archive for the execution engine.
#
# Bug fixes vs. the previous version:
#  * getopts(1) only understands single-character flags, so the documented
#    multi-character options (-bq, -impala, -netezza) could never match and
#    -h was declared as requiring an argument.  Options are now parsed with
#    an explicit case loop.
#  * debootstrap was invoked with a hard-coded "--arch amd64", silently
#    ignoring the -a option; it now receives $ARCH.

DIST=trusty
ARCH=amd64
BUILD_PATH=./dist
WS=$(dirname "$0")
BQ_PATH=../extras/bigquery/
IMPALA_PATH=../extras/impala/
NETEZZA_PATH=../extras/netezza/

# Print the option summary.
function print_help {
  echo "Usage: run_build.sh [OPTIONS]"
  echo "Available options are:"
  echo -e " -a i386|amd64 \tDistribution architecture, default is amd64"
  echo -e " -d DIST_NAME \t\tUbuntu distribution name, e.g. trusty or xenial, default is trusty"
  echo -e " -b BUILDDIR \t\tDirectory where distribution build would be running"
  echo -e " -f FILE \t\tOutput archive filename"
  echo -e " -bq PATH \t\tPath to BigQuery drivers"
  echo -e " -impala PATH \t\tPath to Impala drivers"
  echo -e " -netezza PATH \t\tPath to Netezza drivers"
  echo -e " -h \t\t\tPrints this"
}

# require_arg OPTION VALUE — abort when an option is missing its argument.
require_arg() {
  if [[ -z "$2" ]]; then
    echo "Option $1 requires an argument." >&2
    exit 1
  fi
}

# Manual option parsing (getopts cannot express multi-character flags).
while [[ $# -gt 0 ]]; do
  case "$1" in
    -a)       require_arg "$1" "$2"; ARCH=$2;         shift 2 ;;
    -d)       require_arg "$1" "$2"; DIST=$2;         shift 2 ;;
    -b)       require_arg "$1" "$2"; BUILD_PATH=$2;   shift 2 ;;
    -f)       require_arg "$1" "$2"; ARCHIVE=$2;      shift 2 ;;
    -bq)      require_arg "$1" "$2"; BQ_PATH=$2;      shift 2 ;;
    -impala)  require_arg "$1" "$2"; IMPALA_PATH=$2;  shift 2 ;;
    -netezza) require_arg "$1" "$2"; NETEZZA_PATH=$2; shift 2 ;;
    -h)       print_help; exit 0 ;;
    *)
      echo "Invalid option: $1" >&2
      exit 1
      ;;
  esac
done

# Default output archive name derives from the release and architecture.
if [[ -z $ARCHIVE ]]; then
  ARCHIVE=../r_base_${DIST}_${ARCH}.tar.gz
fi

if [[ ! -d $BUILD_PATH ]]; then
  mkdir -p $BUILD_PATH
fi

# Refuse to build over a non-empty directory.
if [[ "$(ls -A $BUILD_PATH)" ]]; then
  echo "$BUILD_PATH is not empty, woun't continue"
  exit 1
fi

echo "Starting build distribution."
echo "Release: $DIST $ARCH"
echo "Build dir: $BUILD_PATH"
echo "Output file: $ARCHIVE"
echo ""

# Bootstrap the base Ubuntu system into the chroot (now honours -a/$ARCH).
debootstrap --arch $ARCH $DIST $BUILD_PATH http://ubuntu.cs.utah.edu/ubuntu/
mount --bind /proc $BUILD_PATH/proc
cp $WS/install_packages.sh $BUILD_PATH/root/
cp $WS/libs.r $BUILD_PATH/root/

#Impala drivers
mkdir $BUILD_PATH/impala/
cp $IMPALA_PATH/*.jar $BUILD_PATH/impala/
cp ../docker/krb5.conf $BUILD_PATH/etc/

# BigQuery drivers
mkdir $BUILD_PATH/bigquery/
cp $BQ_PATH/*.jar $BUILD_PATH/bigquery/

# Netezza drivers
mkdir $BUILD_PATH/netezza/
cp $NETEZZA_PATH/*.jar $BUILD_PATH/netezza/

# Run the package installation inside the chroot, then clean up.
sudo chmod +x $BUILD_PATH/root/install_packages.sh
sudo chroot $BUILD_PATH /root/install_packages.sh $DIST
umount $BUILD_PATH/proc
rm $BUILD_PATH/root/install_packages.sh
rm $BUILD_PATH/root/libs.r

# NOTE(review): after this cd, relative $WS/$BUILD_PATH paths resolve
# against the build dir, and $ARCHIVE (default ../r_base_...) lands next
# to it — this mirrors the original behaviour; confirm it is intended.
cd $BUILD_PATH
# To prevent unexpected package updates
cp $WS/.Rprofile $BUILD_PATH/root/
tar czf $ARCHIVE .
echo "Distribution Archive built and available at $ARCHIVE"
| true
|
587ef3efdbbdd267ff0b90a3313f08b621745d03
|
Shell
|
mrskycriper/itmo.operating-systems
|
/Lab01/menu.sh
|
UTF-8
| 192
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Present a small launcher menu and start whichever program the user picks.
echo "Menu:"
echo "1) Start nano"
echo "2) Start vim"
echo "3) Start links"
echo "4) Exit menu"
read choice
# Unrecognised (or empty) input simply falls through and the script ends.
case "$choice" in
  1) nano ;;
  2) vi ;;
  3) links ;;
  4) exit 0 ;;
esac
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.