blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9043b7b7e9ac06fa915f7f319f45f3220b07d778
|
Shell
|
nforgeio/archive-Neon
|
/Stack/Tools/neon/Linux/Ubuntu-16.04/setup/setup-environment.sh
|
UTF-8
| 2,795
| 4.375
| 4
|
[] |
no_license
|
#!/bin/bash
#------------------------------------------------------------------------------
# FILE: setup-environment.sh
# CONTRIBUTOR: Jeff Lill
# COPYRIGHT: Copyright (c) 2016-2017 by Neon Research, LLC. All rights reserved.
#
# NOTE: This script must be run under [sudo].
#
# NOTE: Variables formatted like $<name> will be expanded by [node-conf]
# using a [PreprocessReader].
#
# This script manages the global environment variables stored in [/etc/environment].
# The commands are:
#
# setup-environment.sh set NAME VALUE
# setup-environment.sh remove NAME
# setup-environment.sh remove-regex REGEX
#
# Note: A reboot is required for changes to take effect.
#
# The [set] command changes the value of an existing variable or
# adds a new one.
#
# The [remove] command removes a variable if it exists.
#
# The [remove-regex] removes variables whose names match a REGEX
# pattern.

environmentPath=/etc/environment

# The rewritten file is built in a unique temp file and swapped into place at
# the end, so a failure part way through never corrupts [/etc/environment].
mkdir -p "${HOME}/temp"
tempPath="${HOME}/temp/environment-$(date --utc +%s-%N).log"

command=${1:-}

# Implement the command.
case ${command} in

    set)
        if [[ ${2} ]] ; then
            name=${2}
        else
            echo "ERROR[setup-environment]: NAME argument is required." >&2
            exit 1
        fi
        value=${3:-""}

        # Copy [/etc/environment] line by line, replacing the variable's
        # line if present.  Quoting "${line}" preserves whitespace and
        # prevents glob expansion; the [-n] test also handles a final line
        # without a trailing newline.
        regex="^${name}=.*$"
        found=false
        while IFS='' read -r line || [[ -n "${line}" ]]; do
            if [[ ${line} =~ ${regex} ]] ; then
                echo "${name}=${value}" >> "${tempPath}"
                found=true
            else
                echo "${line}" >> "${tempPath}"
            fi
        done < "${environmentPath}"

        # Append the variable if it wasn't already present.
        if ! ${found} ; then
            echo "${name}=${value}" >> "${tempPath}"
        fi
        ;;

    remove)
        if [[ ${2} ]] ; then
            name=${2}
        else
            echo "ERROR[setup-environment]: NAME argument is required." >&2
            exit 1
        fi

        # Copy every line except the one defining the variable.
        regex="^${name}=.*$"
        while IFS='' read -r line || [[ -n "${line}" ]]; do
            if ! [[ ${line} =~ ${regex} ]] ; then
                echo "${line}" >> "${tempPath}"
            fi
        done < "${environmentPath}"
        ;;

    remove-regex)
        if [[ ${2} ]] ; then
            regex=${2}
        else
            echo "ERROR[setup-environment]: REGEX argument is required." >&2
            exit 1
        fi

        # Copy every line except those whose variable name matches REGEX.
        regex="^${regex}=.*$"
        while IFS='' read -r line || [[ -n "${line}" ]]; do
            if ! [[ ${line} =~ ${regex} ]] ; then
                echo "${line}" >> "${tempPath}"
            fi
        done < "${environmentPath}"
        ;;

    *)
        echo "ERROR[setup-environment]: Unknown command [${1}]." >&2
        exit 1
        ;;
esac

# Make sure the temp file exists even if the rewrite above emitted no lines
# (e.g. [remove] matched every line); otherwise [mv] would fail.  Then
# replace [/etc/environment] with the rewritten file.  [mv] already removes
# the temp file, so the original's trailing [rm -f] was redundant.
touch "${tempPath}"
mv "${tempPath}" "${environmentPath}"
exit 0
| true
|
53bc87a7b80b2139a30ae7e4a4d5e0e795b53106
|
Shell
|
atlury/aryalinux
|
/applications/perl-modules#font-ttf.sh
|
UTF-8
| 487
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build and install the Font-TTF Perl module from CPAN source.
#
# Expects /etc/alps/alps.conf to define SOURCE_DIR and INSTALLED_LIST.
set -e
set +h
. /etc/alps/alps.conf
SOURCE_ONLY=y
URL=http://search.cpan.org/CPAN/authors/id/B/BH/BHALLISSY/Font-TTF-1.06.tar.gz
NAME="perl-modules#font-ttf"
VERSION=1.06

cd "$SOURCE_DIR"

# Download the tarball only if it isn't already present (-nc = no clobber).
wget -nc "$URL"

# Tarball name is the last path component of the URL (cheaper than the
# original rev|cut|rev pipeline); the extraction directory is the archive's
# single top-level folder.
TARBALL=${URL##*/}
DIRECTORY=$(tar tf "$TARBALL" | cut -d/ -f1 | uniq)

tar -xf "$TARBALL"
cd "$DIRECTORY"

# Standard ExtUtils::MakeMaker build; installation needs root.
perl Makefile.PL
make
sudo make install

# Clean up the build tree and record the install.
cd "$SOURCE_DIR"
rm -rf "$DIRECTORY"
echo "perl-modules#font-ttf=>$(date)" | sudo tee -a "$INSTALLED_LIST"
| true
|
5efc34115ea876322c9f985740deb32a1c491443
|
Shell
|
RafaelOstertag/k8s-kafka
|
/docker/kafka/kafka-run-class.sh
|
UTF-8
| 1,070
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Run an arbitrary Kafka class with the bundled conf dir and jars on the
# classpath: kafka-run-class.sh <classname> [opts...]
if [ $# -lt 1 ];
then
  echo "USAGE: $0 classname [opts]" >&2
  exit 1
fi

CLASSPATH="/kafka/conf:$CLASSPATH"

# Prepend every bundled jar.  The existence check skips the literal
# "/kafka/libs/*.jar" pattern when the directory holds no jars (the
# original would have put that literal on the classpath).
for jar in /kafka/libs/*.jar
do
  [ -e "$jar" ] || continue
  CLASSPATH="$jar:$CLASSPATH"
done

CLASSNAME=$1
shift

# $JAVA_OPTS is intentionally unquoted so it word-splits into options.
# exec replaces the shell so signals reach the JVM directly.
exec /usr/local/openjdk-11/bin/java $JAVA_OPTS -cp "${CLASSPATH}" ${CLASSNAME} "$@"
| true
|
2509911faf7a43ba8f2ed7c8047fb16a11caec52
|
Shell
|
qidizi/blog
|
/mac/make-nginx.sh
|
UTF-8
| 1,759
| 3.34375
| 3
|
[] |
no_license
|
nginxDir="/-/soft/nginx";

# exit2fail STATUS [MESSAGE] [CODE]
# When STATUS is non-zero, print MESSAGE (default: "操作失败") and exit the
# script with CODE (default: 1).  Does nothing when STATUS is zero.
function exit2fail () {
    local status="$1"
    local message="$2"
    local code="$3"

    if [[ "${status}" -eq "0" ]];then
        return 0;
    fi

    if [[ -n "${message}" ]];then
        echo "${message}";
    else
        echo "操作失败";
    fi

    if [[ -n "${code}" ]];then
        exit ${code};
    else
        exit 1;
    fi
}
# exit2notDir PATH
# Exit with status 2 (after printing a notice) unless PATH is an existing
# directory.  (Original note: 目录不存在就退出, 参数1为目录路径.)
function exit2notDir() {
    local dir="${1}"
    if [[ -d "${dir}" ]];then
        return 0;
    fi
    echo "目录 [${dir}] 不存在";
    exit 2;
}
exit2notDir "$nginxDir"

echo "需要修改openssl的config成./Configure darwin64-x86_64-cc \$@; 因为opens的config默认是386的,nginx却要求64位;同时openssl不能有编译好的东西。否则在nginx调用make clean时出错,先rm 编译完了nginx再编译openssl"

# The openssl and pcre source trees are expected to sit beside the nginx tree.
openssl="${nginxDir}/../openssl"
exit2notDir "$openssl"
pcre="${nginxDir}/../pcre"
exit2notDir "$pcre"

cd "${nginxDir}";

# Configure nginx with bundled pcre/openssl; temp and log paths go to /tmp.
bash configure \
--prefix="${nginxDir}/qidizi" \
--http-client-body-temp-path=/tmp/nginx.client_body_temp \
--http-proxy-temp-path=/tmp/nginx.proxy_temp \
--http-fastcgi-temp-path=/tmp/nginx.fastcgi_temp \
--http-uwsgi-temp-path=/tmp/nginx.uwsgi_temp \
--http-scgi-temp-path=/tmp/nginx.scgi_temp \
--http-log-path=/tmp/nginx.access \
--error-log-path=/tmp/nginx.error \
--without-select_module \
--without-poll_module \
--with-threads \
--with-http_ssl_module \
--with-http_v2_module \
--with-http_addition_module \
--with-http_sub_module \
--with-http_gunzip_module \
--with-http_auth_request_module \
--with-http_secure_link_module \
--with-pcre="${pcre}" \
--with-pcre-jit \
--with-openssl="${openssl}" \
;
exit2fail $?
make
exit2fail $?
make install
exit2fail $?
# BUG FIX: the original echoed the undefined variable ${nginxBuild}; report
# the actual source directory that was built.
echo -e "\n\n\n ${nginxDir} build成功"
| true
|
e5dc513994faa39d439edeecc69816c4c502fea3
|
Shell
|
apire001/UNIXSystemAdministration
|
/runaway.sh
|
UTF-8
| 897
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Kill "runaway" processes: any non-root process reported by top with more
# than 2:00.00 of accumulated CPU time.
#
# BUG FIX: the original used #!/bin/sh but relies on the bash-only [[ ]]
# construct, which fails under dash/ash; run under bash explicitly.
#
# NOTE: the time comparison below is lexicographic (string >), which is only
# reliable while both strings share top's M:SS.hh width.

# Snapshot one batch-mode iteration of top.
top -b -n 1 > top.txt
filename=top.txt
counter1=0   # counts header lines so the first 7 are skipped
counter2=0   # index of the current whitespace-separated field
pid=""
user=""
time=""
while read -r line; do
  if [ "$counter1" -gt 6 ]
  then
    counter2=0
    for word in $line
    do
      if [ "$counter2" -eq 0 ]
      then
        pid="$word"        # field 0: PID
      fi
      if [ "$counter2" -eq 1 ]
      then
        user="$word"       # field 1: USER
      fi
      if [ "$counter2" -eq 10 ]
      then
        time="$word"       # field 10: TIME+ (accumulated CPU time)
      fi
      # Once past the TIME+ field, decide whether to kill this process.
      if [ "$counter2" -eq 11 ]
      then
        if [[ "$user" != "root" ]]
        then
          if [[ "$time" > "2:00.00" ]]
          then
            kill -9 "$pid"
            echo "Killing process with pid: $pid"
          fi
        fi
      fi
      counter2=$((counter2+1))
    done
  else
    counter1=$((counter1+1))
  fi
done < "$filename"
| true
|
2937510926cb192b06e4c536dfbed5a68fdbb827
|
Shell
|
cbare/connect-four
|
/example_client_session.sh
|
UTF-8
| 1,362
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Some fun shell hackery to demo the connect-four API.
# NOTE: requires httpie and jq
# Plays a complete game against a local server; player1 stacks column 3
# four times and wins.
host=127.0.0.1
port=5000
# Echo every request as it is issued.
set -x
http GET http://${host}:${port}/drop_token
# new game
# POST the player list and board size; jq extracts the new game's id from
# the JSON response body.
gameId=`http --body POST http://${host}:${port}/drop_token \
players:='["player1", "player2"]' \
rows:=7 columns:=7 \
| jq --raw-output '.gameId'`
# get state of game
http GET http://${host}:${port}/drop_token/${gameId}
# make a bunch of moves
http POST http://${host}:${port}/drop_token/${gameId}/player1 column:=3
http POST http://${host}:${port}/drop_token/${gameId}/player2 column:=4
http POST http://${host}:${port}/drop_token/${gameId}/player1 column:=2
http POST http://${host}:${port}/drop_token/${gameId}/player2 column:=1
http POST http://${host}:${port}/drop_token/${gameId}/player1 column:=3
http POST http://${host}:${port}/drop_token/${gameId}/player2 column:=2
http POST http://${host}:${port}/drop_token/${gameId}/player1 column:=3
http POST http://${host}:${port}/drop_token/${gameId}/player2 column:=2
http POST http://${host}:${port}/drop_token/${gameId}/player1 column:=3
# get state of game, player 1 wins!
http GET http://${host}:${port}/drop_token/${gameId}
# recap moves leading to glorious victory of player 1
http GET http://${host}:${port}/drop_token/${gameId}/moves
| true
|
5824696c715401f3fb5fc9b4659220fc9b868b85
|
Shell
|
gabibbo97/docker-compose-zammad
|
/scripts/grab-backup.sh
|
UTF-8
| 669
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh
# Grab the newest Zammad backup: find the docker-compose backup volume,
# locate its most recently modified file, and unpack it (database.sql and
# files.tar) into the current directory.
set -e
docker volume list -q | while read -r volume; do
# Find volume name
# NOTE(review): this grep chain approximates a match on the compose label
# "com.docker.compose.volume" with value zammad_backup — confirm against
# the actual `docker volume inspect` JSON output.
if ! docker volume inspect "${volume}" | grep 'com' | grep 'docker' | grep 'compose' | grep 'volume' | grep -q 'zammad_backup'; then
continue
fi
# Find mountpoint
MOUNTPOINT=$(docker volume inspect -f '{{.Mountpoint}}' "${volume}")
echo "Backup volume is at ${MOUNTPOINT}"
# Find latest backup
# ls -t sorts by modification time, newest first; -print0/-0 keep unusual
# file names intact.
LATEST_BACKUP="$(find "${MOUNTPOINT}" -type f -print0 | xargs -r -0 ls -1 -t | head -1)"
echo "Latest backup is at ${LATEST_BACKUP}"
# Extract latest backup
rm -f database.sql files.tar
tar -xvzf "${LATEST_BACKUP}"
# Done
echo 'Grabbed backup'
break
done
| true
|
93a1dbda6f266aafc685d688166f736aa8ccd362
|
Shell
|
conda-forge/pyside2-feedstock
|
/recipe/build.sh
|
UTF-8
| 3,149
| 3.09375
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LGPL-3.0-only"
] |
permissive
|
#!/bin/bash
# conda-forge build script: builds shiboken2, PySide2 and pyside2-tools
# against the Qt in ${PREFIX}, optionally running the test suite under Xvfb.
#
# BUG FIX: this script uses bash-only features ([[ ]], pushd/popd), so the
# original #!/bin/sh shebang was incorrect; it must run under bash.
XVFB_RUN=""
if test `uname` = "Linux"
then
    cp -r /usr/include/xcb ${PREFIX}/include/qt
    XVFB_RUN="xvfb-run -s '-screen 0 640x480x24'"
fi
set -ex

# Remove running without PYTHONPATH
sed -i.bak "s/, '-E'//g" sources/shiboken2/libshiboken/embed/embedding_generator.py
sed -i.bak 's/${PYTHON_EXECUTABLE} -E/${PYTHON_EXECUTABLE}/g' sources/shiboken2/libshiboken/CMakeLists.txt

# Use build shiboken2
sed -i.bak "s/COMMAND Shiboken2::shiboken2/COMMAND shiboken2/g" sources/pyside2/cmake/Macros/PySideModules.cmake
sed -i.bak "s/COMMAND Shiboken2::shiboken2/COMMAND shiboken2/g" sources/pyside2/tests/pysidetest/CMakeLists.txt

extra_cmake_flags=
if [[ "${CONDA_BUILD_CROSS_COMPILATION:-}" != "1" || "${CROSSCOMPILING_EMULATOR:-}" != "" ]]; then
    export RUN_TESTS=yes
    # Shiboken6 has better support for cross compilation
    # But for now, lets just specify the flags manually
    PYTHON_EXTENSION_SUFFIX=$(${PYTHON} -c "import distutils.sysconfig, os.path; print(os.path.splitext(distutils.sysconfig.get_config_var('EXT_SUFFIX'))[0])")
    extra_cmake_flags="${extra_cmake_flags} -DPYTHON_EXTENSION_SUFFIX=${PYTHON_EXTENSION_SUFFIX}"
else
    export RUN_TESTS=no
fi

# Build and install shiboken2 first; the PySide2 build invokes it.
pushd sources/shiboken2
mkdir -p build && cd build
cmake -LAH -G "Ninja" ${CMAKE_ARGS} \
    -DCMAKE_PREFIX_PATH=${PREFIX} \
    -DCMAKE_INSTALL_PREFIX=${PREFIX} \
    -DCMAKE_BUILD_TYPE=Release \
    -DBUILD_TESTS=OFF \
    -DPYTHON_EXECUTABLE=${PYTHON} \
    ${extra_cmake_flags} \
    ..
cmake --build . --target install
popd
${PYTHON} setup.py dist_info --build-type=shiboken2
cp -r shiboken2-${PKG_VERSION}.dist-info "${SP_DIR}"/

# Build, install and (optionally) test PySide2 itself.
pushd sources/pyside2
mkdir -p build && cd build
cmake -LAH -G "Ninja" ${CMAKE_ARGS} \
    -DCMAKE_PREFIX_PATH=${PREFIX} \
    -DCMAKE_INSTALL_PREFIX=${PREFIX} \
    -DCMAKE_BUILD_TYPE=Release \
    -DPYTHON_EXECUTABLE=${PYTHON} \
    ${extra_cmake_flags} \
    ..
cmake --build . --target install
if test "$CONDA_BUILD_CROSS_COMPILATION" = "1"
then
    # pyi files are generated in the host prefix and hence not installed
    cp -v ${BUILD_PREFIX}/venv/lib/python${PY_VER}/site-packages/PySide2/*.pyi ${SP_DIR}/PySide2
fi
cp ./tests/pysidetest/libpysidetest${SHLIB_EXT} ${PREFIX}/lib
cp ./tests/pysidetest/testbinding*.so ${SP_DIR}
# create a single X server connection rather than one for each test using the PySide USE_XVFB cmake option
if [[ "${RUN_TESTS}" == "yes" ]]; then
    eval ${XVFB_RUN} ctest -j${CPU_COUNT} --output-on-failure --timeout 200 -E QtWebKit || echo "no ok"
fi
rm ${SP_DIR}/testbinding*.so
popd
${PYTHON} setup.py dist_info --build-type=pyside2
cp -r PySide2-${PKG_VERSION}.dist-info "${SP_DIR}"/

# Build and install the pyside2-tools helpers.
pushd sources/pyside2-tools
mkdir -p build && cd build
cmake -LAH -G "Ninja" ${CMAKE_ARGS} \
    -DCMAKE_PREFIX_PATH=${PREFIX} \
    -DCMAKE_INSTALL_PREFIX=${PREFIX} \
    -DCMAKE_BUILD_TYPE=Release \
    -DBUILD_TESTS=OFF \
    ..
cmake --build . --target install
# Move the entry point for pyside2-rcc pyside2-uic and pyside2-designer to the right location
mkdir -p "${SP_DIR}"/PySide2/scripts
touch "${SP_DIR}"/PySide2/scripts/__init__.py
mv ${PREFIX}/bin/pyside_tool.py "${SP_DIR}"/PySide2/scripts/pyside_tool.py
rm -rf ${PREFIX}/include/qt/xcb
| true
|
0fcc6d477428f8d5b317f7ba273bc5f4cf0ca5d6
|
Shell
|
kswapd/docker-devops
|
/icbc-devops/cmd_parent.sh
|
UTF-8
| 228
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo of exporting a dynamically-named variable from a function and showing
# that it is visible both in the parent shell and in a child process.

# Exports _TEST_8_sub=1991 (the name is built at runtime from ${sub}) and
# dumps the environment to show it took effect.
export_func(){
sub="sub"
export _TEST_8_${sub}=1991
env
}
#export _TEST_8_=99
i=1
# Print "cmd1",<first script arg>,<i> three times, one second apart.
while((i<=3))
do
echo "cmd1",$1,$i
let ++i
sleep 1s
done
export_func
# The dynamically-named export is visible here in the parent shell...
echo $_TEST_8_sub
#source ./cmd_child.sh
# ...and is inherited by the child process started below.
./cmd_child.sh
| true
|
a3210a781ef3777e0f04a1fd8ec954db60d260ea
|
Shell
|
leonardothibes/trac
|
/conf.d/18-trac/install.sh
|
UTF-8
| 1,015
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install and configure the Trac project tracker on a Debian/Ubuntu host:
# package install, init service, boot entry, Apache vhost, then start.
echo "Instalando o Trac..."
ETC=`dirname $0`/etc
# Install the trac package.
apt-get -y install trac
# Create the projects directory and hand it to the web server user.
mkdir /var/lib/trac
chown www-data: /var/lib/trac
# Copy the init service file into place and make it executable.
cp $ETC/init.d/trac /etc/init.d
chmod 755 /etc/init.d/trac
# Add trac to system boot: replace "clear" in rc.local with the service
# start command, then re-append "clear" at the end.
TMP=/tmp/tmpfile
sed "s/clear/service trac start/" /etc/rc.local > $TMP
cat $TMP > /etc/rc.local
rm -f $TMP
echo "clear" >> /etc/rc.local
# Configure the server's landing page from the bundled template.
TMP=`dirname $0`/template
cp -f $TMP/index.html /usr/share/pyshared/trac/templates
# Configure the Apache redirect to trac's port, substituting the hostname
# recorded earlier in /tmp/hostname into the vhost template.
TMP=/tmp/tmpfile
HST=`cat /tmp/hostname`
rm -f /etc/apache2/sites-enabled/*
sed "s/HOSTNAME/$HST/" $ETC/apache2/sites-enabled/trac > $TMP
cat $TMP > /etc/apache2/sites-available/trac
cd /etc/apache2/sites-enabled
ln -sf ../sites-available/trac 00-trac
cd -
# Start trac.
service trac start
# Restart Apache so the new vhost takes effect.
service apache2 restart
| true
|
0538870efb1dd30218e214e53d34a3d66312f0ab
|
Shell
|
cdfeasy/samples
|
/facebookbot/src/deb/control/preinst
|
UTF-8
| 396
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
# Debian maintainer script (preinst): on install or upgrade, make sure the
# dedicated system account for the bot exists.
set -e

DUSER="citibank-facebook-bot"
PKGNAME="citibank-facebook-bot"

case "$1" in
  install|upgrade)
    # Create the system user on first install; later runs find it via id(1).
    id ${DUSER} > /dev/null 2>&1 || \
      useradd --system -d /usr/share/${PKGNAME} \
        --no-user-group --no-log-init -g nogroup \
        --shell /bin/bash ${DUSER}
    ;;
  abort-upgrade)
    ;;
  *)
    echo "preinst called with unknown argument \`$1'" >&2
    exit 1
    ;;
esac

exit 0
| true
|
017372cdef43c60071f069664bba4a388c82ef37
|
Shell
|
barentsen/dotfiles
|
/.bash_profile
|
UTF-8
| 173
| 2.84375
| 3
|
[] |
no_license
|
# Delegate to .bashrc so interactive settings live in a single file.
#
# .bash_profile runs for login shells and .bashrc for non-login interactive
# shells; sourcing one from the other keeps both in sync.
if [ -f ~/.bashrc ]; then
  . ~/.bashrc
fi
| true
|
4316aac0d24010792f2c767017ef791653f3df41
|
Shell
|
RenatoGeh/snippets
|
/has_duplicate.sh
|
UTF-8
| 276
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Compare every pair of the files given as arguments; report the first pair
# with identical contents and exit 1, otherwise report none and exit 0.
for first in "$@"; do
  for second in "$@"; do
    if [ "$first" != "$second" ]; then
      # cmp -s: silent byte comparison; status 0 means identical contents
      # (same semantics as the original diff >/dev/null check).
      if cmp -s "$first" "$second"; then
        echo "Has duplicate: $first and $second"
        exit 1
      fi
    fi
  done
done
echo "No duplicates."
exit 0
| true
|
33b2e4f3279228de93c15face5eb7d7898754a1c
|
Shell
|
italloalves9/learning-space
|
/Docker/post-docker-machine-learning.sh
|
UTF-8
| 979
| 2.640625
| 3
|
[] |
no_license
|
Build image in Docker
$ docker build -t oficina-ml .
Check the image name
$ docker images
Run the container with the machine learning space
$ docker run --name oficina-ml -p 8888:8888 -v "$PWD/notebooks:/opt/notebooks" -d oficina-ml
Tagging the image
$ docker tag 7e94b5c03aea nova6/oficina-ml:latest
Log in to Dockerhub
$ docker login
Push in the image to Dockerhub
$ docker push nova6/oficina-ml
Remove the image in your local machine
$ docker rmi -f 4048f45d3323
Execution of the image using the run command
$ docker run nova6/oficina-ml
Push in the remote repo
$ docker push nova6/oficina-ml
Run the image locally
$ docker run -p 8888:8888 nova6/oficina-ml
Bash inside the image
$ docker exec -i -t c2f1db72e707 /bin/bash
Stop all containers
$ docker stop $(docker ps -q)
Kill all containers
$ docker kill $(docker ps -q)
Remove all containers
$ docker rm -f $(docker ps -q)
Stop all running containers, then prune everything unused
$ docker ps -q | xargs -r docker stop ; docker system prune -a
| true
|
a4f1de03f7524c0c84c96e5364b628e75bdf8002
|
Shell
|
lucaswannen/source_code_classification_with_CNN
|
/dataset_v2/bash/4645156.txt
|
UTF-8
| 288
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
echo "enter the full path of script";
read path;
while true
ps aux | grep -v grep | grep -q $path || ( nohup php -f $path & )
done
bash test.sh
enter the full path of script
php_test.php
test.sh: line 7: syntax error near unexpected token `done'
test.sh: line 7: `done'
| true
|
4821da208f99f48ef853767925e0dc0e462fdf82
|
Shell
|
dpriest57/mac-cookbooks
|
/cookbooks/redis/recipes/install.bash
|
UTF-8
| 1,071
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e

# Cookbook recipe: build redis from source and stage its bin/, config and
# data folders.  Helper functions (getTemporaryFolder, unzipRemoteFile,
# createFileFromTemplate, header) and the install*/downloadURL/port
# variables come from the util.bash and attributes files sourced in main()
# — NOTE(review): confirm against those files; they are not visible here.

function install()
{
# Clean Up
rm -rf "${installBinFolder}" "${installConfigFolder}" "${installDataFolder}"
mkdir -p "${installBinFolder}" "${installConfigFolder}" "${installDataFolder}"
# Install
local currentPath="$(pwd)"
local tempFolder="$(getTemporaryFolder)"
unzipRemoteFile "${downloadURL}" "${tempFolder}"
cd "${tempFolder}"
make
# Copy every executable, non-script build product out of src/ into the bin
# folder, then discard the build tree and restore the working directory.
find "${tempFolder}/src" -type f ! -name "*.sh" -perm -u+x -exec cp -f {} "${installBinFolder}" \;
rm -rf "${tempFolder}"
cd "${currentPath}"
# Config Server
# Substitution pairs for the template: each placeholder is followed by its
# replacement value.
local serverConfigData=(
'__INSTALL_DATA_FOLDER__' "${installDataFolder}"
6379 "${port}"
)
createFileFromTemplate "${appPath}/../files/conf/redis.conf" "${installConfigFolder}/redis.conf" "${serverConfigData[@]}"
}

# Entry point: locate this script, pull in the shared library and default
# attributes, then run the install.
function main()
{
appPath="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${appPath}/../../../lib/util.bash" || exit 1
source "${appPath}/../attributes/default.bash" || exit 1
header 'INSTALLING REDIS'
install
}

main "${@}"
| true
|
dbe5bd7911891a05255b16288f90777cc74939d7
|
Shell
|
ShalokShalom/plan.sh
|
/libisofs/plan.sh
|
UTF-8
| 560
| 2.625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Habitat-style plan for libisofs, a library for creating ISO-9660
# filesystems with RockRidge/Joliet extensions.
pkg_origin=cosmos
pkg_name=libisofs
pkg_version=1.4.6
pkg_description="Library to create an ISO-9660 filesystem with extensions like RockRidge or Joliet. "
pkg_upstream_url="http://libburnia-project.org"
pkg_license=('GPL')
pkg_deps=('acl' 'zlib')
pkg_source=("http://files.libburnia-project.org/releases/${pkg_name}-${pkg_version}.tar.gz")
# NOTE(review): this value is 32 hex chars (MD5 length); Habitat's pkg_shasum
# expects a SHA256 digest — verify against the upstream release checksums.
pkg_shasum=('6ec515d9265fb75c48e8e73b3ea3f6c5')
# Configure and compile in the unpacked source directory.
do_build() {
./configure --prefix=/usr \
--enable-libacl \
--enable-xattr \
--disable-static
make
}
# Install into the staging area rooted at the package prefix.
do_package() {
make DESTDIR=${pkg_prefix} install
}
| true
|
9e28db6c473a7356dcbafe736f53fd83c7b79ac3
|
Shell
|
openvax/mhcflurry
|
/downloads-generation/models_class1_selected_no_mass_spec/GENERATE.sh
|
UTF-8
| 2,079
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Model select standard MHCflurry Class I models.
#
set -e
set -x
DOWNLOAD_NAME=models_class1_selected_no_mass_spec
SCRATCH_DIR=${TMPDIR-/tmp}/mhcflurry-downloads-generation
SCRIPT_ABSOLUTE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/$(basename "${BASH_SOURCE[0]}")"
SCRIPT_DIR=$(dirname "$SCRIPT_ABSOLUTE_PATH")
# Start from a clean scratch folder for this download name.
mkdir -p "$SCRATCH_DIR"
rm -rf "$SCRATCH_DIR/$DOWNLOAD_NAME"
mkdir "$SCRATCH_DIR/$DOWNLOAD_NAME"
# Send stdout and stderr to a logfile included with the archive.
exec > >(tee -ia "$SCRATCH_DIR/$DOWNLOAD_NAME/LOG.txt")
exec 2> >(tee -ia "$SCRATCH_DIR/$DOWNLOAD_NAME/LOG.txt" >&2)
# Log some environment info
date
pip freeze
git status
cd $SCRATCH_DIR/$DOWNLOAD_NAME
cp $SCRIPT_DIR/write_validation_data.py .
mkdir models
# Count GPUs (0 when nvidia-smi is absent) and online processors; both are
# used to size the worker pools below.
GPUS=$(nvidia-smi -L 2> /dev/null | wc -l) || GPUS=0
echo "Detected GPUS: $GPUS"
PROCESSORS=$(getconf _NPROCESSORS_ONLN)
echo "Detected processors: $PROCESSORS"
# Build held-out validation data: curated measurements excluding anything the
# unselected models were trained on.
time python ./write_validation_data.py \
--include "$(mhcflurry-downloads path data_curated)/curated_training_data.no_mass_spec.csv.bz2" \
--exclude "$(mhcflurry-downloads path models_class1_unselected)/models/train_data.csv.bz2" \
--only-alleles-present-in-exclude \
--out-data test.csv \
--out-summary test.summary.csv
wc -l test.csv
# Select allele-specific models from the unselected ensemble, scored on the
# held-out data.
time mhcflurry-class1-select-allele-specific-models \
--data test.csv \
--models-dir "$(mhcflurry-downloads path models_class1_unselected)/models" \
--out-models-dir models \
--scoring combined:mse,consensus \
--consensus-num-peptides-per-length 10000 \
--combined-min-models 8 \
--combined-max-models 16 \
--num-jobs $(expr $PROCESSORS \* 2) --gpus $GPUS --max-workers-per-gpu 2 --max-tasks-per-worker 5
# Calibrate percentile ranks for the selected models.
time mhcflurry-calibrate-percentile-ranks \
--models-dir models \
--num-peptides-per-length 100000 \
--num-jobs $(expr $PROCESSORS \* 2) --gpus $GPUS --max-workers-per-gpu 2 --max-tasks-per-worker 50
# Bundle everything (including this script and the compressed log) into the
# downloadable archive.
cp $SCRIPT_ABSOLUTE_PATH .
bzip2 LOG.txt
tar -cjf "../${DOWNLOAD_NAME}.tar.bz2" *
echo "Created archive: $SCRATCH_DIR/$DOWNLOAD_NAME.tar.bz2"
| true
|
4a96e82c86634f8a83be2afefc97fb9f51f70260
|
Shell
|
BGCastro89/gRPC-pubsub-srv-demo
|
/pubsub/run-dev.sh
|
UTF-8
| 290
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Start the Google Cloud Pub/Sub emulator for local development, first
# freeing its default port (8085) from any previous instance.
# Kill the existing process if it's already running
if [ "$(lsof -i:8085)" ]; then
kill $(lsof -t -i:8085)
fi
# Kick off the new process
# NOTE(review): `emulators pubsub start` runs in the foreground, so the
# env-init line below is only reached after the emulator stops — confirm
# whether the start command should be backgrounded.
gcloud beta emulators pubsub start --project=grpc-demo-proj
# Connect to environment variables
# env-init prints `export PUBSUB_EMULATOR_HOST=...`; $( ) executes it.
$(gcloud beta emulators pubsub env-init)
| true
|
e78743e821e0278e71cc7739d277d063ab06c785
|
Shell
|
skilbjo/aeon
|
/dev-resources/ssl/create-java-key-store
|
UTF-8
| 667
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -eou pipefail
# Build a Java keystore (JKS) from an existing PEM private key + certificate.
# https://stackoverflow.com/questions/11952274/how-can-i-create-keystore-from-an-existing-certificate-abc-crt-and-abc-key-fil
#
# Usage: create-java-key-store KEYFILE CERTFILE
# Requires $quandl_api_key in the environment; it is (ab)used as the
# keystore password.  Writes key_crt.p12 and java_key_store to the CWD.

# these are file paths
keyfile="$1"
certfile="$2"

password="$quandl_api_key" # something random

# first we need to merge the keys into a pkcs12 keystore
# (file-path arguments are quoted so paths with spaces work)
openssl pkcs12 -export \
  -inkey "$keyfile" \
  -in "$certfile" \
  -out key_crt.p12 \
  -name key_crt \
  -password "pass:${password}"

# then convert the PKCS#12 bundle into a JKS keystore
keytool -importkeystore \
  -srckeystore key_crt.p12 \
  -srcstoretype pkcs12 \
  -srcstorepass "${password}" \
  -srcalias key_crt \
  -destkeystore java_key_store \
  -deststoretype jks \
  -deststorepass "${password}"
| true
|
13546143d0a101d9b64b4048af2ebee899fad4a7
|
Shell
|
pbowden-msft/InstallerCache
|
/InstallerCache
|
UTF-8
| 12,025
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
#set -x
TOOL_NAME="Microsoft Office Installer Cache"
TOOL_VERSION="1.1"
## Copyright (c) 2018 Microsoft Corp. All rights reserved.
## Scripts are not supported under any Microsoft standard support program or service. The scripts are provided AS IS without warranty of any kind.
## Microsoft disclaims all implied warranties including, without limitation, any implied warranties of merchantability or of fitness for a
## particular purpose. The entire risk arising out of the use or performance of the scripts and documentation remains with you. In no event shall
## Microsoft, its authors, or anyone else involved in the creation, production, or delivery of the scripts be liable for any damages whatsoever
## (including, without limitation, damages for loss of business profits, business interruption, loss of business information, or other pecuniary
## loss) arising out of the use of or inability to use the sample scripts or documentation, even if Microsoft has been advised of the possibility
## of such damages.
## Feedback: pbowden@microsoft.com
# External Constants - IT admins should feel free to customize this section to use different web folder paths
# Each WEBFOLDER_* value is the sub-folder (under --CachePath) that serves
# the corresponding installer; WEBFOLDER_FILE is the redirect page written
# into each of those folders.
WEBFOLDER_MAU="InstallMAU"
WEBFOLDER_OFFICE="InstallOffice"
WEBFOLDER_WORD="InstallWord"
WEBFOLDER_EXCEL="InstallExcel"
WEBFOLDER_POWERPOINT="InstallPowerPoint"
WEBFOLDER_OUTLOOK="InstallOutlook"
WEBFOLDER_ONENOTE="InstallOneNote"
WEBFOLDER_OFFICE2016="InstallOffice2016"
WEBFOLDER_WORD2016="InstallWord2016"
WEBFOLDER_EXCEL2016="InstallExcel2016"
WEBFOLDER_POWERPOINT2016="InstallPowerPoint2016"
WEBFOLDER_OUTLOOK2016="InstallOutlook2016"
WEBFOLDER_ONENOTE2016="InstallOneNote2016"
WEBFOLDER_ONEDRIVE="InstallOneDrive"
WEBFOLDER_SKYPEBUSINESS="InstallSkypeBusiness"
WEBFOLDER_COMPANYPORTAL="InstallCompanyPortal"
WEBFOLDER_REMOTEDESKTOP="InstallRemoteDesktop"
WEBFOLDER_FILE="default.html"
# Internal Constants - Should not be customized
# Each INSTALLER_* value is the Microsoft FWLink that redirects to the
# current CDN download for that product.
INSTALLER_MAU="https://go.microsoft.com/fwlink/?linkid=830196"
INSTALLER_OFFICE="https://go.microsoft.com/fwlink/?linkid=525133"
INSTALLER_WORD="https://go.microsoft.com/fwlink/?linkid=525134"
INSTALLER_EXCEL="https://go.microsoft.com/fwlink/?linkid=525135"
INSTALLER_POWERPOINT="https://go.microsoft.com/fwlink/?linkid=525136"
INSTALLER_OUTLOOK="https://go.microsoft.com/fwlink/?linkid=525137"
INSTALLER_ONENOTE="https://go.microsoft.com/fwlink/?linkid=820886"
INSTALLER_OFFICE2016="https://go.microsoft.com/fwlink/?linkid=871743"
INSTALLER_WORD2016="https://go.microsoft.com/fwlink/?linkid=871748"
INSTALLER_EXCEL2016="https://go.microsoft.com/fwlink/?linkid=871750"
INSTALLER_POWERPOINT2016="https://go.microsoft.com/fwlink/?linkid=871751"
INSTALLER_OUTLOOK2016="https://go.microsoft.com/fwlink/?linkid=871753"
INSTALLER_ONENOTE2016="https://go.microsoft.com/fwlink/?linkid=871755"
INSTALLER_ONEDRIVE="https://go.microsoft.com/fwlink/?linkid=823060"
INSTALLER_SKYPEBUSINESS="https://go.microsoft.com/fwlink/?linkid=831677"
INSTALLER_COMPANYPORTAL="https://go.microsoft.com/fwlink/?linkid=869655"
INSTALLER_REMOTEDESKTOP="https://go.microsoft.com/fwlink/?linkid=868963"
# Platform detection - allows this script to run on macOS and *nix variants
PLATFORM=$(uname -s)
# Print the tool name/version and a usage summary, then exit successfully.
ShowUsage() {
	printf '%s - %s\n' "$TOOL_NAME" "$TOOL_VERSION"
	printf '%s\n' "Purpose: Downloads Office installer packages from the Office CDN to a shared folder"
	printf '%s\n' "Usage: InstallerCache --CachePath:<path> [--CheckInterval:<minutes>] [--PurgeOldInstallers]"
	printf '%s\n' "Example: InstallerCache --CachePath:/Volumes/web --CheckInterval:60"
	printf '%s\n' " The <path> needs to be exposed as a web folder, such as http://webserver/folder"
	printf '%s\n' " Clients can install packages by navigating to a URL, such as http://webserver/folder/InstallWord"
	printf '\n'
	exit 0
}
# Builds an array of all the installers we want to download - IT admins should feel free to remove items they don't care about
# Side effect: (re)populates the global APP array with one FWLink per package.
BuildInstallerArray() {
APP=()
APP+=("$INSTALLER_MAU")
APP+=("$INSTALLER_OFFICE")
APP+=("$INSTALLER_WORD")
APP+=("$INSTALLER_EXCEL")
APP+=("$INSTALLER_POWERPOINT")
APP+=("$INSTALLER_OUTLOOK")
APP+=("$INSTALLER_ONENOTE")
APP+=("$INSTALLER_ONEDRIVE")
APP+=("$INSTALLER_OFFICE2016")
APP+=("$INSTALLER_WORD2016")
APP+=("$INSTALLER_EXCEL2016")
APP+=("$INSTALLER_POWERPOINT2016")
APP+=("$INSTALLER_OUTLOOK2016")
APP+=("$INSTALLER_ONENOTE2016")
APP+=("$INSTALLER_SKYPEBUSINESS")
APP+=("$INSTALLER_COMPANYPORTAL")
APP+=("$INSTALLER_REMOTEDESKTOP")
}
# Performs a reverse look-up from ID to friendly name
# $1 - one of the INSTALLER_* FWLink URLs; prints the product display name.
# NOTE(review): APPNAME is a global, so an unrecognized ID would echo the
# value left over from a previous call — confirm all callers pass known IDs.
GetAppNameFromID() {
case "$1" in
$INSTALLER_MAU) APPNAME="Microsoft AutoUpdate";;
$INSTALLER_OFFICE) APPNAME="Microsoft Office for Mac";;
$INSTALLER_WORD) APPNAME="Microsoft Word for Mac";;
$INSTALLER_EXCEL) APPNAME="Microsoft Excel for Mac";;
$INSTALLER_POWERPOINT) APPNAME="Microsoft PowerPoint for Mac";;
$INSTALLER_OUTLOOK) APPNAME="Microsoft Outlook for Mac";;
$INSTALLER_ONENOTE) APPNAME="Microsoft OneNote for Mac";;
$INSTALLER_OFFICE2016) APPNAME="Microsoft Office 2016 for Mac";;
$INSTALLER_WORD2016) APPNAME="Microsoft Word 2016 for Mac";;
$INSTALLER_EXCEL2016) APPNAME="Microsoft Excel 2016 for Mac";;
$INSTALLER_POWERPOINT2016) APPNAME="Microsoft PowerPoint 2016 for Mac";;
$INSTALLER_OUTLOOK2016) APPNAME="Microsoft Outlook 2016 for Mac";;
$INSTALLER_ONENOTE2016) APPNAME="Microsoft OneNote 2016 for Mac";;
$INSTALLER_ONEDRIVE) APPNAME="Microsoft OneDrive for Mac";;
$INSTALLER_SKYPEBUSINESS) APPNAME="Microsoft Skype for Business for Mac";;
$INSTALLER_COMPANYPORTAL) APPNAME="Microsoft Intune Company Portal for Mac";;
$INSTALLER_REMOTEDESKTOP) APPNAME="Microsoft Remote Desktop v10 for Mac";;
esac
echo "$APPNAME"
}
# Performs a reverse look-up from ID to web folder path
# $1 - an INSTALLER_* FWLink; prints "$CACHEPATH/<web folder>" using the
# global CACHEPATH set by the --CachePath argument.
GetWebFolderFromID() {
case "$1" in
$INSTALLER_MAU) FOLDER="$WEBFOLDER_MAU";;
$INSTALLER_OFFICE) FOLDER="$WEBFOLDER_OFFICE";;
$INSTALLER_WORD) FOLDER="$WEBFOLDER_WORD";;
$INSTALLER_EXCEL) FOLDER="$WEBFOLDER_EXCEL";;
$INSTALLER_POWERPOINT) FOLDER="$WEBFOLDER_POWERPOINT";;
$INSTALLER_OUTLOOK) FOLDER="$WEBFOLDER_OUTLOOK";;
$INSTALLER_ONENOTE) FOLDER="$WEBFOLDER_ONENOTE";;
$INSTALLER_OFFICE2016) FOLDER="$WEBFOLDER_OFFICE2016";;
$INSTALLER_WORD2016) FOLDER="$WEBFOLDER_WORD2016";;
$INSTALLER_EXCEL2016) FOLDER="$WEBFOLDER_EXCEL2016";;
$INSTALLER_POWERPOINT2016) FOLDER="$WEBFOLDER_POWERPOINT2016";;
$INSTALLER_OUTLOOK2016) FOLDER="$WEBFOLDER_OUTLOOK2016";;
$INSTALLER_ONENOTE2016) FOLDER="$WEBFOLDER_ONENOTE2016";;
$INSTALLER_ONEDRIVE) FOLDER="$WEBFOLDER_ONEDRIVE";;
$INSTALLER_SKYPEBUSINESS) FOLDER="$WEBFOLDER_SKYPEBUSINESS";;
$INSTALLER_COMPANYPORTAL) FOLDER="$WEBFOLDER_COMPANYPORTAL";;
$INSTALLER_REMOTEDESKTOP) FOLDER="$WEBFOLDER_REMOTEDESKTOP";;
esac
echo "$CACHEPATH/$FOLDER"
}
# Resolves an FWLink to its absolute download URL by reading the HTTP
# Location header from a HEAD request; falls back to the input URL when the
# response carries no redirect.
ResolveFWLink() {
	URL="$1"
	local target
	target=$(curl --head -s $URL | awk -v RS='\r' '/Location: / {print $2}')
	if [ "$target" != "" ]; then
		echo "$target"
	else
		echo "$URL"
	fi
}
# Gets the size of a file based on its header, then strips non-numeric characters
# $1 - URL; prints the Content-Length in bytes (empty when the header is absent).
GetDownloadSize() {
URL="$1"
# Pull the Content-Length header from a HEAD request.
local CONTENTHTTPLENGTH=$(curl --head -s $URL | awk '/Content-Length/' | cut -d ' ' -f2)
# Drop the trailing CR and any other non-digit characters.
CONTENTLENGTH=$(echo ${CONTENTHTTPLENGTH//[!0-9]/})
echo $CONTENTLENGTH
}
# Gets the size of a file from the local disk
# $1 - file name, $2 - containing folder; prints the size in bytes, or
# nothing when the file does not exist. Reads the global PLATFORM.
GetLocalSize() {
	local name="$1"
	local folder="$2"
	# stat's size option differs: BSD/macOS uses -f%z, GNU/Linux uses -c%s.
	if [ "$PLATFORM" == "Darwin" ]; then
		local bytes=($(cd "$folder" && stat -f%z "$name" 2>/dev/null))
	else
		local bytes=($(cd "$folder" && stat -c%s "$name" 2>/dev/null))
	fi
	echo $bytes
}
# Downloads the specified installer package
# $1 - resolved download URL, $2 - display name, $3 - size in MB (for the
# banner only), $4 - destination web folder.
DownloadPackage() {
local URL="$1"
local APPLICATION="$2"
local SIZE="$3"
local FOLDER="$4"
local PACKAGE=$(basename "$1")
echo "================================================================================================="
echo Application: "$APPLICATION"
echo Package: "$PACKAGE"
echo Size: "$SIZE" MB
echo URL: "$URL"
# Download into the web folder, keeping the CDN's filename (-remote-name).
(cd "$FOLDER" && curl --progress-bar --remote-name --location $URL)
}
# Create a client URL redirector
# Writes FOLDER/FILE as a tiny HTML page whose meta-refresh immediately
# redirects the browser to ./PKG (the cached installer package).
# $1 - file name (e.g. default.html), $2 - package file name, $3 - folder.
CreateRedirector() {
	local FILE="$1"
	local PKG="$2"
	local FOLDER="$3"
	if [ -f "$FOLDER/$FILE" ]; then
		(cd "$FOLDER" && rm -f "$FILE")
	fi
	(cd "$FOLDER" && touch "$FILE")
	# BUG FIX: the original used doubled quotes ("") inside the strings,
	# which the shell collapses to nothing, so the emitted HTML attributes
	# were unquoted (content=0;url=... is broken). \" preserves the quotes.
	echo "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\">" >> "$FOLDER/$FILE"
	echo "<html><head>" >> "$FOLDER/$FILE"
	echo "<meta http-equiv=\"REFRESH\" content=\"0;url=./$PKG\">" >> "$FOLDER/$FILE"
	echo "</head><body></body></html>" >> "$FOLDER/$FILE"
}
# Removes older versions of installer packages if they exist
# Deletes every *.pkg in FOLDER other than LATESTPKG.
# $1 - file name of the current package to keep, $2 - folder to purge.
PurgeOldInstallers() {
	local LATESTPKG="$1"
	local FOLDER="$2"
	for package in "$FOLDER"/*.pkg; do
		# BUG FIX: when the folder holds no .pkg files the glob stays
		# literal and the original tried to rm "*.pkg"; skip that case.
		[ -e "$package" ] || continue
		local FILE=$(basename "$package")
		if [ "$FILE" != "$LATESTPKG" ]; then
			echo "Removing old installer $FILE"
			rm "$FOLDER/$FILE"
		fi
	done
}
# Evaluate command-line arguments
# Options use the --Name:value form; ${KEY#*:} strips everything up to and
# including the first ':' to recover the value. Sets the globals CACHEPATH,
# CHECKINTERVAL and PURGEOLD used by the main loop.
if [[ $# = 0 ]]; then
ShowUsage
else
for KEY in "$@"
do
case $KEY in
--Help|-h|--help)
ShowUsage
shift # past argument
;;
--CachePath:*|-c:*|--cachepath:*)
CACHEPATH=${KEY#*:}
shift # past argument
;;
--CheckInterval:*|-i:*|--checkinterval:*)
CHECKINTERVAL=${KEY#*:}
shift # past argument
;;
--PurgeOldInstallers|-p|--purgeoldinstallers)
PURGEOLD=true
shift # past argument
;;
*)
# Any unrecognized argument shows usage and exits.
ShowUsage
;;
esac
shift # past argument or value
done
fi
## Main
# One pass downloads (or re-validates) every package in the APP array.
# With --CheckInterval the loop repeats forever, sleeping between passes;
# without it the script exits after a single pass.
while :
do
# Build an array of packages to download
BuildInstallerArray
# Build an array of each package location and download those packages
for a in "${APP[@]}"
do
# Get the friendly app name for display purposes
APPNAME=$(GetAppNameFromID "$a")
# Get the folder name on the local web server where the installer package should reside
WEBFOLDERPATH=$(GetWebFolderFromID "$a")
# Create the folder name on the local web server if it doesn't exist
if [ ! -d "$WEBFOLDERPATH" ]; then
mkdir -p "$WEBFOLDERPATH"
fi
# Resolve the FWLink to the actual download URL
PACKAGEURL=$(ResolveFWLink "$a")
# Get the installer filename
PACKAGENAME=$(basename "$PACKAGEURL")
# Get the size of the installer on the CDN
PACKAGESIZECDN=$(GetDownloadSize "$PACKAGEURL")
# Get the size of the installer on the local web server, if it exists
PACKAGESIZELOCAL=$(GetLocalSize "$PACKAGENAME" "$WEBFOLDERPATH")
# Convert the package size from bytes to megabytes
PACKAGESIZECDNMEG=$(expr $PACKAGESIZECDN / 1024 / 1024)
# Test whether we already have the installer downloaded
if [ -f "$WEBFOLDERPATH/$PACKAGENAME" ]; then
# Test whether the downloaded installer got interrupted when downloading
# (a byte-size match against the CDN's Content-Length counts as complete).
if [ "$PACKAGESIZELOCAL" == "$PACKAGESIZECDN" ]; then
# The installer is already downloaded and whole. We can stop here and move to the next package
echo "Package $PACKAGENAME already exists in the cache ...skipping"
continue
else
# The installer is present, but it's not whole, so lets remove it
echo "Package $PACKAGENAME exists in the cache but is corrupt ...removing"
(cd "$WEBFOLDERPATH" && rm -f "$PACKAGENAME")
fi
fi
# Download the installer package
DownloadPackage "$PACKAGEURL" "$APPNAME" "$PACKAGESIZECDNMEG" "$WEBFOLDERPATH"
# Create a default.html file in the web folder so that browser requests auto-navigate to the correct package
CreateRedirector "$WEBFOLDER_FILE" "$PACKAGENAME" "$WEBFOLDERPATH"
# Remove older installers if desired
if [ $PURGEOLD ]; then
PurgeOldInstallers "$PACKAGENAME" "$WEBFOLDERPATH"
fi
done
# If CheckInterval wasn't specified on the command-line, just run once
if [ "$CHECKINTERVAL" == '' ]; then
exit 0
else
# Otherwise, sleep for the specified number of minutes before checking again
echo "Sleeping for $CHECKINTERVAL minutes..."
CHECKINTERVALSECS=$(expr $CHECKINTERVAL \* 60)
# Wait until the next check interval
sleep "$CHECKINTERVALSECS"
fi
done
exit 0
| true
|
7c8bb5625b88e821514cce02da75b9bb0841c2e5
|
Shell
|
lassik/home
|
/.shrc.d/10-editor.sh
|
UTF-8
| 132
| 2.8125
| 3
|
[] |
no_license
|
# Pick the first available editor from the preference list and export it as
# both EDITOR and ALTERNATE_EDITOR.
for candidate in mg nano; do
  # command -v is the POSIX-specified, builtin replacement for which(1),
  # which is an external tool with inconsistent behavior across systems.
  if command -v "$candidate" >/dev/null 2>&1; then
    export EDITOR="$candidate"
    export ALTERNATE_EDITOR="$EDITOR"
    break
  fi
done
| true
|
30e4e3bcb33daae6739690ac2f31d29b7d0881a0
|
Shell
|
pedrovcc/RideUFSC
|
/setup.sh
|
UTF-8
| 1,084
| 3.140625
| 3
|
[] |
no_license
|
# Interactive scaffolding script: copies this Flutter boilerplate into a new
# sibling directory and rewrites its bundle id and display name.
echo "\n----- Starting a new Flutter application -----\n\n"
echo "Enter project repo name: "
read newname
testNewName=${newname//-/_} #replace "-" characters with "_"
# NOTE(review): testNewName is computed but never used below — confirm intent.
echo "\n----- Change bundleId -----\n\n"
echo "Enter bundleId: "
read newbundleID
oldBundleID="com.boilerplate"
# App name
echo "\n----- Change App Display Name -----\n\n"
echo "Enter AppName: "
read newAppName
oldAppName="BoilerPlateNameReplace"
# iOS
rm -rf Pods/
# Copy the boilerplate into a sibling directory named after the new project.
mkdir "../$newname"
cp -r ./ "../$newname"
cd "../$newname"
mv README-TEMPLATE.md README.md
cd "ios/"
# NOTE(review): sed -i "" is the BSD/macOS form; GNU sed would treat "" as an
# input file. This script appears to target macOS — confirm.
find . -type f -name "*.xcconfig" -exec sed -i "" "s/$oldAppName/$newAppName/g" {} \; #Rename boilerplate mentions on config files
find . -type f -name "*.xcconfig" -exec sed -i "" "s/$oldBundleID/$newbundleID/g" {} \; #Rename boilerplate mentions on config files
sed -i "" "s/$oldAppName/$newAppName/g" ./README.md
rm Podfile.lock
rm setup.sh
cd ..
# Android
cd android/app
sed -i "" "s/$oldBundleID/$newbundleID/g" ./build.gradle
sed -i "" "s/$oldAppName/$newAppName/g" ./build.gradle
cd ..
cd ..
# Fetch Dart dependencies and install the iOS pods for the new project.
flutter pub get
cd ios/
pod install
| true
|
56dcda634a3db25732f86a117158cb2c575e6fc8
|
Shell
|
CyanogenModXT720/openrecovery_xt720
|
/OpenRecovery/app/sdutil/diagnostics.sh
|
UTF-8
| 838
| 3.203125
| 3
|
[] |
no_license
|
#!/sbin/sh
# Recovery-environment diagnostics: dumps SD-card / apps2sd state to
# /sdcard/sdcard-info.txt (mounts, app directories, partition table, ext fs).
# Directories commonly involved in apps-to-SD setups.
dirs="
/cache/dalvik-cache
/cache/dc
/data/app
/data/app-private
/data/dalvik-cache
/data/sdext2
/sddata/app
/sddata/app-private
/sddata/dalvik-cache
/sddata/link2sd
"
# Abort unless /sdcard is mounted — the report is written there.
if ! grep -q " /sdcard " /proc/mounts ; then
echo ERROR: /sdcard is not mounted
exit
fi
# Redirect all further stdout/stderr into the report file.
exec >/sdcard/sdcard-info.txt 2>&1
date
cat /sdcard/OpenRecovery/version.txt
echo
echo Mounted filesystems
echo -------------------
cat /proc/mounts
echo
echo Directory configuration
echo -----------------------
for x in $dirs ; do
ls -ld $x
done
echo
# Per-directory file counts (only for directories that exist).
for x in $dirs ; do
if [ -d $x ] ; then
echo Directory $x contains $(ls -1 $x | wc -l) files
fi
done
echo
echo SD card partition table:
echo ------------------------
fdisk -l /dev/block/mmcblk0
echo
echo EXT partition properties:
echo -------------------------
tune2fs -l /dev/block/mmcblk0p2
| true
|
1109880724f3673c97fe668050929a2b176d023f
|
Shell
|
jjm3x3/git-shell-setup
|
/bin/relpath
|
UTF-8
| 123
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#usage:
# ... | relpath
#
# To relative path.
# Rewrites absolute paths on stdin that live under the current directory into
# relative paths; all other lines pass through unchanged.
pwd="$(pwd)"
# sed uses ',' as the delimiter because paths contain '/'. The address \,^/,
# selects lines starting with '/', and the current directory is spliced into
# the substitute pattern by closing/reopening the single-quoted script.
exec sed -E \
'
\,^/, {
s,^'"$pwd"'/,,
}
'
| true
|
4226c5a89a90604c662db14d3a617c17b5bde96f
|
Shell
|
bdronneau/dotfiles
|
/sources/node
|
UTF-8
| 254
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Sourced shell fragment: configures the "n" node version manager and an npm
# alias. Uses "return", so this file must be sourced, not executed.
if ! command -v n > /dev/null 2>&1; then
return
fi
export N_PREFIX="${HOME}/opt/n/node"
# NOTE(review): this check is identical to the first and cannot fail if the
# first passed — possibly it was meant to run after the PATH export below.
# Confirm intent before removing.
if ! command -v n > /dev/null 2>&1; then
return
fi
export PATH="${HOME}/opt/n/node/bin:${PATH}"
# npmi: install dependencies without running package lifecycle scripts.
alias npmi="npm install --ignore-scripts"
| true
|
da5591fbd48d846183a868312b65aa5ce81b4e7d
|
Shell
|
zzuzayu/uai
|
/ufileup.sh
|
UTF-8
| 782
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Upload a file or directory to UCloud UFile: stages the target under a
# per-user /tmp dir via symlink, then pushes it with filemgr-linux64.
# Arguments: $1 = path to upload, $2 = name suffix, $3 = optional bucket
# (overrides the default from config.txt).
filepath=$(cd "$(dirname "$0")"; pwd)
USER=`whoami`
folder="/tmp/${USER}_ufile/"
here=`pwd`
mkdir $folder
echo "mkdir $folder"
path=$1
# First character of the path — used to detect absolute vs relative paths.
path_0=${path:0:1}
if [ $path_0 == "/" ]; then
echo "$path"
else
# Relative path: anchor it at the invocation directory.
path="${here}/${path}"
echo "$path"
fi
# Stage the target under the temp folder via a symlink (mput follows it).
ln -s ${path} ${folder}/${USER}_data_$2
echo "ln -s ${path} ${folder}/${USER}_data_$2"
cd ${filepath}
# config.txt is expected to define the default $bucket.
source config.txt
if [ $3 ]; then
bucket=$3
echo "get bucket: $bucket"
fi
filemgr-linux64 --action mput --bucket $bucket --dir ${folder}/${USER}_data_$2/ --trimpath ${folder} --threads 2
echo "filemgr-linux64 --action mput --bucket $bucket --dir ${folder}/${USER}_data_$2/ --trimpath ${folder} --threads 2 "
# Clean up the staging symlink and temp folder.
rm "${folder}/${USER}_data_$2"
echo "rm ${folder}/${USER}_data_$2"
rm -rf $folder
echo "rm -rf $folder"
| true
|
f6f2fbdb6c603290c4d0c2f36b516d3836799991
|
Shell
|
luwangli/Agg-Evict
|
/prepare-dpdk.sh
|
UTF-8
| 1,117
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/expect -f
# Expect script driving the interactive tools/dpdk-setup.sh menu: compiles and
# loads the igb_uio module, binds two NIC ports, and reserves hugepages,
# answering each prompt automatically.
# NOTE(review): the menu option numbers (15, 31, 34, 18, 24, 22, 35) are tied
# to a particular DPDK release and shift between versions — confirm against
# the dpdk-setup.sh actually in use.
# Drop all the system caches so that we can reserve huge pages
exec echo 3 | sudo tee /proc/sys/vm/drop_caches
# Setup the dpdk
spawn "tools/dpdk-setup.sh"
# Compile the kernel module
expect "Option:"
send "15\r"
expect "Press enter to continue ..."
send "\r"
# Reset everything
expect "Option:"
send "31\r"
expect "Press enter to continue ..."
send "\r"
# Reset everything
expect "Option:"
send "34\r"
expect "Press enter to continue ..."
send "\r"
# Setup the kernel module
expect "Option:"
send "18\r"
expect "Press enter to continue ..."
send "\r"
# Setup the NIC
expect "Option:"
send "24\r"
expect "Enter PCI address of device to bind to IGB UIO driver:"
send "0000:04:00.0\r"
expect "Press enter to continue ..."
send "\r"
# Setup the NIC
expect "Option:"
send "24\r"
expect "Enter PCI address of device to bind to IGB UIO driver:"
send "0000:04:00.1\r"
expect "Press enter to continue ..."
send "\r"
# Setup the huge pages
expect "Option:"
send "22\r"
expect "Number of pages for node0:"
send "2048\r"
expect "Press enter to continue ..."
send "\r"
expect "Option:"
send "35\r"
|
5c13540b612d7fd2a6250d3bc90e4c23ca8d0945
|
Shell
|
karolgorecki/troll
|
/troll.sh
|
UTF-8
| 813
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# troll script
# @author Karol Górecki
# Reads lines from the file named by $1 and, for each line, rewinds the
# system clock one day and commits the line to troll.sh — fabricating a
# backdated git history.
# WARNING: mutates the system time via "date --set"; requires root.
COUNT=0
# init troll script
rm troll.sh
touch troll.sh
# init repo
rm README.md
echo "TROLL repo" >> README.md
git add README.md
# create commits
while IFS='' read -r line
do
# inc COUNT
COUNT=`expr $COUNT + 1`
name=$line
# get back to the future
DATE_NOW=$(date +%s)
SECONDS_A_DAY=86400
# Step the clock back by one day per processed line.
DATE_NOW=`expr $DATE_NOW - $SECONDS_A_DAY`
DATE_OK=`date --date="@$DATE_NOW" +"%F %T"`
# set the date
date --set="$DATE_OK"
# just debug info
echo "Iteration number: $COUNT"
echo "Current date: $DATE_OK"
# feed the troll
echo "$line" >> troll.sh
# create git commits
git add troll.sh
git commit -m "Added new line: $line"
done < $1
echo "Created script"
echo "--------------"
cat troll.sh
| true
|
7640a0a5f816bdeb5dda4c8d9ff6d43b174d272d
|
Shell
|
gzchenhj/config
|
/LVS 负载均衡器脚本/ipvs_server.sh
|
UTF-8
| 1,011
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
#name chenhaijun
#filename LVS server start stop file
#version 1.0
# Init-style control script for an LVS director: configures the virtual IP on
# ens33:0 and registers the real servers with ipvsadm (round-robin, DR mode).
. /etc/init.d/functions
VIP=192.168.36.158
PORT=80
RIP=(
192.168.36.139
192.168.36.143
)
# Bring up the VIP alias/route and register the virtual service + real servers.
function start(){
    /sbin/ipvsadm -C                            # flush all existing rules
    /usr/sbin/ifconfig ens33:0 $VIP/24 up       # add the VIP as an interface alias
    /usr/sbin/route add -host $VIP dev ens33    # host route for the VIP
    /sbin/ipvsadm -A -t $VIP:$PORT -s rr -p 20  # virtual service: round-robin, 20s persistence
    for ((i=0; i<${#RIP[*]};i++))
    do
        /sbin/ipvsadm -a -t $VIP:$PORT -r ${RIP[$i]} -g -w 1  # real server: direct routing, weight 1
    done
}
# Tear down the rules, alias and route added by start().
function stop (){
    /sbin/ipvsadm -C
    /usr/sbin/ifconfig ens33:0 down
    /usr/sbin/route del -host $VIP dev ens33
}
case $1 in
start)
    start
    action "ipvs is started" /bin/true
    ;;
stop)
    stop
    action "ipvs is stoped" /bin/true
    ;;
restart)
    stop
    # BUGFIX: was "/true/bin" (reversed path) — action's status command must
    # be /bin/true, as in every other branch.
    action "ipvs is stoped" /bin/true
    start
    action "ipvs is started" /bin/true
    ;;
*)
    echo "USAGE:$0 start|stop|restart"
    ;;
esac
| true
|
15ece291d1aa6b48c74b1c51c3a6051df22764a9
|
Shell
|
sk3l/movein
|
/movein.sh
|
UTF-8
| 3,235
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
# Utility functions
# Print the movein usage/help text to stdout.
# printf '%b\n' interprets backslash escapes exactly like echo -e.
usage() {
    printf '%b\n' "\nmovein - Make Yourself @ \$HOME\n"
    printf '%b\n' "\tA provisioning script for development environments"
    printf '%b\n' "\nUsage:\n"
    printf '%b\n' " movein.sh [-h] [-l logfile] [-u user] <crate1> <crate2> ..."
    printf '%b\n' "\nWhere:"
    printf '%b\n' " -h = show this usage"
    printf '%b\n' " -l logfile = location of movein log (default=/var/log)"
    printf '%b\n' " -u user = name of base movein user (default=\$USER)"
    printf '%b\n' " <crateN> = name of script in ./crates to source"
    printf '%b\n' ""
}
# ANSI escape sequences for colored log output (green = info, red = error).
NO_COLOR="\033[0m"
INFO_COLOR="\033[1;32m"
ERR_COLOR="\033[0;31m"
# Ask a yes/no question until answered; prints 1 (yes) or 0 (no) on stdout.
# $1 - description of the action being confirmed.
prompt_user() {
    local question=$1
    local answer
    local reply=0
    while :; do
        read -p "Do you wish to ${question}? " answer
        if [[ $answer == [Yy]* ]]; then
            reply=1
            break
        elif [[ $answer == [Nn]* ]]; then
            break
        else
            echo "Please answer (Y)es or (N)o."
        fi
    done
    echo ${reply}
}
# Prompt repeatedly until a non-empty value is entered; prints it on stdout.
# $1 - name of the variable being requested (used only in the prompt).
# Note: "value" is intentionally not declared local (matches prior behavior).
read_user_variable() {
    local var_name=$1
    while :; do
        read -p "Please enter a value for ${var_name}" value
        [[ -n ${value} ]] && break
    done
    echo ${value}
}
# Run a shell command as the given user (via sudo when that user differs from
# the invoking $USER), optionally echoing the command first.
# Arguments:
#   $1 - command string (passed to eval; callers must not pass untrusted input)
#   $2 - user to run the command as
#   $3 - optional: 1 to echo the command before running it (default 0)
function run_as_user() {
    cmd=$1
    user=$2
    # BUGFIX: was ${3:0}, which is substring expansion (i.e. just "$3"); the
    # intended form is ${3:-0} so the flag defaults to 0 when omitted.
    print_cmd=${3:-0}
    if [[ ${print_cmd} -eq 1 ]];then
        echo -e "${cmd}"
    fi
    if [[ ${user} != ${USER} ]];then
        cmd="sudo -u ${user} ${cmd}"
    fi
    eval ${cmd}
}
# Run a shell command, elevating with "sudo -E" when not already root,
# optionally echoing the command first.
# Arguments:
#   $1 - command string (passed to eval; callers must not pass untrusted input)
#   $2 - optional: 1 to echo the command before running it (default 0)
function run_as_sudo() {
    cmd=$1
    # BUGFIX: was ${2:0}, which is substring expansion (i.e. just "$2"); the
    # intended form is ${2:-0} so the flag defaults to 0 when omitted.
    print_cmd=${2:-0}
    if [[ ${print_cmd} -eq 1 ]];then
        echo -e "${cmd}"
    fi
    if [[ "$EUID" -ne 0 ]];then
        cmd="sudo -E ${cmd}"
    fi
    eval ${cmd}
}
# Print an informational message in green.
log_info() {
  echo -e "${INFO_COLOR}${1}${NO_COLOR}"
}
# Print an error message in red.
log_error() {
  echo -e "${ERR_COLOR}${1}${NO_COLOR}"
}
# ---------------------------------------------------------------------------
# Main flow: parse options, tee all output to the log file, detect the host
# distro, then source each crate script named on the command line.
# ---------------------------------------------------------------------------
# Variables
LOG=/var/log/movein-$(date "+%Y%m%d_%H_%M_%S")
DISTRO=""
# Parse cmd-line arguments
while getopts "d:hl:u:" option; do
    case "${option}" in
        d)
            DISTRO=${OPTARG}
            ;;
        l)
            LOG=${OPTARG}
            ;;
        u)
            BASE_USER=${OPTARG}
            ;;
        h)
            usage
            exit 1
            ;;
    esac
done
shift "$((OPTIND-1))"
# Write output to the log file and stdout
exec &> >(tee -a "$LOG")
log_info "\n(movein)[I] - Starting movein"
log_info "\n(movein)[I] - Examining host Linux distro"
OS_TYPE=""
OS_VERSION=""
if [[ -f /etc/redhat-release ]]; then
    if grep 'CentOS' /etc/redhat-release; then
        log_info "* Detected CentOS distro *"
        OS_TYPE="centos"
    else
        log_info "* Detected Red Hat Enterprise Linux distro *"
        OS_TYPE="rhel"
    fi
elif lsb_release -i -s 2>&1 | grep -q "Ubuntu"; then
    log_info "* Detected Ubuntu distro *"
    OS_TYPE="ubuntu"
elif lsb_release -i -s 2>&1 | grep -q "Debian"; then
    log_info "* Detected Debian distro *"
    OS_TYPE="debian"
else
    log_error "(movein)[E] - Detected unknown distro"
    log_error "Aborting movein due to unrecognized OS"
    # BUGFIX: was "return 1", which is invalid at the top level of an executed
    # script (this file is run directly per usage()); exit, as the h) case does.
    exit 1
fi
# Custom scripts from argv are sourced here
for SCRIPT in $@; do
    log_info "\n(movein)[I] - Sourcing script $SCRIPT"
    if [[ ! -f ./crates/$SCRIPT ]]; then
        log_error "(movein)[W] - No crate named $SCRIPT found; skipping"
    else
        source ./crates/$SCRIPT
    fi
done
log_info "\n(movein)[I] - Movein has completed"
| true
|
a673259b598b1a66d98194c32241d2b5263d9bd3
|
Shell
|
urjaman/i586con
|
/brext/board/rootfs-overlay/etc/init.d/S89gpm
|
UTF-8
| 401
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/sh
# BusyBox-style init script for gpm (console mouse daemon).
start() {
echo "Starting gpm (eventually, maybe)"
modprobe mousedev
# Delayed background start — gives the input device time to appear.
(sleep 10; gpm -m /dev/input/mice -t imps2) &
return 0
}
stop() {
# -k kills the running gpm instance.
gpm -k
return 0
}
restart() {
stop
sleep 1
start
return 0
}
case "$1" in
start|stop|restart)
"$1";;
reload)
# Restart, since there is no true "reload" feature.
restart;;
*)
echo "Usage: $0 {start|stop|restart|reload}"
exit 1
esac
| true
|
49490d493127e547b91ef02630a9c12a67022e2b
|
Shell
|
miur/tenjo
|
/etc/alias.sh
|
UTF-8
| 1,129
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# vim: ft=sh
#
# SPDX-FileCopyrightText: 2020 Dmytro Kolomoiets <amerlyq+tenjo@gmail.com>
#
# SPDX-License-Identifier: Apache-2.0
#
#%SUMMARY: define optional aliases (if not present) and cleanup after yourself
#%BUG: loaded only on "login" and never inherited by newly opened X terminals
# https://bbs.archlinux.org/viewtopic.php?id=248224 ⌇⡞⡁⡃⡂
#%
# _tenjo_alias NAME WORDS...: define alias NAME=WORDS unless a command with
# that name already exists on the system.
function _tenjo_alias { local nm=$1; shift
  command -v -- "$nm" >/dev/null 2>&1 && return
  # shellcheck disable=SC2139
  builtin alias -- "$nm=$*"
}
# One-shot initializer: registers the tenjo aliases, then removes both helper
# functions from the shell via EXIT (zsh) / RETURN (bash) traps.
function _tenjo_init {
  # BET:(zsh-only): $ { ... } always { unfunction -m "_tenjo_*"; }
  # Re: Local inner functions ⌇⡞⡂⣣⡹
  # https://www.zsh.org/mla/users/2011/msg00207.html
  # BET:USE: (){ setopt local_options local_traps xtrace; ls }
  function TRAPEXIT { unset -f _tenjo_alias _tenjo_init; }  # if $ZSH_NAME
  [[ -n ${BASH-} ]] && trap 'TRAPEXIT; unset -f TRAPEXIT; trap - RETURN' RETURN
  ## [_] FIND: how to define local name for function ⌇⡞⡁⠮⢮
  # declare -rn A=_tenjo_alias
  _tenjo_alias t tenjo
  _tenjo_alias ta tenjo add
  _tenjo_alias te tenjo edit
  _tenjo_alias tx tenjo expand
}
_tenjo_init
| true
|
b60fc947d63e686e803b1b518b7c82aa68208db3
|
Shell
|
474420502/focus
|
/tree/rsync.sh
|
UTF-8
| 498
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
# Copy a test file from one package directory to another and rewrite its Go
# package declaration to match the destination.
# NOTE(review): this function shadows the system "rsync" command for the rest
# of the script — confirm that is intended.
rsync() {
FILE_NAME=$3 #iterator_test.go
SRC_DIR_NAME=$1 #vbtkey dir
DEST_DIR_NAME=$2 # vbtdupkey dir
cp ./$SRC_DIR_NAME/$FILE_NAME ./$DEST_DIR_NAME/$FILE_NAME
sed -i "s/package $SRC_DIR_NAME/package ${DEST_DIR_NAME}/" ./$DEST_DIR_NAME/$FILE_NAME
}
# Sync iterator_test from vbtkey into its sibling key-variant packages.
for dst in "vbtkeydup" "avlkey" "avlkeydup"
do
rsync "vbtkey" $dst "iterator_test.go"
done
# Sync iterator_test from vbt into its sibling variant packages.
for dst in "vbtdup" "avl" "avldup"
do
rsync "vbt" $dst "iterator_test.go"
done
| true
|
3771b9f2ba948b3bd11dbc218640cb6b89bca5cc
|
Shell
|
happyshi0402/yikai
|
/start.sh
|
UTF-8
| 1,015
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#!/bin/sh
# name:start_web
# start web
# Restart script for the zkhc_webhook gunicorn service: kills whatever holds
# the service port, then relaunches gunicorn with dated log files.
base_path=/home/zkhc_wsf/zkhc_webhook/
soft_path=${base_path}
web_path=${base_path}
log_path=${base_path}/logs
web_soft_name=manage
log_name=zkhc_webhook_log
web_port=1571
# Kill any process currently listening on the service port.
for port in ${web_port}
do
    lsof -i:$port | awk '{print $2}' | tail -n +2| while read id
    do
        kill -9 $id
    done
done
echo "kill old system"
cd ${base_path}
today=`date +%Y-%m-%d`
# BUGFIX: a stray "ls" was glued onto this echo argument, printing
# "restart new codels".
echo "restart new code"
# NOTE(review): the echoed command below sends stdout to the .log file, but
# the command actually launched swaps the two targets (stdout -> _error.log,
# stderr -> .log). Left as-is; confirm which is intended.
echo "nohup /home/zkhc_wsf/zkhc_wsf_env/bin/gunicorn --chdir ${web_path} -w 4 -k gevent -b 0.0.0.0:${web_port} ${web_soft_name}:app 1>> ${log_path}/${today}_${log_name}.log 2>> ${log_path}/${today}_${log_name}_error.log&"
nohup /home/zkhc_wsf/zkhc_wsf_env/bin/gunicorn --chdir ${web_path} -w 4 -k gevent -b 0.0.0.0:${web_port} ${web_soft_name}:app 1>> ${log_path}/${today}_${log_name}_error.log 2>> ${log_path}/${today}_${log_name}.log&
echo $! >> service.pid
echo "restart code over"
#cat ${log_path}/${today}_${log_name}_error.log
lsof -i:${web_port}
| true
|
b836b4ac012ea3295f50aa5579ddf60ad3276ccb
|
Shell
|
ibrarahmad/pg-tpch
|
/conf/pgtpch_defaults
|
UTF-8
| 3,270
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Shared configuration + helper functions for the pg-tpch benchmark scripts.
# Configuration variables
# Scale factor. 1 = 1GB, 10 = 10GB. TPC-H has rules about which scale factors
# are considered valid for comparative purposes.
SCALE=25 #GB
# Other configuration variables
BASEDIR=$(dirname "$0")
# Resolve BASEDIR to an absolute path.
BASEDIR=$(cd "$BASEDIR"; pwd)
TPCHTMP=$HOME/tpch/tpch_tmp
PGDATADIR=$HOME/tpch/pgdata${SCALE}GB
PGPORT=5432
REMAKE_DATA=true
DB_NAME="tpch"
POPULATE_DB=true
CREATE_MIN_INDEXES=false
CREATE_ALL_INDEXES=true
PERFDATADIR=perfdata
# Number of logical CPUs on this host.
CORES=`grep -c ^processor /proc/cpuinfo`
PGUSER=$USER
PGBINDIR=/usr/local/pgsql.11/bin/
LOGGING=false
# Install teardown() function to kill any lingering jobs
# Stops the Postgres server and kills remaining background jobs (TERM first,
# then KILL for survivors). Registered on EXIT unless $DEBUG is set.
teardown() {
echo "Cleaning up before exiting"
sudo -u $PGUSER $PGBINDIR/pg_ctl stop -m fast -D "$PGDATADIR" 2>/dev/null && sleep 1
JOBS=$(jobs -p)
test -z "$JOBS" || { kill $JOBS && sleep 2; }
JOBS=$(jobs -p)
test -z "$JOBS" || kill -9 $JOBS
}
test -z "${DEBUG-}" && trap "teardown" EXIT
# Set up perf
# Relax kernel settings that perf profiling needs; each change is made via
# sudo tee only when the current value is insufficient.
perf_set_kernel_params() {
if [ -r /proc/sys/kernel/kptr_restrict ] && [ $(cat /proc/sys/kernel/kptr_restrict) -ne 0 ]; then
echo "Perf requires reading kernel symbols."
echo 0 | sudo tee /proc/sys/kernel/kptr_restrict
fi
if [ -r /proc/sys/kernel/perf_event_paranoid ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -ne -1 ]; then
echo "Need to enable the reading of performance events."
echo -1 | sudo tee /proc/sys/kernel/perf_event_paranoid
fi
if [ -r /proc/sys/kernel/perf_event_mlock_kb ] && [ $(cat /proc/sys/kernel/perf_event_mlock_kb) -lt 1024 ]; then
echo "Need to give more memory to perf."
echo 1024 | sudo tee /proc/sys/kernel/perf_event_mlock_kb
fi
}
# Restart and drop caches
# Stops Postgres, drops the OS page cache for cold-cache runs, then restarts
# the server pinned to CPU 2 and waits until it reports running.
restart_drop_caches() {
echo "Restart postgres and drop caches."
sudo -u $PGUSER $PGBINDIR/pg_ctl stop -D $PGDATADIR
sync && echo 3 | sudo tee /proc/sys/vm/drop_caches
sudo -u $PGUSER taskset -c 2 $PGBINDIR/postgres -D "$PGDATADIR" -p $PGPORT &
PGPID=$!
while ! sudo -u $PGUSER $PGBINDIR/pg_ctl status -D $PGDATADIR | grep "server is running" -q; do
echo "Waiting for the Postgres server to start"
sleep 3
done
}
# Stopwatch helper: with no argument prints the current epoch seconds; with a
# start-time argument prints the elapsed time since then as H:MM:SS.
# etime/dt/ds/dm/dh are deliberately left global so callers can inspect them.
timer() {
    if [[ $# -eq 0 ]]; then
        date '+%s'
        return
    fi
    local stime=$1
    etime=$(date '+%s')
    # An empty start time means "now" (zero elapsed).
    if [[ -z "$stime" ]]; then stime=$etime; fi
    dt=$((etime - stime))
    ds=$((dt % 60))
    dm=$(((dt / 60) % 60))
    dh=$((dt / 3600))
    printf '%d:%02d:%02d' "$dh" "$dm" "$ds"
}
# Abort the script: print the message and exit with the original status (-1,
# i.e. 255).
die() {
    printf '%s\n' "$*"
    exit -1
}
# Check for the running Postgres; exit if there is any on the given port
# (column 2 of lsof output is the PID).
PGPORT_PROCLIST="$(lsof -i tcp:$PGPORT | tail -n +2 | awk '{print $2}')"
if [[ $(echo "$PGPORT_PROCLIST" | wc -w) -gt 0 ]];
then
echo "The following processes have taken port $PGPORT"
echo "Please terminate them before running this script"
echo
for p in $PGPORT_PROCLIST;
do
ps -o pid,cmd $p
done
exit -1
fi
# Check if a Postgres server is running in the same directory
if sudo -u $PGUSER $PGBINDIR/pg_ctl status -D $PGDATADIR | grep "server is running" -q; then
echo "A Postgres server is already running in the selected directory. Exiting."
sudo -u $PGUSER $PGBINDIR/pg_ctl status -D $PGDATADIR
exit -2
fi
cd "$BASEDIR"
| true
|
67bffa614d183b2b5da48fcf37cb1b0af1c512a6
|
Shell
|
fwestling/PruneTreeLS
|
/phenotyping-lidar/respace
|
UTF-8
| 2,000
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Utility to artificially adjust the spacing of trees
# Reads a point cloud on stdin, joins each point with a per-trunk shift from a
# reference binary file, and offsets x by shift*spacing. Built on the "comma"
# CSV toolkit (csv-join/csv-eval/csv-shuffle).
export readonly name=$( basename $0 )
# NOTE(review): "export readonly name=..." passes the literal word "readonly"
# to export as a variable name — it does not make $name read-only. Confirm
# intent.
source $( type -p comma-application-util ) || (error "comma not installed")
function errcho { (>&2 echo "$name: $1") }
# Print an error to stderr and exit.
function error
{
errcho "error: $1"
exit 1
}
# Core transform: shift each point's x coordinate by its trunk's shift value
# scaled by the requested spacing.
function go() {
# Input: a point cloud of the (segmented) density block I want to move
# Input: Amount to change spacing by (in metres)
bin=$1 # binary file format
fields=$2 # file fields
spacing=$3
REF_ID="/home/fwes7558/src/tree-crops/phenotyping-lidar/translations.bin"
ref_bin=ui,d
ref_fields=id,shift
xf=$(echo $fields | csv-fields clear --except=x,y,z)
cat | csv-join --binary=$bin --fields=$fields "$REF_ID;binary=$ref_bin;fields=$ref_fields" |
csv-eval --binary=$bin,$ref_bin --fields=$xf,,shift "x= x + shift*$spacing" |
csv-shuffle --binary=$bin,$ref_bin --fields=$fields -o=$fields
}
# Option table consumed by comma-options-to-name-value below.
function option-description
{
cat <<eof
--binary=[<format>]; default="t,3d,ui"; Binary format used in point clouds
--fields=[<fields>]; default="t,x,y,z,id"; Point cloud fields, must include "x,y,z,id". "id" is the trunk id
--shift=[<shift>]; default=1; How many metres to adjust the tree spacing
eof
}
function usage
{
cat <<eof
$name artificially re-spaces the trees in the given point cloud
usage: cat pointcloud | $name <operation> [options]
options:
$( option-description | sed 's/^/    /g' )
eof
exit 1
}
if (( $( comma_options_has --help $@ ) || $( comma_options_has -h $@ ) )) ; then usage ; fi
options=$( option-description | comma-options-to-name-value "$@" ) || error "invalid command line options"
comma_path_value_to_var --prefix="options" <<< "$options"
eval "$( option-description | comma-options-to-name-value "$@" | comma_path_value_mangle )"
set -e # Kill if anything errors out; we don't want it to run everything on bad datasets
cat | go $options_binary $options_fields $options_shift
| true
|
91c5b0db9bc3d54f14b6532ba9ecb52f6f8fb295
|
Shell
|
qixin5/debloating_study
|
/expt/debaug/benchmark/grep-2.19/testscript/I0/4
|
UTF-8
| 151
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Debloating test harness: runs the candidate grep binary ($1) on /etc/hosts
# with a pattern expected to yield no matches, recording output + exit status.
# Arguments: $1=binary, $2=output dir, $3=timeout (seconds), $4=input dir.
BIN=$1
OUTDIR=$2
TIMEOUT=$3
INDIR=$4
#No result
# -k 9 sends KILL 9s after the soft timeout expires.
timeout -k 9 ${TIMEOUT}s $BIN 127.0.0.13 /etc/hosts &>$OUTDIR/o4
echo "$?" >>$OUTDIR/o4
| true
|
e16793629eab9574a37a58e4183a69455e58a383
|
Shell
|
Ankk4/bash-setup
|
/install.sh
|
UTF-8
| 1,849
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install nodejs
# Install Node.js from the NodeSource repo unless it is already on PATH.
installNode(){
hash node &> /dev/null
if [ $? -eq 0 ]; then
read -p "Nodejs is already installed. Press any key to continue." rk
else
# Add personal package, more up to date than standard ubuntu.
curl -sL https://deb.nodesource.com/setup | sudo bash -
# Install node and accept all updates (-y)
sudo apt-get install -y nodejs
read -p "Node installed. Press any key to continue." rk
fi
}
# Install Apache + PHP unless already present, then restart the service.
installApache(){
  # BUGFIX: was "hash apache" — the Ubuntu binary is apache2, so the check
  # could never succeed and the install branch always ran.
  hash apache2 &> /dev/null
  if [ $? -eq 0 ]; then
    read -p "Apache is already installed. Press any key to continue." rk
  else
    sudo apt-get install -y apache2
    # -y added for consistency with the other install functions (avoids an
    # interactive prompt in a scripted install).
    sudo apt-get install -y php5 libapache2-mod-php5
    # BUGFIX: was "sudo service restart apache2" — service(8) takes the unit
    # name first: "service apache2 restart".
    sudo service apache2 restart
    read -p "Apache installed. Press any key to continue" rdk
  fi
}
# Install mysql-server unless already present, then open a root mysql shell.
# NOTE(review): apt-get here lacks -y (it will prompt), and "mysql -u root -p"
# is interactive — confirm this function is meant for attended runs only.
installMysql(){
type mysql >/dev/null 2>&1
if [ $? -eq 0 ]; then
read -p "Mysql-server is already installed. Press any key to continue." rk
else
sudo apt-get install mysql-server
mysql -u root -p
fi
}
# Install MongoDB from the 10gen apt repo unless already present.
installMongo(){
  type mongo >/dev/null 2>&1
  if [ $? -eq 0 ]; then
    read -p "MongoDB is already installed. Press any key to continue." rk
  else
    sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
    # BUGFIX: the repo file was written as "mongodb.lis" — apt only reads
    # files in sources.list.d whose names end in ".list", so the repo was
    # silently ignored and the install below could not find mongodb-org.
    echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | sudo tee /etc/apt/sources.list.d/mongodb.list
    sudo apt-get update
    sudo apt-get install -y mongodb-org
    sudo service mongod start
    # Check the log to confirm the daemon started (default port 27017).
    cat /var/log/mongodb/mongod.log
  fi
}
# Install git unless already present.
installGit(){
type git >/dev/null 2>&1
if [ $? -eq 0 ]; then
read -p "Git is already installed. Press any key to continue." rk
else
sudo apt-get install git
fi
}
# Install unzip unless already present.
installUnzip(){
type unzip >/dev/null 2>&1
if [ $? -eq 0 ]; then
read -p "Unzip is already installed. Press any key to continue." rk
else
sudo apt-get install unzip
fi
}
| true
|
a6e90f48d25a76c89e1fec60ccc0c795008f7179
|
Shell
|
petronny/aur3-mirror
|
/kpass-tools/PKGBUILD
|
UTF-8
| 673
| 2.515625
| 3
|
[] |
no_license
|
# $Id$
# Maintainer: Brian De Wolf <arch@bldewolf.com>
# Arch Linux PKGBUILD for kpass-tools (tools for KeePass 1.x databases).
pkgname=kpass-tools
pkgver=6
pkgrel=1
pkgdesc="kpass-tools is a set of experimental tools for using KeePass 1.x databases."
url="https://github.com/bldewolf/kpass-tools"
arch=('i686' 'x86_64')
license=('GPL')
depends=('libkpass' 'util-linux')
makedepends=('pkgconfig' 'intltool')
conflicts=()
replaces=()
backup=()
source=(https://github.com/bldewolf/$pkgname/releases/download/$pkgname-$pkgver/$pkgname-$pkgver.tar.gz)
md5sums=('81e7895326d7b00db3b4137e78a0bfe0')
build() {
cd "$srcdir/$pkgname-$pkgver"
./configure --prefix=/usr
# NOTE(review): "|| return 1" is a legacy makepkg idiom; modern makepkg
# already aborts on a failing command in build().
make || return 1
}
package() {
cd "$srcdir/$pkgname-$pkgver"
make DESTDIR="$pkgdir/" install
}
| true
|
06ea862e6c2524f17c5c55cbf3b876911c4a9710
|
Shell
|
dadrc/scripts
|
/list-processes
|
UTF-8
| 288
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/zsh
# List the command names (basename of argv[0]) of all running processes by
# walking /proc directly — a ps/pgrep-free process lister.
set -o shwordsplit
# Numeric entries under /proc are PIDs.
procs=$(ls /proc | grep -oP "[0-9]*")
for pid in $procs; do
if [[ -d "/proc/$pid/" ]]; then
cmdline=$(cat /proc/$pid/cmdline)
if [[ -n "$cmdline" ]]; then
# NOTE(review): /proc/*/cmdline is NUL-separated; relying on word-splitting
# and cut here keeps only the first space-separated token — confirm that
# dropping the rest is acceptable.
cmdline=$(echo $cmdline | cut -d' ' -f1)
echo ${cmdline##*/}| tr -d ' :'
fi
fi
done
| true
|
d869b6d34416b4f7cd30859e6d121b56671baea1
|
Shell
|
georgegoh/k8s-enablement
|
/scenarios/5_workload-tenancy/check.sh
|
UTF-8
| 812
| 3.671875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Scenario check: stress the "accounts" service's memory and verify that its
# cgroup limit keeps usage at or below 200MB.
NS=accounts
SERVICE_EP=`kubectl -n $NS get -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' svc accounts`
# scale down to 1 pod.
kubectl -n $NS scale --replicas=1 deploy/accounts
# Increase CPU.
# Increase Mem.
curl -X POST $SERVICE_EP/mem_workers\?value\=6
# Validate CPU.
# NOTE(review): CPU usage is read and scaled but never asserted below —
# confirm whether a CPU check was intended.
CPU_USAGE=`kubectl -n $NS exec svc/accounts -- cat /sys/fs/cgroup/cpu/cpuacct.usage`
CPU=`expr ${CPU_USAGE} / 1000`
# Validate Mem.
MEM_USAGE=`kubectl -n $NS exec svc/accounts -- cat /sys/fs/cgroup/memory/memory.usage_in_bytes`
MEM_MB=`expr ${MEM_USAGE} / 1024 / 1024`
echo
echo "Mem used:" $MEM_MB
if [ "$MEM_MB" -gt 200 ]; then
echo -e "\033[0;31m[FAIL]\033[0m Memory used exceeds 200MB, and was not limited as required."
else
echo -e "\033[0;32m[PASS]\033[0m Memory used is below the limit."
fi
|
5ca9ee9388943a0fcad771a9177513ca12bc16ea
|
Shell
|
dgoltzsche/faasm
|
/bin/entrypoint_upload.sh
|
UTF-8
| 116
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
# Container entrypoint: run code generation, then exec the requested command.
# BUGFIX/robustness: expansions are now quoted so paths containing spaces do
# not word-split or glob.
THIS_DIR=$(dirname "$(readlink -f "$0")")
# Run codegen
"$THIS_DIR"/entrypoint_codegen.sh
# Replace this shell with the container's main command.
exec "$@"
| true
|
6a19b952895f585314730164e503c6ebbc49194a
|
Shell
|
jsdelivrbot/trunk
|
/projects/home/roles/Bash/files/backup.bash
|
UTF-8
| 901
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Encrypted home backup: archives $HOME (minus bulky/excluded trees, which are
# captured as directory listings instead) plus /etc and /var/lib/emby, GPG-
# encrypts the stream with a passphrase file, and atomically replaces the old
# backup only on success.
set -e -x
# Remove a stale partial backup from a previous failed run.
if [ -e ~/backup.tmp ]
then
rm ~/backup.tmp
fi
# Record listings of the trees that are excluded from the archive itself.
ls -aR ~/Downloads > ~/Downloads.ls || true
ls -aR ~/Videos > ~/Videos.ls || true
ls -aR ~/VirtualBox\ VMs > ~/VirtualBox\ VMs.ls || true
find ~ -maxdepth 1 \
-not -path ~ \
-not -path ~/backup.key -not -path ~/backup.tmp -not -path ~/backup.tar -not -path ~/backup.tar.gz -not -path ~/backup.tar.gz.gpg \
-not -path ~/Downloads \
-not -path ~/Videos \
-not -path ~/VirtualBox\ VMs \
-not -path ~/.wine \
-not -path ~/.PlayOnLinux -not -path ~/PlayOnLinux\'s\ virtual\ drives \
-not -path ~/.vagrant.d \
| sed 's/.*/"&"/' \
| xargs sudo tar -cz \
/etc \
/var/lib/emby \
| gpg --symmetric --passphrase-file ~/backup.key --batch --output ~/backup.tmp \
&& rm ~/*.ls
# Only replace the previous backup once the new one completed successfully.
if [ -e ~/backup.tar.gz.gpg ]
then
rm ~/backup.tar.gz.gpg
fi
mv ~/backup.tmp ~/backup.tar.gz.gpg
|
a3a96bf532dbe0c49ca2d6c83fdc7de03c0351b7
|
Shell
|
Senserk/NetNs
|
/list.sh
|
UTF-8
| 130
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# List the network interfaces inside the network namespace named by $1.
# Usage: list.sh <netns-name>
# BUGFIX/robustness: was "if [ $1 ]" with $1 unquoted — that breaks on names
# containing whitespace/globs; the namespace name is now quoted everywhere.
if [ -n "${1:-}" ]
then
	echo "Interfaces are: "
	sudo ip netns exec "$1" ip link list
else
	echo "No argument!"
fi
| true
|
3e41dfdbafc11789356e1dc0f4b00becb8a41dff
|
Shell
|
korkin25/iptables-wrappers
|
/test/test.sh
|
UTF-8
| 2,427
| 3.734375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test for the iptables-wrapper: verifies that the iptables symlink stays
# unresolved until first use, then resolves to the requested mode ($1,
# "legacy" or "nft") after kubelet-style rules exist.
set -eu
mode=$1
# Locate the sbin directory that holds the iptables binaries.
if [ -d /usr/sbin -a -e /usr/sbin/iptables ]; then
    sbin="/usr/sbin"
elif [ -d /sbin -a -e /sbin/iptables ]; then
    sbin="/sbin"
else
    echo "ERROR: iptables is not present in either /usr/sbin or /sbin" 1>&2
    exit 1
fi
# Assert that "iptables" still points at the wrapper (mode not yet chosen).
ensure_iptables_undecided() {
    iptables=$(realpath "${sbin}/iptables")
    if [ "${iptables}" != "${sbin}/iptables-wrapper" ]; then
        echo "iptables link was resolved prematurely! (${iptables})" 1>&2
        exit 1
    fi
}
# Assert that "iptables" has been resolved to the expected backend ($1).
ensure_iptables_resolved() {
    expected=$1
    iptables=$(realpath "${sbin}/iptables")
    if [ "${iptables}" = "${sbin}/iptables-wrapper" ]; then
        echo "iptables link is not yet resolved!" 1>&2
        exit 1
    fi
    # iptables -V prints e.g. "iptables v1.8.4 (nf_tables)".
    version=$(iptables -V | sed -e 's/.*(\(.*\)).*/\1/')
    case "${version}/${expected}" in
        legacy/legacy|nf_tables/nft)
            return
            ;;
        *)
            echo "iptables link resolved incorrectly (expected ${expected}, got ${version})" 1>&2
            exit 1
            ;;
    esac
}
ensure_iptables_undecided
# Initialize the chosen iptables mode with a subset of kubelet's rules
iptables-${mode} -t nat -N KUBE-MARK-DROP
iptables-${mode} -t nat -A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000
iptables-${mode} -t filter -N KUBE-FIREWALL
iptables-${mode} -t filter -A KUBE-FIREWALL -m comment --comment "kubernetes firewall for dropping marked packets" -m mark --mark 0x8000/0x8000 -j DROP
iptables-${mode} -t filter -I OUTPUT -j KUBE-FIREWALL
iptables-${mode} -t filter -I INPUT -j KUBE-FIREWALL
ensure_iptables_undecided
# First real iptables use — this is what triggers the wrapper to pick a mode.
iptables -L > /dev/null
ensure_iptables_resolved ${mode}
# Fail on iptables 1.8.2 in nft mode
if ! iptables -C KUBE-FIREWALL -m comment --comment "kubernetes firewall for dropping marked packets" -m mark --mark 0x8000/0x8000 -j DROP; then
    echo "failed to match previously-added rule; iptables is broken" 1>&2
    exit 1
fi
| true
|
c5f2dab40eb7803faf87beb79d4d4e0a88e6dc71
|
Shell
|
qikushu/gbs
|
/Tassel5_mypipeline.sh
|
UTF-8
| 5,245
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# TASSEL 5 GBS v2 pipeline driver: runs the discovery/production SNP-calling
# steps, BWA alignment, and vcftools thinning, logging every command.
# Pass any argument other than "run" (e.g. "./script dry") to only record the
# commands in $HISTORY without executing them.
# NOTE(review): commands are stored in strings and executed via unquoted
# "$COMMAND" expansion — the "2>&1 | tee $LOG" parts of those strings are NOT
# interpreted as redirections/pipes when expanded; they are passed to perl as
# literal arguments. Confirm whether eval was intended.
## USER SETTINGS
RUN_PIPELINE='/home/gbsuser/tassel-5-standalone/run_pipeline.pl'
ENZYME='KpnI-MspI'
KEYFILE='key.txt'
#TAXA='IBMGBSTaxaList.txt'
#TNAME="IBM"
INPUT_DIR='fastq/'
REFSEQ='/home/gbsuser/nipponbare_ref/IRGSP-1.0_genome.fasta'
VCFTOOLS='/home/gbsuser/tool/vcftools_0.1.13/cpp/vcftools'
## ANALYSIS PARAMETERS BY USERS
BWA='/usr/local/bin/bwa'
#####################
#MAIN
###################
TEMPDIR='tempDir/'
DB=$TEMPDIR'GBSv2.db'
HISTORY=$TEMPDIR'command_hisotories.txt'
DRYRUN="run"
[ $1 ] && DRYRUN=$1
[ $DRYRUN == "run" ] && rm -rf $TEMPDIR
[ $DRYRUN == "run" ] && mkdir $TEMPDIR
[ $DRYRUN == "run" ] && rm $HISTORY
[ $DRYRUN == "run" ] && rm GBSv2.db
# Step1: GBSSeqToTagDBPlugin:
LOG=$TEMPDIR'1_Log_GBSSeqToTagDBPlugin_'$(date +%Y%m%d-%Hh%Mm%Ss).txt
COMMAND="perl $RUN_PIPELINE -GBSSeqToTagDBPlugin -e $ENZYME -i $INPUT_DIR -db $DB -k $KEYFILE -kmerLength 64 -mnQS 0 -c 2 -endPlugin 2>&1 | tee $LOG"
echo $COMMAND >> $HISTORY
[ $DRYRUN == "run" ] && $COMMAND >> $LOG
# Step2: TagExportToFastqPlugin:
OUTPUT=$TEMPDIR'tagsForAlign.fa.gz'
LOG=$TEMPDIR'2_TagExportToFastqPlugin_'$(date +%Y%m%d-%Hh%Mm%Ss).txt
COMMAND="perl $RUN_PIPELINE -TagExportToFastqPlugin -db $DB -o $OUTPUT -endPlugin 2>&1 | tee $LOG"
echo $COMMAND >> $HISTORY
[ $DRYRUN == "run" ] && $COMMAND >> $LOG
# Step3: Alignment to the reference genome with BWA mem
LOG=$TEMPDIR'3_runBWA_mem_'$(date +%Y%m%d-%Hh%Mm%Ss).txt
MYSAM=$TEMPDIR'tagsForAlign.sam'
echo "$BWA mem $REFSEQ $OUTPUT 1> $MYSAM 2> $LOG"
[ $DRYRUN == "run" ] && $BWA mem $REFSEQ $OUTPUT 1> $MYSAM 2> $LOG
# Step4: SAMToGBSdbPlugin:
LOG=$TEMPDIR'4_SAMToGBSdbPlugin_'$(date +%Y%m%d-%Hh%Mm%Ss).txt
COMMAND="perl $RUN_PIPELINE -SAMToGBSdbPlugin -i $MYSAM -db $DB -endPlugin 2>&1 | tee $LOG"
echo $COMMAND >> $HISTORY
[ $DRYRUN == "run" ] && $COMMAND >> $LOG
# Step5: DiscoverySNPCallerPluginV2:
LOG=$TEMPDIR'5_DiscoverySNPCallerPluginV2_'$(date +%Y%m%d-%Hh%Mm%Ss).txt
COMMAND="perl $RUN_PIPELINE -DiscoverySNPCallerPluginV2 -db $DB -sC chr01 -eC chr12 -mnMAF 0.01 -endPlugin 2>&1 | tee $LOG"
echo $COMMAND >>$HISTORY
[ $DRYRUN == "run" ] && $COMMAND >> $LOG
# Step6a: SNPQualityProfilerPlugin using subset of individuals:
#STATFILE=$TEMPDIR'SNPQualityStatsIBM.txt'
#LOG=$TEMPDIR'6a_SNPQualityProfilerPlugin_a_'$(date +%Y%m%d-%Hh%Mm%Ss).txt
#COMMAND="perl $RUN_PIPELINE -SNPQualityProfilerPlugin -db $DB -taxa $TAXA -tname $TNAME -statFile $STATFILE -endPlugin 2>&1 | tee $LOG"
#echo $COMMAND >> $HISTORY
#$COMMAND >> $LOG
# Step6b: SNPQualityProfilerPlugin using all individuals:
STATFILE=$TEMPDIR'SNPQualityStats.txt'
LOG=$TEMPDIR'6b_SNPQualityProfilerPlugin_b_'$(date +%Y%m%d-%Hh%Mm%Ss).txt
COMMAND="perl $RUN_PIPELINE -SNPQualityProfilerPlugin -db $DB -statFile $STATFILE -endPlugin 2>&1 | tee $LOG"
echo $COMMAND >> $HISTORY
[ $DRYRUN == "run" ] && $COMMAND >> $LOG
# Making QSfile
QSFILE=$TEMPDIR'SNPQualityScoresAll.txt'
LOG=$TEMPDIR'addSNPQstat.txt'
#COMMAND="less $STATFILE | perl ./addSNPQStat.pl > $QSFILE | tee $LOG"
# NOTE(review): the next assignment's awk program contains unescaped double
# quotes inside a double-quoted string, so the shell splits the assignment —
# this line is genuinely broken as written and needs a rewrite (e.g. single
# quotes around the whole command or escaping), not just execution.
COMMAND1="echo 'CHROM\tPOS\tQUALITYSCORE' > $QSFILE"
COMMAND2="less SNPQualityStats.txt | awk 'BEGIN{OFS="\t"}{printf("%s\t%s\t10\n",$1,$2)}' >> $QSFILE | tee $LOG "
#echo $COMMAND >> $HISTORY
echo $COMMAND1 >> $HISTORY
echo $COMMAND2 >> $HISTORY
#$COMMAND >> $LOG
[ $DRYRUN == "run" ] && $COMMAND1 >> $LOG
[ $DRYRUN == "run" ] && $COMMAND2 >> $LOG
# Step7: UpdateSNPPositionQualityPlugin:
LOG=$TEMPDIR'7_UpdateSNPPositionQualityPlugin'$(date +%Y%m%d-%Hh%Mm%Ss).txt
COMMAND="perl $RUN_PIPELINE -UpdateSNPPositionQualityPlugin -db $DB -qsFile $QSFILE -endPlugin 2>&1 | tee $LOG"
echo $COMMAND >> $HISTORY
[ $DRYRUN == "run" ] && $COMMAND >> $LOG
# Step8: ProductionSNPCallerPluginV2:
LOG=$TEMPDIR'8_ProductionSNPCallerPluginV2'$(date +%Y%m%d-%Hh%Mm%Ss).txt
MYVCF=$TEMPDIR'TestGBSGenosMinQ1.vcf'
COMMAND="perl $RUN_PIPELINE -ProductionSNPCallerPluginV2 -db $DB -e $ENZYME -i $INPUT_DIR -k $KEYFILE -o $MYVCF -endPlugin 2>&1 | tee $LOG"
echo $COMMAND >> $HISTORY
[ $DRYRUN == "run" ] && $COMMAND >> $LOG
# Step9: generate hapmap
LOG=$TEMPDIR'9_generateHapmap'$(date +%Y%m%d-%Hh%Mm%Ss).txt
MYHMP=$TEMPDIR'TestGBSGenosMinQ1'
COMMAND="perl $RUN_PIPELINE -Xmx5g -fork1 -vcf $MYVCF -export $MYHMP -exportType Hapmap -runfork1 2>&1 | tee $LOG"
echo $COMMAND >> $HISTORY
[ $DRYRUN == "run" ] && $COMMAND >> $LOG
# Step 10 VCFTOOLS THIN
THINVCF=$TEMPDIR'TestGBSGenosMinQ1_thin.vcf'
LOG=$TEMPDIR'10_vcftools_thin'$(date +%Y%m%d-%Hh%Mm%Ss).txt
COMMAND="$VCFTOOLS --vcf $MYVCF --thin 63 --recode --recode-INFO-all --out $THINVCF"
THIN_VCF_OUTPUT=$TEMPDIR'TestGBSGenosMinQ1_thin.vcf.recode.vcf'
echo $COMMAND >> $HISTORY
[ $DRYRUN == "run" ] && $COMMAND >> $LOG
# Step11: generate hapmap for thin
THINHMP=$TEMPDIR'TestGBSGenosMinQ1_thin'
LOG=$TEMPDIR'11_generateHapmap_thin'$(date +%Y%m%d-%Hh%Mm%Ss).txt
COMMAND="perl $RUN_PIPELINE -Xmx5g -fork1 -vcf $THIN_VCF_OUTPUT -export $THINHMP -exportType Hapmap -runfork1 2>&1 | tee $LOG"
echo $COMMAND >> $HISTORY
[ $DRYRUN == "run" ] && $COMMAND >> $LOG
# Step 12 CONVERT vcf for R
OUTPUT_TEXT="result_tassel5.txt"
THIN_VCF_OUTPUT=$TEMPDIR'TestGBSGenosMinQ1_thin.vcf.recode.vcf'
COMMAND="less $THIN_VCF_OUTPUT | perl vcf_ad.pl > $OUTPUT_TEXT"
echo $COMMAND >> $HISTORY
[ $DRYRUN == "run" ] && $COMMAND
| true
|
41157b96dc3dbd0074062682ce3960d761b115d5
|
Shell
|
curso-4linux/750-vagrant
|
/files/lab-gamification/lab11
|
UTF-8
| 6,571
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Gamified lab grader: for each question, runs a remote InSpec profile and
# sets NOTA<nn> to 1/0 depending on the "1 successful" summary line.
# (Question text and echoed strings are Portuguese and must stay as-is.)
clear
export START=$(date +%Y-%m-%d\ %H:%M:%S)
echo "########## QUESTÃO 01 ##########"
echo "Empacotar e compactar o diretório /etc no arquivo /tmp/bkp_etc.tar.gz"
# Show the InSpec summary columns, then capture them for grading.
inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.1.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}'
export RESULTADO01=$(inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.1.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}')
echo $RESULTADO01 | grep '1 successful' > /dev/null
if [ $? -eq 0 ]; then
export NOTA01='1';
else
export NOTA01='0';
fi
echo " "
echo "########## QUESTÃO 02 ##########"
echo "Empacotar e compactar o diretório /home no arquivo /tmp/bkp_home.tar.bz2"
inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.2.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}'
export RESULTADO02=$(inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.2.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}')
echo $RESULTADO02 | grep '1 successful' > /dev/null
if [ $? -eq 0 ]; then
export NOTA02='1';
else
export NOTA02='0';
fi
echo " "
echo "########## QUESTÃO 03 ##########"
echo "Listar os arquivos que estão armazenados no arquivo /tmp/bkp_etc.tar.gz"
inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.3.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}'
export RESULTADO03=$(inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.3.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}')
echo $RESULTADO03 | grep '1 successful' > /dev/null
if [ $? -eq 0 ]; then
export NOTA03='1';
else
export NOTA03='0';
fi
echo " "
echo "########## QUESTÃO 04 ##########"
echo "Compactar o arquivo /etc/passwd no arquivo /tmp/passwd.gz"
inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.4.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}'
export RESULTADO04=$(inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.4.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}')
echo $RESULTADO04 | grep '1 successful' > /dev/null
if [ $? -eq 0 ]; then
export NOTA04='1';
else
export NOTA04='0';
fi
echo " "
echo "########## QUESTÃO 05 ##########"
echo "Bloquear o usuário suporte para que o mesmo não possa criar agendamento único"
inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.5.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}'
export RESULTADO05=$(inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.5.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}')
echo $RESULTADO05 | grep '1 successful' > /dev/null
if [ $? -eq 0 ]; then
export NOTA05='1';
else
export NOTA05='0';
fi
echo " "
echo "########## QUESTÃO 06 ##########"
echo "Listar os agendamentos periódicos do usuário vagrant"
inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.6.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}'
export RESULTADO06=$(inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.6.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}')
echo $RESULTADO06 | grep '1 successful' > /dev/null
if [ $? -eq 0 ]; then
export NOTA06='1';
else
export NOTA06='0';
fi
echo " "
echo "########## QUESTÃO 07 ##########"
echo "Criar agendamento no arquivo /etc/crontab para listar o diretório /etc a cada 30 minutos com o root"
inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.7.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}'
export RESULTADO07=$(inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.7.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}')
echo $RESULTADO07 | grep '1 successful' > /dev/null
if [ $? -eq 0 ]; then
export NOTA07='1';
else
export NOTA07='0';
fi
echo " "
echo "########## QUESTÃO 08 ##########"
echo "Criar o banco de dados de nome backup no MSYQL (Use os comandos SQL em letras maiúsculas)"
inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.8.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}'
export RESULTADO08=$(inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.8.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}')
echo $RESULTADO08 | grep '1 successful' > /dev/null
if [ $? -eq 0 ]; then
export NOTA08='1';
else
export NOTA08='0';
fi
echo " "
echo "########## QUESTÃO 09 ##########"
echo "Listar o banco de dados no MSYQL (Use os comandos SQL em letras maiúsculas)"
inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.9.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}'
export RESULTADO09=$(inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.9.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}')
echo $RESULTADO09 | grep '1 successful' > /dev/null
if [ $? -eq 0 ]; then
export NOTA09='1';
else
export NOTA09='0';
fi
echo " "
echo "########## QUESTÃO 10 ##########"
echo "Listar os registros da tabela user no banco mysql (Use os comandos SQL em letras maiúsculas)"
inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.10.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}'
export RESULTADO10=$(inspec exec https://raw.githubusercontent.com/instrutordocker/labs/master/lab12.10.tar.gz | tail -1 | awk -F" " '{ print $3,$4,$5,$6}')
echo $RESULTADO10 | grep '1 successful' > /dev/null
if [ $? -eq 0 ]; then
export NOTA10='1';
else
export NOTA10='0';
fi
echo " "
export FINAL=$(expr $NOTA01 + $NOTA02 + $NOTA03 + $NOTA04 + $NOTA05 + $NOTA06 + $NOTA07 + $NOTA08 + $NOTA09 + $NOTA10)
export END=$(date +%Y-%m-%d\ %H:%M:%S )
while true
do
echo "Você gostaria de enviar sua nota para o banco de dados da Dexter?"
read OPT
echo " "
case $OPT in
sim)
mysql -u suporte -p4linux -D labgamification 1> /dev/null 2> /dev/null << TERMINAR
INSERT INTO labs (id,inicio,fim,aula,nota) VALUES ('12','$START','$END','Aula 12','$FINAL');
TERMINAR
if [ $? -ne 0 ]; then
echo "A sua nota já esta registrada no banco de dados da Dexter"
echo " "
echo "Acesse no Browser o endereço 172.16.100.110 para visualizar suas notas"
exit
else
echo "Acesse no Browser o endereço 172.16.100.110 para visualizar suas notas"
exit
fi
;;
nao)
echo " "
echo -n "Sua nota final para este Lab Gamification é igual a" ; echo -n " " ; echo $FINAL
exit
;;
*) echo "Escolha uma opção válida: sim|nao"
echo " "
;;
esac
done
| true
|
3c4fd7978121f581246bfdd159a5dc12528340a6
|
Shell
|
sree86/Scripts
|
/slash-swap.sh
|
UTF-8
| 226
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
##This script is used to swap "/" to "\" and "\" to "/"
# Fixed: the original placed the shebang on line 2, where it has no
# effect; it must be the very first line of the file.

# Print $1 with every "\" turned into "/" and every "/" into "\"
# (characters that are neither are dropped, as in the original;
# no trailing newline is emitted).
swap_slashes() {
  local input=$1 out= i ch
  for (( i = 0; i < ${#input}; i++ )); do
    ch=${input:i:1}
    case $ch in
      \\) out+='/' ;;
      /)  out+='\' ;;
    esac
  done
  printf '%s' "$out"
}

A="\\//"
# Preserved from the original: print the last valid string index.
echo $(( ${#A} - 1 ))
swap_slashes "$A"
| true
|
cb1d0de0615b78982f679b5e2111bc78af509990
|
Shell
|
KabyleAI/mbert-unseen-languages
|
/transfer/downstream/finetune/eval.sh
|
UTF-8
| 1,300
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Evaluate POS predictions: run the fine-tuned model on filtered
# CoNLL 2018 French treebanks, then score the predictions against
# gold with the shared CoNLL-18 evaluation script.
data_src="/data/almanach/user/bemuller/projects/data/fr_pred_tokens_conll_2018/pred_filtered/"
gold_src="/data/almanach/user/bemuller/projects/data/fr_pred_tokens_conll_2018/gold_filtered/"
src_model="/data/almanach/user/bemuller/projects/mt_norm_parse/checkpoints/bert/"
task="pos"
dir_pred="/data/almanach/user/bemuller/projects/data/pred"
module load conda
source activate lm
for args_model in "9990958-66052-9990958_job-c0ed8_model/9990958-66052-9990958_job-c0ed8_model-args.json" ; do
    args_model=$src_model$args_model
    for data in "fr_sequoia" ; do
        data_dir=$data_src$data"-udpipe_multi_filtered.conllu"
        gold=$gold_src$data"-ud-test-filtered.conllu"
        # Fixed: the original echoed the undefined variable $gold_dir.
        echo "EVAL src $data_dir on gold $gold"
        python $CAMEMBERT_FINE_TUNE/predict.py --test_paths $data_dir --init_args_dir $args_model --tasks $task --end_predictions $dir_pred > pred.txt
        # predict.py reports the prediction file on stdout; recover the
        # path from the log line (fixed column offset 49).
        pred=`grep "CREATING NEW FILE (io_/dat/normalized_writer) :" ./pred.txt | tail -1 | cut -c 49-`
        echo "FOUND pred file $pred"
        echo "EVAL --$gold-- vs --$pred--"
        python $CAMEMBERT_FINE_TUNE/evaluate/conll18_ud_eval.py --verbose $gold $pred > ./results.txt
        cat ./results.txt
    done
done
#python ./evaluate/conll18_ud_eval.py $gold $pred --v
| true
|
9160e9cdbd965beaac6191e79833fafb5d2f7d75
|
Shell
|
Gentux/nanocloud-old
|
/src/nanocloud/scripts/exec.sh
|
UTF-8
| 317
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run a single command on the remote nanocloud host over SSH.
# Connection settings (PASSWORD, PORT, USER, SERVER) come from the
# shared configuration file loaded below.
source ../src/nanocloud/scripts/configuration.sh
if [ ${#} -lt 1 ]
then
    echo "Not enough arguments"
    exit 1
fi
remote_command=${1}
ssh_bin=$(which ssh)
sshpass -p "${PASSWORD}" "${ssh_bin}" -o StrictHostKeyChecking=no -p "${PORT}" "${USER}@${SERVER}" "${remote_command}"
| true
|
51275baea680db1834e5b6facf2ff2a6cda18238
|
Shell
|
loveparade/my-dot
|
/rofi/power.sh
|
UTF-8
| 198
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# rofi power menu: with no arguments, print the available actions (one
# per line) for rofi to display; with an argument, hand the selected
# action to i3exit.
main() {
    # Fixed: the original tested [ -z "$@" ], which misbehaves with
    # more than one argument (SC2198); count the arguments instead.
    if [ $# -eq 0 ]
    then
        echo 'lock'
        echo 'logout'
        echo 'switch_user'
        echo 'suspend'
        echo 'hibernate'
        echo 'reboot'
        echo 'shutdown'
    else
        i3exit "$@"
        exit 0
    fi
}

main "$@"
| true
|
596beb4d15ce4d51df0da437e2639f2fe0fa99fa
|
Shell
|
wangxiaomo/config-all
|
/zshrc
|
UTF-8
| 1,832
| 2.59375
| 3
|
[] |
no_license
|
# ~/.zshrc for oh-my-zsh: theme/plugin selection, aliases, locale and
# input-method environment, and a custom prompt.
# Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="bira"
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# Set to this to use case-sensitive completion
# CASE_SENSITIVE="true"
# Comment this out to disable weekly auto-update checks
# DISABLE_AUTO_UPDATE="true"
# Uncomment following line if you want to disable colors in ls
# DISABLE_LS_COLORS="true"
# Uncomment following line if you want to disable autosetting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment following line if you want red dots to be displayed while waiting for completion
# COMPLETION_WAITING_DOTS="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
plugins=(git cpanm django github perl pip python redis-cli svn taskwarrior vi-mode)
source $ZSH/oh-my-zsh.sh
# Customize to your needs...
alias vi='vim'
alias grep='grep -i'
alias ls='ls -F --color'
alias ll='ls -lh'
alias as='aptitude search'
alias ai='sudo aptitude install'
alias vpn='sudo /etc/init.d/openvpn start'
alias python='python2.7'
alias ipython='ipython2.7'
# Force a 256-color terminfo entry inside tmux.
alias tmux="TERM=screen-256color-bce tmux"
alias doubandev2="ssh wj_intern@doubandev2.intra.douban.com"
alias dev2=doubandev2
export DISPLAY=:0.0
export VISUAL='vim'
export EDITOR='vim'
export LANG='zh_CN.UTF-8'
export LANGUAGE='zh_CN.UTF-8'
export LC_ALL='zh_CN.UTF-8'
# ibus input-method hooks for X/GTK/Qt applications.
export XMODIFIERS="@im=ibus"
export GTK_IM_MODULE=ibus
export QT_IM_MODULE=ibus
#bindkey -v
# Emacs-style line editing.
bindkey -e
# Prompt
# NOTE(review): ${current_dir} and ${git_branch} are not defined in
# this file -- presumably supplied by the theme; with prompt_subst off
# they expand only once, at assignment time. Verify against the theme.
PROMPT="${current_dir} ${git_branch}%B%%%b "
unsetopt correct_all
| true
|
4c12b76341b505e6f9a86f3d1016d204325a297f
|
Shell
|
cncf/devstats
|
/devel/restore_db_all.sh
|
UTF-8
| 135
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Restore every devstats database listed in $all (defined by
# devel/all_dbs.sh) via devel/restore_db.sh; abort on first failure.
. ./devel/all_dbs.sh || exit 2
# $all is a whitespace-separated list, so it stays unquoted here;
# the individual name is quoted when passed on (it was not before).
for db in $all
do
  echo "DB: $db"
  ./devel/restore_db.sh "$db" || exit 2
done
echo 'OK'
| true
|
1644e422c2fad2e93354b6940b8edc10795e0b56
|
Shell
|
ymstrike/regExUp
|
/login_users_bash_shell.sh
|
UTF-8
| 668
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
#Name :Yuval Matityahu
#Purpose:Show the users that login with bash shell as a default
#        by scanning /etc/passwd the last fiels
#Version 1.0
#Date : 18/12/2017

# List every account whose login shell is /bin/bash, printed as
# "user:/bin/bash" (same output format as the original cut -f1,7).
# One awk pass replaces the per-line `cut` subprocesses and the
# unquoted [ $shell_bash = ... ] test of the original.
# Arguments:
#   $1 - passwd-format file to scan (optional, default /etc/passwd)
scan_users_bash_shell()
{
  local passwd_file=${1:-/etc/passwd}
  printf "\n"
  printf "List of Users with login bash shell as a default:\n"
  printf "=================================================\n"
  awk -F: '$7 == "/bin/bash" { print $1 ":" $7 }' "$passwd_file"
}

scan_users_bash_shell # call to function
| true
|
0ab82b18624e97acd6d473bba0c8023159a51fbe
|
Shell
|
belgacemghiloufi/nuxeo-helm-chart
|
/nev/deploy-preview.sh
|
UTF-8
| 508
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy the ARender previewer chart into a tenant namespace.
# Usage: deploy-preview.sh <tenant>
# Fixed: the original shebang was "#/bin/bash" (missing '!'), so it
# was a plain comment and the script ran under the caller's shell.
if [ -n "$1" ];then
  tenant=$1
  echo "Add previewer to tenant $tenant"
  ../repositories/deploy-arender-repository-secret.sh "$tenant"
  # Nexus credentials come from the environment (NEXUS_TOKEN_*).
  helm3 upgrade -i arender arender \
       --repo https://packages.nuxeo.com/repository/helm-releases/ \
       --version 0.2.3 \
       -n "$tenant" --create-namespace \
       -f nev-common-values.yaml \
       -f nev-previewer-values.yaml \
       --password "$NEXUS_TOKEN_PASS" \
       --username "$NEXUS_TOKEN_NAME"
else
  echo "please provide a tenant/namespace as first argument"
fi
| true
|
2352fe20ef5fcdd4d0d8b3c2d14aeac2c40d9ef7
|
Shell
|
rinderknecht/Scripts
|
/update_site.sh
|
UTF-8
| 24,832
| 4.3125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# This script updates locally our web site before it is remotely
# synchronised using the script rsync_site.sh
#
# Author: Christian Rinderknecht
#set -x
#=====================================================================
# General Settings
# In a future release, $quiet could be passed as an option
#
# quiet=yes silences print_nl/warn output.
quiet=no
# Base name of this script, used as a prefix in diagnostics.
script=$(basename $0)
#=====================================================================
# Wrappers for several kind of displays
#
# Print "$1" followed by a newline, unless quiet mode is on.
print_nl () { if test "$quiet" != "yes"; then printf "$1\n"; fi }
# Print an error message and abort the whole script with status 1.
# NOTE(review): the first echo relies on "\n" being interpreted
# (true for dash's echo, not for bash's default echo) and goes to
# stdout while the message body goes to stderr -- confirm intent.
fatal_error () {
  echo "\n$script: fatal error:"
  echo "$1" 1>&2
  exit 1
}
# Print a warning (suppressed in quiet mode); does not abort.
warn () {
  print_nl "\n$script: warning:"
  print_nl "$1"
}
#=====================================================================
# Help
#
# Print the usage screen and exit with status 1 (also used as the
# error path for unknown options via the parsing loop below).
usage () {
  cat <<EOF
Usage: $(basename $0) [-h][-d]
Update the web site on the local host.
The following options, if given, must be given only once.
Display control:
  -h, --help display this help and exit
EOF
  exit 1
}
# Other options:
# -d, --debug display configuration of tools
#=====================================================================
# Parsing loop
#
# Consume the command line one argument at a time; only -h/--help is
# currently supported, anything else is a fatal error.
while : ; do
  case "$1" in
    "") break;;
      # Help
      #
    -h | --help | -help)
      help=yes
      help_opt=$1
      ;;
      # Recursive descent
      #
#    -d | --debug)
#      debug=yes
#      debug_opt=$1
#      ;;
      # Invalid option
      #
    -*)
      fatal_error "Invalid option \"$1\"."
      ;;
      # Additional arguments
      #
    *)
      fatal_error "No argument taken."
      ;;
  esac
  shift
done
#=====================================================================
# Checking the command-line options and arguments and applying some of
# them.
# First, we check if the user asks for help.
# ($help may be unset here; the quoted test handles that.)
#
if test "$help" = "yes"; then usage; fi
#=====================================================================
# Records source updates in $updates and exports updates in
# $updated_exports.
#
# Called by `update_src'
#
# The current directory is assumed to be
# $HOME/public_html/Lectures
#
# $1 is raw `svn update`/`svn checkout` output; each interesting line
# looks like "U  path", so awk field 2 is the path. Results are
# written to the files $2 (updated source dirs) and $4 (dirs whose
# export file changed) because the while loop runs in a pipeline
# subshell and could not return them through shell variables.
record_update () {
  updated=$1
  updates=$2
  exports=$3
  updated_exports=$4
#  echo "Entering record_update..."
#  echo "\$updated=[$1]"
#  echo "\$updates=[$2]"
#  echo "\$exports=[$3]"
#  echo "\$updated_exports=[$4]"
  echo "$updated" \
| awk '{ print $2 }' \
| while read update; do \
    # Strip the single quotes some svn versions put around paths.
    new_update=$(expr $update : "'\(.*\)'")
    if test -n "$new_update"; then update=$new_update; fi
    if test -f "$update"
    then update_dir=$(dirname $update)
    else update_dir=
    fi
    if test -n "$update_dir"
    then if test $(basename $update) = $exports
         then echo $update_dir >> $updated_exports
         elif ! (grep -x $update_dir $updates > /dev/null 2>&1)
         then echo $update_dir >> $updates
         fi
    fi
  done
}
#=====================================================================
# Update and/or checkout sources from the Subversion archive
#
# Example of Subversion update:
#
# $ svn update AI
# U AI/ai.tex
# Updated to revision 603.
# $ svn update IR
# At revision 603.
#
# The current directory is assumed to be
# $HOME/public_html/Lectures
#
# $1 is the catalog file listing lecture directories (one per line);
# existing directories are svn-updated, missing ones checked out, and
# record_update collects what changed into $2/$4 ($3 names the export
# file). "At revision" in the svn output means nothing changed.
update_src () {
  catalog=$1
  updates=$2
  exports=$3
  updated_exports=$4
  if test -s "$catalog"
  then
    rm -f $updates $updated_exports
    sed '/^[ ]*$/d' $catalog \
  | while read lecture; do
      if test -d $lecture
      then
        printf "Updating recursively $(pwd)/$lecture..."
        updated=$(svn update $lecture)
        if test $? -eq 0
        then
          formated=$(echo "$updated" | tr '\n' ' ')
          if test $(expr "$formated" : "At revision") -ne 0
          then echo " no update."
          else echo " done."
               record_update "$updated" $updates $exports $updated_exports
          fi
        else echo " FAILED. Skipping $lecture."
        fi
      else
        printf "Checking out $(pwd)/$lecture..."
        updated=$(svn checkout file://$HOME/SVN/Lectures/$lecture)
        if test $? -eq 0
        then echo " done."
             record_update "$updated" $updates $exports $updated_exports
        else echo " FAILED. Skipping $lecture."
        fi
      fi
    done
  else echo "No lectures to update or check out."
  fi
}
#=====================================================================
# Update the documents in the updated copies using generic makefiles
#
# The current directory is assumed to be
# $HOME/public_html/Lectures
#
# For each lecture in catalog $1, read its export file ($3): each line
# is "section: path ...". Any exported path whose sources ($2) or
# export file ($5) changed gets its documents rebuilt in place via the
# generic Makefile helpers (setup.sh, parts.sh, del_parts.sh, ...).
# The rebuilt paths are recorded in $4 for the HTML/mirror stages.
update_doc () {
  catalog=$1
  updates=$2
  exports=$3
  exported_updates=$4
  updated_exports=$5
  rm -f $exported_updates
  if test -s "$catalog"
  then
    sed '/^[ ]*$/d' $catalog \
  | while read lecture; do
      if test -d $lecture
      then
        export_file=$lecture/$exports
        if test -s $export_file
        then
          sed '/^[ ]*$/d' $export_file \
        | while read export_line; do
            paths=$(expr "$export_line" : "[^:]*: *\(.*\)")
            if test -n "$paths"
            then
              for path in $paths; do
                if test "$path" = "."
                then qualified_path=$lecture
                else qualified_path=$lecture/$path
                fi
                updated_src=$(grep -x $qualified_path $updates 2>/dev/null)
                updated_exp=$(grep -x $lecture $updated_exports 2>/dev/null)
                # Remaking the document and the accompanying .phtml
                #
                if test -n "$updated_src" -o -n "$updated_exp"
                then
                  if test -d $qualified_path
                  then
                    echo "*** Entering $qualified_path"
                    echo $qualified_path >> $exported_updates
                    (cd $qualified_path
                     setup.sh
                     if test -f Makefile
                     then
                       doc=$(make -Rrs doc 2>/dev/null)
                       if test -n "$doc"
                       then
                         if test -n "$updated_src"
                         then
                           make -Rrs clean
                           printf "Deleting document parts..."
                           del_parts.sh $doc
                           echo " done."
                           if test "$(basename $qualified_path)" = "Answers"
                           then answers.sh $doc
                           fi
                         fi
                         parts.sh $doc
                       else
                         make -Rrs parts
                       fi
                     else warn "$(pwd)/Makefile not found. Skipping."
                     fi)
                  else warn "Directory $qualified_path not found."
                  fi
                fi
              done
            fi
          done
        fi
      else warn "Directory $lecture in catalog $catalog not found."
      fi
    done
  fi
}
#=====================================================================
# Generating the main section HTML
#
# The current directory is assumed to be
# $HOME/public_html/Lectures
#
# Regenerate $lecture/index.html for every lecture touched by the last
# source/export pass: $2 and $3 list the touched paths, whose leading
# component names the lecture; the resulting set of affected lecture
# directories is left in $4 for the later stages. The `>|` redirection
# forces clobbering even when noclobber is set.
update_html () {
  exports=$1
  exported_updates=$2
  updated_exports=$3
  updated_lectures=$4
  rm -f $updated_lectures
#  echo "Entering update_html..."
#  if test -s $updated_exports
#  then
#    echo ".updated_exports exists and is not empty."
#  else
#    echo ".updated_exports does no exist or is empty."
#  fi
#  if test -s $exported_updates
#  then
#    echo ".exported_updates exists and is not empty."
#  else
#    echo ".exported_updates does not exist or is empty."
#  fi
  if test -s $updated_exports -o -s $exported_updates
  then
    # Keep the first path component of every touched path: that is the
    # lecture directory.
    cat $updated_exports $exported_updates 2>/dev/null \
  | while read line; do echo $(expr "$line" : "\([^/]*\)"); done \
  | sort -u >| $updated_lectures
    cat $updated_lectures 2>/dev/null \
  | while read lecture; do
      if test -s $lecture/$exports
      then # Perhaps something to publish. Assuming none or one main section.
        index=$lecture/index.html
        printf "Updating $index (XHTML 1.0 Transitional)..."
        echo "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"" >| $index
        echo "  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">" >> $index
        echo >> $index
        echo "<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">" >> $index
        echo >> $index
        echo "<head>" >> $index
        # The main section line of the export file starts with '#'.
        main=$(sed -n -E 's|#[^:]+:[ ]*([^ ]+)[ ]*|\1|p' \
               $lecture/$exports 2>/dev/null)
        if test -z "$main"
        then # No main section. Let us try $lecture.
          main=.
        fi
        title_file=$(ls $lecture/$main/.*.title 2>/dev/null)
        title=
        if test -n "$title_file"
        then # Assuming only one path and one title for the main section
          # Strip TeX accents and quoting from the stored title.
          title=$(cat $title_file | sed -e "s|\\\'||g" -e "s|\\\^||g" \
                  | tr -d '`' | tr -d '\\' 2>/dev/null)
        fi
        if test -z "$title"; then title=Document; fi
        echo "  <title>$title</title>" >> $index
        echo "  <meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\"/>" >> $index
        echo "  <style type=\"text/css\">img {border-width: 0}</style>" >> $index
        echo "</head>" >> $index
        echo >> $index
        echo "<body bgcolor=\"#FFFFFF\">" >> $index
        echo >> $index
        echo "<h3>$title</h3>" >> $index
        echo >> $index
        echo "<ul>" >> $index
        sed '/^[ ]*$/d' $lecture/$exports \
      | while read export_line; do
          section=$(expr "$export_line" : "\([^:]*\):.*")
          main=$(expr "$section" : "#\(.*\)")
          paths=$(expr "$export_line" : "[^:]*: *\(.*\)")
          if test -n "$main"
          then # The main section title is followed by the date
               # Only one path for the main section is assumed
            path=$(echo "$paths" | sed -E 's/^([^ ]+).*$/\1/')
            title_file=$(ls $lecture/$path/.*.title 2>/dev/null)
            base=$(basename $title_file .title)
            base=$(expr $base : "\.\(.*\)")
            if test -f $lecture/$path/.$base.dvi.date
            then
              last_update=$(cat $lecture/$path/.$base.dvi.date)
              echo "  <li>$main (last updated $last_update)" >> $index
            else echo "  <li>$main" >> $index
            fi
          elif test -n "$section"
          then echo "  <li>$section" >> $index
          fi
          if test -n "$main" -o -n "$section"
          then
            echo "  <ul>" >> $index
            for path in $paths; do
              html=$(ls $lecture/$path/*.phtml 2>/dev/null)
              if test -n "$html" # Assuming only one *.phtml
              then
                if test $path = .
                then # Handmade
                  cat $html >> $index
                else # AutomaTeX
                  # Requalify relative .ps/.pdf links with the path.
                  cat $html \
                  | sed -E -e "s|([^\"]*)\.ps|$path/\1.ps|g" \
                           -e "s|([^\"]*)\.pdf|$path/\1.pdf|g" >> $index
                fi
              fi
            done
            echo "  </ul>" >> $index
            echo "  </li>" >> $index
          fi
        done
        echo "</ul>" >> $index
        echo >> $index
        echo "<hr/>" >> $index
        echo >> $index
        echo "<p>This file has been automatically generated $(date).</p>" >> $index
        echo >> $index
        echo "<p>" >> $index
        echo "  <a href=\"http://validator.w3.org/check?uri=referer\">" >> $index
        echo "    <img src=\"http://www.w3.org/Icons/valid-html401\"" >> $index
        echo "         alt=\"Valid HTML 4.01 Transitional\" height=\"31\" width=\"88\"/>" >> $index
        echo "  </a>" >> $index
        echo "</p>" >> $index
        echo >> $index
        echo "</body>" >> $index
        echo "</html>" >> $index
        echo " done."
      fi
    done
  fi
}
#=====================================================================
# Update the main HTML
#
# The current directory is assumed to be
# $HOME/public_html/Lectures
#
# Rebuild the top-level index.html listing every published lecture in
# catalog $1, but only when some lecture changed ($3 non-empty) or the
# catalog itself was updated (marker file $4 exists). $2 names the
# per-lecture export file.
update_root_idx () {
  catalog=$1
  exports=$2
  updated_lectures=$3
  updated_catalog=$4
  index=index.html
  if test -s $updated_lectures -o -f $updated_catalog
  then
    printf "Updating the root index (XHTML 1.0 Transitional)..."
    echo "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"" >| $index
    echo "  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">" >> $index
    echo >> $index
    echo "<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">" >> $index
    echo >> $index
    echo "<head>" >> $index
    echo "  <title>Teachings in Computer Science by Christian Rinderknecht</title>" >> $index
    echo "  <meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\"/>" >> $index
    echo "  <style type=\"text/css\">img {border-width: 0}</style>" >> $index
    echo "</head>" >> $index
    echo >> $index
    echo "<body bgcolor=\"#FFFFFF\">" >> $index
    echo >> $index
    echo "  <h2>Teachings in Computer Science</h2>" >> $index
    echo "  <h3>by Christian Rinderknecht</h3>" >> $index
    echo >> $index
    echo "  <ul>" >> $index
    sed '/^[ ]*$/d' $catalog \
  | while read lecture; do
      if test -s $lecture/$exports
      then
        if test -f $lecture/index.html
        then
          main=$(sed -n -E 's|#[^:]+:[ ]*([^ ]+)[ ]*|\1|p' $lecture/$exports 2>/dev/null)
          if test -z "$main"
          then # No main section. Let us try $lecture.
            main=.
          fi
          # Optional .lang dot-file adds "(language)" after the title.
          lang_file=$(ls $lecture/$main/.*.lang 2>/dev/null)
          if test -n "$lang_file" -a -f "$lang_file"
          then lang=" ($(cat $lang_file))"
          else lang=
          fi
          title_file=$(ls $lecture/$main/.*.title 2>/dev/null)
          if test -n "$title_file"
          then # Assuming only one title
            title=$(cat $title_file)
            if test -z "$title"
            then
              title=' <font color="red">The title is missing! Please report.</font>'
              warn "Title of lecture $lecture not found. Skipping."
            fi
          else
            title=' <font color="red">The title is missing! Please report.</font>'
            warn "Title of lecture $lecture not found. Skipping."
          fi
          echo "  <li><a href=\"$lecture/index.html\">$title$lang</a></li>" \
          | sed -e "s|\\\'||g" -e "s|\\\^||g" \
          | tr -d '`' | tr -d '\\' >> $index
          echo >> $index
        else warn "File $lecture/index.html not found."
        fi
      elif test ! -e $lecture/$exports
      then warn "File $lecture/$exports not found."
      fi
    done
    echo "  </ul>" >> $index
    echo >> $index
    echo "<p>This file has been automatically generated $(date).</p>" >> $index
    echo >> $index
    echo "<hr/>" >> $index
    echo >> $index
    echo "<p>" >> $index
    echo "  <a href=\"http://validator.w3.org/check?uri=referer\">" >> $index
    echo "    <img src=\"http://www.w3.org/Icons/valid-html401\"" >> $index
    echo "         alt=\"Valid HTML 4.01 Transitional\" height=\"31\" width=\"88\"/>" >> $index
    echo "  </a>" >> $index
    echo "</p>" >> $index
    echo >> $index
    echo "</body>" >> $index
    echo "</html>" >> $index
    echo " done."
  fi
}
#=====================================================================
# Computing the prefix-free ordered set of directory names in a file.
#
# The file named by $1 is first sorted uniquely in place. A two-line
# window then slides over the sorted list: a path is emitted only when
# it is not a (string) prefix of its successor; the final path is
# always emitted. The result is appended to $1.prefix-free.
#
prefix_free () {
  sorted_file=$1
  sort -o $sorted_file -u $sorted_file
  rm -f $sorted_file.prefix-free
  prev=$(head -n 1 $sorted_file)
  {
    while read next; do
      case $next in
        "$prev"*) ;;                                # prev is a prefix: drop it
        *) echo $prev >> $sorted_file.prefix-free ;;
      esac
      prev=$next
    done
    # The last path has no successor, so it is always kept.
    echo $prev >> $sorted_file.prefix-free
  } < $sorted_file
}
#=====================================================================
# Taking a list of paths and outputting all the intermediary paths.
#
# For each path in file $2, write the path itself and every ancestor
# directory down to (but excluding) the root $1 into $2.dist, then
# sort and deduplicate that file.
#
# Fixes: the unused variable $mirrors and the unused alias $paths are
# gone, and the ancestor walk now also stops at "." or "/" instead of
# looping forever when a path does not live under the root.
#
distribute () {
  root=$1
  paths_file=$2
  rm -f $paths_file.dist
  while read path; do
    base=$path
    until test "$base" = "$root" -o "$base" = . -o "$base" = /; do
      echo $base >> $paths_file.dist
      base=$(dirname $base)
    done
  done < $paths_file
  sort -o $paths_file.dist -u $paths_file.dist
}
#=====================================================================
# Updating the mirror of the Web site
#
# The current directory is assumed to be
# $HOME/public_html/Lectures
#
# Synchronise the flat mirror tree (../$5) with what the export files
# declare: copy the root index and the exported artefacts in, then
# diff (via comm) the set of directories actually present in the
# mirror against the set that should be exported, deleting the
# extras and adding the missing ones.
# Args: $1 catalog, $2 export-file name, $3 updated exported paths,
# $4 updated lectures, $5 mirror dir name, $6/$7 scratch dir lists,
# $8 catalog-change marker, $9 updated export files.
update_mirror () {
  catalog=$1
  exports=$2
  exported_updates=$3
  updated_lectures=$4
  mirror=../$5
  exported_dirs=$6
  mirrored_dirs=$7
  updated_catalog=$8
  updated_exports=$9
  rm -f $exported_dirs $exported_dirs.dist $mirrored_dirs .removed
# updated_lectures <- $cat_path $updated_exports $exported_updates
#  echo "Entering update_mirror..."
#  if test -s $updated_lectures
#  then
#    echo ".updated_lectures exists and is not empty."
#  else
#    echo ".updated_lectures does not exist or is empty."
#  fi
  if test -s $updated_lectures -o -f $updated_catalog -o ! -d $mirror
  then
    # Mirroring the root index
    #
    printf "Mirroring the root index..."
    mkdir -p $mirror
    cp -f index.html $mirror
    echo " done."
    # Copying all the exported updates
    #
    cat $updated_lectures $exported_updates 2>/dev/null \
  | while read path; do
      backup=$(ls $path/*.java $path/*.pdf $path/*.html $path/*.xml \
                  $path/*.xsl $path/*.erl $path/*.ert $path/*.P $path/*.pl \
                  $path/*.c $path/*.txt $path/*.dtd 2>/dev/null)
      if test -n "$backup"
      then
        printf "Mirroring $path..."
        mkdir -p $mirror/$path
        echo $backup \
      | while read a_file; do cp -f $a_file $mirror/$path 2>/dev/null; done
        echo " done."
      fi
    done
    # Collecting the exported directories, sorting them and adding all
    # the intermediate directories.
    #
    sed '/^[ ]*$/d' $catalog 2>/dev/null \
  | while read lecture; do
      if test -d $lecture
      then # Some published lecture
        sed '/^[ ]*$/d' $lecture/$exports 2>/dev/null \
      | while read export_line; do
          paths=$(expr "$export_line" : "[^:]*: *\(.*\)")
          if test -n "$paths"
          then # Indeed, something published
            for path in $paths; do
              if test "$path" = "."
              then echo $mirror/$lecture >> $exported_dirs
              else echo $mirror/$lecture/$path >> $exported_dirs
              fi
            done
          fi
        done
      fi
    done
    sort -o $exported_dirs -u $exported_dirs
    distribute $mirror $exported_dirs
    # Collecting all the current directories in the mirror and removing
    # all the names which are prefix of another.
    #
    find $mirror/* -type d >> $mirrored_dirs
    sort -o $mirrored_dirs -u $mirrored_dirs
    # Finding the directories which are in the mirror but which are not
    # exported: they must be removed from the mirror.
    #
    comm -2 -3 $mirrored_dirs $exported_dirs.dist >| .removed
    if test -s .removed
    then
      cat .removed \
    | while read path; do
        printf "Removing ${path#$mirror/} from the mirror..."
        rm -fr $path
        echo " done."
      done
    else rm -f .removed
    fi
    # Finding the directories which are exported but which are not
    # in the mirror: they must be added to the mirror.
    #
    comm -1 -3 $mirrored_dirs $exported_dirs.dist >| .added
    if test -s .added
    then cat .added \
       | while read path; do
           short_path=${path#$mirror/}
           backup=$(ls $short_path/*.pdf $short_path/*.html $short_path/*.xml \
                       $short_path/*.xsl $short_path/*.erl $short_path/*.ert \
                       $short_path/*.P $short_path/*.pl $short_path/*.c \
                       $short_path/*.txt 2>/dev/null)
           if test -n "$backup"
           then printf "Mirroring $short_path..."
                mkdir -p $path
                cp -f $short_path/*.pdf $short_path/*.html $short_path/*.xml \
                      $short_path/*.xsl $short_path/*.erl $short_path/*.ert \
                      $short_path/*.P $short_path/*.pl $short_path/*.c \
                      $short_path/*.txt $path 2>/dev/null
                echo " done."
           fi
         done
    else rm -f .added
    fi
  fi
}
#=====================================================================
# Update the user's tools needed for running this script
#
# $1 is the host name, used only in the progress message; the tool
# sources (devel, Makefiles, LaTeX) are refreshed from Subversion.
#
update_tools () {
  printf '%s\n' "Updating tools on the file server $1: "
  svn update "$HOME/devel" "$HOME/Makefiles" "$HOME/LaTeX"
}
#=====================================================================
# Run the whole lecture pipeline (sources -> documents -> HTML ->
# root index -> mirror) from $HOME/public_html/Lectures, wiring the
# conventional dot-file names between the stages. $1 is the marker
# file signalling that the catalog itself changed.
#
update_lectures () {
  updated_catalog=$1
  catalog=.catalog
  updates=.updates
  exports=.exports
  exported_updates=.exported_updates
  updated_exports=.updated_exports
  updated_lectures=.updated_lectures
  mirror=Mirror
  exported_dirs=.exported_dirs
  mirrored_dirs=.mirrored_dirs
  (cd $HOME/public_html/Lectures
   update_src $catalog $updates $exports $updated_exports
   update_doc $catalog $updates $exports $exported_updates $updated_exports
   update_html $exports $exported_updates $updated_exports $updated_lectures
   update_root_idx $catalog $exports $updated_lectures $updated_catalog
   update_mirror $catalog $exports $exported_updates \
                 $updated_lectures $mirror $exported_dirs $mirrored_dirs \
                 $updated_catalog $updated_exports)
}
#=====================================================================
# Update (or check out) the web site root and the Lectures catalog
# from the local Subversion repository, then run the lecture pipeline.
# A marker file (.updated_catalog) records that the catalog changed.
#
update_root () {
  updated_catalog=.updated_catalog
  public_html=$HOME/public_html
  lectures=$public_html/Lectures
  (cd $HOME
   if test -d $HOME/public_html
   then
     printf "Updating $HOME/public_html..."
     updated=$(svn update $public_html)
     if test $? -eq 0
     then formated=$(echo "$updated" | tr '\n' ' ')
          if test $(expr "$formated" : "At revision") -ne 0
          then echo " no update."
          else echo " done."
               symlinks.sh --set public_html
               symlinks.sh --set --recursive public_html/Software
          fi
     else echo " FAILED. Aborting."
          exit 1
     fi
   else printf "Checking out $HOME/public_html..."
        svn checkout file://$HOME/SVN/public_html
        if test $? -eq 0
        then echo " done."
             symlinks.sh --set public_html
             symlinks.sh --set --recursive public_html/Software
        else echo " FAILED. Aborting."
             exit 1
        fi
   fi
   if test -d $lectures
   then
     rm -f $lectures/$updated_catalog
     printf "Updating $lectures..."
     updated=$(svn update --non-recursive $lectures)
     # Fixed: capture the svn exit status before the rm below clobbers
     # $? (rm -f always succeeds, so the FAILED branch was unreachable
     # in the original).
     svn_status=$?
     rm -f $updated_catalog
     if test $svn_status -eq 0
     then formated=$(echo "$updated" | tr '\n' ' ')
          if test $(expr "$formated" : "At revision") -ne 0
          then echo " no update."
          else echo " done."
               touch $lectures/$updated_catalog
          fi
          update_lectures $updated_catalog
     else echo " FAILED. Skipping."
     fi
   else
     printf "Checking out $lectures..."
     (cd $public_html > /dev/null
      svn checkout --non-recursive file://$HOME/SVN/Lectures > /dev/null
      if test $? -eq 0
      then echo " done."
           touch $lectures/$updated_catalog
           update_lectures $updated_catalog
      else echo " FAILED. Skipping."
      fi)
   fi)
}
#=====================================================================
# Updating the CV
#
# Refresh the CV sources from Subversion and, when a published CV
# with a Makefile exists, rebuild it in place.
# NOTE(review): the outer (cd $HOME ...) subshell looks redundant --
# the inner cd uses an absolute path -- confirm before simplifying.
update_cv () {
  echo "Updating CV on the file server: "
  svn update $HOME/CV/English
  (cd $HOME
  if test -d $HOME/public_html/CV
  then (cd $HOME/public_html/CV
        if test -f Makefile
        then echo "Entering $HOME/public_html/CV..."
             symlinks.sh --set
             make -Rrs all
        fi)
  fi)
}
#=====================================================================
# Main
#
# Entry point: refresh the helper tools first (this script depends on
# them), then the whole site, then the CV.
update_tools $(uname -n)
update_root
update_cv
| true
|
9f1e089a42e05efd178b5ca4dc4641c41007f3e3
|
Shell
|
nmarghetti/common_env
|
/tools/xampp/setup.sh
|
UTF-8
| 786
| 3.578125
| 4
|
[] |
no_license
|
#! /usr/bin/env bash

# Install XAMPP portable under $APPS_ROOT/PortableApps and hook it
# into the PortableApps menu. Returns $SETUP_ERROR_CONTINUE when the
# setup file is still missing after the install attempt.
function setup_xampp() {
  local err_code=$SETUP_ERROR_CONTINUE
  local install_dir="$APPS_ROOT/PortableApps/XAMPP"
  local setup_bat="$install_dir/xampp/setup_xampp.bat"
  # Download and unpack XAMPP only when it is not already present.
  if [[ ! -f "$setup_bat" ]]; then
    mkdir -vp "$install_dir"
    local archive=xampp-portable-windows-x64-7.4.2-0-VC15.zip
    download_tarball -e -o "$archive" -d "$install_dir" "https://sourceforge.net/projects/xampp/files/XAMPP%20Windows/7.4.2/$archive/download"
    # First-time initialization of the unpacked tree.
    if [[ ! -f "$install_dir/xampp/xampp_shell.bat" ]]; then
      (cd "$install_dir/xampp" && "./setup_xampp.bat")
    fi
  fi
  if [[ ! -f "$setup_bat" ]]; then
    echo "Setup file not installed"
    return $err_code
  fi
  # Better integrate in PortableApps menu
  rsync -vau "$SETUP_TOOLS_ROOT/xampp/XAMPP" "$APPS_ROOT/PortableApps/"
}
| true
|
cc680d0b57c8e76fc2073e6ca95c4314f7858389
|
Shell
|
ofarukcaki/CSE333-Project-1
|
/solutions/4.sh
|
UTF-8
| 965
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Search every .txt file in the current directory for words matching a
# user-supplied wildcard pattern and report the upper-case substitution
# that would be applied to each match.
#
# $1 - wildcard pattern (e.g. 'b*k'); each '*' becomes the regex '\w*'.
txt_files=$(find *.txt)
originalWildcard=$1
# Translate shell wildcard '*' into the Perl regex fragment '\w*'.
regex=$(echo "$1" | sed 's/*/\\w*/g')
# loop through .txt files in directory
while read -r line; do
    # Collect every word matching the pattern, comma-separated.
    matched=$(perl -nle "@a = /\W*($regex)\w*/g; \$,=','; print @a" "$line")
    # BUG FIX: the original echoed "$a", a shell variable that was never
    # set (@a only exists inside perl); report the actual matches.
    echo "Found: $matched"
    # BUG FIX (original FIXME): split the comma-separated match list so
    # each word is reported individually instead of as one blob.
    for value in ${matched//,/ }
    do
        echo "The word \"$value\" inside $line is substituted with ${value^^}."
        # `sed -i "s/ $value/ ${value^^}/g" "$line"`   # uncomment to apply in place
    done
done <<< "$txt_files"
| true
|
15767e7bd5b5be2b507ba6cd944fadb685b8cb72
|
Shell
|
AlesKas/SchoolUP
|
/UNIX/5th/generate.sh
|
UTF-8
| 140
| 2.796875
| 3
|
[] |
no_license
|
# Generate 1000 files named 0001..1000, each containing one random date
# in M/D/Y format.
# NOTE: like the original, dates are not validated (e.g. 2/31 is possible).
for i in {0001..1000}; do
    day=$(($RANDOM%31 + 1))     # 1..31 (was %30+1, which could never produce 31)
    month=$(($RANDOM%12 + 1))   # 1..12 (was %11+1, which could never produce December)
    year=$(($RANDOM%3000 + 1))  # 1..3000
    echo $month/$day/$year > $i
done
| true
|
04e8a56b1e9925a1c873b98f74d3416037e634d9
|
Shell
|
edwinbalani/dotfiles
|
/shell/function.sh
|
UTF-8
| 2,447
| 3.875
| 4
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
########## functions
# Run a command as another user with that user's XDG_RUNTIME_DIR set, so
# the user's runtime services are reachable.
# Credit to mas90
# $1 - target user name; remaining args - command (and args) to run.
sdas () {
    user="$1"
    shift
    # Fixed: use $(...) instead of backticks and quote the id lookup so
    # the command survives unusual user names.
    sudo -u "$user" XDG_RUNTIME_DIR="/run/user/$(id -u "$user")" "$@"
}
# Update dotfiles on remote systems (how ~meta~ is that?)
# Pulls the latest dotfiles checkout on every host given as an argument;
# reports (but does not abort on) per-host failures.
update-dotfiles () {
    for host in "${@}"; do
        if ! ssh "$host" '. ~/.dotfiles-location && cd $DOTFILES_DIR && git pull'; then
            echo "=== Update for $host failed ==="
        fi
    done
}
# tmux:
if command_exists tmux 2>&1; then
# - create a new session, or join an existing one if it exists
# att NAME: attach to session NAME if it exists (directly when detached,
# via a throwaway grouped session when already attached, so two clients
# can view the same session independently); otherwise create it.
att () {
if tmux has -t "$1" 2>/dev/null; then
# If a session exists...
# NOTE(review): "$1" is interpolated into the awk regex below, so names
# containing regex metacharacters may mis-match -- confirm acceptable.
if [ "$(tmux ls -F '#{session_name}:#{session_attached}' | awk -F: "/^$1:/ {print \$2}")" = "0" ]; then
# ...if it's detached, attach to it
tmux attach -t "$1"
else
# ...otherwise, spawn a temporary session to join its group,
# which is deleted at the end to avoid polluting with sessions
temp_session_name="$(tmux new-session -t "$1" -d -P -F "#{session_name}")"
tmux attach -t "$temp_session_name"
tmux kill-session -t "$temp_session_name"
fi
else
# and if a session doesn't exist, just create it
if [ -n "$1" ]; then
tmux new-session -s "$1"
else
tmux new-session
fi
fi
}
# - list sessions
# this should really be in alias.sh, but I wanted to keep these
# two commands together
alias tls="tmux ls"
fi
# thefuck (if it's installed)
# Registers the standard 'fuck' alias plus a family-friendly 'oops' alias.
# command_exists is defined elsewhere in these shell helpers.
if command_exists thefuck; then
eval "$(thefuck --alias)"
eval "$(thefuck --alias oops)" # Keeping it SFW
fi
# Mount and unmount SSHFS
# mnt HOST FOLDER [sshfs options...]: mount HOST:FOLDER under
# ~/remote/HOST (remounting if already mounted) and cd into it.
mnt () {
    host=$1
    folder=$2
    shift 2
    mntpoint=$HOME/remote/$host
    umnt "$host"
    # Fixed: check each step; previously a failed sshfs still left an
    # empty mountpoint behind and cd'd into it.
    mkdir -p "$mntpoint" || return
    if ! sshfs "$host:$folder" "${@}" "$mntpoint"; then
        rmdir "$mntpoint" 2>/dev/null
        return 1
    fi
    cd "$mntpoint" || return
}
# umnt HOST: if ~/remote/HOST is currently a mountpoint, unmount it and
# remove the (now empty) directory; otherwise do nothing.
umnt () {
    host=$1
    shift
    mntpoint=$HOME/remote/$host
    # Early return when there is nothing mounted there.
    if ! mountpoint -q -- "$mntpoint"; then
        return 0
    fi
    fusermount -u "$mntpoint" && rmdir "$mntpoint"
}
# PDF tools
# panpdf FILE.md: render a Markdown file to an A4 PDF with comfortable
# margins and slightly enlarged body text.
panpdf () {
    pandoc -Vpapersize:a4 -Vgeometry:'margin=1.1in' -Vmainfontoptions:'Scale=1.1' \
        -o "$(basename -s.md "$1").pdf" "$1"
}
# pdf_bw FILE.pdf: convert a PDF to grayscale via Ghostscript.
# The result is always written to ./output.pdf.
pdf_bw () {
    gs -sOutputFile=output.pdf -sDEVICE=pdfwrite \
       -sColorConversionStrategy=Gray -dProcessColorModel=/DeviceGray \
       -dCompatibilityLevel=1.4 -dNOPAUSE -dBATCH "$1"
}
| true
|
8bf309a4335c0e8914fa6a1a10297730fefa7ca3
|
Shell
|
zer0beat/nifi-for-aws
|
/nifi-configuration.sh
|
UTF-8
| 2,609
| 3.3125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Configure an Apache NiFi node on AWS: discover the cluster members via
# the EC2 API, then rewrite nifi-env.sh, zookeeper.properties, myid and
# nifi.properties accordingly.
# Expects in the environment: JAVA_HOME, VERSION, NIFI_HOME, STACKNAME, REGION.
HOSTNAME=$(curl -sL http://instance-data/latest/meta-data/hostname)
AMI_LAUNCH_INDEX=$(curl -sL http://instance-data/latest/meta-data/ami-launch-index)
COORDINATION_PORT=9999
SITE2SITE_PORT=9998
# Pre-escaped with backslashes for direct use in the sed replacement below.
DISK_PATH="\/mnt\/data01\/"
echo "##### Environment variables #####"
echo " JAVA_HOME=${JAVA_HOME}"
echo " VERSION=${VERSION}"
echo " NIFI_HOME=${NIFI_HOME}"
echo " STACKNAME=${STACKNAME}"
echo " REGION=${REGION}"
echo " HOSTNAME=${HOSTNAME}"
echo " AMI_LAUNCH_INDEX=${AMI_LAUNCH_INDEX}"
echo " COORDINATION_PORT=${COORDINATION_PORT}"
echo " SITE2SITE_PORT=${SITE2SITE_PORT}"
echo " DISK_PATH=${DISK_PATH}"
echo "##### NiFi ${VERSION} configuration script #####"
echo "Configure ${NIFI_HOME}/bin/nifi-env.sh"
# Escape '/' so $JAVA_HOME can be used inside a sed replacement.
JAVA_HOME_ESCAPED=$(echo "$JAVA_HOME" | sed 's/\//\\\//g')
# Uncomment and set the JAVA_HOME line in nifi-env.sh (keeps a .backup copy).
sed -i.backup -e "/^#.*JAVA_HOME/s/^#//" -e "s/\(.*JAVA_HOME=\).*/\1$JAVA_HOME_ESCAPED/" ${NIFI_HOME}/bin/nifi-env.sh
# List (launch-index, private DNS) of every running NiFi instance in this stack.
clusterNodes="$(aws ec2 describe-instances --filters Name=instance-state-name,Values=running Name=tag:App,Values='Apache NiFi' Name=tag:aws:cloudformation:stack-name,Values=${STACKNAME} --region ${REGION} --query 'Reservations[*].Instances[*].[AmiLaunchIndex,PrivateDnsName]' --output text)"
echo "Configure ${NIFI_HOME}/conf/zookeeper.properties"
# Drop the placeholder server.1 entry before appending the real members.
sed -i.backup -e "/^server.1/ d" ${NIFI_HOME}/conf/zookeeper.properties
# NOTE(review): IFS is changed globally and never restored -- intentional? confirm.
IFS=$'\n'
for node in ${clusterNodes}
do
# "server.<launch-index+1>=<dns>:2888:3888" per ZooKeeper ensemble syntax.
echo ${node} | awk -F' ' '{print "server."$1+1"="$2":2888:3888" }' >> ${NIFI_HOME}/conf/zookeeper.properties
done
echo "Configure ${NIFI_HOME}/state/zookeeper/myid"
# ZooKeeper ids are 1-based; launch indexes are 0-based.
ZOOKEEPER_INDEX=$((AMI_LAUNCH_INDEX+1))
mkdir -p ${NIFI_HOME}/state/zookeeper
echo "${ZOOKEEPER_INDEX}" > ${NIFI_HOME}/state/zookeeper/myid
echo "Configure ${NIFI_HOME}/conf/nifi.properties"
# Comma-separated "<dns>:2181" list of all ensemble members.
nifiZookeeperConnectString=$(echo "${clusterNodes}" | awk -F' ' '{print $2":2181"}' | xargs | sed 's/ /,/g')
sed -i.backup \
-e "s/\(nifi\.zookeeper\.connect\.string=\).*\$/\1${nifiZookeeperConnectString}/" \
-e "s/\(nifi\.state\.management\.embedded\.zookeeper\.start=\).*\$/\1true/" \
-e "s/\(nifi\.web\.http\.host=\).*\$/\1${HOSTNAME}/" \
-e "s/\(nifi\.cluster\.is\.node=\).*\$/\1true/" \
-e "s/\(nifi\.cluster\.node\.address=\).*\$/\1${HOSTNAME}/" \
-e "s/\(nifi\.cluster\.node\.protocol\.port=\).*\$/\1${COORDINATION_PORT}/" \
-e "s/\(nifi\.remote\.input\.host=\).*\$/\1${HOSTNAME}/" \
-e "s/\(nifi\.remote\.input\.secure=\).*\$/\1false/" \
-e "s/\(nifi\.remote\.input\.socket\.port=\).*\$/\1${SITE2SITE_PORT}/" \
-e "s/\(nifi\.content\.repository\.directory\.default=\).*\$/\1${DISK_PATH}/" \
${NIFI_HOME}/conf/nifi.properties
| true
|
0669a637e0aaf7208dbdc92e68f462e942675eeb
|
Shell
|
DimuthuKasunWP/dokku
|
/plugins/plugin/subcommands/trigger
|
UTF-8
| 252
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# dokku plugin:trigger -- forward an arbitrary plugin hook invocation to plugn.
set -eo pipefail
if [[ $DOKKU_TRACE ]]; then set -x; fi

plugin_trigger_cmd() {
  declare desc="trigger an arbitrary plugin hook"
  local cmd="plugin:trigger"
  # Strip the leading subcommand name, if present, before forwarding.
  if [[ "$1" == "$cmd" ]]; then
    shift 1
  fi
  plugn trigger "$@"
}

plugin_trigger_cmd "$@"
| true
|
daf03ec529918e193b80143c72126a2257487b2c
|
Shell
|
GDR/Arch-Install-Automation
|
/global_functions.sh
|
UTF-8
| 376
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Shared helpers for the Arch install scripts.

MOUNTPOINT="/mnt"

# Run a command, discarding stdout and stderr.
# Pass the command and its arguments as separate words.
run_quiet() {
    "$@" > /dev/null 2>&1
}

# Return 0 if the internet is reachable (one ping to 8.8.8.8), else 1.
check_internet_connection() {
    # Fixed: pass the command as separate arguments rather than a single
    # string, so run_quiet can safely quote "$@".
    if run_quiet ping -q -c 1 -W 1 8.8.8.8 ; then
        return 0
    else
        return 1
    fi
}

# Install package(s) with pacman; $1 is deliberately unquoted so a
# space-separated list expands to multiple packages.
install_with_pacman() {
    pacman -Sy --noconfirm $1
}

# Install package(s) from the AUR with yaourt (same list semantics).
install_with_yaourt() {
    yaourt -Sy --noconfirm $1
}

# Run a command inside the chroot at $MOUNTPOINT.
arch_chroot() {
    arch-chroot ${MOUNTPOINT} "$@"
}
| true
|
8a6ef42a9dd1885f8bcd0531d965f091ac669524
|
Shell
|
Nina-Om/wrfHydroScripts
|
/hgrep.sh
|
UTF-8
| 516
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Search the wrf_hydro_model source tree for the given egrep pattern
# across .inc/.F files and Makefiles at several directory depths.
# The checkout path is read from the wrf_hydro_model entry of
# ~/.wrfHydroScripts.
# NOTE(review): all arguments are forwarded to egrep as-is, so extra
# words after the pattern act as egrep options/operands -- confirm intended.
searchStr=("$@")
echo "${searchStr[@]}"
whmPath=`grep "wrf_hydro_model" ~/.wrfHydroScripts | cut -d '=' -f2 | tr -d ' '`
cd $whmPath/trunk/NDHMS/
egrep -s "${searchStr[@]}" */*.inc
egrep -s "${searchStr[@]}" */*.F
egrep -s "${searchStr[@]}" */*/*.F
egrep -s "${searchStr[@]}" */*/*/*.F
egrep -s "${searchStr[@]}" */*/*/*/*.F
egrep -s "${searchStr[@]}" */Makefile
egrep -s "${searchStr[@]}" */*/Makefile
egrep -s "${searchStr[@]}" */*/*/Makefile
egrep -s "${searchStr[@]}" */*/*/*/Makefile
exit 0
| true
|
aa7a883119c89db31d45525d20eca67226a3f809
|
Shell
|
tommycarstensen/tc9
|
/projects/uganda_gwas/bt.sh
|
UTF-8
| 847
| 2.734375
| 3
|
[] |
no_license
|
# Concatenate per-chromosome UnifiedGenotyper VCFs, keep only biallelic
# sites, convert to IMPUTE2 gen/sample format, then append trio pedigree
# information to the .samples file.
# $1 - chromosome name.
bcftools=/software/hgi/pkglocal/bcftools-1.2/bin/bcftools
tabix=/software/hgi/pkglocal/htslib-1.2.1/bin/tabix

chrom=$1

o=out_bt/$chrom

# Bail out if any input VCF is missing its tabix index.
if [ $(ls ../pipeline_UG3.3/out_UnifiedGenotyper/$chrom/*.vcf.gz | wc -l) -ne $(ls ../pipeline_UG3.3/out_UnifiedGenotyper/$chrom/*.vcf.gz.tbi | wc -l) ]; then exit; fi

mkdir -p $(dirname $o)

# $o doubles as a lock/marker file so reruns skip completed chromosomes.
if [ -f $o ]; then exit; fi
touch $o

$bcftools concat \
 $(ls ../pipeline_UG3.3/out_UnifiedGenotyper/$chrom/*.vcf.gz | sort -V) \
 -Ou | $bcftools view -M2 -m2 \
 -Ou | $bcftools convert --gensample $o
# Alternative output as compressed VCF:
# -Oz -o $o
#$tabix -p vcf $o

## Add the trio information to the .samples file.
# BUG FIX: the original ran `cat "ID_1 ..."`, which treats the whole
# string as a *filename* (so nothing was ever written); use a here-doc
# to emit the literal pedigree text.
cat > $o.samples <<EOF
ID_1 ID_2 missing ID_father ID_mother
0 0 0
EGAN00001160764 EGAN00001160764 0 EGAN00001160765 EGAN00001160766
EGAN00001160765 EGAN00001160765 0
EGAN00001160766 EGAN00001160766 0
EOF
| true
|
62d5544693b3d9a02d75a6f42d8b5c398dbbf89b
|
Shell
|
JasonMorgan/istio-getting-started
|
/01_install.sh
|
UTF-8
| 480
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the latest Istio release, install the demo profile into the
# current cluster, and enable sidecar injection on the default namespace.
curl -L https://istio.io/downloadIstio | sh -
# Compute the newest non-prerelease tag from the GitHub releases API.
# NOTE(review): the download above fetches whatever istio.io deems
# "latest"; if that ever differs from $REL, the pushd below fails -- confirm.
REL=$(curl -L -s https://api.github.com/repos/istio/istio/releases | \
grep tag_name | sed "s/ *\"tag_name\": *\"\\(.*\\)\",*/\\1/" | \
grep -v -E "(alpha|beta|rc)\.[0-9]$" | sort -t"." -k 1,1 -k 2,2 -k 3,3 -k 4,4 | tail -n 1)
pushd istio-$REL
export PATH=$PWD/bin:$PATH
istioctl install --set profile=demo
kubectl label namespace default istio-injection=enabled
# Sanity-check the mesh configuration.
istioctl analyze
popd
| true
|
25bd85931fc17651db8c263b9aaff7de5ef173ce
|
Shell
|
tomzhang/HorsePower
|
/docs/mkdocs/program/type/deploy.sh
|
UTF-8
| 315
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Regenerate the per-function type-rule PNG diagrams (via ./run.sh) and
# copy them into the docs tree.
dest=../../docs/horseir/types/
# Functions whose diagrams are (re)generated.
list=(lt eq plus minus mul mod logic append like compress indexof order member vector)
#list=(eq)   # handy for testing a single diagram
for func in "${list[@]}"
do
	echo "generating $func.png"
	./run.sh "$func"
	echo "copying genpdf/$func.png to $dest"
	cp "genpdf/$func.png" "$dest"
done
# Removed: unused `from=genpdf/*.png` variable; quoted expansions.
| true
|
b767315709aabdb55adf1f30824806506a6c0e72
|
Shell
|
osrf/cloudsim-sim
|
/aws/sasc_deploy.bash
|
UTF-8
| 3,923
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# To be executed after the machine is created. It can read from cloudsim-options.json.
# Sets up the SASC VPN topology: an "arbiter" machine hosts two OpenVPN
# servers (blue and gold subnets); "payload" machines join as clients.
# Bundles are fetched from routes declared in cloudsim-options.json.
set -x
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
codedir="$DIR/../.."
# Helper to parse options from cloudsim-options.json
get_option(){
echo `node -pe "var f = \"$1\"; var query = \"$2\"; var j=require(f); j[query] "`
}
# This file is created by cloudsim when the machine is launched
optionsfile=$codedir/cloudsim-options.json
echo "codedir is $codedir"
# Common options
role=`get_option $optionsfile role`
token=`get_option $optionsfile token`
# Arbiter options
blue_route=`get_option $optionsfile blue_route`
gold_route=`get_option $optionsfile gold_route`
blue_subnet=`get_option $optionsfile blue_subnet`
gold_subnet=`get_option $optionsfile gold_subnet`
payload_count=`get_option $optionsfile payload_count`
# Payload options
client_route=`get_option $optionsfile client_route`
server_ip=`get_option $optionsfile server_ip`
client_id=`get_option $optionsfile client_id`
echo "role: $role"
echo "token: $token"
echo "blue_route: $blue_route"
echo "gold_route: $gold_route"
echo "blue_subnet: $blue_subnet"
echo "gold_subnet: $gold_subnet"
echo "server_ip: $server_ip"
echo "client_id: $client_id"
#apt-get update
#apt-get install sasc-gazebo-sitl
if [ $role == "arbiter" ]; then
# Fetch bundles
mkdir -p $codedir/blue $codedir/gold
curl -X GET --header 'Accept: application/json' --header "authorization: $token" $blue_route > $codedir/blue/bundle.tgz
curl -X GET --header 'Accept: application/json' --header "authorization: $token" $gold_route > $codedir/gold/bundle.tgz
# Unpack bundles
cd $codedir/blue
tar xf bundle.tgz
cd $codedir/gold
tar xf bundle.tgz
# Create static IP configuration for each client on each of the two subnets
mkdir -p $codedir/blue/staticclients
mkdir -p $codedir/gold/staticclients
# Just a bit of backward compatibility
if [ $payload_count == "undefined" ]; then
echo "No payload_count; falling back to old behavior of one payload per team."
echo "ifconfig-push ${blue_subnet}.10 255.255.255.0" > $codedir/blue/staticclients/payload
echo "ifconfig-push ${gold_subnet}.10 255.255.255.0" > $codedir/gold/staticclients/payload
else
echo "Creating static IPs for $payload_count payloads per team"
# Payload N gets address .1N on each team subnet (10, 11, 12, ...).
for (( payload_num=0; payload_num<$payload_count; payload_num++ )); do
echo "ifconfig-push ${blue_subnet}.$((10+payload_num)) 255.255.255.0" > $codedir/blue/staticclients/payload${payload_num}
echo "ifconfig-push ${gold_subnet}.$((10+payload_num)) 255.255.255.0" > $codedir/gold/staticclients/payload${payload_num}
done
fi
# Start servers
cd $codedir/blue
$codedir/blue/start_vpn.bash blue $blue_subnet openvpn.conf $gold_subnet
cd $codedir/gold
$codedir/gold/start_vpn.bash gold $gold_subnet openvpn.conf $blue_subnet
# Make the servers come back up on reboot
cat << EOF > /etc/rc.local
#!/bin/bash
cd $codedir/blue && $codedir/blue/start_vpn.bash blue $blue_subnet openvpn.conf $gold_subnet
cd $codedir/gold && $codedir/gold/start_vpn.bash gold $gold_subnet openvpn.conf $blue_subnet
exit 0
EOF
elif [ $role == "payload" ]; then
# Fetch bundle
mkdir -p $codedir/vpn
echo curl -X GET --header 'Accept: application/json' --header "authorization: $token" "${client_route}?serverIp=${server_ip}&id=${client_id}"
curl -X GET --header 'Accept: application/json' --header "authorization: $token" "${client_route}?serverIp=${server_ip}&id=${client_id}" > $codedir/vpn/bundle.tgz
# Unpack bundle
cd $codedir/vpn
tar xf bundle.tgz
# Start server
echo cd $codedir/vpn
cd $codedir/vpn
echo openvpn --config openvpn.conf --daemon
openvpn --config openvpn.conf --daemon
# Make the client come back up on reboot
cat << EOF > /etc/rc.local
#!/bin/bash
cd $codedir/vpn && openvpn --config openvpn.conf --daemon
exit 0
EOF
else
echo "ERROR: Unknown role \"$role\"."
fi
| true
|
5b65fcca32a18327412957029fddda4cc2e3b393
|
Shell
|
bluec0re/archlinux-repos
|
/vim-pct/PKGBUILD
|
UTF-8
| 1,100
| 2.84375
| 3
|
[] |
no_license
|
# Maintainer: Timo Schmid <arch@timoschmid.de>
# PKGBUILD for the pct-vim plugin, built from a pinned git commit.
pkgname=vim-pct-git # '-bzr', '-git', '-hg' or '-svn'
pkgver=r9.1ea3892
pkgrel=1
pkgdesc=""
arch=('any')
url="https://github.com/d0c-s4vage/pct-vim"
license=('MIT')
groups=()
depends=('gvim' 'python2-peewee' )
makedepends=('git') # 'bzr', 'git', 'mercurial' or 'subversion'
provides=("${pkgname%-git}")
conflicts=("${pkgname%-git}")
replaces=()
backup=()
options=()
install=
source=('vim-pct::git+https://github.com/d0c-s4vage/pct-vim#commit=1ea3892a79fc3462a4a9343bc98dfcfcc1a36ce2')
noextract=()
md5sums=('SKIP')
# Please refer to the 'USING VCS SOURCES' section of the PKGBUILD man page for
# a description of each element in the source array.
# Derive the VCS package version (r<commit count>.<short hash>).
pkgver() {
cd "$srcdir/${pkgname%-git}"
# Git, tags available
# printf "%s" "$(git describe --long | sed 's/\([^-]*-\)g/r\1/;s/-/./g')"
# Git, no tags available
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
# Install the plugin file into the system-wide vim plugin directory.
package() {
cd "$srcdir/${pkgname%-git}"
mkdir -p $pkgdir/usr/share/vim/vimfiles/plugin
install -m644 -t "$pkgdir/usr/share/vim/vimfiles/plugin" pct.vim
}
| true
|
9b49ccb0f9be871b1955a6a40870a35c35bcbd5a
|
Shell
|
tt-16/practice-code
|
/neo-shell/WRF/test/setNameList_input.sh
|
UTF-8
| 3,629
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
#********************************************************
# [modify the namelist f parameter]
#********************************************************
# Rewrites the start/end dates in a WRF namelist.input for a rolling
# daily forecast window (start = now, end = now + 24h), then installs the
# updated namelist into the WRFV3 run directory.
# Expects $Wname (wind farm name) to be set by the caller.
#Wname="name"
logfile="/wrf/LOG/$Wname/setConfig.log"
date=`date +%Y-%m-%d_%H:%M:%S`
echo " " > $logfile
echo "< setConfig shell start at " $date ">" >> $logfile
echo " " >> $logfile
#-------------
# Target date components. NOTE(review): the "old*" values computed here
# are immediately overwritten below by the values read from the current
# namelist -- these assignments appear to be dead code; confirm.
newStartYear=`date +%Y` #= 2015, 2015, 2015,
oldStartYear=`date --date='-24 hour' +%Y` #= 2015, 2015, 2015,
newStartMonth=`date +%m` #= 05, 05, 05,
oldStartMonth=`date --date='-24 hour' +%m` #= 05, 05, 05,
newStartDay=`date +%d` #= 12, 12, 12,
oldStartDay=`date --date='-24 hour' +%d` #= 12, 12, 12,
#newStartHour #= 15, 15, 15,
#oldStartHour #= 15, 15, 15,
#start_minute = 00, 00, 00,
#start_second = 00, 00, 00,
newEndYear=`date --date='+24 hour' +%Y` #= 2015, 2015, 2015,
oldEndYear=`date --date='+48 hour' +%Y` #= 2015, 2015, 2015,
newEndMonth=`date --date='+24 hour' +%m` #= 05, 05, 05,
oldEndMonth=`date --date='+48 hour' +%m` #= 05, 05, 05,
newEndDay=`date --date='+24 hour' +%d` #= 15, 15, 15,
oldEndDay=`date --date='+48 hour' +%d` #= 15, 15, 15,
#-------------
#modify /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input
# For each field: read the current value and its line number (NR), then
# sed-replace the old value with the new one on that exact line only.
oldStartYear=`awk '$1=="start_year"{print substr($3,1,4)}' /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input`
OSYNR=`awk '$1=="start_year"{print NR}' /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input`
oldStartMonth=`awk '$1=="start_month"{print substr($3,1,2)}' /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input`
OSMNR=`awk '$1=="start_month"{print NR}' /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input`
oldStartDay=`awk '$1=="start_day"{print substr($3,1,2)}' /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input`
OSDNR=`awk '$1=="start_day"{print NR}' /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input`
oldEndYear=`awk '$1=="end_year"{print substr($3,1,4)}' /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input`
OEYNR=`awk '$1=="end_year"{print NR}' /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input`
oldEndMonth=`awk '$1=="end_month"{print substr($3,1,2)}' /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input`
OEMNR=`awk '$1=="end_month"{print NR}' /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input`
oldEndDay=`awk '$1=="end_day"{print substr($3,1,2)}' /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input`
OEDNR=`awk '$1=="end_day"{print NR}' /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input`
sed -i "$OSYNR s/$oldStartYear/$newStartYear/g" /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input
sed -i "$OSMNR s/$oldStartMonth/$newStartMonth/g" /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input
sed -i "$OSDNR s/$oldStartDay/$newStartDay/g" /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input
sed -i "$OEYNR s/$oldEndYear/$newEndYear/g" /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input
sed -i "$OEMNR s/$oldEndMonth/$newEndMonth/g" /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input
sed -i "$OEDNR s/$oldEndDay/$newEndDay/g" /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input
#replace the namelist file
rm /wrf/LIBRARIES/WRFV3/test/em_real/namelist.input -rf
cp -r -f /wrf/LIBRARIES/WindFarmConfig/$Wname/namelist.input /wrf/LIBRARIES/WRFV3/test/em_real/
date=`date +%Y-%m-%d_%H:%M:%S`
echo " " >> $logfile
echo "< setConfig shell finished at " $date ">" >> $logfile
echo " " >> $logfile
| true
|
34737f623f0c44df0cf834d2dcc9fb103cc8068a
|
Shell
|
andy489/Linux_Shell
|
/1 – FMI Tasks/Shell/05-b-3300.sh
|
UTF-8
| 347
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# 05-b-3300
# Merge two readable files line-by-line (paste), sort the result, and
# write it to the file named by the third argument.
# Exit codes: 1 - wrong argument count, 2 - input files not both readable.
if [ ! $# -eq 3 ]; then
	echo "Invalid number of arguments!"   # fixed typo: "argumrnts"
	exit 1
fi

FILE_1="${1}"
FILE_2="${2}"

# Fixed: ${FILE_2} was unquoted in the -r test.
if [ ! -r "${FILE_1}" ] || [ ! -r "${FILE_2}" ]; then
	echo "First two files are not both readable."
	exit 2
fi

paste "${FILE_1}" "${FILE_2}" | sort > "${3}"
# paste -d "\n" "${FILE_1}" "${FILE_2}" | sort > "${3}"
| true
|
9ffd3acb6a5f19372f27fcfd5db32c3d78ee4dd2
|
Shell
|
Filipposk9/Linux
|
/mfproc
|
UTF-8
| 4,898
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
#mfproc [-u username] [-s S|R|Z]
# List processes from /proc (name, PID, PPID, UID, GID, state), optionally
# filtered by owner (-u USER), by state letter (-s S|R|Z), or both.
# Exit codes: 1 unknown user, 2 bad state letter, 3 bad combined arguments.
echo -e "Name\t\t\tPID\tPPID\tUID\tGID\tState"
if [ "$#" -eq "0" ]
then
# No filters: print every numeric entry under /proc.
ls /proc | egrep "[0-9]" | while read process
do
name=`(cat "/proc/$process/status" | egrep "^Name" | cut -f2 -d$'\t') 2>/dev/null`
processID=`(cat "/proc/$process/status" | egrep "^Pid" | cut -f2 -d$'\t') 2>/dev/null`
parentProcessID=`(cat "/proc/$process/status" | egrep "^PPid" | cut -f2 -d$'\t') 2>/dev/null`
userID=`(cat "/proc/$process/status" | egrep "^Uid" | cut -f2 -d$'\t') 2>/dev/null`
groupID=`(cat "/proc/$process/status" | egrep "^Gid" | cut -f2 -d$'\t') 2>/dev/null`
state=`(cat "/proc/$process/status" | egrep "^State" | cut -f2 -d$'\t') 2>/dev/null`
# Pad short names with an extra tab so the columns line up.
if [ "${#name}" -ge "8" ]
then
echo -e "$name\t\t$processID\t$parentProcessID\t$userID\t$groupID\t$state"
elif [ "${#name}" -le "7" ] && [ "${#name}" -gt "0" ]
then
echo -e "$name\t\t\t$processID\t$parentProcessID\t$userID\t$groupID\t$state"
fi
done
exit 0
elif [ "$#" -eq "2" ]
then
if [ "$1" == "-u" ]
then
# BUG FIX: match the exact user name ("^name:"), not any prefix.
if [ "`egrep "^$2:" /etc/passwd`" != "" ]
then
userSpecified=`id -u $2`
ls /proc | egrep "[0-9]" | while read process
do
userID=`(cat "/proc/$process/status" | egrep "^Uid" | cut -f2 -d$'\t') 2>/dev/null`
if [ "$userID" == "$userSpecified" ]
then
name=`(cat "/proc/$process/status" | egrep "^Name" | cut -f2 -d$'\t') 2>/dev/null`
processID=`(cat "/proc/$process/status" | egrep "^Pid" | cut -f2 -d$'\t') 2>/dev/null`
parentProcessID=`(cat "/proc/$process/status" | egrep "^PPid" | cut -f2 -d$'\t') 2>/dev/null`
userID=`(cat "/proc/$process/status" | egrep "^Uid" | cut -f2 -d$'\t') 2>/dev/null`
groupID=`(cat "/proc/$process/status" | egrep "^Gid" | cut -f2 -d$'\t') 2>/dev/null`
state=`(cat "/proc/$process/status" | egrep "^State" | cut -f2 -d$'\t') 2>/dev/null`
if [ "${#name}" -ge "8" ]
then
echo -e "$name\t\t$processID\t$parentProcessID\t$userID\t$groupID\t$state"
elif [ "${#name}" -le "7" ] && [ "${#name}" -gt "0" ]
then
echo -e "$name\t\t\t$processID\t$parentProcessID\t$userID\t$groupID\t$state"
fi
fi
done
exit 0
else
exit 1
fi
elif [ "$1" == "-s" ]
then
if [ "$2" == "S" ] || [ "$2" == "R" ] || [ "$2" == "Z" ]
then
stateSpecified="$2"
ls /proc | egrep "[0-9]" | while read process
do
state=`(cat "/proc/$process/status" | egrep "^State" | cut -f2 -d$'\t') 2>/dev/null`
if [ "$state" == "$stateSpecified (sleeping)" ] || [ "$state" == "$stateSpecified (running)" ] || [ "$state" == "$stateSpecified (zombie)" ]
then
name=`(cat "/proc/$process/status" | egrep "^Name" | cut -f2 -d$'\t') 2>/dev/null`
processID=`(cat "/proc/$process/status" | egrep "^Pid" | cut -f2 -d$'\t') 2>/dev/null`
parentProcessID=`(cat "/proc/$process/status" | egrep "^PPid" | cut -f2 -d$'\t') 2>/dev/null`
# BUG FIX: userID was never extracted in this branch, so the UID
# column printed empty.
userID=`(cat "/proc/$process/status" | egrep "^Uid" | cut -f2 -d$'\t') 2>/dev/null`
groupID=`(cat "/proc/$process/status" | egrep "^Gid" | cut -f2 -d$'\t') 2>/dev/null`
if [ "${#name}" -ge "8" ]
then
echo -e "$name\t\t$processID\t$parentProcessID\t$userID\t$groupID\t$state"
elif [ "${#name}" -le "7" ] && [ "${#name}" -gt "0" ]
then
echo -e "$name\t\t\t$processID\t$parentProcessID\t$userID\t$groupID\t$state"
fi
fi
done
exit 0
else
exit 2
fi
fi
elif [ "$#" -eq "4" ]
then
# Combined -u USER -s STATE filtering.
# BUG FIX: exact user-name match here too.
if [ "`egrep "^$2:" /etc/passwd`" != "" ] && ([ "$4" == "S" ] || [ "$4" == "R" ] || [ "$4" == "Z" ])
then
userSpecified=`id -u $2`
stateSpecified="$4"
ls /proc | egrep "[0-9]" | while read process
do
userID=`(cat "/proc/$process/status" | egrep "^Uid" | cut -f2 -d$'\t') 2>/dev/null`
state=`(cat "/proc/$process/status" | egrep "^State" | cut -f2 -d$'\t') 2>/dev/null`
if [ "$userID" == "$userSpecified" ] && ([ "$state" == "$stateSpecified (sleeping)" ] || [ "$state" == "$stateSpecified (running)" ] || [ "$state" == "$stateSpecified (zombie)" ])
then
name=`(cat "/proc/$process/status" | egrep "^Name" | cut -f2 -d$'\t') 2>/dev/null`
processID=`(cat "/proc/$process/status" | egrep "^Pid" | cut -f2 -d$'\t') 2>/dev/null`
parentProcessID=`(cat "/proc/$process/status" | egrep "^PPid" | cut -f2 -d$'\t') 2>/dev/null`
groupID=`(cat "/proc/$process/status" | egrep "^Gid" | cut -f2 -d$'\t') 2>/dev/null`
if [ "${#name}" -ge "8" ]
then
echo -e "$name\t\t$processID\t$parentProcessID\t$userID\t$groupID\t$state"
elif [ "${#name}" -le "7" ] && [ "${#name}" -gt "0" ]
then
echo -e "$name\t\t\t$processID\t$parentProcessID\t$userID\t$groupID\t$state"
fi
fi
done
exit 0
else
exit 3
fi
else
echo "Bad syntax"
fi
| true
|
ad03758975738168b2dfccb40de8ad7245f71eae
|
Shell
|
jhunkeler/spm_packages
|
/sqlite/build.sh
|
UTF-8
| 607
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# SPM package recipe for sqlite: metadata plus prepare/build/package hooks.
# The _prefix/_pkgdir/_runtime/_maxjobs variables are supplied by the SPM
# build driver that sources this file.
name=sqlite
version=3.29.0
_v=3290000
revision=0
sources=(
"https://sqlite.org/2019/${name}-autoconf-${_v}.tar.gz"
)
build_depends=(
"automake"
"autoconf"
"readline"
"zlib"
)
depends=(
"readline"
"zlib"
)
# Unpack the source tarball and enter the build directory.
function prepare() {
tar xf ${name}-autoconf-${_v}.tar.gz
cd ${name}-autoconf-${_v}
if [[ $(uname -s) == Darwin ]]; then
# extra -rpath kills the build
# NOTE(review): LDFLAGS is assigned but not exported; presumably the
# driver runs build() in the same shell so configure still sees it -- confirm.
LDFLAGS="-L${_runtime}/lib"
fi
}
# Configure and compile.
function build() {
./configure --prefix=${_prefix}
make -j${_maxjobs}
}
# Stage the installation into the package directory.
function package() {
make install DESTDIR="${_pkgdir}"
}
| true
|
0a59c52d2a7d5de4a48924987aa9a71f6542f993
|
Shell
|
cwonrails/shell-scripts
|
/scripts/start-work
|
UTF-8
| 164
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch the native macOS applications used for a work session.
# Aborts on the first app that fails to open (set -e).
set -euo pipefail

start_apps() {
  local app
  for app in "$@"; do
    open -a "$app"
  done
}

start_apps "Todoist" "Slack" "Trello" "Spotify"
| true
|
ddbed2ab75af1b542a9bdd006ce62182728c2cd5
|
Shell
|
pengchen98/bitcoin-controller
|
/travis/deploy.sh
|
UTF-8
| 2,015
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Push images to Docker hub and update Helm chart repository
#
# This script assumes that there is a file tag
# which contains the name of the tag to use for the push
#
# We assume that the following environment variables are set
# DOCKER_USER       User for docker hub
# DOCKER_PASSWORD   Password
# TRAVIS_BUILD_DIR  Travis build directory
# TRAVIS_TAG        Tag if the build is caused by a git tag
# Also required below: GITHUB_USER / GITHUB_PASSWORD for the chart push.
set -e
#
# Login to Docker hub
#
echo "$DOCKER_PASSWORD" | docker login --username $DOCKER_USER --password-stdin
#
# Get tag
#
tag=$(cat $TRAVIS_BUILD_DIR/tag)
#
# Push images
#
docker push christianb93/bitcoin-controller:$tag
docker push christianb93/bitcoin-controller:latest
#
# Now clone into the repository that contains the Helm chart
#
cd /tmp
git clone https://www.github.com/christianb93/bitcoin-controller-helm-qa
cd bitcoin-controller-helm-qa
#
# Replace image version in values.yaml by new tag
#
cat values.yaml | sed "s/controller_image_tag.*/controller_image_tag: \"$tag\"/" > /tmp/values.yaml.patched
cp /tmp/values.yaml.patched values.yaml
#
# Get current version from Chart file and remove build tag
#
current_version=$(cat Chart.yaml | grep "version" | awk '{ print $2 }' | sed 's/-dev[a-z,0-9]*//')
echo "Current chart version: $current_version"
#
# Update chart version and appVersion in chart file
# Tagged builds use the git tag as the chart version; otherwise append
# a -dev<image tag> suffix to the current version.
#
if [ "X$TRAVIS_TAG" != "X" ]; then
chart_version=$TRAVIS_TAG
else
chart_version="$current_version-dev$tag"
fi
echo "Using chart version $chart_version"
cat Chart.yaml | sed "s/version.*/version: $chart_version/" | sed "s/appVersion.*/appVersion: $tag/" > /tmp/Chart.yaml.patched
cp /tmp/Chart.yaml.patched Chart.yaml
git add --all
git config --global user.name christianb93
git config --global user.email me@unknown
git config remote.origin.url https://$GITHUB_USER:$GITHUB_PASSWORD@github.com/christianb93/bitcoin-controller-helm-qa
git commit -m "Automated deployment of chart version $chart_version"
git push origin master
| true
|
c16d1cb752647e8d69e79bb4c682c7ae017ddafa
|
Shell
|
zhiru-liu/microbiome_evolution
|
/bash_scripts/cut_kegg_pathway.sh
|
UTF-8
| 582
| 2.65625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
# For each PATRIC genome id listed in patric_genomes.txt, extract columns
# 6 and 21 of the PATRIC features table (handling both gzip-compressed
# and plain files) into per-genome kegg files.
# NOTE(review): assumes exactly one features.tab* file per genome -- the
# `ls` glob would break on multiple matches; confirm.
while read PATRIC; do
file=`ls /pollard/shattuck0/snayfach/databases/PATRIC/genomes/${PATRIC}/${PATRIC}.PATRIC.features.tab*`
if file --mime-type "$file" | grep -q gzip$; then
zcat /pollard/shattuck0/snayfach/databases/PATRIC/genomes/${PATRIC}/${PATRIC}.PATRIC.features.tab | cut -f6,21 > ~/ben_nandita_hmp_data/kegg/${PATRIC}.kegg.txt
else
cat /pollard/shattuck0/snayfach/databases/PATRIC/genomes/${PATRIC}/${PATRIC}.PATRIC.features.tab | cut -f6,21 > ~/ben_nandita_hmp_data/kegg/${PATRIC}.kegg.txt
fi
done < ~/ben_nandita_hmp_data/patric_genomes.txt
| true
|
aad2abe3198335ed60146e294a87be880050b5a3
|
Shell
|
gobolinux/BuildLiveCD
|
/bin/MakeInitRDTree
|
UTF-8
| 5,975
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Builds the GoboLinux LiveCD InitRD tree under $PWD/Output/InitRD from
# the ROLayer and the package archives. Must run as root.
source $(dirname $0)/CompatFunctions
helpOnNoArguments=no
scriptDescription="Builds the GoboLinux LiveCD InitRD tree."
scriptCredits="(C)2003-2009 by Hisham Muhammad et al. Released under the GNU GPL."
scriptUsage="[options]"
Add_Option_Boolean "i" "use-system-index" "Generate a tree using the /System/Index layout."
Parse_Options "$@"
unset usesystemindex
Boolean "use-system-index" && usesystemindex=true
####################################################
# Operation
####################################################
if [ "$UID" != 0 ]
then
echo "Must be run as superuser."
exit 1
fi
# Working directories (relative to the invocation directory).
root=$PWD/Output/InitRD
rolayer=$PWD/Output/ROLayer
archives=$PWD/Archives/
# Start from a clean tree on every run.
rm -rf $root
mkdir -p $root
# Pick the (single) versioned Scripts package in the ROLayer.
scriptsVersion=$(ls $rolayer/Programs/Scripts/ | grep -v "Current\|Settings\|Variable")
source $rolayer/Programs/Scripts/$scriptsVersion/Functions/Bootstrap
####################################################
# Fetch packages
####################################################
today=`date +%Y%m%d`
arch=`uname -m`
# Package tarballs installed inside the InitRD.
packages=(
$(basename $(ls $archives/BusyBox-InitRD--*--$arch.tar.bz2))
InitRDScripts--$today-GIT--$arch.tar.bz2
)
# Kernel modules (storage/USB/filesystem drivers) needed at early boot.
kernel_modules=(
lpc-ich
ehci
ohci
sdhci
usb-storage
usb-common
usbcore
bcma
fotg210
fusbh200
mmc_block
mmc_core
isp116x
isp1362
oxu210hp
r8a66597
sl811
ssb
u132
ufshcd
uhci
vhci
whci
xhci
overlay
uas
)
cd $root
####################################################
# Directory structure
####################################################
# Create_Filesystem_Tree comes from the sourced Bootstrap functions.
Create_Filesystem_Tree $root root $usesystemindex
# Replace the default mountpoints with the LiveCD-specific set.
rmdir $root/Mount/*
mkdir -p $root/Mount/HD0
mkdir -p $root/Mount/HD1
mkdir -p $root/Mount/HD2
mkdir -p $root/Mount/Media
mkdir -p $root/Mount/.Pivot
mkdir -p $root/Mount/TmpFS
mkdir -p $root/Mount/UnionFS
mkdir -p $root/Mount/SquashFS
mkdir -p $root/Mount/CD-ROM
####################################################
# Some additional links
####################################################
ln -nfs ../../proc/mounts $root/System/Settings/mtab
# The kernel runs /linuxrc as the initrd init process.
ln -nfs bin/startGoboLinux $root/linuxrc
####################################################
# Nano-InstallPackage
####################################################
# Minimal package install: just unpack each tarball into Programs/.
cd $root/Programs
for package in "${packages[@]}"
do
echo "Installing $package inside InitRD..."
tar xjpf $archives/$package || Die "Could not install $package."
done
####################################################
# Nano-SymlinkProgram
####################################################
# Minimal SymlinkProgram: link every installed executable into the
# executables directory of the chosen layout.
cd $root
if [ "$usesystemindex" ]
then bindir=usr/bin
else bindir=System/Links/Executables
fi
ls -d Programs/*/Current/bin/* Programs/*/Current/sbin/* | while read i
do
ln -nfs /$i $bindir/
done
####################################################
# Populate /System/Kernel/Devices
####################################################
Create_Device_Nodes $root
####################################################
# Install required kernel modules
####################################################
# Mirror the selected modules from the ROLayer's kernel tree into the
# InitRD, decompressing .xz modules on the way.
kernelversion=$(basename "$(readlink -f "$rolayer/Programs/Linux/Current")")
initrd_moddir="${root}/lib/modules/${kernelversion}-Gobo"
mkdir -p "${initrd_moddir}/kernel"
modulesdir="${rolayer}/Programs/Linux/Current/lib/modules/${kernelversion}-Gobo"
if [ "$kernel_modules" ]
then
# Satisfy depmod needs
[ -e "${modulesdir}/modules.order" ] && cp "${modulesdir}/modules.order" "$initrd_moddir"
[ -e "${modulesdir}/modules.builtin" ] && cp "${modulesdir}/modules.builtin" "$initrd_moddir"
for module in "${kernel_modules[@]}"
do
Quiet pushd "${modulesdir}/kernel"
# A module name may match several files (plain and .xz variants).
modpath=( $(find -name "${module}*.ko" -or -name "${module}*.ko.xz") )
Quiet popd
[ -z "$modpath" ] && continue
for mod in "${modpath[@]}"
do
mkdir -p "${initrd_moddir}/kernel/$(dirname ${mod})"
cp --remove-destination \
${modulesdir}/kernel/${mod} \
${initrd_moddir}/kernel/${mod}
if echo "${mod}" | grep -q ".xz"
then
xz -d "${initrd_moddir}/kernel/${mod}"
fi
done
done
fi
####################################################
# Populate /System/Settings
####################################################
# Write the minimal /etc-equivalent files the InitRD needs at boot.
cat <<EOF > $root/System/Settings/fstab
/dev/ram0      /             ext2     defaults
none           /proc         proc     defaults
/lib/modules   /System/Kernel/Modules none bind 0 0
EOF
cat <<EOF > $root/System/Settings/group
root:x:0:
EOF
cat <<EOF > $root/System/Settings/hostname
mini-GoboLinux
EOF
# BusyBox init configuration: boot script plus respawning shells on ttys.
cat <<EOF > $root/System/Settings/inittab
::sysinit:/bin/startGoboLinux
::respawn:-/bin/ash
tty2::askfirst:-/bin/ash
tty3::askfirst:-/bin/ash
tty4::askfirst:-/bin/ash
tty5::askfirst:-/bin/ash
tty6::askfirst:-/bin/ash
::ctrlaltdel:/sbin/reboot
::shutdown:/bin/umount -a -r
::shutdown:/sbin/swapoff -a
EOF
cat <<EOF > $root/System/Settings/passwd
root:x:0:0::/Users/root:/bin/ash
bin:x:1:1:bin:/bin:
daemon:x:2:2:daemon:/sbin:
sync:x:5:0:sync:/sbin:/bin/sync
uucp:x:10:14:uucp:/var/spool/uucppublic:
nobody:x:99:99:nobody:/:
EOF
# Shell profile for the RAMdisk; escaped \$ sequences are expanded at
# boot time inside the InitRD, not while writing this file.
cat <<EOF > $root/System/Settings/profile
TERM=linux
PATH="/System/Index/bin"
#PS1="\w>$"
PS1='\[\033[1;33;41m\]RAMdisk\[\033[1;31;49m\] \w]\[\033[0m\]'
PS2='> '
export TERM PATH PS1 PS2
date +"%d/%m %H:%M:%S"
alias l='ls -l'
if [ "\$(basename \$TTY)" = "tty4" ]
then
/bin/chroot /Mount/SquashFS /usr/bin/env -i HOME=/Users/root TERM=linux /bin/zsh
else
exec /bin/chroot /Mount/SquashFS /usr/bin/env -i HOME=/Users/root TERM=linux /bin/zsh
fi
EOF
cat <<EOF > $root/System/Settings/rc
#!/bin/ash
/bin/mount -av
/bin/startGoboLinux
EOF
chmod +x $root/System/Settings/rc
cat <<EOF > $root/System/Settings/shadow
root::11501:0:::::
bin:*:9797:0:::::
daemon:*:9797:0:::::
sync:*:9797:0:::::
uucp:*:9797:0:::::
nobody:*:9797:0:::::
EOF
cat <<EOF > $root/System/Settings/shells
/bin/sh
/bin/ash
EOF
| true
|
35d5b43364f2e9282edecab56256d58520c759cc
|
Shell
|
Jenji/dynamodb-emr-exporter
|
/invokeEMR.sh
|
UTF-8
| 12,211
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# invokeEMR.sh — launches an EMR cluster that exports DynamoDB tables to S3,
# waits for it to finish, and maintains RUNNING/SUCCESS/FAILED lock markers
# in S3 so downstream consumers know the backup state.
# Inputs
APPNAME=$1
CLUSTER_NAME=$2
PROFILE=$3           # AWS CLI / boto profile name
TABLE_FILTER=$4      # which DynamoDB tables to back up
READ_TPUT=$5         # read throughput percentage for the export
JSON_OUTPUT_DIR=$6   # working dir for generated EMR JSON config
S3LOCATION=$7        # S3 base URI for backups and EMR logs
# Hard-codes (but can be changed here)
WRITE_TPUT=0.8 # Used when we generate the Import steps
REGION=us-east-1
RETRY_DELAY=10       # seconds between cluster-creation retries
# Just vars
INSTALL_DIR=/usr/local/dynamodb-emr
COMMON_JSON=${INSTALL_DIR}/common-json
STEP_PRODUCER=${INSTALL_DIR}/produce-steps-json.py
NEXTPHASE=0          # gate flag: 1 = previous phase succeeded
RETCODE=0            # final exit code of this script
# Lock files (delivered to S3 at different phases)
BACKUP_RUNNING_LOCK_NAME=BACKUP_RUNNING
BACKUP_COMPLETE_SUCCESS_LOCK_NAME=BACKUP_COMPLETE_SUCCESS
BACKUP_COMPLETE_FAILED_LOCK_NAME=BACKUP_COMPLETE_FAILED
BACKUP_RUNNING_LOCK_LOCAL_FILE=${INSTALL_DIR}/${BACKUP_RUNNING_LOCK_NAME}
BACKUP_COMPLETE_SUCCESS_LOCK_LOCAL_FILE=${INSTALL_DIR}/${BACKUP_COMPLETE_SUCCESS_LOCK_NAME}
BACKUP_COMPLETE_FAILED_LOCK_LOCAL_FILE=${INSTALL_DIR}/${BACKUP_COMPLETE_FAILED_LOCK_NAME}
logMsg()
{
    # Log a message both to syslog (tagged with program name and PID) and
    # to stdout.
    # $1 - the message text.
    # BUG FIX: quote "$1" — the unquoted expansion word-split the message,
    # collapsing runs of whitespace and glob-expanding any wildcards in it.
    local PROGNAME=invokeEMR
    local PID=$$
    logger -t "${PROGNAME}[$PID]" -- "$1"
    # printf instead of echo so messages starting with '-' are not eaten
    # as options.
    printf '%s\n' "$1"
}
usage()
{
    # Print a one-line usage synopsis for this script.
    printf '%s\n' "Usage: invokeEMR app_name emr_cluster_name boto_profile_name table_filter read_throughput_percentage json_output_directory S3_location"
}
pollCluster()
{
    # Block until the given EMR cluster terminates.
    # $1 - AWS profile, $2 - cluster id, $3 - cluster name (for logging).
    # Returns 0 on clean termination, 1 when the cluster (or any of its
    # steps) failed.
    local profile=$1
    local cluster_id=$2
    local cluster_name=$3
    local finished=0
    local failed=0
    local state reason
    logMsg "polling cluster NAME:${cluster_name} ID ${cluster_id} for status in profile ${profile}"
    while [ "$finished" -ne 1 ]
    do
        state=$(aws emr describe-cluster --cluster-id "$cluster_id" --profile "$profile" | jq -r '.["Cluster"]["Status"]["State"]')
        case "$state" in
            TERMINATED)
                # A terminated cluster may still have had failing steps;
                # inspect the state-change reason to find out.
                reason=$(aws emr describe-cluster --cluster-id "$cluster_id" --profile "$profile" | jq -r '.["Cluster"]["Status"]["StateChangeReason"]["Message"]')
                if [ "$reason" = "Steps completed with errors" ]; then
                    failed=1
                else
                    failed=0
                fi
                finished=1
                ;;
            TERMINATED_WITH_ERRORS)
                failed=1
                finished=1
                ;;
        esac
        sleep 10
    done
    return "$failed"
}
# All seven positional arguments are mandatory.
if [ $# != 7 ]; then
  usage
  exit 1
fi
logMsg "Starting up"
######
## PHASE 1 - See if there are any clusters already runing with our name. If there are, exit
######
# grep -q: only the exit status matters — is a cluster with our name active?
aws emr list-clusters --active --profile ${PROFILE} | grep -q ${CLUSTER_NAME}
STATUS=$?
if [ $STATUS == 0 ]; then
  # We already have a cluster running - bail
  logMsg "Cluster ERROR: existing cluster ${CLUSTER_NAME} running"
  NEXTPHASE=0
  RETCODE=2
else
  logMsg "No existing EMR cluster with name ${CLUSTER_NAME} running. Creating"
  NEXTPHASE=1
fi
######
## PHASE 2 - Copy in the common JSON files
######
# The shared EMR configuration templates must exist; copy them into the
# per-run working directory, then verify each copy landed.
if [ ! -d "${COMMON_JSON}" ]; then
  logMsg "The common-json folder is missing - unable to continue"
  NEXTPHASE=0
  RETCODE=2
else
  mkdir -p ${JSON_OUTPUT_DIR}
  logMsg "Copying common json files to ${JSON_OUTPUT_DIR}"
  cp -f ${COMMON_JSON}/applications.json ${JSON_OUTPUT_DIR}/applications.json
  cp -f ${COMMON_JSON}/ec2-attributes.json ${JSON_OUTPUT_DIR}/ec2-attributes.json
  cp -f ${COMMON_JSON}/instance-groups.json ${JSON_OUTPUT_DIR}/instance-groups.json
  cp -f ${COMMON_JSON}/bootstrap-actions-export.json ${JSON_OUTPUT_DIR}/bootstrap-actions-export.json
  cp -f ${COMMON_JSON}/bootstrap-actions-import.json ${JSON_OUTPUT_DIR}/bootstrap-actions-import.json
  # Belt-and-braces: confirm every file actually exists after the copies.
  if [ ! -e "${JSON_OUTPUT_DIR}/applications.json" ] ||
     [ ! -e "${JSON_OUTPUT_DIR}/ec2-attributes.json" ] ||
     [ ! -e "${JSON_OUTPUT_DIR}/bootstrap-actions-export.json" ] ||
     [ ! -e "${JSON_OUTPUT_DIR}/bootstrap-actions-import.json" ] ||
     [ ! -e "${JSON_OUTPUT_DIR}/instance-groups.json" ]; then
    logMsg "Error copying common json files to ${JSON_OUTPUT_DIR}"
    NEXTPHASE=0
    RETCODE=2
  else
    NEXTPHASE=1
  fi
fi
######
## PHASE 2 - Generate the steps files
######
if [ $NEXTPHASE == 1 ]; then
  # PHASE 2 - Get the EMR steps file for the tables to backup
  # The step producer writes exportSteps.json/importSteps.json and an
  # s3path.info file naming this run's S3 destination.
  logMsg "Generating JSON files (R:${REGION} READ:${READ_TPUT} WRITE:${WRITE_TPUT} FILT:${TABLE_FILTER} JDIR:${JSON_OUTPUT_DIR} S3DIR:${S3LOCATION}"
  ${STEP_PRODUCER} -a ${APPNAME} -p ${PROFILE} -r ${REGION} -e ${READ_TPUT} -w ${WRITE_TPUT} -f ${TABLE_FILTER} ${JSON_OUTPUT_DIR} ${S3LOCATION}
  RESULT=$?
  if [ $RESULT == 0 ]; then
    NEXTPHASE=1
  else
    logMsg "Cluster ERROR: Unable to generate the EMR steps files NAME:${CLUSTER_NAME}"
    RETCODE=3
    NEXTPHASE=0
  fi
  # Get the location of where 'this' backup will be placed in S3
  S3_BACKUP_BASE=$(cat ${JSON_OUTPUT_DIR}/s3path.info)
  logMsg "The S3 base path for this backup is ${S3_BACKUP_BASE}"
  if [ "${S3_BACKUP_BASE}" == "" ]; then
    logMsg "ERROR: No S3 base location for this backup - unable to continue"
    RETCODE=3
    NEXTPHASE=0
  fi
fi
######
## PHASE 3 - Create the EMR cluster (with retries)
######
if [ $NEXTPHASE == 1 ]; then
  RETRIES=5
  CURR_ATTEMPT=1
  # we need some status files which are delivered to S3 if the job is running or if it fails.
  # This just creates them - we deliver them to S3 at later steps
  if [ ! -e "${BACKUP_RUNNING_LOCK_LOCAL_FILE}" ]; then
    touch "${BACKUP_RUNNING_LOCK_LOCAL_FILE}"
  fi
  if [ ! -e "${BACKUP_COMPLETE_SUCCESS_LOCK_LOCAL_FILE}" ]; then
    touch "${BACKUP_COMPLETE_SUCCESS_LOCK_LOCAL_FILE}"
  fi
  if [ ! -e "${BACKUP_COMPLETE_FAILED_LOCK_LOCAL_FILE}" ]; then
    touch "${BACKUP_COMPLETE_FAILED_LOCK_LOCAL_FILE}"
  fi
  # Retry loop: try up to RETRIES times to get a cluster past the
  # "running" state before giving up.
  while [ $CURR_ATTEMPT -le $RETRIES ]
  do
    CLUSTERUP=0
    # Invoke the aws CLI to create the cluster
    logMsg "Creating new EMR Cluster NAME:${CLUSTER_NAME} Attempt ${CURR_ATTEMPT} of ${RETRIES}"
    CLUSTERID=$(aws emr create-cluster --name "${CLUSTER_NAME}" \
    --ami-version 3.8.0 \
    --service-role "EMR_DefaultRole" \
    --tags Name=${CLUSTER_NAME} signiant:product=devops signiant:email=devops@signiant.com \
    --enable-debugging \
    --log-uri ${S3LOCATION}/emr-logs \
    --applications file://${JSON_OUTPUT_DIR}/applications.json \
    --instance-groups file://${JSON_OUTPUT_DIR}/instance-groups.json \
    --ec2-attributes file://${JSON_OUTPUT_DIR}/ec2-attributes.json \
    --bootstrap-actions file://${JSON_OUTPUT_DIR}/bootstrap-actions-export.json \
    --steps file://${JSON_OUTPUT_DIR}/exportSteps.json \
    --auto-terminate \
    --visible-to-all-users \
    --output text \
    --profile ${PROFILE})
    logMsg "CLUSTERID for ${CLUSTER_NAME} is $CLUSTERID"
    # Now use the waiter to make sure the cluster is launched successfully
    if [ "$CLUSTERID" != "" ]; then
      logMsg "Waiting for cluster NAME:${CLUSTER_NAME} ID:${CLUSTERID} to start...."
      aws emr wait cluster-running --cluster-id ${CLUSTERID} --profile ${PROFILE}
      STATUS=$?
      if [ $STATUS == 0 ]; then
        logMsg "Cluster NAME:${CLUSTER_NAME} ID:${CLUSTERID} launched successfully"
        CLUSTERUP=1
        break
      else
        logMsg "Cluster ERROR: launch failure NAME:${CLUSTER_NAME} ID:${CLUSTERID} Attempt ${CURR_ATTEMPT} of ${RETRIES} "
        CLUSTERUP=0
        # Fall into the next iteration of the loop to try and create the cluster again
      fi
    else
      logMsg "Cluster ERROR: no cluster ID returned NAME:${CLUSTER_NAME}"
      CLUSTERUP=0
    fi
    # NOTE: $[ ] is deprecated bash arithmetic syntax (still functional).
    CURR_ATTEMPT=$[$CURR_ATTEMPT+1]
    logMsg "Delaying ${RETRY_DELAY} seconds before attempting to create cluster..."
    sleep ${RETRY_DELAY}
  done
  ####
  ## Phase 3.5 - poll the cluster for status so we know when it's done
  ####
  if [ $CLUSTERUP == 1 ]; then
    # We have a cluster provisioned...now we can poll it's tasks and make sure it completes ok
    # First tag the backup as in progress so any downstream processes know not to copy this
    logMsg "Writing BACKUP_RUNNING_LOCK file for this backup"
    aws s3 cp ${BACKUP_RUNNING_LOCK_LOCAL_FILE} ${S3_BACKUP_BASE}/${BACKUP_RUNNING_LOCK_NAME} --profile ${PROFILE}
    pollCluster $PROFILE $CLUSTERID $CLUSTER_NAME
    STATUS=$?
    if [ $STATUS == 0 ]; then
      logMsg "Cluster SUCCESS NAME:${CLUSTER_NAME} ID:${CLUSTERID}"
      # Copy the steps json files to S3 so we have a copy for 'this' job
      if [ "${S3_BACKUP_BASE}" != "" ]; then
        logMsg "Copying steps files to S3"
        aws s3 cp ${JSON_OUTPUT_DIR}/exportSteps.json ${S3_BACKUP_BASE}/exportSteps.json --profile ${PROFILE}
        aws s3 cp ${JSON_OUTPUT_DIR}/importSteps.json ${S3_BACKUP_BASE}/importSteps.json --profile ${PROFILE}
        # Swap the RUNNING marker for the SUCCESS marker.
        logMsg "Removing the BACKUP_RUNNING_LOCK file for this backup"
        aws s3 rm ${S3_BACKUP_BASE}/${BACKUP_RUNNING_LOCK_NAME} --profile ${PROFILE}
        logMsg "Writing the BACKUP_COMPLETE_SUCCESS file for this backup"
        aws s3 cp ${BACKUP_COMPLETE_SUCCESS_LOCK_LOCAL_FILE} ${S3_BACKUP_BASE}/${BACKUP_COMPLETE_SUCCESS_LOCK_NAME} --profile ${PROFILE}
      else
        logMsg "No S3 base location for this backup specified - unable to copy steps files to S3"
      fi
      RETCODE=0
    else
      logMsg "Cluster ERROR:task failure NAME:${CLUSTER_NAME} ID:${CLUSTERID}"
      # Swap the RUNNING marker for the FAILED marker.
      logMsg "Removing the BACKUP_RUNNING_LOCK file for this backup"
      aws s3 rm ${S3_BACKUP_BASE}/${BACKUP_RUNNING_LOCK_NAME} --profile ${PROFILE}
      logMsg "Writing the BACKUP_COMPLETE_FAILED file for this backup"
      aws s3 cp ${BACKUP_COMPLETE_FAILED_LOCK_LOCAL_FILE} ${S3_BACKUP_BASE}/${BACKUP_COMPLETE_FAILED_LOCK_NAME} --profile ${PROFILE}
      RETCODE=4
    fi
  else
    logMsg "Unable to provision a new cluster after ${RETRIES} attempts"
    RETCODE=6
  fi
fi
exit ${RETCODE}
| true
|
1ab41995cb373d002af5036db7db7b890082edbf
|
Shell
|
xushuhui/offline-installer
|
/app.sh
|
UTF-8
| 1,134
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Deploys application code into ${webroot} and patches its configuration.
# Expects the caller (a parent installer that sources/invokes this script)
# to have set: webroot, redisPort, mysqlPassword, source_dir, appName, and
# to have defined the Echo_Red helper for colored error output.

# Abort early when any required variable is missing.
# BUG FIX: exit non-zero on error (was `exit 0`, which made failed runs
# look successful to any caller checking the exit status).
if [ -z "${webroot}" ]; then
	Echo_Red 'webroot path is empty!'
	exit 1
fi
if [ -z "${redisPort}" ]; then
	Echo_Red 'redis Port path is empty!'
	exit 1
fi
if [ -z "${mysqlPassword}" ]; then
	Echo_Red 'mysql Password path is empty!'
	exit 1
fi
if [ -z "${source_dir}" ]; then
	Echo_Red 'source_dir is empty!'
	exit 1
fi
# The archive was built with: tar -zcvf ${appName}.tar.gz ${appName}/*
# (packs the ${appName} directory).
tar_code(){
	# Unpack the application archive and copy its contents into the web root.
	# BUG FIX: abort when the cd fails instead of extracting into whatever
	# the current directory happens to be.
	cd "${source_dir}/code" || exit 1
	tar -zxvf "${appName}.tar.gz"
	# -p: idempotent, no error when the web root already exists.
	mkdir -p "${webroot}"
	# \cp bypasses any cp alias (e.g. cp -i) so existing files are
	# overwritten without prompting.
	\cp "${source_dir}/code/${appName}/"* "${webroot}" -rf
}
update_config(){
	# Switch the deployed app from debug to production settings and point it
	# at the local Redis/MySQL services.
	# NOTE(review): ${webroot} is expected to end with a trailing slash —
	# the paths below concatenate it directly with "config/...".
	sed -i "s/'app_debug' => true/'app_debug' => false/g" "${webroot}config/config.php"
	sed -i "s/'port' => 6380/'port' => ${redisPort}/g" "${webroot}config/config.php"
	sed -i "s/'debug' => true/'debug' => false/g" "${webroot}config/database.php"
	# BUG FIX: keep the single quotes around the password value; the
	# original replacement dropped them, producing an unquoted (invalid)
	# PHP string in database.php.
	sed -i "s/'password' => '123456'/'password' => '${mysqlPassword}'/g" "${webroot}config/database.php"
	chown -R www:www "${webroot}"
	chown -R www:www "${webroot}"*
}
# Run the deployment: unpack/copy the code, then patch its configuration.
tar_code
update_config
| true
|
de5e58ecde8e633e8ad7e926ac29b7351524049c
|
Shell
|
virtualtam/dotfiles
|
/bin/git-blob-trotter
|
UTF-8
| 650
| 3.515625
| 4
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
#
# git-blob-trotter
#
# Displays the N largest blobs in the current repository tree
#
# See:
# - https://stackoverflow.com/questions/10622179/how-to-find-identify-large-commits-in-git-history
# - https://stackoverflow.com/questions/223678/which-commit-has-this-blob
#
# Find which commits add a given blob with:
#
#    git find-object <BLOB>

# Number of blobs to show; defaults to 10 when no argument is given.
N_BLOBS=${1:-10}

# List every object, keep only blobs with their sizes, sort by size
# descending, take the top N and humanize the byte counts.
# FIX: quote "${N_BLOBS}" so a maliciously/accidentally multi-word argument
# cannot word-split into extra head options.
git rev-list --objects --all \
  | git cat-file --batch-check='%(objecttype) %(objectname) %(objectsize) %(rest)' \
  | sed -n 's/^blob //p' \
  | sort --numeric-sort --key=2 --reverse \
  | head -n "${N_BLOBS}" \
  | numfmt --field=2 --to=iec-i --padding=7 --suffix=B
| true
|
a0d52ebd8d9310ec8419517a94fdcd513befbac0
|
Shell
|
bpingris/qtile-desktop-wallpaper
|
/desktop_theme
|
UTF-8
| 478
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/sh
# Set a desktop wallpaper and derive qtile bar colors from it.
# BUG FIX: the script declared functions with the non-POSIX `function`
# keyword under #!/bin/sh (breaks on dash); plain POSIX syntax is used
# instead, and "$1" is quoted so paths with spaces work.

help() {
    # Print usage and exit successfully.
    cat <<EOF
USAGE
$0 /path/to/image
DESCRIPTION
set a wallpaper and add colors on the bar (qlite only)
FLAGS
-h --help show this help
EOF
    exit 0
}

main() {
    # Only proceed when the argument is an existing regular file.
    if [ -f "$1" ]
    then
        # Extract a palette from the image for the qtile bar, then restart
        # qtile and apply the wallpaper.
        colorz "$1" --no-preview | cut -d' ' -f1 > ~/.config/qtile/colors_schemes
        qtile-cmd -o cmd -f restart 2>/dev/null
        feh --bg-scale "$1"
    else
        echo "$1 is not a valid image !"
        exit 1
    fi
}

case $1 in
    --help|-h)
        help;;
    *)
        main "$1"
esac
| true
|
7d36a87d90badcaade46552b1d0b401db0f2aee5
|
Shell
|
jasonswords/College-Project-Scripts
|
/server_info.sh
|
UTF-8
| 1,332
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# this script will print all the relevant server information
# with the mhn honeypot framework project
IP_HONEYPOTS=(server ip addresses here with space between each ip)
HONEYPOT_LOC=(server location names here in same order as ip addresses)
# SSH port used for the honeypots; the MHN server itself listens on 22.
HONEYPOT_PORT=2222
for i in "${!IP_HONEYPOTS[@]}";
do
	echo "--------------------------------------------"
	echo "Information from "${HONEYPOT_LOC[$i]^}" honeypot"
	echo "--------------------------------------------"
	# BUG FIX: select the port on every iteration. Previously PORT was set
	# to 22 once the MHN server was seen and never reset, so every honeypot
	# processed after it was contacted on port 22 instead of 2222.
	if [ "${HONEYPOT_LOC[$i]^}" == "MHN-SERVER" ];then
		PORT=22
		ssh -p $PORT root@"${IP_HONEYPOTS[$i]}" "
		echo "---- Hard drive space ----";df -h;
		echo "---- Available RAM"; free -h;
		echo "---- Size of log file ----"; ls -lh /var/log/mhn/mhn-splunk.*;
		echo "---- Size of log file ----"; ls -lh /var/log/mhn/mhn.log*;
		echo "---- The services running ----";supervisorctl status"
	else
		PORT=$HONEYPOT_PORT
		ssh -p $PORT root@"${IP_HONEYPOTS[$i]}" "
		echo "---- Hard drive space ----";df -h;
		echo "---- Available RAM"; free -h;
		echo "---- Size of log file ----"; ls -lh /opt/dionaea/var/log/dionaea/;
		echo "---- Number of bistreams files ----";ls -lh /opt/dionaea/var/lib/dionaea/bistreams/*| wc -l;
		echo "---- Number of binaries ----"; ls -lh /opt/dionaea/var/lib/dionaea/binaries/ | wc -l;
		echo "---- The services running ----";supervisorctl status"
	fi
done
| true
|
471d9ae2e78ea6c99eab648a36d5103356dce6a3
|
Shell
|
petar-jovanovic/sdk
|
/samples/github/ios/compile.sh
|
UTF-8
| 2,920
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Copyright (c) 2015, the Dartino project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE.md file.
# Setup
# - Install and build dartino.
# - Install Cocoapods.
# - Run immic (output in generated/packages/immi).
# - Run servicec (output in generated/packages/service).
# - Generate libdartino.a for your choice of platforms and add it to xcode.
# - Generate snapshot of your Dart program and add it to xcode.
# - Write Podfile that links to {Dartino,Service,Immi}.podspec.
# - Run pod install.
# Build (implemented by the present script).
# - Run immic.
# - Run servicec.
# - Generate libdartino.
# - Generate snapshot of your Dart program.
# After this, hit the 'run' button in xcode.
# -u: unset variables are errors; -e: stop at the first failing command.
set -ue
# Absolute directory containing this script, independent of the caller's cwd.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJ=github
DARTINO_DIR="$(cd "$DIR/../../.." && pwd)"
DARTINO_PKG_DIR="$DARTINO_DIR/package"
TARGET_DIR="$(cd "$DIR/.." && pwd)"
TARGET_GEN_DIR="$TARGET_DIR/generated"
TARGET_PKG_FILE="$TARGET_DIR/.packages"
IMMI_GEN_DIR="$TARGET_GEN_DIR/immi"
SERVICE_GEN_DIR="$TARGET_GEN_DIR/service"
DART="$DARTINO_DIR/out/ReleaseIA32/dart"
IMMIC="$DART $DARTINO_DIR/tools/immic/bin/immic.dart"
DARTINO="$DARTINO_DIR/out/ReleaseIA32/dartino"
SERVICEC="$DARTINO x-servicec"
MOCK_SERVER_SNAPSHOT="$TARGET_DIR/github_mock_service.snapshot"
# Echo each command from here on, for build-log visibility.
set -x
cd $DARTINO_DIR
ninja -C out/ReleaseIA32
./tools/persistent_process_info.sh -k
# Generate dart service file and other immi files with the compiler.
if [[ $# -eq 0 ]] || [[ "$1" == "immi" ]]; then
  rm -rf "$IMMI_GEN_DIR"
  mkdir -p "$IMMI_GEN_DIR"
  $IMMIC --packages "$TARGET_PKG_FILE" --out "$IMMI_GEN_DIR" "$TARGET_DIR/lib/$PROJ.immi"
  rm -rf "$SERVICE_GEN_DIR"
  mkdir -p "$SERVICE_GEN_DIR"
  $SERVICEC file "$IMMI_GEN_DIR/idl/immi_service.idl" out "$SERVICE_GEN_DIR"
  # Regenerate the mock service after deleting the service-gen directory.
  $DIR/../compile_mock_service.sh service
fi
# Build the Dartino VM static library for simulator (IA32) and device (ARM)
# and merge both into a single universal archive via lipo.
if [[ $# -eq 0 ]] || [[ "$1" == "dartino" ]]; then
  ninja -C out/ReleaseIA32IOS libdartino.a
  ninja -C out/ReleaseXARM libdartino.a
  lipo -create -output "$DIR/libdartinovm.a" \
    out/ReleaseIA32IOS/libdartino.a \
    out/ReleaseXARM/libdartino.a
fi
# Compile the Dart program into the snapshot consumed by the iOS app.
if [[ $# -eq 0 ]] || [[ "$1" == "snapshot" ]]; then
  $DART -c --packages=.packages \
    -Dsnapshot="$DIR/$PROJ.snapshot" \
    -Dpackages="$TARGET_PKG_FILE" \
    tests/dartino_compiler/run.dart "$TARGET_DIR/bin/$PROJ.dart"
fi
# Ensure that we have a mock server.
if [[ $# -eq 0 ]] && [[ ! -f "$MOCK_SERVER_SNAPSHOT" ]]; then
  $DIR/../compile_mock_service.sh
fi
set +x
if [[ $# -eq 1 ]]; then
  echo
  echo "Only ran task $1."
  echo "Possible tasks: immi, dartino, and snapshot"
  echo "If Dartino or any IMMI files changed re-run compile.sh without arguments."
fi
| true
|
c4aa3191b94b2a670a2ba592b67832f8dda021d0
|
Shell
|
snvakula/svtools
|
/emcdpo-genbulk.sh
|
UTF-8
| 1,072
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Deps: php, pwgen, emc
# Bulk-generates digital-proof-of-ownership (DPO) records: for each serial
# number in [FIRST, LAST], mints a secret + OTP hash and publishes an NVS
# record on the Emercoin blockchain via the `emc` CLI.
URL="https://emcdpo.local"
SALT="fah6aZfewfwewej"
ADDRESS="ERGPQNKpmJNaXeaq6ZDYwgghQDMBQGVema"
VENDOR="Your Company"
ITEM="Name of your product"
PHOTO="http://www.blockchainengine.org/wp-content/uploads/2016/04/Smart4.png"
OTHERS="Description=The description of your product"
PREFIX="DEMO-"
FIRST=1001
LAST=1030
DAYS=730
while [ $FIRST -le $LAST ]; do
  echo "Creating serial $PREFIX$FIRST:"
  # Random 8-char secret; OTP = sha256(md5(secret) . salt).
  SECRET=$(pwgen 8 1)
  OTP=$(php -r "echo(hash('sha256', md5('$SECRET'.'$SALT')));")
  echo " * SECRET: $SECRET"
  echo " * OTP: $OTP"
  echo " * Public URL: $URL/key/$PREFIX$FIRST"
  echo " * Private URL: $URL/key/$PREFIX$FIRST?otp=$SECRET"
  # Find the first unused revision counter for this serial's NVS name.
  COUNT=0
  while emc name_show "dpo:$VENDOR:$PREFIX$FIRST:$COUNT" >/dev/null 2>&1
  do
    let COUNT=COUNT+1
  done
  echo " * NVS Record: dpo:$VENDOR:$PREFIX$FIRST:$COUNT"
  # echo -e turns the literal \n markers into real newlines for the record.
  VALUE="Item=$ITEM\nPhoto=$PHOTO\n$OTHERS\nOTP=$OTP"
  VALUE=$(echo -e "$VALUE")
  echo -n " * Transaction ID: "
  emc name_new "dpo:$VENDOR:$PREFIX$FIRST:$COUNT" "$VALUE" $DAYS $ADDRESS
  echo
  let FIRST=FIRST+1
done
| true
|
a7f267a092d44da2e067d5052f007e0b36dd405b
|
Shell
|
multirom-nexus6p/android_device_huawei_angler
|
/pull_decrypt_libs.sh
|
UTF-8
| 1,333
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Pulls the vendor/system libraries required for decryption support from a
# device via adb. Run in TWRP after mounting vendor partition.

vendor_filelist="/vendor/lib64/libQSEEComAPI.so \
/vendor/lib64/hw/keystore.msm8994.so \
/vendor/lib64/libdrmfs.so \
/vendor/lib64/libdrmtime.so \
/vendor/lib64/librpmb.so \
/vendor/lib64/libssd.so \
/vendor/lib64/libdiag.so \
/vendor/bin/qseecomd \
/vendor/lib64/libkmcrypto.so"
# already copied: libcryptfslollipop.so libcrypto.so libc.so libcutils.so libdl.so libhardware.so liblog.so libm.so libstdc++.so libc++.so
system_filelist="libbacktrace.so libbase.so libnetd_client.so libunwind.so libutils.so linker64"

# BUG FIX: -f so the script does not error out the first time it runs,
# when the output directory does not exist yet.
rm -rf multirom_enc_blobs

# The filelists are intentionally word-split (one path per word).
for i in $vendor_filelist
do
	echo "$i"
	outfile="multirom_enc_blobs/$(basename "$i")"
	mkdir -p "$(dirname "$outfile")"
	adb pull "$i" "$outfile"
done
for i in $system_filelist
do
	echo "$i"
	outfile="multirom_enc_blobs/$i"
	mkdir -p "$(dirname "$outfile")"
	adb pull "/sbin/$i" "$outfile"
done
mv multirom_enc_blobs/linker64 multirom_enc_blobs/linker
mkdir -p multirom_enc_blobs/vendor/lib64/hw
# property service not running at startup, so it'll try to load keystore.default.so instead.
cp multirom_enc_blobs/keystore.msm8994.so multirom_enc_blobs/vendor/lib64/hw/keystore.default.so
# and just in case it wants the library under its real name
mv multirom_enc_blobs/keystore.msm8994.so multirom_enc_blobs/vendor/lib64/hw/
chmod 755 multirom_enc_blobs/*
| true
|
54d916eedd0c5cff690b06d110270f44d80878c8
|
Shell
|
joelmgjt/v2rayAdm
|
/install-v2r.sh
|
UTF-8
| 9,869
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# install-v2r.sh — downloads and installs the v2ray admin tooling after
# validating a vendor key against a remote allow-list.
BARRA="\033[1;36m-----------------------------------------------------\033[0m"
IVAR="/etc/http-instas"
SCPT_DIR="/etc/SCRIPT"
SCPinstal="$HOME/install"
# Self-delete the installer so it cannot be run twice.
# NOTE(review): assumes $0 is relative to the current directory — confirm
# how the script is normally invoked before relying on this.
rm $(pwd)/$0
add-apt-repository universe
apt update -y; apt upgrade -y
install_ini () {
# Installs every package this tool depends on, printing a per-package
# INSTALADO / FALLO DE INSTALACION status line. Offers a retry when the
# user reports failures.
clear
echo -e "$BARRA"
echo -e "\033[92m           -- INSTALANDO PAQUETES NECESARIOS -- "
echo -e "$BARRA"
#bc
[[ $(dpkg --get-selections|grep -w "bc"|head -1) ]] || apt-get install bc -y &>/dev/null
[[ $(dpkg --get-selections|grep -w "bc"|head -1) ]] || ESTATUS=`echo -e "\033[91mFALLO DE INSTALACION"` &>/dev/null
[[ $(dpkg --get-selections|grep -w "bc"|head -1) ]] && ESTATUS=`echo -e "\033[92mINSTALADO"` &>/dev/null
echo -e "\033[97m  # apt-get install bc................... $ESTATUS "
#jq
[[ $(dpkg --get-selections|grep -w "jq"|head -1) ]] || apt-get install jq -y &>/dev/null
[[ $(dpkg --get-selections|grep -w "jq"|head -1) ]] || ESTATUS=`echo -e "\033[91mFALLO DE INSTALACION"` &>/dev/null
[[ $(dpkg --get-selections|grep -w "jq"|head -1) ]] && ESTATUS=`echo -e "\033[92mINSTALADO"` &>/dev/null
echo -e "\033[97m  # apt-get install jq................... $ESTATUS "
#curl
[[ $(dpkg --get-selections|grep -w "curl"|head -1) ]] || apt-get install curl -y &>/dev/null
[[ $(dpkg --get-selections|grep -w "curl"|head -1) ]] || ESTATUS=`echo -e "\033[91mFALLO DE INSTALACION"` &>/dev/null
[[ $(dpkg --get-selections|grep -w "curl"|head -1) ]] && ESTATUS=`echo -e "\033[92mINSTALADO"` &>/dev/null
echo -e "\033[97m  # apt-get install curl................. $ESTATUS "
#npm
[[ $(dpkg --get-selections|grep -w "npm"|head -1) ]] || apt-get install npm -y &>/dev/null
[[ $(dpkg --get-selections|grep -w "npm"|head -1) ]] || ESTATUS=`echo -e "\033[91mFALLO DE INSTALACION"` &>/dev/null
[[ $(dpkg --get-selections|grep -w "npm"|head -1) ]] && ESTATUS=`echo -e "\033[92mINSTALADO"` &>/dev/null
echo -e "\033[97m  # apt-get install npm.................. $ESTATUS "
#nodejs
[[ $(dpkg --get-selections|grep -w "nodejs"|head -1) ]] || apt-get install nodejs -y &>/dev/null
[[ $(dpkg --get-selections|grep -w "nodejs"|head -1) ]] || ESTATUS=`echo -e "\033[91mFALLO DE INSTALACION"` &>/dev/null
[[ $(dpkg --get-selections|grep -w "nodejs"|head -1) ]] && ESTATUS=`echo -e "\033[92mINSTALADO"` &>/dev/null
echo -e "\033[97m  # apt-get install nodejs............... $ESTATUS "
#socat
[[ $(dpkg --get-selections|grep -w "socat"|head -1) ]] || apt-get install socat -y &>/dev/null
[[ $(dpkg --get-selections|grep -w "socat"|head -1) ]] || ESTATUS=`echo -e "\033[91mFALLO DE INSTALACION"` &>/dev/null
[[ $(dpkg --get-selections|grep -w "socat"|head -1) ]] && ESTATUS=`echo -e "\033[92mINSTALADO"` &>/dev/null
echo -e "\033[97m  # apt-get install socat................ $ESTATUS "
#netcat
[[ $(dpkg --get-selections|grep -w "netcat"|head -1) ]] || apt-get install netcat -y &>/dev/null
[[ $(dpkg --get-selections|grep -w "netcat"|head -1) ]] || ESTATUS=`echo -e "\033[91mFALLO DE INSTALACION"` &>/dev/null
[[ $(dpkg --get-selections|grep -w "netcat"|head -1) ]] && ESTATUS=`echo -e "\033[92mINSTALADO"` &>/dev/null
echo -e "\033[97m  # apt-get install netcat............... $ESTATUS "
#netcat-traditional
[[ $(dpkg --get-selections|grep -w "netcat-traditional"|head -1) ]] || apt-get install netcat-traditional -y &>/dev/null
[[ $(dpkg --get-selections|grep -w "netcat-traditional"|head -1) ]] || ESTATUS=`echo -e "\033[91mFALLO DE INSTALACION"` &>/dev/null
[[ $(dpkg --get-selections|grep -w "netcat-traditional"|head -1) ]] && ESTATUS=`echo -e "\033[92mINSTALADO"` &>/dev/null
echo -e "\033[97m  # apt-get install netcat-traditional... $ESTATUS "
#net-tools
# BUG FIX: the original ran `apt-get net-tools -y` (missing the `install`
# subcommand), so net-tools was never actually installed.
[[ $(dpkg --get-selections|grep -w "net-tools"|head -1) ]] || apt-get install net-tools -y &>/dev/null
[[ $(dpkg --get-selections|grep -w "net-tools"|head -1) ]] || ESTATUS=`echo -e "\033[91mFALLO DE INSTALACION"` &>/dev/null
[[ $(dpkg --get-selections|grep -w "net-tools"|head -1) ]] && ESTATUS=`echo -e "\033[92mINSTALADO"` &>/dev/null
echo -e "\033[97m  # apt-get install net-tools............ $ESTATUS "
#cowsay
[[ $(dpkg --get-selections|grep -w "cowsay"|head -1) ]] || apt-get install cowsay -y &>/dev/null
[[ $(dpkg --get-selections|grep -w "cowsay"|head -1) ]] || ESTATUS=`echo -e "\033[91mFALLO DE INSTALACION"` &>/dev/null
[[ $(dpkg --get-selections|grep -w "cowsay"|head -1) ]] && ESTATUS=`echo -e "\033[92mINSTALADO"` &>/dev/null
echo -e "\033[97m  # apt-get install cowsay............... $ESTATUS "
#figlet
[[ $(dpkg --get-selections|grep -w "figlet"|head -1) ]] || apt-get install figlet -y &>/dev/null
[[ $(dpkg --get-selections|grep -w "figlet"|head -1) ]] || ESTATUS=`echo -e "\033[91mFALLO DE INSTALACION"` &>/dev/null
[[ $(dpkg --get-selections|grep -w "figlet"|head -1) ]] && ESTATUS=`echo -e "\033[92mINSTALADO"` &>/dev/null
echo -e "\033[97m  # apt-get install figlet............... $ESTATUS "
#lolcat
# lolcat may not be packaged; fall back to the ruby gem.
apt-get install lolcat -y &>/dev/null
sudo gem install lolcat &>/dev/null
[[ $(dpkg --get-selections|grep -w "lolcat"|head -1) ]] || ESTATUS=`echo -e "\033[91mFALLO DE INSTALACION"` &>/dev/null
[[ $(dpkg --get-selections|grep -w "lolcat"|head -1) ]] && ESTATUS=`echo -e "\033[92mINSTALADO"` &>/dev/null
echo -e "\033[97m  # apt-get install lolcat............... $ESTATUS "
echo -e "$BARRA"
echo -e "\033[92m   La instalacion de paquetes necesarios a finalizado"
echo -e "$BARRA"
echo -e "\033[97m     Si la instalacion de paquetes tiene fallas"
echo -ne "\033[97m      Puede intentar de nuevo [s/n]: "
read inst
[[ $inst = @(s|S|y|Y) ]] && install_ini
}
msg () {
# Print $2 wrapped in the ANSI colour selected by the flag in $1.
# -ne/-bra emit without a trailing newline; -bar/-bar2 print a separator.
BRAN='\033[1;37m'
VERMELHO='\e[31m'
VERDE='\e[32m'
AMARELO='\e[33m'
AZUL='\e[34m'
MAGENTA='\e[35m'
MAG='\033[1;36m'
NEGRITO='\e[1m'
SEMCOR='\e[0m'
case $1 in
  -ne)
    cor="${VERMELHO}${NEGRITO}"
    echo -ne "${cor}${2}${SEMCOR}"
    ;;
  -ama)
    cor="${AMARELO}${NEGRITO}"
    echo -e "${cor}${2}${SEMCOR}"
    ;;
  -verm)
    cor="${AMARELO}${NEGRITO}[!] ${VERMELHO}"
    echo -e "${cor}${2}${SEMCOR}"
    ;;
  -azu)
    cor="${MAG}${NEGRITO}"
    echo -e "${cor}${2}${SEMCOR}"
    ;;
  -verd)
    cor="${VERDE}${NEGRITO}"
    echo -e "${cor}${2}${SEMCOR}"
    ;;
  -bra)
    cor="${VERMELHO}"
    echo -ne "${cor}${2}${SEMCOR}"
    ;;
  "-bar2"|"-bar")
    cor="${VERMELHO}======================================================"
    echo -e "${SEMCOR}${cor}${SEMCOR}"
    ;;
esac
}
ofus () {
    # (De)obfuscate a key: swap the paired characters
    # (. <-> *, 1 <-> @, 2 <-> ?, 4 <-> %, - <-> K), then reverse the
    # string. The mapping is symmetric, so applying ofus twice returns
    # the original input.
    unset server
    server=$(echo ${txt_ofuscatw}|cut -d':' -f1)
    unset txtofus
    local len=${#1}
    local pos ch
    for (( pos = 0; pos < len; pos++ )); do
        ch=${1:pos:1}
        case $ch in
            .)   ch='*' ;;
            '*') ch='.' ;;
            1)   ch='@' ;;
            '@') ch='1' ;;
            2)   ch='?' ;;
            '?') ch='2' ;;
            4)   ch='%' ;;
            '%') ch='4' ;;
            -)   ch='K' ;;
            K)   ch='-' ;;
        esac
        txtofus+="$ch"
    done
    echo "$txtofus" | rev
}
verificar_arq () {
    # Install a downloaded file: move it from the staging directory into
    # its destination and mark it executable.
    # $1 - file name (currently only "v2r.sh" has a mapped destination).
    local destino
    case $1 in
        "v2r.sh") destino="/usr/bin/" ;;
    esac
    mv -f "${SCPinstal}/$1" "${destino}/$1"
    chmod +x "${destino}/$1"
}
meu_ip () {
    # Resolve this host's IP into the global $IP: compare the first local
    # interface address with the public address reported by icanhazip and
    # prefer the public one when they differ (e.g. behind NAT).
    MIP=$(ip addr | grep 'inet' | grep -v inet6 | grep -vE '127\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | head -1)
    MIP2=$(wget -qO- ipv4.icanhazip.com)
    if [[ "$MIP" != "$MIP2" ]]; then
        IP="$MIP2"
    else
        IP="$MIP"
    fi
}
function_verify () {
	# Remote kill switch: fetch the published list of authorized IPs and
	# abort the installation when this host's $IP is not on it. When the
	# IP is authorized, record the published version locally and remove
	# any previously installed copies of the admin script.
	permited=$(curl -sSL "https://raw.githubusercontent.com/joelmgjt/v2rayAdm/master/IP")
	[[ $(echo $permited|grep "${IP}") = "" ]] && {
	echo -e "\n\n\n\033[1;31m====================================================="
	echo -e "\033[1;31m      ¡LA IP $(wget -qO- ipv4.icanhazip.com) NO ESTA AUTORIZADA!"
	echo -e "\033[1;31m               CONTACTE A @Rufu99"
	echo -e "\033[1;31m=====================================================\n\n\n"
	exit 1
	} || {
	### INTALAR VERSION DE SCRIPT
	[[ ! -d /etc/v2r ]] && mkdir /etc/v2r
	ver=$(curl -sSL "https://raw.githubusercontent.com/joelmgjt/v2rayAdm/master/version")
	echo "$ver" > /etc/v2r/version
	[[ -e /usr/bin/v2r.sh ]] && rm -rf /usr/bin/v2r.sh &>/dev/null
	[[ -e /usr/bin/v2r ]] && rm -rf /usr/bin/v2r &>/dev/null
	}
}
error_fun () {
    # Report a VPS<->generator link failure and abort the installer.
    msg -bar2
    msg -verm "ERROR de enlace VPS<-->GENERADOR"
    msg -bar2
    exit 1
}
invalid_key () {
    # Reject an invalid key: print the error, remove the downloaded file
    # list (if present) and abort.
    msg -bar2
    msg -verm "#¡Key Invalida#! "
    msg -bar2
    if [[ -e "$HOME/lista-arq" ]]; then
        rm "$HOME/lista-arq"
    fi
    exit 1
}
# Main flow: install dependencies, resolve IP, prompt for the vendor key,
# download the tool files, then install v2ray itself.
install_ini
meu_ip
clear
msg -bar2
figlet "   -V2RAY-" | lolcat
# Prompt until a non-empty key is entered; tput erases the previous prompt.
while [[ ! $Key ]]; do
	msg -bar2 && msg -ne "# DIGITE LA KEY #: " && read Key
	tput cuu1 && tput dl1
done
msg -ne "# Verificando Key # : "
cd $HOME
# The deobfuscated key encodes the download host; fetch the file manifest.
wget -O $HOME/lista-arq $(ofus "$Key")/$IP > /dev/null 2>&1 && echo -e "\033[1;32m Key Completa" || {
	echo -e "\033[1;91m Key Incompleta"
	invalid_key
	exit
}
# Extract the server IP embedded in the key and persist it.
IP=$(ofus "$Key" | grep -vE '127\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}') && echo "$IP" > /usr/bin/vendor_code
sleep 1s
function_verify
if [[ -e $HOME/lista-arq ]] && [[ ! $(cat $HOME/lista-arq|grep "KEY INVALIDA!") ]]; then
	msg -bar2
	msg -e "\033[1;33mDescargando archivos... \033[1;31m[Proyect by @Rufu99]"
	REQUEST=$(ofus "$Key"|cut -d'/' -f2)
	[[ ! -d ${SCPinstal} ]] && mkdir ${SCPinstal}
	# Download every file named in the manifest and install each one.
	for arqx in $(cat $HOME/lista-arq); do
		echo -ne "\033[1;33mDescargando: \033[1;31m[$arqx] "
		wget --no-check-certificate -O ${SCPinstal}/${arqx} ${IP}:81/${REQUEST}/${arqx} > /dev/null 2>&1 && {
			echo -e "\033[1;31m- \033[1;32mRecibido!"
			verificar_arq "${arqx}"
		} || {
			echo -e "\033[1;31m- \033[1;31mFalla (no recibido!)"
			error_fun
		}
	done
	sleep 1s
	# Clean up staging artifacts and register the v2r command alias.
	rm -rf FERRAMENTA KEY KEY! INVALIDA!
	rm $HOME/lista-arq
	[[ -d ${SCPinstal} ]] && rm -rf ${SCPinstal}
	echo "/usr/bin/v2r.sh" > /usr/bin/v2r && chmod +x /usr/bin/v2r
	clear
	echo -e "$BARRA"
	echo -e "\033[92m           -- INSTALANDO V2RAY -- "
	echo -e "$BARRA"
	sleep 2
	source <(curl -sL https://multi.netlify.app/v2ray.sh)
	clear
	echo -e "$BARRA"
	echo -e "\033[1;33m  Perfecto, utilize el comando\n  \033[1;31mv2r.sh o v2r\n  \033[1;33mpara administrar v2ray"
	echo -e "$BARRA"
	echo -ne "\033[0m"
else
	invalid_key
fi
rm -rf install-v2r.sh
| true
|
77870d4f5e8000ba3b629afd5d92299ba5bd504b
|
Shell
|
joehahn/epi_int_lite
|
/old/piggyback.sh
|
UTF-8
| 2,255
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
#
#piggyback.sh
#by Joe Hahn, jmh.datasciences@gmail.com, 3 August 2017.
#this is then executed on master after hadoop is launched.
#
#To execute:    ./piggyback.sh
echo 'running piggyback.sh...'
echo $(whoami)
echo $(pwd)
#unpack the spark-one-off repo, with permissions set so that user=jupyter
#can read & write notebooks to this directory
echo 'installing spiral-waves repo...'
bucket_name="spiralwaves"
aws s3 cp s3://$bucket_name/spiral-waves.tar.gz /home/hadoop/.
cd /home/hadoop
gunzip --force spiral-waves.tar.gz
tar -xvf spiral-waves.tar
# 777 so the jupyter user created below can read/write the notebooks.
chmod 777 spiral-waves
cd spiral-waves
chmod 777 *.ipynb
##use spark to ...
##executing on four m4.2xlarge instances having 8cpus 32Gb each
#echo 'executing mlp.py...'
#logj4="spark.driver.extraJavaOptions=-Dlog4j.configuration=file:./log4j.properties"
#PYSPARK_PYTHON=/emr/miniconda2/bin/python spark-submit --master yarn --conf "$logj4" \
#    --num-executors 29 --executor-cores 4 --executor-memory 4G --driver-memory 2G mlp.py
#hdfs dfs -cat data/grid/*.csv | wc
##copy hdfs input & output data to s3
#echo 'copying hdfs data to s3...'
#aws s3 rm --recursive s3://spark-one-off/data
#hadoop distcp data s3a://spark-one-off/data
#aws s3 ls --recursive s3://spark-one-off/data
#get aws access keys from s3
echo "getting aws access keys from s3..."
mkdir private
aws s3 cp s3://spiralwaves/accessKeys.csv private/accessKeys.csv
##plop athena table schemas on s3 datasets
#./athena_tables.sh
#uncomment to run jupyter dashboard on master node
#create user jupyter
echo "creating user jupyter..."
sudo adduser jupyter
#
#prep & start jupyter inside of a screen session, as user=jupyter
#jupyter's password=oneoff, see https://jupyter-notebook.readthedocs.io/en/stable/public_server.html
echo 'starting jupyter...'
sudo -u jupyter /emr/miniconda2/bin/jupyter notebook --generate-config
sudo -u jupyter cp jupyter_notebook_config.json /home/jupyter/.jupyter/.
# screen -dmS: detached session so the notebook survives this script ending.
sudo -u jupyter screen -dmS jupyter_sesh /emr/miniconda2/bin/jupyter notebook --ip 0.0.0.0 --no-browser --port 8765
#update locate database
echo 'updating locate...'
sudo updatedb
##sleep for 10 minutes, then cluster terminates
#echo 'piggyback sleeping for 10 minutes...'
#echo $(date)
#sleep 600
#echo $(date)
#done
echo 'piggyback.sh done!'
| true
|
304475055a5aa01e6fdc0a7a8082198284801663
|
Shell
|
tsmith512/dotfiles
|
/sublime/install.sh
|
UTF-8
| 823
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# _ _ _ _ _
# ___ _ _ | |__ | |(_) _ __ ___ ___ | |_ ___ __ __| |_
# / __|| | | || '_ \ | || || '_ ` _ \ / _ \ | __|/ _ \\ \/ /| __|
# \__ \| |_| || |_) || || || | | | | || __/ | |_| __/ > < | |_
# |___/ \__,_||_.__/ |_||_||_| |_| |_| \___| \__|\___|/_/\_\ \__|
#
# on mac os
#
# Symlink every *.sublime-settings file under this directory into the
# Sublime Text 2 user packages folder, keeping a timestamped backup of any
# real settings file already there.

# Work from the directory containing this script (fails fast if $0 has no dir).
cd "${0%/*}" || exit 1

ST_USER="$HOME/Library/Application Support/Sublime Text 2/Packages/User"

# NUL-delimited find/read replaces the old IFS hack and the unquoted
# $(find ...) loop; it is safe for names with spaces, globs, or newlines.
# (find . also covers dot-directories, unlike the previous 'find *'.)
find . -name "*.sublime-settings" -print0 | while IFS= read -r -d '' i; do
  i=${i#./}   # strip the leading "./" so link names keep the old layout
  if [ -f "$ST_USER/$i" ]; then
    # Back up a pre-existing real settings file before replacing it.
    mv "$ST_USER/$i" "$ST_USER/$i.$(date +%Y%m%d-%H%M).backup"
  fi
  ln -s "$HOME/dotfiles/sublime/$i" "$ST_USER/$i"
done
| true
|
d4f341ef64b8c9a8ba2f165bae359cae91f8f040
|
Shell
|
redx177/krb5-sidecar-container
|
/scripts/entrypoint.sh
|
UTF-8
| 1,680
| 3.5
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# MIT KRB5 sidecar entrypoint: refresh a Kerberos TGT from a keytab at a
# fixed interval and publish KRB5 assets to /dev/shm for the client container.
#
# FIX: the original used bash-only [[ ]] tests under #!/bin/sh; on dash/ash
# (the default /bin/sh in many container images) that is a syntax error.
# Rewritten with POSIX [ ] so the script matches its shebang.

printf "\n### MIT KRB5 sidecar ###\n\n"
printf "This container includes everything necessary to authenticate with an Active Directory KDC or Kerberos KDC using a keytab. At a\ngiven interval kinit is executed to get a fresh ticket from the TGS. \n\n"

# Secure keytab
chmod 400 /krb5/common/krb5.keytab

# Copy KRB5 assets to standard KRB5 configuration paths
cp /krb5/sidecar/krb5.conf /etc/krb5.conf
cp -r /krb5/common/krb5.conf.d /etc/krb5.conf.d

# Copy KRB5 assets to shared memory location so these assets can be utilized by the sidecar client container
cp /krb5/common/krb5.keytab /dev/shm/krb5.keytab
cp /krb5/client/krb5.conf /dev/shm/krb5-client.conf
cp -r /krb5/common/krb5.conf.d /dev/shm/krb5.conf.d

# Default refresh interval: one hour, unless overridden by the environment.
[ -z "$KINIT_WAIT_INTERVAL_IN_SECONDS" ] && KINIT_WAIT_INTERVAL_IN_SECONDS=3600

# Choose kinit options from whichever keytab is present, unless overridden.
if [ -z "$KINIT_OPTIONS" ]; then
    #[ -e /krb5/common/krb5.keytab ] && KINIT_OPTIONS="-k" && echo "*** using host keytab"
    [ -e "$KRB5_KTNAME" ] && KINIT_OPTIONS="-k" && echo "*** using host keytab"
    #[ -e /krb5/common/client.keytab ] && KINIT_OPTIONS="-k -i" && echo "*** using client keytab"
    [ -e "$KRB5_CLIENT_KTNAME" ] && KINIT_OPTIONS="-k -i" && echo "*** using client keytab"
fi

if [ -z "$(ls -A /krb5)" ]; then
    echo "*** Warning default keytab ($KRB5_KTNAME) or default client keytab ($KRB5_CLIENT_KTNAME) not found"
fi

# Renew forever; the container's job is to keep the ticket cache fresh.
while true
do
    echo "*** kinit at "+$(date -I)
    # $KINIT_OPTIONS / $KINIT_APPEND_OPTIONS are intentionally unquoted:
    # each may hold several whitespace-separated flags.
    kinit -V $KINIT_OPTIONS $KINIT_APPEND_OPTIONS
    # List tickets held in the given credentials cache.
    # klist -c /dev/shm/ccache
    klist -c "$KRB5CCNAME"
    echo "*** Waiting for $KINIT_WAIT_INTERVAL_IN_SECONDS seconds"
    sleep "$KINIT_WAIT_INTERVAL_IN_SECONDS"
done
| true
|
25d960f02dc3b8bb995dc9f641c028947593c5c2
|
Shell
|
amandasaurus/openstreetmap-americana
|
/style/scripts/import_rebusurance.sh
|
UTF-8
| 1,675
| 3.421875
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
# Import Rebusurance highway-shield SVGs: normalize filenames, strip the text
# placeholder, rescale each shield, and copy them into icons/ as us_<name>.

# All SVG surgery below depends on xmlstarlet.
if ! command -v xmlstarlet &> /dev/null
then
    echo "xmlstarlet is not available."
    exit 1   # was 'exit -1', which is not a valid exit status (0-255)
fi

# Rename: spaces -> underscores, "U.S." -> "us".
# -print0 with 'IFS= read -r -d ""' handles names containing spaces and
# keeps backslashes literal (the original read lacked -r and IFS=).
find "build/rebusurance-v1.0.0/image2d" -name "*.svg" -type f -print0 | while IFS= read -r -d '' f; do
    newfile=$(echo "$f" | sed -e 's/ /_/g; s/U.S./us/g')
    mv -n "$f" "$newfile"
done

# Process every file still containing an uppercase letter (i.e. named shields).
# '[A-Z]' is quoted so the shell cannot glob-expand it against the cwd.
for i in $( ls build/rebusurance-v1.0.0/image2d/ | grep '[A-Z]' );
do
    svg="build/rebusurance-v1.0.0/image2d/$i"
    #Remove text placeholder from shields
    #Scale shields to a reasonable size for rasterization
    xmlstarlet ed -L -N x=http://www.w3.org/2000/svg \
        -u "//x:svg/@height" \
        --value 50 \
        -u "//x:svg/@width" \
        --value 60 \
        -d "//x:svg/x:text" \
        -d "//x:svg/x:path/@transform" \
        -a "//x:svg/x:path" -t attr -n transform \
        --value "scale(0.5,0.5)" \
        -d "//x:svg/x:rect/@transform" \
        -a "//x:svg/x:rect" -t attr -n transform \
        --value "scale(0.5,0.5)" \
        -d "//x:svg/x:g/@transform" \
        -a "//x:svg/x:g" -t attr -n transform \
        --value "scale(0.5,0.5)" "$svg"
    #Copy files to icons folder, converting space to underscore and lowercase letters
    cp "$svg" "icons/us_$(echo "$i" | tr 'A-Z' 'a-z')"
done

#Customizations
#Make the crown on US Interstate highway shields pointier
xmlstarlet ed -L -N x=http://www.w3.org/2000/svg \
    -u "//x:svg/@height" \
    --value 70 \
    -u "//x:svg/x:path[@id='interstate-crown']/@transform" \
    --value "scale(0.50,1.5) translate(0,18)" \
    icons/us_interstate.svg

#Fix resizing of Florida state outline
xmlstarlet ed -L -N x=http://www.w3.org/2000/svg \
    -u "//x:svg/x:path[@id='florida-state']/@transform" \
    --value "scale(0.5, 0.5) translate(-17,-39)" \
    icons/us_florida.svg
| true
|
d8c3974fd7e1fe13902a02ab9324f87bd6ac9352
|
Shell
|
dhvssigrun/filezilla
|
/compiler.FileZilla_3.54.1.with.jstor.sh
|
UTF-8
| 4,026
| 2.609375
| 3
|
[] |
no_license
|
#Compiler FileZilla_3.54.1 with jstor by KAPITALSIN 2021
#sudo apt-get git subversion
#https://tecadmin.net/install-go-on-ubuntu/
# NOTE(review): no shebang; the script uses bash builtins pushd/popd, so it
# must be run with bash explicitly (e.g. `bash compiler...sh`).
sudo apt install libjson-c-dev libuv1-dev libmicrohttpd-dev
read -n1 -p "PULSA [ENTER] PARA CONTINUAR O [CTRL+C] PARA PARAR EL SCRIPT"
time_start=`date +%s`
#Create staging directory
STAGING=$HOME/staging/filezilla
mkdir -p $STAGING
#Sources
SRC=$STAGING/src
mkdir -p $SRC
#Build artifacts
OUT=$STAGING/build
mkdir -p $OUT
export LD_LIBRARY_PATH=$STAGING/build/lib:$LD_LIBRARY_PATH
export PKG_CONFIG_PATH=$STAGING/build/lib/pkgconfig:$PKG_CONFIG_PATH
# NOTE(review): single quotes keep $STAGING literal (it is never expanded),
# and the path "-L/$STAGING/build/lib/include" looks malformed (leading "/",
# "lib/include").  Presumably -L$STAGING/build/lib was intended — confirm.
export LDFLAGS='-L/$STAGING/build/lib/include'
#export LDFLAGS=:$LDFLAGS
PATH=$STAGING/src/wx3:$PATH
notify-send "INSTALLING GO LANGUAGE"
pushd $SRC
wget https://github.com/johna23-lab/filezilla/raw/main/go1.16.4.linux-amd64.7z
7z x go1.16.4.linux-amd64.7z
popd
notify-send "Building a static version of gmp"
wget https://gmplib.org/download/gmp/gmp-6.2.1.tar.xz -qO-|tar -C $SRC -xJ
pushd $SRC/gmp*/
./configure --build=x86_64 --prefix=$OUT --enable-static --disable-shared --enable-fat
make -j3 install
popd
notify-send "Building a static version of nettle"
wget https://ftp.gnu.org/gnu/nettle/nettle-3.6.tar.gz -qO-|tar -C $SRC -xz
pushd $SRC/nettle*/
./configure --build=x86_64 --prefix=$OUT --enable-static --disable-shared --enable-fat --enable-mini-gmp
make -j3 install
popd
notify-send "Building a static version of GNutls"
wget https://www.gnupg.org/ftp/gcrypt/gnutls/v3.7/gnutls-3.7.0.tar.xz -qO- | tar -C $SRC -xJ
pushd $SRC/gnutls-3.*/
./configure --prefix="$OUT" --enable-static --disable-shared --build=x86_64 --with-included-libtasn1 --disable-doc --disable-guile --enable-local-libopts --disable-nls --with-included-unistring --disable-tests --with-default-trust-store-pkcs11="pkcs11:"
make -j3 install
popd
notify-send "Building a static version of SQLite"
wget https://sqlite.org/2018/sqlite-autoconf-3250300.tar.gz -qO-|tar -C $SRC -xz
pushd $SRC/sql*/
./configure --build=x86_64 --prefix=$OUT --enable-static --disable-shared
make -j3 install
popd
notify-send "Building a static version of wxWidgets"
#git clone --branch WX_3_0_BRANCH --single-branch https://github.com/wxWidgets/wxWidgets.git $SRC/wx3
pushd $SRC
wget https://github.com/johna23-lab/filezilla/raw/main/wx3.7z
7z x $SRC/wx3.7z
popd
pushd $SRC/wx3
./configure --prefix=$(pwd) --enable-monolithic --disable-shared --enable-static --enable-unicode --with-libpng=builtin --with-libjpeg=builtin --with-libtiff=builtin --with-zlib=builtin --with-expat=builtin
make -j3
popd
notify-send "Building a static version of libfilezilla"
#svn co https://svn.filezilla-project.org/svn/libfilezilla/trunk $SRC/libfilezilla
wget https://download.filezilla-project.org/libfilezilla/libfilezilla-0.28.0.tar.bz2 -qO-|tar -C $SRC -xj
pushd $SRC/libfilezilla*/
./configure --prefix=$OUT --enable-static --disable-shared
make -j3 install
popd
notify-send "Building a static version of libidn"
wget ftp://ftp.gnu.org/gnu/libidn/libidn2-2.3.0.tar.gz -qO-|tar -C $SRC -xz
pushd $SRC/libidn*/
./configure --prefix="$OUT" --enable-static --disable-shared
make -j3 install
popd
notify-send "Building libjstor"
git clone https://github.com/storj/libstorj.git $SRC/jstor
pushd $SRC/jstor*/
./autogen.sh
./configure --prefix="$OUT" --disable-shared --enable-static
make -j3 install
popd
notify-send "Building UPLINK LIBRARY"
git clone https://github.com/storj/uplink-c.git $SRC/uplink
pushd $SRC/uplink
export GOROOT=$SRC/go
# NOTE(review): $GOPATH is never set in this script; $GOPATH/bin expands to
# "/bin" unless the caller exported it — confirm that is intended.
export PATH=$GOPATH/bin:$GOROOT/bin:$PATH
make install DESTDIR=$OUT
popd
notify-send "Building Filezilla"
wget https://github.com/johna23-lab/filezilla/raw/main/FileZilla_3.54.1_src.txz -qO-|tar -C $SRC -xJ
pushd $SRC/filezilla*/
./configure --prefix="$OUT" --enable-static --disable-shared --with-pugixml=builtin --enable-storj
make -j3 install
popd
time_end=`date +%s`
# The expr wrapper is redundant: $(( )) already yields the difference.
time_exec=`expr $(( $time_end - $time_start ))`
echo "EL PROCESO DE COMPILADO HA TARDADO UN TOTAL DE $(($time_exec / 60)) minutos y $(($time_exec % 60)) segundos."
| true
|
bee363ea4837aaa718f337e06bf9c1c9e4203569
|
Shell
|
rileyhales/nldas
|
/data_workflow/onion_creek_events/workflow.sh
|
UTF-8
| 971
| 3.515625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# FIX: switched from /bin/sh to bash — the script uses ${BASH_SOURCE[0]},
# a bash array reference that is a "Bad substitution" error under dash/ash.
#
# Download NLDAS event data listed in *_urls.txt (4 parallel curl transfers)
# into <data-dir>/<event>/, then copy the NCML files into the data directory.
# Assuming this file is always run on a system that contains curl
# Adapted from script written by Rohit Khattar

# Absolute directory containing this script (and the *_urls.txt lists).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

if [ -z "$1" ]; then
    echo "Specify A Data Directory"
    exit 1   # non-zero: the caller asked for nothing we could do
fi

echo "Downloading Data..."
cd
# NOTE(review): the commented line below embeds Earthdata credentials;
# they should be rotated and never committed to version control.
#touch .netrc
#echo "machine urs.earthdata.nasa.gov login tethysgldas password KKP4E2sjTfQGsMX" >> .netrc
#touch .urs_cookies
chmod -R 0755 "$1"
#mkdir -p $1/2013event/
#cd $1/2013event/
#cat $DIR/2013event_urls.txt | tr -d '\r' | xargs -n 1 -P 4 curl -LJO -n -c ~/.urs_cookies -b ~/.urs_cookies
mkdir -p "$1/2015event/"
cd "$1/2015event/" || exit 1
# Strip CR from the (possibly Windows-edited) URL list, download 4 at a time.
tr -d '\r' < "$DIR/2015event_urls.txt" | xargs -n 1 -P 4 curl -LJO -n -c ~/.urs_cookies -b ~/.urs_cookies
#mkdir -p $1/2018event/
#cd $1/2018event/
#cat $DIR/2018event_urls.txt | tr -d '\r' | xargs -n 1 -P 4 curl -LJO -n -c ~/.urs_cookies -b ~/.urs_cookies
echo "Download Done"

# Move NCML Files into thredds data directory
cp "$DIR"/*.ncml "$1"
| true
|
15aeacffbd492511aa590a6c870c2e4f4a1eca14
|
Shell
|
outtersg/guili
|
/apr-util
|
UTF-8
| 2,495
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2005 Guillaume Outters
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
set -e
# DelieS: follow the chain of symlinks in $s until it points at a real file,
# resolving relative link targets against the link's own directory.
# SCRIPTS: compute the absolute, symlink-free directory containing this
# script and store it in $SCRIPTS (used just below to source util.sh).
DelieS() { local s2 ; while [ -h "$s" ] ; do s2="`readlink "$s"`" ; case "$s2" in [^/]*) s2="`dirname "$s"`/$s2" ;; esac ; s="$s2" ; done ; } ; SCRIPTS() { local s="`command -v "$0"`" ; [ -x "$s" -o ! -x "$0" ] || s="$0" ; case "`basename "$s"`" in *.*) true ;; *sh) s="$1" ;; esac ; case "$s" in [^/]*) s="`pwd`/$s" ;; esac ; DelieS ; s="`dirname "$s"`" ; DelieS ; SCRIPTS="$s" ; } ; SCRIPTS
. "$SCRIPTS/util.sh"
# History of supported versions.  "v" and "prerequis" are defined by
# util.sh — presumably they register a buildable version and its
# dependencies; confirm against util.sh.
v 1.2.1 && prerequis="apr >= 0" || true
v 1.2.2
v 1.2.7
v 1.2.12
v 1.3.4
v 1.3.9
v 1.3.12 && prerequis="apr expat" # Otherwise apr installs its bundled expat, which causes us trouble.
v 1.5.1 && prerequis="apr >= 1.4.6 expat" || true
v 1.5.4 && prerequis="apr >= 1.5 expat" || true
v 1.6.1 && prerequis="apr >= 1.6 expat" || true
# Modifications
# Variables
# Successive assignments: only the last mirror URL is actually used; the
# earlier ones are kept as fallback candidates to swap in by hand.
archive=http://apache.miroir-francais.fr/apr/$logiciel-$version.tar.bz2
archive="http://apache.multidist.com/apr/apr-util-$version.tar.bz2"
archive="http://mir2.ovh.net/ftp.apache.org/dist/apr/$logiciel-$version.tar.gz"
archive="http://mirrors.ircam.fr/pub/apache/apr/apr-util-$version.tar.bz2"
# Standard guili build pipeline (helpers from util.sh): pick the install
# destination, install prerequisites, fetch and enter the source tree,
# apply patches, then configure/make/install.
destiner
prerequis
obtenirEtAllerDansVersion
echo Correction… >&2
for modif in true $modifs ; do $modif ; done
echo Configuration… >&2
./configure --prefix="$dest" --with-apr="$INSTALLS"
echo Compilation… >&2
make
echo Installation… >&2
sudo make install
sutiliser
| true
|
8db6239255ac58968a44e1db0ab0bb2f861d9a57
|
Shell
|
STEllAR-GROUP/hpx
|
/tools/change_includes.sh
|
UTF-8
| 4,543
| 4.25
| 4
|
[
"BSL-1.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2019 The STE||AR-Group
#
# SPDX-License-Identifier: BSL-1.0
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# This script aims at replacing the old headers by the new one
# /!\ This file is globbing through the include_compatibility folder, so basic
# files should already be here
# /!\ The sed command will replace all deprecated headers, including the one
# specified in the deprecation message in include_compatibility (so it is better
# to execute add_compat_headers.sh after this script)
# Not used in default mode (globbing), used if --files option specified
# Old/new header paths are paired by index: when no basename match is found,
# old_filenames[i] is replaced by new_filenames[i] (see main loop below).
old_filenames=(
hpx/util/cache/local_cache.hpp
hpx/util/cache/lru_cache.hpp
)
new_filenames=(
hpx/cache/local_cache.hpp
hpx/cache/lru_cache.hpp
)
# Print the supplementary usage hints shared by both help paths.
# ($0 is expanded at call time; \$PWD is shown literally to the user.)
extra_usage_message() {
    cat <<EOF

Can specify the --project_path if different from the \$PWD variable

In case you want to specify the files to replace manually, please
specify them at the beginning of this script source file $0 and use
the --files option
EOF
}
# No arguments at all: print usage and bail out before doing any work.
if [[ $# -lt 1 ]]; then
    arg=${BASH_SOURCE[0]}
    echo "Usage : "$arg" -m <module_name> -p <project_path>"
    echo "Example: "$arg" -m cache"
    extra_usage_message
    exit
fi
#######################################
# Parse command-line options.
# Globals written: all_files, module, project_path, POSITIONAL
# Arguments:       the script's "$@"
# Exits:           on --help or any unrecognised argument.
# Fix: removed the unreachable 'return' after 'exit' and quoted the case
# selector.
#######################################
function parse_arguments() {
    # store arguments list
    POSITIONAL=()
    while [[ $# -gt 0 ]]
    do
        local key="$1"
        case "$key" in
            -f|--files)
                all_files=0
                echo "Replacement based on manually specified files"
                echo "(change directly those in the script $0)"
                shift # pass option
                ;;
            -m|--module)
                module=$2
                echo "module : ${module}"
                shift # pass option
                shift # pass value
                ;;
            -p|--project_path)
                project_path=$2
                shift # pass option
                shift # pass value
                ;;
            --help|*)
                # Any positional (non-option) argument also lands here.
                echo $"Usage: $0 [-m, --module <value>] [-p, --project_path <value>]"
                echo "[-f, --files \"<value1> <value2>\"]"
                echo "Example: "$0" -m cache -p \$PWD"
                echo
                echo "- Can specify the --project_path if different from the environmental"
                echo "variable \$PWD"
                exit
                ;;
        esac
    done
    # restore positional parameters
    set -- "${POSITIONAL[@]}"
}
# Look up, in new_filenames, the new-layout header whose basename equals $1.
# Sets: new_file  - the matching path, or "" when nothing matched
#       notfound  - "true" only when nothing matched AND the file lists were
#                   specified manually (--files mode, all_files == 0)
find_matching() {
    local candidate
    new_file=""
    notfound=false
    for candidate in "${new_filenames[@]}"; do
        # ${candidate##*/} strips everything up to the last '/'.
        if [[ "${candidate##*/}" == "$1" ]]; then
            new_file=$candidate
            return
        fi
    done
    # No candidate matched; only flag it when the list was hand-written.
    if [[ -z "$new_file" && $all_files -eq 0 ]]; then
        notfound=true
    fi
}
########
# MAIN #
########
# Defaults arguments
module=
project_path=$PWD
all_files=1 # default is globbing
echo
# Parsing arguments
parse_arguments "$@"
echo "project_path: ${project_path}"
echo
# Activate the ** globing
shopt -s globstar
# Globbing mode: rebuild the old/new filename lists from the module's
# include_compatibility/ and include/ trees.
if [[ $all_files -eq 1 ]]; then
    echo
    echo "Globbing has been specified, we will glob in include/ and"
    echo "include_compatibility/"
    echo
    pushd ${project_path} > /dev/null
    # Get all the old headers names
    pushd libs/${module}/include_compatibility > /dev/null
    old_filenames=($(ls **/*.hpp))
    # Get all the new headers names
    cd ../include
    new_filenames=($(ls **/*.hpp))
    popd > /dev/null # go back at the top level project_path
fi
name_it=0
# Introduce backslash in front of a . or a /
for file in "${old_filenames[@]}"; do
    old_file=$file
    basefilename=$(basename $old_file)
    find_matching "$basefilename"
    # NOTE(review): notfound is only ever true in --files mode, where a miss
    # falls back to pairing by index.  In globbing mode a miss leaves
    # new_file empty, so the sed below would replace the old include path
    # with an empty string — confirm this cannot occur in practice.
    if [[ "$notfound" = "true" ]]; then
        new_file=${new_filenames[$name_it]}
        echo "new file !!!" $new_file
        echo "(not found in the list specified)"
    fi
    echo "old header : $old_file"
    echo "new header : $new_file"
    echo
    # Add backslash in front of the special chars
    old_file=${old_file////\\/}
    old_file=${old_file//./\\.}
    new_file=${new_file////\\/}
    new_file=${new_file//./\\.}
    # Replace by the new header in all hpp and cpp files
    sed -i "s/$old_file/$new_file/" **/*.{hpp,cpp}
    name_it=$((name_it+1))
done
popd > /dev/null
| true
|
ab8364572371f0202fb3c5094bf29d22e451f5a0
|
Shell
|
tim-patterson/sql-dep-analyzer
|
/deploy.sh
|
UTF-8
| 362
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
# Build the project and force-push the static bundle to the gh-pages branch.
# Any failing command aborts the deploy (set -e).
set -e

./gradlew build

# Reuse the current repo's origin URL for the throwaway gh-pages clone.
# Quoted below in case the URL ever contains shell metacharacters.
REMOTE=$(git remote get-url origin)

# Assemble the page content in a clean staging directory.
rm -rf build/tmp/ghpages
mkdir -p build/tmp/ghpages
cp src/main/web/* build/tmp/ghpages
cp build/bundle/* build/tmp/ghpages

# Publish from a fresh, history-less repo; gh-pages history is disposable.
(
cd build/tmp/ghpages
git init
git remote add origin "$REMOTE"
git add .
git commit -m "GH PAGES"
git push --force origin master:gh-pages
)
echo "Deploy Done"
| true
|
7863aec67fca0aac8437716f42d850cf3519fc81
|
Shell
|
Zefiro/tgigor
|
/start_igor.sh
|
UTF-8
| 555
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This starts the Telegram bot Igor in a screen for the current user
# this script is intended to be called at bootup, e.g. from /etc/rc.local with:
# su -c /home/zefiro/tgigor/start_igor.sh zefiro

# Run from the script's own directory so npm finds the bot's package.json.
cd "$(dirname "$0")"

# Create a detached screen session "tgigor" with one window titled "bot".
# -A: auto-resize screen
# -dm: starts a new screen session in detached mode
# -S: set session name
# -t: set title of window
screen -AdmS tgigor -t bot

# Type the start command into that window; $'...\n' appends Enter so it runs.
# -S: specify session name
# -p: selects window by title
# -X: sends the command into the window
screen -S tgigor -p bot -X stuff $'export NODE_ENV=dev && npm start\n'
| true
|
05a75706170f2dab145f803bd4303eea11146616
|
Shell
|
iSkans/OpsWorks
|
/proxy/templates/default/proxy2ensite.erb
|
UTF-8
| 1,228
| 3.65625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Chef/OpsWorks ERB template: generates an nginx reverse-proxy vhost for a
# Node.js app and writes it to $NGINX/sites-enabled/<app>.conf.
# Usage (after ERB expansion): proxy2ensite <app_name> <deploy_dir> <domains-csv>
#-- Configuration --#
NGINX='<%= node[:nginx][:dir] %>'
APP_NAME=$1
APP_DIR=$2/current
APP_DOMAINS=`echo $3 | sed 's/,/ /g'`
#-- Retrieve Proxy ip and port from package.json --#
# NOTE(review): these sed patterns only match when the "proxy"/"port" values
# are quoted strings in package.json; a bare numeric port would not match —
# confirm against the apps' package.json format.
if [ -f $APP_DIR/package.json ]
then
APP_IP=`sed -n 's/[[:space:]]*"proxy"[[:space:]]*:[[:space:]]*"\([^"]*\).*/\1/p' $APP_DIR/package.json`
APP_PORT=`sed -n 's/[[:space:]]*"port"[[:space:]]*:[[:space:]]*"\([^"]*\).*/\1/p' $APP_DIR/package.json`
fi
#-- Available Proxy Website --#
# Only write a vhost when we have at least one domain plus the upstream
# ip/port; the first echo truncates any previous config for this app.
if [ -n "$APP_DOMAINS" ] && [ -n "$APP_IP" ] && [ -n "$APP_PORT" ]
then
echo ""> $NGINX/sites-enabled/$APP_NAME.conf
SERVERNAMES=""
DOMAINS=$(echo $APP_DOMAINS | tr " " "\n")
for DOMAIN in $DOMAINS
do
# Two-label domains (example.com) get a permanent redirect to www.
REDIR=`echo $DOMAIN | grep '^[^.]*\.[^.]*$'`
if [ -n "$REDIR" ]
then
echo "server {
server_name $REDIR;
rewrite ^(.*) http://www.$REDIR\$1 permanent;
}
">> $NGINX/sites-enabled/$APP_NAME.conf
fi
# Three-label domains (www.example.com) are collected as server_names.
REDIR2=`echo $DOMAIN | grep '^[^.]*\.[^.]*\.[^.]*$'`
if [ -n "$REDIR2" ]
then
SERVERNAMES="$SERVERNAMES $REDIR2"
fi
done
# Emit one proxying server block covering all collected server_names.
if [ -n "$SERVERNAMES" ]
then
echo "server {
listen 80;
server_name $SERVERNAMES;
location / {
proxy_pass http://$APP_IP:$APP_PORT/;
}
}">> $NGINX/sites-enabled/$APP_NAME.conf
fi
fi
| true
|
152864a83a60260cf74a932fc0ef784e3608689b
|
Shell
|
NoorahSmith/BlackBird-SubdomainEnum
|
/modules/bruteforcer.sh
|
UTF-8
| 815
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
########
#
# BlackBird's Bruteforcing Module
#
# Runs dirsearch against every (sub)domain listed in $In_File, over both
# http and https, writing plain-text reports under
# $In_Path$Domain/bruteforcer/, and posts start/finish notices to Slack.
#
#######
PG_DIRSEARCH=$1      # path to the dirsearch executable
In_File=$2           # file with one (sub)domain per line
Domain=$3            # parent domain (output dir name + notifications)
In_Path=$4           # base output path
VAR_SLACK_TOKEN=$5   # Slack API token

# -p: create parents and don't fail if the directory already exists.
mkdir -p "$In_Path$Domain/bruteforcer"

slack-cli -t "$VAR_SLACK_TOKEN" -d blackbird-output "Dirsearch Bruteforcing Started Against $Domain"

# Extensions probed on every target.
extensions=txt,css,html,js,zip,tar,config,xml,php,jsp,asp,aspx,cs,vb,py,pl,rb,csv,yml

# IFS= read -r: keep whitespace intact and backslashes literal.
while IFS= read -r DOMAIN
do
    "$PG_DIRSEARCH" -u "http://$DOMAIN" -e "$extensions" --plain-text-report="$In_Path$Domain/bruteforcer/http_$DOMAIN"
    "$PG_DIRSEARCH" -u "https://$DOMAIN" -e "$extensions" --plain-text-report="$In_Path$Domain/bruteforcer/https_$DOMAIN"
done < "$In_File"

slack-cli -t "$VAR_SLACK_TOKEN" -d blackbird-output "Dirsearch Bruteforcing Finished Against $Domain"
| true
|
8fc553d7f77dbe740c8db53a8933d18ece27bf97
|
Shell
|
Fersca/natyla
|
/scripts/reqTest.sh
|
UTF-8
| 384
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Cache load test: POST 19000 JSON documents to the local server, keyed
# /fer/<prefix>A<counter> where <prefix> is the script's first argument.

# The document posted on every request (kept identical for all keys).
payload='{"name":"fer","surname":"scasserra","age":31, "location":"buenos aires de america", "scuadra":"river plata varias veces campeon del mundo","pais":"argentina, un gran pais para vivir"}'

for (( c = 1; c <= 19000; c++ ))
do
    echo "Welcome $c times"   # fixed typo: was "Welcone"
    curl -H "Content-Type: application/json" -X POST -d "$payload" "http://localhost:8080/fer/${1}A${c}"
done

echo ""
echo "Fin cache test"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.