blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9710899af7f424e7d8377f730207de971a217222
|
Shell
|
compae/sparta
|
/docker/docker-entrypoint.sh
|
UTF-8
| 1,298
| 3.765625
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash -e
_log_sparta_entrypoint() {
    # Emit a log line tagged with the entrypoint marker.
    local msg="$1"
    printf '%s\n' "[SPARTA-ENTRYPOINT] ${msg}"
}
_log_sparta_entrypoint "Executing Sparta docker-entrypoint"

# Paths consumed by the sourced helper scripts.
NAME=sparta
VARIABLES="/etc/default/$NAME-variables"
SYSTEM_VARIABLES="/etc/profile"
SPARTA_CONF_FILE=/etc/sds/sparta/reference.conf

_log_sparta_entrypoint "Loading Sparta common functions"
source /sparta-common.sh
## Vault and secrets (configured if enabled)
###################################################
# Run the security bootstrap only when Vault is explicitly enabled and a host
# is configured. FIX: the original tested the unquoted, bash-only
# [ $VAULT_ENABLE == "true" ], which errors out (and under `bash -e` kills the
# container) when the variable holds several words. "$VAULT_ENABLE" = "true"
# already implies "set and non-empty", so the extra -v/length checks collapse.
if [ "${VAULT_ENABLE:-}" = "true" ] && [ -n "${VAULT_HOST:-}" ]; then
    _log_sparta_entrypoint "Executing Sparta security script ... "
    # Forward the first CLI argument (when present) to the security script.
    source /security-config.sh ${1:+"$1"}
    _log_sparta_entrypoint "Sparta security script executed correctly"
fi
# Default the application type to "server" when the caller did not set it.
[[ -v SPARTA_APP_TYPE ]] || SPARTA_APP_TYPE="server"

# Select the launcher script matching the requested application type.
case "$SPARTA_APP_TYPE" in
    "marathon")
        # Sparta runs as the Spark driver inside the Marathon app.
        _log_sparta_entrypoint "Executing Sparta as marathon application ... "
        source /sparta-marathon.sh
        ;;
    *)
        # Default: Sparta runs as the server streaming-apps launcher.
        _log_sparta_entrypoint "Executing Sparta as server application ... "
        source /sparta-server.sh
        ;;
esac
| true
|
65495127ad429b10a973c1fb9dc071bd2f420627
|
Shell
|
git-for-windows/MINGW-packages
|
/mingw-w64-ilmbase/PKGBUILD
|
UTF-8
| 2,027
| 2.8125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: Alexey Pavlov <alexpux@gmail.com>
# PKGBUILD metadata for the mingw-w64 build of ILM's ilmbase libraries.
_realname=ilmbase
pkgbase=mingw-w64-${_realname}
pkgname="${MINGW_PACKAGE_PREFIX}-${_realname}"
pkgver=2.2.1
pkgrel=1
pkgdesc="Base libraries from ILM for OpenEXR (mingw-w64)"
arch=('any')
url="http://www.openexr.com/"
license=("custom")
makedepends=("${MINGW_PACKAGE_PREFIX}-cmake" "${MINGW_PACKAGE_PREFIX}-gcc" "${MINGW_PACKAGE_PREFIX}-pkg-config")
depends=("${MINGW_PACKAGE_PREFIX}-gcc-libs")
options=('staticlibs' 'strip')
# Upstream tarball plus the local patches applied in prepare().
source=("https://download.savannah.nongnu.org/releases/openexr/${_realname}-${pkgver}.tar.gz"
0001-cmake-install-binaries.patch
0002-cmake-soversion.patch
0003-pull93.patch
0004-ilmthread-mingw-pthreads.patch
0005-mingw-gcc6-bug.patch)
# Tarball checksum first, then one checksum per patch, in source= order.
sha256sums=('cac206e63be68136ef556c2b555df659f45098c159ce24804e9d5e9e0286609e'
'570a45e1be480e7bcda9cc888c5f5c19bbbebc7d0448cb955621abe8a7e27b6d'
'431a832cc59373875ab844cbbff390eab4b0a28358546c84abf1636d93113a76'
'1f8b860329c8563533da1ed6e374c1821da689b05eb8d1ef50ca3b3289bd770c'
'f281401f925c6a543b0f17e79e8d8a39727b428c1d56d7876fa2a026143d4be3'
'e62987a5afa87cb1d58cd3d35989d7e274b194549f982c178cda962cc7ceb36c')
# Apply the local patches to the unpacked source tree, in order.
# 0003 was generated one directory deeper than the others, hence -p2.
# FIX: ${srcdir} is now quoted so paths containing spaces don't word-split.
prepare(){
  cd "${srcdir}/${_realname}-${pkgver}"
  patch -p1 -i "${srcdir}/0001-cmake-install-binaries.patch"
  patch -p1 -i "${srcdir}/0002-cmake-soversion.patch"
  patch -p2 -i "${srcdir}/0003-pull93.patch"
  patch -p1 -i "${srcdir}/0004-ilmthread-mingw-pthreads.patch"
  patch -p1 -i "${srcdir}/0005-mingw-gcc6-bug.patch"
}
# Configure and compile in a fresh out-of-tree directory build-${MINGW_CHOST}.
build() {
# Start from a clean build directory.
[[ -d "build-${MINGW_CHOST}" ]] && rm -rf "build-${MINGW_CHOST}"
mkdir -p "${srcdir}/build-${MINGW_CHOST}"
cd "${srcdir}/build-${MINGW_CHOST}"
# MSYS2_ARG_CONV_EXCL prevents MSYS2 from rewriting the install-prefix
# argument into a Windows-style path before cmake sees it.
MSYS2_ARG_CONV_EXCL="-DCMAKE_INSTALL_PREFIX=" \
${MINGW_PREFIX}/bin/cmake \
-G"MSYS Makefiles" \
-DCMAKE_INSTALL_PREFIX=${MINGW_PREFIX} \
-DBUILD_SHARED_LIBS=ON \
-DNAMESPACE_VERSIONING=ON \
../${_realname}-${pkgver}
make
}
# Install the build into the packaging root ($pkgdir).
# FIX: DESTDIR is quoted so a packaging path with spaces cannot word-split.
package() {
  cd "${srcdir}/build-${MINGW_CHOST}"
  make DESTDIR="${pkgdir}" install
}
| true
|
68df9776bf2314f85ae63269b085231b2b7475ff
|
Shell
|
D0cT0r-inf0s3c/Praus
|
/audit/nginx.sh
|
UTF-8
| 4,130
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# nginx hardening audit.
#
# Each check greps the nginx configuration for a hardening directive and
# prints a colored [ GOOD ] / [ BAD ] verdict.
#
# FIX: the original used plain `echo` with \e/\t/\033 escapes; bash's echo
# does not interpret those without -e, so the script printed the literal
# text "\e[91m..." instead of colors/tabs. All output now uses `echo -e`.

home=$HOME
# -p: don't fail when the directory is left over from a previous run (the
# original `mkdir /$home/temp` also produced a doubled leading slash).
mkdir -p "$home/temp"
touch "$home/temp/nginx.txt"

# print_result OK LABEL
#   OK    - non-zero => [ GOOD ]; zero or empty => [ BAD ]
#   LABEL - message text, may embed \t padding (rendered by echo -e)
# Keeps the original pacing: sleep 2 before every verdict line.
print_result() {
	local ok=${1:-0} label=$2 status
	if [ "$ok" -eq 0 ]; then
		status="\e[91m[ BAD ]"
	else
		status="\e[92m[ GOOD ]"
	fi
	sleep 2
	echo -e "\e[39m[*] ${label}${status}"
}

# conf_check PATTERN FILE LABEL - [ GOOD ] when PATTERN occurs in FILE.
# FIX: a missing config file now counts as a failed check; the original's
# unquoted `[ $signature -eq 0 ]` errored out on the empty value and fell
# through to the GOOD branch.
conf_check() {
	local count
	count=$(grep -cP -- "$1" "$2")
	print_result "$count" "$3"
}

echo -e "\e[1;95m-------------------------[nginx audit in progress]-------------------------"

# Is the nginx package installed at all?
installed=$(dpkg-query -W -f='${Status}' nginx 2>/dev/null | grep -c "ok installed")
print_result "$installed" "Checking nginx installation\t\t\t\t\t\t\t\t"

conf_check '\s+server_tokens\soff;$' /etc/nginx/nginx.conf \
	"Checking if nginx version is hidden\t\t\t\t\t\t\t"

conf_check '^etag\soff;$' /etc/nginx/nginx.conf \
	"Checking if ETags is removed\t\t\t\t\t\t\t"

# GOOD only when the default index page contains no words at all.
indexmod=$(cat /var/www/html/index.html | wc -w)
print_result "$(( indexmod == 0 ))" "Checking if index.html is empty\t\t\t\t\t\t\t"

conf_check '^ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384;$' /etc/nginx/nginx.conf \
	"Checking if strong cipher suites are enabled\t\t\t\t\t"

conf_check '^ssl_session_timeout 5m;$' /etc/nginx/nginx.conf \
	"Checking if ssl session timeout is set\t\t\t\t\t\t"

conf_check '^ssl_session_cache shared:SSL:10m;$' /etc/nginx/nginx.conf \
	"Checking if ssl session cache is set\t\t\t\t\t\t"

conf_check '^proxy_cookie_path / \"/; secure; HttpOnly\";$' /etc/nginx/sites-available/default \
	"Checking if HttpOnly and Secure flags are enabled\t\t\t\t\t"

conf_check '^add_header X-Frame-Options DENY;$' /etc/nginx/sites-available/default \
	"Checking if Clickjacking Attack Protection is enabled\t\t\t\t"

conf_check '^add_header X-XSS-Protection \"1; mode=block\";$' /etc/nginx/sites-available/default \
	"Checking if XSS Protection is enabled\t\t\t\t\t\t"

conf_check '^add_header Strict-Transport-Security \"max-age=31536000; includeSubdomains;\";$' /etc/nginx/sites-available/default \
	"Checking if Enforce secure connections is enabled\t\t\t\t\t"

conf_check '^add_header X-Content-Type-Options nosniff;$' /etc/nginx/sites-available/default \
	"Checking if MIME sniffing Protection is enabled\t\t\t\t\t"

conf_check "^add_header Content-Security-Policy \"default-src 'self';\";$" /etc/nginx/sites-available/default \
	"Checking if Cross-site scripting and injections Protection is enabled\t\t"

conf_check '^add_header X-Robots-Tag none;$' /etc/nginx/sites-available/default \
	"Checking if X-Robots-Tag is set\t\t\t\t\t\t\t"

# Reset terminal attributes.
echo -e "\033[0m"
| true
|
9e6f3dc5042fc2e2f0a2aa4a4ee18fb8d9e5a7a4
|
Shell
|
chrisguest75/kind_examples
|
/08_skaffold/version.sh
|
UTF-8
| 212
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Dump the environment once, then print the container id (derived from
# PID 1's cpuset cgroup path) with a timestamp every 10 seconds, forever.
CID=$(basename "$(cat /proc/1/cpuset)")
# Alternative extraction via /proc/self/cgroup (kept for reference):
#CID=$(cat /proc/self/cgroup | grep "cpu:/" | sed 's/\([0-9]\):cpu:\/docker\///g')
env
# Heartbeat loop: never exits; the surrounding deployment is expected to
# stop the container.
while true; do
echo "$(date) '${CID}'"
sleep 10
done
| true
|
d38f9708e2a965f396159e97ed72ce038cc0cd91
|
Shell
|
apnex/pxe
|
/test.sh
|
UTF-8
| 593
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Read the current IPv4 settings of interface $INT and render them into a
# RHEL-style ifcfg-eth0 file (static configuration, /24 prefix assumed).

INT="wlp0s20f3"
# FIX: the grep pattern is now quoted. Unquoted, inet[^6] is a shell glob
# that could expand to a matching file name in the current directory before
# grep ever sees the pattern.
ADDRESS=$(ifconfig "${INT}" | grep 'inet[^6]' | awk '{print $2}')
NETMASK=$(ifconfig "${INT}" | grep 'inet[^6]' | awk '{print $4}')
GATEWAY=$(route -n | grep UG | awk '{print $2}')
DNS=$(nmcli -g ip4.dns device show "${INT}" | awk '{print $1}')

echo "ADDRESS : [$ADDRESS]"
echo "NETMASK : [$NETMASK]"
echo "GATEWAY : [$GATEWAY]"
echo "DNS : [$DNS]"

# Write next to the script for inspection; switch to the commented target to
# install for real.
#cat > /etc/sysconfig/network-scripts/ifcfg-eth0 <<-EOM
cat > ./ifcfg-eth0 <<-EOM
TYPE=Ethernet
BOOTPROTO=none
NAME=eth0
DEVICE=eth0
ONBOOT=yes
IPADDR=${ADDRESS}
PREFIX=24
GATEWAY=${GATEWAY}
DNS1=${DNS}
EOM
| true
|
db782f3c2cc3b317cc2b7f1ae9bb1e02a0b38590
|
Shell
|
breyerml/prezto
|
/runcoms/zshrc
|
UTF-8
| 2,029
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#
# Executes commands at the start of an interactive session.
#
# Authors:
# Sorin Ionescu <sorin.ionescu@gmail.com>
#
# Source Prezto.
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
# Customize to your needs...
# To customize prompt, run `p10k configure` or edit ~/p10k.zsh.
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
# Redefine what is ignored by auto-notify
AUTO_NOTIFY_IGNORE+=("peek")
AUTO_NOTIFY_IGNORE+=("nohup")
AUTO_NOTIFY_IGNORE+=("okular")
AUTO_NOTIFY_IGNORE+=("clion")
AUTO_NOTIFY_IGNORE+=("pycharm")
### print environmental variables in a human readable way
# usage: print_env_var PATH — prints each ':'-separated entry on its own line.
# NOTE(review): evaluates the given variable *name* via eval; call only with
# trusted names.
function print_env_var {
eval "echo \"\${$1//:/\\n}\""
}
### add to path if not already present
# usage: add_unique_path /some/dir — prepends to PATH only when absent.
function add_unique_path {
[[ ":$PATH:" != *":$1:"* ]] && export PATH="$1:${PATH}"
}
### make diff colored
alias diff='diff --color=auto'
### completely clear terminal
alias clearall='echo -e \\033c'
### immediately shutdown the system
alias goodnight='shutdown -h now'
### start pdf in presentation mode
alias present='okular --presentation'
### short application names
alias word='libreoffice --writer'
alias excel='libreoffice --calc'
alias powerpoint='libreoffice --impress'
### git related aliases
alias gits='git status'
alias gitd='git diff --color=auto'
alias gitc='git commit -m'
alias gita='git add -A'
alias gitac='git commit -am'
alias gitl='git log --graph --abbrev-commit --decorate'
### enable spack shell support
# NOTE(review): assumes spack lives under $HOME/Programs/spack and that
# `spack location -i lmod` succeeds — interactive startup breaks otherwise.
. $HOME/Programs/spack/share/spack/setup-env.sh
### enable spack's module support
. $(spack location -i lmod)/lmod/lmod/init/zsh
### add custom module path
module use $HOME/.modulefiles
### valgrind tooling aliases
alias -g memcheck='valgrind --tool=memcheck'
alias -g cachegrind='valgrind --tool=cachegrind'
alias -g callgrind='valgrind --tool=callgrind'
alias callgrind-gui='kcachegrind'
alias -g massif='valgrind --tool=massif'
alias massif-gui='massif-visualizer'
### icecc (distributed C++ compiler)
alias icecc-monitor='icemon'
alias icecc-daemon='iceccd'
| true
|
29b6d7278ace55d708ca1fbb79a10f9b2f3d191a
|
Shell
|
temptemp3/sonarqube-docker-compose
|
/scripts/install.sh
|
UTF-8
| 940
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
## needs revisions
# Download sonar-scanner into $HOME and append helper functions to ~/.bashrc.
(
cd ~
wget https://sonarsource.bintray.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-3.0.3.778-linux.zip
sudo apt-get install unzip
unzip sonar-scanner-cli-3.0.3.778-linux.zip
rm -rvf sonar-scanner-cli-3.0.3.778-linux.zip
# FIX: the here-doc delimiter is now quoted ('EOF'). The body defines shell
# functions whose ${1}, ${2}, ${@}, ${#}, ${input} and $(basename ...) must
# reach .bashrc literally; with an unquoted delimiter they were expanded at
# install time (mostly to empty strings), writing broken functions.
cat >> .bashrc << 'EOF'
alias sonar-scanner='~/sonar-scanner-3.0.3.778-linux/bin/sonar-scanner'
_sonar-scanner() { { local project_key ; project_key="${1}" ; }
sonar-scanner \
-Dsonar.host.url=http://localhost:9000 \
-Dsonar.jdbc.url=jdbc:postgresql://localhost/sonar \
-Dsonar.projectKey=${project_key} \
-Dsonar.sources=.
}
_sonar-provision-git() {
_() {
git clone ${1} ${2}
(
cd $( basename ${1} .git )
_sonar-scanner $( basename ${1} .git )
)
}
case ${#} in
1|2) {
_ ${@}
} ;;
0) {
while [ ! ]
do
echo adsf
read input
_ ${input}
done
} ;;
*) {
true
} ;;
esac
}
EOF
)
| true
|
04edaa6257a4b7890497a19f428c47fea3240f0c
|
Shell
|
Haegin/zsh-magic-history
|
/magic-history.plugin.zsh
|
UTF-8
| 1,236
| 2.859375
| 3
|
[] |
no_license
|
# zshrc/80_history
#
# Set up command line history functions
#
# Copyright © 1994–2008 martin f. krafft <madduck@madduck.net>
# Released under the terms of the Artistic Licence 2.0
#
# Source repository: git://git.madduck.net/etc/zsh.git
#
# NOTE(review): assumes ZVARDIR and HOST are set by the surrounding zsh
# configuration — confirm before reusing this plugin standalone.
# typeset -g on this one to make warn_create_global happy
typeset -g HISTFILE=$ZVARDIR/history-$HOST
# Migrate a legacy shared history file to the new per-host file name.
[[ -e $ZVARDIR/history ]] && mv $ZVARDIR/history $HISTFILE
HISTSIZE=10000
SAVEHIST=$HISTSIZE
LISTMAX=1000
# append to the history as line are executed in the shell
setopt inc_append_history
# update the history from other shells as commands are run
setopt share_history
# treat ! specially like csh did
setopt bang_hist
# ignore all duplicates in the history (keeping the most recent)
setopt hist_ignore_all_dups
# save timestamp and duration with each event
setopt extended_history
# properly lock the file on write, if that option exists
setopt hist_fcntl_lock 2>/dev/null
# skip over non-contiguous duplicates when searching history
setopt hist_find_no_dups
# don't store commands starting with a space in the history file
setopt hist_ignore_space
# don't store history/fc -l invocations
setopt hist_no_store
# remove superfluous blanks from each command line
setopt hist_reduce_blanks
# vim:ft=zsh
| true
|
317d11a7b9675a7b105289ce9a671da6e5a22f2b
|
Shell
|
chef/chef-workstation-app
|
/assets/scripts/chef_workstation_app_launcher
|
UTF-8
| 3,617
| 3.859375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Copyright:: Copyright 2020 Chef Software, Inc.
# Author:: Salim Afiune <afiune@chef.io>
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE(review): 'pipefail' is not POSIX sh; this relies on the target
# /bin/sh (bash on macOS) accepting it — confirm on other platforms.
set -eo pipefail
# Script name (for messages), product branding, the launchd service label,
# and the LaunchAgent plist file name derived from it.
PROGNAME=$(basename "$0")
PRODUCTNAME="Chef Workstation App"
SERVICENAME="io.chef.chef-workstation.app"
PLISTFILE="${SERVICENAME}.plist"
# Print the top-level usage text to stdout; PROGNAME/PRODUCTNAME expand at
# call time.
usage() {
cat <<DOC
Usage: ${PROGNAME} <subcommand>
Controls the ${PRODUCTNAME} launcher behavior for macOS systems.
Subcommands:
show Show launchd information about the ${PRODUCTNAME} service.
load Bootstraps the ${PRODUCTNAME} service.
remove Removes the ${PRODUCTNAME} service.
startup <state> Enable or disable the ${PRODUCTNAME} to start at boot. (default: enable)
DOC
}
# Print usage for the 'startup' subcommand to stdout.
usage_startup() {
cat <<DOC
Usage: ${PROGNAME} startup <enable|disable>
Enable or disable the ${PRODUCTNAME} to start at boot. (default: enable)
Arguments:
enable Enable the ${PRODUCTNAME} to start at boot.
disable Disable the ${PRODUCTNAME} to start at boot.
DOC
}
# Entry point: validate the platform, then dispatch on the subcommand.
main()
{
  # launchd/launchctl only exist on macOS; bail out everywhere else.
  is_darwin || error_exit "Launcher is only available for macOS systems"

  if [ $# -eq 0 ]; then
    usage
    exit 1
  fi

  case "$1" in
    "-h"|"--help")
      usage
      ;;
    "show")
      launchctl_show
      ;;
    "load")
      launchctl_load
      ;;
    "remove")
      launchctl_remove
      ;;
    "startup")
      # Default to enabling start-at-boot when no state is given.
      startup "${2:-enable}"
      ;;
    *)
      error_exit "invalid option '$1'.\\nTry '--help' for more information."
  esac
}
# Report an error (default: "Unknown Error") on stderr and abort with 1.
error_exit()
{
  _msg=${1:-"Unknown Error"}
  echo "${PROGNAME}: ${_msg}" >&2
  exit 1
}
# Succeed only when the kernel identifies itself as Darwin (macOS).
is_darwin()
{
  uname -v | grep -q "^Darwin"
}
# Enable or disable launching the app at login by installing or removing
# the LaunchAgent plist under ~/Library/LaunchAgents.
startup()
{
  case "$1" in
    "-h"|"--help")
      usage_startup
      ;;
    "disable")
      # Without the agent plist, launchd no longer starts the app.
      rm -f "$HOME/Library/LaunchAgents/${PLISTFILE}"
      ;;
    "enable")
      if [ ! -d "$HOME/Library/LaunchAgents" ]; then
        mkdir "$HOME/Library/LaunchAgents"
      fi
      # Copy the bundled plist out of the installed application.
      cp "/Applications/Chef Workstation App.app/Contents/Resources/assets/${PLISTFILE}" "$HOME/Library/LaunchAgents/."
      ;;
    *)
      error_exit "invalid startup state '$1'.\\nValid states are 'enable' or 'disable'.\\nTry '--help' for more information."
  esac
}
# Install the LaunchAgent (via startup "enable") and load it into launchd.
# Requires the application bundle to be present under /Applications.
launchctl_load()
{
if [ ! -d "/Applications/Chef Workstation App.app/Contents" ]; then
error_exit "${PRODUCTNAME} not found in /Applications folder. The application needs to be installed first."
fi
# Drop any previously loaded copy of the service before re-installing.
launchctl_remove
# we let launchd to process the removal of the service
sleep 1
startup "enable"
# Load relative to the LaunchAgents directory so the plist path is bare.
( cd "$HOME/Library/LaunchAgents" || error_exit "unable to enter LaunchAgents directory"
launchctl load ${PLISTFILE}
)
}
# Unload the service from launchd (when registered) and delete the agent
# plist. Safe to call when neither exists.
launchctl_remove()
{
if launchctl list "$SERVICENAME" >/dev/null 2>&1; then
launchctl remove "$SERVICENAME"
fi
if [ -f "$HOME/Library/LaunchAgents/${PLISTFILE}" ]; then
rm -rf "$HOME/Library/LaunchAgents/${PLISTFILE}"
fi
}
# Print launchd's job table entry for the service, or fail with a hint to
# run 'load' first.
launchctl_show()
{
if launchctl list "$SERVICENAME" >/dev/null 2>&1; then
launchctl list "$SERVICENAME"
else
error_exit "$PRODUCTNAME is not yet loaded.\\nTry using the subcommand 'load'."
fi
}
main "$@"
| true
|
2ff953b95c2e765b0e57803a957b297ac91f1408
|
Shell
|
yuchangyang/ywsh
|
/gfs01/put_scp.sh
|
UTF-8
| 604
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy the data file named by $1 from $work_path to the gfs3 and gfs5 nodes.
#
# Usage: put_scp.sh <dataname>

src_path=/home/welend/benson/data/config
work_path=/home/welend/xiaoayang/xyfile
#work_path=/tmp/applied/xyfile
data_path=$work_path/data_files
tar_path=$work_path/tar_files

dataname=$1
# FIX: $logtime was used in every log line but never assigned, so every
# message started blank. Stamp it once at startup.
logtime=$(date '+%Y-%m-%d %H:%M:%S')

# Send data file to gfs3 server
if ! scp "$work_path/$dataname" "fs-node3:$work_path/"; then
    echo "$logtime ->send data file to gfs3 failed!"
    exit 1
else
    echo "$logtime ->send data file to gfs3 OK!"
fi

# Send data file to gfs5 server
if ! scp "$work_path/$dataname" "fs-node5:$work_path/"; then
    echo "$logtime ->send data file to gfs5 failed!"
    exit 1
else
    echo "$logtime ->get data file to gfs5 OK!"
fi
| true
|
ec562e5c87edee93bda120a6bf463b3b1b14698d
|
Shell
|
LIParadise/DSnP_hw5
|
/tests.sh
|
UTF-8
| 1,702
| 2.828125
| 3
|
[] |
no_license
|
# Run do1..do4 through the reference ADT binary and the local build,
# capturing stdout+stderr of each run into log/.
#
# usage: ./tests.sh <a/b/d>   (a = array, b = bst, d = dlist)
if [ $# -ne 1 ]; then
  echo "usage: ./tests.sh <a/b/d>"
  exit
fi

# Map the single-letter selector onto the binary suffix; the three suites
# in the original were identical except for this suffix.
case "${1}" in
  a) variant=array ;;
  b) variant=bst ;;
  d) variant=dlist ;;
  *) exit ;;
esac

echo "case $variant"
# Reference implementation first...
for n in 1 2 3 4; do
  ref/adtTest.$variant -f "tests/do$n" >& "log/ref$n"
done
# ref/adtTest.$variant -f tests/do6 >& log/ref6
echo "ref done"
# ...then the locally built one.
for n in 1 2 3 4; do
  ./adtTest.$variant -f "tests/do$n" >& "log/my$n"
done
# ./adtTest.$variant -f tests/do6 >& log/my6
echo "mine done"
echo ""
exit
| true
|
3907680d3fa7a9f5829283abaff065a4b8984b2c
|
Shell
|
pcifarelli/emailfetch
|
/docker/docker-build.sh
|
UTF-8
| 139
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the emailfetch image, but only when the docker CLI is available.
# `command -v` is the idiomatic replacement for capturing `type` output
# into a variable and string-comparing it.
if ! command -v docker > /dev/null 2>&1; then
	echo "docker not installed"
else
	docker build -t emailfetch .
fi
| true
|
c1d9c5dacd880b712c1b7babdd8463d97c049b15
|
Shell
|
levii/feedbackbox-py
|
/bin/scenario.sh
|
UTF-8
| 1,577
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Demo scenario driver: steps through user/feedback CLI commands, pausing
# for the operator between steps. Aborts on the first failing command.
set -e
# Display a banner with the given message and block until the operator
# presses enter.
prompt()
{
    local banner="$1"
    echo "========================================"
    echo "$banner"
    echo -n "Please enter: "
    read -r
}
# Echo the command line being demonstrated, execute it via main.py, then
# print a blank separator line.
run()
{
    printf '$ python main.py %s\n' "$*"
    python main.py "$@"
    echo
}
# Start every run from a clean datastore.
rm -f datafile.pickle
# Create two customer users and one support user.
prompt "ユーザを作成する"
run user create --name "Customer 1" --role "customer" --user_id=1001
run user create --name "Customer 2" --role "customer" --user_id=1002
run user create --name "Test Support" --role "support" --user_id=9001
prompt "ユーザ一覧を確認"
run user list
# Each customer files feedback items.
prompt "顧客1, 2 それぞれから要望を作成する"
run feedback create \
--title "顧客1からの機能追加の要望 (その1)" \
--description "あれこれと便利な機能を追加してほしいです" \
--user_id 1001
run feedback create \
--title "顧客1からの機能追加の要望 (その2)" \
--description "あれこれと便利な機能を追加してほしいです" \
--user_id 1001
run feedback create \
--title "顧客2からの機能追加の要望" \
--description "あれこれと便利な機能を追加してほしいです" \
--user_id 1002
# Support sees all feedback; a customer sees only their own entries.
prompt "サポートユーザは、全ての要望を確認できる"
run feedback list --user_id 9001
prompt "顧客は、自分の作成した要望だけを確認できる"
run feedback list --user_id 1001
# Final step is shown, not executed: it needs a real FEEDBACK_ID.
echo "サポートユーザは、要望に対してコメントを追加できる"
echo "$ python main.py feedback comment create --user_id 9001 --feedback_id <FEEDBACK_ID> --comment <コメント本文>"
| true
|
d9a209626a0efb642077f6a58136d831547c8446
|
Shell
|
fnproject/fn
|
/images/dind/preentry.sh
|
UTF-8
| 520
| 3.203125
| 3
|
[
"Apache-2.0",
"BSD-3-Clause",
"Unlicense",
"MPL-1.0",
"MPL-2.0",
"ISC",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/sh
# Prepare and start dockerd inside a DinD container, then exec the real
# command passed as arguments.
# NOTE(review): 'pipefail' is not POSIX sh; this relies on the image's
# /bin/sh (busybox ash/bash) supporting it — confirm on the base image.
set -euo pipefail

# First supported storage filesystem; docker names overlay "overlay2".
fsdriver=$(grep -Eh -w -m1 "overlay|aufs" /proc/filesystems | cut -f2)
# FIX: POSIX test uses '=' (not the bash-only '==') and the expansion must
# be quoted so an empty result cannot produce a syntax error.
if [ "$fsdriver" = "overlay" ]; then
    fsdriver="overlay2"
fi

# MTU of the default-route interface, so container networking matches it.
mtu=$(ip link show dev "$(ip route |
    awk '$1 == "default" { print $NF }')" |
    awk '{for (i = 1; i <= NF; i++) if ($i == "mtu") print $(i+1)}')

# activate job control, prevent docker process from receiving SIGINT
set -m
dockerd-entrypoint.sh --storage-driver="$fsdriver" --mtu="$mtu" &
# give docker a few seconds
sleep 3
exec "$@"
| true
|
5f6224e9d02b857cb03473af4e182e5916d19dfa
|
Shell
|
magos-linux/magos-linux
|
/make_rpms/modmnger/2_build.sh
|
UTF-8
| 405
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build source and binary RPMs from ./SPECS and ./SOURCES into ./srpms and
# ./rpms, using a fresh local rpmbuild tree linked from ~/rpmbuild.
rm -fr rpmbuild
mkdir -p rpmbuild/SOURCES rpmbuild/SPECS rpms srpms
# Point ~/rpmbuild at the local tree unless the user already has one.
[ -d ~/rpmbuild ] || ln -s "$PWD/rpmbuild" ~/rpmbuild
cp -p SOURCES/*.* rpmbuild/SOURCES
cp -p SPECS/* rpmbuild/SPECS
# (The original's "cd rpmbuild/SPECS; cd ../../" round-trip was a no-op and
# has been dropped.)
rpmbuild -bs rpmbuild/SPECS/*.spec || exit 1
rpmbuild -bb rpmbuild/SPECS/*.spec || exit 1
# Collect the built RPMs. FIX: `find | grep .rpm$` matched any path whose
# last four characters were <any>rpm and broke on whitespace; match the
# real extension and handle arbitrary path characters.
find . -name '*.rpm' -print0 | while IFS= read -r -d '' a; do mv "$a" rpms; done
mv rpms/*.src.rpm srpms
echo Done.
| true
|
599fc5bba5cd696ef70647ae3d2f19c71779ee32
|
Shell
|
liuheng2cqupt/FATE
|
/contrib/fate_script/run_fateScript.sh
|
UTF-8
| 2,217
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Resolve the directory containing this script ($0 absolute or relative).
DIRNAME=$0
if [ "${DIRNAME:0:1}" = "/" ];then
CURDIR=`dirname $DIRNAME`
else
CURDIR="`pwd`"/"`dirname $DIRNAME`"
fi
echo $CURDIR
# Regenerate the transfer-variable python module from its JSON definition.
generator_path=$(cd "$(dirname "$0")";pwd)'/../../federatedml/util/transfer_variable_generator.py'
transfer_json_path=$(cd "$(dirname "$0")";pwd)'/conf/FateScriptTransferVar.json'
transfer_variable_path=$(cd "$(dirname "$0")";pwd)'/fate_script_transfer_variable.py'
python $generator_path $transfer_json_path $transfer_variable_path
echo 'Finish generate fate_script_transfer_variable.py'
# Party roles: H = host, G = guest, A = arbiter.
party_1='H'
party_2='G'
party_3='A'
# Job id unique per run, derived from the current timestamp.
curtime=$(date +%Y%m%d%H%M%S)
# NOTE(review): the parentheses make jobid a one-element array; later bare
# $jobid expansions use its first element, which happens to work.
jobid=("hetero_logistic_regression_example_standalone_"${curtime})
base_conf_path=$(cd "$(dirname "$0")";pwd)'/conf/'
runtime_conf_path_1=$base_conf_path"host_runtime_conf.json"
runtime_conf_path_2=$base_conf_path"guest_runtime_conf.json"
runtime_conf_path_3=$base_conf_path"arbiter_runtime_conf.json"
echo $party_1" runtime conf path:"$runtime_conf_path_1
echo $party_2" runtime conf path:"$runtime_conf_path_2
echo $party_3" runtime conf path:"$runtime_conf_path_3
# Snapshot each runtime conf under a job-specific file name.
cp $runtime_conf_path_1 $runtime_conf_path_1"_"$jobid
cp $runtime_conf_path_2 $runtime_conf_path_2"_"$jobid
cp $runtime_conf_path_3 $runtime_conf_path_3"_"$jobid
echo 'Generate jobid:'$jobid
# Host and guest run in the background; the arbiter runs in the foreground.
echo "Start run fateScript.py for "$party_1
python fateScript.py $party_1 $jobid $runtime_conf_path_1"_"$jobid&
#python fateScript.py $party_1 $jobid "/data/projects/qijun/fate/python/examples/hetero_logistic_regression/conf/host_runtime_conf.json_hetero_logistic_regression_example_standalone_20190508194322"&
echo "Start run fateScript.py for "$party_2
python fateScript.py $party_2 $jobid $runtime_conf_path_2"_"$jobid&
#python fateScript.py $party_2 $jobid "/data/projects/qijun/fate/python/examples/hetero_logistic_regression/conf/guest_runtime_conf.json_hetero_logistic_regression_example_standalone_20190508194322"&
sleep 1
echo "Start run fateScript.py for "$party_3
python fateScript.py $party_3 $jobid $runtime_conf_path_3"_"$jobid
#python fateScript.py $party_3 $jobid "/data/projects/qijun/fate/python/examples/hetero_logistic_regression/conf/arbiter_runtime_conf.json_hetero_logistic_regression_example_standalone_20190508194322"
sleep 1
| true
|
4caef4f2c9fd1a9f59ddf63ee49f1de640ff2702
|
Shell
|
OpenWise/wiseup-rpi
|
/service/wiseup-comm
|
UTF-8
| 768
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh
# Starts and stops wiseup-comm
# /etc/init.d/wiseup-comm
### BEGIN INIT INFO
# Provides: wiseup-comm
# Required-Start: $syslog
# Required-Stop: $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: wiseup-comm
### END INIT INFO
case "$1" in
start)
echo "# [START] Wiseup COMM service ..."
# Run the wiseup binary detached inside a named screen session.
cd /home/pi/workspace/wiseup-rpi/build/src
sudo screen -dmS wiseup-comm ./wiseup
;;
stop)
echo "# [STOP] Wiseup COMM service ..."
# Quit the named screen session, taking the service down with it.
sudo screen -S wiseup-comm -X quit
;;
restart)
echo "# [RESTART] Wiseup COMM service ..."
# Re-invoke this script for stop, then start.
$0 stop
$0 start
;;
*)
echo "Usage: $0 {start|stop|restart}"
exit 1
;;
esac
| true
|
4a9b3c43b011ae4df9ccd2abcedf678b95fcdf11
|
Shell
|
nuxeo/nuxeo.io-docker-etcdump
|
/etcdump.sh
|
UTF-8
| 1,102
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/sh -
# Dump the arken and fleet etcd databases to timestamped JSON files and
# upload both dumps to the $S3_BUCKET S3 bucket. Failures are reported but
# do not stop the remaining steps (best-effort, as in the original).
/bin/write-awscli-conf.sh
NOW=$(date +"%Y_%m_%d-%H_%M")
DUMPER=/usr/lib/node_modules/etcdump/bin/etcdump
# dump etcd
echo "Dumping arken etcd"
if ! nodejs "$DUMPER" --config /root/config-arken.json --file "etcd_dump_arken_$NOW.json" dump; then
    echo "Error while dumping arken etcd."
else
    echo "Successfuly dumped arken etcd DB."
fi
echo "Dumping fleet etcd"
if ! nodejs "$DUMPER" --config /root/config-fleet.json --file "etcd_dump_fleet_$NOW.json" dump; then
    echo "Error while dumping fleet etcd."
else
    echo "Successfuly dumped fleet etcd DB."
fi
# push on s3
if ! /usr/bin/aws s3api put-object --bucket "$S3_BUCKET" --key "etcd_dump_arken_$NOW.json" --body "etcd_dump_arken_$NOW.json"; then
    echo "Error while pushing arken etcd dump on S3."
else
    echo "Successfuly pushed etcd arken dump on S3."
fi
if ! /usr/bin/aws s3api put-object --bucket "$S3_BUCKET" --key "etcd_dump_fleet_$NOW.json" --body "etcd_dump_fleet_$NOW.json"; then
    echo "Error while pushing etcd fleet dump on S3."
else
    echo "Successfuly pushed etcd fleet dump on S3."
fi
| true
|
d4fc9e3a9becb2be55be4eef6b49c7c9efe6ee94
|
Shell
|
PaulTegel/testnetzrepository
|
/testnetz-dns/base-image/bin/ubuntu.sh
|
UTF-8
| 507
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# Note: The handling of this script does effect in Ubuntu (container)
# environment only; it solves a specific problem for successfully
# running 'tcpdump' command!
# (problem: /usr/sbin/tcpdump: error while loading shared libraries:
# libcrypto.so.1.0.0: cannot open shared object file: Permission denied
# Relocate tcpdump to /usr/bin (leaving a symlink behind) only when the
# kernel banner identifies an Ubuntu environment; the matching banner line
# is still printed, as before.
if /bin/uname -a | /bin/grep "Ubuntu"; then
    /bin/mv /usr/sbin/tcpdump /usr/bin/tcpdump
    /bin/ln -s /usr/bin/tcpdump /usr/sbin/tcpdump
fi
| true
|
c9b2eb13c95e42ff303cad1cfe14105567057363
|
Shell
|
hly9624/shell
|
/day2/elif.sh
|
UTF-8
| 227
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Classify the path given as the single argument: plain file, directory, or
# other type. Exits 1 when the argument count is wrong.
if [ $# -ne 1 ]
then
	exit 1
fi
# FIX: "$1" is quoted in every test below; unquoted, a path containing
# spaces word-splits and makes each [ ... ] test error out.
if ! [ -e "$1" ]
then
	echo "the $1 not exist"
fi
if [ -f "$1" ]
then
	echo "$1 是普通文件"
elif [ -d "$1" ]
then
	echo "$1 是一个目录"
else
	echo "$1 是其他类型"
fi
| true
|
d2b75d36d85cf5a3fb97e00393d7f646b13f99f8
|
Shell
|
leoribg/wisun-backhaul-docker
|
/init-container.sh
|
UTF-8
| 11,886
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/sh
# vim: set sw=4 expandtab:
#
# Copyright 2021, Silicon Labs
# SPDX-License-Identifier: zlib
# Main authors:
# - Jérôme Pouiller <jerome.pouiller@silabs.com>
#
# Disable monitor (job-control) mode so background jobs stay quiet.
set +m
# PID of the background tunslip6 process; -1 until launch_tunslip6 runs.
TUNSLIP6_PID=-1
# Default UART device; presumably overridden by the -d/--device option
# described in print_usage (option parsing is not visible in this chunk).
UART=/dev/ttyACM0
# Print all arguments as one line on stderr, then abort with status 1.
die()
{
    printf '%s\n' "$*" >&2
    exit 1
}
# Print the full help text to stderr.
#   $1: program name shown in the examples
#   $2: optional exit status; when non-empty, exit with it after printing
print_usage()
{
cat << EOF >&2
Usage: $1 [OPTIONS] [MODE]
Setup the docker container to create a Wi-SUN Border Router.
Container options:
-d, --device=DEVICE UART device to use (default: /dev/ttyACM0).
-D, --dhcp Configure IPv4 using DHCP. Use it if you rely on a
network interface with macvlan driver.
-r, --advert-route Advertise the new route on eth0. Only works with the subnet
mode. Most of the hosts won't accept le route unless the
parameter accept_ra_rt_info_max_plen is at least the size
of the advertised prefix size. You may use it if the
router of your network is not able to manage the new route
itself.
-F, --flash=FW_PATH Flash radio board with FW_PATH.
-T, --chip-traces Show traces from the chip.
-s, --shell Launch a shell on startup.
-h, --help Show this help.
Wi-SUN options:
-n, --ws-network=NAME Set Wi-SUN network name.
-C, --ws-domain=CC Set Wi-SUN regulatory domain. Valid values: EU, NA, JP,
...). (experimental)
-m, --ws-mode=HEX Set operating mode. Valid values: 1a, 1b, 2a, 2b, 3, 4a,
4b and 5. (experimental)
-c, --ws-class=NUM Set operating class. Valid values: 1, 2 or 3.
(experimental)
These parameters are automatically saved in the radio board. If a parameter is
not specified, the radio board value is used.
Modes:
local The nodes will be only able to communicate with the docker
instance using a random site-local prefix.
site_local Advertise a random site-local prefix and run a proxy. Local
workstations will retrieve an IPv6 address allowing them to
communicate with Wi-SUN nodes.
proxy Re-use the local IPv6 prefix to configure Wi-SUN nodes.
subnet [PREFIX] Use PREFIX to configure Wi-SUN nodes. PREFIX should come from
configuration of the parent router. If PREFIX is not defined,
generate a random site-local one.
auto Detect if a local IPv6 network is available and launch
\`site_local' or \`proxy' accordingly.
Note that random site-local prefixes are not routable (ie. you can't access
outside with these).
Examples:
Provide minimal infrastructure to configure Wi-SUN device through a Border
Router connected on /dev/ttyUSB0:
$1 -d /dev/ttyUSB0 local
Parent router is correctly configured to delegate prefix
2a01:e35:2435:66a1::/64 to my docker container:
$1 subnet 2a01:e35:2435:66a1::1/64
You want to test prefix delegation with a random prefix:
other_host> sysctl net.ipv6.conf.eth0.accept_ra_rt_info_max_plen=128
this_host> $1 subnet -r
EOF
# Exit with the requested status when the caller supplied one.
[ "$2" ] && exit $2
}
# Emit a random IPv6-prefix fragment of the form xx:xxxx:xxxx:xxxx, built
# from 7 bytes of /dev/urandom (od's padding spaces are stripped at the end).
get_random_prefix()
{
    b1=$(dd bs=1 count=1 if=/dev/urandom 2> /dev/null | od -A n -t x1)
    b2=$(dd bs=2 count=1 if=/dev/urandom 2> /dev/null | od -A n -t x2)
    b3=$(dd bs=2 count=1 if=/dev/urandom 2> /dev/null | od -A n -t x2)
    b4=$(dd bs=2 count=1 if=/dev/urandom 2> /dev/null | od -A n -t x2)
    printf '%s:%s:%s:%s\n' "$b1" "$b2" "$b3" "$b4" | tr -d ' '
}
# Verify we may manipulate network interfaces (requires --privileged) by
# creating a throwaway dummy interface; abort otherwise.
check_privilege()
{
    if ! ip link add dummy0 type dummy 2> /dev/null; then
        die "Not enough privilege to run (missing --privileged?)"
    fi
    ip link delete dummy0
}
# Start a background tshark capture on tun0 limited to ICMPv6 neighbor
# solicitation/advertisement packets (ip6 next-header byte 135/136).
launch_icmp_monitoring()
{
# Interresting packets:
# echo-request: 128
# echo-reply: 129
# router-solicitation: 133
# router-advertisement: 134
# neighbor-solicitation: 135
# neighbor-advertisement: 136
tshark -i tun0 "icmp6 && (ip6[40] == 135 || ip6[40] == 136)" &
}
# Acquire an IPv4 lease on eth0 via udhcpc. /etc/resolv.conf is unmounted
# first so the DHCP client can rewrite it — presumably because the container
# runtime bind-mounts it; confirm against the deployment.
launch_dhcpc()
{
umount /etc/resolv.conf
udhcpc -i eth0
}
# Start tunslip6 on $UART (115200 baud) to create the tun0 interface.
#   $1 (optional): IPv6 address/prefix for tun0; defaults to fd01::1/64.
# Sets TUNSLIP6_PID and waits up to ~2s for tun0 to get a global address.
launch_tunslip6()
{
HAS_ARG=$1
IPV6_IP=${1:-fd01::1/64}
[ -e "$UART" ] || die "Failed to detect $UART"
echo " ---> [1mLaunch tunslip6 on $UART[0m"
tunslip6 -s $UART -B 115200 $IPV6_IP &
TUNSLIP6_PID=$!
# Poll (10 x 0.2s) until tun0 shows up with a global-scope address.
for i in $(seq 10); do
ip -6 addr show scope global | grep -q tun0 && break
sleep 0.2
done
if [ ! "$HAS_ARG" ]; then
# tunslip6 add these addresses but it is useless.
ip addr del dev tun0 fe80::1/64
ip addr del dev tun0 fd01::1/64
else
# tunslip6 add this address but it is useless
#ip addr del dev tun0 fe80::1/64
true
fi
}
# Generate /etc/radvd.conf and start radvd.
#   $1 - IPv6 prefix to advertise on tun0
#   $2 - optional eth0 behavior: "adv_prefix" to also advertise the
#        prefix on eth0, "adv_route" to advertise a route instead,
#        "" for tun0-only advertisement.
launch_radvd()
{
    IPV6_NET=$1
    EXT_BEHAVIOR=$2
    echo " ---> [1mLaunch radvd on $IPV6_NET[0m"
    cat << EOF > /etc/radvd.conf
interface tun0 {
    AdvSendAdvert on;
    IgnoreIfMissing on;
    prefix $IPV6_NET { };
};
EOF
    # Append the eth0 section matching the requested external behavior.
    case "$EXT_BEHAVIOR" in
    adv_prefix)
        cat << EOF >> /etc/radvd.conf
interface eth0 {
    AdvSendAdvert on;
    AdvDefaultLifetime 0;
    prefix $IPV6_NET { };
};
EOF
        ;;
    adv_route)
        cat << EOF >> /etc/radvd.conf
interface eth0 {
    AdvSendAdvert on;
    AdvDefaultLifetime 0;
    route $IPV6_NET {
        AdvRouteLifetime 1800;
    };
};
EOF
        ;;
    "")
        ;;
    *)
        die "internal error: unknown options: $EXT_BEHAVIOR"
        ;;
    esac
    radvd --logmethod stderr
}
# Generate /etc/ndppd.conf and start the NDP proxy daemon so neighbor
# discovery for $1 is relayed in both directions between eth0 and tun0.
#   $1 - IPv6 prefix to proxy
launch_ndppd()
{
    IPV6_NET=$1
    echo " ---> [1mLaunch ndppd on $IPV6_NET[0m"
    cat << EOF > /etc/ndppd.conf
proxy eth0 {
    autowire yes
    rule $IPV6_NET {
        iface tun0
    }
}
proxy tun0 {
    autowire yes
    rule $IPV6_NET {
        iface eth0
    }
}
EOF
    ndppd -d
}
# Hand over to the final foreground task once networking is configured:
#  - with -s/--shell: run an interactive shell (requires a tty),
#  - with -T/--chip-traces: stream device traces,
#  - otherwise: block until the background tunslip6 exits.
launch_last_process()
{
    echo " ---> [1mResult of 'ip -6 addr':[0m"
    ip -6 addr
    if [ "$LAUNCH_SHELL" ]; then
        # BUGFIX: was "> /dev/nul" (missing final 'l'), which created a
        # regular file named "nul" instead of discarding the output.
        if tty > /dev/null 2> /dev/null; then
            set -m
        else
            echo "Cannot get tty (missing -t in docker command line?)"
        fi
        echo " ---> [1mLaunch sh[0m"
        exec sh
    elif [ "$LAUNCH_TRACES" ]; then
        echo " ---> [1mLaunch wisun-device-traces[0m"
        exec wisun-device-traces
    else
        wait $TUNSLIP6_PID
        echo " ---> [1mWi-SUN border router has disappeared[0m"
    fi
}
# Send a command line to the radio board via the OpenOCD RTT server
# listening on localhost:1001.
jlink_rtt_run()
{
    local cmd="$*"
    echo "run \"$cmd\" on radio board"
    echo "$cmd" | nc 127.0.0.1 1001
}
# Send a command line to the JLink probe via the OpenOCD telnet port on
# localhost:1002 (output discarded).
jlink_run()
{
    local cmd="$*"
    echo "run \"$cmd\" on JLink probe"
    echo "$cmd" | nc 127.0.0.1 1002 > /dev/null
}
# Start a temporary OpenOCD session against the EFM32 board, optionally
# flash $WS_FIRMWARE, push the WS_* Wi-SUN settings over RTT, save them
# and reset the board. OpenOCD is stopped again before returning.
#   $1 - unused (kept for call-site compatibility)
launch_openocd()
{
    IS_MANDATORY=$1
    # RTT console on 1001, telnet on 1002; quoted 'EOF' so nothing expands.
    cat << 'EOF' > /tmp/openocd-rtt.cfg
rtt setup 0x20001c00 0x04000 "SEGGER RTT"
rtt server start 1001 0
telnet_port 1002
gdb_port 1003
tcl_port 1004
init
rtt start
EOF
    echo " ---> [1mLaunch OpenOCD[0m"
    openocd -d0 -f board/efm32.cfg -f /tmp/openocd-rtt.cfg &
    OPENOCD_PID=$!
    sleep 1
    # If OpenOCD died within a second, the probe is not reachable.
    [ -d /proc/$OPENOCD_PID ] || die "Cannot connect to JLink probe"
    if [ "$WS_FIRMWARE" ]; then
        [ -e "$WS_FIRMWARE" ] || die "'$WS_FIRMWARE' not found (missing -v in docker command?)"
        jlink_run "program $WS_FIRMWARE 0 reset"
    fi
    [ "$WS_DOMAIN" ]  && jlink_rtt_run "wisun set wisun.regulatory_domain $WS_DOMAIN"
    [ "$WS_CLASS" ]   && jlink_rtt_run "wisun set wisun.operating_class $WS_CLASS"
    [ "$WS_MODE" ]    && jlink_rtt_run "wisun set wisun.operating_mode $WS_MODE"
    [ "$WS_NETWORK" ] && jlink_rtt_run "wisun set wisun.network_name $WS_NETWORK"
    jlink_rtt_run "wisun save"
    sleep 0.5
    jlink_run "reset run"
    kill $OPENOCD_PID
}
# "proxy" mode: reuse the IPv6 prefix advertised by the upstream router
# on eth0 (discovered with rdisc6) and NDP-proxy it onto tun0.
run_proxy()
{
    sysctl -q net.ipv6.conf.default.disable_ipv6=0
    sysctl -q net.ipv6.conf.all.disable_ipv6=0
    sysctl -q net.ipv6.conf.default.forwarding=1
    sysctl -q net.ipv6.conf.all.forwarding=1
    # accept_ra=2 keeps accepting router advertisements despite forwarding.
    sysctl -q net.ipv6.conf.default.accept_ra=2
    sysctl -q net.ipv6.conf.all.accept_ra=2
    # Wait up to ~2s for eth0 to get a global address via SLAAC.
    for i in $(seq 10); do
        ip -6 addr show scope global | grep -q eth0 && break
        sleep 0.2
    done
    IPV6_NET=$(rdisc6 -r 10 -w 400 -q -1 eth0)
    [ "$IPV6_NET" ] || die "Failed to get IPv6 address"
    launch_tunslip6 $IPV6_NET
    launch_radvd $IPV6_NET
    launch_ndppd $IPV6_NET
    launch_icmp_monitoring
    launch_last_process
}
# "site_local" mode: generate a random fd00::/8 prefix, advertise it on
# both tun0 and eth0, and NDP-proxy it between the two interfaces.
run_site_local()
{
    sysctl -q net.ipv6.conf.default.disable_ipv6=0
    sysctl -q net.ipv6.conf.all.disable_ipv6=0
    sysctl -q net.ipv6.conf.default.forwarding=1
    sysctl -q net.ipv6.conf.all.forwarding=1
    sysctl -q net.ipv6.conf.default.accept_ra=2
    sysctl -q net.ipv6.conf.all.accept_ra=2
    SITE_PREFIX=$(get_random_prefix)
    launch_tunslip6 fd$SITE_PREFIX::1/64
    launch_radvd fd$SITE_PREFIX::/64 adv_prefix
    launch_ndppd fd$SITE_PREFIX::/64
    launch_icmp_monitoring
    launch_last_process
}
# "local" mode: minimal setup — random site-local prefix advertised on
# tun0 only (no forwarding, no proxying to eth0).
run_local()
{
    sysctl -q net.ipv6.conf.default.disable_ipv6=0
    sysctl -q net.ipv6.conf.all.disable_ipv6=0
    sysctl -q net.ipv6.conf.default.accept_ra=2
    sysctl -q net.ipv6.conf.all.accept_ra=2
    SITE_PREFIX=$(get_random_prefix)
    launch_tunslip6 fd$SITE_PREFIX::1/64
    launch_radvd fd$SITE_PREFIX::/64
    launch_icmp_monitoring
    launch_last_process
}
# "subnet" mode: advertise a delegated (or random, with no argument)
# prefix on tun0; with -r, also advertise a route to it on eth0.
#   $1 - optional PREFIX/LEN (default: random fd00::/8 /64)
run_subnet()
{
    sysctl -q net.ipv6.conf.default.disable_ipv6=0
    sysctl -q net.ipv6.conf.all.disable_ipv6=0
    sysctl -q net.ipv6.conf.default.forwarding=1
    sysctl -q net.ipv6.conf.all.forwarding=1
    sysctl -q net.ipv6.conf.default.accept_ra=2
    sysctl -q net.ipv6.conf.all.accept_ra=2
    IPV6_NET=${1:-fd$(get_random_prefix)::/64}
    launch_tunslip6
    if [ "$ADVERT_ROUTE" ]; then
        launch_radvd $IPV6_NET adv_route
    else
        launch_radvd $IPV6_NET
    fi
    launch_icmp_monitoring
    launch_last_process
}
# "auto" mode: wait up to ~4s for a global IPv6 address on eth0; run
# proxy mode if one appears, otherwise fall back to site_local mode.
run_auto()
{
    HAVE_IPV6=
    for i in $(seq 20); do
        ip -6 addr show scope global | grep -q eth0 && HAVE_IPV6=1 && break
        sleep 0.2
    done
    if [ "$HAVE_IPV6" ]; then
        echo " ---> [1mFound IPv6 network[0m"
        run_proxy
    else
        echo " ---> [1mNo network found[0m"
        run_site_local
    fi
}
# ---- command-line parsing and top-level dispatch ----
OPTS=$(getopt -l shell,chip-traces,dhcp,device:,advert-route,flash:,ws-network:,ws-domain:,ws-mode:,ws-class:,help -- sTDd:rF:n:C:m:c:h "$@") || exit 1
eval set -- "$OPTS"
while true; do
    case "$1" in
    -s|--shell)
        LAUNCH_SHELL=1
        shift 1
        ;;
    -T|--chip-traces)
        LAUNCH_TRACES=1
        shift 1
        ;;
    -D|--dhcp)
        LAUNCH_DHCPC=1
        shift 1
        ;;
    -d|--device)
        UART=$2
        shift 2
        ;;
    -r|--advert-route)
        ADVERT_ROUTE=1
        # BUGFIX: -r takes no argument (no ':' in the getopt spec), so
        # only one token must be consumed; "shift 2" silently swallowed
        # the option that followed -r on the command line.
        shift 1
        ;;
    -F|--flash)
        WS_FIRMWARE=$2
        LAUNCH_OPENOCD=1
        shift 2
        ;;
    -n|--ws-network)
        WS_NETWORK=$2
        LAUNCH_OPENOCD=1
        shift 2
        ;;
    -C|--ws-domain)
        WS_DOMAIN=$2
        LAUNCH_OPENOCD=1
        shift 2
        ;;
    -m|--ws-mode)
        WS_MODE=$2
        LAUNCH_OPENOCD=1
        shift 2
        ;;
    -c|--ws-class)
        WS_CLASS=$2
        LAUNCH_OPENOCD=1
        shift 2
        ;;
    -h|--help)
        print_usage $0
        exit 0
        ;;
    --)
        shift
        break
        ;;
    esac
done
# BUGFIX: the flag set by -T is LAUNCH_TRACES; the check used to read
# the (always empty) $LAUNCH_TRACE, so the exclusivity was never enforced.
[ "$LAUNCH_SHELL" ] && [ "$LAUNCH_TRACES" ] && die "--shell and --chip-traces are exclusive"
check_privilege
sysctl -q net.ipv6.conf.eth0.accept_ra=2
sysctl -q net.ipv6.conf.eth0.disable_ipv6=0
[ "$LAUNCH_DHCPC" ] && launch_dhcpc
[ "$LAUNCH_OPENOCD" ] && launch_openocd
# Dispatch on the MODE positional argument (default: auto).
case "$1" in
auto|"")
    run_auto
    ;;
site_local)
    run_site_local
    ;;
local)
    run_local
    ;;
proxy)
    run_proxy
    ;;
subnet)
    run_subnet $2
    ;;
*)
    print_usage $0
    exit 1
esac
| true
|
2dcaa70302bdc08410e5b81314104b17028da927
|
Shell
|
ISISComputingGroup/EPICS-ActiveMQ
|
/procserv_stop.sh
|
UTF-8
| 764
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Stop the procServ-managed JMS / ActiveMQ servers by killing the PIDs
# recorded in their pid files.
SCRIPT=$(readlink -f "${BASH_SOURCE[0]}")
SCRIPTPATH=$(dirname "$SCRIPT")
MYDIR=$SCRIPTPATH

# Ensure environment is set up
if [ -z "$EPICS_ROOT" ]; then
    . $MYDIR/../../../config_env_base.sh
fi

# Kill the process recorded in a pid file and remove the file.
#   $1 - pid file path
#   $2 - human-readable server name (used in messages)
# Factors out the two duplicated kill-from-pidfile sections.
stop_from_pidfile() {
    local pidfile=$1
    local name=$2
    local cspid
    if [ -r "$pidfile" ]; then
        cspid=$(cat "$pidfile")
        echo "Killing $name PID: $cspid"
        kill $cspid
        rm "$pidfile"
    else
        echo "$name is not running (or $pidfile not readable)"
    fi
}

# kill procservs that manage active MQ
stop_from_pidfile "$EPICS_ROOT/EPICS_JMS.pid" "JMS server"
stop_from_pidfile "$MYDIR/ActiveMQ/data/activemq-$(hostname).pid" "JMS ActiveMQ server"
| true
|
53de2a845dab2b69929819757c44017a834497c6
|
Shell
|
ravisantoshgudimetla/console-operator
|
/dev_run_local.sh
|
UTF-8
| 2,484
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Deploy the console-operator manifests in order, build the operator
# binary, then run the operator locally against the cluster.

# we need to be system admin to install these
oc login -u system:admin

# this just deploys everything under /manifests,
# but tries to space them out a bit to avoid errors.
# in the end, it creates a custom resource to kick
# the operator into action

# necessary if doing dev locally on a < 4.0.0 cluster
CLUSTER_OPERATOR_CRD_FILE="./examples/crd-clusteroperator.yaml"
echo "creating ${CLUSTER_OPERATOR_CRD_FILE}"
oc create -f "${CLUSTER_OPERATOR_CRD_FILE}"

# examples/cr.yaml is not necessary as the operator will create
# an instance of a "console" by default.
# use it if customization is desired.

# Create every manifest whose file name matches the given pattern
# (e.g. '00-*'). Factors out the five identical find/oc-create loops.
create_manifests() {
    local file
    for file in `find ./manifests -name "$1"`; do
        echo "creating ${file}"
        oc create -f "$file"
    done
}

create_manifests '00-*'
sleep 1
create_manifests '01-*'
sleep 2

# use the openshift-console project, for
# - when we create the CR (namespace: should be defined in the resource anyway)
# - when we run the operator locally.
oc project 'openshift-console'

create_manifests '02-*'
sleep 1
create_manifests '03-*'
sleep 1
create_manifests '04-*'
sleep 1

# at this point, we should no longer be system:admin
# oc login -u developer -p 12345

# ensure the latest binary has been built
make build

# Don't deploy the operator in `manifests`
# instead, we will instantiate the operator locally
#
#for FILE in `find ./manifests -name '05-*'`
#do
#  echo "creating ${FILE}"
#  oc create -f $FILE
#done

# temporaily add the binary to path so we can call it below
export PATH="$PATH:$HOME/gopaths/consoleoperator/src/github.com/openshift/console-operator/_output/local/bin/darwin/amd64"

#IMAGE=docker.io/openshift/origin-console:latest \
#    console operator \
#    --kubeconfig $HOME/.kube/config \
#    --config examples/config.yaml \
#    --v 4

echo "TODO: support --create-default-console again!"
# TODO: GET BACK TO THIS:
IMAGE=docker.io/openshift/origin-console:latest \
    console operator \
    --kubeconfig $HOME/.kube/config \
    --config examples/config.yaml \
    --create-default-console \
    --v 4

# NOT creating the CR as the operator should create one automatically.
# echo "Creating the CR to activate the operator"
# oc create -f "./examples/cr.yaml"
| true
|
03dffd6a15d87276572e095beebc713351c8eaeb
|
Shell
|
amerinero/Scripts
|
/s3sync-lexon.sh
|
UTF-8
| 377
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Sync each top-level folder of the Lexon document repository to S3.
REPOSITORY=/data/webs/lexon/public_html/repository/documents
S3BUCKET=s3://lexon-pre-repository
LOGFILE=lexon-sync-aws.log
# BUGFIX: abort when the repository is missing; otherwise the find/sync
# below would operate on whatever the current directory contains.
cd "${REPOSITORY}" || exit 1
# Top-level directories only; the grep drops the "." entry for $PWD.
DATADIRS=$(find . -maxdepth 1 -type d -printf "%f \n" | grep -v "^\..$")
for FOLDER in ${DATADIRS}
do
    echo "${FOLDER} ---> ${S3BUCKET}/${FOLDER}"
    aws s3 sync "${FOLDER}" "${S3BUCKET}/${FOLDER}" --delete --only-show-errors
done
| true
|
c7221f2b2e906a6de0df650849e38d84132e6924
|
Shell
|
LeilyR/snakemake_workflows
|
/workflows/scRNAseq/scRNAseq_QC_metrics.sh
|
UTF-8
| 2,702
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Summarize per-cell counts for an scRNA-seq run.
#   $1 - input directory (either *.cout{b,c}.csv or *.featureCounts_summary.txt)
#   $2 - output directory; per-sample summaries land in $2/data/
dir_in=$1
dir_out=$2
dir_in=$(readlink -m $dir_in)
mkdir -p $dir_out/data
#for i in $dir_in/*cout{b,c}.csv; do
#    out=$(echo $i | sed 's/.*\///');
#    out2=$(echo $out | sed 's/\.csv$//');
#    echo $out 1>&2;
#    cat $i | awk -v file=$out '{if (NR==1) {next;}; for (i=2;i<=NF;i++) COUNTS[i-1]+=$i;} \
#             END{for (i=1;i<=192;i++){if (i<=96) sum1+=COUNTS[i];else sum2+=COUNTS[i];} if (sum1>sum2) offset=1; else offset=97; \
#             for (i=offset;i<offset+96;i++) {OFS="\t";print file,i,COUNTS[i];sum+=COUNTS[i];}}' >$dir_out/data/$out.cellsum;
#done
# No featureCounts summaries present: derive per-cell sums from the raw
# coutc (reads) / coutb (UMIs) CSV matrices instead.
if test -z "$(find $dir_in/ -maxdepth 1 -name '*.featureCounts_summary.txt')"; then
    for i in $dir_in/*.cout{b,c}.csv; do
        type=$(echo $i | sed 's/.*\///' | sed 's/.*\.\(cout.\)\.csv$/\1/'); ## type = "coutc" or "coutb"
        sample=$(echo $i | sed 's/.*\///' | sed 's/\.cout.\.csv$//'); ## sample name without ending
        echo $sample 1>&2;
        # Column-wise sums over all features: COUNTS[cell] accumulates
        # column cell+1; header row (NR==1) fixes the cell count.
        cat $i | awk -v sample=$sample -v type=$type '{
        if (NR==1) {cells = NF-1; next;};
        for (i=2;i<=NF;i++) COUNTS[i-1]+=$i;
        }
        END{
        #match(sample,"([^[:space:]\\.]+)\\.([^[:space:]\\.]+).csv",name)
        if (type~"coutc") print "sample\tcell_idx\tREADS_UNIQFEAT";
        else print "sample\tcell_idx\tUMI";
        for (i=1;i<=cells;i++) {
        OFS="\t";print sample,i,COUNTS[i];
        }
        }' > $dir_out/data/$sample.$type.cellsum;
    done
    # Merge the per-sample coutc and coutb summaries side by side.
    for i in $dir_out/data/*.coutc.cellsum; do
        coutb=$(echo $i | sed 's/\.coutc\.cellsum$/\.coutb\.cellsum/')
        sample=$(echo $i | sed 's/.*\///' | sed 's/\.cout.\.cellsum$//'); ## sample name without ending
        paste $i $coutb | cut -f1-3,6 > $dir_out/data/$sample.cellsum;
    done
    rm $dir_out/data/*.cout{b,c}.cellsum;
else
    # featureCounts summaries available: extract the per-cell table and
    # the library-level "#LIB" line directly.
    for i in $dir_in/*.featureCounts_summary.txt; do
        out=$(echo $i | sed 's/.*\///' | sed 's/\.featureCounts_summary.txt//');
        cat $i | sed -n -e '/sample.cell_idx.READS/,/#LIB/{{/#LIB/d;p}}' > $dir_out/data/$out.cellsum;
        cat $i | grep "^#LIB" | tr -d "#" | sed -e 's/^/'$out'\t/' > $dir_out/data/$out.libsum;
    done
fi
## with header!
#paste <(echo -e "sample\tcell\tcell_reads"; cat $dir_out/data/*coutc*.cellsum | sort -k1,1 -k2,2n -V ) <(echo -e "sample2\tcell2\tcell_transcripts"; cat $dir_out/data/*coutb*.cellsum | sort -k1,1 -k2,2n -V) > $dir_out/all_samples.cellsum_coutc_coutb.tsv
## summary per sample/lib
#cat $dir_out/all_samples.cellsum_coutc_coutb.tsv | awk '{if (NR<=2) last=$1; \
#     if ($1==last) {sum1+=$3; sum2+=$6;} else {print last,sum1,sum2,sum2/sum1,sum1/96,sum2/96; sum1=0; sum2=0}; last=$1} \
#     END{print last,sum1,sum2,sum2/sum1,sum1/96,sum2/96;}'
#> $dir/Results/QC_report.all_samples.tsv
#cat $dir/Results/QC_report.all_samples.tsv | column -t > $dir/Results/QC_report.all_samples.txt
| true
|
fc6d0632038dda1ffe58bff33553cea6efa4cef1
|
Shell
|
La-Lojban/lojban-spell-check
|
/createOpera.sh
|
UTF-8
| 580
| 3.46875
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
#
# Creates a LibreOffice/OpenOffice extension (spell-check only) Lojban.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # Script dir
source "$DIR/config.sh" || exit 4
# --------------------------------------------------------------------------------
dir="opera"
# Copy affix file, and generated dictionary
cp "$curLang.dic" "$dir/"
cp "$curLang.aff" "$dir/"
# Update/copy license & description
cp "LICENSE" "$dir/license.txt"
cp "libreoffice-oxt/package-description.txt" "$dir/README_$curLang.txt"
# Zip it
# BUGFIX: guard the cd — if it failed, "zip -rq9 ... *" would archive
# the entire current directory instead of the extension payload.
cd "$dir" || exit 1
zip -rq9 "../$curLang.zip" *
cd -
| true
|
d82b742ae7ed55eb445a69e72c17549f326cc248
|
Shell
|
jay-ucla/jvmmethodinvocations
|
/latency-sample-projects/run.sh
|
UTF-8
| 545
| 3.171875
| 3
|
[] |
no_license
|
# Run a project's Maven test suite once, optionally with a JVMTI agent.
#   $1 - project directory
#   $2 - agent library file name, or "_" for a baseline run with no agent
# Appends the Maven "Total time" value to the ./time CSV.
run_headless(){
    echo Starting "$1" "$2"
    # BUGFIX: abort when the project dir is missing; continuing used to
    # run mvn (and the trailing "cd ..") from the wrong directory.
    cd "$1" || return 1
    # BUGFIX: quote $2 and use POSIX '=' — the unquoted bashism
    # [ $2 == "_" ] breaks on empty/space-containing arguments.
    if [ "$2" = "_" ]; then
        export AGENTPATH=""
    else
        export AGENTPATH="-agentpath:/home/jay/openjdktest/test/jvmti-builds/$2"
    fi
    echo $AGENTPATH
    mvn -fn test > "test_output_$2.log"
    TIME=$(awk '/Total time/' "test_output_$2.log" | awk -F': ' '{print $2}')
    echo Finished "$1" with ${TIME}
    # Use a fixed format string so TIME can never be misread as one.
    printf '%s,' "$TIME" >> time
    cd ..
}
# For every project directory: one baseline run ("_"), then one run per
# agent listed in ./libs, then terminate that project's CSV line in
# its ./time file. NOTE(review): assumes directory names contain no
# whitespace (unquoted ${line}) — confirm against the project layout.
ls -d */ | while read line
do
    run_headless ${line} "_"
    cat libs | while read lib
    do
        run_headless ${line} ${lib}
    done
    cd ${line}
    printf "\n" >> time
    cd ..
done
| true
|
2e3f9eccae11dd6addfc0259157e30b63dbd48d6
|
Shell
|
thohal/openqrm
|
/trunk/src/plugins/equallogic-storage/include/openqrm-plugin-equallogic-storage-functions
|
UTF-8
| 2,120
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# this is the functions file for the equallogic-storage-plugin
#
# This file is part of openQRM.
#
# openQRM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation.
#
# openQRM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with openQRM. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2009, Matthias Rechenburg <matt@openqrm.com>
#
# some definitions
DEFAULT_IMAGE_SIZE=5000
EQUALLOGIC_STORAGE_MGMT_LOCK_FILE=/tmp/equallogic-storage-plugin.lock
EQUALLOGIC_STORAGE_MGMT_LOCK_DELAY=5
EQUALLOGIC_STORAGE_MGMT_LOCK_RETRY=100
EQUALLOGIC_STORAGE_MGMT_LOCK_TIMEOUT=500
IMAGE_LUN=1
################ common equallogic-storage functions
# logs for equallogic_storage_storage
# Write a tagged message to syslog for the equallogic-storage plug-in.
#   $1   - component name
#   $2.. - message text
function equallogic_storage_storage_log() {
	local component=$1
	shift
	logger -i -t "equallogic-storage plug-in" "$component : $*"
}
# locking function
function equallogic_storage_lock() {
local COMMAND=$1
local RET=1
if [ ! -d `dirname $EQUALLOGIC_STORAGE_MGMT_LOCK_FILE` ]; then
mkdir -p `dirname $EQUALLOGIC_STORAGE_MGMT_LOCK_FILE`
fi
case "$COMMAND" in
aquire)
lockfile -s $EQUALLOGIC_STORAGE_MGMT_LOCK_DELAY -r $EQUALLOGIC_STORAGE_MGMT_LOCK_RETRY -l $EQUALLOGIC_STORAGE_MGMT_LOCK_TIMEOUT $EQUALLOGIC_STORAGE_MGMT_LOCK_FILE
RET=$?
equallogic_storage_storage_log equallogic_storage_storage_lock "Aquired the lock"
return $RET
;;
release)
/bin/rm -f $EQUALLOGIC_STORAGE_MGMT_LOCK_FILE
RET=$?
equallogic_storage_storage_log equallogic_storage_storage_lock "Released the lock"
return $RET
;;
*)
equallogic_storage_storage_log equallogic_storage_storage_lock "No such command. Try 'aquire' or 'release'"
return 1
;;
esac
}
################ equallogic-storage functions
| true
|
a236d07b4fcb161795b027bfe7c4217d8adaf519
|
Shell
|
ayj/mixer
|
/bin/linters.sh
|
UTF-8
| 1,718
| 3.015625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Runs all requisite linters over the whole mixer code base.
set -e
# Install/refresh the lint toolchain (gometalinter + sub-linters,
# buildifier, codecoroner) and regenerate the Go symlink tree from bazel.
prep_linters() {
    echo Preparing linters
    go get -u github.com/alecthomas/gometalinter
    go get -u github.com/bazelbuild/buildifier/buildifier
    go get -u github.com/3rf/codecoroner
    gometalinter --install >/dev/null
    bin/bazel_to_go.py
}
# Check BUILD files with buildifier, then run the enabled gometalinter
# suite over the whole tree.
run_linters() {
    echo Running linters
    buildifier -showlog -mode=check $(find . -name BUILD -type f)

    # TODO: Enable this once more of mixer is connected and we don't
    # have dead code on purpose
    # codecoroner funcs ./...
    # codecoroner idents ./...

    # Explicit allow-list of linters (--disable-all, then --enable each).
    gometalinter --deadline=300s --disable-all\
        --enable=aligncheck\
        --enable=deadcode\
        --enable=errcheck\
        --enable=gas\
        --enable=goconst\
        --enable=gofmt\
        --enable=goimports\
        --enable=golint --min-confidence=0 --exclude=.pb.go --exclude="should have a package comment" --exclude="adapter.AdapterConfig"\
        --enable=gosimple\
        --enable=ineffassign\
        --enable=interfacer\
        --enable=lll --line-length=160\
        --enable=misspell\
        --enable=staticcheck\
        --enable=structcheck\
        --enable=unconvert\
        --enable=unused\
        --enable=varcheck\
        --enable=vetshadow\
        ./...

    # TODO: These generate warnings which we should fix, and then should enable the linters
    #    --enable=dupl\
    #    --enable=gocyclo\
    #
    # This doesn't work with our source tree for some reason, it can't find vendored imports
    #    --enable=gotype\
}
# NOTE(review): this "set -e" duplicates the one at the top of the file.
set -e
SCRIPTPATH=$( cd "$(dirname "$0")" ; pwd -P )
ROOTDIR=$SCRIPTPATH/..
# Run from the repository root so relative paths (bin/, manifests) resolve.
cd $ROOTDIR

prep_linters

run_linters

echo Done running linters
| true
|
eb78b5e5a5124b8aa7c134dcd6c7032779218d77
|
Shell
|
tomkulish/docker-elasticsearch
|
/run.sh
|
UTF-8
| 1,695
| 3.875
| 4
|
[] |
no_license
|
#!/bin/sh
BASE=/elasticsearch
run_as_other_user_if_needed() {
if [[ "$(id -u)" == "0" ]]; then
# If running as root, drop to specified UID and run command
exec chroot --userspec=1000 / "${@}"
else
# Either we are running in Openshift with random uid and are a member of the root group
# or with a custom --user
exec "${@}"
fi
}
# allow for memlock
ulimit -l unlimited
sysctl -w vm.max_map_count=262144

# Set a random node name if not set.
if [ -z "${NODE_NAME}" ]; then
  NODE_NAME=$(uuidgen)
fi
export NODE_NAME=${NODE_NAME}

# Prevent "Text file busy" errors
sync

# Install any comma-separated plugins from ES_PLUGINS_INSTALL that are
# not already present.
if [ ! -z "${ES_PLUGINS_INSTALL}" ]; then
  OLDIFS=$IFS
  IFS=','
  for plugin in ${ES_PLUGINS_INSTALL}; do
    if ! $BASE/bin/elasticsearch-plugin list | grep -qs ${plugin}; then
      yes | $BASE/bin/elasticsearch-plugin install --batch ${plugin}
    fi
  done
  IFS=$OLDIFS
fi

# Shard-allocation awareness: SHARD_ALLOCATION_AWARENESS_ATTR names a
# file whose content becomes the node attribute value.
if [ ! -z "${SHARD_ALLOCATION_AWARENESS_ATTR}" ]; then
  # this will map to a file like  /etc/hostname => /dockerhostname so reading that file will get the
  #  container hostname
  if [ "$NODE_DATA" == "true" ]; then
    ES_SHARD_ATTR=`cat ${SHARD_ALLOCATION_AWARENESS_ATTR}`
    NODE_NAME="${ES_SHARD_ATTR}-${NODE_NAME}"
    echo "node.attr.${SHARD_ALLOCATION_AWARENESS}: ${ES_SHARD_ATTR}" >> $BASE/config/elasticsearch.yml
  fi
  if [ "$NODE_MASTER" == "true" ]; then
    echo "cluster.routing.allocation.awareness.attributes: ${SHARD_ALLOCATION_AWARENESS}" >> $BASE/config/elasticsearch.yml
  fi
fi

# run
chown -R elasticsearch:elasticsearch $BASE
chown -R elasticsearch:elasticsearch /data
run_as_other_user_if_needed $BASE/bin/elasticsearch -v
| true
|
822e69d2c5ee0e79fce0eab01ad986537f6fc297
|
Shell
|
darrinwillis/devenv
|
/bash_aliases
|
UTF-8
| 755
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
# Repeatedly run a command, logging each iteration to stderr, until it
# fails (non-zero exit). With no arguments, prints usage and returns 1.
loop ()
{
    if [ "$*" = "" ]; then
        # BUGFIX: plain echo does not interpret \n/\t (prints them
        # literally on most shells); printf renders them as intended.
        printf 'Usage:\n\tloop <args>\n' 1>&2
        return 1
    fi
    local count=1
    while true; do
        echo "$(date): Running iteration $count: $*" 1>&2
        "$@"
        if [ "$?" != "0" ]; then
            echo "$(date): $* failed at iteration $count" 1>&2
            break
        else
            echo "$(date): $* passed at iteration $count" 1>&2
        fi
        count=$(( count + 1 ))
    done
}
# Project navigation shortcuts.
alias v="cd $HOME/dev/rust/vrproto/"
# cargo-watch loops: re-run check/tests on change, clipped to one screen.
alias c_w='cargo watch -s "cargo check --color always 2>&1 | head -63" -c -q'
alias c_t='cargo watch -s "cargo test --color always 2>&1| head -63" -c -q'
alias t="cd $HOME/dev/rust/threes"
alias s="cd $HOME/dev/space_survivors"
# Jump to space_survivors (via the 's' alias) and launch it.
alias r="(s;./wsl-launch.sh)"
| true
|
c0bef43a72c404a04f74fd20991681e19bbcdab7
|
Shell
|
DnZmfr/spinnaker-canary-demo
|
/scripts/prepare.sh
|
UTF-8
| 2,679
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
# Print a message prefixed with ". ", emphasized with terminal bold codes.
bold() {
  printf '%s %s %s\n' ". $(tput bold)" "$*" "$(tput sgr0)"
}
# Must run as root: everything below installs system packages/binaries.
if [ $USER != "root" ]; then
  bold "This script should be executed as root."
  exit 1
fi

# Each section below is idempotent: install only when the tool is absent.
if [ ! -f /usr/local/bin/kubectl ]; then
  bold "Install kubectl cli..."
  curl -o /usr/local/bin/kubectl -sLO https://amazon-eks.s3.us-west-2.amazonaws.com/1.19.6/2021-01-05/bin/linux/amd64/kubectl
  chmod +x /usr/local/bin/kubectl
else
  bold "kubectl cli already installed."
fi

if [ ! -f /usr/local/bin/k9s ]; then
  bold "Install k9s cli..."
  curl -sLO https://github.com/derailed/k9s/releases/latest/download/k9s_$(uname -s)_$(uname -m).tar.gz
  tar xf k9s_$(uname -s)_$(uname -m).tar.gz k9s
  rm -f k9s_$(uname -s)_$(uname -m).tar.gz
  mv k9s /usr/local/bin/
else
  bold "k9s cli already installed."
fi

if [ ! -f /usr/local/bin/aws-iam-authenticator ]; then
  bold "Install aws-iam-authenticator cli..."
  curl -o /usr/local/bin/aws-iam-authenticator -sLO https://amazon-eks.s3.us-west-2.amazonaws.com/1.19.6/2021-01-05/bin/linux/amd64/aws-iam-authenticator
  chmod +x /usr/local/bin/aws-iam-authenticator
else
  bold "aws-iam-authenticator cli already installed."
fi

bold "Install awscli..."
yum install -y python3-pip
pip3 install awscli --upgrade --user

if [ ! -f /usr/local/bin/eksctl ]; then
  bold "Install eksctl cli..."
  curl -sL "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
  mv /tmp/eksctl /usr/local/bin/
  chmod +x /usr/local/bin/eksctl
  rm -f eksctl_$(uname -s)_amd64.tar.gz
else
  bold "eksctl cli already installed."
fi

bold "Install java..."
yum install -y java-11-amazon-corretto.x86_64

if [ ! -f /usr/local/bin/hal ]; then
  bold "Install Halyard..."
  curl -sO https://raw.githubusercontent.com/spinnaker/halyard/master/install/debian/InstallHalyard.sh
  bash InstallHalyard.sh -y --user ec2-user
  rm -f InstallHalyard.sh
else
  bold "Halyard cli already installed."
fi

if [ ! -f /usr/local/bin/spin ]; then
  bold "Install spin cli..."
  curl -o /usr/local/bin/spin -sLO https://storage.googleapis.com/spinnaker-artifacts/spin/$(curl -s https://storage.googleapis.com/spinnaker-artifacts/spin/latest)/linux/amd64/spin
  chmod +x /usr/local/bin/spin
else
  bold "spin cli already installed."
fi

if [ ! -f /usr/bin/docker ]; then
  bold "Install docker..."
  yum install -y docker
  systemctl enable docker
  systemctl start docker
else
  bold "docker already installed."
fi

if [ ! -f /usr/local/bin/helm ]; then
  bold "Install helm cli and add some repos..."
  curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
else
  bold "helm cli already installed."
fi
|
8af26f62e384134585dcf5ae17ec68e2a53b2011
|
Shell
|
teju85/programming
|
/euler/status.sh
|
UTF-8
| 223
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# COMMENTS:
#  Utility script to check the current status
#
# USAGE:
#  ./status.sh
#
# Count files with globs instead of parsing "ls" output; nullglob makes
# an empty directory count as 0 instead of leaving the pattern literal
# (the old `ls ... | wc -l` also printed an error when nothing matched).
shopt -s nullglob
solved_files=(solved/*.cpp)
unsolved_files=(*.cpp)
shopt -u nullglob
solved=${#solved_files[@]}
unsolved=${#unsolved_files[@]}
echo "NUM SOLVED: $solved"
echo "NUM UNSOLVED: $unsolved"
| true
|
23f44c43784a12c735a3883e3ae77111d71d83a0
|
Shell
|
impact48/PurpleFriday
|
/remote_deploy.sh
|
UTF-8
| 2,915
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#################################################
# deploy.sh
# ---------
#
# Deployment script on host for PurpleFriday software.
#
#################################################
# ERR-trap handler: print the failing command, its location and a call
# stack, then exit.
#   $1 - $LINENO at the trap site.
# NOTE(review): "exit $1" uses the *line number* as the exit status,
# which truncates modulo 256 — confirm whether a fixed code was intended.
function error_handler()
{
  local l_save_command=${l_save_command:-$BASH_COMMAND}
  local l_save_line=${l_save_line:-$1}
  local l_save_source=${l_save_source:-${BASH_SOURCE[1]}}
  echo ""
  echo "==================================="
  echo "An error has occurred... Exiting..."
  echo ""
  echo "[Command: ${l_save_command} Function: ${FUNCNAME[1]} Line: ${l_save_line} in ${l_save_source}]"
  echo ""
  echo "---------- Stack Trace ------------"
  for IDX in ${!FUNCNAME[*]} ; do
    echo "[$IDX] Function: ${FUNCNAME[$IDX]} in ${BASH_SOURCE[$IDX]}"
  done
  echo "==================================="
  exit $1
}
# Install docker.io via apt unless the docker CLI is already on PATH.
function install_docker()
{
  # Consistency fix: declare the temporaries local, as the sibling
  # install_docker_compose/set_up_firewall helpers do, instead of
  # leaking l_app/l_app_path into the global scope.
  local l_app="docker"
  local l_app_path="$(which ${l_app})" || true
  if [[ -n ${l_app_path// } ]] ; then
    echo "${l_app} already installed..."
    return
  else
    echo "${l_app} not installed..."
  fi
  yes | apt-get update
  yes | apt install docker.io
  docker --version
}
# Install docker-compose 1.24.1 from GitHub releases unless already
# present; replaces any apt-packaged version.
function install_docker_compose()
{
  local l_app="docker-compose"
  local l_app_path="$(which ${l_app})" || true
  if [[ -n ${l_app_path// } ]] ; then
    echo "${l_app} already installed..."
    return
  fi
  yes | apt purge docker-compose
  yes | curl -L https://github.com/docker/compose/releases/download/1.24.1/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
  chmod +x /usr/local/bin/docker-compose
  ln -sf /usr/local/bin/docker-compose /usr/bin/docker-compose
  docker-compose --version
}
# Install and configure ufw (allow ssh/http/https) plus fail2ban,
# skipping everything when ufw is already installed.
function set_up_firewall()
{
  local l_app="ufw"
  local l_app_path="$(which ${l_app})" || true
  echo "${l_app} path [${l_app_path}]"
  if [[ -n ${l_app_path// } ]] ; then
    echo "${l_app} already installed..."
    return
  fi
  yes | apt install ufw
  yes | apt autoremove
  yes | ufw allow ssh
  yes | ufw allow http
  yes | ufw allow https
  yes | apt install fail2ban
}
# Main deployment steps: prerequisites, runtime directories, then load
# the pre-shipped PurpleFriday images into Docker.
function take_action()
{
  # Ensure firewall is installed
  set_up_firewall

  # Ensure docker is installed
  install_docker
  install_docker_compose

  # Create required directories (if they don't already exist)
  if [[ ! -d "./_datastore" ]] ; then mkdir _datastore ; fi
  if [[ ! -d "./logs" ]] ; then mkdir logs ; fi
  if [[ ! -d "./app" ]] ; then mkdir app ; fi

  # Load the PurpleFriday tar files into Docker.
  docker load -i purplefriday_wa.tar
  docker load -i purplefridaytweetlistener.tar
}
# Entry point: enable strict error handling (with the ERR trap routed
# to error_handler) before running the deployment.
function main()
{
  set -e -o errtrace # Set up error trapping
  trap 'error_handler $LINENO' ERR
  set -u # Trigger a bad state if any env variables are undefined
  take_action
}

main "$@"
| true
|
e89ce841c5407998d5119cd48e41383fad4672b5
|
Shell
|
Vinotha16/WIN_ROLLBACK
|
/templates/linux_actualfacts/ubuntu18.04/fat_1118_actual.fact
|
UTF-8
| 365
| 3.09375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Fact script: report whether the vfat kernel module is disabled.
# Emits JSON {"fat_1118_actual": <value>} — presumably consumed as an
# Ansible custom fact (TODO confirm against the playbook).
cmd=$(cat /etc/modprobe.d/vfat.conf | grep "install vfat /bin/true" | paste -sd "," -)
# Fail (empty fact, exit 1 on the compliant path below) when either the
# modprobe config lacks the disable rule or the module is loaded.
if [ $(sudo modprobe --showconfig 2>/dev/null | grep vfat | grep "install vfat /bin/true" | wc -l) -eq 0 ] || [ $(sudo lsmod | grep vfat | wc -l) -ne 0 ]; then
    echo "{ \"fat_1118_actual\" : \"\" }"
else
    echo "{ \"fat_1118_actual\" : \"${cmd}\" }"
    exit 1
fi
| true
|
c98de10324c2609afa3d80780e20dc2e65b3ef82
|
Shell
|
tmtxt/truongtx-ansible
|
/minecraft.sh
|
UTF-8
| 302
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# BUGFIX: the [[ ]] test below is a bash construct; the previous "sh"
# shebang could invoke a POSIX shell (e.g. dash) that rejects it.

# Run the minecraft ansible playbook.
# $1: list of tags separated by comma (optional — all tasks when empty)
function run_playbook {
    if [[ -n "$1" ]]; then
        ansible-playbook -i hosts --ask-vault-pass --tags $1 minecraft.yml
    else
        ansible-playbook -i hosts --ask-vault-pass minecraft.yml
    fi
}

# trigger tag run
run_playbook run
| true
|
f69a2c9c6000f7dfb6f0512041084f58c8f3bf24
|
Shell
|
ludar/make-nginx
|
/bin/sources.sh
|
UTF-8
| 1,127
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Fetch nginx+modules sources based on etc/versions
APP_PATH=$(dirname "$0" | xargs -I{} readlink -m {}/..)
. $APP_PATH/share/lib.sh
# Print usage and terminate via bye() (provided by share/lib.sh).
function usage() {
    bye Usage: $(basename "$0") sources_dir
}
# Require the destination directory argument.
[[ -n "$1" ]] || usage

# Important here to ensure all read/write in the set of tools is done by the same user
root_only

sources_dir=$(readlink -m "$1")

# Check for dir existence
[[ -d "$sources_dir" ]] || bye_no the dir doesnt exist
q cd "$sources_dir" || bye_no cant chdir to the dir

# versions.sh defines ng_version plus the ng_modules/ng_sources
# associative arrays used below.
. $APP_PATH/etc/versions.sh

ng_name=nginx-$ng_version

# Get nginx sources
download http://nginx.org/download/$ng_name.tar.gz $ng_name.tar.gz || exit
tar xf $ng_name.tar.gz || exit
mkdir -p $ng_name/extra || exit

# Fetch each module tarball; "{}" in the source URL template is replaced
# by the module version.
for module in ${!ng_modules[@]}; do
    version=${ng_modules["$module"]}
    name=$module-$version
    source=${ng_sources["$module"]}
    download ${source/\{\}/$version} $name.tar.gz || exit
    tar xf $name.tar.gz || exit
    # Overwrite existing symlink if any
    ln -sfn ../../$name $ng_name/extra/$module || exit
done

# Summary of what was fetched.
echo nginx: $ng_version
for module in ${!ng_modules[@]}; do
    echo $module: ${ng_modules["$module"]}
done
| true
|
7feb3975692a694c7d28ccc534ff9cdc5b5623a8
|
Shell
|
marcellinamichie291/misc
|
/util/adjust.sh
|
UTF-8
| 196
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# BUGFIX: shebang was "#/bin/bash" (missing '!'), so the kernel treated
# the line as a plain comment and could not pick the interpreter.
#
# Rename the i-th .srt subtitle in $1 to match the i-th .avi file,
# swapping the extension to ".pt-br.srt".
FOLDER=$1
# Globs (with nullglob) instead of parsing `ls`: handles spaces in
# names and skips the loop cleanly when nothing matches.
shopt -s nullglob
srt=("$FOLDER"*.srt)
avi=("$FOLDER"*.avi)
i=0
for video in "${avi[@]}"; do
    # NOTE: as in the original, the sed replaces every "avi" substring,
    # not just the extension.
    mv "${srt[$i]}" "$(echo "${avi[$i]}" | sed -e 's/avi/pt-br.srt/g')" -v
    i=$((i + 1))
done
| true
|
8786f27cfa9e553fe6319737a98338042d3a644a
|
Shell
|
hiromzn/pgtools
|
/runarchivebackup.sh
|
UTF-8
| 906
| 3.421875
| 3
|
[] |
no_license
|
# Record an archive-backup mark file in $ARCHIVE_DIR (from postgre.env)
# and report the newest/previous marks.
MYNAME="$0"
MYDIR=`dirname $0`
# Provides ARCHIVE_DIR (and other PostgreSQL settings).
. $MYDIR/postgre.env
DATE_STR="`date +%Y-%m%d-%H%M-%S`"
BACKUP_MARKFILE_BASE="BACKUP_MARK_FILE";
BACKUP_MARKFILE=$ARCHIVE_DIR/${BACKUP_MARKFILE_BASE}-$DATE_STR
echo "execute archive backup : mark_file is $BACKUP_MARKFILE"
touch $BACKUP_MARKFILE
if [ "$?" -ne 0 ]; then
	echo "ERROR: can't touch mark_file : $BACKUP_MARKFILE"
	echo "       check this file or directory permission"
	exit 1;
fi
# First run: seed an "-OLDEST" mark dated 1900 so the prev/last lookup
# below always has at least two entries.
MARKFILE_NUM="`ls -1 $ARCHIVE_DIR/${BACKUP_MARKFILE_BASE}* |wc -l`"
if [ "$MARKFILE_NUM" -le 1 ]; then
	echo "FIRST run : touch -t 190001010101 $ARCHIVE_DIR/${BACKUP_MARKFILE_BASE}-OLDEST"
	touch -t 190001010101 $ARCHIVE_DIR/${BACKUP_MARKFILE_BASE}-OLDEST
fi
# Newest and second-newest mark files by mtime.
LAST_MARKFILE="`ls -t $ARCHIVE_DIR/${BACKUP_MARKFILE_BASE}* |head -1`"
PREV_MARKFILE="`ls -t $ARCHIVE_DIR/${BACKUP_MARKFILE_BASE}* |head -2 |tail -1`"
echo "last:$LAST_MARKFILE"
echo "prev:$PREV_MARKFILE"
exit 0;
| true
|
c3cc58ff5e3067a49d89053052753fd16e3a0d7d
|
Shell
|
Ajedi32/bash_settings
|
/nx.sh
|
UTF-8
| 125
| 2.984375
| 3
|
[] |
no_license
|
# Execute a Node.js bin within the current project.
#   $1   - bin name under ./node_modules/.bin
#   $2.. - arguments passed through
# BUGFIX: quote the bin path and use "$@" so arguments containing
# whitespace (or glob characters) are forwarded intact.
function nx() {
    local bin=$1
    shift
    ./node_modules/.bin/"$bin" "$@"
}
| true
|
0187dd9c941a453490b1a001050d11d85e19d72e
|
Shell
|
alvinkrebs/scripts
|
/TryReadWrite.q
|
UTF-8
| 1,912
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
##
## Soak test: endlessly write a message to a dd-lock service behind a
## Barracuda firewall / load balancer and read it back, alternating the
## "plum" and "apricot" token pairs on each iteration.
##
## Known owner/writer token pairs for reference:
## 96.90.247.82: {"owner_token":"913d503d-6b0e-48ea-49f6-13d7f8a0359f","writer_token":"f3477060-8fd2-4aba-5a3a-8ed7d9833b30"}
## 96.90.247.83: {"owner_token":"f8315a85-a541-4acc-6898-ae44c821e553","writer_token":"9e1cfe57-e749-4e7a-54b8-f424eada56d8"}
## 192.168.1.73: {"owner_token":"dfa7bac3-891e-441d-7de4-a160218eb6dc","writer_token":"31c961ef-1494-42c4-774e-09a94db3420e"}
port=8089
ttl=5
message="$(date) :: hello barracuda firewall and load balancer I am $(hostname) pleased to meet you."
data=$(echo ${message} | base64)

# Each helper selects a target server plus its owner/writer token pair.
home() {
   ## sanity ...
   owner_token=dfa7bac3-891e-441d-7de4-a160218eb6dc
   writer_token=31c961ef-1494-42c4-774e-09a94db3420e
   server=192.168.1.73
}
the83() {
   ## sanity ...
   owner_token=f8315a85-a541-4acc-6898-ae44c821e553
   writer_token=9e1cfe57-e749-4e7a-54b8-f424eada56d8
   server=96.90.247.83
}
the82() {
   server=96.90.247.82
   owner_token=913d503d-6b0e-48ea-49f6-13d7f8a0359f
   writer_token=f3477060-8fd2-4aba-5a3a-8ed7d9833b30
}
apricot() {
   server=96.90.247.82
   owner_token=efb45c75-d9dd-4a85-5157-9ccbdabf6604
   writer_token=a989c229-4053-4363-7894-1fbed508d761
}
plum() {
   server=96.90.247.82
   owner_token=c9d8a096-197e-44eb-6b6f-4c950a2bcb11
   writer_token=c44837e8-cdda-4aa6-62df-6f1f0927529e
}

# Sanity: confirm the payload decodes back to the original message.
decmp=-d
echo ${data} | base64 ${decmp}

cnt=1
while true; do
   # Alternate backends: even iterations use plum, odd use apricot.
   fruit=$(expr ${cnt} % 2)
   if [ ${fruit} -eq 0 ]; then
      plum
   else
      apricot
   fi
   cnt=$(expr ${cnt} + 1)
   date
   # Write. Capture nc's exit status immediately; the original read $? only
   # after the [ -z ... ] test, so it tested the test's own status, not nc's.
   # ${port} replaces the hard-coded 8089 (same value, single source of truth).
   msgid=$(echo '{"cmd":"write-dd","ttl":'${ttl}',"writer_token":"'${writer_token}'","data":"'${data}'"}' | nc ${server} ${port})
   stat=$?
   if [ -z "${msgid}" ]; then
      if [ ${stat} -ne 0 ]; then
         echo write status ${stat}
      fi
   else
      echo got one ${msgid} ...
      if [ ${stat} -ne 0 ]; then
         echo write status ${stat}
      fi
   fi
   # Read back. "${message_back}" is quoted: the JSON reply contains spaces,
   # and the unquoted original made [ -z ... ] a malformed multi-word test.
   message_back=$(echo '{"cmd":"read-entry","owner_token":"'${owner_token}'"}' | nc ${server} ${port})
   stat=$?
   if [ -z "${message_back}" ]; then
      if [ ${stat} -ne 0 ]; then
         echo read status ${stat}
      fi
   else
      echo returns: ${message_back}
   fi
   # Refresh the timestamped payload for the next round.
   message="$(date) :: hello barracuda firewall and load balancer I am $(hostname) pleased to meet you."
   data=$(echo ${message} | base64)
   sleep 1
done
| true
|
676b06a9be5e67ded924a04f4ccb6e2bc7c72cba
|
Shell
|
haojinIntel/OAP
|
/oap-mllib/mllib-dal/build.sh
|
UTF-8
| 555
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Verify that the build environment is complete, print it, and build the
# mllib-dal package with Maven (tests skipped).
#
# Required environment variables: JAVA_HOME, DAALROOT, TBBROOT, CCL_ROOT.

# Check envs for building.  One loop replaces four copy-pasted checks.
# Bug fixed: the original echoed the (empty) variable *value* in the error
# message ("echo $JAVA_HOME not defined!"), so the user was never told
# which variable was missing; we echo the variable *name* instead.
for required in JAVA_HOME DAALROOT TBBROOT CCL_ROOT; do
    if [[ -z ${!required} ]]; then
        echo $required not defined!
        exit 1
    fi
done

echo === Building Environments ===
echo JAVA_HOME=$JAVA_HOME
echo DAALROOT=$DAALROOT
echo TBBROOT=$TBBROOT
echo CCL_ROOT=$CCL_ROOT
echo GCC Version: $(gcc -dumpversion)
echo =============================

mvn -DskipTests clean package
| true
|
dd41c0826362e75dfaee98b5f5bc284fa524102a
|
Shell
|
andrewchilds/overcast
|
/scripts/install/imagemagick
|
UTF-8
| 274
| 3.046875
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash

# Install ImageMagick.
#
# Tested on:
#   Ubuntu 14.04
#
# Usage:
#   overcast run myInstanceOrCluster install/imagemagick

# set -x

# Refuse to proceed unless we are running with root privileges.
[ "$(id -u)" = "0" ] || {
  echo "This script must be run as root." 1>&2
  exit 1
}

aptitude -y install imagemagick

exit 0
| true
|
af66d3ecef0f5f2184f1e283e0e579c789702aee
|
Shell
|
simple-framework/simple_grid_puppet_module
|
/templates/retry_command_wrapper.sh.epp
|
UTF-8
| 2,271
| 4.3125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Retry wrapper: run a shell command up to N times, optionally executing a
# recovery command and sleeping between attempts.  Exits 0 on the first
# successful attempt, 1 when every attempt fails.

# Parse --key=value style options from the argument list.
for i in "$@"
do
case $i in
    --command=*)
    COMMAND="${i#*=}"
    shift
    ;;
    --retries=*)
    NO_OF_RETRIES="${i#*=}"
    shift
    ;;
    --recovery-command=*)
    RECOVERY_COMMAND="${i#*=}"
    shift
    ;;
    --reattempt-interval=*)
    REATTEMPT_INTERVAL="${i#*=}"
    shift
    ;;
    --flag=*)
    FLAG="${i#*=}"
    shift
    ;;
    -h|--help)
    echo "Usage:"
    echo "retry_command_wrapper.sh [--command=<value>] [--recovery-command=<value>] [--reattempt-interval=<value>] [--retries=<value>] [--flag=<value>]"
    printf "\n"
    echo "Options:"
    echo "1. command: REQUIRED; The shell command that needs to be executed"
    echo "2. recovery-command: OPTIONAL; If the command fails, the recovery-command is executed before re-running the command."
    echo "3. retries: OPTIONAL; Number of times the command must be run before giving up."
    echo "4. reattempt-interval: OPTIONAL; Number of seconds to wait before retrial."
    echo "5. flag: OPTIONAL; String to flag the exit code."
    exit 0
    ;;
esac
done

# Validate/default every option independently.  The original used a single
# if/elif chain, so at most ONE default was ever applied per invocation.
if [ -z "$COMMAND" ]
then
    echo "Please specify the command you wish to execute as a string."
    exit 1
fi
if [ -z "$RECOVERY_COMMAND" ]
then
    echo "Using default retry command: sleep 30;"
    RECOVERY_COMMAND='sleep 30'
fi
if [ -z "$NO_OF_RETRIES" ]
then
    echo "Using default number of retries: 3"
    NO_OF_RETRIES=3
fi
if [ -z "$REATTEMPT_INTERVAL" ]
then
    echo "Using default reattempt interval of 0 seconds"
    REATTEMPT_INTERVAL=0
fi
if [ -z "$FLAG" ]
then
    FLAG=''
fi

CURRENT_TRIAL=0
until [ "$CURRENT_TRIAL" -ge "$NO_OF_RETRIES" ]
do
    echo "Executing ${COMMAND} for trial ${CURRENT_TRIAL}"
    # eval preserves quoting inside the command string; the original
    # `$(echo "${COMMAND}")` re-split the string and broke quoted arguments.
    # NOTE: COMMAND comes from the caller's own argv, not untrusted input.
    eval "${COMMAND}"
    EXIT_CODE=$?
    echo "${FLAG} Exit Code: ${EXIT_CODE}"
    if [ $EXIT_CODE -eq 0 ]; then
        exit 0
    fi
    if [[ -n "${RECOVERY_COMMAND}" ]]; then
        echo "Executing Recovery Command ${RECOVERY_COMMAND} for trial ${CURRENT_TRIAL}"
        eval "${RECOVERY_COMMAND}"
        sleep "$REATTEMPT_INTERVAL"
    fi
    CURRENT_TRIAL=$((CURRENT_TRIAL + 1))
done

# Every attempt failed.  (The original's `[ $CURRENT_TRIAL -eq NUM_OF_RETRIES]`
# was a syntax error referencing an undefined variable; the loop can only end
# here after exhausting all retries, so exit 1 unconditionally.)
exit 1
| true
|
79c4698fdc08783b274ed5f930bb036c395d91c6
|
Shell
|
valhakis/master
|
/home/.bashrc
|
UTF-8
| 1,549
| 2.78125
| 3
|
[] |
no_license
|
# Interactive bash configuration: prompt, aliases, and tool environment.

# If not running interactively, don't do anything
[[ $- != *i* ]] && return

export TERM="xterm-256color"
force_color_prompt=yes

# NOTE(review): `ls` is aliased twice; only the second definition survives.
alias ls="ls --color "
alias ls='ls --color=auto'

# Fallback prompt; replaced by make_ps1 below.
PS1='[\u@\h \W]\$ '

alias svim="sudo -E vim "
alias so="source ~/.bashrc"
alias tree="tree -C"
# NOTE(review): duplicate of the `so` alias above -- confirm and remove one.
alias so="source ~/.bashrc"

# Build a colored two-line prompt "[ user@host | path ]" using tput colors.
function make_ps1()
{
    local YELLOW="\[$(tput setaf 3)\]"
    local RESET="\[$(tput sgr0)\]"
    local RED="\[$(tput setaf 9)\]"
    local RED2="\[$(tput setaf 1)\]"
    local RED3="\[$(tput setaf 19)\]"
    local BLUE="\[$(tput setaf 4)\]"
    local WHITE="\[$(tput setaf 195)\]"
    local BLUE2="\[$(tput setaf 12)\]"
    local YELLOW2="\[$(tput setaf 11)\]"
    local YELLOW3="\[$(tput setaf 190)\]"
    local UNAME="${BLUE2}\u${RESET}"
    local HOST="${YELLOW2}\h${RESET}"
    # CDIR is defined but unused below; PATH (full \w) is what the prompt shows.
    local CDIR="${RED}\W${RESET}"
    local PATH="${YELLOW3}\w${RESET}"
    export PS1="${YELLOW}[${RESET} ${UNAME}@${HOST} | ${PATH} ${YELLOW}]${RESET}\n> ${RESET}"
}
make_ps1

# Ruby gems, personal include/lib trees, fzf, and nvm.
export PATH="$(ruby -e 'print Gem.user_dir')/bin:$PATH"
export PATH=$PATH:"$HOME/include"
export C_INCLUDE_PATH=$C_INCLUDE_PATH:"$HOME/include"
export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:"$HOME/include"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"$HOME/lib"
export LIBRARY_PATH=$LIBRARY_PATH:"$HOME/lib"

[ -f ~/.fzf.bash ] && source ~/.fzf.bash

export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"  # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"  # This loads nvm bash_completion

# Use ag (the silver searcher) as fzf's default file lister.
export FZF_DEFAULT_COMMAND='ag -l -g ""'
# export TERM="xterm-color"
|
e668120172dd82062328f7e0094ccac410f383f8
|
Shell
|
gauthier-voron/xmde
|
/scripts/xmde-start
|
UTF-8
| 2,100
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Launch the XMonad window manager.
# Before to launch it, ensures that "xmonad --recompile" has been called
# since the last update of binary, libraries or configuration files
#
# First step: locate the xmonad user configuration file.
# If it does not exist, impossible to launch xmde. Print an error and exit.
# If it exists, get its modification date.
#
if [ ! -d "$HOME/.xmonad" -o ! -f "$HOME/.xmonad/xmonad.hs" ] ; then
    echo "xmde: missing xmonad configuration file '$HOME/.xmonad/xmonad.hs'"
    echo "Please visit 'https://wiki.haskell.org/Xmonad/Config_archive'"
    exit 1
else
    # %Z = last metadata-change time, in seconds since the epoch.
    conftime=$(stat -c '%Z' "$HOME/.xmonad/xmonad.hs")
fi>&2
# Note: the ">&2" routes this whole if-block's output to stderr.

# Second step: locate the xmonad user executable.
# If this executable does not exist, then recompile. Otherwise, get the
# modification date.
#
for file in "$HOME/.xmonad/xmonad-"* ; do
    if [ -f "$file" -a -x "$file" ] ; then
        found="$file"
        break
    fi
done

if [ "x$found" = 'x' ] ; then
    if ! xmonad --recompile ; then
        echo "xmde: cannot recompile xmonad"
        exit 1
    fi >&2
fi
# NOTE(review): if no binary existed before the recompile, $found is still
# empty here and this stat runs on "" -- confirm whether the glob should be
# re-evaluated after a first-time compile.
exectime=$(stat -c '%Z' "$found")

# Third step: compare the executable time and the configuration file time.
# If the configuration file is newer than the executable, then recompile
#
if [ $conftime -gt $exectime ] ; then
    if ! xmonad --recompile ; then
        echo "xmde: cannot recompile xmonad"
        exit 1
    fi >&2
    exectime=$(stat -c '%Z' "$found")
fi

# Fourth step: compare to the package files.
# If the executable is older than any file listed in the packages 'xmonad' or
# 'xmonad-contrib', then recompile.
#
# NOTE(review): the while-loop runs in a pipeline subshell, so this `exit 1`
# only terminates the subshell, not the script -- the script proceeds to
# launch xmonad even when this recompile fails.  Confirm intended behavior.
pacman -Ql 'xmonad' 'xmonad-contrib' | cut -d' ' -f 2 | while read file ; do
    pkgtime=$(stat -c '%Z' "$file")
    if [ $pkgtime -gt $exectime ] ; then
        if ! xmonad --recompile ; then
            echo "xmde: cannot recompile xmonad"
            exit 1
        fi >&2
        break
    fi
done
exectime=$(stat -c '%Z' "$found")

# Fifth step: launch xmonad
# If the --restart option is specified, then only restart xmonad instead of
# launching it.
#
if [ "x$1" = 'x--restart' ] ; then
    exec xmonad --restart
else
    exec xmonad
fi
| true
|
75c5b16346e55f12567ef3a9c78ef5963bead3d5
|
Shell
|
aimanwahdi/Bittorent
|
/bittorensimag/src/test/script/clientOptions.sh
|
UTF-8
| 3,027
| 3.34375
| 3
|
[] |
no_license
|
#! /bin/sh
# Integration tests for the bittorensimag command-line interface.
# Each case invokes the client with a malformed or unusual argument list and
# verifies, via the presence/absence of the downloaded file
# (src/test/outputFolder/Aigle.jpg), that the client rejected bad usage.
# The script exits 1 on the first failed expectation.

# Run from the project root so the relative test paths resolve.
cd "$(dirname "$0")"/../../.. || exit 1
PATH=./src/main/bin:"$PATH"
echo "Working directory :" $(pwd)

# Without options: the client must print usage (non-empty output).
out=$(bittorensimag)
if [ -z "$out" ]; then
    echo "bad response without options should return usage"
    exit 1
else
    echo "good response without option displays usage"
fi

# Banner otion: -b must print a non-empty banner.
banner=$(bittorensimag -b)
if [ -z "$banner" ]; then
    echo "bad response -b should print the banner"
    exit 1
else
    echo "good response -b prints banner"
fi

# Unkown option: -k must be rejected (no download performed).
out=$(bittorensimag -k src/test/exampleTorrents/Aigle.jpg.torrent src/test/outputFolder/ 2>/dev/null)
if [ ! -f src/test/outputFolder/Aigle.jpg ]; then
    echo "good response with unknown option"
else
    echo "bad response shouldn't accept unknown option"
    exit 1
fi
rm -f src/test/outputFolder/Aigle.jpg

# Otions after file: options must precede the torrent/folder arguments.
out=$(bittorensimag src/test/exampleTorrents/Aigle.jpg.torrent src/test/outputFolder/ -d 2>/dev/null)
if [ ! -f src/test/outputFolder/Aigle.jpg ]; then
    echo "good response with option after file"
else
    echo "bad response shouldn't accept options after filename"
    exit 1
fi
rm -f src/test/outputFolder/Aigle.jpg

# Torrent file after folder: argument order must be torrent then folder.
out=$(bittorensimag src/test/outputFolder/ src/test/exampleTorrents/Aigle.jpg.torrent 2>/dev/null)
if [ ! -f src/test/outputFolder/Aigle.jpg ]; then
    echo "good response with torrent after folder"
else
    echo "bad response shouldn't accept torrent after folder"
    exit 1
fi
rm -f src/test/outputFolder/Aigle.jpg

# Multiple Torrent files: only one torrent is accepted per invocation.
out=$(bittorensimag src/test/exampleTorrents/Aigle.jpg.torrent src/test/exampleTorrents/lion-wallpaper.jpg.torrent src/test/outputFolder/ 2>/dev/null)
if [ ! -f src/test/outputFolder/Aigle.jpg ]; then
    echo "good response with multiple torrent files"
else
    echo "bad response shouldn't accept multiple torrent files"
    exit 1
fi
rm -f src/test/outputFolder/Aigle.jpg
rm -f src/test/outputFolder/lion-wallpaper.jpg

# Multiple folders: only one output folder is accepted per invocation.
out=$(bittorensimag src/test/exampleTorrents/Aigle.jpg.torrent src/test/outputFolder/ target/ 2>/dev/null)
if [ ! -f src/test/outputFolder/Aigle.jpg ]; then
    echo "good response with multiple output folders"
else
    echo "bad response shouldn't accept multiple output folders"
    exit 1
fi
rm -f src/test/outputFolder/Aigle.jpg
rm -f target/Aigle.jpg

# # TODO option d
# debug=$(bittorensimag -d src/test/exampleTorrents/Aigle.jpg.torrent src/test/outputFolder/ 2>/dev/null)
# rm -f src/test/outputFolder/Aigle.jpg
# foundDebug=$(echo "$debug" | grep DEBUG)
# if [ -z "$foundDebug" ]
# then
#     echo "bad response -d should output debug"
#     exit 1
# else
#     echo "good response -d"
# fi

# # TODO option i
# info=$(bittorensimag -i src/test/exampleTorrents/Aigle.jpg.torrent src/test/outputFolder/ 2>/dev/null)
# rm -f src/test/outputFolder/Aigle.jpg
# foundInfo=$(echo "$info" | grep Information)
# if [ -z "$foundInfo" ]
# then
#     echo "bad response -i should output info"
#     exit 1
# else
#     echo "good response -i"
# fi
| true
|
001ea7abf6df4e6554c18c1520256ae1ec30caf6
|
Shell
|
socialmetrix/docker-playframework
|
/build-all.sh
|
UTF-8
| 1,062
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build and push Docker images for several Play Framework versions.
# For each version: render Dockerfile.template into <version>-<MY_VERSION>/,
# build a local "play:<tag>" image, smoke-run it, then tag and push the
# image to the socialmetrix/play repository.

# Play versions to build, one per line.
# NOTE(review): `read -d ''` always returns non-zero at EOF; harmless here
# since the script does not use `set -e` -- confirm before adding it.
read -r -d '' PLAY_VERSIONS <<-LIST
1.2.7.2
1.2.6.2
1.2.5.6
1.3.4
1.4.2
LIST

# Suffix appended to every image tag (template revision of this builder).
MY_VERSION=1.0

# Header prepended to each generated Dockerfile to warn against hand edits.
read -r -d '' WARN_HEADER <<-EOM
#
# AUTO-GENERATED - DO NOT EDIT THIS FILE
# EDIT Dockerfile.template AND RUN update.sh
#
# GENERATED: $(date -u)
#
EOM

# Render one Dockerfile per version by substituting __PLAY_VERSION__.
for PLAY_VERSION in $(echo ${PLAY_VERSIONS}); do
  IMAGE_VERSION=${PLAY_VERSION}-${MY_VERSION}
  mkdir -p ${IMAGE_VERSION}
  echo "${WARN_HEADER}" > ${IMAGE_VERSION}/Dockerfile
  sed "s/__PLAY_VERSION__/${PLAY_VERSION}/g" Dockerfile.template >> ${IMAGE_VERSION}/Dockerfile
done

OLD_PWD=$(pwd)
# Build each image and smoke-test it with a throwaway interactive run.
for PLAY_VERSION in $(echo $PLAY_VERSIONS); do
  IMAGE_VERSION=${PLAY_VERSION}-${MY_VERSION}
  cd ${IMAGE_VERSION}
  docker build -t play:${IMAGE_VERSION} .
  cd ${OLD_PWD}
  docker run --rm -it play:${IMAGE_VERSION}
done

docker images play

# Push to socialmetrix/play repo
for PLAY_VERSION in $(echo $PLAY_VERSIONS); do
  IMAGE_VERSION=${PLAY_VERSION}-${MY_VERSION}
  docker tag play:${IMAGE_VERSION} socialmetrix/play:${IMAGE_VERSION}
  docker push socialmetrix/play:${IMAGE_VERSION}
done
| true
|
c5cada114e999c6f60f7d5c51fcd57cde48ba32d
|
Shell
|
klixxezitz/yubikey_lock
|
/yubikey-screen-lock
|
UTF-8
| 2,490
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Script for locking/unlocking screen via yubikey
# Enhanced for GNome Desktops
#
# Requires: - ykinfo (Package ykpers)
#
# Usage: yubikey-screen-lock {enable|disable}
#   enable  - lock the current user's screen (called when the key is removed)
#   disable - unlock it, but only if the inserted key's serial matches the
#             one mapped to the user in /etc/sysconfig/yubikeys.
#

# Map file lines look like "<user> ... 00000-<serial>".
YUBIMAPFILE="/etc/sysconfig/yubikeys"
# Heuristic: first non-root user owning a *session* process is "the" user.
USER=$(ps -ef | grep -v grep | grep -v root | grep session | head -n 1 | awk -F ' ' '{ print $1 }')
# Marker recording that *this script* locked the screen (so `disable`
# never unlocks a screen the user locked manually).
LOCKFILE="/usr/local/bin/yubikey-screen-lock.locked"

# Detect the running desktop environment from the user's processes.
CHECK_GNOME=$(ps -ef | grep -v grep | grep ${USER} | grep gnome-session | wc -l)
if [ ${CHECK_GNOME} -ne 0 ]
then
  DESKTOP="gnome"
  # NOTE(review): SESSIONID is only assigned in the GNOME branch but is read
  # below for the IdleHint check regardless of desktop -- confirm.
  SESSIONID=$(/bin/loginctl list-sessions | grep "seat" | grep ${USER} | awk -F ' ' '{ print $1 }')
fi
CHECK_KDE=$(ps -ef | grep -v grep | grep ${USER} | grep kded | wc -l)
if [ ${CHECK_KDE} -ne 0 ]
then
  DESKTOP="kde"
fi
CHECK_CINNAMON=$(ps -ef | grep -v grep | grep ${USER} | grep cinnamon-session | wc -l)
if [ ${CHECK_CINNAMON} -ne 0 ]
then
  DESKTOP="cinnamon"
fi

case "${1}" in
  enable)
    #We do not want somebody to unlock your manually locked screen be removing and entering the yubikey...
    IS_ALREADY_LOCKED=$(/bin/loginctl show-session ${SESSIONID} | grep "IdleHint" | awk -F '=' '{ print $2 }')
    if [ "${IS_ALREADY_LOCKED}" == "yes" ]
    then
      exit 1
    fi
    #Is this user allowed to lock the screen...
    YUBISERIALFROMFILE=$(grep ${USER} ${YUBIMAPFILE} | awk -F '00000-' '{ print $2 }')
    #YUBISERIALFROMKEY=$(ykinfo -s | awk -F ' ' '{ print $2 }')
    #if [ "${YUBISERIALFROMKEY}" == "${YUBISERIALFROMFILE}" ]
    # --> This does not make sense, as we eject the key in order to lock the screen.
    # --> --> So, 'ykinfo -s' won't work...
    if [ -n "${YUBISERIALFROMFILE}" ]
    then
      # Lock via the desktop-appropriate mechanism and record the marker.
      case "${DESKTOP}" in
        gnome)
          /bin/loginctl lock-session ${SESSIONID}
          touch ${LOCKFILE}
        ;;
        kde)
          /bin/su ${USER} -c "DISPLAY=:0 /usr/bin/xscreensaver-command -activate"
          touch ${LOCKFILE}
        ;;
        cinnamon)
          /bin/su ${USER} -c "DISPLAY=:0 /usr/bin/cinnamon-screensaver-command -a"
          touch ${LOCKFILE}
        ;;
      esac
    fi
  ;;
  disable)
    # Only unlock screens this script locked (marker present) AND only when
    # the inserted key's serial matches the user's mapped serial.
    if [ -e ${LOCKFILE} ]
    then
      YUBISERIALFROMFILE=$(grep ${USER} ${YUBIMAPFILE} | awk -F '00000-' '{ print $2 }')
      YUBISERIALFROMKEY=$(ykinfo -s | awk -F ' ' '{ print $2 }')
      if [ "${YUBISERIALFROMKEY}" == "${YUBISERIALFROMFILE}" ]
      then
        case "${DESKTOP}" in
          gnome)
            /bin/loginctl unlock-session ${SESSIONID}
            rm ${LOCKFILE}
          ;;
          kde)
            /bin/su ${USER} -c "DISPLAY=:0 /usr/bin/xscreensaver-command -deactivate"
            rm ${LOCKFILE}
          ;;
          cinnamon)
            /bin/su ${USER} -c "DISPLAY=:0 /usr/bin/cinnamon-screensaver-command -d"
            rm ${LOCKFILE}
          ;;
        esac
      fi
    fi
  ;;
esac
| true
|
86d3d7df8f7b23d51a3edc6038213ee3462bd15b
|
Shell
|
MW-autocat-script/MW-autocat-script
|
/catscripts/Government/Countries/United_States/US_states/North_Carolina/NorthCarolina.sh
|
UTF-8
| 350
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Auto-categorization rules for "North Carolina".
# Relies on $NEWPAGES plus the debug_start/categorize/debug_end helpers
# provided by the surrounding categorization framework.

# Regex matching article text that refers to North Carolina.
KEYWORDS_NORTHCAROLINA="North(| )Carolina|, N(|\.)C(|\.)\b"
KEYWORDS_NORTHCAROLINA_ALL="$KEYWORDS_NORTHCAROLINA"

# Normal operation: run only when no argument was supplied.
if [ -z "$1" ]
then
	debug_start "North Carolina"
	NORTHCAROLINA=$(egrep -i "$KEYWORDS_NORTHCAROLINA" "$NEWPAGES")
	categorize "NORTHCAROLINA" "North Carolina"
	debug_end "North Carolina"
fi
| true
|
222bfb219d5ead3a5f097dd871273fcefc126089
|
Shell
|
fgarel/myDebianInstall01
|
/usr/sbin/bfaptitudeinstallvim01b.sh
|
UTF-8
| 2,330
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
# Installation et configuration de vim
###########################################
echo "bfaptitudeinstallvim01b.sh : Installation et configuration de vim (Partie 01b)"
mkdir /var/log/bf 2> /dev/null
# on fabrique en premier nos fichiers .local
# puis, on utilisera le script spf13-vim3
# Quand on travaille dans cet ordre, alors l'installation
# (commande BundleInstall) prend en compte nos fichiers .local
# fabrication du fichier ~/.vimrc.local
#######################################
echo '" sous-fichier de configuration de l apparence' > ~/.vimrc.local
echo 'source ~/.vimrc.local.d/theme.vim' >> ~/.vimrc.local
echo '" sous-fichier de configuration de vos fonctions personnelles par exemple' >> ~/.vimrc.local
echo 'source ~/.vimrc.local.d/functions.vim' >> ~/.vimrc.local
# fabrication du sous repertoire ~/.vimrc.local.d
mkdir ~/.vimrc.local.d 2> /dev/null
# theme.vim
echo '" chargment du theme molokai' > ~/.vimrc.local.d/theme.vim
echo 'colorscheme molokai' >> ~/.vimrc.local.d/theme.vim
echo '" chargment du theme solarized' > ~/.vimrc.local.d/theme.vim
echo 'colorscheme solarized' >> ~/.vimrc.local.d/theme.vim
# functions.vim
echo '" chargement des fonctions' > ~/.vimrc.local.d/functions.vim
# fabrication du fichier ~/.vimrc.bundles.local
###############################################
echo "\" Une bonne base d'extension quel que soit votre langage de programmation :" > ~/.vimrc.bundles.local
echo "let g:spf13_bundle_groups=['general', 'neocomplcache', 'programming', 'python', 'javascript', 'html', 'php', 'misc']" >> ~/.vimrc.bundles.local
echo "Bundle 'ZoomWin'" >> ~/.vimrc.bundles.local
echo "\"Bundle 'spf13/vim-colors'" >> ~/.vimrc.bundles.local
echo "Bundle 'ajashton/cascadenik-vim'" >> ~/.vimrc.bundles.local
echo "\"Bundle 'scrooloose/nerdcommenter'" >> ~/.vimrc.bundles.local
echo "Bundle 'vimpager'" >> ~/.vimrc.bundles.local
# recupération de spf13-vim3 et execution du script
###################################################
curl http://j.mp/spf13-vim3 -L -o - | sh
# installation des bundles listés dans ~/.vimrc.bundles.local
#if [ ! -e /var/log/bf/bfaptitudeinstallvim01-vundle02.log ]
#then
# echo " Installation des extensions vim"
# vim +BundleInstall! +BundleClean! +q +q +q
# date +"%F %T" >> /var/log/bf/bfaptitudeinstallvim01-vundle02.log
#fi
| true
|
90a04e7dde66cbc1eb26f4daef4a48ad4532c760
|
Shell
|
LauriU2/skriptimine
|
/praktikum9/yl1
|
UTF-8
| 231
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Print a numbered grid of asterisks, one "N. * * ... " line per row.
# Generalized: the grid size can now be passed as arguments while the
# default remains the original fixed 5x5 output.
#
# Usage: yl1 [rows] [cols]

# print_grid ROWS COLS — outer loop controls how many lines are printed,
# inner loop controls how many stars per line.
print_grid() {
	local rows=$1 cols=$2
	local i j
	for (( i = 1; i <= rows; i++ ))   # outer loop: number of rows
	do
		echo -n "$i. "
		for (( j = 1; j <= cols; j++ ))   # inner loop: stars per row
		do
			echo -n "* "
		done
		echo ""   # print the newline ending the row
	done
}

print_grid "${1:-5}" "${2:-5}"
| true
|
d4a657df678ca13b3c63c07dc3d03684985dd4c8
|
Shell
|
piyush82/vagrant-foreman1.2
|
/src/script.sh
|
UTF-8
| 3,461
| 3.0625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Provision a Foreman 1.2 server (with Puppet 3.x) on Ubuntu, configure
# DHCP/network forwarding, install common Puppet modules and the OpenStack
# (Grizzly) modules, and enable the host-discovery plugin.
# NOTE(review): must run as root -- writes under /etc and /usr/share.

# ensure latest version of puppet (3.X)
wget http://apt.puppetlabs.com/puppetlabs-release-raring.deb
dpkg -i puppetlabs-release-raring.deb

# add foreman installer repo
echo "deb http://deb.theforeman.org/ precise 1.2" > /etc/apt/sources.list.d/foreman.list
wget -q http://deb.theforeman.org/foreman.asc -O- | apt-key add -

#update and upgrade
apt-get update
apt-get upgrade -y
# apt-get dist-upgrade -y

#install foreman
apt-get install -y foreman-installer

# install git
apt-get install -y git

# install puppet module
gem install puppet-module

# setup the dhcp
echo "Setting up DHCP params..."
cp /tmp/files-to-go/proxydhcp.pp /usr/share/foreman-installer/foreman_proxy/manifests/proxydhcp.pp

echo "Setting up Foreman params..."
cp /tmp/files-to-go/answers.yaml /usr/share/foreman-installer/foreman_installer/answers.yaml
#wget -O /usr/share/foreman-installer/foreman/templates/external_node.rb.erb https://raw.github.com/theforeman/puppet-foreman/master/templates/external_node_v2.rb.erb
#wget -O /usr/share/foreman-installer/foreman/templates/foreman-report.rb.erb https://raw.github.com/theforeman/puppet-foreman/master/templates/foreman-report_v2.rb.erb

#install apt-cache
puppet module install markhellewell/aptcacherng --target-dir /usr/share/foreman-installer

echo "Running foreman installer..."
puppet apply --modulepath /usr/share/foreman-installer -v /tmp/files-to-go/install-fman.pp

# Install a boot-time init script that sets up NAT/forwarding rules.
echo "Setting the Net forwarding rules now."
cp /tmp/files-to-go/fwd-traff /etc/init.d/fwd-traff
chmod a+x /etc/init.d/fwd-traff
#set to run on boot
ln -s /etc/init.d/fwd-traff /etc/rc2.d/S96forwardtraffic
# install the rules
/etc/init.d/fwd-traff

#install common modules
puppet module install puppetlabs/ntp --target-dir /etc/puppet/environments/common/modules
git clone http://github.com/joemiller/puppet-newrelic /etc/puppet/environments/common/modules/newrelic

#install stable modules
#puppet module install puppetlabs/openstack
git clone https://github.com/stackforge/puppet-openstack -b stable/grizzly /etc/puppet/environments/production/modules/openstack
cd /etc/puppet/environments/production/modules/openstack
gem install librarian-puppet
librarian-puppet install --verbose --path ../
git clone http://github.com/dizz/icclab-os /etc/puppet/environments/production/modules/icclab

# Point DNS resolution at the local server for the lab domain.
cat > /etc/resolvconf/resolv.conf.d/head << EOF
nameserver 127.0.0.1
search cloudcomplab.dev
EOF
# setup resolv.conf
cat > /etc/resolv.conf << EOF
nameserver 127.0.0.1
search cloudcomplab.dev
EOF

#enable the foreman service to run
sed -i 's/^START=no/START=yes/' /etc/default/foreman

#install host discovery
echo "Installing host discovery plugin"
apt-get install -y libsqlite3-dev squashfs-tools advancecomp
echo "gem 'foreman_discovery', :git => \"https://github.com/theforeman/foreman_discovery.git\"" >> /usr/share/foreman/bundler.d/Gemfile.local.rb
echo "gem 'sqlite3'" >> /usr/share/foreman/bundler.d/Gemfile.local.rb
cd /usr/share/foreman/
bundle update
echo "Building discovery PXE boot image"
rake discovery:build_image mode=prod #takes time
cp /usr/share/foreman/discovery_image/initrd.gz /var/lib/tftpboot/boot/disco-initrd.gz
cp /usr/share/foreman/discovery_image/vmlinuz /var/lib/tftpboot/boot/disco-vmlinuz

# fix puppet common environment
sed -i 's/\/etc\/puppet\/environments\/common/\/etc\/puppet\/environments\/common\/modules/' /etc/puppet/puppet.conf

#start foreman
service foreman start

#clean up apt
apt-get -y autoremove
| true
|
fe0f552ccf76ca4e82753e8ecd9cd2e6193bef80
|
Shell
|
Jeanflo100/ros-isen
|
/installation
|
UTF-8
| 2,619
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Tells the system that the argument following "#!" is the program used to execute this file.
# In general, "#" marks the rest of a line as a comment, as here.

cd ~

# Update the machine
sudo apt-get update
sudo apt-get upgrade
sudo apt-get autoremove
sudo apt-get purge

# Create a simplified command ("maj") for running all the update steps
# NOTE(review): the alias string below is missing its closing single quote,
# and `sudo echo ... >> ~/.bashrc` redirects as the invoking user anyway --
# confirm both before relying on this line.
sudo echo "alias maj='sudo apt-get update && sudo apt-get upgrade && sudo apt-get autoremove && sudo apt-get purge" >> ~/.bashrc

# Install ROS (Kinetic, full desktop variant)
sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'
sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116
sudo apt-get update
sudo apt-get install ros-kinetic-desktop-full
echo "source /opt/ros/kinetic/setup.bash" >> ~/.bashrc
source ~/.bashrc

# Create the ROS smoke-test cheat sheet on the desktop
echo "roscore" >> "Desktop/test ROS"
echo "rosrun turtlesim turtlesim_node" >> "Desktop/test ROS"
echo "rosrun turtlesim turtle_teleop_key" >> "Desktop/test ROS"
echo "" >> "Desktop/test ROS"
echo "La troisième console doit être sélectionnée pour que ça fonctionne" >> "Desktop/test ROS"

# Install Google Chrome
sudo sh -c 'echo "deb [arch=amd64] https://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google-chrome.list'
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
sudo apt-get update
sudo apt-get install google-chrome-stable

# Install the TurtleBot simulator (TurtleBot3 packages + Gazebo deps)
sudo apt-get update
sudo apt-get upgrade
wget https://raw.githubusercontent.com/ROBOTIS-GIT/robotis_tools/master/install_ros_kinetic.sh && chmod 755 ./install_ros_kinetic.sh && bash ./install_ros_kinetic.sh
sudo apt-get install ros-kinetic-joy ros-kinetic-teleop-twist-joy ros-kinetic-teleop-twist-keyboard ros-kinetic-laser-proc ros-kinetic-rgbd-launch ros-kinetic-depthimage-to-laserscan ros-kinetic-rosserial-arduino ros-kinetic-rosserial-python ros-kinetic-rosserial-server ros-kinetic-rosserial-client ros-kinetic-rosserial-msgs ros-kinetic-amcl ros-kinetic-map-server ros-kinetic-move-base ros-kinetic-urdf ros-kinetic-xacro ros-kinetic-compressed-image-transport ros-kinetic-rqt-image-view ros-kinetic-gmapping ros-kinetic-navigation ros-kinetic-interactive-markers
# Clone and build the TurtleBot3 workspaces with catkin.
cd ~/catkin_ws/src/
git clone https://github.com/ROBOTIS-GIT/turtlebot3_msgs.git
git clone https://github.com/ROBOTIS-GIT/turtlebot3.git
cd ~/catkin_ws && catkin_make
cd ~/catkin_ws/src/
git clone https://github.com/ROBOTIS-GIT/turtlebot3_simulations.git
cd ~/catkin_ws && catkin_make
| true
|
4c401cb763559d78304646552c9076dadb3cb76b
|
Shell
|
mkhetale/NodeJsTemplate
|
/src/scripts/route.sh
|
UTF-8
| 1,218
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Scaffold a new Express route: copy the "user" lib/controller/model
# templates, rewrite the entity name inside them, register the route in
# router.js, then remove the generated source files again.
#
# Usage: route.sh [entity]
#   entity defaults to "appointments" (the previously hard-coded value),
#   so existing callers keep the exact old behavior.

export namelo="${1:-appointments}";
# Capitalize the first letter to get the CamelCase identifier.
export nameup="$(tr '[:lower:]' '[:upper:]' <<< ${namelo:0:1})${namelo:1}";
export fromnamelo="user";
export fromnameup="$(tr '[:lower:]' '[:upper:]' <<< ${fromnamelo:0:1})${fromnamelo:1}";

cp libs/${fromnamelo}.js libs/${namelo}.js
sed -i -e "s/${fromnamelo}/${namelo}/g" libs/${namelo}.js
sed -i -e "s/${fromnameup}/${nameup}/g" libs/${namelo}.js
# sed -i '' does not work with other linux distributions
# MacOs by default creates a file with -e as backup
rm -f libs/${namelo}.js-e
echo "Created libs...."

cp controllers/${fromnamelo}Ctlr.js controllers/${namelo}Ctlr.js
sed -i -e "s/${fromnamelo}/${namelo}/g" controllers/${namelo}Ctlr.js
sed -i -e "s/${fromnameup}/${nameup}/g" controllers/${namelo}Ctlr.js
rm -f controllers/${namelo}Ctlr.js-e
echo "Created controller...."

cp models/${fromnamelo}.js models/${namelo}.js
sed -i -e "s/${fromnamelo}/${namelo}/g" models/${namelo}.js
rm -f models/${namelo}.js-e
echo "Created model...."

# Adding route
echo "objApp.use('/projectname/${namelo}', require('./controllers/${namelo}Ctlr.js'));" >> router.js
echo "Route added...."

# Remove files
# NOTE(review): the generated sources are deleted right after creation; only
# the router.js entry survives -- confirm this is intentional.
rm libs/${namelo}.js
rm controllers/${namelo}Ctlr.js
rm models/${namelo}.js
| true
|
16d5c4075ccb1f0cf0533487aaaf0d33ce6f3cdd
|
Shell
|
akash99-code/Shell_Programming
|
/Scripts/p12factorial.sh
|
UTF-8
| 180
| 3.171875
| 3
|
[] |
no_license
|
#
# Write a shell script to find out the factorial of an input.
#

# factorial N — print N! on stdout (prints 1 for N <= 0, matching the
# behavior of the original loop).
factorial() {
	local n=$1
	local result=1
	while test "$n" -ge 1
	do
		result=$((result * n))
		n=$((n - 1))
	done
	echo "$result"
}

echo "Enter A Number"
# -r keeps backslashes in the input literal.
read -r N
echo Factorial = $(factorial "$N")
| true
|
fc0fe8ac57293465b9da45dd5fd4ae249cbcff50
|
Shell
|
Waynelau410/bell5_jaco_kinect2
|
/mpi_attach/best_compile_and_run.sh
|
UTF-8
| 302
| 2.890625
| 3
|
[] |
no_license
|
reset
# Compile the MPI "attachment" program and run its "best" mode on the
# diverse-movement datasets with the requested number of MPI processes.
#
# Usage: best_compile_and_run.sh <cores>

# "$1" is quoted so the test stays well-formed even for values containing
# spaces; the unquoted original only worked by accident when $1 was unset.
if [ -z "$1" ];
then
	echo "must give number of cores to use"
	exit
fi

PROG="attachment"
CORES=$1

# The command strings are deliberately expanded UNQUOTED below: word
# splitting turns them back into argument lists.
COMP="mpicc -o $PROG attachment.c"
RUN="mpirun --host localhost -np $CORES ./$PROG best datasets/diverse_movement_skeleton.csv datasets/diverse_movement_pcl.csv output.csv"

clear; $COMP && $RUN
| true
|
9c5358507ae94b23e8c4c51f8139a7aae145f170
|
Shell
|
woodun/insight_files
|
/hima_scripts/bfloat_exp/exp1_setup_int_data_dbi.sh
|
UTF-8
| 1,161
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
#####################################################
# Generate PBS job scripts for the integer bfloat experiments.
# For each experiment number and each input distribution, render the shared
# template (pbs_exp1_base_int_data_dbi.pbs) by substituting:
#   TBD       -> <dist>_exp<number>    (job/experiment name)
#   DBT       -> <dist>_result<number> (result name)
#   MOTHERDIR -> $mother_dir           (absolute working directory)
# The original repeated the same four-stage pipeline once per distribution;
# the inner loop below produces byte-identical output files.

mother_dir=/sciclone/pscr/hwang07/bfloat_analysis

for number in 1 4 7 10 13 16 19 22 25 28 31
do
   for dist in uniform_int_one_side uniform_int_two_sides normal_int_one_side normal_int_two_sides
   do
      cat pbs_exp1_base_int_data_dbi.pbs \
         | sed -e "s/TBD/${dist}_exp$number/g" \
         | sed -e "s/DBT/${dist}_result$number/g" \
         | sed -e "s|MOTHERDIR|$mother_dir|g" \
         > $mother_dir/scripts_int_dbi/pbs_${dist}_exp$number.pbs
   done
done
| true
|
555cc97c2528c5f0a396209d924345931c96b284
|
Shell
|
phrocker/nifi-minifi-cpp
|
/docker/DockerVerify.sh
|
UTF-8
| 1,972
| 3.1875
| 3
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Zlib",
"MIT-0",
"LicenseRef-scancode-openssl",
"ISC",
"LicenseRef-scancode-ssleay-windows",
"OpenSSL",
"JSON",
"BSL-1.0",
"curl",
"LicenseRef-scancode-public-domain",
"MIT",
"BSD-2-Clause",
"BSD-1-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-bsd-unchanged"
] |
permissive
|
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the \"License\"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an \"AS IS\" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DockerVerify.sh <minifi-version> — set up a Python 3 virtualenv with the
# integration-test dependencies and run the MiNiFi docker test suite.
set -e

docker_dir="$( cd ${0%/*} && pwd )"

export MINIFI_VERSION=$1

# Create virutal environment for testing
if [[ ! -d ./test-env-py3 ]]; then
  echo "Creating virtual environment in ./test-env-py3" 1>&2
  virtualenv --python=python3 ./test-env-py3
fi

echo "Activating virtual environment..." 1>&2
. ./test-env-py3/bin/activate
pip install --trusted-host pypi.python.org --upgrade pip setuptools

# Install test dependencies
echo "Installing test dependencies..." 1>&2

# hint include/library paths if homewbrew is in use
# (m2crypto builds against OpenSSL, which Homebrew keeps un-linked on macOS)
if brew list 2> /dev/null | grep openssl > /dev/null 2>&1; then
  echo "Using homebrew paths for openssl" 1>&2
  export LDFLAGS="-L$(brew --prefix openssl)/lib"
  export CFLAGS="-I$(brew --prefix openssl)/include"
  export SWIG_FEATURES="-cpperraswarn -includeall -I$(brew --prefix openssl)/include"
fi

pip install --upgrade \
            pytest \
            docker \
            PyYAML \
            m2crypto \
            watchdog

export JAVA_HOME="/usr/lib/jvm/default-jvm"
export PATH="$PATH:/usr/lib/jvm/default-jvm/bin"

# Make the integration-test helpers importable by pytest.
export PYTHONPATH="${PYTHONPATH}:${docker_dir}/test/integration"

exec pytest -s -v "${docker_dir}"/test/integration
| true
|
eaeaec051efc365413acadc4487934de112642fc
|
Shell
|
tyagi619/Labs
|
/Utility_commands/adv_shell/script7.sh
|
UTF-8
| 337
| 2.984375
| 3
|
[] |
no_license
|
#! /bin/bash
#install pdftk from ubuntu software
#no need to install anything to use convert
#
# Split a PDF into two-page chunks with ImageMagick's convert:
# multiple_pdf/1.pdf holds pages 1-2, 2.pdf pages 3-4, and so on.
#
# Usage: script7.sh [pdf] [pairs]
#   pdf   - input file (default: Linux.Shell.Scripting.Cookbook.pdf,
#           the previously hard-coded name)
#   pairs - number of two-page chunks to produce (default: 192)

pdf=${1:-Linux.Shell.Scripting.Cookbook.pdf}
pairs=${2:-192}

# -p: do not fail if the output directory already exists (re-runs).
mkdir -p multiple_pdf
for ((i=1; i<=pairs; i++));
do
	convert "${pdf}[$((2*i-1))-$((2*i))]" "./multiple_pdf/${i}.pdf"
	# pdftk "$pdf" cat $((2*i-1))-$((2*i)) output ./multiple_pdf/${i}.pdf
done
| true
|
8d00e80045af0314dad4f94118b198397fe58531
|
Shell
|
nackstein/dex-lock
|
/dex-show
|
UTF-8
| 1,007
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/dash
# Copyright (c) 2014 - 2016 Luigi Tarenga <luigi.tarenga@gmail.com>
# Distributed under the terms of a MIT license.
# Show the local and remote state of a distributed lock.
# parameters: [ lock_name ]   (defaults to "default")

export LANG=C
# Run from the script's own directory so the relative conf/ and temp/ reads
# below hit the right files; abort if we can't get there.
# fix: `cd ${0%/*}` was unquoted and broke on paths containing spaces.
cd "${0%/*}" || exit 1

lname=${1:-default}
# One-line config files: server list, remote install path, extra ssh options.
read lock_servers < conf/lock_servers
read rpath < conf/remote_path
read sshopt < conf/sshopt

if [ -f "temp/${lname}_counter" ] ; then
   read counter < "temp/${lname}_counter"
   read holder term seq previous < "temp/${lname}_htsp"
   echo local term seq for $lname:
   echo holder: $holder
   echo term: $term
   echo seq: $seq
   echo counter: $counter
   echo previous: $previous
else
   echo lock $lname not initialized
fi

read hostname majority < "temp/${lname}_params"
echo
echo remote state sequence for $lname:
echo lock_server holder term seq
# Query every lock server. $sshopt is deliberately left unquoted so a
# multi-word option string is split into separate ssh arguments.
for h in $lock_servers ; do
   echo "$h": $(ssh $sshopt "$h" "cd $rpath ; " \
                "[ -f state/$lname ] && " \
                "cat state/$lname || " \
                "echo lock file not found")
done
echo
| true
|
14f892b0d259a4e3e68f5062303864ba5c14debd
|
Shell
|
AndreiBarsan/caffe-setup
|
/torch/install-torch.sh
|
UTF-8
| 5,590
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Job to install torch7 distro in local folder
# assumes local packages are in ~/user/...
#
# SBATCH directives below are read by SLURM's sbatch, not by bash.
#SBATCH --job-name="torch-install"
#SBATCH --output=torch-install.out
#SBATCH --error=torch-install.out
#
# run on all cores minus one of the node, require 2GB per core = 14GB
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=7
#SBATCH --mem-per-cpu=2048
#SBATCH --gres=gpu:1

################################################################################
# Helpers and Miscellaneous
################################################################################
# Report a setup failure on stderr and exit, propagating the exit status of
# the command that just failed (captured before anything else can clobber $?).
fail() {
    local last_err="$?"
    echo "Failed to set up Torch: $1" >&2
    exit "$last_err"
}
# Uses a proper machine and not the login node to run stuff.
# If SLURM is not present, simply replace the 'srun -N 1' part with 'eval'.
# run CMD [ARGS...] : execute the command as a SLURM job step.
function run {
    srun "$@"
}
# run_gpu CMD [ARGS...] : same as run, but request one GPU for the step.
function run_gpu {
    srun --gres=gpu:1 "$@"
}
################################################################################
# Printing Info
################################################################################
# Banner: record when and where this job started.
tstamp="`date '+%D %T'`"
hn="`hostname -f`"
jobid=${SLURM_JOB_ID}
jobname=${SLURM_JOB_NAME}
# Refuse to run outside a SLURM allocation (sbatch/srun export SLURM_JOB_ID).
if [ -z "${jobid}" ] ; then
    echo "ERROR: SLURM_JOBID undefined, are you running this script directly ?"
    exit 1
fi
printf "%s: starting %s(%s) on host %s\n" "${tstamp}" "${jobname}" "${jobid}" "${hn}"
# Dump the SLURM environment for later debugging of the allocation.
echo "**"
echo "** SLURM_CLUSTER_NAME="$SLURM_CLUSTER_NAME
echo "** SLURM_JOB_NAME="$SLURM_JOB_NAME
echo "** SLURM_JOB_ID="$SLURM_JOBID
echo "** SLURM_JOB_NODELIST"=$SLURM_JOB_NODELIST
echo "** SLURM_NUM_NODES"=$SLURM_NUM_NODES
echo "** SLURMTMPDIR="$SLURMTMPDIR
echo "** working directory = "$SLURM_SUBMIT_DIR
echo
################################################################################
# Basic Setup
################################################################################
echo "SCRIPT_OUT:loading modules:"
# Load appropriate modules.
# If executing remotely, make sure you source the appropriate system-level configs
# in order to expose the 'module' command.
# Brotip: The Euryale Titan X cards are the Pascal version. CUDA 7.5 does NOT
# support them!
CUDA_VERSION="8.0.44"
# CUDA_VERSION="7.5.18"
# NOTE(review): MODULE_CUDA_DIR is not referenced elsewhere in this section —
# confirm it is consumed later or by a sourced script.
MODULE_CUDA_DIR="/site/opt/cuda/${CUDA_VERSION}/x64"
module load cuda/"${CUDA_VERSION}" || fail 'Could not load CUDA module.'
module load cudnn/v5.1 || fail 'Could not load cuDNN module.'
#module load opencv/3.1.0 || fail 'Could not load OpenCV module (v3.1.0)'
# Fun fact: Boost 1.60 had a bug preventing it from being used to compile Caffe.
module load boost/1.62.0 || fail 'Could not load boost module (v1.62.0).'
module load mpich || fail 'Could not load mpi module.'
module load openmpi || fail 'Could not load openmpi.'
# default load is gcc 4.9, but we need the same version as libgomp.so.1
# because libtorch (libTH.so) needs GOMP_4.0 which for some reason does not change
# on module load gcc, while it should be of the same version as gcc
# fix: the failure message previously said "load" although the command unloads.
module unload gcc || fail 'Could not unload gcc'
echo "SCRIPT_OUT:Relevant modules loaded OK."
################################################################################
# Local libraries
################################################################################
# Local (non-root) install prefix for self-built tools.
INSTALL_DIR="${HOME}/user"
echo "SCRIPT_OUT:Install dir is ${INSTALL_DIR}"
# INSTALL_DIR added to PATH and LD_LIBRARY_PATH in .bashrc
# NOTE(review): $CMAKE_VERSION holds cmake's full multi-line version banner;
# it is echoed unquoted below, which collapses it onto one line.
CMAKE_VERSION="$(cmake --version)"
echo "SCRIPT_OUT:Cmake version is " $CMAKE_VERSION " minimum is 2.8 in CMakeLists.txt"
# Default gcc 4.8 does not have GLIBCXX_3.4.20, required by cmake 3.7,
# see strings /usr/lib/x86_64-linux-gnu/libstdc++.so.6 | grep GLIBCXX
# The local CMake 3.7 build below is kept disabled for that reason.
#if [[ "$CMAKE_VERSION" =~ '3.' ]]; then
#   echo "SCRIPT_OUT:CMake Version Ok"
#else
#   echo "SCRIPT_OUT:Installing CMake 3.7 locally"
#   wget https://cmake.org/files/v3.7/cmake-3.7.2.tar.gz
#   tar -xvzf cmake-3.7.2.tar.gz
#   cd cmake-3.7.2
#   ./bootstrap --prefix="${INSTALL_DIR}"
#   make -j4
#   make check -j4
#   make install
#   echo "SCRIPT_OUT:CMake installed"
#fi
################################################################################
# Install Torch dependencies and Torch distro
################################################################################
# Clone and build the Torch distro into ~/torch unless it is already there.
if [[ -d "${HOME}/torch" ]]; then
    echo "SCRIPT_OUT:Torch already installed, why are you running this script??"
else
    echo "SCRIPT_OUT:Installing torch dependencies, modified version of install-deps script in torch/"
    git clone https://github.com/torch/distro.git ~/torch --recursive
    cd ~/torch;
    #bash install-deps;
    #Probably not needed :/
    #bash ../install-deps-local.sh "${INSTALL_DIR}"
    echo "SCRIPT_OUT:Installing torch"
    # Point CMake at the locally-installed libraries built under INSTALL_DIR.
    export CMAKE_LIBRARY_PATH=${INSTALL_DIR}/lib:$CMAKE_LIBRARY_PATH
    export CMAKE_INCLUDE_PATH=${INSTALL_DIR}/include:$CMAKE_INCLUDE_PATH
    export CMAKE_PREFIX_PATH=${INSTALL_DIR}:$CMAKE_PREFIX_PATH
    # install.sh has a batch-install prompt, check that
    ./install.sh
    echo "SCRIPT_OUT:Torch installed! Party!!"
fi
################################################################################
# Install Luarocks and sample packages
################################################################################
# Build and install luarocks 2.4.1 into INSTALL_DIR unless already on PATH.
# fix: use the portable `command -v` instead of `which`, and guard the `cd`
# so configure/make never run in the wrong directory if the download or
# unpack failed.
if command -v luarocks >/dev/null 2>&1; then
    echo "SCRIPT_OUT:Luarocks already installed"
else
    echo "SCRIPT_OUT:Installing luarocks"
    wget https://luarocks.org/releases/luarocks-2.4.1.tar.gz
    tar zxpf luarocks-2.4.1.tar.gz
    cd luarocks-2.4.1 || fail 'Could not enter luarocks source directory.'
    ./configure --prefix="${INSTALL_DIR}"
    make build -j4
    make install
    echo "SCRIPT_OUT:Luarocks installed"
fi
| true
|
e445f33026287343e60a1ddc97bbf53cf91223c3
|
Shell
|
ministryofjustice/hmpps-network-terraform-mis
|
/scripts/aws-get-temp-creds.sh
|
UTF-8
| 485
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Assume the Terragrunt IAM role via STS and write short-lived credentials to
# a properties file that can be sourced before running InSpec.
# Requires: aws CLI and jq; TERRAGRUNT_IAM_ROLE must be set.
OUTPUT_FILE="env_configs/inspec-creds.properties"

# 900 seconds is the minimum session duration STS accepts.
temp_role=$(aws sts assume-role --role-arn "${TERRAGRUNT_IAM_ROLE}" --role-session-name testing --duration-seconds 900) || exit 1

# fix: extract fields with `jq -r` (raw output). The previous
# `echo $json | jq ... | xargs` pipeline word-split the unquoted JSON and let
# xargs strip quotes / interpret backslashes, which could corrupt credentials
# containing shell-special characters.
{
  echo "unset AWS_PROFILE"
  echo "export AWS_ACCESS_KEY_ID=$(jq -r .Credentials.AccessKeyId <<<"${temp_role}")"
  echo "export AWS_SECRET_ACCESS_KEY=$(jq -r .Credentials.SecretAccessKey <<<"${temp_role}")"
  echo "export AWS_SESSION_TOKEN=$(jq -r .Credentials.SessionToken <<<"${temp_role}")"
} > "${OUTPUT_FILE}"
| true
|
b12eec8927e7c2e879b77b638711cc7d4ce7d6e7
|
Shell
|
vcaen/openshift-jenkins-with-jobs
|
/configuration/scripts/src/main/scripts/fr/geoportail/entrepot/scripts/utils/save-update-static-ref.sh
|
UTF-8
| 371
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Commit local changes to the static reference repository, merge in upstream
# changes, and push the result (messages are in French: "referentiel
# statique" = static reference data).
# NOTE(review): there is no shebang and no error handling — if `hg merge`
# fails with conflicts, the script still commits the partial merge and
# pushes; confirm this best-effort behaviour is intended.
date
echo "COMMIT des modifications locales"
hg commit -A -u entrepot -m "Commit automatique du referentiel statique"
echo "PULL du repository central"
hg pull
echo "MERGE avec les modifications exterieures"
hg merge
echo "COMMIT du merge"
hg commit -A -u entrepot -m "Merge automatique du referentiel statique"
echo "PUSH sur le repository central"
hg push
echo ""
|
947e325cb7f12db62f272264c9f24f0c090f59ed
|
Shell
|
MartinWan/LSGAN
|
/stability_comparison/mixture_gaussian/batch_cmp.sh
|
UTF-8
| 177
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the LS-GAN and sigmoid-GAN experiments ten times each, collecting the
# generated plots of run N into ls_N/ and sig_N/ respectively.
# Qt must render off-screen: there is no display on the batch host.
export QT_QPA_PLATFORM=offscreen

for (( run = 0; run <= 9; run++ )); do
    python -u ls.py
    mkdir "ls_$run"
    mv *.png "ls_$run"

    python -u sig.py
    mkdir "sig_$run"
    mv *.png "sig_$run"
done
| true
|
7d6424177ab89c9c5154e1ab36a592cab9352c78
|
Shell
|
completeworks/b-log
|
/examples/example.sh
|
UTF-8
| 1,716
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#########################################################################
# Script Name: example
# Script Version: 0.0.1
# Script Date: 30 June 2016
#########################################################################
# example of b-log.sh: demonstrates log levels, logging to file, piping
# into a log level, and defining a custom level.
#########################################################################
# global parameters
set -e          # kill script if a command fails
set -o nounset  # unset values give error
set -o pipefail # prevents errors in a pipeline from being masked
# Resolve this script's directory so b-log.sh can be sourced relative to it.
# fix: ${BASH_SOURCE[0]} was unquoted inside $(realpath ...) and broke when
# the script lives in a path containing spaces.
script_path="$(dirname "$( realpath "${BASH_SOURCE[0]}" )" )" # the path to the script
source "${script_path}"/../b-log.sh  # include the log script

echo "--------------------------------------------------"
echo "Example of"
B_LOG --version --help
echo "--------------------------------------------------"
echo ""
echo "** Setting the log level"
echo "B_LOG --log-level 'nr'"
echo "B_LOG --log-level '\$LOG_LEVEL_OFF'"
echo "or using the direct aliases: 'LOG_LEVEL_OFF', 'LOG_LEVEL_FATAL' ..."
echo "these are aliases that call the 'B_LOG --log-level' function"
echo ""
echo "--------------------------------------------------"
# Log to stdout and to log/log.txt with date prefix/suffix enabled.
B_LOG -o true
B_LOG -f log/log.txt --file-prefix-enable --file-suffix-enable
LOG_LEVEL_ALL
# One message per built-in level, from most to least severe.
FATAL "fatal level"
ERROR "error level"
WARN "warning level"
NOTICE "notice level"
INFO "info level"
DEBUG "debug level"
TRACE "trace level"
echo "--------------------------------------------------"
echo "piped into INFO" | INFO
echo "--------------------------------------------------"
# Register a custom level (number, name, template, colour on/off codes)
# and emit a message at it.
LOG_LEVELS+=("50" "EXAMPLE" "[@23:1@][@7:2@][@3@:@4@] @5@" "\e[95m" "\e[0m")
B_LOG_MESSAGE 50 "custom log level"
echo "--------------------------------------------------"
| true
|
5a7d8db897206ac8d4cffbeab87d3fd101ea5c17
|
Shell
|
benlawson/ezws
|
/bash_completion.d/ezws.sh
|
UTF-8
| 508
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
# Bash programmable completion for ezws.sh.
_ezws()
{
    local cur prev opts
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    prev="${COMP_WORDS[COMP_CWORD-1]}"
    opts="connect startup sftp stop bind add sshfs list reindex"

    case "${prev}" in
        connect|startup|sftp|stop|bind|sshfs)
            # These subcommands take a hostname; no host list is available
            # yet, so complete against an empty word list.
            local hostname=""
            # fix: pass the current word quoted and after `--`; the previous
            # `compgen -W "${hostname}" ${cur}` form mis-handled empty words
            # and words starting with '-'.
            COMPREPLY=( $(compgen -W "${hostname}" -- "${cur}") )
            return 0
            ;;
        *)
        ;;
    esac

    COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
    return 0
}
complete -F _ezws ezws.sh
| true
|
235da4ac12f2f7f8aa23d94edec7d54b2904055f
|
Shell
|
TheMasterGuy/openshift-bukkit
|
/.openshift/action_hooks/build
|
UTF-8
| 267
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# OpenShift build hook: make sure the latest CraftBukkit server jar is
# present in the persistent data directory, downloading it on first build.
SERVER_PROPERTIES=server.properties
CRAFTBUKKIT_SERVER_JAR=craftbukkit-latest.jar

# Trace every command for the build log.
set -x
cd $OPENSHIFT_DATA_DIR

# Download only when the jar is not already there.
[ -e "$CRAFTBUKKIT_SERVER_JAR" ] || wget "http://tcpr.ca/files/craftbukkit/craftbukkit-latest.jar" -O $CRAFTBUKKIT_SERVER_JAR
| true
|
a3db13d8031219f3a572fefd4a39c04e9f97270f
|
Shell
|
io-monad/dotfiles
|
/dotfiles/zshrc
|
UTF-8
| 761
| 2.75
| 3
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
# Prezto — source the framework init if it is installed.
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
  source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
# Preferred zsh options (disable aggressive spelling correction)
unsetopt correct_all
# Alias
alias lla='ls -lA'
alias mate=atom
alias ag=pt
alias diff='command diff' # for Prezto
# ls on cd — chpwd is zsh's hook run after every directory change
chpwd() { ls }
# golang
export GOPATH=$HOME
# homebrew-file
export HOMEBREW_BREWFILE="$HOME/.brewfile"
export HOMEBREW_CASK_OPTS="--appdir=/Applications"
# nodebrew — glob qualifiers: N drops the entry if missing, -/ keeps dirs only
path=(~/.nodebrew/current/bin(N-/) $path)
# fzf
[ -s ~/.fzf.zsh ] && . ~/.fzf.zsh
# z
[ -s /usr/local/etc/profile.d/z.sh ] && . /usr/local/etc/profile.d/z.sh
# ~/bin
path=(~/bin(N-/) $path)
# Load .zsh/*.zsh
for zshfile in ~/.zsh/*.zsh; do . $zshfile; done
# Load .zshrc.local — machine-specific overrides, kept out of version control
[ -s ~/.zshrc.local ] && . ~/.zshrc.local
| true
|
7192db049370937b9356072710245257d64fa104
|
Shell
|
rochacbruno/pulp-qe-tools
|
/pulp3/install_pulp3/install.sh
|
UTF-8
| 2,967
| 3.90625
| 4
|
[] |
no_license
|
#! /bin/bash
# Install Pulp 3 on a (possibly remote) host via the ansible-pulp3 playbooks.
# export PULP3_HOST=myhostname.com
# export PULP3_PLAYBOOK=source-install-plugins.yml
# curl https://raw.githubusercontent.com/PulpQE/pulp-qe-tools/master/pulp3/install_pulp3/install.sh | bash
# Optionally if you clone the qe-tools and want to use local playbooks
# export PULP3_INSTALL_MODE=local
# export PULP3_HOST=myhostname.com
# export PULP3_PLAYBOOK=source-install-plugins.yml
# export PULP3_ROLES_PATH=/path/to/local/ansible-pulp3 (optional)
# ./install.sh
# Directory containing this script (used for local-mode file copies below).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
HOST="${PULP3_HOST:-$(hostname)}"
echo "Installing on host: ${HOST} - set PULP3_HOST env var to override it"
PLAYBOOK="${PULP3_PLAYBOOK:-source-install-plugins.yml}"
echo "Will use: ${PLAYBOOK} - set PULP3_PLAYBOOK env var to override it"
# whether we get the playbooks from qe-tools repo or use the local one
INSTALL_MODE="${PULP3_INSTALL_MODE:-github}"
echo "Will install from ${INSTALL_MODE} - set PULP3_INSTALL_MODE=local|github env var to override it"
# Where the roles are located? if empty will fetch from github
ROLES_PATH="${PULP3_ROLES_PATH:-github}"
echo "Will use ${ROLES_PATH} roles - set PULP3_ROLES_PATH=/path/to/ansible-pulp3/ env var to override it"
# requirements — each tool is probed by asking for its version.
# NOTE(review): `sed --version` fails on BSD sed even when sed is present;
# confirm this script only targets GNU userlands.
if ! git --version > /dev/null; then
    echo 'git is required'
    exit 1
fi
if ! sed --version > /dev/null; then
    echo 'sed is required'
    exit 1
fi
if ! python3 -V > /dev/null; then
    echo 'python3 is required'
    exit 1
fi
if ! ansible-playbook --version > /dev/null; then
    echo 'Ansible Playbook is required is required'
    exit 1
fi
if ! ansible-galaxy --version > /dev/null; then
    echo 'Ansible Galaxy is required is required'
    exit 1
fi
# make a temp dir to clone all the things
tempdir="$(mktemp --directory)"
pushd "${tempdir}"
# Fetch the ansible.cfg + playbook either from GitHub or from this checkout.
if [ "$INSTALL_MODE" == "github" ]; then
    # get the playbook locally
    echo "Fetching playbook from github"
    curl https://raw.githubusercontent.com/PulpQE/pulp-qe-tools/master/pulp3/install_pulp3/ansible.cfg > ansible.cfg
    curl https://raw.githubusercontent.com/PulpQE/pulp-qe-tools/master/pulp3/install_pulp3/"${PLAYBOOK}" > install.yml
else
    # For local debugging uncomment this line
    echo "Using local repo playbook"
    cp "$DIR"/ansible.cfg ansible.cfg
    cp "$DIR"/"${PLAYBOOK}" install.yml
fi
# Same choice for the ansible-pulp3 roles themselves.
if [ "$ROLES_PATH" == "github" ]; then
    echo "Fetching ansible installer roles from github"
    git clone https://github.com/pulp/ansible-pulp3.git
else
    echo "Using local roles from $ROLES_PATH"
    cp -R "$ROLES_PATH" ./ansible-pulp3
fi
echo "Installing roles."
export ANSIBLE_ROLES_PATH="./ansible-pulp3/roles/"
ansible-galaxy install -r ./ansible-pulp3/requirements.yml --force
echo "Available roles."
ansible-galaxy list
echo "Starting Pulp 3 Installation."
# Note the trailing comma after ${HOST}: it makes ansible treat the value as
# an inline inventory list rather than an inventory file path.
ansible-playbook -v -i "${HOST}", -u root install.yml -e pulp_content_host="${HOST}:8080"
echo "Cleaning."
popd
rm -r -f "${tempdir}"
echo "Is it working?"
# Smoke test: the status endpoint should answer on port 80.
curl -u admin:admin "${HOST}":80/pulp/api/v3/status/
| true
|
93127ea373c484919b30706104cecbe9b304b981
|
Shell
|
IvanWoo22/prepare18.04
|
/1-apt.sh
|
UTF-8
| 2,402
| 3.15625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Ubuntu 18.04 bootstrap: temporarily switch apt to the NJU mirror, install
# build and library prerequisites (for linuxbrew and friends), then restore
# the original sources.list.
echo "====> Install softwares via apt <===="
echo "==> Disabling the release upgrader"
sudo sed -i.bak 's/^Prompt=.*$/Prompt=never/' /etc/update-manager/release-upgrades
echo "==> Switch to the NJU mirror"
# http://nju-mirror-help.njuer.org/ubuntu.html
cat <<EOF > list.tmp
# NJU MIRRORS
deb http://mirrors.nju.edu.cn/ubuntu/ bionic main restricted universe multiverse
deb http://mirrors.nju.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
deb http://mirrors.nju.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
deb http://mirrors.nju.edu.cn/ubuntu bionic-security main restricted universe multiverse
EOF
# Keep a backup of the original list only once (first run).
if [ ! -e /etc/apt/sources.list.bak ]; then
    sudo cp /etc/apt/sources.list /etc/apt/sources.list.bak
fi
sudo mv list.tmp /etc/apt/sources.list
# Vagrant VMs: AppArmor gets in the way, disable it there only.
if [ "$(whoami)" == 'vagrant' ]; then
    echo "==> Disable AppArmor"
    sudo service apparmor stop
    sudo update-rc.d -f apparmor remove
fi
echo "==> Disable whoopsie"
sudo service whoopsie stop
echo "==> Install linuxbrew dependences"
sudo apt update
sudo apt -y upgrade
sudo apt -y install gnome-tweak-tool build-essential curl git m4 ruby texinfo
sudo apt -y install libbz2-dev zlib1g-dev
sudo apt -y install libcurl4-openssl-dev libexpat1-dev libncurses5-dev
echo "==> Install other software"
sudo apt -y install gawk csh parallel vim graphviz screen unzip xsltproc numactl
sudo apt -y install libdb-dev libreadline-dev libedit-dev
sudo apt -y install libgd-dev libxml2-dev
echo "==> Install gsl"
sudo apt -y install libgsl-dev
echo "==> Install gtk3"
sudo apt -y install libcairo2-dev libglib2.0-0 libglib2.0-dev libgtk-3-dev libgirepository1.0-dev
sudo apt -y install gir1.2-glib-2.0 gir1.2-gtk-3.0 gir1.2-webkit-3.0
echo "==> Install gtk3 related tools"
sudo apt -y install xvfb glade
# install mongodb and redis by apt
echo "==> Install mongodb"
sudo apt -y install mongodb
echo "==> Install redis"
sudo apt -y install redis-server
# Mysql will be installed separately.
# Remove system provided mysql package to avoid confusing linuxbrew.
echo "==> Remove system provided mysql"
sudo apt -y purge mysql-common
echo "==> Restore original sources.list"
if [ -e /etc/apt/sources.list.bak ]; then
    sudo rm /etc/apt/sources.list
    sudo mv /etc/apt/sources.list.bak /etc/apt/sources.list
fi
echo "====> Basic software installation complete! <===="
| true
|
9908c42ade52887c9f812583637223c1eeb63ace
|
Shell
|
kiranpe/Ansible_Playbooks
|
/DockerImage/scripts/check_svc_deploy.sh
|
UTF-8
| 580
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# written by kiran to delete existing svc and deploy
# Deletes the per-worker my-service-<octet> services and the casestudy
# deployment in com-att-oce-test before a fresh deploy.
# Worker IPs are the lines between the [k8sworker] and [k8smaster] sections
# of the Ansible hosts file.
port=$(sed -n '/k8sworker]/,/k8smaster]/p' /sites/scripts/hosts | sed '1d; $d;' | awk '{print $1}')

for i in $port
do
    # Service suffix is the first octet of the worker IP.
    j=$(echo "$i" | cut -d '.' -f1)
    # fix: branch on the pipeline's exit status directly instead of the
    # fragile `cmd; if [ $? = 0 ]` pattern; matching lines are still printed.
    if kubectl get svc -n com-att-oce-test | grep "my-service-${j}"
    then
        kubectl delete svc "my-service-${j}" -n com-att-oce-test
    else
        echo "Nothing to delete..."
    fi
done

if kubectl get deploy -n com-att-oce-test | grep casestudy
then
    kubectl delete deploy casestudy -n com-att-oce-test
else
    echo "Nothing to delete..."
fi
| true
|
fd286da0d181e48782391c47844e8332fde24ace
|
Shell
|
michalschott/bzwbk_transfer_creator
|
/scripts/build.sh
|
UTF-8
| 1,076
| 3.6875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# This script builds the application from source.
# Get the parent directory of where this script is.
# NOTE(review): this resolves symlinked invocations, but a *relative*
# readlink result is not re-anchored — confirm the script is never linked
# through a relative symlink.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
# Change into that directory
cd "$DIR"
# Get the git commit (and a "+CHANGES" marker when the tree is dirty)
GIT_COMMIT=$(git rev-parse HEAD)
GIT_DIRTY=$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)
# Delete the old dir
echo "==> Removing old directory..."
rm -f bin/*
rm -rf pkg/*
mkdir -p bin/
# Allow LD_FLAGS to be appended during development compilations
# (-X injects the commit hash into main.GitCommit at link time)
LD_FLAGS="-X main.GitCommit=${GIT_COMMIT}${GIT_DIRTY} $LD_FLAGS"
# In release mode we don't want debug information in the binary
# (-s strips the symbol table, -w the DWARF debug info)
if [[ -n "${RELEASE}" ]]; then
    LD_FLAGS="-X main.GitCommit=${GIT_COMMIT}${GIT_DIRTY} -X github.com/michalschott/bzwbk_transfer_creator/bzwbk_transfer_creator.VersionPrerelease= -s -w"
fi
# Build!
echo "==> Building..."
go build \
    -o bin/bzwbk_transfer_creator \
    -ldflags "${LD_FLAGS}"
# Done!
echo
echo "==> Results:"
ls -hl bin/
| true
|
4b6898f4fc616de3918c27f532e3e03b0b25b597
|
Shell
|
jvlad/zZshFramework
|
/src/iOS/iOS_main.sh
|
UTF-8
| 564
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# iOS helper commands for the zZshFramework console tools. _main_IOS defines
# the public iOS* functions, unsets its private helpers, then is invoked once
# at the bottom of the file.
_main_IOS() {
  # Symbolicate a crash log via the bundled symbolicate_crash helper.
  iOSSymbolicate() {
    _import_shFile "$(_iosExtrasDir)/symbolicate_crash" $@
  }
  # Uninstall an app (by bundle id) from the attached device and, via the
  # Python helper, from all known devices.
  iOSRemove_appID() {
    mobiledevice uninstall_app $@
    py "$(_iosExtrasDir)/remove_app_from_all_devices.py" $@
    # xcrun simctl uninstall booted "$1"
  }
  iOSSimulatorsList() {
    xcrun simctl list
  }
  iOSDevicesList() {
    instruments -s devices
  }
  # NOTE(review): `print$(zsf)` appears to build a framework-specific command
  # name from the zsf helper's output — confirm against zZshFramework docs.
  _iosExtrasDir() {
    print$(zsf) "$(consoleToolsSrcDir)/iOS/extras"
  }
  _unset_functions _main_IOS
}
_main_IOS
| true
|
85a2da0cae6dce5195c066b4d3590dae6ac9403f
|
Shell
|
wangmeijiao/code_examples
|
/perl/getChain_twochr/getChain/getChain_twoSpeciesPipe.sh
|
UTF-8
| 1,056
| 3.09375
| 3
|
[] |
no_license
|
# Pipeline for making a chain file from two soft-masked genomes.
# Usage: getChain_twoSpeciesPipe.sh <target.genome.smsk.fa> <query.genome.smsk.fa>
# need file: HoxD55.q, which is a blastz score matrix
# note: change blastz pars in multiBlastzAll2All.sh
# Requires helper scripts multiBlastzAll2All.sh, multiDirCollector.sh and
# chainHarvest.sh in the working directory.
target=$1
query=$2

#0. init work dirs
echo "start pipe.."
date
echo "check dirs"
# fix: mkdir -p replaces four copy-pasted `if [ ! -d X ]; then mkdir X; fi`
# guards and is a no-op for directories that already exist.
mkdir -p faSplit_T faSplit_Q lavAxtChain chainMergeSort netChain

if [ ! -f HoxD55.q ]; then
    echo "HoxD55.q not exists"
    exit
fi

#2. split fa files (one file per sequence name)
# fix: quote the genome paths so filenames with spaces survive.
echo "split fa..."
faSplit byname "$query" faSplit_Q/
faSplit byname "$target" faSplit_T/

#3. run multiBlastz
echo "run multiBlastzAll2All.sh"
bash multiBlastzAll2All.sh > multiblastz.log 2>multiblastz.err

#4. lav->axt->chain
echo "run multiDirCollector.sh"
bash multiDirCollector.sh "$target" "$query"

#5. all.chain->net->liftOver
echo "run bash chainHarvest.sh"
bash chainHarvest.sh "$target" "$query"

echo
date
echo "all done"
| true
|
e00a29d1b07c6effad2b8570fefebf234179aa1d
|
Shell
|
OfficialBAMM/dotfiles
|
/dotfiles/config/zsh/settings/functions.zsh
|
UTF-8
| 2,483
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/bin/zsh
# shell helper functions
# most written by Nathaniel Maia
# some pilfered from around the web
# better ls and cd
# Drop any pre-existing `ls` alias (silently, in case none is defined), then
# shadow ls with a function that always colourizes and appends type markers.
unalias ls >/dev/null 2>&1
ls() { command ls --color=auto -F "$@"; }
# Re-rank the Arch Linux pacman mirror list. Prefers reflector when it is
# installed; otherwise downloads the US/CA mirror list and ranks it with
# rankmirrors. Either way the work runs under `su -c` because
# /etc/pacman.d/mirrorlist is root-owned.
mir()
{
    if hash reflector >/dev/null 2>&1; then
        su -c 'reflector --score 100 --fastest 10 --sort rate \
            --save /etc/pacman.d/mirrorlist --verbose'
    else
        local pg="https://www.archlinux.org/mirrorlist/?country=US&country=CA&use_mirror_status=on"
        # Uncomment the servers (s/^#Server/Server/), drop remaining comment
        # lines, then keep the 10 best-ranked mirrors.
        su -c "printf 'ranking the mirror list...\n'; curl -s '$pg' |
            sed -e 's/^#Server/Server/' -e '/^#/d' |
            rankmirrors -v -t -n 10 - > /etc/pacman.d/mirrorlist"
    fi
}
# Launch ranger and, on exit, cd into the directory that was last visited
# inside it (communicated through a temp file via --choosedir).
ranger()
{
    local dir tmpf
    # Avoid deeply nested ranger-inside-ranger sessions.
    [[ $RANGER_LEVEL && $RANGER_LEVEL -gt 2 ]] && exit 0
    local rcmd="command ranger"
    # The linux console lacks the colours of the default scheme.
    [[ $TERM == 'linux' ]] && rcmd="command ranger --cmd='set colorscheme default'"
    tmpf="$(mktemp -t tmp.XXXXXX)"
    # Start ranger in the given dir(s), defaulting to the current directory.
    eval "$rcmd --choosedir='$tmpf' '${*:-$(pwd)}'"
    [[ -f $tmpf ]] && dir="$(cat "$tmpf")"
    [[ -e $tmpf ]] && rm -f "$tmpf"
    # Only cd when ranger actually moved somewhere else.
    [[ -z $dir || $dir == "$PWD" ]] || builtin cd "${dir}" || return 0
}
# Zip the given files into <current-directory-name>.zip.
# fix: quote the archive name (current dir may contain spaces) and forward
# the arguments with "$@" instead of $* so filenames with spaces survive.
mkzip()
{
    zip -r "${PWD##*/}.zip" "$@"
}
# Archive helper:
#   arc -e|--extract FILE        extract FILE based on its extension
#   arc -n|--new ARCHIVE FILES.. create ARCHIVE (type chosen by extension)
arc()
{
    arg="$1"; shift
    case $arg in
        -e|--extract)
            if [[ $1 && -e $1 ]]; then
                case $1 in
                    *.tbz2|*.tar.bz2) tar xvjf "$1" ;;
                    *.tgz|*.tar.gz) tar xvzf "$1" ;;
                    *.tar.xz) tar xpvf "$1" ;;
                    *.tar) tar xvf "$1" ;;
                    *.gz) gunzip "$1" ;;
                    *.zip) unzip "$1" ;;
                    *.bz2) bunzip2 "$1" ;;
                    *.7zip) 7za e "$1" ;;
                    *.rar) unrar x "$1" ;;
                    *) printf "'%s' cannot be extracted" "$1"
                esac
            else
                printf "'%s' is not a valid file" "$1"
            fi ;;
        -n|--new)
            case $1 in
                *.tar.*)
                    # Strip the compression suffix, tar into NAME(.tar), then
                    # compress according to the original extension.
                    name="${1%.*}"
                    ext="${1#*.tar}"; shift
                    tar cvf "$name" "$@"
                    case $ext in
                        .gz) gzip -9r "$name" ;;
                        .bz2) bzip2 -9zv "$name"
                    esac ;;
                *.gz) shift; gzip -9rk "$@" ;;
                *.zip) zip -9r "$@" ;;
                *.7z) 7z a -mx9 "$@" ;;
                *) printf "bad/unsupported extension"
            esac ;;
        *) printf "invalid argument '%s'" "$arg"
    esac
}
| true
|
70c855b1bd5fb93799d9af87bd4a609b4c5fa1ee
|
Shell
|
naps62/unix
|
/files/shell/zsh/aliases
|
UTF-8
| 678
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Personal shell aliases: sudo shortcuts, one-letter tool aliases, cd/ls
# helpers, a desktop-notification helper, and hub's git wrappers.
alias reboot='sudo reboot'
alias halt='sudo poweroff'
alias apt='sudo aptitude'
alias open='xdg-open'
alias ack=ack-grep
# one-char aliases
alias g=git
alias t=tig
alias o=open
alias v=vim
alias m=make
alias r=rails
alias rk=rake
alias a=ack
# cd — cdls is expected to be defined elsewhere (cd + ls)
alias cd='cdls'
alias ..='cd ..'
alias ..2='cd ../..'
alias ..3='cd ../../..'
# ls
alias ll='ls -alF'
alias la='ls -A'
alias lla='la -AlF'
# alert — desktop notification for long-running commands, e.g. `sleep 10; alert`;
# icon reflects the previous command's exit status, body is the command line
# with history number and the trailing "alert" stripped.
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# proxy
alias proxy='source ${NAPS62_CONFDIR}/home/bin/proxy'
# git — install hub's alias wrappers (e.g. `git` -> `hub`)
eval "$(hub alias -s)"
| true
|
5f54fef43536d58c5ab24312a245e5fd3f377616
|
Shell
|
shijie-wu/crosslingual-nlp
|
/example/contrastive-alignment/evaluate-mapping.sh
|
UTF-8
| 2,245
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Per-task configuration for zero-shot mapping evaluation.
# Positional args (all optional): seed, model, task, mode, mapping name.
seed=${1:-42}
# or xlm-roberta-base/large
model=${2:-"bert-base-multilingual-cased"}
task=${3:-"xnli"}
mode=${4:-"finetune"}
mapping=${5:-linear-orth0.01}
# Last path component of the model id (e.g. "org/name" -> "name").
model_name=$(echo "$model" | tr '/' '\n' | tail -n1)
root_dir="${ROOT_DIR:-/bigdata}"
save_path="$root_dir"/checkpoints/crosslingual-nlp
# Select the source training language, target evaluation languages and the
# dataset location for the requested task.
case "$task" in
"xnli")
    src="en"
    tgt=(ar bg de el en es fr hi ru sw th tr ur vi zh)
    data_path="$root_dir"/dataset/"$task"
    ;;
"ner-wiki")
    src="en"
    tgt=(ar de en es fr hi ru vi zh-word)
    data_path="$root_dir"/dataset/"$task"
    ;;
"udpos" | "parsing")
    src="English-EWT"
    tgt=(Arabic-PADT German-GSD English-EWT Spanish-GSD French-GSD Hindi-HDTB Russian-GSD Vietnamese-VTB Chinese-GSD)
    data_path="$root_dir"/dataset/universaldependencies/ud-treebanks-v2.6
    ;;
*)
    echo Unsupported task "$task"
    exit 1
    ;;
esac
# Per-mode hyperparameters; only feature-based evaluation is supported here.
case "$mode" in
"feature")
    ep=20
    bs=128
    if [ "$task" = "xnli" ] || [ "$task" = "ner-wiki" ]; then
        lr=1e-4
    else
        lr=1e-3
    fi
    val_check_interval=1
    # NER additionally uses a CRF tagging head.
    if [ "$task" = "ner-wiki" ]; then
        extra_args=(--projector transformer --projector_trm_num_layers 4 --freeze_layer 12 --weighted_feature True --tagger_use_crf True)
    else
        extra_args=(--projector transformer --projector_trm_num_layers 4 --weighted_feature True --freeze_layer 12)
    fi
    ;;
*)
    # fix: the message previously said "Unsupported task" although this
    # case inspects $mode.
    echo Unsupported mode "$mode"
    exit 1
    ;;
esac
# Re-evaluate every saved zero-shot checkpoint with the trained mapping
# applied (training disabled via --do_train False).
for ckpt in "$save_path"/"$task"/0-shot-feature/"$model_name"/bs"$bs"-lr"$lr"-ep"$ep"/*/ckpts/*.ckpt; do
    echo "$ckpt"
    python src/train.py \
        --seed "$seed" \
        --task "$task" \
        --data_dir "$data_path" \
        --trn_langs $src \
        --val_langs $src \
        --tst_langs "${tgt[@]}" \
        --pretrain "$model" \
        --batch_size $bs \
        --learning_rate $lr \
        --max_epochs $ep \
        --warmup_portion 0.1 \
        --val_check_interval $val_check_interval \
        --gpus 1 \
        --default_save_path "$save_path"/"$task"/0-shot-feature-map-"$mapping"/"$model_name" \
        --do_train False \
        --mapping mapping/"$mapping"/"$model_name".pth \
        --resume_from_checkpoint "$ckpt" \
        --exp_name bs$bs-lr$lr-ep$ep "${extra_args[@]}"
done
| true
|
575f3138596b280b92f6a0a1cd6e4e74aa105d58
|
Shell
|
Fenixinfo/cookbook-linux
|
/zabbix-agent.sh
|
UTF-8
| 745
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# Install and start the Zabbix 4.2 agent on Debian buster.
# Progress messages are in Portuguese; a short pause separates the steps so
# they remain readable on the console.
pause() { sleep 2; }

echo "inicializando atualização sistema......."
pause
apt update && apt -y upgrade && apt clean

echo "baixando agente......."
pause
wget https://repo.zabbix.com/zabbix/4.2/debian/pool/main/z/zabbix-release/zabbix-release_4.2-1+buster_all.deb

echo "Instalando sistema........"
pause
apt install -y ./zabbix-release_4.2-1+buster_all.deb
apt update
apt install -y zabbix-agent
pause

echo "Instalação OK"
systemctl restart zabbix-agent
systemctl status zabbix-agent.service | grep active
pause

echo "..........................................OK"
echo "..........................................OK"
echo "..........................................OK"
echo "Configurar ips do Server e Hostname /etc/zabbix/zabbix_agentd.conf"
| true
|
cf2fac0b1653a1399e1f989fb9bfa614cafdf3f4
|
Shell
|
plesager/ece3-raw-backup
|
/check_bckp.sh
|
UTF-8
| 3,287
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
usage() {
   # Print usage on stderr. The here-doc delimiter is unquoted, so
   # ${0##*/} expands to this script's basename.
   cat << EOT >&2
Usage:
${0##*/} [-r] [-f NB] [-o MODEL1 -o MODEL2 ...] EXP
Check the backup of EVERY legs of one run, by listing
output and restart directories that are not empty.
Options are:
-l : list the [L]ocal source dir
-r : list the [R]emote target dir
-o model : limit to ouput from model. Can be several. Default is all possible.
-f LEG_NUMBER : specify leg number, for which the output/restart [F]iles are listed
EOT
}
set -e
. ./config.cfg

# -- options
omodels=
while getopts "rlo:f:h?" opt; do
    case "$opt" in
        h|\?)
            usage
            exit 0
            ;;
        l) local=1 ;;
        o) omodels="$OPTARG $omodels" ;;
        r) remote=1
            ;;
        f) full=$OPTARG
    esac
done
shift $((OPTIND-1))

# -- Arg
if [ "$#" -ne 1 ]; then
    echo; echo " NEED ONE ARGUMENT, NO MORE, NO LESS!"; echo
    usage
    # fix: a usage error now exits non-zero (previously exited 0)
    exit 1
fi

# default to all models when no -o option was given
# fix: removed a stray `exit 0` inside this branch — it made the script
# terminate immediately whenever it was run without any -o option, so the
# advertised "default is all possible" never actually checked anything.
if [[ -z $omodels ]]
then
    omodels="ifs nemo tm5 oasis"
fi
# -- Utils
# not_empty_dir DIR : succeed (0) iff DIR exists and contains at least one
# entry (dotfiles included, via ls -A).
not_empty_dir () {
   [[ ! -d "$1" ]] && return 1
   # fix: quote "$1" — the unquoted form broke on paths containing spaces
   [ -n "$(ls -A "$1")" ] && return 0 || return 1
}
# -- basic check
# Local listing: for every selected model, report the size of each non-empty
# per-leg output/restart directory, and list the files of leg -f if given.
if (( local ))
then
    for model in $omodels
    do
        for ff in output restart
        do
            if [ -d ${runs_dir}/$1/${ff}/${model} ]
            then
                echo ; echo "*II* checking ${model} ${ff} [dir_size leg_nb]" ; echo
                cd ${runs_dir}/$1/${ff}/${model}
                #quick-but-not-rigourous: du -sh * | grep -v "^4.0K"
                for ddd in *
                do
                    if not_empty_dir $ddd
                    then
                        du -sh $ddd
                        # Leg numbers are zero-padded to 3 digits on disk.
                        (( $full )) && [[ $(printf %03d $full) == $ddd ]] && \
                            ls ${runs_dir}/$1/${ff}/${model}/$(printf %03d $full)
                    fi
                done
            fi
        done
    done
fi
# Remote listing via ECFS (els): per-leg file counts for outputs, plus the
# restart tarballs. tm5 is handled separately because its outputs/restarts
# are stored as tarballs rather than per-leg directories.
if (( remote ))
then
#    echo ; echo "*II* checking REMOTE top dir" ; echo
#    els -l $ecfs_dir/$1
    # -- Output (oasis has no output; tm5 handled below)
    for model in ${omodels//tm5}
    do
        [[ $model == oasis ]] && continue
        for ff in output
        do
            echo ; echo "*II* checking REMOTE ${model} ${ff} [leg_nb/ nb_files]" ; echo
            #els -l $ecfs_dir/$1/${ff}/${model}
            for ddd in $(els $ecfs_dir/$1/${ff}/${model})
            do
                # With -f LEG: list that leg's files; otherwise just count them.
                if (( $full )) && [[ $(printf %03d/ $full) = $ddd ]]
                then
                    echo $ddd
                    els -l $ecfs_dir/$1/${ff}/${model}/$ddd
                else
                    echo $ddd $(els $ecfs_dir/$1/${ff}/${model}/$ddd | wc -w)
                fi
            done
        done
    done
    if [[ $omodels =~ tm5 ]]
    then
        echo ; echo "*II* checking REMOTE tm5 output" ; echo
        els -l $ecfs_dir/$1/output.tm5.*.tar
    fi
    # -- Restart
    for model in ${omodels//tm5}
    do
        echo ; echo "*II* checking REMOTE RESTART for ${model}" ; echo
        els -l $ecfs_dir/$1/restart.${model}.*.tar
    done
    if [[ $omodels =~ tm5 ]]
    then
        echo ; echo "*II* checking REMOTE RESTART for tm5" ; echo
        els -l $ecfs_dir/$1/tm5-restart-*.tar
    fi
fi
| true
|
809bc11b7bc3c3781f346f2b7c651f15fee8cc41
|
Shell
|
initialed85/eds-cctv-system
|
/docker/entrypoint.sh
|
UTF-8
| 1,862
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Container entrypoint: (optionally) back up / rebuild the event and
# segment stores, then hand control over to supervisord.

# if [[ ! -f "/etc/motion.conf" ]]; then
#   echo "error: /etc/motion.conf does not exist, you'll need to mount it in; see /etc/motion/examples/motion.conf"
#
#   exit 1
# fi

set -e -x

# ---- variables

EVENT_ROOT=/srv/target_dir/events
EVENT_STORE=${EVENT_ROOT}/persistence.jsonl
EVENT_STORE_BACKUP=${EVENT_STORE}.old

SEGMENT_ROOT=/srv/target_dir/segments
SEGMENT_STORE=${SEGMENT_ROOT}/persistence.jsonl
SEGMENT_STORE_BACKUP=${SEGMENT_STORE}.old

# if [[ -f ${EVENT_STORE} ]]; then
#   echo "backing up events store"
#   cp -frv ${EVENT_STORE} ${EVENT_STORE_BACKUP}
#   cp -frv ${EVENT_STORE} "${EVENT_STORE_BACKUP}_$(date)"
#   echo ""
# else
#   echo "cannot backup events store, it doesn't yet exist"
# fi

# if [[ -f ${SEGMENT_STORE} ]]; then
#   echo "backing up segments store"
#   cp -frv ${SEGMENT_STORE} ${SEGMENT_STORE_BACKUP}
#   cp -frv ${SEGMENT_STORE} "${SEGMENT_STORE_BACKUP}_$(date)"
#   echo ""
# else
#   echo "cannot backup segments store, it doesn't yet exist"
# fi

# ---- deduplication stuff

# echo "clearing original events"
# echo "" >${EVENT_STORE}
# echo ""

# echo "clearing original segments"
# echo "" >${SEGMENT_STORE}
# echo ""

# echo "deduplicating events store"
# event_store_deduplicator -sourcePath ${EVENT_STORE_BACKUP} -destinationPath ${EVENT_STORE}
# echo ""

# echo "deduplicating segments store"
# event_store_deduplicator -sourcePath ${SEGMENT_STORE_BACKUP} -destinationPath ${SEGMENT_STORE}
# echo ""

# ---- recreation stuff

# echo "recreating event store"
# python3 -m utils.event_store_rebuilder_for_events -r ${EVENT_ROOT} -j ${EVENT_STORE}
# echo ""

# echo "recreating segment store"
# python3 -m utils.event_store_rebuilder_for_segments -r ${SEGMENT_ROOT} -j ${SEGMENT_STORE}
# echo ""

# ---- start services

echo "starting supervisord"

# exec so supervisord replaces this shell as PID 1 and receives the
# container's stop signals (SIGTERM) directly.
exec supervisord -n -c /etc/supervisor/supervisord.conf
| true
|
72f82a0af090f5cae5cb8a64b44672e9943f4c06
|
Shell
|
luotao717/arsdk
|
/rootfs/pb9x-carrier/etc/ath/confusb
|
UTF-8
| 714
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# confusb - (re)load the USB kernel modules for host or device mode.
# Usage: confusb <host|device>

if [ $# -lt 1 ]; then
    echo "confusb <host/device>"
    exit
fi

# Host mode: drop the gadget (device-side) modules and load the
# host-side EHCI + mass-storage stack.
config_usb_host() {
    rmmod ar9130_udc
    rmmod g_audio
    insmod /lib/modules/2.6.15/usb/usbcore.ko
    insmod /lib/modules/2.6.15/usb/ehci-hcd.ko
    insmod /lib/modules/2.6.15/usb/usb-storage.ko
}

# Device (gadget) mode: drop the host stack and load the AR9130 UDC
# plus the audio gadget driver.
config_usb_dev() {
    rmmod usbcore
    rmmod ehci-hcd
    rmmod usb-storage
    insmod /lib/modules/2.6.15/usb/ar9130_udc.ko
    insmod /lib/modules/2.6.15/usb/g_audio.ko
}

USB_TYPE=$1

config_usb() {
    # BUGFIX: use '=' and quoting — '==' inside [ ] is a bashism and
    # fails under strict POSIX /bin/sh (e.g. busybox ash).
    if [ "${USB_TYPE}" = "host" ]; then
        config_usb_host
    elif [ "${USB_TYPE}" = "device" ]; then
        config_usb_dev
    else
        echo "confusb <host/device>"
        exit
    fi
}

config_usb
| true
|
b66ff3b62b939b4a0f3187f241de60a121856f8d
|
Shell
|
sbrk-org/slackbuilds
|
/update.sh
|
UTF-8
| 1,294
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Generate SLACKBUILDS.TXT and SLACKBUILDS.TXT.gz describing every
# *.SlackBuild found in the tree.
# borrowed from slapt-src's README

set -e

# BUGFIX: iterate with 'read -r' instead of word-splitting $(find …),
# and quote every expansion so paths with spaces cannot break the scan.
find . -name '*.SlackBuild' | sort | while IFS= read -r sb
do
  name=$(basename "$sb" | sed -re 's/\.SlackBuild$//')
  location=$(dirname "$sb")
  if [ -f "$location/$name.info" ]
  then
    echo "SLACKBUILD NAME: $name"
    echo "SLACKBUILD LOCATION: $location"
    files=$(cd "$location" && find . -type f -printf '%P\n' | sort | xargs)
    echo "SLACKBUILD FILES: $files"
    # remove those pesky multi line listings for each interesting field
    TMP=$(mktemp)
    sed ':a;N;$!ba;s/\\\n*\s*//g' "$location/$name.info" > "$TMP"
    DOWNLOAD=$(grep ^DOWNLOAD= "$TMP" | cut -f2 -d\" )
    DOWNLOAD_x86_64=$DOWNLOAD
    MD5SUM=$(grep ^MD5SUM= "$TMP" | cut -f2 -d\" )
    MD5SUM_x86_64=$(grep ^MD5SUM_x86_64= "$TMP" | cut -f2 -d\" )
    VERSION=$(grep ^VERSION= "$TMP" | cut -f2 -d\" )
    echo "SLACKBUILD VERSION: $VERSION"
    echo "SLACKBUILD DOWNLOAD: $DOWNLOAD"
    echo "SLACKBUILD DOWNLOAD_x86_64: $DOWNLOAD_x86_64"
    echo "SLACKBUILD MD5SUM: $MD5SUM"
    if [ -f "$location/slack-desc" ]; then
      SHORTDESC=$(grep "^$name:" "$location/slack-desc" | head -n 1 | sed -re "s/^$name://")
      echo "SLACKBUILD SHORT DESCRIPTION: $SHORTDESC"
    else
      echo "SLACKBUILD SHORT DESCRIPTION: "
    fi
    echo
    rm -f "$TMP"
  fi
done > SLACKBUILDS.TXT
gzip -9 SLACKBUILDS.TXT -c > SLACKBUILDS.TXT.gz
| true
|
21154e5ef1119db904bb5310e02fae33c606bc72
|
Shell
|
screepers/screeps-launcher
|
/build.sh
|
UTF-8
| 273
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
# Cross-compile every command under ../cmd for all supported targets,
# placing the binaries into dist/ as <cmd>-<GOOS>-<GOARCH>[.exe].
pushd dist
for cmd_dir in ../cmd/*
do
    bin=$(basename "$cmd_dir")
    export GOARCH=amd64
    # windows first (matches the original order), then linux, darwin
    for os in windows linux darwin
    do
        export GOOS=$os
        ext=
        if [ "$os" = windows ]; then
            ext=.exe
        fi
        go build -o "${bin}-${GOOS}-${GOARCH}${ext}" "$cmd_dir"
    done
done
popd
| true
|
55092d60d22471be024f6e8e44d8f221eb882c6c
|
Shell
|
tobiaslocker/post-install
|
/post-install.sh
|
UTF-8
| 1,876
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# post-install.sh - one-shot workstation bootstrap: base packages,
# dotfiles, editor plugins, and the Python/Go/Rust toolchains.
# NOTE(review): assumes a Debian/Ubuntu host (apt) with network access
# and a sudo-capable user — confirm before reuse.

sudo apt update
sudo apt install -y \
  vim \
  git \
  curl \
  build-essential \
  cmake \
  python3-dev \
  python3-pip \
  vifm

# git global settings
git config --global user.email "tobias@tobiaslocker.de"
git config --global user.name "Tobias Locker"
git config --global core.editor "vim"

# dotfiles: clone the repo and symlink the rc files into $HOME
mkdir -p ~/Workspace/src/github.com/tobiaslocker
cd ~/Workspace/src/github.com/tobiaslocker
git clone https://github.com/tobiaslocker/dotfiles.git
ln -s ~/Workspace/src/github.com/tobiaslocker/dotfiles/.vimrc \
  ~/.vimrc
ln -s ~/Workspace/src/github.com/tobiaslocker/dotfiles/.global_extra_conf.py \
  ~/.global_extra_conf.py
#ln -s ~/Workspace/src/github.com/tobiaslocker/dotfiles/.bash_profile \
#  ~/.bash_profile
# replace the stock .bashrc with the one from the dotfiles repo
rm ~/.bashrc
ln -s ~/Workspace/src/github.com/tobiaslocker/dotfiles/.bashrc \
  ~/.bashrc
source ~/.bashrc

# Vundle and pathogen
mkdir -p ~/.vim/autoload ~/.vim/bundle && \
curl -LSso ~/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim

# Install plugins handled by Vundle
vim +PluginInstall +qall

# Python packages
python3 -m pip install --upgrade pip
python3 -m pip install --user \
  numpy \
  scipy \
  matplotlib \
  ipython \
  jupyter \
  pandas \
  sympy \
  nose \
  tensorflow \
  keras \
  virtualenv

# Install go (binary tarball under /usr/local/go)
GOVERSION=1.13.4
OS=linux
ARCH=amd64
cd ~/Downloads
wget https://dl.google.com/go/go${GOVERSION}.${OS}-${ARCH}.tar.gz
cd ~/Downloads
sudo tar -C /usr/local -xzf go${GOVERSION}.${OS}-${ARCH}.tar.gz

# vim-go dependencies
vim +GoInstallBinaries +qall
source .bashrc

# Install rust (rustup, non-interactive)
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -vy
rustup component add rust-src

# YouCompleteMe (needs the toolchains installed above)
cd ~/.vim/bundle/YouCompleteMe
python3 install.py --clang-completer --go-completer --rust-completer
| true
|
be03d10ebd2127bd8ae2fd2523d2be8e261cde25
|
Shell
|
lishafaev/GLDevOpsProCam
|
/metrics.sh
|
UTF-8
| 557
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# metrics.sh - print CPU or memory statistics, one "<name> <value>"
# pair per line. Usage: ./metrics.sh cpu|mem
case "$1" in
  cpu)
    # parse the %Cpu(s) summary line from a single batch run of top
    top -n 1| grep 'Cpu' | awk '{print "user "$2"\n" "system " $4"\n" "nice " $6"\n" "idle " $8"\n" "wait " $10"\n" "interrupt " $12"\n" "softirq " $14"\n" "steal " $16}'
    ;;
  mem)
    # RAM and swap figures from free(1)
    free | grep 'Mem' | awk '{print "total " $2"\n" "used " $3"\n" "free " $4"\n" "shared " $5}'
    free | grep 'Swap' | awk '{print "Swap total " $2"\n" "Swap used " $3"\n" "Swap free " $4}'
    ;;
  *)
    echo "No parameter found or wrong parameter. Please specify parameter: 'cpu' or 'mem'. For example, ./metrics.sh cpu"
    ;;
esac
| true
|
79209ec90c9bb2316bd311cfdacdd230388b0c4d
|
Shell
|
MarkHalls/CTV-Utility
|
/collector_scripts/logHealthMonitor.sh
|
UTF-8
| 4,485
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
### logHealthMonitor.sh - Monitors the log output directory for excessive unzipped logs and the unmatched lines directory as measure that the icservices.sh script is still running.
#
#
## Written by Chase Hatch (chatch@securityondemand.com) under consultation of Mark Halls (mhalls@securityondemand.com)
## Last modified Nov/3/2015

output='/opt/open/stm/slm/output/'
unmatched_lines='/opt/open/stm/tmp/'

last_UML_dir_size_count=0 # initialize; used to keep track of if service is running.

THRESH_MAX_UNGZIPPED=10
LAST_PASS_GZIPPED=0 # keeps track of num gzipped files on last pass
THRESH_MIN_GZIPPED=5
THRESH_MAX_GZIPPED=10
THRESH_MAX_DISK_USAGE=40 # percentage
EVALUATE_INTERVAL=600 # in seconds
THRESH_DIFFERENCE=3

## High-volume clients use Unmatched Lines as a metric of health monitoring.
UnmatchedLinesIsMetric=0 # only sets to 1 if the client is listed in the array.
declare -a HighVolClients=( "Sitel" ) # array; syntax = ( "one", "two", "three" )
for i in "${HighVolClients[@]}"; do if [[ $(hostname |grep -ci `echo $i |sed s/,//g`) -ge 1 ]]; then UnmatchedLinesIsMetric=1; fi; done

# Ensure this monitor is restarted at boot.
logHealthMonitor=$(cat /etc/rc.d/rc.local | grep logHealthMonitor.sh)
if [ -z "$logHealthMonitor" ]; then
	echo '/usr/bin/logHealthMonitor.sh > /dev/null 2>&1 &' >> /etc/rc.d/rc.local
fi

cd $output # ensure we're in the right directory

while [ -d "$output" ]; do
	# BUGFIX: the .gz/.sha counts were computed once before the loop —
	# and with -name '*.gz$', where find treats '$' as a literal glob
	# character (not a regex anchor), so GZ_COUNT was always 0.
	# Recompute them with the correct patterns on every pass.
	GZ_COUNT=$(find $output -type f -name '*.gz' | wc -l)
	SHA_COUNT=$(find $output -type f -name '*.gz.sha' | wc -l)

	if [[ $UnmatchedLinesIsMetric -eq 1 ]]; then
	# IF this host has been explicitly noted as needing to scrutinize the unmatched lines dir for activity (i.e., ic-collector service running?)
		if [[ $(du -scb $unmatched_lines |cut -f 1 |head -n 1) -eq $last_UML_dir_size_count ]]; then
		# if true then we know with almost 100% confidence that the service must have stopped so we need to restart it; this should always be a different value across two pollings.
			# STUB: restart the service
			icservices.sh
			logger "$(date) /usr/bin/logHealthMonitor.sh: Restarted icservices.sh." |tee -a /var/log/icprocess.log
		fi
	fi

	if [[ $(ls -al $output | grep -v .gz | grep -v .sha | wc -l) -gt $THRESH_MAX_UNGZIPPED ]] || [[ $(df |grep -e /$ | cut -f 1 -d % | cut -f 8 -d " ") -gt $THRESH_MAX_DISK_USAGE ]]; then
	# IF $output dir has more unzipped files than a given threshold, then cleanup script needs to be running.
	# OR
	# IF $output dir's disk partition usage greater than threshold, then cleanup script needs to be running.
		# STUB: Mark's multi-thread cleanup.sh bash one-liner
		logger "$(date) /usr/bin/logHealthMonitor.sh: Too many files in output... running cleanup script instances per-file." |tee -a /var/log/icprocess.log
		$(for i in $(ls -1tr /opt/open/stm/slm/output/ | grep -v \.gz); do /boot/cleanupparallel.sh $i & done;)
	fi

	if [[ $(ls $output | grep \.gz | wc -l) -ge $THRESH_MAX_GZIPPED ]] && [[ $(ls $output | grep \.gz | wc -l) -ge $THRESH_MIN_GZIPPED ]]; then
	# IF the gzipped files aren't transferring, restart the services.
	# ONLY do so if the number of gzipped files is above a certain number.
		logger "$(date) /usr/bin/logHealthMonitor.sh: Gzipped files not transferring out; restarting the SOD services to remediate this." |tee -a /var/log/icprocess.log
		killall -r SOD*
	fi

	if [[ $(($GZ_COUNT - $SHA_COUNT)) -ge $THRESH_DIFFERENCE ]] || [[ $(($SHA_COUNT - $GZ_COUNT)) -ge $THRESH_DIFFERENCE ]]; then
	# IF (number of .gz) minus (number of .sha) greater-than-or-equal-to $THRESH_DIFFERENCE
	# OR
	# IF (number of .sha) minus (number of .gz) greater-than-or-equal-to $THRESH_DIFFERENCE
	# THEN delete all .sha files, and re-generate per each .gz file.
		# BUGFIX: '$SHA_count' (wrong case) always expanded empty here.
		logger "$(date) /usr/bin/logHealthMonitor.sh: Detected variation between count of .gz files ($GZ_COUNT) and matching .sha files ($SHA_COUNT) in output dir ($output). Re-generating .sha files to fix this." |tee -a /var/log/icprocess.log
		rm -f /opt/open/stm/slm/output/*.sha
		for i in $( ls -1tr /opt/open/stm/slm/output/ |grep \.gz | grep -v \.sha ); do sha1sum /opt/open/stm/slm/output/$i > /opt/open/stm/slm/output/$i.sha & done;
	fi

	LAST_PASS_GZIPPED=$(ls $output | grep \.gz | wc -l)
	last_UML_dir_size_count=$(du -scb $unmatched_lines |cut -f 1 |head -n 1) # grab size of dir
	sleep $EVALUATE_INTERVAL #, then sleep for n seconds
done
#
| true
|
d6083e758f13c90b8e17b471ce7a89bb61bda616
|
Shell
|
KaiSforza/dotfiles
|
/mkshrc
|
UTF-8
| 10,199
| 3.140625
| 3
|
[] |
no_license
|
# $Id$
# $MirOS: src/bin/mksh/dot.mkshrc,v 1.79 2013/04/05 15:39:52 tg Exp $
#-
# Copyright (c) 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013
# Thorsten Glaser <tg@mirbsd.org>
#
# Provided that these terms and disclaimer and all copyright notices
# are retained or reproduced in an accompanying document, permission
# is granted to deal in this work without restriction, including un-
# limited rights to use, publicly perform, distribute, sell, modify,
# merge, give away, or sublicence.
#
# This work is provided "AS IS" and WITHOUT WARRANTY of any kind, to
# the utmost extent permitted by applicable law, neither express nor
# implied; without malicious intent or gross negligence. In no event
# may a licensor, author or contributor be held liable for indirect,
# direct, other damage, loss, or other issues arising in any way out
# of dealing in the work, even if advised of the possibility of such
# damage or existence of a defect, except proven that it results out
# of said person's immediate fault when using the work as intended.
#-
# ${ENV:-~/.mkshrc}: mksh initialisation file for interactive shells
# Set colors {{{
unset ALL_OFF BOLD BLUE GREEN RED YELLOW BRED
ALL_OFF="$(tput sgr0)"
BOLD="$(tput bold)"
BLUE="$(tput setaf 4)"
GREEN="$(tput setaf 2)"
RED="$(tput setaf 1)"
BRED="$(tput setaf 1)"
YELLOW="$(tput setaf 3)"
#}}}
# Prompt: {{{
PS1='#'; (( USER_ID )) && PS1='$'; [[ ${HOSTNAME:=$(ulimit -c 0; hostname -s \
2>/dev/null)} = *([ ]|localhost) ]] && HOSTNAME=$(ulimit -c 0; hostname \
2>/dev/null); : ${EDITOR:=/bin/ed} ${HOSTNAME:=nil} ${TERM:=vt100}
function precmd {
local e=$?
(( e )) && print -n "${RED}${e}${ALL_OFF}|"
# precmd is required to retain the errorlevel when ${ …;} is used
return $e
}
unset _USER_NAME
if [[ -n $SSH_CONNECTION ]]; then
_USER_NAME="${BOLD}${YELLOW}${USER:=$(ulimit -c 0; id -un 2>/dev/null || echo )}${ALL_OFF}"
else
_USER_NAME="${BOLD}${BLUE}${USER:=$(ulimit -c 0; id -un 2>/dev/null || echo )}${ALL_OFF}"
fi
PS1='$(precmd)${_USER_NAME}@${GREEN}${HOSTNAME%%.*}${ALL_OFF}:${YELLOW}${PWD}${ALL_OFF} '"$PS1 "
: ${MKSH:=$(whence -p mksh)}; export EDITOR HOSTNAME MKSH TERM USER
# }}}
# Some default, unneeded aliases {{{
# Just commenting these out.
#whence -p rot13 >/dev/null || alias rot13='tr \
# abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ \
# nopqrstuvwxyzabcdefghijklmNOPQRSTUVWXYZABCDEFGHIJKLM'
#whence -p hd >/dev/null || function hd {
# hexdump -e '"%08.8_ax " 8/1 "%02X " " - " 8/1 "%02X "' \
# -e '" |" "%_p"' -e '"|\n"' "$@"
#}
#}}}
# Berkeley C shell compatible dirs, popd, and pushd functions#{{{
# Z shell compatible chpwd() hook, used to update DIRSTACK[0]
DIRSTACKBASE=$(realpath ~/. 2>/dev/null || print -nr -- "${HOME:-/}")
set -A DIRSTACK
# chpwd: refresh DIRSTACK[0] with the resolved cwd, abbreviating the
# home-directory prefix to '~'.
function chpwd {
	DIRSTACK[0]=$(realpath . 2>/dev/null || print -r -- "$PWD")
	[[ $DIRSTACKBASE = ?(*/) ]] || \
	    DIRSTACK[0]=${DIRSTACK[0]/#$DIRSTACKBASE/~}
	:
}
chpwd .
# cd wrapper: run the builtin, then keep the directory stack in sync.
function cd {
	builtin cd "$@"
	chpwd "$@"
}
# cd_csh DIR: cd with csh-style error reporting; a leading '~' is
# expanded to $DIRSTACKBASE by hand.
function cd_csh {
	local d t=${1/#~/$DIRSTACKBASE}
	if ! d=$(builtin cd "$t" 2>&1); then
		print -u2 "${1}: ${d##*$t - }."
		return 1
	fi
	cd "$t"
}
# dirs [-lvn]: print the directory stack.
#   -l  expand '~' back to the real home path
#   -v  one entry per line, prefixed with its stack index
#   -n  wrap the single-line listing before column 79
function dirs {
	local d dwidth
	local -i fl=0 fv=0 fn=0 cpos=0
	while getopts ":lvn" d; do
		case $d in
		(l)	fl=1 ;;
		(v)	fv=1 ;;
		(n)	fn=1 ;;
		(*)	print -u2 'Usage: dirs [-lvn].'
			return 1 ;;
		esac
	done
	shift $((OPTIND - 1))
	if (( $# > 0 )); then
		print -u2 'Usage: dirs [-lvn].'
		return 1
	fi
	if (( fv )); then
		fv=0
		while (( fv < ${#DIRSTACK[*]} )); do
			d=${DIRSTACK[fv]}
			(( fl )) && d=${d/#~/$DIRSTACKBASE}
			print -r -- "$fv $d"
			let fv++
		done
	else
		fv=0
		while (( fv < ${#DIRSTACK[*]} )); do
			d=${DIRSTACK[fv]}
			(( fl )) && d=${d/#~/$DIRSTACKBASE}
			# ${%d} is the display width of $d (mksh); -1 if unknown
			(( dwidth = (${%d} > 0 ? ${%d} : ${#d}) ))
			if (( fn && (cpos += dwidth + 1) >= 79 && \
			    dwidth < 80 )); then
				print
				(( cpos = dwidth + 1 ))
			fi
			print -nr -- "$d "
			let fv++
		done
		print
	fi
	return 0
}
# popd [-lvn] [+<n>]: remove entry <n> (default 0, the current
# directory) from the stack, cd to the new top, then show the stack
# via dirs (options are forwarded).
function popd {
	local d fa
	local -i n=1
	while getopts ":0123456789lvn" d; do
		case $d in
		(l|v|n)	fa+=" -$d" ;;
		(+*)	n=2
			break ;;
		(*)	print -u2 'Usage: popd [-lvn] [+<n>].'
			return 1 ;;
		esac
	done
	shift $((OPTIND - n))
	n=0
	if (( $# > 1 )); then
		print -u2 popd: Too many arguments.
		return 1
	elif [[ $1 = ++([0-9]) && $1 != +0 ]]; then
		if (( (n = ${1#+}) >= ${#DIRSTACK[*]} )); then
			print -u2 popd: Directory stack not that deep.
			return 1
		fi
	elif [[ -n $1 ]]; then
		print -u2 popd: Bad directory.
		return 1
	fi
	if (( ${#DIRSTACK[*]} < 2 )); then
		print -u2 popd: Directory stack empty.
		return 1
	fi
	# drop slot n and re-pack the array
	unset DIRSTACK[n]
	set -A DIRSTACK -- "${DIRSTACK[@]}"
	cd_csh "${DIRSTACK[0]}" || return 1
	dirs $fa
}
# pushd [-lvn] [<dir>|+<n>]: with no argument swap the two top stack
# entries; with +<n> rotate the stack n times; with <dir> push it and
# cd there. Remaining options are forwarded to dirs.
function pushd {
	local d fa
	local -i n=1
	while getopts ":0123456789lvn" d; do
		case $d in
		(l|v|n)	fa+=" -$d" ;;
		(+*)	n=2
			break ;;
		(*)	print -u2 'Usage: pushd [-lvn] [<dir>|+<n>].'
			return 1 ;;
		esac
	done
	shift $((OPTIND - n))
	if (( $# == 0 )); then
		# swap top two entries
		if (( ${#DIRSTACK[*]} < 2 )); then
			print -u2 pushd: No other directory.
			return 1
		fi
		d=${DIRSTACK[1]}
		DIRSTACK[1]=${DIRSTACK[0]}
		cd_csh "$d" || return 1
	elif (( $# > 1 )); then
		print -u2 pushd: Too many arguments.
		return 1
	elif [[ $1 = ++([0-9]) && $1 != +0 ]]; then
		if (( (n = ${1#+}) >= ${#DIRSTACK[*]} )); then
			print -u2 pushd: Directory stack not that deep.
			return 1
		fi
		# rotate: move the top entry to the bottom, n times
		while (( n-- )); do
			d=${DIRSTACK[0]}
			unset DIRSTACK[0]
			set -A DIRSTACK -- "${DIRSTACK[@]}" "$d"
		done
		cd_csh "${DIRSTACK[0]}" || return 1
	else
		set -A DIRSTACK -- placeholder "${DIRSTACK[@]}"
		cd_csh "$1" || return 1
	fi
	dirs $fa
}
# pager (not control character safe)
# smores [file...]: minimal pager; pauses with an inverse-video
# '--more--' after each screenful, any line starting with q/Q quits.
function smores {
	local dummy line llen curlin=0
	cat "$@" | while IFS= read -r line; do
		# ${%line} = display width (mksh); fall back to byte length
		llen=${%line}
		(( llen == -1 )) && llen=${#line}
		# number of screen rows this line occupies
		(( llen = llen ? (llen + COLUMNS - 1) / COLUMNS : 1 ))
		if (( (curlin += llen) >= LINES )); then
			print -n -- '\033[7m--more--\033[0m'
			read -u1 dummy
			[[ $dummy = [Qq]* ]] && return 0
			curlin=$llen
		fi
		print -r -- "$line"
	done
}
# base64 encoder and decoder, RFC compliant, NUL safe
# Lb64decode [string]: decode base64 from the arguments (or stdin when
# none) to raw bytes on stdout. mksh-isms: '1#$c' is the ordinal of
# character c; 'set +U' switches to byte (non-UTF-8) mode.
function Lb64decode {
	[[ -o utf8-mode ]]; local u=$?
	set +U
	local c s="$*" t=
	# the 'x' sentinel preserves trailing newlines of the stdin read
	[[ -n $s ]] || { s=$(cat;print x); s=${s%x}; }
	local -i i=0 j=0 n=${#s} p=0 v x
	local -i16 o
	while (( i < n )); do
		c=${s:(i++):1}
		case $c in
		(=)	break ;;
		([A-Z])	(( v = 1#$c - 65 )) ;;
		([a-z])	(( v = 1#$c - 71 )) ;;
		([0-9])	(( v = 1#$c + 4 )) ;;
		(+)	v=62 ;;
		(/)	v=63 ;;
		(*)	continue ;;
		esac
		(( x = (x << 6) | v ))
		# emit one output byte for every sextet after the first
		case $((p++)) in
		(0)	continue ;;
		(1)	(( o = (x >> 4) & 255 )) ;;
		(2)	(( o = (x >> 2) & 255 )) ;;
		(3)	(( o = x & 255 ))
			p=0
			;;
		esac
		t+=\\x${o#16#}
		# flush the \x escape buffer every 4096 bytes
		(( ++j & 4095 )) && continue
		print -n $t
		t=
	done
	print -n $t
	(( u )) || set -U
}
# Base64 alphabet, indexed 0..63.
set -A Lb64encode_code -- A B C D E F G H I J K L M N O P Q R S T U V W X Y Z \
    a b c d e f g h i j k l m n o p q r s t u v w x y z 0 1 2 3 4 5 6 7 8 9 + /
# Lb64encode [string]: base64-encode the arguments (or stdin when
# none), wrapped at 76 columns. 'read -raN-1 s' reads all input as an
# array of byte ordinals (mksh); the final unset drops the sentinel
# element left by reading from "$*".
function Lb64encode {
	[[ -o utf8-mode ]]; local u=$?
	set +U
	local c s t
	if (( $# )); then
		read -raN-1 s <<<"$*"
		unset s[${#s[*]}-1]
	else
		read -raN-1 s
	fi
	local -i i=0 n=${#s[*]} j v
	while (( i < n )); do
		# pack up to three input bytes into a 24-bit group
		(( v = s[i++] << 16 ))
		(( j = i < n ? s[i++] : 0 ))
		(( v |= j << 8 ))
		(( j = i < n ? s[i++] : 0 ))
		(( v |= j ))
		t+=${Lb64encode_code[v >> 18]}${Lb64encode_code[v >> 12 & 63]}
		c=${Lb64encode_code[v >> 6 & 63]}
		# pad with '=' when the final group has fewer than 3 bytes
		if (( i <= n )); then
			t+=$c${Lb64encode_code[v & 63]}
		elif (( i == n + 1 )); then
			t+=$c=
		else
			t+===
		fi
		if (( ${#t} == 76 || i >= n )); then
			print $t
			t=
		fi
	done
	(( u )) || set -U
}
# mksh NUL counting, never zero
# NZAT hash (a Jenkins one-at-a-time variant). Two families share the
# accumulator Lnzathash_v:
#   Lnzaathash* — plain NZAT ("NUL allowed")
#   Lnzathash*  — like NZAT but never yields 0 ("never zero")
# '((# … ))' forces unsigned 32-bit arithmetic (mksh).
typeset -Z11 -Uui16 Lnzathash_v
# Fold the argument bytes (or stdin when none) into Lnzathash_v.
function Lnzathash_add {
	[[ -o utf8-mode ]]; local u=$?
	set +U
	local s
	if (( $# )); then
		read -raN-1 s <<<"$*"
		unset s[${#s[*]}-1]
	else
		read -raN-1 s
	fi
	local -i i=0 n=${#s[*]}
	while (( i < n )); do
		((# Lnzathash_v = (Lnzathash_v + s[i++] + 1) * 1025 ))
		((# Lnzathash_v ^= Lnzathash_v >> 6 ))
	done
	(( u )) || set -U
}
# Finalise and print the plain NZAT hash.
function Lnzaathash_end {
	((# Lnzathash_v *= 1025 ))
	((# Lnzathash_v ^= Lnzathash_v >> 6 ))
	((# Lnzathash_v += Lnzathash_v << 3 ))
	((# Lnzathash_v = (Lnzathash_v ^
	    (Lnzathash_v >> 11)) * 32769 ))
	print ${Lnzathash_v#16#}
}
# One-shot plain NZAT hash of the arguments (or stdin).
function Lnzaathash {
	Lnzathash_v=0
	Lnzathash_add "$@"
	Lnzaathash_end
}
# One-shot never-zero NZAT hash of the arguments (or stdin).
function Lnzathash {
	Lnzathash_v=0
	Lnzathash_add "$@"
	Lnzathash_end
}
# Finalise like Lnzaathash_end, but map a zero state to 1 first.
function Lnzathash_end {
	if (( Lnzathash_v )); then
		Lnzaathash_end
	else
		Lnzathash_v=1
		print ${Lnzathash_v#16#}
	fi
}
# strip comments (and leading/trailing whitespace if IFS is set) from
# any file(s) given as argument, or stdin if none, and spew to stdout
function Lstripcom {
	cat "$@" | { set -o noglob; while read _line; do
		_line=${_line%%#*}
		[[ -n $_line ]] && print -r -- $_line
	done; }
}

# give MidnightBSD's laffer1 a bit of csh feeling
# setenv NAME VALUE: csh-style environment assignment.
# NOTE(review): the eval means NAME must be a sane identifier.
function setenv {
	eval export "\"$1\""'="$2"'
}
#}}}
: place customisations below this line

# prepend personal bin dirs to PATH (each added only once)
for p in ~/.etc/bin ~/bin; do
	[[ -d $p/. ]] || continue
	[[ :$PATH: = *:$p:* ]] || PATH=$p:$PATH
done

export SHELL=$MKSH MANWIDTH=80 LESSHISTFILE=-
alias cls='print -n \\033c'
export HISTFILE="$HOME/.mksh_history"

# some basic aliases: {{{
# main aliases for system administration {{{
alias s=sudo
alias sudo='sudo '
alias pc=pacman
alias sc=systemctl
# BUGFIX: was 'systemct --user' (missing the trailing 'l'); that
# command does not exist.
alias scu='systemctl --user'
alias jc=journalctl
alias aurct='cower'
alias grep='grep --color=auto'
alias psg='ps aux | grep'
# }}}
# Aliases for moving around the filesystem {{{
alias dir='ls -1'
alias c=clear
alias ..='cd ..'
# drop any pre-existing ls alias before redefining it
alias ls=ls
unalias ls
alias ls='ls --color=auto -F'
alias l='ls'
alias la='l -a'
alias lh='la -h'
alias ll='l -l'
alias lo='l -alo'
# }}}
alias doch='fc -ln -1 | sudo mksh -s'
alias ytv='youtube-viewer -4 --mplayer=mpv'
alias present_term="urxvtc -title 'presenting' -fn 'xft:Bitstream Vera Sans Mono:pixelsize=14' -fb 'xft:Bitstream Vera Sans Mono:pixelsize=14:bold'"
# }}}

#unset LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_IDENTIFICATION LC_MONETARY \
#    LC_NAME LC_NUMERIC LC_TELEPHONE LC_TIME
#p=en_GB.UTF-8
#set -U
#export LANG=C LC_CTYPE=$p LC_MEASUREMENT=$p LC_MESSAGES=$p LC_PAPER=$p
unset p

: place customisations above this line
# vim: set ft=sh noet :
| true
|
36b6a818e086927075e1710b73132b5bf233943c
|
Shell
|
ck0/dotfiles
|
/setup.zsh
|
UTF-8
| 746
| 3.359375
| 3
|
[] |
no_license
|
#! /usr/bin/env zsh
#
# Symlink the dotfiles from this repository into $HOME, moving any
# pre-existing files into a timestamped backup directory first.

# because OSX's `readlink` is all retarded
# (resolve a directory argument to its physical path)
function readlinkf () {(cd -P "$1" && pwd)}

dotdir=$(readlinkf "$(pwd)")
backupdir=dotfiles.$(date +%s).backup

# dotrelink SRC DST: back up an existing DST into $backupdir, then
# symlink DST -> $dotdir/SRC.
# BUGFIX: all expansions are quoted; the originals broke on paths
# containing spaces or glob characters.
function dotrelink () {
    if [[ -a "$2" ]]; then
        mv -v "$2" "$backupdir";
    fi
    ln -svf "$dotdir/$1" "$2"
}

(
    cd "$HOME"
    mkdir "$backupdir"

    # nope
    #for f in $(find $dotdir -type -f -name "*dot"); do
    #dotrelink $f $(echo $f | sed "s/dot/./")
    #done

    dotrelink dotaliases .aliases
    dotrelink dotctags .ctags
    dotrelink dotinputrc .inputrc
    dotrelink dotpentadactylrc .pentadactylrc
    dotrelink dotpythonrc .pythonrc
    dotrelink dotrubyrc .rubyrc
    dotrelink dotscreenrc .screenrc
    dotrelink dotvimrc .vimrc
    dotrelink dotzshrc .zshrc

    source .zshrc
)
| true
|
8bf3cd6a3a31d6a021f2710a87bb67e556291f92
|
Shell
|
lnsongxf/EURACE
|
/models/EURACE_Model/branches/Experiment_setup_linux/plots/latex_script_tex_figs.sh
|
UTF-8
| 2,417
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
######### CREATION OF THE SPECIFIC SETTINGS #########################################################
# Script to create the file specific.xml that contains the specific parameter settings for the cases
# - Enter folder ./duration_x/intensity_y/frequency_z
# - Write xml tags and parameter settings to the file specific.xml
# NOTE(review): despite the header above, this version writes LaTeX
# (plot.tex per case folder, plus a top-level main.tex that \input's
# them) rather than specific.xml — confirm the header is stale.
#####################################################################################################

BASE=$PWD
F0="symmetric_shock2"

#F1="40 120 240 560" #duration: 240+"40 120 240 560"=280 360 480 801
#F2="0.01 0.05 0.10 0.20" #intensity
#F3="0 20 40 60" #frequency
F1="40 240"
#F1="560" #duration: 240+"40 120 240 560"=280 360 480 801
F2="0.01 0.05 0.10" #intensity
F3="0 20 60" #frequency

#Create main latex document
echo ' Creating main latex document...'
rm -f main.tex
echo '\documentclass{article}'>>main.tex
echo '\usepackage{epsfig,graphicx,verbatim, boxedminipage}'>>main.tex
echo '\begin{document}'>>main.tex

echo ' Creating latex figures in folder hierarchy...'
# one plot.tex per duration/intensity/frequency combination
for folder0 in $F0; do
 for f1 in $F1; do
  folder1=duration_$f1
  for f2 in $F2; do
   folder2=intensity_$f2
   for f3 in $F3; do
    folder3=frequency_$f3
    #echo $PWD
    cd ./$folder0/$folder1/$folder2/$folder3
    rm -f plot.tex
    echo '\begin{figure}[ht!]'>>plot.tex
    echo '\centering\leavevmode'>>plot.tex
    echo '\begin{minipage}{14cm}'>>plot.tex
    echo '\centering\leavevmode'>>plot.tex
    echo '{$d='$f1', i='$f2', f='$f3'$}\\'>>plot.tex
    echo '\includegraphics[width=4cm]{'./$folder0/$folder1/$folder2/$folder3/'IGFirm-capital_good_price(double).png}'>>plot.tex
    echo '\includegraphics[width=4cm]{'./$folder0/$folder1/$folder2/$folder3/'Eurostat-gdp(double).png}'>>plot.tex
    echo '\includegraphics[width=4cm]{'./$folder0/$folder1/$folder2/$folder3/'Eurostat-unemployment_rate(double).png}'>>plot.tex
    echo '\end{minipage}'>>plot.tex
    echo '%\caption{$d='$f1', i='$f2', f='$f3'$}'>>plot.tex
    echo '%\label{Figure: IGFirm-capital_good_price}'>>plot.tex
    echo '\end{figure}'>>plot.tex
    echo ''>>plot.tex
    cd $BASE
    #In main document: add an input line
    echo '\input{'./$folder0/$folder1/$folder2/$folder3'/plot}'>>main.tex
   done
  done
 done
done

#Close latex document
echo '\end{document}'>>main.tex
echo ' Finished creation of latex figures files.'
| true
|
1df7c331051d5f40b898e2560f4b2fc583267ceb
|
Shell
|
yarem4uk/dotfiles
|
/files/zshrc
|
UTF-8
| 4,214
| 2.890625
| 3
|
[] |
no_license
|
# Interactive zsh setup: completion, history, word handling, aliases.
autoload -Uz compinit && compinit -i

umask 022
# default mask
# -rwxr-xr-x
# 777 - 022 = 755

setopt autocd

### History configuration
HISTFILE=~/.zsh_history
HISTSIZE=5000
SAVEHIST=5000
setopt HIST_IGNORE_ALL_DUPS
setopt HIST_REDUCE_BLANKS
setopt HIST_IGNORE_SPACE
setopt INC_APPEND_HISTORY
setopt share_history
setopt append_history

## Extended globbing
setopt extended_glob

# Word separators for line editing (WORDCHARS ends up as just '@')
WORDCHARS=''@
# free Ctrl-S / Ctrl-Q from terminal flow control
stty stop undef
stty start undef

# export EDITOR='vim'
# export PAGER='nvimpager'

## Load aliases and extra functions when present
if [[ -f $HOME/.aliases ]]; then
    source $HOME/.aliases
fi
if [[ -f $HOME/.zsh_functions ]]; then
    source $HOME/.zsh_functions
fi

alias ls='ls --color=auto --group-directories-first'

## Colored grep match highlighting
export GREP_COLOR='1;33'

_comp_options+=(globdots) # completion for dotfiles
zstyle ':completion:*' menu select

# vi-style line editing
bindkey -v
export KEYTIMEOUT=1
export KEYTIMEOUT=1
# Right-prompt helper: print the current git branch — red when the
# worktree is dirty, blue when clean, nothing outside a repository.
# BUGFIX/cleanup vs the original: use $(…) instead of backticks, and
# declare everything local (the old version leaked white/blue/red/
# yellow/reset into the global scope on every prompt and never used
# white/yellow at all).
function gprompt() {
    local branch dirty red blue reset
    branch=$(git symbolic-ref HEAD 2>/dev/null | cut -d / -f 3)
    dirty=$(git diff --shortstat 2> /dev/null | tail -n1)
    red=%{$fg[red]%}
    blue=%{$fg[blue]%}
    reset=%{$reset_color%}
    if [[ "$branch" != "" ]]; then
        if [[ $dirty != "" ]]; then
            echo "$red $branch $reset"
        else
            echo "$blue $branch $reset"
        fi
    fi
}
#PROMPT for user and root
if [[ $EUID == 0 ]]; then
PROMPT=' %{$fg[red]%} (root) %{$fg[white]%}%~
%{$fg[white]%}->%{$reset_color%} '
else
# PROMPT=' $(tput setaf 246)%~$(tput sgr0)
PROMPT=' %{$fg[white]%}%~
%{$fg[green]%}->%{$reset_color%} '
# %{$fg[red]%}✘%{$reset_color%} '
fi
#✗
#Поиск по истории клавишами p и n в стиле vim
bindkey '^p' history-beginning-search-backward
bindkey '^n' history-beginning-search-forward
#
# export TERM='xterm-256color'
autoload -U colors && colors
autoload -Uz surround
setopt prompt_subst

# base16-shell color theme helper, if installed
# BASE16_SHELL=$HOME/.config/base16-shell/
# [ -n "$PS1" ] && [ -s $BASE16_SHELL/profile_helper.sh ] && eval "$($BASE16_SHELL/profile_helper.sh)"
BASE16_SHELL="$HOME/.config/base16-shell/"
[ -n "$PS1" ] && \
    [ -s "$BASE16_SHELL/profile_helper.sh" ] && \
        eval "$("$BASE16_SHELL/profile_helper.sh")"

fpath=(~/.zsh/completion $fpath)

# Hooks
autoload -U add-zsh-hook
# Run ls automatically after every interactive `cd`.
function auto-ls-after-cd() {
    emulate -L zsh
    if [ "$ZSH_EVAL_CONTEXT" = "toplevel:shfunc" ]; then
        ls
    fi
}
add-zsh-hook chpwd auto-ls-after-cd
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# export FZF_DEFAULT_COMMAND="fd --type file . $HOME"
export FZF_DEFAULT_COMMAND="fd --type f ."
# export FZF_DEFAULT_COMMAND="rg --files --smart-case"
# export FZF_DEFAULT_OPTS="--inline-info"

# zle widget: fuzzy-pick a bookmarked directory and cd into it
# (first whitespace-separated field of each bookmarks.cfg line).
function fzf-bookmarks-widget() {
    cd $(cat "$HOME/.config/bookmarks.cfg" | fzf --tiebreak=begin --tac --height 10%| awk '{print $1}')
    # cd $(ls | fzf --tiebreak=begin --tac --height 10%| awk '{print $1}')
    zle reset-prompt
    zle redisplay
}

# zle widget: fuzzy-search the whole history into the edit buffer.
function fzf-history-widget() {
    LBUFFER=$(fc -lnr 1 | fzf --tiebreak=begin)
    zle redisplay
}
# Auto-pairing widgets: typing an opening delimiter also inserts the
# matching closing one after the cursor (LBUFFER = text before the
# cursor, RBUFFER = text after it).
_insert_pair() {
    LBUFFER="${LBUFFER}$1"
    RBUFFER="$2${RBUFFER}"
}
function insert-double-roundBrackets() {
    _insert_pair '(' ')'
}
function insert-double-curlyBrackets() {
    _insert_pair '{' '}'
}
function insert-double-singleQuotes() {
    _insert_pair "'" "'"
}
function insert-double-singleGrave() {
    _insert_pair '`' '`'
}
# Register the widgets above with the line editor and bind them.
zle -N fzf-bookmarks-widget
bindkey '^o' fzf-bookmarks-widget
zle -N fzf-history-widget
bindkey '^k' fzf-history-widget
zle -N insert-double-roundBrackets
bindkey '(' insert-double-roundBrackets
zle -N insert-double-curlyBrackets
bindkey '{' insert-double-curlyBrackets
zle -N insert-double-singleQuotes
bindkey "'" insert-double-singleQuotes
zle -N insert-double-singleGrave
bindkey "\`" insert-double-singleGrave

# vim-surround style bindings (surround was autoloaded above)
zle -N delete-surround surround
zle -N add-surround surround
zle -N change-surround surround
bindkey -a cs change-surround
bindkey -a ds delete-surround
bindkey -a ys add-surround
# bindkey -M visual S add-surround
| true
|
4b1bab3033e73f1772b1f9988ce703171b92f637
|
Shell
|
microsoft/FreeBSD-Test-Automation
|
/HyperV/WS2008R2/lisa/remote-scripts/freebsd/MultiDisk.sh
|
UTF-8
| 4,340
| 4.21875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# MultiDisk.sh
#
# Description:
# This script was created to automate the testing of a FreeBSD
# Integration services. This script test the detection of disks
# inside the Linux VM by performing the following
# Steps:
# 1. Make sure the device file was created
# 2. fdisk the device
# 3. newfs the device
# 4. Mount the device
# 5. Create a directory on the device
# 6. Copy a file to the directory
# 7. Unmount the device
#
# Test parameters used by this scripts are:
# NO: Number of disks attached
# Example: If we have SCSI=0,0
# SCSI=0,1
# Then NO=2
#
# Note: Only supports BIS driver
#
#####################################################################
# Test-state strings the LISA framework recognises.
ICA_TESTRUNNING="TestRunning"
ICA_TESTCOMPLETED="TestCompleted"
ICA_TESTABORTED="TestAborted"
ICA_TESTFAILED="TestFailed"

# LogMsg MESSAGE: echo MESSAGE prefixed with a timestamp.
# BUGFIX: $() instead of backticks, and the whole line is quoted so
# whitespace in MESSAGE is preserved (the old unquoted echo collapsed
# runs of spaces).
LogMsg()
{
    echo "$(date "+%a %b %d %T %Y") : ${1}"
}
# UpdateTestState STATE: persist STATE where the LISA framework polls
# for it ($HOME/state.txt). BUGFIX: quote both expansions.
UpdateTestState()
{
    echo "$1" > "$HOME/state.txt"
}
#
# Let ICA know we are running
#
UpdateTestState $ICA_TESTRUNNING

#
# Cleanup from any previous test run
#
if [ -e ~/summary.log ]; then
    rm -rf ~/summary.log
fi
touch ~/summary.log

#
# Source constants.sh
#
if [ -e ~/constants.sh ]; then
    . ~/constants.sh
else
    LogMsg "ERROR: Unable to source the constants file."
    UpdateTestState $ICA_TESTABORTED
    exit 10
fi

#
# Make sure constants.sh defines the test parameters we need
# (NO = number of attached data disks, TC_COVERED = test case id)
#
if [ ${NO:-UNDEFINED} = "UNDEFINED" ]; then
    LogMsg "Error: constants.sh did not define the variable TEST_DEVICE"
    UpdateTestState $ICA_TESTABORTED
    exit 20
fi

if [ ${TC_COVERED:-UNDEFINED} = "UNDEFINED" ]; then
    LogMsg "Error: constants.sh did not define the variable TC_COVERED"
    UpdateTestState $ICA_TESTABORTED
    exit 30
fi

echo "Covers ${TC_COVERED}" >> ~/summary.log
echo "Number of disks attached : $NO" >> ~/summary.log

#
# Operations performed on disks: for each /dev/da<i> (i = 1..NO)
# wipe any old partition, create GPT + UFS, mount, write/delete a
# file, then unmount. Each failure path reports a distinct exit code.
# NOTE(review): disk numbering starts at da1, assuming da0 is the
# system disk — confirm against the test setup.
#
i=1
while [ $i -le $NO ]
do
    LogMsg "TEST DEVICE is /dev/da${i}"
    echo "TEST DEVICE is /dev/da${i}" >> ~/summary.log

    DEVICE=~/disk.txt
    ls /dev > $DEVICE

    # if a first partition already exists, destroy the old layout
    DISK=`echo /dev/da$i|cut -c 6-8`
    grep -q "${DISK}p1" $DEVICE
    if [ $? -eq 0 ]; then
        LogMsg "Deleting filesystem"
        gpart delete -i 1 "${DISK}"
        gpart destroy "${DISK}"
    else
        LogMsg "No filesystem exits"
    fi
    sleep 2

    gpart create -s GPT /dev/da${i}
    if [ $? -ne 0 ]; then
        LogMsg "Error: Unable to create GPT on /dev/da${i}"
        UpdateTestState $ICA_TESTFAILED
        exit 40
    fi

    gpart add -t freebsd-ufs /dev/da${i}
    if [ $? -ne 0 ]; then
        LogMsg "Error: Unable to add freebsd-ufs slice to /dev/da${i}"
        UpdateTestState $ICA_TESTFAILED
        exit 50
    fi

    newfs /dev/da${i}p1
    if [ $? -ne 0 ]; then
        LogMsg "Error: Unable to format the device /dev/da${i}p1"
        UpdateTestState $ICA_TESTFAILED
        exit 60
    fi

    LogMsg "mount /dev/da${i}p1 /mnt"
    mount /dev/da${i}p1 /mnt
    if [ $? -ne 0 ]; then
        LogMsg "Error: Unable mount device ${TEST_DEVICE}p1"
        UpdateTestState $ICA_TESTFAILED
        exit 70
    fi

    TARGET_DIR="/mnt/IcaTest"
    LogMsg "mkdir ${TARGET_DIR}"
    mkdir ${TARGET_DIR}
    if [ $? -ne 0 ]; then
        LogMsg "Error: unable to create ${TARGET_DIR}"
        UpdateTestState $ICA_TESTFAILED
        exit 80
    fi

    # verify writes reach the new filesystem
    LogMsg "cp ~/*.sh ${TARGET_DIR}"
    cp ~/*.sh ${TARGET_DIR}
    if [ $? -ne 0 ]; then
        LogMsg "Error: unable to copy files to ${TARGET_DIR}"
        UpdateTestState $ICA_TESTFAILED
        exit 90
    fi

    if [ ! -e "${TARGET_DIR}/constants.sh" ]; then
        LogMsg "Error: Write to disk failed"
        UpdateTestState $ICA_TESTFAILED
        exit 100
    fi

    # verify deletes work as well
    LogMsg "rm -f ${TARGET_DIR}/constants.sh"
    rm -f ${TARGET_DIR}/constants.sh
    if [ -e "${TARGET_DIR}/constants.sh" ]; then
        LogMsg "Error: Delete of file on disk failed"
        UpdateTestState $ICA_TESTFAILED
        exit 110
    fi

    LogMsg "umount /mnt"
    umount /mnt
    if [ $? -ne 0 ]; then
        LogMsg "Error: unable to unmount /mnt"
        UpdateTestState $ICA_TESTFAILED
        exit 120
    fi

    i=$[$i+1]
done

#
# If we are here test is completed successfully
#
UpdateTestState $ICA_TESTCOMPLETED

exit 0
| true
|
3b12794a2e8033a095453ac9f807b16a670d4c4a
|
Shell
|
sunsingerus/clickhouse-demo-dataset-deb
|
/chdemo-common_0.1-1/DEBIAN/postinst
|
UTF-8
| 526
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# fail on any error
set -e
# treat unset variables as errors
set -u
# let shell functions inherit ERR trap
set -E
# Trap non-normal exit signals:
# 1/HUP, 2/INT, 3/QUIT, 15/TERM, ERR
trap err_handler 1 2 3 15 ERR
function err_handler {
local exit_status=${1:-$?}
logger -s -p "syslog.err" -t "chdemo.deb" "chdemo.deb script '$0' error code $exit_status (line $BASH_LINENO: '$BASH_COMMAND')"
exit $exit_status
}
##
## MAIN CODE
##
echo "About to run post-install ClickHouse Demo configuration"
exit 0
| true
|
fe2e9f08d2302d27bc8615d3147c57c9e1c3e1c8
|
Shell
|
sreev/examples-2
|
/ocp4-aws-labs/setup.sh
|
UTF-8
| 1,578
| 2.890625
| 3
|
[] |
no_license
|
# you must be root to run this script
# sudo -i
echo ${GUID}
# Download the latest AWS Command Line Interface
curl "https://s3.amazonaws.com/aws-cli/awscli-bundle.zip" -o "awscli-bundle.zip"
unzip awscli-bundle.zip
# Install the AWS CLI into /bin/aws
./awscli-bundle/install -i /usr/local/aws -b /bin/aws
# Validate that the AWS CLI works
aws --version
# Clean up downloaded files
rm -rf /root/awscli-bundle /root/awscli-bundle.zip
# Please pick your OCP_VERSION
OCP_VERSION=$1
wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/${OCP_VERSION}/openshift-install-linux-${OCP_VERSION}.tar.gz
tar zxvf openshift-install-linux-${OCP_VERSION}.tar.gz -C /usr/bin
rm -f openshift-install-linux-${OCP_VERSION}.tar.gz /usr/bin/README.md
chmod +x /usr/bin/openshift-install
wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/${OCP_VERSION}/openshift-client-linux-${OCP_VERSION}.tar.gz
tar zxvf openshift-client-linux-${OCP_VERSION}.tar.gz -C /usr/bin
rm -f openshift-client-linux-${OCP_VERSION}.tar.gz /usr/bin/README.md
chmod +x /usr/bin/oc
ls -l /usr/bin/{oc,openshift-install}
oc completion bash >/etc/bash_completion.d/openshift
# Please add your AWSKEY and AWSSECRETKEY
export AWSKEY=$2
export AWSSECRETKEY=$3
export REGION=us-east-2
mkdir $HOME/.aws
cat << EOF >> $HOME/.aws/credentials
[default]
aws_access_key_id = ${AWSKEY}
aws_secret_access_key = ${AWSSECRETKEY}
region = $REGION
EOF
aws sts get-caller-identity
echo "run \"openshift-install create cluster\" with you are already."
echo "please go to cloud.redhat.com if you need more details."
| true
|
ad0f71a781e15b3a028f83af78ae1e9b2e9fb681
|
Shell
|
AndriiNikitin/mariadb-environs
|
/_plugin/sakila/m-all/load_sakila.sh
|
UTF-8
| 377
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
FILE=http://downloads.mysql.com/docs/sakila-db.tar.gz
mkdir -p __workdir/../_depot/sakila
(
cd __workdir/../_depot/sakila
[[ -f $(basename $FILE) ]] || { wget -nc $FILE && tar -zxf $(basename $FILE) --strip 1; }
)
__workdir/sql.sh 'source __workdir/../_depot/sakila/sakila-schema.sql' && \
__workdir/sql.sh 'source __workdir/../_depot/sakila/sakila-data.sql'
| true
|
131e2e1aa437457d97e417a4f2996ce05c74c8de
|
Shell
|
broznich/server-tools
|
/ec2_backup
|
UTF-8
| 3,705
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# It backups into two amazon data centers:
# /vol - user assets, binary mysql files, mysql dump
# / - all system drive
#
# It requires configs in place:
# /root/
# .my.cnf - MYSQL config
# .awssecret - Keys for amazon server
# .awsvolumes - Config for backup volumes and snapshots
#
# More detail in issue #2135
#
# This script dumped mysql database to sql file, make snapshot of each volume (specified in the config file), copy snapshots to different datacenters
#
#ZONES
#us-east-1 - N.Virginia
#us-west-2 - Oregon
#us-west-1 - N.California
#eu-west-1 - Irland
#ap-southeast-1 - Singapore
#ap-northeast-1 - Tokyo
#ap-southeast-2 - Sydney
#sa-east-1 - Sao Paulo
source /etc/profile.d/extended_env
#COMMENT="dcGuiStaging"
#ZONES="ap-southeast-2 eu-west-1 sa-east-1"
#ZONES="ap-southeast-2"
CREATED_DATE=`date +%d.%m.%Y`
MYSQL_CONFIG=$HOME/.my.cnf
MYSQL_USER=`cat $MYSQL_CONFIG | grep user | awk -F= '{ print $2 }'`
MYSQL_PASS=`cat $MYSQL_CONFIG | grep password | awk -F= '{ print $2 }'`
AWS_CONFIG=$HOME/.awsvolumes
AWS_ACCESS_FILE=$HOME/.awssecret
AWS_ACCESS=`cat $AWS_ACCESS_FILE | awk 'BEGIN{ RS="\n\n";FS="\n" }{ print $1 }'`
AWS_KEY=`cat $AWS_ACCESS_FILE | awk 'BEGIN{ RS="\n\n";FS="\n" }{ print $2 }'`
#Create backup directory on vol storage
if [[ ! -d "/vol/backup" ]]
then
echo -e 'Create backup directory'
mkdir /vol/backup
fi
#Add environment to description for snapshot
if [ ! -n "$ENV" ]; then
DESCRIPTION_ENV="Prod [Undefined server] "
else
DESCRIPTION_ENV="Prod $ENV "
fi
#Dump database to sql file on /vol storage + gzip
#mysqldump --complete-insert --force --add-drop-table --create-options --lock-tables -u $MYSQL_USER -p$MYSQL_PASS easylitics > /vol/backup/easylitics.sql;gzip -9 -f /vol/backup/easylitics.sql;
#Read line by line .awsvolumes config file (ec2 volumes which need to backup)
cat $AWS_CONFIG | while read line
do
DESCRIPTION_FROM_CONFIG=`echo $line | awk -F';' '{ print $1 }'`
VOLUME_NAME=`echo $line | awk -F';' '{ print $2 }'`
SOURCE_DATACENTER=`echo $line | awk -F';' '{ print $3 }'`
DESTINATION_DATACENTER=`echo $line | awk -F';' '{ print $6 }'`
NEED_FREEZE_DB=`echo $line | awk -F';' '{ print $5 }'`
FREEZE_PATH=`echo $line | awk -F';' '{ print $4 }'`
#If config has parametr mysql (freeze mysql) add it to exec string
if [[ "$NEED_FREEZE_DB" == "mysql" ]]
then
DATABASE_STRING=" --mysql"
else
DATABASE_STRING=""
fi
if [[ "$FREEZE_PATH" == "false" ]]
then
FREEZE_STRING=""
else
FREEZE_STRING="--freeze-filesystem $FREEZE_PATH"
fi
echo -en "\nCreating snapshot of $VOLUME_NAME. "
#Create snapshot $VOLNAME and save to SNAPSHOT_ID variable Snapshot Id
SNAPSHOT_ID=`ec2-consistent-snapshot --description "$DESCRIPTION_ENV $DESCRIPTION_FROM_CONFIG $BTYPE $CREATED_DATE" $DATABASE_STRING $FREEZE_STRING $VOLUME_NAME`
echo -en "Snapshot created: $SNAPSHOT_ID.\n"
sleep 120
#Wait 30s timeout as creating backup takes time (100Gb ~ 7-8c)
#Separate destination zones and copy snapshot to each server
echo -en "Copy snapshot $SNAPSHOT_ID to "
echo $DESTINATION_DATACENTER | sed -e 's/ /\n/g' | while read dline
do
echo -en "$dline datacenter\n"
ec2-copy-snapshot --aws-access-key $AWS_ACCESS --aws-secret-key $AWS_KEY --region $dline -s $SNAPSHOT_ID -r $SOURCE_DATACENTER > /dev/null
#Wait 5s (not necessarily)
sleep 5
done
done
| true
|
7053f7cc4a395bae256b5fbae1ec6992e5bed3b4
|
Shell
|
kunal-kushwaha/thanos
|
/scripts/website/contentpreprocess.sh
|
UTF-8
| 3,047
| 3.6875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
OUTPUT_CONTENT_DIR=$1
TOP_WEIGHT=$2
COMMIT_SHA=$(git rev-parse HEAD)
echo ">> preprocessing content of dir ${OUTPUT_CONTENT_DIR}"
# Add headers to special CODE_OF_CONDUCT.md, CONTRIBUTING.md and CHANGELOG.md files.
echo "$(
cat <<EOF
---
title: Code of Conduct
type: docs
menu: contributing
---
EOF
)" >${OUTPUT_CONTENT_DIR}/CODE_OF_CONDUCT.md
cat CODE_OF_CONDUCT.md >>${OUTPUT_CONTENT_DIR}/CODE_OF_CONDUCT.md
echo "$(
cat <<EOF
---
title: Contributing
type: docs
menu: contributing
---
EOF
)" >${OUTPUT_CONTENT_DIR}/CONTRIBUTING.md
cat CONTRIBUTING.md >>${OUTPUT_CONTENT_DIR}/CONTRIBUTING.md
echo "$(
cat <<EOF
---
title: Changelog
type: docs
menu: thanos
---
EOF
)" >${OUTPUT_CONTENT_DIR}/CHANGELOG.md
cat CHANGELOG.md >>${OUTPUT_CONTENT_DIR}/CHANGELOG.md
echo "$(
cat <<EOF
---
title: Maintainers
type: docs
menu: thanos
---
EOF
)" >${OUTPUT_CONTENT_DIR}/MAINTAINERS.md
cat MAINTAINERS.md >>${OUTPUT_CONTENT_DIR}/MAINTAINERS.md
echo "$(
cat <<EOF
---
title: Security
type: docs
menu: thanos
---
EOF
)" >${OUTPUT_CONTENT_DIR}/SECURITY.md
cat SECURITY.md >>${OUTPUT_CONTENT_DIR}/SECURITY.md
# Move root things to "thanos" and "contributing" dir for easy organizing.
mkdir -p ${OUTPUT_CONTENT_DIR}/thanos/
mv ${OUTPUT_CONTENT_DIR}/*.md ${OUTPUT_CONTENT_DIR}/thanos/
mv ${OUTPUT_CONTENT_DIR}/thanos/CONTRIBUTING.md ${OUTPUT_CONTENT_DIR}/contributing/CONTRIBUTING.md
mv ${OUTPUT_CONTENT_DIR}/thanos/CODE_OF_CONDUCT.md ${OUTPUT_CONTENT_DIR}/contributing/CODE_OF_CONDUCT.md
# This is needed only for v0.13-0.16 versions.
mv ${OUTPUT_CONTENT_DIR}/thanos/community.md ${OUTPUT_CONTENT_DIR}/contributing/community.md
# Create an _index.md in Thanos dir.
echo "$(
cat <<EOF
---
title: "Thanos General Documents:"
---
EOF
)" >${OUTPUT_CONTENT_DIR}/thanos/_index.md
# Create an _index.md in this dir to enable sorting capabilities and make this version appear top in version picker
echo "$(
cat <<EOF
---
weight: ${TOP_WEIGHT}
---
EOF
)" >${OUTPUT_CONTENT_DIR}/_index.md
# Add edit footer to all markdown files assumed as content.
ALL_DOC_CONTENT_FILES=$(echo "${OUTPUT_CONTENT_DIR}/**/*.md")
for file in ${ALL_DOC_CONTENT_FILES}; do
relFile=${file##${OUTPUT_CONTENT_DIR}/}
echo "$(
cat <<EOF
---
Found a typo, inconsistency or missing information in our docs?
Help us to improve [Thanos](https://thanos.io) documentation by proposing a fix [on GitHub here](https://github.com/thanos-io/thanos/edit/master/docs/${relFile}) :heart:
EOF
)" >>${file}
done
# All the absolute links are replaced with a direct link to the file on github, including the current commit SHA.
perl -pi -e 's/]\(\//]\(https:\/\/github.com\/thanos-io\/thanos\/tree\/'${COMMIT_SHA}'\//g' ${ALL_DOC_CONTENT_FILES}
# All the relative links needs to have ../ This is because Hugo is missing: https://github.com/gohugoio/hugo/pull/3934
perl -pi -e 's/]\((?!http)/]\(..\//g' ${ALL_DOC_CONTENT_FILES}
# All the relative links in src= needs to have ../ as well.
perl -pi -e 's/src=\"(?!http)/src=\"..\//g' ${ALL_DOC_CONTENT_FILES}
| true
|
8303b1ce49d79d3d9724869ff2e9423494c8d449
|
Shell
|
hkoba/hktools
|
/ttvars
|
UTF-8
| 1,953
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/zsh
# See: https://github.com/hkoba/mytools/wiki/ttvars
emulate -L zsh
setopt extended_glob
scriptfn=$0
bindir=$(cd $scriptfn:h; print $PWD)
function die { echo 1>&2 $*; exit 1 }
function usage {
if ((ARGC)); then print -- $* 1>&2; fi
cat 1>&2 <<EOF; exit 1
Usage: ${scriptfn:t} Vars Vars...
Example:
${scriptfn:t} y,n yes,no
${scriptfn:t} 'x <= 3, x > 3' 'y <= 5, y > 5' 'z <= 0, z > 0'
Or even simply:
${scriptfn:t} x y z
EOF
}
if [[ -n $PROLOG_CMD && -x $PROLOG_CMD ]]; then
prolog=$PROLOG_CMD
elif (($+commands[gprolog])); then
prolog=gprolog
elif (($+commands[swipl])); then
prolog=swipl
else
die "Can't find prolog interpreter!"
fi
function list {
print -- "[${(j/,/)argv}]"
}
function literal2boolpairs {
local exp
for exp in ${argv//<\=/\=<}; do
if [[ $exp == [_[:alpha:]][_[:alnum:]]# ]]; then
print "[$exp=1, $exp=0]"
else
print -R "[$exp]"
fi
done
}
function cmd_gprolog {
# To suppress compilation messages from consult, I use redirection here.
# To use this, /dev/fd/3 should be available
[[ -w /dev/fd/1 ]] || die /dev/fd/N is not available on your system.
local script input
input=$(list $argv)
script=(
"consult('$solver')"
"open('/dev/fd/3',write,Out)"
"set_output(Out)"
"tt($input, write_tsv)"
)
[[ -n $o_verbose ]] || exec 2>/dev/null
gprolog --init-goal "(${(j/,/)script};halt(0))" 3>&1 1>&2
}
function cmd_swipl {
local script input
input=$(list $argv)
script=(
"tt($input, write_tsv)"
)
[[ -n $o_verbose ]] || exec 2>/dev/null
swipl -f $solver -g "(${(j/,/)script};halt(0))"
}
solver=$scriptfn.prolog
[[ -r $solver ]] || die "Can't find solver program $solver"
zparseopts -D -K h=o_help v=o_verbose x=o_xtrace || usage
[[ -n $o_help ]] && usage
((ARGC)) || usage
[[ -n $o_xtrace ]] && set $o_xtrace
cmd_$prolog ${(f)"$(literal2boolpairs "$@")"}
| true
|
400440e38800d70d4324aeaa943dcdcc6952429c
|
Shell
|
RobOJHU/Shell_Scripts
|
/freak_check.sh
|
UTF-8
| 1,360
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# THIS LITTLE BASH SCRIPT CHECKS WHETHER YOUR SERVER IS VUNLNERABLE TO CVE-2015-0204
# A.K.A. FREAK ATTACK, WHICH ENABLES A MITM TO TRICK CERTAIN CLIENTS INTO CONNECTING
# USING INSECURE CIPHER SUITES KNOWN AS 'EXPORT SUITES'. EXPORT CIPHERS ARE DELIBERATELY
# INSECURE AND WERE THE ONLY CIPHER SUITES THAT WERE ALLOWED TO BE EXPORTED FROM THE US
# DURING THE 90'S. ONCE AGAIN IT WAS PROVEN THAT CRYPTO "BACKDOORS" ARE A REALLY BAD IDEA!
#
# USAGE: $./freak_check.sh
# --RobO from https://github.com/SnijderC/freak_check/blob/master/freak_check.sh
# MODIFY usage so tha it ONLY check "localhost" --I figure --why get fancey at this stage ?
PORT="443"
HOSTNAME="localhost"
RESULT=$(echo -n | openssl s_client -cipher "EXPORT" -connect localhost:443 2>&1)
if [[ "$RESULT" =~ "Cipher is" || "$RESULT" =~ "Cipher :" ]]; then
echo "Oh my god.. you are so vulnerable.."
exit 1
elif [[ "$RESULT" =~ "handshake failure" ]]; then
echo "You disabled EXPORT ciphers like you should have, good for you! ;)"
exit 0
elif [[ "$RESULT" =~ "gethostbyname failure" ]]; then
echo "Hostname cannot be found."
exit 2
elif [[ "$RESULT" =~ "Connection refused" ]]; then
echo "The server refused your connection, is this the correct port?"
exit 3
else
echo "OpenSSL returned an unexpected result, meaning the test is inconclusive."
exit 4
fi
| true
|
b2db11f2cd0dd13d7d1b28ced7c71f97c7f6bc0c
|
Shell
|
Pating/lkp-tests
|
/tests/ftq
|
UTF-8
| 443
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
# - nr_task
# - samples
# - freq
## The FTQ benchmarks measure hardware and softwareinterference
## or 'noise' on a node from the applications perspective.
cd $BENCHMARK_ROOT/ftq || exit
[ 0"$nr_task" -gt 1 ] && opt_task="-t $nr_task"
[ -n "$samples" ] || samples=10000
[ -n "$freq" ] && opt_freq="-f $freq"
samples=${samples%%ss}
log_cmd ./ftq $opt_freq -n $samples $opt_task || exit
copy-results $BENCHMARK_ROOT/ftq/*.dat
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.