blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8a1bcd166c2031036c3fd2cde12eafeed168fe91
|
Shell
|
fergald/virtual-scroller
|
/publish.sh
|
UTF-8
| 1,239
| 4.1875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash -ex
# Publish a sha and/or branch to gh-pages.
#
# publish.sh <sha>
#
# Will cause the repo to be published under
# https://fergald.github.io/virtual-scroller/versions/<sha>
# and
#
# publish.sh <branch>
#
# will figure out the sha for that branch, publish that and
# link versions/<branch> to that sha.

# Error messages go to stderr. Fixed: the original used ">2", which
# redirects into a file literally named "2" instead of fd 2.
if [ ! -e .git ]; then
  echo >&2 "No .git"
  exit 1
fi
if [ $# -eq 0 ]; then
  echo >&2 "No revision"
  exit 1
fi
revision=$1
shift
git checkout gh-pages
# Resolve the branch/sha argument to a full sha.
sha=$(git rev-parse "$revision")
dest=versions/"$sha"
# Shared (-s), no-checkout (-n) clone of this repo into the destination.
git clone -s -n . "$dest"
(
  cd "$dest"
  git checkout "$sha"
  # Pause so the operator can confirm before the nested .git is removed.
  echo "Deleting $(pwd)/.git"
  read f
  rm -rf .git
  git add .
  git commit -a -m"Add gh-pages revision $sha"
  # If the argument was already a sha there is no branch symlink to update.
  if [ "$sha" = "$revision" ]; then
    exit
  fi
  cd ..
  # Point versions/<branch> at versions/<sha>.
  ln -sfT "$sha" "$revision"
  git add "$revision"
  git commit -a -m"Update $revision->$sha"
)
git push
git checkout -
# Derive the GitHub Pages base URL from the origin remote's fetch URL.
base=$(git remote show -n origin|grep "Fetch URL" | perl -lne 'print "https://$1.github.io/$2" if m#github.com:(.*?)/(.*)#')
if [ -z "$base" ]; then
  echo >&2 "Couldn't get base"
  exit 1
fi
echo Published to "$base/versions/$revision"
if [ "$sha" != "$revision" ]; then
  echo Published to "$base/versions/$sha"
fi
| true
|
fa058cd3a9d91b8006dee0ffc2464d35edc90f00
|
Shell
|
Lyuji282/extempore
|
/extras/integration-test.sh
|
UTF-8
| 1,394
| 3.5
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Build extempore out-of-tree in /tmp and run the full test suite twice:
# once without ahead-of-time compilation and once with it.
# This script should be run from inside the extras/ subdirectory.
cd .. # move up one level into top-level extempore directory
SRC_DIR=$PWD
# uncomment this next line if you want to test the full "pull down LLVM" behaviour
# unset EXT_LLVM_DIR
if [ ! -f "$SRC_DIR/extras/integration-test.sh" ]; then
  echo -e "\033[0;31mError:\033[0;00m integration-test.sh must be run from inside the extras/ directory"
  exit 1
fi
TEST_DIR=/tmp/extempore-integration-test
# port to run the Extempore primary process on
TEST_PORT=17097
# Start from a clean build directory.
if [ -d "$TEST_DIR" ]; then
  rm -r "$TEST_DIR"
fi
mkdir "$TEST_DIR" && cd "$TEST_DIR"
echo "Running tests in ${TEST_DIR}..."
cmake -DCMAKE_INSTALL_PREFIX="$TEST_DIR" -DCMAKE_BUILD_TYPE=Release -DIN_TREE=OFF "$SRC_DIR" && make clean && make -j4 && make install && make clean_aot && "$TEST_DIR/bin/extempore" --noaudio --port=${TEST_PORT} --sharedir "$TEST_DIR/share/extempore" --run tests/all.xtm
if (($? != 0)); then
  # Fixed: the message previously interpolated an undefined "$f".
  echo -e "\033[0;31mIntegration test failed (AOT:false)\033[0;00m"
  echo
  exit 1
fi
# Second pass: with the extended AOT-compiled standard library.
make -j4 aot_extended && "$TEST_DIR/bin/extempore" --noaudio --port=${TEST_PORT} --sharedir "$TEST_DIR/share/extempore" --run tests/all.xtm
if (($? != 0)); then
  echo -e "\033[0;31mIntegration test failed (AOT:true)\033[0;00m"
  echo
  exit 1
else
  echo -e "\033[0;32mAll integration tests passed\033[0;00m"
  echo
  exit 0
fi
| true
|
f733af5f279b945a6ba2023d728a50edb120d4a9
|
Shell
|
Tech-XCorp/bilder
|
/packages/funcsigs.sh
|
UTF-8
| 1,847
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
######################################################################
#
# @file    funcsigs.sh
#
# @brief   Build information for funcsigs.
#
# @version $Rev$ $Date$
#
# Copyright &#169; 2015-2017, Tech-X Corporation, Boulder, CO.
# See LICENSE file (EclipseLicense.txt) for conditions of use.
#
######################################################################

######################################################################
#
# Trigger variables set in funcsigs_aux.sh
#
######################################################################
# NOTE(review): $BASH_SOURCE is a bashism under a #!/bin/sh shebang;
# this file is presumably sourced by bilder under bash — confirm.
mydir=$(dirname "$BASH_SOURCE")
source "$mydir/funcsigs_aux.sh"

######################################################################
#
# Set variables that should trigger a rebuild, but which by value change
# here do not, so that build gets triggered by change of this file.
# E.g: mask
#
######################################################################
setFuncsigsNonTriggerVars() {
  FUNCSIGS_UMASK=002
}
setFuncsigsNonTriggerVars

#####################################################################
#
# Launch builds.
#
######################################################################
# Unpack the tarball and, if that succeeds, build with distutils.
buildFuncsigs() {
  if bilderUnpack funcsigs; then
    bilderDuBuild funcsigs "" "$DISTUTILS_ENV"
  fi
}

######################################################################
#
# Test
#
######################################################################
buildFuncsigs_test_placeholder() { :; } # (no-op removed below)
testFuncsigs() {
  techo "Not testing Funcsigs."
}

######################################################################
#
# Install
#
######################################################################
# Record installed files so the package can be tracked/uninstalled.
installFuncsigs() {
  local FUNCSIGS_INSTALL_ARGS="--single-version-externally-managed --record='$PYTHON_SITEPKGSDIR/funcsigs.files'"
  bilderDuInstall funcsigs "$FUNCSIGS_INSTALL_ARGS" "$DISTUTILS_ENV"
}
| true
|
e9a62f0c6bf46169d00db37090182870df02fab2
|
Shell
|
vers10ne/nim-beacon-chain
|
/scripts/reset_testnet.sh
|
UTF-8
| 1,093
| 3.15625
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Rebuild the beacon node and (re)create a testnet, publishing genesis and
# network metadata under $WWW_DIR/<network>.
set -eu

cd "$(dirname "$0")"
NETWORK_NAME=$1
source "$NETWORK_NAME.env"
cd ..

if [ -f .env ]; then
  # allow server overrides for WWW_DIR and DATA_DIR
  source .env
fi

PUBLIC_IP=$(curl -s ifconfig.me)
NETWORK_DIR=$WWW_DIR/$NETWORK_NAME

# Intentionally unquoted below: NIM_FLAGS must word-split into separate flags.
NIM_FLAGS="-d:release -d:SECONDS_PER_SLOT=$SECONDS_PER_SLOT -d:SHARD_COUNT=$SHARD_COUNT -d:SLOTS_PER_EPOCH=$SLOTS_PER_EPOCH ${2:-}"
nim c -d:"network_type=$NETWORK_TYPE" $NIM_FLAGS beacon_chain/beacon_node

# First run for this network: wipe the directory and generate deposits.
if [ ! -f "$NETWORK_DIR/genesis.json" ]; then
  rm -f "$NETWORK_DIR"/*
  beacon_chain/beacon_node makeDeposits \
    --totalDeposits="$VALIDATOR_COUNT" \
    --depositDir="$NETWORK_DIR" \
    --randomKeys=true
fi

beacon_chain/beacon_node \
  --network="$NETWORK_NAME" \
  --dataDir="$DATA_DIR/node-0" \
  createTestnet \
  --validatorsDir="$NETWORK_DIR" \
  --totalValidators="$VALIDATOR_COUNT" \
  --lastUserValidator="$LAST_USER_VALIDATOR" \
  --outputGenesis="$NETWORK_DIR/genesis.json" \
  --outputNetwork="$NETWORK_DIR/network.json" \
  --bootstrapAddress="$PUBLIC_IP" \
  --bootstrapPort="$BOOTSTRAP_PORT" \
  --genesisOffset=600 # Delay in seconds
| true
|
dcfc9f5f58f0832f313b2982ebb4673662515deb
|
Shell
|
navneetyadav/lab2
|
/lab2_q1.sh
|
UTF-8
| 268
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# Greet the user, then create a directory named after them containing an
# empty <name>.cpp file.
# Fixed: the shebang must be the very first line of the file; it was
# previously preceded by a comment and therefore ignored.
# ask the user his name
echo "HELLO"
echo "WHAT IS YOUR NAME?"
# enter the name (-r keeps backslashes literal)
read -r name
# print user name
echo "YOU ARE $name"
# making the folder of his name; quoted so names with spaces work,
# -p so re-running the script does not fail, -- guards leading dashes
mkdir -p -- "$name"
cd "$name" || exit 1
# making a cpp_file of his name
touch -- "$name.cpp"
| true
|
efe90a42801cbbdec35ec2b8825e036087210ff3
|
Shell
|
jbacon/Portfolio
|
/ExpressApi/docker-run.sh
|
UTF-8
| 1,000
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# (Re)start the portfolio Node container, wiring it to the mongod container.
# Options set via `set` (not the shebang) so they survive `bash docker-run.sh`.
set -ex

DIR="$(cd "$(dirname "$0")" && pwd)"

NPM_RUN_SCRIPT='startDev'
if [ "${ENVIRONMENT}" = "production" ]; then
  NPM_RUN_SCRIPT='startProd'
fi

# Stop/Remove Existing Node container. "|| true" so a first run (no
# existing container) does not abort the script under -e.
docker stop node || true
docker rm node || true

MONGOD_CONTAINER_IP=$(docker inspect mongod --format '{{ .NetworkSettings.IPAddress }}')

# The inline $(if ...) emits extra args (intentionally unquoted so the
# echoed flags word-split into separate arguments).
docker run \
  --name node \
  --interactive \
  --tty \
  --detach \
  --volume ${DIR}/:/app/ \
  --expose 3000 \
  $(if [ "${ENVIRONMENT}" = "production" ]; then
    echo '--env PORTFOLIO_CONFIG_FILE=./configs-prod.json '
  else
    echo '--publish 9229:9229 --env PORTFOLIO_CONFIG_FILE=./configs-dev.json '
  fi) \
  --env PORTFOLIO_MONGODB_URL='mongodb://'${MONGOD_CONTAINER_IP}':27017/portfolio' \
  --env HTTP_PROXY=${HTTP_PROXY} \
  --env HTTPS_PROXY=${HTTPS_PROXY} \
  --env http_proxy=${http_proxy} \
  --env https_proxy=${https_proxy} \
  --env NO_PROXY=${NO_PROXY} \
  --env no_proxy=${no_proxy} \
  node:7.10.0-alpine \
  /bin/sh -c \
  "
  apk update;
  apk add git;
  cd /app/;
  npm install;
  npm run ${NPM_RUN_SCRIPT};
  "
| true
|
c271b7c35280bfb5aa37ecda24a80ed099b6005d
|
Shell
|
3286465057/blog
|
/mysql/init/backup.sh
|
UTF-8
| 294
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Daily dump of the "solo" database into /backup, keeping 7 days of dumps.
backup_dir="/backup"
# NOTE(review): hardcoded DB password in a world-readable script; prefer
# putting it in the --defaults-extra-file (already passed below) or env.
password="setapassword"
if [ ! -d "$backup_dir" ]; then
  mkdir -p "$backup_dir"
fi
mysqldump --defaults-extra-file=/etc/mysql/my.cnf -uroot -p"$password" --databases solo > "$backup_dir/data_$(date +%Y%m%d).sql"
# Delete the dump from 7 days ago ("-d -7day" is a GNU date extension).
rm -f "$backup_dir/data_$(date -d -7day +%Y%m%d).sql"
| true
|
accf6dd5c3719fdcdc1502beb9e1714fadace45b
|
Shell
|
DarkWiiPlayer/darkrc
|
/bin/git-as
|
UTF-8
| 326
| 2.921875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# git-as: run a git command with author/committer identity taken from the
# "user.<identity>.name" / "user.<identity>.email" git config entries.
#
# Usage: git as <identity> [git args...]
user=$1
# Guard the missing-argument case: "shift" with no positional parameters
# is an error in some shells (e.g. dash), and an empty identity would
# silently export empty author/committer values.
if [ -z "$user" ]; then
  echo "usage: git as <identity> [git args...]" >&2
  exit 2
fi
shift 1
export GIT_AUTHOR_NAME="$(git config user.$user.name)"
export GIT_AUTHOR_EMAIL="$(git config user.$user.email)"
export GIT_COMMITTER_NAME="$(git config user.$user.name)"
export GIT_COMMITTER_EMAIL="$(git config user.$user.email)"
echo "Running as $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL>"
git "$@"
| true
|
d3f513f7d776382218554e4d29171454085c4dba
|
Shell
|
Zero-Grav/voron-script
|
/modules/french.sh
|
UTF-8
| 845
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Switch the Raspberry Pi to French: timezone, keyboard layout and locale.
# Load the shared helpers (_log, color vars) from wherever we were launched.
if [ -f _common.sh ]; then
source ./_common.sh
elif [ -d modules ]; then
source ./modules/_common.sh
fi
##################
### USER CHECK ###
##################
# Refuse to run as root (the script uses sudo for the privileged steps).
if [ `whoami` == 'root' ]; then
_log "${RED}Ne lancez pas ce script en super-admin"
exit
fi
# Prime the sudo credential cache up front so later steps don't prompt.
sudo echo "" > /dev/null
#####################
### INTERNATIONAL ###
#####################
_log "=> Passage du RPI en FR"
# Timezone: Europe/Paris
_log " => Timezone"
echo "Europe/Paris" | sudo tee /etc/timezone > /dev/null
# Replace the localtime symlink (rm first; ln has no -f here).
sudo rm /etc/localtime
sudo ln -s /usr/share/zoneinfo/Europe/Paris /etc/localtime
# Keyboard: switch the console layout from GB to FR.
_log " => Clavier"
sudo sed -i 's/^XKBLAYOUT="gb"$/XKBLAYOUT="fr"/' /etc/default/keyboard
# Locale: uncomment fr_FR.UTF-8 and regenerate locales.
sudo sed -i 's/# fr_FR.UTF-8 UTF-8/fr_FR.UTF-8 UTF-8/' /etc/locale.gen
sudo locale-gen
_log "=> Date : $(date)"
| true
|
fac9a022f34f9ff1d1e2a8ef18d663ede0bfbe49
|
Shell
|
ItzMxrcxl/backupmypi
|
/ressources/usr/bin/backupmypi/main.sh
|
UTF-8
| 2,358
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Author: Marcel Kallinger https://github.com/ItzMxrcxl
# BackupMyPi Script — dd-based image backup of the Pi's storage device,
# optionally shrunk (pishrink) and/or gzip-compressed.
echo "~~~~~~~~~~~~~~~BackupMyPi - Backup~~~~~~~~~~~~~~~~"
echo "Author: Marcel Kallinger"
echo "https://github.com/ItzMxrcxl"
echo ""
if [[ ! -f "/usr/bin/backupmypi/config.txt" ]]
then
  echo "ERROR: Config Datei existiert nicht"
  exit 2
fi
. /usr/bin/backupmypi/config.txt # Load Config
DATE=$(date '+%Y-%m-%d_%H-%M-%S')

# Run the self-updater; report (but do not abort on) a non-zero status.
self_update() {
  echo "Prüfe updates..."
  cd /usr/bin/backupmypi/ || return
  sudo bash updater.sh
  # Capture the status immediately — by the time the old code echoed "$?",
  # it held the exit status of the [ ] test, not of updater.sh.
  local rv=$?
  if [ $rv -ne 0 ]; then
    echo "Error! "$rv
  fi
}

# Functions use "( )" bodies on purpose: each runs in a subshell so cd and
# variable changes do not leak back into the main script.
main() (
  sudo mount "$backup_mount_path"
  backup
)

backup() (
  DATE_STARTED=$(date '+%Y-%m-%d_%H-%M-%S')
  BOOT=$backup_drive
  # Fixed: the original "test -e $BOOT; if [ ! $?=0 ]" was a single-string
  # test ("0=0") that could never fire, so a missing source device was
  # silently ignored.
  if ! test -e "$BOOT"; then
    echo "Backup Quelle "$BOOT" exisitert nicht! Bitte überprüfe Konfig Datei"
    exit 1
  fi
  if [[ ! -d $backup_path ]]; then
    echo "ERROR: Backup Ordner "$backup_path" exisitert nicht, Ist das Speichergerät mounted? Existiert der Ordner auf dem Gerät?"
    exit 1
  fi
  a=$SECONDS #Start the timer
  backup_file=$backup_path'/'$backup_name'.img'
  if [ "$shrink_image" = 'True' ]; then
    if [ "$compress_image" = 'True' ]; then
      echo "Erstelle Backup von "$BOOT", dies wird nach dem Kopieren verkleinert und gepackt."
      normal_backup
      just_shrink
    else
      echo "Erstelle Backup von "$BOOT", speichere dies unter "$backup_file" , dies wird nach dem Kopieren verkleinert"
      normal_backup
      just_shrink
    fi
  else
    echo "Erstelle Backup von "$BOOT", speichere dies unter "$backup_file".gz"
    # NOTE(review): when both shrink_image and compress_image are 'False'
    # no backup is made at all — confirm this is intended.
    if [ "$compress_image" = 'True' ]; then
      zip_copy
    fi
  fi
)

zip_copy() ( #Compress the image while copying
  echo "Backupdatei wird wärend dem Backup zusätzlich gepackt."
  sudo dd if="$BOOT" conv=sparse bs=512 status=progress | gzip -c > "$backup_file".gz
)

normal_backup() (
  sudo dd if="$BOOT" of="$backup_file" conv=sparse bs=512 status=progress
)

just_shrink() (
  echo "Verkleinere Image"
  sudo pishrink "$backup_file"
)

# Currently unused; kept for compatibility with external callers/config.
just_zip() (
  gzip "$backup_file" "$backup_file".gz
  rm "$backup_file"
)

output() (
  # $SECONDS counts since shell start, i.e. total script runtime.
  duration=$SECONDS #Stop the timer
  echo "Das Backup wurde in $(($duration / 60)) Minuten und $(($duration % 60)) Sekunden erstellt." #Output the Timer
  # Fixed: the original tested the literal string "unmount_after_backup"
  # (missing "$"), which is always true.
  if [ "$unmount_after_backup" = 'True' ]; then
    echo "Hänge Backup drive "$backup_mount_path" aus"
    umount "$backup_mount_path"
  fi
)

self_update #If you want to disable the Autoupdate, mark this line
main
output
| true
|
32a572463290de4c96d6b0340dc0eed6a8e2e07c
|
Shell
|
BuaBook/archive-system
|
/archive.sh
|
UTF-8
| 3,521
| 4.125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# BuaBook Archive System
# Copyright (C) Sport Trades Ltd
# 2016
#
# Reads per-host source/target config files (paths from $BAS_CONFIG) and
# rsyncs matching date-stamped files to the configured target, optionally
# deleting them afterwards. Helper functions (logInfo/logError,
# loadConfigFile, getPreviousPartialDate) come from bash-helpers.
readonly PROGNAME=$(basename $0)
readonly PROGDIR=$(readlink -m $(dirname $0))
readonly SOURCE_CONFIG=${BAS_CONFIG}/$(hostname)/archive.config
readonly TARGET_CONFIG=${BAS_CONFIG}/$(hostname)/archive.target
readonly RSYNC_OPTIONS="--archive --ipv4 --compress --verbose --copy-links --progress --relative --keep-dirlinks"
source ${PROGDIR}/bash-helpers/src/bash-helpers.sh
# Deliberately keep going after per-row failures; each row is handled below.
set +e
# Validates both config files, then archives every source row against the
# single configured target. Exits 1-3 on configuration errors.
main()
{
logInfo "\n**************************************"
logInfo "**** BUABOOK ARCHIVE SYSTEM ****"
logInfo "**************************************\n"
if [[ ! -f $SOURCE_CONFIG ]]; then
logError "\nERROR: Source configuration file could not be found"
logError "\tExpecting it @ $SOURCE_CONFIG"
exit 1
fi
if [[ ! -f $TARGET_CONFIG ]]; then
logError "\nERROR: Target configuration file could not be found"
logError "\tExpecting it @ $TARGET_CONFIG"
exit 2
fi
# Word-split config lines into arrays (one element per line/field).
local sourceConfig=($(loadConfigFile $SOURCE_CONFIG))
local targetConfig=($(loadConfigFile $TARGET_CONFIG))
# Only the first target row is used.
local singleTarget=${targetConfig[0]}
if [[ "" == $singleTarget ]]; then
logError "\nERROR: No target configuration specified"
logError "\tEnsure target configuration is set in $TARGET_CONFIG"
exit 3
fi
logInfo " * rsync Options:\t$RSYNC_OPTIONS\n"
# A failed row logs an error but does not stop the remaining rows.
for sourceRow in "${sourceConfig[@]}"; do
archiveSourceRow $sourceRow $singleTarget
local archiveResult=$(echo $?)
if [[ $archiveResult -ne 0 ]]; then
logError "\nERROR: Previous archive attempt failed. Continuing...\n"
fi
done
logInfo "\nARCHIVE COMPLETE\n"
}
# Archives one comma-separated config row ($1) to the target row ($2).
# Row fields: sourcePath,count,frequency,deleteMode. The {BBDATE} token in
# the source path is replaced by a partial date "count frequency" ago.
# Returns 0 on success/no-op, 1 if the rsync failed.
archiveSourceRow()
{
# Temporarily split on commas to unpack the two rows into arrays.
export IFS=","
local sourceInfo=($1)
local targetConfig=($2)
unset IFS
local configSourceFilePath=${sourceInfo[0]}
local archiveCount=${sourceInfo[1]}
local archiveFreq=${sourceInfo[2]}
local shouldDelete=${sourceInfo[3]}
local partialDate=$(getPreviousPartialDate $archiveCount $archiveFreq)
# Trailing "*" makes this a glob; it is expanded (unquoted, intentionally)
# by find/rsync/rm below.
local sourceFilePath=${configSourceFilePath/\{BBDATE\}/${partialDate}}*
local targetLocalRemote=${targetConfig[0]}
local target=${targetConfig[1]}
# find's exit status tells us whether anything matches the glob.
local anyFilesMatch=$(find $sourceFilePath > /dev/null 2>&1; echo $?)
logInfo "\n * [ $(date) ] Process start"
logInfo "\t - Source:\t$sourceFilePath"
logInfo "\t - Target:\t$target (${targetLocalRemote})"
logInfo "\t - Lookback:\t${archiveCount} ${archiveFreq}"
logInfo "\t - Type:\t$shouldDelete\n"
if [[ $anyFilesMatch -ne 0 ]]; then
logInfo " * [ $(date) ] No archive required (no files match)"
return 0
fi
if [[ "delete-only" == $shouldDelete ]]; then
logInfo " * [ $(date) ] WARN: No archiving configured"
else
logInfo " * [ $(date) ] Starting archive\n"
# RSYNC_OPTIONS and the glob path must stay unquoted to word-split/expand.
rsync $RSYNC_OPTIONS $sourceFilePath $target
local archiveResult=$(echo $?)
if [[ $archiveResult -ne 0 ]]; then
logError " * [ $(date) ] ERROR: Archived failed. Aborting..."
return 1
fi
logInfo "\n * [ $(date) ] Archive complete"
fi
# Delete source files for both delete-only and archive-delete modes.
if [[ "delete-only" == $shouldDelete ]] || [[ "archive-delete" == $shouldDelete ]]; then
logInfo " * [ $(date) ] Deleting archived files\n"
rm -v $sourceFilePath
logInfo "\n * [ $(date) ] Deletion complete"
fi
logInfo " * [ $(date) ] Process complete"
return 0
}
main
| true
|
005db5e10b76d61389827cc18f29d284bfd69526
|
Shell
|
git-for-windows/MINGW-packages
|
/mingw-w64-ladspa-sdk/PKGBUILD
|
UTF-8
| 669
| 2.6875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: Jeff Hubbard <musikernel@gmail.com>
# makepkg recipe: packages the single LADSPA header for mingw-w64.
_realname=ladspa-sdk
pkgbase=mingw-w64-${_realname}
pkgname="${MINGW_PACKAGE_PREFIX}-${_realname}"
pkgver=1.13
pkgrel=2
pkgdesc="Linux Audio Developer's Simple Plugin API (LADSPA) SDK (mingw-w64)"
arch=('any')
url="https://www.ladspa.org/"
license=("LGPL")
# bsdtar fails to untar their SDK tarball because of a symlink, so just
# download the header directly
source=("https://www.ladspa.org/ladspa_sdk/ladspa.h.txt")
sha256sums=('1bd380baaf018be5e05323c192444adf058ed8a35b8cbf4c8a11519fc81cb036')
# Install step: the package is just ladspa.h under the mingw include dir.
package() {
install -d "${pkgdir}/${MINGW_PREFIX}/include"
cp "${srcdir}/ladspa.h.txt" "${pkgdir}/${MINGW_PREFIX}/include/ladspa.h"
}
| true
|
5d229daed80514edba71cdabd7c79a8fbf58c8c9
|
Shell
|
SNURFER/scan-build-customize
|
/analyzer/run-analyzer.sh
|
UTF-8
| 613
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# Run clang static analysis (scan-build) over the parent CMake project,
# then strip source listings from the generated HTML reports to shrink them.
rm -rf analysis_result
rm -rf static_analysis
mkdir static_analysis
# Abort if the cd fails — otherwise scan-build would run in the wrong
# directory and "../.." would point somewhere unintended.
cd static_analysis || exit 1
../scan-build cmake -DCMAKE_BUILD_TYPE=Debug ../..
../scan-build -plist-html -o ../analysis_result --use-cc /usr/lib/llvm-7/bin/clang --use-c++ /usr/lib/llvm-7/bin/clang --use-analyzer /usr/lib/llvm-7/libexec/c++-analyzer make
echo 'scan-build after process start'
# reduce html size: delete the per-line source code rows from each report
cd ../analysis_result || exit 1
echo "current dir is : $PWD"
find . -name "report-*.html" -exec sed -i '/class=\"codeline\"/d' {} +
find . -name "report-*.html" -exec sed -i '/class=\"num\"/d' {} +
echo 'scan-build after process ends'
| true
|
4ef6600491a9d0d05063b122a231770a6a6dae57
|
Shell
|
termux/termux-packages
|
/scripts/build/termux_create_pacman_subpackages.sh
|
UTF-8
| 9,289
| 3.609375
| 4
|
[
"Apache-2.0",
"MIT",
"ISC",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Libpng",
"X11",
"BSD-3-Clause",
"Zlib"
] |
permissive
|
# Build all pacman-format subpackages of the current Termux package:
# splits out files per *.subpackage.sh recipe (plus an auto-generated
# -static recipe), writes .PKGINFO/.BUILDINFO/.MTREE metadata and emits a
# compressed .pkg.tar.* archive per subpackage into $TERMUX_OUTPUT_DIR.
termux_create_pacman_subpackages() {
	# Sub packages:
	if [ "$TERMUX_PKG_NO_STATICSPLIT" = "false" ] && [[ -n $(shopt -s globstar; shopt -s nullglob; echo lib/**/*.a) ]]; then
		# Add virtual -static sub package if there are include files:
		local _STATIC_SUBPACKAGE_FILE=$TERMUX_PKG_TMPDIR/${TERMUX_PKG_NAME}-static.subpackage.sh
		echo TERMUX_SUBPKG_INCLUDE=\"$(find lib -name '*.a' -o -name '*.la') $TERMUX_PKG_STATICSPLIT_EXTRA_PATTERNS\" > "$_STATIC_SUBPACKAGE_FILE"
		echo "TERMUX_SUBPKG_DESCRIPTION=\"Static libraries for ${TERMUX_PKG_NAME}\"" >> "$_STATIC_SUBPACKAGE_FILE"
	fi

	# Now build all sub packages
	rm -Rf "$TERMUX_TOPDIR/$TERMUX_PKG_NAME/subpackages"
	for subpackage in $TERMUX_PKG_BUILDER_DIR/*.subpackage.sh $TERMUX_PKG_TMPDIR/*subpackage.sh; do
		test ! -f "$subpackage" && continue
		local SUB_PKG_NAME
		SUB_PKG_NAME=$(basename "$subpackage" .subpackage.sh)
		if [ "$TERMUX_PACKAGE_LIBRARY" = "glibc" ] && ! package__is_package_name_have_glibc_prefix "$SUB_PKG_NAME"; then
			SUB_PKG_NAME="${SUB_PKG_NAME}-glibc"
		fi
		# Default value is same as main package, but sub package may override:
		local TERMUX_SUBPKG_PLATFORM_INDEPENDENT=$TERMUX_PKG_PLATFORM_INDEPENDENT
		local SUB_PKG_DIR=$TERMUX_TOPDIR/$TERMUX_PKG_NAME/subpackages/$SUB_PKG_NAME
		local TERMUX_SUBPKG_ESSENTIAL=false
		local TERMUX_SUBPKG_BREAKS=""
		local TERMUX_SUBPKG_DEPENDS=""
		local TERMUX_SUBPKG_RECOMMENDS=""
		local TERMUX_SUBPKG_SUGGESTS=""
		local TERMUX_SUBPKG_CONFLICTS=""
		local TERMUX_SUBPKG_REPLACES=""
		local TERMUX_SUBPKG_PROVIDES=""
		local TERMUX_SUBPKG_CONFFILES=""
		local TERMUX_SUBPKG_DEPEND_ON_PARENT=""
		local TERMUX_SUBPKG_EXCLUDED_ARCHES=""
		local TERMUX_SUBPKG_GROUPS=""
		local SUB_PKG_MASSAGE_DIR=$SUB_PKG_DIR/massage/$TERMUX_PREFIX
		local SUB_PKG_PACKAGE_DIR=$SUB_PKG_DIR/package
		mkdir -p "$SUB_PKG_MASSAGE_DIR" "$SUB_PKG_PACKAGE_DIR"

		# Override termux_step_create_subpkg_debscripts
		# shellcheck source=/dev/null
		source "$TERMUX_SCRIPTDIR/scripts/build/termux_step_create_subpkg_debscripts.sh"

		# shellcheck source=/dev/null
		source "$subpackage"

		# Move the recipe's file patterns out of the main package tree.
		# Allow globstar (i.e. './**/') patterns.
		shopt -s globstar
		for includeset in $TERMUX_SUBPKG_INCLUDE; do
			local _INCLUDE_DIRSET
			_INCLUDE_DIRSET=$(dirname "$includeset")
			test "$_INCLUDE_DIRSET" = "." && _INCLUDE_DIRSET=""
			if [ -e "$includeset" ] || [ -L "$includeset" ]; then
				# Add the -L clause to handle relative symbolic links:
				mkdir -p "$SUB_PKG_MASSAGE_DIR/$_INCLUDE_DIRSET"
				mv "$includeset" "$SUB_PKG_MASSAGE_DIR/$_INCLUDE_DIRSET"
			fi
		done
		shopt -u globstar

		# Do not create subpackage for specific arches.
		# Using TERMUX_ARCH instead of SUB_PKG_ARCH (defined below) is intentional.
		if [ "$TERMUX_SUBPKG_EXCLUDED_ARCHES" != "${TERMUX_SUBPKG_EXCLUDED_ARCHES/$TERMUX_ARCH}" ]; then
			echo "Skipping creating subpackage '$SUB_PKG_NAME' for arch $TERMUX_ARCH"
			continue
		fi

		local SUB_PKG_ARCH=$TERMUX_ARCH
		[ "$TERMUX_SUBPKG_PLATFORM_INDEPENDENT" = "true" ] && SUB_PKG_ARCH=any

		cd "$SUB_PKG_DIR/massage"

		# Check that files were actually installed, else don't subpackage.
		if [ "$SUB_PKG_ARCH" = "any" ] && [ "$(find . -type f -print | head -n1)" = "" ]; then
			echo "No files in subpackage '$SUB_PKG_NAME' when built for $SUB_PKG_ARCH with package '$TERMUX_PKG_NAME', so"
			echo "the subpackage was not created. If unexpected, check to make sure the files are where you expect."
			cd "$TERMUX_PKG_MASSAGEDIR/$TERMUX_PREFIX_CLASSICAL"
			continue
		fi

		local SUB_PKG_INSTALLSIZE
		SUB_PKG_INSTALLSIZE=$(du -bs . | cut -f 1)
		local BUILD_DATE
		BUILD_DATE=$(date +%s)

		# Depend on the parent package unless the recipe says otherwise or
		# the parent already lists this subpackage as its own dependency.
		local PKG_DEPS_SPC=" ${TERMUX_PKG_DEPENDS//,/} "
		if [ -z "$TERMUX_SUBPKG_DEPEND_ON_PARENT" ] && [ "${PKG_DEPS_SPC/ $SUB_PKG_NAME /}" = "$PKG_DEPS_SPC" ]; then
			# Does pacman supports versioned dependencies?
			#TERMUX_SUBPKG_DEPENDS+=", $TERMUX_PKG_NAME (= $TERMUX_PKG_FULLVERSION)"
			TERMUX_SUBPKG_DEPENDS+=", $TERMUX_PKG_NAME"
		elif [ "$TERMUX_SUBPKG_DEPEND_ON_PARENT" = unversioned ]; then
			TERMUX_SUBPKG_DEPENDS+=", $TERMUX_PKG_NAME"
		elif [ "$TERMUX_SUBPKG_DEPEND_ON_PARENT" = deps ]; then
			TERMUX_SUBPKG_DEPENDS+=", $TERMUX_PKG_DEPENDS"
		fi

		# For glibc builds, rewrite package names in every relation field.
		if [ "$TERMUX_GLOBAL_LIBRARY" = "true" ] && [ "$TERMUX_PACKAGE_LIBRARY" = "glibc" ]; then
			test ! -z "$TERMUX_SUBPKG_DEPENDS" && TERMUX_SUBPKG_DEPENDS=$(package__add_prefix_glibc_to_package_names "$TERMUX_SUBPKG_DEPENDS")
			test ! -z "$TERMUX_SUBPKG_BREAKS" && TERMUX_SUBPKG_BREAKS=$(package__add_prefix_glibc_to_package_names "$TERMUX_SUBPKG_BREAKS")
			test ! -z "$TERMUX_SUBPKG_CONFLICTS" && TERMUX_SUBPKG_CONFLICTS=$(package__add_prefix_glibc_to_package_names "$TERMUX_SUBPKG_CONFLICTS")
			test ! -z "$TERMUX_SUBPKG_RECOMMENDS" && TERMUX_SUBPKG_RECOMMENDS=$(package__add_prefix_glibc_to_package_names "$TERMUX_SUBPKG_RECOMMENDS")
			test ! -z "$TERMUX_SUBPKG_REPLACES" && TERMUX_SUBPKG_REPLACES=$(package__add_prefix_glibc_to_package_names "$TERMUX_SUBPKG_REPLACES")
			test ! -z "$TERMUX_SUBPKG_PROVIDES" && TERMUX_SUBPKG_PROVIDES=$(package__add_prefix_glibc_to_package_names "$TERMUX_SUBPKG_PROVIDES")
			test ! -z "$TERMUX_SUBPKG_SUGGESTS" && TERMUX_SUBPKG_SUGGESTS=$(package__add_prefix_glibc_to_package_names "$TERMUX_SUBPKG_SUGGESTS")
		fi

		# Package metadata.
		{
			echo "pkgname = $SUB_PKG_NAME"
			echo "pkgbase = $TERMUX_PKG_NAME"
			echo "pkgver = $TERMUX_PKG_FULLVERSION_FOR_PACMAN"
			echo "pkgdesc = $(echo "$TERMUX_SUBPKG_DESCRIPTION" | tr '\n' ' ')"
			echo "url = $TERMUX_PKG_HOMEPAGE"
			echo "builddate = $BUILD_DATE"
			echo "packager = $TERMUX_PKG_MAINTAINER"
			echo "size = $SUB_PKG_INSTALLSIZE"
			echo "arch = $SUB_PKG_ARCH"
			# The sed/awk pipelines below normalize Debian-style relations
			# ("foo (>= 1.0)") into pacman syntax, appending "-0" when a
			# version comparison lacks a package revision.
			if [ -n "$TERMUX_SUBPKG_REPLACES" ]; then
				tr ',' '\n' <<< "$TERMUX_SUBPKG_REPLACES" | sed 's|(||g; s|)||g; s| ||g; s|>>|>|g; s|<<|<|g' | awk '{ printf "replaces = " $1; if ( ($1 ~ /</ || $1 ~ />/ || $1 ~ /=/) && $1 !~ /-/ ) printf "-0"; printf "\n" }'
			fi
			if [ -n "$TERMUX_SUBPKG_CONFLICTS" ]; then
				tr ',' '\n' <<< "$TERMUX_SUBPKG_CONFLICTS" | sed 's|(||g; s|)||g; s| ||g; s|>>|>|g; s|<<|<|g' | awk '{ printf "conflict = " $1; if ( ($1 ~ /</ || $1 ~ />/ || $1 ~ /=/) && $1 !~ /-/ ) printf "-0"; printf "\n" }'
			fi
			if [ -n "$TERMUX_SUBPKG_BREAKS" ]; then
				tr ',' '\n' <<< "$TERMUX_SUBPKG_BREAKS" | sed 's|(||g; s|)||g; s| ||g; s|>>|>|g; s|<<|<|g' | awk '{ printf "conflict = " $1; if ( ($1 ~ /</ || $1 ~ />/ || $1 ~ /=/) && $1 !~ /-/ ) printf "-0"; printf "\n" }'
			fi
			if [ -n "$TERMUX_SUBPKG_PROVIDES" ]; then
				# Fixed: this branch previously processed $TERMUX_SUBPKG_REPLACES
				# (copy-paste error), emitting wrong "provides" entries.
				tr ',' '\n' <<< "$TERMUX_SUBPKG_PROVIDES" | sed 's|(||g; s|)||g; s| ||g; s|>>|>|g; s|<<|<|g' | awk '{ printf "provides = " $1; if ( ($1 ~ /</ || $1 ~ />/ || $1 ~ /=/) && $1 !~ /-/ ) printf "-0"; printf "\n" }'
			fi
			if [ -n "$TERMUX_SUBPKG_DEPENDS" ]; then
				tr ',' '\n' <<< "${TERMUX_SUBPKG_DEPENDS/#, /}" | sed 's|(||g; s|)||g; s| ||g; s|>>|>|g; s|<<|<|g' | awk '{ printf "depend = " $1; if ( ($1 ~ /</ || $1 ~ />/ || $1 ~ /=/) && $1 !~ /-/ ) printf "-0"; printf "\n" }' | sed 's/|.*//'
			fi
			if [ -n "$TERMUX_SUBPKG_RECOMMENDS" ]; then
				tr ',' '\n' <<< "$TERMUX_SUBPKG_RECOMMENDS" | awk '{ printf "optdepend = %s\n", $1 }'
			fi
			if [ -n "$TERMUX_SUBPKG_SUGGESTS" ]; then
				tr ',' '\n' <<< "$TERMUX_SUBPKG_SUGGESTS" | awk '{ printf "optdepend = %s\n", $1 }'
			fi
			if [ -n "$TERMUX_SUBPKG_CONFFILES" ]; then
				tr ',' '\n' <<< "$TERMUX_SUBPKG_CONFFILES" | awk '{ printf "backup = '"${TERMUX_PREFIX:1}"'/%s\n", $1 }'
			fi
			if [ -n "$TERMUX_SUBPKG_GROUPS" ]; then
				tr ',' '\n' <<< "${TERMUX_SUBPKG_GROUPS/#, /}" | awk '{ printf "group = %s\n", $1 }'
			fi
		} > .PKGINFO

		# Build metadata.
		{
			echo "format = 2"
			echo "pkgname = $SUB_PKG_NAME"
			echo "pkgbase = $TERMUX_PKG_NAME"
			echo "pkgver = $TERMUX_PKG_FULLVERSION_FOR_PACMAN"
			echo "pkgarch = $SUB_PKG_ARCH"
			echo "packager = $TERMUX_PKG_MAINTAINER"
			echo "builddate = $BUILD_DATE"
		} > .BUILDINFO

		# Write package installation hooks.
		termux_step_create_subpkg_debscripts
		termux_step_create_pacman_install_hook

		# Configure the compressor command and file extension for the archive.
		local COMPRESS
		local PKG_FORMAT
		case $TERMUX_PACMAN_PACKAGE_COMPRESSION in
			"gzip")
				COMPRESS=(gzip -c -f -n)
				PKG_FORMAT="gz";;
			"bzip2")
				COMPRESS=(bzip2 -c -f)
				PKG_FORMAT="bz2";;
			"zstd")
				COMPRESS=(zstd -c -z -q -)
				PKG_FORMAT="zst";;
			"lrzip")
				COMPRESS=(lrzip -q)
				PKG_FORMAT="lrz";;
			"lzop")
				COMPRESS=(lzop -q)
				PKG_FORMAT="lzop";;
			"lz4")
				COMPRESS=(lz4 -q)
				PKG_FORMAT="lz4";;
			"lzip")
				COMPRESS=(lzip -c -f)
				PKG_FORMAT="lz";;
			"xz" | *)
				COMPRESS=(xz -c -z -)
				PKG_FORMAT="xz";;
		esac

		# Create the actual .pkg file:
		local TERMUX_SUBPKG_PACMAN_FILE=$TERMUX_OUTPUT_DIR/${SUB_PKG_NAME}${DEBUG}-${TERMUX_PKG_FULLVERSION_FOR_PACMAN}-${SUB_PKG_ARCH}.pkg.tar.${PKG_FORMAT}
		shopt -s dotglob globstar
		printf '%s\0' **/* | bsdtar -cnf - --format=mtree \
			--options='!all,use-set,type,uid,gid,mode,time,size,md5,sha256,link' \
			--null --files-from - --exclude .MTREE | \
			gzip -c -f -n > .MTREE
		# Fixed: unquoted $COMPRESS expanded only the array's first element,
		# dropping all compressor flags; expand the whole array.
		printf '%s\0' **/* | bsdtar --no-fflags -cnf - --null --files-from - | \
			"${COMPRESS[@]}" > "$TERMUX_SUBPKG_PACMAN_FILE"
		shopt -u dotglob globstar

		# Go back to main package:
		cd "$TERMUX_PKG_MASSAGEDIR/$TERMUX_PREFIX_CLASSICAL"
	done
}
| true
|
54d1edebd25d89ec6da157acf74b090af22eadfd
|
Shell
|
melindam/chef
|
/cookbooks/jmh-bamboo/templates/default/bamboo_agent.erb
|
UTF-8
| 652
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# SysV init script (ERB template) for an Atlassian Bamboo build agent.
# The agent jar is launched as the "bamboo" user; <%= ... %> placeholders
# are filled in by Chef at deploy time.
# description: Bamboo Agent
# chkconfig: 234 21 81
case "$1" in
start)
# Run the agent jar with the configured JAVA_HOME, agent home and server URL.
su - bamboo -c "cd /home/bamboo; export JAVA_HOME=<%= @java_home %>; \
<%= @java_home %>/bin/java -Dbamboo.home=<%= @agent_home %> -jar <%= @agent_name %> <%= @bamboo_server %> start"
;;
stop)
su - bamboo -c "cd /home/bamboo; export JAVA_HOME=<%= @java_home %>; \
<%= @java_home %>/bin/java -Dbamboo.home=<%= @agent_home %> -jar <%= @agent_name %> <%= @bamboo_server %> stop"
;;
restart)
# Re-invoke this script for stop/start; the sleep gives the JVM time to exit.
$0 stop
sleep 5
$0 start
;;
status)
# Crude liveness check: list processes mentioning the agent home path.
ps ax | grep <%= @agent_home %>
;;
*)
echo $"Usage: $0 {start|stop|restart|status}"
;;
esac
| true
|
4eaa94e4fe92c2ff8a2687c5dd84ca583c31d475
|
Shell
|
alvinamartya/infra-test-so
|
/setup-infra.sh
|
UTF-8
| 521
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# prog : setup-infra.sh
# Provision a LEMP-style host: install nginx/MySQL/PHP and deploy the
# repo's nginx site configs. Must run as root (apt + writes under /etc).
if [[ $(id -u) -ne 0 ]] ; then echo "Please run as root" ; exit 1 ; fi
# install required apps
apt install -y nginx mysql-server php-fpm php-mysql composer php-json php-xmlrpc php-curl php-gd php-xml php-mbstring
# remove original content
rm -rf /etc/nginx/sites-available/* /etc/nginx/sites-enabled/*
# copy content from repo
# NOTE(review): sites-enabled gets a full copy rather than symlinks into
# sites-available; disabling a site later requires editing both — confirm
# this is intended.
cp -R nginx/sites-available/. /etc/nginx/sites-available/
cp -R nginx/sites-available/. /etc/nginx/sites-enabled/
# restart nginx
systemctl restart nginx
| true
|
7ab0378a7f8fd78a4f8e299d4d73743163c5d525
|
Shell
|
marionxue/Ansible
|
/mongodb-rs/roles/exporter/templates/runexporter.sh.j2
|
UTF-8
| 850
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Start Prometheus node_exporter and mongodb_exporter on a replica-set host.
# Jinja2 template rendered by Ansible: {{ ansible_ssh_host }} is the
# inventory address of the target host.
exporterpath="/opt/ansible/exporter"
mongodb_exporter="mongodb_exporter-linux-amd64"
node_tar_exporter="node_exporter-0.16.0.linux-amd64.tar.gz"

# node_exporter ships as a tarball: unpack it and launch in the background.
if [ -f "${exporterpath}/${node_tar_exporter}" ]; then
  cd "${exporterpath}"
  tar xf "${exporterpath}/${node_tar_exporter}"
  chmod +x "${exporterpath}/node_exporter-0.16.0.linux-amd64/node_exporter"
  nohup "${exporterpath}/node_exporter-0.16.0.linux-amd64/node_exporter" &
else
  # Exit 3: required artifact missing (checked by the Ansible play).
  exit 3
fi

# mongodb_exporter is a single binary: ensure it is executable, then launch.
if [ -f "${exporterpath}/${mongodb_exporter}" ]; then
  if [ ! -x "${exporterpath}/${mongodb_exporter}" ]; then
    chmod +x "${exporterpath}/${mongodb_exporter}"
  fi
  nohup "${exporterpath}/${mongodb_exporter}" -mongodb.uri mongodb://{{ ansible_ssh_host }}:27017 &
else
  exit 3
fi
| true
|
ec6e7d3472bdd292cf3285e91a69bd01423190d9
|
Shell
|
efficientbug/dotfiles-neutron
|
/x/.xinitrc
|
UTF-8
| 720
| 2.953125
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# X session startup: source system xinitrc snippets, set wallpaper,
# keyboard tweaks and utilities, then exec the window manager.
# Source misc. environment system settings
if [ -d /etc/X11/xinit/xinitrc.d ]; then
for f in /etc/X11/xinit/xinitrc.d/*; do
[ -x "$f" ] && . "$f"
done
unset f
fi
# Wallpaper (restores the last feh background)
sh .fehbg
# Enable restarting X with Ctrl+Alt+Backspace
setxkbmap -option terminate:ctrl_alt_bksp
# Remap Caps Lock to Esc
setxkbmap -option caps:escape
# Set keyboard repeat rate (400 ms delay, 30 repeats/sec)
# TODO maybe set it faster?
xset r rate 400 30
# Hide cursor on idle
unclutter --fork --ignore-scrolling --timeout 3
# Set cursor
xsetroot -cursor_name left_ptr
# Source .Xresources
[ -f ~/.Xresources ] && xrdb ~/.Xresources
# Redshift
# redshift &
# Tint (panel, backgrounded so startup continues)
tint2 &
# Compositor
# compton &
# bspwm
# exec bspwm
# Openbox — exec replaces this shell; the session ends when openbox exits
exec openbox
| true
|
7fdb9649ef2e86c46df75d0ed696b16d6a34b978
|
Shell
|
JDevTechnology/Scripts
|
/auto-wall.sh
|
UTF-8
| 225
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Rotate the desktop wallpaper: pick a random image every 10 minutes.
# Build the list NUL-delimited so paths containing spaces (or colons,
# which broke the old `cut -d':'` parsing) survive intact.
img=()
while IFS= read -r -d '' f; do
  # keep only files that `file` identifies as some kind of image
  case "$(file -b -- "$f")" in
    *image*) img+=("$f") ;;
  esac
done < <(find /home/user/WallPapers/DistroTube/wallpapers/ -type f -print0)

# Guard against modulo-by-zero when no images were found.
if [ "${#img[@]}" -eq 0 ]; then
  echo "auto-wall: no images found" >&2
  exit 1
fi

while true
do
  feh --bg-scale "${img[RANDOM % ${#img[@]}]}"
  sleep 10m
done
| true
|
c620e38657ec32418b3bd32a8b83df96a9739e98
|
Shell
|
dotCMS/zarchive_qa
|
/artifacts/aws/createAMI_ubuntu.sh
|
UTF-8
| 2,374
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision an Ubuntu AMI for the dotCMS QA/Jenkins test fleet: raises file
# limits, installs packages, Java 7, PostgreSQL config and an Xvfb service.
# Must be run as root user
date > /root/setup_start.txt
env > /root/setup_env.txt
#setup jenkins directory
mkdir /opt/jenkins
chown ubuntu:ubuntu /opt/jenkins
#update software repo lists
apt-get -y update
#upgrade all installed software
apt-get -y upgrade
# update hard and soft file limits (inserted before the "End of file" marker)
sed -i.dist 's/\(.*End of file.*\)/\n \
\*\thard\tnofile\t1000000 \
\*\tsoft\tnofile\t1000000 \
root\thard\tnofile\t1000000 \
root\tsoft\tnofile\t1000000 \
\n\1/g' /etc/security/limits.conf
# make PAM honor the limits.conf changes for all session types
echo -e "session\trequired\tpam_limits.so" >> /etc/pam.d/common-session
echo -e "session\trequired\tpam_limits.so" >> /etc/pam.d/common-session-noninteractive
#install expected packages
apt-get -y install telnet uuid xfsprogs git awscli xvfb firefox unzip
apt-get -y install postgresql-9.3 ant
#mysql-server-5.6
# Set timezone to central
timedatectl set-timezone America/Chicago
# create download folder
mkdir /root/downloads
# create aws credentials
# NOTE(review): long-lived AWS access keys are embedded in plain text here
# (and below, credentials appear in curl URLs); these should be rotated and
# supplied via an instance profile / secrets store instead.
mkdir /root/.aws
echo '[default]' > /root/.aws/config
echo 'region = us-east-1' >> /root/.aws/config
echo 'aws_access_key_id = AKIAJB7GBDVDNTROV7LQ' >> /root/.aws/config
echo 'aws_secret_access_key = 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' >> /root/.aws/config
# download and install java
cd /root/downloads
aws s3 cp s3://qa.dotcms.com/testautomation/software/java/jdk-7u71-linux-x64.gz .
mkdir -p /opt/oracle/java
cd /opt/oracle/java/
tar -xvf /root/downloads/jdk-7u71-linux-x64.gz
ln -s jdk1.7.0_*/ latest
# register this JDK as the system default java/javac
update-alternatives --install /usr/bin/java java /opt/oracle/java/latest/bin/java 99999
update-alternatives --install /usr/bin/javac javac /opt/oracle/java/latest/bin/javac 99999
# configure postgreSQL ($QA_BRANCH selects which branch's configs to fetch)
cd /root/downloads
curl -u b.rent.griffin@dotcms.com:@s3cur3 https://raw.githubusercontent.com/dotCMS/qa/${QA_BRANCH}/artifacts/aws/database/postgres/postgresql.conf > postgresql.conf
cp ./postgresql.conf /etc/postgresql/9.3/main/postgresql.conf
curl -u b.rent.griffin@dotcms.com:@s3cur3 https://raw.githubusercontent.com/dotCMS/qa/${QA_BRANCH}/artifacts/aws/database/postgres/pg_hba.conf > pg_hba.conf
cp ./pg_hba.conf /etc/postgresql/9.3/main/pg_hba.conf
# restart postgresql server so configuration changes can take effect
/etc/init.d/postgresql restart
# configure mySQL
# TODO
# configure and start Xvfb service (for headless browser tests)
aws s3 cp s3://qa.dotcms.com/testautomation/xvfb /etc/init.d/xvfb
chmod +x /etc/init.d/xvfb
sudo update-rc.d xvfb defaults
/etc/init.d/xvfb start
date > /root/setup_done.txt
| true
|
50852e4633ebe214dda8e07a3a622fafaf610850
|
Shell
|
danielhoherd/pub-bin
|
/docker-images
|
UTF-8
| 504
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Author: github.com/danielhoherd
# License: MIT
# Purpose: Format 'docker images' in a vertical style similar to mysql's \G
# https://docs.docker.com/engine/reference/commandline/images/#formatting
# The Go template is kept in a named variable for readability; any extra
# arguments are forwarded to `docker images` untouched.
format_template="{{.Repository}}:{{.Tag}}
  .ID: {{.ID}}
  .Repository: {{.Repository}}
  .Tag: {{.Tag}}
  .Digest: {{.Digest}}
  .CreatedSince: {{.CreatedSince}}
  .CreatedAt: {{.CreatedAt}}
  .Size: {{.Size}}"
docker images --format="${format_template}" "$@"
| true
|
256dc2a0acb9ae4def09d4221d20be83717b4d44
|
Shell
|
neffets/docker-omd
|
/entrypoint.sh
|
UTF-8
| 1,880
| 4
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Container entrypoint for an OMD monitoring site: creates (or updates)
# the site named by $SITENAME, starts it, and then watches it forever,
# restarting stopped services up to MAX_TRIES times.
SLEEP_TIME=60
MAX_TRIES=5
SITE=${SITENAME:=sp}
#######################################################################
groupadd -g 1001 "${SITE}" || echo "* group $SITE already exists"
useradd -s /bin/bash -m -d /omd/sites/${SITE} -u 1001 -g "${SITE}" "${SITE}" || echo "* User $SITE already exists"
# Fix some permission issues (not sure why it happens)
[ -d "/omd/sites/${SITE}" ] && chown -R ${SITE}.${SITE} "/omd/sites/${SITE}"
# Check if SITE is initialized ; else copy "sp"-site
omd sites -b | egrep -e "^${SITE}$"
if [ "$?" -eq 1 ]; then
    # Set up a default site
    omd create "${SITE}"
    # We don't want TMPFS as it requires higher privileges ?
    # Accept connections on any IP address, since we get a random one ?
    # ${!OMD_*} expands to the NAMES of all variables starting with
    # OMD_; each one becomes an `omd config set` key (prefix stripped).
    # `declare -n` (nameref) requires bash 4.3+.
    for varname in ${!OMD_*}
    do
        declare -n configval=$varname
        if [ ! -z "$configval" ]; then
            configkey=${varname/OMD_/}
            omd config "${SITE}" set "${configkey}" "${configval}"
        fi
    done
else
    adduser "${SITE}" "${SITE}" || true
    omd update --conflict install "${SITE}"
    ln -sfn "../../versions/`omd versions -b|head -1`" /opt/omd/sites/${SITE}/version
fi
# Add the new user to crontab, to avoid error merging crontabs
adduser "${SITE}" crontab || true
adduser "${SITE}" omd || true
omd enable "${SITE}"
if [ "${SITE}" != "sp" ]
then
    omd disable "sp"
fi
#######################################################################
# watching for startup
# Infinite supervision loop: this keeps PID 1 alive in the container
# and gives up (exit 1) only after MAX_TRIES consecutive restarts.
tries=0
echo "** Starting OMD **"
omd start "${SITE}"
while /bin/true; do
    sleep $SLEEP_TIME
    omd status "${SITE}" | grep -q "stopped" && {
        if [ $tries -gt $MAX_TRIES ]; then
            echo "** ERROR: Stopped service found; aborting (after $tries tries) **"
            exit 1
        fi
        tries=$(( tries + 1 ))
        echo "** ERROR: Stopped service found; trying to start again **"
        omd start "${SITE}"
    }
done
| true
|
fd4b96332714ca451f4f0708c32dde6c89d6f33b
|
Shell
|
nordugrid/arc
|
/debian/nordugrid-arc-arex.postinst
|
UTF-8
| 254
| 2.921875
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"CPL-1.0"
] |
permissive
|
#!/bin/sh
set -e
# On first configuration, bootstrap a test CA and a host certificate so
# the service can start with TLS out of the box. An already existing
# testCA hostcert (i.e. a package upgrade) is left untouched.
if [ "$1" = "configure" ] && [ ! -f /etc/grid-security/testCA-hostcert.pem ]; then
    arcctl test-ca init
    arcctl test-ca hostcert
fi
#DEBHELPER#
| true
|
b267c932bca577ded2e24958be0b86421c55f991
|
Shell
|
denniskano/presto-hive-kerberos-docker
|
/test-presto.sh
|
UTF-8
| 1,064
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Smoke test for the Presto/Hive/S3 stack: recreates the test bucket,
# then drives Presto through catalog listing, a TPCH query, and a full
# schema/table/insert/select round-trip against Hive-on-S3.
set -euo pipefail
# Terminal Control codes
# see: https://stackoverflow.com/a/5947802
COLOR_GREEN='\033[0;32m'
COLOR_RESET='\033[0m'
# Print a timestamped, colored progress message.
function log() {
    echo -e "${COLOR_GREEN}[$(date +'%Y-%m-%dT%H:%M:%S%z')]: $*${COLOR_RESET}"
}
BUCKET_NAME=hadoop-data
log "Create buckets"
# `|| true` keeps set -e happy when the bucket doesn't exist yet.
./aws-cli.sh s3 rm --recursive "s3://${BUCKET_NAME}" || true
./aws-cli.sh s3 mb "s3://${BUCKET_NAME}" || true
log "List catalogs"
./presto-send-query.sh "SHOW CATALOGS"
log "Run simple TPCH query"
./presto-send-query.sh "SELECT name FROM tpch.sf1.customer ORDER BY custkey ASC LIMIT 3"
log "Create new schema"
bash -x ./presto-send-query.sh "CREATE SCHEMA hive.sample_schema
WITH (
    location = 's3a://${BUCKET_NAME}/'
)"
log "Create new table"
./presto-send-query.sh "CREATE TABLE hive.sample_schema.sample_table (
  col1 varchar,
  col2 varchar
)"
log "Insert data"
./presto-send-query.sh "INSERT INTO hive.sample_schema.sample_table SELECT 'value1.1', 'value1.2'"
log "Query data"
./presto-send-query.sh "SELECT * FROM hive.sample_schema.sample_table"
| true
|
8a1f2fd1265fbcdaad987e8929dfcbc57d949763
|
Shell
|
thebigb/lxc-utils
|
/etc/bash-completion.d/lxc-utils
|
UTF-8
| 844
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Bash-completion definitions for the lxc-utils wrapper commands.
# `have` and `_get_comp_words_by_ref` come from the bash-completion
# framework that sources this file.
have lxc-start &&
{
	# Complete with the names of all containers.
	_lxc_names()
	{
		COMPREPLY=( $( compgen -W "$( lxc-ls )" "$cur" ) )
	}
	# Complete with the names of running containers only.
	_lxc_running()
	{
		COMPREPLY=( $( compgen -W "$( lxc-ls --running )" "$cur" ) )
	}
	_lxc_generic_n()
	{
		local cur prev
		COMPREPLY=()
		_get_comp_words_by_ref cur prev
		_lxc_names "$cur"
		return 1
	}
	_lxc_running_n()
	{
		local cur prev
		COMPREPLY=()
		_get_comp_words_by_ref cur prev
		_lxc_running "$cur"
		return 1
	}
	# -o default falls back to filename completion when no match.
	complete -o default -F _lxc_generic_n lxa
	complete -o default -F _lxc_generic_n lxb
	complete -o default -F _lxc_generic_n lxc
	complete -o default -F _lxc_generic_n lxclone
	complete -o default -F _lxc_generic_n lxconf
	complete -o default -F _lxc_generic_n lxdiag
	complete -o default -F _lxc_generic_n lxi
	complete -o default -F _lxc_generic_n lxlog
	complete -o default -F _lxc_running_n lxs
}
| true
|
534dcc44490ae2a0a8f05286c9ba57ee23151d93
|
Shell
|
VividCortex/johnny-deps
|
/configure
|
UTF-8
| 613
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Minimal autoconf-style configure script: parses --prefix and writes
# the result to ./config.make for the Makefile to include.
prefix=/usr/local
usage()
{
	echo "Usage: configure [-h|--help] [--prefix=prefix]"
}
while [ "$1" != "" ]; do
    case "$1" in
        --prefix=*)
            prefix=${1#--prefix=}
            ;;
        --prefix)
            shift
            prefix=$1
            ;;
        -h|--help)
            usage
            exit
            ;;
        *)
            echo "$0: unknown argument $1" >&2
            usage
            exit 1
            ;;
    esac
    shift
done
# The ">> config.make" line below is display text inside the here-doc,
# not a redirection — it just tells the user where the value is going.
cat <<EOF
configuring godeps...
prefix=$prefix
>> config.make
EOF
cat > ./config.make <<EOF
SUBDIRS=
prefix=$prefix
EOF
| true
|
39145cd5224e2b7c5641fa3dc4f3539ce187a6d3
|
Shell
|
doohee323/tz-k8s-vagrant
|
/tz-local/resource/nexus/install.sh
|
UTF-8
| 2,321
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install Sonatype Nexus 3 as a systemd service and point the local
# Docker daemon at the (plain-HTTP) Nexus registry endpoints.
sudo apt-get install openjdk-8-jdk -y
cd /opt
wget http://download.sonatype.com/nexus/3/nexus-3.22.0-02-unix.tar.gz
sudo tar -xvf nexus-3.22.0-02-unix.tar.gz
ln -s nexus-3.22.0-02 nexus
sudo adduser nexus
sudo chown -R nexus:nexus /opt/nexus
sudo chown -R nexus:nexus /opt/sonatype-work
sudo sed -i "s|#run_as_user=\"\"|run_as_user=\"root\"|g" /opt/nexus/bin/nexus.rc
#sudo sed -i "s|#application-port=8081|application-port=8081|g" /opt/sonatype-work/nexus3/etc/nexus.properties
#vi /opt/nexus/bin/nexus.vmoptions
echo '
[Unit]
Description=nexus service
After=network.target
[Service]
Type=forking
LimitNOFILE=65536
User=root
Group=root
ExecStart=/opt/nexus/bin/nexus start
ExecStop=/opt/nexus/bin/nexus stop
Restart=on-abort
[Install]
WantedBy=multi-user.target
' > /etc/systemd/system/nexus.service
sudo systemctl enable nexus
sudo service nexus start
sudo service nexus stop
#sudo service nexus status
# every client pc needs this setting
# Bug fix: the original file had a trailing comma after the last
# registry entry, which is invalid JSON and makes dockerd refuse to
# start with this daemon.json.
echo '
{
  "insecure-registries" : [
    "192.168.1.10:5000",
    "192.168.0.180:5000"
  ]
}
' > /etc/docker/daemon.json
sudo service docker restart
echo '
##[ Nexus ]##########################################################
- url: http://192.168.1.10:8081
- id: admin
- passwd: cat /opt/sonatype-work/nexus3/admin.password
http://192.168.1.10:8081/#admin/repository/blobstores
  Create blob store
    docker-hosted
    docker-hub
http://192.168.1.10:8081/#admin/repository/repositories
  Repositories > Select Recipe > Create repository: docker (hosted)
    name: docker-hosted
    http: 5000
    Enable Docker V1 API: checked
    Blob store: docker-hosted
  Repositories > Select Recipe > Create repository: docker (proxy)
    name: docker-hub
    Enable Docker V1 API: checked
    Remote storage: https://registry-1.docker.io
    select Use Docker Hub
    Blob store: docker-hub
http://192.168.1.10:8081/#admin/security/realms
  add "Docker Bearer Token Realm" Active
docker login 192.168.1.10:5000
docker pull busybox
RMI=`docker images -a | grep busybox | awk '{print $3}'`
docker tag $RMI 192.168.1.10:5000/busybox:v20201225
docker push 192.168.1.10:5000/busybox:v20201225
http://192.168.1.10:8081/#browse/browse:docker-hosted
#######################################################################
' >> /vagrant/info
cat /vagrant/info
| true
|
2862b96971c210211611177df2c4b08559ed5837
|
Shell
|
brianlions/solar-wind-lisp
|
/add_header.sh
|
UTF-8
| 709
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Prepend a C-style file header (name, author, creation date taken from
# the file's first git commit) to each file passed as an argument.
author_name="Brian Yi ZHANG"
author_email="brianlions@gmail.com"
date_format="%a %b %d %H:%M:%S %Y %Z"
#date_format="%Y/%m/%d %H:%M:%S"
for fn in "$@"
do
    # get first commit id of the specified file
    commit_id=`git log --oneline "$fn" | tail -1 | gawk '{ print $1 }'`
    # get date and time from commit message of that commit
    # NOTE(review): `cut -c 9-32` assumes the exact column layout of
    # `git log`'s "Date:" line — verify against the git version in use.
    log_date=`git log $commit_id -1 | grep Date | cut -c 9-32`
    # format the date and time
    header_date=`date -d "$log_date" +"$date_format"`
    # add header to the file
    # sed's `1 i\` inserts the block before line 1; the trailing
    # backslashes continue the inserted text across lines.
    sed -i -e '1 i\
/*\
 * file name:    '"$fn"'\
 *\
 * author:       '"$author_name"'\
 * email:        '"$author_email"'\
 * date created: '"$header_date"'\
 */\
' "$fn"
done
| true
|
c67e5d2102312b1be693caafe514892ab6fd1f93
|
Shell
|
Nina-Om/wrf_hydro_model_tools
|
/forcing/download/HRRR/download_HRRR.sh
|
UTF-8
| 352
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Purpose: Download HRRR files for WRF-Hydro forcing
# Author: K. FitzGerald
# Date: March 2018
# Usage: ./download_HRRR.sh <YYYYMMDD>
if [ "$#" -ne 1 ]; then
  echo "Incorrect number of arguments"
  # NOTE(review): bare `exit` returns status 0 here, so callers cannot
  # detect the usage error — consider `exit 1`.
  exit
fi
date_str=$1
# -r/-np/-nd: recurse without parent dirs or directory structure;
# -A restricts the download to the 14Z surface forecast grib2 files.
wget -r -np -nd -A "hrrr.t14z.wrfsfcf??.grib2" http://www.ftp.ncep.noaa.gov/data/nccf/com/hrrr/prod/hrrr.${date_str}/
| true
|
70e34a24f66863ef5b93474f3bad8bb357c3f9d3
|
Shell
|
woojiahao/meetup
|
/heroku-run.sh
|
UTF-8
| 454
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactive helper: provision a Heroku app with Postgres for the bot,
# set its token, deploy via git, and start the worker dyno.
read -p "Enter your application name: " applicationName
echo "Creating Heroku application"
heroku create "$applicationName"
echo "Adding Heroku Postgresql addon"
heroku addons:create heroku-postgresql:hobby-dev
read -p "Enter your bot token: " token
heroku config:set BOT_TOKEN="$token"
echo "Setting Heroku stack to Docker container"
heroku stack:set container
git push heroku master
echo "Enabling Discord bot"
heroku ps:scale worker=1
| true
|
4e885050248cf97c894f1f6bb1cba7f826ec6d66
|
Shell
|
p4p1/Projects
|
/usb_onion-master/bin/help
|
UTF-8
| 801
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Made by: papii
# help command when you need help :D

# Print the framed welcome banner (top and bottom border are the same
# line, so it is kept in a single variable).
function loadupMessage {
	local border="+---------------------------------------------------------------------+\n"
	printf "%b" "$border"
	printf "|------------- Welcome to ONION's HELP, made by papiii ---------------|\n"
	printf "|-- here you can find most of the documentation you would ever need --|\n"
	printf "%b" "$border"
}

# List every command the interpreter understands, one per line.
function availableCommands {
	local -a command_lines=(
		"help\t\t-\tCommand you just run to have help.\n"
		"exit\t\t-\tCommand to quit the interpretor.\n"
		"new_account\t-\tAdd a new account to the epm page\n"
		"web\t\t-\tLoad up the epm web interface on the given port\n"
		"\t\t\t{!} Note port 80 needs root privilege\n"
	)
	local line
	for line in "${command_lines[@]}"; do
		printf "%b" "$line"
	done
}

loadupMessage
availableCommands
| true
|
241c9f64d2e0176da3e979490b67675643f41ff4
|
Shell
|
netsec-ethz/scion
|
/rules_openapi/internal/header.sh
|
UTF-8
| 304
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# This file is copied from https://github.com/cgrindel/rules_updatesrc/blob/main/examples/simple/header/header.sh
# Copy $src to $out, prepending $header unless the file already starts
# with that exact line.
# $1 - source file, $2 - output file, $3 - header line
src="$1"
out="$2"
header="$3"
first_line=$(head -n 1 "${src}")
if [[ "${first_line}" != "${header}" ]]; then
  echo "${header}" > "${out}"
fi
# Appends to $out (only truncated above when the header was missing);
# assumes $out does not pre-exist with stale content — TODO confirm.
cat "${src}" >> "${out}"
| true
|
3b3ef2da1cbbfd36a03c317186a3d55c6d292734
|
Shell
|
Gitlinzi/abcdefgtesttodo
|
/save-verinfo.sh
|
UTF-8
| 319
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# Write build/VCS metadata (Jenkins environment variables) into a
# version file so deployed artifacts can report what produced them.
# $1 - optional output file name (default: version.html)
#version file name
fileName="version.html"
#read custom file name
if [ -n "$1" ];then
  fileName="$1"
fi
info="git branch: $GIT_BRANCH \ngit url: $GIT_URL \nbuild url: $BUILD_URL \nbuild id: $BUILD_ID \n"
echo "============= SAVE VERSION INFO =============="
# printf '%b' expands the \n escapes portably under /bin/sh; the
# original `echo -e $info` was a bashism AND word-split the unquoted
# message. Quoting the file name also protects names with spaces.
printf '%b\n' "$info" > "$fileName"
cat "$fileName"
| true
|
6e1285d64d2f595a1fc93408d4c26560ef3def1d
|
Shell
|
marcushill/troubadour-server
|
/scripts/install.sh
|
UTF-8
| 325
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
set -x # Show the output of the following commands (useful for debugging)
# Import the SSH deployment key
# Bug fix: the original passed "-drm troubadour_key.enc", which fused
# openssl's decrypt flag (-d) with the cleanup command (rm ...) into one
# bogus option, so the key was never decrypted. Decrypt, then remove
# the encrypted copy.
openssl aes-256-cbc -K "$encrypted_7fecd50c2af5_key" -iv "$encrypted_7fecd50c2af5_iv" -in troubadour_key.enc -out troubadour_key -d
rm troubadour_key.enc
chmod 600 troubadour_key
mv troubadour_key ~/.ssh/id_rsa
| true
|
a25f96585254bbd4f23073e02401623eec01b371
|
Shell
|
ajp8164/ows
|
/scripts/debian/postinst
|
UTF-8
| 963
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Debian postinst for ows: create the service user/group, rebuild the
# native node addons, prepare the data directory, and start the service
# under whichever init system is available.
set -e
set -o pipefail
# add group
if ! getent group | grep -q "^ows:" ; then
    echo "Creating system group: ows"
    groupadd --system ows
fi
# add user
if ! getent passwd | grep -q "^ows:"; then
    echo "Creating ows system user"
    useradd --gid "ows" --system -m ows
fi
# build nodejs addons
cd "/usr/opt/ows"
SKIP_BITCOIN_DOWNLOAD=1 npm rebuild
# setup data directory
mkdir -p "/home/ows/.ows/data"
chown -R ows:ows "/home/ows/.ows"
# start ows
# Try service(8), then upstart's start(8), then systemd; a failure to
# start is reported but does not abort the package installation.
if hash service 2> /dev/null; then
    service ows start || echo "ows could not be registered or started"
elif hash start 2> /dev/null; then
    start ows || echo "ows could not be registered or started"
elif hash systemctl 2> /dev/null; then
    {
        systemctl enable "ows.service" && \
        systemctl start "ows.service"
    } || echo "ows could not be registered or started"
else
    echo 'Your system does not appear to use upstart or systemd, so ows could not be started'
fi
| true
|
b135df0338ecae2abaff0faffacd168645b1ab11
|
Shell
|
missionLife/mission-life-new-user-email-invitation
|
/scripts/build
|
UTF-8
| 1,270
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# CI build: transpile ES6 sources, render the CloudFormation template
# for the current branch, and zip each lambda directory for deployment.
set -e # Exit on any child process error
source ./scripts/setup_env
npm run clean
mkdir -p dist
echo "Transpile ES6 to CJS"
npm install
npm run transpile -- --ignore spec.js
echo "Processing infrastructure template for $DRONE_BRANCH"
# echo "{\"SEARCH_ALERT_AWS_ACCESS_KEY_ID\":\"${SEARCH_ALERT_AWS_ACCESS_KEY_ID}\",
#       \"SEARCH_ALERT_AWS_SECRET_ACCESS_KEY\":\"${SEARCH_ALERT_AWS_SECRET_ACCESS_KEY}\",
#       \"SEARCH_SERVICE_API_KEY\":\"${SEARCH_SERVICE_API_KEY}\"}" | mustache - ./infrastructure/constants/$DRONE_BRANCH.json > ./dist/constants.json
cat ./infrastructure/constants/$DRONE_BRANCH.json > ./dist/constants.json
cat ./dist/constants.json | mustache - ./infrastructure/cloudformation.yaml > ./dist/cloudformation.yaml
# cat ./dist/constants.json | mustache - ./infrastructure/coverage-checker-subscription-api-swagger.yaml > ./dist/coverage-checker-subscription-api-swagger.yaml
echo "Production NPM Install"
rm -rf ./node_modules
npm install --production
# Each subdirectory of dist/cjs is one lambda: symlink node_modules in,
# then zip the directory contents into dist/<name>.zip.
for i in ./dist/cjs/* ; do
  if [ -d "$i" ]; then
    CURRENT_FOLDER=$(basename "$i")
    echo "Preparing $CURRENT_FOLDER"
    pushd ./dist/cjs/$CURRENT_FOLDER
    ln -s ../../../node_modules/
    popd
    pushd dist/cjs/$CURRENT_FOLDER
    zip -q -r ../../$CURRENT_FOLDER.zip .
    popd
  fi
done
| true
|
b7e985b3e647acdfa043b6dde01be5fa1084a9f0
|
Shell
|
Tylores/BESS
|
/tools/build-run.sh
|
UTF-8
| 315
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the BESS debug binary via the Makefile in ../build and run it
# with the checked-in config. The exports below are consumed by the
# Makefile (target platform, library locations).
# setup environment
export CPU=x86_64
export OS=linux
export VARIANT=debug
# modbus setup
export MBLIB=/usr/local/lib
export MBINC=/usr/local/include
# boost setup
export BOOSTINC=$HOME/src/boost_1_66_0
# build
export SRC=bess
make -C ../build
# run
./../build/bin/debug/$SRC -c ../data/config.ini
| true
|
4a0551671d355a042b8b60fba97a340eb2b36e93
|
Shell
|
gitPty/hello-world
|
/scripts01/show123.sh
|
UTF-8
| 327
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Echo the user's selection ("one", "two" or "three") in upper case,
# or print a usage line for anything else.

# Print the common message prefix (no trailing newline).
function printit(){
	echo -n "Your choice is "
}

# Validate and echo the selection.
# $1 - the user's choice.
# The three byte-identical branches of the original case statement are
# collapsed into a single "one"|"two"|"three" pattern.
function main(){
	case ${1} in
		"one"|"two"|"three")
			printit;echo ${1} | tr 'a-z' 'A-Z'
			;;
		*)
			echo "Usage ${0} {one|two|three}"
			;;
	esac
}

echo "This progran will print your selection!"
main "${1}"
| true
|
9bfff82ecb5d285f9c7b8ebfcaf23bdc11ebbbab
|
Shell
|
pseyfert-cern-gitlab-backup/Urania
|
/PhysFit/B2DXFitters/scripts/Bash/runSFitOnData.sh
|
UTF-8
| 1,149
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the B2DXFitters sFit over the sWeighted data sample; all inputs,
# the fit configuration and the output location are fixed in the
# exported variables below.
#Prevent core dump
ulimit -c 0
#source /afs/cern.ch/lhcb/software/releases/LBSCRIPTS/prod/InstallArea/scripts/LbLogin.sh
#source `which SetupProject.sh` Urania v5r0
#Options
export nickname="SSbarAccFloatingNoFTcalib"
export inputfile="root://eoslhcb.cern.ch//eos/lhcb/wg/b2oc/TD_DPi_3fb/sWeightedData/sWeights_AllData_from04ps.root"
export outputdir="/afs/cern.ch/work/v/vibattis/public/B2DX/Bd2DPi/sFit/${nickname}/"
export outputfile=${outputdir}"workResults.root"
export config="/afs/cern.ch/user/v/vibattis/cmtuser/Urania_v5r0/PhysFit/B2DXFitters/data/Bd2DPi_3fbCPV/Bd2DPi/Bd2DPiConfigForSFitOnData.py"
export pol="both"
export mode="kpipi"
export year="run1"
export hypo="Bd2DPi"
export pyscriptpath="/afs/cern.ch/user/v/vibattis/cmtuser/Urania_v5r0/PhysFit/B2DXFitters/scripts/"
# Start from a clean output directory; the fit log goes there too.
rm -rf $outputdir
mkdir -p $outputdir
export Start=`date`
echo "==> Start fitting at ${Start}"
python ${pyscriptpath}runSFit_Bd.py --debug --fileName $inputfile --save $outputfile --configName $config --pol $pol --mode $mode --year $year --hypo $hypo --merge both >& ${outputdir}logfile.txt
export Stop=`date`
echo "==> Stop fitting at ${Stop}"
| true
|
516f13d224dbc5578e694b543d95d053b3e707e4
|
Shell
|
schanur/libbivalvia
|
/bivalvia/message.sh
|
UTF-8
| 4,299
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
# Logging helpers for libbivalvia: syslog-style message levels 0..8.
BIVALVIA_PATH="$(dirname "${BASH_SOURCE[0]}")"
# source "${BIVALVIA_PATH}/config.sh"
# source "${BIVALVIA_PATH}/require.sh"
#Debug
source "${BIVALVIA_PATH}/debug.sh"
# Allowed message level:
#   0: emerg
#   1: alert
#   2: crit
#   3: err
#   4: warning
#   5: notice
#   6: info
#   7: debug
#   8: trace
# if no message level was set by using "msg_set_level" function,
# "info" message level is used.
# Default threshold: messages above this number (lower priority) are dropped.
MSG_MAX_LEVEL_NUM=6
# 1 = prefix every message with its level name, 0 = plain messages.
MSG_PRINT_MSG_LEVEL=0
# PRIVATE
# Convert message level string to message level number.
# Prints the numeric level (0..8) for the given name on stdout; unknown
# names are reported on stderr and mapped to 3 (err).
function msg_level_str_to_number {
    local MSG_LEVEL="${1}"
    local MSG_LEVEL_NUM=255
    case "${MSG_LEVEL}" in
        emerg)   MSG_LEVEL_NUM=0 ;;
        alert)   MSG_LEVEL_NUM=1 ;;
        crit)    MSG_LEVEL_NUM=2 ;;
        err)     MSG_LEVEL_NUM=3 ;;
        warning) MSG_LEVEL_NUM=4 ;;
        notice)  MSG_LEVEL_NUM=5 ;;
        info)    MSG_LEVEL_NUM=6 ;;
        debug)   MSG_LEVEL_NUM=7 ;;
        trace)   MSG_LEVEL_NUM=8 ;;
        *)
            # Bug fix: callers capture this function's stdout with
            # $(...), so the diagnostic must go to stderr or it would
            # corrupt the returned number.
            msg err "Invalid message level string: ${MSG_LEVEL}. Print message as error message." >&2
            MSG_LEVEL_NUM=3
            ;;
    esac
    echo "${MSG_LEVEL_NUM}"
}
# Set message level as string. Each message with higher integer (lower
# priority) will be ignored.
function msg_set_level {
    local level_name="${1}"
    MSG_MAX_LEVEL_NUM="$(msg_level_str_to_number "${level_name}")"
}
# Set if each message has the message level string as prefix. Allowed
# values: 0, 1.
function msg_set_opt_print_level_str {
    local new_value="${1}"
    if [ "${new_value}" = "0" ] || [ "${new_value}" = "1" ]; then
        MSG_PRINT_MSG_LEVEL="${new_value}"
    else
        msg err "Invalid option value: ${new_value}"
    fi
}
# Prints the message level with spaces added at the end so that all
# strings have the same length.
# Emits an empty string when MSG_PRINT_MSG_LEVEL != 1 or the number is
# outside 0..8 (the case has no default arm on purpose).
function msg_num_to_formated_msg_level_str {
    local MSG_LEVEL_NO="${1}"
    local FORMATED_STR=""
    if [ ${MSG_PRINT_MSG_LEVEL} -eq 1 ]; then
        case ${MSG_LEVEL_NO} in
            0)
                FORMATED_STR="EMERGENCY: "
                ;;
            1)
                FORMATED_STR="ALERT:     "
                ;;
            2)
                FORMATED_STR="CRITICAL:  "
                ;;
            3)
                FORMATED_STR="ERROR:     "
                ;;
            4)
                FORMATED_STR="WARNING:   "
                ;;
            5)
                FORMATED_STR="NOTICE:    "
                ;;
            6)
                FORMATED_STR="INFO:      "
                ;;
            7)
                FORMATED_STR="DEBUG:     "
                ;;
            8)
                FORMATED_STR="TRACE:     "
                ;;
        esac
    else
        FORMATED_STR=""
    fi
    echo "${FORMATED_STR}"
}
# Print message. First parameter is interpreted as message level
# string. Message levels include all that are defined by Syslog plus
# trace.
# The message is suppressed when its numeric level exceeds the global
# threshold MSG_MAX_LEVEL_NUM.
function msg {
    local MSG_LEVEL_STR="${1}"
    local MSG_LEVEL_NO="$(msg_level_str_to_number ${MSG_LEVEL_STR})"
    # local MSG_MIN_LEVEL_NUM
    local MSG
    shift
    # Everything after the level string becomes the message text.
    if [ ${#} -ne 0 ]; then
        MSG="${*}"
    else
        MSG=""
    fi
    # If no message level was set, use "info" as default.
    if [[ ! -v "${MSG_MAX_LEVEL_NUM}" ]]; then
        true
        # msg_set_level "tra"
    fi
    # echo ":::MSG_MAX_LEVEL_NUM: ${MSG_MAX_LEVEL_NUM}"
    # echo "---${MSG_LEVEL_NO} -le ${MSG_MAX_LEVEL_NUM}///"
    # echo
    # stack_trace
    if [ ${MSG_LEVEL_NO} -le ${MSG_MAX_LEVEL_NUM} ]; then
        # echo "+++"
        # All three arms currently print identically; kept separate,
        # presumably so severities can later get distinct sinks/colors.
        case ${MSG_LEVEL_NO} in
            0)
                echo "$(msg_num_to_formated_msg_level_str ${MSG_LEVEL_NO}): ${MSG_LEVEL_STR}: ${MSG}"
                ;;
            1|2|3)
                echo "$(msg_num_to_formated_msg_level_str ${MSG_LEVEL_NO}): ${MSG_LEVEL_STR}: ${MSG}"
                ;;
            4|5|6|7|8)
                echo "$(msg_num_to_formated_msg_level_str ${MSG_LEVEL_NO}): ${MSG_LEVEL_STR}: ${MSG}"
                ;;
            *)
                msg err "Invalid message level no: ${MSG_LEVEL_NO}"
                ;;
        esac
    fi
}
| true
|
f355e58954ed47a8458ceda3ac1b194b0fa6e501
|
Shell
|
VUW-FAIR/CascadeDataStream-SciFacHPC
|
/SciFac_Job_Submission_Script.sl
|
UTF-8
| 2,471
| 3.703125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
### To submit a job to the queing system, use: qsub SciFac_Job_Submission_Script.sl
#$ -S /bin/bash
#$ -N DataStream # Job Name as it appears in the que.
#$ -wd /srv/global/scratch/hashmimu
#$ -l s_rt=240:00:00 # Walltime
#$ -l h_slots=1 # Number of Nodes
#$ -l virtual_free=2.5G # Memory Requirement per processor
#$ -pe smp 24 # Number of Processors
#$ -M Muhammad.Hashmi@vuw.ac.nz # Email to send job completion email
# A check to see of the GridEngine is working correctly
if [ -d /srv/local/work/$JOB_ID ]; then
  cd /srv/local/work/$JOB_ID
else
  echo "There's no job directory to change into "
  echo "Here's LOCAL WORK "
  ls -la /srv/local/work
  echo "Exiting"
  exit 1
fi
# Save the current files path and names. There is a need to specify the path of the script and
# the input_file. I am not that expert of bash scripting to include these as first arguments.
python_script="/home/scifachpc-fs01/hashmimu/big-data-analysis/CascadeDataStream_SciFac.py"
input_file="/home/scifachpc-fs01/hashmimu/big-data-analysis/input_files/sorted.csv"
# NOTE(review): ${python_script_ext} and ${input_file_ext} are never
# defined, so these basenames keep their extensions — the script works
# only by accident of that; verify before relying on the *_basename vars.
python_script_basename=$(basename ${python_script} .${python_script_ext})
input_file_basename=$(basename ${input_file} .${input_file_ext})
# Make sure the input file exists and is readable
if [ ! -f "${input_file}" ]
then
  echo "${input_file}: no such file" >&2
  exit 1
elif [ ! -r "${input_file}" ]
then
  echo "${input_file}: permission denied" >&2
  exit 1
fi
# Make sure the Python Script file exists and is readable
if [ ! -f "${python_script}" ]
then
  echo "${python_script}: no such file" >&2
  exit 1
elif [ ! -r "${python_script}" ]
then
  echo "${python_script}: permission denied" >&2
  exit 1
fi
# Create a job-specific directory back on the globally visible scratch area
#mkdir -p /srv/global/scratch/hashmimu/$JOB_ID
# Load the user environment. You can also use python 2.7 by replacing "34" by "27"
module load scl-python/34
# Now we are in the job-specific directory. Copy input files here
cp ${python_script} .
cp ${input_file} .
# Run the job now
python3 ${python_script} ${input_file_basename}
# Copy the results back to the original directory
mkdir -p /home/scifachpc-fs01/hashmimu/big-data-analysis/output_files/$JOB_ID || exit 1
rm -f ${input_file_basename} # Remove the original input file so that it don't get copied again.
cp -r *.csv /home/scifachpc-fs01/hashmimu/big-data-analysis/output_files/$JOB_ID/ || exit 1
| true
|
7d130ee64df291913f45c8dde15bf65fa3f738f1
|
Shell
|
xiuminga/examples
|
/todo-list/setup/createModels.sh
|
UTF-8
| 1,172
| 3.53125
| 4
|
[
"UPL-1.0"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2020, Oracle and/or its affiliates.
#
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
#
# If you used createDomain.sh to create the WebLogic Server domain, this script can be used to discover
# that domain, and generate the Verrazzano models with WDT.
#
if [ -z "$ORACLE_HOME" ] || [ -z "$WDT_HOME" ] || [ -z "$JAVA_HOME" ]; then
  echo "This script requires that these variables be set: ORACLE_HOME, WDT_HOME, and JAVA_HOME."
  echo "JAVA_HOME   = ${JAVA_HOME}"
  echo "ORACLE_HOME = ${ORACLE_HOME}"
  echo "WDT_HOME    = ${WDT_HOME}"
  # NOTE(review): bare `exit` returns status 0, so callers cannot tell
  # this configuration error from success — consider `exit 1`.
  exit
fi
# if DOMAIN_HOME is not defined
if [ -z "$DOMAIN_HOME" ]; then
  DOMAIN_HOME=./tododomain
fi
# if OUT_DIR is not defined
if [ -z "$OUT_DIR" ]; then
  OUT_DIR=./v8o
fi
echo Clearing output directory: $OUT_DIR
rm -rf $OUT_DIR
mkdir $OUT_DIR
# Run WDT to discover the on-premises domain generating a WDT model, Verrazzano model, and the binding YAML files.
"$WDT_HOME"/bin/discoverDomain.sh -domain_home $DOMAIN_HOME -model_file $OUT_DIR/wdt-model.yaml -archive_file $OUT_DIR/wdt-archive.zip -target vz -output_dir $OUT_DIR
| true
|
2c31fb101e52799267b8b97de5fa415324eeeb20
|
Shell
|
ypereirareis/consul-template-example
|
/tests.sh
|
UTF-8
| 674
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# End-to-end check for consul-template: start the stack, verify the
# rendered config files, update the consul KV keys, verify the files
# were re-rendered, then tear everything down. grep's non-zero exit on
# a missing string fails the script via set -e.
set -eux
./launch.sh start
# Give time to consult-template to generate files
sleep 2
# Check that files were generated after consul-template has started
cat ./config/dev.json | grep "http://dev.mycompany.com"
cat ./config/prod.json | grep "http://prod.mycompany.com"
curl --request PUT --data PROD http://10.123.100.78:8500/v1/kv/api.endpoint.prod
curl --request PUT --data DEV http://10.123.100.78:8500/v1/kv/api.endpoint.dev
# Give time to consult-template to generate files
sleep 2
# Check that files were updated after we update keys
cat ./config/dev.json | grep "DEV"
cat ./config/prod.json | grep "PROD"
# Remove the stack
./launch.sh remove
| true
|
e945909e8846e0cb38012e21422a0a8daa487a5b
|
Shell
|
PavaniSamineni/Script
|
/toPrintUsersAndDirectories
|
UTF-8
| 204
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a short summary of the current session: home directory, user,
# date, terminal device and who is logged in.
# Fixes: typos in the output labels ("Directoriy", "Currecnt"), and
# quoted expansions so multi-word command output is preserved verbatim.
echo "current home Directory : " "$HOME"
echo "Current user : " "$(whoami)"
echo "today is : " "$(date +%d/%m/%y)"
echo "current terminal number : " "$(tty)"
echo "number of users logged in : " "$(w)"
| true
|
0462bb05d3baa51575d9ec5cebca08dd36ac7a68
|
Shell
|
mmondelli/swift-gecko
|
/bin/fasta_extractor
|
UTF-8
| 946
| 3.625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# For every *.fasta file in the current directory, parse its FASTA
# header line and record specie/accession/version annotations for the
# matching file row in the provenance SQLite database.
# $1 - SQLite database path, $2 - script run id used to find file rows
db=$1
runId=$2
if [ $# != 2 ]; then
   echo "***ERROR*** Use: $0 database script_run_id"
   # NOTE(review): `exit -1` is not a valid POSIX status; it wraps to 255.
   exit -1
fi
SQLITE_CMD="sqlite3 $db"
for i in $(ls *.fasta)
do
	#echo $i
	id=$(echo "select distinct file_id from file f natural join staged_in s where f.name like '$i' and s.app_exec_id like '%$runId%';" | $SQLITE_CMD)
	#echo $id
	# NOTE(review): $line is never assigned, so `echo $line |` is a no-op;
	# head reads directly from file $i and ignores its stdin anyway.
	token1=$(echo $line | head -1 $i | awk '{print $1}' | cut -f 2 -d '>')
	token2=$(echo $line | head -1 $i | awk '{print $2, $3}')
	token3=$(echo $line | head -1 $i | awk '{print $1}' | cut -f 4 -d '|' | cut -f 1 -d '.') #accession
	token4=$(echo $line | head -1 $i | awk '{print $1}' | cut -f 4 -d '|' | cut -f 2 -d '.') #version
	echo "INSERT INTO file_annot_text VALUES ('$id', 'specie', '$token2');" | $SQLITE_CMD
	echo "INSERT INTO file_annot_text VALUES ('$id', 'accession', '$token3');" | $SQLITE_CMD
	echo "INSERT INTO file_annot_text VALUES ('$id', 'version', '$token4');" | $SQLITE_CMD
done
| true
|
8d57eb8e4d7f18221eae3e39a4eb0e5a5d7c5919
|
Shell
|
mplsbk/themes
|
/bak-magic.zsh-theme
|
UTF-8
| 829
| 3.0625
| 3
|
[] |
no_license
|
# af-magic.zsh-theme
# Repo: https://github.com/andyfleming/oh-my-zsh
# Direct Link: https://github.com/andyfleming/oh-my-zsh/blob/master/themes/af-magic.zsh-theme
# Root gets a red username color, everyone else green.
if [ $UID -eq 0 ]; then NCOLOR="red"; else NCOLOR="green"; fi
local return_code="%(?..%{$fg[red]%}%? ↵%{$reset_color%})"
# primary prompt
# NOTE(review): $my_purple is referenced here but only eval-defined
# below — this relies on the prompt being expanded at display time,
# after the whole file has been sourced; confirm prompt_subst is set.
PROMPT='$FG[237]------------------------------------------------------------%{$reset_color%}
$my_purple%~\
$(git_prompt_info) \
$FG[105]%(!.#.»)%{$reset_color%} '
PROMPT2='%{$fg[red]%}\ %{$reset_color%}'
RPS1='${return_code}'
# color vars
eval my_green='$FG[022]'
eval my_orange='$FG[214]'
eval my_purple='$FG[090]'
# git settings
ZSH_THEME_GIT_PROMPT_PREFIX="$my_green("
ZSH_THEME_GIT_PROMPT_CLEAN=""
ZSH_THEME_GIT_PROMPT_DIRTY="$my_orange ✗%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_SUFFIX="$my_green)%{$reset_color%}"
| true
|
941aa2d99ca6ee456de09b48f76883ec4b0cbedc
|
Shell
|
lpolyudova/bin
|
/zshrc
|
UTF-8
| 2,013
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/zsh
# Interactive zsh setup: aliases, PATH, completion, git-aware prompt
# and history-search keybindings.
# Load custom aliases
if [[ -f ~/.public_alias ]]
then
  . ~/.public_alias
fi
if [[ -f ~/.private_alias ]]
then
  . ~/.private_alias
fi
#
## Load PATH
if [[ -f ~/.path ]]
then
  . ~/.path
fi
#
# library-cli-sdk
# The next line updates PATH for the Google Cloud SDK.
#source '/Users/lpolyudova/Downloads/Documents/google-cloud-sdk/path.zsh.inc'
# The next line enables shell command completion for gcloud.
# source '/Users/lpolyudova/Downloads/Documents/google-cloud-sdk/completion.zsh.inc'
# Enable git autocomplete
autoload -Uz compinit && compinit
# Add branch name to promp
autoload -Uz vcs_info
setopt prompt_subst
autoload -Uz vcs_info
# vcs_info format strings: %b = branch, %a = action, %s = VCS name.
zstyle ':vcs_info:*' actionformats \
    '%F{5}(%f%s%F{5})%F{3}-%F{5}[%F{2}%b%F{3}|%F{1}%a%F{5}]%f '
zstyle ':vcs_info:*' formats \
    '%F{5}%F{3} %F{5}[%F{2}%b%F{5}]%f '
zstyle ':vcs_info:(sv[nk]|bzr):*' branchformat '%b%F{1}:%F{3}%r'
# Refresh VCS info before each prompt so the branch name stays current.
precmd () { vcs_info }
PS1=' %F{12}%3~ ${vcs_info_msg_0_}%f
 > '
# Set history file variable
export HISTFILE=~/.zsh_history
# Set history search
bindkey "^[[A" history-beginning-search-backward
bindkey "^[[B" history-beginning-search-forward
extract ()
{
if test -f "$1" && test -n "$1" ; then
case $1 in
*.tar.bz2) tar xvjf $1 ;;
*.tar.gz) tar xvzf $1 ;;
*.bz2) bunzip2 $1 ;;
*.rar) unrar x $1 ;;
*.gz) gunzip $1 ;;
*.tar) tar xvf $1 ;;
*.tbz2) tar xvjf $1 ;;
*.tgz) tar xvzf $1 ;;
*.zip) unzip $1 ;;
*.Z) uncompress $1 ;;
*.7z) 7z x $1 ;;
*) echo "don't know how to extract '$1'..." ;;
esac
elif test -n "$1"; then
echo "'$1' is not a valid file!"
else
echo "no file to extract"
fi
}
print_ascii()
{
# Get random id f the picture
idx=$(( RANDOM % $(ls $BIN/ascii_art | wc -l ) +1 ))
echo $idx
list=($(ls))
echo $list[idx]
cat $BIN/ascii_art/$(echo $list[idx])
}
| true
|
305f6eee5415abc7eb7eb85e708e048a762b988d
|
Shell
|
asheplyakov/hntwsm
|
/script68.sh
|
UTF-8
| 212
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
# Demonstration of the classic test-quoting pitfall: the two `if`
# blocks below are deliberately kept side by side, one wrong, one right.
set -x
module=$(find /lib/modules/`uname -r` -name foo.ko)
# WRONG by construction
# When $module is empty and unquoted, the command becomes `[ -n ]`,
# a one-argument test that only checks that the string "-n" is
# non-empty — so this branch is ALWAYS taken.
if [ -n $module ]; then
echo do something wrong
fi
# Correct
# Quoting keeps the operand in place, so -n really tests $module.
if [ -n "$module" ]; then
echo do someting right
fi
| true
|
21be726a32293243ff9abb074acefbb4f2b3f9af
|
Shell
|
mfalgert/ews
|
/impl/final-t1-i2/scripts/test.sh
|
UTF-8
| 469
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# perform test
# Kill any leftover instances of the test processes before restarting.
echo "removing old procsses..."
for proc in send_from_peers.py test_wd.sh test_an.sh; do
    sudo pkill -f -9 "$proc"
done
echo "test.sh now running..."
# Background traffic generator.
sudo python scripts/send_from_peers.py &
# Random warm-up delay of 33-63 seconds before probing starts.
pause="$(shuf -i 33-63 -n 1)"
sleep "$pause"
# Alternate the two probe scripts forever, with a random 101-111 second
# pause after each one.
while :; do
    sudo sh scripts/test_wd.sh "20.0.7.1"
    pause="$(shuf -i 101-111 -n 1)"
    sleep "$pause"
    sudo sh scripts/test_an.sh "20.0.7.1"
    pause="$(shuf -i 101-111 -n 1)"
    sleep "$pause"
done
| true
|
2ac57a44b9ed38a040ef8bd0251ec065ea5ded18
|
Shell
|
rahul0204/labexam
|
/grep_io_least_one_ocurrence_of_pattern_hello.sh
|
UTF-8
| 229
| 2.515625
| 3
|
[] |
no_license
|
# Q . Display lines of file which can contain at
# least one ocurrence of pattern "hello" in abc.txt and
# xyz.txt redirect output to output.txt (use grep command)
#!/bin/sh
# Plain grep prints every line containing at least one match, which is
# what the question asks for.  The previous answer used `grep -l`
# (which only lists matching FILE NAMES, not lines) and searched for
# the wrong pattern ("rohit" instead of "hello").
grep "hello" abc.txt xyz.txt > output.txt
| true
|
1184f423c3a1a941b4e0141c16a5455be010c50c
|
Shell
|
tower111/pwn-change-libc
|
/clibc
|
UTF-8
| 660
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# clibc - re-link a pwn target binary against a chosen glibc version
# using patchelf, pointing interpreter and rpath at /glibc/<ver>/{32,64}.
# $1 - target ELF file
# $2 - glibc version prefix (e.g. 2.27)
# $3 - if non-empty, use the current working directory as rpath instead
FILE_NAME=$1
LIBC_VERSION=$2
WORKDIR=$(pwd)
LIBC_DIR=/glibc
# Resolve the version prefix to an actual directory below /glibc.
LIBC_DIR=$(find $LIBC_DIR -name "$LIBC_VERSION*")
if [ "$LIBC_DIR" = "" ];then
echo "Not support version or your $LIBC_DIR don't have libc"
exit
fi
# The third field of `file` output starts with "32-bit"/"64-bit"; keep
# the first two characters.  NOTE(review): -eq is a numeric comparison,
# so a non-ELF input makes `[` error out instead of reaching the else
# branch — presumably tolerated here; confirm if that path matters.
EBIT=$(file $FILE_NAME |awk '{print$3}'|cut -c 1-2)
if [ $EBIT -eq "32" ];then
libc_dir=$LIBC_DIR/32/lib
elif [ $EBIT -eq "64" ];then
libc_dir=$LIBC_DIR/64/lib
else
echo "It's not a elf file"
exit
fi
# Rewrite interpreter and rpath; with a third argument the rpath points
# at the working directory (for a locally supplied libc copy).
if [ "$3" ]
then
patchelf --set-interpreter $libc_dir/ld-$LIBC_VERSION.so --set-rpath $WORKDIR/ $1
else
patchelf --set-interpreter $libc_dir/ld-$LIBC_VERSION.so --set-rpath $libc_dir/ $1
fi
echo "success!!!"
| true
|
0325e84224e2d540c8f1b2e6958b9fdc17aa822c
|
Shell
|
LeSpocky/eis
|
/cui-vmail/cui-vmail-update.sh
|
UTF-8
| 6,396
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
#------------------------------------------------------------------------------
# eisfair configuration update script
# Copyright 2007 - 2014 the eisfair team, team(at)eisfair(dot)org
#------------------------------------------------------------------------------
# Regenerates /etc/config.d/vmail: merges package defaults with the
# user's current values and re-emits every known variable via the
# configlib helpers (printgpl/printgroup/printvar/printend).
# include configlib
. /var/install/include/configlib
packages_name='vmail'
### ---------------------------------------------------------------------------
### read old and default variables
### ---------------------------------------------------------------------------
. /etc/default.d/${packages_name}
. /etc/config.d/${packages_name}
### ---------------------------------------------------------------------------
### Write config and default files
### ---------------------------------------------------------------------------
# The whole subshell's stdout is redirected into the new config file.
(
printgpl --conf "$packages_name"
#-----------------------------------------------------------------------
printgroup "Vmail settings"
#-----------------------------------------------------------------------
printvar "START_VMAIL" "Use VMail service"
printvar "VMAIL_SQL_HOST" "MySQL host. (localhost or IP)"
printvar "VMAIL_SQL_USER" "MySQL user name"
printvar "VMAIL_SQL_PASS" "MySQL connet password"
printvar "VMAIL_SQL_DATABASE" "MySQL database name"
printvar "VMAIL_SQL_ENCRYPT_KEY" "Password encryption key"
printvar "VMAIL_LOGIN_WITH_MAILADDR" "login with completed mail address or username only"
#-----------------------------------------------------------------------
printgroup "SMTP Postfix general settings"
#-----------------------------------------------------------------------
printvar "POSTFIX_SMTP_TLS" "use STARTTLS or SMTP over SSL"
printvar "POSTFIX_HELO_HOSTNAME" "use alternate external host name"
printvar "POSTFIX_AUTOSIGNATURE" "write automatic signature to all mail"
printvar "POSTFIX_QUEUE_LIFETIME" "change default queue lifetime"
printvar "POSTFIX_RELAY_FROM_NET_N" "Count of internal networks"
# Emit one entry per configured internal relay network.
count=1
while [ ${count} -le ${POSTFIX_RELAY_FROM_NET_N} ]
do
printvar "POSTFIX_RELAY_FROM_NET_${count}" "NETWORK/NETMASK 172.16.0.0/16"
count=$((count+1))
done
printvar "POSTFIX_SMARTHOST" "send all e-mails to external e-mail server"
#printvar "POSTFIX_SMARTHOST_TLS" "set TLS"
printvar "POSTFIX_LIMIT_DESTINATIONS" "Max count of destination recipients"
printvar "POSTFIX_LIMIT_MAILSIZE" "Max size of e-mail message (default 20MB)"
#-----------------------------------------------------------------------
printgroup "SMTP Postfix antispam settings"
#-----------------------------------------------------------------------
printvar "POSTFIX_REJECT_UNKN_CLIENT" "reject not dns based hostnames"
printvar "POSTFIX_REJECT_UNKN_SEND_DOM" "Reject unknown sender domain"
printvar "POSTFIX_REJECT_NON_FQDN_HOST" "Reject non full qualif. hostname"
printvar "POSTFIX_REJECT_DYNADDRESS" "Block all sender with pppoe, dialin etc. names"
printvar "POSTFIX_REJECT_BOGUS_MX" "Block faked DNS entries"
printvar "POSTFIX_MIME_HEADER_CHECK" "Block all exe,com,vba... files"
printvar "POSTFIX_GREYLISTING_FOR_ALL" "Use greyfix for all SMTP Traffic"
printvar "POSTFIX_POSTSCREEN" "Use Postscreen antispam preegreeting"
printvar "POSTFIX_RBL" "Use Realtime Blackhole List"
printvar "POSTFIX_RBL_N" "Count of Realtime Blackhole List server"
count=1
while [ ${count} -le ${POSTFIX_RBL_N} ]
do
printvar "POSTFIX_RBL_${count}_SERVER" "Realtime Blackhole List server $count name"
printvar "POSTFIX_RBL_${count}_WEIGHT" "Blackhole server $count blocking weight"
count=$((count+1))
done
printvar "POSTFIX_HEADER_N" "Count of header checks"
count=1
while [ ${count} -le ${POSTFIX_HEADER_N} ]
do
printvar "POSTFIX_HEADER_${count}_CHECK" "PCRE check string"
printvar "POSTFIX_HEADER_${count}_HANDL" "handling: REJECT, IGNORE + logstring"
count=$((count+1))
done
printvar "POSTFIX_CLIENT_N" "Count of checked email clients"
count=1
while [ ${count} -le ${POSTFIX_CLIENT_N} ]
do
printvar "POSTFIX_CLIENT_${count}_CHECK" "PCRE check string"
printvar "POSTFIX_CLIENT_${count}_HANDL" "handling: REJECT, IGNORE + logstring"
count=$((count+1))
done
#-----------------------------------------------------------------------
printgroup "Antivirus settings"
#-----------------------------------------------------------------------
printvar "POSTFIX_AV_CLAMAV" "Use ClamAV antivirus scanner"
printvar "POSTFIX_AV_FPROTD" "Use F-Prot daemon antivirus scanner"
printvar "POSTFIX_AV_SCRIPT" "Use scripfile"
printvar "POSTFIX_AV_SCRIPTFILE" "Scriptfile name"
printvar "" "/usr/bin/smc-milter-new-unzip.sh"
printvar "POSTFIX_AV_VIRUS_INFO" "Send virus warn message to e-mail recipient"
printvar "POSTFIX_AV_QUARANTINE" "Store the original virus to the quarantain"
#-----------------------------------------------------------------------
printgroup "POP3/IMAP settings"
#-----------------------------------------------------------------------
printvar "START_POP3IMAP" "Start POP3 and IMAP"
#-----------------------------------------------------------------------
printgroup "Fetchmail settings"
#-----------------------------------------------------------------------
printvar "START_FETCHMAIL" "Start fetchmail service"
printvar "FETCHMAIL_CRON_SCHEDULE" "mail check time"
printvar "FETCHMAIL_TIMEOUT" "server timeout"
printvar "FETCHMAIL_POSTMASTER" "store all error messages to"
#-----------------------------------------------------------------------
printgroup "Logfile settings"
#-----------------------------------------------------------------------
printvar "POSTFIX_LOGLEVEL" "Debug and loglevel 0...3"
printvar "FETCHMAIL_LOG" "activate fetchmail log entries"
#-----------------------------------------------------------------------
printend
#-----------------------------------------------------------------------
) > /etc/config.d/${packages_name}
# Config may hold passwords: readable only by root and the group.
chmod 0640 /etc/config.d/${packages_name}
chown root /etc/config.d/${packages_name}
### ---------------------------------------------------------------------------
exit 0
| true
|
460cfb5cdb828fb41fb1c3db8a0e492995196933
|
Shell
|
titopluto/virl_troubleshoot
|
/srv_validate.sh
|
UTF-8
| 6,116
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
## This script will collect key parts of your VIRL server configuration settings and
## place them into a single file (SrvValTest.txt). This file can be collected and
## forwarded to VIRL support community for assistance.
## Validation script created by alejandro gallego (alegalle@cisco.com)
## Last modified on Aug 29, 2017
##
## Current version supports Openstack Kilo, Mitaka
##
## TEMP_FILE=/tmp/${PROGNAME}.$$.$RANDOM
## Global vars
tstmp=$(date +%H.%M_%Y.%m.%d)
_ntp=$(ntpq -p)
ntrn=$(neutron agent-list)
ntrnsub=$(neutron subnet-list)
nva=$(nova service-list)
ver=$(sudo salt-call --local grains.get virl_release | egrep -v 'local:')
lver=$(lsb_release -a 2> /dev/null)
vc=$(sudo -H pip list | egrep VIRL-CORE)
lbrdg=$(brctl show)
mstr=$(sudo salt-call --local grains.get salt_master | egrep -v 'local:' )
lic=$(sudo salt-call --local grains.get id | egrep -v 'local:' )
_out=~/SrvValTest.txt
###
int_exit ()
{
echo "${PROGNAME}: Aborted by user"
exit
}
term_exit ()
{
echo "${PROGNAME}: Terminated"
exit
}
## Results of commands in script are sent to text file. The text file
## will be found under the default username 'virl' home directory.
function _result
{
rm $_out >& /dev/null
touch $_out
echo "Checking server configuration!
Please wait...."
}
_messg ()
{
printf "\nResults printed to file \"%s\" in
\"virl user's home directory\"\n" "$_out"
sleep 2
}
## Deployment type checks for VMware PCI devices
_dtype ()
{
echo $tstmp
lspci |grep ' peripheral: VMware' > /dev/null
if [[ $? -ne 0 ]] ; then
printf "\nInstallation Type: \"OTHER\"\n"
else
printf "\nInstallation Type: \"OVA\"\n\n"
fi
}
## Checking installed version of typical packages
_verchk ()
{
printf "Component Version(s)\nVIRL-RELEASE %s\n%6s\n\nOS INFO\n%s\n" "$ver" "$vc" "$lver"
printf "\nOPENSTACK %6s" && openstack --version
printf "NEUTRON %6s" && neutron --version
printf "NOVA %6s" && nova --version
printf "\nPYTHON MODULES\n"
declare iver=($(sudo -H pip list | egrep '\bautonetkit'\|'\bvirl-'))
echo "AutoNetkit: ${iver[1]}"
echo "AutoNetkit Cisco: ${iver[3]}"
echo "Topology Vis Eng: ${iver[5]}"
echo "Live Net Collection Eng: ${iver[7]}"
echo ""
printf "SALT VERSION(s)\n"
printf "%s$(sudo salt-minion --versions | egrep -v 'Version:')\n\n"
}
## Check openstack command version
## Xenial hosts run Mitaka (_ostack); older releases run Kilo (_kostack).
_o ()
{
grep -i xenial /etc/os-release > /dev/null 2>&1
if [ $? -ne 0 ]; then
_kostack >> $_out 2>&1
else
_ostack >> $_out 2>&1
fi
}
## Display Openstack server information
## Mitaka Openstack
_ostack ()
{
printf "OPENSTACK INFO / STATS\n"
printf "VIRL HOST\n%s" && openstack host show $HOSTNAME
printf "\n%sVIRL IMAGES \n%s" && openstack image list
printf "\n%s\n%s\n%s\n" "OPENSTACK NETWORKING" "$ntrn" "$ntrnsub"
printf "\n%s\n%s\n" "OPENSTACK NOVA" "$nva"
printf "\n%s\n%s" "OPENSTACK USER(s)" && openstack user list
printf "\n%sVIRL HYPERVISOR\n%s" && openstack hypervisor stats show
printf "\n%sOPENSTACK SERVICES\n%s" && openstack service list --long
}
## Kilo Openstack
_kostack ()
{
kstn=$(keystone user-list | grep -v "WARNING" 2> /dev/null)
printf "OPENSTACK INFO / STATS\n"
printf "VIRL HOST\n%s" && nova host-list
printf "\n%sVIRL IMAGES \n%s" && nova image-list
printf "\n%s\n%s\n%s\n" "OPENSTACK NETWORKING" "$ntrn" "$ntrnsub"
printf "\n%s\n%s\n" "OPENSTACK NOVA" "$nva"
printf "\n%s\n%s\n" "OPENSTACK USER(s)" "$kstn"
printf "\n%sVIRL HYPERVISOR\n%s" && nova hypervisor-stats
}
## Network information checks for configured interfaces, compares assigned MAC addr. to HW Mac addr.
## and looks for "link" detection of reported interfaces.
_netint ()
{
printf "\nVIRL SERVER NETWORKING\n\n"
ifquery --list | egrep -v lo | sort | while read intf
do
ipadr=$(ip addr show dev $intf 2> /dev/null | awk '$1 == "inet" { sub("/.*", "", $2); print $2 }' )
mac=$(ip link show $intf 2> /dev/null | awk '/ether/ {print $2}' )
hwmac=$(cat /sys/class/net/$intf/address 2> /dev/null )
printf "%s CONFIGURED\n" "$intf"
printf "MAC: %s\n" "$mac"
printf "HW: %s\n" "$hwmac"
ip link show $intf > /dev/null 2>&1
if [ $? -ne 0 ] ; then
printf ">>> Interface %s DOWN\n" "$intf"
else
printf "IP: %s\n" "$ipadr"
echo ""
fi
done
## Print summary
printf "\nBridge Info: \n %s\n" "$lbrdg"
vini=$(egrep '\bsalt_'\|'\bhost'\|'\bdomain'\|'\bpublic_'\|'\bStatic_'\|'\busing_'\|'\bl2_'\|'\bl3_'\|'\bdummy_'\|'\bvirl_'\|'\binternalnet_'\|'_nameserver' /etc/virl.ini)
printf "\nVIRL CONFIG SUMMARY\n%s" "$vini"
printf "\n\nHOSTNAME / NETWORK INTERFACES"
sleep 2
# Walk the three network config files, labelling each with its tag.
ARRAY=(SYS_HOSTNAME NET_HOSTS NET_INTERFACES)
g=0
while (( g < ${#ARRAY[*]} ))
do
for h in /etc/hostname /etc/hosts /etc/network/interfaces
do
printf "\n%s\n" "${ARRAY[g]}"
cat $h
((g++))
done
done
}
## Salt test will check configured salt servers, connectivity to configured salt servers, and license validation.
## License validation only checks to see if configured license is accepted not expiry or syntax.
_saltst ()
{
printf "\n%s\n" "SALT CONFIGURATION"
sleep 1
printf "\nNTP Peers:\n%s\n" "$_ntp"
printf "\nConfigured Salt masters:\n%s\n" "$mstr"
printf "\nConfigured Salt ID:\n%s\n" "$lic"
for srv in ${mstr//,/ }
do
idig=$(dig $srv | egrep -o '([0-9]+\.){3}[0-9]+' |head -1)
printf "\nTesting Connectivity to: [%3s %s]\n" "$srv" "$idig"
nc -zv $srv 4505-4506
echo ""
printf "\nChecking License....[ %s ]\n" $lic
printf "\nAuth test --> Salt Server [ %s ]\n" "$srv"
sudo salt-call --master $srv -l debug test.ping
done
}
## Script Start
PROGNAME=$(basename $0)
trap term_exit TERM HUP
trap int_exit INT
rm $_out >& /dev/null
touch $_out
_result
echo "Checking deployment type...."
_dtype >> $_out 2>&1
echo "Checking installed versions...."
_verchk >> $_out 2>&1
_o
echo "Checking networking configuration...."
_netint >> $_out 2>&1
echo "Checking salt connectivity and license...."
_saltst >> $_out 2>&1
echo "
DONE...."
_messg
# sleep 5
| true
|
bfaa9d1b690c880820efbc2229c2ac7d20b55371
|
Shell
|
skyNet2017/Luban
|
/lubanturbo/src/main/cpp/include/pkgscripts/makecygwinpkg
|
UTF-8
| 929
| 3.4375
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"IJG",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Build a Cygwin binary package (tar.bz2) for libjpeg-turbo: stage a
# `make install` into a temp directory, add the doc files, then tar it
# up next to the build directory.
set -u
set -e
trap onexit INT
trap onexit TERM
trap onexit EXIT
TMPDIR=
# Remove the staging directory on any exit path (normal, INT or TERM).
onexit()
{
	if [ ! "$TMPDIR" = "" ]; then
		rm -rf "$TMPDIR"
	fi
}
PACKAGE_NAME=libjpeg-turbo
VERSION=1.1.1
SRCDIR=/home/tgall/libjpeg-turbo-android/wip
umask 022
rm -f "$PACKAGE_NAME-$VERSION-cygwin.tar.bz2"
TMPDIR=$(mktemp -d /tmp/ljtbuild.XXXXXX)
__PWD=$(pwd)
make install DESTDIR="$TMPDIR/pkg" mandir="/opt/$PACKAGE_NAME/man"
# libtool archives are not wanted in the binary package.
rm "$TMPDIR/pkg/opt/$PACKAGE_NAME/lib/"*.la
DOCDIR=$TMPDIR/pkg/usr/share/doc/$PACKAGE_NAME-$VERSION
mkdir -p "$DOCDIR"
install -m 644 "$SRCDIR/README-turbo.txt" "$DOCDIR"
install -m 644 "$SRCDIR/README" "$DOCDIR"
install -m 644 "$SRCDIR/libjpeg.txt" "$DOCDIR"
install -m 644 "$SRCDIR/usage.txt" "$DOCDIR"
install -m 644 "$SRCDIR/LICENSE.txt" "$DOCDIR"
install -m 644 "$SRCDIR/LGPL.txt" "$DOCDIR"
ln -fs lib "$TMPDIR/pkg/opt/$PACKAGE_NAME/lib32"
cd "$TMPDIR/pkg"
tar cfj "../$PACKAGE_NAME-$VERSION-cygwin.tar.bz2" *
cd "$__PWD"
mv "$TMPDIR"/*.tar.bz2 .
exit 0
| true
|
6cf498cc310a284ab12a47b19bc2da26a094a4c6
|
Shell
|
olgaiv39/faceswap-data-generation
|
/preprocess/convert-tedx-videos.sh
|
UTF-8
| 505
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
# Convert the downloaded TEDx .mkv videos to .mp4 and rename them to
# sequential names of the form tedx-<category>_NN.mp4.  Expects to be
# started from the preprocess/ directory (one level below the repo root).

# Convert every .mkv in the current directory to .mp4 (stream copy,
# AAC audio), after writing the directory listing to list-<label>.txt,
# then rename the resulting .mp4 files sequentially.
# $1 - category label used in the output file names (e.g. "youth")
convert_current_dir() {
    local label=$1
    local f new n=1
    ls > "list-$label.txt"
    for f in *.mkv; do
        ffmpeg -i "$f" -codec copy -threads 0 -c:a aac "${f%.*}.mp4"
    done
    for f in *.mp4; do
        new=$(printf "tedx-%s_%02d.mp4" "$label" "$n")
        mv -i -- "$f" "$new"
        n=$((n + 1))
    done
}

cd ..
cd videos
cd tedx
cd youth
convert_current_dir youth
cd ..
cd france
convert_current_dir france
cd ..
cd ..
cd ..
| true
|
ce2b4b4340ec4a40b0a1e6d0d669d7f2742a2864
|
Shell
|
karenyyng/AirlineDelayBenchmark
|
/config/load_py3_2018.0.0.sh
|
UTF-8
| 2,895
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Environment setup for the airline-delay benchmark: activates the
# Intel Python 3.5 conda environment (with NERSC Cori specifics) and
# sets MKL/OpenMP threading knobs.  Intended to be `source`d.
export CONDA_ENV=airline
export ENV_DIR=/global/common/software/bdc
export INTEL_PYTHON_VERSION=2018.0.0
export PY_VERSION=35
export PY_DOT_VERSION=3.5
export PY_MAJOR_VERSION=3
if [[ -z $CONDA_ENV ]]; then
echo "CONDA_ENV is not defined. Quitting installation script."
exit 1
fi
# Cori-specific modules (base anaconda + hugepages support).
if [[ $NERSC_HOST == "cori"* ]]; then
DIR=/global/common/cori/software/python/3.5-anaconda
# ENV_DIR=/global/common/software/bdc/py35_envs
module load python/3.5-anaconda
echo 'On Cori: echo finish loading python/3.5-anaconda'
module load craype-hugepages2M
echo 'On Cori: loading craype-hugepages2M'
fi
export CONDA=$ENV_DIR/py${PY_VERSION}_envs/${CONDA_ENV}/bin/conda
cp $HOME/.condarc_$CONDA_ENV $HOME/.condarc
echo "Activating conda environment at $ENV_DIR/py35_envs/$CONDA_ENV using"
echo "source ${ENV_DIR}/py${PY_VERSION}_envs/${CONDA_ENV}/bin/activate $CONDA_ENV"
source ${ENV_DIR}/py${PY_VERSION}_envs/${CONDA_ENV}/bin/activate $CONDA_ENV
# echo "Using some recommended settings that may not be the best for your use case"
# Count of logical CPUs; used to derive thread counts below.
export NUM_OF_THREADS=$(grep 'model name' /proc/cpuinfo | wc -l)
# # # https://software.intel.com/en-us/node/522691
# export KMP_AFFINITY=granularity=fine,compact
# # # https://software.intel.com/en-us/mkl-macos-developer-guide-mkl-dynamic
# export MKL_DYNAMIC=false
# Xeon Phi (KNL) gets different affinity/thread settings than Xeon.
if [[ $(grep 'model name' /proc/cpuinfo ) == *'Xeon Phi'* ]]; then
export KMP_AFFINITY=disabled
# export KMP_AFFINITY=granularity=fine,compact
export MKL_DYNAMIC=false
export KMP_BLOCKTIME=800
export OMP_NUM_THREADS=1
unset MKL_NUM_THREADS
export OMP_NUM_THREADS=$(( $NUM_OF_THREADS / 4 ))
export MKL_NUM_THREADS=$(( $NUM_OF_THREADS / 4 ))
# export KMP_HW_SUBSET=${OMP_NUM_THREADS}c,1t # or use 64c,2t
export MKL_VERBOSE=0
# NOTE(review): the trailing word "python" makes this export the
# variable `python` as well — looks like an accidental paste; confirm.
export KMP_COMPOSABILITY=mode=exclusive python
else
export KMP_BLOCKTIME=800
export OMP_NUM_THREADS=$(( $NUM_OF_THREADS / 4 ))
export MKL_NUM_THREADS=$(( $NUM_OF_THREADS / 4 ))
fi
echo "Setting KMP_BLOCKTIME = $KMP_BLOCKTIME"
echo "Setting KMP_AFFINITY = $KMP_AFFINITY"
echo "Setting MKL_DYNAMIC = $MKL_DYNAMIC"
echo "Setting HPL_LARGEPAGE= $HPL_LARGEPAGE"
echo "Setting OMP_NUM_THREADS=$OMP_NUM_THREADS"
echo "Setting MKL_NUM_THREADS=$MKL_NUM_THREADS"
echo "Setting KMP_HW_SUBSET=$KMP_HW_SUBSET"
echo "Setting MKL_VERBOSE=$MKL_VERBOSE"
echo "Setting MKL_FFT_VERBOSE=$MKL_FFT_VERBOSE"
echo "Setting HUGETLB_DEFAULT_PAGE_SIZE=$HUGETLB_DEFAULT_PAGE_SIZE"
echo "Setting PE_HUGEPAGES_PKGCONFIG_VARIABLES=$PE_HUGEPAGES_PKGCONFIG_VARIABLES"
echo "PE_HUGEPAGES_TEXT_SEGMENT=$PE_HUGEPAGES_TEXT_SEGMENT"
echo "PE_HUGEPAGES_TEXT_SEGMENT=$PE_HUGEPAGES_TEXT_SEGMENT"
echo "PE_HUGEPAGES_PAGE_SIZE=$PE_HUGEPAGES_PAGE_SIZE"
echo "HUGETLB_MORECORE_HEAPBASE=$HUGETLB_MORECORE_HEAPBASE"
echo "HUGETLB_MORECORE=$HUGETLB_MORECORE"
echo "HUGETLB_ELFMAP=$HUGETLB_ELFMAP"
echo "HUGETLB_FORCE_ELFMAP=$HUGETLB_FORCE_ELFMAP"
module load java
module load scala
| true
|
8d172df1485821ee95d01abbc710dcc41bc8ea3e
|
Shell
|
tom-weiss-github/home
|
/tag_count_audit.sh
|
UTF-8
| 170
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Audit tag-count growth: every 5 minutes drop all local tags,
# re-fetch the current set from the remote, and log the count.
while true
do
    # Delete every local tag, then fetch the remote's current tags.
    # ($( ) replaces the legacy backtick command substitution.)
    git tag -d $(git tag) > /dev/null 2>&1 && git fetch > /dev/null 2>&1
    echo "Tag count is $(git tag | wc -l) at $(date)."
    sleep 300
done
| true
|
509477e3d58375fe815e860024a1e92d342bce95
|
Shell
|
idachev/own-debian-configs
|
/win_cntrl_start.sh
|
UTF-8
| 1,012
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
#set -v
# Focus-or-launch helper: if a window with WM_CLASS $1 exists, move it
# to the current virtual desktop and activate it; otherwise start $2.
WIN_CLASS=$1
PRG=$2
# check if the program has a window visible
# awk program is double-quoted so $WIN_CLASS is interpolated; awk's own
# fields/quotes are backslash-escaped.
FOUND=$(wmctrl -l -x | awk -F ' ' "BEGIN {found=0;} {if (\$3 == \"$WIN_CLASS\") {found=1;}} END {print found;}")
#echo $FOUND
# find on which desctop is the program window
WIN_DESKTOP=$(wmctrl -l -x | awk -F ' ' "BEGIN {found=-2;} {if (\$3 == \"$WIN_CLASS\") {found=\$2;}} END {print found;}")
#echo $WIN_DESKTOP
# find the current desktop number
CUR_DESKTOP=$(wmctrl -d | awk -F ' ' "BEGIN {found=-3;} {if (\$2 == \"*\") {found=\$1;}} END {print found;}")
#echo $CUR_DESKTOP
if [ $FOUND -eq 1 ]; then
if [ ! $CUR_DESKTOP -eq $WIN_DESKTOP ]; then
# move to current desktop
wmctrl -x -r "$WIN_CLASS" -t $CUR_DESKTOP
fi
# and activate the window
wmctrl -x -a "$WIN_CLASS"
# Set window to sticky to be visible on all virtual desktops
# do not use it just move the window to the desktop
# wmctrl -x -F -r "$WIN_CLASS" -b toggle,sticky
# Bring widnow to main desktop
# wmctrl -x -F -R "$WIN_CLASS"
else
$PRG &
fi
| true
|
ad4c653b2bbc770e8b671dc8a923e41fd811d380
|
Shell
|
leizhnxp/nginx_ssl
|
/run.sh
|
UTF-8
| 267
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Start or stop an nginx instance using the config that lives next to
# this script.  Usage: run.sh [start|stop]

# Absolute directory of this script (holds nginx.conf and nginx.pid).
# Nested $( ) handles paths with spaces, which `| xargs dirname` would not.
self_dir=$(dirname "$(readlink -f "$0")")
case "$1" in
start)
    sudo nginx -p "$self_dir"/ -c "$self_dir"/nginx.conf
    ;;
stop)
    # The pid file is written by nginx as root, so read it via sudo too.
    sudo sh -c "cat $self_dir/nginx.pid | xargs kill"
    ;;
*)
    echo "usage : run.sh [start|stop]"
    exit 1
    ;;
esac
| true
|
00af4e91eeeccd53cb18fa8ec6989de50fd954b7
|
Shell
|
Tronix117/gith-monitor
|
/scripts/gith-monitor
|
UTF-8
| 826
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# gith-monitor init file for starting up the gith-monitor daemon
#
# chkconfig: - 20 80
# description: Starts and stops the gith-monitor daemon.
# SysV-style init script: runs the CoffeeScript server under `forever`,
# with pid file in /var/run and logs in /var/log/gith-monitor.
PATH=$PATH:/usr/local/bin
NAME="gith-monitor"
PID="/var/run/$NAME.pid"
LOG_DIR="/var/log/$NAME"
SERVER="/usr/local/lib/node_modules/$NAME/index.coffee"
if [ ! -d "/var/log/$NAME" ]
then
mkdir /var/log/$NAME
fi
# Start the daemon unless the pid file says it is already running.
start() {
if [ -e $PID ]
then
echo "Always running"
exit
fi
forever start -c coffee -a -l $LOG_DIR/daemon.log -o $LOG_DIR/out.log -e $LOG_DIR/err.log --pidFile $PID $SERVER
}
# Stop the daemon and clear the pid file so start() can run again.
stop() {
forever stop $SERVER
rm $PID
}
case "$1" in
start) start;;
stop) stop;;
status) forever list | grep $SERVER;;
restart)
stop
start
;;
*)
echo $"Usage: $0 {start|stop|restart|status}"
exit 2
esac
exit $?
| true
|
6ca1e3414af2033b6a2ecacc464625f2d36b525e
|
Shell
|
rondemena/firecracker-containerd
|
/tools/thinpool.sh
|
UTF-8
| 1,432
| 3.5
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/sh
#
# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Manage an LVM thin-pool used by firecracker-containerd.
# Usage: thinpool.sh {create|remove|reset} <pool-name>
set -eu
VOLUME_GROUP='fcci-vg'
# Default the positionals to empty so the graceful empty-name exit below
# is reachable: under `set -u` an unset "$2" used to abort the script
# with "unbound variable" before the check could ever run.
subcommand="${1:-}"
name="${2:-}"
if [ -z "$name" ]; then
	exit 0
fi
# LVM doubles '-' in names when building /dev/mapper entries.
dm_device="/dev/mapper/$(echo ${VOLUME_GROUP} | sed -e s/-/--/g)-$name"
pool_create() {
	sudo lvcreate --type thin-pool \
		--poolmetadatasize 16GiB \
		--size 1G \
		-n "$name" "$VOLUME_GROUP"
}
pool_remove() {
	# Snapshot and tdata/tmeta sub-devices may not exist; ignore failures.
	sudo dmsetup remove "${dm_device}-snap-"* || true
	sudo dmsetup remove \
		"${dm_device}" \
		"${dm_device}_tdata" "${dm_device}_tmeta" || true
	sudo lvremove -f "$dm_device"
}
case "$subcommand" in
	'create')
		pool_create
		;;
	'remove')
		pool_remove
		;;
	'reset')
		if [ -e "${dm_device}" ]; then
			pool_remove
		fi
		pool_create
		;;
	*)
		echo "This script doesn't support $subcommand"
		exit 1
esac
| true
|
e7b9395cb95b7a935c1c29002b854c93bf1e1a5d
|
Shell
|
efiens/saarctf-example-service
|
/install.sh
|
UTF-8
| 1,775
| 3.6875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -eux
# Install the service on a fresh vulnbox. Target should be /home/<servicename>
# You get:
# - $SERVICENAME
# - $INSTALL_DIR
# - An user account with your name ($SERVICENAME)
# (Those variables are provided by the saarctf build environment that
# invokes this script.)
# 1. TODO Install dependencies
# EXAMPLE: apt-get install -y nginx
apt-get update
apt-get install -y socat
# 2. TODO Copy/move files
mv service/* "$INSTALL_DIR/"
chown -R "$SERVICENAME:$SERVICENAME" "$INSTALL_DIR"
# 3. TODO Configure the server
# ...
# For example:
# - adjust configs of related services (nginx/databases/...)
# - Build your service if there's source code on the box
# - ...
#
# Useful commands:
# - nginx-location-add <<EOF
# location {} # something you want to add to nginx default config (port 80)
# EOF
# 4. TODO Configure startup for your service
# Typically use systemd for that:
# Install backend as systemd service
# Hint: you can use "service-add-simple '<command>' '<working directory>' '<description>'"
# service-add-simple "$INSTALL_DIR/TODO-your-script-that-should-be-started.sh" "$INSTALL_DIR/" "<TODO>"
# Deliberately vulnerable example service: socat exposes an interactive
# bash on TCP port 31337 (10 s idle timeout, one fork per connection).
service-add-simple "socat -s -T10 TCP-LISTEN:31337,reuseaddr,fork EXEC:bash,pty,stderr,setsid,sigint,sane" "$INSTALL_DIR/" "<TODO>"
# Example: Cronjob that removes stored files after a while
# cronjob-add "*/6 * * * * find $INSTALL_DIR/data -mmin +45 -type f -delete"
# Example: Initialize Databases (PostgreSQL example)
# Example: 5. Startup database (CI DOCKER ONLY, not on vulnbox)
# if [ -f /.dockerenv ]; then
# EXAMPLE for PostgreSQL: pg_ctlcluster 11 main start
# fi
# Example: 6. Configure PostgreSQL
# cp $INSTALL_DIR/*.sql /tmp/
# sudo -u postgres psql -v ON_ERROR_STOP=1 -f "/tmp/init.sql"
# Example: 7 Stop services (CI DOCKER ONLY)
# if [ -f /.dockerenv ]; then
# pg_ctlcluster 11 main stop
# fi
| true
|
adf48376e942cfc0b6232aff6303ddc431d8555d
|
Shell
|
born4web/install_gitea
|
/install_gitea_sqlite_download_gitea.sh
|
UTF-8
| 2,292
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Install Gitea (with SQLite backend) on a Debian/Ubuntu host:
# updates apt, installs sqlite3 and git, creates the `git` system user,
# downloads the requested Gitea binary and registers the systemd unit.
# Gitea demanded version
. setup_gitea_demanded_version.sh
# library with help scripts
. ./bash_script_library.sh
echo "
-------------------------------------------
* Checking Gitea demanded version *
-------------------------------------------
"
if [[ $gitea_version == "" ]]; then
echo "Error: Gitea version not defined in gitea_demanded_version.sh exiting..."
exit 1
else
echo "I will install version : ${gitea_version}"
fi
echo "
--------------------------------------
* System update/upgrade *
--------------------------------------
"
# BUG FIX: the old code did `error_code=$(sudo apt-get update)` (which
# captures STDOUT, not the exit status) and then `if $error_code`,
# which tried to EXECUTE the captured apt output as a command — the
# failure branch could never trigger correctly.  Test the exit status
# directly instead.
if ! sudo apt-get update; then
echo "
Error: 'apt-get update' did not run correctly
"
exit 1
fi
echo "
-------------------------------
* sqlite installation *
-------------------------------
"
sudo apt install sqlite3
echo "
----------------------------
* git installation *
----------------------------
"
sudo apt install git
echo "
--------------------------------------------
* add system user git to run gitea *
--------------------------------------------
"
# create git user (system account, no password login)
sudo adduser --system --group --disabled-password --shell /bin/bash --home /home/git --gecos 'Git Version Control' git
echo "
--------------------------------------
* download and install gitea *
--------------------------------------
"
sudo wget -O /tmp/gitea https://dl.gitea.io/gitea/${gitea_version}/gitea-${gitea_version}-linux-amd64
sudo mv /tmp/gitea /usr/local/bin
sudo chmod +x /usr/local/bin/gitea
# Standard Gitea directory layout; data/log dirs owned by the git user.
sudo mkdir -p /var/lib/gitea/{custom,data,indexers,public,log}
sudo chown git: /var/lib/gitea/{data,indexers,log}
sudo chmod 750 /var/lib/gitea/{data,indexers,log}
sudo mkdir /etc/gitea
sudo chown root:git /etc/gitea
sudo chmod 770 /etc/gitea
echo "
-------------------------------------------
* gitea daemon recommended setup *
-------------------------------------------
"
sudo wget https://raw.githubusercontent.com/go-gitea/gitea/master/contrib/systemd/gitea.service -P /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable --now gitea
echo "
----------------------------------------
* gitea installation done OK *
----------------------------------------
"
| true
|
a5cd8241f4e57ec69fca15c83b0d6b8bf904d15d
|
Shell
|
mfens98/p-one_noise
|
/G4/absorption/reanalyze/run.sh
|
UTF-8
| 336
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Re-run the reanalysis for a numbered range of cedarLinks input lists.
# $1 - first index, $2 - last index (coerced to integers).
st=$1
en=$2
# Arithmetic context coerces empty/unset values to 0.
(( st = 1*st ))
(( en = 1*en ))
echo "Arguments: $st $en"
i=$st
while (( i <= en )); do
    printf '%s/%s\n' "$i" "$en"
    /cvmfs/icecube.opensciencegrid.org/py3-v4.0.1/RHEL_7_x86_64/bin/python3 /data/p-one/mens/G4/absorption/reanalyze/reanalyze.py /data/p-one/mens/G4/absorption/reanalyze/cedarLinks/"$i".txt
    i=$(( i + 1 ))
done
| true
|
e394ec2f4290205e224f654f5ca69203f6101d33
|
Shell
|
BongoFriendee/CryptoInfo
|
/cinfo
|
UTF-8
| 645
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the current price and today's average for each cryptocurrency
# listed in ./cryptoconfig, using the CryptoCompare REST API.
#defining basic urls
cburl='https://min-api.cryptocompare.com/data'
ccurrprice='/price?fsym='
dayavg='/dayAvg?fsym='
#search file to see if a fiat currency is defined.
# ${i,,} lower-cases the line so "USD"/"usd" both match.
while read i; do
if [[ "${i,,}" == usd ]] ;
then fiat='&tsym=USD' ; fiats='&tsyms=USD'
elif [[ "${i,,}" == eur ]] ;
then fiat='&tsym=EUR' ; fiats='&tsyms=EUR'
fi
done <$PWD/cryptoconfig
# Coin symbols come after the "starting" marker line in cryptoconfig;
# the sed deletes everything up to and including that marker.
while read curr ; do
echo "${curr^^}"
echo "========="
echo "Current price" $(curl -s $cburl$ccurrprice"${curr^^}"$fiats)
# awk trims the JSON after the first comma and re-closes the brace.
echo "Today's average" $( curl -s $cburl$dayavg"${curr^^}"$fiat | awk -F, '{print $1"}"}')
done < <(sed -e '1,/starting/ d' $PWD/cryptoconfig)
| true
|
1be31265765766f78e32a1ca13714449bcc10ddf
|
Shell
|
michaelerule/michaelerule.github.io
|
/publications/media/2021_sorrell_rule_oleary/highres_rasters/rasterize_all.sh
|
UTF-8
| 508
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# For every SVG in the current directory: export the drawing area to
# PDF with Inkscape, rasterize that PDF to a 1200-dpi PNG, then flatten
# transparency onto a white background via a temporary GIF round-trip.
# All expansions are quoted so file names with spaces survive (SC2086).
for IMGNAME in ./*.svg; do
    echo "$IMGNAME"
    BASENAME="${IMGNAME%.*}"
    echo "$BASENAME"
    #cairosvg "$IMGNAME" -o "$BASENAME.pdf"
    inkscape --file="$BASENAME.svg" --export-area-drawing --without-gui --export-pdf="$BASENAME.pdf"
    inkscape -z --export-dpi 1200 -e "$BASENAME.png" "$BASENAME.pdf"
    # ImageMagick: strip the alpha channel against a white background.
    convert "$BASENAME.png" -background white -alpha remove "$BASENAME.gif"
    convert "$BASENAME.gif" "$BASENAME.png"
    rm "$BASENAME.gif"
    echo ==================================
done
| true
|
d87bec9591c59d115874e6d3da0b2189b96a5a75
|
Shell
|
GuoMingJian/Turbo
|
/TBXcworkspace/TBBusiness/TBBusiness/Server/Analytics/TACCore.framework/Scripts/basements/utils.sh
|
UTF-8
| 303
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
# Search under a path for files whose names match a glob pattern.
# $1 - directory to search, $2 - glob pattern (e.g. "*.txt")
# On a match, SEARCH_FIELS holds the reverse-sorted, newline-separated
# list of paths; when nothing matches, "xx" is echoed and SEARCH_FIELS
# is left unchanged.
SEARCH_FIELS=()
searchFiles() {
	local path=$1
	local pattern=$2
	local files
	# -maxdepth must precede other predicates or find complains.
	files=$(find "$path" -maxdepth 100 -name "$pattern")
	# BUG FIX: the old `[ ${#files[@]} == 0 ]` check compared the element
	# count of a SCALAR variable, which is always 1, so the "no match"
	# branch was unreachable.  Test the string itself instead.
	if [ -z "$files" ]; then
		echo "xx"
	else
		SEARCH_FIELS=$(printf '%s\n' "$files" | sort -r)
	fi
}
| true
|
b4adca4a065ccee05887cdf354230edcd1153370
|
Shell
|
SamoulY/chuanqi_dev
|
/server/trunk/server/sh/start_all.sh
|
UTF-8
| 666
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Start all game server nodes.
# @author: zhengsiying
# @date: 2015.04.09
#
# Resolve the directory containing this script, then pull in the shared
# helpers and the NODES configuration.
source `dirname $0`/header.sh
source ${SCRIPT_PATH}/config.sh
cd ${SCRIPT_PATH}
chmod +x start_one.sh
echo "======================================================="
# Number of entries in the NODES array (from config.sh).
NODE_COUNT=${#NODES[*]}
for ((i=0; i<${NODE_COUNT}; i++));
do
if [ ${i} -gt 0 ]; then
echo "-------------------------------------------------------"
fi
# Each NODES entry is a "name number" pair; split it into an array.
NODE=(${NODES[$i]})
NODE_NAME=${NODE[0]}
NODE_NUM=${NODE[1]}
NODE_FLAG=${NODE_NAME}_${NODE_NUM}
./start_one.sh ${NODE_NAME} ${NODE_NUM}
done
echo "======================================================="
|
dff21f598340c071a8f269430b690ba9feb6dfc0
|
Shell
|
coughls/MRSA_scripts
|
/scripts/quality_control.sh
|
UTF-8
| 2,172
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env
# Sequencing QC pipeline: FastQC reports, Trimmomatic quality trimming
# of paired-end reads, then per-file quality stats and read-length
# summaries.
#Requires these tools:
#FASTQC
#Trimmomatic
#fastx_quality_stats from the FASTX toolit
#and scripts:
#get_seq_len_fastq_2.py #(in this repository)
#read_stats.py (in this repository)
#Run FASTQC
fastqc *.fastq -t 12 -o FASTQC #uses 12 threads
#Quality Trim
##parameters for trimmomatic
l=30 #quality score threshold to trim bases below
w=4 #windowsize
minlen=36 #min length of reads to retain. Reads discarded if they below this length after trimming
##can change the adapaters specified by changing the file after ILLLUMINACLIP
mkdir TRIMMED #directory for output files
#the number after -c may will also need to be changed if filenames are different
# The rev|cut|rev strips the fixed "_L001_R?_001.fastq" suffix (18 chars
# plus one) so paired R1/R2 files collapse to one sample prefix.
for i in $(ls *.fastq | rev | cut -c 19- | rev | uniq)
do
echo "$i"
java -jar trimmomatic-0.32.jar PE -threads 12 -phred33 -trimlog ${i}.log ${i}_L001_R1_001.fastq ${i}_L001_R2_001.fastq TRIMMED/${i}_L001_R1_001_paired.fastq TRIMMED/${i}_L001_R1_001_unpaired.fastq TRIMMED/${i}_L001_R2_001_paired.fastq TRIMMED/${i}_L001_R2_001_unpaired.fastq ILLUMINACLIP:Trimmomatic-0.32/adapters/NexteraPE-PE.fa:2:30:10 LEADING:$l TRAILING:$l SLIDINGWINDOW:$w:$l MINLEN:$minlen
done
##Get some other quality metrics
#output directories
mkdir LENS
mkdir QUALSTATS
#make function to calculate quality statistics
quality_metrics(){
#Q of 33 needs to be changed for some files produced by different sequencers -fastqc can tell the illumina/solexa etc file type and googling will retrieve its phred qua
#lity encoding for that file type. Important that this is correct as the wrong phred encoding will mean your quality scores make no sense!
local file=$1
# NOTE(review): the two progress echoes print $i (leftover loop var
# from the trimming stage), not $file — presumably meant to be $file.
echo "Running quality stats on $i"
fastx_quality_stats -Q 33 -i $file -o QUALSTATS/$file.qualstats
echo "Running sequence length analysis on $i"
./get_seq_len_fastq_2.py $file LENS/$file.lens
}
#Run function on every fastq file in the directory
for fastq_file in *.fastq
do
quality_metrics $fastq_file
done
cd LENS
#read_stats.txt contains information on read length in each file of that directory -files must end in .lens for the script to work
./read_stats.py > read_stats.txt
#zip up all files
gzip *.lens &
cd ../QUALSTATS
gzip *.qualstats &
| true
|
b8be17ff362c8b14ad1ee91599992e7b4ebe6697
|
Shell
|
sq3ope/iot
|
/linuxpl/librus/deploy_to_remote_host.sh
|
UTF-8
| 408
| 2.734375
| 3
|
[] |
no_license
|
# Deploy the librus sources to the remote web host and fix permissions.
# Resolve this script's own directory so it can be run from anywhere.
project_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# remote.inc is expected to define $port, $username and $host.
source "$project_dir/src/config/remote.inc"
# Push the source tree to the remote public_html/librus/ directory.
rsync -av -e "ssh -p $port" src/ "$username@$host:public_html/librus/"
# Post-deploy steps on the remote side: create placeholder files, then
# normalise permissions (755 dirs, 644 files).  The heredoc body is
# expanded remotely, not locally.
ssh -p $port "$username@$host" <<EOF
./public_html/librus/create_dummy_files.sh ./public_html
find ~/public_html/librus -type d -exec chmod 755 {} \;
find ~/public_html/librus -type f -exec chmod 644 {} \;
EOF
| true
|
315b3e2334432bf843ebcb6c8c20afc95b3977e1
|
Shell
|
chiju/automation_scripts
|
/ssh-with-pki-certs/ssh_ca.sh
|
UTF-8
| 1,981
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a smallstep SSH certificate authority: install step/step-ca,
# initialise the CA, register it as a systemd service, and print the CA
# public keys and fingerprint.
# NOTE(review): the CA password is stored in plaintext in this script and
# in /root/.step_ca_password — acceptable only for a lab environment.
pki_name='chiju'
ca_dns_name=$(curl -s ifconfig.co)
listen_address='0.0.0.0:8443'
provisioner='chiju'
password="pass"
# Installing step
if ! [[ $(dpkg -l | grep step-cli) ]]
then
curl -LO https://github.com/smallstep/cli/releases/download/v0.12.0/step-cli_0.12.0_amd64.deb
sudo dpkg -i step-cli_0.12.0_amd64.deb
fi
# Installing step-ca
if ! [[ $(dpkg -l | grep step-certificates) ]]
then
curl -LO https://github.com/smallstep/certificates/releases/download/v0.12.0/step-certificates_0.12.0_amd64.deb
sudo dpkg -i step-certificates_0.12.0_amd64.deb
fi
# Initiating CA
step ca init --ssh --name=$pki_name --dns=$ca_dns_name --address=$listen_address --provisioner=$provisioner --provisioner-password-file <(echo $password) --password-file <(echo $password)
# Adding claim line
sed -i '/"encryptedKey":/a ,"claims": { "enableSSHCA": true }' $(step path)/config/ca.json
# Starting CA server
#step-ca $(step path)/config/ca.json --password-file <(echo 'pass')
# -p: do not fail when the directory already exists (script reruns)
mkdir -p /usr/local/lib/step
echo -e "$password" > /root/.step_ca_password
cat > /etc/systemd/system/step-ca.service <<- "EOF"
[Unit]
Description=Step Certificates
Wants=basic.target
After=basic.target network.target
[Service]
WorkingDirectory=/usr/local/lib/step
ExecStart=/usr/bin/step-ca /root/.step/config/ca.json --password-file /root/.step_ca_password
KillMode=process
Restart=on-failure
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
# Reloading systemd for the new service and starting step-ca service
systemctl daemon-reload
systemctl restart step-ca.service
systemctl status step-ca.service -l
# Printing user public key
# -e so the leading \n is a real newline (original printed a literal "\n")
echo -e "\n===="
echo -e "CA User public key $(step path)/certs/ssh_user_key.pub is"
cat $(step path)/certs/ssh_user_key.pub
echo "===="
# Printing host public key
echo -e "CA Host public key $(step path)/certs/ssh_host_key.pub is"
cat $(step path)/certs/ssh_host_key.pub
echo "===="
# Printing fringerprint and CA URL
cat $(step path)/config/defaults.json
| true
|
6fcc310e11761d9a37bae553cb4b29cebbe23a99
|
Shell
|
bishopst28/CIS361
|
/project3/filterNoiceWords.sh
|
UTF-8
| 245
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Filter stdin, dropping every line whose first word appears (as an exact
# whole line) in the noise-word file given as the last argument.
# Usage: bash filterNoiceWords [FileName]     (input arrives on stdin)
if [ $# -lt 1 ] || [ ! -f "${*: -1}" ]
then
	echo "filterNoiceWords Usage: bash filterNoiceWords [FileName]"
	exit 1
else
	noiceFile=${*: -1}
fi
while read -r line
do
	# take the first whitespace-separated word; 'set --' (not bare 'set',
	# which dumps all shell variables on an empty line) resets $1 safely
	set -- $line
	# original used '[ ! grep ... ]', which is invalid test syntax and made
	# the condition fail on every line; negate the grep status directly
	if ! grep -Fxq -- "${1:-}" "$noiceFile"
	then
		echo "$line"
	fi
done
| true
|
5375de0f8f45b178796e7b7281e23d58e9dcf6ad
|
Shell
|
chintitomasud2/Script
|
/Script-Primax/lab-teambridge
|
UTF-8
| 9,704
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Copyright 2014 Red Hat, Inc.
#
# NAME
# lab-teambridge - grading script for RH254/RH299 link aggregation labs
#
# SYNOPSIS
# lab-teambridge {setup|solve|grade}
#
# This script only works on server.
#
# DESCRIPTION
# This script performs the grading steps for the link aggregation
# practice exercises and lab for RH254/RH299.
#
# When it grades, it looks for a teamed interface with a specific
# name, checks to see if it has the correct port interfaces, then
# makes sure it is using the correct teaming mode. This script also
# looks for a bridge with a specific name that is based upon the
# teamed interface. It also confirms that the IPv4 network settings
# are correct.
#
# CHANGELOG
# * Tue Jul 8 2014 George Hacker <ghacker@redhat.com>
# - converted from bonded to teamed interface
# * Thu Jun 3 2014 George Hacker <ghacker@redhat.com>
# - added systemd unit by Wander and error checking for setup function
# * Thu May 29 2014 George Hacker <ghacker@redhat.com>
# - added setup content provided by Steve Bonneville
# * Wed May 28 2014 George Hacker <ghacker@redhat.com>
# - original code
# Use a fixed, minimal PATH so the grading steps behave predictably.
PATH=/usr/bin:/bin:/usr/sbin:/sbin
# Initialize and set some variables
MYHOST=''
CMD=''
DEBUG='true'
RUN_AS_ROOT='true'
# Expected teamed-interface layout: team0 over eno1+eno2 in activebackup
# mode, bridged as brteam0 with a static 192.168.0.100/24 address.
team_name='team0'
team_port1='eno1'
team_port2='eno2'
team_runner='activebackup'
bridge_name="br${team_name}"
bridge_IP_regex="192.168.0.100/24"
gateway_IP=192.168.0.254/24
# ifcfg files the solve step writes / the grade step inspects.
team_config_file=/etc/sysconfig/network-scripts/ifcfg-${team_name}
port1_config_file=/etc/sysconfig/network-scripts/ifcfg-${team_name}-port1
port2_config_file=/etc/sysconfig/network-scripts/ifcfg-${team_name}-port2
bridge_config_file=/etc/sysconfig/network-scripts/ifcfg-${bridge_name}
# Source library of functions
# labtool.shlib provides print_PASS/print_FAIL/print_SUCCESS, debug,
# check_root and get_X used below.
source /usr/local/bin/labtool.shlib
#trap on_exit EXIT
# Additional functions for this shell script
# Print the command-line usage summary to stdout.
# The lab name shown is this script's filename with its "lab-" prefix removed.
function print_usage {
  local problem_name
  problem_name=$(basename $0)
  problem_name=${problem_name#lab-}
  cat << EOF
Usage: lab ${problem_name} COMMAND
lab ${problem_name} -h|--help
COMMAND is one of:
grade - perform any grading steps and report on this machine
setup - perform any setup steps for this lab on this machine
EOF
}
# Set up the lab environment on "server": create two veth interfaces
# (eno1/eno2) backed by a bridge hidden in a network namespace, installed
# as the systemd unit hiddenbridge.service.  Fails if the devices exist.
# $1 - short hostname this is running on (must be "server").
function lab_setup {
VMNAME=$1
case $VMNAME in
server)
echo -n 'Setting up for link aggregation lab ... '
if ip link | grep -q "${team_port1}"; then
print_FAIL
echo 'Warning: virtual network devices already exist.'
exit 1
else
# Steve Bonneville <sbonnevi@redhat.com>
#
# This script will appear to create two network
# interfaces, eno1 and eno2, which are connected
# to an "invisible" bridge hidden in a network
# namespace. Also hidden in that namespace is a
# "host" on 192.168.0.254 that won't respond to
# pings unless eno1 or eno2 are configured with
# an address on the 192.168.0.0/24 network!
#
# To reset this script to normal, from the default
# namespace just run "ip netns del hidden" and it'll
# clean everything up!
#
# To inspect the hidden namespace from inside, start
# a subshell with "ip netns exec hidden bash" and then
# run normal networking commands. You can get back to
# the default namespace by exiting the subshell.
# "ip netns exec bash" is to network interfaces
# what
# "chroot" is to file systems
#
# Documentation in ip-netns(8).
#
# Write the helper script.  ${team_portN}/${gateway_IP} expand NOW;
# the backslash-escaped \$(...)/\${...} expand when the helper runs.
cat > /usr/local/sbin/teambridge << EOF
#!/bin/bash
# Pardon the dirty SELinux hack, didn't feel like writing a policy
# module just for this.
OLDENFORCE=\$(getenforce)
setenforce 0
# create namespace
ip netns add hidden
# Activate ::1 inside namespace
ip netns exec hidden ip link set dev lo up
# Add bridge inside "hidden" namespace and turn on
ip netns exec hidden brctl addbr hiddenbr0
ip netns exec hidden ip link set dev hiddenbr0 up
# Add virtual patch cables to bridge
# the eno1/eno2 ends are student-visible
ip link add ${team_port1} type veth peer name ${team_port1}-port
ip link add ${team_port2} type veth peer name ${team_port2}-port
ip link set ${team_port1}-port netns hidden up
ip link set ${team_port2}-port netns hidden up
ip netns exec hidden brctl addif hiddenbr0 ${team_port1}-port
ip netns exec hidden brctl addif hiddenbr0 ${team_port2}-port
# Attach virtual patch cable to bridge inside namespace
# and assign its far side address as 192.168.0.254/24.
ip netns exec hidden ip link add inside0 type veth peer name inside0-port
ip netns exec hidden brctl addif hiddenbr0 inside0-port
ip netns exec hidden ip link set inside0-port up
ip netns exec hidden ip link set inside0 up
ip netns exec hidden ip addr add ${gateway_IP} dev inside0
setenforce \${OLDENFORCE}
EOF
chmod +x /usr/local/sbin/teambridge
# Oneshot unit runs the helper before NetworkManager; stopping it
# deletes the hidden namespace, undoing everything.
cat > /etc/systemd/system/hiddenbridge.service << EOF
[Unit]
Description=Create two virtual network interfaces for Red Hat Enterprise Linux System Administration III training (RH254)
Before=NetworkManager.service
[Service]
Type=oneshot
ExecStart=/usr/local/sbin/teambridge
RemainAfterExit=yes
ExecStopPost=ip netns del hidden
[Install]
WantedBy=network.target
EOF
systemctl daemon-reload
systemctl enable hiddenbridge.service &> /dev/null
systemctl start hiddenbridge.service
print_SUCCESS
fi
;;
desktop )
print_FAIL && echo "The setup script needs to be run on server"
;;
*)
# Should never get here, but what the hey....
print_FAIL && echo "The setup script needs to be run on server"
;;
esac
}
# Produce the lab's reference solution on "server": build the teamed
# interface with its two ports, then bridge it with a static IPv4 address
# using ifcfg files (NetworkManager is disabled for the bridge part).
# $1 - short hostname this is running on (must be "server").
function lab_solve {
VMNAME=$1
case $VMNAME in
server)
if ip link | grep -q "${team_name}"; then
print_FAIL
echo 'Warning: teamed network interface already exists.'
exit 1
else
# If the port interfaces don't exist, run the setup function
if ! ip link | grep -q "${team_port1}"; then
lab_setup $MYHOST
fi
echo -n 'Creating the team interface ........... '
nmcli con add type team con-name ${team_name} ifname ${team_name} \
config '{"runner": {"name": "activebackup"}}' &> /dev/null
print_SUCCESS
echo -n 'Creating the port interfaces .......... '
nmcli con add type team-slave con-name ${team_name}-port1 \
ifname ${team_port1} master ${team_name} &> /dev/null
nmcli con add type team-slave con-name ${team_name}-port2 \
ifname ${team_port2} master ${team_name} &> /dev/null
print_SUCCESS
echo -n 'Disabling the team interface .......... '
nmcli dev dis ${team_name} &> /dev/null
print_SUCCESS
# The bridge is configured via ifcfg files and the legacy network
# service, so NetworkManager must be out of the way.
echo -n 'Disabling NetworkManager .............. '
(
systemctl stop NetworkManager
systemctl disable NetworkManager
) &> /dev/null
print_SUCCESS
echo -n 'Creating bridge configuration file .... '
# %/* keeps the address part, #*/ keeps the prefix length.
cat > ${bridge_config_file} << EOF
DEVICE=${bridge_name}
ONBOOT=yes
TYPE=Bridge
IPADDR0=${bridge_IP_regex%/*}
PREFIX0=${bridge_IP_regex#*/}
EOF
print_SUCCESS
echo -n 'Removing IP configuration from ports .. '
# Keep only the lines from NAME onward, dropping any IP settings above.
for config in ${port1_config_file} ${port2_config_file}; do
sed -i -n -e '/NAME/,$p' ${config}
done
print_SUCCESS
echo -n 'Attaching team interface to bridge .... '
echo "BRIDGE=${bridge_name}" >> ${team_config_file}
print_SUCCESS
echo -n 'Restarting the network ................ '
systemctl restart network
print_SUCCESS
fi
;;
desktop )
print_FAIL && echo "The solve script needs to be run on server"
;;
*)
# Should never get here, but what the hey....
print_FAIL && echo "The solve script needs to be run on server"
;;
esac
}
# Grade the lab on "server": verify the teamed interface (ports + runner)
# and the bridge (membership + IPv4 settings), printing PASS/FAIL per check.
# $1 - short hostname this is running on (must be "server").
function lab_grade {
VMNAME=$1
case $VMNAME in
desktop)
print_FAIL && echo "The grade script needs to be run on server"
;;
server)
# perform the teamed interface checks
echo -n 'Confirming teamed inferface exists .... '
# teamdctl exits non-zero when the team does not exist; its state
# output is reused below for the runner check.
if team_info=$(teamdctl ${team_name} state); then
print_PASS
# check port interfaces
echo -n 'It uses specified port interfaces ..... '
if teamdctl ${team_name} port present "${team_port1}" &> /dev/null &&
teamdctl ${team_name} port present "${team_port2}" &> /dev/null; then
print_PASS
else
print_FAIL
fi
# check teaming runner
echo -n 'Correct team runner implemented ....... '
if echo "${team_info}" | grep -q "runner:.*${team_runner}"; then
print_PASS
else
print_FAIL
fi
else
print_FAIL
fi
# perform the bridge checks
echo -n 'Confirming bridge exists .............. '
if brctl show | grep -q "^${bridge_name}"; then
print_PASS
echo -n 'Bridge uses team interface ............ '
# bridge uses the team interface?
if brctl show "${bridge_name}" | grep -q "${team_name}$"; then
print_PASS
else
print_FAIL
fi
# bridge network settings correct?
echo -n 'Bridge network settings correct ....... '
if ip addr show dev "${bridge_name}" | grep -q "${bridge_IP_regex}"; then
print_PASS
else
print_FAIL
fi
else
print_FAIL
fi
;;
*)
# Should never get here, but what the hey....
print_FAIL && echo "The grade script needs to be run on server"
;;
esac
}
# Main area
# Check if to be run as root (must do this first)
# Re-exec this script under sudo when invoked as a regular user.
if [[ "${RUN_AS_ROOT}" == 'true' && "${EUID}" -gt "0" ]] ; then
if [[ -x /usr/bin/sudo ]] ; then
${SUDO:-sudo} $0 "$@"
exit
else
# Fail out if not running as root
check_root
fi
fi
# Process command line
# Parse input
case $1 in
setup|solve|grade)
CMD=$1
;;
-h|--help)
print_usage
exit 0
;;
*)
echo Bad option $1
print_usage
exit 1
;;
esac
# Validation
[[ -z "$CMD" ]] && debug "Missing command" && print_usage && exit 2
# Get local system information
# get_X (from labtool.shlib) sets MYHOST from the machine's hostname.
get_X
# Branch based on short (without number) hostname
case $MYHOST in
desktop|server)
# Dispatch to lab_setup / lab_solve / lab_grade.
lab_$CMD $MYHOST
;;
*)
debug "Bad or missing hostname - $MYHOST"
debug "This command needs to be run on desktop or server"
exit 3
;;
esac
| true
|
bc3dfb0db2eb85f1c54e3078ae459c13d76b5e0e
|
Shell
|
pgajdos/apache-rex
|
/mod_proxy_ftp-basic/run.sh
|
UTF-8
| 1,519
| 3.3125
| 3
|
[
"LicenseRef-scancode-other-permissive",
"MIT",
"NTP",
"LicenseRef-scancode-rsa-1990",
"LicenseRef-scancode-rsa-md4",
"Beerware",
"RSA-MD",
"HPND-sell-variant",
"Spencer-94",
"LicenseRef-scancode-zeusbench",
"metamail",
"Apache-2.0"
] |
permissive
|
# Integration test for mod_proxy_ftp: start an anonymous vsftpd instance,
# then verify that Apache proxies directory listings and file contents.
exit_code=0
# processman provides get_pid_port / kill_pid_port helpers.
. ../lib/processman
mkdir $AREX_RUN_DIR/ftpmirror
echo 'FTP HELLO' > $AREX_RUN_DIR/ftpmirror/welcome
# run ftp daemon
cat << EOF > $AREX_RUN_DIR/vsftpd.conf
anonymous_enable=YES
anon_root=$AREX_RUN_DIR/ftpmirror
anon_world_readable_only=YES
listen=YES
listen_address=127.0.0.1
listen_port=$AREX_FTP_PORT
run_as_launching_user=YES
xferlog_enable=YES
vsftpd_log_file=$AREX_RUN_DIR/vsftpd.log
EOF
if [ $AREX_VSFTPD_VERSION -ge 300 ]; then
# https://wiki.archlinux.org/index.php/Very_Secure_FTP_Daemon#vsftpd:_Error_421_Service_not_available,_remote_server_has_closed_connection
echo 'seccomp_sandbox=NO' >> $AREX_RUN_DIR/vsftpd.conf
fi
######
echo -n 'Starting vsftpd ... '
vsftpd $AREX_RUN_DIR/vsftpd.conf&
# give the daemon time to bind its port before probing for the pid
sleep 3
vsftpd_pid=$(get_pid_port $AREX_FTP_PORT)
if [ -z "$vsftpd_pid" ]; then
echo "FAILED."
echo +++++++ vsftpd.log ++++++++
cat $AREX_RUN_DIR/vsftpd.log
echo +++++++ vsftpd.log ++++++++
exit 1
fi
echo $vsftpd_pid
echo
########
# Each failed check sets a distinct exit code for easier diagnosis.
echo "[1] demonstrate directory listing on ftp server"
curl -s http://localhost:$AREX_PORT/ | grep '<a href="welcome">welcome</a>' || exit_code=1
echo "[2] show file contents on ftp server"
curl -s http://localhost:$AREX_PORT/welcome | grep 'FTP HELLO' || exit_code=2
########
# Dump the daemon log when anything failed, to aid debugging.
if [ $exit_code -gt 0 ]; then
echo +++++++ vsftpd.log ++++++++
cat $AREX_RUN_DIR/vsftpd.log
echo +++++++ vsftpd.log ++++++++
fi
echo
echo -n 'Stopping vsftpd ... '
kill_pid_port $vsftpd_pid $AREX_FTP_PORT && echo 'done.' || echo 'FAILED.'
########
exit $exit_code
| true
|
7e4d2ae1348a0d17e42740d855c4dbe1aea1de55
|
Shell
|
sifadil/pcsx2-playground
|
/build.sh
|
UTF-8
| 919
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# Usage: sh build.sh [option]
# option can be all (rebuilds everything), clean, or nothing (incremental build)
# Modify the individual build.sh for specific program options like debug symbols
# Uncomment if building by itself, rather then with all the plugins
#Normal
export PCSX2OPTIONS="--enable-sse3 --enable-sse4 --prefix `pwd`"
#Optimized, but a devbuild
#export PCSX2OPTIONS="--enable-sse3 --enable-sse4 --enable-devbuild --prefix `pwd`"
#Debug / Devbuild version
#export PCSX2OPTIONS="--enable-debug --enable-devbuild --enable-sse3 --prefix `pwd`"
# Make sure we have plugins, and bring the normal plugins in.
sh fetch.sh
export PCSX2PLUGINS="`pwd`/bin/plugins"
curdir=`pwd`
cd ${curdir}/plugins
# Forward the original arguments verbatim with "$@"; the previous
# 'option=$@' round-trip re-split them on whitespace.
sh build.sh "$@"
if [ $? -ne 0 ]
then
echo Error with building plugins
exit 1
fi
cd ${curdir}/pcsx2
sh build.sh "$@"
if [ $? -ne 0 ]
then
echo Error with building pcsx2
exit 1
fi
| true
|
0832f5dc4c5dbf6c362b406eca7fc13e856264f0
|
Shell
|
JSterling8/AllAboutThatBash
|
/palindrome
|
UTF-8
| 1,744
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
# This command uses the bash shell.
# Author: Jonathan Sterling
# Date last modified: 13 December 2012
# This command will determine whether or not a word or phrase entered is a palindrome.

# is_palindrome WORD_OR_PHRASE
# Lowercases the argument, strips every non-alphanumeric character, and
# returns 0 if what remains reads the same forwards and backwards.
is_palindrome() {
	local entry half i
	entry=$(echo -n "$1" | tr A-Z a-z)  # lowercase first
	# Strip non-alphanumerics.  The original pattern used the range A-z,
	# which wrongly kept the characters [ \ ] ^ _ ` (they sit between Z
	# and a in ASCII); after lowercasing, a-z0-9 is the correct set.
	entry=${entry//[^a-z0-9]/}
	half=$(( ${#entry} / 2 ))
	i=0
	while [ $i -lt $half ]; do
		# compare the i-th character from the front with the i-th from the back
		if [ "${entry:$i:1}" != "${entry: -$(($i+1)):1}" ]; then
			return 1
		fi
		i=$(($i+1))
	done
	return 0
}

expectedArgs=1
if [ $# -ne $expectedArgs ] # If there's not 1 and only 1 input.
then
	echo "The correct usage of this command is \"palindrome WORD_OR_PHRASE_TO_CHECK\"" # Instruct on correct command usage.
else
	if is_palindrome "$1"
	then
		echo "\"$1\" is a palindrome."
	else
		echo "\"$1\" is not a palindrome."
	fi
fi
| true
|
34073f7f61100ed0561890ac58543bc5578f3cf7
|
Shell
|
mohisen/zdotfiles
|
/7/ping-cmd.sh
|
UTF-8
| 2,950
| 2.890625
| 3
|
[] |
no_license
|
# Reference sheet of ping/network-reachability one-liners collected from
# commandlinefu.com.  NOTE(review): this is a cheat sheet, not a runnable
# script — several entries contain placeholders (<host>, xxx.xxx.xxx.xxx),
# Windows cmd.exe syntax, Cisco IOS syntax, or crontab lines.
# commandlinefu.com by David Winterbottom
# Ping all hosts on 192.168.1.0/24
fping -ga 192.168.1.0/24 2> /dev/null
# Ping all hosts on 192.168.1.0/24
nmap -sn 192.168.1.0/24
# Ping all hosts on 192.168.1.0/24
nmap -sP 192.168.0.1-254
# Ping all hosts on 192.168.1.0/24
ping -b 192.168.0.255
# Ping all hosts on 192.168.1.0/24
for i in {0..255} ; do (ping 192.168.1.$i -c 1 > /dev/null && echo "192.168.1.$i" & ) ; done
# The awesome ping / traceroute combination
mtr <URL>
# Pop up a Growl alert if Amtrak wifi doesn't know where to find The Google
while [ 1 ]; do (ping -c 1 google.com || growlnotify -m 'ur wifiz, it has teh sad'); sleep 10; done
# Ping xxx.xxx.xxx.xxx ip 100000 times with size 1024bytes
ping xxx.xxx.xxx.xxx size 1024 repeat 100000
# csv file of ping every minutes
while true; do (date | tr "\n" ";") && ping -q -c 1 www.google.com|tail -1|cut -d/ -f5 ;sleep 1; done >> uptime.csv
# Test against loopback address with the 0.0.0.0 default route.
telnet 0 <port>
# avoid ssh hangs using jobs
for host in $HOSTNAMES; do ping -q -c3 $host && ssh $host 'command' & for count in {1..15}; do sleep 1; jobs | wc -l | grep -q ^0\$ && continue; done; kill %1; done &>/dev/null
# avoid ssh hangs using jobs
for host in $MYHOSTS; do ping -q -c3 $H 2>&1 1>/dev/null && ssh -o 'AllowedAuthe ntications publickey' $host 'command1; command2' & for count in 1 2 3 4 5; do sleep 1; jobs | wc -l | grep -q ^0\$ && continue; done; kill %1; done
# Ping a range of numbered machines
c:\>for %t in (0 1 2 3 4 5 6 7) do for %d in (0 1 2 3 4 5 6 7 8 9) do ping -n 1 machine-0%t%d
# Send you 1000 packages icmp eco to "host" in short time
# ping -c 1000 -i 0.001 <host>
# Ping sweep without NMAP
prefix="169.254" && for i in {0..254}; do echo $prefix.$i/8; for j in {1..254}; do sh -c "ping -m 1 -c 1 -t 1 $prefix.$i.$j | grep \"icmp\" &" ; done; done
# Text graphing ping output filter
ping g.co|perl -ne'$|=/e=(\S+)/||next;(push@_,$1)>30&&shift@_;print"\r",(map{"\xe2\x96".chr(128+7*$_/(sort{$b<=>$a}@_)[0])." "}@_),"$1ms"'
# scan subnet for used IPs
nmap -T4 -sn 192.168.1.0/24
# Find all machines on the network using broadcast ping
ping -b <broadcast address>
# What value should I set my TCP/IP MTU (Max. Transmission Unit) to?
pktsize=1516;for i in $( seq $pktsize -8 1450 ) ; do ping -M do -s $i -c 1 slashdot.org; done
# ping as traceroute
mtr google.com
# ping as traceroute
for i in {1..30}; do ping -t $i -c 1 google.com; done | grep "Time to live exceeded"
# Check if a machine is online
ping1 IPaddr_or_hostname
# Check if a machine is online with better UI
echo -n "IP Address or Machine Name: "; read IP; ping -c 1 -q $IP >/dev/null 2>&1 && echo -e "\e[00;32mOnline\e[00m" || echo -e "\e[00;31mOffline\e[00m"
# Check if a machine is online
ping -c 1 -q MACHINE_IP_OR_NAME >/dev/null 2>&1 && echo ONLINE || echo OFFLINE
# Send e-mail if host is 'dead' or not reachable
10,30,50 * * * * ping -c1 -w3 192.168.0.14 >/dev/null
| true
|
3b749c3036ccfad93ad0aad92f6e890189923f4b
|
Shell
|
yocra3/CHD_Marato
|
/scripts/set_project.sh
|
UTF-8
| 2,733
| 2.671875
| 3
|
[] |
no_license
|
#'#################################################################################
#'#################################################################################
#' Set up server for CHD_MARATO project
#'#################################################################################
#'#################################################################################
## Project folder: /home/SHARED/PROJECTS/CHD_MARATO/
# One-time provisioning script: builds the data/ and results/ directory
# layout, symlinks raw data from external drives, and defines a helper
# for unpacking VCF zips.  Paths are specific to this machine.
# Create folders
mkdir data
ln -s /media/Lacie_1/DATA/Methylation_Marato/ data/methylation
ln -s /media/Lacie_1/DATA/MARATÓ/RNASEQ/ data/RNAseq_fastq
mkdir data/RNAseq
mkdir data/GEO
## Add links to WGS bams
# BAMs are spread across several external disks; link them all into one dir.
mkdir data/WGS/
mkdir data/WGS/BAMS/
ln -s /media/carlos/PORSCHE_2/RAW_DATA/Marato_Fetus/BAMS/*S?.bam data/WGS/BAMS/
ln -s /media/carlos/PORSCHE_1/RAW_DATA/Marato_Fetus/BAMS/*S?.bam data/WGS/BAMS/
ln -s /media/carlos/PORSCHE_1/RAW_DATA/Marato_Fetus/BAMS/*S??.bam data/WGS/BAMS/
ln -s /media/carlos/CANVIO_2/RAW_DATA/Marato_Fetus/BAMS/*S?.bam data/WGS/BAMS/
ln -s /media/carlos/CANVIO_1/RAW_DATA/Marato_Fetus/BAMS/*S?.bam data/WGS/BAMS/
ln -s /media/carlos/PORSCHE_2/RAW_DATA/Marato_Fetus/BAMS/*S?.bam.bai data/WGS/BAMS/
ln -s /media/carlos/PORSCHE_1/RAW_DATA/Marato_Fetus/BAMS/*S?.bam.bai data/WGS/BAMS/
ln -s /media/carlos/PORSCHE_1/RAW_DATA/Marato_Fetus/BAMS/*S??.bam.bai data/WGS/BAMS/
ln -s /media/carlos/CANVIO_2/RAW_DATA/Marato_Fetus/BAMS/*S?.bam.bai data/WGS/BAMS/
ln -s /media/carlos/CANVIO_1/RAW_DATA/Marato_Fetus/BAMS/*S?.bam.bai data/WGS/BAMS/
## Remove bams from other projects
rm data/WGS/BAMS/624603*
rm data/WGS/BAMS/624604*
rm data/WGS/BAMS/62460506_S1.bam
rm data/WGS/BAMS/62460517_S2.bam
## Add links to WGS fastqs
mkdir data/WGS/FASTQ/
ln -s /media/carlos/PORSCHE_1/RAW_DATA/Marato_Fetus/20180104_aeav/*.fastq.gz data/WGS/FASTQ/
### Copy quantification data from dropbox
### Download general sample data to data folder
mkdir results
mkdir results/methylation
mkdir results/methylation/QC_intermediate
mkdir results/methylation/finalQC_files
mkdir results/methylation/SNPs
mkdir results/methylation/Episignatures
mkdir results/G2CS2CO
mkdir scripts
mkdir workflows
## Function to extract VCFs from zip to folder (run in /home/SHARED/PROJECTS/CHD_MARATO/data/WGS/VCFs/)
# extractVCF ZIPBASE ZIPDIR
#   $1 - zip file basename (without .zip), $2 - directory holding the zip.
#   Extracts only VCFs, drops report files, then recompresses with bgzip
#   and indexes with tabix so downstream tools can random-access the VCF.
function extractVCF {
unzip $2/$1.zip -d /home/SHARED/PROJECTS/CHD_MARATO/data/WGS/VCFs/ -x *fastq*
rm *.xls*
rm *bam*
rm *summary*
rm downloadzip.log
vcf=$(basename "`echo $1*.vcf.gz`" .gz)
# gunzip then bgzip: plain gzip output is not tabix-compatible
gunzip ${vcf}.gz
bgzip $vcf
tabix -p vcf ${vcf}.gz
}
### Copy VCFs from UPF cluster to PC
mkdir -p results/VariantCalling/SNV/
rsync -azvh --progress cruizg@marvin.s.upf.edu:/homes/users/cruizg/projects/CHD_MARATO/results/VariantCalling/SNV/ results/VariantCalling/SNV/
| true
|
ab09b96d170c14e2ffa24899f2939b3ad935d216
|
Shell
|
darcseid/PQ-OpenSSL
|
/oqs-scripts/build_liboqs.sh
|
UTF-8
| 910
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash

###########
# Build liboqs
#
# Environment variables:
# - OPENSSL_DIR: path to install liboqs, default ${PROJECT_ROOT}/oqs
# - LIBOQS_LIBTYPE: if 'shared', build a shared library, else build a static library.
# - LIBOQS_USE_OPENSSL: the value to pass to the -DOQS_USE_OPENSSL build flag. Can be 'ON' or 'OFF',
#   and is 'ON' by default.
###########

set -exo pipefail

OPENSSL_DIR=${OPENSSL_DIR:-"$(pwd)/oqs"}
LIBOQS_USE_OPENSSL=${LIBOQS_USE_OPENSSL:-"ON"}

cd oqs-test/tmp/liboqs
rm -rf build
mkdir build && cd build
# Build the common flag list once; only the shared/static switch differs.
# (The previous version duplicated the whole cmake invocation in both
# branches, a maintenance hazard when flags change.)
CMAKE_ARGS=(-GNinja -DCMAKE_INSTALL_PREFIX="${OPENSSL_DIR}" -DOQS_BUILD_ONLY_LIB=ON -DOQS_USE_OPENSSL="${LIBOQS_USE_OPENSSL}")
if [ "x${LIBOQS_LIBTYPE}" == "xshared" ]; then
    CMAKE_ARGS+=(-DBUILD_SHARED_LIBS=ON)
fi
cmake .. "${CMAKE_ARGS[@]}"
ninja
ninja install
| true
|
6515ade14d141e9e860ff75ac5baaa67dd0ae8cb
|
Shell
|
nagyist/countly-sdk-js
|
/example/ionic_example.sh
|
UTF-8
| 785
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# This is a shell script to create example app for Ionic.
# It scaffolds a blank Ionic app, drops in the Countly example home page,
# adds the Countly Cordova SDK, injects the user's server URL and app key,
# and prepares Android/iOS builds.
rm -rf app_ionic
ionic start app_ionic blank
# abort if scaffolding failed — the rm/curl below must not run elsewhere
cd app_ionic/src/app/home/ || exit 1
rm home.page.ts
curl https://raw.githubusercontent.com/Countly/countly-sdk-cordova-example/master/app_ionic/src/app/home/home.page.ts --output home.page.ts
ionic cordova plugin add https://github.com/Countly/countly-sdk-cordova.git
# -r keeps backslashes in the typed values intact
read -r -p 'Enter your server URL: ' serverURL
read -r -p 'Enter your App Key: ' appKey
# NOTE(review): the entered values are interpolated into sed patterns
# unescaped; values containing '/', '+' or '&' would need escaping.
sed -i'.bak' -e "s/YOUR_API_KEY/$appKey/g" home.page.ts
sed -i'.bak' -e "s+\"https://try.count.ly\"+"\"${serverURL}\""+g" home.page.ts
rm home.page.ts.bak
cd ../../..
npm install
npm run build
ionic cordova platform add android
ionic cordova platform add ios
ionic cordova prepare android
ionic cordova prepare ios
| true
|
af8cba845d97a104019818ca5728fb929a614ac7
|
Shell
|
mylxsw/init.d-template
|
/examples/consul
|
UTF-8
| 2,183
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
### BEGIN INIT INFO
# Provides: consul
# Required-Start: $local_fs $network $named $time $syslog
# Required-Stop: $local_fs $network $named $time $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Description: Service Discovery and Configuration Made Easy
### END INIT INFO
# Path to the consul binary.
BIN=/usr/local/consul/bin/consul
# Base config directory; server/ and bootstrap/ subdirs are selected below.
CONF=/usr/local/consul/etc
# User the agent runs as (via su).
RUNAS=root
# Base command; a --config-dir is appended per start mode.
SCRIPT="$BIN agent"
# Where the background agent's PID and log output are recorded.
PIDFILE=/var/run/consul.pid
LOGFILE=/var/log/consul.log
# Launch the consul agent in server mode (config from $CONF/server).
# Refuses to start when $PIDFILE already points at a live process; records
# the new agent's PID in $PIDFILE and its output in $LOGFILE.
start_server() {
  if [ -f "$PIDFILE" ] && kill -0 $(cat $PIDFILE); then
    echo 'Service already running' >&2
    return 1
  fi
  echo 'Starting service...' >&2
  local launch_cmd
  launch_cmd="$SCRIPT --config-dir $CONF/server &> \"$LOGFILE\" & echo \$!"
  su -c "$launch_cmd" $RUNAS > "$PIDFILE"
  echo 'Service started' >&2
}
# Launch the consul agent in bootstrap mode (config from $CONF/bootstrap).
# Refuses to start when $PIDFILE already points at a live process; records
# the new agent's PID in $PIDFILE and its output in $LOGFILE.
start_bootstrap() {
  if [ -f "$PIDFILE" ] && kill -0 $(cat $PIDFILE); then
    echo 'Service already running' >&2
    return 1
  fi
  echo 'Starting service...' >&2
  local launch_cmd
  launch_cmd="$SCRIPT --config-dir $CONF/bootstrap &> \"$LOGFILE\" & echo \$!"
  su -c "$launch_cmd" $RUNAS > "$PIDFILE"
  echo 'Service started' >&2
}
# Stop the agent recorded in $PIDFILE with SIGTERM, then drop the PID file.
stop() {
  local pid
  if [ ! -f "$PIDFILE" ] || ! kill -0 $(cat "$PIDFILE"); then
    echo 'Service not running' >&2
    return 1
  fi
  echo 'Stopping service...' >&2
  pid=$(cat "$PIDFILE")
  kill -15 "$pid" && rm -f "$PIDFILE"
  echo 'Service stopped' >&2
}
# Report whether the daemon recorded in $PIDFILE is alive.
# Returns 0 when running and 3 when stopped (LSB status convention);
# the original always returned 1, making `status && ...` unusable.
status() {
  if [ ! -f "$PIDFILE" ] || ! kill -0 $(cat "$PIDFILE"); then
    echo 'Service stopped' >&2
    return 3
  fi
  PID=`cat $PIDFILE`
  echo "Service (pid=$PID) is running" >&2
  return 0
}
# Forcefully terminate the daemon recorded in $PIDFILE and drop the file.
force_quit() {
  if [ ! -f "$PIDFILE" ] || ! kill -0 $(cat "$PIDFILE"); then
    echo 'Service not running' >&2
    return 1
  fi
  echo 'Stopping service...' >&2
  # original sent SIGTERM (== -15), making force-quit identical to stop();
  # a forced quit should use SIGKILL
  kill -9 $(cat "$PIDFILE") && rm -f "$PIDFILE"
  echo 'Service stopped' >&2
}
# Dispatch on the requested init action; anything unknown prints usage.
action="$1"
case "$action" in
start-server)
  start_server
  ;;
start-bootstrap)
  start_bootstrap
  ;;
stop)
  stop
  ;;
restart-server)
  # stop may report "not running"; start regardless
  stop
  start_server
  ;;
restart-bootstrap)
  stop
  start_bootstrap
  ;;
force-quit)
  force_quit
  ;;
status)
  status
  ;;
*)
  echo "Usage: $0 {start-bootstrap|start-server|stop|restart-server|restart-bootstrap|status|force-quit}"
esac
| true
|
8f064afd83fd21c93050961f01636f7f0ecd2c53
|
Shell
|
ffac/munin-plugins
|
/dnsmasq_stats
|
UTF-8
| 342
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/sh
# Munin plugin: report the number of IPv4 leases currently held by dnsmasq.
# Invoked with "config" to emit graph metadata, otherwise prints the value.
case $1 in
   config)
        cat <<'EOM'
graph_title IPv4 leases given
graph_vlabel lease count
graph_category dnsmasq
graph_args --base 1000 -l 0
graph_scale no
IPv4leases.label IPv4 lease count
IPv4leases.info Reserved IPv4 Adresses
EOM
        exit 0;;
esac
printf "IPv4leases.value "
# one lease per line in the lease file; read it directly instead of the
# previous 'cat file | wc -l' (useless use of cat)
wc -l < /var/lib/misc/dnsmasq.leases
| true
|
f534f7280d6c2a1206e5cbe42c20f98d7aa12be5
|
Shell
|
iov-one/ponferrada
|
/scripts/test_restart.sh
|
UTF-8
| 487
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Restart the test environment: stop everything, verify no docker
# containers survived, then start again.
set -o errexit -o nounset -o pipefail
# lint this script itself when shellcheck is available
command -v shellcheck > /dev/null && shellcheck "$0"

# get this files directory regardless of pwd when we run it
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

"${SCRIPT_DIR}"/test_stop.sh

# any surviving container means test_stop.sh did not clean up fully
if [[ $(docker ps -q) ]]; then
  echo "Some docker containers are still running, which indicates some kind of problem. Please check manually."
  echo ""
  echo "$ docker ps"
  docker ps
  exit 1
fi

"${SCRIPT_DIR}"/test_start.sh
| true
|
f42a291f7e56c2b1105aa975a8881e90ef774bdb
|
Shell
|
vunhan/dots
|
/.xinitrc
|
UTF-8
| 577
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# ~/.xinitrc
#
# X session startup: run system xinitrc.d snippets, load profiles and
# X resources, then exec the window manager.

if [ -d /etc/X11/xinit/xinitrc.d ]; then
  for f in /etc/X11/xinit/xinitrc.d/*; do
    [ -x "$f" ] && . "$f"
  done
  unset f
fi

## Source .xprofile - Vũ Nhân
# use POSIX '.' rather than the bash-only 'source' under #!/bin/sh
[ -f /etc/xprofile ] && . /etc/xprofile
# fixed: original '[ -f ~/.xprofile]' was missing the space before ']',
# so the test always errored and ~/.xprofile was never sourced
[ -f ~/.xprofile ] && . ~/.xprofile

amixer -c 0 set Speaker 100%
# '[[ ]]' is a bashism; plain '[ ]' works under /bin/sh
[ -f ~/.Xresources ] && xrdb -merge ~/.Xresources # update x resources db
sleep 10 && xscreensaver -no-splash & # starts screensaver daemon
xsetroot -cursor_name left_ptr &
unset GREP_OPTIONS

# Run your window manager from here - Vũ Nhân
exec awesome
| true
|
2870c378a6fc38f40e3a565fb2946317d6357c65
|
Shell
|
cjvance/Slomoco
|
/rus.call.slomoco.glm.sh
|
UTF-8
| 628
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the SLOMOCO GLM for every combination of motion-correction method,
# covariate condition, smoothing choice, and stimulus type.
mc_list=( nomc slo slo2 volreg )
smooth_list=( smooth nosmooth )
stim_list=( learn unlearn )
for mc in ${mc_list[*]}; do
  # nomc produces no motion estimates, so it cannot take a motion covariate
  if [ "${mc}" = "nomc" ]; then
    condition_list=( nocovar )
  else
    condition_list=( covar nocovar )
  fi
  for condition in ${condition_list[*]}; do
    for smooth in ${smooth_list[*]}; do
      for stim in ${stim_list[*]}; do
        echo -e "\nCalling /usr/local/Utilities/Russian/rus.glm.slomoco.sh ${mc} ${condition} ${smooth} ${stim}\n"
        sudo /usr/local/Utilities/Russian/rus.glm.slomoco.sh ${mc} ${condition} ${smooth} ${stim}
      done
    done
  done
done
| true
|
0624d501a674e9de1cb4a27c9e359daa9c32922d
|
Shell
|
handyc/aks
|
/createmasters
|
UTF-8
| 476
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Build per-length "master" ngram lists: for N = 1..NVALUE, concatenate
# every *.N.ngram file under SOURCE_DIR into MASTERS_DIR/N.master.txt.
# Usage: createmasters SOURCE_DIR MASTERS_DIR NVALUE

# create_masters SRC DEST NMAX — the whole job, parameterised and quoted
# so paths with spaces work (the original used unquoted globals and read
# the loop bound from "$3" while NVALUE sat unused).
create_masters() {
  local src=$1 masters=$2 nmax=$3
  mkdir -p "$masters"
  local counter=1
  while [ "$counter" -le "$nmax" ]; do
    ####### create dictionary files for all texts
    echo "Now processing ngrams of length $counter..."
    for f in "$src"/*."$counter".ngram; do
      # skip the literal pattern when the glob matches nothing, instead of
      # feeding it to cat (which errored and logged a bogus "Added" line)
      [ -e "$f" ] || continue
      cat "$f" >> "$masters/$counter.master.txt"
      echo "Added $f to master unsorted list"
    done
    counter=$((counter+1))
  done
  ## finished creating unsorted master lists, now created unique sorted masters
  echo "Master lists have been created."
}

if [ "$#" -eq 3 ]; then
  create_masters "$1" "$2" "$3"
else
  echo "Usage: $(basename "$0") SOURCE_DIR MASTERS_DIR NVALUE" >&2
fi
| true
|
1756380eba8abbfd8358c3b8c67629904eb41d95
|
Shell
|
polyactis/ICNNPipeline
|
/GenSeq/wigFilteredByHeight.sh
|
UTF-8
| 422
| 2.984375
| 3
|
[] |
no_license
|
#$ -cwd
#$ -l h_data=1024M
#$ -pe shared 4
# Grid Engine job: filter a wig track, keeping entries above a height
# baseline, via the pipeline's wigFilteredByHeight.awk helper.
# Usage: wigFilteredByHeight.sh WIG BASELINE
#   writes the result to WIG.baseline_BASELINE
set -o nounset                              # Treat unset variables as an error
if [ $# -ne 2 ]; then
	echo SYNOPSIS: `basename $0` wig baseline
	exit
fi
# Make sure the pipeline's awk helpers are reachable; the marker string
# "PIPELINE" in $PATH is used to avoid prepending twice.
if [ "x`echo $PATH | grep -o PIPELINE | uniq`" != "xPIPELINE" ]; then
	export PATH=.:~/PIPELINE/bin:$PATH
fi
wig=$1
baseline=$2
# the awk script receives the threshold via -v and reads the wig file
wigFilteredByHeight.awk -v baseline=$baseline $wig > $wig.baseline_$baseline
| true
|
c2f05faaa5427f2e97ba57fecb18e65047517d19
|
Shell
|
jmcalalang/f5-big-iq-lab
|
/lab/scripts/reactivate_licenses.sh
|
UTF-8
| 1,759
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Uncomment set command below for code debuging bash
# set -x
# BIG-IQ must be configured for basic auth, in the console run `set-basic-auth on`
bigiq="10.1.1.4"
bigiq_user="admin"
bigiq_password="purple123"
############# ############# #############
############# License Pool #############
############# ############# #############
generate_post_data() {
cat <<EOF
{
"state": "RELICENSE",
"method": "AUTOMATIC"
}
EOF
}
# Specific to UDF license pools
byolpool[1]="7686f428-3849-4450-a1a2-ea288c6bcbe0" #byol-pool
byolpool[2]="2b161bb3-4579-44f0-8792-398f7f54512e" #byol-pool-access
byolpool[3]="0a2f68b5-1646-4a60-a276-8626e3e9fb8e" #byol-pool-perAppVE
# get length of the array
arraylength=${#byolpool[@]}
for (( i=1; i<${arraylength}+1; i++ ));
do
echo "byol-pool $1"
curl -k -i \
-H "Accept: application/json" \
-H "Content-Type:application/json" \
-X PATCH --data "$(generate_post_data)" "https://$bigiq_user:$bigiq_password@$bigiq/mgmt/cm/device/licensing/pool/purchased-pool/licenses/${byolpool[$i]}"
done
############# ############# #############
############# Utility #############
############# ############# #############
generate_post_data() {
cat <<EOF
{
status: "ACTIVATING_AUTOMATIC"
}
EOF
}
# Specific to UDF license pools
byolutility[1]="A2762-65666-03770-68885-8401075" #byol-pool-utility
# get length of the array
arraylength2=${#byolutility[@]}
for (( i=1; i<${arraylength2}+1; i++ ));
do
echo "byol-utility $1"
curl -k -i \
-H "Accept: application/json" \
-H "Content-Type:application/json" \
-X PATCH --data "$(generate_post_data)" "https://$bigiq_user:$bigiq_password@$bigiq/mgmt/cm/device/licensing/pool/utility/licenses/${byolutility[$i]}"
done
| true
|
89ef06b029a84556b66737987b116820dd218bbf
|
Shell
|
mumbai272/aryalinux
|
/scripts/blfs/2.0/ojdk-conf.sh
|
UTF-8
| 1,561
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
set +h
export PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
export XORG_PREFIX="/usr"
export XORG_CONFIG="--prefix=$XORG_PREFIX --sysconfdir=/etc \
--localstatedir=/var --disable-static"
. /etc/alps/alps.conf
cd $SOURCE_DIR
cat > 1434309266731.sh << "ENDOFFILE"
cat > /etc/profile.d/openjdk.sh << "EOF"
# Begin /etc/profile.d/openjdk.sh
# Set JAVA_HOME directory
JAVA_HOME=/opt/jdk
# Adjust PATH
pathappend $JAVA_HOME/bin
# Add to MANPATH
pathappend $JAVA_HOME/man MANPATH
# Auto Java CLASSPATH: Copy jar files to, or create symlinks in, the
# /usr/share/java directory. Note that having gcj jars with OpenJDK 8
# may lead to errors.
AUTO_CLASSPATH_DIR=/usr/share/java
pathprepend . CLASSPATH
for dir in `find ${AUTO_CLASSPATH_DIR} -type d 2>/dev/null`; do
pathappend $dir CLASSPATH
done
for jar in `find ${AUTO_CLASSPATH_DIR} -name "*.jar" 2>/dev/null`; do
pathappend $jar CLASSPATH
done
export JAVA_HOME
unset AUTO_CLASSPATH_DIR dir jar
# End /etc/profile.d/openjdk.sh
EOF
ENDOFFILE
chmod a+x 1434309266731.sh
sudo ./1434309266731.sh
sudo rm -rf 1434309266731.sh
cat > 1434309266731.sh << "ENDOFFILE"
cat >> /etc/man_db.conf << "EOF" &&
# Begin Java addition
MANDATORY_MANPATH /opt/jdk/man
MANPATH_MAP /opt/jdk/bin /opt/jdk/man
MANDB_MAP /opt/jdk/man /var/cache/man/jdk
# End Java addition
EOF
mkdir -p /var/cache/man
mandb -c /opt/jdk/man
ENDOFFILE
chmod a+x 1434309266731.sh
sudo ./1434309266731.sh
sudo rm -rf 1434309266731.sh
cd $SOURCE_DIR
echo "ojdk-conf=>`date`" | sudo tee -a $INSTALLED_LIST
| true
|
4e28d24e24e33d94c563e26237a7a8ea6e15b018
|
Shell
|
bodgergely/linux_kernel_hacking
|
/boot_x64.sh
|
UTF-8
| 292
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# BRANCH=master
BRANCH=`cat branch-linux`
if [ ! -z $1 ]
then
BRANCH=$1
fi
qemu-system-x86_64 \
-kernel ~/busybox/build/linux-$BRANCH/arch/x86_64/boot/bzImage \
-initrd ~/busybox/build/initramfs-busybox-x86.cpio.gz \
-nographic -append "console=ttyS0 nokaslr" \
-s
| true
|
ce9ba828c7e09ee28250fb1bc07838388c77ba6d
|
Shell
|
freestar4ever/VHTE
|
/start.sh
|
UTF-8
| 1,304
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
function get_random_file {
echo "${1}/$(ls $1 | grep jpg | sort -R | tail -1)"
}
clear
echo "removing all known profiles...date. Current time is: "`date`
rm -Rf profile
echo "Start training for ==steve_jobs=="
python recognition.py --name steve_jobs --profile "faces/steve_jobs/40.jpg" "faces/steve_jobs/9.jpg" "faces/steve_jobs/10.jpg"
echo "Start training for ==Michelle Obama=="
python recognition.py --name michelle_obama --profile "faces/michelle_obama/18.jpg" "faces/michelle_obama/22.jpg" "faces/michelle_obama/36.jpg"
echo "Start training for ==Adrien Brody=="
python recognition.py --name adrien_brody --profile "faces/adrien_brody/13.jpg" "faces/adrien_brody/25.jpg" "faces/adrien_brody/47.jpg"
echo "--------------------"
echo "Done Training!"
steve_jobs="faces/steve_jobs/16.jpg"
michelle_obama="faces/michelle_obama/92.jpg"
adrien_brody="faces/adrien_brody/60.jpg"
echo "--------------------"
echo "This should be steve jobs: ${steve_jobs}"
python recognition.py ${steve_jobs}
echo "--------------------"
echo "This should be michelle obama: ${michelle_obama}"
python recognition.py ${michelle_obama}
echo "--------------------"
echo "This should should be adrien_brody: ${adrien_brody}"
python recognition.py ${adrien_brody}
echo "--------------------"
echo "End!"
| true
|
225e6c72fe910db987f4831d1e998b00581500e4
|
Shell
|
xborder/ClearPhoto
|
/experiments/simple experiments/sat_avg.sh
|
UTF-8
| 338
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
sum=0
count=0
num=0
for i in $(ls $@)
do
# printf "$i\n"
./sat "$i"
# echo "$?"
num="$?"
sum=`expr $sum + $num`
count=`expr $count + 1`
if [ "$num" == 0 ]
then
rm "$i"
fi
done
#avg=`expr $sum/$count`
#printf "Avg= $avg \n"
RESULT=$(awk "BEGIN {printf \"%.2f\",${sum}/${count}}")
printf "$RESULT \n"
exit 0
| true
|
a65e0da1948a4da2bbbc6e5dec07fd583c5bcc6f
|
Shell
|
vipshmily/rt-n56u3
|
/Padavan-build-k2p/shadowsocks.sh
|
UTF-8
| 22,327
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Copyright (C) 2017 openwrt-ssr
# Copyright (C) 2017 yushi studio <ywb94@qq.com>
# Copyright (C) 2018 lean <coolsnowwolf@gmail.com>
# Copyright (C) 2019 chongshengB <bkye@vip.qq.com>
#
# This is free software, licensed under the GNU General Public License v3.
# See /LICENSE for more information.
#
NAME=shadowsocksr
http_username=`nvram get http_username`
CONFIG_FILE=/tmp/${NAME}.json
CONFIG_UDP_FILE=/tmp/${NAME}_u.json
CONFIG_SOCK5_FILE=/tmp/${NAME}_s.json
CONFIG_KUMASOCKS_FILE=/tmp/kumasocks.toml
v2_json_file="/tmp/v2-redir.json"
trojan_json_file="/tmp/tj-redir.json"
v2_bin="/usr/bin/v2ray"
tj_bin="/usr/bin/trojan"
server_count=0
redir_tcp=0
v2ray_enable=0
redir_udp=0
tunnel_enable=0
local_enable=0
pdnsd_enable_flag=0
chinadnsng_enable_flag=0
wan_bp_ips="/tmp/whiteip.txt"
wan_fw_ips="/tmp/blackip.txt"
lan_fp_ips="/tmp/lan_ip.txt"
run_mode=`nvram get ss_run_mode`
ss_turn=`nvram get ss_turn`
lan_con=`nvram get lan_con`
GLOBAL_SERVER=`nvram get global_server`
socks=""
find_bin() {
case "$1" in
ss) ret="/usr/bin/ss-redir" ;;
ss-local) ret="/usr/bin/ss-local" ;;
ssr) ret="/usr/bin/ssr-redir" ;;
ssr-local) ret="/usr/bin/ssr-local" ;;
ssr-server) ret="/usr/bin/ssr-server" ;;
v2ray) ret="$v2_bin" ;;
xray) ret="$v2_bin" ;;
trojan) ret="$tj_bin" ;;
socks5) ret="/usr/bin/ipt2socks" ;;
esac
echo $ret
}
gen_config_file() {
fastopen="false"
case "$2" in
0) config_file=$CONFIG_FILE && local stype=$(nvram get d_type) ;;
1) config_file=$CONFIG_UDP_FILE && local stype=$(nvram get ud_type) ;;
*) config_file=$CONFIG_SOCK5_FILE && local stype=$(nvram get s5_type) ;;
esac
local type=$stype
case "$type" in
ss)
lua /etc_ro/ss/genssconfig.lua $1 $3 >$config_file
sed -i 's/\\//g' $config_file
;;
ssr)
lua /etc_ro/ss/genssrconfig.lua $1 $3 >$config_file
sed -i 's/\\//g' $config_file
;;
trojan)
tj_bin="/usr/bin/trojan"
if [ ! -f "$tj_bin" ]; then
if [ ! -f "/tmp/trojan" ]; then
curl -L -k -s -o /tmp/trojan --connect-timeout 10 --retry 3 https://cdn.jsdelivr.net/gh/eprea/cdn/trojan
if [ ! -f "/tmp/trojan" ]; then
logger -t "SS" "trojan二进制文件下载失败,可能是地址失效或者网络异常!准备切换备用下载地址!"
#curl -L -k -s -o /tmp/trojan --connect-timeout 10 --retry 3 https://bin.wololo.vercel.app/trojan
curl -L -k -s -o /tmp/trojan --connect-timeout 10 --retry 3 https://ghproxy.com/https://github.com/eprea/cdn/blob/master/trojan
if [ ! -f "/tmp/trojan" ]; then
logger -t "SS" "trojan二进制文件备用地址下载失败!请自查网络!"
nvram set ss_enable=0
ssp_close
else
logger -t "SS" "trojan二进制文件备用地址下载成功"
chmod -R 777 /tmp/trojan
tj_bin="/tmp/trojan"
fi
else
logger -t "SS" "trojan二进制文件下载成功"
chmod -R 777 /tmp/trojan
tj_bin="/tmp/trojan"
fi
else
tj_bin="/tmp/trojan"
fi
fi
if [ "$2" = "0" ]; then
lua /etc_ro/ss/gentrojanconfig.lua $1 nat 1080 >$trojan_json_file
sed -i 's/\\//g' $trojan_json_file
else
lua /etc_ro/ss/gentrojanconfig.lua $1 client 10801 >/tmp/trojan-ssr-reudp.json
sed -i 's/\\//g' /tmp/trojan-ssr-reudp.json
fi
;;
v2ray)
v2_bin="/usr/bin/v2ray"
if [ ! -f "$v2_bin" ]; then
if [ ! -f "/tmp/v2ray" ]; then
curl -L -k -s -o /tmp/v2ray --connect-timeout 10 --retry 3 https://cdn.jsdelivr.net/gh/eprea/cdn/xray
if [ ! -f "/tmp/v2ray" ]; then
logger -t "SS" "v2ray二进制文件下载失败,可能是地址失效或者网络异常!准备切换备用下载地址!"
curl -L -k -s -o /tmp/v2ray --connect-timeout 10 --retry 3 https://ghproxy.com/https://github.com/eprea/cdn/blob/master/xray
if [ ! -f "/tmp/v2ray" ]; then
logger -t "SS" "v2ray二进制文件备用地址下载失败!请自查网络!"
nvram set ss_enable=0
ssp_close
else
logger -t "SS" "v2ray二进制文件备用地址下载成功"
chmod -R 777 /tmp/v2ray
v2_bin="/tmp/v2ray"
fi
else
logger -t "SS" "v2ray二进制文件下载成功"
chmod -R 777 /tmp/v2ray
v2_bin="/tmp/v2ray"
fi
else
v2_bin="/tmp/v2ray"
fi
fi
v2ray_enable=1
if [ "$2" = "1" ]; then
lua /etc_ro/ss/genv2config.lua $1 udp 1080 >/tmp/v2-ssr-reudp.json
sed -i 's/\\//g' /tmp/v2-ssr-reudp.json
else
lua /etc_ro/ss/genv2config.lua $1 tcp 1080 >$v2_json_file
sed -i 's/\\//g' $v2_json_file
fi
;;
xray)
v2_bin="/usr/bin/v2ray"
if [ ! -f "$v2_bin" ]; then
if [ ! -f "/tmp/v2ray" ]; then
curl -L -k -s -o /tmp/v2ray --connect-timeout 10 --retry 3 https://cdn.jsdelivr.net/gh/eprea/cdn/xray
if [ ! -f "/tmp/v2ray" ]; then
logger -t "SS" "v2ray二进制文件下载失败,可能是地址失效或者网络异常!准备切换备用下载地址!"
curl -L -k -s -o /tmp/v2ray --connect-timeout 10 --retry 3 https://ghproxy.com/https://github.com/eprea/cdn/blob/master/xray
if [ ! -f "/tmp/v2ray" ]; then
logger -t "SS" "v2ray二进制文件备用地址下载失败!请自查网络!"
nvram set ss_enable=0
ssp_close
else
logger -t "SS" "v2ray二进制文件备用地址下载成功"
chmod -R 777 /tmp/v2ray
v2_bin="/tmp/v2ray"
fi
else
logger -t "SS" "v2ray二进制文件下载成功"
chmod -R 777 /tmp/v2ray
v2_bin="/tmp/v2ray"
fi
else
v2_bin="/tmp/v2ray"
fi
fi
v2ray_enable=1
if [ "$2" = "1" ]; then
lua /etc_ro/ss/genxrayconfig.lua $1 udp 1080 >/tmp/v2-ssr-reudp.json
sed -i 's/\\//g' /tmp/v2-ssr-reudp.json
else
lua /etc_ro/ss/genxrayconfig.lua $1 tcp 1080 >$v2_json_file
sed -i 's/\\//g' $v2_json_file
fi
;;
esac
}
get_arg_out() {
router_proxy="1"
case "$router_proxy" in
1) echo "-o" ;;
2) echo "-O" ;;
esac
}
start_rules() {
logger -t "SS" "正在添加防火墙规则..."
lua /etc_ro/ss/getconfig.lua $GLOBAL_SERVER > /tmp/server.txt
server=`cat /tmp/server.txt`
cat /etc/storage/ss_ip.sh | grep -v '^!' | grep -v "^$" >$wan_fw_ips
cat /etc/storage/ss_wan_ip.sh | grep -v '^!' | grep -v "^$" >$wan_bp_ips
#resolve name
if echo $server | grep -E "^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$" >/dev/null; then
server=${server}
elif [ "$server" != "${server#*:[0-9a-fA-F]}" ]; then
server=${server}
else
server=$(ping ${server} -s 1 -c 1 | grep PING | cut -d'(' -f 2 | cut -d')' -f1)
if echo $server | grep -E "^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$" >/dev/null; then
echo $server >/etc/storage/ssr_ip
else
server=$(cat /etc/storage/ssr_ip)
fi
fi
local_port="1080"
lan_ac_ips=$lan_ac_ips
lan_ac_mode="b"
#if [ "$GLOBAL_SERVER" == "$UDP_RELAY_SERVER" ]; then
# ARG_UDP="-u"
if [ "$UDP_RELAY_SERVER" != "nil" ]; then
ARG_UDP="-U"
lua /etc_ro/ss/getconfig.lua $UDP_RELAY_SERVER > /tmp/userver.txt
udp_server=`cat /tmp/userver.txt`
udp_local_port="1080"
fi
if [ -n "$lan_ac_ips" ]; then
case "$lan_ac_mode" in
w | W | b | B) ac_ips="$lan_ac_mode$lan_ac_ips" ;;
esac
fi
#ac_ips="b"
gfwmode=""
if [ "$run_mode" = "gfw" ]; then
gfwmode="-g"
elif [ "$run_mode" = "router" ]; then
gfwmode="-r"
elif [ "$run_mode" = "oversea" ]; then
gfwmode="-c"
elif [ "$run_mode" = "all" ]; then
gfwmode="-z"
fi
if [ "$lan_con" = "0" ]; then
rm -f $lan_fp_ips
lancon="all"
lancons="全部IP走代理"
cat /etc/storage/ss_lan_ip.sh | grep -v '^!' | grep -v "^$" >$lan_fp_ips
elif [ "$lan_con" = "1" ]; then
rm -f $lan_fp_ips
lancon="bip"
lancons="指定IP走代理,请到规则管理页面添加需要走代理的IP。"
cat /etc/storage/ss_lan_bip.sh | grep -v '^!' | grep -v "^$" >$lan_fp_ips
fi
dports=$(nvram get s_dports)
if [ $dports = "0" ]; then
proxyport=" "
else
proxyport="-m multiport --dports 22,53,587,465,995,993,143,80,443"
fi
/usr/bin/ss-rules \
-s "$server" \
-l "$local_port" \
-S "$udp_server" \
-L "$udp_local_port" \
-a "$ac_ips" \
-i "" \
-b "$wan_bp_ips" \
-w "$wan_fw_ips" \
-p "$lan_fp_ips" \
-G "$lan_gm_ips" \
-G "$lan_gm_ips" \
-D "$proxyport" \
-k "$lancon" \
$(get_arg_out) $gfwmode $ARG_UDP
return $?
}
start_redir_tcp() {
ARG_OTA=""
gen_config_file $GLOBAL_SERVER 0 1080
stype=$(nvram get d_type)
local bin=$(find_bin $stype)
[ ! -f "$bin" ] && echo "$(date "+%Y-%m-%d %H:%M:%S") Main node:Can't find $bin program, can't start!" >>/tmp/ssrplus.log && return 1
if [ "$(nvram get ss_threads)" = "0" ]; then
threads=$(cat /proc/cpuinfo | grep 'processor' | wc -l)
else
threads=$(nvram get ss_threads)
fi
logger -t "SS" "启动$stype主服务器..."
case "$stype" in
ss | ssr)
last_config_file=$CONFIG_FILE
pid_file="/tmp/ssr-retcp.pid"
for i in $(seq 1 $threads); do
$bin -c $CONFIG_FILE $ARG_OTA -f /tmp/ssr-retcp_$i.pid >/dev/null 2>&1
usleep 500000
done
redir_tcp=1
echo "$(date "+%Y-%m-%d %H:%M:%S") Shadowsocks/ShadowsocksR $threads 线程启动成功!" >>/tmp/ssrplus.log
;;
trojan)
for i in $(seq 1 $threads); do
$bin --config $trojan_json_file >>/tmp/ssrplus.log 2>&1 &
usleep 500000
done
echo "$(date "+%Y-%m-%d %H:%M:%S") $($bin --version 2>&1 | head -1) Started!" >>/tmp/ssrplus.log
;;
v2ray)
$bin -config $v2_json_file >/dev/null 2>&1 &
echo "$(date "+%Y-%m-%d %H:%M:%S") $($bin -version | head -1) 启动成功!" >>/tmp/ssrplus.log
;;
xray)
$bin -config $v2_json_file >/dev/null 2>&1 &
echo "$(date "+%Y-%m-%d %H:%M:%S") $($bin -version | head -1) 启动成功!" >>/tmp/ssrplus.log
;;
socks5)
for i in $(seq 1 $threads); do
lua /etc_ro/ss/gensocks.lua $GLOBAL_SERVER 1080 >/dev/null 2>&1 &
usleep 500000
done
;;
esac
return 0
}
start_redir_udp() {
if [ "$UDP_RELAY_SERVER" != "nil" ]; then
redir_udp=1
logger -t "SS" "启动$utype游戏UDP中继服务器"
utype=$(nvram get ud_type)
local bin=$(find_bin $utype)
[ ! -f "$bin" ] && echo "$(date "+%Y-%m-%d %H:%M:%S") UDP TPROXY Relay:Can't find $bin program, can't start!" >>/tmp/ssrplus.log && return 1
case "$utype" in
ss | ssr)
ARG_OTA=""
gen_config_file $UDP_RELAY_SERVER 1 1080
last_config_file=$CONFIG_UDP_FILE
pid_file="/var/run/ssr-reudp.pid"
$bin -c $last_config_file $ARG_OTA -U -f /var/run/ssr-reudp.pid >/dev/null 2>&1
;;
v2ray)
gen_config_file $UDP_RELAY_SERVER 1
$bin -config /tmp/v2-ssr-reudp.json >/dev/null 2>&1 &
;;
xray)
gen_config_file $UDP_RELAY_SERVER 1
$bin -config /tmp/v2-ssr-reudp.json >/dev/null 2>&1 &
;;
trojan)
gen_config_file $UDP_RELAY_SERVER 1
$bin --config /tmp/trojan-ssr-reudp.json >/dev/null 2>&1 &
ipt2socks -U -b 0.0.0.0 -4 -s 127.0.0.1 -p 10801 -l 1080 >/dev/null 2>&1 &
;;
socks5)
echo "1"
;;
esac
fi
return 0
}
ss_switch=$(nvram get backup_server)
if [ $ss_switch != "nil" ]; then
switch_time=$(nvram get ss_turn_s)
switch_timeout=$(nvram get ss_turn_ss)
#/usr/bin/ssr-switch start $switch_time $switch_timeout &
socks="-o"
fi
#return $?
start_dns() {
case "$run_mode" in
router)
echo "create china hash:net family inet hashsize 1024 maxelem 65536" >/tmp/china.ipset
awk '!/^$/&&!/^#/{printf("add china %s'" "'\n",$0)}' /etc/storage/chinadns/chnroute.txt >>/tmp/china.ipset
ipset -! flush china
ipset -! restore </tmp/china.ipset 2>/dev/null
rm -f /tmp/china.ipset
if [ $(nvram get ss_chdns) = 1 ]; then
chinadnsng_enable_flag=1
logger -t "SS" "下载cdn域名文件..."
wget --no-check-certificate --timeout=8 -qO - https://gitee.com/bkye/rules/raw/master/cdn.txt > /tmp/cdn.txt
if [ ! -f "/tmp/cdn.txt" ]; then
logger -t "SS" "cdn域名文件下载失败,可能是地址失效或者网络异常!可能会影响部分国内域名解析了国外的IP!"
else
logger -t "SS" "cdn域名文件下载成功"
fi
logger -st "SS" "启动chinadns..."
dns2tcp -L"127.0.0.1#5353" -R"$(nvram get tunnel_forward)" >/dev/null 2>&1 &
chinadns-ng -b 0.0.0.0 -l 65353 -c $(nvram get china_dns) -t 127.0.0.1#5353 -4 china -M -m /tmp/cdn.txt >/dev/null 2>&1 &
sed -i '/no-resolv/d' /etc/storage/dnsmasq/dnsmasq.conf
sed -i '/server=127.0.0.1/d' /etc/storage/dnsmasq/dnsmasq.conf
cat >> /etc/storage/dnsmasq/dnsmasq.conf << EOF
no-resolv
server=127.0.0.1#65353
EOF
fi
;;
gfw)
if [ $(nvram get pdnsd_enable) = 0 ]; then
dnsstr="$(nvram get tunnel_forward)"
dnsserver=$(echo "$dnsstr" | awk -F '#' '{print $1}')
#dnsport=$(echo "$dnsstr" | awk -F '#' '{print $2}')
ipset add gfwlist $dnsserver 2>/dev/null
logger -st "SS" "启动dns2tcp:5353端口..."
dns2tcp -L"127.0.0.1#5353" -R"$dnsstr" >/dev/null 2>&1 &
pdnsd_enable_flag=0
logger -st "SS" "开始处理gfwlist..."
fi
;;
oversea)
ipset add gfwlist $dnsserver 2>/dev/null
mkdir -p /etc/storage/dnsmasq.oversea
sed -i '/dnsmasq-ss/d' /etc/storage/dnsmasq/dnsmasq.conf
sed -i '/dnsmasq.oversea/d' /etc/storage/dnsmasq/dnsmasq.conf
cat >>/etc/storage/dnsmasq/dnsmasq.conf <<EOF
conf-dir=/etc/storage/dnsmasq.oversea
EOF
;;
*)
ipset -N ss_spec_wan_ac hash:net 2>/dev/null
ipset add ss_spec_wan_ac $dnsserver 2>/dev/null
;;
esac
/sbin/restart_dhcpd
}
start_AD() {
mkdir -p /tmp/dnsmasq.dom
curl -k -s -o /tmp/adnew.conf --connect-timeout 10 --retry 3 $(nvram get ss_adblock_url)
if [ ! -f "/tmp/adnew.conf" ]; then
logger -t "SS" "AD文件下载失败,可能是地址失效或者网络异常!"
else
logger -t "SS" "AD文件下载成功"
if [ -f "/tmp/adnew.conf" ]; then
check = `grep -wq "address=" /tmp/adnew.conf`
if [ ! -n "$check" ] ; then
cp /tmp/adnew.conf /tmp/dnsmasq.dom/ad.conf
else
cat /tmp/adnew.conf | grep ^\|\|[^\*]*\^$ | sed -e 's:||:address\=\/:' -e 's:\^:/0\.0\.0\.0:' > /tmp/dnsmasq.dom/ad.conf
fi
fi
fi
rm -f /tmp/adnew.conf
}
# ================================= 启动 Socks5代理 ===============================
start_local() {
local s5_port=$(nvram get socks5_port)
local local_server=$(nvram get socks5_enable)
[ "$local_server" == "nil" ] && return 1
[ "$local_server" == "same" ] && local_server=$GLOBAL_SERVER
local type=$(nvram get s5_type)
local bin=$(find_bin $type)
[ ! -f "$bin" ] && echo "$(date "+%Y-%m-%d %H:%M:%S") Global_Socks5:Can't find $bin program, can't start!" >>/tmp/ssrplus.log && return 1
case "$type" in
ss | ssr)
local name="Shadowsocks"
local bin=$(find_bin ss-local)
[ ! -f "$bin" ] && echo "$(date "+%Y-%m-%d %H:%M:%S") Global_Socks5:Can't find $bin program, can't start!" >>/tmp/ssrplus.log && return 1
[ "$type" == "ssr" ] && name="ShadowsocksR"
gen_config_file $local_server 3 $s5_port
$bin -c $CONFIG_SOCK5_FILE -u -f /var/run/ssr-local.pid >/dev/null 2>&1
echo "$(date "+%Y-%m-%d %H:%M:%S") Global_Socks5:$name Started!" >>/tmp/ssrplus.log
;;
v2ray)
lua /etc_ro/ss/genv2config.lua $local_server tcp 0 $s5_port >/tmp/v2-ssr-local.json
sed -i 's/\\//g' /tmp/v2-ssr-local.json
$bin -config /tmp/v2-ssr-local.json >/dev/null 2>&1 &
echo "$(date "+%Y-%m-%d %H:%M:%S") Global_Socks5:$($bin -version | head -1) Started!" >>/tmp/ssrplus.log
;;
xray)
lua /etc_ro/ss/genxrayconfig.lua $local_server tcp 0 $s5_port >/tmp/v2-ssr-local.json
sed -i 's/\\//g' /tmp/v2-ssr-local.json
$bin -config /tmp/v2-ssr-local.json >/dev/null 2>&1 &
echo "$(date "+%Y-%m-%d %H:%M:%S") Global_Socks5:$($bin -version | head -1) Started!" >>/tmp/ssrplus.log
;;
trojan)
lua /etc_ro/ss/gentrojanconfig.lua $local_server client $s5_port >/tmp/trojan-ssr-local.json
sed -i 's/\\//g' /tmp/trojan-ssr-local.json
$bin --config /tmp/trojan-ssr-local.json >/dev/null 2>&1 &
echo "$(date "+%Y-%m-%d %H:%M:%S") Global_Socks5:$($bin --version 2>&1 | head -1) Started!" >>/tmp/ssrplus.log
;;
*)
[ -e /proc/sys/net/ipv6 ] && local listenip='-i ::'
microsocks $listenip -p $s5_port ssr-local >/dev/null 2>&1 &
echo "$(date "+%Y-%m-%d %H:%M:%S") Global_Socks5:$type Started!" >>/tmp/ssrplus.log
;;
esac
local_enable=1
return 0
}
rules() {
[ "$GLOBAL_SERVER" = "nil" ] && return 1
UDP_RELAY_SERVER=$(nvram get udp_relay_server)
if [ "$UDP_RELAY_SERVER" = "same" ]; then
UDP_RELAY_SERVER=$GLOBAL_SERVER
fi
if start_rules; then
return 0
else
return 1
fi
}
start_watchcat() {
if [ $(nvram get ss_watchcat) = 1 ]; then
let total_count=server_count+redir_tcp+redir_udp+tunnel_enable+v2ray_enable+local_enable+pdnsd_enable_flag+chinadnsng_enable_flag
if [ $total_count -gt 0 ]; then
#param:server(count) redir_tcp(0:no,1:yes) redir_udp tunnel kcp local gfw
/usr/bin/ssr-monitor $server_count $redir_tcp $redir_udp $tunnel_enable $v2ray_enable $local_enable $pdnsd_enable_flag $chinadnsng_enable_flag >/dev/null 2>&1 &
fi
fi
}
auto_update() {
sed -i '/update_chnroute/d' /etc/storage/cron/crontabs/$http_username
sed -i '/update_gfwlist/d' /etc/storage/cron/crontabs/$http_username
sed -i '/ss-watchcat/d' /etc/storage/cron/crontabs/$http_username
if [ $(nvram get ss_update_chnroute) = "1" ]; then
cat >>/etc/storage/cron/crontabs/$http_username <<EOF
0 8 */10 * * /usr/bin/update_chnroute.sh > /dev/null 2>&1
EOF
fi
if [ $(nvram get ss_update_gfwlist) = "1" ]; then
cat >>/etc/storage/cron/crontabs/$http_username <<EOF
0 7 */10 * * /usr/bin/update_gfwlist.sh > /dev/null 2>&1
EOF
fi
}
# ================================= 启动 SS ===============================
ssp_start() {
ss_enable=`nvram get ss_enable`
if rules; then
if start_redir_tcp; then
start_redir_udp
#start_rules
#start_AD
start_dns
fi
fi
start_local
start_watchcat
auto_update
ENABLE_SERVER=$(nvram get global_server)
[ "$ENABLE_SERVER" = "-1" ] && return 1
logger -t "SS" "启动成功。"
logger -t "SS" "内网IP控制为:$lancons"
nvram set check_mode=0
}
# ================================= 关闭SS ===============================
ssp_close() {
rm -rf /tmp/cdn
/usr/bin/ss-rules -f
kill -9 $(ps | grep ssr-switch | grep -v grep | awk '{print $1}') >/dev/null 2>&1
kill -9 $(ps | grep ssr-monitor | grep -v grep | awk '{print $1}') >/dev/null 2>&1
kill_process
sed -i '/no-resolv/d' /etc/storage/dnsmasq/dnsmasq.conf
sed -i '/server=127.0.0.1/d' /etc/storage/dnsmasq/dnsmasq.conf
sed -i '/cdn/d' /etc/storage/dnsmasq/dnsmasq.conf
sed -i '/gfwlist/d' /etc/storage/dnsmasq/dnsmasq.conf
sed -i '/dnsmasq.oversea/d' /etc/storage/dnsmasq/dnsmasq.conf
if [ -f "/etc/storage/dnsmasq-ss.d" ]; then
rm -f /etc/storage/dnsmasq-ss.d
fi
clear_iptable
/sbin/restart_dhcpd
}
clear_iptable()
{
s5_port=$(nvram get socks5_port)
iptables -t filter -D INPUT -p tcp --dport $s5_port -j ACCEPT
iptables -t filter -D INPUT -p tcp --dport $s5_port -j ACCEPT
ip6tables -t filter -D INPUT -p tcp --dport $s5_port -j ACCEPT
ip6tables -t filter -D INPUT -p tcp --dport $s5_port -j ACCEPT
}
kill_process() {
v2ray_process=$(pidof v2ray)
if [ -n "$v2ray_process" ]; then
logger -t "SS" "关闭V2Ray进程..."
killall v2ray >/dev/null 2>&1
kill -9 "$v2ray_process" >/dev/null 2>&1
fi
ssredir=$(pidof ss-redir)
if [ -n "$ssredir" ]; then
logger -t "SS" "关闭ss-redir进程..."
killall ss-redir >/dev/null 2>&1
kill -9 "$ssredir" >/dev/null 2>&1
fi
rssredir=$(pidof ssr-redir)
if [ -n "$rssredir" ]; then
logger -t "SS" "关闭ssr-redir进程..."
killall ssr-redir >/dev/null 2>&1
kill -9 "$rssredir" >/dev/null 2>&1
fi
sslocal_process=$(pidof ss-local)
if [ -n "$sslocal_process" ]; then
logger -t "SS" "关闭ss-local进程..."
killall ss-local >/dev/null 2>&1
kill -9 "$sslocal_process" >/dev/null 2>&1
fi
trojandir=$(pidof trojan)
if [ -n "$trojandir" ]; then
logger -t "SS" "关闭trojan进程..."
killall trojan >/dev/null 2>&1
kill -9 "$trojandir" >/dev/null 2>&1
fi
kumasocks_process=$(pidof kumasocks)
if [ -n "$kumasocks_process" ]; then
logger -t "SS" "关闭kumasocks进程..."
killall kumasocks >/dev/null 2>&1
kill -9 "$kumasocks_process" >/dev/null 2>&1
fi
ipt2socks_process=$(pidof ipt2socks)
if [ -n "$ipt2socks_process" ]; then
logger -t "SS" "关闭ipt2socks进程..."
killall ipt2socks >/dev/null 2>&1
kill -9 "$ipt2socks_process" >/dev/null 2>&1
fi
socks5_process=$(pidof srelay)
if [ -n "$socks5_process" ]; then
logger -t "SS" "关闭socks5进程..."
killall srelay >/dev/null 2>&1
kill -9 "$socks5_process" >/dev/null 2>&1
fi
ssrs_process=$(pidof ssr-server)
if [ -n "$ssrs_process" ]; then
logger -t "SS" "关闭ssr-server进程..."
killall ssr-server >/dev/null 2>&1
kill -9 "$ssrs_process" >/dev/null 2>&1
fi
cnd_process=$(pidof chinadns-ng)
if [ -n "$cnd_process" ]; then
logger -t "SS" "关闭chinadns-ng进程..."
killall chinadns-ng >/dev/null 2>&1
kill -9 "$cnd_process" >/dev/null 2>&1
fi
dns2tcp_process=$(pidof dns2tcp)
if [ -n "$dns2tcp_process" ]; then
logger -t "SS" "关闭dns2tcp进程..."
killall dns2tcp >/dev/null 2>&1
kill -9 "$dns2tcp_process" >/dev/null 2>&1
fi
microsocks_process=$(pidof microsocks)
if [ -n "$microsocks_process" ]; then
logger -t "SS" "关闭socks5服务端进程..."
killall microsocks >/dev/null 2>&1
kill -9 "$microsocks_process" >/dev/null 2>&1
fi
}
# ================================= 重启 SS ===============================
ressp() {
BACKUP_SERVER=$(nvram get backup_server)
start_redir $BACKUP_SERVER
start_rules $BACKUP_SERVER
start_dns
start_local
start_watchcat
auto_update
ENABLE_SERVER=$(nvram get global_server)
logger -t "SS" "备用服务器启动成功"
logger -t "SS" "内网IP控制为:$lancons"
}
case $1 in
start)
ssp_start
;;
stop)
killall -q -9 ssr-switch
ssp_close
;;
restart)
ssp_close
ssp_start
;;
reserver)
ssp_close
ressp
;;
*)
echo "check"
#exit 0
;;
esac
| true
|
45a40bc7b1e8790030b2b1577176e4fde207bbf6
|
Shell
|
A2-Collaboration/epics
|
/scripts/cbrem/scripts/setPlane.sh
|
UTF-8
| 970
| 3.75
| 4
|
[
"EPICS"
] |
permissive
|
#!/bin/sh
#Script to set the coherent peak to para (1) or perp (2)
#
#source the epics env to get the pathe to caget etc.
. /opt/epics/thisEPICS.sh
if [ $# -ne "3" ]; then
echo
echo "Usage: $0 <P> <G> <plane>"
echo " P and G are the EPICS macros for goni and cbrem. Eg. HD:CBREM: HD:GONI:";
echo " <plane> = PARA or PERP"
fi
P=$1;
G=$2;
plane=$3;
if [ $plane = "PARA" ] || [ $plane = "PERP" ]; then
pitch=`caget -t ${P}${plane}_PITCH`
yaw=`caget -t ${P}${plane}_YAW`
#call the function to move on pitch and yaw
warning="This will put the crystal in the ${plane} setting by going to:\n YAW = $yaw, and PITCH = $pitch \nARE YOU SURE YOU WANT TO DO THIS?";
if ! zenity --title="Adjust coherent bremsstrahlung conditions" --question --text "$warning" --width 650 --height 100; then
exit;
fi
caput "${G}PITCH" $pitch;
caput "${G}YAW" $yaw;
exit;
fi
echo "ERROR plane \"${plane}\" not known";
exit;
| true
|
d90af8ac59f5f5636d2210eb945b509a61a933bf
|
Shell
|
yeyinzhu321/mutual-ssl-demo
|
/src/main/resources/shell/2.intermediate.sh
|
UTF-8
| 1,329
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# create intermediate intermediate
mkdir /root/ca/intermediate
cd /root/ca/intermediate
mkdir certs crl csr newcerts private
chmod 700 private
touch index.txt
echo 1000 > serial
echo 1000 > crlnumber
# update the configuration file
cp -p /root/ca/openssl.cnf /root/ca/intermediate/openssl.cnf
vi /root/ca/intermediate/openssl.cnf
# create intermediate key, input 'intermediate' as secret key
openssl genrsa -des3 \
-out private/intermediate.key.pem 4096
chmod 400 private/intermediate.key.pem
# create intermediate certificate, after signed, the index.txt will have a record for the it
openssl req -config openssl.cnf -new -sha256 \
-key private/intermediate.key.pem \
-out csr/intermediate.csr.pem
cd /root/ca
openssl ca -config openssl.cnf -extensions v3_intermediate_ca \
-days 3650 -notext -md sha256 \
-in intermediate/csr/intermediate.csr.pem \
-out intermediate/certs/intermediate.cert.pem
chmod 444 intermediate/certs/intermediate.cert.pem
# verify the intermediate cert information
openssl x509 -noout -text -in intermediate/certs/intermediate.cert.pem
# create the certificate chain file
cd /root/ca
cat intermediate/certs/intermediate.cert.pem \
certs/ca.cert.pem > intermediate/certs/ca-chain.cert.pem
chmod 444 intermediate/certs/ca-chain.cert.pem
| true
|
869cea640f3e0a84453dbb038e0d09ec33d24343
|
Shell
|
CumulusNetworks/DUE
|
/templates/common-templates/post-install-config.sh.template
|
UTF-8
| 744
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# DUE_VERSION_COMPATIBILITY_TRACKING=1.0.0
# SCRIPT_PURPOSE: Run configuration inside the docker container, after package install.
# NOTE: this script will normally be overridden by one in the container template directory.
# Bash should be installed at this point.
# Copyright 2021,2022 NVIDIA Corporation. All rights reserved.
# Copyright 2019 Cumulus Networks, Inc. All rights reserved.
#
# SPDX-License-Identifier: MIT
. /due-configuration/install-config-common.lib
# Add any additional repository keys
fxnInstallAptKeys
# Add any additional sources.list files
fxnInstallSourcesList
# The default post install operations exist as a function
# for easy invocation (or not) in overriding scripts.
fxnPostInstallCommon
| true
|
b6f9b0d32b01310c38043532039b6e0d8bdc883a
|
Shell
|
wtmJepsen/Paracooba
|
/aws-run-march.sh
|
UTF-8
| 666
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
DAEMON_THREADS=$(grep -c ^processor /proc/cpuinfo)
let DAEMON_THREADS=DAEMON_THREADS/2
if [ -z ${AWS_BATCH_JOB_MAIN_NODE_PRIVATE_IPV4_ADDRESS+x} ]; then
aws s3 cp s3://${S3_BKT}/${COMP_S3_PROBLEM_PATH} $DIR/build/problem.cnf
$DIR/build/parac --march-cubes --march-path $DIR/build/third_party/March/march_cu --resplit-cubes $DIR/build/problem.icnf $@
else
$DIR/build/parac --daemon --resplit-cubes --threads $DAEMON_THREADS $Q
fi
# if [ $? -ne 0 ]; then
# echo "Return code of parac was not successful! Try to print coredump."
# coredumpctl dump
# fi
| true
|
05effa0af533943eed12638c0e5d109ff713559f
|
Shell
|
calidion/node-auto
|
/node-auto.sh
|
UTF-8
| 1,022
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# or git clone git://github.com/joyent/node.git if you want to checkout a stable tag
git clone --depth 1 git://github.com/joyent/node.git
cd node
if [ -z $1 ]; then
echo "checking default version v0.10.6"
git checkout v0.10.6 # optional. Note that master is unstable.
else
echo "checking custom version $1"
git checkout $1 # optional. Note that master is unstable.
fi
if [ -z $2 ]; then
echo "using local default location"
location=~/local/node
else
echo "using custom location"
if [ -d $2 ]; then
location=$2
else
echo "directory not found!"
echo "using default location instead"
location=~/local/node
fi
fi
./configure --prefix=$location
make -j2 # -j sets the number of jobs to run
make install
export NODE_PATH=$location:$location/lib/node_modules
export PATH=$PATH:$location/bin
# ~/.bash_profile or ~/.bashrc on some systems
echo "export NODE_PATH=$location:$location/lib/node_modules" >> ~/.profile
echo "export PATH=$PATH:$location/bin" >> ~/.profile
| true
|
6f1b9ab3ad7f4e338d371a81d579533a26e6fd92
|
Shell
|
SuJiKiNen/pyfiledir
|
/shell/completion.bash
|
UTF-8
| 1,945
| 3.90625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
_pyfiledir_setup_pythonpath(){
_PYFILEDIR_PATH="$(cd "$(dirname "$(dirname "${BASH_SOURCE[0]}")")" || return; pwd -P )"
if [ "$OSTYPE" = "msys" ]; then
PYTHONPATH=${PYTHONPATH:+${PYTHONPATH};}$_PYFILEDIR_PATH
else
#----------|-if PYTHONPATH not empty add leading colon |
PYTHONPATH=${PYTHONPATH:+${PYTHONPATH}:}$_PYFILEDIR_PATH
fi
export PYTHONPATH
unset _PYFILEDIR_PATH
}
_pyfiledir_setup_pythonpath
_pyfiledir_completion() {
# use printf to handle space,parentheses etc in filename properly
# see https://stackoverflow.com/questions/1146098/properly-handling-spaces-and-quotes-in-bash-completion
local IFS
local cur
local length
local words
IFS=$'\n'
cur="${COMP_WORDS[COMP_CWORD]}"
cur=$(eval printf '%s' "$cur") # unquote current input
if [ -z "$cur" ]; then
return 0
fi
if [ "$OSTYPE" = "msys" ] && command -v dos2unix >/dev/null 2>&1; then
words=$(pyfiledir "$cur" | dos2unix)
else
words=$(pyfiledir "$cur")
fi
words=($(compgen -W "${words[*]}"))
if [ "$OSTYPE" = "msys" ] && command -v cygpath > /dev/null 2>&1; then
# convert Windows style path to Unix One
# like D:\ => /d/
for ix in "${!words[@]}"; do
words[$ix]=$(cygpath -u "${words[$ix]}")
done
fi
length="${#words[@]}"
if [ "$length" -eq 0 ]; then
COMPREPLY=()
else
COMPREPLY=($(printf '%q'"$IFS" "${words[@]}"))
fi
}
if [ -z "$PYFILEDIR_BASH_COMPLETE_OPTIONS" ]; then
PYFILEDIR_BASH_COMPLETE_OPTIONS="-o nospace -o bashdefault -o default -F _pyfiledir_completion"
fi
if [ -z "$PYFILEDIR_BASH_COMPLETION_COMMANDS" ]; then
PYFILEDIR_BASH_COMPLETION_COMMANDS="cat cd cp emacs ln ls mkdir mv rm rmdir vi vim wc"
fi
if [ -n "$BASH_VERSION" ]; then
complete $PYFILEDIR_BASH_COMPLETE_OPTIONS $PYFILEDIR_BASH_COMPLETION_COMMANDS
fi
| true
|
53ad4f23ef8739ff9b3431b23a77ebe3714dd328
|
Shell
|
pki-io/build
|
/scripts/build/centos6/centos6.sh
|
UTF-8
| 237
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
set -e
set -u
set -x
GO_PACKAGE=go1.4.1.linux-amd64.tar.gz
if [ -e *.rpm ]; then
rm *.rpm
fi
cp $HOME/$GO_PACKAGE ./
docker build -t=centos6 .
rm $GO_PACKAGE
docker run -it --rm -v `pwd`:/pki centos6 /bin/sh /pki/makeRPM.sh
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.