blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7eec85072c3e2d556dd28fbd6b3d53048a99f090
|
Shell
|
akiljames83/linux_shell
|
/localusers/first-section/test.sh
|
UTF-8
| 126
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Echo every line of file.txt to stdout (counting/limit logic left commented out).
COUNT=0
# IFS= and -r preserve leading/trailing whitespace and literal backslashes;
# the original plain 'read line' mangled both.
while IFS= read -r line
do
#COUNT=$((COUNT+1))
#if [ "${COUNT}" = "10" ]
#then
# Quote the expansion so whitespace runs and glob characters survive intact.
echo "$line"
#fi
done < "file.txt"
| true
|
4139da769124e5d2c054fc9660cd90556615c362
|
Shell
|
nsfabina/gcrmn-benthic-classification
|
/gcrmnbc/data_acquisition/download_global_feature_quads.sh
|
UTF-8
| 267
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Download Coral Atlas global mosaic quads (L15-*.tif) into the scratch area.
DIR_DEST='/scratch/nfabina/gcrmn-benthic-classification/global_data'
# mkdir -p is a no-op when the directory exists, so the original
# [[ ! -d ... ]] guard was redundant.
mkdir -p "${DIR_DEST}"
# -m parallel transfers, -n no-clobber (skip existing), -r recursive.
gsutil -m cp -n -r "gs://coral-atlas-data-share/coral_reefs_2018_visual_v1_mosaic/**/L15-*.tif" "${DIR_DEST}"
| true
|
9488067ee9a60cf53c05ce821223126b6155db11
|
Shell
|
chrisgeo/yeti
|
/scripts/bootstrap.sh
|
UTF-8
| 504
| 3.125
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/sh
# Requires Mac OS X.
# Bootstrap script: install homebrew (if absent), node + npm, then link the
# npm and yeti binaries into /usr/local/bin.
# Homebrew presence is inferred from /usr/local/.git existing.
[ ! -e /usr/local/.git ] && echo "Installing homebrew." \
&& ruby -e "$(curl -fsS http://gist.github.com/raw/323731/install_homebrew.rb)"
echo "Installing node, npm, etc."
brew install node
# Homebrew's npm install is horribly broken, again
# Fall back to the upstream installer when npm is not on PATH.
[ -f "$(which npm 2>/dev/null)" ] || curl http://npmjs.org/install.sh | sh
echo "Linking npm and yeti."
ln -s `brew --prefix node`/bin/npm /usr/local/bin/npm
ln -s `brew --prefix node`/bin/yeti /usr/local/bin/yeti
# Reset the shell's command hash table so the new links take effect immediately.
hash -r
| true
|
59b639f3638f9fc15fa87ef93b14fe8edace9792
|
Shell
|
Appdynamics/apigee-monitoring-extension
|
/analytics/analytics_events.sh
|
UTF-8
| 5,559
| 3.3125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#Send Analytics Events to AppD
# NOTE(review): this script expects apigee_conf_file, log_path, biq_perf_metrics
# and the found_401/403/4xx/502/503/504/5xx flags to already be set by the
# sourcing/invoking script - confirm against the caller.
schema_template="analytics/schema.json"
markerfile="analytics/schema.markerfile" # The existence of this file will prevent the creation of a new schema. Delete it if a new schema is required.
biq_request_payload="biq_request_payload.json"
# Read BiQ connection settings from the shared JSON config.
schema_name=$(jq -r '.analytics_details[].schema_name' < ${apigee_conf_file})
analytics_ep=$(jq -r '.analytics_details[].analytics_endpoint' < ${apigee_conf_file})
analytics_key=$(jq -r '.analytics_details[].analytics_key' < ${apigee_conf_file})
global_account_name=$(jq -r '.analytics_details[].global_account_name' < ${apigee_conf_file})
proxy_url=$(jq -r '.analytics_details[].proxy_url' < ${apigee_conf_file})
proxy_port=$(jq -r '.analytics_details[].proxy_port' < ${apigee_conf_file})
connection_timeout_seconds=$(jq -r '.analytics_details[].connection_timeout_seconds' < ${apigee_conf_file})
echo "endpoint - $analytics_ep "
echo "key - ***"
echo "global account name - $global_account_name"
echo "Proxy URL - $proxy_url"
echo "Proxy port - $proxy_port"
echo "connection_timeout_seconds - $connection_timeout_seconds"
echo "schema_name - $schema_name"
# Mandatory settings: bail out (exit 0, non-fatal to the caller) when missing.
if [ -z "$analytics_ep" ] || [ -z "$analytics_key" ] || [ -z "$global_account_name" ] ; then
msg=" analytics endpoint, analytics key and global account name must be filled in the config.json file - if BiQ is enabled"
echo "${msg}"
echo "[$(date '+%d-%m-%Y %H:%M:%S')] [FATAL] ${msg}" >> ${log_path}
exit 0
fi
# Fix: the original tested the same variable twice ('-z a || -z a');
# one emptiness test is sufficient.
if [ -z "$connection_timeout_seconds" ]; then
connection_timeout_seconds=30 #Defaults to 30 seconds if not defined
echo "Connection timeout not defined, assigned the default value - $connection_timeout_seconds"
fi
if [ -z "$proxy_url" ] || [ -z "$proxy_port" ]; then
echo "Not Using proxy"
proxy_details=""
else
echo "Using proxy - $proxy_url:$proxy_port"
proxy_details="-x $proxy_url:$proxy_port"
fi
# Create the schema once; the marker file records that it already exists.
if [ ! -f "${markerfile}" ]; then
curl_response_code=$(curl -X POST "${analytics_ep}/events/schema/$schema_name" -H"X-Events-API-AccountName:${global_account_name}" -H"X-Events-API-Key:${analytics_key}" -H"Content-type: application/vnd.appd.events+json;v=2" --data @${schema_template} -s -w "%{http_code}" --connect-timeout $connection_timeout_seconds $proxy_details)
echo "Create Schema response code $curl_response_code"
if [ "${curl_response_code}" -eq 201 ]; then
msg=" The ${schema_name} schema was succesfully created. And marker file was also created- this file ensures the post request is made once"
echo "${msg}"
echo "[$(date '+%d-%m-%Y %H:%M:%S')] [INFO] ${msg}" >> ${log_path}
touch ${markerfile}
fi
else
# Fix: the original read 'msg= "==>..."' (space after '='), which assigned an
# empty msg and then tried to execute the message text as a command.
msg="==>Marker file exist. This means $schema_name already exist. Skipping"
echo "${msg}"
echo "[$(date '+%d-%m-%Y %H:%M:%S')] [INFO] ${msg}" >> ${log_path}
fi
#normalize the JSON payload from Apigee response to match AppDynamics Schema.
######### APIPROXY #############
# #normalize the JSON payload from Apigee response to match AppDynamics Schema.
if [ "${found_401}" = "true" ] || [ "${found_403}" = "true" ] || [ "${found_4xx}" = "true" ] || [ "${found_502}" = "true" ] || [ "${found_503}" = "true" ] || [ "${found_504}" = "true" ] || [ "${found_5xx}" = "true" ]; then
echo "Some 401, 403, 4xx, 502, 503, 504 or 5xx error files are found..merging json"
#jq -s '[ .[0] + .[1] | group_by(.apiproxy)[] | add ]' biq_prepped*.json > temp_${biq_request_payload}
jq -s '[ .[0] + .[1] + .[2] + .[3] + .[4] + .[5] + .[6] | group_by(.apiproxy)[] | add ]' biq_prepped_5*.json biq_prepped_4*.json > temp_${biq_request_payload}
# Merge the error metrics with the performance metrics into the final payload.
jq -s '[ .[0] + .[1] | group_by(.apiproxy)[] | add ]' temp_${biq_request_payload} ${biq_perf_metrics} > ${biq_request_payload}
rm temp_${biq_request_payload}
else
echo "No 401, 403, 4xx, 502, 503, 504 or 5xx error files are found..nothing to merge"
# Fix: the original read 'biq_request_payload = ...' (spaces around '='),
# which ran a nonexistent command instead of assigning, and then the merge
# step below referenced a temp file that was never created. With no error
# files the performance metrics file *is* the whole payload, so the merge is
# now confined to the branch above.
biq_request_payload=${biq_perf_metrics}
fi
#decorate biq payload
cat ${biq_request_payload} | sed 's/min(/min_/g; s/max(/max_/g; s/is_error/error_count/g; s/)//g;s/sum(//g; s/)//g; s/avg(//g; s/-/_/g' > "decorated_${biq_request_payload}"
biq_request_payload="decorated_${biq_request_payload}"
if [ ! -f "${biq_request_payload}" ]; then
msg="${biq_request_payload} does not exist. No metric will be sent to Apigee. "
echo "${msg}"
echo "[$(date '+%d-%m-%Y %H:%M:%S')] [ERROR] ${msg} " >>${log_path}
exit 0
else
curl_response_code=$(curl -X POST "${analytics_ep}/events/publish/$schema_name" -H"X-Events-API-AccountName:${global_account_name}" -H"X-Events-API-Key:${analytics_key}" -H"Content-type:application/vnd.appd.events+json;v=2" -H"Accept:application/json" -d "$(cat ${biq_request_payload})" -s -w "%{http_code}" --connect-timeout $connection_timeout_seconds $proxy_details)
echo "response code = $curl_response_code"
if [ "${curl_response_code}" -eq 200 ]; then
msg="Succesfully sent analytics event to AppDynamics."
echo "${msg}"
echo "[$(date '+%d-%m-%Y %H:%M:%S')] [INFO] ${msg}" >> ${log_path}
#clean up
rm biq_*.json decorated_biq_*.json raw_biq_prepped*.json
else
msg="Response code: ${curl_response_code}. Failed to send analytics event to AppDynamics. Note the HTTP response code and send it to support."
echo "${msg}"
echo "[$(date '+%d-%m-%Y %H:%M:%S')] [ERROR] ${msg}" >> ${log_path}
# No need to clean up, leave files to help support with troubleshooting
fi
fi
| true
|
1f2893cd74d9d1cc63209bfe1851afe2e06bbb96
|
Shell
|
jukylin/k3s
|
/scripts/build
|
UTF-8
| 1,976
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build k3s binaries into ./bin: a server binary (with agent/kubectl/crictl
# symlinked to it), hyperkube, cni, runc and containerd-shim.
# version.sh supplies $VERSION and $COMMIT used in the ldflags below.
set -e -x
source $(dirname $0)/version.sh
cd $(dirname $0)/..
# Embed version/commit into the binaries; -w -s strips debug info.
LDFLAGS="-X github.com/rancher/k3s/pkg/version.Version=$VERSION -X github.com/rancher/k3s/pkg/version.GitCommit=${COMMIT:0:8} -w -s"
STATIC="-extldflags '-static'"
STATIC_SQLITE="-extldflags '-static -lm -ldl -lz -lpthread'"
TAGS="ctrd apparmor seccomp no_btrfs netgo osusergo"
# Static linking (plus bundled sqlite) only when STATIC_BUILD=true.
if [ "$STATIC_BUILD" != "true" ]; then
STATIC=""
STATIC_SQLITE=""
else
TAGS="static_build libsqlite3 $TAGS"
fi
mkdir -p bin
# Default to ARMv7 when targeting arm and GOARM was not given explicitly.
if [ -z "$GOARM" ] && [ "arm" = "$(go env GOARCH)" ]; then
GOARM=7
fi
rm -f bin/k3s-agent bin/hyperkube bin/containerd bin/cni ./bin/runc bin/containerd-shim bin/k3s-server bin/kubectl bin/crictl
# echo Building agent
# CGO_ENABLED=1 go build -tags "$TAGS" -ldflags "$LDFLAGS $STATIC" -o bin/k3s-agent ./cmd/agent/main.go
echo Building server
# NOTE(review): the server build output is named bin/containerd and the tools
# below are symlinks to it - presumably a multicall binary; confirm upstream.
CGO_ENABLED=1 go build -tags "$TAGS" -ldflags "$LDFLAGS $STATIC_SQLITE" -o bin/containerd ./cmd/server/main.go
ln -s containerd ./bin/k3s-agent
ln -s containerd ./bin/k3s-server
ln -s containerd ./bin/kubectl
ln -s containerd ./bin/crictl
echo Building hyperkube
CGO_ENABLED=1 go build -tags "$TAGS" -ldflags "$LDFLAGS $STATIC_SQLITE" -o bin/hyperkube ./vendor/k8s.io/kubernetes/cmd/hyperkube/
#echo Building ctr
#CGO_ENABLED=1 go build -tags "$TAGS" -ldflags "$LDFLAGS $STATIC_SQLITE" -o bin/ctr ./cmd/ctr/main.go
# echo Building containerd
# CGO_ENABLED=0 go build -tags "$TAGS" -ldflags "$LDFLAGS $STATIC" -o bin/containerd ./cmd/containerd/
echo Building cni
CGO_ENABLED=0 go build -tags "$TAGS" -ldflags "$LDFLAGS $STATIC" -o bin/cni ./cmd/cni/main.go
echo Building runc
# runc and containerd-shim are built from vendored sources via their Makefiles.
make EXTRA_LDFLAGS="-w -s" BUILDTAGS="apparmor seccomp" -C ./vendor/github.com/opencontainers/runc static
cp -f ./vendor/github.com/opencontainers/runc/runc ./bin/runc
echo Building containerd-shim
make -C ./vendor/github.com/containerd/containerd bin/containerd-shim
cp -f ./vendor/github.com/containerd/containerd/bin/containerd-shim ./bin/containerd-shim
| true
|
2581773d858ced3c1eb489610723054af42e1e76
|
Shell
|
WangHL0927/grafana
|
/scripts/build/build.sh
|
UTF-8
| 4,025
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# shellcheck disable=SC2086
#
# This script is executed from within the container.
#
# Builds Grafana backend binaries for several CPU architectures, builds the
# frontend, and produces packages. Flags: --fast, --backend-only,
# --frontend-only, --package-only; any other args pass through to build.go.
set -e
##########
# Cross compilers for each target (paths baked into the build container).
CCARMV6=/opt/rpi-tools/arm-bcm2708/arm-linux-gnueabihf/bin/arm-linux-gnueabihf-gcc
CCARMV7=arm-linux-gnueabihf-gcc
CCARMV7_MUSL=/tmp/arm-linux-musleabihf-cross/bin/arm-linux-musleabihf-gcc
CCARM64=aarch64-linux-gnu-gcc
CCARM64_MUSL=/tmp/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc
CCX64=/tmp/x86_64-centos6-linux-gnu/bin/x86_64-centos6-linux-gnu-gcc
CCX64_MUSL=/tmp/x86_64-linux-musl-cross/bin/x86_64-linux-musl-gcc
# Default: full build (all stages on, fast mode off).
BUILD_FAST=0
BUILD_BACKEND=1
BUILD_FRONTEND=1
BUILD_PACKAGE=1
while [ "$1" != "" ]; do
case "$1" in
"--fast")
BUILD_FAST=1
echo "Fast build enabled"
shift
;;
"--backend-only")
BUILD_FRONTEND=0
BUILD_PACKAGE=0
echo "Building only backend"
shift
;;
"--frontend-only")
BUILD_BACKEND=0
BUILD_PACKAGE=0
echo "Building only frontend"
shift
;;
"--package-only")
BUILD_BACKEND=0
BUILD_FRONTEND=0
echo "Building only packaging"
shift
;;
* )
# unknown param causes args to be passed through to $@
break
;;
esac
done
# shellcheck disable=SC2124
EXTRA_OPTS="$@"
cd /go/src/github.com/grafana/grafana
echo "current dir: $(pwd)"
# Tagged builds drop the build id; branch builds embed the CI workflow id.
if [ "$CIRCLE_TAG" != "" ]; then
echo "Building releases from tag $CIRCLE_TAG"
OPT="-includeBuildId=false ${EXTRA_OPTS}"
else
echo "Building incremental build for $CIRCLE_BRANCH"
OPT="-buildId=${CIRCLE_WORKFLOW_ID} ${EXTRA_OPTS}"
fi
echo "Build arguments: $OPT"
echo "current dir: $(pwd)"
# Build the linux/amd64 backend (glibc and musl variants).
function build_backend_linux_amd64() {
if [ ! -d "dist" ]; then
mkdir dist
fi
CC=${CCX64} go run build.go ${OPT} build
CC=${CCX64_MUSL} go run build.go -libc musl ${OPT} build
}
# Build the backend for all ARM variants, then amd64.
function build_backend() {
if [ ! -d "dist" ]; then
mkdir dist
fi
go run build.go -goarch armv6 -cc ${CCARMV6} ${OPT} build
go run build.go -goarch armv7 -cc ${CCARMV7} ${OPT} build
go run build.go -goarch arm64 -cc ${CCARM64} ${OPT} build
go run build.go -goarch armv7 -libc musl -cc ${CCARMV7_MUSL} ${OPT} build
go run build.go -goarch arm64 -libc musl -cc ${CCARM64_MUSL} ${OPT} build
build_backend_linux_amd64
}
# Install JS dependencies and build the frontend, timing the build step.
function build_frontend() {
if [ ! -d "dist" ]; then
mkdir dist
fi
yarn install --pure-lockfile --no-progress
echo "Building frontend"
start=$(date +%s%N)
go run build.go ${OPT} build-frontend
runtime=$((($(date +%s%N) - start)/1000000))
echo "Frontent build took: $runtime ms"
echo "FRONTEND: finished"
}
# Package the linux/amd64 artifacts (musl variant skips rpm/deb).
function package_linux_amd64() {
echo "Packaging Linux AMD64"
go run build.go -goos linux -pkg-arch amd64 ${OPT} package-only
go run build.go -goos linux -pkg-arch amd64 ${OPT} -libc musl -skipRpm -skipDeb package-only
go run build.go latest
echo "PACKAGE LINUX AMD64: finished"
}
# Package every supported architecture.
function package_all() {
echo "Packaging ALL"
go run build.go -goos linux -pkg-arch armv6 ${OPT} -skipRpm package-only
go run build.go -goos linux -pkg-arch armv7 ${OPT} package-only
go run build.go -goos linux -pkg-arch arm64 ${OPT} package-only
go run build.go -goos linux -pkg-arch armv7 -libc musl -skipRpm -skipDeb ${OPT} package-only
go run build.go -goos linux -pkg-arch arm64 -libc musl -skipRpm -skipDeb ${OPT} package-only
package_linux_amd64
echo "PACKAGE ALL: finished"
}
# Reset dist/, record the version, and load ruby (needed by fpm).
function package_setup() {
echo "Packaging: Setup environment"
if [ -d "dist" ]; then
rm -rf dist
fi
mkdir dist
go run build.go -gen-version ${OPT} > dist/grafana.version
# Load ruby, needed for packing with fpm
# shellcheck disable=SC1091
source /etc/profile.d/rvm.sh
}
# Dispatch: full pipeline by default; --fast restricts to linux/amd64 stages.
if [ $BUILD_FAST = "0" ]; then
build_backend
build_frontend
package_setup
package_all
else
if [ $BUILD_BACKEND = "1" ]; then
build_backend_linux_amd64
fi
if [ $BUILD_FRONTEND = "1" ]; then
build_frontend
fi
if [ $BUILD_PACKAGE = "1" ]; then
package_setup
package_linux_amd64
# last step
#go run build.go latest
fi
fi
| true
|
8e344e2603322275df4c3752b10bd1c8aede6c98
|
Shell
|
pnlbwh/pnlutil
|
/scripts-util/transpose.sh
|
UTF-8
| 936
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash -eu
# Transpose whitespace-delimited tables in the given files, in place.
# A file is rewritten only when its first row has 1 or 3 columns.
# Fix: every expansion is now quoted so file names containing whitespace
# survive intact (the original left $1, $input and $@ unquoted).

# Print the transpose of the whitespace-delimited table in file $1.
transpose() {
awk '
BEGIN { FS=OFS=" " }
{
for (rowNr=1;rowNr<=NF;rowNr++) {
cell[rowNr,NR] = $rowNr
}
maxRows = (NF > maxRows ? NF : maxRows)
maxCols = NR
}
END {
for (rowNr=1;rowNr<=maxRows;rowNr++) {
for (colNr=1;colNr<=maxCols;colNr++) {
printf "%s%s", cell[rowNr,colNr], (colNr < maxCols ? OFS : ORS)
}
}
}' "$1"
}

# Return 0 (true) when the first row of $1 has exactly 3 or exactly 1 column.
needs_transpose() {
input=$1
numcols=$(awk '{ print NF; exit }' "$input")
if [ "$numcols" -eq 3 ]; then
return 0; # true
elif [ "$numcols" -eq 1 ]; then
return 0; # true
else
return 1; # false
fi
}

for input in "$@"; do
if needs_transpose "$input"; then
echo "'$input' needs to be tranposed"
tmp=$(mktemp)
transpose "$input" > "$tmp"
# Keep a copy of the original in /tmp before replacing it.
mv "$input" /tmp
mv "$tmp" "$input"
echo "'$input' is transposed"
else
echo "'$input' is fine."
fi
done
| true
|
9973cd294ccae0fbb9764c9b77237d173f128c79
|
Shell
|
Disparity/git-hooks
|
/post-checkout-composer-auto-install
|
UTF-8
| 571
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Auto install composer dependencies when composer.json/composer.lock is changed
#
# @author Disparity <disparity-github@yandex.ru>
# @site https://github.com/Disparity/git-hooks
# @dependency git
# @dependency php@>=5.3
# @dependency composer@~1.0
# post-checkout hook arguments: $1 = previous HEAD, $2 = new HEAD.
# Nothing to do when the checkout did not move HEAD.
if [ "$1" == "$2" ]; then exit 0; fi
# Run 'composer install' only when composer.json/composer.lock differ
# between the two revisions; the subshell keeps the hook's exit status at 0.
git diff --name-only --diff-filter=AM $2 $1 -- composer.json composer.lock|grep composer > /dev/null && (
composer install -n --no-ansi --quiet --working-dir `git rev-parse --show-toplevel` && echo Composer dependencies installed || echo Composer dependencies install failed
)
exit 0
| true
|
58d3161f99aebc6b4520255a42537fbf22f2a046
|
Shell
|
PMET-public/magento-cloud-extension
|
/tests/new-branch-commands.bats
|
UTF-8
| 952
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env ./tests/libs/bats/bin/bats
# Bats tests for the "new branch" commands: add-grocery, admin-create,
# delete-env. Each test renders a script from a command id and runs it.
# bats will loop indefinitely with debug mode on (i.e. set -x)
unset debug
load 'libs/bats-assert/load'
load 'libs/bats-support/load'
load 'libs/bats-file/load'
load 'bats-lib.sh'
setup() {
shopt -s nocasematch
cd "$proj_dir/sh-scripts" || exit
# Env consumed by the generated scripts; MCE_PROJECT_ID/GITHUB_* come from CI.
export tab_url="https://demo.magento.cloud/projects/$MCE_PROJECT_ID/environments/$GITHUB_RUN_ID"
export ext_ver="$GITHUB_SHA"
}
@test 'add-grocery' {
script="$(create_script_from_command_id add-grocery)"
# 3>&- closes fd 3 (common bats idiom to avoid hangs on lingering readers).
run "$script" 3>&-
assert_success
# assert_output -e ""
}
@test 'admin-create' {
script="$(create_script_from_command_id admin-create)"
# Heredoc answers the script's interactive prompts (user, password, email).
run "$script" 3>&- << RESPONSES
admin2
123123q
admin@test.com
RESPONSES
assert_success
assert_output -e "created.*user"
}
@test 'delete-env' {
script="$(create_script_from_command_id delete-env)"
# 'y' confirms the environment deletion prompt.
run "$script" 3>&- << RESPONSES
y
RESPONSES
assert_success
assert_output -e "branch.*deleted"
}
| true
|
195f449f0376c438334dc1f0635d7f5008c00390
|
Shell
|
sociomantic-tsunami/beaver
|
/bin/beaver-codecov
|
UTF-8
| 1,762
| 4.25
| 4
|
[
"BSL-1.0"
] |
permissive
|
#!/bin/sh
# Copyright dunnhumby Germany GmbH 2017.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#
# Report codecoverage via codecov in a sandboxed environment.
#
# This command is just a wrapper to codecov-bash
# (https://github.com/codecov/codecov-bash) that runs inside docker and with
# a fixed version of the script. It also only exposes the source code as
# present in the git repo and the reports to the script, not all the dirty
# build directory.
#
# To pass reports to upload use the variable BEAVER_CODECOV_REPORTS. These
# reports are passed without any quoting, so globbing can be used, so if any
# weird characters could appear in the reports file names, you need to escape
# them properly. Entire directories can be included, the copy will be
# recursive.
set -eu
# Nothing to upload: succeed quietly so CI pipelines do not fail.
if test -z "${BEAVER_CODECOV_REPORTS:-}"
then
echo "No reports passed via BEAVER_CODECOV_REPORTS!" >&2
exit 0
fi
# Paths
r="$(readlink -f $(dirname $0)/..)"
beaver="$r/bin/beaver"
# Export any codecov-specific environment variable
codecov_vars="$(printenv -0 | sed -zn 's/^\(CODECOV_[^=]\+\)=.*$/\1\n/p' |
tr -d '\0')"
export BEAVER_DOCKER_VARS="${BEAVER_DOCKER_VARS:-} $codecov_vars"
# Arguments to pass to codecov by default
# Copy coverage reports and git structure to a clean directory
tmp=`mktemp -d`
# Remove the temp dir on any exit path, preserving the original exit status.
trap 'r=$?; rm -fr "$tmp"; exit $r' EXIT INT TERM QUIT
mkdir -p "$tmp/reports"
cp -a $BEAVER_CODECOV_REPORTS "$tmp/reports/"
# Expose only the committed tree plus the git dir, not the build directory.
git archive --format=tar HEAD | (cd "$tmp" && tar xf -)
cp -a $(git rev-parse --git-dir) "$tmp/"
cp -av "$r/bin/codecov-bash" "$tmp/codecov"
# Run codecov in the confined environment
cd "$tmp"
"$beaver" run ./codecov -n beaver -s reports "$@"
| true
|
cb6358e7afc00d3af0a51d3e662b1e21be840552
|
Shell
|
Vinotha16/WIN_ROLLBACK
|
/templates/linux_actualfacts/suse11/cron_511_actual.fact
|
UTF-8
| 204
| 2.890625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Emit a JSON fact describing whether the cron service is switched on for
# runlevels 2-5 according to chkconfig; exit non-zero when it is enabled.
match_count=$( chkconfig --list cron | grep '2:on.*3:on.*4:on.*5:on' | wc -l)
if [ "$match_count" -eq 0 ]; then
    echo "{ \"cron_511_actual\" : \"\" }"
else
    echo "{ \"cron_511_actual\" : \"enabled\" }"
    exit 1
fi
| true
|
a5939691b3e13ebcf9743c3e6328c35b559cbba5
|
Shell
|
GobletSky31689/CS256
|
/Assignments/HomeWork 3/src/Scripts/train_4_Generator.sh
|
UTF-8
| 484
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Generate the Train_4 dataset: 60,000 NON-STICKY snippets produced in two
# 30,000-item batches and concatenated into Train_4/file1.txt.
# Guard: abort when BASEDIR is unset - otherwise the rm -rf calls below would
# operate on root-level paths like /Train_4.
: "${BASEDIR:?BASEDIR must be set to the scripts directory}"
rm -rf "$BASEDIR/Train_4"
mkdir "$BASEDIR/Train_4"
echo
echo "Generating data for Train_4 folder with 60,000 items of NON-STICKY class."
rm -rf "$BASEDIR/out1.txt"
rm -rf "$BASEDIR/out2.txt"
python "$BASEDIR/sticky_snippet_generator.py" 30000 0 0 "$BASEDIR/out1.txt"
python "$BASEDIR/sticky_snippet_generator.py" 30000 0 0 "$BASEDIR/out2.txt"
cat "$BASEDIR/out1.txt" "$BASEDIR/out2.txt" > "$BASEDIR/Train_4/file1.txt"
echo "Dataset of Train_4 Generated"
echo
echo
| true
|
17713dbba17f9e3f69b1faa92b977342d084b00b
|
Shell
|
XGWang0/Suse_testsuite
|
/tests/qa_test_coreutils/qa_test_coreutils/orig_test_suite/chgrp/posix-H
|
UTF-8
| 1,499
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/sh
# Test POSIX-mandated -H option.
# -H: with -R, follow symlinks given on the command line (but not those
# encountered during traversal). Builds a small tree, runs chgrp -H -R, then
# verifies which entries changed group and which did not.
if test "$VERBOSE" = yes; then
set -x
chgrp --version
fi
# group-names provides $groups (two usable group names for the test).
. $srcdir/../group-names
set _ $groups; shift
g1=$1
g2=$2
pwd=`pwd`
# Unique scratch dir per test script + PID; cleaned up by the trap below.
t0=`echo "$0"|sed 's,.*/,,'`.tmp; tmp=$t0/$$
trap 'status=$?; cd $pwd; chmod -R u+rwx $t0; rm -rf $t0 && exit $status' 0
trap '(exit $?); exit $?' 1 2 13 15
framework_failure=0
mkdir -p $tmp || framework_failure=1
cd $tmp || framework_failure=1
# Tree: dirs 1,2,3 with one file each; 1s -> 1 (cmdline symlink),
# 2/2s -> ../3 (symlink met during traversal). Everything starts as $g1.
mkdir 1 2 3 || framework_failure=1
touch 1/1F 2/2F 3/3F || framework_failure=1
ln -s 1 1s || framework_failure=1
ln -s ../3 2/2s || framework_failure=1
chgrp -R $g1 1 2 3 || framework_failure=1
if test $framework_failure = 1; then
echo "$0: failure in testing framework" 1>&2
(exit 1); exit 1
fi
fail=0
# The command under test: -H must dereference 1s, but not 2/2s.
chgrp -H -R $g2 1s 2 || fail=1
# These must have group $g2.
# =========================
changed='
1
1/1F
2
2/2F
3
'
for i in $changed; do
# Filter out symlinks (entries that end in `s'), since it's not
# possible to change their group/owner information on some systems.
case $i in *s) continue;; esac
# Field 3 of 'ls -dgn' is the numeric group id.
set _ `ls -dgn $i`; shift
group=$3
test $group = $g2 || fail=1
done
# These must have group $g1.
# =========================
not_changed='
1s
2/2s
3/3F
'
for i in $not_changed; do
# Filter out symlinks (entries that end in `s'), since it's not
# possible to change their group/owner information on some systems.
case $i in *s) continue;; esac
set _ `ls -dgn $i`; shift
group=$3
test $group = $g1 || fail=1
done
(exit $fail); exit $fail
| true
|
e3509c97e52e9c07359086e26b3825f654b61371
|
Shell
|
bytedance/fedlearner
|
/deploy/scripts/aliyun/install.sh
|
UTF-8
| 28,826
| 2.984375
| 3
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage: install.sh ACCESS_KEY_ID ACCESS_KEY_SECRET DB_PASSWORD ES_PASSWORD BUCKET PAY_TYPE
# Positional arguments supplied by the caller.
ACCESS_KEY_ID=$1
ACCESS_KEY_SECRET=$2
DB_PASSWORD=$3
ES_PASSWORD=$4
BUCKET=$5
PAY_TYPE=$6
# Fixed region/zone, shared resource name and VPC CIDR used for every
# resource this script creates.
REGION="cn-beijing"
ZONE_ID="cn-beijing-h"
GENERATER_NAME="fedlearnerwins"
VPC_CIDR_BLOCK="192.168.0.0/16"
function echo_exit {
    # Print an error message and abort the script.
    # Fix: quote "$1" so the message is not word-split or glob-expanded.
    echo "$1"
    exit 1
}
function echo_log {
    # Print a message and append it to install.log.
    # Fix: quote "$msg" so whitespace/glob characters in the message survive.
    msg="$1"
    echo "$msg"
    echo "$msg" >> install.log
}
function json2yaml {
# Extract the "config" field from ./tmp (JSON) into a file named "config".
python -c 'import json; open("config", "w").write(json.load(open("./tmp","r"))["config"]);'
}
# Install kubectl, helm and the aliyun CLI (macOS binaries) if missing,
# then configure the aliyun CLI with the provided credentials.
function install_cli {
# Download kubectl
kubectl help >/dev/null 2>&1
if [ $? -ne 0 ]
then
echo_log "Download kubectl."
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/darwin/amd64/kubectl
mv kubectl /usr/local/bin/
chmod 755 /usr/local/bin/kubectl
fi
# Download helm
helm version | grep Version:\"v3 >/dev/null 2>&1
if [ $? -ne 0 ]
then
echo_log "Download helm."
curl -LO https://get.helm.sh/helm-v3.2.3-darwin-amd64.tar.gz
tar -zxvf helm-v3.2.3-darwin-amd64.tar.gz
mv darwin-amd64/helm /usr/local/bin/
chmod 755 /usr/local/bin/helm
rm -rf darwin-amd64 helm-v3.2.3-darwin-amd64.tar.gz
fi
# Download aliyun cli
aliyun version >/dev/null 2>&1
if [ $? -ne 0 ]
then
echo_log "Download aliyun cli."
curl -LO https://aliyuncli.alicdn.com/aliyun-cli-macosx-3.0.32-amd64.tgz
tar -zxvf aliyun-cli-macosx-3.0.32-amd64.tgz
mv aliyun /usr/local/bin
chmod 755 /usr/local/bin/aliyun
rm -rf aliyun-cli-macosx-3.0.32-amd64.tgz
fi
# Configure aliyun cli
aliyun auto-completion
aliyun configure set --profile akProfile --region $REGION --access-key-id $ACCESS_KEY_ID --access-key-secret $ACCESS_KEY_SECRET --language en
if [ $? -ne 0 ]
then
echo_exit "Failed to initiate aliyun cli."
fi
}
# Create a RAM role ($1) trusted by service $3 and attach system policy $2.
# Errors are suppressed, so re-running for an existing role is harmless.
function create_role {
ROLE_NAME=$1
POLICY_NAME=$2
SERVICE=$3
aliyun ram CreateRole --RoleName $ROLE_NAME --AssumeRolePolicyDocument "{\"Statement\": [{\"Action\": \"sts:AssumeRole\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": [\"$SERVICE\"]}}],\"Version\": \"1\"}" >/dev/null 2>&1
aliyun ram AttachPolicyToRole --RoleName $ROLE_NAME --PolicyType System --PolicyName $POLICY_NAME >/dev/null 2>&1
}
# Create all RAM roles required by NAS, the container service (CS) and ESS.
function init_policy {
create_role AliyunNASDefaultRole AliyunNASRolePolicy nas.aliyuncs.com
create_role AliyunCSClusterRole AliyunCSClusterRolePolicy cs.aliyuncs.com
create_role AliyunCSDefaultRole AliyunCSDefaultRolePolicy cs.aliyuncs.com
create_role AliyunCSKubernetesAuditRole AliyunCSKubernetesAuditRolePolicy cs.aliyuncs.com
create_role AliyunCSManagedArmsRole AliyunCSManagedArmsRolePolicy cs.aliyuncs.com
create_role AliyunCSManagedCmsRole AliyunCSManagedCmsRolePolicy cs.aliyuncs.com
create_role AliyunCSManagedCsiRole AliyunCSManagedCsiRolePolicy cs.aliyuncs.com
create_role AliyunCSManagedKubernetesRole AliyunCSManagedKubernetesRolePolicy cs.aliyuncs.com
create_role AliyunCSManagedLogRole AliyunCSManagedLogRolePolicy cs.aliyuncs.com
create_role AliyunCSManagedNetworkRole AliyunCSManagedNetworkRolePolicy cs.aliyuncs.com
create_role AliyunCSManagedVKRole AliyunCSManagedVKRolePolicy cs.aliyuncs.com
create_role AliyunCSServerlessKubernetesRole AliyunCSServerlessKubernetesRolePolicy cs.aliyuncs.com
create_role AliyunESSDefaultRole AliyunESSRolePolicy cs.aliyuncs.com
}
# Create the OSS bucket (idempotent: errors suppressed if it already exists).
function create_oss_bucket {
aliyun oss mb oss://$BUCKET --storage-class Standard >/dev/null 2>&1
}
# Create (or reuse) the VPC named $GENERATER_NAME; sets $VPC_ID.
function create_vpc {
VPC_ID=`aliyun vpc DescribeVpcs --VpcName $GENERATER_NAME | grep VpcId | awk -F "\"" '{print $4}'`
if [[ $VPC_ID == "vpc"* ]]
then
echo_log "Vpc $GENERATER_NAME already exists with vpc id $VPC_ID."
else
aliyun vpc CreateVpc --CidrBlock $VPC_CIDR_BLOCK --VpcName $GENERATER_NAME
VPC_ID=`aliyun vpc DescribeVpcs --VpcName $GENERATER_NAME | grep VpcId | awk -F "\"" '{print $4}'`
if [[ $VPC_ID == "vpc"* ]]
then
echo_log "Create vpc $GENERATER_NAME success with vpc id $VPC_ID."
else
echo_exit "Failed to create vpc"
fi
sleep 5
fi
}
# Create (or reuse) the vswitch in $VPC_ID/$ZONE_ID; sets $VSWITCH_ID.
function create_vswitch {
VSWITCH_ID=`aliyun vpc DescribeVSwitches --VSwitchName $GENERATER_NAME | grep VSwitchId | awk -F "\"" '{print $4}'`
if [[ $VSWITCH_ID == "vsw"* ]]
then
echo_log "Vswitch $GENERATER_NAME already exists with vswitch id $VSWITCH_ID."
else
aliyun vpc CreateVSwitch --VpcId $VPC_ID --CidrBlock $VPC_CIDR_BLOCK --VSwitchName $GENERATER_NAME --ZoneId $ZONE_ID
VSWITCH_ID=`aliyun vpc DescribeVSwitches --VSwitchName $GENERATER_NAME | grep VSwitchId | awk -F "\"" '{print $4}'`
if [[ $VSWITCH_ID == "vsw"* ]]
then
echo_log "Create vswtich $GENERATER_NAME success with vswtich id $VSWITCH_ID."
else
echo_exit "Failed to create vswtich"
fi
sleep 5
fi
}
# Create (or reuse) the ECS SSH keypair named $GENERATER_NAME.
function create_secret {
KEYPAIR_NAME=`aliyun ecs DescribeKeyPairs --KeyPairName $GENERATER_NAME | grep KeyPairName | awk -F "\"" '{print $4}'`
if [ -n "$KEYPAIR_NAME" ]
then
echo_log "Keypair $GENERATER_NAME already exists."
else
aliyun ecs CreateKeyPair --KeyPairName $GENERATER_NAME
if [ $? -eq 0 ]
then
echo_log "Create keypair $GENERATER_NAME Success."
else
echo_exit "Failed to create keypair $GENERATER_NAME."
fi
fi
}
function create_k8s_cluster_config {
    # Write the ACK (managed Kubernetes) cluster creation request to k8s.json.
    rm -rf k8s.json
    echo_log "Generate the kubernetes cluster config file for aliyun."
    # The postpaid and prepaid configs were two near-duplicate 55-line
    # here-docs differing only in the worker charge-type fields; build that
    # fragment first so the shared template cannot drift between the two.
    if [[ $PAY_TYPE == "postpaid" ]]
    then
        worker_charge_config='"worker_instance_charge_type": "PostPaid",'
    else
        # Prepaid: monthly subscription with auto-renew.
        worker_charge_config='"worker_instance_charge_type": "PrePaid",
  "worker_period_unit": "Month",
  "worker_period": 1,
  "worker_auto_renew": true,
  "worker_auto_renew_period": 1,'
    fi
    cat <<EOF >>k8s.json
{
  "name": "$GENERATER_NAME",
  "cluster_type": "ManagedKubernetes",
  "disable_rollback": true,
  "timeout_mins": 60,
  "kubernetes_version": "1.16.9-aliyun.1",
  "region_id": "$REGION",
  "snat_entry": true,
  "cloud_monitor_flags": false,
  "endpoint_public_access": true,
  "deletion_protection": false,
  "node_cidr_mask": "26",
  "proxy_mode": "ipvs",
  "tags": [],
  "addons": [
    {
      "name": "flannel"
    },
    {
      "name": "csi-plugin"
    },
    {
      "name": "csi-provisioner"
    },
    {
      "name": "nginx-ingress-controller",
      "disabled": true
    }
  ],
  "os_type": "Linux",
  "platform": "CentOS",
  "runtime": {
    "name": "docker",
    "version": "19.03.5"
  },
  "worker_instance_types": [
    "ecs.c6.3xlarge"
  ],
  "num_of_nodes": 3,
  "worker_system_disk_category": "cloud_efficiency",
  "worker_system_disk_size": 120,
  $worker_charge_config
  "vpcid": "$VPC_ID",
  "container_cidr": "172.20.0.0/16",
  "service_cidr": "172.21.0.0/20",
  "vswitch_ids": [
    "$VSWITCH_ID"
  ],
  "key_pair": "$GENERATER_NAME",
  "cpu_policy": "none",
  "is_enterprise_security_group": true
}
EOF
}
# Create (or reuse) the managed Kubernetes cluster described by k8s.json,
# wait until it is running, then fetch its kubeconfig into ./config and
# point $KUBECONFIG at it.
function create_k8s {
create_k8s_cluster_config
CLUSTER_ID=`aliyun cs DescribeClusters | grep -A 1 name | grep -A 1 $GENERATER_NAME | grep cluster_id | awk -F "\"" '{print $4}'`
if [ -n "$CLUSTER_ID" ]
then
echo_log "Kubernetes cluster $GENERATER_NAME already exists with id $CLUSTER_ID."
else
CLUSTER_ID=`aliyun cs POST /clusters --header "Content-Type=application/json" --body "$(cat ./k8s.json)" | grep cluster_id | awk -F "\"" '{print $4}'`
if [ -n "$CLUSTER_ID" ]
then
echo_log "Kubernetes cluster $GENERATER_NAME create success with id $CLUSTER_ID."
# Poll every 30s until the cluster reports the "running" state.
STATUS=`aliyun cs DescribeClusters | grep -A 5 name | grep -A 5 $GENERATER_NAME | grep state | awk -F "\"" '{print $4}'`
while [ "$STATUS" != "running" ]
do
echo_log "Current kubernetes cluster status is $STATUS, loop wait until it's running."
sleep 30
STATUS=`aliyun cs DescribeClusters | grep -A 5 name | grep -A 5 $GENERATER_NAME | grep state | awk -F "\"" '{print $4}'`
done
else
echo_exit "Failed to create k8s cluster $GENERATER_NAME."
fi
fi
CLUSTER_ID=`aliyun cs DescribeClusters | grep -A 1 name | grep -A 1 $GENERATER_NAME | grep cluster_id | awk -F "\"" '{print $4}'`
rm -rf tmp config
echo_log "Creating config file in current dir, you can move it to ~/.kube/config."
# The user_config response (JSON) is converted to the kubeconfig via json2yaml.
aliyun cs GET /k8s/$CLUSTER_ID/user_config > ./tmp
json2yaml
CURRENT_DIR=`pwd`
export KUBECONFIG="$CURRENT_DIR/config"
rm -rf tmp k8s.json
}
# Create (or reuse) the RDS MySQL instance in $VPC_ID, wait until it is
# running, then ensure the 'fedlearner' account and database exist.
function create_db {
DB_INSTANCE_ID=`aliyun rds DescribeDBInstances --VpcId $VPC_ID | grep \"DBInstanceId\" | awk -F "\"" '{print $4}' | head -1`
if [ -n "$DB_INSTANCE_ID" ]
then
echo_log "Database already exists with id $DB_INSTANCE_ID."
else
if [[ $PAY_TYPE == "postpaid" ]]
then
aliyun rds CreateDBInstance --Engine MySQL --EngineVersion 8.0 --DBInstanceClass rds.mysql.t1.small --DBInstanceStorage 20 --SecurityIPList 0.0.0.0/0 --PayType Postpaid --DBInstanceNetType Intranet --RegionId $REGION --ZoneId $ZONE_ID --VPCId $VPC_ID --InstanceNetworkType VPC
else
aliyun rds CreateDBInstance --Engine MySQL --EngineVersion 8.0 --DBInstanceClass rds.mysql.t1.small --DBInstanceStorage 20 --SecurityIPList 0.0.0.0/0 --DBInstanceNetType Intranet --RegionId $REGION --ZoneId $ZONE_ID --VPCId $VPC_ID --InstanceNetworkType VPC --PayType Prepaid --UsedTime 1 --Period Month --AutoRenew true
fi
DB_INSTANCE_ID=`aliyun rds DescribeDBInstances --VpcId $VPC_ID | grep \"DBInstanceId\" | awk -F "\"" '{print $4}' | head -1`
if [ -n "$DB_INSTANCE_ID" ]
then
echo_log "Create db instance success with instance id $DB_INSTANCE_ID."
# Poll every 30s until the instance reports "Running".
STATUS=`aliyun rds DescribeDBInstances --VpcId $VPC_ID | grep DBInstanceStatus | awk -F "\"" '{print $4}' | head -1`
while [ "$STATUS" != "Running" ]
do
echo_log "Current db instance status is $STATUS, loop wait until it's running."
sleep 30
STATUS=`aliyun rds DescribeDBInstances --VpcId $VPC_ID | grep DBInstanceStatus | awk -F "\"" '{print $4}' | head -1`
done
else
echo_exit "Failed to create db instance."
fi
fi
# Create database account.
DB_ACCOUNT=`aliyun rds DescribeAccounts --DBInstanceId $DB_INSTANCE_ID | grep fedlearner`
if [ -z "$DB_ACCOUNT" ]
then
aliyun rds CreateAccount --AccountName fedlearner --AccountPassword $DB_PASSWORD --AccountType Super --DBInstanceId $DB_INSTANCE_ID
if [ $? -eq 0 ]
then
echo_log "Create db account fedlearner success."
sleep 5
else
echo_exit "Failed to create db account fedlearner."
fi
else
echo_log "DB account fedlearner already exists."
fi
# Create the database.
DB_FEDLEARNER=`aliyun rds DescribeDatabases --DBInstanceId $DB_INSTANCE_ID | grep fedlearner`
# Fix: the original re-tested $DB_ACCOUNT here (copy-paste), so the database
# was never created whenever the account already existed.
if [ -z "$DB_FEDLEARNER" ]
then
aliyun rds CreateDatabase --CharacterSetName utf8mb4 --DBName fedlearner --DBInstanceId $DB_INSTANCE_ID
if [ $? -eq 0 ]
then
echo_log "Create db fedlearner success, waiting 1 minute to update account."
sleep 60
else
echo_exit "Failed to create db fedlearner."
fi
else
echo_log "DB fedlearner already exists."
fi
}
# Ensure a NAS (NFS) file system and its VPC mount target exist, reusing
# anything already tagged with $GENERATER_NAME.
# Globals read:  GENERATER_NAME, ZONE_ID, VPC_ID, VSWITCH_ID
# Globals set:   FILE_SYSTEM_ID, MOUNT_TARGET_DOMAIN (the NFS server address
#                later consumed by install_fedlearner).
function create_nas {
# Look up an existing file system by description; an empty id means none.
FILE_SYSTEM_ID=`aliyun nas DescribeFileSystems --Description $GENERATER_NAME | grep FileSystemId | awk -F "\"" '{print $4}'`
if [ -n "$FILE_SYSTEM_ID" ]
then
echo_log "Nas file system already exists with id $FILE_SYSTEM_ID."
else
# Create a Capacity-type NFS file system in the target zone.
FILE_SYSTEM_ID=`aliyun nas CreateFileSystem --ProtocolType NFS --StorageType Capacity --ZoneId $ZONE_ID --Description $GENERATER_NAME | grep FileSystemId | awk -F "\"" '{print $4}'`
if [ -n "$FILE_SYSTEM_ID" ]
then
echo_log "Create nas file system success with id $FILE_SYSTEM_ID."
else
echo_exit "Failed to create nas file system."
fi
fi
# A mount target exposes the file system inside the VPC; reuse if present.
MOUNT_TARGET_DOMAIN=`aliyun nas DescribeMountTargets --FileSystemId $FILE_SYSTEM_ID | grep MountTargetDomain | awk -F "\"" '{print $4}'`
if [ -n "$MOUNT_TARGET_DOMAIN" ]
then
echo_log "Nas file system $FILE_SYSTEM_ID already has mount target domain $MOUNT_TARGET_DOMAIN"
else
aliyun nas CreateMountTarget --AccessGroupName DEFAULT_VPC_GROUP_NAME --NetworkType Vpc --VpcId $VPC_ID --VSwitchId $VSWITCH_ID --FileSystemId $FILE_SYSTEM_ID
# Re-query to confirm the mount target actually materialised.
MOUNT_TARGET_DOMAIN=`aliyun nas DescribeMountTargets --FileSystemId $FILE_SYSTEM_ID | grep MountTargetDomain | awk -F "\"" '{print $4}'`
if [ -n "$MOUNT_TARGET_DOMAIN" ]
then
echo_log "Create nas file system mount target success with domain $MOUNT_TARGET_DOMAIN."
else
echo_exit "Failed to create nas file system mount target."
fi
fi
}
# Populate the global NODES list with the ECS instance ids of the worker
# nodes of the managed Kubernetes cluster $CLUSTER_ID.
# Exits the installer (via echo_exit) when no node can be found.
function list_k8s_nodes {
    NODES=$(aliyun ecs DescribeInstances --InstanceName worker-k8s-for-cs-$CLUSTER_ID | grep InstanceId | awk -F "\"" '{print $4}')
    if [ -z "$NODES" ]
    then
        echo_exit "Failed to list the k8s nodes with name worker-k8s-for-cs-$CLUSTER_ID"
    else
        echo_log "Kubernetes cluster has following nodes:"
        echo_log $NODES
    fi
}
# Ensure exactly one worker node carries an elastic (public) IP.
# If any node in $NODES already has an EIP, reuse it; otherwise allocate a
# 50 Mbit pay-by-traffic EIP and bind it to the first node.
# Globals read:  NODES, REGION
# Globals set:   EIP_NODE (instance id that owns the public ip)
# Fixed log-message typos: "pulbic" -> "public", "Asscociate" -> "Associate".
function create_eip {
EIP_NODE=""
# Reuse: scan all nodes for an already-associated EIP.
for node in $NODES
do
ALLOCATION_ID=`aliyun vpc DescribeEipAddresses --AssociatedInstanceId $node --AssociatedInstanceType EcsInstance | grep AllocationId | awk -F "\"" '{print $4}'`
if [ -n "$ALLOCATION_ID" ]
then
EIP_NODE=$node
echo_log "Public ip with id $ALLOCATION_ID has already associate with $EIP_NODE."
return
fi
done
# Allocate the public ip.
ALLOCATION_ID=`aliyun vpc AllocateEipAddress --RegionId $REGION --Bandwidth 50 --InstanceChargeType PostPaid --InternetChargeType PayByTraffic --Netmode public | grep AllocationId | awk -F "\"" '{print $4}'`
if [ -n "$ALLOCATION_ID" ]
then
echo_log "Allocate public ip success with id $ALLOCATION_ID."
# Brief pause so the new EIP is visible to the association call below.
sleep 5
else
echo_exit "Failed to allocate public ip."
fi
# Select the first ecs node to associate with a public ip address.
for node in $NODES
do
EIP_NODE=$node
break
done
# Associate the public ip to the eip node.
aliyun vpc AssociateEipAddress --AllocationId $ALLOCATION_ID --InstanceId $EIP_NODE
if [ $? -eq 0 ]
then
echo_log "Associate public ip $ALLOCATION_ID with instance $EIP_NODE success."
else
echo_exit "Failed to associate public ip $ALLOCATION_ID with instance $EIP_NODE."
fi
}
# Open the cluster's auto-created security group.
# NOTE(review): the rule authorises ALL tcp ports (1-65535) from 0.0.0.0/0 —
# i.e. the whole internet; confirm this exposure is intended.
function config_security_group {
# Sanity check: the cluster's auto-created group must exist.
aliyun ecs DescribeSecurityGroups | grep alicloud-cs-auto-created-security-group-$CLUSTER_ID
if [ $? -ne 0 ]
then
echo_exit "Failed to get the wanted security group."
fi
# Pick the first security group in our VPC whose description mentions
# "ACS Cluster" (the managed-k8s group).
SECURITY_GROUP_ID=`aliyun ecs DescribeSecurityGroups --VpcId $VPC_ID | grep -A 5 "ACS Cluster" | grep SecurityGroupId | awk -F "\"" '{print $4}' | head -1`
echo_log "Config secrity group with id $SECURITY_GROUP_ID"
aliyun ecs AuthorizeSecurityGroup --RegionId $REGION --SecurityGroupId $SECURITY_GROUP_ID --IpProtocol tcp --PortRange=1/65535 --SourceCidrIp 0.0.0.0/0 --Priority 1
if [ $? -ne 0 ]
then
echo_exit "Failed to config the security group $SECURITY_GROUP_ID."
fi
}
# Configure the cluster's intranet SLB: create http (32080) and https (32443)
# vserver groups, register every worker node as a backend, then create and
# start a TCP listener for each port.  Idempotent: skips a group that exists.
# Fixes: log-message typo "tpc" -> "tcp"; the http branch previously logged
# "success" BEFORE the listener was created — message corrected to match the
# https branch.
function config_slb {
LOADBALANCER_ID=`aliyun slb DescribeLoadBalancers --LoadBalancerName ManagedK8SSlbIntranet-$CLUSTER_ID | grep LoadBalancerId | awk -F "\"" '{print $4}'`
if [ -z "$LOADBALANCER_ID" ]
then
echo_exit "Failed to get the wanted loadbalancers."
fi
# ---- http ingress (port 32080) ----
aliyun slb DescribeVServerGroups --LoadBalancerId $LOADBALANCER_ID --RegionId $REGION | grep http_ingress
if [ $? -ne 0 ]
then
HTTP_INGRESS_VSERVER_GROUP_ID=`aliyun slb CreateVServerGroup --LoadBalancerId $LOADBALANCER_ID --RegionId $REGION --VServerGroupName http_ingress | grep VServerGroupId | awk -F "\"" '{print $4}'`
echo_log "Create http ingress vserver group success with id $HTTP_INGRESS_VSERVER_GROUP_ID."
sleep 3
for node in $NODES
do
echo_log "Add backend server $node to vserver group $HTTP_INGRESS_VSERVER_GROUP_ID."
aliyun slb AddVServerGroupBackendServers --VServerGroupId $HTTP_INGRESS_VSERVER_GROUP_ID --BackendServers "[{\"ServerId\":\"$node\", \"Weight\":\"100\",\"Type\": \"ecs\", \"Port\":\"32080\"}]"
done
sleep 3
echo_log "Create load balancer tcp listener for port 32080."
aliyun slb CreateLoadBalancerTCPListener --ListenerPort 32080 --LoadBalancerId $LOADBALANCER_ID --VServerGroupId $HTTP_INGRESS_VSERVER_GROUP_ID --BackendServerPort 32080 --Bandwidth 50
sleep 3
echo_log "Start load balancer listener with port 32080."
aliyun slb StartLoadBalancerListener --ListenerPort 32080 --LoadBalancerId $LOADBALANCER_ID
sleep 3
else
echo_log "Vserver group http_ingress already exists."
fi
# ---- https ingress (port 32443) ----
aliyun slb DescribeVServerGroups --LoadBalancerId $LOADBALANCER_ID --RegionId $REGION | grep https_ingress
if [ $? -ne 0 ]
then
HTTPS_INGRESS_VSERVER_GROUP_ID=`aliyun slb CreateVServerGroup --LoadBalancerId $LOADBALANCER_ID --RegionId $REGION --VServerGroupName https_ingress | grep VServerGroupId | awk -F "\"" '{print $4}'`
echo_log "Create https ingress vserver group success with id $HTTPS_INGRESS_VSERVER_GROUP_ID."
sleep 3
for node in $NODES
do
echo_log "Add backend server $node to vserver group $HTTPS_INGRESS_VSERVER_GROUP_ID."
aliyun slb AddVServerGroupBackendServers --VServerGroupId $HTTPS_INGRESS_VSERVER_GROUP_ID --BackendServers "[{\"ServerId\":\"$node\", \"Weight\":\"100\",\"Type\": \"ecs\", \"Port\":\"32443\"}]"
done
sleep 3
echo_log "Create load balancer tcp listener for port 32443."
aliyun slb CreateLoadBalancerTCPListener --ListenerPort 32443 --LoadBalancerId $LOADBALANCER_ID --VServerGroupId $HTTPS_INGRESS_VSERVER_GROUP_ID --BackendServerPort 32443 --Bandwidth 50
sleep 3
echo_log "Start load balancer listener with port 32443."
aliyun slb StartLoadBalancerListener --ListenerPort 32443 --LoadBalancerId $LOADBALANCER_ID
sleep 3
else
echo_log "Vserver group https_ingress already exists."
fi
}
# Write the Elasticsearch cluster creation request body to ./es.json.
# The previous version duplicated the entire 35-line heredoc twice, the two
# copies differing only in "paymentType"; this version keeps one heredoc and
# injects the payment type, so future edits cannot drift between copies.
# Globals read: PAY_TYPE (anything other than "postpaid" means "prepaid"),
#               GENERATER_NAME, ES_PASSWORD, VPC_ID, VSWITCH_ID, ZONE_ID
function create_elasticsearch_config {
rm -rf es.json
echo_log "Generate the elasticsearch cluster config file for aliyun."
# Default to prepaid unless explicitly postpaid (same as the original
# if/else on $PAY_TYPE).
local payment_type="prepaid"
if [[ $PAY_TYPE == "postpaid" ]]
then
payment_type="postpaid"
fi
cat <<EOF >>es.json
{
"description": "$GENERATER_NAME",
"nodeAmount": 3,
"paymentType": "$payment_type",
"enablePublic": false,
"esAdminPassword": "$ES_PASSWORD",
"nodeSpec": {
"spec": "elasticsearch.sn1ne.large",
"disk": 200,
"diskType": "cloud_ssd",
"diskEncryption": false
},
"networkConfig": {
"vpcId": "$VPC_ID",
"vswitchId": "$VSWITCH_ID",
"vsArea": "$ZONE_ID",
"type": "vpc"
},
"extendConfigs": [
{
"configType": "usageScenario",
"value": "general"
}
],
"esVersion": "7.7_with_X-Pack",
"haveKibana": true,
"instanceCategory": "x-pack",
"kibanaConfiguration": {
"spec": "elasticsearch.n4.small",
"amount": 1,
"disk": 0
}
}
EOF
}
# Write the filebeat.yml used to ship container/syslog logs into the
# Elasticsearch instance created by create_elasticsearch.
# Routing: events carrying index_type__ go to the data_join / raw_data /
# metrics_v2 indices; everything else lands in filebeat-7.0.1.
# Globals read: ES_INSTANCE_ID, ES_PASSWORD.
# Note: the heredoc delimiter is unquoted on purpose — $ES_INSTANCE_ID and
# $ES_PASSWORD expand, while \${path.config} stays literal for filebeat.
function create_filebeat_config {
rm -rf filebeat.yml
cat <<EOF >>filebeat.yml
filebeat.config:
modules:
path: \${path.config}/modules.d/*.yml
reload.enabled: false
processors:
- drop_fields:
fields:
- "agent.ephemeral_id"
- "agent.hostname"
- "agent.id"
- "agent.type"
- "agent.version"
- "ecs.version"
- "agent"
- "ecs"
- include_fields:
when.not:
has_fields: [ "index_type__" ]
fields:
- "host.name"
- "input.type"
- "kubernetes.container.name"
- "kubernetes.namespace"
- "kubernetes.node.name"
- "kubernetes.pod.name"
- "kubernetes.pod.uid"
- "log.file.path"
- "log.offset"
- "message"
- "stream"
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/*.log
- /var/log/messages
- /var/log/syslog
- type: docker
containers.ids:
- "*"
exclude_lines: [ '"index_type__":' ]
processors:
- add_kubernetes_metadata:
in_cluster: true
- drop_event:
when:
equals:
kubernetes.container.name: filebeat
- type: docker
containers.ids:
- "*"
processors:
- include_fields:
fields: [ "index_type__", "name", "value", "tags" ]
json.keys_under_root: true
json.overwrite_keys: true
json.ignore_decoding_error: true
http.enabled: true
http.port: 5066
output.elasticsearch:
hosts:
- http://$ES_INSTANCE_ID.elasticsearch.aliyuncs.com:9200
username: elastic
password: $ES_PASSWORD
indices:
- index: "data_join"
when.equals:
index_type__: "data_join"
- index: "raw_data"
when.equals:
index_type__: "raw_data"
- index: "metrics_v2"
when.equals:
index_type__: "metrics"
- index: "filebeat-7.0.1"
EOF
}
# Create (or reuse) the managed Elasticsearch instance described by es.json,
# block until it reports "active", then generate the matching filebeat.yml.
# Globals read: GENERATER_NAME.  Globals set: ES_INSTANCE_ID.
function create_elasticsearch {
create_elasticsearch_config
# Reuse an instance whose description matches our generated name.
ES_INSTANCE_ID=`aliyun elasticsearch ListInstance --description $GENERATER_NAME | grep instanceId | awk -F "\"" '{print $4}' | head -1`
if [ -n "$ES_INSTANCE_ID" ]
then
echo_log "Elasticsearch instance $GENERATER_NAME already exists with id $ES_INSTANCE_ID."
else
# Submit the JSON request body produced by create_elasticsearch_config.
ES_INSTANCE_ID=`aliyun elasticsearch createInstance --header "Content-Type=application/json" --body "$(cat ./es.json)" | grep instanceId | awk -F "\"" '{print $4}'`
if [ -n "$ES_INSTANCE_ID" ]
then
echo_log "Elasticsearch instance $GENERATER_NAME create success with id $ES_INSTANCE_ID."
# Poll every 30s until the instance becomes active.  `grep -v NORMAL`
# skips unrelated sub-resource status lines in the response.
STATUS=`aliyun elasticsearch DescribeInstance --InstanceId $ES_INSTANCE_ID | grep status | awk -F "\"" '{print $4}' | grep -v NORMAL | head -1`
while [ "$STATUS" != "active" ]
do
echo_log "Current elasticsearch instance status is $STATUS, loop wait until it's active."
sleep 30
STATUS=`aliyun elasticsearch DescribeInstance --InstanceId $ES_INSTANCE_ID | grep status | awk -F "\"" '{print $4}' | grep -v NORMAL | head -1`
done
else
echo_exit "Failed to create elasticsearch instance $GENERATER_NAME."
fi
fi
create_filebeat_config
}
# Install the fedlearner helm charts and the NFS-backed default PVC.
# Prerequisites (globals): MOUNT_TARGET_DOMAIN (from create_nas),
# DB_INSTANCE_ID and DB_PASSWORD (from create_db), a working kubectl/helm.
function install_fedlearner {
# Install the stack chart only once (detected via existing pods).
kubectl get pods | grep fedlearner
if [ $? -ne 0 ]
then
echo_log "Install fedlearner-stack with helm."
helm install fedlearner-stack ../../charts/fedlearner-stack --set nfs-server-provisioner.enabled=false --set nfs-client-provisioner.enabled=true --set nfs-client-provisioner.nfs.server=$MOUNT_TARGET_DOMAIN --set mariadb.enabled=false
fi
#WAITING=`kubectl get pods | grep -E "ContainerCreating|PodInitializing"`
#while [ -n "$WAITING" ]
#do
#  echo_log "Loop waiting until all the pods are running."
#  sleep 30
#  WAITING=`kubectl get pods | grep -E "ContainerCreating|PodInitializing"`
#done
# Resolve the RDS connection endpoint for the web console configuration.
DB_URL=`aliyun rds DescribeDBInstanceNetInfo --DBInstanceId $DB_INSTANCE_ID | grep ConnectionString\" | awk -F "\"" '{print $4}'`
if [ -n "$DB_URL" ]
then
kubectl get pods | grep fedlearner-operator
if [ $? -ne 0 ]
then
echo_log "Install fedlearner operator, apiserver with helm."
helm install fedlearner ../../charts/fedlearner --set fedlearner-web-console.cluster.env.DB_USERNAME=fedlearner --set fedlearner-web-console.cluster.env.DB_PASSWORD=$DB_PASSWORD --set fedlearner-web-console.cluster.env.DB_HOST=$DB_URL --set fedlearner-web-console.cluster.env.DB_PORT=3306
fi
else
echo_exit "Failed to install fedlearner-operator/api/console since db url not found."
fi
# Create the default PV/PVC pair backed by the NAS mount target, then
# remove the temporary manifest.
rm -rf fedlearner-pvc.yaml
cat <<EOF >>fedlearner-pvc.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-fedlearner-default
spec:
accessModes:
- ReadWriteMany
capacity:
storage: 10Gi
mountOptions:
- vers=3
- nolock,tcp,noresvport
nfs:
path: /
server: $MOUNT_TARGET_DOMAIN
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-fedlearner-default
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
volumeName: pv-fedlearner-default
storageClassName: nfs
EOF
kubectl apply -f fedlearner-pvc.yaml
rm -rf fedlearner-pvc.yaml
}
# Print the command-line help text for this installer on stdout.
# A single quoted heredoc replaces the original chain of echo statements;
# the emitted text is unchanged.
function usage {
cat <<'USAGE_EOF'
Usage: 
  ./install.sh access_key_id access_key_secret db_password bucket pay_type

Params:

  access_key_id: the access key id provided by aliyun, required
  access_key_secret: the access key secret provided by aliyun, required
  db_password: the database password for fedlearner account, required
  es_password: the elasticesearch password for fedlearner account, required
  bucket: the oss bucket to be created, required
  pay_type: the pay_type, default to Prepaid.
USAGE_EOF
}
# Entry point: require the four mandatory credentials, then run the full
# provisioning pipeline in dependency order (network -> db/es/nas -> k8s ->
# networking glue -> fedlearner charts).  Each step exits on fatal errors.
if [[ -z $ACCESS_KEY_ID ]] || [[ -z $ACCESS_KEY_SECRET ]] || [[ -z $DB_PASSWORD ]] || [[ -z $ES_PASSWORD ]]
then
usage
exit 1
else
install_cli
init_policy
create_oss_bucket
create_vpc
create_vswitch
create_secret
create_db
create_elasticsearch
create_nas
create_k8s
list_k8s_nodes
create_eip
config_security_group
config_slb
install_fedlearner
fi
| true
|
8b2cfe7b76cec3f33c087b40e0fb9ae61e94e661
|
Shell
|
wp4613/HI3531D_BSP
|
/resource/nova/rootfs/etc/init.d/S90init-task.sh
|
UTF-8
| 1,380
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
# Board bring-up init task (HI3531D): start telnetd, release the CDCM6208
# clock generators from reset via sysfs GPIO, wait for the FPGA DONE pin,
# then install the media/peripheral kernel modules.
# Change: the poll-loop counter now uses POSIX $(( )) arithmetic instead of
# forking `expr` on every iteration; fpga_done is quoted defensively.
NOVA_DRIVER_PATH=/usr/nova/driver
FPGA_CNT_MAX=100
FPGA_CFG_SLEEP=0.1
cnt=0
# start telnetd
/usr/sbin/telnetd &
#debugfs mount
mount -t debugfs none /sys/kernel/debug/
#insmod GPIO module
insmod ${NOVA_DRIVER_PATH}/hi_gpio.ko
#release reset of CDCM6208
# CDCM6208 U0(GPIO11_6)
echo 94 > /sys/class/gpio/export
echo out > /sys/class/gpio/gpio94/direction
echo 1 > /sys/class/gpio/gpio94/value
# CDCM6208 U1(GPIO11_7)
echo 95 > /sys/class/gpio/export
echo out > /sys/class/gpio/gpio95/direction
echo 1 > /sys/class/gpio/gpio95/value
#configure FPGA until finishing configuring
# FPGA DONE(GPIO11_5)
echo 93 > /sys/class/gpio/export
echo in > /sys/class/gpio/gpio93/direction
/usr/nova/bin/enable_fpga_cfg.sh
# Poll the DONE pin; time out after FPGA_CNT_MAX * FPGA_CFG_SLEEP seconds.
while [ $cnt -lt ${FPGA_CNT_MAX} ];do
fpga_done=`cat /sys/class/gpio/gpio93/value`
if [ "${fpga_done}" -eq 1 ];then
break;
fi
sleep ${FPGA_CFG_SLEEP}
cnt=$((cnt + 1))
done
# judge whether FPGA configured successfully
if [ $cnt -ge ${FPGA_CNT_MAX} ];then
echo "FPGA configured failed."
else
echo "FPGA configured OK."
fi
#install drivers
cd ${NOVA_DRIVER_PATH}
# Extract the OS memory size (first cmdline token, value after '=', "M" removed).
osmem=$(cat /proc/cmdline | cut -d ' ' -f 1 | cut -d '=' -f 2 | sed 's/M//g')
./load3531d_mvr_card -i -osmem $osmem
insmod hi_stub.ko
insmod hi_comif.ko
#insmod hi_fpga_cfg.ko
insmod hi_sysinit.ko
insmod hi_i2cdev.ko
insmod hi_sil9136.ko
insmod hi_spidev.ko
cd -
#other tasks
exit 0
| true
|
83bae22cd3a2e1ecf2bf472b91dc5f0f37696289
|
Shell
|
wang-shun/beidou-cron
|
/src/main/shell/bin/loadSiteSize.sh
|
GB18030
| 1,496
| 2.75
| 3
|
[] |
no_license
|
#!/bin/sh
# Load a sequence of optional configuration files; each missing file only
# prints a notice.  These define LOG_PATH, ROOT_PATH, CUR_CLASSPATH,
# username/password, the server lists and alert_return used below.
# NOTE(review): `source` is a bashism under #!/bin/sh — works where sh is
# bash, not under dash; confirm the deployment shell.
CONF_SH="/home/work/.bash_profile"
[ -f "${CONF_SH}" ] && source $CONF_SH || echo "not exist ${CONF_SH} "
CONF_SH=../conf/common.conf
[ -f "${CONF_SH}" ] && source $CONF_SH || echo "not exist ${CONF_SH} "
CONF_SH=../conf/classpath_rpc.conf
[ -f "${CONF_SH}" ] && source $CONF_SH || echo "not exist ${CONF_SH} "
CONF_SH=../conf/rpc.conf
[ -f "${CONF_SH}" ] && source $CONF_SH || echo "not exist ${CONF_SH} "
CONF_SH=alert.sh
[ -f "${CONF_SH}" ] && source $CONF_SH || echo "not exist ${CONF_SH} "
program=loadSiteSize.sh
reader_list=yang_yun
LOG_FILE=${LOG_PATH}/loadSiteSize.log
mkdir -p ${ROOT_PATH}
mkdir -p ${LOG_PATH}
# Invoke the loadSiteSize RPC on one web server ($1 = host, fixed port 8080)
# via the Hessian client, then report the exit status through alert_return.
# NOTE(review): $msg below is GB18030-encoded Chinese that was garbled by a
# charset conversion; do not "fix" the bytes without the original text.
function call() {
msg="ִԶ̵-վߴڴfailed($1)"
url=http://$1:8080/rpc/loadSiteSize
java -cp ${CUR_CLASSPATH} com.baidu.ctclient.HessianRpcClientUsingErrorCode $url $username $password
alert_return $? "${msg}"
}
# Same as call(), but $1 already contains host:port (used for the API
# server lists, whose entries carry explicit ports).
# NOTE(review): $msg is GB18030 mojibake — left byte-identical on purpose.
function call_noport() {
msg="ִԶ̵-վߴڴ("$1")"
url=http://$1/rpc/loadSiteSize
java -cp ${CUR_CLASSPATH} com.baidu.ctclient.HessianRpcClientUsingErrorCode $url $username $password
alert_return $? "${msg}"
}
# Fan the RPC out to every configured server group.  The *_LIST arrays come
# from the sourced conf files; web/exp hosts use the fixed :8080 port while
# the API lists already embed host:port.
for server in `echo ${WEB_SERVER_IP_LIST[@]}`; do
call $server;
done
for server in `echo ${API_SERVER_IP_PORT_LIST[@]}`; do
call_noport $server;
done
for server in `echo ${APIV2_SERVER_IP_PORT_LIST[@]}`; do
call_noport $server;
done
#add by kanghongwei for exp
for server in `echo ${EXP_SERVER_IP_LIST[@]}`; do
call $server;
done
| true
|
19264ba6cf916220bd665dd7d9575874104d38d5
|
Shell
|
webis-de/ICWSM-17
|
/wikipedia-reverts-geolocating/src-shell/make-geolocation-decision-tree.sh
|
UTF-8
| 3,175
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Render the geolocation decision tree for one Wikipedia language edition:
# fills a TikZ template with counts from decision-tree-counts.txt and
# compiles it to PDF (see functions below).  Requires one argument, the
# two-letter wiki country code.
if [ -z "$1" ];then
echo "Usage:"
echo "  $0 <country>"
echo "With"
echo "  country"
echo "    The two-letter country code as used by Wikipedia (en, de, fr, ...)"
exit 1
fi
country=$1
shell_source_dir=`dirname $0`
# config.sh defines $detection_dir and $version used below.
source $shell_source_dir/config.sh
source $shell_source_dir/../$detection_dir/src-shell/config.sh
wiki=$country"wiki"
# Long country name: second-to-last fields of the matching countries.txt row.
country_long=$(grep "^$country " $shell_source_dir/../$detection_dir/src-shell/countries.txt | cut -d" " -f2-)
dir=$shell_source_dir/../data/$wiki-$version-results
# Per-run scratch directory (suffix $$ = this shell's PID).
working_dir=$dir/tmp-$$
mkdir -p $working_dir
cp $shell_source_dir/decision-tree-counts/* $working_dir
input=$dir/decision-tree-counts.txt
coordinate_file=$working_dir/geolocation-decision-tree-input.tex
frame_file_name=geolocation-decision-tree.tex
compiled_file=$working_dir/geolocation-decision-tree.pdf
output=$dir/geolocation-decision-tree-$country.pdf
# Format an integer with a comma before every group of three digits,
# counting from the right (e.g. 1234567 -> 1,234,567), for LaTeX output.
function latex_number() {
    local remaining=$1
    local grouped=""
    # Peel three characters at a time off the right-hand end.
    while [ ${#remaining} -gt 3 ]; do
        grouped=",${remaining: -3}${grouped}"
        remaining=${remaining:0:${#remaining}-3}
    done
    echo ${remaining}${grouped}
}
# Substitute one node's counts into the TikZ template: TEXT<letter>1 gets
# the comma-grouped total, TEXT<letter>2 the comma-grouped reverted count.
# $1 = total, $2 = reverted, $3 = single-letter placeholder id.
# Edits $coordinate_file in place.
function replace_values() {
local total=$(latex_number $1)
local reverted=$(latex_number $2)
local placeholder_letter=$3
sed -i "s/TEXT"$placeholder_letter"1/$total/" $coordinate_file
sed -i "s/TEXT"$placeholder_letter"2/$reverted/" $coordinate_file
}
# Print the "total<TAB>reverted" counts for one decision-tree key from
# $input.  An empty key returns the file's first line (the overall totals);
# otherwise rows are matched by key at line start and columns 2-3 are cut.
function get_values_by_key() {
local key=$1
if [ "$key" == "" ];then
head -1 $input
else
cat $input \
| grep "^[[:blank:]]*$key[[:blank:]]" \
| cut -f2-3
fi
}
# Look up the (total, reverted) counts for a decision-tree key and
# substitute them into the template via replace_values.
# $1 = key in $input, $2 = single-letter placeholder id.
function replace_values_by_key() {
    local tree_key=$1
    local letter=$2
    local counts
    counts=($(get_values_by_key "$tree_key"))
    replace_values ${counts[0]} ${counts[1]} $letter
}
# Sum the total (column 2) and reverted (column 3) counts over all rows of
# $input whose last tab-separated column equals the given class label, and
# print them as "total<TAB>reverted".
# Fix: the class is now handed to awk via -v instead of being spliced into
# the program text, so quotes/special characters in $1 cannot break or
# inject into the awk script; the file is read directly (no cat fork).
function get_values_by_class() {
local class=$1
awk -F"\t" -v cls="$class" '{
if ($NF == cls) {
sum_total += $2
sum_reverted += $3
}
} END {
print sum_total"\t"sum_reverted
}' "$input"
}
# Look up the aggregated counts for one leaf class ("true"/"false") and
# substitute them into the template via replace_values.
# $1 = class label, $2 = single-letter placeholder id.
function replace_values_by_class() {
    local leaf_class=$1
    local letter=$2
    local counts
    counts=($(get_values_by_class "$leaf_class"))
    replace_values ${counts[0]} ${counts[1]} $letter
}
# Fill every placeholder in the TikZ template: one letter per decision-tree
# node (keys A-L, N) plus the two leaf classes (M = true, O = false).
function replace_all() {
replace_values_by_key "" A
replace_values_by_key "RIR = true" B
replace_values_by_key "RIR = false" C
replace_values_by_key "IPlocation = true" D
replace_values_by_key "IPlocation = false" E
replace_values_by_key "inconsistent = false" F
replace_values_by_key "inconsistent = true" G
replace_values_by_key "1 time zone = true" N
replace_values_by_key "1 time zone = false" H
replace_values_by_key "time zone consistent = true" I
replace_values_by_key "time zone consistent = false" J
replace_values_by_key "locally time zone consistent = true" K
replace_values_by_key "locally time zone consistent = false" L
replace_values_by_class "true" M
replace_values_by_class "false" O
}
# Fill the template, compile it twice (second pass resolves TikZ/LaTeX
# cross-references), publish the PDF and drop the scratch directory.
replace_all
pushd $working_dir
pdflatex $frame_file_name
pdflatex $frame_file_name
popd
cp $compiled_file $output
rm -rf $working_dir
| true
|
c02a12db8aba009a7cba01c9705166a9d68d8c07
|
Shell
|
sggdv/linuxops
|
/src/shellscripts/apache.sh
|
UTF-8
| 508
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate a static HTML fragment reporting the local Apache (httpd)
# version and loaded modules, served by the web UI as a status page.
echo "starting apache.sh"
WEB_FILE=../../public/templates/apache.html
rm -f "$WEB_FILE"
# Emit the whole report through one grouped redirection instead of
# appending line by line; the produced HTML is identical.
{
  echo "<p>主机名: <strong>$(hostname)</strong></p>"
  echo "<p>检测时间: <strong>$(date)</strong></p>"
  echo "<hr><h1>版本信息</h1><p><code>service httpd -v</code></p>"
  echo "<pre>$(service httpd -v)</pre>"
  echo "<hr><h1>已加载的模块</h1><p><code>service httpd -M</code></p>"
  echo "<pre>$(service httpd -M)</pre>"
} >> "$WEB_FILE"
| true
|
4e2ed898610407635311ca9248aaa3f1285ff43c
|
Shell
|
PandikKumar/dell-devops
|
/ci-scripts/test/unit/pingfederate/configure-delegated-admin/12-sessions-tests.sh
|
UTF-8
| 1,025
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Source the script we're testing
# Suppress env vars noise in the test output
. "${HOOKS_DIR}"/utils.lib.sh > /dev/null
. "${HOOKS_DIR}"/util/configure-delegated-admin-utils.sh > /dev/null
# Mock is_multi_cluster to return exit status 0 (shell "success"/true).
# NOTE(review): the previous comment said this "sets is_multi_cluster to
# false", but `return 0` is truthy for `if is_multi_cluster` callers —
# confirm which topology these tests intend to simulate.
is_multi_cluster() {
return 0
}
# Sad path: when make_api_request fails while creating the Delegated Admin
# session, set_session must propagate the failure (exit status 1).
testSadPathCreateSession() {
# Mock get_session as a failure so set_session takes the create path.
get_session() {
return 1
}
# Mock make_api_request as a failure: set_session is expected to fail
# when it cannot create the session for DA.
make_api_request() {
return 1
}
set_session > /dev/null 2>&1
exit_code=$?
assertEquals 1 ${exit_code}
}
# Happy path: with no existing session and a successful API call,
# set_session must succeed (exit status 0).
testHappyPathCreateSession() {
# Mock get_session as a failure so set_session takes the create path.
get_session() {
return 1
}
# Mock make_api_request as a success for creating the session for DA.
make_api_request() {
return 0
}
set_session > /dev/null 2>&1
exit_code=$?
assertEquals 0 ${exit_code}
}
# Load shunit2 last so it discovers and runs the test* functions above.
# load shunit
. ${SHUNIT_PATH}
| true
|
2e9d36639cb77c70fd19811ba18b4039396da21d
|
Shell
|
sambowler/dotfiles
|
/bash/functions/git.bash
|
UTF-8
| 911
| 3.59375
| 4
|
[] |
no_license
|
# Remove all deleted files: stage the removal of every file git reports as
# deleted from the working tree.  Uses `git ls-files --deleted -z` instead
# of scraping `git status` output, so it survives status-format changes,
# localisation, and file names containing whitespace.
function grm {
  git ls-files --deleted -z | while IFS= read -r -d '' f; do git rm -- "$f"; done
}
# Pull all git repo's within the current folder
# NOTE(review): bare `-depth 1` is BSD find syntax (macOS); GNU find wants
# `-mindepth 1 -maxdepth 1` — confirm the target platform before reuse.
# Pulls origin/master of every immediate child directory via explicit
# --git-dir/--work-tree so no cd is needed.
function gpa { find . -type d -depth 1 -exec git --git-dir={}/.git --work-tree=$PWD/{} pull origin master \;; }
# Copy $1 to $2 while drawing a progress bar: strace reports each write()
# performed by cp, and awk accumulates the written byte counts against the
# source size to render "NN% [===>   ]".
# NOTE(review): Linux/GNU only — relies on strace and `stat -c '%s'`
# (BSD/macOS stat uses -f); progress redraws every 10th write via \r.
cp_p() {
strace -q -ewrite cp -- "${1}" "${2}" 2>&1 \
| awk '{
count += $NF
if (count % 10 == 0) {
percent = count / total_size * 100
printf "%3d%% [", percent
for (i=0;i<=percent;i++)
printf "="
printf ">"
for (i=percent;i<100;i++)
printf " "
printf "]\r"
}
}
END { print "" }' total_size=$(stat -c '%s' "${1}") count=0
}
}
# Delete all .svn folders within the current folder (recursively).
# -prune stops find from descending into a .svn directory after matching
# it, so the traversal never races with the rm -rf that just deleted its
# children ("No such file or directory" noise in the original version).
# -print keeps the original behaviour of listing each removed directory.
function dsvn { find . -type d -name '.svn' -prune -print -exec rm -rf {} \; ; }
| true
|
4da2b53651cee6bfe098a93aedfe7032c2dbd393
|
Shell
|
ForrestKnight/ODUCS418F17
|
/DockerThings/run.she
|
UTF-8
| 814
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: apply PHP upload limits, initialise MySQL on first
# run, optionally import a seed dump, then exec supervisord so it becomes
# the container's foreground process.
# Change: variable expansions inside tests/messages are quoted so paths
# with spaces or empty values cannot mis-parse.
VOLUME_HOME="/var/lib/mysql"
# Push the PHP_UPLOAD_MAX_FILESIZE / PHP_POST_MAX_SIZE env values into php.ini.
sed -ri -e "s/^upload_max_filesize.*/upload_max_filesize = ${PHP_UPLOAD_MAX_FILESIZE}/" \
-e "s/^post_max_size.*/post_max_size = ${PHP_POST_MAX_SIZE}/" /etc/php5/apache2/php.ini
# First run: the data volume has no mysql/ system database yet.
if [[ ! -d "$VOLUME_HOME/mysql" ]]; then
echo "=> An empty or uninitialized MySQL volume is detected in $VOLUME_HOME"
echo "=> Installing MySQL ..."
mysql_install_db > /dev/null 2>&1
echo "=> Done!"
/create_mysql_admin_user.sh
else
echo "=> Using an existing volume of MySQL"
fi
#sleep 5
mysqlFile="/app/milestone2dump.sql"
if [ -f "$mysqlFile" ]; then
echo "Importing MySQL data from $mysqlFile"
# Backgrounded so supervisord (and mysqld under it) can start while the
# import script waits for the server to come up.
/import_sql.sh &
# mysql -u root -p < $mysqlFile
else
echo "I could not find $mysqlFile"
ls -l /app
fi
#ls -l /var/www/html/*
# Replace this shell with supervisord; it must stay PID 1's foreground job.
exec supervisord -n
| true
|
25395e54285896c9402c81344d0dc56888eb5461
|
Shell
|
imaxct/SH
|
/exercises/ex_ops.sh
|
UTF-8
| 395
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Compute the greatest common divisor of two non-negative integers given on
# the command line, using the Euclidean algorithm.
# Fixes vs. the original:
#  - the validation regex is anchored (^[0-9]+$); the old unanchored
#    [0-9]{1,} accepted inputs like "12a",
#  - the gcd is returned via stdout instead of the exit status, which is
#    truncated modulo 256 and silently corrupted results above 255,
#  - gcd(a, 0) no longer divides by zero: the loop stops when y reaches 0.
ARGS=2
E_BADARGS=85
if [ $# -ne "$ARGS" ]; then
echo "Usage: `basename $0` number1 number2"
exit $E_BADARGS
fi
if [[ ! $1 =~ ^[0-9]+$ || ! $2 =~ ^[0-9]+$ ]]; then
echo "arguments are not numbers"
exit $E_BADARGS
fi
# Euclid: repeatedly replace (x, y) by (y, x mod y) until y is 0.
gcd(){
local x=$1
local y=$2
local r
while [ "$y" -ne 0 ]; do
r=$((x % y))
x=$y
y=$r
done
echo "$x"
}
result=$(gcd $1 $2)
echo "The gcd of $1 and $2 is $result"
exit 0
| true
|
4fe4af98c7c4a67a77b2055489d260b602a50bd2
|
Shell
|
wicadmin/Goldenorb
|
/luci-app-hotspot/files/usr/lib/hotspot/band.sh
|
UTF-8
| 546
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# OpenWrt hotspot helper: switch the wwan uplink interface to the 2.4 GHz
# ($1 = "1") or 5 GHz radio configured in travelmate, reload wifi, and make
# sure the travelmate connection manager is running.
log() {
logger -t "band change" "$@"
}
BAND=$1
# NOTE(review): $BAND is unquoted — calling the script with no argument
# makes this test malformed (falls through to the 5 GHz branch); confirm
# callers always pass 1 or 0.
if [ $BAND = "1" ]; then
WW=$(uci get travelmate.global.radio24)
else
WW=$(uci get travelmate.global.radio5)
fi
# Point wwan at the chosen radio with a placeholder SSID, disabled until
# travelmate picks a real network, then apply the config.
uci set wireless.wwan.device=$WW
uci set wireless.wwan.ssid="Changing Wifi Radio"
uci set wireless.wwan.encryption="none"
uci set wireless.wwan.disabled="1"
uci commit wireless
wifi
# Start travelmate.sh only if no instance is already running (busybox ps
# has no -ef/pgrep here, hence the grep -v grep pipeline).
result=`ps | grep -i "travelmate.sh" | grep -v "grep" | wc -l`
if [ $result -ge 1 ]
then
logger -t TRAVELMATE-DEBUG "Travelmate already running"
else
/usr/lib/hotspot/travelmate.sh &
fi
| true
|
a0f03c2065d1f0c1ad899eaf0de466ddae1cc7f5
|
Shell
|
dinos80152/dotfiles
|
/.oh-my-zsh/custom/powerlevel9k.sh
|
UTF-8
| 3,251
| 2.890625
| 3
|
[] |
no_license
|
# powerlevel9k tonylambiris powerlevel9k themes
# Print a zero-padded random ANSI 256-colour code in the range 20-230
# (skips the extreme dark/light codes), e.g. "045".  No trailing newline.
powerlevel9k_random_color(){
local code
# $(( )) replaces the deprecated $[ ] arithmetic syntax.
code=$(( RANDOM % 211 + 20 )) # random between 20-230
printf "%03d" $code
}
# Prompt segment: print the current wifi signal strength (from nmcli)
# coloured green (>75), yellow, or red (<50), followed by the wifi glyph.
# NOTE(review): when nmcli is absent or no network is active, $signal is
# empty — in zsh arithmetic [[ ]] that compares as 0 (red); the same code
# would error under bash.  zsh-only, matching this theme file.
zsh_wifi_signal(){
local signal=$(nmcli -t device wifi | grep '^*' | awk -F':' '{print $6}')
local color="yellow"
[[ $signal -gt 75 ]] && color="green"
[[ $signal -lt 50 ]] && color="red"
echo -n "%F{$color}$signal \uf1eb" # \uf1eb is 
}
# Configure the powerlevel9k theme only when it is the active oh-my-zsh
# theme: segment layout (SSH-aware left prompt), two-line prompt frame,
# and the colour palette for battery and VCS states.
if [ "$ZSH_THEME" = "powerlevel9k/powerlevel9k" ]; then
    # General config
    POWERLEVEL9K_MODE='nerdfont-complete'
    POWERLEVEL9K_INSTALLATION_PATH=$ZSH/custom/themes/powerlevel9k
    POWERLEVEL9K_CUSTOM_WIFI_SIGNAL="zsh_wifi_signal"

    # Prompts
    # system: background_jobs battery context date dir dir_writable disk_usage history host ip vpn_ip public_ip load os_icon ram root_indicator status swap time user vi_mode ssh
    # extesino: vcs aws kubecontext custom_command command_execution_time detect_virt newline
    # for demo:
    # POWERLEVEL9K_LEFT_PROMPT_ELEMENTS=(os_icon user host dir_writable dir virtualenv vcs)
    # POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS=(status command_execution_time history root_indicator ip custom_wifi_signal battery disk_usage ram swap load background_jobs time)
    # Show user@host context only when this is a remote (SSH) session.
    if [ -n "$SSH_CLIENT" ] || [ -n "$SSH_TTY" ]; then
        POWERLEVEL9K_LEFT_PROMPT_ELEMENTS=(os_icon ssh context dir virtualenv vcs)
    else
        POWERLEVEL9K_LEFT_PROMPT_ELEMENTS=(os_icon user dir_writable dir virtualenv vcs)
    fi
    POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS=(status command_execution_time root_indicator ip background_jobs time)

    # Shorten deep directories to 5 elements with a ".." delimiter.
    POWERLEVEL9K_SHORTEN_DIR_LENGTH=5
    POWERLEVEL9K_SHORTEN_DELIMITER=..
    POWERLEVEL9K_LEFT_SEGMENT_SEPARATOR='\uE0B4'
    POWERLEVEL9K_RIGHT_SEGMENT_SEPARATOR='\uE0B6'
    # Two-line prompt: segments on line 1, input on a framed line 2.
    POWERLEVEL9K_PROMPT_ON_NEWLINE=true
    POWERLEVEL9K_RPROMPT_ON_NEWLINE=false
    # POWERLEVEL9K_DISABLE_RPROMPT=true
    POWERLEVEL9K_PROMPT_ADD_NEWLINE=true
    POWERLEVEL9K_PROMPT_ADD_NEWLINE_COUNT=1
    POWERLEVEL9K_MULTILINE_FIRST_PROMPT_PREFIX="╭"
    POWERLEVEL9K_MULTILINE_LAST_PROMPT_PREFIX="╰\uF460\uF460\uF460 "

    # Colors
    POWERLEVEL9K_VIRTUALENV_BACKGROUND=107
    POWERLEVEL9K_VIRTUALENV_FOREGROUND='white'
    POWERLEVEL9K_OS_ICON_BACKGROUND='white'
    POWERLEVEL9K_OS_ICON_FOREGROUND='black'
    POWERLEVEL9K_TIME_BACKGROUND='white'
    POWERLEVEL9K_TIME_FOREGROUND='black'
    # POWERLEVEL9K_COLOR_SCHEME='light'

    # Battery colors
    POWERLEVEL9K_BATTERY_CHARGING='107'
    POWERLEVEL9K_BATTERY_CHARGED='blue'
    POWERLEVEL9K_BATTERY_LOW_THRESHOLD='50'
    POWERLEVEL9K_BATTERY_LOW_COLOR='red'
    POWERLEVEL9K_BATTERY_CHARGED_BACKGROUND='blue'
    POWERLEVEL9K_BATTERY_CHARGED_FOREGROUND='white'
    POWERLEVEL9K_BATTERY_CHARGING_BACKGROUND='107'
    POWERLEVEL9K_BATTERY_CHARGING_FOREGROUND='white'
    POWERLEVEL9K_BATTERY_LOW_BACKGROUND='red'
    POWERLEVEL9K_BATTERY_LOW_FOREGROUND='white'
    POWERLEVEL9K_BATTERY_DISCONNECTED_FOREGROUND='white'
    POWERLEVEL9K_BATTERY_DISCONNECTED_BACKGROUND='214'

    # VCS colors
    POWERLEVEL9K_VCS_CLEAN_FOREGROUND='cyan'
    POWERLEVEL9K_VCS_CLEAN_BACKGROUND='black'
    POWERLEVEL9K_VCS_UNTRACKED_FOREGROUND='white'
    POWERLEVEL9K_VCS_UNTRACKED_BACKGROUND='red'
    POWERLEVEL9K_VCS_MODIFIED_FOREGROUND='black'
    POWERLEVEL9K_VCS_MODIFIED_BACKGROUND='yellow'
fi
| true
|
9991e6d81aa4d9c59825facd3f1ada36fbd00264
|
Shell
|
LLNL/serac
|
/scripts/llnl/build_src.py
|
UTF-8
| 6,665
| 3.28125
| 3
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
"exec" "python3" "-u" "-B" "$0" "$@"
# Copyright (c) 2019-2023, Lawrence Livermore National Security, LLC and
# other Serac Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: (BSD-3-Clause)
"""
file: build_src.py
description:
Builds Serac with the host-configs for the current machine.
"""
from common_build_functions import *
from optparse import OptionParser
def parse_args():
    """Parse command-line options and return them as a plain dict.

    Exits with status 1 when the mutually exclusive --host-config and
    --automation-mode options are combined.
    """
    # (flags, add_option keyword arguments) for every supported option.
    option_specs = [
        # Location of source directory to build
        (("-d", "--directory"),
         dict(dest="directory",
              default="",
              help="Directory of source to be built (Defaults to current)")),
        # Whether to build a specific hostconfig
        (("--host-config",),
         dict(dest="hostconfig",
              default="",
              help="Specific host-config file to build (Tries multiple known paths to locate given file)")),
        # Extra cmake options to pass to config build
        (("--extra-cmake-options",),
         dict(dest="extra_cmake_options",
              default="",
              help="Extra cmake options to add to the cmake configure line")),
        (("--automation-mode",),
         dict(action="store_true",
              dest="automation",
              default=False,
              help="Toggle automation mode which uses env $HOST_CONFIG then $SYS_TYPE/$COMPILER if found")),
        (("--skip-install",),
         dict(action="store_true",
              dest="skip_install",
              default=False,
              help="Skip testing install target which does not work in some configurations (codevelop)")),
        (("-v", "--verbose"),
         dict(action="store_true",
              dest="verbose",
              default=False,
              help="Output logs to screen as well as to files")),
    ]

    parser = OptionParser()
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)

    # Positional leftovers are deliberately ignored; a dict is returned so
    # callers can also build the options without optparse.
    parsed, _extras = parser.parse_args()
    opts = vars(parsed)

    # Ensure correctness
    if opts["automation"] and opts["hostconfig"] != "":
        print("[ERROR: automation and host-config modes are mutually exclusive]")
        sys.exit(1)

    return opts
def main():
    """Build Serac host-configs; returns the build result (0 on success).

    Source-directory precedence: --directory flag, then $UBERENV_PREFIX,
    then the repo default.  Fix: the original checked UBERENV_PREFIX first,
    but the trailing ``else`` unconditionally overwrote it with
    get_repo_dir() whenever --directory was absent, so the environment
    variable never actually took effect.
    """
    opts = parse_args()

    # Determine source directory to be built
    if opts["directory"] != "":
        repo_dir = opts["directory"]
        if not os.path.isdir(repo_dir):
            print("[ERROR: Given command line variable '--directory' is not a valid directory]")
            print("[    '--directory' = %s]" % repo_dir)
            return 1
    elif os.environ.get("UBERENV_PREFIX") is not None:
        repo_dir = os.environ["UBERENV_PREFIX"]
        if not os.path.isdir(repo_dir):
            print("[ERROR: Given environment variable 'UBERENV_PREFIX' is not a valid directory]")
            print("[    'UBERENV_PREFIX' = %s]" % repo_dir)
            return 1
    else:
        repo_dir = get_repo_dir()

    # Capture cwd before the try so the finally-restore can never hit an
    # unbound name.
    original_wd = os.getcwd()
    try:
        os.chdir(repo_dir)
        timestamp = get_timestamp()

        # Default to build all SYS_TYPE's host-configs in host-config/
        build_all = not opts["hostconfig"] and not opts["automation"]
        if build_all:
            res = build_and_test_host_configs(repo_dir, timestamp, False,
                                              opts["verbose"], opts["extra_cmake_options"],
                                              opts["skip_install"])
        # Otherwise try to build a specific host-config
        else:
            # Command-line arg has highest priority
            if opts["hostconfig"]:
                hostconfig = opts["hostconfig"]
            # Otherwise reconstruct "<host>-<sys_type>-<compiler>.cmake"
            # from the environment (automation / CI mode).
            elif opts["automation"]:
                if not "SYS_TYPE" in os.environ:
                    print("[ERROR: Automation mode required 'SYS_TYPE' environment variable]")
                    return 1
                if not "COMPILER" in os.environ:
                    print("[ERROR: Automation mode required 'COMPILER' environment variable]")
                    return 1
                import socket
                hostname = socket.gethostname()
                # Strip trailing node numbers (e.g. "quartz386" -> "quartz").
                hostname = hostname.rstrip('0123456789')
                sys_type = os.environ["SYS_TYPE"]
                # Remove everything including and after the last hyphen
                sys_type = sys_type.rsplit('-', 1)[0]
                compiler = os.environ["COMPILER"]
                compiler = compiler.rsplit('-', 1)[0]
                hostconfig = "%s-%s-%s.cmake" % (hostname, sys_type, compiler)

            # First try with where uberenv generates host-configs.
            hostconfig_path = os.path.join(repo_dir, hostconfig)
            if not os.path.isfile(hostconfig_path):
                print("[INFO: Looking for hostconfig at %s]" % hostconfig_path)
                print("[WARNING: Spack generated host-config not found, trying with predefined]")

                # Then look into project predefined host-configs.
                hostconfig_path = os.path.join(repo_dir, "host-configs", hostconfig)
                if not os.path.isfile(hostconfig_path):
                    print("[INFO: Looking for hostconfig at %s]" % hostconfig_path)
                    print("[WARNING: Predefined host-config not found, trying with Docker]")

                    # Otherwise look into project predefined Docker host-configs.
                    hostconfig_path = os.path.join(repo_dir, "host-configs", "docker", hostconfig)
                    if not os.path.isfile(hostconfig_path):
                        print("[INFO: Looking for hostconfig at %s]" % hostconfig_path)
                        print("[WARNING: Predefined Docker host-config not found]")
                        print("[ERROR: Could not find any host-configs in any known path. Try giving fully qualified path.]")
                        return 1

            test_root = get_build_and_test_root(repo_dir, timestamp)
            os.mkdir(test_root)
            res = build_and_test_host_config(test_root, hostconfig_path, opts["verbose"], opts["extra_cmake_options"],
                                             opts["skip_install"])

    finally:
        os.chdir(original_wd)

    return res
if __name__ == "__main__":
    # Propagate main()'s integer result as the process exit status.
    raise SystemExit(main())
| true
|
f8e79c35c016421c22a84c8e796073a1bcdf8e57
|
Shell
|
MD-Studio/cerise-mdstudio-base
|
/api/mdstudio/files/energies/call_getenergies.sh
|
UTF-8
| 269
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Activate the bundled miniconda environment and run getEnergies.py to
# extract energies into energy.ene.
#
# Usage: call_getenergies.sh <CERISE_API_FILES_DIR> [getEnergies args...]

CERISE_API_FILES="$1"
# remove the CERISE_API_FILES from the input array; the rest is forwarded
shift

source "$CERISE_API_FILES/miniconda/bin/activate" root

# Run script. "$@" (instead of the original unquoted $*) keeps arguments
# that contain whitespace intact.
"$CERISE_API_FILES/miniconda/bin/python2.7" "$CERISE_API_FILES/energies/getEnergies.py" energy -o energy.ene "$@"
| true
|
a21e89896f65b9588e6f588bdb4693416a3e21f8
|
Shell
|
fernandoPalaciosGit/dotfiles
|
/zsh/envs/brew
|
UTF-8
| 281
| 3
| 3
|
[] |
no_license
|
# Homebrew environment setup (zsh): only applies when a user-local brew exists.
if [[ -a ~/.homebrew/bin/brew ]]; then
path=($HOME/.homebrew/bin $path)
path=($(brew --prefix)/sbin $path)
manpath=($(brew --prefix)/share/man $manpath)

# Homebrew Python setup
# print(prefix) is valid under both Python 2 and 3; the original
# "print prefix" is a SyntaxError under Python 3.
path=($(python -c "from sys import prefix; print(prefix)")/bin $path)
fi

# vim: filetype=zsh
| true
|
22d02f0e15b2e4bfc9e25daaad2572dac5475fc5
|
Shell
|
Sincere1One/myhktools
|
/mkInstall.sh
|
UTF-8
| 499
| 2.8125
| 3
|
[] |
no_license
|
# Generate install.sh containing "npm i <pkg>" lines for every globally
# installed module that is require()'d somewhere in this tree, then commit it.

# -f: do not fail when the temp file does not exist yet (first run)
rm -f ins.tmp
find . -name "*.js" -exec grep -h -R -Eo "require\(['\"]([^\.][^'\"\/]+?)['\"]\)" {} \; >>ins.tmp
# Strip quotes from the captured module names and de-duplicate.
grep -Eo "['\"]([^'\"\.\/]+?)['\"]" ins.tmp |sed 's/"//g'|sed "s/'//g"|sort|uniq >>ins1.tmp
rm -f ins.tmp
# List globally installed modules and keep only the required ones.
ls -1 /usr/local/lib/node_modules/ >ins3.tmp
grep -F -f ins3.tmp ins1.tmp| sort | uniq > ins4.tmp
# The redirection creates install.sh; the original chmod 777 before this
# point failed on a fresh checkout because the file did not exist yet.
cat ins4.tmp | awk '{print "npm i "$1}' > install.sh
rm -f ins1.tmp ins4.tmp ins3.tmp
chmod 555 install.sh
git add install.sh
git commit -m "update install.sh" .
| true
|
c6ca7d4c818bd86d09cca8238d31de496a203b90
|
Shell
|
flash-ai-fydp/auto-gfqg
|
/src/main/script/modified_demo-word.sh
|
UTF-8
| 1,078
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Clone dav/word2vec, build it, download the text8 corpus if needed, and
# train word vectors, saving them in text format (-binary 0).

# DEV is the directory where all of your projects are
cd "$DEV" || { echo "error: \$DEV is not set or not a directory" >&2; exit 1; }

# clone and move into project
git clone git@github.com:dav/word2vec.git
cd word2vec || exit 1

# pasted modified "script/demo-word.sh" script that
# doesn't do the word distance demonstration and
# saves the word vectors in text format

DATA_DIR=./data
BIN_DIR=./bin
SRC_DIR=./src

TEXT_DATA=$DATA_DIR/text8
ZIPPED_TEXT_DATA="${TEXT_DATA}.zip"
VECTOR_DATA=$DATA_DIR/text8-vector.bin

# build all programs
pushd ${SRC_DIR} && make; popd

if [ ! -e "$VECTOR_DATA" ]; then
  if [ ! -e "$TEXT_DATA" ]; then
    if [ ! -e "$ZIPPED_TEXT_DATA" ]; then
      wget http://mattmahoney.net/dc/text8.zip -O "$ZIPPED_TEXT_DATA"
    fi
    unzip "$ZIPPED_TEXT_DATA"
    mv text8 "$TEXT_DATA"
  fi

  echo -----------------------------------------------------------------------------------------------------
  echo -- Training vectors...
  time $BIN_DIR/word2vec -train "$TEXT_DATA" -output "$VECTOR_DATA" -cbow 0 -size 200 -window 5 -negative 0 -hs 1 -sample 1e-3 -threads 12 -binary 0  # the key is `binary 0` means "save as text"
fi
| true
|
9e0c3a3a6b797a4b3e9aaee822316e3ee36c2c13
|
Shell
|
tanghuan0827/bigdata01
|
/temp/other2/st4.sh
|
UTF-8
| 2,196
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# st4: convert extracted table metadata into PostgreSQL external-table DDL,
# one directory per table, and aggregate everything into total.sql.
# Relies on variables from st3.sh: $source__txt, $pre_name, $pre_name_temp, ${arr[@]}.
echo " begin st4"
source st3.sh
# Field type replacement: column 3 is compared as a whole value (not a
# substring) and mapped varchar->text, int->integer, long->bigint.
awk 'BEGIN{FS=OFS=","}{if($3=="varchar"){$3="text"};print}' $source__txt>401__text
awk 'BEGIN{FS=OFS=","}{if($3=="int"){$3="integer"};print}' 401__text>402__integer
awk 'BEGIN{FS=OFS=","}{if($3=="long"){$3="bigint"};print}' 402__integer>403__bigint
# Extract field names; decimal values have delimiter problems and also carry quotes,
# so strip all double quotes first.
sed s/\"//g 403__bigint > 404__noyinhao
awk -F, '{print $2" "$3","$4","}' 404__noyinhao>405__field
sed -i "s/,,/,/" 405__field
>406__field
# Turn lines that contain only a superfluous comma into blank lines
# (blank lines later act as record separators for the split below).
cat 405__field | while read myline1
do
myline=`echo $myline1`
if [ "$myline" == "," ]; then
echo "">>406__field
else
echo "$myline" >> 406__field
fi
done
# Split into one file per table (blank-line-separated records).
# NOTE: the split sometimes leaves an extra line in the last file; check the last table.
awk 'BEGIN{RS="\n\n"}{a++}{print >"f1__"a}' 406__field
# Count how many per-table files were produced.
f1_count=`ls -l | grep f1 | wc -l`
echo $f1_count
> ${pre_name_temp}/total.sql
# Array indices start at 0. Strip the trailing comma from the last DDL line
# of each table; the conf file keeps its comma.
for i in ${!arr[@]}
do
echo ${arr[$i]}
m=$(($i+1))
# Get the content and length of the last non-empty line.
last_row_content=`awk 'NF{a=$0}END{print a}' "f1__"$m`
last_row_length=`echo ${last_row_content} | wc -L`
sed -i '$d' "f1__"$m
# NOTE: watch out for the last table (see the split caveat above).
#if [ $f1_count = $m ];then
# echo "======================"
# sed -i '$d' "f1__"$m
#fi
echo ${last_row_content:0:$(($last_row_length-1))}>> "f1__"$m
cat pg__head.txt> "f2__"$m
cat "f1__"$m>>"f2__"$m
cat pg__end.txt>>"f2__"$m
tb_dir=${pre_name_temp}/${arr[$i]}
# Copy the SQL templates into the per-table temp dir and replace the
# sequoiadb_cl placeholder with the actual table name.
mkdir -p $tb_dir
cp -r ${pre_name}/create_js_sql/* $tb_dir/
cd $tb_dir/
cp ${pre_name}/"f2__"$(($i+1)) $tb_dir/pg.sql
sed -i "s/sequoiadb_cl/${arr[$i]}/g" $tb_dir/*
# Append all SQL for this table into m.sql, then m.sql into total.sql.
# cat del.sql>m.sql
cat pg.sql>>m.sql
cat grant.sql>>m.sql
cat m.sql>>${pre_name_temp}/total.sql
cd ${pre_name}
done
# Clean up intermediate files (currently disabled).
#rm -rf f1__*
#rm -rf f2__*
echo end st4
| true
|
8cd48c6602a5d544c6190cf01f8b364c28bb2b4e
|
Shell
|
robozavri/market
|
/server/gulp/tasks/server-env/scripts/root/add-user.sh
|
UTF-8
| 356
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
# Create a Linux user with password-less sudo and seed its SSH keys.
# Usage: add-user.sh <username> <password>
USER=$1
PASSWORD=$2
adduser --disabled-password --gecos "" $USER
echo "$USER:$PASSWORD" | chpasswd
# Grant blanket password-less sudo (second line is redundant given the first).
echo "$USER ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
echo "$USER ALL=NOPASSWD: /usr/sbin/service, /bin/chown" >> /etc/sudoers
# NOTE(review): 'gdg' is hardcoded below while the rest of the script uses
# $USER — this only works when the new user is literally "gdg". Confirm
# whether these should be /home/$USER/.ssh instead.
mkdir /home/gdg/.ssh
cat ~/.ssh/authorized_keys >> /home/gdg/.ssh/authorized_keys
printf "\n\n__GDG__: Added user ($USER)\n\n\n"
| true
|
89213f66b344d9201dc98f321b79999003e80333
|
Shell
|
lnxbm3r1/myenv
|
/zshrc
|
UTF-8
| 1,117
| 2.515625
| 3
|
[] |
no_license
|
# ~/.zshrc — shell history, editor, prompt and keybinding setup.
#PROMPT="%n@%m:%/
#`date +%a` `date +%m`/`date +%d` %@ %h > "
# History: keep 20000 entries, persisted across sessions.
HISTSIZE=20000
HISTFILE=~/.zsh_history
SAVEHIST=20000
EDITOR=/usr/bin/vim
LANG=C
export LESSCHARSET=utf-8
export LESS="-MireX"
# Fixed: the original "set extenedhistory" was a typo and also used the
# wrong builtin — zsh shell options are enabled with setopt.
setopt extendedhistory
autoload -U compinit
compinit
autoload -U promptinit
promptinit
prompt clint
# vi-style line editing
bindkey -v
#alias ls='ls -F --color'
#case $TERM in
# xterm*)
# precmd () {print -Pn "\e]0;%n@%m: %~\a"}
# ;;
#esac
#n start_agent {
#echo "Initialising new SSH agent..."
#/usr/bin/ssh-agent | sed 's/^echo/#echo/' > "${SSH_ENV}"
#echo succeeded
#chmod 600 "${SSH_ENV}"
#. "${SSH_ENV}" > /dev/null
#/usr/bin/ssh-add;
#}
#
# Source SSH settings, if applicable
#
# if [ -f "${SSH_ENV}" ]; then
# . "${SSH_ENV}" > /dev/null
# #ps ${SSH_AGENT_PID} doesn't work under cywgin
# ps -ef | grep ${SSH_AGENT_PID} | grep ssh-agent$ > /dev/null || {
# start_agent;
# }
# else
# start_agent;
# fi
#export PATH="$PATH:$HOME/.rvm/bin" # Add RVM to PATH for scripting
#export PATH="$PATH:/opt/ec2/bin/"
#export JAVA_HOME=$(/usr/libexec/java_home)
#export EC2_HOME=/opt/ec2
#export AWS_ACCESS_KEY=
#export AWS_SECRET_KEY=
| true
|
b8634b4d50f77fd3b09847c28dd5273685593b31
|
Shell
|
linux-can/socketcand
|
/debian_pack.in
|
UTF-8
| 1,482
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
# Build a Debian package for socketcand from the current git HEAD:
# export the tree, run autogen, generate maintainer scripts, then
# dpkg-buildpackage, leaving the .deb next to this script.
mkdir debianpack
git archive HEAD --format tar.gz -o debianpack/socketcand-@PACKAGE_VERSION@.tar.gz --prefix socketcand-@PACKAGE_VERSION@/
cd debianpack
tar xfz socketcand-@PACKAGE_VERSION@.tar.gz
rm socketcand-@PACKAGE_VERSION@.tar.gz
cd socketcand-@PACKAGE_VERSION@
./autogen.sh
cd ..
tar cfz socketcand-@PACKAGE_VERSION@.tar.gz socketcand-@PACKAGE_VERSION@
cd socketcand-@PACKAGE_VERSION@
dh_make -f ../socketcand-@PACKAGE_VERSION@.tar.gz -s
cat > debian/postinst <<-EOF
#!/bin/sh
# postinst script for socketcand
#
# see: dh_installdeb(1)
set -e
case "\$1" in
configure)
update-rc.d socketcand defaults
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \\\`\$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
exit 0
EOF
cat > debian/postrm <<-EOF
#!/bin/sh
# postrm script for socketcand
#
# see: dh_installdeb(1)
set -e
case "\$1" in
purge|remove)
update-rc.d -f socketcand remove
;;
upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
;;
*)
echo "postrm called with unknown argument \\\`\$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
exit 0
EOF
dpkg-buildpackage
cd ../..
cp debianpack/*.deb .
rm -rf debianpack
| true
|
26d5df485c8f5596b42fb50a41ca9b333769990d
|
Shell
|
tomekl007/docker-images
|
/studio/6.0/files/entrypoint.sh
|
UTF-8
| 506
| 2.921875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# -*- mode: sh -*-
#
# Copyright DataStax, Inc, 2017
# Please review the included LICENSE file for more information.
#
# Container entrypoint: wire up external config and launch DataStax Studio.
set -e
# base-checks.sh supplies link_external_config (used below).
. /base-checks.sh
link_external_config "${STUDIO_HOME}"
############################################
# Set up variables/configure the image
############################################
############################################
# Run the command
############################################
echo "Starting DataStax Studio"
# exec replaces this shell so the server becomes the container's main
# process and receives signals directly.
exec "${STUDIO_HOME}/bin/server.sh"
| true
|
3883217a9089bb3fdec998bffd02979b6138cf7a
|
Shell
|
oraclebase/vagrant
|
/em/ol7_em134/scripts/oracle_user_environment_setup.sh
|
UTF-8
| 3,038
| 2.78125
| 3
|
[] |
no_license
|
# Generate the oracle user's environment script plus start/stop helper
# scripts for the database and Cloud Control (OMS + agent).
echo "******************************************************************************"
echo "Create environment script." `date`
echo "******************************************************************************"
mkdir -p /home/oracle/scripts

# Unquoted heredoc: ${HOSTNAME} expands now; the escaped \${...} references
# are written literally so they expand when setEnv.sh is sourced later.
cat > /home/oracle/scripts/setEnv.sh <<EOF
# Regular settings.
export TMP=/u01/tmp
export TMPDIR=\${TMP}

export ORACLE_HOSTNAME=${HOSTNAME}
export ORACLE_UNQNAME=emcdb
export ORACLE_BASE=/u01/app/oracle
#export ORACLE_HOME=\${ORACLE_BASE}/product/18.0.0/dbhome_1
export ORACLE_HOME=\${ORACLE_BASE}/product/19.0.0/dbhome_1
export ORACLE_SID=emcdb

export PATH=/usr/sbin:/usr/local/bin:\${PATH}
export PATH=\${ORACLE_HOME}/bin:\${PATH}

export LD_LIBRARY_PATH=\${ORACLE_HOME}/lib:/lib:/usr/lib
export CLASSPATH=\${ORACLE_HOME}/jlib:\${ORACLE_HOME}/rdbms/jlib

export ORA_INVENTORY=/u01/app/oraInventory

# Database installation settings.
export SOFTWARE_DIR=/u01/software
#export DB_SOFTWARE="LINUX.X64_180000_db_home.zip"
export DB_SOFTWARE="LINUX.X64_193000_db_home.zip"
export ORACLE_PASSWORD="oracle"
export SCRIPTS_DIR=/home/oracle/scripts
export ORACLE_SID=emcdb
export SYS_PASSWORD="SysPassword1"
export PDB_NAME="emrep"
export PDB_PASSWORD="PdbPassword1"
export DATA_DIR=/u01/oradata

# EM settings.
export UNIX_GROUP_NAME=oinstall
export MW_HOME=\${ORACLE_BASE}/middleware
export OMS_HOME=\${MW_HOME}
export GC_INST=\${ORACLE_BASE}/gc_inst
export AGENT_BASE=\${ORACLE_BASE}/agent
export AGENT_HOME=\${AGENT_BASE}/agent_inst
export WLS_USERNAME=weblogic
export WLS_PASSWORD=Welcome1
export SYSMAN_PASSWORD=\${WLS_PASSWORD}
export AGENT_PASSWORD=\${WLS_PASSWORD}
export SOFTWARE_LIBRARY=\${ORACLE_BASE}/swlib
export DATABASE_HOSTNAME=localhost
export LISTENER_PORT=1521
EOF

echo "******************************************************************************"
echo "Add it to the .bash_profile." `date`
echo "******************************************************************************"
echo ". /home/oracle/scripts/setEnv.sh" >> /home/oracle/.bash_profile

echo "******************************************************************************"
echo "Create start/stop scripts." `date`
echo "******************************************************************************"
# Source the generated file so $SCRIPTS_DIR etc. are defined for the rest
# of this provisioning script.
. /home/oracle/scripts/setEnv.sh

cat > $SCRIPTS_DIR/start_all.sh <<EOF
#!/bin/bash
. $SCRIPTS_DIR/setEnv.sh

export ORAENV_ASK=NO
. oraenv
export ORAENV_ASK=YES

dbstart \$ORACLE_HOME
\$SCRIPTS_DIR/start_cloud_control.sh
EOF

cat > $SCRIPTS_DIR/stop_all.sh <<EOF
#!/bin/bash
. $SCRIPTS_DIR/setEnv.sh

\$SCRIPTS_DIR/stop_cloud_control.sh

export ORAENV_ASK=NO
. oraenv
export ORAENV_ASK=YES

dbshut \$ORACLE_HOME
EOF

cat > $SCRIPTS_DIR/start_cloud_control.sh <<EOF
#!/bin/bash
. $SCRIPTS_DIR/setEnv.sh

\$OMS_HOME/bin/emctl start oms
\$AGENT_HOME/bin/emctl start agent
EOF

cat > $SCRIPTS_DIR/stop_cloud_control.sh <<EOF
#!/bin/bash
. $SCRIPTS_DIR/setEnv.sh

\$AGENT_HOME/bin/emctl stop agent
\$OMS_HOME/bin/emctl stop oms -all
EOF

chown -R oracle.oinstall ${SCRIPTS_DIR}
chmod u+x ${SCRIPTS_DIR}/*.sh
| true
|
234e59d75fd92506db8975c8ec012a42e29aa3a3
|
Shell
|
Cloudxtreme/autoscripts
|
/Services/upgrade_pip.sh
|
UTF-8
| 337
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Upgrade pip inside every virtualenv found under $envs.
date '+[ %m/%d/%Y %H:%M:%S ]'

envs=/home/blake/Py3Envs

# Change directory to venvs; abort if it is missing instead of upgrading
# pip in whatever directory we happened to be in.
cd "$envs" || { echo "error: $envs not found" >&2; exit 1; }

# For each env folder, activate it and upgrade its pip.
for env in */; do
echo "* Updating pip for $env:"
# Activate env
source "$envs/$env/bin/activate"
# Upgrade pip
pip install --upgrade pip
# Leave the env so the next iteration activates a clean environment
# rather than stacking activations.
deactivate
done

echo "Complete."
echo
| true
|
39cc585a6eb6ccf6248a86417abe81c3935f2d4a
|
Shell
|
hydronica/task_ffmpeg
|
/deploy/start.sh
|
UTF-8
| 610
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Bring up the NSQ stack, pre-create the topics the workers consume, then
# start the task_ffmpeg stack and follow its logs.
docker-compose -f "docker/docker-compose-nsq.yml" up -d

# make sure the topics exist in nsq (runs curl from inside the nsq network
# so the nsqd hostname resolves)
docker run -it --network nsq_network_default task_ffmpeg/build:stage /bin/sh -c \
"curl -X POST 'nsqd:4151/topic/create?topic=mediainfo'; \
curl -X POST 'nsqd:4151/topic/create?topic=done'; \
curl -X POST 'nsqd:4151/topic/create?topic=files'; \
curl -X POST 'nsqd:4151/topic/create?topic=ffmpeg';"

printf "be sure to set the .env with the correct shared media locations"
docker-compose -f "docker/docker-compose-stage.yml" up -d
# Follows logs in the foreground; Ctrl-C detaches without stopping services.
docker-compose -f "docker/docker-compose-stage.yml" logs -f
| true
|
4e0ac9bae1969eb3faec45b510f97b50904b0d4b
|
Shell
|
ksrt12/tools
|
/tag
|
UTF-8
| 1,509
| 3.15625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Tag Android repo projects against AOSP and CAF (CodeLinaro) upstreams
# using the external `pll` helper.
# Usage: tag <release|platform-tag|security-tag> [caf-tag]
# NOTE(review): commands are prefixed with $echo, which is empty unless set
# in the environment — setting echo=echo appears to give a dry-run mode;
# confirm against the `pll` helper's docs.
currpath=`pwd`;

case $1 in
0) ;;
*platform*) tag=$1;;
*security*) tag=$1;;
*) tag=11.0.0_r$1;;
esac
caf=$2;

# Tag each project path in $1 against AOSP tag $tag; $2 optionally
# overrides the remote project name.
function TagAosp() {
for i in $1;
do
cd $i; echo $i;
$echo pll aosp $tag $2;
cd $currpath;
done
}

# Tag the fixed AOSP project list plus everything under frameworks/,
# system/ and packages/ (vendor/ excluded).
function fullAosp() {
local list="
art
bionic
build/soong
build/blueprint
bootable/recovery
development
libcore
external/boringssl
external/cldr
external/e2fsprogs
external/gptfdisk
external/guice
external/icu
external/libcxx
external/mksh
external/perfetto
external/tinyalsa
external/tinycompress
external/toybox
hardware/broadcom/libbt
hardware/interfaces
hardware/libhardware
hardware/nxp/nfc
hardware/ril
"
TagAosp "build/make" "build";
TagAosp "$list";
TagAosp "`repo list -r frameworks/ -p | grep -v 'vendor/'`" ;
TagAosp "`repo list -r system/ -p | grep -v 'vendor/'`" ;
TagAosp "`repo list -r packages/ -p | grep -v 'vendor/'`" ;
}

# Tag CAF projects: $1 = upstream path prefix, $2 = local path prefix,
# $3 = project names; a non-empty $4 tags the prefix itself.
# NOTE(review): the variables below are not declared `local`, so they leak
# into the global shell — harmless here but worth confirming.
function TagCAF(){
local_name_list=$3
local_path=$2
caf_path=$1
for local_name in $local_name_list;
do
full_path=$local_path/$local_name
cd $full_path; echo $full_path;
if [ -n "$4" ]; then local_name=""; fi
$echo pll caf $caf $caf_path/$local_name;
cd $currpath;
done
}

function fullCAF() {
local list="
audio-hal/st-hal
data-ipa-cfg-mgr
dataservices
healthd-ext
thermal-engine
usb
"
local list2="
audio
display
media"
TagCAF "hardware/qcom" "hardware/qcom-caf/sm8150" "$list2"
TagCAF "vendor/qcom-opensource" "vendor/qcom/opensource" "$list"
}

# Only run the phases whose arguments were supplied.
if [ -n "$tag" ]; then fullAosp; fi
if [ -n "$caf" ]; then fullCAF; fi
| true
|
f077c5ee9a4ab2d179ddfe56cf80b3ddeb24e204
|
Shell
|
crenv/crenv
|
/libexec/crenv-version-name
|
UTF-8
| 938
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Summary: Show the current Crystal version
# Resolves CRENV_VERSION (env var, then version file), runs version-name
# hooks, and prints the installed version name or "system".
set -e
[ -n "$CRENV_DEBUG" ] && set -x

# Fall back to the version file when the env var is not set; a missing or
# unreadable file leaves CRENV_VERSION empty (|| true keeps set -e happy).
if [ -z "$CRENV_VERSION" ]; then
CRENV_VERSION_FILE="$(crenv-version-file)"
CRENV_VERSION="$(crenv-version-file-read "$CRENV_VERSION_FILE" || true)"
fi

# Split hook paths on newlines only, then restore IFS.
OLDIFS="$IFS"
IFS=$'\n' scripts=(`crenv-hooks version-name`)
IFS="$OLDIFS"
for script in "${scripts[@]}"; do
source "$script"
done

if [ -z "$CRENV_VERSION" ] || [ "$CRENV_VERSION" = "system" ]; then
echo "system"
exit
fi

# A version "exists" when its directory is present under versions/.
version_exists() {
local version="$1"
[ -d "${CRENV_ROOT}/versions/${version}" ]
}

if version_exists "$CRENV_VERSION"; then
echo "$CRENV_VERSION"
elif version_exists "${CRENV_VERSION#crystal-}"; then
# Tolerate a "crystal-" prefix in the configured version, with a warning.
echo "warning: ignoring extraneous \`crystal-' prefix in version \`${CRENV_VERSION}'" >&2
echo "${CRENV_VERSION#crystal-}"
else
echo "crenv: version \`$CRENV_VERSION' is not installed (set by $(crenv-version-origin))" >&2
exit 1
fi
| true
|
1d571a4d26f103cb08aeb9f3996b2c8420212598
|
Shell
|
meshkat632/elasticsearch-poc
|
/vagrant-scripts/templates/proxy-provisioner.sh
|
UTF-8
| 463
| 2.625
| 3
|
[] |
no_license
|
#!/bin/sh
set -e # Exit script immediately on first error.
set -x # Print commands and their arguments as they are executed.

# Install the pre-baked proxy configuration files for apt, curl, wget
# and git, then refresh the package index.
build_dir=/tmp/vagrant-scripts/build

cp "$build_dir/apt.conf" /etc/apt/apt.conf
cp "$build_dir/.curlrc" /home/vagrant/.curlrc
cp "$build_dir/.wgetrc" /home/vagrant/.wgetrc
cp "$build_dir/.wgetrc" /etc/wgetrc
cp "$build_dir/.gitconfig" ~/.gitconfig

echo "proxy provisioning is done"

sudo apt-get update -y
| true
|
6637336d878624c725d317440433061c2ca02a42
|
Shell
|
cmstas/ZMET2016
|
/ZMET2015/limitcode/TChiWZ/setupLimits.sh
|
UTF-8
| 872
| 3.390625
| 3
|
[] |
no_license
|
#! /bin/bash

# Instructions to run limit code taken from:
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/SWGuideHiggsAnalysisCombinedLimit#ROOT5_SLC6_release_CMSSW_7_1_X

export SCRAM_ARCH=slc6_amd64_gcc481
cmsrel CMSSW_7_1_5
# Fixed: the original test was inverted ("! -e") — it tried to cd into the
# release area only when it did NOT exist.
if [ -e CMSSW_7_1_5 ]; then
  cd CMSSW_7_1_5/src/
  cmsenv
else
  echo "Directory: CMSSW_7_1_5 Does not exist. Exiting."
  exit 1
fi

git clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit
cd HiggsAnalysis/CombinedLimit
git fetch origin
git checkout v5.0.1

scramv1 b clean; scramv1 b -j8
didcompile=$?

if [ $didcompile -eq 0 ]; then
  echo "Compiled successfully."
else
  echo "Did not compile successfully. Exiting."
  exit 2
fi

cd $CMSSW_BASE/src/

if [ ! -e log ]; then
  # Fixed: "cout" is not a shell command; use echo.
  echo "Making directory: log"
  mkdir log
fi

# makeLimitTable.C
# make_rValues.C
# make_sigValues.C
# doLimits.sh
# doSignif.sh
| true
|
3d650f64acde5c2584b8be7ececc617fe12f810f
|
Shell
|
harindaka/ubuntu-helpers
|
/uh-startup-command-create
|
UTF-8
| 1,426
| 4.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Create a oneshot systemd service that runs a user-supplied command once
# at startup.
set -e

if [ "$1" = "--help" ]
then
  echo "Desc.  : creates a systemd service which runs a command or shell script once upons startup"
  echo "Usage  : sudo ./uh-startup-command-create"
  echo "Eg.    : sudo ./uh-startup-command-create"
  exit 0;
fi

echo "Enter the name of the service (without the .service extension) which will contain the startup command below (no spaces):"
# -r: keep backslashes in the typed input literal.
read -r SERVICE_FILE
SERVICE_FILE=$SERVICE_FILE.service

echo
echo "Enter the description of the startup command below:"
read -r SERVICE_DESC

echo
echo "Enter command to be executed on startup below:"
read -r COMMAND

# read -d '' consumes the whole heredoc; it exits non-zero at EOF, so
# "|| true" keeps set -e from aborting.
SERVICE_SCRIPT=""
read -d '' SERVICE_SCRIPT << EOF || true
[Unit]
Description=$SERVICE_DESC

[Service]
Type=oneshot
ExecStart=$COMMAND
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
EOF

SERVICE_DIR="/etc/systemd/system/"

echo
echo "The below mentioned systemd service entry will be created at $SERVICE_DIR$SERVICE_FILE"
echo
echo "$SERVICE_SCRIPT"
echo
echo "Please enter your choice:"
select yn in "Proceed" "Cancel"; do
  case $yn in
    Proceed )
      # >| overrides noclobber if it is set.
      echo "$SERVICE_SCRIPT" >| "$SERVICE_DIR$SERVICE_FILE"
      systemctl enable "$SERVICE_FILE"
      echo
      echo "Done. Systemd service entry created at $SERVICE_DIR$SERVICE_FILE"
      break;;
    Cancel )
      echo
      echo "Script execution cancelled"
      break;;
  esac
done
| true
|
ca468ebe65e3b0068da0a3295aabc9efd70dbbe5
|
Shell
|
jackbarsotti/sfdx-travisci2
|
/deployment.sh
|
UTF-8
| 9,086
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# Travis CI deployment script: builds an incremental (git-diff based)
# Salesforce DX deployment per branch and runs the appropriate test level.

# Provide basic information about the current build type
echo
echo "Travis event type: $TRAVIS_EVENT_TYPE"
if [ "$TRAVIS_EVENT_TYPE" == "pull_request" ]; then
echo "Travis pull request branch: $TRAVIS_PULL_REQUEST_BRANCH"
fi;
echo

# Install sfdx plugins and configure build with sfdx settings
export SFDX_AUTOUPDATE_DISABLE=false
export SFDX_USE_GENERIC_UNIX_KEYCHAIN=true
export SFDX_DOMAIN_RETRY=300
export SFDX_DISABLE_APP_HUB=true
export SFDX_LOG_LEVEL=DEBUG
echo 'mkdir sfdx...'
mkdir sfdx
wget -qO- $URL | tar xJ -C sfdx --strip-components 1
"./sfdx/install"
export PATH=./sfdx/$(pwd):$PATH
sfdx --version
sfdx plugins --core

# Create temporary diff folder to paste files into later for incremental deployment
# This is the deploy directory (see below in before_script)
sudo mkdir -p /Users/jackbarsotti/sfdx-travisci2/force-app/main/default/diff

# Pull our local branches so they exist locally
# We are on a detached head, so we keep track of where Travis puts us
echo
echo 'Running: export build_head=$(git rev-parse HEAD)'
export build_head=$(git rev-parse HEAD)
echo "Build head: $build_head"
echo

# Overwrite remote.origin.fetch to fetch the remote branches (overrides Travis's --depth clone)
git config --replace-all remote.origin.fetch +refs/heads/*:refs/remotes/origin/*
echo 'Running: git fetch'
git fetch

# Create variables for frequently-referenced file paths and branches
export BRANCH=$TRAVIS_BRANCH
export branch=$TRAVIS_BRANCH
echo
echo "Travis branch: $TRAVIS_BRANCH"
echo
export userPath=/Users/jackbarsotti/sfdx-travisci2/force-app/main/default
export diffPath=/diff/force-app/main/default
# For a full build, deploy directory should be "- export DEPLOYDIR=force-app/main/default":
export DEPLOYDIR=/Users/jackbarsotti/sfdx-travisci2/force-app/main/default/diff
export classPath=force-app/main/default/classes
export triggerPath=force-app/main/default/triggers

# Run a git diff for the incremental build depending on checked-out branch (if-statement per branch)
#dev branch:
if [ "$BRANCH" == "dev" ]; then
#create tracking branch
echo 'Your current branches: '
echo
for branch in $(git branch -r|grep -v HEAD); do
echo $branch
git checkout -qf ${branch#origin/}
done;
echo
git checkout dev
# Copy the files from a git diff into the deploy directory
export CHANGED_FILES=$(git diff --name-only master force-app/)
sudo cp --parents $(git diff --name-only master force-app/) $DEPLOYDIR;
# Show which files will be deployed in the Travis build job log
echo
echo 'Your changed files: '
echo
for FILE in $CHANGED_FILES; do
echo ../$FILE
done;
echo
fi;

#qa branch:
if [ "$BRANCH" == "qa" ]; then
echo 'Your current branches: '
echo
for branch in $(git branch -r|grep -v HEAD); do
echo $branch
git checkout -qf ${branch#origin/}
done;
echo
git checkout qa
export CHANGED_FILES=$(git diff --name-only dev force-app/)
sudo cp --parents $(git diff --name-only dev force-app/) $DEPLOYDIR;
echo
echo 'Your changed files: '
echo
for FILE in $CHANGED_FILES; do
echo ../$FILE
done;
echo
fi;

#uat branch:
if [ "$BRANCH" == "uat" ]; then
echo 'Your current branches: '
echo
for branch in $(git branch -r|grep -v HEAD); do
echo $branch
git checkout -qf ${branch#origin/}
done;
echo
git checkout uat
export CHANGED_FILES=$(git diff --name-only qa force-app/)
sudo cp --parents $(git diff --name-only qa force-app/) $DEPLOYDIR;
echo
echo 'Your changed files: '
echo
for FILE in $CHANGED_FILES; do
echo ../$FILE
done;
echo
fi;

#master branch
if [ "$BRANCH" == "master" ]; then
echo 'Your current branches: '
echo
for branch in $(git branch -r|grep -v HEAD); do
echo $branch
git checkout -qf ${branch#origin/}
done;
echo
git checkout master
export CHANGED_FILES=$(git diff --name-only dev force-app/)
sudo cp --parents $(git diff --name-only dev force-app/) $DEPLOYDIR;
echo
echo 'Your changed files: '
echo
for FILE in $CHANGED_FILES; do
echo ../$FILE
done;
echo
fi;

# List each changed file from the git diff command
# For any changed class or trigger file, it's associated meta data file is copied to the deploy directory (and vice versa)
for FILE in $CHANGED_FILES; do
echo ' ';
echo "Found changed file:`echo ' '$FILE`";
# NOTE - naming convention used for <className>Test.cls files: "Test":
if [[ $FILE == *Test.cls ]]; then
sudo cp --parents "$(find $classPath -samefile "$FILE-meta.xml")"* $DEPLOYDIR;
echo 'Copying class file to diff folder for deployment...';
echo 'Class files that will be deployed:';
ls $userPath$diffPath/classes;
elif [[ $FILE == *Test.cls-meta.xml ]]; then
export FILE2=${FILE%.cls-meta.xml};
sudo cp --parents "$(find $classPath -samefile "$FILE2.cls")"* $DEPLOYDIR;
echo 'Copying class meta file to diff folder for deployment...';
echo 'Class files that will be deployed:';
ls $userPath$diffPath/classes;
elif [[ $FILE == *.cls ]]; then
sudo cp --parents "$(find $classPath -samefile "$FILE-meta.xml")"* $DEPLOYDIR;
echo 'Copying class file to diff folder for deployment...';
echo 'Class files that will be deployed:';
ls $userPath$diffPath/classes;
elif [[ $FILE == *.cls-meta.xml ]]; then
export FILE2=${FILE%.cls-meta.xml};
sudo cp --parents "$(find $classPath -samefile "$FILE2.cls")"* $DEPLOYDIR;
echo 'Copying class meta file to diff folder for deployment...';
echo 'Class files that will be deployed:';
ls $userPath$diffPath/classes;
elif [[ $FILE == *.trigger ]]; then
sudo cp --parents "$(find $triggerPath -samefile "$FILE-meta.xml")"* $DEPLOYDIR;
echo 'Copying trigger file to diff folder for deployment...';
echo 'Trigger files that will be deployed:';
ls $userPath$diffPath/triggers;
elif [[ $FILE == *.trigger-meta.xml ]]; then
export FILE3=${FILE%.trigger-meta.xml};
sudo cp --parents "$(find $triggerPath -samefile "$FILE3.trigger")"* $DEPLOYDIR;
echo 'Copying trigger meta file to diff folder for deployment...';
echo 'Trigger files that will be deployed:';
ls $userPath$diffPath/triggers;
fi;
done;

# Make temporary folder for our <className>Test.cls files that will be parsed
sudo mkdir -p /Users/jackbarsotti/sfdx-travisci2/force-app/main/default/unparsedTests
export unparsedTestsDir=/Users/jackbarsotti/sfdx-travisci2/force-app/main/default/unparsedTests

# Search the local "classes" folder for <className>Test.cls files
export classTests=$(find $classPath -name "*Test.cls")

# Parse the <className>Test.cls filenames to remove each file's path and ".cls" ending, result: <className>Test
# Exports as a string that will be called in the deploy command in script phase IF branch is dev or qa
export parsedList=''
for testfiles in $classTests; do
sudo cp "$testfiles"* $unparsedTestsDir;
export parsed=$(find $unparsedTestsDir -name "*Test.cls");
export parsed=${parsed##*/};
export parsed=${parsed%.cls*};
export parsedList="${parsedList}${parsed},";
done;

# Finally, go back to the HEAD from the before_script phase
echo 'Running: git checkout $build_head'
git checkout $build_head

# Automatically authenticate against current branch's corresponding SalesForce org
# Create deployment variable for "sfdx:force:source:deploy RunSpecifiedTests -r <variable>" (see script phase below)
# Only validate, not deploy, when a pull request is being created
# When a pull request is MERGED, deploy it
if [ "$BRANCH" == "dev" ]; then
echo $SFDXAUTHURLDEV>authtravisci.txt;
if [ "$TRAVIS_EVENT_TYPE" == "pull_request" ]; then
export TESTLEVEL="RunSpecifiedTests -r $parsedList -c";
else
export TESTLEVEL="RunSpecifiedTests -r $parsedList";
fi;
fi;
if [ "$BRANCH" == "qa" ]; then
echo $SFDXAUTHURLQA>authtravisci.txt;
if [ "$TRAVIS_EVENT_TYPE" == "pull_request" ]; then
export TESTLEVEL="RunSpecifiedTests -r $parsedList -c";
else
export TESTLEVEL="RunSpecifiedTests -r $parsedList";
fi;
fi;
if [ "$BRANCH" == "uat" ]; then
echo $SFDXAUTHURLUAT>authtravisci.txt;
if [ "$TRAVIS_EVENT_TYPE" == "pull_request" ]; then
export TESTLEVEL="RunLocalTests -c";
else
export TESTLEVEL="RunLocalTests";
fi;
fi;
if [ "$BRANCH" == "master" ]; then
echo $SFDXAUTHURL>authtravisci.txt;
if [ "$TRAVIS_EVENT_TYPE" == "pull_request" ]; then
export TESTLEVEL="RunLocalTests -c";
else
export TESTLEVEL="RunLocalTests";
fi;
fi;

# Store our auth-url for our targetEnvironment alias for deployment
sfdx force:auth:sfdxurl:store -f authtravisci.txt -a targetEnvironment

# Create error message to account for potential deployment failure
export deployErrorMsg='There was an issue deploying. Check ORG deployment status page for details'

# Run apex tests and deploy apex classes/triggers
sfdx force:org:display -u targetEnvironment
sfdx force:source:deploy -w 10 -p $DEPLOYDIR -l $TESTLEVEL -u targetEnvironment
echo
# Failure message if deployment fails
# Fixed: the original compared the literal string "TRAVIS_TEST_RESULT"
# against "0", which is always true; the variable must be expanded.
if [ "$TRAVIS_TEST_RESULT" != 0 ]; then
echo $deployErrorMsg;
echo
fi;
| true
|
ed51e45232146c7c57fa96fe5d454e232f252916
|
Shell
|
qrsforever/homebrain
|
/homebrain/build/platforms/larfe/startup.sh
|
UTF-8
| 959
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
# Supervisor for the larfe_ga process on the device: restart it forever,
# roll back to the .bk backup if it keeps crashing, and honor a one-shot
# "recovery" command dropped into system.cmd.

export PATH=/mnt/nandflash:$PATH

cur_dir=`pwd`

UPTIME=30
MAXCNT=10
DCOUNT=0

PROGRAM="$cur_dir/larfe_ga"
DBNAME="$cur_dir/lf.db"
SYSCMD="$cur_dir/system.cmd"

# Restore the program from its backup copy and drop the database so the
# device comes back up in a known-good state.
__reset_program() {
    if [ -f "${PROGRAM}.bk" ]
    then
        cp "${PROGRAM}.bk" "${PROGRAM}"
        rm "${DBNAME}"
    fi
}

# Execute a pending one-shot command from $SYSCMD (only "recovery" is
# recognized), then remove the command file.
__check_systemcmd() {
    if [ ! -f "$SYSCMD" ]
    then
        return
    fi
    cmd=`cat "$SYSCMD"`
    # POSIX test instead of the original bash-only [[ ]]: this script
    # declares #!/bin/sh, where [[ ]] is not guaranteed to exist.
    if [ "x$cmd" = "xrecovery" ]
    then
        __reset_program
    fi
    rm -f "$SYSCMD"
}

$cur_dir/hostdiscovery &

while :
do
    mv homebrain.log homebrain.log.pre
    # Seconds of system uptime (integer part) before and after the run.
    st=`cat /proc/uptime | cut -d ' ' -f1 | cut -d. -f1`
    $PROGRAM -d $cur_dir -f
    ed=`cat /proc/uptime | cut -d ' ' -f1 | cut -d. -f1`
    # If the program died within $UPTIME seconds, count it as a crash;
    # too many consecutive crashes trigger a rollback to the backup.
    if [ $((ed - st)) -lt "$UPTIME" ]
    then
        DCOUNT=$((DCOUNT + 1))
        if [ "$DCOUNT" -gt "$MAXCNT" ]
        then
            __reset_program
            DCOUNT=0
        fi
    else
        __check_systemcmd
    fi
    sleep 5
done
| true
|
6ed467f108b24572d07113d2edfa24588b8d697e
|
Shell
|
RITResearchComputing/Google-Code
|
/AdminScripts/walker/AGWalker.sh.old
|
UTF-8
| 826
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Walking program to execute commands across multiple linux nodes
# using SSH connections

#read -s -p "Enter Password: " passwd
#echo ""

function printhelp {
  echo "Usage: AGWalker commands.list nodes.list"
}

#read in list of nodes to walk
echo "Reading walk.list"
nodes=()
while read -r line
do
  nodes+=("$line")
done < walk.list
echo "Done reading walk.list"

#read in command list
echo "Reading walk.cmd"
cmd=()
while read -r line
do
  cmd+=("$line")
done < walk.cmd
echo "Done reading walk.cmd"

#iterate through list of nodes
# Fixed: the original opened an interactive ssh session, then ran the
# commands LOCALLY after it returned, and the trailing "exit" terminated
# the whole script after the first node. Each command is now executed on
# the remote node via ssh.
echo "Starting to walk..."
for node in "${nodes[@]}"
do
  for (( command = 0 ; command < ${#cmd[@]} ; command++ ))
  do
    ssh "user@${node}" "${cmd[$command]}"
  done
done
| true
|
b2f4d58d798c1740d1cda5815a40ce60fd3dca1d
|
Shell
|
dave-burke/dotfiles
|
/bootstrap.sh
|
UTF-8
| 1,588
| 4.4375
| 4
|
[] |
no_license
|
#!/bin/bash

DOT_FILES="$(dirname ${0})"

# Narrated wrapper around mkdir -p: announces what it is doing and skips
# directories that already exist.
create_directory() {
  local dir_name="${1}"
  echo -n "Creating ${dir_name}..."
  if [[ -d "${dir_name}" ]]; then
    echo "Already exists"
    return 0
  fi
  mkdir --parents --verbose "${dir_name}"
}
# Symlink one file from the repo's home/ tree into the user's $HOME,
# preserving the path relative to .../home/.  If the destination already
# exists, asks before overwriting unless force_overwrite=true was set by
# the -f flag at the bottom of this script.
function link_dotfile() {
# target=/full/path/to/file ('realpath' is not always available or consistent)
local target="$(cd $(dirname ${1}); pwd)/$(basename ${1})"
# Strip everything up to and including the last "/home/" to get the
# destination path relative to $HOME.
local path_relative_to_home="${target##*/home/}"
local link_name="${HOME}/${path_relative_to_home}"
echo -n "Linking ${link_name} to ${target}"
if [[ -e "${link_name}" ]]; then
local do_overwrite
if [[ "${force_overwrite}" == "true" ]]; then
do_overwrite="y"
else
echo "${link_name} already exists. Overwrite?"
read do_overwrite
fi
# Anything not starting with "y" means keep the existing file.
if [[ "${do_overwrite:0:1}" != "y" ]]; then
echo "skipping ${path_relative_to_home}"
return
fi
fi
# Create intermediate directories for nested dotfiles (e.g. .config/...).
if [[ ! -d "$(dirname ${link_name})" ]]; then
mkdir --verbose --parents "$(dirname ${link_name})"
fi
ln --verbose --force --symbolic "${target}" "${link_name}"
}
# -f flag: overwrite existing files without prompting.
if [[ "${1}" == "-f" ]]; then
force_overwrite="true"
shift
fi

# Link every file under the repo's home/ tree into $HOME.
echo "Creating symlinks for config files"
#This causes issues with the nested read command in link_dotfiles
#find "${DOT_FILES}/home" -type f -print0 | while IFS= read -r -d $'\0' f; do
for f in $(find "${DOT_FILES}/home" -type f); do # This won't work if any files have spaces
link_dotfile "${f}"
done

# Create the directories vim expects for backup/undo/swap files.
echo "Setting up VIM"
for d in backup undo swap; do
create_directory "${HOME}/.vim/${d}"
done

echo "Done!"
| true
|
09b8eaafd41f08b0a376d5878188727d56e7d261
|
Shell
|
uavpal/disco-storage
|
/disco/uavpal/bin/uavpal_storage.sh
|
UTF-8
| 1,836
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh
# Hotplug handler for USB mass storage on the Parrot Disco drone.
# Usage: uavpal_storage.sh add|remove /dev/<partition>
# On "add" it mounts the disk's last partition (must be FAT32) over the
# internal media directory so photos/videos go to the USB stick; on
# "remove" it recreates the internal media directory.  LED patterns signal
# which storage is active.
# variables
# partition name (e.g. sda1) and its parent disk (e.g. sda, i.e. the
# partition name minus its final digit)
partition="$(echo $2 | cut -d "/" -f 3)"
disk="$(echo $partition | rev | cut -c 2- | rev)"
media_path="/data/ftp/internal_000/Disco/media"
# functions
# LED feedback: colour wheel = external (USB) storage active.
led_indicate_external () {
ldc set_pattern color_wheel true
sleep 5
ldc set_pattern idle true
}
# LED feedback: low-battery pattern = internal storage active.
led_indicate_internal () {
ldc set_pattern demo_low_bat true
sleep 3
ldc set_pattern idle true
}
if [ "$1" == "add" ]; then
last_partition=$(ls /dev/${disk}? | tail -n 1)
if [ "$last_partition" != "/dev/${partition}" ]; then
exit 1 # only proceed if the last partition has triggered the script (necessary for GPT partition tables)
fi
ulogger -s -t uavpal_storage "... disk ${disk} has been detected, trying to mount its last partition ${partition}"
mount -t vfat -o rw,noatime /dev/${partition} ${media_path}
if [ $? -ne 0 ]; then
ulogger -s -t uavpal_storage "... could not mount USB mass storage partition ${partition} - please ensure the file system is FAT32 (and not exFAT!). Exiting!"
led_indicate_internal
exit 1
fi
ulogger -s -t uavpal_storage "... partition ${partition} has been mounted successfully"
diskfree=$(df -h | grep ${partition})
ulogger -s -t uavpal_storage "... photos and videos will now be stored on the USB mass storage device (capacity: $(echo $diskfree | awk '{print $2}') / available: $(echo $diskfree | awk '{print $4}'))"
led_indicate_external
elif [ "$1" == "remove" ]; then
ulogger -s -t uavpal_storage "... disk ${disk} has been removed"
umount -f ${media_path}
diskfree=$(df -h | grep internal_000)
ulogger -s -t uavpal_storage "... photos and videos will now be stored on the drone's internal memory (capacity: $(echo $diskfree | awk '{print $2}') / available: $(echo $diskfree | awk '{print $4}'))"
# Recreate the internal media directory that the mount had shadowed.
mkdir ${media_path}
chmod 755 ${media_path}
chown root:root ${media_path}
led_indicate_internal
fi
| true
|
d7b42299d2c55830f6c585cf81c36e409bd9c5ca
|
Shell
|
superheroCEO/practice-shell-scripts
|
/Shell_Scripting_Bible/Ch13/1.10b_Until-command.sh
|
UTF-8
| 399
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Demonstrates the 'until' loop: the body runs as long as the test FAILS.
# Counts down from 100 in steps of 25, printing each value.
var1=100
until [ "$var1" -eq 0 ]
do
echo "$var1"
# Modern POSIX arithmetic expansion replaces the deprecated $[ ] form.
var1=$(( var1 - 25 ))
done
echo $?
echo "Think, 'As long as 1, execute the command'"
echo
echo "This example tests the var1 variable TO DETERMINE WHEN the until loop should stop. As soon as the variable valueof the variable is equal to 0, the UNTIL command stops the loop. Remember to be careful on when usin multiple test commands..."
|
144808fd9d167733d640841419be16d1fd574eee
|
Shell
|
mruse/rsyncd
|
/rsync.sh
|
UTF-8
| 2,466
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# @author: me@mruse.cn
# @create: 2015-05-23
# @update: 2015-05-23
# One-shot rsync-daemon installer for a yum-based system: installs rsync,
# appends a templated /etc/rsyncd.conf, substitutes the module settings
# defined below, starts the daemon and adds it to rc.local, configures
# logrotate, opens the firewall port for the allowed hosts, and installs
# an init script.
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
export PATH=$PATH
############ Define ############
# Module name, exported path, description and comma-separated allowed hosts.
# These placeholder values are substituted into the config by sed below.
rsyncd_mod="mruse"
rsyncd_path="/path/to/webroot"
rsyncd_comment="Comments for MrUse Project"
rsyncd_allows="192.168.0.1,192.168.0.2"
# Install rsync
yum --noplugins --nogpgcheck -y install rsync
# Create /etc/rsyncd.conf
# (appended verbatim; '$(date ...)' inside the heredoc is expanded now)
cat >> /etc/rsyncd.conf <<RSYNC
# @start Rsyncd by me@mruse.cn $(date +%F_%T)
# @daemon: /usr/bin/rsync --daemon --config=/etc/rsyncd.conf
# @demo:
# rsync -arogvzP --delete --exclude=filter/* /path/to/webroot/* 192.168.0.1::mruse
# rsync -vzrtopg --delete --exclude /path/to/webroot/* /path/to/ahother/webroot/
# rsync -arogvzP --delete --exclude=data/* /path/to/webroot/* 192.168.0.1:/path/to/webroot
##########
# Global #
##########
uid = root
gid = root
port = 873
lock file = /var/run/rsyncd.lock
pid file = /var/run/rsyncd.pid
log file = /var/log/rsyncd.log
max connections = 36000
#syslog facility = local5
###########
# Modules #
###########
[mruse]
path = /path/to/webroot
comment = Comments for MrUse Project
list = false
read only = no
write only = yes
use chroot = no
ignore errors = yes
hosts allow = 192.168.0.1,192.168.0.2
hosts deny = *
#auth users =
#secrets file =
# @end Rsyncd by me@mruse.cn $(date +%F_%T)
RSYNC
# Modify config
# '#' is used as the sed delimiter so the slashes in paths need no escaping.
sed -i 's#mruse#'$rsyncd_mod'#g' /etc/rsyncd.conf
sed -i 's#/path/to/webroot#'$rsyncd_path'#g' /etc/rsyncd.conf
#sed -i 's#^comment.*#comment = '$rsyncd_comment'#g' /etc/rsyncd.conf # todo:
sed -i 's#192.168.0.1,192.168.0.2#'$rsyncd_allows'#g' /etc/rsyncd.conf
# Start & Bootup
/usr/bin/rsync --daemon --config=/etc/rsyncd.conf
grep '^/usr/bin/rsync' /etc/rc.local|| echo '/usr/bin/rsync --daemon --config=/etc/rsyncd.conf' >> /etc/rc.local
# Rotate /var/log/rsyncd.log
cat >> /etc/logrotate.d/rsync <<LOGROTATE
/var/log/rsyncd.log {
compress
delaycompress
missingok
notifempty
rotate 6
create 0600 root root
}
LOGROTATE
/etc/init.d/rsyslog restart
# Config Iptables
# Insert the rsync ACCEPT rule just before the first ssh/http rule so it
# takes effect ahead of any catch-all rules.
line_number=$(iptables -n -L --line-number |grep ':22\|:80'|awk '{print $1}'| head -1)
#echo $line_number
iptables -I INPUT $line_number -p tcp -m state --state NEW -m tcp -s $rsyncd_allows --dport 873 -j ACCEPT
/etc/init.d/iptables save
# Shell for {start|stop}
wget https://raw.githubusercontent.com/mruse/rsyncd/master/rsyncd -P /etc/init.d/
chmod +x /etc/init.d/rsyncd
|
a5eb0bb60bd829b82b610199103fa86dd15af386
|
Shell
|
IvoNet/jakarta-ee-microprofile-companion
|
/usb-stick/VM/import-appliance.sh
|
UTF-8
| 963
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#set -x
OVA=$(ls -1 *.ova|tail -r|head -1)
if [ -z ${OVA} ]; then
echo "No *.ova file fond to import as an appliance..."
exit 1
fi
echo "Importing: ${OVA}"
echo "Configuring host-only network with specific ip addresses..."
network_interface=$(VBoxManage hostonlyif create|grep Interface|awk '{print $2}'|sed "s/'//g")
VBoxManage hostonlyif ipconfig "${network_interface}" \
--ip 192.168.10.1 \
--netmask 255.255.255.0
VBoxManage dhcpserver add \
--ifname "${network_interface}" \
--ip 192.168.10.2 \
--netmask 255.255.255.0 \
--lowerip 192.168.10.100 \
--upperip 192.168.10.200 \
--enable
echo "The configured network to use is: ${network_interface}"
VM_NAME=jakartaee-microprofile-box
VBoxManage import "${OVA}" --vsys 0 --vmname ${VM_NAME} --options keepnatmacs
VBoxManage modifyvm ${VM_NAME} --nic2 hostonly --hostonlyadapter2 "${network_interface}"
VBoxManage startvm ${VM_NAME} --type headless
| true
|
363aa22e1251953a640049b01a2ab8fa03c74c81
|
Shell
|
jcelliott/dotfiles
|
/.zshrc
|
UTF-8
| 5,348
| 3.203125
| 3
|
[] |
no_license
|
# ___ __
# /_ / ___ / / ________
# _ / /_(_-</ _ \/ __/ __/
# (_)___/___/_//_/_/ \__/
#
# Joshua Elliott
#
# TODO:
# - Move from oh-my-zsh to just antigen
# --------------------------
# colorized output
# Each helper wraps its argument in an ANSI colour escape and resets at the
# end: cinfo = green, cwarn = yellow, cerror = red.
function cinfo() {
  local start=$'\x1b[32m' reset=$'\x1b[0m' # green
  echo -e "${start}$1${reset}"
}
function cwarn() {
  local start=$'\x1b[33m' reset=$'\x1b[0m' # yellow
  echo -e "${start}$1${reset}"
}
function cerror() {
  local start=$'\x1b[31m' reset=$'\x1b[0m' # red
  echo -e "${start}$1${reset}"
}
### antigen ###
# Plugin manager setup: custom dirs, then bundles and theme.  The order
# matters — 'antigen use oh-my-zsh' must precede the oh-my-zsh bundles.
ZSH_CUSTOM="$HOME/.zsh"
ADOTDIR="$HOME/.zsh"
source "$HOME/.zsh/plugins/antigen/antigen.zsh"
# Bundles
antigen use oh-my-zsh
ZSH_TMUX_AUTOCONNECT=false
# don't let the plugin override our TERM setting
# ZSH_TMUX_FIXTERM=false
# antigen bundle tmux
antigen bundle gitfast
antigen bundle ruby
antigen bundle python
antigen bundle golang
# NOTE(review): $platform is only set further down this file, so at this
# point it is empty and these OS-specific bundles never load — verify.
if [[ $platform == 'darwin' ]]; then
antigen bundle osx
antigen bundle brew
fi
antigen bundle zsh-users/zsh-syntax-highlighting
# Theme
antigen theme afowler
antigen apply
# TODO: move this to a more appropriate place
ZSH_THEME_GIT_PROMPT_DIRTY="%{$fg[red]%}*%{$fg[yellow]%}"
### zsh options ###
# GLOBDOTS lets files beginning with a . be matched without explicitly specifying the dot
setopt globdots
# HISTIGNOREDUPS prevents the current line from being saved in the history if it is the same as the previous one
setopt histignoredups
# TODO: look into getting corrections working better
# CORRECT turns on spelling correction for commands
unsetopt correct
# CORRECTALL turns on spelling correction for all arguments.
unsetopt correctall
# Use vi style bindings
#bindkey -v
# use <C-space> like up arrow
bindkey '^ ' up-line-or-search
# use <C-B> instead of <C-A> (tmux prefix)
bindkey '^B' beginning-of-line
### Check OS ###
# Sets $platform to darwin/linux and, on linux, $distro to debian/arch.
platform='unknown'
case `uname` in
Darwin)
platform='darwin'
;;
Linux)
platform='linux'
;;
esac
# Determine the specific linux distro
distro=''
if [ $platform = 'linux' ]; then
if [ -f /etc/debian_version ]; then
distro='debian'
elif [ -f /etc/arch-release ]; then
distro='arch'
else
distro='unknown'
fi
fi
cinfo "Operating System: $platform $distro"
### Path ###
PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/X11/bin:/usr/texbin
if [ -d "$HOME/bin" ]; then
PATH=$HOME/bin:$PATH
fi
export PATH
### Term ###
# export TERM='xterm-256color'
# export TERM='xterm-16color'
### Editor ###
export EDITOR='vim -f'
### Use vim for man pager ###
# Renders a man page read-only inside vim with pager-like key bindings
# (q quits, space/b page, K looks up the word under the cursor).
function vman {
vim -MRn -c 'set ft=man nomod nolist nonumber' \
-c 'map q :q<CR>' \
-c 'map <SPACE> <C-D>' \
-c 'map b <C-U>' \
-c 'nmap K :Man <C-R>=expand(\\\"<cword>\\\")<CR><CR>' \
=(/usr/bin/man $* | col -bx)
# zsh process substitution: =(...) is replaced with the name of a file containing its output
# this is not the same as <(...) which creates a named pipe (FIFO) instead
}
# alias man='vman'
### Aliases ###
# Platform-specific ls/tmux aliases (GNU coreutils on macOS via 'g' prefix).
if [[ $platform == 'darwin' ]]; then
# Using GNU coreutils
alias ls='gls --color=auto'
alias ll='gls -lah --color=auto'
alias la='gls -a --color=auto'
alias ql='quick-look'
alias tmux='TERM=xterm-256color tmux -2'
elif [[ $platform == 'linux' ]]; then
alias ls='ls --color=auto'
alias ll='ls -lah --color=auto'
alias la='ls -a --color=auto'
if [[ $distro == 'arch' ]]; then
# alias tmux='tmux -2'
# alias tmux='TERMINFO=/usr/share/terminfo/x/xterm-16color TERM=xterm-16color tmux -2'
alias tmux='TERM=screen-256color-bce tmux'
fi
fi
alias df='df -h'
# alias no='ls' # for dvorak
alias vimconf='vim ~/.vimrc'
alias zshconf='vim ~/.zshrc'
# Git aliases
# TODO: move these to real git aliases?
alias gst='git status'
alias gc='git commit -v'
alias glg='git log --all --stat --graph --decorate'
alias glgo='git log --all --graph --decorate --oneline'
alias glgs='git log --all --stat --graph --decorate --max-count=3'
alias ga='git add'
alias gsa='git submodule add' # gsa <repo> <directory>
alias gsi='git submodule init'
alias gsu='git submodule update'
alias gsuu='git submodule foreach git pull origin master'
alias gcm='git checkout master'
alias gch='git checkout'
alias gcb='git checkout -b'
alias gl='git pull'
alias gs='git push'
alias gpp='git pull;git push'
alias gf='git diff'
alias gba='git branch -a'
# turn off globbing for rake commands (rake task[arg] breaks)
alias rake="noglob rake"
# Use hub (github extensions for git) if it's installed
command -v hub >/dev/null 2>&1
if [ $? -eq 0 ]; then
eval "$(hub alias -s)"
# alias git='hub'
else
cwarn "You should install hub (defunkt.io/hub)"
fi
# easy json pretty print (e.g., `curl something | json`)
alias json='python -m json.tool'
### directory colors on linux (for using solarized color scheme) ###
if [ -f "$HOME/.config/.dircolors" ]; then
#echo "using .dircolors"
if [[ $platform == 'darwin' ]]; then
eval `gdircolors $HOME/.config/.dircolors`
elif [[ $platform == 'linux' ]]; then
eval `dircolors ~/.config/.dircolors`
fi
fi
### Ruby ###
if [ -d "$HOME/.rvm" ]; then
#echo "Set up ruby stuff"
PATH=$PATH:$HOME/.rvm/bin # Add RVM to PATH for scripting
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
fi
### Local customizations ###
if [ -f "$HOME/.zshrc.local" ]; then
cinfo "loading .zshrc.local"
# BUG FIX: a stray trailing ']' was being passed to 'source' as an extra
# argument (and so as $1 inside the sourced file).
source "$HOME/.zshrc.local"
fi
### Start in home directory and Confirm load ###
# cd $HOME
cinfo "energize!"
| true
|
b11ed3c8b3cc24f7c186173918b85445093b3057
|
Shell
|
Ambrose-Chen/DepTools
|
/init.sh
|
UTF-8
| 791
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a yum repository from a mounted install CD-ROM, verify yum
# works, and install a base package set.  Relies on helper functions
# (step_check, check_cdrom, check_mount, check_yum, echo_green) and the
# $mount_addr variable sourced from ./common and ./config next to this
# script.
export baseurl=`cd $(dirname $0) ; pwd`
source $baseurl/common
source $baseurl/config
# Substitute the mount point into the repo template (slashes escaped for sed).
sed -i "s/{filePath}/`echo $mount_addr | sed 's/\//\\\\\//g'`/g" $baseurl/base.repo
cp $baseurl/base.repo /etc/yum.repos.d/
[ -d $mount_addr ]
step_check "config: mount directory ------ exist" "config: mount directory ------ not exist" 1
check_cdrom
step_check "cdrom ------ yes" "cdrom ------ no" 1
check_mount '/dev/sr0'
step_check "/dev/sr0 not mount yet" "/dev/sr0 already mounted" 1
mount /dev/sr0 $mount_addr >/dev/null 2>&1
check_yum
step_check "yum ------ sccess" "yum ------ error" 1
# Base tool set installed from the CD repo.
yum -y install \
bash-completion \
wget \
vim \
&>/dev/null
echo_green "init sccessful"
umount $mount_addr
| true
|
de2b1418caf92bde421017d522ec2d9054af0b23
|
Shell
|
tapiau/btrfs-snap
|
/bin/incremental
|
UTF-8
| 400
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a read-only btrfs snapshot of the given mountpoint under
# <mountpoint>/.snap/ro-<timestamp>.
# NOTE(review): --all and --list are advertised by --help but are not
# implemented yet.
NOW=$(date +%Y%m%d-%H%M%S)

if [ "$1" != "" ]; then
	if [ "$1" == "--help" ]; then
		echo "--help"
		echo "--all - shows all snaps in all filesystems"
		echo "--list mountpoint - shows snaps"
		echo "mountpoint - do snapshot!"
	else
		# The DATE variable from the original was assigned but never used;
		# it has been dropped.  Quoting protects paths with spaces.
		btrfs subvolume snapshot -r "$1" "$1/.snap/ro-${NOW}"
	fi
else
	echo "Missing param"
fi
| true
|
df3c94d0efd25d09dc0cf367e1df6a2cac8d108e
|
Shell
|
beatrohrer/dotfiles
|
/scripts/install-dotfiles.sh
|
UTF-8
| 601
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a Debian/Ubuntu machine: install base packages, symlink the
# dotfiles from ~/dotfiles into place, copy SSH authorized keys, install
# vim plugins and switch the login shell to zsh.
sudo apt-get update && \
sudo apt-get upgrade -y && \
sudo apt-get install -y vim zsh tree tmux cdargs git curl httpie htop
# set symlinks
ln -s ~/dotfiles/.vim ~/.vim
ln -s ~/dotfiles/.vimrc ~/.vimrc
ln -s ~/dotfiles/.zshrc ~/.zshrc
ln -s ~/dotfiles/.tmux.conf ~/.tmux.conf
mkdir -p ~/.config/nvim
ln -s ~/dotfiles/config/nvim/init.vim ~/.config/nvim/init.vim
# ssh pub keys
mkdir ~/.ssh 2> /dev/null
cp ~/dotfiles/ssh/authorized_keys ~/.ssh/
# install vim plugins
vim +PluginInstall +qall
# select zsh as default shell
command -v zsh >/dev/null 2>&1 && chsh -s $(which zsh)
| true
|
8e067154bd4e50e555efc6f216932b997737e2c9
|
Shell
|
danbikle/stk_mkt_madlib_logregr
|
/wget_ydata.bash
|
UTF-8
| 1,796
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# ~/tv/wget_ydata.bash
# I use this script to wget some yahoo stock prices.
#
# Downloads daily price history for each ticker below into
# /tmp/ydata/<SYMBOL>.csv.  The loop replaces 21 copy-pasted wget lines;
# the downloaded files are identical to before.
mkdir -p /tmp/ydata/
cd /tmp/ydata/ || exit 1
for sym in SPY BA CAT CVX DD DIS ED GE HON HPQ IBM JNJ KO MCD MMM MO MRK MRO NAV PG XOM; do
	wget --output-document="${sym}.csv" "http://ichart.finance.yahoo.com/table.csv?s=${sym}"
done
exit
| true
|
d2863ec64a88f4f9365402d7fb60f8778f6b9f6b
|
Shell
|
pvcastro/1-billion-word-language-modeling-benchmark
|
/scripts/get-data-datalawyer.sh
|
UTF-8
| 4,068
| 3.640625
| 4
|
[
"Apache-2.0",
"GPL-3.0-or-later"
] |
permissive
|
#!/bin/bash
# Copyright 2013 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Does all the corpus preparation work.
#
# Assumes ptdatalawyer-latest-pages-articles.xml.bz2 (downloaded from https://dumps.datalawyermedia.org/ptdatalawyer/latest/)
# have already been extracted by https://github.com/attardi/datalawyerextractor using:
#    WikiExtractor.py -b 2G ptdatalawyer-latest-pages-articles.xml.bz2  (the parameter -b 2G produces a single file for the whole dump)
#
# The extracted text file are extracted into a folder which must be set as ${DATALAWYER_FILE}
#
# Takes the data in:
# ${DATALAWYER_FILE}, strips xml tags which separates each document in datalawyer, removes duplication with sort -u.
# Removes lines that contains only numbers and puncuation, shuffles every sentence, and runs punctuation
# normalization and tokenization, producing the data in ./datalawyer-processed/datalawyer.tokenized.txt
#
# It then splits the data in 100 shards, randomly shuffled, sets aside
# held-out data, and splits it into 50 test partitions.
echo ${DATALAWYER_FILE}
# Start from a clean working directory.
if [[ -d datalawyer-processed ]]
then
rm -rf datalawyer-processed/*
else
mkdir datalawyer-processed
fi
FOLDER=${DATALAWYER_FILE}
printf "\n***** file ${DATALAWYER_FILE} *****\n"
#python ./scripts/strip_xml.py "${DATALAWYER_FILE}"
# Deduplicate: sort -u removes repeated lines across the whole corpus.
echo "Sorting ${DATALAWYER_FILE}"
cat "${DATALAWYER_FILE}" | sort -u --output=datalawyer-processed/datalawyer.sort.txt
echo "Done sorting ${DATALAWYER_FILE}"
python ./scripts/strip_special_lines.py datalawyer-processed/datalawyer.sort.txt datalawyer-processed/datalawyer.sort.clean.txt datalawyer-processed/datalawyer.sort.filtered.txt
echo "Shuffling lines from clean file"
shuf datalawyer-processed/datalawyer.sort.clean.txt > datalawyer-processed/datalawyer.shuffled.txt
echo "Done shuffling lines"
# Set environemnt vars LANG and LANGUAGE to make sure all users have the same locale settings.
export LANG=pt_BR.UTF-8
export LANGUAGE=pt_BR:
export LC_ALL=pt_BR.UTF-8
# Normalise punctuation then tokenize (Moses-style perl scripts, pt locale).
echo "Tokenizing shuffled file"
time cat datalawyer-processed/datalawyer.shuffled.txt | \
./scripts/normalize-punctuation.perl -l pt | \
./scripts/tokenizer.perl -l pt > \
datalawyer-processed/datalawyer.tokenized.txt
echo "Done tokenizing"
# Split the data in 100 shards
if [[ -d training-datalawyer ]]
then
rm -rf training-datalawyer/*
else
mkdir training-datalawyer
fi
./scripts/split-input-data.perl \
--output_file_base="$PWD/training-datalawyer/datalawyer" \
--num_shards=10 \
--input_file=datalawyer-processed/datalawyer.tokenized.txt
echo "Done splitting corpus into 10 shards datalawyer-000??-of-00010."
# Hold 00000 shard out, and split it 50 way.
if [[ -d heldout-datalawyer ]]
then
rm -rf heldout-datalawyer/*
else
mkdir heldout-datalawyer
fi
mv ./training-datalawyer/datalawyer-00000-of-00010 \
heldout-datalawyer/
echo "Set aside shard 00000 of datalawyer-000??-of-00010 as held-out data."
./scripts/split-input-data.perl \
--output_file_base="$PWD/heldout-datalawyer/datalawyer.heldout" \
--num_shards=1 \
--input_file=heldout-datalawyer/datalawyer-00000-of-00010
echo "Done splitting held-out data into 1 shard."
rm -rf heldout-datalawyer/datalawyer-00000-of-00010
# Package the shards unless archives already exist (then remove the stale ones).
if [[ -d training-datalawyer.tar.gz ]]
then
rm -rf training-datalawyer.tar.gz
rm -rf heldout-datalawyer.tar.gz
else
tar -czvf training-datalawyer.tar.gz training-datalawyer
tar -czvf heldout-datalawyer.tar.gz heldout-datalawyer
fi
echo "python ./scripts/generate_vocabulary.py --corpus-prefix datalawyer --path-in "$(pwd)""
| true
|
93da87bf451eee1788e1b8ab9d2df6e8cfdd2ff8
|
Shell
|
bilts/thin-egress-app
|
/setup_jwt_cookie.sh
|
UTF-8
| 806
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Generate an RS256 keypair for TEA JWT cookies and store it in AWS
# Secrets Manager under the name 'jwt_secret_for_tea'.
# BUG FIX: the original shebang was '#! /usr/bash', which is not a valid
# interpreter path on any common system.

# Write the base64-encoded keypair (from the env vars set below) to
# /tmp/jwtkeys.json.
function GENERATE_JWTKEYS_FILE {
cat > /tmp/jwtkeys.json <<EOL
{
"rsa_priv_key": "${rsa_priv_key}",
"rsa_pub_key": "${rsa_pub_key}"
}
EOL
}

# Create a 4096-bit RSA keypair, base64-encode both halves into env vars,
# delete the key material from disk and emit the JSON file.
function GENERATE_TEA_CREDS {
cd /tmp || exit 1
ssh-keygen -t rsa -b 4096 -m PEM -f ./jwtcookie.key -N ''
openssl base64 -in jwtcookie.key -out jwtcookie.key.b64 -A
openssl base64 -in jwtcookie.key.pub -out jwtcookie.key.pub.b64 -A
export rsa_priv_key=$(<jwtcookie.key.b64)
export rsa_pub_key=$(<jwtcookie.key.pub.b64)
rm jwtcookie.key*
GENERATE_JWTKEYS_FILE
}

GENERATE_TEA_CREDS

# Optional overrides: $profile_name and $aws_region.
aws secretsmanager create-secret --name jwt_secret_for_tea --profile ${profile_name:-default} --region ${aws_region:-us-east-1} \
	--description "RS256 keys for TEA app JWT cookies" \
	--secret-string file:///tmp/jwtkeys.json
| true
|
79a89b4610b28414fe7105568245081ef07fc5f4
|
Shell
|
akshaybarve06/BASIC_SHELL_PROGRAMS
|
/operatorPrecedence.sh
|
UTF-8
| 791
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash -x
# @ Purpose : Check Operator Precedence and Print The Result
# @ Author  : Akshay Dhananjay Barve
# @ Version : 18.04.3 lts
# @ Since   : 29-02-2020 / Saturday
# We Have To Check Operator Precedence
# For Given Expressions and Print its output
# Modernised: backticks replaced with $( ) and expansions quoted; each
# expression is evaluated by bc with 2 decimal places of precision.
read -p "Enter Three Numbers To Do Operations " num1 num2 num3
echo "Output of This (a + b * c) Expression is.."
answer=$(echo "scale=2;( $num1 + $num2 * $num3 ) " | bc)
echo "$answer"
echo "Output of This (c + a / b) Expression is.."
answer=$(echo "scale=2; ( $num3 + $num1 / $num2 ) " | bc)
echo "$answer"
echo "Output of This (a % b + c) Expression is.."
answer=$(echo "scale=2; ( $num1 % $num2 + $num3 )" | bc)
echo "$answer"
echo "Output of This (a * b + c) Expression is.."
answer=$(echo "scale=2; ( $num1 * $num2 + $num3 )" | bc)
echo "$answer"
| true
|
d321b919118ee5c711c18a619e4622cad8272392
|
Shell
|
wanglianlian/360
|
/4.sh
|
UTF-8
| 442
| 3.875
| 4
|
[] |
no_license
|
#!/bin/sh
# Log rotation: while the log file is larger than the threshold (in KB, as
# reported by `du -k`), copy it to a timestamped file and truncate the
# original so writing restarts from empty.
# BUG FIX: the threshold was "1024Byte", a non-numeric string that made the
# integer comparison in watch() fail on every call.
M=1024
filename=access.log
# Echo "true"/"false" so the caller can use the output as a shell command.
watch ()
{
if [ "${1:-0}" -ge "$M" ]
then
echo true
else
echo false
fi
}
while $(watch $(du -k "$filename" 2>/dev/null | awk '{print $1}'))
do
newfilename=$(date +"%F-%H:%M:%S")
cp "$filename" "$newfilename"
# BUG FIX: 'echo "">file' left a newline behind; truncate to zero bytes.
: > "$filename"
done
| true
|
f83760fac7e58bfaed36f15c636dad72cfa27803
|
Shell
|
intellivoid/Intellivoid-Docker
|
/SpamProtectionBot/check_git.shx
|
UTF-8
| 639
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
# Updater watchdog for SpamProtectionBot: every 30 seconds it (optionally)
# checks the git remote for new commits; when an update is found it kills
# the running PHP processes and exits 0 so the supervisor can restart the
# updater in download mode.  The actual git check is currently commented
# out, so $changed is always 0 and the loop only rotates logs.
echo "[SpamProtectionBot-Updater] Starting..."
while true; do
echo "[SpamProtectionBot-Updater] Checking for updates..."
changed=0
#cd /data/SpamProtectionBot
#git remote update && git status -uno | grep -q 'Your branch is behind' && changed=1
if [ $changed = 1 ]; then
echo "[SpamProtectionBot-Updater] Attempting to terminate processes & starting update..."
killall php
echo "[SpamProtectionBot-Updater] Killing bootstrap. Restarting updater in DL mode..."
exit 0
else
echo "[SpamProtectionBot-Updater] No updates found. Checking in the next 30 seconds..."
fi
logrotate -v /etc/logrotate.conf
sleep 30
done
| true
|
97bacf2fc14f615d8bee47701929ee51dc697778
|
Shell
|
shikibu9419/git-fzf-extender
|
/functions/pull-request.zsh
|
UTF-8
| 2,078
| 3.6875
| 4
|
[] |
no_license
|
# Entry point: 'create' goes straight to PR creation, anything else opens
# the interactive PR browser.  __git_extended::init (defined elsewhere)
# must succeed first.
git-extended-pull-request() {
__git_extended::init || return 1
[[ $1 = create ]] \
&& __git_extended::create_pr \
|| __git_extended::list_pr
}
# Interactive PR browser built on hub + fzf: toggles between open and
# closed PRs, offers checkout via ctrl-l, and can jump to PR creation.
__git_extended::list_pr() {
open_prs="$(unbuffer hub pr list -s open | sed 's/  *#/#/')"
closed_prs="$(unbuffer hub pr list -s closed | sed 's/  *#/#/')"
opts="${YELLOW}+   CREATE PULL REQUEST\n${MAGENTA}<-> SWITCH STATUS${DEFAULT}"
stts=open
while true ; do
[ $stts = open ] && prs=$open_prs || prs=$closed_prs
prompt_msg="SELECT PR ($stts)> "
# NOTE(review): this multi-line $FZF invocation appears to rely on zsh
# line continuation inside the command substitution — verify original
# formatting before editing.
selected=$(echo -e "$prs\n$opts" | sed '/^$/d' |
$=FZF --prompt=$prompt_msg
--bind "ctrl-l:execute(echo {} | cut -d' ' -f1 | cut -b 2- | hub pr checkout)")
case "$selected" in
'<->'*)
[ $stts = open ] && stts=closed || stts=open
continue
;;
'+'*)
__git_extended::create_pr
;;
"")
;;
*)
# Lines look like "#<number> <title>"; strip the leading '#'.
hub pr show $(echo $selected | cut -d' ' -f1 | cut -b 2-)
esac
return
done
}
# Guided PR creation: pick an optional PULL_REQUEST template from the
# repo's template directory, otherwise prompt for a message; pick labels
# via fzf multi-select; then call hub pull-request.
__git_extended::create_pr() {
GIT_ROOT=$(git rev-parse --show-cdup)
TEMPLATES_ROOT=${GIT_ROOT}${GITHUB_TEMPLATES_PATH}
echo "${BOLD}--- CREATE MODE ---${DEFAULT}"
if [ -d $TEMPLATES_ROOT ] && [ -f $TEMPLATES_ROOT/PULL_REQUEST* ]; then
printf 'Template: '
local prompt_msg='SELECT TEMPLATE> '
local prev_cmd="less -R $TEMPLATES_ROOT/{}"
template=$(ls $TEMPLATES_ROOT/PULL_REQUEST* | xargs -I % sh -c 'basename %' |
$=FZF --prompt=$prompt_msg --preview=$prev_cmd)
echo $template
fi
if [[ -z $template ]]; then
printf 'Message: '; read msg
fi
local prompt_msg='SELECT LABEL> '
printf 'Labels: '
# Multi-selected labels are joined with commas for hub's -l option.
selected=$(unbuffer hub issue labels |
$=FZF -m --prompt=$prompt_msg |
tr '\n' ',' | sed -e 's/ *//g' -e 's/,$//')
echo $selected
echo
echo 'Creating PR...'
# ${var:+...} expands the option only when the variable is non-empty.
label_opt=${selected:+-l $selected}
template_opt=${template:+-F $TEMPLATES_ROOT/$template --edit}
hub pull-request ${msg:+-m $msg} $=template_opt $=label_opt \
&& echo 'Done!' \
|| echo 'Failed...'
}
| true
|
7d1207eca5b2aa2919332a6062573c080ec243ba
|
Shell
|
jfuerlinger/leocloud
|
/www/docker/fix-base-url.sh
|
UTF-8
| 544
| 3.734375
| 4
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
# Rewrites the <base href="..."> tag of every HTML file served by nginx to
# the value of the BASE_HREF environment variable, so a single docker image
# can be deployed under several sub-paths of the same hostname.  When
# BASE_HREF is empty or unset, the files are left untouched.
HTML=/usr/share/nginx/html
if [[ -z "${BASE_HREF}" ]]; then
  echo "no BASE_HREF environment variable set, keep base href... as is is"
else
  echo "BASE_HREF=$BASE_HREF patch the head to base href=\"$BASE_HREF\""
  find $HTML -type f -name "*.html" -print -exec sed -i -e "s,<base href=\"/\",<base href=\"/$BASE_HREF/\"," {} \;
fi
| true
|
130b8646b3690c1e050eda9dd384e6e60bc386c2
|
Shell
|
goolzerg/nexus-registry-docker
|
/init.sh
|
UTF-8
| 1,390
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Bring up a Nexus registry with docker-compose behind a self-signed TLS
# cert, wait until it answers, create a hosted Docker repository via the
# REST API, and print the generated admin password.
# Install docker-compose if it is missing.
if ! [ -x "$(command -v docker-compose)" ]; then
echo 'Error: docker-compose is not installed.' >&2
echo 'Installing...'
sudo curl -L "https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
fi
# Install openssl if it is missing.
if ! [ -x "$(command -v openssl)" ]; then
echo 'Error: openssl is not installed.' >&2
echo 'Installing...'
sudo apt-get update
sudo apt-get install openssl
fi
# Self-signed certificate valid for ~10 years, generated non-interactively.
openssl req -nodes -days 3650 -x509 -newkey rsa:2048 -keyout ./private.key -out ./cert.crt -subj '/CN=www.test.com/O=My Company Name LTD./C=US'
sudo docker volume create --name=nexus-data
sudo docker-compose up -d
# Poll until the web UI responds (self-signed cert -> --insecure).
until curl --fail --insecure https://localhost; do
echo "Nexus server is starting"
echo "Wait..."
sleep 20
done
# Nexus writes the initial admin password into the data volume.
password=$(sudo docker-compose exec nexus cat /nexus-data/admin.password)
# Create a hosted Docker repository listening on port 5000.
curl -X POST -u "admin:$password" --insecure "https://127.0.0.1/service/rest/beta/repositories/docker/hosted" -H "accept: application/json" -H "Content-Type: application/json" -d "{ \"name\": \"docker_repo\", \"online\": true, \"storage\": { \"blobStoreName\": \"default\", \"strictContentTypeValidation\": true, \"writePolicy\": \"ALLOW_ONCE\" }, \"docker\": { \"v1Enabled\": false, \"forceBasicAuth\": true, \"httpPort\": 5000 }}"
sleep 5
echo "Admin password is: $password"
| true
|
b021fe99b475c87d10540da0f3e74fc6fed877eb
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/bbswitch-e531/PKGBUILD
|
UTF-8
| 1,062
| 2.875
| 3
|
[] |
no_license
|
# Arch Linux PKGBUILD for bbswitch built against the custom linux-e531
# kernel: compiles the out-of-tree module against the matching kernel
# headers and installs it gzipped into the kernel's extramodules dir.
pkgname=bbswitch-e531
_basename=bbswitch
groups=('ThinkPad-E531')
pkgver=0.8
_extramodules=extramodules-4.2-e531 # Don't forget to update bbswitch-lts.install
pkgrel=6
pkgdesc="Kernel module allowing to switch dedicated graphics card on Optimus laptops"
arch=('i686' 'x86_64')
url=("http://github.com/Bumblebee-Project/bbswitch")
license=('GPL')
depends=('linux-e531>=4.2' 'linux-e531<4.3')
makedepends=('linux-e531-headers>=4.2' 'linux-e531-headers<4.3')
install=${pkgname}.install
source=("${_basename}-$pkgver.tar.gz::https://github.com/Bumblebee-Project/bbswitch/archive/v${pkgver}.tar.gz")
sha256sums=('76cabd3f734fb4fe6ebfe3ec9814138d0d6f47d47238521ecbd6a986b60d1477')
# Build the module against the exact installed kernel version.
build() {
cd ${srcdir}/${_basename}-${pkgver}
_kernver="$(cat /usr/lib/modules/${_extramodules}/version)"
make KDIR=/lib/modules/${_kernver}/build
}
# Install the compiled module (gzipped, as depmod expects in extramodules).
package() {
cd ${srcdir}/${_basename}-${pkgver}
install -Dm644 bbswitch.ko "${pkgdir}"/usr/lib/modules/${_extramodules}/bbswitch.ko
gzip "${pkgdir}/usr/lib/modules/${_extramodules}/bbswitch.ko"
}
| true
|
efd9f212fd9f12678133cc9af6ea9a3ef000372e
|
Shell
|
sutizi/dotfiles
|
/run.sh
|
UTF-8
| 620
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a fresh Ubuntu machine: update, install tools, clone the
# dotfiles repo, install the config files and clean up the clone.

# Update and upgrade the system
sudo apt-get update && sudo apt-get upgrade -y

# Install all applications
sudo apt install tmux -y
sudo apt install neovim -y
sudo snap install --classic code
sudo apt-get install chromium-browser

# Clone this repository
git clone https://github.com/sutizi/dotfiles

# Move the configuration files
cd dotfiles/ || exit 1
mv .tmux.conf ~/
# BUG FIX: ~/.config/nvim may not exist yet; create it before the mv.
mkdir -p ~/.config/nvim
mv init.vim ~/.config/nvim/
mv .bash_aliases ~/

# Remove the folder
# BUG FIX: the script was still *inside* dotfiles/ when it tried to delete
# dotfiles/, so the cleanup never worked.  Step out first.
cd ..
rm -r -f dotfiles/
echo "--------------------------------------"
echo "Instalation and configuration finished"
echo "--------------------------------------"
| true
|
3c95d864b29b305fee48cb784c731bbfea17d58a
|
Shell
|
bifferos/bb
|
/openwrt/package/feeds/packages/djbdns/files/walldns.init
|
UTF-8
| 1,266
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh /etc/rc.common
# Copyright (C) 2007 OpenWrt.org
#
# version 20090401 jhalfmoon
# OpenWrt init script for djbdns walldns (reverse-DNS wall).  The config_*
# helpers come from the rc.common framework and read /etc/config/djbdns.
START=46
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
DAEMON=/usr/bin/walldns
NAME=walldns
DESC="Reverse DNS wall"
ROOT=/tmp/walldns
# Rebuild the chroot-style state dir with the configured uid/gid, then
# launch one daemon per configured 'walldns' config section.
start() {
echo "Starting $DESC: $NAME"
config_load djbdns
config_foreach get_userids global
rm -rf $ROOT
mkdir -p $ROOT
chown -R $UID:$GID $ROOT
config_foreach start_daemon walldns
}
# Resolve the configured run-as user/group names to numeric ids.
get_userids() {
local cfg="$1"
config_get UID "$cfg" runasuser
config_get GID "$cfg" runasgroup
UID=`id -u $UID`
GID=`id -g $GID`
}
# Start walldns for one config section, translating the listening
# interface name to its ip address and honouring the logging option.
start_daemon() {
local cfg="$1"
config_get logging "$cfg" logging
config_get iface "$cfg" interface
# Translate listening interfaces to ip addresses
include /lib/network
scan_interfaces
config_get IP "$iface" ipaddr
export ROOT
if [ "$DEBUG" == 1 ] ; then
$DAEMON
elif [ "$logging" == '1' ] ; then
$DAEMON 2>&1 | logger -p local1.info -t $NAME &
else
$DAEMON > /dev/null 2>&1 &
fi
}
# Kill all running walldns processes (excluding this script's own pid).
stop() {
echo -n "Stopping $DESC: $NAME"
kill `pidof $NAME|sed "s/$$//g"` > /dev/null 2>&1
echo " ."
}
restart() {
echo "Restarting $DESC: $NAME... "
stop
sleep 2
start
}
|
360862e176b79d9b2ae03b32563d36dbeac334e1
|
Shell
|
ServerSide-CLS/homework
|
/2017212212167/adduser_exp3.sh
|
UTF-8
| 1,359
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Create a user with a random generated password.
# Usage: adduser_exp3.sh [-l LEN] [-s N] [-o] USERNAME
#   -l LEN  total password length (hex chars derived from sha256 of time)
#   -s N    include N special characters
#   -o      shuffle the special characters into the password
# NOTE(review): $flag keeps only the LAST option seen, so combining
# options (e.g. -l with -s) changes which branch below runs — verify the
# intended precedence with the author.
cnt=0
sp_num=0
flag=0
# The username is taken from the last positional argument.
PARAMS=(${@})
NUM=${#}
user_name=${PARAMS[$NUM-1]}
while getopts l:s:o OPTION
do
case "${OPTION}" in
l)
cnt="${OPTARG}"
flag=1
;;
s)
sp_num="${OPTARG}"
flag=2
;;
o)
flag=3
;;
?)
echo "!!Invalid Option!!>m<" >&2
exit 1
;;
esac
done
i=0
# flag==1: password is $cnt pseudo-random hex characters.
if [[ $flag -eq 1 ]]
then
while [ $i -lt $cnt ]
do
tmp="$(date +%s%N | sha256sum | head -c1)"
password=${password}${tmp}
let i++
done
else
# Otherwise build (cnt - sp_num) hex chars plus sp_num special chars.
while [ $i -lt $(( $cnt-$sp_num )) ]
do
let i++
tmp="$(date +%s%N | sha256sum | head -c1)"
password=${password}${tmp}
done
i=0
while [ $i -lt $sp_num ]
do
let i++
special_char=$(echo '!@#$%^&*()_+=' | fold -w1 | shuf | head -c1)
password2=${password2}${special_char}
done
fi
# flag==2: append specials at the end; flag==3: shuffle them in.
if [[ $flag -eq 2 ]]
then
password=${password}${password2}
elif [[ $flag -eq 3 ]]
then
password=$(echo "${password}${password2}" | fold -w1 | shuf | tr -d '\n')
fi
sudo useradd -m "${user_name}"
echo "Success!0v0 Your password is: $password"
# 'passwd --stdin' is RedHat-specific — TODO confirm target distro.
echo ${password}|passwd --stdin ${user_name} &> /dev/null
#sudo echo ${user_name}:${password}|chpasswd
#passwd -e ${user_name}
| true
|
ebb89af4020379c44c360463f8e7b9d0655f33f5
|
Shell
|
kumaraish/mingw-distro
|
/libpng.sh
|
UTF-8
| 510
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build a static libpng 1.6.37 for the MinGW distro and pack the pruned
# install tree into libpng-1.6.37.7z. Works under /c/temp/gcc.
source ./0_append_distro_path.sh
untar_file libpng-1.6.37.tar
cd /c/temp/gcc
mv libpng-1.6.37 src
mkdir build dest
cd build
# Static-only release build; -DM_LIBRARY= avoids linking a separate libm.
cmake \
    "-DCMAKE_BUILD_TYPE=Release" \
    "-DCMAKE_C_FLAGS=-s -O3" \
    "-DCMAKE_INSTALL_PREFIX=/c/temp/gcc/dest" \
    "-DM_LIBRARY=" \
    "-DPNG_SHARED=OFF" \
    -G Ninja /c/temp/gcc/src
ninja
ninja install
cd /c/temp/gcc
rm -rf build src
mv dest libpng-1.6.37
cd libpng-1.6.37
# Drop tools, duplicate headers/symlinks and pkg-config files before packing.
rm -rf bin include/libpng16 lib/libpng lib/pkgconfig lib/libpng16.a share
# -mx0: store without compression (the distro archive is recompressed later).
7z -mx0 a ../libpng-1.6.37.7z *
| true
|
afedeeea7818a86cbabfc1a8f6e89105d8e8848e
|
Shell
|
flowable/flowable-engine
|
/docs/public-api/tools/flowable-slate/generate.sh
|
UTF-8
| 1,061
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Generate Slate HTML docs for each Flowable REST API (content, decision,
# form, process): convert the swagger spec to markdown with widdershins,
# then build a Slate site per API into target/slate/<api>.
# CLEAN
rm -rf target
mkdir -p target/specfile
# COPY SPEC FILES Automatically Generated
# cp -r ../flowable-oas-generator/target/oas/v2/ target/specfile
# COPY SPEC FILES based on References
cp -r ../../references/swagger/ target/specfile
for apiName in {"content","decision","form","process"}; do
    # EXECUTE WINDERSHIN
    widdershins --summary --noschema --user_templates templates/ -y target/specfile/$apiName/flowable-swagger-$apiName.yaml target/specfile/$apiName/_rest-body.md
    # COPY TO SLATE
    # Remove header from the body (roughly the first 40 lines)
    sed -e '1,40d' target/specfile/$apiName/_rest-body.md > slate/source/includes/_rest-body.md
    # Add Header API Name Title.
    # Capitalize the first letter of the API name for the page title.
    title="$(tr '[:lower:]' '[:upper:]' <<< ${apiName:0:1})${apiName:1}"
    sed -e "s/API_NAME/$title/g" templates/_rest-title.md > slate/source/includes/_rest-title.md
    # BUILD SLATE
    cd slate
    bundle exec middleman build --clean
    # MOVE TO TARGET
    cd ..
    mkdir -p target/slate/$apiName
    mv slate/build/* target/slate/$apiName
done
| true
|
58c6c573c775ae1bf06886bec638d226a8eae867
|
Shell
|
yoshi01/dotfiles
|
/dotfilesLink.sh
|
UTF-8
| 255
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# Symlink each dotfile from ~/dotfiles into $HOME, skipping entries that
# are already symlinks. POSIX sh: the original used a bash array under
# #!/bin/sh; a space-separated list (no entry contains spaces) is portable.
DOTFILES=".bash_profile .bashrc .zprofile .zshrc .tmux.conf .vimrc .vim .ideavimrc .gprompt .config/peco/config.json"
for file in $DOTFILES
do
    if [ ! -L "$HOME/$file" ]; then
        # Nested targets (e.g. .config/peco/…) need their parent to exist.
        mkdir -p "$(dirname "$HOME/$file")"
        ln -fnsv "$HOME/dotfiles/$file" "$HOME/$file"
    fi
done
| true
|
04fd286af3862afed7dd6b08cc4676b0adb9fe55
|
Shell
|
kabalin/homedir
|
/.bashrc
|
UTF-8
| 7,468
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# Hopefully fix java issues with awesome
# http://extramem.blogspot.com/2009/04/java-misbehaving-in-awesome-window.html
export AWT_TOOLKIT=MToolkit
############################################################################
# History management
############################################################################
# don't put duplicate lines in the history. See bash(1) for more options
export HISTCONTROL=ignoredups
# ... and ignore same sucessive entries.
export HISTCONTROL=ignoreboth
# show when command was run
export HISTTIMEFORMAT='%F %T '
# Store 999 entries
export HISTSIZE=999
# Make bash append rather than overwrite history on disk
shopt -s histappend
############################################################################
# Window size
############################################################################
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
############################################################################
# More pleasing less
############################################################################
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(lesspipe)"
############################################################################
# Initial PS1 building
############################################################################
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color)
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
;;
*)
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
;;
esac
############################################################################
# xterm title
############################################################################
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
PROMPT_COMMAND='echo -ne "\033]0;${USER}@${HOSTNAME}: ${PWD/$HOME/~}\007"'
;;
*)
;;
esac
############################################################################
# Bash Completion
############################################################################
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
if [ -f /opt/csw/etc/bash_completion ]; then
. /opt/csw/etc/bash_completion
fi
if [ -f /etc/opt/csw/bash_completion ]; then
. /etc/opt/csw/bash_completion
fi
############################################################################
# git prompt settings
############################################################################
default_prompt="\! \u@\h:\w> "
function get_prompt_command ()
{
    # Build PS1 for the next prompt (runs via PROMPT_COMMAND).
    # Inside a git repo: "<hist> <path-in-repo>:<branch>> " with the
    # branch colored yellow (clean master), green (clean branch) or
    # red (dirty). Outside a repo: fall back to $default_prompt.
    local RED='\[\e[0;31m\]'
    local YELLOW='\[\e[0;33m\]'
    local GREEN='\[\e[0;32m\]'
    local NOCOL='\[\e[0m\]'
    git_branch=$( parse_git_branch )
    if [ -n "$git_branch" ]; then
        # Set the branch name to green if it's clean, red otherwise.
        git_clean_p
        if [[ $? == 0 ]]; then
            dirstate='clean'
            if [[ "$git_branch" == "master" ]]; then
                GITCOLOR=$YELLOW
            else
                GITCOLOR=$GREEN
            fi
        else
            dirstate='dirty'
            GITCOLOR=$RED
        fi
        PS1="\! $(path_within_git_repo):$GITCOLOR$git_branch$NOCOL> "
    else
        PS1="$NOCOL$default_prompt"
    fi
}
function parse_git_branch {
    # Print the currently checked-out git branch, or nothing when not
    # inside a repository (git's error output is discarded).
    git branch --no-color 2> /dev/null | sed -n 's/^\* //p'
}
# This will fail if your directory name contains a '!', but then you
# deserve to be flogged anyway.
function path_within_git_repo ()
{
    # Print $PWD relative to the *parent* of the git repository root,
    # i.e. "<repo-name>/sub/dir". Used by get_prompt_command.
    repo_base=$( git rev-parse --git-dir )
    if [ $( expr "$repo_base" : '/' ) -lt 1 ]
    then
        # We're in the base repo directory, so git-rev-parse has just
        # given us '.git'
        repo_base="$(pwd)/$repo_base"
    fi
    # Strip "/<repo>/.git" from the absolute path to get the repo's parent.
    repo_parent=$( expr "$repo_base" : '\(.*\)/.*/\.git$' )
    pwd | sed -e "s!$repo_parent/!!"
}
# Exit value of 0 if the repo is clean, 1 otherwise
function git_clean_p ()
{
    # Matches both old ("working directory clean") and new ("working
    # tree clean") git wording; all output is discarded.
    # Fix: the original's '2>&1 > /dev/null' redirected stderr to the
    # terminal and only silenced stdout — the order must be reversed.
    git status | grep -E 'working (directory|tree) clean' > /dev/null 2>&1
}
PROMPT_COMMAND='get_prompt_command'
PS2='> '
############################################################################
# Useful general aliases
############################################################################
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias ll='ls -l'
alias la='ls -A'
alias l='ls -CF'
alias cdg='cd $HOME/git'
alias ma='git checkout master'
alias cerebro='cd $HOME/git/cerebro'
alias kcol='cd $HOME/git/kcol'
alias rectags="find . -name .ctags.local -type f -print0 | xargs --null -n 1 -IX sh -c 'cd \`dirname \"X\"\` && ctags -R'"
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f $HOME/.bash_aliases ]; then
. $HOME/.bash_aliases
fi
############################################################################
# Include $HOME in $PATH and $MANPATH
############################################################################
PATH=$PATH:$HOME/bin:$HOME/local/bin
MANPATH=$MANPATH:$HOME/local/share/man
############################################################################
# Operating System specific settings
############################################################################
case $(uname) in
Linux)
# We almost certainly have a GNU userland, so set up a few aliases
# specific to the GNU tools
alias ls='ls -F --color'
alias ll='ls -lF --color'
;;
*)
;;
esac
############################################################################
# Yet more colour...
############################################################################
# enable color support of ls and also add handy aliases
if [ "$TERM" != "dumb" ]; then
eval "`dircolors -b`"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
#alias fgrep='fgrep --color=auto'
#alias egrep='egrep --color=auto'
fi
############################################################################
# Actually set most of our environment variables
############################################################################
export EDITOR=vim
export EMAIL='ruslan.kabalin@gmail.com' # Used by debchange
export PATH MANPATH TERM
# Debian package management variables
export DEBEMAIL="ruslan.kabalin@gmail.com"
export DEBFULLNAME="Ruslan Kabalin"
############################################################################
# Include any other bash_includes if they exist
############################################################################
if [ -f $HOME/.bash/bash_includes ]; then
source $HOME/.bash/bash_includes
fi
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
| true
|
c748c4e667f3bf35051cc3bf8984a681e161ee3b
|
Shell
|
alessandroargentieri/rabbit-go
|
/init.sh
|
UTF-8
| 1,450
| 3.59375
| 4
|
[] |
no_license
|
#! /bin/bash
# Bring up a local RabbitMQ (Docker), build the Go publisher/consumer,
# then start one publisher and three consumers with logs in ./*.txt.
# pull docker image of rabbitmq and launch it
if [[ $( docker images | grep rabbitmq) ]]; then
    echo "rabbitmq image already pulled."
else
    echo "pulling rabbitmq docker image"
    docker pull rabbitmq:3.8-management
fi
if [[ $( docker ps -a | grep my-rabbit | head -c12 ) ]]; then
    echo "my-rabbit container already present..."
    if [[ $(docker ps | grep my-rabbit | head -c12 ) ]]; then
        echo "...and running!"
    else
        docker start my-rabbit
        echo "...starting container"
    fi
else
    # 5672 = AMQP, 15672 = management UI.
    docker run -d --name my-rabbit -e RABBITMQ_DEFAULT_USER=myuser -e RABBITMQ_DEFAULT_PASS=password -p 5672:5672 -p 15672:15672 rabbitmq:3.8-management
fi
# build executable
echo "...building Go executables..."
cd ./publisher && go build -o rabbit-publisher && cd ../
cd ./consumer && go build -o rabbit-consumer && cd ../
echo "wait until the rabbitmq instance is ready"
sleep 10s
# export env vars
export RABBITMQ_USER=myuser
export RABBITMQ_PASSWORD=password
export RABBITMQ_URL=localhost:5672
# launch 1 publisher app
echo "...starting producer app..."
export PORT=8080
./publisher/rabbit-publisher >> publisher-logs.txt 2>&1 &
# launch 3 consumer apps
echo "...starting 3 consumer app..."
export PORT=8081
./consumer/rabbit-consumer >> consumer-1-logs.txt 2>&1 &
export PORT=8082
./consumer/rabbit-consumer >> consumer-2-logs.txt 2>&1 &
export PORT=8083
./consumer/rabbit-consumer >> consumer-3-logs.txt 2>&1 &
| true
|
7e3613dc21ebf876735348318555cbb412c4d49b
|
Shell
|
tomcat123a/dnase_pipeline
|
/dnanexus/old/dnase-idr/resources/usr/bin/dnase_idr.sh
|
UTF-8
| 2,155
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
# Compare two peak sets with IDR and emit an Irreproducible Discovery
# Rate report: <idr_root>.bed, <idr_root>.png and, for narrowPeak or
# broadPeak input, a bigBed track <idr_root>.bb.
if [ $# -ne 6 ]; then
    echo "usage v1: dnase_idr.sh <peaks_a_bed> <peaks_b_bed> <format> <rank_col> <chrom_sizes> <idr_root>"
    echo "Compares two sets of peaks and generates an Irreproducible Discovery Rate report. Is independent of DX and encodeD."
    echo "If format is 'narrowPeak' or 'broadPeak' then output will be converted to bigBed."
    exit 1    # was 'exit -1' — not a valid exit operand; use a plain error code
fi
peaks_a_bed=$1   # first peak set (bed, possibly gzipped)
peaks_b_bed=$2   # second peak set (bed, possibly gzipped)
format=$3        # File input format: bed, narrowPeak, broadPeak
rank_col=$4      # Column that idr should use for ranking (signal.value, q.value, score, index # for bed)
chrom_sizes=$5   # chrom_sizes file that matches the genome used to create bam_root.
idr_root=$6      # root name for output bb and png files
peaks_a_file=$peaks_a_bed
if [[ "$peaks_a_file" == *.gz ]]; then
    echo "-- Uncompressing ${peaks_a_file} ..."
    set -x
    gunzip $peaks_a_file
    set +x
    peaks_a_file=${peaks_a_file%.gz}
fi
peaks_b_file=$peaks_b_bed
if [[ "$peaks_b_file" == *.gz ]]; then
    echo "-- Uncompressing ${peaks_b_file} ..."
    set -x
    gunzip $peaks_b_file
    set +x
    peaks_b_file=${peaks_b_file%.gz}
fi
echo "-- Removing any spike-ins from bed files..."
set -x
grep "^chr" $peaks_a_file | grep -v "^chrEBV" > peaks_a_clean.bed
grep "^chr" $peaks_b_file | grep -v "^chrEBV" > peaks_b_clean.bed
set +x    # fix: was a second 'set -x', which left tracing on unintentionally
echo "-- Running IDR..."
set -x
idr/bin/idr --input-file-type $format --rank $rank_col --plot --verbose --samples peaks_a_clean.bed peaks_b_clean.bed 2>&1 | tee idr_summary.txt
sort -k1,1 -k2,2n < idrValues.txt > ${idr_root}.bed
mv idrValues.txt.png ${idr_root}.png
set +x
if [ "$format" == "broadPeak" ]; then
    echo "* Converting bed to 'broad_idr' bigBed..."
    set -x
    bedToBigBed ${idr_root}.bed -type=bed6+ -as=/usr/bin/broad_idr.as $chrom_sizes ${idr_root}.bb
    set +x
elif [ "$format" == "narrowPeak" ]; then
    echo "* Converting bed to 'narrow_idr' bigBed..."
    set -x
    bedToBigBed ${idr_root}.bed -type=bed6+ -as=/usr/bin/narrow_idr.as $chrom_sizes ${idr_root}.bb
    set +x
fi
echo "-- The results..."
ls -l ${idr_root}*
| true
|
075a768d0ce3ca4a6904577aee30bfe5eb74f29a
|
Shell
|
jooaodanieel/tmux-tooling
|
/scripts/solus-set-env
|
UTF-8
| 466
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/zsh
# Create (if needed) and attach a tmux session for the solus frontend:
# left pane runs nvim, right pane runs `make dev`, both in the repo dir.
SESSION_NAME=solus
if ! tmux has-session -t $SESSION_NAME; then
    tmux new -s $SESSION_NAME -d
    # Split window 1 into two side-by-side panes.
    tmux split-window -t $SESSION_NAME:1 -h
    tmux send-keys -t $SESSION_NAME:1.0 \
        cd\ ~/workspace/solus/frontend Enter
    tmux send-keys -t $SESSION_NAME:1.0 \
        nvim Enter
    tmux send-keys -t $SESSION_NAME:1.1 \
        cd\ ~/workspace/solus/frontend Enter
    tmux send-keys -t $SESSION_NAME:1.1 \
        make\ dev Enter
fi
tmux attach -t $SESSION_NAME
| true
|
11d55930e3313f789085668e3084152b283e6ee8
|
Shell
|
bricewge/dotfiles
|
/rofi/post-stow
|
UTF-8
| 1,005
| 2.90625
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/sh
# Post-stow hook for the rofi dotfiles package: pick a default theme and
# install rofi helper scripts (calc, wifi menu, udiskie) into ~/.local/bin.
# 'symlink' and 'repository' come from the sourced utils library below.
# shellcheck disable=SC1090
. "${DOTFILES:-.}/shell/.shell/lib/utils.sh"
# * rofi
theme=$(readlink "$XDG_CONFIG_HOME/rofi/config")
# Default to the solarized-dark theme when config doesn't already point
# at a theme inside the rofi config dir.
if [ ! "$(dirname "$theme")" = "$XDG_CONFIG_HOME/rofi" ]; then
    symlink "$XDG_CONFIG_HOME/rofi/solarized-dark" \
            "$XDG_CONFIG_HOME/rofi/config"
fi
# * rofi calc
repository https://github.com/onespaceman/menu-calc.git \
           "$HOME/.local/src/menu-calc"
symlink "$HOME/.local/src/menu-calc/=" "$HOME/.local/bin/="
# * rofi wifi
# WAITING https://github.com/zbaylin/rofi-wifi-menu/pull/15
# git clone https://github.com/zbaylin/rofi-wifi-menu.git \
repository https://github.com/bricewge/rofi-wifi-menu.git \
           "${HOME}/.local/src/rofi-wifi-menu"
symlink "$HOME/.local/src/rofi-wifi-menu/rofi-wifi-menu.sh" \
        "$HOME/.local/bin/rofi-wifi"
# * udiskie dmenu
repository https://github.com/fogine/udiskie-dmenu.git \
           "$HOME/.local/src/udiskie-dmenu"
symlink "$HOME/.local/src/udiskie-dmenu/udiskie-dmenu" \
        "$HOME/.local/bin/udiskie-dmenu"
| true
|
716b09ccedf775654c7dd5c4b599581c7ef87ec0
|
Shell
|
Tao4free/Bash_like
|
/img_related/create_img_from_text.sh
|
UTF-8
| 266
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Render each line of time.list as a PNG (ImageMagick `convert`), naming
# the file after the line text with spaces/slashes replaced by '_' and
# colons removed so it is filesystem-safe.
input="time.list"
# IFS= + -r preserve leading whitespace and backslashes in each line.
while IFS= read -r line
do
    printf '%s\n' "$line"
    png="$line.png"
    png=${png// /_}    # spaces  -> underscores (no echo round-trip needed)
    png=${png//\//_}   # slashes -> underscores
    png=${png//:/}     # drop colons
    convert -background lightblue -fill blue -pointsize 48 \
        label:"$line" "$png"
done < "$input"
| true
|
a47192ff4b048996510fa1892cde4108c68a17ec
|
Shell
|
benizi/dotfiles
|
/bin/yarn
|
UTF-8
| 305
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Wrapper that runs the nightly yarn from /opt with isolated XDG config
# and data directories; fails loudly (in red, on stderr) if not installed.
set -e
base=/opt
yarn=$base/yarn/versions/nightly/bin/yarn
maybe_tput() {
    # tput, but a no-op when the terminal doesn't support it.
    tput "$@" 2>/dev/null || :
}
test -e "$yarn" || {
    maybe_tput setaf 1   # red
    echo Need to install
    maybe_tput sgr0      # reset attributes
    exit 1
} >&2
env \
    XDG_CONFIG_HOME=$base/yarn/config \
    XDG_DATA_HOME=$base \
    "$yarn" \
    "$@"
| true
|
aa0bd40216f36403881bdf95880dedacf0f47a05
|
Shell
|
bioboxes/gatb-minia
|
/image/bin/install/gatb_minia.sh
|
UTF-8
| 328
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the GATB minia pipeline into /usr/local/gatb and expose its
# entry points on PATH. GATB_VERSION must be set by the build environment.
URL="https://github.com/GATB/gatb-minia-pipeline/archive/${GATB_VERSION}.tar.gz"
fetch_archive.sh ${URL} gatb
ln -s /usr/local/gatb/{gatb,minia} /usr/local/bin
# Remove BESST because /usr/local/gatb/BESST/scripts/ is referenced by gatb, but
# does not exist in the installed directory
rm -rf /usr/local/gatb/BESST
| true
|
31d79e978e3f77d5cdc7138ba057de8b4d1b2809
|
Shell
|
backwardn/conbox
|
/build.sh
|
UTF-8
| 971
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Format, vet, test and install the conbox applets, common library and
# main binary. Pass any argument to also run the optional linters.
export GO111MODULE=on
export CGO_ENABLED=0
msg() {
    # Diagnostic to stderr, prefixed with the script name.
    # Fix: the original 'echo 2>&1 ...' redirected stderr to stdout
    # instead of sending the message to stderr.
    echo "$0": "$@" >&2
}
if [ -n "$1" ]; then
    lint=1
else
    lint=
fi
if [ -n "$lint" ]; then
    msg linting
else
    msg NOT linting
fi
lint_dir() {
    # Run each linter over $1, if installed and linting is enabled.
    [ -n "$lint" ] || return 0
    hash gosimple 2>/dev/null && gosimple "$1"
    hash golint 2>/dev/null && golint "$1"
    hash staticcheck 2>/dev/null && staticcheck "$1"
    return 0
}
build_pkg() {
    # Lint and gofmt directory $1, then fix/test/install package pattern $2.
    lint_dir "$1"
    gofmt -s -w "$1"
    go fix "$2"
    go test "$2"
    go install -v "$2"
}
build_pkg ./applets ./applets/...
build_pkg ./common ./common
build_pkg ./conbox ./conbox
| true
|
df6567c320efef0101a20d3b01f448a9195108e0
|
Shell
|
mattboston/mysql_qa_slave
|
/dropsnapshots.sh
|
UTF-8
| 1,195
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/sh
# Copyright 2006 Matthew Shields matt@mattshields.org
# this script drops LVM snapshots on the qadbm:3306 and qadbs:3306 mysql instances
#
# Flow: stop both MySQL services, unmount the two snapshot mounts,
# verify they are really unmounted, then lvremove both snapshots.
######
# stop 3306:slave
echo "stopping mysql-slave"
service mysql-slave stop
# stop 3306
echo "stopping mysql"
service mysql stop
# unmount snapshot1
echo "unmounting snapshot1"
umount /var/lib/mysql/data
# unmount snapshot2
echo "unmounting snapshot2"
umount /var/lib/mysql-slave/data
# test to see if snapshot1 and snapshot2 are unmounted, exit if still mounted.
mounted=0;
one=`df | grep /var/lib/mysql/data`;
if [ ! -z "$one" ]; then
    echo "/var/lib/mysql/data is still mounted"
    mounted=1;
fi
two=`df | grep /var/lib/mysql-slave/data`;
if [ ! -z "$two" ]; then
    echo "/var/lib/mysql-slave/data is still mounted"
    mounted=1;
fi
if [ $mounted == "1" ]; then
    echo "something wasn't unmounted, please unmount the two snapshots by hand and re-run this script."
    echo " exiting"
    exit
else
    echo "both snapshots are unmounted. continue"
fi
# remove snapshot1 (-f: no confirmation prompt)
echo "removing snapshot1"
lvremove -f /dev/vg.db/db.snapshot1
# remove snapshot2
echo "removing snapshot2"
lvremove -f /dev/vg.db/db.snapshot2
| true
|
024a113e2d5dacc89933731a12b6ed392717683b
|
Shell
|
sirrax/otus-linux
|
/m1_l10_PAM/dz1_login/pam_script
|
UTF-8
| 132
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# PAM helper: deny logins (exit 1) on fixed holiday dates (MMDD),
# allow them (exit 0) on any other day.
today=$(date +"%m%d")
case "$today" in
    0101|0223|0308|0501|0509|1107|1231)
        exit 1
        ;;
esac
exit 0
| true
|
89c8ea52e55ea3af6b2c24a4fa5c7ea57af7c116
|
Shell
|
spiralgenetics/biograph
|
/tools/aws/ec2mgr/install
|
UTF-8
| 478
| 3.375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Install the ec2mgr helper: copy it to /usr/local/sbin, add a login
# warning profile script, and schedule boot/poll runs via cron.
set -e
export DEST='/usr/local/sbin'
if [ "$EUID" != "0" ]; then
    echo "This script must be run as root"
    exit 1
fi
# boto3 is required by ec2mgr.
pushd /tmp
apt-get install python3-pip
pip3 install -U boto3
popd
cp ec2mgr ${DEST}/ec2mgr
chmod a+x ${DEST}/ec2mgr
cp ec2mgr-warn.sh /etc/profile.d/
# Run once at boot, then poll every 30 minutes.
echo "@reboot root ${DEST}/ec2mgr boot" > /etc/cron.d/ec2mgr
echo "0,30 * * * * root ${DEST}/ec2mgr poll" >> /etc/cron.d/ec2mgr
echo
echo "ec2mgr has been installed"
${DEST}/ec2mgr boot
| true
|
2573110a950dfb6a9beee45efd6c1821ce17fe4e
|
Shell
|
kunegis/phd
|
/mkresults-sign
|
UTF-8
| 654
| 2.546875
| 3
|
[] |
no_license
|
#! /bin/sh
#
# Generate the link sign prediction evaluation results.
#
# PARAMETERS
#	$1	bar chart name
#
# Exports corpus/method selection via environment variables and runs the
# Matlab 'result2' analysis in ../webstore/analysis/.
set -e
TMP_FILE=${TMP:-/tmp}/`basename $0`.$$
CORPORA="slashdot-zoo epinions libimseti elec"
export CORPORA
export OUTPUT=$TMP_FILE.out
export OUTPUT_BAR="`pwd`/$1"
# Method selections read by the Matlab script.
export METHODS_PATHA="adad"
export METHODS_SYM="polyn"
export METHODS_SYM_N="lap"
export METHODS_LAPS="lrat"
export METHODS_EXTR="sne_abs"
# export METHODS="poly polyn exp dim_lin odd"
# export METHODS_PATHA="pref"
# export METHODS_PATH="meaneuv jaccard"
export HIDE_SIZE=1
export HIDE_TYPE_NAME=1
cd ../webstore/analysis/
../matlab/matlab result2
cat $TMP_FILE.out
| true
|
ce6ac2f588a634590aaa92da776ab7f0b460a220
|
Shell
|
louisallain/TWITTOPP
|
/dodwan/bin/util/dodwan_functions.sh
|
UTF-8
| 7,259
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
if [ -z $DODWAN_HOME ]; then
echo "Error: \$DODWAN_HOME is not defined."
exit
fi
#---------------------------------------------------------------------
# Download the necessary external jars for all the plugins
#---------------------------------------------------------------------
load_libs() {
    # Download the external jars listed in each 'dependencies' file,
    # for the DoDWAN core and for every plugin directory.
    # wget -nc skips files that were already fetched.
    shopt -s nullglob
    # DoDWAN libs
    echo "# Downloading external jars for DoDWAN..."
    pushd ${DODWAN_HOME}/libs > /dev/null
    wget -nc -nv -i dependencies
    popd > /dev/null
    # Plugin libs
    pushd ${DODWAN_HOME}/plugins > /dev/null
    for p in * ; do
        echo "# Downloading external jars for plugin" $p ...
        pushd $p > /dev/null
        wget -nc -nv -i dependencies
        popd > /dev/null
    done
    popd > /dev/null
}
#---------------------------------------------------------------------
# Make a classpath with all the jar files for DoDWAN and its plugins
#---------------------------------------------------------------------
make_dodwan_classpath() {
    # Print a colon-separated classpath built from every jar under
    # ${DODWAN_HOME}/libs plus each enabled plugin's directory.
    # Jars are prepended as found, so plugin jars precede core jars;
    # the result always ends with a trailing ':'.
    shopt -s nullglob
    classpath=""
    for jar in ${DODWAN_HOME}/libs/*.jar ; do
        classpath="${jar}:${classpath}"
    done
    # $dodwan_plugins is comma-separated; expand commas to spaces.
    for plugin in ${dodwan_plugins//,/ } ; do
        for jar in ${DODWAN_HOME}/plugins/${plugin}/*.jar ; do
            classpath="${jar}:${classpath}"
        done
    done
    echo "${classpath}"
}
# ------------------------------------------------------------
init() {
    # Derive the per-node paths (pid file, ports file, command fifo)
    # from $USER and $node_id, and read the console port from the
    # node's ports file when it exists. /run/shm keeps state in RAM.
    dodwan_log_dir=/run/shm/${USER}/dodwan
    node_dir=${dodwan_log_dir}/var/node/${node_id}
    pid_file=${node_dir}/pid
    ports_file=${node_dir}/ports
    fifo_file=${node_dir}/in
    if [ -e $ports_file ] ; then
        tmp=$(grep dodwan.console_port ${ports_file})
        cport=${tmp#*=}   # strip "name=" to keep just the port number
    fi
}
# ------------------------------------------------------------
send_command() {
    # Send a console command to the node over TCP (localhost:$cport)
    # and echo the reply with the leading "% " prompt stripped.
    # The fifo-based variant below was abandoned in favor of nc.
    # if [ ! -e ${fifo_file} ] ; then
    # 	echo "Error: file ${fifo_file} does not exist"
    # 	exit 1
    # fi
    # echo $* > ${fifo_file}
    if [ -z $cport ] ; then
        echo "Error: no console is available"
        exit 1
    fi
    # NOTE(review): 'nc -q 1' closes 1s after EOF on stdin — GNU/OpenBSD
    # netcat option; confirm the target netcat supports it.
    echo $* | nc -q 1 localhost $cport | sed -e 's/^% //'
}
# ------------------------------------------------------------
console() {
init
check_running
if [ -z $cport ] ; then
echo "Error: no console is available"
exit 1
fi
nc localhost $cport
}
# ------------------------------------------------------------
check_running() {
    # Abort (exit 1) unless node ${node_id} has a live daemon process.
    # Reads globals: pid_file, node_id. Removes a stale pid file when
    # the recorded process is gone.
    if [ ! -e ${pid_file} ] ; then
        echo "Error: it seems node ${node_id} is not running"
        exit 1
    fi
    pid=$(cat $pid_file)
    kill -0 $pid
    res=$?
    if [ $res -eq 1 ] ; then
        echo "Error: node ${node_id} is not running"
        rm -f $pid_file
        # Fix: previously fell through without exiting, so callers
        # (stop_node, publish, ...) proceeded against a dead node.
        exit 1
    fi
}
# ------------------------------------------------------------
start_node() {
#
# Starts a DoDWAN node
#
# Uses the following variables:
#
# node_id : id of the node (required)
# node_start_time: time when the node should start (EPOCH in ms, optional)
# node_end_time : time when the node should stop (EPOCH in ms, optional)
# node_seed : seed to be used by that node's random generator (optional)
# jvm_opts : options for the JVM (optional)
# classpath : class path for Java code
# dodwan_log_dir : log directory (required)
# console_port : TCP port number to be used by the node's console (optional)
echo Starting node $node_id
init
if [ -e ${pid_file} ] ; then
pid=$(cat ${pid_file})
kill -0 $pid
res=$?
if [ $res -eq 1 ] ; then
rm -f $pid_file
else
echo "Error: node ${node_id} is already running (with pid ${pid})"
exit 1
fi
fi
mkdir -p $node_dir
console_port=0
props="-Ddodwan.host=${node_id} -Ddodwan.directory=${node_dir} -Ddodwan.base=${dodwan_log_dir}/base"
if [ ! -z $console_port ] ; then
props="$props -Ddodwan.console_port=${console_port}"
fi
if [ ! -z $dodwan_plugins ] ; then
props="$props -Ddodwan.plugins=${dodwan_plugins}"
fi
# ---- FG: Modif temporaire
#plugins_options="-Ddodwan_napi_tcp.port=0 -Ddodwan_napi_ws.port=0"
if [ ! -z "$plugins_options" ] ; then
props="$props ${plugins_options}"
fi
# ---- FG
opts=""
if [ ! -z $node_start_time ] ; then
opts="$opts -begin $node_start_time"
fi
if [ ! -z $node_end_time ] ; then
opts="$opts -end $node_end_time"
fi
if [ ! -z $node_seed ] ; then
opts="$opts -seed $node_seed"
fi
# Passing initialization commands to this node
if [ ! -z "$init_cmd" ] ; then
echo $init_cmd > ${node_dir}/cmd
opts="$opts -c ${node_dir}/cmd"
fi
# if [ ! -e ${fifo_file} ] ; then
# mkfifo ${fifo_file}
# fi
# opts="$opts -i ${fifo_file}"
# Starting DoDWAN node
# java -Xms4m -Xmx4m \
echo java ${jvm_opts} ${props} \
-cp $(make_dodwan_classpath) \
casa.dodwan.run.dodwand ${opts}
java ${jvm_opts} ${props} \
-cp $(make_dodwan_classpath) \
casa.dodwan.run.dodwand ${opts} \
>> ${node_dir}/out &
echo $! > ${pid_file}
}
# ------------------------------------------------------------
stop_node() {
#
# Stops a DoDWAN node
#
# Uses the following variables:
#
# node_id : id of the node (required)
echo Stopping node $node_id
init
check_running
pid=$(cat $pid_file)
kill $pid >& /dev/null
# rm -f $pid_file $cport_file
}
# ------------------------------------------------------------
status_node() {
#
# Shows the status of DoDWAN (i.e. running or not running)
#
# node_id: id of the node (required)
init
if [ ! -e ${pid_file} ] ; then
echo "Node ${node_id} is not running"
return
fi
check_running
echo "Node ${node_id} is running"
}
# ------------------------------------------------------------
clear() {
#
# Flushes the cache of DoDWAN
#
# node_id: id of the node (required)
init
if [ -e ${pid_file} ] ; then
send_command "do ca cl"
else
rm -Rf ${node_dir}/cache/*
fi
}
# ------------------------------------------------------------
publish() {
#
# Publish a message, with a file as its payload
#
# node_id: id of the node (required)
# $1 : desc (comma-separated list of name=value pairs)
# $2 : fname (file to be published)
desc=$1
fname=$2
init
check_running
if [ ! -e $fname ] ; then
echo "Error: file $fname does not exist"
exit 1
fi
send_command "do ps p -desc \"src=${node_id},${desc}\" -f $fname"
}
# ------------------------------------------------------------
subscribe() {
#
# Sets a subscription
#
# node_id: id of the node (required)
# $1 : key (used to unsubscribe)
# $2 : pattern (comma-separated list of name=value pairs)
# $3-* : options (e.g., -d <dir> | -e <cmd>)
key=$1
pattern=$2
shift 2
options=$*
init
check_running
send_command "do ps add -k ${key} -desc \"${pattern}\" ${options}"
}
# ------------------------------------------------------------
unsubscribe() {
#
# Remove subscriptions
#
# node_id: id of the node (required)
# keys : keys of the subscriptions to be removed
keys=$*
init
check_running
send_command "do ps rem ${keys}"
}
| true
|
081ac4ecff3a350dc94c2c3e0db97f42ffd19722
|
Shell
|
milesrichardson/bbb-ap
|
/remote_scripts/genesis.sh
|
UTF-8
| 597
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh -e
# One-time provisioning for a BeagleBone Black access point: update the
# kernel and packages, then fetch and build the bbb-ap tree (with the
# mt7601u wifi driver) via its update script.
# Update kernel
sh /opt/scripts/tools/update_kernel.sh
# Update and upgrade packages
apt-get -y update
apt-get -y upgrade
# Necessary for building kernel packages
# Fix: was "$(uname-r)" (missing space) -> "uname-r: command not found",
# which installed the bare 'linux-headers-' package name.
apt-get install -y linux-headers-$(uname -r)
apt-get install -y udhcpd
# Create (and delete if exists) buildroot at ~/bbb-ap
cd ~
rm -rf bbb-ap || true
git clone https://github.com/milesrichardson/bbb-ap
cd bbb-ap
# Load deps into ~/bbb-ap/build
cd build
git clone https://github.com/milesrichardson/mt7601u
# Run update script (with build arg for full update including mt7601u)
cd remote_scripts
./update.sh build
| true
|
40050e646220785ddd2627a9c819a5a589e5ad84
|
Shell
|
reddragon/new-machine-setup
|
/osx.sh
|
UTF-8
| 826
| 2.671875
| 3
|
[] |
no_license
|
# macOS bootstrap: install Homebrew, core CLI tools, oh-my-zsh, and a
# few system preference tweaks.
echo "Installing Brew"
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
echo "Setting up utilities"
brew install zsh
brew install golang
brew install mosh
# Add brew to the PATH.
echo 'eval "$(/opt/homebrew/bin/brew shellenv)"' >> ~/.zprofile
eval "$(/opt/homebrew/bin/brew shellenv)"
echo "Setting up zsh"
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
echo "Setting up nice defaults"
# Disable the swipe navigation for Chrome
defaults write com.google.Chrome.plist AppleEnableSwipeNavigateWithScrolls -bool FALSE
# Allow a key which is depressed to emit chars continuously
defaults write -g ApplePressAndHoldEnabled -bool false
# Set the mouse tracking speed.
defaults write -g com.apple.mouse.scaling 7.0
# Disable bracketed-paste mode left on by the installers above.
printf '\e[?2004l'
| true
|
888966e58186c04c89ef0d53563e132f5e2c1cbe
|
Shell
|
TwitchyLinux/ccr
|
/testdata/syslibs/make_test_libs.sh
|
UTF-8
| 623
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build tiny test fixtures: a shared library (libsomething.so / libsoho.so)
# exporting assign_i(), plus two no-stdlib binaries linked against them —
# 'bin2' via an $ORIGIN rpath. Used to exercise ELF dependency resolution.
set -ex
rm -rf *.o *.so *.c *.so.* lib2
cat <<'EOF' >> lib.c
void assign_i(int *i)
{
  *i=5;
}
EOF
cat <<'EOF' >> bin.c
#include <stdio.h>

void assign_i(int *);

void _start()
{
    int x;
    assign_i(&x);
    asm("mov $60,%rax; mov $10,%rdi; syscall");
}
EOF
gcc -Wall -fPIC -c lib.c
gcc -shared -Wl,-soname,libsomething.so -o libsomething.so *.o
# -nostdlib: bin defines its own _start and exits via raw syscall.
gcc -Wall -nostdlib -L./ bin.c -lsomething -o bin
mkdir lib2
gcc -shared -Wl,-soname,libsoho.so -o lib2/libsoho.so lib.o
# rpath $ORIGIN/lib2 lets bin2 find libsoho.so relative to itself.
gcc -Wall -Wl,-rpath,'$ORIGIN/lib2' -nostdlib -L./lib2 bin.c -lsoho -o bin2
rm -rf *.c *.o lib2
# Keep a copy of the dynamic loader alongside the fixtures.
cp /lib64/ld-linux-x86-64.so.2 ld.so.2
| true
|
213a9bc497ca2cebefa3f14ddfe295107578547f
|
Shell
|
IntelLabs/SkimCaffe
|
/models/resnet/train_script.sh
|
UTF-8
| 3,098
| 3.5
| 4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/bash
# Generate a Caffe solver.prototxt from a template plus sparsity
# hyper-parameters, launch training (GPU or CPU, optionally resuming from a
# pretrained .caffemodel or a .solverstate), and distill the loss curve into
# <snapshot_path>/loss.info.
#
# Usage: train_script base_lr weight_decay prune_threshold max_threshold_factor
#        winograd_sparsity_factor ic_decay oc_decay kernel_decay device_id
#        template_solver.prototxt [finetuned.caffemodel/.solverstate]
set -e
set -x
folder="models/resnet"
file_prefix="caffenet_train"
model_path="models/resnet"
# The body dereferences $8, $9 and ${10} unconditionally, so all 10 leading
# parameters are mandatory (the previous "-lt 7" check let under-specified
# runs start and then fail mid-way).
if [ "$#" -lt 10 ]; then
  echo "Illegal number of parameters"
  echo "Usage: train_script base_lr weight_decay prune_threshold max_threshold_factor winograd_sparsity_factor ic_decay oc_decay kernel_decay device_id template_solver.prototxt [finetuned.caffemodel/.solverstate]"
  exit 1
fi
base_lr=$1
weight_decay=$2
prune_threshold=$3
max_threshold_factor=$4
winograd_sparsity_factor=$5
ic_decay=$6
oc_decay=$7
kernel_decay=$8
solver_mode="GPU"
device_id=0
current_time=$(date +%Y-%m-%d-%H-%M-%S)
# The snapshot directory name encodes every hyper-parameter plus a timestamp
# so concurrent runs never collide.
snapshot_name=${base_lr}_${weight_decay}_${prune_threshold}_${max_threshold_factor}_${winograd_sparsity_factor}_${ic_decay}_${oc_decay}_${kernel_decay}_${current_time}
snapshot_path=$folder/$snapshot_name
mkdir -p "$snapshot_path"
echo "$@" > "$snapshot_path/cmd.log"
solverfile=$snapshot_path/solver.prototxt
template_file=${10}
# Materialize the solver: template first, then the per-run overrides appended.
cat "$folder/${template_file}" > "$solverfile"
{
  echo "block_group_decay: $kernel_decay"
  echo "kernel_shape_decay: $ic_decay"
  echo "breadth_decay: $oc_decay"
  echo "winograd_sparsity_factor: $winograd_sparsity_factor"
  echo "prune_threshold: $prune_threshold"
  echo "max_threshold_factor: $max_threshold_factor"
  echo "weight_decay: $weight_decay"
  echo "base_lr: $base_lr"
  echo "snapshot_prefix: \"$snapshot_path/$file_prefix\""
} >> "$solverfile"
# device_id == -1 selects CPU-only training; any other value is a GPU index.
if [ "$9" -ne "-1" ]; then
  device_id=$9
  # Record which run owns this GPU so other launches can avoid it.
  echo "$snapshot_name" > "$folder/$device_id"
else
  solver_mode="CPU"
fi
echo "solver_mode: $solver_mode" >> "$solverfile"
if [ "$#" -ge 11 ]; then
  tunedmodel=${11}
  file_ext=$(echo "${tunedmodel}" | rev | cut -d'.' -f 1 | rev)
  if [ "$file_ext" = "caffemodel" ]; then
    if [ "$9" -ne "-1" ]; then
      ./build/tools/caffe.bin train -gpu $device_id --solver=$solverfile --weights=$model_path/$tunedmodel > "${snapshot_path}/train.info" 2>&1
    else
      ../caffe_scnn_cpu_only/build/tools/caffe.bin train --solver=$solverfile --weights=$model_path/$tunedmodel > "${snapshot_path}/train.info" 2>&1
    fi
  else
    # Anything that is not a .caffemodel is treated as a .solverstate resume.
    ./build/tools/caffe.bin train -gpu $device_id --solver=$solverfile --snapshot=$model_path/$tunedmodel > "${snapshot_path}/train.info" 2>&1
  fi
else
  ./build/tools/caffe.bin train -gpu $device_id --solver=$solverfile > "${snapshot_path}/train.info" 2>&1
fi
# Extract the "loss+" lines from the caffe log; per the original awk program,
# columns 8 and 11 carry the values of interest (awk is last in the pipeline,
# so a grep miss does not trip "set -e").
grep loss+ "${snapshot_path}/train.info" | awk '{print $8 " " $11}' > "${snapshot_path}/loss.info"
content="$(hostname) done: ${0##*/} ${*}. Results in ${snapshot_path}"
echo "${content}" | mail -s "Training done" jongsoo.park@intel.com
| true
|
f83696b339416e0d84c66df0b1e6fffc4828bce9
|
Shell
|
eemc2oops/zkhx
|
/source/srio/RapidIO_RRMAP-master/install_list.sh
|
UTF-8
| 6,200
| 4.03125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# install_list.sh -- stage a RapidIO software install: validate a nodeData
# description file, push the source tarball and config template to a file
# server, then drive make_install.sh on every target node over ssh.
# Files required for installation
# Note the names of these file name (different root) are also used by make_install.sh
#
# Staging directory created on every target node.
REMOTE_ROOT="/opt/rapidio/.install"
LOCAL_SOURCE_ROOT="$(pwd)"
SCRIPTS_PATH=$LOCAL_SOURCE_ROOT/install
NODEDATA_FILE="nodeData.txt"
# Per-invocation temp copy of the nodeData file ($$ = this shell's PID).
TMP_NODEDATA_FILE=/tmp/$$_$NODEDATA_FILE
SRC_TAR="rapidio_sw.tar"
TMPL_FILE="config.tmpl"
# All remote ssh/scp operations run as root.
MY_USERID=root
PGM_NAME=install_list.sh
PGM_NUM_PARMS=5
# Validate input
#
# Parse the command line: $1=server, $2=nodeData file, $3=memsz, $4=switch
# type, $5=group, $6=release (optional).  Any problem sets PRINTHELP=1 so the
# usage text below is printed.
PRINTHELP=0
MASTER=""   # initialize: an inherited env value would fake a "multiple master" error
if [ "$#" -lt $PGM_NUM_PARMS ]; then
    # NB: $'...' ANSI-C quoting does NOT expand variables; the old message
    # printed the literal text "$PGM_NAME requires $PGM_NUM_PARMS".
    printf '\n%s requires %s parameters.\n\n' "$PGM_NAME" "$PGM_NUM_PARMS"
    PRINTHELP=1
else
    SERVER=$1
    OK=1
    ALLNODES=();
    # format of input file: <master|slave> <hostname> <rioname> <nodenumber>
    while read -r line || [[ -n "$line" ]]; do
        # allow empty lines
        if [ -z "$line" ]; then
            continue;
        fi
        # Intentional word-splitting: one array element per whitespace field.
        arr=($line)
        if [ ${#arr[@]} -lt 4 ]; then
            echo "Incorrect line format ($line) in $2"
            OK=0
        fi
        host="${arr[1]}"
        if [ "${arr[0]}" = 'master' ]; then
            if [ -n "$MASTER" ]; then
                echo "Multiple master entries ($line) in $2"
                OK=0
            fi
            MASTER=$host
        fi
        ALLNODES+=("$host")
        echo "$line" >> "$TMP_NODEDATA_FILE"
    done < "$2"
    if [ -z "$MASTER" ]; then
        echo "No master entry in $2"
        OK=0
    fi
    if [ $OK -eq 0 ]; then
        echo "Errors in nodeData file $2, exiting..."
        rm -rf $TMP_NODEDATA_FILE &> /dev/null
        exit
    fi
    MEMSZ=$3
    SW_TYPE=$4
    GRP=$5
    REL=$6
    # "-a" inside [ ] is obsolescent/ambiguous; use separate tests with &&.
    if [ "$MEMSZ" != 'mem34' ] && [ "$MEMSZ" != 'mem50' ] && [ "$MEMSZ" != 'mem66' ] ; then
        printf '\nmemsz parameter must be mem34, mem50, or mem66.\n\n'
        PRINTHELP=1
    fi
    MASTER_CONFIG_FILE=$SCRIPTS_PATH/$SW_TYPE-master.conf
    MASTER_MAKE_FILE=$SCRIPTS_PATH/$SW_TYPE-master-make.sh
    if [ ! -e "$MASTER_CONFIG_FILE" ] || [ ! -e "$MASTER_MAKE_FILE" ]
    then
        # Same $'...' expansion bug as above -- SW_TYPE was printed literally.
        printf '\nSwitch type "%s" configuration support files do not exist.\n\n' "$SW_TYPE"
        PRINTHELP=1
    fi
fi
# Print full usage, remove the partially-written temp nodeData copy, and exit.
if [ $PRINTHELP = 1 ] ; then
echo "$PGM_NAME <SERVER> <nData> <memsz> <sw> <group> <rel>"
echo "<SERVER> Name of the node providing the files required by installation"
echo "<nData> The file describing the target nodes of the install"
echo " The file has the format:"
echo " <master|slave> <IP_Name> <RIO_name> <node>"
echo " Where:"
echo " <IP_name> : IP address or DNS name of the node"
echo " <RIO_name>: Fabric management node name."
echo " <node> : String to replace in template file,"
echo " of the form node#."
echo " EXAMPLE: master 10.64.15.199 gry37 node1"
echo " NOTE: Example nodeData.sh files are create by install.sh"
echo "<memsz> RapidIO memory size, one of mem34, mem50, mem66"
echo " If any node has more than 8 GB of memory, MUST use mem50"
echo "<sw> Type of switch the four nodes are connected to."
echo " Files exist for the following switch types:"
echo " tor - Prodrive Technologies Top of Rack Switch"
echo " cps - StarBridge Inc RapidExpress Switch"
echo " auto - configuration determined at runtime"
echo " rxs - StarBridge Inc RXS RapidExpress Switch"
echo "<group> Unix file ownership group which should have access to"
echo " the RapidIO software"
echo "<rel> The software release/version to install."
echo " If no release is supplied, the current release is installed."
rm -rf $TMP_NODEDATA_FILE &> /dev/null
exit
fi
# Only proceed if all nodes can be reached
#
# One ICMP probe per host (server + every node); any failure aborts the
# install before files start moving.
echo "Prepare for installation..."
echo "Checking connectivity..."
OK=1
ping -c 1 "$SERVER" > /dev/null
if [ $? -ne 0 ]; then
    echo "    $SERVER not accessible"
    OK=0
else
    echo "    $SERVER accessible."
fi
for host in "${ALLNODES[@]}"
do
    [ "$host" = 'none' ] && continue;
    [ "$host" = "$SERVER" ] && continue;
    ping -c 1 "$host" > /dev/null
    if [ $? -ne 0 ]; then
        echo "    $host not accessible"
        OK=0
    else
        echo "    $host accessible."
    fi
done
if [ $OK -eq 0 ]; then
    # printf, not echo: the old 'echo "\n..."' printed a literal backslash-n.
    printf '\nCould not connect to all nodes, exiting...\n'
    rm -rf $TMP_NODEDATA_FILE &> /dev/null
    exit
fi
echo "Creating install files for $SERVER..."
# First create the files that would be available on the server
#
TMP_DIR="/tmp/$$"
rm -rf $TMP_DIR;mkdir -p $TMP_DIR
# Copy nodeData.txt
#
mv $TMP_NODEDATA_FILE $TMP_DIR/$NODEDATA_FILE
# Create the source.tar
#
# Clean build artifacts first so the tarball only carries sources (plus .git*).
pushd $LOCAL_SOURCE_ROOT &> /dev/null
make clean &>/dev/null
tar -cf $TMP_DIR/$SRC_TAR * .git* &>/dev/null
popd &> /dev/null
# Copy the template file
#
cp $MASTER_CONFIG_FILE $TMP_DIR/$TMPL_FILE
# Transfer the files to the server
#
echo "Transferring install files to $SERVER..."
SERVER_ROOT="/opt/rapidio/.server"
ssh $MY_USERID@"$SERVER" "rm -rf $SERVER_ROOT;mkdir -p $SERVER_ROOT"
scp $TMP_DIR/* $MY_USERID@"$SERVER":$SERVER_ROOT/. > /dev/null
# NOTE(review): "root.$GRP" uses the deprecated dot separator; chown also
# accepts "root:$GRP" -- confirm target systems' chown supports the dot form.
ssh $MY_USERID@"$SERVER" "chown -R root.$GRP $SERVER_ROOT"
rm -rf $TMP_DIR
# Transfer the make_install.sh script to a known location on the target machines
#
for host in "${ALLNODES[@]}"; do
[ "$host" = 'none' ] && continue;
echo "Transferring install script to $host..."
ssh $MY_USERID@"$host" "rm -rf $REMOTE_ROOT;mkdir -p $REMOTE_ROOT/script"
scp $SCRIPTS_PATH/make_install_common.sh $MY_USERID@"$host":$REMOTE_ROOT/script/make_install_common.sh > /dev/null
# The master node gets the switch-specific make script; slaves get the
# generic slave script.  Both land under the same name on the target.
if [ "$host" = "$MASTER" ]; then
scp $MASTER_MAKE_FILE $MY_USERID@"$host":$REMOTE_ROOT/script/make_install.sh > /dev/null
else
scp $SCRIPTS_PATH/make_install-slave.sh $MY_USERID@"$host":$REMOTE_ROOT/script/make_install.sh > /dev/null
fi
ssh $MY_USERID@"$host" "chown -R root.$GRP $REMOTE_ROOT;chmod 755 $REMOTE_ROOT/script/make_install.sh"
done
# Call out to make_install.sh
echo "Beginning installation..."
for host in "${ALLNODES[@]}"; do
[ "$host" = 'none' ] && continue;
ssh $MY_USERID@"$host" "$REMOTE_ROOT/script/make_install.sh $SERVER $SERVER_ROOT $MEMSZ $GRP"
done
echo "Installation complete."
| true
|
3d11ad992331fb7f422bf17b57a2179982121cd3
|
Shell
|
wangpanqiao/COME
|
/bin/Index.sh
|
UTF-8
| 1,187
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Index.sh -- build a binned coordinate index from a transcript.gtf.foo file.
# Each exon is mapped onto fixed-resolution bins (RI nt), then the bins are
# aggregated per transcript into an R-style c(...) vector.
# Print help message if no parameter given
if [ "$#" -eq 0 ]; then
    echo "Usage: ./Index.sh in_gtf out_index
in_gtf is the transcript.gtf.foo file
out_index is the transcript index
"
    exit;
fi
######################## predefined parameters
RI=50;#the resolution of index is half bin size. which is 50 nt;
RA=0.5;#RATIO is the ratio mapped to a 50nt resolution, should be [0.02,1.00];
######################## input_parameters
in_gtf=$1;
out_index=$2;
######################## index for each exon:
# Compute the first (N1) and last (N2) bin covered by each exon; an exon that
# does not reach the overlap threshold in any bin gets "0:0".
# (Columns $1..$5 are presumably chr, start, end, id, strand -- TODO confirm
# against the .foo format produced upstream.)
awk -F '\t' -v RA="$RA" -v RI="$RI" '{ lap=int(RA*RI);
N1=int(($2-1)/RI)+1; n1=($2-1)%RI; if(n1 >= lap){N1=N1+1;}
N2=int(($3-1)/RI)+0; n2=($3-1)%RI; if(n2 >= lap){N2=N2+1;}
if(N2>=N1){
print $1"."$4"\t"N1":"N2"\t"$5 == "x" ? "" : $1"."$4"\t"$5"\t"N1":"N2;
}else{
print $1"."$4"\t"$5"\t0:0";
}}' "$in_gtf" > "$out_index.foo2";
######################## index for each transcript
# Group consecutive rows sharing an ID, joining their bin ranges with commas.
awk -F '\t' '{if(NR==1){ID=$2;foo=$3;chrstr=$1;}else{if(ID!=$2){print chrstr"\t"ID"\tc("foo")";ID=$2;foo=$3;chrstr=$1;}else{foo=foo","$3;}}
}END{print chrstr"\t"ID"\tc("foo")";}' "$out_index.foo2" > "$out_index";
######################## stpe3: clean
# Intermediate exon-level file is a plain file: -f is enough (was rm -rf).
rm -f -- "$out_index.foo2";
| true
|
b0f8f97adde9214177acc4503af69433a5f89b46
|
Shell
|
PrismaPhonic/dotfiles-xmonad-arch
|
/install-readyset-reqs.sh
|
UTF-8
| 1,763
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap an Arch Linux workstation for ReadySet development: work VPN
# (tailscale), Rust toolchain, Docker, MariaDB clients, the readyset repo,
# and UI/doc tooling.  Interactive: prompts for the clone location and runs
# several sudo commands.
set -e
# Fixed typo: the old unquoted 'Let\s' printed "Lets".
echo "Welcome to ReadySet! Let's get you setup!"
read -rp 'Clone readyset git repo to: ' gitrepo
#####################################
## General Work Dependencies ##
#####################################
## Start with repo update & pkg upgrade
sudo pacman -Syu
# Install tailscale for work vpn
sudo pacman -S --needed --noconfirm tailscale
# enable and start tailscale
sudo systemctl enable --now tailscaled.service
sleep 1
sudo systemctl start tailscaled.service
sleep 1
# Bring tailscale up - note, this will require you clicking a link and
# authenticating with SSO login
sudo tailscale up
#####################################
## ReadySet (Product) Dependencies ##
#####################################
# Install rust
curl https://sh.rustup.rs -sSf | sh
source "$HOME/.cargo/env"
# Install nightly rust
rustup install nightly
# Install RLS
rustup component add rls rust-analysis rust-src
# Install mdbook
cargo install mdbook
# Install all basic deps
## OpenSSL is already installed by default on Arch systems.
sudo pacman -S --needed --noconfirm base-devel clang lz4 docker
# Enable and start docker service
sudo systemctl enable docker.service
sudo systemctl start docker.service
# Add current user to the docker group
sudo gpasswd -a "$(whoami)" docker
# Install MariaDB clients
sudo pacman -S mariadb-clients
# Clone readyset repo.  Plain cd instead of 'eval cd': eval would execute
# anything embedded in the user-supplied path (command injection).
cd "${gitrepo}"
git clone git@github.com:readysettech/readyset.git
cd readyset
# Build release
cargo build --release
# Install UI deps
sudo pacman -S --noconfirm --needed python-pip
pip3 install pystache
# Install graphviz package for creating useful graphs
sudo pacman -S --noconfirm --needed graphviz
echo 'All finished! Please add $HOME/.cargo/bin to your PATH permanentely.'
| true
|
752f0c0ee4bf56bd0a723c808698efd2fe4dd97b
|
Shell
|
jtaleric/osk-networking
|
/OSP-MutliNetwork-UPerf.sh
|
UTF-8
| 20,920
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
#-------------------------------------------------------------------------------
# OSP-Network-Perf.sh
#
# Multi-network uperf benchmark driver for OpenStack: creates a per-run
# tenant network, boots two pbench/uperf guests, plugs the host into the
# tenant VLAN via OVS, runs pbench-uperf between the guests and cleans up.
#
# -- To do --
# ----
# ---- Currently now VLAN testing... This assumes there is a br-tun bridge
#
# -- Updates --
# 10/26/15 - Cleaned things up a bit
# 01/22/14 - moved to uperf, removed netperf
# 01/11/14 - uperf-bpench inital drop
# 07/16/14 - Super-netperf integration... Layed things down for FloatingIP
# 04/21/14 - Neutron changed how the flows are built. Previously there was a
# signle flow for the dhcp-agent, now there is a per-guest flow
# which is a bit more secure.
# 04/17/14 - Sync the throughput runs... Sync with Lock file
# 03/18/14 - Fixes
# 02/20/14 - Working on Scale
# 11/13/13 - Create a new security group, then remove it for the cleanup
# 11/12/13 - Added code to allow multiple launches of this script
# 11/11/13 - First drop
# @author Joe Talerico (jtaleric@redhat.com)
#-------------------------------------------------------------------------------
# RUN doubles as the per-invocation network/subnet suffix, so parallel
# launches with distinct --run values do not collide.
RUN=1
TESTNAME="no-name"
FLAVOR="m1.small"
#---------------------Directory to store the results -------------------------------------
FOLDER=OSP-NetworkScale-Output_$(date +%Y_%m_%d_%H_%M_%S)
function usage() {
printf "The following options are available:\n"
printf "\n"
printf -- "\t --help : Help \n"
printf -- "\t --run : Run number- useful for multi-run tests \n"
printf -- "\t --testname : Name of test \n"
printf -- "\t --flavor : flavor of instances \n"
}
# GNU getopt (util-linux) re-parses argv for long options; eval set restores
# the normalized argument list.
opts=$(getopt -o hr:t:f: --longoptions "help,run:,testname:,flavor:" -n "getopt.sh" -- "$@");
eval set -- "$opts";
while true; do
case "$1" in
-h|--help)
usage
exit
;;
-r|--run)
RUN=$2
echo Run : $2
shift 2
;;
-t|--testname)
# --testname also overrides the results folder name.
TESTNAME="$2"
FOLDER=$TESTNAME
echo Test name: $2
shift 2
;;
-f|--flavor)
FLAVOR="$2"
echo Flavor: $2
shift 2
;;
--)
shift
break
;;
esac
done
# Run configuration.  Several values below (ROUTER_ID, PBENCH_SERVER_*,
# NAMESERVER) are deployment-specific placeholders that must be edited
# before use.
PLUG=true
HOST=$(hostname)
UPERF=true
SAMPLES=1
TESTS="stream"
PROTO="tcp"
ROUTER_ID="bf545bbc-fe09-47c0-b98e-8c6f54b8f4e5"
SECGROUP=true
HIGH_INTERVAL=false
KEYSTONE_ADMIN="/root/overcloudrc"
NETPERF_IMG_NAME="pbench-image"
NETPERF_IMG=""
GUEST_SIZE=$FLAVOR
PBENCH_SERVER_IP="1.1.1.1"
PBENCH_SERVER_HOSTNAME="pbench.server.mine.com"
NAMESERVER="1.1.1.1"
# NOTE(review): GLANCE is referenced later ("if $GLANCE") but never assigned
# anywhere in this script (only a commented-out "GLANCE=false" exists); an
# empty expansion makes that "if" succeed -- confirm the intended default.
#-------------------------------------------------------------------------------
# Folder where to store the the results of netperf and the output of
# the OSP Script.
#-------------------------------------------------------------------------------
mkdir -p $FOLDER
#-------------------------------------------------------------------------------
# Set this to true if tunnels are used, set to false if VLANs are used.
#
# !! If VLANs are used, the user must setup the flows and veth before running !!
#-------------------------------------------------------------------------------
TUNNEL=true
#----------------------- Array to hold guest ID --------------------------------
declare -A GUESTS
#-------------------------------------------------------------------------------
# Clean will remove Network and Guest information
#-------------------------------------------------------------------------------
CLEAN=true
#----------------------- Show Debug output -------------------------------------
DEBUG=false
#-------------------------------------------------------------------------------
# CLEAN_IMAGE will remove the netperf-networktest image from glance
#-------------------------------------------------------------------------------
CLEAN_IMAGE=false
#----------------------- Hosts to Launch Guests -------------------------------
# NOTE(review): both ZONE entries pin the same compute node, so the "tunnel"
# traffic never leaves the host -- confirm this is intended.
ZONE[0]="nova:overcloud-novacompute-0.localdomain"
ZONE[1]="nova:overcloud-novacompute-0.localdomain"
#----------------------- Network -----------------------------------------------
TUNNEL_NIC="p1p1"
TUNNEL_SPEED=`ethtool ${TUNNEL_NIC} | grep Speed | sed 's/\sSpeed: \(.*\)Mb\/s/\1/'`
TUNNEL_TYPE=`ovs-vsctl show | grep -E 'Port.*gre|vxlan|stt*'`
NETWORK="private-${RUN}"
SUBNET="12.0.${RUN}.0/24"
INTERFACE="12.0.${RUN}.150/14"
SUB_SEARCH="12.0.${RUN}."
MTU=1500
SSHKEY="/root/.ssh/id_rsa.pub"
SINGLE_TUNNEL_TEST=true
#----------------------- No Code.... yet. --------------------------------------
MULTI_TUNNEL_TEST=false
#----------------------- Need to determine how to tell ( ethtool? STT ) --------
HARDWARE_OFFLOAD=false
#----------------------- Is Jumbo Frames enabled throughout? -------------------
JUMBO=false
#----------------------- Ignore DHCP MTU ---------------------------------------
DHCP=false
#-------------------------------------------------------------------------------
# Params to set the guest MTU lower to account for tunnel overhead
# or if JUMBO is enabled to increase MTU
#-------------------------------------------------------------------------------
# NOTE(review): the non-jumbo tunnel branch re-sets MTU to the default 1500
# rather than a tunnel-overhead-reduced value -- confirm intent.
if $DHCP ; then
if $TUNNEL ; then
MTU=1500
if $JUMBO ; then
MTU=8950
fi
fi
fi
#-------------------------------------------------------------------------------
# Must have admin rights to run this...
#-------------------------------------------------------------------------------
# Source the overcloud admin credentials; abort if they are missing.
if [ -f "$KEYSTONE_ADMIN" ]; then
    source "$KEYSTONE_ADMIN"
else
    echo "ERROR :: Unable to source keystone_admin file"
    exit 1
fi
# Quoting matters here: with NETPERF_IMG empty, the old unquoted test
# collapsed to the one-argument form "[ -f ]", which is always true, so the
# warning below was silently suppressed.
if ! [ -f "$NETPERF_IMG" ]; then
    echo "WARNING :: Unable to find the Netperf image"
    echo "You must import the image before running this script"
fi
#-------------------------------------------------------------------------------
# cleanup()
#
# Tear down everything the run created: the netperf guests, the per-run test
# network/subnet/ports, the host-side OVS plug port and (if CLEAN_IMAGE) the
# glance image.  Reads globals: search_string, SUBNET, NETWORK, ROUTER_ID,
# RUN, CLEAN_IMAGE.
#-------------------------------------------------------------------------------
cleanup() {
    echo "#-------------------------------------------------------------------------------"
    echo "Cleaning up...."
    echo "#-------------------------------------------------------------------------------"
    if [ -n "$search_string" ] ; then
        echo "Cleaning netperf Guests"
        # search_string is "id1|id2|...|"; replace EVERY '|' with a space.
        # The old ${var/|/ } only replaced the FIRST one, so with more than
        # one guest the remaining ids stayed glued together ("id2|id3") and
        # were never deleted.
        for key in ${search_string//|/ }
        do
            echo "Removing $key...."
            nova delete $key
        done
    fi
    #---------------------------------------------------------------------------
    # Wait until nova has actually finished deleting the guests.
    #---------------------------------------------------------------------------
    while true;
    do
        glist=`nova list | grep -E "${search_string%?}" | awk '{print $2}'`
        if [ -z "$glist" ] ; then
            break
        fi
        sleep 5
    done
    #---------------------------------------------------------------------------
    # Remove test Networks
    #---------------------------------------------------------------------------
    nlist=$(neutron subnet-list | grep "${SUBNET}" | awk '{print $2}')
    if ! [ -z "$nlist" ] ; then
        echo "Cleaning test networks..."
        neutron router-interface-delete ${ROUTER_ID} $(neutron subnet-list | grep "${SUBNET}" | awk '{print $2}')
        for port in `neutron port-list | grep $(neutron subnet-list | grep "${SUBNET}" | awk '{print $2}') | awk '{print $2}'`; do
            neutron port-delete $port
        done
        neutron subnet-delete $(neutron subnet-list | grep "${SUBNET}" | awk '{print $2}')
        neutron net-delete $NETWORK
        ovs-vsctl del-port rook-${RUN}
    fi
    if $CLEAN_IMAGE ; then
        ilist=$(glance image-list | grep netperf | awk '{print $2}')
        if ! [ -z "$ilist" ] ; then
            echo "Cleaning Glance..."
            yes | glance image-delete $ilist
        fi
    fi
} #END cleanup
# Create the per-run tenant network/subnet only if it does not exist yet.
if [ -z "$(neutron net-list | grep "${NETWORK}")" ]; then
#----------------------- Create Subnet ----------------------------------------
echo "#-------------------------------------------------------------------------------"
echo "Creating Subnets "
echo "#-------------------------------------------------------------------------------"
neutron net-create $NETWORK
neutron subnet-create $NETWORK $SUBNET
neutron net-show $NETWORK
fi
NETWORKID=`nova network-list | grep -e "${NETWORK}\s" | awk '{print $2}'`
if $DEBUG ; then
echo "#----------------------- Debug -------------------------------------------------"
echo "Network ID :: $NETWORKID"
echo "#-------------------------------------------------------------------------------"
fi
neutron router-interface-add $ROUTER_ID `neutron net-list | grep ${NETWORKID} | awk '{print $6}'`
# Plug this host directly into the tenant network: create a neutron port
# bound to us, mirror its MAC/IP onto a br-int internal OVS port, and bring
# it up so the host can reach the guests.
if $PLUG ; then
echo "#------------------------------------------------------------------------------- "
echo "Plugging Neutron"
echo "#-------------------------------------------------------------------------------"
PORT_INFO=$(neutron port-create --name rook-${RUN} --binding:host_id=${HOST} ${NETWORKID})
echo "$PORT_INFO"
PORT_ID=$(echo "$PORT_INFO" | grep "| id" | awk '{print $4}')
MAC_ID=$(echo "$PORT_INFO" | grep "mac" | awk '{print $4}')
IP_ADDY=$(echo "$PORT_INFO" | grep "ip_address" | awk '{print $7}'| grep -Eow '[0-9]+.[0-9]+\.+[0-9]+\.[0-9]+')
PORT_SUB=$(neutron net-list| grep $NETWORKID | awk '{print $7}' | sed -rn 's/.*\/(.*)$/\1/p')
OVSPLUG="rook-${RUN}"
ovs-vsctl -- --may-exist add-port br-int ${OVSPLUG} -- set Interface ${OVSPLUG} type=internal -- set Interface ${OVSPLUG} external-ids:iface-status=active -- set Interface ${OVSPLUG} external-ids:attached-mac=${MAC_ID} -- set Interface ${OVSPLUG} external-ids:iface-id=${PORT_ID}
echo $IP_ADDY
echo $MAC_ID
echo $PORT_ID
echo $PORT_SUB
sleep 5
service neutron-openvswitch-agent restart
ip link set address $MAC_ID dev $OVSPLUG
ip a a ${IP_ADDY}/${PORT_SUB} dev $OVSPLUG
ip l s up $OVSPLUG
fi
#
# Glance is erroring out.
#
# BZ 1109890
# max database connection issue
#
# IMAGE_ID="f6e00ceb-3c79-41f6-9d09-a163df637328"
# GLANCE=false
# NOTE(review): GLANCE is never assigned above; "if $GLANCE" with an unset
# variable expands to an empty command (treated as success), so this branch
# effectively always runs -- confirm whether the commented GLANCE=false was
# meant to be active.
if $GLANCE ; then
if [ -z "$(glance image-list | grep -E "${NETPERF_IMG_NAME}")" ]; then
#----------------------- Import image into Glance ------------------------------
echo "#------------------------------------------------------------------------------- "
echo "Importing Netperf image into Glance"
echo "#-------------------------------------------------------------------------------"
IMAGE_ID=$(glance image-create --name ${NETPERF_IMG_NAME} --disk-format=qcow2 --container-format=bare < ${NETPERF_IMG} | grep id | awk '{print $4}')
else
IMAGE_ID=$(glance image-list | grep -E "${NETPERF_IMG_NAME}" | awk '{print $2}')
fi
fi
# Register the ssh keypair used to log into the guests (once).
if [ -z "$(nova keypair-list | grep "network-testkey")" ]; then
#----------------------- Security Groups ---------------------------------------
echo "#------------------------------------------------------------------------------- "
echo "Adding SSH Key"
echo "#-------------------------------------------------------------------------------"
if [ -f $SSHKEY ]; then
nova keypair-add --pub_key ${SSHKEY} network-testkey
else
echo "ERROR :: SSH public key not found"
exit 1
fi
fi
# Security group allowing ssh + icmp into the guests (once).
if $SECGROUP; then
if [ -z "$(nova secgroup-list | egrep -E "netperf-networktest")" ] ; then
echo "#------------------------------------------------------------------------------- "
echo "Adding Security Rules"
echo "#-------------------------------------------------------------------------------"
nova secgroup-create netperf-networktest "network test sec group"
nova secgroup-add-rule netperf-networktest tcp 22 22 0.0.0.0/0
nova secgroup-add-rule netperf-networktest icmp -1 -1 0.0.0.0/0
fi
fi
#----------------------- Launch Instances --------------------------------------
# Boot one guest per ZONE entry; the first becomes the uperf server, the
# second the client.  Guest UUIDs are accumulated into search_string as a
# '|'-separated pattern used for polling and cleanup.
echo "#------------------------------------------------------------------------------- "
echo "Launching netperf instnaces"
echo "#-------------------------------------------------------------------------------"
echo "Launching Instances, $(date)"
search_string=""
NETSERVER_HOST="0"
for host_zone in "${ZONE[@]}"
do
echo "Launching instnace on $host_zone"
host=$(echo ${host_zone} | awk -F':' '{print $2}')
if [ "$NETSERVER_HOST" == "0" ] ; then
NETSERVER_HOST=$host
pbench-register-tool-set --remote=${host} --label=uperf-server
else
NETCLIENT_HOST=$host
pbench-register-tool-set --remote=${host} --label=uperf-client
fi
if $SECGROUP; then
command_out=$(nova boot --image ${IMAGE_ID} --nic net-id=${NETWORKID} --flavor ${GUEST_SIZE} --availability-zone ${host_zone} netperf-${host_zone} --key_name network-testkey --security_group default,netperf-networktest | egrep "\sid\s" | awk '{print $4}')
else
command_out=$(nova boot --image ${IMAGE_ID} --nic net-id=${NETWORKID} --flavor ${GUEST_SIZE} --availability-zone ${host_zone} netperf-${host_zone} --key_name network-testkey | egrep "\sid\s" | awk '{print $4}')
fi
search_string+="$command_out|"
done
#-------------------------------------------------------------------------------
# Give instances time to get Spawn/Run
# This could vary based on Disk and Network (Glance transfering image)
#-------------------------------------------------------------------------------
echo "#------------------------------------------------------------------------------- "
echo "Waiting for Instances to begin Running"
echo "#-------------------------------------------------------------------------------"
if $DEBUG ; then
echo "#----------------------- Debug -------------------------------------------------"
echo "Guest Search String :: $search_string"
echo "#-------------------------------------------------------------------------------"
fi
if $TUNNEL ; then
    # Wait until both guests report Running; bail out (and clean up) if any
    # guest lands in ERROR state.
    while true; do
        if ! [ -z "$(nova list | egrep -E "${search_string%?}" | egrep -E "ERROR")" ]; then
            echo "ERROR :: Netperf guest in error state, Compute node issue"
            if $CLEAN ; then
                cleanup
            fi
            exit 1
        fi
        #-----------------------------------------------------------------------
        # This is assuming the SINGLE_TUNNEL_TEST
        #-----------------------------------------------------------------------
        if [ "$(nova list | egrep -E "${search_string%?}" | egrep -E "Running" | wc -l)" -gt 1 ]; then
            break
        fi
    done
    #---------------------------------------------------------------------------
    # Find the MAC of a compute-owned neutron port on our subnet; it is used
    # below to locate the br-tun flow carrying the local VLAN tag.
    #---------------------------------------------------------------------------
    # Start empty, not "0": the "no port found" check below uses -z, and the
    # old PORT=0 initializer made that error path unreachable.
    PORT=""
    for ports in `neutron port-list | grep -e "${SUB_SEARCH}" | awk '{print $2}'` ; do
        if $DEBUG ; then
            echo "#----------------------- Debug -------------------------------------------------"
            echo "Ports :: $ports"
            echo "#-------------------------------------------------------------------------------"
        fi
        if [[ ! -z $(neutron port-show $ports | grep "device_owner" | grep "compute:nova") ]] ; then
            if $DEBUG ; then
                # The old code printed this banner unconditionally; keep all
                # debug output behind $DEBUG like its siblings.
                echo "#----------------------- Debug -------------------------------------------------"
                echo "Ports :: $ports"
                echo "#-------------------------------------------------------------------------------"
            fi
            PORT=$(neutron port-show $ports | grep "mac_address" | awk '{print $4}');
        fi
    done;
    if [[ -z "${PORT}" ]] ; then
        echo "ERROR :: Unable to determine DHCP Port for Network"
        if $CLEAN ; then
            cleanup
        fi
        exit 0
    fi
    try=0
    if $DEBUG ; then
        echo "#----------------------- Debug -------------------------------------------------"
        echo "Port :: $PORT"
        echo "#-------------------------------------------------------------------------------"
    fi
    # Poll the br-tun flow table until the VLAN tag for our port appears,
    # then tag the host plug port with it.
    while true ; do
        FLOW=`ovs-ofctl dump-flows br-tun | grep "${PORT}"`
        IFS=', ' read -a array <<< "$FLOW"
        VLANID_HEX=`echo ${array[9]} | sed 's/vlan_tci=//g' | sed 's/\/.*//g'`
        if $DEBUG ; then
            echo "#----------------------- Debug -------------------------------------------------"
            echo "VLAN HEX :: $VLANID_HEX"
            echo "#-------------------------------------------------------------------------------"
        fi
        # Skip malformed parses.  The old test compared the (always empty)
        # stdout of "grep -q" against 1 and therefore never triggered.
        if echo "$VLANID_HEX" | grep -q "dl_dst" ; then
            continue
        fi
        VLAN=`printf "%d" ${VLANID_HEX}`
        ovs-vsctl set Port $OVSPLUG tag=$VLAN
        if $DEBUG ; then
            echo "#----------------------- Debug -------------------------------------------------"
            echo "VLAN :: $VLAN"
            echo "#-------------------------------------------------------------------------------"
        fi
        if [ $VLAN -ne 0 ] ; then
            break
        else
            sleep 10
        fi
        if [[ $try -eq 15 ]] ; then
            echo "ERROR :: Attempting to find the VLAN to use failed..."
            if $CLEAN ; then
                cleanup
            fi
            exit 0
        fi
        try=$((try+1))
    done
    if $DEBUG ; then
        echo "Using VLAN :: $VLAN"
    fi
fi
echo "#------------------------------------------------------------------------------- "
echo "Waiting for instances to come online"
echo "#-------------------------------------------------------------------------------"
# Extract the fixed IPs of our guests.  The third octet's character class in
# the old regex was "[0-9}" -- a typo that also matched '}' -- replaced with
# a clean dotted-quad pattern.
INSTANCES=($(nova list | grep -E "${search_string%?}" | egrep -E "Running|spawning" | egrep -oe '([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]+)'))
#-------------------------------------------------------------------------------
# Single Tunnel Test -
# 1. Launch a instance on each side of a GRE/VXLAN/STT Tunnel.
# 2. Make sure there is connectivity from the Host to the guests via Ping
# 3. Attempt to Login to the Guest via SSH
#-------------------------------------------------------------------------------
if $SINGLE_TUNNEL_TEST ; then
    ALIVE=0
    NETSERVER=0
    NETCLIENT=0
    # The first instance becomes the uperf server, the second the client.
    # Each gets up to 10 ping probes (5 packets each) before we give up.
    for instance in ${INSTANCES[@]}; do
        TRY=0
        if [ "$NETSERVER" == "0" ] ; then
            NETSERVER=$instance
        else
            NETCLIENT=$instance
        fi
        while [ "$TRY" -lt "10" ] ; do
            # $4 of ping's summary line is the received-packet count.
            REPLY=`ping $instance -c 5 | grep received | awk '{print $4}'`
            # Guard against an empty REPLY (ping produced no summary line at
            # all, e.g. resolution failure); the old bare '!= "0"' test
            # counted that case as reachable.
            if [ -n "$REPLY" ] && [ "$REPLY" != "0" ]; then
                let ALIVE=ALIVE+1
                echo "Instance ${instance} is network reachable, $(date)"
                break
            fi
            sleep 5
            let TRY=TRY+1
        done
    done
    #---------------------------------------------------------------------------
    # Check to see if instances became pingable
    #---------------------------------------------------------------------------
    if [ $ALIVE -lt 2 ] ; then
        echo "ERROR :: Unable to reach one of the guests..."
        if $CLEAN ; then
            cleanup
        fi
        exit 1
    fi
if $DHCP ; then
pass=0
breakloop=0
while true
do
if [[ ${pass} -lt 2 ]] ; then
pass=0
fi
ssh -o BatchMode=yes -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETSERVER} "ip l s mtu ${MTU} dev ${GUEST_VETH}"
if [ $? -eq 0 ] ; then
pass=$((pass+1))
fi
ssh -o BatchMode=yes -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETCLIENT} "ip l s mtu ${MTU} dev ${GUEST_VETH}"
if [ $? -eq 0 ] ; then
pass=$((pass+1))
fi
if [ ${pass} -eq 2 ] ; then
break
fi
if [ $? -eq 0 ] ; then
pass=$((pass+1))
fi
if $DEBUG ; then
echo "pass=$pass , breakloop=$breakloop"
fi
if [ $breakloop -eq 10 ] ; then
echo "Error : unable to set MTU within Guest"
fi
breakloop=$((breakloop+1))
done
fi
# Start the server side: disable SELinux/firewalls in the server guest and
# launch uperf in listen mode (backgrounded), or fall back to netserver.
if $UPERF ; then
ssh -o BatchMode=yes -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETSERVER} 'setenforce 0'
ssh -o BatchMode=yes -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETSERVER} 'systemctl stop iptables'
ssh -o BatchMode=yes -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETSERVER} 'systemctl stop firewalld'
ssh -o BatchMode=yes -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETSERVER} '/usr/local/bin/uperf -s ' > /dev/null &
else
ssh -o BatchMode=yes -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETSERVER} 'netserver ; sleep 4'
fi
if $DEBUG ; then
D1=$(ssh -o BatchMode=yes -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETSERVER} 'ls')
echo "#----------------------- Debug -------------------------------------------------"
echo "SSH Output :: $D1"
echo "#-------------------------------------------------------------------------------"
fi
# Client side: prep the client guest, register pbench tooling on both guests
# and run pbench-uperf, then ship results to the pbench server.
if $UPERF ; then
# NOTE(review): TCP_STREAM/UDP_STREAM are set but never read -- likely
# leftovers from the netperf era.
TCP_STREAM=false
UDP_STREAM=false
echo "Running UPerf"
ssh -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETCLIENT} 'setenforce 0'
ssh -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETCLIENT} 'systemctl stop iptables'
ssh -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETCLIENT} 'systemctl stop firewalld'
# Below is specific to Red hat
# We could add this to the Neutron subnet
ssh -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETCLIENT} "echo ${PBENCH_SERVER_IP} ${PBENCH_SERVER_HOSTNAME} >> /etc/hosts"
ssh -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETSERVER} "echo ${PBENCH_SERVER_IP} ${PBENCH_SERVER_HOSTNAME} >> /etc/hosts"
ssh -o ConnectTimeout=3 -o StrictHostKeyChecking=no -q -t ${NETSERVER} "echo nameserver ${NAMESERVER} > /etc/resolv.conf"
pbench-register-tool-set --remote=${NETCLIENT}
pbench-register-tool-set --remote=${NETSERVER}
if $HIGH_INTERVAL ; then
for tool in sar pidstat; do
pbench-register-tool --name=${tool} --remote=${NETCLIENT} -- --interval=1
pbench-register-tool --name=${tool} --remote=${NETSERVER} -- --interval=1
done
fi
pbench-uperf --clients=${NETCLIENT} --servers=${NETSERVER} --samples=${SAMPLES} --test-types=${TESTS} --protocols=${PROTO} --config=${TESTNAME}
pbench-move-results
pbench-clear-tools
fi
fi # End SINGLE_TUNNEL_TEST
#----------------------- Cleanup -----------------------------------------------
if $CLEAN ; then
cleanup
fi
| true
|
5f51604a68bf6f047b1d8d1f59b41380873a2156
|
Shell
|
josegl/linuxDistrosSetups
|
/arch-mountain/prechroot-setup.sh
|
UTF-8
| 3,139
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# This script runs at the start of an archlinux livecd session.
# It only works for my laptop, i.e. the "mountain".
# It is partitioned as follows:
#   ssd = 120GB = sdb
#   hd  = 750GB = sda
# The ssd has 3 partitions:
#   sdb1 = 10mb bios partition
#   sdb2 = 200 MB partition for /boot
#   sdb3 = partition using the remaining space, dedicated to lvm
# The regular hard disk has a single partition:
#   sda1 = the whole disk is used for lvm
# lvm configuration:
#   sda1 and sdb3 are physical volumes
#   vg0 is a volume group containing sdb3 and sda1
#
# The volume is partitioned as follows:
#
# /dev/vg0/lvroot = 20GiB, luks-encrypted, passphrase = "di amigo y entra: mellon", mapped at /dev/mapper/rootCifrado
# /dev/vg0/lvhome = 600 GiB, luks-encrypted with a key created with dd and stored in /etc/lukskeys/home.key, mapped at /dev/mapper/homeCifrado
# /dev/vg0/lvswap = 16 GiB, luks-encrypted with a temporary random key (crypttab takes care of it), mapped at /dev/mapper/swapCifrado
# Partition scheme:
# /dev/sdb2 ------------------> /boot
# /dev/mapper/rootCifrado ----> /
# /dev/mapper/homeCifrado ----> /home
# Ramdisk --------------------> /tmp
# Ramdisk --------------------> /var/tmp
# Not all of the disk is used, so if any partition needs more space in the
# future, the spare capacity can be used to grow whichever one needs it.
# The following commands leave the system correctly partitioned and formatted.
# WARNING: destructive -- reformats /dev/sdb2 and /dev/vg0/lvroot.
# 1. Format the sdb2 partition for /boot with ext4.
mkfs.ext4 /dev/sdb2
# 2. Open /dev/vg0/lvroot, map it to rootCifrado and format it with ext4.
cryptsetup open --type luks /dev/vg0/lvroot rootCifrado
mkfs.ext4 /dev/mapper/rootCifrado
# 3. Mount the partitions.
mount /dev/mapper/rootCifrado /mnt
mkdir /mnt/boot
mount /dev/sdb2 /mnt/boot
# 4. Install the base packages. Since this machine is used for development,
#    both base and base-devel are needed.
# 4.1 Set up a decent mirror list first; that requires the reflector package.
pacman -Syy
pacman -S reflector
reflector -a 8 -f 10 > /etc/pacman.d/mirrorlist
pacman -Syy
pacstrap /mnt base base-devel
# 5. At this point we would normally generate fstab with genfstab, but in this
#    case the original file is restored instead.
cp linuxDistrosSetups/arch-mountain/fstab /mnt/etc/fstab
# 6. Chroot step.
# The setup is split into two parts: the prechroot part (this script), which
# handles partitioning, mounting, etc., and the postchroot part, which installs
# the required packages and writes configuration files with the right
# parameters. The repo must therefore be available inside the chroot; rather
# than cloning it again with git, the whole directory is copied into /mnt.
# Remember to delete the repo directory when finished.
cp -R linuxDistrosSetups /mnt
arch-chroot /mnt /bin/bash -c "sh linuxDistrosSetups/arch-mountain/postchroot-setup.sh"
| true
|
81c3a208f9b64ff59129ee3d0586f28c59e8a1c4
|
Shell
|
rubencarneiro/ubports-pdk
|
/scripts/setup.sh
|
UTF-8
| 3,191
| 4.03125
| 4
|
[] |
no_license
|
#######################################
# Prompt the user for an existing directory to use as the PDK data root,
# persist DATA_ROOT/SRC_ROOT/USER into $CONFIG_ROOT/config.sh, then source it.
# Globals:   CONFIG_ROOT (read); DATA_ROOT, SRC_ROOT (set by sourcing the config)
# Outputs:   prompts on stdout; writes $CONFIG_ROOT/config.sh
#######################################
function warnMissingData {
    local is_valid=0
    echo "Please enter the directory path you want to set up"
    while [ "$is_valid" = "0" ]; do
        printf "Path: "
        # -r keeps backslashes in the typed path literal (original lacked it).
        read -r NEW_DATA_ROOT
        if [ -d "$NEW_DATA_ROOT" ]; then
            is_valid=1
            continue
        fi
        echo "Please make sure the directory path is valid and exists"
    done
    # Write the whole config in one redirection so a failure cannot leave a
    # partially-written file (original used three separate > / >> writes).
    {
        echo "DATA_ROOT=$NEW_DATA_ROOT"
        echo "SRC_ROOT=$NEW_DATA_ROOT/sources"
        echo "USER=$USER"
    } > "$CONFIG_ROOT/config.sh"
    source "$CONFIG_ROOT/config.sh"
}
#######################################
# Install/enable an OpenSSH server on the host.
# Linux: installs openssh-server via apt and enables the systemd unit.
# macOS: turns remote login on via systemsetup.
# Outputs: progress / error messages on stdout.
#######################################
function tryInstallSshd {
    case "$(uname -s)" in
        Linux)
            if [ -f /usr/bin/apt ]; then
                sudo apt install openssh-server && \
                    sudo systemctl enable ssh && \
                    echo "SSH enabled successfully!"
            else
                echo "Unknown package manager used, please add support for it in UBports PDK".
            fi
            ;;
        Darwin)
            sudo systemsetup -setremotelogin on && echo "SSH enabled successfully!"
            ;;
    esac
}
#######################################
# Verify that an SSH server is installed/enabled and, if not, offer to set it
# up interactively via tryInstallSshd.
# Outputs: warnings/prompts on stdout; reads the y/n answer from stdin.
#######################################
function checkSsh {
    if [ "$(uname -s)" == "Linux" ]; then
        # Original ran "systemctl status ssh 1&> /dev/null": the stray "1" was
        # passed to systemctl as an extra unit name. Redirect both streams and
        # test the exit status directly instead of inspecting $?.
        if ! systemctl status ssh > /dev/null 2>&1; then
            echo "WARNING: The OpenSSH server seems to be missing or not activated, please install it using your package manager."
            while true; do
                read -r -p "Would you like to do that automatically now [y/n]? " yn
                case $yn in
                    [Yy]* ) tryInstallSshd; break;;
                    [Nn]* ) break;;
                    * ) echo "Please answer yes or no.";;
                esac
            done
        fi
    elif [ "$(uname -s)" == "Darwin" ]; then
        OUTPUT=$(sudo systemsetup -getremotelogin)
        if [ "$OUTPUT" != "Remote Login: On" ]; then
            echo "WARNING: SSH doesn't seem to be enabled!"
            while true; do
                read -r -p "Would you like to enable it now [y/n]? " yn
                case $yn in
                    [Yy]* ) tryInstallSshd; break;;
                    [Nn]* ) break;;
                    * ) echo "Please answer yes or no.";;
                esac
            done
        fi
    fi
}
#######################################
# One-shot environment setup: install prerequisites, ask for the data root,
# create the directory layout, (re)generate the PDK SSH keypair, and register
# its public key in ~/.ssh/authorized_keys.
# Globals: SCRIPTPATH, DATA_ROOT, HOME (read)
#######################################
function setup {
    bash "$SCRIPTPATH/scripts/prerequisites.sh"
    warnMissingData
    if [ ! -d "$DATA_ROOT/sources" ]; then
        mkdir -p "$DATA_ROOT/sources"
    fi
    if [ ! -d "$DATA_ROOT/sshd" ]; then
        mkdir -p "$DATA_ROOT/sshd"
    fi
    # Always regenerate the keypair so a stale key never lingers.
    if [ -f "$DATA_ROOT/sshd/id_rsa" ]; then
        rm "$DATA_ROOT/sshd/id_rsa"
    fi
    if [ -f "$DATA_ROOT/sshd/id_rsa.pub" ]; then
        rm "$DATA_ROOT/sshd/id_rsa.pub"
    fi
    checkSsh
    ssh-keygen -q -t rsa -N '' -f "$DATA_ROOT/sshd/id_rsa"
    PUBKEY_CONTENTS=$(cat "$DATA_ROOT/sshd/id_rsa.pub")
    # Guard the grep: on a fresh machine authorized_keys may not exist yet, and
    # grep would print an error before falling through to the append branch.
    if [ -f "$HOME/.ssh/authorized_keys" ] && grep -q "^$PUBKEY_CONTENTS" "$HOME/.ssh/authorized_keys"; then
        echo "Public key contents already registered, continuing"
    else
        if [ ! -d "$HOME/.ssh" ]; then
            mkdir "$HOME/.ssh"
            chmod 700 "$HOME/.ssh"
        fi
        echo "Inserting ssh key into authorized keys list"
        echo "$PUBKEY_CONTENTS" >> "$HOME/.ssh/authorized_keys"
    fi
}
| true
|
4831e088175a535fb9ea44185d65e9c9f459fc4d
|
Shell
|
SvirgunA/docker
|
/proxy/proxy.sh
|
UTF-8
| 625
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Configure HTTP(S) proxy settings: environment variables, APT (on Debian-like
# systems), and Gradle.
# Expects: PROXY (full proxy URL); PROXY_HOST and PROXY_PORT for Gradle.
if [ ! -z "$PROXY" ]
then
    export HTTP_PROXY=$PROXY
    export HTTPS_PROXY=$PROXY
    export http_proxy=$PROXY
    export https_proxy=$PROXY
    # The original piped the release files into a bare "eval", which receives
    # no arguments and sets nothing -- ID_LIKE was never populated. Source
    # /etc/os-release to obtain it.
    if [ -r /etc/os-release ]; then
        . /etc/os-release
    fi
    # ID_LIKE may be a space-separated list (e.g. "ubuntu debian"), so match
    # with a pattern; also, POSIX test uses "=", not "==".
    case "${ID_LIKE:-}" in
    *debian*)
        # "< EOF" in the original redirected *from* a file named EOF; a
        # here-document ("<< EOF") is what feeds the block to cat.
        cat > /etc/apt/apt.conf.d/01proxy << EOF
Acquire::http::proxy "$PROXY";
Acquire::https::proxy "$PROXY";
EOF
        ;;
    esac
    # -p: do not fail if the directory already exists.
    mkdir -p ~/.gradle
    cat > ~/.gradle/gradle.properties << EOF
systemProp.http.proxyHost=$PROXY_HOST
systemProp.http.proxyPort=$PROXY_PORT
systemProp.https.proxyHost=$PROXY_HOST
systemProp.https.proxyPort=$PROXY_PORT
EOF
fi
| true
|
43989ec4f00d718274bd3e4a5e1390aef3927800
|
Shell
|
maiminh1996/maiminh1996.github.io
|
/scripts/opencv.sh
|
UTF-8
| 2,991
| 2.90625
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build and install OpenCV (plus opencv_contrib) from source on Ubuntu.
# $1 - optional git ref (branch/tag) to check out in both repos.
echo "***** Install OpenCV (02/02/2022) *****"
echo "***** 1. Updating & Upgrading in Ubuntu *****"
sudo apt-get update
sudo apt-get upgrade
echo "***** 2. Installing Dependencies *****"
sudo apt install build-essential
sudo apt install gcc cmake git wget unzip
sudo apt install libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
# NOTE(review): libjasper-dev / libdc1394-22-dev are dropped on newer Ubuntu
# releases -- confirm against the target distro before running.
sudo apt install libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev
sudo apt install libtiff5-dev libeigen3-dev libtheora-dev libvorbis-dev libxvidcore-dev libx264-dev
# sudo apt install ibgtk2.0-dev sphinx-common yasm libfaac-dev libopencore-amrnb-dev \
# libgstreamer-plugins-base1.0-dev libavutil-dev libavfilter-dev libavresample-dev libjasper1
echo "***** 3. Getting OpenCV *****"
# sudo -s
# 1. Clone the repos and check out the requested ref ($1) in both.
# cd /opt # install in opt
git clone https://github.com/opencv/opencv ~/opencv
git clone https://github.com/opencv/opencv_contrib ~/opencv_contrib
cd ~/opencv/ && git pull origin master && git checkout $1
cd ~/opencv_contrib/ && git pull origin master && git checkout $1
# 2. Alternatives for picking another version:
# Option 1: checkout a branch
# git checkout 3.4
# Option 2: download a released tarball from https://opencv.org/releases.html
# wget -O opencv.zip https://github.com/opencv/opencv/archive/3.4.16.zip
# wget -O opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/3.4.16.zip
# unzip opencv.zip
# unzip opencv_contrib.zip
# ln -s opencv-3.4.16 opencv # create symbolic link (original comment said "ls -s", a typo)
# ln -s opencv_contrib-3.4.16 opencv_contrib
echo "***** 4. Building and Installing OpenCV *****"
# Change to a specific version. e.g. 3.4.16
# git checkout 3.4
cd ~/opencv/
mkdir -p build && cd build
# Configure: no CUDA/OpenCL/GL, bundled TBB, contrib modules, and the Python
# paths of whichever interpreter is first on PATH.
cmake -D BUILD_TIFF=ON \
-D WITH_CUDA=OFF \
-D ENABLE_AVX=OFF \
-D WITH_OPENGL=OFF \
-D WITH_OPENCL=OFF \
-D WITH_IPP=OFF \
-D WITH_TBB=ON \
-D BUILD_TBB=ON \
-D WITH_EIGEN=OFF \
-D WITH_V4L=OFF \
-D WITH_VTK=OFF \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D OPENCV_EXTRA_MODULES_PATH=~/opencv_contrib/modules \
-D PYTHON3_EXECUTABLE=$(which python) \
-D PYTHON_INCLUDE_DIR=$(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \
-D PYTHON3_PACKAGES_PATH=$(python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \
-D PYTHON_DEFAULT_EXECUTABLE=$(which python) \
~/opencv/
# If opencv/build/Makefile already exists, the build can resume from here.
# git pull
make -j$(nproc --all) # use all cores; check the count with "nproc --all" or "grep 'cpu cores' /proc/cpuinfo | uniq"
sudo make install # install system-wide
sudo sh -c 'echo "/usr/local/lib" > /etc/ld.so.conf.d/opencv.conf'
sudo ldconfig
echo "***** Finished *****"
# Check opencv version
pkg-config --modversion opencv
pkg-config --modversion opencv4
python -c "import cv2; print(cv2.__version__)"
| true
|
21cbc894831b9f39b41b9bead5932243b47a1379
|
Shell
|
zEduardofaria/my-scripts
|
/bash/portscan-network.sh
|
UTF-8
| 278
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# TCP SYN scan of a /24 network on one port using hping3 (requires root).
# Usage: $0 NETWORK PORT   e.g. $0 172.16.1 80
# Prints the address of every host that answered SYN/ACK (open port).
# The usage text demands two arguments, but the original only validated $1;
# require both so a missing port does not silently scan with "-p ''".
if [ -z "$1" ] || [ -z "$2" ]
then
	echo "PORTSCAN NETWORK"
	echo "Use mode: $0 NETWORK PORT"
	echo "Example: $0 172.16.1 80"
else
	for ip in {1..254};
	do
		# flags=SA in hping3's reply means the port answered SYN/ACK (open).
		hping3 -S -p "$2" -c 1 "$1.$ip" 2> /dev/null | grep "flags=SA" | cut -d " " -f 2 | cut -d "=" -f 2;
	done
fi
| true
|
44dbc5a1c993005e7bd80388739af3a6b32aa66d
|
Shell
|
roycollings/saltstack
|
/states/handy_scripts/bright_max
|
UTF-8
| 250
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Set the Intel backlight to its maximum brightness level.
max_brightness=$(cat /sys/class/backlight/intel_backlight/max_brightness)
# Write via "sudo tee" instead of piping a command string into "sudo sh"
# (the original also read the current brightness into a variable that was
# never used; that dead read is removed).
printf '%s\n' "$max_brightness" | sudo tee /sys/class/backlight/intel_backlight/brightness > /dev/null
| true
|
4b4c147afe7b6d0ddc77fc984f1ac59c1607df4f
|
Shell
|
guimiu/SwiftGit2
|
/script/build_openssl.sh
|
UTF-8
| 953
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
# source the common build functions
SCRIPT_DIR=$(dirname "$0")
source "${SCRIPT_DIR}/common.sh"
#######################################
# Build OpenSSL for one architecture and patch the resulting dylibs.
# Globals:   OUTPUT_DIR (read) - install prefix
# Arguments: $1 - target machine/arch passed to ./config via MACHINE (e.g. arm64)
# Notes:     must be run from the OpenSSL source directory; uses macOS
#            install_name_tool, so this function is Darwin-only.
#######################################
function build_ssl ()
{
echo "Building $1 binary..."
MACHINE=$1 ./config --prefix="$OUTPUT_DIR"
make
make install
cd "${OUTPUT_DIR}/lib"
# Replace the versioned libcrypto with an unversioned copy whose install name
# is @rpath-relative, so consumers can embed it without absolute paths.
rm -rf libcrypto.dylib
cp libcrypto.1.1.dylib libcrypto.dylib
install_name_tool -id @rpath/libcrypto.dylib libcrypto.dylib
rm -rf libcrypto.1.1.dylib
# Same treatment for libssl, plus rewriting its reference to libcrypto.
rm -rf libssl.dylib
cp libssl.1.1.dylib libssl.dylib
install_name_tool -id @rpath/libssl.dylib libssl.dylib
install_name_tool -change "${OUTPUT_DIR}/lib/libcrypto.1.1.dylib" @rpath/libcrypto.dylib libssl.dylib
rm -rf libssl.1.1.dylib
cd -
}
# Build a universal (arm64 + x86_64) OpenSSL into External/output/openssl.
# cleanup/save_arch/fat_binary come from common.sh sourced above.
OUTPUT_DIR=$(pwd)/External/output/openssl
rm -rf "$OUTPUT_DIR"
mkdir -p "$OUTPUT_DIR"
cd External/openssl
cleanup
# Build each architecture slice, stash it, then merge into a fat binary.
build_ssl arm64
save_arch arm64
cleanup
build_ssl x86_64
save_arch x86_64
fat_binary arm64 x86_64
echo "Building done."
| true
|
ac6a54014852c7920879905d4efaba5bb7f70238
|
Shell
|
Ruitulyu/KAS-Analyzer
|
/scripts/fastqc_fastqc.sh
|
UTF-8
| 3,065
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# 'KAS-Analyzer fastqc' was developed by Ruitu Lyu on 12-10-2021.
# Non-interactive wrapper around FastQC for KAS-seq data quality control.
# Stop on error
set -e
## Read arguments
# Help strings assembled here and printed by printHelpAndExit below.
usageHelp="Usage: KAS-Analyzer fastqc [ -h/--help ] [ -t threads ] [ -c contaminants ] [ -o output dir ] [ -k KAS-seq ] "
exampleHelp="Example: nohup KAS-Analyzer fastqc -t 10 -k KAS-seq.rep1.fastq.gz,KAS-seq.rep2.fastq.gz,KAS-seq.rep3.fastq.gz &"
threadsHelp="-t [threads]: please input number of threads to be used for quality control check. Default: 1."
contaminantsHelp="-c [contaminants]: please specify a file which contains the list of contaminants (format: name[tab]sequence) to screen overrepresented sequences against. Default: no."
outputdirHelp="-o [output dir]: please specify the output directory with output files."
KASseqHelp="-k [KAS-seq]: please input the KAS-seq data that you want to know the quality control, like sequencing quality, duplicates, contaminants (adapter sequence)."
helpHelp="-h/--help: print this help and exit.
Note: The 'KAS-Analyzer fastqc' shell script is applied to check quality control and identify a potential type of problem in your KAS-seq data in non-interactive mode. It mainly invoke FASTQC, please refer to the FASTQC official website for more information."
#######################################
# Print the full usage/help text and terminate the script.
# Arguments: $1 - exit status (optional; defaults to 1).
# The original ran "exit -1", which is not a valid status operand (bash maps
# it to 255) and ignored its argument even though the -h handler calls
# "printHelpAndExit 0" and clearly expects a success exit.
#######################################
printHelpAndExit() {
echo -e ""
echo -e "$usageHelp"
echo -e ""
echo -e "$exampleHelp"
echo -e ""
echo -e "$threadsHelp"
echo -e ""
echo -e "$contaminantsHelp"
echo -e ""
echo -e "$outputdirHelp"
echo -e ""
echo -e "$KASseqHelp"
echo -e ""
echo -e "$helpHelp"
echo -e ""
exit "${1:-1}"
}
# Print the help for a lone argument or an explicit help flag.
# NOTE(review): the original comment claimed "no parameters" prints the help,
# but the test is $# == 1; the zero-argument case is still caught by the
# mandatory -k check below, so the observable behavior is unchanged.
if [[ $# == 1 ]] || [[ $1 == "--help" ]] || [[ $1 == "-help" ]] ;then
    printHelpAndExit
fi
# get the value of options.
while getopts 'ht:c:o:k:' opt; do
    case $opt in
        h) printHelpAndExit 0;;
        t) threads=$OPTARG ;;
        c) contaminants=$OPTARG ;;
        o) outputdir=$OPTARG ;;
        k) KASseq=$OPTARG ;;
        ?) printHelpAndExit ;;
    esac
done
# -k is mandatory (quote every expansion so set -e / odd values cannot break the tests).
if test -z "$KASseq" ;then
    echo ""
    echo "please input the KAS-seq data that you want to know the quality control. -k [KAS-seq]"
    echo ""
    printHelpAndExit
fi
# setup the default parameters.
if test -z "$threads" ;then
    threads=1
fi
if test -z "$contaminants" ;then
    contaminants="off"
fi
# setup the $outputdir if specified.
if test -z "$outputdir" ;then
    outputdir="off"
else
    mkdir -p "$outputdir"
fi
# Split the comma-separated sample list into an array instead of bouncing it
# through a temporary file (.KASseq.txt) with echo/sed/rm; this avoids the
# stray file in the working directory and keeps filenames with glob
# characters intact.
IFS=',' read -r -a KASseqlist <<< "$KASseq"
if [[ $contaminants == "off" ]] ;then
    if [[ $outputdir == "off" ]] ;then
        fastqc -t "$threads" "${KASseqlist[@]}"
    else
        fastqc -t "$threads" -o "$outputdir" "${KASseqlist[@]}"
    fi
else
    if [[ $outputdir == "off" ]] ;then
        fastqc -t "$threads" -c "$contaminants" "${KASseqlist[@]}"
    else
        fastqc -t "$threads" -c "$contaminants" -o "$outputdir" "${KASseqlist[@]}"
    fi
fi
echo "'KAS-Analyzer fastqc' run successfully!"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.