blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
054c42ee24555c0014bc48094987cc02a64bc832
|
Shell
|
xavileon/mast
|
/bin/testcontinuous
|
UTF-8
| 351
| 2.859375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Watch the project tree for Python file changes and re-run the unit tests
# (integration tests excluded) after every change batch.
set -o pipefail
# fix: quote "$0" so a path containing spaces still resolves
cd "$(dirname "$0")/.." || exit 1
echo "Waiting for filesystem changes. Hit Ctrl+C to exit."
# fswatch -o emits one event-count line per change batch; we only use it as a
# trigger, so the value read is discarded. read -r avoids backslash mangling.
fswatch -e '.*' -i '.*\.py$' --event=Updated --event=Created --event=MovedTo --event=Renamed --event=MovedFrom --event=Removed -o -r . | while read -r _; do
  python setup.py test '--addopts="-m not integration_test"'
done
| true
|
4cda89fba97cdf91ce597b375b269812be012758
|
Shell
|
srcc-msu/octotron
|
/scripts/single_mail.sh
|
UTF-8
| 324
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Send a single mail.
#   $1 - recipient
#   $2 - subject
#   $3 - message body; if $3 is not specified, the body is read from stdin
# Strip a leading "Subject: " prefix some callers accidentally include.
# fix: $(...) instead of backticks, and printf/quoting so a subject with
# spaces, globs, or backslashes survives intact.
NEW_SUBJ=$(printf '%s' "$2" | sed 's/Subject: //')
if [ $# -eq 3 ]
then
    echo -e "$3" | mail -s "$NEW_SUBJ" "$1"
elif [ $# -eq 2 ]
then
    mail -s "$NEW_SUBJ" "$1" <&0
else
    # fix: report misuse on stderr and exit non-zero
    echo "wrong params count" >&2
    exit 1
fi
| true
|
04dc369bccd8951ca7a785bc605306136adf4511
|
Shell
|
jaseg/scripts
|
/harddisk
|
UTF-8
| 1,186
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
#This script has been written to mount and dismount an luks-encrypted hard drive. I needed it because back then on my ubuntu the hard drive would not properly survive a suspend, with the links in /dev/mapper being unreadable as well as undeletable. Although it is *not* the most elegant solution at the time it worked.
# Must run as root: re-exec through sudo, forwarding the original arguments
# (fix: 'sudo $0' previously dropped them).
if [ "$(id -u)" -ne 0 ]; then
	echo "This script must run as root."
	sudo "$0" "$@"
	exit
fi
# fix: the original tests were written '[ "$1"="mount" ]' with no spaces
# around '=', which the shell evaluates as a single non-empty string —
# always true — so the unmount branch was unreachable. The unmount branch
# also compared "$2" where "$1" was intended.
if [ "$1" = "mount" ] || [ "$1" = "m" ] || [ "$1" = "open" ]
then
	# Tear down any stale mounts/mappings left over from a suspend.
	while umount /media/harddisk &>/dev/null
	do true
	done
	for mapping in $(ls /dev/mapper|egrep 'harddisk+$')
	do cryptsetup luksClose "$mapping" &>/dev/null
	done
	# Derive a fresh mapping name by appending 'a' to the last survivor.
	newmapping=$(ls /dev/mapper|egrep 'harddisk+$'|tail -n 1)a
	cryptsetup luksOpen /dev/disk/by-uuid/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx "$newmapping"
	mount "/dev/mapper/$newmapping" /media/harddisk
elif [ "$1" = "unmount" ] || [ "$1" = "umount" ] || [ "$1" = "u" ] || [ "$1" = "close" ]
then
	while umount /media/harddisk &>/dev/null
	do true
	done
	for mapping in $(ls /dev/mapper|egrep 'harddisk+$')
	do cryptsetup luksClose "$mapping" &>/dev/null
	done
else
	echo 'Operation not found. Valid are e.g. "mount" and "umount"'
	exit 2
fi
| true
|
18f01fefc4612ab1f90815937220d00b8dfd0a4d
|
Shell
|
warrenlp/2015VisionCode
|
/cascade_training/mergevec/make/replace_vcproj.sh
|
UTF-8
| 580
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Rewrite OpenCV-relative paths inside a generated Visual Studio project
# ($1.vcproj) so it builds from this directory layout. '!' is the sed
# delimiter because the patterns are full of '/'.
file="$1.vcproj"
# Order matters: the specific ../../../ prefixes must be rewritten before
# the catch-all ../../../ rule near the end.
# fix: quote "$file" and chain with && so a failed sed (e.g. missing file)
# does not replace the project with an empty tmp.
sed 's!../../../bin!../bin!g' "$file" > tmp && mv tmp "$file" # escape . as \. if it does not work well because of them.
sed 's!../../../_temp!../_temp!g' "$file" > tmp && mv tmp "$file"
sed 's!../../../lib/cvhaartraining!../lib/cvhaartraining!g' "$file" > tmp && mv tmp "$file"
sed 's!../../../lib!../lib,C:/Program Files/OpenCV/lib!g' "$file" > tmp && mv tmp "$file"
sed 's!../../../!C:/Program Files/OpenCV/!g' "$file" > tmp && mv tmp "$file"
sed "s!${1}d!${1}!g" "$file" > tmp && mv tmp "$file" # strip the 'd' (debug) suffix the generator appends to the project name
sed 's!cvhaartrainingd!cvhaartraining!g' "$file" > tmp && mv tmp "$file"
| true
|
ad09cebbbea62c7f6d8c3df029222588d07f3d9d
|
Shell
|
linuxmuster/linuxmuster-client-adsso
|
/etc/scripts/onboot.sh
|
UTF-8
| 1,843
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# do onboot stuff
#
# thomas@linuxmuster.net
# 20181127
#
# read setup values
source /etc/linuxmuster-client/scripts/readvars.sh || exit 1
if [ ! -s "$sssd_conf" ]; then
  echo "$sssd_conf does not exist or is not valid!"
  exit 1
fi
# test ad connection
host "$servername.$ad_domain" | grep -q "$serverip" && ad_connected="yes"
# if ad connection is present
if [ -n "$ad_connected" ]; then
  # switch sssd configuration to ad connected
  echo "Connected to ad server $servername.$ad_domain."
  # set proxy profile: render the template with network/domain/proxy values
  if [ -n "$proxy_url" ] && [ -n "$proxy_profile" ] && [ -n "$proxy_template" ]; then
    echo "Creating proxy environment."
    sed -e "s|@@network@@|$network|
      s|@@domainname@@|$ad_domain|
      s|@@proxy_url@@|$proxy_url|g" "$proxy_template" > "$proxy_profile"
  fi
else
  # local mode
  echo "Not connected to ad server $servername.$ad_domain!"
  # remove proxy environment
  # fix: the original read '[ -n "$proxy_profile"]' — the missing space
  # before ']' made the test a runtime error, so the profile was never
  # removed in local mode.
  if [ -n "$proxy_profile" ]; then
    echo "Removing proxy environment."
    rm -f "$proxy_profile"
  fi
fi
# handle swapfile: create it on first boot if fstab references it
if [ -n "$swapfile" ]; then
  if grep -qw ^"$swapfile" /etc/fstab; then
    # fix: the original 'if ! -e "$swapfile"' lacked the test brackets and
    # tried to execute '-e' as a command, so the swapfile was never created.
    if [ ! -e "$swapfile" ]; then
      [ -z "$swapsize" ] && swapsize=2   # default size in GiB
      if fallocate -l "$swapsize"G "$swapfile"; then
        echo "Creating $swapfile."
        mkswap "$swapfile"
        swapon "$swapfile"
      else
        echo "Creating $swapfile failed!"
        rm -f "$swapfile"
      fi
    fi
  fi
fi
# Template directory to common-session
if [ -f /etc/pam.d/common-session ]; then
  sed -i -e ":pam_mkhomedir.so: s:pam_mkhomedir.so.*$:pam_mkhomedir.so skel=${template_directory}:" /etc/pam.d/common-session
fi
# source onboot hookdir
if ls "$onboot_hookdir"/*.sh &> /dev/null; then
  echo "Sourcing onboot hookdir:"
  for i in "$onboot_hookdir"/*.sh; do
    echo "* $(basename "$i") ..."
    source "$i"
  done
fi
exit 0
| true
|
92b76be16fced81ef4b38ebc687fa5564b7bd71f
|
Shell
|
lmammino/dotfiles
|
/shell/python.sh
|
UTF-8
| 905
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
# Python environment setup (fish shell, despite the .sh extension)
# Pip3 installed binaries (per-version user installs on macOS)
if test -d "$HOME/Library/Python/"
    for p in (ls -d $HOME/Library/Python/*)
        set -x PATH "$p/bin" $PATH
    end
end
# Gives priority to brew installed python
# fix: this block previously appeared twice verbatim, prepending the same
# directory onto PATH a second time.
if test -d "/usr/local/opt/python/libexec/bin"
    set -x PATH "/usr/local/opt/python/libexec/bin" $PATH
end
# Supports for pipx installed binaries (https://pypa.github.io/pipx/)
if test -d "$HOME/.local/bin"
    set -x PATH "$HOME/.local/bin" $PATH
end
# supports pyenv
if test -d "$HOME/.pyenv"
    set -Ux PYENV_ROOT $HOME/.pyenv
    set -U fish_user_paths $PYENV_ROOT/bin $fish_user_paths
    # Load pyenv automatically by appending
    # the following to ~/.config/fish/config.fish:
    status is-login; and pyenv init --path | source
    pyenv init - | source
end
| true
|
3c0b2d75d033bd37f84c458dd46f020f7108e4cf
|
Shell
|
aravindkukku/kukku
|
/fol.sh
|
UTF-8
| 264
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Prompt for a name, create /home/aravind/<name> with three files edited in
# nano, archive the directory, and push everything to git.
read -r -p "enter the name of file" ex
mkdir "/home/aravind/$ex";
# fix: the original 'for i in range $(seq 3)' also iterated over the literal
# word "range" (a Python-ism), creating a stray file named "filerange".
for i in $(seq 3)
do
	nano "/home/aravind/$ex/file$i"
done
tar -cf "/home/aravind/$ex/$ex.tar" "/home/aravind/$ex"
git pull origin master
git add -A
git commit -m "$ex commited"
git push origin master
| true
|
04a605d026494a1b2787bad64525f646dfd3fff8
|
Shell
|
wantonsolutions/bully
|
/test/test.sh
|
UTF-8
| 235
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
#args grouplistFileName Timeout AYA SendFailure
# Read the group list file ($1); words alternate (skip one, use one) and each
# used word is launched as a background ../node process.
bool=0
while read -r line
do
	# intentional word-splitting: iterate the whitespace-separated words
	for word in $line
	do
		if [ "$bool" == 0 ]
		then
			bool=1
		else
			echo "$word"
			# fix: the original wrapped this in backticks, which ran the
			# command and then tried to execute its *output* as a command.
			../node "$word" "$1" - "$2" "$3" "$4" &
			bool=0
		fi
	done
done < "$1"
| true
|
1c83923829f169e671788e114b0ac829b28464d5
|
Shell
|
DalavanCloud/tools
|
/perf/istio/allconfig/setup.sh
|
UTF-8
| 796
| 3.734375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# fix: the shebang was '#/bin/bash' (missing '!'), so the kernel ignored it.
set -ex

# Required: DNS domain of the test cluster; ingress IP is read from the
# istio ingress gateway service.
DNS_DOMAIN=${DNS_DOMAIN:?"DNS_DOMAIN like v104.qualistio.org"}
GATEWAY_URL=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')

# Render the helm chart into $1/all_config.yaml and, unless DRY_RUN is set,
# apply it to the 'test' namespace (created and labelled for injection here).
function install_all_config() {
  local DIRNAME="${1:?"output dir"}"
  local domain=${DNS_DOMAIN:-qualistio.org}
  local OUTFILE="${DIRNAME}/all_config.yaml"
  kubectl create ns test || true
  kubectl label namespace test istio-injection=enabled || true
  helm -n test template \
    --set fortioImage=fortio/fortio:latest \
    --set ingress="${GATEWAY_URL}" \
    --set domain="${domain}" . > "${OUTFILE}"
  if [[ -z "${DRY_RUN}" ]]; then
    kubectl -n test apply -f "${OUTFILE}"
  fi
}

WD=$(dirname "$0")
WD=$(cd "$WD" && pwd)
mkdir -p "${WD}/tmp"
# fix: "$@" preserves argument word boundaries; $* does not
install_all_config "${WD}/tmp" "$@"
| true
|
a5e0554aecec5779f3f1af806ce963c41d46a64e
|
Shell
|
CS5331-GROUP-7/as3
|
/benchmarks/ssl/genInsecureKey.sh
|
UTF-8
| 128
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# Strip the passphrase from app2.key through app20.key, writing each result
# to appN-insecure.key alongside the original.
for n in $(seq 2 20)
do
    openssl rsa -in "app$n.key" -out "app$n-insecure.key"
done
| true
|
2d0a4b70838b6f1b591d89602f2b418feccc395b
|
Shell
|
torchbox/k8s-ts-ingress
|
/tests/e2erun.sh
|
UTF-8
| 7,527
| 3.421875
| 3
|
[] |
no_license
|
#! /bin/sh
# vim:set sw=8 ts=8 noet:
set -e
# We can't use 127.0.0.1 for test endpoints because the apiserver won't let us
# created an Endpoints with that address. Pick up the first reasonable-looking
# address from the host instead; it doesn't need Internet connectivity, it just
# needs to exist.
TEST_IP_ADDRESS=$(/sbin/ip addr | awk '/ inet / && !/127.0.0/ { print $2 }' | head -1 | cut -d/ -f1)
if [ -z "$TEST_IP_ADDRESS" ]; then
echo >&2 "$0: cannot determine external IP address for tests"
exit 1
fi
if [ -z "$E2E_KUBERNETES_VERSION" ]; then
E2E_KUBERNETES_VERSION=1.6.4
fi
if [ -z "$E2E_TS_VERSION" ]; then
E2E_TS_VERSION=7.1
fi
printf 'Using Kubernetes version: %s (change with $E2E_KUBERNETES_VERSION)\n' $E2E_KUBERNETES_VERSION
printf 'Using Traffic Server version: %s (change with $E2E_TS_VERSION)\n' $E2E_TS_VERSION
printf 'Using test IP address: %s\n' $TEST_IP_ADDRESS
# Fetch the hyperkube binary for $E2E_KUBERNETES_VERSION into $HYPERKUBE,
# skipping the download when a cached copy already exists.
download_hyperkube() {
	if [ -e "$HYPERKUBE" ]; then
		return 0
	fi
	echo '>>> Downloading hyperkube'
	# fix: quote "$HYPERKUBE" — it is derived from $(pwd), which may
	# contain spaces.
	curl -Lo "$HYPERKUBE" "https://storage.googleapis.com/kubernetes-release/release/v${E2E_KUBERNETES_VERSION}/bin/linux/amd64/hyperkube"
	chmod 755 "$HYPERKUBE"
}
download_etcd() {
if [ -e "$ETCD" ]; then
return 0
fi
echo '>>> Downloading etcd'
curl -L https://storage.googleapis.com/etcd/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz | gzip -dc | tar xf - --strip-components=1 -C _test etcd-v${ETCD_VERSION}-linux-amd64/etcd
mv _test/etcd $ETCD
chmod 755 $ETCD
}
start_etcd() {
printf 'starting etcd: '
mkdir -p $TESTDIR/etcd.data
$ETCD --data-dir $TESTDIR/etcd.data --listen-client-urls=http://127.0.0.1:42379 --listen-peer-urls=http://127.0.0.1:42380 --advertise-client-urls=http://127.0.0.1:42379 >>$TESTDIR/log 2>&1 &
pid=$!
echo $pid > $TESTDIR/etcd.pid
# wait a little bit for etcd to get started
sleep 10
printf 'ok, pid %d\n' $pid
}
# Stop the etcd instance recorded in $TESTDIR/etcd.pid and reap it; failure
# to wait (already gone) is tolerated.
stop_etcd() {
	printf 'stopping etcd: '
	etcd_pid=$(cat "$TESTDIR/etcd.pid")
	kill "$etcd_pid"
	wait "$etcd_pid" || true
	printf 'ok\n'
}
start_httpd() {
printf 'starting httpd: '
tests/httpd.sh $TEST_IP_ADDRESS >>$TESTDIR/log 2>&1 &
pid=$!
echo $pid > $TESTDIR/httpd.pid
printf 'ok, pid %d\n' $pid
}
start_ts() {
idir=$(pwd)/_test/ts-install-${E2E_TS_VERSION}
printf 'starting traffic_server: '
cp tests/e2e-kubernetes.config $idir/etc/trafficserver/kubernetes.config
cp tests/plugin.config $idir/etc/trafficserver/plugin.config
cp docker/records.config $idir/etc/trafficserver/records.config
PROXY_CONFIG_HTTP_SERVER_PORTS='58080 58443:ssl'
export PROXY_CONFIG_HTTP_SERVER_PORTS
#echo 'CONFIG proxy.config.http.server_ports STRING 58080 58443:ssl' \
# >>$idir/etc/trafficserver/records.config
# Make sure the cache is empty before starting the test.
$idir/bin/traffic_server -Cclear_cache >>$TESTDIR/log 2>&1 || true
$idir/bin/traffic_server >>$TESTDIR/log 2>&1 &
pid=$!
echo $pid > $TESTDIR/ts.pid
printf 'ok, pid %d\n' $pid
}
stop_ts() {
printf 'stopping traffic_server: '
pid=$(cat $TESTDIR/ts.pid)
kill $pid
wait $pid || true
printf 'ok\n'
}
stop_httpd() {
printf 'stopping httpd: '
pid=$(cat $TESTDIR/httpd.pid)
kill $pid
wait $pid || true
printf 'ok\n'
}
start_apiserver() {
printf 'starting apiserver: '
ln -s $HYPERKUBE $TESTDIR/apiserver
$TESTDIR/apiserver --etcd-servers http://127.0.0.1:42379 --service-cluster-ip-range=10.3.0.0/24 --cert-dir $TESTDIR/apiserver-certs --insecure-port=48888 --insecure-bind-address=127.0.0.1 --secure-port=48844 --bind-address=127.0.0.1 >>$TESTDIR/log 2>&1 &
pid=$!
echo $pid > $TESTDIR/apiserver.pid
# apiserver takes a little while to get started
sleep 15
printf 'ok, pid %d\n' $pid
}
stop_apiserver() {
printf 'stopping apiserver: '
pid=$(cat $TESTDIR/apiserver.pid)
kill $pid
wait $pid || true
printf 'ok\n'
}
install_ts() {
if [ -e "_test/ts-install-${E2E_TS_VERSION}" ]; then
return 0
fi
echo '>>> Installing Traffic Server'
if [ ! -d "_test/${TS_DIR}" ]; then
if [ ! -e "_test/${TS_ARCHIVE}" ]; then
curl -Lo_test/${TS_ARCHIVE} ${TS_URL}
fi
gzip -dc _test/${TS_ARCHIVE} | tar xf - -C _test
fi
idir=$(pwd)/_test/ts-install-${E2E_TS_VERSION}
( cd _test/${TS_DIR}
if [ $TS_AUTORECONF = true ]; then
autoreconf -if >log 2>&1 || (cat log; exit 1)
fi
./configure --prefix=$idir --enable-asan >>$TESTDIR/log 2>&1 || \
(cat $TESTDIR/log; exit 1)
make >>$TESTDIR/log 2>&1 || (cat $TESTDIR/log; exit 1)
make install >>$TESTDIR/log 2>&1 || (cat $TESTDIR/log; exit 1)
)
}
install_plugin() {
echo '>>> Building plugin'
idir=$(pwd)/_test/ts-install-${E2E_TS_VERSION}
(
rm -rf _testbuild
mkdir _testbuild
cd _testbuild
../configure --with-tsxs=$idir/bin/tsxs >>$TESTDIR/log 2>&1 \
|| (cat $TESTDIR/log; exit 1)
make >>$TESTDIR/log 2>&1 || (cat $TESTDIR/log; exit 1)
cp kubernetes.so $idir/libexec/trafficserver/
cd ..
rm -rf _testbuild
)
}
# Run one e2e test: substitute the host's test IP into each resource JSON,
# create the resources through kubectl, give Traffic Server time to notice
# them, then execute the test's run.sh. Returns 0 on pass, 1 on any failure.
_actually_runtest() {
test=$1
printf '%-40s: ' "$test"
printf >>$TESTDIR/log 'Creating resources for test...\n'
# Resource templates reference the literal token $TEST_IP_ADDRESS.
for resource in tests/e2e/$test/resources/*.json; do
sed -e "s/\$TEST_IP_ADDRESS/$TEST_IP_ADDRESS/g" \
$resource >$TESTDIR/tmp.json
if ! $KUBECTL create -f $TESTDIR/tmp.json >> $TESTDIR/log 2>&1; then
return 1
fi
done
# wait a few seconds for TS to notice the resource changes
sleep 10
if tests/e2e/$test/run.sh; then
printf 'ok\n'
return 0
else
printf 'failed\n'
return 1
fi
}
_runtest() {
test=$1
status=0
TESTS_RUN=$(expr $TESTS_RUN + 1)
if _actually_runtest $1; then
TESTS_OK=$(expr $TESTS_OK + 1)
else
TESTS_FAILED=$(expr $TESTS_FAILED + 1)
printf '\n*** TEST FAILED ***\n'
status=1
fi
$KUBECTL delete -f tests/e2e/$test/resources >>$TESTDIR/log 2>&1 || true
}
if [ -z "$E2E_KUBERNETES_VERSION" ]; then
echo >&2 "$0: expected \$E2E_KUBERNETES_VERSION to be set"
exit 1
fi
if [ -z "$E2E_TS_VERSION" ]; then
echo >&2 "$0: expected \$E2E_TS_VERSION to be set"
exit 1
fi
# Sometimes we need to build TS from Git.
case $E2E_TS_VERSION in
7.1)
TS_URL=https://github.com/apache/trafficserver/archive/7.1.x.tar.gz
TS_ARCHIVE=trafficserver-7.1.x.tar.gz
TS_DIR=trafficserver-7.1.x
TS_AUTORECONF=true
;;
*)
TS_URL=http://www-eu.apache.org/dist/trafficserver/trafficserver-${E2E_TS_VERSION}.tar.gz
TS_ARCHIVE=trafficserver-${E2E_TS_VERSION}.tar.gz
TS_DIR=trafficserver-$E2E_TS_VERSION
TS_AUTORECONF=false
;;
esac
case $E2E_KUBERNETES_VERSION in
1.6.*)
ETCD_VERSION=3.1.5
;;
*)
echo >&2 "$0: unsupported Kubernetes version $E2E_KUBERNETES_VERSION"
exit 1
;;
esac
HYPERKUBE=$(pwd)/_test/hyperkube-$E2E_KUBERNETES_VERSION
ETCD=$(pwd)/_test/etcd-$ETCD_VERSION
TESTDIR=$(mktemp -d /tmp/test.XXXXXX)
ln -s $HYPERKUBE $TESTDIR/kubectl
TESTS_RUN=0
TESTS_OK=0
TESTS_FAILED=0
KUBECTL="$TESTDIR/kubectl --kubeconfig=$(pwd)/tests/kubeconfig"
mkdir -p _test
download_etcd
download_hyperkube
install_ts
install_plugin
cleanup() {
stop_ts || true
stop_httpd || true
stop_apiserver || true
stop_etcd || true
rm -rf $TESTDIR
}
trap cleanup TERM INT
start_etcd
start_apiserver
start_httpd
start_ts
printf '\nRunning tests:\n\n'
if [ -z "$1" ]; then
tests="$(cd tests/e2e; echo * | sort)"
else
tests="$*"
fi
for test in $tests; do
_runtest $test
done
printf '\n'
printf '>>> Ran %d tests, %d ok, %d failed\n\n' $TESTS_RUN $TESTS_OK $TESTS_FAILED
exit=0
if [ $TESTS_RUN -ne $TESTS_OK ]; then
printf '*** FAILED.\n\n'
echo '------------------- log output: -------------------'
cat $TESTDIR/log
echo '---------------------------------------------------'
exit=1
fi
cleanup
exit $exit
| true
|
78c672edbf58236edb8ed5d6aaa3a50d072ab0f7
|
Shell
|
jameskbride/nuforc-search
|
/elasticsearch/load-elasticsearch.sh
|
UTF-8
| 260
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# $1 - The directory containing the encounter json files.
# $2 - The server location (http://localhost:9200 for development).
# POST every JSON file in $1 into the 'encounters' index of the server at $2.
# fix: quote "$1" and "$2" so paths/URLs with spaces or glob characters work.
for file in "$1"/*.json
do
	curl -X POST -H "Content-Type: application/json" -T "$file" "$2/encounters/encounter";
done;
| true
|
1c47481c7355b2a1ef51b305c49cb39458bc4243
|
Shell
|
xavier506/eosio-network-bootstrap
|
/services/validator3/start.sh
|
UTF-8
| 969
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Start an EOSIO validator node inside a container: bootstrap from genesis on
# first run, otherwise start normally (falling back to a hard replay if the
# first attempt dies), and keep the container's main process alive.
echo "Starting VALIDATOR Node...";
set -e;
# Raise fd/stack limits that nodeos needs.
ulimit -n 65535
ulimit -s 64000
mkdir -p $CONFIG_DIR
cp $WORK_DIR/config.ini $CONFIG_DIR/config.ini
pid=0;
# Base nodeos command line; blocks are signed with the configured key pair.
# NOTE(review): the private key appears on the command line and is visible
# in 'ps' output — consider a signature-provider file instead.
nodeos=$"nodeos \
--config-dir $CONFIG_DIR \
--data-dir $DATA_DIR \
--blocks-dir $DATA_DIR/blocks \
--signature-provider $EOS_PUB_KEY=KEY:$EOS_PRIV_KEY" ;
# Forward SIGTERM to nodeos and wait for it so shutdown is clean.
term_handler() {
if [ $pid -ne 0 ]; then
kill -SIGTERM "$pid";
wait "$pid";
fi
exit 0;
}
# Start nodeos; if it is not alive after 10s, retry with a hard replay
# (rebuilds state from the block log).
start_nodeos() {
$nodeos &
sleep 10;
if [ -z "$(pidof nodeos)" ]; then
$nodeos --hard-replay-blockchain &
fi
}
# First run: wipe any blocks and start the chain from the genesis JSON.
start_fresh_nodeos() {
echo 'Starting new chain from genesis JSON'
$nodeos --delete-all-blocks --genesis-json $WORK_DIR/genesis.json &
}
# On SIGINT(2)/SIGTERM(15): kill the most recent background job, then let
# term_handler shut nodeos down and exit.
trap 'echo "Shutdown of EOSIO service...";kill ${!}; term_handler' 2 15;
# Fresh start only when no block directory exists yet.
if [ ! -d $DATA_DIR/blocks ]; then
start_fresh_nodeos &
elif [ -d $DATA_DIR/blocks ]; then
start_nodeos &
fi
pid="$(pidof nodeos)"
# Block forever (tail never exits); 'wait' returns on signals so the trap
# above can run, then the loop parks again.
while true
do
tail -f /dev/null & wait ${!}
done
| true
|
599f3e491a9372356ec74641b78c858eeabe1867
|
Shell
|
emgunn/Tesseract-Training-Automation
|
/finetune.sh
|
UTF-8
| 1,256
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# fix: added a shebang — the script uses the bash-only 'shopt' builtin.
# This script trains/fine-tunes our existing LSTM model.
# --continue_from should point to the extracted .lstm model of the model we want to continue fine-tuning from. This should be extracted by a call to extract_lstm.sh.
# --model_output should point to where we want our output to be
# --traineddata should point to our base .traineddata file from tessdata best
# --train_listfile should point to our eng.training_files.txt generated from generate_training_data.sh
# --max_iterations is the number of iterations to perform the fine-tuning.
# If this is too high, our model may be too fine-tuned that it will only recognize the given font.
# If this is too low, our model may struggle to recognize text of this font.
# A good number for this is 400 (recommended by YouTube tutorial).
if [ $# -eq 0 ]
then
	echo 'No arguments passed, first argument must specify output name prefix.'
	exit 1
fi
echo 'Executing "finetune.sh"...'
# fix: 'echo "\n"' printed a literal backslash-n in bash; a bare echo emits
# the intended blank line.
echo
# save .keep file to retain directory structure in Git
shopt -s extglob
rm -rf Output/!('.keep')
OMP_THREAD_LIMIT=8 lstmtraining \
	--continue_from eng.lstm \
	--model_output "Output/${1}" \
	--traineddata Trained\ Data/eng.traineddata \
	--train_listfile Train/eng.training_files.txt \
	--max_iterations 400
| true
|
971a8b022453b74a83099e327cb79bae3e545e21
|
Shell
|
malston/tanzu-pipelines
|
/concourse/setup.sh
|
UTF-8
| 1,889
| 3.6875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Print an error message to stderr and abort with status 1.
function die() {
  # fix: the original '2>&1 echo "$@"' redirected stderr INTO stdout, so
  # the message went to stdout; diagnostics belong on stderr.
  echo "$@" >&2
  exit 1
}
# Log docker into the Harbor registry as admin, feeding the password on
# stdin so it never appears in 'ps' output.
# NOTE(review): docker login expects a bare host, but REGISTRY's default
# below includes an "https://" prefix — verify callers export a bare host.
function login_harbor() {
echo "${REGISTRY_PASSWORD}" | docker login -u admin "${REGISTRY}" --password-stdin
}
# Create the Harbor projects the pipelines push to (public, auto-scan on).
# Harbor returns an error for projects that already exist; curl's status is
# not checked, so reruns are effectively tolerated.
function create_harbor_projects() {
  for p in {concourse-images,kpack,tanzu}; do
    echo "Creating '${p}' in Harbor."
    # fix: ${p} was spliced into the JSON unquoted; quoting it prevents
    # word-splitting/globbing from corrupting the request body.
    curl --user "admin:${REGISTRY_PASSWORD}" -X POST \
      "https://${REGISTRY}/api/v2.0/projects" \
      -H "Content-type: application/json" --data \
      '{ "project_name": "'"${p}"'",
      "metadata": {
      "auto_scan": "true",
      "enable_content_trust": "false",
      "prevent_vul": "false",
      "public": "true",
      "reuse_sys_cve_whitelist": "true",
      "severity": "high" }
      }'
  done
}
# Mirror the public concourse-kpack-resource image into our registry so the
# pipeline can pull it locally.
function build_kpack_concourse_resource() {
# Container: pipeline talks to kpack
docker pull gcr.io/cf-build-service-public/concourse-kpack-resource:1.0
docker tag gcr.io/cf-build-service-public/concourse-kpack-resource:1.0 "$REGISTRY/concourse-images/concourse-kpack-resource:1.0"
docker push "$REGISTRY/concourse-images/concourse-kpack-resource:1.0"
}
# Build and push a kubectl image for the given Kubernetes version ($1),
# using the Dockerfile in the current directory.
function build_kubectl_image() {
local k8s_version="${1}"
# Container: pipeline talks to k8s
docker build --platform linux/amd64 --build-arg "KUBERNETES_VERSION=$k8s_version" --rm -t "$REGISTRY/concourse-images/kubectl-docker:$k8s_version" .
docker push "$REGISTRY/concourse-images/kubectl-docker:$k8s_version"
}
# --- configuration & main -------------------------------------------------
DOMAIN="${DOMAIN:-"example.com"}"
# fix: the default previously embedded "https://", which produced URLs like
# "https://https://registry..." in create_harbor_projects and an invalid
# host for docker login; the registry must be a bare hostname.
REGISTRY="${REGISTRY:-"registry.${DOMAIN}"}"
KUBERNETES_VERSION="${KUBERNETES_VERSION:-1.20.7}"

# Prompt for the Harbor admin password when it is not in the environment.
if [[ -z "$REGISTRY_PASSWORD" ]]; then
  echo -n "Enter password for $REGISTRY: "
  read -rs REGISTRY_PASSWORD
  echo
fi

login_harbor || die "Check the password for $REGISTRY"
create_harbor_projects
build_kpack_concourse_resource
build_kubectl_image "$KUBERNETES_VERSION"
| true
|
23d91f3e4e2b75891e678dd806b9499f592df68d
|
Shell
|
liuping001/deploy
|
/d.sh
|
UTF-8
| 491
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# fix: shebang changed from /bin/sh — the script uses the bash-only slice
# expansion ${@:4}.
# Run ansible-playbook for each named server.
#   $1 - server definition yaml   $2 - inventory   $3 - tag   $4.. - servers
usage()
{
	echo "Usage: $0 [server_define.yml] [host.txt] [start] [server_name]... "
	echo "example:"
	echo "    $0 server_define.yml host.txt push server1 server2"
}
if [ $# -lt 4 ];then
	usage
	# fix: exit non-zero on misuse (was a bare 'exit', i.e. success)
	exit 1
fi
# fix: quote "${@:4}" so server names keep their word boundaries
for server in "${@:4}";
do
	# colored, yaml-formatted ansible output with routine noise filtered out
	export ANSIBLE_FORCE_COLOR=true
	export ANSIBLE_STDOUT_CALLBACK=yaml
	ansible-playbook -i "$2" task.yml --tags="$3" -e "server_name=$server server_define_file=$1" |grep -v -P 'PLAY|unreachable|WARN'
done;
| true
|
5089f7e4eebd3d7391b508444cbd9b3fba3c3cc6
|
Shell
|
basile-parent/impro-photo
|
/launchImpro.sh
|
UTF-8
| 560
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the newest impro-photo JAR with the 'prod' Spring profile.
java -fullversion
# Extract the dotted version string from `java -version` (printed on stderr).
version=$(java -version 2>&1 | awk -F '"' '/version/ {print $2}')
if [[ "$version" < "1.8" ]]; then
	>&2 echo
	>&2 echo "ERREUR : La version de installée de Java est inférieure à la version minimale requise (1.8+)"
	exit 1;
fi
# fix: abort if the application directory is missing instead of launching
# from the wrong working directory.
cd /var/www/impro-photo || exit 1
# Most recently modified jar in target/ (ls -tr sorts oldest-first).
lastJar="$(ls -tr target | grep '\.jar$' | tail -1)"
# fix: guard the empty case — previously 'java -jar target/' was executed.
if [[ -z "$lastJar" ]]; then
	>&2 echo "ERREUR : aucun JAR dans target/"
	exit 1
fi
echo
echo "Launch JAR : ${lastJar}"
echo
# java -Xms1024m -Xdebug -jar -Dspring.profiles.active=prod target/${lastJar} -browser
java -Xms1024m -Xdebug -jar -Dspring.profiles.active=prod "target/${lastJar}" -raspberry
| true
|
bcc6ecf4921ab0f3e5228cf16476cbbaddf252df
|
Shell
|
mateuszmidor/KubernetesStudy
|
/KubernetesPoPolsku/s01/e10-helm-2/chart-museum/run_all.sh
|
UTF-8
| 2,416
| 4.125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
trap tearDown SIGINT
CHARTMUSEUM_NAME="mychartmuseum"
CHARTMUSEUM_URL="http://charts.127.0.0.1.nip.io" # configured in chart-museum-values.yaml
# Print a blank separator line followed by the given message in green.
function stage() {
    local text="$1"
    local green="\e[92m"
    local reset="\e[0m"
    echo
    echo -e "${green}${text}${reset}"
}
# Block until the given URL answers HTTP 200, polling every 3 seconds.
function waitUrlAvailable() {
    local url="$1"
    # fix: $(...) instead of backticks, and "$url" quoted inside the test so
    # a URL with globs or spaces cannot break the comparison.
    while [[ "$(curl -s -o /dev/null -w "%{http_code}" "$url")" != "200" ]]; do
        echo "Waiting for $url ..."
        sleep 3
    done
}
# Verify minikube and helm are installed; exit 1 with a hint otherwise.
function checkPrerequsites() {
    stage "Checking prerequisites"
    # fix: 'command minikube' actually *ran* minikube; 'command -v' merely
    # checks that the binary is resolvable.
    command -v minikube > /dev/null 2>&1
    [[ $? != 0 ]] && echo "You need to install minicube to run local cluster" && exit 1
    command -v helm > /dev/null 2>&1
    [[ $? != 0 ]] && echo "You need to install helm to run this lesson" && exit 1
    echo "OK"
}
# Ensure a local minikube cluster is up: if 'minikube status' does not show
# the expected Running/Configured lines, clean up and (re)start it with the
# 'none' VM driver; finally print the cluster IP.
function runMinikube() {
stage "Running Minikube"
# Expected tail of 'minikube status' with all components healthy.
# NOTE(review): the unquoted RHS below makes this a pattern match rather
# than a literal comparison — appears intentional, but verify.
desired_status=": Running : Running : Running : Configured "
if [[ `sudo minikube status | egrep -o ":.*" | tr '\n' ' '` != $desired_status ]]; then
# Remove stale juju lock files that block a restart, then stop/start.
sudo rm -f /tmp/juju-mk*
sudo minikube stop
sudo rm -f /tmp/juju-mk*
echo "Running minikube"
sudo minikube start --vm-driver=none
else
echo "Minikube is running"
fi
ip=`sudo minikube ip`
echo "Your ClusterIP: $ip"
}
function installChartMuseum() {
stage "Installing chart museum and helm push plugin"
sudo helm repo add stable https://kubernetes-charts.storage.googleapis.com
sudo helm repo update
sudo helm install $CHARTMUSEUM_NAME stable/chartmuseum --version 2.9.0 -f chart-museum-values.yaml
sudo helm plugin install https://github.com/chartmuseum/helm-push.git
waitUrlAvailable $CHARTMUSEUM_URL
sudo helm repo add $CHARTMUSEUM_NAME $CHARTMUSEUM_URL
}
function publishMyChart() {
stage "Publishing mychart into local chart repo called $CHARTMUSEUM_NAME"
sudo helm push mychart/ $CHARTMUSEUM_NAME
sudo helm repo update
}
function searchPublishedChart() {
stage "Searching mychart"
sudo helm search repo mychart
}
function showWebPage() {
stage "Running chartmuseum web page"
sleep 10
firefox $CHARTMUSEUM_URL
echo "OK"
}
function keepAlive() {
while true; do sleep 1; done
}
# SIGINT handler: uninstall the chartmuseum release, then stop the script.
function tearDown() {
    sudo helm delete "$CHARTMUSEUM_NAME"
    exit 0
}
checkPrerequsites
runMinikube
installChartMuseum
publishMyChart
searchPublishedChart
showWebPage
keepAlive
| true
|
150d5da8b97828023eaf831170925ceaae9858ea
|
Shell
|
USDA-ARS-GBRU/gbru_fy19_candis
|
/runall.sh
|
UTF-8
| 8,853
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
module load bbtools
# Use BBtools to demultiplex files based on the headers
for file in *_R1.fastq.gz
do
bname=`basename $file _R1.fastq.gz`
demuxbyname.sh in=$file in2=${bname}_R2.fastq.gz delimiter=colon column=10 out=${bname}_%_R1.fastq.gz out2=${bname}_%_R2.fastq.gz
done
# cpy origional data to a new folder
mkdir original
mv L_Feed_R* original
mv 16S_Feed_R* original
mv L_Water_R* original
mv L_Gut_R* original
mv 16S_Water_R* original
mv 16S_Gut_R* original
# create the manifest
ls *.fastq.gz > filenames.txt
python filenames_2_manifest.py > manifest.txt
# 16S and LSU should be processed separatly, split them now
head -n1 manifest.txt > 16S_manifest.txt
head -n1 manifest.txt > LSU_manifest.txt
grep "LSU" manifest.txt >> LSU_manifest.txt
grep "16S" manifest.txt >> 16S_manifest.txt
source activate /project/gbru/gbru_fy18_candis/qiime2-2019.1
qiime tools import --type 'SampleData[PairedEndSequencesWithQuality]' \
--input-format PairedEndFastqManifestPhred33 \
--input-path 16S_manifest.txt \
--output-path 16S_demuxed.qza
qiime tools import --type 'SampleData[PairedEndSequencesWithQuality]' \
--input-format PairedEndFastqManifestPhred33 \
--input-path LSU_manifest.txt \
--output-path LSU_demuxed.qza
time qiime demux summarize \
--i-data 16S_demuxed.qza \
--o-visualization 16S_demuxed.qzv
time qiime demux summarize \
--i-data LSU_demuxed.qza \
--o-visualization LSU_demuxed.qzv
qiime dada2 denoise-paired --i-demultiplexed-seqs LSU_demuxed.qza \
--p-trunc-len-f 220 \
--p-trunc-len-r 150 \
--p-n-threads 38 \
--p-n-reads-learn 1000000 \
--output-dir dada2_LSU \
--verbose
qiime dada2 denoise-paired --i-demultiplexed-seqs 16S_demuxed.qza \
--p-trunc-len-f 275 \
--p-trunc-len-r 240 \
--p-n-threads 38 \
--p-n-reads-learn 1000000 \
--output-dir dada2_16S \
--verbose
# add environmental metadata
# fix: the LSU table path was absolute ('/dada2_LSU/table.qza'); dada2 wrote
# its output to the relative directory, matching the 16S invocation below.
time qiime feature-table summarize \
  --i-table dada2_LSU/table.qza \
  --o-visualization LSU_table-dada2.qzv \
  --m-sample-metadata-file LSU_metadata.txt
time qiime feature-table summarize \
  --i-table dada2_16S/table.qza \
  --o-visualization 16S_table-dada2.qzv \
  --m-sample-metadata-file 16S_metadata.txt
#align rep set sequences with mafft
time qiime alignment mafft \
--i-sequences dada2_LSU/representative_sequences.qza \
--o-alignment LSU_aligned-rep-seqs.qza \
--p-n-threads 16
time qiime alignment mafft \
--i-sequences dada2_16S/representative_sequences.qza \
--o-alignment 16S_aligned-rep-seqs.qza \
--p-n-threads 16
time qiime alignment mask \
--i-alignment 16S_aligned-rep-seqs.qza \
--o-masked-alignment 16S_masked-aligned-rep-seqs.qza
time qiime alignment mask \
--i-alignment LSU_aligned-rep-seqs.qza \
--o-masked-alignment LSU_masked-aligned-rep-seqs.qza
time qiime phylogeny fasttree \
--i-alignment LSU_masked-aligned-rep-seqs.qza \
--o-tree LSU_unrooted-tree.qza
time qiime phylogeny fasttree \
--i-alignment 16S_masked-aligned-rep-seqs.qza \
--o-tree 16S_unrooted-tree.qza
time qiime phylogeny midpoint-root \
--i-tree 16S_unrooted-tree.qza \
--o-rooted-tree 16S_rooted-tree.qza
time qiime phylogeny midpoint-root \
--i-tree LSU_unrooted-tree.qza \
--o-rooted-tree LSU_rooted-tree.qza
# download the taxonomy from databases from Silva
mkdir taxonomy
mkdir taxonomy/SILVA_132_QIIME_release
cd taxonomy/SILVA_132_QIIME_release
wget https://www.arb-silva.de/fileadmin/silva_databases/qiime/Silva_132_release.zip
unzip Silva_132_release.zip
cd ../
#mkdir LSU_silva
#cd LSU_silva
# Silva 128 LSU data generated by Victor Carrillo -- Did not work only has bacteria
#https://forum.qiime2.org/t/large-23s-28s-lsu-subunit-ribosomal-rna-in-qiime-compatible-silva-database/3126/16
#wget https://www.dropbox.com/s/bcwo1qz0oci849k/LSU_cluster_99.zip?dl=0
#unzip LSU_cluster_99.zip?dl=0
#rm LSU_cluster_99.zip?dl=0
#format database
mkdir new_LSU
cd new_LSU
wget https://www.arb-silva.de/fileadmin/silva_databases/release_132/Exports/SILVA_132_LSURef_tax_silva_trunc.fasta.gz
gunzip SILVA_132_LSURef_tax_silva_trunc.fasta.gz
prep_silva_data.py --infile SILVA_132_LSURef_tax_silva_trunc.fasta \
--taxafile LSU_taxonomy.txt \
--outfasta LSU_centroids.fasta \
--clusterid 0.99 \
--threads 36
cd ../..
# import data, trim the databases to the region of interest, and train classiifer
## Silva_132_release SSU
qiime tools import \
--type 'FeatureData[Sequence]' \
--input-path taxonomy/SILVA_132_QIIME_release/rep_set/rep_set_16S_only/99/silva_132_99_16S.fna \
--output-path taxonomy/SILVA_132_QIIME_release/rep_set/rep_set_16S_only/99/silva_132_99_16S.qza
qiime tools import \
--type 'FeatureData[Taxonomy]' \
--input-format HeaderlessTSVTaxonomyFormat \
--input-path taxonomy/SILVA_132_QIIME_release/taxonomy/16S_only/99/majority_taxonomy_7_levels.txt \
--output-path taxonomy/SILVA_132_QIIME_release/taxonomy/16S_only/99/majority_taxonomy_7_levels.qza
# --- 16S classifier training (SILVA 132, 99% OTUs) ---
# Extract the 27F/534R amplicon region from the reference reads,
# keeping fragments between 300 and 600 bp.
qiime feature-classifier extract-reads \
--i-sequences taxonomy/SILVA_132_QIIME_release/rep_set/rep_set_16S_only/99/silva_132_99_16S.qza \
--p-f-primer AGAGTTTGATCCTGGCTCAG \
--p-r-primer ATTACCGCGGCTGCTGG \
--p-min-length 300 \
--p-max-length 600 \
--o-reads taxonomy/SILVA_132_QIIME_release/rep_set/rep_set_16S_only/99/silva_132_99_16S_trimmed_27-534.qza
# Train a naive-Bayes classifier on the trimmed reference reads.
qiime feature-classifier fit-classifier-naive-bayes \
--i-reference-reads taxonomy/SILVA_132_QIIME_release/rep_set/rep_set_16S_only/99/silva_132_99_16S_trimmed_27-534.qza \
--i-reference-taxonomy taxonomy/SILVA_132_QIIME_release/taxonomy/16S_only/99/majority_taxonomy_7_levels.qza \
--o-classifier 16S_classifier.qza
## Silva 132 LSU
# Import the LSU reference sequences and their headerless TSV taxonomy.
qiime tools import \
--type 'FeatureData[Sequence]' \
--input-path taxonomy/new_LSU/LSU_centroids_clean.fasta \
--output-path taxonomy/new_LSU/LSU_centroids.qza
qiime tools import \
--type 'FeatureData[Taxonomy]' \
--input-format HeaderlessTSVTaxonomyFormat \
--input-path taxonomy/new_LSU/LSU_taxonomy.txt \
--output-path taxonomy/new_LSU/LSU_taxonomy.qza
# Extract the LSU amplicon region (100-400 bp) and train its classifier.
qiime feature-classifier extract-reads \
--i-sequences taxonomy/new_LSU/LSU_centroids.qza \
--p-f-primer AACKGCGAGTGAAGCRGBM \
--p-r-primer TCTTTCCCTCACGGTACTTG \
--p-min-length 100 \
--p-max-length 400 \
--verbose \
--o-reads taxonomy/new_LSU/LSU_taxonomy_trimmed_200-481.qza
qiime feature-classifier fit-classifier-naive-bayes \
--i-reference-reads taxonomy/new_LSU/LSU_taxonomy_trimmed_200-481.qza \
--i-reference-taxonomy taxonomy/new_LSU/LSU_taxonomy.qza \
--o-classifier LSU_classifier.qza
# Classify the DADA2 representative sequences taxonomically
# (-p-n-jobs 34: parallel workers; adjust to the host's core count).
## 16S
time qiime feature-classifier classify-sklearn \
--i-classifier 16S_classifier.qza \
--i-reads dada2_16S/representative_sequences.qza \
--o-classification 16S_taxonomy.qza \
--p-n-jobs 34 \
--verbose
time qiime feature-classifier classify-sklearn \
--i-classifier LSU_classifier.qza \
--i-reads dada2_LSU/representative_sequences.qza \
--o-classification LSU_taxonomy.qza \
--p-n-jobs 34 \
--verbose
# Plot per-sample taxonomic composition as interactive bar charts.
qiime taxa barplot \
--i-table dada2_LSU/table.qza \
--i-taxonomy LSU_taxonomy.qza \
--m-metadata-file LSU_metadata.txt \
--o-visualization LSU_taxa-bar-plots.qzv
qiime taxa barplot \
--i-table dada2_16S/table.qza \
--i-taxonomy 16S_taxonomy.qza \
--m-metadata-file 16S_metadata.txt \
--o-visualization 16S_taxa-bar-plots.qzv
# Core phylogenetic diversity metrics, rarefied to 5000 reads per sample.
qiime diversity core-metrics-phylogenetic \
--i-table dada2_16S/table.qza \
--i-phylogeny 16S_rooted-tree.qza \
--m-metadata-file 16S_metadata.txt \
--p-n-jobs 30 \
--output-dir 16S_diversity \
--p-sampling-depth 5000
qiime diversity core-metrics-phylogenetic \
--i-table dada2_LSU/table.qza \
--i-phylogeny LSU_rooted-tree.qza \
--m-metadata-file LSU_metadata.txt \
--p-n-jobs 30 \
--output-dir LSU_diversity \
--p-sampling-depth 5000
# pip install deicode
# qiime dev refresh-cache
# DEICODE robust Aitchison PCA is the preferred ordination method here
# because the feature table holds compositional count data.
qiime deicode rpca \
--i-table dada2_16S/table.qza \
--p-min-feature-count 10 \
--p-min-sample-count 500 \
--o-biplot 16S_ordination.qza \
--o-distance-matrix 16S_distance.qza
qiime emperor biplot \
--i-biplot 16S_ordination.qza \
--m-sample-metadata-file 16S_metadata.txt \
--m-feature-metadata-file 16S_taxonomy.qza \
--o-visualization 16S_biplot.qzv \
--p-number-of-features 5
qiime deicode rpca \
--i-table dada2_LSU/table.qza \
--p-min-feature-count 10 \
--p-min-sample-count 500 \
--o-biplot LSU_ordination.qza \
--o-distance-matrix LSU_distance.qza
qiime emperor biplot \
--i-biplot LSU_ordination.qza \
--m-sample-metadata-file LSU_metadata.txt \
--m-feature-metadata-file LSU_taxonomy.qza \
--o-visualization LSU_biplot.qzv \
--p-number-of-features 5
| true
|
3e602741048e7c0bc2697e038dff8de777c4f5c5
|
Shell
|
jakewendt/nrl_solar_physics
|
/Software/RawLZCD/CopyRawCDFiles
|
UTF-8
| 1,364
| 3.578125
| 4
|
[] |
no_license
|
#! /bin/sh -a
#
#
# begin script CopyRawCDFiles
#
# author: the Jake
# born: 000705
#
# modifications:
# 000719 - the Jake - including jget technique
# 011114 - the jake - began trying to link file instead of copy them
#
#
# CopyCDFiles 1266
#
# Need to set CDLTS, CDROOT before running manually
# (SOCDRAW and STATUSLOG are also read from the environment)
#
# part of RawLZCD
#
# purpose: Copies/Links files to CD structure
#
# 020318 Jake Added CD-NUMBER line
#

# Print usage and return to the caller (caller decides the exit code).
Usage()
{ echo
echo "Usage: $0 CDNUM"
echo
echo "Example: $0 1266"
echo
return;
}

if [ $# -ne 1 ]; then Usage; exit 1; fi

# The CD's list file must exist, be readable and be non-empty.
if [ -r "$CDLTS/$1.list" ] && [ -s "$CDLTS/$1.list" ]
then
CDNUM=$1
else
# NOTE(review): $TYPE is never set in this script; it is expected to come
# from the caller's environment (otherwise it expands empty).
echo " ERROR: List not found for $TYPE CD $1"
Usage
exit 1
fi

# Wipe any previously staged CD contents; abort if CDROOT is bogus.
if [ -d "$CDROOT" ]
then
echo " Clearing out existing data..."
cd "$CDROOT" || exit 1
/bin/rm -rf data/
/bin/rm -rf document/
/bin/rm -rf index/
/bin/rm -rf voldesc.sfd
else
echo "Bad CDROOT"
exit 1
fi
#echo " Creating directory structure..."
#for EACHDIR in `cat $SOCDRAW/dirstruct`
#do
# mkdir -p $EACHDIR
#done

echo "CD-NUMBER: $CDNUM" > "$CDROOT/voldesc.sfd"

# Process directories newest-first: append each voldesc.sfd and hard-link
# every regular file into the CD tree via jlink.
for EACHDIR in $(/bin/sort -r "$CDLTS/$CDNUM.list")
do
#cd $SOCDRAW/SO_ALL_LZ_$EACHDIR
cd "$SOCDRAW/$EACHDIR" || exit 1
cat ./voldesc.sfd >> "$CDROOT/voldesc.sfd"
find ./* -type f -exec jlink {} "$CDROOT" \; # newest first
done

# Timestamp the run in the status log.
/usr/ucb/echo -n "Files Copied/Linked:" >> "$STATUSLOG"
date >> "$STATUSLOG"
exit
#
# end script CopyRawCDFiles
#
| true
|
31504583a3eba35088331d19132fd27fe138f40d
|
Shell
|
hype-moment/Dotfiles
|
/rofi/scripts/power.sh
|
UTF-8
| 629
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Rofi power menu: shutdown / reboot / lock / suspend / logout.

# Build the rofi invocation as an array. The previous string form relied on
# word splitting and passed a literal, unexpanded "~" in the theme path
# (tilde expansion does not happen after parameter expansion).
rofi_cmd=(rofi -theme "$HOME/.config/rofi/Power-Menus/Power2.rasi")

# Menu entries (icon-font glyphs).
shutdown="襤"
reboot="ﰇ"
lock=""
suspend="鈴"
logout=""

# Entries joined with literal "\n"; expanded by `echo -e` below.
options="$shutdown\n$reboot\n$lock\n$suspend\n$logout"

# Show the menu and capture the chosen entry.
chosen="$(echo -e "$options" | "${rofi_cmd[@]}" -p " " -dmenu -selected-row 1)"

case "$chosen" in
    "$shutdown")
        systemctl poweroff
        ;;
    "$reboot")
        systemctl reboot
        ;;
    "$lock")
        i3lock
        ;;
    "$suspend")
        # Pause music and mute audio before suspending.
        mpc -q pause
        amixer set Master mute
        systemctl suspend
        ;;
    "$logout")
        openbox --exit
        ;;
esac
| true
|
96838c0877c051285251f567d4eb90acd3c3fae0
|
Shell
|
Schwib225/Bash-Scripts
|
/basics/for_do_done.sh
|
UTF-8
| 118
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the letters A-Z together with their 1-based position.
COUNT=0
for LETTER in {A..Z}
do
    # Arithmetic expansion replaces the old external `expr` call (one fork
    # per iteration for no benefit).
    COUNT=$((COUNT + 1))
    echo "Letter $COUNT is [$LETTER]"
done
| true
|
c9167176670d04617be276ec9c98ca85c55e1e26
|
Shell
|
ShenXiaoxue/acoustic-sources
|
/1_4Cavity_monopole/Allclean
|
UTF-8
| 1,630
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
#------------------------------------------------------------------------------
# ========= |
# \\ / F ield | foam-extend: Open Source CFD
# \\ / O peration |
# \\ / A nd | For copyright notice see file Copyright
# \\/ M anipulation |
#------------------------------------------------------------------------------
# License
# This file is part of foam-extend.
#
# foam-extend is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# foam-extend is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with foam-extend. If not, see <http://www.gnu.org/licenses/>.
#
# Script
# Allclean
#
# Description
# Removes editor backups, core dumps, ParaView state files and logs,
# then resets all tutorial cases via foamCleanTutorials.
#
#------------------------------------------------------------------------------
echo "Cleaning backup files"
# Editor backup files anywhere in the tree.
find . -type f \( -name "*~" -o -name "*.bak" \) -exec rm {} \;
# Core dumps (plain "core" and numbered "core.<pid>").
find . \( -name 'core' -o -name 'core.[1-9]*' \) -exec rm {} \;
# ParaView state (.pvs) and OpenFOAM case marker (.foam) files.
find . \( -name '*.pvs' -o -name '*.foam' \) -exec rm {} \;
# Log artifacts from previous test runs; errors are ignored on purpose.
rm logs > /dev/null 2>&1
rm testLoopReport > /dev/null 2>&1
echo ""
foamCleanTutorials cases
# ----------------------------------------------------------------- end-of-file
| true
|
7a0a8fd7d2231a65706925690f99e175f04182ba
|
Shell
|
AlexDikelsky/dotfiles
|
/.profile
|
UTF-8
| 1,360
| 2.875
| 3
|
[] |
no_license
|
# ~/.profile: executed by the command interpreter for login shells.
# This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login
# exists.
# see /usr/share/doc/bash/examples/startup-files for examples.
# the files are located in the bash-doc package.
# the default umask is set in /etc/profile; for setting the umask
# for ssh logins, install and configure the libpam-umask package.
#umask 022
# if running bash
if [ -n "$BASH_VERSION" ]; then
    # include .bashrc if it exists
    if [ -f "$HOME/.bashrc" ]; then
        . "$HOME/.bashrc"
    fi
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/bin" ] ; then
    PATH="$HOME/bin:$PATH"
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/.local/bin" ] ; then
    PATH="$HOME/.local/bin:$PATH"
fi
# Idris 2 toolchain.
# NOTE(review): this line is duplicated further down; one copy could go.
PATH="/home/alex/.idris2/bin:$PATH"
# Rust toolchain environment (cargo/rustc paths).
. "$HOME/.cargo/env"
# Desktop notification for long-running commands: `sleep 10; alert`.
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# Coloured grep variants and ls shorthands.
alias egrep='egrep --color=auto'
alias fgrep='fgrep --color=auto'
alias grep='grep --color=auto'
alias l='ls -CF'
alias la='ls -A'
alias ll='ls -alF'
alias ls='ls --color=auto'
PATH="/home/alex/.idris2/bin:$PATH"
PATH="/home/alex/.local/bin:$PATH"
# Lean toolchain manager (elan).
export PATH="$HOME/.elan/bin:$PATH"
# Git Credential Manager: store credentials via the freedesktop Secret Service.
export GCM_CREDENTIAL_STORE=secretservice
| true
|
d9c9dbc9d11d6e11386c223ebeca3e08d2df881d
|
Shell
|
cod-developers/qchem-converters
|
/codes/quantum-espresso/examples/ESM_example/reference/esm_data.sh
|
UTF-8
| 498
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
# Prints the ESM summary data (charge and potentials) to stdout
# Usage: esm_data.sh {pw output filename}
#
# Original version by Brandon Wood and Minoru Otani
#
echo '# z (A) rho (e) Avg v_hartree Avg v_local Avg v_hart+v_loc'
echo '# (eV/A) (eV/A) (eV/A)'
# Third field of the "FFT grid: (nx,ny,nz)" line is the z-grid size;
# add 5 to also skip the table's header lines below.
ngrid=$(grep 'FFT grid: (' "$1" | awk -F ',' '{print $3}' | sed 's/)//')
ngrid=$((ngrid + 5))
# Print the table that follows the "ESM Charge and Potential" marker,
# dropping the marker and header lines.
grep -A"${ngrid}" 'ESM Charge and Potential' "$1" | tail -n"${ngrid}" | tail -n+6
| true
|
96f545d94dffe4704dca24ffe31393347ca726e2
|
Shell
|
aviau/dotfiles
|
/macos/.bashrc
|
UTF-8
| 2,178
| 3.625
| 4
|
[] |
no_license
|
# Set variables
export EDITOR="vim"
export GOPATH=$HOME/go
# Let gpg's pinentry know which terminal to prompt on, in TTY mode.
export GPG_TTY=$(tty)
export PINENTRY_USER_DATA="USE_TTY=1"
# Enable completion (Homebrew bash-completion, Apple Silicon prefix)
eval $(/opt/homebrew/bin/brew shellenv)
if [[ -f ${HOMEBREW_PREFIX}/etc/profile.d/bash_completion.sh ]]; then
    . ${HOMEBREW_PREFIX}/etc/profile.d/bash_completion.sh
fi
# GPG+SSH Agent: use gpg-agent's ssh-agent emulation; start it if its
# socket is missing.
export SSH_AUTH_SOCK=${HOME}/.gnupg/S.gpg-agent.ssh
if [[ ! -S ${HOME}/.gnupg/S.gpg-agent ]]; then
    gpg-agent --daemon --quiet > /dev/null
fi
# set PATH to include golang stuff
if [ -d "$GOPATH/bin" ] ; then
    export PATH="$GOPATH/bin:$PATH"
fi
# set path to include flare stuff
if [ -d "$HOME/git/flare/flare-tools/bin" ] ; then
    export PATH="$HOME/git/flare/flare-tools/bin:$PATH"
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/bin" ] ; then
    export PATH="$HOME/bin:$PATH"
fi
# Rebuild the prompt before every prompt display (see set_prompt below).
PROMPT_COMMAND=set_prompt
# Build PS1 for each prompt: "user@host:cwd$ ", prefixed with the previous
# command's exit code in bold red when it was non-zero.
set_prompt() {
    # Capture exit code of last command before anything else clobbers it.
    local ex=$?
    #----------------------------------------------------------------------------#
    # Bash text colour specification: \e[<STYLE>;<COLOUR>m
    # (Note: \e = \033 (oct) = \x1b (hex) = 27 (dec) = "Escape")
    # Styles: 0=normal, 1=bold, 2=dimmed, 4=underlined, 7=highlighted
    # Colours: 31=red, 32=green, 33=yellow, 34=blue, 35=purple, 36=cyan, 37=white
    #----------------------------------------------------------------------------#
    local red='\e[1;31m'
    local reset='\e[0m'
    # Set prompt content
    PS1="\u@\h:\w$\[$reset\] "
    # If exit code of last command is non-zero, prepend this code to the prompt
    # (an `if` instead of `&&` so the function never returns non-zero itself).
    if [[ "$ex" -ne 0 ]]; then
        PS1="$red$ex$reset|$PS1"
    fi
    # Fixed: the old final line prepended "\[$color\]" with $color undefined,
    # injecting empty readline escape markers into PS1.
}
# Export so subshells inherit the prompt builder.
export -f set_prompt
# pyenv: shims on PATH plus shell integration, if installed.
if [ -d "$HOME/.pyenv" ]; then
    eval "$(pyenv init --path)"
    eval "$(pyenv init -)"
fi
# nvm (installed via Homebrew)
export NVM_DIR="$HOME/.nvm"
[ -s "${HOMEBREW_PREFIX}/opt/nvm/nvm.sh" ] && \. "${HOMEBREW_PREFIX}/opt/nvm/nvm.sh" # This loads nvm
[ -s "${HOMEBREW_PREFIX}/opt/nvm/etc/bash_completion.d/nvm" ] && \. "${HOMEBREW_PREFIX}/opt/nvm/etc/bash_completion.d/nvm" # This loads nvm bash_completion
# Set the terminal window title to the given arguments.
function title {
    echo -ne "\033]0;"$*"\007"
}
# direnv: per-directory environment loading.
eval "$(direnv hook bash)"
| true
|
a6fbd5280b1cbd6963a5728052c9273ed255499f
|
Shell
|
demsheng/wxWidgets-example
|
/chap20/install/makeinno.sh
|
UTF-8
| 3,282
| 4.3125
| 4
|
[] |
no_license
|
#! /bin/sh
# Make an Inno Setup distribution list, where files and dirs are represented by
# sections like this:
# [Dirs]
# Name: {app}\backgrounds
#
# [Files]
# Source: C:\program\setup\about.htm; DestDir: {app}\; DestName: about.htm
#
#
# Usage: makeinno.sh sourcedir inno-topfile inno-bottomfile destfile
# For example: makeinno.sh c:/project/allfiles c:/project/innotop.txt c:/project/innobott.txt c:/project/project.iss
#
PROGNAME=$0
SOURCEDIR=$1
TOPFILE=$2
BOTTOMFILE=$3
INNOFILE=$4
TEMPDIR=/tmp
# Validate arguments and that the inputs exist; creates TEMPDIR if needed.
dochecks()
{
if [ "$SOURCEDIR" = "" ] || [ "$TOPFILE" = "" ] || [ "$BOTTOMFILE" = "" ] || [ "$INNOFILE" = "" ] ; then
usage
fi
if [ ! -d $SOURCEDIR ]; then
echo "Sorry, the source directory $SOURCEDIR does not exist."
usage
fi
if [ ! -f $TOPFILE ]; then
echo "Sorry, the Inno Setup header $TOPFILE does not exist."
usage
fi
if [ ! -f $BOTTOMFILE ]; then
echo "Sorry, the Inno Setup header $BOTTOMFILE does not exist."
usage
fi
if [ ! -d $TEMPDIR ]; then
mkdir $TEMPDIR
fi
}
# In-place sed: apply $2 (a sed expression) to file $1 via a temp copy.
doreplace()
{
thefile=$1
theexpr=$2
if [ -f $thefile ]; then
sed -e "$theexpr" < $thefile > $thefile.tmp
mv $thefile.tmp $thefile
else
echo "*** $thefile not found."
fi
}
# Walk SOURCEDIR, emit [Dirs] and [Files] sections, then concatenate
# TOPFILE + generated sections + BOTTOMFILE into INNOFILE.
generateinno()
{
# SRCDIR=`cygpath -u $SRCDIR`
# DESTDIR=`cygpath -u $DESTDIR`
# TEMPDIR=`cygpath -u $TEMP`
# Generate a list of all files in the distribution.
# We pass the output through sed in order to remove the preceding "./"
# Also substitute space for @, and put them back later
# (the @ trick lets the unquoted `for` loop survive names with spaces).
cd $SOURCEDIR
find . -print | sed -e "s/\.\\///g" | sed -e "s/ /@/g" > $TEMPDIR/files1.tmp
echo "[Dirs]" > $TEMPDIR/files2.tmp
for line in `cat $TEMPDIR/files1.tmp` ; do
# If a directory, add to file
if [ -d $line ] ; then
# The relative path
# TODO: make into DOS filename form
#line2=`cygpath -w $line`
line2=$line
echo " Name: {app}\\"$line2 >> $TEMPDIR/files2.tmp
fi
done
echo "" >> $TEMPDIR/files2.tmp
echo "[Files]" >> $TEMPDIR/files2.tmp
for line in `cat $TEMPDIR/files1.tmp` ; do
# If not a directory, add to file
if [ ! -d $line ] ; then
# The relative path
line2=$line
# The absolute path
line1=$SOURCEDIR"\\"$line2
pathonly=`dirname $line`
echo " Source: "$line1"; DestDir: {app}\\"$pathonly >> $TEMPDIR/files2.tmp
fi
done
echo "" >> $TEMPDIR/files2.tmp
# Convert forward slashes to backslashes, then restore the spaces.
doreplace $TEMPDIR/files2.tmp "s/\//\\\/g"
doreplace $TEMPDIR/files2.tmp "s/@/ /g"
# Concatenate the 3 sections
cat $TOPFILE $TEMPDIR/files2.tmp $BOTTOMFILE > $INNOFILE
# rm -f $TEMPDIR/files1.tmp
}
# Print usage and exit non-zero.
usage()
{
echo Usage: $PROGNAME sourcedir inno-topfile inno-bottomfile destfile
echo For example: $PROGNAME c:/project/allfiles c:/project/innotop.txt c:/project/innobott.txt c:/project/project.iss
echo Remember to use paths of the form c:/thing rather than /c/thing.
exit 1
}
dochecks
generateinno
| true
|
cc494d17b9778d0bd69f8910db36a5952ffc11b4
|
Shell
|
adkinsjd/compactor
|
/vagrant/provision.sh
|
UTF-8
| 1,435
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Vagrant provisioner: install build dependencies, then build a patched
# Mesos fork exposing the pong-process example binary.
apt-get update
apt-get -y install \
autoconf \
git \
libapr1 \
libapr1-dev \
libaprutil1 \
libaprutil1-dev \
libcurl4-openssl-dev \
libsasl2-dev \
libsvn-dev \
libtool \
maven \
openjdk-7-jdk \
python-dev \
python-pip \
zookeeper
# Ensure java 7 is the default java.
update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
# Set the hostname to the IP address. This simplifies things for components
# that want to advertise the hostname to the user, or other components.
hostname 192.168.33.2
MESOS_VERSION=0.20.1
# Clone and build the wickman/mesos fork (pong_example branch) out-of-tree,
# then symlink the resulting pong-process binary to ./pong.
function build_mesos {
# wget -q -c http://downloads.mesosphere.io/master/ubuntu/12.04/mesos_${MESOS_VERSION}-1.0.ubuntu1204_amd64.deb
# dpkg --install mesos_${MESOS_VERSION}-1.0.ubuntu1204_amd64.deb
git clone https://github.com/wickman/mesos mesos-fork
pushd mesos-fork
git checkout wickman/pong_example
./bootstrap
popd
mkdir -p mesos-build
pushd mesos-build
../mesos-fork/configure
pushd 3rdparty
make
popd
pushd src
make pong-process
popd
popd
ln -s mesos-build/src/pong-process pong
}
# Disable strict host-key checking for ssh back into this very host.
function install_ssh_config {
cat >> /etc/ssh/ssh_config <<EOF
# Allow local ssh w/out strict host checking
Host $(hostname)
    StrictHostKeyChecking no
    UserKnownHostsFile /dev/null
EOF
}
# Install tox for running the Python test suite.
function install_tox {
pip install tox
}
install_ssh_config
install_tox
build_mesos
| true
|
3e5eca07935c95c80afcdcb0c6fc94bdc6f7a477
|
Shell
|
chi0tzp/bin-dir
|
/se7endb
|
UTF-8
| 7,523
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
################################################################################
## se7endb.sh <flag>                                                          ##
## Mount/unmount the SevenDB remote directories FILMS001..FILMS007 (hosted    ##
## on pi7db) using sshfs / fusermount3, or report their mount status.         ##
##                                                                            ##
## Usage: se7endb [options]                                                   ##
## Options:                                                                   ##
##   -m <mount_point> : directory under which the shares are mounted          ##
##   <flag>           : "on" to mount, "off" to unmount, empty for status     ##
################################################################################

# Terminal styling helpers.
b=$(tput bold)
n=$(tput sgr0)
red=$(tput setaf 1)
green=$(tput setaf 2)
reset=$(tput sgr0)

# Print usage to stderr and exit non-zero.
help(){
    echo "${b}Usage:${n} se7endb.sh [-m <mount_point>] <flag>" 1>&2; exit 1;
}

# Initialize variables
OPTIND=1
SEVENDB_MOUNT_POINT=~/SEVENDB

# The seven FILMS shares; remote layout is pi7db:/home/sevendb/films<i>/FILMS00<i>.
# (The SERIES share remains intentionally disabled, as in the original script.)
shares=(1 2 3 4 5 6 7)

# Parse command line arguments
while getopts ":m:" options
do
    case $options in
        m ) SEVENDB_MOUNT_POINT="$OPTARG"
            ;;
        * ) help
            ;;
    esac
done
shift $((OPTIND - 1))

if [[ $# -gt 0 ]]; then
    # A flag has been given
    flag=$1
    if [[ "${flag}" == "on" ]]; then
        echo "Mount SEVENDB..."
        mkdir -p "$SEVENDB_MOUNT_POINT"
        # Mount every FILMS share (the copy-pasted per-share blocks are now a loop).
        for i in "${shares[@]}"; do
            mkdir -p "$SEVENDB_MOUNT_POINT/FILMS00$i"
            sshfs -o idmap=user "pi7db:/home/sevendb/films$i/FILMS00$i" "$SEVENDB_MOUNT_POINT/FILMS00$i"
        done
    elif [[ "${flag}" == "off" ]]; then
        echo "Unmount SEVENDB..."
        for i in "${shares[@]}"; do
            fusermount3 -u "$SEVENDB_MOUNT_POINT/FILMS00$i"
        done
    else
        echo "${b}${red}[Invalid flag]${reset}${n} Choose:"
        echo " -- ${b}${red}on${reset}${n} (to mount SEVENDB on $SEVENDB_MOUNT_POINT),"
        echo " -- ${b}${red}off${reset}${n} (to unmount SEVENDB from $SEVENDB_MOUNT_POINT), or "
        echo " -- leave it ${b}${red}empty${reset}${n} for getting current status"
    fi
else
    # No flag has been given -- print the mount status of every share.
    for i in "${shares[@]}"; do
        mp="$SEVENDB_MOUNT_POINT/FILMS00$i"
        if mountpoint -q "$mp"; then
            echo "SEVENDB/FILMS00$i is mounted on ${b}${red}$mp${reset}${n}"
            lsof -w "$mp"
        else
            echo "SEVENDB/FILMS00$i is ${b}${red}not${reset}${n} mounted on ${b}${red}$mp${reset}${n}"
        fi
    done
fi
| true
|
36b9c2961ae17b69ce1cc0c43f661a82046b9567
|
Shell
|
ernestchu/java-compiler-front-end
|
/src/utils/ListTokens.sh
|
UTF-8
| 148
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Print, space-separated, every token name returned by a lex/flex scanner
# specification (every "return TOKEN;" occurrence in the given file).
if [ $# != 1 ]
then
    echo "Usage: $0 file (e.g. Scanner.l)"
    exit 1
fi
# Extract the identifier between "return" and ";" on each matching line,
# then join all tokens onto one line. "$1" is quoted to survive spaces.
sed -n 's/.*return *\(.*\) *;.*/\1/p' "$1" | tr '\n' ' '
echo
| true
|
8f3399291f584b7b84713a71cb9a36f722fb8b0c
|
Shell
|
fov42550564/command
|
/xffmpeg
|
UTF-8
| 9,027
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
#############################################
# author:fang
# version : v1.0
# name : xffmpeg
# description: convenience wrapper around common ffmpeg operations
# CopyRight@fangyunjiang[42550564@qq.com]
#############################################
# Provides __msgbox, __init_opts, __check_opts and friends.
source ~/command/common
_oper=0
# Print a cheat sheet of raw ffmpeg commands (messages kept in Chinese).
function show_full_help() {
    echo "缩放视频"
    echo "ffmpeg -i 1.mp4 -s 320x240 -y -acodec copy 2.mp4"
    echo "ffmpeg -i 1.mp4 -vf scale=320:240 -y -acodec copy 2.mp4"
    echo "ffmpeg -i 1.mp4 -vf scale=iw/2:ih/2 -y -acodec copy 2.mp4"
    echo "ffmpeg -i 1.mp4 -vf scale=320:-2 -y -acodec copy 2.mp4"
    echo
    echo "按照帧剪切视频图片"
    echo "ffmpeg -i 1.mp4 -vf \"select=between(n\,0\,10)\" -y -acodec copy 2.mp4"
    echo
    echo "按照帧生成图片"
    echo "ffmpeg -i 1.mp4 -vf \"select=between(n\,0\,2)\" -f image2 fang/%02d.jpg"
    echo
    echo "提取关键帧 eq(pict_type\,B) eq(pict_type\,P)"
    echo "ffmpeg -i 2.mp4 -vf select='eq(pict_type\,I)' -vsync 2 -f image2 fang/%02d.jpeg"
    echo
    echo "把视频5-10s和15-20s静音"
    echo "ffmpeg -i 1.mp4 -af \"volume=enable='between(t,5,10)':volume=0, volume=enable='between(t,15,20)':volume=0\" 2.mp4"
    echo
    echo "添加水印"
    echo "ffmpeg -i 2.mp4 -i logo.png -filter_complex \"overlay=W-w-5:H-h-5\" -codec:a copy -y 3.mp4"
    echo "ffmpeg -i 2.mp4 -i logo.png -filter_complex \"overlay=main_w-overlay_w-5:main_h-overlay_h-5\" -codec:a copy -y 3.mp4"
    echo
    echo "去除水印"
    echo "ffmpeg -i 3.mp4 -vf delogo=x=5:y=5:w=72:h=72 4.mp4"
    echo
    echo "视频或图像加上黑边pad, 原视频1920×1080,新视频2000x1160 40x2=2000-1920 40x2=1160-1080"
    echo "ffmpeg -i 2.mp4 -vf pad=2000:1160:40:40:black -y 3.mp4"
    echo
    echo "添加水印,带scale"
    echo "ffmpeg -i 1.mp4 -vf \"movie=test.PNG,scale=100:150[watermask];[in][watermask] overlay=100:100[out]\" -y 2.mp4"
    echo
    echo "切割成m3u8"
    echo "ffmpeg -i 1.mp4 -c:v libx264 -c:a aac -map 0 -f segment -segment_list dist/index.m3u8 -segment_time 5 dist/%03d.ts"
}
# Cut: copy the segment starting at $2 lasting $3 from $1 into $4 (no re-encode).
function cut_video()
{
    if [ $# != "4" ];then
        echo "参数错误,正确格式为:xffmpeg -cut srcVideo startTime timeLong distVideo"
        exit
    fi
    echo ffmpeg -ss $2 -t $3 -accurate_seek -i "$1" -vcodec copy -acodec copy ${4} -y
    ffmpeg -ss $2 -t $3 -accurate_seek -i "$1" -vcodec copy -acodec copy ${4} -y
}
# Concatenate all inputs (all args but the last) into the last arg via
# ffmpeg's concat demuxer and a temporary list file.
function join_video()
{
    local i n arr
    if [ $# -lt 3 ];then
        echo "参数错误,正确格式为:xffmpeg -join srcVideo1 srcVideo2 ..."
        exit
    fi
    n="$#"
    arr=("$@")
    echo "file ${arr[0]}" > .__video_list
    for ((i=1; i<n-1; i++));do
        echo "file ${arr[i]}" >> .__video_list
    done
    cat .__video_list
    # After the loop i == n-1, so ${arr[i]} is the destination file.
    echo ffmpeg -f concat -i .__video_list -c copy ${arr[i]} -y
    ffmpeg -f concat -i .__video_list -c copy ${arr[i]} -y
    rm .__video_list
}
# Overlay logo $2 onto video $1 at ($3,$4); negative coordinates anchor to
# the right/bottom edge. Optional $5 $6 rescale the logo first.
function logo_video()
{
    local dist overlay
    if [ $# != "5" -a $# != "7" ];then
        echo "参数错误,正确格式为:xffmpeg -logo video logo logoX logoY [logoWidth logoHeight] distVideo"
        exit
    fi
    if [ $# = "7" ];then
        echo ffmpeg -i $2 -s $5x$6 .__$2
        ffmpeg -i $2 -s $5x$6 .__$2
        dist="$7"
    else
        cp $2 .__$2
        dist="$5"
    fi
    if (($3<0));then
        if (($4<0));then
            overlay="W-w+$3:H-h+$4"
        else
            overlay="W-w+$3:$4"
        fi
    else
        if (($4<0));then
            overlay="$3:H-h+$4"
        else
            overlay="$3:$4"
        fi
    fi
    echo ffmpeg -i "$1" -vf "movie=.__$2[watermark];[in][watermark] overlay=$overlay[out]" "$dist" -y
    ffmpeg -i "$1" -vf "movie=.__$2[watermark];[in][watermark] overlay=$overlay[out]" "$dist" -y
    rm .__$2
}
# Use image $2 (scaled to $1's resolution) as a cover shown on frames 0-1.
function cover_video()
{
    local wh
    if [ $# != "3" ];then
        echo "参数错误,正确格式为:xffmpeg -cover srcVideo srcImage distVideo"
        exit
    fi
    get_video_width_height $1 wh
    echo ffmpeg -i "$1" -i "$2" -filter_complex [1:v]scale=${wh}[ovrl],[0:v][ovrl]overlay=enable='between(n\,0\,1)' -y "$3"
    ffmpeg -i "$1" -i "$2" -filter_complex [1:v]scale=${wh}[ovrl],[0:v][ovrl]overlay=enable='between(n\,0\,1)' -y "$3"
    # Without rescaling the overlay to the video size:
    #ffmpeg -i 1.mp4 -i 1.png -filter_complex overlay=enable='between(n\,0\,1)' -y 2.mp4
    # Attach the image as cover art only (no overlay):
    #ffmpeg -i 1.mp4 -i 1.png -map 0 -map 1 -c copy -c:v:1 png -disposition:v:1 attached_pic -y 2.mp4
}
# Re-encode $1 with the given contrast ($2) and brightness ($3) into $4.
function set_contrast_brightness_video()
{
    if [ $# != "4" ];then
        echo "参数错误,正确格式为:xffmpeg -bright srcVideo contrast brightness distVideo"
        exit
    fi
    echo ffmpeg -i $1 -vf eq=contrast=$2:brightness=$3 -y $4
    ffmpeg -i $1 -vf eq=contrast=$2:brightness=$3 -y $4
}
# Store $1's duration (whole seconds) into the variable named by $2.
# Parses ffmpeg's "Duration: HH:MM:SS.xx" banner line.
function get_video_time()
{
    local list sum var
    list=($(ffmpeg -i "$1" 2>&1|grep Duration| cut -d ' ' -f 4 | sed 's/\..*//'|sed 's/:/ /g'))
    if [ -z "$list" ];then
        echo "这不是一个视频文件"
        exit
    fi
    sum=0
    for var in "${list[@]}";do
        ((sum=sum*60+var))
    done
    eval $2=$sum
}
# Store $1's resolution as "W:H" into the variable named by $2.
function get_video_width_height()
{
    local ret
    ret=$(ffmpeg -i "$1" 2>&1|grep Stream.*Video|cut -d ',' -f 3|sed 's/x/:/'|sed 's/\s//g')
    eval $2="$ret"
}
# Replace $1's audio track with $2 (looped to cover the video's duration),
# writing the result to $3.
function set_audio_video()
{
    local vtime atime n joinstr
    if [ $# != "3" ];then
        echo "参数错误,正确格式为:xffmpeg -audio srcVideo srcAudio distVideo"
        exit
    fi
    get_video_time "$1" vtime
    get_video_time "$2" atime
    # Number of audio repeats needed to cover the video (ceiling division).
    ((n=(vtime-1)/atime+1))
    if ((n>1));then
        joinstr=""
        for ((i=0;i<n;i++));do
            joinstr="$joinstr $2"
        done
        join_video $joinstr __audio.mp3
    else
        cp "$2" __audio.mp3
    fi
    echo ffmpeg -i $1 -c:v copy -an __video.mp4
    ffmpeg -i $1 -c:v copy -an __video.mp4
    echo ffmpeg -i __video.mp4 -i __audio.mp3 -t $vtime -c copy -y $3
    ffmpeg -i __video.mp4 -i __audio.mp3 -t $vtime -c copy -y $3
    #ffmpeg -i __video.mp4 -i __audio.mp3 -t $vtime –vcodec copy –acodec copy -y $3
    rm __audio.mp3
    rm __video.mp4
}
# Extract only the video ("video") or only the audio ("audio") track of $1.
function split_video_audio()
{
    if [ $# != "3" ];then
        echo "参数错误,正确格式为:xffmpeg -split srcVideo video|audio distFile"
        exit
    fi
    if [ "$2" = "video" ];then
        ffmpeg -i "$1" -vcodec copy -an "$3"
    elif [ "$2" = "audio" ];then
        ffmpeg -i "$1" -acodec copy -vn "$3"
    else
        echo "参数错误,正确格式为:xffmpeg -split srcVideo video|audio distFile"
    fi
}
# Crop a $4x$5 region at offset ($2,$3) from $1 into $6.
function crop_video()
{
    if [ $# != "6" ];then
        echo "参数错误,正确格式为:xffmpeg -crop srcVideo x y width height distVideo"
        exit
    fi
    ffmpeg -i "$1" -vf crop=${4}:${5}:${2}:${3} -y "$6"
}
# Reverse $1's video frames into $2 (audio is not reversed).
function reverse_video()
{
    if [ $# != "2" ];then
        echo "参数错误,正确格式为:xffmpeg -reverse srcVideo distVideo"
        exit
    fi
    ffmpeg -i "$1" -vf reverse -y "$2"
}
# Grab a single high-quality frame of $1 at timestamp $2 into image $3.
function get_video_image()
{
    if [ $# != "3" ];then
        echo "参数错误,正确格式为:xffmpeg -image srcVideo time distImage"
        exit
    fi
    ffmpeg -ss $2 -i "$1" -vframes 1 -q:v 2 "$3"
}
# Print frame count, resolution and duration of $1's first video stream.
function get_video_info()
{
    # ffprobe -select_streams v:0 -v quiet -show_streams -of json -i "$1"
    echo ffprobe -v quiet -select_streams v:0 -show_entries stream=nb_frames,width,height,duration -of default=nokey=0:noprint_wrappers=1 "$1"
    ffprobe -v quiet -select_streams v:0 -show_entries stream=nb_frames,width,height,duration -of default=nokey=0:noprint_wrappers=1 "$1"
}
# Show the short option summary via __msgbox (from ~/command/common).
function show_help()
{
    local -a list
    list="help"
    list=("${list[@]}" "Usage: xffmpeg [OPTIONS]")
    list=("${list[@]}" " -logo video logo logoX logoY [logoWidth logoHeight] distVideo: [设置logo -10 -10表示右下角] xffmpeg -logo 1.mp4 1.jpg 10 10 [100 100] 2.mp4")
    list=("${list[@]}" " -cut srcVideo startTime timeLong distVideo: [剪切] xffmpeg -cut 1.mp4 05 10 2.mp4")
    list=("${list[@]}" " -join srcVideo1 srcVideo2 ... distVideo: [连接] xffmpeg -join 1.mp4 2.mp4")
    list=("${list[@]}" " -cover srcVideo srcImage distVideo: [封面] xffmpeg -cover 1.mp4 1.png 2.mp4")
    list=("${list[@]}" " -bright srcVideo contrast brightness distVideo: [对比度亮度] xffmpeg -bright 1.mp4 1.1 -0.1 2.mp4")
    list=("${list[@]}" " -audio srcVideo srcAudio distVideo: [添加音频] xffmpeg -audio 1.mp4 1.mp3 2.mp4")
    list=("${list[@]}" " -split srcVideo video|audio distFile: [分离音视频] xffmpeg -split 1.mp4 video 2.mp4")
    list=("${list[@]}" " -crop srcVideo x y width height distVideo: [裁剪视频] xffmpeg -crop 1.mp4 iw/4 ih/4 iw/2 ih/2 2.mp4")
    list=("${list[@]}" " -reverse srcVideo distVideo: [倒放视频] xffmpeg -reverse srcVideo distVideo")
    list=("${list[@]}" " -image srcVideo time distImage: [截图] xffmpeg -image 1.mp4 10 1.jpg")
    list=("${list[@]}" " -i srcVideo: [查看视频信息]")
    list=("${list[@]}" " -h: show help ")
    list=("${list[@]}" " --help: show full help ")
    __msgbox "${list[@]}"
    exit
}
# Dispatch the selected sub-command; option parsing comes from ~/command/common.
function main()
{
    __init_opts "$@"
    __check_opts "-h" && show_help
    __check_opts "--help" && show_full_help
    __check_opts "-cut" && cut_video ${_args_[@]}
    __check_opts "-join" && join_video ${_args_[@]}
    __check_opts "-logo" && logo_video ${_args_[@]}
    __check_opts "-cover" && cover_video ${_args_[@]}
    __check_opts "-bright" && set_contrast_brightness_video ${_args_[@]}
    __check_opts "-audio" && set_audio_video ${_args_[@]}
    __check_opts "-split" && split_video_audio ${_args_[@]}
    __check_opts "-crop" && crop_video ${_args_[@]}
    __check_opts "-reverse" && reverse_video ${_args_[@]}
    __check_opts "-image" && get_video_image ${_args_[@]}
    __check_opts "-i" && get_video_info ${_args_[@]}
}
main "$@"
| true
|
6a61bffea673b33920a34b3f5d323dfc75e12b82
|
Shell
|
imos/imosh
|
/library/60-constants/flag.sh
|
UTF-8
| 781
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
# Code snippets meant to be eval'd inside imosh functions/scripts.
# Each readonly string below IS shell code; it is not executed here.

# Parses "$@" into IMOSH_ARGV (positional args) and IMOSH_ARGS (flag
# assignments), declares each parsed flag as a local variable, then resets
# the positional parameters to the non-flag arguments.
readonly IMOSH_PARSE_ARGUMENTS='
local IMOSH_ARGV IMOSH_ARGS
imosh::internal::parse_args arg "$@"
if [ "${#IMOSH_ARGS[*]}" -ne 0 ]; then
  local __imosh_parse_arguments_arg=""
  for __imosh_parse_arguments_arg in "${IMOSH_ARGS[@]}"; do
    eval "local ${__imosh_parse_arguments_arg}"
  done
fi
if [ "${#IMOSH_ARGV[*]}" -ne 0 ]; then
  set -- "${IMOSH_ARGV[@]}"
else
  set --
fi'

# Logs an error about the argument count and returns 1 from the caller.
readonly IMOSH_WRONG_NUMBER_OF_ARGUMENTS='
LOG ERROR "Wrong number of arguments: $#"
return 1'

# Script prologue: strict mode, imosh initialisation, and positional
# parameters replaced by the parsed IMOSH_ARGV.
readonly IMOSH_INIT='
set -e -u
imosh::internal::init "$@"
if [ "${#IMOSH_ARGV[*]}" -ne 0 ]; then
  set -- "${IMOSH_ARGV[@]}"
else
  set --
fi'

# Registries populated by flag-definition helpers elsewhere in imosh.
__IMOSH_FLAGS=()
__IMOSH_FLAGS_ALIASES=()
__IMOSH_FLAGS_MULTIALIASES=()
| true
|
fd0d19791c3a2e34b47985930d08fab8e624eee8
|
Shell
|
SnailCpp/shell
|
/tool/menu.sh
|
UTF-8
| 1,082
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive sysadmin menu: disk space, logged-on users, memory usage.

# Show disk usage per filesystem (1K blocks).
function ShowDiskSpace {
    clear;
    df -k;
}

# Show currently logged-on users.
function WhoseOnline {
    clear;
    who;
}

# Page through kernel memory statistics.
function MemeryUsage {
    clear;
    more /proc/meminfo;
}

# Draw the menu and dispatch on a single keypress.
function Menu {
    clear;
    echo -e "\t\t\tSys Admin Menu\n";
    echo -e "\t1. Display disk space";
    echo -e "\t2. Display logged on users";
    echo -e "\t3. Display memory usage";
    echo -e "\t0. Exit menu\n\n";
    echo -en "\t\tEnter option: ";
    read -n 1 option;
    case "$option" in
        0)
            clear;
            exit;;
        1)
            ShowDiskSpace;;
        2)
            WhoseOnline;;
        3)
            MemeryUsage;;
        *)
            clear;
            echo "Sorry, wrong selection.";;
    esac
}

# Loop until the user picks 0 (Menu exits the whole script).
function main {
    while true; do
        Menu;
        echo -en "\n\n\t\t\tHit any key to continue";
        read -n 1 line;
    done;
}

main;
| true
|
a99555799f96dc5f9f77c94b9371b400890d6fbe
|
Shell
|
Trietptm-on-Coding-Algorithms/dotfiles-39
|
/.zshrc
|
UTF-8
| 1,076
| 2.890625
| 3
|
[] |
no_license
|
# oh-my-zsh setup.
export ZSH=~/.oh-my-zsh
ZSH_THEME="avit"
plugins=(git zsh-autosuggestions zsh-syntax-highlighting)
source $ZSH/oh-my-zsh.sh
export EDITOR=emacs
# virtualenvwrapper: force python3, keep the shell prompt unmodified, and
# refuse pip installs outside a virtualenv.
export VIRTUALENVWRAPPER_PYTHON=$(which python3)
export VIRTUAL_ENV_DISABLE_PROMPT=true
export PIP_REQUIRE_VIRTUALENV=true
source /usr/local/bin/virtualenvwrapper.sh
# Only activate the "+" virtualenv on interactive terminals.
if (tty -s); then
    workon +
fi
# Render matplotlib plots inline in iTerm2 (reverse-video colour scheme).
export MPLBACKEND="module://itermplot"
export ITERMPLOT=rv
test -e "${HOME}/.iterm2_shell_integration.zsh" && source "${HOME}/.iterm2_shell_integration.zsh"
# https://www.emacswiki.org/emacs/AnsiTermHints
# Advertise user/cwd/host to Emacs ansi-term so directory tracking works.
if [ "$TERM" = "eterm-color" ]; then
    precmd() {
        echo -e "\033AnSiTu" "$LOGNAME" # $LOGNAME is more portable than using whoami.
        echo -e "\033AnSiTc" "$(pwd)"
        if [ $(uname) = "SunOS" ]; then
            # The -f option does something else on SunOS and is not needed anyway.
            hostname_options="";
        else
            hostname_options="-f";
        fi
        echo -e "\033AnSiTh" "$(hostname $hostname_options)" # Using the -f option can
        # cause problems on some OSes.
    }
fi
| true
|
3a6ab12921061d8948049dcd13988ded79832ce1
|
Shell
|
wudidatao/docker
|
/graylog/docker.sh
|
UTF-8
| 371
| 2.671875
| 3
|
[] |
no_license
|
#首次启动,创建容器
docker-compose up -d
#初始化容器
docker cp 9414e701e6b2:/usr/share/graylog /data/
#容器写入时会有权限问题,暂设777解决
chmod -R 777 /data/graylog
#二次启动,映射配置
docker-compose up -d
#如果需要重建容器的数据文件,可以把数据目录改个名
mv -r /usr/share/graylog /usr/share/graylog_bak
| true
|
8feca1f252e9918d03d640834a7ac40daa48d874
|
Shell
|
checkcheckzach/coding
|
/src/Leetcode/Q195_Tenth_Line.sh
|
UTF-8
| 360
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Given a text file file.txt, print just the 10th line of the file.
#
# Example:
#
# Assume that file.txt has the following content:
#
# Line 1
# Line 2
# Line 3
# Line 4
# Line 5
# Line 6
# Line 7
# Line 8
# Line 9
# Line 10
# Your script should output the tenth line, which is:
#
# Line 10
#
awk 'NR==10' file.txt
| true
|
6608d18e3599738e8eb01595c80cd67a7f9eddf8
|
Shell
|
marcusbotacin/ELF.Classifier
|
/tests/compile_tests.sh
|
UTF-8
| 74
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
for j in `ls *.c`; do
echo "Compiling "$j;
gcc $j -o ${j%.*}.bin;
done
| true
|
be92610eea4c054334d679238ebc158c22b989f3
|
Shell
|
xinxiamu/MuStudy
|
/doc/shell-study/par.sh
|
UTF-8
| 227
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# shell传递参数实例
echo "执行的文件名:$0"
echo "第一个参数:$1"
echo "第二个参数:$2"
td=$3
echo "第三个参数:${td}"
echo "参数个数:$#"
echo "参数作为字符串显示:$*"
| true
|
f7c09bf57666dd7584bc9ca2ecc7cf273c98d4c8
|
Shell
|
kakabomba/nicescripts
|
/linux/.proxmox-host-v-4/usr/local/bin/ntaxa-mount-worker.sh
|
UTF-8
| 671
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
mkdir /mnt/workers/
mkdir /mnt/workers/$1
echo "umounting kvm"
/usr/local/bin/ntaxa-umount-worker.sh $1
sleep 3
losetup /dev/loop6 /images/images/$1/vm-$1-disk-1.raw
kpartx -a /dev/loop6
vg=$(vgscan | grep 'Found volume group "main" using metadata type lvm2' | sed -e 's/^.*"\([^"]*\)".*$/\1/g')
echo "Found volume group $vg"
vgchange -ay
for i in /dev/$vg/*
do
dir="/mnt/workers/$1/"$(basename $i)
echo "creating dir '$dir' and mounting '$i'"
mkdir $dir
mount $i $dir
if [ "$?" -ne "0" ]; then
echo "cant mount '$i', removing dir '$dir'"
rmdir $dir
else
ls -l1sh $dir
fi
done
echo "All mounted"
df -h | grep '/mnt/workers/$1'
| true
|
1751d581c11855274e67f8665b855f9e3a1098ca
|
Shell
|
edwinkost/process_era5-land
|
/unify_mergegrid/etc/merge_grid_africa.sh
|
UTF-8
| 5,864
| 2.640625
| 3
|
[] |
no_license
|
printf "$0"
for arg in "$@"
do
printf " $arg"
done
echo
echo "Arg 0: $0"
echo "Arg 1: $1"
echo "Arg 2: $2"
echo "Arg 3: $3"
set -x
#~ esutanud@login-7:/rds/general/user/esutanud/ephemeral/meteo_arise/africa_example$ ls -lah Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily*/*daily.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_d2m-average/era5-land_daily_d2m-average_rempacon-150-arcsec_daily.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_d2m-maximum/era5-land_daily_d2m-maximum_rempacon-150-arcsec_daily.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_d2m-minimum/era5-land_daily_d2m-minimum_rempacon-150-arcsec_daily.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_fal-average/era5-land_daily_fal-average_rempacon-150-arcsec_daily.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_spressu-avg/era5-land_daily_spressu-avg_rempacon-150-arcsec_daily.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_t2m-average/era5-land_daily_t2m-average_rempacon-150-arcsec_daily.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_t2m-maximum/era5-land_daily_t2m-maximum_rempacon-150-arcsec_daily.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_t2m-minimum/era5-land_daily_t2m-minimum_rempacon-150-arcsec_daily.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_total-preci/era5-land_daily_total-preci_rempacon-150-arcsec_daily.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_total-ssrad/era5-land_daily_total-ssrad_rempacon-150-arcsec_daily.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_wind10m-avg/era5-land_daily_wind10m-avg_rempacon-150-arcsec_daily.nc
#~ esutanud@login-7:/rds/general/user/esutanud/ephemeral/meteo_arise/africa_example$ ls -lah Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily*/*monthly.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_d2m-average/era5-land_daily_d2m-average_rempacon-150-arcsec_monthly.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_d2m-maximum/era5-land_daily_d2m-maximum_rempacon-150-arcsec_monthly.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_d2m-minimum/era5-land_daily_d2m-minimum_rempacon-150-arcsec_monthly.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_fal-average/era5-land_daily_fal-average_rempacon-150-arcsec_monthly.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_spressu-avg/era5-land_daily_spressu-avg_rempacon-150-arcsec_monthly.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_t2m-average/era5-land_daily_t2m-average_rempacon-150-arcsec_monthly.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_t2m-maximum/era5-land_daily_t2m-maximum_rempacon-150-arcsec_monthly.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_t2m-minimum/era5-land_daily_t2m-minimum_rempacon-150-arcsec_monthly.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_total-preci/era5-land_daily_total-preci_rempacon-150-arcsec_monthly.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_total-ssrad/era5-land_daily_total-ssrad_rempacon-150-arcsec_monthly.nc
#~ Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/era5-land_daily_wind10m-avg/era5-land_daily_wind10m-avg_rempacon-150-arcsec_monthly.nc
MAIN_INPUT_FOLDER="/rds/general/user/esutanud/ephemeral/meteo_arise/africa_example/"
MAIN_INPUT_FOLDER=$1
OUTPUT_FOLDER="/rds/general/user/esutanud/ephemeral/meteo_arise/africa_example/africa_merged/"
OUTPUT_FOLDER=$2
# merging daily files
DAILY_OUTPUT_FOLDER=${OUTPUT_FOLDER}/daily/
mkdir -p ${DAILY_OUTPUT_FOLDER}
INP_FILENAME="era5-land_daily_total-preci/era5-land_daily_total-preci_rempacon-150-arcsec_daily.nc"
INP_FILENAME=$3/$3_rempacon-150-arcsec_daily.nc
OUT_FILENAME=${DAILY_OUTPUT_FOLDER}/"africa_era5-land_daily_total-preci_rempacon-150-arcsec_daily.nc"
OUT_FILENAME=${DAILY_OUTPUT_FOLDER}/africa_$3_rempacon-150-arcsec_daily.nc
BOX1_FILENAME=${MAIN_INPUT_FOLDER}"/Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/${INP_FILENAME}"
BOX2_FILENAME=${MAIN_INPUT_FOLDER}"/Africa-box2/daily_after_remapcon/test_all_variables/150_arcsec/${INP_FILENAME}"
BOX3_FILENAME=${MAIN_INPUT_FOLDER}"/Africa-box3/daily_after_remapcon/test_all_variables/150_arcsec/${INP_FILENAME}"
cdo -L -mergegrid ${BOX3_FILENAME} -mergegrid ${BOX1_FILENAME} ${BOX2_FILENAME} ${OUT_FILENAME} &
# merging monthly files
MONTHLY_OUTPUT_FOLDER=${OUTPUT_FOLDER}/monthly/
mkdir -p ${MONTHLY_OUTPUT_FOLDER}
INP_FILENAME="era5-land_daily_total-preci/era5-land_daily_total-preci_rempacon-150-arcsec_monthly.nc"
INP_FILENAME=$3/$3_rempacon-150-arcsec_monthly.nc
OUT_FILENAME=${MONTHLY_OUTPUT_FOLDER}/"africa_era5-land_daily_total-preci_rempacon-150-arcsec_monthly.nc"
OUT_FILENAME=${MONTHLY_OUTPUT_FOLDER}/africa_$3_rempacon-150-arcsec_monthly.nc
BOX1_FILENAME=${MAIN_INPUT_FOLDER}"/Africa-box1/daily_after_remapcon/test_all_variables/150_arcsec/${INP_FILENAME}"
BOX2_FILENAME=${MAIN_INPUT_FOLDER}"/Africa-box2/daily_after_remapcon/test_all_variables/150_arcsec/${INP_FILENAME}"
BOX3_FILENAME=${MAIN_INPUT_FOLDER}"/Africa-box3/daily_after_remapcon/test_all_variables/150_arcsec/${INP_FILENAME}"
cdo -L -mergegrid ${BOX3_FILENAME} -mergegrid ${BOX1_FILENAME} ${BOX2_FILENAME} ${OUT_FILENAME} &
wait
set +x
| true
|
a197e11310ad8f5fda3655dfa55386c1a5a1479e
|
Shell
|
libcommon/template-repo-py
|
/build-support/docker/adduser.sh
|
UTF-8
| 642
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
. /build-support/shell/common/log.sh
if [ -z "${UID}" ]
then
error "Must set the UID environment variable"
exit 1
fi
if [ -z "${USERNAME}" ]
then
error "Must set the USERNAME environment variable"
exit 1
fi
apk add --no-cache sudo
if ! ( getent passwd "${USERNAME}" )
then
adduser \
-h "/home/${USERNAME}" \
-s /bin/bash \
-u ${UID} \
-D \
${USERNAME}
passwd -d ${USERNAME}
info "Added user ${USERNAME} with id ${UID}"
echo "${USERNAME} ALL=(NOPASSWD) ALL" > "/etc/sudoers.d/${USERNAME}" && chmod 0440 "/etc/sudoers.d/${USERNAME}"
fi
| true
|
4f4866b2c635de51e4ab40b01bd375e04483d13a
|
Shell
|
mmoneib/shell-playground
|
/turn_based__sourced.sh
|
UTF-8
| 7,337
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
################################################################################
# Turned-Based Game Engine
#
# A backend endgine which facilitates developing turn-based games using Bash.
# The engine allows rapid development of new games by allowing the developer
# to focus on the development of the game's display, the conditions for moves,
# the conditions for winning or losing, and the heuristics which define the
# strategy and artificial intelligence of the computer openent.
# The engine allows an indefinte number of plauyers of any combination of
# humans or computers. The AI is applied using a single level of heuristics
# *mainly for performance reasons, so there's only a single level depth search
# in the state tree. Minimax can be simulated using negative heuristics for
# a defensive strategy. The menus of the game along with the desired actions
# is also fully customizable.
# This script is not meant to be used on its own as it's only meant to be
# sourced by other game-implementing script.
#
# Developer: Muhammad Moneib
################################################################################
#TODO Add description comments for each variable.
title=;
state=;
currentState=;
newState=;
menuOptions=();
menuActions=();
choices=();
heuristics=();
rules=();
winConditions=();
drawConditions=();
players=();
currentPlayer=;
isComputer=();
#TODO Add score keeping.
function _tb_echoMenu { # Prints the intro and menu options.
echo;
echo "Welcome to $title!";
echo;
echo "Please choose one of the following options:"
for ((i=0;i<${#menuOptions[@]};i++)); do
echo "$((i+1))-$(eval ${menuOptions[$i]})";
done
echo;
}
function initialize { # Sets the initial state. To be called at the beginning and after each game.
echo "Menu options, initial values, and state must be initialized using an initialize function.";
}
function echoState { # Prints the current stete in the game's format.
echo "Echoing state must be implemented.";
}
function getPotentialChoices { # For computer input, as for human, it's more efficient to get the choice and then evaluate.
echo "Load the choices array with all the possible legal choices from this current state.";
}
function _tb_checkHeuristics { # Calculates the value of the future state based on the heuritics applied.
local overallResult=0;
for ((j=0;j<${#heuristics[@]};j++)); do
${heuristics[$j]} result ${players[$currentPlayer]};
overallResult=$((overallResult+result)); #echo "J$j $result";
done
eval $1=$overallResult;
}
function _tb_isComputerTurn { # Identifies if the current player is not human.
local playerNum=$2;
if [[ ${isComputer[$playerNum]} == true ]]; then
isComputerTurn=true;
else
isComputerTurn=false;
fi
eval $1=$isComputerTurn;
}
function _tb_pointToPotentialState {
for _stateItem in ${!state[@]}; do # TODO Source out as generic array copying for both indexed and associative arrays.
newState[_stateItem]=${state[_stateItem]};
done
declare -ng currentState=newState; # g for global scope. Otherwise, declare is only function scoped.
}
function _tb_pointToCurrentState {
declare -ng currentState=state;
}
function _tb_getComputerInput {
highestValue=-9999999;
currentPlayer=$2;
getPotentialChoices;
input=choices[$((RANDOM%${#choices[@]}))];
for ((k=0;k<${#choices[@]};k++)); do
_tb_pointToPotentialState;
applyPhysics ${choices[$k]} $currentPlayer;
_tb_checkHeuristics value;
_tb_pointToCurrentState;
if (($value>$highestValue)); then
input=${choices[$k]};
highestValue=$value;
fi
done
echo "Computer made the choice "$((input+1));
eval $1=$input; echo "Highest value: $highestValue";
}
function _tb_getUserInput {
read -p "${players[$2]}'s turn. Please make a move: " -e inp;
echo;
eval $1=$inp;
}
function _tb_getInput { # Gets the current turn's input.
currentPlayer=$2;
_tb_isComputerTurn isComputerTurn $currentPlayer;
if [[ $isComputerTurn == true ]]; then
_tb_getComputerInput choice $currentPlayer;
else
isValidState=false;
while [[ $isValidState != true ]]; do
_tb_getUserInput choice $currentPlayer;
_tb_isValidState $isValidState $choice;
done
fi
eval $1=$choice;
}
function _tb_isValidState { # Invisible constraints of the game rules and visible constraints the environmetn.
isValidState=$1;
choice=$2;
for ((i=0;i<${#rules[@]};i++)); do
${rules[$i]} $isValidState $choice;
if [[ $isValidState == false ]]; then
echo "Incorrect choice as rule $((i+1)) was violated! Please make another choice.";
return;
fi
done
}
function tryChoice { # Apart of constraints, this is how the environmet of the state impacts the choice's outcome.
echo "The new state from the player's (potential) choice must be returned by the tryChoice function."
}
function applyPhysics { # Apart of constraints, this is how the environmet of the state impacts the choice's outcome.
echo "The interaction of the player's choice after it was applied with the environment must be implemented in applyPhysics function.";
}
function _tb_isWinningState { # aka. Game-Ending condition, including a draw.
isWinningState=false;
for ((i=0;i<${#winConditions[@]};i++)); do
${winConditions[$i]} result;
if [[ $result == true ]]; then
isWinningState=true;
fi
done
eval $1=$isWinningState;
}
function _tb_isDrawState { # aka. Game-Ending condition, including a draw.
isDrawState=false;
for ((i=0;i<${#drawConditions[@]};i++)); do
${drawConditions[$i]} result;
if [[ $result == true ]]; then
isDrawState=true;
fi
done
eval $1=$isDrawState;
}
function _tb_announceWinner {
echo
echo "Game is over. ${players[$currentPlayer]} wins!";
echo
}
function _tb_announceDraw {
echo
echo "Game is over with a draw.";
echo
}
function _tb_shiftPlayer {
currentPlayer=$1;
currentPlayer=$(((currentPlayer+1)%${#players[@]}));
}
function _tb_getUserMenuSelection {
local isValidInput=false;
local incorrectInputWarning=""
while [[ $isValidInput == false ]]; do
read -N 1 -p "$incorrectInputWarning ""Your choice: " inp;
echo;
for ((i=1;i<=${#menuActions[@]};i++)) do
if [[ "$inp" == "$i" ]]; then
isValidInput=true;
fi
done
incorrectInputWarning="Incorrect input, please choose one of the above options.";
done
eval ${menuActions[$(("$inp"-1))]};
}
function _initialize {
isWinningState=false;
isDrawState=false;
initialize;
}
function tb_engine { # What sets the game in motion. Caller of all other functions except initialize. Runs indefinitely and not to be overridden.
while (true); do
_initialize;
_tb_echoMenu;
_tb_getUserMenuSelection;
_tb_pointToCurrentState;
echoState;
currentPlayer=0;
while [[ $isWinningState != true ]]; do
_tb_getInput choice $currentPlayer;
applyPhysics $choice $currentPlayer;
echoState;
_tb_isWinningState isWinningState;
if [[ $isWinningState == true ]]; then
_tb_announceWinner;
break;
else
_tb_isDrawState isDrawState;
if [[ $isDrawState == true ]]; then
_tb_announceDraw;
break;
fi
fi
_tb_shiftPlayer $currentPlayer;
done
echoState
done
}
| true
|
a3e6b8766708f6e81e0ccb0e26d487ff61934f05
|
Shell
|
hreese/dotfiles
|
/.profile
|
UTF-8
| 1,835
| 2.890625
| 3
|
[] |
no_license
|
# .profile
# According to bash(1), interactive login shells read and execute
# /etc/profile
# ~/.bash_profile
# ~/.bash_login
# ~/.profile
#
# Interactive shells that are note login shells, bash reads and executes
# ~/.bashrc,
# Get the aliases and functions
if [ -n "$BASH_VERSION" ]; then
# include .bashrc if it exists
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
fi
# local binaries
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi
if [ -d "$HOME/.local/bin" ] ; then
PATH="$HOME/.local/bin:$PATH"
fi
# local config
if [ -r $HOME/.profile.local ]; then
. $HOME/.profile.local
fi
export PATH
# local TeXLive
export PATH=/usr/local/texlive/2012/bin/x86_64-linux:$PATH
export MANPATH=/usr/local/texlive/2012/texmf/doc/man:$MANPATH
export INFOPATH=/usr/local/texlive/2012/texmf/doc/info:$INFOPATH
# Google Go
#if [ -d "${HOME}/go" ]; then
# export GOROOT=${HOME}/go
# export GOBIN=${GOROOT}/bin
# export GOOS=linux
# export GOARCH=amd64
# export PATH=${PATH}:${GOBIN}
# export GOPATH=$HOME/go.heiko
#fi
export PATH=/usr/local/go/bin:$PATH
# 256 colors
if [ -e /lib/terminfo/x/xterm-256color -o -e /usr/share/terminfo/x/xterm-256color ]; then
export TERM='xterm-256color'
else
export TERM='xterm-color'
fi
# dircolors (Solarized)
# https://github.com/seebi/dircolors-solarized
if [ -r "$HOME/.dircolors-solarized/dircolors.256dark" ]; then
eval $(dircolors -b "$HOME/.dircolors-solarized/dircolors.256dark")
fi
# colorize grep matches
export GREP_OPTIONS="--color=always ${GREP_OPTIONS}"
export GREP_COLOR=31
# needed by the gpg-vim-plugin
export GPG_TTY=$(tty)
# some aliases
alias lsh='ls -lthr'
alias lsha='ls -lthra'
alias g='git'
alias s='ssh'
alias rot13="tr '[A-Za-z]' '[N-ZA-Mn-za-m]'"
export MC_SKIN="$HOME/.themes/mc-solarized-skin/solarized.ini"
| true
|
ea1593325401ad87f4f60e08e9dc30edf6333361
|
Shell
|
salmon4973/federatednode_build
|
/dist/linux/runit/mongod/run
|
UTF-8
| 981
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
exec 2>&1
#SET LIMITS
#limit fsize unlimited unlimited
ulimit -f unlimited
#limit cpu unlimited unlimited
ulimit -t unlimited
#limit as unlimited unlimited
#limit nofile 64000 64000
ulimit -n 64000
#limit rss unlimited unlimited
ulimit -m unlimited
#limit nproc 32000 32000
ulimit -u 32000
mkdir -p /var/lib/mongodb/
mkdir -p /var/log/mongodb/
CONF=/etc/mongod.conf
DAEMON=/usr/bin/mongod
DAEMONUSER=${DAEMONUSER:-mongodb}
if [ -f /etc/default/mongod ]; then . /etc/default/mongod; fi
# Handle NUMA access to CPUs (SERVER-3574)
# This verifies the existence of numactl as well as testing that the command works
NUMACTL_ARGS="--interleave=all"
if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null
then
NUMACTL="$(which numactl) $NUMACTL_ARGS"
DAEMON_OPTS=${DAEMON_OPTS:-"--config $CONF"}
else
NUMACTL=""
DAEMON_OPTS=" "${DAEMON_OPTS:-"--config $CONF"}
fi
exec chpst -u ${DAEMONUSER} $NUMACTL $DAEMON $DAEMON_OPTS
| true
|
38bbe9ea48b387e6b33561818dca83284cefa9ea
|
Shell
|
jlanga/oari_paper
|
/bin/mapping.sh
|
UTF-8
| 1,978
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
set -o nounset # Prevent using undefined variables
set -o errexit # Stop the entire script if an error found
# ENV
referenceWD=data/reference # Folder with the downloaded fasta reference
indexWD=data/index # Folder with bowtie2's reference files
mappingWD=data/mapping # Folder where to write the BAM outputs
trimmedWD=data/reads/trimmed # Folder with the trimmed reads to be mapped
mappingLogWD=results/mapping # Folder were to store logs and reports
cpu=12
# Create folders
mkdir -p $mappingWD
mkdir -p $mappingLogWD
# bowtie | samtools | picard
mapping(){
readsF=$1
readsR=$2
index=$3
logFile=$4
cpu=$5
bam=$6
fifo1_name=$(mktemp -u) # FIFO picard - samtools
mkfifo $fifo1_name
bowtie2 \
--no-unal \
-p $cpu \
-x $index \
-1 $readsF \
-2 $readsR \
| samtools view \
-@ $cpu \
-u \
- \
> $fifo1_name \
| picard-tools SortSam \
I=$fifo1_name \
O=$bam \
COMPRESSION_LEVEL=9 \
VALIDATION_STRINGENCY=SILENT \
SO=coordinate \
&> $logFile
# Clean
rm $fifo1_name
unset fifo1_name readsF readsR index logFile cpu bam
}
export -f mapping
parallel mapping \
${trimmedWD}/{}_1.fastq.gz \
${trimmedWD}/{}_2.fastq.gz \
${indexWD}/oari \
${mappingLogWD}/{}.log \
$cpu \
${mappingWD}/{}.bam \
::: {1..3}_1
# Build BAM indexes (.bai)
parallel samtools index $mappingWD/{}.bam ::: {1..3}_1
| true
|
22f9a22f1773b74424ff5dd43f1881857623c87d
|
Shell
|
e1399579/chat
|
/tests/test_connections.js
|
UTF-8
| 2,955
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
// node test_connections.js
// npm install -g ws msgpackr
// linux: export NODE_PATH=/usr/lib/node_modules/
// windows: NODE_PATH=%AppData%\npm\node_modules
const ws = require('ws');
const WebSocket = ws.WebSocket;
const msgpack = require('msgpackr');
let success = 0, fail = 0, closed = 0;
let test_num = process.argv.length > 2 ? parseInt(process.argv[2]) : 255;
let sockets = [];
const USER_ONLINE = 200;//用户上线
const USER_QUIT = 201;//用户退出
const USER_REGISTER = 204;//用户注册
const USER_LOGIN = 205;//用户登录
const ERROR = 900;//错误消息
const WARNING = 901;//警告消息
const SYSTEM = 902;//系统消息
const PORT = 8080;
const PROTOCOL = 'ws://';
const HOST = '192.168.0.10';
const SERVER_URL = PROTOCOL + HOST + ':' + PORT;
class DataHelper {
static encode(obj) {
//return JSON.stringify(obj);
return msgpack.pack(obj);
}
static decode(str) {
//return JSON.parse(str);
return msgpack.unpack(new Uint8Array(str)); //ArrayBuffer->Uint8Array
}
}
let success_set = new Set();
function init_sockets(num, sockets) {
for (let i=0;i<num;i++) {
let socket = new WebSocket(SERVER_URL);
socket.binaryType = 'arraybuffer'; //设为二进制的原始缓冲区
socket.on('open', () => {
socket.send(DataHelper.encode({
type: USER_REGISTER,
username: 'test_' + Math.random().toString(36).substr(2,10),
password: 123456
}));
});
socket.on('message', (message) => {
let dec = DataHelper.decode(message);
let type = dec.type;
switch (type) {
case ERROR:
case WARNING:
case SYSTEM:
fail++;
console.log("success:", success, "fail:", fail, "type:",dec.type);
break;
case USER_LOGIN:
++success;
console.log("success:", success, "fail:", fail);
socket.ping();
break;
case USER_ONLINE: // 每次登录会通知所有人,去重数量
// let prev = success;
// let user_id = dec.user.user_id;
// success_set.add(user_id);
// success = success_set.size;
// if (success > prev) {
// console.log("success:", success, "fail:", fail);
// }
break;
case USER_QUIT:
default:
break;
}
});
socket.on('close', (code) => {
++closed;
console.log("closed:", closed);
});
socket.on('error', (code) => {
console.log("error", code);
});
sockets[i] = socket;
}
}
init_sockets(test_num, sockets);
| true
|
30196ae1561c9b284bbe89204773364c9060c000
|
Shell
|
gitter-badger/chos
|
/utils/mkchos
|
UTF-8
| 1,775
| 3.328125
| 3
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
#!/bin/sh
#
#
# CHOS (c) 2004, The Regents of the University of California, through
# Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S. Dept. of Energy). All rights
# reserved.
#
# If you have questions about your rights to use or distribute this
# software, please contact Berkeley Lab's Technology Transfer
# Department at TTD@lbl.gov referring to "CHOS (LBNL Ref CR-2025)"
#
# NOTICE. This software was developed under funding from the U.S.
# Department of Energy. As such, the U.S. Government has been granted
# for itself and others acting on its behalf a paid-up, nonexclusive,
# irrevocable, worldwide license in the Software to reproduce, prepare
# derivative works, and perform publicly and display publicly.
# Beginning five (5) years after the date permission to assert
# copyright is obtained from the U.S. Department of Energy, and subject
# to any subsequent five (5) year renewals, the U.S. Government is
# granted for itself and others acting on its behalf a paid-up,
# nonexclusive, irrevocable, worldwide license in the Software to
# reproduce, prepare derivative works, distribute copies to the public,
# perform publicly and display publicly, and to permit others to do so.
#
#
# Description:
#
# Helper script called during the rpmbuild. This
# creates the framework directory.
#
if [ $# -lt 1 ] ; then
echo "Specify base"
exit
fi
BASE=$1
LOCAL=$BASE/local
MOUNT=/proc/chos/link/
echo "Creating directories"
mkdir $BASE
mkdir $BASE/auto
mkdir $BASE/local
echo "Creating links"
for dir in "bin" "dev" "etc" "extra" "home" "initrd" "lib" "opt" "sbin" "u" "usr" "var"
do
if [ -e $BASE/$dir ] ; then
rm $BASE/$dir
fi
ln -sf $MOUNT/$dir $BASE/$dir > /dev/null 2>&1
done
ln -sf local/proc $BASE/proc
| true
|
6501b7c9a8c2fd55d3ffc43ae9e5e76103cd98f4
|
Shell
|
silvijah/rackspace_api_tool
|
/dns.sh
|
UTF-8
| 2,369
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
function dns()
{
echo -e -n "\n\n\t1 Limits
\t2 Domains
\t3 Subdomains
\t4 Records
\t5 Reverse DNS
\t----------
\t99 Main Products Menu
\t0 Exit\n\n>>>>\t"
while true
do
read DNSMENU
case $DNSMENU in
1|limits)
source ./dns.sh
limits
;;
2|domains)
source ./dns.sh
domains
;;
3|subdomains)
source ./dns.sh
subdomains
;;
4|records)
source ./dns.sh
records
;;
5|reverse)
source ./dns.sh
reversedns
;;
99|menu)
source ./main_menu.sh
main_menu
;;
0|exit)
echo "Thank You for Using API Client"
exit
;;
*) "Choose One Of the Available Menu Options"
esac
done
}
function limits()
{
echo -e -n "\n\n\t\t1 List All Limits
\t\t2 List Limit Types
\t\t3 List Specific Limit
\t\t-------------
\t\t99 Main DNS Menu
\t\t0 Exit\n\n>>>>\t"
while true
do
read DNSLIMITS
case $DNSLIMITS in
1|alllimits)
call_dnsapi /limits |tr "{}[]" "\n" |tr "," "\n\t" |tr '\"' "\0"
source ./dns.sh
limits
;;
2|limittypes)
call_dnsapi /limits/types |tr "{}[]" "\n" |tr "," "\n\t" |tr '\"' "\0"
source ./dns.sh
limits
;;
3|specificlimit)
echo -e -n "\n\t\t\t1 RATE_LIMIT
\t\t\t2 DOMAIN_LIMIT
\t\t\t3 DOMAIN_RECORD_LIMIT
\t\t\t-----------------
\t\t\t99 Return To Limits Menu
\t\t\t0 Exit\n>>>>\t"
read SPECIFICLIMIT
case $SPECIFICLIMIT in
1|ratelimit)
call_dnsapi /limits/rate_limit |tr "{}[]" "\n" |tr "," "\n\t" |tr '\"' "\0"
source ./dns.sh
limits
;;
2|domainlimit)
call_dnsapi /limits/domain_limit |tr "{}[]" "\n" |tr "," "\n\t" |tr '\"' "\0"
source ./dns.sh
limits
;;
3|recordlimit)
call_dnsapi /limits/domain_record_limit |tr "{}[]" "\n" |tr "," "\n\t" |tr '\"' "\0"
source ./dns.sh
limits
;;
99|return)
source ./dns.sh
limits
;;
0|exit)
echo "Thank You for Using API Client"
;;
*) echo "Choose One of the Available Options"
esac
;;
99|menu)
source ./dns.sh
dns
;;
0|exit)
echo "Thank You for Using API Client"
exit
;;
*) echo "Choose One Of the Available DNS Options"
esac
done
}
function call_dnsapi()
{
curl -s -i -XGET -H "X-Auth-User: $USERNAME" -H "X-Auth-Token: $APITOKEN" https://{$LOCATION}dns.api.rackspacecloud.com/v1.0/${ACCOUNT}${1}
}
| true
|
662b51339a3bc790b2f714399576b8099bcd46b9
|
Shell
|
andras-tim/installer-scripts
|
/noip2/install.sh
|
UTF-8
| 596
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash -e
function cout()
{
echo -e "\n### $1..."
}
######
# MAIN
#
DIR="`pwd`"
TMP="`mktemp -d`"
cd "$TMP"
cout 'Getting source'
wget 'http://www.no-ip.com/client/linux/noip-duc-linux.tar.gz' -O 'noip-duc-linux.tar.gz'
tar xzf 'noip-duc-linux.tar.gz'
cd `find . -type d -name 'noip-*'`
cout 'Building'
make
cout 'Installing'
make install
cout 'Configuring rc'
cp 'debian.noip2.sh' '/etc/init.d/noip2'
chmod 755 '/etc/init.d/noip2'
update-rc.d noip2 defaults
cout 'Starting ip'
'/etc/init.d/noip2' start
noip2 -S
cout 'Cleaning up'
cd "$DIR"
rm -r "$TMP"
cout 'All Done'
exit 0
| true
|
bd3e1ad7339b942db97335710970f9c2a97fe1c8
|
Shell
|
BIitzkrieg/randomBash
|
/parseIOC
|
UTF-8
| 1,339
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# 11/7/18
# Converts PDF's to a text file then grabs all IPs, URLs, Filenames, and Hashes it sees
echo "Checking for dependency poppler-utils, installing if not installed."
sleep 1s
rpm -qa | grep -qw poppler-utils || yum install -y poppler-utils
echo ""
echo -e "Enter the full path that contains the PDF files you want to parse IP/URL/Filenames/Hashes from. (you can tab-complete)"
read -e -p "File path:" path
echo ""
for file in "$path"/*.pdf; do
pdftotext "$file" -raw >/dev/null 2>&1
done
if [ -f "$path"/IOCs.txt ]; then
rm "$path"/IOCs.txt
fi
if [ "$path" == "/root/" ]; then
echo "Can't use the root directory!"
exit 1
fi
echo -e "Parsing"
sleep 1s
strings "$path"/*.txt | egrep -io "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" >> ioc.tmp
strings "$path"/*.txt | egrep -io "((http|ftp|https):\/\/)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&\/\/=]*)" >> ioc.tmp
strings "$path"/*.txt | egrep -io "[^\\]*\.(\w+)$" >> ioc.tmp
strings "$path"/*.txt | egrep -io "^([a-f0-9]{32})$|^([a-f0-9]{40})$|^([a-f0-9]{64})$" >> ioc.tmp
cat ioc.tmp | sort -u > "$path"/IOCs.txt
echo -e "Done, check IOCs.txt in the path with your PDF's"
function finish {
rm ioc.tmp
}
trap finish EXIT
| true
|
ff10ead6fadaa4ce7e644cec0611489dfb947297
|
Shell
|
mfonville/pa_gapps
|
/make_installerdata.sh
|
UTF-8
| 11,807
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/sh
#This file is part of The PA GApps script of @mfonville.
#
# The PA GApps scripts are free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# These scripts are distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
now=$(date +"%Y-%m-%d")
echo "# Installation Data for PA Lollipop GApps Installer by @mfonville based on the work of @TKruzze
# Last Updated: "$now > installer.data
echo '# _____________________________________________________________________________________________________________________
# Define Current Package Variables
# List of GApps packages that can be installed with this installer
pkg_names="pico nano micro mini full stock";
# Installer Name (32 chars Total, excluding "")
installer_name="PA Google Stock GApps 5.1 - ";
req_android_version="5.1";
keybd_lib_filename1="libjni_latinimegoogle.so";
keybd_lib_filename2="libjni_latinime.so";
FaceLock_lib_filename="libfacelock_jni.so";
# Google Play Services version sizes' >> installer.data
gms0=`du -s GMSCore/0 | cut -f 1`
gms2=`du -s GMSCore/2 | cut -f 1`
gms4=`du -s GMSCore/4 | cut -f 1`
gms6=`du -s GMSCore/6 | cut -f 1`
gms8=`du -s GMSCore/8 | cut -f 1`
gmscommon=`du -s GMSCore/common | cut -f 1`
echo "gms_0_size="`expr $gms0 + $gmscommon`"; gms_2_size="`expr $gms2 + $gmscommon`"; gms_4_size="`expr $gms4 + $gmscommon`"; gms_6_size="`expr $gms6 + $gmscommon`"; gms_8_size="`expr $gms8 + $gmscommon`";
# Google Play Games version sizes" >> installer.data
pg0=`du -s PlayGames/0 | cut -f 1`
pg2=`du -s PlayGames/0 | cut -f 1`
pg4=`du -s PlayGames/4 | cut -f 1`
pg6=`du -s PlayGames/6 | cut -f 1`
pg8=`du -s PlayGames/8 | cut -f 1`
echo "pg_0_size="$pg0"; pg_2_size="$pg2"; pg_4_size="$pg4"; pg_6_size="$pg6"; pg_8_size="$pg8";
# Core & Optional Apps size" >> installer.data
core=`du -s Core | cut -f 1`
keybdlib=`du -s Optional/keybd_lib | cut -f 1`
echo "core_size="$core"; keybd_lib_size="$keybdlib";">> installer.data
# Append the static (literal) portion of installer.data.  The quoted 'EOF'
# delimiter suppresses ALL expansion, so every line below — including the
# '$' signs and lines beginning with '#' — is written verbatim; those '#'
# lines become comments in the generated file, not in this script.
# NOTE(review): "64bit_compat_msg" starts with a digit, which is not a
# valid shell identifier if installer.data is ever source'd directly —
# confirm the installer reads it some other way.
# (fixed: typo "worok" -> "work" in the 64-bit failure message)
tee >>installer.data <<'EOF'
# Buffer of extra system space to require for GApps install (9216=9MB)
# This will allow for some ROM size expansion when GApps are restored
buffer_size_kb=9216; small_buffer_size=2048;
# List of GApps files that should NOT be automatically removed as they are also included in (many) ROM's
removal_bypass_list="
";
# Define exit codes (returned upon exit due to an error)
E_ROMVER=20; # Wrong ROM version
E_NOSPACE=70; # Insufficient Space Available in System Partition
E_NONPA=40; # NON-PA GApps Currently Installed
E_64BIT=64 ; # 64-bit Device Detected
#_________________________________________________________________________________________________________________
# GApps List (Applications user can Select/Deselect)
# calsync will be added to GApps Install List as needed during script execution
stock_gapps_list="
cameragoogle
keyboardgoogle
sheets
slides
";
full_gapps_list="
books
chrome
cloudprint
docs
drive
ears
earth
keep
messenger
movies
music
newsstand
newswidget
playgames
talkback
wallet
";
mini_gapps_list="
googleplus
hangouts
maps
street
youtube
";
micro_gapps_list="
calendargoogle
exchangegoogle
faceunlock
gmail
googlenow
googletts
";
nano_gapps_list="
search
speech
";
pico_gapps_list="
";
# _____________________________________________________________________________________________________________________
# Default Stock/AOSP Removal List (Stock GApps Only)
default_aosp_remove_list="
browser
email
gallery
launcher
mms
picotts
";
# _____________________________________________________________________________________________________________________
# Optional Stock/AOSP/ROM Removal List
optional_aosp_remove_list="
basicdreams
calendarstock
camerastock
cmaudiofx
cmaccount
cmeleven
cmfilemanager
cmupdater
cmwallpapers
exchangestock
fmradio
galaxy
holospiral
keyboardstock
livewallpapers
lockclock
noisefield
phasebeam
photophase
phototable
terminal
themes
simtoolkit
studio
sykopath
visualizationwallpapers
whisperpush
";
# _____________________________________________________________________________________________________________________
# Stock/AOSP/ROM File Removal Lists
browser_list="
app/Browser
";
basicdreams_list="
app/BasicDreams
";
# Must be used when GoogleCalendar is installed
calendarstock_list="
app/Calendar
priv-app/Calendar
";
# Must be used when GoogleCamera is installed
camerastock_list="
app/Camera
app/Camera2
priv-app/Camera
priv-app/Camera2
";
cmaccount_list="
priv-app/CMAccount
";
cmaudiofx_list="
priv-app/AudioFX
";
cmeleven_list="
app/Eleven
";
cmfilemanager_list="
app/CMFileManager
";
cmupdater_list="
priv-app/CMUpdater
";
cmwallpapers_list="
app/CMWallpapers
";
email_list="
app/Email
";
exchangestock_list="
app/Exchange2
priv-app/Exchange2
";
fmradio_list="
app/FM2
app/FMRecord
";
galaxy_list="
app/Galaxy4
";
gallery_list="
app/Gallery
priv-app/Gallery
app/Gallery2
priv-app/Gallery2
";
holospiral_list="
app/HoloSpiralWallpaper
";
# Must be used when GoogleKeyboard is installed
keyboardstock_list="
app/LatinIME
";
launcher_list="
app/CMHome
app/CustomLauncher3
app/Launcher2
app/Launcher3
app/LiquidLauncher
app/Paclauncher
app/SlimLauncher
app/Trebuchet
priv-app/CMHome
priv-app/CustomLauncher3
priv-app/Launcher2
priv-app/Launcher3
priv-app/LiquidLauncher
priv-app/Paclauncher
priv-app/SlimLauncher
priv-app/Trebuchet
";
livewallpapers_list="
app/LiveWallpapers
";
lockclock_list="
app/LockClock
";
mms_list="
priv-app/Mms
";
noisefield_list="
app/NoiseField
";
phasebeam_list="
app/PhaseBeam
";
photophase_list="
app/PhotoPhase
";
phototable_list="
app/PhotoTable
";
picotts_list="
app/PicoTts
priv-app/PicoTts
lib/libttscompat.so
lib/libttspico.so
tts
";
simtoolkit_list="
app/Stk
";
studio_list="
app/VideoEditor
";
sykopath_list="
app/Layers
";
terminal_list="
app/Terminal
";
themes_list="
priv-app/ThemeChooser
priv-app/ThemesProvider
";
visualizationwallpapers_list="
app/VisualizationWallpapers
";
whisperpush_list="
app/WhisperPush
";
# _____________________________________________________________________________________________________________________
# Permanently Removed Folders
# Pieces that may be left over from AIO ROM's that can/will interfere with these GApps
other_list="
/system/app/BrowserProviderProxy
/system/app/Gmail
/system/app/GoogleCalendar
/system/app/GoogleCloudPrint
/system/app/GoogleHangouts
/system/app/GoogleKeep
/system/app/GoogleLatinIme
/system/app/GooglePlus
/system/app/PartnerBookmarksProvider
/system/app/QuickSearchBox
/system/app/Vending
/system/priv-app/GmsCore
/system/priv-app/GoogleNow
/system/priv-app/GoogleSearch
/system/priv-app/GoogleHangouts
/system/priv-app/OneTimeInitializer
/system/priv-app/Provision
/system/priv-app/QuickSearchBox
/system/priv-app/Vending
";
# Apps from 'app' that need to be installed in 'priv-app'
privapp_list="
/system/app/GoogleBackupTransport
/system/app/GoogleFeedback
/system/app/GoogleLoginService
/system/app/GoogleOneTimeInitializer
/system/app/GooglePartnerSetup
/system/app/GoogleServicesFramework
/system/app/Hangouts
/system/app/OneTimeInitializer
/system/app/Phonesky
/system/app/PrebuiltGmsCore
/system/app/SetupWizard
/system/app/Velvet
/system/app/Wallet
";
# Stock/AOSP Keyboard lib (and symlink) that are always removed since they're always replaced
reqd_list="
/system/lib/libjni_latinime.so
/system/lib/libjni_latinimegoogle.so
/system/app/LatinIME/lib/arm/libjni_latinime.so
/system/app/LatinIME/lib/arm/libjni_latinimegoogle.so
";
# Remove talkback from priv-app since it was moved to app in 5.1
obsolete_list="
/system/priv-app/talkback
#";
# Obsolete files from xxxx
#obsolete_list="${obsolete_list}
#";
# Old gaddon.d backup scripts as we'll be replacing with updated version during install
oldscript_list="
/system/etc/g.prop
/system/addon.d/70-gapps.sh
/system/addon.d/71-faceunlock.sh
/system/addon.d/72-keyboards.sh
/system/addon.d/74-googlecamera.sh
/system/addon.d/78-chromebrowser.sh
";
remove_list="${other_list}${privapp_list}${reqd_list}${obsolete_list}${oldscript_list}";
# _____________________________________________________________________________________________________________________
# Installer Error Messages
64bit_compat_msg="INSTALLATION FAILURE: PA GApps are not compatible with 64-bit devices. You will\nneed to find a 64-bit compatible GApps package that will work with your device.\n";
camera_sys_msg="WARNING: Google Camera has/will not be installed as requested. Google Camera\ncan only be installed during a Clean Install or as an update to an existing\nGApps Installation.\n";
camera_compat_msg="WARNING: Google Camera has/will not be installed as requested. Google Camera is\nNOT compatible with your device if installed in the system partition. Try\ninstalling from the Play Store instead.\n";
faceunlock_msg="NOTE: FaceUnlock can only be installed on devices with a front facing camera.\n";
googlenow_msg="WARNING: Google Now Launcher has/will not be installed as requested. Google \nSearch must be added to the GApps installation if you want to install the Google\nNow Launcher.\n";
keyboard_sys_msg="WARNING: Google Keyboard has/will not be installed as requested. Google Keyboard\ncan only be installed during a Clean Install or as an update to an existing\nGApps Installation.\n";
nokeyboard_msg="NOTE: The Stock/AOSP keyboard was NOT removed as requested to ensure your device\nwas not accidentally left with no keyboard installed. If this was intentional,\nyou can add 'Override' to your gapps-config to override this protection.\n";
nolauncher_msg="NOTE: The Stock/AOSP Launcher was NOT removed as requested to ensure your device\nwas not accidentally left with no Launcher. If this was your intention, you can\nadd 'Override' to your gapps-config to override this protection.\n";
nomms_msg="NOTE: The Stock/AOSP MMS app was NOT removed as requested to ensure your device\nwas not accidentally left with no way to receive text messages. If this WAS\nintentional, add 'Override' to your gapps-config to override this protection.\n";
non_pa_gapps_msg="INSTALLATION FAILURE: PA GApps can only be installed on top of an existing\nPA GApps installation. Since you are currently using another GApps package, you\nwill need to wipe (format) your system partition before installing PA GApps.\n";
rom_version_msg="INSTALLATION FAILURE: This GApps package can only be installed on a $req_android_version.x ROM.\n";
simulation_msg="TEST INSTALL: This was only a simulated install. NO CHANGES WERE MADE TO YOUR\nDEVICE. To complete the installation remove 'Test' from your gapps-config.\n";
system_space_msg="INSTALLATION FAILURE: Your device does not have sufficient space available in\nthe system partition to install this GApps package as currently configured.\nYou will need to switch to a smaller GApps package or use gapps-config to\nreduce the installed size.\n";
del_conflict_msg="!!! WARNING !!! - Duplicate files were noted between your ROM and this GApps\npackage. The duplicate files are shown in the log portion below. Please report\nthis information to the PA GApps developer (TKruzze) in the XDA forum.\n";
EOF
| true
|
d7e8416d0255c311eb024764e2851bd780588562
|
Shell
|
barclaysteven/bash-sandbox
|
/user-mgmt-operations
|
UTF-8
| 1,253
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
usage() {
  # Print the command/option help text and exit successfully.
  # The quoted 'EOF' delimiter keeps the text literal (no expansion).
  cat <<'EOF'
user-mgmt-operations [COMMAND] [OPTIONS]
 Commands:
 add-operator-admin
 Options:
 --username {username}
 --accountName {AWS Account Name}

 remove-operator-admin
 Options:
 --username {username}
 --accountName {AWS Account Name}

 add-operator
 Options:
 --username {username}
 --accountName {AWS Account Name}

 remove-operator
 Options:
 --username {username}
 --accountName {AWS Account Name}
EOF
  # TODO: Add options for operator actions
  exit 0
}
# Dispatch on the requested command.  A valid command is consumed here
# (shift) so the option loop below only sees "--flag value" pairs.
# BUGFIX: the original never shifted, so for every valid command the
# option loop saw the command word itself, set invalidArgs, and always
# bailed out to usage.
case "${1:-}" in
  'add-operator-admin')
    cmd=$1
    shift
    ;;
  'remove-operator-admin')
    cmd=$1
    shift
    ;;
  'add-operator')
    cmd=$1
    shift
    ;;
  'remove-operator')
    cmd=$1
    shift
    ;;
  *)
    invalidCommand=${1:-}
    ;;
esac
# Unknown (non-empty) command -> show usage and exit.
if [ -n "${invalidCommand:-}" ] ; then
  usage
fi
# Parse "--flag value" pairs until the argument list is exhausted.
# --username/--accountName match the documented usage; --account/--region
# are kept for backward compatibility with the original parser.
while [ "$#" -gt 0 ]
do
  case $1 in
    '--username')
      username=$2
      shift
      ;;
    '--accountName')
      accountName=$2
      shift
      ;;
    '--account')
      account=$2
      shift
      ;;
    '--region')
      region=$2
      shift
      ;;
    '--help')
      usage
      ;;
    *)
      invalidArgs=$1
      ;;
  esac
  if [ -n "${invalidArgs:-}" ] ; then
    usage
  fi
  shift
done
| true
|
6c2873382d5a26a62c849f01e7d9b344e64226b3
|
Shell
|
richRemer/gzen
|
/lib/repo.sh
|
UTF-8
| 233
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Run the given command ("$@") from the root of the repository selected
# with 'gzen here'.  Must be invoked through gzen, which exports GZEN
# (path to the gzen tool) and REPO (path to the working repository).
# Robustness: the "-e" in the shebang is lost when this file is invoked
# as "bash repo.sh", so set it explicitly as well.
set -e
if [ -z "$GZEN" ]; then
  echo "use gzen to run this script" 1>&2
  exit 100
fi
# $GZEN error is expected to report and abort; under set -e a non-zero
# return from it terminates this script too.
[ -z "$REPO" ] && $GZEN error "Use 'gzen here' first"
[ ! -d "$REPO/.git" ] && $GZEN error "REPO '$REPO' is not a git repository"
cd "$REPO"
"$@"
| true
|
ee990936a9f0aae406f691421ecf6d1c5b68d003
|
Shell
|
Ianneo/FIA
|
/1503322H/LAB3BRemediation.sh
|
UTF-8
| 5,681
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# CIS benchmark remediation notes/commands (sections 7.x and 8.x).
# Many "commands" in the original were unrunnable placeholders (e.g.
# "chage ... <user>", which the shell parses as broken redirections) or
# plain prose lines; those are kept as comments so the script is at
# least syntactically valid.  Review before running: most steps require
# root and several are interactive (nano).
#7.1
#Set the PASS_MAX_DAYS parameter to 90 in /etc/login.defs
sudo nano /etc/login.defs
#   PASS_MAX_DAYS 90
#Modify user parameters for all users with a password set to match
#   chage --maxdays 90 <user>
#7.2
#Set the PASS_MIN_DAYS parameter to 7 in /etc/login.defs
sudo nano /etc/login.defs
#Modify user parameters for all users with a password set to match:
#   chage --mindays 7 <user>
#7.3
#Set the PASS_WARN_AGE parameter to 7 in /etc/login.defs:
sudo nano /etc/login.defs
#Modify user parameters for all users with a password set to match:
#   chage --warndays 7 <user>
#7.4
#Execute the following commands for each misconfigured system account:
#   usermod -L <user name>
#   usermod -s /sbin/nologin <user name>
#7.5
#Run the following command to set the root user default group to GID 0:
usermod -g 0 root
#7.6
#Edit the /etc/bashrc and /etc/profile.d/cis.sh files (and the appropriate files for any other shell supported on your system) and add the following the UMASK parameter as shown:
# NOTE(review): this umask only affects the current script process; the
# lasting remediation is the edit to /etc/bashrc described above.
umask 077
#7.7
#Run the following command to disable accounts that are inactive for 35 or more days
useradd -D -f 35
#7.8
#If any accounts in the /etc/shadow file do not have a password, run the following command to lock the account until it can be determined why it does not have a password
#   /usr/bin/passwd -l <username>
#Also, check to see if the account is logged in and investigate what it is being used for to determine if it needs to be forced off.
#7.9
#Run the following command and verify that no output is returned:
grep '^+:' /etc/passwd
grep '^+:' /etc/shadow
grep '^+:' /etc/group
#Delete these entries if they exist using userdel.
#This script will give the information of legacy account.
# (fixed: curly quotes around ^+: and missing spaces inside [ ] made the
# original fail at runtime; the unused LG capture was dropped)
if grep -q '^+:' /etc/passwd; then
    #We've found a user
    echo "We've found the user '+'!"
    sudo userdel '+'
    echo "Deleted."
else
    echo "Couldn't find the user '+'."
fi
#7.10
#Run the following command and verify that only the word "root" is returned:
# (fixed: the closing quote of the awk program was a curly quote)
/bin/cat /etc/passwd | /bin/awk -F: '($3 == 0) { print $1 }'
#Expected output:
#   root
#Delete any other entries that are displayed using userdel:
#   userdel -r <username>
#7.11
#Rectify or justify any questionable entries found in the path.
#- none of the path entries should be empty
#- none of the path entries should be the "." (current directory)
#- path entries should be directories
#- path entries should only be writable by the owner (use the chmod command to rectify)
#- path entries should preferably be owned by root (use the chown command to rectify)
#7.12
printf "It is recommended that a monitoring policy be established to report user file permissions."
#7.13
printf "It is recommended that a monitoring policy be established to report user dot file permissions."
#7.14
printf "It is recommended that a monitoring policy be established to report users’ use of .netrc and .netrc file permissions."
#7.15
#If any users have .rhosts files determine why they have them. These files should be deleted if they are not needed.
#To search for and remove .rhosts files by using the find(1) command
# (fixed: the original "rm{}" lacked a space, producing commands such as
# "rm/export/home/user/.rhosts")
find /export/home -name .rhosts -print | xargs -i -t rm {}
#7.16
printf "Analyze the output of the Verification step on the right and perform the appropriate action to correct any discrepancies found."
#7.17
#If any users' home directories do not exist, create them and make sure the respective user owns the directory.
#Users without assigned home directories should be removed or assigned a home directory as appropriate.
# Example (adjust the username before running):
useradd john
mkdir -p /home/john
chown john:john /home/john
#To remove users
userdel john
#7.18
#Based on the results of the script, establish unique UIDs and review all files owned by the shared UID to determine which UID they are supposed to belong to.
#7.19
#Based on the results of the script, establish unique GIDs and review all files owned by the shared GID to determine which group they are supposed to belong to.
#7.20
#Based on the results of the above, change any UIDs that are in the reserved range to one that is in the user range.
#Review all files owned by the reserved UID to determine which UID they are supposed to belong to.
#7.21
#Based on the results of the script, establish unique user names for the users.
#File ownerships will automatically reflect the change as long as the users have unique UIDs.
#7.22
#Based on the results of the script, establish unique names for the user groups.
#File group ownerships will automatically reflect the change as long as the groups have unique GIDs.
#7.23
#Making global modifications to users' files without alerting the user community can result in unexpected outages and unhappy users.
#Therefore, it is recommended that a monitoring policy be established to report user .forward files and determine the action to be taken in accordance with site policy.
#8.1
# NOTE(review): /etc/motd is only touched, never given the banner text,
# and the banner itself contains "\m" which step 8.2 below then deletes —
# confirm both are intended.
touch /etc/motd
echo "Authorized uses only. All activity may be \monitored and reported." > /etc/issue
echo "Authorized uses only. All activity may be \monitored and reported." > /etc/issue.net
chown root:root /etc/motd; chmod 644 /etc/motd
chown root:root /etc/issue; chmod 644 /etc/issue
chown root:root /etc/issue.net; chmod 644 /etc/issue.net
#8.2
#Edit the /etc/motd, /etc/issue and /etc/issue.net files and remove any lines containing \m, \r, \s or \v.
sed -i '/\m/ d' /etc/motd
sed -i '/\r/ d' /etc/motd
sed -i '/\s/ d' /etc/motd
sed -i '/\v/ d' /etc/motd
sed -i '/\m/ d' /etc/issue
sed -i '/\r/ d' /etc/issue
sed -i '/\s/ d' /etc/issue
sed -i '/\v/ d' /etc/issue
sed -i '/\m/ d' /etc/issue.net
sed -i '/\r/ d' /etc/issue.net
sed -i '/\s/ d' /etc/issue.net
sed -i '/\v/ d' /etc/issue.net
| true
|
ef0f435134a3ed5802ca0e76d2d4dbddd252f1c5
|
Shell
|
npmaile/GH-Actions-ShellCheck
|
/entrypoint.sh
|
UTF-8
| 354
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh -l
# GitHub Action entrypoint: run shellcheck over every file whose path
# matches FIND_REGEX (default: *.sh) and does not match EXCLUSIONS.
# Exits with the status of the last failing shellcheck run (0 if clean).
EXCLUSIONS=${INPUT_EXCLUSIONS:-'^$'}
REGEX=${INPUT_FIND_REGEX:-'.*\.sh'}
pwd
echo "checking files:"
# Split find's output on newlines only, so paths containing spaces
# survive word-splitting (the default IFS also splits on space/tab).
IFS='
'
returncode=0
for file in $(find . -regex "${REGEX}" -not -regex "${EXCLUSIONS}"); do
    echo "testing ${file}"
    shellcheck "$file"
    status=$?
    # Remember the most recent failure so one bad file fails the action.
    if [ "$status" -ne 0 ]; then
        returncode=$status
    fi
done
exit "${returncode}"
| true
|
7c5dd9908935e04631389a764f5eae52bbc0f954
|
Shell
|
nathanwbrei/epsci-spack
|
/admin/make_new_platform_centos8.sh
|
UTF-8
| 1,004
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
#
# This script is used to setup a singularity container and then run
# the mnp.sh script in it. The heavy lifting is done in mnp.sh.
#
# Here you can specify a compiler version that will be needed. The
# compiler should be specified here so it can be built by the
# mnp.sh script if necessary.
#
# If you wish to use the default system compiler that already exists
# in the container then set spack_compiler to "system".
#
# NOTE: It is safe to run this for a spack platform that already exists.
# It is non-destructive and will not rebuild anything that is already
# built.
#
# Target platform: CentOS 8 image, the container's system compiler,
# 16 parallel build threads.  All four values are forwarded to mnp.sh.
spack_os=centos
spack_ver=8.3.2011
#spack_compiler=9.3.0
spack_compiler=system
spack_build_threads=16
# Load the "module" command into this shell, then the singularity module
# (order matters: "module" does not exist until the profile is sourced).
source /etc/profile.d/modules.sh
module load singularity
# Run mnp.sh inside the image matching spack_os/spack_ver; the -B binds
# expose the host's /scigroup (and its cvmfs mirror) to the container.
singularity exec \
-B /scigroup/cvmfs:/cvmfs/oasis.opensciencegrid.org/jlab \
-B /scigroup:/scigroup \
/scigroup/spack/mirror/singularity/images/epsci-${spack_os}-${spack_ver}.img \
./mnp.sh $spack_os $spack_ver $spack_compiler $spack_build_threads
| true
|
f0ed93107fde944c2151425c562fd596d19a439e
|
Shell
|
adsva/dotfiles
|
/font-scale/set-font-scale
|
UTF-8
| 990
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Set the GNOME text scaling factor for a (hard-coded) desktop user.
# Usage: set-font-scale <factor>      e.g. set-font-scale 1.2
# Require the target scale as the first argument (the dead "toggle"
# branch and the unused current-scale query were removed).
if [ -z "$1" ]; then
    echo "usage: ${0##*/} <scaling-factor>" >&2
    exit 1
fi
SCALE_SWITCH=$1
#Detect the name of the display in use (first X socket in /tmp/.X11-unix)
display=":$(ls /tmp/.X11-unix/* | sed 's#/tmp/.X11-unix/X##' | head -n 1)"
# Target user is hard-coded (the original comment claimed detection).
user="adsva"
#Detect the id of the user -- needed to reach their session D-Bus socket
uid=$(id -u "$user")
logger "Setting font scale to $SCALE_SWITCH"
# (Optional) Message intentions to CLI and GNOME Notifications
sudo -u "$user" DISPLAY="$display" DBUS_SESSION_BUS_ADDRESS="unix:path=/run/user/$uid/bus" notify-send "Setting font scale to $SCALE_SWITCH"
# Run switch command
sudo -H -u "$user" DISPLAY="$display" DBUS_SESSION_BUS_ADDRESS="unix:path=/run/user/$uid/bus" gsettings set org.gnome.desktop.interface text-scaling-factor "$SCALE_SWITCH"
| true
|
f269606386ec2e71b9dabe9c118721e8ce71b933
|
Shell
|
sgalella/BashScripts
|
/scripts/format_text.sh
|
UTF-8
| 315
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Join every line of the input file into a single space-separated line
# (runs of whitespace collapsed) and write the result to out.txt.
# Usage: format_text.sh <input-file>
# Get filename and validate it
IN=$1
if [ -z "$IN" ] || [ ! -f "$IN" ]; then
    echo "usage: ${0##*/} <input-file>" >&2
    exit 1
fi
OUT="out.txt"
# Start from a clean output file.  BUGFIX: the original tested `-d`
# (directory), so a stale out.txt *file* was never removed and output
# accumulated across runs.
rm -f "$OUT"
# The `|| [ -n "$line" ]` clause also processes a final line that lacks
# a trailing newline, so the input no longer has to be modified in place
# (the original appended "\n" to the input file on every run).
while IFS= read -r line || [ -n "$line" ]; do
    # Unquoted $line is deliberate: word-splitting collapses whitespace
    # runs; tr strips any newline that might slip through.
    printf "%s " $line | tr -d '\n' >> "$OUT"
done < "$IN"
| true
|
e260cac4b8e121c5c8fd28b274a6dcf36c8606de
|
Shell
|
Funlik/UNIX-Homework
|
/29.04.2020/configs/backup.sh
|
UTF-8
| 176
| 2.53125
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Back up /files/*.html into a dated tarball -- but only when the weather
# report from wttr.in does not mention "rain" in its first three lines.
# NOTE(review): if curl fails (e.g. offline) nothing matches "rain" and
# the backup proceeds; confirm that is the intended fallback.
if ! (curl -s wttr.in | head -n 3 | grep -i -q rain); then
# Suspend all smbd processes (SIGTSTP) while archiving, then resume them
# (SIGCONT), so Samba is not serving the files mid-backup.
killall -TSTP smbd
tar zcf /files/backup-$(date +%Y-%m-%d).tar.gz /files/*.html
killall -CONT smbd
fi
| true
|
0ca843769439b9a598de56803cabe86233c834a9
|
Shell
|
ratnaIS/Containers-on-Azure-demo
|
/DockerizedEncoder/Image/dockerizedencoder/docker-entrypoint.sh
|
UTF-8
| 2,489
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Validate the CLI arguments and derive the blob names used by the rest
# of the script.  All six positional parameters are required.
if [ -z "$5" ]; then
    cat <<USAGE
Usage:
 $0 SourceUrl StorageAccount StorageKey ContainerName TargetGUID ExpirationEpoch
 SourceUrl: URI from the source file will be downloaded
 StorageAccount: storage account name to store a results
 StorageKey: storage account access key
 ContainerName: blob container name
 TargetGUID: alphanumeric unique id of target
 ExpirationEpoch: result file expiration at given seconds since epoch UTC
USAGE
    exit 1
fi
# Positional parameters; the storage credentials are exported for the
# azure CLI, the rest stay local to this script.
SourceUrl=$1
export AZURE_STORAGE_ACCOUNT=$2
export AZURE_STORAGE_ACCESS_KEY=$3
export ContainerName=$4
TargetGUID=$5
Expiration=$6
# Blob names encode the target id, its state and the expiration epoch.
IntermediateName=${TargetGUID}_Intermediate_${Expiration}
TargetName=${TargetGUID}_Success_${Expiration}
ErrorName=${TargetGUID}_Error_${Expiration}
SourceFile=/tmp/$(basename $SourceUrl)
# Empty payload used for the marker blobs.
Zero=/tmp/zero
: >$Zero
# Log helper: print all arguments, space-joined, wrapped in green ANSI
# color codes.  %b mirrors `echo -e` semantics, so backslash escapes in
# the arguments are still expanded exactly as before.
cecho()
{
    printf '\033[32m%b\033[0m\n' "$*"
}
# Publish the job outcome to blob storage, drop the in-progress marker,
# and exit 0.
#   $1 is intentionally expanded UNQUOTED inside [ ]: callers pass either
#   a simple non-empty/empty flag ("1"/"") or a whole test expression
#   (e.g. "42 -lt 20") that relies on word-splitting to be evaluated by
#   the [ ] builtin.  Do not quote it.
# (fixed: typo "Uloading" -> "Uploading" in the error-path message)
function result()
{
    if [ $1 ]; then
        cecho Uploading an Error file ...
        azure storage blob upload $Zero "$ContainerName" "$ErrorName"
    else
        cecho Uploading a Success file ...
        azure storage blob upload "$SourceFile" "$ContainerName" "$TargetName"
    fi
    cecho Deleting intermediate state marker file...
    azure storage blob delete "$ContainerName" "$IntermediateName"
    cecho Finished
    exit 0
}
# Mark the job as in progress; failure here means the container/creds
# are unusable, so abort immediately.
cecho Writing intermediate state marker file...
azure storage blob upload $Zero "$ContainerName" "$IntermediateName"
if [ $? -ne 0 ]; then
    echo -e "\x1B[31mContainer access error. Terminated.\x1B[0m"
    exit 1
fi
# Garbage-collect expired result blobs.  Blob names have the shape
# GUID_State_Epoch (see the *Name variables above), so after splitting
# on "_" field $3 is the expiration epoch.  Now is exported so awk can
# read it via ENVIRON.  NOTE(review): awk compares ENVIRON values as
# strings here; epoch strings of equal length compare consistently, but
# confirm this is acceptable.
cecho Removing expired files...
export Now=`date +%s`
azure storage blob list "$ContainerName" --json | awk ' /'name'/ {print $2}' | sed -e "s/[\",]//g" | awk -F "_" ' { if ($3 < ENVIRON["Now"]) { print "azure storage blob delete " ENVIRON["ContainerName"] " " $0 } } ' > /tmp/deleteExpired.sh
bash /tmp/deleteExpired.sh
# Fetch the input; on failure publish an Error result (result exits).
cecho Downloading source file...
rm $SourceFile &> /dev/null
wget $SourceUrl -O $SourceFile
if [ $? -ne 0 ]; then
    echo -e "\x1B[31mSource file download error\x1B[0m"
    result 1
fi
# Busy-loop on dd for ~120s to simulate an encoder's CPU load
# ($SECONDS is bash's elapsed-time counter).
cecho Dummy encoding with heavy CPU load for 120+ seconds...
End=$((SECONDS+120))
while [ $SECONDS -lt $End ]; do
    dd if=/dev/zero of=/dev/null bs=1K count=10M &> /dev/null
done
# The whole expression "N -lt 20" is passed as one argument; result's
# unquoted [ $1 ] evaluates it.  NOTE(review): (RANDOM%100)+1 is 1..100,
# so "-lt 20" is true for 1..19, i.e. 19% rather than the stated 20%.
cecho This is as designed: 20% of runs will produce an error result
result "$(((RANDOM % 100)+1)) -lt 20"
| true
|
4af58ef233b0f8747e91dbea90d6776a390f6089
|
Shell
|
reeson46/Planner-reactified
|
/app/bin/gunicorn_start
|
UTF-8
| 649
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
python manage.py collectstatic --noinput
python manage.py migrate
NAME="core"
DJANGODIR=/usr/src/app/
SOCKFILE=/usr/src/app/run/gunicorn.sock
USER=root
GROUP=root
NUM_WORKERS=3
DJANGO_SETTINGS_MODULE=core.settings
DJANGO_WSGI_MODULE=core.wsgi
echo "Starting $NAME as `whoami`"
cd $DJANGODIR
export DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE
export PYTHONPATH=$DJANGODIR:$PYTHONPATH
RUNDIR=$(dirname $SOCKFILE)
test -d $RUNDIR || mkdir -p $RUNDIR
exec gunicorn ${DJANGO_WSGI_MODULE}:application \
--name $NAME \
--workers $NUM_WORKERS \
--user=$USER --group=$GROUP \
--bind=unix:$SOCKFILE \
--log-level=debug \
--log-file=-
| true
|
390c34ef7cc9819f15f315473a49fca38e2e296b
|
Shell
|
Snakemake-Profiles/slurm
|
/tests/deploystack.sh
|
UTF-8
| 5,914
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
#
# Deploy docker stack
#
# The ${VAR:=default} form only assigns when VAR is unset or empty, so
# both the compose file and the image tags can be overridden from the
# caller's environment.
# Compose file
DOCKER_COMPOSE=${DOCKER_COMPOSE:=docker-compose.yaml}
# Images
SNAKEMAKE_IMAGE=${SNAKEMAKE_IMAGE:=quay.io/biocontainers/snakemake:7.30.1--hdfd78af_0}
SLURM_IMAGE=${SLURM_IMAGE:=giovtorres/docker-centos7-slurm:20.11.8}
# Fetch both images up front.
docker pull $SNAKEMAKE_IMAGE
docker pull $SLURM_IMAGE
# Stack and service config
STACK_NAME=cookiecutter-slurm
SLURM_SERVICE=${STACK_NAME}_slurm
SNAKEMAKE_SERVICE=${STACK_NAME}_snakemake
# Invoking user's numeric UID; created inside the slurm container later
# via add_slurm_user.
LOCAL_USER_ID=$(id -u)
##############################
## Functions
##############################
## Add slurm user to container
# Create user "user" (group slurm) with the given numeric UID inside the
# container, unless that UID already exists.
#   $1 - numeric UID to create   $2 - container id/name
function add_slurm_user {
    user=$1
    container=$2
    # check if user exists
    # `id` exits 1 when the UID is unknown; the check is deliberately
    # against 1 only, so other failures (e.g. docker down) are NOT
    # treated as "missing user".
    docker exec $container /bin/bash -c "id $user" > /dev/null
    if [ $? -eq 1 ]; then
        echo "Adding user $user to docker container"
        # -o permits a duplicate UID; -m creates the home directory.
        docker exec $container /bin/bash -c "useradd --shell /bin/bash -u $user -o -c \"\" -m -g slurm user"
        if [ $? -eq 1 ]; then
            echo "Failed to add user $user"
            exit 1;
        fi
    fi
}
# Cluster definition appended to /etc/slurm/slurm.conf inside the
# container by modify_slurm_conf: five fake compute nodes (all aliases
# of the controller host) plus the "normal" and "debug" partitions.
# The "NEW COMPUTE NODE DEFINITIONS" marker line is what
# modify_slurm_conf greps for to stay idempotent.
SLURM_CONF=$(cat <<EOF
# NEW COMPUTE NODE DEFINITIONS
NodeName=DEFAULT Sockets=1 CoresPerSocket=2 ThreadsPerCore=2 State=UNKNOWN TmpDisk=10000
NodeName=c1 NodeHostName=slurmctl NodeAddr=127.0.0.1 RealMemory=500 Feature=thin,mem500MB
NodeName=c2 NodeHostName=slurmctl NodeAddr=127.0.0.1 RealMemory=500 Feature=thin,mem500MB
NodeName=c3 NodeHostName=slurmctl NodeAddr=127.0.0.1 RealMemory=800 Feature=fat,mem800MB
NodeName=c4 NodeHostName=slurmctl NodeAddr=127.0.0.1 RealMemory=800 Feature=fat,mem800MB
NodeName=c5 NodeHostName=slurmctl NodeAddr=127.0.0.1 RealMemory=500 Feature=thin,mem500MB
# NEW PARTITIONS
PartitionName=normal Default=YES Nodes=c[1-4] Shared=NO MaxNodes=1 MaxTime=5-0 DefaultTime=00:00:01 State=UP DefMemPerNode=0 OverSubscribe=NO
PartitionName=debug Nodes=c[5] Shared=NO MaxNodes=1 MaxTime=01:00:00 DefaultTime=00:00:01 State=UP DefMemPerNode=0 QOS=debug
EOF
)
# Rewrite /etc/slurm/slurm.conf inside container $1 and restart the
# slurm services.  Idempotent: skipped entirely when the marker text
# from $SLURM_CONF is already present in the file.  Note $SLURM_CONF is
# expanded *here* (double quotes) and echoed by the container's shell.
function modify_slurm_conf {
    container=$1
    slurmconf=/etc/slurm/slurm.conf
    docker exec $container /bin/bash -c "cat $slurmconf" | grep -q "NEW COMPUTE NODE DEFINITIONS"
    if [ $? -eq 1 ]; then
        echo "Rewriting /etc/slurm/slurm.conf"
        # Change consumable resources to Core, else threads configuration fails
        docker exec $container /bin/bash -c "sed -i -e \"s/CR_CPU_Memory/CR_Core/g\" $slurmconf ;"
        # Comment out node names and partition names that are to be redefined
        docker exec $container /bin/bash -c "sed -i -e \"s/^GresTypes/# GresTypes/g\" $slurmconf ;"
        docker exec $container /bin/bash -c "sed -i -e \"s/^NodeName/# NodeName/g\" $slurmconf ;"
        docker exec $container /bin/bash -c "sed -i -e \"s/^PartitionName/# PartitionName/g\" $slurmconf ;"
        echo " setting up slurm partitions..."
        docker exec $container /bin/bash -c "echo \"$SLURM_CONF\" >> $slurmconf ; "
        # Need to be sure slurmdb is available for sacctmgr to work
        database_up $container
        # Restart services; needed for sacct; see https://github.com/giovtorres/docker-centos7-slurm/issues/3
        echo " restarting slurm services..."
        docker exec $container /bin/bash -c 'sacctmgr --immediate add cluster name=linux'
        docker exec $container supervisorctl restart slurmdbd
        docker exec $container supervisorctl restart slurmctld
        docker exec $container /bin/bash -c "sacctmgr --immediate add account none,test Description=\"none\" Organization=\"none\""
        # Print the node/partition table as a smoke test.
        docker exec $container sinfo
    fi
}
### Check if database is up
# Poll mysqladmin inside container $1 until it answers, sleeping 5s
# between attempts and giving up (return, not exit) after 10 tries.
function database_up {
    tries=1
    max_tries=10
    box=$1
    docker exec $box mysqladmin status 2> /dev/null
    db_status=$?
    while [ $db_status -ne 0 ]; do
        echo "$tries: database unavailable"
        sleep 5
        docker exec $box mysqladmin status 2> /dev/null
        db_status=$?
        if [ $tries -eq $max_tries ]; then
            echo "database connection failed"
            return
        fi
        tries=$((tries+1))
    done
    echo "database up!"
}
### Check if service is up
# Poll `docker service ps` for service $1 until a replica reports
# "Running", sleeping 5s between attempts; abort the whole script
# (exit 1) after 30 failed tries.
function service_up {
    svc=$1
    n=1
    cap=30
    docker service ps $svc --format "{{.CurrentState}}" 2>/dev/null | grep Running
    state=$?
    while [ $state -ne 0 ]; do
        echo "$n: service $svc unavailable"
        sleep 5
        docker service ps $svc --format "{{.CurrentState}}" 2>/dev/null | grep Running
        state=$?
        if [ $n -eq $cap ]; then
            echo "service $svc not found; giving up"
            exit 1
        fi
        n=$((n+1))
    done
    echo "service $svc up!"
}
##############################
## Deploy stack
##############################
# Check if docker stack has been deployed
# (grep exits 1 when no replica is Running; note the variable below
# shares its name with the service_up *function* -- legal but confusing)
docker service ps $SLURM_SERVICE --format "{{.CurrentState}}" 2>/dev/null | grep Running
service_up=$?
if [ $service_up -eq 1 ]; then
    docker stack deploy --with-registry-auth -c $DOCKER_COMPOSE $STACK_NAME;
fi
# Block until both services report Running (service_up exits on timeout).
service_up $SLURM_SERVICE
service_up $SNAKEMAKE_SERVICE
# Resolve the running slurm container id from `docker ps`.
CONTAINER=$(docker ps | grep cookiecutter-slurm_slurm | awk '{print $1}')
# Add local user id as user to container
add_slurm_user $LOCAL_USER_ID $CONTAINER
# Fix snakemake header to point to /opt/local/bin
docker exec $CONTAINER /bin/bash -c "head -1 /opt/local/bin/snakemake" | grep -q "/usr/local/bin"
if [ $? -eq 0 ]; then
    echo "Rewriting snakemake header to point to /opt/local/bin"
    docker exec $CONTAINER /bin/bash -c 'sed -i -e "s:/usr:/opt:" /opt/local/bin/snakemake'
fi
# Rewrite slurm config
modify_slurm_conf $CONTAINER
# Add pandas to snakemake
CONTAINER=$(docker ps | grep cookiecutter-slurm_snakemake | awk '{print $1}')
docker exec $CONTAINER pip install pandas
# Make sure sacct is function properly: submit a 1s job, wait, and check
# that accounting can see it by name; otherwise tests would fail later.
CONTAINER=$(docker ps | grep cookiecutter-slurm_slurm | awk '{print $1}')
jobid=$(docker exec $CONTAINER sbatch --parsable --wrap "sleep 1" --job-name check-sacct)
sleep 5
docker exec $CONTAINER sacct -o JobName -p | grep check-sacct -q
if [ $? -eq 1 ]; then
    echo "sacct not working properly; tests will fail"
    exit 1
fi
docker exec $CONTAINER scancel $jobid
| true
|
a8f1a39fd45efa12859d7887dc68226b1219f404
|
Shell
|
gudiandian/ElasticFlow
|
/ElasticFlow/scheduler/run_fig8a.sh
|
UTF-8
| 1,775
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Figure 8(a): run every baseline scheduler in simulation (backgrounded,
# in parallel) and then the chronus baseline, over the elasticVpollux_e
# trace.  Logs land under ../../plot_figure/logs/figure8a/<scheduler>.
schedule=("edf" "gandiva" "dlas-gpu" "themis" "ef-accessctrl" )
#placement=("elastic" "gandiva" "elastic" "elastic" "elastic")
setups=("n16g4")
jobs=("elasticVpollux_e")
echo "running..."
for setup in "${setups[@]}"; do
    cluster_spec="cluster_specs/${setup}.csv"
    for job in "${jobs[@]}"; do
        job_file="../traces_for_ElasticFlow/${job}.csv"
        #job_file="test.csv"
        log_folder="../../plot_figure/logs/figure8a"
        # -p: creating an existing folder is not an error (re-runs, and
        # this line previously ran once per job and failed after the
        # first iteration).
        mkdir -p "${log_folder}"
        for s in "${schedule[@]}"; do
            # gandiva uses its own placement scheme; all others elastic.
            if [ "$s" = "gandiva" ]; then
                placement="gandiva"
            else
                placement="elastic"
            fi
            log_name="${log_folder}/${s}"
            mkdir -p "$log_name"
            python3 scheduler.py --cluster_spec=${cluster_spec} --print --scheme=${placement} --trace_file=${job_file} --schedule=${s} --log_path=${log_name} --simulation=True --scheduling_slot=60 --gpu_type=T4&
        done
    done
done
# chronus baseline: convert the trace, then run its scheduler.  Abort if
# the chronus checkout is not where we expect instead of running the
# remaining commands from the wrong directory.
cd ../chronus-scheduler/utils || exit 1
# get trace and namelist
python3 convert_ef_trace_to_chronus.py -t ../../traces_for_ElasticFlow/elasticVpollux_e.csv -o ../../traces_for_chronus/elasticVpollux_e.csv
python3 get_name_list.py -t ../../traces_for_chronus/elasticVpollux_e.csv -o ../../traces_for_chronus/elasticVpollux_e.lst
cd ..
python3 main.py --schedule=time-aware-with-lease --trace=../traces_for_chronus/elasticVpollux_e.csv --save_log_dir=../../plot_figure/logs/figure8a/chronus --ident=chronus --aggressive=True --mip_objective=adaptive --placement=local_search --profile=True --check_time_interval=60 --disable_turn_off=True --num_node_p_switch=16 --lease_term_interval=240 --name_list=../traces_for_chronus/elasticVpollux_e.lst --simulation=True --gpu_type=T4 --num_gpu_p_node=4
cd ../scheduler
|
1b3dfc8c661bc826ba4c6a8d5bd8559bd6c03041
|
Shell
|
prashilbhimani/PlugNSearch
|
/SetupScripts/archiveBashScripts/archivedScript.sh
|
UTF-8
| 1,056
| 3.234375
| 3
|
[] |
no_license
|
# Step 6: Set up kafka
# Usage: $1 = number of kafka instances, $2 = torrent helper script to run remotely
sudo chmod 755 kafka/kafka_main.sh
./kafka/kafka_main.sh 1
# Collect the internal (network) IP of every kafka instance.
kafka=()
for (( i = 1; i <= $1; i++ )); do
    kafka+=("$(gcloud compute instances describe "kafka$i" | sed -n "/networkIP\:\ /s/networkIP\:\ //p")")
done
# Join all IPs into a single comma-separated list for the producer.
kafka_ips="$(IFS=, ; echo "${kafka[*]}")"
# Step 7 : Set up storm
sudo chmod 755 storm/create.sh
./storm/create.sh 1
read -p "Press enter when the file downloads is complete"
# Step 8 : Submit topology
# Step 9 : Copy files
gcloud compute --project "datacenterscaleproject" ssh --zone "us-east1-b" "torrentgetter" --command "mkdir data && sudo cp -r /var/lib/transmission-daemon/Downloads/ data/"
gcloud compute scp --zone "us-east1-b" "$2" torrentgetter:~/data/
gcloud compute --project "datacenterscaleproject" ssh --zone "us-east1-b" "torrentgetter" --command "chmod +x data/$2 && ./data/$2"
# Step 10 : Send to kafka
gcloud compute scp --zone "us-east1-b" kafkaProducer.py torrentgetter:~/
gcloud compute --project "datacenterscaleproject" ssh --zone "us-east1-b" "torrentgetter" --command "python kafkaProducer.py data/ $kafka_ips"
| true
|
0e4b484699656b184302a6b30e5bd53eee8863e6
|
Shell
|
john-aws/backendjs
|
/tools/bkjs-monit
|
UTF-8
| 5,791
| 3.34375
| 3
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Author: Vlad Seryakov vseryakov@gmail.com
# Sep 2013
#
# Monit helper sub-commands for the bkjs toolkit. Each case arm writes a
# config fragment into /etc/monit.d and runs "monit reload". Relies on
# helpers defined elsewhere in the toolkit (find_bkjsbin, find_user,
# get_config, get_arg, get_all_args) and on $ECHO and the $BKJS_* variables
# being set by the caller -- TODO confirm against the main bkjs script.
case "$BKJS_CMD" in
# Monitor overall system load and root filesystem usage; alert via send-alert.
init-monit-system)
find_bkjsbin
$ECHO "set daemon 30 with start delay 60" > /etc/monit.d/system.conf
$ECHO "check system localhost if loadavg(1min) > 5 for 10 cycles then exec \"$BKJS_BIN send-alert\"" >> /etc/monit.d/system.conf
$ECHO "check filesystem rootfs with path / every 30 cycles if space usage > 90% then exec \"$BKJS_BIN send-alert\"" >> /etc/monit.d/system.conf
monit reload
;;
# Keep the bkjs master process running via its pidfile.
init-monit-bkjs)
find_user
find_bkjsbin
$ECHO "check process $BKJS with pidfile \"$BKJS_HOME/var/master.pid\" start program = \"$BKJS_BIN start $(get_all_args)\" as uid $BKJS_USER and gid $BKJS_GROUP with timeout 60 seconds stop program = \"$BKJS_BIN stop\"" > /etc/monit.d/$BKJS.conf
monit reload
;;
# Remove the bkjs monitoring config, if present.
stop-monit-bkjs)
if [ -f /etc/monit.d/$BKJS.conf ]; then
rm /etc/monit.d/$BKJS.conf
monit reload
fi
;;
# Configure monit mail alerts from DB config values (-alert-* keys).
init-monit-alerts|check-server)
get_config email -alert-email
if [[ ! -z $email ]]; then
get_config user -alert-user
get_config host -alert-host
get_config password -alert-password
echo "Init monit alert: $email, $host, $user"
$ECHO "set alert $email" > /etc/monit.d/alert.conf
$ECHO "set mail-format { from: $email }" >> /etc/monit.d/alert.conf
if [[ ! -z $host ]]; then
server="set mailserver $host"
[[ ! -z $user ]] && server="$server username $user"
[[ ! -z $password ]] && server="$server password $password"
# Amazon SES mail hosts require TLS.
[[ $host =~ amazonaws ]] && server="$server using tlsv1"
$ECHO $server >> /etc/monit.d/alert.conf
fi
monit reload
fi
;;
# Keep elasticsearch running; restart on HTTP failure or OutOfMemoryError in its log.
init-monit-elasticsearch)
find_user
find_bkjsbin
echo "check process elasticsearch with pidfile \"$BKJS_HOME/var/elasticsearch.pid\" start program = \"$BKJS_BIN run-elasticsearch $(get_all_args)\" as uid $BKJS_USER and gid $BKJS_GROUP stop program = \"$BKJS_BIN stop-elasticsearch\" if failed url http://127.0.0.1:9200/ with timeout 15 seconds for 2 cycles then restart" > /etc/monit.d/elasticsearch.conf
echo "check file elasticsearch-log with path $BKJS_HOME/log/elasticsearch.log if match 'java.lang.OutOfMemoryError' then exec \"$BKJS_BIN restart-elasticsearch $(get_all_args)\"" >> /etc/monit.d/elasticsearch.conf
monit reload
;;
# Alert when the elasticsearch cluster health endpoint is not 'green'.
init-monit-elasticsearch-health)
host=$(get_arg -host elasticsearch)
echo "check host elasticsearch-health with address $host if failed url http://$host:9200/_cluster/health and content = 'green' with timeout 60 seconds for 2 cycles then alert" > /etc/monit.d/elasticsearch-health.conf
monit reload
;;
# Tune kernel settings Redis needs, persist them, and monitor the server.
init-monit-redis)
# There is no startup script because we rely on the monit to handle processes
[ "$(whoami)" != "root" ] && echo "Run as root please" && exit 1
echo 1 > /proc/sys/vm/overcommit_memory
echo never > /sys/kernel/mm/transparent_hugepage/enabled
# Persist the kernel tweaks across reboots (only append once).
if [ "$(grep -s 'overcommit_memory' /etc/sysctl.conf)" = "" ]; then
echo 'vm.overcommit_memory=1' >> /etc/sysctl.conf
fi
if [ "$(grep -s 'transparent_hugepage' /etc/rc.local)" = "" ]; then
echo 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' >> /etc/rc.local
fi
find_user
find_bkjsbin
$ECHO "$BKJS_HOME/log/redis.log {\n weekly\n rotate 10\n copytruncate\n delaycompress\n compress\n notifempty\n missingok\n}" > /etc/logrotate.d/redis
$ECHO "check process redis-server with pidfile \"$BKJS_HOME/var/redis.pid\" start program = \"$BKJS_BIN run-redis $(get_all_args)\" as uid $BKJS_USER and gid $BKJS_GROUP stop program = \"$BKJS_BIN stop-redis\" if failed host 127.0.0.1 port 6379 for 2 cycles then restart" > /etc/monit.d/redis.conf
monit reload
;;
# Generate a Redis Sentinel config and keep the sentinel monitored.
init-monit-sentinel)
find_user
find_bkjsbin
host=$(get_arg -host 127.0.0.1)
port=$(get_arg -port 6379)
quorum=$(get_arg -quorum 2)
name=$(get_arg -name redis)
dtimeout=$(get_arg -down-timeout 10000)
ftimeout=$(get_arg -failover-timeout 180000)
conf=$BKJS_HOME/etc/sentinel.conf
$ECHO "daemonize yes" > $conf
$ECHO "syslog-enabled yes" >> $conf
$ECHO "sentinel monitor $name $host $port $quorum" >> $conf
$ECHO "sentinel down-after-milliseconds $name $dtimeout" >> $conf
$ECHO "sentinel failover-timeout $name $ftimeout" >> $conf
$ECHO "sentinel parallel-syncs $name 1" >> $conf
$ECHO "dir $BKJS_HOME/var/" >> $conf
$ECHO "pidfile $BKJS_HOME/var/sentinel.pid" >> $conf
chown $BKJS_USER $conf
$ECHO "check process redis-sentinel with pidfile \"$BKJS_HOME/var/sentinel.pid\" start program = \"$BKJS_BIN run-sentinel $(get_all_args)\" as uid $BKJS_USER and gid $BKJS_GROUP stop program = \"$BKJS_BIN stop-sentinel\"" > /etc/monit.d/sentinel.conf
monit reload
;;
# Print usage for all monit-related sub-commands.
help)
echo ""
echo "Monit setup commands:"
echo ""
echo " init-monit-system - setup system monitoring with monit, CPU, disk, send alert via '$0 send-alert' command"
echo " init-monit-bkjs - setup monit to keep $BKJS service running without using any other services and monitor"
echo " stop-monit-bkjs - stop monitoring $BKJS service by monit"
echo " init-monit-alerts - setup monit mail alerts, use DB config for the specified app name"
echo " init-monit-elasticsearch [-memsize PERCENT] [-memmax SIZE] [-nodetype TYPE] - setup monit to keep elasticsearch service running"
echo " init-monit-sentinel [-host HOST] [-port PORT] - setup Redis Sentinel server to be run on start and to be monitored (Linux only)"
echo " init-monit-redis [-memsize PERCENT] [-memmax SIZE] - setup Redis server to be run on start and to be monitored (Linux only)"
echo ""
;;
# Unknown command: flag for the dispatcher that sourced this file.
*)
BKJS_UNKNOWN=1
;;
esac
| true
|
e852ad10afa8c0377c60e5d31ae9191de91ecaf9
|
Shell
|
hanghang2333/news-recommendation
|
/bin/v2.0/merge.sh
|
UTF-8
| 692
| 3.140625
| 3
|
[] |
no_license
|
#########################################################################
# File Name: merge.sh
# Author: HouJP
# mail: houjp1992@gmail.com
# Created Time: Wed Mar 16 17:18:21 2016
#########################################################################
#! /bin/bash

# Stop the running recommend service, then re-run the boot job for
# yesterday's date. Expects HOST, PORT and PROJECT_DIR from conf.sh.

PATH_PRE="$(pwd)"
PATH_NOW="$(dirname "$0")"
cd "${PATH_NOW}"
source ../conf/conf.sh
cd "${PATH_PRE}"

# shut: ask the running service to stop via its HTTP control endpoint.
function shut() {
	curl -X POST "http://${HOST}:${PORT}/golaxy/recommend/stop"
}

# merge: run the boot script for yesterday's date.
# Returns 255 if boot.sh fails.
function merge() {
	# GNU date: yesterday formatted as YYYY-MM-DD
	date=$(date -d last-day +%Y-%m-%d)
	echo "[INFO] date=$date"
	echo "[INFO] boot ..."
	if ! "${PROJECT_DIR}/bin/boot.sh" "$date"; then
		echo "[ERROR] boot failed."
		return 255
	else
		echo "[INFO] boot success."
	fi
}

shut
merge
| true
|
d12ff11b1ec4ac5b38419208586ca9d10f6384d5
|
Shell
|
diegosanchezstrange/mydotfiles
|
/.zshrc
|
UTF-8
| 1,060
| 2.53125
| 3
|
[] |
no_license
|
# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH="/home/diego/.oh-my-zsh"
# Pin Java 8 for builds/tools that need it.
export JAVA_HOME="/usr/jdk/jdk1.8.0_191"
# Prepend the tmux spotify helper and the JDK binaries to PATH.
PATH=$HOME/mydotfiles/tmux-spotify-info:$PATH
PATH="$JAVA_HOME/bin:$PATH"
ZSH_THEME="robbyrussell"
# Base16 Shell
# Only load the base16 color helper in interactive shells ($PS1 set)
# and when the helper script exists and is non-empty.
BASE16_SHELL="$HOME/.config/base16-shell/"
[ -n "$PS1" ] && \
[ -s "$BASE16_SHELL/profile_helper.sh" ] && \
eval "$("$BASE16_SHELL/profile_helper.sh")"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
plugins=(
git
)
source $ZSH/oh-my-zsh.sh
# Greet each new shell with a fortune-telling penguin.
fortune | cowsay -f tux
#Alias
alias or="python3 ~/mydotfiles/scripts.exclude/fileOrganizer/downloadsOrganizer.exclude.py"
alias eclipse="~/programs/eclipse/eclipse/eclipse"
alias jsc="node"
alias py="python3"
alias ctux="clear ; fortune | cowsay -f tux"
# tmux session shortcuts: ts creates "work", ta attaches to it.
alias ts="tmux new -s work"
alias ta="tmux a -t work"
alias zshrc="vim ~/.zshrc"
alias vimrc="vim ~/.vimrc"
alias tmuxrc="vim ~/.tmux.conf"
| true
|
4b31e252df8c42b75fa2359682b05aaa4799fea7
|
Shell
|
subash98/subash
|
/7
|
UTF-8
| 189
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Usage: <script> <string> <file>
# Report whether <file> exists and, if so, whether it contains <string>.
file=$2
if test -f "$file"
then echo "file exist"
	# -F: match the string literally; -q: quiet, use exit status only
	if grep -Fq "$1" "$2"
	then
		echo The string was found
	else
		echo The string was not found
	fi
else
	echo "file doesn't exist"
fi
| true
|
c887ddd7d5271b9936b14f8928f4bc7efdd8cb40
|
Shell
|
mgperez/spring-cloud
|
/spring-boot-docker/cleaning_images.sh
|
UTF-8
| 651
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# References:
# https://gist.github.com/bastman/5b57ddb3c11942094f8d0a97d461b430
# https://linuxize.com/post/how-to-remove-docker-images-containers-volumes-and-networks/
#
# Clean-up helper: drop dangling images first, then force-remove every
# image whose listing mentions "smart" or "mgperez".
#
# Other handy one-liners (not run here):
#   remove all containers based on servidor:latest:
#     docker ps -a | awk '{ print $1,$2 }' | grep servidor | awk '{print $1 }' | xargs -I {} docker rm -f {}
#   delete all images:
#     docker rmi $(docker images -q)

# Untagged (dangling) layers.
docker rmi $(docker images --filter "dangling=true" -q --no-trunc)

# Project images, matched by name substring; column 3 is the image ID.
docker rmi -f $(docker images | awk '/smart/ && / / { print $3 }')
docker rmi -f $(docker images | awk '/mgperez/ && / / { print $3 }')
| true
|
d903446bc730eddd0bc868706a5c8c16a9803fa1
|
Shell
|
Link-Start/Tools
|
/__AutoPackageScript
|
UTF-8
| 7,418
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Usage:
# step1: put this script in the project root (same directory as the .xcworkspace or .xcodeproj file)
# step2: adjust the parameters below as needed
# step3: open a terminal and run the script (type "sh ", drag the script file into the terminal to get its path, then press enter)
# ============= project-specific section (set these parameters before running this script) =================== #
# App name: file name of the exported ipa
app_name="app__nmae_测试"
# Whether to build a workspace (true for a CocoaPods-managed .xcworkspace; false for a plain Xcode .xcodeproj)
is_workspace="true"
# Name of the .xcworkspace; required when is_workspace is true, otherwise optional
workspace_name="LSProjectTool"
# Name of the .xcodeproj; required when is_workspace is false, otherwise optional
project_name="LSProjectTool"
# Scheme name of the project (i.e. the target name); required
scheme_name="LSProjectTool"
# Build configuration: Release or Debug (usually Release); required
build_configuration="Release"
# Export method: development, ad-hoc, app-store or enterprise; required
method="development"
# The next two parameters are only used when specifying the provisioning profile manually; leave empty when Xcode manages profiles automatically
# Name of the .mobileprovision matching the method above (double-click to install the file first); required for manual profile management
mobileprovision_name=""
# Bundle ID of the project; required for manual profile management
bundle_identifier=""
echo "--------------------脚本配置参数检查--------------------"
echo "\033[33;1mis_workspace=${is_workspace} "
echo "workspace_name=${workspace_name}"
echo "project_name=${project_name}"
echo "scheme_name=${scheme_name}"
echo "build_configuration=${build_configuration}"
echo "bundle_identifier=${bundle_identifier}"
echo "method=${method}"
echo "mobileprovision_name=${mobileprovision_name} \033[0m"
# ======================= fixed script parameters (no need to change normally) ====================== #
# Directory containing this script (not the caller's working directory)
script_dir="$( cd "$( dirname "$0" )" && pwd )"
# Desktop directory of the current user
desktop_dir="/Users/$USER/Desktop"
# Project root directory
project_dir=$script_dir
# Timestamp used in output names
DATE=`date '+%Y%m%d_%H%M%S'`
# Working directory for packaging artifacts
relevant_file_path="${desktop_dir}/${scheme_name}_Package"
# Output/export folder path
# export_path="$project_dir/Package/$scheme_name-$DATE" # under the project root
export_path="${relevant_file_path}/$scheme_name-$DATE" # on the desktop
# Path of the output .xcarchive
export_archive_path="$export_path/$scheme_name.xcarchive"
# Folder the ipa is exported into
export_ipa_path="$export_path"
# Name of the exported ipa
ipa_name="${scheme_name}_${DATE}"
# Path of the plist config file used by -exportArchive
# export_options_plist_path="$project_dir/ExportOptions.plist"
export_options_plist_path="$export_ipa_path/ExportOptions.plist"
echo "--------------------脚本固定参数检查--------------------"
echo "\033[33;1mproject_dir=${project_dir}"
echo "DATE=${DATE}"
echo "export_path=${export_path}"
echo "export_archive_path=${export_archive_path}"
echo "export_ipa_path=${export_ipa_path}"
echo "export_options_plist_path=${export_options_plist_path}"
echo "ipa_name=${ipa_name} \033[0m"
# ======================= automated packaging (no need to change normally) ====================== #
echo "------------------------------------------------------"
echo "\033[32m开始构建项目 \033[0m"
# Enter the project directory
cd ${project_dir}
# Create the output directory if it does not exist
if [ -d "$export_path" ] ; then
echo $export_path
else
mkdir -pv $export_path
fi
# Print the available schemes
xcodebuild \
-list \
-project ${script_dir}/${project_name}.xcodeproj
# Decide whether to build a workspace or a project
if $is_workspace ; then
# Clean before building
xcodebuild clean -workspace ${workspace_name}.xcworkspace \
-scheme ${scheme_name} \
-configuration ${build_configuration}
xcodebuild archive -workspace ${workspace_name}.xcworkspace \
-scheme ${scheme_name} \
-configuration ${build_configuration} \
-archivePath ${export_archive_path}
else
# Clean before building
xcodebuild clean -project ${project_name}.xcodeproj \
-scheme ${scheme_name} \
-configuration ${build_configuration}
xcodebuild archive -project ${project_name}.xcodeproj \
-scheme ${scheme_name} \
-configuration ${build_configuration} \
-archivePath ${export_archive_path}
fi
# Check whether the build succeeded.
# A .xcarchive is actually a directory, not a file, hence the -d test.
if [ -d "$export_archive_path" ] ; then
echo "\033[32;1m项目构建成功 🚀 🚀 🚀 \033[0m"
else
echo "\033[31;1m项目构建失败 😢 😢 😢 \033[0m"
exit 1
fi
echo "------------------------------------------------------"
echo "\033[32m开始导出ipa文件 \033[0m"
# Delete any stale export_options_plist file first
if [ -f "$export_options_plist_path" ] ; then
#echo "${export_options_plist_path} exists, deleting"
rm -f $export_options_plist_path
fi
# Generate the export_options_plist file from the parameters above
/usr/libexec/PlistBuddy -c "Add :method String ${method}" $export_options_plist_path
/usr/libexec/PlistBuddy -c "Add :provisioningProfiles:" $export_options_plist_path
/usr/libexec/PlistBuddy -c "Add :provisioningProfiles:${bundle_identifier} String ${mobileprovision_name}" $export_options_plist_path
xcodebuild -exportArchive \
-archivePath ${export_archive_path} \
-exportPath ${export_ipa_path} \
-exportOptionsPlist ${export_options_plist_path} \
-allowProvisioningUpdates
# Check whether the ipa file exists
if [ -f "$export_ipa_path/$scheme_name.ipa" ] ; then
echo "\033[32;1mexportArchive ipa包成功,准备进行重命名\033[0m"
else
echo "\033[31;1mexportArchive ipa包失败 😢 😢 😢 \033[0m"
exit 1
fi
# Rename the ipa file and move it to the desktop
# mv $export_ipa_path/$scheme_name.ipa $export_ipa_path/$ipa_name.ipa
mv $export_ipa_path/$scheme_name.ipa ${desktop_dir}/${app_name}.ipa
# Check whether the renamed file exists
if [ -f "${desktop_dir}/${app_name}.ipa" ] ; then
echo "\033[32;1m导出 小钱多多.ipa 包成功 🎉 🎉 🎉 \033[0m"
open $export_path
else
echo "\033[31;1m导出 小钱多多.ipa 包失败 😢 😢 😢 \033[0m"
exit 1
fi
# -d filename : true if filename is a directory
# -f filename : true if filename is a regular file
# Delete the intermediate export_options_plist file
if [ -f "$export_options_plist_path" ] ; then
echo "${export_options_plist_path}文件存在,准备删除"
rm -f $export_options_plist_path
fi
# Delete the leftover packaging working directory
if [[ -d "${relevant_file_path}" ]]; then
echo "${relevant_file_path}文件夹存在,准备删除"
rm -rf ${relevant_file_path}
while [[ -d "${relevant_file_path}" ]]; do
# keep retrying until the directory is gone
rm -rf ${relevant_file_path}
done
if [[ -d "${relevant_file_path}" ]]; then
echo "${relevant_file_path} 删除失败"
else
echo "${relevant_file_path} 删除成功"
fi
fi
# Print total packaging time
echo "\033[36;1m使用AutoPackageScript打包总用时: ${SECONDS}s \033[0m"
exit 0
| true
|
38cf6c2469722870f756a4eca34154d9fa0da36b
|
Shell
|
stajichlab/Afum_popgenome
|
/variantcall/pipeline/01_aln_evol20.sh
|
UTF-8
| 1,472
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/bash
#SBATCH --mem 8G --ntasks 8 --nodes 1 -J evol20.Afum --out logs/Afum_Evol20.bwa.%a.log --time 8:00:00
# SLURM array job: align one paired-end sample (one row of the sample sheet)
# against the Af293 reference with bwa mem, then fixmate + coordinate-sort
# into a BAM. The row index comes from SLURM_ARRAY_TASK_ID or argv[1].
module load bwa/0.7.17
module unload java
module load java/8
module load picard
CENTER=UCR
GENOME=genome/Af293
GENOMESTRAIN=Af293
INDIR=input/UCR_FC548
TOPOUTDIR=aln
mkdir -p $TOPOUTDIR
TEMP=/scratch
N=${SLURM_ARRAY_TASK_ID}
CPU=1
# Use all CPUs SLURM allocated on this node, if known.
if [ $SLURM_CPUS_ON_NODE ]; then
CPU=$SLURM_CPUS_ON_NODE
fi
SAMPFILE=UCR_FC548_samples.csv
# Fall back to the first command-line argument when not run as an array job.
if [ -z $N ]; then
N=$1
fi
if [ -z $N ]; then
echo "need to provide a number by --array or cmdline"
exit
fi
# Refuse indices beyond the number of rows in the sample sheet.
MAX=$(wc -l $SAMPFILE | awk '{print $1}')
echo "$N $MAX for $SAMPFILE"
if [ $N -gt $MAX ]; then
echo "$N is too big, only $MAX lines in $SAMPFILE"
exit
fi
# The sample sheet is comma-separated; IFS=, makes 'read' split on commas.
IFS=,
# Skip the header row, pick row N, and parse its columns.
tail -n +2 $SAMPFILE | sed -n ${N}p | while read PREFIX STRAIN LEFT RIGHT BARCODE DESC
do
OUTDIR=$TOPOUTDIR
PAIR1=$INDIR/$LEFT
PAIR2=$INDIR/$RIGHT
SAMFILE=NULL
if [ -e $PAIR1 ]; then
SAMFILE=$OUTDIR/${PREFIX}.PE.unsrt.sam
echo "SAMFILE is $SAMFILE"
# Align only if the intermediate SAM is not already present (restartable).
if [ ! -f $SAMFILE ]; then
bwa mem -t $CPU -R "@RG\tID:$PREFIX\tSM:$PREFIX\tLB:$PREFIX\tPL:illumina\tCN:$CENTER" $GENOME $PAIR1 $PAIR2 > $SAMFILE
fi
# Fix mate info, sort to BAM, then remove the intermediates.
if [ ! -f $OUTDIR/${PREFIX}.PE.bam ]; then
samtools fixmate --threads $CPU -O bam $SAMFILE $TEMP/${PREFIX}.fixmate.bam
samtools sort --threads $CPU -O bam -o $OUTDIR/${PREFIX}.PE.bam -T $TEMP $TEMP/${PREFIX}.fixmate.bam
/usr/bin/rm $TEMP/${PREFIX}.fixmate.bam
/usr/bin/rm $SAMFILE
fi
fi
done
| true
|
d91c66ba7546d31cda02a95c988a846ec553151d
|
Shell
|
whisshe/workshell
|
/config/cron_dep/cron_shell/cron_status.sh
|
UTF-8
| 225
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the mem_free.sh crontab entry exactly once.
# /tmp/cron-st holds a numeric flag written elsewhere; a value below 1
# means the entry has not been added yet.
status=$(cat /tmp/cron-st)
if [ "$status" -lt 1 ]; then
	echo "*/15 * * * * /tmp/mem_free.sh" >> /var/spool/cron/crontabs/root
	exit 0
else
	# Red message: entry already installed (or flag file unreadable/non-numeric).
	echo -e "\033[31mcrontab is already added\033[0m"
	exit 0
fi
| true
|
7357fa225a1173542e109816696fbb5f92c2fc26
|
Shell
|
jsonn/pkgsrc
|
/lang/openjdk8/patches/patch-common_autoconf_generated-configure.sh
|
UTF-8
| 9,534
| 2.9375
| 3
|
[] |
no_license
|
$NetBSD: patch-common_autoconf_generated-configure.sh,v 1.12 2017/05/10 13:59:57 ryoon Exp $
BOOT_JDK_VERSION part: pkg/51221 (Build error with OpenJDK8 and i386)
--- common/autoconf/generated-configure.sh.orig 2016-10-26 22:56:42.000000000 +0000
+++ common/autoconf/generated-configure.sh
@@ -8454,9 +8454,9 @@ done
# We need to find a recent version of GNU make. Especially on Solaris, this can be tricky.
if test "x$MAKE" != x; then
# User has supplied a make, test it.
- if test ! -f "$MAKE"; then
- as_fn_error $? "The specified make (by MAKE=$MAKE) is not found." "$LINENO" 5
- fi
+# if test ! -f "$MAKE"; then
+# as_fn_error $? "The specified make (by MAKE=$MAKE) is not found." "$LINENO" 5
+# fi
MAKE_CANDIDATE=""$MAKE""
DESCRIPTION="user supplied MAKE=$MAKE"
@@ -11755,7 +11755,7 @@ $as_echo "$as_me: Potential Boot JDK fou
BOOT_JDK_FOUND=no
else
# Oh, this is looking good! We probably have found a proper JDK. Is it the correct version?
- BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java" -version 2>&1 | head -n 1`
+ BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java" -version 2>&1 | grep version`
# Extra M4 quote needed to protect [] in grep expression.
FOUND_VERSION_78=`echo $BOOT_JDK_VERSION | grep '\"1\.[78]\.'`
@@ -12539,7 +12539,7 @@ fi
# Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
- if test "x$ISLINK" == x; then
+ if test "x$ISLINK" = x; then
# This is not a symbolic link! We are done!
break
fi
@@ -16225,16 +16225,15 @@ $as_echo_n "checking flags for boot jdk
# Maximum amount of heap memory.
# Maximum stack size.
if test "x$BOOT_JDK_BITS" = x32; then
- JVM_MAX_HEAP=1100M
STACK_SIZE=768
else
# Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
# pointers are used. Apparently, we need to increase the heap and stack
# space for the jvm. More specifically, when running javac to build huge
# jdk batch
- JVM_MAX_HEAP=1600M
STACK_SIZE=1536
fi
+ JVM_MAX_HEAP=800M
$ECHO "Check if jvm arg is ok: -Xmx$JVM_MAX_HEAP" >&5
$ECHO "Command: $JAVA -Xmx$JVM_MAX_HEAP -version" >&5
@@ -19454,7 +19453,7 @@ $as_echo "(none, will use system headers
elif test "x$OPENJDK_TARGET_OS" = "xwindows"; then
COMPILER_CHECK_LIST="cl"
elif test "x$OPENJDK_TARGET_OS" = "xsolaris"; then
- COMPILER_CHECK_LIST="cc gcc"
+ COMPILER_CHECK_LIST="gcc cc"
elif test "x$OPENJDK_TARGET_OS" = "xaix"; then
# Do not probe for cc on AIX.
COMPILER_CHECK_LIST="xlc_r"
@@ -19910,7 +19909,7 @@ $as_echo_n "checking resolved symbolic l
# Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
- if test "x$ISLINK" == x; then
+ if test "x$ISLINK" = x; then
# This is not a symbolic link! We are done!
break
fi
@@ -20348,7 +20347,7 @@ $as_echo_n "checking for resolved symbol
# Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
- if test "x$ISLINK" == x; then
+ if test "x$ISLINK" = x; then
# This is not a symbolic link! We are done!
break
fi
@@ -20376,7 +20375,7 @@ $as_echo "no, keeping CC" >&6; }
COMPILER=$CC
COMPILER_NAME=$COMPILER_NAME
- if test "x$OPENJDK_TARGET_OS" = xsolaris; then
+ if test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$GCC" = xno; then
# Make sure we use the Sun Studio compiler and not gcc on Solaris, which won't work
COMPILER_VERSION_TEST=`$COMPILER -V 2>&1 | $HEAD -n 1`
$ECHO $COMPILER_VERSION_TEST | $GREP "^.*: Sun $COMPILER_NAME" > /dev/null
@@ -21511,7 +21510,7 @@ $as_echo_n "checking resolved symbolic l
# Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
- if test "x$ISLINK" == x; then
+ if test "x$ISLINK" = x; then
# This is not a symbolic link! We are done!
break
fi
@@ -21949,7 +21948,7 @@ $as_echo_n "checking for resolved symbol
# Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
- if test "x$ISLINK" == x; then
+ if test "x$ISLINK" = x; then
# This is not a symbolic link! We are done!
break
fi
@@ -21977,7 +21976,7 @@ $as_echo "no, keeping CXX" >&6; }
COMPILER=$CXX
COMPILER_NAME=$COMPILER_NAME
- if test "x$OPENJDK_TARGET_OS" = xsolaris; then
+ if test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$GCC" = xno; then
# Make sure we use the Sun Studio compiler and not gcc on Solaris, which won't work
COMPILER_VERSION_TEST=`$COMPILER -V 2>&1 | $HEAD -n 1`
$ECHO $COMPILER_VERSION_TEST | $GREP "^.*: Sun $COMPILER_NAME" > /dev/null
@@ -29390,6 +29389,15 @@ $as_echo "$ac_cv_c_bigendian" >&6; }
if test "x$OPENJDK_TARGET_OS" = xbsd || test "x$OPENJDK_TARGET_OS" = xmacosx; then
SET_EXECUTABLE_ORIGIN="$SET_SHARED_LIBRARY_ORIGIN"
fi
+ if test "x$OPENJDK_TARGET_OS" = xsolaris; then
+ SET_SHARED_LIBRARY_NAME=''
+ SET_SHARED_LIBRARY_MAPFILE=''
+ SET_SHARED_LIBRARY_ORIGIN='-R\$$$$ORIGIN$1'
+ SET_EXECUTABLE_ORIGIN="$SET_SHARED_LIBRARY_ORIGIN"
+ CFLAGS_JDK="${CFLAGS_JDK} -D__solaris__"
+ CXXFLAGS_JDK="${CXXFLAGS_JDK} -D__solaris__"
+ POST_STRIP_CMD="$STRIP -x"
+ fi
else
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
# If it is not gcc, then assume it is the Oracle Solaris Studio Compiler
@@ -29531,6 +29539,13 @@ rm -f core conftest.err conftest.$ac_obj
C_O_FLAG_NORM="-Os"
C_O_FLAG_NONE=""
;;
+ solaris )
+ # libverify currently crashes in 32-bit builds with
+ # alignment faults, temporary workaround with -O2
+ C_O_FLAG_HI="-O2"
+ C_O_FLAG_NORM="-O2"
+ C_O_FLAG_NONE="-O0"
+ ;;
*)
C_O_FLAG_HI="-O3"
C_O_FLAG_NORM="-O2"
@@ -29732,7 +29747,7 @@ fi
#
case $COMPILER_NAME in
gcc )
- CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -W -Wall -Wno-unused -Wno-parentheses -Wno-sign-compare \
+ CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -W -Wall -Wno-unused -Wno-parentheses -Wno-sign-compare -Wno-unused-parameter \
-pipe \
-D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE"
case $OPENJDK_TARGET_CPU_ARCH in
@@ -30343,7 +30358,8 @@ $as_echo "alsa pulse x11" >&6; }
if test "x$OPENJDK_TARGET_OS" = xbsd; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking what is not needed on BSD?" >&5
$as_echo_n "checking what is not needed on BSD?... " >&6; }
- if test "x$OPENJDK_TARGET_OS_VENDOR" = xopenbsd; then
+ if test "x$OPENJDK_TARGET_OS_VENDOR" = xopenbsd -o "x$OPENJDK_TARGET_OS_VENDOR" = xnetbsd -o "x$OPENJDK_TARGET_OS_VENDOR" = xfreebsd; then
+# XXX revisit this
ALSA_NOT_NEEDED=yes
PULSE_NOT_NEEDED=yes
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: alsa pulse" >&5
@@ -31555,7 +31571,11 @@ $as_echo "$as_me: WARNING: freetype not
# Allow --with-freetype-lib and --with-freetype-include to override
if test "x$with_freetype_include" != x; then
- POTENTIAL_FREETYPE_INCLUDE_PATH="$with_freetype_include"
+ POTENTIAL_FREETYPE_INCLUDE_PATH="$with_freetype_include"
+ # deal w/ freetype2 in new location
+ if test -f "$with_freetype_include"/freetype2/ft2build.h; then
+ POTENTIAL_FREETYPE_INCLUDE_PATH="$with_freetype_include"/freetype2
+ fi
fi
if test "x$with_freetype_lib" != x; then
POTENTIAL_FREETYPE_LIB_PATH="$with_freetype_lib"
@@ -34373,7 +34393,7 @@ $as_echo "$as_me: The path of FREETYPE_I
FREETYPE_INCLUDE_PATH="`cd "$path"; $THEPWDCMD -L`"
fi
- if test -d $FREETYPE_INCLUDE_PATH/freetype2/freetype; then
+ if test -d $FREETYPE_INCLUDE_PATH/freetype2; then
FREETYPE_CFLAGS="-I$FREETYPE_INCLUDE_PATH/freetype2 -I$FREETYPE_INCLUDE_PATH"
else
FREETYPE_CFLAGS="-I$FREETYPE_INCLUDE_PATH"
@@ -34506,7 +34526,7 @@ $as_echo "$as_me: The path of FREETYPE_L
if test "x$OPENJDK_TARGET_OS" = xwindows; then
FREETYPE_LIBS="$FREETYPE_LIB_PATH/freetype.lib"
else
- FREETYPE_LIBS="-L$FREETYPE_LIB_PATH -lfreetype"
+ FREETYPE_LIBS="-Xlinker -R$FREETYPE_LIB_PATH -L$FREETYPE_LIB_PATH -lfreetype"
fi
fi
@@ -35732,9 +35752,6 @@ fi
- if test "x$LLVM_CONFIG" != xllvm-config; then
- as_fn_error $? "llvm-config not found in $PATH." "$LINENO" 5
- fi
llvm_components="jit mcjit engine nativecodegen native"
unset LLVM_CFLAGS
@@ -35777,7 +35794,7 @@ fi
fi
# libCrun is the c++ runtime-library with SunStudio (roughly the equivalent of gcc's libstdc++.so)
- if test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$LIBCXX" = x; then
+ if test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$LIBCXX" = x && test "x$GCC" = "xno"; then
LIBCXX="/usr/lib${OPENJDK_TARGET_CPU_ISADIR}/libCrun.so.1"
fi
| true
|
05685822a3e8dc1960627d875930e9d72a29df25
|
Shell
|
rbarzic/MAGICAL
|
/test/run.sh
|
UTF-8
| 2,667
| 3.109375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
##
# @file run.sh
# @author Yibo Lin
# @date Dec 2018
#
#!/bin/bash
# Driver that chains the MAGICAL flow: constraint generation -> placement
# -> well generation -> routing. Each stage is pulled in with 'source', and
# the usage path uses 'return', so this script is meant to be sourced, not
# executed directly -- TODO confirm with the project README.
# get script directory instead of execution directory
TOP_SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
# Optional color definitions (STATUS_PREFIX_COLOR, NC).
if [ -e "${TOP_SCRIPT_DIR}/color.sh" ]; then
source ${TOP_SCRIPT_DIR}/color.sh
fi
# Require at least the circuit argument; print usage otherwise.
if [ ! -n "$1" ]
then
echo -e "${STATUS_PREFIX_COLOR}Usage:${NC} `basename $0` circuit techfile simple_techfile spacing_rule width_area_rule enclosure_rule well_contact_GDSII lef_file"
return
fi
# Positional arguments.
TOP_CIRCUIT=$1
TOP_CIRCUIT_NAME=$(basename ${TOP_CIRCUIT})
TOP_TECHFILE=$2
TOP_SIMPLE_TECHFILE=$3
TOP_SPACING_RULE_FILE=$4
TOP_WIDTH_AREA_RULE_FILE=$5
TOP_ENCLOSURE_RULE_FILE=$6
TOP_WELL_CON_GDS_FILE=$7
TOP_LEF_FILE=$8
TOP_RESULT_DIR=results
echo "CIRCUIT = ${TOP_CIRCUIT}"
echo "CIRCUIT_NAME = ${TOP_CIRCUIT_NAME}"
echo "TOP_SCRIPT_DIR = ${TOP_SCRIPT_DIR}"
# run device generation and constraint generation
echo -e "${STATUS_PREFIX_COLOR}${TOP_CIRCUIT_NAME}:${NC} constraint generation"
source ${TOP_SCRIPT_DIR}/../constraint_generation/test/run.sh ${TOP_RESULT_DIR}/${TOP_CIRCUIT}/${TOP_CIRCUIT_NAME}.sp
## run device generation
#echo -e "${STATUS_PREFIX_COLOR}${TOP_CIRCUIT_NAME}:${NC} device generation"
##source ${TOP_SCRIPT_DIR}/../placement/device_generation/test/run.sh ${TOP_CIRCUIT}
# run analog placement
echo -e "${STATUS_PREFIX_COLOR}${TOP_CIRCUIT_NAME}:${NC} analog placement"
source ${TOP_SCRIPT_DIR}/../placement/idea_place/test/run.sh \
${TOP_RESULT_DIR}/${TOP_CIRCUIT} \
${TOP_SIMPLE_TECHFILE} \
${TOP_SPACING_RULE_FILE} \
${TOP_WIDTH_AREA_RULE_FILE}
# run well generation
echo -e "${STATUS_PREFIX_COLOR}${TOP_CIRCUIT_NAME}:${NC} well generation"
source ${TOP_SCRIPT_DIR}/../placement/well_generation/test/run.sh \
${TOP_RESULT_DIR}/${TOP_CIRCUIT} \
${TOP_SIMPLE_TECHFILE} \
${TOP_SPACING_RULE_FILE} \
${TOP_ENCLOSURE_RULE_FILE} \
${TOP_WELL_CON_GDS_FILE} \
${TOP_RESULT_DIR}/${TOP_CIRCUIT}/result_legal_detail.txt
# run analog routing
# NOTE(review): the second argument below uses TEMP_RESULT_DIR, which is
# never set in this script (everything else uses TOP_RESULT_DIR) -- looks
# like a typo; confirm against the routing stage's expected arguments.
echo -e "${STATUS_PREFIX_COLOR}${TOP_CIRCUIT_NAME}:${NC} analog routing"
source ${TOP_SCRIPT_DIR}/../routing/test/run.sh \
${TOP_RESULT_DIR}/${TOP_CIRCUIT_NAME}/${TOP_CIRCUIT_NAME}.wellgen.gds \
${TEMP_RESULT_DIR}/${TOP_CIRCUIT_NAME}/${TOP_CIRCUIT_NAME}.result.final \
${TOP_RESULT_DIR}/${TOP_CIRCUIT_NAME}/${TOP_CIRCUIT_NAME}.pin \
${TOP_RESULT_DIR}/${TOP_CIRCUIT_NAME}/${TOP_CIRCUIT_NAME}.wcon \
${TOP_RESULT_DIR}/${TOP_CIRCUIT_NAME}/${TOP_CIRCUIT_NAME}.sub \
${TOP_RESULT_DIR}/${TOP_CIRCUIT_NAME}/${TOP_CIRCUIT_NAME}.iopin \
${TOP_RESULT_DIR}/${TOP_CIRCUIT_NAME}/${TOP_CIRCUIT_NAME}.symnet \
${TOP_LEF_FILE} \
${TOP_TECHFILE}
| true
|
83630b369882d1d8f50b24438f0aebd0f8daeb36
|
Shell
|
bioconda/bioconda-recipes
|
/recipes/mmult/build.sh
|
UTF-8
| 301
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Conda build script for mmult: point the toolchain at the eigen3 headers
# under $PREFIX, regenerate the autotools files, configure, build, install.

export CFLAGS="-I${PREFIX}/include -I${PREFIX}/include/eigen3"
export LDFLAGS="-L${PREFIX}/lib"
export CPATH="${PREFIX}/include"

cd "$SRC_DIR"

# Regenerate configure/Makefile.in, then configure with the same include
# and library paths passed explicitly.
autoreconf -i
./configure CPPFLAGS="-I${PREFIX}/include -I${PREFIX}/include/eigen3" LDFLAGS="-L${PREFIX}/lib" --prefix="$PREFIX"

make
# Only the src subdirectory installs artifacts.
make install -C src
| true
|
5745e5acbac8ae0b68bd693a4f3b87df5f0d358a
|
Shell
|
gummi-stack/gummi-git-hook
|
/pre-receive
|
UTF-8
| 2,897
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Git pre-receive hook: on push, determines the branch and new revision,
# rejects repositories using git submodules, then streams a remote build
# API's output back to the pusher and maps the API's sentinel lines to
# the hook's exit status.
# flush STDIN coming from git; we have no use for that info in this hook but
# if you don't do this, git-shell sometimes dies of a signal 13 (SIGPIPE)
#[ -t 0 ] || cat >/dev/null
# NOTE(review): "/usb/bin/" looks like a typo for "/usr/bin/" -- confirm.
PATH=/usb/bin/:$PATH
APIURL="http://192.168.13.7:9001"
APIKEY="cM7I84LFT9s29u0tnxrvZaMze677ZE60" #test only
set -e
# status MESSAGE: print a "----->"-prefixed progress line.
status() {
echo "-----> $*"
}
# indent: prefix every stdin line with spaces (unbuffered where possible).
indent() {
c='s/^/ /'
case $(uname) in
Darwin) sed -l "$c";; # mac/bsd sed: -l buffers on line boundaries
*) sed -u "$c";; # unix/gnu sed: -u unbuffered (arbitrary) chunks of data
esac
}
# suppress: prefix each line with ESC[1G (cursor to column 1) so git's
# "remote: " prefix is visually overwritten on the pusher's terminal.
suppress () {
c="s/^/`echo $'\e[1G'`/"
case $(uname) in
# Darwin) sed -e "s/^/\x1b[1g/";; # mac/bsd sed: -l buffers on line boundaries
Darwin) sed -l "$c";; # mac/bsd sed: -l buffers on line boundaries
*) sed -u "$c";; # unix/gnu sed: -u unbuffered (arbitrary) chunks of data
esac
}
echo | suppress
# git feeds "<oldrev> <newrev> <refname>" lines on stdin; keep the last one.
while read oldrev newrev refname
do
# Do something with $oldrev $newrev $refname
REF=$refname
NEWREV=$newrev
done
# GUMMI=`git ls-tree $NEWREV --name-only | grep .gummi`
# if [ "$GUMMI" = "" ]; then
# exit 0
# fi
# Extract the branch name from refs/heads/<branch>.
BRANCH=$(echo $REF | grep "refs/heads" | sed 's/refs\/heads\///g')
if [ "$BRANCH" = "" ]; then
# NOTE(review): 'print' is not a bash builtin (ksh/zsh only) -- this line
# likely fails; probably meant 'echo'. Confirm.
print "Couldn't determine branch. Ignoring."
exit 0;
fi
#REPO_NAME=$(pwd | awk -F"/" '{ print $NF }')
REPO_NAME=$(pwd | awk -F"/" '{ print $(NF-1),":",$NF }' | tr -d ' ' )
# Second assignment wins: repo name is just the last path component.
REPO_NAME=$(pwd | awk -F"/" '{print $NF}')
#git config -l
#pwd
#echo $REPO_NAME
#exit 1
status "Update pushed to branch: $BRANCH" | suppress
# Reject pushes that introduce git submodules.
if [ "$(git ls-tree $NEWREV --name-only .gitmodules)" != "" ]; then
echo | suppress
echo " ! Git submodules are notsupported " | suppress
echo "" | suppress
exit 1
fi
# git archive --format tar.gz $NEWREV | ssh -o StrictHostKeyChecking=no -i ~/.ssh/id_dsa cdn@10.1.69.105 "cat - > git/$REPO_NAME-$BRANCH-$NEWREV.tar.gz"
# print " > Repository exported"
# echo " > Call api build: " curl -N -u :cM7I84LFT9s29u0tnxrvZaMze677ZE60 -s "http://api.nibbler.cz/git/$REPO_NAME/$BRANCH/$NEWREV"
echo "Reponame" $REPO_NAME | indent | suppress
echo "Reponame" $BRANCH | indent | suppress
echo "Reponame" $NEWREV | indent | suppress
status "Calling build api" | suppress
IFS=$'\n'
# Stream the build API output line by line; magic sentinel lines signal
# success/failure. The while runs in a pipeline subshell, so its 'exit'
# sets the pipeline's status rather than ending the script directly.
curl -s -N -u :$APIKEY -s "$APIURL/git/$REPO_NAME/$BRANCH/$NEWREV" 2>&1 | \
while read i
do
WAS_RESULT=1
if [ "$i" = "94ed473f82c3d1791899c7a732fc8fd0_exit_0" ]; then
export TERM="xterm"
tput setaf 2
echo -n " > Success" | suppress
tput sgr0
echo -e "\n" | suppress
exit 5
fi
# TODO predelat na regular
if [ "$i" = "94ed473f82c3d1791899c7a732fc8fd0_exit_1" ]; then
export TERM="xterm"
tput setaf 1
echo -n " > Build failed! " | suppress
tput sgr0
echo -e "\n" | suppress
echo | suppress
exit 1
fi
echo " $i" | suppress | indent
done
# NOTE(review): with 'set -e' above, a non-zero pipeline status (including
# the exit-5 success path) terminates the script before reaching this
# check, so this branch may be dead code -- verify intended behavior.
if [ "$?" = "5" ]; then
echo | suppress
exit 0
fi
# Fallthrough: the API ended without emitting a sentinel line.
export TERM="xterm"
tput setaf 1
status "Invalid api reposne " | suppress
tput sgr0
echo | suppress
exit 1
| true
|
2e7b037222b1b69828178b84d38e8c48765cabd0
|
Shell
|
jdotw/Lithium
|
/util/checkin_all.sh
|
UTF-8
| 286
| 3.296875
| 3
|
[] |
no_license
|
#! /bin/sh
echo "=============================================================="
for i in *
do
if test -d $i; then
cd $i
echo "Checking in $i (Message: $1)"
svn commit -m "$1"
echo "=============================================================="
cd ..
fi
done
| true
|
6c001aa3ce5b198492823a30142b490231fbe98e
|
Shell
|
andtherand/dotfiles
|
/system/.function_network
|
UTF-8
| 717
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Webserver
srv() {
local DIR=${1:-.}
local AVAILABLE_PORT=$(get-port)
local PORT=${2:-$AVAILABLE_PORT}
if [ "$PORT" -le "1024" ]; then
sudo -v
fi
open "http://localhost:$PORT"
superstatic "$DIR" -p "$PORT"
}
# Get IP from hostname
hostname2ip() {
ping -c 1 "$1" | egrep -m1 -o '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
}
# Upload file to transfer.sh
# https://github.com/dutchcoders/transfer.sh/
transfer() {
tmpfile=$( mktemp -t transferXXX )
curl --progress-bar --upload-file "$1" https://transfer.sh/$(basename $1) >> $tmpfile;
cat $tmpfile;
rm -f $tmpfile;
}
# Find real from shortened url
unshorten() {
curl -sIL $1 | sed -n 's/Location: *//p'
}
| true
|
88bcca3085b6391c2fccb613862aa7bd49d2fc31
|
Shell
|
BigNerd95/Grandstream-Firmware-HT802
|
/FirmwareDumps/HT802-1.0.10.6/etc/rc.common
|
UTF-8
| 657
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
NFS=0
if [ "`grep \"\/dev\/root\" /etc/mtab | grep nfs`" ]; then
NFS=1
fi
FACTORY_MODE=0
DEV_MAC="`cat /proc/gxp/dev_info/dev_mac 2>/dev/null|tr '[A-Z]' '[a-z]' |tr -d ':'|cut -c7-12`"
if [ "${DEV_MAC}" = "000000" ]; then
# :factorymac enables the ate built into the aplication
#nvram set :factorymac=1
FACTORY_MODE=1
fi
#####################################
case "${1}" in
boot)
boot
;;
start)
start
;;
stop)
stop
;;
restart)
restart
;;
*)
echo $"Usage ${0} {boot|start|stop|restart}"
exit 1
esac
exit $?
| true
|
836b8e88d6cde0602029177ac0f7600338cbc31d
|
Shell
|
Keyang/openadv
|
/Infrastructure/bin/waitPodReady.sh
|
UTF-8
| 282
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
echo "Wait Pod with name ${1} in Project ${2}"
sleep 5
while : ; do
echo "Checking if ${1} is Ready..."
oc get pod -n $2 | grep $1 | grep -v build | grep -v deploy |grep "1/1.*Running"
[[ "$?" == "1" ]] || break
echo "...no. Sleeping 10 seconds."
sleep 10
done
| true
|
56279dc135c75772624031623adae259f618e076
|
Shell
|
jht0664/Utility_python_gromacs
|
/python/dnum-avg-lj.sh
|
UTF-8
| 728
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# make average 1d-number profile from .dnums file
# $1 : begin frame
# $2 : tolerance for block avg
i1="a"
i2="b"
inputgen=".dnums.align"
avggen=".dnums"
input1=$i1$inputgen
avg1=$i1$avggen
output1=$i1$outputgen
input2=$i2$inputgen
output2=$i2$outputgen
avg2=$i2$avggen
if [ -f "$input1" ]
then
echo "$input1 found."
else
echo "$input1 not found."
exit 1
fi
if [ -f "$input2" ]
then
echo "$input2 found."
else
echo "$input2 not found."
exit 1
fi
## extract trajectory
python3 ~/Utility/python/dnum-avg.py -i $input1 -avg $avg1 -b $1 -tol $2 | tee $il1.dnum-avg.log
python3 ~/Utility/python/dnum-avg.py -i $input2 -avg $avg2 -b $1 -tol $2 | tee $il2.dnum-avg.log
| true
|
fc49de1e6ffbf3f4a2897af747ea8a854f6f5a35
|
Shell
|
Chromico/zip-uitility-GCI
|
/zip-utility.sh
|
UTF-8
| 933
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
clear
figlet ZIP-UTILITY
echo "Version 1.0"
echo "Press Enter to continue or Press CTRL-C to exit"
read al
echo 'Installing Requirements....'
echo .
echo .
apt install python3
apt install python3-pip
echo Requirements Installed....
echo Press Enter To Continue...
read upd
clear
figlet ZIP-UTILITY
echo "Version 1.0"
echo -e "\e[4;31m This tool should not be used for any malicious purposes. Use this tool at your own risk. \e[0m"
echo "Press 1 To Read Zip file "
echo "Press 2 To Extract Zip file "
echo "Press 3 To Brute Force Password protected Zip file"
echo "Press 4 To EXIT "
read ch
if [ $ch -eq 1 ];then
clear
echo -e "\e[1;32m"
#rm *.xxx >/dev/null 2>&1
python3 zip-read.py
#rm *.xxx >/dev/null 2>&1
exit 0
elif [ $ch -eq 2 ];then
clear
python3 zip-extract.py
exit 0
elif [ $ch -eq 3 ];then
clear
python3 zip-crack.py
exit 0
elif [ $ch -eq 4 ];then
clear
echo -e "\e[1;31m"
figlet ZIP-UTILITY
exit 0
fi
done
| true
|
fb7b43484dd0d2f13c9c965d06dedffc09cb24b0
|
Shell
|
dunnkers/roosters-api
|
/.openshift/cron/daily/update
|
UTF-8
| 166
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
if [[ $(date +%u) -eq 6 ]]; then
echo "`date +%A`. Updating...";
/usr/bin/node $OPENSHIFT_REPO_DIR/update.js
else
echo "`date +%A`. Not updating.";
fi
| true
|
f0c49448b91a0e7ec997e19627e8554efeea4d66
|
Shell
|
pfernandez/Stardisks
|
/public_html/cgi-bin/getKojiValues.bk
|
UTF-8
| 14,471
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Returns values based on input arguments. The first argument is the absolute
# /path/to/folder of the pertinient data, the second is the name of a function below.
#
# NOTE: not all of the functions below have been tested to work, because all of the
# references to the 'fact' and 'polyout' file were changed to 'fort.50' for use with
# the koji disk models. Before using, confirm that the value exist in fort.50 and that
# its columns are the same as in the function.
max=$3
gvUpStp=$4 # Step at which to change Y2 convergence test value.
d1=$5 # Allowed difference between Y2 values before gvUpStep.
d2=$6 # Allowed difference between Y2 values at or after gvUpStep.
function notDone
{ # Returns 1 if a run needs to be started or restarted, otherwise returns empty.
# Conditions are: if no growth rate convergence and less than "max" steps
# (set below,) or if fort.50 not present. Set neverRestart to false to enable.
neverRestart=true
[ ! $max ] && max=200000
if [ "$neverRestart" != "true" ]; then
if [ -f fort.50 ]; then
if [ "$(Y2)" = "0" ] && [ $(stepsCompleted) -lt $max ]; then
notDone=1
fi
else
notDone=1
fi
fi
echo $notDone
}
function stepsCompleted
{
if [ -f fort.23a ]; then
steps=$(cat fort.23a | wc -l)
elif [ -f fort.23 ]; then
steps=$(cat fort.23 | wc -l)
else
steps=-1
fi
echo $steps
}
function isRunning
{
[ $(qstat -f | grep Job_Name | cut -c 16- | grep `basename $(pwd)`) ] && echo true;
}
function jVec
{
for folder in `ls`; do
if [ -f $folder/fort.50 ]; then
j=${folder##*j}; jVec="$jVec $j"
fi
done
echo $jVec | sed 's/ /\n/g' | sort -gu
}
function MVec
{
for folder in `ls`; do
if [ -f $folder/fort.50 ]; then
M=${folder%%j*}; M=${M##*M}; MVec="$MVec $M"
fi
done
echo $MVec | sed 's/ /\n/g' | sort -gu
}
function mVec
{
for folder in `ls`; do
if [ -f $folder/fort.50 ]; then
m=${folder%%M*}; m=${m##*m}; mVec="$mVec $m"
fi
done
echo $mVec | sed 's/ /\n/g' | sort -gu
}
function qVec
{
for folder in `ls`; do
if [ -f $folder/fort.50 ]; then
q=${folder%%m*}; q=${q##*q}; qVec="$qVec $q"
fi
done
echo $qVec | sed 's/ /\n/g' | sort -gu
}
function nVec
{
for folder in `ls`; do
if [ -f $folder/fort.50 ]; then
n=${folder%%q*}; n=${n##*n}; nVec="$nVec $n"
fi
done
echo $nVec | sed 's/ /\n/g' | sort -gu
}
function jmax
{
jmax=`grep -w jmax fort.50 | cut -c 22-25`
echo $jmax
}
function jtotp
{
exp=`grep -w jtotp fort.50 | cut -c 29-31`
base=`grep -w jtotp fort.50 | cut -c 18-27`
jtotp=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $jtotp
}
function omegaMax
{
exp=`grep -w omega\ max fort.50 | cut -c 29-32`
base=`grep -w omega\ max fort.50 | cut -c 18-27`
omegaMax=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $omegaMax
}
function etot
{
exp=`grep -w etot fort.50 | cut -c 29-32`
base=`grep -w etot fort.50 | cut -c 18-27`
etot=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $etot
}
function tjeans
{
exp=`grep -w tjeans: fort.50 | cut -c 24-27`
base=`grep -w tjeans: fort.50 | cut -c 16-22`
tjeans=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $tjeans
}
function tsound
{
exp=`grep -w tsound: fort.50 | cut -c 24-27`
base=`grep -w tsound: fort.50 | cut -c 16-22`
tsound=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $tsound
}
function starMass
{
exp=`grep -w star/disk: fort.50 | cut -c 24-26`
base=`grep -w star/disk: fort.50 | cut -c 16-22`
starMass=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $starMass
}
function TW
{
exp=`grep -w t/\|w\| fort.50 | cut -c 36-38`
base=`grep -w t/\|w\| fort.50 | cut -c 28-34`
TW=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $TW
}
function rInOut
{
exp=`grep -w r-/r+: fort.50 | cut -c 24-26`
base=`grep -w r-/r+: fort.50 | cut -c 16-22`
rInOut=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $rInOut
}
function rPlusR0
{
exp=`grep -w r+/ro: fort.50 | cut -c 24-26`
base=`grep -w r+/ro: fort.50 | cut -c 16-22`
rPlusR0=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $rPlusR0
}
function rMinusR0
{
exp=`grep -w r-/ro: fort.50 | cut -c 24-26`
base=`grep -w r-/ro: fort.50 | cut -c 16-22`
rMinusR0=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $rMinusR0
}
function rhomax
{
exp=`grep -w rho\(max\): torus.out | cut -c 45-47`
base=`grep -w rho\(max\): torus.out | cut -c 37-43`
rhomax=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $rhomax
}
function qMinusR0
{
exp=`grep -w Q-/r0: fort.50 | cut -c 25-27`
base=`grep -w Q-/r0: fort.50 | cut -c 17-23`
qMinusR0=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $qMinusR0
}
function qPlusR0
{
exp=`grep -w Q+/r0: fort.50 | cut -c 28-30`
base=`grep -w Q+/r0: fort.50 | cut -c 20-26`
qPlusR0=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $qPlusR0
}
function rLambdaR0
{
exp=`grep -w rvortmax/r0: fort.50 | cut -c 28-30`
base=`grep -w rvortmax/r0: fort.50 | cut -c 20-26`
rLambdaR0=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $rLambdaR0
}
function MIRP
{
exp=`grep -w MIRP: fort.50 | cut -c 24-26`
base=`grep -w MIRP: fort.50 | cut -c 16-22`
MIRP=`echo "scale=8; ($base/1)*10^(0$exp)" | bc`
echo $MIRP
}
function eta
{
exp=`grep -w eta: fort.50 | cut -c 24-26`
base=`grep -w eta: fort.50 | cut -c 16-22`
eta=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $eta
}
function p
{
#exp=`grep -w p: fort.50 | cut -c 24-26`
#base=`grep -w p: fort.50 | cut -c 16-22`
#p=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
#echo $p
# Above pulls incorrectly calculated value from fort.50. Instead:
exp=`grep -w omegazero: fort.50 | cut -c 28-30`
base=`grep -w omegazero: fort.50 | cut -c 20-26`
omegazero=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
exp=`grep -w rhomax: fort.50 | cut -c 24-26`
base=`grep -w rhomax: fort.50 | cut -c 16-22`
rhomax=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
echo "scale=4; sqrt(4*3.1415926536*${rhomax}/${omegazero}^2)" | bc -l
}
function tauzero
{
exp=`grep -w tauzero: fort.50 | cut -c 24-26`
base=`grep -w tauzero: fort.50 | cut -c 16-22`
tauzero=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $tauzero
}
function omegazero
{
exp=`grep -w omegazero: fort.50 | cut -c 28-30`
base=`grep -w omegazero: fort.50 | cut -c 20-26`
omegazero=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $omegazero
}
function jeansfreq
{
exp=`grep -w jeans\ freq: fort.50 | cut -c 28-30`
base=`grep -w jeans\ freq: fort.50 | cut -c 20-26`
jeansfreq=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $jeansfreq
}
function cfreqzero
{
exp=`grep -w c\ freq\ zero: fort.50 | cut -c 28-30`
base=`grep -w c\ freq\ zero: fort.50 | cut -c 20-26`
cfreqzero=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $cfreqzero
}
function keplerfreq
{
exp=`grep -w kepler\ freq: fort.50 | cut -c 28-30`
base=`grep -w kepler\ freq: fort.50 | cut -c 20-26`
keplerfreq=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $keplerfreq
}
function virialError
{
exp=`grep -w virial\ error fort.50 | cut -c 36-39`
base=`grep -w virial\ error fort.50 | cut -c 28-34`
virialError=`echo "scale=8; ($base/1)*10^(0$exp)" | bc`
echo $virialError
}
function mass
{
exp=`grep -w mass: fort.50 | cut -c 18-21`
base=`grep -w mass: fort.50 | cut -c 10-16`
mass=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $mass
}
function oneMinusRInR0
{
exp=`grep -w 1\ -\ r-/r0: fort.50 | cut -c 26-29`
base=`grep -w 1\ -\ r-/r0: fort.50 | cut -c 18-24`
oneMinusRInR0=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $oneMinusRInR0
}
function r0
{
exp=`grep -w \ \ \ \ \ r0: fort.50 | cut -c 21-24`
base=`grep -w \ \ \ \ \ r0: fort.50 | cut -c 13-19`
r0=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
echo $r0
}
function checkConvergence
{ # Y2 is the mode growth rate, and is used to determine whether frequency-based
# values are valid and whether to restart upon completion.
# check for inputs or set defaults
[ ! $gvUpStp ] && gvUpStp=100000
[ ! $d1 ] && d1=.001
[ ! $d2 ] && d2=.01
if [ $(stepsCompleted) -lt $gvUpStp ]; then d=$d1
else d=$d2; fi
# screen for NaNs, convert to testable values
test1=`grep -w y2_1: fort.50 | cut -c 24-26`
if [ "$test1" = "NaN" ]; then n1=-999999
else
exp1=$test1
base1=`grep -w y2_1: fort.50 | cut -c 16-22`
yn1=`echo "scale=10; ($base1/1)*10^(0$exp1)" | bc`
fi
test2=`grep -w y2_2: fort.50 | cut -c 24-26`
if [ "$test2" = "NaN" ]; then n2=-888888
else
exp2=$test2
base2=`grep -w y2_2: fort.50 | cut -c 16-22`
yn2=`echo "scale=10; ($base2/1)*10^(0$exp2)" | bc`
fi
test3=`grep -w y2_3: fort.50 | cut -c 24-26`
if [ "$test3" = "NaN" ]; then n3=-777777
else
exp3=$test3
base3=`grep -w y2_3: fort.50 | cut -c 16-22`
yn3=`echo "scale=10; ($base3/1)*10^(0$exp3)" | bc`
fi
# calculate differences between y2 values
y2_12diff=$(echo "scale=10; sqrt(($yn1-($yn2))^2)" | bc);
y2_13diff=$(echo "scale=10; sqrt(($yn1-($yn3))^2)" | bc);
y2_23diff=$(echo "scale=10; sqrt(($yn2-($yn3))^2)" | bc);
# find smallest two differences and average them
if [ "$(echo "$y2_12diff < $y2_13diff" | bc)" = "1" ]; then
diff2=$y2_12diff
growthRate=`echo "scale=4; ($yn1 + $yn2)/2" | bc`
radius1=true
radius2=true
radius3=$NULL
else
diff2=$y2_13diff
growthRate=`echo "scale=4; ($yn1 + $yn3)/2" | bc`
radius1=true
radius2=$NULL
radius3=true
fi
if [ "$(echo "$y2_23diff < $diff2" | bc)" = "1" ]; then
diff2=$y2_23diff
growthRate=`echo "scale=4; ($yn2 + $yn3)/2" | bc`
radius1=$NULL
radius2=true
radius3=true
fi
# test if average is less than specified requirement
if [ "$(echo "$diff2 < $d" | bc)" != "1" ]; then
growthRate=0
stable=true
fi
}
function Y2
{
checkConvergence
echo $growthRate
}
function Y1
{
checkConvergence
if [ $stable ]; then avg="NaN"
else
# get values from correct radii
if [ $radius1 ]; then
exp=`grep -w y1_1: fort.50 | cut -c 24-26`
base=`grep -w y1_1: fort.50 | cut -c 16-22`
n1=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
fi
if [ $radius2 ]; then
exp=`grep -w y1_2: fort.50 | cut -c 24-26`
base=`grep -w y1_2: fort.50 | cut -c 16-22`
n2=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
fi
if [ $radius3 ]; then
exp=`grep -w y1_3: fort.50 | cut -c 24-26`
base=`grep -w y1_3: fort.50 | cut -c 16-22`
n3=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
fi
# average them
if [ $radius1 ] && [ $radius2 ]; then
avg=`echo "scale=4; ($n1 + $n2)/2" | bc`
elif [ $radius1 ] && [ $radius3 ]; then
avg=`echo "scale=4; ($n1 + $n3)/2" | bc`
elif [ $radius2 ] && [ $radius3 ]; then
avg=`echo "scale=4; ($n2 + $n3)/2" | bc`
fi
fi
echo $avg
}
function RcoR0
{
checkConvergence
if [ $stable ]; then avg="NaN"
else
# get values from correct radii
if [ $radius1 ]; then
exp=`grep -w Rco/R01: fort.50 | cut -c 26-28`
base=`grep -w Rco/R01: fort.50 | cut -c 18-24`
n1=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
fi
if [ $radius2 ]; then
exp=`grep -w Rco/R02: fort.50 | cut -c 26-28`
base=`grep -w Rco/R02: fort.50 | cut -c 18-24`
n2=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
fi
if [ $radius3 ]; then
exp=`grep -w Rco/R03: fort.50 | cut -c 26-28`
base=`grep -w Rco/R03: fort.50 | cut -c 18-24`
n3=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
fi
# average them
if [ $radius1 ] && [ $radius2 ]; then
avg=`echo "scale=4; ($n1 + $n2)/2" | bc`
elif [ $radius1 ] && [ $radius3 ]; then
avg=`echo "scale=4; ($n1 + $n3)/2" | bc`
elif [ $radius2 ] && [ $radius3 ]; then
avg=`echo "scale=4; ($n2 + $n3)/2" | bc`
fi
fi
echo $avg
}
function rPhiR0
{
if [ "$(Y2)" = "0" ]; then rPhiR0="NaN"; else
exp=`grep -w drho\ min/ro: fort.50 | cut -c 29-31`
base=`grep -w drho\ min/ro: fort.50 | cut -c 20-27`
rPhiR0=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
fi
echo $rPhiR0
}
function rGammaR0
{
if [ "$(Y2)" = "0" ]; then rGammaR0="NaN"; else
exp=`grep -w R\ torque1: fort.50 | cut -c 27-29`
base=`grep -w R\ torque1: fort.50 | cut -c 19-25`
rGammaR0=`echo "scale=4; ($base/1)*10^(0$exp)" | bc`
fi
echo $rGammaR0
}
function RilrR0
{
checkConvergence
if [ $stable ]; then avg="NaN"
else
# get values from correct radii
if [ $radius1 ]; then
exp=`grep -w lindin1: fort.50 | cut -c 24-26`
base=`grep -w lindin1: fort.50 | cut -c 16-22`
n1=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
fi
if [ $radius2 ]; then
exp=`grep -w lindin2: fort.50 | cut -c 24-26`
base=`grep -w lindin2: fort.50 | cut -c 16-22`
n2=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
fi
if [ $radius3 ]; then
exp=`grep -w lindin3: fort.50 | cut -c 24-26`
base=`grep -w lindin3: fort.50 | cut -c 16-22`
n3=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
fi
# average them
if [ $radius1 ] && [ $radius2 ]; then
avg=`echo "scale=4; ($n1 + $n2)/2" | bc`
elif [ $radius1 ] && [ $radius3 ]; then
avg=`echo "scale=4; ($n1 + $n3)/2" | bc`
elif [ $radius2 ] && [ $radius3 ]; then
avg=`echo "scale=4; ($n2 + $n3)/2" | bc`
fi
fi
echo $avg
}
function mTorqueMd
{ # returns first occurence in file
if [ "$(Y2)" = "0" ]; then mTorqueMd="NaN"; else
exp=`grep -m 1 M\ torque/M fort.50 | cut -c 29-31`
base=`grep -m 1 M\ torque/M fort.50 | cut -c 21-27`
mTorqueMd=`echo "scale=4; ($base/1*1)*10^(0$exp)" | bc`
fi
echo $mTorqueMd
}
function jTorqueJ1
{
if [ "$(Y2)" = "0" ]; then jTorqueJ1="NaN"; else
exp=`grep -w Jtorque/J1: fort.50 | cut -c 27-29`
base=`grep -w Jtorque/J1: fort.50 | cut -c 19-25`
jTorqueJ1=`echo "scale=4; ($base/1*1)*10^(0$exp)" | bc`
fi
echo $jTorqueJ1
}
function lindoutAvg
{
checkConvergence
if [ $stable ]; then avg="NaN"
else
# get values from correct radii
if [ $radius1 ]; then
exp=`grep -w lindout1: fort.50 | cut -c 27-29`
base=`grep -w lindout1: fort.50 | cut -c 19-25`
n1=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
fi
if [ $radius2 ]; then
exp=`grep -w lindout2: fort.50 | cut -c 27-29`
base=`grep -w lindout2: fort.50 | cut -c 19-25`
n2=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
fi
if [ $radius3 ]; then
exp=`grep -w lindout3: fort.50 | cut -c 27-29`
base=`grep -w lindout3: fort.50 | cut -c 19-25`
n3=`echo "scale=10; ($base/1)*10^(0$exp)" | bc`
fi
# average them
if [ $radius1 ] && [ $radius2 ]; then
avg=`echo "scale=4; ($n1 + $n2)/2" | bc`
elif [ $radius1 ] && [ $radius3 ]; then
avg=`echo "scale=4; ($n1 + $n3)/2" | bc`
elif [ $radius2 ] && [ $radius3 ]; then
avg=`echo "scale=4; ($n2 + $n3)/2" | bc`
fi
fi
echo $avg
}
cd $1
echo `$2`
| true
|
3d90cb3831cf9278cd069710795e4a3543e06d61
|
Shell
|
trodemaster/timelapse-tools
|
/archive_webcam
|
UTF-8
| 1,386
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#set -euo pipefail
#IFS=$'\n\t'
#shopt -s nullglob
#shopt -s nocaseglob
dl_verify_jpeg () {
curl -s $1 -o $2
until [[ $(/usr/bin/jpeginfo -c $2 | grep -e OK) ]]; do
if [[ $RETRY_COUNT -eq 5 ]]; then
echo "Failed to download after 5 tries..."
fi
echo bad jpeg found $2 attempting redownload
rm $2
sleep 5
let RETRY_COUNT=RETRY_COUNT+1
curl -s $1 -o $2
done
}
dl_verify_jpeg http://common.snow.com/mtncams/Cowboy%20Mountain.jpg /zen/images/StevensPass20-21/CowboyMountain/$(date "+%y%m%d-%H%M").jpg
dl_verify_jpeg http://common.snow.com/mtncams/Skyline.jpg /zen/images/StevensPass20-21/UpperSkylineCamera/$(date "+%y%m%d-%H%M").jpg
dl_verify_jpeg http://common.snow.com/mtncams/Glacier%20Peak.jpg /zen/images/StevensPass20-21/GlacierPeak/$(date "+%y%m%d-%H%M").jpg
dl_verify_jpeg http://common.snow.com/mtncams/Gemini.jpg /zen/images/StevensPass20-21/Gemini/$(date "+%y%m%d-%H%M").jpg
dl_verify_jpeg http://common.snow.com/mtncams/Tye%20Mill%20Top%20Terminal.jpg /zen/images/StevensPass20-21/TyeMillTopTerminal/$(date "+%y%m%d-%H%M").jpg
dl_verify_jpeg http://common.snow.com/mtncams/South%20Divide.jpg /zen/images/StevensPass20-21/SouthDivide/$(date "+%y%m%d-%H%M").jpg
dl_verify_jpeg http://common.snow.com/mtncams/SPBaseArea.jpg /zen/images/StevensPass20-21/SPBaseArea/$(date "+%y%m%d-%H%M").jpg
echo "archive_webcam downloads completed"
exit 0
| true
|
8e1cf475bd8a8945ba90bb6854c7adecbbf30813
|
Shell
|
kqwyf/ScriptsForWork
|
/completions/cancel-completion.bash
|
UTF-8
| 478
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
_cancel_completions() {
local truncated_line=(${COMP_LINE:0:$COMP_POINT})
local job_name_list=$(squeue -o '%64j' -u $(whoami) | tail -n +2 | awk '{print $1;}')
local job_id_list=$(squeue -o '%64i' -u $(whoami) | tail -n +2 | awk '{print $1;}')
local completion_list=$(echo ${job_name_list} ${job_id_list} | sort | uniq)
COMPREPLY=($(compgen -W "${completion_list}" -- "${truncated_line[$COMP_CWORD]}"))
}
complete -F _cancel_completions cancel
| true
|
9c98a45ed90bb300f3c13d136c212b60dbbf0b52
|
Shell
|
ARM64Darwin1820/BetterRes
|
/install.sh
|
UTF-8
| 5,406
| 3
| 3
|
[] |
no_license
|
MODEL=$(uname -m)
case ${MODEL} in
iPhone6,1 | iPhone6,2 | iPhone8,4)
X=640 Y=1136;;
iPhone7,2 | iPhone8,1 | iPhone9,1 | iPhone9,3 | iPhone10,1 | iPhone10,4)
X=750 Y=1334;;
iPhone7,1 | iPhone8,2 | iPhone9,2 | iPhone9,4 | iPhone10,2 | iPhone10,5)
X=1242 Y=2208;;
iPhone10,3 | iPhone10,6 | iPhone11,2)
X=1125 Y=2436;;
esac
echo ${MODEL} detected!
echo ${X}x${Y} is your default resolution.
echo Creating stock IOMobileGraphicsFamily.plist...
if [ -f /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist ]; then
rm -f /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist
fi
cat > /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist << EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>canvas_height</key>
<integer>${Y}</integer>
<key>canvas_width</key>
<integer>${X}</integer>
</dict>
</plist>
EOF
chmod 0755 /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist
echo Default IOMobileGraphicsFamily.plist created!
echo Creating BetterRes resolution files...
if [ ! -f /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist.setrestarget.bak ]; then
cp /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist.setrestarget.bak
fi
cp /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist.setresoriginal.bak
echo Copying launchdaemons...
if [ -d /private/var/containers/Bundle/iosbinpack64/ ]; then
cat > /private/var/containers/Bundle/iosbinpack64/LaunchDaemons/com.horizon.setres.setrestarget.plist<< EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.horizon.setres.setrestarget</string>
<key>LaunchOnlyOnce</key>
<true/>
<key>ProgramArguments</key>
<array>
<string>/var/containers/Bundle/iosbinpack64/bin/bash</string>
<string>-c</string>
<string>/private/var/containers/Bundle/iosbinpack64/bin/cp /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist.setrestarget.bak /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist ; /var/containers/Bundle/iosbinpack64/usr/bin/killall cfprefsd</string>
</array>
<key>RunAtLoad</key>
<true/>
</dict>
</plist>
EOF
cat > /private/var/containers/Bundle/iosbinpack64/LaunchDaemons/com.horizon.setres.setresoriginal.plist << EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.horizon.setres.setresoriginal</string>
<key>LaunchOnlyOnce</key>
<true/>
<key>ProgramArguments</key>
<array>
<string>/private/var/containers/Bundle/iosbinpack64/bin/cp</string>
<string>/private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist.setresoriginal.bak</string>
<string>/private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist</string>
</array>
<key>RunAtLoad</key>
<false/>
<key>StartInterval</key>
<integer>30</integer>
</dict>
</plist>
EOF
chmod 0644 /private/var/containers/Bundle/iosbinpack64/LaunchDaemons/com.horizon.setres.setresoriginal.plist
chmod 0644 /private/var/containers/Bundle/iosbinpack64/LaunchDaemons/com.horizon.setres.setrestarget.plist
chown root:wheel /private/var/containers/Bundle/iosbinpack64/LaunchDaemons/com.horizon.setres.setresoriginal.plist
chown root:wheel /private/var/containers/Bundle/iosbinpack64/LaunchDaemons/com.horizon.setres.setrestarget.plist
else
cat > /private/etc/rc.d/betterres << EOF
#!/bin/bash
/bin/cp /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist.setrestarget.bak /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist
EOF
cat > /Library/LaunchDaemons/com.horizon.setres.setresoriginal.plist << EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.horizon.setres.setresoriginal</string>
<key>LaunchOnlyOnce</key>
<true/>
<key>ProgramArguments</key>
<array>
<string>/bin/cp</string>
<string>/private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist.setresoriginal.bak</string>
<string>/private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist</string>
</array>
<key>RunAtLoad</key>
<false/>
<key>StartInterval</key>
<integer>30</integer>
</dict>
</plist>
EOF
chmod 0777 /private/etc/rc.d/betterres
chmod 0644 /Library/LaunchDaemons/com.horizon.setres.setresoriginal.plist
chown root:wheel /Library/LaunchDaemons/com.horizon.setres.setresoriginal.plist
fi
echo Done! Set your custom resolution in /private/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist.setrestarget.bak then reboot and rejailbreak!
| true
|
d04b84ef06f39973b0bb8bad98845951a0e13f47
|
Shell
|
rankun203/ModernWebStudy
|
/shell/max.sh
|
UTF-8
| 291
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# 创建一个 shell 脚本, 它从用户那里接收 10 个数, 并显示已输入的最大的数
read max
i=1
while [ ${i} -lt 10 ]; do
read val
if [ ${val} -gt ${max} ]; then
max=${val}
fi
i=`expr ${i} + 1`
done
echo "Max number is ${max}"
| true
|
915e4bd59143537d88e3a6eb306535f08665ec4e
|
Shell
|
apollo-black/dev-vm
|
/setup.sh
|
UTF-8
| 8,036
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
echo "# -------------------------------- #"
echo "# Apollo Ruby VM Install #"
echo "# -------------------------------- #"
mkdir /home/vagrant/downloads
sudo su -c "echo \"vagrant ALL=(ALL) NOPASSWD:ALL\" > /etc/sudoers.d/vagrant"
sudo apt-get update
sudo apt-get upgrade -y
sudo apt-get -y install wget ca-certificates gcc g++ gnupg2 make software-properties-common \
git-core curl build-essential zlib1g-dev libssl-dev libreadline6-dev libyaml-dev libcurl4-openssl-dev \
libsqlite3-dev sqlite3 libxml2-dev libxslt1-dev libffi-dev libpq-dev tcl8.5 libexpat1-dev gettext unzip \
libmagick++-dev libv8-dev libffi-dev libpulse0 imagemagick
echo "# -------------------------------- #"
echo "# Setting SSH Keys #"
echo "# -------------------------------- #"
mkdir -p /home/vagrant/.ssh
chmod 0700 /home/vagrant/.ssh
mv ./ssh/vagrant.pub /home/vagrant/.ssh/authorized_keys
chmod 0600 /home/vagrant/.ssh/authorized_keys
sudo chown -R vagrant:root /home/vagrant/.ssh
sudo apt-get install -y openssh-server
echo 'AuthorizedKeysFile %h/.ssh/authorized_keys' | sudo tee --append /etc/ssh/sshd_config > /dev/null
sudo service ssh restart
echo "# -------------------------------- #"
echo "# Installing Guest Additions #"
echo "# -------------------------------- #"
sudo mkdir /cdrom
# Vagrant/VirtualBox provisioning (tail of script): installs guest additions,
# then builds/installs a full web-dev stack (Ruby, Redis, PostgreSQL, Node,
# Crystal, PHP, Lua, pip, ffmpeg, TypeScript) and reboots.
# NOTE(review): script start is above this excerpt — /cdrom mount assumes the
# guest-additions ISO is attached.
sudo mount /dev/cdrom /cdrom
cd /cdrom
sudo apt-get install -y dkms build-essential linux-headers-generic linux-headers-$(uname -r)
sudo su -c "./VBoxLinuxAdditions.run"
echo "# -------------------------------- #"
echo "#     Installing Ruby              #"
echo "# -------------------------------- #"
# Skip gem documentation to speed up installs.
echo "gem: --no-ri --no-rdoc" > ~/.gemrc
cd /home/vagrant/downloads && wget http://cache.ruby-lang.org/pub/ruby/2.6/ruby-2.6.1.tar.gz -O ruby.tar.gz
cd /home/vagrant/downloads && tar xzf ruby.tar.gz
# NOTE(review): single-dash `-prefix` relies on autoconf's prefix matching;
# the conventional spelling is `--prefix` — confirm before changing.
cd /home/vagrant/downloads/ruby-2.6.1 && ./configure -prefix=$HOME
cd /home/vagrant/downloads/ruby-2.6.1 && make
cd /home/vagrant/downloads/ruby-2.6.1 && make install
# gem install bundler
echo "# -------------------------------- #"
echo "#     Installing Redis             #"
echo "# -------------------------------- #"
cd /home/vagrant/downloads && wget http://download.redis.io/releases/redis-stable.tar.gz -O redis.tar.gz
cd /home/vagrant/downloads && tar xzf redis.tar.gz
cd /home/vagrant/downloads/redis-stable && make
cd /home/vagrant/downloads/redis-stable && sudo make install
cd /home/vagrant/downloads/redis-stable/utils && sudo ./install_server.sh
echo "# -------------------------------- #"
echo "#     Installing PostgreSQL        #"
echo "# -------------------------------- #"
# Official PGDG apt repository keyed to the host's release codename.
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" >> /etc/apt/sources.list.d/pgdg.list'
sudo apt-get update
sudo apt-get -y install postgresql postgresql-contrib
echo "# -------------------------------- #"
echo "#     Setting up DB User           #"
echo "# -------------------------------- #"
sudo -u postgres psql -c "CREATE USER vagrant WITH PASSWORD 'vagrant';"
sudo -u postgres createdb -O vagrant vagrant
echo "# -------------------------------- #"
echo "#     Installing Java 8            #"
echo "# -------------------------------- #"
sudo apt install -y default-jre default-jre-headless
#echo "# -------------------------------- #"
#echo "#     Installing Neo4j             #"
#echo "# -------------------------------- #"
# wget --no-check-certificate -O - https://debian.neo4j.org/neotechnology.gpg.key | sudo apt-key add -
# echo 'deb http://debian.neo4j.org/repo stable/' | sudo tee /etc/apt/sources.list.d/neo4j.list
# sudo apt update
# sudo apt install -y neo4j
# sudo service neo4j stop
# sudo service neo4j start
echo "# -------------------------------- #"
echo "#     Installing Nodejs            #"
echo "# -------------------------------- #"
curl -sL https://deb.nodesource.com/setup_10.x | sudo -E bash -
sudo apt-get install -y nodejs
sudo apt install -y npm
curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.11/install.sh | bash
sudo npm install --global webpack
sudo apt-get install gcc g++ make
curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
sudo apt-get update && sudo apt-get install yarn
echo "# -------------------------------- #"
echo "#     Installing Crystal           #"
echo "# -------------------------------- #"
curl -sSL https://dist.crystal-lang.org/apt/setup.sh | sudo bash
sudo apt install -y crystal
echo "# -------------------------------- #"
echo "#     Installing PHP               #"
echo "# -------------------------------- #"
sudo apt install -y curl php-cli php-mbstring git unzip
sudo apt install -y php
sudo apt install -y php-mysql php-gd
curl -sS https://getcomposer.org/installer -o composer-setup.php
sudo php composer-setup.php --install-dir=/usr/local/bin --filename=composer
echo "# -------------------------------- #"
echo "#     Installing Lua               #"
echo "# -------------------------------- #"
sudo apt-get install -y lua5.3
#echo "# -------------------------------- #"
#echo "#     Installing Cassandra         #"
#echo "# -------------------------------- #"
#echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
#curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add -
#sudo apt-get update
#sudo apt-get install -y cassandra
echo "# -------------------------------- #"
echo "#     Installing PIP               #"
echo "# -------------------------------- #"
sudo apt install -y python3-pip
#echo "# -------------------------------- #"
#echo "#     Installing Tensorflow        #"
#echo "# -------------------------------- #"
#pip3 install tensorflow
#pip3 install keras
#echo "# -------------------------------- #"
#echo "#     Installing Tesseract         #"
#echo "# -------------------------------- #"
#sudo apt install -y tesseract-ocr
#sudo apt install -y libtesseract-dev
echo "# -------------------------------- #"
echo "#     Installing FFmpeg            #"
echo "# -------------------------------- #"
sudo apt install -y ffmpeg
echo "# -------------------------------- #"
echo "#     Installing Typescript        #"
echo "# -------------------------------- #"
sudo npm install -g typescript
echo "# -------------------------------- #"
echo "#     Setting ENV Vars             #"
echo "# -------------------------------- #"
# System-wide environment for the Rails app the VM hosts.
echo 'RAILS_ENV="development"' | sudo tee --append /etc/environment > /dev/null
echo 'DATABASE_URL="postgresql://vagrant:vagrant@127.0.0.1:5432/vagrant"' | sudo tee --append /etc/environment > /dev/null
echo 'REDIS_URL="redis://localhost:6379"' | sudo tee --append /etc/environment > /dev/null
echo "# -------------------------------- #"
echo "#     Setting Up Utils             #"
echo "# -------------------------------- #"
# Pre-built binaries shipped with the repo (ngrok tunnel, pgweb DB browser).
cp /home/vagrant/dev-vm/utils/ngrok /usr/bin
cp /home/vagrant/dev-vm/utils/pgweb /usr/bin
#sudo npm install -g is-up-cli
#sudo npm install -g loadtest
sudo apt-get install -y htop
sudo apt-get install -y httpie
#echo "# -------------------------------- #"
#echo "#     Installing Docker            #"
#echo "# -------------------------------- #"
#curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
#sudo apt-key fingerprint 0EBFCD88
#sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
#sudo apt-get update
#sudo apt-get install docker-ce docker-ce-cli containerd.io
#sudo curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
#sudo chmod +x /usr/local/bin/docker-compose
echo "# -------------------------------- #"
echo "#     Done, Rebooting              #"
echo "# -------------------------------- #"
sudo reboot
| true
|
a32126a62d7b9261cb3582aa3abe0c2952c19465
|
Shell
|
AlixBernard/DST
|
/reducer.sh
|
UTF-8
| 384
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Hadoop-streaming-style reducer: reads whitespace-separated "key value"
# lines (pre-sorted by key) from stdin and emits "key count" — the number
# of input lines seen for each key.
#
# Fixes over the original:
#  * `lastkey` was initialized but the loop compared/assigned `last_key`
#    (typo), so the key-change test never worked as intended.
#  * count was incremented *before* the key-change check and reset to 0
#    afterwards, dropping one line from every key's tally.
#  * the final key was never flushed after the loop ended.
#  * per-line `echo | awk` subshells replaced with `read` field splitting.
reduce() {
  local last_key=""
  local count=0
  local this_key rest
  while IFS=$' \t' read -r this_key rest; do
    if [ "$this_key" != "$last_key" ]; then
      # Key changed: emit the previous key's count (skip the initial sentinel).
      if [ -n "$last_key" ]; then
        echo "$last_key $count"
      fi
      count=0
      last_key="$this_key"
    fi
    count=$((count + 1))
  done
  # Flush the last run of keys.
  if [ -n "$last_key" ]; then
    echo "$last_key $count"
  fi
}
reduce
| true
|
2fd967c3f6fcae74af129def1f136b6e988f35ab
|
Shell
|
RyanTech/yueji
|
/tools/study/linux/script/yueji/switch.datasource.connection.info.sh
|
UTF-8
| 2,035
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Switch Oracle datasource connection strings between master (192.168.1.50)
# and slave (192.168.1.51) across all *.datasource.properties files under
# the deployment tree. Usage: switch.datasource.connection.info.sh {master|slave}
export LANG=en_US.UTF-8
base='/yueji'
#base='/home/tianliang/workspace'
cd $base
echo `pwd`
echo "找到含有master的配置文件:"
find . -maxdepth 3 -name '*.datasource.properties*' |xargs grep master -l
echo ""
echo "找到含有slave的配置文件:"
find . -maxdepth 3 -name '*.datasource.properties*' |xargs grep slave -l
echo ""
echo "------------------------------------------------------"
echo "--------------------start switch----------------------"
echo "------------------------------------------------------"
echo ""
# Back up property files currently pointing at master to *.master copies.
find . -maxdepth 3 -name '*.datasource.properties' |xargs grep :master -l | awk '{printf("cp %s \t %s \n",$1,$1".master")}'|sh
# Rewrite slave/.51 JDBC URLs to master/.50 inside the *.slave copies.
# NOTE(review): this sed runs on *.datasource.properties.slave files that are
# only created two steps below — the backup/rewrite ordering looks inverted;
# confirm against the deployment workflow before relying on it.
find . -maxdepth 3 -name '*.datasource.properties.slave'|xargs -I {} sed -i 's#jdbc:oracle:thin:@192.168.1.51:1521:slave#jdbc:oracle:thin:@192.168.1.50:1521:master#g' {}
# Back up property files currently pointing at master to *.slave copies.
find . -maxdepth 3 -name '*.datasource.properties' |xargs grep :master -l | awk '{printf("cp %s \t %s \n",$1,$1".slave")}'|sh
# Rewrite master/.50 JDBC URLs to slave/.51 inside the *.slave copies.
find . -maxdepth 3 -name '*.datasource.properties.slave'|xargs -I {} sed -i 's#jdbc:oracle:thin:@192.168.1.50:1521:master#jdbc:oracle:thin:@192.168.1.51:1521:slave#g' {}
case "$1" in
  master)
    # Activate master: copy each *.master backup over the live properties file
    # (substr strips the ".master" suffix to recover the original name).
    find . -maxdepth 3 -name '*.datasource.properties.master' |xargs grep :master -l | awk '{printf("cp %s \t %s \n",$1,substr($1,0,length($1)-6))}' |sh
    echo 'switch database connection to ** master ** success'
    ;;
  slave)
    # Activate slave: copy each *.slave backup over the live properties file.
    find . -maxdepth 3 -name '*.datasource.properties.slave' |xargs grep :slave -l | awk '{printf("cp %s \t %s \n",$1,substr($1,0,length($1)-5))}' |sh
    echo 'switch database connection to ** slave ** suceess'
    ;;
  *)
    echo $"Usage: $0 {master|slave}"
    exit 2
esac
echo ""
echo "------------------------------------------------------"
echo "------------------finished switch---------------------"
echo "------------------------------------------------------"
echo ""
| true
|
3d6e4dbba90fc51e0bb532dd5f1fa682bcccda45
|
Shell
|
Twitter-R01/TwitterR01
|
/generatemodel.sb
|
UTF-8
| 690
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH -N 2
#SBATCH -p RM
#SBATCH -t 48:00:00
#SBATCH -J model_gen
# SLURM batch job: trains a BERTweet "policy" classifier on 2 nodes of the
# RM partition with a 48h wall-clock limit.
echo Start time:
date +"%D %T"
# Load the correct version of python
module load anaconda3
source activate bertweet
# Navigate to your TwitterR01 directory, the folder with generate_model.py in it
cd /jet/home/bakerw/thesis_research/TwitterR01/classifiers/BERTweet
# python3 generate_model.py [relevance, commercial, policy, or sentiment] [file with coded tweets (either policy or comm file)] [data directory with parsed tweets which have IDs matching coded file]
python3 generate_model.py policy Policy_2.24.21.csv /ocean/projects/sbe180010p/bakerw/data_for_model_test/policy
echo End time:
date +"%D %T"
| true
|
8a1da3f40809a059e95ce7546e83b3ae442be3bd
|
Shell
|
jopmas/scripts
|
/zip_plots.sh
|
UTF-8
| 253
| 2.78125
| 3
|
[] |
no_license
|
# Print the current directory's basename, then move up to its parent.
# Expansions are quoted so directory names containing spaces or glob
# characters neither word-split nor expand (the original `echo $dirout`
# was unquoted); the cd now aborts on failure instead of continuing
# from the wrong directory.
dirout="${PWD##*/}"
printf '%s\n' "$dirout"
cd ../ || exit 1
# DIRNAME=${PWD##*/}
# cd $dirout
# echo "Zipping $DIRNAME directory..."
# zip $DIRNAME'_imgs.zip' *.png
# zip $DIRNAME'_videos.zip' *.mp4
# zip $DIRNAME'_gifs.zip' *.gif
# # rm *.png
# echo "Zipping completed!"
| true
|
25873b01725a6379c26d4d480bcdac56585399c4
|
Shell
|
happy-bubbles/ble_button
|
/nrf_sdk/gcc/build_with_softdevice
|
UTF-8
| 697
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
# Build the nRF51 firmware, embed a 4-byte serial number (from the 8-hex-digit
# CLI argument) and the device secret into fixed UICR offsets, merge with the
# s110 softdevice, and flash the combined image.
# NOTE(review): ${serial:0:2} substring expansion is a bashism — this works
# only where /bin/sh is bash; confirm the target build hosts.
make
res=$?
if [ "$res" -ne "0" ]
then
	echo "BAD MAKE"
	exit 5
fi
# $1 must be exactly 8 hex characters (4 bytes of serial number).
serial=$1
size=${#serial}
if [ "$size" -ne "8" ]
then
	echo "bad serial"
	exit 5
fi
# Split the serial into byte pairs and emit them as raw bytes.
s1=${serial:0:2}
s2=${serial:2:2}
s3=${serial:4:2}
s4=${serial:6:2}
printf \\x$s1\\x$s2\\x$s3\\x$s4 > serial
# take off last newline char from serial
printf %s "$(cat serial)" > serial
# same for device_secret
printf %s "$(cat device_secret)" > device_secret
# Merge softdevice hex + app binary (@0x16000) + serial (@0x10001080, UICR)
# + secret (@0x10001088, UICR) into one Intel-hex image.
srec_cat ../s110_nrf51822_7.3.0_softdevice.hex -intel _build/bunchie_buttons_s110_xxaa.bin -binary -offset 0x16000 serial -binary -offset 0x10001080 device_secret -binary -offset 0x10001088 -o combined.hex -intel
cp combined.hex ../../
cd ../..
./flash.sh
| true
|
950c13e042681ad952e710f4e3e74cd207dc2830
|
Shell
|
hypnoglow/macOS-bootstrap
|
/scripts/defaults.sh
|
UTF-8
| 2,205
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# This file contains automated macOS system preferences configuration.
# Each `defaults write` mutates a plist; Dock/Finder are restarted at the
# end so the changes take effect immediately.
# TODO: make it conditional, and execute `killal {Dock/Finder}` only if there
# were changes.
#
# Also see: https://macos-defaults.com
#
# System Preferences
#
# System Preferences > General
# Ask to keep changes when closing documents
defaults write -globalDomain NSCloseAlwaysConfirmsChanges -int 1
# Prefer tabs when opening documents: Always
defaults write -globalDomain AppleWindowTabbingMode -string always
## System Preferences > Dock & Menu Bar
# Size
defaults write com.apple.dock tilesize -int 54
# Magnification
defaults write com.apple.dock magnification -bool true
defaults write com.apple.dock largesize -int 65
# Minimise windows into application icon
defaults write com.apple.dock minimize-to-application -bool true
# Automatically hide and show the Dock
# (near-zero delay + 1s animation makes the Dock appear almost instantly)
defaults write com.apple.dock autohide -bool true
defaults write com.apple.dock autohide-time-modifier -float 1
defaults write com.apple.dock autohide-delay -float 0.0001
# Show recent applications in Dock
defaults write com.apple.dock show-recents -bool false
## System Preferences > Mission Control
# Automatically rearrange Spaces based on most recent use
defaults write com.apple.dock mru-spaces -bool false
## System Preferences > Keyboard | Keyboard
# Use F1, F2, etc. keys as standard function keys
defaults write -globalDomain com.apple.keyboard.fnState -int 1
## System Preferences > Trackpad | Point & Click
# Tap to click
defaults write com.apple.AppleMultitouchTrackpad Clicking -int 1
# Force click
defaults write -globalDomain com.apple.trackpad.forceClick -int 1
# ## System Preferences > Trackpad | More Gestures
# App Expose
defaults write com.apple.dock showAppExposeGestureEnabled -bool true
#
# Finder
#
## Finder > Preferences | General
# New Finder windows show
# (PfHm = the user's home folder)
defaults write com.apple.finder NewWindowTarget -string PfHm
defaults write com.apple.finder NewWindowTargetPath -string "file:///$HOME/"
## Finder > Preferences | Advanced
# Show all filename extensions
defaults write -globalDomain AppleShowAllExtensions -bool true
# #
# # Apply changes immediately
# #
killall Dock
killall Finder
| true
|
a1622c5ce942819782bc1344515b81a355ec355d
|
Shell
|
stickpro/dots
|
/scripts/polybar/btc_price_coinm.sh
|
UTF-8
| 162
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# Polybar module: print the current Bitcoin (id 1) USD price, truncated to
# 9 characters, from the CoinMarketCap v2 ticker.
# NOTE(review): the v2 API at api.coinmarketcap.com has been retired; the
# URL likely needs migrating to a current endpoint.
DATA=$(curl -s 'https://api.coinmarketcap.com/v2/ticker/' | jq -r '.data ."1" .quotes .USD .price' | cut -c 1-9)
# Quote the expansion so an empty or unexpected response cannot word-split
# or glob-expand (the original `echo $DATA` was unquoted).
echo "$DATA"
#printf '%0.3f\n' $DATA
| true
|
7bcc76813176a4c80ff52c906f396cb23ff7b7a3
|
Shell
|
Undomyr/aryalinux
|
/applications/linux-pam.sh
|
UTF-8
| 5,107
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# ALPS/aryalinux build recipe for Linux-PAM 1.3.0 (BLFS-style): fetches the
# tarball from a mirror list, builds it, then writes the /etc/pam.d policy
# files as root via generated one-shot "rootscript.sh" helpers.
set -e
set +h
. /etc/alps/alps.conf
. /var/lib/alps/functions
SOURCE_ONLY=n
DESCRIPTION="br3ak The Linux PAM package containsbr3ak Pluggable Authentication Modules used to enable the local systembr3ak administrator to choose how applications authenticate users.br3ak"
SECTION="postlfs"
VERSION=1.3.0
NAME="linux-pam"
# Optional dependencies recognized by the ALPS tooling:
#OPT:db
#OPT:cracklib
#OPT:libtirpc
#OPT:docbook
#OPT:docbook-xsl
#OPT:fop
#OPT:libxslt
#OPT:w3m
cd $SOURCE_DIR
URL=http://linux-pam.org/library/Linux-PAM-1.3.0.tar.bz2
if [ ! -z $URL ]
then
# Try each mirror in turn until one succeeds (-nc skips already-present files).
wget -nc http://mirrors-ru.go-parts.com/blfs/conglomeration/Linux-PAM/Linux-PAM-1.3.0.tar.bz2 || wget -nc http://linux-pam.org/library/Linux-PAM-1.3.0.tar.bz2 || wget -nc http://ftp.lfs-matrix.net/pub/blfs/conglomeration/Linux-PAM/Linux-PAM-1.3.0.tar.bz2 || wget -nc ftp://ftp.lfs-matrix.net/pub/blfs/conglomeration/Linux-PAM/Linux-PAM-1.3.0.tar.bz2 || wget -nc ftp://ftp.osuosl.org/pub/blfs/conglomeration/Linux-PAM/Linux-PAM-1.3.0.tar.bz2 || wget -nc http://ftp.osuosl.org/pub/blfs/conglomeration/Linux-PAM/Linux-PAM-1.3.0.tar.bz2 || wget -nc http://mirrors-usa.go-parts.com/blfs/conglomeration/Linux-PAM/Linux-PAM-1.3.0.tar.bz2

wget http://www.linux-pam.org/documentation/Linux-PAM-1.2.0-docs.tar.bz2 -O Linux-PAM-1.2.0-docs.tar.bz2

# Derive the tarball name from the URL and unpack it (tar or zip).
TARBALL=`echo $URL | rev | cut -d/ -f1 | rev`
if [ -z $(echo $TARBALL | grep ".zip$") ]; then
	DIRECTORY=`tar tf $TARBALL | cut -d/ -f1 | uniq | grep -v "^\.$"`
	tar --no-overwrite-dir -xf $TARBALL
else
	DIRECTORY=$(unzip_dirname $TARBALL $NAME)
	unzip_file $TARBALL $NAME
fi
cd $DIRECTORY
fi
whoami > /tmp/currentuser

tar -xf ../Linux-PAM-1.2.0-docs.tar.bz2
./configure --prefix=/usr                    \
            --sysconfdir=/etc                \
            --libdir=/usr/lib                \
            --disable-regenerate-docu        \
            --enable-securedir=/lib/security \
            --docdir=/usr/share/doc/Linux-PAM-1.3.0 &&
make "-j`nproc`" || make

# Write a temporary catch-all /etc/pam.d/other (deny everything) before
# installing, as BLFS recommends for the test phase.
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
install -v -m755 -d /etc/pam.d &&

cat > /etc/pam.d/other << "EOF"
auth     required        pam_deny.so
account  required        pam_deny.so
password required        pam_deny.so
session  required        pam_deny.so
EOF
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh

# Remove the temporary configuration before the real install.
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
rm -fv /etc/pam.d/*
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh

# Install, setuid the password helper, and move the runtime libraries to /lib
# (re-pointing the /usr/lib symlinks at them).
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
make install &&
chmod -v 4755 /sbin/unix_chkpwd &&

for file in pam pam_misc pamc
do
  mv -v /usr/lib/lib${file}.so.* /lib &&
  ln -sfv ../../lib/$(readlink /usr/lib/lib${file}.so) /usr/lib/lib${file}.so
done
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh

# Baseline pam_unix policies for account, auth and session management.
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
cat > /etc/pam.d/system-account << "EOF"
# Begin /etc/pam.d/system-account

account   required    pam_unix.so

# End /etc/pam.d/system-account
EOF

cat > /etc/pam.d/system-auth << "EOF"
# Begin /etc/pam.d/system-auth

auth      required    pam_unix.so

# End /etc/pam.d/system-auth
EOF

cat > /etc/pam.d/system-session << "EOF"
# Begin /etc/pam.d/system-session

session   required    pam_unix.so

# End /etc/pam.d/system-session
EOF
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh

# Password policy variant that uses pam_cracklib for strength checking.
# NOTE(review): this file is immediately overwritten by the next section,
# which is the no-cracklib variant — only one of the two can win.
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
cat > /etc/pam.d/system-password << "EOF"
# Begin /etc/pam.d/system-password

# check new passwords for strength (man pam_cracklib)
password  required    pam_cracklib.so   type=Linux retry=3 difok=5 \
                                        difignore=23 minlen=9 dcredit=1 \
                                        ucredit=1 lcredit=1 ocredit=1 \
                                        dictpath=/lib/cracklib/pw_dict
# use sha512 hash for encryption, use shadow, and use the
# authentication token (chosen password) set by pam_cracklib
# above (or any previous modules)
password  required    pam_unix.so       sha512 shadow use_authtok

# End /etc/pam.d/system-password
EOF
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh

# Password policy variant without cracklib (overwrites the one above).
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
cat > /etc/pam.d/system-password << "EOF"
# Begin /etc/pam.d/system-password

# use sha512 hash for encryption, use shadow, and try to use any previously
# defined authentication token (chosen password) set by any prior module
password  required    pam_unix.so       sha512 shadow try_first_pass

# End /etc/pam.d/system-password
EOF
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh

# Final restrictive catch-all policy: warn, then deny unknown services.
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
cat > /etc/pam.d/other << "EOF"
# Begin /etc/pam.d/other

auth        required        pam_warn.so
auth        required        pam_deny.so
account     required        pam_warn.so
account     required        pam_deny.so
password    required        pam_warn.so
password    required        pam_deny.so
session     required        pam_warn.so
session     required        pam_deny.so

# End /etc/pam.d/other
EOF
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh

if [ ! -z $URL ]; then cd $SOURCE_DIR && cleanup "$NAME" "$DIRECTORY"; fi
register_installed "$NAME" "$VERSION" "$INSTALLED_LIST"
| true
|
c62356764fbe279dea21f6b4db4641afa9ff5819
|
Shell
|
paulgalow/CreateOSXSystemReport
|
/Create OS X System Report.app/Contents/Resources/script
|
UTF-8
| 417
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash -l
# Generate a full macOS system report (system_profiler XML), compress it,
# and place "System Report.tar.gz" on the user's Desktop.
# Progress/notification lines are consumed by the wrapping .app (Platypus-style).
echo "OS X System Report is being created..."
# Work in the per-user temp dir. Quote it and bail out on failure — the
# original unquoted `cd $TMPDIR` would word-split on spaces and then write
# the report into whatever directory the script happened to be in.
cd "$TMPDIR" || exit 1
system_profiler -xml > System\ Report.spx
echo "Compressing report..."
tar -czf System\ Report.tar.gz System\ Report.spx
mv System\ Report.tar.gz ~/Desktop/
rm System\ Report.spx
echo "PROGRESS:100"
echo "System Report.tar.gz has been created on your Desktop"
printf "NOTIFICATION:System Report.tar.gz has been created on your Desktop\n"
exit 0
| true
|
338eec7395c1df3fd69a52bbcfd1276b16515954
|
Shell
|
mikelh/oyo-hacking
|
/rootfs/mnt/etc/apmd_proxy
|
UTF-8
| 9,685
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# apmd_proxy - program dispatcher for APM daemon
# Craig Markwardt (craigm@lheamail.gsfc.nasa.gov) 21 May 1999
# David Brownell (db@post.harvard.edu) 9 June 1999
#
# This shell script is called by the APM daemon (apmd) when the state
# of any power management function has changed. The exact events that
# trigger the calling of apmd_proxy depend on how apmd was configured
# at compile time.
#
# Within this script the system administrator should put any commands
# or actions which should be performed upon state transitions.
#
# apmd_proxy is called with specific arguments that describe the event
# that has occurred. It is this script's responsibility to query the
# hardware or the APM service (via /proc/apm) for more information,
# and to take the appropriate action.
#
# For example, apmd will call "apmd_proxy suspend system" just before
# the system is scheduled to go into suspend mode. The administrator
# may wish to perform site-specific actions like unloading drivers or
# disabling the network interface. When the system is resumed later,
# apmd will call "apmd_proxy resume normal", at which time those actions
# should be reversed.
#
# If the kernel APM driver is version 1.10 or higher (use the "apm"
# command to find out), apmd_proxy can return an error code for the
# suspend and standby events, indicating whether the pending mode
# should be rejected. For example, apmd_proxy may decide if, based on
# CPU or network activity or user instructions, a suspend initiated by
# the APM BIOS should be rejected.
#
# !! NOTE !! This is not the apmd_proxy used on most Debian systems!
#
# RETURN VALUE:
# 0 - nominal return; suspend and standby events are accepted
# 1 - reject a suspend or standby (MUST HAVE APM DRIVER 1.10 OR HIGHER)
#
# Here are the calling sequences for apmd_proxy:
#
# apmd_proxy start - APM daemon has started
# apmd_proxy stop - APM daemon is shutting down
# apmd_proxy suspend system - APM system has requested suspend mode
# apmd_proxy suspend critical - APM system indicates critical suspend (*)
# apmd_proxy standby system - APM system has requested standby mode
# apmd_proxy suspend user - User has requested suspend mode
# apmd_proxy standby user - User has requested standby mode
# apmd_proxy resume suspend - System has resumed from suspend mode
# apmd_proxy resume standby - System has resumed from standby mode
# apmd_proxy resume critical - System has resumed from critical suspend
# apmd_proxy change battery - APM system reported low battery
# apmd_proxy change power - APM system reported AC/battery change
# apmd_proxy change time - APM system reported need for time update (*)
# apmd_proxy change capability - APM system reported config. change (+)
#
# (*) - APM daemon may be modified to call these sequences
# (+) - Available if kernel APM driver supports it (driver ver. 1.10 or higher)
#
# SIMPLIFIED CONFIGURATION
#
# The operation of this script can be controlled either by setting the
# following variables appropriately, or by editing the script itself.
#
# Set UTC to true if your clock is based on UTC time. This settting
# is overridden by any settings in /etc/sysconfig/clock.
UTC=false
#
# Set SUSPEND_ON_AC to false if you wish to avoid suspend and standby
# events when your machine is connected to AC power. By default
# suspends can occur on either battery or AC power.
SUSPEND_ON_AC=true
#
# PCMCIA cards can be more or less amenable to an APM suspend event.
# If you have a card that cannot be suspended properly (such as a SCSI
# card), then it should be "ejected" before entering suspend mode.
# The cards are not physically ejected; rather, the power is turned
# off to them via the "cardctl eject" command, and is reactivated upon
# resume.
PCMCIA_EJECT_ON_SUSPEND=false
#
#
# DEBUGGING
#
# Uncomment commands under either METHOD 1 or METHOD 2 for debugging
# messages to the system log. Not recommended for general use, since
# it may activate your disk more than needed. The second method will
# log all commands and error messages encountered by apmd_proxy.
#
# METHOD 1 - Logs command line arguments of apmd_proxy only
# logger apmd_proxy $*
# METHOD 2 - Logs entire run of apmd_proxy to /tmp/apmd_proxy.log
# echo '****************' >> /tmp/apmd_proxy.log
# echo "$0 $*" >> /tmp/apmd_proxy.log
# date >> /tmp/apmd_proxy.log
# echo '----------------' >> /tmp/apmd_proxy.log
# exec 2>> /tmp/apmd_proxy.log
# set -x
#
# A convenience bash routine is included to show how to query AC power
# status.
#
# *******************************************************************
power_conserve() {
    # Battery-power profile hook: would shorten the IDE disk spindown
    # timeout, but the hdparm call ships commented out, so this is a
    # successful no-op by default.
    # /sbin/hdparm -q -S 18 /dev/hda # 18 == 1.5 minutes
    :
}
power_performance() {
    # AC-power profile hook: would disable the IDE disk spindown timer,
    # but the hdparm call ships commented out, so this is a successful
    # no-op by default.
    # /sbin/hdparm -q -S 0 /dev/hda
    :
}
# NOTE: APM BIOS drivers in kernel 2.2 and later have handled this
update_clock () {
    # Sync the kernel/system clock from the hardware clock (typically
    # needed after a resume). Honors UTC vs local-time configuration.
    if [ -f /etc/sysconfig/clock ]; then
        . /etc/sysconfig/clock
        # old style configuration used CLOCKMODE=GMT
        if [ "$CLOCKMODE" = GMT ]; then
            UTC=true
        fi
    fi
    if [ $UTC = false ]; then
        FLAG=""
    else
        FLAG="-u"
    fi
    # BUG FIX: the original `[ -x /sbin/clock ] && clock ... || hwclock ...`
    # also ran hwclock whenever /sbin/clock existed but *failed*, setting the
    # clock twice. Use an explicit if/else instead.
    # $FLAG is deliberately unquoted: when empty it must expand to no argument.
    if [ -x /sbin/clock ]; then
        clock -s $FLAG
    else
        hwclock -s $FLAG
    fi
}
# Start of main procedure.  Included are examples of some mild power
# management profiling, disabled by default.  Timer-based system
# suspends and standbys can be rejected if we are on AC power.
#
# Dispatch on the event name passed by apmd ($1) with a sub-event in $2.
case "$1" in
# ----------------------- SUSPEND and STANDBY ----------------------
# Handle customized behavior for APM standby and suspend events
# here.  Depending on your system, you may wish to enable some of
# the example actions that follow.
    "suspend"|"standby")
	# Activate this segment if you wish to disable normal suspend
	# events when you are on AC power.  This segment only works if
	# your APM BIOS sends "suspend system" events after an idle
	# period.  Also, you must be running a Linux kernel APM
	# driver version 1.10 or higher (run "apm" to find out).
	#
	if [ $SUSPEND_ON_AC = false -a $2 = system ]; then
	    if on_ac_power >/dev/null; then
		exit 1   # Reject (NOTE kernel support must be enabled)
	    fi
	fi

	# Standby handling ends here; only suspend continues below.
	if [ $1 = standby ]; then
	    exit 0
	fi
	# Standby events typically do not go past this point, but can
	# if you comment out the above lines.

	# Activate this segment if you wish to disable PCMCIA services
	# upon suspend events.  The PCMCIA driver nominally will
	# suspend all cards before reaching this point, but certain
	# cards cannot be suspended properly (notably, SCSI cards).
	# These cards must be forcefully software-ejected.  If you
	# uncomment this code, then be sure to also uncomment the
	# corresponding segment under RESUME.  Calling "cardctl
	# suspend" is needed for systems whose PCMCIA modules are
	# available but not APM-aware.  Calling it more than once is
	# not harmful.
	#
	if [ -x /sbin/cardctl ]; then
	    if [ $PCMCIA_EJECT_ON_SUSPEND = true ]; then
		/sbin/cardctl eject
	    else
		/sbin/cardctl suspend
	    fi
	fi

	# Uncomment this segment if your graphics card does not resume
	# in graphics mode properly (ie, in X windows).  This action
	# changes the screen to virtual console number 1, which is
	# usually a text console.  Upon resume, you will need to
	# change back to your X console manually.
	#
	# if [ -x /usr/bin/chvt ]; then
	#     /usr/bin/chvt 1; sleep 1
	# fi

	# other common actions: unload troublesome drivers
	# EXAMPLE: OSS sound may not suspend/resume properly
	#  - Unload the drivers here and then reload upon resume
	#    Path may vary.  Be sure to enable "soundon" below.
	# /usr/local/bin/soundoff
	;;

# ------------------------------- RESUME ---------------------------
#   Resume ... from standby is a NOP, except the clock update.
    "resume")
	# Typically the Linux system clock needs to be reset.
	update_clock

	# Activate this segment if you "ejected" PCMCIA cards above.
	# The default operation is to "resume", which is required for
	# systems whose PCMCIA modules are not APM-aware.
	#
	if [ $2 = suspend -a -x /sbin/cardctl ]; then
	    if [ $PCMCIA_EJECT_ON_SUSPEND = true ]; then
		/sbin/cardctl insert
	    else
		/sbin/cardctl resume
	    fi
	fi

	# Drives can forget their time-out setting after suspend,
	# so we may have to reprogram the drive.
	if on_ac_power >/dev/null; then
	    power_performance
	else
	    power_conserve
	fi

	# other common actions: reload troublesome drivers
	# EXAMPLE: reload OSS sound drivers.  Path may vary.
	# /usr/local/bin/soundon
	;;

# ------------------------------- START ----------------------------
#   Called when apmd first starts.
#   If we are on battery power, then attempt to "conserve" power.
    "start")
	if on_ac_power >/dev/null; then
	    power_performance
	else
	    power_conserve
	fi
	;;

# ------------------------------- STOP -----------------------------
#   Called when apmd is terminated.
#   Default mode, when apmd is off, is to be in "performance" mode.
    "stop")
	power_performance
	;;

# ------------------------ CHANGE in STATUS ------------------------
    "change")
	case $2 in
	    "power")
		# switched to/from AC power, added/removed battery, etc
		if on_ac_power >/dev/null; then
		    power_performance
		else
		    power_conserve
		fi
		;;

	    "time")
		# normally not called
		update_clock
		;;

	    "battery")
		# battery is at "low" level (BIOS defines -- e.g. as 50%)
		# can't do much of anything useful
		;;

	    "capability")
		# e.g. added new hardware (not battery!)
		;;
	esac
	;;
esac

# Unknown events fall through silently; always report success to apmd.
exit 0
| true
|
115314f73bbfdb14918bc6cb84006514f52c8253
|
Shell
|
ishtiaque05/dotfiles
|
/recipes/ruby.sh
|
UTF-8
| 1,498
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision rbenv + ruby-build and install the latest stable MRI Ruby on a
# Debian/Ubuntu host (also sets up the NodeSource and Yarn apt repos that
# native-extension builds rely on).
set -e
set -o pipefail

curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
sudo apt-get update
sudo apt-get install git-core curl zlib1g-dev build-essential libssl-dev libreadline-dev libyaml-dev libsqlite3-dev sqlite3 libxml2-dev libxslt1-dev libcurl4-openssl-dev software-properties-common libffi-dev nodejs yarn

# rbenv installation
echo "############### SETTING UP RBENV ########################"
cd
git clone https://github.com/rbenv/rbenv.git ~/.rbenv
echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >> ~/.bashrc
echo 'eval "$(rbenv init -)"' >> ~/.bashrc
# BUG FIX: the original ran `exec $SHELL` here (and again below), which
# replaces this script's process with an interactive shell and silently
# aborts every remaining provisioning step. Apply the PATH changes to the
# current process instead; ~/.bashrc covers future logins.
export PATH="$HOME/.rbenv/bin:$PATH"
eval "$(rbenv init -)"

echo "############## SETTING UP RUBY BUILD ####################"
git clone https://github.com/rbenv/ruby-build.git ~/.rbenv/plugins/ruby-build
echo 'export PATH="$HOME/.rbenv/plugins/ruby-build/bin:$PATH"' >> ~/.bashrc
export PATH="$HOME/.rbenv/plugins/ruby-build/bin:$PATH"

# Pick the highest plain x.y.z release from `rbenv install -l`; the regex
# skips pre-releases and non-MRI rubies, and the gsub strips the leading
# indentation that rbenv prints (the original left it in the result).
RUBY_VERSION=$(rbenv install -l | awk -F '.' '
/^[[:space:]]*[0-9]+\.[0-9]+\.[0-9]+[[:space:]]*$/ {
    v = ($1 * 100 + $2) * 100 + $3
    if (v > max) {
        max = v
        latest = $0
    }
}
END {
    gsub(/[[:space:]]/, "", latest)
    print latest
}')

echo "########### INSTALLING LATEST RUBY VERSION ${RUBY_VERSION} ###########"
rbenv install "$RUBY_VERSION"
rbenv global "$RUBY_VERSION"
echo "Installed ruby version:"
ruby -v
gem install bundler
rbenv rehash
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.