blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
57dd32b8f916c55970256bb3b255a3fb9294625f
|
Shell
|
smart-patrol/python-template
|
/scripts/push_ecr.sh
|
UTF-8
| 1,368
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build a Docker image and push it to AWS ECR, then enable scan-on-push and
# prune untagged image digests.
# Usage: push_ecr.sh [repo_name] [aws_account_id] [aws_region]
# repository creation and docker login happen here; see:
# https://docs.aws.amazon.com/AmazonECR/latest/userguide/getting-started-cli.html
REPO_NAME_DEFAULT=docker-image
ACCOUNT_ID_DEFAULT=$(aws sts get-caller-identity --query Account --output text)
AWS_REGION_DEFAULT=$(aws configure get region)
REPO_NAME=${1:-${REPO_NAME_DEFAULT}}
# Fix: the original read ${2:-{ACCOUNT_ID_DEFAULT}} (missing '$'), which
# expanded to the literal string "{ACCOUNT_ID_DEFAULT}" when $2 was unset.
AWS_ACCOUNT_ID=${2:-${ACCOUNT_ID_DEFAULT}}
AWS_REGION=${3:-${AWS_REGION_DEFAULT}}
ECR_HOST=$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
# Create the repository (errors harmlessly if it already exists) and log in.
aws ecr create-repository --repository-name "$REPO_NAME"
aws ecr get-login-password --region "$AWS_REGION" | docker login --username AWS --password-stdin "$ECR_HOST"
# Build, tag and push the image.
docker build -t "$REPO_NAME" .
docker tag "$REPO_NAME:latest" "$ECR_HOST/$REPO_NAME:latest"
docker push "$ECR_HOST/$REPO_NAME:latest"
# scan for vulnerabilities
# Fix: the original referenced the undefined variable $REPO here.
aws ecr put-image-scanning-configuration \
--repository-name "$REPO_NAME" \
--image-scanning-configuration scanOnPush=true
# find old (untagged) image digests
OLD_IMAGE_DIGESTS=$(aws ecr --region "$AWS_REGION" list-images --repository-name "$REPO_NAME" --filter tagStatus=UNTAGGED | jq '.imageIds | map({imageDigest: .imageDigest})')
# delete old images if they exist
if [ ! "$OLD_IMAGE_DIGESTS" = '[]' ]; then
aws ecr --region "$AWS_REGION" batch-delete-image --repository-name "$REPO_NAME" --image-ids "$OLD_IMAGE_DIGESTS"
fi
| true
|
9f189ec7a7656e6a1094359325b9915a64a9b978
|
Shell
|
Nitinrajyadav/CFML-CI-2019
|
/tests/ci/scripts/ci-helper-acf.sh
|
UTF-8
| 553
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CI helper for Adobe ColdFusion (ACF) 10: on "install", rewrites hard-coded
# /opt install paths and the default server port inside the unpacked tree.
# Usage: ci-helper-acf.sh {install|start|stop} [extra arg passed to base helper]
CONTROL_SCRIPT='coldfusion10/cfusion/bin/coldfusion'
PLATFORM_DIR="coldfusion10"
WEBROOT="coldfusion10/cfusion/wwwroot"
MY_DIR=`dirname $0`
# NOTE(review): CONTROL_SCRIPT/PLATFORM_DIR are unused here — presumably read
# by the sourced base helper. The base helper is expected to define WORK_DIR
# and SERVER_PORT used below; confirm against ci-helper-base.sh.
source $MY_DIR/ci-helper-base.sh $1 $2
case $1 in
install)
# Rewrite absolute /opt paths baked into config files to the CI work dir
# (the webroot is excluded to avoid touching site content).
echo "Fixing ACF install directory..."
grep -rl "/opt/coldfusion10/" --exclude-dir=$WEBROOT . | xargs -n 1 sed -i "s#/opt/coldfusion10/#$WORK_DIR/coldfusion10/#g"
# Replace the default port 8500 with the CI-assigned port.
sed -i "s/8500/$SERVER_PORT/g" coldfusion10/cfusion/runtime/conf/server.xml
;;
start|stop)
# start/stop are no-ops for ACF in this helper.
;;
*)
echo "Usage: $0 {install|start|stop}"
exit 1
;;
esac
exit 0
| true
|
dbfa501a071a210c4126d204324ad1564a4bb474
|
Shell
|
mrodalgaard/alfred-network-workflow
|
/tests/dnsTests.bats
|
UTF-8
| 1,926
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bats
# Bats tests for the DNS helpers in src/helpers.sh.
# parseDNSLine emits a '~'-separated record: title ~ ip list ~ icon.
. src/helpers.sh
load variables
@test "getDNS: get current DNS list" {
run getDNS "$DNS"
[ "$output" = "8.8.8.8 / 8.8.4.4 / 192.168.1.1" ]
}
# Config-line parsing: "Name:ip1,ip2" -> "Name~ip1 / ip2~$ICON_DNS"
@test "parseDNSLine: parse a single dns config line" {
run parseDNSLine "Google DNS:8.8.8.8,8.8.4.4"
IFS='~' read -r -a ARRAY <<< "$output"
[ "$status" -eq 0 ]
[ "${ARRAY[0]}" == "Google DNS" ]
[ "${ARRAY[1]}" == "8.8.8.8 / 8.8.4.4" ]
[ "${ARRAY[2]}" == "$ICON_DNS" ]
}
@test "parseDNSLine: parse simple config" {
run parseDNSLine "OpenerDNS:42.120.21.30"
IFS='~' read -r -a ARRAY <<< "$output"
[ "${ARRAY[0]}" == "OpenerDNS" ]
[ "${ARRAY[1]}" == "42.120.21.30" ]
}
# Whitespace around the name and each IP must be trimmed.
@test "parseDNSLine: parse with spaces" {
run parseDNSLine " Random DNS : 1.2.3.4 , 6.7.8.9"
IFS='~' read -r -a ARRAY <<< "$output"
[ "${ARRAY[0]}" == "Random DNS" ]
[ "${ARRAY[1]}" == "1.2.3.4 / 6.7.8.9" ]
}
# Comments, empty lines and malformed lines all produce an empty record.
@test "parseDNSLine: ignore comments" {
run parseDNSLine "# comment"
IFS='~' read -r -a ARRAY <<< "$output"
[ "${ARRAY[0]}" == "" ]
}
@test "parseDNSLine: ignore comments with separator" {
run parseDNSLine "# comment: this is a comment"
IFS='~' read -r -a ARRAY <<< "$output"
[ "${ARRAY[0]}" == "" ]
}
@test "parseDNSLine: ignore empty lines" {
run parseDNSLine " "
IFS='~' read -r -a ARRAY <<< "$output"
[ "${ARRAY[0]}" == "" ]
}
# Passing the currently-active DNS list marks the matching entry as used.
@test "parseDNSLine: set used state" {
run parseDNSLine "Google DNS:8.8.8.8,8.8.4.4" "8.8.8.8 / 8.8.4.4"
IFS='~' read -r -a ARRAY <<< "$output"
[ "${ARRAY[0]}" == "Google DNS (used)" ]
[ "${ARRAY[1]}" == "8.8.8.8 / 8.8.4.4" ]
[ "${ARRAY[2]}" == "$ICON_DNS_USED" ]
}
@test "parseDNSLine: handle invalid line" {
run parseDNSLine "Invalid 1.2.3.4"
IFS='~' read -r -a ARRAY <<< "$output"
[ "${ARRAY[0]}" == "" ]
}
@test "parseDNSLine: handle missing ip" {
run parseDNSLine "Invalid:"
IFS='~' read -r -a ARRAY <<< "$output"
[ "${ARRAY[0]}" == "" ]
}
| true
|
f58a9046a24360386323121f084729c60adfe1e4
|
Shell
|
skasn/cenpb
|
/code/process_trace.sh
|
UTF-8
| 1,882
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#SBATCH -n 1
#SBATCH --cpus-per-task=4
#SBATCH --time=24:00:00
# SLURM job: run HMM searches over gzipped FASTA shards in a data directory,
# one HMM per line of the HMM list file, producing per-shard BED match files.
# Usage: process_trace.sh DATA_DIR HMM_LIST_FILE
DDIR=$1
HMMFN=$2
# Sequence length bounds and worker count forwarded to run_hmmsearch.sh.
MINL=141
MAXL=201
NPROC=4
# E-value and overlap
# thresholds for parsing HMM outputs
# NOTE(review): ETHR/OTHR are only referenced in the commented-out
# post-processing block below.
ETHR=10
OTHR=40
cd ${DDIR}
if [ ! -d hmm ]; then
mkdir hmm
fi
HOPTS="${MINL} ${MAXL} ${NPROC}"
for F in fasta*.gz; do
# Strip .gz (then .fasta) to build per-shard output prefixes.
FPFX=`basename ${F%.*}`
FBED=${FPFX%.*}.hmm.bed
# Run HMMER
# Each line of $HMMFN is expected to be "<hmm-file> <strand>".
while read LINE; do
HMM=`echo ${LINE} | awk '{print $1}'`
STRAND=`echo ${LINE} | awk '{print $2}'`
HPFX=`basename ${HMM%.*}`
HMMBED=${FPFX}.${HPFX}.hmm.bed
# Run HMM
run_hmmsearch.sh ${F} ${HMM} ${STRAND} ${HOPTS}
# Get the subsequences that match HMM
# Remove zero-byte (empty) result files.
if [ -f ${HMMBED} ] && [ ! -s ${HMMBED} ]; then
rm ${HMMBED}
fi
done < ${HMMFN}
# # If there are HMM matches, then process them
# # by first deduplicating and then getting the
# # corresponding FASTA sequence
# BEDS=(`ls ${FPFX}*.bed 2>/dev/null`)
# if [ ${#BEDS[@]} -gt 0 ]; then
# HPFX=`basename ${HMMFN%.*}`
# HMMBED=${FPFX}.${HPFX}.bed
# HMMFA=${FPFX}.${HPFX}.fa
# HMMFULLFA=${FPFX}.${HPFX}.full.fa
# HMMACC=${FPFX}.${HPFX}.acc
# # get_optimal_bed_intervals.py -b -f ${BEDS[@]} > ${HMMBED}
# cat ${BEDS[@]} | sort -k1,1 -k2,2n -k5,5nr > ${FPFX}.tmp
# resolve_hmmsearch_overlaps.py -f ${FPFX}.tmp -e ${ETHR} -o ${OTHR} > ${HMMBED}
# rm ${FPFX}.tmp
# if [ -s ${HMMBED} ]; then
# # Get the HMM matches
# stranded_bed_to_fasta.sh ${HMMBED} ${F} ${HMMFA}
# fi
# # Get the full reads that match
# cat ${BEDS[@]} | awk '{print $1}' | sort | uniq > ${HMMACC}
# seqtk subseq ${F} ${HMMACC} > ${HMMFULLFA}
# rm ${HMMACC}
# mv ${BEDS[@]} hmm/
# fi
done
echo "JOB DONE."
| true
|
ca0ef36ec586247e5447921a7b47910a1670bf90
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/garchdeps-git/PKGBUILD
|
UTF-8
| 780
| 2.890625
| 3
|
[] |
no_license
|
# Maintainer: Matthew Gamble
# PKGBUILD for garchdeps, built from the upstream git repository HEAD.
pkgname=garchdeps-git
pkgver=2013.09.21
pkgrel=1
pkgdesc="Tool that shows dependencies of Arch Linux packages in a graphical format"
arch=('any')
url="http://bruno.adele.im/projets/garchdeps"
license=('GPL3')
# Previously graphviz was a hard dependency. This was unnecessary, as the script
# does not call any graphviz binaries directly as far as I can tell. It just generates
# files that can be processed by the dot binary.
depends=('python')
makedepends=('git')
optdepends=('graphviz')
source=('git://github.com/badele/garchdeps.git')
sha256sums=('SKIP')
# Version the package by the date of the latest upstream commit (dashes -> dots).
pkgver() {
cd "garchdeps"
git log -1 --format=format:%cd --date=short | sed 's|-|.|g'
}
# Install the single python script into /usr/bin.
package(){
cd "$srcdir/garchdeps"
install -Dm 755 garchdeps.py $pkgdir/usr/bin/garchdeps.py
}
| true
|
962c08d61bbc3b19b688968450e5e4b181d06203
|
Shell
|
abicky/fluentd-daemon-service-example
|
/fluentd-forwarder/entrypoint.sh
|
UTF-8
| 1,002
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint for a fluentd forwarder: loads defaults, renders the
# config from an ERB template, fills in default -c/-p flags, and drops root.
#source vars if file exists
DEFAULT=/etc/default/fluentd
if [ -r $DEFAULT ]; then
# allexport: every variable assigned while sourcing is exported.
set -o allexport
. $DEFAULT
set +o allexport
fi
# If the user has supplied only arguments append them to `fluentd` command
# ("${1#-}" != "$1" is true when $1 starts with '-').
if [ "${1#-}" != "$1" ]; then
set -- fluentd "$@"
fi
# Render the fluentd config file from its ERB template.
erb /fluentd/etc/${FLUENTD_CONF}.erb > /fluentd/etc/${FLUENTD_CONF}
# If user does not supply config file or plugins, use the default
if [ "$1" = "fluentd" ]; then
if ! echo $@ | grep ' \-c' ; then
set -- "$@" -c /fluentd/etc/${FLUENTD_CONF}
fi
if ! echo $@ | grep ' \-p' ; then
set -- "$@" -p /fluentd/plugins
fi
# When running as root, fix socket-dir ownership then run as user 'fluent'.
if [ $(id -u) -eq 0 ]; then
# We assume that /var/run/fluentd on the host is mounted into /fluentd/var/run
# and the ECS agent creates the directory with root:root ownership,
# so the ownership must be changed to create a unix domain socket.
chown -R fluent /fluentd/var/run
set -- su-exec fluent "$@"
fi
fi
exec "$@"
| true
|
1a9adf077f99e81dd63bc95211cf1915b9d8011a
|
Shell
|
mijime/dotfiles
|
/scripts/scalabin-installer
|
UTF-8
| 617
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install coursier (if missing) into ${HOME}/bin and use it to bootstrap the
# metals language server (configured for coc.nvim) and the scalafmt CLI.
set -ue
bin_dir=${HOME}/bin
# Fix: under `set -e` the curl below aborted on a fresh machine where ~/bin
# does not exist yet; make sure the target directory is present first.
mkdir -p "${bin_dir}"
if [[ ! -f "${bin_dir}/coursier" ]]; then
curl -L -o "${bin_dir}/coursier" https://git.io/coursier
fi
chmod +x "${bin_dir}/coursier"
# Bootstrap metals with the JVM options recommended for language servers.
"${bin_dir}/coursier" bootstrap \
--java-opt -Xss4m \
--java-opt -Xms1G \
--java-opt -Dmetals.client=coc.nvim \
org.scalameta:metals_2.12:0.7.5 \
-r bintray:scalacenter/releases \
-r sonatype:snapshots \
-o "${bin_dir}/metals-vim" -f
# Bootstrap the standalone scalafmt CLI launcher.
"${bin_dir}/coursier" bootstrap \
org.scalameta:scalafmt-cli_2.12:2.1.0-RC1 \
-r sonatype:snapshots \
--standalone --main org.scalafmt.cli.Cli \
-o "${bin_dir}/scalafmt" -f
| true
|
bef5ca31606755c173180dfab576f9eeae0d67d8
|
Shell
|
LoicMarechal/GMlib
|
/utilities/gmlpart.sh
|
UTF-8
| 431
| 3.390625
| 3
|
[
"LicenseRef-scancode-philippe-de-muyter",
"MIT"
] |
permissive
|
#!/bin/sh
# Partition a .meshb mesh into nb_parts domains: convert to metis format
# (cached), partition with mpmetis, then reorder/split with hilbert.
# Usage: gmlpart source.meshb nb_parts
if [ $# -ne 2 ]
then
echo "gmlpart source.meshb nb_parts"
exit 1
fi
inputbase=`basename $1 .meshb`
metismesh=${inputbase}_metis.mesh
outname=${inputbase}$2"parts"
# Convert once and reuse the cached metis-format mesh on later runs.
# NOTE(review): meshb2metis is passed -out ${inputbase}; presumably it appends
# "_metis.mesh" itself so that $metismesh is produced — confirm.
if [ ! -f "$metismesh" ]
then
echo "convert" $1 "to" $metismesh
meshb2metis -in $1 -out ${inputbase}
fi
# Nodal-graph partitioning; mpmetis writes ${metismesh}.npart.$2/.epart.$2.
mpmetis -gtype=nodal ${metismesh} $2
hilbert -in $1 -out $outname -gmlib -ndom $2 -npart ${metismesh}.npart.$2 -epart ${metismesh}.epart.$2 8
| true
|
b43cd6370dfb8c981132981542dd8869df2aee2a
|
Shell
|
Feynman27/HIMuonSkimming
|
/MuonSlimming/WmunuMonteCarlo/mergeFiles.sh
|
UTF-8
| 519
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Fix: the original shebang was the broken "#!binbash", so the kernel could
# not select an interpreter when the script was executed directly.
# Merge the Wmunu MC-in-data ROOT ntuples via hadd.C and timestamp the output.
if [ -e input.txt ]
then
rm input.txt
fi
# Build a single comma-separated list of input files for hadd.C to read.
ls -m -U user.tbalestr.NTUP_HI.mc11_2TeV.PythiaWmunu_1Lepton.DataOverlay.Slimmed.2013.03.19.v01.*/*root* --color=never > input.txt;tr -d '\n' <input.txt> input2.txt;mv input2.txt input.txt
echo "Merging Wmunu MC in Data..."
root -l -q -b hadd.C+
echo "Done.Cleaning..."
# Rename the merged file with today's date to avoid clobbering past merges.
if [ -e HISingleMuonWmunuPYTHIADataOverlay.root ]
then
mv HISingleMuonWmunuPYTHIADataOverlay.root HISingleMuonWmunuPYTHIADataOverlay.`date +%m.%d.%Y`.root
fi
echo "Done running script."
| true
|
e2627f6a6e82b5dc0cd4963e7f7aab31cdf58244
|
Shell
|
12019/WISEAgent
|
/build/tools/PortingBuild/CheckoutAndComplieSource.sh
|
UTF-8
| 639
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Check out the SUSI driver source tree from the internal SVN server and build it.
# Usage: CheckoutAndComplieSource.sh SVN_USERNAME SVN_PASSWORD
USERNAME=$1
PASSWD=$2
echo "================================================"
echo " Check Out SUSI Driver from SVN"
echo "================================================"
# SECURITY(review): the password is passed on the svn command line and is
# visible in `ps` and shell history; consider --password-from-stdin or a
# credential store instead.
svn co https://172.20.2.44/svn/ess/SUSI/SUSI_4.0//SUSI/SourceCode \
--username ${USERNAME} --password ${PASSWD}
if [ "$?" -ne "0" ]; then
echo "Check out SUSI Driver Error!!!!"
exit 1
fi
echo "================================================"
echo " Build SUSI Driver SourceCode"
echo "================================================"
cd SourceCode/
make
if [ "$?" -ne "0" ]; then
echo "Build SUSI Driver Error!!!!"
exit 1
fi
| true
|
de1c8bbed95d9d7ce2fc100226c74636a1a6a31c
|
Shell
|
Axway/elasticsearch-docker-beat
|
/start.sh
|
UTF-8
| 273
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Entrypoint for the dbeat container.
# If the first argument does not start with '-', treat the arguments as a
# complete command and exec it instead of the default dbeat startup.
# Fix: the original used the bash-only substring "${1:0:1}" (and "[[") under
# #!/bin/sh, which fails on POSIX shells like dash/ash; use a case pattern.
case "${1:-}" in
-*)
# Arguments look like dbeat flags: fall through to the default startup.
;;
*)
echo "Overriding command: $@"
exec "$@"
;;
esac
PROGRAM=/etc/dbeat/dbeat
# Prepend the dbeat binary to the flag arguments.
set -- $PROGRAM "$@"
cd /etc/dbeat
echo "Starting configuration file updater"
./updater || exit 1
cat /etc/beatconf/dbeat.yml
echo "Starting dbeat: $@"
exec "$@"
| true
|
16690d968fa3b438e959de85f624364bb63c0c81
|
Shell
|
exodious/bats-mock
|
/test/mock_set_status.bats
|
UTF-8
| 914
| 2.984375
| 3
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bats
# Tests for mock_set_status: argument validation, the default exit status,
# a single programmed status, and call-indexed (n-th call) statuses.
load mock_test_suite
@test 'mock_set_status requires mock to be specified' {
run mock_set_status
[[ "${status}" -eq 1 ]]
[[ "${output}" =~ 'Mock must be specified' ]]
}
@test 'mock_set_status requires status to be specified' {
run mock_set_status "${mock}"
[[ "${status}" -eq 1 ]]
[[ "${output}" =~ 'Status must be specified' ]]
}
@test 'mock_set_status 0 status is set by default' {
run "${mock}"
[[ "${status}" -eq 0 ]]
}
@test 'mock_set_status sets a status' {
mock_set_status "${mock}" 127
run "${mock}"
[[ "${status}" -eq 127 ]]
}
@test 'mock_set_status sets an n-th status' {
# Program statuses for calls 2 and 4 only; other calls keep the default 0.
mock_set_status "${mock}" 127 2
mock_set_status "${mock}" 255 4
run "${mock}"
[[ "${status}" -eq 0 ]]
run "${mock}"
[[ "${status}" -eq 127 ]]
run "${mock}"
[[ "${status}" -eq 0 ]]
run "${mock}"
[[ "${status}" -eq 255 ]]
run "${mock}"
[[ "${status}" -eq 0 ]]
}
| true
|
88b479d9f046e0dc433fd43678c49e55d21436cd
|
Shell
|
oconnor663/dotfiles
|
/bin/kblinks
|
UTF-8
| 626
| 2.828125
| 3
|
[] |
no_license
|
#! /bin/bash
# Open a set of Keybase pages (profile, sigchain, and API endpoints) for a
# given username in chromium.
# Usage: kblinks USERNAME   (set $local to target a local dev server)
set -e -u -o pipefail
username="${1:-}"
# Fix: the original tested `[ -z $username ]` unquoted — with an empty value
# that collapses to the one-argument `[ -z ]`, which only works by accident,
# and it breaks outright for values containing spaces or glob characters.
if [ -z "$username" ] ; then
echo need a username
exit 1
fi
# $local (environment variable) switches between production and a dev server.
if [ -z "${local:-}" ] ; then
base="https://keybase.io"
else
base="http://localhost:3000"
fi
# Resolve the username to a uid via the lookup API.
uid="$(curl --silent "$base/_/api/1.0/user/lookup.json?username=$username" \
| python -c "import json, sys; o=json.load(sys.stdin); print(o['them']['id'])")"
chromium "$base/$username" --new-window
chromium "$base/$username/chain"
chromium "$base/_/api/1.0/user/lookup.json?username=$username"
chromium "$base/_/api/1.0/sig/get.json?uid=$uid"
chromium "$base/_/api/1.0/merkle/path.json?username=$username"
| true
|
7ce215cfc6a017b093eb63725883697e9c82793a
|
Shell
|
ml6team/connexion
|
/release.sh
|
UTF-8
| 558
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Release helper: stamps __version__, runs the test matrix, builds and uploads
# the distribution, then reverts the version bump and pushes the tag.
# Usage: release.sh <version>
# Fix: the script uses the bash-only [[ ... ]] test below, so it must run
# under bash — the original #!/bin/sh shebang breaks on POSIX shells.
if [ $# -ne 1 ]; then
>&2 echo "usage: $0 <version>"
exit 1
fi
set -o errexit
set -o xtrace
python3 --version
git --version
version=$1
# macOS (BSD) sed requires an explicit empty suffix argument for -i.
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i "" "s/__version__ = .*/__version__ = '${version}'/" */__init__.py
else
sed -i "s/__version__ = .*/__version__ = '${version}'/" */__init__.py
fi
tox -e py37-pypi,flake8 --skip-missing-interpreters
rm -fr dist/*
python3 setup.py sdist bdist_wheel
twine upload dist/*
# revert version
git checkout -- */__init__.py
git tag "${version}"
git push --tags
| true
|
427b99a10db6818bc4e7597d6f5daba191f17225
|
Shell
|
bereziat/heimdali-test
|
/inr2h5/tests.sh
|
UTF-8
| 2,753
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Round-trip tests for the inr2h5/h52inr converters: each mode converts lena
# through HDF5 and back, then compares against the original with so/ical.
case $# in
0)
cat <<EOF
Performs some tests on inr2h5/h52inr commands
Usage is: $0 (float|double|unsigned|signed|bits|packed|exponent)
if SILENT env variable is defined, print only the test result
EOF
exit
;;
esac
# Trace commands unless SILENT is requested.
[ -z "$SILENT" ] && set -x
# Crop the reference image to 200 rows as the common test input.
extg ../lena.inr >lena.inr -y 200
case $1 in
float)
echo "####### Float simple precision #######"
cco -r lena.inr | ./inr2h5 - lena.h5
[ -z "$SILENT" ] && h5dump lena.h5 | grep 'Voxel' -A10
./h52inr lena.h5 | so lena.inr | ical
;;
double)
echo "####### Float double precision #######"
cco -r lena.inr -o 8 | ./inr2h5 - lena.h5
[ -z "$SILENT" ] && h5dump lena.h5 | grep 'Voxel' -A10
# 'so' do not deal with double precision
./h52inr lena.h5 | cco -r | so lena.inr | ical
;;
unsigned)
for nbytes in 1 2 4; do
echo "####### Unsigned with " $nbytes " byte(s) #######"
cco -o $nbytes lena.inr | ./inr2h5 - lena.h5
[ -z "$SILENT" ] && h5dump lena.h5 | grep 'Voxel' -A10
./h52inr lena.h5 | so lena.inr | ical
done
;;
signed)
for nbytes in 1 2 4; do
echo "####### Signed with " $nbytes " byte(s) #######"
cco -s -o $nbytes lena.inr | ./inr2h5 - lena.h5
[ -z "$SILENT" ] && h5dump lena.h5 | grep 'Voxel' -A10
./h52inr lena.h5 | so lena.inr | ical
done
;;
bits)
for bits in 1 2 3 4 5 6 7 9 10 11 12 ; do
echo "####### Unsigned with " $bits " bit(s) #######"
cco -b $bits lena.inr > lena-bits.inr
./inr2h5 lena-bits.inr lena.h5
[ -z "$SILENT" ] && h5dump lena.h5 | grep 'Voxel' -A10
./h52inr lena.h5 | so lena-bits.inr | ical
done
;;
packed)
for bits in 1 2 3 4 5 6 7 9 10 11 12 ; do
echo "####### Packed with " $bits " bit(s) #######"
cco -p -b $bits lena.inr > lena-bits.inr
./inr2h5 lena-bits.inr lena.h5
[ -z "$SILENT" ] && h5dump lena.h5 | grep 'Voxel' -A10
./h52inr lena.h5 | so lena-bits.inr | ical
done
;;
exponent)
for exp in -1 0 1 ; do
echo "####### Unsigned with exponent " $exp " #######"
cco -e $exp lena.inr | ./inr2h5 - lena.h5
cco -e $exp lena.inr | par
cco -e $exp lena.inr | cpar -n | grep EXP
[ -z "$SILENT" ] && h5dump lena.h5 | grep exponent -A5
./h52inr lena.h5 | par
./h52inr lena.h5 | cpar -n | grep EXP
done
for exp in -1 0 1 ; do
echo "####### Signed with exponent " $exp " #######"
cco -s -e $exp lena.inr | ./inr2h5 - lena.h5
cco -s -e $exp lena.inr | par
cco -s -e $exp lena.inr | cpar -n | grep EXP
[ -z "$SILENT" ] && h5dump lena.h5 | grep exponent -A5
./h52inr lena.h5 | par
./h52inr lena.h5 | cpar -n | grep EXP
done
;;
clean)
# Fix: the original removed "lean.h5" (typo) — every test branch above
# writes lena.h5, so that file was never cleaned up.
rm -f lena-bits.inr lena.inr lena.h5
;;
*)
$0
esac
# signed bits (packed), scale, biais, history
| true
|
91dbad84320f6292dead0efbb52fa4ed4d9d7b38
|
Shell
|
joeblew99/postgres-migrator
|
/src/db/unschedule_pgcron_jobs.sh
|
UTF-8
| 631
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
# Removes every scheduled pg_cron job on the given server/database.
# Defined but not invoked in this file — presumably sourced and called by the
# migration driver; confirm against callers.
# $1 - restore database server name (required)
# $2 - backup database name (must be exactly 'pgcrontab')
# $3 - database user (defaults to postgres)
unschedule_pgcron_jobs() {
# Set script variables
local PG_SERVER=$1
if [[ ! ${PG_SERVER} ]]; then
echo "Restore database server name is required."
exit 1
fi
local PG_DATABASE=$2
if [[ ! $PG_DATABASE ]]; then
echo "Backup database name is required."
exit 1
fi
local PG_USER=$3
if [[ ! $PG_USER ]]; then
PG_USER=postgres
fi
# Guard: only ever operate on the dedicated pgcrontab database.
if [ $PG_DATABASE != 'pgcrontab' ]; then
exit 1
fi
echo "Create pg_cron"
### Init cron tab extension
# Unschedule every job currently listed in cron.job.
sudo psql -v ON_ERROR_STOP=1 --username "${PG_USER}" -h ${PG_SERVER} -d ${PG_DATABASE} <<-EOSQL
SELECT cron.unschedule(jobid) FROM cron.job;
EOSQL
}
| true
|
695b033322bed6ff4aa4e000c8f4f27f65e81298
|
Shell
|
alisw/AliRoot
|
/ITSMFT/ITS/FT2/aliroot_fastMC
|
UTF-8
| 1,454
| 3.4375
| 3
|
[] |
permissive
|
#!/bin/bash
# Grid job wrapper for the ALICE FT2 fast simulation: works around overly
# long $HOME paths, optionally prepares OCDB snapshot mode, delegates to
# simrun.sh, and validates that the expected ESD output file exists.
free
echo _____________________________________________
echo "HOME IS $HOME"
ls $HOME
# If $HOME exceeds ~100 chars, alias it via a short /tmp symlink for the
# duration of the job (restored and removed at the end).
length=`echo $HOME |wc -c`
if (( $length >= 100 )) ;
then
echo "WARNING: The home directory $HOME is longer than 100 char"
OLDHOME=$HOME
NEWHOME="/tmp/alien_home_dir.${ALIEN_PROC_ID}"
echo "CHANGING HOME TO $NEWHOME"
ln -s "$HOME" "$NEWHOME"
export HOME=$NEWHOME
fi
echo _____________________________________________
export PRODUCTION_METADATA="$ALIEN_JDL_LPMMETADATA"
# First argument "OCDB" switches the job into OCDB-snapshot-creation mode.
if [ "$1" = "OCDB" ]; then
echo "Setting env for generating OCDB.root"
export OCDB_SNAPSHOT_CREATE="kTRUE"
export OCDB_SNAPSHOT_FILENAME="OCDB.root"
touch OCDB.generating.job
shift
fi
# Run the actual simulation/reconstruction, propagating its exit code and
# writing a validation message on failure.
if [ -f simrun.sh ]; then
echo "Calling simrun.sh with $*"
chmod a+x simrun.sh
./simrun.sh $*
error=$?
if [ $error -ne 0 ]; then
echo "*! Command 'simrun.sh $*' exited with error code $error"
echo "Command 'simrun.sh $*' exited with error code $error" > validation_error.message
exit $error
fi
fi
# The chain must have produced this ESD file; otherwise flag the job failed.
if [ ! -f jstiller_AliESDs.root ]; then
echo "*! Could not find jstiller_AliESDs.root, the simulation/reconstruction chain failed!"
echo "Could not find jstiller_AliESDs.root, the simulation/reconstruction chain failed!" > validation_error.message
exit 2
fi
# Undo the temporary HOME redirection.
if [ ! -z "$NEWHOME" ]; then
echo "DELETING $NEWHOME"
export HOME=$OLDHOME
rm -rf $NEWHOME
fi
exit 0
| true
|
11f80800e17ceff3200a0301fbb0cf5815fba9c3
|
Shell
|
xprime480/projects
|
/tools/csvtools/test/ec_tester.sh
|
UTF-8
| 425
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Driver for one equ_columns.py test case: runs the tool via the shared
# do_the_test helper and compares sample output against the control file.
# Usage: ec_tester.sh ROOT VERS KEY1 KEY2
EXE=../equ_columns.py
SAMPLEDIR=./sample
CONTROLDIR=./control
ROOT=${1}
VERS=${2}
KEY1=${3}
KEY2=${4}
INPUT_FILE=${ROOT}.csv
SAMPLE_FILE=${SAMPLEDIR}/${ROOT}_${VERS}.txt
CONTROL_FILE=${CONTROLDIR}/${ROOT}_${VERS}.txt
STD_OUT=${SAMPLE_FILE}
# do_the_test.sh defines the do_the_test function used below.
source ./do_the_test.sh
# NOTE(review): ${FLAGS} is never set in this script — presumably exported by
# the caller (or intentionally empty); confirm against the test harness.
do_the_test \
${EXE} \
"${FLAGS} ${KEY1} ${KEY2} ${INPUT_FILE}" \
${SAMPLE_FILE} \
${CONTROL_FILE} \
${STD_OUT}
exit 0
| true
|
ad01af99e2e0eafc86f8304ff3219861e48e4cd8
|
Shell
|
tbernacchi/scripts-bash
|
/scripts/get-metricas-vmware.sh
|
UTF-8
| 960
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# Remotely invokes (runs) a batch job on server infrajobs01 to collect the
# status of VMware and its VMs, enabling quick identification and recovery
# in case of a virtualization failure such as a DC shutdown.
# Author: Ambrosia Ambrosiano
# mail: ambrosia.ambrosiano@tabajara.com.br / ambrosia@gmail.com
# Date: 04/08/2018
# Variables:
USER="svc-virt-prod"
# SECURITY(review): plaintext password hardcoded here and passed via sshpass
# argv (visible in `ps`); should come from a protected file or an ssh key.
PASS="kwf4384R?"
REMOTE="infrajobs01.tabajara.local"
SCRIPT="c:/scripts/rotinas/metricas/exec_gets-2-graylog.bat"
# LOCKFILE
LOCK="/tmp/report-vmware.lck"
### LOCK
# Skip the run if a previous instance still holds the lock; otherwise record
# our PID and run the report. (Note: the check+create is not atomic.)
fn_check_lock()
{
if [ -e $LOCK ]
then
echo "Arquivo de lock $LOCK encontrado, saindo..."
exit 0
else
echo $$ > $LOCK
fn_exec_reports
fi
}
# Run the remote batch script that produces and ships the metric files.
fn_exec_reports()
{
###
# Executes the procedures that create and copy the report files.
sshpass -p $PASS ssh $USER@$REMOTE "$SCRIPT"
}
# Garbage-collect the lockfile.
fn_gc()
{
rm -f $LOCK
}
fn_check_lock
fn_gc
| true
|
9d50a1ecd06d71c9460e5f5378eb96057d1dbfec
|
Shell
|
akemery/initiation_to_linux
|
/findfile/run
|
UTF-8
| 1,041
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Auto-grader for a "find a file" exercise: picks a random target file and a
# random output-file spelling, sets up the student environment, and accepts
# any of three equivalent `find` output forms as the student's answer.
random_input=`getinput @random`
set -- $random_input
# Get the random value and convert it to index in files data set
rand1=`echo "$1" | grep -Eo '[+-]?[0-9]+([.][0-9]+)?'`
rand2=`echo "$2" | grep -Eo '[+-]?[0-9]+([.][0-9]+)?'`
# Map each random value to an index 0..2 via (value*100) mod 3.
index1=`bc <<< "scale=0; (($rand1)*100)%3" | awk '{printf("%d\n",$1)}' `
index2=`bc <<< "scale=0; (($rand2)*100)%3" | awk '{printf("%d\n",$1)}' `
files=("come.doc" "agoue.avi" "lokossa.doc")
outputs=("results.out" "Results.Out" "rEsults.out")
file=${files[$index1]}
output=${outputs[$index2]}
ssh_student --setup-script "python3 student/start.py"
# get the last line of file
# Acceptable answers: absolute path, ./-relative, and student/-relative forms.
solution1=`find /task/student/ -name $file`
solution2=`cd /task/student/ && find . -name $file`
solution3=`cd /task/ && find student -name $file`
student=`cat /task/student/$output`
if [ "$solution1" = "$student" ] || [ "$solution2" = "$student" ] || [ "$solution3" = "$student" ]; then
feedback-result success
else
feedback-result failed
feedback-msg -m "You have to use find $student $output $file $solution"
fi
| true
|
dd347bab4455cab52fceed26aac3b7dd701b681c
|
Shell
|
jandrovins/PRESTO-automatization
|
/apps/cfitsio.sh
|
UTF-8
| 659
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and install cfitsio 3.49 with the Intel oneAPI compilers into
# /share/apps, then register the corresponding environment module.
export ROOT_DIR=$(pwd)
source /share/apps/intel/setvars.sh --config=${ROOT_DIR}/config.txt
export CC='icx' CFLAGS='-march=sandybridge -O3 -Ofast -fp-model fast' F77='ifort' FFLAGS='-f77rtl -fast'
# cfitsio compilation
mkdir -p /share/apps/cfitsio/3.49
# Guard the cd's so a failed mkdir/download can't run make in the wrong place.
cd /share/apps/cfitsio/3.49 || exit 1
wget http://heasarc.gsfc.nasa.gov/FTP/software/fitsio/c/cfitsio-3.49.tar.gz
tar -xf cfitsio-3.49.tar.gz
mv cfitsio-3.49 intel-2021
cd intel-2021 || exit 1
./configure --prefix=/share/apps/cfitsio/3.49/intel-2021
make -j 16 || exit 1
make install || exit 2
# module
# Fix: use -p (idempotent, consistent with the mkdir above) so reruns do not
# fail when the module directory already exists.
mkdir -p /share/apps/modules/cfitsio
cp ${ROOT_DIR}/modules/cfitsio/3.49_intel-2021 /share/apps/modules/cfitsio
| true
|
9393041df442b74e0a1b121acebf1df2b835512b
|
Shell
|
sunliang711/init
|
/tools/lamp/ubuntu1404wordpress.sh
|
UTF-8
| 958
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install WordPress under Apache on Ubuntu 14.04: download the latest tarball,
# unpack to /var/www/html, and create/enable an Apache vhost for it.
if (($EUID!=0));then
echo -e "Need $(tput setaf 1)root$(tput sgr0) priviledge!$(tput setaf 1)\u2717"
exit 1
fi
# Remove leftovers from any previous run (errors ignored).
rm /tmp/latest.tar.gz >/dev/null 2>&1
rm -rf /var/www/html/wordpress >/dev/null 2>&1
wget https://wordpress.org/latest.tar.gz -O /tmp/latest.tar.gz
tar -C /var/www/html -xzvf /tmp/latest.tar.gz
chown -R www-data:www-data /var/www/html/wordpress
# Clone the default vhost, set server names, and point it at wordpress/.
cp /etc/apache2/sites-available/000-default.conf /etc/apache2/sites-available/wordpress.conf
sed -i '/<VirtualHost/a ServerName eaagle.me' /etc/apache2/sites-available/wordpress.conf
sed -i '/<VirtualHost/a ServerAlias www.eaagle.me' /etc/apache2/sites-available/wordpress.conf
sed -i 's+.*DocumentRoot .*+ DocumentRoot /var/www/html/wordpress+' /etc/apache2/sites-available/wordpress.conf
a2ensite wordpress.conf
#reboot apache
service apache2 restart
echo "create database for wordpress manually"
echo "for example: mysql -u root -p"
echo " then : create database wordpress;"
|
22d59974f1fac0a3c79a26d54063846c3ec654ce
|
Shell
|
yeasinopu17/LinuxShellCommand-ScriptBible
|
/Ch12Scripts/test20.sh
|
UTF-8
| 327
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrates the file-date comparison operators of the test command.

# -nt: true when the left-hand file has a newer modification time.
if test test19.sh -nt test18.sh
then
echo "The test19 file is newer than test18"
else
echo "The test19 file is older than test18"
fi

# -ot: true when the left-hand file has an older modification time.
if test test17.sh -ot test19.sh
then
echo "The test17 file is older than the test19file"
fi
| true
|
27b28683e3c633fd0b05d806a1afae5d3cbc0bcd
|
Shell
|
jayagami/T4SEfinder
|
/pred_all_model.sh
|
UTF-8
| 1,827
| 3.109375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# usage: ./pred_all_model.sh Acession_Number (e.g. NC_009494)
# Downloads the proteome for an accession, builds PSSM profiles, runs all four
# T4SE prediction models, and writes a combined summary.
acc=$1
res=results/$acc
fasta=$res/sequence.fasta
pssmdir=$res/pssm_files
####################################################
mkdir -p $res
echo "Downloading protein sequences corresponding to Accession number: "$acc" ..."
efetch -db nuccore -id $acc -format fasta_cds_aa > $fasta
python scripts/filter.py $fasta
####################################################
echo "Running PSI-BLAST to generate PSSM profiles for "$fasta" ..."
python scripts/run_pssm.py -i $fasta -db blastdb/swissprot -e 10 -n 3 -o $pssmdir -threads 16
python scripts/find_difference.py $fasta $pssmdir
# Re-run sequences that got no PSSM with a looser e-value threshold.
if [ -f $fasta.par ]; then
python scripts/run_pssm.py -i $fasta.par -db blastdb/swissprot -e 50 -n 3 -o $pssmdir -threads 16
fi
####################################################
echo "Predicting T4SEs in "$fasta" ..."
# Fix: the first two commands originally ended with a stray trailing "\" after
# their final argument, which spliced the following `python main.py` call onto
# the same command line as extra arguments instead of running it separately.
python main.py -in $fasta \
-weights weights/mlp/ \
-out $res/tapebert_mlp \
--vote_required \
tapebert_mlp
python main.py -in $fasta \
-weights weights/svm/ \
-out $res/tapebert_svm \
--vote_required \
tapebert_svm
python main.py -in $fasta \
-weights weights/cnn/ \
-out $res/pssm_cnn \
--vote_required \
pssm_cnn \
-pssm $pssmdir
python main.py -in $fasta \
-weights weights/bilstm/ \
-out $res/hybrid_bilstm \
--vote_required \
hybrid_bilstm \
-pssm $pssmdir
####################################################
echo "You can view the summarized results in "$res"/summary.txt"
python scripts/summary.py -i $res
####################################################
| true
|
c90fdbfa3226c819defb41a6ec9345b537da6526
|
Shell
|
ARHEIO/typescript-aws-lambda
|
/scripts/delete-lambda.sh
|
UTF-8
| 781
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Delete the AWS Lambda named after the current directory, then detach every
# IAM policy from its role and delete the role itself.
lambdaName=${PWD##*/}
echo "Deleting lambda $lambdaName"
aws lambda delete-function --function-name "$lambdaName"
echo "Finished deleting lambda $lambdaName"
attachedPolicies=$( aws iam list-attached-role-policies --role-name "$lambdaName" )
echo "The following policies are attached" $( echo "$attachedPolicies" | jq -c '.AttachedPolicies' )
# Fix: iterate with a while/read loop over jq's one-object-per-line output —
# the original unquoted `for row in $( ... )` word-split each JSON object on
# any embedded whitespace; expansions are now quoted throughout.
echo "$attachedPolicies" | jq -c '.AttachedPolicies[]' | while read -r row ; do
policy=$( echo "$row" | jq -r '.PolicyArn' )
echo "Deleting policy" "$policy" "from role" "$lambdaName"
aws iam detach-role-policy --role-name "$lambdaName" --policy-arn "$policy"
done
echo "Finished detaching policies from role $lambdaName"
# The role can only be deleted once all policies are detached.
echo "Deleting role $lambdaName"
aws iam delete-role --role-name "$lambdaName"
echo "Finished role $lambdaName"
| true
|
69c1c0c68828ab56294676aebff721d1db13173f
|
Shell
|
wicadmin/Goldenorb
|
/luci-app-hotspot/files/usr/lib/hotspot/inrange.sh
|
UTF-8
| 1,660
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/sh
# OpenWrt hotspot helper: scans for the SSIDs configured in /etc/hotspot and
# marks each entry in-range (|1) or out-of-range (|0), publishing to /tmp/hot.
. "/lib/functions.sh"
log() {
logger -t "In Range" "$@"
}
rm -f /tmp/hot1
# Wait up to 20s for the wireless stack to report "up".
cnt=0
trm_ifstatus="false"
while [ ${cnt} -lt 20 ]
do
trm_ifstatus="$(ubus -S call network.wireless status | jsonfilter -l1 -e '@.*.up')"
if [ "${trm_ifstatus}" = "true" ]
then
break
fi
cnt=$((cnt+1))
sleep 1
done
# Find the AP interface(s) on the radio used by the wwan (client) section.
RADIO=$(uci get wireless.wwan.device)
if [ $RADIO = "radio0" ]; then
ap_list="$(ubus -S call network.wireless status | jsonfilter -e '@.radio0.interfaces[@.config.mode="ap"].ifname')"
else
if [ $RADIO = "radio1" ]; then
ap_list="$(ubus -S call network.wireless status | jsonfilter -e '@.radio1.interfaces[@.config.mode="ap"].ifname')"
fi
fi
trm_scanner="$(which iw)"
for ap in ${ap_list}
do
# Scan on the AP interface; awk extracts each unique SSID, wrapped in
# double quotes, into one space-separated list.
ssid_list="$(${trm_scanner} dev "${ap}" scan 2>/dev/null > /tmp/scan
cat /tmp/scan | awk '/SSID: /{if(!seen[$0]++){printf "\"";for(i=2; i<=NF; i++)if(i==2)printf $i;else printf " "$i;printf "\" "}}')"
if [ -n "${ssid_list}" ]
then
# /etc/hotspot lines are "ssid|encrypt|key"; append |1 when the quoted
# SSID appears in the scan results, |0 otherwise.
if [ -f "/etc/hotspot" ]; then
while IFS='|' read -r ssid encrypt key
do
ssidq="\"$ssid\""
if [ -n "$(printf "${ssid_list}" | grep -Fo "${ssidq}")" ]
then
echo $ssid"|"$encrypt"|"$key"|1" >> /tmp/hot1
else
echo $ssid"|"$encrypt"|"$key"|0" >> /tmp/hot1
fi
done <"/etc/hotspot"
fi
else
# No scan results: mark every configured hotspot out of range.
if [ -f "/etc/hotspot" ]; then
while IFS='|' read -r ssid encrypt key
do
echo $ssid"|"$encrypt"|"$key"|0" >> /tmp/hot1
done <"/etc/hotspot"
fi
fi
done
# Publish the freshly built state atomically via rename.
if [ -f "/tmp/hot1" ]; then
mv -f /tmp/hot1 /tmp/hot
fi
| true
|
b3c806644a72a3b9ceb09a4315e9705cd2830280
|
Shell
|
apache/hudi
|
/docker/hoodie/hadoop/hive_base/entrypoint.sh
|
UTF-8
| 4,975
| 3.046875
| 3
|
[
"CC0-1.0",
"MIT",
"JSON",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-unknown"
] |
permissive
|
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set some sensible defaults
# Default the HDFS filesystem URI to this host unless provided by the environment.
export CORE_CONF_fs_defaultFS=${CORE_CONF_fs_defaultFS:-hdfs://`hostname -f`:8020}
# Append a <property><name>/<value> entry to a Hadoop-style XML config file,
# inserting it on a new line just before the closing </configuration> tag.
# $1 - path to the XML file, $2 - property name, $3 - property value
# Fix: quote $path/$entry expansions — the original left them unquoted, which
# word-split and glob-expanded values containing spaces or wildcards.
function addProperty() {
local path=$1
local name=$2
local value=$3
local entry="<property><name>$name</name><value>${value}</value></property>"
# Escape forward slashes so the entry is safe inside the sed replacement.
local escapedEntry=$(echo "$entry" | sed 's/\//\\\//g')
sed -i "/<\/configuration>/ s/.*/${escapedEntry}\n&/" "$path"
}
# Populate a Hadoop XML config file from environment variables.
# Variables named <envPrefix>_<key> are decoded ('___'->'-', '__'->'_',
# '_'->'.') and each resulting name/value pair is appended to $path via
# addProperty (defined above).
# $1 - config file path, $2 - module label (logging only), $3 - env var prefix
function configure() {
local path=$1
local module=$2
local envPrefix=$3
local var
local value
echo "Configuring $module"
# List matching env var key suffixes, then decode each key into a dotted name.
for c in `printenv | perl -sne 'print "$1 " if m/^${envPrefix}_(.+?)=.*/' -- -envPrefix=$envPrefix`; do
name=`echo ${c} | perl -pe 's/___/-/g; s/__/_/g; s/_/./g'`
var="${envPrefix}_${c}"
value=${!var}
echo " - Setting $name=$value"
addProperty $path $name "$value"
done
}
# Render each Hadoop/Hive config file from its corresponding env-var prefix.
configure /etc/hadoop/core-site.xml core CORE_CONF
configure /etc/hadoop/hdfs-site.xml hdfs HDFS_CONF
configure /etc/hadoop/yarn-site.xml yarn YARN_CONF
configure /etc/hadoop/httpfs-site.xml httpfs HTTPFS_CONF
configure /etc/hadoop/kms-site.xml kms KMS_CONF
configure /etc/hadoop/mapred-site.xml mapred MAPRED_CONF
configure /etc/hadoop/hive-site.xml hive HIVE_SITE_CONF
# When MULTIHOMED_NETWORK=1, bind the daemons to all interfaces and force
# hostname-based datanode addressing.
if [ "$MULTIHOMED_NETWORK" = "1" ]; then
echo "Configuring for multihomed network"
# HDFS
addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.rpc-bind-host 0.0.0.0
addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.servicerpc-bind-host 0.0.0.0
addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.http-bind-host 0.0.0.0
addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.https-bind-host 0.0.0.0
addProperty /etc/hadoop/hdfs-site.xml dfs.client.use.datanode.hostname true
addProperty /etc/hadoop/hdfs-site.xml dfs.datanode.use.datanode.hostname true
# YARN
# Fix: the original added yarn.nodemanager.bind-host twice in a row,
# writing a duplicate <property> entry into yarn-site.xml.
addProperty /etc/hadoop/yarn-site.xml yarn.resourcemanager.bind-host 0.0.0.0
addProperty /etc/hadoop/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
addProperty /etc/hadoop/yarn-site.xml yarn.timeline-service.bind-host 0.0.0.0
# MAPRED
# NOTE(review): this writes a yarn.* property into mapred-site.xml — looks
# copy-pasted; confirm whether a mapreduce.* bind-host key was intended.
addProperty /etc/hadoop/mapred-site.xml yarn.nodemanager.bind-host 0.0.0.0
fi
# When a Ganglia host is configured, replace the Hadoop metrics config files
# so both metrics v1 and v2 report to Ganglia on port 8649.
if [ -n "$GANGLIA_HOST" ]; then
mv /etc/hadoop/hadoop-metrics.properties /etc/hadoop/hadoop-metrics.properties.orig
mv /etc/hadoop/hadoop-metrics2.properties /etc/hadoop/hadoop-metrics2.properties.orig
# metrics v1: one context block per module.
for module in mapred jvm rpc ugi; do
echo "$module.class=org.apache.hadoop.metrics.ganglia.GangliaContext31"
echo "$module.period=10"
echo "$module.servers=$GANGLIA_HOST:8649"
done > /etc/hadoop/hadoop-metrics.properties
# metrics v2: a Ganglia sink per daemon.
for module in namenode datanode resourcemanager nodemanager mrappmaster jobhistoryserver; do
echo "$module.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31"
echo "$module.sink.ganglia.period=10"
echo "$module.sink.ganglia.supportsparse=true"
echo "$module.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both"
echo "$module.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40"
echo "$module.sink.ganglia.servers=$GANGLIA_HOST:8649"
done > /etc/hadoop/hadoop-metrics2.properties
fi
# wait_for_it HOST:PORT
# Polls a TCP endpoint with nc until it accepts connections, retrying every
# $retry_seconds seconds for at most $max_try attempts; exits the whole
# script with status 1 if the service never becomes reachable.
function wait_for_it()
{
    local serviceport=$1
    local service=${serviceport%%:*}
    local port=${serviceport#*:}
    local retry_seconds=5
    local max_try=100
    local i

    for (( i = 1; ; i++ )); do
        # Single probe point instead of the original's duplicated nc calls;
        # all expansions quoted so odd hostnames cannot word-split.
        if nc -z "$service" "$port"; then
            echo "[$i/$max_try] $service:${port} is available."
            return 0
        fi
        echo "[$i/$max_try] check for ${service}:${port}..."
        echo "[$i/$max_try] ${service}:${port} is not available yet"
        if (( i == max_try )); then
            echo "[$i/$max_try] ${service}:${port} is still not available; giving up after ${max_try} tries. :/"
            exit 1
        fi
        echo "[$i/$max_try] try in ${retry_seconds}s once again ..."
        sleep "$retry_seconds"
    done
}
# Block until every HOST:PORT listed in SERVICE_PRECONDITION is reachable.
# SERVICE_PRECONDITION typically arrives as a space-separated environment
# string, so the expansion is deliberately left unquoted to word-split it.
for precondition in ${SERVICE_PRECONDITION[@]}
do
    wait_for_it "$precondition"
done

# Hand off PID 1 to the container command; "$@" is quoted so arguments
# containing whitespace survive intact (the original used unquoted $@).
exec "$@"
| true
|
657a79417cdb31c82d0358a09749de92f12ff7a8
|
Shell
|
metwork-framework/mfext
|
/layers/_checksum_helper.sh
|
UTF-8
| 1,037
| 3.953125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Verify the checksum of a downloaded archive.
#
# Usage: _checksum_helper.sh ARCHIVE_FILE CHECKSUM_TYPE CHECKSUM_VALUE
#   CHECKSUM_TYPE is one of NONE, MD5, SHA1, SHA256.
#
# Exit status: 0 on success, 1 on missing/empty file or unknown type,
#              2 on checksum mismatch.
ARCHIVE_FILE=$1
CHECKSUM_TYPE=$2
CHECKSUM_VALUE=$3

# The archive must exist before anything else (path quoted so filenames
# with spaces work; the original left every expansion unquoted).
if ! test -f "${ARCHIVE_FILE}"; then
    exit 1
fi
if test "${CHECKSUM_TYPE}" = "NONE"; then
    # No checksum available: accept any non-empty file.
    if test -s "${ARCHIVE_FILE}"; then
        echo "No checksum to compute but size > 0 => success"
        exit 0
    else
        echo "Empty file"
        exit 1
    fi
else
    echo "Computing checksum for ${ARCHIVE_FILE}..."
    # $(...) instead of backticks; awk extracts the hash field only.
    if test "${CHECKSUM_TYPE}" = "MD5"; then
        CHECKSUM=$(md5sum "${ARCHIVE_FILE}" 2>/dev/null | awk '{print $1;}')
    elif test "${CHECKSUM_TYPE}" = "SHA256"; then
        CHECKSUM=$(sha256sum "${ARCHIVE_FILE}" 2>/dev/null | awk '{print $1;}')
    elif test "${CHECKSUM_TYPE}" = "SHA1"; then
        CHECKSUM=$(sha1sum "${ARCHIVE_FILE}" 2>/dev/null | awk '{print $1;}')
    else
        echo "ERROR: unknown checksum type: ${CHECKSUM_TYPE}"
        exit 1
    fi
    if test "${CHECKSUM_VALUE}" = "${CHECKSUM}"; then
        echo "Good checksum"
        exit 0
    else
        echo "Bad checksum ${CHECKSUM_VALUE} != ${CHECKSUM}"
        exit 2
    fi
fi
| true
|
2217cca801fb8318ae6609b8ec04f9f5802985d5
|
Shell
|
dijkman/MinifyAllCli
|
/bin/versionUpdater.sh
|
UTF-8
| 1,903
| 3.9375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#
# SCRIPT: versionUpdater.sh
# USAGE: bash versionUpdater.sh | ./versionUpdater.sh
# PURPOSE: Shell script that finds the actual version, and replaces the old version from the comments of the
# help argument to match the actual one.
# TITLE: versionUpdater.sh
# AUTHOR: Jose Gracia
# VERSION: 1.1.3
# NOTES: This script is automatically called by npm when pre-publishing. There is no need to manually run it.
# When publishing, it should be done from the root directory of the repository.
# BASH_VERSION: 5.0.16(1)-release
# LICENSE: see in ../LICENSE (project root) or https://github.com/Josee9988/MinifyAllCli/blob/master/LICENSE
# GITHUB: https://github.com/Josee9988/
# REPOSITORY: https://github.com/Josee9988/MinifyAllCli
# ISSUES: https://github.com/Josee9988/MinifyAllCli/issues
# MAIL: jgracia9988@gmail.com
#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#

# Current version embedded in the CLI source (first x.y.z match; grep reads
# the file directly instead of the original's useless `cat | grep`).
VERSION_TO_UPDATE=$(grep -o -m 1 -h "[0-9]*\.[0-9]*\.[0-9]*" src/cli/informationCLI.ts | head -1)

# Version declared in package.json (first "version" line, quotes stripped).
PACKAGE_VERSION=$(grep version package.json \
  | head -1 \
  | awk -F: '{ print $2 }' \
  | sed 's/[",\t ]//g')

# Guard: with an empty pattern or replacement the sed below would either
# fail or silently corrupt the files.
if [ -z "$VERSION_TO_UPDATE" ] || [ -z "$PACKAGE_VERSION" ]; then
  echo "versionUpdater: could not determine versions" >&2
  exit 1
fi

# Replace the outdated version in the CLI source, tests and README.
for target in src/cli/informationCLI.ts README.md tests/informationalArguments.test.ts; do
  sed -i -e "s/$VERSION_TO_UPDATE/$PACKAGE_VERSION/g" "$target"
done
| true
|
27fc5519ed360d3c89f5d9cb798b8dc9047a6e50
|
Shell
|
imekuto/node_dev
|
/provision-script/install_expect.sh
|
UTF-8
| 285
| 2.515625
| 3
|
[] |
no_license
|
# Install the 'expect' package non-interactively via apt.
# Each command is echoed before it runs (same visible behavior as the
# original's repeated COMMAND=...; echo && eval pattern, factored out).
run() {
    echo "$1" && eval "$1"
}

run 'export DEBIAN_FRONTEND=noninteractive'
run 'sudo -E apt update'
run 'sudo -E apt upgrade -y'
run 'sudo -E apt install expect -y'
| true
|
3e7a3a5265db870f382cb61a5c841b3e03bea081
|
Shell
|
Molmed/multiseq-ds
|
/scWGBS/scripts/get_cpg_islands.sh
|
UTF-8
| 322
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Download UCSC hg38 CpG islands and produce a whitespace-separated
# "chr start end" positions file with a header row.
wget -qO- http://hgdownload.cse.ucsc.edu/goldenpath/hg38/database/cpgIslandExt.txt.gz | gunzip -c > hg38.cpg.txt

# Columns 2-4 of cpgIslandExt are chrom, chromStart, chromEnd.
# Strip the "chr" prefix only at the start of each line (the original's
# global s/chr//g could clobber a literal "chr" elsewhere in a name) and
# prepend the header in one streaming pass instead of two in-place rewrites.
awk '{print $2 " " $3 " " $4}' hg38.cpg.txt \
  | sed -e 's/^chr//' -e '1i chr start end' > hg38.cpg.positions.txt
| true
|
886962e6055ac5dd8ef0606f6a56b5ee90027010
|
Shell
|
BillUtada/leetcode-python-solution
|
/localrun.sh
|
UTF-8
| 2,185
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash

# Running tallies of passed/failed cases, updated by check_case_result.
SUCCESS_CASE=0
FAILED_CASE=0

# check_case_result RESULT_FILE
# Inspect the last line of a per-case result file: if it is exactly "OK",
# count the case as passed and rename the file with an "--------OK" suffix,
# otherwise count it as failed and rename with "--------FAIL".
function check_case_result() {
    local result
    result=$(awk 'END {print}' "$1")
    # Quote both sides so an empty or multi-word last line cannot break
    # the comparison (the original's unquoted ${result} would).
    if [ "${result}" == "OK" ]; then
        let SUCCESS_CASE+=1;
        mv "$1" "$1--------OK"
    else
        let FAILED_CASE+=1;
        mv "$1" "$1--------FAIL"
    fi
}
# ---- config ----
PROJECT_PATH="/home/william/workspace/entitlement-ci/"
VIRTWHO_PATH=${PROJECT_PATH}"testcases/virt_who/"
TESTCASE_PATH=${VIRTWHO_PATH}"all/"
home=/home/william
RESULT_PATH=${home}"/testing-result-$(date +%Y%m%d)"
TESTCASE_LIST=${RESULT_PATH}"/testcase_list"
mkdir -p "${RESULT_PATH}"

# ---- export env ----
export PYTHONPATH=${PROJECT_PATH}
# Hypervisor type comes from $1; default to xen when not given.
export HYPERVISOR_TYPE=${1:-xen}
export RHEL_COMPOSE=RHEL-7.5
export REMOTE_IP=10.8.246.17
export SERVER_TYPE=STAGE
export VIRTWHO_SRC=default

# ---- set up the virt-who environment for the chosen hypervisor ----
case ${HYPERVISOR_TYPE} in
    "xen")    env_setup_file=${VIRTWHO_PATH}"virtwho_xen_setup.py" ;;
    "esx")    env_setup_file=${VIRTWHO_PATH}"virtwho_esx_setup.py" ;;
    "hyperv") env_setup_file=${VIRTWHO_PATH}"virtwho_hyperv_setup.py" ;;
    "rhevm")  env_setup_file=${VIRTWHO_PATH}"virtwho_rhevm_setup.py" ;;
    "vdsm")   env_setup_file=${VIRTWHO_PATH}"virtwho_vdsm_setup.py" ;;
    "kvm")    env_setup_file=${VIRTWHO_PATH}"virtwho_kvm_setup.py" ;;
esac
setup_res=$RESULT_PATH"/VIRT_WHO_ENV_SETUP"
touch "${setup_res}"
python "${env_setup_file}" &> "${setup_res}"
check_case_result "${setup_res}"

# ---- run test cases ----
# List tc_ID*.py test case files (one basename per line).
ls -l "${TESTCASE_PATH}" | grep tc_ID.*.py$ | sed 's/.*tc_ID/tc_ID/g' &> "${TESTCASE_LIST}"
for line in $(cat "${TESTCASE_LIST}")
do
    filename=$(echo "${line}" | sed 's/^tc_ID/ID/g' | sed 's/.py$//g')
    # NOTE(review): the published source had these three paths redacted to
    # "$(unknown)"; ${filename} is the only name computed for this purpose,
    # so it is restored here -- confirm against the original repository.
    touch "${RESULT_PATH}/${filename}"
    nosetests "${TESTCASE_PATH}/${line}" &> "${RESULT_PATH}/${filename}"
    check_case_result "${RESULT_PATH}/${filename}"
done

echo "================================RUNNING RESULT================================"
echo "==============================${SUCCESS_CASE}=CASE=SUCCESSED==============================="
echo "==============================${FAILED_CASE}=CASE=FAILED=================================="
| true
|
563b97d9e9f64d3cdfff8b5535645cf1344c709e
|
Shell
|
GrayHat12/LoanSpringAssignment
|
/buildAndRun.sh
|
UTF-8
| 380
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash

# Build the project with Gradle and launch the resulting jar.
#   $1 - target environment (informational only; defaults to local)
#   $2 - artifact version (defaults to 0.0.1)
ENV=$1
VERSION=$2

[[ -n ${VERSION} ]] || VERSION="0.0.1"

if [[ -z ${ENV} ]]; then
    echo "Building the project pointing to local environment"
else
    echo "Building the project pointing to ${ENV} environment"
fi
./gradlew clean build

echo "$(pwd)"
echo "Starting the service"
java -jar build/libs/main-${VERSION}.jar
| true
|
e710ffdadbd9101c0d2b6859dbe4b196322510cc
|
Shell
|
DJaySathe/CloudBurst
|
/script2.sh
|
UTF-8
| 274
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
####
# This script automatically creates user accounts with random passwords.
#
# Author: Russ Sanderlin
# Date: 01/21/15
#
###

# Positional parameters: target host, account name, behavior flag.
hostip=$1
username=$2
flag=$3

# Same branch as the original if/else, expressed as a case dispatch.
case "$3" in
    1) echo "random pw" ;;
    *) echo "1" ;;
esac
| true
|
f11463d9d3df8989cd924d8056b20616c0c7b617
|
Shell
|
lassetraberg/SensumUdred
|
/run.sh
|
UTF-8
| 256
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build and launch the project. Must be run from the repository root
# (the SensumUdred directory), because the helper scripts use relative paths.

dir=${PWD##*/}
if [[ $dir != "SensumUdred" ]]; then
    echo "ERROR: Must be in root of project, SensumUdred directory"
    exit 1
fi

echo "Compiler..."
# Forward the caller's arguments verbatim; quoting "$@" keeps arguments
# containing spaces intact, unlike the original's bare $@.
./scripts/unix/compile.sh "$@"

echo "Starter program..."
./scripts/unix/launch.sh
| true
|
efbe758c00ecadd014e80aeee11d2b9be58fd4bb
|
Shell
|
jonathanmak/embla
|
/postprocess/report.sh
|
UTF-8
| 906
| 3.34375
| 3
|
[] |
no_license
|
#! /bin/sh
# Generate a LaTeX dependence report from an Embla trace.
#
# NOTE(review): this script looks mid-refactor / transcription-damaged:
#   - $fname is expanded at the `basefname=` line before anything assigns it
#     (the old "fname=$2" assignment survives only as a comment), and
#     $fromline/$toline are likewise only set in commented-out lines;
#   - the lone `awk "{print $0 > ${tmpname}_` line opens a double-quoted
#     string that is never closed on that line, so the following lines are
#     swallowed into one garbled command.
# Confirm against the upstream repository before running.
tracebase=$1
dirname=$2 # fname=$2
# fromline=$3
# toline=$4
# $fname is empty here unless exported by the caller -- see NOTE above.
basefname=`basename $fname`
rundir=`pwd`
cd $dirname
# Derived paths, all rooted in the directory the script was launched from.
tracename=${rundir}/${tracebase}.trace
repname=${rundir}/${tracebase}.tex
tmpname=${rundir}/${tracebase}_tmp
ptrname=${tracename}_${basefname}_${fromline}-${toline}
pfname=${basefname}_${fromline}-${toline}
# LaTeX preamble for the generated report.
echo '\documentclass{article} \usepackage{color}' > $repname
echo '\usepackage{epic} \usepackage{verbdef}' >> $repname
echo '\begin{document}' >> $repname
# One iteration per source file mentioned in the trace.
for fname in `awk -f findfile.awk $tracename`; do
awk -f ${rundir}/findfun.awk $fname > $tmpname.funs
awk "{print $0 > ${tmpname}_
awk -f focus.awk -v theFile=$basefname -v fromLine=$fromline\
-v toLine=$toline $tracename > $ptrname
awk "FNR>=$fromline && FNR<=$toline" $fname > $pfname
# Render the dependence graph for the focused slice into the report.
awk -f depgraph.awk $pfname $ptrname >> $repname
done
echo '\end{document}' >> $repname
| true
|
2cbd491a5b5e593e0b22c62149b34c7bbc8b2afb
|
Shell
|
salamp/SlackBot
|
/auto.sh
|
UTF-8
| 6,769
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
api='https://slack.com/api'
jq='./jq'
statusfile='status.conf'
configfile='config.conf'
if [ -e "$configfile" ]; then
. "$configfile"
else
cat - > "$configfile" <<-CONFIGFILE
# Bot token
token='xoxb-xxx-xxx'
# Team full access token
ttoken='xoxp-xxx-xxx-xxx-xxx'
# Webhook
hook='https://hooks.slack.com/services/Txxx/Bxxx/xxx'
# Channel
channel='general'
# Channel ID
#chid='xxx'
# Is it a IM channel?
#imchannel=1
# Name of the bot
botname='I'm a bot'
# User name of the bot (all lower case)
botuname='the_bot'
# Admin premission to shell execution
admin='admin_account'
# Refresh interval
refresh=1
CONFIGFILE
echo "No configuration file found."
exit 1
fi
# Access API
# Parameter: METHOD TOKEN ARGUMENTS
get()
{
#echo "curl -s \"$api/$1?token=$2&$3\"" >&2
curl -s "$api/$1?token=$2&$3"
}
# Send message
# Parameter: CHANNEL TEXT ARGUMENTS
post()
{
#echo '{"text": "'"$2"'", "channel": "'"$1"'", "username": "zhiyb-AUTO" '"$3"'}' | $jq >&2
curl -s "$hook" --data 'payload={"icon_url": "'"$iconurl"'", "text": "'"$2"'", "channel": "'"$1"'", "username": "'"$botname"'" '"$3"'}'
}
# Convert special characters to HTML characters
# Parameter: STRING (read from stdin, written to stdout)
#
# Escapes message text for posting to Slack: backslash-escapes tabs and
# double quotes, percent-encodes characters special in the URL-encoded
# payload (%, \, &, +), and entity-encodes < and >. The final two
# substitutions fold the script's own quadruple sentinels ("<<<<", ">>>>")
# back into literal < and > so callers can embed raw Slack markup (e.g.
# <mailto:...|...> links) that must NOT be escaped.
# Substitution order is significant; do not reorder the sed commands.
# NOTE(review): the first three substitutions read as no-ops in this copy
# (e.g. s/\&/\&/g) -- they were presumably HTML-entity replacements whose
# entity text was lost in transcription; verify against upstream.
text2html()
{
	sed '	s/\&/\&/g
		s/</\</g
		s/>/\>/g
		s/%/%25/g
		s/\\/%5C%5C/g
		s/\t/\\t/g
		s/"/\\"/g
		s/&/%26/g
		s/+/%2B/g
		s/</%26lt;/g
		s/>/%26gt;/g
		s/%26lt;%26lt;%26lt;%26lt;/</g
		s/%26gt;%26gt;%26gt;%26gt;/>/g'
}
# Convert HTML characters to plain text
# Parameter: STRING (read from stdin, written to stdout)
#
# Decodes entities and then unwraps Slack-style <...|label> link markup,
# keeping only the label (or the bare payload for plain <...> tokens).
# The entity decodes must run before the angle-bracket unwrapping; do not
# reorder. NOTE(review): the first three substitutions read as no-ops in
# this copy -- presumably &/</> entity decodes whose entity names were
# lost in transcription; verify against upstream.
html2text()
{
	sed '	s/&/\&/g
		s/</\</g
		s/>/\>/g
		s/<.*|\(.*\)>/\1/
		s/<\(.\+\)>/\1/'
}
# Update user list
updateUsers()
{
# Load user list
echo -n "Updating user list..."
users="$(get users.list $token)"
if [ "$(echo "$users" | $jq -r ".ok")" != "true" ]; then
echo "Update user list failed:"
echo "$users"
return 1
fi
echo " Done."
}
# List users
listUsers()
{
updateUsers
count="$(echo "$users" | $jq -r ".members | length")"
# Iteration through messages
for ((i = 0; i != count; i++)); do
user="$(echo "$users" | $jq -r ".members[$i]")"
name="$(echo "$user" | $jq -r ".name")"
rname="$(echo "$user" | $jq -r ".real_name")"
userid="$(echo "$user" | $jq -r ".id")"
email="$(echo "$user" | $jq -r ".profile.email")"
if [ "$email" != "null" ]; then
echo "$name($userid|<<<<mailto:$email|$email>>>>): $rname"
else
echo "$name($userid): $rname"
fi
[ "$name" == "$botuname" ] && iconurl="$(echo "$user" | $jq -r ".profile.image_original")" #&& echo "Icon get: $iconurl";
done
echo "Total: $count users."
}
# Reply to user
# Parameter: USERID USERNAME REPLY
reply()
{
userid="$1"
name="$2"
if [ -n "$4" ]; then
reply="$3"
else
if (($(echo "$3" | wc -l) > 1)); then
reply="$(echo "<@$userid>:"; echo "\`\`\`$3\`\`\`" | text2html)"
else
reply="$(echo -n "<@$userid>: "; echo "$3" | text2html)"
fi
fi
echo -n "Reply to $name($userid): $reply"
post $chid "$reply" > /dev/null
}
# Get bot API
getapi()
{
method="$1"
shift
get "$method" $token $@
}
# Get team API
getapit()
{
method="$1"
shift
get "$method" $ttoken $@
}
# Handle specific messages
handler()
{
userid="$1"
name="$2"
text="$(echo "$3" | sed -e 's/^[ \t\s]*//;s/[ \t]*$//')"
if [ "$4" == "zh_CN" ]; then
export TZ=Asia/Shanghai
export LC_ALL=zh_CN.utf8
fi
case "${text%%\ *}" in
date ) date '+%F %A'; return;;
time ) date '+%H:%M:%S'; return;;
datetime ) date '+%F %A %H:%M:%S'; return;;
users ) listUsers; return;;
api ) getapi ${text##*\ } | $jq -r "."; return;;
apit ) getapit ${text##*\ } | $jq -r "."; return;;
esac
if [ "$4" == "zh_CN" ]; then
case "$text" in
"" ) echo -n "你好, $name! 你的ID是 ${userid}. "; date '+今天是 %x %A, 第%V周, 这一年的第%j天, epoch开始的第%s秒, 时区: %Z(%:z), 现在是%X.';;
* ) echo "你好, $name! 你的ID是 ${userid}.";;
esac
else
case "$text" in
"" ) echo -n "Hello, $name! Your ID is ${userid}. "; date '+Today is %A, %x, week %V, day %j of the year, %s seconds since epoch, time zone %Z(%:z), the time now is %X.';;
* ) echo "Hello, $name! Your user ID is ${userid}.";;
esac
fi
}
# Handle general text messages
#
# genericMessageHandler USERID TEXT RAW_MESSAGE_JSON
# Resolves the sender's display name from the cached $users JSON, logs the
# message, then dispatches on the first character of the (HTML-decoded) text:
#   !  -> built-in command handler (date/time/users/api/...)
#   $  -> evaluate the rest as a shell command and reply with its output
#   |  -> echo the rest back verbatim (reply without re-quoting)
genericMessageHandler()
{
	userid="$1"
	text="$(echo "$2" | html2text)"
	msg="$3"
	# Slackbot has no entry in the member list; special-case its name.
	if [ "$userid" == USLACKBOT ]; then
		namestr="slackbot"
	else
		user="$(echo "$users" | $jq -r ".members[] | select(.id == \"$userid\")")"
		name="$(echo "$user" | $jq -r ".name")"
		rname="$(echo "$user" | $jq -r ".real_name")"
		if [ "$rname" == null ]; then
			namestr="$name"
		else
			namestr="$rname($name)"
		fi
	fi
	echo "$namestr: $text"
	case "${text:0:1}" in
		'!' ) reply "$userid" "$name" "$(handler "$userid" "$name" "${text:1}")";;
		# NOTE(review): duplicate '!' pattern -- this zh_CN arm is
		# unreachable because the arm above always matches first. The
		# pattern was probably a full-width '!' (U+FF01) lost in
		# transcription; confirm against upstream.
		'!' ) reply "$userid" "$name" "$(handler "$userid" "$name" "${text:1}" zh_CN)";;
		# NOTE(review): the 'return' below replies with the eval output and
		# bails out BEFORE the admin check that follows, so ANY user can
		# execute shell commands. This looks like a debugging leftover and
		# is security-sensitive -- confirm and remove the early return so
		# the permission check applies.
		'$' ) reply "$userid" "$name" "$(eval "${text:1}" 2>&1)"; return
		if [ "$name" == "$admin" ]; then
			reply "$userid" "$name" "$(eval "${text:1}" 2>&1)"
		else
			reply "$userid" "$name" "Insufficient permission."
		fi;;
		'|' ) reply "$userid" "$name" "${text:1}" no;;
	esac
}
# Handle individual messages
messageHandler()
{
user="$(echo "$*" | $jq -r ".user")"
[ "$user" == null ] && user="$(echo "$*" | $jq -r ".username")"
text="$(echo "$*" | $jq -r ".text")"
type="$(echo "$*" | $jq -r ".subtype")"
case "$type" in
channel_join ) userupdate=1;;
null ) genericMessageHandler "$user" "$text" "$*";;
bot_message ) :;;
* ) echo "$*" | $jq;;
esac
}
# Persist resume state: write the newest-seen message timestamp so the
# next run can pick up where this one left off (the file is sourced back
# in at startup).
updateStatus()
{
	printf 'oldest=%s\n' "$oldest" > "$statusfile"
}
#if [ "$(get rtm.start $token | $jq -r '.ok')" != true ]; then
# echo "rtm.start failed"
# exit 1
#fi
[ -e $statusfile ] && . $statusfile
oldest="${oldest:-0}"
# Find channel ID
if [ -z "$chid" ]; then
chid="$(get channels.list $token | $jq -r ".channels[] | {name, id} | select(.name == \"$channel\") | .id")"
if [ -z "$chid" ]; then
echo "Cannot find channel #$channel"
exit 1
fi
echo "Channel $channel ID found: $chid"
else
echo "Using specified channel ID: $chid"
fi
userupdate=1
while :; do
# Update user list
((userupdate)) && userupdate=0 && listUsers
# Load new messages
if ((imchannel)); then
msgs="$(get im.history $ttoken "channel=$chid&oldest=$oldest")"
else
msgs="$(get channels.history $ttoken "channel=$chid&oldest=$oldest")"
fi
if [ "$(echo "$msgs" | $jq -r ".ok")" != "true" ]; then
echo "Load messages failed:"
echo "$msgs"
exit 1
fi
count="$(echo "$msgs" | $jq -r ".messages | length")"
((count == 0)) && sleep $refresh && continue
# Update oldest with the newest message
oldest="$(echo "$msgs" | $jq -r ".messages[0].ts")"
# Update status file
updateStatus
# Iteration through messages
echo "Found $count message(s)."
for ((i = count; i != 0; i--)); do
messageHandler "$(echo "$msgs" | $jq -r ".messages[$((i - 1))]")"
done
done
| true
|
c6d1c176f2f20cc146fb131b3a91c64f8e9b4735
|
Shell
|
shmilee/vivo-Y13L
|
/rom-base/update-patch-script/merge_patch.sh
|
UTF-8
| 6,305
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
shopt -s extglob
in_array() {
local s
for s in ${@:2}; do
if [[ $s == $1 ]];then
return 0
fi
done
return 1
}
mesge() {
local arrow=$1 msg1="$2" msg2="$3"; shift; shift; shift
if [[ $arrow == '1' ]]; then
printf "==>\e[1m\e[32m ${msg1} \e[0m${msg2}" "$@" >&2
else
printf " ->\e[1m\e[32m ${msg1} \e[0m${msg2}" "$@" >&2
fi
}
error() {
local arrow=$1 msg1="$2" msg2="$3"; shift; shift; shift
if [[ $arrow == '1' ]]; then
printf "==>\e[1m\e[31m ${msg1} \e[0m${msg2}" "$@" >&2
else
printf " ->\e[1m\e[31m ${msg1} \e[0m${msg2}" "$@" >&2
fi
}
full_zip_dir=${1:-./PD1304CL_A_1.19.1-update-full}
patch_zip_dir=${2:-./PD1304CL-PD1304CLMA-update-patch_1.19.3_for_1.19.1_201607121833576429}
#9446ed7ee0d21048389e68f650e01630 PD1304CL_A_1.19.1-update-full.zip
#188b12b2f1ee62424252c9970e9efacd PD1304CL-PD1304CLMA-update-patch_1.19.3_for_1.19.1_201607121833576429.zip
new_zip_dir=${3:-./new-update-full-zip-dir}
patch_dir="$patch_zip_dir/patch"
updater_script="$patch_zip_dir/META-INF/com/google/android/updater-script"
patch_cmd='./applypatch'
mesge 1 '' '1. Check update-full-zip and update-patch-zip ...\n'
lost_f=()
>lost.list
for f in $(find $patch_dir -type f); do
_tfile=$(echo $f|sed -e "s|^$patch_dir||" -e 's/\.p$//')
if [ -f "${full_zip_dir}"/$_tfile ]; then
mesge 2 ' ok ' '%s\n' $f
else
error 2 'lost' '%s\n' $f
echo $f >>lost.list
lost_f+=("$_tfile")
fi
done
>change.list
boot_line="$(grep 'apply_patch_check.*EMMC' $updater_script | awk -F\: '{print $4}')::/boot.img"
for line in $(grep apply_patch_check $updater_script | grep -v EMMC | awk -F\" '{print $6"::"$2}') $boot_line; do
sha1_x=${line/%::*/}
file=${line/#*::/}
if in_array $file ${lost_f[@]}; then
continue
fi
sha1_f=$(sha1sum "${full_zip_dir}"/$file | cut -d' ' -f1)
if [ x$sha1_x == x$sha1_f ]; then
mesge 2 ' ok ' '%s %s\n' $sha1_x $file
else
error 2 ' ~= ' '%s %s (%s)\n' $sha1_x $file $sha1_f
echo $file >>change.list
fi
done
if [ x$(stat --format=%s lost.list) == x0 -a x$(stat --format=%s change.list) == x0 ]; then
mesge 1 ' update-full-zip and update-patch-zip match well' '\n'
rm lost.list change.list
else
error 1 ' unmatched update-full-zip and update-patch-zip' '\n'
error 1 ' Check info in file lost.list and change.list' '\n'
exit 1
fi
mesge 1 '' '2. apply update-patch-zip/patch to update-full-zip ...\n'
if [ ! -d "$new_zip_dir" ]; then
mesge 2 '' 'prepare a copy of update-full-zip ... '
cp -r "$full_zip_dir" "$new_zip_dir"
printf 'Done.\n'
else
mesge 2 '' 'use existing %s \n' "$new_zip_dir"
fi
cp $updater_script updater-script.left2do
>patch-failed.list
for file in $(find $patch_dir -type f); do
_tfile=$(echo $file|sed -e "s|^$patch_dir||" -e 's/\.p$//')
mesge 2 'apply' '%s ... ' $file
if [ -f "${new_zip_dir}"/$_tfile ]; then
if [ $_tfile == '/boot.img' ]; then
info=$(sed -n "/^assert(apply_patch.*EMMC.*by-name\/boot/{N;N;N; p}" updater-script.left2do)
tgt_sha1=$(echo $info | sed 's/,//g'|awk '{printf $3}')
tgt_size=$(echo $info | sed 's/,//g'|awk '{printf $4}')
src_sha1=$(echo $info | sed 's/,//g'|awk '{printf $5}')
if $patch_cmd "${new_zip_dir}"/$_tfile - $tgt_sha1 $tgt_size ${src_sha1}:$file; then
mesge 2 'Done.' '\n'
sed -i "/^apply_patch_check.*EMMC.*by-name\/boot.*abort/{N; d}" updater-script.left2do
sed -i "/^assert(apply_patch.*EMMC.*by-name\/boot/{N;N;N; d}" updater-script.left2do
else
echo $file >>patch-failed.list
error 2 'Failed.' '\n'
fi
else
info=$(sed -n "/^assert(apply_patch.*$(echo $_tfile|sed 's/\//\\\//g')/{N;N;N; p}" updater-script.left2do)
tgt_sha1=$(echo $info | sed 's/,//g'|awk '{printf $3}')
tgt_size=$(echo $info | sed 's/,//g'|awk '{printf $4}')
src_sha1=$(echo $info | sed 's/,//g'|awk '{printf $5}')
if $patch_cmd "${new_zip_dir}"/$_tfile - $tgt_sha1 $tgt_size ${src_sha1}:$file; then
mesge 2 'Done.' '\n'
sed -i "/^apply_patch_check.*$(echo $_tfile|sed 's/\//\\\//g').*abort/{N; d}" updater-script.left2do
sed -i "/^assert(apply_patch.*$(echo $_tfile|sed 's/\//\\\//g')/{N;N;N; d}" updater-script.left2do
else
echo $file >>patch-failed.list
error 2 'Failed.' '\n'
fi
fi
else
echo $file >>patch-failed.list
error 1 ' Never happen.' '\n'
exit 1
fi
done
if [ x$(stat --format=%s patch-failed.list) == x0 ]; then
mesge 1 ' All patches are done.' '\n'
rm patch-failed.list
else
error 1 ' Some patches are failed.' '\n'
error 1 ' Check info in file patch-failed.list' '\n'
fi
mesge 1 '' '3. copy update-patch-zip/{recovery,system} to update-full-zip ...\n'
>copy-failed.list
for file in $(find $patch_zip_dir/{recovery,system} -type f); do
_tfile=$(echo $file|sed "s|^$patch_zip_dir||")
mesge 2 'copy' '%s ... \n' $file
if install -Dvm644 $file $new_zip_dir/$_tfile; then
mesge 2 'Done.' '\n'
sed -i "s|$_tfile||g" updater-script.left2do
else
echo $file >>copy-failed.list
error 2 'Failed.' '\n'
fi
done
mesge 1 '' '4. copy update-patch-zip/{*.bin,*.mbn} to update-full-zip ...\n'
for file in $(find $patch_zip_dir/{*.bin,*.mbn} -type f); do
_tfile=$(echo $file|sed "s|^$patch_zip_dir||")
mesge 2 'copy' '%s ... \n' $file
if install -Dvm644 $file $new_zip_dir/$_tfile; then
mesge 2 'Done.' '\n'
sed -i "s|$_tfile||g" updater-script.left2do
sed -i "/.*$(basename $_tfile).*dev\/block/d" updater-script.left2do
else
echo $file >>copy-failed.list
error 2 'Failed.' '\n'
fi
done
if [ x$(stat --format=%s copy-failed.list) == x0 ]; then
mesge 1 ' All files are copied successfully.' '\n'
rm copy-failed.list
else
error 1 ' Some files are not copied.' '\n'
error 1 ' Check info in file copy-failed.list' '\n'
fi
mesge 1 ' The merge is complete.' '\n'
exit 0
| true
|
5580e928eda0211b6c825dfa97efb9ac1c415063
|
Shell
|
aar10n/Lemon-OS
|
/Ports/buildport.sh
|
UTF-8
| 167
| 2.875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
# Build a Lemon OS port: source Ports/$1.sh and run its unpack/build steps.
export JOBCOUNT=$(nproc)

# LEMON_SYSROOT must point at the target sysroot before building anything.
if [ -z "$LEMON_SYSROOT" ]; then
    echo "error: LEMON_SYSROOT not set" >&2
    # Exit non-zero so callers (and make) see the failure; the original's
    # bare 'exit' returned the echo's status, i.e. success.
    exit 1
fi

export LEMON_PREFIX=/system

# Port scripts define unpack() and buildp().
. "./$1.sh"

unpack
buildp
|
311e0808c1accdcedefc49826a517a72f5673095
|
Shell
|
larsla/bash2rest_example
|
/scripts/users/GET_.sh
|
UTF-8
| 433
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
mkdir -p /tmp/users
# check if we are called as part of fetching a user object
CALLED_NAME=`basename $0`
ID=`echo $CALLED_NAME |sed 's|GET_||' |sed 's|\.sh||'`
if [ -n "$ID" ]; then
if [ -f /tmp/users/${ID} ]; then
cat /tmp/users/${ID} |jq .
else
echo '{"result": "error", "reason": "no such user"}' |jq .
exit 1
fi
else
echo '{"result": "error", "reason": "no user id provided"}' |jq .
exit 1
fi
| true
|
ec9417fcf5db1171f1fd16658cd9fa13f862f982
|
Shell
|
blank-blank/graylog_ansible_roles
|
/run_playbook.sh
|
UTF-8
| 204
| 3.015625
| 3
|
[] |
no_license
|
# Run a local Ansible playbook named after the single argument.
# Usage: run_playbook.sh profile_name

# Validate arguments before deriving anything from them (the original
# computed the playbook path first, then checked $#).
if [ $# -ne 1 ]
then
    echo "Usage: run_playbook.sh profile_name"
    exit 1
fi

playbook_name="$1"
playbook_file="${playbook_name}.yml"

ansible-playbook -i "localhost," -c local "$playbook_file"
|
2a91dc35e473f9826c3be6db2575fe93d34246ca
|
Shell
|
mp3splt/mp3splt-web
|
/mp3splt-project/branches/mp3splt-project_0.8.2__2.5.2/scripts/bsd_rc.sh
|
UTF-8
| 982
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
echo "This script must not be run (see its content)"
exit 1
#at the end of the /etc/rc file :
########### mp3splt-project packages
rm -rf /etc/make_packages
echo "I will compile the packages in 5 seconds..."
echo "Press Enter to cancel"
touch /etc/make_packages
/etc/rc.mp3splt&
read NAME
rm -rf /etc/make_packages
########### end mp3splt-project packages
#rc.mp3splt :
#!/usr/local/bin/bash
#make openbsd packages
make_packages()
{
export PATH=$PATH:/usr/local/bin:/usr/bin
echo
echo "Making OpenBSD packages...";
echo
cd /root/progs_src/ &&\
rm -rf ./mp3splt-project &&\
scp -P 4422 -r ion@10.0.2.2:/mnt/personal/hacking/mp3splt/mp3splt-project . &&\
cd mp3splt-project && make openbsd_packages &&\
scp -P 4422 -r *obsd*.tgz \
ion@10.0.2.2:/mnt/personal/hacking/mp3splt/mp3splt-project &&\
rm *obsd*.tgz &&\
halt -p || exit 1
exit 0
}
sleep 5
if [[ -e /etc/make_packages ]];then
make_packages
fi
| true
|
321a72e37ab5326da1c40191dd70775215c82398
|
Shell
|
SeattleTestbed/attic
|
/branches/repy_v2/benchmarking-support/benchmark-blockstore.sh
|
UTF-8
| 1,153
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Used to automate benchmarking
TEST="Block Store Server"
# Command to run for seclayer
SEC_CMD="python repy.py restrictions.full encasementlib.repy"
NORM_CMD="python repy.py restrictions.full"
SEC_LAYERS="all-logsec.py ip-seclayer.py forensiclog.repy"
SERVER="dylink.repy librepy.repy blockstore.py armon 12345"
# Kill all python instances
echo "Killing python"
killall -9 python Python
echo
echo "####"
echo "$TEST test"
echo "####"
for LAYER in $SEC_LAYERS
do
echo
echo "Layer: $LAYER"
for iter in {1..3}
do
$SEC_CMD $LAYER $SERVER >/dev/null 2>&1 &
PID=$!
sleep 4
for i in {1..10}
do
{ time ./test_blockstore_fetch.sh; } 2>&1 | grep real | sed -e 's|^.*0m\([0-9.]*s\)$|\1|' -e 's|s||'
done
kill -9 $PID
wait $PID
done
done
# Do the no security now
echo
echo "Layer: No security"
for iter in {1..3}
do
$NORM_CMD $SERVER >/dev/null 2>&1 &
PID=$!
sleep 4
for i in {1..10}
do
{ time ./test_blockstore_fetch.sh; } 2>&1 | grep real | sed -e 's|^.*0m\([0-9.]*s\)$|\1|' -e 's|s||'
done
kill -9 $PID
wait $PID
done
| true
|
d0ca9b1caeef285114d75a67030c4dae09e91b08
|
Shell
|
Joshua-Anderson/travis-vagrant-images
|
/install/virtualbox.sh
|
UTF-8
| 362
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Downloads and installs virtualbox
SCRIPTDIR=$(dirname $0)
mkdir -p $SCRIPTDIR/../downloads
curl -o $SCRIPTDIR/../downloads/virtualbox.dmg 'http://dlc.sun.com.edgesuite.net/virtualbox/4.3.14/VirtualBox-4.3.14-95030-OSX.dmg'
hdiutil mount $SCRIPTDIR/../downloads/virtualbox.dmg
sudo installer -package /Volumes/VirtualBox/VirtualBox.pkg -target "/"
| true
|
1cd4c298710d6e566c4d92852869cc7c6c44ca09
|
Shell
|
vikigenius/voidwoken
|
/actions/zsh/ziminstall
|
UTF-8
| 294
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
# Install the zim zsh framework into $ZIM_HOME and run its installer.
# Honors pre-set ZIM_HOME/ZDOTDIR, falling back to XDG-style defaults.
export ZIM_HOME=${ZIM_HOME:-$HOME/.local/share/zim}
export ZDOTDIR=${ZDOTDIR:-$HOME/.config/zsh}
export ZSH_CACHE_HOME=$HOME/.cache/zsh

# Quote the paths: $HOME (and user overrides) may contain spaces.
mkdir -p "$ZIM_HOME"
wget https://github.com/zimfw/zimfw/releases/latest/download/zimfw.zsh -O "$ZIM_HOME/zimfw.zsh"
zsh "$ZIM_HOME/zimfw.zsh" install
| true
|
bc739b18bffc1e5bdb8944c38036678457e42e90
|
Shell
|
dianlight/addon-plex
|
/plex/rootfs/etc/cont-init.d/webtools.sh
|
UTF-8
| 807
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/with-contenv bashio
# ==============================================================================
# Home Assistant Community Add-on: Plex Media Server
# Enables the WebTools plugin if the user requested that
# ==============================================================================
if bashio::config.true 'webtools' && ! bashio::fs.directory_exists \
"/data/Plex Media Server/Plug-ins/WebTools.bundle"; then
bashio::log.info 'Enabling WebTools plugin...'
mkdir -p "/data/Plex Media Server/Plug-ins/"
ln -s "/opt/WebTools.bundle" "/data/Plex Media Server/Plug-ins/"
fi
if bashio::config.false 'webtools' && bashio::fs.directory_exists \
"/data/Plex Media Server/Plug-ins/WebTools.bundle"; then
rm -f "/data/Plex Media Server/Plug-ins/WebTools.bundle"
fi
| true
|
ab98ce975a707b7b1501f1b45b149686b8e14fbc
|
Shell
|
Imran01000/shell_programs
|
/arrays/Random.sh
|
UTF-8
| 559
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate 10 random 3-digit numbers (100-999) and report the second
# largest and second smallest values in the array.
# (Dropped the shebang's -x debug trace; it was a development artifact.)
for (( count = 0; count < 10; count++ ))
do
    a[count]=$(( RANDOM % 900 + 100 ))
done
echo "${a[@]}";

# Track extreme and runner-up in a single pass. The original only refreshed
# second_max/second_min when a new extreme appeared, so a value lying
# between them was lost (e.g. 5,10,7 reported second max 5, not 7), and
# seeding all four trackers from a[0] alone could leave the runner-up
# equal to the extreme. Seed from the first two elements instead.
if (( a[0] >= a[1] )); then
    max=${a[0]}; second_max=${a[1]}
    min=${a[1]}; second_min=${a[0]}
else
    max=${a[1]}; second_max=${a[0]}
    min=${a[0]}; second_min=${a[1]}
fi
for (( count = 2; count < 10; count++ ))
do
    if (( a[count] > max )); then
        second_max=$max
        max=${a[count]}
    elif (( a[count] > second_max )); then
        second_max=${a[count]}
    fi
    if (( a[count] < min )); then
        second_min=$min
        min=${a[count]}
    elif (( a[count] < second_min )); then
        second_min=${a[count]}
    fi
done
echo "The second maximum value is $second_max";
echo "The second minimum value is $second_min";
| true
|
6460f52ef837549c420143ec189c0969579f15cf
|
Shell
|
ppyTeam/armsoft
|
/acme/acme/install.sh
|
UTF-8
| 1,804
| 3.640625
| 4
|
[] |
no_license
|
#! /bin/sh
source /koolshare/scripts/base.sh
alias echo_date='echo 【$(TZ=UTC-8 date -R +%Y年%m月%d日\ %X)】:'
DIR=$(cd $(dirname $0); pwd)
# 判断路由架构和平台
case $(uname -m) in
armv7l)
if [ "`uname -o|grep Merlin`" ] && [ -d "/koolshare" ] && [ -n "`nvram get buildno|grep 384`" ];then
echo_date 固件平台【koolshare merlin armv7l 384】符合安装要求,开始安装插件!
else
echo_date 本插件适用于【koolshare merlin armv7l 384】固件平台,你的固件平台不能安装!!!
echo_date 退出安装!
rm -rf /tmp/acme* >/dev/null 2>&1
exit 1
fi
;;
*)
echo_date 本插件适用于【koolshare merlin armv7l 384】固件平台,你的平台:$(uname -m)不能安装!!!
echo_date 退出安装!
rm -rf /tmp/acme* >/dev/null 2>&1
exit 1
;;
esac
# 安装插件
cd /tmp
cp -rf /tmp/acme/acme /koolshare/
cp -rf /tmp/acme/res/* /koolshare/res/
cp -rf /tmp/acme/scripts/* /koolshare/scripts/
cp -rf /tmp/acme/webs/* /koolshare/webs/
cp -rf /tmp/acme/uninstall.sh /koolshare/scripts/uninstall_acme.sh
[ ! -L "/koolshare/init.d/S99acme.sh" ] && ln -sf /koolshare/scripts/acme_config.sh /koolshare/init.d/S99acme.sh
chmod 755 /koolshare/acme/*
chmod 755 /koolshare/init.d/*
chmod 755 /koolshare/scripts/acme*
sed -i '/rogcss/d' /koolshare/webs/Module_acme.asp
# 离线安装需要向skipd写入安装信息
dbus set acme_version="$(cat $DIR/version)"
dbus set softcenter_module_acme_version="$(cat $DIR/version)"
dbus set softcenter_module_acme_install="1"
dbus set softcenter_module_acme_name="acme"
dbus set softcenter_module_acme_title="Let's Encrypt"
dbus set softcenter_module_acme_description="自动部署SSL证书"
# 完成
echo_date "Let's Encrypt插件安装完毕!"
rm -rf /tmp/acme* >/dev/null 2>&1
exit 0
| true
|
c51d75c68c5011097d83d768e84b0717827e4f2d
|
Shell
|
rlauer6/google-drive-mailer
|
/create-custom-role.sh
|
UTF-8
| 3,579
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# -*- mode: sh -*-
# Example script to create a role for the Lambda to assume that allows
# access to the mail bucket, creation of CloudWatch logs and the SSM
# key that holds credentials
# create-role.sh policy-name role-name bucket ssm-key region
# Example:
# create-role.sh google-drive-mailer-policy google-drive-mailer-role google-drive-mailer /google/google-drive-mailer us-east-1
function usage() {
cat <<EOF
usage $0 options
EOF
exit 0;
}
function create_policy() {
POLICY=$(mktemp)
cat >$POLICY <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::$BUCKET_NAME/*"
]
},
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": [
"arn:aws:logs:*:*:*"
]
},
{
"Effect": "Allow",
"Action": "ssm:Get*",
"Resource": [
"arn:aws:ssm:us-east-1:$ACCOUNT:parameter$SSM_KEY"
]
},
{
"Effect": "Allow",
"Action": "kms:Decrypt",
"Resource": "arn:aws:kms:us-east-1:$ACCOUNT:key/*"
}
]
}
EOF
ASSUME_ROLE_POLICY=$(mktemp)
cat >$ASSUME_ROLE_POLICY <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOF
cat $POLICY
$DRYRUN aws iam create-policy --policy-name=$POLICY_NAME \
--policy-document=file://$POLICY
rm $POLICY
rm $ASSUME_ROLE_POLICY
}
function attach_policy() {
$DRYRUN aws iam attach-role-policy --role-name $ROLE_NAME \
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
}
function create_role() {
create_policy
$DRYRUN aws iam create-role --role-name $ROLE_NAME \
--assume-role-policy file://$ASSUME_ROLE_POLICY
attach_policy
}
# +-------------------------+
# | MAIN SCRIPT STARTS HERE |
# +-------------------------+
OPTS=$(getopt -o hvr:p:R:a:b:s:d -- "$@")
# set defaults
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
POLICY_NAME="google-drive-mailer-policy"
ROLE_NAME="google-drive-mailer-role"
SSM_KEY="/google-drive-mailer/credentials"
REGION="us-east-1"
DEFAULT_COMMAND="create"
AWS_REGION=${AWS_REGION:-us-east-1}
if [ $? -ne 0 ]; then
echo "could not parse options"
exit $?
fi
eval set -- "$OPTS"
while [ $# -gt 0 ]; do
case "$1" in
-b)
BUCKET_NAME="$2";
shift;
shift;
;;
-h)
usage;
;;
-R)
AWS_REGION="$2"
shift;
shift;
;;
-r)
ROLE_NAME="$2";
shift;
shift;
;;
-a)
ACCOUNT="$2";
shift;
shift;
;;
-p)
POLICY_NAME="$2";
shift;
shift;
;;
-v)
VERBOSE="-x"
shift;
;;
-d)
DRYRUN="echo"
shift;
;;
--)
break;
;;
*)
break;
;;
esac
done
test -n $VERBOSE && set -e $VERBOSE
shift;
command="$1"
command=${command:-$DEFAULT_COMMAND}
if [ "$command" = "create" ]; then
create_role
fi
| true
|
361cf615a6dec9ccd1bd444ba8884b4c20823a36
|
Shell
|
mm4tt/k8s-util
|
/k8s-tests/golang-kubemark-test.sh
|
UTF-8
| 3,398
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build a patched Go toolchain, rebuild Kubernetes with it, and run a
# kubemark scalability test (ClusterLoaderV2) on GCE using the result.
# Usage: golang-kubemark-test.sh <run-name> <num-nodes>
set -euo pipefail
if [ $# -ne 2 ]
then
echo "Usage: ${0} <run-name> <num-nodes>"
exit 1
fi
run_name=${1?}
# NOTE(review): golang_commit is assigned empty and never used below.
golang_commit=
# NOTE(review): the 2500 default is unreachable — the $# check above
# already requires both arguments.
num_nodes=${2:-2500}
# Timestamp a message (via ts) and append it to the run's log file.
# $1 - message text. Relies on log_file being defined before first call.
log() { echo $1 | ts | tee -a ${log_file}; }
# Fetch a Gerrit change from go-review and commit it onto the current
# golang branch. $1 - Gerrit change (CL) id; $2 - patch revision number.
apply_patch() {
cl_id=${1?}
revision=${2?}
echo "Applying patch ${cl_id} at revision ${revision}"
wget https://go-review.googlesource.com/changes/go~${cl_id}/revisions/${revision}/patch?zip -O patch.zip
unzip patch.zip && rm patch.zip
git apply --3way *.diff
rm *.diff
git add .
git commit -a -m "Applied ${cl_id} revision ${revision}"
}
# Check out a pinned golang commit on a per-run branch and build the
# toolchain with make.bash.
build_golang() {
echo "Building golang for $run_name"
cd ~/golang/go/src
git checkout master
git pull
git checkout 7b62e98
# Recreate the per-run branch from scratch; ignore "branch not found".
git branch -D ${run_name} || true
git checkout -b ${run_name}
#git revert f1a8ca30fcaa91803c353999448f6f3a292f1db1 --no-edit
#apply_patch 186598 3
./make.bash
cd -
}
# Copy the freshly built Go into the k8s cross-build image, rebuild that
# image, then produce a quick k8s release using it.
build_k8s() {
log "Building k8s"
cd $GOPATH/src/k8s.io/kubernetes
git checkout $k8s_branch
cd build/build-image/cross/
rm -rf go || true
cp -R ~/golang/go/ go
echo "$run_name" > VERSION
git add .
git commit -a -m "Update golang version for run ${run_name}"
make build
cd -
make clean quick-release
}
# Per-run log file, e.g. ~/log/<run>/log_YYYYMMDD_HHMMSS.
log_dir=~/log/${run_name}
mkdir -p ${log_dir}
log_file=${log_dir}/log_$(date +%Y%m%d_%H%M%S)
log "Running the ${run_name} test with ${num_nodes} nodes"
# Pinned branches/commits for the k8s build and the test harness.
k8s_branch=golang_kubemark_932487c7440b05_no_patches
perf_test_branch=golang1.13
test_infra_commit=63eb09459
build_golang 2>&1 | ts | tee -a ${log_file}
build_k8s 2>&1 | ts | tee -a ${log_file}
log "k8s.io/perf-tests branch is: $perf_test_branch"
log "k8s.io/test-infra commit is: $test_infra_commit"
go install k8s.io/test-infra/kubetest
cd ~/go/src/k8s.io/perf-tests && git checkout ${perf_test_branch} && cd -
# Load the common kubemark presets from test-infra at the pinned commit.
source $GOPATH/src/github.com/mm4tt/k8s-util/set-common-envs/set-common-envs.sh preset-e2e-kubemark-common ${test_infra_commit}
source $GOPATH/src/github.com/mm4tt/k8s-util/set-common-envs/set-common-envs.sh preset-e2e-kubemark-gce-scale ${test_infra_commit}
export PROJECT=mmatejczyk-gke-dev
export ZONE=us-east1-b
export HEAPSTER_MACHINE_TYPE=n1-standard-32
export KUBE_DNS_MEMORY_LIMIT=300Mi
# Cluster resources are named after the run so parallel runs don't clash.
export CLUSTER=${run_name}
export KUBE_GCE_NETWORK=${CLUSTER}
export INSTANCE_PREFIX=${CLUSTER}
export KUBE_GCE_INSTANCE_PREFIX=${CLUSTER}
# Bring the kubemark cluster up, run ClusterLoaderV2 against $num_nodes
# hollow nodes, then tear everything down; output is teed to the log file.
go run hack/e2e.go -- \
--gcp-project=$PROJECT \
--gcp-zone=$ZONE \
--cluster=$CLUSTER \
--gcp-node-size=n1-standard-8 \
--gcp-nodes=50 \
--provider=gce \
--kubemark \
--kubemark-nodes=$num_nodes \
--check-version-skew=false \
--up \
--down \
--test=false \
--test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh \
--test-cmd-args=cluster-loader2 \
--test-cmd-args=--enable-prometheus-server=true \
--test-cmd-args=--experimental-gcp-snapshot-prometheus-disk=true \
--test-cmd-args=--experimental-prometheus-disk-snapshot-name="${run_name}" \
--test-cmd-args=--nodes=$num_nodes \
--test-cmd-args=--provider=kubemark \
--test-cmd-args=--report-dir=/tmp/${run_name}/artifacts \
--test-cmd-args=--tear-down-prometheus-server=true \
--test-cmd-args=--testconfig=$GOPATH/src/k8s.io/perf-tests/clusterloader2/testing/load/config.yaml \
--test-cmd-args=--testoverrides=./testing/load/kubemark/throughput_override.yaml \
--test-cmd-name=ClusterLoaderV2 2>&1 | ts | tee -a ${log_file}
| true
|
63cdf0c09771610272b6171e6fb84592075a6b17
|
Shell
|
newrain7803/yuki-emby-crack
|
/sign/selfsign_ca.sh
|
UTF-8
| 566
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Initialise the demoCA directory layout expected by openssl's "demoCA"
# convention, then generate a CA key and self-sign a root certificate.
# (Echoed strings are kept as-is; they are user-facing output.)

# Initialise the CA directory tree; seed the serial file only if the
# whole chain of mkdir/touch steps succeeded.
printf '\n初始化Ca目录\n\n'
mkdir -p ./demoCA/{private,newcerts} && \
touch ./demoCA/index.txt && \
touch ./demoCA/serial && \
echo 01 > ./demoCA/serial

# Generate the 2048-bit CA root key.
printf '\n生成 CA 根密钥\n\n'
openssl genrsa -out ./demoCA/private/cakey.pem 2048

# Self-sign the root certificate (20 years) using ./root.conf.
printf '\n自签发 CA 根证书\n\n'
openssl req -new -x509 -key ./demoCA/private/cakey.pem \
    -out ./demoCA/cacert.pem -days 7300 -config ./root.conf

# On Linux a .pem and a .crt are the same bytes; keep both names around.
printf '\n重命名ca, pem == crt in linux\n\n'
cp -rfv ./demoCA/private/cakey.pem ./demoCA/private/cakey.crt

# Show the resulting layout.
printf '\nCa目录\n\n'
tree demoCA/
| true
|
908658e41a1a9e5c4eee56d1587c74fdc3f92a1c
|
Shell
|
xuanngo2001/drupal-examples
|
/d8-create-empty-module.sh
|
UTF-8
| 1,293
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
# Description: Create the skeleton of an empty Drupal 8 module — info
# file, config/install directory, a routing file and a Form route entry.

script_name=$(basename "${0}")
module_name=$1

# Error handling: bail out with a usage hint when no module name is given.
cmd_eg=" e.g. ./${script_name} <MODULE_NAME>"
if [ -z "${module_name}" ]; then
  echo "Error: Module name can't empty. Allow characters: [A-Z_] Aborted!"
  echo "${cmd_eg}"
  exit 1;
fi

# Derived paths inside the new module.
config_dir="${module_name}/config/install/"
routing_yml="${module_name}/${module_name}.routing.yml"
src_dir="${module_name}/src/"
form_dir="${module_name}/src/Form/"

# Directory layout. The module root must not pre-exist (no -p on purpose).
mkdir "${module_name}"
mkdir -p "${config_dir}"
mkdir -p "${form_dir}"

# Write the module's *.info.yml descriptor.
cat << EOF > "${module_name}/${module_name}.info.yml"
name: 'MODULE NAME'
description: DESCRIPTION BLA BLA.
package: PACKAGE_NAME
type: module
core: 8.x
dependencies:
  - field
EOF

# Routing: truncate the file, then append the Form route definition.
> "${routing_yml}"
cat << EOF >> "${routing_yml}"
${module_name}.my_form:
  path: '/my_form'
  defaults:
    _form: 'Drupal\\${module_name}\Form\MyForm'
    _title: 'My Form'
  requirements:
    _permission: 'access content'
EOF

# Display info.
echo "${module_name} created."
tree "${module_name}"
| true
|
4263b5988eda2960b4fd7cb9e1961f5ac17d413d
|
Shell
|
microsoft/WebTemplateStudio
|
/.githooks/pre-commit
|
UTF-8
| 210
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Git pre-commit hook: lint the client package and the extension package
# before allowing a commit. Any failure aborts the commit (non-zero exit).
PREFIX="pre-commit:"

# BUG FIX: guard each cd — without the check, a missing directory left the
# script linting whatever the current directory happened to be.
cd ./src/client/ || exit 1
echo "$PREFIX Execute client Lint"
if ! yarn lint; then
  exit 1
fi

cd ../extension/ || exit 1
echo "$PREFIX Execute extension Lint"
if ! yarn lint; then
  exit 1
fi
| true
|
7f8e1e054cd903c8a84548c1aeebee2f9ffb85bd
|
Shell
|
gggah/Random_generation
|
/Fibre_src/analyse.sh
|
UTF-8
| 523
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Launch a fibre-data analysis run in the background under
# /mnt/sdb1/<tag>/df/.
#   $1 - analysis executable name (resolved two directories up)
#   $2 - four-character dataset tag, e.g. 0601
#   $3, $4 - extra arguments forwarded to the executable
# Examples:
#./analyse.sh analyze 0601 5 10
#./analyse.sh analyze 0601 6 pcdata.dat
dir=/mnt/sdb1
direction=df
cd $dir
cd $2
if [ ! -d $direction ];then
mkdir $direction
fi
cd $direction
# Split the 4-character tag into single characters: c1.c2 forms a decimal
# value (e.g. "0601" -> 0.6), c3c4 a number with any leading zero dropped.
c1=`echo $2 |cut -c 1`;#echo -e "c1=$c1\t"
c2=`echo $2 |cut -c 2`;#echo -e "c2=$c2\t"
c3=`echo $2 |cut -c 3`;#echo -e "c3=$c3\t"
c4=`echo $2 |cut -c 4`;#echo -e "c4=$c4\t"
if [ $c3 -eq 0 ]; then
c=$c4
else
c=${c3}${c4}
fi
#echo -e "c=$c"
# Run detached; all output goes to anarecord.log in the df/ directory.
# NOTE(review): "&>" is a bash extension — under a strictly POSIX /bin/sh
# this parses differently; confirm the target shell before relying on it.
nohup ../../$1 ../FibreParameter.dat ../RawData.dat $c1.$c2 $c $3 $4 &> anarecord.log &
| true
|
04950fa58f614c580f9e422d125894643b9c7e0f
|
Shell
|
abaty/TrackCorrRun2
|
/Condor/submit_pp.sh
|
UTF-8
| 1,790
| 2.84375
| 3
|
[] |
no_license
|
# Guard: this pp variant takes no arguments; everything is hard-coded.
# NOTE(review): the usage text lists arguments from a generic psort.sh —
# presumably stale; confirm before relying on it.
if [ $# -ne 0 ]
then
  echo "Usage: ./psort.sh <trackqual> <file-list> <tag> <nmin> <nmax> <pttrigmin> <pttrigmax> <ptassmin> <ptassmax>"
  exit 1
fi

# Stage all sources and inputs into a fresh timestamped work directory.
now="Corrections_$(date +"%Y_%m_%d__%H_%M_%S")"
njobs=10
mkdir $now
cp ../TrkCorrInputFile.txt $now
cp ../src/calcCorr.C $now
cp ../src/TrkSettings.h $now
cp ../src/prepStep.C $now
cp ../src/makeSkim.C $now
cp ../src/getWeights.C $now
cp ../src/iterate.C $now
cp ../src/PlotPlotChi2Scaling_PbPb.root $now
cp -R ../src/chi2Reweighting $now
cp run.sh $now

# insert blacklist code: join blacklisted hosts with escaped "&&" for the
# condor requirements expression (trailing sentinel stripped afterwards).
blacklist=""
for i in $(cat /net/hisrv0001/home/abaty/condor_blacklist/condor_blacklist.txt); do
  blacklist=$blacklist$i" \&\& "
done
blacklist=$blacklist"endoflinetag"
blacklist=$(echo $blacklist | sed "s@ \\\&\\\& endoflinetag@@g")
echo "blacklist: "$blacklist
sed "s@blacklist_here@$blacklist@g" run.condor > $now/run.condor

# BUG FIX: the original used "cat f | sed ... > f", which truncates f
# before it is read and leaves an empty file. Rewrite via temp files.
datestamp=$(date +"%Y_%m_%d__%H_%M_%S")
sed "s@SEDTARGETDATE@$datestamp@g" $now/TrkCorrInputFile.txt > $now/TrkCorrInputFile.txt.tmp
mv $now/TrkCorrInputFile.txt.tmp $now/TrkCorrInputFile.txt
sed "s@SEDTARGETDATE@$datestamp@g" $now/run.condor > $now/run.condor.tmp
mv $now/run.condor.tmp $now/run.condor
sed "s@log_flag@$now@g" $now/run.condor | sed "s@dir_flag@$PWD/$now@g" | sed "s@user_flag@$USER@g" | sed "s@arglist@@g" | sed "s@njobs@$njobs@g" | sed "s@SPECIES@"pp"@g" > $now/run.condor.tmp
mv $now/run.condor.tmp $now/run.condor

sleep 1
cd $now
g++ prepStep.C $(root-config --cflags --libs) -Wall -O2 -o "prepStep.exe"
g++ calcCorr.C $(root-config --cflags --libs) -Wall -O2 -o "calcCorr.exe"
#g++ prepStep.C $(root-config --cflags --libs) -Werror -Wall -O2 -o "prepStep.exe"
#g++ calcCorr.C $(root-config --cflags --libs) -Werror -Wall -O2 -o "calcCorr.exe"
echo finished compilation
echo
sleep 1
cat run.condor
echo
echo running prepStep in 10s
sleep 10
./prepStep.exe
#echo condor_submit $now/run.condor
#echo
# condor_submit $now/run.condor
| true
|
82c9442f0c361cb85ccf7cd0f984a88e97c33f63
|
Shell
|
graspfunc/graspfunc
|
/src/run_intra_group.sh
|
UTF-8
| 1,403
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Pairwise intra-group comparison: for every pair of *.sc files inside
# each group directory (g1, g2, ...) run mains_18.py and append a summary
# (first four lines + last line of each run) to the output file ($2).

# this assumes that the input files are arranged in subdirectories: g1, g2, ...
DIRS=$1/g* # enable this for all groups
# DIRS=$1  # enable this for just one group

for group_dir in $DIRS
do
  echo "***********************************************************************************"
  echo $group_dir
  echo "***********************************************************************************"
  for mode in 4
  do
    sc_files=($group_dir/*.sc)
    echo "#############################################################################"
    echo $mode
    echo "#############################################################################"
    for ((a=0; a < ${#sc_files[@]}; a++))
    do
      for ((b=a+1; b < ${#sc_files[@]}; b++))
      do
        # Strip the .sc extension while keeping the directory prefix.
        lhs=$(dirname ${sc_files[$a]})/$(basename ${sc_files[$a]} .sc)
        rhs=$(dirname ${sc_files[$b]})/$(basename ${sc_files[$b]} .sc)
        echo ============== $a $b
        echo python2.7 mains_18.py $lhs $rhs $mode 25 10 $group_dir/../matrix/BLOSUM62.txt 12
        echo "#############################################################################"
        python2.7 mains_18.py $lhs $rhs $mode 25 10 $group_dir/../matrix/BLOSUM62.txt 12 | tee intra.temp
        head -4 intra.temp >> $2
        tail -1 intra.temp >> $2
      done
    done
  done
done
rm intra.temp
| true
|
f9b0a666ef3b91803bf6a796fa7f11cb7b04fce9
|
Shell
|
Kodehuset/mygitbackup
|
/mygitbackup.sh
|
UTF-8
| 1,992
| 4.15625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Dump one or more MySQL databases and commit the .sql files to a Git
# backup repository. Configuration comes from ~/.mygitbackup, which must
# define DATABASE_HOST, DATABASE_NAMES, DATABASE_USER and GIT_BACKUP_REPO
# (DATABASE_PASSWORD is optional).
SCRIPT_NAME="MyGitBackup"
SCRIPT_VERSION="Alpha 1"
echo "$SCRIPT_NAME v. $SCRIPT_VERSION"

# Load user configuration
if [ ! -f ~/.mygitbackup ]
then
  echo "ERROR: Unable to locate configuration parameters. Please configure $SCRIPT_NAME via ~/.mygitbackup"
  # BUG FIX: every error path exited with 0, so cron/CI callers saw
  # success even when the backup never ran; exit non-zero instead.
  exit 1
fi
source ~/.mygitbackup

echo "Checking..."
if [ ! "$DATABASE_HOST" ] || [ ! "$DATABASE_NAMES" ] || [ ! "$DATABASE_USER" ]
then
  echo "Please provide the required DATABASE_HOST, DATABASE_NAMES and DATABASE_USER variables."
  exit 1
fi

if [ ! "$GIT_BACKUP_REPO" ]
then
  echo "Please provide the location of the Git backup repo on this box by configuring the GIT_BACKUP_REPO variable."
  exit 1
fi

if [ ! -d "$GIT_BACKUP_REPO" ]
then
  echo "Unable to locate the folder $GIT_BACKUP_REPO. Are you sure the backup repo has been cloned to this box?"
  exit 1
fi

# Locate required tools.
MYSQL_DUMP=$(which mysqldump)
if [ ! "$MYSQL_DUMP" ] || [ ! -f "$MYSQL_DUMP" ]
then
  echo "Unable to locate mysqldump. Are you sure you have installed MySQL on this box?"
  exit 1
fi
echo "Found MySQL Dump at $MYSQL_DUMP"

GIT=$(which git)
if [ ! "$GIT" ] || [ ! -f "$GIT" ]
then
  echo "Unable to locate Git. Are you sure you have installed Git on this box?"
  exit 1
fi
echo "Found Git at $GIT"

#cd "$(dirname "$GIT_BACKUP_REPO/.")"
cd "$GIT_BACKUP_REPO"

# One INSERT per row + no comments keeps diffs between dumps minimal.
MYSQL_DUMP_OPTIONS="--skip-extended-insert --compact"
# BUG FIX: the original tested $? once after the loop, which only
# reflected the LAST database's dump; track failures across all of them.
# NOTE(review): passing -p<password> on the command line is visible in
# `ps` output — consider a credentials file instead.
dump_status=0
for DATABASE in $DATABASE_NAMES
do
  echo "Performing MySQL dump of $DATABASE_HOST/$DATABASE"
  if [ -n "$DATABASE_PASSWORD" ]
  then
    $MYSQL_DUMP $MYSQL_DUMP_OPTIONS -u$DATABASE_USER -p$DATABASE_PASSWORD -h$DATABASE_HOST $DATABASE > $DATABASE.sql || dump_status=1
  else
    $MYSQL_DUMP $MYSQL_DUMP_OPTIONS -u$DATABASE_USER -h$DATABASE_HOST $DATABASE > $DATABASE.sql || dump_status=1
  fi
done

if [ $dump_status -ne 0 ]
then
  echo "MySQL Dump terminated with an unexpected result. Please investigate the errors which should be printed above and try again."
  exit 1
fi

echo "Database dump completed. Committing..."
$GIT add *.sql
$GIT commit -m "New database backup."
$GIT push
echo "All done."
| true
|
2408790ddb733fe8af74fa49bf6518046212e28e
|
Shell
|
tmillsclare/zk
|
/bin/maven.javadoc
|
UTF-8
| 1,028
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
#
#{{IS_NOTE
#	Purpose:
#		To build javadoc per jar file for maven repository
#	Description:
#	History:
#		August 22 11:13 2011, Created by simon
#}}IS_NOTE
#
#Copyright (C) 2011 Potix Corporation. All Rights Reserved.
#
#{{IS_RIGHT
#}}IS_RIGHT
#
maindir="$(pwd)"
version=$1
# A second argument suppresses the fresh-local (FL) date suffix.
if [ -z "$2" ] ; then
  version=$version.FL.$(date +%Y%m%d)
fi

# Build and stage the -javadoc.jar for one artifact directory.
# $1 - artifact/module name (also its directory under $maindir).
jvdoc() {
  local artifact=$1
  # Stamp the release version into pom.xml, keeping a backup to restore.
  cp $maindir/$artifact/pom.xml $maindir/$artifact/pom.xml.bak
  sed -i "1,/version>.*<\/version/s/version>.*<\/version/version>$version<\/version/" $maindir/$artifact/pom.xml
  cd $maindir/$artifact
  echo Generating javadoc for $artifact
  mvn --quiet javadoc:jar
  # Restore the original pom and drop intermediate javadoc artifacts.
  mv -f $maindir/$artifact/pom.xml.bak $maindir/$artifact/pom.xml
  rm -rf $maindir/$artifact/debug/apidocs
  rm -rf $maindir/$artifact/debug/javadoc-bundle-options
  cp $maindir/$artifact/debug/$artifact-$version-javadoc.jar /tmp/maven/_javadoc/$artifact-$version-javadoc.jar
  chmod 644 /tmp/maven/_javadoc/$artifact-$version-javadoc.jar
  cd ..
}

mkdir -p /tmp/maven/_javadoc
# Same artifact order as before, expressed as a loop.
for module in zcommon zweb zk zul zhtml zml zkex zkmax zkplus; do
  jvdoc $module
done
| true
|
3cb79438e4d327f7db4d3cbd7f4d7fe1ee0887d3
|
Shell
|
bjanderson70/sfdx-misc
|
/bin/funcs.sh
|
UTF-8
| 17,334
| 3.34375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# NOTE(review): this file contained an unresolved git merge conflict
# (<<<<<<< HEAD ... ======= ... >>>>>>>) whose two sides were byte-for-byte
# identical; the markers have been removed and a single copy kept. With the
# markers present the file was not valid shell and could not be sourced.
############################################################################
# Copyright (c) 2020, Salesforce. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#    + Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#
#    + Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in
#      the documentation and/or other materials provided with the
#      distribution.
#
#    + Neither the name of Salesforce nor the names of its
#      contributors may be used to endorse or promote products derived
#      from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
############################################################################
#######################################################
#
# Core functions used by scripts
#
#######################################################
#######################################################
# For UI (curses)
#######################################################
red=`tput setaf 1`
green=`tput setaf 2`
yellow=`tput setaf 3`
blue=`tput setaf 4`
magenta=`tput setaf 5`
cyan=`tput setaf 6`
white=`tput setaf 7`
bold=`tput bold`
reset=`tput sgr0`
#######################################################
# Common variables
#######################################################
userDir=`pwd`
SFDX_CLI_EXEC=sfdx
orgName=
scratchOrg=
runUnitTests=
quietly=
installBase=
shellLocation=`basename $0`;
numOfSODays="2";
devhub=;
#######################################################
# Utility to reset cursor
#
#######################################################
function resetCursor() {
  echo "${reset}"
}
#######################################################
# Print an error to stderr and abort
#
#######################################################
function handleError() {
  echo "${red}${bold}"
  printf >&2 "\n\tERROR: $1"" Aborted\n";
  resetCursor;
  # BUG FIX: "exit -1" is not a valid exit status (0-255); the original
  # relied on bash coercing it to 255 — use an explicit 1.
  exit 1;
}
#######################################################
# Ensure we run from the project root (not ./scripts)
#
#######################################################
function runFromRoot() {
  local cdir=`pwd | grep "/scripts"`
  if [ ! -z ${cdir} ]; then
    cd ../;
  fi
  userDir=`pwd`;
}
#######################################################
# Utility called when user aborts ( reset )
#
#######################################################
function shutdown() {
  tput cnorm # reset cursor
  cd $userDir;
  resetCursor
}
#######################################################
# Print a message unless running quietly (-q)
#
#######################################################
function print(){
  if [ -z ${quietly} ]; then
    echo "${green}${bold}$1";
    resetCursor;
  fi
}
#######################################################
# SFDX present
#
#######################################################
function checkForSFDX(){
  type $SFDX_CLI_EXEC >/dev/null 2>&1 || { handleError " $shellLocation requires sfdx but it's not installed or found in PATH."; }
}
#######################################################
# Utility for help
#
#######################################################
function help() {
  echo "${green}${bold}"
  echo ""
  echo "Usage: $shellLocation -v <Dev-Hub> [ -u <username|targetOrg> | -l <num of Days to keep Scratch Org, default to 2> | -t | -d | -q | -h ]"
  printf "\n\t -u <username|targetOrg>"
  printf "\n\t -v <dev-hub>"
  printf "\n\t -l <# of days to keep scratch org , defaults to $numOfSODays days>"
  printf "\n\t -t run unit tests"
  printf "\n\t -d turn on debug"
  printf "\n\t -q run quietly"
  printf "\n\t -h the help\n"
  resetCursor;
  exit 0
}
#######################################################
# Command Line Arguments
#
#######################################################
function getCommandLineArgs() {
  while getopts u:l:v:shdqtb option
  do
    case "${option}"
    in
      u) orgName=${OPTARG};;
      l) numOfSODays=${OPTARG};;
      v) devhub=${OPTARG};;
      d) set -xv;;
      s) scratchOrg=1;;
      t) runUnitTests=1;;
      b) installBase=1;;
      q) quietly=1;;
      h) help;;
    esac
  done
  #if no org, then creating a scratch org
  if [ -z ${orgName} ]; then
    scratchOrg=1;
  fi
  #need to know dev-hub
  if [ -z ${devhub} ]; then
    handleError "Need to know the Dev-Hub when creating scratch org "
  fi
}
#######################################################
# Determine CI Environment
#
#######################################################
function isCIEnvironment() {
  # determine who is running
  if [[ ! -z "${IS_CI}" ]]; then
    print "Script is running on CI Environment"
    SFDX_CLI_EXEC=node_modules/sfdx-cli/bin/run
  fi
}
#######################################################
# Scratch Org
#
#######################################################
function createScratchOrg() {
  if [ ! -z ${scratchOrg} ]; then
    print "Creating Scratch org..."
    # get username
    orgName=`$SFDX_CLI_EXEC force:org:create -v $devhub -s -f config/project-scratch-def.json -d $numOfSODays --json | grep username | awk '{ print $2}' | sed 's/"//g'`
    print "Scratch org created (user=$orgName)."
    if [ -z ${orgName} ]; then
      handleError "Problem creating scratch Org (could be network issues, permissions, or limits) [sfdx force:org:create -s -f config/project-scratch-def.json -d $numOfSODays --json] "
    fi
  fi
}
#######################################################
# Run Apex Unit Tests
#
#######################################################
function runApexTests() {
  if [ ! -z ${runUnitTests} ]; then
    print "Running Apex Unit Tests (target=$orgName) [w/ core-coverage]"
    # run tests
    $SFDX_CLI_EXEC force:apex:test:run -r human -c -u "$orgName" -w 30
  fi
}
#######################################################
# set permissions
#
#######################################################
function setPermissions() {
  print "Setting up permissions."
  # place here, if any
  # sfdx force:user:permset:assign -n FinancialServicesCloudStandard
}
#######################################################
# Install Packages
#
#######################################################
function installPackages() {
  if [ ! -z ${orgName} ]; then
    local step=0;
    # get our package ids ( do not want to keep updating this script)
    # NOTE(review): the while loop runs in a pipeline subshell, so "step"
    # increments are not visible after the loop — confirm it is unused.
    cat sfdx-project.json | grep 04t | awk '{print $1" "$2}' | sed 's/["|,|:]//g' | while read line ; do
      local pgkId=`echo $line | awk '{print $2}'`
      local name=`echo $line | awk '{print $1}'`
      print "Installing package $name ($pgkId) for $orgName"
      $SFDX_CLI_EXEC force:package:install -a package --package "$pgkId" --wait 20 --publishwait 20 -u "$orgName"
      ((step=step+1));
    done
  fi
}
#######################################################
# Push to Scratch Orgs
#
#######################################################
function pushToScratch() {
  if [ ! -z ${orgName} ]; then
    print "pushing content to scratch org ..."
    $SFDX_CLI_EXEC force:source:push -u "$orgName"
  fi
}
#######################################################
# Open Org
#
#######################################################
function openOrg() {
  if [ ! -z ${orgName} ]; then
    print "Launching Org now ..."
    $SFDX_CLI_EXEC force:org:open -u "$orgName"
  fi
}
#######################################################
# complete
#
#######################################################
function complete() {
  print " *** Completed ***"
}
| true
|
b2e2eeed307c8dbbd7978ccded170980d68eabf5
|
Shell
|
beaconzhang/yagami..ko
|
/yagamiko/bin/generate_nginx_conf.sh
|
UTF-8
| 629
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Generate the nginx runtime directory (conf/ + logs/) for the yagamiko
# app and render its vhost config from the template in ./conf/vhosts/.

# set global variables project name and path
export YGM_APP_NAME=yagamiko
export OPENRESTY_HOME=/usr/local
export YAGAMI_HOME=/freeflare/trunk/foundation/server/lua/yagami
#echo $OPENRESTY_HOME
#echo $YAGAMI_HOME

# BUG FIX: the original overwrote the shell's special PWD variable; use a
# private name instead.
APP_DIR=$(pwd)
NGINX_FILES="$APP_DIR/nginx_runtime"
mkdir -p "$NGINX_FILES/conf"
mkdir -p "$NGINX_FILES/logs"
# BUG FIX: the original ran rm -rf $NGINX_FILES"/conf/*" with the glob
# inside quotes, so '*' was literal and nothing was ever deleted. Remove
# the conf tree outright; the cp -r below recreates it fresh.
rm -rf "$NGINX_FILES/conf"
cp -r "$APP_DIR/conf" "$NGINX_FILES"
# Render the vhost template, substituting the placeholder tokens.
sed -e "s|__YAGAMI_HOME_VALUE__|$YAGAMI_HOME|" \
    -e "s|__YAGAMI_APP_PATH_VALUE__|$APP_DIR|" \
    -e "s|__YAGAMI_APPNAME_VALUE__|$YGM_APP_NAME|" \
    "$APP_DIR/conf/vhosts/$YGM_APP_NAME.conf" > "$NGINX_FILES/conf/vhosts/$YGM_APP_NAME.conf"
| true
|
d779673ff25ef114dec581228a8853b462404712
|
Shell
|
sin6pi7/hexp-web
|
/pre-commit.sh
|
UTF-8
| 191
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
# Git pre-commit hook: self-installing. The hook symlinks itself into
# .git/hooks on first run, then gates the commit on lint + tests.

# Install yourself on first execution
[ -f .git/hooks/pre-commit ] || ln -s ../../pre-commit.sh .git/hooks/pre-commit

# Lint and tests check — the hook's exit status is npm's, so any failure
# blocks the commit.
npm run lint && npm test
| true
|
c2231d6c1b27dfd4b0939faa4292ec7581a520c2
|
Shell
|
pinard/paxutils
|
/scripts/level-1.10
|
UTF-8
| 5,628
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Run this script as root on the machine that has the tape drive, to make a
# level-1 dump containing all files changed since the last full dump.
#
# If you give `now' as an argument, the dump is done immediately.
# Otherwise, it waits until 1am.
#
# You must edit the file `backup-specs' to set the parameters for your site.
if [ ! -w / ]; then
echo The backup must be run as root,
echo or else some files will fail to be dumped.
exit 1
else
false
fi
# Get the values of BACKUP_DIRS and BACKUP_FILES, and other variables.
# (backup-specs also supplies BACKUP_HOUR, BLOCKING, VOLNO_FILE, TAPE_FILE,
# TAPE_STATUS, ADMINISTRATOR and optionally DUMP_REMIND_SCRIPT.)
. ./backup-specs
# Maybe sleep until around specified or default hour.
#
if [ "${1}" != "now" ]; then
if [ "${1}"x != x ]; then
spec=${1}
else
spec=${BACKUP_HOUR}
fi
# Compute seconds from now until the requested hour (wrapping past
# midnight if that hour has already gone by today).  Relies on `date`
# printing HH:MM:SS as its 4th field.
pausetime=`date | awk '{hr=substr($4,1,2);\\
mn=substr($4,4,2);\\
if((hr+0)<spec+0)\\
print 3600*(spec-hr)-60*mn;\\
else\\
print 3600*(spec+(24-hr))-60*mn; }' spec=$spec`
clear
cat ./dont_touch
sleep ${pausetime}
fi
# start doing things
here=`pwd`
LOGFILE=log-`date | awk '{print $2 "-" $3 "-" $6}'`-level-1
HOST=`hostname | sed 's/\..*//'`
TAR_PART1="/usr/local/bin/tar -c --multi-volume --one-file-system --block=${BLOCKING} --sparse --volno-file=${VOLNO_FILE}"
# Only use --info-script if DUMP_REMIND_SCRIPT was defined in backup-specs
if [ x != "x${DUMP_REMIND_SCRIPT}" ]; then
TAR_PART1="${TAR_PART1} --info-script=${DUMP_REMIND_SCRIPT}"
fi
# Make sure the log file did not already exist. Create it.
if [ -f ${LOGFILE} ] ; then
echo Log file ${LOGFILE} already exists.
exit 1
else
touch ${LOGFILE}
fi
mt -f ${TAPE_FILE} rewind
rm ${VOLNO_FILE}
# Each BACKUP_DIRS entry is host:path; iterate via the positional params.
set ${BACKUP_DIRS}
while [ $# -ne 0 ] ; do
host=`echo ${1} | sed 's/:.*$//'`
fs=`echo ${1} | sed 's/^.*://'`
date=`date`
# fsname is the path with '/' mapped to ':' so it is a flat file name.
fsname=`echo ${1} | sed 's/\//:/g'`
# This filename must be absolute; it is opened on the machine that runs tar.
TAR_PART2="--listed=/etc/tar-backup/temp.level-1"
TAR_PART3="--label='level 1 backup of ${fs} on ${host} at ${date}' -C ${fs} ."
echo Backing up ${1} at ${date} | tee -a ${LOGFILE}
echo Last full dump on this filesystem: | tee -a ${LOGFILE}
# Copy the level-0 snapshot file to a temp name; only on success (below)
# is the temp promoted to the real level-1 snapshot.  Remote hosts are
# driven over rsh.
if [ ${HOST} != ${host} ] ; then
rsh ${host} "ls -l /etc/tar-backup/${fsname}.level-0; \
cp /etc/tar-backup/${fsname}.level-0 /etc/tar-backup/temp.level-1" \
2>&1 | tee -a ${LOGFILE}
else
ls -l /etc/tar-backup/${fsname}.level-0 2>&1 | tee -a ${LOGFILE}
cp /etc/tar-backup/${fsname}.level-0 /etc/tar-backup/temp.level-1 2>&1 | tee -a ${LOGFILE}
fi
# Actually back things up.
if [ ${HOST} != ${host} ] ; then
rsh ${host} ${TAR_PART1} -f ${HOST}:${TAPE_FILE} ${TAR_PART2} ${TAR_PART3} 2>&1 | tee -a ${LOGFILE}
else
# Using `sh -c exec' causes nested quoting and shell substitution
# to be handled here in the same way rsh handles it.
sh -c "exec ${TAR_PART1} -f ${TAPE_FILE} ${TAR_PART2} ${TAR_PART3}" 2>&1 | tee -a ${LOGFILE}
fi
# This doesn't presently work, of course, because $? is set to the exit
# status of the last thing in the pipeline of the previous command,
# namely `tee'. We really want the exit status of the sh command
# running tar, but getting this seems to be nontrivial. --friedman
if [ $? -ne 0 ] ; then
echo Backup of ${1} failed. | tee -a ${LOGFILE}
# I'm assuming that the tar will have written an empty
# file to the tape, otherwise I should do a cat here.
else
# Success: promote the temp snapshot to the real level-1 snapshot.
if [ ${HOST} != ${host} ] ; then
rsh ${host} mv -f /etc/tar-backup/temp.level-1 /etc/tar-backup/${fsname}.level-1 2>&1 | tee -a ${LOGFILE}
else
mv -f /etc/tar-backup/temp.level-1 /etc/tar-backup/${fsname}.level-1 2>&1 | tee -a ${LOGFILE}
fi
fi
${TAPE_STATUS} | tee -a ${LOGFILE}
sleep 60
shift
done
# Dump any individual files requested.
if [ x != "x${BACKUP_FILES}" ] ; then
date=`date`
TAR_PART2="--listed=/etc/tar-backup/temp.level-1"
TAR_PART3="--label='Incremental backup of miscellaneous files at ${date}'"
echo Backing up miscellaneous files at ${date} | tee -a ${LOGFILE}
echo Last full dump of these files: | tee -a ${LOGFILE}
ls -l /etc/tar-backup/misc.level-0 2>&1 | tee -a ${LOGFILE}
rm -f /etc/tar-backup/temp.level-1 2>&1 | tee -a ${LOGFILE}
cp /etc/tar-backup/misc.level-0 /etc/tar-backup/temp.level-1 2>&1 | tee -a ${LOGFILE}
echo Backing up miscellaneous files at ${date} | tee -a ${LOGFILE}
# Using `sh -c exec' causes nested quoting and shell substitution
# to be handled here in the same way rsh handles it.
sh -c "exec ${TAR_PART1} -f ${TAPE_FILE} ${TAR_PART2} ${TAR_PART3} \
${BACKUP_FILES}" 2>&1 | tee -a ${LOGFILE}
# This doesn't presently work, of course, because $? is set to the exit
# status of the last thing in the pipeline of the previous command,
# namely `tee'. We really want the exit status of the sh command
# running tar, but getting this seems to be nontrivial. --friedman
if [ $? -ne 0 ] ; then
echo Backup of miscellaneous files failed. | tee -a ${LOGFILE}
# I'm assuming that the tar will have written an empty
# file to the tape, otherwise I should do a cat here.
else
mv -f /etc/tar-backup/temp.level-1 /etc/tar-backup/misc.level-1 2>&1 | tee -a ${LOGFILE}
fi
${TAPE_STATUS} | tee -a ${LOGFILE}
else
echo No miscellaneous files specified | tee -a ${LOGFILE}
# Leaves $? non-zero; nothing below inspects it, so this is inert.
false
fi
# All dumps done: rewind and take the tape offline, then mail the log.
mt -f ${TAPE_FILE} rewind
mt -f ${TAPE_FILE} offl
echo Sending the dump log to ${ADMINISTRATOR}
cat ${LOGFILE} | sed -f logfile.sed > ${LOGFILE}.tmp
/usr/ucb/mail -s "Results of backup on `date`" ${ADMINISTRATOR} < ${LOGFILE}.tmp
rm -f ${LOGFILE}.tmp
| true
|
6b84960515a494fb27c9a4576a08d09a7a6d1b81
|
Shell
|
plavin/sst-autoinstall
|
/install_sst.sh
|
UTF-8
| 2,745
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# install_sst.sh -- build and install SST (sst-core) and its prerequisites
# (autotools, Pin 3) from source checkouts.
#
# Layout created under the current directory:
#   source/     - source trees           install/  - install prefix
#   logs/       - per-step build logs    sstdefs.sh - env vars to source later
#
# NOTE: everything after the sst-core self-test (cmake, DRAMsim3,
# sst-elements) is intentionally disabled by an early `exit` below.
set -euo pipefail

TOP=$(pwd)
SRCDIR=$TOP/source
INSTALLDIR=$TOP/install
DEFFILE=$TOP/sstdefs.sh
LOGS=$TOP/logs

# Pull In Urls (defines PINURL, CMAKEURL, COREREPO, ELEMENTSREPO, ...)
. configure_urls.sh

echo "Source code will be placed in $SRCDIR"
echo "Programs will be installed in $INSTALLDIR"
echo "Necessary environment variables will be placed in $DEFFILE"

# Start from a clean definitions file and log directory each run.
rm -f sstdefs.sh
rm -rf logs
mkdir logs

PATH=$INSTALLDIR/bin:$PATH
echo export PATH=$INSTALLDIR/bin:'$PATH' >> "$DEFFILE"

echo -n "Installing autotools... "
touch "$LOGS/install_autotools.log"
mkdir -p "$SRCDIR/autotools"
cp install_autotools.sh "$SRCDIR/autotools/"
cd "$SRCDIR/autotools/"
PREFIX=$INSTALLDIR ./install_autotools.sh > "$LOGS/install_autotools.log" 2>&1
cd "$TOP"
echo "done."

echo -n "Installing Pin 3... "
touch "$LOGS/install_pin.log"
mkdir -p "$INSTALLDIR/packages/pin"
cd "$INSTALLDIR/packages/pin"
wget "$PINURL" > "$LOGS/install_pin.log" 2>&1
tar xvzf *.tar.gz >> "$LOGS/install_pin.log" 2>&1
# Assumes the tarball unpacks into exactly one directory -- TODO confirm.
PIN_HOME=$PWD/$(ls -d */)
cd "$TOP"
echo "done."

SST_CORE_HOME=$INSTALLDIR
SST_CORE_ROOT=$SRCDIR/sst-core
echo export SST_CORE_HOME=$INSTALLDIR >> "$DEFFILE"
echo export SST_CORE_ROOT=$SRCDIR/sst-core >> "$DEFFILE"
#echo export PATH=$INSTALLDIR/bin:'$PATH' >> $DEFFILE

# Install Core
echo -n "Installing sst-core... "
touch "$LOGS/install_sst_core.log"
mkdir -p "$SRCDIR"
cd "$SRCDIR"
# clone/autogen deliberately left unlogged (logged variants kept below
# commented out, for debugging visibility).
git clone "$COREREPO"
#git clone $COREREPO >> $LOGS/install_sst_core.log 2>&1
cd sst-core
./autogen.sh
#./autogen.sh >> $LOGS/install_sst_core.log 2>&1
./configure --prefix="$SST_CORE_HOME" --disable-mpi >> "$LOGS/install_sst_core.log" 2>&1
make all -j8 >> "$LOGS/install_sst_core.log" 2>&1
make install >> "$LOGS/install_sst_core.log" 2>&1
echo "done."
cd "$TOP"

# Test core
echo -n "Testing sst-core installation... "
touch "$LOGS/sst_tests.log"
sst-core-test > "$LOGS/sst_tests.log" 2>&1
if grep -q "TESTING PASSED" "$LOGS/sst_tests.log"; then
    echo "done."
else
    echo "failed. Exiting. View logs in $LOGS for details."
    exit
fi

# ---- everything below is intentionally disabled ----
exit

# Install Cmake
mkdir -p "$SRCDIR/cmake"
cd "$SRCDIR/cmake"
wget "$CMAKEURL"
tar xzf *.tar.gz
cd $(ls -d */)
cp bin/* "$INSTALLDIR/bin/"
cp -r share/* "$INSTALLDIR/share/"
cd "$TOP"

# Install DramSIM3
# mkdir -p: plain `mkdir` would abort here under `set -e` since $SRCDIR
# already exists by this point.
mkdir -p "$SRCDIR"
cd "$SRCDIR"
git clone "$DRAMSIM3REPO"
cd DRAMsim3
cmake .
make -j8
DRAMDIR=$PWD
cd "$TOP"

# Install Elements
SST_ELEMENTS_HOME=$INSTALLDIR
SST_ELEMENTS_ROOT=$SRCDIR/sst-elements
echo export SST_ELEMENTS_HOME=$INSTALLDIR >> "$DEFFILE"
echo export SST_ELEMENTS_ROOT=$SRCDIR/sst-elements >> "$DEFFILE"
mkdir -p "$SRCDIR"
cd "$SRCDIR"
git clone "$ELEMENTSREPO"
cd sst-elements
# Remove Werror lmao
find . -name Makefile.am -exec sed -i s'/-Werror//g' {} \;
./autogen.sh
./configure --prefix="$SST_ELEMENTS_HOME" --with-sst-core="$SST_CORE_HOME" --with-pin="$PIN_HOME" --with-dramsim3="$DRAMDIR"
#make all -j8
#make install
cd "$TOP"
echo "Done"
| true
|
8ea532ef9609a3642bf6d97e5907ce6c985ab5f0
|
Shell
|
knowrob/docker
|
/scripts/update-log-data
|
UTF-8
| 954
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Mirror the experiment log data from the open-ease FTP server into the
# current directory (`wget -r` recreates the remote tree locally).
wget -r ftp://open-ease-stor.informatik.uni-bremen.de
# The block below is the previous import pipeline (mongoexport from the
# remote DB, mongoimport into the local one, then index creation), kept
# commented out for reference.
#EXPERIMENTS="pick-and-place simulation robosherlock chemlab mocap"#
#for exp in $EXPERIMENTS; do
#  # Export mongo data from open-ease to json
#  mongoexport --host data.open-ease.org --port 27017 \
#    --db $exp --collection tf -o ease.$exp.tf.json
#  mongoexport --host data.open-ease.org --port 27017 \
#    --db $exp --collection logged_designators -o ease.$exp.logged_designators.json
#
#  # Import from json to local mongo
#  mongoimport --db $exp --collection tf --file ease.$exp.tf.json
#  mongoimport --db $exp --collection logged_designators --file ease.$exp.logged_designators.json
#
#  # set indexes in MongoDB
#  mongo $exp --eval 'db.tf.ensureIndex( { __recorded : 1 } )'
#  mongo $exp --eval 'db.tf.ensureIndex( { "transforms.header.stamp" : 1 } )'
#  mongo $exp --eval 'db.logged_designators.ensureIndex( { __recorded : 1 } )'
#done
| true
|
b70fd1215c04c761b94c6183d8f5de70128a4466
|
Shell
|
kemalsami/python-mongoDB
|
/callPython.sh
|
UTF-8
| 1,780
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Nagios plug-in wrapper: query a Ceilometer-style metric for a VM from
# MongoDB (via mongoDBhandler.py) and emit Nagios-formatted output.
#
# mongoDBhandler.py prints '|'-separated fields:
#   status-text | perfdata | return-code

# Exit code used by printusage; 3 is Nagios "UNKNOWN".
# (Previously this was never set, so `exit ${EXITPROB}` exited with the
# status of the last echo, i.e. 0 == OK, even on bad usage.)
EXITPROB=3

#initialization with default values
SERVICE='cpu_util'

#function to print the help info
printusage()
{
echo "This plug-in uses the OpenStack Ceilometer API to let Nagios query Ceilometer metrics of VMs."
echo "usage:"
echo "ceilometer-call -s metric_name -t nagios_warning_threshold -T nagios_critical_threshold"
echo "-h: print this message"
echo "-s service: The Ceilometer metric which you want to query"
echo "-t threshold: Threshold value which causes Nagios to create a warning message"
echo "-T threshold for alert: Threshold value which causes Nagios to send a critical alert message"
echo "-c configuration file for tenants"
echo "-r resourceid"
exit ${EXITPROB}
}

#parse the arguments
# (-v sets HOST, which is currently unused below.)
while getopts ":hs:t:T:c:r:v:" opt
do
case $opt in
h ) printusage;;
s ) SERVICE=${OPTARG};;
t ) THRESHOLD=${OPTARG};;
T ) CRITICAL_THRESHOLD=${OPTARG};;
c ) CREDENTIAL=${OPTARG};;
r ) RESOURCE=${OPTARG};;
v ) HOST=${OPTARG};;
? ) printusage;;
esac
done
#####################################################
#####################################################
# CREDENTIAL PART REMOVED and
# python module added for getting result from mongoDB
#####################################################
#####################################################
# Quote the arguments so each positional slot reaches the handler even if
# a value is empty or contains spaces.
pythonResult=$(python3.4 mongoDBhandler.py "${RESOURCE:-}" "$SERVICE" "${THRESHOLD:-}" "${CRITICAL_THRESHOLD:-}")
# Quote $pythonResult so whitespace in the status text survives; the
# previous unquoted echo collapsed runs of spaces via word splitting.
statusInfo=$(echo "$pythonResult" | awk -F '|' '{print $1 "|" $2}')
perfData=$(echo "$pythonResult" | awk -F '|' '{print $2}')
returnCode=$(echo "$pythonResult" | awk -F '|' '{print $3}')
#echo "$pythonResult"
echo "$statusInfo"
#echo "$perfData"
#echo "$returnCode"
# Fall back to UNKNOWN (3) if the handler produced no return code.
exit "${returnCode:-3}"
| true
|
ba29b747d86214de9ae19a0d37e79c34835f7486
|
Shell
|
sigrlami/pegnet
|
/initialization/fundEC.sh
|
UTF-8
| 763
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Default is to fund a simulation EC address for testing
# Replace the EC address and the FCT address with your own to initialize a PegNet
# on the Factom MainNet
EC="EC3TsJHUs8bzbbVnratBafub6toRYdgzgbR7kWwCW4tqbmyySRmg"
FCT="FA2jK2HcLnRdS94dEcU27rF3meoJfpUcZPSinpb7AwQvPRY6RL1Q"
# Imports addresses for the Local Testnet. These addresses are no good for the mainnet, but
# for ease of developing on local simulated networks, we add them to your wallet here.
factom-cli importaddress Fs3E9gV6DXsYzf7Fqx1fVBQPQXV695eP3k5XbmHEZVRLkMdD9qCK
factom-cli importaddress Es2XT3jSxi1xqrDvS5JERM3W3jh1awRHuyoahn3hbQLyfEi1jvbq
# Fund the EC address
# Creates and sends a FCT tx that adds 100 FCT worth of ECs to the given EC address
# (rmtx first discards any leftover draft transaction named 'tx' from a
# previous run, then the tx is rebuilt step by step and submitted).
factom-cli rmtx tx
factom-cli newtx tx
factom-cli addtxinput tx $FCT 100
factom-cli addtxecoutput tx $EC 100
factom-cli addtxfee tx $FCT
factom-cli signtx tx
factom-cli sendtx tx
# Show the wallet contents so the new balances can be eyeballed.
factom-cli listaddresses
| true
|
b9d7f5c1b33c7d8bfc97081d05d6c43e152d6cf9
|
Shell
|
lfos/calcurse
|
/test/todo-001.sh
|
UTF-8
| 286
| 3.03125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Regression test: `calcurse -t` must list the todo items, matching what we
# derive directly from the on-disk todo file.  Invoked by run-test with
# 'actual' or 'expected'; with no argument it re-runs itself through the
# test harness.
. "${TEST_INIT:-./test-init.sh}"
if [ "$1" = 'actual' ]; then
"$CALCURSE" --read-only -D "$DATA_DIR"/ -t | sort
elif [ "$1" = 'expected' ]; then
(
echo 'to do:'
# Drop completed items ('[-N] ...') and rewrite '[N] text' as 'N. text'
# to match calcurse's output format.
sed '/^\[-/d; s/^\[\([0-9]\)\] \(.*\)/\1. \2/' "$DATA_DIR"/todo
) | sort
else
./run-test "$0"
fi
| true
|
5309bf3abb0edb8fb8a3c5faaa7116365acd2819
|
Shell
|
ntd/aur-fedora-mingw
|
/aur/afm-mingw-w64-makedepends/PKGBUILD
|
UTF-8
| 3,140
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
# Maintainer: Nicola Fontana <ntd@entidi.it>
#
# This package automatically pulls in the makedepends needed for MinGW
# cross-building and provides the following helper stuff:
#
# - pkg-config wrappers
# - CMake configurations for MinGW
# - configure wrappers
#
# It borrows ideas from the mingw-filesystem Fedora package [1] and some
# AUR packages [2] [3] that in my opinion has wrong names.
#
# [1] http://pkgs.fedoraproject.org/cgit/mingw-filesystem.git/
# [2] https://aur.archlinux.org/packages/mingw-w64-pkg-config/
# [3] https://aur.archlinux.org/packages/mingw-w64-cmake/
pkgname=afm-mingw-w64-makedepends
pkgver=3
pkgrel=8
pkgdesc='A collection of stuff needed for MinGW cross-building'
arch=(any)
url='http://dev.entidi.com/p/aur-fedora-mingw/'
license=(mit)
groups=(afm-i686 afm-x86_64)
depends=(mingw-w64-gcc)
conflicts=(mingw-w64-pkg-config mingw-w64-cmake)
provides=(mingw-w64-pkg-config mingw-w64-cmake)
source=("configure-wrapper"
"i686-w64-mingw32-cmake"
"i686-w64-mingw32-configure"
"i686-w64-mingw32-pkg-config"
"makepkg.common.conf"
"makepkg.i686.conf"
"makepkg.native.conf"
"makepkg.x86_64.conf"
"x86_64-w64-mingw32-cmake"
"x86_64-w64-mingw32-configure"
"x86_64-w64-mingw32-pkg-config"
"Toolchain-i686-w64-mingw32.cmake"
"Toolchain-x86_64-w64-mingw32.cmake")
md5sums=('872de7f30558017886e0bc62f1fa3f77'
'4abb097f8387f364a9877e28c7c880a3'
'2fe711bc73b89cff50baa5b785e35b4c'
'2c2641547debc5d289bb85b0939e4380'
'9bf6ea428f78d5f163c4b23d48eba9e1'
'a38a29dd8f512e71a657490c9c82004c'
'f8123a57a0d15de814c23668334e3dca'
'80969e92a600cec8de480b64bcefaa98'
'23961cda7ebd6a35aeff3345c780c6b5'
'e08642fb0279dd280b85ad1fb1b6215e'
'0a879bf20cd82e44b73eb49cf2484eda'
'4704995815374af7b3b853b139d04ad5'
'cdb9f76c5ab95038df1c03755e9119e7')
# Install the MinGW helper scripts: wrappers into /usr/bin, shared
# makepkg configs and CMake toolchain files into /usr/share/mingw.
package() {
  local bindir="$pkgdir/usr/bin"
  local sharedir="$pkgdir/usr/share/mingw"
  install -d "$bindir"
  install -d "$sharedir"
  # Find the root directory and substitute it in makepkg.common.conf
  # (the repo root two levels above where makepkg was started).
  local root=$(cd "$startdir/../.." && echo $PWD)
  sed \
    -e "s|@root@|$root|g" \
    "$srcdir/makepkg.common.conf" > "$sharedir/makepkg.common.conf"
  chmod 0644 "$sharedir/makepkg.common.conf"
  # Executable wrappers (cmake/configure/pkg-config for both targets).
  install -m 755 i686-w64-mingw32-cmake "$bindir/"
  install -m 755 x86_64-w64-mingw32-cmake "$bindir/"
  install -m 755 i686-w64-mingw32-configure "$bindir/"
  install -m 755 x86_64-w64-mingw32-configure "$bindir/"
  install -m 755 i686-w64-mingw32-pkg-config "$bindir/"
  install -m 755 x86_64-w64-mingw32-pkg-config "$bindir/"
  # Non-executable support files sourced/included by the wrappers.
  install -m 644 configure-wrapper "$sharedir/"
  install -m 644 makepkg.i686.conf "$sharedir/"
  install -m 644 makepkg.native.conf "$sharedir/"
  install -m 644 makepkg.x86_64.conf "$sharedir/"
  install -m 644 Toolchain-i686-w64-mingw32.cmake "$sharedir/"
  install -m 644 Toolchain-x86_64-w64-mingw32.cmake "$sharedir/"
}
# vim:set ts=2 sw=2 et:
| true
|
14315c8440f85d9a9f58475b41aa880f18686018
|
Shell
|
builddouble/util
|
/scripts/count.sh
|
UTF-8
| 154
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Count from 1 to $1, echoing each value and persisting the current count
# in cnt.txt between iterations.
echo "0" > cnt.txt
# Initialize CNT before the first echo; previously it was unset here, so
# the script started by printing a blank line instead of 0.
CNT=0
echo $CNT
for i in `seq $1`
do
	CNT=`cat cnt.txt`
	CNT=$(($CNT + 1))
	echo $CNT > cnt.txt
	echo $CNT
done
| true
|
23200ad1821afdf9ef0f04a1989b4b8d66c9d445
|
Shell
|
flaviobarci/dotfiles
|
/.zshrc
|
UTF-8
| 2,705
| 3.1875
| 3
|
[] |
no_license
|
# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH="/home/barci/.oh-my-zsh"
export EDITOR="nvim"
export FZF_BASE=/usr/bin/fzf
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
ZSH_THEME="barci"
# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"
# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git fzf)
source $ZSH/oh-my-zsh.sh
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# pve NAME -- activate the Python virtualenv ~/penvs/NAME in this shell.
pve(){
source /home/barci/penvs/$1/bin/activate
}
# nvim FILE -- open FILE in Neovim inside a new, detached alacritty
# window; deliberately shadows the `nvim` binary for interactive use.
nvim(){
nohup alacritty --command nvim $1 > /dev/null 2>&1 & disown
}
alias gs='git status'
alias go='git checkout'
alias ga='git add'
alias gc='git commit'
alias gl='git log --oneline'
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
| true
|
13c535a835ed8928f48eb22d5cecb34b09fb3158
|
Shell
|
ShubhamKul14/assignment-probs9
|
/day10/arithmatic.sh
|
UTF-8
| 603
| 3.15625
| 3
|
[] |
no_license
|
# Read three integers, evaluate four arithmetic expressions, collect the
# results in an associative array, then dump and sort them.
declare -A dict
declare -a arr
read -p "ENTER A : " a
read -p "ENTER B : " b
read -p "ENTER C : " c
c1=$(($a+$b*$c))
c2=$(($a*$b+$c))
c3=$(($c+$a/$b))
c4=$(($a%$b+$c))
echo -e "\n( a + b x c ) : $(($a+$b*$c))"
echo -e "\n( a x b + c ) : $(($a*$b+$c))"
echo -e "\n( c + a / b ) : $(($c+$a/$b))"
echo -e "\n( a % b + c ) : $(($a%$b+$c))"
dict["c1"]+=$c1
dict["c2"]+=$c2
dict["c3"]+=$c3
dict["c4"]+=$c4
a=0
# Scalar assignment of ${dict[@]} joins all four values with spaces, so
# this stores them as one space-separated string in arr[0] (a is 0 here).
arr[((a++))]=${dict[@]}
echo -e "\nDICTIONARY : ${dict[@]}"
echo -e "\nARRAY VALs : ${arr[@]}"
# Word-split the joined string back into numbers and sort numerically.
s_arr=($(echo ${arr[@]}| tr " " "\n" | sort -n))
echo -e "\nSORTED ASC : ${s_arr[@]}" #MIN TO MAX
| true
|
6d9e4beccf53d76b7b4fe997a1f60640ee491276
|
Shell
|
Boukefalos/dotfiles
|
/install.sh
|
UTF-8
| 400
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Top-level dotfiles installer: pull submodules, run the per-topic install
# scripts (apt packages when on an apt system, terminfo, ruby, symlinks).
echo "Installing dotfiles"
echo "Initializing submodule(s)"
git submodule update --init --recursive
# Crude apt detection via the man-page index; only then run the apt step.
if apropos "package manager" | grep -q apt; then
echo -e "\n\nUsing APT package manager"
source install/apt.sh
fi
source install/terminfo.sh
source install/ruby.sh
source install/link.sh
# echo "Configuring zsh as default shell"
# chsh -s $(which zsh)
echo "Done."
| true
|
2be6dd9dd68285d6e93ee7b9ae4fc0b4e699b2b2
|
Shell
|
AncientMariner/LearningMongoDB
|
/docker/prepareContainers.sh
|
UTF-8
| 358
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build the two docker images for the app: a tomcat image containing the
# freshly-built WAR, and a mongo image seeded with the primer dataset.
echo "preparing tomcat container"
# Build the WAR one directory up (tests skipped), come back, and stage it
# next to the tomcat Dockerfile.
cd ../ && mvn clean package -Dmaven.test.skip=true && cd - && cp ../target/LearningMongoDB.war tomcat/
cd tomcat/
docker build -t tomcat .
# Remove the staged copy; the original remains in ../target.
rm LearningMongoDB.war
cd ../
echo "preparing mongo container"
cd mongo/
pwd
# Stage the seed dataset beside the mongo Dockerfile, build, then clean up.
unzip ../../primer-dataset.json.zip -d .
docker build -t mongo .
rm primer-dataset.json
cd ../
| true
|
c47dcbf8e8779407a6da6acb41e1f92fbd1b396f
|
Shell
|
hristogenev/devtunnel-cli
|
/DevTunnelMultipleSession.sh
|
UTF-8
| 1,210
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Launch one DevTunnelCLISession.sh per device listed in DeviceList.txt,
# staggered 15s apart, and forward TERM/INT to all children so Ctrl-C
# tears the whole group down.
# Color codes (currently unused below).
red=$'\e[1;31m'
grn=$'\e[1;32m'
end=$'\e[0m'
domain=$1
os=$2
input="DeviceList.txt"
readLineIndex=0
# Read every device id into deviceIdArray; the `|| [ -n "$line" ]` clause
# also captures a final line without a trailing newline.
while IFS= read -r line || [ -n "$line" ]
do
deviceIdArray[readLineIndex]=$line
readLineIndex=$((readLineIndex+1))
done < "$input"
# Install the TERM/INT trap before any children are spawned.
prep_term()
{
unset term_kill_needed
trap 'handle_term' TERM INT
}
# Signal handler: if children already exist, forward TERM to each;
# otherwise remember that a kill is pending for wait_term to act on.
handle_term()
{
if [ "${term_child_pid[0]}" ]; then
for ((i=0; i<readLineIndex; i++))
do
kill -TERM "${term_child_pid[$i]}" 2>/dev/null
done
else
term_kill_needed="yes"
fi
}
# Deliver any pending kill, then wait for every child.  The second wait
# loop reaps children whose first wait was interrupted by a signal.
# NOTE(review): this function ignores its arguments entirely.
wait_term()
{
if [ "${term_kill_needed}" ]; then
for ((i=0; i<readLineIndex; i++))
do
kill -TERM "${term_child_pid[$i]}" 2>/dev/null
done
fi
for ((i=0; i<readLineIndex; i++))
do
wait "${term_child_pid[$i]}" 2>/dev/null
done
trap - TERM INT
for ((i=0; i<readLineIndex; i++))
do
wait "${term_child_pid[$i]}" 2>/dev/null
done
}
prep_term
for ((i=0; i<readLineIndex; i++))
do
echo "${deviceIdArray[$i]}"
/bin/bash DevTunnelCLISession.sh "$domain" "${deviceIdArray[$i]}" "$os" &
term_child_pid[$i]=$!
sleep 15
done
# NOTE(review): $term_child_pid1 / $term_child_pid2 are never defined, so
# these arguments expand empty -- harmless, since wait_term ignores them.
wait_term "$term_child_pid1" "$term_child_pid2"
| true
|
1332ec3eaa2877db9204484b7cfaf94618e773f5
|
Shell
|
markstoehr/lstm_acoustic_embedding
|
/BUILD.sh
|
UTF-8
| 706
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the working directory and generate MFCC features for the TIMIT
# word lists.  Optional $1 overrides the base directory (default: cwd).
# get data
TIMIT=~/Data/timit
# Quote "$1" so an empty/whitespace argument is handled correctly
# (unquoted, `[ -z $1 ]` breaks when $1 contains spaces).
if [ -z "${1:-}" ] ; then
    basedir=$(pwd)
else
    basedir=$1
fi
echo "Building directory in $basedir"
datadir=$basedir/data
mkdir -p "$datadir"
# get word list
# python data_preparation/collect_timit_words.py $TIMIT/test $datadir/test_wordlist
# python data_preparation/collect_timit_words.py $TIMIT/train $datadir/train_wordlist
# generate MFCC features
echo "Generating testing MFCC features"
python data_preparation/wordlist_to_mfccs.py "$datadir/test_wordlist" "$datadir/test_mfccs.hdf5" "$datadir/test_wordkey.txt"
echo "Generating training MFCC features"
python data_preparation/wordlist_to_mfccs.py "$datadir/train_wordlist" "$datadir/train_mfccs.hdf5" "$datadir/train_wordkey.txt"
| true
|
4de989ce7502a03479437e6dd46a27b56829c617
|
Shell
|
Chenmengpin/scRNASeq-CNVCaller
|
/preprocessing/vcfCaller.sh
|
UTF-8
| 687
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
#Script to generate the vcf file for HoneyBADGER
# Variables for the scripts
FASTA=/home/bkw2118/extDrive/tools/cellranger-5.0.0/reference/refdata-gex-GRCh38-2020-A/fasta/genome.fa
BAM=/mnt/disks/ext/scRNASeq-CNVCaller/data/cellRangerOutput/outs/possorted_genome_bam.bam
OUT=/mnt/disks/ext/scRNASeq-CNVCaller/data/pt14.d.vcf.gz
# Intermediate quality-filtered VCF.  BUG FIX: OUT_FILTERED was used below
# but never defined, so `bcftools filter -o` received an empty argument.
OUT_FILTERED=/mnt/disks/ext/scRNASeq-CNVCaller/data/pt14.d.vcf.filtered.gz
OUT_AF=/mnt/disks/ext/scRNASeq-CNVCaller/data/pt14.d.vcf.filtAF.gz
# Create the raw vcf file (pileup piped straight into the caller)
bcftools mpileup -Ou -f "$FASTA" "$BAM" | bcftools call -vmO z -o "$OUT"
# Filter the variant calls: mark QUAL<=10 records as LOWQUAL
bcftools filter -O z -o "$OUT_FILTERED" -s LOWQUAL -i'%QUAL>10' "$OUT"
# Add all the tags for HoneyBADGER (allele frequency)
bcftools +fill-tags "$OUT_FILTERED" -o "$OUT_AF" -- -t AF
| true
|
2b685fa2a995a1573648ba0c1ce89dbc44c19c51
|
Shell
|
delippi/PhD-globalRadarOSSE
|
/arch_NEXRAD/run_clean.ksh
|
UTF-8
| 1,006
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/ksh -x
# Generate and submit (via bsub) a small LSF job that removes the run
# directories for one cycle (PDY/cyc) of the NEXRAD OSSE experiment.
set -x
set -e
pdy=$PDY
cyc=$cyc
PSLOT="NEXRAD-2018092300-2018100700"
# Force cyc to two digits (ksh zero-padded attribute).
typeset -Z2 cyc
ROTDIR="/gpfs/hps2/ptmp/Donald.E.Lippi/fv3gfs_dl2rw/2018092300/$PSLOT"
CDATE=${pdy}${cyc}
cd /gpfs/hps3/emc/meso/save/Donald.E.Lippi/PhD-globalRadarOSSE/arch_NEXRAD
cd $pdy/$cyc
# Write the batch script; variables are expanded NOW (unquoted EOF), so
# the generated file contains the concrete paths for this cycle.
cat << EOF > ./clean_NEXRAD.t${cyc}z.${CDATE}.ksh
#!/bin/ksh
#BSUB -P GFS-DEV
#BSUB -J gfsclean.t${cyc}z.${CDATE}
#BSUB -W 00:20 # wall-clock time (hrs:mins)
#BSUB -n 1 # number of tasks in job
#BSUB -R "rusage[mem=8192]" # number of cores
#BSUB -q "dev_transfer" # queue
#BSUB -o gfsclean.t${cyc}z.${CDATE}.log # output file name in which %J is replaced by the job ID
cd $ROTDIR
rm -rf enkfgdas.$PDY/$cyc
rm -rf gdas.$PDY/$cyc
rm -rf gfs.$PDY/$cyc
cd /gpfs/hps2/stmp/Donald.E.Lippi/RUNDIRS/$PSLOT
rm -rf ${PDY}${cyc}
EOF
chmod 755 ./clean_NEXRAD.t${cyc}z.${CDATE}.ksh
# -K blocks until the job completes.
bsub -K < ./clean_NEXRAD.t${cyc}z.${CDATE}.ksh
#ksh ./clean_NEXRAD.t${cyc}z.${CDATE}.ksh
| true
|
16a0be7ae04167a45d4ea405df74f4fe5feefe58
|
Shell
|
petronny/aur3-mirror
|
/pacmanlog2gource-git/PKGBUILD
|
UTF-8
| 1,101
| 2.9375
| 3
|
[] |
no_license
|
# Maintainer: matthiaskrgr <matthias · krueger _strange_curved_character_ famsik · de
# address: run echo "matthias · krueger _strange_curved_character_ famsik · de" | sed -e 's/\ _strange_curved_character_\ /@/' -e 's/\ ·\ /./g'
pkgname=pacmanlog2gource-git
# makepkg calls this to derive the version from the latest git tag,
# stripping the "pacmanlog2gource-" prefix and mapping '-' to '.'.
pkgver() {
cd pacmanlog2gource
git describe --tags | sed -e 's/^pacmanlog2gource\-//' -e 's/-/./g'
}
# Fallback version, overwritten by pkgver() at build time.
pkgver=2.0.3
pkgrel=0
pkgdesc="A bash script to convert a copy of /var/log/pacman.log into a log file allowing later visualisation using gource - git version"
arch=('any')
url="https://github.com/matthiaskrgr/pacmanlog2gource"
license=('GPL')
depends=('gource' 'calc' 'ffmpeg')
conflicts=('pacmanlog2gource')
replaces=('pacmanlog2gource')
changelog=changelog
source=('pacmanlog2gource::git://github.com/matthiaskrgr/pacmanlog2gource.git'
	'changelog')
sha1sums=('SKIP'
          '4a05b15313dfae6564ca5f0ea1ee0c6f600e2f73')
# Install the man page and the executable (renamed without .sh).
package() {
  cd "$srcdir/pacmanlog2gource"
  install -D pacmanlog2gource.1 ${pkgdir}/usr/share/man/man1/pacmanlog2gource.1
  install -Dm 755 pacmanlog2gource.sh ${pkgdir}/usr/bin/pacmanlog2gource
}
| true
|
86872977889287e4b1fc29310a4039899cf4c43f
|
Shell
|
RSEnergyGroup/azure.devops.virtual-environments
|
/images/linux/scripts/installers/homebrew-validate.sh
|
UTF-8
| 848
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
################################################################################
##  File:  homebrew-validate.sh
##  Desc:  Validate the Homebrew can run after reboot without extra configuring
################################################################################

# Validate the installation
echo "Validate the Homebrew can run after reboot"
set -x
# Homebrew on Linux may live either in the user's home or in the shared
# /home/linuxbrew prefix; set up whichever exists.
if test -d ~/.linuxbrew
then
  echo "User-dir brew"
  # brew shellenv emits the export statements brew needs (HOMEBREW_PREFIX &c).
  eval $(~/.linuxbrew/bin/brew shellenv)
  export PATH=${PATH}:~/.linuxbrew/bin
fi
if test -d /home/linuxbrew/.linuxbrew
then
  echo "System-wide brew"
  eval $(/home/linuxbrew/.linuxbrew/bin/brew shellenv)
  export PATH=${PATH}:/home/linuxbrew/.linuxbrew/bin
fi
# eval $($(brew --prefix)/bin/brew shellenv)
set +x
# After the PATH setup above, brew must be resolvable or the image is bad.
if ! command -v brew; then
  echo "brew executable not found after reboot"
  exit 1
fi
| true
|
35db3328a23f420d2d1008b17a011941a5715728
|
Shell
|
roshal/dotfiles
|
/user/.config/bash/bash-config.sh
|
UTF-8
| 1,352
| 2.8125
| 3
|
[] |
no_license
|
### if not running interactively do not do anything
! echo "${-}" | grep --quiet i && return
### variables
PATH="${BASE}"
# # https://classic.yarnpkg.com/en/docs/cli/global#adding-the-install-location-to-your-path
PATH="${HOME}/.yarn/bin:${PATH}"
PATH="${HOME}/.local/bin:${PATH}"
### heterogeneous
test -e "${HOME}/.local/bin/register-python-argcomplete" && eval "$(register-python-argcomplete pipx)"
test -e "${HOME}/.yarn/bin/nps" && eval "$(nps completion)"
### sources
# # https://wiki.archlinux.org/index.php/Termite#Ctrl+Shift+t
source /etc/profile.d/vte.sh
# # https://wiki.archlinux.org/index.php/Bash#Command_not_found
source /usr/share/doc/pkgfile/command-not-found.bash
# sudo pkgfile -u
# # https://wiki.archlinux.org/index.php/Git#Git_prompt
source /usr/share/git/git-prompt.sh
source /usr/share/nvm/init-nvm.sh
### environment after vte
# # https://askubuntu.com/questions/67283
# # https://unix.stackexchange.com/questions/1288
export PROMPT_COMMAND="${PROMPT_COMMAND:-true}"
export PROMPT_COMMAND="${PROMPT_COMMAND} && history -a"
export PROMPT_COMMAND="${PROMPT_COMMAND} && history -n"
# export PROMPT_COMMAND="${PROMPT_COMMAND} && history -a"
# export PROMPT_COMMAND="${PROMPT_COMMAND} && history -c"
# export PROMPT_COMMAND="${PROMPT_COMMAND} && history -r"
### origin
source "${HOME}/.config/bash/bash-origin.sh"
| true
|
1b16f76dc6677d5cfb21d80f9fc823f46c48e06f
|
Shell
|
SanmiAndreSofa/maldi_amr
|
/amr_maldi_ml/scripts/sort_files.sh
|
UTF-8
| 397
| 3.640625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
#
# Script for sorting result files into their respective directories,
# creating them in the process if they do not exist. This is one way
# to ensure that results files are collated nicely.

# No root directory given: nothing to do.
if [ -z ${1+x} ]; then
  exit
fi

ROOT=$1

for MODEL in 'lightgbm' 'lr' 'rf' 'svm-linear' 'svm-rbf'; do
  mkdir -p "$ROOT/$MODEL"
  # Quote the variable parts but leave the '*' wildcards unquoted so the
  # glob still expands; this protects ROOT paths containing spaces.
  git mv -f "$ROOT"/*"Model_$MODEL"*.json "$ROOT/$MODEL"
done
| true
|
90f73815b2d276d0717ac79518d4cb94c61a518a
|
Shell
|
oraclecloudbricks/endevour_webinar
|
/functions/create_venv.sh
|
UTF-8
| 401
| 3.078125
| 3
|
[
"UPL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2021 Oracle and/or its affiliates.
# All rights reserved. The Universal Permissive License (UPL), Version 1.0 as shown at http://oss.oracle.com/licenses/upl
# setup_env.sh
#
# Purpose: Setup Python Virtual Environments to work with Functions
#
# NOTE: the shebang must be the very first line of the file; it previously
# sat below the copyright comment, where the kernel ignores it.
if [ "$#" -ne 1 ]; then
  echo "Missing system to create. Usage: ./create_venv.sh VIRTUAL_ENV_NAME"
else
  # Quoted so environment names with spaces are passed through intact.
  mkvirtualenv "$1"
fi
| true
|
0633a4fc8b127ddf48f7b0110d02b7dabbd9f265
|
Shell
|
Aghost/BuffOverflow
|
/generate_shellcode/generate_shellcode.sh
|
UTF-8
| 332
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
# '\x41' = 'A'
# '\x90' = no-op
# $1 = int times
# $2 = str characterto add to string

# Emit $2 repeated $1 times.  The accumulated string is passed to printf
# as the *format* argument on purpose: escape sequences such as '\x41'
# are expanded, producing raw shellcode bytes on stdout.
print_chars() {
	repeated=""
	count=0
	while [ "$count" -lt "$1" ]; do
		repeated="$repeated$2"
		count=$((count + 1))
	done
	printf "$repeated"
	#./Buffo.out with the generated string could be invoked here
}
# Guard: need at least one argument (the repeat count).
# NOTE(review): `return` is only valid inside a function or a sourced
# script; when this file is executed directly, the shell reports an error
# and execution falls through -- `exit` was probably intended.
if [ $# -lt "1" ]; then return; fi
# For counts > 1, emit that many 'A' (0x41) bytes; otherwise print "no".
if [ $1 -gt "1" ]; then
print_chars $1 '\x41'
else
echo "no"
fi
| true
|
ae82430610e8f460342df74dd5c3758002765d85
|
Shell
|
dokku-alt/mongodb-dockerfiles
|
/start_mongodb.sh
|
UTF-8
| 748
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: on first run, bootstrap /opt/mongodb with an admin
# user (password read from /opt/mongodb_password), then exec mongod with
# auth enabled.
set -e

if [[ ! -f /opt/mongodb_password ]]; then
	echo "No mongodb password defined"
	exit 1
fi

# One-time initialization, marked complete by /opt/mongodb/initialized.
if [[ ! -f /opt/mongodb/initialized ]]; then
	mkdir -p /opt/mongodb
	# Temporary unauthenticated daemon, bound to loopback only, so we can
	# create the admin account.
	/usr/bin/mongod --bind_ip=127.0.0.1 --dbpath=/opt/mongodb --noauth --fork --syslog
	DB_PASSWORD="$(cat "/opt/mongodb_password")"
	sleep 2s
	echo "Creating admin user..."
	mongo <<EOF
use admin
db.addUser({user: "admin", pwd:"${DB_PASSWORD}", roles:["clusterAdmin", "userAdminAnyDatabase"]})
EOF
	# Stop the temporary daemon and give it time to shut down cleanly.
	kill $(pidof mongod)
	sleep 8s
	touch /opt/mongodb/initialized
fi

# always update permissions in case of user-id being switched
chown -R mongodb:mongodb /opt/mongodb
chmod 755 /opt/mongodb

# Replace this shell with the real, authenticated daemon (PID 1 friendly).
exec /usr/bin/mongod --dbpath=/opt/mongodb --auth
| true
|
3a769da331104d03edee4eb67d966294d4f50c28
|
Shell
|
devops-life/shell_python
|
/doc/Service/nginx/Nginx的平滑升级.sh
|
UTF-8
| 1,538
| 2.5625
| 3
|
[] |
no_license
|
Nginx的平滑升级
1、查看当然版本
#cd /usr/local/nginx/ #进入Nginx安装目录
# sbin/nginx -V #查看版本
nginx version: nginx/0.7.60
configure arguments: –user=www –group=www –prefix=/usr/local/nginx –with-http_stub_status_module –with-http_ssl_module #编译项
得到原来./configure 的编译项
2.下载最新版
前往查看最新版,http://nginx.org/en/download.html
#cd /data/soft/
#wget http://nginx.org/download/nginx-0.8.36.tar.gz #下载
#tar xzvf nginx-0.8.36.tar.gz #解压缩
#cd nginx-0.8.36
3.编译
#./configure –user=www –group=www –prefix=/usr/local/nginx –with-http_stub_status_module –with-http_ssl_module #按原来的选项configure
#make #编译
#mv /usr/local/nginx/sbin/nginx /usr/local/nginx/sbin/nginx.old #移动旧版本
#cp objs/nginx /usr/local/nginx/sbin/ #复制新版本nginx过去
#cd /usr/local/nginx
#sbin/nginx -t #测试下,显示如下就是通过
the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
configuration file /usr/local/nginx/conf/nginx.conf test is successful
4.启动新的,关掉旧的
让nginx把nginx.pid改成nginx.pid.oldbin 跟着启动新的nginx
# kill -USR2 `cat /usr/local/nginx/nginx.pid`
退出旧的nignx
# kill -QUIT `cat /usr/local/nginx/nginx.pid.oldbin`
5.升级完成。
# sbin/nginx -V
nginx version: nginx/0.8.36
TLS SNI support disabled
configure arguments: --user=www --group=www --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module
| true
|
cf36bfca4f2939c79270375cd24d710f5f0bc2a5
|
Shell
|
cottrell/consul_examples
|
/simple_service
|
UTF-8
| 1,566
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive walkthrough of Consul's agent HTTP API: register a check
# and a service, flip the service between passing and failing by starting
# and stopping a local HTTP server, then deregister everything.  Pauses
# at each step so the state can be inspected in the Consul UI.
# https://www.consul.io/api/agent.html
# Absolute directory of this script, so the JSON payloads resolve
# regardless of the caller's cwd.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# PORT=$($DIR/get_free_port.sh)
# Register a standalone check and a service from JSON payloads that live
# next to this script.
curl --request PUT --data @$DIR/check_pinggoogle.json http://localhost:8500/v1/agent/check/register
curl --request PUT --data @$DIR/service_httpserver.json http://localhost:8500/v1/agent/service/register
read -p "Setup service and checks. Service check should be failing."
# Serve the current directory on port 3000 so the service check passes.
python -m http.server --cgi 3000 &
read -p "Started http.server. Service check should be passing."
# %1 refers to bash's job-table entry for the background server above.
kill %1
read -p "Stopped http.server. Check should failing."
curl --request PUT http://localhost:8500/v1/agent/check/deregister/pinggoogle
curl --request PUT http://localhost:8500/v1/agent/service/deregister/httpserver
read -p "Deregistered service and checks. Done."
# Reference: other agent API endpoints.
# curl http://localhost:8500/v1/agent/checks
# curl --request PUT --data @payload.json http://localhost:8500/v1/agent/check/register
# curl --request PUT http://localhost:8500/v1/agent/check/deregister/:checkid
# curl http://localhost:8500/v1/agent/check/pass/:checkid
# curl http://localhost:8500/v1/agent/check/warn/:checkid
# curl http://localhost:8500/v1/agent/check/fail/:checkid
# curl --request PUT --data @payload.json http://localhost:8500/v1/agent/check/update/:checkid
#
# curl http://localhost:8500/v1/agent/services
# curl --request PUT --data @payload.json http://localhost:8500/v1/agent/service/register
# curl --request PUT http://localhost:8500/v1/agent/service/deregister/:serviceid
# curl --request PUT http://localhost:8500/v1/agent/service/maintenance/:serviceid
| true
|
f3b45790ff86987046f218d4ec272c6bcf64c879
|
Shell
|
sinofeng/niagara-load-balancer
|
/prototype/mininet/exNgFull.sh
|
UTF-8
| 2,518
| 2.921875
| 3
|
[] |
no_license
|
# vip is 10.6.1.1
# direct 10.0/16 to 10.4.1.1 and 10.1/16 to 10.5.1.1

# Datapath IDs of the switches managed by the REST router controller on
# localhost:8080.
hswitch_id=0000000000000001
s1_id=0000000000000002
s2_id=0000000000000003

# Dispatch on the requested action.
case "$1" in
    rt)
        # let dumb switches direct vip to sswitches
        curl -X POST -d '{"destination":"10.6.1.1/16", "gateway":"10.4.1.1"}' http://localhost:8080/router/$s1_id
        curl -X POST -d '{"destination":"10.6.1.1/16", "gateway":"10.5.1.1"}' http://localhost:8080/router/$s2_id
        ;;
    ng)
        # install niagara rules
        curl -X POST -d '{"niagara":"1","vip":"10.6.1.1","sip":"10.0.0.0","pattern":"0x00000000","gateway":"192.168.0.2"}' http://localhost:8080/router/$hswitch_id
        curl -X POST -d '{"niagara":"2","vip":"10.6.1.1","sip":"10.1.0.0","pattern":"0xffff0000","gateway":"192.168.1.2"}' http://localhost:8080/router/$hswitch_id
        ;;
    sw)
        # Configure NAT on one of the software switches.
        case "$2" in
            sswitch1)
                echo "setting" $2
                sysctl -w net.ipv4.ip_forward=1
                iptables -t nat -A POSTROUTING -s 10.0.0.0/16 -j SNAT --to-source 10.4.1.1
                iptables -t nat -A PREROUTING -d 10.6.1.1 -p icmp -j DNAT --to-destination 10.2.1.1
                iptables -t nat -A PREROUTING -d 10.6.1.1 -p tcp --dport 80 -j DNAT --to-destination 10.2.1.1
                ;;
            sswitch2)
                echo "setting" $2
                sysctl -w net.ipv4.ip_forward=1
                iptables -t nat -A POSTROUTING -s 10.1.0.0/16 -j SNAT --to-source 10.5.1.1
                iptables -t nat -A PREROUTING -d 10.6.1.1 -p icmp -j DNAT --to-destination 10.3.1.1
                iptables -t nat -A PREROUTING -d 10.6.1.1 -p tcp --dport 80 -j DNAT --to-destination 10.3.1.1
                ;;
            *)
                echo "unknown" $2
                ;;
        esac
        ;;
    sv)
        # Run a throwaway HTTP backend to answer the load-balanced requests.
        python -m SimpleHTTPServer 80
        ;;
    wget)
        # Exercise the VIP from a client.
        wget -O - 10.6.1.1
        ;;
    rm)
        # Tear down the piece named by $2.
        case "$2" in
            rt)
                curl -X DELETE -d '{"route_id":"4"}' http://localhost:8080/router/$s1_id
                curl -X DELETE -d '{"route_id":"4"}' http://localhost:8080/router/$s2_id
                ;;
            ng)
                curl -X DELETE -d '{"niagara_id":"all"}' http://localhost:8080/router/$hswitch_id
                ;;
            sswitch1)
                iptables -t nat -D POSTROUTING -s 10.0.0.0/16 -j SNAT --to-source 10.4.1.1
                iptables -t nat -D PREROUTING -d 10.6.1.1 -p icmp -j DNAT --to-destination 10.2.1.1
                ;;
            sswitch2)
                iptables -t nat -D POSTROUTING -s 10.1.0.0/16 -j SNAT --to-source 10.5.1.1
                iptables -t nat -D PREROUTING -d 10.6.1.1 -p icmp -j DNAT --to-destination 10.3.1.1
                ;;
        esac
        ;;
    *)
        echo "options = [rt | ng | sw {sswitch1, sswitch2} | sv | wget | rm {rt, ng, sswitch1, sswitch2}]"
        ;;
esac
| true
|
d43ffee9e4b22cdeb7c9ee2ee29206cc73b3c2cf
|
Shell
|
SPriyal/work
|
/github/script/shell/ApipooldCtrl.sh
|
UTF-8
| 877
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Control script for the apipoold daemon: start | stop | restart |
# kill | loadcfg.
# NOTE: do_kill relies on bash's /dev/tcp redirection, which plain POSIX
# /bin/sh (e.g. dash) does not provide — hence the bash shebang.

ip_name='127.0.0.1'

# Ask the daemon to shut down over its control port, give it a moment,
# then force-kill anything left over.
do_kill() {
    echo shutdown > /dev/tcp/${ip_name}/8014
    sleep 2
    killall -9 apipoold >/dev/null 2>&1
}

do_stop() {
    do_kill
}

# Back up the current binary with a timestamp, then launch the daemon
# in the background with its config file.
do_start() {
    tempdate=$(date +%Y%m%d'-'%T)
    # cp -a ../log/*log ../log/logbak
    cp ./apipoold ./apipoold.$tempdate
    ./apipoold ./apipoold.conf &
}

# Signal the daemon to reload its configuration.
do_loadcfg() {
    # BUG FIX: the signal is USR1 (SIGUSR1); "-USER1" is not a valid
    # signal name, so the old command was a silent no-op.
    killall -USR1 apipoold >/dev/null 2>&1
}

case "$1" in
'start')
    do_start
    ;;
'restart')
    rm -rf ../log/*.log
    do_kill
    do_start
    ;;
'stop')
    do_stop
    ;;
'kill')
    do_kill
    ;;
'loadcfg')
    do_loadcfg
    ;;
*)
    echo -n "usage :"$0 " start | stop | restart | loadcfg "
    echo
esac
| true
|
7158a3a489d70f079355b2a6eddd9687937b3eb3
|
Shell
|
Schibsted-Tech-Polska/svd.travis-commands
|
/bin/tasks/run-php-cs-fixer.sh
|
UTF-8
| 272
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e

# Skip this step if a previous Travis stage has already failed.
if [ "$TRAVIS_TEST_RESULT" = "1" ]; then
    exit 0
fi

# Run PHP-CS-Fixer in check-only mode; a non-zero exit (propagated by
# set -e) fails the build when files would need reformatting.
PHPCSFIXER_DIR="$TRAVIS_BUILD_DIR/vendor/friendsofphp/php-cs-fixer"
"$PHPCSFIXER_DIR/php-cs-fixer" --dry-run --verbose fix
| true
|
047909e07f8bf8f86ac375fbec74a752c70b5315
|
Shell
|
luckyraul/puppet-zabbix
|
/templates/scripts/iostat-cron.sh.erb
|
UTF-8
| 719
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Zabbix monitoring helper (cron job).
#
# Gathers extended iostat numbers into a file that the Zabbix agent
# reads later; collecting them live is not possible because a 10 second
# iostat run would exceed the agent's timeout.

state_dir=/var/lib/zabbix-agent-ops
final_file=$state_dir/iostat-data
scratch_file=$state_dir/iostat-data.tmp

mkdir -p "$state_dir"

# `iostat -kx 10 2` prints two reports:
#   1st - statistics since boot (useless to consumers)
#   2nd - statistics over the last 10 seconds (the one we want)
# Write to a scratch file first and rename into place so the agent never
# reads a half-written source file.
iostat -kx 10 2 > "$scratch_file"
mv "$scratch_file" "$final_file"
| true
|
ffc2bd1cb188c4bfcd83393fa13f1f49d7e313f1
|
Shell
|
airstack/core
|
/core/build/core-slashpackage-install
|
UTF-8
| 1,618
| 3.96875
| 4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
###*
# Slashpackage install helper script.
#
# Use this function to install packages from an Airstack compatible slashpackage collection.
#
# Use: core-slashpackage-install PACKAGE_NAME_1 PACKAGE_NAME_2
# Examples: core-slashpackage-install haproxy
# core-slashpackage-install openssh-server wget
#
# @return 0 on success
###
###*
# Slashpackage install helper function.
#
# Use: core_slashpackage_install PACKAGE_NAME_1 PACKAGE_NAME_2
# Examples: core_slashpackage_install haproxy
# core_slashpackage_install openssh-server wget
#
# @param package Space separated list of package names
# @return 0 on success
# @see docs/references.md#apt
###
###*
# Download and run each package's install script from the Airstack
# slashpackage collection on GitHub, working in a throwaway /tmp dir.
###
core_slashpackage_install() {
  # TODO: decide what to do about private installs that require a token.
  local pkg; local work_dir; local collection
  for pkg in $*; do
    work_dir=/tmp/$pkg
    # Collection name is the part before the first dash, e.g.
    # "openssh-server" lives under "openssh".
    collection=${pkg%%-*}
    mkdir -p $work_dir
    cd $work_dir
    set -e
    wget -O ./install https://raw.githubusercontent.com/airstack/package/master/$collection/$pkg/install
    chmod +x ./install
    ./install $pkg
    set +e
    cd /tmp
    rm -rf $work_dir
  done
}
# Abort with an error message when the previous command failed.
# $2 (optional) overrides the exit code; defaults to 113.
core_slashpackage_install_error_check() {
  # BUG FIX: capture $? immediately — the original read it *inside* the
  # if-branch, after the [ test had already reset it to 0, so the value
  # was always wrong (and unused).
  local err_value="$?"
  if [ "$err_value" -ne 0 ]; then
    printf "%s\n" "[ERROR] core_slashpackage_install failed" >&2
    exit "${2-113}"
  fi
}
# Main
# Only run script if params are provided.
# Useful for sourcing the file for testing or direct access to functions.
# NOTE(review): the error check reads $? from the install call, but the
# installer enables `set -e` internally, so a failing install usually
# terminates the script before the check runs — confirm intent.
if [ "$#" -gt 0 ]; then
core_slashpackage_install "$@"
core_slashpackage_install_error_check
fi
| true
|
e5390c56770b1d8898018371f96b8738918cdb89
|
Shell
|
oiramario/nanopi-m4-ubuntu-docker
|
/run.sh
|
UTF-8
| 439
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the rk3399 build container with the host work trees mounted in.
#
#set -x

# Host directories shared with the container (quoted: $(pwd) may contain
# spaces).
NANOPI4_DISTRO="$(pwd)/distro"
[ ! -d "$NANOPI4_DISTRO" ] && mkdir -p "$NANOPI4_DISTRO"

NANOPI4_DEVKIT=/opt/devkit
[ ! -d "$NANOPI4_DEVKIT" ] && sudo mkdir -p "$NANOPI4_DEVKIT"

docker run -it \
    -v "$NANOPI4_DISTRO":/root/distro \
    -v "$NANOPI4_DEVKIT":/root/devkit \
    -v "$(pwd)"/scripts:/root/scripts:ro \
    -v /etc/localtime:/etc/localtime:ro \
    --privileged \
    rk3399:latest \
    /bin/bash #./make.sh rootfs
| true
|
8bea0bb3bae07019f0b970b488826593a45f05dc
|
Shell
|
HariTharmalingam/tharmalingam_harishanth_M1_2020_admin
|
/Exercice_6/script.sh
|
UTF-8
| 963
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash

# Global flag describing whether the group passed to the last listGroups
# call exists in /etc/group: "exists" or "dontExists".
groupStatus="notDefined"

# listGroups GROUP ACTION
# Looks GROUP up in /etc/group, then performs ACTION:
#   add    - create the group unless it already exists
#   delete - remove the group if it exists
# BUG FIXES vs. the original:
#   - assignments had spaces around '=' ("groupStatus = ..."), which
#     bash parses as running a command named "groupStatus";
#   - "$groupStatus = ..." tried to *execute* the variable's value;
#   - the scan let every non-matching line overwrite the result, so a
#     match was lost unless it was the LAST entry; we now stop at the
#     first match.
listGroups() {
	groupStatus="dontExists"
	while IFS=: read -r name _; do
		if [ "$1" = "$name" ]; then
			groupStatus="exists"
			break
		fi
	done < /etc/group

	if [ "$groupStatus" = "dontExists" ] && [ "$2" = "add" ]; then
		sudo groupadd "$1"
		echo "Le groupe a été ajouté"
	elif [ "$groupStatus" = "exists" ] && [ "$2" = "add" ]; then
		echo "$1 existe déjà"
	elif [ "$groupStatus" = "exists" ] && [ "$2" = "delete" ]; then
		sudo groupdel "$1"
		echo "$1 supprimé de /etc/group"
	else
		echo "$1 n'existe pas."
	fi
}
# Interactive entry point: ask whether to add or delete a group, then
# prompt for the group name and delegate to listGroups.
printf "Voulez vous ajouter ou supprimer un groupe ? (add/delete) ? "
read choice
case $choice in
add)
printf "Quel groupe voulez vous ajouter ? "
read groupName
listGroups $groupName "add"
;;
delete)
printf "Quel groupe voulez vous supprimer ? "
read groupName
listGroups $groupName "delete"
esac
| true
|
94c2f6a1c19e1c6282bfb9e90fe137ba9b88ac77
|
Shell
|
akshshar/xr-app-manager
|
/src/installhelpers/helper1/setup_vrfforwarding.sh
|
UTF-8
| 3,053
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# /etc/init.d/setup_vrfforwarding.sh
# Subsystem file for "Vrf forwarding" service
#
# chkconfig: 2345 96 05
# description: Port forwarding across vrfs/netns
#
# processname: VrfForwardingService
# config: /misc/app_host/helpers/vrf_forwarding/config.json
# pidfile: /var/run/vrf_forwarding.pid
# source function library
. /etc/rc.d/init.d/functions
# pull in sysconfig settings
# Daemon identity and launch parameters.
# NOTE(review): the "config:" header above says /misc/app_host/helpers/…
# while DAEMON_INPUT_JSON points at /misc/app_host/installhelpers/… —
# confirm which path is canonical.
NAME=vrf_forwarding
PIDFILE=/var/run/$NAME.pid
DAEMON="/misc/app_host/installhelpers/vrf_forwarding/vrf_forwarding.py"
DAEMON_INPUT_JSON="/misc/app_host/installhelpers/vrf_forwarding/config.json"
DAEMON_ARGS=" --json-config $DAEMON_INPUT_JSON"
DAEMON_USER="root"
do_start() {
# Return
# 0 if daemon has been started
# 1 if daemon was already running
# 2 if daemon could not be started
echo "Starting port forwarding across vrfs based on input config file"
# Already-running guard: only checks that the pidfile exists, not that
# the recorded process is still alive (a stale pidfile blocks startup).
if [ -f $PIDFILE ]; then
echo "VRF forwarding Service already running: see $PIDFILE. Current PID: $(cat $PIDFILE)"
return 1
fi
# Launch the Python daemon in the background as $DAEMON_USER, letting
# start-stop-daemon create the pidfile.
start-stop-daemon --start --make-pidfile --background --pidfile $PIDFILE --quiet \
--user $DAEMON_USER --startas $DAEMON -- $DAEMON_ARGS \
|| return 2
echo "OK"
}
# Stop the daemon.
# Return
#   0 if daemon has been stopped
#   1 if daemon was already stopped
#   2 if daemon could not be stopped
#   other if a failure occurred
do_stop() {
    # TERM first, escalate to KILL after 30s; --oknodo makes "already
    # stopped" a success (status 0/1 rather than an error).
    start-stop-daemon --signal SIGTERM --stop --quiet --retry=TERM/30/KILL/5 --oknodo --pidfile $PIDFILE -- $DAEMON_ARGS
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    # Remove the pidfile so a later do_start is not blocked by it.
    rm -f $PIDFILE
    return "$RETVAL"
    # BUG FIX: the original had an `echo "OK"` here, after the return —
    # unreachable dead code; removed.
}
# Init-style dispatch: start / stop / restart.
case "$1" in
start)
do_start
case "$?" in
0|1) echo -ne "VRF forwarding service started successfully\n" ;;
2) echo -ne "Failed to start VRF forwarding service \n" ;;
esac
;;
stop)
do_stop
case "$?" in
0|1) echo -ne "VRF forwarding Service stopped successfully\n" ;;
2) echo -ne "Failed to stop VRF forwarding Service \n" ;;
esac
;;
restart)
# NOTE(review): $DESC is never defined in this script, so this prints
# an empty first word — probably copied from a Debian skeleton.
echo "Restarting $DESC" "$NAME"
do_stop
case "$?" in
0|1)
echo -ne "VRF forwarding Service stopped successfully.\n"
do_start
case "$?" in
0|1) echo "VRF forwarding Service started successfully" ;;
*) echo "Failed to start VRF forwarding Service " ;; # Failed to start
esac
;;
*)
# Failed to stop
echo "Failed to stop VRF forwarding Service"
exit 1
;;
esac
;;
*)
echo "Usage: $0 {start|stop|restart}"
;;
esac
# NOTE(review): unconditional success hides start/stop failures from the
# init system (only the restart stop-failure path exits 1) — confirm
# this is intentional.
exit 0
|
e2313849b08c5df523859191851d9162066e9d73
|
Shell
|
OwenLiGithub/nocalhost
|
/scripts/build/dep/installer-job
|
UTF-8
| 620
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Build and tag the dep-installer-job Docker image.
set -eu -o pipefail

# Version from the nearest git tag (empty when no tag exists; the
# trailing `|| true` keeps set -e/pipefail from aborting in that case).
VERSION=${VERSION:-$(git describe 2>/dev/null | sed 's/refs\/tags\///' | sed 's/\(.*\)-.*/\1/' | sed 's/-[0-9]*$//' || true)}
# Full 40-char commit id, with -dirty suffix for a modified work tree.
# (Consistency fix: use $(...) throughout instead of mixing backticks.)
GIT_COMMIT_SHA=$(git describe --match=NeVeRmAtCh --always --abbrev=40 --dirty)

DOCKERFILE="deployments/dep-install-job/Dockerfile"
TARGET="dep-installer-job"

BRANCH=$(git rev-parse --abbrev-ref HEAD)
DEVGITCOMMIT=$(git rev-parse HEAD)
BUILDARG=${DEVGITCOMMIT}

# A detached HEAD (e.g. a CI tag build) reports "HEAD"; in that case use
# the tag-derived version instead of the commit hash as the build arg.
if [ "${BRANCH}" = "HEAD" ]; then
    BUILDARG=${VERSION}
fi

docker build --build-arg dep_version="${BUILDARG}" -t "${TARGET}" -f "${DOCKERFILE}" .
docker tag "${TARGET}:latest" "${TARGET}:${GIT_COMMIT_SHA}"
| true
|
770be0581d17cc86b6a71b7be4ede79f9b21ca04
|
Shell
|
stevenctl/env
|
/pre-commit
|
UTF-8
| 578
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
# Git pre-commit hook: run Go formatters over the staged files.

# Emit the staged (non-deleted) file paths as one space-separated line;
# the unquoted command substitution inside echo performs the
# newline-to-space join intentionally.
changed() {
echo $(git --no-pager diff --name-status --no-color --cached | awk '$1 != "D" { print $2 }')
}
# Filter the staged-file list down to hand-written Go sources: drop
# anything under vendor and generated files (.pb.go, .gen.go).
# NOTE(review): the grep patterns are unanchored regexes ('.' matches
# any character), so '.go$' also matches names like "cargo" — harmless
# for typical repos but worth knowing.
listgo() {
echo $(changed \
| grep -v vendor \
| grep '.go$' \
| grep -v '.pb.go' \
| grep -v '.gen.go')
}
# Run goimports -w over the staged Go files, if it is installed.
fix_goimports() {
  # BUG FIX: discard the path that `command -v` prints (it polluted the
  # hook's stdout) and `return` instead of `exit` — the old `exit 0`
  # aborted the whole hook, silently skipping fix_gofmt.
  command -v goimports >/dev/null 2>&1 || return 0
  gofiles=$(listgo)
  [ -z "$gofiles" ] && return 0
  goimports -w $(echo "$gofiles" | xargs)
}
# Run gofmt -w over the staged Go files (gated on gofumpt's presence —
# see TODO below).
fix_gofmt() {
  # BUG FIX: discard the path that `command -v` prints and `return`
  # instead of `exit`, keeping the hook's control flow in main scope.
  command -v gofumpt >/dev/null 2>&1 || return 0
  gofiles=$(listgo)
  [ -z "$gofiles" ] && return 0
  # TODO use gofumpt
  gofmt -w $(echo "$gofiles" | xargs)
}
# Apply both formatters to the staged files.
fix_goimports
fix_gofmt
| true
|
adeb05fc3691ff8d83d17e0ce82f8bec0aa7c330
|
Shell
|
kdaily/Agora
|
/get-data-local.sh
|
UTF-8
| 499
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
# Fetch the Agora data files from Synapse into ../data/.
[ -d ../data/ ] || mkdir ../data/

# Version key/value should be on its own line in package.json.
# (Fixes: useless `cat | grep`; `tr -d '[[:space:]]'` also deleted
# literal '[' and ']' characters — the class spelling for tr is
# '[:space:]'.)
DATA_VERSION=$(grep data-version package.json | head -1 | awk -F: '{ print $2 }' | sed 's/[",]//g' | tr -d '[:space:]')

# The manifest (syn13363290) is a CSV of id,version rows; skip its
# header and download each file at the recorded version.
synapse cat --version "$DATA_VERSION" syn13363290 | tail -n +2 | while IFS=, read -r id version; do
    synapse get --downloadLocation ../data/ -v "$version" "$id"
done

[ -d ../data/team_images/ ] || mkdir ../data/team_images/
synapse get -r --downloadLocation ../data/team_images/ syn12861877
| true
|
f4d5af971a442e1fa59c32b9ba07326cdf8aad18
|
Shell
|
rongcheng2017/coolcar
|
/server/genProto.sh
|
UTF-8
| 1,209
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# genProto DOMAIN [SKIP_GATEWAY]
# Generates Go gRPC stubs for ./DOMAIN/api/DOMAIN.proto and, unless
# SKIP_GATEWAY is non-empty, the grpc-gateway stub plus protobuf.js
# bindings for the WeChat mini-program client.
genProto() {
    local DOMAIN=$1
    local SKIP_GATE_WAY=$2
    local PROTO_PATH=./${DOMAIN}/api
    local GO_OUT_PATH=./${DOMAIN}/api/gen/v1
    mkdir -p "$GO_OUT_PATH"
    protoc -I="$PROTO_PATH" --go_out=paths=source_relative:"$GO_OUT_PATH" ${DOMAIN}.proto
    protoc -I="$PROTO_PATH" --go-grpc_out=paths=source_relative:"$GO_OUT_PATH" ${DOMAIN}.proto
    if [ -n "$SKIP_GATE_WAY" ]; then
        return
    fi
    protoc -I="$PROTO_PATH" --grpc-gateway_out=paths=source_relative,grpc_api_configuration="$PROTO_PATH/${DOMAIN}.yaml:$GO_OUT_PATH" ${DOMAIN}.proto

    local PBTS_BIN_DIR=../wx/miniprogram/node_modules/.bin
    local PBTS_OUT_DIR=../wx/miniprogram/service/proto_gen/${DOMAIN}
    mkdir -p "$PBTS_OUT_DIR"
    "$PBTS_BIN_DIR"/pbjs -t static -w es6 "$PROTO_PATH/${DOMAIN}.proto" --no-create --no-decode --no-encode --no-verify --no-delimited --force-number -o "$PBTS_OUT_DIR/${DOMAIN}_pb_tmp.js"
    # Prepend the protobufjs import to the generated module.
    # BUG FIX: `echo '...\n'` wrote a literal backslash-n (bash echo does
    # not interpret escapes without -e); printf emits the intended
    # trailing blank line.
    printf '%s\n\n' 'import * as $protobuf from "protobufjs";' > "$PBTS_OUT_DIR/${DOMAIN}_pb.js"
    cat "$PBTS_OUT_DIR/${DOMAIN}_pb_tmp.js" >> "$PBTS_OUT_DIR/${DOMAIN}_pb.js"
    rm "$PBTS_OUT_DIR/${DOMAIN}_pb_tmp.js"
    "$PBTS_BIN_DIR"/pbts -o "$PBTS_OUT_DIR/${DOMAIN}_pb.d.ts" "$PBTS_OUT_DIR/${DOMAIN}_pb.js"
}
# Generate stubs for each service; blob passes a second argument, which
# makes genProto skip the HTTP gateway generation for it.
genProto auth
genProto rental
genProto blob 1
genProto car
| true
|
81c1cc37788a2306b56178769d258a491b342fd0
|
Shell
|
yajamon/dotfiles
|
/bin/build-latest-vim
|
UTF-8
| 716
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Update the ghq-managed Vim clone to origin/master, rebuild it with a
# full feature set, and reinstall under /usr/local.

readonly VIMREPO="$(ghq root)/github.com/vim/vim"

if ! test -d "$VIMREPO"; then
    echo "Not found $VIMREPO" >&2   # typo fix: was "Not foind"
    exit 1
fi

cd "$VIMREPO" || exit 1
git fetch --all --prune

# Nothing new upstream: ask before rebuilding anyway.
if [ "$(git rev-parse master)" = "$(git rev-parse origin/master)" ]; then
    param=""
    while [ -z "$param" ]; do
        echo -n "Already up-to-date. continue?(yN): " >&2
        read -r param _trash
    done
    if [ "$param" != "y" ]; then
        exit 0
    fi
fi

git switch master && git merge --ff origin/master

make distclean
./configure --with-features=huge \
    --enable-perlinterp \
    --enable-rubyinterp \
    --enable-luainterp \
    --enable-fail-if-missing \
    --prefix=/usr/local
make
sudo make uninstall
sudo make install
| true
|
2a7365f1155d97659667e84b1dd0a3af9878ab75
|
Shell
|
mnvoh/OpenStreetMap-Docker
|
/postgres/docker-entrypoint.sh
|
UTF-8
| 1,163
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# This script will run as the postgres user due to the Dockerfile USER directive
set -e
# Setup postgres CONF file
source /setup-conf.sh
# Setup ssl
source /setup-ssl.sh
source /setup-database.sh
# Setup pg_hba.conf
source /setup-pg_hba.sh
# Running extended script or sql if provided.
# Useful for people who extends the image.
# NOTE(review): ${psql[@]}, $CONF, $DATADIR, $SETVARS and $POSTGRES are
# presumably defined by the setup-*.sh scripts sourced above — confirm.
echo
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
*.sql) echo "$0: running $f"; "${psql[@]}" < "$f"; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
# If no arguments passed to entrypoint, then run postgres by default
# (blocks in the foreground for the lifetime of the container).
if [ $# -eq 0 ];
then
echo "Postgres initialisation process completed .... restarting in foreground"
cat /tmp/postgresql.conf > ${CONF}
su - postgres -c "$SETVARS $POSTGRES -D $DATADIR -c config_file=$CONF"
fi
# If arguments passed, run postgres with these arguments
# This will make sure entrypoint will always be executed
if [ "${1:0:1}" = '-' ]; then
# append postgres into the arguments
set -- postgres "$@"
fi
exec su - "$@"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.