blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8c4a360ac47df0cdf1f0a6ce2439098d116d4502
|
Shell
|
tixel/developer-exercises
|
/run-all-tests-added-to-script.sh
|
UTF-8
| 3,242
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
## Runs every exercise test suite in sequence and aborts on the first failure.

# Remember where we started so each suite can return reliably, even if a
# future suite lives at a different depth.
BASE_DIR=$(pwd)

# run_suite <exercise-dir>
# Builds the exercise's wasm target, packs the DNA, installs the JS test
# deps and runs the integration tests. Prints the test output and exits
# the whole script with status 1 if any test failed.
#
# Bugs fixed vs. the copy-pasted original:
#  - every `cd` is checked; a failed `cd` used to let later suites build
#    and test in the wrong directory,
#  - `output` is reset per suite; previously a failed `npm install` left
#    OUTPUT/FAILED holding the *previous* suite's results, so a broken
#    suite could print "TESTS PASSED".
run_suite() {
  local dir=$1
  cd "$BASE_DIR/$dir" || { echo "cannot cd to $dir"; exit 1; }
  CARGO_TARGET_DIR=target cargo build --release --target wasm32-unknown-unknown
  hc dna pack workdir
  cd tests || { echo "cannot cd to $dir/tests"; exit 1; }
  local output=""
  npm install && output=$(npm test)
  # tap only prints a "# fail " summary line when at least one test failed,
  # so its mere presence marks a failing suite.
  local failed
  failed=$(echo "$output" | grep -o '# fail ')
  cd "$BASE_DIR" || exit 1
  echo "$output" # print output of tests to investigate
  if [[ -n $failed ]]; then
    echo "TESTS FAILED"
    exit 1
  fi
  echo "TESTS PASSED"
}

## basic exercises
run_suite basic/0.entries
run_suite basic/1.hashes
run_suite basic/2.headers
# 3.elements --> is just cargo test, no integration test
cd basic/3.elements || exit 1
cargo test
cd "$BASE_DIR" || exit 1
run_suite basic/4.links

## intermediate exercises
run_suite intermediate/1.paths
run_suite intermediate/2.remote-call
run_suite intermediate/3.capability-tokens
| true
|
8b5462b2b444092aacf3aebf6c472ed1333720b1
|
Shell
|
deenario/shopping-cart-blockchain
|
/tribe-network/organizations/fabric-ca/registerEnroll.sh
|
UTF-8
| 14,480
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Enroll the tribe1 CA admin, register peer0 / user1 / the org admin with
# the tribe1 CA (localhost:7054), and lay out the resulting MSP and TLS
# material under organizations/peerOrganizations/tribe1.example.com.
# Fix vs. original: every path expansion is quoted (SC2086) and the three
# repeated path roots are hoisted into locals.
function createtribe1() {
  local org_dir="${PWD}/organizations/peerOrganizations/tribe1.example.com"
  local peer_dir="${org_dir}/peers/peer0.tribe1.example.com"
  local ca_cert="${PWD}/organizations/fabric-ca/tribe1/tls-cert.pem"

  infoln "Enrolling the CA admin"
  mkdir -p "${org_dir}/"
  export FABRIC_CA_CLIENT_HOME="${org_dir}/"

  set -x
  fabric-ca-client enroll -u https://admin:adminpw@localhost:7054 --caname ca-tribe1 --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  # NodeOUs config mapping certificate OUs to Fabric roles. The enroll
  # above creates ${org_dir}/msp, so the redirect target exists.
  echo 'NodeOUs:
Enable: true
ClientOUIdentifier:
Certificate: cacerts/localhost-7054-ca-tribe1.pem
OrganizationalUnitIdentifier: client
PeerOUIdentifier:
Certificate: cacerts/localhost-7054-ca-tribe1.pem
OrganizationalUnitIdentifier: peer
AdminOUIdentifier:
Certificate: cacerts/localhost-7054-ca-tribe1.pem
OrganizationalUnitIdentifier: admin
OrdererOUIdentifier:
Certificate: cacerts/localhost-7054-ca-tribe1.pem
OrganizationalUnitIdentifier: orderer' >"${org_dir}/msp/config.yaml"

  infoln "Registering peer0"
  set -x
  fabric-ca-client register --caname ca-tribe1 --id.name peer0 --id.secret peer0pw --id.type peer --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  infoln "Registering user"
  set -x
  fabric-ca-client register --caname ca-tribe1 --id.name user1 --id.secret user1pw --id.type client --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  infoln "Registering the org admin"
  set -x
  fabric-ca-client register --caname ca-tribe1 --id.name tribe1admin --id.secret tribe1adminpw --id.type admin --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  infoln "Generating the peer0 msp"
  set -x
  fabric-ca-client enroll -u https://peer0:peer0pw@localhost:7054 --caname ca-tribe1 -M "${peer_dir}/msp" --csr.hosts peer0.tribe1.example.com --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null
  cp "${org_dir}/msp/config.yaml" "${peer_dir}/msp/config.yaml"

  infoln "Generating the peer0-tls certificates"
  set -x
  fabric-ca-client enroll -u https://peer0:peer0pw@localhost:7054 --caname ca-tribe1 -M "${peer_dir}/tls" --enrollment.profile tls --csr.hosts peer0.tribe1.example.com --csr.hosts localhost --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  # Copy the generated TLS material to the fixed names the peer expects
  # (the glob stays unquoted so it expands; each dir holds one file).
  cp "${peer_dir}"/tls/tlscacerts/* "${peer_dir}/tls/ca.crt"
  cp "${peer_dir}"/tls/signcerts/* "${peer_dir}/tls/server.crt"
  cp "${peer_dir}"/tls/keystore/* "${peer_dir}/tls/server.key"

  mkdir -p "${org_dir}/msp/tlscacerts"
  cp "${peer_dir}"/tls/tlscacerts/* "${org_dir}/msp/tlscacerts/ca.crt"
  mkdir -p "${org_dir}/tlsca"
  cp "${peer_dir}"/tls/tlscacerts/* "${org_dir}/tlsca/tlsca.tribe1.example.com-cert.pem"
  mkdir -p "${org_dir}/ca"
  cp "${peer_dir}"/msp/cacerts/* "${org_dir}/ca/ca.tribe1.example.com-cert.pem"

  infoln "Generating the user msp"
  set -x
  fabric-ca-client enroll -u https://user1:user1pw@localhost:7054 --caname ca-tribe1 -M "${org_dir}/users/User1@tribe1.example.com/msp" --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null
  cp "${org_dir}/msp/config.yaml" "${org_dir}/users/User1@tribe1.example.com/msp/config.yaml"

  infoln "Generating the org admin msp"
  set -x
  fabric-ca-client enroll -u https://tribe1admin:tribe1adminpw@localhost:7054 --caname ca-tribe1 -M "${org_dir}/users/Admin@tribe1.example.com/msp" --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null
  cp "${org_dir}/msp/config.yaml" "${org_dir}/users/Admin@tribe1.example.com/msp/config.yaml"
}
# Enroll the tribe2 CA admin, register peer0 / user1 / the org admin with
# the tribe2 CA (localhost:8054), and lay out the resulting MSP and TLS
# material under organizations/peerOrganizations/tribe2.example.com.
# Fix vs. original: every path expansion is quoted (SC2086) and the three
# repeated path roots are hoisted into locals.
function createtribe2() {
  local org_dir="${PWD}/organizations/peerOrganizations/tribe2.example.com"
  local peer_dir="${org_dir}/peers/peer0.tribe2.example.com"
  local ca_cert="${PWD}/organizations/fabric-ca/tribe2/tls-cert.pem"

  infoln "Enrolling the CA admin"
  mkdir -p "${org_dir}/"
  export FABRIC_CA_CLIENT_HOME="${org_dir}/"

  set -x
  fabric-ca-client enroll -u https://admin:adminpw@localhost:8054 --caname ca-tribe2 --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  # NodeOUs config mapping certificate OUs to Fabric roles. The enroll
  # above creates ${org_dir}/msp, so the redirect target exists.
  echo 'NodeOUs:
Enable: true
ClientOUIdentifier:
Certificate: cacerts/localhost-8054-ca-tribe2.pem
OrganizationalUnitIdentifier: client
PeerOUIdentifier:
Certificate: cacerts/localhost-8054-ca-tribe2.pem
OrganizationalUnitIdentifier: peer
AdminOUIdentifier:
Certificate: cacerts/localhost-8054-ca-tribe2.pem
OrganizationalUnitIdentifier: admin
OrdererOUIdentifier:
Certificate: cacerts/localhost-8054-ca-tribe2.pem
OrganizationalUnitIdentifier: orderer' >"${org_dir}/msp/config.yaml"

  infoln "Registering peer0"
  set -x
  fabric-ca-client register --caname ca-tribe2 --id.name peer0 --id.secret peer0pw --id.type peer --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  infoln "Registering user"
  set -x
  fabric-ca-client register --caname ca-tribe2 --id.name user1 --id.secret user1pw --id.type client --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  infoln "Registering the org admin"
  set -x
  fabric-ca-client register --caname ca-tribe2 --id.name tribe2admin --id.secret tribe2adminpw --id.type admin --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  infoln "Generating the peer0 msp"
  set -x
  fabric-ca-client enroll -u https://peer0:peer0pw@localhost:8054 --caname ca-tribe2 -M "${peer_dir}/msp" --csr.hosts peer0.tribe2.example.com --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null
  cp "${org_dir}/msp/config.yaml" "${peer_dir}/msp/config.yaml"

  infoln "Generating the peer0-tls certificates"
  set -x
  fabric-ca-client enroll -u https://peer0:peer0pw@localhost:8054 --caname ca-tribe2 -M "${peer_dir}/tls" --enrollment.profile tls --csr.hosts peer0.tribe2.example.com --csr.hosts localhost --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  # Copy the generated TLS material to the fixed names the peer expects
  # (the glob stays unquoted so it expands; each dir holds one file).
  cp "${peer_dir}"/tls/tlscacerts/* "${peer_dir}/tls/ca.crt"
  cp "${peer_dir}"/tls/signcerts/* "${peer_dir}/tls/server.crt"
  cp "${peer_dir}"/tls/keystore/* "${peer_dir}/tls/server.key"

  mkdir -p "${org_dir}/msp/tlscacerts"
  cp "${peer_dir}"/tls/tlscacerts/* "${org_dir}/msp/tlscacerts/ca.crt"
  mkdir -p "${org_dir}/tlsca"
  cp "${peer_dir}"/tls/tlscacerts/* "${org_dir}/tlsca/tlsca.tribe2.example.com-cert.pem"
  mkdir -p "${org_dir}/ca"
  cp "${peer_dir}"/msp/cacerts/* "${org_dir}/ca/ca.tribe2.example.com-cert.pem"

  infoln "Generating the user msp"
  set -x
  fabric-ca-client enroll -u https://user1:user1pw@localhost:8054 --caname ca-tribe2 -M "${org_dir}/users/User1@tribe2.example.com/msp" --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null
  cp "${org_dir}/msp/config.yaml" "${org_dir}/users/User1@tribe2.example.com/msp/config.yaml"

  infoln "Generating the org admin msp"
  set -x
  fabric-ca-client enroll -u https://tribe2admin:tribe2adminpw@localhost:8054 --caname ca-tribe2 -M "${org_dir}/users/Admin@tribe2.example.com/msp" --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null
  cp "${org_dir}/msp/config.yaml" "${org_dir}/users/Admin@tribe2.example.com/msp/config.yaml"
}
# Enroll the orderer-org CA admin, register the orderer node and the
# orderer admin with the orderer CA (localhost:9054), and lay out the MSP
# and TLS material under organizations/ordererOrganizations/example.com.
# Fix vs. original: every path expansion is quoted (SC2086) and the three
# repeated path roots are hoisted into locals.
function createOrderer() {
  local org_dir="${PWD}/organizations/ordererOrganizations/example.com"
  local ord_dir="${org_dir}/orderers/orderer.example.com"
  local ca_cert="${PWD}/organizations/fabric-ca/ordererOrg/tls-cert.pem"

  infoln "Enrolling the CA admin"
  mkdir -p "${org_dir}"
  export FABRIC_CA_CLIENT_HOME="${org_dir}"

  set -x
  fabric-ca-client enroll -u https://admin:adminpw@localhost:9054 --caname ca-orderer --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  # NodeOUs config mapping certificate OUs to Fabric roles. The enroll
  # above creates ${org_dir}/msp, so the redirect target exists.
  echo 'NodeOUs:
Enable: true
ClientOUIdentifier:
Certificate: cacerts/localhost-9054-ca-orderer.pem
OrganizationalUnitIdentifier: client
PeerOUIdentifier:
Certificate: cacerts/localhost-9054-ca-orderer.pem
OrganizationalUnitIdentifier: peer
AdminOUIdentifier:
Certificate: cacerts/localhost-9054-ca-orderer.pem
OrganizationalUnitIdentifier: admin
OrdererOUIdentifier:
Certificate: cacerts/localhost-9054-ca-orderer.pem
OrganizationalUnitIdentifier: orderer' >"${org_dir}/msp/config.yaml"

  infoln "Registering orderer"
  set -x
  fabric-ca-client register --caname ca-orderer --id.name orderer --id.secret ordererpw --id.type orderer --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  infoln "Registering the orderer admin"
  set -x
  fabric-ca-client register --caname ca-orderer --id.name ordererAdmin --id.secret ordererAdminpw --id.type admin --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  infoln "Generating the orderer msp"
  set -x
  fabric-ca-client enroll -u https://orderer:ordererpw@localhost:9054 --caname ca-orderer -M "${ord_dir}/msp" --csr.hosts orderer.example.com --csr.hosts localhost --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null
  cp "${org_dir}/msp/config.yaml" "${ord_dir}/msp/config.yaml"

  infoln "Generating the orderer-tls certificates"
  set -x
  fabric-ca-client enroll -u https://orderer:ordererpw@localhost:9054 --caname ca-orderer -M "${ord_dir}/tls" --enrollment.profile tls --csr.hosts orderer.example.com --csr.hosts localhost --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null

  # Copy the generated TLS material to the fixed names the orderer expects
  # (the glob stays unquoted so it expands; each dir holds one file).
  cp "${ord_dir}"/tls/tlscacerts/* "${ord_dir}/tls/ca.crt"
  cp "${ord_dir}"/tls/signcerts/* "${ord_dir}/tls/server.crt"
  cp "${ord_dir}"/tls/keystore/* "${ord_dir}/tls/server.key"

  mkdir -p "${ord_dir}/msp/tlscacerts"
  cp "${ord_dir}"/tls/tlscacerts/* "${ord_dir}/msp/tlscacerts/tlsca.example.com-cert.pem"
  mkdir -p "${org_dir}/msp/tlscacerts"
  cp "${ord_dir}"/tls/tlscacerts/* "${org_dir}/msp/tlscacerts/tlsca.example.com-cert.pem"

  infoln "Generating the admin msp"
  set -x
  fabric-ca-client enroll -u https://ordererAdmin:ordererAdminpw@localhost:9054 --caname ca-orderer -M "${org_dir}/users/Admin@example.com/msp" --tls.certfiles "${ca_cert}"
  { set +x; } 2>/dev/null
  cp "${org_dir}/msp/config.yaml" "${org_dir}/users/Admin@example.com/msp/config.yaml"
}
| true
|
a4a2afa56122e97976771fc0c0f4635121823f5e
|
Shell
|
emkp/CSE538_FinalProject
|
/src/data_gen/lib/att2fst.sh
|
UTF-8
| 145
| 2.796875
| 3
|
[] |
no_license
|
# Compile every OpenFst text description (*.att) in the current directory
# into a binary FST with the same basename, using the shared symbol tables
# ins.txt / outs.txt.
for f in ./*.att
do
  # Unmatched glob: the pattern itself would be passed through literally,
  # so skip it instead of handing "./*.att" to fstcompile.
  [ -e "$f" ] || continue
  echo "Processing $f file..."
  f2=${f%.att} # strip the .att suffix (clearer than the old ${f%????})
  fstcompile --isymbols=ins.txt --osymbols=outs.txt "$f" "$f2.fst"
done
| true
|
b08885e913485283347cf0545edb6302dc115e6c
|
Shell
|
5GenCrypto/circ-obfuscation
|
/scripts/circuit-info.sh
|
UTF-8
| 1,838
| 3.640625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Prints circuit info in latex for a CSV file containing info produced by
# get-kappas.sh
#
set -e
# Input CSV; defaults to kappas.csv when no argument is given.
fname=${1:-kappas.csv}
# utils.sh (next to this script) provides the get_* field accessors used
# in the main loop below.
source "$(dirname "$0")/utils.sh"
# Global state shared with printline: emitted-row counter and the circuit
# name/mode currently being accumulated.
count=0
curname=
curmode=
# Emit one LaTeX table row for the circuit accumulated in the globals
# $curname/$curmode, with the column text held in $result, then advance:
# $curname takes the pending $name and the row counter is bumped. Odd rows
# get a \rowcol prefix for alternating shading; o2/o3 circuits get one or
# two starred superscripts after their name.
printline () {
    local shade=""
    (( count % 2 == 1 )) && shade="\\rowcol"
    local cell="\texttt{$curname}"
    case $curmode in
        o2) cell="$cell\$^{*}\$" ;;
        o3) cell="$cell\$^{**}\$" ;;
    esac
    echo "$shade $cell && $result"
    curname=$name
    count=$((count + 1))
}
# Main loop: each CSV line is one (circuit, mode) measurement. Rows for
# the same circuit are accumulated; a row is flushed via printline when
# the circuit name changes, and once more after the loop for the last one.
while read -r input; do
line=$(echo "$input" | tr -d ' ')
name=$(get_name "$line")
# Escape underscores so the name is valid LaTeX.
name=$(perl -e "\$line = \"$name\"; \$line =~ s/_/\\\_/g; print \$line")
# Skip the CSV header row and circuits excluded from the table.
if [[ $name == name || $name =~ ^aes1r\\_(3|5|6|7)$ || $name =~ ^f ]]; then
continue
fi
if [[ $name =~ ^prg || $name == "sbox" || $name =~ ^gf || $name == linearParts ]]; then
continue
fi
mode=$(get_mode "$line")
# Only modes o1/o2/o3 are reported.
if [[ $mode != o1 && $mode != o2 && $mode != o3 ]]; then
continue
fi
# First data row: start accumulating without flushing anything.
if [[ $curname == "" ]]; then
curname=$name
curmode=$mode
fi
# New circuit encountered: flush the row built for the previous one.
if [[ $name != "$curname" && $result != "" ]]; then
printline
fi
curmode=$mode
ninputs=$(get_ninputs "$line")
nconsts=$(get_nconsts "$line")
nouts=$(get_nouts "$line")
size=$(get_size "$line")
nmuls=$(get_nmuls "$line")
depth=$(get_depth "$line")
degree=$(get_degree "$line")
# Degrees longer than 6 digits are shown in scientific notation;
# otherwise wrapped in siunitx \num{} for formatting.
if (( ${#degree} > 6 )); then
degree=$(printf %.2e "$degree")
else
degree="\num{$degree}"
fi
kappa=$(get_kappa_mife "$line" | cut -d'|' -f2)
if [[ $kappa != "[overflow]" ]]; then
kappa="\num{$kappa}"
fi
# Column text consumed by printline; "&&" doubles as the cell separator.
result="$ninputs && $nconsts && $nouts && $size && $nmuls && $depth && $degree && $kappa \\\\"
done < "$fname"
# Flush the final accumulated row.
printline
| true
|
ffb1c7fecd471042ed5a9b396f4faee7a31ed1a1
|
Shell
|
lucaregini/poplog
|
/prelinked-v15.65/pop/src/mksyscomp
|
UTF-8
| 1,327
| 3.203125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#!/bin/sh
# --- Copyright University of Sussex 2008. All rights reserved. ----------
# File: C.unix/src/mksyscomp
# Purpose: Build saved images for POPC, POPLINK and POPLIBR
# Author: John Gibson, Jun 24 1988 (see revisions)
# Usage:
# mksyscomp [-d] [image ...]
# where 'image' is popc, poplink or poplibr
# Source the Poplog environment setup if it has not run yet; $popautolib
# is used as the sentinel that popenv.sh already executed.
if [ "$popautolib" = "" ]; then
echo "mksyscomp: doing pop/com/popenv to set environment vars"
. $usepop/pop/com/popenv.sh
fi
# -d enables pop_debugging inside the image build.
DEBUG=false
case "$1" in
-d) DEBUG=true
shift
;;
esac
# Build one saved image per remaining argument. The here-document feeds
# Pop-11 code to corepop; its delimiter is unquoted on purpose so the
# shell expands $popsrc, $DEBUG, $IMAGE and $popsys before corepop runs.
for IMAGE
do
corepop %nort %noinit << ****
lvars savedir = current_directory;
'$popsrc/syscomp' -> current_directory;
$DEBUG -> pop_debugging;
pop11_compile("make_$IMAGE");
savedir -> current_directory;
make_saved_image('$popsys/$IMAGE.psv');
sysexit();
****
done
# --- Revision History ---------------------------------------------------
# --- Aaron Sloman, Nov 6 2008 Changed corepop11 to corepop
# --- John Gibson, Jul 10 1993 Image now built in popsys on corepop11
# --- Robert John Duncan, Jan 9 1992
# Rewritten to allow multiple arguments and always to use safepop11
# --- Rob Duncan, Jun 18 1990
# Changed to poplib=/ (no "unset" on Ultrix)
# --- John Gibson, Jun 18 1990
# Added unset poplib
# --- John Gibson, Jul 11 1989
# Added -d option for debugging
# --- John Gibson, Jun 7 1989
# Added false -> pop_debugging
| true
|
19f106eeb6937cf3469fd7ae1c45bb612ea32082
|
Shell
|
apioff/IPTUNNELS
|
/kvm.sh
|
UTF-8
| 21,960
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# ANSI color codes for status messages used throughout the installer.
red='\033[0;31m'
green='\033[0;32m'
yellow='\033[0;33m'
plain='\033[0m'
cur_dir=$(pwd)

# The installer writes system config and loads kernel settings: it needs
# root and a real (non-OpenVZ) kernel.
[[ $EUID -ne 0 ]] && echo -e "${red}Error:${plain} This script must be run as root!" && exit 1
[[ -d "/proc/vz" ]] && echo -e "${red}Error:${plain} Your VPS is based on OpenVZ, which is not supported." && exit 1

# Detect the distribution family from the usual release files.
if [ -f /etc/redhat-release ]; then
    release="centos"
elif grep -Eqi "debian" /etc/issue; then
    release="debian"
elif grep -Eqi "ubuntu" /etc/issue; then
    release="ubuntu"
elif grep -Eqi "centos|red hat|redhat" /etc/issue; then
    release="centos"
elif grep -Eqi "debian" /proc/version; then
    release="debian"
elif grep -Eqi "ubuntu" /proc/version; then
    release="ubuntu"
elif grep -Eqi "centos|red hat|redhat" /proc/version; then
    release="centos"
else
    release=""
fi

export DEBIAN_FRONTEND=noninteractive
OS=$(uname -m)

# Public IPv4 of this host, with a local-interface fallback. The original
# test `[ $MYIP = "" ]` errored out when MYIP was empty ("unary operator
# expected"), so the fallback never ran; -z with quoting fixes that.
MYIP=$(curl -4 icanhazip.com)
if [ -z "$MYIP" ]; then
    MYIP=$(ifconfig | grep 'inet addr:' | grep -v inet6 | grep -vE '127\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d: -f2 | awk '{ print $1}' | head -1)
fi
# sed program used later to stamp this IP into downloaded config templates.
MYIP2="s/xxxxxxxxx/$MYIP/g"

ln -fs /usr/share/zoneinfo/Asia/Jakarta /etc/localtime
sed -i 's/AcceptEnv/#AcceptEnv/g' /etc/ssh/sshd_config
service ssh restart
# Refresh the package index, purge mail/DNS daemons this box does not
# need, and disable IPv6 both immediately and on every boot.
remove_unused_package_disableipv6(){
apt-get -y update --fix-missing
apt-get -y --purge remove sendmail*;
apt-get -y --purge remove bind9*;
apt-get -y purge sendmail*
apt-get -y remove sendmail*
echo 1 > /proc/sys/net/ipv6/conf/all/disable_ipv6
# Persist the toggle by inserting the line just before rc.local's last line.
sed -i '$ i\echo 1 > /proc/sys/net/ipv6/conf/all/disable_ipv6' /etc/rc.local
}
# Install every package the rest of the installer relies on (proxies,
# stunnel, dropbear, squid, fail2ban, compilers and build tools, and
# assorted monitoring utilities) in one apt transaction.
install_package_dependency(){
apt-get -y install wget curl monit git nano stunnel4 zlib1g-dev zlib1g vnstat apache2 bmon iftop htop nmap axel nano traceroute dnsutils bc nethogs less screen psmisc apt-file whois ptunnel ngrep mtr git unzip rsyslog debsums rkhunter fail2ban cmake make gcc libc6-dev dropbear apache2-utils squid3 --no-install-recommends gettext build-essential autoconf libtool libpcre3-dev asciidoc xmlto libev-dev libc-ares-dev automake haveged
apt-file update
}
# Fetch the SSH login banner and point DNS resolution at Cloudflare
# (1.1.1.1 / 1.0.0.1), re-applying the nameservers on every boot.
change_dns_resolver(){
wget -O /etc/issue.net "https://github.com/Trustdee/IPTUNNELS/raw/master/config/issue.net"
echo "nameserver 1.1.1.1" >> /etc/resolv.conf
echo "nameserver 1.0.0.1" >> /etc/resolv.conf
# NOTE(review): rc.local re-appends these on each boot, so resolv.conf
# accumulates duplicate nameserver lines over time.
sed -i '$ i\echo "nameserver 1.1.1.1" >> /etc/resolv.conf' /etc/rc.local
sed -i '$ i\echo "nameserver 1.0.0.1" >> /etc/resolv.conf' /etc/rc.local
}
# Install shadowsocks-libev from its upstream PPA (Ubuntu-only path).
install_shadowsocks(){
apt-get install software-properties-common -y
add-apt-repository ppa:max-c-lv/shadowsocks-libev -y
apt-get update -y
apt-get install shadowsocks-libev -y
}
# Download the latest ck-server (Cloak) release binary for linux/amd64
# from GitHub and install it into /usr/local/bin.
install_cloak(){
archs=amd64
# Scrape the latest-release API response for the matching asset URL.
url=$(wget -O - -o /dev/null https://api.github.com/repos/cbeuw/Cloak/releases/latest | grep "/ck-server-linux-$archs-" | grep -P 'https(.*)[^"]' -o)
# Bail out instead of calling wget with an empty/garbled argument list
# (the original passed $url unquoted and unchecked).
if [ -z "$url" ]; then
echo "install_cloak: could not determine ck-server download URL" >&2
return 1
fi
wget -O ck-server "$url"
chmod +x ck-server
sudo mv ck-server /usr/local/bin
}
# Generate Cloak credentials: an admin UID and the server key pair.
# Pre-seeded $admuid/$publi/$privat values are kept, so reruns do not
# rotate existing keys.
generate_credentials(){
[ -z "$cloak" ] && cloak=y
if [ "${cloak}" == "y" ] || [ "${cloak}" == "Y" ]; then
ckauid=$(ck-server -u)
[ -z "$admuid" ] && admuid=$ckauid
# ck-server -k prints "public,private"; split on the comma. read -r and
# the quoted here-string keep backslashes and whitespace intact
# (the original used an unquoted here-string and no -r).
IFS=, read -r ckpub ckpv <<< "$(ck-server -k)"
[ -z "$publi" ] && publi=$ckpub
[ -z "$privat" ] && privat=$ckpv
fi
}
# Choose the Cloak redirection address and database directory.
# NOTE(review): the echo lines below look like interactive prompts, but no
# `read` follows them — the defaults (203.104.129.195:443 and $HOME) are
# always used unless $ckwebaddr/$ckdbp were pre-seeded by the caller.
install_prepare_cloak(){
[ -z "$cloak" ] && cloak=y
if [ "${cloak}" == "y" ] || [ "${cloak}" == "Y" ]; then
echo -e "Please enter a redirection IP for Cloak (leave blank to set it to 203.104.129.195:443 of www.line.me):"
[ -z "$ckwebaddr" ] && ckwebaddr="203.104.129.195:443"
echo -e "Where do you want to put the userinfo.db? (default $HOME)"
[ -z "$ckdbp" ] && ckdbp=$HOME
fi
}
# Write the shadowsocks-libev server config (with the ck-server/Cloak
# plugin wired in using the credentials from generate_credentials), then
# install both a systemd unit and a SysV init script for it.
shadowsocks_conf(){
rm /etc/shadowsocks-libev/config.json
# Unquoted EOF delimiter on purpose: ${ckwebaddr}, ${privat}, ${admuid}
# and ${ckdbp} are expanded while writing the config.
cat >/etc/shadowsocks-libev/config.json << EOF
{
"server":"0.0.0.0",
"server_port":53794,
"password":"GLOBALSSH",
"timeout":600,
"method":"chacha20-ietf-poly1305",
"fast_open":true,
"nameserver":"1.1.1.1",
"reuse_port":true,
"no_delay":true,
"mode":"tcp_and_udp",
"plugin":"ck-server",
"plugin_opts":"WebServerAddr=${ckwebaddr};PrivateKey=${privat};AdminUID=${admuid};DatabasePath=${ckdbp}/userinfo.db;BackupDirPath=${ckdbp};loglevel=none"
}
EOF
# \$MAINPID is escaped so systemd, not this script, expands it.
cat >/lib/systemd/system/shadowsocks.service << END8
[Unit]
Description=Shadowsocks-libev Server Service
After=network.target
[Service]
ExecStart=/usr/bin/ss-server -c /etc/shadowsocks-libev/config.json -u
ExecReload=/bin/kill -HUP \$MAINPID
Restart=on-failure
[Install]
WantedBy=multi-user.target
END8
systemctl enable shadowsocks.service
wget -O /etc/init.d/shadowsocks "https://github.com/Trustdee/IPTUNNELS/raw/master/config/shadowsocks"
chmod +x /etc/init.d/shadowsocks
}
# Install OpenVPN via Angristan's installer (driven non-interactively
# through its env vars), switch it to PAM username/password auth, tune the
# server config, clone a UDP variant on port 587, and publish ready-made
# tcp/udp client profiles under /var/www/html.
install_ovpn(){
homeDir="/root"
curl -O https://raw.githubusercontent.com/Angristan/openvpn-install/master/openvpn-install.sh
chmod +x openvpn-install.sh
# Answers consumed by openvpn-install.sh's headless mode.
export APPROVE_INSTALL=y
export APPROVE_IP=y
export IPV6_SUPPORT=n
export PORT_CHOICE=1
export PROTOCOL_CHOICE=2
export DNS=3
export COMPRESSION_ENABLED=n
export CUSTOMIZE_ENC=n
export CLIENT=client
export PASS=1
./openvpn-install.sh
cd /etc/openvpn/
# Switch to PAM auth: clients log in with system username/password
# instead of per-client certificates.
wget -O /etc/openvpn/openvpn-auth-pam.so https://github.com/Trustdee/IPTUNNELS/raw/master/package/openvpn-auth-pam.so
echo "plugin /etc/openvpn/openvpn-auth-pam.so /etc/pam.d/login" >> /etc/openvpn/server.conf
echo "verify-client-cert none" >> /etc/openvpn/server.conf
echo "username-as-common-name" >> /etc/openvpn/server.conf
echo "duplicate-cn" >> /etc/openvpn/server.conf
echo "max-clients 10000" >> /etc/openvpn/server.conf
echo "max-routes-per-client 1000" >> /etc/openvpn/server.conf
echo "mssfix 1200" >> /etc/openvpn/server.conf
echo "sndbuf 2000000" >> /etc/openvpn/server.conf
echo "rcvbuf 2000000" >> /etc/openvpn/server.conf
echo "txqueuelen 4000" >> /etc/openvpn/server.conf
echo "replay-window 2000" >> /etc/openvpn/server.conf
# NOTE(review): these seds comment out the first "user"/"group" substring
# on any line — they are much broader than the user/group directives.
sed -i 's|user|#user|' /etc/openvpn/server.conf
sed -i 's|group|#group|' /etc/openvpn/server.conf
sed -i 's|user|#user|' /etc/openvpn/server.conf
# Second instance: UDP on port 587 with its own 10.9.0.0 subnet.
cp server.conf server-udp.conf
sed -i 's|1194|587|' /etc/openvpn/server-udp.conf
sed -i 's|tcp|udp|' /etc/openvpn/server-udp.conf
sed -i 's|10.8.0.0|10.9.0.0|' /etc/openvpn/server-udp.conf
sed -i 's|#AUTOSTART="all"|AUTOSTART="all"|' /etc/default/openvpn
service openvpn restart
rm client.ovpn
echo 'auth-user-pass
mssfix 1200
sndbuf 2000000
rcvbuf 2000000' >> /etc/openvpn/client-template.txt
cp /etc/openvpn/client-template.txt "$homeDir/client.ovpn"
# Determine if we use tls-auth or tls-crypt
if grep -qs "^tls-crypt" /etc/openvpn/server.conf; then
TLS_SIG="1"
elif grep -qs "^tls-auth" /etc/openvpn/server.conf; then
TLS_SIG="2"
fi
# Embed the CA cert and the TLS key inline in the client profile.
{
echo "<ca>"
cat "/etc/openvpn/easy-rsa/pki/ca.crt"
echo "</ca>"
case $TLS_SIG in
1)
echo "<tls-crypt>"
cat /etc/openvpn/tls-crypt.key
echo "</tls-crypt>"
;;
2)
echo "key-direction 1"
echo "<tls-auth>"
cat /etc/openvpn/tls-auth.key
echo "</tls-auth>"
;;
esac
} >> "$homeDir/client.ovpn"
cd
# UDP client profile derived from the TCP one, then both published for
# download, named after this server's IP.
cp client.ovpn clientudp.ovpn
sed -i 's|tcp-client|udp|' /root/clientudp.ovpn
sed -i 's|1194|587|' /root/clientudp.ovpn
cp /root/client.ovpn /var/www/html/tcp-$MYIP.ovpn
cp /root/clientudp.ovpn /var/www/html/udp-$MYIP.ovpn
}
# Install the screenfetch system-info banner and run it (after a clear)
# on every interactive login.
install_screenfetch(){
wget -O /usr/bin/screenfetch "https://github.com/Trustdee/IPTUNNELS/raw/master/config/screenfetch"
chmod +x /usr/bin/screenfetch
# NOTE(review): .profile is resolved against the current working
# directory — presumably /root when this runs; confirm the caller's cwd.
echo "clear" >> .profile
echo "screenfetch" >> .profile
}
# System tuning: enable IPv4 forwarding, raise the open-file limits,
# create and mount a 2G swap file, and append network/VM sysctl tweaks.
config_systemctl(){
echo 1 > /proc/sys/net/ipv4/ip_forward
echo '* soft nofile 51200' >> /etc/security/limits.conf
echo '* hard nofile 51200' >> /etc/security/limits.conf
ulimit -n 51200
sed -i 's|#net.ipv4.ip_forward=1|net.ipv4.ip_forward=1|' /etc/sysctl.conf
sed -i 's|net.ipv4.ip_forward=0|net.ipv4.ip_forward=1|' /etc/sysctl.conf
# 2G swap file, activated now and on boot via fstab.
fallocate -l 2G /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
echo '/swapfile none swap sw 0 0' >> /etc/fstab
sysctl vm.swappiness=40
sysctl vm.vfs_cache_pressure=50
swapon -s
# Bulk sysctl tuning appended verbatim (buffers, backlog, TCP fastopen...).
echo 'vm.vfs_cache_pressure = 50
vm.swappiness= 40
fs.file-max = 51200
net.core.rmem_max = 67108864
net.core.wmem_max = 67108864
net.core.netdev_max_backlog = 250000
net.core.somaxconn = 4096
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.ip_local_port_range = 10000 65000
net.ipv4.tcp_max_syn_backlog = 8192
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_fastopen = 3
net.ipv4.tcp_mem = 25600 51200 102400
net.ipv4.tcp_rmem = 4096 87380 67108864
net.ipv4.tcp_wmem = 4096 65536 67108864
net.ipv4.tcp_mtu_probing = 1' >> /etc/sysctl.conf
sysctl --system
sysctl -p
sysctl -p /etc/sysctl.d/local.conf
}
# Build badvpn 1.999.130 from source (udpgw + tun2socks only) and start
# the UDP gateway on 127.0.0.1:7300 at boot via rc.local.
install_badvpn(){
cd
wget https://github.com/ambrop72/badvpn/archive/1.999.130.tar.gz
tar xf 1.999.130.tar.gz
mkdir badvpn-build
cd badvpn-build
cmake ~/badvpn-1.999.130 -DBUILD_NOTHING_BY_DEFAULT=1 -DBUILD_UDPGW=1 -DBUILD_TUN2SOCKS=1
make install
sed -i '$ i\/usr/local/bin/badvpn-udpgw --listen-addr 127.0.0.1:7300' /etc/rc.local
clear
cd
}
# Harden/extend sshd: listen on the extra port 143, limit auth attempts,
# and show the downloaded /etc/issue.net banner at login.
install_ssh_banner(){
cd
echo 'Port 143' >>/etc/ssh/sshd_config
echo 'MaxAuthTries 2' >>/etc/ssh/sshd_config
echo 'Banner /etc/issue.net' >>/etc/ssh/sshd_config
clear
}
# Configure dropbear from the repo template, allow the restricted shells
# it hands to tunnel-only users, then build dropbear 2018.76 from source
# and swap it in over the distro binary.
install_dropbear(){
cd
wget -O /etc/default/dropbear "https://github.com/Trustdee/IPTUNNELS/raw/master/config/dropbear"
# Tunnel accounts get no real shell; both must be listed in /etc/shells
# for dropbear to accept the login.
echo "/bin/false" >> /etc/shells
echo "/usr/sbin/nologin" >> /etc/shells
sed -i 's/obscure/minlen=5/g' /etc/pam.d/common-password
service ssh restart
service dropbear restart
clear
# Build 2018.76 from source and replace the packaged binary with it.
wget https://matt.ucc.asn.au/dropbear/dropbear-2018.76.tar.bz2
bzip2 -cd dropbear-2018.76.tar.bz2 | tar xvf -
cd dropbear-2018.76
./configure
make && make install
mv /usr/sbin/dropbear /usr/sbin/dropbear.old
ln /usr/local/sbin/dropbear /usr/sbin/dropbear
cd && rm -rf dropbear-2018.76 && rm -rf dropbear-2018.76.tar.bz2
service dropbear restart
clear
}
# Enable stunnel4, install the repo config (with this server's IP stamped
# in via the $MYIP2 sed program), and generate a self-signed certificate.
install_stunnel4(){
sed -i 's/ENABLED=0/ENABLED=1/g' /etc/default/stunnel4
wget -O /etc/stunnel/stunnel.conf "https://github.com/Trustdee/IPTUNNELS/raw/master/config/stunnel.conf"
# $MYIP2 holds "s/xxxxxxxxx/<ip>/g" built in the script prelude.
sed -i $MYIP2 /etc/stunnel/stunnel.conf
#setting cert
country=SG
state=MAPLETREE
locality=Bussiness
organization=GLOBALSSH
organizationalunit=READYSSH
commonname=server
email=admin@iptunnels.com
# Self-signed 3-year cert; key + cert concatenated as stunnel expects.
openssl genrsa -out key.pem 2048
openssl req -new -x509 -key key.pem -out cert.pem -days 1095 \
-subj "/C=$country/ST=$state/L=$locality/O=$organization/OU=$organizationalunit/CN=$commonname/emailAddress=$email"
cat key.pem cert.pem >> /etc/stunnel/stunnel.pem
/etc/init.d/stunnel4 restart
clear
}
# Activate fail2ban with a local copy of the stock jail config.
# (Function name keeps the original's "failban" spelling for callers.)
install_failban(){
cd
service fail2ban restart
cp /etc/fail2ban/jail.conf /etc/fail2ban/jail.local
service fail2ban restart
}
# Install the repo's squid proxy config and create the default
# basic-auth user (GLOBALSSH/READYSSH) in /etc/squid/passwd.
install_squid3(){
touch /etc/squid/passwd
/bin/rm -f /etc/squid/squid.conf
/usr/bin/touch /etc/squid/blacklist.acl
/usr/bin/wget --no-check-certificate -O /etc/squid/squid.conf https://github.com/Trustdee/IPTUNNELS/raw/master/config/squid.conf
service squid restart
update-rc.d squid defaults
#create user default
/usr/bin/htpasswd -b -c /etc/squid/passwd GLOBALSSH READYSSH
service squid restart
clear
}
config_firewall(){
# Open service ports and NAT the 10.9.0.0/24 VPN subnet out of the
# default-route interface.
NIC=$(ip -4 route ls | grep default | grep -Po '(?<=dev )(\S+)' | head -1)
iptables -I INPUT -p tcp --dport 8080 -j ACCEPT
iptables -I INPUT -p tcp --dport 3128 -j ACCEPT
iptables -I FORWARD -s 10.9.0.0/24 -j ACCEPT
iptables -I INPUT -p udp --dport 587 -j ACCEPT
iptables -t nat -I POSTROUTING -s 10.9.0.0/24 -o $NIC -j MASQUERADE
# NOTE(review): iptables-save only PRINTS the ruleset; actual
# persistence comes from netfilter-persistent below.
iptables-save
clear
# NOTE(review): iptables-persistent is installed after the rules are
# added; its postinst may prompt/save — confirm the ordering is intended.
apt-get -y install iptables-persistent
netfilter-persistent save
}
config_autostartup(){
# Survive reboots: insert service restarts and the watchdog screens just
# above the last line of /etc/rc.local ('$ i\' = insert before final
# line, normally "exit 0"), and register daily-midnight cron jobs for
# the account-maintenance helpers installed elsewhere in this script.
sed -i '$ i\screen -AmdS limit /root/limit.sh' /etc/rc.local
sed -i '$ i\screen -AmdS ban /root/ban.sh' /etc/rc.local
sed -i '$ i\service fail2ban restart' /etc/rc.local
sed -i '$ i\service dropbear restart' /etc/rc.local
sed -i '$ i\service squid restart' /etc/rc.local
sed -i '$ i\service webmin restart' /etc/rc.local
sed -i '$ i\/etc/init.d/stunnel4 restart' /etc/rc.local
echo "0 0 * * * root /usr/local/bin/user-expire" > /etc/cron.d/user-expire
echo "0 0 * * * root /usr/local/bin/deltrash" > /etc/cron.d/deltrash
echo "0 0 * * * root /usr/local/bin/killtrash" > /etc/cron.d/killtrash
echo "0 0 * * * root /usr/local/bin/expiredtrash" > /etc/cron.d/expiredtrash
}
install_webmin(){
cd
# Add the Webmin apt repositories and signing key, then install Webmin.
# NOTE(review): the "sarge" repos and apt-key are long deprecated —
# confirm these sources still resolve before relying on this.
echo 'deb http://download.webmin.com/download/repository sarge contrib' >>/etc/apt/sources.list
echo 'deb http://webmin.mirror.somersettechsolutions.co.uk/repository sarge contrib' >>/etc/apt/sources.list
wget http://www.webmin.com/jcameron-key.asc
apt-key add jcameron-key.asc
apt-get -y update && apt-get -y install webmin
clear
}
install_automaticdeleteaccount(){
# Install three account-maintenance scripts (run daily from cron, see
# config_autostartup): deltrash removes expired accounts, killtrash is a
# watchdog loop, expiredtrash builds expiry report files under /root.
#
# FIX: the here-doc delimiters are now quoted ('END1' etc.) so $vars and
# $(...) inside the generated scripts are written LITERALLY and expand
# when the scripts run. With the original unquoted delimiters they
# expanded while this installer ran, baking empty strings and a stale
# timestamp into the generated files and breaking them.
#automatic deleting
cat > /usr/local/bin/deltrash <<'END1'
#!/bin/bash
nowsecs=$( date +%s )
while read account
do
username=$( echo $account | cut -d: -f1 )
expiredays=$( echo $account | cut -d: -f2 )
expiresecs=$(( $expiredays * 86400 ))
if [ $expiresecs -le $nowsecs ]
then
echo "$username has expired deleting"
userdel -r "$username"
fi
done < <( cut -d: -f1,8 /etc/shadow | sed /:$/d )
END1
#automatic killing
# FIX: added the missing shebang so the file is executable from cron.
cat > /usr/local/bin/killtrash <<'END2'
#!/bin/bash
while :
do
./userexpired.sh
sleep 36000
done
END2
#automatic check trash
cat > /usr/local/bin/expiredtrash <<'END3'
#!/bin/bash
echo "" > /root/infouser.txt
echo "" > /root/expireduser.txt
echo "" > /root/alluser.txt
cat /etc/shadow | cut -d: -f1,8 | sed /:$/d > /tmp/expirelist.txt
totalaccounts=`cat /tmp/expirelist.txt | wc -l`
for((i=1; i<=$totalaccounts; i++ ))
do
tuserval=`head -n $i /tmp/expirelist.txt | tail -n 1`
username=`echo $tuserval | cut -f1 -d:`
userexp=`echo $tuserval | cut -f2 -d:`
userexpireinseconds=$(( $userexp * 86400 ))
tglexp=`date -d @$userexpireinseconds`
tgl=`echo $tglexp |awk -F" " '{print $3}'`
while [ ${#tgl} -lt 2 ]
do
tgl="0"$tgl
done
while [ ${#username} -lt 15 ]
do
username=$username" "
done
bulantahun=`echo $tglexp |awk -F" " '{print $2,$6}'`
echo " User : $username Expire tanggal : $tgl $bulantahun" >> /root/alluser.txt
todaystime=`date +%s`
if [ $userexpireinseconds -ge $todaystime ] ;
then
timeto7days=$(( $todaystime + 604800 ))
if [ $userexpireinseconds -le $timeto7days ];
then
echo " User : $username Expire tanggal : $tgl $bulantahun" >> /root/infouser.txt
fi
else
echo " User : $username Expire tanggal : $tgl $bulantahun" >> /root/expireduser.txt
passwd -l $username
fi
done
END3
chmod +x /usr/local/bin/deltrash
chmod +x /usr/local/bin/killtrash
chmod +x /usr/local/bin/expiredtrash
clear
}
install_premiumscript(){
# Download and unpack the premium management tools into /usr/local/bin,
# create the stub limit/ban watchdog scripts, mark every tool
# executable, and start the watchdogs in detached screen sessions.
cd /usr/local/bin
wget -O premium-script.tar.gz "https://github.com/Trustdee/IPTUNNELS/raw/master/package/premium-script.tar.gz"
tar -xvf premium-script.tar.gz
rm -f premium-script.tar.gz
cp /usr/local/bin/premium-script /usr/local/bin/menu
# Stub watchdog scripts; the real commands are shipped commented out.
cat > /root/ban.sh <<END4
#!/bin/bash
#/usr/local/bin/user-ban
END4
cat > /root/limit.sh <<END5
#!/bin/bash
#/usr/local/bin/user-limit
END5
# Mark every bundled tool executable (replaces the former 43 repeated
# chmod lines; same set of files, same mode change).
local tool
for tool in \
    trial user-add user-aktif user-ban user-delete user-detail \
    user-expire user-limit user-lock user-login user-unban user-unlock \
    user-password user-log user-add-pptp user-delete-pptp alluser-pptp \
    user-login-pptp user-expire-pptp user-detail-pptp bench-network \
    speedtest ram log-limit log-ban listpassword pengumuman \
    user-generate user-list diagnosa premium-script user-delete-expired \
    auto-reboot log-install menu user-auto-limit user-auto-limit-script \
    edit-port edit-port-squid edit-port-openvpn edit-port-openssh \
    edit-port-dropbear autokill; do
  chmod +x "/usr/local/bin/${tool}"
done
chmod +x /root/limit.sh
chmod +x /root/ban.sh
screen -AmdS limit /root/limit.sh
screen -AmdS ban /root/ban.sh
clear
cd
}
config_apache2(){
# Move Apache from port 80 to 81 (port 80 is used by Dropbear, per the
# summary printed in log_file).
sed -i 's|Listen 80|Listen 81|' /etc/apache2/ports.conf
# NOTE(review): a bare 's|80|81|' rewrites the FIRST "80" anywhere on a
# line (could hit IPs or "8080") — confirm the vhost file only contains
# the port; a stricter pattern such as 's|:80>|:81>|' would be safer.
sed -i 's|80|81|' /etc/apache2/sites-enabled/000-default.conf
systemctl restart apache2
cd
}
install_bbr(){
# Enable BBR TCP congestion control via a remote helper script.
# NOTE(review): piping a remote script straight into bash executes
# unreviewed code as root — consider vendoring the script or verifying
# a checksum before execution.
curl -sSL https://github.com/Trustdee/IPTUNNELS/raw/master/package/bbr.sh | bash
}
Install_monit_shadowsocks(){
# Install the prepared monit control file (presumably watching the
# shadowsocks service — confirm against the fetched monitrc), reload
# monit, and enable it at boot.
wget -O /etc/monit/monitrc "https://github.com/Trustdee/IPTUNNELS/raw/master/config/monitrc"
monit reload all
systemctl enable monit
}
log_file(){
# Print the post-install summary and mirror it into log-install.txt,
# which is then published via Apache (port 81).
# FIX: the udp .ovpn download link was mislabeled "tcp" (copy-paste).
clear
echo " " | tee -a log-install.txt
echo "Instalasi telah selesai! Mohon baca dan simpan penjelasan setup server!" | tee -a log-install.txt
echo " "
echo "--------------------------- Penjelasan Setup Server ----------------------------" | tee -a log-install.txt
echo "                  Modified by https://www.facebook.com/ibnumalik.al                " | tee -a log-install.txt
echo "--------------------------------------------------------------------------------" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "Informasi Server" | tee -a log-install.txt
# NOTE(review): this line intentionally (?) goes to the console only,
# not into the log file — confirm.
echo "http://$MYIP:81/log-install.txt"
echo "Download Client tcp OVPN: http://$MYIP:81/tcp-$MYIP.ovpn" | tee -a log-install.txt
echo "Download Client udp OVPN: http://$MYIP:81/udp-$MYIP.ovpn" | tee -a log-install.txt
echo "   - Timezone    : Asia/Jakarta (GMT +7)" | tee -a log-install.txt
echo "   - Fail2Ban    : [on]" | tee -a log-install.txt
echo "   - IPtables    : [off]" | tee -a log-install.txt
echo "   - Auto-Reboot : [on]" | tee -a log-install.txt
echo "   - IPv6        : [off]" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "Informasi Aplikasi & Port" | tee -a log-install.txt
echo "   - OpenVPN     : TCP 1194 UDP 587 SSL 1443" | tee -a log-install.txt
echo "   - OpenSSH     : 22, 143" | tee -a log-install.txt
echo "   - OpenSSH-SSL : 444" | tee -a log-install.txt
echo "   - Dropbear    : 80, 8443" | tee -a log-install.txt
echo "   - Dropbear-SSL: 443" | tee -a log-install.txt
echo "   - Squid Proxy : 8080, 3128 (public u/p= GLOBALSSH/READYSSH)" | tee -a log-install.txt
echo "   - Squid-SSL   : 8000 (public u/p= GLOBALSSH/READYSSH)" | tee -a log-install.txt
echo "   - Badvpn      : 7300" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo -e "Congratulations, ${green}shadowsocks-libev${plain} server install completed!" | tee -a log-install.txt
echo -e "Your Server IP        : $MYIP" | tee -a log-install.txt
echo -e "Your Server Port      : 53794" | tee -a log-install.txt
echo -e "Your Password         : GLOBALSSH" | tee -a log-install.txt
echo -e "Your Encryption Method: chacha20-ietf-poly1305" | tee -a log-install.txt
echo -e "Your Cloak's Public Key: ${publi}" | tee -a log-install.txt
echo -e "Your Cloak's Private Key: ${privat}" | tee -a log-install.txt
echo -e "Your Cloak's AdminUID: ${admuid}" | tee -a log-install.txt
echo -e "Download Plugin Cloak PC : https://api.github.com/repos/cbeuw/Cloak/releases/latest" | tee -a log-install.txt
echo -e "Download Plugin Cloak Android: https://github.com/cbeuw/Cloak-android/releases" | tee -a log-install.txt
echo "Informasi Tools Dalam Server" | tee -a log-install.txt
echo "   - htop" | tee -a log-install.txt
echo "   - iftop" | tee -a log-install.txt
echo "   - mtr" | tee -a log-install.txt
echo "   - nethogs" | tee -a log-install.txt
echo "   - screenfetch" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "Informasi Premium Script" | tee -a log-install.txt
echo "   Perintah untuk menampilkan daftar perintah: menu" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "   Penjelasan script dan setup VPS"| tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "Informasi Penting" | tee -a log-install.txt
echo "   - Webmin      : http://$MYIP:10000/" | tee -a log-install.txt
echo "   - Log Instalasi : cat /root/log-install.txt" | tee -a log-install.txt
echo "   NB: User & Password Webmin adalah sama dengan user & password root" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "  Modified by https://www.facebook.com/ibnumalik.al " | tee -a log-install.txt
# Publish the log through the web server.
cp /root/log-install.txt /var/www/html/
}
exit_all(){
# Target of the "exit" CLI argument (see the dispatcher at the bottom):
# terminate successfully without installing anything.
exit 0;
}
install_all(){
# Run the full provisioning sequence. Ordering matters: packages and
# kernel tweaks first, then services, then firewall/autostart/cron,
# then the summary, and finally a reboot.
remove_unused_package_disableipv6
install_package_dependency
install_bbr
change_dns_resolver
config_apache2
install_shadowsocks
install_cloak
generate_credentials
install_prepare_cloak
shadowsocks_conf
Install_monit_shadowsocks
install_ovpn
install_screenfetch
config_systemctl
install_badvpn
install_ssh_banner
install_dropbear
install_stunnel4
install_failban
install_squid3
config_firewall
config_autostartup
install_webmin
install_automaticdeleteaccount
install_premiumscript
log_file
reboot
# NOTE(review): unreachable — reboot does not return control here.
echo "AFTER REBOOT ENJOY YOUR FREEDOM"
}
# Initialization step: dispatch on the first CLI argument.
# No argument defaults to "install"; "exit" quits without changes.
# FIX: replaced the unquoted test `[ -z $1 ] && action=install` (which
# errors on multi-word arguments) with a default parameter expansion,
# and modernized the backtick command substitution.
action=${1:-install}
case "$action" in
install|exit)
	"${action}_all"
	;;
*)
	echo "Arguments error! [${action}]"
	echo "Usage: $(basename "$0") [install|exit]"
	;;
esac
| true
|
424ed38796c88c172af4ebb0995059ba853b15f2
|
Shell
|
pfhchaos/dotfiles
|
/.config/bin/electricsheep-desktop
|
UTF-8
| 343
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the electricsheep screensaver as an animated desktop wallpaper.
# Usage: electricsheep-desktop start <xrandr-output-name>
# The "start" branch re-invokes this same script as `$0 env WID`;
# xwinwrap replaces the literal WID argument with the id of the window
# it creates, so the "env" branch receives the real window id as $2.
rundir="/run/user/$(id -u)/esdesk"
case $1 in
	start)
		# NOTE(review): rundir is created but not used elsewhere in this
		# snippet — confirm whether electricsheep relies on it.
		[ -d "$rundir" ] || mkdir "$rundir"
		echo $2
		# Geometry (WxH+X+Y) of the requested output, parsed from xrandr.
		geometry="$(xrandr | grep "$2 " | sed -e 's:.* \([0-9]*x[0-9]*+[0-9]*+[0-9]*\).*:\1:')"
		xwinwrap -ni -g "$geometry" -s -b -ov -- $0 env WID &> /dev/null
		;;
	env)
		# Render into the window xwinwrap created for us.
		XSCREENSAVER_WINDOW="$2" electricsheep &> /dev/null
		;;
esac
| true
|
044ca1d0a060edf556999246dccbbb11b2d9017b
|
Shell
|
bringhurst/generate_random_files
|
/genrandom.sh
|
UTF-8
| 480
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Create up to $no_of_files files filled with random data read from
# /dev/sda (requires root), randomly descending into new subdirectories.
# FIX: the progress echo referenced the undefined $counter variable;
# it now prints $file_counter. The deprecated `-a` inside [ ] was
# replaced by two tests joined with && (still two independent RANDOM
# draws, as before).
no_of_files=500000;
file_counter=1;
while [[ $file_counter -le $no_of_files ]];
do
	echo "Creating file no $file_counter"
	# ~1% chance per iteration to stop early.
	if [ "$((RANDOM%100+1))" -lt "2" ]
	then
		exit
	fi
	# Random chance to descend into a fresh subdirectory. Note: the
	# script never cd's back up, so directories nest progressively
	# deeper — preserved from the original behavior.
	if [ "$((RANDOM%100+1))" -lt "50" ] && [ "$((RANDOM%100+1))" -gt "5" ]
	then
		mkdir random-dir.$file_counter
		cd random-dir.$file_counter
	fi
	# Random-size, random-offset slab of raw disk data as file content.
	dd bs=1024 count=$RANDOM skip=$RANDOM if=/dev/sda of=random-file.$file_counter;
	let "file_counter += 1";
done
| true
|
fe65c406750d8f789b29ac41f0139346857a11bf
|
Shell
|
orientationsys/nb_kb
|
/MySQLReplication/full-backup.sh
|
UTF-8
| 827
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
###############Basic parameters##########################
# Daily logical backup of selected MySQL schemas via mysqldump + gzip.
DAY=`date +%Y%m%d`
# NOTE(review): Environment (the primary local IP) is computed but never
# used below — dead variable, confirm before removing.
Environment=$(/sbin/ifconfig | grep "inet addr" | head -1 |grep -v "127.0.0.1" | awk '{print $2;}' | awk -F':' '{print $2;}')
USER="backup"
# NOTE(review): hardcoded password, passed on the command line and
# visible in `ps` — prefer --defaults-extra-file or ~/.my.cnf.
PASSWD="bbvsbackup!@#"
HostPort="3306"
DATADIR="/var/www/jacky_bbv/backup/"
MYSQL=`/usr/bin/which mysql`
MYSQLDUMP=`/usr/bin/which mysqldump`
Dump(){
# Dump the schema named in the global ${database} as a consistent
# snapshot (--single-transaction) with the binlog position recorded as
# a comment (--master-data=2), including routines/triggers/events,
# then gzip the result.
${MYSQLDUMP} --master-data=2 --single-transaction --routines --triggers --events -u${USER} -p${PASSWD} -P${HostPort} ${database} > ${DATADIR}/${DAY}-${database}.sql
gzip  ${DATADIR}/${DAY}-${database}.sql
# chmod 755  ${DATADIR}/${DAY}-${database}.sql.gz
}
# Back up every schema selected from information_schema (currently only 'bbvs').
for db in `echo "SELECT schema_name FROM information_schema.schemata where schema_name in ('bbvs')" | ${MYSQL} -u${USER} -p${PASSWD} --skip-column-names`
do
database=${db}
Dump
done
| true
|
ae49c0b7c3540c40f96b2cdd8729e559a4e1b6f7
|
Shell
|
lisuke/repo
|
/archlinuxcn/refind-theme-dracula/PKGBUILD
|
UTF-8
| 1,015
| 2.78125
| 3
|
[] |
no_license
|
# Maintainer: Kyle Rassweiler <dev at kylerassweiler dot ca>
# Arch PKGBUILD for the Dracula rEFInd boot-manager theme.
pkgname=refind-theme-dracula
pkgver=1.0.0
pkgrel=2
pkgdesc="Simple theme inspired by the Dracula palette"
arch=('any')
url="https://github.com/rassweiler/refind-theme-dracula"
license=('MIT')
depends=('refind')
source=("${url}/releases/download/${pkgver}/${pkgname}-${pkgver}.tar.gz")
sha256sums=('f7fdc40ca24722e703164cfb6098b85a40194bb72459c284135248c142e8a471')
install=$pkgname.install
prepare() {
	# Patch upstream's setup.sh so it no longer execs and replaces the
	# calling shell (which would break makepkg's stdin handling).
	cd "$pkgname-$pkgver"
	# do not reset the STDIN file descriptor
	sed -e 's|exec|# exec|g' -i 'setup.sh'
}
package() {
	# Install the theme under the standard rEFInd themes directory,
	# plus docs and license files.
	cd "$pkgname-$pkgver"
	_theme_dir="usr/share/refind/themes/dracula"
	install -D -m0755 -t "${pkgdir}/${_theme_dir}/" "setup.sh"
	install -D -m0644 -t "${pkgdir}/${_theme_dir}/" {theme.conf,*.png}
	install -D -m0644 -t "${pkgdir}/${_theme_dir}/icons" "icons/"*.png
	# docs
	install -D -m0644 -t "${pkgdir}/usr/share/doc/${pkgname}" README.md
	# licenses
	install -D -m0644 -t "${pkgdir}/usr/share/licenses/${pkgname}" {LICENSE,COPYING}
}
| true
|
0b95aaa02d7c79001c01669666de164b59d2f0c9
|
Shell
|
kabo/lambda-comments
|
/deploy/cloudformation/cloudformation.sh
|
UTF-8
| 1,086
| 3.625
| 4
|
[
"ISC"
] |
permissive
|
#! /bin/bash
# Create, update or delete the lambda-comments CloudFormation stack.
# Stack name / origin / region come from the project config, read via
# the babel-node dump-config helper.
# FIX: the usage message omitted "delete" even though it is accepted;
# also modernized backticks to $(...) and quoted expansions.
set -e

if [ "$1" = "create" ]; then
  ACTION=create-stack
elif [ "$1" = "update" ]; then
  ACTION=update-stack
elif [ "$1" = "delete" ]; then
  ACTION=delete-stack
else
  echo "Usage: $0 [create|update|delete]"
  exit 1
fi

TAG='lambda-comments'
DIR=$(cd "$(dirname "$0")"; pwd)
BABEL_NODE=$DIR/../../node_modules/babel-cli/bin/babel-node.js
BIN_DIR=$DIR/../../bin
STACK_NAME=$("$BABEL_NODE" "$BIN_DIR/dump-config.js" CLOUDFORMATION)
ORIGIN=$("$BABEL_NODE" "$BIN_DIR/dump-config.js" ORIGIN)
REGION=$("$BABEL_NODE" "$BIN_DIR/dump-config.js" REGION)

if [ "$ACTION" = "delete-stack" ]; then
  aws cloudformation delete-stack \
    --region "$REGION" \
    --stack-name "$STACK_NAME"
  exit 0
fi

# "|| true": update-stack exits non-zero when there are no changes to
# apply; don't let that abort the script under set -e.
aws cloudformation "$ACTION" \
  --region "$REGION" \
  --stack-name "$STACK_NAME" \
  --template-body "file://$DIR/lambda-comments.json" \
  --capabilities CAPABILITY_IAM \
  --parameters \
  ParameterKey=TagName,ParameterValue=$TAG,UsePreviousValue=false \
  ParameterKey=Origin,ParameterValue=$ORIGIN,UsePreviousValue=false \
  || true
# $BABEL_NODE $BIN_DIR/save-cloudformation-config.js
| true
|
a719d8bb3cd2d8eefa6f219407ae72f8edec688e
|
Shell
|
jaredballou/linuxgsm
|
/functions/logs.sh
|
UTF-8
| 2,041
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# LGSM logs.sh function
# Author: Daniel Gibbs
# Website: http://gameservermanagers.com
# Description: Acts as a log rotater, removing old logs.
# FIX: removed a dead `count=` assignment that was immediately
# overwritten by the if/else right below it.
# NOTE: `local` at top level works because LGSM sources this file from
# inside a function — confirm before running standalone.
local modulename="Log Manager"
# Ensure the console log file exists before rotating.
if [ -n "${consolelog}" ]; then
	if [ ! -e "${consolelog}" ]; then
		touch "${consolelog}"
	fi
fi
# log manager will active if finds logs older than ${logdays}
if [ $(find "${scriptlogdir}"/ -type f -mtime +${logdays}|wc -l) -ne "0" ]; then
	fn_printdots "Starting"
	sleep 1
	fn_printok "Starting"
	fn_scriptlog "Starting"
	sleep 1
	echo -en "\n"
	fn_printinfo "Removing logs older than ${logdays} days"
	fn_scriptlog "Removing logs older than ${logdays} days"
	sleep 1
	echo -en "\n"
	# Record which files are about to be removed in the script log.
	if [ "${engine}" == "unreal2" ]||[ "${engine}" == "source" ]; then
		find "${gamelogdir}"/ -type f -mtime +${logdays}|tee >> "${scriptlog}"
	fi
	find "${scriptlogdir}"/ -type f -mtime +${logdays}|tee >> "${scriptlog}"
	if [ -n "${consolelog}" ]; then
		find "${consolelogdir}"/ -type f -mtime +${logdays}|tee >> "${scriptlog}"
	fi
	# NOTE(review): gamecount is computed from ${scriptlogdir}; the
	# pattern above suggests it should be ${gamelogdir} — confirm upstream.
	if [ "${engine}" == "unreal2" ]||[ "${engine}" == "source" ]; then
		gamecount=$(find "${scriptlogdir}"/ -type f -mtime +${logdays}|wc -l)
	fi
	scriptcount=$(find "${scriptlogdir}"/ -type f -mtime +${logdays}|wc -l)
	# NOTE(review): looks like leftover debug output — confirm before removing.
	echo "${consolelog}"
	if [ -n "${consolelog}" ]; then
		consolecount=$(find "${consolelogdir}"/ -type f -mtime +${logdays}|wc -l)
	else
		consolecount=0
	fi
	# Total number of files to be removed (game logs only for
	# unreal2/source engines).
	if [ "${engine}" == "unreal2" ]||[ "${engine}" == "source" ]; then
		count=$((${scriptcount} + ${consolecount} + ${gamecount}))
	else
		count=$((${scriptcount} + ${consolecount}))
	fi
	# Perform the actual deletion.
	if [ "${engine}" == "unreal2" ]||[ "${engine}" == "source" ]; then
		find "${gamelogdir}"/ -mtime +${logdays} -type f -exec rm -f {} \;
	fi
	find "${scriptlogdir}"/ -mtime +${logdays} -type f -exec rm -f {} \;
	if [ -n "${consolelog}" ]; then
		find "${consolelogdir}"/ -mtime +${logdays} -type f -exec rm -f {} \;
	fi
	fn_printok "Removed ${count} log files"
	fn_scriptlog "Removed ${count} log files"
	sleep 1
	echo -en "\n"
fi
| true
|
cd4e5da569fddfcce9ddca1e2f47014bedb7564a
|
Shell
|
mbencherif/forced-alignment
|
/train_rl.sh
|
UTF-8
| 349
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Train the BRNN on each (feature type, language) combination.
# FIX: the progress message interpolated ${features} — for an array that
# expands to the FIRST element only, so it always printed "mfcc".
# It now prints the loop variable ${feature_type}.
declare -a languages=("de" "fr" "it" "en")
declare -a features=("mfcc" "mel" "pow")
for feature_type in "${features[@]}"
do
    for lang in "${languages[@]}"
    do
        echo "training on ${feature_type} features of speech segments with language=${lang}"
        python ./src/train_brnn.py -c rl -l $lang -f $feature_type
    done
done
| true
|
daa1b95b55a46063679598418874d32498264feb
|
Shell
|
rickypc/dotfiles
|
/bin/json-uglifier
|
UTF-8
| 1,025
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# JSON Uglifier - Remove whitespaces from JSON document.
# Copyright (C) 2008-2015 Richard Huang <rickypc@users.noreply.github.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# FIX: redirect the input file into python directly (the old
# `cat $1 | ...` was an unquoted useless-use-of-cat that broke on
# filenames with spaces).
# NOTE(review): $2 is interpolated into the python source, so an output
# filename containing a quote will break/inject code — only use with
# trusted arguments.
if [[ -z $1 || -z $2 ]]; then
  echo "Usage: $0 <pretty.json> <uglified.json>"
else
  python -c "with open('$2','w') as file: import json,sys;json.dump(json.load(sys.stdin),file,separators=(',',':'))" < "$1"
fi
| true
|
069e00c976d5cf40f830ae820751753acc6e650d
|
Shell
|
DingGuodong/LinuxBashShellScriptForOps
|
/functions/system/getLinuxDmesg.sh
|
UTF-8
| 455
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh
# Refer to: http://blog.csdn.net/wzb56_earl/article/details/50625705
# Prefix each dmesg line with a wall-clock timestamp: boot time is
# now - uptime, and each line's bracketed seconds-since-boot offset
# (first field) is added to it.
uptime_ts=`cat /proc/uptime | awk '{ print $1}'`
dmesg | awk -v uptime_ts=${uptime_ts} 'BEGIN {
    now_ts = systime();
    start_ts = now_ts - uptime_ts;
    #print "system start time seconds:", start_ts;
    #print "system start time:", strftime("[%Y/%m/%d %H:%M:%S]", start_ts);
}
{
    # substr strips the surrounding [ ] from the first field.
    print strftime("[%Y/%m/%d %H:%M:%S]", start_ts + substr($1, 2, length($1) - 2)), $0
}'
| true
|
b1de9a225214313f1d863b7c9880a9e0e0f93abf
|
Shell
|
RafaelAPB/blockchain-integration-framework
|
/weaver/tests/network-setups/fabric/dev/scripts/envVar.sh
|
UTF-8
| 2,951
| 3.4375
| 3
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
#
# Copyright IBM Corp All Rights Reserved
#
# SPDX-License-Identifier: Apache-2.0
#
# This is a collection of bash functions used by different scripts
#NWPATH="$1"
#P_ADD="$1"
# Network name arrives as the sourcing script's third positional argument.
export NW_NAME="$3"
export CORE_PEER_TLS_ENABLED=true
# NOTE(review): NW_PATH is read below but never assigned in this file —
# presumably exported by the caller before sourcing; confirm.
export ORDERER_CA=$NW_PATH/ordererOrganizations/${NW_NAME}.com/orderers/orderer.${NW_NAME}.com/msp/tlscacerts/tlsca.${NW_NAME}.com-cert.pem
export PEER0_ORG1_CA=$NW_PATH/peerOrganizations/org1.${NW_NAME}.com/peers/peer0.org1.${NW_NAME}.com/tls/ca.crt
export PEER0_ORG2_CA=$NW_PATH/peerOrganizations/org2.${NW_NAME}.com/peers/peer0.org2.${NW_NAME}.com/tls/ca.crt
#export PEER0_ORG3_CA=${PWD}/organizations/peerOrganizations/org3.example.com/peers/peer0.org3.example.com/tls/ca.crt
# Set OrdererOrg.Admin globals
# Reads NW_PATH / NW_NAME from the environment; exports the CORE_PEER_*
# variables pointing at the orderer admin MSP.
setOrdererGlobals() {
  export CORE_PEER_LOCALMSPID="OrdererMSP"
  # FIX: the TLS CA cert name was hardcoded as tlsca.network1.com-cert.pem;
  # every other path in this file derives it from ${NW_NAME}, so do the
  # same here for consistency across networks.
  export CORE_PEER_TLS_ROOTCERT_FILE=$NW_PATH/ordererOrganizations/${NW_NAME}.com/orderers/orderer.${NW_NAME}.com/msp/tlscacerts/tlsca.${NW_NAME}.com-cert.pem
  export CORE_PEER_MSPCONFIGPATH=$NW_PATH/ordererOrganizations/${NW_NAME}.com/users/Admin@${NW_NAME}.com/msp
}
# Set environment variables for the peer org
# $1 - org number (1 or 2); can be overridden via $OVERRIDE_ORG
# $2 - peer port (peer address becomes localhost:$2)
# $3 - network name (used to build the admin MSP path)
setGlobals() {
  local USING_ORG=""
  if [ -z "$OVERRIDE_ORG" ]; then
    USING_ORG=$1
  else
    USING_ORG="${OVERRIDE_ORG}"
  fi
  echo "Using organization ${USING_ORG} NW - $3"
  if [ $USING_ORG -eq 1 ]; then
    export CORE_PEER_LOCALMSPID="Org1MSP"
    export CORE_PEER_TLS_ROOTCERT_FILE=$PEER0_ORG1_CA
    export CORE_PEER_MSPCONFIGPATH=$NW_PATH/peerOrganizations/org1."$3".com/users/Admin@org1."$3".com/msp
    export CORE_PEER_ADDRESS="localhost:"${2}
  elif [ $USING_ORG -eq 2 ]; then
    export CORE_PEER_LOCALMSPID="Org2MSP"
    export CORE_PEER_TLS_ROOTCERT_FILE=$PEER0_ORG2_CA
    export CORE_PEER_MSPCONFIGPATH=$NW_PATH/peerOrganizations/org2."$3".com/users/Admin@org2."$3".com/msp
    export CORE_PEER_ADDRESS="localhost:"${2}
  else
    echo "================== ERROR !!! ORG Unknown =================="
    exit 1
  fi
  # Dump the CORE_* environment when VERBOSE is requested.
  if [ "$VERBOSE" == "true" ]; then
    env | grep CORE
  fi
}
# parsePeerConnectionParameters $@
# Helper function that sets the peer connection parameters for a chaincode
# operation
# Consumes arguments in triplets (org, port, network-name): for each
# triplet it calls setGlobals and appends the matching --peerAddresses /
# --tlsRootCertFiles flags to PEER_CONN_PARMS; PEERS collects the peer
# names for logging.
parsePeerConnectionParameters() {
  PEER_CONN_PARMS=""
  PEERS=""
  #echo "In parsePeerConnectionParameters : "$CORE_PEER_ADDRESS
  while [ "$#" -gt 0 ]; do
    setGlobals $1 $2 $3
    PEER="peer0.org$1"
    ## Set peer adresses
    PEERS="$PEERS $PEER"
    PEER_CONN_PARMS="$PEER_CONN_PARMS --peerAddresses $CORE_PEER_ADDRESS"
    ## Set path to TLS certificate
    # Indirect lookup: resolves PEER0_ORG<n>_CA for the current org.
    TLSINFO=$(eval echo "--tlsRootCertFiles \$PEER0_ORG$1_CA")
    PEER_CONN_PARMS="$PEER_CONN_PARMS $TLSINFO"
    echo "PEER_CONN_PARMS: $PEER_CONN_PARMS"
    # shift by 3 to get to the next organization
    shift 3
  done
  # remove leading space for output
  PEERS="$(echo -e "$PEERS" | sed -e 's/^[[:space:]]*//')"
}
verifyResult() {
  # Abort the whole script when a prior step failed.
  # $1 - exit status to check; $2 - message shown on failure.
  local status="$1"
  local message="$2"
  if [ "$status" -ne 0 ]; then
    echo "!!!!!!!!!!!!!!! "$message" !!!!!!!!!!!!!!!!"
    echo
    exit 1
  fi
}
| true
|
1c504e891752791c73186b90dcb893a5097f1116
|
Shell
|
chraac/lua.js.test
|
/gen.xcode.sh
|
UTF-8
| 247
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate an Xcode project for the iOS build into ./cmake/.
# FIX: the old `${path/\./$(pwd)}` replaced the FIRST "." appearing
# anywhere in the script path, which corrupts paths containing dots
# (e.g. /home/me/lua.js.test). Resolve the directory to an absolute
# path with cd/pwd instead.
path=$(cd "$(dirname "$0")" && pwd)
cmake_file_path=${path}/
output_path=${path}/cmake/
mkdir -p "${output_path}"
cd "${output_path}" || exit 1
cmake -G Xcode -DCMAKE_TOOLCHAIN_FILE="${cmake_file_path}/iOS.cmake" "${cmake_file_path}"
cd "${path}" || exit 1
| true
|
9df6c00274b5a92eaf6c01dac8a89c9e1a892ace
|
Shell
|
jbarn16/repo.plugin.video.9anime
|
/updateLatest.sh
|
UTF-8
| 549
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Release automation: sync local master with origin, compute the next
# addon version, regenerate the Kodi repository metadata, and push a
# version-<N> branch ready for review.
set -e
git checkout master
git remote update origin
# Discard local state so the branch is built from origin/master exactly.
git reset --hard origin/master
NEW_VERSION=`python2 updateLatest.py`
BRANCH_NAME=version-${NEW_VERSION}
git checkout -b ${BRANCH_NAME}
# Regenerate addons.xml and its md5 for the updated addon.
python2 kodi-addons/addons_xml_generator.py
git add plugin.video.9anime
git add addons.xml
git add addons.xml.md5
git commit -m "chore(addons): update plugin.video.9anime to version ${NEW_VERSION}"
git push -u origin ${BRANCH_NAME}
git checkout master
echo "Done: https://github.com/DxCx/repo.plugin.video.9anime/compare/${BRANCH_NAME}?expand=1"
| true
|
d8047ecf19dec110fee138be7138a5e0bceb38d3
|
Shell
|
openshift/ironic-image
|
/prepare-ipxe.sh
|
UTF-8
| 635
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Stage the iPXE boot images under /tftpboot at image build time; the
# rundnsmasq step later copies them into /shared/tftpboot once the
# shared volume exists. Doing this in two steps guarantees that a CI
# build fails immediately if any expected image is missing, instead of
# provisioning failing later at runtime.
set -ex

# -p creates /tftpboot and its arm64-efi subdirectory in one go.
mkdir -p /tftpboot/arm64-efi

# x86: legacy PXE chainloader and the EFI SNP-only build.
cp /usr/share/ipxe/undionly.kpxe /tftpboot/
cp /usr/share/ipxe/ipxe-snponly-x86_64.efi /tftpboot/snponly.efi
# arm64 EFI build.
cp /usr/share/ipxe/arm64-efi/snponly.efi /tftpboot/arm64-efi/snponly.efi
| true
|
cdc1a40252537cfb25f20abbdf4c62f30fdaa8ce
|
Shell
|
soochoe/visual-compatibility
|
/data/amazon/prepare_dataset.sh
|
UTF-8
| 191
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
dir="dataset"
metadata="metadata.pkl"

# Build the metadata pickle once, if it is not already present.
if [ ! -f "$metadata" ]; then
  ( cd utils && python parse_metadata.py )
fi

# Build the dataset directory once, if it does not already exist.
if [[ ! -e "$dir" ]]; then
  ( cd utils && python create_dataset.py )
fi
| true
|
7044e935d19d93a8775f797961c0aa69f67502ab
|
Shell
|
yeasy/code_snippet
|
/security/openssl/gen_certs.sh
|
UTF-8
| 2,109
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a self-signed CA and a CA-signed server certificate with SAN
# entries. Each step is skipped if its output file already exists, so
# the script is re-runnable.
echo "===Generate a self signed certificate as the CA"
[ -f ca.crt ] || openssl req -nodes -newkey rsa:2048 -subj "/C=US/ST=State/L=Location/O=OrgName/OU=IT Department/CN=ca" -keyout ca.key -x509 -days 365 -out ca.crt
#echo "Review genrated certificate"
#openssl x509 -text -noout -in ca.crt
echo "===Generate private key for server"
#[ -f server.key ] || openssl genrsa -nodes -out server.key 2048
# NOTE(review): this form also writes a self-signed cert to stdout; only
# server.key is kept — confirm this is intended over plain genrsa.
[ -f server.key ] || openssl req -nodes -newkey rsa:2048 -subj "/C=US/ST=State/L=Location/O=OrgName/OU=IT Department/CN=server" -x509 -keyout server.key
#[ -f server.key ] || openssl req -newkey rsa:2048 -subj "/C=US/ST=State/L=Location/O=OrgName/OU=IT Department/CN=server" -nodes -keyout server.key -x509 -days 365 -out server-tmp.crt
# Optionally inspect the generated private key
# openssl rsa -in ca.key -noout -text
# Optionally inspect the generated public key
# openssl rsa -in example.org.pubkey -pubin -noout -text
#echo "Generate private key and csr for server: use server address as CN field (can use wildcard)"
#openssl req -new -key server.key -out server.csr
echo "===Generate the config file for the server csr, indicating server key file"
# The SAN list below (localhost, service names, public IPs) is what
# clients will validate against.
if [ ! -f server-csr.conf ] ; then
cat <<EOF >./server-csr.conf
[ req ]
prompt = no
default_bits = 2048
default_keyfile = server.key
distinguished_name = req_distinguished_name
req_extensions = req_ext
[ req_distinguished_name ]
C=CN
ST=BJ
L=BJ
O=O
OU=O
CN=server
[ req_ext ]
subjectAltName = @alt_names
[alt_names]
DNS.1 = localhost
DNS.2 = server
DNS.3 = oci-lbr
IP.1 = 127.0.0.1
IP.2 = 129.213.9.145
IP.3 = 129.213.62.129
IP.4 = 129.213.53.75
IP.5 = 129.213.51.73
EOF
fi
echo "===Generate csr based on the config"
[ -f server.csr ] || openssl req -new -nodes -out server.csr -config server-csr.conf
echo "===Sign a server certificate based on csr by CA"
# NOTE(review): v3.ext is referenced here but never generated by this
# script — it must already exist in the working directory; confirm.
[ -f server.crt ] || openssl x509 -req -in server.csr -extfile v3.ext -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt
echo "===Combine key and cert together as server certificate"
if [ ! -f server.pem ]; then
cp server.key server.pem
cat server.crt >> server.pem
fi
| true
|
56bb976bbebf2c54bd1b32648b1860494f3ceb38
|
Shell
|
koolnube84/Stocks
|
/Scripts/a.sh
|
UTF-8
| 534
| 3.5
| 4
|
[] |
no_license
|
clear
cd ~/Desktop/Stocks
echo "$(tput setaf 2)[System]$(tput sgr0) Starting a.sh"
echo "$(tput setaf 2)[System]$(tput sgr0) Press Any Key to exit"
# Poll for a keypress once per second; until one arrives, print every
# line (ticker name) from Lists/A.txt on each pass.
# FIXES from original: the inner for-loop was missing its do/done pair
# (a hard syntax error), and it assigned $Name but echoed the undefined
# $NAME; `while [ true ]` simplified to `while true`.
while true ; do
	read -t 1 -n 1
	if [ $? = 0 ] ; then
		echo ""
		echo "$(tput setaf 2)[System]$(tput sgr0) pull.sh stopped by user"
		exit ;
	else
		cd Lists
		LINESINLETTERFILE=$(awk 'END {print NR}' A.txt)
		echo "$LINESINLETTERFILE lines"
		for i in $(seq 1 "$LINESINLETTERFILE"); do
			Name=$(sed -n "${i}p" A.txt)
			echo "$Name"
		done
		cd ..
	fi
	echo "Done"
done
| true
|
0599376097ef713c40c8e8fe021d17a76361b0cd
|
Shell
|
ericbutters/bashscripts
|
/setlinker.sh
|
UTF-8
| 349
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Switch the system linker symlink /usr/bin/ld between the gold linker
# and the default BFD linker.
# Usage: setlinker.sh gold   -> point ld at ld.gold
#        setlinker.sh <else> -> point ld at ld.bfd
readonly LD="/usr/bin/ld"
readonly GOLD="/usr/bin/ld.gold"
readonly DEF="/usr/bin/ld.bfd"

# An argument naming the desired linker is required.
if [ -z "$1" ]; then
	echo "no linker passed.. exit."
	exit 1
fi

# Only replace /usr/bin/ld when it is already a symlink; refuse to
# delete a real binary.
if [ -h "$LD" ]; then
	sudo rm "$LD"
else
	echo "no symbolic link for ld found.. exit."
	exit 2
fi

case "$1" in
	gold) sudo ln -s "$GOLD" "$LD" ;;
	*)    sudo ln -s "$DEF" "$LD" ;;
esac
exit 0
| true
|
1db6b7d0ce326a3b8be47514c5e571e5bb6eb56b
|
Shell
|
Tedgar20/XHTML-To-CSV
|
/XHTML2CSV.sh
|
UTF-8
| 1,583
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Hourly scraper: download NWS forecast pages for several cities, clean
# the HTML into XHTML with TagSoup, parse each .xhtml with makeCSV.py
# and append the rows to weather.csv.
# FIXES from original: the shebang was missing its '!' (#/bin/bash was
# just a comment); downloads went through `echo \`curl ...\` > file`,
# which collapsed all whitespace in the HTML — curl now writes the file
# directly; the stray backticks that executed `touch`/`java` output as
# commands are gone.
touch weather.csv
echo "state,city,weather,temperature,humidity,pressure" > weather.csv

# fetch_and_clean STATE LAT,LON
# Download one forecast page into a timestamped <STATE>.html file and
# run TagSoup over it to produce the matching .xhtml.
fetch_and_clean() {
	local out
	out=$(date '+%Y-%m-%d-%H-%M-%S')-$1.html
	curl -L -o "${out}" "https://forecast-v3.weather.gov/point/$2"
	java -jar tagsoup-1.2.1.jar --files "${out}"
}

while true; do
	fetch_and_clean NY 40.78,-73.97   # NY, New York
	fetch_and_clean CA 34.02,-118.45  # CA, Los Angeles
	fetch_and_clean IL 41.78,-87.76   # IL, Chicago
	fetch_and_clean TX 29.64,-95.28   # TX, Houston
	fetch_and_clean AZ 33.69,-112.07  # AZ, Phoenix
	fetch_and_clean PA 40.08,-75.01   # PA, Philadelphia
	fetch_and_clean FL 30.23,-81.67   # FL, Jacksonville
	# Parse every cleaned page into CSV rows.
	for file in *.xhtml
	do
		echo "$(python makeCSV.py "${file}")" >> weather.csv
	done
	sleep 1h
done
| true
|
d1035036dc13ff34d393ced101a5f1881de4aa0a
|
Shell
|
sboosali/configuration
|
/home-manager/init/sboo-init-gpg.sh
|
UTF-8
| 6,279
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# First-time interactive GnuPG setup for the identity below: generate a
# keypair, create a revocation certificate, and initialize. See the
# extensive notes in the comments further down this file.
set -e
##################################################
# Fail fast if gpg is not installed, and show the version.
command -v gpg
gpg --version #TODO check ≥2.1.17
##################################################
GPG_USER_ID='samboosalis@gmail.com'
##################################################
# Interactively generate a new keypair (see the sample transcript in
# the notes below).
gpg --full-generate-key
##################################################
# Produce a revocation certificate — print it out and store it safely.
gpg --gen-revoke --armor --output=RevocationCertificate.asc ${GPG_USER_ID}
##################################################
# NOTE(review): "gpg init" is not a standard gpg subcommand — confirm
# this is an alias/wrapper available on this machine.
gpg init ${GPG_USER_ID}
##################################################
##################################################
# Notes ##########################################
##################################################
# « https://wiki.archlinux.org/index.php/GnuPG#Configuration_files »
##################################################
# NOTE Whenever a « user-id » is required in a command,
# it can be specified with:
#
# * your key ID,
# * fingerprint,
# * a part of your name,
# * your email address,
# * etc.
#
# GnuPG is flexible on this.
##################################################
# e.g.:
#
# $ gpg --full-generate-key
#
# gpg (GnuPG) 2.2.11; Copyright (C) 2018 Free Software Foundation, Inc.
# This is free software: you are free to change and redistribute it.
# There is NO WARRANTY, to the extent permitted by law.
#
# Please select what kind of key you want:
# (1) RSA and RSA (default)
# (2) DSA and Elgamal
# (3) DSA (sign only)
# (4) RSA (sign only)
# • Your selection? 1
#
# RSA keys may be between 1024 and 4096 bits long.
# • What keysize do you want? (2048) 4096
# Requested keysize is 4096 bits
#
# Please specify how long the key should be valid.
# 0 = key does not expire
# <n> = key expires in n days
# <n>w = key expires in n weeks
# <n>m = key expires in n months
# <n>y = key expires in n years
# • Key is valid for? (0) 12y
# Key expires at Wed Dec 11 14:04:36 2030 PST
#
# GnuPG needs to construct a user ID to identify your key.
# • Real name: Sam Boosalis
# • Email address: samboosalis@gmail.com
# • Comment: sboosali
# You selected this USER-ID:
# "Sam Boosalis (sboosali) <samboosalis@gmail.com>"
#
# # an input-dialog pops up, in which you enter your passphrase (twice).
#
# We need to generate a lot of random bytes. It is a good idea to perform
# some other action (type on the keyboard, move the mouse, utilize the
# disks) during the prime generation; this gives the random number
# generator a better chance to gain enough entropy.
# gpg: /home/sboo/.gnupg/trustdb.gpg: trustdb created
# gpg: key 96581B5997622007 marked as ultimately trusted
# gpg: directory '/home/sboo/.gnupg/openpgp-revocs.d' created
# gpg: revocation certificate stored as '/home/sboo/.gnupg/openpgp-revocs.d/FAE77E1A10376FD9381DDB2596581B5997622007.rev'
#
# public and secret key created and signed.
# pub rsa4096 2018-12-14 [SC] [expires: 2030-12-11]
# FAE77E1A10376FD9381DDB2596581B5997622007
# uid Sam Boosalis (sboosali) <samboosalis@gmail.com>
# sub rsa4096 2018-12-14 [E] [expires: 2030-12-11]
#
##################################################
# e.g.
#
# $ gpg --gen-revoke --armor --output=RevocationCertificate.asc <user-id>
##################################################
# e.g.
#
# $ gpg --list-keys
#
# /home/sboo/.gnupg/pubring.kbx
# -----------------------------
# pub rsa4096 2018-12-14 [SC] [expires: 2030-12-11]
# FAE77E1A10376FD9381DDB2596581B5997622007
# uid [ultimate] Sam Boosalis (sboosali) <samboosalis@gmail.com>
# sub rsa4096 2018-12-14 [E] [expires: 2030-12-11]
##################################################
# e.g.
#
# $ gpg --output 'public_samboosalis.key' --armor --export 'samboosalis'
#
#
#
##################################################
##################################################
# > This certificate can be used to revoke your key if it is ever lost or compromised. Do not neglect this step! Print it out, save it on a disk, and store it safely. It will be short enough that you can type it back in by hand without much effort if you just print it out.
# >
# > If you lose your secret key or it is compromised, you will want to revoke your key by uploading the revocation certificate to a public keyserver (assuming you uploaded your public key to a public keyserver in the first place).
# >
# > Protect your revocation key like you protect your secret keys.
##################################################
# To list keys in your public key ring:
#
# $ gpg --list-keys
# To list keys in your secret key ring:
#
# $ gpg --list-secret-keys
##################################################
# > In order for others to send encrypted messages to you, they need your public key.
# >
# > To generate an ASCII version of a user's public key to file public.key (e.g. to distribute it by e-mail): function
# >
# $ gpg --output public_USERID.key --armor --export USERID
# >
##################################################
# > To ease the migration to the no-secring method, gpg detects the presence of a secring.gpg and converts the keys on-the-fly to the the key store of gpg-agent (this is the private-keys-v1.d directory below the GnuPG home directory (~/.gnupg)).
# Print out your private key, and "keep it secret, keep it safe".
#
# $ gpg -a --export-secret-keys USERID > private_USERID.key
#
# $ lpr -o "sides=two-sided-long-edge" "private_USERID.key"
#
##################################################
# « gpg-agent »:
#
# > gpg-agent is mostly used as daemon to request and cache the password for the keychain. This is useful if GnuPG is used from an external program like a mail client. gnupg comes with systemd user sockets which are enabled by default. These sockets are gpg-agent.socket, gpg-agent-extra.socket, gpg-agent-browser.socket, gpg-agent-ssh.socket, and dirmngr.socket.
# >
##################################################
# >
##################################################
# >
##################################################
| true
|
49920053de57fd0f262207223fc98ad364351895
|
Shell
|
dexterous/dotfiles
|
/home/bin/show-repos
|
UTF-8
| 244
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash -e
#set -o xtrace
# For every git repository up to 3 levels below $1 (default: current
# directory): print the .git path and a separator, fetch from all
# remotes, then show a one-line graph of the commits the upstream has
# that the local branch does not (..@{upstream}).
find ${1-.} -maxdepth 3 -type d -name .git \
  -exec echo {} \; \
  -exec echo '================' \; \
  -execdir git remote update \; \
  -execdir git log --oneline --graph ..@{upstream} \; \
  -exec echo '' \;
| true
|
841dbe1fedeb02bd356cfba2f90a8516a1c95c51
|
Shell
|
nsadriano/Projeto-DevOps-jankins-Ansible-Git-Docker-Zabbix
|
/install.sh
|
UTF-8
| 1,710
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Provision an Ubuntu (bionic) host with Docker CE and Ansible, then print
# instructions for running the project's playbook.
# NOTE: requires sudo and network access; apt commands mutate system state,
# so statement order matters.
###################
# Installing Docker-CE
# Add the Docker apt repository only if no "docker" source is configured yet.
if ! grep -q "docker" /etc/apt/sources.list /etc/apt/sources.list.d/*; then
echo "Adicionar chave GPG oficial do Docker"
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
# Use the following command to configure the stable repository
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable"
fi
# Install Docker only when the `docker` command is not already on PATH.
if ! hash docker > /dev/null 2>&1; then
echo "Instalando Docker-CE"
# First remove old versions named docker or docker-engine
# (no -y flag: this step will prompt interactively if packages exist).
sudo apt-get remove docker docker-engine docker.io
# Refresh the package index with apt
sudo apt-get update
# Install packages that let apt use a repository over HTTPS:
sudo apt-get install apt-transport-https ca-certificates curl software-properties-common -y
# Refresh the package list again (picks up the Docker repo added above):
sudo apt-get update
# Finally install the latest version of Docker CE:
sudo apt-get install docker-ce -y
else
echo "Docker already installed"
fi
# Install ansible #
# Add the Ansible PPA only if it is not already configured.
if ! grep -q "ansible/ansible" /etc/apt/sources.list /etc/apt/sources.list.d/*; then
echo "Adding Ansible PPA"
sudo apt-add-repository ppa:ansible/ansible -y
fi
if ! hash ansible > /dev/null 2>&1; then
echo "Installing Ansible..."
sudo apt-get update
sudo apt-get install software-properties-common ansible git pip python-apt -y
else
echo "Ansible already installed"
fi
#####################################
# Display real installation process #
echo ""
echo " Altere o playbook playbook.yml de acordo com suas necessidade, em seguinda executar o comando :"
echo " ansible-playbook playbook.yml --ask-become-pass"
echo ""
| true
|
3998743eaf2d0a916b6e35d57af271a64f4d95ea
|
Shell
|
kjsanger/containers
|
/singularity/scripts/singularity-wrapper
|
UTF-8
| 5,574
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Copyright (C) 2023 Genome Research Ltd. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Keith James <kdj@sanger.ac.uk>
set -euo pipefail
# Print help text to stderr. The here-doc interpolates the current values of
# DOCKER_IMAGE, MANIFEST_PATH, PREFIX, DOCKER_REGISTRY, DOCKER_TAG and
# DOCKER_USER, so it must only be called after those globals are initialised.
usage() {
cat 1>&2 << EOF
Install singularity proxy wrappers for the executables listed in a
container's manifest to a nominated directory.
Two different types of wrapper are available which differ in how
they run singularity. The default type uses "singularity run",
while the alternative type uses "singularity exec".
If the "exec" type is used, the wrapper can additionally be made
to create a long-running service instance on first use and
subsequently "exec" within that instance.
Usage: $0
 [-e]
 [-h]
 [-i <Docker image name>]
 [-m <JSON manifest path>]
 [-p <wrapper install prefix>]
 [-r <Docker registry name>]
 [-s]
 [-t <Docker image tag>]
 [-u <Docker user name>] [-v] <operation>
Operation may be one of:
 list Print the manifest to STDOUT and exit.
 install Install wrappers to the location given by the install
 prefix (-p option). Wrappers are installed to a directory
 "\$prefix/bin", which is created if not already present.
Options:
 -h Print usage and exit.
 -e Use "singularity exec", rather than "singularity run" in the
 generated wrappers.
 -i Docker image name. Required, defaults to the value of the
 environment variable "\$DOCKER_IMAGE" ("$DOCKER_IMAGE").
 -m Manifest file path. Optional, defaults to the value of the
 environment variable \$MANIFEST_PATH ("$MANIFEST_PATH").
 -p Install prefix. Optional, defaults to the value of the
 environment variable \$PREFIX ("$PREFIX").
 -r Docker registry name. Optional, defaults to the value of the
 environment variable \$DOCKER_REGISTRY ("$DOCKER_REGISTRY").
 -s Start a long-running service instance (implies use of exec).
 -t Docker image tag. Optional, defaults to the value of the
 environment variable \$DOCKER_TAG ("$DOCKER_TAG").
 -u Docker user name. Optional, defaults to the value of the
 environment variable \$DOCKER_USER ("$DOCKER_USER").
 -v Print verbose messages.
EOF
}
# Pretty-print the JSON application manifest (read from $MANIFEST_PATH)
# to stdout using jq.
print_manifest() {
jq . "$MANIFEST_PATH"
}
# Write a bash script wrapping an application in a Docker container.
# Arguments:
#   $1 - destination directory for the wrapper
#   $2 - executable name (becomes the wrapper filename)
# Globals read: DOCKER_IMAGE (required), DOCKER_REGISTRY, DOCKER_USER,
#   DOCKER_TAG, singularity_wrap_impl.
# The here-doc below is UNQUOTED, so the Docker coordinates are baked into
# the generated wrapper at install time; the escaped \$ expansions are
# deliberately left to be evaluated when the wrapper itself runs.
write_wrapper() {
local dir="$1"
local exe="$2"
if [ -z "$DOCKER_IMAGE" ] ; then
usage
echo -e "\nERROR:\n A Docker image name is required"
exit 4
fi
cat << EOF > "$dir/$exe"
#!/bin/bash
set -e
export DOCKER_REGISTRY="$DOCKER_REGISTRY"
export DOCKER_USER="$DOCKER_USER"
export DOCKER_IMAGE="$DOCKER_IMAGE"
export DOCKER_TAG="$DOCKER_TAG"
# TODO: Add other environment variables that will, if set at install
# time, be permanently set in the installed wrapper. E.g. a candidate
# is SINGULARITY_CACHEDIR.
"\$(dirname "\${BASH_SOURCE[0]}")/$singularity_wrap_impl" "$exe" "\$@"
EOF
chmod +x "$dir/$exe"
}
# Create the wrapper bin directory, copy in the shared wrapper implementation
# script, and generate one proxy wrapper per executable listed in the
# manifest (globals: PREFIX, singularity_wrap_impl, wrappers[]).
install_wrappers() {
    local bindir="$PREFIX/bin"
    local exe

    install -d "$bindir"
    cp "/usr/local/bin/$singularity_wrap_impl" "$bindir"

    for exe in "${wrappers[@]}"; do
        write_wrapper "$bindir" "$exe"
    done
}
# Defaults, each overridable via the environment before option parsing.
DOCKER_REGISTRY=${DOCKER_REGISTRY:-ghcr.io}
DOCKER_USER=${DOCKER_USER:-wtsi-npg}
DOCKER_IMAGE=${DOCKER_IMAGE:-""}
DOCKER_TAG=${DOCKER_TAG:-latest}
PREFIX=${PREFIX:-/opt/wtsi-npg}
MANIFEST_PATH=${MANIFEST_PATH:-"$PREFIX/etc/manifest.json"}
# Wrapper implementation flavour; -e and -s below switch it. If both -e and
# -s are given, whichever appears last on the command line wins.
singularity_wrap_impl="singularity-run-docker"
while getopts "hei:m:p:r:st:u:v" option; do
case "$option" in
e)
singularity_wrap_impl="singularity-exec-docker"
;;
h)
usage
exit 0
;;
i)
DOCKER_IMAGE="$OPTARG"
;;
m)
MANIFEST_PATH="$OPTARG"
;;
p)
PREFIX="$OPTARG"
;;
r)
DOCKER_REGISTRY="$OPTARG"
;;
s)
singularity_wrap_impl="singularity-service-docker"
;;
t)
DOCKER_TAG="$OPTARG"
;;
u)
DOCKER_USER="$OPTARG"
;;
v)
# Verbose == shell trace.
set -x
;;
*)
usage
echo -e "\nERROR:\n Invalid option"
exit 4
;;
esac
done
shift $((OPTIND -1))
declare -a wrappers
if [ ! -e "$MANIFEST_PATH" ] ; then
echo -e "\nERROR:\n The manifest of executables at '$MANIFEST_PATH' does not exist"
exit 4
fi
# Word-split jq's space-joined output into the array.
# NOTE(review): assumes executable names contain no whitespace — confirm
# against the manifest schema.
wrappers=($(jq -j '.executable[] + " "' "$MANIFEST_PATH"))
# All remaining positional args joined into one string; expected to be a
# single operation word (list | install).
operation="$@"
if [ -z "$operation" ] ; then
usage
echo -e "\nERROR:\n An operation argument is required"
exit 4
fi
case "$operation" in
list)
print_manifest
exit 0
;;
install)
install_wrappers
;;
*)
usage
echo -e "\nERROR:\n Invalid wrapper operation '$operation'"
exit 4
;;
esac
| true
|
3d21f5ca37b35cc1f359e806e2606fbb8e006170
|
Shell
|
mina-sadek/ROS-dev
|
/catkin_ws_crt.sh
|
UTF-8
| 598
| 3.65625
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
# Create (if missing) and build a catkin workspace at $HOME/catkin_ws.
# Requires the ROS tools catkin_init_workspace and catkin_make on PATH.
starting_dir="$PWD" # NOTE(review): saved but never restored; kept for compatibility
home_dir="$HOME"
echo "home_dir:$home_dir"
#1. If catkin workspace doesn't exist, create a catkin workspace
if [ ! -d "$home_dir/catkin_ws/src" ]; then
  echo "Creating a catkin workspace @ $home_dir/catkin_ws"
  # Quote paths and check cd so a failure cannot run catkin in the wrong dir.
  mkdir -p "$home_dir/catkin_ws/src"
  cd "$home_dir/catkin_ws/src" || exit 1
  echo "Initializing catkin workspace ..."
  catkin_init_workspace
else
  echo "catkin_ws exists @ $home_dir/catkin_ws"
fi
#2. Build the workspace
echo "Building catkin workspace ..."
cd "$home_dir/catkin_ws" || exit 1
catkin_make
| true
|
23ef2a5115c9f8d0f305a991cbe17e7c0196a617
|
Shell
|
bytedance/fedlearner
|
/deploy/scripts/rsa_psi/run_raw_data_partitioner.sh
|
UTF-8
| 2,088
| 2.59375
| 3
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Launch the raw-data partitioner CLI inside the deployment container,
# translating environment variables into command-line flags.
set -ex
# Force CPU-only execution for this data-processing job.
export CUDA_VISIBLE_DEVICES=
source /app/deploy/scripts/hdfs_common.sh || true
source /app/deploy/scripts/env_to_args.sh
# normalize_env_to_args (from env_to_args.sh) emits "--flag value" when the
# env var is set and nothing when unset, so optional flags simply disappear
# from the final command line. The expansions below are deliberately
# unquoted for that reason.
partitioner_name=$(normalize_env_to_args "--partitioner_name" $NAME)
input_file_wildcard=$(normalize_env_to_args "--input_file_wildcard" "$FILE_WILDCARD")
raw_data_iter=$(normalize_env_to_args "--raw_data_iter" $FILE_FORMAT)
compressed_type=$(normalize_env_to_args "--compressed_type" $COMPRESSED_TYPE)
read_ahead_size=$(normalize_env_to_args "--read_ahead_size" $READ_AHEAD_SIZE)
read_batch_size=$(normalize_env_to_args "--read_batch_size" $READ_BATCH_SIZE)
output_builder=$(normalize_env_to_args "--output_builder" $FILE_FORMAT)
builder_compressed_type=$(normalize_env_to_args "--builder_compressed_type" $BUILDER_COMPRESSED_TYPE)
file_paths=$(normalize_env_to_args "--file_paths" $INPUT_FILE_PATHS)
kvstore_type=$(normalize_env_to_args "--kvstore_type" $KVSTORE_TYPE)
memory_limit_ratio=$(normalize_env_to_args '--memory_limit_ratio' $MEMORY_LIMIT_RATIO)
# Required arguments come directly from the environment; optional ones are
# the pre-normalised "--flag value" strings built above.
python -m fedlearner.data_join.cmd.raw_data_partitioner_cli \
--input_dir=$INPUT_DIR \
--output_dir=$OUTPUT_DIR \
--output_partition_num=$OUTPUT_PARTITION_NUM \
--total_partitioner_num=$TOTAL_PARTITIONER_NUM \
--partitioner_rank_id=$INDEX \
$partitioner_name $kvstore_type\
$raw_data_iter $compressed_type $read_ahead_size $read_batch_size \
$output_builder $builder_compressed_type \
$file_paths $input_file_wildcard $memory_limit_ratio
| true
|
4eeaed191ff94b887487baa16414defb0acc9a53
|
Shell
|
uw-dims/ansible-dims-playbooks
|
/roles/git-ssh/templates/git-shell-commands/newrepo.j2
|
UTF-8
| 7,284
| 3.4375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# vim: set ts=4 sw=4 tw=0 et :
#
# {{ ansible_managed }}
#
# Copyright (C) 2014-2017, University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
. $DIMS/lib/shflags
. $DIMS/bin/dims_functions.sh
# Tracks with bumpversion
VERSION="2.14.0"
export FQDN="$(get_fqdn)"
DESCRIPTION="${DESCRIPTION:-{repo_name}}"
REPOSDIR="${REPOSDIR:-{{ git_reposdir }}}"
DEPLOYMENT=${DEPLOYMENT:-$(get_deployment_from_fqdn)}
CATEGORY=${CATEGORY:-$(get_category_from_fqdn)}
SENDER=${SENDER:-{{ git_envelope_sender|default("git@{{ inventory_hostname }}") }}}
RECIPIENTS=${RECIPIENTS:-{{ git_mailing_list|default("git@{{ inventory_hostname }}") }}}
FLAGS_HELP="usage: $BASE [options] args"
# Define command line options
DEFINE_boolean 'debug' false 'enable debug mode' 'd'
DEFINE_string 'description' "${DESCRIPTION}" 'repo description' 'D'
DEFINE_string 'reposdir' "${REPOSDIR}" 'repos directory' 'R'
DEFINE_string 'recipients' "${RECIPIENTS}" 'email envelope recipients' 'E'
DEFINE_string 'sender' "${SENDER}" 'email envelope sender' 'F'
DEFINE_boolean 'hooks' false 'install hooks configuration' 'H'
DEFINE_boolean 'usage' false 'print usage information' 'u'
DEFINE_boolean 'verbose' false 'be verbose' 'v'
DEFINE_boolean 'version' false 'print version and exit' 'V'
usage() {
flags_help
cat << EOD
This script is intended to facilitate creating new Git
repositories with the necessary elements for use in
continuous integration and continuous deployment workflows.
This includes setting up hooks to trigger Jenkins build
jobs, feedback via email and/or message bus queues,
description strings, etc.
If the repo name is a bare string (e.g., "mynewrepo"), a new
local bare Git repository named "mynewrepo.git" will be
created in ${REPOSDIR}.
$ newrepo mynewrepo
$ cat ${REPOSDIR}/mynewrepo.git/description
mynewrepo
To produce a more verbose description, use:
$ newrepo --description "This is my second repo" mynewrepo2
$ cat ${REPOSDIR}/mynewrepo2.git/description
This is my second repo
If the repo name contains an equal sign ('=') then it is assumed
the desired repo name is to the left of the equal sign, and a
valid URL follows on the right of the equal sign, like this:
shflags=https://github.com/kward/shflags
A file named 'shflags.git' will be created and the URL will
be returned by programs such as 'mrconfig'.
For use in creating a bare repo on a host serving Git repos,
you only need to provide the desired name of the repo without
the .git extension. (If .git is provided, it will be ignored;
if left off, it will be added.)
When installed in ~git/git-shell-commands directory, anyone with
SSH access to Git repos can invoke the script remotely:
$ ssh git@source.{{ dims_domain }} newrepo anotherrepo
{{ ansible_managed }}
EOD
exit 0
}
# Create a new bare Git repository (or a URL link file) under FLAGS_reposdir.
# Arguments: $1 - repo name, or "name=URL" to create a remote-reference file.
# Exits non-zero via error_exit (from dims_functions.sh) on any validation
# failure; exits 0 on success.
main()
{
dims_main_init
debug 'debug mode enabled'
[[ $FLAGS_debug -eq ${FLAGS_TRUE} && $FLAGS_verbose -eq ${FLAGS_TRUE} ]] && set -x
# Validate required elements
[ ! -z "$1" ] || error_exit 1 "No repo specified"
[[ -d ${FLAGS_reposdir} ]] || error_exit 1 "Repos directory \"${FLAGS_reposdir}\" not found"
cd ${FLAGS_reposdir} || error_exit 1 "Cannot change directory to ${FLAGS_reposdir}"
# "name=URL" form: write a one-line file containing the URL instead of
# creating a local bare repository.
if [[ $1 =~ '=' ]]; then
verbose "Processing remote reference"
REPO=$(echo $1 | awk -F= '{ print $1; }')
RURL=$(echo $1 | awk -F= '{ print $2; }')
[[ ! -d ${REPO}.git ]] || error_exit 1 "Directory ${REPO}.git already exists"
[[ ! -f ${REPO}.git ]] || error_exit 1 "File ${REPO}.git already exists"
echo "$RURL" > ${REPO}.git || error_exit 1 "Could not create file ${REPO}.git linking to ${RURL}"
exit 0
fi
verbose "Processing local repo"
# Strip any .git extension, if provided (it is forced later).
REPO="$(basename $1 .git)"
debug "REPO=${REPO}"
[[ ! -d "${REPO}.git" ]] || error_exit 1 "Directory \"${REPO}.git\" already exists"
# If --description was left at its default placeholder, use the short
# repo name as the description.
if [[ ${FLAGS_description} = "${DESCRIPTION}" ]]; then
FLAGS_description="$REPO"
fi
# We want the repo name to end in .git, so after validation
# and possible use of short name for description, put it back.
REPO="${REPO}.git"
# Exit immediately on any error
set -e
verbose "Initializing repo \"$REPO\""
git init --bare $REPO >/dev/null
verbose "Removing .sample hooks"
rm -f $REPO/hooks/*.sample || true
verbose "Setting description to \"${FLAGS_description}\""
echo "${FLAGS_description}" > $REPO/description
# The git-ssh role handles the "config" file, so this option isn't
# strictly necessary (but code was left here for use by enabling the
# option if so desired.)
if [[ $FLAGS_hooks -eq $FLAGS_TRUE ]]; then
verbose "Enabling hooks:"
git config --file $REPO/config hooks.mailinglist ${FLAGS_recipients}
git config --file $REPO/config hooks.announcelist ${FLAGS_recipients}
git config --file $REPO/config hooks.envelopesender ${FLAGS_sender}
git config --file $REPO/config hooks.emailprefix "[Git] "
#git config --file $REPO/config hooks.diffopts '"--stat" "--summary" "--find-copies-harder" "-p"'
# hack - force writing a line that starts with tab and has a single
# quoted string with options, since "git config" doesn't seem to want
# to do this in a way that works for reading the value properly.
echo " diffopts = \"--stat --summary --find-copies-harder -p\"" >> $REPO/config
fi
if [[ ${FLAGS_verbose} -eq ${FLAGS_TRUE} && -f $REPO/config ]]; then
git config --file $REPO/config -l | grep "^hooks\." | sed 's/^/\[+\] /'
fi
debug "Returning from main()"
on_exit
return $?
}
# parse the command-line
FLAGS "$@" || exit $?
eval set -- "${FLAGS_ARGV}"
main "$@"
exit $?
| true
|
75f66225a4ff794d7bce6f7e9012c2a0767604ec
|
Shell
|
AugurProject/augur
|
/support/install.sh
|
UTF-8
| 8,927
| 3.84375
| 4
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
#!/usr/bin/env bash
# Generate ./augur/docker-compose.yml. The quoted 'EOF' delimiter keeps
# ${AUGUR_ENV} and ${ETHEREUM_HTTP} literal in the output, so docker-compose
# resolves them from ./augur/.env at run time, not at generation time.
make_docker_compose() {
cat << 'EOF' > "${PWD}/augur/docker-compose.yml"
version: '3.7'
services:
augur:
image: augurproject/augur:runner
restart: always
ports:
- 9001:9001
- 9002:9002
- 9003:9003
- 9004:9004
volumes:
- ./keys:/keys
environment:
AUGUR_ENV: ${AUGUR_ENV}
ETHEREUM_NETWORK: ${AUGUR_ENV}
ETH_NODE_URL: ${ETHEREUM_HTTP}
EOF
}
# Generate the ./augur/cli management script. The quoted 'EOF' delimiter
# makes the entire body literal: every $var below is evaluated when the
# generated cli runs, never when this installer runs.
# Fixes vs. previous version (all inside the generated script):
#   - "docker-compose restart -d" -> "docker-compose restart" (restart has no -d flag)
#   - infura.nio -> infura.io, "past" -> "paste" (typos in user guidance)
#   - "ipfs pin add reporting_ui_hash" -> "$reporting_ui_hash" (missing expansion)
make_cli() {
cat << 'EOF' > "${PWD}/augur/cli"
#!/usr/bin/env bash
DEFAULT_ETH_NODE_URL="${ETHEREUM_HTTP:-http://localhost:8545}";
method="$1"
intro() {
cat <<HERE
##############################################################################
Welcome, Friend of Augur
This utility will help to set up the services which can interact with Augur on
kovan, mainnet, and localhost.
This utility requires docker-ce and docker-compose, and once fully installed
the services can be managed using docker-compose directly. If you don't know
what that means, you can also just google docker desktop and download that and
run it.
Configuration will be written to a directory on your filesystem.
##############################################################################
HERE
}
usage() {
cat <<HERE
To setup scripts and start augur services:
./augur/cli setup
To start augur services:
./augur/cli start
To view logs:
./augur/cli logs
To stop services:
./augur/cli stop
To restart services:
./augur/cli restart
To upgrade this script:
./augur/cli upgrade
HERE
}
prereqs() {
# Check docker is present
printf "Checking prerequisites...\n"
if [ -x "$(command -v docker)" ]; then
printf "[x]: Docker - Installed\n"
else
printf "[!]: Docker - not installed\n~~> You need Docker installed and configured in order to run the augur services. See: https://docs.docker.com/get-docker/ for instructions.\n\n";
exit 1;
fi
docker info > /dev/null 2>&1;
if [ $? != 0 ]; then
printf "[!]: Docker Daemon - Not running!\n~~> Follow the instructions from the docker install guide on making sure your docker daemon is running or download docker desktop and double click to install\n";
exit 1;
else
printf "[x]: Docker Daemon - Running!\n";
fi
if [ -x "$(command -v docker-compose)" ]; then
printf "[x]: docker-compose - Installed\n"
else
printf "[!]: docker-compose - Not installed\n~~> You must install docker-compose to run Augur services. See: https://docs.docker.com/compose/install/\n"
exit 1
fi
printf "Prerequisites check complete!\n\n"
}
read_env(){
local choice
read -p "Enter choice [1 - 3] (Default is v2): " choice
case $choice in
1) printf "v2";;
2) printf "mainnet";;
3) printf "local";;
# The Default
*) printf "v2"
esac
}
get_augur_key() {
helper() {
key=$(docker logs $(docker ps|grep augur_augur_1|awk '{print $1}') 2>&1|grep -C0 'wallet with address'|awk '{print $5}')
key_exists=$(docker logs $(docker ps|grep augur_augur_1|awk '{print $1}') 2>&1|grep -C0 'Keyfile already exists at path')
if [ ! -z "$key" ]; then
echo "$key"
echo "$key" > ./addr.key
return 0
elif [ ! -z "$key_exists" ]; then
cat ./addr.key
return 0
else
return 1
fi
}
until helper
do sleep 1
done
}
get_previous_warp_sync_hash() {
helper() {
hash=$(docker logs $(docker ps|grep augur_augur_1|awk '{print $1}') 2>&1|grep -C0 'Previous Warp Sync Hash'|awk '{print $5}')
if [ -z "$hash" ]
then return 1
else
echo $hash
return 0
fi
}
until helper
do sleep 1
done
}
get_current_warp_sync_hash() {
helper() {
hash=$(docker logs $(docker ps|grep augur_augur_1|awk '{print $1}') 2>&1|grep -C0 'Current Warp Sync State'|awk '{print $7}')
if [ -z "$hash" ]
then return 1
else
echo $hash
return 0
fi
}
until helper
do sleep 1
done
}
get_trading_UI_hash() {
docker logs $(docker ps|grep augur_augur_1|awk '{print $1}') 2>&1|grep -C0 'Pinning UI build at path'|awk '{print $10}'
}
get_trading_UI_hash32() {
docker logs $(docker ps|grep augur_augur_1|awk '{print $1}') 2>&1|grep -C0 'Pinning UI build at path'|awk '{print $12}'
}
get_reporting_UI_hash() {
docker logs $(docker ps|grep augur_augur_1|awk '{print $1}') 2>&1|grep -C0 'Pinning Reporting UI build at path'|awk '{print $11}'
}
get_reporting_UI_hash32() {
docker logs $(docker ps|grep augur_augur_1|awk '{print $1}') 2>&1|grep -C0 'Pinning Reporting UI build at path'|awk '{print $13}'
}
setup() {
cat <<- HERE
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Select Augur Environment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. v2 (kovan)
2. mainnet
3. local
HERE
export AUGUR_ENV=$(read_env)
if [ "${AUGUR_ENV}" == "mainnet" ]; then
# Get eth node url
printf "$BUFFER_TEXT\nNOTE: You need to have access to an Ethereum Mainnet server.\nIf you don't have one or don't know what this is, \nregister one at https://infura.io/register and paste the Mainnet URL here.$BUFFER_TEXT\n";
printf "Enter an ethereum RPC URL (default: $DEFAULT_ETH_NODE_URL): ";
read ETH_NODE_URL;
export ETHEREUM_HTTP=${ETH_NODE_URL:-$DEFAULT_ETH_NODE_URL}
elif [ "${AUGUR_ENV}" == "v2" ]; then
export ETHEREUM_HTTP=${ETHEREUM_HTTP:-https://kovan.augur.net/ethereum-http}
else
export ETHEREUM_HTTP=$DEFAULT_ETH_NODE_URL
fi
cat <<HERE
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Configuring Docker Environment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
HERE
(
cat <<HEREDOC
ETHEREUM_HTTP=${ETHEREUM_HTTP}
AUGUR_ENV=${AUGUR_ENV}
HEREDOC
) > ./augur/.env
printf "[x]: ${PWD}/augur/.env - Configuration saved\n"
cat <<HERE
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ready To Start Augur Services
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Will run "./augur/cli start".
This may take some time...
HERE
read -n 1 -s -r -p "Press any key to continue"
echo
$0 start
}
intro
prereqs
if [ -z "${method}" ]; then
usage
exit 1
fi
case "$method" in
"setup")
(
setup
)
;;
"start")
(
cd augur
docker-compose up -d augur
printf "Spinning up augur sdk server. Please wait, this'll take many minutes\n"
printf "You can view the progress in a separate terminal with this command: $0 logs"
printf "\n\n"
augur_key=`get_augur_key`
previous_warp_sync_hash=`get_previous_warp_sync_hash`
current_warp_sync_hash=`get_current_warp_sync_hash`
trading_ui_hash=`get_trading_UI_hash`
trading_ui_hash32=`get_trading_UI_hash32`
reporting_ui_hash=`get_reporting_UI_hash`
reporting_ui_hash32=`get_reporting_UI_hash32`
cat <<PRETTYBLOCK
##############################
IPFS Hashes
##############################
- Reporting
- CIDv0: $reporting_ui_hash
- CIDv1: $reporting_ui_hash32
- Trading
- CIDv0: $trading_ui_hash
- CIDv1: $trading_ui_hash32
##############################
IPFS Links
##############################
Links:
- Reporting
- https://$reporting_ui_hash32.ipfs.dweb.link
- https://dweb.link/ipfs/$reporting_ui_hash
- Trading
- https://$trading_ui_hash32.ipfs.dweb.link
- https://dweb.link/ipfs/$trading_ui_hash
##############################
Warp Sync
##############################
ETH Account for Warp Sync Reporting: $augur_key
Most-recently resolved warp sync hash: $previous_warp_sync_hash
Warp sync hash to be reporting/confirmed for pending market: $current_warp_sync_hash
You are currently pinning the Trading and Reporting UIs to IPFS. Thanks!
This will be true as long as you keep the "augur_augur_1" docker running.
Actions you can take:
1. Pin the Trading UI with your local ipfs daemon (not just the augur docker):
ipfs pin add $trading_ui_hash
2. Pin the Reporting UI with your local ipfs daemon (not just the augur docker):
ipfs pin add $reporting_ui_hash
3. Begin autoreporting on the warpsync market by sending some ether (recommended: 1 ETH) to your augur address: $augur_key
PRETTYBLOCK
)
;;
"logs")
(
cd augur
docker-compose logs -f
)
;;
"stop")
(
cd augur &&\
docker-compose down
)
;;
"restart")
(
cd augur &&\
docker-compose restart
)
;;
"upgrade")
printf "Pulls new docker images and restarts augur.\n";
(
cd augur || (echo "augur directory does not exist - run $0 setup"; exit 1)
docker-compose down
docker-compose pull
docker-compose up -d
)
;;
*)
usage
exit 1
esac
EOF
}
# Entry point: detect whether this is a fresh install or an upgrade of an
# existing ./augur directory, (re)generate the compose file and cli script,
# then run the appropriate cli action.
cat <<HERE
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Configuring Augur runtime
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
HERE
# An existing ./augur directory means configuration happened previously.
if [ -d augur ]; then
upgrade=true
else
upgrade=false
mkdir augur
fi
make_docker_compose
make_cli && chmod +x ./augur/cli
if [ $upgrade = true ]; then
/usr/bin/env bash ./augur/cli upgrade
/usr/bin/env bash ./augur/cli start
else
/usr/bin/env bash ./augur/cli setup
fi
| true
|
6997592820598eb0f0d231785c0e1d968e3c9c8d
|
Shell
|
arnaudbos/.dotfiles
|
/extras.sh
|
UTF-8
| 2,025
| 2.65625
| 3
|
[
"ISC",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Optional extras: install Haskell's ghcup, then (on macOS only) download a
# set of applications.
# NOTE(review): helper functions (running/filler/ok/bot/botdone/download_app)
# are assumed to be provided by the sourcing dotfiles framework — confirm.
################################################
# Haskell
################################################
# Install ghcup only if ~/.ghcup/env is absent.
# Fixed: the original ran `softcheck=\`stat ...\`` then tested `[ $? == 1]` —
# the missing space before `]` made the test itself error out (always false),
# so the install branch could never run.
if ! stat "$HOME/.ghcup/env" > /dev/null 2>&1; then
running "Downloading and installing Haskell's ghcup"; filler
curl --proto '=https' --tlsv1.2 -sSf https://get-ghcup.haskell.org | sh
ok
fi
if [[ $OSTYPE == darwin* ]]; then
###############################################################################
bot "Downloading OmniPlan"
###############################################################################
download_app 'OmniPlan' '1HJI4OkHQWnC5-tEEc31OlFNltncN9eSC'
botdone
###############################################################################
bot "Downloading OmniGraffle"
###############################################################################
download_app 'OmniGraffle' '1eKntPOP-Yl1ExV65f308s1HahWPV7EfW'
botdone
###############################################################################
bot "Downloading Final Cut Pro X 10.4.8"
###############################################################################
download_app 'Final Cut Pro X 10.4.8' '1MbKfLa8NilJ970ILTrrQhVtIwHcNUWHP'
botdone
###############################################################################
bot "Downloading Grammarly"
###############################################################################
download_app 'Grammarly' 'https://download-editor.grammarly.com/osx/Grammarly.dmg'
botdone
###############################################################################
bot "Downloading SizeUp with License"
###############################################################################
download_app 'SizeUp' '1ClbmLG_3k6UDpjPquG8Tl9GzkgJ2UGnf'
botdone
###############################################################################
bot "Downloading Zotero"
###############################################################################
download_app 'Zotero' '1SRs0PaB7FDh8z4gvFdrqE1xkodcfZZyB'
botdone
fi
| true
|
47410fbe4c1b534a431f42fe6464df4e5e1fad6b
|
Shell
|
adpartin/summit-test
|
/bsub3.sh
|
UTF-8
| 1,579
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
#BSUB -P med106
#BSUB -W 2:00
#BSUB -nnodes 10
#BSUB -J Script2
# ----------------------------------------------
# Multi-node LSF job: enumerates one run per GPU across all nodes.
# ----------------------------------------------
# Remember to load the appropriate module first, e.g.:
# module load ibm-wml-ce/1.7.0-2
echo "Bash version ${BASH_VERSION}..."
GPUs_PER_NODE=6
NODES=10
N_SPLITS=$((NODES * GPUs_PER_NODE))
echo "Number of nodes to use: $NODES"
echo "Number of GPUs per node: $GPUs_PER_NODE"
echo "Number of data splits for LC: $N_SPLITS"
# Assign a sequential run id to every (node, device) pair.
id=0
for node in $(seq 0 $((NODES - 1))); do
    for device in {0..5}; do
        echo "Run $id (use device $device on node $node)"
        # jsrun -n 1 -a 1 -c 4 -g 1 ./jsrun_script.sh $device 0 3 exec >run"$id".log 2>&1
        id=$((id + 1))
    done
done
# for node in $(seq 0 1 $(($NODES-1)) ); do
# echo "Use device 0 on node $node"
# done
# for device in $(seq 0 1 5); do
# echo "Use device $device on node $node"
# done
# # Resources of node 1.
# jsrun -n 1 -a 1 -c 4 -g 1 ./jsrun_script.sh 0 0 3 exec >run0.log 2>&1
# jsrun -n 1 -a 1 -c 4 -g 1 ./jsrun_script.sh 1 1 3 exec >run1.log 2>&1
# jsrun -n 1 -a 1 -c 4 -g 1 ./jsrun_script.sh 2 2 3 exec >run2.log 2>&1
# jsrun -n 1 -a 1 -c 4 -g 1 ./jsrun_script.sh 3 3 3 exec >run3.log 2>&1
# jsrun -n 1 -a 1 -c 4 -g 1 ./jsrun_script.sh 4 4 3 exec >run4.log 2>&1
# jsrun -n 1 -a 1 -c 4 -g 1 ./jsrun_script.sh 5 5 3 exec >run5.log 2>&1
# # Resources of node 2.
# jsrun -n 1 -a 1 -c 4 -g 1 ./jsrun_script.sh 0 6 3 exec >run6.log 2>&1
# jsrun -n 1 -a 1 -c 4 -g 1 ./jsrun_script.sh 1 7 3 exec >run7.log 2>&1
|
3086d4d5e216601b899cd080848d40fcaca9ae51
|
Shell
|
Pokerpoke/dotfiles
|
/backup.sh
|
UTF-8
| 498
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Back up selected dotfiles (~/.zshrc, ~/.vimrc) into this repo's src/ tree.
set -e
RED='\033[0;31m'
GREEN='\033[0;32m' # currently unused; kept for consistency with sibling scripts
BLUE='\033[0;34m'  # currently unused
NC='\033[0m' # No Color
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
# backup SRC DEST — copy SRC to DEST if SRC is a regular file, else warn.
function backup()
{
    if [ -f "$1" ]
    then
        rsync "$1" "$2"
        echo "$1 backuped."
    else
        # fixed: was "$NC}" (stray brace; reset code never applied) and "doesn't exists"
        echo -e "${RED}File doesn't exist!${NC}"
    fi
}
backup ~/.zshrc "${SCRIPT_DIR}/src/oh-my-zsh/.zshrc"
# Normalize line 7 of the backed-up .zshrc so ZSH points at the generic $HOME path
sed -i "7s#.*#export\ ZSH=/home/\${USER}/.oh-my-zsh#g" "${SCRIPT_DIR}/src/oh-my-zsh/.zshrc"
backup ~/.vimrc "${SCRIPT_DIR}/src/vim/.vimrc"
| true
|
c2864da8586b9ed138dcb7c052d23e394b94b38f
|
Shell
|
alexlenk/deepracer-for-dummies
|
/scripts/training/reset-restart-rounds.sh
|
UTF-8
| 484
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Restore a DeepRacer training backup tarball and reset training state.
# Usage: ./reset-restart-rounds.sh <backup.tar.gz>
if [ "$1" == "" ]; then
    echo "Error: No Tar"
    exit 1   # fixed: previously exited 0, hiding the usage error from callers
fi
./stop.sh
# Use a throwaway container to remove root-owned minio volume data.
docker run -v ~:/mnt centos:latest rm -rf /mnt/deepracer-for-dummies/docker/volumes/minio/bucket/rl-deepracer-pretrained
docker run -v ~:/mnt centos:latest rm -rf /mnt/deepracer-for-dummies/docker/volumes/minio/bucket/rl-deepracer-sagemaker
tar xvfz ~/deepracer-training/backup/"$1" -C ~/deepracer-for-dummies/
./set-last-run-to-pretrained.sh
../../reset-checkpoint.sh
#./round_training.sh --resume
|
5f57dff26c8bc311377f2ace7da3a485a9e2b9f8
|
Shell
|
sixpi/dotfiles
|
/system/path.zsh
|
UTF-8
| 435
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
# Add $1 to PATH if it is not already a component; when $2 is "after" the
# entry is appended, otherwise it is prepended.
pathmunge()
{
    echo $PATH | /bin/grep -qE "(^|:)$1($|:)" && return
    case "$2" in
        after) PATH=$PATH:$1 ;;
        *)     PATH=$1:$PATH ;;
    esac
}
# Build PATH: zsh framework bin, personal bin, optional anaconda, then a
# project-local ./bin (relative entry — resolved per current directory).
pathmunge $ZSH/bin
pathmunge $HOME/bin
if [[ -d $HOME/anaconda/bin ]]
then
  pathmunge $HOME/anaconda/bin
fi
pathmunge ./bin
export PATH
export MANPATH="/usr/local/man:/usr/local/mysql/man:/usr/local/git/man:$MANPATH"
# Remove the helper so it does not leak into the interactive shell.
unset pathmunge
| true
|
f15c81b2acaab91b6a5a72f8897686043ed2c104
|
Shell
|
rdickey/dotfiles
|
/bash_profile.linux.sh
|
UTF-8
| 2,826
| 3.09375
| 3
|
[] |
no_license
|
# TODO: combine the common parts of bash_profile.linux and bash_profile.mac and only have the small differences in each
# Linux login-shell profile: sources system/user config, sets prompt,
# colors, aliases, completion, and PATH additions.
#echo sourcing .bash_profile
[[ -f /etc/profile ]] && . /etc/profile
[[ -f ~/.aws.sh ]] && . ~/.aws.sh
export EDITOR="/usr/bin/vim"
export HISTFILESIZE=1048576
export LS_COLORS='no=00:fi=00:di=00;32:ln=00;36:pi=01;36:so=00;34:bd=33;01:cd=33;01:or=31;01:ex=00;33:*.tar=01;37:*.tgz=01;37:*.arj=01;37:*.taz=01;37:*.lzh=01;37:*.zip=01;37:*.z=01;37:*.Z=01;37:*.gz=01;37:*.deb=01;37:*.rpm=01;37:*.bz2=01;37:*.jpg=01;32:*.gif=01;32:*.png=01;32:*.bmp=01;32:*.ppm=01;32:*.tga=01;32:*.xbm=01;32:*.xpm=01;32:*.tif=01;32:*.mpg=01;32:*.avi=01;32:*.gl=01;32:*.dl=01;35:*.cc=01;32:*.cpp=01;32:*.py=01;32:*.java=01;32:*.h=00;32:*.c=01;32:*.o=00;37:*.pyc=00;37'
export PATH=$HOME/bin:$PATH
# Load all personal helper-function files (munge_path, find_git_branch, ...).
for f in ~/.bash_functions*; do
source $f
done
# if running bash
if [ -n "$BASH_VERSION" ]; then
# include .bashrc if it exists (BASHRC_DONE guards against double-sourcing)
if [ -f "$HOME/.bashrc" -a -z "$BASHRC_DONE" ]; then
. "$HOME/.bashrc"
fi
if [ -f "/etc/bash_completion" ] && ! shopt -oq posix; then
. "/etc/bash_completion"
fi
if [ -d "/etc/bash_completion.d" ]; then
for f in `find /etc/bash_completion.d/*`; do
source "$f" 2>/dev/null
done
fi
fi
# colors found at http://vim.wikia.com/wiki/Xterm256_color_names_for_console_Vim
# ref: http://tldp.org/HOWTO/Bash-Prompt-HOWTO/x329.html
# NOTE(review): the leading `true &&` appears to be a no-op prefix — confirm
# it isn't a workaround for some sourcing context.
true && RS="\[\033[0m\]" # reset
true && BRANCH="\[\033[1;34m\]"
true && FG="\[\033[0;31m\]"
true && FGB="\[\033[1;31m\]"
# Prompt separator: '#' for root, '$' otherwise.
if [ "$USER" = "root" ]; then
SEP='#'
else
SEP='$'
fi
PS1="${FGB}{${BRANCH}\$git_branch${FGB}\u@\h:${FG}\w${FGB}}${SEP}${RS} "
# Set terminal title, then prepend the helpers that refresh $munged_path and
# $git_branch before each prompt (functions come from ~/.bash_functions*).
PROMPT_COMMAND='echo -ne "\033]0;$git_branch$USER@$HOSTNAME:$munged_path\007"'
PROMPT_COMMAND="munge_path; find_git_branch; $PROMPT_COMMAND"
alias ls="ls --color -F"
alias sl="ls"
alias l="ls -al"
alias ll="ls -alhF"
[[ -n "`which vim 2>/dev/null`" ]] && alias vi="vim" || echo "Warning: vim is not installed. This may cause sadness."
alias grep="egrep"
alias egrep="egrep --color"
# This is a hack so that things like "sudo vi" will evaluate as "sudo vim".
# Otherwise, bash would only evaluate the alias for sudo (if any), not whatever came after it.
alias sudo="sudo "
alias fixcursor="setterm -cursor on"
alias fuck='sudo $(history -p \!\!)'
alias ffs='sudo $(history -p \!\!)'
export LESS="-x4 -FXR"
complete -C aws_completer aws
[[ -s "$HOME/.bash_custom" ]] && source "$HOME/.bash_custom"
[[ -s "/usr/local/rvm/scripts/rvm" ]] && source "/usr/local/rvm/scripts/rvm"
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"
### Added by the Heroku Toolbelt
[[ -d "/usr/local/heroku/bin" ]] && export PATH="/usr/local/heroku/bin:$PATH"
# Load any .pem keys into the ssh agent, silently.
ssh-add $HOME/.ssh/*pem >/dev/null 2>&1
export BASH_PROFILE_DONE=true
| true
|
dac85c5121d1d83a0bdda1fa748911e1ace4abff
|
Shell
|
snort3/snort3_demo
|
/perf/3.0/mem_scale.sh
|
UTF-8
| 405
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Memory scaling probe: run one snort instance over $max copies of a pcap
# with -z $max packet threads, and print the perf counter matched by $runt.
# NOTE(review): $snort, $args and $runt (and $cpus in the commented line) are
# expected to come from setup.sh -- confirm before running standalone.

. ./setup.sh

conf=$1
mpse=$2
max=$3
pcap=$4

# NOTE(review): $cpu is never assigned in this script; the banner presumably
# meant to print $mpse -- confirm with the author.
echo "conf=$conf, cpu=$cpu, max=$max, pcap=$pcap"

# Lua key used to select the pattern-matcher implementation.
var="search_engine.search_method"

# Build a space-separated list containing $max copies of the pcap.
pcaps="$pcap"
for i in $(seq 2 $max) ; do
    pcaps+=" $pcap"
done

# NOTE(review): $i here is the leftover loop counter (== $max when max >= 2),
# used as the label column in front of the extracted number.
echo $i `$snort $args -c $conf -r "$pcaps" --lua "$var = '$mpse'" -z $max 2>&1 | \
    grep "$runt" | grep -o '[0-9.]*'`

#$snort $args -c $conf -r $pcaps --lua "$cpus; $var = '$mpse'" -z $i
| true
|
db9d304d665afc209709f00fdc80ec9942f4f0df
|
Shell
|
merryChris/blotus
|
/tools/del_ranged_imgs.sh
|
UTF-8
| 188
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Remove the numbered folders ttt/thread_<i> for every i in [$1, $2],
# reporting each deletion.
folder='ttt/'
i=${1}
while (( i <= ${2} )); do
    target="${folder}thread_${i}"
    if [ -d "$target" ]; then
        rm -rf "$target"
        echo "Deleted Folder thread_${i}."
    fi
    i=$(( i + 1 ))
done
| true
|
60952a5bbc6fb31ae8c44fba8958b4524965700c
|
Shell
|
StartAt24/Python-Flask
|
/daemon
|
UTF-8
| 479
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# Watchdog: start a network tool ($1, with argument $2) and a Python server
# ($3), then poll forever -- reboot the box if the tool dies, restart the
# server if it dies.
if [ $# -eq 0 ];then
    echo "usage: ./net2io n server.py "
    # BUG FIX: bare 'exit' reported success on a usage error.
    exit 1
fi
# BUG FIX: '[ ! -f $1 || -f $3 ]' is invalid test syntax ('||' cannot appear
# inside '[ ]'), and the second operand was missing its '!' -- the intent,
# per the message below, is "either file does not exist".
if [ ! -f "$1" ] || [ ! -f "$3" ];then
    echo "$1 not exsit or $3 not exsit"
    exit 1
fi

sudo ./$1 $2 &
python3.4 $3 &

while true ; do
    count=`ps -ef | grep $1| grep -v "grep" |grep -v "daemon" |wc -l`
    # BUG FIX: 'servercount = `...`' (spaces around '=') is not an assignment;
    # it tried to run a command named 'servercount'.
    servercount=`ps -ef | grep $3| grep -v "grep" |grep -v "daemon" |wc -l`
    # Network tool gone: give it 15 s, then reboot the machine.
    if [ $count -eq 0 ]; then
        sleep 15
        sudo reboot
    fi
    # BUG FIX: 'if[' needs a space to be parsed as the 'if' keyword.
    if [ $servercount -eq 0 ]; then
        python3.4 $3 &
    fi
    sleep 3
done
| true
|
1eeaff2dc790dcbbd857129a4945c09484ad7f4b
|
Shell
|
golangit/gobyexample-it
|
/examples/variabili-dambiente/variabili-dambiente.sh
|
UTF-8
| 435
| 2.671875
| 3
|
[
"CC-BY-3.0"
] |
permissive
|
# Eseguendo il programma verrà stampato il valore di
# `FOO`, impostato da noi nel sorgente, mentre il valore
# di `BAR` sarà vuoto.
$ go run environment-variables.go
FOO: 1
BAR:
# La lista di chiavi nell'ambiente dipenderà dal tuo
# sistema
TERM_PROGRAM
PATH
SHELL
...
# Se prima di eseguire il programma assegnamo un valore
# a `BAR`, allora il programma lo stamperà.
$ BAR=2 go run environment-variables.go
FOO: 1
BAR: 2
...
| true
|
635398be4c26cb0444f38900e8cbe621ac2da1de
|
Shell
|
mbreest/logstash-input-s3sqs
|
/ci/setup.sh
|
UTF-8
| 885
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# CI bootstrap: download the requested logstash release, put its bundled
# JRuby on PATH and install this plugin's gem dependencies.
# Expects $LOGSTASH_VERSION in the environment; aborts on any failure (-e).
set -e

# Download the logstash tarball for a version; -SNAPSHOT builds are served
# from a different host than released artifacts.
download_logstash() {
    logstash_version=$1
    case "$logstash_version" in
        *-SNAPSHOT)
            wget "https://snapshots.elastic.co/downloads/logstash/logstash-$logstash_version.tar.gz"
            ;;
        *)
            wget "https://artifacts.elastic.co/downloads/logstash/logstash-$logstash_version.tar.gz"
            ;;
    esac
}

echo "Downloading logstash version: $LOGSTASH_VERSION"
download_logstash "$LOGSTASH_VERSION"
# ROBUSTNESS FIX: the expansions below were unquoted (SC2086) and would break
# on paths containing spaces; quoted throughout, behavior otherwise unchanged.
tar -zxf "logstash-$LOGSTASH_VERSION.tar.gz"

export LOGSTASH_PATH="$PWD/logstash-${LOGSTASH_VERSION}"
export PATH="$LOGSTASH_PATH/vendor/jruby/bin:$LOGSTASH_PATH/vendor/bundle/jruby/1.9.3/bin:$LOGSTASH_PATH/vendor/bundle/jruby/2.3.0/bin:$PATH"
export LOGSTASH_SOURCE=1
cp "$LOGSTASH_PATH/logstash-core/versions-gem-copy.yml" "$LOGSTASH_PATH/versions.yml"

gem install bundler
jruby -S bundle install --jobs=3 --retry=3 --path=vendor/bundler
jruby -S bundle exec rake vendor
| true
|
7ffbc44f01fd8c95e777ddc38da81badc3d887a2
|
Shell
|
neobht/uird
|
/initrd/bin/pcimodules
|
UTF-8
| 256
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the deduplicated list of kernel modules that claim any PCI device
# currently present, by matching each vendor/device id pair from
# /proc/bus/pci/devices against the running kernel's modules.pcimap table.
KERNEL=$(uname -r)
while read junk1 id junk2; do
    # The id field packs vendor and device ids into one hex word; split it
    # and zero-extend each half to the 0x0000XXXX form used in the pcimap.
    vendor=0x0000${id%????}
    device=0x0000${id#????}
    grep "$vendor $device" "/lib/modules/$KERNEL/modules.pcimap" | cut -d " " -f 1
done < /proc/bus/pci/devices | sort -u
| true
|
5f02c3d06ed420ed854606eb857b10d85f374ade
|
Shell
|
albarron/LumpSTS
|
/scripts/TRANS.computeCharSimFeatures.sh
|
UTF-8
| 3,443
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Compute character n-gram similarity features (n = 2..5) for the STS
# 2016/2017 datasets using cat.lump.sts2017.similarity.CharNgramsSimilarity.
# IMPROVEMENT: the original repeated the same 4-iteration java loop fourteen
# times; the loop is factored into one helper, call sites keep the exact
# original input/output paths and language flags.

JAR=/Users/albarron/workspace/LumpSTS/target/LumpSTS-0.0.1-SNAPSHOT.jar
DEPENDENCIES=/Users/albarron/workspace/LumpSTS/target/dependency/*

INPUT_PATH=/Users/albarron/workspace/LumpSTS/DATA

################
# CHAR N-GRAMS #
################
CLAZZ=cat.lump.sts2017.similarity.CharNgramsSimilarity

# run_ngrams <input> <output> <lang>
# Run the similarity class once per n-gram order (2..5) on <input>, writing
# features to <output>, tokenising for language <lang>.
# ($JAR:$DEPENDENCIES is deliberately left unquoted, as in the original, so
# java receives the literal classpath wildcard.)
run_ngrams() {
    local in=$1 out=$2 lang=$3 n
    for n in $(seq 2 5); do
        java -cp $JAR:$DEPENDENCIES $CLAZZ -f "$in" -o "$out" -l "$lang" -n $n
    done
}

######
# Test 2017
######
EVAL=$INPUT_PATH/STS2017.eval
run_ngrams $EVAL/STS.input.track1.ar-ar.txt.trad2en  $EVAL/sim/track1.ar-ar.0.trad2en  en
run_ngrams $EVAL/STS.input.track5.en-en.txt.trad2es  $EVAL/sim/track5.en-en.0.trad2es  es
run_ngrams $EVAL/STS.input.track3.es-es.txt.trad2en  $EVAL/sim/track3.es-es.0.trad2en  en
run_ngrams $EVAL/STS.input.track2.ar-en.txt.trad2en  $EVAL/sim/track2.ar-en.0.trad2en  en
run_ngrams $EVAL/STS.input.track4a.es-en.txt.trad2en $EVAL/sim/track4a.es-en.0.trad2en en
run_ngrams $EVAL/STS.input.track4b.es-en.txt.trad2en $EVAL/sim/track4b.es-en.0.trad2en en
run_ngrams $EVAL/STS.input.track6.tr-en.txt.trad2en  $EVAL/sim/track6.tr-en.0.trad2en  en

######
# Test 2016
######
run_ngrams $INPUT_PATH/en.test.1_fold/en.input.0.txt.trad2es $INPUT_PATH/en.test.1_fold/sim/en-en.0.trad2es es

#####
# SINGLE (monolingual training folds)
#####
run_ngrams $INPUT_PATH/en.train.1_fold/en.input.0.txt.trad2es $INPUT_PATH/en.train.1_fold/sim/en-en.0.trad2es es
run_ngrams $INPUT_PATH/ar.train.1_fold/ar.input.0.txt.trad2en $INPUT_PATH/ar.train.1_fold/sim/ar-ar.0.trad2en en
run_ngrams $INPUT_PATH/es.train.1_fold/es.input.0.txt.trad2en $INPUT_PATH/es.train.1_fold/sim/es-es.0.trad2en en

#####
# PAIRS (cross-lingual training folds)
#####
run_ngrams $INPUT_PATH/en_ar.train.1_fold/en_ar.input.0.txt.trad2en $INPUT_PATH/en_ar.train.1_fold/sim/en-ar.0.trad2en en
run_ngrams $INPUT_PATH/en_es.train.1_fold/en_es.input.0.txt.trad2en $INPUT_PATH/en_es.train.1_fold/sim/en-es.0.trad2en en

#10-folds
#for i in $(seq 2 5); do
# for j in $(seq 0 9); do
# java -cp $JAR:$DEPENDENCIES $CLAZZ -f $INPUT_PATH/$L1\_$L2.train.10_fold/$L1\_$L2.input.$j.txt -o $INPUT_PATH/$L1\_$L2.train.10_fold/sim/$L1-$L2.$j.txt -l $L1 -m $L2 -n $i
# done
#done
| true
|
9369fe24cf64c61b7a7195c2797d2e1ab7833387
|
Shell
|
mauodias/.v4
|
/playbooks/files/.tools/functions/git_scripts
|
UTF-8
| 1,129
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Helper functions for creating and importing git repositories that are
# mirrored on both GitHub and GitLab.

# Derive identity/repo variables from the environment and current directory.
set_git_vars () {
    export GIT_USER_NAME=$USER
    export GIT_REPO_NAME=$(basename "$(pwd)")
    export GIT_NAME="Maurício Dias"
    export GIT_EMAIL="mauricio@mauricio.cc"
}

# Make 'origin' also push to GitLab and keep a gitlab-only remote around.
set_git_remotes () {
    git remote set-url origin --add https://gitlab.com/${GIT_USER_NAME}/${GIT_REPO_NAME}.git
    git remote add origin-gitlab https://gitlab.com/${GIT_USER_NAME}/${GIT_REPO_NAME}.git
}

# Same as set_git_remotes, for GitHub.
set_github_remotes () {
    git remote set-url origin --add https://github.com/${GIT_USER_NAME}/${GIT_REPO_NAME}.git
    git remote add origin-github https://github.com/${GIT_USER_NAME}/${GIT_REPO_NAME}.git
}

config_user() {
    # BUG FIX: an empty compound command is a bash syntax error, which made
    # this entire file fail to parse when sourced. ':' is a no-op body.
    # NOTE(review): presumably intended to apply GIT_NAME/GIT_EMAIL via
    # 'git config' -- confirm with the author before implementing; left as a
    # no-op to preserve the (intended) current behavior.
    :
}

# Initialise a new repo locally and create it on GitHub (hub) + GitLab.
git_create () {
    set_git_vars
    git init
    hub create
    gitlab project create --name $GIT_REPO_NAME --visibility public
    set_git_remotes
    config_user
}

# Wire up an already-existing repo with remotes for both hosts.
git_import () {
    set_git_vars
    set_git_remotes
    set_github_remotes
}

# Append pipeline-status badges for both hosts to README.md.
git_readme_badges () {
    set_git_vars
    echo "[](https://github.com/$GIT_USER_NAME/$GIT_REPO_NAME)
[](https://gitlab.com/$GIT_USER_NAME/$GIT_REPO_NAME)" >> README.md
}
| true
|
ad4032ed379e9052d1ff655004224d5cd9b4b6ce
|
Shell
|
osandov/blktests
|
/tests/block/005
|
UTF-8
| 1,220
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# SPDX-License-Identifier: GPL-3.0+
# Copyright (C) 2017 Jens Axboe
#
# Threads doing IO to a device, while we switch schedulers

. tests/block/rc

DESCRIPTION="switch schedulers while doing IO"
TIMED=1
CAN_BE_ZONED=1

# Test prerequisite: fio must be installed.
requires() {
	_have_fio
}

# Device prerequisite: the device must expose a scheduler knob in sysfs.
device_requires() {
	_require_test_dev_sysfs queue/scheduler
}

test_device() {
	echo "Running ${TEST_NAME}"

	# All I/O schedulers available for this device.
	# shellcheck disable=SC2207
	local scheds=($(_io_schedulers "$(basename "${TEST_DEV_SYSFS}")"))

	# Rotational devices are slow -- use a smaller fio working set.
	if _test_dev_is_rotational; then
		size="32m"
	else
		size="1g"
	fi

	# start fio job
	_run_fio_rand_io --filename="$TEST_DEV" --size="$size" &

	# while job is running, switch between schedulers
	# fio test may take too long time to complete read/write in special
	# size in some bad situations. Set a timeout here which does not block
	# overall test.
	start_time=$(date +%s)
	timeout=${TIMEOUT:=900}
	# Loop while the backgrounded fio ($!) is still alive, flipping the
	# device to a randomly chosen scheduler every 200 ms.
	while kill -0 $! 2>/dev/null; do
		idx=$((RANDOM % ${#scheds[@]}))
		_test_dev_queue_set scheduler "${scheds[$idx]}"
		sleep .2
		end_time=$(date +%s)
		# Allow a 15 s grace period past the timeout before giving up.
		if (( end_time - start_time > timeout + 15 )); then
			echo "fio did not finish after $timeout seconds!"
			break
		fi
	done

	FIO_PERF_FIELDS=("read iops")
	_fio_perf_report

	echo "Test complete"
}
| true
|
d26b0aae33f9aee034465fb1f778e76be3235ebf
|
Shell
|
wangpanqiao/CSD_Lfabarum
|
/src/convert_coord/convert_GFF_coord.sh
|
UTF-8
| 5,136
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# This script converts the coordinates in an original GFF file to coordinates
# corresponding to another assembly. It uses a correspondence file generated
# by corresp_contigs.sh to retrieve contig coordinates in the new assembly.
# Cyril Matthey-Doret
# 02.11.2017

# Help message
# NOTE(review): the usage string advertises '-L' but only lowercase '-l' is
# parsed by getopts below -- confirm which is intended.
function usage () {
cat <<EOF
Usage: `basename $0` -i input_gff -o output_gff (-c corresp_file | \
-O old -N new) [-L] [-h]
-g log file
-i gff file to be converted
-o output converted gff file
-c csv file for correspondance between contigs
-O old reference, only needed if -c is not specified
-N new reference, only needed if -c is not specified
-l local run. If specified, will not use LSF bsub command
-h displays this help
EOF
exit 0
}

# Parsing CL arguments
while getopts ":g:i:o:c:O:N:lh" opt; do
	case $opt in
		g ) LOGS=${OPTARG} ;;
		i ) GFF=${OPTARG} ;;
		o ) OUT_GFF=${OPTARG} ;;
		c ) CORRESP_GFF=${OPTARG};;
		O ) OLD_REF=${OPTARG};;
		N ) NEW_REF=${OPTARG};;
		l ) local=yes;;
		h ) usage ;;
		\?) usage ;;
	esac
done

# Both -i and -o are mandatory.
if [ "x" == "x$GFF" ] || [ "x" == "x$OUT_GFF" ];
then
	echo "Error: Input and output GFF files must be provided \
along with either a contig correspondance file or both references."
	usage
	exit 0
fi
shift $(($OPTIND - 1))

# set command to be used depending if running locally or on LSF
if [ -z ${local+x} ];then run_fun="bsub -I -tty";else run_fun="bash";fi

# Generate correspondance file if not specified
if [ -z ${CORRESP_GFF+x} ];
then
	if [ -z "$OLD_REF" ] || [ -z "$NEW_REF" ];
	then
		echo "Error: If no correspondance file is provided, both the old and new \
references are required."
		usage
		exit 0
	fi
	# Default path for corresp file
	CORRESP_GFF="data/annotations/corresp_gff.csv"
	bash corresp_contigs.sh -O "$OLD_REF" \
		-N "$NEW_REF" \
		-G "$LOGS" \
		-c "$CORRESP_GFF" ${local:+-l}
else if [ ! -f "$CORRESP_GFF" ];
	then
		echo "Correspondance file invalid. Exiting"
		exit 0
	fi
fi

# Run main code with bsub or directly, depending on -l flag
# Note: many variables are escaped to force remote expansion of heredoc
# (unescaped $VARs like $GFF/$OUT_GFF/$CORRESP_GFF are expanded *here*, while
# \$-escaped ones are expanded by the shell that executes the submitted job).
echo "Job submitted !"
eval $run_fun <<CONV_COORD
#!/bin/bash
#BSUB -J conv_GFF
#BSUB -q normal
#BSUB -e $LOGS/gff_conv.err
#BSUB -o $LOGS/gff_conv.out
#BSUB -M 16000000
#BSUB -R "rusage[mem=16000]"
#BSUB -n 28
#BSUB -R "span[ptile=28]"
source src/misc/jobs_manager.sh
# Performance tuning parameters
MAX_PROC=24;CHUNK_SIZE=200
n_rec=\$(wc -l $GFF | awk '{print \$1}')
n_chunk=\$((\$n_rec / \$CHUNK_SIZE + 1))
# Clean temporary files
tmpdir="\$(dirname $OUT_GFF)/tmp/"
rm -rf \$tmpdir && mkdir -p \$tmpdir
echo -n "" > $OUT_GFF
for ((chunk=0;chunk < n_chunk; chunk++))
do
# Line boundaries of the chunk (1-indexed)
c_start=\$(( \$chunk * \$CHUNK_SIZE + 1))
c_end=\$((\$c_start + (\$CHUNK_SIZE - 1)))
# Stop spawning subprocesses if too many running
[ \$( jobs -p | wc -l ) -ge \$MAX_PROC ] && wait
# spawning one parallel subprocess per chunk
# each subprocess iterates over lines of its chunk
(
sed -n "\${c_start},\${c_end}p" $GFF | while read line
do
# Storing line of GFF file into bash array
track=( \$line )
# Getting line of corresp file for the contig on current line
corresp=( \$(grep "^\${track[0]}" $CORRESP_GFF | sed 's/,/ /g') )
# If track is not on a chromosome (i.e. contig is not anchored), skip it
if [ -z "\$corresp" ]; then continue; fi
# Replacing contig of current record in memory
track[0]=\${corresp[1]}
# Shift start and end if necessary and flip,
# depending if contig was reversed or not
if [[ \${corresp[4]} == *"rev"* ]]
then
# Reversed: corresp[2] will match the end of the contig
# start -> contig_end-track_end, end -> contig_end-track_start
start=\$((\${corresp[2]}-\${track[4]}))
end=\$((\${corresp[2]}-\${track[3]}))
track[3]=\$start
track[4]=\$end
else
# not reversed: start shifted, end -> start + contig size
let "track[3] += \${corresp[2]}"
let "track[4] += \${corresp[2]}"
fi
# If complementary and not strand agnostic -> complement track
if [[ \${corresp[4]} == *"comp"* && \${track[6]} != "." ]]
then
# Reverse strand
if [[ \${track[6]} == "+" ]]
then
track[6]="-"
else
track[6]="+"
fi
fi # redirect line to output gff (line >> file)
# Write line to temporary file to avoid write conflicts
echo "\${track[@]}" | tr ' ' \\\\t >> "\$tmpdir/chunk.\$chunk";
done
) &
# Displaying progress bar
prettyload \$c_start \$n_rec
done
wait
# Concatenate temporary files (this syntax allows to get over
# cat's maximum number of argument)
find \$tmpdir/ -name "chunk*" -type f -maxdepth 1 | \
xargs cat > $OUT_GFF
# Sort lines according to new coordinates
sort -k1,1 -k4,4n -o $OUT_GFF $OUT_GFF
CONV_COORD
| true
|
25aa99f9cd89549afbb43c08a59dde2a225b1c8f
|
Shell
|
siyue1226/alt-splice
|
/Mus_musculus/cq.Spleen.sh
|
UTF-8
| 5,860
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Run cuffquant over every Spleen RNA-seq lane against the merged assembly.
# IMPROVEMENT: the original repeated an identical per-lane stanza six times
# (SRR453160..SRR453165); factored into one loop with identical commands,
# log messages and failure handling.
start_time=`date +%s`

### bash hints
### 2>&1 redirect stderr to stdout
### | tee -a log.log screen outputs also append to log file
### ; ( exit ${PIPESTATUS} ) correct program exitting status
### Only run parallel when you're sure that there are no errors.

cd /home/wangq/data/rna-seq/mouse_trans

### bowtie index
# bowtie2-build /home/wangq/data/rna-seq/mouse_trans/ref/mouse.65.fa /home/wangq/data/rna-seq/mouse_trans/ref/mouse.65

#----------------------------#
# cuffquant
#----------------------------#
BASE=/home/wangq/data/rna-seq/mouse_trans

for lane in SRR453160 SRR453161 SRR453162 SRR453163 SRR453164 SRR453165; do
    cd $BASE/process/Spleen/$lane/

    echo "* Start cuffquant [Spleen] [$lane] `date`" | tee -a $BASE/log/cuffquant.log

    # cuffquant: abundance estimation against the merged GTF, masking
    # rRNA/tRNA-style annotations (-M) with fragment bias correction (-b).
    /home/wangq/bin/cuffquant -p 8 \
        --no-update-check -u -M $BASE/ref/mouse.65.mask.gtf \
        -b $BASE/ref/mouse.65.fa \
        $BASE/process/merged_asm/merged.gtf \
        -o $BASE/process/Spleen/$lane/cq_out \
        $BASE/process/Spleen/$lane/th_out/accepted_hits.bam

    # Record the failure and abort the whole batch on the first broken lane.
    [ $? -ne 0 ] && echo `date` Spleen $lane [cuffquant] failed >> $BASE/fail.log && exit 255

    echo "* End cuffquant [Spleen] [$lane] `date`" | tee -a $BASE/log/cuffquant.log
done
| true
|
eedfa1885f5c6f635329872e96213f4638027a8e
|
Shell
|
godane/abs
|
/community/perl-file-mmagic/PKGBUILD
|
UTF-8
| 877
| 2.90625
| 3
|
[] |
no_license
|
# PKGBUILD for the File::MMagic perl module (file-type guessing).
pkgname="perl-file-mmagic"
pkgver="1.27"
pkgrel=1
pkgdesc="Guess file type"
arch=(any)
license=('PerlArtistic')
url="http://search.cpan.org/dist/File-MMagic"
depends=('perl')
options=(!emptydirs)
source=("http://search.cpan.org/CPAN/authors/id/K/KN/KNOK/File-MMagic-${pkgver}.tar.gz")
md5sums=('4ffb13b6587888e6e455c22988abce5e')

# NOTE(review): legacy single-function PKGBUILD -- build() both compiles and
# installs into $pkgdir; modern PKGBUILDs split this into build()/package().
# Left as-is so older tooling that relies on build() keeps working.
build() {
  cd "${srcdir}/File-MMagic-${pkgver}"

  # Force module installation to "current" perl directories.
  # ('perl -V:archname' prints archname='...'; eval imports it as a shell var.)
  eval `perl -V:archname`
  PERL_MM_USE_DEFAULT=1 perl Makefile.PL \
    INSTALLARCHLIB=/usr/lib/perl5/current/${archname} \
    INSTALLSITELIB=/usr/lib/perl5/site_perl/current \
    INSTALLSITEARCH=/usr/lib/perl5/site_perl/current/${archname}

  make || return 1
  make install DESTDIR="${pkgdir}"

  # remove perllocal.pod and .packlist
  find "${pkgdir}" -name perllocal.pod -delete
  find "${pkgdir}" -name .packlist -delete
}
| true
|
7e1aba2f1e07074b2bd17ad3bd2233415a0e1a02
|
Shell
|
provingground-curly/buildFiles
|
/clapack.build
|
UTF-8
| 932
| 3.109375
| 3
|
[] |
no_license
|
# -*- sh -*-
# eups build recipe for CLAPACK; @VERSION@ is a template token substituted by
# the build system. Every step is chained with '&&' so the first failing
# command aborts the whole recipe.

# get the tarball
wget http://www.netlib.org/clapack/clapack-@VERSION@.tgz &&
# untar
tar xvzf clapack-@VERSION@.tgz &&
cd CLAPACK-@VERSION@ &&
# update the make.inc file (clear the platform suffix, add -fPIC for use in
# shared libraries)
sed -e 's/PLAT = _LINUX/PLAT =/' -e 's/CFLAGS = -O3/CFLAGS = -O3 -fPIC/' make.inc.example > make.inc &&
# build f2c
make f2clib &&
# build reference blas which comes with clapack
make blaslib &&
# build the archive containing lapack source
cd INSTALL && make && cd .. &&
cd SRC && make && cd .. &&
# ':' is a no-op used here as an inline comment that still chains with '&&'
: install the .a files and include files &&
product_dir=$(eups path 0)/$(eups flavor)/clapack/@VERSION@ &&
mkdir -p $product_dir/{lib,include} &&
cp INCLUDE/*.h $product_dir/include &&
cp F2CLIBS/libf2c.* $product_dir/lib &&
# Copy every static/shared lapack and blas library that was produced into the
# product dir and leave a symlink behind in the build tree.
for p in lapack blas; do
  for s in a so dylib; do
    for f in $p*.$s; do
      if [ -f $f ]; then
        cp $f $product_dir/lib
        ln -fs $product_dir/lib/$f
      fi
    done
  done
done &&
ranlib $product_dir/lib/*.a
| true
|
2cb872cc58dfe37a0f4f666aaafba3633939a627
|
Shell
|
smititelu/ntfsheurecovery
|
/bin/nhrdb-diff.sh
|
UTF-8
| 6,223
| 3.234375
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# Diff two ntfsheurecovery SQLite databases table by table, emitting
# unified-diff-style output (one "file" per table) for added, deleted and
# changed rows.

DEF_LDB=ntfsheurecovery-old.db
DEF_RDB=ntfsheurecovery.db

usage () {
APPNAME=$(basename "$0")
echo "ntfsheurecovery DB diff
Usage:
  $APPNAME -h
  $APPNAME [-l <left DB>] [-r <right DB>]
Options:
  -l <left DB> Specify left database file for comparison (def: $DEF_LDB)
  -r <right DB> Specify right database file for comparison (def: $DEF_RDB)
  -h Print this help
"
}

while getopts "hl:r:" OPT; do
	case "$OPT" in
	h)
		usage
		exit 0
		;;
	l) LDB="$OPTARG";;
	r) RDB="$OPTARG";;
	*) exit 1;;
	esac
done

# Fall back to the default DB filenames, then verify both exist.
[ -z "$LDB" ] && LDB=$DEF_LDB;
[ -z "$RDB" ] && RDB=$DEF_RDB;

if [ \! -f "$LDB" ]; then
	echo "No left database file $LDB" >&2
	exit 1;
fi

if [ \! -f "$RDB" ]; then
	echo "No right database file $RDB" >&2
	exit 1;
fi

# Word-boundary regex syntax differs between GNU (\< \>) and BSD ([[:<:]]).
case "$(uname -s)" in
Linux )
	RE_WS="\<"
	RE_WE="\>"
	;;
*BSD )
	RE_WS="[[:<:]]"
	RE_WE="[[:>:]]"
	;;
esac

# Both databases are attached to one sqlite session so rows can be joined
# across them.
Q_ATTACH="ATTACH DATABASE '$LDB' AS ldb;
ATTACH DATABASE '$RDB' AS rdb;"

TABLES="src idx_types param hints_classes hints_types hints cmap bb mft_entries mft_entries_fn mft_entries_oid data data_mp data_chunks data_segments mft_entries_attrs mft_entries_tree idx_nodes idx_entries idx_entries_dir idx_entries_sdh idx_entries_sii"

# Per-table column metadata, consumed via eval in the loop below:
#   F_CMN_<tbl>  - key columns that identify a row (join columns)
#   F_DIFF_<tbl> - payload columns compared for changes
#   F_SORT_<tbl> - ORDER BY columns for the combined diff output
F_CMN_src="id"
F_DIFF_src="name desc"
F_SORT_src="id"

F_CMN_idx_types="id"
F_DIFF_idx_types="name desc"
F_SORT_idx_types="id"

F_CMN_param="name"
F_DIFF_param="val"
F_SORT_param="name"

F_CMN_hints_classes="id"
F_DIFF_hints_classes="name"
F_SORT_hints_classes="id"

F_CMN_hints_types="class id"
F_DIFF_hints_types="name"
F_SORT_hints_types="class id"

F_CMN_hints="mft_entnum class type cargs args"
F_DIFF_hints="val"
F_SORT_hints="mft_entnum class type"

F_CMN_cmap="off len"
F_DIFF_cmap="flags"
F_SORT_cmap="off"

F_CMN_bb="off"
F_DIFF_bb="flags entnum attr_type attr_id voff entity_idx"
F_SORT_bb="off"

F_CMN_mft_entries="num"
F_DIFF_mft_entries="f_cmn f_bad f_rec f_sum bb_map bb_rec parent parent_src base base_src seqno seqno_src t_create t_create_src t_change t_change_src t_mft t_mft_src t_access t_access_src fileflags fileflags_src sid sid_src"
F_SORT_mft_entries="num"

F_CMN_mft_entries_fn="num type"
F_DIFF_mft_entries_fn="attr_id src len name"
F_SORT_mft_entries_fn="num"

F_CMN_mft_entries_oid="num"
F_DIFF_mft_entries_oid="src obj_id birth_vol_id birth_obj_id domain_id"
F_SORT_mft_entries_oid="num"

F_CMN_data="mft_entnum pos"
F_DIFF_data="name flags sz_alloc sz_alloc_src sz_used sz_used_src sz_init sz_init_src"
F_SORT_data="mft_entnum pos"

F_CMN_data_mp="mft_entnum pos vcn"
F_DIFF_data_mp="lcn clen"
F_SORT_data_mp="mft_entnum pos"

F_CMN_data_chunks="mft_entnum pos voff"
F_DIFF_data_chunks="len src"
F_SORT_data_chunks="mft_entnum pos"

F_CMN_data_segments="mft_entnum pos firstvcn"
F_DIFF_data_segments="firstvcn_src lastvcn lastvcn_src attr_entnum attr_id"
F_SORT_data_segments="mft_entnum pos firstvcn"

F_CMN_mft_entries_attrs="num pos"
F_DIFF_mft_entries_attrs="src type id name entnum firstvcn entity_idx"
F_SORT_mft_entries_attrs="num pos"

F_CMN_mft_entries_tree="entry parent h"
F_DIFF_mft_entries_tree=""
F_SORT_mft_entries_tree="entry"

F_CMN_idx_nodes="mft_entnum type vcn"
F_DIFF_idx_nodes="lcn parent level flags bb_map bb_rec"
F_SORT_idx_nodes="mft_entnum type vcn"

F_CMN_idx_entries="mft_entnum type pos"
F_DIFF_idx_entries="container child voff"
F_SORT_idx_entries="mft_entnum type pos"

F_CMN_idx_entries_dir="mft_entnum mref name_type"
F_DIFF_idx_entries_dir="pos parent t_create t_change t_mft t_access alloc_sz used_sz flags reparse name_len name"
F_SORT_idx_entries_dir="mft_entnum"

F_CMN_idx_entries_sdh="mft_entnum hash id"
F_DIFF_idx_entries_sdh="pos voff len"
F_SORT_idx_entries_sdh="mft_entnum hash id"

F_CMN_idx_entries_sii="mft_entnum id"
F_DIFF_idx_entries_sii="hash pos voff len"
F_SORT_idx_entries_sii="mft_entnum id"

for TBL in $TABLES; do
	# Fetch this table's column metadata from the F_* variables above.
	eval F_CMN='$F_CMN_'$TBL
	eval F_DIFF='$F_DIFF_'$TBL
	eval F_SORT='$F_SORT_'$TBL
	F_CMN1=$(echo "$F_CMN" | awk '{ print $1 }')
	# Build SELECT lists qualified with the l./r. table aliases.
	F_L_CMN=$(echo "$F_CMN" | sed -re "s/($RE_WS[a-z0-9_]+$RE_WE)/, l.\1 AS \1/g;s/^, //")
	F_R_CMN=$(echo "$F_CMN" | sed -re "s/($RE_WS[a-z0-9_]+$RE_WE)/, r.\1 AS \1/g;s/^, //")
	if [ -n "$F_DIFF" ]; then
		F_L_DIFF=$(echo " $F_DIFF" | sed -re "s/($RE_WS[a-z0-9_]+$RE_WE)/l.\1/g;s/[[:space:]]+/, /g")
		F_R_DIFF=$(echo " $F_DIFF" | sed -re "s/($RE_WS[a-z0-9_]+$RE_WE)/r.\1/g;s/[[:space:]]+/, /g")
		# WHERE clause: any payload column NULL on one side only, or unequal.
		W_DIFF=$(echo "$F_DIFF" | sed -re "s/($RE_WS[a-z0-9_]+$RE_WE)/(l.\1 IS NULL AND r.\1 NOT NULL OR l.\1 NOT NULL AND r.\1 IS NULL OR l.\1<>r.\1)/g;s/[)][[:space:]]+[(]/) OR (/g")
	else
		F_L_DIFF=
		F_R_DIFF=
		W_DIFF="1 = 0"
	fi
	F_SORT=$(echo "$F_SORT" | sed -re "s/[[:space:]]+/,/g")
	# Join condition on all key columns.
	J=$(echo "$F_CMN" | sed -re "s/($RE_WS[a-z0-9_]+$RE_WE)/l.\1=r.\1/g;s/[[:space:]]+/ AND /g")
	# Rows only in the left DB (deleted), only in the right DB (added), and
	# present in both but with differing payload (changed).
	Q_DEL="SELECT 'del' as op, $F_L_CMN $F_L_DIFF $F_R_DIFF FROM ldb.$TBL AS l LEFT JOIN rdb.$TBL AS r ON $J WHERE r.$F_CMN1 IS NULL"
	Q_ADD="SELECT 'add' as op, $F_R_CMN $F_L_DIFF $F_R_DIFF FROM rdb.$TBL AS r LEFT JOIN ldb.$TBL AS l ON $J WHERE l.$F_CMN1 IS NULL"
	Q_CHR="SELECT 'chr' as op, $F_L_CMN $F_L_DIFF $F_R_DIFF FROM ldb.$TBL AS l INNER JOIN rdb.$TBL AS r ON $J WHERE $W_DIFF"
	Q="${Q_ATTACH}
SELECT * FROM (
${Q_DEL}
UNION ALL
${Q_ADD}
UNION ALL
${Q_CHR}
) ORDER BY $F_SORT;"
	NCMN=$(echo $(echo $F_CMN | wc -w))
	NDIFF=$(echo $(echo $F_DIFF | wc -w))
	# Render the pipe-separated sqlite rows as a unified diff: '-' lines show
	# left-side values, '+' lines show right-side values; a header row is
	# repeated every 10 records.
	echo "$Q" | sqlite3 -header | awk -v TBL=$TBL -v NCMN=$NCMN -v NDIFF=$NDIFF -F '|' '
BEGIN {
ltot = 0
rtot = 0
ls = 1 + NCMN + 1
rs = ls + NDIFF
}
NR == 1 {
HDR=$2
for (i = 3; i <= NCMN + 1; ++i)
HDR=sprintf("%s\t%s", HDR, $i);
for (i = ls; i <= rs - 1; ++i)
HDR=sprintf("%s\t%s", HDR, $i);
}
NR == 2 {
printf "--- a/tbl_%s\n", TBL
printf "+++ b/tbl_%s\n", TBL
}
NR > 1 {
l = $1 != "add" ? 1 : 0;
r = $1 != "del" ? 1 : 0;
if (NR % 10 == 2) {
l +=1
r +=1
}
printf "@@ -%u,%u +%u,%u @@\n", ltot, l, rtot, r
if (NR % 10 == 2)
printf " %s\n", HDR
if ($1 != "add") {
printf "- %s", $2;
for (i = 3; i <= NCMN + 1; ++i)
printf "\t%s", $i
for (i = ls; i <= rs - 1; ++i)
printf "\t%s", $i
printf "\n"
}
if ($1 != "del") {
printf "+ %s", $2;
for (i = 3; i <= NCMN + 1; ++i)
printf "\t%s", $i
for (i = rs; i <= NF; ++i)
printf "\t%s", $i
printf "\n"
}
ltot += l
rtot += r
}
'
done
| true
|
5ff41fb0b58fe2d88c82314c68587dd9a869afd4
|
Shell
|
ncbo/virtual_appliance
|
/deployment/versions
|
UTF-8
| 830
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# This file contains a list of variables as sourced in the deployment wrapper scripts
# <component>_RELEASE corresponds to the version of a component such as UI or API
# installed in the virtual appliance
# RELEASE numbers have to be compatible with this version of the appliance stack
#
# general settings
DATA_DIR=/srv/ontoportal/data
APP_DIR=/srv/ontoportal
VIRTUAL_APPLIANCE_REPO=/srv/ontoportal/virtual_appliance
BUNDLE_PATH='/srv/ontoportal/.bundle'
#export LOCAL_CONFIG_PATH=$VIRTUAL_APPLIANCE_REPO/appliance_config

# Versions of virtual appliance components
APPLIANCE_VERSION='3.1.1'
API_RELEASE='v5.22.3'
UI_RELEASE='v6.7.1'
# ncbo_cron and ontologies_linked_data are versioned in lockstep with the API.
NCBO_CRON_RELEASE=$API_RELEASE
ONTOLOGIES_LINKED_DATA_RELEASE=$API_RELEASE

# Guard: the deployment scripts that source this file must run as the
# 'ontoportal' service user.
if [ "$USER" != 'ontoportal' ]; then
  echo "you need to run this script as ontoportal user"
  exit 1
fi
| true
|
8100ec9ec81f5c4aa246465a7a9bfd15dc15c2e5
|
Shell
|
rdk/PointSite_Assessment
|
/programs/pointsite_merge.sh
|
UTF-8
| 3,763
| 3.3125
| 3
|
[] |
no_license
|
####### define the main_root of PointSite here !!!!! ########
#PointSite_HOME=/home/wangsheng/GitBucket/
#============== global variables defined here ========= # start
# Root of the PointSite checkout; overridable via the caller's environment.
declare PointSite_HOME #-> root directory
if [ -z "${PointSite_HOME}" ]
then
#echo "PointSite_HOME not set. Use default value '~/GitBucket'"
PointSite_HOME=~/GitBucket
fi
#--- assess switch: run simple or all ----#
# Assess_Switch: 1 = assess ALL test datasets, anything else = 'blind' only.
declare Assess_Switch
if [ -z "${Assess_Switch}" ]
then
Assess_Switch=0 #-> by default, we only assess 'blind'
fi
#============== Part IV: use PointSite to help each method ===============#
#-- parameter setting
cur_root=`pwd`
# number of concurrent jobs handed to util/distribute_bash.sh
cpunum=32
# NOTE(review): these two defaults are immediately shadowed by the
# for-loops over dist_thres/ratio_thres below.
dist_thres=6.5
ratio_thres=0.1
method_list=`pwd`/method_list
#--- output directory ----#
outdir=/tmp/tmp_pointsite
mkdir -p $outdir
#---- use SIMPLE or ALL switch -----#
if [ $Assess_Switch -eq 1 ] #-> ALL test datasets
then
dataset_list_wrapper=dataset_list
root_input=$PointSite_HOME/PointSite_TestData/
root_output=$PointSite_HOME/PointSite_Assessment/testset_result/
else #-> blind dataset
dataset_list_wrapper=blind_dataset
root_input=$PointSite_HOME/PointSite_Assessment/programs/example/
root_output=$PointSite_HOME/PointSite_Assessment/programs/example/
fi
#=================== PointSite assisted merged results ======================#
# Sweep distance and ratio thresholds; for each (dist, ratio) pair and each
# dataset, batch PointSite_Merge commands into 'ptsave_proc' and run them in
# parallel, then aggregate per-protein assessment files with awk.
#--- for dist_thres ----#
for dist_thres in 4.5 5.5 6.5
do
echo "#++++++++++++++++++++ dist_thres $dist_thres +++++++++++++++#"
for ratio_thres in 0.1 0.2 0.3
do
echo "#||||||||||||||||||||| ratio_thres $ratio_thres |||||||||||||#"
#--- for each dataset ----#
# Each line of the dataset list is "orig|proc" (input name | output name).
for i in `cat $dataset_list_wrapper`
do
#-> get data name
orig_data=`echo $i | cut -d '|' -f 1`
proc_data=`echo $i | cut -d '|' -f 2`
echo "#=================== data $orig_data ==================#"
#-> define input
suffix=${proc_data}_data
list=$root_input/${orig_data}_data/data_list
gt_dir=$root_input/${orig_data}_data/data
pred_dir=$root_output/${proc_data}_out
pt_dir=$pred_dir/pointsite_$suffix
#------------ run PointSite_Merge for each method ----------#
rm -f ptsave_proc
# NOTE: the inner loops below reuse "$i" as the per-structure id; this is
# safe because orig_data/proc_data were already captured above, and the
# outer for-loop iterates over a pre-expanded word list.
for method in `cat $method_list`;
#for method in sitehound
do
mkdir -p $outdir/${method}_${suffix}
#for param in 4.0 4.5 5.0 5.5 6.0 6.5 7.0 7.5 8.0 8.5 9.0 9.5 10.0
for param in 4.0
do
for i in `cat $list`;
do
echo "$cur_root/util/PointSite_Merge $pt_dir/${i}_xyz_out/${i}_atom.xyz $gt_dir/${i}_lig.xyz $pred_dir/${method}_${suffix}/${i}_xyz_out/${i}_xyz.surface $dist_thres $ratio_thres $param 1> $outdir/${method}_${suffix}/${i}.assess_$param 2> $outdir/${method}_${suffix}/${i}.result_$param " >> ptsave_proc
done
done
done
# run the batched command file across $cpunum workers
$cur_root/util/distribute_bash.sh ptsave_proc $cpunum $cur_root
rm -f ptsave_proc
#------------ collect results ---------------#
for method in `cat $method_list`;
#for method in sitehound
do
echo "#----------- method: $method ------------#"
#for param in 4.0 4.5 5.0 5.5 6.0 6.5 7.0 7.5 8.0 8.5 9.0 9.5 10.0
for param in 4.0
do
echo "#-> param: $param"
rm -f $outdir/${method}_${suffix}.assess_$param
# concatenate non-empty per-structure assessments into one file
for i in `cat $list`;
do
reso=`head -n1 $outdir/${method}_${suffix}/${i}.assess_$param`
if [ "$reso" != "" ]
then
echo "$i $reso" >> $outdir/${method}_${suffix}.assess_$param
fi
done
# Average selected columns over all 26-field rows and print a summary.
# NOTE(review): column meanings (DCA, atom-IoU, ...) are taken from the
# label strings below; confirm against PointSite_Merge's output format.
#awk 'BEGIN{a=0;b=0;c=0;d=0;e=0;f=0;g=0;h=0;z=0;y=0;}{if(NF==26){a+=$4;b+=$6;c+=$8;d+=$12;e+=$14;f+=$19;g+=$21;z+=$24;y+=$26;h++}}END{print a/c" "b/c" "d/c" "e/c" "f/h" "g/h" "c" "h" "z/h" "y/h}' $outdir/${method}_${suffix}.assess_$param
awk 'BEGIN{a=0;b=0;c=0;d=0;e=0;f=0;g=0;h=0;z=0;y=0;}{if(NF==26){a+=$4;b+=$6;c+=$8;d+=$12;e+=$14;f+=$19;g+=$21;z+=$24;y+=$26;h++}}END{print "DCA "a/c" | atom-IoU "f/h" | ligand_num "c" | protein_num "h}' $outdir/${method}_${suffix}.assess_$param
done
done
done
done
done
| true
|
927d0d1131a2b7be7e35af46ad6194ad7d3dd95c
|
Shell
|
doctolib/master-chef
|
/cookbooks/graphite/templates/default/carbon_init_d.erb
|
UTF-8
| 1,092
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
### BEGIN INIT INFO
# Provides: carbon
# Required-Start:
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: carbon
### END INIT INFO

# init.d wrapper (ERB template) around graphite's carbon-cache daemon.
#  - any action: start carbon-cache unless the recorded PID is alive
#  - stop: additionally wait up to 60s for shutdown, then kill -9

PID_FILE="<%= @graphite_directory %>/storage/carbon-cache-a.pid"

cd <%= @graphite_directory %>

<% if @whisper_dev_shm_size %>
mkdir -p /dev/shm/whisper
chown -R www-data /dev/shm/whisper
mount -o remount,size=<%= @whisper_dev_shm_size %> /dev/shm
<% end %>

# Read the recorded PID. Tolerate a missing PID file (e.g. the very first
# start after install): without the fallback, `cat` fails and `bash -e`
# aborts the whole script before carbon is ever launched.
pid=$(cat "$PID_FILE" 2>/dev/null || true)

# Report "running" only when a PID was recorded AND it matches a live
# carbon-cache process. The previous bare comparison treated
# "empty pid == empty pgrep output" as running, so the daemon was never
# started on a clean machine.
if [[ -n "$pid" && "$(pgrep -f "<%= @pypy %> ./bin/carbon-cache.py start")" == "$pid" ]]; then
echo "running"
else
sudo -u www-data <%= @pypy %> ./bin/carbon-cache.py "$@"
fi

# On stop: poll /proc/<pid> for up to 60 seconds, then force-kill.
if [ "$1" = "stop" ]; then
count=0
while true; do
if [ "$count" = "60" ]; then
echo "Unable to stop carbon killing it"
kill -9 "$pid"
break
fi
if [ -f "$PID_FILE" ]; then
if [ -d "/proc/$pid" ]; then
echo "."
sleep 1
else
break
fi
else
break
fi
count=$(($count + 1))
done
fi
| true
|
b5b351716a3b239ff8f0c016034e0d567a16d925
|
Shell
|
oliverroick/nea
|
/deploy.sh
|
UTF-8
| 1,464
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Deploy the nea CloudFormation stack.
#
# Usage: deploy.sh --email=<addr> [--stage=<stage>] [--profile=<aws-profile>]
# Uses AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY when set; otherwise falls back
# to the named AWS CLI profile (default: "default").

PROFILE="default"
STAGE="dev"

# Parse --key=value style arguments.
while [ "$1" != "" ]; do
IFS='=' read -r -a arg <<< "$1"
case "${arg[0]}" in
--email ) EMAIL=${arg[1]}
;;
--stage ) STAGE=${arg[1]}
;;
--profile ) PROFILE=${arg[1]}
;;
esac
shift
done

# --email is mandatory; it parameterizes the stack's notification address.
if [ -z "$EMAIL" ]
then
echo "Argument email not provided" >&2
exit 1
fi

if [[ -z "${AWS_ACCESS_KEY_ID}" ]] && [[ -z "${AWS_SECRET_ACCESS_KEY}" ]]
then
echo "AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY not set, using profile $PROFILE"
USE_PROFILE=1
fi

LAMBDAS_BUCKET="$PROFILE-nea-$STAGE-lambdas"

# make a build directory to store artifacts
rm -rf build
mkdir build

# make the deployment bucket in case it doesn't exist
# (${USE_PROFILE:+...} expands to nothing when env credentials are in use;
# it is deliberately left unquoted so it yields two words when set)
aws s3 mb "s3://$LAMBDAS_BUCKET" ${USE_PROFILE:+--profile "$PROFILE"}

# generate next stage yaml file
aws cloudformation package \
--template-file template.yaml \
--output-template-file build/output.yaml \
--s3-bucket "$LAMBDAS_BUCKET" \
${USE_PROFILE:+--profile "$PROFILE"}

# the actual deployment step
aws cloudformation deploy \
--template-file build/output.yaml \
--stack-name "nea-$STAGE" \
--capabilities CAPABILITY_IAM \
${USE_PROFILE:+--profile "$PROFILE"} \
--parameter-overrides "Email=$EMAIL" "Environment=$STAGE"
| true
|
d70b3af1413492e720a32c67de0601d7b5e3350c
|
Shell
|
tahmmee/toastty
|
/scripts/clean.sh
|
UTF-8
| 1,827
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Tear down stale toastty/gotty sessions: kill the tmux session, remove the
# local toastty container, and (via the remote docker host $CLIENT) remove
# the linked couchbase containers and the testrunner container.
# Arg 1: docker host to pass to `docker -H` for the remote cleanup.
CLIENT=$1
# Remove everything attached to one session name.
cleanup_session() {
session=$1
# kill session
tmux kill-session -t $session 2>/dev/null || true
# get testrunner id (first 'exec' line of the container log names it)
testrunner_id=$(docker logs $session | grep exec | awk '{print $6}' | head -1)
# cleanup toastty
docker rm -fv $session 2>/dev/null
if [ -z "$testrunner_id" ]; then
echo "no testrunner tty"
# NOTE(review): exits the whole script, so any remaining ports/sessions in
# the calling loops are skipped — confirm this is intended.
exit 0
fi
# cleanup linked containers
docker -H $CLIENT inspect $testrunner_id | jq .[0].HostConfig.Links | grep couchbase | awk -F ":" '{print $1}' | sed 's/.*"\///' | xargs -I '{}' docker -H $CLIENT rm -fv '{}'
# cleanup testrunner
docker -H $CLIENT rm -fv $testrunner_id
}
# clean up by ports that no longer have established connections
# (port numbers are parsed out of gotty's log; 7021 is presumably the gotty
# listen port — verify against the gotty config)
ports=$(grep "running for client" /tmp/gotty.log | sed 's/.*\]://' | awk '{print $1}')
for port in $( echo $ports ); do
is_connected=$(netstat -n | grep 7021 | grep $port)
if [ -z "$is_connected" ]; then # may be dangling connection
# get session
pid=$(grep $port /tmp/gotty.log | grep PID | sed 's/.*PID//' | awk '{print $1}')
session=$(cat /tmp/$pid)
# only clean up if this is a single client session (ie. outside of ui)
nclients=$(grep $session /tmp/gotty.log |wc -l)
if [ $nclients -le 1 ]; then
# cleanup if tmux session exists
tmux ls | grep $session && cleanup_session $session
fi
fi
done
# clean up by ports that are in close state
ports=$( netstat -n | grep 7021 | grep CLOSE_WAIT | awk '{print $5}' | sed 's/.*\.//')
for port in $( echo $ports ); do
pid=$(grep $port /tmp/gotty.log | grep PID | sed 's/.*PID//' | awk '{print $1}')
session=$(cat /tmp/$pid)
cleanup_session $session
done
# cleanup any remaining session in detached mode
sessions=$(tmux ls | grep -v attached | awk '{print $1}' | sed 's/://')
for session in $( echo $sessions ); do
cleanup_session $session
done
| true
|
89a95958978d14a3a57a9ccf7a8bf6f4d623ab5c
|
Shell
|
YRXING/envoyingresscontroller
|
/tests/performance/scripts/update_configmap.sh
|
UTF-8
| 2,016
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash

# Copyright 2019 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Rewrites the performance-test configmap YAML templates in place with sed.
# Invoked as: update_configmap.sh <function> <nodename> <url> <configmapName> <nodelimit>
# The trailing `"$@"` dispatches $1 as the function to run; the globals below
# pick up the remaining positional arguments.
SRC_DIR=${GOPATH}/src/github.com/kubeedge/kubeedge
EDGE_PATH=${SRC_DIR}/tests/performance/assets/02-edgeconfigmap.yaml
CLOUD_PATH=${SRC_DIR}/tests/performance/assets/01-configmap.yaml
nodename=$2
Url=$3
configmapName=$4
nodelimit=$5
# Patch the edge configmap: namespace, configmap name, node identity, and the
# cloud connection URL/protocol (websocket for wss URLs, quic otherwise).
create_edge_config() {
if [ ! -f ${EDGE_PATH} ]; then
echo "There is no 03-configmap-edgenodeconf.yaml!"
exit 1
fi
echo "file found !!!!!!!!!!!!!"
sed -i "s|namespace: .*|namespace: default|g" ${EDGE_PATH}
sed -i "s|name: edgecore-configmap.*|name: ${configmapName}|g" ${EDGE_PATH}
sed -i "s|node-id: .*|node-id: ${nodename}|g" ${EDGE_PATH}
sed -i "s|hostname-override: .*|hostname-override: ${nodename}|g" ${EDGE_PATH}
if [[ ${Url} == *"wss"* ]]; then
# NOTE(review): the "20s"/"28s" addresses patch a specific line of the
# template (there are multiple `url:` keys) — fragile if the YAML changes.
sed -i "20s|url: .*|url: ${Url}/e632aba927ea4ac2b575ec1603d56f10/${nodename}/events|g" ${EDGE_PATH}
sed -i "s|protocol: .*|protocol: websocket|g" ${EDGE_PATH}
else
sed -i "28s|url: .*|url: ${Url}|g" ${EDGE_PATH}
sed -i "s|protocol: .*|protocol: quic|g" ${EDGE_PATH}
fi
}
# Patch the cloud configmap: master URL, configmap name and node limit.
create_cloud_config() {
if [ ! -f ${CLOUD_PATH} ]; then
echo "There is no 01-configmap.yaml!"
exit 1
fi
echo "file found !!!!!!!!!!!!!"
sed -i "s|master: .*|master: ${Url}|g" ${CLOUD_PATH}
sed -i "s|name: .*|name: ${configmapName}|g" ${CLOUD_PATH}
sed -i "s|node-limit: .*|node-limit: ${nodelimit}|g" ${CLOUD_PATH}
}
# Dispatch: run the function named by $1 with the remaining arguments.
"$@"
| true
|
f01a0dbfd89ec786974462992ba81fcc6f62e825
|
Shell
|
danielhoherd/homepass
|
/RaspberryPi/homepass.sh
|
UTF-8
| 1,996
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# HeadURL: https://github.com/danielhoherd/homepass/blob/master/RaspberryPi/homepass.sh
# Endlessly cycles hostapd through access-point identities (MAC+SSID pairs)
# pulled from a sqlite database, running each for RELAY_TIME seconds and
# recording when each identity was last used.
CONFIG_FILE=/etc/hostapd/hostapd.conf
RELAY_TIME=100
DB=/root/homepass.db
# Make sure no previous hostapd survives before we start rewriting its config.
service hostapd stop ; sleep 1 ;
pkill hostapd ; sleep 1 ;
pkill -9 hostapd
while true ; do
# Candidate pool: alternative selection queries kept for reference.
# Each row is "mac|ssid"; note the unquoted expansion word-splits rows, so
# this assumes SSIDs contain no whitespace — the queries below only select
# such SSIDs.
aps=()
#aps+=( $(sqlite3 "${DB}" "select * from (select mac,ssid from aps where last_used < datetime('now','-8 hours') or last_used is NULL) order by random() ;") )
#aps+=( $(sqlite3 "${DB}" "select * from (select mac,ssid from aps order by last_used asc limit 10 ) order by random() ;") )
#aps+=( $(sqlite3 "${DB}" "select * from (select mac,ssid from aps where ssid='wifine' order by last_used asc limit 10 ) order by random() ;") )
#aps+=( $(sqlite3 "${DB}" "select * from (select mac,ssid from aps where ssid='NZ@McD1' order by last_used asc limit 10 ) order by random() ;") )
aps+=( $(sqlite3 "${DB}" "select * from (select mac,ssid from aps where ssid='attwifi' order by last_used asc limit 10 ) order by random() ;") )
aps+=( $(sqlite3 "${DB}" "select mac,ssid from aps where ssid='attwifi' and mac like '4E:53:50:4F:4F:%' and last_used < datetime('now','-12 hours') order by random() limit 10 ;") )
# Pick one row at random and split "mac|ssid".
p=${aps[$(( RANDOM % ${#aps[@]} ))]}
MAC="${p%|*}"
SSID="${p#*|}"
[ ! -z "$MAC" ] && [ ! -z "$SSID" ] || exit 1
# Mark the identity as used before broadcasting it.
sqlite3 "${DB}" "update aps set last_used = datetime('now') where mac = '$MAC' and ssid = '$SSID' ;"
# Write a fresh hostapd config for this identity (random 802.11g channel).
cat > $CONFIG_FILE <<EOF
ssid=$SSID
bssid=$MAC
interface=wlan0
bridge=br0
ctrl_interface=wlan0
ctrl_interface_group=0
hw_mode=g
channel=$(( RANDOM % 13 + 1 ))
wpa=0
rsn_pairwise=CCMP
beacon_int=100
auth_algs=3
driver=nl80211
ieee80211n=1
macaddr_acl=1
accept_mac_file=/etc/hostapd/accept
wmm_enabled=0
eap_reauth_period=360000000
EOF
# Run hostapd for RELAY_TIME seconds, prefixing each log line with a
# timestamp and the identity being broadcast.
echo "$(date "+%F %T%z") ${MAC} ${SSID} Starting relay for ${RELAY_TIME} seconds"
timeout "${RELAY_TIME}" hostapd /etc/hostapd/hostapd.conf 2>&1 | while read -r X ; do echo "$(date "+%F %T%z") ${MAC} ${SSID} ${X}" ; done ;
done
| true
|
361cccf90ce0d8d55758add6a378824174022309
|
Shell
|
rgve/LiveRNome
|
/171003_HiSAT2_ref_genome_build_JNS.sh
|
UTF-8
| 538
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
#script to build reference genome for HiSAT2
#Made by Jonas N. Søndergaard
#Made on 171003
#UPPMAX commands (Uppsala Multidisciplinary Center for Advanced Computational Science)
#SBATCH -A uppmax_proj_number
#SBATCH -p core
#SBATCH -n 2
#SBATCH -t 3:00:00
#load packages. bioinfo-tools is loaded on uppmax in order to load all other packages used.
module load bioinfo-tools
module load HISAT2/2.1.0
#use HiSAT2 to build reference genome
#(-f: input is FASTA; second argument is the index basename to write)
hisat2-build \
-f /proj/ref_genomes/GRCh38.p10.genome.fa \
/proj/ref_genomes/GRCh38.p10.genome
| true
|
3204a62e675bb85bc609fa39dae17f4c26d321fa
|
Shell
|
rouxpn/raven
|
/scripts/setup_raven_libs
|
UTF-8
| 480
| 3.59375
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Tries to load raven's conda package, and activate raven_libraries.
# Looks for a conda on PATH that already has a raven_libraries env;
# otherwise falls back to the appliance-local conda under /opt/raven_libs.

# Fall back to the conda bundled in /opt/raven_libs (if present) and
# activate raven_libraries from it.
try_use_raven_conda ()
{
if test -e /opt/raven_libs/bin/conda;
then
export PATH="/opt/raven_libs/bin:$PATH"
source activate raven_libraries
fi
}
# Check for conda and activate raven_libraries.
# `command -v` replaces `which`: it is POSIX-specified and its output is
# discarded, so the probe no longer leaks the conda path onto stdout.
if command -v conda >/dev/null 2>&1;
then
if conda env list | grep -q raven_libraries;
then
source activate raven_libraries
else
try_use_raven_conda
fi
else
try_use_raven_conda
fi
| true
|
5f5c96a70dbba5ac9df8b1c760a493c6cabfdb0e
|
Shell
|
Cloudxtreme/dotfiles-26
|
/udev/rules/unused/camera.sh
|
UTF-8
| 382
| 3.0625
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Mount an attached camera (udev-created /dev/camera symlink) and open its
# photo folder in Thunar on the local X display. Intended to run from a udev
# rule, which may fire before the device node exists.
symlink="/dev/camera"
mountpoint="/mnt/camera"
photopath="$mountpoint/DCIM/100PHOTO/"
# Wait for symlink: poll every 0.5s, give up after 8 attempts (~4s).
iter=0
while [ ! -e "$symlink" ]
do
sleep 0.5
iter=$((iter + 1))   # arithmetic expansion instead of forking `expr`
if [ "$iter" -gt 8 ]
then exit
fi
done
# Mount owned by the desktop user so the file manager can read it.
sudo mount -o uid=panther,gid=users "$symlink" "$mountpoint"
# Point at the user's X session (udev rules run outside of it).
export XAUTHORITY=/home/panther/.Xauthority
export DISPLAY=:0.0
/usr/bin/thunar "$photopath" &
| true
|
ab65854c6765157b18fae29e290aed37cdd4b276
|
Shell
|
git-for-windows/MINGW-packages
|
/mingw-w64-pinentry/PKGBUILD
|
UTF-8
| 1,557
| 2.796875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# $Id$
# Maintainer: Yuui Tanabe <yuuitanabe@163.com>
# PKGBUILD for mingw-w64 pinentry: Qt-based PIN/passphrase dialogs speaking
# the Assuan protocol. All terminal/GTK/GNOME frontends are disabled; only
# the Qt frontend (with libsecret support) is built.
pkgbase=mingw-w64-pinentry
_realname=pinentry
pkgname=(${MINGW_PACKAGE_PREFIX}-${_realname})
pkgver=1.1.0
pkgrel=1
url="https://www.gnupg.org/software/pinentry/index.html"
license=('GPL')
pkgdesc='Collection of simple PIN or passphrase entry dialogs which utilize the Assuan protocol (mingw-w64)'
arch=('any')
depends=("${MINGW_PACKAGE_PREFIX}-qt5"
"${MINGW_PACKAGE_PREFIX}-libsecret"
"${MINGW_PACKAGE_PREFIX}-libassuan")
makedepends=("${MINGW_PACKAGE_PREFIX}-gcc")
source=(https://www.gnupg.org/ftp/gcrypt/pinentry/pinentry-${pkgver}.tar.bz2{,.sig})
sha256sums=('68076686fa724a290ea49cdf0d1c0c1500907d1b759a3bcbfbec0293e8f56570'
'8425fbe9ddff3de980cb33ef3ae3bc8a00d214b2554cd73b3a8680eb35fbe6c8')
validpgpkeys=('D8692123C4065DEA5E0F3AB5249B39D24F25E3B6'
'46CC730865BB5C78EBABADCF04376F3EE0856959'
'031EC2536E580D8EA286A9F22071B08A33BD3F06')
# Out-of-tree configure+make in build-${MINGW_CHOST}, recreated each run.
build()
{
[[ -d ${srcdir}/build-${MINGW_CHOST} ]] && rm -rf ${srcdir}/build-${MINGW_CHOST}
mkdir -p "${srcdir}/build-${MINGW_CHOST}" && cd "${srcdir}/build-${MINGW_CHOST}"
../${_realname}-${pkgver}/configure \
--prefix=${MINGW_PREFIX} \
--build=${MINGW_CHOST} \
--host=${MINGW_CHOST} \
--target=${MINGW_CHOST} \
--disable-pinentry-tty \
--disable-pinentry-curses \
--disable-fallback-curses \
--disable-pinentry-emacs \
--disable-pinentry-gtk2 \
--disable-pinentry-gnome3 \
--enable-pinentry-qt \
--enable-libsecret
make
}
# Stage the build into $pkgdir for makepkg to package.
package()
{
cd "${srcdir}/build-${MINGW_CHOST}"
make DESTDIR=${pkgdir} install
}
| true
|
bf051d8aacee73ca3542a4e0dddbab879ffc1612
|
Shell
|
Simon0910/eshop-cloud
|
/txt/03_03_redisc-单机集群启动关闭顺序
|
UTF-8
| 1,680
| 3.75
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Simple Redis init.d script conceived to work on Linux systems
# as it does use of the /proc filesystem.
# chkconfig: 2345 90 10
# description: Redis is a persistent key-value database
# Starts/stops a 4-instance local Redis cluster (ports 7001/7002/7011/7022).
REDISPORT1=7001
REDISPORT2=7002
REDISPORT3=7011
REDISPORT4=7022
EXEC=/usr/local/redis/bin/redis-server
CLIEXEC=/usr/local/redis/bin/redis-cli
# NOTE(review): nothing in this script writes $PIDFILE; it presumably relies
# on one of the instance .conf files setting `pidfile` to this path — confirm.
PIDFILE=/var/run/redisc_7000.pid
CONF1="/etc/redis/${REDISPORT1}.conf"
CONF2="/etc/redis/${REDISPORT2}.conf"
CONF3="/etc/redis/${REDISPORT3}.conf"
CONF4="/etc/redis/${REDISPORT4}.conf"
case "$1" in
start)
# The PID file doubles as a "cluster already started" marker.
if [ -f $PIDFILE ]
then
echo "$PIDFILE exists, process is already running or crashed"
else
echo "Starting Redis cluster server..."
$EXEC $CONF1 &
$EXEC $CONF2 &
$EXEC $CONF3 &
$EXEC $CONF4 &
echo "reids master-slave start finshed..."
fi
;;
stop)
if [ ! -f $PIDFILE ]
then
echo "$PIDFILE does not exist, process is not running"
else
PID=$(cat $PIDFILE)
echo "Stopping ..."
# Ask each instance to shut down, then wait for the recorded PID to
# disappear from /proc before reporting success.
$CLIEXEC -p $REDISPORT1 shutdown
$CLIEXEC -p $REDISPORT2 shutdown
$CLIEXEC -p $REDISPORT3 shutdown
$CLIEXEC -p $REDISPORT4 shutdown
while [ -x /proc/${PID} ]
do
echo "Waiting for Redis cluster to shutdown ..."
sleep 2
done
echo "Redis cluster stopped"
fi
;;
*)
echo "Please use start or stop as first argument"
;;
esac
| true
|
275edf032d31d15947f717fbbab5b19aedfd8109
|
Shell
|
flickr-downloadr/flickr-downloadr-gtk
|
/build-tools/deploy.sh
|
UTF-8
| 3,213
| 3.5625
| 4
|
[
"LicenseRef-scancode-free-unknown",
"MIT"
] |
permissive
|
# Deploys a flickr-downloadr release: copies built artifacts into the
# website repo (flickr-downloadr.github.io) on a deploy-<version> branch,
# and — on GitHub Actions only — tags the source repo with the release.
# Runs on GitHub Actions, CircleCI or AppVeyor; deployment happens only when
# the triggering commit message contains "[deploy]".
SOURCE_BRANCH="source"
REPO="https://github.com/flickr-downloadr/flickr-downloadr.github.io.git"
SOURCEREPO="https://github.com/flickr-downloadr/flickr-downloadr-gtk.git"
CIENGINE="appveyor"
# Normalize the commit message into APPVEYOR_REPO_COMMIT_MESSAGE regardless
# of which CI engine is running us.
if [[ $GITHUB_WORKFLOW = 'ci cd' ]]
then
echo "The GITHUB_COMMIT_MESSAGE variable has a value of - ${GITHUB_COMMIT_MESSAGE}"
CIENGINE="github"
APPVEYOR_REPO_COMMIT_MESSAGE=${GITHUB_COMMIT_MESSAGE}
elif [[ $CIRCLECI = true ]]
then
echo "The CIRCLE_SHA1 variable has a value of - ${CIRCLE_SHA1}"
CIENGINE="circleci"
# CircleCI does not expose the commit message; fetch it from the GitHub API.
wget "http://stedolan.github.io/jq/download/linux64/jq"
chmod +x jq
echo "About to run: curl https://api.github.com/repos/flickr-downloadr/flickr-downloadr-gtk/commits/${CIRCLE_SHA1} | ./jq -r '.commit.message'"
APPVEYOR_REPO_COMMIT_MESSAGE=$(curl -u ${GH_TOKEN}:x-oauth-basic https://api.github.com/repos/flickr-downloadr/flickr-downloadr-gtk/commits/$CIRCLE_SHA1 | ./jq -r '.commit.message')
fi
echo "CI Server : ${CIENGINE}."
echo "Commit Message : '${APPVEYOR_REPO_COMMIT_MESSAGE}'"
# Only commits explicitly marked "[deploy]" trigger a deployment.
if [[ $APPVEYOR_REPO_COMMIT_MESSAGE != *\[deploy\]* ]]
then
echo 'There is nothing to deploy here. Moving on!';
exit
fi
echo "Beginning Deploy..."
git config --global user.name "The CI Bot"
git config --global user.email "contact.us@flickrdownloadr.com"
VERSION="v${BUILDNUMBER}"
DEPLOYVERSION="deploy-${VERSION}"
# circleci seems to be cloning to /root/project
THISREPOCLONEDIR="flickr-downloadr-gtk"
if [[ $CIRCLECI = true ]]
then
THISREPOCLONEDIR="project"
fi
# Clone the website repo and push the built artifacts to a deploy branch.
cd ../..
git clone -b $SOURCE_BRANCH $REPO
cd flickr-downloadr.github.io
git config credential.helper "store --file=.git/fd-credentials"
echo "https://${GH_TOKEN}:@github.com" > .git/fd-credentials
git config push.default tracking
git checkout -b ${DEPLOYVERSION}
rm -rf "../${THISREPOCLONEDIR}/dist/osx/Install flickr downloadr (${VERSION}).app"
cp -r ../${THISREPOCLONEDIR}/dist/* ./app/installer
git add -f .
git commit -m "created release ${VERSION} ($CIENGINE) [skip ci]" -s
# circleci throws 'fatal: could not read from remote repository' error
if [[ $CIRCLECI = true ]]
then
git remote set-url origin "https://${GH_TOKEN}:@github.com/flickr-downloadr/flickr-downloadr.github.io.git"
fi
echo "Pulling/Rebasing with the remote branch..."
git ls-remote --heads origin | grep ${DEPLOYVERSION} && git pull --rebase origin ${DEPLOYVERSION}
echo "Pushing the branch to remote..."
# Push with -u only when the branch does not yet exist on the remote.
git ls-remote --heads origin | grep ${DEPLOYVERSION} && git push origin ${DEPLOYVERSION} || git push -u origin ${DEPLOYVERSION}
# Do the below script only from GitHub - updates source to mark the current released version
if [[ $CIENGINE = 'github' ]]
then
cd ..
git clone -b main $SOURCEREPO flickr-downloadr-gtk-new
cd flickr-downloadr-gtk-new
git config credential.helper "store --file=.git/fd-credentials"
echo "https://${GH_TOKEN}:@github.com" > .git/fd-credentials
git config push.default tracking
# Carry the bumped build number / assembly info back into the source repo.
cp -f ../flickr-downloadr-gtk/build-tools/build.number ./build-tools/
cp -f ../flickr-downloadr-gtk/source/CommonAssemblyInfo.cs ./source/
git add -f .
git commit -m "Released ${VERSION} [skip ci]" -s
git tag -a ${VERSION} -m "Creating release ${VERSION}"
git push --tags origin main
fi
echo "Deployed $VERSION successfully"
exit
| true
|
89447705001693953cc5b651cf4d86881ec5666a
|
Shell
|
lenary/lowrisc-toolchains
|
/generate-clang-meson-cross-file.sh
|
UTF-8
| 2,787
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0

## generate-clang-meson-cross-file.sh
#
# This generates a cross file to configure meson for cross-compiling with clang.
#
# Docs: https://mesonbuild.com/Cross-compilation.html

set -e
set -x
set -o pipefail

if ! [ "$#" -ge 2 ]; then
echo "Usage: $0 <target> <prefix_dir> <cflags...>"
exit 2
fi;

## Take configuration from arguments
# This is the gcc target triple
toolchain_target="${1}"
# This is the directory where the toolchain has been installed.
toolchain_dest="${2}"
# Remaining cflags for build configurations
toolchain_cflags=("${@:3}")

# Meson uses the driver when both compiling and linking, which may need flags to
# identify exactly how to set up paths and defaults for both.
#
# In particular, the clang driver requires a `--gcc-toolchain=<path>` argument
# to find the right libraries if there are system versions of the risc-v
# toolchains installed.
#
# Build a comma-separated, single-quoted flag list suitable for a meson array.
# (The -z branch below is effectively dead since the variable is seeded with
# the --gcc-toolchain flag; it is harmless and kept for safety.)
meson_driver_flags="'--gcc-toolchain=${toolchain_dest}'"
for flag in "${toolchain_cflags[@]}"; do
if [ -z "${meson_driver_flags}" ]; then
meson_driver_flags+="'${flag}'";
else
meson_driver_flags+=", '${flag}'"
fi
done

config_dest="${toolchain_dest}/meson-${toolchain_target}-clang.txt"

# linux-gnu targets get a sysroot entry; bare-metal (-elf) targets do not.
sysroot_config="";
system_name=""
case "${toolchain_target}" in
riscv*-*-linux-gnu)
sysroot_config="sys_root = '${toolchain_dest}/${toolchain_target}/sysroot'";
system_name="linux";
;;
riscv*-*-elf)
system_name="bare metal";
;;
esac;

# Write the cross file (and echo it to the build log via tee).
tee "${config_dest}" <<CONFIG
# Autogenerated by ${0} on $(date -u)
# Problems? Bug reporting instructions in ${toolchain_dest}/buildinfo
#
# If you have relocated this toolchain, change all occurences of '${toolchain_dest}'
# to point to the new location of the toolchain.

[binaries]
c = '${toolchain_dest}/bin/${toolchain_target}-clang'
cpp = '${toolchain_dest}/bin/${toolchain_target}-clang++'
ar = '${toolchain_dest}/bin/${toolchain_target}-ar'
ld = '${toolchain_dest}/bin/${toolchain_target}-ld'
c_ld = '${toolchain_dest}/bin/${toolchain_target}-ld'
cpp_ld = '${toolchain_dest}/bin/${toolchain_target}-ld'
objdump = '${toolchain_dest}/bin/${toolchain_target}-objdump'
objcopy = '${toolchain_dest}/bin/${toolchain_target}-objcopy'
strip = '${toolchain_dest}/bin/${toolchain_target}-strip'
as = '${toolchain_dest}/bin/${toolchain_target}-as'

[properties]
needs_exe_wrapper = true
has_function_printf = false
c_args = [${meson_driver_flags}]
c_link_args = [${meson_driver_flags}]
cpp_args = [${meson_driver_flags}]
cpp_link_args = [${meson_driver_flags}]
${sysroot_config}

[host_machine]
system = '${system_name}'
cpu_family = '${toolchain_target%%-*}'
cpu = 'ibex'
endian = 'little'
CONFIG
| true
|
db9847ca6be3b80b40527b4f6d662bd142ca8bf3
|
Shell
|
containernetworking/plugins
|
/plugins/main/windows/build.sh
|
UTF-8
| 308
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Cross-compile the Windows-only CNI plugins listed in plugins/windows_only.txt
# with the mingw-w64 toolchain. Expects $GO and $REPO_PATH to be provided by
# the calling build script; remaining arguments are passed through to go build.
set -e
PLUGINS=$(cat plugins/windows_only.txt)
for d in $PLUGINS; do
if [ -d "$d" ]; then
# Output binary is named after the plugin directory, with .exe appended.
plugin="$(basename "$d").exe"
echo "  $plugin"
CXX=x86_64-w64-mingw32-g++ CC=x86_64-w64-mingw32-gcc CGO_ENABLED=1 \
$GO build -o "${PWD}/bin/$plugin" "$@" "$REPO_PATH"/$d
fi
done
| true
|
b6eb6e0f76d03cf858449557b2ac3ebb2c4d2855
|
Shell
|
whatsondoc/Linux
|
/hpc/slurm/submission_scripts/parallel_find_and_calculate/parallel_find_launcher.sh
|
UTF-8
| 5,280
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Wrapper script to launch parallel Find, file list aggregation & calculate total size from file list input
# Intended to be used with Slurm, leveraging job arrays
# This launcher script will submit Slurm job array for the parllel find execution, and dependencies for the aggregation and size calculation tasks
#=====================================================================#
# USER-LEVEL VARIABLES
# NOTE(review): the nested double quotes below terminate the string early;
# these look like placeholders the user is expected to replace (escaping the
# inner quotes) before running — confirm the intended usage.
export PFCS_SEARCH_STRING_INCLUDE=" -name "*<PATTERN_1>*" -or -iname "*<PATTERN_2>*" -and -iname "*<PATTERN_3>*" "
export PFCS_SEARCH_STRING_EXCLUDE=" -not -name "*<PATTERN_4>*" " # Note that there must be a 'NOT' statement included, as the find command relies on this and cannot handle empty parenthesis
PFCS_PAR_FIND_SCRIPT="/path/to/parallel_find_executor.sh"
PFCS_AGGREGATOR_SCRIPT="/path/to/parallel_find_aggregator.sh"
PFCS_CALC_SIZE_SCRIPT="/path/to/calculate_size.sh"
export PFCS_OUTPUT_DIR="/path/to/output/directory"
export PFCS_ROOT_PATH="/path/to/root/from/where/to/find"
export PFCS_PATH_DEPTH="4"
#export PFCS_ROOT_PATH="${1}" # This variable can be set to accept the root path as the first positional argument passed to the script
#export PFCS_PATH_DEPTH="${2}" # This variable can be set to accept the depth as the second positional argument passed to the script
#---------------------------------------------------------------------#
# FRAMEWORK-LEVEL VARIABLES
export PFCS_UUID="${RANDOM}_" # The underscore '_' is intentional, as it separates the random integer generated from other characters in names. Comment this variable out to not use a unique identifier.
export PFCS_WORKING_LOG="${PFCS_OUTPUT_DIR}/${PFCS_UUID}stdout_output.log"
#=====================================================================#
# FUNCTIONS
# Submit the Slurm pipeline:
#  1. "top" find job (directories above the depth cut),
#  2. a job array of "low" find jobs (one per directory at the cut depth),
#  3. an aggregator job dependent on 1+2 (afterany),
#  4. a size-calculation job dependent on the aggregator (afterok).
pfcs_slurm_submit() {
PFCS_PAR_FIND_TOP_SUBMIT=$(sbatch --job-name=${PFCS_UUID}parallel_find_unit_top --output=${PFCS_OUTPUT_DIR}/%x.out ${PFCS_PAR_FIND_SCRIPT} TOP)
PFCS_PAR_FIND_TOP_JOB_ID=$( echo ${PFCS_PAR_FIND_TOP_SUBMIT} | awk '{print $4}')
echo "Parallel Find - Top : ${PFCS_PAR_FIND_TOP_SUBMIT}"
# Collect the expanded per-task job ids (kept for reference/logging).
for PFCS_TOP_JOB_ID in $(scontrol show job ${PFCS_PAR_FIND_TOP_JOB_ID} | grep JobId | awk '{print $1}' | cut -f2 -d '=')
do
PFCS_ARRAY_JOB_IDS=${PFCS_ARRAY_JOB_IDS}:${PFCS_TOP_JOB_ID}
done
PFCS_PAR_FIND_LOW_SUBMIT=$(sbatch --array=0-$(( ${PFCS_NUM_DIRS} - 1 )) --job-name=${PFCS_UUID}parallel_find_unit_low --output=${PFCS_OUTPUT_DIR}/%x-%a.out ${PFCS_PAR_FIND_SCRIPT} LOW)
PFCS_PAR_FIND_LOW_JOB_ID=$( echo ${PFCS_PAR_FIND_LOW_SUBMIT} | awk '{print $4}')
echo "Parallel Find - Low : ${PFCS_PAR_FIND_LOW_SUBMIT}"
for PFCS_LOW_JOB_ID in $(scontrol show job ${PFCS_PAR_FIND_LOW_JOB_ID} | grep JobId | awk '{print $1}' | cut -f2 -d '=')
do
PFCS_ARRAY_JOB_IDS=${PFCS_ARRAY_JOB_IDS}:${PFCS_LOW_JOB_ID}
done
PFCS_AGGREGATOR_SUBMIT=$(sbatch --dependency=afterany:${PFCS_PAR_FIND_TOP_JOB_ID}:${PFCS_PAR_FIND_LOW_JOB_ID} --job-name=${PFCS_UUID}parallel_find_aggregation --output=${PFCS_OUTPUT_DIR}/%x.out ${PFCS_AGGREGATOR_SCRIPT})
PFCS_AGGREGATOR_JOB_ID=$( echo ${PFCS_AGGREGATOR_SUBMIT} | awk '{print $4}')
echo "Aggregator : ${PFCS_AGGREGATOR_SUBMIT}"
PFCS_CALC_SIZE_SUBMIT=$(sbatch --exclusive --dependency=afterok:${PFCS_AGGREGATOR_JOB_ID} --job-name=${PFCS_UUID}calculate_size --output=${PFCS_OUTPUT_DIR}/%x.out ${PFCS_CALC_SIZE_SCRIPT} ${PFCS_OUTPUT_DIR}/${PFCS_UUID}parallel_find_aggregation-${PFCS_AGGREGATOR_JOB_ID}.out)
echo "Calculate Size : ${PFCS_CALC_SIZE_SUBMIT}"
echo
}
echo "
Date : $(date)
Operation : Parallel find to create file list
Root path : ${PFCS_ROOT_PATH}
Path depth : ${PFCS_PATH_DEPTH}
Output file path : ${PFCS_OUTPUT_DIR}
Unique identifier : ${PFCS_UUID}
Calculating directory count ..." | tee -a ${PFCS_WORKING_LOG}
# One "low" array task is submitted per directory found at the cut depth.
PFCS_NUM_DIRS=$(find ${PFCS_ROOT_PATH} -mindepth ${PFCS_PATH_DEPTH} -maxdepth ${PFCS_PATH_DEPTH} -type d -not -path "*.Trash*" 2> /dev/null | wc -l)
echo "Number of directories : ${PFCS_NUM_DIRS}
" | tee -a ${PFCS_WORKING_LOG}
pfcs_slurm_submit | tee -a ${PFCS_WORKING_LOG}
echo | tee -a ${PFCS_WORKING_LOG}
| true
|
310b96c9814a091f292e416a1bf24cf0d0f436f3
|
Shell
|
proller/clickhouse-dev
|
/server_minimal.sh
|
UTF-8
| 242
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build a minimal ClickHouse server: configure with all optional libraries
# disabled, build the clickhouse-bundle target, then launch the server.
set -e
# BUILD_TYPE may be preset by the caller; default to "_minimal".
export BUILD_TYPE=${BUILD_TYPE="_minimal"}
# Directory containing this script and its cmake.sh/make.sh/server.sh helpers.
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
CMAKE_OS+=" -DENABLE_LIBRARIES=0 "
# Forward arguments with "$@" (not $*) so args containing spaces survive.
. "$CUR_DIR/cmake.sh" "$@"
. "$CUR_DIR/make.sh" clickhouse-bundle
. "$CUR_DIR/server.sh"
| true
|
dd67676a4ae2debc074093406a255ce6aa82af4b
|
Shell
|
k0smik0/saint_seiya_cosmo_fantasy__mining
|
/stats_generator/generate.sh
|
UTF-8
| 529
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate Java `numericalClassesMap.put(...)` lines, one per stat name
# listed in numerical.txt. The class name is the stat name with spaces
# replaced by underscores and dots removed.
[ $# -lt 1 ] && exit 1
map_putters=()
# Read every line, including a final line without a trailing newline.
while IFS='' read -r line || [[ -n "$line" ]]; do
stat_name="$line"
class_name=$(echo "$stat_name" | sed 's/ /_/g' | sed 's/\.//g')
map_putter="numericalClassesMap.put(\"$stat_name\",${class_name}.class);"
# Append as ONE array element. The previous unquoted
# `map_putters=(${map_putters[@]} $map_putter)` word-split every entry
# whose stat name contained spaces, corrupting the emitted Java lines.
map_putters+=("$map_putter")
# echo -e "package net.iubris.optimus_saint.model.saint.data;\n\npublic class $class_name extends NumericStat { }\n" > "${class_name}.java"
done < "numerical.txt"
# Print one generated statement per line (quoted so spaces are preserved).
for i in "${map_putters[@]}"; do
echo "$i"
done
| true
|
4cef2fb15cab22daea1c2001cec10583412d29e5
|
Shell
|
ZRouter/ZRouter
|
/contrib/autossh/autossh.host
|
UTF-8
| 678
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Example script to start up tunnel with autossh.
#
# This script will tunnel 2200 from the remote host
# to 22 on the local host. On remote host do:
# ssh -p 2200 localhost
#
# $Id: autossh.host,v 1.6 2004/01/24 05:53:09 harding Exp $
#
ID=username
HOST=hostname.your.net
# Start an ssh-agent and load the key if no agent is available yet.
if [ "X$SSH_AUTH_SOCK" = "X" ]; then
eval `ssh-agent -s`
ssh-add $HOME/.ssh/id_rsa
fi
# Optional autossh tuning knobs; uncomment as needed.
#AUTOSSH_POLL=600
#AUTOSSH_PORT=20000
#AUTOSSH_GATETIME=30
#AUTOSSH_LOGFILE=$HOST.log
#AUTOSSH_DEBUG=yes
#AUTOSSH_PATH=/usr/local/bin/ssh
export AUTOSSH_POLL AUTOSSH_LOGFILE AUTOSSH_DEBUG AUTOSSH_PATH AUTOSSH_GATETIME AUTOSSH_PORT
# -2: ssh protocol 2; -fN: background, no remote command; -M 20000: autossh
# monitor port; -R: expose local port 22 as remote port 2200.
autossh -2 -fN -M 20000 -R 2200:localhost:22 ${ID}@${HOST}
| true
|
17de8214ee84210355d2c452e74d2edea50f948c
|
Shell
|
akankshreddy/Kaustav-CSE-LABS-and-Projects
|
/Sem04-Embedded-Systems-LAB/OTHERS/Assement Prep/damn2/kkk/OS/Lab2/script.sh
|
UTF-8
| 215
| 2.75
| 3
|
[] |
no_license
|
# Tutorial script: print the script name and arguments, then drop the current
# date into a temporary file named "<first-arg>.<pid>" and list it.
script_path=$0
tmp_file=$1.$$
echo the name of this script is $script_path
echo first argument us $1
echo a list of all the arguments is $*
echo this script places the date into a temporary child
echo called $1.$$
# write today's date into the per-run temp file, then show it
date >$tmp_file
ls $tmp_file
echo $firstname
|
6cb95868a6b87c235f21b854f244e3c92bae4f56
|
Shell
|
a2o/puppet-modules-a2o-essential
|
/modules/a2o_essential_linux_geoip/templates/install-geoip-database-free.sh
|
UTF-8
| 2,069
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
###########################################################################
# a2o Essential Puppet Modules #
#-------------------------------------------------------------------------#
# Copyright (c) Bostjan Skufca #
#-------------------------------------------------------------------------#
# This source file is subject to version 2.0 of the Apache License, #
# that is bundled with this package in the file LICENSE, and is #
# available through the world-wide-web at the following url: #
# http://www.apache.org/licenses/LICENSE-2.0 #
#-------------------------------------------------------------------------#
# Authors: Bostjan Skufca <my_name [at] a2o {dot} si> #
###########################################################################

# Puppet ERB template: downloads the free MaxMind GeoIP databases from a
# mirror into <%= destDir %>. The whole script is one &&-chain so any failing
# step aborts the rest.

# Compile directory
export SRCROOT="<%= compileDir %>" &&
mkdir -p $SRCROOT &&
cd $SRCROOT &&

### Set versions and directories
export PVERSION_SW="<%= softwareVersion %>" &&
export PDESTDIR="<%= destDir %>" &&

### Install MaxMind GeoIP free databases
# Download into a scratch subdir first; only move into place once unpacked.
mkdir -p $PDESTDIR/install-tmp &&
cd $PDESTDIR/install-tmp &&
rm -f *.dat &&
rm -f *.gz &&
# Original MaxMind URLs kept for reference; a2o mirror pins the version.
#wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz &&
#wget http://geolite.maxmind.com/download/geoip/database/GeoIPv6.dat.gz &&
#wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz &&
#wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCityv6-beta/GeoLiteCityv6.dat.gz &&
wget http://source.a2o.si/files/geoip/$PVERSION_SW/GeoIP.dat.gz &&
wget http://source.a2o.si/files/geoip/$PVERSION_SW/GeoIPv6.dat.gz &&
wget http://source.a2o.si/files/geoip/$PVERSION_SW/GeoLiteCity.dat.gz &&
wget http://source.a2o.si/files/geoip/$PVERSION_SW/GeoLiteCityv6.dat.gz &&
gunzip Geo*.dat.gz &&
chown root.root Geo*.dat &&
chmod 644 Geo*.dat &&
mv -f Geo*.dat $PDESTDIR &&

# Cleanup
cd $PDESTDIR &&
rmdir install-tmp &&
true
| true
|
2fea5f75546cc7bd39f2a8fe11b0b5c32a7fe3ff
|
Shell
|
BCCH-MRI-Research-Facility/nii2dcm
|
/nii2dcm.sh
|
UTF-8
| 248
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Use medcon to convert a NIfTI file to DICOM.
#
# Usage:  nii2dcm.sh <file.nii>
# Output: a "<basename>_dcm" directory containing the split DICOM slices.
#
# Fix: all expansions are now quoted so paths with spaces survive, and the
# cd is guarded so medcon cannot run in the wrong directory.
# NOTE(review): assumes the input path has no directory component, since
# medcon is invoked with the same (relative) name after cd'ing -- confirm.
filename=$1
# Strip the directory part and everything after the first "." (handles .nii.gz).
f=$(basename "$filename" | cut -d. -f1)
mkdir "${f}_dcm"
ln -s "$PWD/$filename" "$PWD/${f}_dcm/."
cd "${f}_dcm" || exit 1
medcon -f "$filename" -split3d -c dicom #-o $PWD/"$f"_dcm/
cd ..
| true
|
fbb778dc2b39a8b77c9278a16d7e920c87a92886
|
Shell
|
tooltwist/juiceconfig
|
/juice-client-nodejs/test/test-locally-using-mocha.sh
|
UTF-8
| 282
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
# Run the mocha CLI tests locally against the local JSON config.
# Fixes: the shebang was the garbled "#d!b/in/sh" (no valid interpreter
# line), backticks replaced by $(...), and "$@" used instead of $* so
# file names with spaces are passed through intact.
export JUICE_CONFIG=file:::$(pwd)/mocha-cli-config.json
echo export JUICE_CONFIG=${JUICE_CONFIG}
#
# If files are specified, run just those tests
#
if [ "$#" != 0 ] ; then
    echo mocha "$@"
    mocha "$@"
else
    echo mocha mocha-cli-tests
    mocha mocha-cli-tests
fi
| true
|
015d2b2b1484e05c8433e712e9efd40f0f52d132
|
Shell
|
daneharrigan/mci
|
/bin/setup
|
UTF-8
| 383
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/sh
# Bootstrap an MCI environment: write .env.<env>, create its database,
# load the schema, and run the test suite.
#
# Usage: bin/setup <env>
ENV=$1
ENV_FILE=.env.$ENV
DBNAME=mci-$ENV

# Append default settings to the env file.
# Bug fixes: this previously redirected to "$ENV" (the bare environment
# name) instead of the computed "$ENV_FILE", and the DATABASE_URL line
# duplicated its "DATABASE_URL=" prefix.
cat <<EOF >> "$ENV_FILE"
MCI_PUBLIC_KEY=X
MCI_PRIVATE_KEY=X
DATABASE_URL=postgres:///$DBNAME?sslmode=disable
MAILER_FROM=noreply@example.com
MAILER_SUBJECT="MCI: Your Releases This Week"
MAILGUN_API_KEY=X
MAILGUN_DOMAIN=example.com
MAILGUN_URL=https://api.mailgun.net
EOF

# Create the database, load the schema, then run the tests.
createdb "$DBNAME"
psql -d "$DBNAME" < db/schema.sql
bin/test
| true
|
c5215c4fbde65aa95e431861e48bf01c8223e94f
|
Shell
|
zutils/HelloWorld
|
/.circleci/files/roll_update.sh
|
UTF-8
| 717
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Deploy/refresh the hello-world service on minikube and ensure local
# port 80 is forwarded to the service's NodePort.
# Note: This won't do anything if we are already deployed
echo "Initializing Deployment and Service"
minikube kubectl -- apply -f ~/HelloWorld/.circleci/files/service.yml
minikube kubectl -- apply -f /tmp/modified_deployment.yml
echo "Rolling update!"
minikube kubectl -- rollout restart deployments/hello-world-server
# Run socat to forward port 80 to port 8000
# Bug fix: the original used `if $(ps -A | grep -q socat)`; grep -q prints
# nothing, so the substitution expanded to an empty command whose status is
# always 0 -- the "already forwarded" branch was taken unconditionally and
# the forward was never established. Test the pipeline's status directly.
if ps -A | grep -q socat
then
    echo "Port 80 is already forwarded to 8000"
else
    echo "Forwarding localhost port 80 to the NodePort in Minikube server $(minikube service --url hello-world-server)"
    sudo nohup socat TCP-LISTEN:80,fork TCP:$(minikube service --url hello-world-server | sed -s "s|http://||g") >> /tmp/socat_logs &
fi
| true
|
92318b754da14a4b3bdd71fbb0bab71322fadca8
|
Shell
|
binarycodes/scripts
|
/deleteglob
|
UTF-8
| 535
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/zsh
# written primarily to help delete certain files from a glob pattern
# with confirmation to exclude certain ones
function usage() {
# NOTE(review): plain echo; in zsh "\n" in the string prints a trailing
# blank line -- presumably intentional spacing.
echo "Usage: actionFile <command/program> [pattern]glob\n"
}
# actionOnGlob <command> <glob>
# For each file matched by the glob: run <command> on it (e.g. a viewer),
# then ask via zenity whether to delete it.
function actionOnGlob() {
# ${~2}: zsh "glob expand this parameter" -- $2 is treated as a pattern.
for i in ${~2};do
#i=$(readlink -f "$i")
# Show/preview the file with the user-supplied command; errors discarded.
$1 $i 2>/dev/null
zenity --question --text "Delete file $i ?"
# zenity exits 0 when the user clicked "Yes".
if [[ $? == 0 ]]; then
zenity --info --text "Deleting $i ..."
rm "$i"
fi
done
}
function main() {
# Expect exactly two arguments: <command> <glob>.
if [[ $# != 2 ]]; then
usage
else
actionOnGlob $*
fi
}
main $*
| true
|
3ded83361c80721a091e09088944390fe8abcea3
|
Shell
|
HatPull/devtools
|
/scripts/git-clone-all.sh
|
UTF-8
| 621
| 3.796875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Clone every repository listed in repository.list.txt into repos/.
# Fixes: the shebang was "#/bin/bash" (missing "!"), so the file had no
# interpreter line; expansions are now quoted.
#working dirctory is were the Makefile is
#Include our helpers file (the . is short for an include in bash )
. scripts/helpers.sh
# One repository URL per whitespace-separated token; URLs contain no spaces.
for repository in $(cat repository.list.txt); do
    dir_name=$(repository_directory_name "${repository}")
    #If the repository directory does not exist, then clone the repo
    echo "${textblue}------------------ repos/${dir_name} ${textcyan}clone${textblue} ------------------ ${textreset}"
    if [ ! -d "repos/${dir_name}" ]; then
        mkdir -p repos
        git clone "${repository}" "repos/${dir_name}"
    else
        echo "Already cloned"
    fi
    echo
    echo
done
| true
|
3d5f1116a4c85132d540a0b033d028f4889403bf
|
Shell
|
mkrause/workflow
|
/workflow/util/configuration.sh
|
UTF-8
| 947
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Shared configuration helpers: locate and source the nearest .env file.
path_current="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" # https://stackoverflow.com/questions/59895
. "${path_current}/printing.sh"
# https://stackoverflow.com/questions/9376904
find_up() {
# Recursively list a file from current directory up the tree to root
# Usage: find_up [ls-opts] name -- prints every match on the way up;
# returns 0 if at least one match was found, 2 otherwise.
[[ -n $1 ]] || { echo "find_up [ls-opts] name"; return 1; }
local THERE=$path_current RC=2
while [[ $THERE != / ]]
do [[ -e $THERE/${2:-$1} ]] && { ls ${2:+$1} $THERE/${2:-$1}; RC=0; }
THERE="$(dirname $THERE)"
done
# Finally check the filesystem root itself.
[[ -e $THERE/${2:-$1} ]] && { ls ${2:+$1} /${2:-$1}; RC=0; }
return $RC
}
# Succeeds iff a .env file exists in this directory or any ancestor.
config_check() {
find_up ".env" > /dev/null || { print_info "Search for .env found no matches"; return 1; }
# TODO: check conformance with .env.example?
return 0
}
# Source the .env file(s) found by find_up.
# NOTE(review): find_up prints every match from here up to /, so if .env
# exists at multiple levels "path_env" holds several paths -- confirm intent.
config_load() {
config_check || return $?
local path_env="$(find_up ".env")"
. "${path_env}"
return 0
}
| true
|
36a3a2863e055d7d04d7db318c08ee224824eae1
|
Shell
|
halvards/veewee
|
/templates/CentOS-6.0-x86_64-gnome/postinstall.sh
|
UTF-8
| 2,320
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#http://chrisadams.me.uk/2010/05/10/setting-up-a-centos-base-box-for-development-and-testing-with-vagrant/
# Veewee post-install for a CentOS 6.0 Vagrant base box: configures yum
# repos, installs puppet/chef, vagrant SSH keys and the VirtualBox guest
# additions, then trims and zero-fills the image.
date > /etc/vagrant_box_build_time
cat > /etc/yum.repos.d/puppetlabs.repo << EOM
[puppetlabs]
name=puppetlabs
baseurl=http://stahnma.fedorapeople.org/puppetlabs/6/\$basearch
enabled=1
gpgcheck=0
EOM
cat > /etc/yum.repos.d/epel.repo << EOM
[epel]
name=epel
baseurl=http://download.fedoraproject.org/pub/epel/6/\$basearch
enabled=1
gpgcheck=0
EOM
# Drop 32-bit packages, install build/puppet tooling, update (keep kernel).
yum -y erase *i386 *i586 *i686
yum -y install puppet facter ruby-devel rubygems kernel-devel-`uname -r`
yum -y --exclude=kernel* update
yum -y clean all
rm /etc/yum.repos.d/{puppetlabs,epel}.repo
gem install --no-ri --no-rdoc chef
# Installing vagrant keys
mkdir /home/vagrant/.ssh
chmod 700 /home/vagrant/.ssh
cd /home/vagrant/.ssh
wget --no-check-certificate 'http://github.com/mitchellh/vagrant/raw/master/keys/vagrant.pub' -O authorized_keys
chown -R vagrant /home/vagrant/.ssh
# Installing the virtualbox guest additions
VBOX_VERSION=$(cat /home/vagrant/.vbox_version)
cd /tmp
wget http://download.virtualbox.org/virtualbox/$VBOX_VERSION/VBoxGuestAdditions_$VBOX_VERSION.iso
mount -o loop VBoxGuestAdditions_$VBOX_VERSION.iso /mnt
sh /mnt/VBoxLinuxAdditions.run
umount /mnt
rm VBoxGuestAdditions_$VBOX_VERSION.iso
# Trim artwork to shrink the image (loop replaces 13 copy-pasted rm lines).
rm -rf /usr/share/backgrounds/images/*
rm -rf /usr/share/backgrounds/nature/*
rm -rf /usr/share/backgrounds/tiles/*
for locale in bn_IN cs de es fr it ja kr nl pt pt_BR ro ru; do
    rm -rf /usr/share/anaconda/pixmaps/rnotes/$locale
done
# Make it boot into X
sed -i "s/id:3:initdefault:/id:5:initdefault:/" /etc/inittab
# Disable grub boot timeout
sed -i "s/timeout=5/timeout=0/" /boot/grub/grub.conf
sed -i "s/timeout=5/timeout=0/" /boot/grub/menu.lst
# Allow sudo without a tty (vagrant runs commands over non-interactive ssh).
sed -i "s/^.*requiretty/#Defaults requiretty/" /etc/sudoers
# Zero out free space so the exported box compresses well.
dd if=/dev/zero of=/tmp/clean || rm /tmp/clean
exit
| true
|
a92b16b2bd5b1a6f7d042573091180afefbd40f5
|
Shell
|
roadnarrows-robotics/rnr-sdk
|
/Eudoxus/share/etc/init.d/eudoxus_shutter
|
UTF-8
| 3,580
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
### BEGIN INIT INFO
# Provides: 3D sensor ROS publishing nodes via user push button.
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: 3D sensor ROS publishing nodes via user push button.
# Description: The ROS openni2_launch script execs a set of nodes and
# nodelettes to support RGBD publishing from the 3D point
# cloud sensor. This service attaches to the GPIO associated
# with a user push button to start/stop the nodes.
### END INIT INFO
# Pull in the Eudoxus environment: installed location first, then /prj.
if [ -f /etc/profile.d/eudoxus.sh ]
then
. /etc/profile.d/eudoxus.sh
elif [ -f /prj/etc/profile.d/eudoxus.sh ]
then
. /prj/etc/profile.d/eudoxus.sh
fi
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin:/bin"
NAME="Eudoxus user button monitor service"
SERVICE=eudoxus_shutter
SCRIPT=eudoxus_svc_openni2
# Full path to the monitor script; exit quietly if it is not installed.
DAEMON=$(which ${SCRIPT})
PIDFILE=/var/run/${SERVICE}.pid
LOGFILE=/var/log/${SERVICE}.log
test -x "${DAEMON}" || exit 0
# standard init sh support functions
. /lib/lsb/init-functions
#
# Get daemon process id.
#
# 'Returns' pid or 0.
#
get_daemon_pid()
{
# Match the wrapper shell that startdaemon launches ("/bin/bash -c <daemon>").
pid=$(/usr/bin/pgrep -f "/bin/bash *-c *${DAEMON}")
if [ "${pid}" != '' ]
then
echo "${pid}"
else
echo 0
fi
#echo "DBG: pid=${pid}" >>${LOGFILE}
}
#
# Get daemon process group id.
#
# get_daemon_pgid <pid>
#
# 'Returns' pgid or 0.
#
get_daemon_pgid()
{
pgid=$(/bin/ps -o pgid= ${1})
if [ "${pgid}" != '' ]
then
echo "${pgid}"
else
echo 0
fi
#echo "DBG: pgid=${pgid}" >>${LOGFILE}
}
#
# Start service
#
startdaemon()
{
pid=$(get_daemon_pid)
if [ "${pid}" -eq 0 ]
then
log_daemon_msg "Starting ${NAME}" "${SERVICE}" || true
# Run the daemon through bash so stdout/stderr are appended to the log.
if start-stop-daemon --start --quiet --make-pidfile --pidfile ${PIDFILE} \
--background --exec /bin/bash -- \
-c "${DAEMON} >>${LOGFILE} 2>&1"
#if start-stop-daemon -t --start --quiet --make-pidfile --pidfile ${PIDFILE} \
# --background --exec ${DAEMON}
then
log_end_msg 0 || true
else
log_end_msg 1 || true
fi
else
log_daemon_msg "${NAME} already running" "" || true
log_end_msg 1 || true
fi
}
#
# Stop service
#
stopdaemon()
{
pid=$(get_daemon_pid)
if [ "${pid}" -ne 0 ]
then
log_daemon_msg "Stopping ${NAME}" "${SERVICE}" || true
pgid=$(get_daemon_pgid ${pid})
#if start-stop-daemon --stop --quiet --oknodo --pidfile ${PIDFILE}
if [ "${pgid}" -ne 0 ]
then
# Send SIGTERM to the whole process group (daemon plus its children).
if /bin/kill -15 -"${pgid}"
then
rm -f "${PIDFILE}"
log_end_msg 0 || true
else
log_end_msg 1 || true
fi
else
log_end_msg 1 || true
fi
else
log_daemon_msg "${NAME} not running" "" || true
log_end_msg 1 || true
fi
}
#
# Restart service
#
restartdaemon()
{
stopdaemon
startdaemon
}
#
# Status (not used)
#
pidof_daemon()
{
if [ -e "${PIDFILE}" ]
then
PID=$(cat ${PIDFILE})
pidof ${DAEMON} | grep -q ${PID}
return $?
fi
return 1
}
# Dispatch on the standard init.d action verb.
case $1 in
start)
startdaemon
;;
stop)
stopdaemon
;;
restart)
restartdaemon
;;
reload|force-reload)
# No-op: the daemon has no configuration to reload.
;;
status)
#status_of_proc -p "${PIDFILE}" "${DAEMON}" "${SERVICE}" && exit 0 || exit $?
pid=$(get_daemon_pid)
if [ "${pid}" -ne 0 ]
then
log_daemon_msg "${NAME} is running" "" || true
else
log_daemon_msg "${NAME} is not running" "" || true
fi
;;
*)
log_action_msg "Usage: $0 {start|stop|restart|reload|force-reload|status}" || true
exit 1
;;
esac
exit 0 # good
| true
|
28b7d0f57f3d145684b3e74cbeeaaef74cfa8fe6
|
Shell
|
codexico/dotfiles
|
/scripts/installs/install_oh-my-zsh.sh
|
UTF-8
| 461
| 3.4375
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
# Install oh-my-zsh, but only when zsh is already the user's login shell.
case "$SHELL" in
  /bin/bash)
    # Still on bash: zsh must be installed and made the login shell first.
    echo "please install zsh first";
    echo "try ./install_zsh.sh";
    echo "aborting...";
    ;;
  /usr/bin/zsh)
    echo "installing oh-my-zsh";
    sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
    ;;
  *)
    echo "unknown shell, please restart and try again";
    echo "Read more: https://github.com/robbyrussell/oh-my-zsh";
    ;;
esac
| true
|
a2e23150204909247bc569430fa12e6b290ff5bb
|
Shell
|
jxyanglinus/settings-for-my-manjaro
|
/source-and-update.sh
|
UTF-8
| 601
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Manjaro first-time setup: optionally switch to the USTC mirror and the
# archlinuxcn repo, then fully update and install AUR helpers.
echo "=======开始配置======="
inst='sudo pacman -S --noconfirm'
# Single quotes are deliberate: $repo/$arch must stay literal for pacman.
APPSOURCE='https://mirrors.ustc.edu.cn/manjaro/stable/$repo/$arch'
# Configure the package mirror.
echo "是否更新软件源?(如果先前更新过了,可以不做这一步)[y/n]"
read -r update
if [ "$update" == "y" ];then
    # Bug fix: "sudo echo ... > file" and "sudo cat ... >> file" perform the
    # redirection in the unprivileged shell, so writing to /etc fails.
    # Route the output through "sudo tee" instead.
    echo "Server = $APPSOURCE" | sudo tee /etc/pacman.d/mirrorlist > /dev/null
    sudo tee -a /etc/pacman.conf < ./archlinuxcn > /dev/null
    sudo pacman -Syy
    $inst archlinuxcn-keyring
else
    echo "你取消了更新软件源"
fi
# Full system update.
sudo pacman -Syyu
# AUR helpers.
$inst base-devel
$inst yay yaourt
echo "完成后请重启"
| true
|
c5948f5ff1d9bde6d32155e6b29804e8350cbc83
|
Shell
|
rylankasitz/CIS520Proj4
|
/mpi/run.sh
|
UTF-8
| 960
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Benchmark driver: submits the MPI benchmark via sbatch once per core
# count and collects the per-run timings into runtimes.csv.
FILE_LOCATION="/homes/rylankasitz/cis520/Proj4/mpi"
OUTPUT_LOCATION=$FILE_LOCATION/"output_files"
RUNTIMES_FILE=$FILE_LOCATION/"runtimes.csv"
THREADS=( 20 18 16 14 12 10 8 6 4 2 1 )
# Number of repetitions per core count (first script argument).
ITERATIONS=$1
cd $FILE_LOCATION
make
mkdir -p $OUTPUT_LOCATION
rm $OUTPUT_LOCATION/*
rm $RUNTIMES_FILE
touch $RUNTIMES_FILE
# Build the CSV header: two timing columns per iteration plus averages.
i=0
line="Core Count,"
while [ $i -lt $ITERATIONS ]
do
line+="Run $i Threaded Sections,Run $i Total,"
let "i++"
done
echo "$line,Average Threaded Section,Average Total" >> $RUNTIMES_FILE
# Submit one batch job per core count; each writes its own output file.
for t in ${THREADS[@]}
do
echo "Running for $t threads"
sbatch --ntasks-per-node=$t --output=$OUTPUT_LOCATION/$t"_threads.output" $FILE_LOCATION/mpi.sh $ITERATIONS $RUNTIMES_FILE $t
done
echo "Waiting for iterations to finish"
# NOTE(review): presumably each job drops a *temp.csv when done -- confirm
# against mpi.sh. The extra touch below plus "-le" makes the loop wait
# until all ${#THREADS[@]} jobs have reported.
touch $FILE_LOCATION/temp.csv
while [ $(find $FILE_LOCATION/*temp.csv -type f | wc -l) -le ${#THREADS[@]} ]
do
sleep 1
done
# Merge the per-job rows into the final CSV and clean up.
cat $FILE_LOCATION/*temp.csv >> $RUNTIMES_FILE
rm $FILE_LOCATION/*temp.csv
| true
|
73aa0ba82636c8cf6a3dc6f030cff8d0e77ac1af
|
Shell
|
djemos/iso-creation-slackel
|
/initrd-scripts/usr-lib-setup/INSURL
|
UTF-8
| 7,749
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/sh
# Slackel installer stage: select an HTTP/FTP server (directory or ISO
# image) as the package source. Results for later setup stages are written
# to $TMP/SeTDS, $TMP/SeTmount, $TMP/SeTsource and $TMP/SeTremotesvr.
TMP=/var/log/setup/tmp
if [ ! -d $TMP ]; then
mkdir -p $TMP
fi
. /usr/lib/setup/INCISO
# Unattended installs may pre-seed the server URL and path.
if [ -f $TMP/Punattended ]; then
eval $(grep "^REMOTE_URL=" $TMP/Punattended)
eval $(grep "^REMOTE_ROOT=" $TMP/Punattended)
fi
REMOTE_SERVER=$REMOTE_URL
REMOTE_PATH=$REMOTE_ROOT
# Outer retry loop: repeats until a valid source is configured (break)
# or the user cancels (exit 1).
while [ 0 ]; do
rm -f $TMP/SeTDS $TMP/SeTmount $TMP/SeTsource $TMP/SeTremotesvr
umount -f /var/log/mount 2>/dev/null
if [ -n "$(mount | grep /var/log/mount)" -o -d /var/log/mount/lost+found -o -d /var/log/mount/recycled -o -r /var/log/mount/io.sys ]; then
cat <<- EOF > $TMP/tempmsg
Setup failed to unmount a required directory:
/var/log/mount
Please reboot the machine an try again.
EOF
dialog \
--title "ERROR" \
--yes-label "REBOOT" \
--no-label "CANCEL" \
--yesno "$(cat $TMP/tempmsg)" \
11 70
RET=$?
rm -f $TMP/tempmsg
if [ $RET -eq 0 ]; then
reboot
else
exit 1
fi
fi
# Prompt (and re-prompt) until a http:// or ftp:// URL is entered.
while [ 0 ]; do
cat <<- EOF > $TMP/tempmsg
Please provide the URL of a server containing the contents
(or an ISO image) of a Slackel installation disc.
Examples: http://mydomain.com
ftp://192.168.1.101
EOF
dialog \
--title "HTTP/FTP SERVER" \
--inputbox "$(cat $TMP/tempmsg)" \
14 70 \
$REMOTE_SERVER \
2> $TMP/remote
RET=$?
REMOTE_SERVER="$(cat $TMP/remote)"
rm -f $TMP/tempmsg $TMP/remote
if [ $RET -ne 0 ]; then
exit 1
elif [ ! $(echo $REMOTE_SERVER | grep -iE '^http://|^ftp://') ]; then
# subtle hint
REMOTE_SERVER="?://$SERVER"
continue
fi
break
done
cat <<- EOF > $TMP/tempmsg
Now please provide the path to a directory on the server
containing the contents of a Slackel installation disc.
Example: /mirror/slackel-xfce
Note: If you are installing from a web (HTTP) server you may also
specify an ISO image as the source e.g. /mirror/slackel-xfce.iso
EOF
dialog \
--title "SELECT SOURCE DIRECTORY" \
--inputbox "$(cat $TMP/tempmsg)" \
16 70 \
$REMOTE_PATH \
2> $TMP/sourcedir
RET=$?
REMOTE_PATH="$(cat $TMP/sourcedir)"
rm -f $TMP/sourcedir $TMP/tempmsg
if [ $RET -ne 0 ]; then
continue
fi
mkdir -p /var/log/mount 2>/dev/null
# attempt to mount an ISO over a HTTP connection.
if [ -n "$(echo $REMOTE_SERVER | grep -i '^http://')" -a -n "$(echo $REMOTE_PATH | grep -i '\.iso$')" ]; then
cat <<- EOF > $TMP/tempmsg
It appears you have chosen to use the following ISO image:
$(basename $REMOTE_PATH)
Setup will now attempt to mount it inside a local directory.
EOF
dialog \
--title "INFORMATION" \
--exit-label "OK" \
--textbox "$TMP/tempmsg" \
11 70
rm -f $TMP/tempmsg
# httpfs2 requires FUSE; on failure fall back to the outer loop.
sh /etc/rc.d/rc.fuse start
if [ $? -eq 1 ]; then
cat <<- EOF > $TMP/tempmsg
Setup failed to start the FUSE service.
Please check and try again.
EOF
dialog \
--title "SERVICE FAILURE" \
--exit-label "OK" \
--textbox "$TMP/tempmsg" \
9 70
rm -f $TMP/tempmsg
continue
fi
# mount the file inside a directory.
if [ -x $(type -path httpfs2) ]; then
httpfs2 -c /dev/null $REMOTE_SERVER$REMOTE_PATH /var/log/mount
if [ $? -ne 0 ]; then
cat <<- EOF > $TMP/tempmsg
Failed to mount file from HTTP server:
$REMOTE_SERVER$REMOTE_PATH
Please check and try again.
EOF
dialog \
--title "MOUNT FAILURE" \
--exit-label "OK" \
--textbox "$TMP/tempmsg" \
11 70
rm -f $TMP/tempmsg
continue
else
cat <<- EOF > $TMP/tempmsg
Successfully mounted file from HTTP server:
$(mount | grep /var/log/mount)
EOF
dialog \
--title "MOUNT SUCCESS" \
--exit-label "OK" \
--textbox "$TMP/tempmsg" \
9 70
rm -f $TMP/tempmsg
fi
else
cat <<- EOF > $TMP/tempmsg
Setup failed to execute a required command:
httpfs2
Please check and try again.
EOF
dialog \
--title "COMMAND NOT FOUND" \
--exit-label "OK" \
--textbox "$TMP/tempmsg" \
11 70
rm -f $TMP/tempmsg
continue
fi
# attempt to loop mount the file
check_iso_image /var/log/mount /var/log/mntiso
if [ $? -eq 0 ]; then
if [ -d /var/log/mntiso/slackel ]; then
echo "/var/log/mntiso/slackel" > $TMP/SeTDS
else
cat <<- EOF > $TMP/tempmsg
The ISO image you specified is not valid:
$(basename $SOURCEISO)
Please check and try again.
EOF
dialog \
--title "INVALID ISO IMAGE" \
--exit-label "OK" \
--textbox "$TMP/tempmsg" \
11 70
rm -f $TMP/tempmsg
umount -f /var/log/mntiso 2>/dev/null
continue
fi
else
cat <<- EOF > $TMP/tempmsg
The file you specified is not valid:
$REMOTE_SERVER$REMOTE_PATH
Please check and try again.
EOF
dialog \
--title "INVALID SOURCE FILE" \
--exit-label "OK" \
--textbox "$TMP/tempmsg" \
11 70
rm -f $TMP/tempmsg
continue
fi
# attempt to fetch individual packages from the server
else
cat <<- EOF > $TMP/tempmsg
We will now attempt to download a repository information file
from the server.
If this is successful we will use it to create a local cache of
the packages we want to install.
EOF
dialog \
--title "INFORMATION" \
--exit-label "OK" \
--textbox "$TMP/tempmsg" \
11 70
rm -f $TMP/tempmsg
# Local placeholder repository lives under $TMP/pkgrepo, exposed via
# a symlink inside /var/log/mount.
mkdir -p $TMP/pkgrepo 2>/dev/null
chmod 700 $TMP/pkgrepo
rm -rf $TMP/pkgrepo/* 2>/dev/null
mkdir -p /var/log/mount 2>/dev/null
ln -sf $TMP/pkgrepo /var/log/mount/
cd /var/log/mount/pkgrepo
dialog \
--infobox "\nDownloading file...\n" \
5 23
wget -q -c $REMOTE_SERVER$REMOTE_PATH/PACKAGES.TXT.gz
if [ $? -ne 0 ]; then
cat <<- EOF > $TMP/tempmsg
Failed to download repository information file:
$REMOTE_SERVER$REMOTE_PATH/PACKAGES.TXT.gz
Please check and try again.
EOF
dialog \
--title "DOWNLOAD FAILURE" \
--exit-label "OK" \
--textbox "$TMP/tempmsg" \
11 70
rm -f $TMP/tempmsg
continue
elif [ -z "$(gzip -dc PACKAGES.TXT.gz | grep 'PACKAGE ')" ]; then
cat <<- EOF > $TMP/tempmsg
The repository information file is not valid:
$REMOTE_SERVER$REMOTE_PATH/PACKAGES.TXT.gz
Please check and try again.
EOF
dialog \
--title "INVALID FILE" \
--exit-label "OK" \
--textbox "$TMP/tempmsg" \
11 70
rm -f $TMP/tempmsg
continue
else
# create a local repository with zero-byte package placeholders:
TOTALP=$(gzip -dc PACKAGES.TXT.gz | grep "PACKAGE NAME:" | wc -l)
(
NUMPKG=0
GAUGE=0
gzip -dc PACKAGES.TXT.gz | grep "PACKAGE " | while read REPLY ; do
case "$REPLY" in
"PACKAGE NAME:"*)
TEMP=$(echo $REPLY | cut -d: -f2)
PKGNAME=$(echo $TEMP)
PKGBASE=${PKGNAME/.t[glbx]z}
let NUMPKG=NUMPKG+1
if [ $(( ((100 * $NUMPKG)/$TOTALP)/5 )) -gt $(( $GAUGE/5 )) ]; then
GAUGE=$(( (100 * $NUMPKG)/$TOTALP ))
echo "$GAUGE"
fi
;;
"PACKAGE LOCATION:"*)
TEMP=$(echo $REPLY | cut -d: -f2)
PKGDIR=$(echo $TEMP)
mkdir -p $PKGDIR
touch $PKGDIR/$PKGNAME
;;
"PACKAGE SIZE (compressed):"*)
TEMP=$(echo $REPLY | cut -d: -f2)
PKGSIZE=$(echo $TEMP)
echo "$PKGSIZE" 1> $PKGDIR/$PKGBASE.size
;;
*)
;;
esac
done \
) | \
dialog \
--title "INITIALISING PACKAGE REPOSITORY" \
--gauge "\nProcessing $TOTALP Slackel packages..." \
8 65
echo "/var/log/mount/pkgrepo/slackel" > $TMP/SeTDS
echo "$REMOTE_SERVER,$REMOTE_PATH" > $TMP/SeTremotesvr
fi
fi
echo "-source_mounted" > $TMP/SeTmount
echo "/dev/null" > $TMP/SeTsource
break
done
| true
|
9c48a92cb392a5264b7dd2724c1053a92f947022
|
Shell
|
nya3jp/icfpc2018
|
/gens/reassemble_line_assembler_small_flip.sh
|
UTF-8
| 496
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Solve every FR (reassembly) problem with the line-assembler strategy
# (XZ-flipped, small variant) and score each generated trace.
cd "$(dirname "$0")/.."
bazel build ...
# Problems FR001..FR034, each tried with 1 and 2 Z-divisions.
for n in `seq -f "%03g" 1 34`; do
for z in 1 2; do
./bazel-bin/solver/solver \
--source data/models/FR${n}_src.mdl \
--target data/models/FR${n}_tgt.mdl \
--output a.nbt \
--line_assembler_flip_xz \
--line_assembler_x_divs 1 \
--line_assembler_z_divs ${z} \
--impl reassemble_naive \
--disasm bbgvoid_task \
--asm line_assembler
./evaluate.py R $n a.nbt --nobuild
done
done
| true
|
dbfef21431645bf7b5697d5846f1a89a995277e7
|
Shell
|
benjamin-travis-summers/toys
|
/pkg/archive/ipfs-persist
|
UTF-8
| 1,898
| 3.859375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Back up IPFS blocks (and pin records) to S3, and restore them again.
set -e
BUCKET=ipfs-archive-backups
# Space-separated list of temp files removed by the EXIT trap.
tmpfiles=""
cleanup () {
for tmp in $tmpfiles
do rm -f $tmp
done
}
trap cleanup EXIT
# persist <hash>: copy one raw block from the local IPFS store to
# s3://$BUCKET/block/<hash>.
persist () {
block=$1
tmp="${block}.block"
tmpfiles="$tmpfiles $tmp"
ipfs block get "$block" >"$tmp"
/usr/bin/aws s3 cp "$tmp" "s3://$BUCKET/block/$block"
rm "$tmp"
}
# persisttree <hash>: persist a root block, record it as a pin, then
# persist every block reachable from it (ipfs refs -r).
persisttree () {
entry="$1"
persist "$entry"
s3pin "$entry"
for subblock in $(ipfs refs -r -u "$entry")
do persist "$subblock"
done
}
# restore <hash>: download one block from S3 and insert it into the
# local IPFS store.
restore () {
block=$1
tmp="${block}.block"
tmpfiles="$tmpfiles $tmp"
/usr/bin/aws s3 cp "s3://$BUCKET/block/$block" "$tmp"
ipfs block put <"$tmp"
rm "$tmp"
}
# TODO This will do duplicate work if there are shared subtrees, but it wont
# endlesslessly loop (unless someone found a hash cycle).
restoretree () {
entry="$1"
restore "$entry"
for subblock in $(ipfs refs -u "$entry")
do restoretree "$subblock"
done
}
# TODO This will duplicate lots of work!
persistall () {
# First token of each `ipfs pin ls` line is the hash.
for block in $(ipfs pin ls | sed 's/ .*//')
do persisttree "$block"
done
}
# Succeed when the argument looks like an IPFS block hash, i.e. it is at
# least two characters long and starts with "Qm".
validipfsblock () {
case "$1" in
Qm*) true ;;
*) false ;;
esac
}
# TODO Pin root blocks! Need a way to record in s3 which blocks were pinned or were roots.
# restoreall: restore every backed-up block, then re-add every recorded pin.
# NOTE(review): `aws s3 ls` output contains date/size columns too, so the
# loops rely on validipfsblock to skip the non-hash tokens -- confirm.
restoreall () {
for block in $(/usr/bin/aws s3 ls "s3://$BUCKET/block/")
do if validipfsblock "$block"
then echo restore "$block"
restore "$block"
fi
done
for pin in $(/usr/bin/aws s3 ls "s3://$BUCKET/pin/")
do if validipfsblock "$pin"
then echo ipfs pin add "$pin"
ipfs pin add "$pin"
fi
done
}
# s3pin <hash>: record that <hash> is a pinned root by uploading a one-line
# marker file to s3://ipfs-archive-backups/pin/<hash>.
s3pin () {
block="$1"
echo $block > $block
tmpfiles="$tmpfiles $block"
/usr/bin/aws s3 cp $block "s3://ipfs-archive-backups/pin/$block"
rm $block
}
# Entry point: "all" persists every pinned tree, "restoreall" restores
# everything; otherwise each argument is persisted as a tree.
if [ "$#" = 1 ] && [ "$1" = all ]
then persistall
elif [ "$#" = 1 ] && [ "$1" = restoreall ]
then restoreall
else for x in "$@"
do persisttree "$x"
done
fi
| true
|
d7310abef8de47fd31c5406c335d4d00630478c0
|
Shell
|
kopaka1822/Compressonator
|
/build/buildcli_linux_package.sh
|
UTF-8
| 5,093
| 3.09375
| 3
|
[] |
permissive
|
#please make sure all the prerequisite packages - initsetup_ubuntu.sh are installed and build-buildCLI_ubuntu_cmake.sh is run successfully before running this script.
# Packages the Linux CLI build of Compressonator (binaries, docs, Qt and
# system libraries) into compressonatorcli_linux_x86_64_4.3.$BUILD_NUMBER.tar.gz
set -x
set -e
# This is done to ensure the script works whether WORKSPACE is set or not
# We assume this script is called from the root "compressonator" folder
if [ -z "$WORKSPACE" ]
then
    WORKSPACE=".."
fi
cd "$WORKSPACE/compressonator"

# Package root directory; using one variable removes ~60 copies of the
# long name and makes the version bumpable in one place.
PKG="compressonatorcli_linux_x86_64_4.3.$BUILD_NUMBER"

rm -rf compressonatorcli_linux_x86_64_4.3*
mkdir -p "$PKG/documents" "$PKG/images" "$PKG/license" "$PKG/qt" \
    "$PKG/plugins/imageformats" "$PKG/pkglibs"

# CLI launcher, binary, runtime config, docs, images and license.
cp scripts/compressonatorcli "$PKG"
cp bin/compressonatorcli-bin "$PKG"
cp runtime/qt.conf "$PKG"
cp -r docs/build/html "$PKG/documents"
cp -r runtime/images "$PKG"
cp license/license.txt "$PKG/license"

# Qt runtime libraries and image-format plugins.
# (The original also copied "lib*.so.*" separately; that glob is a subset
# of "lib*.so*", so one cp per library suffices.)
cp "$QT_ROOT"/lib/libQt5Core.so* "$PKG/qt"
cp "$QT_ROOT"/lib/libQt5Gui.so* "$PKG/qt"
cp "$QT_ROOT"/lib/libicui18n.so.* "$PKG/qt"
cp "$QT_ROOT"/lib/libicuuc.so.* "$PKG/qt"
cp "$QT_ROOT"/lib/libicudata.so.* "$PKG/qt"
cp "$QT_ROOT"/plugins/imageformats/* "$PKG/plugins/imageformats"

# OpenCV
for lib in libopencv_core libopencv_highgui libopencv_imgproc; do
    cp /usr/lib/x86_64-linux-gnu/$lib.so /usr/lib/x86_64-linux-gnu/$lib.so.* "$PKG/pkglibs"
done

# Misc
cp /usr/lib/x86_64-linux-gnu/libtbb.so.* "$PKG/pkglibs"
# Optional user pkg update helper.
cp build/initsetup_ubuntu.sh "$PKG/pkglibs"

# GLEW if needed
# cp /usr/lib/x86_64-linux-gnu/libGLEW.so /usr/lib/x86_64-linux-gnu/libGLEW.so.* "$PKG/pkglibs"

# OpenEXR runtime libraries.
cp /usr/lib/x86_64-linux-gnu/libHalf.so /usr/lib/x86_64-linux-gnu/libHalf.so.* "$PKG/pkglibs"
cp /usr/lib/x86_64-linux-gnu/libIex-2_2.so.* "$PKG/pkglibs"
cp /usr/lib/x86_64-linux-gnu/libIexMath-2_2.so.* "$PKG/pkglibs"
cp /usr/lib/x86_64-linux-gnu/libImath-2_2.so.* "$PKG/pkglibs"
cp /usr/lib/x86_64-linux-gnu/libIlmThread-2_2.so.* "$PKG/pkglibs"
cp /usr/lib/x86_64-linux-gnu/libIlmImf-2_2.so.* "$PKG/pkglibs"

tar -zcvf "$PKG.tar.gz" "$PKG"
| true
|
9759f4964f3e5c6f6d66f0dbe96e973da4556fd1
|
Shell
|
orudge/ultimateblocks
|
/gcc-uni.sh
|
UTF-8
| 2,757
| 4.375
| 4
|
[] |
no_license
|
#!/bin/sh
# gcc-uni.sh
# ----------
# By Matthew Leverton
#
# Builds a universal binary by a multi-step process to allow for individual
# options for both architectures. Its primary use is to be used as a wrapper
# for makefile based projects.
#
# Although untested, it should be able to build OS X 10.2 compatible builds.
# Note that you may need to install the various OS SDKs before this will
# work. gcc-3.3 is used for the PPC build. The active version of gcc is used
# for the Intel build. (Note that it must be at least version 4.)
#
# If the makefile has a CC variable, this is all that should be necessary:
#
#   CC=/usr/bin/gcc-uni.sh

# set up defaults
mode=link     # "link" unless -c is seen, then "compile"
output=       # value of -o (final output path)
cmd=          # pass-through arguments, whitespace-joined (see NOTE below)

# check whether to use gcc or g++
# (using a symlink with g++ in name is recommended)
case "$0" in
	*g++*)
		gcc=g++
		;;
	*)
		gcc=gcc
		;;
esac

# which OSX to target (used for PPC)
OSX_TARGET=10.2
# which SDK to use (unused with PPC because gcc-3.3 doesn't know about it)
SDK_i386=/Developer/SDKs/MacOSX10.4u.sdk
SDK_PPC=/Developer/SDKs/MacOSX10.3.9.sdk

# i386 flags
CFLAGS_i386=" -isysroot $SDK_i386 -I/usr/local/include"
LDFLAGS_i386=" -isysroot $SDK_i386 -Wl,-syslibroot,$SDK_i386 -L/usr/local/lib"
# ppc flags
CFLAGS_PPC="-I$SDK_PPC/usr/include -I$SDK_PPC/System/Library/Frameworks/"
LDFLAGS_PPC="-L$SDK_PPC/usr/lib"

# Parse options:
#   -arch switches (and their value) are dropped: this script decides arches
#   -c enables compile mode (and is forwarded)
#   -o captures the name of the output
# NOTE: $cmd collects the remaining arguments as one whitespace-joined
# string, so arguments containing spaces cannot be preserved exactly.  This
# is a POSIX-sh limitation (no arrays) carried over from the original design.
if [ $# -eq 0 ]; then
	echo "This is a wrapper around gcc that builds universal binaries."
	echo "It can only be used to compile or link."
	exit 1
fi

# remember the arguments in case there's no output file
args=$*

while [ -n "$1" ]; do
	case "$1" in
		-arch)
			shift    # also consume the architecture value that follows
			;;
		-c)
			mode=compile
			cmd="$cmd -c"
			;;
		-o)
			shift
			output="$1"
			;;
		*)
			cmd="$cmd $1"
			;;
	esac
	shift
done

# if no output file, just pass the original command as-is and hope for the best
if [ -z "$output" ]; then
	exec $gcc $args
fi

# figure out if we are compiling or linking
case "$mode" in
	link)
		FLAGS_i386="$LDFLAGS_i386"
		FLAGS_PPC="$LDFLAGS_PPC"
		;;
	compile)
		FLAGS_i386="$CFLAGS_i386"
		FLAGS_PPC="$CFLAGS_PPC"
		;;
	*)
		echo "internal error in gcc-uni.sh script"
		exit 1
		;;
esac

# Remove the per-architecture intermediates on every exit path (success,
# failure, or interruption).  This implements the old "TODO: use trap to
# cleanup" and replaces the hand-rolled rm calls after each failure.
trap 'rm -f -- "$output.i386" "$output.ppc"' EXIT

# build the i386 version.  $cmd and $FLAGS_* are intentionally unquoted:
# they hold whitespace-separated option lists.  $output is quoted so paths
# containing spaces survive.
$gcc $cmd $FLAGS_i386 -arch i386 -o "$output.i386" || exit 1

# build the PPC version with the legacy 10.2-era compiler
MACOSX_DEPLOYMENT_TARGET=$OSX_TARGET /usr/bin/$gcc-3.3 $cmd $FLAGS_PPC -arch ppc -o "$output.ppc" || exit 1

# merge the two single-architecture binaries into the universal binary
lipo -create "$output.i386" "$output.ppc" -output "$output" || exit 1
| true
|
c513b38876bab8bbd160a4c846632fbc63694d30
|
Shell
|
mgmarino/VMELinux
|
/installUniverseDriver.sh
|
UTF-8
| 2,650
| 4.46875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Install script, this will grab most recent version
# from the repo and install it. Repo changed to github
#
# Run (as root, from a temp directory):
#
# cd tmp
# sh /path/to/installUniverseDriver.sh install
#
#
# Package identity and download location.  The tarball is fetched from the
# GitHub master branch, so "newest" always tracks the repository head.
name=universe
version=newest
FILEURL=https://github.com/mgmarino/VMELinux/tarball/master
# Local filename the downloaded tarball is saved under (universe-newest.tar.gz).
output_file=$name-$version.tar.gz
check()
{
	# Report whether the universe kernel module is currently loaded.
	# Prints a human-readable status line and returns grep's status:
	# 0 when the module name appears in lsmod output, non-zero otherwise.
	lsmod | grep -q "$name"
	rc=$?
	if [ $rc -ne 0 ]; then
		echo "Universe module not running."
	else
		echo "Universe module running."
	fi
	return $rc
}
download()
{
	# Fetch the release tarball from $FILEURL into $output_file.
	# Returns 0 on success, 1 on failure.
	echo "Downloading file at: $FILEURL"
	wget --timeout=10 --no-check-certificate "$FILEURL" -O "$output_file"
	# BUG FIX: wget -O creates (or truncates) the target file even when the
	# transfer fails, so the old "does the file exist" test reported success
	# for failed downloads.  Require a zero exit status AND a non-empty
	# file, and discard empty partial files so later existence checks in
	# install/uninstall don't trip over them.
	if [ $? -ne 0 ] || [ ! -s "$output_file" ]; then
		rm -f "$output_file"
		echo "Can't obtain $output_file, try manually copying to this directory"
		return 1
	else
		return 0
	fi
}
inflate()
{
	# Unpack the downloaded tarball and rename the GitHub-generated
	# top-level directory (mgmarino-VMELinux-<sha>) to $name-$version.
	# Returns non-zero when the temp dir cannot be created or tar fails.
	echo "Inflating..."
	# mktemp -d replaces the old predictable /tmp/..._$$.tmp name (safer
	# against symlink races) and fails loudly if it cannot create the dir.
	tmp_dir_name=$(mktemp -d /tmp/universe_tmp_output_XXXXXX) || return 1
	if ! tar xmfz "$output_file" -C "$tmp_dir_name"; then
		rm -rf "$tmp_dir_name"
		return 1
	fi
	mv "$tmp_dir_name"/mgmarino-VMELinux-* "$name-$version"
	rm -rf "$tmp_dir_name"
}
install()
{
	# Download (if missing), unpack (if missing), then build and install
	# the kernel driver and the userspace API via their makefiles.
	# Exits the whole script on download or build failure; `make install`
	# typically requires root privileges.
	my_cwd=`pwd`
	if [ ! -f $output_file ]; then
		download
		return_val=$?
		if [ "$return_val" -eq "1" ]; then
			echo "Exiting"
			exit
		fi
	fi
	# Only unpack when the source directory is not already present.
	if [ ! -d $name-$version ]; then
		inflate
	fi
	cd $name-$version
	echo "Building driver..."
	# Propagate make's failure status straight out of the script.
	cd driver && make && make install || exit $?
	cd ../
	echo "Building api..."
	cd universe_api && make && make install || exit $?
	echo "Installation done."
	cd $my_cwd
}
uninstall()
{
	# Run the makefiles' uninstall targets for both the driver and the
	# userspace API.  The source tree is needed to do this, so it is
	# re-downloaded/unpacked when absent.  Exits the script on failure.
	my_cwd=`pwd`
	if [ ! -d $name-$version ]; then
		if [ ! -f $output_file ]; then
			echo "It doesn't look like the install folder exists, trying to obtain..."
			download
			return_val=$?
			if [ "$return_val" -eq "1" ]; then
				echo "Exiting"
				exit
			fi
		fi
		inflate
	fi
	cd $name-$version
	echo "Uninstalling driver..."
	# Propagate make's failure status straight out of the script.
	cd driver && make uninstall || exit $?
	cd ../
	echo "Uninstalling api..."
	cd universe_api && make uninstall || exit $?
	echo "Uninstallation done."
	cd $my_cwd
}
upgrade()
{
	# Refresh the installation: when the module is already loaded the old
	# build is uninstalled first; either way a clean copy is fetched and
	# installed (cleanup discards any stale tarball/source tree).
	if check; then
		echo "Universe installed, upgrading to new version"
		uninstall
		cleanup
		install
	else
		echo "Universe not installed, installing new version"
		cleanup
		install
	fi
}
cleanup()
{
	# Discard the downloaded tarball and the unpacked source tree so the
	# next install starts from a fresh copy.
	rm -f "$name-$version.tar.gz"
	rm -rf "$name-$version"
}
# See how we were called.
# Command dispatcher: the first argument selects the action; anything else
# prints usage and exits 1.
# NOTE(review): the trailing `exit 0` masks the action's own status, so
# e.g. `$0 check` always exits 0 even when the module is not loaded --
# confirm callers don't rely on the exit code.
case "$1" in
  install)
	install
        ;;
  uninstall)
	uninstall
        ;;
  check)
	check
        ;;
  cleanup)
	cleanup
        ;;
  upgrade)
	upgrade
        ;;
  download)
	download
        ;;
  *)
	echo $"Usage: $0 {install|uninstall|upgrade|cleanup|check|download}"
	exit 1
esac
exit 0
| true
|
2ff28985d8ab8cd5e80275a7c63b40e9f6e2e6dc
|
Shell
|
wang111chi/fenle
|
/sit/tests/preauth_done.sh
|
UTF-8
| 1,813
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Pre-authorization completion ("preauth done") integration-test step.
# Reads the result of the previous step as JSON on standard input:
# {
#     "id": "111111111110012017030100000013",
#     "bank_list": "20170301145608",
#     "bank_settle_time": "0301150700",
#     "pre_author_code": "096207",
#     ...
# }
input=$(cat -)
status=$(echo $input | jq '.status')
op=$(echo $input | jq '.op')
# When the upstream payload already carries status==1 together with an "op"
# field, it appears to be a propagated error from a previous step: re-emit
# it and abort with 127.  NOTE(review): `-a` inside [ ] is deprecated and
# ambiguous; `[[ ... && ... ]]` would be the bash-safe form.
if [ "$status" == 1 -a "$op" != "null" ]; then
    echo $input | jq '.'
    exit 127
fi
# jq prints string values with surrounding double quotes; the sed pair
# strips them so the value can be re-sent as a form field.
parent_id=$(echo $input | jq '.id' | sed -e 's/^"//' -e 's/"$//')
# Send the SMS verification code first
echo "sleeping 5 seconds..." >&2
sleep 5
echo "sending sms code..." >&2
# Request an SMS code for the card; shared request fields (merchant id,
# terminal, card number, ...) are read from files under data/ via -d @file.
ret=$(
    curl -s \
         -d @data/bank_spid \
         -d @data/terminal_id \
         -d amount=100 \
         -d @data/bankacc_no \
         -d @data/mobile \
         -d @data/valid_date \
         http://58.67.212.197:8081/sms/send
)
retcode=$(echo $ret | jq '.status')
if [ $retcode -ne 0 ]; then
    echo [fail] send sms code >&2
    echo $ret | jq '.' >&2
    exit 127
fi
# Carry the bank's transaction list id and SMS timestamp into the
# completion request (quotes stripped as above).
bank_list=$(echo $ret | jq '.bank_list' | sed -e 's/^"//' -e 's/"$//')
bank_sms_time=$(echo $ret | jq '.bank_sms_time' | sed -e 's/^"//' -e 's/"$//')
echo "sleeping 5 seconds..." >&2
sleep 5
echo "making preauth done request..." >&2
# Complete the pre-authorization; bank_validcode=000000 presumably is the
# test environment's fixed verification code -- confirm.
ret=$(
    curl -s \
         -d @data/bank_spid \
         -d @data/terminal_id \
         -d amount=100 \
         -d @data/bankacc_no \
         -d @data/mobile \
         -d @data/valid_date \
         -d bank_validcode=000000 \
         -d bank_sms_time=$bank_sms_time \
         -d bank_list=$bank_list \
         -d parent_id=$parent_id \
         http://58.67.212.197:8081/preauth/done
)
retcode=$(echo $ret | jq '.status')
if [ $retcode -ne 0 ]; then
    echo [fail] preauth done >&2
    echo $ret | jq '. | {status: .status, message: .message, op: "preauth_done"}'
    exit 127
fi
# On success, emit only the transaction object for the next step.
echo $ret | jq '.trans'
| true
|
bb731171e1069b8e044f00b7919eaeb31ed0b098
|
Shell
|
tropicloud-2k/submarine2
|
/bin/env.sh
|
UTF-8
| 1,693
| 3.21875
| 3
|
[] |
no_license
|
# SUBMARINE --------------------------------------------------------------------
# Environment bootstrap for the submarine container.  No shebang: this file
# is meant to be *sourced*.  It uses bash syntax ([[ ]], ${var//}), so the
# sourcing shell must be bash.  Exits with an error when a required value
# cannot be derived.
export user="submarine"
export home="/submarine"
export acme="$home/acme"
export etc="$home/etc"
export log="$home/log"
export run="$home/run"
export ssl="$home/ssl"
export www="$home/www"
# WP_DOMAIN --------------------------------------------------------------------
# WP_DOMAIN may be a comma-separated list; only the first entry is kept.
# WP_HOME becomes http(s)://<domain>, with https only when WP_PORT is 443.
if [[ -z $WP_DOMAIN ]]; then
  echo "ERROR: WP_DOMAIN is not defined."
  exit 1
else
  # [[ -eq ]] is numeric: an unset/empty WP_PORT is treated as 0 -> http.
  if [[ $WP_PORT -eq "443" ]];
  then export SCHEME=https
  else export SCHEME=http
  fi
  export WP_DOMAIN=$(echo $WP_DOMAIN | cut -d, -f1)
  export WP_HOME=${SCHEME}://${WP_DOMAIN}
fi
# DB_HOST ----------------------------------------------------------------------
# Default to the "mysql" service name when dig can query it.
# NOTE(review): dig commonly exits 0 even for NXDOMAIN answers, so this
# check can succeed when no "mysql" host exists -- confirm intended.
if [[ -z $DB_HOST ]]; then
  if dig mysql > /dev/null;
  then export DB_HOST=mysql
  else echo "ERROR: DB_HOST is not defined" && exit 1
  fi
fi
# DB_NAME ----------------------------------------------------------------------
# Derive from the domain: dots become underscores, truncated to 16 chars.
if [[ -z $DB_NAME ]]; then
  if [[ -n $WP_DOMAIN ]];
  then export DB_NAME=`echo ${WP_DOMAIN//./_} | cut -c 1-16`
  else echo "ERROR: DB_NAME is not defined" && exit 1
  fi
fi
# DB_USER ----------------------------------------------------------------------
# Fall back to root when a MySQL root password is present in the environment.
if [[ -z $DB_USER ]]; then
  if env | grep MYSQL_ROOT_PASSWORD > /dev/null;
  then export DB_USER=root
  else echo "ERROR: DB_USER is not defined" && exit 1
  fi
fi
# DB_PASSWORD ------------------------------------------------------------------
# NOTE(review): the grep matches any variable whose line *contains*
# MYSQL_ROOT_PASSWORD (e.g. MYSQL_ROOT_PASSWORD_FILE); head -n1 picks the
# first such match -- verify this selects the intended variable.
if [[ -z $DB_PASSWORD ]]; then
  if env | grep MYSQL_ROOT_PASSWORD > /dev/null;
  then export DB_PASSWORD=`env | grep MYSQL_ROOT_PASSWORD | head -n1 | cut -d= -f2`
  else echo "ERROR: DB_PASSWORD is not defined" && exit 1
  fi
fi
| true
|
c918390240012aa9af2ae4ededd3130e2a8bcdd6
|
Shell
|
mazrk7/tf_playground
|
/vae/run_multi_model.sh
|
UTF-8
| 257
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Train an ensemble of ten VAE models (index 0..9) sharing one
# configuration.
#
# Usage: run_multi_model.sh <vae_type> <latent_dim> <n_epochs>
#
# Validate the argument count up front instead of silently launching the
# trainer with empty flags.
if [ "$#" -ne 3 ]; then
    echo "Usage: $0 <vae_type> <latent_dim> <n_epochs>" >&2
    exit 1
fi
echo "Names: multi_$1_ldim$2"
echo "VAE type is: $1"
echo "Latent dim is: $2"
echo "# of epochs is: $3"
# {0..9} replaces the external `seq 0 1 9` call; expansions are quoted so
# unusual values are passed through verbatim.
for NUM in {0..9}
do
    python train_multi_vae.py --name "multi_$1_ldim$2" --index "$NUM" --vae_type="$1" --latent_dim="$2" --n_epochs="$3"
done
| true
|
eeb0ea039e0ef7dc513435b7670a5633ea7949e9
|
Shell
|
colinwilson/pfsense-kemp-cert-autoupdate
|
/kemp-cert-update.sh
|
UTF-8
| 2,488
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Title: Auto-Update & Upload LetsEncrypt Certs to KEMP LoadMaster
# Guide/Source: https://colinwilson.uk/2017/06/19/auto-update-ssl-certificates-on-kemp-loadmaster-via-pfsense-lets-encrypt/
# Created: 12/06/2017
# Update: 05/12/2018
# Author: Colin Wilson [https://github.com/colinwilson]
# Vendor or Software Link: https://www.pfsense.org/ , https://kemptechnologies.com
# Version: 1.2.1
# Category: BASH Shell Script
# Tested on: pfSense 2.4.4 & KEMP LM 7.2.43
#
# e.g. sh /home/custom/kemp-cert-update.sh -f /home/custom/cert-auto-update.cert.pem -d mydomain.com -i 172.16.2.10
#
# Authentication against the LoadMaster API is either via a client
# certificate (-f) or HTTP basic-auth credentials (-b); exactly one of the
# two is expected to be supplied.
# Parse command-line options; each flag consumes the following word as its
# value via the extra `shift`.
while [ -n "$1" ]
do
case "$1" in
    -f|--file)
        KEMP_API_ACCESS_CERT_PATH="$2"
        shift # past argument
        ;;
    -b|--basicauth)
        BASIC_AUTH="$2"
        shift # past argument
        ;;
    -d|--domain)
        CERT_NAME="$2"
        shift # past argument
        ;;
    -i|--ipaddress)
        KEMP_IP="$2"
        shift # past argument
        ;;
    *) # unknown option
        ;;
esac
shift # past argument or value
done
# Check if certificate name exists on KEMP LoadMaster
# LIST_CERTS ends up as the literal string "true" or "false" (xmllint's
# boolean() XPath result).  The `:` branches are deliberate no-ops for the
# authentication mode that was not supplied.
if [ -z "$KEMP_API_ACCESS_CERT_PATH" ]
then
    :
else
    LIST_CERTS=$(curl -sS -k -E "$KEMP_API_ACCESS_CERT_PATH" https://"${KEMP_IP}"/access/listcert | xmllint --format --xpath "boolean(//name[text()='$CERT_NAME'])" - )
fi
if [ -z "$BASIC_AUTH" ]
then
    :
else
    LIST_CERTS=$(curl -sS -k https://"${BASIC_AUTH}"@"${KEMP_IP}"/access/listcert | xmllint --format --xpath "boolean(//name[text()='$CERT_NAME'])" - )
fi
# replace=1 tells the addcert API call to overwrite the existing entry of
# the same name; 0 adds it as new.
if [ "$LIST_CERTS" = true ] ; then
    REPLACE=1
else
    REPLACE=0
fi
# Concatenate certificate and key
# NOTE(review): the combined PEM contains the private key and is written to
# world-readable /tmp until deleted at the end -- consider mktemp with a
# restrictive umask.
cat /conf/acme/"$CERT_NAME".crt /conf/acme/"$CERT_NAME".key > /tmp/"$CERT_NAME".full.pem
# Upload certificate to KEMP LoadMaster
# (basic-auth variant)
if [ -z "$BASIC_AUTH" ]
then
    :
else
    upload_cert_basic() {
    curl -sS -X POST --data-binary "@/tmp/${CERT_NAME}.full.pem" -k "https://${BASIC_AUTH}@${KEMP_IP}/access/addcert?cert=${CERT_NAME}&replace=${REPLACE}"
}
upload_cert_basic
fi
# (client-certificate variant)
if [ -z "$KEMP_API_ACCESS_CERT_PATH" ]
then
    :
else
    upload_cert() {
    curl -sS -X POST --data-binary "@/tmp/${CERT_NAME}.full.pem" -k -E "$KEMP_API_ACCESS_CERT_PATH" "https://${KEMP_IP}/access/addcert?cert=${CERT_NAME}&replace=${REPLACE}"
}
upload_cert
fi
# Delete concatenated certificate file
rm /tmp/"$CERT_NAME".full.pem
| true
|
9af592d55d7562a06b34c316c0da986a17b7f88f
|
Shell
|
dilanacar/HCPpipelines
|
/PostFreeSurfer/scripts/Example_1res_inflate.sh
|
UTF-8
| 1,627
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
source "${HCPPIPEDIR}/global/scripts/debug.shlib" "$@"         # Debugging functions; also sources log.shlib
#quick script to regenerate inflated 59k surfaces to match original HCP 32k
#Example call:
# . SetUpHCPPipeline.sh
# StudyFolder=/data/Phase2_7T
# Subject=102311
# T1wFolder="$StudyFolder"/"$Subject"/T1w
# AtlasSpaceFolder="$StudyFolder"/"$Subject"/MNINonLinear
# LowResMeshes=59
# Example_1res_inflate.sh $StudyFolder $Subject $T1wFolder $AtlasSpaceFolder $LowResMeshes
# Positional arguments (see example call above).
StudyFolder="$1"       # NOTE(review): assigned but not referenced below
Subject="$2"
T1wFolder="$3"
AtlasSpaceFolder="$4"
LowResMeshes="$5"
# LowResMeshes is an @-separated list (e.g. "32@59"); convert to a
# space-separated list for the for-loop.
LowResMeshes=`echo ${LowResMeshes} | sed 's/@/ /g'`
for Hemisphere in L R ; do
  for LowResMesh in ${LowResMeshes} ; do
    # Scale the inflation iterations proportionally to mesh resolution,
    # normalised so a 32k mesh gets the reference factor 0.75.
    InflationScale=`echo "scale=4; 0.75 * $LowResMesh / 32" | bc -l`
    # Regenerate inflated + very_inflated surfaces from the midthickness
    # surface, once in MNI (atlas) space and once in native T1w space.
    ${CARET7DIR}/wb_command -surface-generate-inflated "$AtlasSpaceFolder"/fsaverage_LR"$LowResMesh"k/"$Subject"."$Hemisphere".midthickness."$LowResMesh"k_fs_LR.surf.gii "$AtlasSpaceFolder"/fsaverage_LR"$LowResMesh"k/"$Subject"."$Hemisphere".inflated."$LowResMesh"k_fs_LR.surf.gii "$AtlasSpaceFolder"/fsaverage_LR"$LowResMesh"k/"$Subject"."$Hemisphere".very_inflated."$LowResMesh"k_fs_LR.surf.gii -iterations-scale "$InflationScale"
    ${CARET7DIR}/wb_command -surface-generate-inflated "$T1wFolder"/fsaverage_LR"$LowResMesh"k/"$Subject"."$Hemisphere".midthickness."$LowResMesh"k_fs_LR.surf.gii "$T1wFolder"/fsaverage_LR"$LowResMesh"k/"$Subject"."$Hemisphere".inflated."$LowResMesh"k_fs_LR.surf.gii "$T1wFolder"/fsaverage_LR"$LowResMesh"k/"$Subject"."$Hemisphere".very_inflated."$LowResMesh"k_fs_LR.surf.gii -iterations-scale "$InflationScale"
  done
done
| true
|
ec9ea0975ae97500174b9e1e3efdc03da9e3ddc0
|
Shell
|
chr0n1x/ethos-systemd
|
/v1/lib/helpers.sh
|
UTF-8
| 775
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/bash -x
# Source the etcd
# Presumably configures etcdctl (endpoints/credentials) -- verify.  Absence
# of the drop-in is tolerated so these helpers still load on hosts without it.
# NOTE(review): shebang options like -x are lost when this file is sourced
# or run via `bash file`; tracing only applies on direct execution.
if [ -f /etc/profile.d/etcdctl.sh ]; then
	source /etc/profile.d/etcdctl.sh;
fi
# Handle retrying of all etcd sets and gets
# Set a key in etcd, retrying once per second until the write succeeds.
# All arguments are forwarded verbatim to `etcdctl set`.
function etcd-set() {
	# BUG FIX: the retry path used an unquoted $@, which re-split any
	# argument containing whitespace on the second and later attempts.
	until etcdctl set "$@"; do sleep 1; done
}
# Get a key from etcd, retrying once per second until a definitive answer.
# Status 0 (found) and 4 (key intentionally absent) both end the retry loop;
# anything else is treated as transient.  The value is printed on stdout.
function etcd-get() {
	etcdctl get "$@"
	rc=$?
	# BUG FIX: the retry path used an unquoted $@, which re-split any
	# argument containing whitespace on later attempts.  Capturing the
	# status in rc also avoids re-reading $? after the [[ ]] test.
	while [[ $rc != 0 && $rc != 4 ]]; do sleep 1; etcdctl get "$@"; rc=$?; done
}
# Handle retrying of all fleet submits and starts
# Submit a fleet unit, retrying once per second until fleetctl succeeds.
function submit-fleet-unit() {
	# BUG FIX: the retry path used an unquoted $@ (lost quoting of any
	# argument containing whitespace on later attempts).
	until sudo fleetctl submit "$@"; do sleep 1; done
}
# Start a fleet unit, retrying once per second until fleetctl succeeds.
function start-fleet-unit() {
	# BUG FIX: the retry path used an unquoted $@ (lost quoting of any
	# argument containing whitespace on later attempts).
	until sudo fleetctl start "$@"; do sleep 1; done
}
| true
|
a14df420496cb524a62a536eeabdfaa8ab64d394
|
Shell
|
opencrowbar/core
|
/bin/ocb_regen_docs
|
UTF-8
| 441
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Regenerate YARD developer documentation for every OpenCrowbar component
# that carries a .yardopts file, staging the generated docs in git.
#
# Re-exec this script as the crowbar user when invoked as anyone else.
if [[ $(whoami) != crowbar ]]; then
    su -l -c "$0" crowbar
    exit $?
fi
. /etc/profile
# globstar: ** recurses into subdirectories; nullglob: a pattern with no
# matches expands to nothing instead of itself.
shopt -s globstar nullglob
# `command -v` is the builtin, portable replacement for the external `which`.
if ! command -v yardoc &>/dev/null; then
    echo "YARD gem not installed."
    echo "Please install it to generate the developer documentation"
    exit 1
fi
# Fail outright if the tree is missing instead of generating docs in the
# wrong directory (the old unchecked cd carried on regardless).
cd /opt/opencrowbar || exit 1
for d in **/.yardopts; do
    # Subshell keeps each component's cd isolated.
    ( cd "${d%/*}" || continue
      yardoc && git add doc/internals
      rm -rf .yardoc      # drop YARD's cache after generation
    )
done
| true
|
864132ce173d18027699e3ebc554a2dda4e4c5cb
|
Shell
|
wlloyduw/lambda_test
|
/keep_alive_trigger/delete_permission.sh
|
UTF-8
| 541
| 3.84375
| 4
|
[] |
no_license
|
# Can have up to 5 triggers for each event
#
# Removes the CloudWatch-event invoke permission (statement id
# "cloud_watch_event_permission") from the named AWS Lambda function.
#
# Usage: ./delete_permission.sh <lambda_function_name>
lambdaname=$1
# Quote the expansion: the old unquoted `[ -z ${lambdaname} ]` only worked
# by accident when the value was empty, and broke on values with spaces.
if [ -z "${lambdaname}" ]
then
	echo ""
	echo "USAGE: "
	echo "./delete_permission.sh (lambda_function_name) "
	echo ""
	echo "Deletes cloudwatch event execute permissions for specified AWS Lambda function"
	echo ""
	exit
fi
echo -n "Deleting cloudwatch event permission to lambda function: $lambdaname"
# Quote the function name so it is passed to the AWS CLI verbatim.
aws lambda remove-permission --function-name "$lambdaname" --statement-id cloud_watch_event_permission
echo ""
| true
|
a43e6e000dbf811f48927ccbac60f53e7dea32bc
|
Shell
|
cedadev/slstr_calibration_ftp_retriever
|
/cron/data_retriever_run_cpa_viscal_s3a.sh
|
UTF-8
| 294
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# Download S3A VISCAL calibration products for each of the last six days
# (today through five days ago).
#
# BUG FIX: {0..5} brace expansion is a bashism that /bin/sh (dash) does not
# perform -- the original loop ran exactly once with the literal string
# "{0..5}".  An explicit POSIX-compatible list is used instead.
for days_ago in 0 1 2 3 4 5
do
    # Derive the date components for $days_ago days in the past (GNU date).
    month=$(date -d "$days_ago days ago" +%m)
    year=$(date -d "$days_ago days ago" +%Y)
    day=$(date -d "$days_ago days ago" +%d)
    # The retriever presumably reads these from the environment as well as
    # argv (the original exported them), so keep them exported.
    export month year day
    "$GWS_PATH"/software/slstr_calibration_ftp_retriever/download_viscal_ompc.sh S3A "$year" "$month" "$day"
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.