blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
35398db0fe2c45a1de530cce15538fdb1f9d07f5
|
Shell
|
petronny/aur3-mirror
|
/ardour-bin/PKGBUILD
|
UTF-8
| 1,348
| 2.921875
| 3
|
[] |
no_license
|
# Maintainer: speps <speps at aur dot archlinux dot org>
# PKGBUILD for a *binary* Ardour package: repackages the upstream
# pre-built 32/64-bit tarball rather than compiling from source.
_name=ardour
pkgname=$_name-bin
pkgver=3.0beta5_13072
pkgrel=1
pkgdesc="A multichannel hard disk recorder and digital audio workstation"
arch=(i686 x86_64)
url="http://$_name.org/"
license=('GPL')
depends=('jack' 'desktop-file-utils')
# Do not strip the upstream-built binaries.
options=('!strip')
install="$pkgname.install"
# Select the upstream tarball suffix and its checksum for the build arch.
if [ "$CARCH" = 'i686' ]; then
_arch=_32 && md5sums=('1b95a266cec2417460482f2603cf5f51'); else
_arch=_64 && md5sums=('49d81b5f8e914cf6d3a4bb6aba06e8ef'); fi
# ${_name^} upper-cases the first letter -> "Ardour<arch>bit-<ver>.tar".
source=("http://$_name.org/files/${_name^}${_arch}bit-$pkgver.tar"
$pkgname.{desktop,sh})
# Checksums for the two local sources (.desktop file and launcher script).
md5sums+=('27eaf365a95bc15177090b8e11d2ec52'
'21886cd2d1ac140aa29fbba886deb5b5')
package() {
cd "$srcdir/${_name^}${_arch}bit-$pkgver"
# unpack
# ${_arch/_32} strips "_32" so the 32-bit inner tarball is
# "Ardour_x86-<ver>.tar.bz2" while 64-bit stays "Ardour_x86_64-...".
install -d "$pkgdir/opt"
bsdtar --no-same-owner -jxvf \
${_name^}_x86${_arch/_32}-$pkgver.tar.bz2 \
-C "$pkgdir/opt"
# install to /opt as does not conflict
# with other ardour installations and libs
cd "$pkgdir/opt"
mv ${_name^}_x86${_arch/_32}-$pkgver $_name
# rm uninstaller and sanity check
rm $_name/bin/{*.uninstall.sh,sanityCheck}
# launcher
install -Dm755 "$srcdir/$pkgname.sh" \
"$pkgdir/usr/bin/$pkgname"
# desktop file
install -Dm644 "$srcdir/$pkgname.desktop" \
"$pkgdir/usr/share/applications/$pkgname.desktop"
}
# vim:set ts=2 sw=2 et:
| true
|
ffe38668c0cad8b22185fd724834475956c27688
|
Shell
|
cadornaa/mu2eer
|
/src/build_select.sh
|
UTF-8
| 1,200
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
################################################################################
#
# build_select.sh
#
# Mu2eER Deployment Script. Automates the installation of the Mu2eER Buildroot
# images produced by Jenkins to the test and production boot areas.
#
# Usage: build_select.sh [test|production] [jenkins build number]
#
# @author jdiamond
#
################################################################################

# Provides do_remote() and $BOOT_LOCATION.
. scripts_inc.sh

# usage
#
# Display usage information and list the contents of the remote boot area.
#
usage()
{
  printf "Usage: %s [test|production] [jenkins build number]\n" "$0"
  printf "Displaying boot directory for Mu2eER...\n"
  ssh -t nova "ls -l $BOOT_LOCATION" 2>/tmp/ssh_stderr
}

ENVIRONMENT=$1
BUILD_NUM=$2

# A Jenkins build number is mandatory.
if [ -z "$BUILD_NUM" ]; then
  usage
  exit 1
fi

case "$ENVIRONMENT" in
  test | production)
    printf "Setting %s to build #%s...\n" "$ENVIRONMENT" "$BUILD_NUM"
    ;;
  "")
    usage
    exit 1
    ;;
  *)
    printf "Unrecognized environment: %s. Use test or production.\n" "$ENVIRONMENT"
    exit 1
    ;;
esac

# BUGFIX: do_remote takes two separate arguments (message, command); the
# original wrote `"msg", "cmd"` — shell has no comma argument separator,
# so the stray comma was silently glued onto the message text.
do_remote "Removing existing $ENVIRONMENT link" "rm ${BOOT_LOCATION}/${ENVIRONMENT} || true"
do_remote "Creating $ENVIRONMENT link" "cd ${BOOT_LOCATION}; ln -s $BUILD_NUM $ENVIRONMENT"

printf "Done!\n"
| true
|
0f377d870dd12192983dbae2b6e750343733e272
|
Shell
|
PokhodenkoSA/dpctl
|
/scripts/build_for_develop.sh
|
UTF-8
| 1,582
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build dpctl-capi with CMake (Debug), run its C API tests, install it,
# then build the Python extension in develop mode against that install.

set +xe
rm -rf build_cmake
mkdir build_cmake
pushd build_cmake || exit 1

INSTALL_PREFIX=$(pwd)/../install
rm -rf "${INSTALL_PREFIX}"

export ONEAPI_ROOT=/opt/intel/oneapi
DPCPP_ROOT=${ONEAPI_ROOT}/compiler/latest/linux

cmake \
    -DCMAKE_BUILD_TYPE=Debug \
    -DCMAKE_INSTALL_PREFIX="${INSTALL_PREFIX}" \
    -DCMAKE_PREFIX_PATH="${INSTALL_PREFIX}" \
    -DDPCPP_INSTALL_DIR="${DPCPP_ROOT}" \
    -DCMAKE_C_COMPILER:PATH="${DPCPP_ROOT}/bin/clang" \
    -DCMAKE_CXX_COMPILER:PATH="${DPCPP_ROOT}/bin/dpcpp" \
    -DDPCTL_BUILD_CAPI_TESTS=ON \
    -DDPCTL_GENERATE_COVERAGE=ON \
    ../dpctl-capi

# BUGFIX: the original ran `make V=1 -n -j 4`; -n is a dry run (prints
# commands without executing), so this step never actually built anything
# before `make check`. The -n flag is removed.
make V=1 -j 4 && make check && make install
if [ $? -ne 0 ]; then
    echo "Building of libDPCTLSyclInterface failed. Abort!"
    exit 1
fi

# To run code coverage for dpctl-c API
make llvm-cov

# For more verbose tests use:
# cd tests
# ctest -V --progress --output-on-failure -j 4
# cd ..
popd

# Stage the built shared library and headers next to the Python package.
cp install/lib/*.so dpctl/
mkdir -p dpctl/include
cp -r dpctl-capi/include/* dpctl/include

export DPCTL_SYCL_INTERFACE_LIBDIR=dpctl
export DPCTL_SYCL_INTERFACE_INCLDIR=dpctl/include
export CC=${DPCPP_ROOT}/bin/clang
export CXX=${DPCPP_ROOT}/bin/dpcpp

# FIXME: How to pass this using setup.py? The fPIC flag is needed when
# dpcpp compiles the Cython generated cpp file.
export CFLAGS=-fPIC

python setup.py clean --all
python setup.py build develop
python -m unittest -v dpctl.tests
| true
|
5658dcb915dbde9b15383106c1e913debce5c7f4
|
Shell
|
PolynomialDivision/usrv6
|
/prefix-switcher/prefix-switcher.sh
|
UTF-8
| 4,755
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# prefix-switcher: periodically obtains a fresh IPv6 prefix from an usrv6
# gateway (picked via babel route metrics), installs it on the gateway and
# locally (address on br-lan, XDP adder/remover programs), and rotates
# through $MAX_PREFIXES prefix slots stored in uci.
#
# BUGFIX: this script runs under /bin/sh (BusyBox ash on OpenWrt), but the
# original used two bashisms: the `function name {` definition syntax and
# the `&>` redirection operator. Under ash/dash, `cmd &> /dev/null` parses
# as "run cmd in the background, then truncate /dev/null", which made the
# while-loop conditions always succeed. Both are replaced with their POSIX
# equivalents.

. /usr/share/usrv6/babel.sh
. /usr/share/libubox/jshn.sh

# Create a fresh uci "prefix" section for slot $2 with prefix $1, gateway $3.
create_new_prefix_section() {
  local p=$1
  local k=$2
  local gw=$3
  uci import prefix-switcher < /etc/config/prefix-switcher
  uci set "prefix-switcher.$k=prefix"
  uci set "prefix-switcher.$k.prefix=$p"
  uci set "prefix-switcher.$k.gateway=$gw"
  uci commit
}

# Record prefix $1 and gateway $3 in slot $2, creating the section on first use.
add_prefix() {
  local p=$1
  local k=$2
  local gw=$3
  if ! section=$(uci get "prefix-switcher.$k") 2> /dev/null; then
    create_new_prefix_section "$p" "$k" "$gw"
  fi
  uci set "prefix-switcher.$k.prefix=$p"
  uci set "prefix-switcher.$k.gateway=$gw"
  uci commit
}

# Release the prefix stored in slot $1 on its recorded gateway (no-op if empty).
del_prefix() {
  local k=$1
  if ! section=$(uci get "prefix-switcher.$k") 2> /dev/null; then
    # Slot never populated; nothing to delete.
    return 0
  fi
  todeletegw=$(uci get "prefix-switcher.$k.gateway")
  todeleteprefix=$(uci get "prefix-switcher.$k.prefix")
  usrv6c delete --ip "[$todeletegw]" --user usrv6 --password usrv6 --prefix "$todeleteprefix"
}

# Load the XDP remover program ($1=object, $2=program) on every mesh
# interface named by a "general" uci section.
load_remover() {
  local xdp=$1
  local xdp_prog=$2
  local j=0
  while uci get "prefix-switcher.@general[$j]" >/dev/null 2>&1 ; do
    mesh_interface=$(uci get "prefix-switcher.@general[$j].mesh_interface")
    ip link set dev "$mesh_interface" xdp off
    xdpload -d "$mesh_interface" -f "$xdp" -p "$xdp_prog"
    j=$((j+1))
  done
}

# Point the remover on every mesh interface at prefix $1 / key $2.
apply_remover() {
  local prefix=$1
  local key=$2
  local j=0
  while uci get "prefix-switcher.@general[$j]" >/dev/null 2>&1 ; do
    mesh_interface=$(uci get "prefix-switcher.@general[$j].mesh_interface")
    xdp-srv6-remover -d "$mesh_interface" -p "$prefix" -k "$key"
    j=$((j+1))
  done
}

# Replace the prefix in slot $1: release the old one, fetch a new prefix
# from the best gateway (by babel metric <= $5), install it remotely and
# locally, and reconfigure the XDP programs.
new_prefix() {
  local key=$1
  local segpath=$2
  local valid_lft=$3
  local preferred_lft=$4
  local max_metric=$5
  # delete old prefix
  del_prefix "$key"
  gw_ip=$(babeld-utils --gateways "${max_metric}" | awk '{print $4}' | cut -f1 -d"/")
  prefix_call=$(usrv6c get_free_prefix --ip "[$gw_ip]" --user usrv6 --password usrv6 --random 1)
  prefix=$(echo "$prefix_call" | awk '{print $1}')
  valid=$(echo "$prefix_call" | awk '{print $2}')
  preferred=$(echo "$prefix_call" | awk '{print $3}')
  add_prefix "$prefix" "$key" "$gw_ip"
  echo "Prefix: ${prefix}"
  echo "Valid: ${valid}"
  echo "Preferred: ${preferred}"
  # eth0 is just a dummy value for now
  usrv6c install --ip "[$gw_ip]" --user usrv6 --password usrv6 --prefix "$prefix" --seginterface eth0 --segpath "$segpath"
  assignip=$(owipcalc "$prefix" add 1)
  # make configurable
  ip -6 a add "$assignip" dev br-lan valid_lft "$valid_lft" preferred_lft "$preferred_lft"
  apply_remover "$prefix" "$key"
  xdp-srv6-adder -d "$CLIENT_INTERFACE" -p "$prefix" -k "$key"
  # here we could also now use NetEm to add delays, packet loss and jitter for the prefix on the mesh interface
  /etc/init.d/odhcpd reload
  /etc/init.d/network reload
}

# Read all runtime settings from the first "general" uci section.
SLEEP=$(uci get prefix-switcher.@general[0].sleep)
SEGPATH_GW=$(uci get prefix-switcher.@general[0].segpath_gw)
SEGPATH_CLIENT=$(uci get prefix-switcher.@general[0].segpath_client)
VALID_LFT=$(uci get prefix-switcher.@general[0].valid_lft)
PREFERRED_LFT=$(uci get prefix-switcher.@general[0].preferred_lft)
MAX_METRIC=$(uci get prefix-switcher.@general[0].max_metric)
MAX_PREFIXES=$(uci get prefix-switcher.@general[0].max_prefixes)
CLIENT_INTERFACE=$(uci get prefix-switcher.@general[0].client_interface)
XDP_REMOVER=$(uci get prefix-switcher.@general[0].xdp_remover)
XDP_ADDER=$(uci get prefix-switcher.@general[0].xdp_adder)
XDP_PROG_REMOVER=$(uci get prefix-switcher.@general[0].xdp_prog_remover)
XDP_PROG_ADDER=$(uci get prefix-switcher.@general[0].xdp_prog_adder)
LAST_SEGMENT=$(uci get prefix-switcher.@general[0].last_segment)

echo "Running Prefix Switcher With:"
echo "-----------------------------"
echo "sleep: ${SLEEP}"
echo "segpath gateway: ${SEGPATH_GW}"
echo "segpath client: ${SEGPATH_CLIENT}"
echo "valid_lft: ${VALID_LFT}"
echo "preferred_lft: ${PREFERRED_LFT}"
echo "max_metric: ${MAX_METRIC}"
echo "max_prefixes: ${MAX_PREFIXES}"
echo "client_interface: ${CLIENT_INTERFACE}"
echo "xdp_remover: ${XDP_REMOVER}"
echo "xdp_adder: ${XDP_ADDER}"
echo "xdp_prog_remover: ${XDP_PROG_REMOVER}"
echo "xdp_prog_adder: ${XDP_PROG_ADDER}"
echo "last_segment: ${LAST_SEGMENT}"
echo "-----------------------------"

# load and initialze adder
ip link set dev "$CLIENT_INTERFACE" xdp off
xdpload -d "$CLIENT_INTERFACE" -f "$XDP_ADDER" -p "$XDP_PROG_ADDER"
xdp-srv6-adder -d "$CLIENT_INTERFACE" -s "$SEGPATH_CLIENT" -l "$LAST_SEGMENT"

# load and init remover
load_remover "$XDP_REMOVER" "$XDP_PROG_REMOVER"

# Rotate through prefix slots forever.
i=0
while : ; do
  new_prefix "$i" "$SEGPATH_GW" "$VALID_LFT" "$PREFERRED_LFT" "$MAX_METRIC"
  sleep "$SLEEP"
  i=$((i+1))
  if [ "$i" -ge "$MAX_PREFIXES" ]; then
    i=0
  fi
done
| true
|
ea42e39f55eba5b9fe56a3b50d7beeadc5d9c069
|
Shell
|
jacg/flakes-learning-curve
|
/spec/nix_spec.sh
|
UTF-8
| 2,918
| 2.9375
| 3
|
[] |
no_license
|
# Shellspec suite exercising `nix` flake commands (shell/run) against this
# repository's flake. Typos in the spec descriptions/comments are fixed
# ("pakcage" -> "package", "appropritate" -> "appropriate",
# "default Package" -> "defaultPackage").
Describe 'nix'
# --------------------------------------------------------------------------------
It 'A sanity check: we have a recent enough version of `nix`'
When call nix --version
The output should include 'nix (Nix) 2.4'
End
# --------------------------------------------------------------------------------
# # TODO how to inspect something inside the shell?
# It 'shell (local default)'
# When call nix shell
# The output should equal 'This is program AAA.'
# End
# --------------------------------------------------------------------------------
It 'shell local-flake#package-name --command'
# `nix shell --command <executable>` can be used within the repository (or
# anywhere on the local filesystem) to run executables installed by the
# `defaultPackage`.
When call nix shell .#aaa-and-bbb --command aaa
The output should equal 'This is program AAA.'
End
It 'shell remote-flake#package-name --command'
# It also works for flakes which are not present on the local machine.
When call nix shell github:jacg/flakes-learning-curve/generalize-system#aaa-and-bbb --command bbb
The output should include 'this is program BBB!'
End
# TODO point out that `-c 'this | that'` doesn't work, and the `-c bash -c
# 'this | that'` workaround.
# --------------------------------------------------------------------------------
# TODO need to check something about result
# It 'nix build'
# When call nix build
# The status should be success
# End
# --------------------------------------------------------------------------------
It 'nix shell'
When call nix shell --command aaa
The status should be success
The output should equal 'This is program AAA.'
End
# --------------------------------------------------------------------------------
# TODO how to deal with testing something inside the resulting shell
# It 'nix develop'
# When call nix develop
# The status should be failure
# The error should include 'does not provide attribute'
# The error should include 'defaultPackage'
# End
# --------------------------------------------------------------------------------
It 'run'
# By default `nix run` tries to execute a binary with a name that is
# identical to the name of the `defaultPackage`. Our current flake does not
# include such a binary, so `nix run` will fail:
When call nix run
The status should be failure
The error should include 'unable to execute'
The error should include "flake-learning-curve/bin/flake-learning-curve"
The error should include "No such file or directory"
End
# Next we'll see two approaches to getting this to work:
# 1. Set a binary with the appropriate name.
# 2. Define a `defaultApp` in the flake's outputs.
# --------------------------------------------------------------------------------
End
| true
|
0e13fcbac7ae682d36bfdd6686dea6f6b91cbb30
|
Shell
|
novr/xcenv
|
/test/global.bats
|
UTF-8
| 865
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bats
# Tests for the `xcenv global` subcommand (bats framework).
# stub_helper provides make_root_dir/make_global_file and the
# expect_executable_parameter / stub_executable_success stubbing helpers.
load stub_helper
# Runs before each test: create a fresh $XCENV_ROOT.
setup() {
make_root_dir
}
# Run `xcenv-global` with the given arguments and require success.
run_command() {
run xcenv-global $@
assert_success
}
@test "global default" {
# No global version file present -> "system" is reported.
run_command
assert_output "system"
}
@test "unsetting global version" {
make_global_file
assert [ -f "$XCENV_ROOT/.xcode-version" ]
run_command --unset
assert [ ! -f "$XCENV_ROOT/.xcode-version" ]
}
@test "setting global version" {
assert [ ! -f "$XCENV_ROOT/.xcode-version" ]
# Setting delegates to xcenv-version-file-write <path> <version>.
expect_executable_parameter "xcenv-version-file-write" 1 "$XCENV_ROOT/.xcode-version"
expect_executable_parameter "xcenv-version-file-write" 2 "1.2.3"
run_command 1.2.3
}
@test "reading global version" {
# Reading delegates to xcenv-version-file-read <path>; stub its output.
expect_executable_parameter "xcenv-version-file-read" 1 "$XCENV_ROOT/.xcode-version"
stub_executable_success "xcenv-version-file-read" "1.2.3"
run_command
assert_output "1.2.3"
}
| true
|
fa62c5e6501371b46166b056089a9df37595e587
|
Shell
|
yenalp/homecentral
|
/setup/modules/provision/postfix/module.base.sh
|
UTF-8
| 1,199
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash

import.require 'provision'

# Postfix base provisioning module: preseeds debconf so the postfix install
# is non-interactive, points relayhost at the internal dev relay and
# restarts the service.
provision.postfix_base.init() {

  provision.postfix_base.__init() {
    import.useModule 'provision'
  }

  # Install required packages (debconf answers preseeded above the check)
  provision.postfix_base.require() {
    debconf-set-selections <<<"postfix postfix/mailname string localhost"
    debconf-set-selections <<<"postfix postfix/main_mailer_type string 'Internet Site'"
    provision.isInstalled 'postfix'
    return $?
  }

  # Restart the postfix server
  provision.postfix_base.restart() {
    service postfix restart || {
      cl "failed to restart the postfix server ... " -e
      return 1
    }
    return 0
  }

  # Modify the core config.
  # BUGFIX: the original redirected stderr with "2> @1", which creates a
  # literal file named "@1"; the intended operator is "2>&1" (both sites).
  provision.postfix_base.configSetup() {
    local __main_cf="/etc/postfix/main.cf"
    if grep -h "devrelay.in.monkii.com" "${__main_cf}" > /dev/null 2>&1; then
      # Relay already configured (original also returned 1 for this case).
      return 1
    else
      sed -i '/relayhost =/c relayhost = \[devrelay.in.monkii.com\]' "${__main_cf}" > /dev/null 2>&1 || {
        return 1
      }
      # Restart postfix server
      provision.postfix_base.restart
    fi
    return 0
  }
}
| true
|
884e183f5f80f18ef2aa31d72bece8a7abc8f7b4
|
Shell
|
thedataincubator/aws-quickstart
|
/scripts/patch-kube-proxy.sh
|
UTF-8
| 2,601
| 2.625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2018 by the contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Patching kube-proxy to set the hostnameOverride is a workaround for https://github.com/kubernetes/kubeadm/issues/857
export KUBECONFIG=/etc/kubernetes/admin.conf
# The JSON patch below (applied to the kube-proxy DaemonSet):
#  1. adds an emptyDir volume "kube-proxy-config",
#  2. remounts /var/lib/kube-proxy in the main container from that emptyDir
#     instead of the original configmap volume,
#  3. adds a busybox initContainer that rewrites config.conf, substituting
#     the node name (from the downward API field spec.nodeName, exposed as
#     $NODE_NAME) into hostnameOverride, then copies config + kubeconfig
#     into the emptyDir.
# The heredoc delimiter is quoted ('EOF') so nothing inside is expanded by
# this shell; ${NODE_NAME} is expanded later, inside the init container.
kubectl -n kube-system patch --type json daemonset kube-proxy -p "$(cat <<'EOF'
[
    {
        "op": "add",
        "path": "/spec/template/spec/volumes/0",
        "value": {
            "emptyDir": {},
            "name": "kube-proxy-config"
        }
    },
    {
        "op": "replace",
        "path": "/spec/template/spec/containers/0/volumeMounts/0",
        "value": {
            "mountPath": "/var/lib/kube-proxy",
            "name": "kube-proxy-config"
        }
    },
    {
        "op": "add",
        "path": "/spec/template/spec/initContainers",
        "value": [
            {
                "command": [
                    "sh",
                    "-c",
                    "sed -e \"s/hostnameOverride: \\\"\\\"/hostnameOverride: \\\"${NODE_NAME}\\\"/\" /var/lib/kube-proxy-configmap/config.conf > /var/lib/kube-proxy/config.conf && cp /var/lib/kube-proxy-configmap/kubeconfig.conf /var/lib/kube-proxy/"
                ],
                "env":[
                    {
                        "name": "NODE_NAME",
                        "valueFrom": {
                            "fieldRef": {
                                "apiVersion": "v1",
                                "fieldPath": "spec.nodeName"
                            }
                        }
                    }
                ],
                "image": "busybox",
                "name": "config-processor",
                "volumeMounts": [
                    {
                        "mountPath": "/var/lib/kube-proxy-configmap",
                        "name": "kube-proxy"
                    },
                    {
                        "mountPath": "/var/lib/kube-proxy",
                        "name": "kube-proxy-config"
                    }
                ]
            }
        ]
    }
]
EOF
)"
| true
|
02408d83e952ebfac4800848584755b992f095b2
|
Shell
|
henriqueal/lara-tools
|
/test-scripts/scriptFirstExperiment.sh
|
UTF-8
| 1,680
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# First experiment: one larad execution with this config for each TI code:
# Initial population: 100;
# Maximum chromosome size: 128;
# Maximum chromosome size in the initial population: 10;
# Maximum generations: 100;
# Crossover rate: 60%;
# Mutation rate: 40%;
# Stopping criterion: number of generations only.
# NOTE(review): the larad invocation below uses -popSize=2 and no
# crossover/mutation flags, which does not match the header's stated
# configuration — confirm which parameters are intended.
#declare -a arr_sel=("torneio" "roleta")
#declare -a arr_gen=("100" "80" "50")
#declare -a arr_pop=("300" "100" "80" "60" "40")
#declare -a arr_cro=("90" "80" "70" "60")
#declare -a arr_mut=("40" "30" "20" "10")
#declare -a arr_sto=("20" "15" "10")
#declare -a arr_initChrom=("30" "50" "80" "100")
#rm -rf /tmp/experiments
#mkdir /tmp/experiments
# Iterate over every benchmark directory; ${dir##*/} is the benchmark name.
for dir in /opt/lara-tools/benchmarks/TEXAS_42_LEON3/*/
#for dir in /tmp/here/*/
do
dir=${dir%*/}
for i in 1
do
#for init in "${arr_initChrom[@]}"
#do
#mkdir "/tmp/teste/${dir##*/}/"
#mkdir "/tmp/experiments/${dir##*/}/"
cd /tmp
echo $dir
#larad -compiler=llvm371 -target=leon3 -algo=ga_henrique -nsteps=100 -seqlen=128 -nexec=1 -popSize=2 ~/Benchmarks/TEXAS_42_LEON3/${dir##*/}/${dir##*/}.c > /tmp/experiments/${dir##*/}/${sel}_${gen}_${pop}_${cro}_${mut}
larad -compiler=llvm371 -target=leon3 -algo=ga_henrique -nsteps=100 -seqlen=128 -nexec=1 -popSize=2 ~/Benchmarks/TEXAS_42_LEON3/${dir##*/}/${dir##*/}.c
#cd /tmp
#larad -compiler=llvm371 -target=leon3 -algo=ga_henrique -selection=roulette -crossRate=80 -mutRate=20 -popSize=2 -maxGen=1 -seqlen=128 -nexec=1 -initChrom=128 ~/Benchmarks/TEXAS_42_LEON3/DSP_vecsumsq_c/DSP_vecsumsq_c.c
#$filename = "/tmp/experiments/${dir##*/}/${sel}_${gen}_${pop}_${cro}_${mut}"
#done
done
done
| true
|
0eb655b84b12961a06e596bafb2ed160185de9b0
|
Shell
|
smartface/sf-extension-amcharts
|
/scripts/get.sh
|
UTF-8
| 410
| 3.078125
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/usr/bin/env bash
# Download the free amCharts 3.21.7 release and unpack it into
# ~/workspace/assets/amcharts, optionally overlaying a local index.html
# from the invoking directory.

folder=~/workspace/assets/amcharts
url="https://s3.amazonaws.com/amcharts-downloads/3.21/amcharts_3.21.7.free.zip"
cwd=$(pwd)

# Start from a clean target directory.
if [ -d "$folder" ]; then rm -rf "$folder"; fi

(
  # Subshell so the directory changes don't leak to the caller.
  cd ~/workspace/assets || exit 1
  wget -O amcharts.zip "$url"
  # Extract only the amcharts/ subtree of the archive.
  unzip -q amcharts.zip 'amcharts/*' -d amcharts
  rm ./amcharts.zip
  cd amcharts || exit 1
  # Overlay a local index.html if the caller has one.
  if [ -f "$cwd/index.html" ]; then cp "$cwd/index.html" ./index.html; fi
)
| true
|
e0c12f9e78c4f759f34fe929a055a2eae3b2d5b9
|
Shell
|
lerner/MapRTools
|
/create_hec_vols.sh
|
UTF-8
| 13,410
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Author: Andy Lerner
# Update History:
# 03Nov2017 Removed need for client numbering. Use clush groups to determine clients
# This script can be run from any client node with clush access to all clients and cluster nodes
# ssh to cluster node now used - no dependence on maprcli on client nodes
HANA=hana # Volume and directory names base
################################################################################
# SET VARIABLE VALUES IN THIS SECTION
################################################################################
# 1. Set CLUSTER to the cluster name of a cluster and CLUSTERNODE to any MapR node on the cluster
# $CLUSTER MUST be the cluster name in /opt/mapr/conf/mapr-clusters.conf AND the clush group name
# for all cluster nodes.
CLUSTERNAME=hec2_cluster; CLUSTERNODE=lnvmaprc1n01
#CLUSTER=scale62 ; CLUSTERNODE=scale-64
# 2. Set CLUSH_CLIENTS to the clush group name of the client nodes
CLUSH_CLIENTS_GRP=hana
#CLUSH_CLIENTS_GRP=hana_x16
CLUSH_DATANODES_GRP=hec2_cluster # Just data nodes (not including control nodes)
#CLUSH_DATANODES_GRP=hec2_cluster_x8 # Just data nodes (not including control nodes)
CLUSH_CLUSTER_GRP=hec2_cluster # Entire cluster (including control nodes)
# STEP 3 No longer necessary
# 3. Set client numbering. Hana volumes will be created for each client with client number
#STARTCLIENT=1; ENDCLIENT=16
#STARTCLIENT=7; ENDCLIENT=18
#STARTCLIENT=67; ENDCLIENT=67
# 4. Set the desired number of master containers for calculating data to be dd'd
#MASTER_CNTRS_PER_SP=3 # precreate_containers() master containers per storage pool per volume
MASTER_CNTRS_PER_SP=3
# 5. Set MapR replication for volume creation
REPL=3
MINREPL=2
# 6. a) Set a small container size for pre-creation of containers
# b) Once container size has been set, comment out following 2 lines and re-run script
##### Once container size has been set, comment out these 2 lines and run script
#ssh $CLUSTERNODE maprcli config save -values '{"cldb.container.sizemb":"1024"}'
#clush -B -w @$CLUSH_CLUSTER_GRP systemctl restart mapr-warden ; echo "After cluster restart, comment out section 6 in script and re-run"; exit
#### NO LONGER NECESSARY. NOT RUNNING SSH PER CLIENT ####
# 7. IF TOTAL CLIENTS IS >10, update /etc/ssh/sshd_config for 2x total clients and
# restart sshd on CLUSTERNODE (systemctl restart sshd).
# If there are 20 clients, set MaxSessions and first var of MaxStartups to 40.
## grep MaxS /etc/ssh/sshd_config
##MaxSessions 10
#MaxSessions 40
##MaxStartups 10:30:100
#MaxStartups 40:30:100
################################################################################
# Create space separated list of client hostname numbers without leading zeros
# extglob enables the +([0-9]) and *(0) patterns used in the expansion below:
# strip the non-numeric hostname prefix, then strip leading zeros.
shopt -s extglob
CLIENT_NUMS=( $(for NODE in $(nodeset -e @$CLUSH_CLIENTS_GRP); do NUM=${NODE##${NODE%%+([0-9])}}; echo -n "${NUM##*(0)} " ; done) )
# Set up .cluster_env to be sourced for environment variables on subsequent clush calls
#let NUM_CLIENTS=ENDCLIENT-STARTCLIENT+1
let NUM_CLIENTS=${#CLIENT_NUMS[@]}
rm -f /tmp/.cluster_env
echo "export CLUSTERNAME=$CLUSTERNAME" >> /tmp/.cluster_env
echo "export CLIENT_NUMS=( ${CLIENT_NUMS[@]} )" >> /tmp/.cluster_env
# NOTE(review): STARTCLIENT/ENDCLIENT are only set in the commented-out
# step 3 above, so these two exports are normally empty; they are kept for
# the unused *_old functions below.
echo "export STARTCLIENT=$STARTCLIENT" >> /tmp/.cluster_env
echo "export ENDCLIENT=$ENDCLIENT" >> /tmp/.cluster_env
echo "export NUM_CLIENTS=$NUM_CLIENTS" >> /tmp/.cluster_env
echo "export HANA=$HANA" >> /tmp/.cluster_env
# Copy (-c) the env file to every client node and to the cluster node.
clush -c -w @$CLUSH_CLIENTS_GRP /tmp/.cluster_env
clush -c -w $CLUSTERNODE /tmp/.cluster_env
# OBSOLETE (not called below): per-client removal using one ssh per maprcli
# call; superseded by remove_vols_per_client which loops on the remote side.
remove_vols_per_client_old() {
echo $FUNCNAME
for i in $(eval echo {$STARTCLIENT..$ENDCLIENT}) ; do
echo -n "$i "
ssh $CLUSTERNODE maprcli volume remove -cluster $CLUSTERNAME -name ${HANA}.log.n$i &
ssh $CLUSTERNODE maprcli volume remove -cluster $CLUSTERNAME -name ${HANA}.data.n$i &
done
wait
echo ""
}
# Remove the per-client log and data volumes for every client number.
# One ssh per volume type; the remote loop backgrounds each remove and waits.
remove_vols_per_client() {
echo $FUNCNAME
for V in log data ; do
printf "%5s: " $V
ssh $CLUSTERNODE "for i in ${CLIENT_NUMS[@]}; do echo -n \"\$i \"; maprcli volume remove -cluster $CLUSTERNAME -name ${HANA}.${V}.n\$i & done; wait"
echo ""
done
}
# Remove the shared top-level hana volumes (log/data/shared/backup) and root.
remove_top_level_volumes() {
echo $FUNCNAME
for V in log data shared backup; do
#for V in data ; do
ssh $CLUSTERNODE maprcli volume remove -cluster $CLUSTERNAME -name ${HANA}.$V
done
ssh $CLUSTERNODE maprcli volume remove -cluster $CLUSTERNAME -name ${HANA}
}
# Create the shared top-level volumes: log gets low_latency replication,
# data/shared/backup get high_throughput; then disable compression on
# log/data and enable network encryption on all.
create_top_level_volumes() {
echo $FUNCNAME
ssh $CLUSTERNODE maprcli volume create -cluster $CLUSTERNAME -name ${HANA} -path /apps/${HANA} -replication $REPL -minreplication $MINREPL
ssh $CLUSTERNODE maprcli volume create -cluster $CLUSTERNAME -name ${HANA}.log -path /apps/${HANA}/log -replicationtype low_latency -replication $REPL -minreplication $MINREPL
for V in data shared backup; do
#for V in data ; do
ssh $CLUSTERNODE maprcli volume create -cluster $CLUSTERNAME -name ${HANA}.$V -path /apps/${HANA}/$V -replicationtype high_throughput -replication $REPL -minreplication $MINREPL
done
# for V in log data; do
for V in log data; do
hadoop mfs -setcompression off /mapr/$CLUSTERNAME/apps/${HANA}/$V
done
for V in " " /log /data /shared /backup; do
hadoop mfs -setnetworkencryption on /mapr/$CLUSTERNAME/apps/${HANA}$V
done
}
# Create one log and one data volume per client (n<client-number>), then
# turn compression off and network encryption on for each.
create_vols_per_client() {
echo $FUNCNAME
for V in log data ; do
[[ $V = "log" ]] && REPTYPE=low_latency
[[ $V = "data" ]] && REPTYPE=high_throughput
printf "%5s: " $V
ssh $CLUSTERNODE "for i in ${CLIENT_NUMS[@]}; do echo -n \"\$i \"; maprcli volume create -cluster $CLUSTERNAME -name ${HANA}.${V}.n\$i -path /apps/${HANA}/$V/n\$i -replicationtype $REPTYPE -replication $REPL -minreplication $MINREPL & done; wait"
echo ""
done
echo "set compression off and encryption on "
for V in log data ; do
printf "%5s: " $V
for i in ${CLIENT_NUMS[@]} ; do
echo -n "$i "
hadoop mfs -setcompression off /mapr/$CLUSTERNAME/apps/${HANA}/$V/n$i &
hadoop mfs -setnetworkencryption on /mapr/$CLUSTERNAME/apps/${HANA}/$V/n$i &
done
wait
echo " "
done
}
# OBSOLETE (not called below): per-client creation driven by the old
# STARTCLIENT..ENDCLIENT numbering scheme.
create_vols_per_client_old() {
echo $FUNCNAME
echo -n "create volume "
for i in $(eval echo {$STARTCLIENT..$ENDCLIENT}) ; do
echo -n "$i "
ssh $CLUSTERNODE maprcli volume create -cluster $CLUSTERNAME -name ${HANA}.log.n$i -path /apps/${HANA}/log/n$i -replicationtype low_latency -replication $REPL -minreplication $MINREPL &
ssh $CLUSTERNODE maprcli volume create -cluster $CLUSTERNAME -name ${HANA}.data.n$i -path /apps/${HANA}/data/n$i -replicationtype high_throughput -replication $REPL -minreplication $MINREPL &
done
wait
echo ""
echo -n "set compression off and encryption on "
for i in $(eval echo {$STARTCLIENT..$ENDCLIENT}) ; do
echo -n "$i "
hadoop mfs -setcompression off /mapr/$CLUSTERNAME/apps/${HANA}/log/n$i &
hadoop mfs -setcompression off /mapr/$CLUSTERNAME/apps/${HANA}/data/n$i &
hadoop mfs -setnetworkencryption on /mapr/$CLUSTERNAME/apps/${HANA}/log/n$i &
hadoop mfs -setnetworkencryption on /mapr/$CLUSTERNAME/apps/${HANA}/data/n$i &
done
wait
echo ""
}
# Add a pio/ subdirectory under every chunk-size directory of every
# per-client log/data volume (directories made by create_chunkdirs_per_vol).
create_piodir_per_chunkdir() {
echo $FUNCNAME
#for i in $(eval echo {$STARTCLIENT..$ENDCLIENT}) ; do
for i in ${CLIENT_NUMS[@]} ; do
echo -n "$i "
for CHUNKMB in 2 4 8 16 32 64 256; do
for V in data log ; do
NEWDIR="/mapr/$CLUSTERNAME/apps/${HANA}/$V/n$i/chunk${CHUNKMB}MB"
mkdir $NEWDIR/pio &
done
done
done
wait
echo ""
}
# For each per-client volume, create a chunk<N>MB directory per chunk size
# and set the MapR-FS chunk size attribute on it, then add pio/ dirs.
create_chunkdirs_per_vol() {
echo $FUNCNAME
#for i in $(eval echo {$STARTCLIENT..$ENDCLIENT}) ; do
for i in ${CLIENT_NUMS[@]} ; do
echo $i
for CHUNKMB in 2 4 8 16 32 64 256; do
for V in data log ; do
NEWDIR="/mapr/$CLUSTERNAME/apps/${HANA}/$V/n$i/chunk${CHUNKMB}MB"
mkdir $NEWDIR
let CHUNKBYTES=$CHUNKMB*1024*1024
echo hadoop mfs -setchunksize $CHUNKBYTES $NEWDIR
hadoop mfs -setchunksize $CHUNKBYTES $NEWDIR &
done
done
done
wait
create_piodir_per_chunkdir
}
# Create a link so the same path on each client from /home/mapr goes to a separate volume
# Assume last two characters of client hostname are numeric host number
create_link_per_client() {
echo $FUNCNAME
:
clush -B -w @$CLUSH_CLIENTS_GRP '. /tmp/.cluster_env; \
H=$(hostname -s); \
H=${H: -2}; H=${H#0}; \
for V in data log; do \
[[ ! -d /home/mapr/$CLUSTERNAME/apps/${HANA} ]] && mkdir -p /home/mapr/$CLUSTERNAME/apps/${HANA}; \
rm -f /home/mapr/$CLUSTERNAME/apps/${HANA}/$V; \
ln -s /mapr/$CLUSTERNAME/apps/${HANA}/$V/n$H /home/mapr/$CLUSTERNAME/apps/${HANA}/$V; \
done'
}
# Pre-create MapR master containers by writing (then deleting) dd files in
# every volume. Sizing: containers-per-volume = storage-pools x
# MASTER_CNTRS_PER_SP x data nodes; that many MB (at the current
# cldb.container.sizemb) is written per volume, split over NUM_DDS dd jobs.
precreate_containers() {
echo $FUNCNAME
# MASTER_CNTRS_PER_SP=3 # Number of master containers per storage pool per volume
NUMSPS=$(ssh $CLUSTERNODE /opt/mapr/server/mrconfig sp list | grep path | wc -l)
MB_PER_CNTR=$(ssh $CLUSTERNODE maprcli config load -noheader -keys cldb.container.sizemb)
# Use clush group rather than servers running fileserver in case cldb nodes are NOT data nodes
#NUM_NODES=$(ssh $CLUSTERNODE 'maprcli node list -filter "[svc==fileserver]" -columns hostname -noheader | wc -l' )
NUM_NODES=$(nodeset -c @$CLUSH_DATANODES_GRP)
#CNTRS_PER_VOL=$(echo $NUMSPS*$MASTER_CNTRS_PER_SP | bc)
#CNTRS_PER_VOL=$(echo $NUMSPS*$MASTER_CNTRS_PER_SP*$NUM_NODES | bc)
let CNTRS_PER_VOL=NUMSPS*MASTER_CNTRS_PER_SP*NUM_NODES
#MB_PER_VOL=$(echo $MB_PER_CNTR*$CNTRS_PER_VOL | bc)
let MB_PER_VOL=MB_PER_CNTR*CNTRS_PER_VOL
NUM_DDS=10
#MB_PER_DD=$(echo $MB_PER_VOL/$NUM_DDS | bc)
let MB_PER_DD=$MB_PER_VOL/$NUM_DDS
# Append the dd sizing to the env file and re-distribute it to the clients.
echo "export NUM_DDS=$NUM_DDS" >> /tmp/.cluster_env
echo "export MB_PER_DD=$MB_PER_DD" >> /tmp/.cluster_env
clush -c -w @$CLUSH_CLIENTS_GRP /tmp/.cluster_env
echo MASTER_CNTRS_PER_SP=$MASTER_CNTRS_PER_SP
echo NUMSPS=$NUMSPS
echo MB_PER_CNTR=$MB_PER_CNTR
echo NUM_NODES=$NUM_NODES
echo CNTRS_PER_VOL=$CNTRS_PER_VOL
echo MB_PER_VOL=$MB_PER_VOL
echo NUM_DDS=$NUM_DDS
echo MB_PER_DD=$MB_PER_DD
echo Total Root Data = $MB_PER_VOL MB
echo Total Root Log = $MB_PER_VOL MB
echo Total Root Backup = $MB_PER_VOL MB
echo Total Client Data = $(let x=$MB_PER_VOL*$NUM_CLIENTS ; echo $x) MB
echo Total Client Log = $(let x=$MB_PER_VOL*$NUM_CLIENTS ; echo $x) MB
# Create dd directories
#hadoop mfs -setcompression off /mapr/$CLUSTERNAME/apps/${HANA}/$V/n$H/dd ;
hadoop mfs -setcompression off /mapr/$CLUSTERNAME/apps/${HANA}/backup ;
hadoop mfs -setnetworkencryption on /mapr/$CLUSTERNAME/apps/${HANA}/backup ;
# On every client: dd into the shared top-level volumes (size split across
# clients), remove those files, then dd into the client's own volumes via
# the /home/mapr links and remove those too. The writes force container
# allocation; the data itself is discarded.
clush -B -w @$CLUSH_CLIENTS_GRP '. /tmp/.cluster_env; \
H=$(hostname -s); \
H=${H: -2}; H=${H#0}; \
let B_MB_PER_DD=$MB_PER_DD/$NUM_CLIENTS ; \
for V in backup data log ; do \
for i in $(eval echo {1..$NUM_DDS}) ; do \
dd if=/dev/zero of=/mapr/$CLUSTERNAME/apps/${HANA}/$V/dd.${B_MB_PER_DD}MB.node$H.$i bs=1024k count=$B_MB_PER_DD &
done; \
done; \
wait; \
for V in backup data log ; do \
rm -rf /mapr/$CLUSTERNAME/apps/${HANA}/$V/dd*node$H.* &
done; \
wait ; \
for V in data log; do \
mkdir /mapr/$CLUSTERNAME/apps/${HANA}/$V/n${H}/dd; \
for i in $(eval echo {1..$NUM_DDS}) ; do \
dd if=/dev/zero of=/home/mapr/$CLUSTERNAME/apps/${HANA}/$V/dd/dd.${MB_PER_DD}MB.$i bs=1024k count=$MB_PER_DD &
done; \
wait; \
rm -rf /home/mapr/$CLUSTERNAME/apps/${HANA}/$V/dd/* ; \
done; \
'
hadoop mfs -setcompression on /mapr/$CLUSTERNAME/apps/${HANA}/backup ;
}
# Clean up any leftover dd files in the per-client dd directories.
remove_dd_files() {
clush -B -w @$CLUSH_CLIENTS_GRP '. /tmp/.cluster_env; \
for V in data log; do \
rm -rf /home/mapr/$CLUSTERNAME/apps/${HANA}/$V/dd/* ; \
done; \
'
}
# Clean up any leftover pio test files in the chunk-size directories.
remove_pio_files() {
echo $FUNCNAME
clush -B -w @$CLUSH_CLIENTS_GRP '. /tmp/.cluster_env; \
for CHUNKMB in 2 4 8 16 32 64 256; do \
for V in data log ; do \
rm -f /home/mapr/$CLUSTERNAME/apps/${HANA}/$V/chunk${CHUNKMB}MB/pio/* ; \
done; \
done; \
'
}
# get container count for each volume
# OBSOLETE (not called below): uses the old STARTCLIENT..ENDCLIENT numbering.
get_container_count_old() {
echo $FUNCNAME
ssh $CLUSTERNODE '. /tmp/.cluster_env; for V in log data; do for i in $(eval echo {$STARTCLIENT..$ENDCLIENT}) ; do echo -n "${HANA}.${V}.n$i: "; /opt/mapr/server/mrconfig info containerlist ${HANA}.$V.n$i | wc -l; done; done'
}
# Report the container count of every per-client log/data volume.
get_container_count() {
echo $FUNCNAME
echo $CLUSTERNODE
ssh $CLUSTERNODE '. /tmp/.cluster_env; for V in log data; do for i in ${CLIENT_NUMS[@]} ; do echo -n "${HANA}.${V}.n$i: "; /opt/mapr/server/mrconfig info containerlist ${HANA}.$V.n$i | wc -l; done; done'
}
# Main sequence: rebuild the volume layout from scratch, pre-create
# containers, clean up scratch files, then report per-volume counts.
remove_vols_per_client
remove_top_level_volumes
create_top_level_volumes
create_vols_per_client
create_chunkdirs_per_vol
create_link_per_client
precreate_containers
remove_dd_files
remove_pio_files
get_container_count
# Remind the operator to restore the default container size afterwards
# (step 6 above temporarily shrinks it for pre-creation).
echo "Current container size: $(ssh $CLUSTERNODE 'maprcli config load -json | grep container.sizemb')"
echo "Run these commands to reset to default value 32768:"
printf "%s%s\n" "ssh $CLUSTERNODE maprcli config save -values {" '"cldb.container.sizemb":"32768"}'
echo "clush -B -w@$CLUSH_CLUSTER_GRP service mapr-warden restart"
| true
|
52d2d8b9e187c393add130a629a60b4124dcbf2a
|
Shell
|
pnarvor/ubuntu-config
|
/bash_aliases
|
UTF-8
| 977
| 3.359375
| 3
|
[] |
no_license
|
# This cleanly add a path to either an empty or an already filled environement
# variable.
# Cleanly add a path entry to the FRONT of an environment variable, with
# no stray ":" when the variable starts out empty or unset.
#   $1 - variable name (indirect), $2 - entry to prepend
prepend-path() {
    local variable=$1
    if [ -z "${!variable}" ]; then
        # Quoted export argument: safe for values containing spaces
        # (the original unquoted `export $1=$2` word-split such values).
        export "$1=$2"
    else
        export "$1=$2:${!variable}"
    fi
}
# Cleanly add a path entry to the END of an environment variable, avoiding
# a stray ":" when the variable starts out empty or unset.
#   $1 - variable name (indirect), $2 - entry to append
append-path() {
    local name=$1
    local entry=$2
    if [ -n "${!name}" ]; then
        export $name="${!name}":$entry
    else
        export $name=$entry
    fi
}
# NOTE(review): tmux subcommands use hyphens ("list-sessions");
# "list_sessions" with an underscore looks like a typo — confirm.
alias tmuxl='tmux list_sessions'
# Long listing, all files, human-readable sizes, classify entries.
alias ll='ls -alFh'
# Use git's diff (colors, renames) even outside a repository.
alias diff='git diff --no-index'
# Compare the directory layout of two trees ($1, $2) by diffing their
# `tree` listings.
# BUGFIX: the original wrote fixed files ~/tree1.txt / ~/tree2.txt, which
# collide between concurrent invocations; use mktemp instead.
function diff-dirs {
    local listing1 listing2
    listing1=$(mktemp) || return 1
    listing2=$(mktemp) || return 1
    tree "$1" > "$listing1"
    tree "$2" > "$listing2"
    diff "$listing1" "$listing2"
    rm -f "$listing1" "$listing2"
}
# diff-files DIR1 DIR2
# Compare the concatenated contents of all regular files under two
# trees. NOTE: ordering comes from find's traversal, so the result is
# only meaningful when both trees yield files in the same relative order.
function diff-files {
	find "$1" -type f -exec cat {} + > ~/files1.txt
	find "$2" -type f -exec cat {} + > ~/files2.txt
	diff ~/files1.txt ~/files2.txt
	rm ~/files1.txt ~/files2.txt
}
# temp-monitor
# Print `sensors` output once per second until interrupted (Ctrl-C).
temp-monitor() {
    while true; do sensors; sleep 1; done
}
# SSH shortcut to the "dewa" host.
alias dewa='ssh arv@62.210.75.34'
# Always drop into an interactive session after the given script runs.
alias ipython3='ipython3 -i'
| true
|
54e14cbff1190b1a8fe3163dd39889a245746e64
|
Shell
|
yk0817/dotfiles
|
/fish/sync.sh
|
UTF-8
| 467
| 2.65625
| 3
|
[] |
no_license
|
# Switch the login shell to fish, symlink the dotfiles-managed fish and
# fisher configuration into ~/.config, then bootstrap the fisher plugin
# manager and start a fish login shell.
# https://qiita.com/nakagawa1017/items/a11599938a6cb0db0dad
echo "デフォルトシェルを fish に変更"
chsh -s /usr/local/bin/fish
echo "fish系"
ln -sf ~/dotfiles/fish ~/.config/fish
echo "fishパッケージマネージャーfisherで管理されているもの"
ln -sf ~/dotfiles/fisher ~/.config/fisher
echo "fisher install"
curl https://git.io/fisher --create-dirs -sLo ~/dotfiles/fish/functions/fisher.fish
echo "start login shell..."
# NOTE(review): `fish -l` blocks until that login shell exits, so the
# trailing `fisher` then runs in *this* (non-fish) shell where fisher is
# not a command — it was presumably meant to run inside fish; confirm.
fish -l
fisher
| true
|
9818e2c37bc09dc69b9c62ac83b9b11d06b1def4
|
Shell
|
Shashi-Prakash95/codeinClub
|
/practice/patterns/regexEx2.sh
|
UTF-8
| 194
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# 1: check whether the string read from stdin is made of at least 3
# "letter groups" (optional digits followed by one ASCII letter), with
# optional trailing digits — i.e. >= 3 letters, digits allowed, no
# special characters. Prints "yes" or "no".
# (Removed the leftover debug override `any="aaa1"`, which discarded
# the value just read, and the `-x` trace flag from the shebang.)
read -r any
pat="^([0-9]*[a-zA-Z]){3,}[0-9]*$"
if [[ $any =~ $pat ]]; then
    echo yes
else
    echo no
fi
| true
|
8b85d34e849398d3b26e04279f59a7e476a9280c
|
Shell
|
FauxFaux/debian-control
|
/l/lprng/lprng_3.8.B-2.1_amd64/config
|
UTF-8
| 451
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
# Debconf "config" script for lprng: asks whether lpq/lprm/lpr should be
# setuid and whether lpd should be started at boot.
set -e
# source debconf library.
. /usr/share/debconf/confmodule
# lpq, lprm lpr setuid?
# Low priority: most installs accept the default without being asked.
db_input low lprng/setuid_tools || true
db_go
# Check for default file, if it is there then set the debconf value
# Seed the debconf answer from the existing /etc/default/lprng so a
# reconfigure reflects the administrator's current START_LPD setting.
default_start_lpd="true"
if [ -f /etc/default/lprng ]; then
grep -q 'START_LPD=yes' /etc/default/lprng || default_start_lpd="false"
db_set lprng/start_lpd $default_start_lpd
fi
db_input medium lprng/start_lpd || true
db_go
| true
|
6faca4063a4e631904d4f60fe51e7d9fb3e9ea9c
|
Shell
|
rfajta/.custom
|
/bin/diffthem.sh
|
UTF-8
| 267
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# comp CEFILE BASEDIR
# Normalise a path under common/coreengine: strip "/./" segments and
# collapse "//" into "/". The result is left in the global ceFile.
# NOTE(review): the unused second argument and the dangling `coreFile=`
# assignment suggest this function was meant to compare against $2 but
# was never finished — confirm intent before relying on it.
comp() {
	ceFile="$(echo "common/coreengine/$1" | sed -e "s/\/\.\//\//g ; s/\/\//\//g")"
	coreFile=
}
# Collect every regular file under common/coreengine (relative paths),
# then run comp against the core and render trees.
cd common/coreengine || exit 1
allFiles=$(find . -type f)    # was `find . -tye f`, a typo that made find fail
cd - > /dev/null
for ceFile in $allFiles
do
	comp "$ceFile" "common/core"
	comp "$ceFile" "common/render"
done
| true
|
2d56a228e8a0db1e888f5c9bf05d6b124c30aeb9
|
Shell
|
vcctr/NoMouseWheelZoom
|
/build/build.sh
|
UTF-8
| 246
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Package the extension: stage the three shipped files into a scratch
# directory, zip them, then remove the scratch directory.
# (Fixed the broken shebang: the original first line was `!#/bin/bash`,
# which is not a shebang at all.)
deliverable="deliverable"
output="NoMouseWheelZoom.zip"
rm -Rf "$deliverable"
mkdir "$deliverable"
# Paths are relative to this build/ directory.
cp ../contentscript.js ../manifest.json ../NoMouseWheelZoom.min.js "$deliverable"
zip -r "$output" "$deliverable"/*
rm -Rf "$deliverable"
| true
|
7a02fb2eac6517eddc98992946408c7c48545cdb
|
Shell
|
maobuji/go-package-plantuml
|
/goplantuml/install.sh
|
UTF-8
| 326
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build go-package-plantuml and fetch plantuml.jar if it is not already
# present. Requires GOPATH to be set.
if [ ! -n "$GOPATH" ]; then
    echo "GOPATH IS NULL" >&2
    # Fixed: this error path previously did `exit 0`, reporting success
    # even though nothing was built.
    exit 1
else
    echo "GOPATH=$GOPATH"
fi
rm -rf go-package-plantuml
go build github.com/maobuji/go-package-plantuml
if [ -f "plantuml.jar" ]
then
    echo "plantuml.jar exist"
else
    # PlantUML renderer used to turn generated diagrams into images.
    wget https://jaist.dl.sourceforge.net/project/plantuml/plantuml.jar
fi
| true
|
c566492d0306e988f26984453c1a959d75eb85ec
|
Shell
|
jami/sqlpile
|
/sqlpile.sh
|
UTF-8
| 4,582
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# sqlpile — compose numbered migration files (NNN-name.sql) from a
# folder into one SQL file and feed it to the database, tracking which
# files were already applied in a per-folder .sqlpile history file.
VERSION="1.0.0"
dbuse=1
dbuser="sqlpile"
dbpass=
dbname=
verbose=0
testing=0
# Name of the shell function used to import the composed SQL; swapped
# to `nop` when running in --test mode.
dbfuncimport="mysqlimport"
# History file name (relative to the working folder) and its in-memory
# contents: the list of already-applied migration file names.
history=".sqlpile"
historylist=( )
# opmode is set by the option parser to all/append/scaffold; 0 = unset.
opmode=0
opdir=`pwd`
opoutput="compose.sql"
historyfile="${opdir}/${history}"
# mysqlimport DUMPFILE
# Feed DUMPFILE to mysql using the configured credentials.
function mysqlimport {
    local dumpfile="$1"
    # Fixed: the command was wrapped in backticks, which captured
    # mysql's stdout and then tried to *execute* it as a command.
    mysql -u "${dbuser}" -p${dbpass} < "${dumpfile}"
}
# nop [ARGS...]
# Do-nothing importer used by --test mode; always succeeds.
function nop {
    true
}
# echo_v MSG
# Print MSG only when verbose mode is enabled ($verbose == 1).
function echo_v {
    # Quoted: the original `[ $verbose == "1" ]` raised a test error
    # whenever $verbose was empty or unset.
    if [ "$verbose" == "1" ]; then
        echo "$1"
    fi
}
# readhistoryfile
# Load $historyfile (if non-empty) into the historylist array, keeping
# only entries whose file still exists under $opdir. IFS is temporarily
# set to newline so whole lines land in the array unsplit.
function readhistoryfile {
if [ -s "$historyfile" ]; then
echo_v "read history $historyfile"
local tmp=$IFS
IFS=$'\n'
# NOTE(review): `read` without -r mangles backslashes in file names,
# and `historylist+=( $file )` relies on the newline IFS to stay whole.
while read file; do
if [ -f "${opdir}/${file}" ]; then
historylist+=( $file )
fi
done < "$historyfile"
IFS=$tmp
fi
}
# historycontains KEY
# Echo "true" and return 1 if KEY is already in historylist, otherwise
# echo "false" and return 0. Callers compare the *echoed* value, not
# the return status (note the inverted-looking codes).
function historycontains {
    local key=$1
    local value
    # Quoted expansion: the original unquoted ${historylist[@]} split
    # entries containing spaces into multiple words.
    for value in "${historylist[@]}"; do
        if [ "$value" == "$key" ]; then
            echo "true"
            return 1
        fi
    done
    echo "false"
    return 0
}
# writehistoryfile
# Persist historylist back to $historyfile, one entry per line.
# Skipped entirely in --test mode so dry runs never touch the history.
function writehistoryfile {
if [ $testing == "1" ]; then
return 0
fi
local tmp=$IFS
IFS=$'\n'
for file in "${historylist[@]}"; do
echo "$file"
done > "$historyfile"
IFS=$tmp
}
# deploy
# Main work. In scaffold mode, create the empty numbered template files
# and return. Otherwise collect the NNN-*.sql files in $opdir (all of
# them, or only ones not yet in the history), concatenate them into
# $opoutput, and hand the result to the configured import function.
function deploy {
    local output="${opdir}/${opoutput}"
    local outputbuffer=""
    if [ "${opmode}" == "scaffold" ]; then
        local sffiles=( "000-cleaning.sql" "100-structure.sql" "200-modify.sql" "300-constraints.sql" "400-data.sql" )
        for f in "${sffiles[@]}"; do
            if [ ! -s "${opdir}/${f}" ]; then
                echo_v "creating scaffold file ${opdir}/${f}"
                touch "${opdir}/${f}"
            fi
        done
        return 0
    fi
    # NNN-name.sql files, sorted so they compose in numeric order.
    # NOTE: word-splitting $files means names with whitespace break.
    local files=`find "$opdir" -maxdepth 1 -regextype posix-egrep -regex '.*[0-9]{3}\-[^\/]*\.sql$' | sort`
    for f in $files; do
        local filename=$(basename "$f")
        local new=0
        if [ $(historycontains "$filename") == "false" ]; then
            historylist+=( "$filename" )
            new=1
        fi
        # (The appended variable below was redacted as "$(unknown)" in
        # this copy; reconstructed as ${filename} from context.)
        case "$opmode" in
            append )
                # --new mode: only files not seen before contribute.
                if [ $new -eq 1 ]; then
                    echo_v "append ${filename}"
                    outputbuffer+=$(cat "${opdir}/${filename}")
                    outputbuffer+=$'\n'
                fi
                ;;
            all )
                echo_v "append ${filename}"
                outputbuffer+=$(cat "${opdir}/${filename}")
                outputbuffer+=$'\n'
                ;;
        esac
    done
    echo_v "write composer sql ${output}"
    echo "${outputbuffer}" > "${output}"
    ${dbfuncimport} "${output}"
}
# showhelp
# Print usage to stdout. (echo -e interprets the \t escapes.)
function showhelp {
echo "sqlpile [OPTION] [FOLDER]"
echo "Helper to migrated and deploy sql"
echo -e " -a, --all\t\tuse all sql files"
echo -e " -n, --new\t\tuse new sql files"
echo -e " -t, --test\t\ttest only. no use of sql driver"
echo -e " -c, --create\t\tcreates a sql file scaffold in the folder"
echo -e " -v, --verbose\t\tverbose"
echo -e " -u, --user\t\tdatabase username"
echo -e " -p, --password\t\tdatabase password"
echo -e " -o, --output\t\tcomposer output filename"
}
# Parse options with util-linux getopt (supports the long forms above),
# then re-set the positional parameters to the normalised result.
OPT=$(getopt -o o:u:p:cantv -l "output:,user:,password:,create,all,new,test,verbose" -n "sqlpile.sh" -- "$@")
eval set -- "$OPT"
while true; do
case "$1" in
-u|--user )
dbuser=$2
shift 2
;;
-p|--password )
dbpass=$2
shift 2
;;
-a|--all )
# NOTE(review): this unconditional echo looks like leftover debugging.
echo "set opmode all"
opmode="all"
shift
;;
-n|--new )
opmode="append"
shift
;;
-c|--create )
opmode="scaffold"
shift
;;
-o|--output )
opoutput=$2
shift 2
;;
-t|--test )
# --test: swap the importer for the no-op and skip history writes.
dbfuncimport="nop"
testing=1
shift
;;
-v|--verbose )
verbose=1
shift
;;
--)
shift
;;
*)
# First non-option longer than one char is taken as the work folder;
# then stop parsing.
if [ ${#1} -gt 1 ]; then
opdir="$1"
fi
shift 1
break
;;
esac
done;
echo_v "sqlpile $VERSION"
echo_v "working folder: $opdir"
echo_v "filter mode: $opmode"
# opmode is still its initial 0 when none of -a/-n/-c was given.
if [ "$opmode" == "0" ]; then
echo "Either -a, -n or -c is required"
showhelp
exit 1
fi
# Canonicalise the folder and recompute the history path inside it.
opdir=`readlink -f "$opdir"`
if [ ! -d "$opdir" ]; then
echo "$opdir is not a directory"
exit 1
fi
historyfile="${opdir}/${history}"
readhistoryfile
deploy
writehistoryfile
| true
|
521ff27020a2e5f8b3a5981f424784d903cb37af
|
Shell
|
colona/config
|
/.tools.statusbar.sh
|
UTF-8
| 542
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/sh
# One status-bar line: temperature, battery, CPU, load, memory, clock.
# NOTE(review): the ${var//...} substitutions below are bashisms under
# a /bin/sh shebang — works only where /bin/sh is bash; confirm.
realcpu="$(~/.tools.realcpu.sh 3)"
# 1/5/15-minute load averages, space separated.
cpuload="$(read l1 l2 l3 __ < /proc/loadavg; echo $l1 $l2 $l3)"
date="$(date +'%a %d %b %H:%M')"
if [ -x "$(command -v sensors)" ]; then
# Extract whole-degree readings like "42°" from sensors output, then
# join the per-core lines by stripping newlines.
temp="$(sensors | sed -rn '/temp1|Core/s/.*:\s+\+(\w+)\.\w+(°).*/\1\2/p')"
temp="${temp//[$'\n']}"
fi
if [ -x "$(command -v acpi)" ]; then
# "[=NN%]" when on AC (the '=' marks charging), "[NN%]" on battery.
batt="$(acpi -b | cut -d ' ' -f 4)"
batt=" [$(acpi -a | grep -q on-line && echo '=')${batt//[$'\n',]}]"
fi
echo "${temp}${batt} ${realcpu} (${cpuload}) $(~/.tools.realmem.sh) $date"
sleep 1
| true
|
90bfc75e576435a31919b0ca3dfb8e0575bb7b7b
|
Shell
|
Duslerke/File-Composition-test
|
/bricker.sh
|
UTF-8
| 477
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Splice the contents of codeToInsert.sh into insertTarget.sh, replacing
# the marker region delimited by "#>> ... <<#", then run the result.
# The inner sed flattens codeToInsert.sh to a single \n-escaped string
# so it can be used as a sed replacement.
#do the inserting
sed -i -E "s~$(grep -oP '\#\>\>.+\<\<\#' ./insertTarget.sh)~$(echo "$(cat .\/codeToInsert.sh)" | sed -E ':a;N;$!ba;s/\r{0,1}\n/\\n/g')~g" ./insertTarget.sh
#grep -oP --color=always '\#\>\>.+\<\<\#' ./insertTarget.sh #nice display, debug
#echo "$(cat .\/codeToInsert.sh)" | sed -E ':a;N;$!ba;s/\r{0,1}\n/\\n/g' #check if escaping works
# my marker: #>>codeToInsert<<#
# NOTE(review): the inserted code must not contain '~' or sed
# metacharacters, or the s~...~...~ command above breaks — confirm.
#test if it works
#echo "$(cat ./insertTarget.sh)"
./insertTarget.sh
| true
|
091483378adefbae9be21fc1b6279e0819ce227c
|
Shell
|
gouboft/MyCode
|
/shell/shell.sh
|
UTF-8
| 120
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Walk the entries of the current directory; for each directory, cd into
# it, print the working directory, and return.
# NOTE(review): `ls` output is word-split, so names with spaces break —
# a glob (for m in *) would be safer.
name=`ls`
echo "name = $name"
for m in $name
do
echo $m
if [ -d $m ]; then
cd $m
fi
# NOTE(review): pwd and `cd -` run even when no cd happened above, so
# for non-directory entries this prints the unchanged cwd and toggles
# $OLDPWD unexpectedly — confirm whether that is intended.
pwd
cd -
done
| true
|
1e43fae24522eb30c9f890020981c084780a3bde
|
Shell
|
zbw/marc2kbart
|
/edwardElgar_kxp/marc2kbart.sh
|
UTF-8
| 999
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Convert MARC records ($1) to tab-separated KBART via catmandu.
# Usage: marc2kbart.sh INPUT.mrc OUTPUT.tsv
echo "Records are being converted to KBART format."
# Convert records using fixes specified in file marc2kbart.fix
# Save records as records.csv
# (Single backslash-continued command: the --fields list enumerates the
# KBART column set in its canonical order.)
catmandu convert MARC --type RAW to CSV --fix marc2kbart.fix --fields publication_title,\
print_identifier,online_identifier,date_first_issue_online,\
num_first_vol_online,num_first_issue_online,date_last_issue_online,\
num_last_vol_online,num_last_issue_online,title_url,platform_url,\
platform,first_author,title_id,embargo_info,coverage_depth,notes,\
publisher_name,publication_type,date_monograph_published_print,date_monograph_published_online,\
monograph_volume,monograph_edition,first_editor,parent_publication_title_id,\
preceding_publiation_title_id,access_type,preceding_title_journal_id,\
journal_id,journal_title_history,monograph_parent_collection_title,\
zdb --sep_char '\t' < $1 > $2
# Count and display record count
# Each record carries one ="001"> control-field marker in the raw MARC.
resultCount=$(grep "=\"001\">" $1 | wc -l)
echo "Number of records successfully processed: "${resultCount}
| true
|
6bd64d902cec81b602850755874056ae8ac3e893
|
Shell
|
FangLiangQiang/tools
|
/dir_build.sh
|
UTF-8
| 482
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh
#**********************************************************
# Purpose: create a chain of nested directories.
# Usage: dir_build.sh N — creates N nested levels under the current
# directory; e.g. `dir_build.sh 3` produces ./F1/F2/F3.
# (Fixes: the shebang is now on line 1, where it previously sat below
# the comment block and had no effect; the bash-only `let i++` is
# replaced with POSIX arithmetic to match /bin/sh.)
#**********************************************************
dir_num=$1
i=1
while [ $i -le $dir_num ]
do
	dir=`pwd`
	mkdir "$dir/F$i"
	cd "./F$i"
	i=$((i+1))
done
#cp files ./    # copy payload files into the deepest directory if needed
echo `pwd`
ls
exit 0
| true
|
d347ff36999a8e22d13ee76a16cc5f39f6fe31a6
|
Shell
|
sbates130272/fio-stuff
|
/latency.sh
|
UTF-8
| 1,575
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
########################################################################
##
## Copyright 2015 PMC-Sierra, Inc.
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You may
## obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0 Unless required by
## applicable law or agreed to in writing, software distributed under the
## License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
## CONDITIONS OF ANY KIND, either express or implied. See the License for
## the specific language governing permissions and limitations under the
## License.
##
########################################################################
########################################################################
##
## Description:
##   A simple shell script to call the latency.fio script with enviroment
##   variables setup.
##
########################################################################
DIR=$(realpath $(dirname "$0"))
# common.sh supplies COMMON_OPTS, parse_common_opt, FILENAME, SCRIPT,
# run and post used below.
source $DIR/common.sh
# Histogram bins for post-processing; SKIP/CROP samples are dropped from
# the start/end of the latency log to remove ramp effects.
BINS=100
SKIP=10000
CROP=10000
export COUNT=100000
while getopts "${COMMON_OPTS}c:p" opt; do
    parse_common_opt $opt $OPTARG && continue
    case "$opt" in
	c) export COUNT=${OPTARG} ;;
	p) export FIOOPTS="--ioengine=pvsync2 --hipri"
	   export IOENGINE=pvsync2
	   ;;
    esac
done
export LAT_LOG=$(basename ${FILENAME})
# Collect extra samples so the requested COUNT remains after cropping.
export COUNT=$((${COUNT} + ${SKIP} + ${CROP}))
run
# Keep only the total-latency log; submission/completion logs are noise.
rm -f *_slat.*.log *_clat.*.log > /dev/null
mv ${LAT_LOG}_lat.1.log ${SCRIPT}.log
post -k ${CROP} -s ${SKIP} -b ${BINS}
| true
|
0c764e8980b87f3c37972c7a25ad2f46db51bcc9
|
Shell
|
pypa/manylinux
|
/build.sh
|
UTF-8
| 4,264
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build (and test) one manylinux/musllinux image for one policy/platform
# pair, selecting the base image and toolchain paths per POLICY, using
# the frontend named in MANYLINUX_BUILD_FRONTEND.
# Stop at any error, show all commands
set -exuo pipefail
if [ "${MANYLINUX_BUILD_FRONTEND:-}" == "" ]; then
	MANYLINUX_BUILD_FRONTEND="docker-buildx"
fi
# Export variable needed by 'docker build --build-arg'
export POLICY
export PLATFORM
# get docker default multiarch image prefix for PLATFORM
if [ "${PLATFORM}" == "x86_64" ]; then
	MULTIARCH_PREFIX="amd64/"
elif [ "${PLATFORM}" == "i686" ]; then
	MULTIARCH_PREFIX="i386/"
elif [ "${PLATFORM}" == "aarch64" ]; then
	MULTIARCH_PREFIX="arm64v8/"
elif [ "${PLATFORM}" == "ppc64le" ]; then
	MULTIARCH_PREFIX="ppc64le/"
elif [ "${PLATFORM}" == "s390x" ]; then
	MULTIARCH_PREFIX="s390x/"
else
	echo "Unsupported platform: '${PLATFORM}'"
	exit 1
fi
# setup BASEIMAGE and its specific properties
# Each policy pins a base distro plus the devtoolset paths that must be
# prepended to PATH/LD_LIBRARY_PATH inside the image.
if [ "${POLICY}" == "manylinux2014" ]; then
	if [ "${PLATFORM}" == "s390x" ]; then
		BASEIMAGE="s390x/clefos:7"
	else
		BASEIMAGE="${MULTIARCH_PREFIX}centos:7"
	fi
	DEVTOOLSET_ROOTPATH="/opt/rh/devtoolset-10/root"
	PREPEND_PATH="${DEVTOOLSET_ROOTPATH}/usr/bin:"
	if [ "${PLATFORM}" == "i686" ]; then
		LD_LIBRARY_PATH_ARG="${DEVTOOLSET_ROOTPATH}/usr/lib:${DEVTOOLSET_ROOTPATH}/usr/lib/dyninst"
	else
		LD_LIBRARY_PATH_ARG="${DEVTOOLSET_ROOTPATH}/usr/lib64:${DEVTOOLSET_ROOTPATH}/usr/lib:${DEVTOOLSET_ROOTPATH}/usr/lib64/dyninst:${DEVTOOLSET_ROOTPATH}/usr/lib/dyninst:/usr/local/lib64"
	fi
elif [ "${POLICY}" == "manylinux_2_28" ]; then
	BASEIMAGE="${MULTIARCH_PREFIX}almalinux:8"
	DEVTOOLSET_ROOTPATH="/opt/rh/gcc-toolset-12/root"
	PREPEND_PATH="${DEVTOOLSET_ROOTPATH}/usr/bin:"
	LD_LIBRARY_PATH_ARG="${DEVTOOLSET_ROOTPATH}/usr/lib64:${DEVTOOLSET_ROOTPATH}/usr/lib:${DEVTOOLSET_ROOTPATH}/usr/lib64/dyninst:${DEVTOOLSET_ROOTPATH}/usr/lib/dyninst"
elif [ "${POLICY}" == "musllinux_1_1" ]; then
	BASEIMAGE="${MULTIARCH_PREFIX}alpine:3.12"
	DEVTOOLSET_ROOTPATH=
	PREPEND_PATH=
	LD_LIBRARY_PATH_ARG=
elif [ "${POLICY}" == "musllinux_1_2" ]; then
	BASEIMAGE="${MULTIARCH_PREFIX}alpine:3.18"
	DEVTOOLSET_ROOTPATH=
	PREPEND_PATH=
	LD_LIBRARY_PATH_ARG=
else
	echo "Unsupported policy: '${POLICY}'"
	exit 1
fi
export BASEIMAGE
export DEVTOOLSET_ROOTPATH
export PREPEND_PATH
export LD_LIBRARY_PATH_ARG
# Arguments shared by all three frontends; the image is tagged with the
# commit SHA so CI runs are reproducible.
BUILD_ARGS_COMMON="
	--build-arg POLICY --build-arg PLATFORM --build-arg BASEIMAGE
	--build-arg DEVTOOLSET_ROOTPATH --build-arg PREPEND_PATH --build-arg LD_LIBRARY_PATH_ARG
	--rm -t quay.io/pypa/${POLICY}_${PLATFORM}:${COMMIT_SHA}
	-f docker/Dockerfile docker/
"
if [ "${CI:-}" == "true" ]; then
	# Force plain output on CI
	BUILD_ARGS_COMMON="--progress plain ${BUILD_ARGS_COMMON}"
	# Workaround issue on ppc64le
	if [ ${PLATFORM} == "ppc64le" ] && [ "${MANYLINUX_BUILD_FRONTEND}" == "docker" ]; then
		BUILD_ARGS_COMMON="--network host ${BUILD_ARGS_COMMON}"
	fi
fi
# Dispatch on the build frontend; buildx/buildkit use a local cache dir,
# staged to a separate path and promoted only after the tests pass.
if [ "${MANYLINUX_BUILD_FRONTEND}" == "docker" ]; then
	docker build ${BUILD_ARGS_COMMON}
elif [ "${MANYLINUX_BUILD_FRONTEND}" == "docker-buildx" ]; then
	docker buildx build \
		--load \
		--cache-from=type=local,src=$(pwd)/.buildx-cache-${POLICY}_${PLATFORM} \
		--cache-to=type=local,dest=$(pwd)/.buildx-cache-staging-${POLICY}_${PLATFORM} \
		${BUILD_ARGS_COMMON}
elif [ "${MANYLINUX_BUILD_FRONTEND}" == "buildkit" ]; then
	buildctl build \
		--frontend=dockerfile.v0 \
		--local context=./docker/ \
		--local dockerfile=./docker/ \
		--import-cache type=local,src=$(pwd)/.buildx-cache-${POLICY}_${PLATFORM} \
		--export-cache type=local,dest=$(pwd)/.buildx-cache-staging-${POLICY}_${PLATFORM} \
		--opt build-arg:POLICY=${POLICY} --opt build-arg:PLATFORM=${PLATFORM} --opt build-arg:BASEIMAGE=${BASEIMAGE} \
		--opt "build-arg:DEVTOOLSET_ROOTPATH=${DEVTOOLSET_ROOTPATH}" --opt "build-arg:PREPEND_PATH=${PREPEND_PATH}" --opt "build-arg:LD_LIBRARY_PATH_ARG=${LD_LIBRARY_PATH_ARG}" \
		--output type=docker,name=quay.io/pypa/${POLICY}_${PLATFORM}:${COMMIT_SHA} | docker load
else
	echo "Unsupported build frontend: '${MANYLINUX_BUILD_FRONTEND}'"
	exit 1
fi
# Smoke-test the freshly built image before promoting the build cache.
docker run --rm -v $(pwd)/tests:/tests:ro quay.io/pypa/${POLICY}_${PLATFORM}:${COMMIT_SHA} /tests/run_tests.sh
if [ "${MANYLINUX_BUILD_FRONTEND}" != "docker" ]; then
	if [ -d $(pwd)/.buildx-cache-${POLICY}_${PLATFORM} ]; then
		rm -rf $(pwd)/.buildx-cache-${POLICY}_${PLATFORM}
	fi
	mv $(pwd)/.buildx-cache-staging-${POLICY}_${PLATFORM} $(pwd)/.buildx-cache-${POLICY}_${PLATFORM}
fi
| true
|
b06d4e24852bd77ec2dd094f45433e0fd18a4f85
|
Shell
|
alexvanaxe/dotfile20
|
/bin/toggle_bars.sh
|
UTF-8
| 4,798
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/dash
# Toggle the various status bars (polybar/tint2/lemonbar/eww) on a
# bspwm desktop, adjusting bspwm paddings to match. The target bar is
# selected by $1 (see the case dispatch at the bottom).
if [ -z "${PREFERENCE}" ]; then
MONITOR1=$(monitors_info.sh -t 0)
MONITOR2=$(monitors_info.sh -t 1)
fi
TARGET=$1
# Presence of the ";dim-value" marker in the polybar configs encodes
# whether auto-hide mode is currently enabled (see auto_hide below).
dim=$(grep ";dim-value" -w $HOME/.config/polybar/config)
dim_simple=$(grep ";dim-value" -w $HOME/.config/polybar/config_simple)
# Each toggle_* helper kills the bar if its process is found (resetting
# the matching bspwm padding), otherwise starts it.
toggle_light() {
pid=$(ps aux | egrep "[l]emonbar" | awk '{print $2}')
if [ ! -z "$pid" ]; then
bspc config -m $MONITOR1 bottom_padding 0
bspc config -m $MONITOR2 bottom_padding 0
kill $pid
else
lemonbar.sh | lemonbar -b -o "$MONITOR2" 2>&1 &
fi
}
toggle_tint(){
pid=$(ps aux | egrep "[t]int2" | awk '{print $2}')
if [ ! -z "$pid" ]; then
bspc config -m $MONITOR1 right_padding 0
bspc config -m $MONITOR2 right_padding 0
bspc config -m $MONITOR1 left_padding 0
bspc config -m $MONITOR2 left_padding 0
kill $pid
else
# Point the tint2 config at the current primary monitor before launch.
sed -i "s/panel_monitor.*/panel_monitor = ${MONITOR1}/" ${HOME}/.config/tint2/tint2rc
#bspc config -m $MONITOR1 right_padding 203
tint2 >> /tmp/tint2.log 2>&1 &
fi
}
# Horizontal tint2 variant (tint2rc_h1); also clears bottom paddings.
toggle_tint_h(){
pid=$(ps aux | egrep "[t]int2" | awk '{print $2}')
if [ ! -z "$pid" ]; then
kill $pid
bspc config -m $MONITOR1 right_padding 0
bspc config -m $MONITOR2 right_padding 0
bspc config -m $MONITOR1 left_padding 0
bspc config -m $MONITOR2 left_padding 0
bspc config -m $MONITOR1 bottom_padding 0
bspc config -m $MONITOR2 bottom_padding 0
else
sed -i "s/panel_monitor.*/panel_monitor = ${MONITOR1}/" ${HOME}/.config/tint2/tint2rc_h1
#bspc config -m $MONITOR1 right_padding 203
tint2 -c ${HOME}/.config/tint2/tint2rc_h1 >> /tmp/tint2.log 2>&1 &
fi
}
# Main polybar instance ("default" bar) on MONITOR1.
toggle_full(){
pid=$(ps aux | egrep "[p]olybar.*default" | awk '{print $2}')
if [ ! -z "$pid" ]; then
bspc config -m $MONITOR1 top_padding 0
bspc config -m $MONITOR1 bottom_padding 0
kill $pid
else
# In auto-hide mode the bar overlays windows, so clear the paddings.
if [ ! -z "${dim}" ]; then
bspc config -m $MONITOR1 top_padding 0
bspc config -m $MONITOR1 bottom_padding 0
fi
MONITOR1=$MONITOR1 polybar -q default >>/tmp/polybar1.log 2>&1 &
fi
}
# Secondary polybar ("simple" bar) on MONITOR2.
toggle_simple(){
pid_simple=$(ps aux | egrep "[p]olybar.*simple" | awk '{print $2}')
if [ ! -z "$pid_simple" ]; then
kill $pid_simple
else
MONITOR2=$MONITOR2 polybar -q -c $HOME/.config/polybar/config_simple simple >>/tmp/polybar2.log 2>&1 &
fi
}
# Restart whichever bars are currently running (kill + toggle back on).
restart_bar(){
pid=$(ps aux | egrep "[p]olybar.*default" | awk '{print $2}')
pid_simple=$(ps aux | egrep "[p]olybar.*simple" | awk '{print $2}')
pid_tint=$(ps aux | egrep "[t]int2" | awk '{print $2}')
if [ ! -z "$pid_tint" ]; then
kill ${pid_tint}
sleep 1
toggle_tint
fi
if [ ! -z "$pid" ]; then
kill ${pid}
sleep 1
toggle_full
fi
if [ ! -z "$pid_simple" ]; then
kill ${pid_simple}
sleep 1
toggle_simple
fi
}
toggle_eww() {
local panel="$1"
eww open --toggle $panel
}
# Toggle default (+ optionally simple) polybars together.
toggle_all(){
pid_simple=$(ps aux | egrep "[p]olybar.*simple" | awk '{print $2}')
pid=$(ps aux | egrep "[p]olybar.*default" | awk '{print $2}')
if [ ! -z $pid ]; then
bspc config top_padding 0
bspc config bottom_padding 0
kill $pid
kill $pid_simple
else
if [ ! -z "${dim}" ]; then
bspc config -m $MONITOR1 top_padding 0
bspc config -m $MONITOR1 bottom_padding 0
fi
bspc config -m $MONITOR2 top_padding 0
bspc config -m $MONITOR2 bottom_padding 0
MONITOR1=$MONITOR1 polybar -q default >>/tmp/polybar1.log 2>&1 &
#if [ ! -z ${MONITOR2} ]; then
#MONITOR2=$MONITOR2 polybar -q -c $HOME/.config/polybar/config_simple simple >>/tmp/polybar2.log 2>&1 &
#fi
fi
}
# Flip the auto-hide markers in the polybar configs by (un)commenting
# the dim-value / wm-restack keys in place.
auto_hide(){
if [ -z "${dim_simple}" ]; then
sed -i "s/dim-value/;dim-value/" ${HOME}/.config/polybar/config_simple
else
sed -i "s/;dim-value/dim-value/" ${HOME}/.config/polybar/config_simple
fi
if [ -z "${dim}" ]; then
sed -i "s/dim-value/;dim-value/" ${HOME}/.config/polybar/config
sed -i "s/;wm-restack/wm-restack/" ${HOME}/.config/polybar/config
else
sed -i "s/;dim-value/dim-value/" ${HOME}/.config/polybar/config
sed -i "s/wm-restack/;wm-restack/" ${HOME}/.config/polybar/config
fi
}
# Dispatch on the requested target.
# NOTE(review): `$(toggle_x)` runs the function in a subshell and then
# tries to execute its (empty) output — plain `toggle_x` would be the
# conventional call; confirm before changing, the helpers print nothing.
case "$TARGET" in
"--target2") $(toggle_simple);;
"--target1") $(toggle_full);;
"--tint") $(toggle_tint);;
"--eww1") $(toggle_eww "general_infos");;
"--eww2") $(toggle_eww "pc_infos");;
"--tinth") $(toggle_tint_h);;
"--light") $(toggle_light);;
"--restart") restart_bar;;
"--autohide") auto_hide;;
*) $(toggle_all);;
esac
| true
|
dc7c6618bbd1cb84bc0c0262509edd5a4fed04ce
|
Shell
|
dickschoeller/gedbrowser
|
/config/gedbrowser_restart.sh
|
UTF-8
| 1,204
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Tear down and rebuild the gedbrowser docker stack: stop/remove the
# containers and images, then relaunch mongo, geoservice, gedbrowser and
# gedbrowserng in dependency order with fresh images.
export GEDBROWSER_HOME=/var/lib/gedbrowser
export DATA_DIR=/home/dick/data
docker stop gedbrowserng
docker stop gedbrowser
docker stop geoservice
docker stop mongo
docker rm gedbrowserng
docker rm gedbrowser
docker rm geoservice
docker rm mongo
# Remove images so the next `docker run` pulls the latest versions.
docker rmi dickschoeller/gedbrowserng:latest
docker rmi dickschoeller/gedbrowser:latest
docker rmi dickschoeller/geoservice:latest
docker rmi mongo:latest
# Shorthand fragments for the run commands below.
export R="--restart unless-stopped"
export M="--link mongo:mongo"
export H="-v ${GEDBROWSER_HOME}:/var/lib/gedbrowser"
export A="--spring.config.location=file:/var/lib/gedbrowser/application.yml"
export V="latest"
export VN=""
export HO="largo.schoellerfamily.org"
export PO="9086"
# Start mongo only if it is not already running.
docker ps | grep mongo
if [ $? = 1 ]; then
    docker run -v ${DATA_DIR}:/data/db --name mongo -p 28001:27017 -d mongo
fi
# Sleeps give each service time to come up before its dependents start.
docker run ${R} ${M} ${H} -p 8086:8080 -p 8087:8081 --name geoservice${VN} -d dickschoeller/geoservice:${V}
sleep 5
docker run ${R} ${M} --link geoservice:geoservice ${H} -p 8082:8080 -p 8083:8081 --name gedbrowser${VN} -d dickschoeller/gedbrowser:${V} ${A}
sleep 5
docker run ${R} ${M} ${H} -p 8088:8080 -p 8089:8081 --name gedbrowserng${VN} -d dickschoeller/gedbrowserng:${V} ${A}
| true
|
1839f0efad0612eeb9f2241c7ca1e2c868877847
|
Shell
|
giagiannis/aura
|
/example/wordpress/web_server/install.sh
|
UTF-8
| 315
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Provision an Apache/PHP web server and stage the latest WordPress
# tarball under /opt (extraction only; site setup happens elsewhere).
# Apache, Mysql client, PHP installation
export DEBIAN_FRONTEND=noninteractive
apt-get update -qq
# NOTE(review): "apache2" appears twice in this package list — harmless,
# but probably a leftover.
apt-get install -y -qq apache2 libapache2-mod-php apache2 php-mysql wget
service apache2 reload
# Wordpress download
cd /opt/
wget --quiet http://wordpress.org/latest.tar.gz
tar xfz latest.tar.gz
exit 0
| true
|
75bf96e1efe62f169eba759073bd414b139edfde
|
Shell
|
InfluxOW/docker_test_project
|
/.env.example
|
UTF-8
| 1,348
| 2.625
| 3
|
[] |
no_license
|
###########################################################
###################### General Setup ######################
###########################################################
### Paths #################################################
# Point to the path of your PHP applications code on your host
PHP_CODE_PATH_HOST=
# Point to where your applications should be in the container
APP_CODE_PATH_CONTAINER=/var/www
# You may add flags to the path `:cached`, `:delegated`. When using Docker Sync add `:nocopy`
APP_CODE_CONTAINER_FLAG=:cached
# Choose storage path on your machine. For all storage systems
DATA_PATH_HOST=./data
### NGINX #################################################
NGINX_VERSION=
NGINX_HOST_HTTP_PORT=80
NGINX_HOST_HTTPS_PORT=443
NGINX_HOST_LOG_PATH=./logs/nginx/
NGINX_SITES_PATH=./server/nginx/sites/
NGINX_PHP_UPSTREAM_CONTAINER=php-fpm
NGINX_PHP_UPSTREAM_PORT=9000
### BACKEND #################################################
PHP_VERSION=
SYSTEM_USER_USERNAME=
SYSTEM_USER_UID=
### POSTGRES ##############################################
POSTGRES_VERSION=
POSTGRES_USER=
POSTGRES_PASSWORD=
POSTGRES_DB=db
POSTGRES_API_ENTRYPOINT_INITDB=./db/postgres_api/docker-entrypoint-initdb.d
POSTGRES_AUTH_ENTRYPOINT_INITDB=./db/postgres_auth/docker-entrypoint-initdb.d
POSTGRES_API_PORT=54320
POSTGRES_AUTH_PORT=54321
| true
|
7212c44fd407eb84a2b5b8923dbc82952fa6af01
|
Shell
|
ScrippsPipkinLab/JYC_DataAnalysis
|
/z_codes_hpc_2/6_0_byGroup_motif_finding/6_0_byGroup_motif_finding.sh
|
UTF-8
| 721
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# PBS array job: run HOMER known-motif finding on one ATAC peak group
# per array index (0-5), against the mm10 genome.
#PBS -l mem=6gb
#PBS -t 0-5
### Motif score calculation: matrix score **0.86
### Combined with Homer Known motifs
wk_dir=/gpfs/home/hdiao/jycATAC/6_GeneGroupMotifAnalysis/byGroup_bed
preparsed_dir=/gpfs/home/hdiao/resources
mm10_genome=/gpfs/group/databases/Mus_musculus/UCSC/mm10/Sequence/Bowtie2Index/genome.fa
cd $wk_dir
export PATH="/gpfs/home/hdiao/homer/bin:$PATH"
# One peak group per array task; $PBS_ARRAYID selects the group.
sp_names=(Group_I--ATAC_peaks Group_II--ATAC_peaks Group_III--ATAC_peaks Group_IV--ATAC_peaks TFH_Associated--ATAC_peaks TH1_Associated--ATAC_peaks)
bed_name=${sp_names[$PBS_ARRAYID]}.bed
out_name=${sp_names[$PBS_ARRAYID]}_cb_mtfs
# -size given: use each peak's own width; -mask: soft-masked genome.
findMotifsGenome.pl $bed_name $mm10_genome $out_name -size given -mask -preparsedDir $preparsed_dir
| true
|
fd3bcf8cb6f0500154f216f37bb29fec93fbbf39
|
Shell
|
mrts/snippets-cpp
|
/make-snippet.sh
|
UTF-8
| 274
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Create a new C++ snippet project from ./template: copy it to the name
# given in $1, rename the CMake project, and make an initial git commit.
set -e
set -u
SNIPPET_NAME="$1"
cp -r template "$SNIPPET_NAME"
pushd "$SNIPPET_NAME"
# NOTE(review): `sed -i` without a suffix is GNU-only; on BSD/macOS this
# needs `sed -i ''` — confirm target platforms.
sed -i "s/template/$SNIPPET_NAME/" CMakeLists.txt
git init
touch README.md
git add .
git commit -m "Add project template, configuration files and scripts."
| true
|
42db8ffb8df43c574db74ef8dd911c36ed648cfa
|
Shell
|
sidaf/scripts
|
/run-masscan
|
UTF-8
| 1,038
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Run masscan over the full TCP port space in eight 8192-port slices,
# then merge the results into ports.list / hosts.list.
# Usage: run-masscan <file of ips> <rate>
TCP1='0-8191'
TCP2='8192-16383'
TCP3='16384-24575'
TCP4='24576-32767'
TCP5='32768-40959'
TCP6='40960-49151'
TCP7='49152-57343'
TCP8='57344-65535'
if [ -z $2 ];
	then printf "\nSyntax: $0 <file of ips> <rate|e.g. 2000>\n\n"
else
	FILE=$1
	RATE=$2
	TCP_PORTS=($TCP1 $TCP2 $TCP3 $TCP4 $TCP5 $TCP6 $TCP7 $TCP8)
	for i in "${!TCP_PORTS[@]}"; do
		PORTS="${TCP_PORTS[$i]}"
		let INDEX=${i}+1
		OUT="tcp-${INDEX}"
		# Log the exact command, run the slice in binary output mode.
		echo "# masscan -iL $FILE -p T:$PORTS --rate $RATE --retries 1 -oB $OUT.masscan" | tee $OUT.out
		masscan -iL $FILE -p T:$PORTS --rate $RATE --retries 1 -oB $OUT.masscan | tee -a $OUT.out
		if [ $(ls $OUT.masscan 2> /dev/null | wc -l) -gt 0 ]; then
			# Convert the binary capture to XML and list form, then
			# regenerate the merged port/host summaries after each slice.
			masscan --readscan $OUT.masscan -oX $OUT.xml
			masscan --readscan $OUT.masscan -oL $OUT.list
			cat tcp-*.list | grep ^open | cut -d" " -f3 | sort -n | uniq > ports.list
			cat tcp-*.list | grep ^open | cut -d" " -f4 | sort -V | uniq > hosts.list
			chown $USER:$USER tcp-*.* ports.list hosts.list
		fi
		echo ""
	done
fi
| true
|
bd7d679a07129af6e6c78b9b061716a57abee0e7
|
Shell
|
JerrikEph/Locationing
|
/all_file/run_3.sh
|
UTF-8
| 270
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run locate_3.py on competition cases 021-025 (zero-padded file names),
# writing one output file per case, on GPU 3.
cnt=21
src=./compete_data/case
res=./compete_case/output_case
while (($cnt <=25))
do
python locate_3.py --src-path=${src}$(printf '%03d' $cnt)_input.txt --res-path=${res}$(printf '%03d' $cnt).txt --gpu-num=3
cnt=$((cnt + 1))
done
# NOTE(review): this trailing printf of the final counter (026, without
# a newline) looks like leftover debugging.
printf '%03d' $cnt
| true
|
6cc43a9f81673d029a4c426c0944cfa560f39726
|
Shell
|
nithinshiriya/PPNativeScript
|
/.run-ctags
|
UTF-8
| 370
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Rebuild the tags file for a NativeScript project: index the project
# sources (skipping deps, platforms and plain .js), append tags from the
# TypeScript declaration files of key node_modules, then strip useless
# keyword entries.
ctags -R \
	--exclude=node_modules \
	--exclude=platforms \
	--exclude='*.js'
# add_ctags LOCATION [PATTERN]
# Append tags for every file under LOCATION matching PATTERN
# (default: *.d.ts).
add_ctags() {
	LOCATION=$1
	PATTERN=${2:-*.d.ts}
	find "$LOCATION" -iname "$PATTERN" -exec ctags -a \{\} \;
}
add_ctags "node_modules/angular2"
add_ctags "node_modules/tns-core-modules"
# Drop tag entries that are just language keywords / control constructs.
sed -r -i '/^(if|for|forEach|while|switch|super|function|return)\b/d' tags
| true
|
466c34292aa3d3c49db47e8dacdd7ac6d3261f59
|
Shell
|
estiloinfo/clip-itk
|
/cliplibs/clip-mysql/configure
|
UTF-8
| 341
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# Configure check for clip-mysql: if the MySQL client development
# headers are missing, log a warning (to $LOG, supplied by the build
# system) and exit non-zero so this library's build is skipped.
test -f /usr/include/mysql/mysql.h -o -f /usr/local/include/mysql/mysql.h
if [ $? != 0 ]
then
	# "$1" is quoted: it is unset for targets other than "install", and
	# the original unquoted test made `[` fail with a syntax error then.
	if [ "$1" = "install" ]; then
		echo ' Warning: lib not installed' >> ${LOG}
	else
		echo ' Warning: default-libmysqlclient-dev package not installed' >> ${LOG}
		echo ' Warning: libclip-mysql make skip' >> ${LOG}
	fi
	exit 1
fi
| true
|
72c0a8a54cda532e937044136f01b145a36b0cb9
|
Shell
|
coolgoose85/FreeBSD
|
/sys/contrib/dev/acpica/acpica_prep.sh
|
UTF-8
| 2,683
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
# $FreeBSD: src/sys/contrib/dev/acpica/acpica_prep.sh,v 1.9 2005/11/01 22:38:50 jkim Exp $
#
# Unpack an ACPI CA drop and restructure it to fit the FreeBSD layout
#
if [ ! $# -eq 1 ]; then
	echo "usage: $0 acpica_archive"
	exit
fi
src=$1
# wrk: scratch unpack area; dst: restructured tree handed to the dev.
wrk=./_acpi_ca_unpack
dst=./acpi_ca_destination
# files that should keep their full directory path
fulldirs="common compiler"
# files to remove
stripdirs="generate acpisrc"
stripfiles="16bit.h Makefile README a16find.c a16utils.asm a16utils.obj \
	acdos16.h acintel.h aclinux.h acmsvc.h acnetbsd.h acpixtract.c \
	acwin.h acwin64.h aeexec.c aemain.c osdosxf.c osunixdir.c \
	oswindir.c oswinxf.c readme.txt"
# include files to canonify
src_headers="acapps.h acconfig.h acdebug.h acdisasm.h acdispat.h \
	acenv.h acevents.h acexcep.h acfreebsd.h acgcc.h acglobal.h \
	achware.h acinterp.h aclocal.h acmacros.h acnames.h acnamesp.h \
	acobject.h acopcode.h acoutput.h acparser.h acpi.h acpiosxf.h \
	acpixf.h acresrc.h acstruct.h actables.h actbl.h actbl1.h \
	actbl2.h actypes.h acutils.h aecommon.h amlcode.h amlresrc.h"
comp_headers="aslcompiler.h asldefine.h aslglobal.h asltypes.h"
# files to update paths in
src_update_files="acpi.h acpiosxf.h"
# pre-clean
echo pre-clean
rm -rf ${wrk}
rm -rf ${dst}
mkdir -p ${wrk}
mkdir -p ${dst}
# unpack
echo unpack
tar -x -z -f ${src} -C ${wrk}
# strip files
echo strip
for i in ${stripdirs}; do
	find ${wrk} -name ${i} -type d | xargs rm -r
done
for i in ${stripfiles}; do
	find ${wrk} -name ${i} -type f -delete
done
# common/ and compiler/ keep their directory structure under dst.
echo copying full dirs
for i in ${fulldirs}; do
	find ${wrk} -name ${i} -type d | xargs -J % mv % ${dst}
done
# move files to destination
# Everything else is flattened directly into dst.
echo copying flat dirs
find ${wrk} -type f | xargs -J % mv % ${dst}
mv ${dst}/changes.txt ${dst}/CHANGES.txt
# update src/headers for appropriate paths
echo updating paths
for i in ${src_update_files}; do
	i=${dst}/$i
	sed -e 's/platform\///' $i > $i.new && mv $i.new $i
done
# canonify include paths
# Rewrite bare #include "header.h" forms to the FreeBSD contrib paths.
for H in ${src_headers}; do
	find ${dst} -name "*.[chy]" -type f | \
	    xargs sed -i "" -e "s|[\"<]$H[\">]|\<contrib/dev/acpica/$H\>|g"
done
for H in ${comp_headers}; do
	find ${dst}/compiler -name "*.[chly]" -type f | \
	    xargs sed -i "" -e "s|[\"<]$H[\">]|\<contrib/dev/acpica/compiler/$H\>|g"
done
# post-clean
echo post-clean
rm -rf ${wrk}
# assist the developer in generating a diff
echo "Directories you may want to 'cvs diff':"
echo "	src/sys/contrib/dev/acpica src/sys/dev/acpica \\"
echo "	src/sys/amd64/acpica src/sys/i386/acpica src/sys/ia64/acpica \\"
echo "	src/sys/amd64/include src/sys/i386/include src/sys/ia64/include \\"
echo "	src/sys/boot src/sys/conf src/sys/modules/acpi src/usr.sbin/acpi"
| true
|
6a255336fde5ce31fa8383580f9aafd9846c3194
|
Shell
|
ManuGithubSteam/Single_Gpu_Arch_GTX1060
|
/revert.sh
|
UTF-8
| 1,286
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Revert a single-GPU VFIO passthrough session: unload VFIO, rebind the
# GTX1060 (04:00.x) and USB controllers to the host, reload the NVIDIA
# driver stack, restore consoles/framebuffer and restart the display
# manager. PCI addresses are specific to this machine.
# Unload VFIO-PCI Kernel Driver
modprobe -r vfio-pci
modprobe -r vfio_iommu_type1
modprobe -r vfio
# remove dev from windows
#echo 1 > /sys/bus/pci/devices/0000\:00\:03.0/remove
# Ubind GPU VRAM
# Remove the GPU and its HDMI audio function so a bus rescan re-probes
# them with the host driver.
echo 1 > /sys/bus/pci/devices/0000\:00\:03.0/0000\:04\:00.0/remove
echo 1 > /sys/bus/pci/devices/0000\:00\:03.0/0000\:04\:00.1/remove
# Rescan for the new devies
echo 1 > /sys/bus/pci/rescan
# Reload the kernel modules
#modprobe -r snd_hda_intel
modprobe -a nvidia_drm
modprobe -a nvidia_modeset
modprobe -a nvidia
# Re-Bind USB Hubs
virsh nodedev-reattach pci_0000_06_00_0
virsh nodedev-reattach pci_0000_08_00_0
virsh nodedev-reattach pci_0000_00_1d_0
virsh nodedev-reattach pci_0000_00_1a_0
# USB 3
# NOTE(review): these two reattach commands duplicate the first two
# above — confirm whether different devices were intended.
virsh nodedev-reattach pci_0000_06_00_0
virsh nodedev-reattach pci_0000_08_00_0
# Re-Bind the Tyys to linux
echo 1 > /sys/class/vtconsole/vtcon0/bind
echo 1 > /sys/class/vtconsole/vtcon1/bind
# Re-Bind the EFI Framebuffer
nvidia-xconfig --query-gpu-info > /dev/null 2>&1
echo "efi-framebuffer.0" > /sys/bus/platform/drivers/efi-framebuffer/bind
sleep 2
# Restart Display Manager
systemctl start display-manager.service
###########
## Some convienice stuff
##########
## Get KDE recognise that we come from vm
touch /tmp/from_kvm.txt
chmod 777 /tmp/from_kvm.txt
| true
|
0dabe0c2e1bd4c54037453e44866645f04572787
|
Shell
|
assafmuller/neutron_troubleshooting_training
|
/exercise_02/exercise_02.sh
|
UTF-8
| 1,421
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Stop OVS agent, wait 80 seconds (Longer than agent_down_time), create resources. Ports will fail to bind.
. ~/overcloudrc
ssh heat-admin@overcloud-controller-0 'sudo pcs resource unmanage neutron-l3-agent-clone'
ssh heat-admin@overcloud-controller-0 'sudo pcs resource unmanage neutron-dhcp-agent-clone'
ssh heat-admin@overcloud-controller-0 'sudo pcs resource unmanage neutron-metadata-agent-clone'
ssh heat-admin@overcloud-controller-0 'sudo pcs resource unmanage neutron-openvswitch-agent-clone'
for i in {0..2}
do
ssh heat-admin@overcloud-controller-$i 'sudo systemctl stop neutron-openvswitch-agent'
done
echo 'Sleeping for 80 seconds'
sleep 80
neutron router-create exercise_02
neutron net-create exercise_02
neutron subnet-create --name exercise_02 exercise_02 20.0.0.0/8
neutron router-interface-add exercise_02 exercise_02
echo 'Sleeping for 10 seconds'
sleep 10
for i in {0..2}
do
ssh heat-admin@overcloud-controller-$i 'sudo systemctl start neutron-openvswitch-agent'
done
echo 'Sleeping for 10 seconds'
sleep 10
ssh heat-admin@overcloud-controller-0 'sudo pcs resource manage neutron-l3-agent-clone'
ssh heat-admin@overcloud-controller-0 'sudo pcs resource manage neutron-dhcp-agent-clone'
ssh heat-admin@overcloud-controller-0 'sudo pcs resource manage neutron-metadata-agent-clone'
ssh heat-admin@overcloud-controller-0 'sudo pcs resource manage neutron-openvswitch-agent-clone'
| true
|
78975a058203df7dbbd107f902d097d32e0c03e0
|
Shell
|
Serial-DeV/Compilation
|
/projet_compilation_src/src/Tests/gencode_test_KO_Compilation_et_Execution.sh
|
UTF-8
| 167
| 2.71875
| 3
|
[] |
no_license
|
for ((i=1;i<=4;i++)) ; do
f=./Gencode/KO/test$i.c
echo -e "\n"
message='Fichier testé :'
echo $message $f
../minicc $f
java -jar Mars_4_2.jar out.s
done
| true
|
feb448d1ae644b0f0db18e62e9b14503c8636763
|
Shell
|
ulysse71/skychart
|
/tools/data/deltat/make_deltat.sh
|
UTF-8
| 1,273
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Download leap seconds file
source="https://www.ietf.org/timezones/data/leap-seconds.list"
wget $source
## Create Delta T file for Cartes du Ciel
#source=http://maia.usno.navy.mil/ser7
#source="--inet4-only ftp://cddis.gsfc.nasa.gov/pub/products/iers"
source="ftps://gdc.cddis.eosdis.nasa.gov/products/iers"
rm deltat.tmp deltat.txt
rm historic_deltat.data deltat.data deltat.preds
# Get historic data
wget $source/historic_deltat.data
tail +3 historic_deltat.data | head -633 | awk '{printf $1 " " $2 " " $3 "\n"}' | while read dat del err
do
printf "%8.4f\t%8.4f\t%8.4f\n" "$dat" "$del" "$err" >> deltat.tmp
done
# Get current data
wget $source/deltat.data
cat deltat.data | awk '{printf $1 " " $2 " " $3 " " $4 "\n"}' | while read y m d del
do
dat=$(echo 'scale=4;'"$y + ( $m -1 ) / 12"|bc -l)
err=0
printf "%8.4f\t%8.4f\t%8.4f\n" "$dat" "$del" "$err" >> deltat.tmp
done
# Get next years predictions
wget $source/deltat.preds
tail +2 deltat.preds | while read lin
do
dat=${lin:11:7}
del=${lin:21:5}
err=${lin:45:5}
printf "%8.4f\t%8.4f\t%8.4f\n" "$dat" "$del" "$err" >> deltat.tmp
done
# Sort and remove duplicate date
cat deltat.tmp | sort -u -k1.1,1.9 > deltat.txt
rm deltat.tmp
rm historic_deltat.data deltat.data deltat.preds
| true
|
bc90eaabeaf1bb6e45746b11ad7a5f96f1fe4889
|
Shell
|
curtscraw/osu_rocketry
|
/setup.sh
|
UTF-8
| 1,301
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
#used to setup the beaglebone environment after a fresh clone of this repo
#ONLY RUN ON A BEAGLEBON RUNNING DEBIAN
WORKING_DIR=$PWD
#ensure root permisions
if [ "$EUID" -ne 0 ]
then echo "Please run as root"
exit
fi
#setup the required Debian packages
apt-get update
apt-get upgrade -y
apt-get install -y git gpsd gpsd-clients python-gps python python-dev python-setuptools build-essential python-smbus
#install pip modules
pip install pyserial
pip install Adafruit_BBIO
#instantiate the needed git submodules
git submodule init
git pull --recurse-submodules
git submodule update --recursive
#install Adafruit altimeter module
cd Adafruit_Python_BMP
python setup.py install
cd $WORKING_DIR
#install the Accelerometer module
cd LSM9DS0_Python_Lib
python setup.py install
cd $WORKING_DIR
#install BMP180 wrapper
cd BMP180_Python_wrapper
python setup.py install
cd $WORKING_DIR
#install TGY-6114MD servo wrapper
cd TGY6114MD_Python_Lib
python setup.py install
cd $WORKING_DIR
#setup the gpsd to listen to the correct USART port
cd python
python initial_uart_setup.py
gpsd -n /dev/ttyO2 -F /var/run/gpsd.sock
cd $WORKING_DIR
chmod +x gpsd_setup.sh
echo "run payload with: "
echo "nohup payload_start.py &"
echo "run avionics with: "
echo "nohup avionics_start.py &"
exit
| true
|
c1d14bbcbba9c7aa6f1f8273b2af42de4385129b
|
Shell
|
biodatageeks/sequila
|
/src/test/resources/pileup/pileup_commands.sh
|
UTF-8
| 1,504
| 2.671875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
export REF_FILE=/Users/aga/workplace/sequila/src/test/resources/reference/Homo_sapiens_assembly18_chr1_chrM.small.fasta
## SAMTOOLS
# -x ignore overlaps
# -B Disable base alignment quality (BAQ) computation
# -A (--count-orphans) Do not skip anomalous read pairs in variant calling.
# -q 0 base quality > 0
# -Q 0 maping quality > 0
samtools mpileup --fasta-ref $REF_FILE -B -x -A -q 0 -Q 0 NA12878.multichrom.md.bam > samtools.csv
## GATK
# run on cdh00. GATK complains about not complete fasta reference file
#export REF_PATH=/Users/aga/workplace/sequila/src/test/resources/reference
#export BAM_PATH=/Users/aga/workplace/sequila/src/test/resources/multichrom/mdbam/
# 1. generate reference dict file
export REF_PATH=/data/work/projects/pileup/data
docker run --rm -it --entrypoint="java" -v $REF_PATH:/data broadinstitute/picard -jar /usr/picard/picard.jar CreateSequenceDictionary R=/data/Homo_sapiens_assembly18.fasta O=/data/Homo_sapiens_assembly18.dict
#docker run --rm -it --entrypoint="java" -v $REF_PATH:/data broadinstitute/picard -jar /usr/picard/picard.jar CreateSequenceDictionary R=/data/Homo_sapiens_assembly18_chr1_chrM.small.fasta O=/data/Homo_sapiens_assembly18_chr1_chrM.small.dict
# 2. calculate pileup
export BAM_PATH=BAM_PATH=/data/work/projects/pileup/data/data2/slice
docker run --rm -it -v $REF_PATH:/ref -v $BAM_PATH:/data broadinstitute/gatk gatk Pileup -R /ref/Homo_sapiens_assembly18.fast -I /data/NA12878.multichrom.md.bam -O /data/gatk.csv
| true
|
35653b38b6437bffd306715a73174a244d15ce04
|
Shell
|
wu1233456/qmx
|
/sh/vpc_lab.sh
|
UTF-8
| 1,030
| 2.6875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
############################
#npm instal
############################
cd /www/site/com.lab/qmx
npm install --unsafe-perm
npm run prod
############################
#openresty和uwsgi相关
############################
#环境启动程序
openresty -t;
service openresty start;
############################
# pip install
############################
cd /www/site/com.lab/qmx
apt-get install python3-pip
pip3 install -r requirements.txt
############################
# uwsgi
############################
cd /www/site/com.lab/qmx
if [[ ! -L /etc/uwsgi-emperor/vassals/flask_index.ini ]] && [[ ! -f /etc/uwsgi-emperor/vassals/flask_index.ini ]];then
ln -s /www/site/com.lab/qmx/uwsgi/vpc_lab/flask_index.ini /etc/uwsgi-emperor/vassals/flask_index.ini
fi;
service uwsgi-emperor stop
service uwsgi-emperor start
sleep 2
service uwsgi-emperor status
############################
#给予download文件夹全部权限
############################
cd /www/site/com.lab/qmx
chmod -R 777 download
echo "Success"
| true
|
353224245463976c05c0bc5ab71a956a38253406
|
Shell
|
Kokokokoka/s6opts
|
/s6opts.in
|
UTF-8
| 2,451
| 3.53125
| 4
|
[
"Beerware"
] |
permissive
|
#!/usr/bin/bash
# Copyright (C) 2015-2017 Eric Vidal <eric@obarun.org>
#
# This file is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.
#
# This scripts is under License Beerware.
#
# "THE BEERWARE LICENSE" (Revision 42):
# <eric@obarun.org> wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. Eric Vidal http://obarun.org
sourcing(){
local list
for list in /usr/lib/obarun/{common_functions,s6opts_functions} /etc/obarun/s6opts.conf; do
if [[ -f "${list}" ]]; then
source "${list}"
else
echo_error " Missing file : ${list}"
exit
fi
done
unset list
}
sourcing
shellopts_save
shellopts_set_unset "extglob" 0
(( EUID == 0 )) || die " You must be run this script with root privileges"
if [[ -z "${2}" && "${1}" != @(list|which_db|create) || -z "${3}" && "${1}" = @(add|delete|compile|all) ]]; then
usage
exit 1
fi
if [[ ! -h "${RC_DATABASE_COMPILED}/current" ]] || [[ ! -h "${RC_DATABASE_COMPILED}/previous" ]] \
|| [[ ! -h "${RC_DATABASE_COMPILED}/Default.src" ]]; then
cat << EOF
To properly manage the service database, the symlinks current|previous|Default.src must exist.
The default is :
/etc/s6-serv/enabled/rc/compiled/current -> /etc/s6-serv/enabled/rc/compiled/Default
/etc/s6-serv/enabled/rc/compiled/previous -> /etc/s6-serv/enabled/rc/compiled/Default
/etc/s6-serv/enabled/rc/compiled/Default.src -> /etc/s6-serv/enabled/rc/source/default
EOF
exit 1
fi
opts=( "${2}" )
where_which=( "${3}" )
case "${1}" in
add)
add "${opts}" "${where_which[@]}"
;;
delete)
delete "${opts}" "${where_which[@]}"
;;
compile)
compile "${opts}" "${where_which}"
;;
switch)
switch "${opts}"
;;
all)
all "${opts}" "${where_which}"
;;
update)
update "${opts[@]}"
;;
list)
list "${opts}"
;;
list_source)
list_source "${opts}"
;;
verbose)
verbose "${opts}"
;;
remove)
remove "${opts[@]}"
;;
remove_db)
remove_db "${opts[@]}"
;;
which_db)
which_db "${opts}"
;;
enable)
enable "${opts[@]}"
;;
disable)
disable "${opts[@]}"
;;
create)
create
;;
edit)
edit "${opts}"
;;
*)
usage
exit 1
;;
esac
shellopts_restore
exit 0
| true
|
da2f92f561479b55d19dbfc0751081eeaf9aed2f
|
Shell
|
co2-git/lib
|
/Process/kill.sh
|
UTF-8
| 118
| 3.25
| 3
|
[] |
no_license
|
lib.Process.kill() {
[ $# -eq 0 ] && return 1;
if lib isANumber "$1"; then
kill -9 "$1";
else
pkill "$1";
fi
}
| true
|
e62633243d83f825fcd59aaed68694179a56ab39
|
Shell
|
ycao233/secure-data-service
|
/tools/jenkinsTools/jenkinsJobScripts/runTestJob.sh
|
UTF-8
| 990
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
############### Load Utils #########################
PRG="$0"
ROOT=`dirname "$PRG"`
source "$ROOT/utils.sh"
############### Static Data ########################
declare -A deployHash
deployHash=( [api]="$WORKSPACE/sli/api/target/api.war"
[dashboard]="$WORKSPACE/sli/dashboard/target/dashboard.war"
[simple-idp]="$WORKSPACE/sli/simple-idp/target/simple-idp.war"
[sample]="$WORKSPACE/sli/SDK/sample/target/sample.war"
[ingestion-service]="$WORKSPACE/sli/ingestion/ingestion-service/target/ingestion-service.war"
[mock-zis]="$WORKSPACE/sli/sif/mock-zis/target/mock-zis.war"
[sif-agent]="$WORKSPACE/sli/sif/sif-agent/target/sif-agent.war"
)
############### Process Inputs #####################
WHICHTEST=$1
GITCOMMIT=$2
shift
shift
APPSTODEPLOY=$@
############## Run Tests ###########################
source "$ROOT/${WHICHTEST}Tests.sh"
####################################################
| true
|
62400051cb209492c97dce1a0728d0c3f762142d
|
Shell
|
dvdvideo1234/UbuntuBatches
|
/GitConfig/setup-ssh-key.sh
|
UTF-8
| 973
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
scriptfile=""
scriptmail=""
scriptname=$(readlink -f "$0")
scriptpath=$(dirname "$scriptname")
echo This will create a public and private keys with use of github SSH!
read -p "Enter filename or leave blank: " scriptfile
if test "$scriptfile" == ""
then
scriptfile=id_rsa
fi
read -p "Enter e-mail or leave blank: " scriptmail
if test "$scriptmail" == ""
then
ssh-keygen -t rsa -b 4096 -f $scriptfile
else
ssh-keygen -t rsa -b 4096 -C "$scriptmail" -f $scriptfile
fi
eval $(ssh-agent -s)
ssh-add $scriptfile
clip < $scriptfile.pub
echo Please follow the procedure described exactly:
echo 1. The key now exists into the clipboard!
echo 2. Go to: https://github.com/settings/keys
echo 3. Create new SSH key and paste it there
echo 4. Import the key with PUTTYGEN and write your password
echo 5. Save the public and private version of the key
echo 6. Add the PUTTY key to PAGEANT to identify computer
echo You are all set for push and pull with github!
| true
|
1055a16c78768d8e5074051a218a344a66debaef
|
Shell
|
keep1earning/script
|
/install.sh
|
UTF-8
| 6,771
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
get_protocol(){
echo && echo -e "
1. brook
2. brook ws
3. brook wss
4. socks5" && echo
echo "Select a protocol[1-4]"
echo "选择一个协议[1-4]"
read -e -p "-> " protocol
case "$protocol" in
[1-4])
echo
;;
*)
clear
echo "Invalid protocol!!!" && echo
get_protocol
esac
}
get_username(){
echo "Input the username(optional)"
echo "输入一个用户名(如果不需要可以不写)"
read -e -p "-> " username
[[ "$username" ]] && echo && get_password
}
get_password(){
if [[ $skip ]];
then
password=$(LC_CTYPE=C tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 12) #生成隨機12位密碼
else
echo "Input the password"
echo "输入一个密码"
read -e -p "-> " password
[[ -z "$password" ]] && echo "Invalid password!!!" && echo && get_password
echo
fi
}
get_port(){ #TODO: 檢查port是否被佔用
if [[ $skip ]];
then
port=$(LC_CTYPE=C tr -dc '2-9' < /dev/urandom | head -c 4) #生成隨機port -> 2222-9999
else
echo "Select a port[1024-65535]"
echo "选择一个端口[1024-65535]"
read -e -p "-> " port #TODO: allow port [0-1023]
case $port in
1[1-9][0-9][0-9] | 10[3-9][0-9] | 102[4-9] | [2-9][0-9][0-9][0-9] | [1-5][0-9][0-9][0-9][0-9] | 6[0-4][0-9][0-9][0-9] | 65[0-4][0-9][0-9] | 655[0-3][0-5])
echo
;;
*)
clear
echo "Invalid port!!!" && echo
get_port
;;
esac
fi
}
check_domain_ip(){
domain_ip=$(ping "${domain}" -c 1 | sed '1{s/[^(]*(//;s/).*//;q}') #TODO: Support macOS
if [[ ${domain_ip} != "${ip}" ]]; then
echo "Make sure $domain -> $ip"
echo "请确保 $domain -> $ip"
exit 2
fi
}
get_domain(){
port=443
echo "Input a domain(eg. www.google.com)"
echo "输入一个域名(例如 www.google.com)" #TODO: 沒有域名
read -e -p "-> " domain
[[ -z "$domain" ]] && echo "Invalid domain!!!" && echo && get_domain || clear
}
get_ip() {
ip=$(curl -s https://ipinfo.io/ip)
[[ -z $ip ]] && ip=$(curl -s https://api.ip.sb/ip)
[[ -z $ip ]] && ip=$(curl -s https://api.ipify.org)
[[ -z $ip ]] && ip=$(curl -s https://ip.seeip.org)
[[ -z $ip ]] && ip=$(curl -s https://ifconfig.co/ip)
[[ -z $ip ]] && ip=$(curl -s https://api.myip.com | grep -oE "([0-9]{1,3}\.){3}[0-9]{1,3}")
[[ -z $ip ]] && ip=$(curl -s icanhazip.com)
[[ -z $ip ]] && ip=$(curl -s myip.ipip.net | grep -oE "([0-9]{1,3}\.){3}[0-9]{1,3}")
[[ -z $ip ]] && echo "Sorry I can get your server's ip address" && echo "不好意思,无法取得服务器ip" && exit 1
}
fail_to_install(){
clear
echo "Fail to install $1"
echo "安装$1时出错"
exit 1
}
install_nami(){
source <(curl -L https://git.io/getnami)
[[ $(command -v nami) ]] || fail_to_install nami
}
install_joker(){
nami install github.com/txthinking/joker
[[ $(command -v joker) ]] || fail_to_install joker
}
install_brook(){
nami install github.com/txthinking/brook
[[ $(command -v brook) ]] || fail_to_install brook
}
welcome(){
clear
echo "Version: v20210427"
echo "Please wait..."
echo "请耐心等待。。。"
}
check_root(){
[[ $EUID != 0 ]] && echo "ROOT is required" && echo "请使用ROOT运行" && exit 1
}
install(){
if [[ -f ~/.nami/bin/nami ]];
then
nami upgrade github.com/txthinking/nami
else
install_nami
fi
if [[ -f ~/.nami/bin/joker ]];
then
nami upgrade github.com/txthinking/joker
else
install_joker
fi
if [[ -f ~/.nami/bin/brook ]];
then
nami upgrade github.com/txthinking/brook
else
install_brook
fi
clear
}
run_brook(){
if [[ "$protocol" ]];
then
skip=true
else
get_protocol
fi
clear
case "$protocol" in
1)
[[ "$port" ]] || get_port
[[ "$password" ]] || get_password
joker brook server -l :$port -p $password
link=$(brook link -s $ip:$port -p $password)
brook qr -s $ip:$port -p $password
server=$ip:$port
;;
2)
[[ "$port" ]] || get_port
[[ "$password" ]] || get_password
joker brook wsserver -l :$port -p $password
link=$(brook link -s ws://$ip:$port -p $password)
brook qr -s ws://$ip:$port -p $password
server=ws://$ip:$port
;;
3)
check_root
[[ "$domain" ]] || get_domain
check_domain_ip
[[ "$password" ]] || get_password
joker brook wssserver --domain $domain -p $password
link=$(brook link -s wss://$domain:443/ws -p $password)
brook qr -s wss://$domain:443/ws -p $password
server=wss://$domain:443
;;
4)
[[ "$port" ]] || get_port
[[ "$skip" ]] || [[ "$username" ]] || get_username
if [[ -z "$username" ]];
then
joker brook socks5 --socks5 $ip:$port
link=$(brook link -s socks5://$ip:$port)
brook qr -s socks5://$ip:$port
else
joker brook socks5 --socks5 $ip:$port --username $username --password $password
link=$(brook link -s socks5://$ip:$port --username $username --password $password)
brook qr -s socks5://$ip:$port --username $username --password $password
fi
server=$ip:$port
;;
esac
}
show_status(){
echo
echo "link --> $link"
echo "server --> $server"
[[ "$username" ]] && echo "username --> $username"
[[ "$password" ]] && echo "password --> $password"
echo
}
PATH=$HOME/.nami/bin:$PATH
protocol=''
port=''
password=''
username=''
skip=''
welcome
install
for ((i=1;i<=$#;i++));
do
case ${!i} in
"--install-only")
echo "Brook has been installed successfully!"
exit 0
;;
"--brook-server")
protocol=1
;;
"--brook-wsserver")
protocol=2
;;
"--brook-wssserver")
protocol=3
;;
"--socks5")
protocol=4
;;
"--username")
((i++))
username=${!i}
;;
"--password")
((i++))
password=${!i}
;;
"--port")
((i++))
port=${!i}
;;
"--domain")
((i++))
domain=${!i}
;;
*)
clear
echo "error: Found argument '${!i}' which wasn't expected." #TODO: show help
exit 1
esac
done
get_ip
run_brook
show_status
| true
|
422b276b6ae6e45ff0d084efc95bfb786b6a3e49
|
Shell
|
banadiga/Big-Data-Training
|
/redis-host/provision.sh
|
UTF-8
| 982
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
## Java 8 Installation
sudo apt-get install -y python-software-properties
sudo add-apt-repository -y ppa:webupd8team/java
sudo apt-get update -qq -y
echo debconf shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections
echo debconf shared/accepted-oracle-license-v1-1 seen true | /usr/bin/debconf-set-selections
sudo apt-get install -y -qq build-essential
sudo apt-get install -y -qq tcl8.5
sudo apt-get install -y -qq oracle-java8-installer
yes "" | sudo apt-get -f install -y
# install MC
sudo apt-get install -y -qq mc
sudo apt-get update -y -qq
sudo apt-get upgrade -y -qq
# Install redis
. /redis-host/redis-install.sh
# Configure redis
su -s /bin/bash -c "/redis-host/redis-configure.sh" rduser
cd /usr/local/redis
sudo make install
sudo /usr/local/redis/utils/install_server.sh
## Start all redis daemons
sudo service redis_6379 start
## Install and start redis web admin
. /redis-host/redis-commander.sh
echo "Done!"
| true
|
e8851e40b60eb46208d16b8ea9e10f473175f51f
|
Shell
|
toly-k/zero-aws-eks-stack
|
/templates/scripts/create-dev-env.sh
|
UTF-8
| 1,190
| 3.421875
| 3
|
[
"Apache-2.0",
"CC0-1.0"
] |
permissive
|
#!/bin/sh
PROJECT=<% .Name %>
AWS_DEFAULT_REGION=<% index .Params `region` %>
RANDOM_SEED="<% index .Params `randomSeed` %>"
ENVIRONMENT=stage # only apply to Staging environment
DEV_DB_LIST=$(aws iam get-group --group-name ${PROJECT}-developer-${ENVIRONMENT} | jq -r '"dev" + .Users[].UserName' | tr '\n' ' ')
if [[ -z "${DEV_DB_LIST}" ]]; then
echo "$0: No developers available yet, skip."
exit 0
fi
DEV_DB_SECRET_NAME=${PROJECT}-${ENVIRONMENT}-rds-${RANDOM_SEED}-devenv
aws secretsmanager describe-secret --region ${AWS_DEFAULT_REGION} --secret-id ${DEV_DB_SECRET_NAME} > /dev/null 2>&1
if [[ $? -eq 0 ]]; then
DEV_DB_SECRET=$(aws secretsmanager get-secret-value --region=${AWS_DEFAULT_REGION} --secret-id ${DEV_DB_SECRET_NAME} | jq -r ".SecretString")
REGION=${AWS_DEFAULT_REGION} \
SEED=${RANDOM_SEED} \
PROJECT_NAME=${PROJECT} \
ENVIRONMENT=${ENVIRONMENT} \
NAMESPACE=${PROJECT} \
DATABASE_TYPE=<% index .Params `database` %> \
DATABASE_NAME="${DEV_DB_LIST}" \
SECRET_NAME=devenv${PROJECT} \
USER_NAME=dev${PROJECT} \
USER_PASSWORD=${DEV_DB_SECRET} \
CREATE_SECRET=secret-application.yml.tpl \
sh ./create-db-user.sh
fi
| true
|
ed574d59f66f170a0f9eaeb0ef7a7e37b74eb344
|
Shell
|
bblu/pma
|
/codeStat/statModl.sh
|
UTF-8
| 607
| 3.125
| 3
|
[] |
no_license
|
objModl=$1
mdlConf=$objModl.cfg
if [ ! -f $mdlConf ];then
mdlConf="../bkup/$mdlConf"
if [ ! -f $mdlConf ];then
echo "Not found config file:$mdlConf"
exit 1
else
echo "Use bkup cfg:$mdlConf"
fi
fi
#vcp begin
while read VCP;do
i=0
for raw in $VCP;do
if [ $i -eq 0 ];then
TYPE=$raw
elif [ $i -eq 1 ];then
NAME=$raw
elif [ $i -gt 1 ];then
if [ ! -d "$LOCAL/$raw" ]; then
continue
fi
funAnsDir $raw
fi
((i++))
done
if [ $SFILE -eq 0 ]; then
continue
fi
funEchoVcp
funOutVcp
funInsVcp
funUpProject
#while read VCP;do
done < "$CFG"
| true
|
83d3860e0aa26689cd004d47edec9b3c3d5122f1
|
Shell
|
palmkeep/dotfiles
|
/bashrc
|
UTF-8
| 1,483
| 3.90625
| 4
|
[] |
no_license
|
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
# Prompt #
PS1='\[\033[01;32m\]\u\[\033[01;00m\]@\[\033[01;36m\]\h\[\033[00m\]:\[\033[01;34m\]\w \$\[\033[00m\] '
# Bash #
# Set bash to append to command history instead of overwriting
# Multiple shells will otherwise overwrite eachothers history
shopt -s histappend
# Automatically cd's into directory if only dir name is entered
shopt -s autocd
# SSH #
alias startx='ssh-agent startx'
# Load common aliases #
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
### Programs ###
# Valgrind #
function VGMEM {
if [ -z "$1" ] ; then
echo "valgrind --tool=memcheck --leak-check=yes"
echo "Usage: VGMEM ./<file>"
else
valgrind --tool=memcheck --leak-check=yes $1
fi
}
### Functions ###
# Displays all files including those in subdirs
function subls {
for d in * .[!.]* ..?*; do
test -d "${d}" && echo ""
test -d "${d}" && echo "${d}"
test -d "${d}" && ls -lh --color=always "${d}"
done
}
function cdl {
if [ -z "$1" ] ; then
echo "Usage: cdl <path>"
else
cd $1
ls -l
fi
}
function cdls {
if [ -z "$1" ] ; then
echo "Usage: cdls <path>"
else
clear
cd $1
ls -l
fi
}
function mcd {
mkdir -pv $1
cd $1
pwd
}
function fsize {
du -sh *
}
function pdfpcount {
find $1 -maxdepth 20 -type f -name '*.pdf' -exec pdfinfo {} \; | grep Pages | cut -b 17-
}
| true
|
f8e5044fff0aa54de9c54474e80b000b0ae6a7cc
|
Shell
|
FXTD-ODYSSEY/vscode-mayapy
|
/py/ptvsd/_vendored/pydevd/.travis_install_python_deps.sh
|
UTF-8
| 1,019
| 2.53125
| 3
|
[
"EPL-1.0",
"MIT"
] |
permissive
|
#!/bin/bash
set -ev
source activate build_env
conda install --yes numpy ipython pytest cython psutil
if [ "$PYDEVD_PYTHON_VERSION" = "2.6" ]; then
conda install --yes pyqt=4
pip install pympler==0.5
pip install pathlib2
# Django 1.7 does not support Python 2.6
else
# pytest-xdist not available for python 2.6
pip install pytest-xdist
pip install pympler
fi
if [ "$PYDEVD_PYTHON_VERSION" = "2.7" ]; then
conda install --yes pyqt=4 gevent
pip install "django>=1.7,<1.8"
pip install pathlib2
fi
if [ "$PYDEVD_PYTHON_VERSION" = "3.5" ]; then
conda install --yes pyqt=5
pip install "django>=2.1,<2.2"
fi
if [ "$PYDEVD_PYTHON_VERSION" = "3.6" ]; then
conda install --yes pyqt=5 gevent
pip install "django>=2.2,<2.3"
fi
if [ "$PYDEVD_PYTHON_VERSION" = "3.7" ]; then
conda install --yes pyqt=5 matplotlib
# Note: track the latest web framework versions.
pip install "django"
pip install "cherrypy"
fi
pip install untangle
pip install scapy==2.4.0
| true
|
00580bb4987dfd03a22dc34727a8c01965211285
|
Shell
|
amnawaseem/Final_dts_initrd
|
/script_size.sh
|
UTF-8
| 407
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
cd /home/amna/Thesis/initrd/
find . | cpio -H newc -o > ../initramfs.cpio
cd /home/amna/Thesis
cat initramfs.cpio | gzip > initramfs-debug.gz
cp initramfs-debug.gz /home/amna/Thesis/Phidias_access
cd /home/amna/Thesis/Phidias_access
size=$(du -b /home/amna/Thesis/Phidias_access/initramfs-debug.gz | awk '{print $1}')
echo $size
var1=$size
sum=$((var1 + 167772160))
echo "obase=16; $sum"| bc
| true
|
aae837975c630bb885d1c15e6c80caa40a52475d
|
Shell
|
terzo/Euetelscope_v00-09-02_fork
|
/external/eudaq/tags/v01-00-00/bin/.svn/text-base/svnswitch.svn-base
|
UTF-8
| 1,201
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
DEF_REPO1="svn+ssh://"
DEF_REPO2="login.hepforge.org/hepforge/svn/eudaq"
if [ "$2" == "" ]; then
echo "Usage: svnswitch <newrepo> <svn-command> [svn-params]..."
echo " <newrepo> can be:"
echo " - Use $DEF_REPO1$DEF_REPO2"
echo " -USER Use ${DEF_REPO1}USER@$DEF_REPO2"
echo " otherwise <newrepo> is used unmodified as the repository"
echo " e.g. svnswitch -corrin ci -m \"Commit message\""
exit 1
fi
if [ "$1" == "-" ]; then
REPO=$DEF_REPO1$DEF_REPO2
elif [ "${1}" == "-${1#-}" ]; then
REPO=$DEF_REPO1${1#-}@$DEF_REPO2
else
REPO="$1"
fi
OLDREPO=`svn info | grep '^Repository Root: ' | sed 's/^[^:]*: //'`
if [ "$OLDREPO" == "" ]; then
OLDREPO="http://svn.hepforge.org/eudaq"
echo "Unable to detect repository root (you probably have an old svn installed)"
echo "assuming it is: http://svn.hepforge.org/eudaq"
fi
shift
echo "Switching from $OLDREPO to $REPO"
if ! svn switch --relocate "$OLDREPO" "$REPO"; then
echo "Error: Unable to switch repository"
exit 1
fi
echo "Performing command: svn $@"
if svn "$@"; then
echo OK
else
echo Failed
fi
echo "Returning repository back to original setting"
svn switch --relocate "$REPO" "$OLDREPO"
| true
|
d828e751c9c066d3e7d143fd409ba448e4a56141
|
Shell
|
LeulBM/blog
|
/newPost.sh
|
UTF-8
| 1,211
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
read -p "Title: " title
read -p "Date [$(date +%Y-%m-%d)]: " date
read -p "Categories (space-separated): " categories
read -p "Description: " description
read -p "Post Image URL (full size): " img
read -p "Post Image URL (smaller): " img_sm
read -p "Author Name: " author_name
read -p "Author Image URL: " author_img
read -p "Author Bio: " author_bio
read -p "Author E-Mail: " author_email
read -p "Author GitHub URL: " author_github
read -p "Author LinkedIn URL: " author_linkedin
if [ -z "$date" ] # Default to current date
then
date=$(date +%Y-%m-%d)
fi
title_url=$(echo "$title" | tr " " "-" | tr '[:upper:]' '[:lower:]')
function add {
printf -- "$1\n" >> "_posts/$date-$title_url".md
}
# Append to file
add "---"
add "layout: post"
add "title: $title"
add "date: $date"
add "categories:"
for category in $categories
do
add " - $category"
done
add "description: $description"
add "image: $img"
add "image-sm: $img_sm"
add "author: $author_name"
add "author-image: $author_img"
add "author-bio: $author_bio"
add "author-email: $author_email"
add "author-social:"
add " github: $author_github"
add " linkedin: $author_linkedin"
add "---\n"
add "Post content goes here!"
| true
|
1f06df98aaf6c79db801eb8518d85c6d1f22ea75
|
Shell
|
Pepek25/gitcid
|
/.gc/.gc-git-hooks/post-receive.old.sample
|
UTF-8
| 3,014
| 4
| 4
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/usr/bin/env bash
gc_post_receive_get_ref_name() {
# --- Arguments
oldrev=$(git rev-parse $1)
newrev=$(git rev-parse $2)
refname="$3"
# --- Interpret
# 0000->1234 (create)
# 1234->2345 (update)
# 2345->0000 (delete)
if expr "$oldrev" : '0*$' >/dev/null
then
change_type="create"
else
if expr "$newrev" : '0*$' >/dev/null
then
change_type="delete"
else
change_type="update"
fi
fi
# --- Get the revision types
newrev_type=$(git cat-file -t $newrev 2> /dev/null)
oldrev_type=$(git cat-file -t "$oldrev" 2> /dev/null)
case "$change_type" in
create|update)
rev="$newrev"
rev_type="$newrev_type"
;;
delete)
rev="$oldrev"
rev_type="$oldrev_type"
;;
esac
# The revision type tells us what type the commit is, combined with
# the location of the ref we can decide between
# - working branch
# - tracking branch
# - unannoted tag
# - annotated tag
case "$refname","$rev_type" in
refs/tags/*,commit)
# un-annotated tag
refname_type="tag"
short_refname=${refname##refs/tags/}
;;
refs/tags/*,tag)
# annotated tag
refname_type="annotated tag"
short_refname=${refname##refs/tags/}
# change recipients
if [ -n "$announcerecipients" ]; then
recipients="$announcerecipients"
fi
;;
refs/heads/*,commit)
# branch
refname_type="branch"
short_refname=${refname##refs/heads/}
;;
refs/remotes/*,commit)
# tracking branch
refname_type="tracking branch"
short_refname=${refname##refs/remotes/}
echo >&2 "*** Push-update of tracking branch, $refname"
return 1
;;
*)
# Anything else (is there anything else?)
echo >&2 "*** Unknown type of update to $refname ($rev_type)"
return 1
;;
esac
GITCID_REF_NAME="${short_refname}"
printf '%b' "${GITCID_REF_NAME}"
}
gc_git_hook_post_receive() {
GITCID_VERBOSE_OUTPUT="y"
GITCID_DIR=${GITCID_DIR:-".gc/"}
source "${GITCID_DIR}deps.sh" $@
res_import_deps=$?
if [ $res_import_deps -ne 0 ]; then
gitcid_log_warn "${BASH_SOURCE[0]}" $LINENO "Failed importing GitCid dependencies. Things might not work properly, so you might want to address the issue."
fi
gitcid_log_info "${BASH_SOURCE[0]}" $LINENO "Invoked git hook: ${BASH_SOURCE[0]} $@"
if [ -n "$1" -a -n "$2" -a -n "$3" ]; then
GITCID_REF_NAME="$(gc_post_receive_get_ref_name $2 $3 $1)"
else
while read oldrev newrev refname
do
GITCID_REF_NAME="$(gc_post_receive_get_ref_name $oldrev $newrev $refname)" || continue
done
fi
gitcid_log_info "${BASH_SOURCE[0]}" $LINENO "Got ref name: ${GITCID_REF_NAME}"
source "${GITCID_DIR}run.sh" -d $@
res_import_deps=$?
if [ $res_import_deps -ne 0 ]; then
gitcid_log_err "${BASH_SOURCE[0]}" $LINENO "Failed running GitCid pipeline. Exiting with error code:\n\
${res_import_deps}"
return ${res_import_deps}
fi
gitcid_log_info "${BASH_SOURCE[0]}" $LINENO "The git hook finished successfully: ${BASH_SOURCE[0]} $@"
}
gc_git_hook_post_receive "$@"
| true
|
f66104113c59bd21258b0743cadd575339430d0d
|
Shell
|
budenny/dotfiles
|
/bootstrap.sh
|
UTF-8
| 591
| 3.453125
| 3
|
[] |
no_license
|
#! /usr/bin/env zsh
function clone_repo()
{
url=$1
dest=$2
if [ -d "${dest}" ]; then
echo "${url} already cloned"
else
git clone "${url}" "${dest}" --depth 1
fi
}
# oh-my-zsh
clone_repo https://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh
# syntax highlighting
clone_repo https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.zsh-syntax-highlighting
# autosuggestions
clone_repo https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions
# setup zsh config
ln -sf "$(pwd)/zshrc" ~/.zshrc
source ~/.zshrc
echo "done"
| true
|
65c6a5692ac13f5e633a20c6e95880f1ea989050
|
Shell
|
eoli3n/dotfiles
|
/roles/wofi/templates/powermenu.sh.j2
|
UTF-8
| 605
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
action=$(echo -e "suspend\nlock\nlogout\nshutdown\nreboot" | wofi -d -p "power:" -L 7)
if [[ "$action" == "lock" ]]
then
swaylock-fancy --font "JetBrains-Mono-Light"
fi
if [[ "$action" == "suspend" ]]
then
{% if ansible_distribution_release == "void" %}
swaylock-fancy --font "JetBrains-Mono-Light" && sudo zzz
{% else %}
swaylock-fancy --font "JetBrains-Mono-Light" && systemctl suspend
{% endif %}
fi
if [[ "$action" == "logout" ]]
then
swaymsg exit
fi
if [[ "$action" == "shutdown" ]]
then
shutdown now
fi
if [[ "$action" == "reboot" ]]
then
reboot
fi
| true
|
70d293cc91e4b173937cd5c86e3936c22e1793a7
|
Shell
|
ryan-williams/head-tail-helpers
|
/tail-f-with-lines
|
UTF-8
| 120
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
if [ $# -gt 1 ] && isnum "$1"; then
num=$1
shift
tail -n $num -f "$@"
else
tail -f "$@"
fi
| true
|
cc64b4a7de20c6f212866ea13637fba3aa4edb45
|
Shell
|
YCP-Swarm-Robotics-Capstone-2020-2021/SRCSim
|
/missions/launch_vehicle_test.sh
|
UTF-8
| 4,438
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash -e
#-------------------------------------------------------
# Part 1: Check for and handle command-line arguments
#-------------------------------------------------------
export ID="Dolphin0"
TIME_WARP=1
NUM_BOTS=0
BUILD_MODE=0
KAPPA=1
DT=1
JUST_MAKE="no"
LAUNCH_GUI="yes"
version=0.0.1
print_help(){
echo
echo "This is a script that will run multiple vehicle simulations. Pass the below arguments to customize how the simulation launches. The most importanct parameter is the num_bots flag."
echo " --num_bots | -n This flag sets how many robots you want the simulation to launch."
echo " --keep | -k Pass this flag to ensure that the world files are not deleted."
echo " --build | -b Pass this flag to only build"
echo " --dt | -d Pass this flag to edit dt"
echo " --kappa | -K Pass this flag to edit kappa"
echo
exit 0
}
while [[ "$1" =~ ^- && ! "$1" == "--" ]]; do case $1 in
-V | --version )
echo $version
exit
;;
-h | --help )
print_help
;;
-k | --keep )
print_help
;;
-n | --num_bots )
shift;
NUM_BOTS=$1
echo "Num Bots = $NUM_BOTS"
;;
-b | --build )
BUILD_MODE=1
echo "Build only"
;;
-d | --dt )
shift;
DT=$1
;;
-K | --kappa )
shift;
KAPPA=$1
;;
* )
print_help
exit 1
;;
esac; shift; done
if [[ "$1" == '--' ]]; then shift; fi
#get version number
version_number=`git rev-parse --short HEAD`
commit_message=`git show-branch --no-name HEAD`
cd ../missions/
#-------------------------------------------------------
# Part 2: Create the .moos and .bhv files.
#-------------------------------------------------------
#PATH='../../../missions/$title'
if [[ ! -d "./logs" ]]; then
mkdir logs
fi
if [[ ! BUILD_MODE -eq 1 ]]; then
cd ./logs
mission_dir=$(date +'%F-%H-%M-%S_Mission_Numbots-')
mission_dir=$mission_dir$NUM_BOTS
mkdir $mission_dir
cd ../
fi
VNAME=$ID # The first vehicle Community
V1PORT="8000"
GCSIP="localhost"
GCSPORT=9000
BROADCASTNUM=1
#nsplug meta_vehicle.moos targ_$VNAME2.moos -f WARP=$TIME_WARP \
# VNAME=$VNAME2 VPORT="8310" \
# GCSIP=$GCSIP GCSPORT=$GCSPORT \
# BROADCASTNUM=$BROADCASTNUM VIP=$VIP
VPORT=8000
UPDATEPOSE=""
VIP="localhost"
cat > plug_VehiclepShare.moos <<EOF
ProcessConfig = pShare
{
Input=route=localhost:\$(VPORT)
Input=route=multicast_\$(BROADCASTNUM)
Output=src_name=PROC_WATCH_SUMMARY,dest_name=PROC_WATCH_DOLPHIN,route=\$(GCSIP):\$(GCSPORT)
Output=src_name=Narwhal_Current_State,dest_name=Current_State,route=\$(GCSIP):\$(GCSPORT)
Output=src_name=Reg_In,route=\$(GCSIP):\$(GCSPORT)
Output=src_name=Speed_Curv,route=\$(GCSIP):\$(GCSPORT)
Output=src_name=WCA_MESSAGE,route=\$(GCSIP):\$(GCSPORT)
Output=src_name=VERSION_NUMBER,route=\$(GCSIP):\$(GCSPORT)
EOF
PORT=8000
VIP2="192.168.1."
VIPEND=110
for ((i=0 ; i < $NUM_BOTS ; i++)); do
VIPEND=$(($VIPEND+5))
cat >> plug_VehiclepShare.moos <<EOF
Output=src_name=Dolphin${i}_Neighbor_Zeta,dest_name=Neighbor_Zeta,route=${VIP2}${VIPEND}:$PORT
EOF
done
cat >> plug_VehiclepShare.moos <<EOF
}
EOF
PORT=8000
nsplug meta_vehicle.moos targ_Dolphin$i.moos -f WARP=$TIME_WARP \
VNAME=$VNAME VPORT=$PORT \
GCSIP=$GCSIP GCSPORT=$GCSPORT \
BROADCASTNUM=$BROADCASTNUM VIP=$VIP \
KAPPA=$KAPPA DT=$DT \
LOG_DIR=$mission_dir VERSION=$version_number \
MESSAGE=$commit_message
#-------------------------------------------------------
# Part 3: Build the modules
#-------------------------------------------------------
cd ../modules
for i in ./*; do
if [[ -d $i ]]; then
if [[ "$i" != "./Images" && "$i" != "./SwarmHandler" && "$i" != "./StageInterface" && "$I" != "./ControlGUI" && "$i" != "./UserInterface" ]]; then
cd $i
qmake
make
cd ..
fi
fi
done
cd ../missions
if [[ BUILD_MODE -eq 1 ]]; then exit 1; fi
#-------------------------------------------------------
# Part 4: Launch the processes
#-------------------------------------------------------
printf "Launching ${VNAME} MOOS Community (WARP=%s) \n" $TIME_WARP
pAntler targ_${VNAME}.moos >& /dev/null &
printf "Done \n"
| true
|
ce7eb967031c865853c54d2221be5cdd5d751406
|
Shell
|
bigml/mbase
|
/doc/script/sc
|
UTF-8
| 124
| 2.625
| 3
|
[] |
no_license
|
#!/bin/sh
# sc new.jpg -s -d 3
PWD=`pwd`
FPATH=${PWD}/../pop/fly/pic/
FNAME=$1
shift
OP=$*
scrot ${OP} ${FPATH}${FNAME}
| true
|
1ad54cd08b8d973eeb4f12574236d5155739cae2
|
Shell
|
ahmedeljami/pytools
|
/ambari_cancel_all_requests.sh
|
UTF-8
| 1,419
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2016-09-27 17:25:36 +0100 (Tue, 27 Sep 2016)
#
# https://github.com/harisekhon/pytools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
set -euo pipefail
[ -n "${DEBUG:-}" ] && set -x
AMBARI_HOST="${1:-${AMBARI_HOST:-localhost}}"
AMBARI_PORT="${2:-${AMBARI_PORT:-8080}}"
AMBARI_USER="${3:-${AMBARI_USER:-admin}}"
AMBARI_PASSWORD="${4:-${AMBARI_PASSWORD:-admin}}"
AMBARI_CLUSTER="${5:-${AMBARI_CLUSTER:-Sandbox}}"
usage(){
echo "Very simple script to cancel all Ambari op requests
usage: ${0##*/} <ambari_host> <ambari_port> <username> <password> <cluster_name>"
exit 1
}
if [ $# -gt 0 ]; then
usage
fi
echo "querying Ambari for request IDs"
curl -u "$AMBARI_USER:$AMBARI_PASSWORD" "http://$AMBARI_HOST:$AMBARI_PORT/api/v1/clusters/$AMBARI_CLUSTER/requests" |
grep id |
awk '{print $3}' |
while read id; do
echo "requesting cancellation of request $id"
curl -u "$AMBARI_USER:$AMBARI_PASSWORD" -i -H "X-Requested-By: $AMBARI_USER ($USER)" -X PUT -d '{"Requests":{"request_status":"ABORTED","abort_reason":"Aborted by user"}}' "http://$AMBARI_HOST:$AMBARI_PORT/api/v1/clusters/$AMBARI_CLUSTER/requests/$id"
done
| true
|
d09dfa9621a2258f0e80c69380ad0b5ebe70afbd
|
Shell
|
venudhar1/EBR_clientupgrade
|
/Disk_relayout.sh
|
UTF-8
| 10,947
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Venudhar Chinthakuntla
# Script to remove metadevice and recreate new devices as per the NBU latest version minimum space requirement.
BASEDIR=/usr/openv
TEMP_VARTMP=/var/tmp
TEMP_CRASH=/var/crash
TEMP_SWAP=/var/run
BASEDIR_LOG=${BASEDIR}/netbackup/logs
PLATFORM=`uname -s`
BASE_TAR="CORE_DIR_BKUP_$(date +%Y%m%d).tar"
_max_tar_size=`du -sk ${BASEDIR}|awk '{print $1}'`
_req_tar_space=`expr ${_max_tar_size} + 204800`
_underlying_openvmnt=`df -k ${BASEDIR}|awk '/\// {print $NF}'`
_underlying_nblogsmnt=`df -k ${BASEDIR_LOG}|awk '/\// {print $NF}'`
_usropenvnetbackuplogssp=`df -k ${BASEDIR_LOG}|awk -F/ '/\// {print $5}'|awk '{print $1}'`
_usropenvnetbackuplogsmd=`metastat -p ${_usropenvnetbackuplogssp}|awk '/p/ {print $3}'`
[ $? -ne 0 ] && exit 11
RC=0
space_identifier (){
# platform_identifier () - Identify avalible disk space.
#
# Function to ....
#
# paramaters: Pass the mount point information
#
# returns: [NONE]
#
# requires: [NONE]
#
# side effects: [NONE]
mntarg=$1
minsparg=$2
if [[ (X$mntarg == "X") || (X$minsparg == "X") ]]; then
echo "paramaters not passed to space check. exiting..."
RC=16
return 16
fi
if [[ ${PLATFORM} == SunOS ]]; then
if [ "$(df -k ${mntarg} |awk '/\// {print $4}')" -ge ${minsparg} ] ;then
echo pass
else
echo fail
fi
elif [[ ${PLATFORM} == Linux ]]; then
if [ "$(df -kP ${mntarg} |awk '/\// {print $4}')" -ge ${minsparg} ] ;then
echo pass
else
echo fail
fi
fi
}
go_no-go () {
#check 1 should return pass
#check 2 should return pass
# then good to proceed else exit
#check1
RC=0
if [[ $(space_identifier ${TEMP_VARTMP} ${_req_tar_space}) == "pass" ]] ; then
_available_mnt=${TEMP_VARTMP}
elif [[ $(space_identifier ${TEMP_SWAP} ${_req_tar_space}) == "pass" ]] ; then
_available_mnt=${TEMP_SWAP}
elif [[ $(space_identifier ${TEMP_CRASH} ${_req_tar_space}) == "pass" ]] ; then
_available_mnt=${TEMP_CRASH}
else
RC=13
fi
#1 - Space Cheker
if [[ $RC -ne 0 ]]; then
echo "FAILED: Space check failed. exiting.."
exit 13
elif [[ ${_available_mnt} != "" ]]; then
echo "PASSED: Space is avalible to backup /usr/openv"
fi
#2 - Scenario 1 checker
if [ ${_underlying_openvmnt} == \/ ] && [ X${_underlying_openvmnt} != X ] ; then
if [ ${_underlying_nblogsmnt} != \/ ] && [ X${_underlying_nblogsmnt} != X ] ; then
echo "PASSED: netbackup/logs is not under root"
else
RC=15
fi
echo "PASSED: /usr/openv is under /"
else
RC=15
fi
}
NB_shutdown () {
# NB_shutdown () - bring down the NBU processess before performing backup and upgrade
#
# Function to ....
#
# paramaters: [NONE]
#
# returns: [ 0 or 17 ]
#
# requires: [NONE]
#
# side effects: [NONE]
#Checking if NBackup processess are running
if [[ $(/usr/openv/netbackup/bin/bpps -x|awk '/vnetd/||/bpcd/||/pbx/ {print $8$9}'|wc -l) -le "4" ]] ; then
echo "INFO: Stoping Netbackup process...."
[ -f "/etc/init.d/netbackup" ] && "/etc/init.d/netbackup" stop > /dev/null 2>&1 && sleep 1
"/usr/openv/netbackup/bin/goodies/netbackup" stop > /dev/null 2>&1
sleep 1
"/opt/VRTSpbx/bin/vxpbx_exchanged" stop > /dev/null 2>&1
sleep 1
#checking if processess have stopped succesfully
if [[ $(/usr/openv/netbackup/bin/bpps -x|awk '/vnetd/||/bpcd/||/pbx/ {print $8$9}'|wc -l) -eq "0" ]]; then
echo "PASSED: Netbackup Stopped succesfully"
RC=0
else
echo "FAILED: Unable to shutdown the processess. Manually stop the processess and re-run script"
RC=17
fi
fi
}
backup_core_files () {
# backup_core_dir () - Create the full backup file (tar) of the /usr/openv into /var/crash.
#
# Function to ....
#
# paramaters: [NONE]
#
# returns: [NONE]
#
# requires: [ Backup should not return error. Incase of NB Upgrade issues, we would have to restore the tar. ]
#
# side effects: [NONE]
if [[ -f "${_available_mnt}/${BASE_TAR}" ]] ; then
echo "INFO: A Backup was taken today. Ignoring taking of backup again"
else
echo "INFO: taking backup of curret NB version....."
tar -cf "${_available_mnt}/${BASE_TAR}" "$BASEDIR" > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "ERROR: Failed to backup the [$BASEDIR]."
RC=14
else
echo "PASSED: openv backup was taken successfully"
fi
fi
}
basix_math () {
# backup_core_dir () - Create the full backup file (tar) of the /usr/openv into /var/crash.
#
# Function to ....
#
# paramaters: [NONE]
#
# returns: [NONE]
#
# requires: [NONE]
#
# side effects: [NONE]
#total netbackup logs is to be divided in 90:10 ratio
multiplier=1048576
_A=`df -k ${BASEDIR_LOG}|awk '/\/md/ {print $2}'`
_B=`expr ${_A} / 100`
_C=`expr ${_B} \* 70` #Amount of space to be assigned to /usr/openv/ 90 % of total
_D=`expr ${_B} \* 30` #Amount of space to be assigned to /usr/openv/netbackup/logs 10% of total
size_for_openv=`expr ${_C} / ${multiplier}`
echo "calculated size for openv is ${size_for_openv}g"
size_for_nblogs=`expr ${_D} / ${multiplier}`
echo "calculated size for nb logs is ${size_for_nblogs}g"
}
free_sp_identifier () {
# backup_core_dir () - Create the full backup file (tar) of the /usr/openv into /var/crash.
#
# Function to ....
#
# paramaters: [NONE]
#
# returns: [NONE]
#
# requires: [ Backup should not return error. Incase of NB Upgrade issues, we would have to restore the tar. ]
#
# side effects: [NONE]
for i in `metastat -p |awk '{print $1}'|tr -d :[[:alpha:]]:`;
do [[ $i -eq $1 ]] && echo "failed";
done
}
nb_softslice_relayout () {
# backup_core_dir () - Create the full backup file (tar) of the /usr/openv into /var/crash.
#
# Function to ....
#
# paramaters: [NONE]
#
# returns: [NONE]
#
# requires: [ Backup should not return error. Incase of NB Upgrade issues, we would have to restore the tar. ]
#
# side effects: [NONE]
umount "/usr/openv/netbackup/logs"
[ $? -eq 0 ] && echo "PASSED: unmounted netbackuplogs filesystem" || (echo "FAILED: unmount netbackuplogs filesystem"; exit 4)
echo "=================="
echo "Executing command \"metaclear ${_usropenvnetbackuplogssp}\" "
read -p "Press enter to continue"
metaclear ${_usropenvnetbackuplogssp}
[ $? -eq 0 ] && echo "PASSED: Cleared netbackup/logs softpartition ${_usropenvnetbackuplogssp}" || (echo "FAILED: Cleared netbackup/logs softpartition ${_usropenvnetbackuplogssp}" ; exit 4)
echo "=================="
echo "Creating Softpartition for nblogs. Executing command \"metainit $locked_nblog -p ${_usropenvnetbackuplogsmd} ${size_for_nblogs}g \" "
read -p "Press enter to continue"
metainit $locked_nblog -p ${_usropenvnetbackuplogsmd} ${size_for_nblogs}g #for /usr/openv/netbackup/logs
echo "=================="
echo "Creating Softpartition for openv.Executing command \" metainit $locked_openv -p ${_usropenvnetbackuplogsmd} ${size_for_openv}g \" "
read -p "Press enter to continue"
metainit $locked_openv -p ${_usropenvnetbackuplogsmd} ${size_for_openv}g #for /usr/openv
echo "INFO: softpartitions created "
metastat -p $locked_nblog $locked_openv # -- to verify if we got required ones
echo "=================="
echo "FS creation for nblogs. Executing command \" newfs /dev/md/rdsk/${locked_nblog}\" "
read -p "Press enter to continue"
newfs /dev/md/rdsk/${locked_nblog}
echo "FS creation for openv. Executing command \"newfs /dev/md/rdsk/${locked_openv}\" "
read -p "Press enter to continue"
newfs /dev/md/rdsk/${locked_openv}
echo "=================="
cd "/usr"
echo "INFO: Cleaning up the openv directory"
cd "/usr/openv";
rm -rf *
cd "/"
[ $? -ne 0 ] && (echo "Unable to cleanup openv. mounting of openv might fail")
umask 022;
[ ! -d /usr/openv ] && mkdir /usr/openv;
cd "/"
echo "Mounting filesystems.."
mount -f ufs /dev/md/dsk/${locked_openv} ${BASEDIR}
[ $? -ne 0 ] && (echo "Unable to mount FS, exiting.." ) && exit 4
echo "INFO: recreating /usr/openv/netbackup/logs directory"
mkdir -p /usr/openv/netbackup/logs
echo "INFO: mounting /usr/openv/netbackup/logs.."
mount ${BASEDIR_LOG}
[ $? -ne 0 ] && echo "Unable to mount FS, exiting.." && exit 4 || RC=0
}
untar_and_start_NB () {
#Untaring
if [[ -f "${_available_mnt}/${BASE_TAR}" ]] ; then
cd ${BASEDIR}
echo "INFO: Restoring the openv data from backup.."
tar -xf ${_available_mnt}/${BASE_TAR}
echo "SUCCESS: openv data restored"
fi
echo "INFO: Atempting to bring up NB process....."
"/opt/VRTSpbx/bin/vxpbx_exchanged" start > /dev/null 2>&1
sleep 1
"/usr/openv/netbackup/bin/goodies/netbackup" start > /dev/null 2>&1
sleep 1
if [[ $(/usr/openv/netbackup/bin/bpps -x|awk '/vnetd/||/bpcd/||/pbx/ {print $8$9}'|wc -l) -le 4 ]] ; then
echo "SUCCESS: NB Process started up successfully :)"
RC=0
else
echo "INFO: Unable to start NB process.Please start manually"
RC=19
fi
}
#MAIN
go_no-go
if [ $RC -ne 0 ]; then
echo "didnt match the scenarion 1. Exiting.."
exit 15
fi
echo "=================="
NB_shutdown
NB_shutdown
if [ $RC -ne 0 ]; then
echo "unable to shutdown NB processess. Exiting.."
exit 17
fi
echo "=================="
backup_core_files
if [ $RC -ne 0 ] ; then
echo "unable to take backup. Exiting.."
exit 14
fi
echo "=================="
basix_math
echo "=================="
_output=failed
sp_for_logs=`echo ${_usropenvnetbackuplogssp}|tr -d '[[:alpha:]]'`
sp_for_openv=$sp_for_logs
while [[ $_output == failed ]]; do
let sp_for_openv=sp_for_openv+1
_output=`free_sp_identifier ${sp_for_openv}`
#let sp_for_openv=sp_for_openv+1
done
echo "=================="
locked_nblog=${_usropenvnetbackuplogssp}
locked_openv=d$sp_for_openv
echo "md name which is going to use for NBLOGS is - $locked_nblog"
echo "md name which is going to use for OPENV is - $locked_openv"
metastat -p|awk '{print $1}'
echo " Are you sure that there are no duplicate meta device exists with the identified new names for NBLOGS and OPENV?"
read -p "Press enter to continue"
echo "checking open files and killing open NB process"
read -p "Press enter to continue"
echo "=================="
nb_softslice_relayout
echo "=================="
if [ $RC -ne 0 ] ; then
echo "Failed at soft slice creation.."
exit 21
fi
echo "=================="
untar_and_start_NB
#_END_
| true
|
f5955c5b3edb9480a14da70fbee934ad64c648ab
|
Shell
|
yaofuzhou/JHUGen
|
/JHUGenMELA/MELA/setup.sh
|
UTF-8
| 1,326
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
(
set -euo pipefail
cd $(dirname $0)
MELADIR="."
DATA_LIB_DIR="slc6_amd64_gcc530"
export SCRAM_ARCH=$DATA_LIB_DIR
printenv () {
if [ -z "${LD_LIBRARY_PATH+x}" ]; then
end=''
else
end=':$LD_LIBRARY_PATH'
fi
echo "export LD_LIBRARY_PATH=$(readlink -f $MELADIR)/data/$DATA_LIB_DIR$end"
if [ -z "${PYTHONPATH+x}" ]; then
end=''
else
end=':$PYTHONPATH'
fi
echo "export PYTHONPATH=$(readlink -f $MELADIR)/python$end"
}
if [[ "$#" -ge 1 ]] && [[ "$1" == "env" ]]; then
printenv
elif [[ "$#" -ge 1 ]] && [[ "$1" == *"clean"* ]]; then
COLLIER/setup.sh "$@"
make clean
pushd $MELADIR"/fortran/"
make clean
rm -f "../data/"$DATA_LIB_DIR"/libjhugenmela.so"
popd
make clean
else
COLLIER/setup.sh "$@"
tcsh data/retrieve.csh $DATA_LIB_DIR mcfm_705
./downloadNNPDF.sh
pushd $MELADIR"/fortran/"
make all
if mv libjhugenmela.so "../data/"$DATA_LIB_DIR"/"; then
echo
echo "...and you are running setup.sh, so this was just done."
echo
popd
make
echo
echo "remember to:"
echo
printenv
echo
else
echo
echo "ERROR: something went wrong in mv, see ^ error message"
echo
popd
exit 1
fi
fi
)
| true
|
3e644a0821afe951c6f19d88aa3d333be812fa25
|
Shell
|
h1f1x/cookiecutter-iac-terraform
|
/tests/integration-tests.sh
|
UTF-8
| 1,475
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
set -u
DST_BASE_DIR=../build/tests/integration
PROJECT_SLUG='cookiecutter_iac_terraform_inttest'
terraform_dir_name='terraform'
source common.source
function create_project_from_cookiecutter() {
cookiecutter --output-dir=${DST_BASE_DIR}/ --no-input $(pwd)/../ \
project_name='Cookiecutter IAC Terraform Integration Tests' \
project_slug=${PROJECT_SLUG} \
terraform_dir_name=${terraform_dir_name} \
use_docker="n"
}
rm -rf ${DST_BASE_DIR}
create_project_from_cookiecutter
pushd . > /dev/null
env_backup_and_set
echo $STAGE
export TF_IN_AUTOMATION=1
export TF_LOG=TRACE
export TF_LOG_PATH=./terraform.log
export TF_VAR_project_name="${PROJECT_SLUG}"
export TF_VAR_owner="${USER}"
export TF_VAR_environment="${STAGE}"
cd ${DST_BASE_DIR}/${terraform_dir_name}
just info
hr; echo "[${PROJECT_SLUG}] Testing just target: init ..."; hr
just init
hr; echo "[${PROJECT_SLUG}] Testing just target: apply ..."; hr
just apply
hr; echo "[${PROJECT_SLUG}] Testing just target: show ..."; hr
just show
hr; echo "[${PROJECT_SLUG}] Testing just target: state list ..."; hr
just state list
hr; echo "[${PROJECT_SLUG}] Testing just target: output ..."; hr
just output
hr; echo "[${PROJECT_SLUG}] Testing just target: destroy ..."; hr
just destroy
popd > /dev/null
env_restore
hr;
echo -e "\e[32m[*] All tests passed for '${PROJECT_SLUG}' !"; hr
echo "[!] You will find the build in: ${DST_BASE_DIR}/${terraform_dir_name}"
| true
|
dfa3683ee0659a5536bcdc725f6310cb98076206
|
Shell
|
atrtnkw/sph
|
/analysis.heos/movie/analysis.sh
|
UTF-8
| 338
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
if test $# -ne 5
then
echo "sh $0 <nthread> <m> <n/1k> <ifile> <ofile>"
exit
fi
nthrd=$1
tmass=$2
nreso=$3
ifile=$4
ofile=$5
nproc=1
nptcl=`echo "$tmass * 10 * $nreso * 1024" | bc`
export OMP_NUM_THREADS=$1
echo "Nproc: $nproc"
echo "Nthrd: $OMP_NUM_THREADS"
echo "Nptcl: $nptcl"
mpirun -np $nproc ./run $nptcl $ifile $ofile
| true
|
fbdb67ca2715f42852e286a6ad04116eef7cdaa9
|
Shell
|
minkcv/pinballnx
|
/collisionshapes/imgtopoints.sh
|
UTF-8
| 154
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
FILE=$1
if [ "$FILE" = "" ]; then
echo "usage: ./imgtopoints.sh myimage.bmp"
exit
fi
potrace -a 0 $FILE -b svg -o tmp.svg
./totris.py 1
| true
|
659e4adafe864d270a0f02a797496b87da555b38
|
Shell
|
Ptijohn/nightclazzZenika
|
/2-start-mongo/docker-run.sh
|
UTF-8
| 1,485
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
mkdir -p data/{db1,db2,db3}
# Here we start our main MongoDB instance, in >3.6
docker run -d -p 27017:27017 -v $(pwd)/data/db1:/data/db1 \
-u 1000:1000 -h mongodb-nightclazz1 --network netnightclazz \
--network-alias nightclazz-mongo1 --name mongodb-nightclazz1 \
mongo:3.6.1 --port=27017 --dbpath /data/db1 --replSet myReplica --bind_ip_all --logpath /data/db1/mongod.log
docker run -d -p 27018:27018 -v $(pwd)/data/db2:/data/db2 \
-u 1000:1000 -h mongodb-nightclazz2 --network netnightclazz \
--network-alias nightclazz-mongo2 --name mongodb-nightclazz2 \
mongo:3.6.1 --port=27018 --dbpath /data/db2 --replSet myReplica --bind_ip_all --logpath /data/db2/mongod.log
docker run -d -p 27019:27019 -v $(pwd)/data/db3:/data/db3 \
-u 1000:1000 -h mongodb-nightclazz3 --network netnightclazz \
--network-alias nightclazz-mongo3 --name mongodb-nightclazz3 \
mongo:3.6.1 --port=27019 --dbpath /data/db3 --replSet myReplica --bind_ip_all --logpath /data/db3/mongod.log
#sleep 3
# Here we initialize the replica
docker exec -it mongodb-nightclazz1 mongo --eval 'rs.initiate()'
docker exec -it mongodb-nightclazz1 mongo --eval 'rs.add("nightclazz-mongo1:27017")'
docker exec -it mongodb-nightclazz1 mongo --eval 'rs.add("nightclazz-mongo2:27018")'
docker exec -it mongodb-nightclazz1 mongo --eval 'rs.addArb("nightclazz-mongo3:27019")'
sleep 3
docker exec -it mongodb-nightclazz1 mongo --eval 'db.adminCommand( { setFeatureCompatibilityVersion: "3.6" } )'
| true
|
fa6569696496174ade75dcf7f22e37f763285bc7
|
Shell
|
cppforlife/bosh-ipv6-acceptance-tests
|
/ci/test-hybrid-director.sh
|
UTF-8
| 4,013
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
set -e -x
echo "This test ensures that Director can be deployed in hybrid IPv4+IPv6 configuration"
apt-get -y update
apt-get -y install wget
echo "-----> `date`: Starting docker"
source bosh-ipv6-acceptance-tests/ci/docker-lib.sh
export OUTER_CONTAINER_IP=$(ruby -rsocket -e 'puts Socket.ip_address_list
.reject { |addr| !addr.ip? || addr.ipv4_loopback? || addr.ipv6? }
.map { |addr| addr.ip_address }')
export DOCKER_HOST="tcp://${OUTER_CONTAINER_IP}:4243"
docker_certs_dir=$(mktemp -d)
start_docker ${docker_certs_dir}
echo "-----> `date`: Deploying IPv4 Director"
bosh create-env bosh-deployment/bosh.yml \
-o bosh-deployment/docker/cpi.yml \
-o bosh-deployment/jumpbox-user.yml \
-o bosh-deployment/uaa.yml \
-o bosh-deployment/credhub.yml \
-v director_name=ipv4-plus-ipv6 \
-v internal_cidr=10.245.0.0/16 \
-v internal_gw=10.245.0.1 \
-v internal_ip=10.245.0.3 \
--state state.json \
--vars-store creds.yml \
-v director_name=docker \
-v docker_host=$DOCKER_HOST \
--var-file docker_tls.ca=${docker_certs_dir}/ca.pem \
--var-file docker_tls.certificate=${docker_certs_dir}/cert.pem \
--var-file docker_tls.private_key=${docker_certs_dir}/key.pem \
-o bosh-ipv6-acceptance-tests/ci/local-bosh-release.yml \
-v network=ipv4-only
export BOSH_ENVIRONMENT="https://10.245.0.3:25555"
export BOSH_CA_CERT="$(bosh int creds.yml --path /director_ssl/ca)"
export BOSH_CLIENT=admin
export BOSH_CLIENT_SECRET="$(bosh int creds.yml --path /admin_password)"
echo "-----> `date`: Update cloud config"
bosh -n update-cloud-config bosh-ipv6-acceptance-tests/ci/cloud-config.yml \
-v internal_cidr=10.245.0.0/16 \
-v internal_gw=10.245.0.1 \
-v internal_dns="['8.8.8.8']" \
-v docker_network_name=ipv4-only
bosh -n update-config --type cloud --name ipv6 bosh-ipv6-acceptance-tests/ci/cloud-config-network.yml \
-v internal_cidr=fd8d:a46c:6ec2:6709:0000:0000:0000:0000/64 \
-v internal_gw=fd8d:a46c:6ec2:6709:0000:0000:0000:0001 \
-v internal_ip=fd8d:a46c:6ec2:6709:0000:0000:0000:0006 \
-v internal_dns="['2001:4860:4860::8888']" \
-v network_name=ipv6-only \
-v docker_network_name=ipv6-only
echo "-----> `date`: Update runtime config"
bosh -n update-runtime-config bosh-deployment/runtime-configs/dns.yml \
-o bosh-ipv6-acceptance-tests/ci/local-bosh-dns.yml
echo "-----> `date`: Upload stemcell"
bosh -n upload-stemcell https://bosh.io/d/stemcells/bosh-warden-boshlite-ubuntu-trusty-go_agent?v=3541.9 \
--sha1 44138ff5e30cc1d7724d88eaa70fab955b8011bd
echo "-----> `date`: Deploy"
bosh -n -d zookeeper deploy <(wget -O- https://raw.githubusercontent.com/cppforlife/zookeeper-release/master/manifests/zookeeper.yml) \
-o bosh-ipv6-acceptance-tests/ci/zookeeper-enable-dns.yml \
-o bosh-ipv6-acceptance-tests/ci/zookeeper-two-networks.yml \
-o bosh-ipv6-acceptance-tests/ci/zookeeper-docker-cpi-disks.yml \
-o bosh-ipv6-acceptance-tests/ci/zookeeper-variable-test.yml
echo "-----> `date`: Exercise deployment"
bosh -n -d zookeeper run-errand status
bosh -n -d zookeeper run-errand smoke-tests
echo "-----> `date`: Delete deployment"
bosh -n -d zookeeper delete-deployment
echo "-----> `date`: Clean up disks, etc."
bosh -n -d zookeeper clean-up --all
echo "-----> `date`: Deleting env"
bosh delete-env bosh-deployment/bosh.yml \
-o bosh-deployment/docker/cpi.yml \
-o bosh-deployment/jumpbox-user.yml \
-o bosh-deployment/uaa.yml \
-o bosh-deployment/credhub.yml \
-v director_name=ipv4-plus-ipv6 \
-v internal_cidr=10.245.0.0/16 \
-v internal_gw=10.245.0.1 \
-v internal_ip=10.245.0.3 \
--state state.json \
--vars-store creds.yml \
-v director_name=docker \
-v docker_host=$DOCKER_HOST \
--var-file docker_tls.ca=${docker_certs_dir}/ca.pem \
--var-file docker_tls.certificate=${docker_certs_dir}/cert.pem \
--var-file docker_tls.private_key=${docker_certs_dir}/key.pem \
-o bosh-ipv6-acceptance-tests/ci/local-bosh-release.yml \
-v network=ipv4-only
| true
|
2abad1a70956af0115a551f761f243c288b7c25e
|
Shell
|
thirdgen88/ignition-docker
|
/8.1/perform-commissioning.sh
|
UTF-8
| 6,893
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
port="${GATEWAY_HTTP_PORT:-8088}"
# usage: health_check DELAY_SECS TARGET|DETAILS
# ie: health_check 60
# ie: health_check 60 RUNNING|COMMISSIONING
health_check() {
local delay=$1
local target=$2
local details="null"
if [[ "${target}" == *"|"* ]]; then
details=$(printf '%s' "${target}" | cut -d \| -f 2)
target=$(printf '%s' "${target}" | cut -d \| -f 1)
fi
# Wait for a short period for the commissioning servlet to come alive
for ((i=delay;i>0;i--)); do
raw_json=$(curl -s --max-time 3 -f "http://localhost:${port}/StatusPing" || true)
state_value=$(echo "${raw_json}" | jq -r '.["state"]')
details_value=$(echo "${raw_json}" | jq -r '.["details"]')
if [ "${state_value}" == "${target}" ] && [ "${details_value}" == "${details}" ]; then
break
fi
sleep 1
done
if [ "$i" -le 0 ]; then
echo "init | Commissioning helper function run delay (${delay}) exceeded, exiting."
exit 0
fi
}
# usage: evaluate_post_request URL PAYLOAD EXPECTED_CODE PHASE DESC
# ie: evaluate_post_request http://localhost:8088/post-step '{"id":"edition","step":"edition","data":{"edition":"'maker'"}}' 201 "Commissioning" "Edition Selection"
evaluate_post_request() {
local url="$1"
local payload="$2"
local expected_code="$3"
local phase="$4"
local desc="$5"
local response_output_file
response_output_file=$(mktemp)
local response_output
local response_code_final
# shellcheck disable=SC2034
response_output=$(curl -o "${response_output_file}" -i -H "content-type: application/json" -d "${payload}" "${url}" 2>&1)
response_code_final=$(grep -Po '(?<=^HTTP/1\.1 )([0-9]+)' < "${response_output_file}" | tail -n 1)
if [ -z "${response_code_final}" ]; then
response_code_final="NO HTTP RESPONSE DETECTED"
fi
if [ "${response_code_final}" != "${expected_code}" ]; then
echo >&2 "ERROR: Unexpected Response (${response_code_final}) during ${phase} phase: ${desc}"
cat >&2 "${response_output_file}"
exit 1
else
# Cleanup temp file
if [ -e "${response_output_file}" ]; then rm "${response_output_file}"; fi
fi
}
# usage: perform_commissioning
perform_commissioning() {
local phase="Commissioning"
local base_url="http://localhost:${port}"
local bootstrap_url="${base_url}/bootstrap"
local get_url="${base_url}/get-step"
local url="${base_url}/post-step"
local ignition_edition_current
commissioning_steps_raw=$(curl -s -f "${bootstrap_url}")
ignition_edition_current=$(echo "$commissioning_steps_raw" | jq -r '.edition')
if [ "${ignition_edition_current}" == "NOT_SET" ]; then
local edition_selection="${IGNITION_EDITION}"
if [ "${IGNITION_EDITION}" == "full" ]; then edition_selection=""; fi
local edition_selection_payload='{"id":"edition","step":"edition","data":{"edition":"'${edition_selection}'"}}'
evaluate_post_request "${url}" "${edition_selection_payload}" 201 "${phase}" "Edition Selection"
echo "init | IGNITION_EDITION: ${IGNITION_EDITION}"
# Reload commissioning steps
commissioning_steps_raw=$(curl -s -f "${bootstrap_url}")
fi
echo -n "init | Gathering required commissioning steps: "
mapfile -t commissioning_steps < <( (echo "$commissioning_steps_raw" | jq -r '.steps | keys | @sh') | tr -d \' )
echo "${commissioning_steps[*]}"
# activation
if [[ ${commissioning_steps[*]} =~ "activated" ]]; then
local activation_payload='{"id":"activation","data":{"licenseKey":"'${IGNITION_LICENSE_KEY}'","activationToken":"'${IGNITION_ACTIVATION_TOKEN}'"}}'
evaluate_post_request "${url}" "${activation_payload}" 201 "${phase}" "Online Activation"
echo "init | IGNITION_LICENSE_KEY: ${IGNITION_LICENSE_KEY}"
fi
# authSetup
if [[ ${commissioning_steps[*]} =~ "authSetup" && "${GATEWAY_PROMPT_PASSWORD}" != "1" ]]; then
local auth_user="${GATEWAY_ADMIN_USERNAME:=admin}"
local auth_salt
auth_salt=$(date +%s | sha256sum | head -c 8)
local auth_pwhash
auth_pwhash=$(printf %s "${GATEWAY_ADMIN_PASSWORD}${auth_salt}" | sha256sum - | cut -c -64)
local auth_password="[${auth_salt}]${auth_pwhash}"
local auth_payload
auth_payload=$(jq -ncM --arg user "$auth_user" --arg pass "$auth_password" '{ id: "authentication", step:"authSetup", data: { username: $user, password: $pass }}')
evaluate_post_request "${url}" "${auth_payload}" 201 "${phase}" "Configuring Authentication"
echo "init | GATEWAY_ADMIN_USERNAME: ${GATEWAY_ADMIN_USERNAME}"
if [[ -n "$GATEWAY_RANDOM_ADMIN_PASSWORD" ]]; then echo " GATEWAY_RANDOM_ADMIN_PASSWORD: ${GATEWAY_ADMIN_PASSWORD}"; fi
fi
# connections
if [[ ${commissioning_steps[*]} =~ "connections" ]]; then
# Retrieve default port configuration from get-step payload
connection_info_raw=$(curl -s -f "${get_url}?step=connections")
# Register Port Configuration
local http_port
http_port="$(echo "${connection_info_raw}" | jq -r '.data[] | select(.name=="httpPort").port')"
local https_port
https_port="$(echo "${connection_info_raw}" | jq -r '.data[] | select(.name=="httpsPort").port')"
local gan_port
gan_port="$(echo "${connection_info_raw}" | jq -r '.data[] | select(.name=="ganPort").port')"
local use_ssl="${GATEWAY_USESSL:=false}"
local port_payload='{"id":"connections","step":"connections","data":{"http":'${http_port:=8088}',"https":'${https_port:=8043}',"gan":'${gan_port:=8060}',"useSSL":'${use_ssl}'}}'
evaluate_post_request "${url}" "${port_payload}" 201 "${phase}" "Configuring Connections"
echo "init | GATEWAY_HTTP_PORT: ${http_port}"
echo "init | GATEWAY_HTTPS_PORT: ${https_port}"
echo "init | GATEWAY_NETWORK_PORT: ${gan_port}"
echo "init | GATEWAY_USESSL: ${GATEWAY_USESSL}"
fi
# eula
if [[ ${commissioning_steps[*]} =~ "eula" ]]; then
local license_accept_payload='{"id":"license","step":"eula","data":{"accept":true}}'
evaluate_post_request "${url}" "${license_accept_payload}" 201 "${phase}" "License Acceptance"
echo "init | EULA_STATUS: accepted"
fi
# finalize
if [ "${GATEWAY_PROMPT_PASSWORD}" != "1" ]; then
local finalize_payload='{"id":"finished","data":{"startGateway":true}}'
evaluate_post_request "${url}" "${finalize_payload}" 200 "${phase}" "Finalizing Gateway"
echo "init | COMMISSIONING: finalized"
fi
}
echo "init | Initiating commissioning helper functions..."
health_check "${IGNITION_COMMISSIONING_DELAY:=30}" "RUNNING|COMMISSIONING"
perform_commissioning
| true
|
ee6ba5be06b793a3c83732b3687d45ea42e2e1fb
|
Shell
|
amitkumarj441/Airbus_Ship_Detection
|
/gcp_setup.sh
|
UTF-8
| 1,708
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# One-shot GCP VM bootstrap: installs CUDA 9.1 + cuDNN 7, pyenv with
# Python 3.6.3, and the project's Python dependencies.
# Assumes Ubuntu 16.04 with passwordless sudo; -v echoes each command.
set -e
set -v
# NVIDIA repo
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/cuda-repo-ubuntu1604_9.1.85-1_amd64.deb
sudo dpkg -i ./cuda-repo-ubuntu1604_9.1.85-1_amd64.deb
rm ./cuda-repo-ubuntu1604_9.1.85-1_amd64.deb
# Install CUDA and system dependencies for Python
sudo apt-get update && sudo apt-get install -y --allow-unauthenticated cuda-9.1 imagemagick unzip make build-essential \
libssl-dev zlib1g-dev libbz2-dev libreadline-dev \
libsqlite3-dev wget curl llvm libncurses5-dev libncursesw5-dev \
xz-utils tk-dev libturbojpeg && sudo apt-get clean
# Install cuDNN: unpack the tarball and copy libs (-P keeps symlinks) and
# headers into the CUDA install tree.
curl -O http://developer.download.nvidia.com/compute/redist/cudnn/v7.0.5/cudnn-9.0-linux-x64-v7.tgz
tar -xvf ./cudnn-9.0-linux-x64-v7.tgz -C ./
sudo cp -P ./cuda/lib64/* /usr/local/cuda/lib64
sudo cp ./cuda/include/* /usr/local/cuda/include
rm -rf ./cuda
rm cudnn-9.0-linux-x64-v7.tgz
# Install pyenv
curl -L https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer | bash
# env setup for pyenv and CUDA
# NOTE: ${PYENV_ROOT} is expanded now; the escaped \$PATH etc. are written
# literally into ~/.profile to be evaluated at login time.
export PYENV_ROOT="${HOME}/.pyenv"
echo "export PATH=\"${PYENV_ROOT}/bin:\$PATH\"" >> ~/.profile
echo "eval \"\$(pyenv init -)\"" >> ~/.profile
echo "eval \"\$(pyenv virtualenv-init -)\"" >> ~/.profile
echo 'export CUDA_HOME=/usr/local/cuda' >> ~/.profile
echo 'export PATH=$PATH:$CUDA_HOME/bin' >> ~/.profile
echo 'export LD_LIBRARY_PATH=$CUDA_HOME/lib64' >> ~/.profile
source ~/.profile
# Install Python and project dependencies
pyenv install 3.6.3
pyenv virtualenv 3.6.3 kaggle-airbus-3.6.3
pip3 install http://download.pytorch.org/whl/cu90/torch-0.4.1-cp36-cp36m-linux_x86_64.whl
pip3 install -r requirements.txt
| true
|
3156ff42346a7db109d4579a615c730d85283797
|
Shell
|
spicy202110/spicy_201909
|
/scripts.Hugo/content/blog/2021/10/14/20200224_iezch2d27vg/20200224_iEZcH2D27Vg.info.json.sh2
|
UTF-8
| 4,365
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Hugo content generator for video 20200224_iEZcH2D27Vg: tidies up the
# downloaded artifacts, moves them into a per-video content directory,
# and writes the page's front matter + shortcodes.
# Remove intermediate transcode artifacts (video/audio/wav) from earlier runs.
rm -f \
20200224_iEZcH2D27Vg.info.json.vo.* \
20200224_iEZcH2D27Vg.info.json.ao.* \
20200224_iEZcH2D27Vg.info.json.bo.* \
20200224_iEZcH2D27Vg.info.json.wav
# Start over with a clean per-video content directory.
rm -fr 20200224_iEZcH2D27Vg.info_dir
mkdir -p 20200224_iEZcH2D27Vg.info_dir
mv 20200224_iEZcH2D27Vg.info.json 20200224_iEZcH2D27Vg.info.json.* 20200224_iEZcH2D27Vg.info_dir/
# Rename the thumbnail to match the transcoded .webm.mp4 naming scheme.
mv 20200224_iEZcH2D27Vg.info_dir/20200224_iEZcH2D27Vg.info.json.jpg 20200224_iEZcH2D27Vg.info_dir/20200224_iEZcH2D27Vg.info.json.webm.mp4.jpg
# Emit the Hugo page. EOF3 is deliberately unquoted so the embedded
# $(cat ...|wc -c) substitutions run NOW, baking each file's byte length
# into the generated page.
cat > 20200224_iEZcH2D27Vg.info_dir/_index.md << EOF3
+++
title = " 20200224_iEZcH2D27Vg 16:这个世界,不能没有反对的声音! "
description = " #一切从零开始#美国生活#新移民_特别说明,我真心不想占用大家太多的时间,大伙儿放着可以不看的,有空时才看一看吧,不能误你们的时间,抱歉!_应几位订阅朋友的友情要求,希望我把关闭的几个节目给放出来,以确保整个节目体系的完整性。那么我这几天就加班加点把关闭节目的内容重新剪短一些,把不太重要的东西给删了,然后重发出来。因此可能会出现一天之内出现多个视频,请大家谅解我的行为,我只是为了早点回到正经的新节目中去。_————_为5000年后的来者,我独立开通这个与各大主播力量同行的频道,人,不是低级动物,总需要给历史以真实,给后人以良知。《麻辣空间》将长期更新下去,我在中国的时候一直在做这个事,到了美国也就不是另起炉灶,而是接着中国那边关停的账号,继续换个国度再来。我的邮箱是wto5185188@几迈L,欢迎订阅我频道的朋友把不想公开在评论区的话发到我邮箱里,我会定期做答复的。另外,我的腿腿儿账号是:wto518,脸书ID叫:Sctv麻辣空间。这两个地方平时用于看信息,我自己说的少。不过可以相互关注,方便联系。 "
weight = 20
+++
{{< mymp4 mp4="20200224_iEZcH2D27Vg.info.json.webm.mp4"
text="len $(cat 20200224_iEZcH2D27Vg.info_dir/20200224_iEZcH2D27Vg.info.json.webm.mp4|wc -c)"
>}}
{{< mymp4x mp4x="20200224_iEZcH2D27Vg.info.json.25k.mp4"
text="len $(cat 20200224_iEZcH2D27Vg.info_dir/20200224_iEZcH2D27Vg.info.json.25k.mp4|wc -c)"
>}}
{{< mymp4x mp4x="20200224_iEZcH2D27Vg.info.json.48k.mp4"
text="len $(cat 20200224_iEZcH2D27Vg.info_dir/20200224_iEZcH2D27Vg.info.json.48k.mp4|wc -c)"
>}}
{{< mydiv text="#一切从零开始#美国生活#新移民_特别说明,我真心不想占用大家太多的时间,大伙儿放着可以不看的,有空时才看一看吧,不能误你们的时间,抱歉!_应几位订阅朋友的友情要求,希望我把关闭的几个节目给放出来,以确保整个节目体系的完整性。那么我这几天就加班加点把关闭节目的内容重新剪短一些,把不太重要的东西给删了,然后重发出来。因此可能会出现一天之内出现多个视频,请大家谅解我的行为,我只是为了早点回到正经的新节目中去。_————_为5000年后的来者,我独立开通这个与各大主播力量同行的频道,人,不是低级动物,总需要给历史以真实,给后人以良知。《麻辣空间》将长期更新下去,我在中国的时候一直在做这个事,到了美国也就不是另起炉灶,而是接着中国那边关停的账号,继续换个国度再来。我的邮箱是wto5185188@几迈L,欢迎订阅我频道的朋友把不想公开在评论区的话发到我邮箱里,我会定期做答复的。另外,我的腿腿儿账号是:wto518,脸书ID叫:Sctv麻辣空间。这两个地方平时用于看信息,我自己说的少。不过可以相互关注,方便联系。" >}}
<br>
{{< mydiv link="https://www.youtube.com/watch?v=iEZcH2D27Vg" >}}
<br>
请大家传播时,不需要传播文件本身,<br>
原因是:一旦传播过大东西(例如,图片,文件),<br>
就会触发检查机制。<br>
我不知道检查机制的触发条件。<br>
但是我知道,不会说你传一个没有敏感词的网络地址都检查,<br>
否则,检查员得累死。<br><br>
直接转发网址就可以了:<br>
原因是,这是程序员网站,<br>
共匪不敢封锁,墙内可以直接下载。
EOF3
| true
|
bff80de37e6a945417426700b279786ba80f0349
|
Shell
|
ralevn/shell_scripts
|
/specvars.sh
|
UTF-8
| 248
| 3.328125
| 3
|
[] |
no_license
|
#! /bin/sh
# Demo of the shell's special parameters ($#, $@, $*) and of walking the
# positional parameters with `shift`.
echo "Number of supplied arguments (\$#): $#"
echo "List of provided arguments (\$@): $@"
echo "List of provided arguments (\$*): $*"
# Iterate once per expected argument and print the current first
# positional parameter each time.
# FIX: the original list was "pattern, before, after, file" -- `for` lists
# are whitespace-separated, so the commas became part of the words.
for _slot in pattern before after file; do
  echo "== $1 =="
  # FIX: guard the shift -- shifting past the last argument is an error.
  [ $# -gt 0 ] && shift
done
| true
|
6a8b9e7550f6055666e3a5e7c4e68cbbec5019a4
|
Shell
|
devlab-group/pine-ql
|
/bake.sh
|
UTF-8
| 579
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
# Task definitions, presumably executed via the "bake" task runner (it is
# itself invoked below) -- each task:<name> function is one subcommand.
# Bootstrap a new package: dev tool chain plus interactive npm init.
task:init() {
task:install
npm init
}
# Install the development tool chain (test + lint suites) as dev deps.
task:install() {
set -e
# install test suit
bake dev mocha istanbul should
# install lint suit
bake dev lint-staged pre-commit
}
# Install node package
task:i() {
npm i $@
}
# Install dev dependency
task:dev() {
npm i --save-dev $@
}
# Run the package.json "test" script.
task:test() {
npm run test
}
# Run the package.json "cov" (coverage) script.
task:cov() {
npm run cov
}
# Execute a command inside the app container; NODE_ENV defaults to
# "development" unless already set in the caller's environment.
task:run() {
docker exec -e NODE_ENV=${NODE_ENV:-development} -ti etblegal_server_1 $@
}
# Start the app server inside the container.
task:server() {
task:run npm start server
}
# Run a command inside the mongo container.
task:mongo() {
docker exec -ti etblegal_mongo_1 $@
}
| true
|
5ba82994861d702aa7361d0c04381a156816810d
|
Shell
|
stdlib-js/stdlib
|
/tools/git/hooks/pre-commit
|
UTF-8
| 12,940
| 3.21875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"SunPro",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/usr/bin/env bash
#
# @license Apache-2.0
#
# Copyright (c) 2017 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A Git hook called by `git commit`. If this scripts exits with a non-zero status, the commit will be aborted.
#
# This hook is called with no arguments.
# shellcheck disable=SC2181
# VARIABLES #
# Resolve environment variables:
# Each SKIP_LINT_* variable, when set to any non-empty value, disables the
# corresponding lint step in run_lint below.
skip_filenames="${SKIP_LINT_FILENAMES}"
skip_markdown="${SKIP_LINT_MARKDOWN}"
skip_package_json="${SKIP_LINT_PACKAGE_JSON}"
skip_repl_help="${SKIP_LINT_REPL_HELP}"
skip_javascript_src="${SKIP_LINT_JAVASCRIPT_SRC}"
skip_javascript_cli="${SKIP_LINT_JAVASCRIPT_CLI}"
skip_javascript_examples="${SKIP_LINT_JAVASCRIPT_EXAMPLES}"
skip_javascript_tests="${SKIP_LINT_JAVASCRIPT_TESTS}"
skip_javascript_benchmarks="${SKIP_LINT_JAVASCRIPT_BENCHMARKS}"
skip_python="${SKIP_LINT_PYTHON}"
skip_r="${SKIP_LINT_R}"
skip_c_src="${SKIP_LINT_C_SRC}"
skip_c_examples="${SKIP_LINT_C_EXAMPLES}"
skip_c_benchmarks="${SKIP_LINT_C_BENCHMARKS}"
skip_c_tests_fixtures="${SKIP_LINT_C_TESTS_FIXTURES}"
skip_shell="${SKIP_LINT_SHELL}"
skip_typescript_declarations="${SKIP_LINT_TYPESCRIPT_DECLARATIONS}"
skip_license_headers="${SKIP_LINT_LICENSE_HEADERS}"
# Determine root directory:
root=$(git rev-parse --show-toplevel)
# Define the path to a utility for linting filenames:
lint_filenames="${root}/lib/node_modules/@stdlib/_tools/lint/filenames/bin/cli"
# Define the path to a utility for linting package.json files:
lint_package_json="${root}/lib/node_modules/@stdlib/_tools/lint/pkg-json/bin/cli"
# Define the path to a utility for linting REPL help files:
lint_repl_help="${root}/lib/node_modules/@stdlib/_tools/lint/repl-txt/bin/cli"
# Define the path to ESLint configuration file for linting examples:
eslint_examples_conf="${root}/etc/eslint/.eslintrc.examples.js"
# Define the path to ESLint configuration file for linting tests:
eslint_tests_conf="${root}/etc/eslint/.eslintrc.tests.js"
# Define the path to ESLint configuration file for linting benchmarks:
eslint_benchmarks_conf="${root}/etc/eslint/.eslintrc.benchmarks.js"
# Define the path to cppcheck configuration file for linting examples:
cppcheck_examples_suppressions_list="${root}/etc/cppcheck/suppressions.examples.txt"
# Define the path to cppcheck configuration file for linting test fixtures:
cppcheck_tests_fixtures_suppressions_list="${root}/etc/cppcheck/suppressions.tests_fixtures.txt"
# Define the path to cppcheck configuration file for linting benchmarks:
cppcheck_benchmarks_suppressions_list="${root}/etc/cppcheck/suppressions.benchmarks.txt"
# FUNCTIONS #
# Defines an error handler.
#
# Runs clean-up tasks, then exits the hook with the given status; a
# non-zero exit makes `git commit` abort.
#
# $1 - error status
on_error() {
cleanup
exit "$1"
}
# Runs clean-up tasks.
# Currently just prints a trailing blank line to stderr for readability.
cleanup() {
echo '' >&2
}
# Runs initialization tasks.
# Placeholder: nothing to initialize yet; always succeeds.
init() {
return 0
}
# Checks for non-ASCII filenames (to ensure cross platform portability).
#
# Diffs the index against HEAD (or against the well-known empty-tree
# object for a repository's very first commit) and counts bytes outside
# the printable-ASCII range in added/copied/renamed paths; any such byte
# fails the check.
check_filenames() {
local num_files
local against
local commit
commit=$(git rev-parse --verify HEAD)
if [[ -z "${commit}" ]]; then
# This is the initial commit, so we diff against an empty tree object:
against='4b825dc642cb6eb9a060e54bf8d69288fbee4904'
else
against='HEAD'
fi
# We exploit the fact that the printable range starts with the space character and ends with the tilde. Note that the use of brackets around a `tr` range is okay here, (for portability to Solaris 10's /usr/bin/tr, it's even required), since the square bracket bytes happen to fall in the designated range.
num_files=$(git diff --cached --name-only --diff-filter=ACR -z "${against}" | LC_ALL=C tr -d '[ -~]\0' | wc -c)
if [[ "${num_files}" -ne 0 ]]; then
echo 'Error: Attempting to add a non-ASCII filename. Non-ASCII filenames limit cross-platform portability. Please rename offending files before committing.' >&2
return 1
fi
return 0
}
# Lints staged files.
#
# Reads the skip_* toggles and tool paths defined above. Returns 1 on the
# first failing lint step (which aborts the commit via main/on_error); on
# success, re-stages the changed files (steps run with FIX=1 may have
# rewritten them in place) and returns 0.
run_lint() {
local changed_files
local files
# Get the set of changed files (added, copied, modified, and renamed):
changed_files=$(git diff --name-only --cached --diff-filter ACMR)
# Lint filenames:
if [[ -z "${skip_filenames}" ]]; then
echo "${changed_files}" | "${lint_filenames}"
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'Filename lint errors.' >&2
return 1
fi
fi
# Lint Markdown files...
if [[ -z "${skip_markdown}" ]]; then
files=$(echo "${changed_files}" | grep '\.md$' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
# NOTE(review): in '> /dev/null >&2' (used throughout this function) the
# second redirection overrides the first, so stdout actually lands on
# stderr; '2>/dev/null' or a plain '>&2' may have been intended -- confirm.
make FILES="${files}" lint-markdown-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'Markdown lint errors.' >&2
return 1
fi
fi
fi
# Lint package.json files...
if [[ -z "${skip_package_json}" ]]; then
files=$(echo "${changed_files}" | grep 'package\.json$' | grep -v 'datapackage\.json$' )
if [[ -n "${files}" ]]; then
echo "${files}" | "${lint_package_json}" >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'Package.json lint errors.' >&2
return 1
fi
fi
fi
# Lint REPL help files...
if [[ -z "${skip_repl_help}" ]]; then
files=$(echo "${changed_files}" | grep 'repl\.txt$' )
if [[ -n "${files}" ]]; then
echo "${files}" | "${lint_repl_help}" >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'REPL help lint errors.' >&2
return 1
fi
fi
fi
# Lint JavaScript source files...
if [[ -z "${skip_javascript_src}" ]]; then
files=$(echo "${changed_files}" | grep '\.js$' | grep -v -e '/examples' -e '/test' -e '/benchmark' -e '^dist/' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make FILES="${files}" FIX=1 lint-javascript-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'JavaScript lint errors for source files.' >&2
return 1
fi
fi
fi
# Lint JavaScript command-line interfaces...
if [[ -z "${skip_javascript_cli}" ]]; then
files=$(echo "${changed_files}" | grep '/bin/cli$' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make FILES="${files}" FIX=1 lint-javascript-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'JavaScript lint errors for command-line interface files.' >&2
return 1
fi
fi
fi
# Lint JavaScript examples files...
if [[ -z "${skip_javascript_examples}" ]]; then
files=$(echo "${changed_files}" | grep '/examples/.*\.js$' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make JAVASCRIPT_LINTER=eslint ESLINT_CONF="${eslint_examples_conf}" FILES="${files}" FIX=1 lint-javascript-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'JavaScript lint errors for example files.' >&2
return 1
fi
fi
fi
# Lint JavaScript test files...
if [[ -z "${skip_javascript_tests}" ]]; then
files=$(echo "${changed_files}" | grep '/test/.*\.js$' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make JAVASCRIPT_LINTER=eslint ESLINT_CONF="${eslint_tests_conf}" FILES="${files}" FIX=1 lint-javascript-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'JavaScript lint errors for test files.' >&2
return 1
fi
fi
fi
# Lint JavaScript benchmark files...
if [[ -z "${skip_javascript_benchmarks}" ]]; then
files=$(echo "${changed_files}" | grep '/benchmark/.*\.js$' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make JAVASCRIPT_LINTER=eslint ESLINT_CONF="${eslint_benchmarks_conf}" FILES="${files}" FIX=1 lint-javascript-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'JavaScript lint errors for benchmark files.' >&2
return 1
fi
fi
fi
# Lint Python files...
# Python/C/shell steps first verify the linters are installed; a missing
# linter only warns and the step is skipped rather than failing the commit.
if [[ -z "${skip_python}" ]]; then
files=$(echo "${changed_files}" | grep '\.py$' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make check-python-linters > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'Unable to lint Python files. Ensure that linters are installed.' >&2
else
make FILES="${files}" lint-python-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'Python lint errors.' >&2
return 1
fi
fi
fi
fi
# Lint R files...
if [[ -z "${skip_r}" ]]; then
files=$(echo "${changed_files}" | grep '\.R$' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make FILES="${files}" lint-r-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'R lint errors.' >&2
return 1
fi
fi
fi
# Lint C source files...
if [[ -z "${skip_c_src}" ]]; then
files=$(echo "${changed_files}" | grep '\.c$' | grep -v -e '/examples' -e '/test' -e '/benchmark' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make check-c-linters > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'Unable to lint C files. Ensure that linters are installed.' >&2
else
make FILES="${files}" lint-c-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'C lint errors for source files.' >&2
return 1
fi
fi
fi
fi
# Lint C examples files...
if [[ -z "${skip_c_examples}" ]]; then
files=$(echo "${changed_files}" | grep '/examples/.*\.c$' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make check-c-linters > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'Unable to lint C files. Ensure that linters are installed.' >&2
else
make C_LINTER=cppcheck CPPCHECK_SUPPRESSIONS_LIST="${cppcheck_examples_suppressions_list}" FILES="${files}" lint-c-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'C lint errors for examples files.' >&2
return 1
fi
fi
fi
fi
# Lint C benchmark files...
if [[ -z "${skip_c_benchmarks}" ]]; then
files=$(echo "${changed_files}" | grep '/benchmark/.*\.c$' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make check-c-linters > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'Unable to lint C files. Ensure that linters are installed.' >&2
else
make C_LINTER=cppcheck CPPCHECK_SUPPRESSIONS_LIST="${cppcheck_benchmarks_suppressions_list}" FILES="${files}" lint-c-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'C lint errors for benchmark files.' >&2
return 1
fi
fi
fi
fi
# Lint C test fixtures files...
if [[ -z "${skip_c_tests_fixtures}" ]]; then
files=$(echo "${changed_files}" | grep '/test/fixtures/.*\.c$' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make check-c-linters > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'Unable to lint C files. Ensure that linters are installed.' >&2
else
make C_LINTER=cppcheck CPPCHECK_SUPPRESSIONS_LIST="${cppcheck_tests_fixtures_suppressions_list}" FILES="${files}" lint-c-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'C lint errors for test fixtures files.' >&2
return 1
fi
fi
fi
fi
# Lint shell script files...
# Shell scripts are detected by shebang, not extension.
if [[ -z "${skip_shell}" ]]; then
files=$(echo "${changed_files}" | while read -r file; do head -n1 "$file" | grep -q '^\#\!/usr/bin/env bash' && echo "$file"; done | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make check-shell-linters > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'Unable to lint shell script files. Ensure that linters are installed.' >&2
else
make FILES="${files}" lint-shell-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'Shell script lint errors.' >&2
return 1
fi
fi
fi
fi
# Lint TypeScript declaration files...
if [[ -z "${skip_typescript_declarations}" ]]; then
files=$(echo "${changed_files}" | grep '\.d\.ts$' | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make TYPESCRIPT_DECLARATIONS_LINTER=dtslint FILES="${files}" lint-typescript-declarations-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'TypeScript declaration file lint errors.' >&2
return 1
fi
fi
fi
# Lint license headers...
if [[ -z "${skip_license_headers}" ]]; then
files=$(echo "${changed_files}" | tr '\n' ' ')
if [[ -n "${files}" ]]; then
make FILES="${files}" lint-license-headers-files > /dev/null >&2
if [[ "$?" -ne 0 ]]; then
echo '' >&2
echo 'License header lint errors.' >&2
return 1
fi
fi
fi
# TODO: if datapackage.json, validate via schema
# Re-add files that may have been fixed by linting:
# shellcheck disable=SC2086
git add ${changed_files}
return 0
}
# Main execution sequence.
#
# Runs init, the filename check, and the linters in order; any failure is
# routed through on_error, which exits non-zero and aborts the commit.
main() {
init
if [[ "$?" -ne 0 ]]; then
on_error 1
fi
check_filenames
if [[ "$?" -ne 0 ]]; then
on_error 1
fi
run_lint
if [[ "$?" -ne 0 ]]; then
on_error 1
fi
cleanup
exit 0
}
# Run main:
main
| true
|
4c56903ac65001559ad14bde6ac7f95a2cc30d51
|
Shell
|
zarafagroupware/zarafa-tools
|
/helpers/backup/restore-bricklevels.sh
|
UTF-8
| 641
| 3.4375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Restore Zarafa "brick-level" (per-user) backups for every user listed
# by zarafa-admin, using the distribution's full-restore.sh helper.

# Locate the restore helper; its install path differs between packages.
if [ -e /usr/share/zarafa-backup/full-restore.sh ]; then
  RESTORE=/usr/share/zarafa-backup/full-restore.sh
elif [ -e /usr/share/zarafa/full-restore.sh ]; then
  RESTORE=/usr/share/zarafa/full-restore.sh
else
  # FIX: diagnostics now go to stderr instead of stdout.
  echo Error: Cannot find full-restore.sh. >&2
  exit 1
fi

# zarafa-admin -l prints a table; drop separator/header lines and the
# built-in SYSTEM account, keeping only the username column.
# (Word-splitting the $( ) output is intentional: one word per username.)
for user in $(zarafa-admin -l | grep -Ev "\-|SYSTEM|User list|Username" | awk '{print $1}'); do
  echo ------
  echo "Starting bricklevel import for user: $user"
  # Import only when a backup index exists for this user in the cwd.
  if [ -e "$user.index.zbk" ]; then
    echo "Importing bricklevel for user: $user"
    # FIX: quote the helper path and username; use $() style elsewhere
    # instead of backticks and the obsolescent egrep.
    "$RESTORE" "$user"
  else
    echo "Cannot find bricklevel for user: $user, skipping."
  fi
done
| true
|
22a57b32f68c8984c9c1d225c80f9c85017332b2
|
Shell
|
dowjones/Bigtable-dotnet
|
/scripts/build-prep.sh
|
UTF-8
| 621
| 2.53125
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/bash
# Run this tool from root git folder: scripts/build-prep.sh
# Prepares the Bigtable.NET build: syncs the grpc submodule tree (and its
# nested submodules) and restores NuGet packages for each solution.
# Ensure submodules are up-to-date
git submodule update --init
# Move to grpc folder
cd submodules
cd grpc
# Ensure grpc submodules are up-to-date
git submodule update --init
# Ensure submodule packages are restored
cd vsprojects
../../../tools/nuget.exe restore -NonInteractive grpc_csharp_ext.sln
cd ..
cd src
cd csharp
../../../../tools/nuget.exe restore -NonInteractive Grpc.sln
# Move back to root
cd ..
cd ..
cd ..
cd ..
# Ensure solution packages are restored
cd src
../tools/nuget.exe restore -NonInteractive Bigtable.NET.sln
cd ..
| true
|
003ba3292627e8c5922100501f00480d8d072116
|
Shell
|
fallalex/git-scripts
|
/git_init_remote.sh
|
UTF-8
| 905
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# $1 is the full path where 'git init' was run
# run after 'git init' 'git add' and 'git commit -m "first commit"'
# after these commands above the local repo is setup
# now to duplicate it to the server and setup the remote

# Validate arguments: exactly one, an existing git working tree.
if [ "$#" -ne 1 ]; then
  echo "Expects one argument, directory of git repo" >&2
  exit 1
fi
if [ ! -d "$1" ]; then
  echo "$1 is not an existing directory" >&2
  exit 1
fi
if [ ! -d "$1/.git" ]; then
  echo "$1 does not contain '.git/' directory" >&2
  exit 1
fi

GIT_URL="git@vcs.fallalex.com:/srv/git/"
# NOTE(review): greadlink is GNU readlink from Homebrew coreutils, so this
# script appears macOS-specific; on Linux use readlink -f -- confirm.
# FIX: every expansion below is now quoted so paths with spaces survive.
REPO_PATH="$(greadlink -f "$1")"
REPO_NAME="$(basename "$REPO_PATH").git"
BARE_REPO_PATH="$REPO_PATH/$REPO_NAME"
# FIX: abort if cd fails rather than running git/rsync in the wrong cwd.
cd "$REPO_PATH" || exit 1

# Create a throwaway bare clone, push it to the server, then wire up the
# remote and tracking branch before removing the local bare copy.
git clone --bare "$REPO_PATH" "$BARE_REPO_PATH"
rsync -vr -e ssh "$BARE_REPO_PATH" "$GIT_URL"
git remote add origin "$GIT_URL$REPO_NAME"
git remote set-url origin "$GIT_URL$REPO_NAME"
rm -rf -- "$BARE_REPO_PATH"
git fetch origin
git branch -u origin/main
| true
|
c7e640ddfcfcc564f3e4ea4b73841140859c3ee0
|
Shell
|
petronny/aur3-mirror
|
/write/PKGBUILD
|
UTF-8
| 553
| 2.515625
| 3
|
[] |
no_license
|
# Maintainer: Janosch Dobler <janosch.dobler [at} gmx [dot} de>
# PKGBUILD for Stylus Labs "Write": repackages the upstream prebuilt binary.
pkgname=write
pkgver=194
pkgrel=1
pkgdesc="Write - A word processor for handwriting"
arch=(i686 x86_64)
url="http://www.styluslabs.com/"
license=('custom')
depends=(qt4 libpng12)
optdepends=()
provides=('writer')
# On x86_64 the depends array is replaced wholesale with lib32 variants --
# presumably the upstream binary is 32-bit only (TODO confirm).
if [[ $CARCH == 'x86_64' ]]; then
depends=(lib32-qt4 lib32-libpng12)
fi
source=("http://www.styluslabs.com/write/write${pkgver}.tar.gz")
md5sums=('19530097bdbfb8f118424e2a372c7b59')
# Install the prebuilt binary as /usr/bin/writer (matching 'provides').
package() {
install -Dm755 "$srcdir/Write" "$pkgdir/usr/bin/writer"
}
# vim:set ts=2 sw=2 et:
| true
|
5d075cda2938cc1325ff34dcf9cec49251f3b533
|
Shell
|
SigmaG33/code-server
|
/ci/dev/image/exec.sh
|
UTF-8
| 961
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -euo pipefail

# Opens an interactive bash session inside of a docker container
# for improved isolation during development.
# If the container exists it is restarted if necessary, then reused;
# otherwise the image is built and a fresh container is spawned.

main() {
  cd "$(dirname "${0}")/../../.."

  local container_name=code-server-dev

  # Reuse an existing dev container when present.
  if docker inspect $container_name &> /dev/null; then
    echo "-- Starting container"
    docker start "$container_name" > /dev/null

    enter
    exit 0
  fi

  build
  run
  enter
}

# NOTE: enter/run/build read $container_name via bash's dynamic scoping of
# the local declared in main(); they must be called from main.
enter() {
  echo "--- Entering $container_name"
  docker exec -it "$container_name" /bin/bash
}

run() {
  echo "--- Spawning $container_name"
  # FIX: the original passed an unconditional -it in addition to the
  # conditional one below, which broke non-interactive runs ("the input
  # device is not a TTY"). Only the stdin-is-a-TTY check decides now.
  docker run \
    --name $container_name \
    "-v=$PWD:/code-server" \
    "-w=/code-server" \
    "-p=127.0.0.1:8080:8080" \
    $(if [[ -t 0 ]]; then echo -it; fi) \
    "$container_name"
}

build() {
  echo "--- Building $container_name"
  docker build -t $container_name ./ci/dev/image > /dev/null
}

main "$@"
| true
|
19bb04fa86819ca727551d9144a333ef46f52a74
|
Shell
|
gblanchard4/swc_tulane
|
/clean.sh
|
UTF-8
| 575
| 2.609375
| 3
|
[] |
no_license
|
# This script performs cleaning operations of country files with errors.
# Each fix keeps only the 12 data rows of interest and replaces the
# original file in place via a *.clean temporary.
cd data/clean/
# Fix Guinea file - just want middle 12 rows
echo "fixing Guinea file"
head -n 24 Guinea.cc.txt | tail -n12 > Guinea.cc.txt.clean
mv -v Guinea.cc.txt.clean Guinea.cc.txt
# Fix China file - just want top 12
echo "fixing China file"
head -n12 China.cc.txt> China.cc.txt.clean
mv -v China.cc.txt.clean China.cc.txt
# Fix Niger file - we jsut want the top 12
echo "fixing Niger file"
head -n12 Niger.cc.txt> Niger.cc.txt.clean
mv -v Niger.cc.txt.clean Niger.cc.txt
cd ../..
# NOTE(review): purpose of the pause is not evident from the script --
# presumably to keep the mv output visible; confirm before removing.
sleep 1
| true
|
7c58bc3b668e187d21365a98d0330fa3f4bd94c3
|
Shell
|
tstapler/stapler-scripts
|
/set_git_emails
|
UTF-8
| 680
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# vim: ai ts=2 sw=2 et sts=2 ft=sh
# Set the git author/committer identity for the current repository based
# on where its 'origin' remote points (work, Google, or personal).

ORIGIN_URL=$(git ls-remote --get-url 'origin')

export GIT_AUTHOR_NAME="Tyler Stapler"
export GIT_COMMITTER_NAME=$GIT_AUTHOR_NAME
git config --local user.name "$GIT_AUTHOR_NAME"

case $ORIGIN_URL in
  # Workiva-owned repos: the org itself or -wf/-wk suffixed orgs,
  # in both https (github.com/) and ssh (github.com:) URL forms.
  *github.com/Workiva*|*github.com:Workiva*|\
  *github.com/workiva*|*github.com:workiva*|\
  *github.com/*-wf*|*github.com:*-wf*|\
  *github.com/*-wk*|*github.com:*-wk*)
    export GIT_AUTHOR_EMAIL="tyler.stapler@workiva.com"
    ;;
  # Google-hosted gerrit remotes.
  *.googlesource.com*|sso://*)
    export GIT_AUTHOR_EMAIL="tstapler@google.com"
    ;;
  *)
    export GIT_AUTHOR_EMAIL="tystapler@gmail.com"
    ;;
esac

# FIX: the original exported the misspelled GIT_COMITTER_EMAIL, which git
# ignores, so the committer email silently fell back to the global config.
export GIT_COMMITTER_EMAIL=$GIT_AUTHOR_EMAIL
git config --local user.email "$GIT_AUTHOR_EMAIL"
|
46be74d428b8127c415e413d1292d078cd6c8f36
|
Shell
|
AlexeiKharchev/bash_functions_library
|
/lib/terminal/_terminal_spinner.sh
|
UTF-8
| 2,813
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#! /dev/null/bash

[[ "$BASH_SOURCE" =~ /bash_functions_library ]] && _bfl_temporary_var="_GUARD_BFL_$(echo "$BASH_SOURCE" | sed 's|^.*/lib/\([^/]*\)/\([^/]*\)\.sh$|\1\2|')" || return 0
[[ ${!_bfl_temporary_var} -eq 1 ]] && return 0 || readonly $_bfl_temporary_var=1
#------------------------------------------------------------------------------
#----------- https://github.com/natelandau/shell-scripting-templates ----------
#
# Library of functions related to Linux Systems
#
# @author Nathaniel Landau
#
# @file
# Defines function: bfl::terminal_spinner().
#------------------------------------------------------------------------------

#------------------------------------------------------------------------------
# Dependencies
#------------------------------------------------------------------------------
source "${BASH_FUNCTION_LIBRARY%/*}"/lib/terminal/_is_Terminal.sh

#------------------------------------------------------------------------------
# @function
# Draws one frame of a progress bar; call repeatedly inside a for/while
# loop. Progress is kept in the global integer SPIN_NUM across calls.
# Don't forget to add bfl::terminal_clear_line() at the end of the loop.
#
# @param String $text (Optional)
#   Text accompanying the spinner. (Defaults to "Running process".)
#
# @example
#   for i in $(seq 0 100); do
#       bfl::sleep 0.1
#       bfl::terminal_spinner "Counting numbers"
#   done
#   bfl::terminal_clear_line
#------------------------------------------------------------------------------
bfl::terminal_spinner() {
  [[ $BASH_INTERACTIVE == true ]] || return 0
  [[ $VERBOSE == true ]] && return 0      # Do nothing in quiet/verbose mode.
  bfl::verify_arg_count "$#" 0 1 || { bfl::writelog_fail "${FUNCNAME[0]} arguments count $# ∉ [0, 1]"; return ${BFL_ErrCode_Not_verified_args_count}; }  # Verify argument count.
#  bfl::is_Terminal || { bfl::writelog_fail "${FUNCNAME[0]}: no terminal found"; return 1; }     # Do nothing if the output is not a terminal.
  [[ ${_BFL_HAS_TPUT} -eq 1 ]] || { bfl::writelog_fail "${FUNCNAME[0]}: dependency 'tput' not found"; return ${BFL_ErrCode_Not_verified_dependency}; }  # Verify dependencies.
  [[ ${_BFL_HAS_PERL} -eq 1 ]] || { bfl::writelog_fail "${FUNCNAME[0]}: dependency 'perl' not found"; return ${BFL_ErrCode_Not_verified_dependency}; }  # Verify dependencies.

  local s l msg="${1:-Running process}"
  tput civis  # Hide the cursor

  # Persist the spinner position across calls in a global counter.
  [[ -z ${SPIN_NUM:-} ]] && declare -gi SPIN_NUM=0
  local -i iMax=28

  # Bar glyphs: SPIN_NUM filled cells followed by (iMax-SPIN_NUM) empty ones.
  s=$(bfl::string_of_char '▁' $((iMax-SPIN_NUM)) )
  l=$(bfl::string_of_char '█' $SPIN_NUM)
  local glyph="${l}${s}"

  # Integer percentage, space-padded to a fixed width of 3.
  local -i p=$((100*SPIN_NUM/iMax))
  s="$p"
  [[ $p -lt 100 ]] && s=" $s"
  [[ $p -lt 10 ]] && s=" $s"
  # FIX: removed the original 'n=${n//.*/}' line -- it referenced an
  # undefined variable (aborting under `set -u`), and p is already an
  # integer so there is no floating point to strip.

  # shellcheck disable=SC2154
  printf "\r${Gray}[ $s%%] %s %s...${reset}" "${glyph}" "${msg}"

  # Advance the spinner and wrap around after iMax steps.
  [[ $SPIN_NUM -lt $iMax ]] && ((SPIN_NUM = SPIN_NUM + 1)) || SPIN_NUM=0
  return 0
}
| true
|
22ade2b9523afb336511c801bcb09d09336ff148
|
Shell
|
Rampo0/bibit
|
/TASK1/setup-db.sh
|
UTF-8
| 332
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# (Re)create a local PostgreSQL dev database in Docker.
setup_postgres()
{
echo "SETUP POSTGRES"
# NOTE(review): this stops and removes ALL containers on the host, not
# just the postgres one -- destructive on shared machines.
docker stop $(docker ps -aq)
docker rm $(docker ps -aq)
# Mount ./init-scripts into docker-entrypoint-initdb.d so the SQL/shell
# init scripts run on the database's first start.
docker run --name postgresdb --mount type=bind,source="$(pwd)"/init-scripts,target=/docker-entrypoint-initdb.d -p 5432:5432 -e POSTGRES_PASSWORD=password -d postgres
}
main()
{
setup_postgres
}
main
| true
|
d22df5c4d0addc1d9dd1a47b16bdb9a20686e9b0
|
Shell
|
dipin24/FORENSICS-TASKS
|
/twin.sh
|
UTF-8
| 345
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Forensics challenge "Twin": recover the flag from the byte positions
# where two nearly identical files (Twin1/Twin2) differ.
# Generate a helper that prints Twin2's bytes wherever the files differ.
# NOTE(review): the Python inside this heredoc has no indentation in this
# copy -- as written it is not valid Python; the original file presumably
# had the loop bodies indented. Confirm before relying on it.
cat > c.py <<eof
with open('./Twin1','rb') as f:
x = f.read()
with open('./Twin2','rb') as f:
y = f.read()
flag=''
for i in range(min(len(x) ,len(y))):
if x[i] !=y[i]:
flag+=str(y[i])
print(flag)
eof
python3 c.py
# after this the flag that you got is in ascii form then convert this to string.
echo "inctf{y0u_Got_m3}"
| true
|
15aa212ea8b6c9491fd1e1ad8ce90b91c02b6e14
|
Shell
|
gnperdue/jburkardt-cpp
|
/calpak/calpak_prb.sh
|
UTF-8
| 511
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Build-and-run driver for the calpak test program: compile, link against
# the prebuilt calpak object, run, and capture the output to a file.
# Each step aborts the script on failure.
# '>&' redirects both stdout and stderr into compiler.txt.
g++ -c -g -I/$HOME/include calpak_prb.cpp >& compiler.txt
if [ $? -ne 0 ]; then
echo "Errors compiling calpak_prb.cpp"
exit
fi
rm compiler.txt
#
g++ calpak_prb.o /$HOME/libcpp/$ARCH/calpak.o -lm
if [ $? -ne 0 ]; then
echo "Errors linking and loading calpak_prb.o."
exit
fi
#
rm calpak_prb.o
#
mv a.out calpak_prb
./calpak_prb > calpak_prb_output.txt
if [ $? -ne 0 ]; then
echo "Errors running calpak_prb."
exit
fi
rm calpak_prb
#
echo "Program output written to calpak_prb_output.txt"
| true
|
33269353b813177010e6fa56266aba3e5de4f5f7
|
Shell
|
omsai/dREG
|
/dREG_paper_analyses/train_nn/get_test_files/setupTrainingFiles.bsh
|
UTF-8
| 1,678
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Builds BED training files: GRO-cap TSS, post-poly-A regions, gene-end
# windows, and chromHMM transcribed regions minus post-polyA.
outpath=/home/cgd24/work/dbn_shapes/training_beds

## TSS.
cat /usr/projects/GROseq.parser/tss_detecter/andre_hmm/hg19.k562.new_hmm2b.post2.bed | sort-bed - | bedops --merge - > $outpath/grocaptss.bed

## Gaps.
## ChIP-seq for poly-A cleavage enzymes, etc.!?
## Intersect
## Poly-adenylation sites -- perhaps useful as an additional signal sensor for gaps?!
R --no-save < MetaPlotPolyA.R

# Post-polyA intervals overlapping a predicted TSS: trim the interval at
# the mapped coordinate (presumably the TSS boundary -- confirm), strand-aware.
zcat /home/cgd24/work/tss_detector/k562.predictions.bed.gz | bedmap --indicator --echo --echo-map postPolyA.bed - | grep "^1|" | sed "s/^1|//g" | sed "s/|/\t/g" | awk 'BEGIN{OFS="\t"} {split($0,a,"\t"); if($6=="+" && $2<$8) {print $1,$2,$8,".",0,"+"} if($6=="-" && a[NF-2]<$3) {print $1,a[NF-2],$3,".",0,"-"}}' > postPolyA.stopAtTSS.bed
# Post-polyA intervals with no TSS overlap are kept whole.
# FIX: the original command read "zcat zcat <file>", which makes zcat try
# to open a file literally named "zcat" and fail.
zcat /home/cgd24/work/tss_detector/k562.predictions.bed.gz | bedmap --echo --indicator postPolyA.bed - | grep "|0$" | sed "s/|0$//g" > postPolyA.noTSS.bed
cat postPolyA.stopAtTSS.bed postPolyA.noTSS.bed | sort-bed - > postPolyA.bed
rm postPolyA.stopAtTSS.bed postPolyA.noTSS.bed
# 500 bp window anchored at each interval's 3'-most end, strand-aware.
cat postPolyA.bed | awk 'BEGIN{OFS="\t"} {if($6=="+") {print $1,($3-500),($3),".",0,"+"} if($6=="-") {print $1,($2),($2+500),".",0,"-"}}' > geneEnd.bed

## Transcribed regions, excluding post-polyA.
zcat /usr/data/GROseq.parser/hg19/k562/chromhmm/wgEncodeBroadHmmK562HMM.bed.gz | grep "Txn" | grep -v "Transition" | sort-bed - | bedops --merge - | bedops --difference - postPolyA.bed > $outpath/ernst_txn.bed

## Get strand:
#hgsql hg19 -e "select chrom,txStart,txEnd,name,exonCount,strand from knownGene" > ucscKnownGene.bed
#bedmap --echo --echo-map ernst_txn.bed ucscKnownGene.bed | less
R --no-save < txn.strand.R

## Anything else useful?!
| true
|
cc82856d9de236dcffde510eef0db8880d7eb92d
|
Shell
|
klauck/dispatcher
|
/start_cluster.sh
|
UTF-8
| 432
| 2.984375
| 3
|
[] |
no_license
|
# Launch a local Hyrise cluster: N server instances on consecutive ports
# in the background, plus the dispatcher in the foreground; everything is
# torn down when the dispatcher exits.
path_to_hyrise=../hyrise
number_of_cores=4;
number_of_instances=4;
hyrise_port=5000;
path_to_dispatcher=$(pwd)
cd $path_to_hyrise
# One server per instance on ports 5000, 5001, ...; each registers with
# the dispatcher on port 8888.
for ((id=0;id<$number_of_instances;id++))
do
$path_to_hyrise/build/hyrise-server_debug -l $path_to_hyrise/build/log.properties -p $(($hyrise_port + $id)) --corecount $number_of_cores --coreoffset 0 --nodeId $id --dispatcherport 8888 &
done
$path_to_dispatcher/dispatcher 8888
# Kill every process whose name matches the regex "hyrise" (-r).
killall -r hyrise
| true
|
00d877a7be7e628bb5b053a7090528f77381d6c1
|
Shell
|
Robert-Christopher/site
|
/bin/create-config.sh
|
UTF-8
| 854
| 2.765625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#
# Lithium Site
#
# Copyright (c) 2014 Union of RAD - All rights reserved.
#
# The code is distributed under the terms of the BSD 3-clause
# License. For the full license text see the LICENSE file.
#
# Generates nginx include/server configs from their *.conf.default
# templates, substituting deployment placeholders via the `fill` helper
# sourced from $DETA/util.sh.
source $DETA/util.sh
role THIS
cd $THIS_PATH/config/nginx/includes
# cp NAME.conf{.default,} copies the template to NAME.conf, which is then
# filled in place.
for NAME in app assets access; do
msg "Generating includes configuration from templates for ${NAME}..."
cp $NAME.conf{.default,}
fill PROJECT $THIS_PATH ${NAME}.conf
fill DOMAIN $THIS_DOMAIN ${NAME}.conf
fill NGINX_FASTCGI_CONFIG $THIS_NGINX_FASTCGI_CONFIG ${NAME}.conf
fill PHP_FPM_SOCKET $THIS_PHP_FPM_SOCKET ${NAME}.conf
done
cd -
cd $THIS_PATH/config/nginx/servers
for NAME in app; do
msg "Generating servers configuration from templates for ${NAME}..."
cp $NAME.conf{.default,}
fill PROJECT $THIS_PATH ${NAME}.conf
fill DOMAIN $THIS_DOMAIN ${NAME}.conf
done
cd -
| true
|
95bd6ab8a40f9bb68c82f386718c512c75a53c0d
|
Shell
|
carlostechinnovation/bdml
|
/mod002parser/scripts/bolsa/MOD001A_yahoo_finance.sh
|
UTF-8
| 555
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Download Yahoo Finance data for every ticker listed in
# empresas_yahoo_finance.in, for the year given as $1.
if [ $# -eq 0 ]; then
  echo "ERROR Parametros de entrada incorrectos. Debes indicar: anio" >&2
  # FIX: 'exit -1' is non-portable; bash maps it to 255, so keep 255.
  exit 255
fi
export anio=${1}
echo "Borramos todos los ficheros brutos que tuvieramos de ese anio=${anio}..."
# FIX: the glob must stay OUTSIDE the quotes so the shell expands it; the
# original quoted the whole pattern, handing rm the literal string
# "YF_<anio>_*" so the old raw files were never deleted.
rm -f /home/carloslinux/Desktop/DATOS_BRUTO/bolsa/"YF_${anio}_"*
empresas="/home/carloslinux/git/bdml/mod002parser/scripts/bolsa/empresas_yahoo_finance.in"
# One node invocation per ticker symbol read from the list file.
while read -r empresa; do
  node /home/carloslinux/git/bdml/mod002parser/scripts/bolsa/MOD001A_yahoo_finance.js "${empresa}" "${anio}"
done < "$empresas"
| true
|
9d020fd1daf4ea3e85fc3ec8750d27cbfbf7a8f1
|
Shell
|
salvadord/gcloud
|
/slurm-gcp_v3/scripts/delete_slurm.sh
|
UTF-8
| 1,100
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
function delete_instances() {
step=50
instances_list=("`gcloud compute instances list --format="value(NAME,ZONE)" 2>/dev/null | grep compute`")
zone=`echo "${instances_list[@]}" | awk '{print $2}' | sort -u`
echo "${instances_list[@]}" | grep -v -w "compute1" | grep -v -w "compute2" | awk '{print $1}' | xargs -n 1000 gcloud compute instances delete -q --zone=$zone
}
# $1 = deployment name, $2 = optional "--instances" to only delete instances.
function main() {
    # Keep deleting until no deletable compute instances remain.
    while [ -n "`gcloud compute instances list 2>/dev/null | grep compute | grep -v -w "compute1" | grep -v -w "compute2" | awk '{print $1}'`" ]; do
        delete_instances
    done
    # Unless instances-only was requested, also tear down firewall rules,
    # the slurm-network and finally the whole deployment.
    if [ "$2" != "--instances" ]; then
        echo "Instance deletion complete."
        echo "Deleting firewall rules."
        sleep 15
        # Firewall rules are deleted in parallel (backgrounded) ...
        for i in `gcloud compute firewall-rules list 2>/dev/null | grep slurm-network | awk '{print $1}'`; do
            gcloud compute firewall-rules delete $i -q &
        done
        # ... then we poll until they are all gone.
        while [ `gcloud compute firewall-rules list 2>/dev/null | grep -c slurm-network` -gt 0 ]; do sleep 5; done
        gcloud compute networks delete slurm-network -q
        sleep 10
        gcloud deployment-manager deployments delete $1 -q
    fi
}
main $@
| true
|
f4ba8165c7379c574f1039786cf6d61ae6aae297
|
Shell
|
SanduDevOps08/Roboshop
|
/Components/common.sh
|
UTF-8
| 3,346
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
#source Components/common.sh
# Report the outcome of the previous step: green SUCCESS when the status
# code in $1 is zero, otherwise red FAILURE followed by an exit 2 that
# aborts the whole script.
Status_Check()
{
    if (( $1 != 0 )); then
        echo -e "\e[31mFAILURE\e[0m"
        exit 2
    fi
    echo -e "\e[32mSUCCESS\e[0m"
}
# Print a cyan banner for the step named in $1, then re-print the step
# name without a trailing newline so the next Status_Check result lands
# on the same line.
Print()
{
    local step="$1"
    printf '\n\t\t\e[36m...................%s...................\e[0m\n\n' "$step"
    printf '%s \t-' "$step"
}
# All steps below install packages and write system files, so refuse to
# run unless invoked as root.
if [ $UID -ne 0 ]; then
    echo -e "\n\e[1;33mYou Should Execute This Script as a Root User\e[0m\n"
    exit 1
fi
# Single shared log file; start fresh on every run.
LOG=/tmp/Roboshop.log
rm -f $LOG
# Create the application user "roboshop" (idempotent: skips when the user
# already exists).
# BUGFIX: the original contained unresolved git merge-conflict markers
# (<<<<<<< / ======= / >>>>>>>) around this `id` check, which is a syntax
# error at runtime; the surviving line uses the lowercase name that matches
# the useradd call below.
ADD_APP_USER()
{
    id roboshop &>>$LOG
    if [ $? -eq 0 ]; then
        echo "The user already exists, hence skipping" &>>$LOG
    else
        useradd -G wheel roboshop &>>$LOG
        Print "roboshop user is added successfully"
    fi
    Status_Check $?
}
# Fetch the ${COMPONENT} source archive from GitHub and unpack it under
# /home/roboshop/${COMPONENT}.
DOWNLOAD()
{
    Print "Downloading ${COMPONENT} zipfile content\t\t"
    curl -s -L -o /tmp/${COMPONENT}.zip "https://github.com/roboshop-devops-project/${COMPONENT}/archive/main.zip" &>>$LOG
    Status_Check $?
    Print "Extracting the ${COMPONENT} files"
    cd /home/roboshop
    # Replace any previous checkout, then drop GitHub's "-main" suffix.
    rm -rf ${COMPONENT} && unzip -o /tmp/${COMPONENT}.zip &>>$LOG && mv ${COMPONENT}-main ${COMPONENT}
    Status_Check $?
}
# Patch service endpoint placeholders in the component's systemd unit and
# install / enable / restart it.
# NOTE: a dot in a function name is legal in bash but not in POSIX sh.
systemD.Setup()
{
    Print "Update systemD service"
    # Replace every *_ENDPOINT / *HOST placeholder with its
    # *.roboshop.internal DNS name.
    sed -i -e 's/MONGO_DNSNAME/mongodb.roboshop.internal/' -e 's/REDIS_ENDPOINT/redis.roboshop.internal/' -e 's/MONGO_ENDPOINT/mongodb.roboshop.internal/' -e 's/CATALOGUE_ENDPOINT/catalogue.roboshop.internal/' -e 's/CARTENDPOINT/cart.roboshop.internal/' -e 's/DBHOST/mysql.roboshop.internal/' -e 's/CARTHOST/cart.roboshop.internal/' -e 's/USERHOST/user.roboshop.internal/' -e 's/AMQPHOST/rabbitmq.roboshop.internal/' /home/roboshop/${COMPONENT}/systemd.service
    Status_Check $?
    Print "Setup systemD service"
    mv /home/roboshop/${COMPONENT}/systemd.service /etc/systemd/system/${COMPONENT}.service &>>$LOG && systemctl daemon-reload && systemctl restart ${COMPONENT} &>>$LOG && systemctl enable ${COMPONENT} &>>$LOG
    Status_Check $?
}
# Install and configure a NodeJS-based Roboshop component (${COMPONENT}).
NODEJS()
{
    # BUGFIX: the original re-sourced Components/common.sh here -- the very
    # file that defines this function -- which re-ran its top-level code
    # (root check, `rm -f $LOG`) mid-run and wiped the log. Removed.
    Print "Installing nodejs packages\t\t"
    yum install nodejs make gcc-c++ -y &>>$LOG
    Status_Check $?
    ADD_APP_USER
    DOWNLOAD
    Print "Download nodeJS dependencies"
    cd /home/roboshop/${COMPONENT} &>>$LOG
    npm install --unsafe-perm &>>$LOG
    Status_Check $?
    chown roboshop:roboshop -R /home/roboshop
    systemD.Setup
}
# Build and install the Java (Maven) "shipping" component.
JAVA()
{
    Print "Installing Maven\t"
    yum install maven -y &>>$LOG
    Status_Check $?
    ADD_APP_USER
    DOWNLOAD
    cd /home/roboshop/shipping
    Print "Make Shipping Package\t"
    mvn clean package &>>$LOG
    Status_Check $?
    Print "Rename Shipping Package"
    # systemd unit expects the versionless jar name.
    mv target/shipping-1.0.jar shipping.jar &>>$LOG
    Status_Check $?
    chown roboshop:roboshop -R /home/roboshop
    systemD.Setup
}
# Install and configure the Python-based "payment" component.
PYTHON()
{
    Print "Install Python3\t\t"
    yum install python36 gcc python3-devel -y &>>$LOG
    Status_Check $?
    ADD_APP_USER
    DOWNLOAD
    cd /home/roboshop/payment
    Print "Install Python Dependencies"
    pip3 install -r requirements.txt &>>$LOG
    Status_Check $?
    # uwsgi config must run as the roboshop user; patch its uid/gid in.
    USERID=$(id -u roboshop)
    GROUPID=$(id -g roboshop)
    Print "Update RoboShop User in Config"
    sed -i -e "/uid/ c uid=${USERID}" -e "/gid/ c gid=${GROUPID}" /home/roboshop/payment/payment.ini &>>$LOG
    Status_Check $?
    systemD.Setup
}
| true
|
864eb0fdce85bfbcb4a0f5bd4c72339eeb097c6d
|
Shell
|
CyanogenMod/android_device_sony_blue-common
|
/releasetools/unify_userdata/prop_tools.sh
|
UTF-8
| 827
| 3.4375
| 3
|
[] |
no_license
|
#!/sbin/sh
#
# Copyright (C) 2016 The CyanogenMod Project
# Copyright (C) 2016 Adrian DC
#
# Properties functions helper tools
#
# Function prop_default_date_timestamp()
# Read ro.build.date from /default.prop and print it as a Unix timestamp.
# Prints 0 when /default.prop is missing or the date cannot be parsed.
# Relies on the caller-provided ${toybox} binary for every external command
# (recovery environment has no full coreutils).
prop_default_date_timestamp()
{
  # Variables
  local build_date;
  local build_timestamp=0;
  # Build date to timestamp
  if [ -e /default.prop ]; then
    # Extract the ro.build.date value and strip the timezone token that
    # precedes the 4-digit year.
    build_date=$(${toybox} cat /default.prop \
        | ${toybox} grep 'ro.build.date=' \
        | ${toybox} sed 's/.*=//' \
        | ${toybox} sed 's/[A-Z]* \(20[0-9][0-9]\)/\1/');
    # Convert to timestamp; two date layouts are tried, 0 on failure.
    build_timestamp=$(${toybox} date -d "${build_date}" -D "%A %B %d %T %Y" +'%s' || \
        ${toybox} date -d "${build_date}" -D "%A %d %B %T %Y" +'%s' || \
        echo 0);
  fi;
  # Result output
  ${toybox} echo "${build_timestamp}";
}
| true
|
f35e29c9c8968124499f2a834e8ab2cde5b57387
|
Shell
|
ss9036726/Programs
|
/assign4.sh
|
UTF-8
| 136
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Silently prompt for a username and compare it with the current login;
# greet on a match, otherwise ask to try again.
read -s -p "Enter the username : " entered
if [[ "$entered" == "$USER" ]]; then
    echo "Hello"
else
    echo "Try Again"
fi
| true
|
fffadf4f56f82e3a2e5ceb5aa95bdbb89c1a0bf7
|
Shell
|
pjotrp/smb_performance
|
/scripts/smb_cramindex.sh
|
UTF-8
| 247
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# ./scripts/smb_cramindex.sh &>> results/smb_cramindex.out
#
# Benchmark `sambamba index` on a CRAM file with a decreasing number of
# threads. $sambamba and $cram10s are defined in etc/environment.
echo "cramindex with sambamba"
date
. etc/environment
for t in 47 31 15 7 ; do
    cmd="$sambamba index -t $t -C $cram10s"
    echo $cmd
    # BUGFIX: run the timed command directly. The original wrapped it in
    # $( ... ), which captured time's stdout and then tried to EXECUTE
    # that output as another command.
    /usr/bin/time -v $cmd
done
date
| true
|
e809eaa6cf56618a0aa73fa9a0071fe4e8fd2782
|
Shell
|
statgen/monitor-topmed
|
/bin/topmed_gcepull.sh
|
UTF-8
| 4,533
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# topmed_gcepull.sh -submit bamid -retry
#
# Copy remapped CRAM for a sample from GCE
#
. /usr/cluster/$PROJECT/bin/topmed_actions.inc
topmed_check_recab=/usr/cluster/$PROJECT/bin/topmed_check_recab.pl
me=gcepull
markverb=$me
if [ "$1" = "-submit" ]; then
shift
bamid=`GetDB $1 bamid`
MyRealHost $bamid b$build
MayIRun $me $bamid $realhost
timeout='4:00:00'
SubmitJob $bamid "$PROJECT-gce" '4G' "$0 $*"
exit
fi
if [ "$1" = "" ]; then
me=`basename $0`
echo "Usage: $me [-submit] bamid [-retry]"
echo ""
echo "Copy remapped CRAM for a sample from GCE"
exit 1
fi
bamid=$1
nwdid=`GetNWDID $bamid`
bamid=`GetDB $nwdid bamid`
retry=0
if [ "$2" = "-retry" ]; then # See if nomatch really was okay
retry=1
fi
# Get where our remapped file is to go
crampath=`$topmedpath wherepath $bamid b$build`
if [ "$crampath" = "" ]; then
Fail "Unable to determine where remapped CRAM file should go for '$bamid'"
fi
mkdir -p $crampath
if [ "$?" != "0" ]; then
Fail "Unable to create '$crampath' for remapped CRAM for '$bamid'"
fi
Started
cramfile=$crampath/$nwdid.recab.cram
#======================================================================
# Copy remapped CRAM from GCE, check flagstat, fix up database
#======================================================================
cramflagstat=`GetDB $bamid cramflagstat`
if [ "$cramflagstat" = "" ]; then
SetDB $bamid state_gce38bcf 0
SetDB $bamid state_gce38copy 0
Fail "Cramflagstat is missing from database for bamid '$bamid'"
fi
stime=`date +%s`
inuri="$incominguri/$nwdid/$nwdid.recab.cram"
$gsutil stat "$inuri"
if [ "$?" != "0" ]; then # Remote file is not there
Fail "Unable to find $nwdid/$nwdid.recab.cram in: $incominguri"
fi
# If retrying, try to rename the possible nomatch file. Failure okay
if [ "$retry" = "1" ]; then
$gsutil mv $inuri.flagstat.nomatch $inuri.flagstat
fi
# Now know where to look for data. Check flagstat
echo "Checking if flagstat is as we expect from $inuri"
$gsutil cp $inuri.flagstat $crampath
if [ "$?" != "0" ]; then
SetDB $bamid state_gce38bcf 0
SetDB $bamid state_gce38copy 0
Fail "Failed to copy flagstat from GCE: $inuri.flagstat"
fi
# Get number of interest from flagstat file and check it
n=`CalcFlagstatFromFile $crampath/$nwdid.recab.cram.flagstat`
if [ "$n" != "$cramflagstat" ]; then
# Renaming the flagstat file stops pull from happening again
$gsutil mv $inuri.flagstat $inuri.flagstat.nomatch
SetDB $bamid state_gce38bcf 0
SetDB $bamid state_gce38copy 0
Fail "Flagstat '$n' did not match cramflagstat '$cramflagstat' for bamid '$bamid' nwdid $nwdid -- URL=$inuri"
fi
echo "Flagstat value is correct: $n"
# See if we have already done this
f=$crampath/$nwdid.recab.cram
if [ -f $f ]; then
echo "Replacing existing CRAM $f"
rm -f $f $f.crai
fi
echo "Copying remapped CRAM to local file $crampath"
$gsutil cp $inuri $f
if [ "$?" != "0" ]; then
SetDB $bamid state_gce38bcf 0
SetDB $bamid state_gce38copy 0
Fail "Failed to copy file from GCE $inuri to $f"
fi
# Remapping can still result in a trashed file. Make sure this is a CSG file
set -o pipefail
$samtools view -H $f | $topmed_check_recab -csg
if [ "$?" != "0" ]; then
    SetDB $bamid state_gce38bcf 0
    # BUGFIX: the state value here was '0q' (typo); every other failure
    # path in this script resets state_gce38copy to 0.
    SetDB $bamid state_gce38copy 0
    Fail "Remapped file '$f' header has multiple ids"
fi
echo "Only one sample found in the header and is a CSG remapped file"
# Clean up data in GCE if data found in incoming. Move remapped data to bcf bucket
$gsutil mv $inuri $bcfuri/$nwdid/$nwdid.recab.cram
echo "Moved $inuri files to $bcfuri/$nwdid"
# Remove any left over cruft in recabs bucket
echo "Removing $incominguri/$nwdid"
$gsutil rm -rf $incominguri/$nwdid
# Post processing needed here
echo "Begin post-processing of $f"
echo "Create index for remapped sample"
CreateIndex $bamid $f
echo "Calculating MD5s for local files"
md5cram=`CalcMD5 $bamid $f`
md5crai=`CalcMD5 $bamid $f.crai`
echo "Set checksums and flagstat for b$build sample"
SetDB $bamid b${build}cramchecksum $md5cram
SetDB $bamid b${build}craichecksum $md5crai
SetDB $bamid b${build}flagstat $cramflagstat
# Save date of file in database
$topmedcmd setdate $bamid datemapping_b38 $f
# Report elapsed wall-clock time for the whole pull.
etime=`date +%s`
etime=`expr $etime - $stime`
echo "Copy of remapped CRAM from GCE to $crampath completed in $etime seconds"
SetDB $bamid state_b${build} 20      # Mark b38 as done
SetDB $bamid state_gce38bcf 1        # We need more reprocessing
SetDB $bamid state_gce38copy 0
Successful
Log $etime
exit
| true
|
e87ea3ccc704a97842bb27895754b36167573671
|
Shell
|
old-bear/Learning-The-Bash-Shell
|
/convert2jpg
|
UTF-8
| 4,379
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
######################################################################
# Author: jrjbear@gmail.com
# Date: Wed Feb 5 20:19:47 2014
# File: convert2jpg
#
# Usage: convert2jpg [-Sq] [-s N] [-w N] [-c S] image
# Description: A helper script tool wrapping NetPBM utilities
######################################################################
# Set up the defaults
width=1
colour='-color grey'
quiet=""
# Pipeline fragments are kept in SINGLE quotes on purpose: the embedded
# ${quiet}/${size}/${colour}/${width} references are expanded later by the
# final eval, picking up whatever the command-line options set.
standardise=' | pnmtojpeg ${quiet}'
commands=""
# Print usage/help text to stderr (the caller decides whether to exit).
function print_usage()
{
    echo "Usage: $0 [-Sq] [-s N] [-w N] [-c S] image" >&2
    echo "Convert images of different types (gif, tiff...) into jpeg" >&2
    echo "-S: Enhance the image and make it sharper" >&2
    echo "-s: Scale the image to \`N'" >&2
    echo "-w: Add a border to the image with width=\`N'" >&2
    echo "-c: Add a border to the image with colour=\`S'" >&2
    echo "-q: Suppress diagnostic message" >&2
    echo "Note that the order of enhance/scale/border options will be" >&2
    echo "used as the order to do the corresponding operations" >&2
}
# Shared mock used by every NetPBM wrapper below. It forwards the data
# stream, appends a marker line naming the invoked stage, and (unless the
# global ${quiet} is set) writes a diagnostic to stderr.
# $1 - input file (empty string => copy stdin through)
# $2 - marker line appended to the stream output
# $3 - diagnostic message
_netpbm_mock_func()
{
    local infile=$1
    local marker=$2
    local diag=$3
    # Deliberately unquoted: an empty $infile must make cat read stdin.
    cat $infile
    echo "$marker"
    [[ -n "${quiet}" ]] || echo "$diag" >&2
}
# Filter stages: these always sit mid-pipeline, so they read stdin
# (empty first argument to the shared mock).
function pnmnlfilt()
{
    _netpbm_mock_func "" "${FUNCNAME} $*" \
        "Enhance the image using parameters: $*"
}
function pnmscale()
{
    _netpbm_mock_func "" "${FUNCNAME} $*" \
        "Scale the image using parameters: $*"
}
function pnmmargin()
{
    _netpbm_mock_func "" "${FUNCNAME} $*" \
        "Add margin to image using parameters: $*"
}
# Input-format converters: the image file is normally $1, but when the
# global ${quiet} is set, '-quiet' is the first word on the eval'd command
# line and the file shifts to $2.
function giftopnm()
{
    local file="$1"
    if [[ -n "${quiet}" ]]; then
        file="$2"
    fi
    _netpbm_mock_func "${file}" "${FUNCNAME} $*" \
        "Convert gif file to pnm file"
}
function tgatoppm()
{
    local file="$1"
    if [[ -n "${quiet}" ]]; then
        file="$2"
    fi
    _netpbm_mock_func "${file}" "${FUNCNAME} $*" \
        "Convert tga file to ppm file"
}
function xpmtoppm()
{
    local file="$1"
    if [[ -n "${quiet}" ]]; then
        file="$2"
    fi
    _netpbm_mock_func "${file}" "${FUNCNAME} $*" \
        "Convert xpm file to ppm file"
}
function pcxtoppm()
{
    local file="$1"
    if [[ -n "${quiet}" ]]; then
        file="$2"
    fi
    _netpbm_mock_func "${file}" "${FUNCNAME} $*" \
        "Convert pcx file to ppm file"
}
function tifftopnm()
{
    local file="$1"
    if [[ -n "${quiet}" ]]; then
        file="$2"
    fi
    _netpbm_mock_func "${file}" "${FUNCNAME} $*" \
        "Convert tiff file to pnm file"
}
function jpegtopnm()
{
    local file="$1"
    if [[ -n "${quiet}" ]]; then
        file="$2"
    fi
    _netpbm_mock_func "${file}" "${FUNCNAME} $*" \
        "Convert jpeg file to pnm file"
}
# Final stage: always reads the stream from stdin.
function pnmtojpeg()
{
    _netpbm_mock_func "" "${FUNCNAME} $*" \
        "Convert pnm file to jpeg file"
}
# Parse options. Each selected filter appends its (still single-quoted)
# pipeline fragment to ${commands}, so filters run in the order their
# flags appear on the command line.
while getopts ":Sqs:w:c:" opt; do
    case $opt in
        S ) sharpness=' | pnmnlfilt -0.7 0.45 ${quiet}'
            commands="${commands}${sharpness}" ;;
        s ) size=$OPTARG
            scale=' | pnmscale -xysize ${size} ${size} ${quiet}'
            commands="${commands}${scale}" ;;
        # -w and -c both arm the border stage, but it is appended only once.
        w ) width=$OPTARG
            if [[ -z "${border}" ]]; then
                border=' | pnmmargin ${colour} ${width} ${quiet}'
                commands="${commands}${border}"
            fi ;;
        c ) colour="-color ${OPTARG}"
            if [[ -z "${border}" ]]; then
                border=' | pnmmargin ${colour} ${width} ${quiet}'
                commands="${commands}${border}"
            fi ;;
        q ) quiet='-quiet' ;;
        \?) print_usage
            exit 1 ;;
    esac
done
shift $(($OPTIND-1))
# Convert each input image: pick the converter from the file extension,
# then eval the whole pipeline string so the deferred ${quiet}/${size}/...
# variables expand at run time.
# BUGFIX: the original contained corrupted "$(unknown)" tokens in the case
# selector and the eval line; both must be the current "${filename}"
# (cf. the ${filename##*.}/${filename%.*} expansions that already use it).
for filename in "$@"; do
    case "${filename}" in
        *.gif ) convert='giftopnm' ;;
        *.tga ) convert='tgatoppm' ;;
        *.xpm ) convert='xpmtoppm' ;;
        *.pcx ) convert='pcxtoppm' ;;
        *.tif ) convert='tifftopnm' ;;
        *.jpg ) convert='jpegtopnm' ;;
        * ) echo "$0: Unknown filetype '${filename##*.}'" >&2
            exit 1;;
    esac
    convert="${convert}"' ${quiet}'
    outfile="${filename%.*}.new.jpg"
    eval "${convert}" "${filename}" "${commands}" "${standardise}" > "${outfile}"
done
| true
|
47dab84127ebfaf51b7bc6050c1464065b31c563
|
Shell
|
algowave/http
|
/squid.sh
|
UTF-8
| 4,798
| 3.203125
| 3
|
[] |
no_license
|
#SQUID CONFIG: DEBIAN
# Script version string, reported by -v.
SoftRelease="SquidAuto 1.0.0"
# Source range allowed to use the proxy WITHOUT password authentication.
AuthorIpRange="199.91.71.193/32"
# A random listening port could be chosen instead (kept for reference):
#PORT=$(( RANDOM % ( 65534-1024 ) + 1024 ))
PORT=2012
# Show the current trusted source range; with $1 (or interactive input)
# rewrite the "acl mymaster src" line in squid.conf and echo the result.
function authorIps(){
	echo "AuthotIpRange: "$(cat /etc/squid3/squid.conf | sed -n 's/\<acl mymaster src \(.*\)/\1/p')
	[ -z "$1" ] && read -p "Adding New IpRange:" newiprange
	[ -n "$1" ] && newiprange=$1
	# NOTE(review): the replacement leaves a trailing space after the range.
	[ -n "$newiprange" ] && ( sed -i "s/\<acl mymaster src.*/acl mymaster src ${newiprange} /g" /etc/squid3/squid.conf ) && \
	echo "Update AuthotIpRange: "$(cat /etc/squid3/squid.conf | sed -n 's/\<acl mymaster src \(.*\)/\1/p')
}
# Add (or update) a proxy user. $1 is "user@password".
# BUGFIX: the original used `cut -d @ -f 2` for the password, silently
# truncating any password that itself contains '@'; parameter expansion
# keeps everything after the first '@'.
function adduser(){
	user=${1%%@*}
	pwd=${1#*@}
	[ -z "$user" ] && return 0
	$(find / -name htpasswd | head -1 ) -b /etc/squid3/passwd ${user} ${pwd}
}
# Remove proxy user $1 from the squid password file; no-op on empty input.
function deluser(){
	if [ -z "${1}" ]; then
		return 0
	fi
	htpasswd_bin=$(find / -name htpasswd | head -1)
	${htpasswd_bin} -D /etc/squid3/passwd ${1}
}
# Append an outgoing-address mapping for IP $1 to squid.conf.
# The acl name is "ip-add" plus the last octet of the address.
function addip(){
	[ -z "${1}" ] && return 0
	ipname="ip-add"$( echo ${1} | sed 's/.*\.\(.*\)/\1/g' )
	cat >>/etc/squid3/squid.conf<<EOF
acl ${ipname} myip ${1}
tcp_outgoing_address ${1} ${ipname}
EOF
}
# Delete every squid.conf line containing the IP in $1.
# NOTE(review): $1 is spliced into the sed address unescaped, so dots act
# as regex wildcards; acceptable for the generated config lines here.
function delip(){
	[ -z "${1}" ] && return 0
	sed -i '/'${1}'/d' /etc/squid3/squid.conf
}
# One-shot installer: installs squid3 + apache2-utils, writes a fresh
# /etc/squid3/squid.conf, optionally binds every public IP as an outgoing
# proxy address, creates the default user and starts the service.
function install(){
	apt-get update
	apt-get install squid3 apache2-utils -y
	/etc/init.d/squid3 stop
	mkdir -p /var/squid3/cache
	useradd squid3 -s /bin/false
	chown squid3:squid3 /var/squid3/cache/ -R
	chown squid3:squid3 /var/log/squid3/ -R
	# Base configuration; mymaster (AuthorIpRange) bypasses password auth,
	# everyone else must authenticate via the ncsa_auth password file.
	cat >/etc/squid3/squid.conf <<EOF
http_port ${PORT}
dns_nameservers 8.8.8.8
cache_access_log /var/log/squid3/access.log
cache_log /var/log/squid3/cache.log
cache_effective_user squid3
cache_effective_group squid3
cache_mem 5 MB
cache_dir ufs /var/squid3/cache 4096 16 256
cache_store_log /var/log/squid3/store.log
#visible_hostname
#cache_mgr
acl ip_allow src all
acl mymaster src ${AuthorIpRange}
http_access allow mymaster
auth_param basic program /usr/lib/squid3/ncsa_auth /etc/squid3/passwd
acl passwder proxy_auth REQUIRED
http_access allow passwder
http_access deny all
forwarded_for delete
via Deny all
EOF
	# Collect public IPv4 addresses (skip loopback/private ranges).
	ip_num=$(ifconfig | grep 'inet addr' | grep -Ev 'inet addr:127.0.0|inet addr:192.168.0|inet addr:10.0.0' | sed -n 's/.*inet addr:\([^ ]*\) .*/\1/p' | wc -l)
	ips=$(ifconfig | grep 'inet addr' | grep -Ev 'inet addr:127.0.0|inet addr:192.168.0|inet addr:10.0.0' | sed -n 's/.*inet addr:\([^ ]*\) .*/\1/p')
	genall=""
	[ $ip_num -gt 1 ] && ( echo $ips | sed 's/ /\n/g' ) && read -p "Server IP > 1, Adding all IP as HTTP Proxy Server ?(yes|no)" genall
	if [ "$genall" == "yes" ] || [ "$genall" == "Yes" ] || [ "$genall" == "YES" ];then
		# Bind each address as its own outgoing proxy address.
		for((i=1;i<=ip_num;i++));do
			ip=$( echo $ips | sed 's/ /\n/g' | sed -n ${i}p )
			ipname="ip"$i$( echo $ip | sed 's/.*\.\(.*\)/\1/g' )
			cat >>/etc/squid3/squid.conf<<EOF
acl ${ipname} myip ${ip}
tcp_outgoing_address ${ip} ${ipname}
EOF
		done
	elif [ "$genall" == "" ] || [ "$genall" == "No" ] || [ "$genall" == "no" ] || [ "$genall" == "NO" ];then
		# Default: use only the first public address.
		ip=$(echo $ips | sed 's/ /\n/g'|sed -n 1p)
		ipname="ipm"$( echo $ip | sed 's/.*\.\(.*\)/\1/g' )
		echo "Setting Proxy as ${ip}:${PORT} You can Modify/Add it latter."
		cat >>/etc/squid3/squid.conf<<EOF
acl ${ipname} myip ${ip}
tcp_outgoing_address ${ip} ${ipname}
EOF
	else
		echo "INPUT ERROR!" && exit
	fi
	echo "Setting Default user and password | squid:squid"
	/usr/bin/htpasswd -c -b /etc/squid3/passwd squid squid
	# Validate the generated config before starting the daemon.
	squid3 -k parse
	/etc/init.d/squid3 start
	clear
	# Summary banner.
	cat <<EOF
+-----------------------------------------+
	Squid3 HTTP Config Done.
+-----------------------------------------+
Config Version: ${SoftRelease}
Proxy Port: ${PORT}
AuthorIpRange: ${AuthorIpRange}
User: squid@squid
+_________________________________________+
EOF
}
# Command-line dispatch: exactly one action per invocation.
while getopts "ia:d:p:q:m:v" arg
do
	case $arg in
		"i")
			install
			;;
		"a")
			adduser $OPTARG
			exit
			;;
		"d")
			deluser $OPTARG
			exit
			;;
		"p")
			# Re-add an outgoing IP: drop any stale entry first, then append.
			delip ${OPTARG}
			addip ${OPTARG}
			exit
			;;
		"q")
			delip ${OPTARG}
			exit
			;;
		"m")
			authorIps "${OPTARG}"
			exit
			;;
		"v")
			echo $SoftRelease
			exit
			;;
		"?")
			echo "USAGE: ./squidauto [ -i instal | -a add user@password | -d delete user | -p add ip | -q delete ip]"
			exit
			;;
	esac
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.