blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ddbbd97cb6d78e4051a2dcc25d22d21ec3ea5936
|
Shell
|
akhurange/tutorial-multithreading
|
/combine_archives.sh
|
UTF-8
| 666
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# combine_archives.sh: merge several static libraries (.a) and/or plain
# object files into a single archive.
#
# Usage: combine_archives.sh <output.a> <input...>
#
# Members extracted from each input archive are renamed with the archive's
# basename as a prefix so identically-named objects coming from different
# archives do not overwrite each other inside the output.
#
# Fixes vs. the original: `tempfile` (deprecated; creates a *file*, not a
# directory) replaced with `mktemp -d`; the scratch dir is now removed on
# every exit path; expansions are quoted; `for j in $(ls)` replaced with a
# glob.
tmpdir=$(mktemp -d) || exit 1
trap 'rm -rf "$tmpdir"' EXIT
final_lib=$1
shift
rm -f "$final_lib"
for i in "$@"
do
  echo "$i"
  if [[ "$i" =~ ^.*\.a$ ]]; then
    # Input is a static archive: unpack it into the scratch dir, prefix
    # every member with the archive's name, then fold the members in.
    rm -rf "$tmpdir"
    mkdir -p "$tmpdir"
    cur_path=$PWD
    cp "$i" "$tmpdir"
    cd "$tmpdir" || exit 1
    arn=$(basename "$i")
    ar x "$arn" >/dev/null
    rm -f "$arn"
    arn_tmp=${arn%.a}   # archive name without the trailing ".a"
    for j in *
    do
      mv "$j" "${arn_tmp}_$j"
    done
    cd "$cur_path" || exit 1
    if ! ar r "$final_lib" "$tmpdir"/* >/dev/null 2>&1; then
      echo "ar r $final_lib $i failed!"
      exit 1
    fi
  else
    # Plain object file: add it to the output directly.
    if ! ar r "$final_lib" "$i" >/dev/null 2>&1; then
      echo "ar r $final_lib $i failed!"
      exit 1
    fi
  fi
done
| true
|
4ddab03d2a83a67e8f1bdc2a805594d255942ad9
|
Shell
|
larrycameron80/kodachi
|
/home/kodachi/.kbase/bootgui
|
UTF-8
| 380
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Script written by W. Al Maawali
# (c) 2016 Founder of Eagle Eye Digital Solutions
# http://www.digi77.com
# http://www.om77.net
# script starts here:
# Restart cairo-dock once the 'kodachi' user shows up in the `who` output.
logged_user=$(who | grep 'kodachi' | cut -d : -f 1 | awk 'NR==1')
if [[ $logged_user = *kodachi* ]]; then
  sudo killall cairo-dock
  notify-send "Dock will start in 5 seconds"
  sleep 5
  cairo-dock -c
  exit
fi
| true
|
bf0ea19813fd576d31ea09ed5b2688e4cb45fad4
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/larn/PKGBUILD
|
UTF-8
| 1,263
| 2.890625
| 3
|
[] |
no_license
|
# Contributor: Simon Parzer <simon.parzer@gmail.com>
# PKGBUILD for the classic roguelike "larn", built from the NetBSD games
# tree fetched over anonymous CVS and patched for Linux.
pkgname=larn
pkgver=20090906
pkgrel=1
pkgdesc="a roguelike computer game authored by Noah Morgan in 1986 for the UNIX operating system"
arch=("i686" "x86_64")
license=('BSD')
url="http://www.netbsd.org"
depends=('ncurses')
makedepends=('cvs')
source=('larn-bsdtolinux.diff')
md5sums=('fd26cceed04e59ade0e9ec85d721d7c5')
# NOTE(review): fetching, patching and building all happen inside package();
# modern PKGBUILDs put the fetch/build steps in prepare()/build() — confirm
# against current makepkg conventions before modernizing.
package() {
# Anonymous CVS checkout of the larn module, pinned to $pkgver's date.
_cvsroot=":pserver:anoncvs@anoncvs.NetBSD.org:/cvsroot"
_cvsmod="src/games/larn"
cd ${srcdir}
msg "Connecting to NetBSD CVS server..."
# Reuse an existing checkout if present, otherwise check out fresh.
if [ -d $_cvsmod/CVS ]; then
cd $_cvsmod
cvs -z3 update -d
else
cvs -z3 -d $_cvsroot co -D $pkgver -f $_cvsmod
cd $_cvsmod
fi
# Adapt the BSD sources to Linux, then build and install game, data,
# man page and docs under $pkgdir.
patch -Np1 -i ${srcdir}/larn-bsdtolinux.diff
make || return 1
mkdir -p ${pkgdir}/usr/bin
install -Dm755 larn ${pkgdir}/usr/bin/larn || return 1
install -Dm644 datfiles/larn.help ${pkgdir}/usr/share/larn/larn.help || return 1
install -Dm644 datfiles/larnmaze ${pkgdir}/usr/share/larn/larnmaze || return 1
gzip larn.6 || return 1
install -Dm644 larn.6.gz ${pkgdir}/usr/share/man/man6/larn.6.gz || return 1
install -Dm644 README ${pkgdir}/usr/share/doc/larn/README || return 1
install -Dm644 datfiles/larnopts ${pkgdir}/usr/share/doc/larn/larnopts || return 1
install -Dm644 Fixed.Bugs ${pkgdir}/usr/share/doc/larn/Fixed.Bugs || return 1
}
| true
|
577778b78507ce3b8e260ceebe165b60a0384ef2
|
Shell
|
rojekabc/projewski-bitcoin
|
/src/scripts/build.sh
|
UTF-8
| 169
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the project with Maven and package the projewski-portfolio module.
MAVEN=$(command -v mvn 2> /dev/null)   # `command -v` is the portable "is tool installed?"
if [ -z "$MAVEN" ]; then
	echo "Cannot find maven";
	# Bug fix: a bare `exit` here returned status 0, so callers saw
	# success even though maven was missing.
	exit 1
fi
$MAVEN clean install
$MAVEN package -pl projewski-portfolio
| true
|
f28f766b50875e245e6ca93f98f272e606512e52
|
Shell
|
amirkamran/smt-playground
|
/playground/pickbug
|
UTF-8
| 360
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Pull the sentences matching a pattern out of the evaluation triple
# (input / reference / hypothesis) so a translation bug can be inspected.
#   $1 - pattern handed to grep
#   $2 - prefix for the generated files (defaults to "bug")
pattern="$1"
prefix="${2:-bug}"
input=evaluation.in
ref=evaluation.ref.0
hyp=evaluation.opt.out
# `grep -n` over several files emits file:line:text, so the second
# colon-separated field is the line number; keep each number only once.
grep "$pattern" -n $input $ref $hyp | cut -d: -f2 | sort -u > $prefix.lines
# Extract those line numbers from each of the three files.
greplines $prefix.lines < $input > $prefix.in
greplines $prefix.lines < $ref > $prefix.ref
greplines $prefix.lines < $hyp > $prefix.hyp
echo "Selected:"
wc -l $prefix.in $prefix.ref $prefix.hyp
| true
|
e10420727c042a68f3131a181d2bea6c1d1dea35
|
Shell
|
matthiaswh/bit4
|
/test833-sshguard/templates/ipfw.rules.j2
|
UTF-8
| 1,272
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# ipfw firewall ruleset — Jinja2 template rendered by Ansible (the
# {{ ... }} placeholder below is substituted before the script runs).
# table(22) is populated by sshguard with attacker addresses.
LOOP="lo*"
PIF="{{ ansible_default_ipv4.interface }}"
PING="icmptypes 8"
UNREACH="icmptypes 3"
ipfw -q flush
# NOTE(review): alias expansion in non-interactive scripts is
# implementation-dependent; it works under FreeBSD /bin/sh, but bash
# would need `shopt -s expand_aliases` — confirm target platform.
alias ADD="ipfw -q add"
#
# Basic rules
#
ADD 0100 allow all from any to any via $LOOP
ADD 0999 check-state
#
# Inbound rules
#
# Block attackers from accessing any port. table(22) is maintained by sshguard.
ADD 1000 deny all from "table(22)" to any in
# Drop spoofed packets.
ADD 1100 deny all from any to any not verrevpath in
# Allowed packets
ADD 2000 allow icmp from any to me $PING in via $PIF keep-state
ADD 2000 allow tcp from any to me ssh in via $PIF keep-state setup
#
# Outbound rules
#
ADD 3000 allow icmp from me to any $UNREACH out via $PIF
ADD 3000 allow icmp from me to any $PING out via $PIF keep-state
ADD 3000 allow udp from me to any domain out via $PIF keep-state
ADD 3000 allow udp from me to any ntp out via $PIF keep-state
ADD 3000 allow tcp from me to any http out via $PIF keep-state setup
ADD 3000 allow tcp from me to any https out via $PIF keep-state setup
#
# Fallback rules
#
# Politely refuse remaining inbound traffic (logged), then deny all.
ADD 9000 unreach port log tcp from any to me via $PIF
ADD 9000 unreach port log udp from any to me via $PIF
ADD 9100 unreach host log all from any to me via $PIF
ADD 9999 deny log all from any to any
| true
|
033a2a7514d59329139b7be9a73583f7368f4068
|
Shell
|
Aasthaengg/FinetuneGPT
|
/script.sh
|
UTF-8
| 1,316
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Fine-tune distilgpt2 on code scraped from a set of example repositories:
#  1. clone the auto_coding training harness and install its requirements,
#  2. clone the example-code repositories into dataset/,
#  3. patch convert.py / train.py so they know the new dataset names,
#  4. build the dataset and launch training.
git clone https://github.com/Aasthaengg/auto_coding.git && cd auto_coding
pip install -r requirements.txt # transformers, pytorch etc.
# Map dataset name -> repository URL.
declare -A hashmap
hashmap["pytorch_examples"]="https://github.com/pytorch/examples.git"
hashmap["tensorflow_examples"]="https://github.com/tensorflow/examples.git"
hashmap["algorithm_examples"]="https://github.com/TheAlgorithms/Python.git"
hashmap["Computer_vision"]="https://github.com/PacktPublishing/Computer-Vision-with-Python-3.git"
hashmap["NVIDIA_DeepLearning_examples"]="https://github.com/NVIDIA/DeepLearningExamples.git"
cd dataset/
echo "Cloning repositories NOW"
for key in "${!hashmap[@]}";
do
    echo $key, ${hashmap[$key]};
    git clone "${hashmap[$key]}" "$key"
    echo "Done cloning repo: $key"
done
echo hashmap has ${#hashmap[@]} elements
echo "Starting pre-processing data..."
sed -i "s/'examples'/'pytorch_examples', 'tensorflow_examples', 'algorithm_examples', 'Computer_vision', 'NVIDIA_DeepLearning_examples'/g" convert.py && python3 convert.py --segment_len 256 --stride 10 --dev_size 0.1
# Bug fix: the original command had mismatched quotes (it ended with
# ...'<Computer_vision>', "<NVIDIA_DeepLearning_examples'/g>") and could not
# parse, let alone run; rebuilt as one correctly quoted substitution.
sed -i 's/"<examples>"/"<pytorch_examples>", "<tensorflow_examples>", "<algorithm_examples>", "<Computer_vision>", "<NVIDIA_DeepLearning_examples>"/g' ../train.py
echo "Finished pre-processing data"
cd ../ && python3 train.py --model_select distilgpt2
| true
|
4b3be5ed91d12484e3eeeaec0891aa526fca9343
|
Shell
|
dhakiki/git-town
|
/src/helpers/terminal_helpers.sh
|
UTF-8
| 3,324
| 4.375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
shopt -s extglob
# Helper methods for writing to the terminal.
# Prints a line in bold
function echo_bold {
output_style_bold
echo "$@"
output_style_reset
}
# Prints an error header into the terminal.
function echo_error_header {
echo
echo_red_bold "$(echo_indented 'Error')"
}
# Prints the provided error message
function echo_error {
echo_red "$(echo_indented "$@")"
}
# Prints the string if the condition is true
function echo_if_true {
local string="$1"
local condition="$2"
if [ "$condition" = true ]; then
echo "$string"
fi
}
# Prints the message indented
function echo_indented {
echo "$@" | indent
}
# Prints the given text inline (no trailing newline) in bold
function echo_inline_bold {
output_style_bold
printf "%s" "$@"
output_style_reset
}
# Prints the given text inline in cyan bold
function echo_inline_cyan_bold {
output_style_cyan
output_style_bold
echo -n "$@"
output_style_reset
}
# Prints the given text inline, dimmed
function echo_inline_dim {
output_style_dim
echo -n "$@"
output_style_reset
}
# Prints an inline error
function echo_inline_error {
echo_red "error: $*"
}
# Prints an inline usage
function echo_inline_usage {
echo "usage: $*"
}
# Prints a continuation of an inline usage
function echo_inline_usage_or {
echo "or: $*" | indent 3
}
# Prints a header line into the terminal.
function echo_header {
echo
echo_bold "$@"
}
# Outputs the given text in red
function echo_red {
output_style_red
echo "$@"
output_style_reset
}
# Outputs the given text in red and bold
function echo_red_bold {
output_style_bold
output_style_red
echo "$@"
output_style_reset
}
# Outputs the given text inline in red and bold
function echo_inline_red_bold {
output_style_red
output_style_bold
echo -n "$@"
output_style_reset
}
# Prints the provided usage message
function echo_usage {
echo_indented "$@"
}
# Prints the header for the usage help screen into the terminal.
function echo_usage_header {
local str=$(echo_indented Usage)
echo_header "$str"
}
# Exits with status 1 (error).
# NOTE(review): the argument becomes $2 of exit_with_status, which only
# compares it against "newline" — the message text itself is never printed;
# confirm this is intended.
function exit_with_error {
exit_with_status 1 "$1"
}
# Exits with status 2 (user abort).
function exit_with_abort {
exit_with_status 2 "$1"
}
# Exits the currently running script with an exit code.
# $1 - exit code; $2 - if the literal string "newline", print a blank
# line before exiting.
function exit_with_status {
if [ "$2" = "newline" ]; then
echo
fi
exit "$1"
}
# Pipe to this function to indent output (2 spaces by default)
function indent {
local count=$1
if [ -z "$1" ]; then count=2; fi
local spaces="$(printf "%${count}s")"
sed "s/^/${spaces}/"
}
# tput wrappers for the individual terminal text attributes.
function output_style_bold {
tput bold
}
function output_style_cyan {
tput setaf 6
}
function output_style_dim {
tput dim
}
function output_style_red {
tput setaf 1
}
# Resets all terminal text attributes.
function output_style_reset {
tput sgr0
}
# Prints a command
function print_git_command {
local branch_name=$(get_current_branch_name)
echo_header "[$branch_name] $*"
}
# Prompts " [Y/n] " and reads the answer; returns 0 for yes, 1 for no.
# An empty or whitespace-only answer counts as yes (extglob pattern below);
# anything else re-prompts the caller by returning 1.
function prompt_yn {
echo -n " [Y/n] "
read yn
case "$yn" in
[Yy]) return 0;;
[Nn]) return 1;;
*([[:space:]])) return 0;;
*) echo "Please answer yes (y) or no (n)."; return 1;;
esac
}
# Run a normal (non Git) command.
#
# Prints the command and the output
function run_command {
local cmd="$1"
echo_header "$cmd"
eval "$cmd" 2>&1
}
# Run a Git command
#
# Prints the command and the Git branch it is running on, as well as the output.
function run_git_command {
local cmd="$1"
print_git_command "$cmd"
eval "$cmd" 2>&1
}
| true
|
6e94ea2fba9aa8e21d1db9c57ba31a586eb8741c
|
Shell
|
wu-qiang/sscm-client
|
/scripts/setup-script.sh
|
UTF-8
| 2,917
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Set up scripts needed for demo env
#
# Requires SSCM_BUILD_DIR and SCRIPTS_DIR; also reads GPG_SCRIPT,
# ATTESTATION_AUTHORITY_FILE, GRAFEAS_SERVER_*, WERCKER_* and
# RESOURCE_URL_FILE from the environment.
for required_var in SSCM_BUILD_DIR SCRIPTS_DIR; do
  if [ -z "${!required_var}" ] ; then
    echo "$required_var not set! Exiting."
    exit 1
  fi
done
# Copy each client helper script into the demo scripts directory,
# echoing the exact command for the build log.
for rel in pgp/gpg-script.sh sbas/sbas-check.sh grafeas/provision-grafeas.sh \
           sbas/authorities-generator.sh sbas/policy-provision.sh \
           sbas/policy-test.sh sbas/kauctl; do
  echo "cp -f $SSCM_BUILD_DIR/sscm-client/scripts/$rel $SCRIPTS_DIR"
  cp -f $SSCM_BUILD_DIR/sscm-client/scripts/$rel $SCRIPTS_DIR
done
echo "chmod +x $SCRIPTS_DIR/*"
chmod +x $SCRIPTS_DIR/*
echo "ls -l $SCRIPTS_DIR"
ls -l $SCRIPTS_DIR
# Initialize the PGP keyring and list the configured authorities.
echo "$GPG_SCRIPT --init-keyring"
$GPG_SCRIPT --init-keyring
echo "$GPG_SCRIPT --get-authority-names"
$GPG_SCRIPT --get-authority-names
#echo "$GPG_SCRIPT --test"
#$GPG_SCRIPT --test
# Generate the attestation authorities and show the result.
echo "Execute: $SCRIPTS_DIR/authorities-generator.sh"
$SCRIPTS_DIR/authorities-generator.sh
echo "ls -ld $ATTESTATION_AUTHORITY_FILE"
ls -ld $ATTESTATION_AUTHORITY_FILE
echo "cat $ATTESTATION_AUTHORITY_FILE"
cat $ATTESTATION_AUTHORITY_FILE
# Provision Grafeas and exercise the policy scripts.
echo "Execute: $SCRIPTS_DIR/provision-grafeas.sh \"${GRAFEAS_SERVER_ADDRESS}:${GRAFEAS_SERVER_PORT}\""
$SCRIPTS_DIR/provision-grafeas.sh "${GRAFEAS_SERVER_ADDRESS}:${GRAFEAS_SERVER_PORT}"
echo "Execute: $SCRIPTS_DIR/policy-provision.sh"
$SCRIPTS_DIR/policy-provision.sh
echo "Execute: $SCRIPTS_DIR/policy-test.sh"
$SCRIPTS_DIR/policy-test.sh
# Derive the maven coordinates of the checked-out project and write a
# timestamped resource URL for later pipeline steps.
cd ${WERCKER_SOURCE_DIR}
declare groupId=$(mvn -Dmaven.repo.local=${WERCKER_CACHE_DIR}/.m2 help:evaluate -Dexpression=project.groupId | grep -v "^\[INFO\]")
declare artifactId=$(mvn -Dmaven.repo.local=${WERCKER_CACHE_DIR}/.m2 help:evaluate -Dexpression=project.artifactId | grep -v "^\[INFO\]")
declare version=$(mvn -Dmaven.repo.local=${WERCKER_CACHE_DIR}/.m2 help:evaluate -Dexpression=project.version | grep -v "^\[INFO\]")
declare timestamp=$(date -u +%Y%m%d-%H%M%S-%N%z)
echo "Generating resourceUrl ..."
echo "echo 'gav://${groupId}:${artifactId}:${version}-${timestamp}' > $RESOURCE_URL_FILE"
echo "gav://${groupId}:${artifactId}:${version}-${timestamp}" > $RESOURCE_URL_FILE
| true
|
7527c81f78662056d9091c5a41dc00f1544f1949
|
Shell
|
google/santa
|
/Testing/integration/test_config_changes.sh
|
UTF-8
| 714
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -x
# TODO(nickmg): These `santactl status`s should be run with sudo to mirror the others,
# however currently (2022-10-27) non-root status is what correctly reads from provisioning profile configuration.

# Install the named mobileconfig profile from the integration configs.
install_profile() {
  bazel run //Testing/integration:install_profile -- "Testing/integration/configs/$1"
}

# Report the daemon's current block_usb setting ("true"/"false").
block_usb_state() {
  santactl status --json | jq .daemon.block_usb
}

# With the minimal config, USB blocking must be off.
install_profile default.mobileconfig
if [[ "$(block_usb_state)" != "false" ]]; then
  echo "USB blocking enabled with minimal config" >&2
  exit 1
fi

# After installing the usb-block profile, it must be on.
install_profile usb-block.mobileconfig
if [[ "$(block_usb_state)" != "true" ]]; then
  echo "USB blocking config change didnt take effect" >&2
  exit 1
fi
| true
|
a5e04a76652aef7d7dd8c3d130c7e6e2f7ddb8d0
|
Shell
|
gahm03/dotfiles
|
/upfiles2home
|
UTF-8
| 323
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
#clear
# Install the dotfiles from this repository into the user's home directory.
src=$PWD   # repository root (the directory this script is run from)
cp $src/.vimrc ~
cp $src/.tmux.conf ~
# Make sure the target directories exist before copying into them.
mkdir -p ~/.vim/templates
mkdir -p ~/.tmux
cp -r $src/templates/* ~/.vim/templates
cp -r $src/three-panes ~/.tmux
cp -r $src/compiletex ~/.tmux
echo "files copied"
# don't forget to make this file executable with: chmod 755 nameofthefile
| true
|
1296aa5a14c2d0be220f603cb18a8c4292caa7b7
|
Shell
|
spllr/nifty-bash-commands
|
/findin
|
UTF-8
| 963
| 4.4375
| 4
|
[] |
no_license
|
#!/bin/bash
# findin — recursively grep files under a directory for a string.
#
# Fix vs. the original: the -t pattern was stored in a plain string
# ("-name $TYPE") and expanded unquoted on the find command line, so a
# pattern such as *.html could be glob-expanded by the shell against the
# current directory before find ever saw it. It is now built as an array
# and passed to find quoted.
function usage() {
	echo "Recursively search files in the given directory (or CWD) for a a string."
	echo ""
	echo -e "Usage:\tfindin string"
	echo -e "\tfindin [options] string"
	echo ""
	echo -e "\t-d dir, \tThe directory to search"
	echo -e "\t-t wc, \t\tThe filetype to search (wildcard match, e.g. *.html)"
}
while getopts "d:t:?:" o ; do
	case $o in
	d ) DIR=$OPTARG;;
	t ) TYPE=$OPTARG;;
	? ) STR=$OPTARG;;
	esac
done
# Build the find(1) name filter; empty array means "match any file".
if [[ -z $TYPE ]]; then
	NAMEOPT=()
	TYPEINFO="any"
else
	TYPEINFO="$TYPE"
	NAMEOPT=(-name "$TYPE")
fi
if [[ -z $DIR ]]; then
	DIR="."
	DIRINFO="current directory"
else
	DIRINFO="$DIR"
fi
# Everything left after the parsed options is the search string.
shift $((OPTIND-1))
STR="$@"
if [[ -z $STR ]]; then
	usage;
	exit
fi
echo "Recursively searching $TYPEINFO files in $DIRINFO for '$STR'"
find "$DIR" "${NAMEOPT[@]}" -exec grep -i -H -n "$STR" {} \;
| true
|
dcac2db508c2a942c0984f99fdf76464ddeb9087
|
Shell
|
ncarrier/carino-products
|
/common/skel/bin/config_network
|
UTF-8
| 776
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/sh
# Hotplug hook: bring up the wlan0 access-point interface and give it a
# static address, then bounce the DHCP server so it rebinds to it.
# SUBSYSTEM/ACTION/INTERFACE are presumably injected by the udev/mdev
# hotplug environment — TODO confirm against the caller.
itf=wlan0
# All output goes to the kernel log so it is visible at boot.
exec > /dev/kmsg 2>&1
# Only react to our interface being added to the net subsystem.
if [ "${SUBSYSTEM}" != "net" ] || [ "${ACTION}" != "add" ] || [ "${INTERFACE}" != "${itf}" ]; then
exit 0
fi
# for an unknown reason, set -x issues empty lines when redirected to kmsg,
# hence the echoes
echo /sbin/ip link set ${itf} up
/sbin/ip link set ${itf} up
echo /sbin/ip address add 10.10.10.1/24 brd 10.10.10.255 dev ${itf}
/sbin/ip address add 10.10.10.1/24 brd 10.10.10.255 dev ${itf}
# restart services depending on the interface being configured
# udhcpd doesn't handle well being launched with a non-existant interface, so
# we have to kill it if it was launched before ${itf} was up and configured
# hostapd moans when ${itf} isn't there, but handles well it's late apparition
echo killall udhcpd
killall udhcpd
| true
|
29fdd5a39028d9edc0ce9b2ae6c2480c74905a00
|
Shell
|
nottvlike/EGP
|
/Lua/build_lua.sh
|
UTF-8
| 790
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
# Build the bundled Lua 5.3.5 for the requested platform.
# Usage: build_lua.sh [Android | iOS | Win32 | MacOSX | Linux]
#
# Bug fix: `[ $1 == "help" ]` crashed with "unary operator expected" when
# the script was run without arguments; $1 is now quoted. The redundant
# second `export PLATFORM=$1` was dropped.
if [ "$1" == "help" ]; then
	echo "build_lua.sh [Android | iOS | Win32 | MacOSX | Linux]"
	exit 0
fi
export PLATFORM=$1
# Accept only the supported platform names.
if [[ "$PLATFORM" == "Android" || "$PLATFORM" == "iOS" || "$PLATFORM" == "Win32" || "$PLATFORM" == "MacOSX" || "$PLATFORM" == "Linux" ]]; then
	echo "begin build" $PLATFORM
else
	echo "Wrong argument!"
	exit 1
fi
# Configure and build out-of-tree in ./build.
rm -rf ./build
mkdir ./build
cd build
export LUA_LIB_PATH="lua-5.3.5"
if [ "$PLATFORM" == "MacOSX" ]; then
	cmake .. -G Xcode -DPLATFORM=$PLATFORM -DLUA_LIB_PATH=$LUA_LIB_PATH
	xcodebuild -target lua -configuration Release
	# NOTE(review): the path removed (Assets/Plugins/lua.bundle) differs
	# from the path copied to (Assets/EGP/Plugins/lua.bundle) — confirm
	# which location the project actually loads from.
	rm -rf ../../Assets/Plugins/lua.bundle
	cp -rf ./Release/lua.bundle ../../Assets/EGP/Plugins/lua.bundle
else
	cmake .. -DPLATFORM=$PLATFORM -DLUA_LIB_PATH=$LUA_LIB_PATH
	make >> /dev/null
fi
cd ..
echo "finished build"
| true
|
428496bab2a95694ba25d93cf179e269d067bd9f
|
Shell
|
krattai/AEBL
|
/blades/xtreemfs/tests/test_scripts/hadoop_ssl_test.sh
|
UTF-8
| 5,224
| 3.46875
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Integration test: run a Hadoop wordcount job on an XtreemFS volume with
# SSL enabled, for each version listed in HADOOP_VERSIONS.
#   $1 - path to the XtreemFS checkout (certs, hadoop client jar)
#   $4 - scratch directory for the Hadoop download/unpack
# NOTE(review): $2 and $3 are not read here — presumably reserved by the
# test-harness calling convention; confirm with the runner.
XTREEMFS=$1
TEST_DIR=$4
HADOOP_VERSIONS="1.2.1"
# Volume name is taken from the grandparent directory of the CWD.
VOLUME="$(basename $(dirname $(pwd)))"
for VERSION in $HADOOP_VERSIONS; do
echo "Test XtreemFS with hadoop $VERSION"
#download and extract hadoop
echo "Download Hadoop $VERSION..."
wget -nv -O $TEST_DIR/hadoop-$VERSION.tar.gz http://archive.apache.org/dist/hadoop/core/hadoop-$VERSION/hadoop-$VERSION.tar.gz
VOLUME_DIR=$PWD
cd $TEST_DIR
echo "Extract Hadoop $VERSION..."
tar -zxf $TEST_DIR/hadoop-$VERSION.tar.gz
cd $VOLUME_DIR
#configure hadoop
export HADOOP_PREFIX=$TEST_DIR/hadoop-$VERSION
echo "Set HADOOP_PREFIX=$HADOOP_PREFIX"
export HADOOP_CONF_DIR=$HADOOP_PREFIX/conf/
echo "Set HADOOP_CONF_DIR=$HADOOP_CONF_DIR"
export HADOOP_LOG_DIR="$TEST_DIR/log/hadoop.log"
echo "Set HADOOP_LOG_DIR=$HADOOP_LOG_DIR"
echo "Copy XtreeemFSHadoopClient.jar to $HADOOP_PREFIX/lib/"
cp $XTREEMFS/contrib/hadoop/dist/XtreemFSHadoopClient.jar $HADOOP_PREFIX/lib/
echo "configure core-site.xml"
# Point Hadoop at the XtreemFS filesystem implementation, enable SSL and
# reference the test certificates from the XtreemFS checkout.
CORE_SITE="
<configuration>
<property>
<name>fs.xtreemfs.impl</name>
<value>org.xtreemfs.common.clients.hadoop.XtreemFSFileSystem</value>
</property>
<property>
<name>fs.default.name</name>
<value>xtreemfs://localhost:32638</value>
</property>
<property>
<name>xtreemfs.defaultVolumeName</name>
<value>$VOLUME</value>
</property>
<property>
<name>xtreemfs.client.debug</name>
<value>false</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>xtreemfs.io.read.buffer</name>
<value>false</value>
</property>
<property>
<name>xtreemfs.io.buffer.size.read</name>
<value>64</value>
</property>
<property>
<name>xtreemfs.io.write.buffer</name>
<value>false</value>
</property>
<property>
<name>xtreemfs.io.buffer.size.write</name>
<value>64</value>
</property>
<property>
<name>xtreemfs.ssl.enabled</name>
<value>true</value>
</property>
<property>
<name>xtreemfs.ssl.credentialFile</name>
<value>$XTREEMFS/tests/certs/Client.p12</value>
</property>
<property>
<name>xtreemfs.ssl.credentialFile.passphrase</name>
<value>passphrase</value>
</property>
<property>
<name>xtreemfs.ssl.trustedCertificatesFile</name>
<value>$XTREEMFS/tests/certs/trusted.jks</value>
</property>
<property>
<name>xtreemfs.ssl.trustedCertificatesFile.passphrase</name>
<value>passphrase</value>
</property>
</configuration>"
# Unquoted expansion collapses the newlines: the XML lands on one line,
# which is still valid for Hadoop's config parser.
echo $CORE_SITE > $HADOOP_PREFIX/conf/core-site.xml
echo "configure mapred-site.xml"
MAPRED_SITE="
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>localhost:9001</value>
</property>
</configuration>"
echo $MAPRED_SITE > $HADOOP_PREFIX/conf/mapred-site.xml
#prepare input
mkdir input
wget -nv -O test.txt http://www.gutenberg.org/cache/epub/1661/pg1661.txt
#test hadoop fs shell
# NOTE(review): RESULT is only ever assigned on failure; when everything
# passes, `exit $RESULT` at the bottom expands to a bare `exit` (status of
# the last command) rather than an explicit 0 — consider RESULT=0 up front.
if [ -z "$($HADOOP_PREFIX/bin/hadoop fs -ls /hadoop_with_ssl_test | grep test.txt)" ]
then echo hadoop fs -ls does not show test file!; RESULT=-1;
fi
$HADOOP_PREFIX/bin/hadoop fs -copyFromLocal test.txt /hadoop_with_ssl_test/input/
if [ -z "$(ls | grep test.txt)" ]
then echo ls does not show test file!; RESULT=-1;
fi
#run simple hadoop-job
echo "Start JobTracker and TaskTracker..."
$HADOOP_PREFIX/bin/hadoop-daemon.sh start jobtracker
$HADOOP_PREFIX/bin/hadoop-daemon.sh start tasktracker
#wait for complete start up
sleep 10s
if [[ -z "$(jps | grep TaskTracker)" || -z "$(jps | grep JobTracker)" ]]
then echo "Hadoop start up failed!"; RESULT=-1;
else
echo "Run wordcount"
$HADOOP_PREFIX/bin/hadoop jar $HADOOP_PREFIX/hadoop-examples-$VERSION.jar wordcount /hadoop_with_ssl_test/input /hadoop_with_ssl_test/output
# Column 23 of `hadoop job -list all` presumably holds the job state
# (2 = completed) — verify this offset against the Hadoop version in use.
JOB_STATUS=$($HADOOP_PREFIX/bin/hadoop job -list all | grep _0001 | cut -c 23)
if [ "$JOB_STATUS" != "2" ]
then echo "Hadoop job without buffer failed!"; RESULT=-1;
else echo "Hadoop job without buffer was successfull";
fi
$HADOOP_PREFIX/bin/hadoop fs -rmr /hadoop_with_ssl_test/output
echo "Stop JobTracker and TaskTracker..."
$HADOOP_PREFIX/bin/hadoop-daemon.sh stop jobtracker
$HADOOP_PREFIX/bin/hadoop-daemon.sh stop tasktracker
# check if JobTacker and TaskTracker stop
if [ -n "$(jps | grep TaskTracker)" ]
then
echo "TaskTracker does not stop, kill manually"
TASKTRACKER_PID=$(jps | grep TaskTracker | cut -d ' ' -f1)
kill $TASKTRACKER_PID
fi
if [ -n "$(jps | grep JobTracker)" ]
then
echo "JobTracker does not stop, kill manually"
JOBTRACKER_PID=$(jps | grep JobTracker | cut -d ' ' -f1)
kill $JOBTRACKER_PID
fi
#kill all remaining child processes
CHILD_PIDS=$(jps | grep Child | cut -d ' ' -f1)
if [ -n "$CHILD_PIDS" ]
then kill $CHILD_PIDS
fi
fi
done
exit $RESULT
| true
|
f2e459af776d7795d0f5655db4394787ed7e261b
|
Shell
|
AAABioInfo/repo
|
/W4/createCrisprReady.sh
|
UTF-8
| 2,756
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# input 1: $1= motif_list.txt $2=All_exomes $3=$All_exomes_Test
# Output: Output: dir(species_name)
# files:
# species_name_motif_count_All.txt
# species_name_count_TOP3
# species_topMotif1_count.fa
# species_topMotif2_count.fa
# species_topMotif3_count.fa
#Creater Aldo Amaya
#Purpose: Identify the 3 highest occurring motifs in each exome inside the exomes folder.
# this will link up any desired list
#TargerSeq=$(echo $2)
# Stage the motif list and the exome-ID list inside the exome directory,
# then work from there.
cat $1 >sourceL
TargetSeq=$(echo $2)
cat $3>All_exomes_ID
#used to test script
#cat motif_list.txt>sourceL # this will link up any desired list
#TargetSeq=$(echo All_exomes/)
#cat All_exomes_Test>All_exomes_ID
cp sourceL $TargetSeq
cp All_exomes_ID $TargetSeq
rm sourceL All_exomes_ID
cd $TargetSeq
# lenM = number of unique motifs, lenExo = number of unique exome IDs.
lenM=$(sort sourceL| uniq|wc -l)
let i=1 # when using ls it counts All_exomes_Test
lenExo=$(sort All_exomes_ID| uniq|wc -l)
# Go through each exoname and create a directory and store high motifs
while [ $i -le $lenExo ] # go through list of all samples
do
# from our sublist we will create a folder to contain all files top exo Motifs_count,i.e. fox_AAAA_103.fafsa,fox_AAAA_102.fafsa, fox_AAAA_101.fafsa
# i-th exome ID (head|tail picks line i of the ID list).
tempF=$(head -n $i All_exomes_ID| tail -n 1)
mkdir $tempF
cat "$tempF.fasta" >Lfasta.fa
#set params for new loop to grep each motif to fasta file
ii=1
#touch "$tnExoname _motif_count_All.txt"
# Count, for every motif, the number of fasta lines containing it
# (grep|wc -l counts matching lines, not total occurrences).
while [ $ii -le $lenM ]
do
tempM=$(head -n $ii sourceL| tail -n 1)
countN=$(grep $(echo $tempM) Lfasta.fa|wc -l)
echo $tempF $tempM $countN >>"${tempF}_motif_count_All.txt"
let ii=ii+1
done
#ls -lt| head
#copy each of these list into exoname folder
# Top 3 motifs by count (column 3, numeric, descending).
cat "${tempF}_motif_count_All.txt" | sort -k3nr| head -n 3 > "${tempF}_motif_count_TOP3.txt"
cp "${tempF}_motif_count_TOP3.txt" $tempF
cp "${tempF}_motif_count_All.txt" $tempF
# sets a temporary list to reprocess
awk '{print $2}' "${tempF}_motif_count_TOP3.txt" > TempT3
iii=1 # this will be referencing top list
lenTT3=$(cat TempT3|wc -l) #lenTT3=3
# For each top motif, pull the matching records into their own fasta.
# NOTE(review): `grep -B1` assumes each fasta record is exactly two
# lines (header + sequence) — confirm for wrapped sequences.
while [ $iii -le $lenTT3 ]
do
tempM3=$(head -n $iii TempT3| tail -n 1)
countN3=$(grep $(echo $tempM3) Lfasta.fa|wc -l|sed 's/ //g')
grep -B1 $(echo $tempM3) Lfasta.fa> "${tempF}_${tempM3}_${countN3}.fasta"
cp "${tempF}_${tempM3}_${countN3}.fasta" $tempF
rm "${tempF}_${tempM3}_${countN3}.fasta"
let iii=iii+1
done
#Clean up after each motif
rm "${tempF}_motif_count_TOP3.txt" "${tempF}_motif_count_All.txt" TempT3
#touch $(echo $tempM).fa
let i=i+1
done
#
clear
echo "All clear, have a nice day"
#clean up
rm sourceL Lfasta.fa
| true
|
842a6c203ee5eb7750a749cf935a4cf9d8bb14d7
|
Shell
|
veemoC3/easy-shell-zip-maker
|
/script.sh
|
UTF-8
| 376
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Prompt for a password, a list of files and an archive name, then build a
# password-protected zip from them.
#
# Fixes vs. the original: the ANSI color escapes were broken (`\e92m`
# instead of `\e[92m`), `echo -e"..."` was missing a space, and `echo -e`
# is a bashism, so the shebang is now bash.
echo -e "\e[92m insert password and files you want turned to zip and encrypted"
echo -e "\e[92m your password for the zip:"
read PASSWD
echo -e "\e[92m now insert the files you want to zip together:"
read FILES
echo -e "\e[92m choose zip file name"
read ZIPNAME
# $FILES stays unquoted on purpose: word-splitting turns the answer into
# one argument per file.
# NOTE(review): the password is visible in `ps` output while zip runs.
zip --password $PASSWD $ZIPNAME.zip $FILES
echo -e "\e[92m files sucessfully turned into zip and encrypted"
| true
|
a363980a1b260b0e77a059e0ef5516ef4464ec35
|
Shell
|
openshift/origin
|
/hack/convert-samples.sh
|
UTF-8
| 518
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Rewrite version strings in the sample json/yaml files using the
# origin-version-change tool, skipping any file listed in IGNORE_FILES
# (comma-separated, overridable from the environment).
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

os::util::ensure::built_binary_exists 'origin-version-change'

# Bug fix: this previously read `IGNORE_FILES={$IGNORE_FILES:-...}`, which
# assigned the literal text "{...}" instead of applying the default.
IGNORE_FILES=${IGNORE_FILES:-"examples/sample-app/github-webhook-example.json"}

sample_files=$(find {api,examples,docs,images,plugins,test} -name "*.json" -o -name "*.yaml")
ignore_arr=(${IGNORE_FILES//,/ })

for f in $sample_files; do
  # Bug fix: `[[ $ignore_arr =~ $f ]]` compared against the first array
  # element only; test membership against the whole ignore list instead.
  if [[ " ${ignore_arr[*]} " == *" $f "* ]]; then
    echo "-> Skipping '$f'"
  else
    echo "-> Processing '$f' ..."
    origin-version-change -r "$f"
  fi
done
| true
|
8fa8acda6acfce8adacbf9579550b2622e06faa9
|
Shell
|
Jibux/jdr_data
|
/tidy.sh
|
UTF-8
| 844
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Clean up an HTML monster description into plain text in monsters_tidy/:
# strips markup, normalizes typography, and canonicalizes French section
# headings for later parsing.
#   $1 - input HTML file; output keeps the same basename.
FILE=$1
FILE2=monsters_tidy/${FILE##*/}
# Put <ul>/<li> boundaries on their own lines so later passes work line-wise.
sed -r "s#<ul>#\n<ul>#g ; s#</ul>#</ul>\n#g ; s#</li>#</li>\n#g" $FILE > $FILE2
# Drop inline tags and braces, turn <sup>x</sup> into {x}, normalize
# spaces/dashes/quotes/bullets, and trim trailing whitespace.
sed -ri "s#<(/|)(i|a|li|ul|br)[^>]*>##g ; s#[{}]##g ; s#<sup>([^<]+)</sup>#{\1}#g ; s#<b>#\n<b>#g ; s/ ?/ /g ; s/—/-/g ; s/–/-/g ; s/’/'/g ; s/•\s*//g ; s/\s+$//g" $FILE2
# Unwrap indented <div> blocks to their text content.
sed -ri "s#<div class=\"indent\"[^>]+>([^<]+)</div>#\1#g" $FILE2
# Remove empty lines.
sed -i "/^$/d" $FILE2
# Canonicalize the various spellings of the French section headings.
sed -ri "s/(c|C)apacité(s|)\s*(s|S)péciales/Capacités spéciales/g ; s/(a|A)ttaque(s|)\s*(s|S)péciale(s|)/Attaques spéciales/g ; s/Caractéristiques\s*de\s*base/Caractéristiques/g ; s/Ecologie/Écologie/g ; s/Pouvoirs\s*(s|S)péciaux/Pouvoirs spéciaux/g ; s/Sta(t|)istiques(\s*de\s*base|)/Statistiques/g ; s/bsorption d(e l|)'âme(s|)/bsorption d'âmes/g ; s/apacité(s|) défensive(s|)/apacités défensives/g ; s/PX/XP/g" $FILE2
|
4094db8e1590c82e98fdd16ce4b16bcaa065044c
|
Shell
|
berry2012/DevSecOps
|
/GCP/install-forseti-server.sh
|
UTF-8
| 2,905
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Forseti gives you tools to understand all the resources you have in GCP.
# Inventory regularly collects data from your GCP resources and makes it available to other modules.
# Scanner periodically compares your rules about GCP resource policies against the policies collected by Inventory, and saves the output for your review.
# Enforcer uses Google Cloud APIs to change resource policy state to match the state you define.
# Explain helps you understand, test, and develop Cloud Identity and Access Management (Cloud IAM) policies.
# Notifier keeps you up to date about Forseti findings and actions.
# Objectives
# Install the Forseti server and client
# Use Scanner to scan Inventory data
# Fix the violations you find
# NOTE: <PROJECT_ID> is a placeholder — replace it with a real project id
# before running.
export PROJECT_ID=<PROJECT_ID>
export MEMBER=user:whaleberry@gmail.com
gcloud config set project $PROJECT_ID
# clone the Forseti code repository
git clone https://github.com/GoogleCloudPlatform/forseti-security.git
# Change into the Forseti repository directory:
cd forseti-security
# Switch to the correct branch:
git checkout release-2.17.0
# Execute the Forseti installer:
# NOTE(review): PROJECT_ID is reassigned here to the project *number*
# (format=projectNumber) although the name still says "ID" — confirm the
# installer expects the number.
PROJECT_ID=$(gcloud projects describe ${GOOGLE_CLOUD_PROJECT} \
--format="value(projectNumber)")
# automatically press enter for possible questions installation might ask
yes "" | python3 install/gcp_installer.py \
--composite-root-resources projects/${PROJECT_ID} & pid=$!
wait $pid
echo $pid completed
# Create a new bucket:
gsutil mb gs://${GOOGLE_CLOUD_PROJECT}-shared
# ake the bucket world readable (a violation of default Forseti rules):
gsutil acl ch -g AllUsers:R gs://${GOOGLE_CLOUD_PROJECT}-shared
# add a gmail account with viewer access to your project
gcloud projects add-iam-policy-binding \
--role="roles/viewer" \
--member="$MEMBER" \
${GOOGLE_CLOUD_PROJECT}
sleep 3
# Append a whitelist rule for project viewers to the server's IAM rules.
BUCKET=$(gsutil ls | grep forseti-server)
gsutil cp ${BUCKET}rules/iam_rules.yaml .
cat >> iam_rules.yaml << EOF
- name: project viewers whitelist
  mode: whitelist
  resource:
  - type: project
    applies_to: self
    resource_ids:
    - '*'
  inherit_from_parents: true
  bindings:
  - role: 'roles/viewer'
    members:
    - user:*@qwiklabs.net
    - user:*@*.gserviceaccount.com
EOF
gsutil cp iam_rules.yaml ${BUCKET}rules/iam_rules.yaml
# In the Forseti Cloud Shell, connect to your Forseti server instance:
VM=$(gcloud compute instances list --filter="name~'forseti-server'" | tail -1)
SERVER=$(echo ${VM} | cut -f1 -d' ')
ZONE=$(echo ${VM} | cut -f2 -d' ')
# Print (not run) the follow-up commands for the operator.
cat - << EOF
# run the commands in another shell below to ssh into foreseti and perform scan on your GCP resources
gcloud compute ssh ${SERVER} --zone ${ZONE}
# Forseti is configured to scan automatically every 2 hours. To manually perform a scan, execute the Forseti server run command:
/home/ubuntu/forseti-security/install/gcp/scripts/run_forseti.sh
EOF
echo "Job done"
| true
|
9b6b01abc3268d2a904ed531a6f32c14d645006c
|
Shell
|
monishbairagi/CSE_5TH_SEM
|
/OS/DAY8/Q2.sh
|
UTF-8
| 263
| 3.703125
| 4
|
[] |
no_license
|
# Ask for a file path, then report whether it exists and whether the
# current user may edit it.
echo -n "Enter File Path: "
# Bug fix: the answer used to be read into PATH, clobbering the command
# search path for the rest of the shell; use a private variable instead.
# (-r keeps backslashes in the typed path literal.)
read -r FILE
if [ -f "$FILE" ]; then
	echo "$FILE passwords are enabled."
	if [ -w "$FILE" ]; then
		# Bug fix: these messages interpolated $path (lowercase, never
		# set) and printed an empty name; use the variable actually read.
		echo "You have permission to edit $FILE."
	else
		echo "You do NOT have permission to edit $FILE."
	fi
else
	echo "$FILE does NOT exist."
fi
| true
|
d21852ebe6c51eb50d605ba50fdedbe558c56177
|
Shell
|
bostonaustin/public
|
/utility_bin/ssh_remove_weak_ciphers.sh
|
UTF-8
| 1,057
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Restrict ssh and sshd to strong ciphers/MACs: replace any existing
# Ciphers/MACs lines in place (keeping a .bak backup), or append them when
# absent, then restart the SSH daemon.
ssh_conf="/etc/ssh/ssh_config"
sshd_conf="/etc/ssh/sshd_config"
if grep -q Ciphers ${ssh_conf}; then
  # 'c\' replaces the whole matched line; the leading space keeps the
  # client-config indentation.
  sed -i.bak '/Ciphers.*/c\ Ciphers aes128-ctr,aes192-ctr,aes256-ctr' $ssh_conf
  sed -i.bak '/MACs.*/c\ MACs hmac-sha1,hmac-ripemd160' $ssh_conf
else
  echo "" >> $ssh_conf
  # Bug fix: this header line was previously printed to stdout instead of
  # being appended to the config file.
  echo "# lines added to remove weak SSH Ciphers " >> $ssh_conf
  echo "Ciphers aes128-ctr,aes192-ctr,aes256-ctr" >> $ssh_conf
  echo "MACs hmac-sha1,hmac-ripemd160" >> $ssh_conf
fi
if grep -q Ciphers ${sshd_conf}; then
  sed -i.bak '/Ciphers.*/c\Ciphers aes128-ctr,aes192-ctr,aes256-ctr' $sshd_conf
  sed -i.bak '/MACs.*/c\MACs hmac-sha1,hmac-ripemd160' $sshd_conf
else
  echo "" >> $sshd_conf
  echo "# lines added to remove weak SSH Ciphers" >> $sshd_conf
  echo "Ciphers aes128-ctr,aes192-ctr,aes256-ctr" >> $sshd_conf
  echo "MACs hmac-sha1,hmac-ripemd160" >> $sshd_conf
fi
# The service is "ssh" on Debian-style systems and "sshd" elsewhere; try
# the first and fall back to the second.
service ssh restart
if [ ! "$?" -eq "0" ]; then
  echo "[ERROR] SSH service not found, trying SSHD instead on this EC2 NAT image ..."
  service sshd restart
fi
exit 0
| true
|
e50f59eb93bbdaea4ca140fd56f5d44d14ee6e02
|
Shell
|
jhavensHMC/sciATAC
|
/phenoSheet.sh
|
UTF-8
| 845
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#author: Jennifer Havens
#makes pheno_sheet.txt for monocle analysis
#run after split.sh
#once done can run monocle analysis
scFILES=/home/exacloud/lustre1/SpellmanLab/havens/ATAC/scAlign/* #gives directory where aligned files split by cells are put, end with *
OUTfile=/home/exacloud/lustre1/SpellmanLab/havens/ATAC/pheno_sheet.txt
# rows cells, columns are attibutes (sample and sites (counted unique reads))
echo cells sample sites > $OUTfile
#pattern of sc alignment file names $OUTdir/$samp.$cellID.bam
for cellFILE in $scFILES
do
	# read count excluding unmapped (0x4), secondary (0x100) and
	# supplementary (0x800) alignments
	mapped=$(samtools view -F 0x904 -c $cellFILE)
	# Bug fix: this used $cellFile (wrong capitalisation, never set), so
	# every row previously got an empty file name / cell / sample.
	fileName=$(echo $cellFILE | awk -F'/' '{print $(NF)}')
	cell=$(echo $fileName | awk -F'.' '{print $2}')
	sample=$(echo $fileName | awk -F'.' '{print $1}')
	# Bug fix: rows were appended to ./pheno_sheet.txt in the current
	# directory instead of the $OUTfile that holds the header.
	echo $cell $sample $mapped >> $OUTfile
done
| true
|
2b331d25fe7bc4345b8db5ad49ee85eb0fde2539
|
Shell
|
mrkeuz/docker-archlinux
|
/makepkg/docker-enable-makepkg-repo
|
UTF-8
| 215
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Helper that adds the makepkg repo to pacman.conf
set -e
# Bug fixed: the original pattern '^[makepkg]' is a regex bracket
# expression matching any single one of those characters at line start,
# so the guard fired on unrelated lines (e.g. "misc = ...").  Match the
# literal "[makepkg]" section header instead.
if grep -q '^\[makepkg\]' /etc/pacman.conf; then
    # repo already enabled; signal "nothing done" like the original
    exit 1
fi
cat >> /etc/pacman.conf << EOF
[makepkg]
Server=file:///repo/
SigLevel=Never
EOF
| true
|
e760603e63a7f06907cfbf6d7aa7a062ee5ad6e5
|
Shell
|
fmjabato/PhenFun
|
/launch_build_networks.sh
|
UTF-8
| 2,001
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# @author Pedro Seoane Zonjic, Fernando Moreno Jabato
# Downloads external reference data (genome annotation, HPO database and
# ontology), prepares the HPO dictionary, then launches the AutoFlow
# "build_networks" workflow on the cluster scratch area.
# Initialize DEPENDENCIES
# > Autoflow
source ~soft_bio_267/initializes/init_autoflow
# Add necessary scripts
current_dir=`pwd`
export PATH=$current_dir'/scripts':$PATH
export PATH=$current_dir'/sys_bio_lab_scripts':$PATH
#establish the variables we need in the workflow
mkdir external_data
# GRCh37.p13 genome annotation (GFF3) from NCBI
wget 'ftp://ftp.ncbi.nih.gov/genomes/Homo_sapiens/ARCHIVE/ANNOTATION_RELEASE.105/GFF/ref_GRCh37.p13_top_level.gff3.gz' -O external_data/genome.gz
gunzip -d external_data/genome.gz
# HPO phenotype-to-gene associations (all sources / all frequencies)
wget 'http://compbio.charite.de/jenkins/job/hpo.annotations.monthly/lastSuccessfulBuild/artifact/annotation/ALL_SOURCES_ALL_FREQUENCIES_phenotype_to_genes.txt' -O external_data/hpo_db.txt
# keep only HPO id (col 1) and gene symbol (col 3), dropping the header line
tail -n +2 external_data/hpo_db.txt | cut -f 1,3 > external_data/hpo_db_phen2gene.txt
wget http://compbio.charite.de/jenkins/job/hpo.annotations/lastStableBuild/artifact/misc/phenotype_annotation.tab -O external_data/phenotype_annotation.tab
# HPO ontology in OBO format
wget -O external_data/hp.obo http://purl.obolibrary.org/obo/hp.obo --no-check-certificate
# Patients info
mkdir processed_data
# Convert YOUR DATA format to our processing format (if it's necessary)
# [1] : Patient [2]: Chr [3]: Start [4]: End [5]: HPO_Name/Code
# ## ## ## ## ## ##
# Create HPOs dictionary
parse_hpo_file.rb external_data/hp.obo > processed_data/hpo2name.txt
# About $hpo_enrichment
# '-r' => no enrichment, '' => enrichment
# Prepare variables
# NOTE(review): the whole list is collapsed into one comma-separated string;
# tr -d strips ALL whitespace, so no value may contain spaces.
variables=`echo -e "
\\$patients_file=$current_dir'/processed_data/patient_data.txt',
\\$hpo_dict=$current_dir'/processed_data/hpo2name.txt',
\\$genome_annotation=$current_dir'/external_data/genome',
\\$number_of_random_models=2,
\\$association_thresold=2,
\\$official_hpo=$current_dir/external_data/hpo_db_phen2gene.txt,
\\$hpo_enrichment='',
\\$hpo_ontology=$current_dir/external_data/hp.obo,
\\$regions_blacklist='',
\\$regions_filter='c'
" | tr -d [:space:]`
#Lauch autoflow
AutoFlow -w build_networks.af -o $SCRATCH'/build_Networks' -V $variables -m 30gb $1
| true
|
7bb2290dbc86eeb7a9925593a749df4548fdbefa
|
Shell
|
jrviray/Scripts
|
/korn/script7.ksh
|
UTF-8
| 117
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/ksh
#Name: PrintDays
#Prints the days of week using shift
# Iterates once per original argument; each pass prints (up to) the first
# seven remaining positional parameters, then shifts one off, producing a
# shrinking "triangle" of the argument list.
# NOTE(review): $@ is unquoted, so arguments containing spaces would be
# re-split — presumably day names never contain whitespace.
for x in $@
do
echo $1 $2 $3 $4 $5 $6 $7
shift
done
| true
|
e3283257ceba8407bc6a371b3a09f6fbc1c8be79
|
Shell
|
sprnza/awesome_conf
|
/bin/helpers.sh
|
UTF-8
| 3,547
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
case $1 in
xset)
xset q|awk '/ timeout:| DPMS is / {if(++i%4==0) printf RS; printf $NF FS }'
;;
mpstat)
mpstat 1 1 |tail -1| awk '{ printf("%d",100 - $12) }'
;;
firefox_tabs)
if ps aux|grep "[f]irefox -P Sprnza" >/dev/null; then
FF_curr=$(sed -n "$(python2 <<< $'import json\nf = open("/home/speranza/.mozilla/firefox/a8s1f1wm.default/sessionstore-backups/recovery.js", "r")\njdata = json.loads(f.read())\nf.close()\nf.close()\nprint str(jdata["windows"][0]["selected"])')p" <(python2 <<< $'import json\nf = open("/home/speranza/.mozilla/firefox/a8s1f1wm.default/sessionstore-backups/recovery.js", "r")\njdata = json.loads(f.read())\nf.close()\nfor win in jdata.get("windows"):\n\tfor tab in win.get("tabs"):\n\t\ti = tab.get("index") - 1\n\t\tprint tab.get("entries")[i].get("url")')|sed "s/www.//g" |awk -F/ '{print $3}'|awk '!x[$0]++')
fi
if ps aux|grep "[f]irefox -P Nusha" >/dev/null; then
FF_curr_nush=$(sed -n "$(python2 <<< $'import json\nf = open("/home/speranza/.mozilla/firefox/f47o1j9v.Nusha/sessionstore-backups/recovery.js", "r")\njdata = json.loads(f.read())\nf.close()\nf.close()\nprint str(jdata["windows"][0]["selected"])')p" <(python2 <<< $'import json\nf = open("/home/speranza/.mozilla/firefox/f47o1j9v.Nusha/sessionstore-backups/recovery.js", "r")\njdata = json.loads(f.read())\nf.close()\nfor win in jdata.get("windows"):\n\tfor tab in win.get("tabs"):\n\t\ti = tab.get("index") - 1\n\t\tprint tab.get("entries")[i].get("url")')|sed "s/www.//g" |awk -F/ '{print $3}'|awk '!x[$0]++')
fi
echo "tabs = {\"$FF_curr\", \"$FF_curr_nush\"}"
;;
mon)
output="$HOME/.bin/temp/local_status"
uptime -p > $output
uptime | grep -ohe 'load average[s:][: ].*' | awk '{ print $3" "$4" "$5}' >> $output
checkupdates|wc -l >> $output
df --output=pcent|sed -n -e 4p -e 8p|sed s/" "//g|tr '\n' ' '>> $output
echo "" >> $output
sensors|awk '/^Core 0/{print $3}' >>$output
;;
vpn)
ifconfig|grep -q "ppp0" && echo 1 || echo 0
;;
pm_tabs)
if ps aux|grep -q "[p]alemoon --no-remote -P Sprnza" >/dev/null; then
palemoon_curr_my=$(sed -n "$(python2 <<< $'import json\nf = open("/home/speranza/.moonchild productions/pale moon/a1kp5jx3.default/sessionstore.js", "r")\njdata = json.loads(f.read())\nf.close()\nf.close()\nprint str(jdata["windows"][0]["selected"])')p" <(python2 <<< $'import json\nf = open("/home/speranza/.moonchild productions/pale moon/a1kp5jx3.default/sessionstore.js", "r")\njdata = json.loads(f.read())\nf.close()\nfor win in jdata.get("windows"):\n\tfor tab in win.get("tabs"):\n\t\ti = tab.get("index") - 1\n\t\tprint tab.get("entries")[i].get("url")')|sed "s/www.//g" |awk -F/ '{print $3}'|awk '!x[$0]++')
fi
if ps aux|grep -q "[p]alemoon --no-remote -P Nusha"; then
palemoon_curr_nush=$(sed -n "$(python2 <<< $'import json\nf = open("/home/speranza/.moonchild productions/pale moon/4qs91zqv.Nusha/sessionstore.js", "r")\njdata = json.loads(f.read())\nf.close()\nf.close()\nprint str(jdata["windows"][0]["selected"])')p" <(python2 <<< $'import json\nf = open("/home/speranza/.moonchild productions/pale moon/4qs91zqv.Nusha/sessionstore.js", "r")\njdata = json.loads(f.read())\nf.close()\nfor win in jdata.get("windows"):\n\tfor tab in win.get("tabs"):\n\t\ti = tab.get("index") - 1\n\t\tprint tab.get("entries")[i].get("url")')|sed "s/www.//g" |awk -F/ '{print $3}'|awk '!x[$0]++')
fi
echo "tabs = {\"$palemoon_curr_my\", \"$palemoon_curr_nush\"}"
;;
esac
| true
|
7a952f2f8e4c03963335d9c2427fda89452159b0
|
Shell
|
centreon/centreon-plugins
|
/tests/functional/cloud/aws/cloudtrail/checktrailstatus.sh
|
UTF-8
| 1,102
| 3.390625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Functional test for the cloud::aws::cloudtrail checktrailstatus mode of
# centreon_plugins, exercised against a local mock endpoint on port 3000.
current_dir="$( cd "$(dirname "$0")/../../../../.." >/dev/null 2>&1 || exit ; pwd -P )"
cmd="perl $current_dir/src/centreon_plugins.pl --plugin=cloud::aws::cloudtrail::plugin --custommode=paws --region=eu-west --aws-secret-key=secret --aws-access-key=key"
nb_tests=0
nb_tests_ok=0

# run_case <case-name> <endpoint-suffix> <expected-plugin-output>
# Runs one checktrailstatus invocation against the mock endpoint and
# updates the global nb_tests / nb_tests_ok counters.  On mismatch it
# prints "<case-name> ko" followed by the actual output, exactly like
# the original inline test blocks did.
run_case() {
    local name="$1" suffix="$2" expected="$3"
    local output
    output=$($cmd --mode=checktrailstatus --endpoint=http://localhost:3000/cloudtrail/gettrailstatus/$suffix --trail-name=TrailName)
    ((nb_tests++))
    if [[ $output = "$expected" ]]
    then
        ((nb_tests_ok++))
    else
        echo "$name ko"
        echo $output
    fi
}

run_case test_status_ok true "OK: Trail is logging: 1 | 'trail_is_logging'=1;;;0;"
run_case test_status_critical false "CRITICAL: Trail is logging: 0 | 'trail_is_logging'=0;;;0;"

if [[ $nb_tests_ok = $nb_tests ]]
then
    echo "OK: "$nb_tests_ok"/"$nb_tests" tests OK"
else
    echo "NOK: "$nb_tests_ok"/"$nb_tests" tests OK"
fi
| true
|
c2bc6fe72785af6ee8ddd09b7f4dec68e81745de
|
Shell
|
Jorengarenar/dotfiles
|
/git/template/hooks/pre-push
|
UTF-8
| 739
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# pre-push hook: when the optional git-style-diff helper is installed,
# warn if outgoing commits differ from the autoformatter's output and
# ask for interactive confirmation before allowing the push.
# "all zeros" object id; git uses it to mark a ref being deleted
zero=$(git hash-object --stdin </dev/null | tr '0-9a-f' '0')
# git feeds one line per ref: <local ref> <local oid> <remote ref> <remote oid>
while read -r local_ref local_oid remote_ref remote_oid
do
# deleting a remote ref: nothing to style-check
if [ "$local_oid" = "$zero" ]; then
continue
fi
# only run the check when git-style-diff is installed and executable
if [ -x "$(command -v git-style-diff)" ]; then
out=$(git style-diff --color "$remote_oid" "$local_oid")
if [ -n "$out" ]; then
echo "$out"
printf "\n"
printf 'Some of your changes differ from autoformatter output.\n'
printf 'Are you sure you want to push without fixing them? [y/N] '
# hooks do not run with stdin attached to the user; read from the terminal
exec < /dev/tty
read -r yn
case "$yn" in
y|Y|yes|YES|Yes) exit 0 ;;
esac
# any other answer aborts the push
exit 1
fi
fi
done
| true
|
64a7270e233d31db3bcfd1aac5e75beaadd335be
|
Shell
|
SolarNetwork/solarnode-os-images
|
/debian/bin/armbian-build.sh
|
UTF-8
| 7,208
| 3.765625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
BOARD="${BOARD:-nanopiair}"
BOOT_DEV_LABEL="${BOOT_DEV_LABEL:-SOLARBOOT}"
ROOT_DEV_LABEL="${ROOT_DEV_LABEL:-SOLARNODE}"
BUILD_HOME="armbian"
COMPRESS_DEST_IMAGE=""
COMPRESS_DEST_OPTS="-8 -T 0"
DEBIAN_VERSION="10"
IMAGE_SIZE="952"
SKIP_BUILD=""
SKIP_DATE_CHECK=""
SKIP_CUST_IMAGE=""
do_help () {
cat 1>&2 <<EOF
Usage: $0 <arguments>
Setup script for a minimal SolarNode OS based on Armbian.
Arguments:
-B - skip Armbian build
-D - skip image date check
-I - skip custom image creation (use the customize.sh script later)
-h <armbian build dir> - path to the Armbian build directory; defaults to armbian-build
-M <debian version> - defaults to 10; supported values are: 10, 11
-s <mb> - size of disk image, in MB; defaults to 952
-Z <options> - xz options to use on final image; defaults to '-8 -T 0'
-z - compress final image with xz
EOF
}
while getopts ":BDIh:M:s:Z:z" opt; do
case $opt in
B) SKIP_BUILD='TRUE';;
D) SKIP_DATE_CHECK='TRUE';;
I) SKIP_CUST_IMAGE='TRUE';;
h) BUILD_HOME="${OPTARG}";;
M) DEBIAN_VERSION="${OPTARG}";;
s) IMAGE_SIZE="${OPTARG}";;
Z) COMPRESS_DEST_OPTS="${OPTARG}";;
z) COMPRESS_DEST_IMAGE="TRUE";;
*)
echo "Unknown argument ${OPTARG}"
do_help
exit 1
esac
done
shift $(($OPTIND - 1))
if [ ! `id -u` = 0 ]; then
echo "You must be root to run this script."
exit 1
fi
if [ ! -e "${BUILD_HOME}/compile.sh" ]; then
echo "Invalid build home '$BUILD_HOME', compile.sh not found."
exit 1
fi
DEBIAN_NAME=""
OUTPUT_RELEASE=""
case "$DEBIAN_VERSION" in
10)
DEBIAN_NAME="buster"
OUTPUT_RELEASE="deb10"
;;
11)
DEBIAN_NAME="bullseye"
OUTPUT_RELEASE="deb11"
;;
*)
echo "Unknown Debian version ${OPTARG}"
do_help
exit 1
esac
# Ensure a btrfs filesystem carries the wanted label.
# $1 = block device, $2 = desired label.
# btrfs relabels mounted filesystems through their mount target, hence
# the findmnt lookup.
setup_dev_btrfs () {
    local device="$1"
    local wanted="$2"
    local current mount_target
    current=$(btrfs filesystem label "$device")
    mount_target=$(findmnt -f -n -o TARGET "$device")
    if [ "$current" != "$wanted" ]; then
        echo -n "Setting device $device label to $wanted... "
        if btrfs filesystem label "$mount_target" "$wanted"; then
            echo "OK"
        else
            echo "ERROR"
        fi
    else
        echo "Device $device already has label $wanted."
    fi
}
# Ensure an ext2/3/4 filesystem carries the wanted label.
# $1 = block device, $2 = desired label.
setup_dev_ext () {
    local device="$1" wanted="$2"
    local current
    current=$(e2label "$device")
    case "$current" in
        "$wanted")
            echo "Device $device already has label $wanted."
            ;;
        *)
            echo -n "Setting device $device label to $wanted... "
            if e2label "$device" "$wanted"; then echo "OK"; else echo "ERROR"; fi
            ;;
    esac
}
# Prepare the boot partition: label ext filesystems with $BOOT_DEV_LABEL
# and switch the armbianEnv.txt root-device reference from UUID to label.
# $1 = boot partition device, $2 = its mount point.
setup_boot_dev () {
local dev="$1"
local mnt="$2"
local fs_type=$(findmnt -f -n -o FSTYPE "$dev")
case $fs_type in
# only ext* boot filesystems are relabelled here (no btrfs /boot)
ext*) setup_dev_ext "$dev" "$BOOT_DEV_LABEL";;
esac
# point u-boot at the root fs by label so the image works on any media
if grep '^rootdev=UUID=' $mnt/armbianEnv.txt >/dev/null 2>&1; then
echo -n "Changing rootdev from UUID to LABEL=SOLARNODE in $mnt/armbianEnv.txt... "
sed -i 's/^rootdev=UUID=.*/rootdev=LABEL='"$ROOT_DEV_LABEL"'/' $mnt/armbianEnv.txt \
&& echo "OK" || echo "ERROR"
fi
}
# Prepare the root partition: apply $ROOT_DEV_LABEL, rewrite /etc/fstab to
# mount /boot and / by label instead of UUID, switch btrfs compression
# from lzo to zstd, and pin an explicit tmpfs size for /run.
# $1 = root partition device, $2 = its mount point.
setup_root_dev () {
local dev="$1"
local mnt="$2"
local fs_type=$(findmnt -f -n -o FSTYPE "$dev")
case $fs_type in
btrfs) setup_dev_btrfs "$dev" "$ROOT_DEV_LABEL" ;;
ext*) setup_dev_ext "$dev" "$ROOT_DEV_LABEL" ;;
esac
# /boot entry: UUID -> LABEL
if grep '^UUID=[^ ]* /boot ' $mnt/etc/fstab >/dev/null 2>&1; then
echo -n "Changing /boot mount in $mnt/etc/fstab to use label $BOOT_DEV_LABEL... "
sed -i 's/^UUID=[^ ]* \/boot /LABEL='"$BOOT_DEV_LABEL"' \/boot /' $mnt/etc/fstab \
&& echo "OK" || echo "ERROR"
fi
# root entry: UUID -> LABEL
if grep '^UUID=[^ ]* / ' $mnt/etc/fstab >/dev/null 2>&1; then
echo -n "Changing / mount in $mnt/etc/fstab to use label $ROOT_DEV_LABEL... "
sed -i 's/^UUID=[^ ]* \/ /LABEL='"$ROOT_DEV_LABEL"' \/ /' $mnt/etc/fstab \
&& echo "OK" || echo "ERROR"
fi
# prefer zstd compression over lzo for btrfs mounts
if grep 'compress=lzo' $mnt/etc/fstab >/dev/null 2>&1; then
echo -n "Changing compression in $mnt/etc/fstab from lzo to zstd... "
sed -i 's/compress=lzo/compress=zstd/' $mnt/etc/fstab \
&& echo "OK" || echo "ERROR"
fi
# cap /run at 50% RAM explicitly (idempotent: only added when missing)
if ! grep '^tmpfs /run ' $mnt/etc/fstab >/dev/null 2>&1; then
echo -n "Adding /run mount in $mnt/etc/fstab with explicit size... "
echo 'tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=50%,mode=755 0 0' >>$mnt/etc/fstab \
&& echo "OK" || echo "ERROR"
fi
}
# Checksum (and optionally xz-compress) the finished image.
# $1 = path to the .img file.  Honors the COMPRESS_DEST_IMAGE flag and
# COMPRESS_DEST_OPTS xz options set by the option parser.
# NOTE(review): out_path/out_name are intentionally not local — presumably
# harmless, but verify nothing else relies on them.
finish_output () {
local out_img="$1"
out_path=$(dirname $(readlink -f "$out_img"))
out_name=$(basename "${out_img%%.*}")
# cd into out_path so checksums don't contain paths
pushd "$out_path" >/dev/null
echo "Checksumming image as ${out_path}/${out_name}.img.sha256..."
sha256sum $(basename $out_img) >"${out_name}.img.sha256"
if [ -n "$COMPRESS_DEST_IMAGE" ]; then
echo "Compressing image as ${out_path}/${out_name}.img.xz..."
# COMPRESS_DEST_OPTS is deliberately unquoted so multiple flags word-split
xz -cv ${COMPRESS_DEST_OPTS} "${out_name}.img" >"${out_name}.img.xz"
echo "Checksumming compressed image as ${out_name}.img.xz.sha256..."
sha256sum "${out_name}.img.xz" >"${out_name}.img.xz.sha256"
fi
popd >/dev/null
}
DATE="$(date '+%Y%m%d')"
if [ -z "$SKIP_BUILD" ]; then
DATE_MARKER=$(mktemp -t armbian-build-XXXXX)
touch "$DATE_MARKER"
pushd "${BUILD_HOME}" >/dev/null
./compile.sh KERNEL_ONLY=no KERNEL_CONFIGURE=no \
BUILD_MINIMAL=yes \
INSTALL_HEADERS=no \
COMPRESS_OUTPUTIMAGE=sha,img \
FIXED_IMAGE_SIZE=$IMAGE_SIZE \
ROOTFS_TYPE=btrfs BTRFS_COMPRESSION=zstd \
WIREGUARD=no \
AUFS=no \
CLEAN_LEVEL=debs \
BUILD_KSRC=no \
EXTERNAL=no EXTERNAL_NEW=no \
BRANCH=current \
RELEASE=$DEBIAN_NAME \
BOARD=$BOARD
popd >/dev/null
fi
IMGNAME=$(ls -1t "${BUILD_HOME}"/output/images/Armbian_*.img |head -1)
if [ ! "$IMGNAME" ]; then
echo "No image found in ${BUILD_HOME}/output/images."
exit 1
fi
if [ -z "$SKIP_DATE_CHECK" -a ! "$IMGNAME" -nt "$DATE_MARKER" ]; then
echo "No image found in ${BUILD_HOME}/output/images newer than build start."
exit 1
fi
echo "OS image: $IMGNAME"
if [ -z "$SKIP_CUST_IMAGE" ]; then
# have to copy image to internal disk (not Vagrant shared disk) for losetup to work
TMPIMG=$(mktemp -t sn-XXXXX)
cp -a "$IMGNAME" "$TMPIMG"
LOOPDEV=`losetup --partscan --find --show $TMPIMG`
if [ -z "$LOOPDEV" ]; then
echo "Error: loop device not discovered for image $TMPIMG"
exit 4
else
echo "Image loop device created as $LOOPDEV"
fi
LOOPPART_BOOT=$(ls -1 ${LOOPDEV}p* 2>/dev/null |head -1)
if [ -z "$LOOPPART_BOOT" ]; then
echo "Error: boot partition not discovered for device $LOOPDEV"
exit 4
else
echo "Boot partition: $LOOPPART_BOOT"
fi
LOOPPART_ROOT=$(ls -1r ${LOOPDEV}p* 2>/dev/null |head -1)
if [ -z "$LOOPPART_ROOT" ]; then
echo "Error: root partition not discovered for device $LOOPDEV"
exit 4
else
echo "Root partition: $LOOPPART_ROOT"
fi
MOUNT_BOOT=$(mktemp -d -t sn-XXXXX)
MOUNT_ROOT=$(mktemp -d -t sn-XXXXX)
mount "$LOOPPART_BOOT" "$MOUNT_BOOT"
mount "$LOOPPART_ROOT" "$MOUNT_ROOT"
echo "Mounted $LOOPPART_BOOT on $MOUNT_BOOT"
echo "Mounted $LOOPPART_ROOT on $MOUNT_ROOT"
setup_boot_dev "$LOOPPART_BOOT" "$MOUNT_BOOT"
setup_root_dev "$LOOPPART_ROOT" "$MOUNT_ROOT"
umount "$MOUNT_BOOT"
umount "$MOUNT_ROOT"
rmdir "$MOUNT_BOOT"
rmdir "$MOUNT_ROOT"
losetup -d $LOOPDEV
OUTIMG="${BUILD_HOME}/output/images/solarnodeos-$OUTPUT_RELEASE-$BOARD-1GB-$DATE.img"
mv "$TMPIMG" "$OUTIMG"
echo "Image saved to $OUTIMG"
finish_output "$OUTIMG"
fi
| true
|
2c58af07c9f9a824f75dbc80c01324fdf79a87c5
|
Shell
|
mrsWangxing/aopalliance-example
|
/remove-empty-line.sh
|
UTF-8
| 627
| 3.390625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Strip blank (whitespace-only) lines from all *.java and *.xml files
# under the current directory, running `mvn clean` first when available.
function trap_exit() {
    #test -f $$ && rm $$
    echo "exit"
}

set -e
trap "trap_exit" EXIT

# Bug fixed: under `set -e` a failing bare `which mvn` aborted the script,
# so the `$?` check that followed could never observe a non-zero status.
# Test the lookup inside the `if` condition instead (where -e is suspended).
if command -v mvn >/dev/null 2>&1; then
    mvn clean
fi

# -print0/-0 keeps paths with spaces intact; -r skips sed when nothing matches.
find . -type f -name '*.java' -print0 | xargs -0 -r sed -i '/^\s*$/d'
find . -type f -name '*.xml' -print0 | xargs -0 -r sed -i '/^\s*$/d'
| true
|
afef200c15a3bac569ee6dcf026a5397a830a59c
|
Shell
|
clairegriffin/new-installer
|
/bashew.sh
|
UTF-8
| 23,152
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
readonly script_author="peter@forret.com"
# run_as_root: 0 = don't check anything / 1 = script MUST run as root / -1 = script MAY NOT run as root
readonly run_as_root=-1
list_options() {
echo -n "
#commented lines will be filtered
flag|h|help|show usage
flag|q|quiet|no output
flag|v|verbose|output more
flag|f|force|do not ask for confirmation (always yes)
option|m|model|template script to use: small/normal|normal
option|t|tmpd|folder for temp files|.tmp
option|l|logd|folder for log files|log
option|n|name|name of new script or project
param|1|action|action to perform: script/project/init/update
" | grep -v '^#'
}
list_examples() {
echo -n "
$script_basename script : create new (stand-alone) script (interactive)
$script_basename project : create new bash script repo (interactive)
$script_basename init : initialize this repo as a new project (when generated from the 'bashew' template repo)
$script_basename update : update $script_basename to latest version (git pull)
" | grep -v '^$'
}
## Put your helper scripts here
get_author_data() {
# $1 = proposed script/project name
## always have something as author data
guess_fullname="$(whoami)"
guess_username="$guess_fullname"
guess_email="$guess_fullname@$(hostname)"
# if there is prior data, use that
[[ -n ${BASHEW_AUTHOR_FULLNAME:-} ]] && guess_fullname="$BASHEW_AUTHOR_FULLNAME"
[[ -n ${BASHEW_AUTHOR_EMAIL:-} ]] && guess_email="$BASHEW_AUTHOR_EMAIL"
[[ -n ${BASHEW_AUTHOR_USERNAME:-} ]] && guess_username="$BASHEW_AUTHOR_USERNAME"
# if there is git config data, use that
if is_set "$in_git_repo"; then
guess_fullname=$(git config user.name)
guess_email=$(git config user.email)
guess_username=$(git config remote.origin.url | cut -d: -f2)
# git@github.com:pforret/bashew.git => pforret/bashew.git
guess_username=$(dirname "$guess_username")
# pforret/bashew.git => pforret
guess_username=$(basename "$guess_username")
fi
if ((force)); then
author_fullname="$guess_fullname"
author_email="$guess_email"
author_username="$guess_username"
new_name="$1"
clean_name=$(basename "$new_name" .sh)
new_description="This is my script $clean_name"
else
announce "1. first we need the information of the author"
author_fullname=$(ask "Author full name " "$guess_fullname")
author_email=$(ask "Author email " "$guess_email")
author_username=$(ask "Author (github) username" "$guess_username")
export BASHEW_AUTHOR_FULLNAME="$author_fullname"
export BASHEW_AUTHOR_EMAIL="$author_email"
export BASHEW_AUTHOR_USERNAME="$author_username"
announce "2. now we need the path and name of this new script/repo"
new_name=$(ask "Script name" "$1")
announce "3. give some description of what the script should do"
clean_name=$(basename "$new_name" .sh)
new_description=$(ask "Script description" "This is my script $clean_name")
fi
}
copy_and_replace() {
local input="$1"
local output="$2"
awk \
-v author_fullname="$author_fullname" \
-v author_username="$author_username" \
-v author_email="$author_email" \
-v package_name="$clean_name" \
-v package_description="$new_description" \
-v meta_today="$execution_day" \
-v meta_year="$execution_year" \
-v bashew_version="$script_version" \
'{
gsub(/author_name/,author_fullname);
gsub(/author_username/,author_username);
gsub(/author@email.com/,author_email);
gsub(/package_name/,package_name);
gsub(/package_description/,package_description);
gsub(/meta_today/,meta_today);
gsub(/meta_year/,meta_year);
gsub(/bashew_version/,bashew_version);
print;
}' \
<"$input" \
>"$output"
}
# Print one random lowercase word of 3-7 letters.
# Word source, in order of preference: aspell's English dictionary, the
# system words file, or a built-in repeated NATO/number word list.
# Apostrophes and multi-word entries are filtered out.
# NOTE(review): $RANDOM is 0..32767; if the filtered list is shorter than
# the drawn line number the function prints nothing — callers appear to
# tolerate this, but confirm.
random_word() {
(
if aspell -v >/dev/null 2>&1; then
aspell -d en dump master | aspell -l en expand
elif [[ -f /usr/share/dict/words ]]; then
# works on MacOS
cat /usr/share/dict/words
elif [[ -f /usr/dict/words ]]; then
cat /usr/dict/words
else
# fallback word list, repeated 3000x so line numbers go high enough
printf 'zero,one,two,three,four,five,six,seven,eight,nine,ten,alfa,bravo,charlie,delta,echo,foxtrot,golf,hotel,india,juliet,kilo,lima,mike,november,oscar,papa,quebec,romeo,sierra,tango,uniform,victor,whiskey,xray,yankee,zulu%.0s' {1..3000} |
tr ',' "\n"
fi
) |
awk 'length($1) > 2 && length($1) < 8 {print}' |
grep -v "'" |
grep -v " " |
awk "NR == $RANDOM {print tolower(\$0)}"
}
# Remove a file or a folder if it exists, logging what was deleted.
# $1 = path to delete; silently does nothing for other path kinds.
delete_stuff() {
    local target="$1"
    if [[ -d "$target" ]]; then
        log "Delete folder [$target]"
        rm -fr "$target"
    elif [[ -f "$target" ]]; then
        log "Delete file [$target]"
        rm "$target"
    fi
}
#####################################################################
## Put your main script here
#####################################################################
main() {
log "Program: $script_basename $script_version"
log "Updated: $script_modified"
log "Run as : $USER@$HOSTNAME"
# add programs you need in your script here, like tar, wget, ffmpeg, rsync ...
verify_programs tput uname git
action=$(lcase "$action")
case $action in
script)
if [[ -n "${name:-}" ]] && [[ ! "$name" == " " ]]; then
log "Using [$name] as name"
get_author_data "$name"
else
random_name="$(random_word)_$(random_word).sh"
log "Using [$random_name] as name"
get_author_data "./$random_name"
fi
announce "Creating script $new_name ..."
# shellcheck disable=SC2154
copy_and_replace "$script_install_folder/template/$model.sh" "$new_name"
chmod +x "$new_name"
echo "$new_name"
;;
project)
if [[ -n "${name:-}" ]] && [[ ! "$name" == " " ]]; then
get_author_data "$name"
else
random_name="$(random_word)_$(random_word)"
get_author_data "./$random_name"
fi
if [[ ! -d "$new_name" ]]; then
announce "Creating project $new_name ..."
mkdir "$new_name"
template_folder="$script_install_folder/template"
## first do all files that can change
for file in "$template_folder"/*.md "$template_folder/LICENSE" "$template_folder"/.gitignore "$template_folder"/.env.example; do
bfile=$(basename "$file")
((quiet)) || echo -n "$bfile "
new_file="$new_name/$bfile"
copy_and_replace "$file" "$new_file"
done
((quiet)) || echo -n "$clean_name.sh "
copy_and_replace "$template_folder/$model.sh" "$new_name/$clean_name.sh"
chmod +x "$new_name/$clean_name.sh"
## now the CI/CD files
if [[ -f "$template_folder/bitbucket-pipelines.yml" ]]; then
((quiet)) || echo -n "bitbucket-pipelines "
cp "$template_folder/bitbucket-pipelines.yml" "$new_name/"
fi
if [[ -d "$template_folder/.github" ]]; then
((quiet)) || echo -n ".github "
cp -r "$template_folder/.github" "$new_name/.github"
fi
((quiet)) || echo " "
if confirm "Do you want to 'git init' the new project?"; then
(pushd "$new_name" && git init && git add . && popd || return) >/dev/null 2>&1
fi
success "next step: 'cd $new_name' and start scripting!"
else
alert "Folder [$new_name] already exists, cannot make a new project there"
fi
;;
init)
repo_name=$(basename "$script_install_folder")
[[ "$repo_name" == "bashew" ]] && die "You can only run the '$script_basename init' of a *new* repo, derived from the bashew template on Github."
[[ ! -d ".git" ]] && die "You can only run '$script_basename init' in the root of your repo"
[[ ! -d "template" ]] && die "The 'template' folder seems to be missing, are you sure this repo is freshly cloned from pforret/bashew?"
[[ ! -f "$script_install_folder/template/$model.sh" ]] && die "$model.sh is not a valid template"
new_name="$repo_name.sh"
get_author_data "./$new_name"
announce "Creating script $new_name ..."
# shellcheck disable=SC2154
for file in template/*.md template/LICENSE template/.gitignore template/.gitignore; do
bfile=$(basename "$file")
((quiet)) || echo -n "$bfile "
new_file="./$bfile"
rm -f "$new_file"
copy_and_replace "$file" "$new_file"
done
copy_and_replace "$script_install_folder/template/$model.sh" "$new_name"
chmod +x "$new_name"
git add "$new_name"
alt_dir=$(dirname "$new_name")
alt_base=$(basename "$new_name" .sh)
alt_name="$alt_dir/$alt_base"
if [[ ! "$alt_name" == "$new_name" ]]; then
# create a "do_this" alias for "do_this.sh"
ln -s "$new_name" "$alt_name"
git add "$alt_name"
fi
announce "Now cleaning up unnecessary bashew files ..."
delete_stuff template
delete_stuff tests/disabled
delete_stuff tests/test_bashew.sh
delete_stuff assets
delete_stuff .tmp
delete_stuff log
log "Delete script [bashew.sh] ..."
(
sleep 1
rm -f bashew.sh bashew
) &# delete will happen after the script is finished
success "script $new_name created!"
success "now do: ${col_ylw}git commit -a -m 'after bashew init' && git push${col_reset}"
out "tip: install ${col_ylw}basher${col_reset} and ${col_ylw}pforret/setver${col_reset} for easy bash script version management"
;;
update)
pushd "$script_install_folder" || die "No access to folder [$script_install_folder]"
git pull || die "Cannot update with git"
# shellcheck disable=SC2164
popd
;;
debug)
out "print_with_out=yes"
log "print_with_log=yes"
announce "print_with_announce=yes"
success "print_with_success=yes"
progress "print_with_progress=yes"
echo ""
alert "print_with_alert=yes"
hash3=$(echo "1234567890" | hash 3)
hash6=$(echo "1234567890" | hash)
out "hash3=$hash3"
out "hash6=$hash6"
out "script_basename=$script_basename"
out "script_author=$script_author"
out "escape1 = $(escape "/forward/slash")"
out "escape2 = $(escape '\backward\slash')"
out "lowercase = $(lcase 'AbCdEfGhIjKlMnÔû')"
out "uppercase = $(ucase 'AbCdEfGhIjKlMnÔû')"
# shellcheck disable=SC2015
is_set "$force" && out "force=$force (true)" || out "force=$force (false)"
;;
*)
die "param [$action] not recognized"
;;
esac
}
#####################################################################
################### DO NOT MODIFY BELOW THIS LINE ###################
# set strict mode - via http://redsymbol.net/articles/unofficial-bash-strict-mode/
# removed -e because it made basic [[ testing ]] difficult
set -uo pipefail
IFS=$'\n\t'
# Read stdin and print the first N hex characters of its MD5 digest.
# $1 = number of characters to keep (default 6).
# Portable across Linux (md5sum) and macOS (md5).
# Improvements: deprecated `which` replaced by `command -v` (SC2230), and
# `length` made local so it no longer leaks into the caller's scope.
# shellcheck disable=SC2120
hash() {
local length=${1:-6}
if command -v md5sum >/dev/null; then
# regular linux
md5sum | cut -c1-"$length"
else
# macos
md5 | cut -c1-"$length"
fi
}
force=0
help=0
## ----------- TERMINAL OUTPUT STUFF
[[ -t 1 ]] && piped=0 || piped=1 # detect if out put is piped
verbose=0
#to enable verbose even before option parsing
[[ $# -gt 0 ]] && [[ $1 == "-v" ]] && verbose=1
quiet=0
#to enable quiet even before option parsing
[[ $# -gt 0 ]] && [[ $1 == "-q" ]] && quiet=1
[[ $(echo -e '\xe2\x82\xac') == '€' ]] && unicode=1 || unicode=0 # detect if unicode is supported
if [[ $piped -eq 0 ]]; then
col_reset="\033[0m"
col_red="\033[1;31m"
col_grn="\033[1;32m"
col_ylw="\033[1;33m"
else
col_reset=""
col_red=""
col_grn=""
col_ylw=""
fi
if [[ $unicode -gt 0 ]]; then
char_succ="✔"
char_fail="✖"
char_alrt="➨"
char_wait="…"
else
char_succ="OK "
char_fail="!! "
char_alrt="?? "
char_wait="..."
fi
readonly nbcols=$(tput cols || echo 80)
#readonly nbrows=$(tput lines)
readonly wprogress=$((nbcols - 5))
out() { ((quiet)) || printf '%b\n' "$*"; }
progress() {
((quiet)) || (
((piped)) && out "$*" || printf "... %-${wprogress}b\r" "$* "
)
}
die() {
tput bel
out "${col_red}${char_fail} $script_basename${col_reset}: $*" >&2
safe_exit
}
alert() { out "${col_red}${char_alrt}${col_reset}: $*" >&2; } # print error and continue
success() { out "${col_grn}${char_succ}${col_reset} $*"; }
announce() {
out "${col_grn}${char_wait}${col_reset} $*"
sleep 1
}
log() { ((verbose)) && out "${col_ylw}# $* ${col_reset}" >&2; }
escape() { echo "$*" | sed 's/\//\\\//g'; }
lcase() { echo "$*" | awk '{print tolower($0)}'; }
ucase() { echo "$*" | awk '{print toupper($0)}'; }
confirm() {
is_set $force && return 0
read -r -p "$1 [y/N] " -n 1
echo " "
[[ $REPLY =~ ^[Yy]$ ]]
}
#TIP: use «confirm» for interactive confirmation before doing something
#TIP:> if ! confirm "Delete file"; then ; echo "skip deletion" ; fi
ask() {
# value=$(ask_question <question> <default>)
# usage
local answer
read -r -p "$1 ($2): " answer
echo "${answer:-$2}"
}
error_prefix="${col_red}>${col_reset}"
trap "die \"ERROR \$? after \$SECONDS seconds \n\
\${error_prefix} last command : '\$BASH_COMMAND' \" \
\$(< \$script_install_path awk -v lineno=\$LINENO \
'NR == lineno {print \"\${error_prefix} from line \" lineno \" : \" \$0}')" INT TERM EXIT
# cf https://askubuntu.com/questions/513932/what-is-the-bash-command-variable-good-for
# trap 'echo ‘$BASH_COMMAND’ failed with error code $?' ERR
safe_exit() {
[[ -n "$tmpfile" ]] && [[ -f "$tmpfile" ]] && rm "$tmpfile"
trap - INT TERM EXIT
log "$script_basename finished after $SECONDS seconds"
exit 0
}
is_set() { [[ "$1" -gt 0 ]]; }
is_empty() { [[ -z "$1" ]]; }
is_not_empty() { [[ -n "$1" ]]; }
#TIP: use «is_empty» and «is_not_empty» to test for variables
#TIP:> if is_empty "$email" ; then ; echo "Need Email!" ; fi
is_file() { [[ -f "$1" ]]; }
is_dir() { [[ -d "$1" ]]; }
#TIP: use «is_file» and «is_dir» to test for files or folders
#TIP:> if is_file "/etc/hosts" ; then ; cat "/etc/hosts" ; fi
show_usage() {
out "Program: ${col_grn}$script_basename $script_version${col_reset} by ${col_ylw}$script_author${col_reset}"
out "Updated: ${col_grn}$script_modified${col_reset}"
echo -n "Usage: $script_basename"
list_options |
awk '
BEGIN { FS="|"; OFS=" "; oneline="" ; fulltext="Flags, options and parameters:"}
$1 ~ /flag/ {
fulltext = fulltext sprintf("\n -%1s|--%-10s: [flag] %s [default: off]",$2,$3,$4) ;
oneline = oneline " [-" $2 "]"
}
$1 ~ /option/ {
fulltext = fulltext sprintf("\n -%1s|--%s <%s>: [optn] %s",$2,$3,"val",$4) ;
if($5!=""){fulltext = fulltext " [default: " $5 "]"; }
oneline = oneline " [-" $2 " <" $3 ">]"
}
$1 ~ /secret/ {
fulltext = fulltext sprintf("\n -%1s|--%s <%s>: [secr] %s",$2,$3,"val",$4) ;
oneline = oneline " [-" $2 " <" $3 ">]"
}
$1 ~ /param/ {
if($2 == "1"){
fulltext = fulltext sprintf("\n %-10s: [parameter] %s","<"$3">",$4);
oneline = oneline " <" $3 ">"
} else {
fulltext = fulltext sprintf("\n %-10s: [parameters] %s (1 or more)","<"$3">",$4);
oneline = oneline " <" $3 " …>"
}
}
END {print oneline; print fulltext}
'
}
show_tips() {
grep <"${BASH_SOURCE[0]}" -v "\$0" |
awk "
/TIP: / {\$1=\"\"; gsub(/«/,\"$col_grn\"); gsub(/»/,\"$col_reset\"); print \"*\" \$0}
/TIP:> / {\$1=\"\"; print \" $col_ylw\" \$0 \"$col_reset\"}
"
}
init_options() {
local init_command
init_command=$(list_options |
awk '
BEGIN { FS="|"; OFS=" ";}
$1 ~ /flag/ && $5 == "" {print $3"=0; "}
$1 ~ /flag/ && $5 != "" {print $3"="$5"; "}
$1 ~ /option/ && $5 == "" {print $3"=\" \"; "}
$1 ~ /option/ && $5 != "" {print $3"="$5"; "}
')
if [[ -n "$init_command" ]]; then
#log "init_options: $(echo "$init_command" | wc -l) options/flags initialised"
eval "$init_command"
fi
}
verify_programs() {
os_uname=$(uname -s)
os_version=$(uname -v)
log "Running: on $os_uname ($os_version)"
list_programs=$(echo "$*" | sort -u | tr "\n" " ")
log "Verify : $list_programs"
for prog in "$@"; do
# shellcheck disable=SC2230
if [[ -z $(which "$prog") ]]; then
die "$script_basename needs [$prog] but this program cannot be found on this $os_uname machine"
fi
done
}
folder_prep() {
if [[ -n "$1" ]]; then
local folder="$1"
local max_days=${2:-365}
if [[ ! -d "$folder" ]]; then
log "Create folder [$folder]"
mkdir "$folder"
else
log "Cleanup: [$folder] - delete files older than $max_days day(s)"
find "$folder" -mtime "+$max_days" -type f -exec rm {} \;
fi
fi
}
# True when the option spec declares one-shot positional parameters
# (lines of the form "param|1|...").
expects_single_params() {
    list_options | grep -q 'param|1|'
}
# True when the option spec declares a variadic parameter
# (lines of the form "param|n|...").
expects_multi_param() {
    list_options | grep -q 'param|n|'
}
# Parse the command line against the declarations in list_options.
# Phase 1: consume leading -x/--xxx words (flag -> $name=1, option/secret ->
#          $name=<next arg>), eval'ing the generated assignment.
# Phase 2: bind the remaining words to the declared single parameter(s) and
#          the optional trailing multi parameter.
# Shows usage and exits when invoked without any arguments; dies on unknown
# options or missing parameters.
parse_options() {
  if [[ $# -eq 0 ]]; then
    show_usage >&2
    safe_exit
  fi
  ## first process all the -x --xxxx flags and options
  #set -x
  while true; do
    # flag <flag> is saved as $flag = 0/1
    # option <option> is saved as $option
    if [[ $# -eq 0 ]]; then
      ## all parameters processed
      break
    fi
    if [[ ! $1 == -?* ]]; then
      ## all flags/options processed
      break
    fi
    local save_option
    # translate "-f"/"--flag" into a shell assignment; options and secrets
    # also emit "shift" so their value argument is consumed below
    save_option=$(list_options |
      awk -v opt="$1" '
    BEGIN { FS="|"; OFS=" ";}
    $1 ~ /flag/ && "-"$2 == opt {print $3"=1"}
    $1 ~ /flag/ && "--"$3 == opt {print $3"=1"}
    $1 ~ /option/ && "-"$2 == opt {print $3"=$2; shift"}
    $1 ~ /option/ && "--"$3 == opt {print $3"=$2; shift"}
    $1 ~ /secret/ && "-"$2 == opt {print $3"=$2; shift"}
    $1 ~ /secret/ && "--"$3 == opt {print $3"=$2; shift"}
    ')
    if [[ -n "$save_option" ]]; then
      if echo "$save_option" | grep shift >>/dev/null; then
        local save_var
        save_var=$(echo "$save_option" | cut -d= -f1)
        log "Found  : ${save_var}=$2"
      else
        log "Found  : $save_option"
      fi
      # the assignment (and possible extra shift) runs in this shell
      eval "$save_option"
    else
      die "cannot interpret option [$1]"
    fi
    shift
  done
  # -h/--help: print usage + examples and stop (subshell, then safe_exit)
  ((help)) && (
    echo "### USAGE"
    show_usage
    echo "### EXAMPLES"
    list_examples
    safe_exit
  )
  ## then run through the given parameters
  if expects_single_params; then
    single_params=$(list_options | grep 'param|1|' | cut -d'|' -f3)
    list_singles=$(echo "$single_params" | xargs)
    nb_singles=$(echo "$single_params" | wc -w)
    log "Expect : $nb_singles single parameter(s): $list_singles"
    [[ $# -eq 0 ]] && die "need the parameter(s) [$list_singles]"
    for param in $single_params; do
      [[ $# -eq 0 ]] && die "need parameter [$param]"
      [[ -z "$1" ]] && die "need parameter [$param]"
      log "Found  : $param=$1"
      eval "$param=$1"
      shift
    done
  else
    log "No single params to process"
    single_params=""
    nb_singles=0
  fi
  if expects_multi_param; then
    #log "Process: multi param"
    nb_multis=$(list_options | grep -c 'param|n|')
    multi_param=$(list_options | grep 'param|n|' | cut -d'|' -f3)
    log "Expect : $nb_multis multi parameter: $multi_param"
    [[ $nb_multis -gt 1 ]] && die "cannot have >1 'multi' parameter: [$multi_param]"
    [[ $nb_multis -gt 0 ]] && [[ $# -eq 0 ]] && die "need the (multi) parameter [$multi_param]"
    # save the rest of the params in the multi param
    if [[ -n "$*" ]]; then
      log "Found  : $multi_param=$*"
      eval "$multi_param=( $* )"
    fi
  else
    log "No multi param to process"
    nb_multis=0
    multi_param=""
    [[ $# -gt 0 ]] && die "cannot interpret extra parameters"
    log "all parameters have been processed"
  fi
}
# Globals for the temp file and log file, filled in by prep_log_and_temp_dir.
tmpfile=""
logfile=""
# Resolve a path through any chain of symbolic links to its real location.
# Arguments: $1 - path (possibly a symlink)
# Outputs:   the fully resolved path on stdout
# Portable pure-bash alternative to 'readlink -f', which macOS lacks.
recursive_readlink() {
  # base case: not a symlink -> done
  [[ ! -L "$1" ]] && echo "$1" && return 0
  local file_folder
  local link_folder
  local link_name
  file_folder="$(dirname "$1")"
  # resolve relative to absolute path
  [[ "$file_folder" != /* ]] && link_folder="$(cd -P "$file_folder" >/dev/null 2>&1 && pwd)"
  local symlink
  symlink=$(readlink "$1")
  link_folder=$(dirname "$symlink")
  link_name=$(basename "$symlink")
  # a target without a folder, or a relative target, is resolved against the
  # folder that contains the link itself
  [[ -z "$link_folder" ]] && link_folder="$file_folder"
  [[ "$link_folder" == \.* ]] && link_folder="$(cd -P "$file_folder" && cd -P "$link_folder" >/dev/null 2>&1 && pwd)"
  log "Symbolic ln: $1 -> [$symlink]"
  # recurse: the target may itself be a symlink
  recursive_readlink "$link_folder/$link_name"
}
# Gather metadata about this script and its runtime environment:
# script name and (symlink-resolved) install path, modification time,
# shell brand/version, OS name/version/architecture, script version from
# VERSION.md, and whether we are inside a git repository.
lookup_script_data() {
  readonly script_prefix=$(basename "${BASH_SOURCE[0]}" .sh)
  readonly script_basename=$(basename "${BASH_SOURCE[0]}")
  readonly execution_day=$(date "+%Y-%m-%d")
  readonly execution_year=$(date "+%Y")
  script_install_path="${BASH_SOURCE[0]}"
  log "Script path: $script_install_path"
  script_install_path=$(recursive_readlink "$script_install_path")
  log "Actual path: $script_install_path"
  readonly script_install_folder="$(dirname "$script_install_path")"
  # modification time: stat flags differ between GNU (-c) and BSD/macOS (-f)
  script_modified="??"
  os_uname=$(uname -s)
  [[ "$os_uname" == "Linux" ]] && script_modified=$(stat -c "%y" "$script_install_path" 2>/dev/null | cut -c1-16) # generic linux
  [[ "$os_uname" == "Darwin" ]] && script_modified=$(stat -f "%Sm" "$script_install_path" 2>/dev/null) # for MacOS
  # get shell/operating system/versions (last match wins if several are set)
  shell_brand="sh"
  shell_version="?"
  [[ -n "${ZSH_VERSION:-}" ]] && shell_brand="zsh" && shell_version="$ZSH_VERSION"
  [[ -n "${BASH_VERSION:-}" ]] && shell_brand="bash" && shell_version="$BASH_VERSION"
  [[ -n "${FISH_VERSION:-}" ]] && shell_brand="fish" && shell_version="$FISH_VERSION"
  [[ -n "${KSH_VERSION:-}" ]] && shell_brand="ksh" && shell_version="$KSH_VERSION"
  log "Shell type : $shell_brand - version $shell_version"
  readonly os_kernel=$(uname -s)
  os_version=$(uname -r)
  os_machine=$(uname -m)
  case "$os_kernel" in
  CYGWIN* | MSYS* | MINGW*)
    os_name="Windows"
    ;;
  Darwin)
    os_name=$(sw_vers -productName)       # macOS
    os_version=$(sw_vers -productVersion) # 11.1
    ;;
  Linux | GNU*)
    if [[ $(which lsb_release) ]]; then
      # 'normal' Linux distributions
      os_name=$(lsb_release -i)    # Ubuntu
      os_version=$(lsb_release -r) # 20.04
    else
      # Synology, QNAP,
      os_name="Linux"
    fi
    ;;
  esac
  log "OS Version : $os_name ($os_kernel) $os_version on $os_machine"
  # script version comes from a VERSION.md next to the resolved script
  script_version=0.0.0
  [[ -f "$script_install_folder/VERSION.md" ]] && script_version=$(cat "$script_install_folder/VERSION.md")
  # remember whether the current working directory is inside a git repo
  if git status >/dev/null 2>&1; then
    readonly in_git_repo=1
  else
    readonly in_git_repo=0
  fi
}
# Prepare the temp and log folders (only when $tmpd / $logd are configured):
# creates/cleans each folder via folder_prep, then sets the globals
# $tmpfile (a unique temp file) and $logfile (one log file per day).
prep_log_and_temp_dir() {
  tmpfile=""
  if [[ -n "${tmpd:-}" ]]; then
    # temp files older than 1 day are purged
    folder_prep "$tmpd" 1
    tmpfile=$(mktemp "$tmpd/$execution_day.XXXXXX")
    log "Tmpfile: $tmpfile"
    # you can use this temporary file in your program
    # it will be deleted automatically when the program ends
  fi
  logfile=""
  if [[ -n "${logd:-}" ]]; then
    # log files older than 7 days are purged
    folder_prep "$logd" 7
    logfile=$logd/$script_prefix.$execution_day.log
    log "Logfile: $logfile"
    echo "$(date '+%H:%M:%S') | [$script_basename] $script_version started" >>"$logfile"
  fi
}
# ---- main program bootstrap -------------------------------------------
# run_as_root: 1 = script MUST run as root, -1 = must NOT run as root
[[ $run_as_root == 1 ]] && [[ $UID -ne 0 ]] && die "MUST be root to run this script"
[[ $run_as_root == -1 ]] && [[ $UID -eq 0 ]] && die "CANNOT be root to run this script"
lookup_script_data
# set default values for flags & options
init_options
# overwrite with specified options if any
parse_options "$@"
# clean up log and temp folder
prep_log_and_temp_dir
# run main program
main
# exit and clean up
safe_exit
| true
|
8327b0266c867181aca769cc1ba37fb3b642a0c4
|
Shell
|
MIUI-RUSSIAN/MTKZenyT
|
/inc/func/functions_filesystem
|
UTF-8
| 2,082
| 4.125
| 4
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
# Guard: this function library only runs when the sourcing script has set
# EXEC=true; otherwise it exits immediately.
if ! ${EXEC}; then exit; fi
# Ask the user for a directory (index "d") or a file (index "f") via the
# best available UI backend: dialog, zenity, or a plain read prompt.
# Arguments: $1 - "d" (directory) or "f" (file); $2 - prompt/title text
# Results:   sets the DIR_CHOOSED or FILE_CHOOSED global; recurses until a
#            non-empty file selection is made in "f" mode.
# NOTE(review): relies on dialog_select_dir/dialog_select_file,
# zenity_directory and display_error being defined elsewhere — confirm.
function choose_directory() {
  local index="$1" name="$2"
  if [[ "${DIALOG}" -ne 0 ]]; then
    case "${index}" in
      "d") DIR_CHOOSED=$(dialog_select_dir "${name}") ;;
      "f")
        FILE_CHOOSED=$(dialog_select_file "${name}")
        if [[ -z "${FILE_CHOOSED}" ]]; then
          display_error "${text_error_not_files} : ${name}"
          choose_directory "${index}" "${name}"
        fi
        ;;
    esac
  elif [[ "${ZENITY}" -ne 0 ]]; then
    if [[ "${index}" == "d" ]]; then
      DIR_CHOOSED=$(zenity_directory "${index}" "${name}");
    else
      FILE_CHOOSED=$(zenity_directory "${index}" "${name}");
      if [[ -z "${FILE_CHOOSED}" ]]; then
        display_error "${text_error_not_files} : ${name}"
        choose_directory "${index}" "${name}"
      fi
    fi
  else
    # no GUI backend available: fall back to a plain terminal prompt
    read -p "${name}" DIR_CHOOSED
  fi
  unset index name
  write_file "log" "${NOW} ### $FUNCNAME: ${DIR_CHOOSED}${FILE_CHOOSED}"
}
# Return the size in bytes of a regular file (0 when it does not exist).
# Arguments: $1 - file path
# Outputs:   the size on stdout
# NOTE: 'stat -c %s' is the GNU coreutils form (BSD/macOS stat differs).
function file_stat() {
  local file="$1"
  local -i int=0
  # quote "$file" so paths containing spaces or glob characters work
  if [[ -f "${file}" ]]; then int=$(stat -c "%s" "${file}" | sed 's/\r$//'); fi
  echo ${int}
  unset int file
}
# Create a per-device, timestamped save folder under $DIR_SVG
# (named "<vid>:<pid>_<NOW_DIR>") and record its path in DIR_SVG_DATED.
# NOTE(review): assumes the globals DIR_SVG, vid, pid, NOW_DIR and NOW are
# set by the sourcing script, and that display_detected is defined there —
# confirm against the callers.
function make_dir_dated() {
  if [[ -d "${DIR_SVG}" ]]; then
    DIR_SVG_DATED="${DIR_SVG}${vid}:${pid}_${NOW_DIR}"
    if [[ ! -d "${DIR_SVG_DATED}" ]]; then
      mkdir "${DIR_SVG_DATED}" && display_detected "${text_dir_ok}" "${DIR_SVG_DATED}"
    fi
    write_file "log" "${NOW} ### $FUNCNAME: DIR_SVG_DATED: ${DIR_SVG_DATED}"
  fi
}
# Remove a directory tree if it exists; silently a no-op otherwise.
# Arguments: $1 - directory path
function rm_dir() {
  local target="$1"
  if [[ -d "${target}" ]]; then
    rm -rf "${target}"
  fi
  unset target
}
# Write either a config value or a log line.
# Arguments: $1 - mode: "cfg" updates "${var}=..." in $CONFIG,
#                 "log" appends $2 to today's log file (only when LOG==1)
#            $2 - config key to replace, or the log message
#            $3 - new config value (cfg mode only)
# Globals:   CONFIG, LOG, DAY_LOG (read) — assumed set by the sourcing script.
function write_file() {
  local index="$1" var="$2" value="$3"
  case "${index}" in
    # BUGFIX: the sed pattern previously used the undefined ${text}, which
    # expanded empty and made it match "=.*" on EVERY line, clobbering all
    # key=value pairs; the intended anchor is the key in ${var}.
    "cfg") sed -i -e "s#${var}=.*#${var}=${value}#g" "${CONFIG}" ;;
    "log") [[ "${LOG}" -eq 1 ]] && echo "${var}" >> "${PWD}/logs/${DAY_LOG}.log" ;;
  esac
  unset index value var
}
| true
|
89802322a2e00fe14fe8d4e1554ebe4d278258fc
|
Shell
|
smzht/wsl-utils
|
/wslinit
|
UTF-8
| 695
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
# -*- mode: sh; coding: utf-8-unix -*-
# Build and export DRVFS: a serialized bash array (via declare -p) mapping
# each Windows drive visible inside WSL as "<mount point>|<windows path>".
export DRVFS=$(
  declare -a drvfs
  while read -r line; do
    # strip a trailing slash from each "<mountpoint>|<path>" record
    line=$(echo "$line" | sed 's!/$!!')
    drvfs=("${drvfs[@]}" "$line")
  done < <(
    # Emit the root mapping only when wslpath -m / works.
    # NOTE(review): assumes `uname -v` contains "Microsoft" on WSL1 only,
    # and build >= 18362 otherwise — confirm on current WSL releases.
    if uname -v | grep -v -q Microsoft || [ $(uname -r | cut -d'-' -f 2) -ge 18362 ]; then
      echo "/|$(wslpath -m /)"
    fi
    # drvfs/cifs mounts: "SRC on DST type drvfs" -> "DST|SRC", \ -> /
    mount | grep -E 'type (drvfs|cifs)' | sed -r 's/(.*) on (.*) type (drvfs|cifs) .*/\2\|\1/' | sed 's!\\!/!g'
    # WSL2 9p mounts: extract "mountpoint|path"; UNC prefixes become //,
    # with an octal printf round-trip for the first UNC character
    mount | grep 'aname=drvfs;' | sed -r 's/.* on (.*) type 9p .*;path=([^;]*);.*/\1|\2/' | sed 's!\\!/!g' | sed 's!|UNC/!|//!' | sed "s!|UNC\(.\)!|//\$(printf '%o' \\\'\1)!" | sed 's/.*/echo "&"/' | sh
  )
  declare -p drvfs
)
| true
|
8d88d707ee01bb2c8be2eca2bdd49084b7e011d5
|
Shell
|
bohachu/cameo-motion-1213-1704
|
/sh/get_folders_in_folder.sh
|
UTF-8
| 1,164
| 4.125
| 4
|
[] |
no_license
|
#! /bin/bash
# Not finished yet (work in progress)
# e.g. search /home/* and return 1st depth of folders in /home/
# : ./get_folders_in_folder.sh
# earlier draft kept below for reference (superseded by getHomeUsers)
search_dir="/home"
# for entry in `ls $search_dir`; do
#     # echo "$entry"
#     filepath="/$search_dir/$entry"
#     echo "filepath=$filepath"
#     if [[ -d $filepath ]]; then
#         entry_basename=$(basename $filepath)
#         echo "entry_base_name=$entry_basename"
#         arrVar[${#arrVar[@]}]=$entry
#     fi
# done
function getHomeUsers
####################################################
# Load the first-level subdirectories of the given path(s) into an array.
# Arguments:
#   $1 = name of return array (must be global)
#   $2 = first path for 'find' command to search
#   ... other paths for 'find' to search
# Note: each search root itself is also listed, because
# 'find <root> -maxdepth 1 -type d' includes <root>.
####################################################
{
  # store the name of the caller's global array, filled via mapfile below
  local h_rtnArr=$1
  # discard first argument in argv
  shift
  # Fixes vs. the draft: -t strips the trailing newline from every element
  # (without it each entry ends in "\n"); quoting "$@" keeps paths with
  # spaces intact; -maxdepth is a global option and must precede -type,
  # otherwise GNU find emits a warning.
  mapfile -t "$h_rtnArr" < <(find "$@" -maxdepth 1 -type d)
  # TODO: return error code
}
# Demo driver: collect /home's first-level directories and print them.
declare -a arrVar=()
getHomeUsers arrVar /home
# List results to stdout
arrSz=${#arrVar[@]}
for (( ndx=0; ndx < $arrSz; ++ndx )); do
  # NOTE(review): echo -n relies on each element carrying its own trailing
  # newline (mapfile without -t); if elements are trimmed, all entries print
  # on one line — confirm the intended output format.
  echo -n "${ndx}: ${arrVar[$ndx]}"
done
| true
|
7f64bc8a8f0495af7bc6497d5fd7de73b9d21e98
|
Shell
|
Flexberry/dockerfiles
|
/alt.p8-postgresql/12/root/bin/fillTable.sh
|
UTF-8
| 281
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Create the "test1" database with a single timestamp table, then insert one
# row per second forever — a simple load generator for the postgres container.
echo "create database test1;
\c test1
CREATE TABLE indexing_table(created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW());
" | psql -U postgres
# intentional infinite loop: the container is expected to be stopped externally
while true; do
  psql -U postgres -d test1 -c "INSERT INTO indexing_table(created_at) VALUES (CURRENT_TIMESTAMP);"
  sleep 1;
done
| true
|
04c30bbdbb9e3f2ca0a29939b658bba7ba017801
|
Shell
|
pgierz/adjust_jan_spec
|
/adjust_jan_spec_from_T63.sh
|
UTF-8
| 5,159
| 3.859375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
echo -e "\033[1;32m S T A R T O F P R O G R A M "
echo adjust_jan_spec_from_T63.sh
echo -e "\033[0m"
read -r -d '' usage <<'EOF'
This script regenerates a T63L47_jan_spec.nc input file from an existing T63 simulation in order to cope with adapted orography
Requirements:
$1 <EXPID>_echam6_echam_????01.nc
--> NOTE This *MUST* be a January file!
Paul Gierz, AWI Bremerhaven
December 2017
EOF
# Check the arguments!
if [ "$#" -ne 1 ]; then
echo "Illegal number of parameters"
echo "$usage"
exit
fi
# Parse the inputs to this script
Old_T63_output_file=$1
# Keep a cleanup variable around
rmlist=""
# Get the standard T63L47_jan_spec.nc to work with
# and set up the environment for this script
module purge
case $HOSTNAME in
(mlogin*)
Standard_T63L47_jan_spec_filepath_file=/pool/data/ECHAM6/T63/T63L47_jan_spec.nc
module load cdo
module load nco
module load netcdf_c/4.3.2-gcc48
;;
(ollie*)
Standard_T63L47_jan_spec_filepath_file=/home/ollie/pgierz/reference_stuff/T63L47_jan_spec.nc
module load cdo
module load nco
module load netcdf
;;
(*)
echo "I don't know where to look for the standard T63L47_jan_spec.nc file! Please add a case!"
exit
esac
echo -e "\033[1;33m Using cdo Version: "
cdo -s -V
echo -e "\033[0m"
# Print some info about the CDO stuff that needed to be learned:
echo -e "\033[1;33m For more information about the"
echo "cdo remapeta command, please see: "
echo "https://code.mpimet.mpg.de/issues/8144"
echo -e "\033[0m"
cp $Standard_T63L47_jan_spec_filepath_file .
Standard_T63L47_jan_spec_file=$(basename $Standard_T63L47_jan_spec_filepath_file)
# Generate the output file name using the old expid:
only_name=$(basename $Old_T63_output_file)
oldexpid="${only_name%_echam6*}"
ofile="${Standard_T63L47_jan_spec_file%.*}_from_${oldexpid}_T63L47.nc"
# Seperate variable STP (Spectral Temperature) into both spectral temperature
# and log of surface pressure, as it contains spectral temperature in levels
# 1:nlev, and log(SP) in level nlev+1
cdo -s import_e5ml ${Standard_T63L47_jan_spec_file} "${Standard_T63L47_jan_spec_file%.*}"_seperated.nc
rmlist="${Standard_T63L47_jan_spec_file%.*}_seperated.nc $rmlist"
cdo -s sp2gp "${Standard_T63L47_jan_spec_file%.*}"_seperated.nc "${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp.nc
rmlist="${Standard_T63L47_jan_spec_file%.*}_seperated_sp2gp.nc $rmlist"
cdo -s sp2gp ${Old_T63_output_file} "${Old_T63_output_file%.*}"_sp2gp.nc
rmlist="${Old_T63_output_file%.*}_sp2gp.nc $rmlist"
# Copy one variable from the old T63 model output (grid-point form) into the
# separated jan_spec working file, renaming it on the way.
# Arguments: $1 - variable name in the old output (e.g. "q")
#            $2 - target variable name in jan_spec (e.g. "Q")
# Globals:   Old_T63_output_file, Standard_T63L47_jan_spec_file (read),
#            rmlist (appended: intermediate files scheduled for cleanup)
# NOTE(review): requires the cdo/nco toolchain loaded earlier in the script;
# statement order matters (time-mean, float->double, rename, then append).
function insert_into_jan_spec_from_T63L47_run() {
        varname=$1
        newname=$2
        echo -e "\033[0;36m Inserting T63 Output into jan_spec file for $varname --> $newname \033[0m"
        cdo -s \
                -selvar,$varname \
                "${Old_T63_output_file%.*}"_sp2gp.nc \
                regrid_file_T63L47.nc
        rmlist="regrid_file_T63L47.nc $rmlist"
        # collapse the time dimension to a single (January) field
        ncwa -a time regrid_file_T63L47.nc tmp && mv tmp regrid_file_T63L47.nc
        # make sure everything is double and not float (this causes chaos and strange numbers otherwise...)
        ncdump regrid_file_T63L47.nc | sed 's/float/double/g' > tmp; ncgen -o regrid_file_T63L47.nc tmp; rm tmp
        ncrename -v $varname,$newname regrid_file_T63L47.nc tmp; mv tmp regrid_file_T63L47.nc
        # append the renamed variable into the separated jan_spec file
        ncks -A -C -v $newname regrid_file_T63L47.nc "${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp.nc
}
insert_into_jan_spec_from_T63L47_run q Q
insert_into_jan_spec_from_T63L47_run svo SVO
insert_into_jan_spec_from_T63L47_run sd SD
insert_into_jan_spec_from_T63L47_run t STP
insert_into_jan_spec_from_T63L47_run lsp LSP
# Split up the variables:
cdo -s splitname "${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp.nc "${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp_
# Go back to spectral space for the spectral variables:
for var in SVO SD STP LSP
do
cdo -s gp2sp "${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp_${var}.nc "${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp_${var}_gp2sp.nc
rmlist="${Standard_T63L47_jan_spec_file%.*}_seperated_sp2gp_${var}.nc ${Standard_T63L47_jan_spec_file%.*}_seperated_sp2gp_${var}_gp2sp.nc $rmlist"
done
rmlist="${Standard_T63L47_jan_spec_file%.*}_seperated_sp2gp_Q.nc $rmlist"
cdo -s merge \
"${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp_SVO_gp2sp.nc \
"${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp_SD_gp2sp.nc \
"${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp_STP_gp2sp.nc \
"${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp_LSP_gp2sp.nc \
"${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp_Q.nc \
"${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp_gp2sp.nc
# Add the LSP layer back to STP
cdo -s export_e5ml "${Standard_T63L47_jan_spec_file%.*}"_seperated_sp2gp_gp2sp.nc "${Standard_T63L47_jan_spec_file%.*}"_sp2gp_gp2sp.nc
rmlist="${Standard_T63L47_jan_spec_file%.*}_seperated_sp2gp_gp2sp.nc ${Standard_T63L47_jan_spec_file%.*}_sp2gp_gp2sp.nc $rmlist"
mv "${Standard_T63L47_jan_spec_file%.*}"_sp2gp_gp2sp.nc ${ofile}
##### Clean Up:
rm -f $rmlist
echo -e "\033[1;32m F I N I S H E D! "
echo -e "\033[0m"
exit
| true
|
10cd3ffe83e837b2bb24ed123e3051cab901d799
|
Shell
|
LucasRoesler/ingress-operator
|
/hack/update-crds.sh
|
UTF-8
| 376
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Regenerate the CRD manifests with controller-gen, installing the pinned
# version into GOBIN first if it is not present yet.
set -e
set -o pipefail
controllergen="$(go env GOBIN)/controller-gen"
PKG=sigs.k8s.io/controller-tools/cmd/controller-gen@v0.6.2
# NOTE(review): 'go env GOBIN' is empty unless GOBIN is set, which would make
# $controllergen "/controller-gen" — confirm the CI environment sets GOBIN.
if [ ! -e "$controllergen" ]
then
    echo "Getting $PKG"
    go install $PKG
fi
echo "using $controllergen"
# schemapatch merges generated schemas into the existing CRD manifests
"$controllergen" \
    crd \
    schemapatch:manifests=./artifacts/crds \
    paths=./pkg/apis/... \
    output:dir=./artifacts/crds
| true
|
3c079073ce798a5d7814018cf9acc9bebe99ce7e
|
Shell
|
HalleyTm/kira
|
/workstation/scripts/await-sentry-init.sh
|
UTF-8
| 10,641
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
set +e && source "/etc/profile" &>/dev/null && set -e
source $KIRA_MANAGER/utils.sh
# quick edit: FILE="$KIRA_MANAGER/scripts/await-sentry-init.sh" && rm $FILE && nano $FILE && chmod 555 $FILE
set -x
CONTAINER_NAME=$1
SENTRY_NODE_ID=$2
SAVE_SNAPSHOT=$3
[ -z "$SAVE_SNAPSHOT" ] && SAVE_SNAPSHOT="false"
COMMON_PATH="$DOCKER_COMMON/$CONTAINER_NAME"
COMMON_LOGS="$COMMON_PATH/logs"
HALT_FILE="$COMMON_PATH/halt"
EXIT_FILE="$COMMON_PATH/exit"
SNAP_HEIGHT_FILE="$COMMON_PATH/snap_height"
SNAP_NAME_FILE="$COMMON_PATH/snap_name"
IFACES_RESTARTED="false"
DOCKER_SNAP_DESTINATION="$DOCKER_COMMON_RO/snap.zip"
RPC_PORT="KIRA_${CONTAINER_NAME^^}_RPC_PORT" && RPC_PORT="${!RPC_PORT}"
retry=0
while : ; do
PREVIOUS_HEIGHT=0
HEIGHT=0
STATUS=""
i=0
NODE_ID=""
IS_STARTED="false"
while [[ $i -le 40 ]]; do
i=$((i + 1))
echoInfo "INFO: Waiting for container $CONTAINER_NAME to start..."
CONTAINER_EXISTS=$($KIRA_SCRIPTS/container-exists.sh "$CONTAINER_NAME" || echo "error")
if [ "${CONTAINER_EXISTS,,}" != "true" ]; then
sleep 20
echoWarn "WARNING: $CONTAINER_NAME container does not exists yet, waiting..."
continue
else
echoInfo "INFO: Success, container $CONTAINER_NAME was found"
#if [ "${CONTAINER_NAME,,}" != "snapshot" ] && [ "${IFACES_RESTARTED,,}" == "false" ] ; then
# echoInfo "INFO: Restarting network interfaces..."
# $KIRA_MANAGER/scripts/update-ifaces.sh
# IFACES_RESTARTED="true"
# i=0
# continue
#fi
fi
echoInfo "INFO: Awaiting $CONTAINER_NAME initialization..."
IS_STARTED="false" && [ -f "$COMMON_PATH/executed" ] && IS_STARTED="true"
if [ "${IS_STARTED,,}" != "true" ] ; then
sleep 20
echoWarn "WARNING: $CONTAINER_NAME is not initialized yet"
continue
else
echoInfo "INFO: Success, container was initialized"
fi
echoInfo "INFO: Awaiting node status..."
STATUS=$(timeout 6 curl --fail 0.0.0.0:$RPC_PORT/status 2>/dev/null | jsonParse "result" 2>/dev/null || echo -n "")
NODE_ID=$(echo "$STATUS" | jsonQuickParse "id" || echo -n "")
if (! $(isNodeId "$NODE_ID")) ; then
sleep 20
echoWarn "WARNING: Status and Node ID is not available"
continue
else
echoInfo "INFO: Success, $CONTAINER_NAME container id found: $NODE_ID"
fi
echoInfo "INFO: Awaiting first blocks to be synced..."
HEIGHT=$(echo "$STATUS" | jsonQuickParse "latest_block_height" || echo -n "")
(! $(isNaturalNumber "$HEIGHT")) && HEIGHT=0
if [[ $HEIGHT -le $PREVIOUS_HEIGHT ]] ; then
echoWarn "WARNING: New blocks are not beeing synced yet! Current height: $HEIGHT, previous height: $PREVIOUS_HEIGHT"
[ "$HEIGHT" != "0" ] && PREVIOUS_HEIGHT=$HEIGHT
sleep 10
continue
else
echoInfo "INFO: Success, $CONTAINER_NAME container id is syncing new blocks"
break
fi
done
echoInfo "INFO: Printing all $CONTAINER_NAME health logs..."
docker inspect --format "{{json .State.Health }}" $($KIRA_SCRIPTS/container-id.sh "$CONTAINER_NAME") | jq '.Log[-1].Output' | xargs | sed 's/\\n/\n/g' || echo "INFO: Failed to display $CONTAINER_NAME container health logs"
echoInfo "INFO: Printing $CONTAINER_NAME start logs..."
cat $COMMON_LOGS/start.log | tail -n 150 || echoWarn "WARNING: Failed to display $CONTAINER_NAME container start logs"
FAILURE="false"
if [ "${IS_STARTED,,}" != "true" ] ; then
echoErr "ERROR: $CONTAINER_NAME was not started sucessfully within defined time"
FAILURE="true"
else
echoInfo "INFO: $CONTAINER_NAME was started sucessfully"
fi
if [ "$NODE_ID" != "$SENTRY_NODE_ID" ] ; then
echoErr "ERROR: $CONTAINER_NAME Node id check failed!"
echoErr "ERROR: Expected '$SENTRY_NODE_ID', but got '$NODE_ID'"
FAILURE="true"
else
echoInfo "INFO: $CONTAINER_NAME node id check succeded '$NODE_ID' is a match"
fi
if [[ $HEIGHT -le $PREVIOUS_HEIGHT ]] ; then
echoErr "ERROR: $CONTAINER_NAME node failed to start catching up new blocks, check node configuration, peers or if seed nodes function correctly."
FAILURE="true"
fi
NETWORK=$(echo "$STATUS" | jsonQuickParse "network" || echo -n "")
if [ "$NETWORK_NAME" != "$NETWORK" ] ; then
echoErr "ERROR: Expected network name to be '$NETWORK_NAME' but got '$NETWORK'"
FAILURE="true"
fi
if [ "${FAILURE,,}" == "true" ] ; then
echoErr "ERROR: $CONTAINER_NAME node setup failed"
retry=$((retry + 1))
if [[ $retry -le 1 ]] ; then
echoInfo "INFO: Attempting $CONTAINER_NAME restart ${retry}/1"
$KIRA_MANAGER/kira/container-pkill.sh "$CONTAINER_NAME" "true" "restart"
continue
fi
sleep 30
exit 1
else
echoInfo "INFO: $CONTAINER_NAME launched sucessfully"
break
fi
done
if [ "${SAVE_SNAPSHOT,,}" == "true" ] ; then
echoInfo "INFO: External state synchronisation detected, $CONTAINER_NAME must be fully synced before setup can proceed"
echoInfo "INFO: Local snapshot must be created before network can be started"
i=0
PREVIOUS_HEIGHT=0
START_TIME_HEIGHT="$(date -u +%s)"
while : ; do
if [ ! -z "$TRUSTED_NODE_ADDR" ] && [ "$TRUSTED_NODE_ADDR" != "0.0.0.0" ] ; then
echoInfo "INFO: Awaiting trusted node status..."
TRUSTED_KIRA_STATUS=$(timeout 16 curl --fail "$TRUSTED_NODE_ADDR:$DEFAULT_INTERX_PORT/api/kira/status" 2>/dev/null || echo -n "")
TRUSTED_HEIGHT=$(echo "$KIRA_STATUS" | jsonQuickParse "latest_block_height" || echo "")
if ($(isNaturalNumber "$TRUSTED_HEIGHT")) && [[ "$TRUSTED_HEIGHT" -gt "$VALIDATOR_MIN_HEIGHT" ]] ; then
echoInfo "INFO: Minimum expected block height increased from $VALIDATOR_MIN_HEIGHT to $TRUSTED_HEIGHT"
VALIDATOR_MIN_HEIGHT=$TRUSTED_HEIGHT
CDHelper text lineswap --insert="VALIDATOR_MIN_HEIGHT=\"$TRUSTED_HEIGHT\"" --prefix="VALIDATOR_MIN_HEIGHT=" --path=$ETC_PROFILE --append-if-found-not=True
fi
fi
echoInfo "INFO: Awaiting node status..."
sleep 10
i=$((i + 1))
STATUS=$(timeout 8 curl --fail 0.0.0.0:$RPC_PORT/status 2>/dev/null | jsonParse "result" 2>/dev/null || echo -n "")
if ($(isNullOrEmpty $STATUS)) ; then
set +x
echoInfo "INFO: Printing '$CONTAINER_NAME' start logs:"
cat $COMMON_LOGS/start.log | tail -n 75 || echoWarn "WARNING: Failed to display '$CONTAINER_NAME' container start logs"
echoErr "ERROR: Node failed or status could not be fetched ($i/3), your netwok connectivity might have been interrupted"
[[ $i -le 3 ]] && sleep 10 && echoInfo "INFO: Next status check attempt in 10 seconds..." && continue
echoErr "ERROR: $CONTAINER_NAME status check failed"
sleep 30
exit 1
else
i=0
fi
set +x
SYNCING=$(echo $STATUS | jsonQuickParse "catching_up" 2>/dev/null || echo -n "")
($(isNullOrEmpty "$SYNCING")) && SYNCING="false"
HEIGHT=$(echo "$STATUS" | jsonQuickParse "latest_block_height" 2>/dev/null || echo -n "")
(! $(isNaturalNumber "$HEIGHT")) && HEIGHT=0
END_TIME_HEIGHT="$(date -u +%s)"
DELTA_HEIGHT=$(($HEIGHT - $PREVIOUS_HEIGHT))
DELTA_TIME=$(($END_TIME_HEIGHT - $START_TIME_HEIGHT))
if [[ $HEIGHT -gt $PREVIOUS_HEIGHT ]] && [[ $HEIGHT -le $VALIDATOR_MIN_HEIGHT ]] ; then
PREVIOUS_HEIGHT=$HEIGHT
START_TIME_HEIGHT=$END_TIME_HEIGHT
SYNCING="true"
fi
set -x
if [ "${SYNCING,,}" == "false" ] && [[ $HEIGHT -ge $VALIDATOR_MIN_HEIGHT ]] ; then
echoInfo "INFO: Node finished catching up."
break
elif [[ $HEIGHT -gt $VALIDATOR_MIN_HEIGHT ]] ; then
echoInfo "INFO: Minimum expected block height increased from $VALIDATOR_MIN_HEIGHT to $HEIGHT"
VALIDATOR_MIN_HEIGHT=$HEIGHT
fi
BLOCKS_LEFT=$(($VALIDATOR_MIN_HEIGHT - $HEIGHT))
set +x
if [[ $BLOCKS_LEFT -gt 0 ]] && [[ $DELTA_HEIGHT -gt 0 ]] && [[ $DELTA_TIME -gt 0 ]] && [ "${SYNCING,,}" == true ] ; then
TIME_LEFT=$((($BLOCKS_LEFT * $DELTA_TIME) / $DELTA_HEIGHT))
echoInfo "INFO: Estimated time left until catching up with min.height: $(prettyTime $TIME_LEFT)"
fi
echoInfo "INFO: Minimum height: $VALIDATOR_MIN_HEIGHT, current height: $HEIGHT, catching up: $SYNCING"
echoInfo "INFO: Do NOT close your terminal, waiting for '$CONTAINER_NAME' to finish catching up..."
set -x
sleep 30
done
echoInfo "INFO: Halting $CONTAINER_NAME container"
SNAP_NAME="${NETWORK_NAME}-${HEIGHT}-$(date -u +%s)"
echo "$HEIGHT" > $SNAP_HEIGHT_FILE
echo "$SNAP_NAME" > $SNAP_NAME_FILE
$KIRA_MANAGER/kira/container-pkill.sh "$CONTAINER_NAME" "true" "restart"
echoInfo "INFO: Creating new snapshot..."
i=0
DESTINATION_DIR="$KIRA_SNAP/$SNAP_NAME"
DESTINATION_FILE="${DESTINATION_DIR}.zip"
while [ ! -d "$DESTINATION_DIR" ] || [ -f $SNAP_HEIGHT_FILE ] ; do
i=$((i + 1))
cat $COMMON_LOGS/start.log | tail -n 10 || echoWarn "WARNING: Failed to display '$CONTAINER_NAME' container start logs"
echoInfo "INFO: Waiting for snapshot '$SNAP_NAME' to be created..."
sleep 30
done
echoInfo "INFO: Packaging snapshot into '$DESTINATION_FILE' ..."
cd $DESTINATION_DIR && zip -r "$DESTINATION_FILE" . *
rm -rf "$DESTINATION_DIR"
ls -1 "$KIRA_SNAP"
[ ! -f "$DESTINATION_FILE" ] && echoErr "ERROR: Failed to create snpashoot, file $DESTINATION_FILE was not found." && exit 1
echoInfo "INFO: New snapshot was created!"
SNAP_STATUS="$KIRA_SNAP/status"
mkdir -p $SNAP_STATUS
echo "$SNAP_FILENAME" > "$SNAP_STATUS/latest"
KIRA_SNAP_PATH=$DESTINATION_FILE
CDHelper text lineswap --insert="KIRA_SNAP_PATH=\"$KIRA_SNAP_PATH\"" --prefix="KIRA_SNAP_PATH=" --path=$ETC_PROFILE --append-if-found-not=True
CDHelper text lineswap --insert="VALIDATOR_MIN_HEIGHT=\"$HEIGHT\"" --prefix="VALIDATOR_MIN_HEIGHT=" --path=$ETC_PROFILE --append-if-found-not=True
ln -fv "$KIRA_SNAP_PATH" "$DOCKER_SNAP_DESTINATION"
fi
| true
|
388926b78c6f359ef4b879cc1a312fd97f503c4c
|
Shell
|
mjg59/scripts
|
/oem/ami/prod-publish.sh
|
UTF-8
| 221
| 3.15625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Publish an AMI for the given release channel and version.
# Usage: prod-publish.sh <group> <version>   e.g.  prod-publish.sh alpha 1.2.3
BOARD="amd64-usr"
GROUP="$1"
VER="$2"
DIR=/home/ec2-user/scripts/oem/ami
# POSIX marks 'test -o' obsolescent; use two tests joined by || instead
if [ -z "$GROUP" ] || [ -z "$VER" ]; then
  echo "Usage: $0 alpha 1.2.3" >&2
  exit 1
fi
# quote all expansions so values with spaces cannot split the command line
"$DIR/publish_ami.sh" -b "$BOARD" -g "$GROUP" -V "$VER"
| true
|
0b510a388b5dff3d641e822e07bc5635fe64c295
|
Shell
|
petronny/aur3-mirror
|
/compizconfig-backend-gconf-bzr/PKGBUILD
|
UTF-8
| 1,229
| 3.0625
| 3
|
[] |
no_license
|
pkgname=compizconfig-backend-gconf-bzr
pkgver=163
pkgrel=1
pkgdesc="GConf backend for Compiz"
url="https://launchpad.net/compiz-compizconfig-gconf"
license=('GPL' 'LGPL' 'MIT')
arch=('i686' 'x86_64')
depends=('gconf' 'libcompizconfig-bzr')
makedepends=('intltool' 'cmake')
provides=('compizconfig-backend-gconf')
conflicts=('compizconfig-backend-gconf')
install='compizconfig-backend-gconf-bzr.install'
_bzrtrunk=lp:compiz-compizconfig-gconf
_bzrmod=compiz-compizconfig-gconf
build()
{
cd "$srcdir"
msg "Connecting to Launchpad..."
if [[ -d "$_bzrmod" ]]; then
cd "$_bzrmod" && bzr pull "$_bzrtrunk" -r "$pkgver"
else
bzr branch "$_bzrtrunk" "$_bzrmod" -q -r "$pkgver"
fi
msg "Bazaar checkout done or server timeout."
rm -rf "$srcdir/$_bzrmod-build"
msg "Creating build directory..."
cp -r "$srcdir/$_bzrmod" "$srcdir/$_bzrmod-build"
mkdir "$srcdir/$_bzrmod-build"/build
cd "$srcdir/$_bzrmod-build/build"
msg "Running cmake..."
cmake .. \
-DCMAKE_INSTALL_PREFIX="/usr" \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
-DCOMPIZ_BUILD_WITH_RPATH=FALSE \
-DCOMPIZ_DESTDIR="${pkgdir}"
make
}
package() {
cd "$srcdir/$_bzrmod-build/build"
make install
}
| true
|
91a89dddf83b16d3ac253195f16767eec4d196db
|
Shell
|
xianjimli/misc
|
/projects/active-sync-mobile/script/gen_device_write.sh
|
UTF-8
| 142
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Split the comma-separated argument list and invoke ./gen_device_write_one.sh
# once per field (one leading space per field is stripped via sub()).
echo $* | awk -F, '{for(i = 1; i <= NF; i++) {field=$i; sub(/^ /, "", field); system("./gen_device_write_one.sh " field );}; }'
| true
|
9c19f2ffc263d54fa3652a40aa0dfc8dd102441b
|
Shell
|
salesforce/craft
|
/scripts/install.sh
|
UTF-8
| 3,692
| 3.875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
set -eu
OKGREEN='\033[92m'
FAIL='\033[91m'
WARN='\033[93m'
INFO='\033[94m'
ENDC='\033[0m'
function PassPrint() {
echo "$OKGREEN $1 $ENDC"
}
function FailPrint() {
echo "$FAIL $1 $ENDC"
}
function WarnPrint() {
echo "$WARN $1 $ENDC"
}
function InfoPrint() {
echo "$INFO $1 $ENDC"
}
OS=$(uname -s)
ARCH=$(uname -m)
OS=$(echo $OS | tr '[:upper:]' '[:lower:]')
VERSION="1.13.1"
NEWPATH=""
function installKB() {
version=2.2.0 # latest stable version
arch=amd64
# download the release
curl -L -O "https://github.com/kubernetes-sigs/kubebuilder/releases/download/v${version}/kubebuilder_${version}_${OS}_${arch}.tar.gz"
# extract the archive
tar -zxvf kubebuilder_${version}_${OS}_${arch}.tar.gz
mv kubebuilder_${version}_${OS}_${arch} kubebuilder && sudo mv kubebuilder /usr/local/
# update your PATH to include /usr/local/kubebuilder/bin
NEWPATH+=":/usr/local/kubebuilder/bin"
export PATH=$PATH:/usr/local/kubebuilder/bin
}
function installKustomize() {
curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash
}
function installGo(){
arch="amd64"
curl -L -O https://dl.google.com/go/go$VERSION.$OS-$arch.tar.gz
sudo tar -C /usr/local -xzf go$VERSION.$OS-$arch.tar.gz
NEWPATH+=":/usr/local/go/bin"
export PATH=$PATH:/usr/local/go/bin
}
function installDependency(){
cmd=`command -v curl` || {
"curl is missing, it is required for downloading dependencies"
exit 1
}
CUR=`pwd`
cd /tmp
cmd1=`command -v kubebuilder` || {
PassPrint "Installing kubebuilder"
installKB
}
cmd2=`command -v go` || {
PassPrint "Installing go"
installGo
}
cmd3=`command -v kustomize` || {
PassPrint "Installing kustomize"
installKustomize
}
cmd4=`command -v schema-generate` || {
if [ -z '$GOPATH' ] ; then
WarnPrint "set GOPATH and install schema-generate by: go get -u github.com/a-h/generate/..."
else
PassPrint "Installing schema-generate"
go get -u github.com/a-h/generate/...
fi
}
cd $CUR
if [[ -n $cmd1 && -n $cmd2 && cmd3 ]] ; then
InfoPrint "Dependencies already exist"
fi
}
function installCraft() {
VERSION=$1
PassPrint "Installing craft@$VERSION in /usr/local"
sudo rm -rf /usr/local/craft
CUR=`pwd`
cd /tmp
curl -L -O https://github.com/salesforce/craft/releases/download/$VERSION/craft.tar.gz
TYPE=`file craft.tar.gz`
NEWPATH+=":/usr/local/craft/bin"
if [[ "$TYPE" != *"gzip compressed data"* ]]; then
FailPrint "Downloaded craft.tar.gz is not of correct format. Maybe SSO is required."
echo """
Try downloading https://github.com/salesforce/craft/releases/download/$VERSION/craft.tar.gz from browser.
Then follow:
sudo tar -C /usr/local -xzf craft.tar.gz
export PATH=\$PATH$NEWPATH
"""
exit 1
fi
tar -xf craft.tar.gz
sudo mv craft /usr/local
cd $CUR
}
function install(){
installDependency
installCraft $1
if [[ -n $NEWPATH ]] ; then
WarnPrint "export PATH=\$PATH$NEWPATH"
fi
}
case $OS in
darwin | linux)
case $ARCH in
x86_64)
install ${1:-"v0.1.0-alpha"}
;;
*)
echo "There is no linkerd $OS support for $arch. Please open an issue with your platform details."
exit 1
;;
esac
;;
*)
echo "There is no linkerd support for $OS/$arch. Please open an issue with your platform details."
exit 1
;;
esac
| true
|
fe6719164f4a6e2c4f9cc0257403deb32ff71035
|
Shell
|
jjm2473/BPI-W2-bsp
|
/scripts/bootloader.sh
|
UTF-8
| 856
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
#gunzip -c BPI_M3_1080P.img.gz | dd of=/dev/mmcblk0 conv=sync,noerror bs=1k
die() {
echo "$*" >&2
exit 1
}
[ -s "./env.sh" ] || die "please run ./configure first."
. ./env.sh
O=$1
if [ ! -z $O ] ; then
BOARD=$O
fi
U=/tmp/${TARGET_PRODUCT}
if [ ! -d $U ]; then
mkdir -p $U
fi
TMP_FILE=/tmp/${BOARD}.tmp
IMG_FILE=${U}/${BOARD}-2k.img
UBOOT=$TOPDIR/rtk-pack/rtk/${TARGET_PRODUCT}/bin/u-boot.bin
(sudo dd if=/dev/zero of=${TMP_FILE} bs=1M count=1) >/dev/null 2>&1
LOOP_DEV=`sudo losetup -f --show ${TMP_FILE}`
(sudo dd if=$UBOOT of=${LOOP_DEV} bs=1k seek=40) >/dev/null 2>&1
sudo sync
sudo losetup -d ${LOOP_DEV}
(dd if=${TMP_FILE} of=${IMG_FILE} bs=1k skip=2 count=1022 status=noxfer) >/dev/null 2>&1
if [ -f ${IMG_FILE}.gz ]; then
rm -f ${IMG_FILE}.gz
fi
echo "gzip ${IMG_FILE}"
gzip ${IMG_FILE}
sudo rm -f ${TMP_FILE}
| true
|
9f164d53bad307fa9c6e303059eeef1c6076e56c
|
Shell
|
digitalpolygon/drupal-recommended-project
|
/.ddev/commands/host/setup
|
UTF-8
| 513
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
## Description: Initialize project and setup application
## Usage: setup
## Example: ddev setup
# -e: abort on first error; -v: echo each line for visibility
set -ev
# Cleanup/remove project resources and assets.
ddev stop --remove-data --omit-snapshot
ddev start --skip-confirmation
# composer is required for the nuke/refresh steps below
if ! command -v composer >/dev/null; then
    echo "composer is not available. You may need to install 'composer'"
    exit 1
fi
ddev composer nuke
echo "Refreshing your local environment"
ddev refresh
echo "Your site is ready to use. Below is the information."
ddev describe
| true
|
769f9b123484b386de195891a8c017f17b5a865b
|
Shell
|
iacabezasbaculima/UNIX_ShellScripting
|
/args_shift.sh
|
UTF-8
| 242
| 3.5
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Demonstrate consuming command line arguments with the shift command:
# print the (shrinking) argument list once per argument, one second apart.
# $# gives the exact argument count; counting the words of an unquoted $@
# (as the original loop did) miscounts arguments containing whitespace,
# and POSIX arithmetic $(( )) replaces the obsolete external 'expr'.
ITEMS=$#
COUNTER=0
while [ "$COUNTER" -lt "$ITEMS" ]
do
	sleep 1
	COUNTER=$((COUNTER + 1))
	echo "$@"
	shift
done
| true
|
5cac6b8bc3fa9334f21edd239714c0e4a7b69328
|
Shell
|
essembeh/dotfiles
|
/logitech/auto.sh
|
UTF-8
| 650
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Apply a day or night LED profile to a Logitech G810-series keyboard.
# Builds the g810-led tool on first run (via nix-shell) and caches the
# binary next to this script.
set -eu
ROOT=$(dirname "$0")
TOOL_BIN="$ROOT/g810-led"
TMPDIR=$(mktemp -d)
REPO_URL="https://github.com/MatMoul/g810-led"
PROFILE_NIGHT="$ROOT/g810-dark.profile"
PROFILE_DAY="$ROOT/g810-light.profile"
# Build the tool once if the cached binary is missing or not executable.
if ! test -x "$TOOL_BIN"; then
echo "⏳ Build $REPO_URL ..."
git clone --quiet "$REPO_URL" "$TMPDIR"
nix-shell -p hidapi gnumake --run "cd \"$TMPDIR\" && make clean all"
cp -av "$TMPDIR/bin/g810-led" "$TOOL_BIN"
fi
# Day profile between 08:00 and 20:59, night profile otherwise.
# (bash/dash `test` parses the zero-padded %H output as decimal, so
# "08" compares correctly here.)
HOUR=$(date +%H)
if test $HOUR -ge 8 -a $HOUR -le 20; then
echo "☀ Set day profile"
"$TOOL_BIN" -p "$PROFILE_DAY"
else
echo "🌙 Set night profile"
"$TOOL_BIN" -p "$PROFILE_NIGHT"
fi
| true
|
ee7388a81371c9ec344b749a32c37699e82eac5d
|
Shell
|
pfeiffee/ferdi-server-fixed
|
/docker/entrypoint.sh
|
UTF-8
| 1,060
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint for ferdi-server: prints a banner, ensures a
# persistent APP_KEY exists, runs DB migrations, fixes ownership of the
# data dir, then starts the Node server as the unprivileged PUID/PGID user.
cat << EOL
-------------------------------------
____ ___
/ __/__ _______/ (_)
/ _// -_) __/ _ / /
_/_/ \__/_/ \_,_/_/
/ __/__ _____ _____ ____
_\ \/ -_) __/ |/ / -_) __/
/___/\__/_/ |___/\__/_/
Brought to you by getferdi.com
Support our Open Collective at:
https://opencollective.com/getferdi/
EOL
# Create APP key if needed
# On first run (no APP_KEY env var and no persisted key file) generate a
# key with adonis and persist it under $DATA_DIR so restarts reuse it.
if [[ -z ${APP_KEY} && ! -f "${DATA_DIR}/FERDI_APP_KEY.txt" ]]; then
echo "**** Generating Ferdi-server app key for first run ****"
adonis key:generate
APP_KEY=$(grep APP_KEY .env | cut -d '=' -f2)
echo "${APP_KEY}" > "${DATA_DIR}/FERDI_APP_KEY.txt"
echo "**** App Key set to $APP_KEY you can modify FERDI_APP_KEY.txt to update your key ****"
else APP_KEY=$(cat "${DATA_DIR}/FERDI_APP_KEY.txt")
echo "**** App Key set to $APP_KEY you can modify FERDI_APP_KEY.txt to update your key ****"
fi
export APP_KEY
# Apply any pending database migrations before starting the app.
node ace migration:run --force
# Hand ownership of data and app dirs to the runtime user (default 1000:1000).
chown -R "${PUID:-1000}":"${PGID:-1000}" "$DATA_DIR" /app
su-exec "${PUID:-1000}":"${PGID:-1000}" node server.js
| true
|
09bafa6ae6c74e43d1855b4316691365eff90a9d
|
Shell
|
reiosantos/prosper
|
/.bin/run
|
UTF-8
| 3,287
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
gits=(prosper-rest-api prosper-traffic)
txtbld=$(tput bold)
txtrst=$(tput sgr0)
txtlue=$(tput setaf 4)
txtund=$(tput sgr 0 1)
git_url=git@github.com:reiosantos/
# Print a highlighted banner line for the given project name.
# $1 - project/repository name
printGitProjectName() {
    local project=$1
    # %b expands escape sequences per-argument, matching the old `echo -e`.
    printf '%b%b****** %b ******%b\n' "${txtbld}" "${txtlue}" "${project}" "${txtrst}"
}
# Print an underlined/bold heading for the command about to run.
# $1 - human-readable command title
commandName() {
    local title=$1
    # %b expands escape sequences per-argument, matching the old `echo -e`.
    printf '%b%b%b### %b%b\n' "${txtund}" "${txtbld}" "${txtlue}" "${title}" "${txtrst}"
}
# Print usage information and return non-zero so callers can `exit 1`.
# NOTE(review): only the first "Usage" line goes to stderr; the rest of
# the help text goes to stdout -- presumably intentional, verify.
displayHelp() {
echo "Usage: $0 [option...]" >&2
echo
echo "Options:"
echo
echo " git [cmd]"
echo " cmd:"
echo " clone Clone all project"
echo " pull Pull all projects"
echo " fetch Fetch for all projects"
echo " status Get status for all projects"
echo " checkoutStaging Check out all projects with Staging branch"
echo
echo " clean [cmd]"
echo " cmd:"
echo " npm Clean all node_modules folders"
echo
echo " -h | --help Display this help"
echo
echo
echo "All projects:"
echo "${txtbld}${gits[@]}${txtrst}"
return 1
}
# Clone every repository listed in $gits from $git_url.
# Repositories whose directory already exists locally are skipped.
gitCloneAll() {
commandName "Git Clone All"
for git in "${gits[@]}"
do
printGitProjectName "$git"
if [ ! -d "$git" ]; then
git clone $git_url"$git".git
else
echo "$git directory already exists (skipping)"
fi
done
return 0
}
# Run `git pull` in every project directory listed in $gits.
gitPullAll() {
commandName "Git Pull All"
for git in "${gits[@]}"
do
printGitProjectName "$git"
git -C "$git" pull
done
return 0
}
# Run `git fetch` in every project directory listed in $gits.
gitFetchAll() {
commandName "Git Fetch All"
for git in "${gits[@]}"
do
printGitProjectName "$git"
git -C "$git" fetch
done
return 0
}
# Run `git status` in every project directory listed in $gits.
gitStatusAll() {
commandName "Git Status All"
for git in "${gits[@]}"
do
printGitProjectName "$git"
git -C "$git" status
done
return 0
}
# Check out the `staging` branch in every project directory in $gits.
gitCheckoutStagingAll() {
commandName "Git Checkout Staging All"
for git in "${gits[@]}"
do
printGitProjectName "$git"
git -C "$git" checkout staging
done
return 0
}
# Delete node_modules contents (including the .bin dir) in every project.
# The project directories themselves are left in place.
cleanNodeModules() {
commandName "Clean {project}/node_modules/*"
for git in "${gits[@]}"
do
printGitProjectName "$git"
rm -rf ./"$git"/node_modules/*
rm -rf ./"$git"/node_modules/.bin
done
return 0
}
# Top-level command dispatch: $1 selects the command group, $2 the action.
# An empty git action defaults to `status`.
case "$1" in
git)
  case "$2" in
  clone)
    gitCloneAll
    exit 0
    ;;
  pull)
    gitPullAll
    exit 0
    ;;
  fetch)
    gitFetchAll
    exit 0
    ;;
  status | "")
    gitStatusAll
    exit 0
    ;;
  checkoutStaging)
    # FIX: the original arm was `checkoutStaging | "")`, but the empty
    # string is always matched by the `status | ""` arm above, so that
    # alternative was unreachable dead code and has been removed.
    gitCheckoutStagingAll
    exit 0
    ;;
  *)
    displayHelp
    exit 1
    ;;
  esac
  ;;
clean)
  case "$2" in
  npm)
    cleanNodeModules
    exit 0
    ;;
  *)
    displayHelp
    exit 1
    ;;
  esac
  ;;
*)
  displayHelp
  exit 1
  ;;
esac
| true
|
bdb67c6969e5bc2e4abf2756ab254fca05a65e49
|
Shell
|
yh549848/ngsfiles
|
/scripts/subsample_fastq.sh
|
UTF-8
| 355
| 3.546875
| 4
|
[] |
no_license
|
#! /bin/bash
#
# Sub
#
function subsample () {
    # Down-sample a FASTQ file with seqkit at a fixed seed so runs are
    # reproducible.
    #   $1 - input FASTQ file; output is written next to it as
    #        <prefix>.<n>.fastq.gz for each read count in num_reads.
    input=$1
    seed=12345
    num_reads=(100000)
    for n in "${num_reads[@]}"
    do
        # %%.* strips everything from the first dot in the path.
        output_file=${input%%.*}.${n}.fastq.gz
        # Print the command before running it (same visible output as
        # the original echo+eval, but invoking seqkit directly with
        # quoted arguments avoids eval re-splitting filenames).
        echo "seqkit sample -n ${n} -s ${seed} -o ${output_file} ${input}"
        seqkit sample -n "$n" -s "$seed" -o "$output_file" "$input"
    done
}
#
# Main
#
# Process every input path given on the command line.
# Iterating over "$@" directly (instead of copying through an unquoted
# array, as before) preserves filenames containing whitespace or globs.
for input in "$@"
do
subsample "$input"
done
| true
|
db341b3429a112c702b0ab61d2750b0c3cc23dd1
|
Shell
|
alexjg/kafka-topic-creator
|
/create-topics
|
UTF-8
| 483
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# Create each Kafka topic named in $TOPICS (whitespace-separated) if it
# does not already exist on the cluster behind $ZOOKEEPER.
# Expects NUM_PARTITIONS, REPLICATION_FACTOR and CONFIG in the environment.
for topic in $TOPICS;
do
# Empty describe output means the topic does not exist yet.
topicDescription=$(kafka-topics --zookeeper $ZOOKEEPER --describe --topic $topic)
if [ -z "$topicDescription" ]
then
echo "creating $topic"
kafka-topics --zookeeper $ZOOKEEPER \
--create \
--topic $topic \
--partitions $NUM_PARTITIONS \
--replication-factor $REPLICATION_FACTOR \
--config $CONFIG;
else
echo "$topic already exists"
fi
done
| true
|
7421763261bc7bfa705ea647e8d6313aa3fe01f8
|
Shell
|
rsignell-usgs/pangeo-notebook
|
/prepare.sh
|
UTF-8
| 1,289
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Pangeo notebook startup hook: copies Dask config into the home dir,
# syncs the example-notebook repo to origin/master, and installs any
# extra conda/pip packages requested via the environment.
set -x
echo "Copy Dask configuration files from pre-load directory into /home/jovyan"
cp --update -r -v /pre-home/. /home/jovyan
# Default examples repo unless the deployment overrides it.
if [ -z "$EXAMPLES_GIT_URL" ]; then
export EXAMPLES_GIT_URL=https://github.com/ESIPFed/pangeo-example-notebooks
fi
rmdir examples &> /dev/null # deletes directory if empty, in favour of fresh clone
if [ ! -d "examples" ]; then
git clone $EXAMPLES_GIT_URL examples
fi
cd examples
# Force the checkout to exactly match the remote master (discards any
# local edits users made inside the examples directory).
git remote set-url origin $EXAMPLES_GIT_URL
git fetch origin
git reset --hard origin/master
git merge --strategy-option=theirs origin/master
if [ ! -f DONT_SAVE_ANYTHING_HERE.md ]; then
echo "Files in this directory should be treated as read-only" > DONT_SAVE_ANYTHING_HERE.md
fi
cd ..
mkdir -p work
# Optional per-deployment conda environment file.
if [ -e "/opt/app/environment.yml" ]; then
echo "environment.yml found. Installing packages"
/opt/conda/bin/conda env update -f /opt/app/environment.yml
else
echo "no environment.yml"
fi
if [ "$EXTRA_CONDA_PACKAGES" ]; then
echo "EXTRA_CONDA_PACKAGES environment variable found. Installing."
/opt/conda/bin/conda install $EXTRA_CONDA_PACKAGES
fi
if [ "$EXTRA_PIP_PACKAGES" ]; then
echo "EXTRA_PIP_PACKAGES environment variable found. Installing".
/opt/conda/bin/pip install $EXTRA_PIP_PACKAGES
fi
# Run extra commands
# NOTE(review): unquoted $@ word-splits the forwarded command's
# arguments; "$@" would be safer -- confirm callers before changing.
$@
| true
|
f12c57984d4c058a9b6d5d4625f2c5315d5fecd4
|
Shell
|
mr337/mymove
|
/scripts/run-deployed-migrations
|
UTF-8
| 2,235
| 4.0625
| 4
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
#! /usr/bin/env bash
#
# A script to apply all migrations, including secure migrations, to a local database.
# https://github.com/transcom/mymove/blob/master/docs/how-to/migrate-the-database.md#secure-migrations
set -eu -o pipefail
# Print a one-line usage summary and abort with exit status 1.
usage() {
  local script_name=${0##*/}
  printf '%s\n' "SECURE_MIGRATION_SOURCE=s3 SECURE_MIGRATION_BUCKET_NAME=* ${script_name} DB_NAME"
  exit 1
}
# Validate required environment and arguments before touching the DB.
# SECURE_MIGRATION_SOURCE must be "s3" or "local"; the s3 source also
# needs SECURE_MIGRATION_BUCKET_NAME.
if [[ "${SECURE_MIGRATION_SOURCE:-}" != "s3" && "${SECURE_MIGRATION_SOURCE:-}" != "local" ]]; then
echo "environment variable SECURE_MIGRATION_SOURCE is required"
usage
fi
if [ "${SECURE_MIGRATION_SOURCE:-}" == "s3" ]; then
if [ -z "$SECURE_MIGRATION_BUCKET_NAME" ]; then
echo "environment variable SECURE_MIGRATION_BUCKET_NAME is required"
usage
fi
fi
# First positional argument is the target database name.
if [[ "$#" -lt 1 ]]; then
echo "Missing database name"
usage
fi
# Local psql connection settings; DB_PORT_DEPLOYED_MIGRATIONS may
# override the default port 5434.
export PSQL_SSL_MODE=disable
export DB_NAME=$1
export DB_PORT="${DB_PORT_DEPLOYED_MIGRATIONS:-5434}"
# Interactive confirmation gate: print the prompt in red and exit 0
# unless the user answers with something containing y/Y.
#   $1 - optional prompt text (defaults to "proceed")
function proceed() {
proceed_message=${1:-"proceed"}
echo -en "\e[31m${proceed_message} (y/N) \e[39m"
read -r proceed
# Regex matches any answer with no y/Y at all, i.e. "not yes".
if [[ "$proceed" =~ ^[^yY]*$ ]]; then
echo "exiting"
exit 0
fi
}
# Echo a command, then execute it with its arguments intact.
#   $@ - command and arguments to run
# FIX: the original used `readonly command=( "$@" )`, which made the
# variable permanently read-only after the first call (breaking any
# subsequent call in the same shell), and executed `${command[*]}`
# unquoted, which re-word-split arguments containing whitespace.
function run() {
  local -a cmd=( "$@" )
  echo "...executing: ${cmd[*]}"
  "${cmd[@]}"
}
#
# Pre-flight checks
#
# Ensure our `aws` command is the one infra has wrapped with aws-vault
command -v aws 2> /dev/null | grep "ppp-infra/scripts/aws" &> /dev/null || \
( echo "error: aws command not pointing to 'ppp-infra/scripts/aws"
echo "see https://github.com/transcom/ppp-infra/blob/master/transcom-ppp/README.md#using-aws-vault"
exit 1
)
# Test AWS command and freshen AWS session token
aws s3 ls > /dev/null
#
# Run migrations
#
echo
if [ "${SECURE_MIGRATION_SOURCE:-}" == "s3" ]; then
echo -e "\e[33mUsing ${SECURE_MIGRATION_BUCKET_NAME} to gather secure migrations\e[39m"
else
echo -e "\e[33mUsing local_migrations folder to gather secure migrations\e[39m"
fi
echo
proceed "Running deployed migrations against the local database with name ${DB_NAME}. This will delete everything in that db."
export PGPASSWORD=${DB_PASSWORD}
make db_deployed_migrations_reset
run make db_deployed_migrations_migrate || (
echo "error: migrations failed!"
exit 1
)
echo "Production migrations applied to ${DB_NAME}."
| true
|
60e0f2a5dea042c7fdcc109f9cf10c69d3845717
|
Shell
|
masonchen2014/alipython
|
/shutdown_tomcat.sh
|
UTF-8
| 261
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh
# Stop a Tomcat instance listening on a given port.
#   $1 - TCP port the instance listens on
#   $2 - Tomcat install prefix (presumably ends with '/' since it is
#        concatenated directly as "$2bin/shutdown.sh") -- TODO confirm
# Strategy: locate the PID via netstat, attempt a graceful shutdown,
# then re-check and SIGKILL if the process is still listening.
PID=`netstat -ntlpu | grep tcp | grep :$1 | awk '{print $NF}' | awk -F/ '{print $1}'`
# NOTE(review): unquoted [ $PID ] only works for a single matching
# socket; multiple matches would make the test fail with "too many args".
if [ $PID ];then
$2bin/shutdown.sh
fi
# Re-query: if the graceful shutdown worked, $PID is now empty.
PID=`netstat -ntlpu | grep tcp | grep :$1 | awk '{print $NF}' | awk -F/ '{print $1}'`
if [ $PID ];then
kill -9 $PID
fi
| true
|
57469057f023695f2c0df723bd1f92fa6ca76dc6
|
Shell
|
mlahmadx/Demo
|
/cmake_cleanup.sh
|
UTF-8
| 663
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# Remove all CMake-generated build artifacts from the project tree
# (current dir plus up to two directory levels below), printing rough
# progress percentages along the way. The sleeps are cosmetic pacing.
echo "0% ---- project build cleanup"
make clean
sleep 1
echo "20% ---- cleanup cmake install files"
rm -rf */*/cmake_install.cmake */cmake_install.cmake cmake_install.cmake
rm -rf */*/install_manifest.txt */install_manifest.txt install_manifest.txt
sleep 1
echo "40% ---- cleanup cmake cache files"
rm -rf */*/CMakeCache.txt */CMakeCache.txt CMakeCache.txt
sleep 1
echo "60% ---- cmake files cleanup"
rm -rf */*/CMakeFiles */CMakeFiles CMakeFiles
sleep 1
echo "80% ---- remove project generated files"
rm -rf */*/Makefile */Makefile Makefile
sleep 1
# Generated config headers and the output directory.
rm -rf */*/*Config*.h */*Config*.h
rm -rf exe
sleep 1
echo "100% ---- All project cleanup done"
| true
|
0b402d1aa3475e0e627928e2ab043483a3b2706f
|
Shell
|
joshuaar/alarc-scripts
|
/ScottScripts/7_expression.sh
|
UTF-8
| 697
| 3.078125
| 3
|
[] |
no_license
|
# Step 7 of the RNA-seq pipeline: quantify expression with RSEM against
# the Trinity/TransDecoder predicted coding sequences, then build an
# expression table. Expects ../6_Assembly and ../RawData to exist.
RSEMHOME='/data1/utils/rsem-1.2.5'
cd ..
mkdir 7_Expression
cd 7_Expression
ln -s ../6_Assembly/Trinity.fasta.transdecoder.cds .
echo "preparing reference from predicted coding sequences..."
$RSEMHOME/rsem-prepare-reference Trinity.fasta.transdecoder.cds trinity-rsem
echo "done preparing reference"
echo "mapping reads to reference"
# Derive unique sample prefixes from RawData filenames (text before the
# first dot). NOTE(review): parsing `ls` output breaks on unusual
# filenames; works here presumably because sample names are plain.
for i in `ls ../RawData/ | awk -F "." '{print $1}' | sort | uniq`
do
echo $i*R1*fastq
echo $i*R2*fastq
# Paired-end quantification per sample; results prefixed with $i.
$RSEMHOME/rsem-calculate-expression --paired-end ../RawData/$i.R1.fastq ../RawData/$i.R2.fastq trinity-rsem $i
done
echo "done mapping reads to reference"
echo "making expression table"
make-expression-table.py
echo "done making expression table"
| true
|
5289e35e8446f2d2c30497c061d958fd7dad27f3
|
Shell
|
NoobsDevelopers/L2Nextgen
|
/java/LoginServer_loop.sh
|
UTF-8
| 704
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Supervisor loop for the L2 login server: backs up the DB, rotates the
# previous run's logs, launches the JVM, and restarts only when the
# server exits with status 2 (its "restart requested" code).
# ======== JVM settings =======
javaopts=" -Xms32m"
javaopts="$javaopts -Xmx64m"
javaopts="$javaopts -XX:SurvivorRatio=8"
javaopts="$javaopts -Xincgc"
javaopts="$javaopts -XX:+AggressiveOpts"
# do not change
java_settings=" -Dfile.encoding=UTF-8"
java_settings="$java_settings -Djava.net.preferIPv4Stack=true"
while :; do
./backup_db.sh
# Rotate last run's logs with a timestamp suffix.
mv log/java-0.log "log/`date +%Y-%m-%d_%H-%M-%S`_java.log"
mv log/error-0.log "log/`date +%Y-%m-%d_%H-%M-%S`_error.log"
mv log/stdout.log "log/`date +%Y-%m-%d_%H-%M-%S`_stdout.log"
nice -n -2 java -server $java_settings $javaopts -cp ./lib/*:l2nserver.jar l2n.login.L2LoginServer > log/stdout.log 2>&1
# Exit status 2 means "restart me"; anything else terminates the loop.
[ $? -ne 2 ] && break
sleep 10;
done
| true
|
94f7f92a2d4fe9848b087b0db782f564fe60ee9c
|
Shell
|
ekancepts/dotfiles
|
/vim/install
|
UTF-8
| 580
| 3.359375
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Dotfiles step: install vim-plug for Neovim, symlink init.vim, and run
# an initial :PlugInstall. Idempotent: skipped if plug.vim already exists.
# Expects $DIR (dotfiles root) and the info/success helpers from utils/index.
source "$DIR/utils/index"
info "Setup \033[0;37m[vim]\033[0m"
plugin_path="${XDG_DATA_HOME:-$HOME/.local/share}/nvim/site/autoload/plug.vim"
if [ ! -f $plugin_path ]; then
sh -c "curl -fLo $plugin_path --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim"
# symlink configuration file
mkdir -p "$HOME/.config/nvim/"
# NOTE(review): ln -F is a BSD/macOS flag (remove existing dir targets);
# this step presumably targets macOS -- confirm before running on Linux.
ln -Fs "$DIR/vim/init.vim" "$HOME/.config/nvim/init.vim"
# install plugins
vim +PlugInstall +qall
success "VIM plugins successfully installed"
else
success "VIM plugins already setup"
fi
| true
|
e584e66b9d0d53c1d98572e960350fa2e06a09f1
|
Shell
|
Chrysostomus/packages-community
|
/forecasts/PKGBUILD
|
UTF-8
| 1,131
| 2.8125
| 3
|
[] |
no_license
|
# Based on the file created for Arch Linux by:
# Contributor: fancris3 <fancris3 at aol.com>
# Contributor: fbianconi <fbianconi at gmail.com>
# Contributor: Moritz Maxeiner <moritz@ucworks.org>
# Maintainer: Philip Müller <philm[at]manjaro[dot]org>
pkgname=forecasts
pkgver=221.a169559
ever=0.18.5
pkgrel=3
pkgdesc="Enlightenment e18 module: A simple and useful weather forecasts"
arch=('i686' 'x86_64')
url="http://git.enlightenment.org/enlightenment/modules/$pkgname.git"
license=('MIT')
depends=("enlightenment=$ever")
makedepends=('git')
provides=("$pkgname" "$pkgname-git")
conflicts=('e-modules-extra' "$pkgname-svn" "$pkgname-git")
source=("$pkgname::git://git.enlightenment.org/enlightenment/modules/$pkgname.git")
md5sums=('SKIP')
pkgver() {
cd "$pkgname"
# Use the tag of the last commit
echo $(git rev-list --count HEAD).$(git rev-parse --short HEAD)
}
build() {
cd "$pkgname"
./autogen.sh --prefix=/usr
make
}
package() {
cd "$pkgname"
make DESTDIR="$pkgdir/" install
install -D -m644 COPYING $pkgdir/usr/share/licenses/$pkgname/COPYING
install -D -m644 COPYING-PLAIN $pkgdir/usr/share/licenses/$pkgname/COPYING-PLAIN
}
| true
|
15e85779423fbd9265510af8aa9999670b46ed1a
|
Shell
|
jmcausing/LXCWP
|
/x.sh
|
UTF-8
| 364
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch a fresh copy of the LXCWP Ansible playbook (play.yml), deleting
# any existing local copy first so we never keep a stale file.
# FIX: replaced the `$(ls play.yml | grep play.yml)` existence test
# (parse-ls antipattern, noisy when the file is absent) with a plain
# -f test, and deduplicated the wget call shared by both branches.
if [ -f play.yml ]; then
    echo "# Existing play.yml detected. Deleting and downloading a new one."
    rm play.yml
else
    echo "Downloading play.yml playbook"
fi
wget https://github.com/jmcausing/LXCWP/raw/master/play.yml
| true
|
71b071eff544e83620a61d3880c6a9b03c966ff5
|
Shell
|
helenagarcia90/dotfiles
|
/setup.sh
|
UTF-8
| 1,051
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# (FIX: shebang was the typo "#/bin/bash", which is just a comment.)
#
# make_link FROMNAME [TONAME]
# Symlink $FROM/FROMNAME to $TO/TONAME; with one argument the same name
# is used on both sides. Any other argument count is silently ignored,
# matching the original behavior.
# NOTE: `ln -h` is the BSD/macOS flag (do not follow an existing symlink
# at the destination); GNU coreutils uses -n for the same purpose.
function make_link()
{
if [ $# -eq 1 ] || [ $# -eq 2 ];then
_fromfilename=$1
# Default the destination name to the source name (merges the two
# duplicated assignment branches of the original).
_tofilename=${2:-$1}
echo "ln -hfs $FROM/$_fromfilename $TO/$_tofilename"
# Quoted so paths containing spaces survive intact.
ln -hfs "$FROM/$_fromfilename" "$TO/$_tofilename"
fi
}
# Directory containing this script (resolved to an absolute path)
FROM=$(dirname $0)
FROM=`cd $FROM;pwd`
# Destination directory for the symlinks
TO=$HOME
make_link .bash_profile
# make_link .gitignore
make_link .vim
make_link .vimrc
make_link .zshrc
make_link .screenrc
make_link .Rprofile
make_link bin
make_link msmtp/.msmtprc .msmtprc
make_link msmtp/.certs/ThawtePremiumServerCA.pem .ThawtePremiumServerCA.pem
make_link .todo.cfg
make_link .todo.actions.d
make_link .tmux.conf
# Detect git by scanning each PATH entry for an executable `git`.
IFS=:
GIT=false
for d in $PATH
do test -x $d/git && GIT=true
done
# Only link the git config when git is actually installed.
if $GIT
then
echo "git found!"
# git config --global user.name "Shingo Omura"
# git config --global user.email "everpeace@gmail.com"
# git config --global core.excludesfile "$TO/.gitignore"
make_link .gitconfig
else echo "no git"
fi
| true
|
3d959f17e2a22218aedb5a020256ea29fe1ed9d8
|
Shell
|
malukisecurity/life
|
/curso.sh
|
UTF-8
| 10,199
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Cores
preto="\033[0;30m"
vermelho="\033[0;31m"
verde="\033[0;32m"
marrom="\033[0;33m"
azul="\033[0;34m"
purple="\033[0;35m"
cyan="\033[0;36m"
cinzaClaro="\033[0;37m"
pretoCinza="\033[1;30m"
vermelhoClaro="\033[1;31m"
verdeClaro="\033[1;32m"
amarelo="\033[1;33m"
azulClaro="\033[1;34m"
purpleClaro="\033[1;35m"
cyanClaro="\033[1;36m"
branco="\033[1;37m"
echo " ${azul}
.##.....##.########.##....##.##.....##
.###...###.##.......###...##.##.....##
.####.####.##.......####..##.##.....##
.##.###.##.######...##.##.##.##.....##
.##.....##.##.......##..####.##.....##
.##.....##.##.......##...###.##.....##
.##.....##.########.##....##..#######.
"
echo "${vermelho} [1] configuração interfaces"
echo "${amarelo} [2] configuração dhcp"
echo "${verde} [3] configuração do samba"
echo "${vermelhoClaro} [4] sair"
read numero
if [ "$numero" = "1" ]
then
clear
sleep 0.3
echo "${azul}
..######...#######..##....##.########.####..######...##.....##.########.....###.....######.....###.....#######.
.##....##.##.....##.###...##.##........##..##....##..##.....##.##.....##...##.##...##....##...##.##...##.....##
.##.......##.....##.####..##.##........##..##........##.....##.##.....##..##...##..##........##...##..##.....##
.##.......##.....##.##.##.##.######....##..##...####.##.....##.########..##.....##.##.......##.....##.##.....##
.##.......##.....##.##..####.##........##..##....##..##.....##.##...##...#########.##.......#########.##.....##
.##....##.##.....##.##...###.##........##..##....##..##.....##.##....##..##.....##.##....##.##.....##.##.....##
..######...#######..##....##.##.......####..######....#######..##.....##.##.....##..######..##.....##..#######.
.####.##....##.########.########.########..########....###.....######..########..######.
..##..###...##....##....##.......##.....##.##.........##.##...##....##.##.......##....##
..##..####..##....##....##.......##.....##.##........##...##..##.......##.......##......
..##..##.##.##....##....######...########..######...##.....##.##.......######....######.
..##..##..####....##....##.......##...##...##.......#########.##.......##.............##
..##..##...###....##....##.......##....##..##.......##.....##.##....##.##.......##....##
.####.##....##....##....########.##.....##.##.......##.....##..######..########..######.
"
echo "${amarelo} [1] INTERFACES"
echo "${vermelhoClaro} [2] VOLTAR"
read a
fi
if [ "$numero" = "2" ]
then
clear
sleep 0.3
echo "${azul}
..######...#######..##....##.########.####..######...##.....##.########.....###.....######.....###.....#######.
.##....##.##.....##.###...##.##........##..##....##..##.....##.##.....##...##.##...##....##...##.##...##.....##
.##.......##.....##.####..##.##........##..##........##.....##.##.....##..##...##..##........##...##..##.....##
.##.......##.....##.##.##.##.######....##..##...####.##.....##.########..##.....##.##.......##.....##.##.....##
.##.......##.....##.##..####.##........##..##....##..##.....##.##...##...#########.##.......#########.##.....##
.##....##.##.....##.##...###.##........##..##....##..##.....##.##....##..##.....##.##....##.##.....##.##.....##
..######...#######..##....##.##.......####..######....#######..##.....##.##.....##..######..##.....##..#######.
.########..##.....##..######..########.
.##.....##.##.....##.##....##.##.....##
.##.....##.##.....##.##.......##.....##
.##.....##.#########.##.......########.
.##.....##.##.....##.##.......##.......
.##.....##.##.....##.##....##.##.......
.########..##.....##..######..##.......
"
echo "${verdeClaro} [1] DHCP"
echo "${vermelhoClaro} [2] VOLTAR "
read b
fi
if [ "$numero" = "3" ]
then
clear
sleep 0.3
echo "${azul}
.#######...#######..##....##.########.####..######...##.....##.########.....###.....######.....###.....#######.
.##....##.##.....##.###...##.##........##..##....##..##.....##.##.....##...##.##...##....##...##.##...##.....##
.##.......##.....##.####..##.##........##..##........##.....##.##.....##..##...##..##........##...##..##.....##
.##.......##.....##.##.##.##.######....##..##...####.##.....##.########..##.....##.##.......##.....##.##.....##
.##.......##.....##.##..####.##........##..##....##..##.....##.##...##...#########.##.......#########.##.....##
.##....##.##.....##.##...###.##........##..##....##..##.....##.##....##..##.....##.##....##.##.....##.##.....##
..######...#######..##....##.##.......####..######....#######..##.....##.##.....##..######..##.....##..#######.
..######.....###....##.....##.########.....###...
.##....##...##.##...###...###.##.....##...##.##..
.##........##...##..####.####.##.....##..##...##.
..######..##.....##.##.###.##.########..##.....##
.......##.#########.##.....##.##.....##.#########
.##....##.##.....##.##.....##.##.....##.##.....##
..######..##.....##.##.....##.########..##.....## "
echo "${azulClaro}[1] CRIAÇÃO DOS USUARIOS E GRUPOS"
echo "${purple}[2] ADICIONAR OSUSUARIOS AO GRUPO E CRIAR DIRETORIOS DE COMPARTILHAMENTO"
echo "${verdeClaro}[3] PERMISSAO DOS DIRETORIS E CRIAR SENHAS DOS USUARIOS"
echo "${cyan}[4] CRIAÇÃO DO ARQUIVO SAMBA"
echo "${vermelhoClaro}[5] VOLTAR"
read c
fi
if [ "$numero" = "4"]
then
sleep 0.4
exit
fi
if [ "$a" = "1" ]
then
echo "${amarelo}CONFIGURANDO O ARQUIVO INTERFACES"
sleep 0.5
cd /etc/network/
echo >> interfaces
echo "allow-hotplug enp0s8" >> interfaces
echo "auto enp0s8" >> interfaces
echo "iface enp0s8 inet static" >> interfaces
echo >> interfaces
echo "address 192.168.0.1" >> interfaces
echo "netmask 255.255.255.0" >> interfaces
echo "network 192.168.0.0" >> interfaces
echo "broadcast 192.168.0.255" >> interfaces
systemctl restart networking
echo "${vermelhoClaro}CONFIGURAÇÃO FEITA COM SUCESSO!!"
cd && sh curso.sh
fi
if [ "$a" = "2" ]
then
echo "${verde}VOLTANDO..."
sleep 0.2
clear
cd && sh curso.sh
fi
if [ "$b" = "1" ]
then
echo "${amarelo}CONFIGURANDOO O ARQUIVO INTERFACESv4.."
apt-get update -y
apt-get install isc-dhcp-server -y
cd /etc/default/
mv isc-dhcp-server isc-dhcp-server.original
> isc-dhcp-server
echo 'INTERFACESv4="enp0s8"' >> isc-dhcp-server
echo 'INTERFACESv6=""' >> isc-dhcp-server
echo "${verdeClaro}CONCLUIDO!!"
sleep 0.4
cd ../dhcp
mv dhcpd.conf dhcpd.conf.original
> dhcpd.conf
echo "${cyan}CONFIGURANDO O ARQUIVO DHCPD"
echo "ddns-update-style none;" >> dhcpd.conf
echo "option domain-name-servers 192.168.0.1;" >> dhcpd.conf
echo "default-lease-time 600;" >> dhcpd.conf
echo "max-lease-time 7200;" >> dhcpd.conf
echo "authoritative;" >> dhcpd.conf
echo "log-facility local7;" >> dhcpd,conf
echo "sunnet 192.168.0.0 netmask 255.255.255.0{" >> dhcpd.conf
echo "range 192.168.0.50 192.168.0.100;" >> dhcpd.conf
echo "option routers 192.168.0.1;" >> dhcpd.conf
echo "}" >> dhcpd.conf
echo "${vermelhoClaro}ARQUIVO CONFIGURANDO COM SUCESSO.."
echo "${verdeClaro} E RECOMENDADO DAR REBOOT"
sleep 0.7
cd && sh curso.sh
fi
if [ "$b" = "2" ]
then
echo "${verde} VOLTANDO"
sleep 0.2
clear
cd && sh curso.sh
fi
if [ "$c" = "1" ]
then
echo "${cyanClaro}CRIANDO OS USUARIOS JOÃO, MARIA, PEDRO E TIAGO"
sleep 0.5
adduser -- disabled-login --no-create-home joao
adduser -- disabled-login --no-create-home maria
adduser -- disabled-login --no-create-home pedro
adduser -- disabled-login --no-create-home tiago
echo "${verde}USUARIOS CRIADOS..."
sleep 0.3
echo "${verdeClaro}CRIANDO GRUPOS administrativo e financeiro"
sleep 0.4
addgroup administrativo
addgroup financeiro
echo "${azulClaro}GRUPOS CRIADOS"
sleep 0.3
clear
cd && sh curso.sh
fi
if [ "$c" = "2" ]
then
echo "${cyan}ADCIONANDO OS USUARIOS NO GRUPO"
adduser joao administrativo
adduser maria administrativo
adduser pedro financeiro
adduser tiago financeiro
echo "${amarelo}USUARIOS ADCIONADO AO GRUPOS"
sleep 0.3
echo "${vermelho}CRIANDO DIRETORIOS DE COMPARTILHAMENTO"
cd /home
mkdir -p DADOS/administrativo
cd DADOS
mkdir Financeiro
mkdir lixeira
echo "${azul}DIRETORIOS CRIADOS COM SUCESSO"
sleep 0.3
cd && sh curso.sh
fi
if [ "$c" = "3" ]
then
echo "${purpleClaro}PERMOSSAO DOS DIRETORIOS"
sleep 0.3
cd /home
chmod 775 DADOS
cd DADOS
chgrp administrativo Administrativo/
chgrp financeiro Financeiro/
chmod 770 Administrativo/
chmod 770 Financeiro/
chmod 777 lixeira/
echo "${verde}PERMOSSAO DADAS COM SUCESSO"
sleep 0.3
echo "${azul}INSTALANDO O SAMBA"
apt-get install samba -y
sleep 0.3
echo "${marrom}CRIANDO SENHAS PARA OS USUARIOS"
sleep 0.4
echo "${vermelhoClaro}CRIE UMA SENHA PARA JOAO"
smbpasswd -a joao
echo "${vermelhoClaro}CRIE UMA SENHA PARA MARIA"
smbpasswd -a maria
echo "${vermelhoClaro}CRIE UMA SENHA PARA TIAGO"
smbpasswd -a tiago
echo "${vermelhoClaro}CRIE UMA SENHA PARA PEDRO"
smbpasswd -a pedro
echo "${cyan}USUARIOS COM SUAS SENHAS PRONTAS"
sleep 0.3
cd && sh curso.sh
fi
if [ "$c" = "4" ]
then
echo "${azul}CRIANDO O ARQUIVO SAMBA"
cd /etc/samba/
mv smb.conf smb.conf.original
> smb.conf
echo "[global]" >> smb.conf
echo "netbios name = ServidorDados" >> smb.conf
echo "workgroup = empresax" >> smb.conf
echo "server string = Servidor de Arquivos da Empresax" >> smb.conf
echo "security = user" >> smb.conf
echo "encrypt passwords = yes" >> smb.conf
echo "invalid users = root" >> smb.conf
echo >> smb.conf
echo "[arquivos]" >> smb.conf
echo "path = /home/DADOS" >> smb.conf
echo "writeable = yes" >> smb.conf
echo "available = yes" >> smb.conf
echo "valid users = @administrativo.@financeiro" >> smb.conf
echo "comment = Servidor de Arquivos da Empresa x" >> smb.conf
echo "vfs object = recycle" >> smb.conf
echo "recycle:repository = /home/lixeira" >> smb.conf
echo "recycle:keeptree = yes" >> smb.conf
echo "recycle:exclud *.tmp" >> smb.conf
echo "recycle:exclud_dir = tmp, cache" >> smb.conf
/etc/init.d/samba restart
cd && sh curso.sh
fi
if [ "$c" = "5" ]
then
echo "${vermelho}VOLTANDO .."
sleep 0.2
cd && sh curso.sh
fi
| true
|
95abed7a1f941378f50e7a37f33fb7d37021f52e
|
Shell
|
lyneca/dotfiles
|
/bin/usb
|
UTF-8
| 691
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# usb [port] [state] -- control USB power management via sysfs.
# With a port number and a state (e.g. "on"/"auto"), write the state to
# /sys/bus/usb/devices/1-<port>/power/control (requires sudo).
# With no arguments, list the power state of devices 1-0 .. 1-5.
#
# FIX: replaced the fragile `[ ! $1 = "" ]` test (relied on accidental
# 3-argument test semantics when $1 was unset) with -n "${1:-}", reset
# the per-device fields each iteration so a device missing a sysfs file
# no longer inherits the previous device's value, and used the captured
# $power instead of re-reading the file in the echo.
if [ -n "${1:-}" ]; then
    sudo su --command="echo $2 > /sys/bus/usb/devices/1-$1/power/control"
else
    echo "USB Device Power:"
    for i in 0 1 2 3 4 5; do
        file="/sys/bus/usb/devices/1-$i"
        if [ -d "$file" ]; then
            power='' manuf='' product=''
            [ -f "$file/power/control" ] && power=$(cat "$file/power/control")
            [ -f "$file/manufacturer" ] && manuf=$(cat "$file/manufacturer")
            [ -f "$file/product" ] && product=$(cat "$file/product")
            echo " - device 1-$i: $manuf $product (currently set to $power)";
        fi
    done
fi
| true
|
8753bfe94e3cea3d50be672f5a37862a3346d8ae
|
Shell
|
hfalk/dotfiles
|
/setup/prerequisites.sh
|
UTF-8
| 2,088
| 4.4375
| 4
|
[] |
no_license
|
#!/bin/bash
# SCRIPT_DIR == Same folder as the script is placed in
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${SCRIPT_DIR}/../settings.sh"
source "${SCRIPT_DIR}/utils.sh"
install_xcode_select() {
print_info "Installing Xcode Command Line Tools (This might take a few minutes)"
if type xcode-select >&- && xpath=$( xcode-select --print-path ) && test -d "${xpath}" && test -x "${xpath}" ; then
print_success "Oh! Seems like the Xcode tools were already installed ¯\_(ツ)_/¯\n"
else
touch /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress;
PROD=$(softwareupdate -l |
grep "\*.*Command Line" |
head -n 1 | awk -F"*" '{print $2}' |
sed -e 's/^ *//' |
tr -d '\n')
softwareupdate -i "$PROD" --verbose;
print_result $? "Install Xcode Command Line Tools\n";
fi
}
install_brew() {
print_info "Checking if Homebrew is installed"
if ! which brew > /dev/null; then
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
print_info "Running brew doctor"
brew doctor
print_info "Setting up brew cask"
brew tap caskroom/cask
print_result $? "Not installed. Install Homebrew\n"
else
print_success "Oh! Homebrew was already installed!\n"
fi
}
install_brew_packages() {
print_info "Installing essential homebrew packages:"
print_list $@
brew update
brew upgrade -all
brew install "$@"
print_result $? "Install brew packages\n"
}
install_optional_brew_packages() {
print_info "The following are a collection of OPTIONAL brew packages. They aren't really need for the setup to work, but you might still find them useful:"
print_list $@
printf "\n"
if ask_question "Do you want to install the (completely optional) brew packages mentioned above?"; then
brew install "$@"
print_result $? "Install optional brew packages\n"
fi
}
print_heading "Install prerequisites"
install_xcode_select
install_brew
install_brew_packages $ESSENTIAL_BREW_PACKAGES
install_optional_brew_packages $OPTIONAL_BREW_PACKAGES
| true
|
74b0f46c46fca1892a13391daba98415e416912a
|
Shell
|
alalav1/mooc-grader
|
/scripts/sandbox_available/install-firefox.sh
|
UTF-8
| 856
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Provides firefox browser for in browser testing.
# Tests are recommended to run with python using selenium.webdriver.Firefox()
# The selenium can be installed with pip inside a virtualenv.
#
# Adds the Ubuntu precise universe/security repos if missing, installs
# firefox + dbus-x11, and fetches geckodriver 0.11.1 into /usr/local.
if ! grep --quiet universe /etc/apt/sources.list
then
echo "deb http://archive.ubuntu.com/ubuntu precise universe restricted" >> /etc/apt/sources.list
fi
if ! grep --quiet precise-security /etc/apt/sources.list
then
echo "deb http://archive.ubuntu.com/ubuntu precise-security main universe restricted" >> /etc/apt/sources.list
fi
apt-get -q update
apt-get -qy install firefox dbus-x11
# Install geckodriver once; skipped if the binary is already present.
if [ ! -f /usr/local/geckodriver ]
then
wget --no-check-certificate -O tmp.tar.gz https://github.com/mozilla/geckodriver/releases/download/v0.11.1/geckodriver-v0.11.1-linux64.tar.gz
tar zxvf tmp.tar.gz
rm tmp.tar.gz
mv geckodriver /usr/local/geckodriver
fi
| true
|
5d0a06fdc1f549090b1f866e805c37d11cea6fc0
|
Shell
|
rokudev/samples
|
/media/TrickPlayThumbnailsHLS/scripts/gen_tiles.sh
|
UTF-8
| 3,571
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2019-2020 Roku, Inc. All rights reserved.
#
# References:
# https://stackoverflow.com/questions/2853334/glueing-tile-images-together-using-imagemagicks-montage-command-without-resizin
# http://www.imagemagick.org/script/command-line-options.php#append
# https://github.com/image-media-playlist/spec/blob/master/image_media_playlist_v0_4.pdf
# https://www.tecmint.com/install-imagemagick-in-linux/
# https://stackoverflow.com/questions/39504522/create-blank-image-in-imagemagick
# https://stackoverflow.com/questions/5688576/how-to-use-mod-operator-in-bash
# Pack a directory of numbered thumbnails into COLSxROWS montage tiles,
# padding the last tile with black frames so every tile is full.
if [ $# -lt 6 ]; then
echo "Usage: $0 <thumbnails-dir> <input-prefix> <output-prefix> <resolution> <cols> <rows>"
exit 1
fi
INDIR=$1
INPREFIX=$2
OUTPREFIX=$3
RESOLUTION=$4
COLS=$5
ROWS=$6
# Extract width and height, to force tile width and height in case of odd last tile
#[[ $RESOLUTION =~ ^([0-9]+)[xX]([0-9]+) ]]
#let "WIDTH=${BASH_REMATCH[1]} * $COLS"
#let "HEIGHT=${BASH_REMATCH[2]} * $ROWS"
# All parameters need to be specified
# The $INDIR parameter is a directory containing the thumbnails
# The $INPREFIX parameter is a name without count string, .e.g. in
# The $OUTPREFIX parameter is a name without count string, .e.g. thumb-tile
# The $COLS parameter is tile vertical count, e.g. 5
# The $ROWS parameter is tile horizontal count, e.g. 4
#
# One example of running this command:
# $ ./scripts/gen_tiles.sh /tmp/test-320x180 in 5 4
# The script sorts thumbnails into files and generates the tile using imagemagick montage
# Usually, this script uses input thumbnails generated by gen_thumbs.sh
#
cd $INDIR
# NOTE(review): counts every directory entry (`ls -l` lines minus the
# "total" header) — assumes $INDIR holds only thumbnail files; a stray
# subdirectory would inflate THUMBCOUNT. Confirm against gen_thumbs.sh.
let "THUMBCOUNT=`ls -l |wc -l` - 1"
echo "THUMBCOUNT = $THUMBCOUNT"
TILE=1
STARTTILE=1
cd ..
# Special handling for 1x1 tiles, just copy the files to ${OUTPREFIX}_${TILE}.jpg
if [ ${ROWS} -eq 1 ] && [ ${COLS} -eq 1 ]; then
while [ $TILE -le $THUMBCOUNT ]
do
mv $INDIR/$INPREFIX-`printf %03d ${TILE}`.jpg ${OUTPREFIX}_${TILE}.jpg
let "TILE= $TILE + 1"
done
exit 0
fi
# calculate thumbs need to fill N tiles, result is TILETHUMBS
# and LASTTILE is the tile that may need to be padded
let "TILESIZE=$COLS * $ROWS"
let "TILETHUMBS=$TILESIZE"
let "LASTTILE=1"
while [ $TILETHUMBS -lt $THUMBCOUNT ]
do
let "TILETHUMBS=$TILETHUMBS + $TILESIZE"
let "LASTTILE=$LASTTILE + 1"
done
# generation of padding for the last tile, if padding is needed
# (black frames are written directly into the last tile's work dir)
if [ $TILETHUMBS -gt $THUMBCOUNT ]; then
let "ALIGNCOUNT=$TILETHUMBS - $THUMBCOUNT"
echo "ALIGNCOUNT = $ALIGNCOUNT"
COUNT=1
mkdir tile${LASTTILE}
while [ $COUNT -le $ALIGNCOUNT ]
do
let "TILENUM=$THUMBCOUNT + $COUNT"
let "COUNT=$COUNT + 1"
convert -size ${RESOLUTION} xc:black tile${LASTTILE}/$INPREFIX-${TILENUM}.jpg
done
# finally, reset THUMBCOUNT to include padded tiles
#let "THUMBCOUNT=$THUMBCOUNT + $ALIGNCOUNT"
#echo "THUMBCOUNT = $THUMBCOUNT"
fi
# normal handling of tile generation
# (copy each tile's thumbnails into tileN/, then montage them)
while [ $STARTTILE -lt $THUMBCOUNT ]
do
let "ENDTILE= $STARTTILE + $TILESIZE - 1"
if [ $ENDTILE -gt $THUMBCOUNT ] ; then
ENDTILE=$THUMBCOUNT
fi
echo "create dir tile${TILE}"
if [ ! -d tile${TILE} ]; then
mkdir tile${TILE}
fi
echo "move $INPREFIX-${STARTTILE}.jpg to $INPREFIX-${ENDTILE}.jpg"
for ((i=$STARTTILE; i<=$ENDTILE; i++))
do
cp $INDIR/$INPREFIX-`printf %03d ${i}`.jpg tile${TILE}
done
echo "montage -mode concatenate -tile ${COLS}x${ROWS} tile${TILE}/${INPREFIX}-*.jpg ${OUTPREFIX}_${TILE}.jpg"
montage -mode concatenate -tile ${COLS}x${ROWS} tile${TILE}/${INPREFIX}-*.jpg ${OUTPREFIX}_${TILE}.jpg
let "STARTTILE= $TILE * $TILESIZE + 1"
let "TILE= $TILE + 1"
done
| true
|
fe5f392414b5a203dac9a2e9f38e53afb857fef6
|
Shell
|
mhtess/genex_cogsci2020
|
/analysis/webppl/ais_loop_nComponents.sh
|
UTF-8
| 1,230
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate and submit one SLURM batch job per (model type, condition,
# mixture-component count) combination.  Each job runs the webppl model
# inside a Singularity container on the cluster.
modelTypes=("independent")
conditions=("pedagogical" "2pedagogical" "3pedagogical" "generic" "accidental" "2accidental" "3accidental" "4pedagogical" "4accidental" "pedageneric")
n_components=(1 2 3)
# Job scripts are written under ./.job (assumed to exist alongside
# the .out/ and .err/ log directories referenced below — TODO confirm).
job_directory=$PWD/.job
for modelType in ${modelTypes[@]}; do
for condition in ${conditions[@]}; do
for n_c in ${n_components[@]}; do
job_file="${job_directory}/${condition}_${modelType}_${n_c}.job"
# The multi-line echo writes a complete sbatch script; note that
# \$SLURM_ARRAY_TASK_ID is escaped so it expands at job runtime,
# while ${condition}/${modelType}/${n_c} expand right now.
echo "#!/bin/bash
#SBATCH --ntasks=1
#SBATCH --time=10:00:00
#SBATCH --mem=6G
#SBATCH --cpus-per-task=1
#SBATCH -p cpl
#SBATCH -o .out/log_${condition}_${modelType}_${n_c}_%j.out # Standard output
#SBATCH -e .err/log_${condition}_${modelType}_${n_c}_%j.err # Standard error
#SBATCH --mail-user=tessler@mit.edu # -- use this to send an automated email when:
#SBATCH --mail-type=end # -- your job completes successfully
#SBATCH --mail-type=fail # -- or if your job fails
singularity exec -B /om2/user/tessler/projects/genex_cogsci2020/analysis/webppl/ \
/om2/user/jennhu/singularity_images/webppl0.9.15-conda.sif \
webppl mixture_of_betas-om.wppl --require webppl-csv ${modelType} ${condition} ${n_c} \$SLURM_ARRAY_TASK_ID" > $job_file
# Array of a single task (1-1 step 1); kept as in the original.
sbatch --array=1-1:1 $job_file
done
done
done
| true
|
d7f63365135aab3e4762b8a1c15f0371656747ca
|
Shell
|
m4rkl4r/bl_tools
|
/py3/sbin/bl_cpm_chmodHeaders
|
UTF-8
| 220
| 2.828125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Make every header file under the bl_tools headers tree world-readable,
# then list the results so the new modes can be verified.
#
# Must be run as root: the headers may be owned by other users.
ID=$(id -u)
if ! test "$ID" = 0; then
  # BUG FIX: the original only printed this warning and then carried on
  # as an unprivileged user, silently failing on files it cannot chmod.
  # Abort instead so the tree is never left half-updated.
  echo "$0 must be run as root" >&2
  exit 1
fi
find /unixworks/bl_tools/eng/headers -type f -exec chmod uga+r "{}" \;
find /unixworks/bl_tools/eng/headers -type f -exec ls -ld "{}" \;
| true
|
82d52511950325f624991aaa887c6aa98364c1cb
|
Shell
|
billagee/sandbox
|
/bash/jenkins/boolean_param_in_jenkins_job.sh
|
UTF-8
| 882
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash -eu
# When a boolean parameter is added to a Jenkins job, the job receives
# the literal string "true" or "false" — not a shell boolean.
#
# Example parameter:
#   Name: SAVE_VIDEO_ON_SUCCESS
#   Default value: [ ]
#
# The two case statements below show how each string value behaves in a
# shell build step.
SAVE_VIDEO_ON_SUCCESS=true # What Jenkins does when the checkbox is enabled
echo "SAVE_VIDEO_ON_SUCCESS is '$SAVE_VIDEO_ON_SUCCESS'"
case "$SAVE_VIDEO_ON_SUCCESS" in
  true) echo "The checkbox was enabled." ;;
  *)    echo "The checkbox was disabled." ;;
esac
SAVE_VIDEO_ON_SUCCESS=false # What Jenkins does when the checkbox is disabled
echo "SAVE_VIDEO_ON_SUCCESS is '$SAVE_VIDEO_ON_SUCCESS'"
case "$SAVE_VIDEO_ON_SUCCESS" in
  false) echo "The checkbox was disabled." ;;
  *)     echo "The checkbox was enabled." ;;
esac
| true
|
cc416a3d3f7a859585bbf5219c8f14a68474126c
|
Shell
|
xmaver/Destiny-2-Matchmaking-Firewall
|
/d2firewall.sh
|
UTF-8
| 9,899
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#credits to @BasRaayman and @inchenzo
# Destiny 2 matchmaking firewall: restricts matchmaking to a chosen set
# of console/account IDs by string-matching UDP payloads with iptables.
# Invoked as:
#   d2firewall.sh -a <setup|start|stop|add|remove|sniff|list|update|load|reset>
INTERFACE="tun0"
DEFAULT_NET="10.8.0.0/24"
# ANSI colour escapes used by echo -e throughout.
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
NC='\033[0m'
# -a <action> selects the subcommand handled by the dispatch chain at
# the bottom of the script.
while getopts "a:" opt; do
case $opt in
a) action=$OPTARG ;;
*) echo 'Not a valid command' >&2
exit 1
esac
done
# Flush iptables back to an accept-everything baseline, then re-add the
# NAT/forwarding rules OpenVPN needs — but only when tun0 is present.
reset_ip_tables () {
sudo service iptables restart
#reset iptables to default
sudo iptables -P INPUT ACCEPT
sudo iptables -P FORWARD ACCEPT
sudo iptables -P OUTPUT ACCEPT
sudo iptables -F
sudo iptables -X
#allow openvpn
if ip a | grep -q "tun0"; then
# add the masquerade rule exactly once
if ! sudo iptables-save | grep -q "POSTROUTING -s 10.8.0.0/24"; then
sudo iptables -t nat -A POSTROUTING -s 10.8.0.0/24 -o eth0 -j MASQUERADE
fi
# accept OpenVPN handshakes and forward established VPN traffic
sudo iptables -A INPUT -p udp -m udp --dport 1194 -j ACCEPT
sudo iptables -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
sudo iptables -A FORWARD -s 10.8.0.0/24 -j ACCEPT
fi
}
# Map a platform name ($1) to the payload marker that ngrep/iptables
# string-match on.  Unknown platforms fall back to the PSN marker,
# matching the original default.
get_platform_match_str () {
  case "$1" in
    xbox)  echo "xboxpwid:" ;;
    steam) echo "steamid:" ;;
    *)     echo "psn-4" ;;
  esac
}
# Install packet tools (and optionally OpenVPN) on a fresh VPS:
# enables IPv4 forwarding, disables ufw (plain iptables is used
# instead), and — when tun0 is absent — offers to install OpenVPN and
# publish its client config over HTTP for 15 minutes.
install_dependencies () {
sudo sysctl -w net.ipv4.ip_forward=1 > /dev/null
sudo ufw disable > /dev/null
# If tun0 already exists OpenVPN is installed; skip the prompt.
if ip a | grep -q "tun0"; then
yn="n"
else
echo -e -n "${GREEN}Would you like to install OpenVPN?${NC} y/n: "
read yn
yn=${yn:-"y"}
fi
echo -e "${RED}Installing dependencies. Please wait while it finishes...${NC}"
sudo apt-get update > /dev/null
if [ "$yn" == "y" ]; then
sudo DEBIAN_FRONTEND=noninteractive apt-get -y -q install iptables iptables-persistent ngrep nginx > /dev/null
echo -e "${RED}Installing OpenVPN. Please wait while it finishes...${NC}"
# Unattended run of the angristan/openvpn-install script via env vars.
sudo wget -q https://git.io/vpn -O openvpn-ubuntu-install.sh
sudo chmod +x ./openvpn-ubuntu-install.sh
(APPROVE_INSTALL=y APPROVE_IP=y IPV6_SUPPORT=n PORT_CHOICE=1 PROTOCOL_CHOICE=1 DNS=1 COMPRESSION_ENABLED=n CUSTOMIZE_ENC=n CLIENT=client PASS=1 ./openvpn-ubuntu-install.sh) &
wait;
# Serve the generated client config over nginx, then self-destruct
# the download (and nginx itself) after 15 minutes.
sudo cp /root/client.ovpn /var/www/html/client.ovpn
ip=$(dig +short myip.opendns.com @resolver1.opendns.com)
echo -e "${GREEN}You can download the openvpn config from ${BLUE}http://$ip/client.ovpn"
echo -e "${GREEN}If you are unable to access this file, you may need to allow/open the http port 80 with your vps provider."
echo -e "Otherwise you can always run the command cat /root/client.ovpn and copy/paste ALL of its contents in a file on your PC."
echo -e "It will be deleted automatically in 15 minutes for security reasons."
echo -e "Be sure to import this config to your router and connect your consoles before proceeding any further.${NC}"
nohup bash -c 'sleep 900 && sudo service nginx stop && sudo apt remove nginx -y && sudo rm /var/www/html/client.ovpn' &>/dev/null &
else
sudo DEBIAN_FRONTEND=noninteractive apt-get -y -q install iptables iptables-persistent ngrep > /dev/null
fi
}
# Interactive first-time setup: collects platform, subnet and account
# IDs (typed in or sniffed live off $INTERFACE with ngrep), persists
# them to ./data.txt, and builds the iptables REJECT/ACCEPT chain set
# that only lets the listed IDs match-make with each other.
#
# data.txt layout (read back by the add/remove/sniff/load actions):
#   line 1: platform, line 2: network, line 3: auto-sniff flag ("n"),
#   line 4: ID count, lines 5+: one sniffed ID per line.
setup () {
echo "Setting up firewall rules."
reset_ip_tables
read -p "Enter your platform xbox, psn, steam: " platform
platform=$(echo "$platform" | xargs)
platform=${platform:-"psn"}
reject_str=$(get_platform_match_str $platform)
echo $platform > /tmp/data.txt
read -p "Enter your network/netmask: " net
net=$(echo "$net" | xargs)
net=${net:-$DEFAULT_NET}
echo $net >> /tmp/data.txt
ids=()
read -p "Would you like to sniff the ID automatically?(psn/xbox/steam only) y/n: " yn
yn=${yn:-"y"}
if ! [[ $platform =~ ^(psn|xbox|steam)$ ]]; then
yn="n"
fi
echo "n" >> /tmp/data.txt
#auto sniffer
# Capture IDs from live traffic until any key is pressed; the grep -P
# \K patterns extract just the account-ID portion of each payload.
if [ "$yn" == "y" ]; then
echo -e "${RED}Press any key to stop sniffing. DO NOT CTRL C${NC}"
sleep 1
if [ $platform == "psn" ]; then
ngrep -l -q -W byline -d $INTERFACE "psn-4" udp | grep --line-buffered -o -P 'psn-4[0]{8}\K[A-F0-9]{7}' | tee -a /tmp/data.txt &
elif [ $platform == "xbox" ]; then
ngrep -l -q -W byline -d $INTERFACE "xboxpwid:" udp | grep --line-buffered -o -P 'xboxpwid:[A-F0-9]{24}\K[A-F0-9]{8}' | tee -a /tmp/data.txt &
elif [ $platform == "steam" ]; then
ngrep -l -q -W byline -d $INTERFACE "steamid:" udp | grep --line-buffered -o -P 'steamid:[0-9]{7}\K[0-9]{10}' | tee -a /tmp/data.txt &
fi
# Poll for a keypress once per second, then stop the sniffer.
while [ true ] ; do
read -t 1 -n 1
if [ $? = 0 ] ; then
break
fi
done
pkill -15 ngrep
#remove duplicates
awk '!a[$0]++' /tmp/data.txt > /tmp/temp.txt && mv /tmp/temp.txt /tmp/data.txt
#get number of accounts
snum=$(tail -n +4 /tmp/data.txt | wc -l)
awk "NR==4{print $snum}1" /tmp/data.txt > /tmp/temp.txt && mv /tmp/temp.txt /tmp/data.txt
#get ids and add to ads array with identifier
# ids[] entries have the form "systemN;<sniffed id>"
tmp_ids=$(tail -n +5 /tmp/data.txt)
c=1
while IFS= read -r line; do
idf="system$c"
ids+=( "$idf;$line" )
((c++))
done <<< "$tmp_ids"
else #add ids manually
read -p "How many accounts are you using for this? " snum
if [ $snum -lt 1 ]; then
exit 1;
fi;
echo $snum >> /tmp/data.txt
for ((i = 0; i < snum; i++))
do
num=$(( $i + 1 ))
idf="system$num"
read -p "Enter the sniffed ID for Account $num: " sid
sid=$(echo "$sid" | xargs)
echo $sid >> /tmp/data.txt
ids+=( "$idf;$sid" )
done
fi;
mv /tmp/data.txt ./data.txt
# Global catch-all: reject every matchmaking payload for this platform,
# then punch per-ID ACCEPT chains in front of it below.  reject.rule is
# reused by the start/stop actions.
echo "-m string --string $reject_str --algo bm -j REJECT" > reject.rule
sudo iptables -I FORWARD -m string --string $reject_str --algo bm -j REJECT
n=${#ids[*]}
INDEX=1
# One chain per ID; the last two IDs are restricted to the local $net,
# the rest match from anywhere.
for (( i = n-1; i >= 0; i-- ))
do
elem=${ids[i]}
offset=$((n - 2))
if [ $INDEX -gt $offset ]; then
inet=$net
else
inet="0.0.0.0/0"
fi
IFS=';' read -r -a id <<< "$elem"
sudo iptables -N "${id[0]}"
sudo iptables -I FORWARD -s $inet -p udp -m string --string "${id[1]}" --algo bm -j "${id[0]}"
((INDEX++))
done
INDEX1=1
# Pairwise ACCEPT rules: inside each ID's chain, accept payloads that
# contain any of the OTHER listed IDs (the first two pairings are
# source-restricted to $net).
for i in "${ids[@]}"
do
IFS=';' read -r -a id <<< "$i"
INDEX2=1
for j in "${ids[@]}"
do
if [ "$i" != "$j" ]; then
if [[ $INDEX1 -eq 1 && $INDEX2 -eq 2 ]]; then
inet=$net
elif [[ $INDEX1 -eq 2 && $INDEX2 -eq 1 ]]; then
inet=$net
elif [[ $INDEX1 -gt 2 && $INDEX2 -lt 3 ]]; then
inet=$net
else
inet="0.0.0.0/0"
fi
IFS=';' read -r -a idx <<< "$j"
sudo iptables -A "${id[0]}" -s $inet -p udp -m string --string "${idx[1]}" --algo bm -j ACCEPT
fi
((INDEX2++))
done
((INDEX1++))
done
# Persist so iptables-persistent restores the rules on reboot.
iptables-save > /etc/iptables/rules.v4
echo "Setup is complete and matchmaking firewall is now active."
}
# Subcommand dispatch for -a <action>.
# setup: install deps on first run, then run the interactive setup.
if [ "$action" == "setup" ]; then
if ! command -v ngrep &> /dev/null
then
install_dependencies
fi
setup
# stop: pull the global REJECT rule so matchmaking is unrestricted.
elif [ "$action" == "stop" ]; then
echo "Matchmaking is no longer being restricted."
reject=$(<reject.rule)
sudo iptables -D FORWARD $reject
# start: re-insert the REJECT rule right after the per-ID chains.
elif [ "$action" == "start" ]; then
if ! sudo iptables-save | grep -q "REJECT"; then
echo "Matchmaking is now being restricted."
pos=$(iptables -L FORWARD | grep "system" | wc -l)
((pos++))
reject=$(<reject.rule)
sudo iptables -I FORWARD $pos $reject
fi
# add: append ID(s) to data.txt, bump the count on line 4, then replay
# setup non-interactively by feeding data.txt to its prompts.
elif [ "$action" == "add" ]; then
read -p "Enter the sniffed ID: " id
id=$(echo "$id" | xargs)
if [ ! -z "$id" ]; then
echo $id >> data.txt
n=$(sed -n '4p' < data.txt)
((n++))
sed -i "4c$n" data.txt
read -p "Would you like to enter another ID? y/n " yn
yn=${yn:-"y"}
if [ $yn == "y" ]; then
bash d2firewall.sh -a add
else
bash d2firewall.sh -a setup < data.txt
fi
fi
# remove: drop N IDs from the end of data.txt and rebuild the rules.
elif [ "$action" == "remove" ]; then
list=$(tail -n +5 data.txt | cat -n)
echo "$list"
total=$(echo "$list" | wc -l)
read -p "How many IDs do you want to remove from the end of this list? " num
if [[ $num -gt 0 && $num -le $total ]]; then
head -n -"$num" data.txt > /tmp/data.txt && mv /tmp/data.txt ./data.txt
n=$(sed -n '4p' < data.txt)
n=$((n-num))
sed -i "4c$n" data.txt
bash d2firewall.sh -a setup < data.txt
fi;
# sniff: capture additional IDs live and rebuild from data.txt.
elif [ "$action" == "sniff" ]; then
platform=$(sed -n '1p' < data.txt)
if ! [[ $platform =~ ^(psn|xbox|steam)$ ]]; then
echo "Only psn,xbox, and steam are supported atm."
exit 1
fi
bash d2firewall.sh -a stop
#auto sniff
echo -e "${RED}Press any key to stop sniffing. DO NOT CTRL C${NC}"
sleep 1
if [ $platform == "psn" ]; then
ngrep -l -q -W byline -d $INTERFACE "psn-4" udp | grep --line-buffered -o -P 'psn-4[0]{8}\K[A-F0-9]{7}' | tee -a data.txt &
elif [ $platform == "xbox" ]; then
ngrep -l -q -W byline -d $INTERFACE "xboxpwid:" udp | grep --line-buffered -o -P 'xboxpwid:[A-F0-9]{24}\K[A-F0-9]{8}' | tee -a data.txt &
elif [ $platform == "steam" ]; then
ngrep -l -q -W byline -d $INTERFACE "steamid:" udp | grep --line-buffered -o -P 'steamid:[0-9]{7}\K[0-9]{10}' | tee -a data.txt &
fi
while [ true ] ; do
read -t 1 -n 1
if [ $? = 0 ] ; then
break
fi
done
pkill -15 ngrep
#remove duplicates
awk '!a[$0]++' data.txt > /tmp/data.txt && mv /tmp/data.txt ./data.txt
#update total number of ids
n=$(tail -n +5 data.txt | wc -l)
sed -i "4c$n" data.txt
bash d2firewall.sh -a setup < data.txt
# list: show the stored IDs with their position numbers.
elif [ "$action" == "list" ]; then
tail -n +5 data.txt | cat -n
# update: self-update from the upstream repository.
elif [ "$action" == "update" ]; then
wget -q https://raw.githubusercontent.com/xmaver/Destiny-2-Matchmaking-Firewall/main/d2firewall.sh -O ./d2firewall.sh
chmod +x ./d2firewall.sh
echo -e "${GREEN}Script update complete."
echo -e "Please rerun the initial setup to avoid any issues.${NC}"
# load: rebuild rules from data.txt, or restore the persisted dump.
elif [ "$action" == "load" ]; then
echo "Loading firewall rules."
if [ -f ./data.txt ]; then
bash d2firewall.sh -a setup < ./data.txt
else
iptables-restore < /etc/iptables/rules.v4
fi
# reset: flush everything back to the accept-all baseline.
elif [ "$action" == "reset" ]; then
echo "Erasing all firewall rules."
reset_ip_tables
fi
| true
|
26ca50ec5e6db74b8c1691f5d3cb48987f4935ee
|
Shell
|
joaogbcravo/dotfiles
|
/dot_zshrc.tmpl
|
UTF-8
| 2,230
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/zsh
# Path to your oh-my-zsh installation.
export ZSH=~/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
#ZSH_THEME="robbyrussell"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
HIST_STAMPS="yyyy-mm-dd"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Add wisely, as too many plugins slow down shell startup.
# User configuration
source $ZSH/oh-my-zsh.sh
# Load .shell
export CURRENT_SHELL="zsh"
source ~/.shell
# Per-mode plugin bundles: a mode name listed (one per line) in
# ~/.shell.d/config/activated_modes pulls in the matching
# "_<mode>_plugins" array below.
local _git_plugins=("git")
local _aws_plugins=("aws")
local _docker_plugins=("docker")
local _kubernetes_plugins=("kubernetes")
local _java_plugins=("jenv")
local _node_plugins=("nvm" "nvm-auto")
local _python_plugins=("virtualenv" "virtualenvwrapper")
local _activated_modes_file=~/.shell.d/config/activated_modes
# NOTE(review): oh-my-zsh.sh was already sourced above, so the plugins
# array assembled here may not be consumed by it — confirm this is the
# intended load order.
plugins=()
# Indirect lookup: build the variable name from the mode and eval-expand
# it.  Safe only as long as activated_modes is user-owned content.
while IFS= read -r i; do
local _plugins_var_name="_${i}_plugins"
local _plugins=$(eval echo "\$$_plugins_var_name")
echo "Activating ${i} plugins: $_plugins"
plugins+=($(eval echo "\$$_plugins_var_name"))
done < $_activated_modes_file
echo "Plugins: ${plugins}"
# Enable fzf keybindings/completion when installed.
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
| true
|
f5d22e7274c2d0c45f4182ecc901af10a8a1da7d
|
Shell
|
MTMurphy77/HIRES_REDUX_Swinburne
|
/idlspec2d/bin/.svn/text-base/guidermon_checkin.sh.svn-base
|
UTF-8
| 1,481
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Extract the night's guider data (seeing, etc.) from the processed guider
# files and put the result in a .par file based on the MJD. Then check
# that file in to the speclog product.
#
# Must be run as observer@sos3, designed for crontab.
#
# Requires ~observer/bin/sjd.py and guidermonfile.pro from idlspec2d.
# Execute the supplied command line exactly as given; if it fails,
# announce the failure on both stdout and stderr (useful when cron only
# captures one stream) but keep going, handing the command's exit
# status back to the caller.  Note: $status is deliberately left
# non-local, matching the original behaviour.
run_and_test() {
  "$@"
  status=$?
  # tee mirrors the message to stderr while still writing it to stdout.
  [ "$status" -eq 0 ] || echo "Error with $@" | tee /dev/stderr
  return $status
}
# Need to be able to find the ssh agent in order for svn checkins to work.
export SSH_AUTH_SOCK=/home/observer/sos/control/agent.socket
# cronjobs need the idlspec2d product
source /home/sdss3/products/eups/bin/setups.sh
setup idlspec2d
# for password-less ssh
export SVN_SSH="ssh -i /home/observer/.ssh/id_dsa-sos"
export GUIDE_DIR=/data/gcam/
# Tonight's SDSS MJD, computed by the helper script.
export MJD=`/home/observer/bin/sjd.py`
export SVN_MESSAGE="committing guiderMon for $MJD"
# guidermonfile writes the output .par to $SPECLOG_DIR/$MJD
echo "Running: guidermonfile for $MJD"
# Sadly, IDL sends its startup junk to stderr, so we have to redirect that
# and can't reliably watch IDL's stderr for actual error messages.
{
idl -e "guidermonfile, mjd=getenv('MJD')"
} 2>&1
echo "SVN commit message:" $SVN_MESSAGE
# NOTE(review): cd failure is unchecked; if $SPECLOG_DIR/$MJD does not
# exist the svn commands below run in the wrong directory — confirm
# guidermonfile always creates it.
cd $SPECLOG_DIR/$MJD
run_and_test /usr/local/bin/svn add guiderMon-$MJD.par
run_and_test /usr/local/bin/svn commit -m "$SVN_MESSAGE" guiderMon-$MJD.par
exit 0
| true
|
842456655dce9f9f19cb137a44db3d2c02113e44
|
Shell
|
StathisGln/Ntua-Speech_and_Natural_language_Processing_2017_Greeklish_tranducer
|
/second_script.sh
|
UTF-8
| 3,123
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Greeklish spell-checker experiment: builds lexicon (L) and edit (S1/S2)
# transducers with OpenFst, then counts how many misspelled words are
# corrected by the shortest path through S1 composed with L.
set -x
### ORTHOGRAFOS ###
python3 train_orthografo.py
python3 create_L.py
# create L fst
# (acceptor over the lexicon; determinized + minimized for composition)
fstcompile --isymbols=L.syms --osymbols=L.syms L_fst.txt L.fst
fstdeterminize L.fst L_det.fst
fstminimize L_det.fst L_min.fst
# Create S1 fst
# S1 = I* E I*: identity arcs around a single edit — presumably a
# one-edit-distance transducer; TODO confirm semantics of I/E fsts.
fstcompile --isymbols=L.syms --osymbols=L.syms I_fst.txt I.fst
fstcompile --isymbols=L.syms --osymbols=L.syms E_fst.txt E.fst
fstclosure I.fst I_star.fst
fstconcat I_star.fst E.fst I_E.fst
fstconcat I_E.fst I_star.fst S1.fst
# Create S2 fst
# S2 = S1 E I*: two edits allowed.
fstconcat S1.fst E.fst S1_I_E.fst
fstconcat S1_I_E.fst I_star.fst S2.fst
# Create S1_L
fstminimize --allow_nondet=true S1.fst S1.fst
fstarcsort --sort_type=olabel S1.fst S1.fst
fstcompose S1.fst L_min.fst S1_L.fst
# Create S2_L
fstminimize --allow_nondet=true S2.fst S2.fst
fstarcsort --sort_type=olabel S2.fst S2.fst
fstcompose S2.fst L_min.fst S2_L.fst
# Parallel word lists: lista_wrong_words[t] is the misspelling of
# lista_correct_words[t].
correct_lekseis=$(python3 list_of_correct_words.py)
wrong_lekseis=$(python3 list_of_wrong_words.py)
IFS=' ' read -ra lista_correct_words <<< $correct_lekseis
IFS=' ' read -ra lista_wrong_words <<< $wrong_lekseis
swstes_antistoixies_S1=0
swstes_antistoixies_S2=0
lathos_lekseis=0
for ((t=0; t<${#lista_correct_words[@]}; t++)); do
# echo $t ${lista_wrong_words[$t]} ${lista_correct_words[$t]}
# Only evaluate genuinely misspelled pairs.
if [[ ${lista_correct_words[$t]} == ${lista_wrong_words[$t]} ]]; then
continue;
fi;
lathos_lekseis=$((lathos_lekseis+1))
python3 correct_to_fst.py ${lista_correct_words[$t]}
python3 wrong_to_fst.py ${lista_wrong_words[$t]}
fstcompile --acceptor --isymbols=L.syms --osymbols=L.syms correct_fst.txt correct.fst
fstcompile --acceptor --isymbols=L.syms --osymbols=L.syms wrong_fst.txt wrong.fst
fstrmepsilon correct.fst correct.fst
fstrmepsilon wrong.fst wrong.fst
# Best correction = shortest path of wrong ∘ S1_L, projected onto the
# output side; a non-empty intersection with the correct word means
# the speller fixed this word.
fstcompose wrong.fst S1_L.fst wrong_L1.fst
fstshortestpath wrong_L1.fst shortestS1.fst
fstproject --project_output=true shortestS1.fst solution_S1_output.fst
fstrmepsilon solution_S1_output.fst solution_S1_output_cl.fst
fstintersect solution_S1_output_cl.fst correct.fst inter.fst
fstprint --isymbols=L.syms --osymbols=L.syms solution_S1_output_cl.fst
fstprint --isymbols=L.syms --osymbols=L.syms inter.fst inter.txt
# Non-empty printed intersection ⇒ the correction matched.
if [ $(stat -c%s "inter.txt") != 0 ] ; then
swstes_antistoixies_S1=$((swstes_antistoixies_S1 + 1))
echo "Exoyme antistoixia ${lista_wrong_words[$t]}"
fi;
# fstcompose wrong.fst S2_L.fst wrong_L2.fst
# fstshortestpath wrong_L2.fst shortestS2.fst
# fstproject --project_output=true shortestS2.fst solution_S2_output.fst
# fstrmepsilon solution_S2_output.fst solution_S2_output_cl.fst
# fstintersect solution_S2_output_cl.fst correct.fst inter2.fst
# fstprint --isymbols=L.syms --osymbols=L.syms inter2.fst inter2.txt
# if [ $(stat -c%s "inter.txt") != 0 ] ; then
# swstes_antistoixies_S1=$((swstes_antistoixies_S1 + 1))
# echo "Exoyme antistoixia ${lista_wrong_words[$t]}"
# fi;
done
echo "Exw $swstes_antistoixies_S1 antistoixies me to S1"
# echo "kai $swstes_antistoixies_S2 antistoixies me to S2"
echo "apo tis $lathos_lekseis lathos lekseis"
| true
|
1fe23f9a84e201914c55ef29ab10c73c47847c32
|
Shell
|
groubaud101/minitalk
|
/mini.sh
|
UTF-8
| 132
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build minitalk, then send a generated message of the requested length
# to the server identified by PID.
#
# Usage: ./mini.sh <text length> <server PID>
make
# FIX: quote the positional parameters so empty/space-containing values
# cannot word-split or glob inside the test; also fix the "lenght" typo
# in the usage message.
if [ -z "$1" ] || [ -z "$2" ]
then
	echo "./client <text length> <PID>"
	exit 1
fi
# a.out prints a test payload of length $1; quote it so the client
# receives it as a single argument even if it contains whitespace.
ARG=$(./a.out "$1")
./client "$2" "$ARG"
| true
|
af69763661ae9679e110420dc39fb8f2b549923e
|
Shell
|
khemlalnirmalkar/ottotype
|
/bin/functions/clean.sh
|
UTF-8
| 273
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Normalize raw Illumina FASTQ filenames in the current directory:
# strip the lane/chunk suffixes (_L001, _001) and rewrite read-pair
# markers _1/_2 as _S01_R1/_S01_R2.
clean() {
# Top-level *fastq.gz entries only (regular files or symlinks).
# NOTE(review): 'rename' here must be the Perl rename, which reads
# filenames from stdin and takes one multi-substitution expression
# (the s///'s below are a single quoted argument spanning two lines) —
# confirm the util-linux rename is not what is installed.
find -maxdepth 1 -name "*fastq.gz" -type f -or -type l  | \
rename 's/_L001//; s/_001//; s/_1.fastq/_S01_R1.fastq/ ;
s/_2.fastq/_S01_R2.fastq/'
# $log_file is expected to be defined by the script that sources this
# function — TODO confirm.
echo -e "\n\n# The filenames were renamed with the ${FUNCNAME[0]} function" &>> $log_file
}
| true
|
9a5d5966fce627822f8fcf07c833289e1f020047
|
Shell
|
maikmerten/ogv.js
|
/compileOggFlash.sh
|
UTF-8
| 368
| 2.59375
| 3
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Cross-compile libogg as a static library with the Adobe CrossBridge
# SDK (Flash target).  Expects ../libogg sources relative to build/.
export PATH="$HOME/crossbridge/sdk/usr/bin:$PATH"
export CFLAGS=-O4
dir=`pwd`
# set up the build directory
# (build/flash/root is the install prefix, build/flash/libogg the
# out-of-tree build dir)
mkdir build
cd build
mkdir flash
cd flash
mkdir root
mkdir libogg
cd libogg
# finally, run configuration script
../../../libogg/configure --prefix="$dir/build/flash/root" --disable-shared
# compile libogg
make
make install
cd ..
cd ..
cd ..
| true
|
c527b78ef91dc2f1f6f6380cff40e121cc6d8cbe
|
Shell
|
TOoSmOotH/securityonion-docker-ng
|
/so-core/bin/so-elastic-start
|
UTF-8
| 937
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Bring up the Security Onion Elastic stack: remove stale containers,
# open the firewall, then start each service container in dependency
# order.  The so-elastic-start-* helpers are sourced (not executed), so
# they share this shell's environment from the two files below.
. /opt/so/bin/so-elastic-common
. /opt/so/conf/securityonion.conf
# Need to remove the existing containers before starting new ones.
# so-elastic-stop does run so-elastic-remove after stopping containers,
# but we need to run it again here just in case.
# The common scenario would be a user rebooting without first
# running so-elastic-stop.
/opt/so/bin/so-elastic-remove
# Configure firewall
/opt/so/bin/so-elastic-configure-ufw
echo
echo "Starting containers:"
echo
# Start so-freqserver
. /opt/so/bin/so-elastic-start-freqserver
# Start so-domainstats
. /opt/so/bin/so-elastic-start-domainstats
# Start so-elasticsearch
. /opt/so/bin/so-elastic-start-elasticsearch
# Start so-logstash
. /opt/so/bin/so-elastic-start-logstash
# Start so-kibana
. /opt/so/bin/so-elastic-start-kibana
# Start so-elastalert
. /opt/so/bin/so-elastic-start-elastalert
# Start so-curator
. /opt/so/bin/so-elastic-start-curator
echo
| true
|
2d163e1543223c5b91ff670851bf85357b818814
|
Shell
|
FauxFaux/debian-control
|
/n/nut/nut-cgi_2.7.4-8_amd64/postrm
|
UTF-8
| 615
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh -e
# nut-cgi postrm maintainer script: clean up leftovers after removal.
case "$1" in
    remove)
        # Earlier package versions wrongly shipped *.sample copies of
        # these config files; drop any that are still around.
        for file in upsset.conf hosts.conf upsstats.html upsstats-single.html ; do
            sample="/etc/nut/${file}.sample"
            # Keep the -f guard: under 'sh -e' an unconditional rm on a
            # non-regular entry would abort the whole script.
            if [ -f "$sample" ] ; then
                rm -f "$sample"
            fi
        done
        ;;
    # All remaining maintainer-script phases need no cleanup here.
    purge|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
        ;;
    *)
        echo "$0: incorrect arguments: $*" >&2
        exit 1
        ;;
esac
exit 0
| true
|
e1bb00f18cb0e6d12bcc097420e1eff3f429ceca
|
Shell
|
KhondokerTanvirHossain/bash-script
|
/dev5.sh
|
UTF-8
| 85
| 2.734375
| 3
|
[] |
no_license
|
# dev5: print the multiples of 5 from 5 up to and including 100,
# one per line (the loop stops once the counter reaches 105).
dev5(){
	local n=5
	# BUG FIX: the original condition `[[ !((n -eq 105)) ]]` relied on
	# malformed [[ ]] syntax, and `printf "n"` printed the literal
	# letter n — print the counter value with a newline instead.
	while [ "$n" -ne 105 ]; do
		printf '%d\n' "$n"
		n=$(($n+5))
	done
}
dev5
| true
|
1a88a438d46d12c7c48c205576e064a0671e9465
|
Shell
|
bias/basecamp
|
/base
|
UTF-8
| 988
| 3.140625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# NOTE api example from website
# curl -H 'Accept: application/xml' -H 'Content-Type: application/xml' -u 05843cba04869e814b758270cc28ccefca09ea9d:X 'https://annosum.basecamphq.com/todo_lists.xml?responsible_party=6606027' | grep -G content
# Basecamp REST API command-line interface: fetch the user's todo lists
# and strip the XML down to names/contents/ids.
#
# basic HTTP authentication no password
# NOTE(review): API token hardcoded in the script — consider reading it
# from the environment or a chmod-600 file instead.
API_token="-u 05843cba04869e814b758270cc28ccefca09ea9d:X"
# account name
Account="annosum"
# user id
User=6606027
Site="https://$Account.basecamphq.com"
usage() {
cat << EOF
usage: $0 [-(h,p)]
Basecamp REST API commandline interface
EOF
}
# BUG FIX: the option string and the '?' case pattern used typographic
# quotes (“hp:”) and an unescaped '?', so getopts treated the curly
# quotes as option characters and '?' matched any single-char value.
while getopts "hp:" OPTION; do
case $OPTION in
h)
usage
exit 1
;;
p)
PASSWD=$OPTARG
;;
\?)
usage
exit
;;
esac
done
#if [[ $1 == "lists" ]]; then
Query="todo_lists.xml?responsible_party=$User"
QueryKey=content
#fi
# NOTE(review): the bare word "GET" below is treated by curl as an extra
# URL argument, not an HTTP method (that would be -X GET) — confirm it
# is intentional before removing.
curl 2>/dev/null GET $API_token "$Site/$Query" |
grep -G -e '<name\|content\|<id type' |
sed -e 's/<[^>]*>//g' -e 's/\&\;/\&/g' -e 's/\"\;/"/g'
| true
|
3b3fd95d9449684d1b7d10fa49584650f0435b83
|
Shell
|
StackCanary/os-filesystem
|
/testing/tests/1.sh
|
UTF-8
| 149
| 2.609375
| 3
|
[] |
no_license
|
# Create three six-level-deep directory trees under the base directory
# given as $1; exit 1 as soon as any mkdir fails.
#
# FIX: "$1" is now quoted, so an empty argument or a base path
# containing spaces/globs cannot word-split into the wrong targets.
if ! mkdir -p "$1/a/b/c/d/e/f"; then
    exit 1
fi
if ! mkdir -p "$1/g/h/i/j/k/l"; then
    exit 1
fi
if ! mkdir -p "$1/m/n/o/p/q/r"; then
    exit 1
fi
| true
|
84b58097fa2c3a6609cee0cd67b178581cb4643e
|
Shell
|
red1028/vicsnf-shell
|
/c_vnf/04.config_icn_dtn_vnf.sh
|
UTF-8
| 9,506
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Configure the ICN+DTN VNFs (NFD + NLSR): after a confirmation prompt,
# the real work is delegated to connect_line.sh and the elapsed time is
# reported.  NOTE: everything after the unconditional `exit 0` below is
# unreachable legacy code, apparently superseded by connect_line.sh.
bold=$(tput bold)
normal=$(tput sgr0)
####################################
## input nfd name and manage network
####################################
echo ""
read -p "${bold}Set the ICN + DTN VNF function. Do you want to proceed?(n or y[default]) :${normal}" is_process
# Empty answer defaults to yes.
if [ -z $is_process ]; then
is_process='y'
fi
if [ "$is_process" = "y" -o "$is_process" = "Y" ]; then
start_time="$(date -u +%s)"
####################################
## IP setting of VNF
####################################
echo ""
echo "${bold}#####################################################"
echo "## ICN+DTN VNF (NFD+NLSR) Configuration."
echo "#####################################################${normal}"
bash /root/pod/vicsnf/connect_line.sh
end_time="$(date -u +%s)"
elapsed=$(($end_time-$start_time))
echo ""
echo "${bold}All ICN+DTN VNF Setting completion time: $elapsed(s) ${normal}"
fi
# Script always ends here; the remainder of the file is dead code.
exit 0
# NOTE(review): UNREACHABLE — the script terminates at the
# unconditional `exit 0` above; this legacy section wrote each VNF's
# /etc/network/interfaces over ssh (the payload between the single
# quotes runs on the remote host with the export-ed variables).
sleep 4
rm -f /root/.ssh/known_hosts
for key in ${!vnf_names[@]}
do
ssh -o StrictHostKeyChecking=no root@${vnf_mgmt_ips[$key]} \
export vnf_name=${vnf_names[$key]} \
export vnf_alias=${vnf_aliases[$key]} \
export vnf_mgmt_ip=${vnf_mgmt_ips[$key]} \
export vnf_top_ip=${vnf_top_ips[$key]} \
export vnf_bottom_ip=${vnf_bottom_ips[$key]} \
'
cat <<EOF> /etc/network/interfaces
auto lo
iface lo inet loopback
auto ens3
iface ens3 inet static
address $vnf_mgmt_ip/22
gateway 192.168.100.1
dns-nameservers 8.8.8.8
auto ens4
iface ens4 inet static
address $vnf_top_ip/24
auto ens5
iface ens5 inet static
address $vnf_bottom_ip/24
EOF
ifdown ens4 2>/dev/null
ifdown ens5 2>/dev/null
ifup ens4
ifup ens5
echo -e " \033[1m[$vnf_alias]IP address setup completed.\033[0m"
echo -e " ==>ens3:$vnf_mgmt_ip, ens4:$vnf_top_ip, ens5:$vnf_bottom_ip"
sleep 1
'
done
####################################
## Ping test of VNF
####################################
# NOTE(review): UNREACHABLE (dead code after the script's `exit 0`):
# ping each VNF's neighbours from inside the VNF over ssh; the ':'
# separated neighbour lists are split into arrays on the remote side.
echo ""
echo ""
echo "${bold}#############################################################################"
echo "## Proceed with VNF's test of network connectivity between neighboring nodes."
echo "#############################################################################${normal}"
sleep 4
for key in ${!vnf_names[@]}
do
ssh -o StrictHostKeyChecking=no root@${vnf_mgmt_ips[$key]} \
export vnf_alias=${vnf_aliases[$key]} \
export vnf_neighbor_ip=${vnf_neighbor_ips[$key]} \
export vnf_neighbor_name=${vnf_neighbor_names[$key]} \
'
vnf_neighbor_ips=(${vnf_neighbor_ip//:/ })
vnf_neighbor_names=(${vnf_neighbor_name//:/ })
for key in ${!vnf_neighbor_ips[@]}
do
echo -e " \033[1mPing test from [$vnf_alias] to [${vnf_neighbor_names[$key]}].\033[0m"
ping -c 1 ${vnf_neighbor_ips[$key]} | head -n 2
sleep 1
done
'
done
####################################
## change hosts and nfd, nlsr conf
####################################
# NOTE(review): UNREACHABLE (dead code after the script's `exit 0`):
# on each VNF, rewrite /etc/hosts, fill in the nlsr.conf/nfd.conf
# templates (the %placeholders% are replaced via sed) and patch the
# PyNDN chrono-chat example.  Everything between the single quotes runs
# remotely.
echo ""
echo ""
echo "${bold}#####################################################"
echo "## Change hosts and nfd config file."
echo "#####################################################${normal}"
sleep 4
for key in ${!vnf_names[@]}
do
ssh -o StrictHostKeyChecking=no root@${vnf_mgmt_ips[$key]} \
export vnf_neighbor_ip=${vnf_neighbor_ips[$key]} \
export vnf_neighbor_name=${vnf_neighbor_names[$key]} \
export vnf_alias=${vnf_aliases[$key]} \
export vnf_site_route_name=${vnf_site_route_names[$key]} \
export vnf_neighbor_route=${vnf_neighbor_routes[$key]} \
export vnf_prefix=${vnf_prefixes[$key]} \
export is_ndnchat="$is_ndnchat" \
'
vnf_neighbor_ips=(${vnf_neighbor_ip//:/ })
vnf_neighbor_names=(${vnf_neighbor_name//:/ })
vnf_site_route_names=(${vnf_site_route_name//:/ })
vnf_neighbor_routes=(${vnf_neighbor_route//:/ })
vnf_prefixes=(${vnf_prefix//:/ })
echo "127.0.0.1 localhost $vnf_alias" > /etc/hosts
for key in ${!vnf_neighbor_ips[@]}
do
echo "${vnf_neighbor_ips[$key]} ${vnf_neighbor_names[$key]}" >> /etc/hosts
done
#################
## change nlsr config
cp $HOME/nlsr_default.conf /usr/local/etc/ndn/nlsr.conf
filename=/usr/local/etc/ndn/nlsr.conf
sed -i "s/\%site_name\%/${vnf_site_route_names[0]}/" $filename
sed -i "s/\%router_name\%/${vnf_site_route_names[1]}/" $filename
NEIGHBOR_ARRY=""
for key in ${!vnf_neighbor_ips[@]}
do
temp_neighbor_info=(${vnf_neighbor_routes[$key]//\#/ })
link_cost=$(shuf -i 25-30 -n 1)
NEIGHBOR_ARRY=$"$NEIGHBOR_ARRY\n \
neighbor\n \
{\n \
name \/ndn\/${temp_neighbor_info[0]}\/%C1.Router\/${temp_neighbor_info[1]}\n \
face-uri udp4:\/\/${vnf_neighbor_ips[$key]}\n \
link-cost $link_cost\n \
} \
"
done
PREFIX_ARRAY=""
for key in ${!vnf_prefixes[@]}
do
PREFIX_ARRAY=$"$PREFIX_ARRAY\n \
prefix \/ndn\/${vnf_prefixes[$key]} \
"
done
sed -i "s/\%neighbor_arry\%/$NEIGHBOR_ARRY/" $filename
sed -i "s/\%prefix_array\%/$PREFIX_ARRAY/" $filename
echo -e "Name-based prefix settings for neighboring nodes ==> $NEIGHBOR_ARRY"
echo -e "Name prefix advertisement of ICN node ==> $PREFIX_ARRAY"
######################
## change nfd_default.conf
cp $HOME/nfd_default.conf /usr/local/etc/ndn/nfd.conf
LOCALHOP_SECURITY=""
if [[ "$is_ndnchat" = "y" ]]; then
LOCALHOP_SECURITY=$"$LOCALHOP_SECURITY\n \
localhop_security\n \
{\n \
trust-anchor\n \
{\n \
type any\n \
}\n \
} \
"
echo -e "localhop_security setting for NDN Pub/Sub test ==> $LOCALHOP_SECURITY"
fi
sed -i "s/\%default_tcp_subnet\%/subnet\ 10\.10\.0\.0\/16/" /usr/local/etc/ndn/nfd.conf
sed -i "s/\%localhop_security\%/$LOCALHOP_SECURITY/" /usr/local/etc/ndn/nfd.conf
########################
## change test_chrono_chat.py
cp $HOME/test_chrono_chat.py $HOME/PyNDN2/examples/test_chrono_chat.py
sed -i "s/\%default_hub_prefix\%/\/ndn\/${vnf_prefixes[0]}/" $HOME/PyNDN2/examples/test_chrono_chat.py
echo -e " \033[1m[$vnf_alias]NFD settings and host file change complete.\033[0m"
'
done
####################################
## NFD AND NLRS START
####################################
# NOTE(review): UNREACHABLE (dead code after the script's `exit 0`):
# restart NFD on each VNF and — unless the chat test is selected —
# launch NLSR; a ps|grep|wc count is used as a crude liveness check.
echo ""
echo ""
echo "${bold}#####################################################"
echo "## Starting the NFD and NLSR Daemons."
echo "#####################################################${normal}"
sleep 4
for key in ${!vnf_names[@]}
do
ssh -o StrictHostKeyChecking=no root@${vnf_mgmt_ips[$key]} \
export vnf_alias=${vnf_aliases[$key]} \
export is_ndnchat="$is_ndnchat" \
'
nfd-stop 2>/dev/null
sleep 1
nfd-start 1> nfd.out 2>&1
sleep 2
pid=`ps -ef | grep nfd | grep -v grep | wc -l`
if [[ "$pid" -ne 0 ]]; then
if [[ "$is_ndnchat" = "y" ]]; then
echo -e " \033[1m== [$vnf_alias]Started NFD ==\033[0m"
else
nlsr -f /usr/local/etc/ndn/nlsr.conf 1> nlsr.out 2>&1 &
pid=`ps -ef | grep nlsr | grep -v grep | wc -l`
if [[ "$pid" -ne 0 ]]; then
echo -e " \033[1m== [$vnf_alias]Started NFD and NLSR ==\033[0m"
fi
fi
fi
'
done
####################################
## Check nlsr process
####################################
# NOTE(review): UNREACHABLE (dead code after the script's `exit 0`):
# print each VNF's nlsr process line for a visual sanity check.
echo ""
echo ""
echo "${bold}#####################################################"
echo "## NFD+NLSR process check"
echo "#####################################################${normal}"
sleep 4
for key in ${!vnf_names[@]}
do
ssh -o StrictHostKeyChecking=no root@${vnf_mgmt_ips[$key]} \
export vnf_name=${vnf_names[$key]} \
export vnf_mgmt_ip=${vnf_mgmt_ips[$key]} \
export vnf_alias=${vnf_aliases[$key]} \
'
echo =============================================================================================
echo "$vnf_alias($vnf_mgmt_ip) [$(ps -ax | grep -v grep | grep nlsr)]"
echo =============================================================================================
'
done
####################################
## Add face information
####################################
echo ""
echo ""
echo "${bold}#####################################################"
echo "## Added face information."
echo "#####################################################${normal}"
sleep 4
for key in ${!vnf_names[@]}
do
ssh -o StrictHostKeyChecking=no root@${vnf_mgmt_ips[$key]} \
export vnf_alias=${vnf_aliases[$key]} \
export vnf_neighbor_ip=${vnf_neighbor_ips[$key]} \
export vnf_neighbor_name=${vnf_neighbor_names[$key]} \
'
vnf_neighbor_ips=(${vnf_neighbor_ip//:/ })
vnf_neighbor_names=(${vnf_neighbor_name//:/ })
for key in ${!vnf_neighbor_ips[@]}
do
echo -e " \033[1mAdd face information for neighboring node.([$vnf_alias] to [${vnf_neighbor_names[$key]}]).\033[0m"
nfdc face create udp4://${vnf_neighbor_ips[$key]}
sleep 0.5
sleep 2
echo ""
echo -e " \033[1mFIB(Forwarding Infomation Base) List\033[0m"
nfdc fib list | sort | grep Router
echo ""
echo ""
done
'
done
echo ""
echo ""
echo "${bold}#####################################################"
echo "## Configuration of ICN + DTN VNF is completed."
echo "#####################################################${normal}"
end_time="$(date -u +%s)"
elapsed=$(($end_time-$start_time))
elapsed=$(($elapsed-60)) #42
one_vnf_elapsed=$(($elapsed/${#vnf_names[@]}))
add_nlsr="NFD+NLSR"
elapsed=144
one_vnf_elapsed=16
if [ "$is_ndnchat" = "y" ]; then
elapsed=99
one_vnf_elapsed=11
add_nlsr="NFD+NLSR"
fi
echo ""
echo ""
echo "${bold}Total number of ICN+DTN VNF: ${#vnf_names[@]} VNFs${normal}"
echo "${bold}All ICN+DTN VNF Setting completion time: $elapsed(s), 1 VNF about $one_vnf_elapsed(s) ${normal}"
fi #--> finish
#192.168.100.158 ndnpingserver -t ndn:/ndn/seoul/gwanak/police/client
#192.168.100.157 ndnping ndn:/ndn/seoul/gwanak/police/client
| true
|
db6e0d832138b3d74e7a9379c0100a9464825f0d
|
Shell
|
kulve/pleco
|
/debian/pleco.init
|
UTF-8
| 1,889
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
### BEGIN INIT INFO
# Provides: pleco
# Required-Start: $network $remote_fs $syslog
# Required-Stop: $network $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 1
# Short-Description: Start Pleco Remote Controllable Vehicle
### END INIT INFO

PATH=/sbin:/bin:/usr/sbin:/usr/bin
. /lib/lsb/init-functions

# Wrapper that keeps the pleco slave running; PIDFILE tracks the wrapper.
DAEMON=/usr/bin/pleco-slave-restart.sh
PIDFILE=/var/run/pleco-slave.pid
test -x "$DAEMON" || exit 5

# Default relay host; can be overridden from /etc/default/pleco.
RELAYHOST="humboldt.pingu.fi"
if [ -r /etc/default/pleco ]; then
  . /etc/default/pleco
fi
# Expanded unquoted below on purpose so several options can word-split.
PLECO_OPTS="$RELAYHOST"

LOCKFILE=/var/lock/pleco-slave   # NOTE(review): defined but never used here

RUNASUSER=root
# uid:gid of RUNASUSER; '|| true' keeps the script alive if the lookup fails.
UGID=$(getent passwd "$RUNASUSER" | cut -f 3,4 -d:) || true

case $1 in
  start)
    log_daemon_msg "Starting Pleco slave" "pleco"
    if [ -z "$UGID" ]; then
      log_failure_msg "user \"$RUNASUSER\" does not exist"
      exit 1
    fi
    # $PLECO_OPTS intentionally unquoted: it may carry multiple arguments.
    start-stop-daemon --start --background --quiet --oknodo --pidfile "$PIDFILE" --make-pidfile --startas "$DAEMON" -- $PLECO_OPTS
    status=$?
    log_end_msg $status
    ;;
  stop)
    log_daemon_msg "Stopping Pleco slave" "pleco"
    start-stop-daemon --stop --quiet --oknodo --pidfile "$PIDFILE"
    log_end_msg $?
    rm -f "$PIDFILE"
    ;;
  restart|force-reload)
    $0 stop && sleep 2 && $0 start
    ;;
  try-restart)
    # Restart only if the slave is currently running.
    if $0 status >/dev/null; then
      $0 restart
    else
      exit 0
    fi
    ;;
  status)
    status_of_proc "$DAEMON" "Pleco slave"
    ;;
  *)
    echo "Usage: $0 {start|stop|restart|try-restart|status}"
    exit 2
    ;;
esac
| true
|
c2bc77932418382447aa56e48533b566a99a142e
|
Shell
|
Dimitri-Berardi-Bose/adbCompanion
|
/services/adb_log
|
UTF-8
| 849
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Interactive wrapper around `adblog` for one adb device ($1): optionally
# greps the live log, and on exit offers to open the last saved log.

deviceID=$1

# Defining Directories
homeDirectory="$HOME"
binDirectory="$homeDirectory/bin/"
adbCompanionDir="$binDirectory/adbCompanion/"
adbServices="$adbCompanionDir/services/"
last_log="$adbServices/last_log"

# EXIT handler: ask (via zenity) whether to open the last log.
# zenity --question exits 0 for "Yes" and 1 for "No".
finish() {
lastLog=$(zenity --question --ellipsize --icon-name="document-open" --text "\nWould you like to \nopen the last log?" 2>/dev/null);
case $? in
"0")
# Quote the path so it survives spaces in $HOME.
"$last_log"
;;
"1")
;;
esac
}

# Ask whether to filter the log, then run adblog with an optional grep.
getLogs() {
grepBool=$(zenity --question --ellipsize --text "\nWould you like to \ngrep this log?" 2>/dev/null);
case $? in
"0")
input=$(zenity --title "adb_log" --entry --text "Enter the parameters, which will be implemented as: 'adblog | grep <input>'" 2>/dev/null)
echo "$input"
# Quote the device id so unusual characters are passed through intact.
adblog -s "$deviceID" -l | grep "$input"
;;
"1")
adblog -s "$deviceID" -l
;;
esac
}

trap finish EXIT
getLogs
| true
|
ee986dc248a9b7ff1ea606b413e9d95b3810b67c
|
Shell
|
renfeihn/RenPeoject
|
/Example/src/main/resources/baseConfig/test/sysctl_cfg.sh
|
WINDOWS-1252
| 562
| 2.609375
| 3
|
[] |
no_license
|
# Kernel sysctl / user-limit tuning applied to test.txt.
# (The original header and inline comments were mojibake; intent
# reconstructed from the commands themselves.)
vm_max_count="vm.max_map_count"
vm_swappiness="vm.swappiness"

# Ensure "key=value" is present in test.txt: replace an existing entry,
# otherwise append one.  $1 = key, $2 = value.
set_sysctl_value() {
  if grep -q "$1" test.txt; then
    sed -i "s#$1=.*#$1=$2#g" test.txt
  else
    sed -i "\$a $1=$2" test.txt
  fi
}

# Apply every setting; factored into a function so the logic is reusable.
apply_sysctl_cfg() {
  set_sysctl_value "$vm_max_count" 655360
  set_sysctl_value "$vm_swappiness" 1
  # open-file-descriptor limits
  sed -i '$a * soft nofile 65536\n* hard nofile 65536\n' test.txt
  # process-count limits
  sed -i '$a * soft nproc 4096\n' test.txt
}

apply_sysctl_cfg
| true
|
3472f508052c72ecd9fcacde256567a1e7dd59c7
|
Shell
|
jions7ihj/ELK_Tutorial
|
/4 elasticsearch/clear.sh
|
UTF-8
| 506
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Elasticsearch housekeeping: delete old Logstash indices and force-merge
# ("optimize") the inverted-index segments to keep searches fast.
# (Header and inline comments translated from Chinese.)
#
# Delete indices older than 7 days.
# BUG FIX: the trailing "// ..." notes were previously passed to curator as
# extra command-line arguments (shell has no // comments); moved them here.
# NOTE(review): "--index .logstash-" (leading dot) looks inconsistent with
# the "--prefix logstash-" used below — confirm the intended index name.
curator --host 127.0.0.1 delete indices --index .logstash- --older-than 7 --time-unit days --timestring %Y.%m.%d
# Merge the daily indices down to one segment to improve search efficiency.
curator --timeout 36000 --host 127.0.0.1 optimize --max_num_segments 1 indices --older-than 1 --time-unit days --timestring '%Y.%m.%d' --prefix logstash-
echo "delete successed!"
| true
|
b4c5a6829c52ae4200a51d779f08be3a37625f49
|
Shell
|
holphi/Shell_Study
|
/Structured_cmds/test6.sh
|
UTF-8
| 200
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Testing the test command: `test STRING` is true when STRING is non-empty.
#
my_variable="Full"
# Quote the expansion: unquoted, an empty value would make `test` succeed
# vacuously and a multi-word value would be a syntax error.
if test "$my_variable"
then
echo "The $my_variable expression returns a True"
else
echo "The $my_variable expression returns a False"
fi
| true
|
8fb7e2f519edaf57040fe6806aa7397daebd4a74
|
Shell
|
peez80/docker-openhab
|
/files/start-openhab-docker.sh
|
UTF-8
| 611
| 3.625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh

# Link the configured openHAB binding jars into the addons directory,
# then start openHAB (in debug mode when $debug is set).

# link bindings
BINDINGS_CONFIG_FILE=$OPENHAB_DIR/configurations/bindings.list
BINDINGS_CONFIG_DIR=$OPENHAB_DIR/addons

# set current user as owner of config files
chown -R "$(whoami)" "$OPENHAB_DIR/configurations"

if [ -f "$BINDINGS_CONFIG_FILE" ]
then
# Link each line of the bindings file; IFS= and -r keep the name verbatim.
# NOTE(review): BINDINGS_DIR is not set in this script — presumably it
# comes from the container environment; confirm.
while IFS= read -r line
do
BINDING_FILE_NAME=$line-$OPENHAB_VERSION.jar
ln -s "$BINDINGS_DIR/$BINDING_FILE_NAME" "$BINDINGS_CONFIG_DIR/$BINDING_FILE_NAME"
done < "$BINDINGS_CONFIG_FILE"
fi

# Now start openHab
if [ -n "$debug" ]; then
/opt/openhab/start_debug.sh
else
/opt/openhab/start.sh
fi
| true
|
09477470b78c9b536fd2a81cba5312d1ef7b429e
|
Shell
|
cloudfoundry/garden-runc-release
|
/packages/garden-idmapper/packaging
|
UTF-8
| 451
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -euo pipefail

# BOSH package compile script: stage the package sources, then build the
# idmapper helper binaries with the Go toolchain shipped in another package.

mkdir -p "${BOSH_INSTALL_TARGET}/src"
# Stage everything into the install target, then pull ./src back so the
# build below can run from a local src/ tree.
mv * "${BOSH_INSTALL_TARGET}/src"
mv "${BOSH_INSTALL_TARGET}/src" .

# Use the Go toolchain from the golang-*-linux BOSH package.
source /var/vcap/packages/golang-*-linux/bosh/compile.env

mkdir -p "${BOSH_INSTALL_TARGET}/bin"
export GOBIN="${BOSH_INSTALL_TARGET}/bin"

pushd src/idmapper
# Build each helper into $GOBIN and set the setuid bit (u+s) on it —
# presumably these need elevated privileges at runtime; confirm.
for binary in newuidmap newgidmap maximus; do
go install "./cmd/${binary}"
chmod u+s "${BOSH_INSTALL_TARGET}/bin/${binary}"
done
popd
| true
|
bc8d03805e2c70a085ef361fc7556368fb2efd3c
|
Shell
|
wesleynunes/instalador_programas_ubuntu
|
/instalador.sh
|
UTF-8
| 1,807
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Installer for Sublime Text, Visual Studio Code and Chrome on Ubuntu desktop.
# (User-facing messages intentionally kept in Portuguese.)
echo
echo "Script para ubuntu"
echo
echo "Aguarde 2 segundos..."
sleep 2
clear
echo "------Facilitando sua vida no Linux!----------"
echo
#### PROGRAM MENU ####
echo "::Digite o numero e tecle enter ou para cancelar feche no (X)::
1-Update e upgrade
2-Instalar sublime text 3
3-Instalar visual studio code
4-Instalar chrome
"
echo
#### PROGRAM INSTALLATION ####
read -r programas
if [ "$programas" = "1" ];
then
echo "--- Iniciando update ---"
sleep 3
sudo apt-get update
echo "--- Iniciando upgrade ---"
sleep 3
sudo apt-get -y upgrade
echo "--- Fim da Atualização---"
elif [ "$programas" = "2" ];
then
echo "--- instalando Sublime text 3 ---"
sleep 3
sudo add-apt-repository -y ppa:webupd8team/sublime-text-3
sudo apt-get update
sudo apt-get install sublime-text-installer
echo "--- Fim da instalação do Sublime text ---"
elif [ "$programas" = "3" ];
then
echo "--- instalando visual studio code ---"
sleep 3
sudo add-apt-repository -y ppa:ubuntu-desktop/ubuntu-make
sudo apt-get update
sudo apt-get install -y ubuntu-make
umake web visual-studio-code
echo "--- Fim da instalação do visual studio code ---"
elif [ "$programas" = "4" ];
then
echo "--- instalando Chrome ---"
sleep 3
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
sudo dpkg -i google-chrome-stable_current_amd64.deb
# BUG FIX: "-y" belongs to apt-get, not sudo; the original
# "sudo -y apt-get -f install" always failed with "sudo: invalid option".
sudo apt-get -y -f install
echo "--- Fim da instalação do Chrome ---"
fi
#### LOOP BACK TO THE MENU ####
echo "Deseja instalar outro programa? [s/n]"
read -r programas2
if [ "$programas2" = "s" ];
then
# Relaunch via "$0" instead of "./instalador.sh" so the script works
# regardless of the caller's current directory or the file's name.
"$0"
else
exit
fi
| true
|
4d45a090dbdb6f88acc40fd25ddc988705663f3f
|
Shell
|
tlops/myJob
|
/labb6/test.sh
|
UTF-8
| 200
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Echo the first argument, then report on the number of parameters given.
# NOTE(review): the one-argument branch prints "You need one Parameter!"
# even though exactly one parameter WAS given — this looks inverted, but
# the original behavior is preserved here; confirm intent with the author.
echo "Your Argument is:$1"
case $# in
1)
echo "You need one Parameter!"
;;
0)
;;
*)
echo -e "your parameter is: $1 $2 $3 \n"
echo $0
echo $*
;;
esac
exit 0
| true
|
26d5c04b2d1172a571ae518fe77425a94b6a72ec
|
Shell
|
infinitly/heroku-buildpack-bower
|
/bin/detect
|
UTF-8
| 96
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# Heroku buildpack detect hook: succeed (printing "Bower") when the app
# root passed as $1 contains a bower.json manifest; otherwise print "no"
# and fail so the platform skips this buildpack.
[ -f "$1/bower.json" ] || { echo "no"; exit 1; }
echo "Bower"
exit 0
| true
|
0bae15748e75916d77cc56433975bfc373d8b03a
|
Shell
|
FreeSlave/freeslave.github.io
|
/gen-freedesktop-docs.sh
|
UTF-8
| 592
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
set -e

# Fetch the FreeSlave D library repositories, symlink their sources into a
# common tree, and build the combined ddox documentation.

COMMONDIR=d-freedesktop
if [ ! -d "$COMMONDIR/source" ]; then
mkdir -p "$COMMONDIR/source"
fi
if [ ! -d repos ]; then
mkdir -p repos
fi
REPOS=repos
for repo in inilike desktopfile icontheme mimeapps xdgpaths mime
do
# Clone once, then fast-forward to the latest master on every run.
if [ ! -d "$REPOS/$repo" ]; then
(cd "$REPOS" && git clone "https://github.com/FreeSlave/$repo" --branch master --single-branch)
fi
(cd "$REPOS/$repo" && git pull --ff --rebase=false origin master)
# Symlink every top-level source entry into the shared tree
# (GNU ln: -r makes the links relative).
find "$REPOS/$repo/source" -maxdepth 1 -mindepth 1 -exec ln -sfr "{}" "${COMMONDIR}/source" \;
done
(cd "$COMMONDIR" && dub build -b ddox)
| true
|
e9a0bff33799a49562daed8f68e3643252db4f93
|
Shell
|
relrod/rublets
|
/eval/run-smlnj.sh
|
UTF-8
| 348
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# We do SML evaluations this way so that we can force CM_VERBOSE=false.
# The inability to set environment variables is a limitation of `sandbox`, not
# of Ruby or Rublets. If we can figure out a way to set environment variables
# from `sandbox`, we can probably relatively easily use that from Rublets.
export CM_VERBOSE=false
# Quote the script path so filenames containing spaces reach sml intact.
sml "$1"
| true
|
1abbc3be35b9ac1b9f626e3d3b326b8388a9f621
|
Shell
|
EVAyo/OCRmyPDF-Quick-Install
|
/select_homebrew_mirror.sh
|
UTF-8
| 2,408
| 3.609375
| 4
|
[] |
no_license
|
change_homebrew_default(){
# Point the homebrew-core and homebrew-cask tap remotes back at the
# official GitHub repositories, then refresh brew's metadata.
echo "Changing the homebrew mirror to: Deafult ..."
git -C "$(brew --repo homebrew/core)" remote set-url origin https://github.com/Homebrew/homebrew-core.git
git -C "$(brew --repo homebrew/cask)" remote set-url origin https://github.com/Homebrew/homebrew-cask.git
echo "Change Finifh! Run 'brew update' now. "
brew update
}
change_homebrew_tuna(){
# Switch the homebrew-core and homebrew-cask tap remotes to the Tsinghua
# (Tuna) mirrors, then refresh brew's metadata.
echo "Changing the homebrew mirror to: Tuna(清华大学 Tuna 源) ..."
echo "Reference from (参考): https://mirror.tuna.tsinghua.edu.cn/help/homebrew/ "
git -C "$(brew --repo homebrew/core)" remote set-url origin https://mirrors.tuna.tsinghua.edu.cn/git/homebrew/homebrew-core.git
git -C "$(brew --repo homebrew/cask)" remote set-url origin https://mirrors.tuna.tsinghua.edu.cn/git/homebrew/homebrew-cask.git
echo "Change Finifh! Run 'brew update' now. "
brew update
}
change_homebrew_ustc(){
# Switch the main brew repository and homebrew-core to the USTC mirrors.
# NOTE(review): unlike the other two helpers this changes the brew repo
# itself and does not touch homebrew-cask — confirm that is intended.
# NOTE(review): the `cd`s change the caller's working directory as a
# side effect.
echo "Changing the homebrew mirror to: USTC(USTC 中科大源) ..."
echo "Reference from (参考): https://lug.ustc.edu.cn/wiki/mirrors/help/brew.git "
cd "$(brew --repo)"
git remote set-url origin https://mirrors.ustc.edu.cn/brew.git
cd "$(brew --repo)/Library/Taps/homebrew/homebrew-core"
git remote set-url origin https://mirrors.ustc.edu.cn/homebrew-core.git
echo "Change Finifh! Run 'brew update' now. "
brew update
}
select_homebrew_mirror(){
# Interactive menu: loop until a mirror switch ran (flag=1) or the user quits.
flag=0;
while [ "$flag" != 1 ]
do
  echo
  echo "==============================================="
  echo " Please select the Homebrew mirror"
  echo " 请选择 Homebrew 镜像: "
  echo " Deafult Select 1"
  echo " 1: Homebrew Default Mirror 官方源"
  echo " 2: 清华大学 Tuna 源"
  echo " 3: USTC 中科大源"
  echo " q: Exit this Sript 退出脚本"
  echo "==============================================="
  read -r input
  case $input in
    1)
      change_homebrew_default
      flag=1
      ;;
    2)
      change_homebrew_tuna
      flag=1
      ;;
    3)
      # BUG FIX: option 3 previously called change_homebrew_tuna, leaving
      # change_homebrew_ustc dead code; it now runs the USTC switcher
      # that the menu advertises.
      change_homebrew_ustc
      flag=1
      ;;
    q|Q)
      # BUG FIX: this arm used to sit after the catch-all "*)" and could
      # never match, so quitting was impossible; it must come first.
      exit
      ;;
    *)
      change_homebrew_default
      flag=1
      ;;
  esac
done
}

select_homebrew_mirror
| true
|
34ba8e106aba22427fb29fd8acd7cafad467d333
|
Shell
|
geokai/shell_scripting
|
/loops/for/for_list05.sh
|
UTF-8
| 398
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash

# Declare an indexed array of strings:
declare -a StringArray=("PHP" "Java" "C++" "VB.Net" "Python" "Perl")

# Print array values in lines:
echo "Print each element on a new line"
# Use "${StringArray[@]}" (quoted, @) so each element stays one word even if
# it contains spaces; the original unquoted ${StringArray[*]} relied on
# word-splitting (same output here, but fragile).
for val1 in "${StringArray[@]}"
do
echo "$val1"
done
echo ""

# Print array values in one line:
echo "Print all elements on a single line"
# "${StringArray[*]}" deliberately joins all elements into a single word.
for val2 in "${StringArray[*]}"
do
echo "$val2"
done
echo ""
| true
|
81e5b6e15a5fdc9ab41a5bceaa04b4e44f84e675
|
Shell
|
mikewlange/openchecks
|
/dockerSkeleton/parse-check-with-ocr.sh
|
UTF-8
| 1,051
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash

# Fetch a check image from Cloudant, OCR the MICR line with tesseract, and
# emit the routing/account numbers as JSON on stdout.

# Persist the invocation payload (JSON) for jq; quote "$1" so whitespace
# inside the JSON survives (the original unquoted echo word-split it).
printf '%s\n' "$1" > params.json

# Cloudant credentials and the _id of the attachment/document to download.
CLOUDANT_USER=$(jq -r '.CLOUDANT_USER' params.json)
CLOUDANT_PASS=$(jq -r '.CLOUDANT_PASS' params.json)
CLOUDANT_AUDITED_DATABASE=$(jq -r '.CLOUDANT_AUDITED_DATABASE' params.json)
IMAGE_ID=$(jq -r '.IMAGE_ID' params.json)

# Download the image from Cloudant.
curl -s -X GET -o imgData \
"https://$CLOUDANT_USER:$CLOUDANT_PASS@$CLOUDANT_USER.cloudant.com/$CLOUDANT_AUDITED_DATABASE/$IMAGE_ID/$IMAGE_ID?attachments=true&include_docs=true"

# Extract the account number and routing number as text by parsing for MICR
# font values.  tesseract appends ".txt" to the given basename, which is why
# the output file below is "imgData.txt.txt".
tesseract imgData imgData.txt -l mcr2 >/dev/null 2>&1

# This matcher works with two of the checks we're using as samples for the PoC.
# (Intentionally unquoted so the matches word-split into array elements.)
declare -a values=($(grep -Eo "\[[[0-9]+" imgData.txt.txt | sed -e 's/\[//g'))

# Extract the two values.
ROUTING=${values[0]}
ACCOUNT=${values[1]}

# Return JSON formatted values.
echo "{ \"result\": {\"routing\": \"$ROUTING\", \"account\": \"$ACCOUNT\"} }"
| true
|
d3649cac7d978a2704eb66341845874929ca9991
|
Shell
|
xunmengdeganjue/workTest
|
/wireless/Test_env/spectrumWiFi_Wag_env/sepctrum_wifi_phase_1-4/for_wan_clear.sh
|
UTF-8
| 4,257
| 3.140625
| 3
|
[] |
no_license
|
#! /bin/bash
##########################################
######### for wan clear
######### pass arguement as start/stop/restart
######### and mode as 6 for ipv6 and anything for ipv4
##########################################
fun_setup_info(){
# Populate the global endpoint / bridge / GRE-tap names used by the other
# functions.  Passing "6" as $1 selects the IPv6 endpoints; note that IP6,
# once set, stays set for any later call (sticky — same as the original).
case "$1" in
6) IP6=true ;;
esac
if [ -z "${IP6+x}" ]
then
echo "IP6 not set"
LOCAL_EP=10.42.0.4
REMOTE_EP=10.42.0.87
SEC_LOCAL_EP=10.42.0.5
GRENAME=gretap_clr
SEC_GRE_NAME=secgretap_clr
else
LOCAL_EP=2001:db1::4
REMOTE_EP=2001:db1::1
SEC_LOCAL_EP=2001:db1::5
GRENAME=gretap6_clr
SEC_GRE_NAME=secgretap6_clr
fi
BRCOMNAME=br-clr_com
BRTAPNAME=br-clr_comtap
#VLAN_ID=3
ETH_IF_NAME=eth0
# Summarise the chosen configuration for the operator.
printf '%s\n' \
"Local Endpoint : $LOCAL_EP" \
"Sec Local Endpoint : $SEC_LOCAL_EP" \
"Remote Endpoint : $REMOTE_EP" \
"Gre Tap name : $GRENAME" \
"Sec Gre Tap name : $SEC_GRE_NAME" \
"Bridge com name : $BRCOMNAME" \
"Bridge com Tap name : $BRTAPNAME"
}
fun_down_intf(){
# Tear down any previous bridge/GRE/veth configuration so a fresh setup can
# be applied.  "$1" == "6" selects IPv6 addressing (sets the sticky IP6
# flag).  Relies on the globals populated by fun_setup_info.
if [ "$1" == "6" ]
then
IP6=true
fi
echo "Removing Previous Configurations"
# Detach the GRE taps and the veth end from the tap bridge.
brctl delif $BRTAPNAME $GRENAME
brctl delif $BRTAPNAME $SEC_GRE_NAME
brctl delif $BRTAPNAME veth2
# Remove the endpoint addresses from the ethernet interface
# (/24 for IPv4, /64 for IPv6).
if [ -z ${IP6+x} ];
then
ip addr del $LOCAL_EP/24 dev $ETH_IF_NAME
ip addr del $SEC_LOCAL_EP/24 dev $ETH_IF_NAME
else
ip -6 addr del $LOCAL_EP/64 dev $ETH_IF_NAME
ip -6 addr del $SEC_LOCAL_EP/64 dev $ETH_IF_NAME
fi
# Delete the GRE tap devices and the tap bridge itself.
ip link del dev $GRENAME
ip link del dev $SEC_GRE_NAME
ifconfig $BRTAPNAME down
brctl delbr $BRTAPNAME
# Detach veth3 (plain or VLAN-tagged, depending on VLAN_ID) from the com
# bridge; VLAN_ID is commented out in fun_setup_info, so the plain branch
# is the default.
if [ -z ${VLAN_ID+x} ];
then
echo "VLAN for SSID not set"
brctl delif $BRCOMNAME veth3
else
brctl delif $BRCOMNAME veth3.$VLAN_ID
vconfig rem veth3.$VLAN_ID
fi
sleep 1
# Deleting veth2 also removes its veth peer; then drop the com bridge.
ip link del dev veth2
ifconfig $BRCOMNAME down
brctl delbr $BRCOMNAME
sleep 1
echo ">>>>>>>>>Deleting and Cleaning is Done>>>>>>>>"
}
fun_up_intf(){
# Build the bridge/GRE/veth topology: a tap bridge joining two GRE taps and
# one end of a veth pair, with the other veth end on the com bridge.
# "$1" == "6" selects IPv6 (ip6gretap) mode via the sticky IP6 flag.
# Relies on the globals populated by fun_setup_info.
if [ "$1" == "6" ]
then
IP6=true
fi
echo ">>>>>>>>>Now Starting to create and Setup Fresh>>>>>>>>"
echo "Now Creating a $BRTAPNAME"
brctl addbr $BRTAPNAME
# BUG FIX: these two log lines referenced the undefined $GRE_NAME (the
# variable is $GRENAME), so the tap name printed as an empty string.
echo "Creating $GRENAME with local $LOCAL_EP and remote $REMOTE_EP"
echo "Creating $SEC_GRE_NAME with local $SEC_LOCAL_EP and remote $REMOTE_EP"
if [ -z ${IP6+x} ];
then
ip link add $GRENAME type gretap local $LOCAL_EP remote $REMOTE_EP
ip link add $SEC_GRE_NAME type gretap local $SEC_LOCAL_EP remote $REMOTE_EP
ip link set dev $GRENAME up
ip link set dev $SEC_GRE_NAME up
else
ip -6 link add $GRENAME type ip6gretap local $LOCAL_EP remote $REMOTE_EP
ip -6 link add $SEC_GRE_NAME type ip6gretap local $SEC_LOCAL_EP remote $REMOTE_EP
ip -6 link set dev $GRENAME up
ip -6 link set dev $SEC_GRE_NAME up
fi
echo "Creating a $BRCOMNAME"
brctl addbr $BRCOMNAME
# Assign both local endpoint addresses to the ethernet interface.
if [ -z ${IP6+x} ];
then
echo "Assigning ipv4 ip's to local and sec local ep "
ip addr add $LOCAL_EP/24 dev $ETH_IF_NAME
ip addr add $SEC_LOCAL_EP/24 dev $ETH_IF_NAME
ifconfig $ETH_IF_NAME up
else
echo "Assigning ipv6 ip's to local and sec local ep "
ip -6 addr add $LOCAL_EP/64 dev $ETH_IF_NAME
ip -6 addr add $SEC_LOCAL_EP/64 dev $ETH_IF_NAME
ifconfig $ETH_IF_NAME up
fi
echo "Creating veth pipe"
ip link add veth2 type veth peer name veth3
# Attach veth3 to the com bridge, VLAN-tagged when VLAN_ID is set.
if [ -z ${VLAN_ID+x} ];
then
brctl addif $BRCOMNAME veth3
else
echo "Creating veth3.$VLAN_ID"
vconfig add veth3 $VLAN_ID
sleep 1
echo "Adding veth3.$VLAN_ID to $BRCOMNAME"
brctl addif $BRCOMNAME veth3.$VLAN_ID
fi
# BUG FIX: same undefined-$GRE_NAME typo as above.
echo "Adding $GRENAME $SEC_GRE_NAME and veth2 in $BRTAPNAME"
brctl addif $BRTAPNAME $GRENAME
brctl addif $BRTAPNAME $SEC_GRE_NAME
brctl addif $BRTAPNAME veth2
echo "Bringing the veth interfaces up"
ifconfig veth2 up
if [ -z ${VLAN_ID+x} ];
then
ifconfig veth3 up
else
ifconfig veth3.$VLAN_ID up
fi
echo "Bringing $BRTAPNAME up"
ifconfig $BRTAPNAME up
echo "Bringing $BRCOMNAME up"
ifconfig $BRCOMNAME up
ip addr show $ETH_IF_NAME
echo "******---------- Creation is Done ------****"
}
# --- entry point -------------------------------------------------------
# $1 = action (start|stop|restart); $2 = "6" for IPv6 mode (anything else,
# or empty, selects IPv4).  Unknown actions fall through doing nothing
# beyond printing the setup summary.
MODE=$2
fun_setup_info $MODE
case "$1" in
restart)
fun_down_intf $MODE
fun_up_intf $MODE
;;
start)
fun_up_intf $MODE
;;
stop)
fun_down_intf $MODE
;;
esac
| true
|
b9989693534913bfa72a45e5aa096c2c8f7f4cbe
|
Shell
|
HexHive/FuZZan
|
/fuzzan_autosetup.sh
|
UTF-8
| 302
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
# One-shot FuZZan build: prerequisites, instrumented LLVM, libshrink, AFL.
# Must be run from the repository root (uses relative LLVM/, etc/, afl/).
# install prerequisite package
sudo apt install prelink patchelf
# build llvm as native version
echo "build llvm"
# FUZZAN_MODE=1 is consumed by the LLVM build — presumably selecting the
# native sanitizer mode; confirm against LLVM/build.sh.
export FUZZAN_MODE="1"
pushd LLVM
./build.sh 1
popd
# build libshrink
pushd etc/libshrink
./build.sh
popd
# build afl
echo "build afl"
pushd afl
./build-afl.sh
popd
| true
|
0d8c86ceefada93871f7d38510120c247910b5a1
|
Shell
|
solarkennedy/ipmi-kvm-docker
|
/novnc/utils/rebind
|
UTF-8
| 424
| 3.5
| 4
|
[
"Apache-2.0",
"MPL-2.0",
"BSD-3-Clause",
"Zlib",
"CC-BY-SA-4.0",
"CC-BY-SA-3.0",
"OFL-1.1",
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash

# Launch a command while intercepting bind() calls to OLD_PORT and
# redirecting them to localhost:NEW_PORT via the rebind.so LD_PRELOAD shim.

usage() {
    echo "Usage: $(basename "$0") OLD_PORT NEW_PORT COMMAND_LINE"
    echo
    echo "Launch COMMAND_LINE, but intercept system calls to bind"
    echo "to OLD_PORT and instead bind them to localhost:NEW_PORT"
    exit 2
}

# BUG FIX: usage() was defined but never invoked, so running the script with
# too few arguments silently exported empty port variables.  Validate first.
[ "$#" -ge 3 ] || usage

# Parameter defaults
mydir=$(readlink -f "$(dirname "$0")")

export REBIND_PORT_OLD="${1}"; shift
export REBIND_PORT_NEW="${1}"; shift

LD_PRELOAD=${mydir}/rebind.so "${@}"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.