blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
37c0d332de8a8f7233684648c2e7c7155fe06ae6
|
Shell
|
HacoK/Shell_scripts
|
/scripts/nestedfor.sh
|
UTF-8
| 159
| 2.796875
| 3
|
[] |
no_license
|
# Print a 5x5 block of digits: row i consists of the digit i printed 5 times.
for ((i = 1; i <= 5; i++)); do
  for ((j = 1; j <= 5; j++)); do
    printf '%s' "$i"   # emit the row's digit, no newline
  done
  printf '\n'          # terminate the row
done
| true
|
0cb813f21366f377bbf5aee28baa07d1847849e4
|
Shell
|
bygreencn/highwaytest
|
/vanillacmd
|
UTF-8
| 313
| 2.59375
| 3
|
[] |
no_license
|
#! /bin/bash
# Run the "vanilla" network benchmark on MNIST once per depth, writing one
# JSON result file per run into $DIR.
DIR="results"
GPUID="0"
TYPE="vanilla"
DATASET="mnist"
SIZE="71"
mkdir -p "${DIR}"
depth_list=(2 5 10 20 50 100)
for layers in "${depth_list[@]}"; do
  th main.lua -gpuid "${GPUID}" -cudnn 1 \
    -json "${DIR}/${TYPE}-${DATASET}-${layers}.json" \
    -type "${TYPE}" -set "${DATASET}" \
    -layers "${layers}" -size "${SIZE}" -max_epochs 400
done
| true
|
c6b0f5af303e5d1a85f6c30419c7b01101751acb
|
Shell
|
songokas/rust-simple-tunnel
|
/setup.sh
|
UTF-8
| 3,664
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Set up (or, with "--clean yes", tear down) the host-side networking for
# rust-simple-tunnel: NAT/forwarding rules for the tun interface and
# policy-routing rules steering tcp/icmp traffic into a dedicated table.
set -e
# Defaults; each can be overridden by a command-line option below.
tun_interface="tun0"
tun_ip="10.0.0.1"
tun_forward="10.0.0.2"
network_interface="enp39s0"
# Prefer the interface carrying the default route (destination 00000000 in
# /proc/net/route) over the hard-coded fallback above.
check_default=$(awk '$2 == 00000000 { print $1 }' /proc/net/route | head -n1)
if [[ $check_default ]]; then
network_interface="$check_default"
fi
nft_table="rust-simple-tunnel"
route_table_name="rust-simple-tunnel"
use_nft=""
clean=""
printUsage() {
echo -e "Usage:
./setup.sh [OPTIONS]
[OPTIONS]
--tun-name (tun interface name default: $tun_interface)
--tun-ip (tun forward ip default: $tun_ip)
--tun-forward-ip (tun forward ip default: $tun_forward)
--network-interface (forward traffic through network interface name default: $network_interface check ip addr)
--route-table-name (ip route table name to use. default $route_table_name)
--clean yes
"
}
if [[ $1 == "--help" ]]; then
printUsage
exit 0
fi
# Parse "--option value" pairs.
while [[ $# -gt 1 ]]
do
key="$1"
case $key in
--tun-name)
tun_interface="$2"
shift
;;
--tun-ip)
tun_ip="$2"
shift
;;
--tun-forward-ip)
tun_forward="$2"
shift
;;
--network-interface)
network_interface="$2"
shift
;;
--clean)
clean="$2"
shift
;;
--route-table-name)
route_table_name="$2"
shift
;;
*)
printUsage
exit 1
;;
esac
shift
done
# Firewall layer: nft when $use_nft is set (currently always empty, so the
# iptables branch runs), iptables otherwise.
if [[ $use_nft ]]; then
if [[ $(nft list table ip "$nft_table" 2>/dev/null) ]]; then
nft delete table ip "$nft_table"
fi
if [[ ! "$clean" ]]; then
NETWORK_INTERFACE="$network_interface" NFT_TABLE="$nft_table" envsubst < "config/routes" > /tmp/rust-simple-tunnel-routes
nft -f /tmp/rust-simple-tunnel-routes
rm -f /tmp/rust-simple-tunnel-routes
fi
else
if [[ $clean ]]; then
iptables -t nat -D POSTROUTING -o "$network_interface" -s "$tun_forward" -j MASQUERADE -m comment --comment "simple rust tunnel"
iptables -D FORWARD -i "$tun_interface" -o "$network_interface" -s "$tun_forward" -j ACCEPT -m comment --comment "simple rust tunnel"
# Fixed: $tun_interface was the only unquoted interface expansion here.
iptables -D FORWARD -i "$network_interface" -o "$tun_interface" -d "$tun_forward" -j ACCEPT -m comment --comment "simple rust tunnel"
else
iptables -t nat -A POSTROUTING -o "$network_interface" -s "$tun_forward" -j MASQUERADE -m comment --comment "simple rust tunnel"
iptables -A FORWARD -i "$tun_interface" -o "$network_interface" -s "$tun_forward" -j ACCEPT -m comment --comment "simple rust tunnel"
iptables -A FORWARD -i "$network_interface" -o "$tun_interface" -d "$tun_forward" -j ACCEPT -m comment --comment "simple rust tunnel"
fi
fi
# Register the routing table name once. grep -q replaces the fragile
# unquoted "[[ ! $(grep ...) ]]" output test with an exit-status test.
if ! grep -q "$route_table_name" /etc/iproute2/rt_tables; then
echo 100 "$route_table_name" >> /etc/iproute2/rt_tables
fi
# handle existing
# Policy rules: add when setting up (if missing), delete when cleaning.
if ! ip rule | grep -q "from all ipproto tcp lookup $route_table_name"; then
if [[ ! $clean ]]; then
ip rule add from all ipproto tcp lookup "$route_table_name" priority 500
fi
else
if [[ $clean ]]; then
ip rule delete pref 500
fi
fi
if ! ip rule | grep -q "from all ipproto icmp lookup $route_table_name"; then
if [[ ! $clean ]]; then
ip rule add from all ipproto icmp lookup "$route_table_name" priority 501
fi
else
if [[ $clean ]]; then
ip rule delete pref 501
fi
fi
if ! ip rule | grep -q "from $tun_forward lookup main"; then
if [[ ! $clean ]]; then
ip rule add from "$tun_forward" lookup main priority 300
fi
else
if [[ $clean ]]; then
ip rule delete pref 300
fi
fi
# Drop any stale routes left in our table.
if [[ $(ip route show table "$route_table_name" 2>/dev/null ) ]]; then
ip route flush table "$route_table_name"
fi
| true
|
5e70c23a0b88e69e4afd41ad2b9421a258cd4174
|
Shell
|
manuel-192/m-m
|
/PKGBUILDs/eos-brightness/brightness-xrandr
|
UTF-8
| 2,283
| 4.6875
| 5
|
[] |
no_license
|
#!/bin/bash
# Set or get screen brightness using xrandr.
# With no argument: print the current brightness as a percentage.
# With a numeric argument (5..100): set the brightness to that percentage.
# Print an error to stderr, then show usage and exit 1.
# Relies on $progname / the helpers being in scope from MainXrandr (bash
# dynamic scoping makes its locals visible to callees).
DIE() {
printf "%s: error: %s\n" "$progname" "$1" >&2
Usage 1
}
# Validate $brightness as an integer inside $min..$max, then convert the
# percentage to xrandr's 0..1 scale (e.g. 80 -> .80) via bc.
CheckValue1() {
local supported_values="Supported value range: $min..$max."
# Reject empty input, input containing non-digits, and out-of-range values.
if [ -z "$brightness" ] || [ -n "$(echo "$brightness" | tr -d '0-9')" ] || [ $brightness -gt $max ] || [ $brightness -lt $min ]
then
DIE "value '$brightness' is out of range. $supported_values"
fi
brightness="$(echo $brightness*0.01 | bc -l)"
}
# Capture verbose xrandr output (LANG=C for stable, parseable text).
GetData() {
data="$(LANG=C xrandr --query --verbose)"
[ -n "$data" ] || DIE "xrandr returned no data!"
}
# First connected output device, e.g. "eDP-1".
GetOutputDevice() {
outdev="$(echo "$data" | grep -w connected | head -n1 | awk '{print $1}')"
[ -n "$outdev" ] || DIE "no output device found!"
}
# Extract the Brightness: value for $outdev and convert it back to a
# percentage (dividing by 1/100 with scale=0 truncates to an integer).
GetBrightness() {
local br=$(echo "$data" | sed -E -n "/$outdev connected/,/[ \t]+Brightness:/p")
# The sed range should span at most a handful of lines; more than 6 means
# the Brightness line was not found for this device.
if [ $(echo "$br" | wc -l) -le 6 ] ; then
br=$(echo "$br" | tail -n1 | awk '{print $NF}')
if [ -n "$br" ] ; then
local div=$(echo "1 / 100" | bc -l)
brightness=$(echo "scale=0; $br / $div" | bc -l)
return
fi
fi
DIE "failed to get brightness value."
}
# Verify required Arch packages are installed (pacman-based systems only).
CheckDependencies() {
local pkgs=( xorg-xrandr bc )
pacman -Q ${pkgs[*]} &>/dev/null || DIE "required dependencies: ${pkgs[*]}"
}
# Print usage to stderr; with an argument, exit using it as the status.
Usage() {
cat <<EOF >&2
Usage: $progname [percentage]
percentage Percentage of the maximum screen brightness.
If percentage is not given, current percentage will be shown, if previously set.
EOF
[ -n "$1" ] && exit $1
}
# Entry point: dispatch on the (optional) percentage argument.
MainXrandr() {
local -r progname="$(basename "$0")"
local -r min=5
local -r max=100
local brightness=""
local data=""
local outdev=""
CheckDependencies
GetData
GetOutputDevice
case "$1" in
"") GetBrightness
echo "$brightness"
;;
*)
# Accept the argument only if stripping digits leaves nothing.
if [ -z "$(echo "$1" | sed 's|[0-9]||g')" ] ; then
brightness="$1"
CheckValue1
xrandr --output "$outdev" --brightness "$brightness" || DIE "failed to set brightness to '$brightness'."
else
DIE "sorry, value can contain only numbers in the range of $min to $max."
fi
;;
esac
}
MainXrandr "$@"
| true
|
caf40e63fe33201bdf2cb9d69c848fdf0953b612
|
Shell
|
jpikl/dotfiles
|
/.bashrc.d/50-cargo.sh
|
UTF-8
| 466
| 2.75
| 3
|
[] |
no_license
|
# shellcheck shell=bash
## rustup and cargo autocomplete
# Enable bash completion for rustup (and cargo) when rustup is installed.
if [[ -x $(command -v rustup) ]]; then
source <(rustup completions bash)
#source <(rustup completions bash cargo)
# Workaround for https://github.com/rust-lang/rustup/issues/3407
# ("rustup completions bash cargo" is broken): source the completion script
# shipped with the stable toolchain directly, when present.
if [[ -f ~/.rustup/toolchains/stable-x86_64-unknown-linux-gnu/src/etc/bash_completion.d/cargo ]]; then
source ~/.rustup/toolchains/stable-x86_64-unknown-linux-gnu/src/etc/bash_completion.d/cargo
fi
fi
| true
|
2025a83b23d4de2f60f5363cf25aa8a66d2f4794
|
Shell
|
sweagraff/ryu
|
/build/build.sh
|
UTF-8
| 624
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build script: optionally (-d) download the Pharo VM+image, then run setup.st.
# not in Pharo 7 need to do ssh-add -K ~/.ssh/id_rsa
MC_REPOS_URL='filetree://../repository'
performDownload=0

# -d requests a fresh download of the full Pharo distribution.
while getopts ":d" flag; do
  case "$flag" in
    d)
      echo "-d was specified, will download full Pharo Distribution" >&2
      performDownload=1
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      ;;
  esac
done

if (( performDownload == 1 )); then
  echo "Downloading latest Pharo"
  #curl get.pharo.org | bash
  #curl https://get.pharo.org/64/70+vm | bash
  curl https://get.pharo.org/64/stable+vm | bash
fi

./pharo Pharo.image st setup.st --save --quit
echo "Build Completed"
| true
|
2ba100f0e03fc754f7cd200c7e75478079bf97a4
|
Shell
|
OndrejHome/gentoo-build
|
/scripts/root_user.sh
|
UTF-8
| 412
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Set the root password inside the chroot at ${GB_ROOT}, driven by
# ${GB_ROOT_USER_PASSWORD}. No-op (exit 0) when no password is configured.
set -x
set -e
if [ -z "${GB_ROOT_USER_PASSWORD}" ]; then
echo 'skipping root user'
exit 0
fi
# The heredoc body is executed by bash *inside* the target system. The
# inner script reads GB_ROOT_USER_PASSWORD via printenv, so the variable
# must be exported into chroot's environment — presumably done by the
# calling build framework; TODO confirm.
chroot ${GB_ROOT} /bin/bash <<-EOF
source /etc/profile
set -x
set -e
# do not enforce password qulity checks
sed -i 's/enforce=.*/enforce=none/' /etc/security/passwdqc.conf
if printenv GB_ROOT_USER_PASSWORD | grep -q .; then
( echo -n 'root:' && printenv GB_ROOT_USER_PASSWORD ) | chpasswd
fi
EOF
| true
|
d8b1182e66c85f16cf3b2aef2888374a24afe4d1
|
Shell
|
mohisen/zdotfiles
|
/5/Capture/search/strip_href_cmds.sh
|
UTF-8
| 3,776
| 3.171875
| 3
|
[] |
no_license
|
# Reference collection of shell one-liners (scraped from commandlinefu.com);
# each command is preceded by a one-line description. This file is a cheat
# sheet, not an executable script.
# commandlinefu.com by David Winterbottom
# SVN Export files that were modified between given revisions.
svn diff . -r43:HEAD --summarize | cut -c9-99999 | cpio -pvdmu ~/destination
# Show only printable characters and newlines from a file or input
strings -1 <file>
# remove password from openssl key file
openssl rsa -in /path/to/originalkeywithpass.key -out /path/to/newkeywithnopass.key
# Watch active calls on an Asterisk PBX
watch "asterisk -vvvvvrx 'core show channels' | egrep \"(call|channel)\""
# Set name of windows in tmux/byobu to hostnames of servers you're connected to
for i in $(tmux list-windows -F '#{window_index}'); do panenames=$(tmux list-panes -t $i -F '#{pane_title}' | sed -e 's/:.*$//' -e 's/^.*@//' | uniq); windowname=$(echo ${panenames} | sed -e 's/ /|/g'); tmux rename-window -t $i $windowname; done
# parse html/stdin with lynx
alias html2ascii='lynx -force_html -stdin -dump -nolist'
# count the number of specific characters in a file or text stream
find /some/path -type f -and -iregex '.*\.mp3$' -and -print0 | tr -d -c '\000' |wc -c
# Print all words in a file sorted by length
for w in $(tr 'A-Z ,."()?!;:' 'a-z\n' < sample.txt); do echo ${#w} $w; done | sort -u | sort -n
# Download all MegaTokyo strips
for i in $(seq 1 `curl http://megatokyo.com 2>/dev/null|grep current|cut -f6 -d\"`);do wget http://megatokyo.com/`curl http://megatokyo.com/strip/${i} 2>/dev/null|grep src=\"strips\/|cut -f4 -d\"`;done
# Stripping ^M at end of each line for files
tr -d '\r' <dos_file_to_be_converted >converted_result
# strip config files of comments
grep -vE '^$|^[\s]*[;#]'
# MySQL: Find an instance of a populated table across numerous databases
TABLE_NAME=YYZ ; for DATABASE in $(echo "SELECT TABLE_SCHEMA FROM information_schema.tables WHERE TABLE_NAME='$TABLE_NAME'" | mysql -N) ; do echo -n "$DATABASE: " ; echo "SELECT COUNT(*) FROM $TABLE_NAME" | mysql $DATABASE -N ; done | fgrep -v ': 0'
# Extract tar content without leading parent directory
tar -xaf archive.tar.gz --strip-components=1
# Convert high resolution JPEG for web publication
convert /home/user/file.jpg -resize 800x533 -strip -quality 80 -interlace line /home/user/web_file.jpg
# Search and replace in VIM
# NOTE(review): the line below is a vim ex command, not a shell command.
:%s/foo/bar/g
# Alternative way to generate an XKCD #936 style 4 word password usig sed
shuf -n4 /usr/share/dict/words | sed -e ':a;N;$!ba;s/\n/ /g;s/'\''//g;s/\b\(.\)/\u\1/g;s/ //g'
# MySQL: Strip a my.cnf file from comments, remove blank lines, normalize spaces:
cat my.cnf | sed '/^#/d' | sed '/^$/d' | sed -e 's/[ \t]\+//g'
# Create a bash script from last n commands
# NOTE(review): the line below is pseudo-code — "tail -(n+1)" / "head -(n)"
# is not valid option syntax; substitute a concrete number for n by hand.
history | tail -(n+1) | head -(n) | sed 's/^[0-9 ]\{7\}//' >> ~/script.sh
# Real time duplication of Apache app traffic to a second server
nice -n -20 ssh SOURCE_SERVER "tail -f /var/log/httpd/access.log " | awk '{print $7}' | grep jsp | parallel 'curl TARGET_SERVER{} 2>&1 /dev/null'
# Probably, most frequent use of diff
diff -Naur --strip-trailing-cr
# strip id3 v1 and v2 tags from all mp3s in current dir and below
find . -type f -iname "*.mp3" -exec id3v2 --delete-all {} \;
# Cleanly list available wireless networks (using iwlist)
iwlist wlan0 scan | sed -ne 's#^[[:space:]]*\(Quality=\|Encryption key:\|ESSID:\)#\1#p' -e 's#^[[:space:]]*\(Mode:.*\)$#\1\n#p'
# strips the first field of each line where the delimiter is the first ascii character
cut -f2 -d`echo -e '\x01'` file
# Video Google download
wget -qO- "VURL" | grep -o "googleplayer.swf?videoUrl\\\x3d\(.\+\)\\\x26thumbnailUrl\\\x3dhttp" | grep -o "http.\+" | sed -e's/%\([0-9A-F][0-9A-F]\)/\\\\\x\1/g' | xargs echo -e | sed 's/.\{22\}$//g' | xargs wget -O OUPUT_FILE
# strip non-constant number of directories from tar archive while decompressing
tar --transform 's#.*/\([^/]*\)$#\1#' -xzvf test-archive.tar.gz
| true
|
20c6be94a0cefb1f89d4ef1f79a8dcabd10d7e55
|
Shell
|
sri-arjuna/script-tools.tui
|
/menu/iso/usb
|
UTF-8
| 3,456
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# sea's Script Tools: The 3rd Generation
#
# Description: Writes an imagefile to selected USB drive with DD
# Author: Simon A. Erat (sea) <erat.simon AT gmail.com>
# License: GPL v3
# Date Created: 2011.09.09
# Date Changed: 2013.08.25
script_version=1.4
# Resource: man mkfs, fsck, cfdisk, wipefs, umount, mount
# Resource: http://forums.fedoraforum.org/showthread.php?t=269547
# Resource: http://forums.fedoraforum.org/showthread.php?t=270203
#
# Title
#
# NOTE(review): sT/sE/sP/ask/input/ReportStatus and $args/$BORDER_LEFT are
# provided by the surrounding script-tools framework, not this file — confirm.
sT "sea's ISO to USB script, using DD ($script_version)"
#
# Variables
#
targetusb=""
sourcefile=""
devDisk=/dev/disk/by-id
# Fedora >= 19 understands "dd status=none"; older releases only "noxfer".
[ "$(cat /etc/fedora-release|awk '{print $3}')" -ge 19 ] && \
dd_status=none || \
dd_status=noxfer
# Classify positional arguments: block devices become the target USB drive,
# existing files (absolute or relative to $PWD) become the source image.
for arg in ${args[@]}
do if [ -b "$arg" ]
then targetusb="$arg"
elif [ -f "$arg" ] || [ -f "$(pwd)/$arg" ]
then sourcefile="$arg"
fi
done
# No image on the command line: fall back to the configured search path.
if [ "" = "$sourcefile" ] ; then
if test "" = "$iso2usb_default_search"
then # This is read from default configuration
# Or overwritten by $HOME/bin/$USER-libs/iso2usb
# If neither of these exist, use these defaults
iso2usb_default_search="$HOME /home/$USER/Downloads /opt /usr"
fi
fi
#
# Set search Path
#
if [ "" = "$sourcefile" ] ; then
selectionLis=""
while [ "$selectionList" = "" ] ; do
sT "Please choose a directory to search for an image file"
select sPath in $iso2usb_default_search OTHER ; do
if [ $sPath = "OTHER" ]
then sPath="$(input 'Enter Path: ')"
len=${#sPath}
break
else len=$[ ${#sPath} +1 ]
break
fi
done
#
# Parsing for iso/img-images
#
cd $sPath
sP "Scanning $sPath"
for files in $(find "$sPath/." -mount -name "*.*" 2>&1) ; do
case $files in
*.iso) selectionList="$selectionList ${files:$len}" ;;
*.img) selectionList="$selectionList ${files:$len}" ;;
# "*") TODO: add other file filters here... ;;
else) return
esac
done
[ "" = "$selectionList" ] && \
sE "No iso/img files in $sPath" "$FAILURE"
done
#
# Image selection
#
sT "Please choose the image you want to use: "
select sourceImage in $selectionList ; do break ; done
else sourceImage="$sourcefile"
fi
sE "Selected image:" "$sourceImage"
#
# select USB Drive
#
if [ "" = "$targetusb" ] ; then
sT "Please choose the removable media: "
list="$(ls $devDisk|grep usb|grep -v part)"
while [ "" = "$list" ] ; do
read -p "$BORDER_LEFT Please insert an USB drive and press [ENTER] to continue..." buffer
list="$(ls $devDisk|grep usb|grep -v part[0-9])"
done
if [ ! "$list" = "$(echo $list)" ]
then select targetUsb in $list ; do
tDev=$(readlink -f $devDisk/$targetUsb)
break
done
else targetUsb=$list
tDev=$(readlink -f $devDisk/$targetUsb)
fi
else targetUsb="$( ls /dev/disk/by-id/ -l | grep $(basename $targetusb)|grep -v part|awk '{print $9}' )"
tDev="$targetusb"
fi
sE "Selected device:" "$tDev ($targetUsb)"
#
# Prepare
#
sT "Using '$(basename $sourceImage)' for '$tDev'" # ${targetUsb:0:(-14)}
sE "Delete the device's filesystem" "$tDev"
ask "Are you sure to erease all data on $tDev?" && \
sudo dd status=$dd_status if=/dev/zero of=$tDev bs=512 count=2 > /dev/zero
#
# Write
#
sT "Please be patient, the progress may take a while..."
sP "Writing $(basename $sourceImage) to $tDev" "$PROGRESS"
isofile=$sPath/$sourceImage
sudo dd status=$dd_status if=$isofile of=$tDev bs=1M > /dev/zero
ReportStatus $? "Written $sourceImage to $tDev"
| true
|
214dba38f4448b25e0a5a062ef474ef05257abd8
|
Shell
|
jondowsondatastax/pod
|
/pods/pod_REMOVE-PODS/prepare/prepare_misc.bash
|
UTF-8
| 502
| 3.1875
| 3
|
[] |
no_license
|
# about: preparation functions for this pod
# ------------------------------------------
# Verify that files/folders this pod depends on exist.
# Currently a no-op (':'); the tarball check is kept commented for reference.
function prepare_misc_checkFileFolderExist(){
## test specified files exist
# if software tarball is used - then check it exists
#if [[ "${SOFTWARE_TARBALL}" != "" ]]; then
# prepare_generic_misc_checkSoftwareExists
#fi
:
# add below any other pod specific file and folder checks
}
# ------------------------------------------
# Apply pod-specific default settings. Currently a no-op placeholder.
function prepare_misc_setDefaults(){
## pod specific default settings
:
}
| true
|
a630b5fb1bf5013782f05a5c1804743fb74b41d6
|
Shell
|
flaccid/rightscripts
|
/install_aws_cli.bash
|
UTF-8
| 1,560
| 3.59375
| 4
|
[] |
no_license
|
#! /bin/bash -e
# Install the AWS CLI via pip (bootstrapping python/pip from the distro
# package manager if needed) and optionally write a default ~/.aws
# configuration using the instance's metadata service.
source "$RS_ATTACH_DIR/rs_distro.sh"
# Atomic Host has no writable /usr; skip without failing the boot sequence.
if [ "$RS_DISTRO" = 'atomichost' ]; then
echo 'Red Hat Enterprise Linux Atomic Host not yet supported, but will exit gracefully.'
exit 0
fi
# PIP is in /usr/local/bin
export PATH=$PATH:/usr/local/bin
# Install awscli only when missing; install pip first (apt or yum) if needed.
if ! type aws >/dev/null 2>&1; then
if ! type pip >/dev/null 2>&1; then
if type apt-get >/dev/null 2>&1; then
sudo apt-get -y update
sudo apt-get -y install python python-pip
elif type yum >/dev/null 2>&1; then
sudo yum -y install python python-pip
fi
fi
sudo pip install awscli
fi
if [ "$AWS_CLI_SETUP_DEFAULT_CONFIG" = 'true' ]; then
# Get instance region from metadata
if [ -z "$AWS_CLI_REGION" ]; then
# Region = availability zone minus its trailing letter (us-east-1a -> us-east-1).
availability_zone=$(curl http://169.254.169.254/latest/meta-data/placement/availability-zone)
region=${availability_zone%?}
else
region="$AWS_CLI_REGION"
fi
# Create default config for aws cli
mkdir -p "$HOME/.aws"
# default configuration (profiles not yet supported)
cat <<EOF> "$HOME/.aws/config"
[default]
region = ${region}
output = json
EOF
# Populate credentials
if [ ! -z "$AWS_ACCESS_KEY_ID" ]; then
cat <<EOF> "$HOME/.aws/credentials"
[default]
aws_access_key_id = $AWS_ACCESS_KEY_ID
aws_secret_access_key = $AWS_SECRET_ACCESS_KEY
EOF
fi
if [ "$AWS_CLI_ROOT_USER_SETUP" = 'true' ]; then
# Fixed log-message typo: was 'copying confg to /root/.aws'.
echo 'copying config to /root/.aws'
sudo mkdir -p /root/.aws
sudo chmod 700 /root/.aws
sudo cp -v "$HOME/.aws/config" /root/.aws/
sudo cp -v "$HOME/.aws/credentials" /root/.aws/
fi
fi
echo 'Done.'
| true
|
f861f0710229e8c9c20ce490e5b2ed44bd5679cb
|
Shell
|
Tubbz-alt/preston
|
/sync_licenses.sh
|
UTF-8
| 628
| 3.9375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Regenerate licenses.spdx: one "<sha256>\t<license-id>" line per SPDX
# license, with each license text normalised (all whitespace and '='
# stripped) before hashing so reflowed texts hash identically.
# Print a message and abort the script.
fail_exit(){
echo "$1"
exit 1
}
git clone https://github.com/spdx/license-list-data.git --depth=1 || fail_exit "Failed to clone"
if [[ -e "licenses.spdx" ]]; then
rm -v licenses.spdx
fi
pushd license-list-data/text
for i in *.txt ; do
# Strip all whitespace from it due to many licenses being reflowed
# Removes all newlines and whitespace
# (quoted "$i" so filenames with spaces cannot break the redirections)
tr -d '\t\n\r\f= ' < "$i" > "$i.tmp"
mv "$i.tmp" "$i"
sum=$(sha256sum "${i}" | cut -f 1 -d ' ')
# License id = filename without the .txt suffix (parameter expansion
# replaces the previous echo|sed round-trip).
nom=${i%.txt}
echo -e "${sum}\t${nom}" >> ../../licenses.spdx
done
popd
rm -rf license-list-data
| true
|
c8c66f89cce97338e6b5d900c5e6659bcf957559
|
Shell
|
Alexander-Shukaev/MINGW-packages
|
/mingw-w64-libgit2/PKGBUILD
|
UTF-8
| 1,499
| 3
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: Martell Malone <martellmalone@gmail.com>
# MSYS2/mingw-w64 PKGBUILD for libgit2, built out-of-tree with CMake against
# the mingw-w64 http-parser, libssh2, openssl and zlib packages.
_realname=libgit2
pkgname=("${MINGW_PACKAGE_PREFIX}-${_realname}")
pkgver=0.22.2
pkgrel=1
pkgdesc='A linkable library for Git (mingw-w64)'
arch=('any')
url='https://github.com/libgit2/libgit2'
license=('GPL2' 'custom')
options=('strip')
depends=("${MINGW_PACKAGE_PREFIX}-http-parser"
"${MINGW_PACKAGE_PREFIX}-libssh2"
"${MINGW_PACKAGE_PREFIX}-openssl"
"${MINGW_PACKAGE_PREFIX}-zlib")
makedepends=("${MINGW_PACKAGE_PREFIX}-cmake" "${MINGW_PACKAGE_PREFIX}-gcc")
_pkgfqn=v${pkgver}.tar.gz
source=("https://github.com/libgit2/libgit2/archive/${_pkgfqn}"
001-fix-pkg-config.patch
002-windows-headers.patch)
md5sums=('7b21448c471dc76a3ca4801b61ac856a'
'341f5b8f48d1a489b157dc5300628960'
'0ce56d74377548c04434ed50bf3c6aa7')
# Apply the two local patches to the unpacked source tree.
prepare() {
cd ${_realname}-${pkgver}
patch -p1 -i ${srcdir}/001-fix-pkg-config.patch
patch -p1 -i ${srcdir}/002-windows-headers.patch
}
# Out-of-tree CMake build using the MSYS Makefiles generator.
build() {
mkdir -p ${srcdir}/build-${MINGW_CHOST}
cd ${srcdir}/build-${MINGW_CHOST}
${MINGW_PREFIX}/bin/cmake.exe \
-G"MSYS Makefiles" \
-DTHREADSAFE:BOOL=1 \
-DCMAKE_INSTALL_PREFIX:PATH=${pkgdir}${MINGW_PREFIX} \
${srcdir}/${_realname}-${pkgver}
make
}
# Install into the staging dir, then rewrite the Windows-style install
# prefix that CMake bakes into libgit2.pc back to ${MINGW_PREFIX}.
package() {
cd ${srcdir}/build-${MINGW_CHOST}
make install
# 'pwd -W' prints the Windows form of the current path (MSYS-specific).
pushd ${pkgdir}${MINGW_PREFIX} > /dev/null
export PREFIX_WIN=`pwd -W`
popd > /dev/null
sed -s "s|${PREFIX_WIN}|${MINGW_PREFIX}|g" \
-i ${pkgdir}${MINGW_PREFIX}/lib/pkgconfig/libgit2.pc
}
| true
|
a8e1bad22bef1f4135bb9693a29a49557c659844
|
Shell
|
reviewboard/reviewboard
|
/contrib/internal/sandbox
|
UTF-8
| 631
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Submit the current Review Board change to the sandbox buildbot builders.
# Works from a plain svn checkout (.svn) or a git-svn clone (../.git).
if test -d .svn; then
COMMAND="svn info"
elif test -d ../.git; then
COMMAND="git svn info"
else
echo "Unknown repository type. Make sure you're in the top-level of"
echo "the reviewboard directory."
exit 1
fi
# Base revision the diff applies to ("Revision: NNN" from svn info).
# $COMMAND is intentionally unquoted: it holds a command plus arguments.
REVISION=$($COMMAND | grep Revision: | cut -f2 -d" ")
# "$@" (quoted — was bare $@) forwards the script's arguments verbatim to
# post-review; the generated diff is piped straight into a buildbot try job.
./contrib/tools/post-review --output-diff "$@" | buildbot try \
--diff=- \
-p1 \
--connect="ssh" \
--builder="reviewboard_django_trunk_sandbox" \
--builder="reviewboard_django_1.0_sandbox" \
--tryhost="reviewboard.org" \
--master="reviewboard.org:9989" \
--trydir="~buildbot/masters/reviewboard/jobdir" \
--username="$USER" \
--baserev="$REVISION"
| true
|
7ce024ea7b071276785872234c5634da4be61826
|
Shell
|
piojanu/btree
|
/gen_ninjafiles.sh
|
UTF-8
| 634
| 3.421875
| 3
|
[] |
no_license
|
# Generate Ninja project files with CMake into ./build and symlink
# compile_commands.json into the project root for clang-based tooling.
# Pass "use_clang" (or "USE_CLANG") as the first argument to build with clang.
tput setaf 6
echo
echo " Project files are generated in \"build\" directory."
echo " \"compile_commands.json\" is file used by eg. clang tools."
echo
echo " NOTE: Add \"use_clang\" arg if you want to use clang compiler."
echo
tput sgr0
# Start from an empty build directory.
if [ -d "build" ]
then
rm -R build/*
else
mkdir build
fi
# Select the compiler toolchain; default is the system compiler.
# Fixed operator precedence: the old test was
#   ([ ! -z $1 ] && [ "$1" = use_clang ]) || [ "$1" = USE_CLANG ]
# — the non-empty check silently applied to only one alternative.
CLANG="OFF"
if [ "$1" = "use_clang" ] || [ "$1" = "USE_CLANG" ]
then
CLANG="ON"
fi
cd build
cmake -GNinja -DUSE_CLANG=$CLANG -DCMAKE_EXPORT_COMPILE_COMMANDS=ON ../
cd ..
# Refresh the compile_commands.json symlink (quoted "$PWD" for safety).
if [ -f "compile_commands.json" ]
then
rm compile_commands.json
fi
ln -s "$PWD/build/compile_commands.json" compile_commands.json
|
173e3dd7a05b6c483c855c8c31f30612d7ea980a
|
Shell
|
dervynm/cgir
|
/Admin.Sys.Linux/revisionControleTP/scripts/test/argumentFichier.sh
|
UTF-8
| 467
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# argumentFichier.sh: sourced by another script to check that it was
# invoked with a single *readable file* argument.
# Exit codes: 0 OK, 4 argument is not a file, 5 file is not readable.
# Uses 'return' (not 'exit') throughout because it is meant to be sourced.
OK=0
ERR_ARGUMENT_NON_FICHIER=4
ERR_FICHIER_NON_LISIBLE=5
# Delegate the "exactly one argument" check to argumentUnique.sh.
# NOTE(review): this returns as soon as argumentUnique.sh *succeeds*, which
# skips the file checks below — confirm argumentUnique.sh's return-code
# convention (the condition may need negating). Also $* should probably be
# "$@" to preserve argument word boundaries.
if source ./argumentUnique.sh $*
then
return $?
fi
# Reject anything that is not a regular file.
if [[ ! -f "$1" ]]
then
echo "$1 n'est pas un fichier"
return $ERR_ARGUMENT_NON_FICHIER
fi
# Reject files the current user cannot actually read.
if ! cat "$1" &>/dev/null
then
echo "$1 n'est pas lisible"
return $ERR_FICHIER_NON_LISIBLE
fi
return $OK
| true
|
2c932a54120958248a7a23200e8e664971fa48f6
|
Shell
|
orenc1/hyperconverged-cluster-operator
|
/hack/hpp/deploy_hpp.sh
|
UTF-8
| 732
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Deploy HostPathProvisioner on an OCP cluster:
# - single-node (SNO) clusters use the StoragePool-based deployment
# - every other topology uses the legacy deployment
# - OKD clusters are skipped entirely
#
set -ex

readonly SCRIPT_DIR=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")

CLUSTER_TOPOLOGY=$(oc get infrastructure cluster --output=jsonpath='{$.status.controlPlaneTopology}')
CLUSTER_VERSION=$(oc get clusterversion version --output=jsonpath='{.status.desired.version}')

if [[ "$CLUSTER_VERSION" != *"okd"* ]]; then
  # skipping configuring HPP in case of an OKD cluster
  if [[ "${CLUSTER_TOPOLOGY}" == 'SingleReplica' ]]; then
    "${SCRIPT_DIR}"/configure_hpp_pool.sh
  else
    "${SCRIPT_DIR}"/configure_hpp_legacy.sh
  fi
fi
| true
|
2fa29ca1ef91f0a538563a48d77c948c1a2d2a57
|
Shell
|
cogito-cea/mylittlepwny
|
/docker/entrypoint.sh
|
UTF-8
| 420
| 3.265625
| 3
|
[
"CECILL-B",
"LicenseRef-scancode-cecill-b-en"
] |
permissive
|
#!/bin/bash
# Add local user
# Either use the LOCAL_USER_ID if passed in at runtime or fallback to UID
# 9001, then drop privileges and exec the container command as that user.
#
# source: https://denibertovic.com/posts/handling-permissions-with-docker-volumes
USER_ID=${LOCAL_USER_ID:-9001}
USER_NAME=dockeruser
echo "Starting with UID : $USER_ID"
# --non-unique allows reusing a UID already present in /etc/passwd.
# Expansions quoted so a malformed LOCAL_USER_ID cannot split into
# extra useradd arguments.
useradd --shell /bin/bash --uid "$USER_ID" --non-unique --comment "" --create-home "$USER_NAME"
export HOME=/home/$USER_NAME
exec gosu "$USER_NAME" "$@"
| true
|
da77532035aab6da1e67c6254572aea51fa21232
|
Shell
|
lenik/uni
|
/base/bash-shlib/shlib.d/get_perm
|
UTF-8
| 1,467
| 3.703125
| 4
|
[] |
no_license
|
# -*- mode: sh -*-
# vim: set filetype=sh :
# get_perm [-u] NAME
# Read Windows environment variables from the registry (via Cygwin's
# regtool) and assign them to shell variables. Each argument is DEST or
# DEST=SRC, where SRC is the registry value name under the chosen scope.
function get_perm() {
local scope regpath
local arg
local dest src value
# set -- `getopt -aluser,system,help,version -- $*`
# Default scope is the machine-wide environment.
scope=machine
for arg; do
case $arg in
-u|--user)
shift; scope=user;;
-s|--system)
shift; scope=system;;
-h|--help)
echo $0 [OPTION] DEST[=SRC] [[DEST[=SRC]...]
echo OPTION:
echo -u, --user affect current-user only
echo -s, --system affect all users
echo -h, --help show this help page
echo -v, --version show version info
exit 0;;
-v|--version)
echo get_perm version 0.0.1
echo written by Lenik, distributed under GPL license.
exit 0;;
--) shift; break;;
-*) echo invalid option: $arg; exit 2;;
*) break;
esac
done
# Map the scope to a regtool registry path.
# NOTE(review): '--system' sets scope=system, but only 'user' and 'machine'
# are handled here, so --system leaves $regpath empty — confirm intent.
case $scope in
user) regpath="/user/Environment";;
machine) regpath="/machine/SYSTEM/CurrentControlSet/Control/Session Manager/Environment";;
esac
for arg; do
# Split DEST=SRC; with no '=', the registry value name equals DEST.
dest=${arg%%=*}
if [ ${#dest} -eq ${#arg} ]; then
src=$dest
else
src=${arg#*=}
fi
# NOTE(review): the ( ...; exit 10 ) runs in a subshell, so 'exit 10' does
# NOT abort get_perm — on lookup failure the loop continues and assigns an
# empty value. Verify this best-effort behavior is intended.
value=`regtool get "$regpath/$src" 2>/dev/null` ||
(echo can\'t get $src.; exit 10)
eval "$dest=\"$value\""
done
}
# NOTE(review): $* loses argument quoting; "$@" would be safer here.
get_perm $*
| true
|
66c3936e2626ccca3b42c54fac8849d2711a34ba
|
Shell
|
okigan/awscurl
|
/scripts/ci.sh
|
UTF-8
| 1,629
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CI driver: for every Python version listed in .python-version, create a
# fresh virtualenv, install the test requirements, lint, install the
# package, and run the pytest suite with coverage.
DETOX_ROOT_DIR=./build/detox
# Each non-comment line of .python-version names one pyenv interpreter.
grep -v '^ *#' < .python-version | while IFS= read -r PYENV_VERSION
do
# echo PYENV_VERSION="$PYENV_VERSION"
# echo SHELL="$SHELL"
# echo $PATH
# pyenv install -sv "${PYENV_VERSION}"
# https://github.com/pyenv/pyenv/issues/1819#issuecomment-780803524
# /root/.pyenv/bin/pyenv shell "${PYENV_VERSION}"
# Select the interpreter via the PYENV_VERSION environment variable.
export PYENV_VERSION="$PYENV_VERSION"
eval "$(pyenv init -)"
PER_VER_DIR=${DETOX_ROOT_DIR}/v${PYENV_VERSION}
VENV_DIR=${PER_VER_DIR}/venv${PYENV_VERSION}
# Subshell scopes the venv activation and the strict shell options to this
# version's run, so they do not leak into the next loop iteration.
(
echo "##### NEW DETOX ENV: " "$(uname) " "${PER_VER_DIR}" " #####"
python3 -m venv "${VENV_DIR}"
source "${VENV_DIR}"/bin/activate
echo which python="$(which python)"
echo python --version="$(python --version)"
echo pip --version="$(pip --version)"
PS4='[$(date "+%Y-%m-%d %H:%M:%S")] '
set -o errexit -o pipefail -o nounset -o xtrace
pip -q -q install --upgrade pip
# python -m ensurepip --upgrade
# pip install -r requirements.txt
pip -q -q install -r requirements-test.txt
pycodestyle .
# python -m build .
pip -q install .
# Mock credentials so request-signing code paths have values to read.
export AWS_ACCESS_KEY_ID=MOCK_AWS_ACCESS_KEY_ID
export AWS_SECRET_ACCESS_KEY=MOCK_AWS_SECRET_ACCESS_KEY
export AWS_SESSION_TOKEN=MOCK_AWS_SESSION_TOKEN
pytest \
--cov=awscurl \
--cov-fail-under=77 \
--cov-report html \
--cov-report=html:"${PER_VER_DIR}"/htmlcov \
--durations=2 \
--strict-config
)
done
| true
|
d7a84bbb023f518a885e2726cbb7ca9493b0f6d8
|
Shell
|
nnev/servicefiles
|
/matrix-irc/start.sh
|
UTF-8
| 339
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/sh
# Launcher for the matrix-appservice-irc bridge:
#   generate - write the appservice registration file
#   start    - run the bridge on port 9995
case $1 in
generate)
exec node app.js -r -f /config/appservice-registration-irc.yaml -c /config/config.yaml -u "http://localhost:9995"
;;
start)
exec node app.js -c /config/config.yaml -p 9995 -f /config/appservice-registration-irc.yaml -u http://localhost:9995
;;
*)
# Fixed: the usage line relied on echo interpreting \n and \t, which is
# shell-dependent (dash does, bash does not); printf is portable.
printf "Usage:\n\t %s (generate | start)\n" "$0"
;;
esac
| true
|
eb2c295467447960cebf2d40ea46227d9493c9a0
|
Shell
|
wytfy/Fluent
|
/PDC/fluent_run.sh
|
UTF-8
| 907
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# SLURM batch script: run an ANSYS Fluent journal file on one node with
# 16 MPI ranks, logging to fluent.log.
# The name of the script is myjob
#SBATCH -J congwang
# Only 1 hour wall-clock time will be given to this job
#SBATCH -t 1:00:00
# set the project to be charged for this
# should normally be of the format 2016-1 or 2016-16-1 or similar
#SBATCH -A 2017-103
# Number of nodes
#SBATCH -N 1
# Number of MPI processes per node
#SBATCH --ntasks-per-node=16
#SBATCH -e error_file.e
#SBATCH -o output_file.o
#SBATCH --mail-type=ALL
# load module fluent v18.2
module add fluent/18.2
# The Journal file
JOURNALFILE=mycase.jou
# Total number of Processors
#NPROCS=16
# NOTE(review): SLURM_TASKS_PER_NODE can have the form "16(x2)"; taking the
# first two characters assumes a plain two-digit task count — confirm for
# other node/task configurations.
NTASKS=`echo $SLURM_TASKS_PER_NODE | cut -c1-2`
NPROCS=`expr $SLURM_NNODES \* $NTASKS`
# Shared-memory mode on a single node; MPI over InfiniBand otherwise.
if [ $SLURM_NNODES -eq 1 ]; then
# Single node with shared memory
fluent 3ddp -g -t $NPROCS -i $JOURNALFILE > fluent.log
else
# Multi-node
fluent 3ddp -g -slurm -t $NPROCS -mpi=pcmpi -pib -i $JOURNALFILE > fluent.log
fi
| true
|
8f0879201c70722264a3a06fabdca52d855703b7
|
Shell
|
kat-co/elnode-charm
|
/src/reactive/elnode.sh
|
UTF-8
| 433
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# This will be sourced in the hook-context.
# Juju reactive charm handlers for elnode: on install/upgrade, ensure emacs
# is present, install elnode, and mark the charm active.
source $CHARM_DIR/bin/charms.reactive.sh
PATH=$CHARM_DIR/app:$PATH
# @hook is a charms.reactive decorator binding install() to these hooks.
@hook '{install,upgrade-charm}'
function install() {
installEmacs
install-elnode.el
status-set active "Ready."
set_state "elnode.available"
}
# Ensure emacs is installed.
# NOTE(review): 'apt-get -y upgrade emacs24-nox' only upgrades an already
# installed package; 'apt-get -y install emacs24-nox' is probably what was
# intended — confirm.
function installEmacs() {
status-set maintenance "Ensuring emacs is installed."
sudo apt-get -y upgrade emacs24-nox
}
reactive_handler_main
| true
|
f62ca2d40d39a83fa7469c7bf35f7d1d121ca5d7
|
Shell
|
alphashooter/caketest
|
/og-test
|
UTF-8
| 444
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
# Run the opengraph tier load-test against the staging host, writing a
# timestamped log file into the current directory.
# Fixed: ${BASH_SOURCE[0]} is a bashism that breaks under /bin/sh; $0 is
# equivalent here for an executed script. Backticks replaced with $( ).
SOURCE_DIR=$(dirname "$0")
WORKING_DIR=$(pwd)
CONFIG_FILE=$SOURCE_DIR/og-test.conf.tmp
LOG_FILE=$WORKING_DIR/og_test_$(date +%Y%m%d%H%M).log
# Write a minimal throwaway test configuration.
> "$CONFIG_FILE"
echo "group opengraph:" >> "$CONFIG_FILE"
echo " module tiers" >> "$CONFIG_FILE"
"$SOURCE_DIR"/m3highload-test --config "$CONFIG_FILE" --host cakestory-staging.plamee.com --verbose --log "$LOG_FILE"
rm -f "$CONFIG_FILE"
echo
echo "Log saved into $LOG_FILE"
echo
exit 0
| true
|
238c884286d9de996aabcad31485890ff7a57cb1
|
Shell
|
pahapoika/NginxBuild
|
/build.sh
|
UTF-8
| 3,857
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build nginx statically linked against pinned LibreSSL, PCRE and zlib trees.
# Names of latest versions of each package
export VERSION_PCRE=pcre-8.41
export VERSION_ZLIB=zlib-1.2.11
export VERSION_LIBRESSL=libressl-2.6.4
export VERSION_NGINX=nginx-1.13.9
# URLs to the source directories
export SOURCE_LIBRESSL=http://ftp.openbsd.org/pub/OpenBSD/LibreSSL/
export SOURCE_PCRE=http://ftp.csx.cam.ac.uk/pub/software/programming/pcre/
export SOURCE_NGINX=http://nginx.org/download/
export SOURCE_ZLIB=http://zlib.net/
# Path to local build
export BUILD_DIR=/tmp/nginx-static-libressl/build
# Path for libressl
export STATICLIBSSL="${BUILD_DIR}/${VERSION_LIBRESSL}"
# Create a clean build tree and install the distro build toolchain.
# Assumes a yum-based system; the Debian equivalent is kept commented out.
function setup() {
# create and clean build directory
mkdir -p ${BUILD_DIR}
rm -Rf ${BUILD_DIR}/*
# install build environment tools
yum -y groupinstall "Development Tools"
#For Debian based systems
#sudo apt-get -y install curl wget build-essential libgd-dev libgeoip-dev checkinstall git
}
# Fetch every source tarball into the build directory.
function download_sources() {
# todo: verify checksum / integrity of downloads!
echo "Download sources"
pushd ${BUILD_DIR}
local tarball_url
for tarball_url in \
    "${SOURCE_ZLIB}${VERSION_ZLIB}.tar.gz" \
    "${SOURCE_PCRE}${VERSION_PCRE}.tar.gz" \
    "${SOURCE_LIBRESSL}${VERSION_LIBRESSL}.tar.gz" \
    "${SOURCE_NGINX}${VERSION_NGINX}.tar.gz"
do
  curl -sSLO "$tarball_url"
done
popd
}
# Unpack every downloaded tarball inside the build directory.
function extract_sources() {
echo "Extracting sources"
pushd ${BUILD_DIR}
local pkg
for pkg in "${VERSION_PCRE}" "${VERSION_LIBRESSL}" "${VERSION_NGINX}" "${VERSION_ZLIB}"
do
  tar -xf "${pkg}.tar.gz"
done
popd
}
function compile_nginx() {
  # Configure and build nginx, statically linking PCRE, zlib and LibreSSL.
  echo "Configure & Build nginx"
  pushd "${BUILD_DIR}/${VERSION_NGINX}"
  # A freshly extracted tree has no Makefile yet, so an unconditional
  # "make clean" fails with a noisy error; only clean when one exists.
  [ -f Makefile ] && make clean
  ./configure \
  --prefix=/usr/share/nginx \
  --sbin-path=/usr/sbin/nginx \
  --conf-path=/etc/nginx/nginx.conf \
  --error-log-path=/var/log/nginx/error.log \
  --http-log-path=/var/log/nginx/access.log \
  --http-client-body-temp-path=/var/lib/nginx/tmp/client_body \
  --http-proxy-temp-path=/var/lib/nginx/tmp/proxy \
  --http-fastcgi-temp-path=/var/lib/nginx/tmp/fastcgi \
  --http-uwsgi-temp-path=/var/lib/nginx/tmp/uwsgi \
  --http-scgi-temp-path=/var/lib/nginx/tmp/scgi \
  --pid-path=/run/nginx.pid \
  --lock-path=/run/lock/subsys/nginx \
  --user=nginx \
  --group=nginx \
  --with-threads \
  --with-file-aio \
  --with-http_ssl_module \
  --with-http_v2_module \
  --with-http_realip_module \
  --with-http_gunzip_module \
  --with-http_gzip_static_module \
  --with-http_slice_module \
  --with-http_stub_status_module \
  --without-select_module \
  --without-poll_module \
  --without-mail_pop3_module \
  --without-mail_imap_module \
  --without-mail_smtp_module \
  --with-stream \
  --with-stream_ssl_module \
  --with-pcre="${BUILD_DIR}/${VERSION_PCRE}" \
  --with-pcre-jit \
  --with-openssl="${STATICLIBSSL}" \
  --with-zlib="${BUILD_DIR}/${VERSION_ZLIB}" \
  --with-cc-opt="-fPIC -pie -O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic" \
  --with-ld-opt="-Wl,-z,now -lrt"
  make -j4
  popd
}
# Drive the full pipeline; each stage runs only if the previous one succeeded.
echo "Building ${VERSION_NGINX} with static ${VERSION_LIBRESSL}, ${VERSION_PCRE}, and ${VERSION_ZLIB} ..."
setup && download_sources && extract_sources && compile_nginx
retval=$?
echo ""
if [ "$retval" -ne 0 ]; then
  echo "Ooops, build failed. Check output!"
else
  nginx_bin="${BUILD_DIR}/${VERSION_NGINX}/objs/nginx"
  echo "Your nginx binary is located at ${nginx_bin}."
  echo "Listing dynamically linked libraries ..."
  ldd "${nginx_bin}"
  echo ""
  "${nginx_bin}" -V
fi
| true
|
bc4b4ec607b5ae428d08e596562e22f0ac226075
|
Shell
|
DanssBot/VPSMX8.4x
|
/NEW-ULTIMATE-VPS-MX-8.0/_$_Versiones/Bot_Telegram/TeleBotGen/TeleBotGen-master/sources/donar
|
UTF-8
| 798
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
donar () {
  # Compose the donation plea and delegate rendering to msj_donar.
  # bot_retorno is deliberately global: msj_donar appears to read it (dynamic
  # scoping) — confirmed by the identical pattern elsewhere in this file.
  local chunk
  bot_retorno=""
  for chunk in \
    "$LINE\n" \
    "El proyecto BotGen requiere\nde fondos para poder continuar\n" \
    "Usted puede realizar una donacion\n" \
    "Paypal\n" \
    "MercadoPago Arg\n" \
    "o bien hacer uso del acortador\nreiteradas veces\n" \
    "$LINE\n" \
    "desde ya muchas gracias\n" \
    "$LINE\n"; do
    bot_retorno+="$chunk"
  done
  msj_donar
}
donar_OnOff () {
  # Toggle the donation feature flag at /etc/donar_active.txt and report the
  # new state via msj_fun (which reads the dynamically scoped bot_retorno).
  local flag_file="/etc/donar_active.txt"
  local state_msg
  if [[ ! -e "$flag_file" ]]; then
    echo "on" > "$flag_file"
    state_msg="Donar: <u>ON</u> ✅\n"
  else
    # rm -f is sufficient for a single regular file; the original -rf was
    # needless recursive force on a hard-coded path.
    rm -f -- "$flag_file"
    state_msg="Donar: <u>OFF</u> ❌\n"
  fi
  local bot_retorno="$LINE\n"
  bot_retorno+="$state_msg"
  bot_retorno+="$LINE\n"
  msj_fun
}
| true
|
f8023e7b26f15aee46bddf4d8e6d184d59de01ad
|
Shell
|
rusenask/vol-test
|
/terraform/files/node-scripts/src/replication/replication-sync-during-writes.bats
|
UTF-8
| 2,243
| 3.34375
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
#!/usr/bin/env bats
# End-to-end test: data stays consistent when replication is enabled while a
# large write is in progress, then verified from the other node.
load ../test_helper
# NOTE(review): NAMESPACE is presumably consumed by helpers in test_helper —
# confirm against that file.
export NAMESPACE=test
# Provision: create the test volume on node 2, verify it is listed, and start
# a long-lived container that keeps it mounted for the write test below.
@test "Create non-replicated volume using driver ($driver)" {
run $prefix2 docker volume create --driver $driver --opt size=10 stress-sync
assert_success
}
@test "Confirm volume is created (volume ls) using driver ($driver)" {
run $prefix2 docker volume ls
assert_line --partial "stress-sync"
}
@test "Start a container and mount the volume on node 2" {
run $prefix2 docker run -it -d --name mounter -v stress-sync:/data ubuntu /bin/bash
assert_success
}
@test "initiate big write in background then trigger replication" {
# Start a ~1GB background write, then enable replication mid-write.
# NOTE(review): the docker command is passed as ONE quoted word — this only
# works if $prefix2 eval-expands its arguments (e.g. an ssh wrapper); confirm.
$prefix2 docker exec 'mounter dd if=/dev/urandom of=/data/random bs=10M count=100' &
# wait a little just to ensure operation is in progress..
sleep 2
# Add replication
run $prefix2 storageos $cliopts volume update --label-add 'storageos.feature.replicas=2' default/stress-sync
assert_success
}
@test "Wait for replication, Get a checksum for that binary file" {
# Poll until replicas exist and master plus every replica report "healthy",
# then record an md5 checksum of the written file for later comparison.
ACTIVE=$(eval "$prefix2" storageos "$cliopts" volume inspect default/stress-sync | jq 'first.replicas != [] and ( first.replicas + [first.master] | map(.health == "healthy") | all)')
while ! [[ "$ACTIVE" == "true" ]]; do
ACTIVE=$(eval "$prefix2" storageos "$cliopts" volume inspect default/stress-sync | jq 'first.replicas != [] and (first.replicas + [first.master] | map(.health == "healthy") | all)')
sleep 2
done
run $prefix2 'docker exec -i mounter /bin/bash -c "md5sum /data/random > /data/checksum"'
assert_success
}
# Take node 2 fully offline (container, then the storageos plugin) so the
# checksum verification below must be served from the replica on node 1.
@test "Stop container on node 2" {
run $prefix2 docker stop mounter
assert_success
}
@test "Destroy container on node 2" {
run $prefix2 docker rm mounter
assert_success
}
@test "Stop storageos on node 2" {
run $prefix2 docker plugin disable -f $driver
assert_success
}
@test "Confirm checksum on node 1" {
# Mount the replica on node 1 (with node 2 down) and verify the data matches
# the checksum recorded on node 2.
# Fix: the volume name was misspelled "stess-sync", which would silently
# create and mount a brand-new empty volume, so the md5 check could never pass.
run $prefix docker run -i --rm -v stress-sync:/data ubuntu md5sum --check /data/checksum
assert_success
}
@test "Re-start storageos on node 2" {
run $prefix2 docker plugin enable $driver
assert_success
}
@test "wait for 60 seconds" {
# Give the re-enabled plugin time to rejoin the cluster before cleanup.
sleep 60
assert_success
}
@test "Delete volume using storageos cli" {
run $prefix storageos $cliopts volume rm default/stress-sync
assert_success
}
| true
|
477a03e4731d39ad9213382dce5fbb6e1d53306a
|
Shell
|
Hadryan/stock-analyse
|
/script/strategies.bats
|
UTF-8
| 39,203
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bats
# https://github.com/bats-core/bats-core
# load '/d/code/bats-assert/load.bash'
# Import
# Unit tests for the buy/sell signal strategies. Must run from the repo root
# so the relative ./script paths resolve.
. ./script/constants.sh
. ./script/strategies.sh
# Constants
OUT_RESULT_FILE=temp/_result.html
SYMBOL=BEI
SYMBOL_NAME="BEI BEIERSDORF AG"
@test "StrategieOverratedStochasticWhenOwn" {
# Stochastic sell-while-owned rule: only the final call (owned marker "*",
# values 91/92/89) may produce 'Sell: Stochastic Own (SO)'; every other
# variation of the inputs must leave the result empty.
# Stub the comdirect output helper so the strategy has no side effects.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
StrategieOverratedStochasticWhenOwn
[ "$resultStrategieOverratedStochasticWhenOwn" == '' ]
StrategieOverratedStochasticWhenOwn 91 90 89 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*"
[ "$resultStrategieOverratedStochasticWhenOwn" == '' ]
StrategieOverratedStochasticWhenOwn 91 92 89 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" ""
[ "$resultStrategieOverratedStochasticWhenOwn" == '' ]
StrategieOverratedStochasticWhenOwn 91 90 92 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" ""
[ "$resultStrategieOverratedStochasticWhenOwn" == '' ]
StrategieOverratedStochasticWhenOwn 91 91 89 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*"
[ "$resultStrategieOverratedStochasticWhenOwn" == '' ]
StrategieOverratedStochasticWhenOwn 91 93 92 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*"
[ "$resultStrategieOverratedStochasticWhenOwn" == '' ]
StrategieOverratedStochasticWhenOwn 91 92 89 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*"
[ "$resultStrategieOverratedStochasticWhenOwn" == 'Sell: Stochastic Own (SO)' ]
}
@test "StrategieOverratedDivergenceRSI" {
# Bearish RSI divergence: only the last two calls may yield
# 'Sell: RSI Divergence (D)'; each earlier call flips exactly one input
# (ownership, price direction, RSI level) and must stay empty.
# Stub the comdirect output helper so the strategy has no side effects.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
StrategieOverratedDivergenceRSI
[ "$resultStrategieOverratedDivergenceRSI" == '' ]
StrategieOverratedDivergenceRSI 75 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*" "0" "101" "100" "80" "79"
[ "$resultStrategieOverratedDivergenceRSI" == '' ]
StrategieOverratedDivergenceRSI 75 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*" "1" "100" "100" "80" "81"
[ "$resultStrategieOverratedDivergenceRSI" == '' ]
StrategieOverratedDivergenceRSI 75 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*" "-1" "101" "100" "80" "81"
[ "$resultStrategieOverratedDivergenceRSI" == '' ]
StrategieOverratedDivergenceRSI 75 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*" "1" "101" "100" "70" "71"
[ "$resultStrategieOverratedDivergenceRSI" == '' ]
StrategieOverratedDivergenceRSI 75 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*" "1" "101" "100" "80" "81"
[ "$resultStrategieOverratedDivergenceRSI" == 'Sell: RSI Divergence (D)' ]
StrategieOverratedDivergenceRSI 75 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*" "1" "101" "100" "81" "81"
[ "$resultStrategieOverratedDivergenceRSI" == 'Sell: RSI Divergence (D)' ]
}
@test "StrategieUnderratedDivergenceRSI" {
# Bullish RSI divergence (mirror of the sell case): only the last two calls
# may yield 'Buy: RSI Divergence (D)'; earlier calls each break one condition.
# Stub the comdirect output helper so the strategy has no side effects.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
StrategieUnderratedDivergenceRSI
[ "$resultStrategieUnderratedDivergenceRSI" == '' ]
StrategieUnderratedDivergenceRSI 25 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*" "0" "99" "100" "20" "19"
[ "$resultStrategieUnderratedDivergenceRSI" == '' ]
StrategieUnderratedDivergenceRSI 25 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*" "-1" "100" "100" "20" "19"
[ "$resultStrategieUnderratedDivergenceRSI" == '' ]
StrategieUnderratedDivergenceRSI 25 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*" "-1" "99" "100" "20" "21"
[ "$resultStrategieUnderratedDivergenceRSI" == '' ]
StrategieUnderratedDivergenceRSI 25 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*" "-1" "99" "100" "30" "29"
[ "$resultStrategieUnderratedDivergenceRSI" == '' ]
StrategieUnderratedDivergenceRSI 25 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*" "-1" "99" "100" "20" "20"
[ "$resultStrategieUnderratedDivergenceRSI" == 'Buy: RSI Divergence (D)' ]
StrategieUnderratedDivergenceRSI 25 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" "*" "-1" "99" "100" "20" "19"
[ "$resultStrategieUnderratedDivergenceRSI" == 'Buy: RSI Divergence (D)' ]
}
@test "StrategieOverratedXHighRSI" {
# Sell when the trailing run of RSI values all exceed the threshold; the
# message names how long the run is ('High 3/4/5 last RSI'). Fixtures vary
# only the tail of the series; expectations are pinned per call below.
# Stub the comdirect output helper so the strategy has no side effects.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
StrategieOverratedXHighRSI
[ "$resultStrategieOverratedXHighRSI" == '' ]
StrategieOverratedXHighRSI 75 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 72, 79, 80, 74, 71, 62, 62," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighRSI" == '' ]
StrategieOverratedXHighRSI 75 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 72, 79, 80, 74, 71, 22, 62," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighRSI" == '' ]
StrategieOverratedXHighRSI 75 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 72, 72, 72, 73, 74, 76, 77, 76," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighRSI" == 'Sell: High 3 last RSI (R): 3 last quotes are over 75' ]
resultStrategieOverratedXHighRSI=""
StrategieOverratedXHighRSI 98 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 72, 79, 80, 74, 99, 100, 99," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighRSI" == 'Sell: High 3 last RSI (R): 3 last quotes are over 98' ]
resultStrategieOverratedXHighRSI=""
StrategieOverratedXHighRSI 75 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 72, 75, 75, 76, 99, 100, 99," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighRSI" == 'Sell: High 4 last RSI (R): 4 last quotes are over 75' ]
resultStrategieOverratedXHighRSI=""
StrategieOverratedXHighRSI 75 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 72, 75, 76, 77, 99, 100, 99," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighRSI" == 'Sell: High 5 last RSI (R): 5 last quotes are over 75' ]
resultStrategieOverratedXHighRSI=""
StrategieOverratedXHighRSI 75 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 75, 71, 76, 77, 99, 70, 99," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighRSI" == 'Sell: High 4 last RSI (R): 4 last quotes are over 75' ]
resultStrategieOverratedXHighRSI=""
StrategieOverratedXHighRSI 75 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 75, 78, 76, 77, 70, 70, 76," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighRSI" == 'Sell: High 4 last RSI (R): 4 last quotes are over 75' ]
resultStrategieOverratedXHighRSI=""
StrategieOverratedXHighRSI 75 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 75, 78, 76, 77, 70, 70, 70," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighRSI" == '' ]
}
@test "StrategieUnderratedXLowRSI" {
# Buy when the trailing run of RSI values all stay under the threshold; the
# message names the run length ('Low 3..7 last RSI'). Only the series tail
# varies between calls; expectations are pinned per call below.
# Stub the comdirect output helper so the strategy has no side effects.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
StrategieUnderratedXLowRSI
[ "$resultStrategieUnderratedXLowRSI" == '' ]
StrategieUnderratedXLowRSI 25 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 72, 79, 80, 74, 71, 62, 62," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowRSI" == '' ]
StrategieUnderratedXLowRSI 25 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 72, 79, 80, 74, 71, 22, 62," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowRSI" == '' ]
StrategieUnderratedXLowRSI 25 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 72, 79, 80, 74, 23, 22, 22," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowRSI" == 'Buy: Low 3 last RSI (R): 3 last quotes are under 25' ]
resultStrategieUnderratedXLowRSI=""
StrategieUnderratedXLowRSI 2 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 72, 79, 80, 74, 0, 0, 0," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowRSI" == 'Buy: Low 3 last RSI (R): 3 last quotes are under 2' ]
resultStrategieUnderratedXLowRSI=""
StrategieUnderratedXLowRSI 9 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 72, 79, 80, 8, 0, 0, 0," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowRSI" == 'Buy: Low 4 last RSI (R): 4 last quotes are under 9' ]
resultStrategieUnderratedXLowRSI=""
StrategieUnderratedXLowRSI 9 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 72, 79, 8, 8, 0, 0, 0," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowRSI" == 'Buy: Low 5 last RSI (R): 5 last quotes are under 9' ]
resultStrategieUnderratedXLowRSI=""
StrategieUnderratedXLowRSI 9 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 72, 5, 8, 8, 0, 0, 0," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowRSI" == 'Buy: Low 6 last RSI (R): 6 last quotes are under 9' ]
resultStrategieUnderratedXLowRSI=""
StrategieUnderratedXLowRSI 9 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 2, 5, 8, 8, 0, 0, 0," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowRSI" == 'Buy: Low 7 last RSI (R): 7 last quotes are under 9' ]
resultStrategieUnderratedXLowRSI=""
StrategieUnderratedXLowRSI 9 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 2, 9, 9, 8, 0, 0, 8," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowRSI" == 'Buy: Low 5 last RSI (R): 5 last quotes are under 9' ]
resultStrategieUnderratedXLowRSI=""
StrategieUnderratedXLowRSI 9 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 2, 0, 0, 8, 9, 9, 0," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowRSI" == 'Buy: Low 5 last RSI (R): 5 last quotes are under 9' ]
resultStrategieUnderratedXLowRSI=""
StrategieUnderratedXLowRSI 9 " 29, 25, 23, 27, 33, 29, 33, 31, 31, 33, 40, 30, 40, 44, 40, 44, 52, 44, 41, 48, 43, 47, 54, 56, 41, 45, 28, 34, 35, 49, 45, 49, 44, 42, 63, 66, 65, 58, 65, 72, 76, 75, 68, 63, 68, 74, 75, 76, 64, 56, 56, 63, 63, 61, 58, 61, 70, 74, 66, 51, 52, 59, 62, 67, 58, 56, 58, 48, 49, 42, 39, 43, 53, 60, 64, 62, 69, 74, 88, 77, 2, 0, 0, 8, 9, 9, 9," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowRSI" == '' ]
}
@test "StrategieOverratedXHighStochastic" {
# Sell when the trailing run of stochastic values exceeds the threshold
# ('High 4/5 last Stochastic'). Leading empty fields mimic the real series
# format, which starts with blanks.
# Stub the comdirect output helper so the strategy has no side effects.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
StrategieOverratedXHighStochastic
[ "$resultStrategieOverratedXHighStochastic" == '' ]
StrategieOverratedXHighStochastic 81 " , , 11, 11, 11, 11, 80, 81, 70," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighStochastic" == '' ]
StrategieOverratedXHighStochastic 81 " , , 11, 11, 11, 83, 100, 82, 100," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighStochastic" == 'Sell: High 4 last Stochastic (S): 4 last quotes are over 81' ]
resultStrategieOverratedXHighStochastic=""
StrategieOverratedXHighStochastic 81 " , , 84, 11, 11, 11, 100, 82, 100," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighStochastic" == 'Sell: High 4 last Stochastic (S): 4 last quotes are over 81' ]
resultStrategieOverratedXHighStochastic=""
StrategieOverratedXHighStochastic 81 " , , 11, 11, 99, 11, 82, 82, 88," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighStochastic" == 'Sell: High 4 last Stochastic (S): 4 last quotes are over 81' ]
resultStrategieOverratedXHighStochastic=""
StrategieOverratedXHighStochastic 81 " , , 88, 11, 11, 85, 84, 77, 82," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighStochastic" == 'Sell: High 4 last Stochastic (S): 4 last quotes are over 81' ]
resultStrategieOverratedXHighStochastic=""
StrategieOverratedXHighStochastic 81 " , , 11, 11, 86, 85, 84, 83, 82," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighStochastic" == 'Sell: High 5 last Stochastic (S): 5 last quotes are over 81' ]
resultStrategieOverratedXHighStochastic=""
StrategieOverratedXHighStochastic 81 " , , 11, 11, 87, 80, 86, 85, 84, 80, 82," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighStochastic" == 'Sell: High 5 last Stochastic (S): 5 last quotes are over 81' ]
resultStrategieOverratedXHighStochastic=""
StrategieOverratedXHighStochastic 81 " , , 11, 11, 87, 80, 86, 85, 82, 83, 80," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighStochastic" == 'Sell: High 5 last Stochastic (S): 5 last quotes are over 81' ]
resultStrategieOverratedXHighStochastic=""
StrategieOverratedXHighStochastic 81 " , , 11, 11, 80, 87, 86, 85, 80, 80, 80," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedXHighStochastic" == '' ]
}
@test "StrategieUnderratedXLowStochastic" {
# Buy when the trailing run of stochastic values stays under the threshold
# ('Low 4..7 last Stochastic'); mirror of the overrated case above.
# Stub the comdirect output helper so the strategy has no side effects.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
StrategieUnderratedXLowStochastic
[ "$resultStrategieUnderratedXLowStochastic" == '' ]
StrategieUnderratedXLowStochastic 9 " , , 11, 11, 11, 11, 8, 17, 8," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowStochastic" == '' ]
StrategieUnderratedXLowStochastic 9 " , , 11, 11, 11, 11, 8, 17, 8," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowStochastic" == '' ]
StrategieUnderratedXLowStochastic 9 " , , 11, 11, 11, 0, 0, 0, 0," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowStochastic" == 'Buy: Low 4 last Stochastic (S): 4 last quotes are under 9' ]
resultStrategieUnderratedXLowStochastic=""
StrategieUnderratedXLowStochastic 9 " , , 0, 11, 0, 11, 0, 11, 0," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowStochastic" == 'Buy: Low 4 last Stochastic (S): 4 last quotes are under 9' ]
resultStrategieUnderratedXLowStochastic=""
StrategieUnderratedXLowStochastic 9 " , , 11, 11, 11, 1, 0, 0, 0," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowStochastic" == 'Buy: Low 4 last Stochastic (S): 4 last quotes are under 9' ]
resultStrategieUnderratedXLowStochastic=""
StrategieUnderratedXLowStochastic 9 " , , 1, 1, 1, 1, 0, 0, 0," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowStochastic" == 'Buy: Low 7 last Stochastic (S): 7 last quotes are under 9' ]
resultStrategieUnderratedXLowStochastic=""
StrategieUnderratedXLowStochastic 9 " , , 1, 1, 1, 1, 0, 9, 0," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowStochastic" == 'Buy: Low 6 last Stochastic (S): 6 last quotes are under 9' ]
resultStrategieUnderratedXLowStochastic=""
StrategieUnderratedXLowStochastic 9 " , , 1, 9, 1, 1, 0, 9, 0," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowStochastic" == 'Buy: Low 5 last Stochastic (S): 5 last quotes are under 9' ]
resultStrategieUnderratedXLowStochastic=""
StrategieUnderratedXLowStochastic 9 " , , 1, 9, 1, 9, 0, 0, 9," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedXLowStochastic" == 'Buy: Low 4 last Stochastic (S): 4 last quotes are under 9' ]
}
@test "StrategieByTendency" {
# Buy/sell bands around the 95-day average depend on the tendency constant:
# RISING tolerates a wider band than LEVEL, which tolerates more than FALLING.
# Boundary values on each side of every band are probed below.
# Stub the comdirect output helper so the strategy has no side effects.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
StrategieByTendency
[ "$resultStrategieByTendency" == '' ]
StrategieByTendency 100 "$RISING" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == '' ]
resultStrategieByTendency=""
StrategieByTendency 99 "$RISING" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == 'Buy: Low Quote by Tendency (T): 99€ is under Avg95 100€ with Tendency RISING' ]
resultStrategieByTendency=""
StrategieByTendency 110 "$RISING" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == '' ]
resultStrategieByTendency=""
StrategieByTendency 111 "$RISING" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == 'Sell: High Quote by Tendency (T): 111€ is over Avg95 100€ with Tendency RISING' ]
resultStrategieByTendency=""
StrategieByTendency 96 "$LEVEL" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == 'Buy: Low Quote by Tendency (T): 96€ is under Avg95 100€ with Tendency LEVEL' ]
resultStrategieByTendency=""
StrategieByTendency 98 "$LEVEL" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == '' ]
resultStrategieByTendency=""
StrategieByTendency 100 "$LEVEL" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == '' ]
resultStrategieByTendency=""
StrategieByTendency 103 "$LEVEL" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == '' ]
resultStrategieByTendency=""
StrategieByTendency 104 "$LEVEL" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == 'Sell: High Quote by Tendency (T): 104€ is over Avg95 100€ with Tendency LEVEL' ]
resultStrategieByTendency=""
StrategieByTendency 90 "$FALLING" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == 'Buy: Low Quote by Tendency (T): 90€ is under Avg95 100€ with Tendency FALLING' ]
resultStrategieByTendency=""
StrategieByTendency 91 "$FALLING" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == '' ]
resultStrategieByTendency=""
StrategieByTendency 100 "$FALLING" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == '' ]
resultStrategieByTendency=""
StrategieByTendency 101 "$FALLING" "1.01" 100 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieByTendency" == 'Sell: High Quote by Tendency (T): 101€ is over Avg95 100€ with Tendency FALLING' ]
}
@test "StrategieOverratedHighHorizontalMACD" {
# Sell when the MACD flattens out at a high level; the inline "# , x, y, z,"
# comments show the tail of each fixture series. Only the last two calls
# (tails 0.1,0.2,0.2 and 0.2,0.2,0.2) are expected to produce the signal.
# Stub the comdirect output helper so the strategy has no side effects.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
StrategieOverratedHighHorizontalMACD
[ "$resultStrategieOverratedHighHorizontalMACD" == '' ]
# , -0.1, 0.2, 0.3, 0.4,
StrategieOverratedHighHorizontalMACD " , , , , , , , , , , , , -1.189, -0.879, -0.825, -0.933, -0.488, -0.174, 1.031, 1.964, 2.592, 2.815, 3.437, 4.084, 4.744, 5.167, 5.838, 6.537, 7.01, 6.663, 6.834, 6.969, 6.916, 6.756, 6.434, 6.028, 5.762, 6.085, 5.87, 5.874, 5.491, 5.528, 4.138, 3.32, 1.724, 0.892, 0.589, -1.609, -3.193, -4.597, -6.243, -7.342, -8.398, -9.29, -9.643, -9.331, -8.502, -7.556, -6.689, -5.734, -5.239, -5.51, -5.899, -6.256, -6.305, -6.232, -6.263, -6.056, -5.825, -5.618, -5.551, -5.874, -6.379, -6.183, -6.079, -5.888, -5.792, -5.81, -5.877, -7.865, -8.615, -8.658, -9.662, -0.1, 0.2, 0.3, 0.4," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighHorizontalMACD" == '' ]
# , 0.2, 0.1, 0.2,
StrategieOverratedHighHorizontalMACD " , , , , , , , , , , , , -1.189, -0.879, -0.825, -0.933, -0.488, -0.174, 1.031, 1.964, 2.592, 2.815, 3.437, 4.084, 4.744, 5.167, 5.838, 6.537, 7.01, 6.663, 6.834, 6.969, 6.916, 6.756, 6.434, 6.028, 5.762, 6.085, 5.87, 5.874, 5.491, 5.528, 4.138, 3.32, 1.724, 0.892, 0.589, -1.609, -3.193, -4.597, -6.243, -7.342, -8.398, -9.29, -9.643, -9.331, -8.502, -7.556, -6.689, -5.734, -5.239, -5.51, -5.899, -6.256, -6.305, -6.232, -6.263, -6.056, -5.825, -5.618, -5.551, -5.874, -6.379, -6.183, -6.079, -5.888, -5.792, -5.81, -5.877, -7.865, -8.615, -8.658, -9.662, -9.62, 0.2, 0.1, 0.2," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighHorizontalMACD" == '' ]
# , 0.2, 0.2, 0.1,
StrategieOverratedHighHorizontalMACD " , , , , , , , , , , , , -1.189, -0.879, -0.825, -0.933, -0.488, -0.174, 1.031, 1.964, 2.592, 2.815, 3.437, 4.084, 4.744, 5.167, 5.838, 6.537, 7.01, 6.663, 6.834, 6.969, 6.916, 6.756, 6.434, 6.028, 5.762, 6.085, 5.87, 5.874, 5.491, 5.528, 4.138, 3.32, 1.724, 0.892, 0.589, -1.609, -3.193, -4.597, -6.243, -7.342, -8.398, -9.29, -9.643, -9.331, -8.502, -7.556, -6.689, -5.734, -5.239, -5.51, -5.899, -6.256, -6.305, -6.232, -6.263, -6.056, -5.825, -5.618, -5.551, -5.874, -6.379, -6.183, -6.079, -5.888, -5.792, -5.81, -5.877, -7.865, -8.615, -8.658, -9.662, -9.62, 0.2, 0.2, 0.1," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighHorizontalMACD" == '' ]
# , 0.2, 0.3, 0.2,
StrategieOverratedHighHorizontalMACD " , , , , , , , , , , , , -1.189, -0.879, -0.825, -0.933, -0.488, -0.174, 1.031, 1.964, 2.592, 2.815, 3.437, 4.084, 4.744, 5.167, 5.838, 6.537, 7.01, 6.663, 6.834, 6.969, 6.916, 6.756, 6.434, 6.028, 5.762, 6.085, 5.87, 5.874, 5.491, 5.528, 4.138, 3.32, 1.724, 0.892, 0.589, -1.609, -3.193, -4.597, -6.243, -7.342, -8.398, -9.29, -9.643, -9.331, -8.502, -7.556, -6.689, -5.734, -5.239, -5.51, -5.899, -6.256, -6.305, -6.232, -6.263, -6.056, -5.825, -5.618, -5.551, -5.874, -6.379, -6.183, -6.079, -5.888, -5.792, -5.81, -5.877, -7.865, -8.615, -8.658, -9.662, -9.62, 0.2, 0.3, 0.2," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighHorizontalMACD" == '' ]
# , 0.1, 0.2, 0.2,
resultStrategieOverratedHighHorizontalMACD=""
StrategieOverratedHighHorizontalMACD " , , , , , , , , , , , , -1.189, -0.879, -0.825, -0.933, -0.488, -0.174, 1.031, 1.964, 2.592, 2.815, 3.437, 4.084, 4.744, 5.167, 5.838, 6.537, 7.01, 6.663, 6.834, 6.969, 6.916, 6.756, 6.434, 6.028, 5.762, 6.085, 5.87, 5.874, 5.491, 5.528, 4.138, 3.32, 1.724, 0.892, 0.589, -1.609, -3.193, -4.597, -6.243, -7.342, -8.398, -9.29, -9.643, -9.331, -8.502, -7.556, -6.689, -5.734, -5.239, -5.51, -5.899, -6.256, -6.305, -6.232, -6.263, -6.056, -5.825, -5.618, -5.551, -5.874, -6.379, -6.183, -6.079, -5.888, -5.792, -5.81, -5.877, -7.865, -8.615, -8.658, -9.662, -9.62, 0.1, 0.2, 0.2," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighHorizontalMACD" == 'Sell: High horizontal MACD (M): last MACD 0.2' ]
# , 0.2, 0.2, 0.2,
resultStrategieOverratedHighHorizontalMACD=""
StrategieOverratedHighHorizontalMACD " , , , , , , , , , , , , -1.189, -0.879, -0.825, -0.933, -0.488, -0.174, 1.031, 1.964, 2.592, 2.815, 3.437, 4.084, 4.744, 5.167, 5.838, 6.537, 7.01, 6.663, 6.834, 6.969, 6.916, 6.756, 6.434, 6.028, 5.762, 6.085, 5.87, 5.874, 5.491, 5.528, 4.138, 3.32, 1.724, 0.892, 0.589, -1.609, -3.193, -4.597, -6.243, -7.342, -8.398, -9.29, -9.643, -9.331, -8.502, -7.556, -6.689, -5.734, -5.239, -5.51, -5.899, -6.256, -6.305, -6.232, -6.263, -6.056, -5.825, -5.618, -5.551, -5.874, -6.379, -6.183, -6.079, -5.888, -5.792, -5.81, -5.877, -7.865, -8.615, -8.658, -9.662, -9.62, 0.2, 0.2, 0.2," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighHorizontalMACD" == 'Sell: High horizontal MACD (M): last MACD 0.2' ]
}
@test "StrategieUnderratedLowHorizontalMACD" {
# Stub the URL writer so the strategy under test performs no real I/O.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
# Called with no arguments: must produce no buy signal.
StrategieUnderratedLowHorizontalMACD
[ "$resultStrategieUnderratedLowHorizontalMACD" == '' ]
# Steadily falling MACD tail (-0.1, -0.2, -0.3, -0.4): not horizontal -> no signal.
# , -0.1, -0.2, -0.3, -0.4,
StrategieUnderratedLowHorizontalMACD " , , , , , , , , , , , , -1.189, -0.879, -0.825, -0.933, -0.488, -0.174, 1.031, 1.964, 2.592, 2.815, 3.437, 4.084, 4.744, 5.167, 5.838, 6.537, 7.01, 6.663, 6.834, 6.969, 6.916, 6.756, 6.434, 6.028, 5.762, 6.085, 5.87, 5.874, 5.491, 5.528, 4.138, 3.32, 1.724, 0.892, 0.589, -1.609, -3.193, -4.597, -6.243, -7.342, -8.398, -9.29, -9.643, -9.331, -8.502, -7.556, -6.689, -5.734, -5.239, -5.51, -5.899, -6.256, -6.305, -6.232, -6.263, -6.056, -5.825, -5.618, -5.551, -5.874, -6.379, -6.183, -6.079, -5.888, -5.792, -5.81, -5.877, -7.865, -8.615, -8.658, -9.662, -0.1, -0.2, -0.3, -0.4," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedLowHorizontalMACD" == '' ]
# Tail rises at the end (-0.2, -0.1, -0.2): no signal.
# , -0.2, -0.1, -0.2,
StrategieUnderratedLowHorizontalMACD " , , , , , , , , , , , , -1.189, -0.879, -0.825, -0.933, -0.488, -0.174, 1.031, 1.964, 2.592, 2.815, 3.437, 4.084, 4.744, 5.167, 5.838, 6.537, 7.01, 6.663, 6.834, 6.969, 6.916, 6.756, 6.434, 6.028, 5.762, 6.085, 5.87, 5.874, 5.491, 5.528, 4.138, 3.32, 1.724, 0.892, 0.589, -1.609, -3.193, -4.597, -6.243, -7.342, -8.398, -9.29, -9.643, -9.331, -8.502, -7.556, -6.689, -5.734, -5.239, -5.51, -5.899, -6.256, -6.305, -6.232, -6.263, -6.056, -5.825, -5.618, -5.551, -5.874, -6.379, -6.183, -6.079, -5.888, -5.792, -5.81, -5.877, -7.865, -8.615, -8.658, -9.662, -9.62, -0.2, -0.1, -0.2," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedLowHorizontalMACD" == '' ]
# Tail ends higher than it started (-0.2, -0.2, -0.1): no signal.
# , -0.2, -0.2, -0.1,
StrategieUnderratedLowHorizontalMACD " , , , , , , , , , , , , -1.189, -0.879, -0.825, -0.933, -0.488, -0.174, 1.031, 1.964, 2.592, 2.815, 3.437, 4.084, 4.744, 5.167, 5.838, 6.537, 7.01, 6.663, 6.834, 6.969, 6.916, 6.756, 6.434, 6.028, 5.762, 6.085, 5.87, 5.874, 5.491, 5.528, 4.138, 3.32, 1.724, 0.892, 0.589, -1.609, -3.193, -4.597, -6.243, -7.342, -8.398, -9.29, -9.643, -9.331, -8.502, -7.556, -6.689, -5.734, -5.239, -5.51, -5.899, -6.256, -6.305, -6.232, -6.263, -6.056, -5.825, -5.618, -5.551, -5.874, -6.379, -6.183, -6.079, -5.888, -5.792, -5.81, -5.877, -7.865, -8.615, -8.658, -9.662, -9.62, -0.2, -0.2, -0.1," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedLowHorizontalMACD" == '' ]
# Low plateau after a dip (-0.1, -0.2, -0.2): buy signal expected.
# , -0.1, -0.2, -0.2,
resultStrategieUnderratedLowHorizontalMACD=""
StrategieUnderratedLowHorizontalMACD " , , , , , , , , , , , , -1.189, -0.879, -0.825, -0.933, -0.488, -0.174, 1.031, 1.964, 2.592, 2.815, 3.437, 4.084, 4.744, 5.167, 5.838, 6.537, 7.01, 6.663, 6.834, 6.969, 6.916, 6.756, 6.434, 6.028, 5.762, 6.085, 5.87, 5.874, 5.491, 5.528, 4.138, 3.32, 1.724, 0.892, 0.589, -1.609, -3.193, -4.597, -6.243, -7.342, -8.398, -9.29, -9.643, -9.331, -8.502, -7.556, -6.689, -5.734, -5.239, -5.51, -5.899, -6.256, -6.305, -6.232, -6.263, -6.056, -5.825, -5.618, -5.551, -5.874, -6.379, -6.183, -6.079, -5.888, -5.792, -5.81, -5.877, -7.865, -8.615, -8.658, -9.662, -9.62, -0.1, -0.2, -0.2," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedLowHorizontalMACD" == 'Buy: Low horizontal MACD (M): last MACD -0.2' ]
# # , -0.2, -0.2, -0.2,
# resultStrategieUnderratedLowHorizontalMACD=""
# StrategieUnderratedLowHorizontalMACD " , , , , , , , , , , , , -1.189, -0.879, -0.825, -0.933, -0.488, -0.174, 1.031, 1.964, 2.592, 2.815, 3.437, 4.084, 4.744, 5.167, 5.838, 6.537, 7.01, 6.663, 6.834, 6.969, 6.916, 6.756, 6.434, 6.028, 5.762, 6.085, 5.87, 5.874, 5.491, 5.528, 4.138, 3.32, 1.724, 0.892, 0.589, -1.609, -3.193, -4.597, -6.243, -7.342, -8.398, -9.29, -9.643, -9.331, -8.502, -7.556, -6.689, -5.734, -5.239, -5.51, -5.899, -6.256, -6.305, -6.232, -6.263, -6.056, -5.825, -5.618, -5.551, -5.874, -6.379, -6.183, -6.079, -5.888, -5.792, -5.81, -5.877, -7.865, -8.615, -8.658, -9.662, -9.62, -0.2, -0.2, -0.2," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
# [ "$resultStrategieUnderratedLowHorizontalMACD" == 'Buy: Low horizontal MACD (M): last MACD -0.2' ]
# Lower plateau (-0.2, -0.3, -0.3): buy signal with last MACD -0.3.
# , -0.2, -0.3, -0.3,
resultStrategieUnderratedLowHorizontalMACD=""
StrategieUnderratedLowHorizontalMACD " , , , , , , , , , , , , -1.189, -0.879, -0.825, -0.933, -0.488, -0.174, 1.031, 1.964, 2.592, 2.815, 3.437, 4.084, 4.744, 5.167, 5.838, 6.537, 7.01, 6.663, 6.834, 6.969, 6.916, 6.756, 6.434, 6.028, 5.762, 6.085, 5.87, 5.874, 5.491, 5.528, 4.138, 3.32, 1.724, 0.892, 0.589, -1.609, -3.193, -4.597, -6.243, -7.342, -8.398, -9.29, -9.643, -9.331, -8.502, -7.556, -6.689, -5.734, -5.239, -5.51, -5.899, -6.256, -6.305, -6.232, -6.263, -6.056, -5.825, -5.618, -5.551, -5.874, -6.379, -6.183, -6.079, -5.888, -5.792, -5.81, -5.877, -7.865, -8.615, -8.658, -9.662, -9.62, -0.2, -0.3, -0.3," "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedLowHorizontalMACD" == 'Buy: Low horizontal MACD (M): last MACD -0.3' ]
}
@test "StrategieOverratedHighStochasticHighRSIHighMACD" {
# Stub the URL writer so the strategy under test performs no real I/O.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
# No arguments: must produce no sell signal.
StrategieOverratedHighStochasticHighRSIHighMACD
[ "$resultStrategieOverratedHighStochasticHighRSIHighMACD" == '' ]
# Args appear to be: stochLimit rsiLimit stochQuote rsiQuote macd outfile symbol name.
# Only all three conditions together (stoch over limit, RSI over limit,
# positive MACD) should trigger the sell signal.
StrategieOverratedHighStochasticHighRSIHighMACD 91 70 90 69 "-0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighStochasticHighRSIHighMACD" == '' ]
StrategieOverratedHighStochasticHighRSIHighMACD 91 70 92 69 "-0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighStochasticHighRSIHighMACD" == '' ]
StrategieOverratedHighStochasticHighRSIHighMACD 91 70 90 71 "0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighStochasticHighRSIHighMACD" == '' ]
StrategieOverratedHighStochasticHighRSIHighMACD 91 70 90 71 "-0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighStochasticHighRSIHighMACD" == '' ]
StrategieOverratedHighStochasticHighRSIHighMACD 91 70 92 71 "-0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighStochasticHighRSIHighMACD" == '' ]
# All three conditions met: sell signal expected.
StrategieOverratedHighStochasticHighRSIHighMACD 91 70 92 71 "0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighStochasticHighRSIHighMACD" == 'Sell: High Stochastic & RSI & MACD+ (C): Stochastic quote 92 over 91 and RSI quote 71 over 70' ]
# Repeat after clearing the result variable: signal must be reproducible.
resultStrategieOverratedHighStochasticHighRSIHighMACD=""
StrategieOverratedHighStochasticHighRSIHighMACD 91 70 92 71 "0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedHighStochasticHighRSIHighMACD" == 'Sell: High Stochastic & RSI & MACD+ (C): Stochastic quote 92 over 91 and RSI quote 71 over 70' ]
}
@test "StrategieUnderratedLowStochasticLowRSILowMACD" {
# Stub the URL writer so the strategy under test performs no real I/O.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
# No arguments: must produce no buy signal.
StrategieUnderratedLowStochasticLowRSILowMACD
[ "$resultStrategieUnderratedLowStochasticLowRSILowMACD" == '' ]
# Only all three conditions together (stoch under limit, RSI under limit,
# negative MACD) should trigger the buy signal.
StrategieUnderratedLowStochasticLowRSILowMACD 9 90 10 5 "0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedLowStochasticLowRSILowMACD" == '' ]
StrategieUnderratedLowStochasticLowRSILowMACD 9 90 0 91 "0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedLowStochasticLowRSILowMACD" == '' ]
StrategieUnderratedLowStochasticLowRSILowMACD 9 90 10 91 "0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedLowStochasticLowRSILowMACD" == '' ]
StrategieUnderratedLowStochasticLowRSILowMACD 9 90 10 91 "-0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedLowStochasticLowRSILowMACD" == '' ]
StrategieUnderratedLowStochasticLowRSILowMACD 9 90 0 5 "0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedLowStochasticLowRSILowMACD" == '' ]
# All three conditions met: buy signal expected.
StrategieUnderratedLowStochasticLowRSILowMACD 9 90 0 5 "-0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedLowStochasticLowRSILowMACD" == 'Buy: Low Stochastic & RSI & MACD- (C): Stochastic quote 0 under 9 and RSI quote 5 under 90' ]
# Repeat after clearing the result variable: signal must be reproducible.
resultStrategieUnderratedLowStochasticLowRSILowMACD=""
StrategieUnderratedLowStochasticLowRSILowMACD 9 90 0 5 "-0.1" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedLowStochasticLowRSILowMACD" == 'Buy: Low Stochastic & RSI & MACD- (C): Stochastic quote 0 under 9 and RSI quote 5 under 90' ]
}
@test "StrategieUnderratedByPercentAndStoch" {
# Stub the URL writer so the strategy under test performs no real I/O.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
# No arguments: must produce no buy signal.
StrategieUnderratedByPercentAndStochastic
[ "$resultStrategieUnderratedByPercentAndStochastic" == '' ]
# Price above its averages / stochastic high: no buy signal.
StrategieUnderratedByPercentAndStochastic 100 91 1 1 1 1 1 1 "5.61" "1.01" "4.44" "4.28" "4.03" 91 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedByPercentAndStochastic" == '' ]
# Price under all averages and stochastic 0 under limit 9: buy signal expected.
StrategieUnderratedByPercentAndStochastic 0 9 1 1 1 1 1 1 "46.95" "0.99" "49.34" "50.08" "52.87" 9 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedByPercentAndStochastic" == 'Buy: Low Percentage & Stochastic (P): 46.95€ is 0.99 under Avg18 49.34€ and Avg38 50.08€ and Avg95 52.87€ and Stoch14 0 is lower then 9' ]
# Repeat after clearing the result variable: signal must be reproducible.
resultStrategieUnderratedByPercentAndStochastic=""
StrategieUnderratedByPercentAndStochastic 0 9 1 1 1 1 1 1 "46.95" "0.99" "49.34" "50.08" "52.87" 9 "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieUnderratedByPercentAndStochastic" == 'Buy: Low Percentage & Stochastic (P): 46.95€ is 0.99 under Avg18 49.34€ and Avg38 50.08€ and Avg95 52.87€ and Stoch14 0 is lower then 9' ]
}
@test "StrategieOverratedByPercentAndStoch" {
# Stub the URL writer so the strategy under test performs no real I/O.
function WriteComdirectUrlAndStoreFileList() {
echo ""
}
export -f WriteComdirectUrlAndStoreFileList
# No arguments: must produce no sell signal.
StrategieOverratedByPercentAndStochastic
[ "$resultStrategieOverratedByPercentAndStochastic" == '' ]
# Stochastic 72 under limit 91: no sell signal even with price over averages.
StrategieOverratedByPercentAndStochastic 72 91 1 1 1 1 1 1 "287.50" "1.01" "281.09" "277.85" "272.43" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedByPercentAndStochastic" == '' ]
# Price over all averages and stochastic 100 over limit 91: sell signal expected.
StrategieOverratedByPercentAndStochastic 100 91 1 1 1 1 1 1 "5.61" "1.01" "4.44" "4.28" "4.03" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedByPercentAndStochastic" == 'Sell: High Percentage & Stochastic (P): 5.61€ is 1.01 over Avg18 4.44€ and Avg38 4.28€ and Avg95 4.03€ and Stoch14 is 100 is higher then 91' ]
# Repeat after clearing the result variable: signal must be reproducible.
resultStrategieOverratedByPercentAndStochastic=""
StrategieOverratedByPercentAndStochastic 100 91 1 1 1 1 1 1 "5.61" "1.01" "4.44" "4.28" "4.03" "$OUT_RESULT_FILE" "$SYMBOL" "$SYMBOL_NAME" *
[ "$resultStrategieOverratedByPercentAndStochastic" == 'Sell: High Percentage & Stochastic (P): 5.61€ is 1.01 over Avg18 4.44€ and Avg38 4.28€ and Avg95 4.03€ and Stoch14 is 100 is higher then 91' ]
}
| true
|
789a91c76d54072994480a4413d41d886d912f11
|
Shell
|
schani/icfp-2011
|
/force/cforce.sh
|
UTF-8
| 258
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Create the next round_NNNN directory and run the generators
# (cgen.sh, ctall.sh) on every *.seq file from the previous round.

# Find the highest existing round number by globbing instead of parsing
# `ls` output (the original `ls -d round_* | colrm | sort | tail` broke
# with no matching directories and is fragile in general).
last=0
for d in round_*; do
	[ -d "$d" ] || continue
	n=$(( 10#${d#round_} ))          # strip prefix; 10# forces base-10 despite leading zeros
	[ "$n" -gt "$last" ] && last=$n
done
LAST=$(printf '%04d' "$last")
NEXT=$(printf '%04d' $(( last + 1 )))

mkdir "round_$NEXT"
( cd "round_$NEXT" || exit 1
  find ../round_$LAST/ -maxdepth 1 -type f -name '*.seq' -exec \
	../cgen.sh {} \; -exec ../ctall.sh {} \;
)
| true
|
32a7e6401679cd0fff25408dbb07d59e9a288045
|
Shell
|
codcodog/hack-bash-exercise
|
/array/demo05
|
UTF-8
| 88
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Exercise: read whitespace-separated values from stdin into an array
# and print the third element.
# NOTE: bash arrays are zero-based, so the third element is index 2;
# the original printed index 3 (the fourth element), contradicting the
# exercise statement. Shebang changed to bash because arrays are not POSIX sh.
tmp=($(cat))
echo "${tmp[2]}"
| true
|
b9ac462c9cedbc819ac426ca178bdf014cc89563
|
Shell
|
knakayama/serverless-blox-v0.3.0
|
/bin/run.sh
|
UTF-8
| 621
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build (optionally) and run the sls-blox container with AWS credentials
# pulled from the local `aws configure` profile.
#
# Usage: run.sh [-b] -c <serverless command>
#   -b  rebuild the docker image before running
#   -c  command passed to `serverless run` inside the container

set -eo pipefail

aws_region="ap-northeast-1"
build=false

while getopts ":bc:" args; do
  case "$args" in
    b)
      build=true
      ;;
    c)
      cmd="$OPTARG"
      ;;
    *)
      echo "Invalid args: $OPTARG"
      exit 1
  esac
done

# BUG FIX: the original `[[ "$build" == true ]] && docker image build ...`
# makes the whole script exit under `set -e` when -b is NOT given, because
# the failed test is the last command of the statement. An explicit `if`
# has no such side effect.
if [[ "$build" == true ]]; then
  docker image build --tag sls-blox .
fi

docker container run \
  --tty \
  --rm \
  --volume "${PWD}/.serverless:/serverless/.serverless" \
  --env AWS_ACCESS_KEY_ID="$(aws configure get aws_access_key_id)" \
  --env AWS_SECRET_ACCESS_KEY="$(aws configure get aws_secret_access_key)" \
  --env AWS_REGION="$aws_region" \
  sls-blox \
  run "$cmd"
| true
|
8360d1713a153671409895b34ce93893213b92e2
|
Shell
|
VertiPub/ooziebuild
|
/scripts/justinstall.sh
|
UTF-8
| 2,404
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build and package the Oozie client and server RPMs with fpm.
# Expects: INSTALL_DIR, WORKSPACE, ARTIFACT_VERSION, RPM_DIR, DESCRIPTION, DATE_STRING.
# this is going to be problematic for oozie, since we've already released RPMs which are 2.0.5
# this default is different than all the others so that the script doesn't cause things to break when merged.
ALTISCALE_RELEASE=${ALTISCALE_RELEASE:-2.0.5}

# Unpack the client distribution into the staging tree.
export DEST_DIR=${INSTALL_DIR}/opt
mkdir -p --mode=0755 ${DEST_DIR}
cd ${DEST_DIR}
tar -xvzpf ${WORKSPACE}/oozie/distro/target/oozie-${ARTIFACT_VERSION}-distro/oozie-${ARTIFACT_VERSION}/oozie-client-${ARTIFACT_VERSION}.tar.gz

# Make the Client RPM
export RPM_NAME=vcc-oozie-client-${ARTIFACT_VERSION}
cd ${RPM_DIR}
fpm --verbose \
--maintainer ops@altiscale.com \
--vendor Altiscale \
--provides ${RPM_NAME} \
-s dir \
-t rpm \
-n ${RPM_NAME} \
-v ${ALTISCALE_RELEASE} \
--description "${DESCRIPTION}" \
--iteration ${DATE_STRING} \
--rpm-user root \
--rpm-group root \
-C ${INSTALL_DIR} \
opt

# Make the Server RPM: rebuild the staging tree from the full distro tarball.
rm -rf ${DEST_DIR}
mkdir -p --mode=0755 ${DEST_DIR}
cd ${DEST_DIR}
tar -xvzpf ${WORKSPACE}/oozie/distro/target/oozie-${ARTIFACT_VERSION}-distro.tar.gz
export OOZIE_ROOT=${DEST_DIR}/oozie-${ARTIFACT_VERSION}
mkdir -p -m 0775 ${OOZIE_ROOT}/libext
cd ${OOZIE_ROOT}/libext
wget http://s3-us-west-1.amazonaws.com/verticloud-dependencies/ext-2.2.zip
# Verify the download; previously a mismatch only logged "FATAL" and the
# build carried on with a corrupt/tampered file. Now it aborts.
EXTSUM=`sha1sum ext-2.2.zip | awk '{print $1}'`
if [ "${EXTSUM}" != "a949ddf3528bc7013b21b13922cc516955a70c1b" ]; then
    echo "FATAL: Failed to fetch the correct ext-2.2.zip"
    exit 1
fi
# Wire the server tree up to the system Hadoop config and MySQL connector.
cd ${OOZIE_ROOT}/conf
rm -rf hadoop-conf
ln -s /etc/hadoop hadoop-conf
cd ${OOZIE_ROOT}/libtools
ln -s /opt/mysql-connector/mysql-connector.jar mysql-connector.jar
cd ${OOZIE_ROOT}/oozie-server/lib
ln -s /opt/mysql-connector/mysql-connector.jar mysql-connector.jar
cd ${OOZIE_ROOT}/bin
cp ${WORKSPACE}/scripts/pkgadd/oozie-status.sh .
chmod 755 oozie-status.sh
# Build the --directories list so the RPM owns every directory it ships.
cd ${INSTALL_DIR}
find opt/oozie-${ARTIFACT_VERSION} -type d -print | awk '{print "/" $1}' > /tmp/$$.files
export DIRECTORIES=""
for i in `cat /tmp/$$.files`; do DIRECTORIES="--directories $i ${DIRECTORIES} "; done
export DIRECTORIES
rm -f /tmp/$$.files
export RPM_NAME=vcc-oozie-server-${ARTIFACT_VERSION}
cd ${RPM_DIR}
fpm --verbose \
-C ${INSTALL_DIR} \
--maintainer ops@altiscale.com \
--vendor Altiscale \
--provides ${RPM_NAME} \
--depends alti-mysql-connector \
-s dir \
-t rpm \
-n ${RPM_NAME} \
-v ${ALTISCALE_RELEASE} \
${DIRECTORIES} \
--description "${DESCRIPTION}" \
--iteration ${DATE_STRING} \
--rpm-user oozie \
--rpm-group hadoop \
opt
| true
|
bf1d02399e4025a6047689ff4a7ac825fca1966d
|
Shell
|
scottie33/amorphous_polymer_lammps
|
/getpdb.bash
|
UTF-8
| 874
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate PDB snapshots from a PSF/DCD trajectory pair via VMD.
# Writes the parameters to tempinput.tcl and runs getpdb.tcl in text mode.
if [ $# -lt 9 ]; then
echo " you should have VMD installed with path set well."
echo " "
echo " cmd psffile dcdfile fromid1 toid1 fromid2 toid2 distance deltadis withindis"
echo " "
echo " please try again."
exit -1
fi
# Recreate the output directory from scratch (quoted: SC2086-safe).
pdbdir="pdbfiles"
if [ -d "${pdbdir}" ]; then
rm -fr "${pdbdir}"
fi
mkdir "${pdbdir}"
#### generating tempinput.tcl here ####
echo "set inputpsf \"$1\"" > tempinput.tcl
echo "set inputdcd \"$2\"" >> tempinput.tcl
echo "set sresids $3 " >> tempinput.tcl
echo "set sreside $4 " >> tempinput.tcl
echo "set eresids $5 " >> tempinput.tcl
echo "set ereside $6 " >> tempinput.tcl
echo "set distance $7 " >> tempinput.tcl
echo "set deltadis $8 " >> tempinput.tcl
echo "set withindis $9 " >> tempinput.tcl
echo "set pdbdir \"${pdbdir}\"" >> tempinput.tcl
#######################################
vmd -dispdev text -e getpdb.tcl
exit 0
| true
|
a202863370022233e6f5530f56132b602e316307
|
Shell
|
matthewpreston/matthewpreston.github.io
|
/programs/Cron/loadcheck.sh
|
UTF-8
| 809
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# Used to see if load is above maximum threshold; if so, mail everyone
# listed (one address per line) in $emailFile.
emailFile="/path/to/file/email.txt"

echo "Please open this script in notepad and replace the /path/to/file to a file"
echo "which contains an email for each line. Then you can remove these echo"
echo "statements and the following exit statement."
exit 0

# Maximum allowed load = number of logical CPUs.
MAXLOAD=$(grep -c processor /proc/cpuinfo)
# 1-minute load average (first field of /proc/loadavg).
current=$(awk '{print $1}' /proc/loadavg)
# bc handles the floating-point comparison; prints 1 when over the limit.
bool=$(echo "$current > $MAXLOAD" | bc -l)
if [ "$bool" -eq 1 ]; then
	emails=($(cat "$emailFile"))
	output="temp.txt"
	echo -e "Load is greater than maximum!\n\n" > "$output"
	echo -e "Current: $current\nMaximum: $MAXLOAD\n\n" >> "$output"
	echo "Please elucidate the problem at hand" >> "$output"
	for email in "${emails[@]}"; do
		mail -s "Server Load Over Maximum" "$email" < "$output"
	done
fi
| true
|
376f0a2e8b805672c4e0f4bc284c8886891223a5
|
Shell
|
StudioProcess/rv-madek-link
|
/extras/getlink.sh
|
UTF-8
| 782
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Build a research-video player URL for a Madek media entry.
# Requires jq for URL-encoding.
#
# The original used bash-only constructs ((( )) arithmetic and the
# `function` keyword) under a /bin/sh shebang, which fails on dash;
# rewritten in strict POSIX sh.
if [ $# -eq 0 ]; then
    echo "Usage: $0 MEDIA_ENTRY_ID"
    echo "Example: $0 f5b78e56-a229-4295-a4cc-0311e6534207"
    exit 1
fi

MEDIA_ENTRY_ID=$1

MEDIA_ARCHIVE_BASE_URL='https://medienarchiv.zhdk.ch'
RV_PLAYER_BASE='https://rv-dev.process.studio'

# URL-encode $1 (jq's @uri filter).
uriencode() { jq -nr --arg v "$1" '$v|@uri'; }

VIDEO_URL="${MEDIA_ARCHIVE_BASE_URL}/api/media-entries/${MEDIA_ENTRY_ID}/media-file/data-stream"
VIDEO_URL_ENCODED=$(uriencode "${VIDEO_URL}")

ANNOTATIONS_URL="${MEDIA_ARCHIVE_BASE_URL}/api/media-entries/${MEDIA_ENTRY_ID}/meta-data/research_video:rv_annotations/data-stream"
ANNOTATIONS_URL_ENCODED=$(uriencode "${ANNOTATIONS_URL}")

RV_PLAYER_URL="${RV_PLAYER_BASE}/?video=${VIDEO_URL_ENCODED}&annotations=${ANNOTATIONS_URL_ENCODED}"
echo "$RV_PLAYER_URL"
| true
|
36e540b38b6460a4bc60cd49b0cff9e3c62fbc80
|
Shell
|
apache/mnemonic
|
/tools/bundle.sh
|
UTF-8
| 2,462
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Collect every signed submodule artifact (pom/jar/javadoc/sources + .asc)
# into a single bundle.jar under target/.

if [ -z "${MNEMONIC_HOME}" ]; then
  source "$(dirname "$0")/find-mnemonic-home.sh" || { echo "Not found find-mnemonic-home.sh script."; exit 10; }
fi

pushd "$MNEMONIC_HOME" || { echo "the environment variable \$MNEMONIC_HOME contains invalid home directory of Mnemonic project."; exit 11; }

bundle_path="./target/bundle_tmp"
bundle_name="./target/bundle.jar"
excluded_modules_arr=(mnemonic-utilities-service* mnemonic-nvml-pmem-service* mnemonic-nvml-vmem-service* mnemonic-pmalloc-service*)

# BUG FIX: the sanity check originally used a ( ... ) subshell, so its
# `exit 20` only left the subshell and the script kept running with an
# unvalidated path. A { ...; } group exits the script itself.
[[ x"${bundle_path}" = x"./target/"* ]] || { echo "The bundle tmp path must begin with ./target/"; exit 20; }

mkdir -p ${bundle_path} || rm -f ${bundle_path}/*

# Enumerate signed submodules (anything with a *.pom.asc), then drop the
# excluded service modules.
bn_arr=($(find . ! -path "${bundle_path}/*" -type f -name "*.pom.asc" -exec basename {} .pom.asc \; | xargs))
for del in ${excluded_modules_arr[@]}
do
  bn_arr=(${bn_arr[@]/$del})
done

if [ ${#bn_arr[@]} -eq 0 ]; then
  echo "No found any signed submodules to be bundled !"
else
  echo "There are ${#bn_arr[@]} submodules to be bundled."
fi

rm -f ${bundle_path}/*
# Copy each module's artifacts and their signatures into the staging dir.
for i in ${bn_arr[@]}
do
  echo "Module -> " $i
  find . ! -path "${bundle_path}/*" -type f \( -name "$i.pom" -o -name "$i.jar" -o -name "$i-javadoc.jar" -o -name "$i-sources.jar" \) -exec cp {} ${bundle_path} \;
  find . ! -path "${bundle_path}/*" -type f \( -name "$i.pom.asc" -o -name "$i.jar.asc" -o -name "$i-javadoc.jar.asc" -o -name "$i-sources.jar.asc" \) -exec cp {} ${bundle_path} \;
done

cntf=($(ls -1 ${bundle_path} | xargs)); echo "There are ${#cntf[@]} files will be packed into a bundle."

rm -f ${bundle_name}
jar cf ${bundle_name} -C ${bundle_path} .
echo "The bundle has been generated as follows."
ls -1 ${bundle_name}

popd
| true
|
baa76f7438dbb610ce1edaab30803f1b99e98141
|
Shell
|
senay/Optimal-Solution-Energy-Efficient-Routing
|
/MATLAB/algorithm_v1/90W/data.sh
|
UTF-8
| 564
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Aggregate per-lambda power measurements: each input file i.txt has rows
# "lambda avgPower"; consecutive rows with the same lambda are summed and
# the average over $all runs ("lambda sum/all") is appended to i.dat.
# NOTE(review): the `-eq` comparisons are integer tests — this assumes the
# lambda column holds integers; decimal lambdas would break — TODO confirm.
i=0
lambdaP=0
lambdaN=0
tot=0
ii=0
numberflow=5
totF=0
RESULT=0
all=10
for i in 3 #1 2 3 4 5 6
do
while read lambda avgPower
do
# Track previous (lambdaP) vs current (lambdaN) lambda to detect group changes.
if [ $lambdaN -eq $ii ]
then
lambdaP=$lambda
lambdaN=$lambda
else
lambdaP=$lambdaN
lambdaN=$lambda
fi
if [ $lambdaP -eq $lambdaN ]
then
# Same lambda group: accumulate power (bc for float addition).
tot=`echo $tot + $avgPower | bc`
else
# Group boundary: flush the previous group's average, start a new sum.
RESULT=$(echo "$tot/$all" | bc -l)
totF=$tot
tot=$avgPower
echo "$lambdaP $RESULT" >> $i.dat
fi
done < $i.'txt'
# Flush the last group after the file ends.
# NOTE(review): this uses totF (the second-to-last flushed sum), not tot —
# possibly intentional, possibly an off-by-one in the final row; verify.
RESULT=$(echo "$totF/$all" | bc -l)
echo "$lambdaP $RESULT" >> $i.dat
RESULT=0
done
| true
|
fdb24f61756cd45999db9330e906d7e13eec825b
|
Shell
|
4383/tobiko
|
/tools/ci/keystone-credentials
|
UTF-8
| 256
| 3.015625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Print tobiko keystone credentials; sources the shared shell helpers
# first, then runs the generator inside the configured OS environment.

set -eu

# Load the tobiko shell environment from the scripts directory.
script_dir=$(dirname "$0")
source "${script_dir}/activate"
source "${script_dir}/os"


keystone_credentials() {
    os_setup
    tobiko-keystone-credentials "$@"
}


# Run directly only when invoked under this name (the file may also be
# sourced by sibling scripts that just want the function).
if [ "$(basename "$0")" == keystone-credentials ]; then
    keystone_credentials "$@"
fi
|
b9cc6fe2424cd8432e518e81acbe9c9e71666730
|
Shell
|
radcliff/heroku-geo-buildpack
|
/support/build/geos/build
|
UTF-8
| 190
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
# Heroku buildpack compile step for GEOS: download the source tarball,
# build it, and install into the buildpack output directory.
#
# Arguments: $1 workspace dir (unused), $2 install prefix.
# Environment: VERSION - the GEOS release to fetch.

# Abort on any download/configure/make failure instead of producing a
# half-built slug (the original had no error handling at all).
set -e

workspace_dir=$1
output_dir=$2

curl "http://download.osgeo.org/geos/geos-$VERSION.tar.bz2" -s -o - | tar xjf -
cd "geos-$VERSION"
./configure --prefix="$output_dir"
make
make install
| true
|
edccf51b5a6e73670c89efdbd1e5294b9ba40ff5
|
Shell
|
kaiwan/L3_dd_trg
|
/cz_miscdrv/test_drv.sh
|
UTF-8
| 1,483
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Exercise the czero/cnul misc char driver(s) via dd/cat, draining dmesg
# after each step.
name=$(basename "$0")   # quoted: robust if the script path contains spaces

# runcmd
# Parameters
#  $1 ... : params are the command to run
# Echo a command line, then execute it via eval.
# No-op when called without arguments.
runcmd() {
	if [ $# -eq 0 ]; then
		return
	fi
	echo "$@"
	eval "$@"
}
# Require exactly one argument: the .ko module to load and exercise.
[ $# -ne 1 ] && {
  echo "Usage : ${name} pathname-to-driver-to-test.ko"
  exit 1
}
DRV=$1

# Remove a previously loaded instance (module name = filename minus ".ko"),
# then load the module and verify it actually appears in lsmod.
sudo rmmod ${DRV::-3} 2>/dev/null
runcmd "sudo insmod ${DRV}"
lsmod|grep ${DRV::-3} || {
  echo "insmod failed? aborting...
 (do you need to build it?)"
  exit 1
}
date
# Clear the kernel log so each test step's dmesg output is isolated.
runcmd "sudo dmesg -C"

echo "=== Test the czero misc device:"
runcmd "rm -f tst.dat 2>/dev/null"
# Read 3x2k of zeroes from the device; dmesg -c drains the driver's log.
runcmd "dd if=/dev/czero_miscdev of=tst.dat bs=2k count=3 ; sudo dmesg -c"
runcmd "ls -lh tst.dat"
runcmd "hexdump tst.dat"

echo
echo "--- Test by reading >1 page:
 with cz_miscdrv : should get truncated to 1 page (at a time)...
 with cz_enh_miscdrv : should transfer ALL requested bytes..."
runcmd "dd if=/dev/czero_miscdev of=tst.dat bs=81k count=3 ; sudo dmesg -c"
runcmd "hexdump tst.dat"

echo
echo "=== Test the cnul misc device:"
echo "--- Redirect to cnul (shouldn't see any o/p of cat):"
runcmd "cat /etc/passwd > /dev/cnul_miscdev"
runcmd "sudo dmesg -c"
runcmd "ls -l /etc/passwd"
echo
echo "--- Test file truncation by writing cnul content to a file"
# Fill tst.dat with random data first, then reading from cnul (EOF) should
# leave it truncated to zero bytes.
runcmd "dd if=/dev/urandom of=tst.dat bs=2k count=3"
runcmd "ls -lh tst.dat"
runcmd "cat /dev/cnul_miscdev > tst.dat"
runcmd "ls -lh tst.dat"
runcmd "sudo dmesg -c"
echo
echo "Test both czero and cnul misc devices:"
runcmd "dd if=/dev/czero_miscdev of=/dev/cnul_miscdev bs=8k count=3 ; sudo dmesg -c"
exit 0
| true
|
1b3cfa2fdbb133a42011e5d44597a71bb6c350bc
|
Shell
|
mertnuhoglu/stuff
|
/bash/acksed
|
UTF-8
| 593
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#<url:file:///~/Dropbox (Personal)/projects/stuff/bash/acksed>
# Search-and-replace across all files matched by ripgrep.
# usage:
#   acksed "\/gis\/" "\/gis_frontend\/"       # perform the replacement
#   acksed -n "\/gis\/" "\/gis_frontend\/"    # dry run: only list matching files
#
# BUG FIXES vs original:
#  * getopts spec was ":n:" which made -n consume the match pattern as
#    its argument; -n is a flag, so the spec is ":n".
#  * dry_run was unset when no option was given, so `[ $dry_run = "True" ]`
#    was a syntax error; it now defaults to "False" and is quoted.

dry_run="False"
while getopts ":n" opt; do
  case $opt in
    n)
      dry_run="True"
      ;;
    \?)
      dry_run="False"
      ;;
  esac
done
shift $((OPTIND - 1))

# After the shift, the pattern and replacement are always $1 and $2.
match=$1
replace=$2
echo "$match"
echo "$replace"

if [ "$dry_run" = "True" ]; then
  rg -l "$match" | xargs -n1 -d '\n' -I {} echo {}
else
  rg -l "$match" | xargs -n1 -d '\n' -I {} echo {}
  rg -l "$match" | xargs -n1 -d '\n' -I {} sed -i -e "s#$match#$replace#g" {}
fi
| true
|
133bdb8a2cb69cd9c7c12baa30dab1fdee26ae5f
|
Shell
|
wassim/dotfiles
|
/.zshrc
|
UTF-8
| 1,967
| 2.9375
| 3
|
[] |
no_license
|
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
  source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi

# Path to your dotfiles.
export DOTFILES=$HOME/.dotfiles

# Path to your oh-my-zsh installation.
export ZSH="$HOME/.oh-my-zsh"

# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
ZSH_THEME="powerlevel10k/powerlevel10k"

# Would you like to use another custom folder than $ZSH/custom?
ZSH_CUSTOM=$DOTFILES

# Which plugins would you like to load?
# Standard plugins can be found in $ZSH/plugins/
# Custom plugins may be added to $ZSH_CUSTOM/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(
  git
  zsh-syntax-highlighting
  cp
  colorize
  web-search
  node
  npm
  zsh-z
  zsh-autosuggestions
)

source $ZSH/oh-my-zsh.sh

# You may need to manually set your language environment
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8

# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh

# JINA_CLI_BEGIN

## autocomplete
# Skip completion setup in non-interactive shells.
if [[ ! -o interactive ]]; then
    return
fi

# Register _jina as the completion function for the `jina` command.
compctl -K _jina jina

# Completion handler: asks the jina CLI itself for sub-commands
# (`jina commands`) or context-sensitive completions (`jina completions`).
_jina() {
  local words completions
  read -cA words

  if [ "${#words}" -eq 2 ]; then
    completions="$(jina commands)"
  else
    completions="$(jina completions ${words[2,-2]})"
  fi

  reply=(${(ps:
:)completions})
}

# session-wise fix
# Raise the per-process open-file limit for this shell session.
ulimit -n 4096
export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES

# JINA_CLI_END
| true
|
b1c217bdf28b0c81a9ce3755a25e22328a7ab7f6
|
Shell
|
exqudens/exqudens-cpp-test
|
/cmake-full-clean-export-all.sh
|
UTF-8
| 1,376
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Full clean + export of every build configuration (MSVC x86/x64,
# shared/static, then GCC shared/static) via cmake-export.sh.
# Environment: BASH_HOME plus the MSVC_*/GCC_* tool-path variables.

set -e

# SAFETY FIX: ${BASH_HOME:?} aborts when BASH_HOME is unset/empty;
# the original would otherwise run `rm -rfv "/.conan/data/..."`
# against the filesystem root.
rm -rfv "${BASH_HOME:?BASH_HOME must be set}/.conan/data/exqudens-cpp-test-lib/"
rm -rfv "./build/"

PROCESSOR_ARCHITECTURE="x86" \
INCLUDE="${MSVC_INCLUDE}" \
LIBPATH="${MSVC_X86_LIBPATH}" \
LIB="${MSVC_X86_LIB}" \
PATH="${MSVC_X86_BASH_CLPATH}:${MSVC_X86_BASH_RCPATH}:${PATH}" \
"./cmake-export.sh" "cmd-windows-ninja-msvc-x86-release-shared"

sleep 3s

PROCESSOR_ARCHITECTURE="x86" \
INCLUDE="${MSVC_INCLUDE}" \
LIBPATH="${MSVC_X86_LIBPATH}" \
LIB="${MSVC_X86_LIB}" \
PATH="${MSVC_X86_BASH_CLPATH}:${MSVC_X86_BASH_RCPATH}:${PATH}" \
"./cmake-export.sh" "cmd-windows-ninja-msvc-x86-release-static"

sleep 3s

PROCESSOR_ARCHITECTURE="AMD64" \
INCLUDE="${MSVC_INCLUDE}" \
LIBPATH="${MSVC_X86_X64_LIBPATH}" \
LIB="${MSVC_X86_X64_LIB}" \
PATH="${MSVC_X86_X64_BASH_CLPATH}:${MSVC_X86_X64_BASH_RCPATH}:${PATH}" \
"./cmake-export.sh" "cmd-windows-ninja-msvc-x86_x64-release-shared"

sleep 3s

PROCESSOR_ARCHITECTURE="AMD64" \
INCLUDE="${MSVC_INCLUDE}" \
LIBPATH="${MSVC_X86_X64_LIBPATH}" \
LIB="${MSVC_X86_X64_LIB}" \
PATH="${MSVC_X86_X64_BASH_CLPATH}:${MSVC_X86_X64_BASH_RCPATH}:${PATH}" \
"./cmake-export.sh" "cmd-windows-ninja-msvc-x86_x64-release-static"

sleep 3s

PATH="${GCC_BASH_PATH}:${PATH}" \
"./cmake-export.sh" "cmd-windows-ninja-gcc-x86_x64-release-shared"

sleep 3s

PATH="${GCC_BASH_PATH}:${PATH}" \
"./cmake-export.sh" "cmd-windows-ninja-gcc-x86_x64-release-static"
| true
|
3677ff8526a723afdcc886f193ed862c61b0914f
|
Shell
|
marcw/packer-templates
|
/support/scripts/vmware.sh
|
UTF-8
| 431
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Packer provisioner: install VMware guest tools from /tmp/linux.iso.
# Does nothing unless the current builder is vmware.

if [ "$PACKER_BUILDER_TYPE" != "vmware" ]; then
    echo 'not building vmware, skipping'
    exit
fi

# Build prerequisites for the kernel modules shipped with the tools.
apt-get -qy install gcc make fuse fuse-utils linux-headers-$(uname -r)

# Mount the tools ISO, unpack and run the installer, then clean up.
mkdir /mnt/vmware
mount -o loop /tmp/linux.iso /mnt/vmware

cd /tmp
tar xzf /mnt/vmware/VMwareTools-*.tar.gz
/tmp/vmware-tools-distrib/vmware-install.pl -d

umount /mnt/vmware
rmdir /mnt/vmware
rm -rf /tmp/vmware-tools-distrib/
rm -f /tmp/linux.iso
| true
|
d9d84b580b058c9147ae62ecb6020037a41a7ad3
|
Shell
|
diffblue/cbmc
|
/.github/workflows/pull-request-check-clang-format.sh
|
UTF-8
| 1,339
| 4.03125
| 4
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-4-Clause"
] |
permissive
|
#!/bin/bash
# CI gate: fail the pull request if it introduces clang-format violations
# relative to the merge base. Expects BASE_BRANCH and MERGE_BRANCH in the
# environment (set by the GitHub workflow).

# Stop on errors
set -e

# Log information about the run of this check.
echo "Pull request's base branch is: ${BASE_BRANCH}"
echo "Pull request's merge branch is: ${MERGE_BRANCH}"
echo "Pull request's source branch is: ${GITHUB_HEAD_REF}"
clang-format-11 --version

# The checkout action leaves us in detatched head state. The following line
# names the checked out commit, for simpler reference later.
git checkout -b ${MERGE_BRANCH}

# Build list of files to ignore
# (each entry becomes a git pathspec ':(top,exclude)path'; the quotes are
# literal so that eval below passes them through intact)
while read file ; do EXCLUDES+="':(top,exclude)$file' " ; done < .clang-format-ignore

# Make sure we can refer to ${BASE_BRANCH} by name
git checkout ${BASE_BRANCH}
git checkout ${MERGE_BRANCH}

# Find the commit on which the PR is based.
MERGE_BASE=$(git merge-base ${BASE_BRANCH} ${MERGE_BRANCH})
echo "Checking for formatting errors introduced since $MERGE_BASE"

# Do the checking. "eval" is used so that quotes (as inserted into $EXCLUDES
# above) are not interpreted as parts of file names.
eval git-clang-format-11 --binary clang-format-11 $MERGE_BASE -- $EXCLUDES
# git-clang-format rewrites the working tree; any resulting diff is a
# formatting violation.
git diff > formatted.diff
if [[ -s formatted.diff ]] ; then
  echo 'Formatting error! The following diff shows the required changes'
  echo 'Use the raw log to get a version of the diff that preserves spacing'
  cat formatted.diff
  exit 1
fi
echo 'No formatting errors found'
exit 0
| true
|
98a3d5ed0b41dd1ad3bf5e9e919bf353f139f758
|
Shell
|
Bubujka/bu.bin
|
/bin/swagger-check
|
UTF-8
| 134
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Validate every swagger.yaml found under the current directory.
# Uses a NUL-delimited find/read loop instead of the original
# `for file in $(find ...)`, which word-split paths containing spaces.
find . -iname swagger.yaml -print0 | while IFS= read -r -d '' file; do
  echo "Checking $file"
  swagger-tools validate "$file"
  echo
  echo
done
| true
|
fbd40815df8d907149de69d486a8f22efa8bd683
|
Shell
|
estorsky/myconfig
|
/scripts/work/ma/cli_new_make.sh
|
UTF-8
| 338
| 2.75
| 3
|
[] |
no_license
|
#!/bin/sh
# Build the initrd firmware container and copy the newest artifact to
# the user's shared folder as firmware.ma4k.

# Fail fast on any step; this replaces the commented-out retVal checks
# below, which show the same intent.
set -e

cd pp4x/apps
# make cli_new
# retVal=$?
# if [ $retVal -ne 0 ]; then
#     echo "cli_new make faild"
#     exit $retVal
# fi
cd ..
echo "$PWD"
make initrd
cd initrd
echo "$PWD"
./fastmkcontainer.sh plc

# Newest file in the directory = the container just produced.
# (parsing `ls -t` is acceptable here: names are machine-generated)
LASTCRF=$(ls -t | head -1)
echo "$LASTCRF"
echo "$HOME"
rm -f "$HOME/shared/firmware.ma4k"
cp "$LASTCRF" "$HOME/shared/firmware.ma4k"
| true
|
c7ff767822d75b5e568c12a85a2bdd1e9be665a3
|
Shell
|
minedec/vtools
|
/app/src/main/assets/kr-script/apps/douyin_ad_get.sh
|
UTF-8
| 748
| 3.390625
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Report the Douyin splash-ad cache state: prints "0" when the
# awemeSplashCache entry exists and carries the immutable attribute
# (i.e. the ad cache is locked), "1" otherwise.
# NOTE(review): a top-level `return` is only valid when sourced; when this
# script is executed, bash reports an error — presumably the Android shell
# (mksh) or the caller tolerates/sources it. TODO confirm.

# Locate the external-storage root unless the caller already provided it.
if [[ ! -n "$SDCARD_PATH" ]]; then
if [[ -e /storage/emulated/0/Android ]]; then
SDCARD_PATH="/storage/emulated/0"
elif [[ -e /sdcard/Android ]]; then
SDCARD_PATH="/sdcard/"
elif [[ -e /data/media/0/Android ]]; then
SDCARD_PATH="/data/media/0"
fi
fi

path=$SDCARD_PATH/Android/data/com.ss.android.ugc.aweme

# App data dir missing: nothing is locked.
if [[ ! -e $path ]]; then
echo 1;
return
fi

# NOTE(review): this also reports "1" whenever $path is a directory, which
# is the normal case for an app data dir — so the lsattr branch below only
# runs when $path exists and is NOT a directory; verify this is intended.
if [[ -d $path ]]; then
echo 1;
return
fi

# BUSYBOX may be pre-set to a busybox binary path; default to the PATH tools.
if [[ ! -n "$BUSYBOX" ]]; then
BUSYBOX=""
fi

# Check the immutable ('i') attribute on the cache entry via lsattr.
if [[ -f $path/awemeSplashCache ]]; then
attr=`$BUSYBOX lsattr $path | $BUSYBOX cut -f1 -d " "`
attr=`echo $attr | grep "i"`
if [[ -n "$attr" ]]; then
echo 0
else
echo 1
fi
else
echo 1
fi
| true
|
2877963623cd26686989778de04c6b6e10d946f7
|
Shell
|
JWong42/Chef-Solo-Cookbooks
|
/deploy.sh
|
UTF-8
| 909
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Usage: ./deploy.sh [host] [identity_file]
# Upload the local ./chef tree to the host and run its install script.

# use the commandline argument 1 or if it's not there, use the ip specified here
host="${1:-root@96.126.110.107}"
identity_file="$2"

# removes old host from known_hosts in case host key changes when new VM instantiates
# parameter expansion removes anything in host variable before and including @
# any stderr is directed to /dev/null
ssh-keygen -R "${host#*@}" 2> /dev/null

# upload the public key to set up Public Key Authentication instead of PW Authentication
#ssh-copy-id "$host"

# BUG FIX: the original always passed `-i "$identity_file"`, which breaks
# ssh with an empty argument when $2 is omitted; only add -i when given.
ssh_opts=(-o 'StrictHostKeyChecking no')
if [ -n "$identity_file" ]; then
  ssh_opts+=(-i "$identity_file")
fi

# creates an archive of the specified directory in bzip2 format
# pipe the output to tar in a string of command issued through ssh
# always confirm host key fingerprint during ssh
tar cj ./chef/* | ssh "${ssh_opts[@]}" "$host" '
sudo rm -rf ~/chef &&
mkdir ~/chef &&
cd ~/chef &&
tar xj &&
sudo bash chef/install.sh'
| true
|
4bfcb799d5b1d829fe4c28a8b1ff3aaae93ed00d
|
Shell
|
pluginsGLPI/empty
|
/plugin.sh
|
UTF-8
| 2,487
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# -------------------------------------------------------------------------
# {NAME} plugin for GLPI
# Copyright (C) {YEAR} by the {NAME} Development Team.
# -------------------------------------------------------------------------
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# --------------------------------------------------------------------------
#
# Validate arguments: plugin name and version are required, destination
# directory is optional.
if [[ $# -ne 2 && $# -ne 3 ]]; then
echo "$0: usage: plugin.sh name version [destination/path]"
exit 1
fi
if [[ $# -eq 3 ]]; then
DIR=$3
if [ ! -d "$DIR" ]; then
echo "Destination directory $DIR does not exists!"
# Exit non-zero so callers can detect the failure (a bare 'exit' would
# return the status of the preceding echo, i.e. success).
exit 1
fi
else
# Default destination: the parent of the directory holding this script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.."
fi
# Keep only alphabetic characters from the requested name.
# (The original '[[:alpha:]]' set also kept literal '[' and ']'.)
NAME=$(echo "$1" | tr -dc '[:alpha:]')
LNAME=${NAME,,}   # lowercase variant
UNAME=${NAME^^}   # uppercase variant
VERSION=$2
YEAR=$(date +%Y)
DEST=$DIR/$LNAME
echo "Creating new $NAME plugin..."
if [ -d "$DEST" ]; then
echo "A directory named $LNAME already exists!"
exit 1
fi
mkdir "$DEST"
# Copy the template tree, excluding repo/meta files.
rsync \
--exclude '.git' \
--exclude 'plugin.sh' \
--exclude 'dist' \
--exclude 'README.md' \
-a . "$DEST"
pushd "$DEST" > /dev/null
# rename *.tpl files by stripping the .tpl suffix (glob instead of `ls`)
for f in *.tpl
do
mv -- "$f" "${f%.*}"
done
# move xml file
mv plugin.xml "$LNAME.xml"
# do replacements
sed \
-e "s/{NAME}/$NAME/" \
-e "s/{LNAME}/$LNAME/" \
-e "s/{UNAME}/$UNAME/" \
-e "s/{VERSION}/$VERSION/" \
-e "s/{YEAR}/$YEAR/" \
-i setup.php hook.php "$LNAME.xml" tools/HEADER README.md
popd > /dev/null
| true
|
e8692feabe061b0694fd28727509d618e318e252
|
Shell
|
wallstreetcn/dashboard
|
/aio/scripts/lint-backend.sh
|
UTF-8
| 1,104
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Exit on error.
# Exit on error.
set -e

# Import config (defines GOLINT_BIN, GOLINT_URL, CACHE_DIR, ...).
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE}")/../.." && pwd -P)"
. "${ROOT_DIR}/aio/scripts/conf.sh"

# Make sure that all required tools are available; install golangci-lint
# into the cache directory when missing.
if [ ! -f "${GOLINT_BIN}" ]; then
  curl -sfL "${GOLINT_URL}" | sh -s -- -b "${CACHE_DIR}" v1.15.0
fi

# Need to check source files under GOPATH when running on Travis CI.
if [ -n "${TRAVIS:-}" ]; then
  cd "${GOPATH}/src/github.com/kubernetes/dashboard/src/app/backend/"
fi

# Run checks.
"${GOLINT_BIN}" run -c "${ROOT_DIR}/.golangci.yml" ./...
| true
|
9415c81001ca165710d3b80b63b7018d2f17bd66
|
Shell
|
scoreboard-tcc/infraestrutura
|
/setup.sh
|
UTF-8
| 1,451
| 3.078125
| 3
|
[] |
no_license
|
# Interactively collect deployment parameters.
read -p "Digite o domínio (ex: meudominio.com): " domain
read -p "Digite um e-mail (necessário para gerar os certificados): " email
read -p "Digite o nome de usuário do Github: " gh_username
read -p "Digite o token do Github: " gh_token

# Replace the placeholder domain in the nginx config and env file.
sed -i "s|scoreboardapp.tech|$domain|g" conf/reverse-proxy.conf
sed -i "s|scoreboardapp.tech|$domain|g" .env

echo "Instalando o Docker..."
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh
rm get-docker.sh
sudo apt install docker-compose -y

# Run the remaining steps in a shell that has the fresh 'docker' group.
# The heredoc is unquoted on purpose: $domain/$email/$gh_* expand now,
# while escaped expansions (\$...) run inside the newgrp shell.
newgrp docker <<EONG
docker login docker.pkg.github.com -u $gh_username -p $gh_token
docker-compose up -d
echo "Instalando e configurando o Nginx..."
sudo apt install nginx certbot python3-certbot-nginx -y
sudo cp conf/reverse-proxy.conf /etc/nginx/sites-available
sudo ln -s /etc/nginx/sites-available/reverse-proxy.conf /etc/nginx/sites-enabled/reverse-proxy.conf
sudo nginx -t
sudo systemctl reload nginx
sudo certbot --nginx -d server.$domain -d storage.$domain -d mqtt.$domain --email $email --non-interactive --agree-tos
echo "Aguardando os containers..."
until [ "\$(docker inspect -f '{{.State.Running}}' backend)" = "true" ]; do
sleep 0.1;
done;
echo "Populando dados iniciais no banco..."
docker exec -t backend npx knex seed:run --env development
echo "PATH=$PATH" > /etc/cron.d/certbot-renew
echo "@monthly certbot renew --nginx >> /var/log/cron.log 2>&1" >>/etc/cron.d/certbot-renew
crontab /etc/cron.d/certbot-renew
EONG
| true
|
97c30d6c7b1facac2f04e6e145419418092c88bd
|
Shell
|
aga-git/webc
|
/var/lib/dpkg/info/glx-alternative-mesa.postinst
|
UTF-8
| 3,099
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/sh
set -e
# Multiarch directory infixes probed for each library: the plain /usr/lib/
# plus the i386 and x86_64 triplet subdirectories. Word-split on purpose.
TRIPLETS="/ /i386-linux-gnu/ /x86_64-linux-gnu/"
# Emit an update-alternatives --slave clause for one library file.
# Prints nothing unless the real file exists and the directory that will
# hold the slave symlink is present.
#   $1 - path of the slave symlink to manage
#   $2 - alternative name (emitted with the "glx--" prefix)
#   $3 - real file the symlink should point at
add_slave()
{
local link alt_name real_file
link="$1"
alt_name="glx--$2"
real_file="$3"
if [ -f "$real_file" ] && [ -d "$(dirname "$link")" ]; then
echo --slave "$link" "$alt_name" "$real_file"
fi
}
# Call add_slave once per multiarch triplet in $TRIPLETS for a given file.
#   $1 - target base dir    $2 - target subdir   $3 - file name
#   $4 - source base dir    $5 - source subdir   $6 - alternative-name prefix
add_multiarch_slave()
{
local tdir tsub fname sdir ssub pfx trip sfx
tdir="$1"
tsub="$2"
fname="$3"
sdir="$4"
ssub="$5"
pfx="$6"
for trip in $TRIPLETS ; do
# Turn "/x86_64-linux-gnu/" into "-x86_64-linux-gnu" and "/" into "".
sfx="${trip%/}"
sfx="${sfx:+-${sfx#/}}"
add_slave \
"${tdir}${trip}${tsub}${fname}" \
"${pfx}${fname}${sfx}" \
"${sdir}${trip}${ssub}${fname}"
done
}
# Handles the alternatives for
# * /usr/lib[/<triplet>]/libGL.so
# * /usr/lib[/<triplet>]/libGL.so.1
# Each candidate provider is only registered when at least one --slave
# clause was generated (i.e. at least one of its files exists); otherwise
# the provider is removed. $slaves is intentionally unquoted so that the
# generated clauses word-split into separate update-alternatives arguments.
setup_alternatives()
{
# libGL.so.1 (from /usr/lib[/triplet]/mesa/) — priority 6 beats the
# diverted copy below, so a real mesa install wins.
slaves="$(add_multiarch_slave /usr/lib "" libGL.so.1 /usr/lib mesa/)"
if echo "$slaves" | grep -q "slave" ; then
update-alternatives --install /usr/lib/glx glx /usr/lib/mesa 6 $slaves
else
update-alternatives --remove glx /usr/lib/mesa
fi
# libGL.so.1 (from /usr/lib/mesa-diverted/) — lower priority 5 fallback.
slaves="$(add_multiarch_slave /usr/lib "" libGL.so.1 /usr/lib/mesa-diverted)"
if echo "$slaves" | grep -q "slave" ; then
update-alternatives --install /usr/lib/glx glx /usr/lib/mesa-diverted 5 $slaves
else
update-alternatives --remove glx /usr/lib/mesa-diverted
fi
# libGL.so (from /usr/lib[/triplet]/mesa/)
slaves="$(add_multiarch_slave /usr/lib "" libGL.so /usr/lib mesa/)"
if echo "$slaves" | grep -q "slave" ; then
update-alternatives --install /usr/lib/mesa-diverted/libGL.so-master libGL.so-master /usr/lib/mesa 9996 $slaves
else
update-alternatives --remove libGL.so-master /usr/lib/mesa
fi
# libGL.so (from /usr/lib/mesa-diverted/)
slaves="$(add_multiarch_slave /usr/lib "" libGL.so /usr/lib/mesa-diverted)"
if echo "$slaves" | grep -q "slave" ; then
update-alternatives --install /usr/lib/mesa-diverted/libGL.so-master libGL.so-master /usr/lib/mesa-diverted 9995 $slaves
else
update-alternatives --remove libGL.so-master /usr/lib/mesa-diverted
fi
# Refresh the dynamic linker cache after the links changed.
ldconfig
}
# Refresh the alternatives when another package fires our dpkg trigger.
if [ "$1" = "triggered" ]; then
setup_alternatives
fi

# <target: wheezy>
if [ "$1" = "configure" ]; then
# remove alternatives set up by libgl{1,x}-nvidia-alternatives,
# libgl1-nvidia*-glx, nvidia-glx*
if update-alternatives --list libGL.so >/dev/null 2>&1 ; then
update-alternatives --remove-all libGL.so
fi
if update-alternatives --list libGL.so.1 >/dev/null 2>&1 ; then
update-alternatives --remove-all libGL.so.1
fi
if update-alternatives --list libglx.so >/dev/null 2>&1 ; then
update-alternatives --remove-all libglx.so
fi
# set up by older versions of glx-alternative-mesa
if update-alternatives --list libGLso >/dev/null 2>&1 ; then
update-alternatives --remove-all libGLso
fi
fi
# </target: wheezy>

# Normal install/upgrade path: (re)register the alternatives.
if [ "$1" = "configure" ]; then
setup_alternatives
fi

exit 0
| true
|
3e8ff6b2dc589d90e850e41edd3df3482ebca57b
|
Shell
|
withinboredom/dotfiles
|
/bin/docker-compose
|
UTF-8
| 398
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Strip a leading "/mnt" from a WSL-style path, e.g. /mnt/c/x -> /c/x.
# Paths without the prefix pass through unchanged.
convertC() {
local p="$1"
printf '%s\n' "${p#/mnt}"
}
# Convert a /c/... style path to a Windows path like c:\...
convertWin() {
local p="${1#/}"
# Insert the drive-letter colon: "c/Users" -> "c:/Users".
p="${p:0:1}:${p:1}"
# Flip forward slashes to backslashes.
printf '%s\n' "$p" | sed 's#/#\\#g'
}
# Mount the current directory into a docker/compose container and forward
# all arguments to it. target = POSIX path without /mnt (container side),
# source = Windows-style path for the volume source.
target=$(convertC "$(pwd -P)")
source=$(convertWin "$(convertC "$(pwd -P)")")
docker run \
-it \
--rm \
-v "$source:$target" \
-w "$target" \
-v /var/run/docker.sock:/var/run/docker.sock \
docker/compose:1.8.0 \
"$@"
| true
|
733a2d0320435d4abf841f5fd1d82bb0a6f07900
|
Shell
|
tnakaicode/jburkardt
|
/tetrahedron_integrals/tetrahedron_integrals.sh
|
UTF-8
| 332
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Install tetrahedron_integrals: copy the header, compile the source, and
# archive the object file under the user's C++ library directory.
#
cp tetrahedron_integrals.hpp /$HOME/include
#
g++ -c -I /$HOME/include tetrahedron_integrals.cpp
if [ $? -ne 0 ]; then
  echo "Errors compiling tetrahedron_integrals.cpp"
  # Propagate failure to the caller; a bare 'exit' would return the echo's
  # status (0) and make the build look successful.
  exit 1
fi
#
mv tetrahedron_integrals.o ~/libcpp/$ARCH/tetrahedron_integrals.o
#
echo "Library installed as ~/libcpp/$ARCH/tetrahedron_integrals.o"
| true
|
441449ee666af1e284af888acaaa9c50df769dd6
|
Shell
|
amirakha1/vagrant-consul-dev
|
/scripts/provision.sh
|
UTF-8
| 687
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision a Vagrant box with unzip/curl/jq and Consul 1.4.3.

# Install dependencies if needed. 'command -v' is the portable existence
# check; the original 'which a b c 2>&1' printed found paths and its exit
# status varies between which implementations.
for tool in unzip curl jq; do
  command -v "$tool" >/dev/null 2>&1 || {
    echo "Installing dependencies... "
    apt-get update
    apt-get install -y unzip curl jq
    break
  }
done

# Download consul if needed
command -v consul >/dev/null 2>&1 || {
  pushd /usr/local/bin
  echo "Fetching Consul version 1.4.3"
  curl -sL -o /var/tmp/consul.zip https://releases.hashicorp.com/consul/1.4.3/consul_1.4.3_linux_amd64.zip
  unzip /var/tmp/consul.zip
  rm /var/tmp/consul.zip
  # Set permission
  chmod +x consul
  popd
}

# create /etc/consul.d if needed
[ -d /etc/consul.d ] || {
  # Create consul config directory and set permission
  mkdir /etc/consul.d
  chmod a+w /etc/consul.d
}
echo "provisioning done"
| true
|
c8210f4d704ba44f6dbc7b484fe04652dbc7507d
|
Shell
|
derphilipp/ums
|
/plugins.d/fzf.sh
|
UTF-8
| 141
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -Eeuo pipefail

# Rebuild the fzf binary whenever a local fzf checkout provides its
# installer script.
installer="$HOME/.fzf/install"
if [ -e "$installer" ]; then
  echo "⌨️ Update fzf"
  "$installer" --bin
fi
| true
|
72e748d39422d061def1d9d2ad9ffaf36548d2de
|
Shell
|
rishubjain/biowulf2_scripts
|
/sbatch_new_lscratch_rel14.sh
|
UTF-8
| 23,731
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Script by Rishub Jain, adapted from Mario's biowulf1 script
#
#
# READ THIS BEFORE USING THIS SCRIPT:
#
# This script is designed to work with relion 1.3 and 1.4 on biowulf2. It is meant to run as if you have extracted particles using RELION, but if you didn't you can change things to make it still work.
# The point of this script is to copy the particle stacks onto the SSD of each node so that reading the particle stacks will be quicker.
# It only works for the classification and refinement steps that use the particle stacks.
#
# GUI Information:
# Number of MPI procs: This is the number of different processes (tasks) you want to run
# Number of threads: This is the number of CPUs allocated to each task. Change this from the default of 1 if you need more memory per task.
# Available RAM (in Gb) per thread: This is the amount of memory allocated per CPU. You can find this out by dividing the memory per node by the number of CPUs available
#
# Example sbatch submit command using the largemem partition and 32G memory per task, with 32 tasks:
# Number of MPI procs: 32
# Number of threads: 2
# Available RAM (in Gb) per thread: 15.9
# Submit to queue?: Yes
# Queue name: <LEAVE BLANK>
# Queue submit command: sbatch --mem=1009g --partition=largemem
# Standard submission script: sbatch_new_lscratch_rel14.sh (or wherever it is located)
#
#
# Things to note before running this script:
# - You should have a good idea about how clusters, specifically biowulf2, work when using this script (please read the biowulf2 user guide). You must run this script using entirely free nodes.
# - This script will work for RELION 1.3 and 1.4 because the actual naming conventions do not change between these versions. It probably also works with RELION 1.2, but I am not sure.
# - The script assumes that the particle stack(s) are in the Particles/Micrographs/ directory, which should be in the home directory (the directory the GUI was run from). If this is not the case, an easy fix is to create a Particles/Micrographs/ directory in the home directory, and link the Particle stack from wherever it is to the Particles/Micrographs/ directory. You will also have to change this in the input star file (or in the data.star if you are continuing a run) to also include this. The particles in the star file must be in the format of Particles/<folder name>/<particle>.mrcs OR /lscratch/<JOB ID>/<particle>.mrcs.
# - To link files, type the following into the command line:
# ln -s /complete/path/to/file.mrcs /path/to/home/directory/
# - The script automatically assumes you are using a node with an 800GiB SSD. If you are using something different, specify the lscratch memory in the RELION GUI using --gres=lscratch:800.
# - Only use SSD nodes if possible, not SATA. SATA will be slower, but your total time might still increase by using this script on SATA.
# - m is the name of the Micrographs folder. This is by default "Micrographs", so if your Micrographs folder is called something else, you have to specify it by putting --export=m=foldername in the sbatch submit command in the RELION GUI
# - maxs is the maximum number of KiB that you want to use on the node. The default is 838000000 (~799 GiB out of the 800 GiB allocated). If you are using less than this the script should stop when the disk has reached its maximum (but you should still change this variable. 418400000 Kib = ~399 Gib). However if you are using more than 800GiB, you must change this variable.
# - It seems like leaving extra space on the disk is only useful if you are constantly writing to the disk. Since this copying process is a one time thing, we want to fill up the SSD as much as we can. The biowulf admins said they do not think filling up the entire disk will decrease reading performance. Also, it seems like filling up the disk completely does not significantly slow down the copying process at the end.
# - If the particle stack exceeds the maximum memory specified, the script will try to fit in as many of the .mrcs files as it can onto the SSD, and the rest of the particles it will use from your Particles/Micrographs/ folder. Since it tries to put as many particles as it can onto the /lscratch/ directory, and because the SPY pipeline often outputs one big particle stack, it is a time consuming process to create new particle stacks to be copied. Though this is done automatically if there isn't enough memory on /lscratch/, if you want to only copy the particles that are used, you can specify the "separate" variable to be 1 by doing --export=ALL,separate=1. Since this takes around an hour for large data, this will (most probably) not speed up the copying process. However, if your particle stacks are bigger that what you can fit on the /lscratch/ space, doing this could greatly speed up your runs.
# - If you set the "separate" variable to -1, it will not split the stack even if it is bigger than the /lscratch/ directory. This may be better at times if your particle stack files are very small, and when creating two new stacks would not help performance much.
# - To export multiple variables, use --export=ALL,m=Micrographs,maxs=830000000 in the submit command in the RELION GUI. You must have the =ALL at the beginning.
# - If you do not need 10 days to run, you should specify a more reasonable time using --time=4-00:00:00.
# - The star files are changed to have /lscratch/JOBID/ instead of Particles/Micrographs/ as the folder that holds the .mrcs files. The original star file is copied to *.star.save.
# - The *_data.star files with the particle information, and any other particle star files that were created, will have the particles listed as being in the lscratch directory, so when viewing the particles you will have to change this. You do not need to change this if you are just going from step to step. This script still works if the star files have /lscratch/JOBID/particle.mrcs instead of Particle/Micrographs/particle.mrcs
# - If you want to view the particles manually, you can type this into the command line:
# cat particles.star | perl -pe "s|/lscratch/.*?/|Particles/Micrographs/|" > particles.star.mod
# mv particles.star.mod particles.star
# - This script is specific to how biowulf2 functions as of August 2015 (how it specifies the list of nodes, etc.). As biowulf2 changes this script should be changed.
# - By default biowulf2 uses hyperthreading, and it seems like if you specify --ntasks-per-core=2, things get messed up (as of July 30 2015). Though I haven't tried this, specifying this option may solve wierd problems you might encounter.
#
#
# Things to note if you are changing this script, or if you are having issues:
# - The main input particles star file should be in the home directory. Otherwise you might encounter some problems (I haven't tested this yet, but it should work either way now). You can also link this starfile.
# - You may have problems if there are dashes (or any other special characters) in the names of the Particle stack .mrcs files or in the main particle star file. I think I fixed this but have not tested it thoroughly.
# - If you want this script to be run straight after going through the SPY Pipeline, you will have to edit this script. You can still run this script if you do the modifications I said above, which doesn't take long. I did this because it made things easier, and the majority of what I did was working with Particles generated through RELION, which creates this directory.
# - If you specify the "separate" variable, or if there isn't enough space on /lscratch/, it combines all of the particle stacks into two stacks: One which will fit almost exactly onto the /lscratch/ space, and one which will be stored in your Particles/Micrographs/ directory. Both of these combined will have only the particles from the input star file. If you specified the "separate" variable to be 1, and the particle stack will fit into the /lscratch/ directory, it will just create one particle stack of only the particles being used.
# - newstack is not the most efficient method, so if someone can make a method that does newstack in parallel, or at least more efficiently, it would greatly speed up the process of creating the new stacks, and the copying process would also probably be sped up because less particles are being copied.
# - Copying speed is around 14.5GB/min using rsh $node cp .... However, using rcp or rsync instead might slightly be faster. I have tested this a little and it doesn't seem to be much of a difference, but you could save time here if done properly.
# - After extensive testing, it seems like compressing the file before transfering does not reduce time, using a variety of different compressions and compression speeds (even compressing in parallel), but instead greatly increases the total time while only slightly decreasing the copying time. Combining the files without compression before copying over also does not seem to save time. Copying to each node's disk is just too fast, and any attempt to decrease the amount of data sent will only increase the total time.
# - It also seems like copying many files onto each node at the same time increases the speed by about 15%, which would save about 10 minutes when copying 800G. It may be worth testing.
# - When running Movie processing, I don't think it uses the particle stack from the averaged micrographs. However, if it actually does, the script could be changed to also modify the data.star files of those particle stacks, and copy those stacks to /lscratch. However, the Movie particles alone are often larger than 800GiB, so it probably wouldn't make a difference.
# - At some steps, particularly Movie processing, the script might use files that still have /lscratch/ in the star file, and it is unable to change them (because though the classification and refinement steps only use the .star files given, this may not be the case with the Movie processing steps). You will have to manually change them by doing the steps listed above. To see what files have /lscratch/ in them, you can run the following from your home directory:
# - grep -r lscratch *.star | awk '{print $1}' | perl -pe "s|star.*?mrcs|star|" | uniq
# - I initially stored this /lscratch/JOBID/ information in a new star file and edited the command to use that star file, but to do this sucessfully you would have to change the actual rootname of the files (like run1, autopicking, etc.). This can be conflicting in many places, since relion assumes some filesnames to be a certain way. To avoid this hedache, I am just editing over the same star file.
# - Allocate the memory by node (using --mem-125g) instead of by cpu (--mem-per-cpu=4g).
# - You can check the progress of the copying process on each node by looking at the .out files, or ussing ssh to access the lscratch directory of each node. If you see that files are no longer copying to the disk, wait a few minutes, and the script should take care of it. If this is still the case, something may have went wrong.
# - Copying the particles to /lscratch/ significantly saves time in every step of the process before movie processing. Though running movie processing with this script might actually take more time because it would take so long to copy the files, I do not think this is the case.
# - I ran out of time to do intense testing on real data, but everything should work. There may be small bugs.
# - It may be worth experimenting with making this not limited to exclusive nodes. I think you will not loose time if you do this, and you will have access to more CPUs, so it is worth a try.
#
# Setting up things
#SBATCH --job-name="rlnXXXnameXXX"
#SBATCH --ntasks=XXXmpinodesXXX
#SBATCH --exclusive
#SBATCH --error=XXXerrfileXXX
#SBATCH --output=XXXoutfileXXX
#SBATCH --time=10-00:00:00
#SBATCH --cpus-per-task=XXXthreadsXXX
#SBATCH --gres=lscratch:800
#SBATCH --mem=125g
# Load RELION and record job context in the log for later debugging.
module load RELION/1.4-beta-2
echo This is sbatch_new_lscratch_rel14.sh, last modified Aug 21 2015
echo run started on: $(date)
echo running on $SLURM_SUBMIT_DIR
echo with job id $SLURM_JOBID
echo with nodes: $SLURM_JOB_NODELIST
# XXXcommandXXX is substituted by the RELION GUI with the full command line.
command=$(echo XXXcommandXXX)
echo with command:
echo $command
cd $SLURM_SUBMIT_DIR
# Set the Micrographs directory
if [ -z "$m" ]; then
m=Micrographs
fi
# maxs: maximum KiB to use on each node's /lscratch disk (~799 of 800 GiB).
if [ -z "$maxs" ]; then
maxs=838000000
fi
# separate: 1 = always repack particles into new stacks,
# -1 = never split even if too large, 0 = decide automatically.
if [ -z "$separate" ]; then
separate=0
fi
# Check if you are continuing a run or running movie processing.
#   c   - non-empty when the command contains --continue <...>.star
#   mov - non-empty when movie-frame realignment was requested
c=`echo $command|grep -o "\-\-continue.*star"`
mov=`echo $command|grep -o "realign_movie_frames"`
# get the name of the starfile from the command
if [ -z "$c" ]; then
if [ -z "$mov" ]; then
# Normal run
starfile=`echo $command|grep -oP "\-\-i.*star"|sed "s|--i ||"`
else
echo "ERROR: RUNNING MOVIE PROCESSING WITHOUT CONTINUING RUN!"
fi
else
if [ -z "$mov" ]; then
# Continuing previous run, not movie processing: derive the data star
# file name from the optimiser star file given to --continue.
opt=`echo $command|grep -o "\-\-continue.*optimiser.star"|sed "s|--continue ||"`
starfile=${opt/"optimiser"/"data"}
else
# Continuing previous run with movie processing
starfile=`echo $command|grep -oP "\-\-realign_movie_frames.*star"|sed "s|--realign_movie_frames ||"`
fi
fi
echo "star file is: $starfile"
# Get the column that corresponds to the image name.
# A line like "_rlnImageName #3" splits so col[0] is the label and
# col[1] the 1-based column number used by awk below.
IFS=' # ' read -a col <<< $(grep _rlnImageName ${starfile})
echo "value of variable ${col[0]} is ${col[1]}"
# Get the list of nodes in use.
# SLURM_JOB_NODELIST is either a single name ("cn1234", 6 chars) or a
# compressed range like "cn[0001-0003,0005]" which is expanded below into
# individual "cnNNNN" names in the nlist array.
if [ ${#SLURM_JOB_NODELIST} -eq 6 ]; then nlist=$SLURM_JOB_NODELIST; else
# Peel off the "cn[" prefix and "]" suffix, then split on commas.
IFS=' [ ' read -a nodelist <<< $SLURM_JOB_NODELIST
IFS=' ] ' read -a nodelist <<< ${nodelist[1]}
IFS=' , ' read -a nodes <<< $nodelist
unset nlist
for x in "${nodes[@]}"
do
# Each element is either a single number or a "first-last" range.
IFS=' - ' read -a y <<< $x
if [ ${#y[*]} -eq 2 ]; then
for i in $(seq -w ${y[0]} ${y[1]})
do
# Zero-pad to the 4-digit node-number convention.
if [ ${#i} -eq 4 ]; then
temp=$i; else
temp=$(printf "%04d" $i)
fi
nlist=("${nlist[@]}" cn$temp)
done
else
nlist=("${nlist[@]}" cn$y)
fi
done
fi
echo nodes: ${nlist[@]}
# Collect the unique particle stack file names referenced by the star file:
#   ustacks   - bare .mrcs file names
#   ustacks2  - same names prefixed with Particles/<m>/
#   stacknums - maps stack name -> 1-based ordinal (used as a key prefix)
unset ustacks
unset ustacks2
unset stacknums
declare -A stacknums
scount=0
# Slightly slower than:
# for dstack in `awk -v c=${col[1]} '{if (NF<= 2) {} else {print $c}}' < ${starfile}| grep -oP "\/\w*\.mrcs" | sed "s|/||" | sort | uniq`
# but works with dashes, and is faster than using basename
while read dstack
do
scount=$(( scount + 1 ))
stacknums[$dstack]=$scount
ustacks=("${ustacks[@]}" $dstack)
ustacks2=("${ustacks2[@]}" Particles/$m/$dstack)
done < <(for p in `awk -v c=${col[1]} '{if (NF<= 2) {} else {print $c}}' ${starfile}`; do echo ${p##*/}; done | uniq)
# Total size (KiB) of all referenced stacks, used to decide whether they
# fit on the per-node /lscratch disk.
stacklen=$(du -cs ${ustacks2[*]} | tail -n 1 | awk '{print $1}')
# The stacks do not all fit on /lscratch (or splitting was forced): repack
# the referenced particles into (up to) two stacks — one sized to fit on
# the node SSDs, one left under Particles/<m>/ — and rewrite the star file.
if ( [ $stacklen -gt $maxs ] || [ $separate -gt 0 ] ) && [ $separate -ne -1 ]; then
module load IMOD
# Total particle count across all stacks ('header' is an IMOD tool).
numparts=0
for x in ${ustacks2[*]}
do
ts=$( header $x -s | awk '{print $3}' )
numparts=$(( numparts + ts ))
done
# Max particles that fit on /lscratch, from the average per-particle size.
maxp=$(( (maxs - 10000) / (stacklen/numparts) ))
unset secslist
unset restplist
unset secs
declare -A secs
unset restp
declare -A restp
unset secssize
declare -A secssize
unset restpsize
declare -A restpsize
unset pvals
declare -A pvals
unset wstack
declare -A wstack
unset addfsecs
declare -A addfsecs
unset addfrestp
declare -A addfrestp
overf=0
secscount=0
restpcount=0
pwrit=0
# one file, reduces some overhead
for parts in `awk -v c=${col[1]} '{if (NF<= 2) {} else {print $c}}' < ${starfile}`
do
# Particle references look like NNNNNNN@stack.mrcs.
IFS=' @ ' read -a pinfo <<< $parts
pnum=${pinfo[0]}
pstack=${pinfo[1]}
if [ $pwrit -gt $maxp ]; then
# Past SSD capacity: this particle goes to the not-copied stack.
overf=1
if [ -z "${restp[$pstack]}" ]; then
restp[$pstack]="$pnum"
restplist=("${restplist[@]}" $pstack)
restpsize[$pstack]=1
restpcount=$(( restpcount + 1 ))
else
restpsize[$pstack]=$(( ${restpsize[$pstack]} + 1 ))
restp[$pstack]="${restp[$pstack]},${pnum}"
fi
pvals[${stacknums[$pstack]},$pnum]=${restpsize[$pstack]}
wstack[${stacknums[$pstack]},$pnum]=Particles/$m/not_copied_stack_${SLURM_JOBID}.mrcs
else
if [ -z "${secs[$pstack]}" ]; then
secs[$pstack]="$pnum"
secslist=("${secslist[@]}" $pstack)
secssize[$pstack]=1
secscount=$(( secscount + 1 ))
else
secssize[$pstack]=$(( ${secssize[$pstack]} + 1 ))
secs[$pstack]="${secs[$pstack]},${pnum}"
fi
pvals[${stacknums[$pstack]},$pnum]=${secssize[$pstack]}
wstack[${stacknums[$pstack]},$pnum]=/lscratch/${SLURM_JOBID}/copied_stack_${SLURM_JOBID}.mrcs
fi
pwrit=$(( pwrit + 1 ))
done
echo "There are $pwrit particles"
# Write the newstack section-selection input for the copied stack.
caddf=0
echo $secscount > copied_particles_${SLURM_JOBID}.in
for x in ${secslist[*]}
do
echo $x >> copied_particles_${SLURM_JOBID}.in
echo ${secs[$x]} >> copied_particles_${SLURM_JOBID}.in
addfsecs[$x]=$caddf
caddf=$(( caddf + ${secssize[$x]} ))
done
# There is not a native way to write to all /lscratch/ directories at once, but I could modify the newstack command to do so. This would save time.
newstack -fr -filei copied_particles_${SLURM_JOBID}.in -ou Particles/$m/copied_stack_${SLURM_JOBID}.mrcs &
PIDCOP=$!
if [ $overf -eq 1 ]; then
caddf=0
echo $restpcount > not_copied_particles_${SLURM_JOBID}.in
for x in ${restplist[*]}
do
echo $x >> not_copied_particles_${SLURM_JOBID}.in
echo ${restp[$x]} >> not_copied_particles_${SLURM_JOBID}.in
addfrestp[$x]=$caddf
caddf=$(( caddf + ${restpsize[$x]} ))
done
newstack -fr -filei not_copied_particles_${SLURM_JOBID}.in -ou Particles/$m/not_copied_stack_${SLURM_JOBID}.mrcs &
PIDNCOP=$!
fi
# -f: the .mod file may not exist yet; a bare rm printed a spurious error.
rm -f ${starfile}.mod
# Rewrite each particle reference to point into the new stack(s).
while read line
do
if [ `echo $line | awk '{print NF}'` -le 2 ]; then
echo $line >> ${starfile}.mod
else
parts=$(echo $line | awk -v c=${col[1]} '{print $c}')
IFS=' @ ' read -a pinfo <<< $parts
pnum=${pinfo[0]}
pstack=${pinfo[1]}
if [ "${wstack[${stacknums[$pstack]},$pnum]}" = "/lscratch/${SLURM_JOBID}/copied_stack_${SLURM_JOBID}.mrcs" ]; then
adv=${addfsecs[$pstack]}
else
adv=${addfrestp[$pstack]}
fi
echo $line | awk -v pn=$( printf "%07d%s" $(( ${pvals[${stacknums[$pstack]},$pnum]} + adv )) ) -v pname="${wstack[${stacknums[$pstack]},$pnum]}" -v c=${col[1]} '{$c=pn"@"pname}1' >> ${starfile}.mod
fi
done < ${starfile}
wait $PIDCOP
if [ $overf -eq 1 ]; then
wait $PIDNCOP
fi
echo started copying files at $(date)
# Initialize so the cleanup check at the end of this branch is well-defined
# even when copying finishes without hitting an overflow/error path.
filled=0
dstack="copied_stack_${SLURM_JOBID}.mrcs"
orisize=$(stat -Lc '%s' "${SLURM_SUBMIT_DIR}/Particles/${m}/${dstack}")
echo "transferring file ${dstack} of original size ${orisize}"
# start the copy the stack to local /lscratch on each node
for i in "${nlist[@]}";
do
if [ ! -f ${SLURM_SUBMIT_DIR}/Particles/${m}/${dstack} ]; then
echo "File ${SLURM_SUBMIT_DIR}/Particles/${m}/${dstack} not found ... aborting"
# Abort with a non-zero status (a bare 'exit' would report success).
exit 1
fi
rsh $i cp -L ${SLURM_SUBMIT_DIR}/Particles/${m}/${dstack} /lscratch/${SLURM_JOBID} &
done
# verify that copy is finished on each node
for i in "${nlist[@]}"; do
count=0
currsize=$(rsh $i if [ -f /lscratch/${SLURM_JOBID}/${dstack} ]";" then stat -c '%s' "/lscratch/${SLURM_JOBID}/${dstack}" ";" else echo 0 ";" fi)
temps=$currsize
echo Size of file /lscratch/$SLURM_JOBID/$dstack in node $i is $currsize
while [ $currsize -lt $orisize ]; do
sleep 5
currsize=$(rsh $i if [ -f /lscratch/${SLURM_JOBID}/${dstack} ]";" then stat -c '%s' "/lscratch/${SLURM_JOBID}/${dstack}" ";" else echo 0 ";" fi)
echo node ${i}: copied ${currsize} of ${orisize}
count=$(( count+1 ))
# If it is not copying anything for ~100 secconds, use the rest of the files from the Particles/Micrographs directory
if [ "$count" -gt 20 ]; then
ds=$(rsh $i du -s /lscratch/${SLURM_JOBID}/ | awk '{print $1}')
if [ "$ds" -gt "$maxs" ]; then
echo -e "\nParticle stack is too large for the lscratch space (${maxs} KiB). Changing star file to use the rest of the particles from the Particles/Micrographs directory.\n"
filled=1
cat ${starfile} | perl -pe "s|/lscratch/${SLURM_JOBID}/${dstack}|Particles/${m}/${dstack}|" > ${starfile}.mod
mv ${starfile}.mod ${starfile}
break 2
fi
if [ "$temps" -eq "$currsize" ]; then
echo -e "\nFILES NOT COPYING FOR SOME UNKNOWN ERROR! Using the rest of the files from Particles/Micrographs directory.\n"
filled=1
cat ${starfile} | perl -pe "s|/lscratch/${SLURM_JOBID}/${dstack}|Particles/${m}/${dstack}|" > ${starfile}.mod
mv ${starfile}.mod ${starfile}
break 2
fi
temps=$currsize
count=0
fi
done
done
# Remove partial copies when we fell back to the shared filesystem.
if [ "$filled" -eq 1 ]; then
for i in "${nlist[@]}"; do
rsh $i rm /lscratch/${SLURM_JOBID}/${dstack}
done
fi
else
# Stacks fit on /lscratch: point the star file at /lscratch and copy each
# stack to every node, falling back to Particles/<m>/ if the disk fills.
# Modify the star file to indicate that the stack should be read from local /lscratch
cp ${starfile} ${starfile}.save
cat ${starfile} | perl -pe "s|/lscratch/.*?/|/lscratch/${SLURM_JOBID}/|g" > ${starfile}.mod
cat ${starfile}.mod | sed "s|Particles/${m}/|/lscratch/${SLURM_JOBID}/|" > ${starfile}
rm ${starfile}.mod
echo finished modifying star file to use /lscratch/$SLURM_JOBID/
echo started copying files at $(date)
filled=0
while read dstack
do
if [ "$filled" -eq 0 ]; then
# NOTE(review): $i here is whatever node the previous inner loop ended
# on — presumably any node's usage is representative; confirm.
ds=$(rsh $i du -s /lscratch/${SLURM_JOBID}/ | awk '{print $1}')
if [ "$ds" -gt "$maxs" ]; then
filled=1
fi
fi
if [ "$filled" -eq 1 ]; then
# Disk full: revert this stack's references back to the shared path.
cat ${starfile} | perl -pe "s|/lscratch/${SLURM_JOBID}/${dstack}|Particles/${m}/${dstack}|" > ${starfile}.mod
mv ${starfile}.mod ${starfile}
else
orisize=$(stat -Lc '%s' "${SLURM_SUBMIT_DIR}/Particles/${m}/${dstack}")
echo "transferring file ${dstack} of original size ${orisize}"
# start the copy the stack to local /lscratch on each node
for i in "${nlist[@]}";
do
if [ ! -f ${SLURM_SUBMIT_DIR}/Particles/${m}/${dstack} ]; then
echo "File ${SLURM_SUBMIT_DIR}/Particles/${m}/${dstack} not found ... aborting"
# NOTE(review): bare 'exit' returns the echo's status (0) — callers
# cannot detect this abort; should probably be 'exit 1'.
exit
fi
rsh $i cp -L ${SLURM_SUBMIT_DIR}/Particles/${m}/${dstack} /lscratch/${SLURM_JOBID} &
done
# verify that copy is finished on each node
for i in "${nlist[@]}"; do
count=0
currsize=$(rsh $i if [ -f /lscratch/${SLURM_JOBID}/${dstack} ]";" then stat -c '%s' "/lscratch/${SLURM_JOBID}/${dstack}" ";" else echo 0 ";" fi)
temps=$currsize
echo Size of file /lscratch/$SLURM_JOBID/$dstack in node $i is $currsize
while [ $currsize -lt $orisize ]; do
# This wait value should be set to something bigger if each file is larger than 5 GB, as checking file sizes very often could slow the copying process.
sleep .5
currsize=$(rsh $i if [ -f /lscratch/${SLURM_JOBID}/${dstack} ]";" then stat -c '%s' "/lscratch/${SLURM_JOBID}/${dstack}" ";" else echo 0 ";" fi)
echo node ${i}: copied ${currsize} of ${orisize}
count=$(( count+1 ))
# If it is not copying anything for ~15 secconds, use the rest of the files from the Particles/Micrographs directory
if [ "$count" -gt 30 ]; then
ds=$(rsh $i du -s /lscratch/${SLURM_JOBID}/ | awk '{print $1}')
if [ "$ds" -gt "$maxs" ]; then
echo -e "\nParticle stack is too large for the lscratch space (${maxs} KiB). Changing star file to use the rest of the particles from the Particles/Micrographs directory.\n"
filled=1
cat ${starfile} | perl -pe "s|/lscratch/${SLURM_JOBID}/${dstack}|Particles/${m}/${dstack}|" > ${starfile}.mod
mv ${starfile}.mod ${starfile}
break 2
fi
if [ "$temps" -eq "$currsize" ]; then
echo -e "\nFILES NOT COPYING FOR SOME UNKNOWN ERROR! Using the rest of the files from Particles/Micrographs directory.\n"
filled=1
cat ${starfile} | perl -pe "s|/lscratch/${SLURM_JOBID}/${dstack}|Particles/${m}/${dstack}|" > ${starfile}.mod
mv ${starfile}.mod ${starfile}
break 2
fi
temps=$currsize
count=0
fi
done
done
# Remove partial copies of this stack when we fell back to shared storage.
if [ "$filled" -eq 1 ]; then
for i in "${nlist[@]}"; do
rsh $i rm /lscratch/${SLURM_JOBID}/${dstack}
done
fi
fi
done < <(for p in `awk -v c=${col[1]} '{if (NF<= 2) {} else {print $c}}' ${starfile}`; do echo ${p##*/}; done | uniq)
fi
echo finished copying files at $(date)
echo "All stacks transferred to /lscratch/$SLURM_JOBID"
# run relion — $command is intentionally unquoted so it word-splits back
# into the relion executable and its arguments.
srun --mpi=pmi2 $command
| true
|
b19cadafda2f050f2b83067f5946cc8d0ac42dd5
|
Shell
|
frdmn/LiveConfig-scripts
|
/confixx_web_and_mail_data_migrate/migrate-web.sh
|
UTF-8
| 3,303
| 3.78125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# LiveConfig web content migration
# (c) 2014 by Jonas Friedmann (iWelt AG)

##################################
# Configuration - adjust if needed
##################################

###
# Variables
###

# Load the settings file that lives next to this script.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
. "${DIR}/settings.conf"

##################################
# Logic - do not change anything below this line
##################################

###
# Functions
###

# Run a command as root on the remote LiveConfig host.
# "$1" is quoted so multi-word commands are passed as a single argument
# (the original unquoted $1 relied on word-splitting + ssh re-joining).
function remoteSudo {
  ssh -n "root@${LIVECONFIGHOST}" -C "$1"
}

###
# Checks
###

## Create the log folder if it does not exist yet
if [ ! -d "${WEB_LOGFOLDER}" ]; then
  mkdir -p "${WEB_LOGFOLDER}"
fi

# Ask for the old (Confixx) and the new (LiveConfig) web account number
read -p "Bitte gib die _ALTE_ Web-Nummer von 'Confixx' an (webXXX)? " ANTWORT1
read -p "Bitte gib die _NEU_ Web-Nummer von 'LiveConfig' an (webXXX)? " ANTWORT2

if [[ -z "${ANTWORT1}" || -z "${ANTWORT2}" ]]; then
  echo "${ERRORPREFIX} Webnummern nicht vollständig. Bitte Eingabe prüfen!"
  exit 1
fi

# Optionally create and transfer a MySQL dump as well
read -p "Soll anschließend ein MySQL Dump von der Confixx Datenbank des Web Benutzers erzeugt und übertragen werden? (y/n)? " ANTWORT3

if [[ "${ANTWORT3}" =~ ^[Yy]$ ]]; then
  read -p "Bitte gib den/die genauen Datenbanknamen des Web Benutzers an (\"usr_webXXX_1\" oder \"usr_webXXX_1 usr_webXXX_2\")? " ANTWORT4
  if [[ -z "${ANTWORT4}" ]]; then
    echo "${ERRORPREFIX} Datenbankname nicht gesetzt. Bitte Eingabe prüfen!"
    exit 1
  fi
fi

echo "${SPACER}"

###
# Safety check: let the operator confirm all input before anything runs
###
echo "Bitte überprüfe folgende Angaben:"
echo "Web-Nummer auf Confixx System: ${ANTWORT1}"
echo "Web-Nummer auf LiveConfig System: ${ANTWORT2}"

if [[ ! -z "${ANTWORT4}" ]]; then
  echo "Datenbanken dumpen und übertragen: ja"
  echo "Ausgewählte Datenbank(en): ${ANTWORT4}"
else
  echo "Datenbanken dumpen und übertragen: nein"
fi

read -p "Sind die Angaben so korrekt? (y/n)? " ANTWORT5

if [[ ! "${ANTWORT5}" =~ ^[Yy]$ ]]; then
  echo "${ERRORPREFIX} Überprüfung fehlgeschlagen! Migration abgebrochen... "
  exit 1
fi

echo "${SPACER}"

###
# Migration: copy logs and the web root to the LiveConfig host
###

# Include dotfiles in the * globs below
shopt -s dotglob

rsync /var/www/${ANTWORT1}/log/* -ave ssh root@${LIVECONFIGHOST}:/var/www/${ANTWORT2}/logs | tee -a ${WEB_LOGFOLDER}/${ANTWORT1}.log
rsync /var/www/${ANTWORT1}/html/* -ave ssh root@${LIVECONFIGHOST}:/var/www/${ANTWORT2}/htdocs | tee -a ${WEB_LOGFOLDER}/${ANTWORT1}.log

###
# Create and transfer the MySQL dump
###
if [[ ! -z "${ANTWORT4}" ]]; then
  echo "${SPACER}"
  echo "MySQL Dump und übertragung gewünscht..."

  ## Create the dump folder if it does not exist yet
  if [ ! -d "${WEB_SQL_DUMPFOLDER}" ]; then
    mkdir -p "${WEB_SQL_DUMPFOLDER}"
  fi

  # Create the dump - ADJUST BEFORE PRODUCTION USE.
  # ${ANTWORT4} is intentionally unquoted: it may contain several
  # space-separated database names.
  mysqldump -u root -p${WEB_MYSQLPW} --add-drop-table --lock-tables --add-locks --allow-keywords --quote-names ${ANTWORT4} > ${WEB_SQL_DUMPFOLDER}/${ANTWORT1}.sql

  # Transfer the dump to the LiveConfig system
  rsync ${WEB_SQL_DUMPFOLDER}/${ANTWORT1}.sql -ave ssh root@${LIVECONFIGHOST}:/var/www/${ANTWORT2} | tee -a ${WEB_LOGFOLDER}/${ANTWORT1}.log
fi

echo "chmod auf entferntem Server absetzen"
remoteSudo "chown -R ${ANTWORT2}:${ANTWORT2} /var/www/${ANTWORT2}/htdocs" | tee -a ${WEB_LOGFOLDER}/${ANTWORT1}.log
remoteSudo "chmod -R 755 /var/www/${ANTWORT2}/htdocs" | tee -a ${WEB_LOGFOLDER}/${ANTWORT1}.log

# Finished successfully
exit 0
| true
|
9142c56efe40c8c78b6d56337ad53c8a155cd5cb
|
Shell
|
giuliano52/pyhub
|
/utilita-sistema/substitute-duplicates-with-hard-link.sh
|
UTF-8
| 277
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Replace duplicate files with hard links to the first file of each group.
# fdupes -r -1 prints one duplicate group per line, filenames separated by
# spaces (note: this format cannot represent filenames containing spaces).
# GD 20150316
#
# Fixes over the original: the original used bash-only constructs
# (${line[*]}, ${line// .*/}, == inside [ ]) under #!/bin/sh, and the
# pattern ${line// .*/} extracted the "kept" file incorrectly for names
# without a dot. Here the line is split into positional parameters
# instead, which is plain POSIX sh.
fdupes -r -1 . | while read -r line
do
	# $1 becomes the file we keep; the rest are replaced by hard links.
	set -- $line
	keep=$1
	shift
	for file in "$@"
	do
		echo "ln -f $keep $file"
		ln -f "$keep" "$file"
	done
done
| true
|
b8e22e291c80f25a952e4d54698b52c5d5f571bc
|
Shell
|
aramisf/aramisf.github.io
|
/ofs/site_arm1/grad/ci066/2-2007/Scripts/12-1f
|
UTF-8
| 403
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash

# Look up a person in the phone list Arquivos/telefones.txt.
# Side effect: assigns the matching line(s) to the global USUARIO.
# Return status: success only when /etc/passwd is readable AND grep matched.
encontra_pessoa_em_lista()
{
	# If /etc/passwd is readable, search the phone list for $1.
	# The string is assigned to a non-local variable.
	# NOTE(review): the readability test on /etc/passwd looks unrelated
	# to the phone list; kept because the exercise apparently asks for it.
	[[ -r /etc/passwd ]] && USUARIO=$(grep -- "$1" Arquivos/telefones.txt)
}

# "$1" is quoted so names containing spaces reach grep as one pattern
# (the original unquoted $1 word-split).
if encontra_pessoa_em_lista "$1"; then
	echo "$USUARIO"
else
	echo "Nao eh possivel encontrar $1."
fi
| true
|
7fed2fcaf12997f2f0775df0e51311d8d2f6d221
|
Shell
|
BrahmaOS/brahmaos-ipfs
|
/cmd/ipfs/dist/install.sh
|
UTF-8
| 860
| 4.28125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Installation script for ipfs. It tries to move $bin into one of the
# directories stored in $binpaths.

INSTALL_DIR=$(dirname "$0")
bin="$INSTALL_DIR/ipfs"
# Bare binary name used for the destination path. The original moved to
# "$binpath/$bin", i.e. it appended the full source path to the target
# directory, which only worked when INSTALL_DIR happened to be ".".
binname=$(basename "$bin")
binpaths="/usr/local/bin /usr/bin"

# This variable contains a nonzero length string in case the script fails
# because of missing write permissions.
is_write_perm_missing=""

for binpath in $binpaths; do
	if mv "$bin" "$binpath/$binname" 2> /dev/null; then
		echo "Moved $bin to $binpath"
		exit 0
	else
		# -a inside [ ] is deprecated/ambiguous; use two tests.
		if [ -d "$binpath" ] && [ ! -w "$binpath" ]; then
			is_write_perm_missing=1
		fi
	fi
done

echo "We cannot install $bin in one of the directories $binpaths"

if [ -n "$is_write_perm_missing" ]; then
	echo "It seems that we do not have the necessary write permissions."
	echo "Perhaps try running this script as a privileged user:"
	echo
	echo "  sudo $0"
	echo
fi

exit 1
| true
|
0c4a0d5597e92d6a0e0842704c31b68d0b6be6d1
|
Shell
|
darthjee/docker
|
/scripts/0.1.7/home/sbin/rubycritic.sh
|
UTF-8
| 350
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Collect the files under lib/ that changed between the current commit and
# its merge base with origin/master.
merge_base=$(git merge-base $CIRCLE_SHA1 origin/master)
changed_lib_files=$(git diff --name-only $CIRCLE_SHA1 $merge_base | grep "^lib/")

if [ -n "$changed_lib_files" ]; then
	# Run rubycritic only against the changed files, comparing scores
	# to origin/master.
	mkdir -p tmp/rubycritic/compare
	bundle exec rubycritic --format console --branch origin/master -t 0 --maximum-decrease 1 $changed_lib_files
else
	echo "No changes detected. Skipping rubycritic..."
fi
| true
|
3adda4329bf6dd70a1df7e84a64bab4075c90ded
|
Shell
|
ilventu/aur-mirror
|
/drm_tools/PKGBUILD
|
UTF-8
| 1,543
| 2.53125
| 3
|
[] |
no_license
|
# Contributor: Graziano Giuliani <giuliani@lamma.rete.toscana.it>
# Maintainer: Nathan Owe <ndowens.aur at gmail dot com>
# PKGBUILD for drm_tools: a collection of small stand-alone C utilities.
pkgname=drm_tools
pkgver=1.1.9
pkgrel=1
pkgdesc="Small utility linux programs: accudate, columnadd, datasniffer, execinput, extract, mbin, mbout, mdump, msgqueue"
url="ftp://saf.bio.caltech.edu/pub/software/linux_or_unix_tools/"
license="GPL"
arch=('i686' 'x86_64')
depends=('glibc')
source=(ftp://saf.bio.caltech.edu/pub/software/linux_or_unix_tools/${pkgname}-${pkgver}.tar.gz)
md5sums=('5bb16cf0861302e942bf5e5126f94f19')

# Upstream ships no Makefile; each utility is compiled directly with gcc.
# The per-tool flags (feature-test macros, -lm, -DMAXINFILE) mirror what
# each source file needs.
build() {
cd ${srcdir}/${pkgname}-${pkgver}/
gcc -Wall -std=c99 -pedantic -o accudate accudate.c
gcc -Wall -ansi -pedantic -o columnadd columnadd.c
gcc -Wall -std=c99 -pedantic -lm -o datasniffer datasniffer.c
gcc -Wall -ansi -pedantic -o extract -DMAXINFILE=20 extract.c
gcc -Wall -ansi -pedantic -o execinput execinput.c
gcc -Wall -std=c99 -pedantic -o mdump mdump.c
gcc -Wall -ansi -pedantic -o msgqueue -DHAS__MSG_CBYTES -D_XOPEN_SOURCE msgqueue.c
gcc -Wall -ansi -pedantic -D_LARGE_FILE_SOURCE -D_FILE_OFFSET_BITS=64 \
	-o mbin mbin.c
gcc -Wall -ansi -pedantic -D_LARGE_FILE_SOURCE -D_FILE_OFFSET_BITS=64 \
	-o mbout mbout.c
}

# Install the binaries and their man pages into the package tree.
# NOTE(review): the ${srcdir}/tmp/{bin,man} directories created below are
# never used afterwards - they look like leftovers; confirm before removing.
package() {
cd ${srcdir}/${pkgname}-${pkgver}/
install -d ${pkgdir}/usr/{share/man/man1,bin} ${srcdir}/tmp/{bin,man}
install -Dm755 accudate columnadd datasniffer extract execinput mdump msgqueue mbin mbout ${pkgdir}/usr/bin/
install -Dm644 *.1 ${pkgdir}/usr/share/man/man1/
}
| true
|
4925f5aa704f2621ce8b1d07e2a16ee273ea244b
|
Shell
|
Jinzai-solution/SALVATOR_SHELL
|
/board-salvator/USB-HOST/usb_smp_read_write_copy_one.sh
|
UTF-8
| 1,478
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# usb device driver autotest shell-script
#
# Round-trip copy test between two mount points:
#  1. write FILE_SIZE MiB of random data on SOURCE_PATH, copy it to
#     DESTINATION_PATH and verify with cmp ("write test"),
#  2. then repeat in the opposite direction ("read test").
# NOTE(review): $FAIL_MEG and $PASS_MEG are eval'd but never defined in
# this file - presumably exported by the calling test harness; confirm.
set -e
#set -x
if [ $# -lt 3 ]; then
echo "usage : $(basename $0) SOURCE_PATH DESTINATION_PATH FILE_SIZE"
exit 1
fi
SRC_DIR="$1"
DST_DIR="$2"
# Size of the test file in MiB (used as dd's count with bs=1M).
FILE_SIZE="$3"
echo "read write ${FILE_SIZE}M file from $SRC_DIR to $DST_DIR"
# write test
echo "create data ${FILE_SIZE}mb on $SRC_DIR"
cmd="dd if=/dev/urandom of=${SRC_DIR}/file_${FILE_SIZE}mb bs=1M count=${FILE_SIZE}"
echo $cmd
eval $cmd
if ! [ -f ${SRC_DIR}/file_${FILE_SIZE}mb ];then
echo "prepare data on $SRC_DIR not exits"
eval $FAIL_MEG
exit 1
fi
echo "copy data from $SRC_DIR to $DST_DIR"
cmd="cp ${SRC_DIR}/file_${FILE_SIZE}mb $DST_DIR"
echo $cmd
eval $cmd
# Flush caches before verifying so the data really hit the device.
sync
if ! cmp ${SRC_DIR}/file_${FILE_SIZE}mb ${DST_DIR}/file_${FILE_SIZE}mb;then
echo "copy data from $SRC_DIR to $DST_DIR error"
eval $FAIL_MEG
exit 1
fi
# Clean both directories before the reverse-direction test.
rm -rf ${SRC_DIR}/*
rm -rf ${DST_DIR}/*
sync
sleep 3
#read test
echo "create ${FILE_SIZE}mb data on $DST_DIR"
cmd="dd if=/dev/urandom of=${DST_DIR}/file_${FILE_SIZE}mb bs=1M count=${FILE_SIZE}"
echo $cmd
eval $cmd
if ! [ -f ${DST_DIR}/file_${FILE_SIZE}mb ];then
echo "prepare data on $DST_DIR not exits"
eval $FAIL_MEG
exit 1
fi
echo "copy data from $DST_DIR to $SRC_DIR"
cmd="cp ${DST_DIR}/file_${FILE_SIZE}mb $SRC_DIR"
echo $cmd
eval $cmd
sync
if ! cmp ${SRC_DIR}/file_${FILE_SIZE}mb ${DST_DIR}/file_${FILE_SIZE}mb;then
echo "copy data from $DST_DIR to $SRC_DIR error"
eval $FAIL_MEG
exit 1
fi
eval $PASS_MEG
sync
| true
|
503aefc8c7acfc7f1091f7d3fc0e13efe0b2db26
|
Shell
|
DrewLung/willis
|
/convert.sh
|
UTF-8
| 325
| 3.4375
| 3
|
[] |
no_license
|
# Temperature converter: Celsius <-> Fahrenheit, arithmetic done by bc.

# Celsius -> Fahrenheit: F = 9*C/5 + 32. Prints the call then the result.
function CtoF {
	echo "CtoF $1"
	echo "9*$1/5+32" | bc -l
}

# Fahrenheit -> Celsius: C = (F-32)*5/9.
# (The original function only echoed its argument and never converted.)
function FtoC {
	echo "FtoC $1"
	echo "($1-32)*5/9" | bc -l
}

read -p "What temp would you like to convert? " temp
read -p "Convert CtoF or FtoC? " convert

# "$convert" is quoted so an empty answer falls through to the error
# message instead of causing a test-syntax error.
if [ "$convert" == "CtoF" ] ; then CtoF "$temp"
elif [ "$convert" == "FtoC" ] ; then FtoC "$temp"
else echo "$convert is not valid"
fi
| true
|
40e26140e3c43da277bae4ca2174fdf7b7c6bd56
|
Shell
|
numerals/forq
|
/conf/forq
|
UTF-8
| 1,214
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#! /usr/bin/sh
##
# Forismatic Conky
# https://github.com/numerals/forq.git
#
# Copyright (c) 2014 Sartaj Singh, Sumit Sahrawat
# Licensed under the MIT license.
##
# CLI wrapper around the Python helpers generate.py / parse.py in
# $FORQ_DIR: fetch a new quote (-n), print it (-q) or its author (-a).

NEW_REQUEST=
QUOTE=
AUTHOR=

# Print usage and exit with status 1 (note: -h also exits non-zero).
help_forq()
{
echo "Usage: $0 <flag>"
echo "Valid flags:"
echo " -n : Request new quote from forismatic"
echo " -q : Print last quote"
echo " -a : Print author of last quote"
echo " -h : Displays this help"
exit 1
}

# default value for $FORQ_DIR
if [ -z "$FORQ_DIR" ]; then
FORQ_DIR=/usr/local/forq
fi

# If no command-line args, display help and exit
if [ $# -lt 1 ]; then
help_forq
fi

# If more than one command line argument, display help and exit
if [ $# -gt 2 ]; then
help_forq
fi

# Parse arguments (flags only set markers; actions run afterwards so
# -n combined with -q/-a fetches before printing)
while getopts nqah opt
do
case "$opt" in
n) NEW_REQUEST=1;;
q) QUOTE=1;;
a) AUTHOR=1;;
h) help_forq;;
\?) help_forq;;
esac
done

# -n: ask forismatic for a fresh quote and cache it under $FORQ_DIR
if [ ! -z "$NEW_REQUEST" ]; then
python2.7 "$FORQ_DIR/generate.py" "$FORQ_DIR"
fi

if [ ! -z "$QUOTE" ]; then
# output quote
python2.7 "$FORQ_DIR/parse.py" quoteText "$FORQ_DIR"
fi

if [ ! -z "$AUTHOR" ]; then
# output Author
python2.7 "$FORQ_DIR/parse.py" quoteAuthor "$FORQ_DIR"
fi
| true
|
d658ed9e03c945b0bca5e332fdf2555823ad9245
|
Shell
|
lvjiang/testCode
|
/shell/arry/xingxing.sh
|
UTF-8
| 238
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo: how ${a[*]} / ${a[@]} and $* / $@ behave, with and without double
# quotes, when expanded directly and when passed to a function.

# There is no unquoted whitespace between the comma-joined quoted words,
# so this array ends up with a SINGLE element: "a aa,b bb,c cc".
arry=("a aa","b bb","c cc")
b=(db c d)
# Referencing an array without an index yields element 0 ("db").
echo $b
function showArr() {
# Only the first argument is captured; ${arr[*]} on a scalar is just $arr,
# so the function prints its first word/argument only.
arr=$1
echo ${arr[*]}
}
# Unquoted [*] and [@] expand the same way here.
echo ${arry[*]}
echo ${arry[@]}
# "${arry[*]}" passes all elements as ONE word; "${arry[@]}" passes each
# element as its own word (only $1 reaches arr either way).
showArr "${arry[*]}"
showArr "${arry[@]}"
# Same experiment with the script's own positional parameters.
showArr $*
showArr "$*"
showArr $@
showArr "$@"
| true
|
5e72171650d6c7bd60ac74ad6ea7b96160528ff3
|
Shell
|
alkeshplutustec/service-ingest-initiation-test
|
/scripts/deploy_to_k8s.sh
|
UTF-8
| 810
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash

# Tag and deploy the service image to the Kubernetes cluster.
# Aborts on the first failing command.
set -e

REGISTRY="us.gcr.io/advisorconnect-1238/"
IMAGE="service-ingest-initiation"

# Set the image tag: feature branches use the branch name, production
# builds a fixed release version. "$CI_BRANCH" is quoted so an
# unset/empty value cannot turn the test into a syntax error under set -e.
if [ "$CI_BRANCH" != "production" ]; then
	IMAGE_TAG=${CI_BRANCH}-${CI_TIMESTAMP}-${CI_COMMIT_ID}-${CI_COMMITTER_USERNAME}
else
	IMAGE_TAG=2.1.0-${CI_TIMESTAMP}-${CI_COMMIT_ID}-${CI_COMMITTER_USERNAME}
fi

# Authenticate google cloud (Codeship helper).
codeship_google authenticate

# Select compute zone and fetch kubernetes cluster credentials.
gcloud config set compute/zone us-east1-b
gcloud container clusters get-credentials london

echo deploying image: ${IMAGE}:${IMAGE_TAG}

# Roll the Deployment in the branch's namespace over to the new image.
GOOGLE_APPLICATION_CREDENTIALS=/keyconfig.json \
	kubectl set image deployment/svc-ingest-initiation-${CI_BRANCH} \
	--namespace=${CI_BRANCH} \
	svc-ingest-initiation-$CI_BRANCH=${REGISTRY}${IMAGE}:${IMAGE_TAG}
| true
|
17b1132e2b25cf002c222049009c73cc726c9a3a
|
Shell
|
xuancong84/linux-home
|
/bin/start-scrcpy4.sh
|
UTF-8
| 2,126
| 3.234375
| 3
|
[] |
no_license
|
# Mirror every adb-connected Android device with scrcpy inside a tmux
# session, shrinking the current desktop windows to make room and laying
# the mirror windows out with wmctrl.
opt="-t"
session_name=scrcpy
# Snapshot of the current workspace: its ID, the windows already on it,
# and the desktop geometry (WxH from wmctrl -d).
curr_workspace="`wmctrl -d | grep '*' | awk '{print $1}'`"
win_IDs=(`wmctrl -lpx | awk "{if(\\$2==$curr_workspace)print \\$1}"`)
desktop_width=`wmctrl -d | grep '*' | awk '{print $9}' | sed "s:x.*::g"`
desktop_height=`wmctrl -d | grep '*' | awk '{print $9}' | sed "s:.*x::g"`
# One scrcpy command per attached device, plus a "watch adb devices" pane.
devs=(`adb devices | sed '/^$/d' | awk '{if(NR>1)print $1}'`)
cmds=("watch -n 1 adb devices")
for dev in ${devs[@]}; do
cmds[${#cmds[@]}]="scrcpy $opt -s $dev"
done
# Refuse to start twice.
if [ "`tmux ls | grep $session_name`" ]; then
echo "The service already started!" >&2
exit 1
fi
# Build a detached tmux session with one tiled pane per command; the
# sleeps give tmux time to settle between layout operations.
tmux new-session -s $session_name -d -x 240 -y 60
for i in `seq 0 $[${#cmds[*]}-1]`; do
sleep 0.2
tmux split-window
sleep 0.2
tmux select-layout tile
sleep 0.2
tmux send-keys -l "${cmds[i]}"
sleep 0.2
tmux send-keys Enter
done
sleep 1
set -x
# Un-maximize the pre-existing windows and squeeze them to the left so
# the mirrors fit on the right edge of the desktop.
for winID in ${win_IDs[*]}; do
wmctrl -i -r $winID -b remove,maximized_vert,maximized_horz
sleep 0.2
wmctrl -i -r $winID -e 0,0,0,$[desktop_width-251-240],$desktop_height
sleep 0.2
wmctrl -i -r $winID -b add,maximized_vert
sleep 0.2
done
# Collect titles of windows that appeared on this workspace since the
# snapshot above (i.e. the new scrcpy mirror windows).
names=()
while IFS= read -r line; do
wsID=`echo "$line" | awk '{print $2}'`
if [ "$wsID" != "$curr_workspace" ]; then
continue
fi
winID=`echo "$line" | awk '{print $1}'`
if ! [[ "${win_IDs[*]}" == *"$winID"* ]]; then
names[${#names[@]}]="`echo $line | awk '{ print substr($0, index($0,$4)) }'`"
fi
done <<<"`wmctrl -l`"
# Place 1-4 mirror windows with hard-coded geometries.
# NOTE(review): the pixel coordinates assume a specific monitor layout -
# confirm before reusing on other setups.
N=${#names[@]}
if [ $N == 1 ]; then
wmctrl -r "${names[0]}" -e 0,1430,56,490,963
elif [ $N == 2 ]; then
wmctrl -r "${names[0]}" -e 0,1546,0,270,482
wmctrl -r "${names[1]}" -e 0,1546,482,270,529
elif [ $N == 3 ]; then
wmctrl -r "${names[0]}" -e 0,1546,0,270,482
wmctrl -r "${names[1]}" -e 0,1429,482,251,529
wmctrl -r "${names[2]}" -e 0,1680,482,240,529
elif [ $N == 4 ]; then
wmctrl -r "${names[0]}" -e 0,1429,0,251,482
wmctrl -r "${names[1]}" -e 0,1429,482,251,529
wmctrl -r "${names[2]}" -e 0,1680,482,240,529
wmctrl -r "${names[3]}" -e 0,1680,0,240,482
fi
# Attach (blocks until the user detaches/exits), then restore the original
# windows and tear down the session.
tmux a -t $session_name
for winID in ${win_IDs[*]}; do
wmctrl -i -r $winID -b add,maximized_vert,maximized_horz
done
tmux kill-session -t $session_name
| true
|
6c0c66c3e72d76e60458814d3fdce9952428d737
|
Shell
|
rinie/utilities
|
/setup_scripts/beagle_setup.sh
|
UTF-8
| 10,036
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run this script as root ie:
# sudo -s
# bash <(wget -q -O - https://raw.github.com/ninjablocks/utilities/master/setup_scripts/beagle_setup.sh)

bold=`tput bold`;
normal=`tput sgr0`;

# Print a highlighted progress banner (same format the script used inline).
announce() {
echo -e "\n→ ${bold}$1${normal}\n";
}

# Announce and apt-get install a single package.
apt_install() {
announce "Installing $1";
sudo apt-get -f -y --force-yes install "$1";
}

# Setup the timezone
announce "Setting up Sydney as the default timezone."
sudo echo "Australia/Sydney" | sudo tee /etc/timezone;
sudo dpkg-reconfigure --frontend noninteractive tzdata;

# Add NTP Update as a daily cron job
announce "Create the ntpdate file"
sudo touch /etc/cron.daily/ntpdate;
announce "Add ntpdate ntp.ubuntu.com"
sudo echo "ntpdate ntp.ubuntu.com" > /etc/cron.daily/ntpdate;
announce "Making ntpdate executable"
sudo chmod 755 /etc/cron.daily/ntpdate;

# Update the time right now
announce "Updating the time"
sudo ntpdate ntp.ubuntu.com pool.ntp.org;

# Stop Ubuntu from saving the MAC address
announce "Stopping Ubuntu from saving the MAC address information"
sudo touch /etc/udev/rules.d/75-persistent-net-generator.rules;
sudo echo 'ENV{MATCHADDR}=="*", GOTO="persistent_net_generator_end"'> /etc/udev/rules.d/75-persistent-net-generator.rules;

# Refresh package lists
announce "Updating apt-get"
sudo apt-get update;

# Remove the Apache2 default install
announce "Removing Apache2"
sudo apt-get -f -y --force-yes remove apache2;
sudo apt-get -f -y --force-yes remove apache2.2-bin apache2.2-common apache2-utils apache2-mpm-worker;

# Download and install the essential packages.
for pkg in g++ node npm ruby1.9.1-dev make build-essential avrdude \
	libgd2-xpm-dev libv4l-dev subversion libjpeg8-dev imagemagick \
	psmisc curl; do
apt_install "$pkg";
done

# Check out, build and install mjpg-streamer.
announce "Switching to /home/ninja"
cd /home/ninja/;
announce "Checking out mjpeg-streamer"
svn co https://mjpg-streamer.svn.sourceforge.net/svnroot/mjpg-streamer mjpg-streamer;
announce "Entering the mjpeg-streamer dir"
cd /home/ninja/mjpg-streamer/mjpg-streamer/;
announce "Making mjpeg-streamer"
sudo make;
announce "Copying input_uvc.so into place"
sudo cp /home/ninja/mjpg-streamer/mjpg-streamer/input_uvc.so /usr/local/lib/;
announce "Copying output_http.so into place"
sudo cp /home/ninja/mjpg-streamer/mjpg-streamer/output_http.so /usr/local/lib/;
announce "Copying the mjpg-streamer binary into /usr/bin"
sudo cp /home/ninja/mjpg-streamer/mjpg-streamer/mjpg_streamer /usr/local/bin;

# Not essential packages
for pkg in aptitude vim; do
apt_install "$pkg";
done

# Ruby gems
announce "Installing the sinatra gem"
sudo gem install sinatra --verbose --no-rdoc --no-ri;
announce "Installing the getifaddrs gem"
sudo gem install system-getifaddrs --verbose --no-rdoc --no-ri;

# Wifi: fetch our rtl8192cu driver and drop the stock Realtek modules.
announce "Create the rtl8192cu Folder"
sudo mkdir -p /opt/rtl8192cu;
announce "Fetching the rtl8192cu Repo from Github"
sudo git clone https://github.com/ninjablocks/rtl8192cu.git /opt/rtl8192cu;
announce "Removing the shitty Realtek drivers"
sudo rm -rf /lib/modules/`uname -r`/kernel/drivers/net/wireless/rtlwifi/rtl8*;

# wpa_supplicant configuration
announce "Copy /opt/utilities/etc/wpa_supplicant.conf to /etc/"
sudo cp /opt/utilities/etc/wpa_supplicant.conf /etc/;
announce "Set the permissions of the wpa_supplicant.conf file"
sudo chmod 644 /opt/utilities/etc/wpa_supplicant.conf;

# Make /etc/network/interfaces use wpa_supplicant for wlan0
announce " Make /etc/network/interfaces use wpa_supplicant for wlan0"
sudo echo "auto wlan0" >> /etc/network/interfaces;
sudo echo "iface wlan0 inet dhcp" >> /etc/network/interfaces;
sudo echo "pre-up wpa_supplicant -f /var/log/wpa_supplicant.log -B -D wext -i wlan0 -c /etc/wpa_supplicant.conf" >> /etc/network/interfaces;
sudo echo "post-down killall -q wpa_supplicant" >> /etc/network/interfaces;

# Install the freshly fetched driver module
announce "Install our rtl8192cu driver"
cd /opt/rtl8192cu;
sudo install -p -m 644 8192cu.ko /lib/modules/`uname -r`/kernel/drivers/net/wireless/;
sudo /sbin/depmod -a `uname -r`;

# Ninja Blocks utilities
announce "Create the Ninja Blocks Utilities Folder"
sudo mkdir -p /opt/utilities;
announce "Set ninja user as the owner of this directory"
sudo chown ninja /opt/utilities;
announce "Fetching the Utilities Repo from Github"
git clone https://github.com/ninjablocks/utilities.git /opt/utilities;

# Clone the Wifi Setup into /opt/wifi (currently disabled)
#sudo mkdir -p /opt/wifi;
#echo -e "\n→ ${bold}Fetching the Wifi Repo from Github${normal}\n";
#git clone https://github.com/ninjablocks/wifi.git /opt/wifi;

# Upstart scripts
announce "Copy /etc/init scripts into place"
sudo cp /opt/utilities/init/* /etc/init/
announce "Set the correct owner and permissions on the init files"
sudo chown root:root /etc/init/*;
sudo chmod 644 /etc/init/*;

# Turn off SSH password authentication (currently disabled)
announce "Turning of SSH Password Authentication"
#sudo perl -pi -e 's/UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config;
#sudo perl -pi -e 's/\#PasswordAuthentication yes/PasswordAuthentication no/g' /etc/ssh/sshd_config;

# udev rules
announce "Copy /etc/udev/rules.d/ scripts into place"
sudo cp /opt/utilities/udev/* /etc/udev/rules.d/;

# Ninja client
announce "Create the Ninja Directory"
sudo mkdir -p /opt/ninja;
announce "Set ninja users as the owner of this directory"
sudo chown ninja /opt/ninja;
announce "Clone the Ninja Client into opt"
git clone https://github.com/ninjablocks/client.git /opt/ninja;
announce "Install the node packages"
cd /opt/ninja;
npm install;

# Client configuration directory
announce "Adding /etc/opt/ninja"
sudo mkdir -p /etc/opt/ninja;
announce "Set owner of this directory to ninja"
sudo chown ninja /etc/opt/ninja;

# PATH and environment setup for root and the ninja user
announce "Adding /opt/utilities/bin to root's path"
echo 'export PATH=/opt/utilities/bin:$PATH' >> /root/.bashrc;
announce "Adding /opt/utilities/bin to ninja's path"
echo 'export PATH=/opt/utilities/bin:$PATH' >> /home/ninja/.bashrc;
announce "Setting the beagle's environment to stable"
echo 'export NINJA_ENV=stable' >> /home/ninja/.bashrc;

# Add ninja_update to the hourly cron (currently disabled)
#echo -e "\n→ ${bold}Add ninja_update to the hourly cron${normal}\n";
#ln -s /opt/utilities/bin/ninja_update /etc/cron.hourly/ninja_update;

# Serial/GPIO preparation so the Arduino can be flashed later
announce "Running setserial"
sudo /opt/utilities/bin/setserial;
announce "Running setgpio"
sudo /opt/utilities/bin/setgpio;

# Remove old Arduino hex files;
sudo rm /opt/utilities/tmp/*;

# Flash the Arduino
announce "Updating the Arduino"
sudo /opt/utilities/bin/ninja_update_arduino;

announce "Guess what? We're done!!!"
sudo reboot;
| true
|
0e2c16ab0799970c50ea28b8869aeae76c234b43
|
Shell
|
LabNeuroCogDevel/SzAttWMproc
|
/scripts/old_dontuse/20160225_WM_ROIstats.sh
|
UTF-8
| 2,615
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
#extracting ROIs from 3dttest run on patients vs. controls for WM task
# For every subject directory and every BA-sphere ROI, pull the value of
# selected sub-bricks (cue/delay/probe x load 1/3) from the stats dataset,
# prefix each row with the subject directory, and append it to a
# per-condition/per-ROI text file; finally paste the ROI columns together.
data=/Volumes/Phillips/P5/subj
roi_path=/Volumes/Phillips/P5/scripts/ROIs/BA_spheres
text_files=/Volumes/Phillips/P5/scripts/txt
region="LBA17 LBA40 LBA46 LBA9 RBA17 RBA40 RBA46 RBA9"
cd $data
# NOTE: ${d} matches "12345/" INCLUDING the trailing slash from the 1*/
# glob, so the sed expression "s/^/${d}" expands to a complete
# s/^/12345/ command - the directory's own slash closes the substitution.
# Fragile but intentional; do not "fix" by adding a trailing slash.
for d in 1*/; do
for reg in $region; do
3dROIstats -quiet -mask ${roi_path}/${reg}_10mm+tlrc.HEAD ${data}/${d}/contrasts/WM/stats_WM_correct_load_wrongtogether_dlymod+tlrc.[1] | sed "s/^/${d}" >>${text_files}/cue_ld1_${reg}.txt
3dROIstats -quiet -mask ${roi_path}/${reg}_10mm+tlrc.HEAD ${data}/${d}/contrasts/WM/stats_WM_correct_load_wrongtogether_dlymod+tlrc.[3] | sed "s/^/${d}" >>${text_files}/cue_ld3_${reg}.txt
3dROIstats -quiet -mask ${roi_path}/${reg}_10mm+tlrc.HEAD ${data}/${d}/contrasts/WM/stats_WM_correct_load_wrongtogether_dlymod+tlrc.[5] | sed "s/^/${d}" >>${text_files}/delay_ld1_${reg}.txt
3dROIstats -quiet -mask ${roi_path}/${reg}_10mm+tlrc.HEAD ${data}/${d}/contrasts/WM/stats_WM_correct_load_wrongtogether_dlymod+tlrc.[7] | sed "s/^/${d}" >>${text_files}/delay_ld3_${reg}.txt
3dROIstats -quiet -mask ${roi_path}/${reg}_10mm+tlrc.HEAD ${data}/${d}/contrasts/WM/stats_WM_correct_load_wrongtogether_dlymod+tlrc.[9] | sed "s/^/${d}">>${text_files}/probe_ld1_${reg}.txt
3dROIstats -quiet -mask ${roi_path}/${reg}_10mm+tlrc.HEAD ${data}/${d}/contrasts/WM/stats_WM_correct_load_wrongtogether_dlymod+tlrc.[11]| sed "s/^/${d}">>${text_files}/probe_ld3_${reg}.txt
done
done
# Join the per-ROI columns (one file per condition/load combination).
cd $text_files
paste cue_ld1_LBA17.txt cue_ld1_LBA40.txt cue_ld1_LBA46.txt cue_ld1_LBA9.txt cue_ld1_RBA17.txt cue_ld1_RBA40.txt cue_ld1_RBA46.txt cue_ld1_RBA9.txt > cue_ld1_BA_spheres.txt
paste cue_ld3_LBA17.txt cue_ld3_LBA40.txt cue_ld3_LBA46.txt cue_ld3_LBA9.txt cue_ld3_RBA17.txt cue_ld3_RBA40.txt cue_ld3_RBA46.txt cue_ld3_RBA9.txt > cue_ld3_BA_spheres.txt
paste delay_ld1_LBA17.txt delay_ld1_LBA40.txt delay_ld1_LBA46.txt delay_ld1_LBA9.txt delay_ld1_RBA17.txt delay_ld1_RBA40.txt delay_ld1_RBA46.txt delay_ld1_RBA9.txt > delay_ld1_BA_spheres.txt
paste delay_ld3_LBA17.txt delay_ld3_LBA40.txt delay_ld3_LBA46.txt delay_ld3_LBA9.txt delay_ld3_RBA17.txt delay_ld3_RBA40.txt delay_ld3_RBA46.txt delay_ld3_RBA9.txt > delay_ld3_BA_spheres.txt
paste probe_ld1_LBA17.txt probe_ld1_LBA40.txt probe_ld1_LBA46.txt probe_ld1_LBA9.txt probe_ld1_RBA17.txt probe_ld1_RBA40.txt probe_ld1_RBA46.txt probe_ld1_RBA9.txt > probe_ld1_BA_spheres.txt
paste probe_ld3_LBA17.txt probe_ld3_LBA40.txt probe_ld3_LBA46.txt probe_ld3_LBA9.txt probe_ld3_RBA17.txt probe_ld3_RBA40.txt probe_ld3_RBA46.txt probe_ld3_RBA9.txt > probe_ld3_BA_spheres.txt
| true
|
1ad507fc586680606a343fa87601f5a1c5008eb9
|
Shell
|
gfitzp/db_export
|
/db_export.command
|
UTF-8
| 206
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Dump each named MySQL database to a dated .sql file.
# Usage: db_export.command <password> <db1> [db2 ...]

TODAYSBACKUP=$(date +%Y-%m-%d)
PASSWORD=$1

# The original assigned PASSWORD but then used $1 raw and unquoted; quote
# it so passwords with special characters survive (-p takes no space).
# "export" was also a poor variable name; renamed to export_file.
for db in "${@:2}"; do
	export_file="${TODAYSBACKUP}_sql_${db}.sql"
	/usr/local/bin/mysqldump -u db_export -p"${PASSWORD}" "$db" > ~/Documents/Database\ Exports/"$export_file"
done
| true
|
78ac83ec68e9a8343f77975bb62850d02b66425c
|
Shell
|
Dork96/DorkScript
|
/port-ssl.sh
|
UTF-8
| 2,395
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive menu to change one of the two stunnel4 "accept" ports.
# Reads the current ports from /etc/stunnel/stunnel.conf, asks for a new
# one, rejects ports already in use (netstat), rewrites the config and the
# install log, then restarts stunnel4.
# NOTE(review): "menu" is invoked below but never defined in this file -
# presumably provided by a sourcing wrapper script; confirm.
# NOTE(review): the "Port{off}" fragments in the error messages look like
# a typo for "${off}", but they are runtime strings so left untouched.
red='\e[1;31m'
green='\e[0;32m'
NC='\e[0m'
cyan='\x1b[96m'
white='\x1b[37m'
bold='\033[1m'
off='\x1b[m'
clear
# First and second "accept = <port>" values from the stunnel config.
ssl="$(cat /etc/stunnel/stunnel.conf | grep -i accept | head -n 2 | cut -d= -f2 | sed 's/ //g' | tr '\n' ' ' | awk '{print $1}')"
ssl2="$(cat /etc/stunnel/stunnel.conf | grep -i accept | head -n 2 | cut -d= -f2 | sed 's/ //g' | tr '\n' ' ' | awk '{print $2}')"
echo -e ""
echo -e "${cyan}======================================${off}"
echo -e " ${green}PORT STUNNEL${off}"
echo -e "${cyan}======================================${off}"
echo -e "${green}"
echo -e " 1 ⸩ Ubah Port Stunnel $ssl"
echo -e " 2 ⸩ Ubah Port Stunnel $ssl2"
echo -e " x ⸩ Keluar"
echo -e "${off}"
echo -e "${cyan}======================================${off}"
echo -e "${green}"
read -p " Pilih Nomor [1-2 / x] : " prot
echo -e "${off}"
case $prot in
1)
# Replace the FIRST stunnel port.
echo -e "${green}"
read -p "Masukkan Port Baru Stunnel4 : " stl
echo -e "${off}"
if [ -z $stl ]; then
echo -e "${red}Port Tidak Dimasukkan !!!${off}"
sleep 1
clear
menu
fi
# Reject the new port if something already listens on it.
cek=$(netstat -nutlp | grep -w $stl)
if [[ -z $cek ]]; then
sed -i "s/$ssl/$stl/g" /etc/stunnel/stunnel.conf
sed -i "s/ - Stunnel4 : $ssl, $ssl2/ - Stunnel4 : $stl, $ssl2/g" /root/log-install.txt
/etc/init.d/stunnel4 restart > /dev/null
echo -e "${cyan}Port Stunnel4 Berhasil Diganti Menjadi :${off} ${green} $stl ${off}"
else
echo -e "${red}ERROR! Port{off} ${green}[ $stl ]${off} ${red}Sudah Beroperasi Pada System!${off}"
echo -e "${cyan}Silahkan Gunakan Port Lain...${off}"
sleep 2
exit 0
fi
;;
2)
# Replace the SECOND stunnel port (same flow as case 1).
echo -e "${green}"
read -p "Masukkan Port Baru Stunnel4 : " stl
echo -e "${off}"
if [ -z $stl ]; then
echo -e "${red}Port Tidak Dimasukkan !!!${off}"
sleep 1
clear
menu
fi
cek=$(netstat -nutlp | grep -w $stl)
if [[ -z $cek ]]; then
sed -i "s/$ssl2/$stl/g" /etc/stunnel/stunnel.conf
sed -i "s/ - Stunnel4 : $ssl, $ssl2/ - Stunnel4 : $ssl, $stl/g" /root/log-install.txt
/etc/init.d/stunnel4 restart > /dev/null
echo -e "${cyan}Port Stunnel4 Berhasil Diganti Menjadi :${off} ${green} $stl ${off}"
else
echo -e "${red}ERROR! Port{off} ${green}[ $stl ]${off} ${red}Sudah Beroperasi Pada System!${off}"
echo -e "${cyan}Silahkan Gunakan Port Lain...${off}"
sleep 2
exit 0
fi
;;
x)
exit
menu
;;
*)
echo -e "${red}Masukkan Nomor Yang Ada!${off}"
sleep 1
clear
menu
;;
esac
| true
|
b1aadc7c511646009309ee1745205a9998d87702
|
Shell
|
LarsHaalck/PKGBUILDs
|
/karl-git/PKGBUILD
|
UTF-8
| 861
| 2.84375
| 3
|
[] |
no_license
|
# Maintainer: Lars Haalck
# PKGBUILD for karl (VCS package): a Rust clipboard manager built with cargo.
pkgname=karl-git
pkgver=r19.d15e061
pkgrel=1
pkgdesc='A clipboard manager'
arch=('i686' 'x86_64')
url='https://github.com/LarsHaalck/karl'
license=('MIT')
makedepends=('git' 'cargo')
provides=('karl')
conflicts=('karl')
source=("$pkgname::git+$url")
sha256sums=('SKIP')

# Version string: r<commit count>.<short hash> of the checked-out HEAD.
pkgver() {
  cd "$pkgname"
  printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}

# Pre-download all crate dependencies so build() can run frozen/offline.
prepare() {
  cd "$pkgname"
  cargo fetch --locked --target "$CARCH-unknown-linux-gnu"
}

build() {
  export RUSTUP_TOOLCHAIN=stable
  export CARGO_TARGET_DIR=target
  cd "$pkgname"
  cargo build --release --frozen
}

# Install the binary, the rofi helper script, and the license.
package() {
  cd "$pkgname"
  install -Dm755 target/release/karl -t "$pkgdir/usr/bin/"
  install -Dm755 scripts/rofi-karl -t "$pkgdir/usr/bin/"
  install -Dm644 LICENSE -t "$pkgdir/usr/share/licenses/$pkgname/"
}
# vim:set ts=2 sw=2 et:
| true
|
afeebd99c7548695834f5fd97ff03dd7e02fb2b1
|
Shell
|
tetsuo-cpp/tsh
|
/build.sh
|
UTF-8
| 1,039
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/sh

# Build driver: configure and build the project under build/<type>/.
# Supported types: debug (default), release, asan, ubsan, scan.

# Ensure the top-level build directory exists.
mkdir -p build/

# Fall back to a debug build when no type was given.
build_type=${1:-debug}

case $build_type in
  "debug")
    cmd="cmake -DCMAKE_BUILD_TYPE=Debug ../.. && make"
    ;;
  "release")
    cmd="cmake -DCMAKE_BUILD_TYPE=Release ../.. && make"
    ;;
  "asan")
    cmd="cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER_TYPE=ASan ../.. && make"
    ;;
  "ubsan")
    cmd="cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER_TYPE=UBSan ../.. && make"
    ;;
  "scan")
    # scan-build needs a full rebuild, so drop any previous scan output.
    if [ -d build/scan/ ]; then
      rm -rf build/scan/
    fi
    cmd="scan-build cmake ../.. && scan-build -o . make"
    ;;
  *)
    echo "unrecognised build type $build_type"
    exit 1
esac

cd build/ || exit
mkdir -p "$build_type"
cd "$build_type" || exit

# Run the configured build command.
eval "$cmd"
| true
|
35c945fed12865fbd2dfebc60af687a2e5a855eb
|
Shell
|
mackpipe/ilumno_docker_drupal
|
/ilumno.sh
|
UTF-8
| 895
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the Drupal Docker image and bring up the Drupal+MySQL containers
# defined by docker-compose, using settings from ilumno.env.

# The environment variables found in the .env file are exported
# NOTE(review): this grep|xargs trick word-splits, so values containing
# whitespace in ilumno.env would break - confirm that is acceptable.
export $(grep -v '^#' ilumno.env | xargs)

# Print the message to console
echo >&2 "========================================================================"
echo >&2
echo >&2 "	Start creating the image ${DOCKER_IMAGE}"
echo >&2
echo >&2 "========================================================================"

# Build the Image $ {DOCKER_IMAGE} using the Dockerfile
docker build -t "${DOCKER_IMAGE}" .

# Print the message to console
echo >&2 "========================================================================"
echo >&2
echo >&2 "	Raise the Drupal and Mysql containers"
echo >&2
echo >&2 "========================================================================"

# Load the container with the image ${DOCKER_IMAGE} created by docker build
docker-compose -p "${DOCKER_CONTAINER}" --env-file ./ilumno.env up
| true
|
eec2fec78e67a916e3c5fa63e4c7d8305951a349
|
Shell
|
taowuwen/codec
|
/bash/test_line.sh
|
UTF-8
| 214
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Demo of a reusable "printline" tracer: an eval-able string that, when
# expanded as ${printline}, echoes the date, PID, script path, enclosing
# function name (or OUTOFFUNC at top level) and line number plus arguments.
echo "enable = ${1:-0}"
export printline='eval echo -e "`date` $$ [`pwd`/`basename $0`:${FUNCNAME:-OUTOFFUNC}:$LINENO]\t$*"'
# Inside a function FUNCNAME is set, so the trace shows the function name.
test_func_name()
{
${printline}
}
test_func_name
# At top level FUNCNAME is unset, so the OUTOFFUNC placeholder is printed.
${printline}
| true
|
1c094096694f70a4d51b2c5cb517077bbe8539a4
|
Shell
|
openshift-kni/oauth-proxy
|
/test/e2e.sh
|
UTF-8
| 1,912
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -x
# End-to-end test driver for oauth-proxy.
# Usage: ./e2e.sh [clusterup [nobuild]]
#   clusterup - build openshift/origin (skipped with "nobuild") and stand up
#               a local cluster via `oc cluster up` before running the tests.
set -e
PROJECT_REPO=github.com/openshift/oauth-proxy
DOCKER_REPO=localhost:5000
KUBECONFIG=~/admin.kubeconfig
TEST_NAMESPACE=myproject
REV=$(git rev-parse --short HEAD)
TEST_IMAGE=${DOCKER_REPO}/oauth-proxy-${REV}:latest
TEST_DIR=$(pwd)/test
HELLO_PATH=${TEST_DIR}/e2e/hello
HELLO_IMAGE=${DOCKER_REPO}/hello-proxy-${REV}:latest
ORIGIN_BUILD_DIR=/tmp/opbuild
ORIGIN_PATH=${ORIGIN_BUILD_DIR}/src/github.com/openshift/origin
if [ "${1}" == "clusterup" ]; then
if [ "${2}" != "nobuild" ]; then
if [ ! -d "${ORIGIN_BUILD_DIR}/src" ]; then
mkdir -p ${ORIGIN_BUILD_DIR}/src
fi
pushd .
cd /
GOPATH=${ORIGIN_BUILD_DIR} go get github.com/openshift/origin
cd ${ORIGIN_PATH}
# Stabilize on a known working 3.9 commit just for assurance.
git checkout 126033b
popd
GOPATH=${ORIGIN_BUILD_DIR} ${ORIGIN_PATH}/hack/build-go.sh
fi
export PATH=${ORIGIN_PATH}/_output/local/bin/linux/amd64/:${PATH}
openshift version
# Run bindmountproxy for a non-localhost OpenShift endpoint
IP=$(openshift start --print-ip)
docker run --privileged --net=host -v /var/run/docker.sock:/var/run/docker.sock -d --name=bindmountproxy cewong/bindmountproxy proxy ${IP}:2375 $(which openshift)
sleep 2
docker_host=tcp://${IP}:2375
DOCKER_HOST=${docker_host} oc cluster up -e DOCKER_HOST=${docker_host}
sudo cp /var/lib/origin/openshift.local.config/master/admin.kubeconfig ~/
sudo chmod 777 ${KUBECONFIG}
oc login -u developer -p pass
oc project ${TEST_NAMESPACE}
oc status
fi
# build backend site
go build -o ${HELLO_PATH}/hello_openshift ${PROJECT_REPO}/test/e2e/hello
sudo docker build -t ${HELLO_IMAGE} ${HELLO_PATH}
sudo docker push ${HELLO_IMAGE}
# build oauth-proxy
go build -o ${TEST_DIR}/oauth-proxy
sudo docker build -t ${TEST_IMAGE} ${TEST_DIR}/
sudo docker push ${TEST_IMAGE}
# run test
export TEST_IMAGE TEST_NAMESPACE HELLO_IMAGE KUBECONFIG
go test -v ${PROJECT_REPO}/test/e2e
| true
|
026ffc3e7cc1390b6d56eb872cd8a8e5430702af
|
Shell
|
erik-feller/OS
|
/pa4/harness.sh
|
UTF-8
| 358
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Benchmark harness: run each scheduler test program under every scheduling
# policy and fork count, timing each combination.
# NOTE(review): TIMEFORM is never consumed below; the %e/%c/%w escapes look
# like a format string for `/usr/bin/time -f` - confirm intended use.
TIMEFORM=$'wall=%e user=%U system=%S CPU=%P i-switched=%c v-switched=%w'
PROGS=( pi-sched rw multi-sched )
FORKS=( 10 100 1000 )
SCHEDULERS=( SCHED_OTHER SCHED_FIFO SCHED_RR )
for a in "${PROGS[@]}"; do
    for b in "${SCHEDULERS[@]}"; do
        for c in "${FORKS[@]}"; do
            # FIX: the old "1 > outhar 2 > results" passed the literal words
            # "1" and "2" to the program and redirected stdout twice (only
            # `results` survived). Now stdout -> outhar, stderr -> results.
            # If the programs really expect a trailing "1" argument,
            # reinstate it before the redirections.
            echo "$a $b $c"; time ./"$a" "$b" "$c" 1> outhar 2> results
        done
    done
done
| true
|
264ada3145fabd2315cf31181758f1334c8d978b
|
Shell
|
r4mmer/docker-nginx-gunicorn-django
|
/gunicorn/run
|
UTF-8
| 1,324
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: verify that Django migrations are consistent with the
# models, apply pending migrations (tenant-aware when django-tenants or
# django-tenant-schemas is installed), then exec gunicorn on a unix socket.
# NOTE(review): `source` on the next-but-one line is a bashism; a strict
# POSIX /bin/sh needs `.` instead - confirm the runtime shell.
exec 2>&1
source /etc/envvars
django_cmd=${DJANGO_CMD}
test_models=${DJANGO_TEST_MODELS}
GUNICORN=/usr/local/bin/gunicorn
ROOT=/app
PID=/var/run/gunicorn.pid
APP=${DJANGO_APP}.wsgi
# Remove a stale pidfile from a previous run.
if [ -f $PID ]; then rm $PID; fi
cd $ROOT
mkdir -p /root/.ssh/
chown -R root:root /root/.ssh
# XXX: skip test for migrations if need be
if [ $test_models -gt 0 ]; then
# test for migrations synced with models
$django_cmd makemigrations --dry-run --check
if [ ! $? -eq 0 ]; then
# migrations not synced
# should fail
echo "[-] Migrations are not synced with the models"
exit 1
fi
fi
# test for db synced with migrations
# NOTE(review): '[X]' in grep -v is a bracket expression matching the single
# character X anywhere in the line, not the literal "[X]" marker - a
# migration *name* containing X would be filtered too; confirm intent.
$django_cmd showmigrations -p --no-color | awk '{$NF=""; print $0}' | grep -v '[X]'
if [ $? -eq 0 ]; then
echo "[*] Database not synced"
# check for tenant-schemas/django-tenants as to not break the migration
tenant_migrate=0
pip freeze | grep -i '^django-tenants'
if [ $? -eq 0 ]; then
tenant_migrate=1
$django_cmd migrate_schemas
fi
pip freeze | grep -i '^django-tenant-schemas'
if [ $? -eq 0 ]; then
tenant_migrate=1
$django_cmd migrate_schemas
fi
# Plain migrate only when no tenant package handled it above.
if [ $tenant_migrate -eq 0 ]; then $django_cmd migrate; fi
fi
exec $GUNICORN -b unix:///tmp/gunicorn.sock -c /etc/gunicorn/gunicorn.conf.py --pid=$PID $APP
| true
|
180f4059ee88a111031bc41327a75e0b8cb5ee3e
|
Shell
|
UMN-CMS/HGCal-RPi
|
/etc/check_sync_logs
|
UTF-8
| 219
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Show the tail of the sync log on every sync pi listed in etc/config
# (which defines sync_pi_aliases and pi_syncdir).
source etc/config
for sync_pi_alias in "${sync_pi_aliases[@]}"
do
    echo "$sync_pi_alias ---------------------------------------- $sync_pi_alias"
    # FIX: quote the host so an alias with whitespace cannot split ssh's argv.
    ssh "$sync_pi_alias" "tail $pi_syncdir/sync.log"
done
| true
|
139bfb25b04a5a87c52a54a5fd258669061b751c
|
Shell
|
ODEX-TOS/packages
|
/libmpeg2/trunk/PKGBUILD
|
UTF-8
| 1,126
| 2.59375
| 3
|
[
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
# Contributor: Sarah Hay <sarah@archlinux.org>
# Maintainer: Andreas Radke <andyrtr@archlinux.org>
# Arch PKGBUILD for libmpeg2, an MPEG-1/MPEG-2 video decoding library.
pkgname=libmpeg2
pkgver=0.5.1
pkgrel=7
pkgdesc="Library for decoding MPEG-1 and MPEG-2 video streams."
arch=('x86_64')
url="http://libmpeg2.sourceforge.net/"
depends=('glibc')
makedepends=('sdl' 'libxv')
optdepends=('sdl: required for mpeg2dec'
'libxv: required for mpeg2dec')
source=(http://libmpeg2.sourceforge.net/files/${pkgname}-${pkgver}.tar.gz
libmpeg2-0.5.1-gcc4.6.patch)
license=('GPL2')
provides=('mpeg2dec')
sha256sums=('dee22e893cb5fc2b2b6ebd60b88478ab8556cb3b93f9a0d7ce8f3b61851871d4'
'763e188eea36ee3cdfb31e7877bbead00676b5766c25175ec6a7eb20884926d1')
# Apply the gcc 4.6 build fix and regenerate the autotools files.
prepare() {
cd "${srcdir}/${pkgname}-${pkgver}"
patch -Np1 -i "${srcdir}/libmpeg2-0.5.1-gcc4.6.patch"
sed '/AC_PATH_XTRA/d' -i configure.ac
autoreconf --force --install
}
# Shared-only build; per-target CFLAGS are passed explicitly to make.
build() {
cd "${srcdir}/${pkgname}-${pkgver}"
./configure --prefix=/usr --enable-shared --disable-static
make OPT_CFLAGS="${CFLAGS}" \
MPEG2DEC_CFLAGS="${CFLAGS}" \
LIBMPEG2_CFLAGS=""
}
package() {
cd "${srcdir}/${pkgname}-${pkgver}"
make DESTDIR="${pkgdir}" install
}
| true
|
dca9a88e6d865cf13924a97b1cbb3dac193a43df
|
Shell
|
arnautpe/minifs
|
/conf/packages/09busybox.sh
|
UTF-8
| 2,378
| 3.625
| 4
|
[] |
no_license
|
# minifs package recipe for busybox. The hooks below (configure-/compile-/
# install-/deploy-*) are invoked by the minifs build framework, which also
# provides hset/hget, configure, compile, deploy, log_install and the
# STAGING/ROOTFS/CONFIG/CROSS/MAKE variables.
PACKAGES="$PACKAGES busybox"
hset busybox version "1.29.1"
hset busybox url "http://busybox.net/downloads/busybox-$(hget busybox version).tar.bz2"
hset busybox depends "crosstools host-libtool"
hset busybox optional "pam"
# Seed the busybox .config from the saved board config, or fall back to
# defconfig and queue an interactive menuconfig pass.
configure-busybox() {
local obj=$STAGING/obj/busybox-obj
mkdir -p $obj
if [ -f "$CONFIG"/config_busybox.conf ]; then
configure cp -a "$CONFIG"/config_busybox.conf $obj/.config
else
configure $MAKE O=$obj CROSS_COMPILE="${CROSS}-" CFLAGS="$TARGET_CFLAGS" CONFIG_PREFIX="$ROOTFS" defconfig
COMMAND="busybox_menuconfig"
fi
if [ "$COMMAND_PACKAGE" = "busybox" ] ; then
$MAKE O=$obj CROSS_COMPILE="${CROSS}-" CFLAGS="$TARGET_CFLAGS" CONFIG_PREFIX="$ROOTFS" $COMMAND_TARGET
# NOTE(review): everything after "echo" below is a shell comment, so this
# just prints a blank line - presumably the text was meant to be echoed.
echo #### busybox config done, copying it back
cp $obj/.config "$CONFIG"/config_busybox.conf
rm ._*
exit 0
fi
}
compile-busybox() {
local obj=$STAGING/obj/busybox-obj
compile $MAKE O=$obj CROSS_COMPILE="${CROSS}-" \
CFLAGS="$TARGET_CFLAGS" \
CONFIG_PREFIX="$ROOTFS" \
$MAKE_ARGUMENTS
}
# Nothing to stage at install time; busybox installs straight into ROOTFS.
install-busybox() {
log_install echo Done
}
deploy-busybox-local() {
local obj=$STAGING/obj/busybox-obj
$MAKE O=$obj CROSS_COMPILE="${CROSS}-" \
CFLAGS="$TARGET_CFLAGS" \
CONFIG_PREFIX="$ROOTFS" install
}
deploy-busybox() {
deploy deploy-busybox-local
}
# Pseudo-package that fills in the final root filesystem after busybox.
PACKAGES+=" filesystem-populate"
hset filesystem-populate url "none"
hset filesystem-populate dir "."
hset filesystem-populate phases "deploy"
hset filesystem-populate depends "busybox"
# Create the standard mount points, merge staged /etc trees, symlink
# /usr/etc and /usr/var back to the real ones, stamp hostname/tag, then
# apply per-board rootfs overrides. All output goes to LOGFILE.
deploy-filesystem-populate() {
deploy echo Copying
echo -n " Populating filesystem... "
(
mkdir -p "$ROOTFS"/proc/ "$ROOTFS"/dev/ "$ROOTFS"/sys/ \
"$ROOTFS"/tmp/ "$ROOTFS"/var/run "$ROOTFS"/var/log
rsync -av \
--exclude=._\* \
"$STAGING/etc/" \
"$STAGING_USR/etc/" \
"$ROOTFS/etc/"
mv "$ROOTFS"/usr/etc/* "$ROOTFS"/etc/
rm -rf "$ROOTFS"/usr/etc/ "$ROOTFS"/usr/var/
ln -s ../etc $ROOTFS/usr/etc
ln -s ../var $ROOTFS/usr/var
echo minifs-$MINIFS_BOARD >$ROOTFS/etc/hostname
tag=$(echo "minifs-$MINIFS_BOARD-" | \
awk '{ print $0 strftime("%y%m%d%H%M"); }')
echo $tag >$ROOTFS/etc/minifs.tag
export MINIFS_TAG=$tag
## Add rootfs overrides for boards
for pd in $(minifs_locate_config_path rootfs 1); do
if [ -d "$pd" ]; then
echo "### Overriding root $pd"
rsync -av --exclude=._\* "$pd/" "$ROOTFS/"
fi
done
) >>"$LOGFILE" 2>&1 || {
echo "FAILED"; exit 1
}
echo "Done"
}
| true
|
286eaa81348cf608ac306465764c115621d1fe55
|
Shell
|
Bramha-N/linuxShell
|
/d7_array/Randomarray.sh
|
UTF-8
| 607
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Generate N random three-digit numbers, print the smallest/largest, then
# the second-smallest/second-largest distinct values.
read -p "How many random numbers you wants to print ? : " count
arr=()
for (( n=1; n<=count; n++ ));
do
    arr+=( $(( RANDOM % 900 + 100 )) )
done
echo "Random numbers are : " ${arr[@]}
# FIX: seed min/max from the first element. The old code reused the
# user-entered count as the initial "max", which gave a wrong maximum
# whenever the count exceeded every generated value.
min=${arr[0]}
max=${arr[0]}
for i in "${arr[@]}";
do
    (( i > max )) && max=$i
    (( i < min )) && min=$i
done
echo "min=$min"
echo "max=$max"
echo ${arr[@]}
# FIX: drop elements *equal* to min/max by value. The old
# ${arr[@]/$max} form performed substring replacement, which corrupted
# unrelated elements that merely contained the digits of min/max.
rest=()
for i in "${arr[@]}";
do
    (( i != min && i != max )) && rest+=( "$i" )
done
echo ${rest[@]}
NewMin=${rest[0]}
NewMax=${rest[0]}
for i in "${rest[@]}";
do
    (( i > NewMax )) && NewMax=$i
    (( i < NewMin )) && NewMin=$i
done
echo "Second Smallest value is=$NewMin
Second Largest value is=$NewMax"
| true
|
43c26ef62e992dcdf3321992e2d7520caca19a8d
|
Shell
|
rbone/dotmac
|
/packages/git/.bin/git-recentbranches
|
UTF-8
| 249
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# List the ${1:-20} most recently committed-to local branches as an aligned
# table: relative commit date | author | branch | subject. The '|' emitted
# by --format is the separator consumed by `column -ts '|'`.
git for-each-ref --sort=-committerdate refs/heads/ --format='%(color:yellow bold)%(committerdate:relative)| %(color:cyan bold)%(authorname)| %(color:red bold)%(refname:short) %(color:reset)| %(subject)' --count=${1-20} | column -ts '|'
| true
|
03f6564a6d14e11fc9c7f787b8d581af4baff77b
|
Shell
|
fthayd/LinuxShellSamples
|
/07_Cut.sh
|
UTF-8
| 572
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh
# Demonstrates `cut` character/field extraction on each line read from stdin.
# NOTE(review): `read` without -r interprets backslashes, and the unquoted
# $line in most echoes collapses runs of whitespace before cut sees them -
# intentional for a demo, but worth knowing.
while read line
do
echo $line | cut -c3 # Display the third char only.
echo $line | cut -c2,7 # Display the third char and seventh char.
echo $line | cut -c2-7 # Display the second char to seventh char.
echo "$line" | cut -f1-3 # -d delim char TAB and display 1. to 3. parts.
echo $line | cut -c13- # Display after 13. character.
echo $line | cut -d' ' -f4
echo $line | cut -d' ' -f1-3 # Display 1.to 3. parts with deliminated space char.
echo "$line" | cut -d$'\t' -f2- # Display 2. to end parts with deliminated tab char.
done
| true
|
f3d06f69439ce9dfdd26e64217ac3ba682a6cc4f
|
Shell
|
zhangns07/multiple_source_adaptation
|
/density_and_basepred/gen_lm.sh
|
UTF-8
| 801
| 3.28125
| 3
|
[] |
no_license
|
# Build an n-gram language model from a text corpus using the AT&T FSM/GRM
# toolkits: append </s> markers, compile strings to automata, count n-grams,
# then build the model (data.lm.fsm).
if [ $# -lt 3 ]; then
    echo "Usage: $0 model_data.txt ngram_order min_occurrences" >&2
    # FIX: "exit -1" is not portable (status must be 0-255); use 1.
    exit 1
fi

DATA=$1
NGRAM_ORDER=$2
NUM_OCCUR=$3
# NOTE(review): NUM_OCCUR is accepted but never used below - confirm whether
# grmcount/grmmake should receive a minimum-occurrence cutoff.

FSMLIB=../../tool_binaries/fsm-4.0/bin
GRMLIB=../../tool_binaries/grm-4.0/bin
SCRIPTS=./

echo Adding end symbols...
awk '{print $0, "</s>"}' "$DATA" > "$DATA.endsymbol"

#echo Storing data as automata...
#$FSMLIB/farcompilestrings -i vocab.txt -u "<unk>" $DATA.endsymbol > data.far
#$FSMLIB/farcompilestrings -i vocab.txt -u "<unk>" $DATATARGET.endsymbol > datatarget.far
$FSMLIB/farcompilestrings -i vocab.txt -u "<eps>" "$DATA.endsymbol" > data.far

echo Counting occurences...
$GRMLIB/grmcount -n "$NGRAM_ORDER" -i vocab.txt -s "<s>" -f "</s>" data.far > data.counts.fsm

echo Creating $NGRAM_ORDER-gram language model...
$GRMLIB/grmmake data.counts.fsm > data.lm.fsm
| true
|
20013ca9ee87942d6b439bfce81722827926a285
|
Shell
|
joliencremers/regression_wrapping
|
/2 Simulationstudy/Ccodes_and_scripts/run_N.sh
|
UTF-8
| 1,531
| 2.71875
| 3
|
[] |
no_license
|
#Change to directory with simulated data
cd ../Simulated_data/N || exit 1

# Run one sampler over all of its data files and archive the results.
#   $1 - sampler/data tag: WN or WC
# For each matching file: run the regression (k=1, prior variance=5), run
# the analysis with autocorrelations, zip the large outputs, then delete
# the per-file scratch output.
run_sampler() {
    tag=$1
    for f in *"$tag"*; do
        # run regression with k=1 and prior variance=5
        ../../Ccodes_and_scripts/Regression_${tag}.exe 1 5 "$f"
        # run analysis with autocorrelations (argument 1) and write report to dat file
        ../../Ccodes_and_scripts/Analysis.exe 1 output* "Analysis_N${tag}.dat"
        # zip the (large) output data
        zip ../../Simulation_output/N_out.zip output*
        zip ../../Simulation_output/N_raw.zip raw*
        zip ../../Simulation_output/N_analysis.zip analysis*
        zip ../../Simulation_output/N_autocor.zip auto*
        rm -f output*
        rm -f raw*
        rm -f analysis*
        rm -f auto*
    done
    # FIX: the report is written as Analysis_N${tag}.dat; the old script
    # tried to move a misspelled "Analys_N${tag}.dat" and silently failed,
    # leaving the report behind in the data directory.
    mv "Analysis_N${tag}.dat" ../../Simulation_output/
}

#Run the WN sampler
run_sampler WN
#Run the WC sampler
run_sampler WC

#All output has been written to /Simulation_output/
#the Analysis_AncovaWC.dat and Analysis_AncovaWN.dat are important. Refer to the R code in \2 Simulation study\Diagnotic_tools_and_Analysis\Analysis.R for further instructions.
| true
|
41de9ad4ba5f0afd18f1172cee7280cb91a85ea6
|
Shell
|
mohamedgalal99/gig-nodes-scripts
|
/functions_reinstall.sh
|
UTF-8
| 5,282
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
baseTFTP="/opt/g8-pxeboot/pxeboot/tftpboot/pxelinux.cfg"
baseIP="/opt/g8-pxeboot/pxeboot/conf"
#check if base files exist
# Aborts the script unless every required PXE/IPMI config file is present.
function check {
for required in "$baseIP/hosts" "$baseIP/dhcphosts" "$baseTFTP/911boot"; do
[ -f "$required" ] || { echo [-] "can't find $required file" && exit 1; }
done
}
# Enable PXE boot for a node: look up its MAC in dhcphosts and create the
# pxelinux.cfg/01-<mac-with-dashes> symlink pointing at the 911boot config.
#   $1 - node name (second field of dhcphosts)
# NOTE(review): the `exit 5` below runs inside a ( ) subshell, so it does
# not abort the calling script - confirm intent.
function enable_pxe {
[ $# -gt 1 ] && { echo "[-] This function take only one arg, NODE NAME" && exit; }
[ $1 ] && node=$1 || { echo "[-] please enter target node" && exit 4; }
cd $baseIP
mac=01-`cat dhcphosts | awk 'BEGIN {FS=",";OFS=","}; {if ($2 == "'$node'" ) print $1}' | tr ":" "-"`
cd $baseTFTP
[ -L $mac ] && echo "[*] Allready found link of $node" || ( ln -s 911boot $mac && echo "[+] Link created for $node" || { echo [Error] faild to create to $node && exit 5; })
}
#reboot_from_pxe cpu-01
# Set a node's next boot device to PXE over IPMI and power-cycle it.
#   $1 - node name; the IPMI alias is "ipmi<name>" and its IP comes from hosts.
# cpu-* nodes use ADMIN/ADMIN credentials, stor-* nodes use admin/admin.
# NOTE(review): `break` below is not inside a loop - bash will complain and
# continue, so a bootdev failure does not actually stop the power cycle path.
function reboot_from_pxe {
[ $# -gt 1 ] && { echo "[-] This function take only one arg, NODE NAME" && exit; }
[ $1 ] && node="ipmi"$1 || { echo "[-] please enter target node" && exit; }
cd $baseIP && ip=`grep "\s$node" $baseIP/hosts | awk '{print $1}'`
if [[ $node == *"cpu"* ]]
then
ipmitool -I lanplus -H $ip -U ADMIN -P ADMIN chassis bootdev pxe > /dev/null && echo "[*] Set $node to boot from pxe" || { echo "[Error] faild to make $node boot from pxe" && break; }
ipmitool -I lanplus -H $ip -U ADMIN -P ADMIN chassis power cycle > /dev/null && echo "[*] Restarting $node " || (echo "[Error] faild to power cycle $node")
elif [[ $node == *"stor"* ]]; then
ipmitool -I lanplus -H $ip -U admin -P admin chassis bootdev pxe > /dev/null && echo "[*] Set $node to boot from pxe" || { echo "[Error] faild to make $node boot from pxe" && break; }
ipmitool -I lanplus -H $ip -U admin -P admin chassis power cycle > /dev/null && echo "[*] Restarting $node " || (echo "[Error] faild to power cycle $node")
fi
sleep 2
}
#reboot_from_hd cpu-01
# Same as reboot_from_pxe but sets the boot device to the hard disk
# (ipmitool keyword "disk"). Near-duplicate of reboot_from_pxe; the generic
# reboot_node below was presumably meant to replace both.
function reboot_from_hd {
[ $# -gt 1 ] && { echo "[-] This function take only one arg, NODE NAME" && exit; }
[ $1 ] && node="ipmi"$1 || { echo "[-] please enter target node" && exit; }
cd $baseIP && ip=`grep "\s$node" $baseIP/hosts | awk '{print $1}'`
if [[ $node == *"cpu"* ]]
then
ipmitool -I lanplus -H $ip -U ADMIN -P ADMIN chassis bootdev disk > /dev/null && echo "[*] Set $node to boot from hard disk" || { echo "[Error] faild to make $node boot from pxe" && break; }
ipmitool -I lanplus -H $ip -U ADMIN -P ADMIN chassis power cycle > /dev/null && echo "[*] Restarting $node " || (echo "[Error] faild to power cycle $node")
elif [[ $node == *"stor"* ]]; then
ipmitool -I lanplus -H $ip -U admin -P admin chassis bootdev disk > /dev/null && echo "[*] Set $node to boot from hard disk" || { echo "[Error] faild to make $node boot from pxe" && break; }
ipmitool -I lanplus -H $ip -U admin -P admin chassis power cycle > /dev/null && echo "[*] Restarting $node " || (echo "[Error] faild to power cycle $node")
fi
sleep 2
}
#reboot_node cpu-01 pxe
#reboot_node stor-01 hd
# Set a node's next boot device over IPMI and power-cycle it.
#   $1 - node name (cpu-NN or stor-NN)
#   $2 - boot source: "pxe" or "hd"
function reboot_node {
    # FIX: "-qt" is not a test operator; require exactly two arguments.
    [ $# -ne 2 ] && { echo "[-] This function take 2 args, NODE NAME, pxe||hd"; exit 1; }
    # FIX: `exit` previously ran inside ( ) subshells and never aborted.
    [ "$1" ] && node="ipmi-"$1 || { echo "[-] please enter target node"; exit 1; }
    # NOTE(review): the other reboot helpers build the alias as "ipmi$1"
    # (no dash) - confirm which form the hosts file actually contains.
    case "$2" in
        pxe) boot=pxe ;;
        # FIX: ipmitool's bootdev keyword for the hard disk is "disk",
        # not "hd" (see reboot_from_hd, which already used "disk").
        hd) boot=disk ;;
        *) echo "[-]Please enter from where u want node to boot, pxe or hard disk 'hd'"; exit 1 ;;
    esac
    cd $baseIP && ip=`grep "\s$node" $baseIP/hosts | awk '{print $1}'`
    if [[ $node == *"cpu"* ]]
    then
        ipmitool -I lanplus -H $ip -U ADMIN -P ADMIN chassis bootdev $boot > /dev/null && echo "[*] Set $node to boot from $boot" || { echo "[Error] faild to make $node boot from pxe"; return 1; }
        ipmitool -I lanplus -H $ip -U ADMIN -P ADMIN chassis power cycle > /dev/null && echo "[*] Restarting $node " || (echo "[Error] faild to power cycle $node")
    elif [[ $node == *"stor"* ]]; then
        ipmitool -I lanplus -H $ip -U admin -P admin chassis bootdev $boot > /dev/null && echo "[*] Set $node to boot from $boot" || { echo "[Error] faild to make $node boot from pxe"; return 1; }
        ipmitool -I lanplus -H $ip -U admin -P admin chassis power cycle > /dev/null && echo "[*] Restarting $node " || (echo "[Error] faild to power cycle $node")
    fi
}
#installer_node cpu-01 password
# Wait (up to 60s, user-extendable) for a node's sshd to come up, then run
# the installer on it via sshpass. Expects $installerPasswd and
# $enviromentName to be set by the caller's environment.
#   $1 - node name (looked up in hosts)
# NOTE(review): both `break` statements below are outside any loop (the
# second sits at function top level), so bash warns and continues instead
# of aborting - likely `return` was intended.
function installer_node {
[[ $# != "1" ]] && echo "[-] This function take only one arg, NODE NAME" && exit
node=$1
cd $baseIP && ip=`grep "\s$node" $baseIP/hosts | awk '{print $1}'`
canConnect=0
counter=1
echo "[*] Try to connect to $node"
# Poll port 22 once a second; after 60 tries ask whether to keep waiting.
while [ $canConnect != 1 ]; do
nc -z $ip 22 && { canConnect=1 && break; }|| { canConnect=0 && printf "."; }
if [ $counter == "60" ]; then
printf "[=>] $node doesn't come back yet, Do u want to retry connect to it other 1 min [y/n]: "
read answer
if [ $answer == "y" ]; then
counter=0
continue
elif [ $answer == "n" ]; then\
break
fi
fi
((counter++))
sleep 1
done
if [ $canConnect == "0" ]; then
break
fi
sshpass -p$installerPasswd ssh -o StrictHostKeyChecking=no root@$ip 'cd /root/tools && bash Install "'$enviromentName'"'
}
# Driver: reboot compute nodes cpu-01..cpu-04 from their hard disks.
# (Uncomment the first two lines to PXE-boot them for reinstallation.)
for i in {01..04}
do
#enable_pxe cpu-$i
#reboot_from_pxe cpu-$i
reboot_from_hd cpu-$i
done
| true
|
81f6d396ce60f4055626177deef06e92fc95864e
|
Shell
|
SRSteinkamp/rideshare_analystcert
|
/getdata.sh
|
UTF-8
| 659
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Download, unpack and tidy the Divvy trip-data archives for 2020-04..2021-04.

# Create directory
mkdir data

# Months to fetch (YYYYMM) and the bucket that hosts the archives.
declare -a StringArray=(202004 202005 202006 202007 202008 202009 202010 202011 202012 202101 202102 202103 202104)
base_url="https://divvy-tripdata.s3.amazonaws.com"

for date in ${StringArray[@]}
do
    archive="${date}-divvy-tripdata.zip"
    tmpurl="${base_url}/${archive}"
    # Show which archive is being fetched.
    echo $tmpurl
    # Download (following redirects) into the data folder, then extract
    # and drop the zip so only the CSVs remain.
    curl $tmpurl -o "data/${archive}" -L
    unzip "data/${archive}" -d data/
    rm "data/${archive}"
done

# clean up macOS metadata shipped inside the archives
rm -r data/__MACOSX/
| true
|
7f0c2c9340a5b8939383aa258e5750b536d3caec
|
Shell
|
CORDEA/zsh-tmux-decor
|
/.zsh/tmux_status.sh
|
UTF-8
| 1,388
| 3.265625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# encoding:utf-8
#
# Copyright [2015] [Yoshihiro Tanaka]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Author: Yoshihiro Tanaka <contact@cordea.jp>
# date : 2016-02-28
# tmux status-line dispatcher: reads which widget index is configured for
# the left/right side from $tmux_env and prints that widget's get.sh output.
. $HOME/.zsh/environment.sh
# Run the given widget script and print its single-line output.
#   $1 - path to the widget's get.sh, $2 - "left" or "right"
# NOTE(review): both branches below are identical, and $line is used
# unquoted as the printf *format* string (a '%' in widget output would be
# interpreted) - confirm whether side-specific formatting was intended.
function output() {
line=`. $1`
if [ "$2" = "right" ];then
printf $line
else
printf $line
fi
}
# Pick the widget for the requested side: field 1 of $tmux_env is the left
# widget index, field 2 the right; 0=reuters 1=twitter 2=weather 3=gmail.
function switcher() {
sw=""
case "$1" in
left*)
sw=`cut -d ' ' -f 1 $tmux_env`
;;
right*)
sw=`cut -d ' ' -f 2 $tmux_env`
;;
*)
exit 1
;;
esac
case "$sw" in
0*)
output "$reuter_base/get.sh" $1
;;
1*)
output "$tweet_base/get.sh" $1
;;
2*)
output "$winfo_base/get.sh" $1
;;
3*)
output "$gmail_base/get.sh" $1
;;
esac
}
switcher $1
| true
|
28e09f67dc49515a560b155d72a7c2e7d8d3a2dd
|
Shell
|
mcavady/backupBashScripts
|
/scripts/sql.sh
|
UTF-8
| 1,972
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
#sql script for backing up databases and zipping them this may not send the email, users perms will have to be set by the user running this script
#sql backup user, user name and password for the mysql user you want to use this script
# NOTE(review): passing --password on the command line exposes it in `ps`;
# a ~/.my.cnf or --defaults-extra-file would be safer.
DB_USER=""
DB_PASSWD=""
#optimise tables and check
mysqlcheck --user=$DB_USER --password=$DB_PASSWD --all-databases;
# sql to backup use the user for my sql and dumps the file to a tmp directory
mysqldump --user=$DB_USER --password=$DB_PASSWD something > something.sql;
#the directories that you want to include
backup_files="/files "
#save zip file else were on the server
dest="/backups"
#Create archive file name
day=$(date +%d)
month=$(date +%b )
year=$(date +%y)
prefix="sql"
archive_file="$prefix-$day-$month-$year.tgz"
#Print start status
echo "Backing it all up $backup_files to $dest/$archive_file"
date
echo
tar czf $dest/$archive_file $backup_files
echo
# print end message
# NOTE(review): "date" here is part of the quoted string's neighbour - it
# prints the literal word, not the current date.
echo "Backup complete : " date
#long listing of the files in dest
ls -lh $dest
#remove all sql temp files
rm *.sql
#mail out a report if running from the term window
# echo "sql backup completed \n Date of Backup : $day $month $year \n Backed up : SQL \n Time of the backup : $(date) \n Backup location : $dest" | sendmail Backup blar@blar.co.uk
# NOTE(review): the next three lines are not inside any heredoc or mail
# command - the shell will try to execute "From:", "To:" and "Subject:" as
# commands and fail. They (and the HTML heredoc below, which currently just
# prints to stdout) look like they were meant to be piped to sendmail.
From: me@blar.co.uk
To: me@blar.co.uk
Subject: MIME Test
cat <<EOF
Mime-Version: 1.0
Content-Type: text/html
<!doctype html>
<html>
<head>
<meta charset='utf-8'>
<title>Backups for sql complete</title>
<style type='text/css'>
/*styles to go here*/
.main{width:100%;background:#000;}
.heading{width:95%;background:#ddd;}
.information{width:95%;background:#888;}
</style>
</head>
<body>
<div class='main'>
<div class='heading'>
<h1>SQL optimised and backed up $day - $month - $year</h1>
<div class='information'>
dest: $dest
</div>
</div>
</div>
</body>
</html>
EOF
| true
|
c2becea0c043e9befd587ded0281f6385f8562d1
|
Shell
|
dogeth/dotfiles
|
/profile.bash
|
UTF-8
| 1,192
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
# Bash profile: git completion, navigation/git/rails aliases, history
# settings and a git-aware prompt.
source ~/.dotfiles/git-completion.bash
# changing directory to code project
function c { cd ~/code/$1; }
# cd
alias ..='cd ..'
# ls
alias ls="ls -F"
alias l="ls -lAh"
alias ll="ls -l"
alias la='ls -A'
# git
alias gl='git pull'
alias gp='git push'
alias gd='git diff'
alias gc='git commit'
alias gca='git commit -a'
alias gco='git checkout'
alias gb='git branch'
alias gs='git status'
alias grm="git status | grep deleted | awk '{print \$3}' | xargs git rm"
alias gcl="git config --list"
alias gcm="git commit -m"
alias ga.="git add ."
alias gph="git push heroku"
# rails
alias sc='script/console'
alias ss='script/server'
alias sg='script/generate'
alias tlog='tail -f log/development.log'
alias rst='touch tmp/restart.txt'
# misc
alias reload='. ~/.bash_profile'
#history
# don't put duplicate lines in the history. See bash(1) for more options
export HISTCONTROL=ignoredups
# ... and ignore same sucessive entries.
# NOTE(review): this assignment overrides the ignoredups one above;
# "ignoreboth" already implies it.
export HISTCONTROL=ignoreboth
alias h?="history | grep "
#export PS1="\w$ "
# Prompt: working dir plus current git branch/state from __git_ps1.
export PS1='\w$(__git_ps1 ":%s")\$ '
export GIT_PS1_SHOWDIRTYSTATE=1
export GIT_PS1_SHOWSTASHSTATE=1
export GIT_PS1_SHOWUNTRACKEDFILES=1
export EDITOR='mate -w'
export PATH=$PATH:~/.dotfiles
| true
|
8375bd4aba979faefbecbbbc53d4c98f9d0d3fe0
|
Shell
|
gdubya/.dotfiles
|
/src/scripts/networkservice.sh
|
UTF-8
| 844
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# macOS: find the highest-priority network service that is active and not
# self-assigned (169.254.x.x), then delegate status display to wifi.sh or
# ethernet.sh.
services=$(networksetup -listnetworkserviceorder | grep 'Hardware Port')
while read line; do
# Service name is the field between "(, "/": " and ")"; sdev is the BSD
# device (en0, en1, ...) extracted from "Device: ...)".
sname=$(echo $line | awk -F "(, )|(: )|[)]" '{print $2}')
sdev=$(echo $line | sed -n 's/.*Device: \(.*\))$/\1/p')
if [ -n "$sdev" ]; then
status=$(ifconfig $sdev 2>/dev/null | grep 'status: active')
if [[ $(echo $status | sed -e 's/.*status: active.*/active/') == 'active' ]]; then
# A 169.254.* inet address means link-local/self-assigned (no DHCP lease).
selfAsigned=$(ifconfig $sdev 2>/dev/null | grep 'inet' | sed -n 's/^.*inet \(169\.254\).*$/\1/p' | sed -n 's/169\.254/true/p')
if [[ $selfAsigned != 'true' ]]; then
currentservice="$sname"
break
fi
fi
fi
done <<< "$(echo "$services")"
if [[ $currentservice == 'Wi-Fi' ]]; then
echo "$(wifi.sh -t)"
else
# $sdev still holds the device of the service found above.
echo "$(ethernet.sh ${sdev})"
fi
| true
|
a2c8f83e2726485e5d6e6a9616b9abe628bb9c67
|
Shell
|
wcboone/OOPerl_Code_Sample
|
/GNC/GoldCard/bin/loadInitial.sh
|
UTF-8
| 557
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# $Id: loadInitial.sh,v 1.1 2007/01/19 23:15:20 donohuet Exp $
# Run this from cron every 2.5 hours to launch 2 loads.
# Picks up to two pending gc_?? files, stages each into LOAD/, imports it
# with datax in the background, then parks the file in DONE/.
DIR=/feeds/tmp/GNC
cd "$DIR" || exit 1
mkdir -p LOAD DONE

# Import one already-staged file (LOAD/$1) and archive it in DONE/.
loadfile() {
    f=LOAD/$1
    nohup /feeds/bin/datax -file=$f -db=feedsprd -legacy -import gnc.goldcard
    mv "$f" DONE
}

for i in 1 2 ; do
    # First pending file, if any (glob instead of parsing `ls`; an
    # unmatched glob leaves the literal pattern, caught by -e).
    set -- gc_??
    file=$1
    [ ! -e "$file" ] && echo No more files in $DIR && exit 1
    [ ! -s "$file" ] && echo File \"$file\" not valid in $DIR && exit 1
    # FIX: stage synchronously before backgrounding the import, so the
    # second iteration cannot pick up the same file ($DIR was also never
    # set before, making the messages above misleading).
    mv "$file" LOAD
    loadfile "$file" &
done
exit 0
| true
|
1412ca2172d98dea22da186f41da17f165e4e101
|
Shell
|
galaxysd/GalaxyCodeBases
|
/bash/etc/cutter.sh
|
UTF-8
| 463
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Slice a tall image ($1) into overlapping horizontal strips under ./work.
pieceHeight=5408                     # strip height in pixels
pieceSlide=$((pieceHeight - 130))    # vertical step: 130 px overlap between strips
targetdir=./work
infile="$1"
inputname=${infile%%.*}
mkdir -p "${targetdir}"

# Print an image's pixel height. (FIX: the old name "getwidth" was
# misleading - it always queried %[height].)
function get_height() { magick identify -format '%[height]' "$1"; }

height=$(get_height "${infile}")
echo "[${height}]"
top=0
fno=0
while [ "$top" -lt "$height" ]
do
    echo "$top $height"
    convert -crop x${pieceHeight}+0+${top} +repage "${infile}" "${targetdir}/${inputname}_${fno}.png"
    # FIX: use POSIX $(( )) arithmetic instead of the deprecated $[ ] form.
    top=$((top + pieceSlide))
    fno=$((fno + 1))
done
| true
|
0365ca7ea82bbb27231cbb839c387449c7f2eb0f
|
Shell
|
TheBoringTeam/AwesomeMusic-frontend
|
/stop.sh
|
UTF-8
| 177
| 3.140625
| 3
|
[] |
no_license
|
# Remove the front-end container, but only when it exists in the exited
# state and is not currently running.
running=$(docker ps -q -f name=front-end)
if [ -z "$running" ]; then
    exited=$(docker ps -aq -f status=exited -f name=front-end)
    if [ -n "$exited" ]; then
        # cleanup
        docker rm front-end
    fi
fi
|
d8331df02540167c27dc179dfb1cca1863383c8f
|
Shell
|
tomoya0x00/soracom_autossh
|
/soracom_autossh.sh
|
UTF-8
| 2,046
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Poll the SORACOM metadata service for SSH reverse-tunnel settings for this
# SIM (keyed by IMSI) and keep an autossh reverse tunnel in sync with them:
# start/restart it when the settings change, stop it when they disappear.
SSH_INFO_FILE="/var/tmp/ssh_info"
SSH_INFO_FILE_OLD="/var/tmp/ssh_info.old"
SSH_PRIVATEKEY_FILE="/var/tmp/private-key.pem"
AUTOSSH_PIDFILE="/var/run/autossh.pid"
# Fetch this device's IMSI and the matching SSH settings from the metadata service
IMSI=`curl -s http://metadata.soracom.io/v1/subscriber | jq .imsi`
if [ $? -ne 0 ]; then
echo "failed to get own imsi"
exit 1
fi
SSH_INFO=`curl -s http://metadata.soracom.io/v1/userdata | jq -r .ssh.imsis[${IMSI}]`
if [ $? -ne 0 ]; then
echo "failed to get userdata"
exit 1
fi
# Persist the fetched SSH settings, keeping the previous copy for diffing
if [ -e $SSH_INFO_FILE ]; then
cp -f $SSH_INFO_FILE $SSH_INFO_FILE_OLD
fi
echo $SSH_INFO > $SSH_INFO_FILE
# Kill a running autossh (if any) and remove its pidfile.
exit_autossh() {
if [ -e $AUTOSSH_PIDFILE ]; then
PID=`cat $AUTOSSH_PIDFILE`
echo "kill autossh pid=$PID"
kill -9 $PID
rm $AUTOSSH_PIDFILE
fi
}
# Remove the on-disk private key, if present.
delete_privatekey() {
if [ -e $SSH_PRIVATEKEY_FILE ]; then
rm $SSH_PRIVATEKEY_FILE
fi
}
if [ "$SSH_INFO" != "null" ]; then
# SSH settings exist for this device: diff against the previous fetch
if diff -q $SSH_INFO_FILE $SSH_INFO_FILE_OLD > /dev/null 2>&1; then
# do nothing
echo "do nothing"
else
# Settings changed: write out the private key and (re)start autossh
# (stop any already-running autossh first)
exit_autossh
delete_privatekey
echo $SSH_INFO | jq -r .privateKey > $SSH_PRIVATEKEY_FILE
chmod 600 $SSH_PRIVATEKEY_FILE
AUTOSSH_PIDFILE=$AUTOSSH_PIDFILE \
autossh -M 0 -o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
-o ServerAliveInterval=60 \
-o ServerAliveCountMax=3 \
-o ExitOnForwardFailure=yes \
-i $SSH_PRIVATEKEY_FILE \
-N \
-f \
-R `echo $SSH_INFO | jq -r .portForwardParam` &
echo "started autossh"
fi
else
# No SSH settings for this device: stop autossh if running, drop the key
exit_autossh
delete_privatekey
fi
| true
|
f7e9813f5591b2af2d65a24d8569b4e07decb91c
|
Shell
|
davidadamsphd/hellbender-validation
|
/scripts/run-md-and-bqsr.sh
|
UTF-8
| 2,778
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Validation pipeline: download a NA12878 test BAM and GATK bundle resources,
# then inside a GATK 3.4 docker container run Picard MarkDuplicates,
# GATK BaseRecalibrator and PrintReads (BQSR), and copy results back to GCS.
# NOTE(review): this is a semi-manual runbook, not a fully automatic script -
# `docker run ... bash` opens an interactive shell, and the bare `exit`
# further down would terminate whichever shell actually executes this file.
GATK_RESOURCES=gs://hellbender-validation/resources/gatk-bundle
FILE=CEUTrio.HiSeq.WGS.b37.ch20.4m-6m.NA12878.bam
GCS_INPUT_PATH=gs://hellbender-validation/test-input/NA12878
GCS_OUTPUT_PATH=gs://hellbender-validation/pickard-gatk3-output/NA12878
cd /tmp/
gsutil cp ${GCS_INPUT_PATH}/${FILE}* ./
gsutil cp ${GATK_RESOURCES}/google.key ./
gsutil cp ${GATK_RESOURCES}/human_g1k_v37_decoy.dict ./
gsutil cp ${GATK_RESOURCES}/human_g1k_v37_decoy.fasta ./
gsutil cp ${GATK_RESOURCES}/human_g1k_v37_decoy.fasta.fai ./
gsutil cp ${GATK_RESOURCES}/dbsnp_138.b37.vcf* ./
# start the docker container and run the tools
sudo docker run -ti -e FILE=${FILE} -e REF=/host/tmp/human_g1k_v37_decoy.fasta \
-e DBSNP=/host/tmp/dbsnp_138.b37.vcf \
-e KEY=/host/tmp/google.key \
-v /:/host gcr.io/atomic-life-89723/gga-3.4-0a bash
# --- everything below here is intended to run inside the container ---
mkdir /tmp/gatk-validation/
cd /tmp/gatk-validation/
GATKJAR=/opt/extras/gatk/GenomeAnalysisTK.jar
SET_INTERVAL_RANGES=""
SUGGESTED_THREADS=8
SUGGESTED_RAM=48G # n1-standard-8 has 52GB total
FULL_INPUT=/host/tmp/${FILE}
FILE_WO_EXTENSION=`echo $FILE | sed 's/\.[^.]*$//'`
METRICS_FILE=/host/tmp/md-${FILE_WO_EXTENSION}.metrics
RECAL_TABLE=/host/tmp/recal-stats-${FILE_WO_EXTENSION}.txt
mkdir -p ./tmp
export TMPDIR=$(pwd)/tmp
##################
# MarkDuplicates #
##################
java -Xmx$SUGGESTED_RAM -Djava.io.tmpdir=${TMPDIR} -jar /opt/extras/picard-tools-1.130/picard.jar MarkDuplicates \
ASSUME_SORTED=true \
MAX_RECORDS_IN_RAM=2000000 \
CREATE_INDEX=true \
REMOVE_DUPLICATES=false \
I=${FULL_INPUT} \
O=/host/tmp/deduped-${FILE} \
METRICS_FILE=${METRICS_FILE} \
TMP_DIR=${TMPDIR}
####################
# BaseRecalibrator #
####################
#--useOriginalQualities shouldn't be necessary if it's a BAM reversion
java -Xmx$SUGGESTED_RAM -jar $GATKJAR -K $KEY -et NO_ET \
-T BaseRecalibrator \
-nct $SUGGESTED_THREADS \
-I ${FULL_INPUT} \
-o $RECAL_TABLE \
-R $REF \
$SET_INTERVAL_RANGES \
-knownSites $DBSNP \
--useOriginalQualities \
-DIQ \
-cov ReadGroupCovariate \
-cov QualityScoreCovariate \
-cov CycleCovariate \
-cov ContextCovariate
###################
# PrintReads BQSR #
###################
java -Xmx$SUGGESTED_RAM -jar $GATKJAR -K $KEY -et NO_ET \
-T PrintReads \
-I ${FULL_INPUT} \
-R $REF \
$SET_INTERVAL_RANGES \
-BQSR $RECAL_TABLE \
-o /host/tmp/recalibrated-${FILE} \
-baq CALCULATE_AS_NECESSARY
# exit the docker container and copy back to gcs
exit
FILE_WO_EXTENSION=`echo $FILE | sed 's/\.[^.]*$//'`
gsutil cp deduped-${FILE_WO_EXTENSION}* ${GCS_OUTPUT_PATH}/
gsutil cp recal-stats-${FILE_WO_EXTENSION}.txt ${GCS_OUTPUT_PATH}/
gsutil cp recalibrated-${FILE}* ${GCS_OUTPUT_PATH}/
gsutil cp md-${FILE_WO_EXTENSION}.metrics ${GCS_OUTPUT_PATH}/
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.