blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9050bed6bba2de3ef705a1e543f04980aa61c168
|
Shell
|
bast69/piholeparser
|
/scripts/Parsing-Process/70-Checking-For-Fully-Qualified-Domain-Name-Requirements.sh
|
UTF-8
| 350
| 3.078125
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# shellcheck disable=SC1090
## Domain Requirements,, a period and a letter
# Keep only lines that look like fully-qualified domain names: each line
# must contain a lowercase letter and a dot, end in a lowercase letter,
# must not begin with '.' or '-', not end with '.', and be <= 255 chars.
## Variables
SCRIPTDIRA=$(dirname "$0")
source "$SCRIPTDIRA"/foldervars.sh
# foldervars.sh defines $BFILETEMP (input list) and $BTEMPFILE (output).
if [[ -f "$BFILETEMP" ]]
then
	# sed reads the input directly (no useless cat); quoting the file
	# variables protects paths containing spaces.
	sed '/[a-z]/!d; /[.]/!d; /[a-z]$/!d; /^\./d; /\.$/d; /^.\{,255\}$/!d;/^-/ d' "$BFILETEMP" > "$BTEMPFILE"
	# The original's second '[[ -f $BFILETEMP ]]' test was redundant:
	# nothing between the two checks could remove the file.
	rm "$BFILETEMP"
fi
| true
|
6b55ed3bf42f8c4d211a4217d0e020452a634bf4
|
Shell
|
manaswinidas/clinvar-report
|
/arvados/clinvar-report/crunch_scripts/clinvar-report
|
UTF-8
| 1,003
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Generate a ClinVar CSV report for every VCF in an input directory.
# Runs inside an Arvados crunch job: job parameters supply the ClinVar
# collection (CLINVAR) and the input VCF file or directory (VCF).
set -e
export PATH=$PATH:$CRUNCH_SRC:$CRUNCH_SRC/crunch_scripts:$CRUNCH_SRC/crunch_scripts/bin:.
. <( arv-dax setup )
mnt=$(arv-dax keep)
jobparam=$(arv-dax script_parameters)
clinvar=$mnt/$(echo "$jobparam" | jq -r .CLINVAR)
vcf=$mnt/$(echo "$jobparam" | jq -r .VCF)
bn_clinvar=$(basename "$clinvar")
# Allow CLINVAR to point either at clinvar.vcf itself or at its directory.
if [ "$bn_clinvar" != "clinvar.vcf" ]
then
  clinvar="$clinvar"/clinvar.vcf
fi
vcfdir="$vcf"
bn_vcf=$(basename "$vcf")
# BUG FIX: the original echoed "$bn_vc" (undefined) instead of "$bn_vcf".
echo ">>>>> bn_vcf $bn_vcf"
# If VCF names a single .vcf file, process that file's whole directory.
if [[ "$bn_vcf" =~ \.vcf$ ]]
then
  vcfdir=$(dirname "$vcf")
  echo ">>>> calling dirname on vcf file ($vcf -> $vcfdir)"
fi
echo ">>>>>> starting with clinvar $clinvar vcfdir $vcfdir"
# Restrict word splitting so filenames containing spaces survive the loop.
SAVEIFS=$IFS
IFS=$(echo -en "\n\b")
for input_vcf in $(ls "$vcfdir")
do
  # BUG FIX: $b must be assigned before the progress message uses it; the
  # original echoed "${b}_clinvar_report.csv" before setting b.
  b=$(basename "$input_vcf" .vcf)
  echo ">>>>>>>>> processing $vcfdir/$input_vcf (using $clinvar). creating ${b}_clinvar_report.csv"
  clinvar-report.py -C "$clinvar" -i "$vcfdir/$input_vcf" -F csv > "$b"_clinvar_report.csv
done
IFS=$SAVEIFS
arv-dax task finish
exit 0
| true
|
f2a549e5b5172a2f74968456a9e3c7d86823439a
|
Shell
|
AlexisDefranoux/QGL-ISG
|
/scripts/subscripts/exec_map.sh
|
UTF-8
| 318
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/local/bin/bash
# Script to test our Explorer on different maps
#
# Usage: exec_map.sh <map_file> <arg2> <arg3> <arg4>
# Runs the explorer via maven, then archives the produced logs under
# scripts/results/ with a name derived from the map file and arguments.
mvn -q exec:java -Djava.awt.headless=true -Dexec.args="$*"
tmp=${1%%.*}     # strip extension(s) from the map path
map=${tmp##*/}   # strip leading directories -> bare map name
echo "$map"
# Quote the destination paths so map names / args with spaces or glob
# characters cannot be split or expanded by the shell.
cat logs/explorer.log > "scripts/results/full_logs/${map}_(${2},${3})_${4}.log"
cat logs/report.log > "scripts/results/reports/${map}_(${2},${3})_${4}_report.log"
| true
|
681c380302c657ff9fac7909a44a855f912a9744
|
Shell
|
ACTDataLake/act-data-lake
|
/00_Cluster.sh
|
UTF-8
| 2,257
| 3.4375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# ===============================================================================================================================
# Description  : Script to test installation and status of the cluster
# Author       : Stuart Wilson
# Date         : 01/03/2018
#
# Based on the fine work by Selvaraaju Murugesan at www.github.com/actgov/act-data-lake
# ===============================================================================================================================
# Some basic information about the cluster
clear
echo "OS :"
cat /etc/*release | grep -m1 -i -o -e ubuntu -e redhat -e 'red hat' -e centos
echo -e "\n CPU Info"
grep '^model name' /proc/cpuinfo | sort -u
echo -e "\n Host name"
hostname -f
echo -e "\n Host IP"
hostname -i
echo -e "\n"
# Test 1 : Test whether the cluster is secure or not
searchstr="secure=true"
file="/opt/mapr/conf/mapr-clusters.conf"
# BUG FIX: '[grep -q "$searchstr" $file]' tried to execute a command named
# '[grep' (which does not exist), so Test 1 always reported "Fail".
# Use grep itself as the if condition.
if grep -q "$searchstr" "$file"; then
  echo "Cluster is Secure"
  echo "Test 1 Pass"
else
  echo "Test 1 Fail : Cluster is not Secure"
fi
echo -e "\n"
# Test 2 : Number of Nodes
val=$(maprcli dashboard info -json | grep nodesUsed)
echo "Number of Nodes"
if echo "$val" | grep -q 3 ; then
  echo "Number of Nodes is 3"
  echo "Test 2 Pass "
else
  echo "Test 2 Fail : Number of nodes is less than 3"
fi
echo -e "\n"
# Test 3 : NFS Mount (look for a mapr NFS entry in the mount table)
val=$(grep mapr /proc/mounts)
#echo $val
echo "NFS Mounted ?"
if echo "$val" | grep -q "mapr" && echo "$val" | grep -q "nfs" ; then
  echo "NFS Mount True"
  echo "Test 3 Pass "
else
  echo "Test 3 Fail : No NFS mount"
fi
echo -e "\n"
# Test 4 : Cluster Audit
# Test Case 78401
echo "Cluster Audit Enabled ?"
val=$( maprcli audit info -json | head -15 | grep retentionDays | cut -d ':' -f2 | tr -d '"')
#echo $val
# Retention days is set to 365
if [ "$val" -eq "365" ];then
  echo "Cluster Audit Enabled"
  echo "Test 4 Pass "
else
  echo "Test 4 Fail : No Audit Enabled"
fi
echo -e "\n"
#Test 5 : Cluster Health
# Test Case
# (message typo fixed: "Alatms" -> "Alarms")
echo "Cluster Alarms raised ?"
val=$(maprcli alarm list)
if [[ $val ]]; then
  echo "Alarms are raised"
  echo "Test 5 Fail"
else
  echo "No Alarms raised"
  echo "Test 5 Pass"
fi
#Test 6 : Linux version
# Test Case
| true
|
459b952e0d8d4ffd393d1ef0c3165d8095e4421a
|
Shell
|
eterne92/COMP9021
|
/quiz/test.sh
|
UTF-8
| 154
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
# Run quiz_4.py for every ordered pair of years in 1990..2015, feeding it
# the two years plus a fixed amount (10000) on stdin, and show only the
# output lines mentioning Korea.
for i in $(seq 1990 2015)
do
	for j in $(seq 1990 2015)
	do
		# printf is portable; 'echo -e' is undefined behavior under POSIX sh.
		printf '%s\n' "$i-- $j" " 10000" | python quiz_4.py | grep Korea
	done
done
| true
|
c9bf9d2d6f1e687bf07c3ad09aa3faaf1e686b3c
|
Shell
|
Azure/azurehpc
|
/scripts/setupbeeond.sh
|
UTF-8
| 783
| 2.734375
| 3
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
#!/bin/bash
# Install BeeGFS/BeeOND client packages (RHEL 7) and prepare mount points.
# Usage: setupbeeond.sh [mount_root]   (default: /mnt/resource)
MOUNT_ROOT=${1:-/mnt/resource}
# this script needs to run without sudo
# - the keys from the user are used for the root user
sudo wget -O /etc/yum.repos.d/beegfs-rhel7.repo https://www.beegfs.io/release/beegfs_7.2/dists/beegfs-rhel7.repo
sudo rpm --import https://www.beegfs.io/release/latest-stable/gpg/RPM-GPG-KEY-beegfs
sudo yum install -y epel-release
sudo yum install -y psmisc libbeegfs-ib beeond pdsh
# Enable ibverbs (RDMA) support in the client autobuild flags.
sudo sed -i 's/^buildArgs=-j8/buildArgs=-j8 BEEGFS_OPENTK_IBVERBS=1 OFED_INCLUDE_PATH=\/usr\/src\/ofa_kernel\/default\/include/g' /etc/beegfs/beegfs-client-autobuild.conf
sudo /etc/init.d/beegfs-client rebuild || exit 1
# Give root the invoking user's SSH keys (for pdsh/beeond fan-out).
sudo cp -r "$HOME/.ssh" /root/.
# FIX: mkdir -p so re-running the script doesn't fail on existing dirs.
sudo mkdir -p "$MOUNT_ROOT/beeond"
sudo chmod 777 "$MOUNT_ROOT/beeond"
sudo mkdir -p /beeond
sudo chmod 777 /beeond
| true
|
b95c643a5d9bbc4aabd68a96fd0f14e6c220a177
|
Shell
|
joaogsleite/configs
|
/old/virtualbox/restore.sh
|
UTF-8
| 128
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Restore VirtualBox group membership for the current user.
# On macOS there is nothing to do (Parallels is used there instead).
case "$(uname)" in
  Darwin)
    echo "use Parallels on macOS"
    ;;
  *)
    sudo adduser $USER vboxusers
    ;;
esac
| true
|
f060e21287d42c5a505e75958253da914c380cd8
|
Shell
|
megamsys/gulp
|
/.hooks/pre-commit
|
UTF-8
| 1,262
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Git pre-commit hook: reject commits when Go sources are unformatted or
# contain stray fmt.Println debugging, and (see note below) optionally
# when vet, editor-backup or build checks fail.
fmtcount=`git ls-files | grep '.go$' | xargs gofmt -l 2>&1 | wc -l`
if [ $fmtcount -gt 0 ]; then
echo "Some files aren't formatted, please run 'go fmt ./...' to format your source code before committing"
exit 1
fi
# Count fmt.Println occurrences outside generated/vendored directories.
fmtcount=`grep --exclude-dir={config,.git,bin,log,spec,tmp,.hooks,public,*.md} -rnw './' -e "fmt.Println" | wc -l`
if [ $fmtcount -gt 0 ]; then
echo "Some files have fmt.Println, please run 'grep --exclude-dir={config,.hooks,log,bin,spec,tmp,public,.git} -rnw './' -e "puts"'"
exit 1
fi
# Due to the way composites work, vet will fail for some of our tests so we ignore it
vetcount=`go vet ./... 2>&1 | wc -l`
if [ $vetcount -gt 0 ]; then
echo "Some files aren't passing vet heuristics, please run 'go vet ./...' to see the errors it flags and correct your source code before committing"
exit 1
fi
exit 0
# NOTE(review): everything below is unreachable -- the 'exit 0' above
# always fires first. The editor-backup and 'make' checks look
# deliberately disabled; either delete them or move them above the exit.
fmtcount=`find . -name *.*~ 2>&1 | wc -l`
if [ $fmtcount -gt 0 ]; then
echo "Some files are temporary files, please run 'find . -type f -name "*.*~" -exec rm -f {} \;' to delete them before committing"
exit 1
fi
mkcount=`make 2>&1 | wc -l`
if [ $mkcount -gt 0 ]; then
echo "compilation failed, please run 'make' to see the errors and fix them before committing"
exit 1
fi
exit 0
| true
|
8b1d0dc4bedafa79fd6761075adc48c163b3fbce
|
Shell
|
lukeblevins/Uno.Gallery
|
/build/scripts/ios-uitest-run.sh
|
UTF-8
| 1,556
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the Uno.Gallery iOS app and its UITest project, then run the UI
# test suite on the iOS simulator via the NUnit console runner. Expects
# the Azure Pipelines variables BUILD_SOURCESDIRECTORY and
# BUILD_ARTIFACTSTAGINGDIRECTORY to be set.
set -euo pipefail
IFS=$'\n\t'
export UNO_UITEST_PLATFORM=iOS
export UNO_UITEST_IOSBUNDLE_PATH=$BUILD_SOURCESDIRECTORY/Uno.Gallery/Uno.Gallery.iOS/bin/iPhoneSimulator/Release/Uno.Gallery.app
export UNO_UITEST_SCREENSHOT_PATH=$BUILD_ARTIFACTSTAGINGDIRECTORY/screenshots/ios
export UNO_UITEST_PROJECT=$BUILD_SOURCESDIRECTORY/Uno.Gallery/Uno.Gallery.UITest/Uno.Gallery.UITest.csproj
export UNO_UITEST_BINARY=$BUILD_SOURCESDIRECTORY/Uno.Gallery/Uno.Gallery.UITest/bin/Release/net47/Uno.Gallery.UITest.dll
export UNO_UITEST_LOGFILE=$BUILD_ARTIFACTSTAGINGDIRECTORY/screenshots/ios/nunit-log.txt
export UNO_UITEST_IOS_PROJECT=$BUILD_SOURCESDIRECTORY/Uno.Gallery/Uno.Gallery.iOS/Uno.Gallery.iOS.csproj
export UNO_UITEST_NUNIT_VERSION=3.11.1
export UNO_UITEST_NUGET_URL=https://dist.nuget.org/win-x86-commandline/v5.7.0/nuget.exe
# (message typo fixed: "Lising" -> "Listing")
echo "Listing iOS simulators"
xcrun simctl list devices --json
# Pre-launch the simulator so the tests don't pay its cold-start cost.
/Applications/Xcode.app/Contents/Developer/Applications/Simulator.app/Contents/MacOS/Simulator &
cd "$BUILD_SOURCESDIRECTORY"
msbuild /r /p:Configuration=Release "$UNO_UITEST_PROJECT"
msbuild /r /p:Configuration=Release /p:Platform=iPhoneSimulator /p:IsUiAutomationMappingEnabled=True "$UNO_UITEST_IOS_PROJECT"
cd "$BUILD_SOURCESDIRECTORY/build"
wget "$UNO_UITEST_NUGET_URL"
mono nuget.exe install NUnit.ConsoleRunner -Version "$UNO_UITEST_NUNIT_VERSION"
mkdir -p "$UNO_UITEST_SCREENSHOT_PATH"
# Serialize the run (single in-process agent) for simulator stability.
mono "$BUILD_SOURCESDIRECTORY/build/NUnit.ConsoleRunner.$UNO_UITEST_NUNIT_VERSION/tools/nunit3-console.exe" \
	--inprocess --agents=1 --workers=1 \
	"$UNO_UITEST_BINARY"
| true
|
ea6831f1fd7530cdc538d8698631dfba6a91f077
|
Shell
|
pottumuusi/omnichat
|
/scripts/install_deps.sh
|
UTF-8
| 239
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install the distro packages this project needs, then bootstrap rustup.
# TODO support for different distributions
required_ubuntu_packages="curl"
echo "Running sudo command next"
sudo apt-get install "${required_ubuntu_packages}"
curl https://sh.rustup.rs -sSf | sh
| true
|
cbc9e8892c56b78bed0b90609f6332541af3630f
|
Shell
|
SUNET/docker-redirect
|
/start.sh
|
UTF-8
| 900
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entry point: make sure at least one TLS certificate exists,
# generate a pound(8) reverse-proxy config that redirects every request
# to ${URL} (from the environment), then start pound in the foreground.
count=`find /etc/ssl/private/ -type f -a -name \*.pem 2>/dev/null |wc -l`
# No certs mounted: fall back to Debian's self-signed "snakeoil" pair,
# concatenated (key + cert) into the single PEM format pound expects.
if [ $count -eq 0 ]; then
make-ssl-cert generate-default-snakeoil
cat /etc/ssl/private/ssl-cert-snakeoil.key /etc/ssl/certs/ssl-cert-snakeoil.pem > /etc/ssl/private/server.pem
fi
mkdir -p /var/run/pound
# First config section: plain-HTTP listener plus the HTTPS listener
# header. The HTTPS section stays open so Cert lines can be appended.
cat>/etc/pound/pound.cfg<<EOF
User "www-data"
Group "www-data"
LogLevel 3
Alive 30
Control "/var/run/pound/poundctl.socket"
Daemon 0
ListenHTTP
RewriteLocation 1
Port 80
End
ListenHTTPS
xHTTP 1
Address 0.0.0.0
Port 443
EOF
# One Cert line per mounted PEM file.
for c in /etc/ssl/private/*.pem; do
echo " Cert \"$c\"" >> /etc/pound/pound.cfg
done
# Close the HTTPS listener: every URL is redirected to ${URL}. The
# heredoc delimiter is unquoted, so ${URL} expands here while the
# escaped \$ keeps the regex anchor literal.
cat>>/etc/pound/pound.cfg<<EOF
Ciphers "ECDHE-RSA-AES128-SHA256:AES128-GCM-SHA256:RC4:HIGH:!MD5:!aNULL:!EDH"
Service
URL "^(/|.*)\$"
Redirect "${URL}"
End
End
EOF
/usr/sbin/pound
| true
|
a4f35798d127f6f23b5baf5dab269d084cca0141
|
Shell
|
00mjk/dotfiles-6
|
/zsh/.zshrc
|
UTF-8
| 2,808
| 2.6875
| 3
|
[] |
no_license
|
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
# The following lines were added by compinstall
zstyle ':completion:*' auto-description 'specify: %d'
zstyle ':completion:*' completer _expand _complete _ignored _match _approximate
zstyle ':completion:*' completions 1
zstyle ':completion:*' expand prefix suffix
zstyle ':completion:*' format 'completing %d'
zstyle ':completion:*' glob 1
zstyle ':completion:*' group-name ''
zstyle ':completion:*' ignore-parents parent pwd
zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*' list-prompt %SAt %p: Hit TAB for more, or the character to insert%s
zstyle ':completion:*' list-suffixes true
zstyle ':completion:*' matcher-list '' 'm:{[:lower:]}={[:upper:]}' 'r:|[._-]=** r:|=**' 'l:|=* r:|=*'
zstyle ':completion:*' max-errors 4
zstyle ':completion:*' menu select=1
zstyle ':completion:*' preserve-prefix '//[^/]##/'
zstyle ':completion:*' select-prompt %SScrolling active: current selection at %p%s
zstyle ':completion:*' squeeze-slashes true
zstyle ':completion:*' substitute 1
zstyle ':completion:*:*:git:*' user-commands bonsai:'trim local branches with removed remotes'
zstyle :compinstall filename '/home/irish/.zshrc'
autoload -Uz compinit
compinit
# End of lines added by compinstall
# Lines configured by zsh-newuser-install
HISTFILE=~/.histfile
HISTSIZE=1000
SAVEHIST=1000
setopt notify
unsetopt beep
bindkey -e
# End of lines configured by zsh-newuser-install
# Enable dircolors if available
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dir_colors && eval "$(dircolors -b ~/.dir_colors)" || eval "$(dircolors -b)"
fi
# Powerlevel10k theme plus its per-user configuration.
source /usr/share/zsh-theme-powerlevel10k/powerlevel10k.zsh-theme
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
source ~/.zsh/p10k.zsh
# Source alias definitions
source ~/.zsh/aliases.zsh
# Plugins
source /usr/share/zsh/plugins/zsh-syntax-highlighting/zsh-syntax-highlighting.plugin.zsh
source /usr/share/zsh/plugins/zsh-history-substring-search/zsh-history-substring-search.zsh
source /usr/share/zsh/plugins/zsh-autosuggestions/zsh-autosuggestions.plugin.zsh
# Autosuggestion appearance and strategy.
ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE="fg=#585858,bold,underline"
ZSH_AUTOSUGGEST_STRATEGY="match_prev_cmd"
# Arrow keys / Ctrl-P / Ctrl-N search history for entries matching the
# already-typed prefix (provided by the substring-search plugin above).
bindkey '^[[A' history-substring-search-up
bindkey '^[[B' history-substring-search-down
bindkey -M emacs '^P' history-substring-search-up
bindkey -M emacs '^N' history-substring-search-down
# bash-compatible completion layer, needed for AWS CLI's aws_completer.
autoload -Uz bashcompinit
bashcompinit
complete -C aws_completer aws
| true
|
16d5ae21532c03470ed812bfc8bfa7e75057e6e9
|
Shell
|
VirtualSkin-Project/limb-admin
|
/limb-manager
|
UTF-8
| 1,054
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
### BEGIN INIT INFO
# Provides: subscribe.sh
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start daemon at boot time
# Description: Enable service provided by daemon.
### END INIT INFO
# Load VIRTUALSKIN_LIMB_ADMIN from the defaults file, if present.
[ -f /etc/default/limb-manager ] && . /etc/default/limb-manager
if [ -z "$VIRTUALSKIN_LIMB_ADMIN" ] ; then
	echo "VIRTUALSKIN_LIMB_ADMIN is not set, please set it in /etc/default/limb-manager" >> /tmp/limb-manager.log
	exit 1
fi
case "$1" in
start)
	sleep 10
	# BUG FIX: subscribe.sh previously ran in the foreground, so $! (the
	# PID of the most recent *background* job) was empty or stale and the
	# pidfile was useless. Launch it in the background and record its PID.
	$VIRTUALSKIN_LIMB_ADMIN/subscribe.sh &
	echo $! > /var/run/limb-manager.pid
	;;
stop)
	kill `cat /var/run/limb-manager.pid`
	rm /var/run/limb-manager.pid
	;;
restart)
	$0 stop
	$0 start
	;;
status)
	if [ -e /var/run/limb-manager.pid ]; then
		echo limb-manager is running, pid=`cat /var/run/limb-manager.pid`
	else
		echo limb-manager is NOT running
		exit 1
	fi
	;;
*)
	echo "Usage: $0 {start|stop|status|restart}"
esac
exit 0
| true
|
b6a8aadbee08d459737d14935cd9fdce1b858e02
|
Shell
|
LiboMa/gmetric
|
/network/network_gmetric.sh
|
UTF-8
| 7,039
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
### $Header: /var/lib/cvs/ops/ganglia/network_gmetric.sh,v 1.3 2006/07/11 17:29:27 ben Exp $
### this script reports network metrics to ganglia.
### It should be called from cron every n minutes.
### It will report network usage per interface
### and will automatically adjust for whatever
### timeframe it is called
### Copyright Simply Hired, Inc. 2006
### License to use, modify, and distribute under the GPL
### http://www.gnu.org/licenses/gpl.txt
VERSION=1.3
GMETRIC="/usr/bin/gmetric"
GMETRIC_ARGS="-c /etc/gmond.conf"
STATEFILE="/var/lib/ganglia/metrics/net.stats"
date=`date +%s`
procfile="/proc/net/dev"
# One sentinel file per failure mode: each error is reported only once,
# and the sentinel suppresses repeats on subsequent cron runs.
ERROR_CREATE="/tmp/network_gmetric_create_statefile_failed"
ERROR_IOSTAT="/tmp/network_gmetric_no_procfile"
ERROR_DEVNAMES="/tmp/network_gmetric_bad_devname"
ERROR_DEVNAMES2="/tmp/network_gmetric_bad_devname_didnt_fix"
ERROR_GMETRIC="/tmp/network_gmetric_no_gmetric"
ERROR_TIMEDIFF="/tmp/network_gmetric_timediff"
ERROR_NOTROOT="/tmp/network_gmetric_notroot"
if [ $UID -ne 0 ]
then
	if [ -e $ERROR_NOTROOT ] ; then exit 1; fi
	echo "Error: this script must be run as root."
	touch $ERROR_NOTROOT
	exit 1
fi
rm -f $ERROR_NOTROOT
if [ "x$1" == "x-h" ]
then
	echo "Usage: network_gmetric.sh [--clean]"
	echo " --clean delete all tmp files"
	exit 0
fi
if [ "x$1" == "x--clean" ]
then
	# BUG FIX: this list referenced $ERROR_DEVNAME/$ERROR_DEVNAME2, which
	# were never defined (the variables are ERROR_DEVNAMES/ERROR_DEVNAMES2),
	# so --clean left the bad-devname sentinel files behind.
	rm -f $ERROR_CREATE $ERROR_IOSTAT $ERROR_DEVNAMES $ERROR_DEVNAMES2 $ERROR_GMETRIC $ERROR_TIMEDIFF $ERROR_NOTROOT $STATEFILE
	retval=$?
	if [ $retval -ne 0 ]
	then
		echo "failed to clean up."
		exit 1
	else
		echo "All cleaned up."
		exit 0
	fi
fi
# Save stderr on fd 3 and silence it for the statefile checks below.
exec 3>&2
exec 2>/dev/null
# if the GMETRIC program isn't installed, complain
if [ ! -e $GMETRIC ]
then
if [ -e $ERROR_GMETRIC ] ; then exit 1; fi
echo ""
echo "Error: GMETRIC doesn't seem to be installed."
echo "$GMETRIC doesn't exist."
echo ""
touch $ERROR_GMETRIC
exit 1
fi
# if the /proc/net/dev file doesn't exist (eh?!) complain
if [ ! -e $procfile ]
then
if [ -e $ERROR_IOSTAT ]
then
exit 1
fi
echo ""
echo "Error: $procfile doesn't seem to exist."
echo ""
touch $ERROR_IOSTAT
exit 1
fi
# if the statefile doesn't exist, we either haven't
# run yet or there's something bigger wrong.
if [ ! -e $STATEFILE ]
then
if [ ! -d `dirname $STATEFILE` ]
then
mkdir -p `dirname $STATEFILE`
fi
# Statefile format: line 1 is the epoch timestamp, followed by the eth*
# lines of /proc/net/dev with the trailing ':' detached from the name.
echo "$date" > $STATEFILE
cat $procfile | sed -e "s/:/ /" | grep "eth" >> $STATEFILE
if [ ! -e $STATEFILE ]
then
# if it didn't exist and we couldn't create
# it, we should just scream bloody murder and die.
# only scream once though...
if [ -e $ERROR_CREATE ]
then
exit 1
fi
echo ""
echo "ERROR: couldn't create $STATEFILE"
echo ""
touch $ERROR_CREATE
exit 1
fi
echo "Created statefile. Exitting."
exit 0
fi
# restore stderr
exec 2>&3
exec 3>&-
# this script uses gets its stats directly from /proc
# stats: flattened array of the current /proc/net/dev eth fields
# (17 fields per device); old_stats: the previous snapshot, with the
# timestamp occupying slot 0.
stats=(`cat $procfile | sed -e "s/:/ /" | grep "eth"`)
old_stats=(`cat $STATEFILE`)
old_date=${old_stats[0]}
# Per-device and cumulative counters, zeroed before the main loop.
read=0
write=0
old_read=0
old_write=0
read_sum=0
write_sum=0
### get_rw <n> -- look up the <n>th interface in the freshly-read
### ${stats[@]} array (17 fields per device, as laid out by
### /proc/net/dev) and publish its name and cumulative byte counters
### through the globals $devname, $read and $write.
### Returns 1 once <n> runs past the last recorded device.
function get_rw() {
	local idx
	idx=$(( $1 * 17 ))
	# An empty slot means we walked off the end of the array.
	[ "k${stats[$idx]}" == "k" ] && return 1
	devname=${stats[$idx]}
	read=${stats[$(( idx + 1 ))]}
	write=${stats[$(( idx + 9 ))]}
	return 0
}
# get_old_rw <n> -- same as get_rw but against the previous snapshot in
# ${old_stats[@]}; publishes $old_devname, $old_read and $old_write.
# Returns 1 once <n> runs past the last recorded device.
function get_old_rw() {
	local idx
	# Offset by one: slot 0 of the statefile holds the timestamp.
	idx=$(( $1 * 17 + 1 ))
	[ "k${old_stats[$idx]}" == "k" ] && return 1
	old_devname=${old_stats[$idx]}
	old_read=${old_stats[$(( idx + 1 ))]}
	old_write=${old_stats[$(( idx + 9 ))]}
	return 0
}
# Seconds elapsed since the statefile was written; raw counters are
# converted to per-second rates over this window.
time_diff=$(($date - $old_date))
devnum=0
get_rw $devnum
get_old_rw $devnum
res=$?
while [ $res -eq 0 ]
do
	# if devname and old_devname aren't the same,
	# this whole function is invalid.
	if [ $devname != $old_devname ]
	then
		if [ -e $ERROR_DEVNAMES ]
		then
			if [ -e $ERROR_DEVNAMES2 ] ; then exit 1; fi
			echo "Sorry, my attempt at fixing the problem failed."
			echo "It's now up to you, dear human."
			touch $ERROR_DEVNAMES2
			exit 1
		fi
		echo "something is broken."
		echo "devnames are not the same."
		echo "devname=$devname old_devname=$old_devname"
		echo "I'm backing up the current statefile ($STATEFILE) "
		echo "and will recreate it next time to see if that fixes this."
		mydate=`date +%Y%m%d%H%M%S`
		mv -fv $STATEFILE{,.${mydate}}
		touch $ERROR_DEVNAMES
		exit 1
	fi
	# BUG FIX: the original removed "$ERROR_DEVNAME2" (undefined variable);
	# the sentinel is named ERROR_DEVNAMES2.
	rm -f $ERROR_DEVNAMES $ERROR_DEVNAMES2
	#devname, read, write, old_devname, old_read, old_write
	# are all set. calculate stat/sec and report.
	if [ $read -lt $old_read ]
	then
		# counter wrapped - add 2^32
		let "read += 4294967296"
	fi
	if [ $write -lt $old_write ]
	then
		# counter wrapped - add 2^32
		# BUG FIX: was 4294967295 (2^32 - 1), which made every wrapped
		# write delta off by one byte.
		let "write += 4294967296"
	fi
	read_diff=$(($read - $old_read))
	write_diff=$(($write - $old_write))
	if [ $time_diff -eq 0 ]
	then
		if [ -e $ERROR_TIMEDIFF ] ; then exit 1 ; fi
		echo "something is broken."
		echo "time_diff is 0."
		touch $ERROR_TIMEDIFF
		exit 1
	fi
	rm -f $ERROR_TIMEDIFF
	# bc provides the fractional bytes/sec arithmetic bash lacks.
	rps=`echo "scale=3;$read_diff / $time_diff" | bc`
	wps=`echo "scale=3;$write_diff / $time_diff" | bc`
	read_sum=`echo "scale=3;$read_sum + $rps" | bc`
	write_sum=`echo "scale=3;$write_sum + $wps" | bc`
	# log current values
	# echo `date +%Y.%m.%d.%H:%M:%S` "network_gmetric values: ${devname}: old_read: $old_read old_write: $old_write read: $read write: $write RPS: $rps WPS: $wps" >> /var/log/gmetric.log
	# report what we have calculated
	# only send in metric if it's greater than 0
	if [ `expr $rps \> 0` -eq 1 ];
	then
		$GMETRIC $GMETRIC_ARGS --name="${devname}_rx" --value="$rps" --type="float" --units="bytes/sec"
	fi
	if [ `expr $wps \> 0` -eq 1 ];
	then
		$GMETRIC $GMETRIC_ARGS --name="${devname}_tx" --value="$wps" --type="float" --units="bytes/sec"
	fi
	# echo "$devname $rps $wps $read_sum $write_sum " >> /tmp/foo.txt
	devnum=$((devnum + 1))
	get_rw $devnum
	get_old_rw $devnum
	res=$?
done
# log current values
#echo `date +%Y.%m.%d.%H:%M:%S` "network_gmetric values: sum: RPS: $read_sum WPS: $write_sum" >> /var/log/gmetric.log
# only send in metric if it's greater than 0
if [ `expr $read_sum \> 0` -eq 1 ];
then
	$GMETRIC $GMETRIC_ARGS --name="network_rx" --value="$read_sum" --type="float" --units="bytes/sec"
fi
if [ `expr $write_sum \> 0` -eq 1 ];
then
	$GMETRIC $GMETRIC_ARGS --name="network_tx" --value="$write_sum" --type="float" --units="bytes/sec"
fi
# Persist the current snapshot for the next run.
echo "$date" > $STATEFILE
cat $procfile | sed -e "s/:/ /" | grep "eth" >> $STATEFILE
# BUG FIX: cleanup list used undefined $ERROR_DEVNAME/$ERROR_DEVNAME2.
rm -f $ERROR_CREATE $ERROR_IOSTAT $ERROR_DEVNAMES2 $ERROR_DEVNAMES $ERROR_GMETRIC $ERROR_TIMEDIFF $ERROR_NOTROOT
| true
|
975e134bde8085d0c5ff2a21e8c965da715bf02f
|
Shell
|
chinthirla/Ansible
|
/Batch47/shell-scripting/shell-scripting-master/01-comment.sh
|
UTF-8
| 322
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Any Line starts with # character is treated as comment by any interpreter along with BASH shell.
## Single hash or any hashes does not really matter, If it starts with single hash that is a comment
## How to do multi line commenting.
# The here-document below is redirected to no command at all, so bash
# reads and discards its body -- a common multi-line "comment" idiom.
# Note: with an unquoted delimiter the body still undergoes expansion,
# so avoid $ or backticks inside (or quote it: <<'COMMENT').
<<COMMENT
ls
pwd
uname
cat /etc/*release
COMMENT
## One more line ##
| true
|
d1a3efb0b3a9e91b3b354dce0b5fe719b0cb38fd
|
Shell
|
Dalembert/dotfiles
|
/setup.zsh
|
UTF-8
| 1,082
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/zsh
# Setup dotfiles (as symlink) in user's home directory.
# Create plugin directory for vim
function skipping_msg {
	echo "$1 already exists, skipping"
}
# Link every file in this directory except the setup script and README.
# BUG FIX: the old glob *[^setup.zsh|README.md] was a *character class*
# (it excluded any file whose last character appeared in that set, e.g.
# anything ending in 'h' or 'd'), not a filename exclusion. Skip the two
# names explicitly instead.
for FILE in *
do
	if [[ "$FILE" == "setup.zsh" || "$FILE" == "README.md" ]]
	then
		continue
	fi
	if [[ -a "$HOME/.$FILE" ]]
	then
		skipping_msg $FILE
	else
		print "$FILE: Create symbolic link from $HOME/.$FILE to $(pwd)/$FILE"
		ln -s "$(pwd)/$FILE" "$HOME/.$FILE"
	fi
done
# create vim pack directory and install plugins on first run
VIM_PACK="$HOME/.vim/pack/$USER/start"
# BUG FIX: the test used $VIM_START, which is never defined, so the
# clones re-ran (and failed) on every invocation.
if [[ -a $VIM_PACK ]]
then
	skipping_msg $VIM_PACK
else
	git clone https://github.com/cormacrelf/vim-colors-github "$VIM_PACK/vim-colors-github"
	git clone https://github.com/mattn/emmet-vim "$VIM_PACK/vim-emmet"
fi
# set git config
git config --global user.email "dalembert@pm.me"
git config --global user.name "Dalembert"
| true
|
2e89df5eb9d2b5c102316f510f865c164cdb4f2f
|
Shell
|
zemian/learn-mysql
|
/bin/restoredb.sh
|
UTF-8
| 408
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Author: Zemian Deng 2020-11-14
# Usage: restoredb.sh <sql_filename_suffix>
#
# Restores $DB_NAME from $DB_NAME.sql (or $DB_NAME-<suffix>.sql when a
# suffix is given). DB_NAME and DB_OPTS_USER are loaded from .env.
# FIX: shebang changed from /bin/sh to /bin/bash -- the script relies on
# [[ ]] and 'source', which are bashisms.
SCRIPT_DIR=$(dirname "$0")
source "$SCRIPT_DIR"/.env
DB_SCRIPT=$SCRIPT_DIR/$DB_NAME.sql
if [[ $# -eq 1 ]]; then
	DB_SCRIPT=$SCRIPT_DIR/$DB_NAME-$1.sql
fi
if [[ ! -e $DB_SCRIPT ]]; then
	echo "File does not exist: $DB_SCRIPT"
	# NOTE(review): exits 0 on a missing file, so callers cannot detect
	# the failure; kept for backward compatibility -- consider 'exit 1'.
	exit 0
fi
echo "Restoring $DB_NAME with $DB_SCRIPT"
# DB_OPTS_USER stays unquoted on purpose: it may hold several options
# that must word-split (e.g. "-u root -p...").
mysql -f $DB_OPTS_USER $DB_NAME < "$DB_SCRIPT"
| true
|
cf08d2ce74441e1499b9a8630483b3baff42d29f
|
Shell
|
lulf/qpid-dispatch
|
/bin/release.sh
|
UTF-8
| 2,402
| 3.984375
| 4
|
[
"BSD-3-Clause",
"MIT",
"CC0-1.0",
"OFL-1.1",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# Script to pull together an Apache Release
#
ME=$(basename "$0")
# Print usage to stdout.
usage() {
cat <<-EOF
USAGE: ${ME} [options] BRANCH VERSION
Creates an Apache release tarball.
Mandatory arguments:
BRANCH The git branch/tag for the build
VERSION The release version.
Optional arguments:
-h This help screen
EOF
}
while getopts "h" opt; do
	case $opt in
		h)
			usage
			exit 0
			;;
		\?)
			echo "Invalid option: -$OPTARG" >&2
			usage
			exit 1
			;;
		:)
			echo "Option -$OPTARG requires an argument." >&2
			usage
			exit 1
			;;
	esac
done
BRANCH=${1-}
VERSION=${2-}
if [[ -z "$BRANCH" ]] || [[ -z "$VERSION" ]]; then
	printf "Missing one or more required argument.\n\n" >&2
	usage
	exit 1
fi
URL=https://git-wip-us.apache.org/repos/asf/qpid-dispatch.git
WORKDIR=$(mktemp -d)
# FIX: remove the scratch checkout on every exit path; the original
# leaked one temp directory per run.
trap 'rm -rf "$WORKDIR"' EXIT
BASENAME=qpid-dispatch-${VERSION}
GITDIR=$WORKDIR/$BASENAME
FILENAME=$PWD/${BASENAME}.tar.gz
if [ -f "$FILENAME" ]; then rm -f "$FILENAME"; fi
echo "Checking out to ${WORKDIR}..."
cd "$WORKDIR"
git clone "$URL" "$BASENAME" >/dev/null || exit 1
cd "$GITDIR"
git checkout "$BRANCH" >/dev/null || exit 1
# The tag being released must agree with the in-tree VERSION.txt.
BUILD_VERSION=$(cat "$GITDIR/VERSION.txt") || exit 1
test "$VERSION" == "$BUILD_VERSION" || {
	echo "Version mismatch: $VERSION != $BUILD_VERSION. Please update VERSION.txt in the source"
	exit 1
}
echo "Building source tarball $FILENAME"
cd "$WORKDIR"
tar --exclude release.sh --exclude .git --exclude .gitignore -zcvf "$FILENAME" ${BASENAME} >/dev/null || exit 1
echo "Done!"
| true
|
fab61ac63aa3f414014595ec62edecef61a1501f
|
Shell
|
shureg/rng
|
/.scripts/make_hdr.sh
|
UTF-8
| 365
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# make_hdr.sh <header_name> <work_dir> <src_dir>
# Regenerate the umbrella header <header_name> inside <work_dir>: it
# #includes every C/C++ header found under <src_dir>/ (except itself),
# wrapped in an include guard derived from the uppercased <src_dir>.
# FIX: validate arguments -- without them the find below degenerated
# into a scan of '/'.
if [ $# -ne 3 ]; then
	echo "Usage: $0 <header_name> <work_dir> <src_dir>" >&2
	exit 1
fi
echo Updating the top header file "$1"
cd "$2" || exit 1
# -fprintf writes one '#include "path"' line per header into hpp.tmp.
find "$3"/ -regex ".*\.\(h\|hpp\|hxx\|H\|h++\|hh\)" ! -name "$1" -fprintf hpp.tmp "#include \"%p\"\n"
# FIX: quote the character classes -- unquoted [:lower:]/[:upper:] are
# glob patterns and could match files in the current directory.
uc_name=$(echo "$3" | tr '[:lower:]' '[:upper:]')
printf "#ifndef %s_TOP_INC\n" "$uc_name" > "$1"
printf "#define %s_TOP_INC\n" "$uc_name" >> "$1"
cat hpp.tmp >> "$1"
printf "#endif //%s_TOP_INC" "$uc_name" >> "$1"
rm hpp.tmp
| true
|
76ee6ca7a961f28377cdc0ea57e4692d8f723a21
|
Shell
|
palankai/config
|
/bash/unused.bash
|
UTF-8
| 750
| 3.53125
| 4
|
[] |
no_license
|
# Change terminal window and tab name.
# FIX: pass the title as a printf *data* argument -- the original
# interpolated $1 into the format string, so titles containing '%'
# were mis-parsed as format directives.
function tabname {
	printf "\e]1;%s\a" "$1"
}
function winname {
	printf "\e]2;%s\a" "$1"
}
# OSX: Quit an application from the command line
quit () {
	# "$@" (not $*) keeps app names with spaces intact.
	for app in "$@"; do
		osascript -e 'quit app "'"$app"'"'
	done
}
# OSX: Pass 0 or 1 to hide or show hidden files in Finder
function showhidden() {
	defaults write com.apple.Finder AppleShowAllFiles "$1"
	killall Finder
}
# OSX: show postscript rendered man page in Preview
function pman () {
	# just using builtins (but Preview pops up a conversion dialog)
	# man -t "$@" | open -f -a /Applications/Preview.app
	# or convert using ps2pdf, requires "brew install ghostscript"
	man -t "$@" | ps2pdf - - | open -f -a /Applications/Preview.app
}
| true
|
644c683c28ddf66cb13a1d9578895e7a28e68822
|
Shell
|
bpopovic42/init
|
/clean/network/02
|
UTF-8
| 415
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Show the Ethernet interface, derive the broadcast address, then ping it
# once to populate the ARP cache and print neighbours sharing this
# host's subnet prefix.
# NOTE(review): interface 'en0' and this ifconfig output format look
# macOS-style -- confirm before relying on it on Linux (eth0/enp*).
echo "ETHERNET INTERFACE :"
ifconfig en0
broadcast_ip=$(ifconfig en0 | grep 'broadcast' | awk '{print $NF}')
echo
echo "BROADCAST'S ADDRESS :"
echo $broadcast_ip
# Strip '255' octets (plus one adjacent character on each side) from the
# broadcast address to obtain the subnet prefix used for filtering below.
subnet=$(echo $broadcast_ip | sed -E 's/.?255.?//g')
echo
echo "IPS IN THE SAME SUB-NETWORK :"
# The broadcast ping fills the ARP table; arp -a output is reduced to
# dotted quads, then to those extending the prefix by two more octets.
# NOTE(review): that trailing pattern presumes a two-octet prefix (a
# x.y.255.255-style broadcast) -- verify for /24 networks.
IPS=$(ping -c1 $broadcast_ip > /dev/null ; arp -a | grep -o "[0-9]\+\.[0-9]\+\.[0-9]\+\.[0-9]\+" | grep -o "$subnet\.[0-9]\+\.[0-9]\+")
echo $IPS
| true
|
14a18ec9ca0dead287cd222fb75bf1d2b07cce31
|
Shell
|
robrod14/test
|
/ef-cms/web-api/setup-irs-user.sh
|
UTF-8
| 2,235
| 3.515625
| 4
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
#!/bin/bash -e
# Usage
# creates the IRS user in the IRS user pool
# Requirements
# - curl must be installed on your machine
# - jq must be installed on your machine
# - aws cli must be installed on your machine
# - aws credentials must be setup on your machine
# Arguments
# - $1 - the environment [dev, stg, prod, exp1, exp1, etc]
# Fail fast when required arguments / environment variables are missing.
[ -z "$1" ] && echo "The ENV to deploy to must be provided as the \$1 argument. An example value of this includes [dev, stg, prod... ]" && exit 1
[ -z "${USTC_ADMIN_PASS}" ] && echo "You must have USTC_ADMIN_PASS set in your environment" && exit 1
[ -z "${AWS_ACCESS_KEY_ID}" ] && echo "You must have AWS_ACCESS_KEY_ID set in your environment" && exit 1
[ -z "${AWS_SECRET_ACCESS_KEY}" ] && echo "You must have AWS_SECRET_ACCESS_KEY set in your environment" && exit 1
ENV=$1
REGION="us-east-1"
# Resolve this environment's API Gateway REST API id.
restApiId=$(aws apigateway get-rest-apis --region="${REGION}" --query "items[?name=='gateway_api_${ENV}'].id" --output text)
# Resolve the Cognito user pool and app client; the ${VAR%\"}/${VAR#\"}
# expansions strip the surrounding double quotes from the CLI output.
USER_POOL_ID=$(aws cognito-idp list-user-pools --query "UserPools[?Name == 'efcms-${ENV}'].Id | [0]" --max-results 30 --region "${REGION}")
USER_POOL_ID="${USER_POOL_ID%\"}"
USER_POOL_ID="${USER_POOL_ID#\"}"
CLIENT_ID=$(aws cognito-idp list-user-pool-clients --user-pool-id "${USER_POOL_ID}" --query "UserPoolClients[?ClientName == 'client'].ClientId | [0]" --max-results 30 --region "${REGION}")
CLIENT_ID="${CLIENT_ID%\"}"
CLIENT_ID="${CLIENT_ID#\"}"
# Build the JSON payload for the user-creation request. The heredoc
# delimiter is unquoted, so $email etc. expand; the trailing '$' in
# "Testing1234$" stays literal because '$"' is not a valid expansion.
generate_post_data() {
email=$1
role=$2
section=$3
name=$4
cat <<EOF
{
"email": "$email",
"password": "Testing1234$",
"role": "$role",
"section": "$section",
"name": "$name"
}
EOF
}
# Sign in as the USTC admin to obtain a Cognito ID token.
response=$(aws cognito-idp admin-initiate-auth \
--user-pool-id "${USER_POOL_ID}" \
--client-id "${CLIENT_ID}" \
--region "${REGION}" \
--auth-flow ADMIN_NO_SRP_AUTH \
--auth-parameters USERNAME="ustcadmin@example.com"',PASSWORD'="${USTC_ADMIN_PASS}")
adminToken=$(echo "${response}" | jq -r ".AuthenticationResult.IdToken")
# POST the IRS superuser to the deployed users endpoint.
curl --header "Content-Type: application/json" \
--header "Authorization: Bearer ${adminToken}" \
--request POST \
--data "$(generate_post_data "service.agent.test@irs.gov" "irsSuperuser" "irsSuperuser" "IRS Superuser")" \
"https://${restApiId}.execute-api.us-east-1.amazonaws.com/${ENV}/users"
| true
|
5b04ea62721789080fd75e2e449996e1a0d6c52d
|
Shell
|
poids/Immunodietica
|
/Scripts/EpitopeUniProt_OLD.sh
|
UTF-8
| 1,733
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Map each epitope (peptide, column 1 of $1) to a UniProt protein name:
# fast grep against the 'swissprot' flat file first, falling back to a
# blastp search. Writes uniprot_names.tsv, unmatched_epitopes.txt and
# prints summary statistics.
chmod 775 $1
EPITOPES=`awk '{print $1}' $1`
echo 'Building table with peptide sequence & UniProt names'
for epitope in ${EPITOPES};
do
#Runs grep search for UniProt name
PROTEIN=`grep -m1 -B20 ${epitope} swissprot | grep '>' | tail -1 | cut -d '=' -f 2 | cut -d ";" -f 1`
VALID_GREP=`grep -c ${epitope} swissprot`
#Checks to see if grep function works, if not it will try and run blast
# NOTE: the '> temp_fasta.fa' on the matching 'fi' redirects the whole
# if-block: the two echos write the query FASTA, and blastp then reads
# that same file (builtin echo writes are flushed per command). The
# redirection also (re)creates the file on every iteration -- even when
# this branch is skipped -- which is why the unconditional 'rm' below
# always succeeds.
if [ ${VALID_GREP} == 0 ]
then
#Creates temporary fasta file for missing epitope
echo ">"${epitope}
echo ${epitope}
#Runs blast search on unmatched epitope
PROTEIN=`blastp -query ./temp_fasta.fa -db swissprot -outfmt 5 -max_target_seqs 5 | grep 'Full=' | awk -F 'Full=' '{ print $2 }' | awk -F '</Hit_def>' '{ print $1 }' | cut -d ';' -f 1 | head -1`
fi > temp_fasta.fa
#Removes temporary fasta file
rm ./temp_fasta.fa
#If no protein is found, add epitope to error_log file
if [ -z "$PROTEIN" ]
then
echo ${epitope}
fi >> unmatched_epitopes.txt
# Emit "<epitope><TAB><protein>" for this row (loop stdout is collected
# into uniprot_names.tsv by the 'done >' redirection below).
paste <(printf %s "${epitope}") <(printf %s "$PROTEIN")
done > uniprot_names.tsv
echo 'Building dataframe with corresponding Gene/Protein Names'
#Runs python script to build dataframe containing ACC-ID and corresponding Gene/Protein Name
python3 PROTNAME_2_GENE.py
#Summary Statistics
EPITOPE_NO=`awk '{print $2}' uniprot_names.tsv | wc -l`
UNIPROT=`awk '{print $2}' uniprot_names.tsv | grep ^[A-Z0-9] | wc -l`
UNMATCHED=`wc -l unmatched_epitopes.txt | awk {'print $1'}`
GENE_NAME=`wc -l acc_gene.csv | awk {'print $1'}`
echo "Number of epitopes in original list: ${EPITOPE_NO}"
echo "Matching UniProt Protein Names found in query: ${UNIPROT}"
echo "Number of Epitopes Unmatched: ${UNMATCHED}"
echo "Number of matching Protein/Gene Names ${GENE_NAME}"
| true
|
965f75ac629d19d3c4e8b41a0fe1b4771c4da102
|
Shell
|
xuansinhmedia/Service_Scripts
|
/alert_log_by_modify_time.sh
|
UTF-8
| 2,340
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
#@author: xuansinhmedia
# Description about this script:
# 1. Check time modify of log file.
# 2. Compare between time modify and current time
# 3. If time modify not change after 3 minutes (180 s), system will send alert.
# 4. Also alert when TCPEV_DLG_REQ_DISCARD appears >= 5 times in the
#    last 20 log lines.
# Fixes vs. original: the shebang was '#!bin/bash' (missing leading
# slash, so the interpreter line was ignored); backticks replaced with
# $(); variable expansions quoted; grep|wc -l replaced by grep -c.
export ORACLE_HOME=/usr/lib/oracle/11.2/client64/
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/usr/lib64
PATH=$PATH:$HOME/bin:/sbin:$ORACLE_HOME/bin
export PATH=/usr/lib/oracle/11.2/client64/bin:$PATH
# Check time modify of log file.
file_log=/u01/vas_pay4me/sccpgw/nohup.out
time_log=$(stat "$file_log" | grep Modify | awk '{print $2 "-" $3}' | awk -F "-" '{print $1 "/" $2 "/" $3 " " $4}' | cut -d'.' -f1)
# Convert time from log file to unix timestamp
log_second=$(date -d "$time_log" +%s)
# extract time of sysdate to unix timestamp
time_sys=$(date +%s)
diff=$((time_sys - log_second))
# Check subtract of system time and the last time of log file;
# if it is at least 3 minutes (180 s), insert an alert row.
# NOTE(review): credentials are hard-coded in the connect strings below
# and should be moved to a protected config file.  Also note the first
# connect string targets SERVICE_NAME=marketdbXDB while the second one
# targets marketdb — confirm which is intended.
if [ "$diff" -ge 180 ]
then
sqlplus 'vas_pay4me/ADweredsd2ds@(DESCRIPTION=(LOAD_BALANCE=yes)(ADDRESS=(PROTOCOL=TCP)(HOST=10.229.42.173)(PORT=1521))(ADDRESS=(PROTOCOL=TCP)(HOST=10.229.42.174)(PORT=1521))(CONNECT_DATA=(FAILOVER_MODE=(TYPE=select)(METHOD=basic)(RETRIES=180)(DELAY=5))(SERVER=shared)(SERVICE_NAME=marketdbXDB)))' <<sql
INSERT INTO "MSG_ALERTER" (DOMAIN, THRESHOLD, ISSUE, ALERTMSG,INSDATE,ALERT_LEVEL, CONTACT, GROUPNAME) VALUES ('MVT_P4M01', 'Alert', 'Alert', 'No log gctload in $((diff/60)) minitues',sysdate, 'serious', 'vnteleco', 'admin');
quit
sql
fi
# Check log TCPEV_DLG_REQ_DISCARD to send alert
# 1. Grep log and count filter by: TCPEV_DLG_REQ_DISCARD
# 2. if > 5 in 20 rows --> send mail alert
discard_log=$(tail -n 20 "$file_log" | grep -c "TCPEV_DLG_REQ_DISCARD")
if [ "$discard_log" -ge 5 ]
then
echo "$discard_log" "seen in gctload"
sqlplus 'vas_pay4me/ADweredsd2ds@(DESCRIPTION=(LOAD_BALANCE=yes)(ADDRESS=(PROTOCOL=TCP)(HOST=10.229.42.173)(PORT=1521))(ADDRESS=(PROTOCOL=TCP)(HOST=10.229.42.174)(PORT=1521))(CONNECT_DATA=(FAILOVER_MODE=(TYPE=select)(METHOD=basic)(RETRIES=180)(DELAY=5))(SERVER=shared)(SERVICE_NAME=marketdb)))' <<sql
INSERT INTO "MSG_ALERTER" (DOMAIN, THRESHOLD, ISSUE, ALERTMSG,INSDATE,ALERT_LEVEL, CONTACT, GROUPNAME) VALUES ('MVT_P4M01', 'Alert', 'Alert', 'Log TCPEV_DLG_REQ_DISCARD seen $discard_log',sysdate, 'serious', 'vnteleco', 'admin');
quit
sql
fi
| true
|
8ddab25573e1853c37c552e8dc3b71931250b953
|
Shell
|
athulase/my-aibench-containers
|
/cnn-training/workload_scripts/h-start-container.sh
|
UTF-8
| 218
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: run the stock discovery-service client unless
# the image user supplied an override start script via
# OVERRIDE_START_CONTAINER.
if [ -n "$OVERRIDE_START_CONTAINER" ]
then
	/bin/bash $OVERRIDE_START_CONTAINER
else
	# Locate the Discovery Service
	OWN_IP=$(hostname -i)
	python3.6 /discovery-service.py client
	sleep 1
fi
| true
|
2fa697036ecaa00a0e1bf3ea6a67c696bbc3615f
|
Shell
|
paucabral/ignite
|
/sysadTools/scripts/dashboard/backup.sh
|
UTF-8
| 208
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Simple home-directory backup helper.
#   $1 = 1 : create ~/backup.tgz from $HOME
#   $1 = 2 : extract ~/backup.tgz into ~/backup
#   else   : delete ~/backup.tgz
# Quoting "$1" with a default avoids the original's syntax error
# ('[ = "1" ]') when the script is invoked with no argument.
if [ "${1:-}" = "1" ]
then
    echo "Run backup"
    tar -czvf ~/backup.tgz ~
elif [ "${1:-}" = "2" ]
then
    echo "Extract backup"
    # Ensure the target directory exists; tar -C fails otherwise.
    mkdir -p ~/backup
    tar -xzvf ~/backup.tgz -C ~/backup
else
    echo "Delete backup"
    rm ~/backup.tgz
fi
| true
|
848d466a07be02be12e1ca4ff69be194dfe330bd
|
Shell
|
codyhammock/tapis-deployer
|
/templates/admin/tapischex
|
UTF-8
| 704
| 3.265625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Vault health watchdog: posts a Slack alert (via the webhook stored in
# the global-config ConfigMap) when the tenant's Vault is sealed or
# unreachable.

# Print the Slack webhook URL from the global-config ConfigMap.
slackurl () {
  kubectl get configmap global-config -o json | jq -r '.data.slack_url'
}

# Post $1 as a Slack message to the alerts channel.
# NOTE(review): $1 is spliced into the JSON payload unescaped; a message
# containing double quotes would break the payload.
alertcmd () {
  s="$(slackurl)"
  p='payload={"channel": "#tapisv3_dev_stg_alerts", "username": "tapisdev", "text": "'$1'"}'
  echo "$p" | curl -X POST --data-binary @- "$s"
}

# Print "true" / "false": Vault's own report of whether it is sealed.
checkvault () {
  kubectl exec -it deploy/vault -- sh -c "wget -q -O - http://vault:8200/v1/sys/health" | jq -r ".sealed"
}

# Print the tenant domain from the global-config ConfigMap.
tenantdomain () {
  kubectl get configmap global-config -o json | jq -r '.data.tenant_domain'
}

# vault seal check
t="$(tenantdomain)"
if v="$(checkvault)"
then
  # checkvault yields the *string* "true"/"false"; the original compared
  # it with the numeric operator -eq ('[ true -eq true ]'), which always
  # errored out, so the "vault sealed" alert could never fire.
  if [ "$v" = "true" ]
  then
    alertcmd "ERROR: vault sealed in domain $t"
  fi
else
  alertcmd "ERROR: vault down for domain $t"
fi
| true
|
7159ac94cf6c9de348e5990d6778e1a7c246f581
|
Shell
|
hadess/raspbian-ua-netinst
|
/build-musicbox.sh
|
UTF-8
| 745
| 3.015625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"ISC"
] |
permissive
|
#!/bin/sh -e
# Build a Pi-MusicBox installer image and boot it under QEMU.
# Fix vs. original: pushd/popd are bash builtins, not POSIX sh, so the
# script broke on dash/ash; a subshell is used instead and also keeps
# the caller's working directory unchanged.
if [ ! -f kernel-qemu ] ; then
	# See http://xecdesign.com/qemu-emulating-raspberry-pi-the-easy-way/
	wget http://xecdesign.com/downloads/linux-qemu/kernel-qemu
fi
./build.sh
# See https://github.com/woutervanwijk/Pi-MusicBox/issues/236 for default size
# dd if=/dev/zero of=Pi-MusicBox-installer.img count=1 bs=1002438656
truncate -s +1002438656 Pi-MusicBox-installer.img
mkfs.vfat -n "BOOTORIG" Pi-MusicBox-installer.img
IMAGE="$(pwd)/Pi-MusicBox-installer.img"
# Copy the boot files into the FAT image (mtools path '::/' = image root).
(
	cd bootfs
	mcopy -i "$IMAGE" * ::/
)
# FIXME: Use -nographic ?
qemu-system-arm -kernel kernel-qemu -initrd bootfs/installer-rpi1.cpio -cpu arm1176 -m 256 -M versatilepb -no-reboot -serial stdio -append "root=/dev/sda1 panic=0 ro" -hda Pi-MusicBox-installer.img
| true
|
5c9f9b8afefd19242d73ac3e1d09bdda43bbf244
|
Shell
|
nanajo/tools
|
/ec2start
|
UTF-8
| 1,495
| 4.40625
| 4
|
[] |
no_license
|
#!/bin/bash
#The ec2start command to start EC2 instance using tags.
#User must gives tag name and value. The tag value is allowed to put multiple parameters.
#Invariables
EXIT_NORMAL=0
EXIT_FAILURE=1
#Print usage. (-h option)
function usage () {
echo "\
Usage:
-t Tag Name (Required)
-v Tag Value (Required)
-r AWS Region (Option)
-p Profile name for awscli (Option)
-h Print this message
If you don't defined region and profile, this command use default region defined in ~/.aws/config \
"
exit "$1"
}
#Do exit with usage and EXIT_FAILURE when no options.
if [ $# -eq 0 ];
then
usage $EXIT_FAILURE
fi
#Parse options
#-t Tag Name
#-v Tag Value
#-r Region
#-p Profile
#-h print usage
while getopts t:v:r:p:h OPT
do
  case $OPT in
    t)
      TAG=$OPTARG
      ;;
    v)
      VALUES=$OPTARG
      ;;
    r)
      REGION=$OPTARG
      ;;
    p)
      PROFILE=$OPTARG
      ;;
    h)
      usage $EXIT_NORMAL
      ;;
  esac
done
# Both -t and -v are required.
# (The original used '[ -v $TAG ]', which tests whether a variable
# *named by the value* of TAG is set — never a check for "TAG unset".)
if [ -z "${TAG:-}" ] || [ -z "${VALUES:-}" ];
then
usage $EXIT_FAILURE
fi
# Optional region/profile: append them to the aws CLI options only when
# actually supplied.  (The original '[ ! -v $REGION ]' logic was
# inverted and mis-used -v the same way as above.)
OPTION=""
if [ -n "${REGION:-}" ];
then
OPTION="$OPTION --region $REGION"
fi
if [ -n "${PROFILE:-}" ];
then
OPTION="$OPTION --profile $PROFILE"
fi
#Get Instance IDs for every comma-separated tag value.
IDList=""
for TAG_VALUE in $(echo "$VALUES" | sed -e "s/,/ /g")
do
# $OPTION is intentionally unquoted: it holds zero or more CLI words.
InstanceID=$(aws ec2 describe-instances --output text $OPTION --query "Reservations[].Instances[?Tags[?Key == '$TAG'].Value|[0] == '$TAG_VALUE'][].InstanceId")
IDList="$IDList $InstanceID"
done
#StartInstances
# $IDList is intentionally unquoted: it must split into one argument
# per instance id.
aws ec2 start-instances --instance-ids $IDList
exit $EXIT_NORMAL
| true
|
d827fe999e5c9e2560e5fa251458ec61a1adadf8
|
Shell
|
macwinnie/docker_devenv
|
/init/container/it-e/suitecrm.sh
|
UTF-8
| 1,569
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Start the it-e SuiteCRM development container behind traefik.
# Must be launched by the dev-env framework, which sets SCRIPT_PATH,
# DATA_PATH and NETWORK_TRAEFIK and provides the helper functions used
# below (isContainerRunning, checkRunning, register_host, pullImage,
# build_url, controllNetwork) — defined in $SCRIPT_PATH/config/require.sh.
if [ -z ${SCRIPT_PATH+x} ]; then
echo "do not run this script directly!"
exit 1
fi
source $SCRIPT_PATH/config/require.sh
# Hard dependencies: reverse proxy and database must already be up.
isContainerRunning "system/traefik"
isContainerRunning "system/database"
cnt_group="it-e"
cnt_name="suitecrm"
docker_name="$cnt_group.$cnt_name"
image="iteconomics/apache:php7.0"
local_domain='suite'
register_host $local_domain # will be appended by ".$LOCAL_WILDCARD"!
# NOTE(review): 'checkRunning' apparently succeeds when the container is
# NOT yet running (the run happens inside the if) — confirm in require.sh.
if checkRunning "$docker_name"; then
pullImage $image
# Mount code + logs from DATA_PATH, add a php.ini override, enable
# xdebug, and label for traefik routing on the local wildcard domain.
docker run --detach \
--name $docker_name \
--restart unless-stopped \
--volume $DATA_PATH/$cnt_group/$cnt_name/code:/var/www/html/suiteCRM/:rw \
--volume $DATA_PATH/$cnt_group/$cnt_name/logs:/var/log/apache2:rw \
--volume $SCRIPT_PATH/container/$cnt_group/config/$cnt_name/php.ini:/usr/local/etc/php/conf.d/z_suite.ini:ro \
--env APACHE_PUBLIC_DIR="/var/www/html/suiteCRM" \
--env PHP_XDEBUG=1 \
--env XDEBUG_IDE_KEY="$(build_url $local_domain)" \
--label traefik.frontend.rule="Host:$(build_url $local_domain)" \
--label traefik.frontend.entryPoints=http \
--label traefik.docker.network=$NETWORK_TRAEFIK \
--label traefik.backend="it-e: suite CRM" \
--label traefik.port=80 \
$image
echo -e "\033[31mDo not forget to check out suite sourcecode – and probably a dev database:"
echo -e "git clone ssh://git@bitbucket.it-economics.de:7999/infra/suitecrm.git $DATA_PATH/$cnt_group/$cnt_name/code\033[0m"
# Attach the container to both the internal and the traefik networks.
controllNetwork "internal" "$docker_name"
controllNetwork "traefik" "$docker_name"
fi
| true
|
f9f84d062910851f5f5a4cefd73834bdbf316da0
|
Shell
|
diegoep/field-workshops-terraform
|
/instruqt-tracks/sentinel-for-terraform-v2/exercise-4/check-sentinel
|
UTF-8
| 3,985
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash -l
# Instruqt track checker: verifies the learner edited the two
# restrict-gcp-instance-image Sentinel policies as instructed.  Each
# check either passes silently or calls 'fail-message' (an Instruqt
# helper that reports the hint and aborts the check).
set -e
# Run 'touch /tmp/skip-check' to disable this check
if [ -f /tmp/skip-check ]; then
rm /tmp/skip-check
exit 0
fi
cd /root/sentinel
# NOTE(review): the 'grep -qL pattern file && fail-message …' idiom
# relies on GNU grep's -L exit status (success when the file is listed
# as *lacking* the pattern) to detect placeholders never replaced.
grep -qL "<resource_type>" restrict-gcp-instance-image-1.sentinel && fail-message "You have not replaced '<resource_type>' in restrict-gcp-instance-image-1.sentinel yet."
grep -q "google_compute_instance" restrict-gcp-instance-image-1.sentinel || fail-message "You have not replaced '<resource_type>' with 'google_compute_instance' in restrict-gcp-instance-image-1.sentinel yet."
grep -qL "<expression_1>" restrict-gcp-instance-image-1.sentinel && fail-message "You have not replaced '<expression_1>' in restrict-gcp-instance-image-1.sentinel yet."
fgrep -q "boot_disk.0.initialize_params.0.image" restrict-gcp-instance-image-1.sentinel || fail-message "You have not replaced '<expression_1>' with 'boot_disk.0.initialize_params.0.image' in restrict-gcp-instance-image-1.sentinel yet."
grep -qL "<expression_2>" restrict-gcp-instance-image-1.sentinel && fail-message "You have not replaced '<expression_2>' in restrict-gcp-instance-image-1.sentinel yet."
fgrep -q 'length(violatingGCPComputeInstances["messages"])' restrict-gcp-instance-image-1.sentinel || fail-message "You have not replaced '<expression_2>' with 'length(violatingGCPComputeInstances[\"messages\"]' in restrict-gcp-instance-image-1.sentinel yet."
grep -q "cp restrict-gcp-instance-image-1.sentinel restrict-gcp-instance-image.sentinel" /root/.bash_history || fail-message "You have not copied restrict-gcp-instance-image-1.sentinel to restrict-gcp-instance-image.sentinel yet."
grep -q "sentinel test -run=image.sentinel -verbose" /root/.bash_history || grep -q "sentinel test -run=image.sentinel" /root/.bash_history || fail-message "You haven't tested the restrict-gcp-instance-image-1.sentinel policy against the test cases yet. Please run 'sentinel test -run=image.sentinel -verbose'"
grep -qL "<expression_1>" restrict-gcp-instance-image-2.sentinel && fail-message "You have not replaced '<expression_1>' in restrict-gcp-instance-image-2.sentinel yet."
fgrep -q "instance.change.after.boot_disk" restrict-gcp-instance-image-2.sentinel || fail-message "You have not replaced '<expression_1>' with 'instance.change.after.boot_disk' in restrict-gcp-instance-image-2.sentinel yet."
grep -qL "<expression_2>" restrict-gcp-instance-image-2.sentinel && fail-message "You have not replaced '<expression_2>' in restrict-gcp-instance-image-2.sentinel yet."
fgrep -q "boot_disk[0].initialize_params" restrict-gcp-instance-image-2.sentinel || fail-message "You have not replaced '<expression_2>' with 'boot_disk[0].initialize_params' in restrict-gcp-instance-image-2.sentinel yet."
grep -qL "<expression_3>" restrict-gcp-instance-image-2.sentinel && fail-message "You have not replaced '<expression_3>' in restrict-gcp-instance-image-2.sentinel yet."
# <expression_3> occurs twice in the file; both must be replaced.
matches=$(fgrep "boot_disk[0].initialize_params[0].image" restrict-gcp-instance-image-2.sentinel | wc -l)
if [ $matches -ne 2 ]; then
fail-message "You have not replaced both occurences of '<expression_3>' with 'boot_disk[0].initialize_params[0].image' in restrict-gcp-instance-image-2.sentinel yet."
fi
grep -qL "<add_main_rule>" restrict-gcp-instance-image-2.sentinel && fail-message "You have not replaced '<add_main_rule>' in restrict-gcp-instance-image-2.sentinel yet."
grep -q "main = rule {" restrict-gcp-instance-image-2.sentinel || fail-message "You have not added 'main = rule {' in restrict-gcp-instance-image-2.sentinel yet."
grep -q "violations is 0" restrict-gcp-instance-image-2.sentinel || grep -q "violations == 0" restrict-gcp-instance-image-2.sentinel || fail-message "You have not added 'violations is 0' or 'violations == 0' to your main rule in restrict-gcp-instance-image-2.sentinel yet."
# Run the policy's own Sentinel test cases.  Because the script uses
# 'set -e', the original 'sentinel test …; rc=$?' aborted on failure
# *before* rc was examined, making the failure message unreachable.
# Testing the command directly in the 'if' suppresses -e for it.
if ! sentinel test -run=gcp; then
fail-message "Your second version of the policy did not pass both test cases. Please revise and test it again."
fi
exit 0
| true
|
a40ecaf7f08e1f8e9a09dfda4b7596fd10e2c910
|
Shell
|
c0sco/centos-cis-benchmark
|
/test/4.1.18.sh
|
UTF-8
| 445
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# ** AUTO GENERATED **
# 4.1.18 - Ensure the audit configuration is immutable (Scored)
# 4.1.18 "Ensure the audit configuration is immutable (Scored)" Yes Server2 Workstation2

# Return 0 when the active audit rules contain "-e 2" (immutable mode),
# ignoring anything after a '#' comment marker; return 1 otherwise
# (including when the rules file is missing).
execute(){
  # 'grep -E' replaces the deprecated 'egrep' alias (obsolete in POSIX
  # and scheduled for removal from GNU grep).
  cut -d\# -f1 /etc/audit/audit.rules | grep -E -q "^-e[[:space:]]+2" || return 1
}

test_serial_number="4.1.18"
test_name="Ensure the audit configuration is immutable (Scored)"
scored="Yes"
server="Server2"
workstation="Workstation2"
important="Yes"
| true
|
725f958dad6660fbae54dfcff0283cda1028cc33
|
Shell
|
skorik-kirill/monitoring
|
/memtest.sh
|
UTF-8
| 326
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# Report total/used RAM (from 'free') and warn when usage exceeds 50%
# of the total.
# Fix vs. original: 'let' and '[[ ]]' are bash-only and broke under the
# /bin/sh shebang on dash/ash; POSIX arithmetic and [ ] are used instead.
ramtotal=$(free | awk '/Mem/{print $2}')
ramusage=$(free | awk '/Mem/{print $3}')
rampercent=$(( ramtotal * 5 / 10 ))
echo "50% of memoty: $rampercent";
echo "Total RAM: $ramtotal";
echo "Usage RAM: $ramusage";
if [ "$ramusage" -gt "$rampercent" ]; then
	echo "critical ram state"
else
	echo "OK state"
fi
| true
|
af0ddfecee37d93e5c4a67a4ea35a3f048fe3078
|
Shell
|
thegedge/dotfiles
|
/bin/git-enhanced-blame
|
UTF-8
| 375
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
# git-enhanced-blame: run porcelain blame, format it with a helper awk
# script, and page the result through the user's configured pager.
main() {
  local fmt_pager
  local col_width
  # Pager precedence: GIT_PAGER, then git's core.pager, then $PAGER/cat.
  fmt_pager="${GIT_PAGER:-$(git config --get core.pager || printf ${PAGER:-cat})}"
  col_width="$(git config --get enhancedblame.columnsize)"
  # TODO figure out whether or not to use color from config
  git blame -p "$@" \
    | COLUMN_SIZE="${col_width}" awk -f "${HOME}/bin/parse-git-blame.awk" \
    | eval "${fmt_pager}"
}
main "$@"
| true
|
583f7259c537d598fae7104e83d38a7aa7781b85
|
Shell
|
anasilviacs/sm-engine
|
/docker/quick-setup.sh
|
UTF-8
| 523
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# One-shot local setup for the SM engine:
#   install the config, start the docker-compose stack, download and
#   register the HMDB formula database, then process a bundled example
#   dataset.  Run from the docker/ directory (paths are relative).
cp config.json ../conf/config.json
# docker-compose.yml mounts formula_dbs to /databases directory
mkdir -p formula_dbs
echo "Starting webserver"
docker-compose up -d
echo "Adding HMDB database"
wget https://s3-eu-west-1.amazonaws.com/sm-engine/hmdb.csv -O formula_dbs/hmdb.csv
./add_database.sh HMDB hmdb.csv
echo "Processing an example dataset"
# (the directory is specified in docker-compose.yml and defaults to ../tests/data)
./add_dataset.sh spheroid_test sci_test_search_job_spheroid_dataset
| true
|
e96f276189e461aed5539e42a76b9b5cb9e0e925
|
Shell
|
pixlmint/nacho-skeleton
|
/create-skeleton.sh
|
UTF-8
| 490
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Scaffold a new nacho project: copy the skeleton, generate a
# docker-compose.yaml for the chosen port, start the container and run
# 'composer install' inside it.
echo "Project Name"
read -r projectName
cp -r ./nacho-skeleton/root "$projectName"
cd "$projectName" || exit 1
echo "Port"
read -r port
printf "version: \"3\"\n\nservices:\n  nacho:\n    build:\n      context: .\n      dockerfile: Dockerfile\n    container_name: $projectName\n    ports:\n      - $port:80\n    volumes:\n      - ./:/var/www/html" > docker-compose.yaml
docker-compose up -d
# '<<EOF' (here-document) feeds the commands to the shell inside the
# container.  The original used '<<<EOF', a here-string that passed the
# literal word "EOF" — so 'composer install' and 'exit' then ran on the
# *host* shell instead of inside the container.
docker exec -it "${projectName}" bash <<EOF
composer install
exit
EOF
| true
|
c00c97fc1f8b63319779a5f3969703e32534a1d2
|
Shell
|
rgl/terraform-azure-aks-example
|
/hello/destroy.sh
|
UTF-8
| 233
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Tear down the hello resources: substitute the real DNS zone for the
# ".example.com" placeholder in resources.yml and pipe the resulting
# manifest to 'kubectl delete'.
set -euo pipefail
script_dir="$(dirname "$0")"
export KUBECONFIG="${script_dir}/../shared/kube.conf"
dns_zone="$(terraform output -raw dns_zone)"
sed -E "s,(\.example\.com),.$dns_zone,g" "${script_dir}/resources.yml" |
  kubectl delete -f -
| true
|
7447b3c82d9afc532d25223546913b13b669fef4
|
Shell
|
unnch/spell-sat
|
/spell/tags/2.0.9/scripts/linux/SPELL-PackageDriver
|
UTF-8
| 1,922
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
###############################################################################
# Copyright (C) 2008, 2011 SES ENGINEERING, Luxembourg S.A.R.L.
#
# This file is part of SPELL.
#
# SPELL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SPELL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SPELL. If not, see <http://www.gnu.org/licenses/>.
#
# FILE: Script for creating SPELL driver packages
#
# DATE: 24/11/2008
#
###############################################################################
# Usage: SPELL-PackageDriver <driver name> <driver dir> <driver cfg>
# Produces driver_<name>.tar.gz containing the driver tree plus its
# config file under config/.
# Fix vs. original: the '[[ ]]' tests are bash-only, so the shebang is
# now bash (the old '#!/bin/sh' broke on dash); backticks -> $(),
# expansions quoted.
DRIVER_NAME=$1
DRIVER_DIR=$2
DRIVER_CFG=$3

[[ -z "$DRIVER_NAME" ]] && echo "ERROR: must provide the driver name: $(basename "$0") <driver name> <driver dir> <driver cfg>" && exit 1
[[ -z "$DRIVER_DIR" ]] && echo "ERROR: must provide the driver directory: $(basename "$0") <driver name> <driver dir> <driver cfg>" && exit 1
[[ -z "$DRIVER_CFG" ]] && echo "ERROR: must provide the driver config file: $(basename "$0") <driver name> <driver dir> <driver cfg>" && exit 1
[[ ! -d "$DRIVER_DIR" ]] && echo "ERROR: cannot find driver directory ('$DRIVER_DIR')" && exit 1
[[ ! -f "$DRIVER_CFG" ]] && echo "ERROR: cannot find driver config file ('$DRIVER_CFG')" && exit 1

# Stage the driver tree and its config file in a scratch directory.
mkdir -p "temp_drv/$DRIVER_NAME"
mkdir -p temp_drv/config
cp -rvf "$DRIVER_DIR"/* "temp_drv/$DRIVER_NAME/."
cp -vf "$DRIVER_CFG" temp_drv/config/.
DRIVER_PKG="driver_${DRIVER_NAME}.tar.gz"
[[ -f "$DRIVER_PKG" ]] && rm -f "$DRIVER_PKG"
cd temp_drv || exit 1
tar czf "../$DRIVER_PKG" *
cd -
rm -rf temp_drv
echo "done."
| true
|
d28692999b4c206868879d43bab684255e39b6ac
|
Shell
|
uk-gov-mirror/hmcts.tactical-jenkins-library
|
/resources/uk/gov/hmcts/artifactory/create-property.sh
|
UTF-8
| 656
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Adds a property (key/value) to an Artifactory artifact via the
# artifactproperties REST endpoint.  Requires ARTIFACTORY_AUTH
# ("user:password") in the environment.
usage() {
  echo "Adds a property to an artifact"
  echo "Usage: $0 path repoKey propertyKey propertyValue"
  exit 1
}

if [ -z "$4" ]; then
  usage
fi

set -eu

export path="${1}"
export repoKey="${2}"
export propertyKey="${3}"
export propertyValue="${4}"
export artifactoryServer="https://artifactory.reform.hmcts.net"

# The JSON body below quotes "${propertyKey}"/"${propertyValue}" — the
# original left them unquoted, so values containing spaces or glob
# characters were word-split/globbed and produced a broken request.
# NOTE(review): values are still spliced into the JSON unescaped; a
# value containing '"' would corrupt the payload.
curl -sS --fail -u "${ARTIFACTORY_AUTH}" -H "Content-Type: application/json" \
  -XPOST \
  -d '{"multiValue":false,"property":{"name":"'"${propertyKey}"'"},"selectedValues":["'"${propertyValue}"'"]}' \
  "${artifactoryServer}/artifactory/api/artifactproperties?path=${path}&repoKey=${repoKey}&recursive=false"
echo
| true
|
ebaf93c197ddaab62b9349c716353f38580416dc
|
Shell
|
ueisele/app-ntpartner
|
/environment/docker-compose/docker-compose.sh
|
UTF-8
| 244
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -e

# Resolve the directory holding this script (and its compose files)
# without changing the caller's working directory.
pushd . > /dev/null
cd "$(dirname "${BASH_SOURCE[0]}")"
YAML_DIR=$(pwd)
popd > /dev/null

# Run docker-compose with every *.yml file in YAML_DIR, forwarding all
# caller arguments.  Fixes vs. original:
#   * "$@" is quoted so arguments containing spaces survive;
#   * the -f list is built from a glob instead of parsing 'ls';
#   * nullglob (scoped to the subshell) keeps a literal '*.yml' from
#     being passed when no compose files exist.
function docker_compose_in_env() {
    (
        cd "${YAML_DIR}"
        shopt -s nullglob
        local file_args=()
        for file in *.yml; do
            file_args+=(-f "${file}")
        done
        docker-compose "${file_args[@]}" "$@"
    )
}
| true
|
16687c9a303d8559115b2f57c73989567bb9e827
|
Shell
|
gazdik/dp-automatic-pronunciation-evaluation
|
/src/egs/misp/local/steps/eval_misp.sh
|
UTF-8
| 4,179
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Copyright 2018 Peter Gazdik
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# Evaluate a trained mispronunciation (MISP) classifier on a test set
# (Kaldi-style recipe step): extract features, average them per aligned
# phone, run the classifier, emit TextGrid alignments with scores, and
# compute EER / ROC statistics.
# ++ Configuration section ++
cmd=run.pl
nj=4
norm_vars=true
norm_means=true
splice_opts='--left-context=5 --right-context=5'
train_opts=
# -- Configuration section --
echo "$0 $@" # Print the command line for logging
. parse_options.sh || exit 1;
if [[ $# -lt 7 ]]; then
echo "Usage: $0 <lang-dir> <conf-dir> <data-test> <ali-test> \\"
echo " <feats-model-dir> <exp-dir> <eval-dir>"
echo " e.g.: $0 data/lang conf data/test_fbank \\"
echo " exp/mono_ali_ext_test exp/mono_pdf exp/misp_pdf \\"
echo " exp/misp_pdf/eval_test \\"
echo " Options:"
echo " TODO "
exit 1;
fi
lang_dir=$1
conf_dir=$2
data_dir=$3
ali_dir=$4
feats_model_dir=$5
exp_dir=$6
eval_dir=$7
log_dir=${eval_dir}/log
mkdir -p ${eval_dir}
cp ${conf_dir}/phn_sil_to_idx.txt ${exp_dir}
flags="${data_dir}/text_ext_flags"
# Kaldi rspecifier strings: per-utterance phone alignments (with and
# without durations) reconstructed on the fly from the gzipped ali files.
phones_ali="ark:ali-to-phones --write-lengths ${ali_dir}/final.mdl "
phones_ali+="\"ark:gunzip -c ${ali_dir}/ali*.gz|\" ark,t:- |"
phones="ark:ali-to-phones ${exp_dir}/final.mdl "
phones+="\"ark:gunzip -c ${ali_dir}/ali*.gz |\" ark:- |"
# Create files with features since we can't pass it
# as stream because of the "tensorflow deadlock"
# Pipeline: CMVN -> splicing -> bottleneck forward pass -> per-phone
# averaging -> CMVN transform; result materialised as a .ark file.
apply-cmvn --norm-vars=${norm_vars} --norm-means=${norm_means} \
--utt2spk=ark:${data_dir}/utt2spk scp:${data_dir}/cmvn.scp \
scp:${data_dir}/feats.scp ark:- | \
splice-feats ${splice_opts} ark:- ark:- | \
${feats_model_dir}/forward.py ${feats_model_dir}/final.h5 | \
local/utils/avg_feats.py "${phones_ali}" | \
local/utils/cmvn_transform.py ${exp_dir}/cmvn.mdl > \
${exp_dir}/feats_test.ark
# Check feature lengths if there isn't some mismatch
${cmd} ${log_dir}/check_lengths_tr.log local/utils/check_lengths.py \
"${phones}" ${exp_dir}/feats_test.ark \
--type_1 int_ark --type_2 mat_ark
# Evaluate performance of the mispronunciation classifier
${cmd} ${log_dir}/misp_eval.log local/pron/misp_eval.py ${train_opts} \
${exp_dir}/feats_test.ark "${phones}" ${flags} ${exp_dir} ${eval_dir}
# Create alignments in the text grid format
${cmd} ${log_dir}/align2ctm.log \
ali-to-phones --ctm-output ${exp_dir}/final.mdl \
"ark:gunzip -c ${ali_dir}/ali*.gz |" \
${eval_dir}/ali_phones.ctm
[[ ! -f ${data_dir}/utt2dur ]] && utils/data/get_utt2dur.sh ${data_dir}
${cmd} ${log_dir}/ctm2textgrid.log local/utils/ctm2textgrid.py ${eval_dir}/ali_phones.ctm \
${eval_dir}/score_textgrid ${lang_dir}/phones.txt \
${data_dir}/utt2dur
# Add scores into alignments
${cmd} ${log_dir}/score2textgrid.log local/utils/score2textgrid.py \
${eval_dir}/score.txt ${eval_dir}/score_textgrid
# Calculate equal error rate
local/utils/eq_err_rate.py ${flags} ${eval_dir}/score.ark \
--thr_start 0.0 --thr_stop 1.0 --trend dec > \
${eval_dir}/eer.txt
# Calculate equal error rate per phone
local/utils/eq_err_rate.py ${flags} ${eval_dir}/score.ark \
--per_phone ${data_dir}/text_ext_canonic.int ${lang_dir}/phones.txt \
--thr_start 0.0 --thr_stop 1.0 --trend dec > \
${eval_dir}/eer_per_phone.txt
# Plot ROC curve
local/utils/plot_roc.py ${flags} ${eval_dir}/score.ark dec \
--thr_start 0.0 --thr_stop 1.0 --out_file ${eval_dir}/roc
# Plot ROC curves per phone
local/utils/plot_roc.py ${flags} ${eval_dir}/score.ark dec \
--per_phone ${data_dir}/text_ext_canonic.int ${lang_dir}/phones.txt \
--thr_start 0.0 --thr_stop 1.0 --out_file ${eval_dir}/roc_per_phone
# Delete features
rm ${exp_dir}/feats_test.ark
# Print results
cat ${eval_dir}/eer*.txt
| true
|
cde6e50b93fc30ea94d6f4f5331f9f4722f3a3f7
|
Shell
|
Amhed-KAUST/sgRNA-DB
|
/Version1.sh
|
UTF-8
| 11,201
| 2.671875
| 3
|
[] |
no_license
|
##################All from Scratch
# Exploratory bioinformatics pipeline for building C. elegans
# promoter/TSS annotations (APPRIS principal isoforms + ATAC-seq TSS
# evidence) on top of Ensembl 95 / WormBase WS268.
# NOTE(review): each pipeline stage is a long awk/bedtools one-liner and
# depends on files written by the previous stage being run in order.
###After multiple intents I realized several errors in gff3:
#Exon and mRNA in WormBase contain the 5'UTR
##Resources: Extracted from
#Caenorhabditis_elegans.WBcel235.95.gff3.gz: Ensemble95 Database
#ce11-appris_data.principal.txt: APPRIS database based on Ensemble 88; grep "PRINCIPAL:1" ce11-appris_data.principal.txt | awk -F"\t" '{print $2}' | sort | uniq | sort > tmp; grep "PRINCIPAL:1" ce11-appris_data.principal.txt | awk -F"\t" '{print $2}' | sort | uniq -d > dupplicated-appris.txt ; comm tmp dupplicated-appris.txt -2 -3 > appris-to-get.txt
#Sorted and combined TSS: awk -F"\t" '{if(array[$1"-"$2"-"$3] != 0){print array[$1"-"$2"-"$3]"\t;"$5 }else{array[$1"-"$2"-"$3]=$0}}' ATAC-TSSs-FWD.BED ATAC-TSSs-REV.BED | awk -F"\t" '{print $1"\t"$2"\t"$3"\t"$4"\t"$5$7"\t"$6}' - > ATAC-CombTSSs.BED
#EnsePrim-Lists:
#awk -F"\t" '{array[$2]=$0}END{for(i in array){print i"\t"array[i]}}' ce11-appris_data.principal.txt | awk -F"\t" '{if(array[$1] != 0){print array[$1]}else{array[$1]=$0}}' - appris-to-get.txt| awk -F"\t" '{print $3";"$4}' > appris-to-getG.txt;
#zcat ../Caenorhabditis_elegans.WBcel235.95.gff3.gz | awk -F"\t" '{if($2=="WormBase"){if($3=="mRNA"){print $0}}}' - | awk -F"\t|;|=|:" '{print $1"\t"$4"\t"$5"\t"$14";"$16"\t.\t"$7}' | awk -F"\t" '{print $4"\t"$0}' | awk -F"\t" '{if(array[$1] != 0){print array[$1]}else{array[$1]=$0}}' - appris-to-getG.txt > Ensembl95-Primary.txt;
#awk -F"\t|;" '{print $2}' Ensembl95-Primary.txt > EnsePrim-List.txt;
#zcat Caenorhabditis_elegans.WBcel235.95.gff3.gz | awk -F"\t" '{if($2=="WormBase"){if($3=="mRNA"){print $0}}}' - | awk -F"\t|;|=|:" '{print $1"\t"$4"\t"$5"\t"$14";"$16"\t.\t"$7"\t"$14}' | awk -F"\t" '{print $7"\t"$0}' | awk -F'\t' '{if(array[$1] != 0){array[$1]=array[$1]"lalolanda."$0}else{array[$1]=$0}}END{for(keys in array){print array[keys]}}' - | awk -F"\t" '{if(array[$1] != 0){print array[$1]}else{array[$1]=$0}}' - non-PrimGenes.txt | awk -F"lalolanda." '{for(i=1;i<=NF;i++){print $i}}' - | awk -F"\t" '{$1=$8="";print $5"\t"$0}' - > Ensembl95-NON-Primary.txt
#WormBaseANNOT: zcat c_elegans.PRJNA13758.WS268.annotations.gff3.gz | awk -F"\t" '{if($2=="WormBase"){if($3=="mRNA"){print $0}}}' - | awk -F"\t|;|=|:" '{print $14";"$20}' | sort | uniq | awk -F";" '{print $1"\t"$2}' > WormBaseID-locus.txt
#WS268GenesOperon.BED Done previously by GetOperon LIst
#Obtain CDS GeneBodies
# GeneBodies.txt carries a leading transcript-ID key column; GeneBodies.bed
# is the same data as plain BED6 (min-start/max-end per transcript).
zcat Caenorhabditis_elegans.WBcel235.95.gff3.gz | awk -F"\t" '{if($2=="WormBase"){if($3=="CDS"){print $0}}}' - | awk -F"\t|;|=|:" '{print $1"\t"$4"\t"$5"\t"$11"\t.\t"$7}' | awk -F"\t" '{if(array[$4] != 0){if(start[$4] > $2){start[$4]=$2};if(end[$4] < $3){end[$4]=$3};array[$4]=$1"\t"start[$4]"\t"end[$4]"\t"$4"\t"$5"\t"$6}else{start[$4]=$2;end[$4]=$3;array[$4]=$0}}END{for(key in array){print key"\t"array[key]}}' - | sort > GeneBodies.txt
zcat Caenorhabditis_elegans.WBcel235.95.gff3.gz | awk -F"\t" '{if($2=="WormBase"){if($3=="CDS"){print $0}}}' - | awk -F"\t|;|=|:" '{print $1"\t"$4"\t"$5"\t"$11"\t.\t"$7}' | awk -F"\t" '{if(array[$4] != 0){if(start[$4] > $2){start[$4]=$2};if(end[$4] < $3){end[$4]=$3};array[$4]=$1"\t"start[$4]"\t"end[$4]"\t"$4"\t"$5"\t"$6}else{start[$4]=$2;end[$4]=$3;array[$4]=$0}}END{for(key in array){print array[key]}}' - | sort > GeneBodies.bed
##Gff3 from Wormbase is 1 based convert to 0 based (bedformat)
# NOTE(review): in the .txt case field $3 is the start (because of the
# extra key column), in the .bed case it is $2 — hence the different
# decrements below.
awk -F"\t" '{OFS="\t";$3=($3-1);$4=$4;print $0}' GeneBodies.txt | sort -k1,1 -k2,2n > GeneBodies.0.txt
awk -F"\t" '{OFS="\t";$2=($2-1);$3=$3;print $0}' GeneBodies.bed | sort -k1,1 -k2,2n > GeneBodies.0.bed
##Obtain closest and higher ATAC-opening for all Isoforms
bedtools closest -io -id -D "a" -k 2 -a GeneBodies.0.bed -b ATAC-CombTSSs.BED | awk '{if($NF > -600){print $0}}' | awk -F"\t" '{if(array[$4] !=0){ split($11,coso,";"); if($6=="+"){tmp=coso[1]}else{tmp=coso[2]};if(val[$4]<tmp){array[$4]=$0}}else{array[$4]=$0; split($11,coso,";"); if($6=="+"){val[$4]=coso[1]}else{val[$4]=coso[2]}}}END{for(key in array){print array[key]}}' | sort -k1,1 -k2,2n > Isoform-with-closestandhigher-ATAC.txt
#Remove those not found
awk -F"\t" '{if($8 == "-1"){}else{print $0}}' Isoform-with-closestandhigher-ATAC.txt > GoodIsoform-with-closestandhigher-ATAC.txt
#Obtain first set of those with primary form and ATAC
awk -F"\t" '{print $4"\t"$0}' GoodIsoform-with-closestandhigher-ATAC.txt | awk -F"\t" '{if(array[$1] != 0){print array[$1]}else{array[$1]=$0}}' - EnsePrim-List.txt > Primary-forms-Promoter.txt
#PutWB AS List
awk -F"\t|;" '{print $2"\t"$1}' Ensembl95-Primary.txt | awk -F"\t" '{if(array[$1] != 0){print $2"\t"array[$1]}else{array[$1]=$0}}' Primary-forms-Promoter.txt - | awk '{print $1}' - | sort > WBListwithGoodProm.txt
awk -F"\t|;" '{print $2"\t"$1}' Ensembl95-Primary.txt | awk -F"\t" '{if(array[$1] != 0){print $2"\t"array[$1]}else{array[$1]=$0}}' Primary-forms-Promoter.txt - > WBGoodIsoform-with-closestandhigher-ATAC.txt
#Make subtemporary directory
mkdir almost
cp WBGoodIsoform-with-closestandhigher-ATAC.txt almost/Primary+ATAC.txt
cp GoodIsoform-with-closestandhigher-ATAC.txt Isoform-with-closestandhigher-ATAC.txt
awk '{print $4"\t"$0}' GoodIsoform-with-closestandhigher-ATAC.txt > testes
awk -F"\t|;" '{print $2"\t"$1}' Ensembl95-NON-Primary.txt | awk -F"\t" '{if(array[$1] != 0){print array[$1]"\t"$0}else{array[$1]=$2}}' - testes >WBGenes-non-prim-but-ATAC.txt
awk '{if(array[$1] != 0){if(val[$1] < $15){array[$1]=$0; val[$1]=$15}}else{array[$1]=$0; val[$1]=$15}}END{for(keys in array){print array[keys]}}' WBGenes-non-prim-but-ATAC.txt > WBGenes-nonP-ATAC-ShortestD.txt
cp WBGenes-nonP-ATAC-ShortestD.txt almost/NonPrimary+ATAC.txt
#Obtain semi Lists
cd almost/
awk '{print $2}' Primary+ATAC.txt | sort > A
sort ../EnsePrim-List.txt > B
comm -2 -3 B A > PrimaryNoAtac-list.txt
cd ..
#Obtain Prim No ATAC
awk -F"\t" '{print $4"\t"$0}' GeneBodies.0.bed | awk -F"\t" '{if(array[$1] != 0){print array[$1]}else{array[$1]=$0}}' - almost/PrimaryNoAtac-list.txt | awk -F"\t" '{if($7 =="-"){cen=$4+250}else{cen=$3-250}; print $0"\t"$2"\t"(cen-75)"\t"(cen+75)"\tPutative_Centered250Upstream-150bpRegion\t.\t"$7"\t-175"}' - > PartialPrimNOATAC.txt
awk -F"\t|;" '{print $2"\t"$1}' Ensembl95-Primary.txt | awk -F"\t" '{if(array[$1] != 0){print array[$1]"\t"$0}else{array[$1]=$2}}' - PartialPrimNOATAC.txt > almost/Primary-ATAC.txt
#Obtain semi Lists
cd almost/
awk '{print $2}' NonPrimary+ATAC.txt | sort > A
awk -F"\t|;" '{print $2}' ../Ensembl95-NON-Primary.txt | sort > B
comm -2 -3 B A > NONPrimaryNoAtac-list.txt
cd ..
# NOTE(review): the next line recomputes PartialPrimNOATAC.txt with a
# 150-bp offset, overwriting the 250-bp version produced above, and the
# following line overwrites almost/Primary-ATAC.txt accordingly —
# confirm this supersession is intentional.
awk -F"\t" '{print $4"\t"$0}' GeneBodies.0.bed | awk -F"\t" '{if(array[$1] != 0){print array[$1]}else{array[$1]=$0}}' - almost/PrimaryNoAtac-list.txt | awk -F"\t" '{if($7 =="-"){cen=$4+150}else{cen=$3-150}; print $0"\t"$2"\t"(cen-75)"\t"(cen+75)"\tPutative_Centered150Upstream-150bpRegion\t.\t"$7"\t-75"}' - > PartialPrimNOATAC.txt
awk -F"\t|;" '{print $2"\t"$1}' Ensembl95-Primary.txt | awk -F"\t" '{if(array[$1] != 0){print array[$1]"\t"$0}else{array[$1]=$2}}' - PartialPrimNOATAC.txt > almost/Primary-ATAC.txt
awk -F"\t" '{print $4"\t"$0}' GeneBodies.0.bed | awk -F"\t" '{if(array[$1] != 0){print array[$1]}else{array[$1]=$0}}' - almost/NONPrimaryNoAtac-list.txt | awk -F"\t" '{if($7 =="-"){cen=$4+150}else{cen=$3-150}; print $0"\t"$2"\t"(cen-75)"\t"(cen+75)"\tPutative_Centered150Upstream-150bpRegion\t.\t"$7"\t-75"}' - > PartialNONPrimNOATAC.txt
awk -F"\t|;" '{print $2"\t"$1}' Ensembl95-NON-Primary.txt | awk -F"\t" '{if(array[$1] != 0){print array[$1]"\t"$0}else{array[$1]=$2}}' - PartialNONPrimNOATAC.txt | awk -F"\t" '{if(array[$1] != 0){tmp=($5-$4); if(tmp > val[$1]){array[$1]=$0; val[$1]=($5-$4)}}else{array[$1]=$0; val[$1]=($5-$4)}}END{for(keys in array){print array[keys]}}' > almost/NonPrimary-ATAC.txt
cd almost/
rm A
rm B
mkdir tmp
mv PrimaryNoAtac-list.txt tmp/
mv NONPrimaryNoAtac-list.txt tmp/
# Drop genes that appear in more than one category list.
cat *.txt | awk '{print $1}' | sort | uniq -c | awk '{if($1>1){print $2}}' > Duplicated
awk '{print $1}' NonPrimary-ATAC.txt | sort > A
sort Duplicated > B
comm -2 -3 A B > C
awk -F"\t" '{if(array[$1] != 0){print array[$1]}else{array[$1]=$0}}' NonPrimary-ATAC.txt C > RealNonPrimary-ATAC.txt
mv NonPrimary-ATAC.txt tmp/OldNonPrimary-ATAC.txt
mv RealNonPrimary-ATAC.txt NonPrimary-ATAC.txt
rm A
rm B
rm C
rm Duplicated
# Split each category table into a gene-body BED and a TSS BED.
for file in `ls *.txt`; do echo $file; awk -F"\t" '{print $3"\t"$4"\t"$5"\t"$6"\t"$7"\t"$8}' $file > ${file%.txt}.bed; done
for file in `ls *.txt`; do echo $file; awk -F"\t" '{print $9"\t"$10"\t"$11"\t"$12"\t"$13"\t"$14}' $file > ${file%.txt}.TSS.bed; done
cd ..
##First constitutive Exon (REMEMBER TO CHANGE TO 0 index!!!!!!!)
zcat Caenorhabditis_elegans.WBcel235.95.gff3.gz | awk -F"\t" '{if($2=="WormBase"){if($3=="exon"){print $0}}}' - | grep "constitutive=1" | awk -F"\t|:|;" '{print $10"\t"$1"\t"$4"\t"$5"\t"$6"\t"$7}' | awk '{if(array[$1] != 0){if($6=="-"){tmp=$4; if(tmp>val[$1]){array[$1]=$0; val[$1]=$4}}else{tmp=$3; if(tmp<val[$1]){array[$1]=$0; val[$1]=$3}}}else{array[$1]=$0; if($6=="-"){val[$1]=$4}else{val[$1]=$3}}}END{for(keys in array){print array[keys]}}' > FirstConstitutiveExon-Ensemble95.txt
##Get CDS
zcat Caenorhabditis_elegans.WBcel235.95.gff3.gz | awk -F"\t" '{if($2=="WormBase"){if($3=="CDS"){print $0}}}' - | awk -F"\t|:|;" '{print $1"\t"$4"\t"$5"\t"$10"-CDS\t"$6"\t"$7}' | sort -k1,1 -k2,2n > CDS-all.bed
awk -F"\t" '{OFS="\t";print $2,$3,$4,$1,$5,$6}' FirstConstitutiveExon-Ensemble95.txt | sort -k1,1 -k2,2n > FirstConstitutiveExon-Ensemble95.bed
bedtools intersect -a FirstConstitutiveExon-Ensemble95.bed -b CDS-all.bed | awk '{if(array[$4] != 0){array[$4]=$0}else{array[$4]=$0}}END{for(keys in array){print array[keys]}}' | sort -k1,1 -k2,2n > FirstCDS.bed
awk '{print $1}' GeneBodies.0.txt | sort | uniq > A
awk '{print $4}' FirstCDS.bed | sort | uniq > B
comm -2 -3 A B > List-without-Constitutive.txt
zcat Caenorhabditis_elegans.WBcel235.95.gff3.gz | awk -F"\t" '{if($2=="WormBase"){if($3=="CDS"){print $0}}}' - | awk -F"\t|;|=|:" '{print $1"\t"$4"\t"$5"\t"$11"\t.\t"$7}' | awk -F"\t" '{if(array[$4] != 0){if(start[$4] > $2){start[$4]=$2; end[$4]=$3};if(end[$4] < $3){end[$4]=$3; start[$4]=$2};array[$4]=$1"\t"start[$4]"\t"end[$4]"\t"$4"\t"$5"\t"$6}else{start[$4]=$2;end[$4]=$3;array[$4]=$0}}END{for(key in array){print key"\t"array[key]}}' - | awk '{if(array[$1] != 0){print array[$1]}else{array[$1]=$0}}' - List-without-Constitutive.txt | awk -F"\t" '{print $2"\t"$3"\t"$4"\t"$5"\t"$6"\t"$7}' | sort -k1,1 -k2,2n > FirstCDS-IsoformNonConstitutive.bed
#Convert-O indexed
cat FirstCDS.bed FirstCDS-IsoformNonConstitutive.bed | awk -F"\t" '{OFS="\t";$2=($2-1);$3=$3;print $0}' | sort -k1,1 -k2,2n > AlmostConstitutive.bed
cd almost/tmp/
##TOFinish-all-in-loop
for file in `ls ../*.txt`; do
echo $file;
awk -F"\t" '{print $2"\t"$0}' $file > tmp;
###This filter removes dead genes or pseudo genes
awk -F"\t" '{print $4"\t"$0}' ../../AlmostConstitutive.bed | awk -F"\t" '{if(array[$1] != 0){print $2"\t"$0"\t"array[$1]}else{array[$1]=$0}}' - tmp | awk -F"\t" '{if(array[$1] != 0){print array[$1]"\t"$0}else{array[$1]=$2}}' ../../WormBaseID-locus.txt - | awk -F"\t" 'OFS="\t"{print $2,$1,$3,$6,$7,$8,$2";"$1";"$9,$10,$11,$12,$13,$14,$15";WormbaseScore="$16";DistancetoATG="$18,$16,$17,$20,$21,$22,"Exon-"$23,$24,$25}' > ${file%.txt}+CDS.txt;
done
##Run R code
| true
|
38e0ee60e757a984ad039eabb94b25e1b44b32cd
|
Shell
|
Raph-hsyuan/OCS-2019
|
/python/onboot.sh
|
UTF-8
| 994
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
# Watchdog: every 10 seconds make sure the MQTT broker, the main
# application and the Alexa wake-word listener are running, restarting
# any that have died.  Started once at boot (see the systemd unit notes
# at the bottom of this file).
export LD_LIBRARY_PATH=./
while true; do
    # 'grep -v grep' drops the grep process itself from the ps listing,
    # so each variable is empty iff the target process is not running.
    mosquitto=$(ps aux | grep "mosquitto -p 1884" | grep -v grep)
    main=$(ps aux | grep ./main.py | grep -v grep)
    alexa=$(ps aux | grep wakeword.py | grep -v grep)
    if [ ! "$mosquitto" ]; then
        mosquitto -p 1884 1>>./log/journalMos 2>&1 &
    fi
    if [ ! "$main" ]; then
        python ./main.py 1>>./log/journalMain 2>&1 &
    fi
    if [ ! "$alexa" ]; then
        # fix: 'sudo sudo' was duplicated; a single sudo is sufficient
        sudo python3 /home/pi/Desktop/OCS-2019-master/Alexa/launch.py --example /home/pi/Desktop/OCS-2019-master/Alexa/src/examples/wakeword/wakeword.py 1>>./log/journalMain 2>&1 &
    fi
    sleep 10
done
#sudo nano /etc/systemd/system/alois.service
# [Unit]
# Description=My service
# After=network.target
# [Service]
# ExecStart=/bin/bash /home/pi/Desktop/OCS-2019-master/python/onboot.sh
# WorkingDirectory=/home/pi/Desktop/OCS-2019-master/python/
# StandardOutput=inherit
# StandardError=inherit
# Restart=always
# User=pi
# [Install]
# WantedBy=multi-user.target
# sudo systemctl enable alois.service
| true
|
660eb37c5182c554126354698a09f1db0eb81fb2
|
Shell
|
rmaruthiyodan/resource_monitor
|
/nw_monitor.sh
|
UTF-8
| 1,173
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the per-second transmit/receive bit rate of a network interface,
# sampled from the kernel byte counters in /sys, once per second forever.
if [ -z "$1" ]; then
    echo -e "\n\tusage: $0 <Network-Interface>"
    echo -e "\tExample: $0 eth0 \n"
    exit
fi
IF=$1

# Scale a bit count to a human-readable string ("512 ", "5 K", "3 M", "2 G").
humanize()
{
    local bits=$1
    if [ "$bits" -gt 1073741823 ]; then
        echo "$((bits / 1073741824)) G"
    elif [ "$bits" -gt 1048575 ]; then
        echo "$((bits / 1048576)) M"
    elif [ "$bits" -gt 1023 ]; then
        echo "$((bits / 1024)) K"
    else
        # fix: the original left Tx/Rx unset (or stale from the previous
        # sample) whenever the rate dropped below 1 Kbit/s
        echo "$bits "
    fi
}

while true
do
    R1=$(cat /sys/class/net/$IF/statistics/rx_bytes)
    T1=$(cat /sys/class/net/$IF/statistics/tx_bytes)
    sleep 1
    R2=$(cat /sys/class/net/$IF/statistics/rx_bytes)
    T2=$(cat /sys/class/net/$IF/statistics/tx_bytes)
    # delta bytes over one second, converted to bits
    TBPS=$(( (T2 - T1) * 8 ))
    RBPS=$(( (R2 - R1) * 8 ))
    Tx=$(humanize "$TBPS")
    Rx=$(humanize "$RBPS")
    # fix: second field used "$1" where every other reference uses $IF
    echo -e "tx $IF: $Tx""b/s \t rx $IF: $Rx""b/s"
done
| true
|
b3dd4d0459338d9a46d52255ffb923ddc2cb5e3b
|
Shell
|
richarms/tkp-lofar-build
|
/init_scripts/soft/init-soft.sh
|
UTF-8
| 1,017
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Prepend the stable-software install tree to the relevant search paths.
# Each variable gains "<root>/<subdir>" at the front; any previous value
# is kept after a ':' separator (the ${VAR:+:${VAR}} idiom).
stable_software_pathdirs=/opt/soft/pathdirs

export PATH="${stable_software_pathdirs}/bin${PATH:+:${PATH}}"
export LD_LIBRARY_PATH="${stable_software_pathdirs}/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}"
export PYTHONPATH="${stable_software_pathdirs}/python-packages${PYTHONPATH:+:${PYTHONPATH}}"

# A generic loop that also avoided re-adding an already-present directory
# used to live here; it was replaced by the three explicit exports above.
| true
|
878d5ba0bacab2d7ce34c7edecff21899842456a
|
Shell
|
vokhmin/ethereum-node
|
/docker/geth-node/devs/start.sh
|
UTF-8
| 681
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
# Start a single private-network geth node: fixed port, no peer
# discovery, RPC open on all interfaces, mining with one thread, and the
# etherbase account unlocked from a password file.
# fix: 'set echo on' is a csh-ism -- in POSIX sh it merely set the
# positional parameters to "echo on".  'set -x' enables command tracing.
set -x
ETH_BASE="7f34a023fa2e16559ec7b6066491c17d73531736"
ETH_DEFAULT="42ffc349ca3b856da57428580baa61a9d8dc0eb3"
CONFIG=" --port 30303 --nodiscover --maxpeers 0 --ipcpath /tmp/geth.ipc --rpcapi \"db,personal,eth,net,web3\" --rpccorsdomain '*' --rpc --rpcaddr 0.0.0.0 --rpcvhosts=* --nat any --mine --miner.threads 1 --miner.etherbase $ETH_BASE --verbosity 4 "
UNLOCK=" --unlock $ETH_BASE --password /root/password "
echo "Geth OPTS='$OPTS'"      # OPTS is expected to come from the environment
echo "Geth CONFIG='$CONFIG'"
echo "Geth UNLOCK='$UNLOCK'"
# geth $CONFIG 2> $ETH_DATA/geth.log
#nohup geth $CONFIG > $ETH_DATA/geth.log &
#echo "Geth has stared, see Logs at $ETH_DATA/geth.log"
geth $OPTS $CONFIG $UNLOCK
| true
|
c70cdc3d5258e8309e0bee67a088001044bce70d
|
Shell
|
K-INBRE/kinbre_b017
|
/b017_commands.sh
|
UTF-8
| 2,678
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# sWGA pipeline for project B017: QC the parental-line reads with fastp,
# map them to the reference with bwa, and produce sorted, indexed BAMs
# with samtools.  Conda environments are created on demand.
NTHREADS=8
PROJECTDIR=/panfs/pfs.local/scratch/sjmac/b961k922/kinbre/b017_macdonald
RAWDATADIR=${PROJECTDIR}/data/parental_lines/raw
QCDIR=${PROJECTDIR}/data/parental_lines/filtered
REPORTSDIR=${PROJECTDIR}/report_files
REFGENOME=${PROJECTDIR}/refs/dmel_main_chr_r6.03_masked.fa
ALNDIR=${PROJECTDIR}/outputs/aln/parental_lines
# fix: the variables must be expanded here -- the original created literal
# directories named "QCDIR", "REPORTSDIR" and "ALNDIR" in the cwd
mkdir -p ${QCDIR} ${REPORTSDIR} ${ALNDIR}
parental_lines=( Db3852 Dsam )

# Activate (creating and populating it first if necessary) a conda env
# named "$1_$2" that contains package $2.
activate_conda_environment () {
ENVNAME=${1}_${2}
# expected directory for the conda environment
ENVDIR=/panfs/pfs.local/work/sjmac/software/conda_envs/${ENVNAME}
# check if the directory exists; if it doesn't, create the env
if [ ! -d "${ENVDIR}" ]; then
# create a conda environment for the application
# --yes = do not ask for confirmation during installation
conda create --name ${ENVNAME} --yes
# install the application in the newly created conda environment
# --yes = do not ask for confirmation during installation
conda install --name ${ENVNAME} ${2} --yes
fi
source activate ${ENVNAME}
}
#########################
# FASTP Quality Control #
#########################
cd ${PROJECTDIR}
activate_conda_environment b017 fastp
for parent in "${parental_lines[@]}"
do
fastp -i ${RAWDATADIR}/${parent}_R1_sanger.fq.gz -I ${RAWDATADIR}/${parent}_R2_sanger.fq.gz \
-o ${QCDIR}/${parent}_R1.filt.fq.gz -O ${QCDIR}/${parent}_R2.filt.fq.gz \
-h ${REPORTSDIR}/${parent}.fastp.report.html \
-j ${REPORTSDIR}/${parent}.fastp.report.json \
--dont_overwrite \
--cut_by_quality3 \
--cut_window_size 5 \
--cut_mean_quality 30 \
--correction \
--overrepresentation_analysis 2>&1 | tee ${REPORTSDIR}/${parent}.QC.fastp.log
done
source deactivate
##############################
# Index the reference genome #
##############################
activate_conda_environment b017 bwa
bwa index ${REFGENOME}
##########################
# Map the parental lines #
##########################
for parent in "${parental_lines[@]}"
do
# fix: 'bwa sampe' takes .sai alignments (not gzipped fastq) and has no
# -t option; for paired fastq input 'bwa mem' is the correct driver and
# takes the read-group line via -R
bwa mem -t ${NTHREADS} -R "$(echo "@RG\tID:${parent}\tSM:${parent}\tLB:lib1")" \
${REFGENOME} \
${QCDIR}/${parent}_R1.filt.fq.gz ${QCDIR}/${parent}_R2.filt.fq.gz > ${ALNDIR}/${parent}.aln.sam
done
source deactivate
activate_conda_environment b017 samtools
for parent in "${parental_lines[@]}"
do
samtools fixmate -O bam ${ALNDIR}/${parent}.aln.sam ${ALNDIR}/${parent}.aln.bam || exit 1
# fix: 'samtools sort -t' sorts by tag; thread count is set with -@
samtools sort -@ ${NTHREADS} -O bam -o ${ALNDIR}/${parent}.sorted.bam -T tmp_ ${ALNDIR}/${parent}.aln.bam || exit 1
samtools index ${ALNDIR}/${parent}.sorted.bam
rm ${ALNDIR}/${parent}.aln.sam
done
source deactivate
| true
|
d97c32e4e3d8ddfac35f57d6b2043d63d0d6d40d
|
Shell
|
bozeugene/correlation
|
/image_correlation_workbench/ressources/bluepages/imagebuttons/cmpall.sh
|
UTF-8
| 285
| 2.953125
| 3
|
[] |
no_license
|
# Cross-compare every "step" screenshot against every "screen" screenshot
# and report the normalised cross-correlation (NCC) of each pair.
for i in high-res/in-step-*.png
do
    for j in high-res/in-screen-*.png
    do
        #echo "compare -metric NCC $i $j diff.png"
        # ImageMagick 'compare' prints the metric on stderr; capture it
        # and discard the visual diff written to stdout.
        # fix: quote the filename/metric expansions so paths with spaces
        # (or an error message in $ncc) cannot be word-split
        ncc=$(compare -metric NCC "$i" "$j" diff.png 2>&1 >/dev/null)
        # exit status 1 = images are dissimilar (0 = similar, 2 = error)
        if [ $? -eq 1 ]
        then
            printf "%s\t%s\t%f\n" "$i" "$j" "$ncc"
        fi
    done
done
rm diff.png
| true
|
dab457ba5cd73e7bf8ac4fc6ea5ef29507c4f3fb
|
Shell
|
martinwguy/delia-derbyshire
|
/anal/run.sh
|
UTF-8
| 4,647
| 3.359375
| 3
|
[] |
no_license
|
#! /bin/sh
# run: Convert the spectrograms in James Percival's thesis back into audio,
# a driver program for run.c.
#
# Copyright (c) Martin Guy <martinwguy@gmail.com> 2016.
#
# https://dl.dropboxusercontent.com/u/41104723/71040190-MUSC40110-DeliaDerbyshire.pdf
# http://wikidelia.net/wiki/Delia_Derbyshire's_Creative_Process
# and extract the spectrograms with "pdfimages *.pdf image"
#
# Usage: sh run.sh [options] [filename.png]
# Options:
# --fill Set all FFT input values by interpolating from input points
# --floor N Noise floor; truncate all amplitudes below -N dB to zero.
# --fps N interpolate between pixel columns to give N columns per second
# --partials Tell "run" to dump the first 10 audio frames for debugging
set -e # Exit if anything fails unexpectedly
# Offset of spectrogram graphic in image file (= width of left panel)
groffset=91
# Set noise floor to -N decibels. Leave unset to use full range.
floor=
# Set frames per second to interpolate to
fps=
# Process option flags
# NOTE(review): flags are only consumed while more than one argument
# remains, so a flag given as the sole/last argument is treated as the
# filename -- confirm whether that is intended.
while [ $# -gt 1 ]; do
case "$1" in
--fill) fillflag="--fill";;
--floor) floor="$2"; shift;;
--fps) fps="$2"; shift;;
--partials) partialsflag=--partials;;
*) break ;;
esac
shift
done
# The companion binary is this script's own path minus the ".sh" suffix.
run="$(echo "$0" | sed 's/\.sh$//')"
test -x "$run" || {
echo "Cannot find "run" binary"
exit 1
}
[ "$floor" ] && floorflag="--floor $floor"
[ "$fps" ] && fpsflag="--fps $fps"
# Table of spectrograms, one record per pair of echo lines, read below as:
# dbmin dbmax fmin fmax fmaxat groffset duration file-stem
(
echo -n "-73 -13 5 651 28 91 2.25 "
echo "Fig 0.1 Music of the Brisbane School 2.25s"
echo -n "-63 -4 5 949 27 91 11 "
echo "Fig II.4 CDD-1-7-37 2'49\"-3'00\" Water makeup"
echo -n "-65 -13 257 5882 23 91 2.4 "
echo "Fig III.2 CDD-1-7-67 4'53.2\"-4'55.6\" Way Out makeup type A - rhythm pattern"
echo -n "-58 -9 23 515 15 91 2.4 "
echo "Fig III.3 CDD-1-7-67 4'53.2\"-4'55.6\" Way Out makeup type B - bass pattern"
echo -n "-69 -9 70 18562 28 97 9 "
echo "Fig III.4 CDD-1-7-67 7'59\"-8'08\" Way Out makeup type D - upward-trending melody"
echo -n "-71 -11 23 3914 30 91 12 "
echo "Fig III.5 CDD-1-7-67 10'39\"-10'51\" Way Out makeup type F - sinewave oscillator"
echo -n "-67 -9 46 3515 20 91 100 "
echo "Fig III.7 CDD-1-7-68 1'21\"-2'51\" Pot Au Feu early version"
echo -n "-73 -13 16 6809 30 91 60 "
echo "Fig IV.2 Putative synthesis using DD334 calculated partials"
echo -n "-56 -7 11 6808 30 91 240 "
echo "Fig IV.3 CDD-1-3-5 0'32\"-4'32\" Lowell"
echo -n "-71 -12 11 6972 19 91 150 "
echo "Fig IV.4 CDD-1-6-3 15'54\"-18'26\" Random Together I"
#echo -n "-73 -0 16 6809 30 91 240 "
#echo "test"
) | while read dbmin dbmax fmin fmax fmaxat groffset duration filestem
do
# A single png file as parameter limits processing to that file.
[ "$1" -a "$1" != "$filestem".png ] && continue
echo $filestem
imagefile="$filestem".png
audiofile="$filestem".wav
if [ "$floor" ]; then
audiofile="$filestem"-floor-"$floor".wav
fi
# Measure the total size of the graphic in pixels.
# width includes the width of the legend area on the left while the
# height of the image is the same as the height of the graph data.
width=`identify "$imagefile" | sed -n 's/.* \([1-9][0-9]*\)x\([1-9][0-9]*\) .*/\1/p'`
test "$width" || exit 1
height=`identify "$imagefile" | sed -n 's/.* \([1-9][0-9]*\)x\([1-9][0-9]*\) .*/\2/p'`
test "$height" || exit 1
# Extract frequency scale
convert "$imagefile" -crop 1x`expr $height - 39 - 20`+32+39 scale$$.png
# Extract raw spectrogram
convert "$imagefile" -crop `expr $width - $groffset`x$height+${groffset}+0 graph$$.png
# Figure out the frequency represented by the top row of pixels.
# In the Percival spectrograms, the frequency represented by the bottom row
# of pixels is marked but the highest value marked in the legend is for the
# pixels row $fmaxat pixels below the top one.
# To figure out the frequency of the top pixel row, calculate the number of
# Hz per pixel from the two known points and project this to the top point.
# The lowest pixel row (at y=height-1) represents $fmin and
# the $fmaxat-th pixel row represents $fmax so each pixel row represents a
# band of frequencies (fmax-fmin) / ((height-1)-$fmaxat) Hz wide.
hz_per_pixel_row=`echo "($fmax - $fmin)/(($height - 1) - $fmaxat)" | bc -l`
# and the top pixel is $fmaxat pixels above the highest marked frequency
ftop=`echo "$fmax + $fmaxat * $hz_per_pixel_row" | bc -l`
$run $fillflag $floorflag $fpsflag $partialsflag \
$dbmin $dbmax $fmin $ftop $duration graph$$.png scale$$.png "$audiofile"
rm -f graph$$.png scale$$.png
done
| true
|
033a46bc5bee92d0de244efab062fbd87b3153e0
|
Shell
|
lliurex/lliurex-up
|
/lliurex-up-core/usr/share/lliurex-up/commonScripts/fix-apt-pinning
|
UTF-8
| 391
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install or remove the LliureX APT pinning preferences file.
#   initActions    - (re)copy the shipped pinning config into apt's dir
#   initActionsSai - delete the installed pinning file if it is present
PINNING_FILE="/usr/share/lliurex-up/lliurex-pinning.cfg"
PINNING_DEST="/etc/apt/preferences.d/lliurex-pinning"
ACTION="$1"

if [ "$ACTION" = "initActions" ]; then
	cp -f "$PINNING_FILE" "$PINNING_DEST"
elif [ "$ACTION" = "initActionsSai" ]; then
	if [ -f "$PINNING_DEST" ]; then
		rm -f "$PINNING_DEST"
	fi
fi
| true
|
c63d3053fe70c9b512edebf7866c3a8f4e679f3a
|
Shell
|
miki2525/SystemyOperacyjne-zadania
|
/s20687/zajecia3/kodpocztowy.sh
|
UTF-8
| 116
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Report whether the first argument looks like a Polish postal code (NN-NNN).
# fix: the original file carried a duplicated '#!/bin/bash' line.

# Echo a verdict for a single candidate string ($1).
check_postal_code() {
	if [[ $1 =~ ^[0-9]{2}-[0-9]{3}$ ]]
	then
		echo "Podales kod pocztowy"
	else
		echo "Blad"
	fi
}

check_postal_code "$1"
| true
|
4e53ff82a143d5455609c1d93eb99559f1b9e467
|
Shell
|
joshuaulrich/settings-configs
|
/bin/df
|
UTF-8
| 369
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# df wrapper: hide snap squashfs mounts from the output unless the
# caller passes --snap.
# https://unix.stackexchange.com/a/258514

# Strip the --snap option from the argument list so it is not passed on
# to the real df; remember whether it was present.
for arg do
  shift
  if [ "$arg" = "--snap" ]; then
    snap="SET"
    continue
  fi
  set -- "$@" "$arg"
done

# fix: "$@" must be quoted, otherwise filename arguments containing
# spaces are split into separate words before reaching df
if [ -n "$snap" ]; then
  /bin/df "$@"
else
  /bin/df "$@" | grep -v "snap"
fi
| true
|
ef87e30e7f1bb40b4a65540e305188b13964d36b
|
Shell
|
alexsynytskiy/vidkruvai
|
/vagrant/provision/once-as-root.sh
|
UTF-8
| 3,148
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant provisioning script (run once, as root): configures swap,
# locales, timezone, then installs and configures the full LEMP/Node
# stack for the vidkruvai project.
#== Import script args ==
# fix: useless 'echo' round-trip -- assign the first argument directly
timezone="$1"
#== Bash helpers ==
# Print a highlighted progress message.
function info {
echo " "
echo "--> $1"
echo " "
}
#== Provision script ==
info "Provision-script user: $(whoami)"
info "Allocate swap for MySQL"
fallocate -l 2048M /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
echo '/swapfile none swap defaults 0 0' >> /etc/fstab
info "Configure locales"
update-locale LC_ALL="C"
dpkg-reconfigure locales
info "Configure timezone"
echo "${timezone}" | tee /etc/timezone
dpkg-reconfigure --frontend noninteractive tzdata
info "Update OS software"
add-apt-repository -y ppa:ondrej/php
apt-get update -y -qq
apt-get install -y python-software-properties openssl libssl-dev libssl-dev build-essential
echo "Done!"
info "Install additional software"
apt-get install -y git vim screen curl unzip memcached redis-server supervisor mc grc wget swftools poppler-utils htop cron
echo "Done!"
info "Install Nginx"
apt-get install -y nginx
echo "Done!"
info "Install MySQL"
# Pre-seed the root password so the install is non-interactive.
debconf-set-selections <<< "mariadb-server mysql-server/root_password password \"'vagrant'\""
debconf-set-selections <<< "mariadb-server mysql-server/root_password_again password \"'vagrant'\""
apt-get install -y mariadb-server
echo "Done!"
info "Install PHP"
apt-get install -y php7.0 php7.0-fpm php7.0-mbstring php7.0-mcrypt php7.0-phpdbg php7.0-dev php7.0-curl php7.0-sqlite3 php7.0-json php7.0-gd php7.0-cli php7.0-mysql php7.0-xml
echo "Done!"
info "Install NodeJS and NPM"
apt-get -y install npm
curl -sL https://deb.nodesource.com/setup_5.x | sudo -E bash -
apt-get install -y nodejs
echo "Done!"
info "Install PhantomJs"
npm install -g phantomjs
echo "Done!"
info "Install CasperJs"
npm install -g casperjs
echo "Done!"
info "Configure MySQL"
# Listen on all interfaces so the host machine can reach the DB.
sed -i "s/.*bind-address.*/bind-address = 0.0.0.0/" /etc/mysql/my.cnf
echo "Done!"
info "Configure PHP-FPM"
# Replace the stock configs with symlinks into the shared project tree.
mv /etc/php/7.0/fpm/php.ini /etc/php/7.0/fpm/php.ini.dmp
ln -s /var/www/vidkruvai/vagrant/php/fpm/php.ini /etc/php/7.0/fpm/php.ini
mv /etc/php/7.0/fpm/pool.d/www.conf /etc/php/7.0/fpm/pool.d/www.conf.dmp
ln -s /var/www/vidkruvai/vagrant/php/fpm/pool.d/www.conf /etc/php/7.0/fpm/pool.d/www.conf
sed -i 's/user = www-data/user = vagrant/g' /etc/php/7.0/fpm/pool.d/www.conf
sed -i 's/group = www-data/group = vagrant/g' /etc/php/7.0/fpm/pool.d/www.conf
sed -i 's/owner = www-data/owner = vagrant/g' /etc/php/7.0/fpm/pool.d/www.conf
echo "Done!"
info "Configure NGINX"
sed -i 's/user www-data/user vagrant/g' /etc/nginx/nginx.conf
echo "Done!"
info "Enabling site configuration"
ln -s /var/www/vidkruvai/vagrant/nginx/app.conf /etc/nginx/sites-enabled/app.conf
echo "Done!"
info "Enabling xdebug configuration"
mv /etc/php/7.0/mods-available/xdebug.ini /etc/php/7.0/mods-available/xdebug.ini.dmp
cp /var/www/vidkruvai/vagrant/php/mods-available/xdebug.ini /var/www/vidkruvai/vagrant/php/mods-available/xdebug-local.ini
ln -s /var/www/vidkruvai/vagrant/php/mods-available/xdebug-local.ini /etc/php/7.0/mods-available/xdebug.ini
echo "Done!"
info "Install composer"
curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer
| true
|
57896b50144db67e8199bac1cbf05554db9bc394
|
Shell
|
nickbrazeau/sWGA_Pv
|
/Pv_swga_Run0.sh
|
UTF-8
| 3,722
| 3.171875
| 3
|
[] |
no_license
|
####################################################
# sWGA primer design for Pf
# foreground: PvSal1 (v3.0/13.0)
# background: Human, GRCh38.p7 -- ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA_000001405.22_GRCh38.p7/GCA_000001405.22_GRCh38.p7_genomic.fna.gz
# exclusion: Human mitochondrial DNA, NC_012920.1 (included in GRCh38.p7 above) -- http://www.ncbi.nlm.nih.gov/nuccore/251831106
#
# - Created 7/26/16
#- Edited from Jonathan Parr for PvSal1
# - Designs Pv sWGA primers with the following characteristics:
#    - restricted Tm range 18-30 (like Hahn paper, intended to prevent nonspecific primer binding with phi29 30C elongation temp)
#    - excluded the human mitochondrial genome entirely (Hahn only excluded primers with >3 binding sites)
#    - broadened the total number of primers in a set to 5-10 (default was 2-7, for unclear reasons -- Hahn used two sets of 7)
#    - decreased maximum number of complimentary bp in each primer to 3 (the default, vs 4 for Hahn)
#    - summary: different from Hahn by using newer human genome, excluding human mito DNA altogether, allowing more primers in a set
####################################################
# Reference genome paths (cluster-local); symlinked into the cwd below.
ref_pv=/proj/meshnick/Genomes/PvSAL1_v13.0/PlasmoDB-13.0_PvivaxSal1_Genome.fasta
ref_human=/proj/meshnick/Genomes/Human_GRCh38/GRCh38p7_genomic.fasta
ref_human_mito=/proj/meshnick/Genomes/Human_GRCh38/NC_012920_1.fasta
# Before running this script:
# 1) launch a minicon environment using the command line as follows:
source activate swga_env
# 2) create symlinks for genomes into the working directory: "ln -s XXX.fasta" (from working directory)
# need to create symlinks b/c the program wants to create fasta index files and will get upset if there is already a fasta index there
###########################
# Step 1: BUILD PARAMETERS.CFG FILE (comment out when not needed -- this command only needs to be run once, in order to build the parameters.cfg file.)
# Define the foreground, background, and exclusionary genomes
# For "Pf":
# NOTE(review): these fasta names are the symlinks from step 2 above, not
# the $ref_* variables -- confirm they stay in sync with those paths.
swga init --fg_genome_fp PlasmoDB-13.0_PvivaxSal1_Genome_NOAAKM.fasta --bg_genome_fp GRCh38p7_genomic.fasta --exclude_fp NC_012920_1.fasta
##### Testing to see if we can get the primers to work
#--exclude_fp AAKM_contigs.fasta
# --bg_genome_fp Pf3D7_GRCh38.fasta
###########################
# Step 2: COUNT ALL POSSIBLE PRIMERS (comment out after this has been completed - this command only needs to be run once, in order to build the primer_db database of all possible primer sets.)
# Finds all primer sets of k base pairs that bind >=2 times to the foreground genome (default, looks at 5-12mers)
# For "Pv" set:
swga count --exclude_threshold 3
# For "Pv_mito3x" set (same as above but allows for primers to bind human mito 3 times or less, like Hahn):
#swga count --exclude_threshold 3
###########################
# Step 3: APPLY FILTERS (comment out after this has been completed - this command only needs to be run once, in order to filter the primers and select the 200 with lowest background binding freq.)
# Used Hahn's Tm range as above
# Note, in the past, have also had to increase the max gini coefficient from 0.6->1 -- but did not do this here
# the Gini coefficient describes how evenly distributed the primers are across the genome (1= very uneven, 0=perfect even distribution)
swga filter --max_tm 40 --min_tm 18 --max_gini 0.6
###########################
# Step 4: FIND SETS (comment out after this has been completed - this command only needs to be run once, in order to find primer pairs that don't form primer-dimers, are otherwise compatible, and meet additional customizable criteria.)
# Note, in the past, had to increase the max_fg_bind_dist to find primers
swga find_sets --max_size 10
| true
|
95df8bd0390f524e82f7c4697dcf258a357b1be4
|
Shell
|
chriszanf/Extension-Attributes
|
/HDSMARTStatus.sh
|
UTF-8
| 206
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Jamf extension attribute: report the SMART status of disk0.
# Script to report the SMART status on disk0
# Author : contact@richard-purves.com
# Version 1.0 : 1-11-2013 - Initial Version
# 'diskutil info' prints a line like "SMART Status: Verified"; the third
# whitespace-separated field is the status word.
# fix: modern $( ) command substitution instead of backticks.
echo "<result>$(diskutil info disk0 | grep SMART | awk '{print $3}')</result>"
| true
|
3f690c5267b9ac3bd42a4ba5a76ebebef47be371
|
Shell
|
hepcat72/bioconda-recipes
|
/recipes/takeabreak/build.sh
|
UTF-8
| 370
| 2.59375
| 3
|
[] |
permissive
|
#!/bin/bash
# conda build script for TakeABreak: point every compiler/linker search
# path at the anaconda build prefix, run the bundled installer, then
# stage the resulting binary into $PREFIX/bin.

# Header lookups resolve inside the prefix only.
export INCLUDE_PATH="${PREFIX}/include"
export CPATH="${PREFIX}/include"
export CFLAGS="-I${PREFIX}/include"
# Library lookups likewise.
export LIBRARY_PATH="${PREFIX}/lib"
export LD_LIBRARY_PATH="${PREFIX}/lib"
export LDFLAGS="-L${PREFIX}/lib"

mkdir -p "${PREFIX}/bin"

# Build via the project's own install script, then place the binary.
sh INSTALL
cp build/bin/TakeABreak "${PREFIX}/bin"
| true
|
8cebeed2a8702e5cc943e1e542a9de45091b123e
|
Shell
|
thefloweringash/packet-nix-builder
|
/reboot.sh
|
UTF-8
| 4,449
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env nix-shell
#!nix-shell -p curl -p jq -i bash
# Drain a Packet-hosted Hydra build machine, reboot it, wait for it to
# return, smoke-test remote Nix builds, then re-enable it in Hydra.
# Strict mode: abort on errors/unset vars; fail pipelines on any stage.
set -eu
set -o pipefail
# Look up a single-word option in ./build.cfg (format: "<key> <value>"
# per line).  Prints the value on stdout; exits the whole script with a
# usage hint if the key is missing or empty.
cfgOpt() {
    # fix: pass the key via awk -v instead of splicing it into the awk
    # program text, which broke on keys containing quotes/metacharacters
    ret=$(awk -v key="$1" '$1 == key { print $2; }' build.cfg)
    if [ -z "$ret" ]; then
        echo "Config option '$1' isn't specified in build.cfg" >&2
        echo "Example format:"
        echo "$1 value"
        echo ""
        exit 1
    fi
    echo "$ret"
}
# Credentials / target project come from build.cfg in the working directory.
PACKET_TOKEN=$(cfgOpt "packetKey")
PACKET_PROJECT_ID=$(cfgOpt "packetProjectId")
# Add the "skip-hydra" tag to Packet device $1 so Hydra stops scheduling
# jobs on it.  Reads the current tag list from the API, appends the tag,
# and PATCHes the result back.
drain() {
data=$((
curl \
--header 'Accept: application/json' \
--header 'Content-Type: application/json' \
--header "X-Auth-Token: $PACKET_TOKEN" \
--fail \
"https://api.packet.net/devices/${1}" \
| jq -r '.tags | .[]'
echo "skip-hydra"
# using jq -R . to convert the lines in to JSON strings,
# use jq -s . to convert the of JSON strings in to a JSON list
) | jq -R . | jq -s '{
id: $id,
tags: .
}' --arg id "$1")
# Write the augmented tag list back to the device (output discarded).
curl -X PATCH \
--data "${data}" \
--header 'Accept: application/json' \
--header 'Content-Type: application/json' \
--header "X-Auth-Token: $PACKET_TOKEN" \
--fail \
"https://api.packet.net/devices/${1}" 2> /dev/null > /dev/null
}
# Inverse of drain(): remove the "skip-hydra" tag from Packet device $1
# so Hydra starts scheduling jobs on it again.
restore() {
data=$((
curl \
--header 'Accept: application/json' \
--header 'Content-Type: application/json' \
--header "X-Auth-Token: $PACKET_TOKEN" \
--fail \
"https://api.packet.net/devices/${1}" \
| jq -r '.tags | .[]' | grep -v '^skip-hydra$'
# using jq -R . to convert the lines in to JSON strings,
# use jq -s . to convert the of JSON strings in to a JSON list
) | jq -R . | jq -s '{
id: $id,
tags: .
}' --arg id "$1")
# Write the filtered tag list back to the device (output discarded).
curl -X PATCH \
--data "${data}" \
--header 'Accept: application/json' \
--header 'Content-Type: application/json' \
--header "X-Auth-Token: $PACKET_TOKEN" \
--fail \
"https://api.packet.net/devices/${1}" > /dev/null 2> /dev/null
}
# Print the number of Hydra jobs currently running on host $1, as
# reported by the status.nixos.org prometheus endpoint.
current_jobs() {
curl -q \
--header 'Accept: application/json' \
--fail \
"https://status.nixos.org/prometheus/api/v1/query?query=hydra_machine_current_jobs\{host=%22root@${1}%22\}" 2> /dev/null \
| jq -r '.data.result[0].value[1]'
}
# Ask the Packet API to reboot device $1.
reboot() {
curl \
--data '{"type": "reboot"}' \
--header 'Accept: application/json' \
--header 'Content-Type: application/json' \
--header "X-Auth-Token: $PACKET_TOKEN" \
--fail \
"https://api.packet.net/devices/${1}/actions"
}
# Positional arguments: Packet device id, hostname, SOS console ssh target.
id=$1
host=$2
sos=$3
echo "--- Draining ${id}..."
drain "${id}"
echo "Draining builds ..."
# Poll prometheus until the host reports zero in-flight Hydra jobs.
while [ $(current_jobs "$host") -gt 0 ]; do
echo -n "."
sleep 1
done
echo ""
echo "--- Rebooting ${id}..."
reboot "${id}"
echo "--- waiting for ${id} to go down"
# The host is considered down once ssh-keyscan stops returning keys.
while [ $(ssh-keyscan "$host" 2> /dev/null | wc -l) -gt 0 ] ; do
echo -n "."
done
echo ""
echo " ... down!"
echo "--- waiting for ${id} to come back up"
up=0
last_keyscan=$(date +%s)
keyscans_remaining=60
# Stream the SOS serial console (timestamped) while waiting; the coproc
# lets us poll ssh-keyscan in parallel with reading console output.
coproc SSH (ssh -o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
"${sos}" 2>&1 | while read -r line; do printf " %s %s\n" "$(date)" "$line"; done)
while [ $up -eq 0 ]; do
if read -t5 -r output <&"${SSH[0]}"; then
echo "$output"
fi
now=$(date +%s)
# Run a keyscan probe at most every 30 seconds.
if [ $((now - last_keyscan)) -gt 30 ]; then
last_keyscan=$now
if [ $(ssh-keyscan "$host" 2> /dev/null | wc -l) -eq 0 ] ; then
echo -n "."
keyscans_remaining=$((keyscans_remaining - 1))
# Still unreachable after 60 failed probes: issue another reboot.
if [ $keyscans_remaining -eq 0 ]; then
reboot "${id}"
keyscans_remaining=60
fi
else
up=1
fi
fi
done
kill "${SSH_PID}"
echo ""
echo " ... up!"
echo "--- testing remote Nix builds"
# Smoke test: build (and --check rebuild) 'hello' on the freshly booted
# host before handing it back to Hydra.
(
if ! ssh \
-o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
"root@$host" \
"export NIX_PATH=pkgs=https://nixos.org/channels/nixos-unstable/nixexprs.tar.xz; nix-build '<pkgs>' -A hello && nix-build '<pkgs>' --check -A hello"; then
echo "^^^ ... failed!"
exit 1
fi
) 2>&1 | sed -e 's/^/ │ /'
echo "--- adding back to hydra"
restore "${id}"
echo "--- ok!"
| true
|
dbfd902358cfb1b0475ca3e1607e8b1182bab779
|
Shell
|
Paralusion/PKGBUILDS
|
/EndeavourOS-archiso-builder/PKGBUILD
|
UTF-8
| 5,436
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Maintainer: EndeavourOS-Team <info@endeavouros.com>
# PKGBUILD that builds the EndeavourOS live ISO from the upstream
# EndeavourOS-archiso git repository (see build()/package() below).
pkgname=EndeavourOS-archiso
pkgver=0.9.1
pkgrel=1
pkgdesc="Build EndeavourOS-archiso"
arch=(any)
makedepends=(archiso mkinitcpio-archiso git squashfs-tools)
source=(git+https://github.com/endeavouros-team/EndeavourOS-archiso.git)
# 'SKIP' because the source is a moving git HEAD, not a fixed tarball.
sha512sums=('SKIP')
# Ensure the [endeavouros_calamares] repo is present in /etc/pacman.conf,
# offering to append it interactively if it is missing.  Returns 1 if the
# user declines or the privileged copy fails.
_check_repo_in_pacman_conf() {
local reponame="endeavouros_calamares"
local conf=/etc/pacman.conf
if [ -z "$(grep "^\[$reponame\]$" $conf)" ] ; then
echo "Sorry, repo [$reponame] is not found in $conf." >&2
read -p "Add it now and update system (Y/n)? " >&2
case "$REPLY" in
[yY]* | "")
# Append to a temp copy first, then install it with sudo.
local conftmp="$(mktemp -u "$HOME"/pacman.conf.tmp.XXXX)"
cp $conf $conftmp
cat <<EOF >> $conftmp
[$reponame]
Server = https://github.com/endeavouros-team/mirrors/releases/download/\$repo
EOF
sudo cp $conftmp $conf || { rm -f $conftmp ; return 1 ; }
rm -f $conftmp
;;
*)
return 1
;;
esac
fi
}
# Generate the $cleanup helper script, which removes build artifacts
# (package tarballs, src/pkg dirs) while leaving the finished ISO alone.
# Relies on $cleanup / $build_rootdir being set by the caller (build()).
_create_cleanup_script() {
# There may be lots of files to delete:
local installer_files=(
cleaner_script.sh chrooted_cleaner_script.sh calamares_switcher update-mirrorlist
pacstrap_calamares calamares_for_testers rank_pacman_key.sh
)
local file=run_before_squashfs.sh
if [ -r $file ] ; then
# Prefer the authoritative file list parsed out of run_before_squashfs.sh.
installer_files=($(grep "^mv " $file | grep "/usr/bin/$" | sed -e 's|^mv ||' -e 's| /usr/bin/$||'))
fi
installer_files+=(update-mirrorlist)
cat <<EOF > "$cleanup"
#!/bin/bash
sudo rm -f $build_rootdir/${pkgname}-*.pkg.tar.*
sudo rm -rf $build_rootdir/${pkgname}
sudo rm -rf $build_rootdir/pkg
sudo rm -rf $build_rootdir/src
# pushd /usr/bin
# sudo rm -fv "${installer_files[@]}"
# popd
sync
rm -f \$(basename $cleanup) # underlying folder no more exists...
EOF
chmod +x "$cleanup"
}
# Ask (60 s timeout, defaulting to 'current') whether to build the ISO
# with the current or the test calamares package, then comment/uncomment
# the matching lines in packages.x86_64 accordingly.
_select_calamares_type() {
local timeout=60
while true ; do
read -t $timeout -p "[${timeout}sec] Build with calamares 'test' or 'current'? [current] " >&2
case "$REPLY" in
"" | current)
sed -i packages.x86_64 \
-e 's|^#[ ]*calamares_current[ ]*$|calamares_current|' \
-e 's|^[ ]*calamares_test[ ]*$|#calamares_test|'
break
;;
test)
sed -i packages.x86_64 \
-e 's|^[ ]*calamares_current[ ]*$|#calamares_current|' \
-e 's|^#[ ]*calamares_test[ ]*$|calamares_test|'
break
;;
esac
done
}
# Build the ISO: verify repo setup, write the cleanup helper, pick the
# calamares flavour, run the upstream build.sh, then move the resulting
# ISO next to this PKGBUILD and report where everything landed.
build() {
###################################################
# Check that we have calamares repo properly set up.
###################################################
_check_repo_in_pacman_conf
###################################################
# Create a cleanup script.
###################################################
local build_rootdir="$srcdir/.."
local cleanup="$build_rootdir/cleanup.bash"
local basedir=$srcdir/$pkgname
_create_cleanup_script
###################################################
# Build.
###################################################
cd $basedir
_select_calamares_type # current or test
sudo pacman -Syyu || return 1
sudo ./fix_permissions.sh
# Full build log is kept in the user's home directory.
sudo ./build.sh -v |& tee $HOME/iso-build-log.txt
cd ../..
sudo mv $srcdir/$pkgname/out/*.iso .
sudo chown $LOGNAME *.iso
sync
###################################################
# Show the result.
###################################################
cd "$build_rootdir"
echo ""
echo "#######################################################################"
echo ""
echo "The new ISO is here:"
ls -1 "$build_rootdir"/*.iso
echo ""
echo "To clean up all except the ISO, run command:"
echo "    bash $cleanup"
echo ""
echo "#######################################################################"
echo ""
}
# Publish step: create the sha512sum file and detached GPG signature for
# the newest non-devel ISO, plus (only on joekamprad's machine) a torrent
# for the tarred ISO.
package() {
cd $srcdir/..
local isofile=$(ls -1 endeavouros-*-x86_64.iso | grep -v devel | tail -n1)    # $isofile = endeavouros-<date>-x86_64.iso
local isobase=$(basename $isofile .iso)                                       # $isobase = endeavouros-<date>-x86_64
local date=$(echo $isofile | sed 's|endeavouros-\([0-9][0-9.]*\)-.*|\1|')     # date from the ISO file name
echo "Create the sha512sum file:"
sha512sum $isofile > $isofile.sha512sum
echo "Create the signature file:"
gpg --default-key info@endeavouros.com \
--detach-sign \
--output $isofile.sig \
$isofile
if [ "$HOME" = "/home/joekamprad" ] ; then
echo "Create the torrent file:"
mktorrent -p -v \
--announce=udp://tracker.openbittorrent.com:80 \
-a udp://tracker.torrent.eu.org:451/announce \
-a udp://thetracker.org:80/announce \
-a udp://tracker.dutchtracking.com:6969/announce \
-a udp://tracker.opentrackr.org:1337/announce \
-c $isobase.tar \
-n $isobase.tar \
-o $isobase.tar.torrent \
-w https://mirror.alpix.eu/endeavouros/iso/$isofile \
$HOME/Documents/$isobase.tar
fi
echo Done.
}
| true
|
4e1ae082e2ababba9ba7ffff81683643bde260db
|
Shell
|
bah-insignia/zcmd
|
/devutils/zcmd_helpers/global/_cat_default_env
|
UTF-8
| 153
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Print the default docker environment file, preferring the ZCMD_HOME
# install location and falling back to ~/zcmd.
# fix: '[ -n ... ]' instead of the double-negative '[ ! -z ... ]', and
# quoted paths so directories containing spaces are handled.
if [ -n "$ZCMD_HOME" ]; then
   cat "$ZCMD_HOME/devutils/default-docker-env.txt"
else
   cat "$HOME/zcmd/devutils/default-docker-env.txt"
fi
| true
|
ea60494df35fe105359f2123d77319a1d8ec7ecd
|
Shell
|
celsworthy/Installer
|
/Common/bin/RoboxDetector.linux.sh
|
UTF-8
| 535
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Robox detector. Works with or without the presence of robox.rules
# Prints the device node of each attached Robox printer matching the
# given name/id fragments, or NOT_CONNECTED when none is present.
# Arguments: $1 = name fragment, $2 = id fragment (both matched,
# case-insensitively, against the udev symlink list in ttyACM mode).
if [ -e /etc/udev/rules.d/robox.rules ]
then
robox_device="/dev/robox*"
is_ttyACM=
else
robox_device="/dev/ttyACM*"
is_ttyACM=1
fi
name=$1
id=$2
# Unquoted on purpose: let the shell expand the glob into device nodes.
for device in $robox_device
do
# If the glob matched nothing it stays literal, so $device equals the
# pattern itself -- that means no candidate device exists.
if [[ $device = "$robox_device" ]]
then
echo NOT_CONNECTED
exit
fi
if [[ $is_ttyACM ]]
then
# Generic ttyACM node: confirm via udev symlinks that it is a Robox.
poss=`udevadm info --query=symlink --name=$device | grep -i $name | grep -i $id`
else
# /dev/robox* nodes were already filtered by the udev rule.
poss=1
fi
if [[ $poss ]]
then
echo $device >/dev/stdout
echo " "
fi
done
| true
|
080c51cebb075c05202fba0cd5b3bb8f18076660
|
Shell
|
aks60808/COMP9044
|
/9044AS1/legit/test08.sh
|
UTF-8
| 835
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/dash
# author: Heng-Chuan Lin (z5219960@unsw.edu.au)
# class: 9041 soft-con
# file description: file checking
# written in 14/07/2019
# Integration test for the "legit" toolchain: verifies that checkout
# switches working-file contents between branches correctly.
if [ -d .legit ]
then
    rm -r .legit # just in case
fi
./legit-init
touch a
echo 'unchanged' >a
./legit-add a
./legit-commit -m first
./legit-branch b1
echo 'changed' > a
./legit-checkout b1
out1=$(cat a)
out2=$(cat .legit/repo/b1/index/a)
# fix: quote the operands of 'test' -- with empty or multi-word file
# content the unquoted comparisons were runtime syntax errors
if [ "$out1" != 'changed' ]
then
    echo "error: a in curdir should be 'changed'"
    exit 1
fi
if [ "$out2" != 'unchanged' ]
then
    echo "error: a in index of b1 should be 'unchanged'"
    exit 1
fi
./legit-commit -a -m sec
./legit-checkout master
out3=$(cat a)
if [ ! -f a ]
then
    echo " a should be in cur dir"
    exit 1
fi
if [ "$out3" != 'unchanged' ]
then
    echo " a in cur dir should be 'unchanged' "
    exit 1
fi
echo "done the script - passed"
rm a
rm -r .legit
| true
|
84f52790cdbd5dbdd3db62652e2a2f3e9658b79a
|
Shell
|
ZhuOS/CDManager
|
/cdManager.sh
|
UTF-8
| 4,738
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
#update
#insert/remove/alter
#find
#play
#task: 添加Tracks时,自动添加Records
#*********************************************************
# insertRecordLine NAME TYPE COMPOSER
# Append a new CD line to Records.file with an auto-generated catalog
# number CD<n> (n = current line count + 1), keeping the file sorted.
# No-op when the CD already exists (the matching line is printed by grep
# as a side effect of the existence check).
insertRecordLine(){
cdName=$1
cdType=$2
cdComposer=$3
if grep -w -h "$cdName" Records.file; then
echo "The Record exists."
return
else
set $(wc -l Records.file) #number of lines in records.file
catalogNum=$1
catalogNum=$(($catalogNum+1))
echo "CD${catalogNum} $cdName $cdType $cdComposer" >> Records.file
cat Records.file | sort > temp.file
mv temp.file Records.file
echo "Insert New CD $cdName Success."
return
fi
}
# insertTrackLine CD-NAME TRACK-NAME
# Append a track to Tracks.file under the CD's catalog number, creating a
# placeholder record for the CD first when it is not in Records.file yet.
# Track numbers are sequential per catalog number.
insertTrackLine(){
cdName=$1
trackName=$2
recordLine=$(grep -h $cdName Records.file)
if [ -z "$recordLine" ] #if no found, add the CD to Records.file
then
insertRecordLine $cdName Type-xxx Composer-xxx
fi
recordLine=$(grep -h $cdName Records.file)
set $recordLine
catalogNum=$1 # catalog number, e.g. CDxxx
# check whether this exact track already exists
if grep -h "$trackName$" Tracks.file > temp.file; then # track name present?
echo " Track Name Exist."
if grep -h $catalogNum temp.file; then # same catalog number too?
echo "The Track Exist." # track already exists, bail out
return
fi
fi
rm -f temp.file
echo " test $catalogNum" # NOTE(review): looks like leftover debug output
grep "^$catalogNum" Tracks.file > temp_insert_track.file
set $(wc -l temp_insert_track.file)
rm -f temp_insert_track.file
trackNum=$1 # next track number within this CD
trackNum=$(($trackNum+1))
echo "$catalogNum $trackNum $trackName" >> Tracks.file
cat Tracks.file | sort > temp.file
mv temp.file Tracks.file
return
}
# Prompt for a new CD's details (name, type, composer on one line) and
# insert them into Records.file.
insertRecords(){
echo -e -n "CD-Name Type Composer\n:"
read lineRecord
insertRecordLine $lineRecord #insert line to Record.file
return
}
# Prompt for a CD name and track name and insert them into Tracks.file.
insertTracks(){
echo -e -n "CD-Name Track-Name\n:"
read lineTrack
insertTrackLine $lineTrack #insert line to Tracks.file
return
}
#****************************************************
# Prompt for a CD name; delete its line from Records.file and all of its
# tracks (matched by catalog number, taken from positional $1 after set)
# from Tracks.file.
removeRecords(){
echo -n "Remove CD-Name: "
read cdName
if grep -w $cdName Records.file > /dev/null; then
cdLine=$(grep -h $cdName Records.file)
set $cdLine
grep -h -v -w "$cdName" Records.file > temp.file
mv temp.file Records.file
grep -h -v -w $1 Tracks.file > temp.file
mv temp.file Tracks.file
echo "Remove CD Success"
return
else
echo "No Such CD"
return
fi
}
# Prompt for a track name and delete every matching line from Tracks.file.
removeTracks(){
echo -n "Remove Track: "
read trackName
if grep -w $trackName Tracks.file > /dev/null; then
grep -h -v -w $trackName Tracks.file > temp.file #need to be optimized
mv temp.file Tracks.file
echo "Remove Track Success"
return
else
echo "No Such Track"
return
fi
}
#******************************************************
# test
# Replace the Records.file line for a chosen catalog number with a line
# typed in by the user, then re-sort and de-duplicate the file.
alterRecords(){
echo -n "Choose Catalog-Number: "
read catalogNum
grep -h "^$catalogNum" Records.file
grep -v "^$catalogNum" Records.file > temp.file
echo -n "new Records infomation: "
read newCDInfo
echo $newCDInfo >> temp.file
cat temp.file | sort | uniq > Records.file
rm -f temp.file
return
}
# test
# Placeholder: altering tracks is not implemented yet.
alterTracks(){
echo -n "alterTracks"
return
}
#*********************************************************
# Show the update submenu and dispatch to the chosen insert/remove/alter
# action; loops until a valid choice is made, then returns after one
# action completes.
updateCD(){
while true
do
cat <<!UPDATE!
1.insert records
2.insert tracks
3.remove records
4.remove tracks
5.alter records
6.alter tracks
!UPDATE!
echo -n ":"
read updateChoice
case "$updateChoice" in
1 ) insertRecords
break;;
2 ) insertTracks
break;;
3 ) removeRecords
break;;
4 ) removeTracks
break;;
5 ) alterRecords
break;;
6 ) alterTracks
break;;
* ) echo "Enter the valid choice";;
esac
done
return
}
#***********************************************************
# Prompt for a track name and print every Tracks.file line that ends
# with it (preceded by whitespace).
findCD() { # known issue: same track name on different CDs all match
echo -n "Track-Name: "
read trackName
if grep -h [[:blank:]]$trackName$ Tracks.file
then
:
else
echo "NO FOUND"
fi
return
}
#************************************************************
# stop playing tracks
# SIGINT handler: clear the play flag so the playCD loop terminates.
stop() {
playEn=false
echo "stop $*"
return
}
# play tracks
# Prompt for an existing track, then "play" it by printing a line with
# the current time every second until Ctrl+C triggers the stop() trap.
playCD() {
playEn=true
while true
do
echo -n "Track-Name: "
read trackName # known issue: whitespace/control-character input misbehaves
trackLine=$(grep -h "$trackName$" Tracks.file)
if [ -z "$trackLine" ]; then
echo "No Such Track."
continue
else
break
fi
done
echo "ctl+c to stop"
trap 'stop $trackLine' INT
while $playEn
do
set $(date)
echo "|| $trackLine ($5)"
sleep 1
done
return
}
# main function
# variable
playEn=true
echo "Welcome to CDManager helper! Please choose which help you want to get."
# main loop
while true; do
cat <<!MENU!
1.update
2.find
3.play
4.exit
!MENU!
echo -n ":"
read menuChoice
case "$menuChoice" in
1 ) updateCD;;
2 ) findCD;;
3 ) playCD;;
4 ) break;;
* ) echo "wrong input";;
esac
done
exit 0
| true
|
a68daceb2b59a2574db7ca8541d68f41798e2655
|
Shell
|
MediaArea/MediaInfoLib
|
/Project/zlib/Compile.sh
|
UTF-8
| 1,176
| 3.828125
| 4
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"NCSA",
"LGPL-2.0-or-later",
"curl",
"Zlib",
"GPL-1.0-or-later",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
#! /bin/sh
# Fetch (if needed) and build zlib v1.2.8 into ../../Source/zlib.
# All script arguments are forwarded to zlib's ./configure.
# Initialization
test -d ../../Source || mkdir -p ../../Source
zlib_source=../../Source/zlib
##########################################################################
# Fetch if necessary
if test -e "$zlib_source/configure"; then
echo
echo The source of zlib are presents
echo
else
echo
echo Downloading zlib...
echo
rm -fr "$zlib_source"
git clone -b "v1.2.8" https://github.com/madler/zlib "$zlib_source"
if test -e "$zlib_source/configure"; then
echo
echo zlib downloaded, compiling it
echo
else
echo
echo Error while downloading zlib
echo
exit 1
fi
fi
##########################################################################
# Already compiled
if test -e "$zlib_source/zlib/zlib.a" || test -e "$zlib_source/zlib/zlib.la"; then
echo
echo zlib is already compiled, recompiling it
echo
fi
##########################################################################
# Compile
# BUG FIX: abort if cd fails instead of running make in the wrong dir.
cd "$zlib_source" || exit 1
echo
echo Compiling zlib...
echo
# BUG FIX: use "$@" (not $*) so configure arguments containing spaces
# survive word-splitting.
./configure "$@"
make clean
make
# In the previous version:
mkdir zlib
cp zlib.h zlib
cp zconf.h zlib
unset -v zlib_source
| true
|
1428ba6bfceab45e6a9533f5cb86e678fe681be3
|
Shell
|
cocomdidin/morbis
|
/Run Docker/update-docker.sh
|
UTF-8
| 536
| 3.265625
| 3
|
[] |
no_license
|
# Update this morbis checkout: pull the current branch when a git repo is
# present, otherwise clone the repository fresh.
dir=.
# BUG FIX: test for the .git directory itself — '[ -d . ]' is always
# true, so the fallback branch below could never trigger.
if [ -d "$dir/.git" ]; then
branch=$(git --git-dir "$dir/.git" branch | sed -n -e 's/^\* \(.*\)/\1/p')
status=$(git --git-dir "$dir/.git" --work-tree=$dir status)
else
# BUG FIX: keep $branch empty here. The original stored the message
# '.git dir not found' in $branch, which is non-empty, so the clone
# path below was unreachable and 'git pull origin ".git dir not found"'
# would have run instead.
branch=''
status=''
fi
echo
echo "* Folder: $dir/.git"
echo "* Branch: ${branch:-.git dir not found}"
echo "* Status:"
echo
echo "$status"
echo
if [ -z "$branch" ]
then
git clone https://github.com/cocomdidin/morbis.git
else
git pull origin "$branch"
fi
sleep 2
echo ''
echo ''
echo 'Docker Already Updated....'
echo 'Mantap Slur.....'
sleep 2
| true
|
ae2582d665ac95fd7f10b025bdc108ac0f809bd8
|
Shell
|
waynegzw/CloudComputing
|
/Project4_3/deploy.sh
|
UTF-8
| 5,085
| 3.46875
| 3
|
[] |
no_license
|
#! /bin/bash
#This is the script to help you deploy kafka+samza cluster on EMR
# join_arr SEP ARG... — print the remaining arguments joined by the
# single-character separator SEP.
join_arr() {
  local IFS
  IFS=$1
  shift
  echo "$*"
}
home="/home/hadoop"
cd "$home"
echo "Enter the full path of pem file (ex:/home/hadoop/yourpem.pem):"
read pemfile
if [ ! -f "$pemfile" ]
then
echo "pem file not found. Please put your pem file under /home/hadoop/ folder."
exit
else
chmod 400 "$pemfile"
fi
#Check git
if [ ! -f /usr/bin/git ]
then
echo -e "\033[34minstall git now...\e[0m"
sudo yum install git
else
echo -e "\033[92mgit installed, good!\e[0m"
fi
#Check maven
if [ ! -d apache-maven-3.3.3/ ]
then
echo -e "\033[34minstall maven now...\e[0m"
wget http://www.eu.apache.org/dist/maven/maven-3/3.3.3/binaries/apache-maven-3.3.3-bin.tar.gz
tar -zxf apache-maven-3.3.3-bin.tar.gz
fi
echo -e "\033[92mmaven installed, good!\e[0m"
#Set environment
echo " ">> .bashrc
echo "export HADOOP_YARN_HOME=/usr/lib/hadoop-yarn" >> .bashrc
echo "export PATH=$PATH:/home/hadoop/apache-maven-3.3.3/bin" >> .bashrc
export PATH=$PATH:/home/hadoop/apache-maven-3.3.3/bin
export HADOOP_YARN_HOME=/usr/lib/hadoop-yarn
. .bashrc
sudo curl http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/resources/capacity-scheduler.xml?view=co > /etc/hadoop/conf/capacity-scheduler.xml
#Download samza
echo -e "\033[34minstall samza now...\e[0m"
git clone http://git-wip-us.apache.org/repos/asf/samza.git
cd samza
./gradlew clean publishToMavenLocal
#Download hello-samza
cd "$home"
git clone git://git.apache.org/samza-hello-samza.git hello-samza
echo -e "\033[34minstall hello-samza project now...\e[0m"
cd hello-samza
mvn clean package
mkdir -p deploy/samza
tar -xvf ./target/hello-samza-0.10.0-dist.tar.gz -C deploy/samza
hadoop fs -rm /hello-samza-0.10.0-dist.tar.gz
hadoop fs -put ./target/hello-samza-0.10.0-dist.tar.gz /
# Rewrite the samza job properties to point at this cluster's HDFS path
# and the master's zookeeper/kafka endpoints (stale values stripped first).
sed -i '/yarn.package.path=/d' deploy/samza/config/wikipedia-feed.properties
sed -i '/systems.kafka.consumer.zookeeper.connect=/d' deploy/samza/config/wikipedia-feed.properties
sed -i '/systems.kafka.producer.bootstrap.servers=/d' deploy/samza/config/wikipedia-feed.properties
hdfs_path=$(grep -o "hdfs:[^<]*" /etc/hadoop/conf/core-site.xml)
master_ip=$(expr "$hdfs_path" : "hdfs://\([^:]*\):")
echo yarn.package.path="$hdfs_path"/hello-samza-0.10.0-dist.tar.gz >> deploy/samza/config/wikipedia-feed.properties
echo systems.kafka.consumer.zookeeper.connect="$master_ip":2181/ >> deploy/samza/config/wikipedia-feed.properties
echo systems.kafka.producer.bootstrap.servers="$master_ip":9092 >> deploy/samza/config/wikipedia-feed.properties
echo -e "\033[92minstall hello-samza project complete\e[0m"
echo -e "\033[34mstart kafka and zookeeper...\e[0m"
bin/grid install kafka
bin/grid install zookeeper
echo delete.topic.enable=true >> deploy/kafka/config/server.properties
bin/grid start kafka
bin/grid start zookeeper
sudo service iptables save
sudo service iptables stop
sudo chkconfig iptables off
echo -e "\033[92mmaster node complete\e[0m"
# Deploy on all slave nodes
iplist=$(hdfs dfsadmin -report | grep ^Name | cut -f2 -d: | cut -f2 -d' ')
array=($iplist)
count=1
for private_ip in "${array[@]}"
do
echo -e "\033[34mdeploy kafka for slave node:$private_ip\e[0m"
scp -o stricthostkeychecking=no -i "$pemfile" "$home"/hello-samza/bin/grid hadoop@"$private_ip":/home/hadoop/
# install kafka on slave node
ssh -o stricthostkeychecking=no -i "$pemfile" hadoop@"$private_ip" 'mkdir -p hello-samza/bin;mv grid hello-samza/bin/;cd hello-samza;bin/grid install kafka'
# configure kafka
ssh -o stricthostkeychecking=no -i "$pemfile" hadoop@"$private_ip" 'sed -i ''/zookeeper.connect=/d'' /home/hadoop/hello-samza/deploy/kafka/config/server.properties;hdfs_path=$(grep -o "hdfs:[^<]*" /etc/hadoop/conf/core-site.xml);master_ip=$(expr "$hdfs_path" : "hdfs://\([^:]*\):");echo zookeeper.connect="$master_ip":2181 >> /home/hadoop/hello-samza/deploy/kafka/config/server.properties;echo delete.topic.enable=true >> /home/hadoop/hello-samza/deploy/kafka/config/server.properties;'
ssh -o stricthostkeychecking=no -i "$pemfile" hadoop@"$private_ip" "sed -i ''/broker.id=/d'' /home/hadoop/hello-samza/deploy/kafka/config/server.properties;echo broker.id=$count >> /home/hadoop/hello-samza/deploy/kafka/config/server.properties"
# start kafka
ssh -o stricthostkeychecking=no -i "$pemfile" hadoop@"$private_ip" 'cd /home/hadoop/hello-samza;bin/grid start kafka&'
# iptables
ssh -o stricthostkeychecking=no -i "$pemfile" hadoop@"$private_ip" 'sudo service iptables save;sudo service iptables stop;sudo chkconfig iptables off'
count=$((count+1))
done
# Collect every node's IP (slaves + this master), append :9092 to each,
# and print the comma-joined kafka broker list for the user to paste
# into their .properties file.
ownDns=`ifconfig eth0 | grep inet | grep -v inet6 | awk '{print $2}' | cut -d ':' -f2`
ipArray=($iplist)
ipArray+=($ownDns)
brokerArray=( "${ipArray[@]/%/:9092}" )
brokerList=`join_arr , "${brokerArray[@]}"`
echo -e "\033[92mThe IP of the master is: $ownDns\e[0m"
echo -e "\033[92mThe IP list of Samza brokers in the cluster is given below for your reference. Copy it for pasting into .properties file!\e[0m"
echo -e "\033[92m$brokerList\e[0m"
| true
|
4658a5cb6a3335810a8a26c2444c010a3eefcbf2
|
Shell
|
ArchersDeLaBretagneRomantique/Frontend
|
/.utility/deploy.sh
|
UTF-8
| 468
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
# Publish the built site: on non-pull-request Travis builds, push the
# contents of dist/ to the GitHub Pages repository (force-push master).
if [ "$TRAVIS_PULL_REQUEST" == "false" ]
then
echo -e "Starting deployment\n"
cd dist
git init
git config --global user.name "Travis"
git config --global user.email "benoit.travers.fr@gmail.com"
# GH_TOKEN is a repo-scoped secret; output is redirected so the token
# never appears in the build log.
git remote add origin https://${GH_TOKEN}@github.com/ArchersDeLaBretagneRomantique/ArchersDeLaBretagneRomantique.github.io.git > /dev/null
git add .
git commit -am "Travis build $TRAVIS_BUILD_NUMBER deployed"
git push origin master -fq > /dev/null
echo -e "Deployment done\n"
fi
| true
|
9a9a81cd1c830eb2de6faf989fa1a703c61519a4
|
Shell
|
san-nat/prime-simplereport
|
/start.sh
|
UTF-8
| 1,716
| 4.09375
| 4
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
###########
# This script is used for local development only.
# It will spin up a create-react-app dev server and
# a Spring Boot "bootRun" task, output server logs, and clean
# up the processes when exited.
###########
# ANSI color codes used to tag each service's log lines.
GREEN='\033[0;32m'
PURPLE='\033[0;35m'
# prepend LABEL COLOR — read lines from stdin and echo each one prefixed
# with a colored "LABEL:" tag so interleaved server logs stay readable.
prepend() {
  NC='\033[0m' # reset color
  local tag="${2}${1}:${NC}"
  while read line; do
    echo -e "$tag $line"
  done
}
# This function kills the server processes when the script is interrupted
# Takes the PID of the frontend server as an argument
cleanup() {
echo
echo "Script stopped, performing cleanup..."
kill $1 # kill frontend server
cd ${BACKEND_DIR}
# Stop and clean the Gradle build so no stale daemons or artifacts linger.
./gradlew --stop
./gradlew clean
rm -rf ${BACKEND_DIR}/.gradle/daemon # Daemons _cannot_ survive script shutdown
echo "Cleanup complete!"
}
# Get environment variables
set -o allexport
source .env
# Get dir paths
ROOT_DIR=$(pwd)
FRONTEND_DIR=${ROOT_DIR}/frontend
BACKEND_DIR=${ROOT_DIR}/backend
# Start frontend server
cd ${FRONTEND_DIR}
echo "Starting frontend server..."
BROWSER=none yarn start | prepend "frontend" $GREEN &
# NOTE(review): $! is the PID of the pipeline's last process (the prepend
# loop), not yarn itself — confirm that is the intended kill target.
NPM_PID=$!
echo "frontend server PID: ${NPM_PID}"
# Ensure the servers and Gradle daemons are torn down on any exit path.
trap "cleanup ${NPM_PID}" EXIT
# Build backend
cd ${BACKEND_DIR}
# Start a continuous build process and send to background
./gradlew --no-daemon -t build -x test -x checkstyleMain -x checkstyleTest -x spotlessCheck -x bootBuildInfo | prepend "backend" $PURPLE &
# Wait for initial build to complete
sleep 15
# Start bootRun without build. It will live reload when the previous process rebuilds
./gradlew --no-daemon -x build -x test -x checkstyleMain -x checkstyleTest -x spotlessCheck bootRun --args='--spring.profiles.active=dev' | prepend "backend" $PURPLE
| true
|
c7d5354da8772b68519c0b9748522c099513d275
|
Shell
|
novcn/rEFInd-Vintage-Flex
|
/render
|
UTF-8
| 13,782
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Render a rEFInd theme: recolor the background image and the icon set to
# the requested (or wal/xrdb-derived) foreground/background colors.
RVF_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
COLORSHIFT_URL="https://github.com/novcn/colorshift"
#
# Defaults options
#
declare HEADER_COLOR
declare ICON_COLOR
FONT="CozetteVector"
FONT_SIZE=28
VERBOSE=0
FOREGROUND="#000000"
BACKGROUND="#FFFFFF"
WAL=0
XRDB=0
HEADER=""
HEADER_BORDER="-"
HEADER_Y_BORDER_PADDING=2
HEADER_X_BORDER_PADDING=9
ICONS_ONLY=0
BACKGROUND_ONLY=0
ICONS_DIR="${RVF_DIR}/input/icons/white"
# trace MSG... — diagnostic output, printed only when --verbose is set.
trace() {
  [[ "$VERBOSE" == "1" ]] || return
  echo -e "\\033[93mtrace: \\033[97m$*\\033[39m"
}

# info MSG... — user-facing progress message with a colored arrow prefix.
info() {
  echo -e " \\033[96m--> \\033[97m$*\\033[39m"
}
# die [MSG] — with MSG: print it and exit 1. Without MSG: print the
# one-line usage, then the full help text, and exit 0.
die() {
# NOTE(review): '2>&1' here looks like it was meant to be '>&2'
# (send the error to stderr) — confirm intent.
echo "${1:-usage : ${0##*/} [ options ] }" 2>&1
[ "$1" ] && exit 1
cat > /dev/stdout <<EOF
${0} [ options ]
options:
-h | --help show help
-v | --verbose verbose mode
-f | --foreground color for rendered foreground objects, defaults to #000000
-b | --background color for rendered background objects, defaults to #FFFFFF
--header-color color for rendered text. falls back to --color if not set
--icon-color color for rendered icons. falls back to --color if not set
--wal use colors from wal cache (~/.cache/wal)
--xrdb use colors from xrdb (xrdb -query)
--header header or file with text to transpose on background image
--font font to use for transposed text. defaults to CozetteVector
--font-size font size to use for transposed text. defaults to 28
--header-border character to use as a border. defaults to empty
--y-boder-padding y-cartesian spaces around the border of text. defaults to 12
--x-boder-padding x-cartesian spaces around the border of text. defaults to 4
--disable-progress disable the progress bar
--icons <dir> directory to render icons from
--background-only only render the background
--icons-only only render the icons
EOF
exit 0
}
# TODO: add flag for choosing background image
# TODO: add flag for chosing icon set (if we ever move beyond https://www.deviantart.com/sworiginal/art/Lightness-for-burg-181461810)
# Parse command-line flags; each option overrides one of the defaults above.
# NOTE(review): the help text advertises --header-color / --header-border /
# --x|y-boder-padding, but the implemented flags are --text-color,
# --text-border and --text-x|y-border-padding — confirm which is intended.
while [[ "$#" -gt 0 ]]; do
trace "$1"
case "$1" in
-h|--help)
die
;;
-v|--verbose)
VERBOSE=1
;;
-f|--foreground)
FOREGROUND="$2"
shift
;;
-b|--background)
BACKGROUND="$2"
shift
;;
--font)
FONT="$2"
shift
;;
--font-size)
FONT_SIZE="$2"
shift
;;
--text-color)
HEADER_COLOR="$2"
shift
;;
--wal)
WAL=1
;;
--xrdb)
XRDB=1
;;
--icon-color)
ICON_COLOR="$2"
shift
;;
--header)
HEADER="$2"
shift
;;
--text-border)
HEADER_BORDER="$2"
shift
;;
--text-x-border-padding)
HEADER_X_BORDER_PADDING="$2"
shift
;;
--text-y-border-padding)
HEADER_Y_BORDER_PADDING="$2"
shift
;;
--icons-only)
ICONS_ONLY=1
;;
--background-only)
BACKGROUND_ONLY=1
;;
--disable-progress)
DISABLE_PROGRESS=1
;;
--icons)
ICONS_DIR="$2"
shift
;;
*)
die "Unknown parameter passed: $1"
;;
esac
shift
done
# Dump the final configuration when --verbose is active.
trace "VERBOSE : $VERBOSE"
trace "FOREGROUND : $FOREGROUND"
trace "BACKGROUND : $BACKGROUND"
trace "WAL : $WAL"
trace "XRDB : $XRDB"
trace "HEADER : $HEADER"
trace "HEADER_BORDER : $HEADER_BORDER"
trace "HEADER_Y_BORDER_PADDING : $HEADER_Y_BORDER_PADDING"
trace "HEADER_X_BORDER_PADDING : $HEADER_X_BORDER_PADDING"
trace "ICONS_ONLY : $ICONS_ONLY"
trace "BACKGROUND_ONLY : $BACKGROUND_ONLY"
trace "ICONS_DIR : $ICONS_DIR"
# count_files PATH... — print how many arguments were passed; used with
# an expanded glob to count matching files.
count_files() {
  echo "$#"
}
# Redraw a full-width progress bar at the top of the terminal, sized by
# how many icons have been written to tmp/ versus the total in ICONS_DIR.
# Skipped entirely in verbose mode or when --disable-progress was given.
update_progress() {
[ "$VERBOSE" == "0" ] && [ "$DISABLE_PROGRESS" != "1" ] && {
SCREEN_LENGTH=128
type tput &> /dev/null && {
SCREEN_LENGTH=$(tput cols)
}
# Cursor home position 0,0
printf '\e[H'
printf '['
total=$(count_files "${ICONS_DIR}/"*)
# Hack since count_files returns 1 when dir is empty
[ -z "$(ls ${RVF_DIR}/tmp)" ] \
&& current=0 \
|| current=$(count_files "${RVF_DIR}/tmp/"*.png)
progress=$((current * SCREEN_LENGTH / total))
remaining=$((SCREEN_LENGTH - progress - 2))
printf %${progress}s | tr " " "="
printf %${remaining}s
printf ']'
}
}
#
# Set foreground color from xrdb or wal if those options are passed
#
[ "$XRDB" == "1" ] && [ "$WAL" == "1" ] &&
die "error : invalid options. cannot pass both --xrdb and --wal"
[ "$XRDB" == "1" ] && {
FOREGROUND=$(xrdb -query | grep -P '\*foreground:' | awk '{ print $2 }')
BACKGROUND=$(xrdb -query | grep -P '\*background:' | awk '{ print $2 }')
}
[ "$WAL" == "1" ] && {
# BUG FIX: the wal cache lives under ~/.cache/wal — the original path
# said ~/.cache/wa, so the colors file could never be found.
WAL_FILE=$HOME/.cache/wal/colors.sh
[ ! -e "$WAL_FILE" ] && die "Could not find wal colors file at $WAL_FILE"
# colors.sh defines $foreground/$background among others.
source "$WAL_FILE"
FOREGROUND="$foreground"
BACKGROUND="$background"
}
trace "selected colors foreground: $FOREGROUND background: $BACKGROUND"
# Fetch the colorshift helper into util/ on first run; no-op when an
# executable copy is already present.
install_colorshift() {
[ -x "${RVF_DIR}/util/colorshift" ] || {
info "Installing colorshift script from $COLORSHIFT_URL"
git clone "$COLORSHIFT_URL" "${RVF_DIR}/tmp/colorshift"
cp "${RVF_DIR}/tmp/colorshift/colorshift" "${RVF_DIR}/util/colorshift"
chmod +x "${RVF_DIR}/util/colorshift"
}
}
# hex_to_rgb '#RRGGBB' — print the decimal channel values as "R G B".
# (r, g and b remain set as globals, matching the original behavior.)
hex_to_rgb() {
  local hex="${1#\#}"
  (( r = 16#${hex:0:2}, g = 16#${hex:2:2}, b = 16#${hex:4:2} ))
  echo "$r $g $b"
}
# within_threshold HEX LIMIT — succeed (0) when any RGB channel of HEX
# exceeds LIMIT, fail (1) otherwise.
within_threshold() {
  local hex="${1:?}" limit="${2:?}" channel
  for channel in $(hex_to_rgb "$hex"); do
    [ "$channel" -gt "$limit" ] && return 0
  done
  return 1
}
#
#
#
# Remove artifacts from previous render runs: everything in tmp/, the
# output icon set, the (then empty) icon directory, and any files left
# directly under output/ (e.g. background.png).
clean() {
info "cleaning up any past renderings"
# remove files in tmp
for file in "${RVF_DIR}/tmp/"*; do
trace "removing $file"
[ -d "$file" ] && rm -rf "$file" || rm -f "$file"
done
# remove output icons
for file in "${RVF_DIR}/output/icons/"*; do
trace "removing $file"
rm -f "$file"
done
# BUG FIX: the original tested "${RVF_DIR/output/icons}" (a pattern
# substitution on RVF_DIR, not a path) and only ran rmdir when the path
# did NOT exist, so the directory was never actually removed.
if [ -d "${RVF_DIR}/output/icons" ]; then
trace "removing directory ./output/icons"
rmdir "${RVF_DIR}/output/icons"
fi
# remove other output files
# BUG FIX: the original looped over output/icons/* a second time instead
# of output/*; skip directories so rm -f only sees plain files.
for file in "${RVF_DIR}/output/"*; do
[ -f "$file" ] || continue
trace "removing $file"
rm -f "$file"
done
}
#
# Build center aligned text
#
# Read $HEADER (a text file), centre each of its lines inside a box with
# '|' sides and dashed top/bottom rules, padded by HEADER_X/Y_BORDER_PADDING,
# and print the framed block to stdout for render_background to annotate
# onto the background image.
build_text() {
# First pass: find the longest line so the box width fits all content.
max_len=0
export IFS=$'\n'
while read line; do
[ "${#line}" -gt "$max_len" ] && max_len=${#line}
done < "$HEADER"
x_border=$(echo "$max_len" + "$HEADER_X_BORDER_PADDING" | bc)
printf " "
eval printf '–%.0s' {0..${x_border}}
printf "\n"
# Print padding above
for ((; i++ < "$HEADER_Y_BORDER_PADDING";)) {
printf "|";
eval printf '\ %.0s' {0..${x_border}};
printf "|\n"
}
# print each line shifted such that data is centered
while read line; do
line_len="${#line}"
justify_amt=$(((x_border - line_len) / 2 - 1))
x_right_border=$((x_border - line_len - justify_amt - 1))
# print each line prepending $justify_amt spaces
printf "|"
eval printf '\ %.0s' {0..${justify_amt}}
printf "%s" "$line"
eval printf '\ %.0s' {0..${x_right_border}}
printf "|\n"
done < "$HEADER"
# Print padding below
for ((; k++ < "$HEADER_Y_BORDER_PADDING";)) {
printf "|";
eval printf '\ %.0s' {0..${x_border}};
printf "|\n"
}
printf " "
eval printf '–\%.0s' {0..${x_border}}
printf "\n"
}
#
# Render passed $HEADER onto background if it exists, otherwise just copy over the chosen background
#
# Tints the stock background image with $BACKGROUND via ImageMagick, and
# when --header was given overlays the framed text from build_text using
# $FONT/$FONT_SIZE in the header (or foreground) color.
render_background() {
info "rendering background"
[ "$HEADER" != "" ] && {
text_color="${HEADER_COLOR:-${FOREGROUND}}"
output=$(build_text)
convert -fill "$BACKGROUND" -tint 100% "${RVF_DIR}/input/backgrounds/darkstatic.png" "${RVF_DIR}/tmp/background.png"
convert "${RVF_DIR}/tmp/background.png" \
-font "$FONT" -gravity North -fill "$text_color" -pointsize "$FONT_SIZE" -annotate +0+30 "$output" \
"${RVF_DIR}/output/background.png"
} || {
# TODO: tint works better when a lightened color is passed. I think to do this optimally we'd convert to HSL
# increase the light factor and then convert back to RGB hex. See https://www.rapidtables.com/convert/color/hsl-to-rgb.html
convert -fill "$BACKGROUND" -tint 100% "${RVF_DIR}/input/backgrounds/darkstatic.png" "${RVF_DIR}/output/background.png"
}
}
#
# For each color, compare it with the desired output color to get an RGB shift amount
#
# rgb_shift_amnt DESIRED INITIAL — print three signed integers: the
# per-channel (R G B) delta that moves INITIAL onto DESIRED. Hex channel
# arithmetic is delegated to bc with ibase=16.
rgb_shift_amnt() {
local desired=$(sed 's/#//g' <<< ${1:?} | tr '[:lower:]' '[:upper:]')
local initial=$(sed 's/#//g' <<< ${2:?} | tr '[:lower:]' '[:upper:]')
local output=""
for idx in $(seq 1 3); do
# fold -w2 splits the 6-digit hex string into its three channel pairs.
init=$(fold -w2 <<< "$initial" | awk "NR == $idx")
des=$(fold -w2 <<< "$desired" | awk "NR == $idx")
inter=$(echo "ibase=16; $init - $des" | bc | tr '\n' ' ')
inter=$(echo "$inter * -1" | bc) # TODO: simplify this
output="$output $inter"
done
echo "$output"
}
#
# Shift colors by rgb amounts
#
# shift_colors COLOR R G B — apply the per-channel deltas to COLOR via
# the colorshift helper and print the resulting hex color.
shift_colors() {
initial=${1:?}
initial=$(cut -c -7 <<< "$initial")
rshift=${2:?}
gshift=${3:?}
bshift=${4:?}
desired=$("$RVF_DIR/util/colorshift" -l -r "$rshift" -g "$gshift" -b "$bshift" "$initial")
echo "$desired"
}
#
# Render the icons with the passed FOREGROUND as a gradient so that the original icon's noise will be preserved w/ the new color
#
render_icons_gradient() {
info "rendering icons as gradient. this will take a long while..."
type fortune &> /dev/null && {
info "here's your fortune for you to mull over while you wait"
info ""
info $(fortune)
info ""
}
local color="${ICON_COLOR:-${FOREGROUND}}"
trace "initial color : $color"
for icon in "${ICONS_DIR}/"*; do
update_progress
icon_name=$(basename "$icon")
#info "rendering $icon_name"
cp "$icon" "${RVF_DIR}/tmp/$icon_name"
icon="${RVF_DIR}/tmp/$icon_name"
# Get every color in the image except for the color that appears most (in this case our background)
# TODO: there are some icons which the background color isn't the most prevelant. We'll have to update this for that.
# Should be able to just assume black is background
trace "loading initial colors for $icon"
# NOTE(review): 'rg' (ripgrep) is a non-standard dependency here — confirm
# it is guaranteed on target systems or swap for grep -v.
initial_colors=$(convert $icon txt:- | awk '{print $3}' | rg -v pixel | sort | uniq -c | sort -n | head -n -1 | awk '{print $2}')
most_prevalent=$(tail -n1 <<< "$initial_colors")
#rgb_shift_amnt "$most_prevalent" "$foreground"
trace "color : $color"
trace "most prevalent : $most_prevalent"
# Compute one per-channel shift from the dominant color to the target
# color, then apply that same shift to every bright color in the icon.
cshift=$(rgb_shift_amnt "$color" "$most_prevalent")
trace "cshift: $cshift"
rshift=$(awk '{print $1}' <<< "$cshift")
gshift=$(awk '{print $2}' <<< "$cshift")
bshift=$(awk '{print $3}' <<< "$cshift")
inter_icon=tmp/inter_${icon_name}
cp "$icon" "$inter_icon"
for initial in ${initial_colors[@]}; do
# Preserve each color's alpha byte while remapping its RGB part.
alpha=$(sed 's/#//g' <<< "$initial" | fold -w2 | tail -n1)
initial=$(cut -c -7 <<< "$initial")
within_threshold "$initial" 150 && {
desired=$("$RVF_DIR/util/colorshift" -l -r "$rshift" -g "$gshift" -b "$bshift" "$initial")
trace "mapping $initial -> $desired $inter_icon"
convert "$inter_icon" -fuzz 0% -fill "${desired}${alpha}" -opaque "${initial}${alpha}" "$inter_icon"
}
done
update_progress
mv "$inter_icon" "${RVF_DIR}/output/icons/$icon_name"
done
}
# rgb_extract_hue '#RRGGBBAA' — print the (rounded) hue of the color in
# degrees; 0 for greys (zero chroma).
# NOTE(review): assumes an 8-digit RGBA string — with a 6-digit color the
# alpha slice is empty and the (( )) errors; confirm callers.
rgb_extract_hue() {
: "${1/\#}"
((r=16#${_:0:2},g=16#${_:2:2},b=16#${_:4:2},a=16#${_:6:2}))
R=$(echo "scale=10; $r / 255" | bc)
G=$(echo "scale=10; $g / 255" | bc)
B=$(echo "scale=10; $b / 255" | bc)
max=$(printf '%s\n' "$R" "$G" "$B" | sort -nr | head -n1)
min=$(printf '%s\n' "$R" "$G" "$B" | sort -n | head -n1)
chroma=$(echo "scale=10; $max - $min" | bc)
lightness=$(echo "scale=2; ($max + $min) / 2" | bc)
if [[ "$chroma" == "0" ]]; then
echo "0"
return
elif [[ "$R" == "$max" ]]; then
# a(1)*4/3 = pi/3 (~60 deg in radians) per hue sextant.
# NOTE(review): this branch works in radians but the G/B branches use
# sextant counts; multiplying both by 57.3 yields ~115/229 deg instead
# of 120/240 — confirm the intended formula.
hue_radians=$(echo "a(1)*4/3 * ($G - $B) / $chroma" | bc -l)
elif [[ "$G" == "$max" ]]; then
hue_radians=$(echo "2 + ($B - $R) / $chroma" | bc -l)
else
hue_radians=$(echo "4 + ($R - $G) / $chroma" | bc -l)
fi
hue=$(echo "$hue_radians * 57.3" | bc | awk '{ print int($1+0.5) }')
echo "$hue"
}
# Recolor icons by rotating their hue toward the foreground color's hue
# using ImageMagick's -modulate; recolored copies are written into tmp/.
render_icons_hue_shift() {
local color="${ICON_COLOR:-${FOREGROUND}}"
foreground_hue=$(rgb_extract_hue "$FOREGROUND")
echo "foreground_hue : $foreground_hue"
for icon in "${ICONS_DIR}/"*; do
icon_name=$(basename "$icon")
echo "icon_name: $icon_name"
total_count=0
total_hue=0
IFS=$'\n'
# Walk the icon's HSV histogram (ignoring pure black) to estimate the
# icon's dominant hue.
# NOTE(review): hues are summed unweighted while pixel counts are
# accumulated, so initial_hue is hue-sum / pixel-count — confirm the
# intended weighting.
for line in $(convert "$icon" -colorspace HSV -format %c histogram:info:- | sort -t':' -gr -k 1); do
rgb=$(awk '{ print $3 }' <<< "${line/\#}" | cut -c -6)
if [[ "$rgb" != "000000" ]]; then
count=$(awk '{ print $1 }' <<< "${line/\:}")
hue=$(awk '{ print $2 }' <<< "$line" | sed -E 's/\(|\)//g' | awk -F ',' '{ print $1 }' | awk '{ print int($1+0.5) }')
((total_count += count))
((total_hue += hue))
fi
done
initial_hue=$((total_hue / total_count))
# NOTE(review): the next line overwrites the computed average with a
# hard-coded 146 — looks like leftover debugging; confirm before removing.
initial_hue=146
echo "initial_hue : $initial_hue"
hue_diff=$((foreground_hue - initial_hue))
echo "hue_diff : $hue_diff"
# -modulate takes hue as a percentage where 100 = unchanged and the
# range 0..200 maps to a -180..+180 degree rotation.
mod=$(((hue_diff * 100/180) + 100))
echo "mod : $mod"
convert "$icon" -modulate 100,100,"$mod" "${RVF_DIR}/tmp/${icon_name}"
echo ""
done
}
# Entry point for icon rendering: ensure the output dir exists, then run
# the currently selected recoloring strategy.
render_icons() {
mkdir -p "${RVF_DIR}/output/icons"
# TODO: The idea is that you might just want a solid color. If that's the case another function will have to be written / used
#render_icons_gradient
render_icons_hue_shift
}
# --- main: wipe old artifacts, ensure tooling, then render ---
clean
install_colorshift
trace "BACKGROUND_ONLY : $BACKGROUND_ONLY"
[ "$ICONS_ONLY" == 0 ] && render_background
[ "$BACKGROUND_ONLY" == 0 ] && render_icons
info "job's done"
| true
|
2b00c42585658ea12042bf87f40b73c7370dcd86
|
Shell
|
m4kvn/install
|
/installer/etc/python.sh
|
UTF-8
| 274
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Install pyenv (if missing) and ensure the pinned global Python version
# is installed and selected.
global_version=2.7.14
root=~/.pyenv
pyenv=$root/bin/pyenv
if [ ! -s "$root" ]; then
git clone https://github.com/yyuu/pyenv.git "$root"
fi
# BUG FIX: the original tested $global_versions (note the stray "s" —
# an undefined variable), so the check looked at $root/versions/ instead
# of the pinned version's directory.
if [ ! -s "$root/versions/$global_version" ]; then
"$pyenv" install "$global_version"
"$pyenv" global "$global_version"
fi
| true
|
2647c99c8028e0049166f111289342359f7ba39c
|
Shell
|
unlhcc/hcc-conda-recipes
|
/recipes/genomethreader/1.7.1/build.sh
|
UTF-8
| 661
| 2.640625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# conda build script for GenomeThreader: install the binaries into
# $PREFIX/bin, the bssm/gthdata data files into share/, and write
# activate/deactivate hooks exporting the BSSMDIR/GTHDATADIR environment
# variables the tools require at runtime.
mkdir -p ${PREFIX}/bin ${PREFIX}/share/${PKG_NAME}-${PKG_VERSION}
cp -R bin/bssm bin/gthdata ${PREFIX}/share/${PKG_NAME}-${PKG_VERSION}
cp bin/{gth,gthbssmbuild,gthbssmfileinfo,gthbssmrmsd,gthbssmtrain,gthcleanrec.sh,gthclean.sh,gthconsensus,gthfilestat,gthgetseq,gthsplit,gthsplit2dim.sh} ${PREFIX}/bin
mkdir -p ${PREFIX}/etc/conda/activate.d ${PREFIX}/etc/conda/deactivate.d
cat <<EOF >> ${PREFIX}/etc/conda/activate.d/gth.sh
export BSSMDIR=${PREFIX}/share/${PKG_NAME}-${PKG_VERSION}/bssm
export GTHDATADIR=${PREFIX}/share/${PKG_NAME}-${PKG_VERSION}/gthdata
EOF
cat <<EOF >> ${PREFIX}/etc/conda/deactivate.d/gth.sh
unset BSSMDIR GTHDATADIR
EOF
| true
|
b146f5f48a767f24c3f496a2111d20ec8a9f3499
|
Shell
|
PratikValvi/Shell_Programming
|
/Assignments/Programming Constructs/functions_prog/temp_conversion.sh
|
UTF-8
| 1,595
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Temperature converter: prompts the user to convert Celsius->Fahrenheit
# or Fahrenheit->Celsius, validating that the input lies between the
# freezing point (0 C / 32 F) and the boiling point (100 C / 212 F).
#   a. degF = (degC*9/5) + 32
#   b. degC = (degF-32) * 5/9
# FIXES: removed '-x' from the shebang (leftover debug tracing), removed
# 'return $degF' / 'return $degC' — bash 'return' only accepts integers,
# so returning "212.00" raised a runtime error on every call — and added
# a numeric guard so non-numeric input no longer aborts the (( )) test.
echo "Select your option by entering 'a' or 'b'"
echo "a. Convert Celsius to Fahrenheit"
echo "b. Convert Fahrenheit to Celsius"
read check_case
# Convert the global $degC to Fahrenheit; result in the global $degF.
_degC_F () {
degF=`echo "($degC*9/5)+32" | bc -l`
degF=`printf %.2f $degF`
}
# Convert the global $degF to Celsius; result in the global $degC.
_degF_C () {
degC=`echo "($degF-32)*5/9" | bc -l`
degC=`printf %.2f $degC`
}
case $check_case in
a)
echo "Celsius to Fahrenheit Converter"
echo "Enter any Celsius value from 0 to 100"
read input
if [[ $input =~ ^[0-9]+$ ]] && (( input >= 0 )) && (( input <= 100 ))
then
degC=$input
#Calling Function Celsius to Fahrenheit Converter
_degC_F
echo "The "$degC "Degree Celsius is "$degF "Degree Fahrenheit."
else
echo "Please enter any Celsius value from 0 to 100"
echo "Thank You"
fi
;;
b)
echo "Fahrenheit to Celsius Converter"
echo "Enter any Fahrenheit value from 32 to 212"
read input
if [[ $input =~ ^[0-9]+$ ]] && (( input >= 32 )) && (( input <= 212 ))
then
degF=$input
#Calling Function Fahrenheit to Celsius Converter
_degF_C
echo "The "$degF "Degree Fahrenheit is "$degC "Degree Celsius."
else
echo "Please enter any Fahrenheit value from 32 to 212"
echo "Thank You"
fi
;;
*)
echo "Please enter 'a' or 'b'"
echo "Thank You"
;;
esac
| true
|
1ebc225b3186bb0fdc2acf804d51afa32c5d46e2
|
Shell
|
astronasutarou/differential-backup
|
/diff_backup
|
UTF-8
| 3,464
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash -f
#############################################################
#
# A script for differential backup. You can make differential
# backups where you want to. The nomal usage of this script
# is discribed below:
#
# usage:
# ./backup target distination
#
#############################################################
#############################################################
# Environmental Variables
#
# CMD : a command for backup, default is `rsync`
# OPT : options for $CMD, default is `-avz`
# EXCLUDE: a list of the filenames excluded in backup
#############################################################
### define command for backup
CMD="rsync"
OPT="-avz"   # archive mode, verbose, compress during transfer
### define log directory
LOGDIR="/tmp"
### exclude rules
# Each entry becomes an `--exclude PATTERN` argument for rsync; the
# trailing '/' restricts the pattern to directories.
EXCLUDE_LIST=( \
'.ecryptfs/' \
'lost+found/' \
'Cache/' \
'.cache/' \
'cache/' \
);
###
#############################################################
# User Defined Functions
#
#############################################################
### usage
usage()
{ # Print a short usage synopsis to stderr and abort the script.
  cat <<EOF >&2
usage:
./backup targets destination
EOF
  # BUG FIX: the original used `exit -1`, which is not portable (exit
  # takes 0-255 and some shells reject a negative argument); use the
  # conventional "usage error" status instead.
  exit 2
}
### create a targets list
# Validate every argument except the last one (the destination) and
# append it to the global TARGETS array.  Exits the whole script via
# usage() on the first path that is missing or unreadable.
get_targets()
{
  while [ $# -gt 1 ]
  do
    # ${1%/} strips a single trailing slash so the tests also work on
    # arguments given as "dir/".
    if [[ ! ( -f ${1%/} || -d ${1%/} ) ]]; then
      echo "${0##*/}: cannot access ${1%/}: No such file or directory"
      echo "${0##*/}: abort backup procedure"
      usage; exit -1;
    fi
    if [[ ! ( -r ${1%/} ) ]]; then
      echo "${0##*/}: cannnot open directory ${1%/}: Permissoin denined"
      echo "${0##*/}: abort backup procedure"
      usage; exit -1;
    fi
    TARGETS=(${TARGETS[@]} ${1%/})
    shift
  done
  echo "# BACKUP TARGETS: ${TARGETS[@]}"
  # NOTE(review): returns 1 unconditionally; callers never check the
  # status, so this is harmless but misleading.
  return 1;
}
### set destination
# Shift down to the final positional argument and validate it as the
# backup destination directory; the result is stored in the global DEST.
set_destination()
{
  # Drop everything except the last argument.
  while [ $# -gt 1 ]; do shift; done;
  if [[ ! ( -d ${1%/} ) ]]; then
    echo "${0##*/}: cannot access ${1%/}: No such file or directory"
    echo "${0##*/}: abort backup procedure."
    usage; exit -1;
  fi
  if [[ ! ( -w ${1%/} ) ]]; then
    echo "${0##*/}: cannot backup in ${1%/}: Permission denied"
    echo "${0##*/}: abort backup procedure"
    usage; exit -1;
  fi
  DEST=${1%/}
  echo "# BACKUP DESTINATION: ${DEST}"
  # NOTE(review): unconditional `return 1`, never checked by callers.
  return 1;
}
#############################################################
# Main Procedure
#
#############################################################
## check the number of arguments
if [ $# -le 1 ]; then
  echo "${0##*/}: unsufficient number of arguments"
  usage
fi
## startup message
TIMESTAMP=`date +%Y%m%d%H%M`
LOGFILE="${LOGDIR}/${USER}.backup${TIMESTAMP}.log"
# Mirror all further stdout+stderr of this script into the log file.
exec > >(tee ${LOGFILE}) 2>&1
echo '###' `date` '###'
echo '# A differential backup script created by Ryou OHSAWA'
echo '#'
## set exclusion rules
for k in ${EXCLUDE_LIST[@]}
do
  EXCLUDE="${EXCLUDE} --exclude ${k}"
done
## set backup target names
get_targets $@
## set destination
set_destination $@
## search the latest backup folder
## if this finds the latest backup folder, this run rsync in
## differential backup mode (creating hardlink if files exist).
LATEST=`ls ${DEST} | sed -n '/bak$/p' | sort -n | tail -n 1`
LATEST="${DEST}/${LATEST}"
# NOTE(review): LATEST is "${DEST}/" even when no previous backup
# exists, so the -z guard below can never be false -- rsync then gets
# a --link-dest pointing at the destination root.  Confirm intent.
if [ ! -z "${LATEST}" ]
then
  LINK="--link-dest=${LATEST}"
  echo "# PREVIOUS BACKUP FILE: ${LATEST}"
fi
## backup folder
NEWBACKUP="${DEST}/${TIMESTAMP}.bak"
echo "# CURRENT BACKUP FILE: ${NEWBACKUP}"
echo ""
## do backup using rsync
for T in ${TARGETS[@]}
do
  ${CMD} ${OPT} ${EXCLUDE} ${LINK} ${T} ${NEWBACKUP}
done
echo ""
echo "# BACKUP FINISHED"
echo "# BACKUP LOGFILE: ${LOGFILE}"
| true
|
bd7f08ecf38a5daeb49e37d68eb7fc240041601d
|
Shell
|
akilans/Shell_Scripting_Tutorial
|
/1-Intro/7_grade_system.sh
|
UTF-8
| 571
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Grade system: read a mark (0-100) from the user and print the grade.
# BUG FIXES vs. original:
#   * shebang used backslashes ("#! \bin\bash") -- an invalid path;
#   * `echo "...\c"` does not suppress the newline in bash -> echo -n;
#   * a non-numeric or empty mark made every `[ -lt ]` test abort with
#     a syntax error -> input is now validated first.
# Print the grade band for mark $1 (expected integer 0-100).
grade_of () {
  local mark=$1
  # Reject anything that is not a non-negative integer.
  if ! [[ $mark =~ ^[0-9]+$ ]]; then
    echo "Enter mark bw 1-100"
    echo "Thank You"
    return 1
  fi
  if [ "$mark" -lt 35 ]; then
    echo "Failed!!!"
  elif [ "$mark" -eq 35 ]; then
    echo "Just Pass!!!"
  elif [ "$mark" -lt 50 ]; then
    echo "Average!!!"
  elif [ "$mark" -lt 75 ]; then
    echo "Good!!!"
  elif [ "$mark" -lt 90 ]; then
    echo "Very Good!!!"
  elif [ "$mark" -le 100 ]; then
    echo "Outstanding!!!"
  else
    echo "Enter mark bw 1-100"
    echo "Thank You"
  fi
}
echo -n "Enter your mark :"
read mark
grade_of "$mark"
| true
|
641718329cef8fc9076ae17bfe5adbcc109b0762
|
Shell
|
sitodav/before-november-2014
|
/C/C_Unix_Programming/SHELLSCRIPT/10_12_08/script.sh
|
UTF-8
| 292
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Decide whether an item costing $1 is affordable on a fixed salary and
# record COSTOSO/ECONOMICO ("expensive"/"cheap") in testo.txt.
# Wrapped in main so the missing-argument path can stop cleanly.
main () {
  if [ $# -ne 1 ]; then
    echo "passami il prezzo"
    # BUG FIX: the original only printed the message and fell through,
    # so `[ $stipendio -lt $1 ]` ran with an empty $1 and produced a
    # test syntax error.  Stop here instead.
    return 1
  fi
  local stipendio=1000
  if [ "$stipendio" -lt "$1" ]; then
    echo "Non puoi acquistare"
  else
    echo "Puoi acquistare"
    # BUG FIX: the original wrapped these echoes in backticks, which
    # pointlessly tried to execute their (empty, redirected) output as
    # a command; a plain redirected echo is the intended behavior.
    if [ "$1" -gt 500 ]; then
      echo "COSTOSO" > testo.txt
    else
      echo "ECONOMICO" > testo.txt
    fi
    cat testo.txt
    wc -c testo.txt
  fi
}
main "$@"
| true
|
bc02d3bc3c730036a56e239d64ae6bc0fcae3fa5
|
Shell
|
pi-geosolutions/georchestra-datadir-initializer
|
/root/docker-entrypoint.d/020-strings-replace
|
UTF-8
| 3,013
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Post-copy configuration step for a geOrchestra datadir: rewrites
# hostnames, database/LDAP/SMTP endpoints and credentials inside the
# files under $DEST_DIR via in-place sed replacements.
set -e
# Give earlier entrypoint steps time to finish populating $DEST_DIR.
# NOTE(review): a fixed sleep is racy -- confirm the ordering guarantee.
sleep 5
cd $DEST_DIR
#######################
# String replacements
#######################
# Load file_env script to allow passing sensitive data through secrets
# e.g. SLAPD_PASSWORD_FILE will be used to feed SLAPD_PASSWORD with the content
# of the file pointed by SLAPD_PASSWORD_FILE
source /docker-entrypoint.d/utils/file_env.sh
# file_env 'SLAPD_PASSWORD'
echo "proceeding to string replacements"
#
# # Replace scheme if set
# if [ ! -z "$HTTP_SCHEME" ]; then
# sed -i "s|scheme=.*|scheme=${HTTP_SCHEME}|" default.properties
# fi
# Replace FQDN if set
if [ ! -z "$GEORCHESTRA_FQDN" ]; then
  # geOrchestra <= docker-20.1
  find ./ -type f -exec sed -i "s|georchestra.mydomain.org|${GEORCHESTRA_FQDN}|" {} \;
  # geOrchestra > docker-20.1, docker-master
  find ./ -type f -exec sed -i "s|georchestra-127-0-1-1.traefik.me|${GEORCHESTRA_FQDN}|" {} \;
  if [ ! -z "$REPLACE_THIS_FQDN" ]; then
    find ./ -type f -exec sed -i "s|$REPLACE_THIS_FQDN|${GEORCHESTRA_FQDN}|" {} \;
  fi
  # security proxy redirection for /
  # fixes some issues in http -> https redirection + skips one redirection step
  # sed -i "s|defaultTarget=.*|defaultTarget=https://${GEORCHESTRA_FQDN}/home|g" security-proxy/security-proxy.properties
fi
if [ ! -z "$PGSQL_HOST" ]; then
  find ./ -type f -exec sed -i "s|pgsqlHost=.*|pgsqlHost=${PGSQL_HOST}|" {} \;
  sed -i "s|jdbc.host=.*|jdbc.host=$PGSQL_HOST|" geonetwork/geonetwork.properties
  # TODO: add support for a different geonetwork user DB password
fi
if [ ! -z "$LDAP_HOST" ]; then
  find ./ -type f -exec sed -i "s|ldapHost=.*|ldapHost=${LDAP_HOST}|" {} \;
  sed -i "s|ldap.url=.*|ldap.url=ldap://${LDAP_HOST}:389|" geonetwork/geonetwork.properties
fi
if [ ! -z "$SMTP_HOST" ]; then
  find ./ -type f -exec sed -i "s|smtpHost=.*|smtpHost=${SMTP_HOST}|" {} \;
fi
# Allow secrets usage by GEOSERVER_PRIVILEGED_USER_PASSWORD variable
file_env 'GEOSERVER_PRIVILEGED_USER_PASSWORD'
if [ ! -z "$GEOSERVER_PRIVILEGED_USER_PASSWORD" ]; then
  # Replaces the stock geoserver privileged-user password everywhere.
  find ./ -type f -exec sed -i "s|gerlsSnFd6SmM|${GEOSERVER_PRIVILEGED_USER_PASSWORD}|" {} \;
fi
# Allow secrets usage by LDAPADMIN_PASSWORD variable
file_env 'LDAPADMIN_PASSWORD'
if [ ! -z "$LDAPADMIN_PASSWORD" ]; then
  sed -i "s|ldapAdminPassword=.*|ldapAdminPassword=${LDAPADMIN_PASSWORD}|" default.properties
  sed -i "s|ldap.security.credentials=.*|ldap.security.credentials=${LDAPADMIN_PASSWORD}|" geonetwork/geonetwork.properties
fi
# Allow secrets usage by PGSQL_ADMIN_PASSWORD_FILE variable
file_env 'PGSQL_ADMIN_PASSWORD'
if [ ! -z "$PGSQL_ADMIN_PASSWORD" ]; then
  sed -i "s|pgsqlPassword=.*|pgsqlPassword=${PGSQL_ADMIN_PASSWORD}|" default.properties
fi
# Allow secrets usage by RECAPTCHA_PRIVATE_KEY variable
file_env 'RECAPTCHA_PRIVATE_KEY'
if [ ! -z "$RECAPTCHA_PRIVATE_KEY" ]; then
  sed -i "s|privateKey=.*|privateKey=${RECAPTCHA_PRIVATE_KEY}|" console/console.properties
fi
echo "GeOrchestra datadir (config) folder ready"
# debug command. TODO: remove when on production
cat $DEST_DIR/default.properties
| true
|
a6b0a6d59bcca761948d2ba22af3041ef64cb8b3
|
Shell
|
bowwowxx/gce-ha
|
/admin/update-elasticconfig.sh
|
UTF-8
| 848
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Rebuild the elasticsearch unicast host list from the GCE instances
# currently in "elastic-group" and propagate the updated config files.
filename=/home/app/elastic/elasticsearch.yml
tmp="tmp.yml"
els="\"pro-elastic-admin\""
# Append every elastic-group instance name, quoted, to the host list.
for machine in $(gcloud compute instances list | grep elastic-group | awk {'print $1'}); do
  els=$els,\"$machine\"
  # NOTE(review): elsold is accumulated here but never read afterwards.
  elsold=$elsold,\"$machine\"
done
echo new $els
# Extract the current host list between the [ ] brackets of the
# discovery.zen.ping.unicast.hosts line.
old=`cat $filename | grep discovery.zen.ping.unicast.host | awk -F'[' '{print $2}' | awk -F']' '{print $1}'`
echo old $old
sed 's/discovery.zen.ping.unicast.hosts: \['"$old"'\]/discovery.zen.ping.unicast.hosts: \['"$els"'\]/' $filename > $tmp
# Only rewrite the config when the host list actually changed.
if [ "$(diff $filename $tmp)" != "" ]
then
  mv $tmp $filename
  cp -f $filename /home/app/elasticsearch.yml
  # Worker copy: disable in-memory store and demote from master role.
  sed -i 's/index.store.type: memory/#index.store.type: memory/g' /home/app/elasticsearch.yml
  sed -i 's/node.master: true/node.master: false/g' /home/app/elasticsearch.yml
  rm -f $tmp
  echo "update config ok!"
fi
| true
|
08a56131a337536d64d15992a2c3744e50d5be73
|
Shell
|
hm1365166/opencsw
|
/csw/mgar/pkg/openssh/branches/openssh-5.4p1+lpk/files/cswopenssh
|
UTF-8
| 1,837
| 2.84375
| 3
|
[] |
no_license
|
#!/sbin/sh
# stolen from solaris 9 sshd script, more or less.
# Init script for the OpenCSW OpenSSH daemon: generates missing host
# keys on first start and manages the master sshd process.
# cswclassutils smf variable
#RC_KNUM 99
#RC_SNUM 99
#RC_KLEV 0,1,2,S
#RC_SLEV 3
#MANIFEST /var/opt/csw/svc/manifest/network/cswopenssh.xml
PATH=/usr/bin:/opt/csw/bin:/usr/sbin
KEYDIR=/opt/csw/etc/ssh
PIDFILE=/var/run/sshd.pid
case $1 in
'start')
  # Generate any missing host keys, then start sshd in the background.
  if [ -x /opt/csw/bin/ssh-keygen ]; then
    if [ ! -f "$KEYDIR/ssh_host_rsa_key" ]; then
      echo "Creating new RSA public/private host key pair"
      ssh-keygen -f $KEYDIR/ssh_host_rsa_key -t rsa -N ''
    fi
    if [ ! -f "$KEYDIR/ssh_host_dsa_key" ]; then
      echo "Creating new DSA public/private host key pair"
      ssh-keygen -f $KEYDIR/ssh_host_dsa_key -t dsa -N ''
    fi
  fi
  [ -x /opt/csw/sbin/sshd ] && [ -f /opt/csw/etc/ssh/sshd_config ] && /opt/csw/sbin/sshd &
  ;;
'stop')
  #
  # If we are switching Run level downwards then we disconnect
  # all connections.
  #
  # Otherwise we just kill the master daemon that is listening
  # and leave the connections active
  if [ -z "$_INIT_RUN_LEVEL" ]; then
    # Derive current/previous run levels from `who -r` when init did
    # not export them.
    set -- `/usr/bin/who -r`
    _INIT_RUN_LEVEL="$7"
    _INIT_PREV_LEVEL="$9"
  fi
  if [ $_INIT_RUN_LEVEL -lt $_INIT_PREV_LEVEL ]; then
    /usr/bin/pkill -u 0 -x sshd
  fi
  if [ -f "$PIDFILE" ]; then
    /usr/bin/kill -TERM `/usr/bin/cat $PIDFILE`
  fi
  ;;
'restart')
  # SIGHUP makes the listening master sshd re-read its configuration.
  if [ -f "$PIDFILE" ]; then
    /usr/bin/kill -HUP `/usr/bin/cat $PIDFILE`
  fi
  ;;
*)
  echo "Usage: $0 { start | stop | restart }"
  exit 1
  ;;
esac
| true
|
faa213cc7148fc323ec4f0a2ccc77e054a38dba6
|
Shell
|
Interbotix/interbotix_ros_manipulators
|
/interbotix_ros_uxarms/install/rpi4/uxarm_rpi4_install.sh
|
UTF-8
| 3,886
| 3.671875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Installer for the Interbotix UX-Arm ROS packages on a Raspberry Pi 4:
# picks the ROS 1 distro from the Ubuntu release, installs ROS, builds
# the interbotix catkin workspace and sets up environment variables.
ubuntu_version="$(lsb_release -r -s)"
# Map Ubuntu release -> ROS distro name.
if [ $ubuntu_version == "16.04" ]; then
  ROS_NAME="kinetic"
elif [ $ubuntu_version == "18.04" ]; then
  ROS_NAME="melodic"
elif [ $ubuntu_version == "20.04" ]; then
  ROS_NAME="noetic"
else
  echo -e "Unsupported Ubuntu verison: $ubuntu_version"
  echo -e "Interbotix Arm only works with 16.04, 18.04, or 20.04"
  exit 1
fi
echo "Ubuntu $ubuntu_version detected. ROS-$ROS_NAME chosen for installation.";
echo -e "\e[1;33m ******************************************** \e[0m"
echo -e "\e[1;33m The installation may take around 15 Minutes! \e[0m"
echo -e "\e[1;33m ******************************************** \e[0m"
sleep 4
start_time="$(date -u +%s)"
# Update the system
sudo apt update && sudo apt -y upgrade
sudo apt -y autoremove
# Install some necessary core packages
sudo apt -y install openssh-server curl
# Noetic is Python 3 only; older distros still use Python 2 pip.
if [ $ROS_NAME != "noetic" ]; then
  sudo apt -y install python-pip
  sudo -H pip install modern_robotics
else
  sudo apt -y install python3-pip
  sudo -H pip3 install modern_robotics
fi
# Step 1: Install ROS
if [ $(dpkg-query -W -f='${Status}' ros-$ROS_NAME-desktop-full 2>/dev/null | grep -c "ok installed") -eq 0 ]; then
  echo "Installing ROS..."
  sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'
  sudo apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654
  curl -s https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc | sudo apt-key add -
  sudo apt update
  sudo apt -y install ros-$ROS_NAME-desktop-full
  # Remove any stale rosdep sources list before (re-)initialising.
  if [ -f /etc/ros/rosdep/sources.list.d/20-default.list ]; then
    sudo rm /etc/ros/rosdep/sources.list.d/20-default.list
  fi
  echo "source /opt/ros/$ROS_NAME/setup.bash" >> ~/.bashrc
  if [ $ROS_NAME != "noetic" ]; then
    sudo apt -y install python-rosdep python-rosinstall python-rosinstall-generator python-wstool build-essential
  else
    sudo apt -y install python3-rosdep python3-rosinstall python3-rosinstall-generator python3-wstool build-essential
  fi
  sudo rosdep init
  rosdep update
else
  echo "ros-$ROS_NAME-desktop-full is already installed!"
fi
source /opt/ros/$ROS_NAME/setup.bash
# Step 2: Install Arm packages
INTERBOTIX_WS=~/interbotix_ws
if [ ! -d "$INTERBOTIX_WS/src" ]; then
  echo "Installing Interbotix ROS packages for the Universal Factory Xarm..."
  mkdir -p $INTERBOTIX_WS/src
  cd $INTERBOTIX_WS/src
  git clone https://github.com/Interbotix/interbotix_ros_core.git
  git clone https://github.com/Interbotix/interbotix_ros_manipulators.git
  git clone https://github.com/Interbotix/interbotix_ros_toolboxes.git
  cd interbotix_ros_manipulators && git checkout $ROS_NAME && cd ..
  # Removing CATKIN_IGNORE enables building the uxarm-related packages.
  rm interbotix_ros_core/interbotix_ros_uxarms/CATKIN_IGNORE
  rm interbotix_ros_manipulators/interbotix_ros_uxarms/CATKIN_IGNORE
  rm interbotix_ros_toolboxes/interbotix_ux_toolbox/CATKIN_IGNORE
  rm interbotix_ros_toolboxes/interbotix_rpi_toolbox/CATKIN_IGNORE
  rm interbotix_ros_toolboxes/interbotix_common_toolbox/interbotix_moveit_interface/CATKIN_IGNORE
  cd $INTERBOTIX_WS
  rosdep install --from-paths src --ignore-src -r -y
  catkin_make
  echo "source $INTERBOTIX_WS/devel/setup.bash" >> ~/.bashrc
else
  echo "Interbotix Universal Xarm ROS packages already installed!"
fi
source $INTERBOTIX_WS/devel/setup.bash
# Step 3: Setup Environment Variables
if [ -z "$ROS_IP" ]; then
  echo "Setting up Environment Variables..."
  # Appended verbatim (single quotes) so the IP is evaluated at login,
  # falling back to 127.0.0.1 when no interface is up.
  echo 'export ROS_IP=$(echo `hostname -I | cut -d" " -f1`)' >> ~/.bashrc
  echo -e 'if [ -z "$ROS_IP" ]; then\n\texport ROS_IP=127.0.0.1\nfi' >> ~/.bashrc
else
  echo "Environment variables already set!"
fi
end_time="$(date -u +%s)"
elapsed="$(($end_time-$start_time))"
echo "Installation complete, took $elapsed seconds in total"
echo "NOTE: Remember to reboot the computer before using the robot!"
| true
|
73d1043cd59d766b3b949bd53fc57e606ede24e8
|
Shell
|
renode/renode
|
/tools/packaging/conda/build.sh
|
UTF-8
| 4,304
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# conda-build script for the Renode package: stages mono/gtk-sharp
# dependencies per OS, builds Renode, installs it under
# $PREFIX/opt/renode and creates wrapper scripts in $PREFIX/bin.
set -x
if [[ "$(uname)" == 'Linux' ]]; then
  _os_name=linux
  install -D /bin/sed $BUILD_PREFIX/bin/sed
  # Install gtk-sharp2
  install -D /usr/lib/cli/gtk-sharp-2.0/gtk-sharp.dll* $BUILD_PREFIX/lib/mono/4.5-api/
  install -D /usr/lib/cli/glib-sharp-2.0/glib-sharp.dll* $BUILD_PREFIX/lib/mono/4.5-api/
  install -D /usr/lib/cli/atk-sharp-2.0/atk-sharp.dll* $BUILD_PREFIX/lib/mono/4.5-api/
  install -D /usr/lib/cli/gdk-sharp-2.0/gdk-sharp.dll* $BUILD_PREFIX/lib/mono/4.5-api/
  install -D /usr/lib/cli/pango-sharp-2.0/pango-sharp.dll* $BUILD_PREFIX/lib/mono/4.5-api/
  mkdir -p $PREFIX/opt/renode/bin
  cp /usr/lib/cli/gtk-sharp-2.0/gtk-sharp.dll* $PREFIX/opt/renode/bin/
  cp /usr/lib/cli/glib-sharp-2.0/glib-sharp.dll* $PREFIX/opt/renode/bin/
  cp /usr/lib/cli/atk-sharp-2.0/atk-sharp.dll* $PREFIX/opt/renode/bin/
  cp /usr/lib/cli/gdk-sharp-2.0/gdk-sharp.dll* $PREFIX/opt/renode/bin/
  cp /usr/lib/cli/pango-sharp-2.0/pango-sharp.dll* $PREFIX/opt/renode/bin/
  mkdir -p $PREFIX/lib/
  install -D /usr/lib/cli/gtk-sharp-2.0/libgtksharpglue-2.so $PREFIX/lib/libgtksharpglue-2.so
  install -D /usr/lib/cli/gdk-sharp-2.0/libgdksharpglue-2.so $PREFIX/lib/libgdksharpglue-2.so
  install -D /usr/lib/cli/glib-sharp-2.0/libglibsharpglue-2.so $PREFIX/lib/libglibsharpglue-2.so
  install -D /usr/lib/x86_64-linux-gnu/gtk-2.0/modules/libatk-bridge.so $PREFIX/lib/libatk-bridge.so
  # Strip absolute gtk-sharp paths so the dll maps resolve relatively.
  sed -i 's/\/usr\/lib\/cli\/.*-sharp-2.0\///g' $PREFIX/opt/renode/bin/*.dll.config
else
  _os_name=macos
  cp /Library/Frameworks/Mono.framework/Libraries/libatksharpglue-2* $PREFIX/lib/
  cp /Library/Frameworks/Mono.framework/Libraries/libgtksharpglue-2* $PREFIX/lib/
  cp /Library/Frameworks/Mono.framework/Libraries/libgdksharpglue-2* $PREFIX/lib/
  cp /Library/Frameworks/Mono.framework/Libraries/libglibsharpglue-2* $PREFIX/lib/
  mkdir -p $BUILD_PREFIX/lib/mono/4.5-api/
  find /Library/Frameworks/Mono.framework/Versions/5*/lib/mono/* -name 'gtk-sharp.dll*' -exec cp '{}' $BUILD_PREFIX/lib/mono/4.5-api/ ';'
  find /Library/Frameworks/Mono.framework/Versions/5*/lib/mono/* -name 'gdk-sharp.dll*' -exec cp '{}' $BUILD_PREFIX/lib/mono/4.5-api/ ';'
  find /Library/Frameworks/Mono.framework/Versions/5*/lib/mono/* -name 'atk-sharp.dll*' -exec cp '{}' $BUILD_PREFIX/lib/mono/4.5-api/ ';'
  find /Library/Frameworks/Mono.framework/Versions/5*/lib/mono/* -name 'glib-sharp.dll*' -exec cp '{}' $BUILD_PREFIX/lib/mono/4.5-api/ ';'
  find /Library/Frameworks/Mono.framework/Versions/5*/lib/mono/* -name 'pango-sharp.dll*' -exec cp '{}' $BUILD_PREFIX/lib/mono/4.5-api/ ';'
  cp /usr/lib/libc.dylib $PREFIX/lib/
fi
./build.sh
mkdir -p $PREFIX/opt/renode/bin
mkdir -p $PREFIX/opt/renode/scripts
mkdir -p $PREFIX/opt/renode/platforms
mkdir -p $PREFIX/opt/renode/tests
mkdir -p $PREFIX/opt/renode/tools
mkdir -p $PREFIX/opt/renode/licenses
cp .renode-root $PREFIX/opt/renode/
cp -r output/bin/Release/* $PREFIX/opt/renode/bin/
cp -r scripts/* $PREFIX/opt/renode/scripts/
cp -r platforms/* $PREFIX/opt/renode/platforms/
cp -r tests/* $PREFIX/opt/renode/tests/
cp -r tools/metrics_analyzer $PREFIX/opt/renode/tools
cp -r tools/execution_tracer $PREFIX/opt/renode/tools
cp -r tools/gdb_compare $PREFIX/opt/renode/tools
cp -r tools/sel4_extensions $PREFIX/opt/renode/tools
cp lib/resources/styles/robot.css $PREFIX/opt/renode/tests
tools/packaging/common_copy_licenses.sh $PREFIX/opt/renode/licenses $_os_name
# Point the test provider at the relocated robot.css (keep a .bak for
# portability of sed -i across GNU/BSD, then drop it).
sed -i.bak "s#os\.path\.join(this_path, '\.\./lib/resources/styles/robot\.css')#os.path.join(this_path,'robot.css')#g" $PREFIX/opt/renode/tests/robot_tests_provider.py
rm $PREFIX/opt/renode/tests/robot_tests_provider.py.bak
mkdir -p $PREFIX/bin/
# Wrapper launchers; quoted "EOF" keeps $CONDA_PREFIX etc. literal so
# they are expanded at run time, not at build time.
cat > $PREFIX/bin/renode <<"EOF"
#!/usr/bin/env bash
mono $MONO_OPTIONS $CONDA_PREFIX/opt/renode/bin/Renode.exe "$@"
EOF
cat > $PREFIX/bin/renode-test <<"EOF"
#!/usr/bin/env bash
STTY_CONFIG=`stty -g 2>/dev/null`
python3 $CONDA_PREFIX/opt/renode/tests/run_tests.py --robot-framework-remote-server-full-directory $CONDA_PREFIX/opt/renode/bin "$@"
RESULT_CODE=$?
if [ -n "${STTY_CONFIG:-}" ]
then
stty "$STTY_CONFIG"
fi
exit $RESULT_CODE
EOF
mkdir -p "${PREFIX}/etc/conda/activate.d"
cp "${RECIPE_DIR}/activate.sh" "${PREFIX}/etc/conda/activate.d/${PKG_NAME}_activate.sh"
| true
|
7d16d78ee33942e0d305b01d043e28a94160fbf3
|
Shell
|
MartinLidh/tddc78
|
/src/mpich-1.2.0/src/mpich-1.2.0/examples/test/pt2pt/testhetero
|
UTF-8
| 2,195
| 3.765625
| 4
|
[] |
no_license
|
#! /bin/sh
#
# This is a simple heterogeneous test which exploits the mpicc command and
# mpirun. This is an example of how heterogeneous programs may be built and
# run
#
#
# Parameters for all programs and systems
set -x
device=ch_p4
mpihome=../../..
rshcmd=rsh
programs="sendrecv sendrecv2 sendrecv3 sendrecv4 getelm"
# Extra files needed for each program.
sendrecvfiles="test.c"
sendrecvargs="-nolongdouble"
sendrecv2files="dtypes.c gcomm.c"
sendrecv3files="dtypes.c gcomm.c"
sendrecv4files="dtypes.c gcomm.c"
getelmfiles=""
#
#
# arch1 is local, arch2 is remote
arch1=sun4
arch2=freebsd
name2=dogbert
#
debug_args=""
fail_hard=1
rebuild=0
mpirun_args=""
for arg in "$@" ; do
  case "$arg" in
    -echo) set -x ;;
    -noclean) noclean=1 ;;
    -debug) debug_args="-p4dbg 99 -p4rdbg 99" ;;
    -mpichdebug) debug_args="$debug_args -mpichdebug" ;;
    -xxgdb) mpirun_args="-xxgdb" ;;
    -soft) fail_hard=0 ;;
    -force | -rebuild) rebuild=1 ;;
    -alpha) arch2=alpha ; name2=ptera ;;
    -help)
      echo "Test heterogeneous operation of MPICH with ch_p4 using"
      echo "the versions of MPICH built in the current tree."
      echo "Should be run on a sun4; it rsh's to other machines as"
      echo "necessary."
      exit 1
      ;;
    *) echo "Unrecognized argument $arg"
      exit 1
      ;;
  esac
done
#
arches="$arch1 $arch2"
#
mypwd=`pwd`
# Fixup for brain-dead automounters
mypwd=`echo $mypwd | sed s%/tmp_mnt%%g`
#
# Build local versions
if [ 1 = 1 ] ; then
  for pgm in $programs ; do
    # Indirect lookup: expands e.g. "sendrecvfiles" into $extrafiles.
    eval extrafiles=$"${pgm}files"
    $mpihome/lib/$arch1/$device/mpicc -o $pgm.$arch1 $pgm.c $extrafiles
  done
fi
#
# Build remote versions
for pgm in $programs ; do
  eval extrafiles=$"${pgm}files"
  $rshcmd -n $name2 "(cd $mypwd ; $mpihome/lib/$arch2/$device/mpicc \
-o $pgm.$arch2 $pgm.c $extrafiles)"
done
#
# Run the programs
# (%a in the program name is substituted by mpirun with each arch.)
for pgm in $programs ; do
  echo "Running $pgm..."
  eval extraargs=$"${pgm}args"
  $mpihome/lib/$arch1/$device/mpirun $mpirun_args \
    -arch $arch1 -np 1 -arch $arch2 -np 1 $pgm.%a $debug_args \
    $extraargs
done
#
# Remove the executables
if [ -z "$noclean" ] ; then
  for arch in $arches ; do
    for pgm in $programs ; do
      /bin/rm -f $pgm.$arch
    done
  done
fi
| true
|
ce8119ffe47c77ee8cefeb9625696f071af8e024
|
Shell
|
RobocupApollo3d/Robocup3dSoccer
|
/autotest/optimization/sample_start-optimization.sh
|
UTF-8
| 2,452
| 3.515625
| 4
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Run one optimization episode (kick or walk) against rcssserver3d and
# wait for the agent to write its fitness output file.
# Choose which optimization task to run
task="kick" # "walk"
# This script runs the simspark soccer simulator and an agent
# Set the agent and monitor port randomly, to allow for multiple agents per machine
# Note: $RANDOM returns a value from 0 to 32767, ports <= 1024 are reserved for root
# TODO: Instead of picking random ports purposely bind to available ports
export SPARK_SERVERPORT=$[$RANDOM + 1025] #3200
export SPARK_AGENTPORT=$[$RANDOM + 1025] #3100
echo -n "It is: "
date
echo -n "and I am on: "
hostname
echo "Agent port: $SPARK_AGENTPORT, Monitor port: $SPARK_SERVERPORT"
rcssserver3d --agent-port $SPARK_AGENTPORT --server-port $SPARK_SERVERPORT &
PID=$!
#To view task while it runs uncomment the following line
#<roboviz_start_script> --serverPort=$SPARK_SERVERPORT &
sleep 5
# Resolve script/params/output locations from $1 (agent type), $2
# (params file) and $3 (output file).
DIR_SCRIPT="$( cd "$( dirname "$0" )" && pwd )"
TYPE=$1
DIR_PARAMS="$( cd "$( dirname "$2" )" && pwd )"
DIR_OUTPUT="$( cd "$( dirname "$3" )" && pwd )"
PARAMS_FILE=$DIR_PARAMS/$(basename $2)
OUTPUT_FILE=$DIR_OUTPUT/$(basename $3)
# TODO: temporary fix!! get rid of it - because relative path dependency of skills
cd $DIR_SCRIPT/..
export LD_LIBRARY_PATH=./libs:$LD_LIBRARY_PATH
if [ $task == "kick" ]
then
  # FixedKick optimization task
  $DIR_SCRIPT/../agentspark --unum 2 --type $TYPE --paramsfile $DIR_SCRIPT/../paramfiles/defaultParams.txt --paramsfile $DIR_SCRIPT/../paramfiles/defaultParams_t$TYPE.txt --paramsfile $PARAMS_FILE --experimentout $OUTPUT_FILE --optimize fixedKickAgent --port $SPARK_AGENTPORT --mport $SPARK_SERVERPORT &
fi
if [ $task == "walk" ]
then
  # WalkForward optimization task
  $DIR_SCRIPT/../agentspark --unum 2 --type $TYPE --paramsfile $DIR_SCRIPT/../paramfiles/defaultParams.txt --paramsfile $DIR_SCRIPT/../paramfiles/defaultParams_t$TYPE.txt --paramsfile $PARAMS_FILE --experimentout $OUTPUT_FILE --optimize walkForwardAgent --port $SPARK_AGENTPORT --mport $SPARK_SERVERPORT &
fi
AGENTPID=$!
sleep 3
# Poll (1 Hz) for the agent's output file, up to maxWaitTimeSecs.
maxWaitTimeSecs=300
total_wait_time=0
while [ ! -f $OUTPUT_FILE ] && [ $total_wait_time -lt $maxWaitTimeSecs ]
do
  sleep 1
  total_wait_time=`expr $total_wait_time + 1`
done
if [ ! -f $OUTPUT_FILE ]
then
  echo "Timed out while waiting for script to complete, current wait time is $total_wait_time seconds."
else
  echo "Completed with a wait time of $total_wait_time seconds."
fi
# SIGINT (2) lets simulator and agent shut down cleanly.
echo "Killing Simulator"
kill -s 2 $PID
echo "Killing Agent"
kill -s 2 $AGENTPID
sleep 2
echo "Finished"
| true
|
89902910d058ad5c6e6c3f3801d5653d87ae17e2
|
Shell
|
caprice-j/dotfiles
|
/symboliclink.sh
|
UTF-8
| 794
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/sh
# Link the dotfiles kept in ~/dotfiles into their expected locations.
dotfiles_dir=~/dotfiles
# apt-get install exuberant-ctags
# ~/.emacs and ~/.emacs.el take precedence over ~/.emacs.d/init.el,
# so remove them before linking the init file.
rm -f ~/.emacs
rm -f ~/.emacs.el
ln -fs "${dotfiles_dir}/init.el"          ~/.emacs.d/init.el
ln -fs "${dotfiles_dir}/init-modeline.el" ~/.emacs.d/init-modeline.el
ln -fs "${dotfiles_dir}/tmux.conf"        ~/.tmux.conf
# mux start res
ln -fs "${dotfiles_dir}/skt.yml"          ~/.tmuxinator/skt.yml
ln -fs "${dotfiles_dir}/research.yml"     ~/.tmuxinator/research.yml
# MAYBE: I need to add xmodmap ~/.Xmodmap in .bashrc?
ln -fs "${dotfiles_dir}/Xmodmap"          ~/.Xmodmap
| true
|
8af4645d875936624e75f03f8c7e4e37323d2d68
|
Shell
|
smiglo/scripts-aka-dot-files
|
/bin/oth/tmux-local-install.sh
|
UTF-8
| 3,937
| 3.84375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash -e
# NOTE(review): option flags on an env-style shebang are not portable;
# prefer `set -e` in the body.
# Source: https://gist.github.com/ryin/3106801
# Script for installing tmux on systems where you don't have root access.
# tmux will be installed in $ROOT/local/bin.
# It's assumed that wget and a C/C++ compiler are installed.
# Initialize # {{{
TMUX_INSTALL_VERSION=${TMUX_INSTALL_VERSION:-'local:2.8'}
# A "local:X.Y" version means: use the tarball shipped in this repo.
if [[ $TMUX_INSTALL_VERSION == local:* ]]; then
  TMUX_INSTALL_VERSION=${TMUX_INSTALL_VERSION/'local:'}
  LOCAL_TARBALL=$MY_PROJ_PATH/scripts/bash/inits/tmux/src/tmux-src-${TMUX_INSTALL_VERSION}.tar.gz
fi
TAKE_CARE_OF_LIBS=false
ROOT=$HOME/.config/tmux-local
CLEAN_AFTER=false
if $TAKE_CARE_OF_LIBS; then # {{{
  LIBEVENT_VERSION=${LIBEVENT_VERSION:-'2.0.22-stable'}
  NCURSES_VERSION=${NCURSES_VERSION:-'6.0'}
fi # }}}
# Handle arguments # {{{
while [[ ! -z $1 ]]; do
  case $1 in
    --ver) shift; TMUX_INSTALL_VERSION=$1; LOCAL_TARBALL=;;
    # ";;&" lets --local/--local-repo fall through to their specific arms.
    --local | \
    --local-repo) shift; LOCAL_TARBALL=$1;;&
    --local) [[ $LOCAL_TARBALL != /* ]] && LOCAL_TARBALL="$PWD/$LOCAL_TARBALL"; TMUX_INSTALL_VERSION=$(echo "$LOCAL_TARBALL" | sed -e 's/.*tmux-//' -e 's/\.tar\.gz//');;
    --local-repo) TMUX_INSTALL_VERSION='repo';;
    --root) shift; ROOT=$1;;
    --libs) TAKE_CARE_OF_LIBS=true;;
    --clean) CLEAN_AFTER=true;;
  esac
  shift
done # }}}
# Prepare #{{{
ROOT=$(cd $ROOT; pwd)
# Archive any previous installation as local-<version>-<date>.
if [[ -e $ROOT/local ]]; then
  suffix=
  type tmux && suffix="$(tmux -V | cut -d\  -f2)-"
  suffix+="$(date +$DATE_FMT)"
  mv $ROOT/local $ROOT/local-${suffix}
fi
DST="$ROOT/local-$TMUX_INSTALL_VERSION"
command mkdir -p $DST $ROOT/tmux_tmp
cd $ROOT/tmux_tmp
# Download source files for tmux, libevent, and ncurses # {{{
if [[ -z $LOCAL_TARBALL ]]; then
  [[ ! -e tmux-${TMUX_INSTALL_VERSION}.tar.gz ]] && wget https://github.com/tmux/tmux/releases/download/${TMUX_INSTALL_VERSION}/tmux-${TMUX_INSTALL_VERSION}.tar.gz
else
  cp -a ${LOCAL_TARBALL%/} ./
  [[ $TMUX_INSTALL_VERSION == 'repo' ]] && mv ${LOCAL_TARBALL%/} tmux-${TMUX_INSTALL_VERSION}
fi
if $TAKE_CARE_OF_LIBS; then # {{{
  [[ ! -e release-${LIBEVENT_VERSION}.tar.gz ]] && wget https://github.com/libevent/libevent/archive/release-${LIBEVENT_VERSION}.tar.gz
  [[ ! -e ncurses-${NCURSES_VERSION}.tar.gz ]] && wget ftp://ftp.gnu.org/gnu/ncurses/ncurses-${NCURSES_VERSION}.tar.gz
fi # }}}
# }}}
# }}}
# }}}
# Extract files, configure, and compile # {{{
if $TAKE_CARE_OF_LIBS; then # {{{
  # LibEvent # {{{
  [[ ! -e libevent-release-${LIBEVENT_VERSION} ]] && tar xvzf release-${LIBEVENT_VERSION}.tar.gz
  cd libevent-release-${LIBEVENT_VERSION}
  ./autogen.sh
  ./configure --prefix=$DST --disable-shared
  make
  make install
  cd ..
  # }}}
  # NCureses # {{{
  [[ ! -e ncurses-${NCURSES_VERSION} ]] && tar xvzf ncurses-${NCURSES_VERSION}.tar.gz
  cd ncurses-${NCURSES_VERSION}
  export CPPFLAGS="-P"
  ./configure --prefix=$DST
  make
  make install
  export CPPFLAGS=
  cd ..
  # }}}
fi # }}}
# TMUX # {{{
[[ ! -e tmux-${TMUX_INSTALL_VERSION} ]] && tar xvzf tmux-${TMUX_INSTALL_VERSION}.tar.gz
cd tmux-${TMUX_INSTALL_VERSION}
[[ -e autogen.sh ]] && sh autogen.sh
# With --libs, build statically against the just-built local libraries.
if $TAKE_CARE_OF_LIBS; then # {{{
  ./configure CFLAGS="-I$DST/include -I$DST/include/ncurses" LDFLAGS="-L$DST/lib -L$DST/include/ncurses -L$DST/include"
  CPPFLAGS="-I$DST/include -I$DST/include/ncurses" LDFLAGS="-static -L$DST/include -L$DST/include/ncurses -L$DST/lib" make # }}}
else # {{{
  ./configure
  make
fi # }}}
cp tmux $DST/bin
# Install the man page and append the built version to it.
if [[ -e 'tmux.1' ]]; then
  [[ ! -e "$DST/share/man/man1" ]] && command mkdir -p "$DST/share/man/man1"
  cp 'tmux.1' "$DST/share/man/man1/"
  (
    echo ".Sh VERSION"
    echo ".An $(./tmux -V)"
  ) >>$DST/share/man/man1/tmux.1
fi
cd ..
# }}}
# }}}
# Clean up # {{{
$CLEAN_AFTER && rm -rf $ROOT/tmux_tmp
# ln -s $DST/bin $HOME/.bin/tmux-local-bin
echo
echo '--------------------------------------------------------------------------------'
echo 'DONE'
echo "$DST/bin/tmux is now available. You can optionally add $DST/bin to your PATH."
# }}}
| true
|
838d430033bdba3d86e062008c8b35789364432c
|
Shell
|
liutongchao/EducateMonitor
|
/auto.sh
|
UTF-8
| 1,695
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# iOS build script: archives the EducateMonitor workspace with gym
# (fastlane) and exports a timestamped Debug ipa.
# Start timing (the bash builtin SECONDS counts elapsed seconds)
SECONDS=0
#update Pods
#rm -rf Pods
#rm Podfile.lock
#pod update
# Assumes this script lives in the same directory as the project
project_path=$(pwd)
# Current-time string, appended to output file names
now=$(date +"%Y%m%d%H_%M%S")
# Scheme name of the project
scheme="EducateMonitor"
# Build configuration to package
configuration="Debug"
# Workspace path
workspace_path="$project_path/$scheme.xcworkspace"
# Output directory
output_path="/Users/$scheme/Dev/ipa"
# Archive (.xcarchive) output path
archive_path="$output_path/$scheme${now}.xcarchive"
# ipa file name
ipa_name="$scheme-${now}.ipa"
# Full ipa path
ipa_path="$output_path/$ipa_name"
# Export method (xcodebuild's "method" parameter); supported values:
# app-store, package, ad-hoc, enterprise, development, developer-id
export_method='development'
# Commit message passed as the first command-line argument
commit_msg="$1"
# Print the configured values
echo "===workspace path: ${workspace_path}==="
echo "===archive path: ${archive_path}==="
echo "===ipa path: ${ipa_path}==="
echo "===profile: ${provisioning_profile}==="
echo "===commit msg: $1==="
# Clean the previous build, then archive and export via gym (fastlane)
gym --workspace ${workspace_path} --scheme ${scheme} --clean --configuration ${configuration} --archive_path ${archive_path} --export_method ${export_method} --output_directory ${output_path} --output_name ${ipa_name}
# If the ipa was not generated, exit non-zero so the CI job is marked failed
if [ -f $ipa_path ]
then
  echo "Generate $ipa_path successfully!"
else
  echo "Generate $ipa_path fail!"
  exit 1
fi
# Upload to fir.im (currently disabled)
#fir publish ${ipa_path} -T fir_token -c "${commit_msg}"
# Print total elapsed time
echo "===Finished. Total time: ${SECONDS}s==="
| true
|
071070b3ced500cb335cca431f2aa5f176ef44c0
|
Shell
|
mammut/dotfiles
|
/bootstrap
|
UTF-8
| 485
| 3.25
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/usr/bin/env zsh
# Bootstrap dotfiles: symlink everything in ./dotfiles into $HOME and
# install zsh (antibody), tmux (tpm) and vim (Vundle) plugins.
{
  # set -xue # To enable debugging
  DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
  # Place to put our own scripts.
  # (Created first: ~/.bin/antibody is invoked below, so the directory
  # must exist before that step — the original created it last.)
  mkdir -p ~/.bin
  # Symlink every dotfile into $HOME (zsh's .* glob skips . and ..).
  for file in $(cd $DIR/dotfiles && echo .*); do
    ln -sfn $DIR/dotfiles/$file ~/$file
  done;
  # Fetch zsh antibody plugins
  ~/.bin/antibody bundle < ~/.zsh_plugins.txt > ~/.zsh_plugins.sh
  # Install tmux plugins
  bash ~/.tmux/plugins/tpm/bin/install_plugins
  # Install vim plugins
  vim +PluginInstall +qall
}
| true
|
3b3e1ba37a415a0f582bdc9fb9cab5c87c782fab
|
Shell
|
rdpate/one-off
|
/delete-if-empty
|
UTF-8
| 975
| 3.890625
| 4
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/sh -ue
# delete-if-empty FILE..
#
# Remove each named file if it exists and is empty.  A symlink argument
# aborts the run; missing or non-empty files are silently left alone.

# Print "<script>: <message>" to stderr and exit with the given status.
fatal() {
    printf %s\\n "$(basename "$0"): $2" >&2
    exit "$1"
}
# Option dispatcher for the generic parser below.  This tool takes no
# options, so anything reaching here is rejected with EX_USAGE (64).
handle_option() {
    case "$1" in
        *)
            fatal 64 "unknown option: $1"
            ;;
    esac
}
# Generic option loop: handles "--" (end of options), "--opt=value",
# "--opt", single short options "-o", and short options with an
# attached value "-ovalue".
while [ $# -gt 0 ]; do
    case "$1" in
        --)
            shift
            break ;;
        --*=*)
            # Split "--name=value" into name and value.
            x="${1#--}"
            handle_option "${x%%=*}" "${x#*=}"
            shift ;;
        --*)
            handle_option "${1#--}"
            shift ;;
        -?*)
            if [ ${#1} = 2 ]; then
                # Exactly "-o": bare short option.
                handle_option "${1#-}"
            else
                # "-ovalue": strip the first two chars to get the value,
                # then derive the option letter from what remains.
                v="${1#??}"
                x="${1%"$v"}"
                handle_option "${x#-}" "$v"
            fi
            shift ;;
        *)
            break ;;
    esac
done
# At least one FILE argument is required.
if [ $# = 0 ]; then
    fatal 64 'usage: FILE..'
fi
for fn; do
    if [ -L "$fn" ]; then
        # Refuse to operate on symlinks.
        fatal 1 "symlink: $fn"
    elif [ ! -e "$fn" -o -s "$fn" ]; then
        # Missing or non-empty: leave it alone.
        true
    else
        # Exists and is empty: delete ("--" guards names starting with -).
        rm -- "$fn"
    fi
done
| true
|
5ed5791188ee5de1ab7603e918e9624dbbe3dfb8
|
Shell
|
jonhiggs/stateshifter
|
/plugins/osx/system-cpu
|
UTF-8
| 500
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# stateshifter plugin: report the percentage of CPU in use on OS X,
# cached in Redis under "<namespace>.cpu_used".
# Relies on redis_get/redis_set from ../../lib/redis and $namespace
# from ../../config.
source $(dirname $0)/../../config
source $(dirname $0)/../../lib/redis
# Redis cache key and TTL (seconds) used by the sourced redis helpers.
REDIS_KEY="${namespace}.cpu_used"
REDIS_EXPIRE=5
# NOTE(review): _usage is defined but never called anywhere in this file.
_usage() {
echo "$0 [get|set]"
}
# Sample CPU usage with `top`: take two samples, average field 7 of the
# "CPU usage" lines (presumably the idle percentage — confirm against
# the local top output format), and return 100 minus that average.
_cpu_used() {
samples_to_take=2
top -l${samples_to_take} -d \
| awk '
/CPU\ usage/ {
gsub("%", "");
sum+=$7;
n++;
}
END { print 100 - (sum / n) }
'
}
# Serve the cached value if present; otherwise sample and cache it.
redis_get || redis_set $(_cpu_used)
| true
|
3179945af6689a4da0cb603e03ca3fdab3ef6330
|
Shell
|
l4rz/idrac-7-8-reverse-engineering
|
/squashfs-linux/Linux/usr/bin/script.sh
|
UTF-8
| 1,413
| 3.203125
| 3
|
[] |
no_license
|
# Init-style wrapper for the iDRAC feature manager (fmgr).  Start
# behaviour depends on whether the Dell data manager daemon
# (dsm_sa_datamgrd) and/or fmgr are already running.
RETVAL=0
srcdir="/etc/default"
destdir="/flash/data0/etc"
exec_fmgr="/avct/sbin/fmgr"
exec_fmchk="/avct/sbin/fmchk"

start() {
    # Seed the fake FEB file on first boot.
    if [ ! -f "$destdir/fake_feb.txt" ]; then
        cp $srcdir/fake_feb.txt $destdir
    fi
    # Detect running instances, filtering out kernel threads ("[fmgr]"),
    # the S_9300_fmgr helper and the grep itself.
    fmgr_running=`ps | grep fmgr | grep -v "\[fmgr\]" | grep -v S_9300_fmgr | grep -v grep`
    dsm_running=`ps | grep dsm_sa_datamgrd | grep -v grep`
    if [ ! -z "$dsm_running" ]; then
        echo "DM running"
        if [ ! -z "$fmgr_running" ]; then
            # FM/DM running. Send LM FEB update event
            echo "FM running"
            echo "#### Update LM FEB ####"
            $exec_fmchk -l
        else
            # DM running/FM not running. start with LM FEB
            echo "FM not running"
            echo "#### start fmgr with LM FEB ####"
            # $exec_fmgr -f
            $exec_fmgr
        fi
    else
        echo "DM not running"
        if [ ! -z "$fmgr_running" ]; then
            # DM not running/FM running. stop
            echo "FM running"
            echo "#### fmgr is running already!! ####"
        else
            # DM/FM not running. start fmgr with fake FEB
            echo "FM not running"
            $exec_fmgr -f
        fi
    fi
    # sleep 5
    # problem fixed. comment this out
    # /etc/sysapps_script/clp_shell.sh
}

stop() {
    # SIGTERM (not -9) so fmgr's exit_handle() runs and releases its
    # semaphores.
    killall fmgr
}

restart() {
    stop
    sleep 3
    # Force-kill anything that ignored SIGTERM before starting again.
    killall -9 fmgr
    start
}

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  reset)
    # Undocumented alias for stop.
    stop
    ;;
  restart)
    restart
    ;;
  *)
    echo "Usage: $0 {start|stop|restart}"
    exit 1
esac

exit $?
# Removed the unreachable trailing "exit $RETVALETVAL": it referenced an
# undefined variable (a typo for $RETVAL) and could never execute after
# the "exit $?" above.
| true
|
1cfda16d4f5214c118fdbba80945c22268c98131
|
Shell
|
tobiasjwm/munki_scripts
|
/ouradmin/ouradmin-uninstall_check.sh
|
UTF-8
| 466
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# uninstall_check script for Munki to determine if a user account exists.

# Get a list of account names for all accounts above 500 (one per line).
listAccts=`dscl . list /Users UniqueID | /usr/bin/awk '$2 > 500 { print $1 }'`

# Add the shortname of the target account.
ourAdmin='ouradmin'

# Check the list for our admin. Exit 0 to run the uninstall script.
# Exit 1 if the account does not exist.
# Match a whole line as a fixed string: the previous substring pattern
# (*"$ourAdmin"*) also matched unrelated accounts such as "ouradmin2"
# or "notouradmin".
if printf '%s\n' "$listAccts" | grep -Fqx "$ourAdmin"; then
	exit 0
else
	exit 1
fi
| true
|
80aed3e1aac4470038032e89f020bcf2b23f203a
|
Shell
|
skatt/server
|
/rels/web/files/configure
|
UTF-8
| 3,115
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Configure the web release: determine host/IP, apply command-line
# overrides, then rewrite vm.args, app.config and webmachine.config.
H=`hostname -f`
HIP=
##FIX: should handle multiple IPs computer by selecting first IP from the list
# NOTE(review): HIP is empty above, so HIPS/IP stay empty unless the
# -ip flag is passed below — confirm this is intentional.
HIPS=`for i in $HIP; do echo $i | grep -v "127.0."; done`
IP=$HIPS
# Defaults; every value below can be overridden by a flag.
WEBPORT=80
GAMESERVER=game
WEBSERVER=web
SERVERPORT=7999
SERVERHOST=""
GAMESERVERHOST=127.0.0.1
APPSERVER=app@$H
JSPACK=min
CSSPACK=min
SYNC=false
MQ_HOST=localhost
MQ_PORT=5672
MQ_USER=kakauser
MQ_PASS=paynet123
MQ_VHOST=/
# Parse "-flag value" pairs; unknown arguments stop the parsing.
while [ $# -gt 0 ]
do
case $1 in
-ip)
IP=$2;shift 2;;
-app)
APPSERVER=$2;shift 2;;
-srv)
SERVERHOST=$2;shift 2;;
-srv-port)
SERVERPORT=$2;shift 2;;
-srv-host)
GAMESERVERHOST=$2;shift 2;;
-game)
GAMESERVER=$2;shift 2;;
-web)
WEBSERVER=$2;shift 2;;
-web-port)
WEBPORT=$2;shift 2;;
-mq-host)
MQ_HOST=$2;shift 2;;
-mq-port)
MQ_PORT=$2;shift 2;;
-mq-user)
MQ_USER=$2;shift 2;;
-mq-pass)
MQ_PASS=$2;shift 2;;
-mq-vhost)
MQ_VHOST=$2;shift 2;;
-fb-app-id)
FBAPPID=$2;shift 2;;
-fb-app-secret)
FBAPPSECRET=$2;shift 2;;
-tw-consumer-key)
TWCONSUMERKEY=$2;shift 2;;
-tw-consumer-secret)
TWCONSUMERSECRET=$2;shift 2;;
-jspack)
JSPACK=$2;shift 2;;
-csspack)
CSSPACK=$2;shift 2;;
-sync)
SYNC=$2;shift 2;;
*) break;;
esac
done
echo "Web Server $SERVERHOST ($HIPS:$WEBPORT) will use $FBAPPID FB id"
# Generate the Erlang VM args with the web node name substituted in.
cat vm.args.template | sed "s#WEBSERVER_NODE#$WEBSERVER@$H#g" > vm.args
# Write each setting into app.config via the ./config helper
# (-atom / -integer / -list select the Erlang term type).
./config -file app.config nsm_db app_srv_node -atom $APPSERVER
./config -file app.config nsm_db game_srv_node -atom $GAMESERVER@$H
./config -file app.config nsm_db web_srv_node -atom $WEBSERVER@$H
./config -file app.config nsm_db riak_srv_node -atom $WEBSERVER@$H
./config -file app.config nsw_srv game_srv_port -integer $SERVERPORT
# ./config -file app.config nsw_srv game_srv_host -list "\"$GAMESERVERHOST\""
./config -file app.config nsw_srv fb_id -list "\"$FBAPPID\""
./config -file app.config nsw_srv fb_secret -list "\"$FBAPPSECRET\""
./config -file app.config nsw_srv fb_redirect_uri -list "\"http://$H:$WEBPORT\""
./config -file app.config nsw_srv tw_consumer_key -list "\"$TWCONSUMERKEY\""
./config -file app.config nsw_srv tw_consumer_secret -list "\"$TWCONSUMERSECRET\""
./config -file app.config nsw_srv jspack -list "\"$JSPACK\""
./config -file app.config nsw_srv csspack -list "\"$CSSPACK\""
./config -file app.config nsm_mq amqp_host -list "\"$MQ_HOST\""
./config -file app.config nsm_mq amqp_port -integer $MQ_PORT
./config -file app.config nsm_mq amqp_user -list "\"$MQ_USER\""
./config -file app.config nsm_mq amqp_pass -list "\"$MQ_PASS\""
./config -file app.config nsm_mq amqp_vhost -list "\"$MQ_VHOST\""
./config -file app.config nsx_utils app_srv_node -atom $APPSERVER
./config -file app.config nsm_db sync_nodes -atom $SYNC
./config -file app.config nsm_db nodes -list [app@$H,game@$H,web@$H]
./config -file app.config nsw_srv http_address -list "\"http://$H:$WEBPORT\""
./config -file app.config nsw_srv https_address -list "\"https://$H\""
# Emit the webmachine (HTTP server) configuration.
echo "
[{webmachine, [
{bind_address, \"$IP\"},
{port, $WEBPORT},
{document_root, \"./site/static\"}
]}]." > webmachine.config
| true
|
57e252706347a0dcdf8f28e59640497d7f3d4b4e
|
Shell
|
rakshasa/rtorrent-vagrant
|
/scripts/torrent-deactivate
|
UTF-8
| 169
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Deactivate a torrent by removing its .torrent file from the rtorrent
# watch directory.  The single argument is the torrent's base filename.
source "${BASH_SOURCE[0]%/*}/include/init"
TORRENT_FILENAME="${1:?Missing torrent filename.}"
watch_file="${DATA_HOST_WATCH}/${TORRENT_FILENAME}.torrent"
rm "${watch_file}"
| true
|
37822baed2c86806b06501c30251e59e160f7fc8
|
Shell
|
myin142/dotfiles
|
/i3wm/i3/scripts/memory
|
UTF-8
| 103
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# i3 status script: print memory usage as "used/total" (fields 3 and 2
# of free -h's "Mem:" line, joined by a slash).
MEMORY=$(free -h | grep "Mem:" | awk -v x="/" '{print $3x$2}')
# Printed twice — presumably the second line is the i3blocks
# short_text; TODO(review): confirm, otherwise this is a duplicate.
echo $MEMORY
echo $MEMORY
| true
|
ccbc0814e67d529532eb2397fb881ca77362678e
|
Shell
|
manueldeval/vagrant-ansible
|
/roles/zookeeper/files/generateMyId.sh
|
UTF-8
| 587
| 3.4375
| 3
|
[] |
no_license
|
#! /bin/bash
# Generate ZooKeeper's myid file: find which server.N entry in the
# config matches one of this host's IP addresses and write N to $MYID.
# Exits 0 if the myid file was already up to date, 1 if it was rewritten.
#
# Usage: generateMyId.sh <zoo.cfg> <myid-file>
CONF=$1
MYID=$2
# All local addresses ("inet " lines with the prefix length stripped).
ips=`ip addr show | grep "inet " | sed 's/.*inet //' | sed 's/\/.*//'`
# Config entries reduced to "N=host" (drop the "server." prefix and the
# :port suffixes).
servers=`cat $CONF | grep server | sed 's/server\.//' | sed 's/:.*//'`
for ip in $ips
do
  for server in $servers
  do
    # Compare the host part exactly.  The original piped through
    # grep "$ip", where the dots in the IP acted as regex wildcards and
    # a substring match meant 10.1.1.1 also matched 10.1.1.10.
    host=${server#*=}
    if [ "$host" = "$ip" ]; then
      echo ${server%%=*} > /tmp/myid.new
    fi
  done
done
# Only rewrite the myid file when the computed id differs.
diff /tmp/myid.new $MYID &> /dev/null
RETVAL=$?
if [ $RETVAL -eq 0 ]; then
  exit 0
fi
cp /tmp/myid.new $MYID
exit 1
| true
|
32d1df45f9a3c943916722367f388462357aec32
|
Shell
|
leisheyoufu/study_exercise
|
/algorithm/astyle.sh
|
UTF-8
| 437
| 3.453125
| 3
|
[] |
no_license
|
# Reformat all C/C++ sources under ./leetcode with the bundled astyle
# binary (Linux or macOS build, selected via uname).
basepath=$(cd `dirname $0`; pwd)
dir=$basepath/leetcode
filelist=`find $dir -type f -name "*.c" -or -name "*.h" -or -name "*.cpp"`
# Default to the Linux binary; override per platform below.
astyle=$basepath/astyle
unameOut="$(uname -s)"
case "${unameOut}" in
Linux*) astyle=$basepath/astyle;;
Darwin*) astyle=$basepath/astyle_mac;;
esac
for file in $filelist
do
# Apply Linux brace style with 4-space indentation.
$astyle --style=linux --indent=spaces=4 $file
# astyle leaves a "<file>.orig" backup when it changes a file; drop it.
if [ -f "$file.orig" ]; then
rm "$file.orig"
fi
done
| true
|
4c4b7f998068d749ca938c0dd11b39fbeb0ff1dd
|
Shell
|
xandbox/cocalc
|
/install/install.sh
|
UTF-8
| 1,382
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a CoCalc project as a Ximera sandbox: pull the cocalc and
# xandbox repositories, import the signing key, and configure git.

# Terminal styling helpers.
bold=$(tput bold)
redbold=${bold}$(tput setaf 1)
normal=$(tput sgr0)

# Print the arguments in bold.
becho () {
    echo ${bold}$*${normal}
}

# From https://serverfault.com/questions/607884/hide-the-output-of-a-shell-command-only-on-success
set -e
SILENT_LOG=/tmp/silent_log_$$.txt
trap "/bin/rm -f $SILENT_LOG" EXIT

# Dump the captured log on failure and stop the script.
# (Fix: the original relied on set -e but returned 0 here, so the
# script continued after a failed command.)
function report_and_exit {
    echo " ... ${redbold}error!${normal}"
    cat "${SILENT_LOG}";
    exit 1
}

# Echo a command, run it with output redirected to the log, and report
# success or failure.
# (Fix: execute "$@" rather than $* so argument grouping survives —
# e.g. `silent git config user.name "Ximera Sandbox"` previously
# re-split "Ximera Sandbox" into two arguments.)
function silent {
    echo -ne "$*"
    rm -f ${SILENT_LOG}
    "$@" 2>>"${SILENT_LOG}" >> "${SILENT_LOG}" && echo " ... done." || report_and_exit;
}

pushd ~
becho "Configure git"
silent git config --global push.default simple

becho "Clone the cocalc xandbox into your project home directory"
silent git init
silent git remote add origin https://github.com/xandbox/cocalc.git
silent git fetch --all
silent git reset --hard origin/master

becho "Clone the git submodules"
silent git submodule update --init --remote

becho "Clone the xandbox itself"
silent git clone https://github.com/xandbox/xandbox.git
popd

becho "Import the xandbox private key"
silent gpg --import ~/gpg/xandbox.txt

pushd ~/xandbox
if [[ $(git config user.name) ]]; then
    echo "Your git username is $(git config user.name)"
else
    echo
    becho "Set your git user.name"
    silent git config user.name "Ximera Sandbox"
    silent git config user.email "ximera@math.osu.edu"
fi
popd

becho "Restart bash"
exec bash
| true
|
beae7926e0b792274340b8a460127f80637c65cc
|
Shell
|
Meghan-Hamp/Meghan-Hamp.GitHub.io
|
/MyCount.sh
|
UTF-8
| 226
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# (Fixed shebang: the original "#!bin/bash" was missing the leading
# slash, so the kernel could not locate the interpreter when the
# script was executed directly.)

# Classify each number from 1 to 20 as a single- or double-digit number.
for Count in {1..20}
do
    # [[:digit:]] alone matches exactly one digit character.
    if [[ $Count == [[:digit:]] ]]
    then
        echo "$Count is a single-digit number"
    else
        echo "$Count is a double-digit number"
    fi
done
| true
|
853871b6c01308b0f5e2db993c69a8f22f70da97
|
Shell
|
wasuaje/django-payroll
|
/start
|
UTF-8
| 640
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: wait for MySQL to come up, run the Django
# migrations and static collection, then start the app under uwsgi.

# True once the MySQL server at test-app-mysql answers a ping.
mysql_ready() {
    # Fix: expand the password from the environment; the original
    # passed the literal string "MYSQL_ROOT_PASSWORD" (missing "$"),
    # so the ping could never authenticate.
    mysqladmin ping --host=test-app-mysql --user=root --password="$MYSQL_ROOT_PASSWORD" > /dev/null 2>&1
}

# Block (polling every 5s) until the database accepts connections.
while !(mysql_ready)
do
    sleep 5
    echo "waiting for mysql to continue..."
done

python manage.py makemigrations --no-input
python manage.py migrate --no-input
python manage.py makemigrations app --no-input
python manage.py migrate app --no-input
python manage.py collectstatic --no-input
python manage.py showmigrations
python manage.py initadmin

#python manage.py runserver 0.0.0.0:8090
# $(ip a | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b" | grep 172.18 | head -1):8090 #test
uwsgi --ini conf.ini
| true
|
8f8356b390ca4bc2c7cb1da592e33582f19a8a8d
|
Shell
|
lsbloo/ShellScriptLinuxBegin
|
/utilLinux/criarDir.sh
|
UTF-8
| 415
| 3.265625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Prompt the user for a directory name and a parent path, then create
# the directory at that location.

echo 'Digite o nome do diretorio'
echo
read nome_diretorio

echo 'Digite o caminho do diretorio'
echo
read caminho_diretorio

# If both the name and the path are empty, report an error; otherwise
# create the directory at the requested path.
# (Quoted [[ ... && ... ]] replaces the fragile unquoted
# `[ -z $a -a -z $b ]`, which only worked for empty values because of
# accidental 3-argument test parsing.)
if [[ -z "$nome_diretorio" && -z "$caminho_diretorio" ]]; then
    echo 'Erro campo nao digitado'
else
    mkdir "$caminho_diretorio/$nome_diretorio"
fi
| true
|
8574f3e1099ad90ac17ec18765c857200a4aca62
|
Shell
|
PNolan92/xbmc.addons
|
/scripts/build
|
UTF-8
| 561
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
# Build one package and its build-time dependencies, stamping
# $STAMPS/<pkg>/build so unchanged packages are skipped next time.
# Relies on variables ($STAMPS, $PKG_DIR, $SCRIPTS, $PKG_BUILD_DEPENDS,
# $INDENT*, $SILENT_OUT) set by config/options.
. config/options $1

if [ -z "$1" ]; then
  echo "usage: $0 package_name"
  exit 1
fi

# Make sure the package's own build never inherits an INSTALL target.
unset INSTALL

mkdir -p $STAMPS/$1
STAMP=$STAMPS/$1/build

# Unpack the sources first; this may invalidate the stamp below.
$SCRIPTS/unpack $1

# Let the package's optional need_build hook decide about rebuilds.
if [ -f $STAMP -a -f $PKG_DIR/need_build ]; then
  $PKG_DIR/need_build $@
fi

# Rebuild when the package's build script is newer than the stamp.
if [ -f $STAMP -a $PKG_DIR/build -nt $STAMP ]; then
  rm -f $STAMP
fi

if [ ! -f $STAMP ]; then
  rm -f $STAMP
  printf "%${INDENT}c BUILD $1\n" ' '>&$SILENT_OUT
  export INDENT=$((${INDENT:-1}+$INDENT_SIZE))

  # Recursively build all build-time dependencies before stamping.
  for p in $PKG_BUILD_DEPENDS; do
    $SCRIPTS/build $p
  done

  echo "STAMP" >> $STAMP
fi
| true
|
d508d711118038436a805bbf3688dbc61b5d1b0e
|
Shell
|
maiqueb/kubetron
|
/cmd/deviceplugin/attach-pod
|
UTF-8
| 1,006
| 3.3125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/bash -ex
# Attach a pod to the OVN/OVS network: create an internal port on the
# integration bridge, move it into the pod's network namespace, set its
# MAC, bring it up, and tag it with the OVN interface id.
#
# Arguments: container name, port name, OVN port id, MAC address, and
# the container's PID (used to reach its netns via /host/proc).
# Runs with -ex: traces every command and aborts on the first failure.

# Mirror stdout/stderr into log files while still printing them.
exec > >(tee -ia /var/log/attach-pod.log)
exec 2> >(tee -ia /var/log/attach-pod.err.out)

CONTAINERNAME=$1
PORTNAME=$2
PORTID=$3
MAC=$4
PID=$5

# create internal port on integration bridge, that will be later pased to pod netns
ovs-vsctl -- \
    add-port br-int $PORTNAME -- \
    set Interface $PORTNAME type=internal

sleep 1

# initialize /var/run/netns in case it does not exist yet
ls /var/run/netns || ip netns add dummyNS || true

# we need to create a named netns for given pid, so we are able to use `ip netns`
rm -f /var/run/netns/$CONTAINERNAME
ln -s /host/proc/$PID/ns/net /var/run/netns/$CONTAINERNAME

# move interface to pod netns, configure its MAC and set it up
ip link set dev $PORTNAME netns $CONTAINERNAME
ip netns exec $CONTAINERNAME ip link set $PORTNAME address $MAC
ip netns exec $CONTAINERNAME ip link set $PORTNAME up

# finally mark interface with its interface ID, so it is recognized by OVN
ovs-vsctl set Interface $PORTNAME external_ids:iface-id=$PORTID
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.