blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b01dfcccc41efcb81e2a8f1c24c0e5597cad3b6d | Shell | andersdra/docker-mplabx | /scripts/mplabx/create_user.bash | UTF-8 | 284 | 2.65625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# docker-mplabx
if [ ! "$C_USER" = root ]
then
groupadd \
--system "$C_USER" \
--gid "$C_GUID" \
&& useradd \
--no-log-init --uid "$C_UID" \
--system --gid "$C_USER" \
--create-home --home-dir "$C_HOME" \
--shell /sbin/nologin "$C_USER" \
;fi
| true |
289671944103fa337f43fad9f0ca1023d7a018a4 | Shell | LiErhua/dotfiles | /v2.0/.common/exports.sh | UTF-8 | 2,016 | 3.09375 | 3 | [
"MIT"
] | permissive | # xterm hack for some terminals to support 256 colors
# If not set null_glob option, when '/usr/share/terminfo/*/xterm-256color'
# not exists, will output: no matches found: /usr/share/terminfo/*/xterm-256color
# more detailed see this answer: http://unix.stackexchange.com/a/26825/45725
if [[ "$SHELL" == `which zsh 2>/dev/null` ]]; then
setopt null_glob
fi
if [ -z "$TMUX" ] && [[ "$TERM" =~ "xterm" ]]; then
if [ -e /usr/share/terminfo/*/xterm-256color ]; then
export TERM='xterm-256color'
else
export TERM='xterm-color'
fi
elif [ -n "$TMUX" ]; then
if [ -e /usr/share/terminfo/*/screen-256color ]; then
export TERM='screen-256color'
else
export TERM='screen'
fi
fi
# refer to: http://superuser.com/questions/39751/add-directory-to-path-if-its-not-already-there
pathappend() {
for _path in "$@"
do
if [ -d "$_path" ] && [[ ":$PATH:" != *":$_path:"* ]]; then
PATH="${PATH:+"$PATH:"}$_path"
fi
done
}
pathprepend() {
_paths=("$@")
for ((i=$#; i>0; i--));
do
_path=${_paths[$i]} # for bash & zsh
if [ -d "$_path" ] && [[ ":$PATH:" != *":$_path:"* ]]; then
PATH="$_path${PATH:+":$PATH"}"
fi
done
}
export GOPATH=${HOME}/.go
pathprepend $GOPATH/bin ${HOME}/.local/bin ${HOME}/.local/bin /usr/local/sbin /usr/sbin /sbin
export TZ='Asia/Shanghai'
export EDITOR='vim'
export LANG='en_US.UTF-8'
export LESS='-RS'
# Fix colored man pages not work
# * http://unix.stackexchange.com/questions/6010/colored-man-pages-not-working-on-gentoo
# * https://forums.gentoo.org/viewtopic-t-819833-start-0.html
export GROFF_NO_SGR=1
# Overwrite man with different color
man() {
env \
LESS_TERMCAP_mb=$(printf "\e[1;34m") \
LESS_TERMCAP_md=$(printf "\e[1;34m") \
LESS_TERMCAP_me=$(printf "\e[0m") \
LESS_TERMCAP_se=$(printf "\e[0m") \
LESS_TERMCAP_so=$(printf "\e[1;44;33m") \
LESS_TERMCAP_ue=$(printf "\e[0m") \
LESS_TERMCAP_us=$(printf "\e[1;32m") \
PAGER="${commands[less]:-$PAGER}" \
_NROFF_U=1 \
PATH="$HOME/bin:$PATH" \
man "$@"
}
| true |
9c0e10b5688e7907e1be55c370735f6f6b310b65 | Shell | punkupoz/terraform-openvpn-easyrsa-boilerplate | /initial-script.sh | UTF-8 | 499 | 3 | 3 | [] | no_license | #!/bin/bash
# Sleep until boot finished
until [[ -f /var/lib/cloud/instance/boot-finished ]]; do
sleep 1
done
sudo apt update
# Install packages
sudo apt install openssl -y
sudo apt install openvpn -y
curl -L https://github.com/OpenVPN/easy-rsa/archive/3.0.1.tar.gz -o /home/ubuntu/easy-rsa.tar.gz
tar xzvf /home/ubuntu/easy-rsa.tar.gz
rm /home/ubuntu/easy-rsa.tar.gz
# Enable ipv4 package forward
sudo sed -i "s/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g" /etc/sysctl.conf
sudo sysctl -p
| true |
1b663dc14bde4b72c7239a2653400787def4cf9f | Shell | JoaquinVte/scriptsShell | /vectorBidimensional3 | UTF-8 | 1,719 | 3.796875 | 4 | [] | no_license | #!/bin/bash
declare -A matrix
num_columns=2
num_rows=0
function visualizar() {
f1="%$((${#num_columns}+1))s"
f2=" %50s"
printf "$f1" ''
printf "$f2" 'Nombre Completo'
printf "$f2" 'Direccion/es de correo electronico'
printf "$f2" 'Departamento'
echo
echo "-----------------------------------------------------------------------------------------------------------------------------------------------------------"
echo
for ((j=0;j<num_rows;j++)) do
printf "$f1" $(($j+1))
for ((i=0;i<=num_columns;i++)) do
printf "$f2" ${matrix[$i,$j]}
done
echo
done
}
function insertar(){
if [ $num_rows -eq 0 ]
then
matrix[0,$num_rows]=$1
matrix[1,$num_rows]=$2
matrix[2,$num_rows]=$3
let num_rows=$num_rows+1
else
pos=`buscar $1`
if [ $pos -eq -1 ]
then
matrix[0,$num_rows]=$1
matrix[1,$num_rows]=$2
matrix[2,$num_rows]=$3
let num_rows=$num_rows+1
else
matrix[1,$pos]=${matrix[1,$pos]}","$2
fi
fi
}
function buscar(){
local encontrado=-1
local i=0
while [[ $encontrado -eq -1 && $i -le $num_rows ]]
do
if [[ ${matrix[0,$i]} == $1 ]]
then
encontrado=$i
fi
let i=$i+1
done
echo $encontrado
}
lineas=$(wc texto2 | cut -d" " -f3)
for i in `seq 1 1 $(($lineas+1))`
do
fila=$(cat texto2 | sed -n "$i p")
insertar `echo $fila | cut -d";" -f1` `echo $fila | cut -d";" -f2` `echo $fila | cut -d";" -f3`
done
visualizar | true |
53b493549ee23ead214dda07eb72c447d7d9f8b1 | Shell | kristofferahl/koshu-shell | /test/missing_koshufile/test.bats | UTF-8 | 171 | 2.75 | 3 | [] | no_license | #!/usr/bin/env bats
@test "missing koshufile exits with code 1" {
run "$KOSHU_SOURCE_DIR/koshu.sh" default --file "$BATS_TEST_DIRNAME/koshufile"
[ $status -eq 1 ]
}
| true |
303138159c5300162d200d2892c458c7b0678ef9 | Shell | joeconlin/trivial-admin-tools | /Postini/Postini Shell Scripting/postini_userlist_dump.sh | UTF-8 | 1,893 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Postini user dump
# Joe Conlin <joe@joeconlin.org>
# Customization Settings
#Postini Info
PSTEMAIL="postini%40domain.com" # url encoded value of your postini admin account
PSTPASS="your_url_encoded_password" # url encoded password value
ORGID="100000000" # put your Org ID here. This can be a parent org or a child org
PSTSYSTEM="ac-s7" # your Postini system. ac-s7 is for System 7. other systems are untested SORRY!
#email to send postini userlist to
email=support@domain.tld
message="Please see the attached spreadsheet for Postini user counts this period."
date=`date +%Y%m%d` #set todays date
file=/tmp/postiniusers${date}.csv #name of csv file
cookies=/tmp/pstcookie.txt # temp location to store cookies
#------ End Customization ---------------#
#this logs into postini and gets the session details
wget -O - --cookies=on --keep-session-cookies --save-cookies $cookies --referer=https://login.postini.com/exec/login --post-data 'email=$PSTEMAIL&pword=$PSTPASS&Login.x=30&Login.y=14&action=login' https://login.postini.com/exec/login |grep adminstart | wget --keep-session-cookies --load-cookies $cookies --save-cookies $cookies --force-html -i - -O /dev/null
# This gets the userlist
wget --cookies=on --keep-session-cookies --save-cookies $cookies --load-cookies $cookies 'https://$PSTSYSTEM.postini.com/exec/admin_listusers_download?sortkeys=address%3Aa&type_of_user=all&lastorglist=&childorgs=1&type_of_encrypted_user=ext_encrypt_any&aliases=0&targetorgid=$ORGID&type=usersets&pagenum=1&pagesize=25' -O /tmp/users.txt
#this processes the userlist to give a count of users per domain, sorted
cat /tmp/users.txt | cut -d "," -f1 | cut -d '@' -f2 | sort -b -d -f | uniq -c | sort > $file
#now mail this sucker (requires Mutt to be installed!)
echo "$message" | mutt -a $file -s "Postini Userlist $date" $email
#Cleanup
rm -f $cookies $file /tmp/users.txt
| true |
933470ceb09d98d88d26792a26c169f8d7d3713c | Shell | sb047/citypay-paylink-woo-commerce | /scripts/apache2-start.sh | UTF-8 | 1,249 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# the wordpress entry point looks for a file starting with apache2 hence the name, this file actually runs the setup process just before running apache in the foreground
echo ========== Initial Plugin List =============
wp --allow-root plugin list
echo ============================================
wp --allow-root plugin uninstall woocommerce
wp --allow-root plugin install woocommerce --activate --version=$WOOCOMMERCE_VERSION
cd wp-content/plugins
echo ========== Loading CityPay Woo Commerce Plugin List =============
wget https://github.com/citypay/citypay-paylink-woo-commerce/raw/${CITYPAY_PLUGIN_VERSION}/build/citypay-paylink-woocommerce.zip
unzip citypay-paylink-woocommerce.zip
rm citypay-paylink-woocommerce.zip
chown -R www-data:www-data ./*
cd ../../
echo ========== Activate CityPay Woo Commerce Plugin =================
wp --allow-root plugin activate citypay-paylink-woocommerce
echo =================================================================
echo ========== Updated Plugin List =============
wp --allow-root plugin list
echo ============================================
echo Starting NGROK...
ngrok http -authtoken=$NGROK_AUTHTOKEN -log=ngrok.log 80 &
# run apache in the foreground...
apache2-foreground | true |
6d6e8f2e6c55f12754508e1ed4e81a3bfa634b30 | Shell | danishprakash/dotfiles | /.zsh.histdb | UTF-8 | 1,845 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env bash
# catches original bash, writes them to mysql or sqlite3 file
# place it to the end of ~/.bashrc
# need to find original author of preexec_invoke_exec, google it :)
# source: https://github.com/digitalist/bash_database_history
#-------------------------------------------------------------------
# Create db and schema
#-------------------------------------------------------------------
# create database if not exists bash ;
# use bash ;
# -- drop table `history`;
# CREATE TABLE `history` (
# `oid` bigint(20) NOT NULL AUTO_INCREMENT,
# `command` TEXT,
# `arguments` TEXT,
# `cwd` TEXT,
# `created` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
# `tag` TEXT,
# PRIMARY KEY (`oid`),
# KEY `created` (`created`)
# ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ;
preexec() {
BASH_COMMAND=$1
if [[ -z "$HISTORY_TAG" ]]; then
HISTORY_TAG=''
else
echo TAG: $HISTORY_TAG
fi
[ -n "$COMP_LINE" ] && return # do nothing if completing
[ "$BASH_COMMAND" = "$PROMPT_COMMAND" ] && return # don't cause a preexec for $PROMPT_COMMAND
# get current command
local cur_cmd=$(echo $1 | sed -e "s/^[ ]*[0-9]*[ ]*//g")
cwd=$(pwd)
# optional: ignore alias for mysql history search
[[ "$BASH_COMMAND" =~ historyMysql* ]] && return
# optional: ignore pyenv
[[ "$BASH_COMMAND" =~ _pyenv_virtualenv_hook* ]] && return
# TODO
# ARGS=$@
# place ESCAPED $BASH_COMMAND into $BASH_COMMAND_ESCAPE variable
printf -v BASH_COMMAND_ESCAPE "%q" "$BASH_COMMAND" # from https://stackoverflow.com/a/4383994/5006740
# trap and write to db
mysql -ucli-history -e "INSERT INTO sh.history (oid, command, arguments, cwd, created, tag) values (0, '${BASH_COMMAND_ESCAPE}', '', '$cwd', NOW(), '$HISTORY_TAG' )"
}
| true |
40eb6ecd6e2a4440d57bb2401072215f9aad1028 | Shell | venkategowdap/job-converter-control-m | /run.sh | UTF-8 | 307 | 3.484375 | 3 | [] | no_license | if [[ $# -eq 0 ]] ; then
echo 'You need to provide a Control-M file or comma separated files'
exit 0
fi
OUT=output
if [ -d "$OUT" ]; then
printf '%s\n' "Removing Output Folder ($OUT)"
rm -rf "$OUT"
fi
echo "CONTROL-M INPUT FILES: $1"
gradle resources -Pinput="$1"
gradle jobs -Pinput="$1" | true |
845f44339343c60e7c8c0d9c7f882e0fb53b4ca0 | Shell | gbrault/ldap | /init.sh | UTF-8 | 1,625 | 3.375 | 3 | [] | no_license | #!/bin/sh
set -eu
status () {
echo "---> ${@}" >&2
}
set -x
if [ ! -e /var/lib/ldap/docker_bootstrapped ]; then
status "configuring slapd for first run"
cat <<EOF | debconf-set-selections
slapd slapd/password2 password ${LDAP_PASSWORD}
slapd slapd/password1 password ${LDAP_PASSWORD}
slapd slapd/internal/generated_adminpw password ${LDAP_PASSWORD}
slapd slapd/internal/adminpw password ${LDAP_PASSWORD}
slapd slapd/dump_database_destdir string /var/backups/slapd-VERSION
slapd slapd/domain string ${LDAP_DOMAIN}
slapd shared/organization string ${LDAP_ORGANISATION}
slapd slapd/backend string HDB
slapd slapd/purge_database boolean true
slapd slapd/move_old_database boolean true
slapd slapd/allow_ldap_v2 boolean false
slapd slapd/no_configuration boolean false
slapd slapd/dump_database select when needed
EOF
dpkg-reconfigure -f noninteractive slapd
touch /var/lib/ldap/docker_bootstrapped
# Configure phpldapadmin
DC='dc='$(echo ${LDAP_DOMAIN} | cut -d "." -f 1)',dc='$(echo ${LDAP_DOMAIN} | cut -d "." -f 2)
sed -i "s/\(\$servers->setValue('server','name','\)\(.*\)\(');\)$/\1${LDAP_SERVERNAME}\3/g" /etc/phpldapadmin/config.php
sed -i "s/\(\$servers->setValue('server','base',array('\)\(.*\)\('));\)$/\1${DC}\3/g" /etc/phpldapadmin/config.php
sed -i "s/\(\$servers->setValue('login','bind_id','\)\(.*\)\(');\)$/\1cn=admin,${DC}\3/g" /etc/phpldapadmin/config.php
sed -i "s/\(\$servers->setValue('login','bind_pass','\)\(.*\)\(');\)$/\1${LDAP_PASSWORD}\3/g" /etc/phpldapadmin/config.php
else
status "found already-configured slapd"
fi
status "starting slapd"
set -x
service apache2 start
service slapd start | true |
07ce6ea1891d38b12a5cbe2803e703d42d94eff1 | Shell | RobertAudi/.dotfiles | /common/.local/bin/dfll | UTF-8 | 1,118 | 3.734375 | 4 | [
"WTFPL"
] | permissive | #!/bin/sh
# df local disk partitions only (excluding tmpfs, loopback mounts etc.)
exclude="-x none -x tmpfs -x devtmpfs -x iso9660"
maybe_color() {
if [ -t 1 ]; then
perl -lpe '
if (/( (\d\d?)% )/) {
my $percent = $2;
my $color = $percent >= 95 ? "41;1" :
$percent >= 90 ? "31" :
$percent >= 80 ? "33" :
$percent >= 50 ? "32" :
"0";
s/$1/ \e[${color}m${percent}%\e[0m/;
}
'
else
cat
fi
}
# When the 1st column gets wide, the remaining columns get split
# onto a separate line. For sorting we need to join them back up.
df -lT $exclude "$@" |
perl -0777pe 's!^(/\S+$)\n!$1!gm; s/Mounted on/Mountedon/' |
column -t |
sed 's/Mountedon/Mounted on/' |
csort \
1=Filesystem a \
1:/sda/ b \
1:/dm-/ c \
7=/ d \
7=/home e \
7:/^\/home/ f \
7:/\/mnt/ h \
1:/dev\/mapper/ g \
|
sort -k1,1 -k6,6nr |
cut -c3- |
maybe_color
| true |
b6e2e1b3b527e10f3e350c8ab5c69e1af6f1e389 | Shell | RobertAudi/.dotfiles | /git/.local/bin/git-nuke-tag | UTF-8 | 400 | 3.59375 | 4 | [
"WTFPL"
] | permissive | #!/usr/bin/env zsh
git-nuke-tag() {
if ! git rev-parse 2> /dev/null; then
echo -e "\e[1;41;97m ERROR \e[0m Not a repository: $PWD" >&2
return 1
fi
if (( $# == 0 )); then
echo -e "\e[1;41;97m ERROR \e[0m Missing tag name" >&2
return 1
fi
# TODO: Add confirmation
# TODO: Do not hardcode the remote
git tag -d "$1" && git push origin :refs/tags/"$1"
}
git-nuke-tag $@
| true |
95a4602ab53019e59205799395e8011f081935d7 | Shell | waaan7/COMP9044-19T2 | /final/mock/eating.sh | UTF-8 | 113 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
cat $1 | egrep "name" | cut -d":" -f2 | cut -d"," -f1 | sed "s/\"//g" | sort | uniq | sed "s/^ //"
| true |
27620176b9c313986d5a8c75e0e5a0f77593ee59 | Shell | pyraca/world | /zshrc | UTF-8 | 5,970 | 2.765625 | 3 | [
"MIT"
] | permissive | # If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH="$HOME/.oh-my-zsh"
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
ZSH_THEME="robbyrussell"
# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in ~/.oh-my-zsh/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to automatically update without prompting.
# DISABLE_UPDATE_PROMPT="true"
# Uncomment the following line to change how often to auto-update (in days).
export UPDATE_ZSH_DAYS=13
# Uncomment the following line if pasting URLs and other text is messed up.
# DISABLE_MAGIC_FUNCTIONS=true
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
#
export VI_MODE_SET_CURSOR=true
export VI_MODE_RESET_PROMPT_ON_MODE_CHANGE=true
#
plugins=(z git fzf gradle man mvn docker themes vi-mode ssh-agent kubectl colorize colored-man-pages tmux genpass)
TERM=xterm-256color
export TERM
source $ZSH/oh-my-zsh.sh
# User configuration
export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
export LANG=en_US.UTF-8
export EDITOR='vim'
#
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
export ARCHFLAGS="-arch x86_64"
# ------------------------------------------
#
setopt histignorealldups sharehistory
#
# Enable Ctrl-x-e to edit command line
autoload -U edit-command-line
# Emacs style
# 1Gzle -N edit-command-line
# bindkey '^xe' edit-command-line
# bindkey '^x^e' edit-command-line
# Vi style:
zle -N edit-command-line
bindkey -M vicmd v edit-command-line
# Use emacs keybindings even if our EDITOR is set to vi
bindkey -v
# Keep 1000 lines of history within the shell and save it to ~/.zsh_history:
HISTSIZE=1000
SAVEHIST=1000
HISTFILE=~/.zsh_history
# Use modern completion system
autoload -Uz compinit
compinit
zstyle ':completion:*' auto-description 'specify: %d'
zstyle ':completion:*' completer _expand _complete _correct _approximate
zstyle ':completion:*' format 'Completing %d'
zstyle ':completion:*' group-name ''
zstyle ':completion:*' menu select=2
#
#
which dircolors > /dev/null
if [ $? -eq 1 ]; then
eval "$(gdircolors -b)"
else
eval "$(dircolors -b)"
fi
#
#
zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*' list-colors ''
zstyle ':completion:*' list-prompt %SAt %p: Hit TAB for more, or the character to insert%s
zstyle ':completion:*' matcher-list '' 'm:{a-z}={A-Z}' 'm:{a-zA-Z}={A-Za-z}' 'r:|[._-]=* r:|=* l:|=*'
zstyle ':completion:*' menu select=long
zstyle ':completion:*' select-prompt %SScrolling active: current selection at %p%s
zstyle ':completion:*' use-compctl false
zstyle ':completion:*' verbose true
zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#)*=0=01;31'
zstyle ':completion:*:kill:*' command 'ps -u $USER -o pid,%cpu,tty,cputime,cmd'
#
#a
#
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
#
export SPATH=${PATH}
#
PATH=".:${HOME}/bin"
PATH="${PATH}:/usr/local/bin:/usr/local/bin/X11:${SPATH}"
PATH="${PATH}:/usr/ucb:/usr/bin/X11"
PATH="${PATH}:/usr/local/gnu/bin:/usr/local/perl5"
PATH="${PATH}:/opt/local/bin:/opt/local/sbin"
PATH="${PATH}:/usr/ccs/bin"
PATH="${PATH}:/usr/etc/"
PATH="${PATH}:/usr/sbin"
PATH="${PATH}:/usr/bin/X11"
PATH="${PATH}:/usr/X11/bin"
PATH="${PATH}:/sbin:/nbin"
#
export EDITOR=vim
[ -f .aliases ] && source .aliases
#
[ -f ~/world/zshrc_mine ] && source ~/world/zshrc_mine
#
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
#
| true |
a008303ce1e12ec63eae6534dad2c459a1ba83df | Shell | clearlinux/clr-update-triggers | /10-locale-archive-hook.sh | UTF-8 | 170 | 2.65625 | 3 | [] | no_license | #!/bin/bash
if [ -e /var/cache/locale/locale-archive ]; then
exit 0
fi
if [ -x /usr/bin/localdef ]; then
/usr/bin/localedef -i en_US -c -f UTF-8 en_US.UTF-8
fi
| true |
84154e7ad027007e890e808b5bb60a02b4527f79 | Shell | dennyzhang/cheatsheet-pks-A4 | /airgap-iptable.rules | UTF-8 | 3,675 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# /home/kubo/gw_scripts/airgap/iptable.rules
# flush rules in INPUT chain
sudo iptables -F INPUT
# flush rules in OUTPUT chain
sudo iptables -F OUTPUT
# set default policy in INPUT chain to DROP
sudo iptables -P INPUT DROP
# set default policy in OUTPUT chain to DROP
sudo iptables -P OUTPUT DROP
# flush rules in FORWARD chain
sudo iptables -F FORWARD
sudo iptables -P FORWARD DROP
# flush rules in table nat PREROUTING chain
sudo iptables -t nat -F PREROUTING
# flush rules in table nat POSTROUTING chain
sudo iptables -t nat -F POSTROUTING
# eth0 is for vm network which means public access here
# eth1 is for priviate management network, eg: nsx-t manager, controller, vc etc
# vlan0101 is for nsx-t virtual network
vm_network_interface="eth0"
manager_networker_interface="eth1"
nsx_t_virtual_network_interface="vlan0101"
# enables traffic from/to private network for local processes on eth0
sudo iptables -A INPUT -i "$vm_network_interface" -s 10.0.0.0/8 -j ACCEPT
sudo iptables -A INPUT -i "$vm_network_interface" -s 169.254.0.0/16 -j ACCEPT
sudo iptables -A INPUT -i "$vm_network_interface" -s 172.16.0.0/12 -j ACCEPT
sudo iptables -A INPUT -i "$vm_network_interface" -s 192.168.0.0/16 -j ACCEPT
sudo iptables -A OUTPUT -o "$vm_network_interface" -d 10.0.0.0/8 -j ACCEPT
sudo iptables -A OUTPUT -o "$vm_network_interface" -d 169.254.0.0/16 -j ACCEPT
sudo iptables -A OUTPUT -o "$vm_network_interface" -d 172.16.0.0/12 -j ACCEPT
sudo iptables -A OUTPUT -o "$vm_network_interface" -d 192.168.0.0/16 -j ACCEPT
# enables traffic between nsx-t provisioned networks and local
sudo iptables -A INPUT -i "$nsx_t_virtual_network_interface" -s 192.168.0.0/16 -j ACCEPT
sudo iptables -A INPUT -i "$nsx_t_virtual_network_interface" -s 192.167.0.0/16 -j ACCEPT
sudo iptables -A INPUT -i "$nsx_t_virtual_network_interface" -s 30.0.0.0/16 -j ACCEPT
sudo iptables -A INPUT -i "$nsx_t_virtual_network_interface" -s 40.0.0.0/16 -j ACCEPT
sudo iptables -A OUTPUT -o "$nsx_t_virtual_network_interface" -d 192.168.0.0/16 -j ACCEPT
sudo iptables -A OUTPUT -o "$nsx_t_virtual_network_interface" -d 192.167.0.0/16 -j ACCEPT
sudo iptables -A OUTPUT -o "$nsx_t_virtual_network_interface" -d 30.0.0.0/16 -j ACCEPT
sudo iptables -A OUTPUT -o "$nsx_t_virtual_network_interface" -d 40.0.0.0/16 -j ACCEPT
# enables traffic between private vm management network and local
sudo iptables -A INPUT -i "$manager_networker_interface" -s 192.168.0.0/16 -j ACCEPT
sudo iptables -A OUTPUT -o "$manager_networker_interface" -d 192.168.0.0/16 -j ACCEPT
# enables traffic between nsx-t network to outside private network
iptables -t nat -A POSTROUTING -o "$vm_network_interface" -j MASQUERADE
sudo iptables -A FORWARD -i "$vm_network_interface" -o "$nsx_t_virtual_network_interface" -m state --state RELATED,ESTABLISHED -j ACCEPT
sudo iptables -A FORWARD -i "$nsx_t_virtual_network_interface" -o "$vm_network_interface" -d 10.0.0.0/8 -j ACCEPT
sudo iptables -A FORWARD -i "$nsx_t_virtual_network_interface" -o "$vm_network_interface" -d 169.254.0.0/16 -j ACCEPT
sudo iptables -A FORWARD -i "$nsx_t_virtual_network_interface" -o "$vm_network_interface" -d 172.16.0.0/12 -j ACCEPT
sudo iptables -A FORWARD -i "$nsx_t_virtual_network_interface" -o "$vm_network_interface" -d 192.168.0.0/16 -j ACCEPT
# enables any traffic between nsx-t network and private vm management network
iptables -t nat -A POSTROUTING -o "$manager_networker_interface" -j MASQUERADE
sudo iptables -A FORWARD -i "$manager_networker_interface" -o "$nsx_t_virtual_network_interface" -j ACCEPT
sudo iptables -A FORWARD -i "$nsx_t_virtual_network_interface" -o "$manager_networker_interface" -j ACCEPT
| true |
df41be4c1080c0db2899456f01837229c59b60db | Shell | saurabhvyas/kaldi_scripts | /speaker_diarization_xvectors.sh | UTF-8 | 2,353 | 2.640625 | 3 | [] | no_license | # kaldi commit id : commit b5385b46c135f8d1f2bf4f9924e287e6fd91cd23
# pretrained model used : http://kaldi-asr.org/models/m3
# this script should be executed from egs/callhome_diarization/v2 directory
# create your own data directory at the above directory and call it speaker_diarization, it should follow the general kaldi style format, eg. create and train folder etc.
# 0. use the same config files as provided in the pretrained model folder, or will fail in later stages
# 1. compute mfcc features , note that we need to create mfcc config file
train_cmd="utils/run.pl"
data_dir="speaker_diarization/data/train"
log_dir="speaker_diarization/exp/make_mfcc"
sudo steps/make_mfcc.sh --nj 1 --cmd "$train_cmd" --mfcc-config speaker_diarization/conf/mfcc.conf "$data_dir" "$log_dir"
# 2 . compute vad decisions file
log_dir="speaker_diarization/exp/make_vad"
sudo bash steps/compute_vad_decision.sh --nj 1 --vad-config speaker_diarization/conf/vad.conf "$data_dir" "$log_dir"
#3 create segmented data
sudo bash ../v1/diarization/vad_to_segments.sh --nj 1 --cmd "$train_cmd" "$data_dir" speaker_diarization/data/train_segmented
# 3. cmvn feature extraction
data_dir="speaker_diarization/data/train_segmented"
output_dir="speaker_diarization/data/train_segmented"
log_dir="speaker_diarization/exp/cmvn"
sudo bash /home/saurabh/Documents/kaldi/egs/callhome_diarization/v2/local/nnet3/xvector/prepare_feats.sh --nj 1 --cmd "$train_cmd" "$data_dir" "$output_dir" "$log_dir"
# 4. extract xvectors
nnet_dir="/home/saurabh/Documents/kaldi/egs/callhome_diarization/v2/0003_sre16_v2_1a/exp/xvector_nnet_1a"
log_dir="$nnet_dir/exp/xvectors"
sudo diarization/nnet3/xvector/extract_xvectors.sh --cmd \ "$train_cmd --mem 5G" \
--nj 1 --window 1.5 --period 0.75 --apply-cmn false \
--min-segment 0.5 $nnet_dir \
"$data_dir" "$log_dir"
# 5. plda scoring
plda_dir="/home/saurabh/Documents/kaldi/egs/callhome_diarization/v2/0003_sre16_v2_1a/exp/xvectors_sre_combined"
sudo diarization/nnet3/xvector/score_plda.sh \
--cmd "$train_cmd --mem 4G" \
--target-energy 0.9 --nj 4 "$plda_dir" \
$nnet_dir/exp/xvectors $nnet_dir/xvectors/plda_scores
# 6. final clustering step
threshold=0.5
sudo diarization/cluster.sh --cmd "$train_cmd --mem 4G" --nj 1 \
--threshold $threshold \
$nnet_dir/xvectors/plda_scores \
$nnet_dir/xvectors/plda_scores_speakers
| true |
3bc7adf79b9202e1651482af1a7ffba99e77c879 | Shell | theniceboy/.config | /zsh/fzf/fps | UTF-8 | 776 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env zsh
_fps() {
local render
if (( $+commands[grcat] )); then
render='grcat fps.grc'
else
render='cat'
fi
ps -eo user,pid,ppid,pgid,stat,tname,cmd | awk '
BEGIN { "ps -p $$ -o pgid --no-headers | tr -d \"[:blank:]\"" | getline pgid } {
if ($4 != pgid || $2 == pgid)
print
}' | ${(z)render}
}
setopt localoptions pipefail
local fzf_opts="--header-lines=1 -m \
${commands[grcat]:+--ansi} --height=50% \
--min-height=15 --tac --reverse \
--preview-window=down:2"
local fzf_preview_cmd="ps -o pcpu,pmem,vsz,rss,thcount,start_time,time -p {2}"
_fps | fzf ${(z)fzf_opts} --preview=$fzf_preview_cmd |
awk -v sep=${FZF_MUL_DELIM:- } '{ printf "%s%c", $2, sep }' | sed -E "s/${FZF_MUL_DELIM:- }$//"
| true |
5a1afeb5076cc6b34c682264e1b0fcd54d7f09a3 | Shell | cloud-barista/cb-spider | /test/sg-rules-validation-cli/2.outbound-rule-test/20.outbound-case-20.sh | UTF-8 | 3,139 | 3.15625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
if [ "$1" = "" ]; then
echo
echo -e 'usage: '$0' mock|aws|azure|gcp|alibaba|tencent|ibm|openstack|cloudit|ncp|nhncloud'
echo -e '\n\tex) '$0' aws'
echo
exit 0;
fi
echo -e "#############################################"
echo -e "# TEST: $0 "
echo -e "#############################################"
source ../common/setup.env $1
source setup.env $1
echo "============== before RemoveRules: '${SG_NAME}' --- outbound:TCP/1000/1000"
#### @todo Change this command with spctl
curl -sX DELETE http://localhost:1024/spider/securitygroup/${SG_NAME}/rules -H 'Content-Type: application/json' -d \
'{
"ConnectionName": "'${CONN_CONFIG}'",
"ReqInfo": {
"RuleInfoList" :
[
{
"Direction": "outbound",
"IPProtocol": "TCP",
"FromPort": "1000",
"ToPort": "1000"
}
]
}
}' |json_pp
echo "============== after RemoveRules: '${SG_NAME}' --- outbound:TCP/1000/1000"
# Get SG info two times to give a slice time to csp
curl -sX GET http://localhost:1024/spider/securitygroup/SG-Rules-Test-SG01 -H 'Content-Type: application/json' -d '{ "ConnectionName": "'${CONN_CONFIG}'" }' 1> /dev/null 2>/dev/null
curl -sX GET http://localhost:1024/spider/securitygroup/SG-Rules-Test-SG01 -H 'Content-Type: application/json' -d '{ "ConnectionName": "'${CONN_CONFIG}'" }' 1> /dev/null 2>/dev/null
C_IP=`curl ifconfig.co`
echo "============== before AddRules: '${SG_NAME}' --- outbound:TCP/1000/1000/${C_IP}/32"
#### @todo Change this command with spctl
curl -sX POST http://localhost:1024/spider/securitygroup/${SG_NAME}/rules -H 'Content-Type: application/json' -d \
'{
"ConnectionName": "'${CONN_CONFIG}'",
"ReqInfo": {
"RuleInfoList" :
[
{
"Direction": "outbound",
"IPProtocol": "TCP",
"FromPort": "1000",
"ToPort": "1000",
"CIDR": "'${C_IP}'/32"
}
]
}
}' |json_pp
echo "============== after AddRules: '${SG_NAME}' --- outbound:TCP/1000/1000/${C_IP}/32"
if [ "$SLEEP" ]; then
sleep $SLEEP
else
sleep 10
fi
# print the table header of test results
$(test_result_header $1)
# CSP I:TCP-01 I:TCP-02 I:UDP-01 I:ICMP-01 | O:TCP-01 O:TCP-02 O:UDP-01 O:ICMP-01
#./io-traffic-test.sh $1 $2 $3 $4 $5 $6 $7 $8 $9
./io-traffic-test.sh $1 pass fail skip fail pass pass pass pass
# print the end mesg of test results
$(test_result_tailer)
echo -e "\n\n"
| true |
013c39677eee982c82b998e9fe3b20d27c351c30 | Shell | shaikbasha536/Linux | /nestedloop.sh | UTF-8 | 116 | 2.8125 | 3 | [] | no_license | #!/bin/bash
for number in {1..3}
do
for letter in a b c
do
echo "number is $number,letter is $letter"
done
done
| true |
c19ade9236b70a19a45d3568762bc1da107d593f | Shell | rchain/rchain-devnet-node | /rundeck-scripts/stop-node | UTF-8 | 190 | 2.71875 | 3 | [] | no_license | #!/bin/bash
set -e
source "$(dirname $0)/functions"
logcmd docker stop rnode && logcmd docker rm rnode || true
if [[ "$RD_OPTION_CLEAN_DATA" == yes ]]; then
logcmd rm -rf /var/lib/rnode
fi
| true |
d4f5a663edf953ac8a2577b90a9e04fb1dde1137 | Shell | adrianogil/sbuild | /src/bashrc.sh | UTF-8 | 577 | 3.1875 | 3 | [
"MIT"
] | permissive |
function smart-build()
{
echo "Smart Build Tool - version 0.0.1"
echo "SMART BUILD!!!"
START=$(date +%s);
project_type=$(p2 $SMART_BUILD_TOOLS_DIR/detect_project.py)
$SMART_BUILD_TOOLS_DIR/$project_type/smart_build_$project_type.sh $@
END=$(date +%s);
echo $((END-START)) | awk '{print int($1/60)":"int($1%60)}'
}
alias b="smart-build"
alias s-build="smart-build"
# smart-create-build-file: delegate to the build-file generator, forwarding all
# arguments unchanged. Quoting "$@" and the script path preserves argument
# boundaries (the original unquoted forms word-split on whitespace).
function smart-create-build-file()
{
    python3 "$SMART_BUILD_TOOLS_DIR/create_build_file.py" "$@"
}
alias bf="smart-create-build-file"
alias s-build-file='smart-create-build-file'
| true |
577a00f7ca33ce46146a87a471e8a776900c79db | Shell | welikechips/recon-data | /delete.sh | UTF-8 | 243 | 3.359375 | 3 | [] | no_license | histdeln(){
# Get the current history number
# Delete the most recent $1 entries from the shell history: n is the newest
# entry's index, and deleting repeatedly at the fixed index (n - delete_num)
# works because later entries shift up after each `history -d`. The final
# `history -d` also removes the `histdeln` invocation itself.
delete_num=$1; n=$(history 1 | awk '{print $1}'); for h in $(seq $(( $n - $delete_num )) $(( $n - 1 ))); do history -d $(( $n - $delete_num )); done; history -d $(history 1 | awk '{print $1}')
} | true |
9527798886f3a6635ef44ab16b445babd8967600 | Shell | phanirajl/elasticsearch | /1. OS preparation.sh | UTF-8 | 2,245 | 3 | 3 | [] | no_license |
# Runbook: manual OS preparation for an Elasticsearch node (CentOS/RHEL).
# These are commands and file snippets to run/paste step by step -- this is a
# reference sheet, not a script meant to execute end-to-end.
##### System preparation #####
lsblk
# Disk information
file -s /dev/xvdb
fdisk -l
# Create new partition (interactive fdisk key sequence below)
fdisk /dev/xvdb
# n -create a new partition
# p - choose primary partition (or e - extended)
# [1-4] - choose partition number
# 2048 - set start block
# 22... - set end block or size +200GB
# w - save changes or q - quit without saving
# Format the new partition
mkfs -t ext4 /dev/xvdb1
mkfs.ext4 /dev/xvdb1
# Create a new directory
mkdir /hdfs
# Mount the new directory to the new created partition
mount /dev/xvdb /hdfs
# Check the new mount
df -Th
# Check the partition UUID
blkid | grep xvdb
# Write UUID to the fstab in order to allow automatic mount at the boot
vim /etc/fstab
# UUID=ea0e300d-8b98-4409-bd1d-d5305688e51c /hdfs ext4 defaults,noatime 1 2
# Install additional software
yum update -y
yum install -y epel-release
yum install -y mlocate ncdu htop rsync vim wget
# Disable transparent huge pages
vim /etc/rc.d/rc.local
# Append the two `echo never` lines below to rc.local so THP stays disabled
# after every boot.
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
chmod 777 /etc/rc.d/rc.local
reboot
# Verify THP really is disabled after the reboot
cat /sys/kernel/mm/transparent_hugepage/enabled
cat /sys/kernel/mm/transparent_hugepage/defrag
# Limit open files
vim /etc/security/limits.conf
# -- paste the following lines into limits.conf (they are not shell commands) --
elasticsearch - nofile 65536
elasticsearch - nproc 4096
elasticsearch - as unlimited
root - as unlimited
elasticsearch - fsize unlimited
root - fsize unlimited
# Disable swap or configure swappiness
vim /etc/fstab
# Comment a line with the word swap
vim /etc/sysctl.conf
# -- paste the following settings into sysctl.conf --
vm.swappiness = 1
vm.max_map_count = 262144
#net.ipv6.conf.all.disable_ipv6 = 1
#net.ipv6.conf.default.disable_ipv6 = 1
# Verify after reboot
sysctl vm.max_map_count
sysctl vm.swappiness
# Disable SELINUX
setenforce 0
vim /etc/selinux/config
# -- set in the config file: --
SELINUX=disabled
# Disable firewall
sudo systemctl status firewalld
sudo systemctl stop firewalld
sudo systemctl disable firewalld
# Hostnames
vim /etc/hosts
# -- example /etc/hosts entries for the cluster nodes --
10.0.0.1 elk1
10.0.0.1 elk2
10.0.0.1 elk3 | true |
5e914c7f7af5766128548e233619c73ff9d28a17 | Shell | bjackman/dotfiles | /bin/for_each_commit.sh | UTF-8 | 197 | 2.96875 | 3 | [] | no_license | set -eu
# Check out every commit in the range $1..HEAD (newest first, as emitted by
# `git log`) and run $TEST_COMMIT_CMD against each one.
current_branch=$(git rev-parse --abbrev-ref HEAD)
# Restore the starting branch on any exit path; under `set -e` a failing test
# command would otherwise abandon the repository on a detached HEAD.
trap 'git checkout "$current_branch"' EXIT
# Commit hashes contain no whitespace, so the unquoted loop expansion is safe.
for commit_id in $(git log --format=%H "$1..HEAD"); do
    git checkout "$commit_id"
    # Intentionally unquoted: TEST_COMMIT_CMD may hold a command plus
    # arguments that must word-split.
    $TEST_COMMIT_CMD
done
| true |
25222c59e1e89b163184550b11ada7f928668769 | Shell | shreyapohekar/Port_to_service_map | /port_to_service.sh | UTF-8 | 416 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Map every 127.0.0.1 listener reported by netstat onto its IANA service name.
i=0
arr=()
# Column 5 of `netstat -alnp` is the local address; strip the 127.0.0.1:
# prefix so only the port number remains.
for j in $(netstat -alnp | grep 127.0.0.1 | awk '{print $5}' | sed 's/127.0.0.1://g') ; do
    arr[$i]="$j"
    echo "$i = $j"
    i=$((i + 1))
done
echo "${arr[1]}"
# Download the IANA service-name registry once; the original re-fetched the
# whole file on every loop iteration.
registry=$(curl -s https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt)
len=${#arr[@]}
for (( n=0; n<len; n++ ))
do
    # BUG FIX: the original looked up $arr (always element 0) on every pass
    # instead of the current element ${arr[$n]}.
    port=${arr[$n]}
    printf '%s\n' "$registry" | grep -w "$port" | awk '{print $1}' | head -n 1
    echo "port num is $port"
done
| true |
afb27e7f01b9e463cf0b60021cc6b09035e53082 | Shell | Jonjoe/dotfiles | /setup/unix-tooling.sh | UTF-8 | 2,078 | 3.828125 | 4 | [] | no_license | # | Unix tooling ==================================================
# -----------------------------------------------------------------
# Install Unix tools:
# - Rbenv,
# - Oh My ZSH,
# - Ruby build,
# - FZF,
# - Github Hub,
# Each tool is installed only when its target directory is missing, so the
# script is safe to re-run. `outputTitle`, ${spacer}, ${bold} and ${normal}
# are provided by the surrounding dotfiles setup scripts.
outputTitle "Setup Unix Tooling"
# Tmux Package Manager Setup ======================================
# -----------------------------------------------------------------
if [ ! -d ~/.tmux/plugins/tpm ]; then
git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
fi
echo "${spacer} ${bold}tmux${normal}"
# Oh My ZSH Setup =================================================
# -----------------------------------------------------------------
if [ ! -d ~/.oh-my-zsh ]; then
sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" "" --unattended
fi
echo "${spacer} ${bold}oh my zsh${normal}"
# Rbenv Setup =====================================================
# -----------------------------------------------------------------
# NOTE(review): `>&-` closes stdout rather than silencing it; programs that
# write to stdout can then fail with EBADF -- presumably >/dev/null was
# intended. Verify before changing.
if [ ! -d ~/.rbenv ]; then
git clone https://github.com/rbenv/rbenv.git ~/.rbenv >&-
cd ~/.rbenv && src/configure && make -C src >&-
fi
echo "${spacer} ${bold}rbenv${normal}"
# Ruby Build Setup ================================================
# -----------------------------------------------------------------
if [ ! -d ~/.rbenv/plugins/ruby-build ]; then
git clone https://github.com/sstephenson/ruby-build.git ~/.rbenv/plugins/ruby-build >&-
fi
echo "${spacer} ${bold}ruby build${normal}"
# FZF Setup =======================================================
# -----------------------------------------------------------------
if [ ! -d ~/.fzf ]; then
git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf >&-
~/.fzf/install -y >&-
fi
echo "${spacer} ${bold}fzf${normal}"
# Github Hub Setup ================================================
# -----------------------------------------------------------------
if [ ! -d ~/.hub ]; then
git clone https://github.com/github/hub.git ~/.hub
fi
echo "${spacer} ${bold}github hub${normal}"
| true |
2ee3e798807feddb66844b695c913b399d201e21 | Shell | rickmed/shell_playground | /stringsAndparamExpansion | UTF-8 | 215 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# Demo of bash parameter-expansion operators on a fixed sample string.
random_str="This is my dog"
printf 'String length: %s\n' "${#random_str}"        # character count
printf 'Tail from 2nd char: %s\n' "${random_str:2}"  # substring from offset 2
printf 'Range characters: %s\n' "${random_str:2:7}"  # offset 2, length 7
# Strip the shortest prefix matching '*i' (everything up to and including the
# first 'i').
b=${random_str#*i}
printf 'Everything after i: %s\n' "$b"
| true |
5c7179eba977a5810a14885cb1d54fe31d083118 | Shell | mendhak/pash | /evaluation/benchmarks/poets/8_1.sh | UTF-8 | 404 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# tag: sort_words_by_num_of_syllables
# set -e
# PaSh benchmark: print the 5 words with the most vowel clusters (a rough
# syllable count) across the Project Gutenberg corpus in $IN.
IN=${IN:-$PASH_TOP/evaluation/benchmarks/poets/input/pg/}
OUT=${OUT:-$PASH_TOP/evaluation/benchmarks/poets/input/output/}
# Build the unique word list: one word per line, de-duplicated.
# NOTE(review): this parses `ls` output; it relies on $IN containing only
# plainly named regular files.
ls ${IN} | sed "s;^;$IN;"| xargs cat | tr -sc '[A-Z][a-z]' '[\012*]' | sort -u > ${OUT}.words
# Estimate syllables: squeeze every vowel run into one separator, then count
# the resulting fields per word with awk.
tr -sc '[AEIOUaeiou\012]' ' ' < ${OUT}.words | awk '{print NF}' > ${OUT}.syl
# Pair counts with words and show the top five.
paste ${OUT}.syl ${OUT}.words | sort -nr | sed 5q
| true |
c4368a4aa5f3bf6b3dd825e0f275330f3f56fc4c | Shell | ginevracoal/high_performance_computing | /P1.2_seed/lectures/ParallelProgramming/exercises/allgather_multiplication/scalability_multiplication.sh | UTF-8 | 1,092 | 3 | 3 | [] | no_license | #!/bin/bash
# Strong-scaling experiment: time serial vs MPI matrix multiplication for
# several matrix sizes, doubling the MPI process count up to the matrix size.
# Results go to times/times_<size>.txt as "procs serial_time parallel_time".
module load openmpi/1.8.3/gnu/4.8.3
#MATRIXSIZE=1024
module load openmpi/1.8.3/gnu/4.8.3
rm -rf times/*.txt
gcc serial_multiplication.c -o serial_multiplication.x
mpicc parallel_multiplication.c -o parallel_multiplication.x
for MATRIXSIZE in 512 1024 2048; do
SER_OUT=times/serial_time.txt
CLEAN_OUT=times/times_$MATRIXSIZE.txt
#calculate serial execution time
# Parses the "elapsed" field of /usr/bin/time's default output -- fragile and
# tied to GNU time's exact formatting.
/usr/bin/time ./serial_multiplication.x $MATRIXSIZE 2>&1 | grep "elap" | awk '{print($3)}' \
| awk -F ":" '{print $2}' | awk -F "elapsed" '{printf $1}' >$SER_OUT
echo "process / serial ex. time / parallel ex. time" >>$CLEAN_OUT
procs=1;
while [ $procs -le $MATRIXSIZE ]; do
# for procs in 1 2 4 8 16 20 ; do
#print the number of processes
echo -n $procs " " >>$CLEAN_OUT
cat $SER_OUT>>$CLEAN_OUT
echo -n " " >>$CLEAN_OUT
#calculate parallel execution time
/usr/bin/time mpirun -np $procs ./parallel_multiplication.x $MATRIXSIZE 2>&1 | grep "elap" \
| awk '{print($3)}' | awk -F ":" '{print $2}' | awk -F "elapsed" '{print $1}' >>$CLEAN_OUT
procs=$(($procs * 2))
done
done
| true |
95d698bc472c841911405371b0aa222cbf46e4e3 | Shell | tesch1/OpenVnmrJ | /src/scripts/htmltopdf.sh | UTF-8 | 793 | 3.171875 | 3 | [
"Apache-2.0",
"GPL-3.0-only"
] | permissive | #! /bin/sh
# '@(#)htmltopdf.sh 1991-2014 '
#
#
# Copyright (C) 2015 University of Oregon
#
# You may distribute under the terms of either the GNU General Public
# License or the Apache License, as specified in the LICENSE file.
#
# For more information, see the LICENSE file.
#
#
#-----------------------------------------------
# Detect the distribution release so we can pick wkhtmltopdf options that the
# locally packaged build supports.
if [ -r /etc/SuSE-release ]
then
   distrover=5.0
elif [ -r /etc/debian_version ]
then
   distrover=`lsb_release -rs` # 8.04, 9.04, etc.
else
   distrover=`cat /etc/redhat-release | sed -r 's/[^0-9]+//' | sed -r 's/[^0-9.]+$//'` # yield 5.1, 5.3 , 6.1, etc..
fi
# version_lt A B: succeed when version A sorts before version B.
# BUG FIX: the original used a lexicographic compare ([[ $v < 6.0 ]]) which
# mis-orders multi-digit releases (e.g. "10.0" < "6.0") and is a bashism
# under the #!/bin/sh shebang. sort -V performs a true version comparison.
version_lt()
{
   [ "$1" != "$2" ] && [ "$(printf '%s\n' "$1" "$2" | sort -V | head -n 1)" = "$1" ]
}
if version_lt "$distrover" 6.0
then
   wkhtmltopdf --page-size Letter "$1" "$2"
else
   wkhtmltopdf --no-footer-line --footer-font-size 8 --footer-right "[page]/[topage]" --page-size Letter "$1" "$2"
fi
| true |
a75c3733b57ae6794b6cb914ef8a107cbb37688c | Shell | himanshuyadav9/employeeWageComputation | /welcome_Employee.sh | UTF-8 | 988 | 3.75 | 4 | [] | no_license | #!/bin/bash -x
echo "Welcome to Employee Wage Computation Program"
# Wage-model constants: hourly rate, hours for a full/part-time day, and the
# monthly caps (100 worked hours or 20 working days, whichever comes first).
PER_HOUR=20
FULL_DAY_HOUR=8
PART_TIME_HOUR=4
NUMBER_OF_HOUR=100
NUMBER_OF_DAYS=20
numberOfDays=20
# Running totals accumulated by the main loop further below.
totalWorkingDays=0
totalEmpHrs=0
day=1
# getWorkingHours DRAW
# Map a random draw (1 = full day, 2 = part time, 3 = absent) to the number of
# hours worked and print it on stdout. Any unexpected input now yields 0 hours
# (the original echoed a stale or empty value in that case), and locals keep
# the helper from leaking variables into the caller's scope.
function getWorkingHours()
{
    local random=$1
    local empHrs=0     # default also covers out-of-range input
    case $random in
        1) empHrs=8 ;;  # full day
        2) empHrs=4 ;;  # part time
        3) empHrs=0 ;;  # leave
    esac
    echo "$empHrs"
}
# calcDailyWage HOURS
# Print the day's wage (hours worked x the global PER_HOUR rate) on stdout.
function calcDailyWage()
{
    local workingHrs=$1
    # `local` keeps wage out of the global namespace (the original leaked it).
    local wage=$(( workingHrs * PER_HOUR ))
    echo "$wage"
}
# Simulate working days until either the hour cap or the day cap is reached,
# recording each day's wage in empDailyWage (indexed by working-day number).
while(( $NUMBER_OF_HOUR > $totalEmpHrs && $NUMBER_OF_DAYS > $totalWorkingDays ))
do
totalWorkingDays=$((totalWorkingDays+1))
# Random draw 1..3 decides full day / part time / absent.
empHrs="$( getWorkingHours $((RANDOM%3+1)) )"
totalEmpHrs=$(($totalEmpHrs+$empHrs))
empDailyWage[$totalWorkingDays]="$( calcDailyWage $empHrs )"
day=$((day+1))
done
# NOTE(review): totalSalary multiplies hours by numberOfDays (not PER_HOUR)
# and is never printed -- the echo below shows the per-day wage array instead.
# Presumably leftover code; confirm the intended formula before relying on it.
totalSalary=$(($totalEmpHrs*$numberOfDays))
echo "Total Salary is : ${empDailyWage[@]}"
# Print "day : wage" for every recorded working day.
for day in ${!empDailyWage[@]}
do
printf "$day : ${empDailyWage[$day]} \n"
done
| true |
18e224d5c1deab6e723424622a741de29308b047 | Shell | andreacioni/opencv-build | /install-dependencies.sh | UTF-8 | 1,929 | 2.75 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Load build configuration (provides OPENCV_VERSION / OPENCV_CONTRIB_VERSION).
source ./config.sh
#Used for OpenCV
# Build toolchain, image/video codecs, GUI, V4L, python 2+3 and Java bindings.
sudo apt-get install -y build-essential cmake pkg-config checkinstall
sudo apt-get install -y libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev
sudo apt-get install -y libgtk2.0-dev libgstreamer0.10-0-dbg libgstreamer0.10-0 libgstreamer0.10-dev libv4l-0 libv4l-dev
sudo apt-get install -y libavcodec-dev libavformat-dev libswscale-dev libv4l-dev
sudo apt-get install -y libxvidcore-dev libx264-dev
sudo apt-get install -y libatlas-base-dev gfortran
sudo apt-get install -y python2.7 python3 python2.7-dev python3-dev
sudo apt-get install -y python-numpy python-scipy python-matplotlib
sudo apt-get install -y python3-numpy python3-scipy python3-matplotlib
sudo apt-get install -y default-jdk ant
sudo apt-get install -y libgtkglext1-dev
sudo apt-get install -y v4l-utils
#Setting up tools for cross compilation (armv6, RPi 1/Zero)
#cd ..
#git clone https://github.com/raspberrypi/tools.git --depth 1
#sudo ln -s ./tools/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/bin/arm-linux-gnueabihf-gcc /usr/bin/rpi-gcc
#sudo ln -s ./tools/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/bin/arm-linux-gnueabihf-g++ /usr/bin/rpi-g++
#sudo ln -s ./tools/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/bin/arm-linux-gnueabihf-ar /usr/bin/rpi-ar
#sudo ln -s ./tools/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/bin/arm-linux-gnueabihf-ranlib /usr/bin/rpi-ranlib
#Used for cross-compiling (armv7, RPi 2/3)
#sudo apt-get install gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf
#Intallink pip and numpy
# Bootstrap pip for both python 2 and 3, then install numpy system-wide.
mkdir pip/
cd pip
wget https://bootstrap.pypa.io/get-pip.py
sudo python get-pip.py
sudo python3 get-pip.py
sudo pip install numpy
sudo pip3 install numpy
#Downloading OpenCV and OpenCV contrib
# Clone both repositories and pin them to the versions named in config.sh.
cd ..
git clone https://github.com/opencv/opencv.git
cd opencv
git checkout $OPENCV_VERSION
cd ..
git clone https://github.com/opencv/opencv_contrib.git
cd opencv_contrib
git checkout $OPENCV_CONTRIB_VERSION
| true |
4e911261509fa9c4af4c74a0b7820fea1a68b657 | Shell | minghao2016/gromacs_solution_tutorial | /biphenyl_1_in_chloroform_500/solvate.sh | UTF-8 | 705 | 2.609375 | 3 | [] | no_license | #!/bin/bash
# Solvate one biphenyl molecule in a pre-equilibrated box of 500 chloroform
# molecules using GROMACS (editconf + solvate).
res_solute=BIP
model_solute_gro=biphenyl_GMX.gro
model_solvent_gro=chloroform_solv_500.gro
model_solvent_top=chloroform_solv_500.top
model_solution_gro=biphenyl_1_in_chloroform_solv_500.gro
model_solution_top=biphenyl_1_in_chloroform_solv_500.top
# The final line of a .gro file carries the box vectors; its first three
# fields are the box dimensions.
box=$(tail -n 1 ${model_solvent_gro})
echo $box
read -r box_x box_y box_z _ <<< "$box"
echo ${box_x} ${box_y} ${box_z}
# Seed the solution topology from the solvent topology, place the solute in an
# identically sized box, fill with solvent, then drop the intermediate file.
cp -p ${model_solvent_top} ${model_solution_top}
gmx editconf -f ${model_solute_gro} -o solute.gro -box ${box_x} ${box_y} ${box_z}
gmx solvate -cs ${model_solvent_gro} -cp solute.gro -p ${model_solution_top} -o ${model_solution_gro}
rm solute.gro
| true |
2bfd37234cc3f6b0c4868f3e083f3d9f20b79db1 | Shell | LilyHe789/llvm-zorg | /zorg/buildbot/builders/sanitizers/buildbot_bootstrap_msan.sh | UTF-8 | 1,079 | 3.109375 | 3 | [
"NCSA",
"Apache-2.0",
"LLVM-exception"
] | permissive | #!/usr/bin/env bash
# LLVM sanitizer buildbot: bootstrap clang, then build and check the
# MemorySanitizer stages. Helper functions (clobber, buildbot_update,
# build_stage*_*, check_stage*_*, cleanup) come from buildbot_functions.sh.
set -x
set -e
set -u
HERE="$(cd $(dirname $0) && pwd)"
. ${HERE}/buildbot_functions.sh
ROOT=`pwd`
PLATFORM=`uname`
export PATH="/usr/local/bin:$PATH"
LLVM=$ROOT/llvm
CMAKE_COMMON_OPTIONS+=" -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=ON -DLLVM_ENABLE_PER_TARGET_RUNTIME_DIR=OFF"
clobber
buildbot_update
# Stage 1
build_stage1_clang
check_stage1_msan
# Stage 2 / Memory Sanitizer
build_stage2_msan
# Subshell so `exit` propagates the check's status (not tee's) through the
# log-capturing pipeline; |& also routes stderr into the log.
(
check_stage2_msan |& tee stage2_msan.log
exit ${PIPESTATUS[0]}
)
# Re-run stage 2 with origin tracking only when MSan actually fired, for more
# informative reports.
if grep "WARNING: MemorySanitizer" stage2_msan.log ; then
# Stage 2 / MemoryWithOriginsSanitizer
(
build_stage2_msan_track_origins
check_stage2_msan_track_origins
)
fi
# Stage 3 / MemorySanitizer
(
{
build_stage3_msan
check_stage3_msan
} |& tee stage3_msan.log
exit ${PIPESTATUS[0]}
)
# Same origin-tracking re-run logic for stage 3 (stage 2 with origins is a
# prerequisite of the stage 3 origins build).
if grep "WARNING: MemorySanitizer" stage3_msan.log ; then
# Stage 3 / MemoryWithOriginsSanitizer
(
build_stage2_msan_track_origins
build_stage3_msan_track_origins
check_stage3_msan_track_origins
)
fi
cleanup $STAGE1_DIR
| true |
1170df425f46e0adce4f9b748de0df5fc950225c | Shell | rmaruthiyodan/docker-hdp-lab | /generate_json.sh | UTF-8 | 4,489 | 3.578125 | 4 | [] | no_license | #!/bin/bash
########
# Author: Kuldeep Kulkarni
# Description: This script does the Magic of automating HDP install using Ambari Blueprints
# Usage: generate_json.sh <properties-file> <ambari-host>
########
#set -x
#Globals
LOC=`pwd`
PROPS=$1
#Source props
# The properties file defines CLUSTERNAME, CLUSTER_VERSION, DOMAIN_NAME,
# DEFAULT_PASSWORD and per-host HOSTn / HOSTn_SERVICES entries.
source $LOC/$PROPS 2>/dev/null
STACK_VERSION=`echo $CLUSTER_VERSION|cut -c1-3`
AMBARI_HOST=$2
NUMBER_OF_HOSTS=`grep HOST $LOC/$PROPS|grep -v SERVICES|wc -l`
LAST_HOST=`grep HOST $LOC/$PROPS|grep -v SERVICES|head -n $NUMBER_OF_HOSTS|tail -1|cut -d'=' -f2`
# Every host except the last one goes into ./list (consumed by hostmap below).
grep HOST $LOC/$PROPS|grep -v SERVICES|grep -v $LAST_HOST|cut -d'=' -f2 > $LOC/list
#Generate hostmap function#
# hostmap: emit the Ambari host-mapping JSON on stdout, binding every host
# group to its FQDN. The last host is emitted outside the loop so the JSON
# array carries no trailing comma.
hostmap()
{
#Start of function
echo "{
\"blueprint\" : \"$CLUSTERNAME\",
\"default_password\" : \"$DEFAULT_PASSWORD\",
\"host_groups\" :["
for HOST in `cat list`
do
echo "{
\"name\" : \"$HOST\",
\"hosts\" : [
{
\"fqdn\" : \"$HOST.$DOMAIN_NAME\"
}
]
},"
done
echo "{
\"name\" : \"$LAST_HOST\",
\"hosts\" : [
{
\"fqdn\" : \"$LAST_HOST.$DOMAIN_NAME\"
}
]
}
]
}"
#End of function
}
# clustermap: emit the Ambari blueprint JSON on stdout. For every HOSTn in the
# properties file it lists the components from HOSTn_SERVICES; "last service
# of the last host" closes the JSON without a trailing comma.
clustermap()
{
#Start of function
LAST_HST_NAME=`grep 'HOST[0-9]*' $LOC/$PROPS|grep -v SERVICES|tail -1|cut -d'=' -f1`
echo "{
\"configurations\" : [ ],
\"host_groups\" : ["
for HOST in `grep -w 'HOST[0-9]*' $LOC/$PROPS|tr '\n' ' '`
do
HST_NAME_VAR=`echo $HOST|cut -d'=' -f1`
echo "{
\"name\" : \"`grep $HST_NAME_VAR $PROPS |head -1|cut -d'=' -f2|cut -d'.' -f1`\",
\"components\" : ["
LAST_SVC=`grep $HST_NAME_VAR"_SERVICES" $LOC/$PROPS|cut -d'=' -f2|tr ',' ' '|rev|cut -d' ' -f1|rev|cut -d'"' -f1`
for SVC in `grep $HST_NAME_VAR"_SERVICES" $LOC/$PROPS|cut -d'=' -f2|tr ',' ' '|cut -d'"' -f2|cut -d'"' -f1`
do
echo "{
\"name\" : \"$SVC\""
# After the last service, close the components list; the comma layout then
# depends on whether this is also the last host group.
if [ "$SVC" == "$LAST_SVC" ]
then
echo "}
],
\"cardinality\" : "1""
if [ "$HST_NAME_VAR" == "$LAST_HST_NAME" ]
then
echo "}"
else
echo "},"
fi
else
echo "},"
fi
done
done
echo " ],
\"Blueprints\" : {
\"blueprint_name\" : \"$CLUSTERNAME\",
\"stack_name\" : \"HDP\",
\"stack_version\" : \"$STACK_VERSION\"
}
}"
#End of function
}
# repobuilder: resolve the HDP and HDP-UTILS base URLs (preferring the
# properties file, falling back to /opt/docker_cluster/local_repo) and write
# the Ambari repository payloads to $LOC/repo.json and $LOC/repo-utils.json.
# Exports BASE_URL_UTILS for installhdp, which derives the repo id from it.
repobuilder()
{
#Start of function
BASE_URL=`grep $CLUSTER_VERSION $LOC/$PROPS|grep BASE|cut -d'=' -f2|sed -e 's/^"//' -e 's/"$//'|sort -n|tail -1`
if [ -z "$BASE_URL" ]
then
BASE_URL=`grep $CLUSTER_VERSION /opt/docker_cluster/local_repo |grep BASE|cut -d'=' -f2|sed -e 's/^"//' -e 's/"$//'|sort -n|tail -1`
fi
echo "{
\"Repositories\" : {
\"base_url\" : \"$BASE_URL\",
\"verify_base_url\" : true
}
}" > $LOC/repo.json
BASE_URL_UTILS=`grep UTILS $LOC/$PROPS|cut -d'=' -f2|sed -e 's/^"//' -e 's/"$//'|sort -n|tail -1`
if [ -z "$BASE_URL_UTILS" ]
then
# BUG FIX: the fallback used to assign to a misspelled variable
# (BASE_URL_URILS), so the local-repo fallback never took effect.
BASE_URL_UTILS=`grep UTILS /opt/docker_cluster/local_repo |cut -d'=' -f2|sed -e 's/^"//' -e 's/"$//'|sort -n|tail -1`
fi
export BASE_URL_UTILS;
echo "{
\"Repositories\" : {
\"base_url\" : \"$BASE_URL_UTILS\",
\"verify_base_url\" : true
}
}" > $LOC/repo-utils.json
#End of function
}
# timestamp: print the current local time as YYYY-MM-DD-HH:MM:SS on stdout;
# used to prefix the progress messages in the main section below.
timestamp()
{
date '+%Y-%m-%d-%H:%M:%S'
}
# installhdp: drive the Ambari REST API -- register the blueprint, point the
# stack at the internal HDP / HDP-UTILS repositories, then create the cluster
# from the hostmap. Uses the default admin:admin credentials.
installhdp()
{
#Install hdp using Ambari Blueprints
# The repo id (e.g. HDP-UTILS-1.1.0.21) is the 5th path segment of the URL.
HDP_UTILS_VERSION=`echo $BASE_URL_UTILS|cut -d'/' -f5`
curl -H "X-Requested-By: ambari" -X POST -u admin:admin http://$AMBARI_HOST:8080/api/v1/blueprints/$CLUSTERNAME -d @"$LOC"/cluster_config.json
sleep 1
curl -H "X-Requested-By: ambari" -X PUT -u admin:admin http://$AMBARI_HOST:8080/api/v1/stacks/HDP/versions/$STACK_VERSION/operating_systems/redhat6/repositories/HDP-$STACK_VERSION -d @$LOC/repo.json
sleep 1
curl -H "X-Requested-By: ambari" -X PUT -u admin:admin http://$AMBARI_HOST:8080/api/v1/stacks/HDP/versions/$STACK_VERSION/operating_systems/redhat6/repositories/$HDP_UTILS_VERSION -d @$LOC/repo-utils.json
sleep 1
curl -H "X-Requested-By: ambari" -X POST -u admin:admin http://$AMBARI_HOST:8080/api/v1/clusters/$CLUSTERNAME -d @$LOC/hostmap.json
}
#################
# Main function #
################
#Generate hostmap
echo -e "`timestamp` Generating hostmap json.."
hostmap > $LOC/hostmap.json
echo "`timestamp` Saved $LOC/hostmap.json"
#Generate cluster config json
echo -e "`timestamp` Generating cluster configuration json"
clustermap > $LOC/cluster_config.json
echo "`timestamp` Saved $LOC/cluster_config.json"
#Create internal repo json
repobuilder
echo -e "`timestamp` Generating internal repositories json..\n`timestamp` Saved $LOC/repo.json & $LOC/repo-utils.json"
#Start hdp installation
# Kick off the actual cluster deployment through the Ambari REST API.
installhdp
| true |
b1f383d5014d035b873f5220322bcc72998cb50c | Shell | braineo/configs | /zim/.zshell/aliases.zsh | UTF-8 | 1,472 | 2.90625 | 3 | [] | no_license | if [[ "$OSTYPE" == linux* ]]; then
# Linux: terminal emacsclient (empty -a falls back to starting a daemon); the
# zh_CN locale on the GUI alias works around CJK input issues.
alias emacs="emacsclient -t -a ''"
alias emacsapp="LC_CTYPE=zh_CN.UTF-8 /usr/local/bin/emacs"
elif [[ "$OSTYPE" = darwin* ]]; then
# macOS: use the Emacs.app bundle / cocoa build instead.
alias emacsapp="open -a /Applications/Emacs.app"
alias emacsd="/usr/local/bin/emacs --daemon"
alias emacs="emacsclient -c -a emacs-cocoa"
fi
# Ask the running Emacs daemon to shut down.
alias killemacs="emacsclient -e '(kill-emacs)'"
#
# git
#
alias ga='git add'
alias gaa='git add --all'
alias gss='git status -s'
# Local
alias gs='git stash'
alias gst='git reset'
alias gco='git checkout'
alias gcp='git cherry-pick'
# Data
alias gc='git commit'
alias gl='git pull'
alias gp='git push'
alias gpf='git push --force-with-lease'
alias gm='git merge'
# Log
alias glg='git log --stat'
alias glgp='git log --stat -p'
# delta-paged variant of glgp
alias glgpd='BAT_PAGER="less -R" GIT_PAGER=delta glgp'
alias glgg='git log --graph'
# Upstream
alias gfa='git fetch --all --prune'
alias gcl='git clone --recursive'
# Diff
alias gd='git diff'
alias gdca='git diff --cached'
alias dgd='GIT_PAGER=delta gd --ignore-all-space'
# Submodule
alias gsu='git submodule update'
#
# navigation
#
# zsh global aliases (-g): these expand anywhere on the command line, so
# `cd ...` walks two directories up, etc.
alias -g ...='../..'
alias -g ....='../../..'
alias -g .....='../../../..'
alias -g ......='../../../../..'
# `-` and the digit aliases jump through zsh's directory stack (see `d`).
alias -- -='cd -'
alias 1='cd -'
alias 2='cd -2'
alias 3='cd -3'
alias 4='cd -4'
alias 5='cd -5'
alias 6='cd -6'
alias 7='cd -7'
alias 8='cd -8'
alias 9='cd -9'
alias d='dirs -v | head -10'
alias ncdu='ncdu --color dark -rr -x --exclude .git --exclude node_modules'
| true |
a0e150c54ae4474997f452fdaff418d8d71de8a2 | Shell | garygriswold/SafeBible | /Validation/ProductionUpload.sh | UTF-8 | 447 | 3.328125 | 3 | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/sh -v
# Copy a validated Bible database into the "ready" area and zip it there.
# Usage: Upload.sh VERSION
if [ -z "$1" ]; then
	echo "Usage: Upload.sh VERSION"
	exit 1
fi
VERSION=$1
SOURCE=../../DBL/4validated
TARGET=../../DBL/5ready
HOST=root@cloud.shortsands.com
DIRECTORY=/root/StaticRoot/book/
echo "Upload $VERSION"
cp "${SOURCE}/${VERSION}.db" "${TARGET}/${VERSION}.db"
# Abort if the target directory is missing; the rm/zip below would otherwise
# run in whatever directory we happen to be in.
cd "${TARGET}" || exit 1
# -f: a missing old archive is not an error on the first upload.
rm -f "${VERSION}.db.zip"
zip "${VERSION}.db.zip" "${VERSION}.db"
##scp -P7022 ${VERSION}.db.zip ${HOST}:${DIRECTORY}
##scp -P7022 ${VERSION}.db ${HOST}:${DIRECTORY}
| true |
09b12fd442d4ed852bed26728808e3d0d01d1312 | Shell | hydrateio/k8s-appliance | /scripts/centos7-hardened-install.sh | UTF-8 | 11,017 | 2.828125 | 3 | [
"Unlicense"
] | permissive | # Installs cloudinit, cloud-init, havaged - requires epel repo
yum -y install wget
# CentOS release and mirror used both for the netboot images below and for the
# kickstart's install repositories.
version=7
mirror=http://mirror.bytemark.co.uk/centos/
# Detect primary root drive
# (Xen, virtio, then SATA/SCSI naming; `drive` feeds the grub entry appended
# at the bottom of this script -- if none match, `drive` stays unset.)
if [ -e /dev/xvda ]; then
drive=xvda
elif [ -e /dev/vda ]; then
drive=vda
elif [ -e /dev/sda ]; then
drive=sda
fi
# Stage the CentOS installer kernel and initrd where grub can boot them.
mkdir /boot/centos
cd /boot/centos
wget ${mirror}/${version}/os/x86_64/isolinux/vmlinuz
wget ${mirror}/${version}/os/x86_64/isolinux/initrd.img
# This kickstart file has been adapted from the scap-security-guide kickstart
# file in: https://github.com/OpenSCAP/scap-security-guide and the RedHatGov
# project: https://github.com/RedHatGov/ssg-el7-kickstart
# BUG FIX: the heredoc delimiter is now quoted ('EOKSCONFIG') so the kickstart
# is written out literally. The unquoted form let the shell expand the
# $-sequences inside the crypted password hash ($6$Uq...), corrupting the
# generated file; nothing in the heredoc relies on shell expansion.
cat > /boot/centos/kickstart.ks << 'EOKSCONFIG'
# SCAP Security Guide OSPP/USGCB profile kickstart for Red Hat Enterprise Linux 7 Server
# Version: 0.0.2
# Date: 2015-11-19
#
# Based on:
# http://fedoraproject.org/wiki/Anaconda/Kickstart
# https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Installation_Guide/sect-kickstart-syntax.html
# http://usgcb.nist.gov/usgcb/content/configuration/workstation-ks.cfg
# Text or Graphical
text
# Install a fresh new system (optional)
install
# Don't run the Setup Agent on first boot
firstboot --disable
# Accept Eula
eula --agreed
# Suppress unsupported hardware warning
unsupported_hardware
# Don't configure X even if installed
skipx
# Specify installation method to use for installation
# To use a different one comment out the 'url' one below, update
# the selected choice with proper options & un-comment it
#
# Install from an installation tree on a remote server via FTP or HTTP:
# --url the URL to install from
#
# Example:
#
# url --url=http://192.168.122.1/image
#
# Modify concrete URL in the above example appropriately to reflect the actual
# environment machine is to be installed in
#
# Other possible / supported installation methods:
# * install from the first CD-ROM/DVD drive on the system:
#
# cdrom
#
# * install from a directory of ISO images on a local drive:
#
# harddrive --partition=hdb2 --dir=/tmp/install-tree
#
# * install from provided NFS server:
#
# nfs --server=<hostname> --dir=<directory> [--opts=<nfs options>]
#
# We'll be using a known good mirror of CentOS repos for the install
# Many thanks to ByteMark, a Manchester based ISP worth checking out
url --url="http://mirror.bytemark.co.uk/centos/7/os/x86_64/"
repo --name="base" --baseurl=http://mirror.bytemark.co.uk/centos/7/os/x86_64/
# Including the updates repo ensures we install the latest packages at install time
repo --name="updates" --baseurl=http://mirror.bytemark.co.uk/centos/7/updates/x86_64/
repo --name="extras" --baseurl=http://mirror.bytemark.co.uk/centos/7/extras/x86_64/
repo --name="epel" --baseurl=http://mirror.bytemark.co.uk/fedora/epel/7/x86_64/
repo --name="puppet" --baseurl=https://yum.puppetlabs.com/el/7/PC1/x86_64/
# OS Locale and time
lang en_GB.UTF-8
keyboard uk
timezone Europe/London --isUtc --ntpservers=0.centos.pool.ntp.org,1.centos.pool.ntp.org,2.centos.pool.ntp.org,3.centos.pool.ntp.org
# Configure network information for target system and activate network devices in the installer environment (optional)
# --onboot enable device at a boot time
# --device device to be activated and / or configured with the network command
# --bootproto method to obtain networking configuration for device (default dhcp)
# --noipv6 disable IPv6 on this device
#
# NOTE: Usage of DHCP will fail CCE-27021-5 (DISA FSO RHEL-06-000292). To use static IP configuration,
# "--bootproto=static" must be used. For example:
# network --bootproto=static --ip=10.0.2.15 --netmask=255.255.255.0 --gateway=10.0.2.254 --nameserver 192.168.2.1,192.168.3.1
#
network --onboot yes --device eth0 --bootproto dhcp --ipv6=auto --activate
# Set the system's root password (required)
rootpw --lock --iscrypted "*"
# Configure firewall settings for the system (optional)
# --enabled reject incoming connections that are not in response to outbound requests
# --ssh allow sshd service through the firewall
firewall --enabled --ssh
# Set up the authentication options for the system (required)
# --enableshadow enable shadowed passwords by default
# --passalgo hash / crypt algorithm for new passwords
# See the manual page for authconfig for a complete list of possible options.
authconfig --enableshadow --passalgo=sha512
# State of SELinux on the installed system (optional)
# Defaults to enforcing
selinux --enforcing
# Specify how the bootloader should be installed (required)
bootloader --location=mbr --append="crashkernel=auto rhgb quiet" --timeout=0
# Initialize (format) all disks (optional)
zerombr
# The following partition layout scheme assumes disk of size 20GB or larger
# Modify size of partitions appropriately to reflect actual machine's hardware
#
# Remove Linux partitions from the system prior to creating new ones (optional)
# --linux erase all Linux partitions
# --initlabel initialize the disk label to the default based on the underlying architecture
clearpart --linux --initlabel
# Create primary system partitions (required for installs)
part /boot --fstype=xfs --size=512
part pv.00 --grow --size=1
# Create a Logical Volume Management (LVM) group (optional)
volgroup VolGroup00 --pesize=4096 pv.00
# Create particular logical volumes (optional)
logvol / --fstype=xfs --name=00_root --vgname=VolGroup00 --size=256 --fsoptions="defaults,nobarrier,noatime,nodiratime"
# CCE-26557-9: Ensure /home Located On Separate Partition
logvol /home --fstype=xfs --name=01_home --vgname=VolGroup00 --size=1024 --fsoptions="defaults,nobarrier,noatime,nodiratime,nodev"
# CCE-26435-8: Ensure /tmp Located On Separate Partition
logvol /tmp --fstype=xfs --name=02_tmp --vgname=VolGroup00 --size=256 --fsoptions="defaults,nobarrier,noatime,nodiratime,nodev,noexec,nosuid"
# CCE-26639-5: Ensure /var Located On Separate Partition
logvol /var --fstype=xfs --name=03_var --vgname=VolGroup00 --size=512 --fsoptions="defaults,nobarrier,noatime,nodiratime,nodev"
logvol /var/tmp --fstype=xfs --name=04_var_tmp --vgname=VolGroup00 --size=256 --fsoptions="defaults,nobarrier,noatime,nodiratime,nodev,noexec,nosuid"
# CCE-26215-4: Ensure /var/log Located On Separate Partition
logvol /var/log --fstype=xfs --name=05_var_log --vgname=VolGroup00 --size=256 --fsoptions="defaults,nobarrier,noatime,nodiratime,nodev"
# CCE-26436-6: Ensure /var/log/audit Located On Separate Partition
logvol /var/log/audit --fstype=xfs --name=06_var_log_audit --vgname=VolGroup00 --size=256 --fsoptions="defaults,nobarrier,noatime,nodiratime,nodev"
# Usually where applications get put to run
logvol /opt --fstype=xfs --name=07_opt --vgname=VolGroup00 --size=512 --fsoptions="defaults,nobarrier,noatime,nodiratime,nodev"
logvol /usr --fstype=xfs --name=08_usr --vgname=VolGroup00 --size=1536 --fsoptions="defaults,nobarrier,noatime,nodiratime,nodev"
# Very small swap - we usually set swap to 1 for safety
logvol swap --name=lv_swap --vgname=VolGroup00 --size=128
# Service configuration
services --enabled=NetworkManager,sshd,chronyd,tuned,haveged
# Packages selection (%packages section is required)
%packages --excludedocs
# Install ther latest security guide packege
scap-security-guide
# CCE-27024-9: Install AIDE
aide
# Install libreswan package
libreswan
# A selection of basic system packages
@core
chrony
yum-utils
system-config-firewall-base
wget
# tuned is great for the cloud / virtual world
tuned
# Cloud init bootstraps instances based on this AMI
cloud-init
# havaged improves entropy in the virtual world
haveged
# unneeded firmware
-aic94xx-firmware
-atmel-firmware
-b43-openfwwf
-bfa-firmware
-ipw2100-firmware
-ipw2200-firmware
-ivtv-firmware
-iwl100-firmware
-iwl1000-firmware
-iwl3945-firmware
-iwl4965-firmware
-iwl5000-firmware
-iwl5150-firmware
-iwl6000-firmware
-iwl6000g2a-firmware
-iwl6050-firmware
-libertas-usb8388-firmware
-ql2100-firmware
-ql2200-firmware
-ql23xx-firmware
-ql2400-firmware
-ql2500-firmware
-rt61pci-firmware
-rt73usb-firmware
-xorg-x11-drv-ati-firmware
-zd1211-firmware
# Disable prelink by not installing it
-prelink
%end
# We can apply most security config at install time with the kickstart addon
%addon org_fedora_oscap
content-type = scap-security-guide
profile = pci-dss
%end
# A bit of cleanup post install
%post
# cloud-init config
mkdir -p /etc/cloud/
echo "---
users:
- default
preserve_hostname: false
# This is our pre-base image. Update packages.
package_update: true
package_reboot_if_required: true
# We're in the UK so let's accept it.
locale_configfile: /etc/sysconfig/i18n
locale: en_GB.UTF-8
timezone: Europe/London
# SSH Configuration
disable_root: true
ssh_pwauth: no
ssh_deletekeys: true
ssh_genkeytypes: ~
syslog_fix_perms: ~
system_info:
default_user:
name: centos
lock_passwd: false
# password: centos
passwd: $6$Uq.eaVbT3$mNtmpx.3bMPN/DuMs8BjRMCIrFzpglPPw2cf9TvjOU6mD4jav3NOGWQpHX8jF.IIiMhbTEve.zOsD7o6RXVB.1
gecos: Administrator
groups: [wheel, adm, systemd-journal]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/bash
distro: rhel
paths:
cloud_dir: /var/lib/cloud
templates_dir: /etc/cloud/templates
ssh_svcname: sshd
# Edit these to our taste
cloud_init_modules:
- migrator
- bootcmd
- write-files
- growpart
- resizefs
- set_hostname
- update_hostname
- update_etc_hosts
- rsyslog
- users-groups
- ssh
cloud_config_modules:
- mounts
- locale
- set-passwords
- yum-add-repo
- package-update-upgrade-install
- timezone
- puppet
- chef
- salt-minion
- mcollective
- disable-ec2-metadata
- runcmd
cloud_final_modules:
- rightscale_userdata
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
- scripts-user
- ssh-authkey-fingerprints
- keys-to-console
- phone-home
- final-message
" > /etc/cloud/cloud.cfg
# Cleanup SSH keys
rm -f /etc/ssh/*key*
rm -rf ~/.ssh/
# Don't require tty for ssh / sudo
sed -i "s/^.*requiretty/#Defaults requiretty/" /etc/sudoers
# Run the virtual-guest tuned profile
echo "virtual-guest" > /etc/tune-profiles/active-profile
# Let SELinux relabel FS on next boot
touch /.autorelabel
%end
reboot --eject
EOKSCONFIG
echo "menuentry 'centosinstall' {
set root='hd0,msdos1'
linux /boot/centos/vmlinuz ip=dhcp ksdevice=eth0 ks=hd:${drive}1:/boot/centos/kickstart.ks method=${mirror}/${version}/os/x86_64/ lang=en_US keymap=us
initrd /boot/centos/initrd.img
}" >> /etc/grub.d/40_custom
echo 'GRUB_DEFAULT=saved
GRUB_HIDDEN_TIMEOUT=
GRUB_TIMEOUT=2
GRUB_RECORDFAIL_TIMEOUT=5
GRUB_CMDLINE_LINUX_DEFAULT="quiet nosplash vga=771 nomodeset"
GRUB_DISABLE_LINUX_UUID=true' > /etc/default/grub
grub2-set-default 'centosinstall'
grub2-mkconfig -o /boot/grub2/grub.cfg
rm -rf ~/.ssh/*
rm -rf /root/*
reboot
| true |
d7a75aa95021c80012f715b3b81bfbf7840fddea | Shell | kagurazakayashi/CodeNotebook | /ShellScript_Linux_Command/chmod_更改文件权限.sh | UTF-8 | 1,027 | 3.609375 | 4 | [] | no_license | # 更改用户文件的权限 change mode
# Change file permissions with the chmod command ("change mode").
# chmod {a,u,g,o} {+,-} {r,w,x} <filename>
# {a,u,g,o}
# a  all users
# u  the owning user
# g  users in the file's group
# o  other users outside the group
# {+,-}
# +  grant the permission
# -  revoke the permission
# {r,w,x}
# r  readable
# w  writable
# x  executable
# <filename>
# the file whose permissions are changed
# Turn -rw-r--r-- into -rw-rw-r-- (grant write permission to the group)
chmod g+w hello.txt # g = group users, w = writable
# Turn -rw-rw-r-- into -rwxrw-r-- (the owning user may now execute it)
chmod u+x hello.txt
# Let other users write as well: -rwxrw-rw-
# (fixed: this was "chmod o+x", which grants execute rather than write and
#  would have produced -rwxrw-r-x instead of the -rwxrw-rw- stated here)
chmod o+w hello.txt
# Revoke write permission from all users: -r-xr--r--
chmod a-w hello.txt
# Numeric permissions: a bit is 1 when the permission is granted, 0 when not
# symbolic - r - x r - - r - -
# binary     1 0 1 1 0 0 1 0 0
# decimal      5     4     4
chmod 544 hello.txt
# symbolic - r w - r w - r w -
# binary     1 1 0 1 1 0 1 1 0
# decimal      6     6     6
# (fixed: the symbolic row previously showed rw-r--r--, which is 644, and
#  did not match the binary/decimal 666 shown with it)
chmod 666 hello.txt
b208f5ca25e0f1ee95c8685ce0962b22cfc05114 | Shell | ljfranklin/terraform-resource | /scripts/run-tests | UTF-8 | 465 | 2.796875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Run the terraform-resource Ginkgo test suite, regardless of the caller's cwd.
set -eu -o pipefail

repo_root="$( cd "$( dirname "$0" )" && cd .. && pwd )"
suite_dir="$( cd "${repo_root}/src/terraform-resource/" && pwd )"

# flakeAttempts to temporarily work around S3 eventual consistency issues
ginkgo_flags=(
  -r
  -p
  -skipPackage vendor/
  -randomizeAllSpecs
  -randomizeSuites
  -slowSpecThreshold 30
  -flakeAttempts 2
)

pushd "${suite_dir}" > /dev/null

set -x
ginkgo "${ginkgo_flags[@]}" "$@"
set +x

popd > /dev/null
d2c2cf7ea744fe7ad6947854b2a3fd1e16d065c1 | Shell | mk8310/hadoop-deploy | /kafka/start-kafka.sh | UTF-8 | 689 | 3.5 | 4 | [] | no_license | #!/usr/bin/env bash
KafkaNodes=`cat ../remote/servers/kafka-nodes.lst`
Port=22
RootName="root"
RootPassword="sz0ByxjoYeTh"
UserName="hduser"
GroupName="hdgroup"
UserPassword="sz0ByxjoYeTh"
KAFKA_HOME=/opt/cluster/kafka
KAFKA_BIN=${KAFKA_HOME}/bin
KAFKA_CONF=${KAFKA_HOME}/config/server.properties
for Host in ${KafkaNodes}
do
ssh -p ${Port} ${UserName}@${Host} "source /etc/profile ;${KAFKA_BIN}/kafka-server-stop.sh ; ${KAFKA_BIN}/kafka-server-start.sh -daemon ${KAFKA_CONF} &"
if [[ $? -eq 0 ]];then
echo -e "\033[32m Start kafka service on $Host success. \033[0m"
else
echo -e "\033[31m Start kafka service on $Host failure. \033[0m"
exit
fi
done
| true |
437e3ad04d1a22bfef1619824e1eb9d4abc0a95a | Shell | warrickmoran/BMH | /rpms-BMH/Installer.neospeech/scripts/init.d/neospeech_tts | UTF-8 | 5,269 | 3.953125 | 4 | [] | no_license | #!/bin/bash
#
# neospeech_tts This shell script takes care of starting and stopping
# the NeoSpeech TTS Server.
#
# chkconfig: 235 99 10
# description: BMH NeoSpeech TTS
# processname: ttssrv
##############################################################################
# BMH NeoSpeech TTS Service Script.
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 10/15/15 4976 bkowal Initial Creation.
# 01/06/16 5210 bkowal Verify that the TTS-provided
# stop script finishes.
##############################################################################
# Source function library.
. /etc/rc.d/init.d/functions
# Source networking configuration.
. /etc/sysconfig/network
# Check that networking is up.
[ ${NETWORKING} = "no" ] && exit 0
RETVAL=0
START_SCRIPT="ttssrv_start"
STOP_SCRIPT="ttssrv_stop"
PROG="NeoSpeech TTS"
NEOSPEECH_HOME=/awips2/bmh/neospeech
NEOSPEECH_BIN=${NEOSPEECH_HOME}/bin
NEOSPEECH_LOGS=${NEOSPEECH_HOME}/log
TODAY=`/bin/date +%Y%m%d`
NEOSPEECH_LOG=${NEOSPEECH_LOGS}/tts-start-${TODAY}.log
# Look up the running TTS server process.
# Sets globals: neospeech_pid (pid, if any) and neospeech_pid_found
# (pgrep's exit status: 0 = found, non-zero = not running). The $? capture
# must immediately follow the pgrep substitution.
getTtsPid() {
   neospeech_pid=`pgrep -f "ttssrv"`
   neospeech_pid_found=$?
}

# Same lookup for the vendor-provided stop script while it runs.
# Sets globals: neospeech_stop_pid and neospeech_stop_pid_found.
getTtsStopPid() {
   neospeech_stop_pid=`pgrep -f "${STOP_SCRIPT}"`
   neospeech_stop_pid_found=$?
}
# Print a human-readable status line for a service, following init-script
# status conventions.
#   $1 - display name of the service
#   $2 - status code (0 = running, 1 = stopped, 4 = insufficient privileges)
#   $3 - pid to report when the service is running
handleStatus() {
   local svc_name=$1
   local svc_code=$2
   local svc_pid=$3

   if [ "${svc_code}" = "0" ]; then
      echo "${svc_name} (pid ${svc_pid}) is running ..."
   elif [ "${svc_code}" = "1" ]; then
      echo "${svc_name} is not running."
   elif [ "${svc_code}" = "4" ]; then
      echo "${svc_name} status unknown due to insufficient privileges."
   else
      echo "${svc_name} status unknown!"
   fi
}
# Start the NeoSpeech TTS server via the vendor start script, then poll
# (up to ~20s) until the ttssrv process appears. Console feedback uses the
# 'success'/'failure' helpers from the sourced init function library.
start() {
   getTtsPid
   if [ ${neospeech_pid_found} -eq 0 ]; then
      echo -ne "${PROG} (pid ${neospeech_pid}) is already running."
      failure
      echo
      return
   fi

   # Start NeoSpeech TTS
   local now=`date`
   echo "Starting ${PROG} @ ${now} ..." >> ${NEOSPEECH_LOG}
   echo -ne "Starting ${PROG} ..."
   /bin/bash ${NEOSPEECH_BIN}/${START_SCRIPT} >> ${NEOSPEECH_LOG} 2>&1
   if [ $? -ne 0 ]; then
      failure
      echo
      echo "Failed to start ${PROG}!" >> ${NEOSPEECH_LOG}
      return
   fi

   local attempt_max=10
   local attempt_count=0

   # Verify that NeoSpeech TTS has started
   # (poll every 2 seconds, at most ${attempt_max} attempts).
   while [ ${neospeech_pid_found} -ne 0 ]; do
      let attempt_count+=1
      if [ ${attempt_count} -eq ${attempt_max} ]; then
         failure
         echo
         # the assumption is that there will be stacktraces in the log
         # at this point to differentiate between this failure and
         # the previous potential failure.
         echo "Failed to start ${PROG}!" >> ${NEOSPEECH_LOG}
         return
      fi
      sleep 2
      getTtsPid
   done

   success
   echo
}
# Stop the NeoSpeech TTS server. Two phases, each with a force-kill
# escalation:
#   1) run the vendor stop script in the background and wait for it to
#      finish; kill -9 it if it hangs, and fail if even that does not work.
#   2) wait for the ttssrv process itself to exit; kill -9 it if needed.
# The exact ordering of the pid refreshes and counter resets is
# load-bearing — do not reorder.
stop() {
   # get the pids
   getTtsPid
   if [ ${neospeech_pid_found} -ne 0 ]; then
      echo -ne "${PROG} is not running."
      failure
      echo
      return
   fi

   local attempt_max=10
   local force_kill_neospeech=0
   local force_kill_neospeech_stop=0

   local now=`date`
   echo "Stopping ${PROG} @ ${now} ..." >> ${NEOSPEECH_LOG}
   echo -ne "Stopping ${PROG} ..."
   # Run the tts stop script in the background so that we will be able to verify
   # its successful execution.
   /bin/bash ${NEOSPEECH_BIN}/${STOP_SCRIPT} >> ${NEOSPEECH_LOG} 2>&1 &

   # verify that the NeoSpeech stop script has successfully finished
   getTtsStopPid
   local attempt_count=0
   while [ ${neospeech_stop_pid_found} -eq 0 ]; do
      let attempt_count+=1
      if [ ${attempt_count} -eq ${attempt_max} -a ${force_kill_neospeech_stop} -eq 0 ]; then
         # First timeout: force-kill the stop script and start counting again.
         force_kill_neospeech_stop=1
         attempt_count=0
         echo "Killing ${STOP_SCRIPT} (pid ${neospeech_stop_pid})" >> ${NEOSPEECH_LOG}
         kill -9 ${neospeech_stop_pid} > /dev/null 2>&1
      elif [ ${attempt_count} -eq ${attempt_max} -a ${force_kill_neospeech_stop} -eq 1 ]; then
         # Second timeout: even kill -9 failed; give up.
         failure
         echo
         echo "Failed to stop ${STOP_SCRIPT} (pid ${neospeech_stop_pid})" | tee -a ${NEOSPEECH_LOG}
         echo "Failed to stop ${PROG} (pid ${neospeech_pid})" | tee -a ${NEOSPEECH_LOG}
         return
      fi
      sleep 1
      getTtsStopPid
   done

   # wait for neospeech to stop
   attempt_count=0
   if [ ${force_kill_neospeech_stop} -eq 1 ]; then
      # just skip to the kill NeoSpeech phase because the NeoSpeech stop script
      # failed to finish.
      attempt_count=9
      # refresh information about the running NeoSpeech process.
      getTtsPid
   fi
   while [ ${neospeech_pid_found} -eq 0 ]; do
      let attempt_count+=1
      if [ ${attempt_count} -eq ${attempt_max} -a ${force_kill_neospeech} -eq 0 ]; then
         force_kill_neospeech=1
         attempt_count=0
         echo "Killing ${PROG} (pid ${neospeech_pid})" >> ${NEOSPEECH_LOG}
         kill -9 ${neospeech_pid} > /dev/null 2>&1
      elif [ ${attempt_count} -eq ${attempt_max} -a ${force_kill_neospeech} -eq 1 ]; then
         failure
         echo
         echo "Failed to stop ${PROG} (pid ${neospeech_pid})" | tee -a ${NEOSPEECH_LOG}
         return
      fi
      sleep 2
      getTtsPid
   done

   success
   echo
   return
}
# Report whether the TTS server is running, in init-script status format.
status() {
   getTtsPid
   handleStatus "${PROG}" ${neospeech_pid_found} ${neospeech_pid}
}
# Print the supported sub-commands of this init script.
usage() {
   printf 'Usage: %s {start|stop|status|restart}\n' "$0"
}
# Dispatch the requested action; anything unrecognized prints the usage text.
action=$1
case "${action}" in
   start|stop|status)
      "${action}"
      ;;
   restart)
      stop
      sleep 1
      start
      ;;
   *)
      usage
      ;;
esac
4ce1ae1b59b1266749069ee9cee6a0cca9814d8a | Shell | renukachandra/offline_jenkins_robot | /setup.sh | UTF-8 | 2,355 | 4.09375 | 4 | [] | no_license | #!/bin/bash -
#title :setup.sh
#description :this script will install everything needed, setup jenkins, create a test job, create a virtual environment venv and install robot framework with appium library in venv
#author :Renuka chandra
#date :20092017
#version :2.0
#usage :bash setup.sh
#==============================================================================
echo "#================precheck=============================#"
echo "Running a precheck...."
#checking the java installation status
if type -p java; then
echo found java executable in PATH
_java=java
elif [[ -n "$JAVA_HOME" ]] && [[ -x "$JAVA_HOME/bin/java" ]]; then
echo found java executable in JAVA_HOME
_java="$JAVA_HOME/bin/java"
else
echo "please install java using command --/ sudo apt-get install openjdk-8-jre -y /-- before running this script"
exit 1
fi
if [[ "$_java" ]]; then
version=$("$_java" -version 2>&1 | awk -F '"' '/version/ {print $2}')
echo version "$version"
if [[ "$version" > "1.7" ]]; then
echo java requirements satisfied
else
echo please install java version later than 1.7
exit 1
fi
fi
#checking the python installation status
if command -v python2 > /dev/null 2>&1; then
echo python requirements satisfied
else
echo please install python before running this script!
fi
echo "starting the setup...."
sudo apt-get clean #cleaning the apt-getr archives
sudo cp -a setup/files/archive_setup/. /var/cache/apt/archives/ #copying the required files
bash setup/jenkins_setup.sh #running jenkins setup
bash setup/createjob.sh #running create job script
bash setup/virtualenvsetup.sh #running virtual environment setup script
echo "#================postcheck=============================#"
echo "running a postcheck....."
jen_output=$(sudo service jenkins status)
if [[ $jen_output == *"Active: active (exited)"* ]]; then
echo "Jenkins is installed successfully and running"
else
echo "Jenkins is either not installed properly or not up, please check and re-run the script if needed"
fi
source venv/bin/activate
venv_output=$(pip list)
if [[ $venv_output == *"robotframework"* ]]; then
echo "robotframework is installed inside venv (virtual environment)"
else
echo "robotframework is not installed"
fi
| true |
5acac12e9e6c4f087a5f3a2427872d5f7f2411dd | Shell | typoworx-de/linux-snipplets | /bin/pecl-wrapper | UTF-8 | 2,599 | 3.609375 | 4 | [] | no_license | #!/bin/bash
#if [[ $UID -ne 0 ]];
#then
# echo "Root permissions required!";
# exit 1;
#fi
base=$(basename $0);
phpVersion=${base//[!0-9\.]/};
phpLibPath=$(php${phpVersion} -i | grep -E '(^|\n)extension_dir' | cut -d '>' -f 2 | cut -d '=' -f 1 | tr -d '[:space:]');
if [[ -z "${phpVersion}" ]];
then
echo "Error unable to determine PHP-Version for PECL";
exit 1;
fi
if [[ -z "${phpLibPath}" ]];
then
echo "Error unable to determine php extension path /usr/lib/php/????";
exit 1;
fi
function _pecl()
{
# [ -d /usr/share/php${phpVersion} ] || {
# cp -R /usr/share/php /usr/share/php${phpVersion}
# }
$(which php${phpVersion}) \
-c ~/.pearrc-php${phpVersion} \
-C -q \
-d include_path=/usr/share/php \
-d date.timezone=UTC -d output_buffering=1 -d variables_order=EGPCS -d safe_mode=0 -d register_argc_argv="On" \
-d display_startup_errors="Off" \
/usr/share/php/peclcmd.php "$@"
return $?
}
echo "Using php-${phpVersion}";
echo "PHP Extension-Path: ${phpLibPath}";
#_pecl config-set temp_dir "/tmp/pear/php${phpVersion}/temp" > /dev/null;
#_pecl config-set data_dir "${phpShared}/data" > /dev/null;
#_pecl config-set cfg_dir "${phpShared}/cfg" > /dev/null;
#_pecl config-set test_dir "${phpShared}/tests" > /dev/null;
#_pecl config-set www_dir "${phpShared}/www" > /dev/null;
_pecl config-set php_bin "/usr/bin/php${phpVersion}" > /dev/null;
_pecl config-set php_dir "/usr/share/php${phpVersion}-pear" > /dev/null;
_pecl config-set php_ini "/etc/php/${phpVersion}/cli/php.ini" > /dev/null;
_pecl config-set ext_dir "${phpLibPath}" > /dev/null;
_pecl config-set bin_dir "/usr/bin/" > /dev/null;
_pecl config-set php_suffix "${phpVersion}" > /dev/null;
declare -a peclArguments;
#peclArguments+=(-d temp_dir=/tmp/pear/php${phpVersion}/temp);
#peclArguments+=(-d cache_dir=/tmp/pear/php${phpVersion}/cache);
#peclArguments+=(-d data_dir=${phpShared}/data);
#peclArguments+=(-d cfg_dir=${phpShared}/config);
#peclArguments+=(-d test_dir=${phpShared}/tests);
#peclArguments+=(-d www_dir=${phpShared}/www);
peclArguments+=(-d php_bin=/usr/bin/php${phpVersion});
peclArguments+=(-d php_dir=/usr/share/php${phpVersion}-pear);
peclArguments+=(-d php_ini=/etc/php/${phpVersion}/cli/php.ini);
peclArguments+=(-d ext_dir=${phpLibPath});
peclArguments+=(-d bin_dir=/usr/bin/);
peclArguments+=(-d php_suffix=${phpVersion});
#_pecl update-channels;
#_pecl channel-update pecl.php.net;
_pecl ${peclArguments[@]} $@
#_pecl $@
cacheDir=$(pecl config-get cache_dir);
rm -rf "${cacheDir}/*";
tmpDir=$(pecl config-get temp_dir);
rm -rf "${cacheDir}/*";
exit $?
| true |
3590a798572b2adb3969b50526f29fc93bcf8f29 | Shell | nikofil/dotfiles | /.zshrc | UTF-8 | 12,311 | 3.34375 | 3 | [] | no_license | # Source Prezto
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
# Run Fasd
unalias d
alias d="dirs -v"
alias z='fasd_cd -d'
alias zz='fasd_cd -d -i'
# a and f
alias ag="rg --no-line-number"
alias f="fd -p -H"
alias c="bat"
alias q="br" # broot
alias xc="noglob xc"
alias p="pyp"
function dush() {
args=("${@[@]}")
if [[ $# == 0 ]]; then
args=(".")
fi
for d in $args; do
du -h -d 1 --all "$d" 2>/dev/null | sort -h -r | grep -v '^0\s'
done
}
function _cdf() {
found=$(f $@)
if [[ $? -eq 0 ]]; then
echo $found | while read res; do
if [[ -d "$res" ]]; then
cd "$res"
return 0
fi
done
return 1
else
return $?
fi
}
alias cdf='noglob _cdf'
# Change into the directory that contains the given file.
function cdb() {
    local parent
    parent=$(dirname $1)
    cd "$parent"
}
# fasd shortcuts: list recent files, open a recent file in $EDITOR,
# and fuzzy-pick files to edit via fzf.
alias ff='fasd -f'
alias fv='fasd -f -t -e ${EDITOR:-vim} -b viminfo'
alias vt='v $(fzf --multi --reverse)'
# Open every regular, non-binary file matched by 'f' (fd) in $EDITOR.
# NOTE(review): 'onlyfiles' is appended inside the last pipeline stage,
# which zsh runs in the current shell — this would not work in bash.
function _vf() {
    found=$(f $@)
    if [[ $? -eq 0 ]]; then
        onlyfiles=()
        echo $found | while read i; do
            ftype=$(file -bL --mime "$i")
            # keep regular files that are not binary (empty files are ok)
            if [[ -f "$i" && ((! "$ftype" =~ "binary" ) || "$ftype" =~ "x-empty") ]]; then
                onlyfiles+=($i)
            fi
        done
        if [[ -z "$onlyfiles" ]]; then
            return 1
        fi
        [[ -n "$onlyfiles" ]] && ${EDITOR:-vim} ${onlyfiles}
    else
        return $?
    fi
}
alias vf='noglob _vf'
# ripgrep with the user's config and ignore file.
alias a='RIPGREP_CONFIG_PATH=$HOME/.ripgreprc rg --ignore-file=$HOME/.agignore'
# Open every file that matches an 'a' (ripgrep) search in $EDITOR, jumping
# to the first match. '$param[1]' is zsh subscripting (first character);
# '${(f)found}' splits the result on newlines.
function va() {
    lastparam=""
    for param in $@; do
        if [[ $param[1] != "-" || lastparam == "--" ]]; then break; fi
        lastparam=$param
    done
    found=$(a -l $@)
    if [[ $? -eq 0 ]]; then
        [[ -n "$found" ]] && ${EDITOR:-vim} "+silent!/$param" ${(f)found}
        return 0
    else
        return $?
    fi
}
# Interactive grep: ripgrep -> fzf multi-select -> open picks in $EDITOR
# at file:line:column.
function fa() {
    pat="$@"
    if [ "$#" -lt 1 ]; then pat=''; fi
    files=$(rg --column --line-number --no-heading --fixed-strings --ignore-case --hidden --follow --color always "$pat" 2>/dev/null | fzf --ansi --multi --reverse | awk -F ':' '{print $1":"$2":"$3}')
    [[ -n "$files" ]] && ${EDITOR:-vim} ${(f)files}
}
# ps sorted by CPU, successively filtered by every argument.
function psa() {
    psres=$(ps axk -%cpu o user,pid,pgid,%cpu,%mem,rss,stat,start,time,command)
    for i in $@; do
        psres=$(echo "$psres" | grep -a "$i")
    done
    echo $psres
}
# Kill every process matched by psa (column 2 is the pid).
function kpsa() {
    psa $@ | awk '{print $2}' | xargs kill
}
# Interactively pick processes in fzf and kill them; optional $1 is the
# signal number (default 9).
function kp() {
    pid=$(ps -ef | sed 1d | eval "fzf ${FZF_DEFAULT_OPTS} -m --header='[kill:process]'" | awk '{print $2}')
    if [ "x$pid" != "x" ]; then
        echo $pid | xargs kill -${1:-9}
    fi
}
# Compute the extraction destination for an archive: the archive path minus
# its final extension, or "<archive>.out" when a file with the stripped
# name already exists.
# (Fixed: the old 'echo | rev | cut | rev' pipeline stripped at the first
# dot anywhere in the path, mangling paths whose *directory* contains a
# dot, and misbehaved for names starting with -n/-e because of echo.)
function _x_outdir() {
    local archive=$1
    local base=${archive##*/}
    local dest
    if [[ "$base" == *.* ]]; then
        # Strip only the archive's own extension.
        dest=${archive%.*}
    else
        dest=$archive
    fi
    if [[ -f "$dest" ]]; then
        dest="$archive.out"
    fi
    printf '%s\n' "$dest"
}
# Extract each given archive with 7z into its computed destination directory.
function x() {
    local i dest
    for i in "$@"; do
        dest=$(_x_outdir "$i")
        7z x "-o$dest" "$i"
    done
}
function dush() {
if [[ $# -eq 0 ]]; then
args=$(/bin/ls -A -1)
else
args=$(printf '%s\n' "${@[@]}")
fi
echo "$args" | xargs -d '\n' du -s --block-size=M | sort -n -r
}
# rcd
function rcd {
tempfile='/tmp/ranger-cd'
ranger --choosedir="$tempfile" "${@:-$(pwd)}"
test -f "$tempfile" &&
if [ "$(cat -- "$tempfile")" != "$(echo -n `pwd`)" ]; then
cd -- "$(cat "$tempfile")"
fi
rm -f -- "$tempfile"
}
function _rcdw {
BUFFER="rcd"
zle accept-line
}
zle -N _rcdw
export GOPATH=$HOME/workspace/go
export GOBIN=$HOME/workspace/go/bin
export WORKON_HOME=$HOME/virtenvs
export PROJECT_HOME=$HOME/workspace
export PATH=$PATH:$HOME/.rvm/bin:$HOME/.pyenv/bin:$HOME/.yarn/bin:$HOME/bin:$HOME/.local/bin:$GOBIN:$HOME/bin/fzf/bin
if [[ -e $HOME/lib ]]; then
export LD_LIBRARY_PATH=$HOME/lib
fi
export LESS="-Ri"
# fzf
[[ $- == *i* ]] && source "$HOME/bin/fzf/shell/completion.zsh" 2> /dev/null
source "$HOME/.fzf/shell/key-bindings.zsh"
export EDITOR="nvim"
bindkey -v
export KEYTIMEOUT=1
bindkey '^[[1;2C' forward-word
bindkey '^[[1;2D' backward-word
# key bindings
bindkey "\e[1~" beginning-of-line
bindkey "\e[4~" end-of-line
bindkey "\e[5~" beginning-of-history
bindkey "\e[6~" end-of-history
bindkey "\e[3~" delete-char
bindkey "\e[2~" quoted-insert
bindkey "\e[5C" forward-word
bindkey "\eOc" emacs-forward-word
bindkey "\e[5D" backward-word
bindkey "\eOd" emacs-backward-word
bindkey "\e[1;5D" backward-word
bindkey "\e[1;5C" forward-word
bindkey "\e\e[C" forward-word
bindkey "\e\e[D" backward-word
bindkey "^H" backward-delete-word
bindkey '^[[3;5~' kill-word
# for rxvt
bindkey "\e[8~" end-of-line
bindkey "\e[7~" beginning-of-line
# for non RH/Debian xterm, can't hurt for RH/DEbian xterm
bindkey "\eOH" beginning-of-line
bindkey "\eOF" end-of-line
# for freebsd console
bindkey "\e[H" beginning-of-line
bindkey "\e[F" end-of-line
# completion in the middle of a line
bindkey '^i' expand-or-complete-prefix
# ranger-cd
bindkey "^O" _rcdw
# edit command line
bindkey -M vicmd v edit-command-line
# insert last word
bindkey '^k' insert-last-word
# ignore double esc
noop () { }
zle -N noop
bindkey -M vicmd '\e' noop
setopt AUTO_CD
setopt AUTO_PUSHD
setopt GLOB_COMPLETE
setopt PUSHD_MINUS
setopt PUSHD_TO_HOME
setopt PUSHD_IGNORE_DUPS
setopt NO_HUP
setopt NO_CLOBBER
setopt NO_CASE_GLOB
setopt NUMERIC_GLOB_SORT
setopt EXTENDED_GLOB
setopt AUTO_LIST
ZSH_HIGHLIGHT_STYLES[globbing]='fg=cyan'
# source ~/.tmuxinator/tmuxinator.zsh
bindkey '^B' push-line
# vi style incremental search
# bindkey '^R' history-incremental-search-backward
# bindkey '^S' history-incremental-search-forward
# bindkey '^P' history-search-backward
# bindkey '^N' history-search-forward
# Safer rm: no spell-correction, prompt once for >3 files, no glob warning.
unalias rm
alias rm="nocorrect rm -I"
setopt rm_star_silent
# Editor shortcuts.
alias em="emacs -nw"
alias v="${EDITOR:-vim}"
alias vl="${EDITOR:-vim} -u ~/.vimlessrc -"
alias gv="gvim"
alias sv="sudo ${EDITOR:-vim}"
# Edit a scratch copy of a file (or stdin with '-') and print the result,
# so the editor can be used as a pipeline filter.
# NOTE(review): '>!': zsh clobber-override, needed because of NO_CLOBBER.
function vout() {
    tmpfile=$(mktemp)
    if [[ $# -eq 1 ]]; then
        if [[ "$1" == "-" ]]; then
            cat >! "$tmpfile"
        else
            /bin/cp "$1" "$tmpfile"
        fi
    fi
    v "$tmpfile" > /dev/tty
    stty -F /dev/tty sane
    cat "$tmpfile"
    rm "$tmpfile"
}
alias new="i3-sensible-terminal ."
alias extract="aunpack"
alias xcopy="xclip -selection clipboard"
alias xpaste="xclip -selection clipboard -o"
# tmux shortcuts (force 256-colour mode).
alias tmux="tmux -2"
alias tm="tmux"
alias tml="tmux list-sessions"
alias tma="tmux attach-session"
alias tmd="tmux detach-client"
alias tmkw="tmux kill-window"
alias tmkp="tmux kill-pane"
alias tmks="tmux kill-session"
alias tmr="tmux resize-pane"
alias tmrl="tmux resize-pane -L"
alias tmrr="tmux resize-pane -R"
# Docker: exec into / run the latest running container ('dk' presumably a
# docker wrapper defined elsewhere — not visible in this file).
alias dke='dk exec -it $(dk ps -ql -f status=running)'
alias dkr='dk run --rm -it'
# Git shortcuts; commits are always signed off and GPG-signed.
alias ga="git add"
alias gaa="git add -u"
alias gb="git branch"
alias gbi="git bisect"
alias gbin="git bisect bad"
alias gbiy="git bisect good"
alias gbl="git blame -s"
alias gc="git commit --signoff -S"
alias gca="git commit --amend --signoff -S"
alias gcaa="git commit --amend --no-edit --signoff -S"
alias gch="git checkout"
alias gcherry="git cherry-pick"
# Fetch and check out GitHub pull request $2 / GitLab merge request $2
# from remote $1.
function gchpr(){git fetch $1 refs/pull/$2/head:pr/$2 && git checkout pr/$2;}
function gchmr(){git fetch $1 merge-requests/$2/head:mr-$1-$2 && git checkout mr-$1-$2}
alias gclean="git clean -f"
alias gclon="git clone"
alias gd="git diff"
alias gdrop="git stash --patch && git stash drop"
alias gds="git diff --staged"
alias gdt="git difftool"
alias gf="git fetch"
function gfpr(){git fetch $1 refs/pull/$2/head:pr/$2;}
alias ggrep="git grep"
alias ginit="git init"
alias gkeep="git add -i . && git stash -k && git stash drop && git reset"
alias gl="git log --topo-order --stat --pretty=format:\"${_git_log_medium_format}\""
alias glast="git log --color --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit --stat -1 | cat"
alias glo="git log --color --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit"
alias gloo="git log --color --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit --branches HEAD"
alias gls="git log --color --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit --branches --stat"
alias gm="git merge"
alias gmt="git mergetool"
alias gmv="git mv"
alias gp="git push"
alias gpatch="git format-patch --signoff"
alias gpf="git push -f"
alias gpull="git pull"
alias greb="git rebase"
alias greba="git rebase --abort"
alias grebc="git rebase --continue"
alias grebi="git rebase -i"
alias greflog="git reflog --color --walk-reflogs --pretty=format:'%Cred%h%Creset %C(magenta)%gD%Creset -%C(yellow)%d%Creset %gs %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit"
alias grem="git remote -v"
alias gres="git reset"
alias gresh="git reset --hard"
alias gress="git reset --soft"
alias grev="git revert"
alias grm="git rm"
alias grmc="git rm --cached"
alias gs="git status -sb"
alias gsend="git send-email"
alias gsh="git show"
alias gst="git stash"
alias gsti="git stash --patch"
# Reset the current branch to its Nth previous reflog position (default 1).
function gundo() {git reset --hard $(git rev-parse --abbrev-ref HEAD)@\{${1-1}\};}
unalias gcl
unalias gcm
# Commit with the whole argument list as the message (signed off + GPG).
function gcm() {
    git commit --signoff -S -m "$*"
}
# Quick parent-directory navigation and misc tool shortcuts.
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias .....='cd ../../../..'
alias ......='cd ../../../../..'
alias diff="colordiff"
alias inst="sudo apt-get install"
alias rmf="rm -rf"
alias r="ranger"
alias wl="wunderline"
# Resolve a path to its absolute, symlink-free form.
alias path="readlink -f"
# Create a directory (with parents) and change into it.
function mkcd() {
    local target=$1
    command mkdir -p $target && cd $target
}
# Wunderline remind in hours / days
# Add a Wunderlist task due $1 days + $2 hours from now; remaining args are
# the task text. NOTE(review): the embedded snippet is python 2
# ('print d.strftime(...)') — requires a python2 interpreter on PATH.
function wldh() {
    wldate=$(python -c "import sys; from datetime import datetime, timedelta;
d = datetime.now() + timedelta(days=$1, hours=$2);
sys.stderr.write('Reminder on: {}\n'.format(d.strftime('%A %Y-%m-%d %H:%M')));
print d.strftime('%Y-%m-%d %H:%M');")
    wl add --reminder "$wldate" --due "$(echo $wldate | cut -d' ' -f1)" "${@[@]:3}"
}
# Convenience wrappers: remind in $1 hours / $1 days.
function wlh() {
    wldh 0 $1 "${@[@]:2}"
}
function wld() {
    wldh $1 0 "${@[@]:2}"
}
# TheFuck
# Correct the previous command, run it, and push it into history.
alias fuck='TF_CMD=$(TF_ALIAS=fuck PYTHONIOENCODING=utf-8 TF_SHELL_ALIASES=$(alias) thefuck $(fc -ln -1 | tail -n 1)) && eval $TF_CMD && print -s $TF_CMD'
# VirtualEnv
if type virtualenvwrapper.sh &> /dev/null; then
    export VIRTUAL_ENV_DISABLE_PROMPT=1
    source "$(which virtualenvwrapper.sh)"
fi
# AWS aliases
# Find a stopped p2/t2 instance, start/stop it, and ssh to it; state is
# carried between aliases via $instanceId / $instanceIp.
alias aws-get-p2='export instanceId=`aws ec2 describe-instances --filters "Name=instance-state-name,Values=stopped,Name=instance-type,Values=p2.xlarge" --query "Reservations[0].Instances[0].InstanceId" | tr -d \"` && echo $instanceId'
alias aws-get-t2='export instanceId=`aws ec2 describe-instances --filters "Name=instance-state-name,Values=stopped,Name=instance-type,Values=t2.micro" --query "Reservations[0].Instances[0].InstanceId" | tr -d \"` && echo $instanceId'
alias aws-start='aws ec2 start-instances --instance-ids $instanceId && aws ec2 wait instance-running --instance-ids $instanceId && export instanceIp=`aws ec2 describe-instances --filters "Name=instance-id,Values=$instanceId" --query "Reservations[0].Instances[0].PublicIpAddress" | tr -d \"` && echo $instanceIp'
alias aws-ip='export instanceIp=`aws ec2 describe-instances --filters "Name=instance-id,Values=$instanceId" --query "Reservations[0].Instances[0].PublicIpAddress" | tr -d \"` && echo $instanceIp'
alias aws-ssh='ssh -i ~/.ssh/aws-key.pem ubuntu@$instanceIp'
alias aws-stop='aws ec2 stop-instances --instance-ids $instanceId'
alias aws-status='aws ec2 describe-instance-status --include-all-instances'
# moo
# Once per day, print a fortune through a randomly styled cowsay.
# NOTE(review): '$[...]' is deprecated arithmetic syntax, and the
# ${COWSTYLES[$RANDCOW]} string subscript relies on zsh's 1-based indexing.
if type fortune &> /dev/null && type cowsay &> /dev/null; then
    curdate=$(date +%m/%d/%y)
    if [[ ! -e ~/.last_fortune || $(cat ~/.last_fortune) != $curdate ]]; then
        echo $curdate >! ~/.last_fortune
        COWSTYLES="bdgpstwy"
        RANDCOW=$[ ( $RANDOM % 9 ) ]
        if [[ $RANDCOW > 0 ]]; then
            COWSTYLE="-${COWSTYLES[$RANDCOW]}"
        else
            COWSTYLE=""
        fi
        fortune fortunes | cowsay $COWSTYLE
    fi
fi
# NVM
# export NVM_DIR="$HOME/.nvm"
# [ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
# kubectl autocompletion
alias kb=kubectl
source <(kubectl completion zsh)
export GPG_TTY=$(tty)
source $HOME/.config/broot/launcher/bash/br
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# Ensure the rc file itself evaluates to success.
true
171aa5b5ea71ab9e4e15c29960fe8a9aa48e28aa | Shell | kvendrel/Intro_Biocomp_ND_317_Tutorial4 | /wages3.sh | UTF-8 | 1,287 | 3.53125 | 4 | [] | no_license | #prints the difference of graduating college on average minimum wage for earners in the wages.csv dataset.
#Final printout is the average minimum wage dollar difference between workers who graduated from college
#minus the average minimum wage of workers who graduated from highschool
##usage: bash wages3.sh
#creates sorted list of minumum wages of college graduates from lowest minimum wage to highest
cat wages.csv | tr "," " " | awk '{print $3 " " $4}' | awk '$1~/16/{print}' | sort -n > collegeGrads.txt
#creates sorted list of minimum wages of high school graduates from lowest minimum wage to highest
cat wages.csv | tr "," " " | awk '{print $3 " " $4}' | awk '$1~/12/{print}' | sort -n >> highschoolGrads.txt
#to find average wage of a college grad
cat collegeGrads.txt | awk '{ sum += $2; n++ } END { if (n > 0) print sum / n; }' > "val1"
#to find average wage of a high school grad
cat highschoolGrads.txt | awk '{ sum += $2; n++ } END { if (n > 0) print sum / n; }' > "val2"
#to find difference in average minimum wage between a high school and college graduate
val1=$(cat collegeGrads.txt | awk '{sum += $2; n++ } END { if (n > 0) print sum / n; }')
val2=$(cat highschoolGrads.txt | awk '{ sum += $2; n++ } END { if (n > 0) print sum / n; }')
echo "$val1 - $val2" | bc
| true |
92f6d39168fc9c15d68a146d5df7c53c9f4dc5e2 | Shell | Roja-B/EvolvingComs | /code_v2_recovered_myself/getRelevant_h1.sh | UTF-8 | 1,722 | 2.578125 | 3 | [] | no_license | #!/bin/bash
dirnames=$(ls Results/)
#dirnames=$(ls /media/data3/roja/Balatarin/CompleteRun/Results/)
#dirnames=$(ls ./test)
for dirname in $dirnames; do
numcoms=$(ls Results/$dirname/community* |wc -l)
echo $((numcoms-1))
PathName=Results
# PatheName=/media/data3/roja/Balatarin/CompleteRun/Results/$dirname/
echo $dirname
python links_group_hash.py $PathName $dirname
python contingencytable.py $PathName $dirname $((numcoms-1))
python communityvotecounts.py $PathName $dirname $((numcoms-1))
# python expected.py $PathName $dirname $((numcoms-1))
python binomial.py $PathName $dirname $((numcoms-1))
python representativeLink_HValue.py $PathName $dirname $((numcoms-1))
# python makeLinkURL.py $PathName $dirname $((numcoms-1))
python findLinkdomain2.py $PathName $dirname $((numcoms-1))
mkdir $PathName/$dirname/RelevantLinks_h
mv $PathName/$dirname/repLinks.txt $PathName/$dirname/RelevantLinks_h/
# mv $PathName/$dirname/ExpectedVotes.txt $PathName/$dirname/RelevantLinks_h/
mv $PathName/$dirname/linkVoteCounts.txt $PathName/$dirname/RelevantLinks_h/
mv $PathName/$dirname/links.txt $PathName/$dirname/RelevantLinks_h/
mv $PathName/$dirname/communityVoteCounts.txt $PathName/$dirname/RelevantLinks_h/
mv $PathName/$dirname/contingencyTable.txt $PathName/$dirname/RelevantLinks_h/
mv $PathName/$dirname/RepLinkDomains* $PathName/$dirname/RelevantLinks_h/
# mv $PathName/$dirname/Chi2.txt $PathName/$dirname/RelevantLinks_h/
mv $PathName/$dirname/H_value.txt $PathName/$dirname/RelevantLinks_h/
done
cp Work/NumComsAndModularities Results
| true |
c18d11429b9dcb5ab913bb207066497606ca8cee | Shell | fewstera/go-event-sourcing-hack | /scripts/live-reload.sh | UTF-8 | 699 | 3.890625 | 4 | [] | no_license | #!/bin/bash
set -eo pipefail
sigint_handler()
{
kill $PID
exit
}
trap sigint_handler SIGINT
while true; do
echo "Building app"
if go build -o eventsourcing-hack ./cmd/eventsourcing-hack/main.go; then
echo "App built, starting server."
./eventsourcing-hack &
# Remember process id so we can kill it when a file changes
PID=$!
else
echo "Build failed."
unset PID
fi
printf "Waiting for file changes...\n\n"
# Hang until a file changes
inotifywait -e modify -e create -e delete --exclude \.git -r -q ./
echo "File change detected, reloading."
if [ -n "$PID" ]; then
# Kill process and restart
kill $PID > /dev/null
fi
sleep 0.2
done
| true |
8ad3c3e872f86113ff13d201ff4395ccd960cac1 | Shell | jamesconrad/Discord-Voice-Normalizer | /bot.sh | UTF-8 | 343 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
##Main script to handle bot execution
while true; do
node index.js &>>log
val=$?
if [ $val -eq 0 ]; then
echo "Pulling from Github."
git fetch
git reset --hard origin/master
echo "Pull complete, Restarting."
else
echo "Program exited unsuccessfully, Restarting."
fi
done | true |
431e4872a763bbc3c821681cd0edddc14113c7ad | Shell | sgarudi/ocs-upi-kvm | /scripts/destroy-ocp.sh | UTF-8 | 1,918 | 3.59375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
if [ ! -e helper/parameters.sh ]; then
echo "Please invoke this script from the directory ocs-upi-kvm/scripts"
exit 1
fi
source helper/parameters.sh
if [ "$PLATFORM" == "kvm" ]; then
sudo -sE helper/kvm/virsh-cleanup.sh
else
pushd ../src/$OCP_PROJECT
if [ ! -e terraform.tfstate ]; then
echo "No terraform artifacts or state file!"
exit 0
fi
terraform_cmd=$WORKSPACE/bin/terraform
set +e
if [[ $($terraform_cmd state list -state=terraform.tfstate | wc -l) -eq 0 ]]; then
echo "Nothing to destroy!"
else
echo "Validating cluster to be deleted is network addressible ..."
if [ "$PLATFORM" == powervs ]; then
bastion_ip=$($terraform_cmd output | grep ^bastion_public_ip | awk '{print $3}')
else
bastion_ip=$($terraform_cmd output | grep ^bastion_ip | awk '{print $3}')
fi
if [ -n "$bastion_ip" ] && [ -e $WORKSPACE/bin/oc ] && [ -e $WORKSPACE/env-ocp.sh ]; then
echo "Validate use of oc command for ocs-ci teardown ..."
source $WORKSPACE/env-ocp.sh
oc get nodes
rc=$?
if [ "$rc" != 0 ]; then
echo "Retry 1 of 2 - oc command ..."
sleep 5m
oc get nodes
rc=$?
fi
if [ "$rc" != 0 ]; then
echo "Retry 2 of 2 - oc command ..."
sleep 15m
oc get nodes
rc=$?
fi
if [ "$rc" == 0 ]; then
pushd ../../scripts
echo "Invoking teardown-ocs-ci.sh"
./teardown-ocs-ci.sh
popd
fi
else
if [ -n "$bastion_ip" ]; then
echo "Bastion IP is $bastion_ip. Lacking cluster login..."
else
echo "Bastion IP is not known"
fi
fi
echo "Invoking terraform destroy"
$terraform_cmd destroy -var-file $WORKSPACE/site.tfvars -auto-approve -parallelism=7
if [ -n "$bastion_ip" ]; then
echo "Removing $bastion_ip from /etc/hosts"
grep -v "$bastion_ip" /etc/hosts | tee /tmp/hosts.1
sudo mv /tmp/hosts.1 /etc/hosts
fi
fi
rm -rf .terraform terraform.tfstate
set -e
popd
fi
| true |
e9608af944530f777b73184022ae98601e9a2050 | Shell | dlopeznev/lamp | /provision/datos.sh | UTF-8 | 931 | 2.5625 | 3 | [] | no_license | #Quito interactividad
# Vagrant provisioning for the "datos" (database) VM: SSH key + operator
# user + sshd config + MySQL install/configuration/restore.
export DEBIAN_FRONTEND=noninteractive
# Certificate: authorize the provisioning public key for root SSH access.
mkdir /root/.ssh
cat /vagrant/provision/keys/id_rsa.pub >> /root/.ssh/authorized_keys
chmod 700 /root/.ssh
chmod 600 /root/.ssh/authorized_keys
# Operator user: created with a default password and sudo membership.
useradd -m operador
echo operador:password | chpasswd
usermod -aG sudo operador
# SSH on port 4000: install the prepared sshd_config and restart the daemon.
cp -f /vagrant/provision/configs/sshd_config /etc/ssh/
systemctl restart sshd
# MySQL install; set the root password to "root".
apt-get update -q -y
apt-get -q -y install mysql-server
mysqladmin -u root password root
# Database server configuration.
cp -f /vagrant/provision/db/my.cnf /etc/mysql/
# MySQL: no password prompt for root (client credentials file).
cp -f /vagrant/provision/configs/.my.cnf /root/
# Restart the database so the new config takes effect.
systemctl restart mysql
# Restore the database dump.
mysql -u root < /vagrant/provision/db/db.dmp
# Grant privileges.
mysql -u root < /vagrant/provision/db/grants
# Restart the database.
systemctl restart mysql
# Hosts by name: append the prepared /etc/hosts entries.
cat /vagrant/provision/configs/hosts_datos >> /etc/hosts | true
40a91cf0b96337e120cf59251ffbaf13f6074f58 | Shell | lowet84/DockerFiles | /build2/wip/netshare/start.sh | UTF-8 | 673 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Bootstrap the docker-volume-netshare plugin on the host: generate a
# dedicated SSH keypair (once), authorize it on the host, then use it to
# restart the netshare daemon on the host via ssh to localhost.
cd /netshare
# Generate the keypair only on first run.
if [ ! -f "/key/netshare.key.pub" ]; then
ssh-keygen -t rsa -b 4096 -C "netshare" -N "" -f /key/netshare.key
fi
cd /key
KEY=$(cat netshare.key.pub)
echo Key:
echo $KEY
# -F (fixed string) and -x (whole line) so the key is matched literally.
if grep -Fxq "$KEY" /ssh/authorized_keys
then
echo "Key already in authorized_keys"
else
echo "Adding key to authorized_keys"
cat /key/netshare.key.pub >> /ssh/authorized_keys
fi
cd /netshare
echo "Killing netshare"
ssh -oStrictHostKeyChecking=no -i /key/netshare.key root@localhost "pkill -f netshare"
# Stage the binary and launcher where the host can see them, then start.
cp docker-volume-netshare /hostroot
cp netshare.sh /hostroot
ssh -oStrictHostKeyChecking=no -i /key/netshare.key root@localhost "sh netshare.sh $USERNAME $PASSWORD"
| true |
d067054593e02221ebe823d4631de57b239a2134 | Shell | seoyhaein/install_mesos | /install_mesos | UTF-8 | 16,031 | 3.953125 | 4 | [] | no_license | #!/usr/bin/env bash
# Mesos/Marathon installer: abort on errors, unset variables and failed
# pipeline stages.
set -o errexit
set -o nounset
set -o pipefail
distro="" # Name and version of distro: e.g. centos-6.5, ubuntu-14.05
distro_type="" # Distro type e.g. el6, el7, debian
type_master=false
type_agent=false
masters_specified=false
master_list=""
script_basename=$(basename "$0")
declare -a master_ips='()'
node_hostname=""
# Address of the interface that routes to 8.8.8.8 (the default route).
# NOTE(review): assumes an outbound route exists at install time.
node_ip=$(ip route get 8.8.8.8 | grep -Po '(?<=src )(\d{1,3}.){4}')
mesos_version=""
marathon_version=""
# Print the given message (followed by "Exiting.") to stderr and abort.
function err_exit
{
echo "$*" "Exiting." >&2
exit 1
}
# Print command-line usage.
# NOTE(review): the usage string itself contains typos ("<vesrion>",
# unterminated "[--marathon <version]"); left as-is since it is runtime
# output, not a comment.
function usage
{
echo
echo "Usage: $script_basename [--distro <distro>] [--masters <comma-separated-master-ip-list>] [--hostname <resolvable-hostname-for-node>] [--ip <host-ip-addr>] [--mesos <vesrion>] [--marathon <version] <node-type>"
echo
echo "Example: $script_basename --masters \"1.1.1.1,2.2.2.2,3.3.3.3\" --hostname mesos-master-01 --ip 1.1.1.1 master"
}
# Validate if a given string is a valid IPv4 address or not
# returns :
# 0 --> valid IP
# 1 --> invalid IP
# Return 0 when the argument is a syntactically valid dotted-quad IPv4
# address (four groups of 1-3 digits, each at most 255), 1 otherwise.
function valid_ip
{
  local candidate="$1"
  local octet
  # Shape check first: exactly four 1-3 digit groups separated by dots.
  if [[ ! $candidate =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
    return 1
  fi
  # Range check each octet (the regex guarantees digits only).
  for octet in ${candidate//./ }; do
    if (( octet > 255 )); then
      return 1
    fi
  done
  return 0
}
# Prepend "sudo" to command if not running as root
function sudo
{
if [[ $(id -u) = 0 ]]
then "$@"
else
$(which sudo) "$@"
fi
}
# Detect the distro on the box; prints "<id>-<version>", e.g. "ubuntu-14.04".
function detect_distro
{
# ID and VERSION_ID in /etc/os-release give the Distro and Version.
# Sourced in a subshell so the os-release variables do not leak here.
if [[ -f /etc/os-release ]]
then
( source /etc/os-release && echo "$ID-$VERSION_ID" )
return
else
err_exit "Could not determine OS version OR an unspported distro"
fi
}
# Map the detected $distro string onto a repo family: debian, el6 or el7.
function get_distro_type
{
case $distro in
ubuntu-*|debian-*)
distro_type=debian
;;
rhel-6|rhel-6.*|centos-6|centos-6.*)
distro_type=el6
;;
rhel-7|rhel-7.*|centos-7|centos-7.*)
distro_type=el7
;;
*) err_exit "Unsupported distro" ;;
esac
echo $distro_type
}
# Set up Mesosphere repo for Ubuntu / Debian (idempotent: skipped when the
# sources.list.d entry already exists).
function setup_apt_repo
{
echo "Setting up apt repo"
if [ ! -f /etc/apt/sources.list.d/mesosphere.list ]
then
# Import the Mesosphere signing key, derive the distro codename, add
# the matching repository, then refresh the package index.
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
echo "Done adding key"
DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
CODENAME=$(lsb_release -cs)
echo "Adding repo"
# Add the repository
echo "deb http://repos.mesosphere.com/${DISTRO} ${CODENAME} main" | \
sudo tee /etc/apt/sources.list.d/mesosphere.list
sudo apt-get -y update
fi
}
# Set up Mesosphere repo for Redhat 6 / Centos 6
function setup_el6_repo
{
echo "Setting up EL6 repo"
if [ ! -f /etc/yum.repos.d/mesosphere.repo ]
then
# Add the repository
sudo rpm -Uvh http://repos.mesosphere.com/el/6/noarch/RPMS/mesosphere-el-repo-6-2.noarch.rpm
fi
}
# Set up Mesosphere repo for Redhat 7 / Centos 7
function setup_el7_repo
{
echo "Setting up EL7 repo"
if [ ! -f /etc/yum.repos.d/mesosphere.repo ]
then
# Add the repository
sudo rpm -Uvh http://repos.mesosphere.com/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
fi
}
# Set up Mesosphere repo depending on the distro family.
function setup_mesosphere_repo
{
echo "Setting up Mesosphere Repo to install Mesos"
case $distro_type in
debian) setup_apt_repo ;;
el6) setup_el6_repo ;;
el7) setup_el7_repo ;;
*) err_exit "Unsupported distro" ;;
esac
}
# Compare two given version strings
# Inputs: The two version strings to compare
# Output: 0 --> =
# 1 --> >
# 2 --> <
#######################################
# Compare two dotted version strings numerically.
# Arguments:
#   $1, $2 - version strings, e.g. "1.2.3"
# Returns:
#   0 when $1 == $2, 1 when $1 > $2, 2 when $1 < $2
#######################################
function compare_version
{
# Quote the right-hand side: inside [[ == ]] an unquoted $2 is treated as
# a glob pattern, so e.g. compare_version 1.2.3 "1.*" would wrongly
# report equality.
if [[ $1 == "$2" ]]
then
return 0
fi
local IFS=.
local i ver1 ver2
# Split with read -a instead of bare array assignment: ver1=($1) also
# performs pathname expansion, so a stray '*' component would be replaced
# by file names from the current directory.
read -r -a ver1 <<< "$1"
read -r -a ver2 <<< "$2"
# fill empty fields in ver1 with zeros
for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
do
ver1[i]=0
done
for ((i=0; i<${#ver1[@]}; i++))
do
if [[ -z ${ver2[i]} ]]
then
# fill empty fields in ver2 with zeros
ver2[i]=0
fi
# 10# forces base 10 so components with leading zeros are not read as octal
if ((10#${ver1[i]} > 10#${ver2[i]}))
then
return 1
fi
if ((10#${ver1[i]} < 10#${ver2[i]}))
then
return 2
fi
done
return 0
}
# Find the version of the package in the repo matching the version specified
# Input: Package name, Version string (prefix)
# Output: Matching package version in the repo (exits via err_exit when none)
function find_pkg_version
{
local package="$1"
local version="$2"
local all_versions=""
local pkg_ver=""
case $distro_type in
debian)
# apt-cache madison prints "pkg | version | source"; column 2 is the version.
all_versions=$(apt-cache madison $package | awk -F"|" '{print $2}')
pkg_ver=$(echo "$all_versions" | grep "^$version" | head -n1)
;;
el*)
# yum --showduplicates lists every candidate; keep the mesosphere rows.
all_versions=$(yum list $package --showduplicates | grep mesosphere | awk '{print $2}')
pkg_ver=$(echo "$all_versions" | grep "^$version" | tail -n1)
;;
*) err_exit "Unsupported distro" ;;
esac
if [ -z "$pkg_ver" ]
then
err_exit "No version matching $2 found in the repo. The list of available versions in the repo is:
$all_versions"
fi
echo $pkg_ver
}
# Install mesos, either the repo default or the version pinned via --mesos.
function install_mesos
{
local pkg_version=""
if [ -n "$mesos_version" ]
then
pkg_version=$(find_pkg_version mesos $mesos_version)
fi
case $distro_type in
debian)
if [ -z "$pkg_version" ]
then
sudo apt-get -y install mesos
else
sudo apt-get -y install "mesos=$pkg_version"
fi
;;
el*)
if [ -z "$pkg_version" ]
then
sudo yum -y install mesos
else
sudo yum -y install "mesos-$pkg_version"
fi
;;
*) err_exit "Unsupported distro" ;;
esac
}
# Install zookeeper on EL systems (Debian pulls it in as a mesos dependency).
function install_zookeeper
{
case $distro_type in
el6)
# EL6 gets zookeeper from the Cloudera CDH4 repo.
sudo rpm -Uvh http://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
sudo yum -y install zookeeper
;;
el7)
sudo yum -y install mesosphere-zookeeper ;;
*)
err_exit "Unsupported distro" ;;
esac
}
# Install Java 8 on Debian based distros via the webupd8team PPA, accepting
# the Oracle license non-interactively through debconf preseeding.
function install_java8
{
#sudo add-apt-repository ppa:webupd8team/java
echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | sudo tee /etc/apt/sources.list.d/webupd8team-java.list
echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | sudo tee -a /etc/apt/sources.list.d/webupd8team-java.list
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886
sudo apt-get update
echo debconf shared/accepted-oracle-license-v1-1 select true | sudo debconf-set-selections
echo debconf shared/accepted-oracle-license-v1-1 seen true | sudo debconf-set-selections
sudo apt-get -y install oracle-java8-installer
}
# Install marathon (repo default or the version pinned via --marathon),
# pulling in Java 8 on Debian and zookeeper on EL.
function install_marathon
{
local pkg_version=""
if [ -n "$marathon_version" ]
then
pkg_version=$(find_pkg_version marathon $marathon_version)
fi
case $distro_type in
debian)
if [ -z "$pkg_version" ]
then
install_java8
sudo apt-get -y install marathon
else
# Install Java 8 if marathon version is 0.11.0+
# (compare_version returns 2 only when requested < 0.11.0)
local cmp=$(compare_version $marathon_version "0.11.0"; echo $?)
if [ $cmp -ne 2 ]
then
install_java8
fi
sudo apt-get -y install "marathon=$pkg_version"
fi
;;
el*)
if [ -z "$pkg_version" ]
then
sudo yum -y install marathon
else
sudo yum -y install "marathon-$pkg_version"
fi
install_zookeeper
;;
*) err_exit "Unsupported distro" ;;
esac
}
# Write /etc/mesos/zk as a zk:// connection string listing every master
# (or localhost when no master list was given).
function configure_mesos
{
local etc_mesos_zk='zk://'
ips_left=${#master_ips[@]}
if [ $ips_left -eq 0 ]
then
etc_mesos_zk+="localhost:2181/mesos"
else
# Comma-separate all hosts; only the last entry carries the /mesos path.
for ip in "${master_ips[@]}"
do
if [ $ips_left -eq 1 ]
then
etc_mesos_zk+="${ip}:2181/mesos"
else
etc_mesos_zk+="${ip}:2181,"
fi
ips_left=$((ips_left-1))
done
fi
sudo bash -c "echo $etc_mesos_zk > /etc/mesos/zk"
}
# Assign this node its zookeeper myid (its 1-based index in the master
# list) and append the server.N ensemble entries to zoo.cfg.
function configure_zookeeper
{
# get myid for this host
myid=""
if [ ${#master_ips[@]} -eq 0 ]
then
myid=1
else
for i in "${!master_ips[@]}"
do
if [[ "${master_ips[$i]}" = "$node_ip" ]]
then
myid=$((i+1))
break
fi
done
fi
# A master must appear in its own --masters list, or we cannot number it.
if [[ -z "$myid" ]]
then
err_exit "Node IP: $node_ip specified is not in the list of masters specified: $master_list"
fi
# The myid file lives in a distro-specific location.
case $distro_type in
debian)
sudo bash -c "echo $myid > /etc/zookeeper/conf/myid"
;;
el6)
sudo zookeeper-server-initialize --myid=$myid
;;
el7)
sudo bash -c "echo $myid > /var/lib/zookeeper/myid"
;;
*) err_exit "Unsupported distro" ;;
esac
if [ ${#master_ips[@]} -ne 0 ]
then
local zk_conf=""
local count=0
for ip in "${master_ips[@]}"
do
count=$((count+1))
zk_conf+="server.${count}=${ip}:2888:3888"$'\n'
done
# Drop any stale server.N lines before appending the fresh ensemble.
sudo sed -i '/server\.[[:digit:]]\+/d' /etc/zookeeper/conf/zoo.cfg
sudo bash -c "echo \"$zk_conf\" >> /etc/zookeeper/conf/zoo.cfg"
fi
}
# Write the master quorum (majority of masters) and optional hostname.
function configure_mesos_master
{
local num_masters=${#master_ips[@]}
if [ $num_masters -eq 0 ]
then
num_masters=1
fi
# num_masters is guaranteed to be an odd number since we validated that,
# so quorum is num_masters/2 + 1
local quorum=$((num_masters/2 + 1))
sudo bash -c "echo $quorum > /etc/mesos-master/quorum"
if [ -n "$node_hostname" ]
then
sudo bash -c "echo $node_hostname > /etc/mesos-master/hostname"
fi
}
# Write the optional agent (mesos-slave) hostname.
function configure_mesos_agent
{
if [ -n "$node_hostname" ]
then
sudo bash -c "echo $node_hostname > /etc/mesos-slave/hostname"
fi
}
# Write the optional marathon hostname.
function configure_marathon
{
if [ -n "$node_hostname" ]
then
sudo mkdir -p /etc/marathon/conf
sudo bash -c "echo $node_hostname > /etc/marathon/conf/hostname"
fi
}
# Start (or restart) zookeeper with the distro's service mechanism.
function start_zookeeper
{
case $distro_type in
debian)
sudo service zookeeper restart
;;
el6)
sudo zookeeper-server start
;;
el7)
sudo systemctl start zookeeper
;;
*) err_exit "Unsupported distro" ;;
esac
}
# Check if a given service is running (upstart-style status output)
# 0 --> running
# 1 --> not running
function service_running
{
local service="$1"
local stat=1
local service_status="$(sudo service $service status)"
if [[ $service_status == *"start/running"* ]]
then
stat=0
fi
return $stat
}
# Stop a service only if it is currently running.
function stop_service
{
local service="$1"
if service_running $service
then
sudo service $service stop
fi
}
# Stop a service and prevent it from starting at boot; the mechanism
# depends on the init system (upstart override vs sysv vs systemd).
function disable_service
{
local service="$1"
case $distro in
ubuntu-*)
stop_service $service
sudo sh -c "echo manual > /etc/init/${service}.override"
;;
debian-*)
sudo service $service stop
sudo update-rc.d -f $service remove
;;
rhel-6|rhel-6.*|centos-6|centos-6.*)
sudo stop $service
sudo sh -c "echo manual > /etc/init/${service}.override"
;;
rhel-7|rhel-7.*|centos-7|centos-7.*)
systemctl stop ${service}.service
systemctl disable ${service}.service
;;
*) err_exit "Unsupported distro" ;;
esac
}
# Restart a service with the distro-appropriate command.
function restart_service
{
local service="$1"
case $distro_type in
debian|el7)
sudo service $service restart
;;
el6)
sudo restart $service
;;
*) err_exit "Unsupported distro" ;;
esac
}
# Full master setup: marathon + zookeeper + mesos-master configuration;
# mesos-slave is disabled unless this node is also an agent.
function setup_master
{
install_marathon
configure_zookeeper
start_zookeeper
configure_mesos_master
configure_marathon
if [ "$type_agent" = false ]
then
disable_service mesos-slave
fi
restart_service mesos-master
restart_service marathon
}
# Full agent setup: configure mesos-slave and disable master-only services
# unless this node is also a master.
function setup_agent
{
configure_mesos_agent
if [ "$type_master" = false ]
then
disable_service mesos-master
if [ "$distro_type" = "debian" ]
then
disable_service zookeeper
fi
fi
restart_service mesos-slave
}
#parse arguments
# NOTE(review): usage mentions an --ip option, but the getopt long-option
# list below does not declare it, so --ip is rejected at parse time.
ARGS=$(getopt -o h --long "masters:,hostname:,mesos:,marathon:,distro:,help" -n "$script_basename" -- "$@")
eval set -- "$ARGS"
while true
do
case "$1" in
--masters)
masters_specified=true
shift
master_list=$1
shift
count=0
# Split the comma-separated list and validate each address.
for ip in $(echo $master_list | sed "s/,/ /g")
do
if valid_ip $ip
then
count=$((count+1))
master_ips+=($ip)
else
err_exit "Invalid IP address provided: $ip"
fi
done
# Require an odd master count so a zookeeper/mesos quorum is well-defined.
if [ $((count%2)) -eq 0 ]
then
err_exit "You entered an even number of masters. We recommend running an odd number of masters."
fi
;;
--hostname)
shift
node_hostname=$1
shift
;;
--mesos)
shift
mesos_version=$1
shift
;;
--marathon)
shift
marathon_version=$1
shift
;;
--distro)
shift
distro=$1
shift
;;
-h|--help)
usage
exit
;;
--)
shift
break
;;
*) usage
exit 1
;;
esac
done
if [ $# -lt 1 ]
then
echo "Please specify the node type to setup:\"master\", \"agent\" or \"master agent\""
usage
exit 1
fi
# Determine the type of node to be set up (Master/Agent); a node may be
# both ("master agent"). "slave" is accepted as a synonym for "agent".
node_types=$@
for node_type in $node_types
do
if [ "$node_type" != "master" -a "$node_type" != "slave" -a "$node_type" != "agent" ]
then
echo "Invalid node type $node_type. Valid node types are \"master\" and \"agent\""
exit
else
if [ "$node_type" == "master" ]
then
type_master=true
else
type_agent=true
fi
fi
done
#end parse arguments
if [ -n "$distro" ]
then
# NOTE(review): this is a regex, not a glob — "ubuntu-*" means "ubuntu"
# followed by zero or more dashes, and the match is unanchored.
if ! [[ $distro =~ ubuntu-*|debian-*|rhel-6|rhel-7|centos-6|centos-7 ]]
then
err_exit "$distro is not a supported distro"
fi
else
distro="$(detect_distro)"
fi
distro_type="$(get_distro_type)"
# Main installation sequence.
setup_mesosphere_repo
install_mesos
configure_mesos
if [ "$type_master" = true ]
then
setup_master
fi
if [ "$type_agent" = true ]
then
setup_agent
fi
| true |
4f511c258f044d028fb84c90c0772ce3a387e8dd | Shell | leohuang4977/tvb-ukbb | /bb_functional_pipeline/bb_FC | UTF-8 | 1,082 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
##########
#
# Author Z. Wwang
#
# script to register ROI parcellation to rfMRI
# and then compute timeseries and FC (pearson correlations)
#
##########
# Register the ROI parcellation (labelled GM) to the subject's rfMRI space,
# extract one mean time series per ROI, then compute functional connectivity
# (pairwise Pearson correlations).
# $1 - subject directory name, relative to the current working directory.
# All path expansions are quoted: $PWD (and hence every derived path) may
# contain spaces, which broke the original unquoted invocations.
subjdir="$PWD/$1"
ts_rois="${subjdir}/fMRI/rfMRI.ica/ts_roied.txt"
stats_sum="${subjdir}/fMRI/rfMRI.ica/stats.sum"
touch "$stats_sum"
### register parcellation (labelled GM) to fMRI
# create inverse warp (highres -> functional space)
"${FSLDIR}/bin/convert_xfm" -omat "${subjdir}/fMRI/rfMRI.ica/reg/highres2example_func.mat" -inverse "${subjdir}/fMRI/rfMRI.ica/reg/example_func2highres.mat"
# apply it to labelled GM; nearest-neighbour interpolation keeps the labels integral
"${FSLDIR}/bin/applywarp" -i "${subjdir}/T1/labelled_GM" -r "${subjdir}/fMRI/rfMRI.ica/example_func" -o "${subjdir}/fMRI/rfMRI.ica/parcellation" --premat="${subjdir}/fMRI/rfMRI.ica/reg/highres2example_func.mat" --interp=nn
### segstats: per-ROI mean time series of the cleaned functional data
mri_segstats --avgwf "${ts_rois}" --i "${subjdir}/fMRI/rfMRI.ica/filtered_func_data_clean.nii.gz" --seg "${subjdir}/fMRI/rfMRI.ica/parcellation.nii.gz" --sum "${stats_sum}"
### FC compute
"${BB_BIN_DIR}/bb_functional_pipeline/bb_FC_compute" -stat "${stats_sum}" -ts "$ts_rois" -od "${subjdir}/fMRI/rfMRI.ica" -LUT "${PARC_LUT}"
| true |
97c0a080551608a1ff62e5c9d5f8a4b9222e1dfd | Shell | STROMANZ/AoC | /day05/day05.sh | UTF-8 | 450 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# AoC 2020 day 5: decode boarding passes from input.txt.
# Part 1 prints the highest seat ID; part 2 prints the missing seat ID(s).

# Translate one boarding pass into its numeric seat ID.
# F/L are binary 0, B/R are binary 1; the first 7 bits are the row,
# the last 3 the column; seat ID = row * 8 + column.
decode_seat() {
  local bits row col
  bits=$(printf '%s' "$1" | sed -e 's/[FL]/0/g' -e 's/[BR]/1/g')
  row=$((2#${bits:0:7}))
  col=$((2#${bits:7}))
  echo $((row * 8 + col))
}

# Decode every pass; tolerate a missing input file gracefully.
seats=""
if [ -r input.txt ]; then
  seats=$(while read -r line; do decode_seat "$line"; done < input.txt)
fi

# Part 1: highest seat ID on any pass.
high=$(printf '%s\n' $seats | sort -nr | head -1)
echo "$high"

# Part 2: scan the ID range and report IDs that appear on no pass.
# The original used 'grep -q ${i}', which matches substrings (12 matches
# 123) and therefore missed absent seats; -x forces whole-line matches.
if [ -n "$high" ]; then
  for i in $(seq 2 "$high"); do
    printf '%s\n' $seats | grep -qx "$i" || echo "$i"
  done
fi
| true |
9da07701cf9aa5f42cba9e50dfdb0a0c6c223e26 | Shell | pranavmishra90/Home-Assistant | /ha_gitpush.sh | UTF-8 | 324 | 3.078125 | 3 | [
"MIT"
] | permissive | ## Script for pushing all changes to GitHub
# Not a best practice, but convenient: commit and push everything in one go.
# Bail out if the Home Assistant config directory is missing; the original
# unchecked 'cd' would otherwise stage and commit whatever repository the
# caller happened to be in.
cd /config || exit 1
# Add all files to the repository
git add .
# Commit changes with a message carrying the current timestamp
git commit -m "Update on $(date +'%Y-%m-%d %H:%M:%S')"
# Push changes to GitHub
git push -u origin master
| true |
08364d6267923b5a0c17a4f26ff7a520f816b412 | Shell | panzuelaz/raspi_en_uart | /enable_uart.sh | UTF-8 | 907 | 3.421875 | 3 | [] | no_license | # These 2 function to enable uart on RaspberryPi
# Enable the UART on a Raspberry Pi: pin the core clock, set enable_uart,
# and remove the serial console from the kernel command line.
# Each config line is added only if not already present (idempotent);
# -F matches the literal string, -x requires a whole-line match.
if grep -Fxq "core_freq=250" /boot/config.txt; then
# already present - nothing to do
echo "Config 'core_freq=250' already exist."
else
# not found - append the setting
echo "core_freq=250" >> /boot/config.txt
echo "Added new line: core_freq=250"
fi
sleep 1
if grep -Fxq "enable_uart=1" /boot/config.txt; then
# already present - nothing to do
echo "Config 'enable_uart=1' already exist."
else
# not found - append the setting
echo "enable_uart=1" >> /boot/config.txt
echo "Added new line: enable_uart=1"
fi
sleep 1
# Disable the serial console: strip any console=serial0/ttyAMA0 tokens
# from the kernel command line so the UART is free for application use.
if grep "console=serial0,115200\|console=ttyAMA0,115200" /boot/cmdline.txt; then
# found - remove the console tokens in place
sed -i -e 's/\(console=serial0,115200 \|console=ttyAMA0,115200 \)//g' /boot/cmdline.txt
echo "Serial console disabled."
else
# not found - already clean
echo "Serial console already disabled!"
fi
sleep 1
echo "Tasks done!" | true
473f4fe68c46d8a03c622ac3b8f9eacf6e1d673a | Shell | maneyko/dotfiles | /bin/trim-empty-lines | UTF-8 | 967 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Filter blank lines out of a file (or stdin), optionally letting up to
# --number-trailing consecutive blank lines through.
# Depends on argparse.sh for arg_* / parse_args / $POSITIONAL / $ARG_*.
source "argparse.sh"

ARG_NUMBER_TRAILING=0

arg_positional "[input-file] [Input file to process.]"
arg_optional  "[number-trailing] [n] [Allow this many trailing lines to be grouped together. Default '$ARG_NUMBER_TRAILING'.]"
arg_help      "[Do not print out empty lines.]"
parse_args

# Default filter: count consecutive blank lines in $n; print a blank line
# while the run is within the threshold, otherwise drop it and reset.
# NOTE(review): the perl 'gt' operator is a *string* comparison; it behaves
# like '>' only while both operands stay single-digit. Verify for n >= 10.
read -r -d '' perl_script << 'EOT'
BEGIN {
  $n = 0;
}
if (/^\s+?$/) {
  $n++;
  if ($n gt <ARG_NUMBER_TRAILING>) {
    $n = 0;
  } else {
    print;
  }
} else {
  $n = 0;
  print;
}
EOT

# Variant used when --number-trailing is exactly 1.
# NOTE(review): this branch increments $n twice per blank line and prints
# only when the threshold is exceeded — the inverse of the script above;
# presumably tuned empirically for the n==1 case. Confirm before reuse.
read -r -d '' perl_script1 << 'EOT'
BEGIN {
  $n = 0;
}
if (/^\s+?$/) {
  $n++;
  if ($n gt <ARG_NUMBER_TRAILING>) {
    print;
    $n = 0;
  } else {
    $n++;
  }
} else {
  $n = 0;
  print;
}
EOT

if [[ $ARG_NUMBER_TRAILING == 1 ]]; then
  perl_script=$perl_script1
fi

# Substitute the threshold placeholder into the chosen perl program.
perl_script=${perl_script//<ARG_NUMBER_TRAILING>/$ARG_NUMBER_TRAILING}

# No positional argument: filter stdin; otherwise filter the input file.
if [[ ${#POSITIONAL[@]} -eq 0 ]]; then
  cat | perl -ne "$perl_script"
else
  perl -ne "$perl_script" "$ARG_INPUT_FILE"
fi
| true |
20b55c6540c9e98dd46a6f4c03a64b619b13b0b4 | Shell | alinpopa/dotfiles | /bin/sgenapp | UTF-8 | 2,055 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env bash
# sgenapp: generate a Scala/sbt application skeleton.
# $1 - organisation (dotted package), $2 - project name, $3 - main object.
ORG=$1
NAME=$2
MAIN_OBJ=$3

# Print usage and abort.
usage () {
echo "sgenapp: Generate Scala app skeleton"
echo "usage: $0 <organisation> <app name> <main class name>"
exit 1
}
# Convert a dotted package name (e.g. "com.example.app") into the
# corresponding filesystem path ("com/example/app").
to_path () {
  local dotted=$1
  printf '%s\n' "${dotted//./\/}"
}
# Emit the build.sbt contents for the new project (reads $ORG, $NAME,
# $MAIN_OBJ). The \${...} escapes are deliberate: they must survive into
# the generated sbt file as sbt's own interpolations.
render_sbt () {
sbt_file=$(cat <<-EOM
lazy val commonSettings = Seq(
organization := "$ORG",
version := "1.0.0",
scalaVersion := "2.12.4",
scalacOptions := Seq("-unchecked", "-deprecation", "-feature")
)
lazy val root = (project in file(".")).
settings(commonSettings: _*).
settings(
name := "$NAME",
mainClass in assembly := Some("${ORG}.${MAIN_OBJ}"),
assemblyJarName in assembly := s"\${name.value}-\${version.value}.jar"
)
libraryDependencies ++= {
Seq(
"com.typesafe.akka" %% "akka-actor" % "2.5.2",
"org.scalatest" %% "scalatest" % "3.0.1" % Test,
"com.typesafe.akka" %% "akka-testkit" % "2.5.2" % Test
)
}
EOM
)
echo "$sbt_file"
}
# Emit the main-object source file (package $ORG, object $MAIN_OBJ).
render_main () {
package="$ORG"
main_content=$(cat <<-EOM
package $package
object $MAIN_OBJ {
def main(args: Array[String]): Unit = {
println("Hello, $MAIN_OBJ!")
}
}
EOM
)
echo "$main_content"
}
# Emit a minimal scalatest spec in package $ORG.
render_test () {
package="$ORG"
test_content=$(cat <<-EOM
package $package
import org.scalatest.{Matchers, WordSpecLike}
class TestSpec extends WordSpecLike with Matchers {
"Test" should {
"test" in {
1 should be (1)
}
}
}
EOM
)
echo "$test_content"
}
# Validate the arguments, create the sbt project layout, and write the
# generated build definition, main object, test spec and sbt plumbing.
run () {
if [ -z "$ORG" ] || [ -z "$NAME" ] || [ -z "$MAIN_OBJ" ]; then
echo "Incomplete command line arguments"
usage
fi
# Package path on disk, e.g. com.example -> com/example.
path="$(to_path "$ORG")"
mkdir -p "${NAME}/project"
mkdir -p "${NAME}/src/main/scala/$path"
mkdir -p "${NAME}/src/test/scala/$path"
echo "$(render_sbt)" > "${NAME}/build.sbt"
echo "$(render_main)" > "${NAME}/src/main/scala/${path}/${MAIN_OBJ}.scala"
echo "$(render_test)" > "${NAME}/src/test/scala/${path}/TestSpec.scala"
# sbt-assembly plugin + pinned sbt version.
echo 'addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.5")' > "${NAME}/project/assembly.sbt"
echo 'sbt.version = 1.1.0' > "${NAME}/project/build.properties"
}
run
| true |
33a5a1273aba59456e9faf2b2d2d77363245262a | Shell | jniedzie/LightByLight2018 | /analysis/condor/runTauTauApplySelection.sh | UTF-8 | 651 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Condor worker: pick the ($1+1)-th path from the shared input list
# (condor process ids are zero-based, sed line numbers one-based), stage a
# per-job run file, then run the TauTau selection over it.
# $1 - condor process index; $2 - job tag used to name the temp run file.
index=$(( 1 + ${1}))
inputPathData=$(sed "${index}q;d" /afs/cern.ch/user/m/mnickel/private/LightByLight2018/analysis/input_list.txt)
echo "${inputPathData}"
python /afs/cern.ch/user/m/mnickel/private/LightByLight2018/analysis/configs/input_files/MakeInputFile.py "${inputPathData}" "/afs/cern.ch/user/m/mnickel/private/LightByLight2018/analysis/configs/input_files/condor_inputfiles/tmp_runfile_${2}_${1}.txt"
/afs/cern.ch/user/m/mnickel/private/LightByLight2018/analysis/applySelections "TauTau" "/afs/cern.ch/user/m/mnickel/private/LightByLight2018/analysis/configs/input_files/condor_inputfiles/tmp_runfile_${2}_${1}.txt" "Data"
# A stray 'done' (leftover of a removed loop) used to follow the last
# command and made bash abort with a syntax error; it has been removed.
| true |
e90f024fe89eac64bddb472e8a73c50e05f6e456 | Shell | dkkumargoyal/lambda-coldstart-runtime-vs-memory | /build.sh | UTF-8 | 322 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# Redeploy each serverless runtime folder and invoke the functions,
# repeatedly (200 rounds), to measure Lambda cold-start behaviour.
declare -a folders=("csharp1" "csharp2" "java" "python2" "python3" "golang" "nodejs6" "nodejs4")
#export AWS_PROFILE=personal
for i in $(seq 1 200); do
  for folder in "${folders[@]}"; do
    # Skip a missing folder instead of deploying from the wrong directory:
    # the original unchecked 'cd $folder' fell through on failure, so
    # 'sls deploy' ran wherever we happened to be and 'cd ..' walked up.
    pushd "$folder" >/dev/null || continue
    pwd
    sls deploy
    popd >/dev/null
  done
  sleep 10
  node invoke-functions.js
  sleep 10
done
| true |
72e8f494b1609015236379aa86dfcca43a88579b | Shell | fjagui/scripting | /detalleldap/ldap5.sh | UTF-8 | 780 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Debug walkthrough of /etc/shadow fields for "regular" users.
# The echoed \$(...) lines intentionally print the literal commands being
# run, as a teaching/trace aid alongside their results.
clear
# Select passwd entries whose line contains "x:" followed by a 4-digit
# number — intended to pick users with 4-digit UIDs.
# NOTE(review): the pattern matches anywhere on the line, not only the UID
# field, so other numeric fields can also trigger a match.
grep "x:[1-9][0-9][0-9][0-9]" /etc/passwd | (while read TARGET_USER
do
echo "PROCESAMOS TARGET_USER: $TARGET_USER"
echo "----------"
# Field 1 of the passwd line is the login name.
USER_ID="$(echo "$TARGET_USER" | cut -d':' -f1)"
grep "${USER_ID}:" /etc/shadow
# shadow field 3: date of last password change (days since epoch).
LASTCHANGE_FLAG="$(grep "${USER_ID}:" /etc/shadow | cut -d':' -f3)"
echo "\$(grep \"\${USER_ID}:\" /etc/shadow"
echo "LASTCHANGE_FLAG=\"\$(grep \"\${USER_ID}:\" /etc/shadow | cut -d':' -f3)\" "
echo "LATCHANGE_FLAG=$LASTCHANGE_FLAG"
# shadow field 9: reserved flag field.
SHADOW_FLAG="$(grep "${USER_ID}:" /etc/shadow | cut -d':' -f9)"
echo "\$(grep \"\${USER_ID}:\" /etc/shadow"
echo "SHADOW_FLAG=\"\$(grep \"\${USER_ID}:\" /etc/shadow | cut -d':' -f9)\" "
echo "SHADOW_FLAG=$SHADOW_FLAG"
echo "=========="
echo ""
done
)
| true |
4801189fe7343b198e8305bb6f7470ba8cd8a8c7 | Shell | dmitriyfomin/installara | /installara | UTF-8 | 2,267 | 3.90625 | 4 | [] | no_license | #!/bin/bash
#*********************************#
# INSTALLARA                      #
#                                 #
# Laravel Framework 5 Installer   #
#                                 #
# Author: Dmitry Fomin            #
#*********************************#
# Interactive installer: shows a menu, asks for a project name and a
# Laravel version, then bootstraps the project via composer.

# Set colour vars (ANSI escape sequences used by the echo -e calls below)
white='\e[1;37m'
red='\e[1;31m'
orange='\e[38;5;209m'
cyan='\e[0;36m'
purple='\e[0;35m'
yellow='\e[0;33m'
spec='\e[38;5;203m'
# Menu (Title screen)
clear
echo
echo -e $white" #*********************************#"
echo -e $white" # #"
echo -e $white" # $red INSTALLARA $white #"
echo -e $white" # #"
echo -e $white" # $orange Author: Dmitry Fomin $white#"
echo -e $white" # #"
echo -e $white" #*********************************#"
echo
echo -e $purple" OPTIONS:"
echo
echo -e " $yellow 1)$white START INSTALLING"
echo -e " $yellow 2)$white QUIT"
# Options
echo
echo -ne $spec"installara> $white"
read option
# Quote the variable: with the original unquoted test, pressing Enter on
# an empty prompt produced "[: =: unary operator expected".
if [ "$option" = 1 ];
then
echo
echo -e $cyan"Step 1. Enter name of your project or domain name ([a-z0-9-.a-z]): $white"
echo
echo -ne $spec"installara> $white"
read projectname
if [ ! -z "$projectname" ];
then
echo
echo -e $orange"Step 2. Enter Laravel Framework version ([~5.*0-9.*0-9]) to install it: $white"
echo
echo -ne $spec"installara> $white"
read ver
echo -e "$white ********* STARTING INSTALL... ***********"
echo
echo -e "$purple Project folder: $white $(pwd)/$projectname,$red Laravel:$white $ver"
echo
# Download composer and scaffold the Laravel project; the project name is
# quoted so names containing spaces do not split into multiple arguments.
curl -sS https://getcomposer.org/installer | php
php composer.phar create-project laravel/laravel "$projectname" "$ver"
chmod -R 775 "$projectname/storage/" && zenity --info --text "OK! Thanks for using Installara" && clear && exit
else
echo -e $red"Error! Empty name"
sleep 0.9
# Restart the installer. NOTE(review): re-invoking by basename only works
# when the script is on $PATH or in the current directory — verify.
bash "$(basename "$0")"
fi
else
zenity --info --text "Thanks for using Installara"
clear
exit
fi
| true |
a072db85916acb7fead430d9acb796403e73cb70 | Shell | ratsgo/embedding | /preprocess.sh | UTF-8 | 11,660 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Sub-command to dispatch on (see the case statement below).
COMMAND=$1

# Download a (possibly large) file from Google Drive.
# $1 - Drive file id, $2 - destination path.
# Large files require a confirmation token: the first request saves the
# session cookies and scrapes the confirm= token, the second performs the
# actual download with both.
function gdrive_download () {
  CONFIRM=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate "https://docs.google.com/uc?export=download&id=$1" -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')
  wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$CONFIRM&id=$1" -O $2
  rm -rf /tmp/cookies.txt
}
# Dispatch table: each arm either downloads a raw/prepared corpus or runs
# one preprocessing/tokenization step of the Korean-embedding pipeline.
case $COMMAND in
    dump-raw-wiki)
        echo "download ko-wikipedia..."
        wget https://dumps.wikimedia.org/kowiki/latest/kowiki-latest-pages-articles.xml.bz2 -P /notebooks/embedding/data/raw
        mkdir -p /notebooks/embedding/data/processed
        ;;
    dump-raw-korquad)
        echo "download KorQuAD data..."
        wget https://korquad.github.io/dataset/KorQuAD_v1.0_train.json -P /notebooks/embedding/data/raw
        wget https://korquad.github.io/dataset/KorQuAD_v1.0_dev.json -P /notebooks/embedding/data/raw
        mkdir -p /notebooks/embedding/data/processed
        ;;
    dump-raw-nsmc)
        echo "download naver movie corpus..."
        wget https://github.com/e9t/nsmc/raw/master/ratings.txt -P /notebooks/embedding/data/raw
        wget https://github.com/e9t/nsmc/raw/master/ratings_train.txt -P /notebooks/embedding/data/raw
        wget https://github.com/e9t/nsmc/raw/master/ratings_test.txt -P /notebooks/embedding/data/raw
        mkdir -p /notebooks/embedding/data/processed
        ;;
    dump-blog)
        echo "download blog data.."
        mkdir -p /notebooks/embedding/data/processed
        gdrive_download 1Few7-Mh3JypQN3rjnuXD8yAXrkxUwmjS /notebooks/embedding/data/processed/processed_blog.txt
        ;;
    dump-raw)
        echo "make directories..."
        mkdir -p /notebooks/embedding/data
        mkdir -p /notebooks/embedding/data/processed
        mkdir /notebooks/embedding/data/tokenized
        echo "download similar sentence data..."
        wget https://github.com/songys/Question_pair/raw/master/kor_pair_train.csv -P /notebooks/embedding/data/raw
        wget https://github.com/songys/Question_pair/raw/master/kor_Pair_test.csv -P /notebooks/embedding/data/raw
        ;;
    dump-word-embeddings)
        echo "download word embeddings..."
        mkdir -p /notebooks/embedding/data/processed
        cd /notebooks/embedding/data
        gdrive_download 1FeGIbSz2E1A63JZP_XIxnGaSRt7AhXFf /notebooks/embedding/data/word-embeddings.zip
        unzip word-embeddings.zip
        rm word-embeddings.zip
        ;;
    dump-sentence-embeddings)
        echo "download sentence embeddings..."
        mkdir -p /notebooks/embedding/data/processed
        cd /notebooks/embedding/data
        gdrive_download 1jL3Q5H1vwATewHrx0PJgJ8YoUCtEkaGW /notebooks/embedding/data/sentence-embeddings.zip
        unzip sentence-embeddings.zip
        rm sentence-embeddings.zip
        ;;
    dump-tokenized)
        echo "download tokenized data..."
        mkdir -p /notebooks/embedding/data/processed
        cd /notebooks/embedding/data
        gdrive_download 1Ybp_DmzNEpsBrUKZ1-NoPDzCMO39f-fx /notebooks/embedding/data/tokenized.zip
        unzip tokenized.zip
        rm tokenized.zip
        ;;
    dump-processed)
        echo "download processed data..."
        mkdir -p /notebooks/embedding/data
        cd /notebooks/embedding/data
        gdrive_download 1kUecR7xO7bsHFmUI6AExtY5u2XXlObOG /notebooks/embedding/data/processed.zip
        unzip processed.zip
        rm processed.zip
        ;;
    process-wiki)
        echo "processing ko-wikipedia..."
        mkdir -p /notebooks/embedding/data/processed
        python preprocess/dump.py --preprocess_mode wiki \
            --input_path /notebooks/embedding/data/raw/kowiki-latest-pages-articles.xml.bz2 \
            --output_path /notebooks/embedding/data/processed/processed_wiki_ko.txt
        ;;
    process-nsmc)
        echo "processing naver movie corpus..."
        mkdir -p /notebooks/embedding/data/processed
        python preprocess/dump.py --preprocess_mode nsmc \
            --input_path /notebooks/embedding/data/raw/ratings.txt \
            --output_path /notebooks/embedding/data/processed/processed_ratings.txt \
            --with_label False
        python preprocess/dump.py --preprocess_mode nsmc \
            --input_path /notebooks/embedding/data/raw/ratings_train.txt \
            --output_path /notebooks/embedding/data/processed/processed_ratings_train.txt \
            --with_label True
        python preprocess/dump.py --preprocess_mode nsmc \
            --input_path /notebooks/embedding/data/raw/ratings_test.txt \
            --output_path /notebooks/embedding/data/processed/processed_ratings_test.txt \
            --with_label True
        ;;
    process-korquad)
        echo "processing KorQuAD corpus..."
        mkdir -p /notebooks/embedding/data/processed
        python preprocess/dump.py --preprocess_mode korquad \
            --input_path /notebooks/embedding/data/raw/KorQuAD_v1.0_train.json \
            --output_path /notebooks/embedding/data/processed/processed_korquad_train.txt
        python preprocess/dump.py --preprocess_mode korquad \
            --input_path /notebooks/embedding/data/raw/KorQuAD_v1.0_dev.json \
            --output_path /notebooks/embedding/data/processed/processed_korquad_dev.txt
        # merge train + dev into a single corpus, then drop the parts
        cat /notebooks/embedding/data/processed/processed_korquad_train.txt /notebooks/embedding/data/processed/processed_korquad_dev.txt > /notebooks/embedding/data/processed/processed_korquad.txt
        rm /notebooks/embedding/data/processed/processed_korquad_*.txt
        ;;
    mecab-tokenize)
        echo "mecab, tokenizing..."
        python preprocess/supervised_nlputils.py --tokenizer mecab \
            --input_path /notebooks/embedding/data/processed/processed_wiki_ko.txt \
            --output_path data/tokenized/wiki_ko_mecab.txt
        python preprocess/supervised_nlputils.py --tokenizer mecab \
            --input_path /notebooks/embedding/data/processed/processed_ratings.txt \
            --output_path data/tokenized/ratings_mecab.txt
        python preprocess/supervised_nlputils.py --tokenizer mecab \
            --input_path /notebooks/embedding/data/processed/processed_korquad.txt \
            --output_path data/tokenized/korquad_mecab.txt
        ;;
    process-jamo)
        echo "processing jamo sentences..."
        python preprocess/unsupervised_nlputils.py --preprocess_mode jamo \
            --input_path /notebooks/embedding/data/tokenized/corpus_mecab.txt \
            --output_path /notebooks/embedding/data/tokenized/corpus_mecab_jamo.txt
        ;;
    space-correct)
        echo "train & apply space correct..."
        # train the spacing model once, then apply it to corpus/train/test
        python preprocess/unsupervised_nlputils.py --preprocess_mode train_space \
            --input_path /notebooks/embedding/data/processed/processed_ratings.txt \
            --model_path /notebooks/embedding/data/processed/space-correct.model
        python preprocess/unsupervised_nlputils.py --preprocess_mode apply_space_correct \
            --input_path /notebooks/embedding/data/processed/processed_ratings.txt \
            --model_path /notebooks/embedding/data/processed/space-correct.model \
            --output_path /notebooks/embedding/data/processed/corrected_ratings_corpus.txt \
            --with_label False
        python preprocess/unsupervised_nlputils.py --preprocess_mode apply_space_correct \
            --input_path /notebooks/embedding/data/processed/processed_ratings_train.txt \
            --model_path /notebooks/embedding/data/processed/space-correct.model \
            --output_path /notebooks/embedding/data/processed/corrected_ratings_train.txt \
            --with_label True
        python preprocess/unsupervised_nlputils.py --preprocess_mode apply_space_correct \
            --input_path /notebooks/embedding/data/processed/processed_ratings_test.txt \
            --model_path /notebooks/embedding/data/processed/space-correct.model \
            --output_path /notebooks/embedding/data/processed/corrected_ratings_test.txt \
            --with_label True
        ;;
    soy-tokenize)
        echo "soynlp, LTokenizing..."
        mkdir -p /notebooks/embedding/data/tokenized
        python preprocess/unsupervised_nlputils.py --preprocess_mode compute_soy_word_score \
            --input_path /notebooks/embedding/data/processed/corrected_ratings_corpus.txt \
            --model_path /notebooks/embedding/data/processed/soyword.model
        python preprocess/unsupervised_nlputils.py --preprocess_mode soy_tokenize \
            --input_path /notebooks/embedding/data/processed/corrected_ratings_corpus.txt \
            --model_path /notebooks/embedding/data/processed/soyword.model \
            --output_path /notebooks/embedding/data/tokenized/ratings_soynlp.txt
        ;;
    komoran-tokenize)
        echo "komoran, tokenizing..."
        mkdir -p /notebooks/embedding/data/tokenized
        python preprocess/supervised_nlputils.py --tokenizer komoran \
            --input_path /notebooks/embedding/data/processed/corrected_ratings_corpus.txt \
            --output_path /notebooks/embedding/data/tokenized/ratings_komoran.txt
        ;;
    okt-tokenize)
        echo "okt, tokenizing..."
        mkdir -p /notebooks/embedding/data/tokenized
        python preprocess/supervised_nlputils.py --tokenizer okt \
            --input_path /notebooks/embedding/data/processed/corrected_ratings_corpus.txt \
            --output_path /notebooks/embedding/data/tokenized/ratings_okt.txt
        ;;
    hannanum-tokenize)
        echo "hannanum, tokenizing..."
        mkdir -p /notebooks/embedding/data/tokenized
        python preprocess/supervised_nlputils.py --tokenizer hannanum \
            --input_path /notebooks/embedding/data/processed/corrected_ratings_corpus.txt \
            --output_path /notebooks/embedding/data/tokenized/ratings_hannanum.txt
        ;;
    khaiii-tokenize)
        echo "khaiii, tokenizing..."
        mkdir -p /notebooks/embedding/data/tokenized
        python preprocess/supervised_nlputils.py --tokenizer khaiii \
            --input_path /notebooks/embedding/data/processed/corrected_ratings_corpus.txt \
            --output_path /notebooks/embedding/data/tokenized/ratings_khaiii.txt
        ;;
    bert-tokenize)
        mkdir -p /notebooks/embedding/data/tokenized
        python preprocess/unsupervised_nlputils.py --preprocess_mode bert_tokenize \
            --vocab_path /notebooks/embedding/data/sentence-embeddings/bert/pretrain-ckpt/vocab.txt \
            --input_path /notebooks/embedding/data/processed/corrected_ratings_corpus.txt \
            --output_path /notebooks/embedding/data/tokenized/ratings_sentpiece.txt
        ;;
    mecab-user-dic)
        echo "insert mecab user dictionary..."
        # rebuild the mecab-ko dictionary with the project's user entries
        cd /tmp/mecab-ko-dic-2.1.1-20180720
        cp -f /notebooks/embedding/preprocess/mecab-user-dic.csv /tmp/mecab-ko-dic-2.1.1-20180720/user-dic/nnp.csv
        ./tools/add-userdic.sh
        make install
        cd /notebooks/embedding
        ;;
    make-bert-vocab)
        echo "making BERT vocabulary..."
        mkdir -p /notebooks/embedding/data
        cd /notebooks/embedding/data
        gdrive_download 1kUecR7xO7bsHFmUI6AExtY5u2XXlObOG /notebooks/embedding/data/processed.zip
        unzip processed.zip
        rm processed.zip
        cd /notebooks/embedding
        python preprocess/unsupervised_nlputils.py --preprocess_mode make_bert_vocab \
            --input_path /notebooks/embedding/data/processed/processed_wiki_ko.txt \
            --vocab_path /notebooks/embedding/data/processed/bert.vocab
        # sentencepiece writes its artifacts into the cwd; move them home
        mv sentpiece* /notebooks/embedding/data/processed
        ;;
esac
| true |
229c77a6fc32f291ff3febc4ca189e5e0a13cd3e | Shell | pappu8871/PracticeAssignment | /day5pdf1/day5pdf1-5b.sh | UTF-8 | 125 | 2.90625 | 3 | [] | no_license | #!/bin/bash -x
# Compute the area of a 60x40 rectangle, then convert it with the
# feet-to-meter factor 3.2808.
# NOTE(review): dividing an *area* by a ft-per-m length factor is
# dimensionally odd — presumably what the exercise asked for; verify.
l=60
h=40
area=$(( l * h ))
echo "Area of rectangular:$area"
# Bash arithmetic is integer-only, so $((area / 3.2808)) is a runtime
# syntax error; delegate the floating-point division to awk instead.
m=$(awk -v a="$area" 'BEGIN { printf "%.4f", a / 3.2808 }')
echo "Total meter is: $m"
| true |
094989cc4a8708b9018f7560e07aeef1130c4ffd | Shell | valerie-tseng/valerie-tseng.github.io | /publish.sh | UTF-8 | 248 | 3.078125 | 3 | [] | no_license |
#!/bin/bash
# History:
# 2017/11/26 Hans First release
# Description: stage every change, commit it with a system-time stamp,
# then upload to the git repository.

stamp=$(date +%Y-%m-%d' '%H:%M)
git add .
msg="site upload on $stamp"
git commit -m "$msg"
git push | true |
d348e7a0dcc124a57004634902fcb97e3912775c | Shell | adikira/dotfiles | /zshrc | UTF-8 | 3,288 | 2.578125 | 3 | [] | no_license | # avoid problems with emacs tramp
# zsh startup: oh-my-zsh configuration plus navigation-tool keybindings.
# Emacs TRAMP sets TERM=dumb; give it a plain prompt and no line editor.
[[ $TERM == "dumb" ]] && unsetopt zle && PS1='$ ' && return
# Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
#ZSH_THEME="robbyrussell"
ZSH_THEME="agnoster"
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# Set to this to use case-sensitive completion
# CASE_SENSITIVE="true"
# Comment this out to disable bi-weekly auto-update checks
# DISABLE_AUTO_UPDATE="true"
# Uncomment to change how often before auto-updates occur? (in days)
# export UPDATE_ZSH_DAYS=13
# Uncomment following line if you want to disable colors in ls
# DISABLE_LS_COLORS="true"
# Uncomment following line if you want to disable autosetting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment following line if you want to disable command autocorrection
# DISABLE_CORRECTION="true"
# Uncomment following line if you want red dots to be displayed while waiting for completion
# COMPLETION_WAITING_DOTS="true"
# Uncomment following line if you want to disable marking untracked files under
# VCS as dirty. This makes repository status check for large repositories much,
# much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# NOTE that autojump needs the executable autojump installed
# NOTE do NOT enable the tmux plugin! It breaks the last-working-dir functionality
plugins=(last-working-dir zsh-navigation-tools rvm web-search rails bundler ruby git gem git-extras github vi-mode wd fabric docker archlinux colorize)
source $ZSH/oh-my-zsh.sh
# Customize to your needs...
DEFAULT_USER="svk"
# zmv: multi-file rename; mmv disables globbing and uses wildcard mode.
autoload -U zmv
alias mmv='noglob zmv -W'
# zsh-navigation-tools widgets: Ctrl-R history browser, Ctrl-J cd browser.
autoload znt-history-widget
zle -N znt-history-widget
bindkey "^R" znt-history-widget
zle -N znt-cd-widget
bindkey "^J" znt-cd-widget
# Keep a directory stack (without duplicates) for quick pushd navigation.
setopt AUTO_PUSHD
setopt PUSHD_IGNORE_DUPS
# Favourite directories offered by the znt cd widget.
znt_cd_hotlist=( "~/development/pa/rho" "~/development/pa/psi" "~/development/sc/webapp"
"~/development/sc/graphical_client" "~/development/sc/client_server"
)
# enable rvm
#[[ -s "$HOME/.rvm/scripts/rvm" ]] && . "$HOME/.rvm/scripts/rvm"
# tell Java that XMonad is non-reparenting (prevents blank windows of java applications)
export _JAVA_AWT_WM_NONREPARENTING=1
export EDITOR=vim
alias dockercleancontainers="docker ps -a -f status=exited -q | xargs docker rm"
alias dockercleanimages="docker images -f dangling=true -q | xargs docker rmi"
alias dockerclean="dockercleancontainers && dockercleanimages"
# ps + grep: show full "ps" output for every process whose command line
# matches the given pgrep pattern; prints a message and returns 1 when
# nothing matches.
# see https://github.com/blueyed/oh-my-zsh/blob/a08181210b47625efdc8480e628b0155bff392c9/lib/aliases.zsh#L10-L18
pg() {
    local pids
    pids=$(pgrep -f "$@")
    if [[ -z $pids ]]; then
        echo "No processes found." >&2; return 1
    fi
    # BUGFIX: reuse the PID list captured above instead of running pgrep a
    # second time (the process table can change between the two calls).
    # ${=pids} forces word-splitting of the newline-separated list in zsh.
    ps up ${=pids}
}
# Point SSH at the user-session ssh-agent socket (systemd user unit).
export SSH_AUTH_SOCK="$XDG_RUNTIME_DIR/ssh-agent.socket"
# initialize rbenv (ruby version manager)
eval "$(rbenv init -)"
unalias gr # zsh git plugin defines this alias but we want to use the gr tool
| true |
c1050b8a65488e501e45a3306e972ba319fddd94 | Shell | clamxyz/Criterion | /.cmake/copy-source.sh | UTF-8 | 743 | 3.59375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Directory containing this script (used to locate git-archive-all.sh below).
CURDIR=$(dirname $0)
# Positional arguments: source tree to export, destination directory.
SOURCE_DIR=$1; shift
DEST_DIR=$1; shift
# add_to_sources URL NAME HASH
# Clone the git repository URL into $DEST_DIR/NAME, pin it to commit HASH,
# and strip the .git metadata so only a plain source snapshot remains.
# The whole body runs in a subshell so neither the variables nor the "cd"
# leak into the caller.
add_to_sources() (
	URL=$1
	NAME=$2
	HASH=$3
	git clone "$URL" "$DEST_DIR/$NAME"
	# BUGFIX: guard the cd — if the clone failed we must not run
	# "rm -Rf .git" in whatever directory we happen to be in.
	cd "$DEST_DIR/$NAME" || exit 1
	git checkout -qf "$HASH"
	rm -Rf .git
)
# Export the main tree, then vendor pinned copies of the dependencies.
(
cd "$SOURCE_DIR"
mkdir -p "$DEST_DIR"
# Archive the repository (git-archive-all handles submodules) and unpack it.
"$CURDIR/git-archive-all.sh" --format tar -- - | tar -x -C "$DEST_DIR"
# Vendored dependencies, pinned to known-good commits.
add_to_sources https://github.com/Snaipe/libcsptr dependencies/libcsptr 0d52904
add_to_sources https://github.com/Snaipe/dyncall dependencies/dyncall 51e79a8
add_to_sources https://github.com/nanomsg/nanomsg dependencies/nanomsg 7e12a20
add_to_sources https://github.com/diacritic/BoxFort dependencies/boxfort 7ed0cf2
)
| true |
83f46aa0620c2b4c43637cdad76bf3dfe7fdac33 | Shell | galli-a/di | /di-ransomwhere.sh | UTF-8 | 3,746 | 3.8125 | 4 | [] | no_license | #!/usr/bin/env zsh -f
# Purpose: 	Download and install/update the latest version of "RansomWhere"
#
# From:	Timothy J. Luoma
# Mail:	luomat at gmail dot com
# Date:	2018-09-13
# zsh modifiers ":t:r" = basename of $0 without extension; used as log prefix.
NAME="$0:t:r"
# Pull in the user's personal PATH additions, if present.
if [[ -e "$HOME/.path" ]]
then
source "$HOME/.path"
fi
HOMEPAGE="https://objective-see.com/products/ransomwhere.html"
DOWNLOAD_PAGE="https://objective-see.com/products/ransomwhere.html"
RELEASE_NOTES_URL='https://objective-see.com/products/changelogs/RansomWhere.txt'
SUMMARY="By continually monitoring the file-system for the creation of encrypted files by suspicious processes, RansomWhere? aims to protect your personal files, generically stopping ransomware in its tracks."
INSTALL_TO='/Library/Objective-See/RansomWhere/RansomWhere'
# Scrape the product page: split on quotes/pipes, keep the first .zip URL and
# the advertised "sha-1:" value — exactly two fields, in that order.
INFO=($(curl -H "Accept-Encoding: gzip,deflate" -sfLS "$HOMEPAGE" \
| gunzip -f -c \
| tr -s '"|\047' '\012' \
| egrep '^http.*\.zip|sha-1:' \
| awk '{print $NF}' \
| head -2))
# zsh arrays are 1-indexed.
URL="$INFO[1]"
EXPECTED_SHA1="$INFO[2]"
# Derive the version from the download file name (keep digits and dots only).
LATEST_VERSION=$(echo "$URL:t:r" | tr -dc '[0-9]\.')
# If any of these are blank, we cannot continue
if [ "$URL" = "" -o "$LATEST_VERSION" = "" -o "$EXPECTED_SHA1" = "" ]
then
echo "$NAME: Error: bad data received:
LATEST_VERSION: $LATEST_VERSION
URL: $URL
EXPECTED_SHA1: $EXPECTED_SHA1
"
exit 1
fi
# Compare the installed binary's version against the scraped latest version.
if [[ -e "$INSTALL_TO" ]]
then
# This may be a terrible way to determine the current version, but I can't figure out any other way.
INSTALLED_VERSION=$(strings "$INSTALL_TO" | egrep -B1 '^majorVersion$' | tr -dc '[0-9]\.')
# is-at-least (zsh helper): exit 0 when installed >= latest.
autoload is-at-least
is-at-least "$LATEST_VERSION" "$INSTALLED_VERSION"
VERSION_COMPARE="$?"
if [ "$VERSION_COMPARE" = "0" ]
then
echo "$NAME: Up-To-Date ($INSTALLED_VERSION)"
exit 0
fi
echo "$NAME: Outdated: $INSTALLED_VERSION vs $LATEST_VERSION"
FIRST_INSTALL='no'
else
FIRST_INSTALL='yes'
fi
# Download target and companion checksum file (spaces stripped from the name).
FILENAME="$HOME/Downloads/${${INSTALL_TO:t:r}// /}-${LATEST_VERSION}.zip"
SHA_FILE="$HOME/Downloads/${${INSTALL_TO:t:r}// /}-${LATEST_VERSION}.sha1.txt"
# NOTE(review): the "?" before the file name looks aimed at shasum's
# check-file format — confirm shasum -c accepts it on the target macOS.
echo "$EXPECTED_SHA1 ?$FILENAME:t" >| "$SHA_FILE"
# Fetch, display and save the release notes next to the download.
( curl -H "Accept-Encoding: gzip,deflate" -sfLS "$RELEASE_NOTES_URL" \
| gunzip -f -c) | tee "$FILENAME:r.txt"
# Minor macOS version, e.g. "14" for 10.14; SYSTEM_VERSION_COMPAT keeps the
# legacy 10.x numbering on Big Sur and later.
OS_VER=$(SYSTEM_VERSION_COMPAT=1 sw_vers -productVersion | cut -d. -f2)
if [ "$OS_VER" -lt "8" ]
then
echo "$NAME: [WARNING] '$INSTALL_TO:t' is only compatible with macOS versions 10.8 and higher (you are using 10.$OS_VER)."
echo "$NAME: [WARNING] Will download, but the app might not install or function properly."
fi
echo "$NAME: Downloading '$URL' to '$FILENAME':"
# --continue-at - resumes a partial download when the file already exists.
curl --continue-at - --progress-bar --fail --location --output "$FILENAME" "$URL"
EXIT="$?"
## exit 22 means 'the file was already fully downloaded'
[ "$EXIT" != "0" -a "$EXIT" != "22" ] && echo "$NAME: Download of $URL failed (EXIT = $EXIT)" && exit 0
[[ ! -e "$FILENAME" ]] && echo "$NAME: $FILENAME does not exist." && exit 0
[[ ! -s "$FILENAME" ]] && echo "$NAME: $FILENAME is zero bytes." && rm -f "$FILENAME" && exit 0
##
# Verify the download against the advertised SHA-1 before doing anything else.
echo "$NAME: Checking '$FILENAME' against '$SHA_FILE':"
cd "$FILENAME:h"
shasum -c "$SHA_FILE"
EXIT="$?"
if [ "$EXIT" = "0" ]
then
echo "$NAME: SHA-1 verification passed"
else
echo "$NAME: SHA-1 verification failed (\$EXIT = $EXIT)"
exit 1
fi
##
# Unpack into a fresh private temp dir; ditto preserves macOS metadata and
# --noqtn drops the quarantine attribute.
UNZIP_TO=$(mktemp -d "${TMPDIR-/tmp/}${NAME}-XXXXXXXX")
echo "$NAME: Unzipping '$FILENAME' to '$UNZIP_TO':"
ditto -xk --noqtn "$FILENAME" "$UNZIP_TO"
EXIT="$?"
if [[ "$EXIT" == "0" ]]
then
echo "$NAME: Unzip successful"
else
# failed
echo "$NAME failed (ditto -xkv '$FILENAME' '$UNZIP_TO')"
exit 1
fi
INSTALLER="$UNZIP_TO/RansomWhere_Installer.app"
echo "$NAME: launching custom installer/updater: '$INSTALLER'"
# launch the custom installer app and wait for it to finish.
# Note: 'open -W' does not work for this one
open -a "$INSTALLER"
exit 0
#
#EOF
| true |
f95c49948361969f5864dbf349e8d427c1cc9b24 | Shell | HPSCTerrSys/TSMP | /bldsva/intf_oas3/clm3_5/arch/build_interface_clm3_5.ksh | UTF-8 | 2,061 | 2.609375 | 3 | [
"MIT"
] | permissive | #! /bin/ksh
# Hook invoked on every build run; currently only logs entry and exit.
# route/cyellow/cnormal are provided by the surrounding TSMP build framework.
always_clm(){
route "${cyellow}>> always_clm${cnormal}"
route "${cyellow}<< always_clm${cnormal}"
}
# Assemble compiler/linker settings for CLM 3.5, then delegate to the common
# configure step.
# NOTE(review): cplLib/flags/ccc/cfc are deliberately global — presumably
# consumed by c_configure_clm; do not make them local.
configure_clm(){
route "${cyellow}>> configure_clm${cnormal}"
cplLib="-lnetcdff "
flags=""
ccc="$profComp $mpiPath/bin/mpicc "
cfc="$profComp $mpiPath/bin/mpif90 "
# Score-P wrappers replace the MPI compiler wrappers when profiling with scalasca.
if [[ $profiling == "scalasca" ]]; then
ccc="scorep-mpicc "
cfc="scorep-mpif90 "
fi
flags+="-mpi_lib $mpiPath/lib "
c_configure_clm
route "${cyellow}<< configure_clm${cnormal}"
}
# Build step: thin logging wrapper around the common c_make_clm routine.
make_clm(){
route "${cyellow}>> make_clm${cnormal}"
c_make_clm
route "${cyellow}<< make_clm${cnormal}"
}
# Copy/patch the CLM 3.5 sources required for the OASIS coupling interface.
# "patch", "comment" and "check" are TSMP build-framework helpers ("patch"
# appears to copy a source file to a destination; "check" validates the
# previous step) — confirm against the framework.
substitutions_clm(){
route "${cyellow}>> substitutions_clm${cnormal}"
c_substitutions_clm
comment " cp m_FileResolve.F90 and shr_sys_mod.F90 to usr.src folder"
# Select the compiler-specific replacement sources.
if echo "$compiler" | grep -qE 'Intel'; then
maincompiler="Intel"
else
maincompiler="Gnu"
fi
patch $rootdir/bldsva/intf_oas3/clm3_5/arch/src.$maincompiler/m_FileResolv.F90 $clmdir/bld/usr.src
check
patch $rootdir/bldsva/intf_oas3/clm3_5/arch/src.$maincompiler/shr_sys_mod.F90 $clmdir/bld/usr.src
check
if [[ $withOASMCT == "true" ]] ; then
comment " replace files for oasis3-mct and parallel clm coupling"
patch $rootdir/bldsva/intf_oas3/clm3_5/mct/atmdrvMod.F90 $clmdir/bld/usr.src/
check
patch $rootdir/bldsva/intf_oas3/clm3_5/mct/decompMod.F90 $clmdir/bld/usr.src/
check
# NOTE(review): the quoted globs below reach "patch" literally (the shell
# does not expand globs inside quotes) — confirm the helper expands them.
patch "$rootdir/bldsva/intf_oas3/clm3_5/mct/oas*" $clmdir/src/oas3/
check
patch "$rootdir/bldsva/intf_oas3/clm3_5/mct/receive*" $clmdir/src/oas3/
check
patch "$rootdir/bldsva/intf_oas3/clm3_5/mct/send*" $clmdir/src/oas3/
check
fi
comment " cp new clm configure & Makefile.in to clm/bld/"
patch $rootdir/bldsva/intf_oas3/clm3_5/arch/config/configure $clmdir/bld
check
patch $rootdir/bldsva/intf_oas3/clm3_5/arch/config/Makefile.in $clmdir/bld
check
patch $rootdir/bldsva/intf_oas3/clm3_5/arch/config/config_clm_defaults.xml $clmdir/bld
check
route "${cyellow}<< substitutions_clm${cnormal}"
}
| true |
86b4d5e7fbcd641ee8e372685a6e9d0b6d1cbd2e | Shell | KaOSx/apps | /mauiman/PKGBUILD | UTF-8 | 645 | 2.6875 | 3 | [] | no_license |
# Arch/KaOS PKGBUILD for MauiMan (read by makepkg; keep field names intact).
pkgname=mauiman
pkgver=1.1.0
pkgrel=1
pkgdesc='Maui Manager Library. Server and public library API.'
arch=('x86_64')
url="https://mauikit.org/"
license=('GPL3')
depends=('qt5-base' 'qtsystems')
makedepends=('extra-cmake-modules')
source=("https://download.kde.org/stable/maui/mauiman/${pkgver}/${pkgname}-${pkgver}.tar.xz")
sha256sums=('6523705d9d48dec4bd4cf005d2b18371e2a4a0d774415205dff11378eee6468f')
# Configure and compile with CMake in an out-of-tree "build" directory.
build() {
cmake -B build -S ${pkgname}-${pkgver} \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_INSTALL_LIBDIR=/usr/lib
cmake --build build
}
# Stage the compiled files into the package root expected by makepkg.
package() {
DESTDIR=${pkgdir} cmake --install build
}
| true |
ffc02c250daf086324772ff5323a0537b8d2a21f | Shell | vikramkhatri/buildvm | /bin/setup/waitInstall | UTF-8 | 393 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# Author Vikram Khatri
#
# Purpose : This script is called by vmrun to check if installation has
# completed or not.  It blocks until the "second boot" flag file created
# by the build process appears.

# Load the environment (which defines SECOND_BOOT_FLAG among others) unless
# the caller already sourced it.
if [[ -z "$BINHOME" ]] ; then
    . /root/bin/setup/setenvvars
fi

# Poll every 30 seconds for the completion flag.
while true
do
    # BUGFIX: quote the variable.  With an unset/empty SECOND_BOOT_FLAG the
    # unquoted test degenerated to "[ -f ]", which is the *true*
    # single-argument string test, so the loop used to exit immediately.
    if [ -f "$SECOND_BOOT_FLAG" ] ; then
        sleep 30
        break
    else
        echo "$(date +"%D %T.%3N") : Waiting for build process to complete ..."
        sleep 30
    fi
done
| true |
bce4b1ec8ad6f6fc7ae39dbdd701eaf332ccfed3 | Shell | HannahCorman/Intro_Biocomp_Group_Project | /muscle_hmm_script.sh | UTF-8 | 657 | 2.953125 | 3 | [] | no_license | #!/bin/bash
#Muscle alignment for protein files from BLAST
#Build hmm for alignments
#Path for muscle/hmmbuild may be different depending on where binaries are

# BUGFIX: iterate over shell globs instead of parsing "ls | egrep" output,
# which breaks on filenames containing whitespace and can match directories.
# The bash [[ =~ ]] tests reuse the original egrep patterns verbatim.
for sequence in *; do
    [[ -f $sequence && $sequence =~ _[0-9]{1,2}\.fasta$ ]] || continue
    ./align/muscle.exe -in "$sequence" -out "./align/$sequence.align"
done

for alignment in ./align/*; do
    base=${alignment##*/}
    [[ -f $alignment && $base =~ _[0-9]{1,2}\.fasta.align$ ]] || continue
    ./hmm/hmmbuild.exe "./hmm/$base.hmm" "./align/$base"
done

# Search each protein FASTA against the six prebuilt gene profiles.
for build in *; do
    [[ -f $build && $build =~ protein.fasta$ ]] || continue
    for seq in "Atp12a_8" "Gsta2_1" "Lhx2_9" "Ptpn5_6" "Slc7a12_2" "Synpr_10"; do
        ./hmm/hmmsearch.exe --tblout "./hmmoutput/$build.$seq.out" "./hmm/$seq.fasta.align.hmm" "./$build"
    done
done
| true |
33e79c9bf2c54297c6fe63a1a97c26fc475853da | Shell | Dynamictivity/sqwiki | /run.sh | UTF-8 | 1,785 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Make the app tree writable by the web-server user.
chown www-data:www-data /app -R
# Unless explicitly disabled, allow .htaccess overrides and enable mod_rewrite.
if [ "$ALLOW_OVERRIDE" = "**False**" ]; then
unset ALLOW_OVERRIDE
else
sed -i "s/AllowOverride None/AllowOverride All/g" /etc/apache2/apache2.conf
a2enmod rewrite
fi
# Wait for MySQL to come up (http://stackoverflow.com/questions/6118948/bash-loop-ping-successful)
# NOTE(review): there is no sleep between attempts, so this is a busy loop of
# up to 100000 nc invocations — consider a short delay per iteration.
((count = 100000)) # Maximum number to try.
while [[ $count -ne 0 ]] ; do
nc -v mysql 3306 # Try once.
rc=$?
if [[ $rc -eq 0 ]] ; then
((count = 1)) # If okay, flag to exit loop.
fi
((count = count - 1)) # So we don't go forever.
done
if [[ $rc -eq 0 ]] ; then # Make final determination.
echo 'The MySQL server is up.'
else
echo 'Timeout waiting for MySQL server.'
fi
# Delete all contents of the log and tmp directories
#rm -rfv ./app/log/*
rm -rfv /app/tmp/*
# Create required cache subdirectories
mkdir -p /app/tmp/cache
mkdir -p /app/tmp/cache/models
mkdir -p /app/tmp/cache/persistent
mkdir -p /app/tmp/cache/views
mkdir -p /app/tmp/sessions
mkdir -p /app/tmp/tests
mkdir -p /app/tmp/logs
chmod -R 777 /app/tmp/*
# Bring the CakePHP schema up to date; when check_db.php reports an empty
# database (status 0), create the schema, seed data and the sessions table.
echo "### Updating db schema"
cake -app /app schema update -s 1 -y
export MYSQL_STATUS=$(php /app/check_db.php)
echo "MySQL Status: $MYSQL_STATUS"
if [ "$MYSQL_STATUS" -eq 0 ]; then
echo "### Creating initial db schema and populating seed data";
cake -app /app schema create -s 1 -y;
cake -app /app schema create sessions -y;
fi
export MYSQL_STATUS=$(php /app/check_db.php)
echo "MySQL Status: $MYSQL_STATUS"
# HTMLPurifier needs a writable definition cache directory.
chmod 777 /app/Vendor/ezyang/htmlpurifier/library/HTMLPurifier/DefinitionCache/Serializer
# Stream the Apache logs to the container's stdout, then hand PID 1 to Apache.
source /etc/apache2/envvars
tail -F /var/log/apache2/* &
exec apache2 -D FOREGROUND
| true |
b6a307d4deaac8a6011a4a4efeda6403baa0327c | Shell | sid1980/LinuxAdministrator-201810 | /lesson-05 Bash scripts/01.sh | UTF-8 | 4,148 | 3.953125 | 4 | [] | no_license | #!/bin/bash
# Defaults values
######
# First part: lock file that prevents a second concurrent run
LOCK=/var/tmp/initplock
if [ -f $LOCK ]; then
echo Job is already running\!
exit 6
fi
touch $LOCK
# Remove the lock on interrupt/termination/exit, preserving the exit status.
trap 'rm -f "$LOCK"; exit $?' INT TERM EXIT
######
# Tunable defaults, overridable via the command-line options parsed below.
request_type="GET\|POST"
filename="access.log"
amount_of_ip=10
templine=templine
# Option parsing (a good key-parsing pattern found elsewhere)
while [ -n "$1" ]
do
case "$1" in
-f) filename="$2"
#echo "File name is $filename"
shift ;;
-t) request_type="$2"
#echo "request type $request_type"
shift ;;
-l) templine="$2"
#echo "request type $request_type"
shift ;;
-n) amount_of_ip="$2"
#echo "Amount of ip $amount_of_ip"
shift ;;
-h) echo "Script po parse nginx access log
Keys:
-f file name
-t GET or POST request_type
-n Amount of ip to print
-h help page
"
shift ;;
--) shift
break ;;
*) echo "$1 is not an option";;
esac
shift
done
# Echo any remaining (non-option) arguments for debugging.
count=1
for param in "$@"
do
echo "Parameter #$count: $param"
count=$(( $count + 1 ))
done
####################################
# Function to find the line number of the saved marker line; if it is not
# found the result is 0, meaning "start from the beginning of the file".
function strnum {
nameoffile=$1
# Prepare the saved line for copying it into an array
sed -i 's/\[/ /g' $nameoffile
#cat $nameoffile
sed -i 's/\]/ /g' $nameoffile
#cat $nameoffile
sed -i 's/"/ /g' $nameoffile
#cat $nameoffile
sed -i 's/-\s/ /g' $nameoffile
#cat $nameoffile
sed -i 's/\// /g' $nameoffile
#cat $nameoffile
sed -i 's/\s\s\s/ /g' $nameoffile
#cat $nameoffile
sed -i 's/\s\s/ /g' $nameoffile
#cat $nameoffile
sed -i 's/\s\s/ /g' $nameoffile
#cat $nameoffile
# Build an array from the first 10 fields of the saved log line
for i in {1..10}; do array[$i]=`cat $nameoffile | cut -f$i -d' '`; done
#for i in "${array[@]}";do
#echo '############################'
#echo $i
#done
echo ${array[1]}
cat $2 | grep -n ${array[1]} > tempfile
cat tempfile | wc -l
# Narrow the candidate set field by field.
# NOTE(review): "cat tempfile | grep ... > tempfile" truncates tempfile
# before cat reads it, so this loop most likely empties the file — confirm
# the intended behaviour before relying on the result.
for i in {1..10}
do
echo ${array[$i]} #> /dev/null
cat tempfile | grep ${array[$i]} > tempfile
cat tempfile | wc -l
done
number=`cat tempfile | cut -f1 -d':'`
rm -f tempfile
# NOTE(review): "return" is limited to 0-255, so large line numbers wrap.
return $number
}
# Locate the previously saved marker line and cut the log just after it.
strnum $templine $filename
numstr=$?
echo "The new value is" $numstr
cat $filename | wc -l
tail -n +$numstr $filename > tempaccfile
# The logic here: we just cut the file at the previous marker line — the
# last line of the new slice should remain as the reference point for
# subsequent runs.
# Another workaround here — remove one, keep the other.
cat $filename | sed -n 38,38p > templine
# tail -n1 tempaccfile > $templine
cat tempaccfile | wc -l
# Here, since I have no continuously updated file — a workaround; if removed,
# the search will run over the file slice according to the found line.
#filename=tempaccfile
####################################
last_run_time=`date +%s`
echo $last_run_time > last_run_time.tmp
# This is also clumsy — the e-mail address should live in a variable and
# everything be sent as a single message, but the basics already take too
# much time.
cat $filename | grep $request_type | awk '{print $1}' | uniq -c | sort -k1nr | head -n $amount_of_ip > mail.txt
echo "Subject: hello Popular IP Source" | sendmail -v localhost < mail.txt
cat mail.txt
cat $filename | awk -F\" '{print $2}' $filename | awk '{print $2}' | sort | uniq -c | sort -k1nr | head -n $amount_of_ip > mail.txt
echo "Subject: hello Popular URL destination" | sendmail -v localhost < mail.txt
cat $filename | awk '{print $9}' $filename | uniq -c | sort -k1nr > mail.txt
echo "Subject: hello Ansver code" | sendmail -v localhost < mail.txt
######
# Final part of the re-run lock
rm -f $LOCK
trap - INT TERM EXIT
###### | true |
e0c3ab6a2d0970c1283bc72000324adbe52a10aa | Shell | ChillMonk21/UTSem1 | /EE382N-1/Labs/.profile | UTF-8 | 2,901 | 3.375 | 3 | [] | no_license | # This is the default .profile file for the ECE Learning Resource Center.
# It is intended to work with ksh, but should work with any bourne compatible
# shell (such as sh or bash).
#
#-----------------------------------------------------------------------------
# The umask sets the access permissions for any new files you create.
# Common umasks:
# 077 - removes all permissions except the owner's
# 022 - the standard unix default - removes write permissions for
# everyone other than the owner. (Allows others to read most
# of your files.)
# 027 - removes write permissions for the members of a file's group,
# and removes all permissions for all others.
# For an explanation of the octal encoding, see "man chmod".
#
umask 077
#-----------------------------------------------------------------------------
# Modules are an easy, flexible way to customize your environment.
# To see what modules are available on the system, type "module avail".
# Using modules replaces setting your "path" environment variable.
if [ -d /home/projects/Modules/init ]; then
#
# This works for sh, ksh and bash
# except for the set-alias cmd which doesn't work in sh.
# only sort of works in ksh --
# doesn't work at login. one has to specifically execute a bash shell
#
. /home/projects/Modules/init/bash
#
# This line determines how your modules are set up. To change this,
# you should use the following commands at the prompt:
#
# module initadd <module name>
# module initrm <module name>
#
#
module load ece-defaults
else
echo ''
echo The modules directory does not currently exist on this machine.
echo Contact the Systems Administrators Immediately!
echo ''
fi
#
# Keep aliases in your .bash_aliases file they will work for ksh and sh
# and bash. By keeping aliases in these files if/when you run resetenv they
# will not be overwritten. This file is automatically created by
# "module load aliases"
#
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
#
# Set the Prompt based on user and hostname
#
# Cache identity info used to build the prompt below.
USER=`whoami`
HOSTNAME=`hostname`
BASE=`basename $SHELL`
if [ "$BASE" == "ksh" ]; then
PS1='$USER@${HOSTNAME%%.*} (${PWD##$HOME/}) % '
PS2="loop: "
export PS1 PS2
# Fix a few KSH issues with the arrow commands
# Allows ksh to act like Bash
set -o emacs
alias \!\!='fc -e -'
# Fix the backspace key
stty erase \^H erase \^? kill \^U intr \^C
else
# Works for BASH and SH
PS1="$USER@${HOSTNAME%%.*} (\w) % "
PS2="loop: "
export PS1 PS2
fi
#
# set the path to default mailbox
#
MAIL=/var/spool/mail/$USER
MAILPATH=$MAIL
export MAIL MAILPATH
#
# This will set the default editor to emacs. If you want this feature,
# uncomment the next lines
#
#if [ -x /usr/local/bin/emacs ]; then
# export EDITOR=emacs
#fi
# End .profile
| true |
60592a9dae9a263e942265f747c08f2c7acabb47 | Shell | shoracek/kernel-tests | /distribution/ltp/openposix_testsuite/patches/20140422/run-tests.sh | UTF-8 | 2,515 | 3.875 | 4 | [] | no_license | #!/bin/sh
#
# A simple wrapper for pre- and post-execution activities for any given
# openposix test.
#
# run_test contains logic moved out of Makefile.
#
# Garrett Cooper, June 2010
#
# Log file (overridable via the environment) and pass/fail/total counters
# maintained by run_test_loop.
LOGFILE=${LOGFILE:=logfile}
NUM_FAIL=0
NUM_PASS=0
NUM_TESTS=0
# Run every test named on the command line, tallying results in the
# NUM_PASS/NUM_FAIL/NUM_TESTS globals, then print a summary table.
run_test_loop() {
	# "$@" keeps each test name intact even if it contains whitespace
	# (the old unquoted $* would word-split such names).
	for t in "$@"; do
		if run_test "$t"; then
			NUM_PASS=$((NUM_PASS + 1))
		else
			NUM_FAIL=$((NUM_FAIL + 1))
		fi
		# POSIX arithmetic expansion avoids forking expr(1) per counter.
		NUM_TESTS=$((NUM_TESTS + 1))
	done
	cat <<EOF
*******************
SUMMARY
*******************
$(printf "PASS\t\t%3d" $NUM_PASS)
$(printf "FAIL\t\t%3d" $NUM_FAIL)
*******************
$(printf "TOTAL\t\t%3d" $NUM_TESTS)
*******************
EOF
}
# Execute one openposix test binary under the t0 timeout harness, log the
# outcome to $LOGFILE, and return the test's raw exit code.
run_test() {
testname="$TEST_PATH/${1%.*}"
# Per-test scratch log; $$ makes the name unique per script run.
complog=$(basename $testname).log.$$
trace_cmd=""
# A few known-problematic tests are run under strace for post-mortem debugging.
if [ "$1" = "pthread_cond_broadcast_4-1.run-test" -o "$1" = "difftime_1-1.run-test" -o "$1" = "sigtimedwait_1-1.run-test" ]; then
trace_cmd="strace -tt -v -f"
fi
# t0 enforces TIMEOUT_VAL; optional extra arguments come from a
# "<testname>.args" file next to the test.
sh -c "$SCRIPT_DIR/t0 $TIMEOUT_VAL $trace_cmd ./$1 $(cat ./$(echo "$1" | sed 's,\.[^\.]*,,').args 2>/dev/null)" > $complog 2>&1
ret_code=$?
if [ "$ret_code" = "0" ]; then
echo "$testname: execution: PASS" >> "${LOGFILE}"
if [ -n "$trace_cmd" ]; then
cat $complog >> "${LOGFILE}"
fi
elif [ -f "$1" ]; then
# Map the suite's conventional exit codes to human-readable verdicts;
# TIMEOUT_RET is the status t0 reports for a hung test.
case "$ret_code" in
1)
msg="FAILED"
;;
2)
msg="UNRESOLVED"
;;
4)
msg="UNSUPPORTED"
;;
5)
msg="UNTESTED"
;;
$TIMEOUT_RET)
msg="HUNG"
;;
*)
if [ $ret_code -gt 128 ]; then
msg="SIGNALED"
else
msg="EXITED ABNORMALLY"
fi
esac
echo "$testname: execution: $msg: Output: " >> "${LOGFILE}"
cat $complog >> "${LOGFILE}"
echo "$testname: execution: $msg "
else
echo "$testname: execution: SKIPPED (test not present)"
fi
rm -f $complog
return $ret_code
}
# SETUP
# Ensure the log file is writable before running anything; abort the whole
# recipe via the restraint harness if it is not.
if [ -w "$LOGFILE" ] || echo "" > "$LOGFILE"; then
:
else
echo >&2 "ERROR: $LOGFILE not writable" | tee -a $OUTPUTFILE
rstrnt-report-result CHECKLOGS WARN/ABORTED
rstrnt-abort -t recipe
exit
fi
SCRIPT_DIR=$(dirname "$0")
# First positional argument: directory containing the tests.
TEST_PATH=$1; shift
T0=$SCRIPT_DIR/t0
T0_VAL=$SCRIPT_DIR/t0.val
if [ ! -x $T0 ]; then
echo >&2 "ERROR: $T0 doesn't exist / isn't executable" | tee -a $OUTPUTFILE
rstrnt-report-result CHECKLOGS WARN/ABORTED
rstrnt-abort -t recipe
exit
fi
# Cache the exit status t0 reports on timeout so run_test can recognise a
# hung test (see TIMEOUT_RET / "HUNG").
if [ ! -f "$T0_VAL" ]; then
$SCRIPT_DIR/t0 0 >/dev/null 2>&1
echo $? > "$T0_VAL"
fi
if TIMEOUT_RET=$(cat "$T0_VAL"); then
TIMEOUT_VAL=${TIMEOUT_VAL:=240}
if [ -f test_defs ] ; then
. ./test_defs || exit $?
fi
trap '' INT
# RUN
run_test_loop $*
exit $NUM_FAIL
else
exit $?
fi
| true |
5e39fc9dde48ced5df367ede991bf3c1695a2762 | Shell | mkuchin/bash-wharf | /create-node.sh | UTF-8 | 595 | 2.640625 | 3 | [] | no_license | export DIGITALOCEAN_PRIVATE_NETWORKING=true
export DIGITALOCEAN_REGION=sgp1
# "settings" is expected to define deployment variables — presumably
# including REGISTRY_HOST used below (confirm); "token" holds the DO API key.
. settings
export DIGITALOCEAN_ACCESS_TOKEN=$(cat token)
# First argument: name of the docker-machine node to create.
NODE=$1
docker-machine create --driver digitalocean $NODE
#minor host changes
docker-machine ssh $NODE "apt-get install -yq haveged moreutils"
# Allow the node's Docker daemon to talk to our insecure registry.
docker-machine ssh $NODE ". /etc/default/docker && echo DOCKER_OPTS=\'\$DOCKER_OPTS --insecure-registry $REGISTRY_HOST\' > /etc/default/docker"
docker-machine ssh $NODE "service docker restart"
# Report the node's private (eth1) and public (eth0) addresses.
echo IP private: `docker-machine ssh $NODE "ifdata -pa eth1"`
echo IP public: `docker-machine ssh $NODE "ifdata -pa eth0"`
| true |
4871880db31b253ab6ffc513843f3a35e45bba82 | Shell | farseeker/pirate-3d-buccaneer | /SD-Card-Contents/home/buccaneer/bin/mechanical-version/1.0/mechanical-version.sh | UTF-8 | 424 | 3.515625 | 4 | [] | no_license | if [ -z $1 ]; then
# No version argument was supplied: print the usage hint on stderr.
# (>&2 is more portable than redirecting to the /dev/stderr device node.)
echo "mechanical-version.sh : Give as first argument the mechanical version number" >&2
exit
fi
# Look up the requested version's settings line ("<version>:key=val:key=val:...").
result=$(grep -E "^$1:" /home/buccaneer/bin/mechanical-version/current/version.txt)
if [ "$result" == "" ]; then
# Default prudential settings
echo "printing-X-range=130:printing-Y-range=96:printing-Z-range=139" | tr ':' '\n'
else
# Drop the leading version field, then print one setting per line.
echo "$result" | cut -d':' -f1 --complement | tr ':' '\n'
fi
| true |
b2d80720c373b581608fb0216f3f2de20f47e2f8 | Shell | pappu8871/PracticeAssignment | /day5Assignment.sh | UTF-8 | 9,705 | 3.71875 | 4 | [] | no_license | Selection Practice problems with if & else
1.)WAP that reads 5 Random 3 digit value and then outputs the minimum and the maximum value
#!/bin/bash
# Generate 5 random 3-digit values and report the minimum and the maximum.
# BUGFIX: a stray "S$" line (a "command not found" at runtime) was removed,
# and "100 + RANDOM % 889 + 10" only produced 110..998 — "100 + RANDOM % 900"
# covers the full 3-digit range 100..999.
a=$((100 + RANDOM % 900))
b=$((100 + RANDOM % 900))
c=$((100 + RANDOM % 900))
d=$((100 + RANDOM % 900))
e=$((100 + RANDOM % 900))
echo "a=$a"
echo "b=$b"
echo "c=$c"
echo "d=$d"
echo "e=$e"
arrayName=( $a $b $c $d $e )
max=${arrayName[0]}
min=${arrayName[0]}
# Single pass over the values, widening max / narrowing min as needed.
for i in "${arrayName[@]}"
do
if [[ "$i" -gt "$max" ]]; then
max="$i"
fi
if [[ "$i" -lt "$min" ]]; then
min="$i"
fi
done
echo "Max is: $max"
echo "Min is: $min"
2.) Leap year problem
#!/bin/bash
echo "Enter the year (YYYY)"
read year
if [ $((year % 4)) -eq 0 ]
then
if [ $((year % 100)) -eq 0 ]
then
if [ $((year % 400)) -eq 0 ]
then
echo "It is a leap year"
else
echo "It is not a leap year"
fi
else
echo "It is a leap year"
fi
else
echo "It is not a leap year"
fi
(3) date problem
#!/bin/bash -x
read -p " Enter Date:-" date
read -p " Enter Month:-" Month
if (($Month ==3 && $date >=20 && $date <= 31))
then
echo $Month $date "True";
elif (($Month ==4 && $date >= 1 && $date <= 31))
then
echo $Month $date "True";
elif (($Month == 5 && $date >=1 && $date <=31 ))
then
echo $Month $date "True";
elif (($Month ==6 && $date <=20 ))
then
echo $Month $date "True";
else
echo "False";
fi
4). Write a program to simulate a coin flip and print out "Heads" or "Tails" accordingly
#!/bin/bash
# Simulate a fair coin flip: RANDOM % 2 yields 0 or 1 with equal probability.
# BUGFIX: the original "elif [[${Result} ..." was a syntax error (missing
# space after [[), and the unused Head/Tails variables were removed.
Result=$((RANDOM % 2))
if [[ ${Result} -eq 0 ]]; then
    echo "HEADS"
else
    # Result can only be 1 here, so a plain else is both correct and robust.
    echo "Tails"
fi
1)Read a single digit number and write the number in words
#!/bin/bash
echo -n "Enter number : "
read n
case $n in
0) echo "zero " ;;
1) echo "one " ;;
2) echo "two " ;;
3) echo "three " ;;
4) echo "four " ;;
5) echo "five " ;;
6) echo "six " ;;
7) echo "seven " ;;
8) echo "eight " ;;
9) echo "nine " ;;
*)echo "enter value 0 to 1";;
esac
#!/bin/bash
echo -n "Enter number : "
read n
len=$(echo $n | wc -c)
len=$(( $len - 1 ))
echo "Your number $n in words : "
for (( i=1; i<=$len; i++ ))
do
digit=$(echo $n | cut -c $i)
case $digit in
0) echo -n "zero " ;;
1) echo -n "one " ;;
2) echo -n "two " ;;
3) echo -n "three " ;;
4) echo -n "four " ;;
5) echo -n "five " ;;
6) echo -n "six " ;;
7) echo -n "seven " ;;
8) echo -n "eight " ;;
9) echo -n "nine " ;;
esac
done
2). Read a Number and Display the weekday (Sunday,Monday...)
#!/bin/bash
echo "enter a number"
read n
case $n in
1) echo "Sunday" ;;
2) echo "Monday" ;;
3) echo "Tuesday" ;;
4) echo "Wednesday" ;;
5) echo "Thursday" ;;
6) echo "Friday" ;;
7) echo "Saturday" ;;
*) echo "enter value between 1 to 7" ;;
esac
3.Read aNumber 1, 10 ,100, 1000, and display unit,ten,hundred...
#!/bin/bash
echo "Enter a value"
read n
case $n in
1) echo "unit" ;;
10) echo "teen" ;;
100) echo "hundred" ;;
1000) echo "Thousand " ;;
esac
**
#!/bin/bash
echo -value "Give the number: "
read price
thousands=$((price/1000))
hundreds=$((price%1000/100))
teens=$((price%100/10))
units=$((price%10))
for ((i=0 ; i<=$thousands; i++ ))
do
case $thousands in
0) echo -value "Zero";;
1) echo -value "onethousands";;
2) echo -value "twothousands";;
3) echo -value "threethousands";;
4) echo -value "fourthousands";;
5) echo -value "fivethousands";;
6) echo -value "sixthousands";;
7) echo -value "seventhousands";;
8) echo -value "eightthousands";;
9) echo -value "ninethousands";;
10) echo -value "tenthousands";;
esac
done
for ((i=0 ; i<=$hundreds; i++ ))
do
case $hundreds in
0) echo -value "Zero";;
1) echo -value "onehundreds";;
2) echo -value "twohundreds";;
3) echo -value "threehundreds";;
4) echo -value "fourhundreds";;
5) echo -value "fivehundreds";;
6) echo -value "sixhundreds";;
7) echo -value "sevenhundreds";;
8) echo -value "eighthundreds";;
9) echo -value "ninehundreds";;
10) echo -value "tenhundreds";;
esac
done
for ((i=0 ; i<=$teens; i++ ))
do
case $teens in
0) echo -value "Zero";;
1) echo -value "one";;
2) echo -value "two";;
3) echo -value "three";;
4) echo -value "four";;
5) echo -value "five";;
6) echo -value "six";;
7) echo -value "seven";;
8) echo -value "eight";;
9) echo -value "nine";;
10) echo -value "ten";
esac
done
for ((i=0 ; i<=$units; i++ ))
do
case $units in
0) echo -value "Zero";;
1) echo -value "one";;
2) echo -value "two";;
3) echo -value "three";;
4) echo -value "four";;
5) echo -value "five";;
6) echo -value "six";;
7) echo -value "seven";;
8) echo -value "eight";;
9) echo -value "nine";;
10) echo -value "ten";
esac
done
echo "The price is: " 'expr $thousands + $hundreds + $teens + $units'
4). Enter 3 Numbers do following arithmetic operation and
find the one that is maximum and minimum
#!/bin/bash
echo "Please enter your first number: "
read a
echo "Second number: "
read b
echo "Third number: "
read c
p=$(( a + b * c ))
echo "p:$p"
q=$(( c + a / c ))
echo "q:$q"
r=$(( a % b + c ))
echo "r:$r"
s=$(( a * b + c ))
echo "s:$s"
arrayName=( $p $q $r $s )
max=${arrayName[0]}
min=${arrayName[0]}
# Loop through all elements in the array
for i in "${arrayName[@]}"
do
# Update max if applicable
if [[ "$i" -gt "$max" ]]; then
max="$i"
fi
# Update min if applicable
if [[ "$i" -lt "$min" ]]; then
min="$i"
fi
done
# Output results:
echo "Max is: $max"
echo "Min is: $min"
********
Selection Practice problem with case statement
1)Read a single digit number and write the number in words
echo -n "Enter number : "
read n
case $n in
0) echo "zero " ;;
1) echo "one " ;;
2) echo "two " ;;
3) echo "three " ;;
4) echo "four " ;;
5) echo "five " ;;
6) echo "six " ;;
7) echo "seven " ;;
8) echo "eight " ;;
9) echo "nine " ;;
*)echo "enter value 0 to 1";;
esac
2). Read a Number and Display the weekday (Sunday,Monday...)
#!/bin/bash
echo "enter a number"
read n
case $n in
1) echo "Sunday" ;;
2) echo "Monday" ;;
3) echo "Tuesday" ;;
4) echo "Wednesday" ;;
5) echo "Thursday" ;;
6) echo "Friday" ;;
7) echo "Saturday" ;;
*) echo "enter value between 1 to 7" ;;
esac
3.Read aNumber 1, 10 ,100, 1000, and display unit,ten,hundred...
#!/bin/bash
echo -value "Give the number: "
read price
thousands=$((price/1000))
hundreds=$((price%1000/100))
teens=$((price%100/10))
units=$((price%10))
for ((i=0 ; i<=$thousands; i++ ))
do
case $thousands in
0) echo -value "Zero";;
1) echo -value "onethousands";;
2) echo -value "twothousands";;
3) echo -value "threethousands";;
4) echo -value "fourthousands";;
5) echo -value "fivethousands";;
6) echo -value "sixthousands";;
7) echo -value "seventhousands";;
8) echo -value "eightthousands";;
9) echo -value "ninethousands";;
10) echo -value "tenthousands";;
esac
done
for ((i=0 ; i<=$hundreds; i++ ))
do
case $hundreds in
0) echo -value "Zero";;
1) echo -value "onehundreds";;
2) echo -value "twohundreds";;
3) echo -value "threehundreds";;
4) echo -value "fourhundreds";;
5) echo -value "fivehundreds";;
6) echo -value "sixhundreds";;
7) echo -value "sevenhundreds";;
8) echo -value "eighthundreds";;
9) echo -value "ninehundreds";;
10) echo -value "tenhundreds";;
esac
done
for ((i=0 ; i<=$teens; i++ ))
do
case $teens in
0) echo -value "Zero";;
1) echo -value "one";;
2) echo -value "two";;
3) echo -value "three";;
4) echo -value "four";;
5) echo -value "five";;
6) echo -value "six";;
7) echo -value "seven";;
8) echo -value "eight";;
9) echo -value "nine";;
10) echo -value "ten";
esac
done
for ((i=0 ; i<=$units; i++ ))
do
case $units in
0) echo -value "Zero";;
1) echo -value "one";;
2) echo -value "two";;
3) echo -value "three";;
4) echo -value "four";;
5) echo -value "five";;
6) echo -value "six";;
7) echo -value "seven";;
8) echo -value "eight";;
9) echo -value "nine";;
10) echo -value "ten";
esac
done
echo "The price is: " 'expr $thousands + $hundreds + $teens + $units'
**
#!/bin/bash
echo "Enter a value"
read n
case $n in
1) echo "unit" ;;
10) echo "teen" ;;
100) echo "hundred" ;;
1000) echo "Thousand " ;;
esac
4). Write a program that takes user Inputs and does Unit Conversion of different length units
** Feet to Inch
#!/bin/bash -x
echo " Renter a value"
read value
i=$((value * 12))
echo "Total inches is: $i "
** Inch to feet
#!/bin/bash -x
echo " Renter a value"
read value
f=$((value / 12))
echo "Total feet is: $f "
**Meter to Feet
#!/bin/bash -x
echo " Renter a value"
read value
f=$((meter * 3.2808))
echo "Total feet is: $f"
feet to meter
**
#!/bin/bash -x
echo " Renter a value"
read value
m=$((value / 3.2808))
echo "Total meter is: $m"
| true |
936fc7b33ee452ad7ba9e953ad5476954c465bc5 | Shell | agiza/terraform-azurerm-terraform-enterprise | /.scripts/doc-gen.sh | UTF-8 | 1,947 | 3.828125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
# Location and pinned version of the terraform-docs binary downloaded by setup().
BINARY_DIR=./work
BINARY_FILE="${BINARY_DIR}/terraform-docs"
BINARY_VERSION=0.6.0
BINARY_URL_PREFIX="https://github.com/segmentio/terraform-docs/releases/download/v${BINARY_VERSION}/terraform-docs-v${BINARY_VERSION}"
# Arguments passed to terraform-docs, plus the input .tf and output .md names.
DOCS_CMDS="--sort-inputs-by-required --with-aggregate-type-defaults markdown table"
DOCS_DIR=docs
VARS_TF=variables.tf
OUTS_TF=outputs.tf
INS_MD=inputs.md
OUTS_MD=outputs.md
# Download the terraform-docs binary matching the current OS into
# ${BINARY_DIR}, unless it is already cached there.  Exits on any other OS.
function setup {
  mkdir -p "${BINARY_DIR}"
  if [[ -e "${BINARY_FILE}" ]]; then
    return
  fi
  case "$OSTYPE" in
    linux-gnu) BINARY_URL="${BINARY_URL_PREFIX}-linux-amd64" ;;
    darwin*)   BINARY_URL="${BINARY_URL_PREFIX}-darwin-amd64" ;;
    *)
      echo "Please run this in either a Linux or Mac environment."
      exit 1
      ;;
  esac
  echo "Downloading ${BINARY_URL}"
  curl -L -o "${BINARY_FILE}" "${BINARY_URL}"
  chmod +x "${BINARY_FILE}"
}
# Generate the top-level inputs.md / outputs.md under ${DOCS_DIR}: write a
# title header into each file, then append the terraform-docs output.
function main_docs {
  mkdir -p "${DOCS_DIR}"
  local doc
  for doc in "${DOCS_DIR}/${INS_MD}" "${DOCS_DIR}/${OUTS_MD}"; do
    printf '# Terraform Enterprise: Clustering\n\n' > "${doc}"
  done
  eval "${BINARY_FILE} ${DOCS_CMDS} ${VARS_TF}" >> "${DOCS_DIR}/${INS_MD}"
  eval "${BINARY_FILE} ${DOCS_CMDS} ${OUTS_TF}" >> "${DOCS_DIR}/${OUTS_MD}"
}
# Generate per-module docs for every directory under ./modules, mirroring
# the layout produced by main_docs; skips cleanly when ./modules is absent.
function module_docs {
  if [[ ! -d ./modules ]]; then
    echo "No modules directory, skipping."
    return
  fi
  local dir doc
  for dir in ./modules/*; do
    mkdir -p "${dir}/${DOCS_DIR}"
    for doc in "${dir}/${DOCS_DIR}/${INS_MD}" "${dir}/${DOCS_DIR}/${OUTS_MD}"; do
      printf '# Terraform Enterprise: Clustering\n\n' > "${doc}"
    done
    eval "${BINARY_FILE} ${DOCS_CMDS} ${dir}/${VARS_TF}" >> "${dir}/${DOCS_DIR}/${INS_MD}"
    eval "${BINARY_FILE} ${DOCS_CMDS} ${dir}/${OUTS_TF}" >> "${dir}/${DOCS_DIR}/${OUTS_MD}"
  done
}

setup
main_docs
module_docs
| true |
c2182ad84c88fbac22ea077ea878316788fe0dc8 | Shell | alvinctk/dotfiles | /.bash_function | UTF-8 | 4,576 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Announces itself when loaded — presumably sourced from the interactive
# shell startup files (TODO confirm).
echo "Loading global bash function ~/.bash_function"
##################################################################################
######################### function COMMANDS #########################
##################################################################################
# Prompt-style grep: reads a search pattern from the first line of stdin
# and a path from the second, then greps it case-insensitively, recursively
# and with line numbers.  NOTE(review): shadows the standard `fgrep` tool.
function fgrep
{
    local pattern target
    read pattern
    read target
    grep -i -r -n "$pattern" "$target"
}
# Print `du -h` lines for $1 ordered by size: pair each raw-size line with
# its human-readable twin, sort on the raw size, then keep the readable half.
sort-du () {
    local target=$1
    paste -d '#' <(du "$target") <(du -h "$target") \
        | sort -n -k1,7 \
        | cut -d '#' -f 2
}
# Show the ten most frequently used commands from the shell history,
# together with each command's share of all recorded invocations.
hstats() {
    history \
        | awk '{ cmd[$4]++; total++ }
               END { for (c in cmd) print cmd[c] " " cmd[c]/total*100 "% " c }' \
        | sort -nr \
        | column -t \
        | head -n 10
}
# Print each entry of the PATH environment variable on its own line.
function rpath
{
    tr ':' '\n' <<< "$PATH"
}
# Extract an archive chosen by its file extension.
# Usage: extract <archive>
# Fixes the original's unquoted $1, which broke on paths with spaces.
function extract {
    if [ -f "$1" ] ; then
        case "$1" in
            *.tar.bz2) tar xvjf "$1" ;;
            *.tar.gz)  tar xvzf "$1" ;;
            *.bz2)     bunzip2 "$1" ;;
            *.rar)     unrar x "$1" ;;
            *.gz)      gunzip "$1" ;;
            *.tar)     tar xvf "$1" ;;
            *.tbz2)    tar xvjf "$1" ;;
            *.tgz)     tar xvzf "$1" ;;
            *.zip)     unzip "$1" ;;
            *.Z)       uncompress "$1" ;;
            *.7z)      7z x "$1" ;;
            *)         echo "don't know how to extract '$1'..." ;;
        esac
    else
        echo "'$1' is not a valid file!"
    fi
}
# bgrep <start-pattern> <end-pattern> <file>
#
# Prints the lines of <file> strictly between <start-pattern> and
# <end-pattern>: sed prints every start..end range, then two more sed
# passes delete the delimiter lines themselves.
# NOTE(review): both patterns are interpolated into the sed programs, so
# characters special to sed (e.g. '/') in either argument will break it,
# and $search_file is unquoted.
bgrep() {
method_keyword="$1"
closing_braces="$2"
search_file="$3"
sed -n '/'"$method_keyword"'/,/'"$closing_braces"'/p' $search_file \
| sed '/'"$method_keyword"'/d' \
| sed '/'"$closing_braces"'/d'
}
# backup_profile <verbose_flag> <backup_list>
#
# Versioned backup of the files listed (one path per line) in
# <backup_list>: each run bumps a version counter, copies the files into a
# per-version directory under a ".bak.<name>" tree in $HOME, and archives
# that directory with tar+gzip.  Pass "v" as the first argument for
# verbose progress output.
function backup_profile {
local verbose_flag="$1" # v to print backup process.
local verbose_name="v"
local backup_list="$2" # one file on each line to be backup
# to be store in ~/
# Backup dir name derived from the list file name, dots/spaces stripped.
local backup_name="$(basename $backup_list | sed 's/[\. ]//g')";
echo "backup_name is " $backup_name;
# NOTE(review): the tilde here is inside quotes, so it is NOT expanded to
# $HOME — after `cd` below, mkdir creates a directory literally named "~"
# under $HOME.  "$HOME/.bak.$backup_name" was probably intended.
local backup_dir="~/.bak.""$backup_name";
echo "backup_dir is $backup_dir";
# To return this directory after backup files.
local cur_pwd="$(pwd)";
echo "cur_pwd = " $cur_pwd;
# Go to ~; backup directory will be one directory under ~
cd;
mkdir -p $backup_dir && echo "making directory " $backup_dir
cd $backup_dir
local backup_version_file=".bak.version"
# create backup version file if does not exists
# otherwise, update backup_version to previously stored version.
# By default, the oldest version is 1.
if [ ! -f "$backup_version_file" ]; then \
touch "$backup_version_file";
[ "$verbose_flag" == "$verbose_name" ] && echo "touch $backup_version_file";
echo "0" > "$backup_version_file";
fi
# Update version to current version and save it to file.
# Save version to file by overwritting previous data.
# > => overwrite; >> implies append.
local backup_version="$(cat $backup_version_file)"
let backup_version=backup_version+1;
[ "$verbose_flag" == "$verbose_name" ] && echo "backup version is $backup_version";
echo "$backup_version" > "$backup_version_file"
# format of backup suffix filename
local backup_suffix=".backup.v"$backup_version
[ "$verbose_flag" == "$verbose_name" ] && echo "backup suffix is $backup_suffix";
# make directory this version of backup-tag
if [ ! -d "$backup_version" ]; then \
mkdir "$backup_version";
fi
cd $backup_version;
#for backup_this_file in "${to_backup[@]}" ; do \
# Copy each listed file (if it exists) with the version suffix appended.
# \cp bypasses any cp alias (e.g. cp -i).
while read file_to_backup; do
echo "Trying to backup file: $file_to_backup"
[ -f "$file_to_backup" ] && \cp -f "$file_to_backup" "$file_to_backup""$backup_suffix" \
&& echo "Successful backup to ""$pwd/$file_to_backup""$backup_suffix";
done < "$backup_list"
cd ..
# Create archive using gzip
# -z : Compress archive using gzip program
# -c: Create archive
# -v: Verbose i.e display progress while creating archive
# -f: Archive File name
local archive_name="archive.""$backup_dir"".v$backup_version.tar.gz"
# NOTE(review): $pwd (lowercase) is never set in this function, so
# archive_dir resolves to "/<version>"; $PWD or $(pwd) was likely meant.
local archive_dir="$pwd/$backup_version"
[ "$verbose_flag" == "$verbose_name" ] && echo "Attempting to archive: $archive_dir";
tar -zcvf $archive_name $archive_dir && \
[ "$verbose_flag" == "$verbose_name" ] && echo "compress archive: $archive_dir/$archive_name";
cd $cur_pwd;
}
| true |
7f572c3b8cd5a5254b6250f2cfb9fc46c467ccaf | Shell | SrTrek/Scripts_Para_Vps | /bannerssh | UTF-8 | 5,858 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# bannerssh — interactively builds an SSH banner in /etc/bannerssh.net,
# one "<font>" line per prompted color/text pair (1 to 5 lines).
script="Banner Trek"
author="SrTrek"
version="1.0.2"

banner_file="/etc/bannerssh.net"
# Ordinal labels used in the prompts, indexed by line number - 1.
ordinais=("Primeira" "Segunda" "Terceira" "Quarta" "Quinta")

echo "==|$script $version|=="
echo "Criado por: $author"
echo "======================"
sleep 10
clear
echo "==|$script $version|=="
echo "Quantas Linhas Seu Banner Terá?"
echo "Digite: 1,2,3,4 ou 5"
read number

# Reject anything outside 1..5 (the original silently did nothing).
case "$number" in
    [1-5]) ;;
    *)
        echo "Opcao invalida: $number"
        exit 1
        ;;
esac

# Truncate the banner, then append one line per prompt.  The <font> line
# is double-quoted so the chosen color/text are actually expanded — the
# original used single quotes and wrote the literal text '$color1'.
: > "$banner_file"
for ((i = 0; i < number; i++)); do
    echo "==|$script $version|=="
    echo "----------------------------------"
    echo "Digite A Cor Da ${ordinais[i]} Linha? ex:#ffffff ou White"
    read color
    echo "Digite a ${ordinais[i]} Linha do seu Banner"
    read linha
    echo "----------------------------------"
    echo "<font color=\"$color\">$linha" >> "$banner_file"
done

clear
echo "==|$script $version|=="
echo "Banner Atualizado Com Sucesso"
echo "=============================="
| true |
dad301e32a4d0e63f058aaa6192404034e6671d2 | Shell | laisevn/dotfiles | /install | UTF-8 | 1,498 | 3.828125 | 4 | [] | no_license | #!/bin/sh
# Symlink every file under dot/ into $HOME as a dotfile, backing up any
# pre-existing regular file to <name>_old before linking.
# Fixes the original's `mv $target $(target)_old`, which ran a nonexistent
# `target` command, and the fact that backed-up files were never re-linked.
printf "Creating symbolic links to dot files... "
for name in dot/* ; do
  target="$HOME/.$(basename "$name")"
  if [ -e "$target" ] && [ ! -L "$target" ]; then
    echo "WARNING: $target exists but is not a symlink."
    echo "Backuping file $target to ${target}_old"
    mv "$target" "${target}_old"
  fi
  if [ ! -e "$target" ]; then
    echo "Symlinking $target"
    ln -s "$PWD/$name" "$target"
  fi
done

printf "Linking neovim files... "
mkdir -p ~/.vim ~/.config/nvim
ln -s ~/.vimrc ~/.config/nvim/init.vim
ln -s ~/.vim ~/.config/nvim
echo "done"

# NOTE(review): `&>` is a bashism; the file's /bin/sh shebang should be bash.
printf "Fetching dein.vim(Bundler manager)..."
curl -s -L https://raw.githubusercontent.com/Shougo/dein.vim/master/bin/installer.sh | bash -s $HOME/.cache/dein.vim &> /dev/null
if [ "$?" != "0" ]; then
  echo "Dein.vim installation failed"
  exit 1
else
  echo "done"
fi

solarized_dir=$HOME/solarized
printf "Installing Solarized..."
if [ ! -d "$solarized_dir" ]; then
  git clone https://github.com/altercation/solarized.git "$solarized_dir"
else
  echo "done"
fi

if ! type "brew" > /dev/null 2>&1; then
  echo "Homebrew was not found, please install homebrew to continue: http://brew.sh/"
  exit 1
else
  echo "Updating brew formulas, it may take few minutes."
  brew update
  echo "Installing required brew formulas."
  brew bundle
fi

vim -u ~/.vimrc.bundles +"call dein#install()" +"call dein#update()" +qa
nvim -u ~/.vimrc.bundles +"call dein#install()" +"call dein#update()" +UpdateRemotePlugins +qa

echo "Installation completed"
| true |
bd69c7f4ed847c6940590bafae841afbc2eb2f4a | Shell | UncleSteve/test | /run.sh | UTF-8 | 150 | 3.015625 | 3 | [] | no_license | #!/bin/sh
# Launch the given program from the current directory in the background,
# let it run for 10 seconds, then force-kill it.
# Usage: run.sh <program>
if [ -z "$1" ]; then
    echo "Usage: $0 <program>" >&2
    exit 1
fi

echo "Start!"
# Deliberately unquoted so a "prog args" string can be passed as $1.
./$1 &

# get child pid
child_pid=$!
echo "Child pid = $child_pid"

# let it run, then force-kill (SIGKILL cannot be trapped by the child)
sleep 10
kill -9 "$child_pid"
echo "Done!"
| true |
cf67e171bac5d097d96ac4dc534807f98c62482e | Shell | formal-verification-research/vnncomp2021-verapak-scripts | /components/wrangle.sh | UTF-8 | 582 | 3.515625 | 4 | [] | no_license | #!/bin/bash
TOOL_NAME=verapak
VERSION_STRING=v1

# check arguments
if [ "$1" != "${VERSION_STRING}" ]; then
	echo "Expected first argument to be the version string '$VERSION_STRING', instead got '$1'" 1>&2
	exit 1
fi

if [[ "$#" -ne 3 && "$#" -ne 4 ]]; then
	echo "Expected 3 or 4 arguments (got $#): \"$VERSION_STRING\" <in.pb> <out.pb> [negate : bool]" 1>&2
	exit 1
fi

TF_IN=$2
TF_OUT=$3
# Optional negate flag, defaulting to False when omitted.
DO_NEG=${4:-False}

# Run graph_wrangler over unique temporary copies: the original's fixed
# .tf_in.tmp/.tf_out.tmp names raced between concurrent runs.  The trap
# removes the scratch files even when graph_wrangler fails.
TMP_IN=$(mktemp tf_in.XXXXXX)
TMP_OUT=$(mktemp tf_out.XXXXXX)
trap 'rm -f "$TMP_IN" "$TMP_OUT"' EXIT

cp "$TF_IN" "$TMP_IN"
graph_wrangler main.py "$TMP_IN" "$TMP_OUT" True "$DO_NEG"
cp "$TMP_OUT" "$TF_OUT"
| true |
9a17f25b9c5094c3252546a71b1bbbcf62558449 | Shell | kongming92/6824 | /src/kvpaxos/test_script | UTF-8 | 223 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Run the Go test suite 100 times, hiding benign RPC teardown noise
# (EOF / connection / broken / socket messages) so genuine failures
# stand out.
for run in {1..100}; do
    echo "$run"
    go test 2>/dev/null | egrep -av 'EOF|connection|broken|socket'
done
| true |
f244fe50287a2e247dec3494a08d74d369cb3ce7 | Shell | kabicm/arbor | /example/miniapp/brunel/scale_n_cores.sh | UTF-8 | 2,780 | 3.203125 | 3 | [
"BSD-3-Clause"
] | permissive | # Parameters of the simulation.
# Scaling benchmark for the Brunel network miniapp: builds the project,
# then sweeps the excitatory population size and records setup /
# model-init / model-simulate timings into scale_<n_exc>.txt files.

# Parameters of the simulation.
n_exc=100 # exc population size
n_inh=$((n_exc/4)) # inh popoulation size
n_ext=30 # poisson population size
prop=0.1 # prop of connections from each population
weight=1.2 # exc connections weight
rel_inh_strength=0.5 # relative strength of inhibitory connections
delay=1 # delay of all connections
rate=5 # rate of Poisson neruons
time=100 # simulation time
dt=1 # timestep (ignored)
group_size=100 # size of cell groups
# Multicore parameters.
#n_ranks=(1 2 4 9 18)
n_ranks=(1)
n_cores=(2)
# Runs the simulation with given parameters on n_rank ranks and n_core cores.
run() {
n_rank=$1
n_core=$2
group_size=$((2*n_exc))
# Use multithreading for 36 cores and otherwise no.
if [ $n_core -eq 36 ]
then
srun -n $n_rank -c $n_core ../../build/miniapp/brunel/brunel_miniapp.exe -n $n_exc -m $n_inh -e $n_ext -p $prop -w $weight -d $delay -g $rel_inh_strength -r $rate -t $time -s $dt -G $group_size
else
srun -n $n_rank -c $n_core --hint=nomultithread ../../build/miniapp/brunel/brunel_miniapp.exe -n $n_exc -m $n_inh -e $n_ext -p $prop -w $weight -d $delay -g $rel_inh_strength -r $rate -t $time -s $dt -G $group_size
fi
}
# Single-node variant used below: runs the miniapp directly (no srun) with
# a large, fixed cell-group size.
run_temp() {
group_size=150000
#ssrun -n 1 -c 5 ../../build/miniapp/brunel/brunel_miniapp.exe -n $n_exc -m $n_inh -e $n_ext -p $prop -w $weight -d $delay -g $rel_inh_strength -r $rate -t $time -s $dt -G $group_size
../../build/miniapp/brunel/brunel_miniapp.exe -n $n_exc -m $n_inh -e $n_ext -p $prop -w $weight -d $delay -g $rel_inh_strength -r $rate -t $time -s $dt -G $group_size
}
# Preserve the newline characters by setting this empty (field splitting).
IFS=
cd $SCRATCH/nestmc-proto/build
make -j
cd ../miniapp/brunel
vary_n_exc=(1000 10000 100000)
for n_exc in ${vary_n_exc[@]}
do
#echo "Setting n_exc = "$n_exc"..."
file="scale_"$n_exc".txt"
# NOTE(review): this glob removes ALL scale_*.txt files on every outer
# iteration, discarding the results written for previous n_exc values.
rm scale_*.txt
n_inh=$((n_exc/4))
for n_rank in ${n_ranks[@]}
do
#echo " setting n_core = "$n_core"..."
# Take the output of the simulation.
# NOTE(review): run_temp ignores $n_rank, and $n_core is never assigned
# in this loop (the n_cores array is unused), so the first column of the
# output file is empty.
output=$(run_temp)
#output=$(run $n_rank $n_core)
#output=$(run_locally)
#echo " "$output
# Find the duration of the simulation from stdout.
setup=$(echo $output | awk '/setup/ {print $2}')
model_init=$(echo $output | awk '/model-init/ {print $2}')
model_simulate=$(echo $output | awk '/model-simulate/ {print $2}')
echo $n_core" "$setup" "$model_init" "$model_simulate
# Output n_core and the duration to a file.
echo $n_core" "$setup" "$model_init" "$model_simulate >> $file
done
prop=$(echo "$prop/10.0" | bc -l)
done
| true |
726630256a4d3531acfa8504247b598e2c3ec16c | Shell | UoE-macOS/lab | /labconfig-macapps-links | UTF-8 | 1,565 | 3.484375 | 3 | [] | no_license | #!/bin/bash
######
#
# Date: Thu 2 Aug 2018 14:37:21 BST
# Version: 0.2
# Author: dsavage
#
######

# Registers an application shortcut for the MacSD "Applications" tree:
# builds /Library/MacSD/Applications/<Category>, sets the folder icon and
# writes a LN~<Category>~<App> conf file describing the link to create.

# Need to define a category for the app from the standard set of:
# Accessories
# Audio players & editors
# Data Analysis software
# Design-3D software
# Developer Tools
# Document & file viewers
# Document & text editors
# Graphics software
# Internet tools
# Language & Translation software
# Utilities
# Video editors

# Need to define a category for the app from the standard set.
Category="$4"

# Need to define the path to the app.
ApplicationPath="$5"

# Get the app name.
ApplicationName=$(basename "$ApplicationPath")

# Set our folder paths
MacSD="/Library/MacSD"
ConfigDir="${MacSD}/MacAppsConf"
ConfFile="${ConfigDir}/LN~${Category}~${ApplicationName}"
CategoryPath="${MacSD}/Applications/$Category"
LinkPath="${MacSD}/Applications/$Category/$ApplicationName"

# Build the folder structure (mkdir -p replaces the original
# test-then-mkdir pairs and creates missing parents in one call).
mkdir -p "${MacSD}/Applications"

# Make sure the icon is set
python -c 'import Cocoa; Cocoa.NSWorkspace.sharedWorkspace().setIcon_forFile_options_(Cocoa.NSImage.alloc().initWithContentsOfFile_("/System/Library/CoreServices/CoreTypes.bundle/Contents/Resources/ApplicationsFolderIcon.icns"), "/Library/MacSD/Applications", 0)'

mkdir -p "${ConfigDir}" "${CategoryPath}"

# Rewrite the conf file with the link to add (the > redirect truncates,
# so the original's separate `rm -f` was redundant).
echo "${ApplicationPath}+${LinkPath}" > "${ConfFile}"

exit 0;
| true |
612ac3d693b592338cafe12352a64c8d60621a81 | Shell | ChillyWillyGuru/libyaul | /tools/build-scripts/sh-elf/scripts/build | UTF-8 | 7,671 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Copyright (c) 2012
# See LICENSE for details.
#
# Dave Murphy <davem@devkitpro.org>
# Israel Jacquez <mrkotfw@gmail.com>

# Builds an SH cross toolchain: Binutils, GCC (bootstrapped around Newlib)
# and GDB.  Each configure/build/install step is guarded by a stamp file
# so an interrupted build resumes where it left off.  Expects the caller
# to provide the message/panic helpers plus ${MAKE}, ${SH_TARGET},
# ${BUILD_SRC_DIR}, ${BUILD_INSTALL_DIR} and the *_SRC_DIR directories.

# run_step <stamp> <log-name> <message> <command...>
#
# Runs <command...> once (in a subshell, so env-var prefixes via `env`
# don't leak): skipped when "${BUILD_SRC_DIR}/${SH_TARGET}/<stamp>"
# exists, otherwise output is appended to
# "${BUILD_SRC_DIR}/<log-name>-${SH_TARGET}.log" and the stamp is created
# on success.  Panics, pointing at the log, on failure.
run_step() {
    _stamp="${BUILD_SRC_DIR}/${SH_TARGET}/${1}"
    _log="${BUILD_SRC_DIR}/${2}-${SH_TARGET}.log"
    _msg="${3}"
    shift 3

    if [ ! -f "${_stamp}" ]; then
        message "${_msg}"
        ("$@") 1>> "${_log}" 2>&1 \
            || panic "See '${_log}'" 1
        touch "${_stamp}"
    fi
}

# Build and install Binutils
mkdir -p "${BUILD_SRC_DIR}/${SH_TARGET}/binutils"
cd "${BUILD_SRC_DIR}/${SH_TARGET}/binutils"

run_step "configured-binutils" "binutils" "Configuring Binutils" \
    env CFLAGS="" LDFLAGS="" \
    ../"${BINUTILS_SRC_DIR}"/configure \
        --disable-debug \
        --disable-nls \
        --disable-shared \
        --disable-threads \
        --prefix="${BUILD_INSTALL_DIR}/${SH_TARGET}" \
        --target="${SH_TARGET}" \
        --with-gcc \
        --with-gnu-as \
        --with-gnu-ld \
        --with-stabs
run_step "built-binutils" "binutils" "Building Binutils" "${MAKE}"
run_step "installed-binutils" "binutils" "Installing Binutils" "${MAKE}" install
cd "${OLDPWD}"

# Build and install just the C compiler (first pass, before Newlib).
# Note: the original passed --disable-multilib twice; listed once here.
mkdir -p "${BUILD_SRC_DIR}/${SH_TARGET}/gcc"
cd "${BUILD_SRC_DIR}/${SH_TARGET}/gcc"

run_step "configured-gcc" "gcc" "Configuring GCC" \
    env CFLAGS="" LDFLAGS="" \
        CFLAGS_FOR_TARGET="-O2 -m2 -mb -fomit-frame-pointer" \
        LDFLAGS_FOR_TARGET="" \
    ../"${GCC_SRC_DIR}"/configure \
        --disable-cld \
        --disable-initfini-array \
        --disable-largefile \
        --disable-libstdcxx-pch \
        --disable-multilib \
        --disable-nls \
        --disable-objc-gc \
        --disable-rpath \
        --disable-shared \
        --disable-threads \
        --disable-tls \
        --disable-win32-registry \
        --enable-decimal-float=no \
        --disable-frame-pointer \
        --enable-languages=c,c++ \
        --enable-static \
        --prefix="${BUILD_INSTALL_DIR}/${SH_TARGET}" \
        --program-prefix="${SH_TARGET}-" \
        --target="${SH_TARGET}" \
        --with-endian=big \
        --with-cpu=m2 \
        --with-gcc \
        --with-gnu-as \
        --with-gnu-ld \
        --without-dwarf2 \
        --with-newlib
run_step "built-gcc" "gcc" "Building GCC" "${MAKE}" all-gcc
run_step "installed-gcc" "gcc" "Installing GCC" "${MAKE}" install-gcc
unset "CFLAGS"
cd "${OLDPWD}"

# Build and install newlib
mkdir -p "${BUILD_SRC_DIR}/${SH_TARGET}/newlib"
cd "${BUILD_SRC_DIR}/${SH_TARGET}/newlib"

run_step "configured-newlib" "newlib" "Configuring Newlib" \
    env CFLAGS="-O2 -fomit-frame-pointer -ffast-math -fstrict-aliasing" \
    ../"${NEWLIB_SRC_DIR}"/configure \
        --disable-bootstrap \
        --disable-build-poststage1-with-cxx \
        --disable-build-with-cxx \
        --disable-cloog-version-check \
        --disable-dependency-tracking \
        --disable-libada \
        --disable-libquadmath \
        --disable-libquadmath-support \
        --disable-libssp \
        --disable-maintainer-mode \
        --disable-malloc-debugging \
        --disable-multilib \
        --disable-newlib-atexit-alloc \
        --disable-newlib-hw-fp \
        --disable-newlib-iconv \
        --disable-newlib-io-float \
        --disable-newlib-io-long-double \
        --disable-newlib-io-long-long \
        --disable-newlib-mb \
        --disable-newlib-multithread \
        --disable-newlib-register-fini \
        --disable-newlib-supplied-syscalls \
        --disable-objc-gc \
        --enable-lto \
        --enable-newlib-io-c99-formats \
        --enable-newlib-io-pos-args \
        --enable-newlib-reent-small \
        --prefix="${BUILD_INSTALL_DIR}/${SH_TARGET}" \
        --target="${SH_TARGET}" \
        --with-endian=big \
        --with-cpu=m2 \
        --without-cloog \
        --without-gmp \
        --without-mpc \
        --without-mpfr
run_step "built-newlib" "newlib" "Building Newlib" "${MAKE}"
run_step "installed-newlib" "newlib" "Installing Newlib" "${MAKE}" install
cd "${OLDPWD}"

# Build and install the final compiler (full build, now that Newlib exists)
cd "${BUILD_SRC_DIR}/${SH_TARGET}/gcc"

run_step "built-g++" "gcc" "Building GCC" "${MAKE}"
run_step "installed-g++" "gcc" "Installing GCC" "${MAKE}" install
cd "${OLDPWD}"

# Build and install GDB
mkdir -p "${BUILD_SRC_DIR}/${SH_TARGET}/gdb"
cd "${BUILD_SRC_DIR}/${SH_TARGET}/gdb"

run_step "configured-gdb" "gdb" "Configuring GDB" \
    env CFLAGS="" LDFLAGS="" \
    ../"${GDB_SRC_DIR}"/configure \
        --disable-nls \
        --disable-werror \
        --prefix="${BUILD_INSTALL_DIR}/${SH_TARGET}" \
        --target="${SH_TARGET}"
run_step "built-gdb" "gdb" "Building GDB" "${MAKE}"
run_step "installed-gdb" "gdb" "Installing GDB" "${MAKE}" install
cd "${OLDPWD}"
78e64fa72b257d92c7b8bb9e5f148c2bc518d5f7 | Shell | ClydeHuibregtse/dynamo | /install.sh | UTF-8 | 1,470 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# Installs the Dynamo service for the given user: init script, log/data
# directories, web interface, the dynamo-policies checkout and (optional
# "production" mode) the user's crontab entries.  Must run as root.
export USER=$1

if ! [ $USER ]
then
echo "Usage: install.sh <user> [production]"
exit 1
fi

if [ "$2" = "production" ]
then
PRODUCTION=1
fi

# Resolve the repository root from this script's own location.
export DYNAMO_BASE=$(cd $(dirname ${BASH_SOURCE[0]}); pwd)

source $DYNAMO_BASE/etc/profile.d/init.sh

# DAEMONS
# Instantiate the SysV init script template with the install paths/user.
sed -e "s|_DYNAMO_BASE_|$DYNAMO_BASE|" -e "s|_USER_|$USER|" $DYNAMO_BASE/sysv/dynamod > /etc/init.d/dynamod
chmod +x /etc/init.d/dynamod

# DIRECTORIES
# Log/data dirs are root-owned but group-writable by the service user.
mkdir -p $DYNAMO_LOGDIR
chmod 775 $DYNAMO_LOGDIR
chown root:$(id -gn $USER) $DYNAMO_LOGDIR
mkdir -p $DYNAMO_DATADIR
chmod 775 $DYNAMO_DATADIR
chown root:$(id -gn $USER) $DYNAMO_DATADIR

# WEB INTERFACE
$DYNAMO_BASE/web/install.sh

# POLICIES
[ -e $DYNAMO_BASE/policies ] || git clone https://github.com/SmartDataProjects/dynamo-policies.git $DYNAMO_BASE/policies

# NRPE PLUGINS
if [ -d /usr/lib64/nagios/plugins ]
then
sed "s|_DYNAMO_BACKUP_PATH_|$DYNAMO_BACKUP_PATH|" $DYNAMO_BASE/etc/nrpe/check_dynamo.sh > /usr/lib64/nagios/plugins/check_dynamo.sh
chmod +x /usr/lib64/nagios/plugins/check_dynamo.sh
fi

# Pin the policies checkout to the tag recorded in etc/policies.tag.
cd $DYNAMO_BASE/policies
TAG=$(cat $DYNAMO_BASE/etc/policies.tag)
echo "Checking out policies tag $TAG"
git checkout master
git pull origin
git checkout $TAG 2> /dev/null
cd - > /dev/null

if [ $PRODUCTION ]
then
# CRONTAB
# Merge the template entries into the user's crontab, de-duplicating.
# NOTE(review): sort|uniq reorders the user's existing crontab lines.
crontab -l -u $USER > /tmp/$USER.crontab
sed "s|_DYNAMO_BASE_|$DYNAMO_BASE|" $DYNAMO_BASE/etc/crontab >> /tmp/$USER.crontab
sort /tmp/$USER.crontab | uniq | crontab -u $USER -
rm /tmp/$USER.crontab
fi
| true |
e7bf218b7b6890601560ba6899378969369649ed | Shell | marlyhaas/CPE522_HW4 | /mysetup.sh | UTF-8 | 348 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# CPE522 homework setup: cap the CPU frequency, inspect the practice
# directory, and record $PATH into a comments file.
sudo apt install cpufrequtils
echo -e
# Pin the CPU frequency to 600 MHz and show the resulting settings.
sudo cpufreq-set -f 600MHz
cpufreq-info
echo -e
location=/home/debian/practice_cpe522
echo $location
echo -e
# Count the entries in the practice directory.
items=$(ls /home/debian/practice_cpe522 | wc -l)
echo $items
echo -e
# NOTE(review): creates comments.txt in the *current* directory, while the
# append below writes to $HOME/practice_cpe522/comments.txt — confirm which
# file was intended.
touch comments.txt
PATH=$PATH:$HOME/practice_cpe522
export PATH
echo "$PATH" >> "$HOME/practice_cpe522/comments.txt"
| true |
3ac49fefa4318c5b787fb0f81c951fb01a6ec7b4 | Shell | cmpeters08/bash-presentation | /bash-talk-code-club/hello-function.sh | UTF-8 | 79 | 2.53125 | 3 | [] | no_license | #!/usr/bin/bash
# Print a greeting for the given name.
function say_hello {
    local who="$1"
    echo -e "Hello $who"
}

say_hello "Edgar"
| true |
cd321b805f3cad150c12f32442891a47f555aecd | Shell | nickvonklemp/Bash-Scripts | /install/netronome-repo.sh | UTF-8 | 1,574 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# Adds netronome repo list to package manger
#
# Detects the distribution from /etc/os-release, imports Netronome's
# signing key and registers their apt (Ubuntu) or yum (RHEL/CentOS)
# repository.  Must run with privileges to write under /etc.

script_dir=$(pwd)
echo $script_dir

# Distro detection: the quoted ID="..." forms match RHEL/CentOS entries,
# the unquoted form matches Ubuntu's ID=ubuntu line.
RHEL=$(cat /etc/os-release | grep 'ID="rhel"' | cut -d '"' -f2 | cut -d '"' -f1)
CENTOS=$(cat /etc/os-release | grep 'ID="centos"' | cut -d '"' -f2 | cut -d '"' -f1)
UBUNTU=$(cat /etc/os-release | grep "ID=ubuntu" | cut -d '=' -f2)

if [ "$UBUNTU" == "ubuntu" ]; then
echo "Ubuntu system detected"
# First download debian package key
echo "Downloading netronome public key"
wget https://deb.netronome.com/gpg/NetronomePublic.key
# Ubuntu specific apt-get changes
echo "Adding public key to apt package"
apt-key add NetronomePublic.key
# Add netronome's repository
mkdir -p /etc/apt/sources.list.d
echo "deb https://deb.netronome.com/apt stable main" > /etc/apt/sources.list.d/netronome.list
echo "Done, updating packages"
# Update repository lists
apt-get update
echo "Cleaning up"
rm -r NetronomePublic.key
fi

if [[ ( "$RHEL" == "rhel" ) || ( "$CENTOS" == "centos" ) ]]; then
echo "RHEL/Centos system detected"
# First download rpm package key
yum install -y wget
echo "Downloading netronome public key"
wget https://rpm.netronome.com/gpg/NetronomePublic.key
# RHEL/Centos specific yum changes
echo "Adding public key to yum package"
rpm --import NetronomePublic.key
# Add netronome's repository
# NOTE(review): appends on every run — repeated runs duplicate the stanza.
cat << EOF >> /etc/yum.repos.d/netronome.repo
[netronome]
name=netronome
baseurl=https://rpm.netronome.com/repos/centos
EOF
# Update repository lists
echo "Done, updating packages"
yum updateinfo
echo "Cleaning up"
rm -r -f NetronomePublic.key
fi
| true |
b428fa4eaa180d0bfc9e17565438d471167b93a7 | Shell | aucfan-arie/dotfiles | /vimfiles/templates/shellscript.txt | UTF-8 | 383 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# ======================================
# Name :
#
# About :
#
# Usage :
#
# ======================================
BASEDIR=$(cd $(dirname $0)/../; pwd)
if [ -L $0 ]; then
KERNEL=`uname`
if [ $KERNEL = "Darwin" ]; then
BASEDIR=$(cd $(dirname $(dirname $0)/$(readlink $0))/../; pwd)
else
BASEDIR=$(cd $(dirname $(readlink -f $0))/../; pwd)
fi
fi
| true |
e78444360ae1e164c1d48772ec47eecc65dc06ea | Shell | owalch/oliver | /linux/bash/arguments.sh | UTF-8 | 294 | 3.484375 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
set -e

# Demonstrates the positional-parameter variables and copying "$@" into
# an indexed array.
echo '-> $1 =' "$1"
echo '-> $2 =' "$2"
echo '->' '$@' "= $@"
echo '-> $# =' "$#" '(Number of Arguments)'

# Copy the command-line arguments into an array.
argv=("$@")

# Show the first two entries individually.
echo "Array[0]=${argv[0]}"
echo "Array[1]=${argv[1]}"

exit 0
| true |
e03594b05bd4189f6ba52d4eed60ef430982a10b | Shell | Bin-Guan/tools | /Allele_count_RUSH2A.sh | UTF-8 | 419 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
##Editing in excel: remove frameshift in column 2, remove the extra "," in the first column in the last row.
##Save as Tab-delimited file RUSH2A.txt.

# Strip DOS carriage returns from the Excel export.
# NOTE(review): gsub's substitution count is used as the awk pattern, so
# lines containing no \r are NOT printed — only works if every kept line
# ends in \r.
awk 'gsub(/\r/,"")' RUSH2A.txt > RUSH2A.temp
# Per data row, compute an allele count as 1 + (#"hom" markers) +
# (#commas) found in column 1 — gsub() is used purely for its match count
# (it also empties the working copy of $1, which is fine since only the
# synthesized $3 survives the final cut).  The counts are emitted under a
# "RUSH2A-AC" header and pasted back onto the cleaned table.
cut -f 1,2 RUSH2A.temp | awk -F"\t" 'BEGIN{OFS="\t"} NR==1 {$3 = "RUSH2A-AC"; print $0} NR>1 {$3 = gsub(/hom/, "", $1) + gsub(/\,/, "", $1) + 1; print $0}' - | cut -f 3 | paste RUSH2A.temp - > RUSH2A_AC.txt
| true |
f88debdc7196169734861f196175b226b0e2dc5c | Shell | wade1990/flynn | /appliance/postgresql/start.sh | UTF-8 | 314 | 3.234375 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Container entry point: dispatches on the first argument to either the
# postgres daemon (after fixing /data ownership/permissions and dropping
# to the postgres user) or the HTTP API binary.  Remaining arguments are
# forwarded to the exec'd binary.
# NOTE(review): "$@" would preserve argument word boundaries; unquoted $*
# re-splits arguments containing spaces.
case $1 in
postgres)
chown -R postgres:postgres /data
chmod 0700 /data
shift
exec sudo \
-u postgres \
-E -H \
/bin/flynn-postgres $*
;;
api)
shift
exec /bin/flynn-postgres-api $*
;;
*)
echo "Usage: $0 {postgres|api}"
exit 2
;;
esac
| true |
6181328636a91f817b368db6a733cae1a80bda4b | Shell | so2boy/useful_scripts | /scripts/backup_to_hdfs.sh | UTF-8 | 1,192 | 3.671875 | 4 | [] | no_license | #!/bin/sh
# backup conf and scripts to hdfs
# 4 5 * * * cd /home/serving/bin && sh ./backup_to_hdfs.sh /serving/reco_news >> /home/serving/bin/backup_to_hdfs.log 2>&1
#
# Tars the bin/ and conf/ directories found (up to two levels deep) under
# the given directory, uploads the timestamped archive to HDFS, and keeps
# only the two most recent uploads for this host/path.
if [ "$1" == "" ]; then
echo "Usage: $0 <backup_dir>"
exit 1
fi
cd $1
# Archive name encodes this host's IP plus the backup path (slashes
# replaced by underscores) so uploads from different machines don't clash.
HOSTNAME=`hostname`
IP=`host $HOSTNAME | awk '{print $4}'`
export JAVA_HOME="/usr/jdk_home/"
export HADOOP_USER_NAME=yumeng
HDFS="/serving/hadoop/hadoop-2.6.0/bin/hdfs dfs"
TAR_NAME=$IP`pwd | sed 's/\//_/g'`".tgz"
# Collect bin/ and conf/ directories (following symlinks) to archive.
find -L . -maxdepth 2 -name bin > dirs.txt
find -L . -maxdepth 2 -name conf >> dirs.txt
DIRS=`cat dirs.txt`
echo "Tar to $TAR_NAME"
tar zcf $TAR_NAME $DIRS
TIMESTAMP=`date +"%Y%m%d_%H%M%S"`
HDFS_PATH="/user/yumeng/backup/disk/"$TAR_NAME"_"$TIMESTAMP
echo "Upload $TAR_NAME to hdfs $HDFS_PATH"
$HDFS -copyFromLocal $TAR_NAME $HDFS_PATH
rm -vf $TAR_NAME
rm -vf dirs.txt
# Retention: list this archive's uploads sorted by (timestamped) name and
# delete all but the newest two.
$HDFS -ls "/user/yumeng/backup/disk/${TAR_NAME}_*" | awk '{print $8}' | sort > old_backups.txt
BACKUP_CNT=`cat old_backups.txt | wc -l`
if [ $BACKUP_CNT -gt 2 ]; then
echo "Clean old backup"
let CLEAN_CNT=$BACKUP_CNT-2
for old_backup in `head -n $CLEAN_CNT old_backups.txt`; do
$HDFS -rm $old_backup
done
fi
rm -vf old_backups.txt
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.