blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8550b6e8dc775e61909c44f7e3048795a6219905
|
Shell
|
nkahm/bashScripts
|
/inputGen.sh
|
UTF-8
| 160
| 2.640625
| 3
|
[] |
no_license
|
l=2
# Regenerate input.dat for each layer directory l2..l12.
# FIX: the cd path used $i (never defined) instead of the loop variable $l,
# so every iteration targeted ../bumpy/l/p1 rather than ../bumpy/l<N>/p1.
while [[ $l -le 12 ]];
do
cd ../bumpy/l$l/p1
python generateInputDAT.py > input.dat
cd ../../../scripts/
(( l++ ))
done
| true
|
c912628d830e15874e24a261933776fba1931c25
|
Shell
|
PenguinCSC/PostInstallScripts
|
/mka2z.bash
|
UTF-8
| 77
| 2.828125
| 3
|
[] |
no_license
|
#! /bin/bash
# Create one bucket directory per leading character: "0-9" plus a..z.
mkdir ./0-9
for bucket in {a..z}
do
mkdir "$bucket"
done
| true
|
04d6d3da82850be854c3e23402a5c108b67e7b25
|
Shell
|
mineshmane/ShellScripting
|
/set5/conversion.sh
|
UTF-8
| 468
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Interactive length-unit converter; arithmetic is delegated to bc.
# FIX: the menu printf had "\ " where "\n" was intended, so options 3 and 4
# ran together on one line; also removed the unused result=$((60*40)) and
# fixed the "enetr"/"mters" typos in user-facing messages.
printf "Enter your choice 1.FeetToInch\n 2.InchToFeet\n 3.FeetToMeter\n 4.meterTofeet "
read number
case $number in
1)
# feet -> inches (1 ft = 12 in)
read -p "Enter feet" x
echo "scale=3; $x*12"|bc
;;
2)
read -p "Enter in inch :" x
echo "scale=2; $x / 12" | bc
;;
3)
# feet -> meters (1 ft = 0.3048 m)
read -p "Enter in feet one side" x
echo "in meters:"
echo " scale=3; $x*0.3048"| bc
;;
4)
read -p "Enter in meter : " x
echo "scale=2; $x /0.3048 " | bc
;;
esac
| true
|
8cb69662fa39998544ec2f48f708a794ce8d404c
|
Shell
|
jensp/Arch-Linux-on-i586
|
/extra/cpufreqd/PKGBUILD
|
UTF-8
| 837
| 2.53125
| 3
|
[] |
no_license
|
# $Id: PKGBUILD 36633 2009-04-26 15:34:01Z andrea $
# Maintainer: Andrea Scarpino <andrea@archlinux.org>
# Contributor: Kevin Piche <kevin@archlinux.org>
# Contributor: Manolis Tzanidakis <manolis@archlinux.org>
# Arch Linux PKGBUILD for cpufreqd: metadata variables consumed by makepkg,
# followed by the build() hook.
pkgname=cpufreqd
pkgver=2.3.4
pkgrel=1
pkgdesc="A small daemon to adjust cpu speed (and indeed voltage)"
arch=('i586' 'i686' 'x86_64')
url="http://sourceforge.net/projects/cpufreqd"
license=('GPL2')
depends=('cpufrequtils')
install="$pkgname.install"
# Config file preserved across upgrades.
backup=(etc/cpufreqd.conf)
options=('!libtool')
source=(http://downloads.sourceforge.net/$pkgname/$pkgname-$pkgver.tar.bz2
'cpufreqd')
md5sums=('f4193f688305566a8422dd3989667668'
'ae7b0ec1e8e9f9e7f05fb83749af4ed4')
# Build and stage the package: configure/make in the unpacked source tree,
# install into $pkgdir, and add the rc.d init script shipped alongside.
# (`|| return 1` is the pre-makepkg-3 error convention kept by this repo.)
build() {
cd $srcdir/$pkgname-$pkgver
./configure --prefix=/usr \
--sysconfdir=/etc
make || return 1
make DESTDIR=$pkgdir install
install -D -m 755 $srcdir/cpufreqd $pkgdir/etc/rc.d/cpufreqd || return 1
}
| true
|
536ccfed7659b04ef46cc348fa7b3ae934d8da96
|
Shell
|
cbg-ethz/WES_Cancer_Sim
|
/sim_cancer/cancer_data_pipeline/utils/fix_bam_header.sh
|
UTF-8
| 2,444
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# fix_bam_header <bamFiles> <extension> <outputDir> [ --run-opt <submit> ]
#
# For every BAM in the (whitespace-separated) list, deduplicate its header
# with `samtools view -H | uniq`, rewrite the BAM with Picard's
# ReplaceSamHeader, and index the result. With --run-opt submit, each BAM is
# instead dispatched as an SGE job (qsub) that re-sources this script and
# calls the function locally on the cluster node.
# Relies on externally defined $samtools, $picard_tools and $gitDir.
function fix_bam_header()
{
if [ -z "$1" -o -z "$2" -o -z "$3" ]; then
echo "Usage: $0 <bamFiles> <extension> <outputDir> [ --run-opt <submit> ]"
# NOTE(review): `exit -1` is non-portable (becomes 255); also exits the
# caller's shell since this file is meant to be sourced.
exit -1
fi
local bamFiles=$1
local ext=$2
local outdir=$3
# Drop the three positional args so only options remain.
for i in {1..3}
do
shift;
done
local run_opt="local"
while [ 1 -lt $# ]; do
if [ $1 == "--run-opt" ]; then
shift;
run_opt=$1;
else
echo did not understand arg $1
shift
fi;
done
mkdir -p $outdir
for fn_bam in `echo $bamFiles`; do
# Output name: strip the given extension, keep the basename, add .bam.
local fn_result=$outdir/`basename ${fn_bam%$ext}`.bam
if [ -f $fn_result ]; then
echo "bam file $fn_result already exists."
continue;
elif [ -h $fn_result ]; then # symbolic link exists
echo "bam file $fn_result already exists."
continue;
else
echo "bam file $fn_result does not yet exist."
fi
if [ $run_opt == "submit" ]; then
echo "submit $fn_bam"
local log_dir=$outdir/logs/
mkdir -p $log_dir
local fn_out=${log_dir}reheader_`basename ${fn_bam}`.o
local fn_err=${log_dir}reheader_`basename ${fn_bam}`.e
echo "" > $fn_err
echo "" > $fn_out
# Throttle: wait until fewer than 16 reheader jobs are queued/running.
num_jobs=1000
while [ "$num_jobs" -gt "15" ]
do
sleep 30
num_jobs=`qstat | grep reheader | wc -l`
echo num_jobs: $num_jobs
done
queue="-q regular.q@bs-dsvr02,regular.q@bs-dsvr04,regular.q@bs-dsvr05,regular.q@bs-dsvr08"
echo "echo ${fn_bam} > $fn_out; hostname >> $fn_out; source `find ${gitDir} -name fix_bam_header.sh` ; source `find ${gitDir} -name paths.sh`; source `find ${gitDir} -name utils.sh`; fix_bam_header $fn_bam $ext $outdir " | qsub $queue -o $fn_out -e $fn_err -N reheader_`basename ${fn_bam}` -cwd -V
else
unique_header=$outdir/`basename ${fn_bam}`_unique_header.txt
if [ ! -f $unique_header ]; then
echo "$samtools view -H $fn_bam | uniq > $unique_header"
$samtools view -H $fn_bam | uniq > $unique_header
else
echo "$unique_header already exists."
fi
if [ ! -f $fn_result ]; then
echo "$picard_tools/ReplaceSamHeader.jar INPUT=$fn_bam HEADER=$unique_header OUTPUT=$fn_result"
$picard_tools/ReplaceSamHeader.jar INPUT=$fn_bam HEADER=$unique_header OUTPUT=$fn_result
else
echo "$fn_result already exists."
fi
if [ ! -f ${fn_result}.bai ]; then
echo "$samtools index $fn_result"
# NOTE(review): echoes "$samtools index" but runs bare `samtools` —
# likely should be `$samtools index $fn_result`; confirm which binary
# is intended before changing.
samtools index $fn_result
else
echo "${fn_result}.bai already exists."
fi
fi
done
}
| true
|
74d6863476f67650ab1d3e0d4b2e01f748eb921b
|
Shell
|
etiennesky/ecearth3-post
|
/script/hc_cca.sh
|
UTF-8
| 1,068
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Submit hiresclim postprocessing of an EC-Earth experiment as a batch job:
# instantiate the hc.tmpl template into $SCRATCH/tmp/hc.job and qsub it.
usage()
{
echo "Usage: hc.sh [-a account] [-u user] exp year1 year2 [user]"
echo "Run hiresclim postprocessing of experiment exp in years year1 to year2 for a specific user (optional)"
echo "Options are:"
echo "-a account : specify a different special project for accounting (default: spnltune)"
echo "-u user : analyse experiment of a different user (default: yourself)"
}
# Site/user configuration; defines USERme (and possibly USERexp).
. $HOME/ecearth3/post/conf/conf_users.sh
account=spnltune
while getopts "h?u:a:" opt; do
case "$opt" in
h|\?)
usage
exit 0
;;
u) USERexp=$OPTARG
;;
a) account=$OPTARG
;;
esac
done
shift $((OPTIND-1))
# FIX: missing mandatory arguments is an error; exit non-zero so callers and
# cron wrappers can detect the failure (the original exited 0).
if [ "$#" -lt 3 ]; then
usage
exit 1
fi
# Optional 4th positional arg overrides the -u option.
if [ "$#" -ge 4 ]; then
USERexp=$4
fi
OUT=$SCRATCH/tmp
mkdir -p "$OUT"
# Fill in the job template placeholders (quoted so paths with spaces work).
sed "s/<EXPID>/$1/" < hc.tmpl > "$OUT/hc.job"
sed -i "s/<ACCOUNT>/$account/" "$OUT/hc.job"
sed -i "s/<Y1>/$2/" "$OUT/hc.job"
sed -i "s/<Y2>/$3/" "$OUT/hc.job"
sed -i "s/<USERme>/$USERme/" "$OUT/hc.job"
sed -i "s/<USERexp>/$USERexp/" "$OUT/hc.job"
qsub "$OUT/hc.job"
qstat -u "$USERme"
| true
|
efcbe25f0f655c4a7412f1cb49dbb12ac446ec56
|
Shell
|
sivakrishna0206/Docker-compose
|
/scripts/backup_cron.sh
|
UTF-8
| 1,389
| 3.640625
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Back up the bimserver home directory as a dated tar.gz and rotate old
# archives. In the compose file /opt/backups is a bind mount (host path),
# while tomcat-volume/bim-home/bim-webapps are named docker volumes; either
# can be declared with the short "src:dst" form or the long `type:` form.
#
# source : /var/bimserver/home
# target : /opt/backups/bimserver-YYYY-MM-DD.tar.gz
# rotate : archives older than 2 days are removed
backup_date=$(date "+%Y-%m-%d") # date format : YYYY-MM-DD
SOURCE_PATH='/var/bimserver'
BACKUP_PATH='/opt/backups'
cd ${SOURCE_PATH}
if tar -czf ${BACKUP_PATH}/bimserver-${backup_date}.tar.gz home
then
echo "Backup created successfully"
else
echo "Failed to create backup"
exit 1
fi
# Rotation: drop anything in the backup dir older than 2 days.
find ${BACKUP_PATH} -type f -mtime +2 -exec rm -f {} \;
echo "Cleared older backups"
exit 0
| true
|
3001e06ef5f95cc69d36d68d823d596557d54c29
|
Shell
|
ashukeshri/Smanager
|
/smanager.sh
|
UTF-8
| 776
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Smanager: interactive cleanup of old media under ~/Documents.
# Prompts for a choice, then deletes files of that type older than 90 days.
echo "------Welcome To Smanager------"
echo "Options--"
echo "1. Delete Photos that are 90 days old"
echo "2. Delete videos that are 90 days old"
x=$(pwd)
read ch
cd ~/Documents

# purge_old <ext> <label> - rename spaces to underscores in *.<ext> files
# older than 90 days, then delete them and report a summary.
# (The two original case branches were identical except for extension and
# label, so the duplication is factored out here.)
purge_old() {
local ext=$1 label=$2
find -mtime +90 -name "*.$ext" | rename 's/ /_/g'
local a b
a=$(find -mtime +90 -name "*.$ext")
b=$(find -mtime +90 -name "*.$ext" | wc -l)
if [[ $b -eq 0 ]]; then
echo "no $label are older than 90 days"
else
# intentionally unquoted: $a holds one path per line to delete.
# NOTE(review): filenames containing newlines would still break this;
# find -print0 | xargs -0 would be safer.
rm ${a}
echo "$b $label deleted"
fi
}

case $ch in
1)
purge_old png "pics"
;;
2)
purge_old mp4 "videos"
;;
*)
echo "Invalid choice "
esac
cd ${x}
| true
|
d904729fbb96490dc11908b608268fc8cd8a02e9
|
Shell
|
a-lazy-programmer/AutoScripts
|
/autouninstall/bin/autouninstall
|
UTF-8
| 1,545
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/sh
#Author : Sakir Beg
#Copyright (c) LazieDev
#Script that automatically uninstalls the complete program from your system.
# Prompts for a program name, locates its directory with find, removes it and
# /usr/bin/<name>, then optionally re-checks for leftovers.
COLUMNS=$(tput cols)
title="AutoApp UnInstaller v1.0"
# Center the banner title.
printf "\n%*s\n" $(((${#title}+$COLUMNS)/2)) "$title"
userinput="y"
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
echo Enter Name of the Program to Remove.
#Reading name of the program to be removed.
read pname
echo You have entered $pname.
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
#Extracting the location where the program is present.
# NOTE(review): the redirection applies to the assignment (a no-op), not to
# find, so find's errors still reach the terminal.
loc=$(sudo find / -type d -name $pname*) > /dev/null 2>&1
echo Path where the program resides: $loc
#Temp storing location of the program to a file named path
echo $loc > path
# Keep only the first whitespace-separated match.
content=$(cat path | cut -f1 -d " ")
echo content of the temp before deleting \file is $content
# NOTE(review): $flag is unset on this first comparison, so this branch is
# effectively "content is non-empty".
if [ "$content" != "$flag" ] ; then
echo $content
echo $content > path
flag=$(cat path)
echo content of the flag before deleting \file is $content
echo
sudo rm -r $content > /dev/null 2>&1
else
# NOTE(review): `break` outside any loop is invalid here (bash warns and
# continues); this was probably meant to be `exit`.
break
fi
sudo rm /usr/bin/$pname
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
echo $pname removed successfully
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
echo "Wanna to check if $pname still exists or not!!(Y/N)" ;
read PROCEED
# Default to "y" when the user just presses Enter.
PROCEED="${PROCEED:-${userinput}}"
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
if [ "${PROCEED}" != "n" ] ; then
# NOTE(review): the find output is discarded, so the success message below
# is printed regardless of whether leftovers actually exist.
sudo find / -type d -name $pname* > /dev/null 2>&1
echo $pname doesn\'t exist anywhere.
exit
else
echo Thanks \for using AutoUninstaller :D
exit
fi
| true
|
9f5de344a8bef04041dae86989c040df355e069e
|
Shell
|
manizzle/Interceptera
|
/initial_setup.sh
|
UTF-8
| 1,929
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
##Interceptor First stage setup gateway host
##SecurityGuru.Ca
##Adnan Ahmad
##
# Append a static-IP stanza for the LAN-side interface to /etc/network/interfaces.
function localadaptor {
echo "Enter lan side ethernet adaptor name"
read adaptorname
echo ""
echo ""
echo "Enter subnet range ea. 192.168.99 for /24"
read subnetrange
echo ""
echo ""
echo "allow-hotplug $adaptorname" >> /etc/network/interfaces
echo "iface $adaptorname inet static" >> /etc/network/interfaces
echo "address $subnetrange.1" >> /etc/network/interfaces
echo "netmask 255.255.255.0" >> /etc/network/interfaces
echo "network $subnetrange.0" >> /etc/network/interfaces
echo "broadcast $subnetrange.255" >> /etc/network/interfaces
}
# Install dnsmasq and serve DHCP/DNS on the LAN interface.
function dnsmasquerade {
apt-get install dnsmasq
echo "interface=$adaptorname" >> /etc/dnsmasq.conf
echo "listen-address=127.0.0.1" >> /etc/dnsmasq.conf
echo "domain=localhost.localdomain" >> /etc/dnsmasq.conf
echo "dhcp-range=$subnetrange.100,$subnetrange.200,12h" >> /etc/dnsmasq.conf
}
# Enable kernel IPv4 forwarding persistently.
function ipforwarding {
echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf
}
# Persist the NAT masquerade rule and restore it on interface bring-up.
function iptablessetup {
# FIX: the original had a stray double quote after $gatewayadaptor that
# unbalanced the string and corrupted the saved rule.
echo "iptables -t nat -A POSTROUTING -o $gatewayadaptor -s $subnetrange.0/24 -j MASQUERADE" >> /etc/iptables.save
echo "pre-up iptables-restore < /etc/iptables.save" >> /etc/network/interfaces
}
echo "Initial Setup - Interceptor"
echo "Please select"
echo "1. Local adaptor setup"
echo "2. DNS Masquerading"
echo "3. IP forwarding"
echo "4. Iptables rules"
echo "5. Full setup"
echo "Select the right option 1-5"
read theoption
# $theoption is quoted below so an empty reply no longer makes `[` error out.
# NOTE(review): option 0 is undocumented in the menu and only gathers values;
# kept as-is for backward compatibility.
if [ "$theoption" = 0 ] ; then
echo "Please enter gatewayadaptor name ea. eth0"
read gatewayadaptor
echo "Please enter lan adaptor name ea. eth1"
read adaptorname
echo "Please enter subnet range ea. 192.168.99"
read subnetrange
elif [ "$theoption" = 1 ] ; then
localadaptor;
elif [ "$theoption" = 2 ] ; then
dnsmasquerade
elif [ "$theoption" = 3 ] ; then
ipforwarding
elif [ "$theoption" = 4 ] ; then
iptablessetup
elif [ "$theoption" = 5 ] ; then
# FIX: this branch contained only a comment, which is a bash syntax error
# (empty compound-command body) and made the whole script unrunnable.
# Full setup: gather the gateway adaptor, then run every step in order
# (localadaptor prompts for the LAN adaptor and subnet itself).
echo "Please enter gatewayadaptor name ea. eth0"
read gatewayadaptor
localadaptor
dnsmasquerade
ipforwarding
iptablessetup
fi
| true
|
4ccd27a4943c5cead6f88fd61ca164784631d1b7
|
Shell
|
akikinyan/docker-filewatch
|
/entrypoint.sh
|
UTF-8
| 706
| 4.25
| 4
|
[] |
no_license
|
#!/bin/sh
# Poll a file once per INTERVAL seconds and run a command whenever its
# contents (sha256) change.
usage() {
echo "It takes two arguments to run.
First argument: Monitored file name
Second argument: command executed when the monitored file is updated
ex: ./entrypoint.sh a.cpp 'g++ a.cpp && ./a.out'"
}
# update FILE : print FILE's sha256 digest, or nothing if FILE is missing.
# FIX: $1 is now quoted (paths with spaces), and the redundant
# `echo \`...\`` wrapper around the pipeline was dropped.
update() {
if [ -e "$1" ]; then
openssl sha256 -r "$1" | awk '{print $1}'
fi
}
if [ $# -ne 2 ];
then
usage
exit 1
fi
# Monitoring interval, specified in seconds
INTERVAL=1
no=0
last=$(update "$1")
while true;
do
sleep $INTERVAL
current=$(update "$1")
if [ "$last" != "$current" ];
then
nowdate=$(date '+%Y/%m/%d')
nowtime=$(date '+%H:%M:%S')
# FIX: echo's handling of \t is shell-dependent (dash expands it, bash
# prints it literally); printf produces real tabs everywhere.
printf 'no:%s\tdate:%s\ttime:%s\tfile:%s\n' "$no" "$nowdate" "$nowtime" "$1"
# $2 is deliberately eval'd: it is a user-supplied shell command string.
eval $2
last=$current
no=$((no + 1))
fi
done
| true
|
6f5ac9a42f26c4c08ac3d9780917d33c3f308f78
|
Shell
|
aleksbyte/dotfiles
|
/files/zsh/zshrc
|
UTF-8
| 12,111
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# Interactive zsh configuration: zgen plugin bootstrap, shell options,
# completion, keybindings, prompt and fzf setup. Load order matters.
#{{{# Zgen #
ZSH_DISABLE_COMPFIX=true
# automatically run `zgen reset` if we modify our .zshrc
ZGEN_RESET_ON_CHANGE=("${HOME}/.zshrc")
# Abort early (this file is sourced, so `return` is valid) if XDG_DATA_HOME
# is not configured.
[[ -z $XDG_DATA_HOME ]] && echo 'data home not set' && return
if [[ ! -f $XDG_DATA_HOME/zsh/zgen/zgen.zsh ]]; then
echo " ** zgen not found **"
echo "Making $XDG_DATA_HOME/zsh if it doesn't exist..."
mkdir -p $XDG_DATA_HOME/zsh
echo "Downloading zgen..."
git clone https://github.com/tarjoilija/zgen.git $XDG_DATA_HOME/zsh/zgen
fi
ZGEN_AUTOLOAD_COMPINIT=1
source $XDG_DATA_HOME/zsh/zgen/zgen.zsh
#
# Rebuild the plugin init script only when no saved state exists.
if ! zgen saved; then
echo "Creating a zgen save"
# oh-my-zsh
zgen oh-my-zsh
# oh my zsh plugins
zgen oh-my-zsh plugins/osx
# zgen oh-my-zsh plugins/git
# zgen oh-my-zsh plugins/aws
zgen oh-my-zsh plugins/web-search
# Automatically run zgen update and zgen selfupdate every 7 days
zgen load unixorn/autoupdate-zgen
zgen load BurntSushi/ripgrep complete
zgen load zsh-users/zsh-completions src
zgen load zsh-users/zsh-autosuggestions
ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE='fg=243'
zgen load mafredri/zsh-async
zgen load sindresorhus/pure
zgen save
fi
#}}}
# get direnv working with zsh https://github.com/direnv/direnv/issues/64
# eval "$(direnv hook $SHELL)"
#eval "$(direnv hook /opt/homebrew/bin/zsh)"
# Source the asdf version manager asdf.sh dependency
#. $(brew --prefix asdf)/asdf.sh
#########################################
# #}}}
# #{{{# Options #
# Zsh options
# Changing directories
setopt auto_cd # Change directory without having to write cd.
# Completion
setopt always_to_end # Move the cursor to the end of the completion.
setopt auto_remove_slash # Strip trailing slashes of completions.
setopt complete_in_word # Complete even if the cursor in not at the end.
setopt list_packed # More compact completion lists.
setopt list_types # Show types in completion.
unsetopt rec_exact # Recognize exact, ambiguous matches.
# Expansion and Globbing
setopt extended_glob # Use regex in filename generation.
setopt nomatch # If there is no match, print an error.
# History command configuration
export HISTSIZE=32768 # Maximum events for internal history
export SAVEHIST=32768 # Maximum events in history file
HIST_STAMPS="mm/dd/yyyy"
HISTFILE="$ZSH_CACHE/zhistory"
# setopt share_history
setopt append_history # Append to history, not overwrite. This way multiple sessions have the same history.
setopt extended_history # Save additional metadata to history file.
setopt inc_append_history # Append commands to history immediately.
setopt hist_expire_dups_first # del duplic..s first when HISTFILE size HISTSIZE
setopt hist_ignore_dups # ignore duplicated commands history list
setopt hist_ignore_space # ignore commands that start with space
setopt hist_ignore_all_dups # Remove the old entry and append the new one
setopt hist_verify # show com.with history expansion before running it
setopt hist_find_no_dups # Don't cycle through dupes
setopt hist_reduce_blanks # Trim multiple blanks in history
setopt no_case_glob # set ignore case for ls etc
#
# Input/Output
setopt correct # Command correction.
setopt dvorak # Correct dvorak typing mistakes.
unsetopt flow_control # Disable flow control since I am a new kid.
setopt short_loops # Allow for short forms of for, repeat, select, if and function constructs.
# Job Control
setopt notify # Report the status on background jobs immediately.
# Prompting
setopt prompt_subst # Command substitution and arithmetic expansion.
# Zle (Zsh line editor)
unsetopt beep # Disable beep.
#
export ZSH_THEME="gruvbox"
# SOLARIZED_THEME="dark"
# set -o vi # vi mode
# set -o emacs # emacs mode
#
# setopt PUSHD_IGNORE_DUPS # Don't duplicate dirs to the push/pop list
# setopt auto_menu # Use menu completion after the second completion reques t
# setopt auto_pushd # Makes cd, pushd (cd +1, cd -1)
# setopt always_to_end # .. move the cursor to the end of the word
# setopt complete_in_word # Only complete when completion is confirmed
# setopt correct # Try to autocorrect command
# setopt interactive_comments # Allow comments even in interactive shells
# setopt local_options # Allow functions to have local options
# setopt local_traps # Allow functions to have local traps
# setopt menu_complete # show completion on first tab
# setopt multios # Allow multiple input/output redirection
# setopt nohup # Don't kill background jobs on logout
# setopt prompt_subst # Enable parameter expansion, command substitution, etc.
# setopt prompt_cr # prompt always at beginning of line
# setopt nonomatch # Escape URL's special chars (eg.: ?)
setopt no_list_beep # Don't beep on an ambiguous completion
# unsetopt correct_all # Don't try to autocorrect all arguments
# # Globbing
setopt NO_CASE_GLOB # Case insensitive globbing
setopt EXTENDED_GLOB # Allow the powerful zsh globbing features, see link:
setopt NUMERIC_GLOB_SORT # Sort globs that expand to numbers numerically, not by letter (i.e. 01 2 03)
# #}}}
#############################################################
# #{{{# Completion #
#
# Caching autocompletion
#autoload -Uz compinit
# #if [[ -n ~/.zcompdump(#qN.mh+24) ]]; then
# Run a full (secure) compinit at most once a day; otherwise skip the
# security check with -C for faster startup.
if [[ -n $ZSH_CACHE/zcompdump(#qN.mh+24) ]]; then
compinit -i
else
compinit -C -i
fi
#
setopt COMPLETE_IN_WORD # Allow completion from within a word/phrase
setopt ALWAYS_TO_END # When completing from the middle of a word, move cursor to end of word
setopt MENU_COMPLETE # When using auto-complete, put the first option on the line immediately
setopt COMPLETE_ALIASES # Turn on completion for aliases as well
setopt LIST_ROWS_FIRST # Cycle through menus horizontally instead of vertically
# #
# # Automatically list choices on ambiguous completion
setopt auto_list
# # Automatically use menu completion
setopt auto_menu
# # Move cursor to end if word had one match
setopt always_to_end
# # Menu-like autocompletion selection
zmodload -i zsh/complist
# # Select completions with arrow keys
zstyle ':completion:*' menu select
# # Group results by category
zstyle ':completion:*' group-name ''
# # Enable approximate matches for completion
zstyle ':completion:::::' completer _expand _complete _ignored _approximate
# # Case and hyphen insensitive
zstyle ':completion:*' matcher-list 'm:{a-zA-Z-_}={A-Za-z_-}' 'r:|=*' 'l:|=* r:|=*'
# # Use caching so that commands like apt and dpkg complete are useable
zstyle ':completion::complete:*' use-cache 1
zstyle ':completion::complete:*' cache-path $ZSH_CACHE
# Make zsh know about hosts already accessed by SSH
zstyle -e ':completion:*:(ssh|scp|sftp|rsh|rsync):hosts' hosts 'reply=(${=${${(f)"$(cat {/etc/ssh_,~/.ssh/known_}hosts(|2)(N) /dev/null)"}%%[# ]*}//,/ })'
# #
# # Add functions folder to $fpath array
fpath=($(brew --prefix)/share/zsh/functions \
$(brew --prefix)/share/zsh-completions \
$fpath)
autoload bashcompinit && bashcompinit # Enable bash completion
# github gh completion
# gh completion --shell zsh > $ZSH_CUSTOM/plugins/gh.zsh
compctl -K _gh gh
# Completion
# fpath=(~/.zsh/completion $fpath)
# autoload -Uz compinit && compinit
# SSH hosts completion
#[ -f ~/.ssh/config ] && : ${(A)ssh_config_hosts:=${${${${(@M)${(f)"$(<~/.ssh/config)"}:#Host *}#Host }:#*\**}:#*\?*}}
#[ -f ~/.ssh/known_hosts ] && : ${(A)ssh_known_hosts:=${${${(f)"$(<$HOME/.ssh/known_hosts)"}%%\ *}%%,*}}
#zstyle ':completion:*:*:*' hosts $ssh_config_hosts $ssh_known_hosts
# #}}}
#############################################################
# #{{{# bindkey #
# Keybindings -v-
bindkey -v # I want vi mode to be enabled for Zle.
# # Vi with some Emacs flavor control keys.
# bindkey -v
# # Find the key with: showkey -a ?
bindkey "^a" beginning-of-line # ctrl+a : move to beginning of line
bindkey "^e" end-of-line # ctrl+e : move to end of line (e for end)
bindkey '^b' backward-word # ctrl+b : move to previous word (b for backward)
bindkey '^f' forward-word # ctrl+f : move to next word (f for forward)
# #bindkey "^k" vi-kill-eol # ctrl+k : delete from character to end of line
bindkey "^k" kill-line # ctrl+k : delete from character to end of line
bindkey "^u" kill-whole-line # ctrl+u : clear line
bindkey "^w" backward-kill-word
bindkey "^l" clear-screen
bindkey "^y" yank
# # ⌥ + ← or → - move one word backward/forward ?
# #bindkey "[D" backward-word
# #bindkey "[C" forward-word
# Edit the current command line in $EDITOR (Ctrl-X Ctrl-E, or v in vi mode).
autoload -z edit-command-line
zle -N edit-command-line
bindkey "^X^E" edit-command-line
bindkey -M vicmd v edit-command-line
# #}}}
#############################################################
# #{{{# prompt #
# #autoload -Uz vcs_info
# #precmd() { vcs_info }
#
# # Format the vcs_info_msg_0_ variable
# #zstyle ':vcs_info:git:*' formats 'on branch %b'
# # Set up the prompt (with git branch name)
# #setopt PROMPT_SUBST
# #PROMPT='%n in ${PWD/#$HOME/~} > '
# #RPROMPT=\$vcs_info_msg_0_
#
# ##PURE_GIT_DOWN_ARROW=⇣
# ##PURE_GIT_UP_ARROW=⇡
# ##export GIT=''
# export GIT=''
PURE_GIT_DOWN_ARROW='▼'
PURE_GIT_UP_ARROW='▲'
PURE_GITSTASH_SYMBOL='■'
#
# # turn on git stash status
autoload -U promptinit; promptinit
#autoload -U promptinit
zstyle :prompt:pure:git:stash show yes
prompt pure
# Use Starship Prompt
# https://github.com/starship/starship'
#export STARSHIP_CONFIG=~/.starship.toml
#eval "$(starship init $SHELL)"
# #}}}
#############################################################
#{{{# fzf #
# Alt + c - change directory from fzf
# Ctrl + r - search through bash history with fzf
# Ctrl + t - insert file from fzf into command
# Ctrl + p - edit a file in vim from fzf ???
# mv dir/** - expand a directory with (**) and select from fzf
#
## use rg for fzf
# export FZF_DEFAULT_COMMAND="rg --files --hidden --glob \!.git"
# export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
# export FZF_DEFAULT_COMMAND="fd --type file --color=always"
# export FZF_DEFAULT_OPTS="--ansi"
# export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
# Only wire fzf to ripgrep when rg is actually installed.
if type rg &> /dev/null; then
# export FZF_DEFAULT_COMMAND='rg --files'
# export FZF_DEFAULT_OPTS='-m --height 50% --border'
# export FZF_DEFAULT_COMMAND="fd --type file --color=always"
# export FZF_DEFAULT_OPTS='-m --height 50% --border'
# export FZF_DEFAULT_COMMAND='rg --files --no-ignore --hidden --follow --glob "!.git/*"'
#
## FZF things
#[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
#export FZF_DEFAULT_COMMAND='rg --files --smart-case'
#export FZF_CTRL_T_COMMAND='rg --files --smart-case'
#export FZF_DEFAULT_OPTS="--height 40% --reverse"
## Preview files (but not everything else) with bat
#export FZF_CTRL_T_OPTS="$FZF_DEFAULT_OPTS \
# --preview 'bat --color=always --line-range=:40 --style=numbers,changes {}'"
#
# use rg for fzf
export FZF_DEFAULT_COMMAND="rg --files --hidden --glob \!.git"
export FZF_DEFAULT_OPTS='-m --height 50% --border'
# bindkey ^p vim $(fzf) ???
fi
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# #}}}
#
# Local aliases and machine-specific overrides, when present.
[ -f ${XDG_CONFIG_HOME}/zsh/alias ] && source ${XDG_CONFIG_HOME}/zsh/alias
[ -f ${HOME}/.localrc ] && source ${HOME}/.localrc
#
# Keep PATH entries unique; prepend Homebrew on Apple Silicon.
typeset -U PATH
if [[ "$(uname -m)" == "arm64" ]]; then
export PATH="/opt/homebrew/bin:${PATH}"
fi
export PATH="$HOME/.yarn/bin:$HOME/.config/yarn/global/node_modules/.bin:$PATH"
| true
|
e88610f2bab0db8346938fe9bc9f1ae7de0113c2
|
Shell
|
ENSIIE-2022/ASR
|
/TP3/08-svgdir.sh
|
UTF-8
| 220
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
#NAVETEUR Lucas
# svgdir <src> <dst> : save (recursively copy) directory <src> to <dst>.
svgdir () {
# Argument check: exactly two arguments required.
# FIX: $2/$3 are now quoted (the unquoted tests broke on values with
# spaces), the usage line names this script (was 04-doslike.sh), and a
# usage error exits non-zero instead of 0.
if [ -z "$2" ] || [ ! -z "$3" ]; then
echo "Usage : bash 08-svgdir.sh <arg1> <arg2>"
exit 1
fi
cp -r "$1" "$2"
}
svgdir "$1" "$2"
| true
|
e93d11cb4d48a2cfc282559571844642b595a700
|
Shell
|
FrancisPouliot/cyphernode
|
/api_auth_docker/entrypoint.sh
|
UTF-8
| 342
| 3.15625
| 3
|
[
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint: launch fcgiwrap through spawn-fcgi, open up the
# nginx config permissions, then run nginx in the foreground.
# Optional $1 has the form "user[:...]"; a non-empty first field overrides
# the default fcgiwrap user.
run_as='nginx'
if [[ -n "$1" ]]; then
first_field=${1%%:*}
if [[ -n "$first_field" ]]; then
run_as=$first_field
fi
fi
spawn-fcgi -M 0660 -s /var/run/fcgiwrap.socket -u "$run_as" -g nginx -U "$run_as" -- $(which fcgiwrap)
chmod -R g+rw /var/run/fcgiwrap.socket /etc/nginx/conf.d/*
chown -R :nginx /etc/nginx/conf.d/*
nginx -g "daemon off;"
| true
|
77b699dcf2439f1d029a9922b0bf28b1e2ed1235
|
Shell
|
vectorfabrics/meta-pareon
|
/recipes-devtools/pareon/files/vfcc.template
|
UTF-8
| 2,689
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# FIX: this script relies on bash-only features (ARGS=() arrays, `+=`, and
# `==` inside [ ]), which fail under a strictly POSIX /bin/sh such as dash;
# the shebang now requests bash explicitly.
#
# vfcc compiler-wrapper template (@@...@@ placeholders are substituted at
# install time): sets up the Vector Fabrics toolchain environment, strips
# unsupported flags, compiles excluded paths with plain GCC, and optionally
# falls back to GCC when vfcc fails.
LOG="@@LOG@@"
ERR="@@ERR@@"
# export PATH="@@GCCDIR@@:$PATH"
export ARM_TOOLCHAIN="@@TOOLCHAIN@@"
export ARM_SYSROOT="@@SYSROOT@@"
export ARM_HF_TOOLCHAIN="@@TOOLCHAIN@@"
export ARM_HF_SYSROOT="@@SYSROOT@@"
export I686_TOOLCHAIN="@@TOOLCHAIN@@"
export I686_SYSROOT="@@SYSROOT@@"
export X86_64_TOOLCHAIN="@@TOOLCHAIN@@"
export X86_64_SYSROOT="@@SYSROOT@@"
export VF_TARGET_TRIPLE="@@TRIPLE@@"
if [ "@@ARCH@@" == "i686-linux-gnu" ] || [ "@@ARCH@@" == "x86_64-linux-gnu" ]
then
export VF_ANA_PLATFORM_FLAVOR="@@FLAVOR@@"
fi
# $GCC is deliberately a multi-word string: it word-splits into the cross
# compiler plus its tuning/sysroot flags wherever it is invoked unquoted.
export GCC="@@TRIPLE@@-g@@SUFFIX@@ @@TUNE@@ --sysroot=@@SYSROOT@@"
export VFCC="@@ARCH@@-vf@@SUFFIX@@"
if [ "@@INSTALLTYPE@@" == "opt" ]
then
export VFCC="@@DIR@@/bin/${VFCC}"
else
export VF_INSTALL_BASE="@@DIR@@"
export VFUTILINSTALL="${VF_INSTALL_BASE}/vfutil"
export VFCCINSTALL="${VF_INSTALL_BASE}/vfcompiler"
export VFPLATFORMINSTALL="${VF_INSTALL_BASE}/vfplatform"
export VFINFRAINSTALL="${VF_INSTALL_BASE}/vfinfra"
export VFLIBINSTALL="${VF_INSTALL_BASE}/vflib"
export VFSERVICEINSTALL="${VF_INSTALL_BASE}/vfservice"
export VFEXECINSTALL="${VF_INSTALL_BASE}/vfexec"
export VFCXXINSTALL="${VF_INSTALL_BASE}/vfcxx"
export VFSERVERINSTALL="${VF_INSTALL_BASE}/vfserver"
export VFHELPINSTALL="${VF_INSTALL_BASE}/vfhelp"
export VFEXAMPLEINSTALL="${VF_INSTALL_BASE}/vfexample"
export VFUIINSTALL="${VF_INSTALL_BASE}/vfui"
export VFTASKSINSTALL="${VF_INSTALL_BASE}/vftasks"
export VFCC="${VFCCINSTALL}/@@PRODUCT@@/bin/${VFCC}"
fi
TUNE="@@TUNE@@"
# remove invalid compiler options; pass --version straight through to GCC
ARGS=()
for var in $TUNE "$@"
do
if [ "$var" == '--version' ]
then
$GCC "$@"
exit 0
fi
[ "$var" != '-mfpmath=sse' ] && ARGS+=("$var")
done
# compile paths in PAREON_EXCLUDE with gcc
if [ ! -z "$PAREON_EXCLUDE" ]
then
for x in $PAREON_EXCLUDE
do
if pwd | fgrep -q "$x"
then
if [ ! -z "$LOG" ]
then
echo "EXCLUDE cd $PWD && $0 $@" >> $LOG
fi
$GCC "$@"
exit $?
fi
done
fi
# compile
if $VFCC "${ARGS[@]}"
then
if [ ! -z "$LOG" ]
then
echo "SUCCESS cd $PWD && $0 $@" >> $LOG
fi
else
CODE=$?
if [ ! -z "$LOG" ]
then
echo "FAILURE cd $PWD && $0 $@" >> $LOG
fi
if [ ! -z "$ERR" ]
then
# call vfcc again and capture the output
OUTPUT=$($VFCC "${ARGS[@]}" 2>&1)
echo "cd $PWD && $0 $@" >> $ERR
echo "$OUTPUT" >> $ERR
echo >> $ERR
fi
# optionally fallback to GCC
if [ ! -z "@@FALLBACK@@" ]
then
$GCC "$@"
else
exit $CODE
fi
fi
| true
|
bcd4faabf0a5c66f2833db62ae9f3e7b75f9c5ef
|
Shell
|
Paul-Hume/mac-install
|
/generate-data.sh
|
UTF-8
| 598
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Snapshot this machine's Homebrew formulae/casks, VS Code extensions,
# .zshrc, iTerm2 prefs and Code settings into ./data for later restore.
var_file="./data/var.sh"
zsh_backup="./data/.zshbackup"
echo "Getting formulae..."
formulae=$(brew list)
echo "Getting casks..."
casks=$(brew cask list)
echo "Getting code extensions..."
extensions=$(code --list-extensions)
# Emit a sourceable script declaring the captured lists as arrays.
{
printf "#!/bin/bash\n\n"
printf "BREW=(%s)\n\n" "$formulae"
printf "BREW_CASKS=(%s)\n\n" "$casks"
printf "VSC_EXT=(%s)\n\n" "$extensions"
} > "$var_file"
cat ~/.zshrc > "$zsh_backup"
cp ~/Library/Preferences/com.googlecode.iterm2.plist ./data/com.googlecode.iterm2.plist
cp ~/Library/Application\ Support/Code/User/settings.json ./data/settings.json
| true
|
6f09bcfdb2499f58dd456fc46078bc42465c8d78
|
Shell
|
Cybersecurity-Labs/Script-bash
|
/Scripts/conf_net.sh
|
UTF-8
| 1,115
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
# Manually configure a local network interface: prompt for interface, IP,
# netmask and gateway, apply them with ifconfig/route, and verify each step
# with ping. (User-facing messages are intentionally kept in Portuguese.)
# NOTE(review): `read -p` is a bashism; under dash this script would fail —
# confirm the target /bin/sh is bash before relying on it.
ifconfig # list the available interfaces
echo
read -p "Qual interface de rede deseja configurar? " INTF # interface to configure
echo
read -p "Informe o IP destinado ao host EX(192.168.1.25): " IP # IP to assign
echo
read -p "Informe a mascara de rede EX(255.255.255.0): " MASK # netmask
echo
read -p "Informe o GateWay da rede: " GW # network gateway
echo
sleep 1
clear
echo "Configurando..."
sleep 3
ifconfig $INTF $IP netmask $MASK # apply the requested settings
echo "Testando o IP adicionado."
ping -c2 $IP # verify the IP with a ping
# FIX: capture ping's status immediately. The original tested $? after
# `sleep 2; clear`, so it always saw clear's (successful) exit status and
# the IP check could never fail.
PING_STATUS=$?
sleep 2
clear
if [ $PING_STATUS -eq 0 ]; then
route add default gw $GW ; # add the default route
echo "Testando GateWay..."
ping -c2 $GW # verify the gateway with a ping
if [ $? -eq 0 ]; then
echo "Rede configurada com sucesso!"
sleep 3
clear
else
echo "Falha ao pingar no Gateway!"
exit
fi
else
echo "Falha ao pingar no IP!"
exit
fi
| true
|
8533b8e74c046a6c2cc18f54a0580545205bfdaf
|
Shell
|
kartishr/testcassandra
|
/cassandra/roles/install-cassandra/templates/start_cassandra.j2
|
UTF-8
| 3,307
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Wrapper script to start DDAC cassandra daemon running on this node
# Written by Madhu Kangara | initial version | v.1.0 | 05/28/2019
#
# Installation root of the DDAC Cassandra distribution.
DDAC_HOME=/apps/cassandra
# PID file written by the daemon; used by the is_running* helpers below.
CASSANDRA_PIDFILE="${DDAC_HOME}/run/cassandra.pid"
RETVAL=0
# NOTE(review): NEED_NEWLINE is never read anywhere in this script.
NEED_NEWLINE=0
CASSANDRA_USER=cassandra
NAME="Apache Cassandra"
# Daemon stdout/stderr are redirected here by do_start.
OUTPUT_FILE="${DDAC_HOME}/logs/output.log"
is_running()
{
# Print a human-readable running/not-running line based on
# is_running_silent, mirror its status into the global RETVAL,
# and return that status.
is_running_silent
RETVAL="$?"
if [ "$RETVAL" -eq 0 ]; then
echo "$NAME is running"
else
echo "$NAME is not running"
fi
return "$RETVAL"
}
is_running_silent()
{
# Silent liveness probe:
#   0 - pid file exists and /proc/<pid>/cmdline mentions the pid file
#   1 - pid file exists but the process is gone (stale)
#   3 - no pid file at all
[ -f "${CASSANDRA_PIDFILE}" ] || return 3
pid=$(cat "${CASSANDRA_PIDFILE}")
if grep -Eq "${CASSANDRA_PIDFILE}" "/proc/$pid/cmdline" 2>/dev/null; then
return 0
fi
return 1
}
do_stop()
{
# Stop the daemon.
# Returns 0 if it has been stopped (or was already stopped),
# 1 if it could not be stopped, other codes on unexpected failure.
# The global RETVAL mirrors the final status.
if ! is_running_silent; then
echo "$NAME is already stopped"
RETVAL=0
return 0
fi
# TODO (from original author): add a proper state check before killing.
RETVAL=0
if [ -f "${CASSANDRA_PIDFILE}" ]; then
pid=$(cat "${CASSANDRA_PIDFILE}")
fi
echo "Stopping with kill -9 ${pid}"
kill -9 ${pid}
RETVAL=$?
if [ $RETVAL -eq 2 ]; then
return 1
fi
rm -f "${CASSANDRA_PIDFILE}"
return $RETVAL
}
do_start()
{
# Start the Cassandra daemon in the background (nohup), writing its pid to
# CASSANDRA_PIDFILE and its output to OUTPUT_FILE.
# Returns 0 on successful launch, 1 when DDAC_HOME is unset or the launch
# failed; the global RETVAL mirrors the status.
is_running_silent
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
echo "$NAME is already stopped. Proceeding with Start"
RETVAL=0
fi
RETVAL=0
if [ -f "${CASSANDRA_PIDFILE}" ]; then
pid=$(cat "${CASSANDRA_PIDFILE}")
fi
# Refuse to start without an install root.
if [ -z "${DDAC_HOME}" ]; then
echo " DDAC_HOME variable not defined. Cannot start Cassandra instance"
return 1
fi
echo "Starting Cassandra instance"
nohup ${DDAC_HOME}/bin/cassandra -p ${CASSANDRA_PIDFILE} >${OUTPUT_FILE} 2>&1 &
RETVAL=$?
[ $RETVAL -eq 2 ] && return 1
return $RETVAL
}
# Starting the main
# Check if JAVA_HOME has been set for user: cassandra
if [ -z "${JAVA_HOME}" ]; then
echo "No JAVA_HOME found. Please set and try again..."
# FIX: `return` is only valid inside a function or a sourced script; at
# script scope bash prints an error and keeps executing, so the guard
# never actually stopped the script. Use exit.
exit 1
fi
# Check if DDAC_HOME has been set for user: cassandra
if [ -z "${DDAC_HOME}" ]; then
echo "No DDAC_HOME found. Please set and try again..."
exit 1
fi
# Ensure the directory holding the pid file exists and is creatable.
PIDDIR=$(dirname ${CASSANDRA_PIDFILE})
if [ ! -d "$PIDDIR" ]; then
mkdir -p "$PIDDIR"
if [ ! -d "$PIDDIR" ]; then
echo "$PIDDIR can not be found. Please create and make writable for user: $CASSANDRA_USER"
exit 5
fi
fi
# ::TODO
#owner=`stat -c %U "$PIDDIR"`
#if [ "$owner" != "$CASSANDRA_USER" ]; then
# chown -R "$CASSANDRA_USER" "$PIDDIR"
# owner=`stat -c %U "$PIDDIR"`
# if [ "$owner" != "$CASSANDRA_USER" ]; then
# log_message_failure "$PIDDIR is not owned by user: $CASSANDRA_USER. Please change ownership."
# exit 5
# fi
#fi
echo "DDAC_HOME is ${DDAC_HOME} and JAVA_HOME is $JAVA_HOME"
echo "CASSANDRA_PIDFILE is ${CASSANDRA_PIDFILE}"
do_start
| true
|
30230bc6a0455af9dbb7fbbc1a8b6f0266e5315b
|
Shell
|
trscavo/saml-library
|
/lib/saml_tools.bash
|
UTF-8
| 32,882
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#######################################################################
# Copyright 2013--2018 Tom Scavo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################
#######################################################################
#
# This function probes a browser-facing IdP endpoint location. The
# resulting HTTP exchange may include multiple round trips as the
# server negotiates an initial session with the client. The exchange
# usually terminates with the server presenting an HTML login form
# to the client.
#
# Usage:
# probe_saml_idp_endpoint \
# -t CONNECT_TIME -m MAX_TIME \
# -r MAX_REDIRS \
# [-V CURL_TRACE_FILE] \
# [-o RESPONSE_FILE] \
# -T TMP_DIR \
# IDP_ENDPOINT_LOCATION IDP_ENDPOINT_BINDING IDP_ENDPOINT_TYPE
# where
# IDP_ENDPOINT_LOCATION and IDP_ENDPOINT_BINDING are the
# Location and Binding XML attribute values of a browser-
# facing SAML endpoint at the IdP. Any such endpoint has one
# of the following binding URIs:
#
# urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect
# urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST
# urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST-SimpleSign
# urn:mace:shibboleth:1.0:profiles:AuthnRequest
#
# The IDP_ENDPOINT_TYPE must be "SingleSignOnService", which is
# the only endpoint type supported by the script at this time.
#
# The output of this script consists of a single line with four
# space-separated fields:
#
# 1. curl error code
# 2. curl output string
# 3. IdP endpoint location
# 4. IdP endpoint binding
#
# The function records the details of the various processing steps
# and the resulting HTTP transaction in files stored in the given
# temporary directory. If the -V option is specified on the command
# line, a curl trace of the transaction is also provided.
#
#######################################################################
probe_saml_idp_endpoint () {
    # Probe a browser-facing IdP SSO endpoint (full contract in the
    # header block above).  Dispatches on the endpoint binding: SAML2
    # bindings get an AuthnRequest constructed and delivered; the
    # legacy Shibboleth 1.x binding is probed directly.
    # Output: one line, "CURL_ERR CURL_OUT LOCATION BINDING".
    # Returns: 0 ok, 2 usage error, 3 downstream failure.

    # command-line options
    local connect_timeout
    local max_time
    local max_redirs
    local curl_trace_file
    local response_file
    local tmp_dir
    # command-line arguments
    local idp_endpoint_binding
    local idp_endpoint_location
    local idp_endpoint_type
    # other local vars
    local local_opts
    local saml_message
    local exit_status

    ###################################################################
    # Process command-line options and arguments.
    ###################################################################

    local opt
    local OPTARG
    local OPTIND
    # Every recognized option is re-packed into $local_opts so it can be
    # forwarded verbatim to the binding-specific probe function below.
    # $local_opts is deliberately expanded UNQUOTED at the call sites so
    # that it word-splits back into separate options; this assumes none
    # of the option values contain whitespace.
    while getopts ":t:m:r:V:o:T:" opt; do
        case $opt in
            t)
                connect_timeout="$OPTARG"
                local_opts="$local_opts -t $connect_timeout"
                ;;
            m)
                max_time="$OPTARG"
                local_opts="$local_opts -m $max_time"
                ;;
            r)
                max_redirs="$OPTARG"
                local_opts="$local_opts -r $max_redirs"
                ;;
            V)
                curl_trace_file="$OPTARG"
                local_opts="$local_opts -V $curl_trace_file"
                ;;
            o)
                response_file="$OPTARG"
                local_opts="$local_opts -o $response_file"
                ;;
            T)
                tmp_dir="$OPTARG"
                local_opts="$local_opts -T $tmp_dir"
                ;;
            \?)
                echo "ERROR: $FUNCNAME: Unrecognized option: -$OPTARG" >&2
                return 2
                ;;
            :)
                echo "ERROR: $FUNCNAME: Option -$OPTARG requires an argument" >&2
                return 2
                ;;
        esac
    done

    # -t, -m, -r and -T are mandatory for every probe
    if [ -z "$connect_timeout" ]; then
        echo "ERROR: $FUNCNAME: connection timeout (option -t) required" >&2
        return 2
    fi
    if [ -z "$max_time" ]; then
        echo "ERROR: $FUNCNAME: max time (option -m) required" >&2
        return 2
    fi
    if [ -z "$max_redirs" ]; then
        echo "ERROR: $FUNCNAME: max redirects (option -r) required" >&2
        return 2
    fi

    # check for a temporary directory
    if [ -z "$tmp_dir" ]; then
        echo "ERROR: $FUNCNAME: temporary directory (option -T) required" >&2
        return 2
    fi
    if [ ! -d "$tmp_dir" ]; then
        echo "ERROR: $FUNCNAME: temporary directory does not exist: $tmp_dir" >&2
        return 2
    fi

    # make sure there are the correct number of command-line arguments
    shift $(( OPTIND - 1 ))
    if [ $# -ne 3 ]; then
        echo "ERROR: $FUNCNAME: incorrect number of arguments: $# (3 required)" >&2
        return 2
    fi
    idp_endpoint_location="$1"
    idp_endpoint_binding="$2"
    idp_endpoint_type="$3"

    # SSO endpoints only
    if [ "$idp_endpoint_type" != "SingleSignOnService" ]; then
        echo "ERROR: $FUNCNAME: endpoint type not supported: $idp_endpoint_type" >&2
        return 2
    fi

    ###################################################################
    # Probe the SAML endpoint.
    ###################################################################

    # probe a browser-facing SAML2 SSO endpoint
    if [ "$idp_endpoint_binding" = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" ] || \
       [ "$idp_endpoint_binding" = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" ] || \
       [ "$idp_endpoint_binding" = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST-SimpleSign" ]; then
        # construct an AuthnRequest message
        saml_message=$( construct_SAML2_AuthnRequest $idp_endpoint_location )
        exit_status=$?
        if [ "$exit_status" -ne 0 ]; then
            echo "ERROR: $FUNCNAME: construct_SAML2_AuthnRequest failed ($exit_status)" >&2
            return 3
        fi
        # probe the endpoint (output of the helper is this function's output)
        probe_saml2_idp_endpoint $local_opts $idp_endpoint_location $idp_endpoint_binding "$saml_message"
        exit_status=$?
        if [ "$exit_status" -ne 0 ]; then
            echo "ERROR: $FUNCNAME: probe_saml2_idp_endpoint failed ($exit_status)" >&2
            return 3
        fi
        return 0
    fi

    # probe a browser-facing SAML1 SSO endpoint (legacy Shibboleth protocol)
    if [ "$idp_endpoint_binding" = "urn:mace:shibboleth:1.0:profiles:AuthnRequest" ]; then
        # probe the endpoint
        probe_shibboleth_sso_endpoint $local_opts $idp_endpoint_location $idp_endpoint_binding
        exit_status=$?
        if [ "$exit_status" -ne 0 ]; then
            echo "ERROR: $FUNCNAME: probe_shibboleth_sso_endpoint failed ($exit_status)" >&2
            return 3
        fi
        return 0
    fi

    echo "ERROR: $FUNCNAME: endpoint binding not supported: $idp_endpoint_binding" >&2
    return 2
}
#######################################################################
#
# This function transmits a SAML V2.0 message to a browser-facing
# IdP endpoint location. The resulting HTTP exchange may include
# multiple round trips as the server negotiates an initial session
# with the client. The exchange often terminates with the server
# presenting an HTML login form to the client.
#
# Usage:
# probe_saml2_idp_endpoint \
# -t CONNECT_TIME -m MAX_TIME \
# -r MAX_REDIRS \
# [-V CURL_TRACE_FILE] \
# [-o RESPONSE_FILE] \
# -T TMP_DIR \
# IDP_ENDPOINT_LOCATION IDP_ENDPOINT_BINDING \
# SAML_MESSAGE
# where
# IDP_ENDPOINT_LOCATION and IDP_ENDPOINT_BINDING are the
# Location and Binding XML attribute values of a browser-
# facing SAML2 endpoint at the IdP. By definition, any
# such endpoint has one of the following bindings:
#
# urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect
# urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST
# urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST-SimpleSign
#
# The SAML_MESSAGE is a plain text XML message, either a SAML2
# AuthnRequest or a SAML2 LogoutRequest.
#
# Before transmitting the SAML message to the IdP, a message
# binding is constructing as specified in the OASIS SAML2 Binding
# specification. In the case of HTTP-Redirect, the message is
# DEFLATE-compressed, base64-encoded, and percent-encoded.
# In the case of HTTP-POST and HTTP-POST-SimpleSign, the message
# is base64-encoded and percent-encoded only (but not compressed).
#
# The output of this script consists of a single line with four
# space-separated fields:
#
# 1. curl error code
# 2. curl output string
# 3. IdP endpoint location
# 4. IdP endpoint binding
#
# The function records the details of the various processing steps
# and the resulting HTTP transaction in files stored in the given
# temporary directory. If the -V option is specified on the command
# line, a curl trace of the transaction is also provided. In the
# temporary directory, see these log files for details:
#
# deflate_log
# probe_saml2_idp_endpoint_log
#
#######################################################################
probe_saml2_idp_endpoint () {
    # Transmit a SAML V2.0 message to a browser-facing IdP endpoint and
    # report the result of the HTTP exchange (full contract in the header
    # block above).  Output: one line, "CURL_ERR CURL_OUT LOCATION BINDING".
    # Returns: 0 ok, 2 usage error, 3 processing failure.

    # external dependency
    if [ "$(type -t percent_encode)" != function ]; then
        echo "ERROR: $FUNCNAME: function percent_encode not found" >&2
        return 2
    fi

    # user agent
    local script_version="0.6"
    local user_agent_string="SAML2 IdP Endpoint Probe ${script_version}"

    # command-line options
    local connect_timeout
    local max_time
    local max_redirs
    local tmp_dir
    # command-line arguments
    local idp_endpoint_binding
    local idp_endpoint_location
    local saml_message
    # temporary files
    local tmp_log_file
    local header_file
    local response_file
    local cookie_jar_file
    local curl_trace_file
    local deflated_message_file
    local base64_encoded_message_file

    local exit_status
    local base64_encoded_message
    local percent_encoded_message
    local protocol_url
    local curl_opts
    local curl_output
    local curl_error_code

    ###################################################################
    # Process command-line options and arguments.
    ###################################################################

    # default curl options
    curl_opts="--silent --show-error"
    curl_opts="$curl_opts --insecure --tlsv1"

    local opt
    local OPTARG
    local OPTIND
    while getopts ":t:m:r:V:o:T:" opt; do
        case $opt in
            t)
                connect_timeout="$OPTARG"
                curl_opts="$curl_opts --connect-timeout $connect_timeout"
                ;;
            m)
                max_time="$OPTARG"
                curl_opts="$curl_opts --max-time $max_time"
                ;;
            r)
                max_redirs="$OPTARG"
                curl_opts="$curl_opts --location --max-redirs $max_redirs"
                ;;
            V)
                curl_trace_file="$OPTARG"
                curl_opts="$curl_opts --trace-ascii $curl_trace_file"
                ;;
            o)
                # BUGFIX: do NOT append "--output" here.  It is appended
                # unconditionally below (after defaulting to /dev/null);
                # the original appended it in both places, so "-o FILE"
                # put a duplicate --output on the curl command line
                # ("Warning: You have 2 output filenames but only 1 URL").
                response_file="$OPTARG"
                ;;
            T)
                tmp_dir="$OPTARG"
                ;;
            \?)
                echo "ERROR: $FUNCNAME: Unrecognized option: -$OPTARG" >&2
                return 2
                ;;
            :)
                echo "ERROR: $FUNCNAME: Option -$OPTARG requires an argument" >&2
                return 2
                ;;
        esac
    done

    if [ -z "$connect_timeout" ]; then
        echo "ERROR: $FUNCNAME: connection timeout (option -t) required" >&2
        return 2
    fi
    if [ -z "$max_time" ]; then
        echo "ERROR: $FUNCNAME: max time (option -m) required" >&2
        return 2
    fi
    if [ -z "$max_redirs" ]; then
        echo "ERROR: $FUNCNAME: max redirects (option -r) required" >&2
        return 2
    fi

    # check for a temporary directory
    if [ -z "$tmp_dir" ]; then
        echo "ERROR: $FUNCNAME: temporary directory (option -T) required" >&2
        return 2
    fi
    if [ ! -d "$tmp_dir" ]; then
        echo "ERROR: $FUNCNAME: temporary directory does not exist: $tmp_dir" >&2
        return 2
    fi

    # make sure there are the correct number of command-line arguments
    shift $(( OPTIND - 1 ))
    if [ $# -ne 3 ]; then
        echo "ERROR: $FUNCNAME: incorrect number of arguments: $# (3 required)" >&2
        return 2
    fi
    idp_endpoint_location="$1"
    idp_endpoint_binding="$2"
    saml_message="$3"

    # check the binding
    if [ "$idp_endpoint_binding" != "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" ] && \
       [ "$idp_endpoint_binding" != "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" ] && \
       [ "$idp_endpoint_binding" != "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST-SimpleSign" ]; then
        echo "ERROR: $FUNCNAME: unrecognized binding: $idp_endpoint_binding" >&2
        return 2
    fi

    ###################################################################
    # Initialization complete.
    ###################################################################

    # temporary log file
    tmp_log_file="$tmp_dir/${FUNCNAME}_log"
    echo "$FUNCNAME using temporary directory: $tmp_dir" > "$tmp_log_file"

    # temporary files: a cookie jar (read AND written, so the IdP can
    # negotiate a session across redirects) and a response-header dump
    cookie_jar_file="${tmp_dir}/idp_cookie_jar.txt"
    curl_opts="$curl_opts --cookie-jar $cookie_jar_file --cookie $cookie_jar_file"
    header_file="${tmp_dir}/idp_http_header.txt"
    curl_opts="$curl_opts --dump-header $header_file"
    [ -z "$response_file" ] && response_file=/dev/null
    curl_opts="$curl_opts --output $response_file"

    # log input data
    printf "$FUNCNAME using connection timeout (option -t): %d\n" "$connect_timeout" >> "$tmp_log_file"
    printf "$FUNCNAME using max time (option -m): %d\n" "$max_time" >> "$tmp_log_file"
    printf "$FUNCNAME using max redirects (option -r): %d\n" "$max_redirs" >> "$tmp_log_file"
    printf "$FUNCNAME using IdP endpoint binding: %s\n" "$idp_endpoint_binding" >> "$tmp_log_file"
    printf "$FUNCNAME using IdP endpoint location: %s\n" "$idp_endpoint_location" >> "$tmp_log_file"
    printf "$FUNCNAME using SAML message (flattened): %s\n" "$( echo $saml_message | /usr/bin/tr -d '\n\r' )" >> "$tmp_log_file"
    printf "$FUNCNAME using IdP cookie file: %s\n" "$cookie_jar_file" >> "$tmp_log_file"
    printf "$FUNCNAME using IdP header file: %s\n" "$header_file" >> "$tmp_log_file"
    printf "$FUNCNAME using IdP response file: %s\n" "$response_file" >> "$tmp_log_file"

    ###################################################################
    # Compute the protocol URL per the SAML2 binding in use.
    ###################################################################

    if [ "$idp_endpoint_binding" = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" ]; then
        # HTTP-Redirect: DEFLATE-compress, base64-encode, percent-encode,
        # then carry the message in the SAMLRequest query parameter.
        # Note: the deflated message is stored in a file, not a variable,
        # since echo does not operate safely on binary data.
        deflated_message_file="${tmp_dir}/saml_message.xml.deflate"
        printf "$FUNCNAME using deflated message file: %s\n" "$deflated_message_file" >> "$tmp_log_file"

        # deflate the SAML message
        deflate -T $tmp_dir "$saml_message" > "$deflated_message_file"
        exit_status=$?
        if [ $exit_status -ne 0 ]; then
            echo "ERROR: $FUNCNAME: failed to deflate the message ($exit_status)" >&2
            return 3
        fi

        # base64-encode the deflated message
        base64_encoded_message=$( /usr/bin/base64 "$deflated_message_file" )
        exit_status=$?
        if [ $exit_status -ne 0 ]; then
            echo "ERROR: $FUNCNAME: failed to base64-encode the deflated message ($exit_status)" >&2
            return 3
        fi
        printf "$FUNCNAME computed base64-encoded message: %s\n" "$base64_encoded_message" >> "$tmp_log_file"

        # percent-encode the base64-encoded, deflated SAML message
        percent_encoded_message=$( percent_encode "$base64_encoded_message" )
        exit_status=$?
        if [ $exit_status -ne 0 ]; then
            echo "ERROR: $FUNCNAME: failed to percent-encode message ($exit_status)" >&2
            return 3
        fi
        printf "$FUNCNAME computed percent-encoded message: %s\n" "$percent_encoded_message" >> "$tmp_log_file"

        # construct the URL subject to the SAML2 HTTP-Redirect binding
        protocol_url=${idp_endpoint_location}?SAMLRequest=$percent_encoded_message
        printf "$FUNCNAME computed protocol URL: %s\n" "$protocol_url" >> "$tmp_log_file"
    else
        # HTTP-POST(-SimpleSign): base64-encode only (no compression) and
        # POST the message as form parameter SAMLRequest.
        base64_encoded_message_file="${tmp_dir}/saml_message.xml.base64"
        printf "$FUNCNAME using encoded message file: %s\n" "$base64_encoded_message_file" >> "$tmp_log_file"

        # base64-encode the SAML message
        echo -n "$saml_message" | /usr/bin/base64 > "$base64_encoded_message_file"
        exit_status=$?
        if [ $exit_status -ne 0 ]; then
            echo "ERROR: $FUNCNAME: failed to base64-encode the message ($exit_status)" >&2
            return 3
        fi

        # in the case of HTTP-POST, the protocol URL IS the endpoint location
        protocol_url=$idp_endpoint_location
        printf "$FUNCNAME computed protocol URL: %s\n" "$protocol_url" >> "$tmp_log_file"

        curl_opts="${curl_opts} --data-urlencode SAMLRequest@$base64_encoded_message_file"
    fi

    ###################################################################
    # Probe the IdP endpoint.
    ###################################################################

    printf "$FUNCNAME using curl opts: %s\n" "$curl_opts" >> "$tmp_log_file"

    # transmit the request to the IdP
    curl_output=$( /usr/bin/curl ${curl_opts} \
        --user-agent "$user_agent_string" \
        --write-out 'redirects:%{num_redirects};response:%{http_code};dns:%{time_namelookup};tcp:%{time_connect};ssl:%{time_appconnect};total:%{time_total}' \
        "$protocol_url"
    )
    curl_error_code=$?

    # only the last line of output is processed further
    curl_output=$(echo "$curl_output" | /usr/bin/tail -n 1)
    printf "$FUNCNAME output: %s\n" "$curl_error_code $curl_output $idp_endpoint_location $idp_endpoint_binding" >> "$tmp_log_file"
    echo "$curl_error_code $curl_output $idp_endpoint_location $idp_endpoint_binding"
    return 0
}
#######################################################################
#
# This function transmits a SAML message to an IdP endpoint location
# via the Shibboleth 1.3 AuthnRequest protocol. The latter is a
# proprietary (but widely used) protocol for IdPs that support
# the SAML1 Web Browser SSO profile.
#
# Usage:
# probe_shibboleth_sso_endpoint \
# -t CONNECT_TIME -m MAX_TIME \
# -r MAX_REDIRS \
# [-V CURL_TRACE_FILE] \
# [-o RESPONSE_FILE] \
# -T TMP_DIR \
# IDP_ENDPOINT_LOCATION [IDP_ENDPOINT_BINDING]
# where
# IDP_ENDPOINT_LOCATION and IDP_ENDPOINT_BINDING are the
# Location and Binding XML attribute values of a particular
# browser-facing endpoint at the IdP. This script probes
# an endpoint with binding URI:
#
# urn:mace:shibboleth:1.0:profiles:AuthnRequest
#
# Since only one binding is recognized by this script, the
# binding URI is an optional command-line argument.
#
# The output of this script consists of a single line with four
# space-separated fields:
#
# 1. curl error code
# 2. curl output string
# 3. IdP endpoint location
# 4. IdP endpoint binding
#
# The function records the details of the various processing steps
# and the resulting HTTP transaction in files stored in the given
# temporary directory. If the -V option is specified on the command
# line, a curl trace of the transaction is also provided. In the
# temporary directory, see this log file for details:
#
# probe_shibboleth_sso_endpoint_log
#
#######################################################################
probe_shibboleth_sso_endpoint () {
    # Probe an IdP endpoint via the proprietary Shibboleth 1.3
    # AuthnRequest protocol (full contract in the header block above).
    # Output: one line, "CURL_ERR CURL_OUT LOCATION BINDING".
    # Returns: 0 ok, 2 usage error, 3 processing failure.

    # check global env vars describing the probing SP
    if [ -z "$SAML1_SP_ENTITY_ID" ]; then
        echo "ERROR: $FUNCNAME requires env var SAML1_SP_ENTITY_ID" >&2
        return 2
    fi
    if [ -z "$SAML1_SP_ACS_URL" ]; then
        echo "ERROR: $FUNCNAME requires env var SAML1_SP_ACS_URL" >&2
        return 2
    fi
    if [ -z "$SAML1_SP_ACS_BINDING" ]; then
        echo "ERROR: $FUNCNAME requires env var SAML1_SP_ACS_BINDING" >&2
        return 2
    fi

    # external dependency
    if [ "$(type -t percent_encode)" != function ]; then
        echo "ERROR: $FUNCNAME: function percent_encode not found" >&2
        return 2
    fi

    # user agent
    local script_version="0.3"
    local user_agent_string="SAML1 IdP Endpoint Probe ${script_version}"

    # command-line options
    local connect_timeout
    local max_time
    local max_redirs
    local tmp_dir
    # command-line arguments
    local idp_shibboleth_sso_binding
    local idp_shibboleth_sso_location
    # temporary files
    local tmp_log_file
    local header_file
    local response_file
    local cookie_jar_file
    local curl_trace_file

    local exit_status
    local encoded_entityid
    local encoded_acs_url
    local protocol_url
    local curl_opts
    local curl_output
    local curl_error_code

    ###################################################################
    # Process command-line options and arguments.
    ###################################################################

    # default curl options
    curl_opts="--silent --show-error"
    curl_opts="$curl_opts --insecure --tlsv1"

    local opt
    local OPTARG
    local OPTIND
    while getopts ":t:m:r:V:o:T:" opt; do
        case $opt in
            t)
                connect_timeout="$OPTARG"
                curl_opts="$curl_opts --connect-timeout $connect_timeout"
                ;;
            m)
                max_time="$OPTARG"
                curl_opts="$curl_opts --max-time $max_time"
                ;;
            r)
                max_redirs="$OPTARG"
                curl_opts="$curl_opts --location --max-redirs $max_redirs"
                ;;
            V)
                curl_trace_file="$OPTARG"
                curl_opts="$curl_opts --trace-ascii $curl_trace_file"
                ;;
            o)
                # BUGFIX: do NOT append "--output" here.  It is appended
                # unconditionally below (after defaulting to /dev/null);
                # the original appended it in both places, producing a
                # duplicate --output option on the curl command line.
                response_file="$OPTARG"
                ;;
            T)
                tmp_dir="$OPTARG"
                ;;
            \?)
                echo "ERROR: $FUNCNAME: Unrecognized option: -$OPTARG" >&2
                return 2
                ;;
            :)
                echo "ERROR: $FUNCNAME: Option -$OPTARG requires an argument" >&2
                return 2
                ;;
        esac
    done

    if [ -z "${connect_timeout}" ]; then
        echo "ERROR: $FUNCNAME: connection timeout (option -t) required" >&2
        return 2
    fi
    if [ -z "${max_time}" ]; then
        echo "ERROR: $FUNCNAME: max time (option -m) required" >&2
        return 2
    fi
    if [ -z "${max_redirs}" ]; then
        echo "ERROR: $FUNCNAME: max redirects (option -r) required" >&2
        return 2
    fi

    # check for a temporary directory
    if [ -z "$tmp_dir" ]; then
        echo "ERROR: $FUNCNAME: temporary directory (option -T) required" >&2
        return 2
    fi
    if [ ! -d "$tmp_dir" ]; then
        echo "ERROR: $FUNCNAME: temporary directory does not exist: $tmp_dir" >&2
        return 2
    fi

    # make sure there are the correct number of command-line arguments
    shift $(( OPTIND - 1 ))
    if [ $# -lt 1 ]; then
        echo "ERROR: $FUNCNAME: too few arguments: $# (at least 1 required)" >&2
        return 2
    fi
    if [ $# -gt 2 ]; then
        echo "ERROR: $FUNCNAME: too many arguments: $# (at most 2 required)" >&2
        return 2
    fi

    # capture the command-line argument(s); only one binding is
    # recognized, so the binding argument itself is optional
    if [ $# -eq 1 ]; then
        idp_shibboleth_sso_location="$1"
        idp_shibboleth_sso_binding=urn:mace:shibboleth:1.0:profiles:AuthnRequest
    else
        idp_shibboleth_sso_location="$1"
        idp_shibboleth_sso_binding="$2"
        # check the binding
        if [ "$idp_shibboleth_sso_binding" != "urn:mace:shibboleth:1.0:profiles:AuthnRequest" ]; then
            echo "ERROR: $FUNCNAME: unrecognized binding: $idp_shibboleth_sso_binding" >&2
            return 2
        fi
    fi

    ###################################################################
    # Initialization complete.
    ###################################################################

    # temporary log file
    tmp_log_file="$tmp_dir/${FUNCNAME}_log"
    echo "$FUNCNAME using temporary directory: $tmp_dir" > "$tmp_log_file"

    # temporary files: session cookie jar plus response-header dump
    cookie_jar_file="${tmp_dir}/idp_cookie_jar.txt"
    curl_opts="$curl_opts --cookie-jar $cookie_jar_file --cookie $cookie_jar_file"
    header_file="${tmp_dir}/idp_http_header.txt"
    curl_opts="$curl_opts --dump-header $header_file"
    [ -z "$response_file" ] && response_file=/dev/null
    curl_opts="$curl_opts --output $response_file"

    # log global env vars
    printf "$FUNCNAME using SP with entityID: %s\n" "$SAML1_SP_ENTITY_ID" >> "$tmp_log_file"
    printf "$FUNCNAME using SP ACS URL: %s\n" "$SAML1_SP_ACS_URL" >> "$tmp_log_file"
    printf "$FUNCNAME using SP ACS Binding: %s\n" "$SAML1_SP_ACS_BINDING" >> "$tmp_log_file"

    # log input data
    printf "$FUNCNAME using connection timeout (option -t): %d\n" "$connect_timeout" >> "$tmp_log_file"
    printf "$FUNCNAME using max time (option -m): %d\n" "$max_time" >> "$tmp_log_file"
    printf "$FUNCNAME using max redirects (option -r): %d\n" "$max_redirs" >> "$tmp_log_file"
    printf "$FUNCNAME using IdP endpoint location: %s\n" "$idp_shibboleth_sso_location" >> "$tmp_log_file"
    printf "$FUNCNAME using IdP endpoint binding: %s\n" "$idp_shibboleth_sso_binding" >> "$tmp_log_file"
    printf "$FUNCNAME using IdP cookie file: %s\n" "$cookie_jar_file" >> "$tmp_log_file"
    printf "$FUNCNAME using IdP header file: %s\n" "$header_file" >> "$tmp_log_file"
    printf "$FUNCNAME using IdP response file: %s\n" "$response_file" >> "$tmp_log_file"

    ###################################################################
    # Compute the protocol URL.
    ###################################################################

    # percent-encode the SP entityID
    encoded_entityid=$( percent_encode "$SAML1_SP_ENTITY_ID" )
    exit_status=$?
    if [ $exit_status -ne 0 ]; then
        echo "ERROR: $FUNCNAME: failed to percent-encode SP entityID ($exit_status)" >&2
        return 3
    fi
    printf "$FUNCNAME encoded SP entityID: %s\n" "$encoded_entityid" >> "$tmp_log_file"

    # percent-encode the SP AssertionConsumerService location
    encoded_acs_url=$( percent_encode "$SAML1_SP_ACS_URL" )
    exit_status=$?
    if [ $exit_status -ne 0 ]; then
        echo "ERROR: $FUNCNAME: failed to percent-encode ACS location ($exit_status)" >&2
        return 3
    fi
    printf "$FUNCNAME encoded SP ACS URL: %s\n" "$encoded_acs_url" >> "$tmp_log_file"

    # construct the URL subject to the Shibboleth 1.3 AuthnRequest protocol
    protocol_url="${idp_shibboleth_sso_location}?providerId=${encoded_entityid}&shire=${encoded_acs_url}&target=cookie"
    printf "$FUNCNAME computed protocol URL: %s\n" "$protocol_url" >> "$tmp_log_file"

    ###################################################################
    # Probe the IdP endpoint.
    ###################################################################

    printf "$FUNCNAME using curl opts: %s\n" "$curl_opts" >> "$tmp_log_file"

    # transmit the request to the IdP
    curl_output=$( /usr/bin/curl ${curl_opts} \
        --user-agent "$user_agent_string" \
        --write-out 'redirects:%{num_redirects};response:%{http_code};dns:%{time_namelookup};tcp:%{time_connect};ssl:%{time_appconnect};total:%{time_total}' \
        "$protocol_url"
    )
    curl_error_code=$?

    # only the last line of output is processed further
    curl_output=$(echo "$curl_output" | /usr/bin/tail -n 1)
    printf "$FUNCNAME output: %s\n" "$curl_error_code $curl_output $idp_shibboleth_sso_location $idp_shibboleth_sso_binding" >> "$tmp_log_file"
    echo "$curl_error_code $curl_output $idp_shibboleth_sso_location $idp_shibboleth_sso_binding"
    return 0
}
#######################################################################
#
# A native BASH implementation of DEFLATE compression (RFC 1951)
#
# Usage:
# deflate [-v] -T TMP_DIR STRING_TO_DEFLATE
# where
# TMP_DIR is a temporary working directory
# STRING_TO_DEFLATE is the actual string to be deflated
#
# This implementation leverages the fact that the popular tool gzip
# relies on DEFLATE compression at its core. The trick is to invoke
# 'gzip --no-name', which compresses its input without storing a
# filename or timestamp in the output. This yields a (fixed) 10-byte
# header along with the usual 8-byte trailer, both of which are
# stripped from the output of the gzip command by this function. The
# end result is a DEFLATE compressed stream of bytes.
#
# See: http://stackoverflow.com/questions/27066133/how-to-create-bare-deflate-stream-from-file-in-linux
#
# Warning: This function outputs binary data. To use it interactively,
# it's probably best to base64-encode the deflated string:
# $ deflate -T $TMPDIR "hello world" | /usr/bin/base64
# y0jNyclXKM8vykkBAA==
#
#######################################################################
deflate () {
    # DEFLATE-compress (RFC 1951) the given string to stdout by running
    # gzip and stripping its 10-byte header and 8-byte trailer (see the
    # header block above).  WARNING: output is binary.
    # Returns: 0 ok, 2 usage error, 3 processing failure.

    # external dependency
    if [ "$(type -t print_log_message)" != function ]; then
        # BUGFIX: was "exit 2", which killed the caller's shell when this
        # library function was invoked from an interactive or sourcing
        # context; a library function must return, not exit.
        echo "ERROR: $FUNCNAME: function print_log_message not found" >&2
        return 2
    fi

    local verbose_mode=false
    local tmp_dir
    local tmp_log_file
    local string_to_deflate
    local n
    local exit_status
    # temporary files
    local zipfile
    local headerfile
    local trailerfile
    local noheaderfile
    local strippedfile

    local opt
    local OPTARG
    local OPTIND
    while getopts ":vT:" opt; do
        case $opt in
            v)
                verbose_mode=true
                ;;
            T)
                tmp_dir="$OPTARG"
                ;;
            \?)
                echo "ERROR: $FUNCNAME: Unrecognized option: -$OPTARG" >&2
                return 2
                ;;
            :)
                echo "ERROR: $FUNCNAME: Option -$OPTARG requires an argument" >&2
                return 2
                ;;
        esac
    done

    # a temporary directory is required
    if [ -z "$tmp_dir" ]; then
        echo "ERROR: $FUNCNAME: no temporary directory specified" >&2
        return 2
    fi
    if [ ! -d "$tmp_dir" ]; then
        echo "ERROR: $FUNCNAME: directory does not exist: $tmp_dir" >&2
        return 2
    fi
    tmp_log_file="$tmp_dir/${FUNCNAME}_log"
    $verbose_mode && echo "$FUNCNAME using temporary directory $tmp_dir" > "$tmp_log_file"

    # exactly one argument: the string to compress
    shift $(( OPTIND - 1 ))
    if [ $# -ne 1 ]; then
        echo "ERROR: $FUNCNAME: wrong number of arguments: $# (1 required)" >&2
        return 2
    fi
    string_to_deflate="$1"
    if [ -z "$string_to_deflate" ] ; then
        echo "ERROR: $FUNCNAME: empty string" >&2
        return 2
    fi
    $verbose_mode && echo "$FUNCNAME deflating string $string_to_deflate" >> "$tmp_log_file"

    zipfile="$tmp_dir/${FUNCNAME}_junk.gz"
    headerfile="$tmp_dir/${FUNCNAME}_junk.gz.header"
    trailerfile="$tmp_dir/${FUNCNAME}_junk.gz.trailer"
    noheaderfile="$tmp_dir/${FUNCNAME}_junk.gz.no-header"
    strippedfile="$tmp_dir/${FUNCNAME}_junk.gz.stripped"
    if $verbose_mode; then
        echo "$FUNCNAME using temporary file $zipfile" >> "$tmp_log_file"
        echo "$FUNCNAME using temporary file $headerfile" >> "$tmp_log_file"
        echo "$FUNCNAME using temporary file $trailerfile" >> "$tmp_log_file"
        echo "$FUNCNAME using temporary file $noheaderfile" >> "$tmp_log_file"
        echo "$FUNCNAME using temporary file $strippedfile" >> "$tmp_log_file"
    fi

    # compress with no filename or timestamp stored in the output,
    # which yields a (fixed) 10-byte header and the usual 8-byte trailer.
    # Fall back to plain "gzip" when the _GZIP override is unset
    # (the original produced a broken command line in that case).
    echo -n "$string_to_deflate" | ${_GZIP:-gzip} -q --no-name > "$zipfile"
    exit_status=$?
    if [ $exit_status -ne 0 ]; then
        echo "ERROR: $FUNCNAME: gzip failed ($exit_status)" >&2
        return 3
    fi

    # strip (and save) the 10-byte header
    /bin/cat "$zipfile" | ( /bin/dd of="$headerfile" bs=1 count=10 2>/dev/null; /bin/cat > "$noheaderfile" )
    exit_status=$?
    if [ $exit_status -ne 0 ]; then
        echo "ERROR: $FUNCNAME: unable to strip header ($exit_status)" >&2
        return 3
    fi

    # compute the size (in bytes) of the remaining file
    n=$( /bin/cat "$noheaderfile" | /usr/bin/wc -c )
    exit_status=$?
    if [ $exit_status -ne 0 ]; then
        echo "ERROR: $FUNCNAME: unable to compute file size ($exit_status)" >&2
        return 3
    fi

    # strip (and save) the 8-byte trailer
    # (modern $(( )) arithmetic replaces the deprecated $[ ] form)
    /bin/cat "$noheaderfile" | ( /bin/dd of="$strippedfile" bs=1 count=$(( n - 8 )) 2>/dev/null; /bin/cat > "$trailerfile" )
    exit_status=$?
    if [ $exit_status -ne 0 ]; then
        echo "ERROR: $FUNCNAME: unable to strip trailer ($exit_status)" >&2
        return 3
    fi

    # sanity check: header + stripped + trailer must reassemble the gzip file
    /bin/cat "$headerfile" "$strippedfile" "$trailerfile" | /usr/bin/diff -q "$zipfile" - >&2
    exit_status=$?
    if [ $exit_status -ne 0 ]; then
        echo "ERROR: $FUNCNAME: diff failed ($exit_status)" >&2
        return 3
    fi

    # return the DEFLATE-compressed input string
    /bin/cat "$strippedfile"
}
#######################################################################
# This function is intentionally not documented
#######################################################################
# Emit a SAML V2.0 <samlp:AuthnRequest> XML document on stdout.
# Globals (required): SAML2_SP_ENTITY_ID, SAML2_SP_ACS_URL, SAML2_SP_ACS_BINDING
# Arguments:          $1 - IdP SSO endpoint location (Destination attribute)
# Returns:            0 on success, 2 on usage error, 3 on failure
construct_SAML2_AuthnRequest () {
    # check global env vars
    if [ -z "$SAML2_SP_ENTITY_ID" ]; then
        echo "ERROR: $FUNCNAME requires env var SAML2_SP_ENTITY_ID" >&2
        return 2
    fi
    if [ -z "$SAML2_SP_ACS_URL" ]; then
        echo "ERROR: $FUNCNAME requires env var SAML2_SP_ACS_URL" >&2
        return 2
    fi
    if [ -z "$SAML2_SP_ACS_BINDING" ]; then
        echo "ERROR: $FUNCNAME requires env var SAML2_SP_ACS_BINDING" >&2
        return 2
    fi

    local message_id
    local exit_status
    local dateStr
    local idp_sso_location

    # make sure there are the correct number of command-line arguments
    if [ $# -ne 1 ]; then
        echo "ERROR: $FUNCNAME: incorrect number of arguments: $# (1 required)" >&2
        return 2
    fi
    idp_sso_location="$1"

    # compute value of ID XML attribute
    # (40 bytes of pseudo-random alphanumeric characters)
    # BUGFIX: "dd bs=4 count=10" can short-read from a pipe and yield
    # fewer than 40 characters; "head -c 40" reads exactly 40 bytes.
    message_id=$( LC_CTYPE=C /usr/bin/tr -dc '[:alnum:]' < /dev/urandom \
        | /usr/bin/head -c 40
    )
    exit_status=$?
    if [ $exit_status -ne 0 ]; then
        echo "ERROR: $FUNCNAME: failed to produce message ID ($exit_status)" >&2
        return 3
    fi

    # compute value of IssueInstant XML attribute
    # (claim: use of /bin/date compatible on Mac OS and GNU/Linux)
    dateStr=$( /bin/date -u +%Y-%m-%dT%TZ )
    exit_status=$?
    if [ $exit_status -ne 0 ]; then
        echo "ERROR: $FUNCNAME: failed to produce dateTime string ($exit_status)" >&2
        return 3
    fi

    # Heredoc is intentionally un-dashed (<<) with the body at column 0,
    # so the emitted XML does not depend on tab-stripping behavior.
    /bin/cat <<SAMLAuthnRequest
<samlp:AuthnRequest
    xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
    xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
    ID="_${message_id}"
    Version="2.0"
    IssueInstant="${dateStr}"
    Destination="${idp_sso_location}"
    AssertionConsumerServiceURL="${SAML2_SP_ACS_URL}"
    ProtocolBinding="${SAML2_SP_ACS_BINDING}"
    >
    <saml:Issuer>${SAML2_SP_ENTITY_ID}</saml:Issuer>
    <samlp:NameIDPolicy AllowCreate="true"/>
</samlp:AuthnRequest>
SAMLAuthnRequest
}
| true
|
7aa4b5a19c8aa0da721945a1c2712d55423f5dfc
|
Shell
|
McLeodMoores/starling
|
/examples/examples-simulated/scripts/examples-simulated.sh
|
UTF-8
| 1,871
| 3.828125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Launcher for the examples-simulated component server.
# Resolves its own real location (through symlinks), sources the shared
# project helpers, ensures the example HSQL database exists, then
# dispatches start/stop/status/... to helpers from the sourced scripts.

# Resolve $0 to an absolute, symlink-free path. Falls back to a manual
# symlink walk for systems whose readlink lacks -f (e.g. macOS).
canonicalize() {
local _TARGET _BASEDIR
_TARGET="$0"
readlink -f $_TARGET 2>/dev/null || (
cd $(dirname "$_TARGET")
_TARGET=$(basename "$_TARGET")
while [ -L "$_TARGET" ]
do
_TARGET=$(readlink "$_TARGET")
cd $(dirname "$_TARGET")
_TARGET=$(basename "$_TARGET")
done
_BASEDIR=$(pwd -P)
echo "$_BASEDIR/$_TARGET"
)
}
# Component name is this script's file name minus ".sh".
BASENAME=${0##*/}
COMPONENT=${BASENAME%.sh}
BASEDIR="$(dirname "$(dirname "$(canonicalize "$0")")")"
SCRIPTDIR="${BASEDIR}/scripts"
cd "${BASEDIR}" || exit 1
# Shared helpers; presumably these define load_default_config,
# load_component_config, build_classpath, PROJECT/PROJECTJAR and the
# start/stop/status/debug/showconfig commands used below -- not visible here.
. ${SCRIPTDIR}/project-utils.sh
. ${SCRIPTDIR}/java-utils.sh
. ${SCRIPTDIR}/componentserver-init-utils.sh
# Create and populate the example database on first run.
if [ ! -f "${BASEDIR}"/data/masterdb/hsqldb/example-db.properties ]; then
if [ -x ${SCRIPTDIR}/init-${PROJECT}-db.sh ]; then
${SCRIPTDIR}/init-${PROJECT}-db.sh || exit 1
else
echo The ${PROJECT} database could not be found.
echo Please run ${SCRIPTDIR}/init-${PROJECT}-db.sh to create and populate the database.
echo Exiting immediately...
exit 1
fi
fi
# Read default configs
load_default_config
# Component specific default configs
CONFIG=${CONFIG:-classpath:fullstack/fullstack-examplessimulated-bin.properties}
LOGBACK_CONFIG=${LOGBACK_CONFIG:-jetty-logback.xml}
# No need to use 4g in the examples
MEM_OPTS=${MEM_OPTS:--Xms512m -Xmx1024m -XX:MaxPermSize=256m}
# User customizations
load_component_config ${PROJECT} ${COMPONENT}
# Classpath: config dir first, then optional lib/override jars, then the jar.
CLASSPATH="lib/${PROJECTJAR}"
if [ -d lib/override ]; then
CLASSPATH="$(build_classpath lib/override):${CLASSPATH}"
fi
CLASSPATH="config:${CLASSPATH}"
RETVAL=0
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status
;;
debug)
debug
;;
showconfig)
showconfig
;;
restart|reload)
stop
start
;;
*)
echo "Usage: $0 {start|stop|restart|status|showconfig|debug|reload}"
esac
exit ${RETVAL}
| true
|
058a40acfcae8e7b1744e5bd5eac80dfc8025ebf
|
Shell
|
xxyxzxxyzyxxxzxyzxyx/study_python
|
/__run__.sh
|
UTF-8
| 780
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Helper for the study_python Docker image.
# Usage: ./__run__.sh {build|run|jupyter}
#   build   - build the image from scratch (no cache, pull fresh base)
#   run     - start an interactive container as the invoking user,
#             mounting the current directory at /home
#   jupyter - launch a tokenless notebook server (intended for use
#             inside the container)
case "$1" in
"build")
  docker build \
    --rm \
    --pull \
    --no-cache \
    -t study_python \
    -f Dockerfile \
    .
  ;;
"run")
  # Mount passwd/group read-only so the bind-mounted files get sane
  # ownership; quoted expansions so paths with spaces survive.
  docker run \
    -it \
    -v /etc/passwd:/etc/passwd:ro \
    -v /etc/group:/etc/group:ro \
    -v "${PWD}":/home \
    -u "$(id -u "${USER}")":"$(id -g "${USER}")" \
    -p 8888:8888 \
    study_python \
    /bin/bash
  ;;
"jupyter")
  # NOTE(review): hard-coded home path; assumed to match the container
  # user's home -- confirm.
  /home/xxyxzxxyzyxxxzxyzxyx/.local/bin/jupyter notebook \
    --port=8888 \
    --ip=0.0.0.0 \
    --allow-root \
    --no-browser \
    --NotebookApp.token=''
  ;;
*)
  # Fixed typo in the original message ("argment" -> "argument").
  echo "undefined argument"
  ;;
esac
| true
|
67fae1874aa5bdd752570e7043b8e95eaa3dde9d
|
Shell
|
TUNL-Enge/Doc-InstallNSCLDAQ
|
/PostInstallFiles/nscldaq
|
UTF-8
| 4,159
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
### BEGIN INIT INFO
# Provides: nscldaq
# Required-Start: $network $time $named $remote_fs $syslog
# Required-Stop: $network $time $named $remote_fs $syslog
# Should-Start: nscldaq
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: NSCL data acquisition daemons
# Description: NSCL data acquisition daemons
### END INIT INFO
# LSB init script managing the NSCL DAQ port manager and ring master
# daemons via start-stop-daemon, with pidfiles kept under $PIDFILEDIR.
. /lib/lsb/init-functions
# notes on style http://www.debian.org/doc/debian-policy/ch-opersys.html
# explain some of the choices here, expecting that this will become
# part of a nscl-maintained debian package some day
# The following are defaults that can be overidden in /etc/default/nscldaq:
DAQHOME=/usr/opt/nscldaq/current
echo $DAQHOME
# load daq defaults overrides
[ -r /etc/default/nscldaq ] && . /etc/default/nscldaq
# the following do not come from /etc/default/daq and are instead
# constructed from those paths
PIDFILEDIR=/home/daq/Live
#export TCLLIBPATH="${DAQHOME}/Scripts" #not sure why this is here, nothing uses it
BIT3DEVICES=0 # Number of bit 3 devices.
#PORTMGRSWITCHES="-log ${PIDFILEDIR}/portman.log -portfile ${PIDFILEDIR}/test.port -pidfile ${PIDFILEDIR}/pid"
PORTMGRSWITCHES=""
PORTMANAGER="${DAQHOME}/bin/DaqPortManager"
PORTMANAGERNAME="nscldaq-portmaster"
PORTMANAGERPIDFILE="${PIDFILEDIR}/${PORTMANAGERNAME}.pid"
RINGMASTER="${DAQHOME}/bin/RingMaster"
RINGMASTERNAME="nscldaq-ringmaster"
RINGMASTERPIDFILE="${PIDFILEDIR}/${RINGMASTERNAME}.pid"
# escape hatch in case nscldaq is not installed but this script is
# (should be marked as a conffile in dpkg if this came from a deb
# so this is a legitimate state to be in...)
test -f "${PORTMANAGER}" || exit 0
test -f "${RINGMASTER}" || exit 0
# make us a directory to stash pidfiles in
mkdir -p "${PIDFILEDIR}"
# Launch the DaqPortManager in the background; start-stop-daemon records
# its PID in ${PORTMANAGERPIDFILE}.
start_portmanager() {
### we should only reach this if ${PORTMANAGER} already exists
/sbin/start-stop-daemon --start \
--quiet \
--pidfile "${PORTMANAGERPIDFILE}" --make-pidfile --background \
--startas "${PORTMANAGER}" -- ${PORTMGRSWITCHES}
return $?
}
# Stop the port manager (retrying for up to 5s) and, on success, remove
# its now-stale pidfile.
stop_portmanager() {
/sbin/start-stop-daemon --stop --retry 5 --oknodo \
--quiet \
--pidfile "${PORTMANAGERPIDFILE}"
retval=$?
if test "0" = "${retval}"
then
rm -f "${PORTMANAGERPIDFILE}"
fi
return ${retval}
}
# Launch the RingMaster in the background, recording its PID.
start_ringmaster() {
### we should only reach this if ${RINGMASTER} already exists
/sbin/start-stop-daemon --start \
--quiet \
--pidfile "${RINGMASTERPIDFILE}" --make-pidfile --background \
--startas "${RINGMASTER}"
return $?
}
# Stop the ring master (retrying for up to 5s) and clean up its pidfile.
stop_ringmaster() {
/sbin/start-stop-daemon --stop --retry 5 --oknodo \
--quiet \
--pidfile "${RINGMASTERPIDFILE}"
retval=$?
if test "0" = "${retval}"
then
rm -f "${RINGMASTERPIDFILE}"
fi
return ${retval}
}
# Standard LSB actions. 'status' exits non-zero if either daemon is down.
case "$1" in
start)
log_daemon_msg "Starting DAQ daemons"
log_progress_msg "${PORTMANAGERNAME}"
start_portmanager
log_progress_msg "${RINGMASTERNAME}"
start_ringmaster
log_end_msg 0
;;
stop)
log_daemon_msg "Stopping DAQ daemons"
log_progress_msg "${RINGMASTERNAME}"
stop_ringmaster
log_progress_msg "${PORTMANAGERNAME}"
stop_portmanager
log_end_msg 0
;;
status)
unset status
log_begin_msg "${PORTMANAGERNAME} "
status_of_proc -p "${PORTMANAGERPIDFILE}" "${PORTMANAGERNAME}" " process" && status="0" || status="$?"
if test "0" = "${status}"
then
echo ${PORTMANAGERNAME} PID: `cat "${PORTMANAGERPIDFILE}"`
else
echo ${PORTMANAGERNAME} not running
exit ${status}
fi
unset status
log_begin_msg "${RINGMASTERNAME} "
status_of_proc -p "${RINGMASTERPIDFILE}" "${RINGMASTERNAME}" "process" && status="0" || status="$?"
if test "0" = "${status}"
then
echo ${RINGMASTERNAME} PID: `cat "${RINGMASTERPIDFILE}"`
else
echo ${RINGMASTERNAME} not running
exit ${status}
fi
unset status
;;
restart|force-reload)
log_begin_msg "Restarting DAQ daemons:"
echo
$0 stop
$0 start
log_end_msg $?
;;
*)
echo "Usage: $0 {start|stop|status|restart|force-reload}" >&2
exit 1
esac
exit 0
| true
|
93a64e6a0950b4ff906e634909ea8e7f0c19adde
|
Shell
|
WZQ1397/config
|
/Kubernetes/yml/tsjr/java/prd-deploy/deploy-jdk8.sh
|
UTF-8
| 508
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy one tsjr Java microservice: rename the uploaded jar to the
# conventional tsjr-<service>-<port>.jar name, start it with fixed JVM
# sizing, then tail its log to show startup output.
# Arguments: $1 service name, $2 service port, $3 original jar file name.
service=$1
svcport=$2
oriname=$3
deployhome=/tsjr-data/deploy-java/
jar_name=tsjr-$service-$svcport
cd /tsjr-data/deploy-java
#if [[ ! -d $deployhome$jar_name ]];
#then
# mkdir $deployhome$jar_name
#fi
mv $oriname $jar_name.jar
# 512m fixed heap, small metaspace; service port passed via Spring Boot's
# --server.port. NOTE(review): all paths are environment-specific -- confirm.
nohup /tsjr-data/jdk1.8/bin/java -server -Xms512m -Xmx512m -Xss256k -XX:MetaspaceSize=64m -XX:MaxMetaspaceSize=128m -jar $deployhome$jar_name.jar --server.port=$svcport > /data/tsjinrong/logs/$jar_name.log &
# Give the JVM a moment to boot, then show the first log lines.
sleep 5
tail -50 /data/tsjinrong/logs/$jar_name.log
| true
|
a01b9b4cec36cf3ee5aeee867f2b01c5089e1ba8
|
Shell
|
saurabhchopade/ShellScripting-Program
|
/SequencesProblem/randomsingledigit.sh
|
UTF-8
| 116
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Print one pseudo-random decimal digit (0-9) derived from bash's $RANDOM.
singleNum=$(( RANDOM % 10 ))
printf '%s\n' "$singleNum"
| true
|
17ceebc8ff24ea3a93c929d5d609847211d1a3e3
|
Shell
|
zharley/stack
|
/etc/aliases
|
UTF-8
| 7,037
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Personal alias collection: file helpers, coding/VCS shortcuts, security,
# OS-specific conveniences and a few toys. Sourced by the shell rc files.
################################################################################
## FILE HELPERS ################################################################
################################################################################
# detailed file list
alias ll='ls -lah'
# show details of a file (including access rights in octal)
alias d='stat -c "%A (%a) %8s %.19y %n"'
# rsync -a archive, -v verbose, -z compressed, -h human, -P progress and partial
alias rcopy='rsync -avzhP'
# with --delete (delete extraneous files from dest dirs BEFORE transfer)
alias rcopyd='rsync -avzhP --delete'
# extracts a zipped tar file
alias untar='tar xvzf'
# reverse lines in file
alias reverse-file="awk '{ a[NR]=\$0 } END { for(i=NR; i; --i) print a[i] }'"
# count files in all subdirectories
alias count-files="find . -type f | wc -l"
# convenient date and timestamps
# %F is equivalent to %Y-%m-%d
# %H is replaced by the hour (24-hour clock) as a decimal number (00-23)
# %M is replaced by the minute as a decimal number (00-59)
# %S is replaced by the second as a decimal number (00-60)
# NOTE(review): 'alias -g' (global alias) is zsh-only; under the bash
# shebang above these three lines would fail -- confirm the intended shell.
alias -g stamp='date +%F-%H%M%S'
alias -g stampd='date +%F'
alias -g stampy='date "+%-d-%b-%Y at %-I:%M%p"'
################################################################################
## CODING ######################################################################
################################################################################
# revision control shortcuts
alias g='git-auto'
alias gl="git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit"
alias gld="git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit -p"
alias gd='git diff'
alias gdd='git diff --ignore-space-change'
alias st="svn --ignore-externals status"
alias sc="svn commit"
alias sl="svn log | less"
alias sup="svn up --ignore-externals"
alias svd="svn diff | colordiff | less -R"
alias svn-add-all="svn status | awk '{if (\$1 == \"?\") print \$2 }' | xargs svn add"
alias svn-del-all="svn status | awk '{if (\$1 == \"!\") print \$2 }' | xargs svn delete"
# debugging SSL
# e.g. show-certs smtpout.secureserver.net:465
alias show-certs='openssl s_client -showcerts -connect'
# e.g. msmtp-certs --host=smtpout.secureserver.net
alias msmtp-certs='msmtp --tls-certcheck=off --tls=on --tls-starttls=off --port=465 --serverinfo'
# tidy up xml/html e.g. tidy-xml file.html
alias tidy-xml='tidy -i -xml'
# tunnel MySQL on port 9999
alias mysql-tunnel='ssh -L 9999:127.0.0.1:3306'
# spider a site
#
# -nc Normally new copies of files are downloaded, but here the original
# version is preserved and any newer copies on the server to be ignored.
# Files with the suffixes .html or .htm will be loaded from the local
# disk and parsed as if they had been retrieved from the Web.
#
# -nv Turn off verbose without being completely quiet (use -q for that), which
# means that error messages and basic information still get printed.
#
# -r Turn on recursive retrieving.
#
# --random-wait Vary wait time between requests between 0.5 and 1.5 seconds.
#
# --wait=seconds Wait the specified number of seconds between the retrievals.
#
# -l depth Specify recursion maximum depth level depth. Default is 5.
#
# -R Specify comma-separated lists of file name suffixes or patterns to reject.
#
# -np Do not ever ascend to the parent directory when retrieving recursively.
# This is a useful option, since it guarantees that only the files below a
# certain hierarchy will be downloaded.
#
alias spider='wget -nc -nv -r --random-wait --wait=1 -l9 -np'
alias spider-html='wget -nc -nv -r --random-wait --wait=1 -l9 -R .jpg,.JPG,.gif,.GIF,.png,.PNG -np'
# watch particular vhost log files
alias tail-vhost-access='sudo tail -f /var/log/apache2/vhost-access.log'
alias tail-vhost-error='sudo tail -f /var/log/apache2/vhost-error.log'
# note taking
alias n="note"
alias ng="note | grep"
alias np="history | cut -c 8- | tail -n 1 | cat >> ~/.note && tail ~/.note"
# add an "alert" alias for long running commands. Use like so:
# sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# find in files
alias ff='ack-grep'
alias ffi='ack-grep -i'
# vanilla vim
alias vv='vim -u NONE'
################################################################################
## SECURITY ####################################################################
################################################################################
# generate passwords
alias pw-secure='pwgen --secure 32 1'
alias pw-symbols='pwgen --secure --symbols 32 1'
alias pw-numbers='ruby -e "puts rand(2 ** 100)"'
# encryption
# e.g.
# encrypt-salted in.key | xxd
# unhex-typed typed.hex | decrypt-stream > out.key
alias encrypt-salted='openssl aes-256-cbc -salt -in'
alias unhex-typed='xxd -r -p'
alias decrypt-stream='openssl aes-256-cbc -d'
################################################################################
## OSX #########################################################################
################################################################################
# open in new tab in existing gvim
alias e="mvim --remote-tab"
# resize terminal
alias termsize="osascript -e 'tell app \"iTerm\" to set bounds of the first window to {0, 0, 1024, 768}'"
################################################################################
## UBUNTU ######################################################################
################################################################################
# find matching packages
alias apt-find='apt-cache search'
# install headers for the current kernel
alias apt-install-kernel-headers='sudo apt-get install linux-headers-`uname -r`'
# update and upgrade in one command
alias upgrade='sudo sh -c "apt-get update && apt-get upgrade"'
################################################################################
## DEVICES #####################################################################
################################################################################
# show info on connected SATA drives
alias sd='dmesg | grep "\[sd.\]"'
# show battery information
alias batt="acpi | awk '{ print \$4 \$5 }' | sed 's/,/ /'"
################################################################################
## FUN #########################################################################
################################################################################
# sounds
alias sound-toot='play -n synth 0.3 sine 300-3300 vol 0.1 &> /dev/null'
alias sound-alarm='play -n synth 2 sine 300-3000 sine 3000-300 repeat 20 vol 0.1 &> /dev/null'
| true
|
59d5fb156ea456c67e8be96625d2e9c2cde071ee
|
Shell
|
qypea/q-configs
|
/home-bin/bin/monitor-network-blink.sh
|
UTF-8
| 904
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Monitor internet reachability and show the state on a blink(1) USB LED:
# green = online, red = offline (with hysteresis via an error score),
# fading yellow if the script itself stops running.
set -e
set -u
blink="blink1-tool --quiet --brightness=128"
${blink} --off
last_color="off"
# Error score kept in 0..5: +2 per failed ping, -1 per loop iteration,
# so 3+ means at least ~3 of the last 5 probes failed.
errors=0
while true; do
# Try to ping, count errors
ping -c 1 www.google.com -D -W 5 > /dev/null 2>&1 \
|| errors=$(( ${errors} + 2 ))
# Decay errors over time if we're good
errors=$(( ${errors} - 1 ))
# Keep errors in 0..5 range
if [[ ${errors} -lt 0 ]]; then
errors=0
fi
if [[ ${errors} -gt 5 ]]; then
errors=5
fi
# If 3/5 are bad we're broken
if [[ ${errors} -ge 3 ]]; then
color=red
else
color=green
fi
# Log, set color
if [[ ${color} != ${last_color} ]]; then
logger --tag monitor-network-blink ${color}
last_color=${color}
fi
${blink} --${color}
sleep 1
# Fade yellow over time if this script exits
# (each iteration restarts a 60s fade-to-yellow; as long as the loop is
# alive the solid color above immediately overrides it)
${blink} --yellow --millis=60000
sleep 2
done
| true
|
cbe4020f0000741622a325a8eb526f457b42e5ee
|
Shell
|
bearpelican/cluster
|
/connect_helper.sh
|
UTF-8
| 342
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Helper to automatically attach to TMUX on ssh.
# Usage: connect_helper.sh <identity_file> <user> <host>
# See
# https://stackoverflow.com/questions/7114990/pseudo-terminal-will-not-be-allocated-because-stdin-is-not-a-terminal
# https://stackoverflow.com/questions/1376016/python-subprocess-with-heredocs
#
# Build the command as an array so arguments containing spaces (e.g. a key
# path) stay intact; the original string + word-splitting form broke on them.
cmd=(ssh -t -i "$1" -o StrictHostKeyChecking=no "$2@$3" tmux a)
echo "${cmd[*]}"
"${cmd[@]}"
| true
|
9107337afba8b79af12f91c528d02c24b40563eb
|
Shell
|
derhuerst/vbb-lines
|
/download.sh
|
UTF-8
| 334
| 2.59375
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# Fetch the latest VBB GTFS feed files needed by vbb-lines.
set -e
base_url='https://vbb-gtfs.jannisr.de/latest/'
# todo: use https://gist.github.com/derhuerst/745cf09fe5f3ea2569948dd215bbfe1a ?
# Download one feed file into the current directory, using a stored ETag
# ("<file>.etag") so unchanged files are not re-transferred.
download () {
curl -L --compressed --etag-compare "$1.etag" --etag-save "$1.etag" $base_url$1 -o $1
}
download 'routes.csv'
download 'trips.csv'
download 'stop_times.csv'
# Show what was fetched.
ls -lh *.csv
| true
|
4e10aa019c537252e6bf7c34653bf319f03ab7d3
|
Shell
|
Cleanshooter/react-native-oauth
|
/bin/cocoapods.sh
|
UTF-8
| 1,210
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Configure CocoaPods for react-native-oauth.
# macOS: ensure ./ios has a Podfile (seeded from the module's template,
# offering to overwrite an existing one) and run `pod install`.
# Other platforms: nothing to do.
if [ "$(uname)" == "Darwin" ]; then
  ## https://github.com/auth0/react-native-lock/blob/master/bin/cocoapods.sh
  ios_dir=`pwd`/ios
  # Skip projects without an ios directory.
  # (Fixed: the original tested the literal word "ios_dir" instead of
  # "$ios_dir" and lacked the negation, so the guard could never work.)
  if [ ! -d "$ios_dir" ]
  then
    exit 0
  fi
  podfile="$ios_dir/Podfile"
  template=`pwd`/node_modules/react-native-oauth/ios/Podfile.template
  echo "Checking Podfile in iOS project ($podfile)"
  if [ -f $podfile ]
  then
    echo ""
    echo "Found an existing Podfile, Do you want to override it? [N/y]"
    read generate_env_file
    if [ "$generate_env_file" != "y" ]
    then
      echo "Add the following pods":
      echo ""
      echo ""
      cat $template
      echo ""
      echo ""
      echo "and run 'pod install' to install OAuth for iOS"
      exit 0
    fi
    rm -f $podfile
    rm -f "$podfile.lock"
  fi
  echo "Adding Podfile to iOS project"
  cd ios
  pod init >/dev/null 2>&1
  cat $template >> $podfile
  cd ..
  echo "Installing Pods"
  pod install --project-directory=ios
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
  # Do something under GNU/Linux platform
  # Fixed: a branch containing only comments is a bash syntax error, which
  # made the entire script unparseable; ':' is an explicit no-op.
  :
elif [ "$(expr substr $(uname -s) 1 10)" == "MINGW32_NT" ]; then
  # Do something under Windows NT platform
  :
fi
| true
|
bc3c378529ada3e3d2cb359c85bee871c2df27fc
|
Shell
|
RealMeZJT/Sheller
|
/oneCommit.sh
|
UTF-8
| 212
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# Bundle the usual git commit steps into one command.
# Usage: oneCommit.sh [commit-message]
# Default commit message used when none is supplied.
comment="without comment"
# A single argument overrides the default message.
if [ $# -eq 1 ]
then
comment="$1"
fi
git add -A
git commit -m "$comment"
# Usually pushing automatically is not recommended, so this stays disabled:
# git push origin master
| true
|
68189d4650784a1a7fa103953e8adb2c6bedfd41
|
Shell
|
matigastirami/sistemas-operativos-1c-2020
|
/tp1/EJ1/EJ1.sh
|
UTF-8
| 3,728
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Report simple statistics about a readable file, chosen by a one-letter
# mode: L = line count, C = character count, M = longest-line length.
# Usage: EJ1.sh <file> {L|C|M}   (messages are intentionally in Spanish)
ErrorS()
{
echo "Error. La sintaxis del script es la siguiente:"
echo "Para saber numeros de lineas del archivo: $0 nombre_archivo L" # usage: line count mode
echo "Para saber el numero de caracteres del archivo: $0 nombre_archivo C" # usage: character count mode
echo "Para saber la longitud de la linea mas larga del archivo: $0 nombre_archivo M" # usage: longest-line mode
}
ErrorP()
{
echo "Error. nombre_archivo no existe o no se puede acceder para lectura" # file missing / unreadable
}
if test $# -lt 2; then # fewer than 2 parameters given?
ErrorS # print the usage message
fi
if test ! -r $1; then # negated test: is the first parameter NOT readable?
ErrorP # report the unreadable/missing file
elif test -f "$1" && (test "$2" = "L" || test "$2" = "C" || test "$2" = "M") ;then # first param is a regular file and mode is "L", "C" or "M"
if test $2 = "L"; then # mode "L"?
res=`wc -l $1` # line count of the file into res
echo "numero de lineas del archivo: $res" # print line count
elif test $2 = "C"; then # mode "C"?
res=`wc -m $1` # character count of the file into res
echo "numero de caracteres del archivo: $res" # print character count
elif test $2 = "M"; then # mode "M"?
res=`wc -L $1` # length of the longest line into res
echo "longitud de la linea mas larga: $res" # print longest-line length
fi
else
ErrorS # unknown mode: print usage
fi
# a- cual es el objetivo de este script?
# el objetivo de este script es primero validar que la cantidad de parametros ingresados no sea menor que dos, luego que el primer parametro ingresado sea un archivo con permisos de lectura y no un directorio, luego validar que el segundo parametro sea ("L" o "C" o "M") para luego mostrar por pantalla el numero de lineas o el numero de caracteres o la longitud mas larga del archivo dependiendo de que letra se ingreso como segundo parametro.
# b- que parametros recibe?
# recibe dos parametros: el primero es la direccion del archivo y segundo una letra (L,C,M) que sirve para mostrar una caracteristica del archivo.
# c- comentar el codigo segun la funcionalidad
# la logica del codigo seria que si al principio no ingresamos 2 parametros nos muestra un mensaje, si ingresamos 2 parametros primero verifica que el primer parametro sea un archivo y si lo es verifica cual letra es el segundo parametro. Si la letra coincide con (L,C,M) realizara una de las siguientes funciones: mostrar numeros de lineas, mostrar longitud de la linea mas larga o mostrar el numero de palabras.
# d- completar los echo con los mensajes correspondientes
# e- ¿Qué información brinda la variable “$#”? ¿Qué otras variables similares conocen?
# $# : es el numeros de argumentos que le pasa al script al ejecutarlo
# $? : la salida del ultimo proceso que se ha ejecutado
# $$ : el id del proceso del script
# $0 : representa el nombre del script
# f- Explique las diferencias entre los distintos tipos de comillas que se pueden utilizar en Shell scripts.
# " ": Lo que esté dentro de las comillas dobles se toma literalmente a excepción de los caracteres $ (sustitución de variable), ` (sustitución de comando) y \ (caracter de escape).
# ' ': Las comillas simples (') también se usan para encerrar frases sin embargo todo lo que esta dentro de las comillas simples es tomado literalmente.
# ` `: Las comillas invertidas (`) son interpretadas como sustitución de comandos, es decir, los comandos que estén dentro serán pasados al comando que lo preceden.
| true
|
d6a1f59aa2ca3b4293912b0ca63791e3a2ff575c
|
Shell
|
ChenXinhao/acm-compiler-judge
|
/JudgeServer/runner_compile.bash
|
UTF-8
| 219
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
# Judge runner: execute one compiler stage and record its outputs.
cd /compiler
# Wall-clock start in nanoseconds.
st=$(date +%s%N)
# Run the graded stage ($1 selects the .bash driver), feeding the test
# program and capturing stdout/stderr plus the exit code for the judge.
bash $1.bash < /testrun/program.txt 1> /testrun/stdout.txt 2> /testrun/stderr.txt
echo $? > /testrun/exitcode.txt
ed=$(date +%s%N)
# Elapsed time converted to microseconds.
dt=$((($ed - $st)/1000))
echo "$dt" > /testrun/time_us.txt
| true
|
63a7e9de5459b6a6c5c02671d0528175b0d3ad18
|
Shell
|
vplagnol/eQTL_scripts
|
/gwas_scripts/defunct/sumstatsubmitcomplete.sh
|
UTF-8
| 1,261
| 2.78125
| 3
|
[] |
no_license
|
######## important to set this before submission, maybe link here via a bash script from project folder.
project="WHII"
########
# Generates, per chromosome, an R script (template + injected parameters)
# and a matching SGE submission script under $output_directory.
shopt -s expand_aliases
source ~/.bashrc
export PATH=${PATH}:/share/apps/R-3.0.2/bin
alias R=/share/apps/R-3.0.2/bin/R
output_directory="/cluster/project8/jonathan/testscripts/"$project"/summaryStats/submissionScripts"
rm -r $output_directory
mkdir $output_directory
cd $output_directory
for chr in {1..22}
do
Rscriptname="script_"$chr"_sumstat.R"
scriptname="script_"$chr"_sumstat.sh"
cp /cluster/project8/vyp/eQTL_integration/scripts/gwas/summaryStats/scripts/sumstattemplate.R $Rscriptname
rOutputFileName="biom_chr"$chr".Rdata"
# Prepend the per-chromosome parameters to the copied R template.
rInput='oFile <- '"'$rOutputFileName'"';chr <- "'$chr'"; project='"'$project'"''
echo $rInput | cat - $Rscriptname > temp && mv temp $Rscriptname
# Derive the .sh name from the .R name.
# (Fixed: 'f' was set from $scriptname, making ${f%R} a no-op and yielding
# "*.shsh"; it must come from the R script, which is also what R CMD BATCH
# below should run.)
f=$Rscriptname
y=${f%R}
scriptname=$y"sh"
# Write the SGE job header.
# (Fixed: the original passed this header text to `cp` as if it were a
# source file, so the header was never written.)
cat > $scriptname <<'EOF'
#$ -S /bin/sh
#$ -l h_vmem=8G
#$ -l tmem=8G
#$ -l h_rt=24:00:0
#$ -V
#$ -R y
#$ -pe smp 1
#$ -cwd
#$ -o /cluster/project8/vyp/eQTL_integration/scripts/gwas/summaryStats/cluster/output/
#$ -e /cluster/project8/vyp/eQTL_integration/scripts/gwas/summaryStats/cluster/error/
EOF
echo R CMD BATCH --no-save $f >> $scriptname
echo "Running" $f "on cluster as" $scriptname
#qsub $scriptname
done
| true
|
7a676c0484c84b869fd1cd8aa8f9b61400e32fd1
|
Shell
|
jjponz/dotfiles
|
/virtual_machines_up.sh
|
UTF-8
| 1,314
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# uso: ./virtual_machines_up NOMBRE_CARPETA_QUE_CONTIENE_LA_MAQUINA VIRTUAL
# Brings a Vagrant virtual machine up, opens a vagrant ssh session, or lists
# the available machines. VIRTUAL_MACHINES_RELATIVE_HOMEPATH must be a folder
# inside $HOME containing one sub-folder per virtual machine, e.g.
#   ./virtual_machines_up up bla   ->  vagrant up in $HOME/maquinasvirtuales/bla

function usage {
  echo "USAGE:"
  echo "virtual_machines_up up NombreCarpetaQueContieneLaMaquina\n"
  echo "Or"
  echo "virtual_machine_up up NombrecarpetaQueContieneLaMaquina ssh => to open vagrant ssh connection"
  echo "Or"
  echo "virtual_machines_up list"
}

VIRTUAL_MACHINES_RELATIVE_HOMEPATH="maquinasvirtuales"

if [ $# -gt 3 ]
then
  usage
  exit
fi

case "$1" in
  up)
    VIRTUAL_MACHINE=$2
    cd $HOME/$VIRTUAL_MACHINES_RELATIVE_HOMEPATH/$VIRTUAL_MACHINE && vagrant up
    # Honour the documented "up NAME ssh" form.
    # (Fixed: the original handled $3 in a second `case` placed after
    # unconditional `exit`s, so it was unreachable dead code and the
    # ssh-after-up feature never worked.)
    if [ "$3" = "ssh" ]
    then
      vagrant ssh
    fi
    exit
    ;;
  list)
    ls -lha $HOME/$VIRTUAL_MACHINES_RELATIVE_HOMEPATH
    exit
    ;;
  ssh)
    VIRTUAL_MACHINE=$2
    cd $HOME/$VIRTUAL_MACHINES_RELATIVE_HOMEPATH/$VIRTUAL_MACHINE && vagrant ssh
    exit
    ;;
  *)
    usage
    exit
esac
| true
|
5b7b1f5f7a692945022dc0d9651ad42c6ba7658f
|
Shell
|
treeder/gotils
|
/update.sh
|
UTF-8
| 1,171
| 3.9375
| 4
|
[
"Apache-2.0"
] |
permissive
|
# This updates Go to the latest version
# (self-elevates with sudo, scrapes the release list, installs to /usr/local/go)
set -e
# copied from: https://gist.github.com/davivcgarcia/2fea719c67f1c6282bc53df46f7add25#file-update-golang-sh Thanks!
# Checks if is running as root, and sudo if not
[ `whoami` = root ] || { sudo "$0" "$@"; exit $?; }
# Determines current local version
if [[ -f /usr/local/go/bin/go ]]; then
CURRENT=$(/usr/local/go/bin/go version | grep -oP "go\d+\.\d+(\.\d+)?")
else
CURRENT=""
fi
# Determine latest available version
# NOTE(review): scrapes godoc.org's download listing; godoc.org is
# deprecated -- confirm this endpoint still serves the release list.
LATEST=$(curl -sL https://godoc.org/golang.org/dl | grep -oP "go\d+\.\d+(\.\d+)?\s" | awk '{$1=$1};1' | sort -V | uniq | tail -n 1)
# Checks if update is required
if [[ ${CURRENT} == "${LATEST}" ]]; then
echo "System is already up to date."
exit 0
else
echo "Updating to version ${LATEST}:"
# Downloads latest tarball
curl -# https://dl.google.com/go/${LATEST}.linux-amd64.tar.gz -o /usr/local/${LATEST}.linux-amd64.tar.gz
# Remove old installation
rm -rf /usr/local/go
# Unpack tarball
tar -C /usr/local -xzf /usr/local/${LATEST}.linux-amd64.tar.gz
# Remove tarball
rm -rf /usr/local/${LATEST}.linux-amd64.tar.gz
echo "Done!"
exit 0
fi
| true
|
87db8ddd13c88e2b9bd24b11c130b6b5535d47bd
|
Shell
|
mqnoy/bash-tools
|
/backup_cacti_script.sh
|
UTF-8
| 978
| 3.328125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Back up /usr/share/cacti into a dated tar.gz under $DST_PATH_BACKUPS,
# excluding the paths listed in $EXCLUDE_PATH.
#
# usesless for exclude.txt ,i dont know :(
#
#
# mqnoy@2019
#
FILE_NAME="cacti_backup_$(date +\%d\_%m\_%Y).tar.gz"
#CACTI_LOCATION = "/usr/share/cacti/"
DST_PATH_BACKUPS="/home/backupcacti/"
#your directory
EXCLUDE_PATH="/home/_devops/exclude.txt"
echo "++++++++++++++++++++++++++++++++++++++++++++++++++"
echo "| |"
echo "| ++++++ cacti backup script exceuted +++++++++++|"
echo "| |"
echo "+++++++++++++++++++++++++++++++++++++++++++++++++|"
echo "[INFO] $DST_PATH_BACKUPS "
echo "[INFO] Creating file in $DST_PATH_BACKUPS"
#tar cpzfP "$DST_PATH_BACKUPS" /usr/share/cacti/
echo "[INFO] enter share cacti dir"
# Archive from /usr/share so paths inside the tarball start at "cacti/".
cd /usr/share
tar cpzfP "$FILE_NAME" -X "$EXCLUDE_PATH" "cacti/"
# Only move the archive into place if tar actually produced it.
if [ ! -f $FILE_NAME ]; then
echo "[ERROR] file not found ."
exit 0;
else
mv $FILE_NAME $DST_PATH_BACKUPS
echo "[SUCCESS] file $DST_PATH_BACKUPS$FILE_NAME created"
fi
| true
|
83d21081a4c8ab8dfa3197fdc7927bd5918cf32f
|
Shell
|
ianusit/guac-client
|
/files/start.sh
|
UTF-8
| 1,702
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Entrypoint for the Guacamole web-app container: writes guacamole.properties,
# wires in the PostgreSQL auth extension (initialising the schema if needed),
# and finally starts Tomcat in the foreground.
export GUACAMOLE_HOME="$HOME/.guacamole"
export GUACAMOLE_EXT="$GUACAMOLE_HOME/extensions"
export GUACAMOLE_LIB="$GUACAMOLE_HOME/lib"
export GUACAMOLE_PROPERTIES="$GUACAMOLE_HOME/guacamole.properties"
set_property() {
# Append a "<name>: <value>" line to $GUACAMOLE_PROPERTIES, creating the
# file (with a generated-on header) and its parent directory on first use.
property_name="$1"
property_value="$2"
# First write: make sure guacamole.properties exists.
if [ ! -e "$GUACAMOLE_PROPERTIES" ]; then
mkdir -p "$GUACAMOLE_HOME"
echo "# guacamole.properties - generated $(date)" > "$GUACAMOLE_PROPERTIES"
fi
echo "$property_name: $property_value" >> "$GUACAMOLE_PROPERTIES"
}
# Point guacamole.properties at PostgreSQL, link the JDBC driver and auth
# extension into GUACAMOLE_LIB/EXT, and create the Guacamole schema when
# the database does not contain it yet.
associate_postgresql() {
set_property "postgresql-hostname" "$POSTGRES_HOSTNAME"
set_property "postgresql-port" "$POSTGRES_PORT"
set_property "postgresql-database" "$POSTGRES_DATABASE"
set_property "postgresql-username" "$POSTGRES_USER"
set_property "postgresql-password" "$POSTGRES_PASSWORD"
# Add required .jar files to GUACAMOLE_LIB and GUACAMOLE_EXT
ln -s /opt/guacamole/postgresql/postgresql-*.jar "$GUACAMOLE_LIB"
ln -s /opt/guacamole/postgresql/guacamole-auth-*.jar "$GUACAMOLE_EXT"
export PGPASSWORD=$POSTGRES_PASSWORD
# Presence of the guacamole_user table marks an already-initialised schema.
if [[ `psql -h $POSTGRES_HOSTNAME -p $POSTGRES_PORT -U $POSTGRES_USER -tAc "SELECT 1 FROM information_schema.tables WHERE table_name='guacamole_user'"` == "1" ]]
then
echo "Database already exists"
else
echo "Database does not exist - Creating"
cat /opt/guacamole/postgresql/schema/*.sql | psql -h $POSTGRES_HOSTNAME -p $POSTGRES_PORT -U $POSTGRES_USER
fi
}
# Run Tomcat in the foreground, replacing this shell via exec.
start_guacamole() {
cd /usr/local/tomcat
# exec bash
exec catalina.sh run
}
# Rebuild GUACAMOLE_HOME from scratch on every container start.
rm -Rf "$GUACAMOLE_HOME"
mkdir -p "$GUACAMOLE_EXT"
mkdir -p "$GUACAMOLE_LIB"
# guacd address is taken from Docker-link style environment variables.
set_property "guacd-hostname" "$GUACD_PORT_4822_TCP_ADDR"
set_property "guacd-port" "$GUACD_PORT_4822_TCP_PORT"
associate_postgresql
start_guacamole
| true
|
f692a12d14a41fa7669e9f379c869f8bc9b940fc
|
Shell
|
cloudtools/stacker
|
/tests/test_suite/05_stacker_build-missing_environment_key.bats
|
UTF-8
| 666
| 3.34375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bats
# Bats regression test: `stacker build` must fail with a MissingEnvironment
# error when the config interpolates a key the environment file lacks.
load ../test_helper
@test "stacker build - missing environment key" {
# Environment fixture: deliberately missing "vpc_public_subnets".
environment() {
cat <<EOF
vpc_private_subnets: 10.128.8.0/22,10.128.12.0/22,10.128.16.0/22,10.128.20.0/22
EOF
}
# Config fixture referencing the missing key.
# NOTE(review): "\${vpc_private_subnets" below is missing its closing
# brace -- confirm whether that is intentional for this test.
config() {
cat <<EOF
namespace: ${STACKER_NAMESPACE}
stacks:
- name: vpc
class_path: stacker.tests.fixtures.mock_blueprints.VPC
variables:
PublicSubnets: \${vpc_public_subnets}
PrivateSubnets: \${vpc_private_subnets
EOF
}
# Create the new stacks.
stacker build <(environment) <(config)
assert ! "$status" -eq 0
assert_has_line "stacker.exceptions.MissingEnvironment: Environment missing key vpc_public_subnets."
}
| true
|
ad7f09d164bfa0ac383910ed895cd9aaf28fcf6b
|
Shell
|
serenitylinux/xorg
|
/polkit-0.112-1.pie
|
UTF-8
| 876
| 2.765625
| 3
|
[] |
no_license
|
# vim: ft=sh
name="polkit"
iteration="1"
version="0.112"
desc="Application development toolkit for controlling system-wide privileges"
bdeps=('glib2' 'pam' 'expat' 'systemd' 'mozjs')
deps=('glib2' 'pam' 'expat' 'systemd')
arch=('any')
flags=()
src=('http://www.freedesktop.org/software/polkit/releases/polkit-0.112.tar.gz')
function configure() {
./configure \
--prefix=/usr \
--sysconfdir=/etc \
--localstatedir=/var \
--libexecdir=/usr/lib/polkit-1 \
--enable-libsystemd-login=yes \
--disable-man-pages \
--disable-static
}
#build default
function installpkg() {
default
chown 102 $dest_dir/etc/polkit-1/rules.d
chown 102 $dest_dir/usr/share/polkit-1/rules.d
}
# Create the polkitd system group and user (uid/gid 102) and lock the
# account so it cannot be logged into.
function post_install() {
    getent group polkitd || groupadd -g 102 polkitd
    # BUG FIX: the user must be created with useradd; the original invoked
    # groupadd with user-only flags (-c/-d/-s), which always fails, leaving
    # the polkitd account missing.
    getent passwd polkitd || useradd -u 102 -c "Polkitd" -g polkitd -d '/' -s /bin/false polkitd
    passwd -l polkitd
}
| true
|
305014d31d42b09929fc75230eb0ce4b3cd28fdf
|
Shell
|
aronduan/jacocodemo
|
/jacaco_test.sh
|
UTF-8
| 1,125
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Pull a JaCoCo coverage file from a connected Android device and render an
# HTML report with macaca. Assumes the current directory is the project root
# (the original comment said: run from the Project/app environment).
apk_path=`pwd`/app/build/outputs/apk/app-debug.apk
report_path=`pwd`/reporter/index.html
# The commented-out steps below (re)build, install and exercise the app to
# produce fresh coverage data before it is pulled.
#echo "打包app"
#gradle assembleDebug
#adb uninstall com.weex.jasso
#echo "安装app"
#adb install ${apk_path}
#echo "启动app"
#adb shell am start -W -n com.weex.jasso/.Test1Activity -a android.intent.action.MAIN -c android.intent.category.LAUNCHER -f 0x10200000
#sleep 2
#echo "关闭app"
#adb shell am force-stop com.weex.jasso
# Discard artifacts from any previous run.
rm -rf `pwd`/new.ec
rm -rf `pwd`/report
# Fetch the raw execution data written by the instrumented app.
adb pull /sdcard/jacoco/coverage.ec `pwd`/new.ec
macaca coverage -r java -f `pwd`/new.ec -c `pwd`/app/build/intermediates/classes/debug -s `pwd`/app/src/main/java --html `pwd`/reporter
echo "jacoco报告地址:"${report_path}
open -a "/Applications/Safari.app" ${report_path}
# Alternative: generate the report with the JaCoCo gradle plugin instead.
# Copy coverage.ec into build/outputs/code-coverage/connected first:
#adb pull /sdcard/jacoco/coverage.ec `pwd`/app/build/outputs/code-coverage/connected/
#gradle jacocoTestReport
# The report is then written under build/reports/jacoco/jacocoTestReport.
| true
|
647c5d7b6f55515b971f23c37a7701d1bd6ad6f2
|
Shell
|
mkt3/dotfiles
|
/scripts/borg/setup.sh
|
UTF-8
| 437
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -eu

# Symlink the borg configuration directory into $XDG_CONFIG_HOME and install
# its systemd user units (service + timer).
setup_borg() {
    title "Setting up borg"

    local src_dir="${CONFIGS_DIR}/borg"
    local unit_dir="${XDG_CONFIG_HOME}/systemd/user"

    info "Creating symlink for borg"
    ln -sfn "$src_dir" "$XDG_CONFIG_HOME"

    info "Adding systemd"
    mkdir -p "$unit_dir"
    local unit
    for unit in borg.service borg.timer; do
        ln -sfn "${src_dir}/${unit}" "${unit_dir}/"
    done
}
| true
|
9ba99e055e16ae6c7ec347b84d162669321fceaf
|
Shell
|
atomlong/libbonoboui
|
/PKGBUILD
|
UTF-8
| 1,200
| 2.640625
| 3
|
[] |
no_license
|
# $Id$
# Maintainer: PhotonX <photon89@googlemail.com>
# Contributor: Jan de Groot <jgc@archlinux.org>
# PKGBUILD for libbonoboui, the Bonobo user-interface library (GNOME 2).
pkgname=libbonoboui
pkgver=2.24.5
pkgrel=3
pkgdesc="User Interface library for Bonobo"
arch=('i686' 'x86_64' 'arm' 'armv6h' 'armv7h' 'aarch64')
license=('GPL' 'LGPL')
depends=('libgnomecanvas' 'libgnome')
makedepends=('intltool' 'pkg-config' 'python')
options=('!emptydirs')
url="http://www.gnome.org"
# config.guess/config.sub are fetched alongside the tarball and copied over
# the stale ones shipped in the release (see prepare()).
source=(https://download.gnome.org/sources/${pkgname}/2.24/${pkgname}-${pkgver}.tar.bz2
config.guess
config.sub)
sha256sums=('fab5f2ac6c842d949861c07cb520afe5bee3dce55805151ce9cd01be0ec46fcd'
'7d1e3c79b86de601c3a0457855ab854dffd15163f53c91edac54a7be2e9c931b'
'0c6489c65150773a2a94eebaa794b079e74a403b50b48d5adb69fc6cd14f4810')
prepare() {
  # Work inside the unpacked source tree.
  cd "${srcdir}/${pkgname}-${pkgver}"
  # Replace the stale autotools platform-detection scripts shipped with the
  # tarball so the build works on newer architectures (e.g. aarch64).
  # All paths are quoted so the build does not break when $srcdir contains
  # spaces (the original expansions were unquoted).
  cp -vf "${srcdir}/config.guess" "${srcdir}/${pkgname}-${pkgver}/"
  cp -vf "${srcdir}/config.sub" "${srcdir}/${pkgname}-${pkgver}/"
}
# Configure and compile with the standard Arch filesystem layout; static
# libraries are disabled.
build() {
cd "${srcdir}/${pkgname}-${pkgver}"
./configure --prefix=/usr --sysconfdir=/etc \
--localstatedir=/var --disable-static
make
}
# Install into the packaging root and remove the stray desktop entry that
# upstream ships for the bonobo browser tool.
package() {
cd "${srcdir}/${pkgname}-${pkgver}"
make DESTDIR="${pkgdir}" install
rm -f "${pkgdir}/usr/share/applications/bonobo-browser.desktop"
}
| true
|
34532baeefe54c8691b37c36d5cfd6fb1918807c
|
Shell
|
goliatone/vagrant-dev-bootstrap
|
/vm/shell/post-setup.sh
|
UTF-8
| 672
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
########################################
# Post setup script.
# Run after puppet is done provisioning.
#
# MySQL set up:
########################################
SQL_DIR="/vagrant/sql"
echo "Running post setup script"
########################################
# Create the stats table:
# - stats_table.sql
# (currently disabled — uncomment to load the schema)
# echo "Create SQL table..."
# echo "mysql -uroot < ${SQL_DIR}/schema.sql"
# mysql -uroot < ${SQL_DIR}/schema.sql
########################################
# Create functions:
# - wmata.sql
# You can check if the functions were
# created ok with:
# `SHOW FUNCTION STATUS;`
# (currently disabled — uncomment to load seed data)
# echo "mysql -uroot < ${SQL_DIR}/seed.sql"
# mysql -uroot < ${SQL_DIR}/seed.sql
| true
|
fa6566f714195959097ab0eabbd4790af67da72f
|
Shell
|
antiface/git-r-done
|
/grd
|
UTF-8
| 1,567
| 3.984375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# grd — a tiny git-backed todo list. Configuration (TODO_FILE, DONE_FILE,
# DELIM, ...) is read from .grd/config next to this script.
source `pwd`/`dirname $0`/.grd/config

# Help text printed for unknown/missing commands (echo -e interprets \n/\t).
USAGE="usage: grd COMMAND [ARGS]\n
\n
Where COMMAND is one of:\n
\tadd\t\tAdd a todo item\n
\tfinish\t\tMark a todo item as completed\n
\tlist\t\tList all open todo items\n
\tremove\t\tRemove a todo item from the list"
# Print a message (interpreting backslash escapes, like echo -e) to stdout
# and abort the whole script with status 1.
function _error {
    printf '%b\n' "$*"
    exit 1
}
# Append a new todo item (all arguments joined with spaces) to $TODO_FILE
# and commit the change.
function add {
    # BUG FIX: quote the expansions so the item text is stored exactly as
    # given; the unquoted originals collapsed runs of whitespace and could
    # glob-expand the todo text.
    todo="$*"
    echo "$todo" >> "$TODO_FILE"
    [ $? -ne 0 ] && exit 1
    git commit -am "Added todo: $todo"
}
# Move todo item number $1 from the open list to $DONE_FILE and commit.
function finish {
    # remove() validates the ID and leaves the item's text in the global
    # $todo; the extra argument suppresses remove()'s own git commit.
    # BUG FIX: expansions are quoted so multi-word todo items survive.
    remove "$1" 0
    echo "$todo" >> "$DONE_FILE"
    [ $? -ne 0 ] && exit 1
    git commit -am "Finished todo: $todo"
}
# Print all open todos, numbered, followed by the completion percentage
# computed as done / (todo + done) items.
function list {
num_todo=`cat $TODO_FILE | wc -l`
num_done=`cat $DONE_FILE | wc -l`
if [ $num_done -ne 0 ]; then
total_items=$(( $num_todo + $num_done ))
# bc does the fractional division; awk then truncates at the decimal point.
_pd=$(echo "scale=2; $num_done/$total_items" | bc)
percent_done=`echo $(echo "$_pd*100" | bc) | awk -F "." '{print $1}'`
else
# Nothing finished yet — avoid dividing when num_done is 0.
percent_done=0
fi
# Number each open item; only the first $DELIM-separated field is shown.
awk -F "$DELIM" '{print NR"\t"$1}' $TODO_FILE
echo "---------------"
echo "$percent_done% complete"
}
# Remove todo item number $1 from $TODO_FILE. The item's text is left in
# the global $todo (finish() relies on this). A second argument suppresses
# the git commit.
function remove {
    [ -z "${1:-}" ] && _error "ERROR: requires the todo item ID"
    todo=$(awk -v n="$1" 'NR == n' "$TODO_FILE")
    # BUG FIX: quoted — the original `[ -z $todo ]` broke on any todo
    # containing whitespace.
    [ -z "$todo" ] && _error "ERROR: todo with ID $1 not found"
    # BUG FIX: delete by line number instead of `sed -i "" "/$todo/ d"`.
    # The sed form only worked on BSD sed, deleted every line matching the
    # text, and broke when the todo contained regex metacharacters.
    awk -v n="$1" 'NR != n' "$TODO_FILE" > "${TODO_FILE}.tmp" || exit 1
    mv "${TODO_FILE}.tmp" "$TODO_FILE" || exit 1
    [ -z "${2:-}" ] && git commit -am "Removed todo: $todo"
}
# --- command dispatch -------------------------------------------------
# The first argument selects the subcommand; the rest are passed through.
cmd=$1
shift
case "$cmd" in
add)
add $@
;;
finish)
finish $1
;;
list)
list
;;
remove)
remove $1
;;
*)
# Unknown/missing command: print usage and exit 1 via _error.
_error $USAGE
;;
esac
exit 0
| true
|
d9e5b8f48a6b3e94b6d81b0ce38f1bb03e5ada47
|
Shell
|
davidnite/AnsibleDemo
|
/deploy.sh
|
UTF-8
| 392
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash

# Ensure an SSH keypair exists for Ansible, then run the three deployment
# playbooks in order with the shared variable file.

key_file="$HOME/.ssh/id_rsa"
if [ ! -e "$key_file" ]; then
    echo "ssh key does not exist"
    echo "creating ssh key"
    ssh-keygen -t rsa -q -P "" -f "$HOME/.ssh/id_rsa"
else
    echo "ssh key already exists"
fi

for playbook in 0-prereq 1-vmdeploy 2-appinstall; do
    ansible-playbook "${playbook}.yml" --extra-vars "@vars.yml"
done
| true
|
a0a56edd7753c4a6e73412988a77a7b90c4db819
|
Shell
|
RotemDev/Headstart
|
/compile
|
UTF-8
| 588
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Compile SCSS to CSS and minified CSS, then combine and minify the JS.
# A single updating progress line is shown; tool output goes to compile.log.
echo -ne "Compiling [# ] : Human-readable CSS\r"
sass -t expanded scss/rotem.scss css/rotem.css > compile.log
echo -ne "Compiling [## ] : Minified CSS\r"
sass -t compressed scss/rotem.scss css/rotem.min.css >> compile.log
# Combine JS and minify
echo -ne "Compiling [### ] : Human-readable JS\r"
./js-src/combine-js >> compile.log
echo -ne "Compiling [#### ] : Minified JS\r"
# BUG FIX: the original ran `bash "java -jar ..."`, which treats the entire
# string as a script *filename* and always fails. Invoke the JVM directly.
# NOTE(review): the relative paths are kept exactly as written — confirm
# they are correct relative to this script's working directory.
java -jar lib/yuicompressor-2.4.8.jar -o ../js-dist/rotem.min.js ../js-dist/rotem.js >> compile.log
echo -ne "Compiling [#####] : DONE"
# BUG FIX: `echo "\n"` printed a literal backslash-n; emit a real newline.
echo ""
| true
|
0461dfcf0b40f17b5c4a572bcb3324e81cede121
|
Shell
|
berzerk0/textfiles
|
/that_vm/sync_27Feb.sh
|
UTF-8
| 724
| 2.625
| 3
|
[] |
no_license
|
#!/bin/sh
# Set up vsftpd (anonymous FTP) and install a set of bash aliases, then
# record the sync in /root/sync_status.txt.
#Set Up vstftp and configuration
# BUG FIX: the package is named vsftpd, not "vstftpd" — the original
# apt-get invocation could never install anything.
apt-get install vsftpd
# Keep a copy of the stock config before overwriting it.
cp /etc/vsftpd.conf /etc/original_vstfpd.conf
wget https://raw.githubusercontent.com/berzerk0/textfiles/master/reference_txts/anonymous_vsftpd.conf -O /etc/vsftpd.conf
#add bash alias for tftp
#alias start-tftp="if [ -d '/tmp/ftproot' ]; then atftpd --daemon --port 69 /tmp/ftproot && echo 'tftp service started'; else mkdir '/tmp/ftproot' && atftpd --daemon --port 69 /tmp/ftproot && echo 'tftp service started'; fi"
#alias for nc to ncat
#alias nc="ncat"
#alias netcat to ncat
#alias netcat="ncat"
# set up bash aliases (base64-encoded payload is decoded into ~/.bash_aliases)
echo "I0N1cnJlbnQgYXMgb2YgMjcgRmViCgojICJIb21lIiBEaXJlY3Rvcnkgb24gVVNCCmFsaWFzIHZt
ZGlyPSJjZCAvbWVkaWEvcm9vdC9USFVNQkVSS08vT1NDUC1QV0svU2hhcmVfUFdLIgoKIyBkaXJz
ZWFyY2ggc2hvcnRjdXQKYWxpYXMgZGlyc2VhcmNoPSIvb3B0L1dlYl9Ub29scy9kaXJzZWFyY2gv
ZGlyc2VhcmNoLnB5IgoKIyBkaXJzZWFyY2gtcXVpY2sgdXNlcyBjb21tb24gd29yZGxpc3QKYWxp
YXMgZGlyc2VhcmNoLXF1aWNrPSIvb3B0L1dlYl9Ub29scy9kaXJzZWFyY2gvZGlyc2VhcmNoLnB5
IC0td29yZGxpc3Q9L3Vzci9zaGFyZS93b3JkbGlzdHMvZGlyYi9jb21tb24udHh0IgoKIyBkaXJz
ZWFyY2gtbWVkaXVtIHVzZXMgIm1lZGl1bSIgKGFjdHVhbGx5IGxhcmdlKSB3b3JkbGlzdAphbGlh
cyBkaXJzZWFyY2gtbWVkaXVtPSIvb3B0L1dlYl9Ub29scy9kaXJzZWFyY2gvZGlyc2VhcmNoLnB5
IC0td29yZGxpc3Q9L3Vzci9zaGFyZS93b3JkbGlzdHMvZGlyYnVzdGVyL2RpcmVjdG9yeS1saXN0
LTIuMy1tZWRpdW0udHh0IgoKI2hhc2hidXN0ZXIKYWxpYXMgaGFzaGJ1c3Rlcj0icHl0aG9uMyAv
b3B0L0hhc2hpbmdfVG9vbHMvSGFzaC1CdXN0ZXIvaGFzaC5weSIKCiN0ZnRwIHF1aWNrIHN0YXJ0
CmFsaWFzIHN0YXJ0LXRmdHA9ImlmIFsgLWQgJy90bXAvZnRwcm9vdCcgXTsgdGhlbiBhdGZ0cGQg
LS1kYWVtb24gLS1wb3J0IDY5IC90bXAvZnRwcm9vdCAmJiBlY2hvICd0ZnRwIHNlcnZpY2Ugc3Rh
cnRlZCc7IGVsc2UgbWtkaXIgJy90bXAvZnRwcm9vdCcgJiYgYXRmdHBkIC0tZGFlbW9uIC0tcG9y
dCA2OSAvdG1wL2Z0cHJvb3QgJiYgZWNobyAndGZ0cCBzZXJ2aWNlIHN0YXJ0ZWQnOyBmaSIKCiNu
YyB0byBuY2F0CmFsaWFzIG5jPSJuY2F0IgoKI25ldGNhdCB0byBuY2F0CmFsaWFzIG5ldGNhdD0i
bmNhdCIK" | base64 -d > ~/.bash_aliases
echo "Synched 27 Feb Version at $(date)" >> "/root/sync_status.txt"
| true
|
4430abd09f779fc3c8590b526c539f7101442849
|
Shell
|
jeancochrane/just-spaces
|
/bin/release
|
UTF-8
| 454
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash

set -e

# Enable command tracing when JS_DEBUG is set.
if [[ -n "${JS_DEBUG}" ]]; then
    set -x
fi

# Print usage information for this release script.
function usage() {
    echo -n \
"Usage: $(basename "$0")
Run Heroku release.
"
}

# Only act when executed directly (not when sourced).
if [ "${BASH_SOURCE[0]}" = "${0}" ]; then
    case "${1:-}" in
        --help)
            usage
            ;;
        *)
            # Collect static assets, enable PostGIS (idempotent), migrate.
            python manage.py collectstatic --noinput
            psql $DATABASE_URL -c "create extension postgis" || echo "postgis already enabled"
            python manage.py migrate --noinput
            ;;
    esac
fi
|
91d34b008fcd0ce324f2a8cdb50e464d602e1023
|
Shell
|
dtorresxp/my_odoo_aliases_and_scripts
|
/alias_loader.sh
|
UTF-8
| 1,403
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
##########################################################
######################## PATHS ##########################
###########################################################
# Repository locations used throughout the alias scripts.
export AP=$(dirname $0)
export SRC="$HOME/src"
export ODOO="$SRC/odoo"
export ENTERPRISE="$SRC/enterprise"
export DESIGN_THEMES="$SRC/design-themes"
export INTERNAL="$SRC/internal"
export PAAS="$SRC/paas"
export ST="$SRC/support-tools"
export SRC_MULTI="$HOME/multi_src"
export USER_DOC="$SRC/documentation-user"
# BUG FIX: the original compared $OSTYPE against the exact string
# "darwin19.0", which only matched macOS Catalina; match any darwin
# release so the macOS storage path is used on every macOS version.
if [[ "$OSTYPE" == darwin* ]]; then
export ODOO_STORAGE="$HOME/Library/Application Support/Odoo"
else
export ODOO_STORAGE="$HOME/.local/Odoo"
fi
# GPG stuff
export GPG_TTY=$(tty)
# activate bash style completion
autoload bashcompinit
bashcompinit
# use neovim as default editor
export EDITOR="vim"
# activate vi mode in the terminal
bindkey -v
if [ ! -f ~/.inputrc ]; then
echo "set editing-mode vi" >~/.inputrc
else
if ! grep -q "set editing-mode vi" ~/.inputrc; then
echo "set editing-mode vi" >>~/.inputrc
fi
fi
# setup .zshrc: make sure this loader is sourced exactly once
if ! grep -q "source $0" ~/.zshrc; then
echo "source $0" >>~/.zshrc
fi
# load all the other files in the $AP folder
source $AP/zsh_alias.sh
source $AP/odoo_alias.sh
# load python based aliases
python3 $AP/python_scripts/python_alias_exporter.py
source $AP/autogenerated_scripts.sh
# load autocompletion scripts
source $AP/completion.sh
| true
|
3c3500f0c7cb0a74a9e4d9b52042c5627d552611
|
Shell
|
Sapling-code/Scripts
|
/vturlwrapper.sh
|
UTF-8
| 651
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
#This is a bash script that can be included inside of cron to automatically start downloading files.
#My current recommendation is to create two cron jobs to execute every 30 minutes
#Example 30 * * * * root cd /scriptdirectory/ && /scriptdirectory/vturlwrapper.sh
#Example 00 * * * * root cd /scriptdirectory/ && /scriptdirectory/vturlwrapper.sh
#Run the Python script to download the files
python urlvt.py
#Create a variable with a date timestamp (epoch seconds) to allow unique files
stamp=$(date "+%s")
#Run the parser to find all the higher positives
python myparse.py
#Move the output file to a unique filename so the next run cannot clobber it
mv newoutput.txt newoutput.$stamp.txt
| true
|
fdcea868eda329487c5ed84f9a46c92dbef0ee63
|
Shell
|
renchaorevee/dev-setup
|
/.bash_profile
|
UTF-8
| 525
| 3.21875
| 3
|
[] |
no_license
|
########### ADD GIT BRANCH NAME ##########
# Emit " (branch)" for the current git branch, or nothing outside a repo.
parse_git_branch() {
    git branch 2> /dev/null | sed -n 's/^\* \(.*\)/ (\1)/p'
}
# Prompt: user@host cwd (branch)$ — the branch name is rendered in green.
export PS1="\u@\h \W\[\033[32m\]\$(parse_git_branch)\[\033[00m\] $ "
########### Easier navigation: .., ..., ...., ....., ~ and -
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias .....="cd ../../../.."
alias ~="cd ~" # `cd` is probably faster to type though
alias -- -="cd -"
# Shortcuts
alias g="git"
alias h="history"
# Current UTC time in ISO-8601 / RFC 3339 format.
alias now='date -u +"%Y-%m-%dT%H:%M:%S.000Z"'
| true
|
1c7655f144505aee1af29f86fa29fe438630ba55
|
Shell
|
beeverycreative/BEEwebPi
|
/src/filesystem/home/root/bin/git
|
UTF-8
| 155
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Wrapper that refuses to run git as root, nudging users away from sudo;
# otherwise forwards all arguments to the real git binary.
if [ "$(id -u)" -eq 0 ]; then
    echo "Please run git without sudo, your regular user account is enough :)" 2>&1
    exit 1
fi
/usr/bin/git "$@"
| true
|
e294796885f9d6e85d30f213e86dc77f1c5946ff
|
Shell
|
syranez/bashbf
|
/modules/parser.sh
|
UTF-8
| 2,715
| 4.375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# program parser
#+
#+ Interface:
#+ - parse
# parses an instruction
#+
#+ Dispatches a single Brainfuck opcode to its handler; any character that
#+ is not one of the eight opcodes is silently ignored (a comment).
#+
#+ @param string instruction
parse () {
if [ -z "$1" ]; then
echo "Error: parse called without param.";
exit 1;
fi;
local instruction="$1";
case $instruction in
"+")
parseInc;
;;
"-")
parseDec;
;;
">")
parseNext;
;;
"<")
parsePrev;
;;
".")
parseDot;
;;
"[")
parseOpenParenthese;
;;
"]")
parseCloseParenthese;
;;
esac
}
# parses an increment instruction (+): increment the current cell.
#+
parseInc () {
incValue;
}
# parses a decrement instruction (-): decrement the current cell.
#+
parseDec () {
decValue;
}
# parses a next cell instruction (>): move the data pointer right.
#+
parseNext () {
nextCell;
}
# parses a previous cell instruction (<): move the data pointer left.
#+
parsePrev () {
prevCell;
}
# parses an output instruction (.): print the current cell's value as a
# character (value -> hex -> \xNN escape).
#+
parseDot () {
printf "\x$(printf %x $(getValue))"
}
# parses an open parentheses instruction ([)
#+
#+ If the current cell is non-zero, execution simply falls through into the
#+ loop body. Otherwise the loop is skipped: scan forward to the matching
#+ closing bracket.
parseOpenParenthese () {
local value=$(getValue);
if [ ! "$value" -eq "0" ]; then
return;
fi;
# (Translated from German:) Do not execute the loop (anymore) — find the
# closing bracket. Since loops can be nested, the count of opening minus
# closing brackets must return to 0; only then is the current loop over.
local open_parentheses="0";
local do_ne="false";
local token=$(getToken);
until [ "$do_ne" = "true" ]
do
case $token in
"[")
open_parentheses=$((open_parentheses+1));
;;
"]")
open_parentheses=$((open_parentheses-1));
;;
esac;
if [ "$open_parentheses" -eq "0" ]; then
do_ne="true";
fi
incProgramCounter;
token=$(getToken);
done;
}
# parses an close parentheses instruction (])
#+
#+ If the current cell is zero the loop is finished — fall through.
#+ Otherwise scan backwards to the matching opening bracket (balancing
#+ nested brackets) and resume execution just after it.
parseCloseParenthese () {
local value=$(getValue);
if [ "$value" -eq "0" ]; then
return;
fi;
local close_parentheses="0";
local do_ne="false";
local token=$(getToken);
until [ "$do_ne" = "true" ]
do
case $token in
"[")
close_parentheses=$((close_parentheses-1));
;;
"]")
close_parentheses=$((close_parentheses+1));
;;
esac;
if [ "$close_parentheses" -eq "0" ]; then
do_ne="true";
fi
decProgramCounter;
token=$(getToken);
done;
# Step forward past the matching [ so execution re-enters the loop body.
incProgramCounter;
}
| true
|
ec7a97a33905e13b3ea94acc7d752c4c5374a9ec
|
Shell
|
GeorgeErickson/dotfiles
|
/bash_completion.d/lunchy-completion.sh
|
UTF-8
| 294
| 2.8125
| 3
|
[] |
no_license
|
# Bash completion for the `lunchy` launchctl wrapper.
# First word: complete the subcommand names; afterwards: complete against
# the service names reported by `lunchy list`.
__lunchy() {
COMPREPLY=()
local current=${COMP_WORDS[COMP_CWORD]}
if [[ $COMP_CWORD == 1 ]]
then
COMPREPLY=(start stop restart ls list status install show edit)
else
COMPREPLY=($(compgen -W '$(lunchy list)' -- $current))
fi
}
complete -F __lunchy -o default lunchy
| true
|
ba6b3c09f92d5e3facb0b79657042c59b59dfb59
|
Shell
|
toyokazu/ieice-scripts2
|
/script/clear_databases.sh
|
UTF-8
| 228
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Remove the intermediate text files and the paper database produced by the
# conversion scripts. Paths are resolved relative to this script's location.
FULL_PATH=$(realpath "$0")
SCRIPT_PATH=$(dirname "$FULL_PATH")
# Ask the project's config helper for the database filename.
DB_NAME=$("$SCRIPT_PATH/print_config_database.rb" paper_db)
# ROBUSTNESS FIX: abort if the files directory is missing so the rm's
# below cannot run against the wrong working directory.
cd "$SCRIPT_PATH/../files" || exit 1
rm *-utf8.txt
rm *-utf8-with_header.txt
rm *-with_paper_id.txt
# Quoted: the configured database name could contain spaces.
rm "$DB_NAME"
| true
|
aeae9cac1e1c64d8017d8076513cfdee0d3dd21e
|
Shell
|
weizman9/hw9_firewall
|
/firewall.sh
|
UTF-8
| 854
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Read packets from stdin; for each rule line in the rules file ($1) a
# packet matches only when it satisfies every comma-separated mask on that
# line (each mask is checked by ./firewall.exe). Matching packets are
# printed once, sorted and de-duplicated.
out_list=""
while read packet
do
while read masks
do
temp_list=""
#Trimming spaces and comments
masks=`echo "$masks" | tr -d ' ' | sed -e 's/#.*//'`
if [[ "$masks" == "" ]]; then
continue
fi
#create array per ','.
readarray -d , -t rule_masks <<< "$masks"
#for every filter, append its verdict to the temp list
for rule_mask in "${rule_masks[@]}"
do
temp_list+=`echo "$packet" | ./firewall.exe "$rule_mask"`
temp_list+="\n"
done
#remove empty lines and count the lines
number_of_line=`echo -e "${temp_list}" | sed '/^$/d' | wc -l`
#if all conditions are met (every mask echoed the packet back)
if [[ $number_of_line == ${#rule_masks[@]} ]]; then
out_list+=`echo -e "${temp_list}" | uniq`
out_list+="\n"
# a packet only needs to match one rule line — stop checking further rules
break
fi
done < "$1"
done
#print w/o empty lines, remove spaces, sorted and unique
echo -e "${out_list}" | sed '/^$/d' | tr -d ' ' | sort | uniq
| true
|
5bcb5b603b847d9c41074d9a98eea296ad8e146b
|
Shell
|
angelicalleite/occurrence
|
/occurrence-index-builder-workflow/bin/hdfs_indexing/runSingleShardIndexer.sh
|
UTF-8
| 704
| 2.828125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Build and deploy the single-shard occurrence index Oozie workflow.
# $1 selects the maven profile (target environment).
PROFILE=$1
cd ../../
mvn -Poozie,$PROFILE clean package assembly:single
mvn -Psolr,$PROFILE package assembly:single
cd bin/hdfs_indexing/
# Oozie property names use dots; flatten them to underscores so the file
# can be sourced as shell variable assignments.
cat jobsingleshard.properties | sed 's/\./_/g' > .properties.file
. .properties.file
rm .properties.file
echo "Cleaning HDFS directory $oozieWfDestination"
hadoop fs -rm -r -skipTrash $oozieWfDestination*
echo "Copying workflow to HDFS directory $oozieWfDestination"
hadoop dfs -put ../../target/oozie-workflow/* $oozieWfDestination
sudo -u hdfs hadoop fs -chmod -R 0755 /occurrence-index-workflow-single/
# Undo the dot->underscore mangling for the oozie server hostname.
oozie_server=${oozie_server//[_]/.}
echo "Running oozie workflow on server $oozie_server"
oozie job -oozie $oozie_server -config jobsingleshard.properties -run
| true
|
c3c0f622f8b048be2e308cd88e657dbbbf58bdbe
|
Shell
|
everyonesdesign/enru-ruby
|
/install.sh
|
UTF-8
| 719
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash

# Verify the ruby toolchain and required gems, then install enru under
# /usr/lib and place the launcher on PATH in /usr/bin.

#check dependencies
if ! ruby -v > /dev/null 2>&1; then
    echo "Ruby is not installed!"
    echo "You can visit https://www.ruby-lang.org/en/documentation/installation/ to get it"
    exit
fi

for required_gem in nokogiri colorize; do
    if ! gem spec "$required_gem" > /dev/null 2>&1; then
        echo "Gem $required_gem is not installed!"
        echo "Run 'gem install $required_gem' to install it"
        exit
    fi
done

#move ruby file to lib folder and let execute it
mkdir /usr/lib/enru -p
cp ./enru.rb /usr/lib/enru/enru.rb
chmod +x /usr/lib/enru/enru.rb

#move executable to /bin folder
cp ./enru /usr/bin/enru
chmod +x /usr/bin/enru
| true
|
c84f8bc895045b2880116e81d28ea2f6584d38e5
|
Shell
|
mapswipe/mapswipe
|
/scripts/version.sh
|
UTF-8
| 4,257
| 4.15625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash

# This script updates version and build numbers as needed for both
# Android and iOS build systems.
# This replaces the default `yarn version` script, which does not handle
# build numbers as needed for iOS deployments.
# The resulting git tags will look like:
# v1.3.38(4)-beta where
# 1.3.38 is a semver version number
# 4 is the build number
# all numbers above must be between 0 and 99, or the system will fail somewhere
# -beta is only added to beta/dev builds, the production build has no extension
# On android, the build.grade script will build a globally unique build number to satisfy
# the playstore's requirements.
# This script must be run locally (not on github actions) to release a new version.

# stop immediately if we encounter any error
set -e

# Make sure we run from the root of the repo to prevent broken paths
if [[ ! -d ".git" ]]; then
    echo "Please run this script from the root directory of the project, like: bash scripts/version.sh"
    exit 1
fi

# Prevent accidentally pushing random changes
diff=`git diff-index HEAD | wc -l`
if [[ $diff -gt 0 ]]; then
    echo "There are modified files in your working copy (or staged in the index). Please commit or stash them and rerun this command."
    exit 1
fi

# get current version/build from package.json
current_version_number=`grep '^\ *"version":' package.json | sed 's/.*"\(.*\)",/\1/g'`
current_build_number=`grep '^\ *"build": "[0-9]\{1,2\}",' package.json | sed 's/.*"\(.*\)",/\1/g'`

# ask for new version number, and check it's valid
echo -n "Current version is $current_version_number. Input new version number: "
read versionNumber;
semver_check="(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)"
if [[ ! "$versionNumber" =~ $semver_check ]]; then
    echo "Version number must be a semantic version (like 1.4.19). Exiting."
    exit 1
fi

# ask and validate build number
echo -n "Current build is $current_build_number. Input new build number: "
read buildNumber;
build_check="[0-9][0-9]?"
if [[ ! "$buildNumber" =~ $build_check ]]; then
    echo "Build number must be a number. Exiting."
    exit 1
fi

echo
echo "Releasing version $versionNumber build $buildNumber"
tag="$versionNumber($buildNumber)"

# ensure we only get production releases from the master branch
# any other branch will yield a beta release
current_branch=`git branch --show-current`
if [[ "$current_branch" = "master" ]]; then
    echo "On branch "master", doing a production release"
else
    echo "On branch $current_branch, doing a beta release"
    # add a "beta" extension to the git tag
    tag="$tag-beta"
fi

# get a final confirmation to allow user to bail out
read -p "All set? (y/n) " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
    echo "Aborting"
    exit 1
fi

# run checks before creating the new version
yarn lint
# yarn flow
# FIXME: restore yarn test here, they're broken right now because of native-testing-library, it seems

# update package.json with the new version and build numbers
sed -i -e "s/^\ *\"version\": \"[0-9]\.[0-9]*\.[0-9]*\"\,$/  \"version\": \"${versionNumber}\"\,/" package.json
sed -i -e "s/^\ *\"build\": \"[0-9]\{1,2\}\"\,$/  \"build\": \"${buildNumber}\"\,/" package.json

# update iOS specific files with the new numbers
# BUG FIX: the original test was `[[ "$OSTYPE" =~ "darwin*" ]]`. A quoted
# right-hand side of =~ is matched as a *literal* string, so it looked for
# the literal substring "darwin*" and never matched — macOS always fell
# into the sed branch. Use a glob match against any darwin release.
if [[ "$OSTYPE" == darwin* ]]; then
    bundle exec fastlane run increment_build_number build_number:$buildNumber xcodeproj:ios/mapswipe.xcodeproj
    bundle exec fastlane run increment_version_number version_number:$versionNumber xcodeproj:ios/mapswipe.xcodeproj
else
    # update the various ios files from linux. There is no native tool there to do this,
    # so we rely on sed... (GNU sed; with the fix above this branch no longer
    # runs on macOS, where `sed -i -e` would create "<file>-e" backups)
    plistFiles="ios/mapswipe/Info.plist ios/mapswipeTests/Info.plist ios/mapswipeUITests/Info.plist"
    for f in $plistFiles
    do
        sed -i -e "/CFBundleShortVersionString<\/key>$/{n;s/\(.*\)<string>.*<\/string>/\1<string>$versionNumber<\/string>/}" $f
        sed -i -e "/CFBundleVersion<\/key>$/{n;s/\(.*\)<string>.*<\/string>/\1<string>$buildNumber<\/string>/}" $f
    done
    sed -i -e "s/CURRENT_PROJECT_VERSION = .*;/CURRENT_PROJECT_VERSION = $buildNumber;/" ios/mapswipe.xcodeproj/project.pbxproj
fi

# commit, tag and push the release
git commit -a -m $tag
git tag $tag
git push origin $tag
git push --set-upstream origin HEAD
echo $tag
| true
|
be67119ae77a10d96e6e3d9d71bcbf91580764f4
|
Shell
|
witfish/scripts
|
/lixian/add_all_torrents.sh
|
UTF-8
| 577
| 3.3125
| 3
|
[] |
no_license
|
###
# @fileOverview Upload every .torrent file under ~/Downloads to Xunlei
# Lixian (offline download), then move the processed files aside.
# @author ChenCheng <sorrycc@gmail.com>
# @ref https://github.com/iambus/xunlei-lixian
###
tmpdir="/Users/chencheng/Downloads/tmp/";
if [ ! -d "$tmpdir" ]; then
mkdir -p "$tmpdir";
fi
# Hand each torrent to `lx add`, then park the file in the temp dir so it
# is not re-submitted on the next run.
find ~/Downloads/ -name "*.torrent" | while read name; do
lx add --torrent "$name"
mv "$name" "$tmpdir"
done
rm -rf "$tmpdir"
##
## Another method to add torrents.
##
# cd ~/Downloads/
# for file in *.torrent; do
# ~/Documents/Work/library/xunlei-lixian/lixian_cli.py add --torrent $file
# rm $file
# done
| true
|
79cf968b6eb798cd77d31be942d143e22f528e6d
|
Shell
|
iannn4/week4_project
|
/guessinggame.sh
|
UTF-8
| 591
| 4.125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# File: gessingame.sh
# Print the number of (non-hidden) entries in the current directory.
function getFileNumber {
    # IDIOM FIX: count via a glob instead of parsing `ls -1 | wc -l` output
    # (robust against unusual file names; same visible-entry semantics).
    local entries=(*)
    # An empty directory leaves the glob unexpanded as the literal "*".
    if [ ${#entries[@]} -eq 1 ] && [ ! -e "${entries[0]}" ] && [ ! -L "${entries[0]}" ]; then
        entries=()
    fi
    echo "${#entries[@]}"
}
correct=0
# Number of entries in the current directory, captured once up front.
number=$(getFileNumber)
# Keep prompting until the user guesses the exact count.
while [[ $correct -eq 0 ]]
do
echo "How many files are in this directory? Type in a number and then press Enter:"
read response
if [[ $response -eq $number ]]
then
correct=1
echo "That's right! You said $response and there are $number files!"
elif [[ $response -lt $number ]]
then
echo "Wrong answer! Your guess is too Low. Try again!"
else
echo "Wrong answer! Your guess was too high. Try again!"
fi
done
| true
|
b9ba124093c0faee697256fe3fe1b5e04bad6023
|
Shell
|
karelyatin/tripleo-edge-demo
|
/roles/tripleo_edge/files/vm_operations.sh
|
UTF-8
| 1,894
| 3.15625
| 3
|
[] |
no_license
|
# Smoke-test a standalone TripleO/OpenStack deployment: create a flavor,
# image, keypair and security group, set up networking, then boot a cirros
# VM and attach a floating IP.
export OS_CLOUD=standalone
# nova flavor
openstack flavor create --ram 512 --disk 1 --vcpu 1 --public tiny
# basic cirros image
curl -O https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
openstack image create cirros --container-format bare --disk-format qcow2 --public --file cirros-0.4.0-x86_64-disk.img
# nova keypair for ssh (generate one if the user has none)
test -f ~/.ssh/id_rsa.pub || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
openstack keypair create --public-key ~/.ssh/id_rsa.pub default
# create basic security group to allow ssh/ping/dns
openstack security group create basic
# allow ssh
openstack security group rule create basic --protocol tcp --dst-port 22:22 --remote-ip 0.0.0.0/0
# allow ping
openstack security group rule create --protocol icmp basic
# allow DNS
openstack security group rule create --protocol udp --dst-port 53:53 basic
# External (provider) network on the datacentre physical network.
neutron net-create public --router:external --provider:network_type flat --provider:physical_network datacentre
# Derive the gateway and CIDR from the host's own routing/interface config.
export GATEWAY=`ip r get 1.1.1.1 | awk '/dev/{print $3}' | tr -d '[[:space:]]'`
export CIDR=`ip r|grep br-ctlplane|cut -d" " -f1|tail -1| tr -d '[[:space:]]'`
neutron subnet-create --name public --enable_dhcp=False --allocation-pool=start=${GATEWAY%.*}.220,end=${GATEWAY%.*}.225 --gateway=$GATEWAY public $CIDR
# Tenant network + router wired to the external network.
openstack network create net1
openstack subnet create subnet1 --network net1 --subnet-range 192.0.2.0/24
neutron router-create router1
neutron router-gateway-set router1 public
neutron router-interface-add router1 subnet1
neutron floatingip-create public
netid=$(openstack network show net1 -f value -c id)
floatip=$(openstack floating ip list -f value -c "Floating IP Address"|head -1)
openstack server create --nic net-id=$netid --image cirros --security-group basic --key-name default --flavor tiny testvm
# Crude wait for the instance to become ACTIVE before attaching the IP.
sleep 20
openstack server add floating ip testvm $floatip
# VM is ready and can be accessed with ssh cirros@$floatip
| true
|
ac6dc5588aa771cfc786b138e7e4d42cbae29a29
|
Shell
|
berchev/bash_scripting_academy
|
/task21.sh
|
UTF-8
| 321
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Demonstrates the difference between global variables and variables that
# are local to a function.

# Function definition:
funcExample () {
    # BUG FIX: without the `local` keyword the assignment leaked into the
    # global scope, so the "local" variable was still visible after the
    # function returned — defeating the purpose of the demonstration.
    local LOCALVAR="Local variable"
    echo "This is $LOCALVAR"
}

GLOBALVAR="global variable"
echo "This is: $GLOBALVAR"
# LOCALVAR is not set yet, so this prints "This is ".
echo "This is $LOCALVAR"
echo "Function call: "
echo ""
funcExample
echo "Variables check:"
echo "This is $GLOBALVAR"
# With the fix, LOCALVAR is out of scope again here.
echo "This is $LOCALVAR"
| true
|
7fbcc8f9fd68e8ec95b946b54d08bc9a9aa94723
|
Shell
|
melvincornelissen/Kerio-Connect
|
/letsencrypt-create
|
UTF-8
| 3,431
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Requests/renews Let's Encrypt certificates for the mail host names of a
# domain and links them into the Kerio Connect SSL store. Log messages are
# written (in Dutch) to $LOG.
SCRIPT_DESCRIPTION="Maak certificaten via Let's Encrypt en plaats deze op de juiste plaats in de Kerio Connect SSL store."
SCRIPT_AUTHOR="Melvin Cornelissen"
SCRIPT_VERSION="1.5"
SCRIPT_DATE="26-10-2016"
SCRIPT_NAME=$(basename "$0")
DOMAIN_NAME="$1"
CERT_NAME="mx.$1"
FILE="/opt/kerio/mailserver/sslcert/$1.crt"
LOG="/var/log/certbot-create.log"
# (Translated:) Check whether a domain runs at TransIP and if so push DNS
# records — currently disabled.
#if [ "$#" = "-d" ]; then
if [ $# -eq 0 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ];
then
echo ""
echo "${SCRIPT_DESCRIPTION}"
echo "Author: ${SCRIPT_AUTHOR} Version: ${SCRIPT_VERSION} Last modified: ${SCRIPT_DATE}"
echo ""
echo "Gebruik: ${SCRIPT_NAME} [domeinnaam.tld] [-h]"
# (Translated:) If no parameters were given, also log an error.
if [ $# -eq 0 ];
then
echo ""
echo "$(date) ERROR: Er is geen domeinnaam gespecificeerd. Om een certificaat aan te vragen moet je een domeinnaam opgeven, gebruik hiervoor domain.tld." | tee -a "$LOG"
fi
exit 1;
fi;
# Start of log
echo "$(date) START: Certificaat aanvraag gestart voor ${CERT_NAME}" | tee -a "$LOG"
# Stop Kerio Connect (frees port 80/443 for certbot's standalone server)
service kerio-connect stop > /dev/null
echo "$(date) INFO: Kerio Connect gestopt" | tee -a "$LOG"
# Create certificates using certbot's standalone webserver for all four
# conventional mail host names of the domain.
/opt/letsencrypt/certbot-auto certonly --standalone --expand --keep-until-expiring -d mx.$1 -d webmail.$1 -d mail.$1 -d autodiscover.$1 > /dev/null
# Link the created certificates into the Kerio SSL store; if they already
# exist, remove the old links/material first and recreate them.
if [ ! -f $FILE ]
then
echo "$(date) INFO: Certificaat voor $1 bestaat nog niet, links zullen worden aangemaakt." | tee -a "$LOG"
ln -s /etc/letsencrypt/live/mx.$1/fullchain.pem /opt/kerio/mailserver/sslcert/$1.crt
ln -s /etc/letsencrypt/live/mx.$1/privkey.pem /opt/kerio/mailserver/sslcert/$1.key
else
echo "$(date) INFO: Certificaat voor $1 bestaat al, links worden verwijderd en opnieuw aangemaakt!" | tee -a "$LOG"
rm /opt/kerio/mailserver/sslcert/$1*
echo "$(date) DELETE: Bestaand certificaat verwijderd (Kerio)" | tee -a "$LOG"
rm -rf /etc/letsencrypt/live/*$1*
echo "$(date) DELETE: Bestaand certificaat verwijderd (Letsencrypt-live)" | tee -a "$LOG"
rm -rf /etc/letsencrypt/renewal/*$1*
echo "$(date) DELETE: Bestaand certificaat verwijderd (Letsencrypt-renewal)" | tee -a "$LOG"
rm -rf /etc/letsencrypt/archive/*$1*
echo "$(date) DELETE: Bestaand certificaat verwijderd (Letsencrypt-archive)" | tee -a "$LOG"
ln -s /etc/letsencrypt/live/mx.$1/fullchain.pem /opt/kerio/mailserver/sslcert/$1.crt
echo "$(date) INSERT: Certificaat aangemaakt (Letsencrypt-crt)" | tee -a "$LOG"
ln -s /etc/letsencrypt/live/mx.$1/privkey.pem /opt/kerio/mailserver/sslcert/$1.key
echo "$(date) INSERT: Key aangemaakt (Letsencrypt-key)" | tee -a "$LOG"
fi
# Start Kerio Connect again
service kerio-connect start > /dev/null
echo "$(date) INFO: Kerio Connect gestart" | tee -a "$LOG"
echo "$(date) INFO: Het certificaat voor ${CERT_NAME} is gereed" | tee -a "$LOG"
# End of log
echo "$(date) STOP: Certificaat aanvraag afgerond voor ${CERT_NAME}" | tee -a "$LOG"
| true
|
8506bcc560906fa4d6203bd5fe4641c1341f2bdc
|
Shell
|
caligrafy/caligrafy-quill
|
/.bin/server
|
UTF-8
| 2,852
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
start() {
    # Build and launch the prod-box container; on success, keep watching the
    # application folder so the user can rebuild on change.
    printf "\n\n Building the Caligrafy development server...\n\n"
    if ! docker-compose up --build -d prod-box; then
        printf "\nCaligrafy server could not be found \n\n"
        return
    fi
    clear
    printf "\n\nCaligrafy Server successfully started.\n\n Hostname: http://localhost:8080 \n phpmyadmin: http://localhost:8077/ \n mysql username: root \n mysql password: root \n\n"
    printf "\n\nMonitoring server changes in the application folder...\n\nPress CTRL-C at any point to exit \n\n"
    watch "./application" "*.*" "docker-compose up --build -d prod-box"
}
stop() {
    # Stop the compose stack and report the outcome to the user.
    if docker-compose stop; then
        clear
        printf "\n\nCaligrafy Server stopped.\n\n"
    else
        printf "\n\n We were not able to create a Docker container. Clear any Docker containers, images and volumes that are cached and try again. \n\n"
    fi
}
checksum() {
    # Print a checksum line for every regular file under $1 matching glob $2,
    # following symlinks. Prefers BSD 'md5' when present, else GNU 'md5sum'.
    # $1 - directory to scan
    # $2 - filename glob (e.g. "*.*")
    # Fix: the directory argument was previously unquoted and broke on paths
    # containing spaces.
    if [[ $(type -t md5) != "" ]]; then
        find -L "$1" -type f -name "$2" -exec md5 {} \;
    else
        find -L "$1" -type f -name "$2" -exec md5sum {} \;
    fi
}
watch() {
    # Poll a directory every 2 seconds; when the checksums of files matching
    # the glob change, prompt the user and re-run the given command.
    # $1 - directory, relative to the current working directory
    # $2 - filename glob passed to checksum()
    # $3 - command line to run on confirmation (word-split intentionally)
    # Fixes: 'while [[ true ]]' replaced by 'while true' ([[ str ]] is true
    # for ANY non-empty string, which was misleading), and all path/checksum
    # expansions are now quoted so paths with spaces work.
    local dir="$1" filter="$2" command="$3"
    local working_path chsum1 chsum2 mainmenuinput
    working_path=$(pwd)
    chsum1=$(checksum "$working_path/$dir" "$filter")
    while true; do
        chsum2=$(checksum "$working_path/$dir" "$filter")
        if [[ "$chsum1" != "$chsum2" ]]; then
            printf "\nChanges in your server code have been detected\n"
            printf "\nPress 1 at anypoint if you would like to recompile the server code\n"
            read -n 1 -p "" mainmenuinput
            if [ "$mainmenuinput" = "1" ]; then
                printf "\nRebuilding the application...\n"
                # $command is a full command line; splitting is intentional.
                $command
                chsum1=$chsum2
            elif [ "$mainmenuinput" = "2" ]; then
                exit 0
            fi
        fi
        sleep 2
    done
}
initialize() {
    # One-time project setup: drop git metadata, seed the .env files from the
    # framework defaults (only when missing), and create the application
    # folder on first run.
    printf "\nInitializing Caligrafy...\n\n"
    # .git is a directory in a normal clone (and a plain file in worktrees /
    # submodules). The original '-f' test never matched a directory, so the
    # cleanup never ran; '-e' covers both cases.
    if [ -e "./.git" ]; then
        chmod -R 777 .git
        rm -R .git
    fi
    # Keep an existing .env untouched. (The original branch copied ./.env
    # onto itself, which cp rejects as "same file".)
    if [ ! -f "./.env" ]; then
        cp ./framework/settings/.env.docker ./.env
        cp ./framework/settings/.env.vue ./framework/librairies/app/.env
    fi
    if [ -d "./application" ]; then
        printf "\nMaintaining application folder"
    else
        printf "\n Creating application folder... \n\n"
        cp -r ./framework/settings/application ./application
    fi
}
# Entry point: require a reachable Docker daemon, then dispatch on $1.
printf "\nChecking if Docker is installed...\n\n"
if docker ps; then
    case "$1" in
        start)
            initialize
            start
            ;;
        stop)
            stop
            ;;
        *)
            printf "\nUse 'server start' to start the server or 'server stop' to stop it \n\n"
            ;;
    esac
else
    printf "\n Docker is needed in order for Caligrafy to run locally without a php server.\n Install Docker and try again.\n\n"
fi
| true
|
abee6f7cf7685ff54de1fb32a8c507a8973206b3
|
Shell
|
newtonick/virtualbox-php-server
|
/setup-php.sh
|
UTF-8
| 1,348
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build PHP 5.5.5 from source with php-fpm and common PECL extensions, then
# install MySQL server. Intended for a Debian/Ubuntu build VM; every step
# that touches the system runs under sudo.
sudo apt-get install aptitude
sudo apt-get install wget
sudo apt-get install openssh-client openssh-server
sudo apt-get install build-essential nginx autoconf libmemcached-dev libxml2-dev libcurl4-openssl-dev pkg-config libjpeg-dev libpng-dev libicu-dev libmcrypt-dev libgearman-dev libgraphicsmagick1-dev
mkdir ~/src
cd ~/src
wget http://us3.php.net/distributions/php-5.5.5.tar.gz
tar -xzf php-5.5.5.tar.gz
cd php-5.5.5
./configure '--with-mysql' '--with-mysqli' '--with-pdo-mysql' '--enable-fpm' '--with-fpm-user=www-data' '--with-fpm-group=www-data' '--enable-mbstring' '--with-curl' '--with-openssl' '--enable-sockets' '--enable-soap' '--enable-bcmath' '--enable-pcntl' '--enable-zip' '--with-zlib' '--with-gd' '--enable-gd-native-ttf' '--with-jpeg-dir' '--with-png-dir' '--with-mcrypt' '--disable-posix-threads' '--enable-intl' '--enable-opcache' || exit
make || exit
sudo make install || exit
sudo pecl install memcached
sudo pecl install gearman-1.0.3
sudo pecl install oauth
sudo pecl install gmagick-beta
# configure php ini
# BUG FIX: 'sudo cat > /usr/local/lib/php.ini' fails: the redirection is
# performed by the non-root shell *before* sudo runs, so the write to a
# root-owned directory is denied. 'sudo tee' writes with elevated privileges.
sudo tee /usr/local/lib/php.ini >/dev/null <<DELIM
extension=memcached.so
extension=gearman.so
extension=gmagick.so
extension=oauth.so
upload_max_filesize = 10M;
post_max_size = 10M;
date.timezone = 'UTC';
DELIM
# setup mysql server
sudo apt-get install mysql-server mysql-client
# configure mysql server
| true
|
4232ffcd37661a4a82c90c7bebc4c6eb6be20b52
|
Shell
|
korby/gdrive
|
/gdrive.sh
|
UTF-8
| 4,336
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# title       : grive.sh
# description : A sandbox to play a little bit with google drive api
# author      : Korby (https://github.com/korby)
# date        : feb. 2018
access_token=""
# If stdin is not empty, get token from it (allows piping a cached token in)
if [ ! -t 0 ];
then
    while read line
    do
        access_token=$line
    done < /dev/stdin
fi
# OAuth client credentials are read from plain files next to the script.
client_id="$(cat client_id.txt 2>/dev/null)"
client_secret="$(cat client_secret.txt 2>/dev/null)"
# Token cache file is named after the client id ("refresh_token:access_token").
tokens_path=$client_id
google_url_console="https://console.developers.google.com/apis/"
google_url_get_code="https://accounts.google.com/o/oauth2/auth?scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&redirect_uri=urn:ietf:wg:oauth:2.0:oob&response_type=code&client_id=$client_id"
google_url_get_tokens="https://accounts.google.com/o/oauth2/token"
# Optional: a 'parent_dir' file may define parent_dir_id (target Drive folder).
if [ -f "./parent_dir" ]; then . ./parent_dir; fi
if [ "$client_id" == "" ]; then echo "Need client_id, you can get it here: "; echo "$google_url_console/credentials"; exit 1; fi
if [ "$client_secret" == "" ]; then echo "Need client_secret, you can get it here: "; echo "$google_url_console/credentials"; exit 1; fi
# First run: interactively exchange an authorization code for a refresh token
# and seed the token cache file.
if [ ! -f $tokens_path ] && [ "$access_token" = "" ]; then
    echo "Need a code to get token, please get it here: "
    echo $google_url_get_code
    read -p "Type the code:" code
    json_back=`curl -H 'Content-Type: application/x-www-form-urlencoded' -d "code=$code&client_id=$client_id&client_secret=$client_secret&redirect_uri=urn:ietf:wg:oauth:2.0:oob&grant_type=authorization_code" $google_url_get_tokens`
    # Crude JSON field extraction: grab the "refresh_token" line and strip quotes.
    refresh_token=`echo "$json_back" | grep "refresh_token" |cut -d ":" -f2 | sed "s/.$//" | sed "s/^.//" | sed 's/"//g'`
    if [ "$refresh_token" == "" ]; then
        echo "Failure during token request, here the response:"
        echo $json_back
        exit 1
    fi
    echo "$refresh_token:" > $tokens_path;
fi
# Print a valid access token on stdout.
# Order of preference: token passed in on stdin; cached token if the cache
# file was modified within the last ~55 minutes (Google tokens live 1 hour);
# otherwise mint a fresh token from the refresh token and rewrite the cache.
function get_access_token () {
    if [ "$access_token" != "" ]; then echo $(echo $access_token | cut -d ':' -f2); return 0; fi
    # if token is less than one hour aged (find -mmin +55 prints nothing for
    # files younger than 55 minutes, so an empty result means "still fresh")
    if [ "$(find $tokens_path -mmin +55)" == "" ]; then
        access_token=`cat $tokens_path | cut -d ':' -f2`
    fi
    if [ "$access_token" == "" ]; then
        refresh_token=`cat $tokens_path | cut -d ':' -f1`;
        json_back=`curl -d "client_id=$client_id&client_secret=$client_secret&refresh_token=$refresh_token&grant_type=refresh_token" $google_url_get_tokens`;
        access_token=`echo "$json_back" | grep "access_token" |cut -d ":" -f2 | sed "s/.$//" | sed "s/^.//" | sed 's/"//g'`
        # BSD sed (macOS) needs an explicit empty backup suffix for -i.
        if [ "$(uname)" == "Darwin" ]; then
            sed -i "" "s/:.*$/:$access_token/g" $tokens_path
        else
            sed -i "s/:.*$/:$access_token/g" $tokens_path
        fi
    fi
    echo $access_token;
}
# Upload a file to Google Drive using the v3 resumable-upload protocol:
# phase 1 POSTs the metadata and yields a session URL in the Location header,
# phase 2 PUTs the file bytes to that URL.
# $1 - access token, $2 - path of the file to upload.
function upload () {
    access_token=$1
    filepath=$2
    filesize=`ls -nl $filepath | awk '{print $5}'`
    mimetype=`file --mime-type $filepath | cut -d":" -f2 | sed "s/^ //"`
    title=`basename "$filepath"`
    # If parent_dir_id is set, upload go in
    if [ "$parent_dir_id" != "" ]; then
        postData="{\"parents\": [\"$parent_dir_id\"],\"mimeType\": \"$mimetype\",\"name\": \"$title\"}"
    else
        # upload go tho the gdrive root dir
        postData="{\"mimeType\": \"$mimetype\",\"name\": \"$title\",\"parents\": [{\"kind\": \"drive#file\",\"id\": \"root\"}]}"
    fi
    # NOTE(review): postDataSize is computed but never used below — confirm
    # whether it was meant to feed a Content-Length header.
    postDataSize=$(echo $postData | wc -c)
    ref=`curl --silent \
        -X POST \
        -H "Host: www.googleapis.com" \
        -H "Authorization: Bearer $access_token" \
        -H "Content-Type: application/json; charset=UTF-8" \
        -H "X-Upload-Content-Type: $mimetype" \
        -H "X-Upload-Content-Length: $filesize" \
        -d "$postData" \
        "https://www.googleapis.com/upload/drive/v3/files?uploadType=resumable" \
        --dump-header - `
    # Extract the resumable session URL from the Location header.
    refloc=`echo "$ref" | grep -i location | perl -p -e 's/location: //gi' | tr -d '\r\n'`
    echo $refloc > ./gdrive.log
    curl -X PUT --dump-header - -H "Authorization: Bearer "$access_token -H "Content-Type: "$mimetype -H "Content-Length: "$filesize --upload-file $filepath $refloc
}
# Resolve a usable access token once, then dispatch on the command-line flags:
#   -l        list the files in the Drive root
#   -u FILE   upload FILE
access_token=`get_access_token`;
while getopts "lu:" opt; do
    case "$opt" in
    l)
        echo "Listing drives root files...";
        curl -s -H "GData-Version: 3.0" -H "Authorization: Bearer $access_token" https://www.googleapis.com/drive/v2/files
        exit 0
        ;;
    u)
        echo `upload $access_token $OPTARG`
        ;;
    \?)
        exit 1
        ;;
    esac
done
exit 0;
| true
|
b935532ae60009731210c28acff670ca34829a89
|
Shell
|
vpommier/everythings-there_backend_GAE
|
/examples/request_demand-1.sh
|
UTF-8
| 279
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Post a sample JSON "demand" payload to the locally running backend.
# A throw-away data file is created next to this script, sent with curl,
# then removed.
# Fix: all expansions are quoted — the previous unquoted $dataFile (and
# $(dirname $0)) word-split when the script's path contained spaces.
dataFile="$(dirname "$0")/data_$(date +%s).json"
cat > "$dataFile" <<EOT
{
    "plaques":[1,2,5,6,7,100],
    "total":100
}
EOT
curl -i \
--request POST \
--data @"$dataFile" \
--header "Content-Type:application/json" \
--url http://localhost:8080/demand
rm -f "$dataFile"
| true
|
7376cbc4f911ebf3a7f42ba6b09b147e2c3f5428
|
Shell
|
chch9015/all-devops-files
|
/scripts/maven38.sh
|
UTF-8
| 432
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Apache Maven 3.8.1 under /opt/maven38 for a Jenkins agent.
# Assumes: the 'jenkins' user/group exists and 'unzip' is installed.
# install the maven in /opt
cd /opt
# download the apache maven binary distribution
wget https://mirrors.estointernet.in/apache/maven/maven-3/3.8.1/binaries/apache-maven-3.8.1-bin.zip
# unzip the file
unzip apache-maven-3.8.1-bin.zip
# rename to a version-stable directory name
mv apache-maven-3.8.1 maven38
# change the ownership to the jenkins user so Jenkins jobs can use it
chown -R jenkins:jenkins /opt/maven38
# restrict permissions to the owner
chmod -R 700 maven38/
# delete the downloaded zip
rm -f apache-maven-3.8.1-bin.zip
| true
|
e7b596a54c9111496106c6bfb1890735bf3c54ae
|
Shell
|
akkakks/lantern_aws
|
/salt/flashlight/monitor.bash
|
UTF-8
| 1,709
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# This script sends an email alert when the load average exceeds 70% of the
# number of cores. It also logs the load average to statshub.
#
# This script is based upon the one here -
# https://www.digitalocean.com/community/questions/email-notifications-for-server-resources
#
# See also http://stackoverflow.com/questions/11735211/get-last-five-minutes-load-average-using-ksh-with-uptime
# for a more reliable way to get the load average.
#
function die() {
    # Print an error message to stderr and abort the script with status 1.
    # (Previously echoed unquoted to stdout; diagnostics belong on stderr and
    # printf '%s' is safe for messages starting with '-' or containing '\'.)
    printf '%s\n' "$*" >&2
    exit 1
}
# $1 is the service to watch (e.g. "flashlight").
service=$1
mail="fallback-alarms@getlantern.org"
hn=`hostname`
statshub="https://pure-journey-3547.herokuapp.com/stats/$hn"
country="sp" # TODO - make this templatized
maxloadpercent="70" # Note - this is given as percentage, not decimal
# Alert threshold scales with the core count: 70 * nproc (still in "percent
# times 100" units, matching loadint below).
maxload=$(echo "$maxloadpercent * `nproc`" | bc -l)
# NOTE(review): restartload does NOT scale with core count, while maxload
# does — on a multi-core box any load above the alert threshold also exceeds
# 90, so the restart branch fires whenever the alert does. Confirm intended.
restartload="90"
# Third comma-separated field of uptime's "load average:" = 15-minute average.
load=`uptime | sed 's/.*load average: //' | awk -F\, '{print $3}'`
# Scale to an integer percentage (load 1.00 -> 100) for the -gt comparisons.
loadscaled=$(echo "$load * 100" | bc -l)
loadint=$(printf "%.0f" $loadscaled)
if [ "$loadint" -gt "$maxload" ]; then
    echo "System load $loadint% is higher than $maxload%, alerting $mail"
    echo "15 minute load average is $loadint%" | mail -s "$hn - High $service Server Load" -- $mail || die "Unable to email alert"
    if [ "$loadint" -gt "$restartload" ]; then
        echo "System load is higher than $restartload%, restarting $service"
        # Stop/start instead of restart to make sure profiling info is saved.
        sudo service $service stop
        sudo service $service start
    fi
fi
if [ "$service" = "flashlight" ]; then
    # Report data to statshub
    curl --data-binary "{\"dims\": {\"flserver\": \"$hn\", \"country\": \"$country\"}, \"gauges\": { \"loadavg_15min\": $loadint } }" $statshub || die "Unable to post stats"
fi
echo ""
| true
|
1e8c15aed1896d0f3f45811d781d540845392919
|
Shell
|
assimilation/assimilation-official
|
/discovery_agents/checksums
|
UTF-8
| 4,243
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
# vim: smartindent tabstop=4 shiftwidth=4 expandtab number colorcolumn=100
#
# This file is part of the Assimilation Project.
#
# Author: Alan Robertson <alanr@unix.sh>
# Copyright (C) 2013 - Assimilation Systems Limited
#
# Free support is available from the Assimilation Project community - http://assimproj.org
# Paid support is available from Assimilation Systems Limited - http://assimilationsystems.com
#
# The Assimilation software is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The Assimilation software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Assimilation Project software. If not, see http://www.gnu.org/licenses/
#
#
# Load shared helpers (provides assim_run_in_context) and pick the default
# list of candidate checksum programs, overridable via ASSIM_sumcmds.
DIR=$(dirname "$0")
. $DIR/assim_common.sh
DEFAULTSUMLIST="/usr/bin/sha256sum /usr/bin/sha384sum /usr/bin/sha512sum /usr/bin/sha224sum /usr/bin/sha1sum /usr/bin/shasum /usr/bin/md5sum /usr/bin/cksum /usr/bin/crc32"
: ${ASSIM_sumcmds:=${DEFAULTSUMLIST}}
## mktmpdir makes a temporary directory
# Prints the directory path on stdout and restricts it to the owner.
mktmpdir() {
    # Not every system has mktemp
    if
        TMPDIR=$(mktemp --directory --tmpdir=/var/tmp 2>/dev/null)
        echo $TMPDIR
    then
        : mktemp worked
    else
        # Fallback: hand-rolled name under /var/tmp ($$ + $RANDOM when available)
        TMPDIR="/var/tmp/$$-${RANDOM-random}.tcpdiscovery"
        mkdir "${TMPDIR}" && echo "${TMPDIR}"
    fi
    chmod 700 ${TMPDIR}
}
TMPDIR=$(mktmpdir)
# Remove the work directory on any exit path.
trap 'rm -fr "$TMPDIR"' 0
# Produces the list of libraries that this binary depends on
# Parses ldd output line by line; only the shared-object path is printed,
# one per line. Lines without a resolvable path (e.g. vdso) are dropped.
lddependencies() {
    assim_run_in_context ldd $1 2>/dev/null |
    while
        read line
    do
        case $line in
            # libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f8615253000)
            *'=>'*/*' ('*) #echo "case 1 $line" >&2
                echo "$line" | sed -e 's%^.*=> *%%' -e 's% *(.*%%'
                ;;
            # linux-vdso.so.1 => (0x00007fff7e1fe000)
            *'=>'*[^\(]*'('*) #echo "case 2 $line" >&2
                ;;
            # /lib64/ld-linux-x86-64.so.2 (0x00007f8615640000)
            */*' '*'('*) #echo "case 3 $line" >&2
                echo "$line" | sed -e 's% *(.*%%'
                ;;
            *) #echo "case 4 $line"
                ;;
        esac
    done
}
# Default checksum command; find_sumcmd() below replaces it with the first
# executable candidate from ASSIM_sumcmds.
SUMCMD=/usr/bin/sha256sum
# Select the first executable sum command in our list of possible commands
find_sumcmd() {
    # Walk the candidate list and keep the first command that exists and is
    # executable (checked inside the managed context). Aborts if none works.
    for candidate in ${ASSIM_sumcmds}; do  # word-splitting the list is intentional
        assim_run_in_context test -f "$candidate" -a -x "$candidate" || continue
        SUMCMD="${candidate}"
        return
    done
    echo "Cannot locate an executable sum command in [$ASSIM_sumcmds]" >&2
    exit 1
}
# Print only the checksum (first field) of file $1, using the selected SUMCMD.
sumcmd() {
    assim_run_in_context ${SUMCMD} "$1"  2>/dev/null | cut -f1 -d' '
}
# Expand the list of requested filenames to include things which this file depends on
# Each existing file plus its ldd dependencies is appended to the work list
# in $TMPDIR/filelist (duplicates are removed later by sort -u).
expandlist() {
    for arg in "$@"
    do
        if assim_run_in_context test ! -f "$arg"; then continue; fi
        echo "$arg"
        lddependencies "$arg"
    done >> ${TMPDIR}/filelist
}
# Emit the discovery result as JSON on stdout: a fixed header describing the
# host/tool, then one "path": "checksum" entry per (deduplicated) file.
sumfiles() {
    cat <<!EOF1
{
  "discovertype": "checksum",
  "description": "File Checksums from parameters",
  "host":       "$(uname -n)",
  "source":     "$0",
  "proxy":      "${ASSIM_PROXY_PATH}",
  "sumcmd":     "${SUMCMD}",
  "data": {
!EOF1
    comma='    '
    # Checksum every unique file, reformat each line to JSON, and emit with a
    # leading comma from the second entry onward.
    assim_run_in_context ${SUMCMD} $(sort -u < $TMPDIR/filelist) 2>/dev/null|
    awk '{printf "\"%s\": \"%s\"\n", $2 , $1}' |
    while
        read line
    do
        printf '%s%s' "${comma}" "$line"
        comma=',
    '
    done
    cat <<!EOF2
  }
}
!EOF2
}
# Files we always checksum, in addition to anything requested through
# ASSIM_filelist: login-path binaries, the checksum tool itself, and the
# nanoprobe. (A shorter assignment that immediately preceded this one was
# dead code — it was overwritten unconditionally — and has been removed.)
MINLIST="/bin/bash /bin/sh /bin/login /usr/bin/passwd ${SUMCMD} /usr/sbin/nanoprobe"
# Full discovery pass: pick a checksum tool, build the file list (baseline
# MINLIST plus any ASSIM_filelist entries), and print the JSON report.
discover() {
    find_sumcmd
    expandlist ${MINLIST} ${ASSIM_filelist}
    sumfiles
    return 0
}
# Print the accepted sub-commands and exit non-zero.
# ('<<-' strips leading tab characters from the here-document body.)
usage() {
	cat <<-!
	usage: $0 (discover|meta-data)
	!
	exit 1
}
# Command dispatch: only 'discover' is implemented; anything else shows usage.
case $1 in
    discover)   discover
                ;;
    *)          usage
                ;;
esac
| true
|
e56adc01f5f47ec9931d7028e70d122ba6b89b1f
|
Shell
|
mongodb/mongodbatlas-cloudformation-resources
|
/cfn-resources/cloud-backup-schedule/test/cfn-test-create-inputs.sh
|
UTF-8
| 2,313
| 3.84375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# cfn-test-create-inputs.sh
#
# This tool generates json files in the inputs/ for `cfn test`.
# Requires the 'atlas' CLI (authenticated) and 'jq'.
#
set -o errexit
set -o nounset
set -o pipefail
set -x
function usage {
    echo "usage:$0 <project_name>"
    echo "Creates a new project and an Cluster for testing"
}
# NOTE(review): usage() does not exit, so execution continues after printing
# it; and the '-ne 2' check disagrees with the usage text (which documents a
# single <project_name> argument) — confirm the intended argument count.
if [ "$#" -ne 2 ]; then usage; fi
if [[ "$*" == help ]]; then usage; fi
rm -rf inputs
mkdir inputs
projectName="${1}"
clusterName=$projectName
echo "Creating required inputs"
# Reuse an existing Atlas project with this name, or create one.
projectId=$(atlas projects list --output json | jq --arg NAME "${projectName}" -r '.results[] | select(.name==$NAME) | .id')
if [ -z "$projectId" ]; then
    projectId=$(atlas projects create "${projectName}" --output=json | jq -r '.id')
    echo -e "Cant find project \"${projectName}\"\n"
fi
export MCLI_PROJECT_ID=$projectId
# Reuse an existing cluster with this name, or create one and wait until ready.
clusterId=$(atlas clusters list --projectId "${projectId}" --output json | jq --arg NAME "${clusterName}" -r '.results[]? | select(.name==$NAME) | .id')
if [ -z "$clusterId" ]; then
    echo "creating cluster.."
    atlas clusters create "${clusterName}" --projectId "${projectId}" --backup --provider AWS --region US_EAST_1 --members 3 --tier M10 --mdbVersion 5.0 --diskSizeGB 10 --output=json
    atlas clusters watch "${clusterName}" --projectId "${projectId}"
    echo -e "Created Cluster \"${clusterName}\""
fi
# First backup policy id of the cluster's schedule, injected into the templates.
policyId=$(atlas backups schedule describe "${clusterName}" --projectId "${projectId}" | jq -r '.policies[0].id')
echo "policyId: ${policyId}"
# NOTE(review): 'name' is assigned twice below but never referenced by the jq
# calls — looks like leftover code; confirm before removing.
name="${1}"
# Render the create/update/invalid input files from their templates.
jq --arg group_id "$projectId" \
    --arg cluster_name "$clusterName" \
    --arg policy_id "$policyId" \
    '.ClusterName?|=$cluster_name |.ProjectId?|=$group_id| .Policies[0].ID?|=$policy_id' \
    "$(dirname "$0")/inputs_1_create.template.json" >"inputs/inputs_1_create.json"
jq --arg group_id "$projectId" \
    --arg cluster_name "$clusterName" \
    --arg policy_id "$policyId" \
    '.ClusterName?|=$cluster_name |.ProjectId?|=$group_id| .Policies[0].ID?|=$policy_id' \
    "$(dirname "$0")/inputs_1_update.template.json" >"inputs/inputs_1_update.json"
name="${name}- more B@d chars !@(!(@====*** ;;::"
jq --arg group_id "$projectId" \
    --arg cluster_name "$clusterName" \
    '.ClusterName?|=$cluster_name |.ProjectId?|=$group_id' \
    "$(dirname "$0")/inputs_1_invalid.template.json" >"inputs/inputs_1_invalid.json"
echo "mongocli iam projects delete ${projectId} --force"
ls -l inputs
| true
|
6972353dc244c5d54d972dd5ec0b347604596897
|
Shell
|
pataraco/scripts
|
/aws/pa-new-pip-new-eip.sh
|
UTF-8
| 2,130
| 4.3125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Add a secondary private IP to an ENI, allocate a new EIP, tag it, and
# associate the EIP with the new private IP.
# Fix: the inner failure message referenced an undefined variable ($eni)
# instead of $eniid.
USAGE="\
usage: $0 ENI_ID EIP_NAME_TAG
ENI_ID ENI ID to create the new private IP for and attach an EIP to
EIP_NAME_TAG Name tag to give the EIP"
eniid=$1
eipname=$2
[ -z "$eniid" -o -z "$eipname" ] && { echo "$USAGE"; exit 1; }
# NOTE(review): this outer check is redundant (the guard above already
# exited), so the final 'else' branch is unreachable.
if [ -n "$eniid" -a -n "$eipname" ]; then
   echo "adding new private ip to eni: $eniid"
   echo -ne "list of existing private IPs on eni ($eniid): "
   aws ec2 describe-network-interfaces --network-interface-ids $eniid | jq -r .NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress | tr '\n' ',' | sed 's/,$//'
   aws ec2 assign-private-ip-addresses --network-interface-id $eniid --secondary-private-ip-address-count 1
   if [ $? -eq 0 ]; then
      # The newly assigned address is listed last on the ENI.
      newip=$(aws ec2 describe-network-interfaces --network-interface-ids $eniid | jq -r .NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress | tail -1)
      echo "new private IP created on eni ($eniid): $newip"
      echo "confirm that it's correct and hit [return] to continue"
      read junk
      # Allocate an EIP; result is "PublicIp:AllocationId".
      neweip=$(aws ec2 allocate-address --domain vpc | jq -r '. | .PublicIp + ":" + .AllocationId')
      if [ -n "$neweip" ]; then
         eippubip=${neweip%:*}
         eipallid=${neweip#*:}
         echo "new public EIP ($eipallid) created: $eippubip"
      else
         echo "could not create new public EIP"
         exit 1
      fi
      aws ec2 create-tags --resources $eipallid --tags Key=Name,Value=$eipname
      if [ $? -eq 0 ]; then
         echo "tagged new EIP ($neweip) with name: $eipname"
      else
         echo "could not tag new EIP ($neweip) with name: $eipname"
         exit 1
      fi
      assid=$(aws ec2 associate-address --allocation-id $eipallid --network-interface-id $eniid --private-ip-address $newip | jq -r .AssociationId)
      if [ $? -eq 0 ]; then
         echo "associated new EIP ($eippubip) with private IP ($newip) on eni ($eniid): $assid"
      else
         echo "could not associate new EIP ($eippubip) with private IP ($newip) on eni: $eniid"
      fi
   else
      # was "$eni" (undefined variable)
      echo "could not create new private IP on eni: $eniid"
   fi
else
   echo "usage: $0 ENI-ID EIP-NAME"
fi
| true
|
0e55bf1381158f2a5c2d1da5a2fc5891e296d83c
|
Shell
|
ncsu-samatova/ALACRITY
|
/script/exp_results/mem_trace/wah_mem_trace.sh~
|
UTF-8
| 3,714
| 2.671875
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Collect valgrind/massif memory-usage traces for the WAH and EWAH bitmap
# intersection query programs over 2-, 3- and 4-variable queries and (the
# first SEL of) five selectivities, then reduce each trace to a CSV of
# snapshot sizes in MiB.
# Refactored: the six copy-pasted loops are now one parameterized helper.
source /home/xzou2/alac_fastbit_comp/alac_multi_engine/script/env_setting.sh
LOG_BASE=/home/xzou2/alac_fastbit_comp/alac_multi_engine/script/exp_results/mem_trace/wah_s3d/
SEL=1 # number of selectivities to trace (first SEL of the list below)
COMMAND="valgrind --tool=massif --massif-out-file="
QUERY=/home/xzou2/wah_integration/icde/benchmark/intersection/query
# Selectivity suffixes used in the output file names, in order.
SELS=(".001" ".01" ".1" "1" "10")

# run_traces ENGINE PREFIX NVAR RANGES_ARRAY_NAME
#   ENGINE            - engine argument for the query binary ("wah" or "ewah")
#   PREFIX            - output file prefix ("wah" or "epd")
#   NVAR              - number of query variables (2, 3 or 4)
#   RANGES_ARRAY_NAME - name of the BINRANGE*VAR array from env_setting.sh
run_traces() {
    local engine=$1 prefix=$2 nvar=$3
    local -n ranges=$4   # bash 4.3+ nameref into the bin-range array
    local i outf csv
    for ((i=0; i<${SEL}; i++ )); do
        outf=${LOG_BASE}${prefix}_${nvar}var_${SELS[i]}.out
        csv=${LOG_BASE}${prefix}_${nvar}var_${SELS[i]}.csv
        ${COMMAND}${outf} ${QUERY} ${engine} ${nvar} ${DATADIR} ${ranges[i]}
        # Pull the total-bytes column out of each massif snapshot line and
        # convert to MiB, one value per CSV row.
        ms_print ${outf} | grep -P "^\s+\d+\s+[\d|,]+.*0$" | awk '{ print $3}' | sed 's/,//g' | awk -F, '{print $1/1024/1024}' > ${csv}
    done
}

############ WAH ##############################################
run_traces wah wah 4 BINRANGE4VAR
run_traces wah wah 3 BINRANGE3VAR
run_traces wah wah 2 BINRANGE2VAR
############ EWAH #############################################
run_traces ewah epd 4 BINRANGE4VAR
run_traces ewah epd 3 BINRANGE3VAR
run_traces ewah epd 2 BINRANGE2VAR
| true
|
017c4d5d5bf0cba714ca78f7392f7bf9a631eb46
|
Shell
|
shudipta/sample-extension-apiserver
|
/hack/gen_install.sh
|
UTF-8
| 1,688
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Generate apis/somethingcontroller/install/install.go, which registers the
# API group with the apiserver scheme. The Go source is written verbatim
# from the string below.
pushd $GOPATH/src/sample-extension-apiserver/apis/somethingcontroller/install
touch install.go
# Overwrite install.go with the generated source (escaped quotes become
# literal quotes in the output file).
echo "/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the \"License\");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an \"AS IS\" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package install
import (
	\"k8s.io/apimachinery/pkg/apimachinery/announced\"
	\"k8s.io/apimachinery/pkg/apimachinery/registered\"
	\"k8s.io/apimachinery/pkg/runtime\"
	\"sample-extension-apiserver/apis/somethingcontroller\"
	\"sample-extension-apiserver/apis/somethingcontroller/v1alpha1\"
)
// Install registers the API group and adds types to a scheme
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
	if err := announced.NewGroupMetaFactory(
		&announced.GroupMetaFactoryArgs{
			GroupName:                  somethingcontroller.GroupName,
			VersionPreferenceOrder:     []string{v1alpha1.SchemeGroupVersion.Version},
			AddInternalObjectsToScheme: v1alpha1.AddToScheme,
		},
		announced.VersionToSchemeFunc{
			v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme,
		},
	).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
		panic(err)
	}
}" > install.go
popd
| true
|
521f07bfa944366d0aed337d0db89a0fe8993912
|
Shell
|
sandra444/faersdbstats
|
/load_data_files_from_website/create_legacy_all_indi_data_file_with_filename_column.sh
|
UTF-8
| 1,638
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
##########################################################################
# create the combined legacy indication files with the filename appended as the last column
#
# LTS Computing LLC
##########################################################################
# process the first file - including adding "filename" column name to the header line at the start of the file
f="INDI04Q1.TXT"
# NOTE(review): thefilenamenosuffix is computed here and inside the loop but
# never used afterwards — looks like dead code; confirm before removing.
thefilenamenosuffix=$(basename $f .TXT)
# remove windows carriage return, fix bad data records with embedded \n, add on the "filename" column name to the header line and add the filename as the last column on each line
# output to the all data file
sed 's/\r//g' $f | sed '1,1 s/$/\$FILENAME/' | sed "2,$ s/$/\$$f/" > all_indi_legacy_data_with_filename.txt
# process the other files and concatenate to the all data file
FILES="
INDI04Q2.TXT INDI04Q3.TXT INDI04Q4.TXT INDI05Q1.TXT INDI05Q2.TXT INDI05Q3.TXT INDI05Q4.TXT INDI06Q1.TXT INDI06Q2.TXT INDI06Q3.TXT INDI06Q4.TXT INDI07Q1.TXT INDI07Q2.TXT
INDI07Q3.TXT INDI07Q4.TXT INDI08Q1.TXT INDI08Q2.TXT INDI08Q3.TXT INDI08Q4.TXT INDI09Q1.TXT INDI09Q2.TXT INDI09Q3.TXT INDI09Q4.TXT INDI10Q1.TXT INDI10Q2.TXT INDI10Q3.TXT INDI10Q4.TXT
INDI11Q1.TXT INDI11Q2.TXT INDI11Q3.TXT INDI11Q4.TXT INDI12Q1.TXT INDI12Q2.TXT INDI12Q3.TXT
"
for f in $FILES
do
    thefilenamenosuffix=$(basename $f .TXT)
    # remove windows carriage return,fix bad data records with embedded \n, remove the header line and add the filename as the last column on each line
    sed 's/\r//g' $f | sed '1,1d' | sed "1,$ s/$/\$$f/" >> all_indi_legacy_data_with_filename.txt
done
| true
|
5d1085aa501391160f98201a9329ebabb89b6a6f
|
Shell
|
Tuxified/dotfiles
|
/bin/keg-maintenance
|
UTF-8
| 315
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Refresh Homebrew metadata and raise a macOS notification whenever any
# installed formula has an update available. Intended to run from cron,
# whose minimal PATH is fixed up by sourcing the user's exports first.
source ~/.exports
brew update > /dev/null
# An empty 'brew outdated' result means every formula is current.
outdated="$(brew outdated)"
if [[ -n "$outdated" ]]; then
  osascript -e "display notification \"Outdated stuff:\n $outdated\" with title \"Upgrade your kegs\""
fi
| true
|
9c6fe683563e22d738f896c1c8743655ed9f0cf6
|
Shell
|
devopstoday11/fury-kubernetes-registry
|
/katalog/tests/harbor/clair.sh
|
UTF-8
| 3,191
| 3.34375
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bats
# Bats integration tests for Harbor's Clair scanner: push a known image,
# verify it appears in the registry, and check the scanner is healthy.
# Requires EXTERNAL_DNS plus a reachable Harbor using the default admin
# credentials.
load "./../lib/helper"
@test "[CLAIR] Setup" {
    info
    # Pull the test image and authenticate docker against the registry.
    setup(){
        docker pull ubuntu:16.04
        docker login harbor.${EXTERNAL_DNS} -u admin -p Harbor12345
    }
    run setup
    [ "$status" -eq 0 ]
}
@test "[CLAIR] Deploy insecure image" {
    info
    # Tag and push the image into the 'library' project.
    deploy(){
        docker tag ubuntu:16.04 harbor.${EXTERNAL_DNS}/library/ubuntu:16.04
        docker push harbor.${EXTERNAL_DNS}/library/ubuntu:16.04
    }
    run deploy
    [ "$status" -eq 0 ]
}
@test "[CLAIR] Check insecure image is in the registry" {
    info
    # The artifact's first tag should be the one we pushed.
    test(){
        tag=$(curl -s -X GET "https://harbor.${EXTERNAL_DNS}/api/v2.0/projects/library/repositories/ubuntu/artifacts/16.04/tags" \
            -H "accept: application/json" \
            --user "admin:Harbor12345" --fail | jq -r .[0].name)
        if [ "${tag}" != "16.04" ]; then return 1; fi
    }
    run test
    [ "$status" -eq 0 ]
}
@test "[CLAIR] Check clair status" {
    info
    # The project's configured scanner must report itself healthy.
    test(){
        health=$(curl -s -X GET "https://harbor.${EXTERNAL_DNS}/api/v2.0/projects/1/scanner" \
            -H "accept: application/json" \
            --user "admin:Harbor12345" --fail | jq -r .health)
        if [ "${health}" != "healthy" ]; then return 1; fi
    }
    run test
    [ "$status" -eq 0 ]
}
@test "[CLAIR] Scan an insecure image" {
    info
    # Trigger a scan, poll until it reports Success, then require at least
    # one vulnerability in the summary. The whole check is retried via
    # loop_it because scanning can take a while to surface results.
    test(){
        # Trigger Scan
        echo "# Trigger the scan" >&3
        curl -X POST "https://harbor.${EXTERNAL_DNS}/api/v2.0/projects/library/repositories/ubuntu/artifacts/16.04/scan" \
            -H "accept: application/json" \
            --user "admin:Harbor12345" --fail
        # Wait for scan
        retries=0
        mas_retries=10
        retry_seconds=5
        scan_status=""
        echo "# Wait to get the scan report" >&3
        while [[ "${scan_status}" != "Success" ]] && [[ "${retries}" -lt ${mas_retries} ]]
        do
            scan_status=$(curl -s -X GET "https://harbor.${EXTERNAL_DNS}/api/v2.0/projects/library/repositories/ubuntu/artifacts/16.04?with_scan_overview=true" \
                -H "accept: application/json" \
                --user "admin:Harbor12345" --fail | jq -r '.scan_overview["application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0"].scan_status')
            if [ "${scan_status}" != "Success" ]; then echo "# Scan is not ready yet" >&3; let "retries+=1"; sleep ${retry_seconds}; fi
        done
        if [ "${scan_status}" != "Success" ]; then return 1; fi
        # See scan report
        echo "# Checking scan report" >&3
        vulns=$(curl -s -X GET "https://harbor.${EXTERNAL_DNS}/api/v2.0/projects/library/repositories/ubuntu/artifacts/16.04?with_scan_overview=true" \
            -H "accept: application/json" \
            --user "admin:Harbor12345" --fail | jq -r '.scan_overview["application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0"].summary.total')
        if [ "${vulns}" == "null" ]; then echo "# No vulnerabilities found. Retrying" >&3; return 1; fi
        if [ "${vulns}" -eq "0" ]; then echo "# No vulnerabilities found. Retrying" >&3; return 1; fi
    }
    loop_it test 30 60
    status=${loop_it_result}
    [ "$status" -eq 0 ]
}
| true
|
b8801b1f73ab2a9048d1045866f527d323497672
|
Shell
|
holtzmanjon/apo1m
|
/src/ARC/ARC_API/arc_camlib/lod_files/E2V4240_LN2/timrom
|
UTF-8
| 396
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh
# Assemble and link the DSP56300 timing ROM code, then convert the linker
# output into S-record files for EEPROM programming.
echo ""
echo "Assembling DSP code for a generic EEPROM"
echo ""
DOWNLOAD=ROM
# Assemble with DOWNLOAD=ROM, link, and expand the .cld into a .lod file.
asm56300 -b -ltimrom.ls -d DOWNLOAD $DOWNLOAD timrom.asm
dsplnk -btimrom.cld -v timrom.cln
rm -f timrom.lod
cldlod timrom.cld > timrom.lod
rm timrom.cln ; rm timrom.cld
echo ""
echo "Created files 'tim.s' for EEPROM generation"
echo ""
# Produce the S-record image and rename the outputs to their canonical names.
srec -bs timrom.lod
mv timrom.s tim.s
mv timrom.lod tim.rom
| true
|
4152c9d480500f0cb47b5f44840799d50d4431d9
|
Shell
|
SandeepThakare/dev_installation_scripts
|
/aws-cli-installation.sh
|
UTF-8
| 2,574
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Script to aws configuration
# @author sandeep thakare
# @since 21st May 2018
#
# Installs pip for the current user via get-pip.py, installs the AWS CLI
# with pip, and finishes with an interactive `aws configure`.
#
# NOTE(review): the `export PATH=...` lines below only affect this script's
# own process; they are not persisted anywhere, and `source ~/.profile`
# only helps if the user has already added the export there — confirm.
echo 'Check pyhon version'
python --version
echo '--------------------------------------------------------------------------------------'
echo 'Download the installation script from pypa.io'
curl -O https://bootstrap.pypa.io/get-pip.py
echo 'Run the script with python'
sudo python get-pip.py --user
echo 'Adding the executable path to your PATH variable: ~/.local/bin'
ls -a ~
echo 'Adding an export command to your profile script.'
export PATH=~/.local/bin:$PATH
echo 'Loading the profile into your current session'
source ~/.profile
echo 'Verifying that pip is install correctly'
pip --version
echo '--------------------------------------------------------------------------------------'
echo 'Installing the AWS CLI with Pip'
echo '--------------------------------------------------------------------------------------'
echo 'Install aws-cli using pip'
pip install awscli --upgrade --user
echo 'Verify aws-cli install correctly'
echo 'If you get an error, see https://docs.aws.amazon.com/cli/latest/userguide/troubleshooting.html'
echo '--------------------------------------------------------------------------------------'
echo 'Adding the AWS CLI Executable to your Command Line Path'
echo '--------------------------------------------------------------------------------------'
echo 'Add an export command to your profile script'
export PATH=~/.local/bin:$PATH
echo 'Load profile into your current session'
source ~/.profile
echo '--------------------------------------------------------------------------------------'
echo 'Checking aws-cli is installed correctly'
echo 'you need access key and secret key of aws user for final set-up and deplyment process'
echo 'through aws cli'
echo 'If you do not have credential then you can generate by using follwing steps'
echo '1. Visit https://console.aws.amazon.com/iam/home?region=us-east-1#/home'
echo '2. Click on Users --> Add Users --> Choose Access Type "Programmatic access" --> Create group with "AdministratorAccess" access policy --> Click Creae User'
echo '3. Note down the access key and secret key or download csv using "download csv" button on left upper corner'
echo '4. run "aws configure" command and provide access key and secret key'
echo '--------------------------------------------------------------------------------------'
# Interactive: prompts for the access key / secret key / region / format.
aws configure
echo '--------------------------------------------------------------------------------------'
echo 'aws-cli is set on your system -- Happy Coading ;);)'
| true
|
2908981cc8fec05133205cd226da68064aa4adc6
|
Shell
|
hypirion/adventofcode2020
|
/scripts/test-py.sh
|
UTF-8
| 291
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
# Usage: ../test-py.sh DIR SOLUTION
# Example: ../test-py.sh /adventofcode2020/day-03 solutions/main.py
#
# Run the given Python solution with DIR/input.txt on stdin and diff its
# stdout against DIR/output.txt. Under `set -e` a mismatch (diff exiting
# non-zero) aborts before the success line is printed.
DIR="$1"
SOLUTION="$2"
# Feed the input via redirection instead of `cat file | ...` — same
# behaviour, one fewer process (useless use of cat).
python3 "$DIR/$SOLUTION" < "$DIR/input.txt" | diff - "$DIR/output.txt"
echo "$DIR / python3 $SOLUTION ✅"
| true
|
9c7e4f0e5a4399f10f1589f5b792516df773cbc5
|
Shell
|
giuliostramondo/extra_polymem_demo
|
/performance_prediction/generate_analysis_webapp.sh
|
UTF-8
| 3,749
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
#Export analysis to csv
#
# Builds "<stem>.analysis", a CSV summarising every *.schedule file in the
# current directory: parallel read count, speedup and efficiency relative to
# the sequential access trace, plus estimated bandwidth/frequency looked up
# from the PolyMem prediction tables.
#
# Read one cell from a CSV file.
# Arguments: $1 - csv file, $2 - column (1-based), $3 - row (1-based)
# Outputs:   the cell value on stdout
function read_csv_cell {
csv_filename=$1
row=$3
column=$2
cell=`tail -n+$row $csv_filename | cut --delimiter=, -f$column | head -n 1`
echo $cell
}
# Column index per mapping scheme / row index per memory count in the
# prediction CSVs used below.
declare -A bw_csv_columns_scheme=( ["ReRo"]=3 ["ReCo"]=4 ["RoCo"]=5 ["ReTr"]=6 )
declare -A bw_csv_rows_mem=( ["8"]=2 ["16"]=3 )
file_name_stem=$1
output_file=${file_name_stem}.analysis
#TODO extract the datawidth from the source C file ( it is expressed in bytes )
data_width=8
#TODO extract the aggregated number of used ports for read and writes from the c source file
used_ports_r_w=3
# Sequential read count = number of comma-separated entries in the trace.
N_sequential_read=`cat ${file_name_stem}.atrace | sed -E "s/,/\n/g"| wc -l`
echo "Memories,P,Q,Scheme,N_sequential_read,N_parallel_read,Speedup,Efficiency,Estimated BW (GB/s),Estimated Frequency (MHz),Schedule File">$output_file
for schedule in $(ls . | grep schedule$); do
echo "this is i -> "$schedule
# Parse scheme / memory count / p / q out of the schedule filename.
info=(`echo $schedule | sed "s/${file_name_stem}_\(.*\)_\([0-9]\+\)mems_p\([0-9]\+\)_q\([0-9]\+\).*/\1 \2 \3 \4/"`)
scheme=${info[0]}
mems=${info[1]}
p=${info[2]}
q=${info[3]}
# Parallel reads = number of schedule lines (one cycle per line).
Npar=`wc -l ${schedule} | sed "s/ .*//"`
Speedup=`echo "scale=2;${N_sequential_read}/${Npar}"| bc -l`
Efficiency=`echo "scale=2;${N_sequential_read}/(${mems}*${Npar})"| bc -l`
#../../../performance_prediction/polymem_theoretical_bw.csv
echo "read_csv_cell ../../../performance_prediction/polymem_theoretical_bw.csv ${bw_csv_columns_scheme[${scheme}]} ${bw_csv_rows_mem[${mems}]}"
read_csv_cell "../../../performance_prediction/polymem_theoretical_bw.csv" ${bw_csv_columns_scheme[${scheme}]} ${bw_csv_rows_mem[${mems}]}
# NOTE(review): $bla is never assigned anywhere in this script — the echo
# below is a debug leftover and prints an empty line.
echo "$bla"
echo "${bw_csv_columns_scheme[${scheme}]}"
#The theoretical truput in polymem_theoretical_bw contains the RAW results regarding the aggregated READ bandwidth for a design with 2 read ports
#This cannot be used in our case because STREAM considers the aggregated bandwidth of READs and WRITE ports ( off by a 3/2 factor w.r.t. the RAW numbers )
#Theoretical_BW=`read_csv_cell ../../../performance_prediction/polymem_theoretical_bw.csv ${bw_csv_columns_scheme[${scheme}]} ${bw_csv_rows_mem[${mems}]}`
Extimated_Freq=`read_csv_cell ../../../performance_prediction/polymem_theoretical_freq.csv ${bw_csv_columns_scheme[${scheme}]} ${bw_csv_rows_mem[${mems}]}`
#Theoretical_BW is /1000 because MHz -> GB
Theoretical_BW=`echo "scale=4;${mems}*${Extimated_Freq}*${data_width}*${used_ports_r_w}/1000"|bc -l`
Extimated_BW=`echo "scale=2;${Theoretical_BW}*${Efficiency}"|bc -l`
echo "${mems},${p},${q},${scheme},${N_sequential_read},${Npar},${Speedup},${Efficiency},${Extimated_BW},${Extimated_Freq},./schedules/${schedule}">>$output_file
done
# Older variant of the loop above, kept for reference.
#for mems in 8 16; do
#    for scheme in ReRo ReCo RoCo ReTr;do
#    Npar=`wc -l ./schedules/${file_name_stem}_${scheme}_${mems}mems.schedule | sed "s/...schedules.*//"`
#    Speedup=`echo "scale=2;${N_sequential_read}/${Npar}"| bc -l`
#    Efficiency=`echo "scale=2;${N_sequential_read}/(${mems}*${Npar})"| bc -l`
#    echo "read_csv_cell polymem_theoretical_bw.csv ${bw_csv_columns_scheme[${scheme}]} ${bw_csv_rows_mem[${mems}]}"
#    read_csv_cell "./performance_prediction/polymem_theoretical_bw.csv" ${bw_csv_columns_scheme[${scheme}]} ${bw_csv_rows_mem[${mems}]}
#    echo "$bla"
#    echo "${bw_csv_columns_scheme[${scheme}]}"
#
#    Theoretical_BW=`read_csv_cell ./performance_prediction/polymem_theoretical_bw.csv ${bw_csv_columns_scheme[${scheme}]} ${bw_csv_rows_mem[${mems}]}`
#    Extimated_BW=`echo "scale=2;${Theoretical_BW}*${Efficiency}"|bc -l`
#    echo "${mems},${scheme},${N_sequential_read},${Npar},${Speedup},${Efficiency},${Extimated_BW}">>$output_file
#    done
#done
| true
|
513c3a11a88185ad4fe04db1e5af511b70325abd
|
Shell
|
k0Iry/yocto_rpi_docker
|
/docker-compose/start.sh
|
UTF-8
| 1,623
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Set up and run a Yocto build for a Raspberry Pi 4 (64-bit) image:
# clone the required layers, optionally clean the build directory,
# register the layers in bblayers.conf on first run (or after a clean),
# then bitbake the image and — on success — its SDK.
git clone git://git.yoctoproject.org/poky
git clone git://git.yoctoproject.org/meta-raspberrypi
git clone git://git.yoctoproject.org/meta-virtualization
git clone https://github.com/openembedded/meta-openembedded.git
git clone https://github.com/k0Iry/meta-rpilinux.git
initial=false
# First run is detected by an empty build/ directory; otherwise offer a clean.
if [ -z "$(ls -A build)" ]; then
initial=true
else
read -p "Do you want to clean the build before start?[y/n] " -n 1 -r
if [[ $REPLY =~ ^[Yy]$ ]]; then
rm -rf build/*
fi
fi
source poky/oe-init-build-env
# Only (re)write layer/local config on a fresh or freshly-cleaned build,
# so repeated runs don't append duplicate lines.
if [[ $REPLY =~ ^[Yy]$ || $initial = true ]]; then
# bitbake-layers is not very reliable for adding layers
# we do it so manually
cat >> "conf/bblayers.conf" << EOF
BBLAYERS += " $PWD/../meta-raspberrypi "
BBLAYERS += " $PWD/../meta-rpilinux "
BBLAYERS += " $PWD/../meta-virtualization "
BBLAYERS += " $PWD/../meta-openembedded/meta-networking "
BBLAYERS += " $PWD/../meta-openembedded/meta-filesystems "
BBLAYERS += " $PWD/../meta-openembedded/meta-python "
BBLAYERS += " $PWD/../meta-openembedded/meta-oe "
EOF
# override variable MACHINE if you want to
# build another target, e.g. you don't want 64 bits
# then you define MACHINE = raspberrypi4,
# find machines supported here:
# https://github.com/agherzan/meta-raspberrypi/tree/master/conf/machine
cat >> "conf/local.conf" << EOF
DISTRO_FEATURES_append += "virtualization bluetooth wifi"
MACHINE = "raspberrypi4-64"
IMAGE_FSTYPES = "rpi-sdimg"
ENABLE_UART = "1"
EOF
fi
bitbake rpilinux-image
# check if build succeeds
status=$?
if test $status -eq 0
then
echo "start to build SDK..."
bitbake rpilinux-image -c populate_sdk
fi
| true
|
ca86b2cdef43de3db2b5dbcf2eca40bd3f55b20a
|
Shell
|
puppetlabs/puppetdb-cli
|
/make/test
|
UTF-8
| 224
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/sh
# CI helper: run the Go test suite with the race detector and coverage
# for every package in the repository.
set -o errexit
# NOTE(review): GIT_ROOT is computed but not used below — presumably kept
# for parity with sibling make/ scripts; confirm before removing.
GIT_ROOT=${GIT_ROOT:-$(git rev-parse --show-toplevel)}
# Provides the $OK_COLOR / $NO_COLOR escape sequences used by printf.
. make/include/colors
printf "%b==> Testing %b\n" "${OK_COLOR}" "${NO_COLOR}"
go test -race -cover $(go list -f '{{ .ImportPath }}' ./...)
| true
|
ae870f4e2f29568e949d0f1a88f3cf0a176eb138
|
Shell
|
epdansereau/mt-dnn
|
/scripts/run_toxic_mini.sh
|
UTF-8
| 1,839
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Iteratively fine-tune an MT-DNN model on the "toxic" task, one data shard
# at a time: shard i (toxic_train{i}.json) is swapped in as the active
# training file, trained for one epoch, and the stripped checkpoint seeds
# the next round.
#
# Usage: run_toxic_mini.sh <batch_size> <gpu>
if [[ $# -ne 2 ]]; then
  echo "train.sh <batch_size> <gpu>"
  exit 1
fi
BERT_PATH="mt_dnn_models/mt_dnn_large.pt"
for i in 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99
do
  echo "!!!!BERT_PATH:${BERT_PATH}"
  prefix="mt-dnn-toxic"
  BATCH_SIZE=$1
  gpu=$2
  echo "export CUDA_VISIBLE_DEVICES=${gpu}"
  export CUDA_VISIBLE_DEVICES=${gpu}
  tstr=$(date +"%FT%H%M")
  train_datasets="toxic"
  test_datasets="toxic"
  MODEL_ROOT="checkpoints"
  DATA_DIR="data/mt_dnn"
  answer_opt=0
  optim="adamax"
  grad_clipping=0
  global_grad_clipping=1
  model_dir="checkpoints/${prefix}_${optim}_answer_opt${answer_opt}_gc${grad_clipping}_ggc${global_grad_clipping}_${tstr}"
  log_file="${model_dir}/log.log"
  # Swap shard i in as the active training file for this round.
  mv data/mt_dnn/toxic_train${i}.json data/mt_dnn/toxic_train.json
  # NOTE(review): model_dir/log_file are assigned twice with identical
  # values — the second pair below is redundant.
  model_dir="checkpoints/${prefix}_${optim}_answer_opt${answer_opt}_gc${grad_clipping}_ggc${global_grad_clipping}_${tstr}"
  log_file="${model_dir}/log.log"
  python train.py --data_dir ${DATA_DIR} --init_checkpoint ${BERT_PATH} --batch_size ${BATCH_SIZE} --output_dir ${model_dir} --log_file ${log_file} --answer_opt ${answer_opt} --optimizer ${optim} --train_datasets ${train_datasets} --test_datasets ${test_datasets} --grad_clipping ${grad_clipping} --global_grad_clipping ${global_grad_clipping} --epochs 1 --batch_size_eval 4
  # Put the shard back under its numbered name.
  mv data/mt_dnn/toxic_train.json data/mt_dnn/toxic_train${i}.json
  # Strip the checkpoint (see scripts/strip_model.py) so it can seed the
  # next round's --init_checkpoint.
  python scripts/strip_model.py --checkpoint "${model_dir}/model_0.pt" --fout "checkpoint/model_${i}_stripped.pt"
  BERT_PATH="checkpoint/model_${i}_stripped.pt"
done
| true
|
60f3462b66ce991720432b0744642df5100b562e
|
Shell
|
SHSauler/helpers
|
/scripts/hasher.sh
|
UTF-8
| 258
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print common hashes (MD5, SHA1, SHA256, ssdeep) of the file given as $1.
INPFILE=$1
echo "File: $INPFILE"
echo "MD5: $(md5sum "$INPFILE" | cut -d" " -f1)"
echo "SHA1: $(sha1sum "$INPFILE" | cut -d" " -f1)"
echo "SHA256: $(sha256sum "$INPFILE" | cut -d" " -f1)"
# Bug fix: the original hashed the literal file "hash.sh" here instead of
# the input file passed on the command line.
echo "ssdeep: $(ssdeep -s "$INPFILE" | tail -n1 | cut -d"," -f1)"
| true
|
1a9294f12372831cc26d4248356922ed2ebf8600
|
Shell
|
vjsrinivas/eureca_face
|
/scripts/run_retinaface_wider_speckle.sh
|
UTF-8
| 600
| 2.703125
| 3
|
[] |
no_license
|
cd ../
cd ./WIDERFACE/eval_tools/
# Run the WIDER FACE MATLAB evaluation (wider_eval) on RetinaFace detections
# for each speckle-noise level, writing one mAP report file per level.
ROOT="/home/vijay/Documents/devmk4/eureca/eureca_face"
MATLAB=/usr/local/MATLAB/R2020b/bin/matlab
# Full noise sweep kept commented out; only the first four levels are run.
#NOISE_PARAM=( -1.000000 -0.750000 -0.500000 -0.250000 0.000000 0.250000 0.500000 0.750000 1.000000 )
NOISE_PARAM=( -1.000000 -0.750000 -0.500000 -0.250000 )
for noise in "${NOISE_PARAM[@]}"
do
echo "Running " $noise "..."
sudo $MATLAB -nodisplay -nosplash -r \
"wider_eval(\
\"$ROOT/NOISES/speckle/$noise/detections/retinaface\", \
\"RetinaFace_retinaface_speckle_$noise\", \
\"$ROOT/results/retinaface/speckle_"$noise"_map.txt\"); \
exit()"
done
cd ../../
| true
|
93b4b5e14f4c7776880c2e216e411e2cdbf42446
|
Shell
|
thomascosby/tc_rep
|
/bash_scripts/Archive/rsync/old/pre-GFS/opus_daily_mirror.sh
|
UTF-8
| 3,477
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Daily mirror of the Opus volumes (intended for cron):
#   /mn_raid1 -> /mirror_raid1 and /mn_raid2 -> /mirror_raid2 via rsync,
# logging to a per-day file plus a rolling log, mailing a summary to the
# admins, and pruning daily logs older than two weeks.
TIMESTAMP=`date '+%m.%d.%y-%H.%M'`
DAILY_LOG_FILE="/usr/schawk/logs/opus_mirror/opus_mirror_daily_"$TIMESTAMP".log"
LOG_FILE="/usr/schawk/logs/opus_mirror/opus_mirror.log"
RSYNC="/usr/bin/rsync"
RECIPIENTS="dberks@schawk.com,tholm@schawk.com,thomas.cosby@schawk.com"
#RECIPIENTS="dberks@schawk.com"
ERRFLAG=0
touch $LOG_FILE
touch $DAILY_LOG_FILE
# uncomment next line to reset log
# cat /dev/null > $LOG_FILE
#######################
# check to see if all source & destination file systems are mounted
FAILFLAG=0
DF_TEXT=`df -h`
if ! [[ $DF_TEXT =~ .*mn_raid1.* ]]
then
FAILFLAG=1
fi
if ! [[ $DF_TEXT =~ .*mn_raid2.* ]]
then
FAILFLAG=1
fi
if ! [[ $DF_TEXT =~ .*mirror_raid1.* ]]
then
FAILFLAG=1
fi
if ! [[ $DF_TEXT =~ .*mirror_raid2.* ]]
then
FAILFLAG=1
fi
if [ $FAILFLAG -eq 1 ]
# one or more needed filesystems are missing. Bail.
then
echo "" >> $LOG_FILE
echo "" >> $DAILY_LOG_FILE
echo "### `date` ###'" >> $LOG_FILE
echo "### `date` ###'" >> $DAILY_LOG_FILE
echo "### ONE OR MORE FILE SYSTEMS MISSING.  RSYNC CANCELLED. ###" >> $LOG_FILE
echo "### ONE OR MORE FILE SYSTEMS MISSING.  RSYNC CANCELLED. ###" >> $DAILY_LOG_FILE
echo "" >> $LOG_FILE
echo "" >> $DAILY_LOG_FILE
MSG_TEXT="ERROR!  One or more file systems required for this rsync were not found.  Opus Mirror Rsync cancelled."
echo "$MSG_TEXT" | mail -s "[ERROR: OPUS MIRROR RSYNC]" "$RECIPIENTS"
exit
fi
#######################
# RSYNC MN_RAID1
SOURCEVOL="/mn_raid1/"
DESTVOL="/mirror_raid1/"
echo "### `date`" >> $DAILY_LOG_FILE
echo "### `date`" >> $LOG_FILE
echo "### START RSYNC "$SOURCEVOL" TO "$DESTVOL >> $DAILY_LOG_FILE
echo "### START RSYNC "$SOURCEVOL" TO "$DESTVOL >> $LOG_FILE
$RSYNC -ahv --stats --delete-excluded --delete --force $SOURCEVOL $DESTVOL 2>&1 >> $DAILY_LOG_FILE || ERRFLAG=1
echo "" >> $DAILY_LOG_FILE
echo "" >> $LOG_FILE
echo "### END RSYNC: "$SOURCEVOL" TO "$DESTVOL >> $DAILY_LOG_FILE
echo "### `date`" >> $DAILY_LOG_FILE
echo "" >> $DAILY_LOG_FILE
echo "" >> $LOG_FILE
# NOTE(review): the tail -12 / tail -13 below assume a fixed-length rsync
# --stats summary; if rsync's output format changes, the rolling-log excerpt
# will be off.
tail -12 $DAILY_LOG_FILE >> $LOG_FILE
echo "----------------------------------------------------" >> $LOG_FILE; echo "" >> $LOG_FILE
# END RSYNC MN_RAID1
#######################
# RSYNC MN_RAID2
SOURCEVOL="/mn_raid2/"
DESTVOL="/mirror_raid2/"
echo "### `date`" >> $DAILY_LOG_FILE
echo "### `date`" >> $LOG_FILE
echo "### START RSYNC "$SOURCEVOL" TO "$DESTVOL >> $DAILY_LOG_FILE
echo "### START RSYNC "$SOURCEVOL" TO "$DESTVOL >> $LOG_FILE
$RSYNC -ahv --protect-args --delete-excluded --stats --delete --force --exclude-from='/usr/schawk/scripts/rsync/opus_mirror_exclude' $SOURCEVOL $DESTVOL 2>&1 >> $DAILY_LOG_FILE || ERRFLAG=1
echo "" >> $DAILY_LOG_FILE
echo "" >> $LOG_FILE
echo "### END RSYNC: "$SOURCEVOL" TO "$DESTVOL >> $DAILY_LOG_FILE
echo "### `date`" >> $DAILY_LOG_FILE
tail -13 $DAILY_LOG_FILE >> $LOG_FILE;echo "" >> $LOG_FILE
echo "###################################################" >> $LOG_FILE; echo "" >> $LOG_FILE
# END RSYNC MN_RAID2
#######################
# send summary log excerpt to admins
LOG_TAIL=`tail -n 39 $LOG_FILE`
echo "$LOG_TAIL" | mail -s "[OPUS MIRROR RSYNC]" "$RECIPIENTS"
# NOTE(review): ERRFLAG is set on rsync failure but the subject-flagging
# block below is commented out, so failures are not highlighted in the
# mail subject — confirm this is intentional.
# flag subject line if errors occurred
#if [ $ERRFLAG -ne 0 ]; then
#	echo "$LOG_TAIL" | mail -s "[(ERROR) OPUS MIRROR RSYNC]" "$RECIPIENTS"
#else
#	echo "$LOG_TAIL" | mail -s "[OPUS MIRROR RSYNC]" "$RECIPIENTS"
#fi
# delete daily logs older than two weeks
find /usr/schawk/logs/opus_mirror/opus_mirror_daily* -mtime +14 -exec rm {} \;
#
### END SCRIPT #######
| true
|
fbb7920249d3034abd057fd9d8fc9c9a3b72f490
|
Shell
|
zajk/rkhunter-formula
|
/rkhunter/files/baseline.sh
|
UTF-8
| 376
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
{% from "rkhunter/map.jinja" import rkhunter with context -%}
#!/bin/bash
# One-shot rkhunter baseline (Salt-rendered template): on the first run,
# update rkhunter and build its file-properties database, e-mail the report,
# and drop a lock file so subsequent runs exit immediately.
LOCK_FILE='/var/run/rkhunter_baseline'
if [ ! -f $LOCK_FILE ]; then
    rkhunter --versioncheck --update --propupd --nocolors > /tmp/rkhunter.tmp
    mail -s "[rkhunter] First rootkit hunter run on {{ salt['grains.get']('fqdn') }}" {{ rkhunter.email }} < /tmp/rkhunter.tmp
    touch $LOCK_FILE
else
    exit 0
fi
| true
|
5275ea4fb72088de2fb08422abbeacfbb8dbd283
|
Shell
|
vhcid/vhcapi-public
|
/1.0/minecraft-image/1.17/1-17-0.sh
|
UTF-8
| 2,678
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Paper 1.17 (Custom SMP) Installation Script
#
# Server Files: /mnt/server
#
# Resolves the requested Paper version/build via the PaperMC API (falling
# back to the latest), downloads the server jar, a pre-made
# server.properties, and a world/backup archive from the VHAPI image host.
PROJECT=paper #Do Not Change This For Any Reason
VHAPI_VERSION=1.0 #Change This If Some Update At data bases
apt update
apt install -y curl jq
apt-get install unzip
# If DL_PATH is set, use it directly ({{var}} placeholders are expanded);
# otherwise resolve version and build through the PaperMC API.
if [ -n "${DL_PATH}" ]; then
echo -e "Menggunakan Download Url: ${DL_PATH}"
DOWNLOAD_URL=`eval echo $(echo ${DL_PATH} | sed -e 's/{{/${/g' -e 's/}}/}/g')`
else
VER_EXISTS=`curl -s https://papermc.io/api/v2/projects/${PROJECT} | jq -r --arg VERSION $MINECRAFT_VERSION '.versions[] | contains($VERSION)' | grep true`
LATEST_VERSION=`curl -s https://papermc.io/api/v2/projects/${PROJECT} | jq -r '.versions' | jq -r '.[-1]'`
if [ "${VER_EXISTS}" == "true" ]; then
echo -e "Versi Sudah Valid!. Menggunakan Versi Paper ${MINECRAFT_VERSION} + versi VHAPI : ${VHAPI_VERSION}"
else
echo -e "Using the latest ${PROJECT} version"
MINECRAFT_VERSION=${LATEST_VERSION}
fi
BUILD_EXISTS=`curl -s https://papermc.io/api/v2/projects/${PROJECT}/versions/${MINECRAFT_VERSION} | jq -r --arg BUILD ${BUILD_NUMBER} '.builds[] | tostring | contains($BUILD)' | grep true`
LATEST_BUILD=`curl -s https://papermc.io/api/v2/projects/${PROJECT}/versions/${MINECRAFT_VERSION} | jq -r '.builds' | jq -r '.[-1]'`
if [ "${BUILD_EXISTS}" == "true" ]; then
echo -e "Build Sudah Valid Untuk Versi ${MINECRAFT_VERSION}. Menggunakan build ${BUILD_NUMBER}"
else
echo -e "Menggunakan Versi Terbaru ${PROJECT} Untuk Versi ${MINECRAFT_VERSION} Dibantu Dengan Versi API ${VHAPI_VERSION}"
BUILD_NUMBER=${LATEST_BUILD}
fi
JAR_NAME=${PROJECT}-${MINECRAFT_VERSION}-${BUILD_NUMBER}.jar
echo "Versi yang akan di download:"
echo -e "Versi Minecraft: ${MINECRAFT_VERSION}"
echo -e "Build: ${BUILD_NUMBER}"
echo -e "JAR Name of Build: ${JAR_NAME}"
echo -e "API VHC : ${VHAPI_VERSION}"
DOWNLOAD_URL=https://papermc.io/api/v2/projects/${PROJECT}/versions/${MINECRAFT_VERSION}/builds/${BUILD_NUMBER}/downloads/${JAR_NAME}
fi
cd /mnt/server
echo -e "Menjalankan Perintah curl -o ${SERVER_JARFILE} ${DOWNLOAD_URL}"
# Keep the previous jar as a .old backup before overwriting.
if [ -f ${SERVER_JARFILE} ]; then
mv ${SERVER_JARFILE} ${SERVER_JARFILE}.old
fi
curl -o ${SERVER_JARFILE} ${DOWNLOAD_URL}
if [ ! -f server.properties ]; then
echo -e "Men Download server.properties yang sudah di remake!"
curl -o server.properties https://image.vhcid.tech/${VHAPI_VERSION}/minecraft-image/server.properties
fi
echo -e "Download dari VHAPI Image Versi : ${VHAPI_VERSION}"
curl -o 1-17-backup_master.tar.gz https://image.vhcid.tech/${VHAPI_VERSION}/minecraft-image/1.17/1-17-backup_master.tar.gz
tar -xvf 1-17-backup_master.tar.gz
rm 1-17-backup_master.tar.gz
# (translated) couldn't find a way to make the server auto-run
| true
|
492c7030466c388c293869b3d83f183340e6e6e9
|
Shell
|
kukelove/xss
|
/xss/trunk/code/brushClient/installScript/install_bak.sh
|
UTF-8
| 2,271
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
# Installer for brushClient: wipes /home/apps/brushClient, downloads the
# client jar, libraries, scripts and phantomjs from the distribution
# server, and installs JDK 1.8 if it is missing.
# NOTE(review): `trap ... ERR` is a bash feature; under a strict POSIX
# /bin/sh (e.g. dash) this line is invalid — confirm the target shell.
trap "echo 'install-result-failed: trap error';exit 1" ERR
echo "begin install brushClient>>>>>>"
# Path of the installation files, i.e. the directory holding install.sh etc.
installFiles_path=`pwd`
echo "install files path :"$installFiles_path
# Root install path
brushClientPath="/home/apps/brushClient"
# Refuse to run if the install files live under /home/apps/brushClient
# (that tree is removed below).
if [ `echo $installFiles_path | grep $brushClientPath` ];then
echo "result-failed: install.sh can not be under "$brushClientPath"!"
exit 1
fi
#mkdir $brushClientPath ,wget files
if [ -d $brushClientPath ];then
printf "remove old brushClient path:'%s'\n" $brushClientPath
rm -rf $brushClientPath
fi
printf "create brushClient path: '%s'\n" $brushClientPath
mkdir $brushClientPath
# Fetch the installer packages from the distribution server
cd $brushClientPath
echo "begin wget installer>>>>>>"
wgetRootPath="http://183.250.161.227:10001/brushms/clientInstaller"
wgetJdkPath=${wgetRootPath}"/jdk-8u11-linux-x64.tar.gz"
wgetBrushClientJarPath=${wgetRootPath}"/brushClient.jar"
wgetLibPath=${wgetRootPath}"/lib.tar.gz"
wgetScriptPath=${wgetRootPath}"/script.tar.gz"
wgetPhantomjsPath=${wgetRootPath}"/phantomjs.tar.gz"
wgetStartPath=${wgetRootPath}"/start.sh"
wgetStopPath=${wgetRootPath}"/stop.sh"
wgetUpdatePath=${wgetRootPath}"/update.sh"
#printf "wgetJdkPath is '%s'\n" $wgetJdkPath
#wget $wgetJdkPath
# NOTE(review): the JDK download above is commented out, yet the install
# step below copies $brushClientPath/jdk-8u11-linux-x64.tar.gz — that copy
# will fail (and trip the ERR trap) unless the tarball is provided some
# other way; confirm.
wget $wgetBrushClientJarPath
wget $wgetLibPath
wget $wgetScriptPath
wget $wgetPhantomjsPath
wget $wgetStartPath
wget $wgetStopPath
wget $wgetUpdatePath
tar -xf lib.tar.gz
tar -xf script.tar.gz
tar -xf phantomjs.tar.gz
rm -f lib.tar.gz
rm -f script.tar.gz
rm -f phantomjs.tar.gz
chmod 755 brushClient.jar start.sh stop.sh update.sh phantomjs/bin/phantomjs
echo "end wget installer<<<<<<"
# Install JDK 1.8 (skipped when already present)
echo "begin install jdk1.8>>>>>>"
jdkPath="/usr/local/java/jdk1.8.0_11"
if [ ! -d $jdkPath ];then
if [ ! -d "/usr/local/java" ];then
mkdir "/usr/local/java"
fi
cd /usr/local/java
cp $brushClientPath"/jdk-8u11-linux-x64.tar.gz" .
rm -f $brushClientPath"/jdk-8u11-linux-x64.tar.gz"
tar -zxf jdk-8u11-linux-x64.tar.gz
echo "end install jdk1.8<<<<<<"
else
echo "jdk1.8.0_11 already exist!not need install"
fi
echo "end install brushClient<<<<<<<"
echo "install-result-success"
| true
|
3f9e7e1a4a82e7b8a1f5d2bc47f73b7b46bd02fe
|
Shell
|
bdashrad/nagios-plugins
|
/check_apc_temp
|
UTF-8
| 2,051
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# check_apc
# Written by Brad Clark
#
# Nagios plugin: read the temperature (°F) of an APC UPS environmental
# sensor over SNMP and map it onto OK/WARNING/CRITICAL states with
# performance data.
#
# Usage: check_apc_temp -H host -C community [-w warning] [-c critical]
#
progname=check_apc
version=0.3
author="Brad Clark"

if [ $# -lt 1 ]; then
	echo "Usage: $0 -H host -C community [-w warning] [-c critical]"
	# NOTE(review): Nagios convention would be `exit 3` (UNKNOWN) here;
	# kept as a bare `exit` (status 0) to preserve existing behaviour.
	exit
fi

snmpget="/usr/bin/snmpget"
community="public"
snmpversion=1
warn=85
crit=95

# nagios return values
export STATE_OK=0
export STATE_WARNING=1
export STATE_CRITICAL=2
export STATE_UNKNOWN=3
export STATE_DEPENDENT=4

while getopts ":H:C:w:c:" opt; do
	case $opt in
		H) host=$OPTARG ;;
		C) community=$OPTARG ;;
		w) warn=$OPTARG ;;
		c) crit=$OPTARG ;;
		\?)
			echo "Invalid option: -$OPTARG" >&2
			echo "Usage: $0 -H host -C community [-w warning] [-c critical]"
			exit
		;;
		:)
			echo "Option -$OPTARG requires an argument" >&2
			exit
		;;
	esac
done

oid=uioSensorStatusTemperatureDegF.2.1

# temperature values for warning or critical / hdd (from datasheet)
MAXTEMPCRIT=$crit
MINTEMPCRIT="50"
MAXTEMPWARN=$warn
MINTEMPWARN="55"

# Map a temperature onto a Nagios state.
# Arguments: $1 - temperature in °F
# Returns:   STATE_OK / STATE_WARNING / STATE_CRITICAL
function checkTemperature () {
	# Bug fix: returnValue was previously uninitialised, so an in-range
	# temperature fell through to a bare `return` and reported WARNING.
	local returnValue=$STATE_OK
	# CRITICAL checks run after WARNING checks so they take precedence.
	if [ "$(echo "$1 >= $MAXTEMPWARN" | bc)" = 1 ] ; then
		returnValue=$STATE_WARNING
	fi
	if [ "$(echo "$1 >= $MAXTEMPCRIT" | bc)" = 1 ] ; then
		returnValue=$STATE_CRITICAL
	fi
	if [ "$(echo "$1 <= $MINTEMPWARN" | bc)" = 1 ] ; then
		returnValue=$STATE_WARNING
	fi
	if [ "$(echo "$1 <= $MINTEMPCRIT" | bc)" = 1 ] ; then
		returnValue=$STATE_CRITICAL
	fi
	return $returnValue
}

# Bug fix: the sensor-location lookup previously used a hard-coded host and
# community string instead of the -H/-C command-line arguments.
NAME=$(snmpwalk -v$snmpversion -c "$community" "$host" uioSensorStatusSensorLocation.2.1 | awk '{print $4 " " $5}' | tr -d '"')
TEMP=$(snmpget -v$snmpversion -c "$community" "$host" $oid | awk '{print $4}')

checkTemperature $TEMP
intReturn=$?

outMessage="$NAME: $TEMP°F | '$NAME °F'=$TEMP;$warn;$crit" ;
echo -e $outMessage
exit $intReturn
| true
|
d61f003f58430927ee50bd4feb6145e1885b0b4f
|
Shell
|
gcunhase/PreSumm-AMICorpus-DialSum
|
/src/prepare_amidialsum_data.sh
|
UTF-8
| 1,517
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
## ------ Pre-processing custom data --------
# Prepares the AMI DialSum corpus for PreSumm (BertSumExtAbs):
#   1) CoreNLP sentence-splitting/tokenisation per split,
#   2) conversion to simple JSON line files,
#   3) conversion to the binary BERT format expected by training.
# Settings
ROOT_DATA_PATH=../raw_data
DATA_PATH=ami_dialsum_corpus_stories
ROOT_RESULT="../logs/${DATA_PATH}_bertsumextabs"
mkdir $ROOT_RESULT
RESULT_PATH="${ROOT_RESULT}/eval" # Root name for all generated files
LOG_FILE="${ROOT_RESULT}/eval.log"
# NOTE(review): $BERT_DATA is never defined in this script, so unless it is
# exported by the caller BERT_DATA_FULL expands to "/ami_dialsum_corpus_stories";
# it is also unused below — confirm.
BERT_DATA_FULL="${BERT_DATA}/${DATA_PATH}" # Path to data + prefix
# Export CoreNLP tokenizer
# export CLASSPATH=../stanford-corenlp-full-2018-10-05/stanford-corenlp-3.9.2.jar
# Sentence Splitting and Tokenization
for DATA_TYPE in "train" "test" "valid"; do
  RAW_PATH="${ROOT_DATA_PATH}/${DATA_PATH}/${DATA_TYPE}"
  SAVE_PATH="${ROOT_DATA_PATH}/${DATA_PATH}_tokenized/${DATA_TYPE}"
  LOG_FILE="${ROOT_RESULT}/${DATA_TYPE}.log"
  python preprocess.py -mode tokenize -raw_path $RAW_PATH -save_path $SAVE_PATH -log_file $LOG_FILE
done
# Format to Simpler Json Files
JSON_DATA_PATH="../json_data/${DATA_PATH}"
mkdir $JSON_DATA_PATH
SAVE_PATH="${JSON_DATA_PATH}/${DATA_PATH}"
RAW_PATH="${ROOT_DATA_PATH}/${DATA_PATH}_tokenized"
LOG_FILE="${ROOT_RESULT}/${DATA_PATH}.log"
python preprocess.py -mode format_to_lines_amidialsum -raw_path $RAW_PATH -save_path $SAVE_PATH -n_cpus 1 -use_bert_basic_tokenizer false -log_file $LOG_FILE
# Step 5. Format to PyTorch Files
RAW_PATH=$JSON_DATA_PATH
SAVE_PATH="../bert_data/${DATA_PATH}_bin"
mkdir $SAVE_PATH
LOG_FILE="${ROOT_RESULT}/preprocess.log"
python preprocess.py -mode format_to_bert -raw_path $RAW_PATH -save_path $SAVE_PATH -lower -n_cpus 1 -log_file $LOG_FILE
| true
|
07fd16b4ee391b459996373c794ea73e3c4b3d9d
|
Shell
|
Dan2552/ocular
|
/rspec_integration/launch_rspec.sh
|
UTF-8
| 585
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Launch RSpec for the project in $1 using the custom Ocular formatter,
# selecting the project's Ruby via chruby and installing gems if needed.
current_dir=$( pwd )
home_dir=$( cd ~ && pwd )
script_dir=$( cd "$( dirname "$0" )" && pwd )
# NOTE(review): current_dir and home_dir are computed but never used below.
# TODO: support rbenv and rvm
# TODO: have a command-line ocular setup which uses `which` to work out what you have and where
source /usr/local/opt/chruby/share/chruby/chruby.sh
cd $1
# TODO: read from Gemfile / .ruby-version file
# Extract the version string from the Gemfile's `ruby "..."` line.
chruby $(cat Gemfile | grep "ruby \"" | tr -d '\n' | awk -F '"' '{print $2}') >/dev/null 2>&1
# Install gems only when the bundle is incomplete.
bundle check >/dev/null || bundle install
bundle exec rspec --require="$script_dir/ocular_rspec_formatter.rb" --format="OcularRSpecFormatter"
| true
|
f66b0f4315ec0a945245e9a2557a3b74663f9756
|
Shell
|
fapm4/ped
|
/bugfinder.sh
|
UTF-8
| 3,746
| 3.859375
| 4
|
[] |
no_license
|
#! /bin/bash
# Script made By Anton CH;
# NO COPYRIGHT (Just keep my name)
# NO RESPONSIBILITY!
# Please report all bugs & suggestions to "anton.1990@live.ru"
# Output format:
#   ...
#   TEST NAME
#   ...
#   VALGRIND
#   ...
#   DIFF
#   ...
#   RESULT
#   ...
#
#
# This script does all the mechanical work when testing the "tads". :-)
# Place it at the root of the assignment and make it executable.
# It accepts 2 parameters: the name of the directory (without '/')
# containing the tads, and the name of the executable generated by "make".
# The second argument is optional.
# If valgrind fails, the script stops. :-(
# If any of the outputs produced by the program do not match,
# the script shows the differences and stops. :-(
# NOTE: it is assumed the program never enters an infinite loop, and that
# the directories and files have the appropriate r/w/x permissions. :-)
# REMINDER:
# diff lines starting with "<" are the program's output, and lines starting
# with ">" are the expected (correct) output;
DEF_EXE="tad" # Name of the executable generated by "make";
F_SALIDA="salida.sal"
BAKNAME="_tad.cpp" # Backup name for "tad.cpp";
ERR_DIR="ERROR! $1 es un fichero o no existe!"
USAGE="Acepto 2 parametros: 1- Nombre del directorio (sin '/') con tads a testear(OBLIGATORIO). 2- Nombre del ejecutable que genera 'make' (OPCIONAL)."
if !(test -z $1) && (test -d $1)
then
# Back up the original tad.cpp (if it exists) to avoid overwriting it;
# NOTE: if a backup (or a file with the backup's name) already exists,
# NOTHING will be done;
if (test -f ./src/tad.cpp) && !(test -f ./src/$BAKNAME)
then
mv ./src/tad.cpp ./src/$BAKNAME
fi
# Copy each tadXX.cpp into src, compile, run under valgrind, and compare
# the outputs;
for i in ./$1/tad*.cpp
do
cp $i ./src/tad.cpp
echo "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
echo "                            Comprobando '$i'                                     "
echo "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
make -s clean
make -s
if (test $? -eq 2)
then
echo "*********************************************************************************"
echo "                         !!! ERROR AL COMPILAR !!!                               "
echo "*********************************************************************************"
break
fi
# Use the user-supplied executable name if given (and valid), else "tad".
if !(test -z $2) && !(test -d $2) && (test -x $2)
then
(valgrind ./$2) > $F_SALIDA
else
(valgrind ./tad) > $F_SALIDA
fi
# NOTE(review): valgrind by default passes through the program's own exit
# code; this "-eq 2" check presumably relies on the tested program's exit
# convention — confirm.
if (test $? -eq 2)
then
echo "*********************************************************************************"
echo "                        !!! REVISA LA PRUEBA '$i' !!!                            "
echo "                            !!! VALGRIND FALLA !!!                               "
echo "*********************************************************************************"
break
fi
echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
diff -b $F_SALIDA $i.sal
if (test $? -eq 0)
then
echo "*********************************************************************************"
echo "                      LA PRUEBA '$i' SUPERADA CON EXITO!                         "
echo "*********************************************************************************"
echo ""
echo ""
# NOTE(review): at this point $? holds the status of the "(test $? -eq 0)"
# subshell above, NOT of diff; it happens to be 1 whenever diff exited
# non-zero, which is the only reason the elif below works.
elif (test $? -eq 1)
then
echo "*********************************************************************************"
echo "                        !!! REVISA LA PRUEBA '$i' !!!                            "
echo "                      !!! LAS SALIDAS NO COINCIDEN !!!                           "
echo "*********************************************************************************"
break
fi
done
# Restore the original tad.cpp
if (test -f ./src/$BAKNAME) && !(test -z /src/$BAKNAME)
then
mv ./src/$BAKNAME ./src/tad.cpp
fi
# Clean up ...
make -s clean
rm -f $F_SALIDA
elif (test $# -gt 0) && (test $# -lt 3)
then
echo $ERR_DIR
fi
if (test $# -eq 0)
then
echo $USAGE
fi
| true
|
13038d5d9547ae886d85605f3995cbb8c1b3a65f
|
Shell
|
megalithic/bits-and-bobs
|
/bin/platform.sh
|
UTF-8
| 425
| 3.359375
| 3
|
[
"MIT",
"Unlicense"
] |
permissive
|
#!/bin/zsh
# Detect the current OS and export it as $PLATFORM
# (one of: macos, linux, freebsd, windows, unknown).
#
# Bug fix: the original used `expr substr "$(uname -s)" 1 5`, whose 5-char
# result could never equal the 7-char string 'FreeBSD', so the FreeBSD
# branch was unreachable; `expr substr` is also a GNU extension that fails
# on macOS/BSD. A portable `case` on `uname -s` fixes both.
platform="unknown"
case "$(uname -s)" in
  Darwin)      platform="macos" ;;
  Linux*)      platform="linux" ;;
  FreeBSD*)    platform="freebsd" ;;
  MINGW32_NT*) platform="windows" ;;
esac
export PLATFORM=$platform
echo "platform=$platform"
echo "PLATFORM=$PLATFORM"
| true
|
7d4809ea86e29e566c1297c495faf27afc80888c
|
Shell
|
glizWcloak/changelog-sh
|
/changelog-release.sh
|
UTF-8
| 360
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Release step for changelog-sh: move pending entries out of
# changelog/unreleased/ into a directory named after the given version,
# then show a preview.
# Relies on _changelogsh_raw_to_expanded and _changelogsh_preview, which
# are defined in sibling files.
function _changelogsh_release {
  if [ ! -d "changelog/unreleased/" ]; then
    printf "Nothing to release.\n"
    return
  fi
  if [ "$#" -lt 1 ]; then
    echo "Version is required"
    return
  fi
  version=$1
  # e.g. raw "1.2.3" -> expanded directory name.
  expanded=$(_changelogsh_raw_to_expanded $version)
  mv 'changelog/unreleased' "changelog/$expanded"
  _changelogsh_preview $1
}
| true
|
2b1d6c23214ff79fc725f713e380899d7b72f8e7
|
Shell
|
justjoe22/Outage.Notify
|
/gufire.sh
|
UTF-8
| 492
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Commit and push the working tree to GitHub, then deploy to Firebase.
# SECURITY(review): the password is interpolated into the push URL below,
# so it is briefly visible in `ps` output on this machine and in any URL
# logging; prefer a credential helper, a personal access token, or SSH.
echo $'\n'
echo "Step 1 of 4: GitHub Add Files"
git add --all
echo $'\n'
echo "Step 2 of 4: GitHub Commit"
echo "Commit Message: "
read input_var
git commit -m "Update GitHub $input_var"
echo $'\n'
echo "Step 3 of 4: GitHub Push"
# -s keeps the password off the terminal echo.
read -s -p "Enter GitHub Password: " mypassword
git push https://justjoe22:$mypassword@github.com/justjoe22/Outage.Notify.git --all --quiet
echo $'\n'
echo "Step 4 of 4: Firebase Deploy"
firebase deploy
echo $'\n'
echo "Script Complete!"
# End
| true
|
8d561d159e1b5edc8f1cefb6b7f53d632bfeb366
|
Shell
|
JonathanShort1/branchPrediction
|
/traces/trim.sh
|
UTF-8
| 131
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
# For each *.out trace in the current directory, write a copy with the
# first 26000 lines removed to <name>-trim.out.
# NOTE(review): 26000 presumably skips warm-up records in the traces —
# confirm against the trace generator.
for f in *.out;
do
    # Guard against the unmatched-glob case, where $f is the literal
    # pattern "*.out" and sed would fail on a nonexistent file.
    [ -e "$f" ] || continue
    ext="${f##*.}"
    path="${f%.*}"
    out="${path}-trim.${ext}"
    # Quote $f/$out so filenames containing spaces survive.
    sed '1,26000d' "$f" > "$out"
done
| true
|
ec8bd7fd90e7e75687ed5fa425eea097569049c4
|
Shell
|
topalovic/.dot
|
/files/bashrc
|
UTF-8
| 244
| 2.609375
| 3
|
[] |
no_license
|
# Interactive bash options, plus shared shell configuration.
# Case-insensitive globbing
shopt -s nocaseglob
# Append to history file, don't overwrite it
shopt -s histappend
# Autocorrect typos in path names when using `cd`
shopt -s cdspell
# Common shell config (shared with other shells; sourced only if present)
[ -f ~/.shellrc ] && source ~/.shellrc
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.