blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
1aa0ee615bf22f4a6cee3b100faab66ce97d2b10
|
Shell
|
ginnocen/preliminarystudyPythonb
|
/Official_Repo_7nov2018/ALICEanalysis/buildtree/skimTreeFromEvtBased.sh
|
UTF-8
| 1,075
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Skim the D-meson trees out of the event-based MC and data productions:
# build skimTreeDsFromEvt once, run it on both inputs, then clean up.
#source clean.sh
MCSAMPLE="$HOME/MLproductions/MLDmesonsproductionsEventBased/LHC18a4a2_fast_run282343/AnalysisResultsDmesonsMC.root"
MCSAMPLEOUT="$HOME/MLproductions/MLDmesonsproductionsEventBased/LHC18a4a2_fast_run282343/AnalysisResultsDmesonsMC_skimmed.root"
DATASAMPLE="$HOME/MLproductions/MLDmesonsproductionsEventBased/LHC17p_FAST_run282343/AnalysisResultsData.root"
DATASAMPLEOUT="$HOME/MLproductions/MLDmesonsproductionsEventBased/LHC17p_FAST_run282343/AnalysisResultsData_skimmed.root"
# -f: stay quiet when no previous skim output exists yet.
rm -f -- "$MCSAMPLEOUT" "$DATASAMPLEOUT"
if [ ! -f "$MCSAMPLE" ] || [ ! -f "$DATASAMPLE" ]; then
echo "******************** ATTENTION ********************"
echo "You need to download the files"
echo "******************** THIS IS GOING TO FAIL *******************"
exit 1
fi
TreeName="tree_Ds"
# $(root-config ...) is intentionally unquoted: its output must word-split
# into individual compiler flags. Abort if the build fails instead of
# running a stale or missing executable.
g++ skimTreeDsFromEvt.C $(root-config --cflags --libs) -g -o skimTreeDsFromEvt.exe || exit 1
./skimTreeDsFromEvt.exe "$MCSAMPLE" "$MCSAMPLEOUT" "$TreeName"
./skimTreeDsFromEvt.exe "$DATASAMPLE" "$DATASAMPLEOUT" "$TreeName"
# Remove the binary and the macOS debug-symbol bundle.
rm -rf skimTreeDsFromEvt.exe skimTreeDsFromEvt.exe.dSYM
| true
|
e0c05a85dbf7074d7327153400afde3d623da483
|
Shell
|
OneMainF/delete-avamar-backups
|
/delete_backups.sh
|
UTF-8
| 2,046
| 3.5
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
##Licensed to the Apache Software Foundation (ASF) under one
##or more contributor license agreements. See the NOTICE file
##distributed with this work for additional information
##regarding copyright ownership. The ASF licenses this file
##to you under the Apache License, Version 2.0 (the
##"License"); you may not use this file except in compliance
##with the License. You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
##Unless required by applicable law or agreed to in writing, software
##distributed under the License is distributed on an "AS IS" BASIS,
##WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##See the License for the specific language governing permissions and
##limitations under the License.
# Delete Avamar backups for one client, older than a cutoff date.
# $1: Avamar domain   $2: client host name   $3: cutoff date for --before
THISDOMAIN="$1"
THISHOST="$2"
FROMDATE="$3"
echo "Getting backups for ${THISDOMAIN}${THISHOST}"
# grep -q succeeds as soon as the message appears; the previous
# 'grep -c == 1' test missed the unknown-client case whenever the
# message was printed more than once.
if mccli backup show --domain="${THISDOMAIN}" --name="${THISHOST}" --before="${FROMDATE}" 2>&1 | grep -q "Client does not exist"
then
    echo "No backups found for ${THISDOMAIN}${THISHOST}"
    exit 0
fi
# Data rows start with the year (1xxx/2xxx); keep "date~label~type"
# per backup. The pattern is quoted so the shell cannot glob it.
mccli backup show --domain="${THISDOMAIN}" --name="${THISHOST}" --before="${FROMDATE}" --verbose=True 2>&1 | grep '^[1-2]' | awk '{ print $1"~"$4"~"$7}' > "backups_${THISHOST}"
# Only delete daily (D) and daily/weekly (DW) backups; read line-by-line
# instead of word-splitting a $(...) expansion.
while IFS= read -r LINE
do
    LABEL=$(echo "${LINE}" | cut -d "~" -f2)
    THISDATE=$(echo "${LINE}" | cut -d "~" -f1)
    if [ "${LABEL}" != "" ] && [ "${THISDATE}" != "" ]
    then
        echo "Deleting backup of ${THISHOST} from ${THISDATE} - label #${LABEL}"
        # Log the exact command before running it, then append its output.
        echo "mccli backup delete --force --domain=${THISDOMAIN} --name=${THISHOST} --created=\"${THISDATE}\" --labelNum=\"${LABEL}\"" >> "deletelog_${THISHOST}"
        if ! mccli backup delete --force --domain="${THISDOMAIN}" --name="${THISHOST}" --created="${THISDATE}" --labelNum="${LABEL}" >> "deletelog_${THISHOST}"
        then
            echo "Failed to delete backup"
            echo "Failed to delete backup" >> "deletelog_${THISHOST}"
        fi
    fi
done < <(grep -e '~D$\|~DW$' "backups_${THISHOST}")
rm "backups_${THISHOST}"
| true
|
addde4cd8143eab97b025bc54a75acd59e412b35
|
Shell
|
jameshollingshead/WslTools
|
/unmountsdcard
|
UTF-8
| 96
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Unmount a Windows drive previously mounted under /mnt/<letter> in WSL.
# $1: drive letter (any case); lower-cased to match the WSL mount point.
drive_letter=$1
if [ -z "$drive_letter" ]; then
    # Without the check an empty argument would try 'umount /mnt/'.
    echo "usage: ${0##*/} <drive-letter>" >&2
    exit 1
fi
mount_folder="/mnt/${drive_letter,,}"
sudo umount "${mount_folder}"
| true
|
f3c951dfb9df961177cf572f00d7862636f5fb34
|
Shell
|
NFyue/parallel_Kmeans
|
/archive-linux/examples/_clean.sh
|
UTF-8
| 209
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Clean the given example directories (default: all known examples).
# $1: optional space-separated list of example directories; it is
#     intentionally left unquoted in the for-loop so it word-splits.
if [ "$1" = "" ];
then
e="fib karatsuba tutorial"
else
e=$1
fi
for example in $e
do
# Run each per-example clean in a subshell: if cd fails we skip the
# example, and we can never run 'make clean' (or 'cd ..') in the
# wrong directory, which the old unchecked 'cd $example' allowed.
(
if ! cd "$example"; then
echo "cannot enter $example, skipping" >&2
exit 0
fi
if [ -f "_clean.sh" ];
then
./_clean.sh
else
make clean
fi
)
done
| true
|
e779178b9b71e488b028cc818c899adbbd8a5593
|
Shell
|
Cloudxtreme/bitmeteros
|
/build/osx/build.sh
|
UTF-8
| 2,134
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/sh
# Build BitMeter OS for macOS and stage a package tree under ./package:
# binaries, launchd plists, man pages, the web UI, and support files.
# NOTE(review): no 'set -e' -- a failed make still lets packaging proceed.
export VERSION=0.7.5
# Generate embedded help text, the man pages and the license sources.
./makeHelpTextC.sh
./makeManPage.sh
./makeLicense.sh
# Build each component, then drop intermediate objects.
make bmcapture
make bmclient
make bmdb
make bmws
make bmsync
make clean
# Start from an empty staging tree (a previous tree may be root-owned,
# hence sudo for the removal).
PACKAGE_DIR=package
sudo rm -rf $PACKAGE_DIR
# Command-line binaries -> /usr/local/bin, owned root:admin.
BIN_DIR=$PACKAGE_DIR/usr/local/bin
mkdir -p $BIN_DIR
mv bmcapture $BIN_DIR
mv bmclient $BIN_DIR
mv bmws $BIN_DIR
mv bmdb $BIN_DIR
mv bmsync $BIN_DIR
sudo chown root $BIN_DIR/*
sudo chgrp admin $BIN_DIR/*
# launchd daemon definitions -> /System/Library/LaunchDaemons,
# root:wheel 644 as launchd requires.
LD_DIR=$PACKAGE_DIR/System/Library/LaunchDaemons
mkdir -p $LD_DIR
cp bitmeter.plist $LD_DIR
cp bitmeterweb.plist $LD_DIR
sudo chown root $LD_DIR/*
sudo chgrp wheel $LD_DIR/*
sudo chmod 644 $LD_DIR/*
# Gzipped man pages -> /usr/share/man/man1.
MAN_DIR=$PACKAGE_DIR/usr/share/man/man1
mkdir -p $MAN_DIR
mv bmclient.1.gz $MAN_DIR
mv bmdb.1.gz $MAN_DIR
mv bmsync.1.gz $MAN_DIR
sudo chown root $MAN_DIR/*
sudo chgrp admin $MAN_DIR/*
# Web UI assets (html, feeds, js, css, images) copied from the shared
# webserver tree into Application Support; paths contain spaces, hence
# the consistent quoting below.
WEB_SRC="../../webserver/web"
WEB_DIR="$PACKAGE_DIR/Library/Application Support/BitMeter/www"
mkdir -p "$WEB_DIR"
cp "$WEB_SRC/index.html" "$WEB_DIR"
cp "$WEB_SRC/rss.xml" "$WEB_DIR"
cp "$WEB_SRC/favicon.ico" "$WEB_DIR"
JS_DIR="$WEB_DIR/js"
mkdir -p "$JS_DIR"
cp "$WEB_SRC"/js/*.js "$JS_DIR"
CSS_DIR="$WEB_DIR/css"
mkdir -p "$CSS_DIR"
cp "$WEB_SRC"/css/*.css "$CSS_DIR"
IMG_DIR="$CSS_DIR/images"
mkdir -p "$IMG_DIR"
cp "$WEB_SRC"/css/images/*.gif "$IMG_DIR"
cp "$WEB_SRC"/css/images/*.png "$IMG_DIR"
sudo chown -R root "$WEB_DIR"
sudo chgrp -R admin "$WEB_DIR"
# Mobile variant of the web UI.
MOB_DIR="$WEB_DIR/m"
mkdir -p "$MOB_DIR"
cp "$WEB_SRC"/m/*.xml "$MOB_DIR"
MOB_JS_DIR="$WEB_DIR/m/js"
mkdir -p "$MOB_JS_DIR"
cp "$WEB_SRC"/m/js/*.js "$MOB_JS_DIR"
MOB_CSS_DIR="$WEB_DIR/m/css"
mkdir -p "$MOB_CSS_DIR"
cp "$WEB_SRC"/m/css/*.css "$MOB_CSS_DIR"
# Application Support dir itself: world-writable (777) so the daemons
# can write the database -- NOTE(review): deliberately loose permissions.
APP_DIR="$PACKAGE_DIR/Library/Application Support/BitMeter"
mkdir -p "$APP_DIR"
sudo chown root "$APP_DIR"
sudo chgrp admin "$APP_DIR"
sudo chmod 777 "$APP_DIR"
# Seed database shipped as bitmeter.db.new (installer presumably renames
# it -- confirm against the package postinstall script).
DB_FILE="$APP_DIR/bitmeter.db.new"
cp "../bitmeter.db" "$DB_FILE"
sudo chown root "$DB_FILE"
sudo chgrp admin "$DB_FILE"
sudo chmod 666 "$DB_FILE"
# Uninstall helper, executable by root/admin.
cp bmremove.sh "$APP_DIR"
sudo chown root "$APP_DIR/bmremove.sh"
sudo chgrp admin "$APP_DIR/bmremove.sh"
sudo chmod 775 "$APP_DIR/bmremove.sh"
| true
|
14e8a2ff0d32afa123d80876c126daa22232ac49
|
Shell
|
braoult/exercism
|
/bash/darts/darts.sh
|
UTF-8
| 2,160
| 3.6875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# external tools: none (only integer operations).
#
# V1 : Initial version.
# V2 : Using reference instead of subshell, as of
# https://stackoverflow.com/questions/540298
# moved valid numbers in parseval function.
# V3/4 : added some quotes following suggestions
# set to mask to enable logs. 0: none, 255: all
# Log mask: 0 disables all debug output, 255 enables everything.
#((DEBUG=2#00001111))
((DEBUG=2#00000000))
# debug: print the message words to stderr when any bit of the level
# mask ($1) is enabled in DEBUG; returns 1 when the message is muted.
debug () {
    local -i mask=$1
    if (( DEBUG & mask )); then
        shift
        echo "$@" >&2
    else
        return 1
    fi
}
# usage: print the calling convention on stderr and abort with status 1.
usage () {
    printf 'usage: darts x y\n' >&2
    exit 1
}
# extglob is required for the ?(...)/+(...) patterns used in parseval.
shopt -s extglob
# To be able to use bash only (integer arithmetic), all numbers will be
# multiplied by 1,000. Could be higher if we want more precision.
#
# So: 0.1 will be 100, 1 will be 1,000, 10 will be 10000, etc...
# circles: as we will use Pythagoras' theorem, the squared scaled radii
# are pre-computed here (radius * 1000, then squared).
outer=$(( (10 * 1000 ) ** 2 ))
middle=$(( ( 5 * 1000 ) ** 2 ))
inner=$(( ( 1 * 1000 ) ** 2 ))
debug 1 outer=$outer middle=$middle inner=$inner
# basic args check: 2 args, and decimal numbers, which are:
# optional +- sign, optional digits, optional . and optional digits
(( ${#} != 2 )) && usage
# parseval: parse a decimal-number string and store, via nameref, the
# squared scaled value ((int*1000 + dec) ** 2).
# $1: name of the variable to receive the result (passed by reference)
# $2: the string to parse; invalid input calls usage() and exits 1.
parseval() {
# integer and decimal parts, final value by ref
local int dec
local -n calc=$1
# check for valid decimal number (extglob pattern, enabled above)
[[ ${2} != ?([-+])+([0-9])?(.*([0-9])) ]] && usage
IFS=. read int dec <<< "$2"
debug 2 ${int} ${dec}
# we accept up to 3 decimals: add 3 zeroes to dec, then keep 3 first digits
# So a decimal part of "1" will become 100, 01 will become 10, etc...
# we also take care of leadings 0 (octal notation), and remove leading "-".
dec="$dec"000
dec="10#"${dec:0:3}
int="10#"${int#-}
debug 2 mult ${int} ${dec}
# the sign can be dropped: the value is squared anyway.
calc=$(( (int*1000 + dec) ** 2 ))
}
# x and y receive the squared scaled coordinates (still * 1000^2).
parseval x "$1"
parseval y "$2"
# x^2 + y^2: squared distance from the board center (Pythagoras).
total=$(( x+y ))
debug 1 x=$x y=$y x+y=$total
# Score by smallest enclosing circle: inner=10, middle=5, outer=1, miss=0.
(( total <= inner )) && echo 10 && exit 0
(( total <= middle )) && echo 5 && exit 0
(( total <= outer )) && echo 1 && exit 0
echo 0 && exit 0
# emacs/vim settings.
# Local Variables:
# sh-basic-offset: 4
# indent-tabs-mode: nil
# comment-column: 60
# fill-column: 80
# End:
# vim: set tabstop=4 expandtab:
| true
|
b25de8558c242ba590c21e2f5e74902ec5690d2d
|
Shell
|
mellemaeliss/linux-bash
|
/exercice1.sh
|
UTF-8
| 699
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Read grades ("notes") from stdin until "q" or a negative number is
# entered; print a verbal appreciation per grade, then the average.
# Fixes vs. the original: the outer 'if' was never closed with 'fi'
# (syntax error), 'let "$tes = 1"' expanded to 'let "0 = 1"' (an error,
# so tes was never set), and the average was divided by occurence on
# every loop iteration, corrupting the accumulated sum.
tes=0
moy=0
occurence=0
while [ "$tes" -eq 0 ]; do
    # Also stop on end-of-input so the loop cannot spin forever.
    if ! read -p "saisir une note:\n" note; then
        break
    fi
    if [ "$note" = "q" ]; then
        tes=1
    elif [ "$note" -lt 0 ]; then
        tes=1
    else
        # Accumulate the sum and the number of grades for the average.
        moy=$((moy + note))
        occurence=$((occurence + 1))
        if [ "$note" -le 20 ] && [ "$note" -ge 16 ]; then
            echo 'tres bien'
        elif [ "$note" -lt 16 ] && [ "$note" -ge 14 ]; then
            echo 'bien'
        elif [ "$note" -lt 14 ] && [ "$note" -ge 12 ]; then
            echo 'assez bien'
        elif [ "$note" -lt 12 ] && [ "$note" -ge 10 ]; then
            echo 'moyen'
        elif [ "$note" -lt 10 ] && [ "$note" -ge 0 ]; then
            echo 'insuffisant'
        else
            echo 'erreur'
        fi
    fi
done
# Integer average, printed once; guard against dividing by zero when no
# grade was entered at all.
if [ "$occurence" -gt 0 ]; then
    moy=$((moy / occurence))
    echo "La moyenne est de $moy sur ($occurence)"
fi
| true
|
d0a8a54e8e8d0c29f02a6cfe6650f425bbd1a22b
|
Shell
|
freebsd/freebsd-ports
|
/security/kstart/files/kstart.in
|
UTF-8
| 1,829
| 3.4375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# PROVIDE: kstart
# REQUIRE: DAEMON
# BEFORE: LOGIN
# KEYWORD: shutdown
# Add the following lines to /etc/rc.conf to enable kstart:
# kstart_enable (bool): Set to YES to enable kstart
# Default: NO
# kstart_flags (str): Extra flags passed to kstart
# Default: -LUFK 120
# kstart_keytab (str): Default keytab file to use
# Default: /etc/krb5.keytab
#
# To enable multi-instance support, use:
# kstart_instances="name1 name2"
# kstart_name1_keytab="/path/to/keytab"
# FreeBSD rc(8) script for k5start; %%PREFIX%% is a ports placeholder
# substituted at package-build time.
. /etc/rc.subr
name="kstart"
rcvar=kstart_enable
command="%%PREFIX%%/bin/k5start"
pidfile="/var/run/kstart.pid"
load_rc_config $name
# Fill in the documented defaults for anything rc.conf did not set.
[ -z "$kstart_enable" ] && kstart_enable="NO"
[ -z "$kstart_keytab" ] && kstart_keytab="/etc/krb5.keytab"
[ -z "$kstart_flags" ] && kstart_flags="-LUFK 120"
[ -z "$kstart_instances" ] && kstart_instances="system"
[ -z "$kstart_local_instances" ] && kstart_local_instances=""
[ -z "$kstart_system_keytab" ] && kstart_system_keytab="$kstart_keytab"
[ -z "$kstart_system_flags" ] && kstart_system_flags="$kstart_flags"
if [ -n "$kstart_local_instances" ]; then
kstart_instances="$kstart_instances $kstart_local_instances"
fi
# Multi-instance mode: run run_rc_command once per instance, each with
# its own keytab, flags and pid file; exit non-zero if any one failed.
if [ -n "$kstart_instances" ]; then
_1=$1
# Extra command-line arguments select a subset of instances.
if [ $# -gt 1 ]; then shift; kstart_instances=$*; fi
kstart_keytab=""
kstart_flags=""
rc=0
for i in ${kstart_instances}; do
# Per-instance overrides: kstart_<name>_keytab / kstart_<name>_flags.
eval _keytab=\$kstart_${i}_keytab
if [ -z "$_keytab" ]; then
_keytab="/etc/krb5.keytab"
fi
eval _flags=\$kstart_${i}_flags
if [ -z "$_flags" ]; then
_flags="-LUFK 120"
fi
eval pidfile="/var/run/kstart_${i}.pid"
command_args="-bf $_keytab $_flags -p $pidfile"
run_rc_command "$_1"
if [ $? -ne 0 ]; then rc=1; fi
# Clear rc.subr internal state so the next instance starts clean.
unset _pidcmd _rc_restart_done
done
exit $rc
else
# Single-instance mode. NOTE(review): kstart_instances defaults to
# "system" above, so this branch looks unreachable -- confirm intent.
command_args="-bf $kstart_keytab $kstart_flags -p $pidfile"
run_rc_command "$1"
fi
| true
|
cd8bfdd1f35f16254ff74135efb2d8b75025312b
|
Shell
|
zongjiayun/Shell
|
/ftp/test0813/2/ftp_2.sh
|
UTF-8
| 818
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Pull every file of an HDFS partition into a local staging directory,
# hand the directory to upload_2.sh, and report timings.
# bash (not sh) is required: the script uses arrays.
START=$(date +%s%N)
echo "Data trans start at "$START
LOCAL_DIR=/tmp/hdfs
FILENAME=""
# -p: do not fail when the staging directory already exists.
mkdir -p "$LOCAL_DIR"
# Third 'du' column is the full HDFS path of each file in the partition;
# the substitution is intentionally unquoted so it splits into elements.
FILE_ARRAY=($(hdfs dfs -du -h /input/lbs/recommend/user_click_sup_d/partitiontime=20140407 | awk '{print $3}'))
CYC=${#FILE_ARRAY[@]}
for ((i = 0; i < CYC; i++)); do
    HDFSFILEPATH=${FILE_ARRAY[i]}
    # 7th '/'-separated field is the bare file name.
    FILENAME=$(echo "$HDFSFILEPATH" | awk -F'/' '{print $7}')
    hdfs dfs -get "$HDFSFILEPATH" "$LOCAL_DIR"
    echo "downloading file:"$HDFSFILEPATH
    echo "transfering file:"$LOCAL_DIR/$FILENAME
done
DOWNLOADEND=$(date +%s%N)
sh ./upload_2.sh "$LOCAL_DIR"
#rm -rf $LOCAL_DIR
END=$(date +%s%N)
echo "Data trans end at "$END
# Nanoseconds -> milliseconds. The original ran 'expr $time / 1000000'
# here while $time was still unset, which always produced an expr error
# and left downloadtime empty.
downloadtime=$(((END - DOWNLOADEND) / 1000000))
time=$(((END - START) / 1000000))
echo "download time:"${downloadtime}ms
echo "total time:"${time}ms
| true
|
b287a53a3be0193dda7f29d55d2c6179ccd79b24
|
Shell
|
brandondees/.dotfiles
|
/.zshrc
|
UTF-8
| 5,507
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
# zsh startup configuration: oh-my-zsh, tmux autostart, PATH setup,
# tool integrations (fzf, thefuck, heroku), and the pure prompt.
# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load. Optionally, if you set this to "random"
# it'll load a random theme each time that oh-my-zsh is loaded.
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
# ZSH_THEME="robbyrussell"
ZSH_THEME="" # using pure power prompt https://github.com/sindresorhus/pure#install
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(
asdf
autojump
branch
bundler
docker
docker-compose
fzf
gitfast
rake
ruby
rails
thefuck
tig
tmux
tmuxinator
zsh_reload # run 'src' to refresh .zshrc
)
# autostart tmux
export ZSH_TMUX_FIXTERM=true
export ZSH_TMUX_AUTOSTART=true
# but don't auto connect to an existing session, make a new one
export ZSH_TMUX_AUTOCONNECT=false
# export ZSH_TMUX_ITERM2=true
source $ZSH/oh-my-zsh.sh
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
if [[ -n $SSH_CONNECTION ]]; then
export EDITOR='vim'
else
export EDITOR='vim'
fi
# If nvim is installed, we'll use that as vim
# if type nvim > /dev/null 2>&1; then
# alias vim='nvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# ssh
# NOTE(review): this starts a brand-new ssh-agent for every shell, so
# agent processes accumulate -- consider reusing an existing agent.
eval "$(ssh-agent)"
# export SSH_KEY_PATH="~/.ssh/rsa_id"
export SSH_KEY_PATH="~/.ssh/id_ed25519"
# added by travis gem
# [ -f ~/.travis/travis.sh ] && source ~/.travis/travis.sh
# TheFuck
# https://github.com/nvbn/thefuck
eval $(thefuck --alias qwer)
# for vim control+s
stty -ixon
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/.bin" ] ; then
PATH="$HOME/.bin:$PATH"
fi
if [ -d "$HOME/.local/bin" ] ; then
PATH="$HOME/.local/bin:$PATH"
fi
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi
# Environment Variables
if [ -f "$HOME/.env" ]; then
. "$HOME/.env"
fi
# Load Postgres.app location into path if present
if [ -d "/Applications/Postgres.app/Contents/Versions/latest/bin" ] ; then
PATH="/Applications/Postgres.app/Contents/Versions/latest/bin:$PATH"
fi
# Same for redis.app http://jpadilla.github.io/redisapp/
if [ -d "/Applications/Redis.app/Contents/Resources/Vendor/redis/bin" ] ; then
PATH="/Applications/Redis.app/Contents/Resources/Vendor/redis/bin:$PATH"
fi
# # Load iterm2's shell integration features if present
# test -e "${HOME}/.iterm2_shell_integration.bash" && source "${HOME}/.iterm2_shell_integration.bash"
# asdf version management
# . $HOME/.asdf/asdf.sh
# . $HOME/.asdf/completions/asdf.bash
export PATH=$PATH:$HOME/.asdf/installs/rust/stable/bin
# FZF
# https://github.com/junegunn/fzf
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# NOTE(review): these are set after .fzf.zsh is sourced and are not
# exported -- confirm fzf actually picks them up.
FZF_CTRL_T_COMMAND="ag -g ''"
FZF_DEFAULT_COMMAND="ag --hidden --ignore .git -g ''"
# Tmuxinator autocompletion
# and mux alias
source $HOME/install-scripts/tmuxinator/tmuxinator.zsh
export PATH="$HOME/.yarn/bin:$HOME/.config/yarn/global/node_modules/.bin:$PATH"
# heroku autocomplete setup
HEROKU_AC_ZSH_SETUP_PATH=$HOME/.cache/heroku/autocomplete/zsh_setup && test -f $HEROKU_AC_ZSH_SETUP_PATH && source $HEROKU_AC_ZSH_SETUP_PATH;
# Run `mkdir .git/safe` on trusted projects to get local bins in path -- H/T Chris Thorn (thorncp)
export PATH=".git/safe/../../bin:$PATH"
# https://sw.kovidgoyal.net/kitty/#configuring-kitty
# kitty completion
# autoload -Uz compinit
# compinit
# Completion for kitty
# kitty + complete setup zsh | source /dev/stdin
# Pure power prompt https://github.com/sindresorhus/pure#install
fpath+=('/home/dees/.asdf/installs/nodejs/11.12.0/.npm/lib/node_modules/pure-prompt/functions')
autoload -U promptinit; promptinit
prompt pure
# Use my own aliases and functions
source ~/.bash_aliases
source ~/.functions/*
| true
|
59d5113a2588b19e825628185fbd2344eb6a795a
|
Shell
|
ewon/efive
|
/src/rc.d/rc.conexantpciadsl
|
UTF-8
| 1,871
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# $Id: rc.conexantpciadsl 4231 2010-02-12 16:31:09Z gespinasse $
#
# Debugging. Comment it out to stop logging
DEBUG="yes"
# msg: print a status line -- always to stdout, and additionally to
# syslog (tag "red") whenever DEBUG is non-empty.
msg() {
    local text="$*"
    if [ -n "$DEBUG" ]; then
        /usr/bin/logger -t red "CnxPCI ADSL: $text"
    fi
    /bin/echo "$text"
}
# Poll the modem for line sync: up to 45 attempts, 2 s apart (~90 s).
# Returns 0 once synced, 1 on timeout.
function wait_for_showtime() {
msg "waiting for sync"
count=0
while [ ! $count = 45 ]; do
/bin/sleep 2
# cnxadslstatus prints 'Showtime.' (matched as a fixed string, -F)
# once the line has trained.
if ( /usr/sbin/cnxadslstatus | /bin/grep -q -F 'Showtime.' ); then
msg "sync done"
return 0
fi
((++count))
done
return 1
}
# See how we were called.
case "$1" in
start)
msg "starting"
# if the driver is not already loaded then
if ( /bin/lsmod | /bin/grep -q CnxADSL ); then
msg "already loaded"
else
/sbin/modprobe CnxADSL
RETVAL=$?
if [ $RETVAL -ne 0 ] ; then
msg "error when loading, card present?"
exit 1
fi
# Pick the firmware matching the PCI id (vendor 14f1 = Conexant,
# device 1611/1622 -- presumably Tigris/Yukon chips; confirm).
if ( /bin/grep -q '14f11611' /proc/bus/pci/devices ); then
# Tigris model
/bin/ln -f -s /etc/Conexant/CnxTgF.hex /var/ipcop/cnx_pci/firmware.hex
else
if ( /bin/grep -q '14f11622' /proc/bus/pci/devices ); then
# Yukon model
/bin/ln -f -s /etc/Conexant/CnxYkF.hex /var/ipcop/cnx_pci/firmware.hex
else
msg "don't know this model"
exit 1
fi
fi
# Initialize the firmware and start training
/bin/ln -f -s /etc/Conexant/cnxadsl.conf /var/ipcop/cnx_pci/cnxadsl.conf
/etc/Conexant/cnxadslload /var/ipcop/cnx_pci
fi
# Propagate the sync result (0 = synced, 1 = timeout) as exit status.
wait_for_showtime
exit $?
;;
stop)
# No teardown performed on stop; use 'cleanup' to unload the driver.
msg "stop"
;;
cleanup)
msg "cleanup"
/usr/bin/killall cnxadslload 2>/dev/null
/sbin/modprobe -r CnxADSL
;;
*)
echo "Usage: $0 {start|stop|cleanup}"
exit 1
;;
esac
exit 0
| true
|
a4b46feb6d2eee128d1b3a2585e425e03f93a974
|
Shell
|
deis/tips-n-tricks
|
/slack/chkube/chkube.sh
|
UTF-8
| 657
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# chkube: inspect/select kubeconfig environments stored under ~/.kubes.
# NOTE(review): 'clear' (unset KUBECONFIG) and the printed env file only
# affect the caller when this script is sourced, not executed -- confirm
# the intended usage (e.g. via a wrapper alias).
KUBES=~/.kubes
case "$1" in
clear)
unset KUBECONFIG
;;
info)
echo "KUBECONFIG=${KUBECONFIG}"
;;
list)
# Quoted so a HOME containing spaces cannot word-split the path.
ls "${KUBES}"
;;
*)
# Treat the argument as an environment name: print its env file,
# otherwise show usage and fail.
if [[ -d "${KUBES}/$1" && -e "${KUBES}/${1}/env" ]]; then
cat "${KUBES}/${1}/env"
else
echo "Usage: ${0} {list|clear|name}"
if [[ -z ${KUBECONFIG} ]]; then
echo "KUBECONFIG not set"
fi
exit 1
fi
;;
esac
| true
|
d3df856e0ebb22dac076a8f740108615d3a03dbb
|
Shell
|
milhidaka/iclr2017
|
/bench_blas/do_bench.sh
|
UTF-8
| 649
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# GEMM benchmark driver; $1 selects the benchmark binary to run:
#bench_cublas or bench_clblas
CMD=$1
# Run the $CMD benchmark for one (M, N, K) GEMM shape and print one CSV
# line per pass: "<layer>,<pass>,<result>" with pass 0=forward,
# 1=backward, 2=gradient. The trailing N/T letters are passed through to
# $CMD -- presumably transpose flags for the two operands (confirm
# against the benchmark binary).
# $1: layer name  $2: M  $3: N  $4: K
function bench_oneshape() {
    local NAME=$1
    local M=$2
    local N=$3
    local K=$4
    local RESULT
    #forward
    RESULT=$("$CMD" "$M" "$N" "$K" N N)
    echo "$NAME,0,$RESULT"
    #backward
    RESULT=$("$CMD" "$M" "$K" "$N" N T)
    echo "$NAME,1,$RESULT"
    #gradient
    RESULT=$("$CMD" "$K" "$N" "$M" T N)
    echo "$NAME,2,$RESULT"
}
bench_oneshape conv1_1 802816 64 27
bench_oneshape conv1_2 802816 64 576
bench_oneshape conv2_2 200704 128 576
bench_oneshape conv2_3 200704 128 1152
bench_oneshape conv3_1 50176 256 1152
bench_oneshape conv3_2 50176 256 2304
bench_oneshape conv4_1 12544 512 2304
bench_oneshape conv4_2 12544 512 4608
bench_oneshape conv5_1 3136 512 4608
| true
|
67fb4b9269806f283fe5aa6d2b3f5de6c9fa5ec8
|
Shell
|
edchainio/testnet
|
/masters/nginx/configuration.sh
|
UTF-8
| 12,966
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision nginx as a basic-auth reverse proxy for Kibana (localhost:5601)
# with a custom "timekeeper" access-log format, GeoIP country filtering and
# gzip.  <username> and <ip_address> look like template placeholders --
# TODO confirm they are substituted before this script is executed.
# NOTE(review): the sed -i edits below mutate the live config in place and
# are not idempotent; running the script twice duplicates directives.

# --- Basic auth: build htpasswd.users from a one-shot credentials file ---
cat /home/<username>/.htpasswd-credentials | htpasswd -i -c /etc/nginx/htpasswd.users <username>
rm /home/<username>/.htpasswd-credentials
# Append the auth_basic directives after the server_name line, then indent them.
sed -i '/^\s*server_name/a auth_basic_user_file \/etc\/nginx\/htpasswd\.users;' /etc/nginx/sites-available/default
sed -i '/^\s*server_name/a auth_basic "Restricted Access";' /etc/nginx/sites-available/default
sed -i '/^\s*server_name/a \
' /etc/nginx/sites-available/default
sed -i 's/auth_basic_user_file \/etc\/nginx\/htpasswd\.users;/ auth_basic_user_file \/etc\/nginx\/htpasswd\.users;/' /etc/nginx/sites-available/default
sed -i 's/auth_basic "Restricted Access";/ auth_basic "Restricted Access";/' /etc/nginx/sites-available/default
# Pin server_name to the host's address.
sed -i -e '/^\s*server_name/s/^.*$/ server_name <ip_address>;/' /etc/nginx/sites-available/default
# --- Reverse proxy to Kibana: insert proxy_* directives after try_files ---
sed -i '/^\s*try_files/a proxy_cache_bypass \$http_upgrade;' /etc/nginx/sites-available/default
sed -i '/^\s*try_files/a proxy_set_header Host \$host;' /etc/nginx/sites-available/default
sed -i '/^\s*try_files/a proxy_set_header Connection 'upgrade';' /etc/nginx/sites-available/default
sed -i '/^\s*try_files/a proxy_set_header Upgrade \$http_upgrade;' /etc/nginx/sites-available/default
sed -i '/^\s*try_files/a proxy_http_version 1.1;' /etc/nginx/sites-available/default
sed -i '/^\s*try_files/a proxy_pass http:\/\/localhost:5601;' /etc/nginx/sites-available/default
sed -i 's/proxy_cache_bypass \$http_upgrade;/ proxy_cache_bypass \$http_upgrade;/' /etc/nginx/sites-available/default
sed -i 's/proxy_set_header Host \$host;/ proxy_set_header Host \$host;/' /etc/nginx/sites-available/default
sed -i 's/proxy_set_header Connection 'upgrade';/ proxy_set_header Connection 'upgrade';/' /etc/nginx/sites-available/default
sed -i 's/proxy_set_header Upgrade \$http_upgrade;/ proxy_set_header Upgrade \$http_upgrade;/' /etc/nginx/sites-available/default
sed -i 's/proxy_http_version 1.1;/ proxy_http_version 1.1;/' /etc/nginx/sites-available/default
sed -i 's/proxy_pass http:\/\/localhost:5601;/ proxy_pass http:\/\/localhost:5601;/' /etc/nginx/sites-available/default
# Comment out the static try_files handler now that requests are proxied.
sed -i -e 's/^\s*try_files/ # try_files \$uri \$uri\/ =404;/' /etc/nginx/sites-available/default
nginx -t
service nginx restart
###
# --- Build the "timekeeper" log_format line piecewise (the sed passes add
# --- the single/double quotes that are awkward to emit via echo) ---
sh -c 'echo "log_format timekeeper \$remote_addr - \$remote_user [\$time_local] " >> /etc/nginx/conf.d/timekeeper-log-format.conf'
sed -i "s/\$remote_addr/\'\$remote_addr/" /etc/nginx/conf.d/timekeeper-log-format.conf
sed -i "s/_local] /_local] \'/" /etc/nginx/conf.d/timekeeper-log-format.conf
sh -c 'echo " \$request \$status \$body_bytes_sent " >> /etc/nginx/conf.d/timekeeper-log-format.conf'
sed -i "s/\$request/\'\"\$request\"/" /etc/nginx/conf.d/timekeeper-log-format.conf
sed -i "s/_sent /_sent \'/" /etc/nginx/conf.d/timekeeper-log-format.conf
sh -c 'echo " \$http_referer \$http_user_agent \$http_x_forwarded_for \$request_time;" >> /etc/nginx/conf.d/timekeeper-log-format.conf'
sed -i "s/\$http_referer/\'\"\$http_referer\"/" /etc/nginx/conf.d/timekeeper-log-format.conf
sed -i "s/\$http_user_agent/\"\$http_user_agent\"/" /etc/nginx/conf.d/timekeeper-log-format.conf
sed -i "s/\$http_x_forwarded_for/\"\$http_x_forwarded_for\"/" /etc/nginx/conf.d/timekeeper-log-format.conf
sed -i "s/_time;/_time\';/" /etc/nginx/conf.d/timekeeper-log-format.conf
# --- GeoIP: map country code to $allowed_country (only US allowed) ---
sh -c 'echo "geoip_country /usr/share/GeoIP/GeoIP.dat;" >> /etc/nginx/conf\.d/geoip.conf'
sed -i '/# Default server configuration/a \}' /etc/nginx/sites-available/default
sed -i '/# Default server configuration/a US yes;' /etc/nginx/sites-available/default
sed -i '/# Default server configuration/a default no;' /etc/nginx/sites-available/default
sed -i '/# Default server configuration/a map \$geoip_country_code \$allowed_country \{' /etc/nginx/sites-available/default
sed -i '/# Default server configuration/a \
' /etc/nginx/sites-available/default
sed -i 's/US yes;/ US yes;/' /etc/nginx/sites-available/default
sed -i 's/default no;/ default no;/' /etc/nginx/sites-available/default
# Close connections (444 = drop without response) from disallowed countries.
sed -i '/listen \[::\]:80 default_server;/a \}#tmp_id_1' /etc/nginx/sites-available/default
sed -i '/listen \[::\]:80 default_server;/a return 444;' /etc/nginx/sites-available/default
sed -i '/listen \[::\]:80 default_server;/a if (\$allowed_country = no) \{' /etc/nginx/sites-available/default
sed -i '/listen \[::\]:80 default_server;/a \
' /etc/nginx/sites-available/default
sed -i 's/\}#tmp_id_1/ \}/' /etc/nginx/sites-available/default
sed -i 's/return 444;/ return 444;/' /etc/nginx/sites-available/default
sed -i 's/if (\$allowed_country = no)/ if (\$allowed_country = no)/' /etc/nginx/sites-available/default
# Per-server access/error logs using the timekeeper format.
sed -i '/listen \[::\]:80 default_server;/a access_log \/var\/log\/nginx\/server-block-1-access\.log timekeeper gzip;' /etc/nginx/sites-available/default
sed -i 's/access_log \/var\/log\/nginx\/server-block-1-access\.log timekeeper gzip;/ access_log \/var\/log\/nginx\/server-block-1-access\.log timekeeper gzip;/' /etc/nginx/sites-available/default
sed -i '/access_log \/var\/log\/nginx\/server-block-1-access\.log timekeeper gzip;/a error_log \/var\/log\/nginx\/server-block-1-error\.log;' /etc/nginx/sites-available/default
sed -i 's/error_log \/var\/log\/nginx\/server-block-1-error\.log;/ error_log \/var\/log\/nginx\/server-block-1-error\.log;/' /etc/nginx/sites-available/default
sed -i '/listen \[::\]:80 default_server;/a \
' /etc/nginx/sites-available/default
# --- HTTPS server block setup: kept disabled (commented out), presumably
# --- pending certificates -- TODO confirm before enabling ---
# sed -i -e '/^# server {/s/^.*$/ server {/' /etc/nginx/nginx.conf
# sed -i -e '/^# listen 443 ssl http2 default_server;/s/^.*$/ listen 443 ssl http2 default_server;/' /etc/nginx/nginx.conf
# sed -i -e '/^# listen \[::\]:443 ssl http2 default_server;/s/^.*$/ listen \[::\]:443 ssl http2 default_server;/' /etc/nginx/nginx.conf
# sed -i -e '/^# server_name _;/s/^.*$/ server_name _;/' /etc/nginx/nginx.conf
# sed -i -e '/^# root \/usr\/share\/nginx\/html;/s/^.*$/ root \/usr\/share\/nginx\/html;#tmp_id_2/' /etc/nginx/nginx.conf
# sed -i '/^ root \/usr\/share\/nginx\/html;#tmp_id_2/a resolver 8\.8\.8\.8 8\.8\.4\.4 208\.67\.222\.222 208\.67\.220\.220 216\.146\.35\.35 216\.146\.36\.36 valid=300s;' /etc/nginx/nginx.conf
# sed -i 's/resolver 8\.8\.8\.8 8\.8\.4\.4 208\.67\.222\.222 208\.67\.220\.220 216\.146\.35\.35 216\.146\.36\.36 valid=300s;/ resolver 8\.8\.8\.8 8\.8\.4\.4 208\.67\.222\.222 208\.67\.220\.220 216\.146\.35\.35 216\.146\.36\.36 valid=300s;/' /etc/nginx/nginx.conf
# sed -i '/^ resolver 8\.8\.8\.8 8\.8\.4\.4 208\.67\.222\.222 208\.67\.220\.220 216\.146\.35\.35 216\.146\.36\.36 valid=300s;/a resolver_timeout 3s;' /etc/nginx/nginx.conf
# sed -i 's/resolver_timeout 3s;/ resolver_timeout 3s;/' /etc/nginx/nginx.conf
# sed -i '/^ root \/usr\/share\/nginx\/html;#tmp_id_2/a \
# #' /etc/nginx/nginx.conf
# sed -i '/^ root \/usr\/share\/nginx\/html;#tmp_id_2/a # add_header Strict-Transport-Security \"max-age=31536000; includeSubDomains; preload\";' /etc/nginx/nginx.conf
# sed -i '/^ root \/usr\/share\/nginx\/html;#tmp_id_2/a add_header Strict-Transport-Security \"max-age=31536000\";' /etc/nginx/nginx.conf
# sed -i 's/add_header Strict-Transport-Security \"max-age=31536000\";/ add_header Strict-Transport-Security \"max-age=31536000\";/' /etc/nginx/nginx.conf
# sed -i '/^ root \/usr\/share\/nginx\/html;#tmp_id_2/a add_header X-Frame-Options DENY;' /etc/nginx/nginx.conf
# sed -i 's/add_header X-Frame-Options DENY;/ add_header X-Frame-Options DENY;/' /etc/nginx/nginx.conf
# sed -i '/^ root \/usr\/share\/nginx\/html;#tmp_id_2/a add_header X-Content-Type-Options nosniff;' /etc/nginx/nginx.conf
# sed -i 's/add_header X-Content-Type-Options nosniff;/ add_header X-Content-Type-Options nosniff;/' /etc/nginx/nginx.conf
# sed -i '/^ root \/usr\/share\/nginx\/html;#tmp_id_2/a \
# #' /etc/nginx/nginx.conf
# sed -i -e '/^# ssl_certificate "\/etc\/pki\/nginx\/server\.crt";/s/^.*$/ ssl_certificate "\/etc\/pki\/nginx\/server\.crt";/' /etc/nginx/nginx.conf
# sed -i -e '/^# ssl_certificate_key "\/etc\/pki\/nginx\/private\/server\.key";/s/^.*$/ ssl_certificate_key "\/etc\/pki\/nginx\/private\/server\.key";#tmp_id_6/' /etc/nginx/nginx.conf
# sed -i '/^ ssl_certificate_key \"\/etc\/pki\/nginx\/private\/server\.key\";#tmp_id_6/a ssl_protocols TLSv1 TLSv1\.1 TLSv1\.2;' /etc/nginx/nginx.conf
# sed -i 's/ssl_protocols TLSv1 TLSv1\.1 TLSv1\.2;/ ssl_protocols TLSv1 TLSv1\.1 TLSv1\.2;/' /etc/nginx/nginx.conf
# sed -i '/^ ssl_certificate_key \"\/etc\/pki\/nginx\/private\/server\.key\";#tmp_id_6/a ssl_ecdh_curve secp384r1;' /etc/nginx/nginx.conf
# sed -i 's/ssl_ecdh_curve secp384r1;/ ssl_ecdh_curve secp384r1;/' /etc/nginx/nginx.conf
# sed -i -e '/^# ssl_session_cache shared:SSL:1m;/s/^.*$/ ssl_session_cache shared:SSL:1m;/' /etc/nginx/nginx.conf
# sed -i -e '/^# ssl_session_timeout 10m;/s/^.*$/ ssl_session_timeout 10m;/' /etc/nginx/nginx.conf
# sed -i -e '/^# ssl_ciphers HIGH:!aNULL:!MD5;/s/^.*$/ ssl_ciphers \"EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH\";/' /etc/nginx/nginx.conf
# sed -i -e '/^# ssl_prefer_server_ciphers on;/s/^.*$/ ssl_prefer_server_ciphers on;/' /etc/nginx/nginx.conf
# sed -i -e '/^# # Load configuration files for the default server block\./s/^.*$/ # Load configuration files for the default server block\./' /etc/nginx/nginx.conf
# sed -i -e '/^# include \/etc\/nginx\/default\.d\/\*\.conf;/s/^.*$/ include \/etc\/nginx\/default\.d\/\*\.conf;#tmp_id_3/' /etc/nginx/nginx.conf
# sed -i '/^ include \/etc\/nginx\/default\.d\/\*\.conf;#tmp_id_3/a \}#tmp_id_7' /etc/nginx/nginx.conf
# sed -i 's/\}#tmp_id_7/ \}#tmp_id_7/' /etc/nginx/nginx.conf
# sed -i '/^ include \/etc\/nginx\/default\.d\/\*\.conf;#tmp_id_3/a return 444;#tmp_id_4' /etc/nginx/nginx.conf
# sed -i 's/return 444;#tmp_id_4/ return 444;#tmp_id_4/' /etc/nginx/nginx.conf
# sed -i '/^ include \/etc\/nginx\/default\.d\/\*\.conf;#tmp_id_3/a if (\$allowed_country = no) \{#tmp_id_8' /etc/nginx/nginx.conf
# sed -i 's/if (\$allowed_country = no) {#tmp_id_8/ if (\$allowed_country = no) {#tmp_id_8/' /etc/nginx/nginx.conf
# sed -i '/^ include \/etc\/nginx\/default\.d\/\*\.conf;#tmp_id_3/a \
# #' /etc/nginx/nginx.conf
# sed -i '/^ include \/etc\/nginx\/default\.d\/\*\.conf;#tmp_id_3/a access_log \/var\/log\/nginx\/server-block-1-access.log timekeeper;#tmp_id_9' /etc/nginx/nginx.conf
# sed -i -e 's/access_log \/var\/log\/nginx\/server-block-1-access.log timekeeper;#tmp_id_9/ access_log \/var\/log\/nginx\/server-block-1-access.log timekeeper;#tmp_id_9/' /etc/nginx/nginx.conf
# sed -i '/access_log \/var\/log\/nginx\/server-block-1-access.log timekeeper;#tmp_id_9/a error_log \/var\/log\/nginx\/server-block-1-error.log;#tmp_id_10' /etc/nginx/nginx.conf
# sed -i -e 's/error_log \/var\/log\/nginx\/server-block-1-error.log;#tmp_id_10/ error_log \/var\/log\/nginx\/server-block-1-error.log;#tmp_id_10/' /etc/nginx/nginx.conf
# sed -i '/^ include \/etc\/nginx\/default\.d\/\*\.conf;#tmp_id_3/a \
# #' /etc/nginx/nginx.conf
# sed -i -e '/^# location \/ {/s/^.*$/ location \/ {/' /etc/nginx/nginx.conf
# sed -i -e '/^# }/s/^.*$/ }/' /etc/nginx/nginx.conf
# sed -i -e '/^# error_page 404 \/404.html;/s/^.*$/ error_page 404 \/404.html;/' /etc/nginx/nginx.conf
# sed -i -e '/^# location = \/40x.html {/s/^.*$/ location = \/40x.html {/' /etc/nginx/nginx.conf
# sed -i -e '/^# error_page 500 502 503 504 \/50x.html;/s/^.*$/ error_page 500 502 503 504 \/50x.html;/' /etc/nginx/nginx.conf
# sed -i -e '/^# location = \/50x.html {/s/^.*$/ location = \/50x.html {/' /etc/nginx/nginx.conf
# sed -i -e '/^# }/s/^.*$/ }/' /etc/nginx/nginx.conf
# --- gzip configuration ---
sh -c "echo 'gzip_vary on;' >> /etc/nginx/conf.d/gzip.conf"
sh -c "echo 'gzip_proxied any;' >> /etc/nginx/conf.d/gzip.conf"
sh -c "echo 'gzip_comp_level 6;' >> /etc/nginx/conf.d/gzip.conf"
sh -c "echo 'gzip_buffers 16 8k;' >> /etc/nginx/conf.d/gzip.conf"
sh -c "echo 'gzip_http_version 1.1;' >> /etc/nginx/conf.d/gzip.conf"
sh -c "echo 'gzip_min_length 256;' >> /etc/nginx/conf.d/gzip.conf"
sh -c "echo 'gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript application/vnd.ms-fontobject application/x-font-ttf font/opentype image/svg+xml image/x-icon;' >> /etc/nginx/conf.d/gzip.conf"
# Validate the final config, then start and enable nginx.
nginx -t
systemctl start nginx
systemctl enable nginx
| true
|
2fa6ef89d02b7102e1e7d671e22579b595e0d416
|
Shell
|
p/phpbb-tools
|
/cdb-fetch
|
UTF-8
| 1,722
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Mirror the phpBB "customisation database" (MODs and styles).  Each stage
# is checkpointed by an empty work.* marker file, so the script can be
# re-run and resume where it left off.
# NOTE(review): uses the BSD tools jot(1) and fetch(1); not portable to
# Linux without substituting seq(1) and curl/wget.  "local" here is just an
# ordinary variable name at top level, not the function-scope builtin.
pages=42
set -e
# Stage 1: fetch the paginated contribution index (25 entries per page).
if ! test -e work.index_fetched; then
for page in `jot $pages`; do
start=$(expr \( $page - 1 \) \* 25) || true
fetch -o "index$(printf %02d $page).html" "http://www.phpbb.com/customise/db/contributions/all/start_$start"
done
touch work.index_fetched
fi
# Stage 2: scrape the mod/style detail-page links out of the index pages.
if ! test -e work.pages_parsed; then
:>state.mod_links
for f in index*.html; do
grep -oE "http://www.phpbb.com/customise/db/(mod|style)/[^'\"]*/" $f >>state.mod_links
done
touch work.pages_parsed
fi
# Stage 3: fetch each detail page (skipping ones already on disk).
if ! test -e work.mod_pages; then
for link in `cat state.mod_links`; do
local="page.$(basename $(dirname "$link")).$(basename "$link").html"
test -e "$local" || fetch -o "$local" "$link"
done
touch work.mod_pages
fi
# Stage 4: extract the first download link per page and fetch the archive.
if ! test -e work.dl; then
for page in page.*.html; do
# higher id does not mean a more recent version
link=`grep -v mode_view "$page" |grep -o "http://www.phpbb.com/customise/db/download/id_[0-9]*" |head -1`
if test -z "$link"; then
# this happens
# e.g. page.mod.disable_banning_administrators_and_moderators.html
echo 'empty link'
continue
fi
local="archive.$(echo "$page" |awk -F. '{print $2}').$(echo "$page" |awk -F. '{print $3}').zip"
if ! test -f "$local"; then
fetch -o "$local" "$link"
fi
done
touch work.dl
fi
# Stage 5: unzip each archive and flatten any single-directory wrappers
# (repeatedly hoists the lone subdirectory's contents up one level).
if ! test -e work.extract; then
rm -rf extract
mkdir extract
for a in archive.*.zip; do
extract=extract/"`echo $a |sed -e s/^archive.// -e 's/.zip$//'`"
mkdir "$extract"
unzip "$a" -d "$extract"
while test `ls "$extract" |wc -l` -eq 1 && test -d "$extract"/*; do
chmod u+rwx "$extract"/* &&
mv "$extract"/* extract/.move.tmp &&
mv extract/.move.tmp/* "$extract" &&
rm -r extract/.move.tmp
done
done
touch work.extract
fi
| true
|
0f111e85ace3ae5ea63533eb4b9e589656d34cfb
|
Shell
|
binary-c/menu
|
/info_sub_btc_value.sh
|
UTF-8
| 560
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Finance info screen: shows the current Bitcoin price (CoinDesk API) and a
# few stock quotes (Yahoo Finance CSV API), then waits for a keypress.
./system_sub_header.sh 'Finance'
# Extract bpi.USD.rate from the CoinDesk JSON.  print(...) keeps the
# one-liner working under both Python 2 and Python 3 (the original used the
# Python-2-only print statement).
response=$(curl -s http://api.coindesk.com/v1/bpi/currentprice.json | python -c "import json, sys; print(json.load(sys.stdin)['bpi']['USD']['rate'])")
echo ""
echo "$(tput setaf 2) Bitcoin $(tput sgr0) $response"
for fname in aapl goog msft
do
    # f=p2l1 -> percent change + last trade price, as CSV.
    # Was "{$fname}", which sent the literal braces to the API.
    url="http://download.finance.yahoo.com/d/quotes.csv?s=${fname}&f=p2l1"
    # Strip commas and quotes from the CSV line in a single sed pass.
    response=$(curl -s "$url" | sed -e 's/,/ /g' -e 's/"//g')
    echo "$(tput setaf 8) $fname $(tput sgr 0) $response"
done
echo ""
./system_sub_pause.sh
| true
|
7bc471e87980f57d9356620a9efdc2e474e79d17
|
Shell
|
bizm/zyme-caucus-integration
|
/dev/entrypoint.sh
|
UTF-8
| 1,044
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: on the first run (signalled by the presence of
# /root/project_source) copy the sources into the workspace, point Maven's
# local repository at the project tree, and import the mock server's TLS
# certificate into the JVM trust store; finally start the app in dev mode.
# if project_source directory exists we assume it as a fresh start for this container
if [ -d "/root/project_source" ]
then
    echo "Doing initial setup..."
    # move all the sources
    cp -r /root/project_source/* /root/workspace/
    rm -rf /root/project_source
    # not sure what's the point of settings-docker.xml but it comes in handy for us
    cp /root/.m2/settings-docker.xml /root/.m2/settings.xml
    # now instruct maven to store repository under our project directory, yep it is nasty but it'll help to speed up development a bit
    sed -i 's#<localRepository>.*</localRepository>#<localRepository>/root/workspace/.m2</localRepository>#' /root/.m2/settings.xml
    echo "Fetching certificate from mocks..."
    # Feed one line of input so s_client disconnects after the handshake,
    # then keep only the PEM certificate block.
    echo -n true | openssl s_client -connect host.docker.internal:443 2>/dev/null | openssl x509 > ~/docker.crt
    # Import into the JVM trust store.  "$JAVA_HOME" replaces the original
    # useless-and-unquoted $(echo $JAVA_HOME).
    keytool -importcert -file ~/docker.crt -alias docker -keystore "$JAVA_HOME/lib/security/cacerts" -storepass changeit -noprompt
    echo "We're ready to go now!"
fi
mvn clean compile vertx:run -P dev-docker
| true
|
8e171c9c909e469d9540483e965566007a1adfa8
|
Shell
|
Erukino/Login
|
/opciones
|
UTF-8
| 4,030
| 3.359375
| 3
|
[] |
no_license
|
#!/data/data/com.termux/files/usr/bin/bash
# code from 17 November 2018 (original comment: "codigo 17 dw Noviembre del 2018")
# Termux login helper: loads the security question, its expected answer and
# the stored password, each kept base64-encoded under $PREFIX.
pregunta1=$(base64 -d $PREFIX/libexec/termux/.quiz/pregunta_1)
respuesta1=$(base64 -d $PREFIX/libexec/termux/.quiz/resp_1)
clave_1=$(base64 -d $PREFIX/libexec/termux/.password/pass)
function inicio_opc {
# Main options menu: 1) recover password via the security question,
# 2) return to the login script, 3) change the password, 4) change the
# security question.  Reads a single keypress into $opc.
source $PREFIX/libexec/.banners/logo
echo
echo "°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°"
echo "1) Olvidaste la contraseña"
echo "2) Regresar al menu anterior"
echo "3) Cambiar clave"
echo "4) Cambiar pregunta de seguridad"
echo "°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°"
echo
read -n 1 -p "Ingresa la opcion deseada $ " opc
# NOTE(review): -eq is an integer comparison; a non-numeric keypress makes
# these tests emit a shell error before falling through to the else branch.
if [ $opc -eq 1 ]; then
sleep 1
echo
cuestionario
echo
elif [ $opc -eq 2 ]; then
echo
sleep 1
source $PREFIX/var/log/login.sh
elif [ $opc -eq 3 ]; then
sleep 2
cambio_clave
elif [ $opc -eq 4 ]; then
sleep 2
cambio_pregunta
else echo "opcion no valida"
sleep 2
fi
}
function cuestionario {
# Password recovery: ask the stored security question; on a correct answer
# briefly display the stored password, then return to the login script.
clear
source $PREFIX/libexec/.banners/logo
echo "Responde la pregunta de seguridad para recuperar
tu clave"
echo "$pregunta1"
echo
read -p "$" respuesta
echo
# The bracket-wrapped operands ("[x]" = "[y]") still amount to a plain
# string comparison: equal iff the answers match.
if [ ["$respuesta"] = ["$respuesta1"] ]; then
echo
echo "CORRECTO"
echo "Por favor esperar"
echo "."
sleep 0.80
echo ".."
sleep 0.80
echo "..."
sleep 0.80
source $PREFIX/libexec/.banners/logo
clear
# Show the recovered password for a few seconds, then clear the screen.
echo "$clave_1"
sleep 6
clear
source $PREFIX/var/log/login.sh
else echo "error"
echo "error"
inicio_opc
fi
}
function cambio_pregunta {
# Change the security question: verify the current password, read the new
# question and its answer (answer confirmed twice), then store both
# base64-encoded under $PREFIX/libexec/termux/.quiz/.
clear
echo "PROCESANDO"
source $PREFIX/libexec/.banners/logo
read -p "Por favor ingresar clave actual para cambiar pregunta de seguridad $ " clave
if [ ["$clave"] = ["$clave_1"] ]; then
echo "Porfavor ingrese la nueva pregunta de seguridad"
read -p "$ " pregunta_1
echo "Porfavor ingrese la respuesta"
read -p "$ " respuesta_1
clear
source $PREFIX/libexec/.banners/logo
echo "Ingrese nuevamente la respuesta"
read -p "$ " respuesta_2
clear
source $PREFIX/libexec/.banners/logo
if [ ["$respuesta_1"] = ["$respuesta_2"] ]; then
echo "Completando cambio de pregunta y respuesta"
# Re-encode: write the plaintext to a temp file, replace the old
# base64-encoded file with the encoding of the new value, clean up.
printf "$pregunta_1" > $PREFIX/libexec/termux/.quiz/pregunta
rm $PREFIX/libexec/termux/.quiz/pregunta_1
base64 $PREFIX/libexec/termux/.quiz/pregunta > $PREFIX/libexec/termux/.quiz/pregunta_1
rm $PREFIX/libexec/termux/.quiz/pregunta
printf "$respuesta_1" > $PREFIX/libexec/termux/.quiz/resp
rm $PREFIX/libexec/termux/.quiz/resp_1
base64 $PREFIX/libexec/termux/.quiz/resp > $PREFIX/libexec/termux/.quiz/resp_1
rm $PREFIX/libexec/termux/.quiz/resp
sleep 2
source $PREFIX/var/log/login.sh
else
clear
source $PREFIX/libexec/.banners/logo
echo "Error no coinciden las respuestas"
sleep 3
cambio_pregunta
fi
fi
# NOTE(review): a wrong current password falls through silently -- the
# function returns without any feedback to the user.
}
function cambio_clave {
# Change the stored password: verify the current password, prompt twice
# for the new one, then store it base64-encoded under .password/pass.
echo
clear
source $PREFIX/libexec/.banners/logo
read -p "Ingrese su clave actual $ " clave_ac
if [ "$clave_ac" = "$clave_1" ]; then
echo " Iniciando"
echo
read -p "Ingrese nueva clave $ " clave_n
sleep 1
clear
source $PREFIX/libexec/.banners/logo
# BUG FIX: the confirmation prompt had no variable name -- the word
# "clave_co" sat on its own line and was executed as a command, so
# $clave_co stayed empty and the comparison below could never succeed.
read -p "Ingrese nuevamente clave nueva $ " clave_co
if [ "$clave_n" = "$clave_co" ]; then
# printf '%s' avoids treating the password itself as a format string.
printf '%s' "$clave_n" > $PREFIX/libexec/termux/.password/pass1
rm $PREFIX/libexec/termux/.password/pass
base64 $PREFIX/libexec/termux/.password/pass1 > $PREFIX/libexec/termux/.password/pass
rm $PREFIX/libexec/termux/.password/pass1
sleep 2
source $PREFIX/var/log/login.sh
else
source $PREFIX/libexec/.banners/logo
echo " ERROR "
echo "la nueva clave no coincide inicia nuevamente
el proceso"
sleep 2
cambio_clave
fi
else
inicio_opc
fi
}
# Entry point: clear the screen and open the options menu.
clear
echo
inicio_opc
| true
|
701102ae62a9183e633ae045ef865888aab986c9
|
Shell
|
ruohoruotsi/voodoohop-ableton-tools
|
/backupModificationTimesOfMusic
|
UTF-8
| 2,064
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Print (to stdout) a series of `touch -t` commands that would restore the
# current modification times of audio files under the working directory.
# Uses the BSD/macOS stat(1) flags (-f/-t), so this is macOS-specific.
find . -iname "*.mp3*" -o -iname "*.flac*" -o -iname "*.aif*" -o -iname "*.wav*" -o -iname "*.mp4*" -o -iname "*.ogg*" | while read filename;
do
IFS=$'\n' printf "\ntouch -t $(stat -f "%Sm" -t "%Y%m%d%H%M" "$filename") \"$filename\"";
#printf "\ntouch -t \"$filename\"";
done;
printf "\n";
# NOTE(review): everything below this point is Node.js code (Date
# prototype helper, filewalker, fs.utimesSync, ...) pasted directly into a
# bash script -- bash cannot execute it.  It appears to implement the
# save/restore half of the same modification-time backup and presumably
# belongs in its own .js file run under node -- confirm and split it out.
Object.defineProperty(Date.prototype, 'YYYYMMDDHHMMSS', {
value: function() {
function pad2(n) { // always returns a string
return (n < 10 ? '0' : '') + n;
}
return this.getFullYear() +
pad2(this.getMonth() + 1) +
pad2(this.getDate()) +
pad2(this.getHours()) +
pad2(this.getMinutes()) +
pad2(this.getSeconds());
}
});
var path="/Users/thomash/";
var filePath=path+"modifiedDateBackup"
# fs.writeFile(filePath,"");
var fs=require("fs");
var s="";
filewalker(path).on("file",(filename,info) => {
if (
filename.toLowerCase().endsWith(".mp3") ||
filename.toLowerCase().endsWith(".wav") ||
filename.toLowerCase().endsWith(".aif") ||
filename.toLowerCase().endsWith(".flac")||
filename.toLowerCase().endsWith(".aiff") ||
filename.toLowerCase().endsWith(".mp4")) {
var date = new Date(info.mtime).YYYYMMDDHHMMSS();
s += date+" "+filename+"\n";
}
}).on("error", e=>console.error("error",e)).on("done",()=> fs.writeFileSync(filePath,s)).walk();
var s = fs.readFileSync(filePath).toString();
s.split("\n").forEach(line => {
var spit=line.split(" ");
var date = spit.shift();
var file = spit.join(" ");
if (line.trim().length === 0)
return;
var formattedDate = new Date(date[0]+date[1]+date[2]+date[3]+" "+date[4]+date[5]+" "+date[6]+date[7]+" "+date[8]+date[9]+":"+date[10]+date[11]);
console.log("datefile",{date,file,formattedDate});
try {
console.log("line",line);
console.log("modifying ",path+file, formattedDate);
fs.utimesSync(path+file, formattedDate, formattedDate);
} catch (e) {
console.error("err",e);
}
})
| true
|
38144f36b31b72302196249bdd38ede1ecddb4dc
|
Shell
|
ilventu/aur-mirror
|
/chromium-dev/PKGBUILD
|
UTF-8
| 17,627
| 2.875
| 3
|
[] |
no_license
|
# Old Maintainer: Mikhail Vorozhtsov <mikhail.vorozhtsov@gmail.com>
# Maintainer: Gustavo Alvarez <sl1pkn07@gmail.com>
# Change PKGBUILD suggested by: Nagisa
# NOTE(review): this prelude runs network commands (wget, gzip of fetched
# metadata) at the time the PKGBUILD is *sourced*, to compute source= and
# sha1sums= entries dynamically -- makepkg runs offline will fail here.
#########################
## -- Build options -- ##
#########################
_use_nacl=1 # Enable Native Client support.
_use_libpdf=1 # Enable Chrome PDF support.
_use_pepperflash=1 # Use Pepper Flash plugin. Depends on Native Client!
##########################################################
## -- Invalid build options (build fails if enabled) -- ##
##########################################################
#_use_system_ffmpeg=0 # Use system ffmpeg libraries when building.
# Doesn't work because of GCC 4.7 headers. If you use 4.6, you can enable it.
_use_clang=0 # Use clang compiler. Results in faster build and smaller chromium.
##############################################
## -- Package and components information -- ##
##############################################
pkgname='chromium-dev'
pkgver='24.0.1290.1'
_toolchain_rev="9962"
pkgrel='1'
pkgdesc='The open-source project behind Google Chrome (Dev channel)'
arch=('i686' 'x86_64')
url='http://www.chromium.org/'
license=('BSD')
depends=('dbus-glib' 'alsa-lib' 'hicolor-icon-theme' 'libevent' 'libxss' 'nss' 'libxslt' 'udev' 'desktop-file-utils' 'gtk2' 'flac' 'libpng' 'libjpeg' 'icu' 'libusb' 'expat' 'v8') # 'libwebp' 'speex'
makedepends=('python2' 'gperf' 'yasm' 'mesa' 'bison' 'xdg-utils' 'elfutils' 'subversion' 'python-simplejson')
install="${pkgname}".install
backup=('etc/chromium-dev/default')
noextract=()
source=("http://commondatastorage.googleapis.com/chromium-browser-official/chromium-"${pkgver}".tar.bz2"
"${pkgname}.desktop"
"${pkgname}.sh"
'default'
'gcc-4.7.diff'
'chromium-ppapi-r0.patch'
'chromium-20.0.1132.57-glib-2.16-use-siginfo_t.patch')
sha1sums=('5ae9f0d5c711ca230fa74ef294a57f750391bede'
'004d7496d7e08d96bb884748a9e55cd71cf78cee'
'54c53502c26456c9735e67314b2d61e29477438e'
'd6d2da45c0729dfd1c606a15c8ffb7591dbc7b44'
'e25739be0c7e6d14c2675d3ed4dcd99f63f4661c'
'c07d63888e5b72ecb598e45645cdd5c05d8e0c89'
'770065c9e6c68ee7da2a4205cca23b252102cfea')
################################################
## -- Don't touch anything below this line -- ##
################################################
# Additional dependencies
#[ "${_use_system_ffmpeg}" = "1" ] && depends+=('ffmpeg')
# Do we use NaCl?
# Fetch the NaCl/PNaCl toolchain tarballs plus their published .sha1hash
# files, and append them to source=/sha1sums= for makepkg to verify.
if [ "${_use_nacl}" = "1" ]; then
# [ "${CARCH}" = "x86_64" ] && makedepends+=('lib32-zlib' 'lib32-gcc-libs')
# _nacl_sdk_info="$(curl -s 'https://commondatastorage.googleapis.com/nativeclient-mirror/nacl/nacl_sdk/naclsdk_manifest2.json')"
# _pepper_version="$(echo -n "${_nacl_sdk_info}" | grep pepper_canary -B 1 | tail -n 2 | head -n 1 | cut -d '"' -f 4)"
# _nacl_sdk_source="$(echo -n "${_nacl_sdk_info}" | grep linux -B 4 | tail -n 5 | head -n 1 | cut -d '"' -f 4)"
# _nacl_sdk_sha1sum="$(echo -n "${_nacl_sdk_info}" | grep linux -B 2 | tail -n 3 | head -n 1 | cut -d '"' -f 4)"
# source+=("${_nacl_sdk_source}")
# sha1sums+=("${_nacl_sdk_sha1sum}")
makedepends+=('nacl-toolchain-newlib')
rm -fr naclsdk_linux_x86.tgz naclsdk_pnacl_linux_x86.tgz
source+=("http://gsdview.appspot.com/nativeclient-archive2/toolchain/"${_toolchain_rev}"/naclsdk_linux_x86.tgz"
"http://gsdview.appspot.com/nativeclient-archive2/toolchain/"${_toolchain_rev}"/naclsdk_pnacl_linux_x86.tgz")
wget -q http://gsdview.appspot.com/nativeclient-archive2/toolchain/"${_toolchain_rev}"/naclsdk_linux_x86.tgz.sha1hash
wget -q http://gsdview.appspot.com/nativeclient-archive2/toolchain/"${_toolchain_rev}"/naclsdk_pnacl_linux_x86.tgz.sha1hash
_nacl_sdk_sha1sum="$(cat naclsdk_linux_x86.tgz.sha1hash)"
_pnacl_sdk_sha1sum="$(cat naclsdk_pnacl_linux_x86.tgz.sha1hash)"
rm naclsdk_linux_x86.tgz.sha1hash
rm naclsdk_pnacl_linux_x86.tgz.sha1hash
sha1sums+=("${_nacl_sdk_sha1sum}"
"${_pnacl_sdk_sha1sum}")
noextract+=("naclsdk_linux_x86.tgz"
"naclsdk_pnacl_linux_x86.tgz")
fi
# Pepper Flash Plugin and/or libpdf?
# Scrape Google's RPM repo metadata for the matching google-chrome-unstable
# build number and checksum, then add that RPM as a source.
if [ "${_use_pepperflash}" = "1" ] || [ "${_use_libpdf}" = "1" ]; then
rm -f filelists.xml*
[ "$CARCH" = "i686" ] && _rpm_arch='i386'
[ "$CARCH" = "x86_64" ] && _rpm_arch='x86_64'
wget -q "http://dl.google.com/linux/chrome/rpm/stable/"${_rpm_arch}"/repodata/filelists.xml.gz"
gzip -d filelists.xml.gz
_rpm_build="$(cat filelists.xml | grep -e "unstable" | cut -d '"' -f12 | head -n 1)"
_rpm_sha1="$(cat filelists.xml | grep -e "unstable" | cut -d '"' -f2 | head -n 1)"
rm -f filelists.xml*
source+=("http://dl.google.com/linux/chrome/rpm/stable/"${_rpm_arch}"/google-chrome-unstable-"${pkgver}"-"${_rpm_build}"."${_rpm_arch}".rpm")
sha1sums+=("${_rpm_sha1}")
noextract+=("google-chrome-unstable-"${pkgver}"-"${_rpm_build}"."${_rpm_arch}".rpm")
fi
# Are we in Gnome?
_use_gnome=0
if [ -x /usr/bin/gconftool-2 ]; then
depends+=('gconf' 'libgnome-keyring')
_use_gnome=1
fi
# Use Pulseaudio?
_use_pulseaudio=0
if [ -x /usr/bin/pulseaudio ]; then
depends+=('libpulse')
_use_pulseaudio=1
fi
# -- Fails -- #
# Disable Pepper flash if NaCL is disabled.
if [ "${_use_pepperflash}" = "1" ] && [ "${_use_nacl}" = "0" ]; then
msg "To use PepperFlash you need to build chromium with NaCl!. Disabling PepperFlash support"
_use_pepperflash=0
fi
build() {
##############################
## -- Get ready to build -- ##
##############################
cd "${srcdir}"
rm -rf chromium-build
mv chromium-"${pkgver}" chromium-build
cd chromium-build
#msg "Compiler specific configuration..."
if [ "${_use_clang}" = "1" ]; then
msg2 "Download clang"
sh ./tools/clang/scripts/update.sh
fi
msg "Configure to save configuration in ~/.config/"${pkgname}""
sed -e "s|'filename': 'chromium-browser'|'filename': '${pkgname}'|" -e "s|'confdir': 'chromium'|'confdir': '${pkgname}'|" -i chrome/chrome_exe.gypi
sed -e "s|config_dir.Append(\"chromium\")|config_dir.Append(\"${pkgname}\")|" -e "s|config_dir.Append(\"chrome-frame\")|config_dir.Append(\"chrome-frame-${pkgname#chromium-}\")|" -i chrome/common/chrome_paths_linux.cc
msg "Remove unnecesary components"
find third_party -type f \! -iname '*.gyp*' \
\! -path 'third_party/angle/*' \
\! -path 'third_party/cacheinvalidation/*' \
\! -path 'third_party/cld/*' \
\! -path 'third_party/ffmpeg/*' \
\! -path 'third_party/flac/flac.h' \
\! -path 'third_party/flot/*' \
\! -path 'third_party/gpsd/*' \
\! -path 'third_party/harfbuzz/*' \
\! -path 'third_party/hunspell/*' \
\! -path 'third_party/hyphen/*' \
\! -path 'third_party/icu/*' \
\! -path 'third_party/iccjpeg/*' \
\! -path 'third_party/jsoncpp/*' \
\! -path 'third_party/khronos/*' \
\! -path 'third_party/launchpad_translations/*' \
\! -path 'third_party/leveldatabase/*' \
\! -path 'third_party/libjingle/*' \
\! -path 'third_party/libphonenumber/*' \
\! -path 'third_party/libsrtp/*' \
\! -path 'third_party/libusb/libusb.h' \
\! -path 'third_party/libva/*' \
\! -path 'third_party/libvpx/*' \
\! -path 'third_party/libwebp/*' \
\! -path 'third_party/libxml/chromium/*' \
\! -path 'third_party/libXNVCtrl/*' \
\! -path 'third_party/libyuv/*' \
\! -path 'third_party/llvm-build/*' \
\! -path 'third_party/lss/*' \
\! -path 'third_party/mesa/*' \
\! -path 'third_party/modp_b64/*' \
\! -path 'third_party/mongoose/*' \
\! -path 'third_party/mt19937ar/*' \
\! -path 'third_party/npapi/*' \
\! -path 'third_party/openmax/*' \
\! -path 'third_party/ots/*' \
\! -path 'third_party/ply/*' \
\! -path 'third_party/protobuf/*' \
\! -path 'third_party/qcms/*' \
\! -path 'third_party/re2/*' \
\! -path 'third_party/scons-2.0.1/*' \
\! -path 'third_party/sfntly/*' \
\! -path 'third_party/skia/*' \
\! -path 'third_party/smhasher/*' \
\! -path 'third_party/speex/*' \
\! -path 'third_party/sqlite/*' \
\! -path 'third_party/tcmalloc/*' \
\! -path 'third_party/tlslite/*' \
\! -path 'third_party/trace-viewer/*' \
\! -path 'third_party/undoview/*' \
\! -path 'third_party/v8-i18n/*' \
\! -path 'third_party/webdriver/*' \
\! -path 'third_party/webgl_conformance/*' \
\! -path 'third_party/WebKit/*' \
\! -path 'third_party/webrtc/*' \
\! -path 'third_party/zlib/*' \
-delete
msg "Misc patches"
# Fix missing gcc4.7.x header
patch --silent -p0 -E -i "${srcdir}"/gcc-4.7.diff
# Fix build with glibc 2.16
patch --silent -p1 -E -i "${srcdir}"/chromium-20.0.1132.57-glib-2.16-use-siginfo_t.patch
# Fix compilation with glib-2.31.6 (http://crbug.com/109527)
sed -i 's|glib/gutils.h|glib.h|' "${srcdir}"/chromium-build/ui/base/l10n/l10n_util.cc
# Missing gyp files in tarball. (http://crbug.com/144823)
if [[ ! -e chrome/test/data/nacl/nacl_test_data.gyp ]]; then
mkdir -p chrome/test/data/nacl
touch chrome/test/data/nacl/nacl_test_data.gyp
echo "{
'targets': [
{
'target_name': 'nacl_tests',
'type': 'none',
},
],
}" > chrome/test/data/nacl/nacl_test_data.gyp
fi
# Make it possible to remove third_party/adobe
echo > "${srcdir}"/flapper_version.h
msg "Use python2"
rm -rf "${srcdir}"/python
mkdir "${srcdir}"/python
ln -s /usr/bin/python2 "${srcdir}"/python/python
export PATH="${srcdir}"/python:$PATH
# Really use Python2
find . -type f -exec sed -i -r -e 's|/usr/bin/python$|&2|g' -e 's|(/usr/bin/python2)\.4$|\1|g' {} +
# if [ "${_use_system_ffmpeg}" = 1 ]; then
# msg "Patch for ffpmeg-git"
# patch --silent -p0 -E < ../patch_for_ffmpeg-git.patch
# fi
if [ "${_use_nacl}" = 1 ]; then
msg "Patch, update and copy NaCl SDK"
# rm -fr "${srcdir}"/nacl_sdk
# mv "${srcdir}"/"${_pepper_version}" "${srcdir}"/"nacl_sdk"
# ln -s "${srcdir}"/nacl_sdk/toolchain/linux_x86_newlib native_client/toolchain/linux_x86_newlib
ln -s /usr/lib/nacl-toolchain-newlib native_client/toolchain/linux_x86_newlib
mkdir native_client/toolchain/.tars
ln -s "${srcdir}"/naclsdk_linux_x86.tgz native_client/toolchain/.tars
ln -s "${srcdir}"/naclsdk_pnacl_linux_x86.tgz native_client/toolchain/.tars
patch --silent -p0 -E -i "${srcdir}"/chromium-ppapi-r0.patch
fi
#######################
## -- Let's build -- ##
#######################
msg "Building Chromium..."
[ "${CARCH}" = "i686" ] && _chromium_arch='ia32'
[ "${CARCH}" = "x86_64" ] && _chromium_arch='x64'
# CFLAGS are passed through release_extra_cflags below
export -n CFLAGS CXXFLAGS
# Silence "identifier 'nullptr' is a keyword in C++11" warnings (but make others in C :/ )
CFLAGS+=' -Wno-c++0x-compat'
# TODO
# use_system_ssl=1 (http://crbug.com/58087)
# use_system_sqlite=1 (http://crbug.com/22208)
# use_system_hunspell=1 (upstream changes needed)
# use_system_vpx=1 (TODO)
# use_system_ffmpeg=1 (TODO)
# use_system_speex=1 (use_system_speex (needs additional shims, https://bugs.gentoo.org/show_bug.cgi?id=432748)
# use_system_zlib (forked, Gentoo bug https://bugs.gentoo.org/show_bug.cgi?id=432746).
# use_system_libwebp=1 (https://chromiumcodereview.appspot.com/10496016 needs to become part of webp release)
# disable_glibc=1 (https://bugs.gentoo.org/show_bug.cgi?id=417019)
# linux_use_tcmalloc=0 (https://bugs.gentoo.org/show_bug.cgi?id=413637)
GYP_DEFINES="\
werror= \
no_strict_aliasing=1 \
linux_sandbox_path=/usr/lib/"${pkgname}"/chromium-sandbox \
linux_sandbox_chrome_path=/usr/lib/"${pkgname}"/chromium \
release_extra_cflags=\""${CFLAGS}"\" \
ffmpeg_branding=Chrome \
proprietary_codecs=1 \
use_system_bzip2=1 \
use_system_flac=1 \
use_system_icu=1 \
use_system_libevent=1 \
use_system_libexpat=1 \
use_system_libjpeg=1 \
use_system_libpng=1 \
use_system_libusb=1
use_system_libwebp=0 \
use_system_libxml=1 \
use_system_libxslt=1 \
use_system_hunspell=0 \
use_system_speex=0 \
use_system_sqlite=0 \
use_system_ssl=0 \
use_system_v8=1 \
use_system_vpx=0 \
use_system_xdg_utils=1 \
use_system_yasm=1 \
use_system_zlib=0 \
use_gconf="${_use_gnome}" \
use_gnome_keyring="${_use_gnome}" \
use_pulseaudio="${_use_pulseaudio}" \
linux_link_gnome_keyring="${_use_gnome}" \
target_arch="${_chromium_arch}" \
linux_strip_binary=1 \
remove_webcore_debug_symbols=1 \
linux_use_gold_binary=0 \
linux_use_gold_flags=0 \
linux_use_tcmalloc=0 \
flapper_version_h_file="${srcdir}"/flapper_version.h \
disable_glibc=1 \
disable_sse2=1 \
"
[ "${_use_nacl}" = "0" ] && GYP_DEFINES+="disable_nacl=1 "
if [ "${_use_clang}" = "1" ]; then
GYP_DEFINES+="clang=1 clang_use_chrome_plugins=1 "
else
GYP_DEFINES+="gcc_version=47 "
fi
# if [ "${_use_system_ffmpeg}" = "1" ]; then
# GYP_DEFINES+="use_system_ffmpeg=1 build_ffmpegsumo=0 "
# else
GYP_DEFINES+="build_ffmpegsumo=1 "
# fi
export GYP_DEFINES
msg2 "Building build project..."
build/gyp_chromium -f make --depth=. build/all.gyp
make BUILDTYPE=Release chrome chrome_sandbox chromedriver
}
# Assemble the built Chromium tree into the package staging area (${pkgdir}).
# Globals read: srcdir, pkgdir, pkgname, pkgver, CARCH, plus the _use_* and
# _rpm_* feature switches defined earlier in the PKGBUILD.
package() {
cd "${srcdir}"/chromium-build
_chromium_home="${pkgdir}"/usr/lib/"${pkgname}"
install -d "${_chromium_home}"
install -d "${pkgdir}"/etc/chromium-dev
msg "Packaging "${pkgname}""
# Main binaries; the sandbox helper must be setuid root (mode 4755) to work.
install -Dm755 out/Release/chrome "${_chromium_home}"/chromium
install -Dm4755 -o root -g root out/Release/chrome_sandbox "${_chromium_home}"/chromium-sandbox
install -Dm755 out/Release/chromedriver "${_chromium_home}"/chromiumdriver
# Resource packs and locale data needed at runtime.
install -Dm644 out/Release/{chrome,chrome_100_percent,content_resources,resources}.pak "${_chromium_home}"/
cp -a out/Release/locales "${_chromium_home}"/
install -Dm644 out/Release/chrome.1 "${pkgdir}"/usr/share/man/man1/"${pkgname}".1
install -Dm644 "${srcdir}"/"${pkgname}".desktop "${pkgdir}"/usr/share/applications/"${pkgname}".desktop
# Icons: the 16/32 px logos live under the default_100_percent theme dir.
for _size in 16 22 24 32 48 128 256; do
case ${_size} in
16|32) branding="chrome/app/theme/default_100_percent/chromium" ;;
*) branding="chrome/app/theme/chromium" ;;
esac
install -Dm644 "${branding}"/product_logo_"${_size}".png "${pkgdir}"/usr/share/icons/hicolor/"${_size}"x"${_size}"/apps/"${pkgname}".png
done
# Launcher wrapper, license, and the default-flags file.
install -Dm755 "${srcdir}"/"${pkgname}".sh "${pkgdir}"/usr/bin/"${pkgname}"
install -Dm644 LICENSE "${pkgdir}"/usr/share/licenses/"${pkgname}"/LICENSE
install -Dm644 "${srcdir}"/../default "${pkgdir}"/etc/chromium-dev/default
# Optional Native Client runtime; only the IRT nexe matching CARCH is shipped.
if [ "${_use_nacl}" = 1 ]; then
msg2 "Adding NaCl components"
install -Dm755 out/Release/libppGoogleNaClPluginChrome.so "${_chromium_home}"/libppGoogleNaClPluginChrome.so
[ "${CARCH}" = "i686" ] && install -Dm755 out/Release/nacl_irt_x86_32.nexe "${_chromium_home}"/nacl_irt_x86_32.nexe
[ "${CARCH}" = "x86_64" ] && install -Dm755 out/Release/nacl_irt_x86_64.nexe "${_chromium_home}"/nacl_irt_x86_64.nexe
install -Dm755 out/Release/nacl_helper{,_bootstrap} "${_chromium_home}"/
fi
# System-ffmpeg symlinking is disabled; the bundled ffmpegsumo is used instead.
# if [ "${_use_system_ffmpeg}" = "1" ]; then
# msg2 "Adding FFMPEG libs"
# for _n in avcodec avdevice avfilter avformat avutil postproc swscale; do
# if [ -e /usr/lib/lib"${_n}".so.[0-9] ]; then
# _f=`echo /usr/lib/lib"${_n}".so.[0-9]`
# else
# _f=`echo /usr/lib/lib"${_n}".so.[0-9][0-9]`
# fi
# _f=`basename "${_f}"`
# ln -s ../"${_f}" "${_chromium_home}"/"${_f}"
# done
# else
install -Dm775 out/Release/libffmpegsumo.so "${_chromium_home}"/
# fi
# Extract and install PepperFlash and libpdf.so
# Both are pulled out of the matching google-chrome-unstable RPM with bsdtar.
if [ "${_use_pepperflash}" = "1" ]; then
msg2 "Adding PepperFlash"
cd "${srcdir}"
bsdtar -xf "google-chrome-unstable-${pkgver}-${_rpm_build}.${_rpm_arch}.rpm" opt/google/chrome/PepperFlash
install -dm755 "${_chromium_home}"/PepperFlash
for i in "${srcdir}"/opt/google/chrome/PepperFlash/*; do install -m644 "$i" "${_chromium_home}"/PepperFlash; done
chmod 775 "${_chromium_home}"/PepperFlash/libpepflashplayer.so
# Scrape the Flash version out of manifest.json and record it in the
# .install script so the post-install message shows the right version.
_flash_version="$(cat "${_chromium_home}"/PepperFlash/manifest.json | grep version | sed 's|[a-z,": ]*||g')"
sed -e "s|use_pepperflash=0|use_pepperflash=1|" -e "s|version=0|version=${_flash_version}|" -i "${srcdir}"/../"${pkgname}".install
rm -fr "${srcdir}"/opt
fi
if [ "${_use_libpdf}" = "1" ]; then
msg2 "Adding libpdf"
cd "${srcdir}"
bsdtar -xf "google-chrome-unstable-${pkgver}-${_rpm_build}.${_rpm_arch}.rpm" opt/google/chrome/libpdf.so
install -m755 "${srcdir}"/opt/google/chrome/libpdf.so "${_chromium_home}"/libpdf.so
rm -rf "${srcdir}"/opt
fi
}
| true
|
358ca70454ecc7078ef54a90d072a2a1dd27c2fb
|
Shell
|
cityofcapetown/dockerfiles-nginx-auth-ldap
|
/bin/run.sh
|
UTF-8
| 267
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Render the nginx LDAP-auth proxy configuration from environment variables,
# then start nginx in the foreground.
# Required env: BIND_DN, BIND_DN_PASSWORD, LDAP_WELCOME_MSG, LDAP_URL, BACKEND_SERVER.
set -e
echo "$(date -Iminutes) Injecting Env Variables."
./inject-env-vars.sh "$BIND_DN" "$BIND_DN_PASSWORD" "$LDAP_WELCOME_MSG" "$LDAP_URL" "$BACKEND_SERVER"
# NOTE(review): this dumps the rendered config -- including the LDAP bind
# password -- into the container logs; consider removing once debugging is done.
cat /nginx.conf
echo "$(date -Iminutes) Starting Nginx Proxy."
nginx -c /nginx.conf
| true
|
456d5f7e85523471f421b6d99cb975a63b065ac8
|
Shell
|
sarusso/sind
|
/entrypoint.sh
|
UTF-8
| 160
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: exec whatever command line was passed to the container.
# With no arguments there is nothing to run, so print a hint and fail.
if [ $# -eq 0 ]; then
  echo "I am just a container, tell me what command to run inside me (i.e. singularity, bash, ...)"
  exit 1
fi
# BUGFIX: quote "$@" so each argument is passed through verbatim; the
# unquoted `exec $@` re-split arguments containing spaces and expanded globs.
exec "$@"
| true
|
39822dc34115120df26e73e6b007cead5fee4099
|
Shell
|
simondeziel/custom-nagios-plugins
|
/plugins/check_puppet_agent
|
UTF-8
| 6,525
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Nagios plugin to monitor Puppet agent state
#
# Copyright (c) 2011 Alexander Swen <a@swen.nu>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
#
# Example configuration
#
# Typical this check is placed on a client and run via nrpe
# so add this to nrpe.cfg:
# command[check_puppet_agent]=/usr/lib/nagios/plugins/check_puppet -w 3600 -c 7200
# This should warn when the agent hasnt run for an hour and go critical after two hours
# if you have dont_blame_nrpe=1 set you can choose to
# command[check_puppet_agent]=/usr/lib/nagios/plugins/check_puppet -w $ARG1$ -c $ARG2$
#
# define service {
# use generic-service
# service_description Puppet agent
# check_command check_nrpe!check_puppet_agent
# or
# check_command check_nrpe!check_puppet_agent!3600!7200
#}
# CHANGELOG:
# 20120126 A.Swen created.
# 20120214 trey85stang Modified, added getopts, usage, defaults
# 20120220 A.Swen Statefile can be overriden
# 20120909 S.Deziel Don't check if the agent daemon is running by default. To enable this
# check, use "-a". This is useful when using cron instead of the daemon.
# Call puppet config to find the location of lastrunfile only
# if none is provided using "-s".
# 20150707 S.Deziel Fix agent daemon regex to match for newer ruby versions. Fix lastrunfile
# query to work with recent versions of puppet.
# 20170922 J.Grammenos Fix typo
# 20181210 S.Deziel Update PATH and handle new lastrunfile format
# Explicitly set the PATH to that of ENV_SUPATH in /etc/login.defs and unset
# various other variables. For details, see:
# https://wiki.ubuntu.com/SecurityTeam/AppArmorPolicyReview#Execute_rules
# Pin PATH to the ENV_SUPATH directories plus the Puppet AIO bin dir, and
# blank out the environment knobs that could alter shell behaviour (see the
# AppArmor policy review link in the header).
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/puppetlabs/puppet/bin
export ENV= BASH_ENV= CDPATH= GLOBIGNORE= BASH_XTRACEFD=
# --- defaults, overridable via command-line options ---
AGENT_DAEMON=false      # -a: also require a running agent daemon
CRIT=7200               # -c: critical threshold in seconds
WARN=3600               # -w: warning threshold in seconds
statefile=""            # -s: path to last_run_summary.yaml
perf_data=""            # filled in later when -p is given
ENABLE_PERF_DATA=false  # -p: emit performance data
# FUNCTIONS
# Print the Nagios status line for internal code $1 and exit with the
# matching plugin return code (0=OK, 1=WARNING, 2=CRITICAL, 3=UNKNOWN).
# Reads globals: version, config, time_since_last, WARN, CRIT, perf_data.
result () {
  local code=$1
  case "$code" in
    0) rc=0; echo "OK: Puppet agent ${version} running catalogversion ${config}|${perf_data}" ;;
    1) rc=3; echo "UNKNOWN: last_run_summary.yaml not found, not readable or incomplete|${perf_data}" ;;
    2) rc=1; echo "WARNING: Last run was ${time_since_last} seconds ago. warn is ${WARN}|${perf_data}" ;;
    3) rc=2; echo "CRITICAL: Last run was ${time_since_last} seconds ago. crit is ${CRIT}|${perf_data}" ;;
    4) rc=2; echo "CRITICAL: Puppet daemon not running|${perf_data}" ;;
    5) rc=3; echo "UNKNOWN: no WARN or CRIT parameters were sent to this check|${perf_data}" ;;
    6) rc=2; echo "CRITICAL: Last run had 1 or more errors. Check the logs|${perf_data}" ;;
  esac
  exit $rc
}
# Print command-line usage to stdout and terminate the plugin with exit
# status 1 (so a mis-invocation never reports OK to Nagios).
usage () {
  cat <<EOF

USAGE: 
 $0 [-w 3600] [-c 7200] [-s statefile]
 -w warning threshold (default 3600 seconds)
 -c critical threshold (default 7200 seconds)
 -s statefile (default: /var/lib/puppet/state/last_run_summary.yaml)
 -p output performance data

EOF
  exit 1
}
# Extract the value of <field> from the <section> block of the YAML state
# file named by the global $statefile.
# $1 - section name (e.g. "version", "time"); $2 - field name (e.g. "config").
# The sed address range starts at the section header and ends at the first
# line matching the field; the substitution then prints only "field: value"
# lines inside that range.  Note: relies on GNU sed's \+ repetition operator
# and on last_run_summary.yaml's fixed indentation.
yaml_parser () {
section="$1"
field="$2"
sed -n "/^ *${section}/,/^ *${field}/ s/^ \+${field}: \(.*\)$/\1/p" "${statefile}"
}
# Parse options; -c and -w must be non-empty and contain no letters
# (a loose "is numeric" check via grep).
while getopts "ac:s:w:p" opt
do
case $opt in
a) AGENT_DAEMON=true ;;
c)
if ! echo $OPTARG | grep -q "[A-Za-z]" && [ -n "$OPTARG" ]
then
CRIT=$OPTARG
else
usage
fi
;;
p) ENABLE_PERF_DATA=true ;;
s) statefile=${OPTARG} ;;
w)
if ! echo $OPTARG | grep -q "[A-Za-z]" && [ -n "$OPTARG" ]
then
WARN=$OPTARG
else
usage
fi
;;
*)
usage
;;
esac
done
# find the state file if none provided
statefile="${statefile:="$(puppet config print lastrunfile)"}"
# check if state file exists
[ -s "${statefile}" -a -r "${statefile}" ] || result 1
if [ "$AGENT_DAEMON" = "true" ]; then
# check puppet daemon:
# I only know the cmd lines for Debian and CentOS/RedHat:
[ "$(ps axf|egrep "/usr/bin/ruby /usr/sbin/puppetd|/usr/bin/ruby.* /usr/bin/puppet agent")" ] || result 4
fi
# get some more info from the yaml file
config="$(yaml_parser 'version' 'config')"
version="$(yaml_parser 'version' 'puppet')"
failed="$(yaml_parser 'resources' 'failed')"
failed_to_restart="$(yaml_parser 'resources' 'failed_to_restart')"
failure="$(yaml_parser 'events' 'failure')"
if [ "$ENABLE_PERF_DATA" = "true" ]; then
# extract some perf data
time_total="$(yaml_parser 'time' 'total')"
time_config_retrieval="$(yaml_parser 'time' 'config_retrieval')"
changes="$(yaml_parser 'changes' 'total')"
# Only emit perf data when all three values were actually found.
if [ -n "${time_total}" -a -n "${time_config_retrieval}" -a -n "${changes}" ]; then
perf_data="time=${time_total} config_retrieval=${time_config_retrieval} changes=${changes}"
fi
fi
# Any missing field means the summary file is incomplete (UNKNOWN);
# any non-zero failure counter means the last run had errors (CRITICAL).
[ -z "${config}" -o -z "${version}" -o -z "${failed}" -o -z "${failure}" -o -z "${failed_to_restart}" ] && result 1
[ "${failed}" -gt 0 -o "${failure}" -gt 0 -o "${failed_to_restart}" -gt 0 ] && result 6
# check when last run happened
last_run="$(yaml_parser 'time' 'last_run')"
[ -z "${last_run}" ] && result 1
# Attempt to get the uptime in seconds
if [ -r /proc/uptime ]; then
read -r uptime leftover < /proc/uptime
uptime="${uptime%.*}"
else
uptime=0
fi
# If the uptime is greater than the critical threshold
# check if the last run is too old. This ensure that a box
# that was offline for an extended period won't warn until
# it had a chance to run the puppet agent at least once.
# If the uptime is NOT available, run check and potentially
# warn uselessly.
if [ "${uptime}" -gt "${CRIT}" -o "${uptime}" -eq 0 ]; then
now="$(date +%s)"
time_since_last="$((now-last_run))"
[ "${time_since_last}" -ge "${CRIT}" ] && result 3
[ "${time_since_last}" -ge "${WARN}" ] && result 2
fi
# if we come here it works!
result 0
# END
| true
|
3eb8adbdd32c14c8510543a3c7e475463c1d4ea9
|
Shell
|
gear259/openvix
|
/se2-upgrade-s-only.sh
|
UTF-8
| 2,282
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
# BUGFIX: the original first line was "!/usr/bin/sh" -- missing the leading
# '#', so it was executed as a (failing) command instead of a shebang.
###################################### script version ##########################################
###################################### se2 satellite only upgrade.sh ##########################################
# Back up user configuration to the hard drive, then download the latest
# satellite-only custom firmware image ready for flashing.
echo ""
echo ""
echo "This script will backup userfiles and download custom firmware to the latest custom version"
echo ""
echo "WAIT WHILE USER FILES ARE BACKED UP TO HARD DRIVE!...."
echo "......................................................"
# Show a progress message on the receiver's OSD via the local web API.
wget -O /dev/null -q "http://localhost/web/message?text=WAIT WHILE USER FILES ARE BACKED UP TO HARD DRIVE&type=2&timeout=2"
# BUGFIX: several commands below originally ended in a single '&'
# (backgrounding them so they raced each other); the commented-out
# duplicate block in the original showed '&&' was intended, so run the
# backup steps sequentially and stop the chain on failure.
cd /etc/ &&
cp -f -v inadyn.conf host* wpa_supplicant* fstab /media/hdd &&
cd /etc/enigma2/ &&
cp -f -v -r /etc/tuxbox/config /media/hdd &&
cp -f -v autotimer.xml timers.xml userbouquet.favourites.tv /media/hdd &&
cp -f -v /etc/network/interfaces /media/hdd
# Remove any previous "latest" image so only the new download remains.
rm -r /media/hdd/imagebackups/latest*
echo "......................................................."
echo "USER FILES SAVED FOR RESTORE AFTER UPDATE"
echo "Showing a list of backed up files...."
ls -a /media/hdd/
sleep .5
echo "Now preparing to download new custom firware...."
echo "......................................................."
echo "......................................................."
echo "DEPENDING ON INTERNET SPEED, THIS CAN A WHILE..................."
# Download the firmware image into the image-backup directory.
cd /media/hdd/imagebackups &&
echo "Downloading ...................................................." &&
wget --output-document=latest-se2-sky-only-5.2.034.zip "https://www.dropbox.com/s/6luat2e9wuxg0ve/se-skyonly.zip?dl=0" &&
echo "======================================================="
echo "======================================================="
echo "NOTE: File saved in '/hdd/imagebackups' go and flash it!"
echo ""
echo "Once flashed and rebooted, go to MENU/VIX/SCRIPTS/RESTORE"
echo ""
echo "Go to /Vix/IMAGE MANAGER, select 'Latest' and FLASH"
echo "NOW your ready to press EXIT TWICE to get to Image Manager"
| true
|
6e8366851b78df84f229080b8a1cc75338228fcb
|
Shell
|
2000dB/dotfiles
|
/bashrc
|
UTF-8
| 3,885
| 3.328125
| 3
|
[] |
no_license
|
#! /bin/bash
# Commands matching these patterns are kept out of shell history.
export HISTIGNORE="&:ls:la:clear:exit:c:[*"
######## COLORS ########
# ls colours for BSD ls (LSCOLORS) and GNU ls (LS_COLORS).
export LSCOLORS="DxGxBxDxCxEgEdxbxgxcxd"
export LS_COLORS="di=1;33"
# ANSI escape sequences used by the prompt (expanded via echo -e / PS1).
# BUGFIX: removed a broken duplicate definition of `cyan` whose value ended
# in a stray backslash ('\033[0;36m\') and was immediately overwritten.
cyan='\033[0;36m'
purp='\033[0;35m'
WHITE='\033[1;37m'
YELLOW='\033[1;33m'
NC='\033[0m'
######## PROMPT ########
function parse_git_dirty() {
  # Print "*" when the current git checkout has uncommitted changes;
  # print nothing outside a repo or when the working directory is clean.
  local last_line
  if [ -d .git ]; then
    last_line="$(git status 2> /dev/null | tail -n1)"
    [[ "$last_line" != "nothing to commit (working directory clean)" ]] && echo "*"
  fi
}
function parse_git_branch() {
  # Print "[branch*]" (purple brackets, yellow dirty marker) inside a git
  # checkout, or an empty line elsewhere.  Uses parse_git_dirty and the
  # colour variables defined earlier in this file.
  if [ ! -d .git ]; then
    echo ""
    return
  fi
  local current_branch
  current_branch=$(git branch --no-color 2> /dev/null | sed -e '/^[^*]/d' -e "s/* \(.*\)/\1/")
  echo -e $purp"[""${current_branch}"$YELLOW"$(parse_git_dirty)"$purp"]"
}
# Per-platform environment. The linux branch targets a BeagleBone; the
# darwin branch sets up Homebrew, cross toolchains, go and python.
# LINUX - should be beaglebone specific
if [[ "$OSTYPE" == "linux"* ]]; then
    # BeagleBone cape-manager / pinmux sysfs paths.
    export SLOTS=/sys/devices/bone_capemgr.9/slots
    export PINS=/sys/kernel/debug/pinctrl/44e10800.pinmux/pins
    export DEBUG_PINS=/sys/kernel/debug/gpio
    export LD_LIBRARY_PATH=/usr/local/lib:/usr/xenomai/lib
    export EDITOR=nano
    export PS1="$cyan[$WHITE\H@$cyan\W]$WHITE\$(parse_git_branch)$WHITE> $NC"
    alias ls="ls -hl --color"
    alias la="ls -ahl --color"
# OSX
elif [[ "$OSTYPE" == "darwin"* ]]; then
    export PATH=/usr/local/bin:/usr/local/share/python:/usr/local/lib/python2.7/site-packages:/usr/bin:/bin:/usr/sbin:/sbin:$PATH
    # CrossPack
    if [ -d /usr/local/CrossPack-AVR ]; then
        export PATH=/usr/local/CrossPack-AVR/bin:$PATH
        export MANPATH=/usr/local/CrossPack-AVR/man:$MANPATH
    fi
    # ARM
    if [ -d /usr/local/arm/gcc-arm-none-eabi ]; then
        export PATH=/usr/local/arm/gcc-arm-none-eabi/bin:$PATH
    fi
    # Brew
    export PATH=/usr/local/bin:$PATH
    # BUGFIX: the original tested [ 'command -v brew' ], i.e. a non-empty
    # literal string that is always true; actually run the command instead.
    if command -v brew >/dev/null; then
        export PATH=/usr/local/share/python:/usr/local/sbin:$PATH
        if [ -f `brew --prefix`/etc/bash_completion ]; then
            . `brew --prefix`/etc/bash_completion
        fi
    fi
    # go
    export GOPATH=$HOME/go
    export GOROOT="/usr/local/opt/go/libexec"
    export PATH=$PATH:$GOPATH/bin
    export PATH=$PATH:$GOROOT/bin
    # Python
    export PYTHONPATH="/usr/local/lib/python2.7/site-packages:$PYTHONPATH"
    export WORKON_HOME="~/.virtualenvs"
    source /usr/local/bin/virtualenvwrapper.sh
    export EDITOR="emacsclient -na vi" # Use emacsclient, fallback on vi if not available
    PS1="$cyan[\W]$WHITE\$(parse_git_branch)$WHITE> $NC"
    alias ls="ls -hlG"
    alias la="ls -ahlG"
fi
######## PROMPT ########
# Disabled per-$TERM prompt override, kept for reference.
# case "$TERM" in
# "dumb")
# PS1="> "
# ;;
# xterm*|rxvt*|eterm*|screen*)
# ;;
# linux*)
# ;;
# *)
# PS1="> "
# ;;
# esac
######## ALIASES ########
# Default editor - emacsclient, fallback on vi if not available. XXX: fix this, doesn't work as git editor.
alias ..="cd .."
alias ~="cd ~"
alias grep="grep --color"
alias c="clear"
# Print the current public IP address.
alias ip="curl http://ip.appspot.com"
alias e="emacs -nw"
alias emacs="emacs -nw"
alias ec="emacsclient -n"
alias sch="gschem"
alias uva="cd ~/UVA/"
# Shortcuts to lab hosts (handled by the ssh wrapper defined below).
alias 201="ssh root@20.0.0.201"
alias 202="ssh root@20.0.0.202"
alias 203="ssh root@20.0.0.203"
# ssh wrapper that rename current tmux window to the hostname of the
# remote host.
# Rename the current tmux window: ESC-k <name> ESC-\ is tmux's
# screen-style set-title escape sequence.
# $1 - new window title.
settitle() {
    # BUGFIX: pass the title as a %s argument; the original interpolated
    # $1 into the printf format string, so a title containing '%' (or
    # backslashes) would be mis-rendered.
    printf '\033k%s\033\\' "$1"
}
# Wrapper around ssh(1): set the tmux window title to the short host name
# of the target ("user@web1.example.com" -> "web1") for the duration of
# the session, then reset it to "bash" when the session ends.
ssh() {
# Take the part after '@', then the first dot-separated component.
name=$(echo $* | cut -d'@' -f2 | cut -d'.' -f1)
settitle $name
command ssh "$@"
settitle "bash"
}
# Create a remote git repository on github or bitbucket.
# $1 - service ("github" or "bitbucket", or --help for usage)
# $2 - repository name
# $3 - "true"/"false" privacy flag (bitbucket only)
# Returns 1 on --help; curl prompts interactively for the password.
create_repo()
{
    local service=$1
    local repo_name=$2
    local is_private=$3
    if [ "$service" == "--help" ]; then
        echo "create_repo service repo_name private[true/false]"
        # BUGFIX: this function lives in an interactive shell (bashrc);
        # the original `exit 1` here killed the whole shell session.
        return 1
    fi
    # deal with password
    if [ "$service" == "github" ]; then
        # Quote the interpolated name inside the JSON payload.
        curl -s -u "2000db" https://api.github.com/user/repos -d '{"name":"'"$repo_name"'"}'
    elif [ "$service" == "bitbucket" ]; then
        curl -s -u "vincent-uva" https://api.bitbucket.org/1.0/repositories/ --data name="$repo_name" --data is_private="$is_private"
    fi
}
|
1798da02a55f8ac2a55121b299818b24d5c751e4
|
Shell
|
TelegramBots/Telegram.Bot.Examples
|
/Serverless/Telegram.Bot.Examples.AwsLambda.WebHook/2-deploy.sh
|
UTF-8
| 478
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Package the lambda-bot .NET project and deploy it with CloudFormation.
# Expects bucket-name.txt (created by an earlier step) to hold the S3
# artifact bucket name.
set -eo pipefail
ARTIFACT_BUCKET=$(cat bucket-name.txt)
# comment out below 3 lines if you are using aws cli docker image
# it does not have .net, just launch this command from lambda-bot folder
cd lambda-bot
dotnet lambda package
cd ../
# Bucket name is quoted so an empty/odd value fails loudly in aws, not here.
aws cloudformation package --template-file template.yml --s3-bucket "$ARTIFACT_BUCKET" --output-template-file out.yml
aws cloudformation deploy --template-file out.yml --stack-name lambda-bot --capabilities CAPABILITY_NAMED_IAM
| true
|
0176aa14fc11f19686c1b9fffb4d85526aec6cf7
|
Shell
|
okode/spring-boot-ionic
|
/release.sh
|
UTF-8
| 624
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Cut a git-flow release: finish and publish release $1, merge back into
# develop, then bump the version strings to $2 in frontend/config.xml and
# backend/build.gradle.
if [ $# -ne 2 ]; then
  echo "Syntax: release [CURRENT_VERSION] [NEXT_VERSION]"
  exit 1
fi
CURRENT=$1
NEXT=$2
# Create release (versions quoted so an odd value cannot split into args)
git flow release start "$CURRENT" || exit 1
GIT_MERGE_AUTOEDIT=no git flow release finish -m "$CURRENT" "$CURRENT"
# Publish release
git push origin HEAD --tags
# Merge release into develop
git checkout develop
git merge master
# Bump version
# NOTE: `sed -i ''` is the BSD/macOS form; GNU sed needs plain -i instead.
sed -i '' "s/version=\"$CURRENT\"/version=\"$NEXT\"/" frontend/config.xml
sed -i '' "s/version = '$CURRENT'/version = '$NEXT'/" backend/build.gradle
# Update develop with new bumped version
git commit -a -m"Bumped version ($NEXT)"
git push
| true
|
343110232e4d6556f0c13cbb56de9446a8caa686
|
Shell
|
sztomi/options_display_error
|
/repro.sh
|
UTF-8
| 625
| 2.96875
| 3
|
[] |
no_license
|
#! /usr/bin/bash
# Reproduce the conan options-display error: export pinned recipes, lock
# the "variant" package, then build every package against that lockfile
# inside a throwaway conan cache next to this script.
set -ex
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
pkgs=(ffmpeg fontconfig harfbuzz zlib)
# Isolate the run in a private conan cache.
export CONAN_USER_HOME="$DIR/conan_cache"
export CONAN_USERNAME=_
export CONAN_CHANNEL=_
# Quoted: a path containing spaces must stay a single rm argument.
rm -rf "$CONAN_USER_HOME"
conan config set general.revisions_enabled=1
conan config set general.default_package_id_mode=recipe_revision_mode
for pkg in "${pkgs[@]}"; do
  pushd "$pkg"
  conan export .
  popd
done
pushd variant
conan export .
conan graph lock .
popd
for pkg in "${pkgs[@]}"; do
  pushd "$pkg"
  conan create --build missing -tf=None -ne --lockfile ../variant/conan.lock .
  popd
done
| true
|
b8c95684f2e888a2949726332a07b0d46e537473
|
Shell
|
Empty0ne/blackarch
|
/scripts/get-real
|
UTF-8
| 269
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Mirror the "real" blackarch package repo from the remote site into a
# local copy, working relative to the repo root (one level above this script).
cd "$(dirname "$0")/.."
site=blackarch.real
dir=pub/blackarch
real_local=real-repo
real_local_bak=real-repo.bak
sitedir=/var/www/blackarchlinux.org/pub/blackarch
# The ${var:-safety} fallbacks keep rsync --delete-after from ever running
# with an empty source or destination if a variable above is unset.
# NOTE(review): $dir and $real_local_bak are currently unused here.
rsync -avz --delete-after \
"${site:-safety}:${sitedir:-safety}/" "${real_local:-safety}"
| true
|
5eaed0d0b8379f47c01f77d3c40e4f8cfce5e77d
|
Shell
|
peebles/dynamodb-import
|
/launch-kafka.sh
|
UTF-8
| 667
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# Spin up single-node zookeeper + kafka containers and create a Kafka
# topic named after the DynamoDB table given as $1.
TOPIC="$1"
if [ -z "$TOPIC" ]; then
  echo "Must supply the DDB table name as a topic name for Kafka"
  exit 1
fi
docker run -d --name zookeeper -p 2181:2181 jplock/zookeeper:3.4.6
# DOCKER_HOST looks like tcp://host:port; strip the scheme, then the port.
MACHINE_IP=$(basename "$DOCKER_HOST" | awk -F: '{print $1}')
docker run -d --name kafka --link zookeeper:zookeeper --env KAFKA_ADVERTISED_HOST_NAME="$MACHINE_IP" --publish 9092:9092 ches/kafka
echo "waiting 5 seconds for brokers to come up ..."
sleep 5
# JMX_PORT= to avoid a port conflict exception when using the running container.
docker exec -it kafka env JMX_PORT= kafka-topics.sh --create --topic "$TOPIC" --replication-factor 1 --partitions 10 --zookeeper zookeeper:2181
| true
|
bb5f253579b3c2f98ed6bcda222fc9920a49b5ff
|
Shell
|
level5/LaTeX
|
/programming/code/shell/abs-guide/chapter24/demonstration-recursion.sh
|
UTF-8
| 259
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Demonstration of recursion.
# Each call prints a trace line per loop iteration and recurses with var-1
# until var drops below 0.
RECURSION=9
r_count=0
recuse ()
{
# NOTE(review): `var` is NOT declared local, so every recursive call shares
# a single global `var`; after an inner call returns, the outer while-loop
# re-reads whatever value the deeper calls left behind.  `r_count` is
# likewise one global counter across all calls.
var="$1"
while [ "$var" -ge 0 ]
do
echo "Recursion count = "$r_count" +-+ \$var ="$var""
(( var-- )); (( r_count++ ))
recuse "$var"
done
}
recuse $RECURSION
exit $?
| true
|
f7a30e2de5f3cabf220e42dae5b77146cc079c8b
|
Shell
|
fnfly2005/public_work
|
/base_code/sh/modify_gitrepo.sh
|
UTF-8
| 152
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Batch-rewrite the author of the last $1 commits in a git repository
# (meant to run during an interactive rebase: amend each commit, then
# `git rebase --continue` advances to the next one).
# $1 - number of commits to amend.
for i in $(seq 1 "$1")
do
  git commit --amend --author "fnfly2005 <fnfly2005@aliyun.com>"
  git rebase --continue
done
| true
|
6afa54bf2f12797807c9fad71c6d3ae81f402d46
|
Shell
|
Cloudxtreme/dbl-service
|
/dev-scripts/install-soci.sh
|
UTF-8
| 539
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build and install the SOCI database library (sqlite3 backend, C++11,
# boost support) into the active virtualenv prefix.
# NOTE: You need sqlite development files, e.g. on ubuntu:
# libsqlite3-dev
if test ! -d $VIRTUAL_ENV; then
echo "Need VIRTUAL_ENV env"
exit 1
fi
# Resolve this script's directory and the vendored sources under ../extern.
ME=$(readlink -f $0)
MY_DIR=$(dirname $ME)
EXTERNDIR=$(readlink -f "$MY_DIR/../extern")
# NOTE(review): $ROOTDIR is never set in this script -- with it empty the
# build lands in /tmp/soci-build.  Presumably it should point at the
# project root (e.g. $MY_DIR/..); confirm against the calling environment.
BUILD_DIR="$ROOTDIR/tmp/soci-build"
mkdir -p $BUILD_DIR
cd $BUILD_DIR
# Out-of-source cmake build of the vendored soci checkout.
cmake -DCMAKE_INSTALL_PREFIX=$VIRTUAL_ENV -G "Unix Makefiles" \
-DWITH_BOOST=ON \
-DWITH_SQLITE3=ON \
-DSOCI_CXX_C11=ON \
$EXTERNDIR/repos/soci
make -j4
make install
make clean
rm -rf $BUILD_DIR
| true
|
44d955642f0559bf73d2431f152ddf8cc33080b3
|
Shell
|
jbalogh/twttr
|
/bootstrap/app-setup.sh
|
UTF-8
| 790
| 2.53125
| 3
|
[] |
no_license
|
# EC2 bootstrap for the "camelot" app: install system packages, configure
# nginx and the shell, build siege for load testing, then deploy the app
# into a virtualenv and start gunicorn.
yes | sudo yum install -y git mysql{,-devel,-server} gcc screen nginx zsh make
yes | sudo yum groupinstall "Development Libraries"
sudo mv nginx.conf /etc/nginx/
sudo service nginx start
sudo easy_install pip virtualenv{,wrapper}
mkdir ~/.virtualenvs
sudo chsh ec2-user -s /bin/zsh
# Personal dotfiles, symlinked into $HOME.
git clone git://github.com/jbalogh/dotfiles.git .dotfiles
cd .dotfiles
for f in * .*; do
ln -s ~/.dotfiles/$f ~/$f
done
cd ~
# Build the siege HTTP load tester from source.
wget ftp://ftp.joedog.org/pub/siege/siege-latest.tar.gz
tar xf siege-latest.tar.gz
cd siege-2.70
./configure
make
sudo make install
# Check out the app, create its virtualenv, install deps and start
# gunicorn with 8 workers as a daemon.
mkdir ~/dev/
cd ~/dev
git clone git://github.com/jbalogh/twttr.git camelot
source `which virtualenvwrapper.sh`
mkvirtualenv camelot
cd camelot
cp ~/settings_local.py .
pip install -r reqs.txt
./manage.py run_gunicorn -pgunicorn.pid -w8 --daemon
| true
|
5f87e78413e6a755287b788d8b269c48a0890c97
|
Shell
|
busraerkoc/Basic-Bash-Exercises
|
/shell-scripting/exercise6.sh
|
UTF-8
| 247
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Read a path from the user, report whether it is a regular file, a
# directory, or something else, then show its long listing.

# Classify a single path and print the matching message.
# $1 - path to test.
classify() {
  local path=$1
  # BUGFIX: the original tested the literal word FILE ([ -f FILE ] /
  # [ -d FILE ]) instead of the variable's value, so every input was
  # misclassified based on whether a file named "FILE" existed.
  if [ -f "$path" ]
  then
    echo "${path} is a regular file."
  elif [ -d "$path" ]
  then
    echo "${path} is a directory."
  else
    echo "${path} is other type of file."
  fi
}
read -p "Please enter the name of the file or directory:" FILE
classify "$FILE"
ls -l "$FILE"
| true
|
8b0709d7a38f36329631b3679881342760301835
|
Shell
|
stevenschobert/dotfiles
|
/setup.sh
|
UTF-8
| 11,402
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
ASDF_DIR="$HOME/.asdf"
OMZSH_DIR="$HOME/.oh-my-zsh"
TPM_DIR="$HOME/.tmux/plugins/tpm"
VUNDLE_DIR="$HOME/.vim/bundle/Vundle.vim"
RUBY_VERSION="3.0.2"
NODE_VERSION="18.17.0"
DENO_VERSION="1.35.1"
JAVA_VERSION="adoptopenjdk-jre-8.0.252+9.1"
KOTLIN_VERSION="1.6.0"
PYTHON_VERSION="3.9.9"
GROOVY_VERSION="apache-groovy-binary-3.0.4"
GOLANG_VERSION="1.20.6"
ERLANG_VERSION="24.1.7"
HAXE_VERSION="4.2.4"
NEKO_VERSION="2.3.0"
RUST_VERSION="1.64.0"
PHP_VERSION="8.1.12"
PLATFORMSTR="$(uname -s)"
# Repo setup
if !(cat "$HOME/.git/info/exclude" | grep -q \*$ 2>/dev/null); then
echo "[setup] Excluding all files from git"
echo "*" >> "$HOME/.git/info/exclude"
fi
# Mac-specific setup
if [[ "$PLATFORMSTR" == "Darwin" ]]; then
echo "[setup] Detected macOS installation, running optional mac setup"
# System flags
echo "[setup] Setting OS X preferences at $HOME/.osx/set_system_flags.sh"
sh "$HOME/.osx/set_system_flags.sh"
# Homebrew
# http://brew.sh
if !(hash brew 2>/dev/null); then
echo "[setup] Installing homebrew..."
# ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
fi
# Homebrew bundle
# https://github.com/Homebrew/homebrew-bundle
if !(brew tap | grep -q homebrew/bundle 2>/dev/null); then
echo "[setup] Installing homebrew/bundle"
brew tap Homebrew/bundle
fi
# Homebrew deps
echo "[setup] Installing homebrew dependencies"
(cd "$HOME"; brew bundle)
fi
# Tmux plugin manager
# https://github.com/tmux-plugins/tmp
if command -v tmux>/dev/null; then
if !(test -d "$TPM_DIR" 2>/dev/null); then
echo "[setup] Installing tmux plugin manager"
git clone https://github.com/tmux-plugins/tpm.git "$TPM_DIR"
fi
fi
# Asdf version manager
# https://github.com/HashNuke/asdf
if !(test -d "$ASDF_DIR" 2>/dev/null); then
echo "[setup] Installing asdf"
git clone https://github.com/asdf-vm/asdf.git "$ASDF_DIR"
fi
# Temp add asdf to path
if !(hash asdf 2>/dev/null); then
export PATH="$ASDF_DIR/bin:$ASDF_DIR/shims:$PATH"
fi
# Ruby version manager plugin
if !(asdf plugin-list | grep -q ruby 2>/dev/null); then
echo "[setup] Installing asdf plugin for ruby"
asdf plugin-add ruby https://github.com/asdf-vm/asdf-ruby.git
fi
# Nodejs version manager plugin
if !(asdf plugin-list | grep -q nodejs 2>/dev/null); then
echo "[setup] Installing asdf plugin for nodejs"
asdf plugin-add nodejs https://github.com/asdf-vm/asdf-nodejs.git
# bash "$ASDF_DIR/plugins/nodejs/bin/import-release-team-keyring"
fi
# Deno version manager plugin
if !(asdf plugin-list | grep -q deno 2>/dev/null); then
echo "[setup] Installing asdf plugin for deno"
asdf plugin-add deno https://github.com/asdf-community/asdf-deno.git
fi
# Java version manager plugin
if !(asdf plugin-list | grep -q java 2>/dev/null); then
echo "[setup] Installing asdf plugin for java"
asdf plugin-add java https://github.com/halcyon/asdf-java.git
fi
# Kotlin version manager plugin
if !(asdf plugin-list | grep -q kotlin 2>/dev/null); then
echo "[setup] Installing asdf plugin for kotlin"
asdf plugin-add kotlin https://github.com/missingcharacter/asdf-kotlin.git
fi
# Python version manager plugin
if !(asdf plugin-list | grep -q python 2>/dev/null); then
echo "[setup] Installing asdf plugin for python"
asdf plugin-add python https://github.com/danhper/asdf-python.git
fi
# Groovy version manager plugin
if !(asdf plugin-list | grep -q groovy 2>/dev/null); then
echo "[setup] Installing asdf plugin for groovy"
asdf plugin-add groovy https://github.com/weibemoura/asdf-groovy.git
fi
# Golang version manager plugin
if !(asdf plugin-list | grep -q golang 2>/dev/null); then
echo "[setup] Installing asdf plugin for golang"
asdf plugin-add golang https://github.com/kennyp/asdf-golang.git
fi
# Erlang version manager plugin
if !(asdf plugin-list | grep -q erlang 2>/dev/null); then
echo "[setup] Installing asdf plugin for erlang"
asdf plugin-add erlang https://github.com/asdf-vm/asdf-erlang.git
fi
# Haxe version manager plugin
if !(asdf plugin-list | grep -q haxe 2>/dev/null); then
echo "[setup] Installing asdf plugin for haxe"
asdf plugin-add haxe https://github.com/asdf-community/asdf-haxe.git
fi
# Neko version manager plugin
if !(asdf plugin-list | grep -q neko 2>/dev/null); then
echo "[setup] Installing asdf plugin for neko"
asdf plugin-add neko https://github.com/asdf-community/asdf-neko.git
asdf haxe neko dylibs link # link neko dylibs
fi
# Rust version manager plugin
if !(asdf plugin-list | grep -q rust 2>/dev/null); then
echo "[setup] Installing asdf plugin for rust"
asdf plugin-add rust https://github.com/code-lever/asdf-rust.git
fi
# Rust version manager plugin
if !(asdf plugin-list | grep -q php 2>/dev/null); then
echo "[setup] Installing asdf plugin for php"
asdf plugin-add php https://github.com/asdf-community/asdf-php.git
fi
# Install ruby version
# --- Language runtimes via asdf ---------------------------------------------
# "plugin:version" pairs for every runtime this machine should have.
# The *_VERSION variables are defined earlier in this script.
RUNTIMES="ruby:$RUBY_VERSION nodejs:$NODE_VERSION deno:$DENO_VERSION java:$JAVA_VERSION kotlin:$KOTLIN_VERSION python:$PYTHON_VERSION groovy:$GROOVY_VERSION golang:$GOLANG_VERSION erlang:$ERLANG_VERSION haxe:$HAXE_VERSION neko:$NEKO_VERSION rust:$RUST_VERSION"

# Install plugin $1 at version $2 unless that exact version is already listed.
# grep -F: version strings contain dots, match them literally.
asdf_install() {
  if ! asdf list "$1" 2>/dev/null | grep -Fq "$2"; then
    echo "[setup] Installing $1 $2"
    asdf install "$1" "$2"
  fi
}

# Pin "plugin version" in ~/.tool-versions unless it is already pinned.
asdf_pin() {
  if ! grep -Fq "$1 $2" "$HOME/.tool-versions" 2>/dev/null; then
    echo "[setup] Setting $1 $2 in $HOME/.tool-versions"
    echo "$1 $2" >> "$HOME/.tool-versions"
  fi
}

for spec in $RUNTIMES; do
  asdf_install "${spec%%:*}" "${spec#*:}"
done
# php is special-cased: its build needs Homebrew's bison on the PATH.
if ! asdf list php 2>/dev/null | grep -Fq "$PHP_VERSION"; then
  echo "[setup] Installing php $PHP_VERSION"
  LDFLAGS="-L/usr/local/opt/bison/lib" PATH="/usr/local/opt/bison/bin:$PATH" asdf install php "$PHP_VERSION"
fi

# Pin every runtime (php included) in ~/.tool-versions.
for spec in $RUNTIMES php:$PHP_VERSION; do
  asdf_pin "${spec%%:*}" "${spec#*:}"
done

# Ruby gems
# echo "[setup] Installing global gems from $HOME/.ruby/install-global.sh"
# bash "$HOME/.ruby/install-global.sh"
# Nodejs packages
# echo "[setup] Installing global node packages from $HOME/.node/install-global.sh"
# bash "$HOME/.node/install-global.sh"

# Vundle
# https://github.com/VundleVim/Vundle.vim
if [ ! -d "$VUNDLE_DIR" ]; then
  echo "[setup] Installing vundle"
  git clone https://github.com/VundleVim/Vundle.vim.git "$VUNDLE_DIR"
fi

# Oh-my-zsh (kept disabled)
# https://github.com/robbyrussell/oh-my-zsh
# if !(test -d "$OMZSH_DIR" 2>/dev/null); then
# echo "[setup] Installing Oh My Zsh"
# export ZSH="$OMZSH_DIR"
# sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
# fi
# Set default shell
# if [ "$SHELL" != "/bin/zsh" ]; then
# echo "[setup] setting default shell to zsh"
# chsh -s /bin/zsh
# fi

# Set fish as the default shell on macOS (needs to be registered in /etc/shells).
if [[ "$PLATFORMSTR" == "Darwin" ]]; then
  if [ "$SHELL" != "/usr/local/bin/fish" ]; then
    echo "[setup] setting default shell to fish"
    echo /usr/local/bin/fish | sudo tee -a /etc/shells
    chsh -s /usr/local/bin/fish
  fi
fi
echo "[setup] All done, have at it!"
exit 0
| true
|
23b97eae143ca94be531de454ac1cceb42e9c08b
|
Shell
|
takov/docker
|
/hadoop-dfs/resources/start_hadoop.sh
|
UTF-8
| 294
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint for a Hadoop DFS node.
# $1: "true" when this container is the name node (formats HDFS, starts DFS).
# The HDFS startup scripts reach the workers over ssh, so sshd must be up
# before start-dfs.sh runs; skip the HDFS steps when sshd fails to start.
if sudo -S service ssh start; then
    # Only the name node formats the namenode and launches the cluster.
    # "${1:-false}": tolerate a missing argument instead of erroring in [.
    if [ "${1:-false}" = "true" ]; then
        echo "Y" | /home/hadoop/hadoop/bin/hdfs namenode -format
        /home/hadoop/hadoop/sbin/start-dfs.sh
    fi
fi
# Block forever so the container keeps running.
tail -f /dev/null
| true
|
6ff1455bd3d6ac9082c379c655290359c3e2c79c
|
Shell
|
tjablin/ck-mlperf
|
/program/sylt/ck-compile.sh
|
UTF-8
| 405
| 2.703125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Compile the SYLT trace-generator demo with the CK-provided toolchain and
# copy the resulting binary back into the directory we were invoked from.
# All CK_* variables are exported by the CK environment; the flag variables
# are intentionally unquoted so they word-split into separate arguments.
cwd=$(pwd)
cd "${CK_ENV_BENCH_SYLT_TRACE_GENERATOR}" || exit 1
rm -f ./a.out
echo ""
echo "$CK_CXX $CK_COMPILER_FLAGS_OBLIGATORY $CK_FLAGS_DYNAMIC_BIN ${CK_FLAG_PREFIX_INCLUDE}./ demo.cc ${CK_FLAGS_OUTPUT}a.out"
$CK_CXX $CK_COMPILER_FLAGS_OBLIGATORY $CK_FLAGS_DYNAMIC_BIN ${CK_FLAG_PREFIX_INCLUDE}./ demo.cc ${CK_FLAGS_OUTPUT}a.out
er=$?; if [ $er != 0 ]; then exit $er; fi
echo ""
cp -f ./a.out "$cwd"
| true
|
931e249cc6f2abadb9fb9de88562a4efc7be6bff
|
Shell
|
csuzhangxc/dm-k8s
|
/examples/test-case-2/generate.sh
|
UTF-8
| 334
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Print 100 dm-worker task entries (a YAML list fragment) on stdout.
# Emit one task entry for dm-worker-$1, indented for the enclosing list.
function assaemble_config() {
    printf '    -\n'
    printf '      source-id: "dm-worker-%s"\n' "$1"
    printf '      black-white-list: "instance"\n'
    printf '      mydumper-config-name: "global"\n'
    printf '      loader-config-name: "global"\n'
    printf '      syncer-config-name: "global"\n'
    printf '\n'
}
for i in $(seq 100); do
    assaemble_config "$i"
done
| true
|
0133291bb3ebe8d3f7e1e7b5d29e407b38c8bd34
|
Shell
|
UN-labs/shokku
|
/provisioners/terraform/modules/cluster/docker/vultr/scripts/boot-script.sh
|
UTF-8
| 459
| 3.640625
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
#!/bin/sh
# Vultr boot script: configure the private-network interface (ens7) with the
# instance's private IP fetched from the Vultr metadata service, then bring
# the interface up. Idempotent: skipped if the stanza is already present.
# Enable private network if not previously enabled
if ! grep -Fxq "auto ens7" /etc/network/interfaces
then
# Fetch private ip
# NOTE(review): no curl --fail/retry here; if the metadata service is
# unreachable, PRIVATE_IP ends up empty and a broken stanza is appended.
METADATA_ENDPOINT=http://169.254.169.254/v1/interfaces/1/ipv4/address
PRIVATE_IP=$(curl $METADATA_ENDPOINT)
# Append auto network configuration
cat <<EOT >> /etc/network/interfaces
auto ens7
iface ens7 inet static
address ${PRIVATE_IP}
netmask 255.255.0.0
mtu 1450
EOT
# Enable private network
ifup ens7
fi
| true
|
bba12526785534646b59396bcbfd9ac36f4b6863
|
Shell
|
haskell-crypto/cryptonite
|
/cbits/decaf/tools/generate.sh
|
UTF-8
| 4,746
| 3.5625
| 4
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Usage: ./generate.sh /path/to/ed448goldilocks-code
#
# Generate all files from ed448goldilocks branch 'master'
# (available at <git://git.code.sf.net/p/ed448goldilocks/code>).
#
# Project is synced with upstream commit
# '807a7e67decbf8ccc10be862cdf9ae03653ffe70'.
#
# Notes about transformations applied:
#
# * only a subset of library files are used, cryptonite needing only x448
# and ed448. Some headers like point_255.h are still included but copied
# empty, as the definitions are not necessary. Only the simplest
# architectures arch_32 and arch_ref64 are used to get the best
# compatibility and generality over performance.
#
# * substitutions are performed in order to add a cryptonite_ prefix
# to all external symbols
#
# * code related to SHAKE is replaced by cryptonite code, referenced from
# a custom shake.h. As a consequence, portable_endian.h is not needed.
#
# * aligned(32) attributes used for stack alignment are replaced by
# aligned(16). This removes warnings on OpenBSD with GCC 4.2.1, and makes
# sure we get at least 16-byte alignment. 32-byte alignment is necessary
# only for AVX2 and arch_x86_64, which we don't have.
#
# * visibility("hidden") attributes are removed, as this is not supported
# on Windows/MinGW, and we have name mangling instead
#
# * function posix_memalign is defined in order to avoid a warning on
# Windows/MinGW. Hopefully it is not called. This definition is put
# inside portable_endian.h because this file is already included.
#
# * files decaf.c and decaf_tables.c are compiled to a single object file
# decaf_all.o to avoid link failure on OpenBSD with --strip-unneeded
# and old versions of binutils (see #186)
# Source tree of the upstream ed448goldilocks checkout (passed as $1).
SRC_DIR="$1/src"
DEST_DIR="`dirname "$0"`"/..
ARCHITECTURES="arch_32 arch_ref64"
if [ ! -d "$SRC_DIR" ]; then
echo "$0: invalid source directory: $1" && exit 1
fi
# Copy file $1 into directory $2, applying the transformations described in
# the header: cryptonite_ symbol prefixes, removal of hidden-visibility
# attributes, and 16-byte stack alignment everywhere except word.h.
convert() {
local FILE_NAME="`basename "$1"`"
local REPL
# word.h keeps 32-byte alignment; every other file is relaxed to 16 bytes.
if [ "$FILE_NAME" = word.h ]; then
REPL='__attribute__((aligned(32)))'
else
REPL='__attribute__((aligned(16)))'
fi
sed <"$1" >"$2/$FILE_NAME" \
-e 's/ __attribute((visibility("hidden")))//g' \
-e 's/ __attribute__((visibility("hidden")))//g' \
-e 's/ __attribute__ ((visibility ("hidden")))//g' \
-e "s/__attribute__((aligned(32)))/$REPL/g" \
-e 's/decaf_/cryptonite_decaf_/g' \
-e 's/DECAF_/CRYPTONITE_DECAF_/g' \
-e 's/gf_/cryptonite_gf_/g' \
-e 's/keccakf/cryptonite_keccakf/g' \
-e 's/NO_CONTEXT_POINTS_HERE/CRYPTONITE_NO_CONTEXT_POINTS_HERE/g' \
-e 's/P25519_SQRT_MINUS_ONE/CRYPTONITE_P25519_SQRT_MINUS_ONE/g'
}
# Top-level sources and shared headers.
convert "$SRC_DIR"/utils.c "$DEST_DIR"
mkdir -p "$DEST_DIR"/include
convert "$SRC_DIR"/include/constant_time.h "$DEST_DIR"/include
convert "$SRC_DIR"/include/field.h "$DEST_DIR"/include
convert "$SRC_DIR"/include/word.h "$DEST_DIR"/include
for ARCH in $ARCHITECTURES; do
mkdir -p "$DEST_DIR"/include/$ARCH
convert "$SRC_DIR"/include/$ARCH/arch_intrinsics.h "$DEST_DIR"/include/$ARCH
done
mkdir -p "$DEST_DIR"/include/decaf
convert "$SRC_DIR"/GENERATED/include/decaf.h "$DEST_DIR"/include
convert "$SRC_DIR"/GENERATED/include/decaf/common.h "$DEST_DIR"/include/decaf
convert "$SRC_DIR"/GENERATED/include/decaf/ed448.h "$DEST_DIR"/include/decaf
convert "$SRC_DIR"/GENERATED/include/decaf/point_448.h "$DEST_DIR"/include/decaf
# Curve sources; decaf.c and decaf_tables.c are additionally combined into a
# single translation unit (decaf_all.c) to avoid the --strip-unneeded link
# failure documented in the header (#186).
for CURVE in ed448goldilocks; do
mkdir -p "$DEST_DIR"/$CURVE
convert "$SRC_DIR"/GENERATED/c/$CURVE/decaf.c "$DEST_DIR"/$CURVE
convert "$SRC_DIR"/GENERATED/c/$CURVE/decaf_tables.c "$DEST_DIR"/$CURVE
convert "$SRC_DIR"/GENERATED/c/$CURVE/eddsa.c "$DEST_DIR"/$CURVE
convert "$SRC_DIR"/GENERATED/c/$CURVE/scalar.c "$DEST_DIR"/$CURVE
cat > "$DEST_DIR"/$CURVE/decaf_all.c <<EOF
/* Combined to avoid link failure on OpenBSD with --strip-unneeded, see #186 */
#include "decaf.c"
#include "decaf_tables.c"
EOF
done
# Field arithmetic for each supported prime field and architecture.
for FIELD in p448; do
mkdir -p "$DEST_DIR"/$FIELD
convert "$SRC_DIR"/$FIELD/f_arithmetic.c "$DEST_DIR"/$FIELD
convert "$SRC_DIR"/GENERATED/c/$FIELD/f_generic.c "$DEST_DIR"/$FIELD
convert "$SRC_DIR"/GENERATED/c/$FIELD/f_field.h "$DEST_DIR"/$FIELD
for ARCH in $ARCHITECTURES; do
mkdir -p "$DEST_DIR"/$FIELD/$ARCH
convert "$SRC_DIR"/$FIELD/$ARCH/f_impl.h "$DEST_DIR"/$FIELD/$ARCH
convert "$SRC_DIR"/$FIELD/$ARCH/f_impl.c "$DEST_DIR"/$FIELD/$ARCH
done
done
# Stub out headers that the 448-only build does not need.
for FILE in point_255.h sha512.h; do
cat > "$DEST_DIR"/include/decaf/$FILE <<EOF
/* Not needed if 448-only */
EOF
done
cat >"$DEST_DIR"/include/portable_endian.h <<EOF
/* portable_endian.h not used */
#if defined(__MINGW32__)
// does not exist on MinGW, but unused anyway
extern int posix_memalign(void **, size_t, size_t);
#endif
EOF
| true
|
8f7f65f2381f55c9a8938fbb3a3589b3404e2d23
|
Shell
|
Shubh9907/ShellScript_Programs
|
/Shell_Script/IfElsePrograms/Head_Tail.sh
|
UTF-8
| 84
| 2.515625
| 3
|
[] |
no_license
|
# Simulate a single coin toss: print "Heads" or "Tails" at random.
random=$(( RANDOM % 2 ))
case "$random" in
  1) echo "Heads" ;;
  *) echo "Tails" ;;
esac
| true
|
196fcd31668d708757857b2a23a4c2a31a22881c
|
Shell
|
jaredquinn/ogbbsguides
|
/processnodelist.sh
|
UTF-8
| 900
| 3.40625
| 3
|
[] |
no_license
|
#
# This uses a modified version of nl2binkd v1.00 by Markus Reschke
# My modified version is available in this repository.
#
# Convert each FTN nodelist referenced by binkd's include lines into the
# text file binkd expects, but only when a newer raw nodelist exists.
#
# Environment
# =====================================================================================
# Note: These variables make up my environment for using these scripts
#
# export SBBSBASE=/home/bbs
# export SBBSCTRL=${SBBSBASE}/ctrl
# export BINKD_CONFIG="${SBBSBASE}/etc/binkd.conf"
# export FIDOCONFIG="${SBBSBASE}/etc/husky.conf"
# Paths of the nodelist text files that binkd.conf includes.
TEXTFILES=$( awk '/^include \/home\/bbs\/ftn\/nodelist/ { print $2 }' ${BINKD_CONFIG} )
E=$( pwd )
for i in ${TEXTFILES}
do
# The raw nodelists live in a directory named after the .txt file (sans suffix);
# the most recently modified file in it is the latest nodelist revision.
DIR=${i%%.txt}
LATEST=$( ls -tr ${DIR} | tail -1 )
if [ -f "${DIR}/${LATEST}" ]
then
# Only convert when the raw nodelist is newer than the generated .txt.
if test "${DIR}/${LATEST}" -nt "${i}"
then
echo Newer nodelist exists in ${DIR}/${LATEST}... converting to ${i}
DOMAIN=$( basename $DIR )
~/bin/nl2binkd "${DIR}/${LATEST}" "${i}" $DOMAIN
fi
fi
done
cd ${E}
| true
|
7af1723236a420f75f1d77e06f51ea774a0bac65
|
Shell
|
zzmg/script
|
/checkPodStatu.sh
|
UTF-8
| 1,016
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Diagnose why a Kubernetes pod restarted.
#   $1 namespace, $2 pod name, $3 container name (currently unused below)
# OOMKilled -> report it; Error -> locate the exited container's docker log
# path on the hosting node and ssh there; anything else -> ask for manual
# inspection.
namespace=$1
pod_name=$2
container_name=$3
if kubectl get pods -n "$namespace" "$pod_name"
then
    # Restart reason from the pod description; strip the label and spaces so
    # the string comparisons below work.
    reason=$(kubectl describe pods -n "$namespace" "$pod_name" | grep "Reason:" | cut -d : -f 2 | tr -d ' ')
    if [ -n "$reason" ];
    then
        # Fixed: the original compared against the literal string '$reason'
        # (single quotes), so these branches could never match.
        if [ "$reason" = "OOMKilled" ];
        then
            echo "The reason of $pod_name restart is $reason."
        elif [ "$reason" = "Error" ];
        then
            echo "The reason of $pod_name restart is code panic..."
            # Node that hosts the pod (column 7 of 'kubectl ... -o wide').
            ip=$(kubectl get pods -n "$namespace" -o wide | grep "$pod_name" | awk '{print $7}')
            echo "$ip"
            # Application name is the first dash-separated token of the pod name.
            server_name=$(echo "$pod_name" | cut -d - -f 1)
            echo "$server_name"
            # Exited application container on that node (skip sidecars/pause).
            container_id=$(ssh "$ip" sudo docker ps -a | grep "$server_name" | grep "Exited" | grep -v "p47-sidecar" | grep -v "nginx" | grep -v "pause" | awk '{print $1}')
            echo "$container_id"
            # Intentionally unquoted so the trailing glob expands on echo.
            log_path=/opt/docker/containers/$container_id*
            echo $log_path
            echo "now...get in the mother node"
            ssh "$ip"
        else
            echo "This reason $reason is not OOMKilled or panic Error ,please manual inspection."
        fi
    else
        echo "pods $pod_name is running"
    fi
else
    echo "pods $pod_name not found"
fi
| true
|
a13393991e938c6f0e41e0822cadee53c46d0657
|
Shell
|
Funz/plugin-Telemac
|
/src/main/scripts/Telemac-singularity.sh
|
UTF-8
| 790
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run a TELEMAC-2D case ($1 = steering .cas file) inside the telemac-mascaret
# container image, using all physical cores, and record the background PID.
CAS=$1
# Telemac cannot handle non-ISO characters in the working-directory path.
if [ 1 -le `echo \`pwd -P\` | grep --color='auto' -P -n "[\x80-\xFF]" | wc -l` ]; then
echo "Telemac will not support non ISO char in path. Exiting.";
exit 1
fi
# Physical core count from /proc/cpuinfo ("cpu cores" field).
NCPU=`grep ^cpu\\\\scores /proc/cpuinfo | uniq | awk '{print $4}'`
## singularity run using docker image:
singularity run -B `echo $PWD`:/workdir -H /workdir docker://irsn/telemac-mascaret:latest telemac2d.py --ncsize=$NCPU $CAS &
## singularity run using singularity image (faster within SLURM): 1st, move docker image to singularity with `singularity build --sandbox telemac-mascaret docker://irsn/telemac-mascaret:latest`
#module load singularity
#singularity run -B `echo $PWD`:/workdir -H /workdir telemac-mascaret telemac2d.py --ncsize=$NCPU $CAS &
# Append the background PID to ./PID so a supervisor can track the run, then
# wait for completion and clean the marker up.
PID=$!
echo $PID >> PID
wait $PID
rm -f PID
| true
|
7f7c1676dadb9fc8986515d69a06394f9cd963be
|
Shell
|
DiegoDeDios/compilers-lecture
|
/labs/01/asm-analytics.sh
|
UTF-8
| 385
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
# Summarize an objdump-style disassembly listing ($1): count how often each
# instruction mnemonic occurs, then list the functions with their addresses.
echo Hi, this is the output of the analysis:
echo You have the following kind of instructions:
# Field 2 (split on runs of 2+ spaces) is the mnemonic; skip %-register and
# x86 noise lines, drop blanks, then count each unique mnemonic.
awk -F '[[:space:]][[:space:]]+' '$2 !~ /%/ && $2 !~/x86/ {print $2}' $1 | awk NF | sort | uniq -c | awk '{print $2 " : Executed " $1 " times"}'
echo You have the following functions:
# Symbol lines look like "addr <name>:"; strip the markers, print name/address.
awk ' /<[a-zA-Z]+>:/ {print $0}' $1 | tr -d "<>:" | awk ' { print $2 " : Located at address " $1 } '
| true
|
d6fa63adce091e47b7e95369dab6156c57f0cff5
|
Shell
|
AndsuLucas/ShellScript-I
|
/useCases/Note/addNote.sh
|
UTF-8
| 1,812
| 3.84375
| 4
|
[] |
no_license
|
#! /usr/bin/env bash
##########################
# Add a note on database #
##########################
addNote() {
printf "\n Type your note:"
read
local CONTENT="${REPLY}"
if [ "${#CONTENT}" -eq 0 ]
then
printf "\n Empty text is not allowed."
exit 1
fi
handleTimeInput() {
if [ -z "$VALID_DATE" ]; then
unset VALID_DATE
printf "\n The date is not valid"
exit 1
fi
[[ $INITIAL_DATE =~ _ ]] && SEPARATOR=1
if [ ! -z "$SEPARATOR" ] && [ -z ${EXPLODED_DATE[1]} ];
then
unset SEPARATOR
printf "Have a separator but have'nt a hour"
exit 1
fi
if [ -z "$VALID_HOUR" ] && [ ! -z ${EXPLODED_DATE[1]} ]; then
unset VALID_HOUR
printf "\n The hour is not valid"
exit 1
fi
}
local INITIAL_DATE=""
handleDateParams() {
if [ "$1" = "--initial" ] || [ "$1" = "-i" ]
then
shift;
INITIAL_DATE=$1
read -a EXPLODED_DATE <<< $(explodeDateAndHour $1 "@")
validateDateAndHour ${EXPLODED_DATE[0]} ${EXPLODED_DATE[1]}
handleTimeInput
return 0
fi
INITIAL_DATE=$(getActualDate)
return 0
}
handleDateParams $@
local MUST_SAVE=""
while [ "$MUST_SAVE" != "Y" ] && [ "$MUST_SAVE" != "N" ];
do
printf "\nSave this note? -> ${INITIAL_DATE}: ${CONTENT} \n"
printf "\n(Y/N):"
read
MUST_SAVE="${REPLY^^}"
done
if [ "$MUST_SAVE" = "N" ]
then
printf "\nOperation abort!"
exit 1
fi
saveContent "${INITIAL_DATE};${CONTENT}"
}
| true
|
38ee136f8c8d90bf660cd52f150246d69b8c28cc
|
Shell
|
flxndn/bin
|
/csvviewer
|
UTF-8
| 915
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Pretty-print a semicolon-separated file as aligned columns: compute the
# widest cell per column, build a comma-separated tab-stop list from the
# cumulative widths, then convert ';' to tabs and expand against those stops.
if [ "$1" == "" ]; then
cat <<DONE
PROGRAM DESCRIPTION
semicolon separated (csv) file viewer
USAGE
$0 FileName
AUTHOR
Peter Borkuti, 2003.01.19
DONE
exit 0
fi;
colnum=`tr -dc ';\n' < $1 | wc -L` # Number of columns
CELLPAD=2 # Extra spaces between cols
tabs[0]=0 # First tab pos for calculation
i=1
# Each tab stop is the previous stop plus the widest cell in the column plus
# the padding; wc -L gives the longest line length.
while [ $i -le $colnum ]; do
colwidth=`grep ';' $1 |\
sed -e 's/[^;]*;\+$//' |\
cut -d';' -f"$i" |\
wc -L`
prevtab=${tabs[$(( $i - 1 ))]}
tabs[$i]=$(( $colwidth + $prevtab + $CELLPAD ))
i=$(( $i + 1 ))
done
# Width of a column\
# Previous tab position\
# Extra spaces between columns
tabs[0]='' # We don't need the 0 tab pos
tablist=`echo ${tabs[@]}| tr ' ' ','` # Make a comma separated
# tab position list
# expand expands only tab characters, so we have to change csv delimiters
# to tab chars
tr ';' '\t' < $1| expand --tabs=$tablist
| true
|
658128d96e3422f586cddbefb931a8bed0464503
|
Shell
|
flazzarini/munin-apcups
|
/apcups_apcaccess_temp.sh
|
UTF-8
| 452
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Munin plugin: report the APC UPS internal temperature (ITEMP) via apcaccess.
APCACCESS='/sbin/apcaccess'
# Extract the ITEMP value in degrees Celsius from apcaccess output.
function GetValue {
localoutput=`$APCACCESS | grep ITEMP | sed 's/.*: \(.*\) C$/\1/' | tr -d ' '`
echo $localoutput
}
# Munin calls the plugin with "config" to get the graph definition.
if [[ $1 == config ]]; then
echo "graph_title APC UPS Temperature"
echo "graph_vlabel Degrees Celcius"
echo "graph_category power"
echo "temperature.label Temperature of APC"
echo "graph_info Temperature of APC"
exit 0
fi
# Normal invocation: emit the current reading.
echo "temperature.value `GetValue`"
| true
|
95cfc9b95157405407035271e324457d61f7d345
|
Shell
|
xiaoronglv/Practice-Cplusplus
|
/2018-ACM-SIGMOD-Contest/4-papercup/run.sh
|
UTF-8
| 343
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Launch the contest Driver binary, optionally under a debugger/profiler:
#   -g   run under gdb
#   -vc  run under valgrind cachegrind
#   -vm  run under valgrind with full leak checking
# Resolve the directory this script lives in, so relative paths work from anywhere.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
if [ "$1" == "-g" ]; then
gdb ${DIR}/build/release/Driver
elif [ "$1" == "-vc" ]; then
valgrind --tool=cachegrind ${DIR}/build/release/Driver
elif [ "$1" == "-vm" ]; then
valgrind --leak-check=full -v ${DIR}/build/release/Driver
else
${DIR}/build/release/Driver
fi
|
393963db91cd50e036a0d92ee02b163629dfa1d6
|
Shell
|
koobonil/Boss2D
|
/Boss2D/addon/_old/webrtc-qt5.11.2_for_boss/modules/audio_coding/codecs/isac/fix/test/QA/runiSACfault.txt
|
UTF-8
| 1,015
| 3.03125
| 3
|
[
"MIT",
"LicenseRef-scancode-google-patent-license-webrtc",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"LicenseRef-scancode-takuya-ooura",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown",
"MS-LPL",
"LicenseRef-scancode-google-patent-license-webm"
] |
permissive
|
#!/bin/bash
(set -o igncr) 2>/dev/null && set -o igncr; # force bash to ignore \r character
# QA fault-injection driver for the iSAC fixed-point codec: run kenny.exe
# with each fault number against one speech file and log the results.
LOGFILE=logfault.txt
echo "START FAULT TEST" > $LOGFILE
ISAC=../Release/kenny.exe
ISACFIXFLOAT=../Release/testFixFloat.exe
# Input/channel lists; only $file below is actually exercised by this script.
INFILES=$(cat InputFiles.txt)
SUBSET=$(cat InputFilesFew.txt)
CHANNELFILES=$(cat ChannelFiles.txt)
CHANNELLIST=($(cat ChannelFiles.txt))
INDIR=../data/orig
OUTDIR=../dataqaft
mkdir -p $OUTDIR
TARGETRATE=(10000 15000 20000 25000 30000 32000)
# Fault scenario numbers understood by kenny.exe's -F flag (8 is skipped).
FAULTTEST=(1 2 3 4 5 6 7 9)
index1=0
file=wb_contspeech.pcm
# Fault test
for testnr in ${FAULTTEST[*]}
do
$ISAC 32000 -F $testnr $INDIR/"$file" $OUTDIR/ft$testnr"$file" >> $LOGFILE
done
# Fault test number 10, error in bitstream
$ISAC 32000 -F 10 $INDIR/"$file" $OUTDIR/ft10_"$file" >> $LOGFILE
$ISAC 32000 -F 10 -PL 10 $INDIR/"$file" $OUTDIR/ft10plc_"$file" >> $LOGFILE
$ISAC 32000 -F 10 -NB 1 $INDIR/"$file" $OUTDIR/ft10nb1_"$file" >> $LOGFILE
$ISAC 32000 -F 10 -NB 2 -PL 10 $INDIR/"$file" $OUTDIR/ft10nb2_"$file" >> $LOGFILE
echo DONE!
| true
|
97767b1b663a4e16a91ecc8bda601a56e7e238df
|
Shell
|
emagii/at91bootstrap
|
/ALLCONFIGS
|
UTF-8
| 334
| 3.046875
| 3
|
[
"LicenseRef-scancode-bsd-atmel"
] |
permissive
|
#!/bin/sh
# Rebuild every defconfig listed in $CONFIG_LIST with the Ubuntu ARM
# cross-compiler, refreshing each board's saved configuration.
# Support Ubuntu ARM C compiler
export CROSS_COMPILE=/usr/bin/arm-linux-gnueabi-
# '.' instead of 'source': this script runs under /bin/sh, where 'source'
# is a bashism and not guaranteed to exist.
. ./scripts/mk-config-list.sh
# Regenerate one board configuration ($1) and rebuild it.
reconfig ()
{
	make "$1"_defconfig
	make oldconfig
	make CROSS_COMPILE=arm-linux- update
}
# Run reconfig for every name in $CONFIG_LIST (set by the sourced script).
reconfig_all ()
{
	for f in $(cat "$CONFIG_LIST") ; do
		reconfig "$f"
	done
}
reconfig_all
echo
echo
echo "### Done!"
| true
|
b30570cb5ea8485cb7848836cb5d7ee9fb92341c
|
Shell
|
fluffynuts/scripts
|
/mknoise
|
UTF-8
| 1,626
| 4.03125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/zsh
# Generate a noise audio file with sox (mp3-encoded via lame when the output
# extension is .mp3):
#   -l <len>   duration hh:mm:ss (default 8 hours)
#   -t <type>  noise/waveform type (default brownnoise)
#   -o <file>  output path (default <type>.mp3)
len='8:00:00'
noisetype="brownnoise"
outputfile=""
lastarg=""
function usage {
echo "$(basename $0) {-l <len>} {-t <type} {-o <outputfile}"
echo " where <len> is a time spec in hh:mm:ss (eg 00:30:42)"
echo " and <type> is one of:"
echo " pink / pinknoise"
echo " white / whitenoise"
echo " brown / brownnoise"
echo " or one of: sine,square,triangle,sawtooth,trapezium,exp,tdfpnoise"
echo " default output file it <type>.mp3"
}
# Two-pass flag parsing: remember the flag in $lastarg, then consume its
# value on the following iteration.
while test ! -z "$1"; do
case "$1" in
"-l"|"-t"|"-o")
lastarg="$1"
;;
*)
case "$lastarg" in
"-l")
len="$1"
lastarg=""
;;
"-t")
noisetype="$1"
# Expand short aliases and sanity-check against sox's synth types.
case "$noisetype" in
"white"|"brown"|"pink")
noisetype="${noisetype}noise"
;;
"sine"|"square"|"triangle"|"sawtooth"|"trapezium"|"exp"|"whitenoise"|"tdpfnoise"|"pinknoise"|"brownnoise"|"pluck")
;;
*)
echo "Unrecognised noise type $noisetype; sox will most likely barf"
;;
esac
lastarg=""
;;
"-o")
outputfile="$1"
lastarg=""
;;
"-h"|"--help")
usage
exit 1
;;
*)
usage
exit 1
;;
esac
;;
esac
shift
done
if test -z "$outputfile"; then outputfile="${noisetype}.mp3"; fi
# Pick the output format from the (lowercased) file extension.
ext="$(echo $outputfile | awk -F . '{print $NF}' | tr '[A-Z]' '[a-z]')"
echo "generating $len of $noisetype noise into $outputfile"
if test "$ext" = "mp3"; then
sox -V1 -c 1 -r 16k -t sl - -t wav - synth $len ${noisetype} band -n 1200 200 tremolo 20 .1 < /dev/zero | lame --preset hifi -s 16 - $outputfile
else
sox -V1 -c 1 -r 16000 -t sl - $outputfile synth $len ${noisetype} band -n 1200 200 tremolo 20 .1 < /dev/zero
fi
|
3299aa81416ea8c8b3543c5785f196b556000ae7
|
Shell
|
moteesh-in2tive/asdf-minio
|
/bin/install
|
UTF-8
| 1,048
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
set -o pipefail
# Download the minio server binary for version $1 into $2/bin and mark it
# executable. The download URL is platform-specific (see get_download_url).
install_minio() {
local version install_path bin_install_path binary_path download_url
version="${1}"
install_path="${2}"
bin_install_path="${install_path}/bin"
binary_path="${bin_install_path}/minio"
download_url=$(get_download_url "${version}")
echo "Creating bin directory"
mkdir -p "${bin_install_path}"
echo "Downloading minio from ${download_url}"
# --fail: make curl exit non-zero on HTTP errors instead of saving the body.
curl --fail -Lo "${binary_path}" "${download_url}"
chmod +x "${binary_path}"
}
get_kernel() {
  # Kernel name in lower case (e.g. "linux", "darwin").
  uname -s | tr '[:upper:]' '[:lower:]'
}
get_arch() {
  # Map the machine hardware name onto minio's release-architecture naming;
  # anything unrecognised passes through unchanged.
  local machine
  machine="$(uname -m)"
  case "${machine}" in
    x86_64)
      echo "amd64"
      ;;
    armv7l | aarch64)
      echo "arm"
      ;;
    *)
      echo "${machine}"
      ;;
  esac
}
get_download_url() {
  # Build the release-archive URL for the given minio version on this platform.
  # $1: minio release timestamp, e.g. 2021-04-22T15-44-28Z.
  local version kernel arch filename
  version=$1
  kernel="$(get_kernel)"
  arch="$(get_arch)"
  filename="minio.RELEASE.${version}"
  # Append the archive file name; the previous placeholder here produced a
  # broken URL with no file component.
  echo "https://dl.min.io/server/minio/release/${kernel}-${arch}/archive/${filename}"
}
install_minio "${ASDF_INSTALL_VERSION}" "${ASDF_INSTALL_PATH}"
| true
|
473e8076cf83bb0b97371d09fa55569c529c18a9
|
Shell
|
RuchirDixit/CodinCLub-Day07-Assignment
|
/day07-Assignment/secondMinAndMax.sh
|
UTF-8
| 626
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate 10 random three-digit numbers and report the largest, second
# largest, smallest and second smallest values among them.
for (( i=0; i<10; i++ ))
do
    Num[i]=$((100+RANDOM%900))
done
echo ${Num[*]}
# First pass: plain maximum and minimum.
max=${Num[0]}
min=${Num[0]}
for (( i=1; i<10; i++ ))
do
    if (( Num[i] > max )); then
        max=${Num[i]}
    fi
    if (( Num[i] < min )); then
        min=${Num[i]}
    fi
done
# Second pass: second extremes, seeded from the opposite extreme and
# tightened. (Fixed: the original scanned Num[i+1], reading one element past
# the end of the array, and seeded secondMax with Num[0], which is wrong
# whenever Num[0] happens to be the maximum.)
secondMax=$min
secondMin=$max
for (( i=0; i<10; i++ ))
do
    if (( Num[i] < max && Num[i] > secondMax )); then
        secondMax=${Num[i]}
    fi
    if (( Num[i] > min && Num[i] < secondMin )); then
        secondMin=${Num[i]}
    fi
done
echo "Max:"$max
echo "2nd max:"$secondMax
echo "Min:"$min
echo "2nd min:"$secondMin
| true
|
8bf97c9d543d66c9aa7bda9ee33055db0b336418
|
Shell
|
TsutomuNakamura/dotfiles
|
/bin/open-with-google-chrome
|
UTF-8
| 430
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
main() {
    # Open the given URLs/files with Google Chrome: try the PATH first, then
    # fall back to the standard install location. Returns chrome's status,
    # or 1 when no argument or no chrome binary is found.
    [[ $# -eq 0 ]] && {
        echo "ERROR: There are no arguments" >&2
        return 1
    }
    # > /dev/null: command -v prints the resolved path on success, which must
    # not leak into this script's stdout.
    command -v google-chrome > /dev/null && {
        google-chrome "$@"
        return $?
    }
    [[ -f /opt/google/chrome/google-chrome ]] || {
        echo "ERROR: There are no command found for google-chrome" >&2
        return 1
    }
    /opt/google/chrome/google-chrome "$@"
    return $?
}
main "$@"
| true
|
bd4a64d554fff136d028a670e58db501b813dc7a
|
Shell
|
mfkiwl/gps-freq-counter
|
/fpga/quartus/full.sh
|
UTF-8
| 798
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Build the gps-freq-counter FPGA bitstream and flash it over JTAG.
#   (no arg)   firmware + Chisel build, full Quartus compile, flash
#   mifonly    only refresh memory-init files, reassemble, flash
#   flashonly  skip every build step and just flash the existing image
set -e
COMMAND="$1"
PROJECT="gps-freq-counter"
# Print a cyan status line.
function log()
{
echo -ne '\033[1;36m'
echo "[STATUS]: " "$@"
echo -ne '\033[0m'
}
# Work from the directory the script lives in.
cd $(dirname $0)
OUTDIR=output_files
SOFFILE=${OUTDIR}/${PROJECT}.sof
SVFFILE=${OUTDIR}/${PROJECT}.svf
if [ "$COMMAND" != "flashonly" ]; then
log "Building the firmware"
pushd ../../src/build
make
popd
log "Building Chisel files"
pushd ..
sbt run
popd
fi
if [ "$COMMAND" == "mifonly" ]; then
log "Rebuilding MIFs"
quartus_cdb --update_mif ${PROJECT}
quartus_asm ${PROJECT}
elif [ "$COMMAND" == "flashonly" ]; then
log "Not doing anything"
else
log "Compiling"
quartus_sh --flow compile ${PROJECT}
fi
# Convert the .sof to a temporary SVF and play it over JTAG.
log "Creating SVF"
quartus_cpf -c -q 12.0MHz -g 3.3V -n p ${SOFFILE} ${SVFFILE}
log "Flashing"
jtag ./flash.urjtag
rm $SVFFILE
| true
|
73198d0f232ab661907bef7017dedc4dc160037c
|
Shell
|
lsl/SlackBuilds
|
/tor_browser/tor_browser_bundle-2.2.38-2/set-tor-user
|
UTF-8
| 3,602
| 4.4375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# set-tor-user ( 20120903 )
#
# Configures Tor Browser Bundle to run as the specified user when ownership belongs to root.
# This script must be placed it TBB's root directory (with start-tor-browser)
# Anyone can freely modify and/or distribute this script without restrictions
#
# Written by Luke Williams ( xocel@iquidus.org )
# Print usage/help text.
function usage {
# Outputs usage
echo "Configures Tor Browser Bundle to be run by the specified user"
echo "Usage: `basename $0` [options] [username] "
echo " "
echo "Options:"
echo " -g, --group [groupname] , Sets group"
echo " default: \"users\""
echo " -r, --restore , Removes all files created by `basename $0`"
echo " from the system and restores TBB to its"
echo " orignal state."
echo " "
}
#Make sure script is being run as root.
if [ "`id -u`" -ne 0 ]; then
echo "This script needs to be run as root"
exit 1
fi
#Check if script is being run through a symlink.
# Resolve the real script path so CWD below points at the TBB root directory.
MYNAME="$0"
if [ -L "$MYNAME" ]; then
MYNAME="`readlink -f "$MYNAME" 2>/dev/null`"
if [ "$?" -ne 0 ]; then
# Ugh.
echo "`basename $0` cannot be run using a symlink on this operating system."
fi
fi
#make sure we are in the right directory.
CWD="`dirname "$MYNAME"`"
test -d "$CWD" && cd "$CWD"
if [ $# -eq 0 ]; then
usage
exit 1
fi
# Defaults: group "users"; ORIG is where the pristine Data dir is kept.
GROUP="users"
ORIG="$CWD/Data.orig"
# Parse options until the positional username is reached.
# -g/--group takes a value; -r/--restore undoes everything and exits.
while [ 0 ]; do
	if [ "$1" = "-g" ] || [ "$1" = "--group" ]; then
		# The group flag requires a non-empty argument.
		# (Fixed: this used to test -z, so a *present* value fell through
		# to usage/exit and a missing one was shifted past the end.)
		if [ -n "$2" ]; then
			GROUP=$2
			shift 2
		else
			usage
			exit 1
		fi
	elif [ "$1" = "-r" ] || [ "$1" = "--restore" ]; then
		# Restore mode: drop per-user TBB copies, remove the symlinks this
		# script created, and put the original Data directory back.
		echo "Removing TBB files from home directories"
		rm -rfv /home/*/.TBB > /dev/null 2>&1
		echo "Removing symlinks"
		rm -rvf "$CWD/.config" > /dev/null 2>&1
		rm -rvf "$CWD/.kde" > /dev/null 2>&1
		rm -rvf "$CWD/.mozilla" > /dev/null 2>&1
		rm -rvf "$CWD/.nv" > /dev/null 2>&1
		echo "Restoring Data directory"
		if [ -e "$ORIG" ]; then
			rm -v "$CWD/Data" > /dev/null 2>&1
			mv -v "$ORIG" "$CWD/Data" > /dev/null 2>&1
		fi
		echo "All operations have completed successfully"
		exit 0
	else
		# First non-option argument is the username.
		USERNAME=$1
		break
	fi
done
#Check if user exists
USER_EXISTS=$(grep -c ^$USERNAME: /etc/passwd)
if [ $USER_EXISTS -ne "1" ]; then
echo "Invalid username: $USERNAME"
exit 1
fi
#Check user belongs to group
IN_GROUP=0 #1 if user belongs to group, otherwise 0.
USER_GROUPS=$(echo $(groups $USERNAME) | tr " " "\n")
for g in $USER_GROUPS
do
if [ $g == $GROUP ]
then
IN_GROUP=1
fi
done
if [ $IN_GROUP -ne 1 ]; then
echo "$USERNAME does not belong to group: $GROUP"
exit 1
fi
#check to see if script has been run before.
#If not, rename Data dir.
if [ ! -d $ORIG ]; then
mv "$CWD/Data" $ORIG
fi
# Per-user writable copy of the bundle data lives in the user's home.
TBB="/home/$USERNAME/.TBB"
#check to see if .TBB exists in specified users home dir.
#If not, create ~/.TBB, copy Data dir into it and create conf dirs.
if [ ! -d $TBB ]; then
mkdir $TBB
cp -R $ORIG "$TBB/Data"
mkdir "$TBB/.config"
mkdir "$TBB/.kde"
mkdir "$TBB/.mozilla"
mkdir "$TBB/.nv"
chown -R $USERNAME.$GROUP $TBB
fi
#check if symlinks exist, if so remove them.
if [ -e "$CWD/Data" ]; then
rm "$CWD/Data"
rm "$CWD/.config"
rm "$CWD/.kde"
rm "$CWD/.mozilla"
rm "$CWD/.nv"
fi
#create new symlinks.
# Point the bundle directory at the user's writable copies.
ln -s "$TBB/Data" ./
ln -s "$TBB/.kde" ./
ln -s "$TBB/.config" ./
ln -s "$TBB/.mozilla" ./
ln -s "$TBB/.nv" ./
#Configuration complete.
echo "Tor Browser Bundle is now configured to be run by $USERNAME"
| true
|
246c4e38102028cd73ad63c510573e65082b0675
|
Shell
|
sveinnfannar/advent-of-code
|
/2020/new-day.sh
|
UTF-8
| 358
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Scaffold a new Advent of Code 2020 day ($1): create the day directory, seed
# both part files from template.py and download the puzzle input.
# Requires $AOC_SESSION, the adventofcode.com session cookie.
if [[ -z $1 ]]; then
echo "usage: new-day.sh <day>"
exit 1
fi
if [[ -z $AOC_SESSION ]]; then
echo "AOC_SESSION environment var is unset"
exit 1
fi
# Trace commands and stop on the first failure from here on.
set -xe
mkdir $1
cp template.py $1/$1_1.py
touch $1/$1_2.py
curl --cookie "session=$AOC_SESSION" https://adventofcode.com/2020/day/$1/input > $1/input.txt 2> /dev/null
# Show the first lines of the input as a sanity check.
head $1/input.txt
| true
|
6700b33fee7bdbe3c662deeafb4080960b2e248c
|
Shell
|
tatthien/dotfiles
|
/install
|
UTF-8
| 656
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap dotfiles on macOS: install Brewfile packages, symlink configs
# from ~/dotfiles into place and register the global gitignore.
GITIGNORE_PATH=~/.config/.gitignore
if [[ ! -x "$(command -v brew)" ]]; then
echo "brew could not be found"
exit
fi
echo ">>> brew bundle install"
brew bundle install
# -sfv: symbolic, overwrite existing links, verbose.
echo ">>> creating symbolic links"
ln -sfv ~/dotfiles/nvim ~/.config
ln -sfv ~/dotfiles/alacritty ~/.config
ln -sfv ~/dotfiles/.tmux.conf ~/.tmux.conf
ln -sfv ~/dotfiles/tmuxline ~/.config/tmuxline
ln -sfv ~/dotfiles/starship.toml ~/.config
ln -sfv ~/dotfiles/config.fish ~/.config/fish
ln -sfv ~/dotfiles/gitignore ~/.config/.gitignore
echo ">>> setting up global .gitignore file: $GITIGNORE_PATH"
git config --global core.excludesfile $GITIGNORE_PATH
echo ">>> done"
| true
|
7975230c63414e04611de4bb93d179b60e945a73
|
Shell
|
scottellis/meta-jumpnow
|
/recipes-qt/qt4/files/qte.sh
|
UTF-8
| 395
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Qt/Embedded (QWS) environment for a 480x272 linuxfb display; use tslib for
# the touchscreen when the device node exists.
if [ -e /dev/input/touchscreen0 ]
then
QWS_MOUSE_PROTO=Tslib:/dev/input/touchscreen0
export QWS_MOUSE_PROTO
fi
QWS_SIZE=480x272
export QWS_SIZE
# mmHeight/mmWidth describe the physical panel size for correct DPI.
QWS_DISPLAY="linuxfb:mmHeight=53:mmWidth=95"
export QWS_DISPLAY
# For development convenience, can remove later for production
# NOTE(review): 'source' is a bashism under #!/bin/sh; POSIX shells use '.' —
# verify which shell actually runs this on the target image.
if [ -e /usr/share/qtopia/environment-setup ]
then
source /usr/share/qtopia/environment-setup
fi
| true
|
2dfb064322b4f750b2ebc2271336d1f62b206e45
|
Shell
|
pjotrsavitski/icoworker
|
/i18n/generate-mo.sh
|
UTF-8
| 970
| 2.640625
| 3
|
[] |
no_license
|
# Generate .mo file
# Compile each locale's teke.po catalogue into teke.mo for every supported
# UI language, removing any stale .mo first so msgfmt always writes fresh.
for entry in en_GB:ENG et_EE:EST ru_RU:RUS fi_FI:FIN sv_SE:SWE
do
    locale=${entry%%:*}
    label=${entry##*:}
    dir="${locale}.UTF-8/LC_MESSAGES"
    if [ -f "$dir/teke.mo" ]
    then
        rm "$dir/teke.mo"
    fi
    msgfmt -o "$dir/teke.mo" "$dir/teke.po"
    echo "TeKe $label Done"
done
| true
|
679164c595106653821b07f7a78591a0cd2e4475
|
Shell
|
DariHT8/EvidenciasPC
|
/Bash/E5/E5_Bash.sh
|
UTF-8
| 787
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Check every address in E5_Emails.txt against the HaveIBeenPwned v3 API and
# report, per address, whether it appears in known breaches and where.
#key = cfccfca8a55b497f9f3c5b22a9cd132a
# Team members:
#Angela Montoya Aldape
#Dariela Hurtado Torres
#Ian Israel Leija Medina
# Interpret the API response stored in $invocacion for the current address.
function verificacion {
# Rate-limit pause between HIBP requests.
sleep 5
if [ "$invocacion" == "" ];
then
echo "Su correo no ha sido vulnerado :)"
else
# Pull the quoted values after colons (breach names) out of the JSON body;
# an empty result on a non-empty response means the API key was rejected.
servicios=$(echo $invocacion | grep -Po ':".*?"' | sed -e 's/^.//')
if [ "$servicios" == "" ];
then
echo "Ha insertado mal su llave"
exit
else
echo "Su correo ha sido vulnerado :("
echo "Servicios donde ocurrió dicha vulneración:"
echo $invocacion | grep -Po ':".*?"' | sed -e 's/^.//'
fi
fi
}
# Read the API key without echoing it, then query once per listed address.
read -sp "Inserte la llave:" key
while read p;
do
invocacion=$(curl -s https://haveibeenpwned.com/api/v3/breachedaccount/"$p" -H 'hibp-api-key:'$key)
echo -e "\nVerificando el correo: "$p
verificacion
done < E5_Emails.txt
| true
|
eba311d046ccb6333c695638376d0bfdd1178d09
|
Shell
|
book000/server-backup
|
/FullBackup/rsync.sh
|
UTF-8
| 1,008
| 3.484375
| 3
|
[] |
no_license
|
#/bin/bash
SCRIPT_DIR=$(cd $(dirname $0); pwd)
usage_exit() {
echo "Usage: $0 [-h host] [-r port] [-u user] [-i identity] [-p passphrase] [-f from] [-o output]" 1>&2
exit 1
}
while getopts h:r:u:i:p:f:o: OPT
do
case $OPT in
h) HOSTNAME=$OPTARG
;;
r) PORT=$OPTARG
;;
u) USERNAME=$OPTARG
;;
i) IDENTITY=$OPTARG
;;
p) PASSPHRASE=$OPTARG
;;
f) FROM=$OPTARG
;;
o) OUTPUT=$OPTARG
;;
\?) usage_exit
;;
esac
done
LOGPATH="$SCRIPT_DIR/rsync.log"
TODAY=$(date +%Y%m%d)
SSHCMD="rsync -arhvz --progress --delete --backup --exclude-from='${SCRIPT_DIR}/ignores' -e 'ssh -p $PORT -i $IDENTITY' --rsync-path='sudo rsync' --backup-dir="${OUTPUT}$TODAY" $USERNAME@$HOSTNAME:$FROM ${OUTPUT}latest 2>&1 | tee $LOGPATH"
expect -c "
set timeout 30
spawn sh -c \"$SSHCMD\"
expect ":"
send \"$PASSPHRASE\n\"
interact
"
| true
|
27f951ec8c0680d47f2de25de8f75866126c7642
|
Shell
|
danielrowles-wf/cheapskate
|
/build.sh
|
UTF-8
| 1,129
| 3.53125
| 4
|
[] |
no_license
|
#! /bin/bash
set -e
FRUGAL=drydock.workiva.org/workiva/frugal:35054
PREFIX=github.com/danielrowles-wf/cheapskate/gen-go/
IMPORT="github.com/Workiva/frugal/lib/go"
TOPDIR=$PWD
echo "Building parsimony"
go get github.com/Workiva/parsimony
if [ -e ./gen-go ]; then
echo "Remove existing gen-go directory"
rm -Rf gen-go
fi
if [ -e ./stage ]; then
echo "Remove existing stage directory"
rm -Rf stage
fi
echo "Fetch all required IDL files"
$GOPATH/bin/parsimony --staging stage stingy.frugal
echo "Generate GO code"
docker run -u $UID -v "$(pwd):/data" $FRUGAL frugal --gen=go:package_prefix=$PREFIX -r stage/stingy.frugal
echo "Fix missing imports"
for bork in stingy/f_stingyservice_service.go workiva_frugal_api/f_baseservice_service.go; do
if ! grep $IMPORT ./gen-go/$bork; then
echo "Missing <$IMPORT> in ./gen-go/$bork - add manually" >&2
sed -i -e "s!import (!import (\n \"$IMPORT\"!" ./gen-go/$bork;
fi
done
for dir in cheapskate client; do
echo "Building <$dir>"
cd $TOPDIR/$dir
if [ -e $dir ]; then
rm $dir
fi
go build .
cd $TOPDIR
done
| true
|
e3cab1bdf19ba89560cf71ef94134513712126d3
|
Shell
|
gaboriaudj/dots
|
/.profile
|
UTF-8
| 1,117
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
# __ _ _
# _ __ _ __ ___ / _(_) | ___
# | '_ \| '__/ _ \| |_| | |/ _ \
# _| |_) | | | (_) | _| | | __/
# (_) .__/|_| \___/|_| |_|_|\___|
# |_|
#
# Profile file. Runs on login.
#
# default programs/environmental variables
export PATH="$PATH:$HOME/.local/bin"
export EDITOR="vim"
export TERMINAL="urxvt"
export BROWSER="firefox"
export READER="zathura"
# Export XDG environmental variables from '~/.config/user-dirs.dirs'
eval "$(sed 's/^[^#].*/export &/g;t;d' ~/.config/user-dirs.dirs)"
# for VIM to read the moved directory
export VIMINIT="source ~/.config/vim/vimrc"
export TEXMFHOME="~/.config/texmf"
#export TEXMFVAR="~/.config/texlive/texmf-var"
#export TEXMFCONFIG="~/.config/texive/texmf-config"
# load shortcuts
##[ -f ~/.scripts/shortcuts.sh ] && ~/.scripts/shortcuts.sh
# source the .bashrc file
[ -f ~/.bashrc ] && source ~/.bashrc
# start graphical server on tty1 if i3 is not already running
[ "$(tty)"="/dev/tty1" ] && ! pgrep -x Xorg >/dev/null && exec startx
# switch Escape and Caps Lock in tty
sudo -n loadkeys ~/.local/bin/ttymaps.kmap 2>/dev/null
| true
|
5ab8fcde83a973f631b12afe53b29eb9d41b1fb3
|
Shell
|
AdamMcCarthyCompSci/OS-Assignment
|
/project-AdamMcCarthyCompSci-master/P.sh
|
UTF-8
| 278
| 3.390625
| 3
|
[] |
no_license
|
#! /bin/bash
# New P.sh
if [ -z "$1" ]; then
echo "Usage $0 mutex-name"
exit 1
else
# You can just use the P.sh script itself to link to
#We know this file will always exist (we're running it after all)
while ! ln "$0" "$1-lock" 2>/dev/null; do
sleep 1
done
exit 0
fi
| true
|
2d6deb1d942f9bcf1c1df711e51423429f7d8a5c
|
Shell
|
FancyChaos/debian-i3
|
/installations/install_applications.sh
|
UTF-8
| 1,752
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
echo "Installing non apt applications..."
sleep 1
### Installing custom applications from github
cd /tmp/
### Install Alacritty
wget https://github.com/alacritty/alacritty/releases/download/v0.4.3/Alacritty-v0.4.3-ubuntu_18_04_amd64.deb
sudo dpkg -i Alacritty*.deb
### Install bat
wget https://github.com/sharkdp/bat/releases/download/v0.15.4/bat_0.15.4_amd64.deb
sudo dpkg -i bat*.deb
### Install neovim
git clone https://github.com/neovim/neovim.git
cd neovim/
git checkout stable
make CMAKE_BUILD_TYPE=Release
sudo make install
cd /tmp/
### Install neovim plugins
curl -fLo $HOME/.local/share/nvim/site/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
pip3 install --user neovim
mkdir -p $HOME/.config/nvim/plugged/
cp $SCRIPTPATH/.config/nvim/init.vim $HOME/.config/nvim/
nvim -c PlugInstall -c UpdateRemotePlugins -c quitall
### Install rust
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
### Install Visual Code
wget -O code.deb "https://go.microsoft.com/fwlink/?LinkID=760868"
sudo dpkg -i code.deb
### Install Visual Code plugins
while read p; do
code --force --install-extension $p
done <$SCRIPTPATH/installations/code_extensions
### Install Spotify
sudo snap install spotify
### Install deadbeef with plugins
wget -O deadbeef.deb https://sourceforge.net/projects/deadbeef/files/travis/linux/1.8.4/deadbeef-static_1.8.4-1_amd64.deb
sudo dpkg -i deadbeef.deb
### Install newest firefox via flatpak
sudo apt install flatpak
sudo flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo
flatpak install flathub org.mozilla.firefox
ln -s /var/lib/flatpak/exports/bin/org.mozilla.firefox /usr/local/bin/firefox
| true
|
942739bc8bfa7bedf438489c46e9db7c71e18b0b
|
Shell
|
baoquocnguyen/HAS
|
/Server_machine/Test/05_automation_tool/01_performance_QUIC_QUIC/server_bandwith_100mb_netem.sh
|
UTF-8
| 514
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
echo run initial script!
# add path for file
# initial environment
export GOPATH=$HOME/work
export PATH=$PATH:$GOPATH/bin
export GOROOT=/usr/local/go/
# kill process which hold port 6121
# netstat -tulnap
sudo kill -9 $(lsof -t -i:6121)
# setup network condition delay 100 miliseconds
sudo tc qdisc del dev eth0 root
sudo tc qdisc add dev eth0 root netem rate 100000kbit
# restart quic_go server
go run /home/quoc/work/src/github.com/lucas-clemente/quic-go/example/main.go -www /var/www/html/
| true
|
033290e24004028ceb1b08275160e0d4dceda3f0
|
Shell
|
tensorflow/text
|
/oss_scripts/pip_package/build_pip_package.sh
|
UTF-8
| 2,564
| 4.1875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Tool to build the TensorFlow Text pip package.
#
# Usage:
# bazel build oss_scripts/pip_package:build_pip_package
# bazel-bin/oss_scripts/build_pip_package
#
# Arguments:
# output_dir: An output directory. Defaults to `/tmp/tensorflow_text_pkg`.
set -e # fail and exit on any command erroring
die() {
echo >&2 "$@"
exit 1
}
osname="$(uname -s | tr 'A-Z' 'a-z')"
echo $osname
function is_windows() {
# On windows, the shell script is actually running in msys
[[ "${osname}" =~ msys_nt*|mingw*|cygwin*|uwin* ]]
}
function is_macos() {
[[ "${osname}" == "darwin" ]]
}
function is_nightly() {
[[ "$IS_NIGHTLY" == "nightly" ]]
}
function abspath() {
cd "$(dirname $1)"
echo "$PWD/$(basename $1)"
cd "$OLDPWD"
}
plat_name=""
if is_macos; then
if [[ x"$(arch)" == x"arm64" ]]; then
plat_name="--plat-name macosx_11_0_arm64"
else
plat_name="--plat-name macosx-10.9-x86_64"
fi
fi
main() {
local output_dir="$1"
if [[ -z "${output_dir}" ]]; then
output_dir="/tmp/tensorflow_text_pkg"
fi
mkdir -p ${output_dir}
output_dir=$(abspath "${output_dir}")
echo "=== Destination directory: ${output_dir}"
if [[ ! -d "bazel-bin/tensorflow_text" ]]; then
die "Could not find bazel-bin. Did you run from the root of the build tree?"
fi
local temp_dir="$(mktemp -d)"
trap "rm -rf ${temp_dir}" EXIT
echo "=== Using tmpdir ${temp_dir}"
if is_windows; then
runfiles="bazel-bin/oss_scripts/pip_package/build_pip_package.exe.runfiles"
else
runfiles="bazel-bin/oss_scripts/pip_package/build_pip_package.runfiles"
fi
cp -LR \
"${runfiles}/org_tensorflow_text/tensorflow_text" \
"${temp_dir}"
if is_nightly; then
cp "${runfiles}/org_tensorflow_text/oss_scripts/pip_package/setup.nightly.py" \
"${temp_dir}"
else
cp "${runfiles}/org_tensorflow_text/oss_scripts/pip_package/setup.py" \
"${temp_dir}"
fi
cp "${runfiles}/org_tensorflow_text/oss_scripts/pip_package/MANIFEST.in" \
"${temp_dir}"
cp "${runfiles}/org_tensorflow_text/oss_scripts/pip_package/LICENSE" \
"${temp_dir}"
pushd "${temp_dir}" > /dev/null
if (which python3) | grep -q "python3"; then
installed_python="python3"
elif (which python) | grep -q "python"; then
installed_python="python"
fi
# Build pip package
if is_nightly; then
$installed_python setup.nightly.py bdist_wheel --universal $plat_name
else
$installed_python setup.py bdist_wheel --universal $plat_name
fi
cp dist/*.whl "${output_dir}"
}
main "$@"
| true
|
195d6a2b51d4978cefbb0f2cd968a7e5f640c363
|
Shell
|
apache/incubator-pekko
|
/remote/src/test/resources/ssl/gencerts.sh
|
UTF-8
| 868
| 2.640625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"Unlicense",
"CC0-1.0"
] |
permissive
|
#!/bin/bash
export PW=`cat password`
. gen-functions.sh
rm *.crt
rm *.p12
rm *.pem
./genca.sh
## some server certificates
createExampleECKeySet "one" "serverAuth" "DNS:one.example.com,DNS:example.com"
createExampleECKeySet "two" "serverAuth" "DNS:two.example.com,DNS:example.com"
createExampleECKeySet "island" "serverAuth" "DNS:island.example.com"
## a client certificate
createExampleECKeySet "client" "clientAuth" "DNS:client.example.com,DNS:example.com"
## node.example.com is part of the example.com dataset (in ./ssl/ folder) but not the artery-nodes
createExampleRSAKeySet "node" "serverAuth,clientAuth" "DNS:node.example.com,DNS:example.com"
createExampleRSAKeySet "rsa-client" "clientAuth" "DNS:rsa-client.example.com,DNS:example.com"
## a certificate valid for both server and client (peer-to-peer)
## with RSA keys
./gen-artery-nodes.example.com.sh
| true
|
778976bd14b495c4e327d8064ec8ec7a7450c1d3
|
Shell
|
jakirkham/rapids-compose
|
/etc/conda-merge.sh
|
UTF-8
| 2,748
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -Eeo pipefail
cd "$RAPIDS_HOME"
####
# Merge the rapids projects' envs into one rapids.yml environment file
####
cat << EOF > rapids.yml
name: rapids
channels:
- rapidsai
- nvidia
- rapidsai-nightly
- conda-forge
- defaults
dependencies:
- cmake>=3.17.0,<3.18
- cmake_setuptools
- python=${PYTHON_VERSION}
- pip:
- ptvsd
- pytest-xdist
EOF
CUDA_TOOLKIT_VERSION=${CONDA_CUDA_TOOLKIT_VERSION:-$CUDA_SHORT_VERSION};
cat "$RMM_HOME/conda/environments/rmm_dev_cuda10.0.yml" \
| sed -r "s/cudatoolkit=10.0/cudatoolkit=$CUDA_TOOLKIT_VERSION/g" \
| sed -r "s!rapidsai/label/cuda10.0!rapidsai/label/cuda$CUDA_TOOLKIT_VERSION!g" \
> rmm.yml
cat "$CUDF_HOME/conda/environments/cudf_dev_cuda10.2.yml" \
| sed -r "s/cudatoolkit=10.2/cudatoolkit=$CUDA_TOOLKIT_VERSION/g" \
| sed -r "s!rapidsai/label/cuda10.2!rapidsai/label/cuda$CUDA_TOOLKIT_VERSION!g" \
> cudf.yml
cat "$CUML_HOME/conda/environments/cuml_dev_cuda10.2.yml" \
| sed -r "s/cudatoolkit=10.2/cudatoolkit=$CUDA_TOOLKIT_VERSION/g" \
| sed -r "s!rapidsai/label/cuda10.2!rapidsai/label/cuda$CUDA_TOOLKIT_VERSION!g" \
> cuml.yml
cat "$CUGRAPH_HOME/conda/environments/cugraph_dev_cuda10.2.yml" \
| sed -r "s/cudatoolkit=10.2/cudatoolkit=$CUDA_TOOLKIT_VERSION/g" \
| sed -r "s!rapidsai/label/cuda10.2!rapidsai/label/cuda$CUDA_TOOLKIT_VERSION!g" \
> cugraph.yml
cat "$CUSPATIAL_HOME/conda/environments/cuspatial_dev_cuda10.2.yml" \
| sed -r "s/cudatoolkit=10.2/cudatoolkit=$CUDA_TOOLKIT_VERSION/g" \
| sed -r "s!rapidsai/label/cuda10.2!rapidsai/label/cuda$CUDA_TOOLKIT_VERSION!g" \
> cuspatial.yml
conda-merge rmm.yml cudf.yml cuml.yml cugraph.yml cuspatial.yml rapids.yml > merged.yml
# Strip out cmake + the rapids packages, and save the combined environment
cat merged.yml \
| grep -v -P '^(.*?)\-(.*?)(rmm|cudf|dask-cudf|cugraph|cuspatial|nvstrings)(.*?)$' \
| grep -v -P '^(.*?)\-(.*?)(cmake=)(.*?)$' \
> rapids.yml
####
# Merge the rapids env with this hard-coded one here for notebooks
# env since the notebooks repos don't include theirs in the github repo
# Pulled from https://github.com/rapidsai/build/blob/d2acf98d0f069d3dad6f0e2e4b33d5e6dcda80df/generatedDockerfiles/Dockerfile.ubuntu-runtime#L45
####
cat << EOF > notebooks.yml
name: notebooks
channels:
- rapidsai
- nvidia
- rapidsai-nightly
- numba
- conda-forge
- defaults
dependencies:
- bokeh
- dask-labextension
- dask-ml
- ipython=${IPYTHON_VERSION:-"7.3.0"}
- ipywidgets
- jupyterlab=1.0.9
- matplotlib
- networkx
- nodejs
- scikit-learn
- scipy
- seaborn
- tensorflow
- umap-learn
- pip:
- graphistry
- git+https://github.com/jacobtomlinson/jupyterlab-nvdashboard.git
EOF
conda-merge rapids.yml notebooks.yml > merged.yml && mv merged.yml notebooks.yml
| true
|
9b09922961e175ef8e20c9119b91dafa5caf335f
|
Shell
|
ShipSoft/shipdist
|
/sodium.sh
|
UTF-8
| 1,056
| 3.078125
| 3
|
[] |
no_license
|
package: sodium
version: v1.0.8
source: https://github.com/jedisct1/libsodium
tag: 1.0.8
build_requires:
- autotools
- "GCC-Toolchain:(?!osx)"
---
#!/bin/sh
rsync -av --delete --exclude="**/.git" $SOURCEDIR/ .
autoreconf -i
./configure --prefix=$INSTALLROOT
make ${JOBS+-j $JOBS}
make install
# Modulefile
MODULEDIR="$INSTALLROOT/etc/modulefiles"
MODULEFILE="$MODULEDIR/$PKGNAME"
mkdir -p "$MODULEDIR"
cat > "$MODULEFILE" <<EoF
#%Module1.0
proc ModulesHelp { } {
global version
puts stderr "ALICE Modulefile for $PKGNAME $PKGVERSION-@@PKGREVISION@$PKGHASH@@"
}
set version $PKGVERSION-@@PKGREVISION@$PKGHASH@@
module-whatis "ALICE Modulefile for $PKGNAME $PKGVERSION-@@PKGREVISION@$PKGHASH@@"
# Dependencies
module load BASE/1.0 ${GCC_TOOLCHAIN_ROOT:+GCC-Toolchain/$GCC_TOOLCHAIN_VERSION-$GCC_TOOLCHAIN_REVISION}
# Our environment
setenv SODIUM_ROOT \$::env(BASEDIR)/$PKGNAME/\$version
prepend-path LD_LIBRARY_PATH \$::env(SODIUM_ROOT)/lib
$([[ ${ARCHITECTURE:0:3} == osx ]] && echo "prepend-path DYLD_LIBRARY_PATH \$::env(SODIUM_ROOT)/lib")
EoF
| true
|
daf7860698f8830c376275dc414358150676a7a6
|
Shell
|
buildpacks/lifecycle
|
/acceptance/testdata/detector/container/cnb/extensions/simple_extension/simple_extension_version/bin/detect
|
UTF-8
| 156
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
plan_path=$2
cat >> "${plan_path}" <<EOL
[[provides]]
name = "some_requirement"
EOL
echo "simple_extension: output from /bin/detect"
| true
|
bb9e742da57ea6a83ffe3d018ad23928b63fe172
|
Shell
|
dlaststark/machine-learning-projects
|
/Programming Language Detection/Experiment-2/Dataset/Train/UNIX-Shell/play-recorded-sounds.sh
|
UTF-8
| 169
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/sh
# play.sh
# Plays .au files.
# Usage: play.sh <recorded_sound.au>
cat $1 >> /dev/audio # Write file $1 to the speaker's Character Special (/dev/audio).
| true
|
dbf394c83d6db13a76d59de9e906d2d780f18734
|
Shell
|
witnesslq/bigdata-9
|
/weblogOffline/run/common/getOriginalContentLogAndCheck.sh
|
UTF-8
| 1,665
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
if [ $# = 0 ]; then
yesterday=`date +%Y%m%d -d "-1days"`
else
yesterday=$1
fi
export JAVA_HOME=${JAVA_HOME}
export HADOOP_HOME=${HADOOP_HOME}
export HADOOP_CONF_DIR=${HADOOP_CONF_DIR}
baseDir=$(cd "$(dirname "$0")"; pwd)
source $baseDir/../weblogHeader.sh
echo $yesterday
ycLocalDestDir=${WEBLOG_RESULT_LOCAL_DIR}weblogRsync/ycInfo/$yesterday
ycHadoopDestDir=${WEBLOG_COMMMON_HDFS_DIR}originalContentIncr
ycInfoStatus="fault"
ycfileNum=3
ycRealFileNum=0
#最大重试次数,设为0则不重试
maxRetry=10
retry=0
#重试间隔(s)
interval=180
#yc
function getAndCheckYcInfo(){
if [ -d $ycLocalDestDir ];then
rm $ycLocalDestDir/*
else
mkdir -p $ycLocalDestDir
fi
/usr/bin/rsync -au 61.135.251.68::original/${yesterday}* $ycLocalDestDir/
/usr/bin/rsync -au 220.181.29.156::originalPhotoset/${yesterday}* $ycLocalDestDir/
hadoopDirPrepare $ycHadoopDestDir $yesterday
${HADOOP} fs -put $ycLocalDestDir/$yesterday* $ycHadoopDestDir/$yesterday/
ycRealFileNum=`${HADOOP} fs -du $ycHadoopDestDir/$yesterday/ | grep $yesterday -c`
if [ $ycRealFileNum -ge $ycfileNum ];then
ycInfoStatus="success"
fi
}
function prepareYclog(){
while [ "$ycInfoStatus" == "fault" -a $retry -le $maxRetry ];do
getAndCheckYcInfo
if [ "$ycInfoStatus" == "fault" ];then
retry=$((retry+1))
echo getYclog sleep, retry=$retry/$maxRetry fileCount=$ycRealFileNum/$ycfileNum
sleep $interval"s"
fi
done
}
function main(){
prepareYclog
if [ "$ycInfoStatus" == "fault" ];then
errorAlarm getYclog:YclogFileCount=$ycRealFileNum/$ycfileNum
exit 1
else
exit 0
fi
}
main
| true
|
7f2a8e1fe4683962f8d74d2b429ce0acdf9239f4
|
Shell
|
milinddinesh/Tasks
|
/bash/medium.sh
|
UTF-8
| 535
| 3.53125
| 4
|
[] |
no_license
|
#! /bin/bash
function fileSort () {
ls ~/Downloads | while read -r i
do
if [ -f ~/Downloads/"$i" ]
then
fy=$(date -r ~/Downloads/"$i" +%Y)
fmonth=$(date -r ~/Downloads/"$i" +%b)
fday=$(date -r ~/Downloads/"$i" +%d)
fname=${fmonth}_${fday}
if [ ! -d ~/Downloads/$fy ]
then
`mkdir ~/Downloads/$fy`
fi
if [ -d ~/Downloads/$fy/$fname ]
then
`mv ~/Downloads/"$i" ~/Downloads/$fy/$fname/`
else
`mkdir ~/Downloads/$fy/$fname`
`mv ~/Downloads/"$i" ~/Downloads/$fy/$fname/`
fi
fi
done
}
fileSort
| true
|
b36ace42a11f109b8bd79460c2830e41fd445ead
|
Shell
|
kwozyman/rhel-on-bf2
|
/PXE_setup_RHEL_install_over_mlx.sh
|
UTF-8
| 14,142
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# ex:ts=4:sw=4:sts=4:et
# ex:expandtab
#
# Copyright (c) 2020, Mellanox Technologies
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# some steps based on
# https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/performing_an_advanced_rhel_installation/index/#preparing-for-a-network-install_installing-rhel-as-an-experienced-user
# Save the current directory.
CUR_DIR=$PWD
# TFTP config.
TFTP_CFG=/etc/xinetd.d/tftp
KS_FILE=
ENABLE_KS=0
SUBNET="172.31.100"
REPO_IP="${SUBNET}.1"
PROTOCOL="ETH"
NETDEV=TBD
usage()
{
cat <<EOF
./setup.sh -i <rhel-iso> [options]
Options:
-i <rhel-iso> The .iso installation file
-d <netdev> MLX Netdev to use
-p <protocol> ETH, IB, tmfifo
-k <ks-path> Enable kickstart auto installation
EOF
}
svcctl_cmd()
{
oper=$1
svc=$2
case ${oper} in
restart)
if [ "$NEW_VER" = "1" ]; then
echo "systemctl restart ${svc}"
else
echo "service ${svc} restart"
fi
;;
enable)
if [ "$NEW_VER" = "1" ]; then
echo "systemctl enable ${svc}"
else
echo "chkconfig ${svc} on"
fi
;;
esac
}
svcctl()
{
cmd=`svcctl_cmd $1 $2`
eval "$cmd"
}
REAL_PATH=/usr/bin/realpath
if [ ! -f "$REAL_PATH" ]; then
REAL_PATH="readlink -f"
fi
type systemctl >/dev/null 2>&1 && NEW_VER=1
setup_rshim()
{
nmcli conn delete ${NETDEV}
rm /etc/sysconfig/network-scripts/ifcfg-${NETDEV}
nmcli conn add type tun mode tap con-name ${NETDEV} ifname ${NETDEV} autoconnect yes ip4 ${REPO_IP}
nmcli conn modify tmfifo_net0 ipv4.routes ${SUBNET}.0/24
systemctl restart NetworkManager
nmcli conn up ${NETDEV}
# Create rshim udev rules.
if :; then
echo "Creating rshim tmfifo_net0 udev rules..."
cat >/etc/udev/rules.d/91-tmfifo_net.rules <<EOF
SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="00:1a:ca:ff:ff:02", ATTR{type}=="1", NAME="${NETDEV} RUN+="/usr/bin/nmcli conn up ${NETDEV}"
EOF
fi
if ! rpm -qa | grep -q rshim ; then
echo "Installing rshim user-space driver..."
yum install -y elfutils-libelf-devel
yum install -y make
yum install -y git
yum install -y autoconf
yum install -y tmux
yum install -y automake
yum install -y pciutils-devel
yum install -y libusb-devel
yum install -y fuse-devel
yum install -y kernel-modules-extra
yum install -y gcc
cd /tmp
git clone https://github.com/Mellanox/rshim-user-space.git
cd rshim-user-space/
./bootstrap.sh
./configure
/bin/rm -rf /tmp/mybuildtest
rpm_topdir=/tmp/mybuildtest
mkdir -p $rpm_topdir/{RPMS,BUILD,SRPM,SPECS,SOURCES}
version=$(grep "Version:" *.spec | head -1 | awk '{print $NF}')
git archive --format=tgz --prefix=rshim-${version}/ HEAD > $rpm_topdir/SOURCES/rshim-${version}.tar.gz
rpmbuild -ba --nodeps --define "_topdir $rpm_topdir" --define 'dist %{nil}' *.spec
rpm -ivh $rpm_topdir/RPMS/*/*rpm
systemctl enable rshim
systemctl start rshim
systemctl status rshim --no-pager -l
fi
}
# Parse command line.
while getopts "d:i:k:p:" opt; do
case $opt in
d)
NETDEV=$OPTARG
;;
i)
DISTRO_ISO=`$REAL_PATH $OPTARG`
;;
k)
ENABLE_KS=1
KS_FILE=`$REAL_PATH $OPTARG`
;;
p)
PROTOCOL=$OPTARG
;;
\?)
usage >&2
exit -1
;;
esac
done
# Check root permission.
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root."
exit -1
fi
# Mount the .iso file. Retry it 3 times.
if [ ! -e "$DISTRO_ISO" ]; then
echo "Can't find rhel-iso"
usage
exit -1
fi
case "${PROTOCOL}" in
ETH)
;;
IB)
echo
echo " ########### MAKE SURE OpenSM is running on ${NETDEV} #############"
echo
;;
tmfifo)
NETDEV="tmfifo_net0"
setup_rshim
;;
*)
echo "Unsupported protocol: ${PROTOCOL}"
exit 1
;;
esac
DISTRO_VER=$(basename ${DISTRO_ISO} | sed -e 's/-dvd1.iso//g')
# PXE mount path (temporary).
PXE_MOUNT=/var/ftp/${DISTRO_VER}
# Kickstart config path.
BF_KS_PATH=/var/ftp/ks_${DISTRO_VER}
echo "Mounting the .iso file to ${PXE_MOUNT}..."
umount ${PXE_MOUNT} 2>/dev/null
mkdir -p ${PXE_MOUNT} 2>/dev/null
for i in 1..3; do
mount -t iso9660 -o loop ${DISTRO_ISO} ${PXE_MOUNT} 2>/dev/null
[ -d ${PXE_MOUNT}/EFI ] && break
sleep 1
done
if [ ! -d ${PXE_MOUNT}/EFI ]; then
echo "Unable to mount ${DISTRO_ISO}."
exit -1
fi
# Restart DHCP automatically (if dhcpd is running) when board reboots.
DHCPD_RESTART_CMD=`svcctl_cmd restart dhcpd`
IFUP_LOCAL=/sbin/ifup-local
if [ ! -e "${IFUP_LOCAL}" -o -z "$(grep ${NETDEV} ${IFUP_LOCAL} 2>/dev/null)" ]; then
cat >>${IFUP_LOCAL} <<EOF
INTF=\$1
if [[ "\$INTF" = "${NETDEV}"* ]]; then
killall -0 dhcpd 2>/dev/null
if [ \$? -eq 0 ]; then
$DHCPD_RESTART_CMD
fi
fi
EOF
chmod +x ${IFUP_LOCAL}
fi
# Patch existing IFUP_LOCAL file if it doesn't have the dhcpd running check.
tmp="killall -0 dhcpd 2>\/dev\/null\n if \[ \$? -eq 0 \]; then\n ${DHCPD_RESTART_CMD}\n fi"
if [ -z "$(grep "killall -0 dhcpd" ${IFUP_LOCAL})" ]; then
sed -i -E "s/${DHCPD_RESTART_CMD}/${tmp}/" ${IFUP_LOCAL}
fi
#
# Setup TFTP.
# TFTP server provides the initial images (kernel & initrd) for pxeboot.
#
echo "Setup tftp service..."
yum -y install httpd vsftpd tftp-server dhcp-server
sed -i \
-e 's/anonymous_enable=NO/anonymous_enable=YES/' \
-e 's/write_enable=YES/write_enable=NO/' \
/etc/vsftpd/vsftpd.conf
echo "pasv_min_port=10021" >> /etc/vsftpd/vsftpd.conf
echo "pasv_max_port=10031" >> /etc/vsftpd/vsftpd.conf
TFTP_PATH=/var/lib/tftpboot
if [ -z "$TFTP_PATH" ]; then
echo "tftp path not found"
exit -1
fi
# Copy over the tftp files.
echo "Generate TFTP images..."
/bin/rm -rf ${TFTP_PATH}/pxelinux/
mkdir -p ${TFTP_PATH}/pxelinux/pxelinux.cfg
mkdir -p ${TFTP_PATH}/pxelinux/images/${DISTRO_VER}
/bin/cp -fv ${PXE_MOUNT}/EFI/BOOT/BOOTAA64.EFI ${TFTP_PATH}/
/bin/cp -fv ${PXE_MOUNT}/EFI/BOOT/grubaa64.efi ${TFTP_PATH}/
/bin/cp -fv ${PXE_MOUNT}/EFI/BOOT/mmaa64.efi ${TFTP_PATH}/
/bin/cp -fv ${PXE_MOUNT}/images/pxeboot/vmlinuz ${TFTP_PATH}/pxelinux/images/${DISTRO_VER}/
/bin/cp -fv ${PXE_MOUNT}/images/pxeboot/initrd.img ${TFTP_PATH}/pxelinux/images/${DISTRO_VER}/
# get pxelinux.0
case "${DISTRO_ISO}" in
*x86_64*)
rm -rf /tmp/pxetmp
mkdir /tmp/pxetmp
cd /tmp/pxetmp
syslinux_rpm=$(find ${PXE_MOUNT} | grep syslinux-tftpboot-[0-9] | head -1)
if [ ! -e "${syslinux_rpm}" ] ; then
echo "cannot find syslinux RPM in the installation ISO media!"
exit 1
fi
rpm2cpio ${syslinux_rpm} | cpio -id
/bin/cp -fv /tmp/pxetmp/tftpboot/* ${TFTP_PATH}/pxelinux/
cd -
rm -rf /tmp/pxetmp
;;
esac
# Generate the grub.cfg.
echo "Generate the grub.cfg..."
grub_opts="inst.repo=http://${REPO_IP}/${DISTRO_VER}/ console=tty0 console=tty1 console=ttyS0,115200 console=ttyS1,115200"
if [ ${ENABLE_KS} -eq 1 ]; then
grub_opts="${grub_opts} inst.ks=http://${REPO_IP}/ks_${DISTRO_VER}/kickstart.ks"
fi
case "${PROTOCOL}" in
ETH)
grub_opts="${grub_opts} ip=dhcp"
;;
IB)
grub_opts="${grub_opts} bootdev=${NETDEV} ksdevice=${NETDEV} net.ifnames=0 biosdevname=0 rd.neednet=1 rd.boofif=0 rd.driver.pre=mlx5_ib,mlx4_ib,ib_ipoib ip=${NETDEV}:dhcp rd.net.dhcp.retry=10 rd.net.timeout.iflink=60 rd.net.timeout.ifup=80 rd.net.timeout.carrier=80"
;;
tmfifo)
grub_opts="${grub_opts} ip=dhcp ip=dhcp console=ttyAMA1 console=hvc0 console=ttyAMA0 earlycon=pl011,0x01000000"
;;
esac
case "${DISTRO_ISO}" in
*x86_64*)
cat > ${TFTP_PATH}/pxelinux/boot.msg <<EOF
!!!!!!!!!!!! PXE INSTALL TEST !!!!!!!!!!!!!!!
Select one:
1 - Install Red Hat Enterprise Linux
2 - Start installer but Break to shell
3 - Reboot
EOF
cat > ${TFTP_PATH}/pxelinux/pxelinux.cfg/default <<EOF
default vesamenu.c32
prompt 1
timeout 600
display boot.msg
label 1
menu label ^Install ${DISTRO_VER}
menu default
kernel images/${DISTRO_VER}/vmlinuz
append initrd=images/${DISTRO_VER}/initrd.img showopts ${grub_opts}
label 2
menu label ^Start installer ${DISTRO_VER} but break to shell
kernel images/${DISTRO_VER}/vmlinuz
append initrd=images/${DISTRO_VER}/initrd.img showopts ${grub_opts} rd.break=initqueue rd.shell
label 3
menu label Boot from ^Reboot
reboot
EOF
;;
*aarch64*)
cat > ${TFTP_PATH}/grub.cfg <<EOF
# ${DISTRO_ISO} ${REPO_IP}
menuentry 'Install ${DISTRO_VER}' --class red --class gnu-linux --class gnu --class os {
linux pxelinux/images/${DISTRO_VER}/vmlinuz showopts ${grub_opts}
initrd pxelinux/images/${DISTRO_VER}/initrd.img
}
menuentry 'Start installer ${DISTRO_VER} but break to shell' --class red --class gnu-linux --class gnu --class os {
linux images/${DISTRO_VER}/vmlinuz ${grub_opts}
initrd images/${DISTRO_VER}/initrd.img showopts ${grub_opts} rd.break=initqueue rd.shell
}
menuentry 'Reboot' --class red --class gnu-linux --class gnu --class os {
reboot
}
EOF
;;
*)
echo "-E- MISSING BOOT SETTINGS!!!"
;;
esac
#fi
#
# Setup DHCP.
# DHCP-SERVER assigns IP address to the target, and specify the boot image.
#
echo "Setup dhcp service..."
if [ -e "/etc/dhcp/dhcpd.conf" ]; then
cp /etc/dhcp/dhcpd.conf /etc/dhcp/dhcpd.conf.save
fi
NAME_SERVERS=`cat /etc/resolv.conf | grep -e "^[[:blank:]]*nameserver" | awk '{print $2}'`
NAME_SERVERS=`echo ${NAME_SERVERS} | sed 's/ /, /g'`
DOMAIN_NAMES=`cat /etc/resolv.conf | grep search | awk '{$1= ""; print $0}'`
DOMAIN_NAMES=`echo $DOMAIN_NAMES | sed 's/ /", "/g; s/$/"/; s/^/"/'`
NAME_SERVERS_STR=${NAME_SERVERS:+option domain-name-servers ${NAME_SERVERS};}
DOMAIN_NAMES_STR=${DOMAIN_NAMES:+option domain-search ${DOMAIN_NAMES};}
case "${DISTRO_ISO}" in
*x86_64*)
filesettings='
if option architecture-type = 00:07 {
filename "BOOTX64.efi";
} else {
filename "pxelinux/pxelinux.0";
}
'
;;
*aarch64*)
filesettings='filename "/BOOTAA64.EFI";'
;;
*)
echo "-E- MISSING BOOT SETTINGS!!!"
;;
esac
cat >/etc/dhcp/dhcpd.conf <<EOF
option space pxelinux;
option pxelinux.magic code 208 = string;
option pxelinux.configfile code 209 = text;
option pxelinux.pathprefix code 210 = text;
option pxelinux.reboottime code 211 = unsigned integer 32;
option architecture-type code 93 = unsigned integer 16;
allow booting;
allow bootp;
subnet ${SUBNET}.0 netmask 255.255.255.0 {
range ${SUBNET}.10 ${SUBNET}.20;
option broadcast-address ${SUBNET}.255;
option routers ${REPO_IP};
${NAME_SERVERS_STR}
${DOMAIN_NAMES_STR}
option dhcp-client-identifier = option dhcp-client-identifier;
class "pxeclients" {
match if substring (option vendor-class-identifier, 0, 9) = "PXEClient";
next-server ${REPO_IP};
always-broadcast on;
${filesettings}
}
}
EOF
#
# Setup HTTP.
# The installer will fetch packages from the http server.
#
echo "Setup http service..."
if [ $ENABLE_KS -eq 1 ]; then
mkdir -p ${BF_KS_PATH} 2>/dev/null
/bin/cp -fv ${KS_FILE} ${BF_KS_PATH}/kickstart.ks
sed -i "s@REPO_URL@http://${REPO_IP}/${DISTRO_VER}@" ${BF_KS_PATH}/kickstart.ks
fi
if [ "$NEW_VER" = "1" ]; then
HTTP_PERMISSION="Require ip 127.0.0.1 ${SUBNET}.0/24"
else
HTTP_PERMISSION="Allow from 127.0.0.1 ${SUBNET}.0/24"
fi
cat >/etc/httpd/conf.d/pxeboot_${DISTRO_VER}.conf <<EOF
Alias /${DISTRO_VER} ${PXE_MOUNT}
<Directory ${PXE_MOUNT}>
Options Indexes FollowSymLinks
$HTTP_PERMISSION
</Directory>
Alias /ks_${DISTRO_VER} ${BF_KS_PATH}
<Directory ${BF_KS_PATH}>
Options Indexes FollowSymLinks
$HTTP_PERMISSION
</Directory>
EOF
#
# Check selinux status. If enabled, it might block HTTP access which
# could affect CentOS installation.
#
sestate=`sestatus 2>/dev/null | head -1 | awk '{print $3}'`
[ "$sestate" = "enabled" ] && {
cat << EOF
Warning: selinux seems enabled which might affect CentOS installation.
Suggest disabling it temporarily with command 'setenforce 0'
if you're not sure.
EOF
}
chmod -R +r ${TFTP_PATH}/
systemctl enable vsftpd
systemctl restart vsftpd.service
systemctl enable dhcpd
systemctl restart dhcpd
systemctl enable tftp.socket
systemctl restart tftp.socket
systemctl enable httpd
systemctl restart httpd
echo -e "\nDone."
echo "Next step: PXE boot from target (make sure to select the correct port!)"
| true
|
f030442bde950a1a85134048229120c28e3b8e46
|
Shell
|
remedyhealth/scripts
|
/cloudwatch/cloudwatch.sh
|
UTF-8
| 3,320
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Create a baseline set of CloudWatch alarms (CPU, disk inode/space,
# memory, status checks) for one EC2 instance.  OK/ALARM actions are
# wired to an SNS topic; the topic ARNs (PAGERDUTY_PROD /
# PAGERDUTY_NOTPROD) come from ~/.baseline_check.cfg.
if [ ! -f ~/.baseline_check.cfg ]; then
echo "~/.baseline_check.cfg does not exist!"
exit 1
fi
. ~/.baseline_check.cfg
# Positional arguments; DISKS is optional.
NODENAME=$1
ENVIRONMENT=$2
INSTANCE_ID=$3
DISKS=$4
if [ -z "${NODENAME}" ] || [ -z "${ENVIRONMENT}" ] || [ -z "${INSTANCE_ID}" ]; then
echo "NODENAME, ENVIRONMENT, and INSTANCE_ID must be set"
echo "./cloudwatch.sh <nodename> <environment> <instance_id> <disks>"
echo "eg: <disks> = /:/dev/xvda1,/mnt/backups/dev/xvdh"
exit 1
fi
# Default to monitoring only the root filesystem.
if [ -z "${DISKS}" ]; then
DISKS="/:/dev/xvda1"
fi
# Production alarms page the production rotation; anything else goes to
# the non-production topic.
if [ "$ENVIRONMENT" == "production" ]; then
SNS_TOPIC=$PAGERDUTY_PROD
else
SNS_TOPIC=$PAGERDUTY_NOTPROD
fi
# Create CloudWatch Alarms
# CPU > 75% averaged over two 5-minute periods.
aws cloudwatch put-metric-alarm \
--alarm-name ${NODENAME}_High-CPUUtilization \
--ok-actions ${SNS_TOPIC} \
--alarm-actions ${SNS_TOPIC} \
--alarm-description "High CPU Utilization on ${NODENAME}" \
--statistic Average \
--namespace AWS/EC2 \
--metric-name CPUUtilization \
--period 300 \
--evaluation-periods 2 \
--threshold 75 \
--comparison-operator GreaterThanThreshold \
--dimensions "Name=InstanceId,Value=${INSTANCE_ID}"
# One inode alarm and one space alarm per disk.  Each comma-separated
# entry has the form "<mount>:<device>".
for disk in $(echo $DISKS | sed "s/,/ /g")
do
MOUNT=$(echo $disk | awk -F: '{print $1}')
DEVICE=$(echo $disk | awk -F: '{print $2}')
aws cloudwatch put-metric-alarm \
--alarm-name ${NODENAME}_High-DiskInodeUtilization-${MOUNT} \
--ok-actions ${SNS_TOPIC} \
--alarm-actions ${SNS_TOPIC} \
--alarm-description "High Disk Inode Utilization for ${MOUNT} on ${NODENAME}" \
--statistic Average \
--namespace System/Linux \
--metric-name DiskInodeUtilization \
--period 300 \
--evaluation-periods 2 \
--threshold 90 \
--comparison-operator GreaterThanThreshold \
--dimensions "Name=InstanceId,Value=${INSTANCE_ID}" "Name=MountPath,Value=${MOUNT}" "Name=Filesystem,Value=${DEVICE}"
aws cloudwatch put-metric-alarm \
--alarm-name ${NODENAME}_High-DiskSpaceUtilization-${MOUNT} \
--ok-actions ${SNS_TOPIC} \
--alarm-actions ${SNS_TOPIC} \
--alarm-description "High Disk Space Utilization for / on ${NODENAME}" \
--statistic Average \
--namespace System/Linux \
--metric-name DiskSpaceUtilization \
--period 300 \
--evaluation-periods 2 \
--threshold 90 \
--comparison-operator GreaterThanThreshold \
--dimensions "Name=InstanceId,Value=${INSTANCE_ID}" "Name=MountPath,Value=${MOUNT}" "Name=Filesystem,Value=${DEVICE}"
done
# Memory > 80% (System/Linux namespace: requires the CloudWatch
# monitoring scripts to be publishing these custom metrics).
aws cloudwatch put-metric-alarm \
--alarm-name ${NODENAME}_High-MemoryUtilization \
--ok-actions ${SNS_TOPIC} \
--alarm-actions ${SNS_TOPIC} \
--alarm-description "High Memory Utilization on ${NODENAME}" \
--statistic Average \
--namespace System/Linux \
--metric-name MemoryUtilization \
--period 300 \
--evaluation-periods 2 \
--threshold 80 \
--comparison-operator GreaterThanThreshold \
--dimensions "Name=InstanceId,Value=${INSTANCE_ID}"
# Any failed EC2 status check (1-minute period) raises the alarm.
aws cloudwatch put-metric-alarm \
--alarm-name ${NODENAME}_High-StatusCheckFailed \
--ok-actions ${SNS_TOPIC} \
--alarm-actions ${SNS_TOPIC} \
--alarm-description "Status Check Failed on ${NODENAME}" \
--statistic Average \
--namespace AWS/EC2 \
--metric-name StatusCheckFailed \
--period 60 \
--evaluation-periods 1 \
--threshold 0 \
--comparison-operator GreaterThanThreshold \
--dimensions "Name=InstanceId,Value=${INSTANCE_ID}"
| true
|
9327db7c9cc4a73b35e0053e379397a223d182d1
|
Shell
|
godane/abs
|
/extra/muparser/PKGBUILD
|
UTF-8
| 656
| 2.578125
| 3
|
[] |
no_license
|
# $Id: PKGBUILD 69090 2010-02-17 11:14:28Z ronald $
# Maintainer: Ronald van Haren <ronald.archlinux.org>
# Contributor: damir <damir.archlinux.org>
# Arch Linux PKGBUILD for muParser, a fast math expression parser library.
pkgname=muparser
pkgver=1.32
pkgrel=1
pkgdesc="a fast math parser library"
arch=('i686' 'x86_64')
url="http://muparser.sourceforge.net/"
depends=('glibc' 'gcc-libs')
license=('custom')
source=(http://downloads.sourceforge.net/$pkgname/muparser_v132.tar.gz)
md5sums=('9eda5ba73cae7dce09daa6bef6b7c49b')
build() {
# Tarball extracts to muparser_v132, not $pkgname-$pkgver.
cd $srcdir/${pkgname}_v132
./configure --prefix=/usr
make || return 1
make DESTDIR=$pkgdir install
# license
# Custom license, so it must be installed under /usr/share/licenses.
install -D -m644 License.txt \
$pkgdir/usr/share/licenses/${pkgname}/License
}
| true
|
f733a67da60f5a8061824f2f11331764b95c25c9
|
Shell
|
tow8ie/dotfiles
|
/shell.d/01-path-helpers.zsh
|
UTF-8
| 61
| 2.734375
| 3
|
[] |
no_license
|
# Print each component of the shell's $path array on its own line,
# useful when diagnosing PATH ordering problems.
debug_path() {
  local p
  for p in $path; do
    echo $p
  done
}
| true
|
972ad38e780be760fc86816030bfb3e8d936e558
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/tspc/tspc
|
UTF-8
| 856
| 3.8125
| 4
|
[] |
no_license
|
#! /bin/sh
# Startup script from the Debian tspc package
# Written by Martin Waitz <tali@debian.org>
#
# init-style wrapper: start/stop/restart the TSP IPv6 tunnel client.
PATH=/sbin:/bin:/usr/sbin:/usr/bin
TSPC=/usr/sbin/tspc
# Nothing to do if the client binary is not installed.
test -f $TSPC || exit 0
# read tsp client config, to get the interface used
. /etc/tsp/tspc.conf
start() {
$TSPC || exit 1
}
stop() {
killall $TSPC || exit 1
# $if_tunnel_v6v4 / $if_tunnel_v6udpv6 come from tspc.conf sourced above;
# tear down both tunnel flavours, ignoring "no such device" errors.
ip tunnel del $if_tunnel_v6v4 2>/dev/null
ip tunnel del $if_tunnel_v6udpv6 2>/dev/null
}
# Standard init-script action dispatch.
case "$1" in
start)
echo -n "Setting up IPv6 tunnel: "
start
if test "$?" = 0; then
echo "done.";
else
echo "failed."
exit 1
fi
;;
stop)
echo -n "Shutting down IPv6 tunnel: "
stop
echo "done."
;;
restart|force-reload)
echo -n "Restarting IPv6 tunnel: "
stop
start
if test "$?" = 0; then
echo "done.";
else
echo "failed."
exit 1
fi
;;
*)
echo "Usage: $0 {start|stop|restart|force-reload}" >&2
exit 1
;;
esac
exit 0
| true
|
0fcf138ae9c299bf20e286a226e10f20cb7556eb
|
Shell
|
wawoutert/GuessingGame
|
/guessinggame.sh
|
UTF-8
| 559
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# File: guessinggame.sh
# Tell the player which direction to adjust: $1 is the guess, $2 the answer.
# (An exact match is handled by the caller, so equality falls into "too low".)
function hint {
  if [[ $1 -le $2 ]]
  then
    echo "Your guess is too low, please try again"
  else
    echo "Your guess is too high, please try again"
  fi
}
# Main loop: keep asking until the player guesses how many files are in
# the current directory.
terminate=0
answer=$(ls | wc -l)
while [[ $terminate -eq 0 ]]
do
echo "How many files are in the current directory?"
read guess
echo "You guessed $guess"
if [[ $guess -eq $answer ]]
then
echo "Congratulations, you are correct!"
let terminate=1
else
echo $(hint $guess $answer) #without echo, command is not found
fi
done
| true
|
ea68e93547b4b51eca032556385a791388ea2025
|
Shell
|
mikaelengstrom/wpheadlesslab
|
/scripts/composer.sh
|
UTF-8
| 225
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Run composer inside of container
#
# Example usage `scripts/composer.sh update`
# Run from the repository root so docker-compose finds its config file.
cd $(git rev-parse --show-toplevel)
# All script arguments are forwarded verbatim to composer inside /app.
COMMAND="cd /app; composer $@"
docker-compose run --rm web bash -c "$COMMAND"
# Return to wherever the caller started.
cd -
| true
|
1ecbe643d7edbf389e680a823539227be8fd6d9b
|
Shell
|
KostasKoyias/semantic-analysis
|
/src/testing.sh
|
UTF-8
| 1,432
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Interactive test driver: runs the Java semantic analyser ($main) over
# every file in the pass/ and fail/ test folders, letting the user view
# each input and optionally skip the rest of a folder.
# get the right file paths under parent directory
main="Main"
pass=$(find .. -name "pass")
fail=$(find .. -name "fail")
# check whether the offsets flag is on
offsets=""
if [ "$1" == "--offsets" ]
then
offsets=$1
fi
# for each test case folder do
# Brace expansion of the two folder variables yields two iterations.
for folder in {$pass,$fail}
do
# display an informative message to the user about what follows
if [ $folder == $pass ]
then
echo "The following should pass the Semantic Test"
else
echo "The following should fail the Semantic Test"
fi
# type check each file in the folder
for i in $(ls $folder)
do
java $main $folder/$i $offsets
# allow user to take a look at the input file using the default editor
ans=""
while [ "$ans" != "y" ] && [ "$ans" != "n" ]
do
echo "want to see input file(y/n)"
read -n 1 ans
done
if [ $ans == "y" ]
then
editor $folder/$i
fi
# allow user to skip the rest of the files in this directory
ans=""
while [ "$ans" != "y" ] && [ "$ans" != "n" ]
do
echo "continue(y/n)"
read -n 1 ans
done
if [ $ans == "n" ]
then
break
fi
done
done
# Remind the user about the offsets flag if it was not supplied.
if [ "$offsets" == "" ]
then
echo -e "testing.sh: To view field and method offsets for each class rerun with \e[1m--offsets\e[0m"
fi
exit 0
| true
|
aae5c31c33517a894c2b268a8b30c611027509eb
|
Shell
|
huntinux/shell
|
/lastparam.sh
|
UTF-8
| 412
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Get the last positional parameter.
# From: ABS guide, chapter 4: http://www.tldp.org/LDP/abs/html/othertypesv.html
#
# Example: ./lastparam.sh 1 2 3 4
# prints the last argument, 4
argnum=$#
echo "argnum=$argnum"
lastparam=${!argnum} # '!' is indirect expansion, see: http://www.tldp.org/LDP/abs/html/bashver2.html#VARREFNEW
#lastparam=${!#} # alternative form
echo "lastparam is $lastparam"
exit 0
| true
|
8c787d422fab5b1d2d333f3e6671a1fac89cace8
|
Shell
|
Glue-Software-Engineering-AG/LocalSigner
|
/resources/linux/localsigner.sh
|
UTF-8
| 802
| 3.3125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Launcher for LocalSigner: warns if the smartcard daemon is missing,
# builds the Java classpath relative to the install directory, then
# starts the application, forwarding all arguments.
# Check for Smart Card Daemon
echo "Checking pcscd (PC/SC Smart Card Daemon)"
if ps -ef | grep -v grep | grep pcscd > /dev/null; then
echo " pcscd running"
else
echo " pcscd not running"
echo "Warning: Smartcard will probably not work until pcscd is running!"
fi
# Home Path to installation directory
LS_HOME="$(dirname "$0")"
libs="$LS_HOME/lib/localsigner.jar:\
$LS_HOME/lib/bcprov-jdk15on-1.59.jar:\
$LS_HOME/lib/bcmail-jdk15on-1.59.jar:\
$LS_HOME/lib/bcpkix-jdk15on-1.59.jar"
# SWT is only added for 64-bit JVMs.
if [ "$(java -version 2>&1 | grep 64-Bit)" ]; then
# 64bit JVM
echo "LocalSigner 64bit mode"
libs="$libs:$LS_HOME/lib/swt.jar"
else
echo "Please use a 64Bit JRE 8"
fi
# NOTE(review): on a 32-bit JVM the script only warns and still launches
# java below (without swt.jar) — confirm this fall-through is intended.
echo $libs
export SWT_GTK3=0
java -Xmx512m -Dbase=$LS_HOME -classpath $libs ch.admin.localsigner.main.LocalSigner "$@"
| true
|
12fd5b50ab322a3c0b28cf245eb0b27538fc3284
|
Shell
|
0x7f454c46/rc
|
/.config/shell/qemu/qemu_helpers
|
UTF-8
| 4,649
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Helpers for booting per-architecture QEMU debug VMs built with
# virt-builder.  Disk images live under DISK_LOCATION.
DISK_LOCATION="${HOME}/rootfs/virt-builder"
# Print "y" on stdout if virt-builder is available, else "n" (with an
# install hint on stderr).  Callers compare the captured output.
function check_virt_builder()
{
type virt-builder 2>&1 > /dev/null
if [[ $? -eq 0 ]] ; then
echo y
else
echo n
echo "Install virt-builder (libguestfs)" 1>&2
fi
}
# $1 - arch
# $2 - kernel path
# Ensure a root disk image exists for the given arch (building one with
# virt-builder if needed) and print the "-kernel …"/"-initrd …" QEMU
# arguments on stdout.  All progress/diagnostic output goes to stderr so
# callers can capture stdout verbatim via command substitution.
function touch_arch()
{
pushd "${DISK_LOCATION}" > /dev/null
local PWD="$(pwd)"
local FEDORA_VER=fedora-24
local KERNEL_PATH="${2}"
# ppc64le images are only available for fedora-23 here.
if [[ ${1} = ppc64le ]] ; then
FEDORA_VER=fedora-23
fi
# Build the root image on first use (slow; output redirected to stderr).
if [[ ! -e "${1}.img" ]] ; then
virt-builder --arch ${1} ${FEDORA_VER} -o "${1}.img" \
--size 20G --root-password password:1q2w3e 1>&2
fi
echo "Temporary disabled booting with compiled vmlinuz" 1>&2
# KERNEL_PATH=""
# if [[ ${1} =~ ^(x86_64|ppc64le) ]] ; then
# popd > /dev/null
# return
# fi
# No locally compiled kernel: extract Fedora's kernel/initrd from the
# image and boot those instead.
if [[ ! -e "${KERNEL_PATH}" ]] ; then
ARCH_FILES="${1}"
echo "${KERNEL_PATH} does not exist - launching fedora's kernel" 1>&2
# Fedora names 32-bit ARM artifacts "armv7hl", not "armv7l".
if [[ ${1} = armv7l ]] ; then
ARCH_FILES=armv7hl
fi
virt-builder --get-kernel "${1}.img" 1>&2
mv ./initramfs-*.${ARCH_FILES}.img "${1}-initramfs.img"
mv ./vmlinuz-*.${ARCH_FILES} "${1}-vmlinuz"
echo -n "-kernel ${PWD}/${1}-vmlinuz "
echo -n "-initrd ${PWD}/${1}-initramfs.img"
else
echo "launching with kernel ${KERNEL_PATH}" 1>&2
echo -n "-kernel ${KERNEL_PATH}"
fi
popd > /dev/null
}
# Boot the aarch64 debug VM.  Pass "dry-run" as $1 to print the QEMU
# command instead of executing it; extra args are appended to QEMU.
# GDB stub listens on tcp::1236.
function vm_aarch64_debug()
{
local QEMU_BIN="qemu-system-aarch64"
local KERNEL_LOCATION="${HOME}/kernel/linux/arch/arm64/boot"
local KERNEL="${KERNEL_LOCATION}/Image"
local CMDLINE=""
local DISK=aarch64.img
if [[ $(check_virt_builder) == n ]] ; then
return
fi
# touch_arch prints the -kernel/-initrd arguments on stdout.
local KERNEL_PARAM="$(touch_arch aarch64 ${KERNEL})"
# Only pass a kernel cmdline when booting an external kernel.
if [[ -n "${KERNEL_PARAM}" ]] ; then
local CMDLINE="root=/dev/vda5 rw console=ttyAMA0"
fi
if [[ ${1} = dry-run ]] ; then
QEMU_BIN="echo ${QEMU_BIN}"
fi
${QEMU_BIN} ${KERNEL_PARAM} \
-append "${CMDLINE}" \
-drive file="$DISK_LOCATION/$DISK",format=raw \
-boot c \
-m 1024 \
-localtime \
-name "debug_vm_aarch64" \
-netdev user,id=unet -device virtio-net-device,netdev=unet \
-nographic \
-smp cpus=1 \
-gdb tcp::1236 \
-cpu cortex-a57 \
-machine type=virt \
$@
}
# Boot the 32-bit ARM debug VM ("dry-run" supported as above).
# GDB stub listens on tcp::1237.
function vm_armv7l_debug()
{
local QEMU_BIN="qemu-system-arm"
local KERNEL_LOCATION="${HOME}/kernel/linux/arch/arm/boot"
local KERNEL="${KERNEL_LOCATION}/Image"
local CMDLINE=""
local DISK=armv7l.img
if [[ $(check_virt_builder) == n ]] ; then
return
fi
local KERNEL_PARAM="$(touch_arch armv7l ${KERNEL})"
if [[ -n "${KERNEL_PARAM}" ]] ; then
local CMDLINE="root=/dev/vda3 rw console=ttyAMA0"
fi
if [[ ${1} = dry-run ]] ; then
QEMU_BIN="echo ${QEMU_BIN}"
fi
${QEMU_BIN} ${KERNEL_PARAM} \
-append "${CMDLINE}" \
-drive file="$DISK_LOCATION/$DISK",format=raw \
-boot c \
-m 1024 \
-localtime \
-name "debug_vm_armv7l" \
-netdev user,id=unet -device virtio-net-device,netdev=unet \
-nographic \
-smp cpus=1 \
-gdb tcp::1237 \
-machine type=virt \
$@
}
# Boot the ppc64le (POWER8/pseries) debug VM ("dry-run" supported).
# GDB stub listens on tcp::1238.
function vm_ppc64le_debug()
{
local QEMU_BIN="qemu-system-ppc64"
local KERNEL_LOCATION="${HOME}/kernel/linux/arch/powerpc/boot"
local KERNEL="${KERNEL_LOCATION}/zImage"
local CMDLINE=""
local DISK="ppc64le.img"
if [[ $(check_virt_builder) == n ]] ; then
return
fi
local KERNEL_PARAM="$(touch_arch ppc64le ${KERNEL})"
if [[ -n "${KERNEL_PARAM}" ]] ; then
local CMDLINE="root=/dev/sda3 rw console=ttyS0 earlyprintk=serial,ttyS0,115200"
fi
if [[ ${1} = dry-run ]] ; then
QEMU_BIN="echo ${QEMU_BIN}"
fi
${QEMU_BIN} ${KERNEL_PARAM} \
-append "${CMDLINE}" \
-drive file="$DISK_LOCATION/$DISK",format=raw \
-boot c \
-m 1024 \
-localtime \
-name "debug_vm_ppc64le" \
-net nic -net user \
-nographic \
-smp cpus=2 \
-gdb tcp::1238 \
-M pseries \
-cpu POWER8 \
$@
}
# Boot the x86_64 debug VM with KVM acceleration ("dry-run" supported).
# GDB stub listens on tcp::1235.
function vm_x86_64_debug()
{
local QEMU_BIN="qemu-system-x86_64"
local KERNEL_LOCATION="${HOME}/kernel/linux/arch/x86_64/boot"
local KERNEL="${KERNEL_LOCATION}/bzImage"
local CMDLINE=""
local DISK="x86_64.img"
if [[ $(check_virt_builder) == n ]] ; then
return
fi
local KERNEL_PARAM="$(touch_arch x86_64 ${KERNEL})"
if [[ -n "${KERNEL_PARAM}" ]] ; then
local CMDLINE="root=/dev/sda3 rw console=ttyS0 earlyprintk=serial,ttyS0,115200"
fi
if [[ ${1} = dry-run ]] ; then
QEMU_BIN="echo ${QEMU_BIN}"
fi
${QEMU_BIN} ${KERNEL_PARAM} \
-append "${CMDLINE}" \
-drive file="$DISK_LOCATION/$DISK",format=raw \
-boot c \
-m 1024 \
-localtime \
-name "debug_vm_x86" \
-net nic -net user \
-nographic \
-smp cpus=4 \
-enable-kvm \
-gdb tcp::1235 \
$@
}
| true
|
f2a75fd26feed8033a21c67ca8022f8864d786de
|
Shell
|
raehik/dotfiles
|
/TODO/scripts-old/old/pi
|
UTF-8
| 165
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# SSH to a server.
#
# Keepalive options stop NAT/firewall idle timeouts from dropping the
# session.

user="$USER"
server="86.28.170.125"
port=6176

# Use the variables defined above.  (Previously $USER was interpolated
# directly, leaving the $user variable defined but unused.)
ssh -o TCPKeepAlive=yes -o ServerAliveInterval=50 -p "$port" "$user@$server"
| true
|
c1e2a3ec64a611c775a30ee60bfc210e8da2b0bf
|
Shell
|
oswaldokko/Gitsept2020
|
/Gold_thb20.txt
|
UTF-8
| 587
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh -x
# %A% %D% %T%
# For script to evaluate crontab file per tester and replace
# crontab with VrgyVugCronTmp if the two are not the same
#
# Compares this tester's current crontab against the golden template and
# raises a per-host monitor flag file if they differ.
testercron=/tmp/`uname -n`_oldcron
cron_template=/A5XX/standard_tester_files/TESTER_SCRIPTS/VERIGY/VrgyRootCronTmp
localdir=/.agilent
monitor=/TSESRVR/DISKSTAT/MNTR/`uname -n`_monitor
todate=`date '+%d%b%Y %T'`
# Dump the live crontab and diff it against the template.
crontab -l > $testercron
diff $testercron $cron_template > /tmp/x.$$
# Non-empty diff => crontab was modified; write the monitor flag.
# Otherwise clear any stale flag.
if [ -s /tmp/x.$$ ]
then
echo "`uname -n` crontable is modified, please check the tester! $todate" > $monitor
else
rm -rf $monitor
fi
rm -f /tmp/x.$$
| true
|
748791464474bf8f95eb03e1e4158241cae9a5a2
|
Shell
|
sarveshmaurya306/DSA
|
/Sorting/bubblesort.sh
|
UTF-8
| 411
| 3.46875
| 3
|
[] |
no_license
|
# Bubble Sort
# Reads n integers from stdin, prints the unsorted list, sorts it
# in place with bubble sort, and prints the sorted list.
echo "Enter number: "
read n
echo "Enter the elements: "
for (( i=0; i<n; i++ ))
do
  read a[i]
done
echo "Unsorted List is:"
echo -e "${a[@]} \t"
echo "Sorted List is: "
for (( i=0; i<n; i++ ))
do
  # After each outer pass the last i elements are already in place.
  # (The original used `expr $n-$i-1`, which does not evaluate because
  # expr needs spaces around operators; it only worked by accident when
  # the unevaluated string was re-parsed by the arithmetic (( )) below.)
  k=$(( n - i - 1 ))
  for (( j=0; j<k; j++ ))
  do
    if [ ${a[j]} -gt ${a[$((j+1))]} ]
    then
      # Swap adjacent out-of-order elements.
      temp=${a[j]}
      a[$j]=${a[$((j+1))]}
      a[$((j+1))]=$temp
    fi
  done
done
echo -e "${a[@]}"
| true
|
d1e189007f28bc1436a224c3490f6636af8b2f78
|
Shell
|
angaza/nexus-embedded
|
/buildkite/clang-format-ci-wrapper.sh
|
UTF-8
| 594
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CI wrapper: run the project clang-format check, archive its output as
# a Buildkite artifact, and propagate the check's exit status.
set -xuo pipefail
source buildkite/common.sh
za-init
RESULTS_DIR=$ARTIFACTS_DIR/"Clang-Format"
mkdir -p ${RESULTS_DIR}
cd support
OUTFILE=nx_keycode.txt
echo "--- Checking Clang-Format config"
which clang-format
clang-format --version
clang-format --style=file --dump-config
echo "--- Applying clang-format rules to Nexus Keycode"
# `clang-format.sh` will return nonzero if any errors are detected
./clang-format.sh | tee $OUTFILE
# Because pipefail is set above, $? reflects clang-format.sh's failure
# even though tee is the last command in the pipeline.
RETCODE=$?
# (don't directly use agent on Docker image)
#buildkite-agent artifact upload $OUTFILE
cp $OUTFILE ${RESULTS_DIR}/
exit $RETCODE
| true
|
6af4050bf9b801717d43abb760746d9a0e67bd91
|
Shell
|
Scrappers-glitch/icetone
|
/icetone-examples-pkg-getdown/src/main/installers/scripts/icetone-examples
|
UTF-8
| 558
| 3.453125
| 3
|
[
"BSD-2-Clause",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
#!/bin/bash
# Getdown launcher wrapper: keeps a per-user cache directory of symlinks
# to the system install, clearing it when the installed build changes.
CACHE_DIR="${HOME}/.cache/icetone-examples"
buildno="$(cat /usr/lib/icetone-examples/buildno 2>/dev/null)"
# Wipe the cache when the recorded build number differs from the install's.
# NOTE(review): "buildno" here is read/written relative to the caller's
# current directory, not ${CACHE_DIR} — confirm that is intended.
if [ ! -f buildno -o "${buildno}" != "$(cat buildno 2>/dev/null)" ]; then
echo "Detected launcher uprade, clearing cache"
rm -fr "${CACHE_DIR}"
echo "${buildno}" > buildno
fi
mkdir -p "${CACHE_DIR}"
pushd "${CACHE_DIR}"
# Symlink every installed file into the cache unless already present.
for i in /usr/lib/icetone-examples/* ; do
basename="$(basename ${i})"
if [ ! -f "${basename}" -a ! -L "${basename}" ]; then
ln -s "${i}" . 2>/dev/null
fi
done
java -jar /usr/lib/icetone-examples/getdown.jar .
| true
|
b984a92f8d8b5b08218561f44aa0fb771c5f4a29
|
Shell
|
perch/st2
|
/tools/st2ctl
|
UTF-8
| 4,665
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# st2ctl: control script for StackStorm services (start/stop/restart/
# reload content/clean/status).  Configuration and OS detection below.
LOGFILE="/tmp/st2_startup.log"
COMPONENTS="actionrunner st2api sensor_container rules_engine mistral st2resultstracker"
STANCONF="/etc/st2/st2.conf"
PYTHON=`which python`
# AR = number of actionrunner processes to launch (default 10; can be
# overridden from the environment).
if [ -z "$AR" ];
then
AR=10
fi
# Detect the packaging flavour to locate python site/dist-packages.
DEBTEST=`lsb_release -a 2> /dev/null | grep Distributor | awk '{print $3}'`
if [[ "$DEBTEST" == "Ubuntu" ]]; then
TYPE="debs"
PYTHONPACK="/usr/lib/python2.7/dist-packages"
elif [[ -f "/etc/redhat-release" ]]; then
TYPE="rpms"
PYTHONPACK="/usr/lib/python2.7/site-packages"
else
echo "Unknown Operating System"
exit 2
fi
# Launch all StackStorm components in the background, appending their
# output to $LOGFILE.  Mistral is started via systemd unless running
# inside a Docker container ($CONTAINER == "DOCKER").
function st2start(){
for i in `seq 1 ${AR}`
do
nohup actionrunner --config-file ${STANCONF} &>> ${LOGFILE} &
done
nohup st2api --config-file ${STANCONF} &>> ${LOGFILE} &
nohup sensor_container --config-file ${STANCONF} &>> ${LOGFILE} &
nohup /usr/bin/st2resultstracker --config-file ${STANCONF} &>> ${LOGFILE} &
nohup rules_engine --config-file ${STANCONF} &>> ${LOGFILE} &
if [[ "${CONTAINER}" == "DOCKER" ]]
then
/opt/openstack/mistral/.venv/bin/python /opt/openstack/mistral/mistral/cmd/launch.py --config-file /etc/mistral/mistral.conf --log-file /var/log/mistral.log &> /dev/null &
else
service mistral start
fi
}
# Stop every component listed in $COMPONENTS by PID lookup; mistral is
# stopped via systemd unless inside Docker.
function st2stop(){
for COM in $COMPONENTS
do
if [[ "${COM}" == "mistral" ]]
then
PID=`ps ax | grep -v grep | grep ${COM} | awk '{print $1}'`
if [[ ! -z $PID ]]
then
for p in $PID
do
echo "Killing ${COM} PID: ${p}"
done
# NOTE(review): in the Docker branch only the last $p from the
# loop above is killed — confirm whether all PIDs were intended.
if [[ "${CONTAINER}" == "DOCKER" ]]
then
kill $p
else
service mistral stop
fi
else
echo "${COM} is not running"
fi
else
PID=`ps ax | grep -v grep | grep ${COM} | awk '{print $1}'`
if [[ ! -z $PID ]]
then
for p in $PID
do
echo "Killing ${COM} PID: ${p}"
kill $p
done
else
echo "${COM} is not running"
fi
fi
done
}
# Restart a single component named in $1.  actionrunner is restarted
# with $AR processes; everything else with one.
function restart_component() {
COM=${1}
if [[ ! -z $COM ]]
then
if [[ "${COM}" == "actionrunner" ]]
then
PROC_COUNT=${AR}
else
PROC_COUNT=1
fi
echo "restarting service ${COM} with ${PROC_COUNT} process(es)."
if [[ "${COM}" == "mistral" ]]
then
if [[ "${CONTAINER}" == "DOCKER" ]]
then
# NOTE(review): $p here is leftover from an earlier loop in this
# shell — no PID lookup happens in this branch; likely a bug.
kill $p
sleep 1
/opt/openstack/mistral/.venv/bin/python /opt/openstack/mistral/mistral/cmd/launch.py --config-file /etc/mistral/mistral.conf --log-file /var/log/mistral.log &> /dev/null &
else
service mistral restart
fi
else
PID=`ps ax | grep -v grep | grep -v st2ctl | grep ${COM} | awk '{print $1}'`
if [[ ! -z $PID ]]
then
for p in $PID
do
echo "Killing ${COM} PID: ${p}"
kill $p
done
for i in `seq 1 ${PROC_COUNT}`
do
${COM} --config-file ${STANCONF} &>> ${LOGFILE} &
done
else
echo "${COM} is not running"
fi
fi
else
echo "No component specified to restart."
fi
}
# Register sensors/actions content with st2, defaulting to both when no
# flags are passed in $1.
function register_content() {
echo "Registering content..."
if [ ! ${1} ]; then
REGISTER_FLAGS="--register-sensors --register-actions"
else
# Note: Scripts already call reload with "--register-<content>"
# TODO: Update packs. actions to only pass in a resource name excluding
# --register prefix
REGISTER_FLAGS="${1}"
fi
$PYTHON ${PYTHONPACK}/st2common/bin/registercontent.py --config-file ${STANCONF} ${REGISTER_FLAGS}
}
# Drop the entire st2 MongoDB database (destructive).
clean_db() {
echo "Dropping Database..."
mongo st2 --eval "db.dropDatabase();"
}
# Delete all StackStorm log files (destructive).
clean_logs() {
echo "Cleaning Logs..."
rm -Rf /var/log/st2/*
}
# Print the PID(s) of each component in $COMPONENTS, or a "not running"
# message.  Excludes this script itself from the ps match.
function getpids(){
for COM in $COMPONENTS
do
PID=`ps ax | grep -v grep | grep -v st2ctl | grep "${COM}" | awk '{print $1}'`
if [[ ! -z $PID ]]
then
for p in $PID
do
echo "${COM} PID: ${p}"
done
else
echo "${COM} is not running"
fi
done
}
# Action dispatch: map the first argument onto the functions above.
case ${1} in
start)
st2start
getpids
;;
stop)
st2stop
;;
restart)
st2stop
sleep 1
st2start
getpids
;;
restart-component)
# $2 = component name.
restart_component ${2}
sleep 1
getpids
;;
reload)
# $2 = optional --register-* flags forwarded to register_content.
register_content ${2}
getpids
;;
clean)
# Destructive: requires interactive confirmation before dropping the
# database and logs, then re-registers content and restarts services.
echo "This will drop the database and delete all logs. Are you sure [y/n]?"
read verify
if [[ "$verify" == "y" ]]; then
st2stop
clean_db
clean_logs
register_content ${2}
st2start
getpids
else
exit
fi
;;
status)
getpids
;;
*)
echo "Valid actions: start|stop|restart|restart-component|reload|clean|status"
;;
esac
| true
|
41d8989acf06f40369f30b551c4c891c6e23052c
|
Shell
|
haykv/linux
|
/ex3/P3.3
|
UTF-8
| 384
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Prompt for a file name and a set of column ranges (num1-num2, one per
# prompt, terminated by EOF), then print the selected columns of each
# line of the file, concatenated per line.
echo -n "Enter file name: "
read filename
ranges=""
echo -n "Enter 2 parameters in the format num1-num2: "
# Collect ranges until the user signals EOF (Ctrl-D) on stdin.
while read range; do
ranges="$ranges $range"
echo -n "Enter 2 parameters in the format num1-num2: "
done
echo
# For every line of the file, glue together the requested cut ranges.
while read line; do
str=""
for range in $ranges; do
str="$str$(echo $line | cut -c$range)"
done
echo $str
done < $filename
| true
|
37d622a59ca0d1a4e01d95e0d2f40834924b7aa5
|
Shell
|
decentraland/unity-renderer
|
/ci-import-required-packages.sh
|
UTF-8
| 845
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# CI step: download the GPG-encrypted Unity "required packages" bundle
# for the current build target, decrypt it, and import it into the
# Unity project in batch mode under a virtual framebuffer.
echo Downloading Required Packages
# The decryption key is supplied base64-encoded via the environment.
echo "${GPG_PRIVATE_KEY_BASE64}" | base64 -d > private.gpg
gpg --batch --import private.gpg
# WebGL and desktop targets use different package bundles.
if [[ "$BUILD_TARGET" != "WebGL" ]]; then
packagesFile='requiredPackages-desktop.unitypackage.gpg'
else
packagesFile='requiredPackages-webgl.unitypackage.gpg'
fi
curl -L "https://renderer-artifacts.decentraland.org/artifacts/${packagesFile}" -o requiredPackages.unitypackage.gpg
gpg --output requiredPackages.unitypackage --decrypt requiredPackages.unitypackage.gpg
echo Finished downloading Required Packages
echo Begin importing Required Packages
# Headless Unity import; xvfb provides the display Unity requires.
xvfb-run --auto-servernum --server-args='-screen 0 640x480x24' $UNITY_PATH/Editor/Unity \
-quit \
-batchmode \
-importPackage $(pwd)/requiredPackages.unitypackage \
-projectPath "$PROJECT_PATH"
echo Ended importing Required Packages
| true
|
dcfcf51c34cb6e5a195e9c7c30b1e460dcfbd1d7
|
Shell
|
helloimowen/auto-minecraft-server
|
/scripts/restore-server.sh
|
UTF-8
| 799
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# start server from s3 backup
# Provisions a fresh EC2 host: restores the Minecraft world state from
# S3, installs the systemd unit, starts the server, then pulls the
# management scripts and bootstraps the Python environment.
sudo yum -y install java-1.8.0
sudo mkdir /minecraft
sudo chown -R ec2-user:ec2-user /minecraft
sudo aws s3 sync s3://owen-sanders-minecraft/backups/state/ /minecraft
cd /minecraft
sudo aws s3 cp s3://owen-sanders-minecraft/code/master/server_files/minecraft.service /etc/systemd/system/minecraft.service
sudo chmod -R ugo+rwx /minecraft # jar needs permissions to get at backup data.
sudo systemctl daemon-reload
sudo service minecraft start
# pull code from s3
cd ..
sudo mkdir /code
sudo chown -R ec2-user:ec2-user /code
sudo aws s3 sync s3://owen-sanders-minecraft/code/master/ /code
sudo chmod -R +x /code
cd /code
# script_manifest.txt lists the scripts that must be executable.
sudo chmod +x `cat script_manifest.txt`
# install pyenv for python scripts
./scripts/install-python-environment.sh
| true
|
2f6680143ee8b2628fefb02143894aa85d3dc747
|
Shell
|
digitalrebar/provision-plugins
|
/cmds/ipmi/content/templates/ipmi-install-cert-key.sh.tmpl
|
UTF-8
| 364
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Configure the IPMI subsystem
# This is a Digital Rebar template: the {{ ... }} directives are
# rendered server-side before the shell script executes.
# Include basic defaults and debugging
{{ template "setup.tmpl" . }}
# Get the vendor functions
{{ template "ipmi-vendor.sh.tmpl" . }}
# Only install a certificate when one was provided via the
# ipmi/configure/certificate parameter.
{{ if .ParamExists "ipmi/configure/certificate" }}
vendor_install_cert
{{ else }}
echo "ipmi/configure/certificate not defined - not installing certificate"
{{ end }}
exit 0
| true
|
86f74b27326dc42e29c6bd575000b1ecf8a0a82f
|
Shell
|
dlussky/dotfiles
|
/bin/lockNPause
|
UTF-8
| 583
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Pause the moc music player, lock the screen and power the display off,
# then resume playback once the user unlocks the session.
LOCK_CHECK_DELAY=0.5
SCREENSAVER_COMMAND=mate-screensaver-command
# Poll the screensaver until the lock has been observed and then
# released; returns once the session is unlocked again.
function wait_for_unlock {
haslock=
while true; do
sleep ${LOCK_CHECK_DELAY}
locked=$( $SCREENSAVER_COMMAND -q | grep " active" )
if [ -n "${haslock}" ]; then
# lock has happened before. Check unlock and break if unlocked
if [ -z "${locked}" ]; then break; fi
fi
if [ -n "${locked}" ]; then
haslock="true"
fi
done
}
# Pause playback, lock, give the lock a moment to engage, blank the
# display, then wait for unlock and unpause.
mocp -P
$SCREENSAVER_COMMAND --lock
sleep 3
xset dpms force off
wait_for_unlock
mocp -U
| true
|
4d9b8ed7cb49dc1a4ba2e3a7e2b7e5d782ed5b12
|
Shell
|
matt-welch/GENI_VT
|
/install/install_script.sh
|
UTF-8
| 229
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Top-level installer: all install* / collect* / print* functions are
# defined in installer_fcns.sh sourced below.
source installer_fcns.sh
echo "Beginning installation script: $0..."
installPackages
installKernel
# TODO need a switch here to control Docker and DPDK installation
collectSysInfo
printHeader
echo " $0 complete. "
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.