blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
dacd5429d2d9fb4ad7fa88589a15c0dad7678b1e | Shell | MDK-Packs/HTTP_Parser | /add_merge.sh | UTF-8 | 681 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Version: 1.0
# Date: 2019-09-26
# This bash script copies local contributions into the upstream directory.
#
# Requirements:
# bash shell
#
# Directory the contributions are copied into.
TARGET_DIR=local
# Contributions that replace existing files (empty = disabled)
# CONTRIB_MERGE=./contributions/merge
CONTRIB_MERGE=
# Contributions that add new folders/files
CONTRIB_ADD=./contributions/add
# Add contributions to $TARGET_DIR
if [ -n "$CONTRIB_ADD" ]; then
# add (must not overwrite) -- pass -n so existing files are preserved;
# the previous plain 'cp -vr' silently overwrote, contradicting this comment.
cp -vrn "$CONTRIB_ADD"/* "$TARGET_DIR"/
fi
if [ -n "$CONTRIB_MERGE" ]; then
# Merge (expected to overwrite existing files)
cp -vrf "$CONTRIB_MERGE"/* "$TARGET_DIR"/
fi
# add extension to license file; abort if the target dir is missing so the
# copy cannot accidentally run in the current directory instead.
pushd "$TARGET_DIR" || exit 1
cp -f LICENSE-MIT LICENSE-MIT.txt
popd
| true |
3f285a8690b0862a5c98daa13ffdac2942b6ee88 | Shell | pr0d1r2/common_shell_aliases | /zip_facebook_parts.sh | UTF-8 | 337 | 3.6875 | 4 | [
"MIT"
] | permissive | # Compress files to be sent via Facebook
# (max 25MB and "zip" renamed to "z00" to prevent auto-unpacking via Facebook and returning error)
#
# Example usage
# zip_facebook_parts file_bigger_than_25mb.mp3
zip_facebook_parts() {
  # 18MB split parts keep each piece under Facebook's 25MB cap; the .z00
  # rename stops Facebook from auto-unpacking the archive and erroring.
  parallel 'zip -r -s 18m {}.zip {} && mv {}.zip {}.z00' ::: "$@"
}
| true |
9098fbbd8e097db2f2d0471239f33528612fdba9 | Shell | enkor250/mailserver-1 | /postfix/scripts/init.sh | UTF-8 | 3,464 | 3.453125 | 3 | [] | no_license | #!/bin/bash
set -e
# Postfix container entrypoint helpers: TLS material locations used by the
# setup and config-templating functions below.
CERTFOLDER=/etc/postfix/certs
CACERT=${CERTFOLDER}/ssl-cert-snakeoil.pem
PRIVATEKEY=${CERTFOLDER}/mail.key
PUBLICCERT=${CERTFOLDER}/mailcert.pem
# Alternative paths when using Let's Encrypt material directly:
#PRIVATEKEY=/certificates/privkey.pem
#PUBLICCERT=/certificates/fullchain.pem
# Emit an informational log line prefixed with "[INFO] ".
info() {
printf '[INFO] %s\n' "$*"
}
# Create a self-signed certificate, a CA copy and DH parameters in
# $CERTFOLDER, then restrict everything to root.
# Assumes CERTIFICATESUBJECT is provided by the environment -- TODO confirm.
generateCertificate() {
openssl req -x509 -nodes -days 3650 -newkey rsa:2048 \
-subj "${CERTIFICATESUBJECT}" \
-keyout "${PRIVATEKEY}" -out "${PUBLICCERT}"
cp /etc/ssl/certs/ssl-cert-snakeoil.pem "${CACERT}"
openssl dhparam -2 -out "${CERTFOLDER}/dh_512.pem" 512
openssl dhparam -2 -out "${CERTFOLDER}/dh_1024.pem" 1024
chown -R root:root /etc/postfix/certs/
# The old blanket 'chmod -R 600' also stripped the execute bit from the
# directory itself, making its contents unreachable. Keep the directory
# traversable (700) and lock only the files down to 600.
chmod 700 /etc/postfix/certs/
find /etc/postfix/certs/ -type f -exec chmod 600 {} +
}
# Replace the <placeholder> tokens in the given file(s), in place, with
# values from the environment (DOMAIN, ADDOMAIN, HOSTNAME, net masks, cert
# paths, ADPASSWORD). Uses '@' as the sed delimiter where values may
# contain slashes (paths, passwords).
updatefile() {
# "$@" is quoted so file names containing spaces survive word splitting.
sed -i "s/<domain>/$DOMAIN/g" "$@"
sed -i "s/<addomain>/$ADDOMAIN/g" "$@"
sed -i "s/<hostname>/$HOSTNAME/g" "$@"
sed -i "s/<dockernetmask>/$DOCKERNETMASK\/$DOCKERNETMASKLEN/g" "$@"
sed -i "s/<netmask>/$NETMASK\/$NETMASKLEN/g" "$@"
sed -i "s@<cacert>@$CACERT@g" "$@"
sed -i "s@<publiccert>@$PUBLICCERT@g" "$@"
sed -i "s@<privatekey>@$PRIVATEKEY@g" "$@"
sed -i "s@<domaincontroller>@$HOSTNAME.$DOMAIN@g" "$@"
sed -i "s@<secret>@$ADPASSWORD@g" "$@"
}
# One-time container setup: generate TLS material, fill the Postfix config
# templates from environment variables, configure the SASL relay, then
# request a Let's Encrypt certificate. Touches /etc/postfix/.alreadysetup
# so appStart can skip this on later boots.
# Reads (among others): RELAYHOST, RELAYUSER, RELAYUSERPASSWORD, WEBURL, EMAIL.
appSetup () {
echo "[INFO] setup"
chmod +x /postfix.sh
mkdir ${CERTFOLDER}
generateCertificate
cd /etc/postfix/
# Substitute the <placeholder> tokens in each config template.
updatefile main.cf
updatefile ldap_virtual_aliases.cf
updatefile ldap_virtual_recipients.cf
updatefile virtual_domains
updatefile drop.cidr
# configure relay
postconf -e relayhost=[${RELAYHOST}]:587
postconf -e smtp_sasl_auth_enable=yes
postconf -e smtp_sasl_password_maps=hash:/etc/postfix/sasl_passwd
postconf -e smtp_sasl_security_options=noanonymous
postconf -e smtpd_tls_security_level=may
echo [${RELAYHOST}]:587 ${RELAYUSER}:${RELAYUSERPASSWORD} >> /etc/postfix/sasl_passwd
# Strip any quote characters that leaked in from the environment values.
sed -i "s/\"//g" /etc/postfix/sasl_passwd
postmap /etc/postfix/sasl_passwd
# sed -i "s/START=no/START=yes/g" /etc/default/saslauthd
# sed -i "s/MECHANISMS=.*/MECHANISMS=\"ldap\"/g" /etc/default/saslauthd
postmap hash:/etc/postfix/virtual_domains
postconf compatibility_level=2
# Sentinel file checked by appStart to make setup idempotent.
touch /etc/postfix/.alreadysetup
# To make logrotate work (rsyslogd has to reopen logs)
mv /usr/sbin/policy-rc.d /usr/sbin/policy-rc.d.saved
# Remove last lines from /etc/rsyslogd.conf to avoid errors in '/var/log/messages' such as
# "rsyslogd-2007: action 'action 17' suspended, next retry is"
sed -i '/# The named pipe \/dev\/xconsole/,$d' /etc/rsyslog.conf
# Generate new certificate
cd /home/letsencrypt/
./letsencrypt-auto \
certonly --standalone \
-w /home \
-d ${WEBURL} \
--preferred-challenges http \
-m ${EMAIL} \
--agree-tos -n
}
# Run first-time setup if needed, then start cron and supervisord.
appStart () {
# Explicit if/else: the former 'test && echo || appSetup' form would
# re-run setup whenever the echo itself failed.
if [ -f /etc/postfix/.alreadysetup ]; then
echo "Skipping setup..."
else
appSetup
fi
service cron start
# Start the services
/usr/bin/supervisord
}
# Print the usage text for the entrypoint dispatcher.
appHelp () {
cat <<'EOF'
Available options:
 app:start - Starts all services needed for mail server
 app:setup - First time setup.
 app:help - Displays the help
 [command] - Execute the specified linux command eg. /bin/bash.
EOF
}
# Entrypoint dispatch: recognised app:* commands, otherwise try to run the
# argument as a program (directly if executable, else via PATH lookup),
# falling back to the help text.
case "$1" in
app:start)
appStart
;;
app:setup)
appSetup
;;
app:help)
appHelp
;;
*)
# Quote "$1": the unquoted '[ -x $1 ]' was true for an empty argument
# list and then executed an empty command.
if [ -n "$1" ] && [ -x "$1" ]; then
"$1"
else
# 'command -v' replaces deprecated 'which'; '|| true' keeps a failed
# lookup from aborting the script under 'set -e' before the help
# fallback can run.
prog=$(command -v "$1" || true)
if [ -n "${prog}" ] ; then
shift 1
"$prog" "$@"
else
appHelp
fi
fi
;;
esac
exit 0
| true |
22eb6b07642c4f01a48f4422e78c2637c20d9215 | Shell | fry412/practice | /biomedicalExternalEnrichedQA/code/evaluationTools/evaluation.sh | UTF-8 | 876 | 2.8125 | 3 | [] | no_license | export BIOCODES_DIR=/path/to/biobert/biocodes #BioBERT biocodes dir -> use their transformation script
export JAVA_DIR=/path/to/Evaluation-Measures #Official Evaluation dir
export OUTPUT_DIR=/path/to/prediction/output #Prediction result
export QA_DIR=/path/to/golden/answer #data should be downloaded from bioasq official website (need registration)
# File-name fragments: predictions are "<pre><batch><post>", golden answer
# files are "<pregold><batch><postgold>".
pre="BioASQ-test-factoid-6b-snippet-"
post="-2sent.json"
pregold="6B"
postgold="_golden.json"
# For each of the five BioASQ 6b test batches: convert the BioBERT n-best
# predictions to submission format, then score against the golden answers.
for i in 1 2 3 4 5
do
    # Abort if a directory is missing instead of running in the wrong place.
    cd "$BIOCODES_DIR" || exit 1
    python transform_n2b_factoid.py --nbest_path="$OUTPUT_DIR/$pre$i$post/nbest_predictions_.json" --output_path="$OUTPUT_DIR/$pre$i$post"
    echo "Transferred!"
    cd "$JAVA_DIR" || exit 1
    java -Xmx10G -cp ./flat/BioASQEvaluation/dist/BioASQEvaluation.jar evaluation.EvaluatorTask1b -phaseB -e 5 "$QA_DIR/$pregold$i$postgold" "$OUTPUT_DIR/$pre$i$post/BioASQform_BioASQ-answer.json"
    echo "Evaluation Done"
done
2daf65c86660b2db995a347711e34f59e1d592d5 | Shell | wlya/v3 | /install.sh | UTF-8 | 1,689 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Stop any running instances and wipe the previous installation.
pkill -f v2ray
pkill -f filebrow
pkill -f caddy
rm -rf /v2ok
apt update
apt install -y dos2unix wget zip unzip
# Zip password and domain: taken from $1/$2, or prompted interactively.
if test "$#" -ne 2; then
echo "input zip password: "
read PASSWD
echo "Input Domain: [a.b.c.d]\n"
read VDOMAIN
else
PASSWD=$1
VDOMAIN=$2
fi
# Fetch and unpack the (nested) password-protected bundle into /v2ok.
wget https://github.com/wlya/v3/raw/main/v2ok2.zip
unzip -P $PASSWD -o v2ok2.zip -d /v2ok/
unzip -P $PASSWD -o /v2ok/v2ok.zip -d /v2ok/
mkdir -p /v2ok/httproot/
# Caddy reverse-proxies /one on the given domain to the local v2ray port.
cat > /v2ok/caddy.conf <<EOF
$VDOMAIN {
reverse_proxy /one localhost:10000
}
EOF
# Enable BBR congestion control.
echo net.core.default_qdisc=fq >> /etc/sysctl.conf
echo net.ipv4.tcp_congestion_control=bbr >> /etc/sysctl.conf
sysctl -p
curl -fsSL https://raw.githubusercontent.com/filebrowser/get/master/get.sh | bash
# Re-create the rc-local unit so /etc/rc.local runs at boot on systemd.
cat > /etc/systemd/system/rc-local.service <<EOF
[Unit]
Description=/etc/rc.local
ConditionPathExists=/etc/rc.local
[Service]
Type=forking
ExecStart=/etc/rc.local start
TimeoutSec=0
StandardOutput=tty
RemainAfterExit=yes
SysVStartPriority=99
[Install]
WantedBy=multi-user.target
EOF
# rc.local launches caddy, v2ray and filebrowser in the background at boot.
cat > /etc/rc.local <<EOF
#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.
# bash /root/bindip.sh
/v2ok/caddy_linux_amd64 run -config /v2ok/caddy.conf --adapter caddyfile &
/v2ok/v2ray-linux-64/v2ray -config /v2ok/config.json &
filebrowser -r /v2ok/httproot &
exit 0
EOF
chmod a+x /etc/rc.local
dos2unix /etc/rc.local
systemctl enable rc-local
systemctl start rc-local.service
# Also start the services now without waiting for a reboot.
nohup sh /etc/rc.local &
| true |
f401c9dea0b2d875d292e2848aafcff85096ba04 | Shell | ci2c/code | /scripts/renaud/T1_PostFreeSurferPipelineWithoutT2.sh | UTF-8 | 5,390 | 3.125 | 3 | [] | no_license | #!/bin/bash
set -e
# HCP PostFreeSurfer pipeline (T1-only variant): converts FreeSurfer output
# to NIFTI/GIFTI Caret files and builds the ribbon volume.
# Requirements for this script
# installed versions of: FSL (version 5.0.6), FreeSurfer (version 5.3.0-HCP), gradunwarp (HCP version 1.0.1)
# environment: FSLDIR , FREESURFER_HOME , HCPPIPEDIR , CARET7DIR , PATH (for gradient_unwarp.py)
########################################## PIPELINE OVERVIEW ##########################################
#TODO
########################################## OUTPUT DIRECTORIES ##########################################
#TODO
# --------------------------------------------------------------------------------
# Load Function Libraries
# --------------------------------------------------------------------------------
source $HCPPIPEDIR/global/scripts/log.shlib # Logging related functions
source $HCPPIPEDIR/global/scripts/opts.shlib # Command line option functions
########################################## SUPPORT FUNCTIONS ##########################################
# --------------------------------------------------------------------------------
# Usage Description Function
# --------------------------------------------------------------------------------
# Print the (placeholder) usage text and terminate with a failure status.
show_usage() {
    printf '%s\n' "Usage information To Be Written"
    exit 1
}
# --------------------------------------------------------------------------------
# Establish tool name for logging
# --------------------------------------------------------------------------------
log_SetToolName "PostFreeSurferPipeline.sh"
################################################## OPTION PARSING #####################################################
opts_ShowVersionIfRequested $@
if opts_CheckForHelpRequest $@; then
show_usage
fi
log_Msg "Parsing Command Line Options"
# Input Variables (each pulled from its --flag via the opts library)
StudyFolder=`opts_GetOpt1 "--path" $@`
Subject=`opts_GetOpt1 "--subject" $@`
SurfaceAtlasDIR=`opts_GetOpt1 "--surfatlasdir" $@`
GrayordinatesSpaceDIR=`opts_GetOpt1 "--grayordinatesdir" $@`
GrayordinatesResolutions=`opts_GetOpt1 "--grayordinatesres" $@`
HighResMesh=`opts_GetOpt1 "--hiresmesh" $@`
LowResMeshes=`opts_GetOpt1 "--lowresmesh" $@`
SubcorticalGrayLabels=`opts_GetOpt1 "--subcortgraylabels" $@`
FreeSurferLabels=`opts_GetOpt1 "--freesurferlabels" $@`
ReferenceMyelinMaps=`opts_GetOpt1 "--refmyelinmaps" $@`
CorrectionSigma=`opts_GetOpt1 "--mcsigma" $@`
RegName=`opts_GetOpt1 "--regname" $@`
log_Msg "RegName: ${RegName}"
# default parameters: sigma defaults to sqrt(200), registration to FreeSurfer
CorrectionSigma=`opts_DefaultOpt $CorrectionSigma $(echo "sqrt ( 200 )" | bc -l)`
RegName=`opts_DefaultOpt $RegName FS`
PipelineScripts=${HCPPIPEDIR_PostFS}
#Naming Conventions (fixed file/folder names used throughout the HCP layout)
T1wImage="T1w_acpc_dc"
T1wFolder="T1w" #Location of T1w images
AtlasSpaceFolder="MNINonLinear"
NativeFolder="Native"
FreeSurferFolder="$Subject"
FreeSurferInput="T1w_acpc_dc_restore_1mm"
AtlasTransform="acpc_dc2standard"
InverseAtlasTransform="standard2acpc_dc"
AtlasSpaceT1wImage="T1w_restore"
T1wRestoreImage="T1w_acpc_dc_restore"
OrginalT1wImage="T1w"
T1wImageBrainMask="brainmask_fs"
InitialT1wTransform="acpc.mat"
dcT1wTransform="T1w_dc.nii.gz"
BiasField="BiasField_acpc_dc"
OutputT1wImage="T1w_acpc_dc"
OutputT1wImageRestore="T1w_acpc_dc_restore"
OutputT1wImageRestoreBrain="T1w_acpc_dc_restore_brain"
OutputMNIT1wImage="T1w"
OutputMNIT1wImageRestore="T1w_restore"
OutputMNIT1wImageRestoreBrain="T1w_restore_brain"
OutputOrigT1wToT1w="OrigT1w2T1w.nii.gz"
OutputOrigT1wToStandard="OrigT1w2standard.nii.gz" #File was OrigT2w2standard.nii.gz, regnerate and apply matrix
BiasFieldOutput="BiasField"
Jacobian="NonlinearRegJacobians.nii.gz"
# Expand the relative names into absolute per-subject paths.
T1wFolder="$StudyFolder"/"$Subject"/"$T1wFolder"
AtlasSpaceFolder="$StudyFolder"/"$Subject"/"$AtlasSpaceFolder"
FreeSurferFolder="$T1wFolder"/"$FreeSurferFolder"
AtlasTransform="$AtlasSpaceFolder"/xfms/"$AtlasTransform"
InverseAtlasTransform="$AtlasSpaceFolder"/xfms/"$InverseAtlasTransform"
#Conversion of FreeSurfer Volumes and Surfaces to NIFTI and GIFTI and Create Caret Files and Registration
log_Msg "Conversion of FreeSurfer Volumes and Surfaces to NIFTI and GIFTI and Create Caret Files and Registration"
log_Msg "RegName: ${RegName}"
# Echo the full command line before running it (aids log debugging).
echo "\n T1_FreeSurfer2CaretConvertAndRegisterNonlinearWithoutT2.sh "$StudyFolder" "$Subject" "$T1wFolder" "$AtlasSpaceFolder" "$NativeFolder" "$FreeSurferFolder" "$FreeSurferInput" "$T1wRestoreImage" "$SurfaceAtlasDIR" "$HighResMesh" "$LowResMeshes" "$AtlasTransform" "$InverseAtlasTransform" "$AtlasSpaceT1wImage" "$T1wImageBrainMask" "$FreeSurferLabels" "$GrayordinatesSpaceDIR" "$GrayordinatesResolutions" "$SubcorticalGrayLabels" "$RegName""
T1_FreeSurfer2CaretConvertAndRegisterNonlinearWithoutT2.sh "$StudyFolder" "$Subject" "$T1wFolder" "$AtlasSpaceFolder" "$NativeFolder" "$FreeSurferFolder" "$FreeSurferInput" "$T1wRestoreImage" "$SurfaceAtlasDIR" "$HighResMesh" "$LowResMeshes" "$AtlasTransform" "$InverseAtlasTransform" "$AtlasSpaceT1wImage" "$T1wImageBrainMask" "$FreeSurferLabels" "$GrayordinatesSpaceDIR" "$GrayordinatesResolutions" "$SubcorticalGrayLabels" "$RegName"
#Create FreeSurfer ribbon file at full resolution
log_Msg "Create FreeSurfer ribbon file at full resolution"
echo "\n "$PipelineScripts"/CreateRibbon.sh "$StudyFolder" "$Subject" "$T1wFolder" "$AtlasSpaceFolder" "$NativeFolder" "$AtlasSpaceT1wImage" "$T1wRestoreImage" "$FreeSurferLabels""
"$PipelineScripts"/CreateRibbon.sh "$StudyFolder" "$Subject" "$T1wFolder" "$AtlasSpaceFolder" "$NativeFolder" "$AtlasSpaceT1wImage" "$T1wRestoreImage" "$FreeSurferLabels"
| true |
cde154e7b405dd7c0ac85c9c1cbd9c838d048d91 | Shell | identitymonk/istio-tutorial | /scripts/run.sh | UTF-8 | 297 | 2.8125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Resolve the customer route exposed through the Istio ingress gateway and
# hit it in a loop (useful for generating traffic while watching rollouts).
QUERY=$(oc get virtualservice/customer -o jsonpath='{.spec.http[0].match[0].uri.exact}')
GATEWAY_URL="http://$(oc get route istio-ingressgateway -n istio-system --template='{{ .spec.host }}')$QUERY"
echo "ENDPOINT: $GATEWAY_URL"
sleep 1
# Poll the endpoint roughly twice a second until interrupted.
# Quote the URL so shell globbing/splitting cannot mangle it.
while true
do curl "$GATEWAY_URL"
sleep .4
done
| true |
1a7eb010b06e726b2a02b329f92ee6b44fe7523a | Shell | MaxwellDeJong/lab_notebook_automation | /update_year.sh | UTF-8 | 495 | 3.8125 | 4 | [] | no_license | year=$1
# Regenerate ../<year>/<year>.tex, which \input's one LaTeX file per month
# listed in ../<year>/months.txt. The year is $1 (already stored in $year).
if [ $# -eq 0 ]; then
    echo "Argument needed."
    exit 1
fi
# year_update_needed.sh prints 1 when the .tex file is out of date.
update=$(./year_update_needed.sh "$year")
months_file=../$year/months.txt
year_file=../$year/$year.tex
year_file_exists=0
if [ -f "$year_file" ]; then
    year_file_exists=1
fi
# Rebuild when an update is needed or the file does not exist yet.
if [ "$update" = 1 ] || [ "$year_file_exists" = 0 ]; then
    rm -f "$year_file"
    printf '%s\n' "\\part{$year}" >> "$year_file"
    echo "" >> "$year_file"
    # -r keeps backslashes in month names intact; $year used consistently
    # instead of mixing in $1.
    while read -r p; do
        printf '%s\n' "\\input{$year/$p/$p}" >> "$year_file"
    done < "$months_file"
fi
| true |
34e77961c0a41b9588847dd0e25cc02e8de99107 | Shell | wilson-tim/stellahomedw | /DWLIVE/stella/load_air_control.ksh | UTF-8 | 675 | 2.578125 | 3 | [] | no_license | #!/usr/bin/ksh
# Stella loader wrapper: set environment, move incoming FTP files into
# place, then run load_air.ksh appending output to a date-stamped log.
export JAVA_HOME="/usr/lib/sun/current/jre"
. /home/dw/bin/set_oracle_variables.ksh
dbase_id=DWL
home="/home/dw/"$dbase_id # home path
stella_path=$home"/stella" # Path for the app files
# move and rename files...
/home/dw/bin/move-ftp-tmp.sh
# run main mapping script and redirect output to date-stamped log file
echo "####################################################" >> $home/logs/stella/load_air`date +%Y%m%d`.log
#$stella_path/load_air.ksh >$home/logs/stella/load_air`date +%Y%m%d`.log 2 >>$home/logs/stella/load_air`date +%Y%m%d`.log
# NOTE(review): '&>>' is a bash extension; confirm the /usr/bin/ksh on this
# host accepts it (older ksh parses '&' as "run in background").
$stella_path/load_air.ksh &>> $home/logs/stella/load_air`date +%Y%m%d`.log
| true |
23349d055e6d5290782bfc9b620cb48a597b4ded | Shell | KesavanKing/test-infra | /hack/update-deps.sh | UTF-8 | 1,753 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run dep ensure and generate bazel rules.
#
# Usage:
# update-deps.sh <ARGS>
#
# The args are sent to dep ensure -v <ARGS>
set -o nounset
set -o errexit
set -o pipefail
set -o xtrace
# Always operate from the repository root.
TESTINFRA_ROOT=$(git rev-parse --show-toplevel)
cd "${TESTINFRA_ROOT}"
trap 'echo "FAILED" >&2' ERR
# dep itself has a problematic testdata directory with infinite symlinks which
# makes bazel sad: https://github.com/golang/dep/pull/1412
# dep should probably be removing it, but it doesn't:
# https://github.com/golang/dep/issues/1580
rm -rf vendor/github.com/golang/dep/internal/fs/testdata
# go-bindata does too, and is not maintained ...
rm -rf vendor/github.com/jteeuwen/go-bindata/testdata
# docker has a contrib dir with nothing we use in it, dep will retain the licenses
# which includes some GPL, so we manually prune this.
# See https://github.com/kubernetes/steering/issues/57
rm -rf vendor/github.com/docker/docker/contrib
bazel run //:dep -- ensure -v "$@"
# 'dep ensure' may restore the directories pruned above, so prune again.
rm -rf vendor/github.com/golang/dep/internal/fs/testdata
rm -rf vendor/github.com/jteeuwen/go-bindata/testdata
rm -rf vendor/github.com/docker/docker/contrib
hack/update-bazel.sh
echo SUCCESS
| true |
87a06427ae2fbc40888266c2ed0e6352204b4fe0 | Shell | hoyunchoi/KModel | /SA_KM.sh | UTF-8 | 812 | 2.890625 | 3 | [] | no_license | #! /bin/bash
# Compile and run the SA_KM simulation with parameters from the command
# line: $1=network size, $2=SE rate, $3=XQX rate, $4=T, $5=E, $6=core count.
# Network input
networkSize=$1
meanDegree=10
# Rate input (defaults shown in the trailing comments)
SE=$2 #0.11
E_AI=0.39
pA=0.36
IQI=0.33
AR=0.11
QICR=0.08
tau=14
XQX=$3 #0.09
# Simulated Annealing (SA) input
T=$4
E=$5
# Core num input
coreNum=$6
# Unique run identifier used to name the compiled binary.
name=N${networkSize}M${meanDegree}SE${SE}XQX${XQX}T${T}E${E}C${coreNum}
# Debug build: warnings, symbols and AddressSanitizer.
# NOTE(review): ${common}, ${libDir}, ${binDir} and ${srcDir} are never set
# in this script -- presumably exported by the caller; confirm.
function debugBuild {
g++ -std=c++17 -Wall -g -fsanitize=address\
-I ${common} -I ${libDir}\
-o ${binDir}/${name}\
${srcDir}/main-SA_KM.cpp
}
# Optimized release build.
function build {
g++ -std=c++17 -O3 -flto -march=native\
-I ${common} -I ${libDir}\
-o ${binDir}/${name}\
${srcDir}/main-SA_KM.cpp
}
#* Compile the source files
# build
debugBuild
#* Run
# NOTE(review): the build writes ${binDir}/${name} but this executes
# ./bin/${name}.out -- verify that the output path and '.out' suffix match.
./bin/${name}.out ${networkSize} ${meanDegree} ${SE} ${E_AI} ${pA} ${IQI} ${AR} ${QICR} ${XQX} ${tau} ${T} ${E}
rm bin/${name}.out
| true |
a08157c8ff56ac466c9fcd118e19d9713d76333e | Shell | aliawa/dotfiles | /bashrc | UTF-8 | 3,592 | 3.421875 | 3 | [] | no_license | # .bashrc
# if not running interactively, don't do anything
[ -z "$PS1" ] && return
# ----------------------------------------------------------------------
# Basic settings
# ----------------------------------------------------------------------
# bash_sensible -> https://github.com/mrzool/bash-sensible.git
[ -f ~/Tools/bash-sensible/sensible.bash ] && source ~/Tools/bash-sensible/sensible.bash
# RUPA/z -> https://github.com/rupa/z (frecency-based directory jumping)
[ -f ~/Tools/z/z.sh ] && source $HOME/Tools/z/z.sh
# disable terminal flow control (Ctrl-S, Ctrl-Q)
stty -ixon
# Prepend a directory to PATH, skipping it when it does not exist or is
# already present (avoids duplicate PATH entries).
addToPath() {
    if [ -d "$1" ] ; then
        # Wrap PATH in ':' so the membership test matches at either end.
        if [[ ":$PATH:" != *":$1:"* ]]; then
            export PATH="$1:${PATH}"
        fi
    fi
    # Always succeed: the old 'cond && export' form returned 1 when the
    # directory was already on PATH, which can trip callers under 'set -e'.
    return 0
}
addToPath $HOME/pan_tools
addToPath $HOME/bin
export PATH
# ----------------------------------------------------------------------
# Linux Alias
# ----------------------------------------------------------------------
alias ls='ls --color=auto'
alias ll='ls -l --color=auto'
alias ldir='ls --color=auto -dl */'
alias l.='ls -lad --group-directories-first .[^.]*'
alias latest='ls -l -F -t | head'
# Prefer vimx (vim built with X clipboard support) when installed.
if [[ -e /usr/bin/vimx ]]; then
alias vi='vimx'
else
alias vi='vim'
fi
alias minicom='sudo minicom -m -c on'
alias grep='grep --color=auto'
alias info='info --vi-keys'
# ----------------------------------------------------------------------
# exports
# ----------------------------------------------------------------------
export EDITOR=vim # set vim as default editor
export TERM="xterm-256color"
export LC_COLLATE=C # make ls sort files with dot files first
# ----------------------------------------------------------------------
# custom colors
# ----------------------------------------------------------------------
# Colored man pages (see man 5 terminfo)
# mb -- begin blinking
# md -- begin bold
# me -- end mode
# se -- end standout-mode
# ue -- end underline
# us -- begin underline
# so -- standout statusbar/search -> magenta
# Wrap man(1) so less renders bold/underline/standout in color; the
# capabilities being overridden are documented in the comment block above.
man() {
    LESS_TERMCAP_md=$'\E[00;38;5;73m' \
    LESS_TERMCAP_me=$'\E[0m' \
    LESS_TERMCAP_se=$'\E[0m' \
    LESS_TERMCAP_ue=$'\E[0m' \
    LESS_TERMCAP_us=$'\E[04;38;5;146m' \
    LESS_TERMCAP_so=$'\E[01;35;47m' \
    command man "$@"
}
# Colored ls output -> https://github.com/seebi/dircolors-solarized
if [ -f ~/Tools/dircolors-solarized/dircolors.ansi-dark ]; then
eval `dircolors ~/Tools/dircolors-solarized/dircolors.ansi-dark`
fi
# ----------------------------------------------------------------------
# fuzzy finder
# ----------------------------------------------------------------------
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
# Prefer the gtags file list when inside a GNU GLOBAL project, else use fd.
export FZF_DEFAULT_COMMAND='if [ -f gtags.files ]; then cat gtags.files; else fd --type f; fi'
export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
# fzf path-completion hook: list entries under $1 (hidden files included,
# symlinks followed), skipping .git.
_fzf_compgen_path() {
    fd --hidden --follow --exclude '.git' . "$1"
}
# fzf directory-completion hook: same as above, directories only.
_fzf_compgen_dir() {
    fd --type d --hidden --follow --exclude '.git' . "$1"
}
# ----------------------------------------------------------------------
# Tmux
# ----------------------------------------------------------------------
# Fix tmux DISPLAY env variable (?? Why ??)
# NOTE(review): pushes the current DISPLAY into every existing tmux session,
# presumably so X clients started there keep working after reattach -- confirm.
if [ -n "$DISPLAY" ]; then
for name in `tmux ls 2> /dev/null | sed 's/:.*//'`; do
tmux setenv -g -t $name DISPLAY $DISPLAY
done
fi
# ----------------------------------------------------------------------
# Local settings
# ----------------------------------------------------------------------
[ -f ~/.bash_local ] && source ~/.bash_local
| true |
c24be63a371c4ab0e82868b8118c1a639b1d3e34 | Shell | 9fans/plan9port | /src/cmd/upas/send/tryit | UTF-8 | 584 | 2.859375 | 3 | [
"bzip2-1.0.6",
"LPL-1.02",
"MIT"
] | permissive | #!/bin/sh
# Smoke-test mail delivery: a plain mailbox, a Forward file and a Pipe file,
# plus a bang-path address, then inspect the results.
set -x
# Empty the plain test mailbox.
> /usr/spool/mail/test.local
# A mailbox starting with "Forward to" redirects delivery.
echo "Forward to test.local" > /usr/spool/mail/test.forward
# A mailbox starting with "Pipe to" feeds the message to a command.
echo "Pipe to cat > /tmp/test.mail" > /usr/spool/mail/test.pipe
chmod 644 /usr/spool/mail/test.pipe
mail test.local <<EOF
mailed to test.local
EOF
mail test.forward <<EOF
mailed to test.forward
EOF
mail test.pipe <<EOF
mailed to test.pipe
EOF
mail dutoit!bowell!test.local <<EOF
mailed to dutoit!bowell!test.local
EOF
# Give the mailer time to deliver before inspecting.
sleep 60
ls -l /usr/spool/mail/test.*
ls -l /tmp/test.mail
echo ">>>test.local<<<"
cat /usr/spool/mail/test.local
echo ">>>test.mail<<<"
cat /tmp/test.mail
| true |
441d24da060b4b85462a1557c14d54c9b8c23891 | Shell | natesilva/wireguard-qnd | /scripts/build-user-config | UTF-8 | 2,080 | 4.15625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Generate the Wireguard configuration file for a user.
# $1 must be the user name; a matching file must exist under <base>/users/.
if [ -z "${1}" ]; then
echo 'You must provide the name of the user!'
exit 1
fi
# The base directory for the config scripts (parent of this script's dir,
# resolved with symlinks followed)
dir=$(cd -P -- "$(dirname -- "${0}")" && pwd -P)
dir=$dir/..
scriptsdir=$dir/scripts
# Main configuration file with our settings
vpnconf=$dir/vpn.conf
username=$1
userconf=$dir/users/$username
if [ ! -e "$userconf" ]; then
echo 'User not found'
exit 1
fi
# Fill in the user config template read from stdin: read the per-user and
# VPN-wide values via get-config-value and substitute the !!placeholder!!
# tokens, writing the finished config to stdout.
# Input (pipe): The user template
# NOTE(review): values containing '#' would break the sed expressions below
# (it is the delimiter) -- confirm get-config-value never emits one.
write_user_config() {
# Gather all vars we need for the main part of the server config
username=$(basename "$userconf")
# Per-user values: numeric host id, private key, preshared key.
id=$("$scriptsdir/get-config-value" "$userconf" id)
key=$("$scriptsdir/get-config-value" "$userconf" key)
psk=$("$scriptsdir/get-config-value" "$userconf" psk)
serverpub=$("$scriptsdir/get-config-value" "$vpnconf" serverpub)
ipv4=$("$scriptsdir/get-config-value" "$vpnconf" ipv4)
ipv4mask=$("$scriptsdir/get-config-value" "$vpnconf" ipv4mask)
# The user's address: VPN subnet with the last octet/group replaced by id.
ipv4host=${ipv4%.*}.${id}
ipv6=$("$scriptsdir/get-config-value" "$vpnconf" ipv6)
ipv6mask=$("$scriptsdir/get-config-value" "$vpnconf" ipv6mask)
ipv6host=${ipv6%\:*}:${id}
serverip=$("$scriptsdir/get-config-value" "$vpnconf" serverip)
port=$("$scriptsdir/get-config-value" "$vpnconf" port)
dns=$("$scriptsdir/get-config-value" "$vpnconf" dns)
route=$("$scriptsdir/get-config-value" "$vpnconf" route)
# NOTE(review): !!serverkey!! is filled with the username -- looks like a
# label rather than a key; verify against templates/user.
sed -e "s#!!serverkey!!#${username}#g" \
-e "s#!!serverpub!!#${serverpub}#g" \
-e "s#!!ipv4host!!#${ipv4host}#g" \
-e "s#!!ipv4mask!!#${ipv4mask}#g" \
-e "s#!!ipv6host!!#${ipv6host}#g" \
-e "s#!!ipv6mask!!#${ipv6mask}#g" \
-e "s#!!serverip!!#${serverip}#g" \
-e "s#!!port!!#${port}#g" \
-e "s#!!dns!!#${dns}#g" \
-e "s#!!route!!#${route}#g" \
-e "s#!!key!!#${key}#g" \
-e "s#!!psk!!#${psk}#g" \
-e "s#!!username!!#${username}#g"
}
# user config template
usertemplate=$dir/templates/user
# Render the template through the substitution function above to stdout.
write_user_config < "$usertemplate"
| true |
3a31e3a32f9f808b155830ca2cd7e6a1e4fb3bf0 | Shell | bendooru/bachelor | /comp_avg_times | UTF-8 | 198 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Average the per-key times in $DIR/all_times (column 1 = key, column 2 =
# time) and write "key mean" lines to $DIR/avg_times.
DIR=$1
# Fail fast: with an empty $1 the old code read/wrote paths rooted at "/".
if [ -z "$DIR" ]; then
    echo "usage: $0 <dir>" >&2
    exit 1
fi
awk '
    {
        arr[$1] += $2
        count[$1] += 1
    }
    END {
        for (a in arr) {
            printf "%i %.3f\n", a, arr[a] / count[a]
        }
    }
' "${DIR}/all_times" > "${DIR}/avg_times"
| true |
34a0c102bd69e5c74c47e49a917c24d6955952da | Shell | ScavengerDD/shell-learning | /08_03_let.sh | UTF-8 | 691 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Syntax of the `let` command: let <assignment-expression>
# It is functionally equivalent to (( )).
# Monitor a web service: if two probes fail, raise an alarm (example use of `let`).
CheckUrl(){ # define the monitoring function
timeout=5 # wget timeout in seconds; give up on a request after this
fails=0   # failed probe count
success=0 # successful probe count
while true
do
# Probe Baidu with a single attempt; the original URL "baiu.com" was a
# typo for the Baidu site the comments describe.
wget --timeout=$timeout --tries=1 http://baidu.com -q -O /dev/null
if [ $? -ne 0 ]
then
let fails=fails+1
else
let success+=1
fi
if [ $success -ge 1 ] # one success is enough to report the site is up
then
echo success
exit 0
fi
if [ $fails -ge 2 ] # two failures -> alarm and exit non-zero
then
Critical="sys is down."
echo $Critical
exit 2
fi
done
}
CheckUrl
| true |
622aca21c6e4f3e8da668fe9478a7b7fade45f5d | Shell | bluebosh/bosh-linux-stemcell-builder | /stemcell_builder/stages/system_google_packages/apply.sh | UTF-8 | 1,378 | 3.359375 | 3 | [
"LGPL-2.0-or-later",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LicenseRef-scancode-unicode-mappings",
"LGPL-3.0-only",
"LGPL-2.1-only",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-or-later",
"LicenseRef-scancode-other-permissive",
"Artistic-1.0",
"MIT",
"LicenseRef-scancode-public-domain-d... | permissive | #!/usr/bin/env bash
# -*- encoding: utf-8 -*-
# Copyright (c) 2014 Pivotal Software, Inc. All Rights Reserved.
# Stemcell stage: install the Google guest environment into the chroot and
# drop the hostname-setting dhclient hook (see issue link at the bottom).
set -e
base_dir="$(readlink -nf "$(dirname "$0")/../..")"
# shellcheck source=../../lib/prelude_apply.bash
# NOTE(review): $assets_dir, $chroot, get_os_type, pkg_mgr and run_in_chroot
# are presumably provided by prelude_apply.bash -- confirm.
source "$base_dir/lib/prelude_apply.bash"
# Configure the Google guest environment
# https://github.com/GoogleCloudPlatform/compute-image-packages#configuration
cp "$assets_dir/instance_configs.cfg.template" "$chroot/etc/default/"
mkdir -p "$chroot/tmp/google"
declare set_hostname_path
os_type="$(get_os_type)"
if [[ "${os_type}" == "ubuntu" ]] ; then
pkg_mgr install "gce-compute-image-packages"
set_hostname_path=/etc/dhcp/dhclient-exit-hooks.d/google_set_hostname
elif [ "${os_type}" == "rhel" ] || [ "${os_type}" == "centos" ]; then # http://tldp.org/LDP/abs/html/ops.html#ANDOR TURN AND FACE THE STRANGE (ch-ch-changes)
# Copy google daemon packages into chroot
cp -R "$assets_dir/google-centos/"*.rpm "$chroot/tmp/google/"
run_in_chroot "${chroot}" "yum install -y python-setuptools python-boto"
run_in_chroot "${chroot}" "yum --nogpgcheck install -y /tmp/google/*.rpm"
set_hostname_path=/etc/dhcp/dhclient.d/google_hostname.sh
else
echo "Unknown OS '${os_type}', exiting"
exit 2
fi
# See https://github.com/cloudfoundry/bosh/issues/1399 for context
run_in_chroot "${chroot}" "rm --interactive=never ${set_hostname_path}"
| true |
d92afd6bc73c981bec8095572388a4a308b72520 | Shell | Strongbyte-ES/app-bin | /bin/git-commit.sh | UTF-8 | 94 | 3 | 3 | [] | no_license |
# Stage everything, commit with the message given as $1, and push.
if [ $# -ne 1 ]
then
    # Tell the caller what went wrong instead of exiting silently.
    echo "usage: $(basename "$0") <commit-message>" >&2
    exit 1
fi
message="$1"
git add -A
git commit -m "$message"
git push
| true |
575d87a55390a86873e803404366c1f6252d673e | Shell | charmed-kubernetes/layer-tigera-secure-ee | /build-resources.sh | UTF-8 | 2,006 | 4.15625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -eux
# This script will fetch binaries and create resource tarballs for use by
# charm-[push|release]. The arm64 binaries are not available upstream for
# v2.6, so we must build them and host them somewhere ourselves. The steps
# for doing that are documented here:
#
# https://gist.github.com/kwmonroe/9b5f8dac2c17f93629a1a3868b22d671
# Supported calico architectures
arches="amd64 arm64"
# Release tag of projectcalico/cni-plugin whose binaries are downloaded.
calico_cni_version="v3.4.0"
# Download (http*) or scp the binary at $1 into ./<filename> and sanity
# check it: it must be an executable larger than 20MB. Exits the script on
# any failure. (The ${filename} references below restore the variable that
# was computed but then garbled to "$(unknown)" in the checked-in copy.)
function fetch_and_validate() {
  # fetch a binary and make sure it's what we expect (executable > 20MB)
  min_bytes=20000000
  location="${1-}"
  if [ -z "${location}" ]; then
    echo "$0: Missing location parameter for fetch_and_validate"
    exit 1
  fi
  # remove everything up until the last slash to get the filename
  filename="${location##*/}"
  case ${location} in
    http*)
      fetch_cmd="wget ${location} -O ./${filename}"
      ;;
    *)
      fetch_cmd="scp ${location} ./${filename}"
      ;;
  esac
  ${fetch_cmd}
  # Make sure we fetched something big enough
  actual_bytes=$(wc -c < "${filename}")
  if [ "$actual_bytes" -le "$min_bytes" ]; then
    echo "$0: ${filename} should be at least ${min_bytes} bytes"
    exit 1
  fi
  # Make sure we fetched a binary
  if ! file "${filename}" 2>&1 | grep -q 'executable'; then
    echo "$0: ${filename} is not an executable"
    exit 1
  fi
}
# Build one calico-cni-<arch>.tar.gz per architecture containing the two CNI
# binaries (renamed to their generic names) plus a BUILD_INFO stamp.
for arch in ${arches}; do
rm -rf resource-build-$arch
mkdir resource-build-$arch
pushd resource-build-$arch
fetch_and_validate \
https://github.com/projectcalico/cni-plugin/releases/download/$calico_cni_version/calico-$arch
fetch_and_validate \
https://github.com/projectcalico/cni-plugin/releases/download/$calico_cni_version/calico-ipam-$arch
# Strip the -<arch> suffix so the tarball contents have fixed names.
mv calico-$arch calico
mv calico-ipam-$arch calico-ipam
echo "calico cni-plugin $calico_cni_version" > BUILD_INFO
chmod +x calico calico-ipam
tar -zcvf ../calico-cni-$arch.tar.gz .
popd
rm -rf resource-build-$arch
done
# Empty placeholder image tarballs -- presumably filled/expected elsewhere
# by charm-push/release; confirm.
touch calicoctl-image.tar.gz
touch calico-node-image.tar.gz
| true |
9666352e44e40e527dfa5f5d75b09caf12ef9b77 | Shell | catacute/Raspberry-pi-instant-camera-thermal-printer | /camera.py | UTF-8 | 1,426 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Raspberry Pi instant camera: the shutter button takes a photo with
# raspistill and pipes it to the printer via lp; holding the shutdown
# button for 2 seconds powers the Pi off. Pin numbers are BCM (gpio -g).
SHUTTER=16
LED=21
LED2=20
LEDSYS=26
SHUTDOWN=19
# Initialize GPIO states: buttons as inputs with pull-ups, LEDs as outputs.
gpio -g mode $SHUTTER up
gpio -g mode $LED out
gpio -g mode $LED2 out
gpio -g mode $LEDSYS out
gpio -g mode $SHUTDOWN up
# Flash SYSTEM LED on startup (on off on)
for i in `seq 1 5`;
do
gpio -g write $LEDSYS 1
sleep 0.2
gpio -g write $LEDSYS 0
sleep 0.2
gpio -g write $LEDSYS 1
done
# Main poll loop: buttons read 0 when pressed (pull-ups active).
while :
do
# Shutter button
if [ $(gpio -g read $SHUTTER) -eq 0 ]; then
gpio -g write $LED 1
gpio -g write $LED2 1
sleep 0.2
gpio -g write $LED 0
gpio -g write $LED2 0
# Capture a 512x384 still and send it straight to the default printer.
raspistill -n -t 200 -w 512 -h 384 -o - | lp
sleep 1
# Wait for button release before accepting another press.
while [ $(gpio -g read $SHUTTER) -eq 0 ]; do continue; done
gpio -g write $LED 0
gpio -g write $LED2 0
fi
# Shutdown button script
if [ $(gpio -g read $SHUTDOWN) -eq 0 ]; then
# hold for 2 seconds
starttime=$(date +%s)
while [ $(gpio -g read $SHUTDOWN) -eq 0 ]; do
if [ $(($(date +%s)-starttime)) -ge 2 ]; then
shutdown -h now
fi
done
fi
done
| true |
fe1a386e62cd11cf6ef94848e8a34dc1c7756ac7 | Shell | ryangormley81/NishiDev | /get_NishiDev.sh | UTF-8 | 765 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Rebecca Nishide, 09/08/2020
# contact: rnnishide@gmail.com for key
# check for lxi (optional dependency)
# 'command -v' replaces the deprecated 'which -a'; the old comparison
# against the literal string "lxi not found" was dead code, since 'which'
# prints nothing when the tool is missing.
LXI=$(command -v lxi)
echo "$LXI"
if [ -z "$LXI" ]
then
    echo "lxi not installed"
    echo "lxi is optional... keep going"
    echo ""
fi
# check for python3 (required)
PYTHON=$(command -v python3)
echo "$PYTHON"
if [ -z "$PYTHON" ]
then
    echo "make sure python3 is installed properly"
    # Exit non-zero so callers can detect the failed precondition
    # (the bare 'exit' used before reported success).
    exit 1
fi
echo "python3 requirement satisfied"
# get pyvisa, other lib already included
sudo pip3 install pyvisa
sudo pip3 install pyvisa-py
# clone from github with https
# need to request access from me to use.
git clone https://github.com/rnnisi/NishiDev.git
| true |
edb63ad6278e2cff799a60f45694626b052af931 | Shell | dude123r24/migration | /Postgres-Oracle/migration_checks.sh | UTF-8 | 4,196 | 3.828125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# migration_checks.sh
# Checks all the folders are present. All the relevant files are present. The files have the relevant data to populate variables.
# NOTE(review): log_this, script_name, MIGRATION_FOLDER, ORA2PG_BIN and
# L_DB_NAME are presumably provided by migration_common_settings.sh -- confirm.
source migration_common_settings.sh
# Create a unique, timestamped log file unless the caller already set one.
if [ -z ${L_LOGFILE} ]; then
export L_UNQ_ID=$(date '+%Y%m%d%H%M%S')
script_name `basename "$0"`
export L_LOGFILE="$MIGRATION_FOLDER/${FILENAME}_${L_UNQ_ID}.log"
fi
log_this INFO "Performing some basic checks ..."
# Each check logs ERROR and sets L_RC=1 on failure, SUCCESS otherwise.
[ ! -f ${ORA2PG_BIN} ] && { log_this ERROR "ora2pg executable not found at ${ORA2PG_BIN}. If its installed at a different location update migration_common_settings.sh, variable ORA2PG_BIN" ; export L_RC=1; } || log_this SUCCESS "ora2pg executable found at ${ORA2PG_BIN}"
[ ! -d ${MIGRATION_FOLDER} ] && { log_this ERROR "${MIGRATION_FOLDER} does not exist." ; export L_RC=1; } || log_this SUCCESS "Migration folder, ${MIGRATION_FOLDER} exist"
[ ! -d ${MIGRATION_FOLDER}/exclusions ] && { log_this ERROR "${MIGRATION_FOLDER}/exclusions does not exist." ; export L_RC=1; } || log_this SUCCESS "Migration folder, ${MIGRATION_FOLDER}/exclusions exist"
[ ! -d ${MIGRATION_FOLDER}/inclusions ] && { log_this ERROR "${MIGRATION_FOLDER}/inclusions does not exist." ; export L_RC=1; } || log_this SUCCESS "Migration folder, ${MIGRATION_FOLDER}/inclusions exist"
[ ! -d ${MIGRATION_FOLDER}/transformations ] && { log_this ERROR "${MIGRATION_FOLDER}/transformations does not exist." ; export L_RC=1; } || log_this SUCCESS "Migration folder, ${MIGRATION_FOLDER}/transformations exist"
if [ ! -f ${MIGRATION_FOLDER}/${L_DB_NAME}_schemas_to_migrate.txt ]; then
log_this ERROR "${MIGRATION_FOLDER}/\${L_DB_NAME}_schemas_to_migrate.txt file does not exist. This file needs to be created with Oracle and postgres details of the schema/s to be migrated"
log_this INFO "Format should be #POSTGRES_HOST_NAME, POSTGRES_PORT, POSTGRES_DATABASE_NAME, POSTGRES_SCHEMA_NAME, POSTGRES_SCHEMA_PASSWORD, POSTGRES_CONNECTION_USERNAME, POSTGRES_CONNECTION_PASSWORD, ORACLE_TNSNAME, ORACLE_SCHEMA_NAME, ORACLE_SCHEMA_PASSWORD "
export L_RC=1
else
log_this SUCCESS "File ${MIGRATION_FOLDER}/${L_DB_NAME}_schemas_to_migrate.txt exists"
export L_RC=0
fi
# Checking if the ${MIGRATION_FOLDER}/${L_DB_NAME}_schemas_to_migrate.txt file has all entries populated for each schema
if [ $L_RC = 0 ]; then
for i in $(cat ${MIGRATION_FOLDER}/${L_DB_NAME}_schemas_to_migrate.txt | grep -i ",${L_DB_NAME}_db," | grep -v "#"); do
L_POSTGRES_HOST_NAME=$(echo $i | cut -d',' -f1)
L_POSTGRES_PORT=$(echo $i | cut -d',' -f2)
L_POSTGRES_DATABASE_NAME=$(echo $i | cut -d',' -f3)
L_POSTGRES_SCHEMA_NAME=$(echo $i | cut -d',' -f4)
L_POSTGRES_SCHEMA_PASSWORD=$(echo $i | cut -d',' -f5)
L_POSTGRES_CONNECTION_USERNAME=$(echo $i | cut -d',' -f6)
L_POSTGRES_CONNECTION_PASSWORD=$(echo $i | cut -d',' -f7)
L_ORACLE_TNSNAME=$(echo $i | cut -d',' -f8 | tr '[:lower:]' '[:upper:]')
L_ORACLE_CONNECTION_USERNAME=$(echo $i | cut -d',' -f9 | tr '[:upper:]' '[:lower:]')
L_ORACLE_SCHEMA_PASSWORD=$(echo $i | cut -d',' -f10)
if [ -z ${L_POSTGRES_HOST_NAME} ] || [ -z ${L_POSTGRES_DATABASE_NAME} ] || [ -z ${L_POSTGRES_SCHEMA_NAME} ] || [ -z ${L_POSTGRES_SCHEMA_PASSWORD} ] || [ -z ${L_POSTGRES_CONNECTION_USERNAME} ] || [ -z ${L_POSTGRES_CONNECTION_PASSWORD} ] || [ -z ${L_ORACLE_TNSNAME} ] || [ -z ${L_ORACLE_CONNECTION_USERNAME} ] || [ -z ${L_ORACLE_SCHEMA_PASSWORD} ]; then
log_this ERROR "Please check file ${MIGRATION_FOLDER}/${L_DB_NAME}_schemas_to_migrate.txt for data related to Postgres schema name = $L_POSTGRES_SCHEMA_NAME"
continue
else
log_this SUCCESS "Variables for schema ${L_POSTGRES_SCHEMA_NAME} are populated"
fi
psql -t -U ${L_POSTGRES_CONNECTION_USERNAME} -h ${L_POSTGRES_HOST_NAME} -p $L_POSTGRES_PORT ${L_POSTGRES_DATABASE_NAME} -c "SELECT CURRENT_DATE ;" 2>&1 > /dev/null
check_result "$?" "Checking connectivity for schema ${L_POSTGRES_SCHEMA_NAME}"
done
else
log_this ERROR "Not checking contents of the \${L_DB_NAME}_schemas_to_migrate.txt file"
fi
exit $L_RC
# End migration_checks.sh
| true |
675e37293429b3a9992a51588a718387ea8c6e8a | Shell | iphelf/Comparator | /single.sh | UTF-8 | 275 | 3.28125 | 3 | [] | no_license | g++ std.cpp -o std.out
g++ my.cpp -o my.out
# Input file: first argument when given, otherwise in.txt.
f="in.txt"
if (($#>=1))
then f=$1
fi
# Run the reference and candidate programs on the same input.
# FIX: "$f" is quoted so input paths containing spaces work.
./std.out < "$f" > std.txt
./my.out < "$f" > my.txt
# Compare outputs; the diff (and any diff errors) is kept in diff.txt.
if diff std.txt my.txt &> diff.txt
then
echo -e " \033[32m Accepted \033[0m"
else
echo -e "\033[31m Wrong Answer \033[0m"
fi
# FIX: -f avoids an error message if compilation failed and no .out exists.
rm -f ./*.out
| true |
f25c3c06b73e068c672c26422c75f524b73c5614 | Shell | bensouille/perudo | /perudosrv.sh | UTF-8 | 6,034 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# set -x
####PERRUDO SRV####
#### Variable initialisation ####
# number of connected users (legacy variant, kept for reference)
# userco=`who | wc -l`
nomjoueur=`whoami`
# cursor-positioning helper (tput cup ROW COL); only used by the
# commented-out "${line} R C" calls scattered through the script
line='tput cup'
#+ normal mode (reset all terminal attributes)
ResetColor="$(tput sgr0)"
# standout ("bold") mode on
bold="$(tput smso)"
# standout mode off
offbold="$(tput rmso)"
# bright colours
#+ red
Red="$(tput bold ; tput setaf 1)"
#+ green
Green="$(tput bold ; tput setaf 2)"
#+ yellow
Yellow="$(tput bold ; tput setaf 3)"
#+ blue
Blue="$(tput bold ; tput setaf 4)"
#+ cyan
BlueCyan="$(tput bold ; tput setaf 6)"
#### End of variable initialisation ####
# Functions
# Entry point: clear stale FIFOs, then loop forever -- refresh the
# connected-client count, draw the banner, prompt for a player count and
# run the validation chain.
main()
{
    rm -f /tmp/*
    while : ; do
        userco=$(top -n1 -b | grep perudoclt | awk '{ print $2 }' | wc -l)
        intro
        # first prompt: ask how many players will take part
        echo -n "${Red}Mettre un chiffre entre 2 et 6 please ! ${ResetColor}" && read -r nbjoueurs
        verif_null_num
        verif_sup6_eq1
        verif_sup1_userco
    done
}
# Create one named FIFO under /tmp for every connected perudo client
# (one per perudoclt PID reported by top), pausing one second per client.
check_clt ()
{
    for f in $(top -n1 -b | grep perudoclt | awk '{ print $2 }'); do
        [ -e "/tmp/perudo_${f}" ] || mkfifo "/tmp/perudo_${f}"
        sleep 1
    done
}
# Echo a message on the server console and mirror it to every connected
# client through its per-PID FIFO.
# $1 - message (printed with "echo -en": escapes interpreted, no newline)
log()
{
  echo -en "${1}"
  # FIX: the original wrote "[ $(...) > 0 ]" -- inside [ ] the ">" is a
  # file redirection (it created a file literally named "0"), not a
  # numeric comparison; "-gt" is the correct operator.
  if [ "$(top -n1 -b | grep perudoclt | awk '{ print $2 }' | wc -l)" -gt 0 ]
  then
    for p in $(top -n1 -b | grep perudoclt | awk '{ print $2 }'); do
      # FIX: "${1}" is quoted here too (the original quoted it only on
      # the console echo), so the message reaches clients unmangled.
      echo -en "${1}" > "/tmp/perudo_${p}"
    done
  fi
}
# Banner: draw the welcome frame and show the connected-player count.
intro()
{
    echo -e "${Blue}##########################################${ResetColor}"
    echo -e "${Green}# Bienvenue sur DUDO !!! #${ResetColor}"
    echo -e "${Green}# $(echo ${userco} "joueurs connectés") #${ResetColor}"
    echo -e "${Blue}##########################################${ResetColor}"
}
# Validate the answer: empty or non-numeric input sends the player back
# to the prompt (by re-entering main).
verif_null_num()
{
    # empty answer -> back to the prompt
    [ -z ${nbjoueurs} ] || [ ${nbjoueurs} = "" ] &&
    main
    # anything but digits -> complain, wait, back to the prompt
    if ! echo "${nbjoueurs}" | grep -e '^[[:digit:]]*$' > /dev/null; then
        echo "${Red}Un chiffre${ResetColor}" &&
        sleep 2 &&
        main
    fi
}
# Reject player counts above 6 or equal to 1, then return to the prompt.
verif_sup6_eq1()
{
    if [ ${nbjoueurs} -gt 6 ]; then
        echo -n "${Red}Attention tu ne peux pas jouer à plus de 6 !!${ResetColor}"
        sleep 2
        main
    fi
    # kept as an && chain (not "if") to preserve the original function
    # exit status when the count is not 1
    [ ${nbjoueurs} -eq 1 ] && echo -n "${Red}Attention tu ne peux pas jouer tout seul !!${ResetColor}" &&
    sleep 2 &&
    main
}
# Remaining cases: fewer clients than requested (offer to wait), more
# clients than requested (offer to disconnect one), or an exact match
# (initialise the game).  The bare "break" calls rely on bash's dynamic
# loop control to exit the while-loop in main().
verif_sup1_userco()
{
    if [ ${nbjoueurs} -gt 1 ] && [ ${nbjoueurs} -gt ${userco} ] ; then
        # not enough clients yet
        echo "${Green}${userco} joueurs connectés : "
        echo "${Green}$(top -n1 -b | grep perudoclt | cut -d" " -f5 | sort -u) ${ResetColor}"
        echo "${Green}Que souhaitez vous faire ? : ${ResetColor}"
        echo "${Green}1) Attendre d'autre joueur ${ResetColor}"
        echo "${Green}2) Redefinir le nombre de joueurs ${ResetColor}"
        echo "${Green}3) quitter le jeu ${ResetColor}"
        read -p "${Green}votre choix : ${ResetColor}" choix1
        case "${choix1}" in
            1) attente && break ;;
            2) main ;;
            3) exit ;;
            *) echo "1, 2 ou 3 ! merci !" ;;
        esac
    else
        # too many clients connected for the requested count
        [ ${nbjoueurs} -gt 1 ] &&
        [ ${nbjoueurs} -lt ${userco} ] &&
        echo -e "${Green}${userco} joueurs connectés ;) \n$(who | cut -d" " -f1 | sort -u) ${ResetColor}" &&
        echo "Que souhaitez vous faire ? " &&
        echo "${Green}1) Deco un user${ResetColor}" &&
        echo "${Green}2) Redefinir le nombre de joueurs ${ResetColor}" &&
        echo "${Green}3) quitter le jeu ${ResetColor}" &&
        read -p "${Green}votre choix : ${ResetColor}" choix2 &&
        case "${choix2}" in
            1) deco_joueur ;;
            2) main ;;
            3) exit ;;
            *) echo "1, 2 ou 3 ! merci !" ;;
        esac
        # exact match: start the game
        [ ${nbjoueurs} -gt 1 ] && [ ${nbjoueurs} -eq ${userco} ] &&
        clear &&
        echo -n "${Green}Initialisation la partie !${ResetColor}" &&
        check_clt
        break
    fi
}
# Wait until the connected-client count matches the requested player
# count, showing a small text spinner, then launch the game.
attente()
{
    clear
    while true ; do
        check_clt
        [ ${userco} -eq ${nbjoueurs} ] && break
        check_clt
        # one spinner revolution: the same frame sequence as before,
        # one frame every half second
        for frame in '|' '/' '-' '|' '-' '\ '; do
            log "${userco} joueur(s) connecté(s) ! On attend sagement, merci ${frame}\r"
            sleep 0.5
        done
        userco=$(top -n1 -b | grep perudoclt | sort -u | wc -l)
    done
    echo "${Green}Initialisation la partie !${ResetColor}"
    launchgame
}
# List logged-in users, ask which one to disconnect, kill all of that
# user's processes and go back to the main prompt.
deco_joueur()
{
    echo $(who | cut -d" " -f1 | sort -u)
    read -p "${Green}nom du joueur à deconnecter : ${ResetColor}" nompts
    # the original wrapped this in backticks (running pkill's empty
    # output as a command); calling it directly is equivalent
    sudo pkill -KILL -u ${nompts}
    main
}
# Announce the game start to all clients and write the player names to a
# file, then pick the starting player.
launchgame()
{
    log "${BlueCyan}NB joueurs connectés : ${userco}${ResetColor}"
    log "${BlueCyan}NB joueurs selectionné : ${nbjoueurs}${ResetColor}"
    log "${Green}Tous les Joueurs sont operationnels${ResetColor}" &&
    sleep 1 &&
    log "${Green}Definition du premier joueur à commencer ${ResetColor}"
    sleep 1
    arrayusers
    # persist the player list for firstplayer()
    echo ${users_nom[*]} > /$HOME/perudo/users
    firstplayer
}
# Fill the global users_nom array with one entry per perudo client PID
# (column 2 of top's batch output).
arrayusers()
{
    a=0
    for i in $(top -n1 -b | grep perudoclt | awk '{ print $2 }'); do
        users_nom[${a}]="${i}"
        a=$((a + 1))
    done
}
# Pick the starting player at random among the names stored in ./users
# and broadcast the choice to every client.
firstplayer()
{
    echo "${Green}Qui sera le premier joueur ?${ResetColor}"
    # random index bounded by the word count of the users file
    i=$((RANDOM % $(cat users | wc -w)))
    first="${Green}${users_nom[${i}]}${ResetColor}"
    log ${first}
}
# --- Entry point: main() loops forever; check_clt is only reached if
# main() ever returns. ---
main
check_clt
# $HOME/perudo/perudocheck.sh
| true |
a843c406b0be8005133a6fec4134f4c2f0251978 | Shell | rsenn/scripts | /sh/mkkeys.sh | UTF-8 | 1,214 | 3.703125 | 4 | [] | no_license | #!/bin/sh
#
# Generates RSA private key if none available in @shlibprefix@/etc/ircd.key
# Dumps RSA public key if none available to @shlibprefix@/etc/ircd.pub
#
# $Id: mkkeys.in,v 1.1.1.1 2006/09/27 10:08:58 roman Exp $
# ircd install shlibprefix
# (the @...@ placeholders are substituted by the build system at install time)
shlibprefix="@shlibprefix@"
exec_shlibprefix="@exec_shlibprefix@"
sbindir="@sbindir@"
sysconfdir="@sysconfdir@"
# how many bits the RSA private key will have (second argument, default 2048)
bits=${2-2048}
# certifcate base name (first argument, default <sysconfdir>/ircd)
name="${1-$sysconfdir/ircd}"
# defaults for x509 and stuff
# NOTE(review): $cnf is never used below -- possibly a leftover from a
# removed x509 step; confirm.
cnf="$sbindir/openssl.cnf"
# private key file
key="$name.key"
# public key file
pub="$name.pub"
# random data
rnd="$sysconfdir/openssl.rnd"
# generate RSA private key if not already there
if [ -f "$key" ]
then
  echo "There is already an RSA private key in $key."
else
  # dump random data
  dd if=/dev/urandom "of=$rnd" count=1 "bs=$bits"
  # generate key
  openssl genrsa -rand "$rnd" -out "$key" "$bits"
  # remove old shit based on inexistent
  # NOTE(review): $req and $crt are never set in this script, so only
  # "$pub" is actually removed -- presumably leftovers; confirm.
  rm -f "$pub" "$req" "$crt"
  # destroy random data
  shred "$rnd"
  rm "$rnd"
fi
# dump the public key if not present
if [ -f "$pub" ]
then
  echo "There is already an RSA public key in $pub."
else
  openssl rsa -in "$key" -out "$pub" -pubout
fi
| true |
2b5d86f7e23164ab6df73187fcd12125f5de0e26 | Shell | datafibers/spark-etl-framework | /src/test/resources/setup/kafka_setup.sh | UTF-8 | 3,871 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Please run a docker container with Confluent 4.1.2 installed with the following changes, then have it up and running
# In server.properties (/opt/confluent/etc/kafka/server.properties)
# - advertised.listeners=PLAINTEXT://localhost:9092
# - listener.security.protocol.map=PLAINTEXT:PLAINTEXT
# Create each topic (3 partitions, replication factor 1) unless it
# already exists.  The five copy/pasted blocks of the original are
# folded into a single loop; the list|grep output is still printed,
# exactly as before.
for topic in users train events features stream-features; do
  if ! kafka-topics --zookeeper localhost:2181 --list --topic "$topic" | grep "$topic"; then
    kafka-topics --zookeeper localhost:2181 --create --topic "$topic" --partitions 3 --replication-factor 1
  fi
done
# --- list all topics
kafka-topics --zookeeper localhost:2181 --list
# (Re)register a schema with the schema registry: delete the subject if
# it already exists, then POST the new version.
#   $1 - subject name
#   $2 - schema payload (JSON, as expected by the registry REST API)
register_schema() {
  local subject=$1
  local schema=$2
  if curl -X GET "http://localhost:8081/subjects" | grep "$subject"; then
    curl -X DELETE "http://localhost:8081/subjects/$subject"
  fi
  curl -X POST -H "Content-Type: application/vnd.schemaregistry.v1+json" --data "$schema" "http://localhost:8081/subjects/$subject/versions"
}
# register users-key schema
register_schema "users-key" '{"schema": "{\"type\": \"string\"}"}'
# register users-value schema
register_schema "users-value" '{"schema": "{\"namespace\": \"com.qwshen.streaming.model\", \"name\": \"User\", \"type\": \"record\", \"fields\": [{\"name\": \"user_id\", \"type\": \"string\"}, {\"name\": \"birthyear\", \"type\": [\"null\", \"string\"]}, {\"name\": \"gender\", \"type\": [\"null\", \"string\"]}, {\"name\": \"joinedAt\", \"type\": [\"null\", \"string\"]}]}"}'
# register events-key schema
register_schema "events-key" '{"schema": "{\"type\": \"string\"}"}'
# register events-value schema (the original comment said "train-value",
# but the subject registered was always events-value)
register_schema "events-value" '{"schema": "{\"namespace\": \"com.qwshen.streaming.model\", \"name\": \"Events\", \"type\": \"record\", \"fields\": [{\"name\": \"event_id\", \"type\": [\"null\", \"string\"]}, {\"name\": \"user_id\", \"type\": [\"null\", \"string\"]}, {\"name\": \"start_time\", \"type\": [\"null\", \"string\"]}, {\"name\": \"city\", \"type\": [\"null\", \"string\"]}, {\"name\": \"province\", \"type\": [\"null\", \"string\"]}, {\"name\": \"country\", \"type\": [\"null\", \"string\"]}]}"}'
| true |
9bfe768c1aba9b68186fa8cfaa42cd5e7a6c96e0 | Shell | henkaru/henkaru-wiki | /henkaru-wiki-1.0/bin/sendreport.sh | UTF-8 | 919 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Converts url from dokuwiki to pdf report the previous month
# and sends it via mail
PDFDIR='/opt/wiki/pdf'
# Recipients come from /opt/maillist when present, else just "admin".
[ -e "/opt/maillist" ] && ADDRLIST=`cat /opt/maillist` || ADDRLIST="admin"
[ ! -d "$PDFDIR" ] && mkdir -p "$PDFDIR"
# Previous month, e.g. "9-2020" (the leading zero is stripped by ${DATE#0}).
DATE=`date -d 'month ago' +%m-%Y`
filename="stats_${DATE#0}"
URL='http://wiki.a/doku.php?id=reports:'$filename
# Coverting url to PDF file
# FIX: the three occurrences of "$(unknown)" below invoked a nonexistent
# command (yielding an empty file name); the intended value is clearly
# ${filename}, which was otherwise unused.
wkhtmltopdf "$URL" ${PDFDIR}/${filename}.pdf
# Sending mail with url of report and attached pdf
[ ! -s "${PDFDIR}/${filename}.pdf" ] && exit 1 || \
echo "Сообщение создано автоматически, отвечать на него не нужно.
Статистику по входящему трафику можно посмотреть по ссылке
$URL
или в приложенном файле." | mutt -s "Интернет трафик за $DATE" -a ${PDFDIR}/${filename}.pdf -- $ADDRLIST
exit 0
| true |
781e642320227eab7deffceaf59a3c08a62e892c | Shell | Home24/react-lambda-example | /script/build-prod.sh | UTF-8 | 864 | 3.140625 | 3 | [] | no_license | #!/bin/sh
# Quit on errors.
# FIX: "set -e errexit" enabled -e but also assigned the stray word
# "errexit" to $1; "set -o errexit" is the intended spelling.
set -o errexit

ulimit -n 10240

# Cleans dist directory.
rm -rf dist
rm -rf dist_static

# Runs webpack with production configuration.
ANIMATIONS_DISABLED=$ANIMATIONS_DISABLED node_modules/.bin/webpack --stats --progress --config ./webpack/prod.config.js

# Creates directories for production files
mkdir dist
mkdir dist_static

# Compiles the sources required inside the AWS Lambda function.
babel lambda --out-dir dist
babel src --out-dir dist/src
babel server --out-dir dist/server

# Copies packages that are required to run AWS Lambda function (the ones that are marked as dependencies, as opposed to dev-dependencies).
./script/copy-dependencies.js

# Copies webpack compilation results to AWS Lambda package.
cp -R src/bundles dist/src/

# Copies static resources into a separete production directory.
cp -R public/* dist_static
| true |
f42a7c0c556d3431d7ddf85ae2a0052fd5aad163 | Shell | jack595/auto_calibration_Deconvolution | /get_averageWaveform/gen_step2.sh | UTF-8 | 404 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Regenerate the submission list from scratch (plain file, -f suffices;
# the original used -rf which is for directories).
rm -f sub.sh
# Read elec.list line by line.
# FIX: the original "for full_path in `cat elec.list`" word-split every
# path; a read loop keeps paths with whitespace intact.
while IFS= read -r full_path; do
    dir=$(dirname "$full_path")
    filename=$(basename "$full_path")
    # run id: strip up to the first '-' and drop the extension,
    # e.g. "SPE-1234.root" -> "1234"
    run=${filename#*-}
    run=${run%.*}
    echo "${dir}, ${run}"
    # instantiate the per-run script from the template
    sed -e "s#DIR#$dir#g" -e "s#RUN#$run#g" run-sample-step2.sh > "./run-step2/SPE-step2-$run.sh"
    echo "hep_sub $(pwd)/run-step2/SPE-step2-${run}.sh" >> sub.sh
done < elec.list
chmod +x ./run-step2/SPE-step2-*.sh
chmod +x sub.sh
| true |
3385013d2db100e0fd2dd361a7fee93dd1433f82 | Shell | hi-cli/hi-centos | /hostname | UTF-8 | 2,430 | 3.484375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
###############################################################################
# Project: hi-cli
# Description: The cross platform development toolkit - hi-cli
# Author: John Deng (john.deng@outlook.com)
#
# Copyright (c) 2014-2017 John Deng (john.deng@outlook.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: John Deng (john.deng@outlook.com)
# Updated: Sun Apr 2 13:42:49 CST 2017
# Module: centos
# Submodule:
# Command: hostname
###############################################################################
# Shared hi-cli helpers: colour definitions, argument parsing ("clap")
# and logging.  HI_CLI_HOME is exported by the hi-cli launcher.
source "${HI_CLI_HOME}/bin/colors"
source "${HI_CLI_HOME}/bin/clap"
source "${HI_CLI_HOME}/bin/logging"
###############################################################################
# Set the system hostname (from $1, or interactively) and propagate it to
# hostnamectl, /etc/sysconfig/network, /etc/hosts and /etc/mailname.
# Exits the script with status 1 when no hostname can be obtained.
function set_hostname() {
  OLD_HOSTNAME="$( hostname )"
  NEW_HOSTNAME="$1"
  # No argument given: prompt on the controlling terminal.
  if [ -z "$NEW_HOSTNAME" ]; then
    echo -n "Please input new hostname: "
    read NEW_HOSTNAME < /dev/tty
  fi
  # Still empty: abort (user-facing messages are Portuguese by design).
  if [ -z "$NEW_HOSTNAME" ]; then
    echo "Erro. Hostname incorrecto ou em falta. A sair..."
    exit 1
  fi
  echo "A alterar hostname de $OLD_HOSTNAME para $NEW_HOSTNAME..."
  # Apply the running, static and transient names via systemd.
  hostnamectl set-hostname $NEW_HOSTNAME
  hostnamectl set-hostname $NEW_HOSTNAME --static
  hostnamectl set-hostname $NEW_HOSTNAME --transient
  sed -i "s/HOSTNAME=.*/HOSTNAME=$NEW_HOSTNAME/g" /etc/sysconfig/network
  # Replace the old name in /etc/hosts, or append "<first-IP>\t<name>".
  if [ -n "$( grep "$OLD_HOSTNAME" /etc/hosts )" ]; then
    sed -i "s/$OLD_HOSTNAME/$NEW_HOSTNAME/g" /etc/hosts
  else
    echo -e "$( hostname -I | awk '{ print $1 }' )\t$NEW_HOSTNAME" >> /etc/hosts
  fi
  # Same treatment for /etc/mailname when it exists.
  if [ -f /etc/mailname ] && [ -n "$( grep "$OLD_HOSTNAME" /etc/mailname )" ]; then
    sed -i "s/$OLD_HOSTNAME/$NEW_HOSTNAME/g" /etc/mailname
  else
    echo -e "$( hostname -I | awk '{ print $1 }' )\t$NEW_HOSTNAME" >> /etc/mailname
  fi
}
# hi-cli dispatcher entry point.
# $1 / $2 - NAMES of caller variables that receive the exit code and the
#           command string (written back via eval out-parameters).
# arg3    - presumably populated by the sourced "clap" argument parser
#           and used as the new hostname -- confirm.
function run() {
  cli="hi centos set hostname"
  set_hostname "${arg3}"
  result=$?
  # Out-parameters: assign into the variable names the caller passed.
  eval $1="\${result}"
  eval $2="\${cli}"
}
###############################################################################
| true |
e45be66ce8b311b0ee38fe55a9d187bf192141f5 | Shell | kubernetes-native-testbed/kubernetes-native-testbed | /tools/deallocate_staticip.sh | UTF-8 | 217 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -u

# Directory this script lives in (kept for parity with sibling scripts).
CURRENT_DIR=$(cd "$(dirname "$0")"; pwd)

# Only the GKE deployment allocates a named static IP to release.
# FIX: the expansion is quoted -- unquoted, an empty value collapses the
# test to "[ = gke ]", which is a syntax error at runtime.
if [ "$KUBERNETES_PLATFORM" = "gke" ]; then
  gcloud compute addresses delete "${LOADBALANCER_IP_NAME}" --project="${GCP_PROJECT}" --region="${GCP_REGION}" --quiet
fi
| true |
19205b06b4bd9a2d5e885afa12259158111e1ce5 | Shell | adrianmo/k8s-dns-tests | /script.sh | UTF-8 | 279 | 3.1875 | 3 | [] | no_license | var=1
while true ; do
res=$( { curl -o /dev/null -s -w %{time_namelookup}\\n http://www.google.com; } 2>&1 )
var=$((var+1))
if [[ $res =~ ^[1-9] ]]; then
# DNS resolution took more than 1 second
now=$(date +"%T")
echo "$var slow: $res $now"
break
fi
done | true |
7603e44cf9efb2a357170f30f7c3c6e3bf62621a | Shell | gonzoj/allocnd | /sshalloc | UTF-8 | 463 | 2.765625 | 3 | [] | no_license | #!/bin/sh
# Alternative mechanism using pam_limits
# pkill sshd
# echo "* hard maxsyslogins 1" > /etc/security/limits.d/allocn.conf
# /etc/init.d/ssh restart
# Kill every sshd session process except root's and aptdateme's.
# pgrep -a prints "<pid> <cmdline>"; the sed keeps only the leading pid.
for pid in `pgrep -a "sshd" | grep -v "sshd: root" | grep -v "sshd: aptdateme" | sed -e 's/^\([0-9]*\) .*$/\1/'`; do
	kill $pid
done
# Comment out any previous AllowUsers directive, then allow only root,
# aptdateme and the single user passed as $1.
sed -i "s/^AllowUsers/# --REMOVED BY SSHALLOC-- AllowUsers/" /etc/ssh/sshd_config
echo "AllowUsers root aptdateme $1" >> /etc/ssh/sshd_config
/etc/init.d/ssh restart
| true |
cd97cc9e7db80fb954ba178b9b1edc25d96c73d2 | Shell | mskcc/roslin-variant | /build/containers/facets/1.6.3/run_test.sh | UTF-8 | 512 | 3.3125 | 3 | [] | no_license | # get actual output from facets doFacets
# Run the facets wrapper with the "doFacets" subcommand and keep only the
# first line of its combined stdout+stderr output.
actual=$(exec /usr/bin/runscript.sh doFacets 2>&1 | head -1)
# expected facets output
expected=$(cat << EOM
usage: facets doFacets [-h] [-c CVAL] [-s SNP_NBHD] [-n NDEPTH] [-m MIN_NHET]
EOM
)
# Strip ALL whitespace before comparing, so spacing/wrapping differences
# in the usage banner do not cause false failures (the unquoted
# $expected/$actual is harmless here precisely because tr removes every
# whitespace character anyway).
expected_no_space=$(echo $expected | tr -d "[:space:]")
actual_no_space=$(echo $actual | tr -d "[:space:]")
# diff
if [ "$actual_no_space" != "$expected_no_space" ]
then
echo "-----expected-----"
echo $expected
echo "-----actual-----"
echo $actual
exit 1
fi | true |
af8dae00e03435c1265ebf60a3fc3098c75a60f3 | Shell | gnishanth3/tomcatscript | /tomcatscript.sh | UTF-8 | 1,150 | 2.546875 | 3 | [] | no_license | #Download java
cd /tmp
sudo yum install wget unzip -y
# Oracle's direct JDK download requires the licence-acceptance cookie.
wget -c --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.rpm
#install java
sudo rpm -ivh jdk-8u131-linux-x64.rpm
#create Tomcat user (no home, no login shell)
sudo groupadd tomcat
sudo useradd -M -s /bin/nologin -g tomcat -d /opt/tomcat tomcat
#Download tomcat
# NOTE(review): this fetches the *windows* zip onto a Linux host --
# presumably the Linux tar.gz was intended; confirm before changing.
wget http://www-eu.apache.org/dist/tomcat/tomcat-8/v8.5.32/bin/apache-tomcat-8.5.32-windows-x64.zip
unzip apache-tomcat-8.5.32-windows-x64.zip
sudo mv apache-tomcat-8.5.32 /opt/tomcat
#update Permissions
cd /opt/tomcat
sudo chgrp -R tomcat bin
sudo chmod g+rwx bin
# FIX: the original line was "sudo g+r bin/*" -- "g+r" is not a command;
# the chmod was missing, so group-read was never applied.
sudo chmod g+r bin/*
#sudo chown -R tomcat webapps/ work/ temp/ logs/
#chmod -R 755 /opt/tomcat
#Download petclinicapp
cd /tmp
wget http://cliqr-appstore.s3.amazonaws.com/petclinic/petclinic.war
cp /tmp/petclinic.war /opt/tomcat/webapps
/opt/tomcat/bin/startup.sh
cd /tmp
# Install the prepared systemd unit and manage tomcat through systemctl.
sudo mv /tmp/startupscript.txt /etc/systemd/system/tomcat.service
sudo systemctl daemon-reload
sudo systemctl start tomcat
sudo systemctl status tomcat
sudo systemctl enable tomcat
| true |
0778d7f3a3ec1936fd8f93bb6f51542776214182 | Shell | sfrieson/productivity | /zsh_custom/plugins/sfrieson/sfrieson.plugin.zsh | UTF-8 | 1,128 | 3.203125 | 3 | [] | no_license | alias src='source ~/.zshrc'
# Go back one directory
alias b='cd ..'
# History lists your previously entered commands
alias h='history'
# Execute verbosely
alias cp='cp -v'
alias mv='mv -v'
alias rm='rm -v'
alias mkdir='mkdir -pv'
# =================
# Change System Settings
# =================
# Hide/show all desktop icons (useful when presenting)
alias hide_desktop="defaults write com.apple.finder CreateDesktop -bool false && killall Finder"
alias show_desktop="defaults write com.apple.finder CreateDesktop -bool true && killall Finder"
# Hide/show hidden files in Finder
alias hide_files="defaults write com.apple.finder AppleShowAllFiles FALSE && killall Finder"
alias show_files="defaults write com.apple.finder AppleShowAllFiles TRUE && killall Finder"
# ===================
# Application Aliases
# ===================
alias chrome='open -a "Google Chrome"'
alias firefox='/Applications/Firefox.app/Contents/MacOS/firefox'
# =========
# Functions
# =========
# NOTE(review): walks the words of every ifconfig line containing
# "broadcast" and stops at the first token matching grep -e "\d".  "\d"
# is not a POSIX/GNU basic-regex class, so the match behaviour is
# platform-dependent (presumably "[0-9]" and the machine's LAN address
# were intended; the address is shown via grep's own output).  Confirm
# before relying on it.
publicip () {
  for i in $( ifconfig | grep broadcast ); do
    if echo $i | grep -e "\d"
    then
      break
    fi
  done
}
| true |
392fd01eb3c4b169ec4ac740653eb3c61d9e3ccc | Shell | yves-chevallier/minecraft | /backup.sh | UTF-8 | 376 | 3.140625 | 3 | [] | no_license | #!/bin/bash
DAYS_RETENTION=7
# NOTE(review): overriding HOME affects every child process started
# below; a dedicated variable (e.g. SERVER_ROOT) would be safer -- kept
# for compatibility with existing cron entries.
HOME=/home/minecraft

# Pause world saves so the tarball is a consistent snapshot.
mcrcon "say Saves are disabled" save-off
mcrcon "say Starting daily backup" save-all
sleep 3
# FIX: quote the path expansions so the backup keeps working if the
# paths ever contain whitespace.
tar -cvpzf "${HOME}/backups/server-$(date +%F-%H-%M).tar.gz" "${HOME}/server"
sleep 1
mcrcon -w 5 "say Saves are now enabled" save-on

## Delete older backups
find "${HOME}/backups/" -type f -mtime +${DAYS_RETENTION} -name '*.gz' -delete
| true |
32937ed1b026a1fe78674259a7198c50bebd3eda | Shell | hermeseagel/shellscript | /Python_script/torando_mvc_template/aix_lastlogin.sh | UTF-8 | 351 | 2.765625 | 3 | [] | no_license | #!/bin/sh
timestamp=`date "+%Y%m%d%H%M"`
host=`hostname`
# NOTE(review): $savepa is never defined in this script and $fpathname
# is unused below -- presumably set/consumed by a wrapper; confirm.
fpathname=$savepa
ftpserverip='172.16.15.46'
ftpuser='root'
ftpwd='root'
# For every AIX user, append "<user> <human-readable last login>" to the
# per-host, per-run log file under /tmp.
for u in $(lsuser -a ALL)
do
last=`lsuser -a time_last_login $u |awk '/last_login/{print substr($2,17)}'`
# FIX: the original interpolated $LAST into the perl one-liner; shell
# variables are case-sensitive and $LAST is never set, so localtime()
# always saw an empty argument.  The epoch value lives in $last.
echo ` printf "$u "; perl -le "print scalar(localtime($last))"` >> /tmp/lastlogin$host$timestamp.log
done
| true |
96a55cedd47930ddd301f663afd52e539dfb4fb9 | Shell | tmahn/home | /bin/update-vim-personal-dictionary | UTF-8 | 424 | 2.984375 | 3 | [] | no_license | #!/bin/bash
set -eu

# emacs shares this dictionary through aspell, which aborts on startup
# when an entry contains non-alphabetic characters -- so give aspell a
# quick dry run first (under set -e a broken dictionary stops us here).
if type -a aspell >& /dev/null; then
    echo x | aspell -a
fi

# Recompile a vim spell file from its plain-text .add source, headless
# (ex mode, dumb terminal, quit immediately after mkspell!).
update_vim_dictionary() {
    vim -e -T dumb --cmd "mkspell! $1" --cmd q
}

update_vim_dictionary ~/.vimspell.en.utf-8.add
update_vim_dictionary ~/.vimspell.extra.add
| true |
9947ad5c8051a0f29ae6f943e7e7f222e71002b8 | Shell | seekdream1990/android-usb-gadget | /app/src/main/assets/usbGadgetProfiles/ctap.sh | UTF-8 | 1,163 | 3 | 3 | [] | no_license | #!/bin/sh
# Paths into the configfs USB-gadget tree; the writes below are
# order-dependent (function config before binding, strings before use).
CONFIGFS_DIR="/config"
GADGETS_PATH="${CONFIGFS_DIR}/usb_gadget"
GADGET="ctap3"
GADGET_PATH=${GADGETS_PATH}/${GADGET}
CONFIG_PATH="$GADGET_PATH/configs/c.1/"
STRINGS_PATH="$GADGET_PATH/strings/0x409/"
mkdir -p $CONFIG_PATH
mkdir -p $STRINGS_PATH
mkdir -p $GADGET_PATH/functions/hid.usb0
cd $GADGET_PATH/functions/hid.usb0
# HID protocol (according to USB spec: 1 for keyboard)
echo 0 > protocol
# device subclass
echo 0 > subclass
# number of bytes per record
echo 64 > report_length
# writing report descriptor
echo -ne \\x06\\xD0\\xF1\\x09\\x01\\xA1\\x01\\x09\\x20\\x15\\x00\\x26\\xFF\\x00\\x75\\x08\\x95\\x40\\x81\\x02\\x09\\x21\\x15\\x00\\x26\\xFF\\x00\\x75\\x08\\x95\\x40\\x91\\x02\\xC0 > report_desc
# USB device descriptor fields (vendor/product IDs, device/USB revisions).
cd $GADGET_PATH
echo '0xa4ac' > idVendor
echo '0x0525' > idProduct
echo '0x0512' > bcdDevice
echo '0x0200' > bcdUSB
echo 0 > bDeviceProtocol
echo 0 > bDeviceSubClass
echo 8 > bMaxPacketSize0
# English (0x409) descriptor strings.
cd $STRINGS_PATH
echo "tejado" > manufacturer
echo "CTAP" > product
echo "42" > serialnumber
# Configuration c.1: power budget, label, and bind the HID function.
cd $CONFIG_PATH
echo 30 > MaxPower
echo "HID Configuration" > strings/0x409/configuration
ln -s ${GADGET_PATH}/functions/hid.usb0 $CONFIG_PATH/hid.usb0 | true |
67e2edef9b470e20a066f2cd9a27d0fde4efe22b | Shell | tsutsarin/dotfiles | /shellrc.d/functions.d/mkcd.sh | UTF-8 | 159 | 3.109375 | 3 | [] | no_license | # mkdir and cd into folder
# Make a directory and immediately cd into it.
# FIX: "$1" is quoted -- the original unquoted expansion created several
# directories and failed the cd for names containing spaces; "--" guards
# against names starting with a dash.
mkcd()
{
    mkdir -- "$1" && cd -- "$1"
}

# TODO: Refactor and rewrite in zsh
#function mkdircd () { mkdir -p "$@" && eval cd "\"\$$#\""; }
| true |
6dcfb0fa793eb897e0055803b76dce87a122f6de | Shell | UTSMADS/smads_robot_app_client | /InstallDependencies | UTF-8 | 529 | 3.25 | 3 | [] | no_license | #!/bin/bash
# apt/nvm installs below need root.
if [ "$EUID" -ne 0 ]
  then echo "Please run as root"
  exit
fi
# Install Node Version Manager
# and the supported version of Node
curl -sL https://raw.githubusercontent.com/creationix/nvm/v0.35.3/install.sh -o install_nvm.sh
bash install_nvm.sh
# Pick up the nvm shell function just added to the profile.
source ~/.profile
nvm install 10.23.0
# Install Yarn
# Add Yarn's signing key and apt repository, then install the package.
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
sudo apt update && sudo apt install yarn -y
| true |
aa277ee10877406477148727804fba24d9a17d57 | Shell | opencitymodel/data-pipeline | /data-prep/msfp-usbuildings/download-and-prep.sh | UTF-8 | 617 | 3.5 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Download one state's Microsoft US Building Footprints archive and strip
# the GeoJSON wrapper down to one Polygon feature per line.
# Usage: download-and-prep.sh STATE VERSION TARGET_DIRECTORY
MSFP_DOWNLOAD_SITE="https://usbuildingdata.blob.core.windows.net"
MSFP_STATE=$1
MSFP_VERSION=$2
TARGET_DIRECTORY=$3

# FIX: abort if the target directory is missing instead of silently
# downloading/unpacking into whatever the current directory is.
cd "${TARGET_DIRECTORY}" || exit 1

# Download
curl -O "${MSFP_DOWNLOAD_SITE}/${MSFP_VERSION}/${MSFP_STATE}.zip"

# Unzip
unzip "${MSFP_STATE}.zip"

# Delete the original zip to save disk space
rm -f "${MSFP_STATE}.zip"

# Parse out extra JSON so we have just a single polygon per line
grep '{"type":"Feature","geometry":{"type":"Polygon"' "${MSFP_STATE}.geojson" | awk '{$1=$1};1' | sed -n 's;\({.*}}\).*$;\1;p' > "${MSFP_STATE}.txt"

# Delete the geojson file to save disk space
rm -f "${MSFP_STATE}.geojson"
| true |
61782bec3de61f9c4ea26044446ba4e02bb864d8 | Shell | 8l/lfscript | /scripts/blfs-13994-unchecked/faac | UTF-8 | 1,061 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# The instructions in this file are extracted from
# 'Beyond Linux From Scratch' (2014-08-22 / r13994) but are modified for use
# with LFScript 4 which installs the software to a fake root directory.
#
# Beyond Linux From Scratch is released under the MIT license.
# Copyright (C) 2001-2014, The BLFS Development Team
# Source tarball + patch (and their md5sums) fetched by the LFScript
# framework; "dontverify" skips the checksum for the patch.
WGETLIST="http://downloads.sourceforge.net/faac/faac-1.28.tar.bz2
http://www.linuxfromscratch.org/patches/blfs/svn/faac-1.28-glibc_fixes-1.patch"
MD5SUMLIST="c5dde68840cefe46532089c9392d1df0
dontverify"
###############################################
installation() { # INSTALLING SYSTEM SOFTWARE #
###############################################
patch -Np1 -i ../faac-1.28-glibc_fixes-1.patch
# Drop the lines BLFS flags as problematic before building.
sed -i -e '/obj-type/d' -e '/Long Term/d' frontend/main.c
./configure --prefix=/usr --disable-static
make
# Audible smoke test: encode a bundled ALSA sample, decode and play it.
./frontend/faac -o Front_Left.mp4 /usr/share/sounds/alsa/Front_Left.wav
faad Front_Left.mp4
aplay Front_Left.wav
# FAKEROOT is provided by LFScript; the package is staged there.
make DESTDIR=${FAKEROOT} install
#################
} # END OF FILE #
#################
| true |
73dd3df7bb0798380d47af6b1b4921d865d8dedf | Shell | simplybusiness/lambda-coldstart-comparison | /build.sh | UTF-8 | 305 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
declare -a folders=("ruby25" "nodejs6" "ruby25vpc")

# export AWS_PROFILE=personal

# Redeploy every service, then invoke the functions; repeated 100 times
# to sample cold-start latency.
# FIX: brace expansion replaces the external `seq` call, and a failed cd
# now aborts instead of letting the subsequent "cd .." walk out of the
# project tree and deploy from the wrong directory.
for i in {1..100}; do
  echo ""
  echo "====== Iteration ${i} ======"
  for folder in "${folders[@]}"; do
    cd "$folder" || exit 1
    pwd
    sls deploy --force
    cd ..
  done
  node invoke-functions.js
done
| true |
9c22d91fe5934af80f72168bb6b6d30a8000117a | Shell | krishnakant13/Hyperledger_2.0_components | /peer/multi-org-ca/sign-channel-tx.sh | UTF-8 | 1,167 | 3.421875 | 3 | [] | no_license | # Sign the airline channel tx file org admins
# E.g., ./sign-channel-tx.sh acme Signs the file with acme admin certificate/key
# E.g., ./sign-channel-tx.sh budget Signs the file with budget admin certificate/key
# Print usage / side-effect information.
function usage {
    echo "./sign-channel-tx.sh ORG_NAME"
    echo " Signs the channel transaction file with identity of admin from ORG_ADMIN"
    echo " PLEASE NOTE: Signs the tx file under orderer/multi-org-ca/airline-channel.tx "
}
# FIX: "$1" is quoted -- unquoted, "[ -z $1 ]" collapsed to "[ -z ]"
# (true only by accident) and broke for arguments containing spaces.
if [ -z "$1" ]
then
    usage
    echo 'Please provide ORG_NAME!!!'
    exit 1
else
    ORG_NAME=$1
fi
# Set the environment variable $1 = ORG_NAME Identity=admin
source set-identity.sh
# Variable holds path to the channel tx file
CHANNEL_TX_FILE=$PWD/../../orderer/multi-org-ca/airline-channel.tx
# Execute command to sign the tx file in place
peer channel signconfigtx -f "$CHANNEL_TX_FILE"
echo "====> Done. Signed file with identity $ORG_NAME/admin"
echo "====> Check size & timestamp of file $CHANNEL_TX_FILE"
# PS: The join cannot be execute without a channel created
# peer channel join -o localhost:7050 -b $PWD/../../orderer/multi-org-ca/airline-channel.tx | true |
62441435f0be168cce69947416724b6563505772 | Shell | ilventu/aur-mirror | /pitivi-git/PKGBUILD | UTF-8 | 1,579 | 2.796875 | 3 | [] | no_license | # $Id: PKGBUILD,v 1.6 2008/12/13 11:17:57 abhidg Exp $
# Maintainer: Abhishek Dasgupta <abhidg@gmail.com>
# Contributor: Gabor Nyekhelyi (n0gabor) <n0gabor@vipmail.hu>
pkgname=pitivi-git
pkgver=20111227
pkgrel=2
pkgdesc="PiTiVi allows users to easily edit audio/video projects based on the GStreamer framework"
arch=('i686' 'x86_64')
license=('LGPL')
depends=('gstreamer0.10>=0.10.24' 'pygoocanvas' 'gstreamer0.10-good' 'pygtk>=2.8.0' 'gstreamer0.10-python>=0.10.16' 'gnonlin-git' 'setuptools' 'zope-interface' 'dbus-python' 'gnome-icon-theme')
makedepends=('pkgconfig' 'intltool')
provides=('pitivi')
conflicts=('pitivi')
url="http://www.pitivi.org/"
_gitroot="git://git.pitivi.org/git/pitivi.git"
_gitname="pitivi"
build() {
cd "$srcdir"
msg "Connecting to GIT server..."
if [[ -d $_gitname ]]; then
(cd $_gitname && git pull origin)
else
git clone --depth=1 $_gitroot $_gitname
fi
msg "GIT checkout done or server timeout"
msg "Starting make..."
cd $_gitname
if test ! -f common/gst-autogen.sh;
then
echo "+ Setting up common submodule"
git submodule init
fi
git submodule update
cd ..
rm -rf $_gitname-build
cp -r $_gitname $_gitname-build
cd $_gitname-build
#patch -p0 < ../fix-as-problems.diff || return 1
./autogen.sh
./configure --prefix=/usr
make || return 1
make DESTDIR="$pkgdir" install || return 1
install -D -m644 data/pitivi.desktop "$pkgdir/usr/share/applications/pitivi.desktop"
sed -i 's/#\!\/usr\/bin\/env python/#\!\/usr\/bin\/env python2/' "$pkgdir/usr/bin/pitivi"
}
| true |
ded5b15a6ba8f941692fe5bbabb5cd8a977b11d8 | Shell | endwall/client-cert-tests | /scripts/make-client-certs.sh | UTF-8 | 1,088 | 3.34375 | 3 | [] | no_license | #! /bin/bash
set -e

# Generate a client TLS key/CSR/certificate signed by the intermediate CA,
# for mutual-TLS testing.  Idempotent: does nothing when the cert exists.
# All $OUTPUT_DIR expansions are quoted so paths with spaces work.
OUTPUT_DIR=${1:-$PWD}

if [ -e "$OUTPUT_DIR/client/massl.client.key.pem" ] && [ -e "$OUTPUT_DIR/client/massl.client.crt.pem" ]; then
	echo "certs exists already!"
else
	echo "Generate certs"
	mkdir -p "${OUTPUT_DIR}/client"

	# Work on $$-suffixed temp names, then move the results into place.
	openssl genrsa \
		-out tmp_$$.client.key.pem \
		2048

	# Generate CSR
	openssl req \
		-new \
		-sha256 \
		-key tmp_$$.client.key.pem \
		-out tmp_$$.client.csr.pem \
		-subj "/C=AU/ST=Victoria/L=Melbourne/O=endwall Systems/CN=endwall client cert"

	# Sign the cert
	# NOTE(review): assumes the intermediate CA key/cert already exist under
	# $OUTPUT_DIR/intermediate — confirm the CA generation step runs first.
	openssl x509 \
		-req -in tmp_$$.client.csr.pem \
		-CA "$OUTPUT_DIR/intermediate/massl.intermediate.crt.pem" \
		-CAkey "$OUTPUT_DIR/intermediate/massl.intermediate.key.pem" \
		-CAcreateserial \
		-extensions usr_cert \
		-sha256 \
		-out tmp_$$.client.crt.pem \
		-days 5000

	mv tmp_$$.client.key.pem "$OUTPUT_DIR/client/massl.client.key.pem"
	mv tmp_$$.client.csr.pem "$OUTPUT_DIR/client/massl.client.csr.pem"
	mv tmp_$$.client.crt.pem "$OUTPUT_DIR/client/massl.client.crt.pem"
fi
| true |
d5eed16cefda27a461c1df849eab572e9b7ee517 | Shell | danhcole/dotfiles | /setup.sh | UTF-8 | 1,211 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Back up pre-existing dotfiles (real files/dirs only — never symlinks left
# by a previous run), then symlink everything from ~/dotfiles.
DATE=$(date +%Y-%m-%d:%H:%M:%S)

echo ""
echo "Setting up dotfiles..."
echo ""

echo "Backing up old dotfiles..."
# Regular rc files: back up only when present and not already a symlink.
for rc in .bashrc .zshrc .vimrc; do
	if [[ -f ~/$rc && ! -L ~/$rc ]]; then
		mv ~/$rc ~/$rc.$DATE
	fi
done
# Config directories: same rule, but tested with -d.
for dir in .vim .sh .bash .zsh; do
	if [[ -d ~/$dir && ! -L ~/$dir ]]; then
		mv ~/$dir ~/$dir.$DATE
	fi
done

echo "Linking .rc files..."
if [[ ! -e ~/.bashrc ]]; then ln -s ~/dotfiles/.bashrc ~/.bashrc; fi
if [[ ! -e ~/.vimrc ]]; then ln -s ~/dotfiles/.vimrc ~/.vimrc; fi
if [[ ! -e ~/.zshrc ]]; then ln -s ~/dotfiles/.zshrc ~/.zshrc; fi

echo "Linking shell shell config files..."
if [[ ! -e ~/.sh ]]; then ln -s ~/dotfiles/sh ~/.sh; fi
if [[ ! -e ~/.bash ]]; then ln -s ~/dotfiles/bash/ ~/.bash; fi
if [[ ! -e ~/.zsh ]]; then ln -s ~/dotfiles/zsh ~/.zsh; fi

echo "Linking vim environment..."
if [[ ! -e ~/.vim ]]; then ln -s ~/dotfiles/vim ~/.vim; fi

echo ""
echo "done"
| true |
fb4ada0d89bfd5950be5278ce7afaa96d129e0dc | Shell | embedded-software-laboratory/embeddedRTPS | /ci/clang-check-format.sh | UTF-8 | 495 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR/.." || exit 1

# Normalize line endings, then reformat all C++ sources, excluding config.h
# and anything under a thirdparty directory (case-insensitive, matching the
# old `grep -vi` filters).  Glob patterns are quoted so the shell cannot
# expand them, and paths are NUL-delimited so filenames with spaces survive.
echo "Applying dos2unix"
find . \( -iname '*.hpp' -o -iname '*.cpp' -o -iname '*.tpp' -o -iname '*.h' \) \
	! -ipath '*config.h*' ! -ipath '*thirdparty*' -print0 |
	xargs -0 dos2unix

# Apply clang-format
echo "Applying clang-format"
# BUG FIX: the bare word `style=Google` was passed to clang-format as an
# *input file*, not an option; it must be `--style=Google`.
find . \( -iname '*.hpp' -o -iname '*.cpp' -o -iname '*.tpp' -o -iname '*.h' \) \
	! -ipath '*config.h*' ! -ipath '*thirdparty*' -print0 |
	xargs -0 clang-format -i --verbose --style=Google
| true |
010eebb45566e8e48763bdb0b2bb54b561a1c987 | Shell | nightlark/re2c | /test/__run_unicode_tests.sh | UTF-8 | 1,219 | 3.71875 | 4 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/env bash
# Usage: __run_unicode_tests.sh SRCDIR BLDDIR
# For every generated unicode test case: run re2c on it, compile the output
# against the re2c support sources, and execute the binary (silent on
# success, reports failures itself).
if [[ $# -ne 2 ]]; then
	echo "usage: $0 SRCDIR BLDDIR"
	exit 1
fi

srcdir="$1"
blddir="$2"

for fre in encodings/unicode_{blocks,group_*}_{8,x,u}_encoding_policy_{ignore,substitute,fail}.re; do
	echo "$fre"

	fc=${fre/.re/.c}
	fe=${fre/.re/}
	# The first line of each .re file carries its re2c options after "OUTPUT".
	opts="$(head -n 1 "$fre" | sed -E 's/.*OUTPUT(.*)/\1/')"

	# compile re2c source ($opts stays unquoted on purpose: it holds several
	# whitespace-separated options)
	"$blddir/re2c" $opts "$fre" -o"$fc" --no-version --no-generation-date
	read -r line < "$fc"
	# Quoted =~ pattern is a deliberate literal-substring match.
	if [[ $line =~ "error: " ]]; then
		echo "*** re2c error, skipping ***"
		continue
	fi

	# compile C source
	g++ -W -Wall -Wextra \
		-I "$srcdir" \
		-I "$blddir" \
		-I "$srcdir/src/encoding" \
		"$srcdir/src/encoding/range_suffix.cc" \
		"$srcdir/src/encoding/utf8.cc" \
		"$srcdir/src/encoding/utf16.cc" \
		"$srcdir/src/regexp/regexp.cc" \
		"$srcdir/src/util/range.cc" \
		"$fc" \
		-o "$fe"

	# execute (runs silently if OK, otherwise report an error)
	./"$fe"
	rm -f "$fe"
done

echo "note: run-time failures for surrogates with '--encoding-policy substitute' are OK"
echo "note: compile-time failures for surrogates with '--encoding-policy fail' are OK"
| true |
07e5b022e603c54a1728d5335fea6cd81af755c2 | Shell | j0ma/morph-seg | /src/train-lmvr.sh | UTF-8 | 10,227 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# Fail fast: abort on any command error and propagate pipeline failures.
set -eo pipefail
# Created by argbash-init v2.8.1
# The ARG_* directives below are the argbash template that generated the
# option parser further down; at runtime they are plain comments.
# ARG_OPTIONAL_SINGLE([lang])
# ARG_OPTIONAL_SINGLE([lexicon-size])
# ARG_OPTIONAL_SINGLE([input-path])
# ARG_OPTIONAL_SINGLE([corpus-name])
# ARG_OPTIONAL_SINGLE([segmentation-output-path])
# ARG_OPTIONAL_SINGLE([seed-segmentation-input-path])
# ARG_OPTIONAL_SINGLE([model-output-path])
# ARG_OPTIONAL_SINGLE([min-shift-remainder])
# ARG_OPTIONAL_SINGLE([length-threshold])
# ARG_OPTIONAL_SINGLE([perplexity-threshold])
# ARG_OPTIONAL_SINGLE([min-perplexity-length])
# ARG_OPTIONAL_SINGLE([lexicon-output-path])
# ARG_OPTIONAL_SINGLE([max-epochs])
# ARG_OPTIONAL_SINGLE([encoding])
# ARG_HELP([<The general help message of my script>])
# ARGBASH_GO()
# needed because of Argbash --> m4_ignore([
### START OF CODE GENERATED BY Argbash v2.8.1 one line above ###
# Argbash is a bash code generator used to get arguments parsing right.
# Argbash is FREE SOFTWARE, see https://argbash.io for more info
# Print an error message to stderr and terminate the script.
# $1 - message; $2 - exit status (defaults to 1 when absent or empty).
# When _PRINT_HELP=yes, the usage text is shown first.
die() {
	local exit_status=${2:-1}
	if [ "$_PRINT_HELP" = yes ]; then
		print_help >&2
	fi
	echo "$1" >&2
	exit "$exit_status"
}
# Succeed (return 0) when the first character of $1 is one of the script's
# recognized single-letter options.
begins_with_short_option() {
	local short_opts='h'
	local first_char="${1:0:1}"
	# Deleting the character changes the string only when it is present.
	if [ "$short_opts" = "${short_opts/$first_char/}" ]; then
		return 1
	fi
	return 0
}
# THE DEFAULTS INITIALIZATION - OPTIONALS
# The commented-out options below have no default: if the caller omits them,
# the commands later in the script receive empty values.
#_arg_lang=
#_arg_lexicon_size=
#_arg_input_path=
#_arg_corpus_name=
#_arg_segmentation_output_path=
#_arg_model_output_path=
#_arg_lexicon_output_path=
# Empty seed segmentation means "train a Morfessor baseline first" (see the
# main body below).
_arg_seed_segmentation_input_path=
# Numeric LMVR training hyperparameter defaults.
_arg_min_shift_remainder=1
_arg_length_threshold=5
_arg_perplexity_threshold=10
_arg_min_perplexity_length=1
_arg_max_epochs=5
_arg_encoding="utf-8"
# Print the program description, usage line, and option summary to stdout.
print_help() {
printf '%s\n' "Script for training LMVR"
printf 'Usage: %s [--lang <arg>] [--lexicon-size <arg>] [--input-path <arg>] [--segmentation-output-path <arg>] [ --seed-segmentation-input-path <arg> ] [--model-output-path <arg>] [--min-shift-remainder <arg>] [--length-threshold <arg>] [--perplexity-threshold <arg>] [--min-perplexity-length <arg>] [--lexicon-output-path <arg>] [--max-epochs <arg>] [--encoding <arg>] [-h|--help]\n' "$0"
printf '\t%s\n' "-h, --help: Prints help"
}
# Argbash-generated option parser: walks the argument list, accepting each
# long option both as "--opt value" and "--opt=value", and stores results in
# the _arg_* globals initialized above.  Do not hand-edit the case arms;
# regenerate with argbash if options change.
parse_commandline() {
while test $# -gt 0; do
_key="$1"
case "$_key" in
--lang)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_lang="$2"
shift
;;
--lang=*)
_arg_lang="${_key##--lang=}"
;;
--lexicon-size)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_lexicon_size="$2"
shift
;;
--lexicon-size=*)
_arg_lexicon_size="${_key##--lexicon-size=}"
;;
--input-path)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_input_path="$2"
shift
;;
--input-path=*)
_arg_input_path="${_key##--input-path=}"
;;
--corpus-name)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_corpus_name="$2"
shift
;;
--corpus-name=*)
_arg_corpus_name="${_key##--corpus-name=}"
;;
--segmentation-output-path)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_segmentation_output_path="$2"
shift
;;
--segmentation-output-path=*)
_arg_segmentation_output_path="${_key##--segmentation-output-path=}"
;;
--seed-segmentation-input-path)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_seed_segmentation_input_path="$2"
shift
;;
--seed-segmentation-input-path=*)
_arg_seed_segmentation_input_path="${_key##--seed-segmentation-input-path=}"
;;
--model-output-path)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_model_output_path="$2"
shift
;;
--model-output-path=*)
_arg_model_output_path="${_key##--model-output-path=}"
;;
--min-shift-remainder)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_min_shift_remainder="$2"
shift
;;
--min-shift-remainder=*)
_arg_min_shift_remainder="${_key##--min-shift-remainder=}"
;;
--length-threshold)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_length_threshold="$2"
shift
;;
--length-threshold=*)
_arg_length_threshold="${_key##--length-threshold=}"
;;
--perplexity-threshold)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_perplexity_threshold="$2"
shift
;;
--perplexity-threshold=*)
_arg_perplexity_threshold="${_key##--perplexity-threshold=}"
;;
--min-perplexity-length)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_min_perplexity_length="$2"
shift
;;
--min-perplexity-length=*)
_arg_min_perplexity_length="${_key##--min-perplexity-length=}"
;;
--lexicon-output-path)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_lexicon_output_path="$2"
shift
;;
--lexicon-output-path=*)
_arg_lexicon_output_path="${_key##--lexicon-output-path=}"
;;
--max-epochs)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_max_epochs="$2"
shift
;;
--max-epochs=*)
_arg_max_epochs="${_key##--max-epochs=}"
;;
--encoding)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_encoding="$2"
shift
;;
--encoding=*)
_arg_encoding="${_key##--encoding=}"
;;
# -h/--help (and any -h-prefixed short cluster) prints help and exits 0.
-h | --help)
print_help
exit 0
;;
-h*)
print_help
exit 0
;;
# Anything else is fatal; _PRINT_HELP=yes makes die() show usage first.
*)
_PRINT_HELP=yes die "FATAL ERROR: Got an unexpected argument '$1'" 1
;;
esac
shift
done
}
parse_commandline "$@"
# OTHER STUFF GENERATED BY Argbash
### END OF CODE GENERATED BY Argbash (sortof) ### ])
# [ <-- needed because of Argbash
# get necessary environment variable and activate
# the virtual environment that uses python 2.7.
ROOT=$(dirname "$0")
if [ -z "$LMVR_ENV_PATH" ]; then
source "$ROOT/lmvr-environment-variables.sh"
fi
source "$LMVR_ENV_PATH/bin/activate"
# Echo every resolved option value for reproducibility of the run log.
printf 'Value of --%s: %s\n' 'lang' "$_arg_lang"
printf 'Value of --%s: %s\n' 'lexicon-size' "$_arg_lexicon_size"
printf 'Value of --%s: %s\n' 'input-path' "$_arg_input_path"
printf 'Value of --%s: %s\n' 'corpus-name' "$_arg_corpus_name"
printf 'Value of --%s: %s\n' 'segmentation-output-path' "$_arg_segmentation_output_path"
printf 'Value of --%s: %s\n' 'seed-segmentation-input-path' "$_arg_seed_segmentation_input_path"
printf 'Value of --%s: %s\n' 'model-output-path' "$_arg_model_output_path"
printf 'Value of --%s: %s\n' 'min-shift-remainder' "$_arg_min_shift_remainder"
printf 'Value of --%s: %s\n' 'length-threshold' "$_arg_length_threshold"
printf 'Value of --%s: %s\n' 'perplexity-threshold' "$_arg_perplexity_threshold"
printf 'Value of --%s: %s\n' 'min-perplexity-length' "$_arg_min_perplexity_length"
printf 'Value of --%s: %s\n' 'lexicon-output-path' "$_arg_lexicon_output_path"
printf 'Value of --%s: %s\n' 'max-epochs' "$_arg_max_epochs"
printf 'Value of --%s: %s\n' 'encoding' "$_arg_encoding"
# Without a user-supplied seed segmentation, train a Morfessor baseline to
# produce one; otherwise use the provided file directly.
if [ -z "$_arg_seed_segmentation_input_path" ]; then
echo "No seed segmentation received! Training morfessor baseline..."
MBL_SEGM_OUTPUT="$_arg_segmentation_output_path/morfessor-baseline.lmvr.${_arg_lexicon_size}.seed.${_arg_lang}"
MBL_LEXICON_OUTPUT_FNAME="${_arg_lexicon_output_path}/${_arg_corpus_name}.${_arg_lexicon_size}.morfessor-baseline.lexicon.${_arg_lang}.txt"
morfessor-train \
-x ${MBL_LEXICON_OUTPUT_FNAME} \
-S "${MBL_SEGM_OUTPUT}" \
--max-epochs ${_arg_max_epochs} \
"${_arg_input_path}"
SEED_SEGM_FNAME="${MBL_SEGM_OUTPUT}"
else
echo "Seed segmentation received! No need to train Morfessor Baseline!"
SEED_SEGM_FNAME="${_arg_seed_segmentation_input_path}"
fi
## Train LMVR model using the training set
echo "Training LMVR model..."
LMVR_MODEL_OUTPUT_FNAME="${_arg_model_output_path}/${_arg_corpus_name}.${_arg_lexicon_size}.lmvr.model.${_arg_lang}.tar.gz"
LMVR_LEXICON_OUTPUT_FNAME="${_arg_lexicon_output_path}/${_arg_corpus_name}.${_arg_lexicon_size}.lmvr.lexicon.${_arg_lang}.txt"
# TODO: why is -T relevant if we use lmvr-segment already?
# notably, it's not relevant in flatcat!
lmvr-train "${SEED_SEGM_FNAME}" \
-T "${_arg_input_path}" \
-s "${LMVR_MODEL_OUTPUT_FNAME}" \
-p "${_arg_perplexity_threshold}" \
-d none -m batch \
--min-shift-remainder "${_arg_min_shift_remainder}" \
--length-threshold "${_arg_length_threshold}" \
--min-perplexity-length "${_arg_min_perplexity_length}" \
--max-epochs "${_arg_max_epochs}" \
--lexicon-size "${_arg_lexicon_size}" \
-x "${LMVR_LEXICON_OUTPUT_FNAME}" \
-o /dev/null
# let's output the test to /dev/null
# since we're using lmvr-segment below
echo "Segmenting using LMVR..."
LMVR_SEGM_OUTPUT_FNAME="$_arg_segmentation_output_path/${_arg_corpus_name}.${_arg_lexicon_size}.segmented.lmvr.${_arg_lang}"
# Final pass: segment the input corpus with the freshly trained model.
lmvr-segment \
"${LMVR_MODEL_OUTPUT_FNAME}" \
"${_arg_input_path}" \
-p "${_arg_perplexity_threshold}" \
--output-newlines \
--encoding "${_arg_encoding}" \
-o "${LMVR_SEGM_OUTPUT_FNAME}"
# ] <-- needed because of Argbash
| true |
6673ab93bfafc798e46258e7e8d7395859996cda | Shell | gtrias/stretto | /electronres/buildpackages.sh | UTF-8 | 479 | 3.375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# first run electron-packager and put the output in tmp
echo 'Building Stretto for all platforms'
electron-packager ./ "Stretto" --platform=win32,linux,darwin --arch=all --version=1.4.10 --out=/tmp --overwrite --ignore="dbs|bower_components|electronres" --icon electronres/icon --prune
# zip the resulting Stretto folders
echo 'Zipping packages for uploading'
# Guard the cd so the zip loop can never run in the wrong directory.
cd /tmp || exit 1
for d in Stretto-*/; do
	target=${d%/}
	echo "Zipping $target"
	# "$d" is quoted so platform names containing spaces still zip cleanly.
	zip -qry9 "$target.zip" "$d"
done
| true |
cb1f33f02403516ed71c03b9a8a14e841d74db27 | Shell | purplezi/shell-examples | /11-9.sh | UTF-8 | 168 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
# Teaching example: word-splitting $(ps -eo command) yields the individual
# words of every process command line, not whole commands, so most "paths"
# produced here do not exist on disk.
for f in $(ps -eo command 2>/dev/null) ; do
# Guard: skip words that are not existing paths so `ls` (and set -e)
# cannot abort the loop.
[[ -e "$f" ]] || continue
ls "$f"
done
set -e
# Unguarded variant: the first word that is not an existing path makes
# `ls` fail and, under set -e, terminates the script.
for f in $(ps -eo command) ; do
ls "$f"
done
| true |
016fdbb76aa98a33620c3283b6f2f0546e07f5e5 | Shell | elmagnificogi/elmagnifico.tech | /deploy.sh | UTF-8 | 385 | 3.21875 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | #! /bin/bash
# Pull the blog repo; when git reports nothing new, skip the rebuild.
# NOTE(review): newer git prints "Already up to date." (no hyphens) —
# confirm the installed git version still emits the hyphenated string.
result=$(cd /root/elmagnificogi.github.io && git pull origin master | grep "Already up-to-date" )
if [[ -n "$result" ]]
then
	exit 0
else
	echo "$(date '+%Y%m%d %H:%M'): post update,start build"
	result=$(jekyll build --source /root/elmagnificogi.github.io --destination /usr/share/nginx/html)
	# Quoted so jekyll's multi-line output keeps its formatting.
	echo "$result"
	echo "$(date '+%Y%m%d %H:%M'): build over"
fi | true |
041749ac53d7107376d13e9d52c9ad6900fcb8b4 | Shell | mattpaletta/scripts | /tools/cpp.sh | UTF-8 | 553 | 3.703125 | 4 | [] | no_license | _cpp_has_installed=false
# Compiler detection: remember whether gcc is already on PATH.  Testing the
# command directly replaces the fragile `command -v gcc; if [[ $? == 0 ]]`
# pattern; `command -v` still prints the path, matching the old output.
if command -v gcc; then
	_cpp_has_installed=true
	echo "Found cpp"
else
	_cpp_has_installed=false
	echo "Did not find cpp"
fi

# Install a C/C++ toolchain when the detection above found none.
# NOTE(review): the platform branches are placeholders ("DOING NOTHING YET");
# $is_mac / $is_linux appear to be set by the surrounding tooling — confirm.
function install_cpp() {
	if [[ "$_cpp_has_installed" == false ]]; then
		echo "-- Installing c++"
		echo "DOING NOTHING YET"
		if [[ $is_mac == 0 ]]; then
			echo "DOING NOTHING YET"
		elif [[ $is_linux == 0 ]]; then
			echo "DOING NOTHING YET"
		else
			echo "Unknown Platform"
			exit 1
		fi
		_cpp_has_installed=true
	else
		echo "Already installed cpp"
	fi
}

# Name handle used by the caller to invoke the installer.
cpp=install_cpp
| true |
9a1ab77a1001d40613b5aa57f46834880f859af3 | Shell | tarmiste/lfs-custom-configs | /lfs82/blfsrawscripts/648-z-alsa-tools | UTF-8 | 2,088 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# LFScript-generated build script for alsa-tools: locate or download the
# tarball, verify it, unpack into the build area, then build and install
# each tool.  All JH_* variables come from the LFScript environment.
export JH_PKG_DIR=alsa-tools
SRC_DIR=${JH_SRC_ARCHIVE}${JH_SRC_SUBDIRS:+/${JH_PKG_DIR}}
BUILD_DIR=${JH_BUILD_ROOT}${JH_BUILD_SUBDIRS:+/${JH_PKG_DIR}}
mkdir -p $SRC_DIR
mkdir -p $BUILD_DIR
cd $SRC_DIR
PACKAGE=alsa-tools-1.1.5.tar.bz2
# Fetch the tarball unless it is already cached locally (upstream FTP
# first, then the project mirror).
if [[ ! -f $PACKAGE ]] ; then
if [[ -f $JH_SRC_ARCHIVE/$PACKAGE ]] ; then
cp $JH_SRC_ARCHIVE/$PACKAGE $PACKAGE
else
wget -T 30 -t 5 ftp://ftp.alsa-project.org/pub/tools/alsa-tools-1.1.5.tar.bz2 ||
wget -T 30 -t 5 ${JH_FTP_SERVER}svn/a/$PACKAGE
fi
fi
# Verify the download; a mismatch aborts the script via set -e.
echo "3afb92eb1b4f2edc8691498e57c3ec78 $PACKAGE" | md5sum -c -
cd $BUILD_DIR
# Remove leftovers of any previous build attempt.
find . -maxdepth 1 -mindepth 1 -type d | xargs rm -rf
# Unpack the archive and derive JH_UNPACKDIR (its top-level directory);
# unknown extensions fall through to a plain copy into a -build directory.
case $PACKAGE in
*.tar.gz|*.tar.bz2|*.tar.xz|*.tgz|*.tar.lzma)
tar -xvf $SRC_DIR/$PACKAGE > unpacked
JH_UNPACKDIR=`grep '[^./]\+' unpacked | head -n1 | sed 's@^\./@@;s@/.*@@'`
;;
*.tar.lz)
bsdtar -xvf $SRC_DIR/$PACKAGE 2> unpacked
JH_UNPACKDIR=`head -n1 unpacked | cut -d" " -f2 | sed 's@^\./@@;s@/.*@@'`
;;
*.zip)
zipinfo -1 $SRC_DIR/$PACKAGE > unpacked
JH_UNPACKDIR="$(sed 's@/.*@@' unpacked | uniq )"
if test $(wc -w <<< $JH_UNPACKDIR) -eq 1; then
unzip $SRC_DIR/$PACKAGE
else
JH_UNPACKDIR=${PACKAGE%.zip}
unzip -d $JH_UNPACKDIR $SRC_DIR/$PACKAGE
fi
;;
*)
JH_UNPACKDIR=$JH_PKG_DIR-build
mkdir $JH_UNPACKDIR
cp $SRC_DIR/$PACKAGE $JH_UNPACKDIR
cp $(find . -mindepth 1 -maxdepth 1 -type l) $JH_UNPACKDIR
;;
esac
export JH_UNPACKDIR
cd $JH_UNPACKDIR
# Run a command as root: directly when already root, else via sudo, else su.
# NOTE(review): the escaped quotes in the su branch look like leftovers from
# a template-expansion stage — confirm before changing them.
as_root()
{
if [ $EUID = 0 ]; then $*
elif [ -x /usr/bin/sudo ]; then sudo $*
else su -c \\"$*\\"
fi
}
export -f as_root
# NOTE(review): the commands between `bash -e` and `exit` appear to be read
# by the child shell from this script's stdin (LFScript seems to pipe these
# scripts into bash), so the build steps run with -e while the outer shell
# resumes at `ldconfig` afterwards — confirm before restructuring.
bash -e
rm -rf qlo10k1 Makefile gitcompile
# Build and install every remaining tool; 'seq' keeps its buildable sources
# in the seq/sbiload subdirectory.
for tool in *
do
case $tool in
seq )
tool_dir=seq/sbiload
;;
* )
tool_dir=$tool
;;
esac
pushd $tool_dir
./configure --prefix=/usr
make
as_root make install
as_root /sbin/ldconfig
popd
done
unset tool tool_dir
exit
ldconfig
cd $BUILD_DIR
[[ -n "$JH_KEEP_FILES" ]] || rm -rf $JH_UNPACKDIR unpacked
exit | true |
2acf177763716debb4381b28155183f19d10279e | Shell | ghuntley/monorepo | /third_party/git/contrib/remote-helpers/git-remote-bzr | UTF-8 | 422 | 2.640625 | 3 | [
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"GPL-3.0-only",
"GPL-2.0-only",
"MIT"
] | permissive | #!/bin/sh
# Deprecation stub: the bzr remote helper is maintained outside git now, so
# every invocation just prints this pointer to stderr (heredoc is quoted, so
# nothing inside it expands).
cat >&2 <<'EOT'
WARNING: git-remote-bzr is now maintained independently.
WARNING: For more information visit https://github.com/felipec/git-remote-bzr
WARNING:
WARNING: You can pick a directory on your $PATH and download it, e.g.:
WARNING: $ wget -O $HOME/bin/git-remote-bzr \
WARNING: https://raw.github.com/felipec/git-remote-bzr/master/git-remote-bzr
WARNING: $ chmod +x $HOME/bin/git-remote-bzr
EOT
| true |
ad6866af97a26d2e963185b3b26914fd016be063 | Shell | factualaudio/factualaudio | /generate-site | UTF-8 | 571 | 3.46875 | 3 | [
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Generates the plots, the site and then postprocesses the site for production.
# All other options are passed to hugo.
# Usage: generate-site DESTINATION_DIR BASEURL [hugo options...]
set -e
set -o pipefail
# Glob setup: unmatched globs disappear, hidden files are included (the mv
# of cooked/* below relies on dotglob), and ** recurses.
shopt -s nullglob
shopt -s dotglob
shopt -s globstar
DESTINATION_DIR="$1"
shift
BASEURL="$1"
shift
./generate-plots
hugo --source hugo --destination "$DESTINATION_DIR/raw" --baseURL "$BASEURL" "$@"
# Postprocess a copy ("cooked") so the raw hugo output stays alongside it.
cp -pr "$DESTINATION_DIR"/raw "$DESTINATION_DIR"/cooked
# NOTE(review): after this cd, a *relative* DESTINATION_DIR would no longer
# resolve — the script appears to assume an absolute path; confirm callers.
cd postprocessing
./postprocess-site "$DESTINATION_DIR/cooked" "$BASEURL"
mv "$DESTINATION_DIR"/cooked/* "$DESTINATION_DIR"/
rmdir "$DESTINATION_DIR"/cooked
| true |
58c8823390a3a757e9d78cec770a5bf29210d173 | Shell | waynebhayes/SANA | /wrappedAlgorithms/GREAT/run_G_G.sh | UTF-8 | 3,085 | 3.46875 | 3 | [] | no_license | #!/bin/sh
# Gets both networks and alpha parameter form user
networkA="$1"
networkB="$2"
shift 2
# alphas are on remainder of command line.
for i in "$@"
do
alpha=$i
#makes the directories
mkdir "G-G"
mkdir "G-G/"$alpha
dir="G-G/"$alpha"/"$networkA
dest_time=$dir"/"$networkA"_times.txt"
mkdir $dir
dir="G-G/"$alpha"/"$networkA"/"$networkB"/"
mkdir $dir
gwA="networks/"$networkA".gw"
gwB="networks/"$networkB".gw"
countA="orbits/"$networkA"_count"
countB="orbits/"$networkB"_count"
scores=$dir$networkA"_"$networkB
egdcA=$dir$networkA".egdc"
egdcB=$dir$networkB".egdc"
countsA=$countA".ecounts"
countsB=$countB".ecounts"
time=$scores"_time.txt"
# Gets the graphlet degree centrality score from both networks
/usr/bin/time -f "%S %U" -o $time -a src/egdc $countsA $egdcA
/usr/bin/time -f "%S %U" -o $time -a src/egdc $countsB $egdcB
# Gets teh graphlet degree vector similarity score
egdvs=$dir$networkA"_"$networkB".egdvs"
/usr/bin/time -f "%S %U" -o $time -a src/egdvs $countsA $countsB $egdvs
scores=$dir$networkA"_"$networkB
# Combines the gdc and gdvs scores
/usr/bin/time -f "%S %U" -o $time -a python src/combiner.py $egdcA $egdcB $egdvs $alpha
ealn=$scores".ealn"
aln=$scores"_greed.aln"
param=$scores"_"$alpha"_scores.txt"
paramh=$scores"_"$alpha"_scores_hung.txt"
label=$scores"_node_label.txt"
node_score=$scores"_node_score.txt"
node_score_greedy=$scores"_node_score_greedy.txt"
node_score_hung=$scores"_node_score_hung.txt"
matrix=$scores"_matrix_alignment.aln"
# Plugs matrix of scores into the greedy function, to get an edge alignment
/usr/bin/time -f "%S %U" -o $time -a python src/greedy_matrix.py $param $ealn
# Use edge alignment to get node similarity scores
/usr/bin/time -f "%S %U" -o $time -a python src/ealign2nscore_sum.py $ealn $egdvs $node_score $label
# Scales the values to be between 0 and 1
/usr/bin/time -f "%S %U" -o $time -a python src/num_conv.py $node_score
# Uses node similarity scores to create the node mapping
/usr/bin/time -f "%S %U" -o $time -a python src/greedy_matrix.py $node_score_greedy $matrix
# Outputs the node mapping with node labels
/usr/bin/time -f "%S %U" -o $time -a python src/alignment.py $matrix $label $aln
/usr/bin/time -f "%S %U" -o $time -a python src/isolated_nodes.py $aln $gwA $gwB
#gets scores for MI-GRAAL input
migraal=$scores"_migraal.txt"
python src/migraal_conv.py $node_score_greedy $label $migraal
net=$networkA"_"$networkB
python src/time.py $time $dest_time $net
dir="G-G/"$alpha"/"$networkA"/Alignments"
mkdir $dir
cp $aln $dir
dir="G-G/"$alpha"/"$networkA"/Migraal_Scores"
mkdir $dir
cp $migraal $dir
rm $node_score_greedy
rm $param
rm $node_score_greedy
rm $matrix
rm $egdvs
rm $egdcA
rm $egdcB
rm $migraal
rm $node_score_hung
rm $param2
rm $node_score
rm $paramh
done
| true |
bffbaf02a3e9b30d50ff874d70b022c230d9f86f | Shell | Sebastien-web/php-recap | /macos10.15_install.sh | UTF-8 | 2,355 | 2.875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Homebrew
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
if [[ $? -ne 0 ]] ; then
exit 1
fi
brew -v
if [[ $? -ne 0 ]] ; then
exit 1
fi
# Git
brew install git || brew upgrade git
if [[ $? -ne 0 ]] ; then
exit 1
fi
git --version
if [[ $? -ne 0 ]] ; then
exit 1
fi
# Symfony CLI
curl -sS https://get.symfony.com/cli/installer | bash
if [[ $? -ne 0 ]] ; then
exit 1
fi
mv ~/.symfony/bin/symfony /usr/local/bin/symfony
if [[ $? -ne 0 ]] ; then
exit 1
fi
symfony -V
if [[ $? -ne 0 ]] ; then
exit 1
fi
# PHP
brew install php@7.3
if [[ $? -ne 0 ]] ; then
exit 1
fi
brew link php@7.3 --force
if [[ $? -ne 0 ]] ; then
exit 1
fi
export PATH="/usr/local/opt/php@7.3/bin:$PATH"
if [[ $? -ne 0 ]] ; then
exit 1
fi
sudo sed -i -e 's/post_max_size = 8M/post_max_size = 64M/g' $(php -r "echo php_ini_loaded_file();")
if [[ $? -ne 0 ]] ; then
exit 1
fi
sudo sed -i -e 's/upload_max_filesize = 8M/upload_max_filesize = 64M/g' $(php -r "echo php_ini_loaded_file();")
if [[ $? -ne 0 ]] ; then
exit 1
fi
sudo sed -i -e 's/memory_limit = 128M/memory_limit = -1/g' $(php -r "echo php_ini_loaded_file();")
if [[ $? -ne 0 ]] ; then
exit 1
fi
php -v
if [[ $? -ne 0 ]] ; then
exit 1
fi
# Composer
sudo php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
if [[ $? -ne 0 ]] ; then
exit 1
fi
sudo php composer-setup.php --version=1.9.1 --install-dir=/usr/local/bin/
if [[ $? -ne 0 ]] ; then
exit 1
fi
sudo php -r "unlink('composer-setup.php');"
if [[ $? -ne 0 ]] ; then
exit 1
fi
sudo mv /usr/local/bin/composer.phar /usr/local/bin/composer
if [[ $? -ne 0 ]] ; then
exit 1
fi
composer -V
if [[ $? -ne 0 ]] ; then
exit 1
fi
# MariaDB
brew install mariadb@10.4
if [[ $? -ne 0 ]] ; then
exit 1
fi
brew link mariadb@10.4 --force
if [[ $? -ne 0 ]] ; then
exit 1
fi
# NodeJS
brew install node@12
if [[ $? -ne 0 ]] ; then
exit 1
fi
node -v
if [[ $? -ne 0 ]] ; then
exit 1
fi
npm -v
if [[ $? -ne 0 ]] ; then
exit 1
fi
# Yarn
curl -o- -L https://yarnpkg.com/install.sh | bash -s -- --version 1.21.1
if [[ $? -ne 0 ]] ; then
exit 1
fi
export PATH="$HOME/.yarn/bin:$HOME/.config/yarn/global/node_modules/.bin:$PATH"
if [[ $? -ne 0 ]] ; then
exit 1
fi
yarn -v
if [[ $? -ne 0 ]] ; then
exit 1
fi
| true |
f5cf797ee66025a514597b0a0bd46d8efb5a94cc | Shell | tshellum/multisensor-SLAM | /setup/installation/08-install-kitti2bag | UTF-8 | 703 | 2.84375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
set -euxo pipefail
# command -v sudo > /dev/null 2>&1 || function sudo { eval ${@@Q}; }
# Find software directory
cd ~/software
# Install dependencies
# pip install tf
# Install the default repository for kitti2bag: https://github.com/tomas789/kitti2bag
# pip install kitti2bag
# If erronous behaviour is experienced, rather try altertech's fork: https://github.com/alteretch/kitti2bag
if [ ! -d "pykitti" ]
then
git clone https://github.com/utiasSTARS/pykitti.git # test on commit d3e1bb81676e831886726cc5ed79ce1f049aef2c
fi
cd pykitti
sudo python3 setup.py install
cd ..
if [ ! -d "kitti2bag" ]
then
git clone https://github.com/AlterEtch/kitti2bag.git
fi
cd kitti2bag
sudo python3 setup.py install
| true |
6b472206e7b58230e5657b44cd14fcf695b94662 | Shell | kvellano/Mac-Scripts | /Install-O365 | UTF-8 | 275 | 2.953125 | 3 | [] | no_license | #!/bin/bash
echo "Downloading and Installing Office 365"
cd /tmp
#Download Office 365 Installer
curl -JL "https://go.microsoft.com/fwlink/?linkid=525133" -o "/tmp/office365.pkg"
#Install
sudo /usr/sbin/installer -pkg office365.pkg -target /
echo "Installation Complete"
| true |
b8ab086f15a296b6fd5c072f9cd372b8e6a1afda | Shell | delkyd/alfheim_linux-PKGBUILDS | /mingw-w64-libxlsxwriter-git/PKGBUILD | UTF-8 | 1,551 | 3 | 3 | [] | no_license | # Maintainer: Michael Yang <ohmyarchlinux@gmail.com>
pkgname=mingw-w64-libxlsxwriter-git
pkgver=0.7.2.r490.cc960e7
pkgrel=1
pkgdesc="A C library for creating Excel XLSX files (mingw-w64)"
arch=('any')
url='https://libxlsxwriter.github.io'
license=('BSD')
makedepends=('git' 'mingw-w64-cmake' 'mingw-w64-zlib>=1.2.8')
options=('!strip' '!buildflags' 'staticlibs')
conflicts=('mingw-w64-libxlsxwriter')
provides=('mingw-w64-libxlsxwriter')
source=("git://github.com/jmcnamara/libxlsxwriter.git")
sha512sums=('SKIP')
_architectures='i686-w64-mingw32 x86_64-w64-mingw32'
pkgver() {
cd libxlsxwriter
echo "$(grep LXW_VERSION include/xlsxwriter.h | cut -d '"' -f2).r$(git rev-list --count HEAD).$(git rev-parse --short HEAD)"
}
build() {
unset LDFLAGS
# for _arch in ${_architectures}; do
# mkdir -p build-${_arch} && pushd build-${_arch}
# ${_arch}-cmake \
# -DBUILD_STATIC=OFF \
# -DCMAKE_BUILD_TYPE=Release \
# ../libxlsxwriter
# make
# popd
# done
for _arch in ${_architectures}; do
mkdir -p build-${_arch}-static && pushd build-${_arch}-static
${_arch}-cmake \
-DCMAKE_BUILD_TYPE=Release \
../libxlsxwriter
make
popd
done
}
package() {
# for _arch in ${_architectures}; do
# cd "${srcdir}/build-${_arch}"
# make DESTDIR="${pkgdir}" install
# ${_arch}-strip --strip-unneeded "$pkgdir"/usr/${_arch}/bin/*.dll
# done
for _arch in ${_architectures}; do
cd "${srcdir}/build-${_arch}-static"
make DESTDIR="${pkgdir}" install
${_arch}-strip -g "$pkgdir"/usr/${_arch}/lib/*.a
done
}
| true |
4ad1841a2c0e756a7be26bb6eb7f105a5b49f159 | Shell | eduardoveiga/mesalink | /ci/travis/deploy_setup.sh | UTF-8 | 269 | 3.078125 | 3 | [
"BSD-3-Clause",
"ISC"
] | permissive | #!/bin/bash
set -xev
if [ "$ARCH" != "" ] && [ "$TRAVIS_RUST_VERSION" == "stable" ]
then
mkdir -p $TRAVIS_BUILD_DIR/releases
cd $TRAVIS_BUILD_DIR/inst
tar -zcvf $TRAVIS_BUILD_DIR/releases/mesalink-$TRAVIS_BRANCH-$ARCH.tar.gz *
cd $TRAVIS_BUILD_DIR
fi
| true |
d58dffaf419abd7e4d344d60de2166f92ed18f01 | Shell | dtec-xstack/rose-sh | /dependencies/docbook.sh | UTF-8 | 3,321 | 3.484375 | 3 | [] | no_license | : ${DOCBOOK_DEPENDENCIES:=}
: ${DOCBOOK_CONFIGURE_OPTIONS:=
}
: ${DOCBOOK_TARBALL:="${DEPENDENCIES_DIR}/docbook-3.1.tar.gz"}
: ${DOCBOOK_INSTALLED_FILE:="${ROSE_SH_DEPS_PREFIX}/include/docbook/docbook.h"}
#-------------------------------------------------------------------------------
install_docbook()
#-------------------------------------------------------------------------------
{
info "Installing application"
#-----------------------------------------------------------------------------
rosesh__install_dep_setup || exit 1
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dependencies
#-----------------------------------------------------------------------------
install_deps ${DOCBOOK_DEPENDENCIES} || exit 1
#-----------------------------------------------------------------------------
# Installation
#-----------------------------------------------------------------------------
set -x
#-----------------------------------------------------------------------------
if [ ! -f "${DOCBOOK_INSTALLED_FILE}" ]; then
mkdir -p "docbook" || fail "Unable to create application workspace"
cd "docbook/" || fail "Unable to change into the application workspace"
tar xzvf "${DOCBOOK_TARBALL}" || fail "Unable to unpack application tarball"
cd "$(basename ${DOCBOOK_TARBALL%.tar.gz})" || fail "Unable to change into application source directory"
# Installation: http://www.linuxfromscratch.org/blfs/view/svn/pst/sgml-dtd-3.html
#
# 1. Removes the ENT definitions from the catalog file
# 2. Replaces the DTDDECL catalog entry, which is not supported by Linux
# SGML tools, with the SGMLDECL catalog entry
#
sed -i -e '/ISO 8879/d' \
-e 's|DTDDECL "-//OASIS//DTD DocBook V3.1//EN"|SGMLDECL|g' \
docbook.cat || fail "Failed sed patching"
install -v -d -m755 \
"${ROSE_SH_DEPS_PREFIX}/share/sgml/docbook/sgml-dtd-3.1" \
|| fail "Failed to install sgml-dtd-3.1"
install -v docbook.cat \
"${ROSE_SH_DEPS_PREFIX}/share/sgml/docbook/sgml-dtd-3.1/catalog" \
|| fail "Failed to install docbook.cat"
cp -v -af *.dtd *.mod *.dcl \
"${ROSE_SH_DEPS_PREFIX}/share/sgml/docbook/sgml-dtd-3.1" \
|| fail "Failed to cp files to sgml-dtd-3.1"
install-catalog --add \
"${ROSE_SH_DEPS_PREFIX}/etc/sgml/sgml-docbook-dtd-3.1.cat" \
"${ROSE_SH_DEPS_PREFIX}/share/sgml/docbook/sgml-dtd-3.1/catalog" \
|| fail "Failed to install-catalog"
install-catalog --add \
"${ROSE_SH_DEPS_PREFIX}/etc/sgml/sgml-docbook-dtd-3.1.cat" \
"${ROSE_SH_DEPS_PREFIX}/etc/sgml/sgml-docbook.cat" \
|| fail "Failed to install-catalog"
else
info "[SKIP] docbook is already installed"
fi
#-----------------------------------------------------------------------------
set +x
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
rosesh__install_dep_teardown || exit 1
#-----------------------------------------------------------------------------
}
| true |
1a4b1ef1b085ff235af627593e0744bc6e0db8d5 | Shell | clintval/aur-packages | /python-bcbio-gff/PKGBUILD | UTF-8 | 1,568 | 2.78125 | 3 | [
"MIT"
] | permissive | # Maintainer: Clint Valentine <valentine.clint@gmail.com>
_name=bcbio-gff
pkgbase='python-bcbio-gff'
pkgname=('python-bcbio-gff' 'python2-bcbio-gff')
pkgver=0.6.4
pkgrel=1
pkgdesc="Library to read and write Generic Feature Format"
arch=('any')
url=https://pypi.org/project/bcbio-gff/
license=('BSD')
makedepends=(
'python' 'python-setuptools'
'python2' 'python2-setuptools')
options=(!emptydirs)
source=(
"${pkgname}"-"${pkgver}".tar.gz::https://pypi.io/packages/source/"${_name:0:1}"/"${_name}"/"${_name}"-"${pkgver}".tar.gz
https://raw.githubusercontent.com/biopython/biopython/biopython-171/LICENSE.rst
)
sha256sums=(
'e0efddc2376ed11b8f9682029b58bfd523a9aa62199f870b4ce64509ff99820b'
'8bf4032e364a232f6afa3daa250b76dbd7a06b29fd9939351b34590ccc81f35d'
)
prepare() {
cp -a "${_name}"-"${pkgver}"{,-py2}
}
package() {
cd "${srcdir}"/"${_name}"-"${pkgver}"
python setup.py install --root="${pkgdir}/" --optimize=1
}
build(){
cd "${srcdir}"/"${_name}"-"${pkgver}"
python setup.py build
cd "${srcdir}"/"${_name}"-"${pkgver}"-py2
python2 setup.py build
}
package_python2-bcbio-gff() {
depends=('python2' 'python2-six')
install -Dm644 LICENSE.rst "${pkgdir}"/usr/share/licenses/"${pkgname}"/LICENSE.rst
cd "${_name}"-"${pkgver}"-py2
python2 setup.py install --root="${pkgdir}"/ --optimize=1 --skip-build
}
package_python-bcbio-gff() {
depends=('python' 'python-six')
install -Dm644 LICENSE.rst "${pkgdir}"/usr/share/licenses/"${pkgname}"/LICENSE.rst
cd "${_name}"-"${pkgver}"
python setup.py install --root="${pkgdir}"/ --optimize=1 --skip-build
}
| true |
151191311930bfad9c8f26c699d3f99e6bcb07bc | Shell | reuteras/usbclean | /create_files.sh | UTF-8 | 924 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
TEXT="Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante hendrerit. Donec et mollis dolor. Praesent et diam eget libero egestas mattis sit amet vitae augue. Nam tincidunt congue enim, ut porta lorem lacinia consectetur. Donec ut libero sed arcu vehicula ultricies a non tortor. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean ut gravida lorem."
for ((j=0; j < 5000; j++)); do
# shellcheck disable=SC1117
# shellcheck disable=SC2028
echo "Secret file $j in root. \n$TEXT" > secret_file_"$j".txt;
done
i=0
while true; do
mkdir dir"$i"
for ((j=0; j < 5000; j++)); do
# shellcheck disable=SC1117
# shellcheck disable=SC2028
echo "Secret file $j in dir $i. \n$TEXT" > dir"$i"/secret_file_"$j".txt;
done
i=$(( i + 1 ))
done
| true |
98dd4e808ac3c0129278d3ae7be447c2b6f60e19 | Shell | lsst-camera-dh/mutils | /bin/plotDeltaDacs.sh | UTF-8 | 1,249 | 3.6875 | 4 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
#
#------------------------------------------------------------------------
function usage {
cat <<-EOM
Usage ${0##*/} rebPath startTime
rebPath ~ <subsystem>/<bay>/Reb[012]
startTime ~ 2020-06-19T11:00:41-07:00
quote time if it contains spaces
Options:
-h (print help msg)
-d <duration>
EOM
exit 1
}
if [ $# -lt 2 ]; then
usage
fi
#-- process commandline options
#
duration=
while getopts "swhd:" Option
do
case $Option in
h ) usage;;
d ) duration=$OPTARG;;
s ) savePlot="yes";;
w ) waitTime="yes";;
* ) ${ECHO} "Unimplemented option chosen.";; # Default.
esac
done
shift $((OPTIND - 1))
declare -a regexes
regexes[0]=${1}'/[CO].*I' # board currents
regexes[1]=${1}'/[PSR].*[UL]$' # clock levels
regexes[2]=${1}'/S.*/.*V$' # bias voltages
if [ $duration"XXX" == "XXX" ] ; then
declare -i duration=7
fi
if [ $waitTime ] ; then
declare -i s=$duration+2
sleep $s
fi
sarg=
if [ $savePlot ] ; then
sarg=" --save /tmp/"$(echo -n $1 | sed 's?/?_?g')"_loadDeltaDacs.png"
fi
trender.py --lay 3x1 --out ${sarg} --start "${2}" --title "loadDeltaDacs:${1}" --overlayreg --plot --dur ${duration} --fmt "o-" -- "${regexes[@]}"
| true |
8d3372ae6815e032c9ef5944bffc7395fde053ba | Shell | ETHZ-INS/Sperm-RNA-Dex | /sperm14days_smallRNA_seq/read_processing/wrapper_deduplicate.sh | UTF-8 | 599 | 3.640625 | 4 | [] | no_license | #!/bin/bash
#
#Deduplication and trimming of 4 random nucleotides at both ends of the reads
#
#cd into the directory containing the adapter-trimmed files
#
cd output_cutadapt
#
#make directory to store output
if [ ! -d output_deduplicate ]; then
mkdir output_deduplicate
fi
#
for f in *fastq.gz; do
echo "Deduplicating $f..."
zcat $f | paste - - - - | \
sort -u --parallel 4 --buffer-size 2G --stable -t $'\t' -k2,2 | \
awk -F"\t" '{
sl=length($2)
print $1"\n"substr($2,5,sl-8)"\n+\n"substr($4,5,sl-8)
}' | gzip > output_deduplicate/`basename $f fastq.gz`dedup.fastq.gz
done
#
exit $?
| true |
ee5ea4540608ab0cc02b4f48f27ea57ba7abf502 | Shell | AdLucem/autognomy | /configs/netConfigs/extensions.sh | UTF-8 | 1,072 | 2.546875 | 3 | [] | no_license | #!/bin/bash
echo "Installing extensions..."
# caffeine
cd ~
git clone http://github.com/eonpatapon/gnome-shell-extension-caffeine.git
cd gnome-shell-extension-caffeine
./update-locale.sh
glib-compile-schemas --strict --targetdir=caffeine@patapon.info/schemas/ caffeine@patapon.info/schemas
cp -r caffeine@patapon.info ~/.local/share/gnome-shell/extensions/
cd ~/.local/share/gnome-shell/extensions/caffeine@patapon.info
# coverflow alt-tab
cd ~
git clone https://github.com/dmo60/CoverflowAltTab.git
cd CoverflowAltTab
cp -r CoverflowAltTab@dmo60.de ~/.local/share/gnome-shell/extensions/
cd ~/.local/share/gnome-shell/extensions/CoverflowAltTab@dmo60.de
# dash to dock
cd ~
git clone https://github.com/micheleg/dash-to-dock.git
cd dash-to-dock
make
make install
# freon
cd ~
sudo apt install lm-sensors
git clone https://github.com/UshakovVasilii/gnome-shell-extension-freon.git
cd gnome-shell-extension-freon
glib-compile-schemas freon@UshakovVasilii_Github.yahoo.com/schemas/
cp -r freon@UshakovVasilii_Github.yahoo.com ~/.local/share/gnome-shell/extensions/
sudo sensors-detect
# hide top bar
sudo apt install gnome-shell-extension-autohidetopbar
| true |
c017357f01649d53bef4f189f6ffa42216fbb7f1 | Shell | nkabir/tufte-latex | /default.pdf.do | UTF-8 | 247 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
export PDFLATEX="/usr/bin/pdflatex"
PROJECT_FOLDER="$(dirname $2)"
echo $2 1>&2
latexmk -f \
-pdflatex='${PDFLATEX:?} -file-line-error --shell-escape -synctex=1' \
-outdir="${PROJECT_FOLDER:?}/build" -pdf "$2".tex 1>&2
| true |
184677b63c6bd1f56845ae96877c79bb143ba63e | Shell | zhangerjun/TBSSVBA | /TBSS/scripts/tbss_res_deproj_asym.sh | UTF-8 | 827 | 3.046875 | 3 | [] | no_license | #!/bin/sh
#
# Created on 22-08-2016 by Akakpo Luis
#
# Deprojects voxel for visualisation in standard space
#
# Part of MATLAB pipeline
source ~/.bash_profile
STAT_FOLDER=$1
STAT_IMG=$2
FA_THRESH=$3
STAT_THRESH=$4
cd $STAT_FOLDER
echo "Masking non significant voxels in stat image"
fslmaths $STAT_IMG -thr $STAT_THRESH stat_thr_tmp
echo "Flipping result image"
fslswapdim stat_thr_tmp -x y z stat_thr_flip_tmp
echo "Combining both to have a symmetric image"
fslmaths stat_thr_tmp -add stat_thr_flip_tmp symm_stat_thr_tmp
echo "Back - Projecting"
$FSLDIR/bin/tbss_skeleton -i mean_FA_symmetrised -p ${FA_THRESH} mean_FA_symmetrised_skeleton_mask_dst $FSLDIR/data/standard/LowerCingulum_1mm all_FA ${STAT_IMG}_tmp -D symm_stat_thr_tmp
$FSLDIR/bin/immv ${STAT_IMG}_tmp_deprojected ${STAT_IMG}_to_all_FA
#$FSLDIR/bin/imrm *_tmp
| true |
f053380f0f12e702372934af6ff744f51a652937 | Shell | 2fifty6/config | /swartz_files/initscripts/aws.zsh | UTF-8 | 5,645 | 3.296875 | 3 | [] | no_license | # AWS
alias awsedit="vim $0"
alias awsrefresh="source $0"
[[ ! -e ~/.dotfiles/aws ]] && mkdir -p ~/.dotfiles/aws
#[[ ! -x /usr/local/share/zsh/site-functions/_envselect ]] &&
# sudo cat > /usr/local/share/zsh/site-functions/_envselect <<EOF
##compdef envselect
#compadd \$(command ls -1 ~/.dotfiles/aws 2>/dev/null --color=none |
# sed -e 's/ /\\\\ /g' -e 's/.*aws://g')
#EOF
function envselect(){
if [[ -z $1 ]]; then
ls -l ~/.autoenv.zsh
else
AUTOENV_PATH=~/.dotfiles/aws/$1/.autoenv.zsh
if [[ -e $AUTOENV_PATH ]]; then
ln -sf $AUTOENV_PATH ~/.autoenv.zsh
source ~/.autoenv.zsh
else
echo "No match for $AUTOENV_PATH"
fi
fi
}
alias enva='env | grep --color=no AWS'
function awsdefault (){
export AWS_DEFAULT_PROFILE=$1
}
# ALB{{{
function target-group-health (){
aws elbv2 describe-target-health --target-group-arn $1 | jq -r '.TargetHealthDescriptions[].TargetHealth.State'
}
# }}}
# ASG{{{
function rmasg () {
ASG_NAME=$1
aws autoscaling delete-auto-scaling-group --auto-scaling-group-name $ASG_NAME --force-delete
}
function lsasg () {
aws autoscaling describe-auto-scaling-groups|jq -r '.AutoScalingGroups[].AutoScalingGroupName'
}
function rmlc () {
LC_NAME=$1
aws autoscaling delete-launch-configuration --launch-configuration-name $LC_NAME
}
function lslc () {
aws autoscaling describe-launch-configurations | jq -r '.LaunchConfigurations[].LaunchConfigurationName'
}
# }}}
# EC2{{{
export RUNNING_INSTANCE_FILTER="Name=instance-state-name,Values=running"
alias describe-ami='aws ec2 describe-images --image-ids '
alias describe-ec2='aws ec2 describe-instances --instance-ids '
alias ec2ids="aws ec2 describe-instances --instance-ids"
function ami-jqtags(){
jq -r '.Images[].Tags[] | .Key + "\t\t" + .Value'
}
function ec2-jqid(){
jq -r '.Reservations[].Instances[].InstanceId'
}
function ec2-jqname(){
jq -r '.Reservations[].Instances[].Tags[] | select(.Key=="Name") | .Value'
}
function ec2-jqprivateip(){
jq -r ".Reservations[].Instances[].NetworkInterfaces[].PrivateIpAddresses[0].PrivateIpAddress"
}
function ec2-jqpublicip(){
jq -r ".Reservations[].Instances[].PublicIpAddress"
}
function ec2-jqvolumes(){
jq -r '.Reservations[].Instances[].BlockDeviceMappings[] | .DeviceName + "\t" + .Ebs.VolumeId'
}
function ec2-jqvolumeids(){
jq -r '.Reservations[].Instances[].BlockDeviceMappings[].Ebs.VolumeId'
}
function ec2-byname (){
aws ec2 describe-instances --filters "Name=tag:Name,Values=$1" $RUNNING_INSTANCE_FILTER
}
function ec2-namebyid (){
aws ec2 describe-instances --instance-ids $1 |
ec2-jqname
}
function ec2-idbyname (){
ec2-byname $1 |
ec2-jqid
}
function ec2-ipbyname (){
aws ec2 describe-instances --filters "Name=tag:Name,Values=$1" $RUNNING_INSTANCE_FILTER |
ec2-jqprivateip
}
function ec2-byvpc (){
aws ec2 describe-instances --filters "Name=vpc-id,Values=$1" $RUNNING_INSTANCE_FILTER
}
function ec2-ipbyvpc (){
aws ec2 describe-instances --filters "Name=vpc-id,Values=$1" $RUNNING_INSTANCE_FILTER |
ec2-jqprivateip
}
function ec2-ipbynamevpc (){
aws ec2 describe-instances --filters "Name=tag:Name,Values=$1" "Name=vpc-id,Values=$2" $RUNNING_INSTANCE_FILTER |
ec2-jqprivateip
}
function ec2-publicipbyname (){
aws ec2 describe-instances --filters "Name=tag:Name,Values=$1" $RUNNING_INSTANCE_FILTER |
ec2-jqpublicip
}
function ec2-publicipbynamevpc (){
aws ec2 describe-instances --filters "Name=tag:Name,Values=$1" "Name=vpc-id,Values=$2" $RUNNING_INSTANCE_FILTER |
ec2-jqpublicip
}
function ec2-namebyip (){
aws ec2 describe-instances --filters "Name=private-ip-address,Values=$1" $RUNNING_INSTANCE_FILTER | jq -r '.Reservations[].Instances[].Tags[] | select(.Key=="Name") | .Value'
}
function ec2-snapshotbyid (){
aws ec2 describe-snapshots --snapshot-ids $*
}
# }}}
# ELB{{{
function elb-jqname(){
jq -r '.LoadBalancerDescriptions[].LoadBalancerName'
}
function elb-jqstate(){
jq -r '.InstanceStates[].State'
}
function elb-jqhealth(){
jq -r '.LoadBalancerDescriptions[].HealthCheck.Target'
}
function elb-getstatebyname(){
ELB_NAME=$1
aws elb describe-instance-health --load-balancer-name $ELB_NAME | elb-jqstate
}
function elb-gethealthbyname(){
ELB_NAME=$1
aws elb describe-load-balancers --load-balancer-name $ELB_NAME | elb-jqhealth
}
function lselb(){
aws elb describe-load-balancers | elb-jqname
}
function rmelb(){
ELB_NAME=$1
aws elb delete-load-balancer --load-balancer-name $ELB_NAME
}
# }}}
# Cloudwatch{{{
function cloudwatch-jqmetric(){
jq -r '.Metrics[] | .Dimensions[].Value + "-" + .MetricName' | sort
}
# }}}
# IAM{{{
function iam-instance-profile (){
aws iam get-instance-profile --instance-profile-name $1
}
function iam-jqprofilerole (){
jq -r '.InstanceProfile.Roles[].RoleName'
}
# }}}
# RDS
function rds-instances (){
aws rds describe-db-instances $*
}
function rds-jqname (){
jq -r '.DBInstances[].DBInstanceIdentifier'
}
function rds-jqarn (){
jq -r '.DBInstances[].DBInstanceArn'
}
# keypairs{{{
function mkkeypair (){
KEY_NAME=$1
[[ ! -z $2 ]] && REGION=$2 || REGION=us-east-1
aws ec2 create-key-pair --key-name $KEY_NAME --region $REGION |
ruby -e "require 'json'; puts JSON.parse(STDIN.read)['KeyMaterial']" > ~/.ssh/$KEY_NAME.pem &&
chmod 600 ~/.ssh/$KEY_NAME.pem
}
function rmkeypair (){
KEY_NAME=$1
[[ ! -z $2 ]] && REGION=$2 || REGION=us-east-1
aws ec2 delete-key-pair --key-name $KEY_NAME --region $REGION &&
rm ~/.ssh/$KEY_NAME.pem
}
# }}}
# VPC{{{
function vpc-byname (){
aws ec2 describe-vpcs --filters Name=tag:Name,Values=$1
}
function vpc-jqid (){
jq -r '.Vpcs[].VpcId'
}
#}}}
| true |
6e723e0d6f5f75a997ce99a954ada9899713c5e1 | Shell | aur-archive/libnl-git | /PKGBUILD | UTF-8 | 819 | 2.671875 | 3 | [] | no_license | pkgname=libnl-git
_pkgname=libnl
pkgver=3.2.22.10.g408a1b8
pkgrel=1
pkgdesc="Convenience library for kernel netlink sockets"
arch=('i686' 'x86_64')
url="http://www.infradead.org/~tgr/libnl/"
license=('LGPL2.1')
depends=('glibc')
makedepends=('git')
provides=('libnl')
conflicts=('libnl')
options=('!libtool')
backup=('etc/libnl/classid'
'etc/libnl/pktloc')
source=('git://git.infradead.org/users/tgr/libnl.git')
md5sums=('SKIP')
sha256sums=('SKIP')
pkgver() {
cd "$_pkgname"
git describe | sed "s/$_pkgname//" | sed 's/_/\./g' | sed 's/-/\./g'
}
build() {
cd "$_pkgname"
./autogen.sh
./configure --prefix=/usr --sysconfdir=/etc \
--enable-shared --disable-static --with-pic \
--sbindir=/usr/bin # --disable-cli
make V=1
}
package() {
cd "$_pkgname"
make DESTDIR="$pkgdir" install
}
| true |
ebea7da124c61a4a96916d34f355a18f522226d8 | Shell | AlexCristian/Naive_Bayes | /tools/genome_download.bash | UTF-8 | 3,368 | 4.125 | 4 | [] | no_license | #! /bin/bash
if [ -z "$1" ]
then
echo "usage: ./genome_download.bash [taxonomic rank]
domain
kingdom
division
subdivision
class
subclass
superorder
order
suborder
family
subfamily
genus
species
"
exit
else
depth=$1
fi
function clearLastLine {
tput cuu 1 && tput el
}
function download {
if [ -z "$3" ]
then
down_text="Download"
else
down_text="($3) Download"
fi
echo "$down_text $1 - [IN PROGRESS]"
wget $1 --directory-prefix $2 -q && retval=$?
while [ $retval -ne "0" ]
do
clearLastLine
echo "$down_text $1 - [FAILED]"
echo "Retrying."
echo "$down_text $1 - [IN PROGRESS]"
wget $1 --directory-prefix $2 -q && retval=$?
done
clearLastLine
echo "$down_text $1 - [COMPLETE]"
}
if [ -d .tree_data ]
then
rm -r .tree_data
fi
mkdir .tree_data # Create a throwaway dir for auxiliary files
echo "Download genome listings [IN PROGRESS]"
# These files change as more genomes are added
download ftp://ftp.ncbi.nlm.nih.gov/genomes/refseq/bacteria/assembly_summary.txt .tree_data # Correlates IDs with FTP download links
download ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/taxdump.tar.gz .tree_data # Correlates node IDs with parent IDs
tar -zxf .tree_data/taxdump.tar.gz -C .tree_data nodes.dmp # Extract only what we need
# Extract the parents and children from the node file, and split the data
# over two files.
awk '{print $1}' .tree_data/nodes.dmp > .tree_data/child.txt
awk '{print $3}' .tree_data/nodes.dmp > .tree_data/parent.txt
awk '{print $5}' .tree_data/nodes.dmp > .tree_data/class.txt
# Begin processing data into FTP links
awk -F "\t" '$12=="Complete Genome" && $11=="latest"{print $6, $20}' .tree_data/assembly_summary.txt > .tree_data/ftpdirpaths # Preserves ID and starts printing FTP link
# Output in the format "ID ftpLink"
awk 'BEGIN{FS=OFS="/";filesuffix="genomic.fna.gz"}{ftpdir=$0;asm=$10;file=asm"_"filesuffix;print ftpdir,file}' .tree_data/ftpdirpaths > .tree_data/ftpfilepaths
awk '{print $1}' .tree_data/ftpfilepaths > .tree_data/ids.txt
awk '{print $2}' .tree_data/ftpfilepaths > .tree_data/ftp_links.txt
# Finished building tree, start building directory structure
if [ -d genomes ]
then
rm -r genomes
fi
mkdir genomes
echo "Download genome listings [COMPLETE]"
echo "Constructing specified tree [IN PROGRESS]"
chmod +x generate_dir_structure.py
./generate_dir_structure.py $depth genomes
clearLastLine
echo "Constructing specified tree [COMPLETE]"
echo "Download genomes [IN PROGRESS]"
file_count=$(cat .tree_data/ftpdirpaths | wc -l)
progress=1
# Proceed to download the files
for dir in $(ls -d genomes/*/)
do
download_list=$dir"to_download.txt"
while read url
do
download $url $dir $progress/$file_count
progress=$(($progress+1))
done < $download_list
rm $download_list
done
clearLastLine
echo "Download genomes [COMPLETE]"
echo "Decompress genomes [IN PROGRESS]"
# Decompress the files
progress=1
for archive in $(ls genomes/*/*.gz)
do
echo "($progress/$file_count) Decompress $archive [IN PROGRESS]"
gzip -dq $archive
clearLastLine
echo "($progress/$file_count) Decompress $archive [COMPLETE]"
progress=$(($progress+1))
done
echo "Decompress genomes [COMPLETE]"
clearLastLine
echo "Download genomes [COMPLETE]"
#./gbfftofasta.bash genomes
echo "Download successful."
| true |
ac9c6c5cfbc1557abf15e21e5c72e0e8f4c21dca | Shell | dinoallo/polybar-ibus | /scripts/ibus-switch.sh | UTF-8 | 887 | 3.4375 | 3 | [] | no_license | #!/bin/sh
ENTRIES=`ibus read-config | grep engines-order | sed 's/engines-order:\|\[\|\]\| //g' | sed 's/,/\n/g' | sed "s/'//g"`
# OUTPUT=""
# for entry in $ENTRIES
# do
# e=`echo $entry | awk -F ':' '{print $1'}`
# la=`echo $entry | awk -F ':' '{print $2'}`
# la_v=`echo $entry | awk -F ':' '{print $3'}`
# lang=`echo $entry | awk -F ':' '{print $4'}`
# if [ ! ${lang} = '\n' ]
# then
# OUTPUT="$OUTPUT language: $lang"
# fi
# if [ ! ${la} = '\n' ]
# then
# if [ ! ${la_v} = '\n' ]
# then
# OUTPUT="$OUTPUT layout: $la, $la_v"
# else
# OUTPUT="$OUTPUT layout: $la"
# fi
# fi
# OUTPUT="$OUTPUT engine: $e\n"
# done
SELECTION=`printf "%s\n" $ENTRIES | rofi -dmenu -i -p 'Select'`
if [ -n $SELECTION ] && [ ! $SELECTION = '\n' ]
then
ibus engine $SELECTION
fi
exit 0
| true |
eed36508977f037ff63968b9c6764448817f3b76 | Shell | pvsa/pnet-suite | /TOOLS/netwatch | UTF-8 | 1,508 | 3.640625 | 4 | [
"MIT"
] | permissive | #! /bin/bash
#PvSA, 11.6.11
# Zeigt die aktiven aktullen Netzwerkverbindungen an, ohne localhost
# for different user (not just root)
# jetzt auch fuer Mac (Darwin Kernel)
#DEBUG
#set -x
if [ "$1" = "-h" ] || [ "$#" -lt "1" ]; then
echo " netwatch [-u/-t/-h] [-l]: netstat shortcuts for Linux and Mac-Darwin"
echo " -u: UDP, -t: TCP"
echo " -h: help"
echo " -l: Listening Sockets"
exit 0
fi
if [ "`uname`" = "Darwin" ]; then
OPT1="-an"
if [ "$1" = "-t" ]; then
OPT2="-p tcp"
else
OPT2="-p udp"
fi
if [ "$2" = "-l" ];then
if [ "$1" = "-u" ];then
netstat $OPT1 $OPT2
else
netstat $OPT1 $OPT2 |grep -i 'LISTEN'
fi
else
netstat $OPT1 $OPT2 |egrep -i 'EST|VERB' |grep -v 127.0.0.1
fi
else
WHO=`whoami`
if [ "$WHO" = "root" ];then
OPT1="-anp"
else
OPT1="-an"
fi
if [ "$1" = "-t" ]; then
OPT2="-t"
else
OPT2="-u"
fi
if [ "$2" = "-l" ];then
if [ "$1" = "-u" ];then
netstat $OPT1 $OPT2
else
netstat $OPT1 $OPT2 |grep -i 'LISTEN'
fi
else
watch "netstat $OPT1 $OPT2 |grep -i 'EST|VERB' |grep -v 127.0.0.1"
fi
fi
| true |
84f6f2f4593572c647231183ad724e65ba6eff25 | Shell | kdruelle/zsh | /plugins/prompt/keyboard.zsh | UTF-8 | 1,977 | 2.609375 | 3 | [] | no_license |
bindkey -e
typeset -Ag key # associative array with more explicit names
key[up]=$terminfo[kcuu1]
key[down]=$terminfo[kcud1]
key[left]=$terminfo[kcub1]
key[right]=$terminfo[kcuf1]
key[C-up]="^[[1;5A"
key[C-down]="^[[1;5B"
key[C-left]="^[[1;5D"
key[C-right]="^[[1;5C"
key[M-up]="^[[1;3A"
key[M-down]="^[[1;3B"
key[M-left]="^[[1;3D"
key[M-right]="^[[1;3C"
key[S-up]=$terminfo[kri]
key[S-down]=$terminfo[kind]
key[S-left]=$terminfo[kLFT]
key[S-right]=$terminfo[kRIT]
key[tab]=$terminfo[kRIT]
key[S-tab]=$terminfo[cbt]
key[C-space]="^@"
key[enter]=$terminfo[cr]
key[M-enter]="^[^J"
case "$OS" in
(*cygwin*) key[C-enter]="^^";;
(*) key[C-enter]="^J";;
esac
key[F1]=$terminfo[kf1]
key[F2]=$terminfo[kf2]
key[F3]=$terminfo[kf3]
key[F4]=$terminfo[kf4]
key[F5]=$terminfo[kf5]
key[F6]=$terminfo[kf6]
key[F7]=$terminfo[kf7]
key[F8]=$terminfo[kf8]
key[F9]=$terminfo[kf9]
key[F10]=$terminfo[kf10]
key[F11]=$terminfo[kf11]
key[F12]=$terminfo[kf12]
bindkey $key[left] backward-char
bindkey $key[right] forward-char
bindkey $key[M-right] move-text-right
bindkey $key[M-left] move-text-left
bindkey "^X^E" edit-command-line # edit line with $EDITOR
bindkey "^Z" ctrlz # ctrl z zsh
bindkey "^D" delete-char
bindkey "^X^X" exchange-point-and-mark
bindkey "^X^K" show-kill-ring
bindkey "\`\`" sub-function
bindkey "\'\'" simple-quote
bindkey "\"\"" double-quote
bindkey $key[C-left] backward-word
bindkey $key[C-right] forward-word
bindkey "^[k" kill-word
bindkey "^W" kill-region # emacs-like kill
bindkey "^Y" yank # paste
bindkey "^[y" yank-pop # rotate yank array
bindkey $key[S-tab] reverse-menu-complete # shift tab for backward completion
bindkey "^[=" save-line
bindkey $key[S-right] select-right # emacs like shift selection
bindkey $key[S-left] select-left
bindkey $key[C-enter] clear-and-accept
bindkey $key[F1] run-help
bindkey $key[F5] clear-screen
bindkey -s ";;" "~"
| true |
83f596364416830cb41f6df9a44d96f2d6316e70 | Shell | kevinjs/vmrebirth | /vmrebir_new.sh | UTF-8 | 3,447 | 3.21875 | 3 | [] | no_license | #!/bin/sh
alias nova='nova --os_username admin --os_password csdb123cnic --os_tenant_name admin --os_auth_url http://192.168.138.32:5000/v2.0'
alias swift='export OS_TENANT_NAME=admin;swift -v -V 2.0 -A http://192.168.138.32:5000/v2.0 -U admin -K csdb123cnic'
mysql_h='192.168.138.32'
mysql_u='root'
mysql_p='csdb123cnic'
vm=$1
dstserv=$2
hostname=`mysql -h$mysql_h -u$mysql_u -p$mysql_p -N -e "select hostname from instances where uuid = '$vm';" nova | awk '{print $1}'`
echo 'Move '$hostname'('$vm') to '$dstserv
#Step1:
echo 'Step1 >>'
mysql -h$mysql_h -u$mysql_u -p$mysql_p -N -e "update instances set host='$dstserv' where uuid = '$vm'; select row_count();" nova | awk '{print $1}'
#Step2:
echo 'Step2 >>'
instance_name=`nova show $vm | grep instance_name | awk '{print $4}'`
echo 'instance_name: '$instance_name
#Step3:
echo 'Step3 >>'
cd /opt/stack/data/nova/instances/$instance_name
filter_name=`cat libvirt.xml | grep filter= | awk -F'"' '{print $2}'`
echo 'filter_name: '$filter_name
#step4:
echo 'Step4 >>'
#filter_uuid=`uuidgen $filter_name`
#echo 'filter_uuid: '$filter_uuid
#cat > /etc/libvirt/nwfilter/$filter_name.xml << _done_
#<filter name='$filter_name' chain='root'>
#<uuid>$filter_uuid</uuid>
#<filterref filter='nova-base'/>
#</filter>
#_done_
if [ -f "/etc/libvirt/nwfilter/$filter_name.xml" ]; then
echo 'Filter file has existed'
else
#echo 'Not Exist'
echo 'filter_uuid: '$filter_uuid
cat > /etc/libvirt/nwfilter/$filter_name.xml << _done_
<filter name='$filter_name' chain='root'>
<uuid>$filter_uuid</uuid>
<filterref filter='nova-base'/>
</filter>
_done_
fi
#step5:
echo 'Step5 >>'
instance_mac=`cat /opt/stack/data/nova/instances/$instance_name/libvirt.xml | grep mac | awk -F'"' '{print $2}'`
echo 'MAC: '$instance_mac
instance_ip=`cat /opt/stack/data/nova/instances/$instance_name/libvirt.xml | grep IP | awk -F'"' '{print $4}'`
echo 'IP: '$instance_ip
echo -e "\n$instance_mac,$hostname.novalocal,$instance_ip" >> /opt/stack/data/nova/networks/nova-br100.conf
#step6:
echo 'Step6 >>'
#/etc/init.d/libvirt-bin restart
#service libvirt-bin restart
lv_pid=`service libvirt-bin restart | grep process | awk '{print $4}'`
echo $lv_pid > /opt/stack/data/nova/networks/nova-br100.pid
#step7:
echo 'Step7 >>'
cd /opt/stack/data/nova/instances/$instance_name/
chown stack:stack *
chmod 777 *
virsh define libvirt.xml
virsh start $instance_name
fixed_id=`mysql -h$mysql_h -u$mysql_u -p$mysql_p -N -e "select id from fixed_ips where address='$instance_ip';" nova | awk '{print $1}'`
echo $fixed_id
if [ ! $fixed_id = "" ]; then
#echo $fixed_id
float_ip=`mysql -h$mysql_h -u$mysql_u -p$mysql_p -N -e "select address from floating_ips where fixed_ip_id='$fixed_id';" nova | awk '{print $1}'`
echo $float_ip
if [ ! $float_ip = "" ]; then
#echo $float_ip
mysql -h$mysql_h -u$mysql_u -p$mysql_p -N -e "update floating_ips set host = '$dstserv' where fixed_ip_id='$fixed_id'; select row_count();" nova
#step8:
echo 'Step8 >>'
ip addr add $float_ip/32 dev br100
iptables -t nat -A nova-network-OUTPUT -d $float_ip/32 -j DNAT --to-destination $instance_ip
iptables -t nat -A nova-network-PREROUTING -d $float_ip/32 -j DNAT --to-destination $instance_ip
iptables -t nat -A nova-network-float-snat -s $instance_ip/32 -j SNAT --to-source $float_ip
# iptables-save > /etc/iptables.up.rules
sysctl -p
fi
else
echo "None floating_ips"
fi
echo 'Done!'
| true |
dbcd0a61c09c773065f365f34d8476f38d3ba917 | Shell | ebukoski/ec2gaming | /ec2gaming-instance.sh | UTF-8 | 436 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
# Verify that the gaming stane actually exists (and that there's only one)
INSTANCES=$(aws ec2 describe-instances --filters Name=instance-state-code,Values=16 Name=instance-type,Values=g2.2xlarge)
if [ "$(echo "$INSTANCES" | jq '.Reservations | length')" -ne "1" ]; then
echo "didnt find exactly one instance!"
exit 1
fi
echo "$INSTANCES" | jq --raw-output '.Reservations[0].Instances[0].InstanceId'
| true |
5e304fda6d7f9ff609157b03ce866d73ea2609ae | Shell | jelaas/bifrost-build | /all/xfsprogs-3.1.11-1/Fetch-source.sh | UTF-8 | 140 | 2.78125 | 3 | [] | no_license | #!/bin/bash
SRC=xfsprogs-3.1.11.tar.gz
DST=/var/spool/src/$SRC
[ -s "$DST" ] || wget -O $DST ftp://oss.sgi.com/projects/xfs/cmd_tars/$SRC
| true |
1dd8c2e3c985814a587ff244f798cecc2c94d099 | Shell | drewschield/Z-chromosome_analysis_hirundo | /scripts/run_admixture.sh | UTF-8 | 135 | 2.53125 | 3 | [] | no_license | ped=$1
bootstraps=$2
for K in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16; do
admixture --cv --B$bootstraps $ped $K | tee log${K}.out
done
| true |
f7905bc73f5837ed3053135116b6f1c9ef868168 | Shell | crorvick/config | /default/zshrc | UTF-8 | 494 | 2.6875 | 3 | [] | no_license | # ~/.zshrc
# Open files in a terminal Emacs client, starting a fresh server (-a '')
# if none is running yet.
alias e="emacsclient -t -a ''"

# Load oh-my-zsh only when it is actually installed.
export ZSH="$HOME/.oh-my-zsh"
if [ -d "$ZSH" ]; then
  ZSH_THEME="jonathan"
  # Skip the (slow) untracked-files scan in the git prompt.
  DISABLE_UNTRACKED_FILES_DIRTY="true"
  plugins=(git)
  . "$ZSH/oh-my-zsh.sh"
fi

# Reuse a previously saved ssh-agent environment when this shell has none.
if [ -z "$SSH_AUTH_SOCK" -a -f "$HOME/.ssh/agent" ]; then
  source "$HOME/.ssh/agent"
fi

# Start a fresh agent when the saved socket is missing or stale; the grep
# drops ssh-agent's trailing "echo Agent pid" line before saving.
if [ -z "$SSH_AUTH_SOCK" -o ! -e "$SSH_AUTH_SOCK" ]; then
  ssh-agent | grep -v '^echo' >"$HOME/.ssh/agent"
  source "$HOME/.ssh/agent"
fi

# Machine-specific overrides, if present.
if [ -f "$HOME/.zshrc-site" ]; then
  source "$HOME/.zshrc-site"
fi
| true |
a3f2b508c30b1015aa7962041cab60116e245302 | Shell | cms-sw/cmspkg | /server/scripts/webhooks.sh | UTF-8 | 998 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Notify registered webhooks that a new RPMs JSON payload was published.
#   $1 - path to the generated RPMs json file (.../<repo>/<arch>/<file>)
rpms_json=$1
if [ ! -f "${rpms_json}" ] ; then exit ; fi
# Absolute directory of the payload; repo and arch are encoded as the
# 3rd- and 2nd-from-last path components respectively.
basedir=$(cd $(dirname ${rpms_json}); /bin/pwd)
json_name=$(basename ${rpms_json})
repo=$(echo ${basedir} | rev | cut -d/ -f 3 | rev)
arch=$(echo ${basedir} | rev | cut -d/ -f 2 | rev)
script_dir=$(dirname $0)
# webhooks.db lives next to this script; each line is <regex>=<url>.
hooks="$(dirname $0)/webhooks.db"
if [ ! -f "${hooks}" ] ; then exit ; fi
CURL_OPTS="-s -k -f --retry 3 --retry-delay 5 --max-time 30 -X POST"
# Strip the local /data prefix so the URI is relative to the web root.
CMSREP_URI="$(echo ${basedir}/${json_name} | sed 's|^/data/|/|')"
for line in $(cat ${hooks}); do
  # Left side of '=' is a regex matched against "<repo>:<arch>".
  reg=$(echo "${line}" | sed 's|=.*$||')
  if [ $(echo "${repo}:${arch}" | grep "^$reg\$" | wc -l) -eq 1 ] ; then
    url=$(echo "${line}" | sed 's|^[^=]*=||')
    DATA="{\"payload_uri\":\"${CMSREP_URI}\",\"architecture\":\"${arch}\",\"repository\":\"${repo}\"}"
    echo "=========================="
    echo "URL=${url}"
    echo "DATA=${DATA}"
    echo "RESPONSE="
    # Failures are tolerated (|| true) so one dead hook cannot stop the rest.
    curl $CURL_OPTS -d "${DATA}" --header 'Content-Type: application/json' "${url}" || true
  fi
done
| true |
c87c65b41865bf127fa84cc38d40afafa6cc722d | Shell | ldelossa/dotfiles | /zsh.d/rsync.sh | UTF-8 | 738 | 2.765625 | 3 | [] | no_license | # {
# VSCode tasks these helpers were derived from:
# {
#   "label": "Rsync Local To Remote",
#   "type": "shell",
#   "command": "rsync -e ssh --delete -azv $(pwd)/ ${input:sshHost}:$(pwd)",
#   "problemMatcher": []
# },
# {
#   "label": "Rsync Remote To Local",
#   "type": "shell",
#   "command": "rsync -e ssh --delete -azv ${input:sshHost}:$(pwd)/ $(pwd)/",
#   "problemMatcher": []
# },

# Mirror the current directory to the same path on a remote host.
# Usage: l2r <user@host>
function l2r () {
  local here
  here="$(pwd)"
  rsync -e ssh --mkpath --delete -azv "${here}/" "$1:${here}"
}

# Mirror the remote copy of the current directory back over the local one.
# Usage: r2l <user@host>
function r2l () {
  local here
  here="$(pwd)"
  rsync -e ssh --mkpath --delete -azv "$1:${here}" "${here}/"
}
| true |
89083304621498067dcb13203485387dd45ee872 | Shell | umbrodomini/gittest | /pisetup.sh | UTF-8 | 1,885 | 3.640625 | 4 | [] | no_license | #! /bin/bash
# Interactive first-boot setup for a Raspberry Pi seedbox: updates packages,
# installs transmission + tools, auto-mounts the USB drive, configures
# transmission-daemon and sets a static IP via dhcpcd.
echo "Starting Setup!"
echo -n "Enter IP address: "
read -r ipaddress                 # -r: do not mangle backslashes
ipaddress=$ipaddress/24           # dhcpcd wants CIDR notation
echo "$ipaddress"
start_time="$(date -u +%s)"

echo "Updating repos"
if sudo apt-get update >> logs.txt; then
    echo "Update Successful"
else
    # fix: was 'echo &tail logs.txt', which backgrounded echo instead of
    # showing the log tail on failure.
    echo "Update failed:"
    tail logs.txt
fi

echo "Upgrading Pi"
if sudo apt-get upgrade -y >> logs.txt; then
    echo "Upgrade Successful"
else
    echo "Upgrade failed:"
    tail logs.txt
fi

echo "Installing transmission-daemon nmap curl ntfs-3g git awscli zip minidlna"
sudo apt-get install transmission-daemon nmap curl ntfs-3g git awscli zip minidlna -y >> logs.txt 2>&1
echo "install successful"

echo "fetching transmission settings"
curl -LJO https://raw.githubusercontent.com/umbrodomini/gittest/master/settings.json >> logs.txt

echo "creating Book local directory and changing permissions"
sudo mkdir -p /media/Book         # -p: don't fail if it already exists
sudo chmod 0777 /media/Book

echo "Getting UUID of sda1"
book=$(sudo blkid -o value -s UUID /dev/sda1)
echo "UUID : $book"

echo "creating automount on fstab"
echo "UUID=$book /media/Book ntfs-3g defaults,auto,users,rw,nofail 0 0" | sudo tee -a /etc/fstab
echo "successfully appended to /etc/fstab"

echo "stopping transmission-daemon"
sudo systemctl stop transmission-daemon
sudo cp /home/pi/settings.json /etc/transmission-daemon/
echo "enabling transmission-daemon"
sudo systemctl enable transmission-daemon
echo "starting transmission-daemon"
sudo systemctl start transmission-daemon
printf "Transmission Configured Successfully"

# Append the static-IP stanza.  A trailing newline is included so later
# appends to dhcpcd.conf start on a fresh line (original left the file
# unterminated and then printed a literal "/n" typo).
printf "interface eth0\nstatic ip_address=%s\nstatic routers=192.168.1.1\nstatic domain_name_servers=192.168.1.1\n" "$ipaddress" | sudo tee -a /etc/dhcpcd.conf
printf "\n"
echo "static IP address set to $ipaddress"
echo "SETUP COMPLETE!"
echo "some housekeeping...."
rm -rf settings.json
echo "DONE!!"
end_time="$(date -u +%s)"
elapsed="$(($end_time-$start_time))"
echo "Total of $elapsed seconds elapsed for process"
| true |
6d93721a8f661e5759321463cf96f9aad52a41b5 | Shell | id774/scripts | /installer/install_edge_rails.sh | UTF-8 | 1,044 | 3.390625 | 3 | [] | no_license | #!/bin/sh
#
########################################################################
# Install or Update Edge Rails
#
# Maintainer: id774 <idnanashi@gmail.com>
#
# v1.2 3/7,2010
# Refactoring.
# v1.1 2/20,2010
# Fix directory.
# v1.0 9/8,2008
# Stable.
########################################################################
# Choose the chown owner spec for this platform: BSD-style systems
# (macOS / *darwin*) keep root's files in group "wheel"; everything
# else uses group "root".
setup_environment() {
    case "$OSTYPE" in
        *darwin*) OWNER=root:wheel ;;
               *) OWNER=root:root ;;
    esac
}
# Pull the latest commits into the existing Edge Rails checkout.
update_rails() {
    # Guard the cd so "git pull" never runs in the wrong directory
    # if the checkout has disappeared.
    cd /usr/local/src/rails/trunk/rails || return 1
    sudo git pull
}
# Create the source tree and clone Rails from GitHub for the first time.
install_rails() {
    test -d /usr/local/src/rails/trunk || sudo mkdir -p /usr/local/src/rails/trunk
    # Guard the cd so the clone lands in the intended directory.
    cd /usr/local/src/rails/trunk || return 1
    sudo git clone git://github.com/rails/rails.git
}
# Top-level driver: pick the ownership spec for this OS, update an
# existing checkout (or clone a fresh one), then normalise ownership.
install_edge_rails() {
    setup_environment
    test -d /usr/local/src/rails/trunk/rails && update_rails
    test -d /usr/local/src/rails/trunk/rails || install_rails
    sudo chown -R $OWNER /usr/local/src/rails/trunk
}
# Bail out early when the network (id774.net) is unreachable.
ping -c 1 id774.net > /dev/null 2>&1 || exit 1
install_edge_rails
| true |
64f629b3efce4c1e9edde91c43b408e5de08b3c3 | Shell | mcm/aports | /community/cloud-init/setup-cloud-init | UTF-8 | 822 | 4.0625 | 4 | [] | no_license | #!/bin/sh
# Can be passed a parameter to indicate whether mdev / mdevd / udev
# should be used.

# Enable the four cloud-init stages in their OpenRC runlevels by
# symlinking each init script (idempotent: existing links are skipped).
echo "Enabling cloud-init's boot services..."
for i in boot/cloud-init-local \
         default/cloud-config \
         default/cloud-final \
         default/cloud-init; do
    if ! [ -e /etc/runlevels/$i ]; then
        # ${i#*/} strips the "<runlevel>/" prefix to get the service name.
        ln -s /etc/init.d/${i#*/} /etc/runlevels/$i
    fi
done

# If mdev / mdevd / udev not specified as parameter then assume udev
_device_mgmt="${1:-udev}"
case $_device_mgmt in
    udev )
        : ;;
    mdev | mdevd )
        echo
        echo "cloud-init does not currently support $_device_mgmt!"
        echo
        exit 1
        ;;
    * )
        echo
        echo "Unknown parameter, only 'mdev', 'mdevd', and 'udev' are valid!"
        echo
        exit 1
        ;;
esac

echo "Enabling $_device_mgmt boot services..."
setup-devd "$_device_mgmt"
exit 0
| true |
b310118703a65c68e5f91172cad9ef4ad20bc5cc | Shell | AngelofWoe/arkos | /RG351P-M/Ubuntu OS Partition/var/lib/dpkg/info/rsyslog.prerm | UTF-8 | 631 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Debian maintainer script run before rsyslog is removed or upgraded;
# $1 is the dpkg action ("remove", "upgrade", ...).
set -e
# Stop the socket on remove so rsyslog is not restarted via socket activation
if [ -d /run/systemd/system ] && [ "$1" = remove ] ; then
    systemctl stop syslog.socket || true
fi
# The sections below are generated by debhelper -- do not hand-edit them.
# Automatically added by dh_installsystemd/12.4ubuntu1
if [ -d /run/systemd/system ] && [ "$1" = remove ]; then
    deb-systemd-invoke stop 'dmesg.service' 'rsyslog.service' >/dev/null || true
fi
# End automatically added section
# Automatically added by dh_installinit/12.4ubuntu1
if [ -x "/etc/init.d/rsyslog" ] && [ "$1" = remove ]; then
    invoke-rc.d --skip-systemd-native rsyslog stop || exit 1
fi
# End automatically added section
| true |
208112741858f560f137583ef0efd0d5da6b0a25 | Shell | mdryden/110yards | /.scripts/python/python-version.sh | UTF-8 | 245 | 3.515625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Compare the interpreter on PATH against the version pinned in
# .python-version; exit 0 on a match, 1 with a diagnostic otherwise.
expected=`cat .python-version`
# NOTE(review): "python --version" prints to stderr on Python 2, which
# would leave $actual empty there -- presumably Python 3 is assumed.
actual=`python --version`
if grep -q "$expected" <<< "$actual"; then
  exit 0
else
  echo "Environment is not using expected python version. Expected '$expected', found '$actual'."
  exit 1
fi | true |
57cac25531f0e981101e6720a532bb5afd26af7a | Shell | AverageMarcus/dotfiles | /home/.bin/flux-resume-all | UTF-8 | 1,099 | 3.75 | 4 | [] | no_license | #!/usr/bin/env bash
# .utils provides the colored-output helpers (blue/underline/italic).
source .utils
set -e

# Print CLI help using the styling helpers from .utils.
print_usage() {
  blue "flux-resume-all - Resume all flux resources"
  echo " "
  underline "Usage:"
  echo "flux-resume-all [options]"
  echo " "
  underline "Options:"
  echo "-h, --help show this help text"
}

# Minimal option handling: only -h/--help is recognised; anything else
# falls through to the main logic.
while test $# -gt 0; do
  case "$1" in
    -h|--help)
      print_usage
      exit 0
      ;;
    *)
      break
      ;;
  esac
done

# Collect every suspended Kustomization / GitRepository as "ns/Kind/name",
# one per line (newline-separated strings, relied on by the loop below).
KUSTOMIZATIONS=$(kubectl get kustomization -A -o json | jq -r '.items[] | select(.spec.suspend==true) | "\(.metadata.namespace)/\( .kind)/\( .metadata.name)"')
GITREPOS=$(kubectl get gitrepo -A -o json | jq -r '.items[] | select(.spec.suspend==true) | "\(.metadata.namespace)/\( .kind)/\( .metadata.name)"')
if [[ "${KUSTOMIZATIONS}" == "" ]] && [[ "${GITREPOS}" == "" ]]; then
  italic "Nothing to resume"
fi

# For each resource, split "ns/kind/name" (lowercased for kubectl) into
# PARTS and clear spec.suspend so Flux resumes reconciling it.
for RESOURCE in ${KUSTOMIZATIONS} ${GITREPOS}
do
  PARTS=($(echo ${RESOURCE} | tr '[:upper:]' '[:lower:]' | tr "/" "\n"))
  blue "Resuming ${PARTS[1]} - ${PARTS[0]}/${PARTS[2]}"
  kubectl patch ${PARTS[1]} -n ${PARTS[0]} ${PARTS[2]} -p '{"spec":{"suspend":null}}' --type merge
done
| true |
2cb5b166bb2040cad2a68ff90e002335694d794b | Shell | wilson-tim/stellahomedw | /DWLIVE/tlink/bin/tlink_main.ksh | UTF-8 | 9,943 | 3.5 | 4 | [] | no_license |
#!/usr/bin/ksh
# set -x
# ------------------------------------------------------------------------------------------------ #
# #
# Shell Name: tlink_main.ksh (renamed std_main.ksh) #
# #
# Purpose #
# ------- #
# Template main script for doing the actual job. #
# #
# Notes: #
# 1. Contains examples of steps to do various tasks, e.g. run a SQL*PLUS module. #
# 2. Each example step contains code to bypass it by checking the variable "START_STEP_NO". To make #
# a step mandatory just comment out the bypass check (leave the code in in case it needs to be #
# reinstated later). #
# #
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
#
# History #
# ------- #
# #
# Date By Description #
# -------- ---------- --------------------------------------------------------------------------- #
# dd/mm/yy author Initial Version. #
# 29/06/09 COA Modified to send a file to BO server to indicate success.
# #
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
# #
# Input Parameters #
# ---------------- #
# #
# Option Flags #
# ------------ #
# #
# Example Runs #
# ------------ #
# ${BIN}/tlink_main.ksh >${LOGS}/tlink${RUNDATE}.log 2>${LOGS}/tlink${RUNDATE}.err #
# #
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
# #
# To Do #
# ----- #
# #
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
# #
# Run standard shell scripts. #
# #
# N.B. These are always run in every shell script even if they have been run by the calling shell. #
# #
# ------------------------------------------------------------------------------------------------ #
. /home/dw/bin/set_oracle_variables.ksh # Sets up Oracle environment.
. /home/dw/bin/set_java_variables.ksh # Sets up Java environment.
# ------------------------------------------------------------------------------------------------ #
# #
# The following function echoes the date and time in a particular format and any parameters passed #
# to this function. It can be used to output log messages with a consistent date and time format. #
# #
# ------------------------------------------------------------------------------------------------ #
# Emit one timestamped log line: "YYYY/MM/DD---HH:MM:SS <args...>".
function report
{
    echo "$(date +%Y/%m/%d---%H:%M:%S) $*"
}
# Join all arguments with spaces and wrap the result in double quotes,
# producing a single quoted Java-style parameter on stdout.
function getJavaParameter
{
    printf '"%s"\n' "$*"
}
# Standard log message at start of script.
report ": Starting script ${0} to tlink.\n"
report ": Parameters passed: ${*}.\n"
# ------------------------------------------------------------------------------------------------ #
# #
# Body of shell follows. #
# #
# ------------------------------------------------------------------------------------------------ #
#
# Step nn
# ------
# Example of a step to run a SQL*PLUS module with parameters. Replace with own description.
#
# N.B. As a standard, "sql_debug_mode" should always be the last parameter in the list.
#
step_no=00
report "Step ${step_no}\n"
# Job the SQL step waits on, and how long (seconds) to wait for it.
WAIT_FOR_JOB_NAME=TL_S_Staging
#TIMEOUT_SECONDS=10800
# Increase wait for slo-dwlive
TIMEOUT_SECONDS=34200
APPLICATION_KEY=TLINKW
#
# Bypass step if "START_STEP_NO" greater than step number.
#
report " step no '$step_no' and '$START_STEP_NO'"
# NOTE(review): "]] then" without a ';' or newline is not valid ksh syntax;
# this looks like a line break lost in transit -- confirm against the
# deployed copy before changing anything here.
if [[ "${step_no}" -ge "${START_STEP_NO}" ]] then
report " Before start sql plus to call sql script .................."
# Run the tlink SQL step; its exit code decides success/failure below.
sqlplus -s ${USER}/${PASSWD} @${BIN}/tlink_run.sql ${WAIT_FOR_JOB_NAME} ${TIMEOUT_SECONDS} ${APPLICATION_KEY} ${sql_debug_mode}
exit_code=${?}
report " Tlnik SQL script compelted and exit code is '$exit_code' and '${?}'"
if [[ "${exit_code}" -ne 0 ]] then
report ": tlink failed...\n"
report ": Script '${0}' aborted....\n"
exit_code=1
else
# On success, push the result file to the BO server via FTP.  The heredoc
# below is the ftp command script -- do not insert comments inside it.
report ": ftp '${TLINK_HOST}' '${TLINK_USER}' '${TLINK_LOCAL_DIRECTORY}' '${TLINK_FILENAME}'"
FTP_OUTPUT=`ftp -vni << EOF
open ${TLINK_HOST}
user ${TLINK_USER} ${TLINK_PASSWD}
lcd ${TLINK_LOCAL_DIRECTORY}
put ${TLINK_FILENAME} ${TLINK_FILENAME}
quit
EOF`
echo $FTP_OUTPUT | grep -i "Transfer complete"
report "Output from FTP: \n${FTP_OUTPUT}\n"
fi
else
report "Step ${step_no} bypassed.\n"
fi
# ------------------------------------------------------------------------------------------------ #
# #
# End of shell #
# #
# ------------------------------------------------------------------------------------------------ #
# On overall success the same file is sent twice more (suffixes "2" and
# "3"), 20 minutes apart, as completion markers for the BO server.
if [[ "${exit_code}" -eq 0 ]] then
# Final standard log messages if script completed successfully.
report ": tlink completed successfully.\n"
report ": Script '${0}' finished....\n"
sleep 1200
report ": ftp '${TLINK_HOST}' '${TLINK_USER}' '${TLINK_LOCAL_DIRECTORY}' '${TLINK_FILENAME}'"
FTP_OUTPUT=`ftp -vni << EOF
open ${TLINK_HOST}
user ${TLINK_USER} ${TLINK_PASSWD}
lcd ${TLINK_LOCAL_DIRECTORY}
put ${TLINK_FILENAME} ${TLINK_FILENAME}"2"
quit
EOF`
echo $FTP_OUTPUT | grep -i "Transfer complete"
report "Output from FTP: \n${FTP_OUTPUT}\n"
sleep 1200
report ": ftp '${TLINK_HOST}' '${TLINK_USER}' '${TLINK_LOCAL_DIRECTORY}' '${TLINK_FILENAME}'"
FTP_OUTPUT=`ftp -vni << EOF
open ${TLINK_HOST}
user ${TLINK_USER} ${TLINK_PASSWD}
lcd ${TLINK_LOCAL_DIRECTORY}
put ${TLINK_FILENAME} ${TLINK_FILENAME}"3"
quit
EOF`
echo $FTP_OUTPUT | grep -i "Transfer complete"
report "Output from FTP: \n${FTP_OUTPUT}\n"
else
# Final standard log messages if script failed.
report ": tlink failed.\n"
report ": Script '${0}' aborted....\n"
fi
exit ${exit_code}
| true |
7083716a6c73012f7fdb6e69a5fa15591589fe6d | Shell | bluemix-enablement/CodeSnippets | /scriptupdates/updateMaterials | UTF-8 | 194 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Export ("checkout" without .svn metadata) the materials folder from
# GitHub's svn bridge into $HOME/materials, overwriting existing files.
LOCAL_DIR="$HOME/materials"   # fix: $(echo ~) was a useless subshell
URL=https://github.com/bluemix-enablement/CodeSnippets/trunk/materials/
echo "Downloading from $URL to $LOCAL_DIR"
svn export --force "$URL" "$LOCAL_DIR"
| true |
189978095f3f1f29dd17fc0d352a6e04d02fe97d | Shell | ThomasvanBommel/PortfolioTS | /scripts/start.sh | UTF-8 | 951 | 3.640625 | 4 | [] | no_license | # File: start.sh
# Created: Thursday March 25th 2021
# Author: Thomas vanBommel
#
# Last Modified: Tuesday March 30th 2021 6:17pm
# Modified By: Thomas vanBommel
#
# CHANGELOG:
#!/bin/bash
# NOTE(review): this shebang is not the first line of the file (the header
# comments precede it), so it has no effect -- the script runs under
# whatever shell invokes it.
set -e
## Check / create config file (common/config.json)
scripts/config.sh
## Build server
buildServer () {
  echo "Building server:"
  # Prints Done/Failed but never propagates failure: the "|| echo" arm
  # succeeds, so set -e does not stop the script on a tsc error.
  tsc --build server/src && echo " - Done" || echo " - Failed"
}
## Build client
buildClient () {
  echo "Webpacking client:"
  webpack --config client/src/webpack.config.js
}
## If testing, clean up and return successful
if [ "$1" = "test" ]; then
  scripts/cleanup.sh
  buildServer && \
  buildClient
  exit 0
## Only build the server
elif [ "$1" = "server" ]; then
  buildServer
## Only build the client
elif [ "$1" = "client" ]; then
  buildClient
  exit 0
## Default, build everything
else
  buildServer && \
  buildClient
fi
## Start server
echo "Starting server:"
node build/server/src/index.js | true |
58c688bc6fca65eafd4428893e6f6bb8e601f00b | Shell | Rocket-Buddha/kubos-vagrant | /kubos-dev/script/non-privileged-provision.sh | UTF-8 | 1,825 | 3.078125 | 3 | [] | no_license | #!/bin/bash
set -ex
# Install rust stuff
# We do this as vagrant because it
# installs to $HOME/
# Rust toolchain + Cargo
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
# NOTE(review): '~' inside double quotes is NOT expanded when .bashrc runs,
# so the appended PATH entry is the literal string ~/.cargo/bin -- confirm
# whether $HOME/.cargo/bin was intended.
echo 'export PATH=$PATH:"~/.cargo/bin"' >> /home/vagrant/.bashrc
/home/vagrant/.cargo/bin/rustup default 1.39.0
# install rust tools
/home/vagrant/.cargo/bin/rustup component add clippy
/home/vagrant/.cargo/bin/rustup component add rustfmt
# Remove the toolchain docs because we don't need them and they take up 600MB of space
rm -rf /home/vagrant/.rustup/*/share/doc
# bbb/mbm2 target
/home/vagrant/.cargo/bin/rustup target install arm-unknown-linux-gnueabihf
# iobc target
/home/vagrant/.cargo/bin/rustup target install armv5te-unknown-linux-gnueabi
# install cargo-kubos
/home/vagrant/.cargo/bin/cargo install --git https://github.com/kubos/cargo-kubos
# setup cargo config
mv /home/vagrant/cargo_config /home/vagrant/.cargo/config
# Install file-client
/home/vagrant/.cargo/bin/cargo install --bin kubos-file-client --path /home/vagrant/.kubos/kubos/clients/kubos-file-client/
# Install shell-client
/home/vagrant/.cargo/bin/cargo install --bin kubos-shell-client --path /home/vagrant/.kubos/kubos/clients/kubos-shell-client/
# Install example UART comms client
/home/vagrant/.cargo/bin/cargo install --bin uart-comms-client --path /home/vagrant/.kubos/kubos/clients/uart-comms-client/
# Cleanup temporary build files
rm -rf /home/vagrant/.kubos/kubos/target
# Install app-api python module
cd /home/vagrant/.kubos/kubos/apis/app-api/python && python3 -m pip install .
# Install i2c python module
cd /home/vagrant/.kubos/kubos/hal/python-hal/i2c && python3 -m pip install .
# Install kubos-service python module
cd /home/vagrant/.kubos/kubos/libs/kubos-service && python3 -m pip install .
echo "Finishing provisioning..."
| true |
76f326b11046e3a596557052fa49999340c6359c | Shell | EthanJWright/opensync | /src/nm2/fut/nm2_set_vlan_id.sh | UTF-8 | 5,019 | 3.796875 | 4 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | #!/bin/sh
# Copyright (c) 2015, Plume Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Plume Design Inc. nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Plume Design Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TEST DESCRIPTION
# Try to set vlan_id to wireless interface.
#
# TEST PROCEDURE
# - Configure parent interface.
# - Wake UP parent interface.
# - Configure wireless interface with vlan_id.
#
# EXPECTED RESULTS
# Test is passed:
# - if wireless interface is properly configured AND
# - parent iface is UP AND
# - VLAN exists
# - VLAN config is valid
# Test fails:
# - parent iface is NOT UP OR
# - VLAN config is not valid
# Include basic environment config from default shell file and if any from FUT framework generated /tmp/fut_set_env.sh file
if [ -e "/tmp/fut_set_env.sh" ]; then
    source /tmp/fut_set_env.sh
else
    source /tmp/fut-base/shell/config/default_shell.sh
fi
source "${FUT_TOPDIR}/shell/lib/unit_lib.sh"
source "${FUT_TOPDIR}/shell/lib/nm2_lib.sh"
source "${LIB_OVERRIDE_FILE}"

# Always clean up the created interface and restore the device on exit,
# whether the testcase passes, fails, or is interrupted.
trap '
delete_inet_interface "$if_name"
run_setup_if_crashed nm || true
check_restore_management_access || true
' EXIT SIGINT SIGTERM

usage="
$(basename "$0") [-h] \$1 \$2
where options are:
-h show this help message
where arguments are:
parent_ifname=\$1 -- used as parent_ifname in Wifi_Inet_Config table - (string)(required)
virtual_interface_id=\$2 -- used as vlan_id for virtual interface '100' in 'eth0.100'- (integer)(required)
this script is dependent on following:
- running NM manager
- running WM manager
example of usage:
/tmp/fut-base/shell/nm2/nm2_set_vlan_id.sh eth0 100
"

while getopts h option; do
    case "$option" in
        h)
            echo "$usage"
            exit 1
            ;;
    esac
done

# Provide at least 2 argument(s).
if [ $# -lt 2 ]; then
    echo 1>&2 "$0: not enough arguments"
    echo "$usage"
    exit 2
fi

# Fill variables with provided arguments or defaults.
parent_ifname=$1
vlan_id=$2
# Construct if_name from parent_ifname and vlan_id.
if_name="$parent_ifname.$vlan_id"
tc_name="nm2/$(basename "$0")"

# Create the VLAN interface entry in OVSDB with a static IP derived from
# the VLAN id; raise() aborts the testcase on failure.
log "$tc_name: Creating Wifi_Inet_Config entries for $if_name (enabled=true, network=true, ip_assign_scheme=static)"
create_inet_entry \
    -if_name "$if_name" \
    -enabled true \
    -network true \
    -ip_assign_scheme static \
    -inet_addr "10.10.10.$vlan_id" \
    -netmask "255.255.255.0" \
    -if_type vlan \
    -parent_ifname "$parent_ifname" \
    -vlan_id "$vlan_id" &&
        log "$tc_name: create_vlan_inet_entry - Success" ||
        raise "create_vlan_inet_entry - Failed" -l "$tc_name" -tc

log "$tc_name: LEVEL 2 - Check is interface up - $if_name"
wait_for_function_response 0 "interface_is_up $if_name" &&
    log "$tc_name: wait_for_function_response - Interface is UP - $if_name" ||
    raise "wait_for_function_response - Interface is DOWN - $if_name" -l "$tc_name" -tc

# /proc/net/vlan/<if> existing and naming the interface proves the kernel
# actually created the VLAN device.
log "$tc_name: LEVEL 2 - Check vlan config - VLAN exists - $if_name"
wait_for_function_response 0 "grep -q \"$if_name\" /proc/net/vlan/$if_name" &&
    log "$tc_name: wait_for_function_response - VLAN configuration IS VALID at OS level - interface $if_name" ||
    raise "wait_for_function_response - VLAN configuration NOT VALID at OS level - interface $if_name" -l "$tc_name" -tc

# The "VID:" line confirms the device carries the requested VLAN id.
log "$tc_name: LEVEL 2 - Check vlan config - PARENT ifname"
wait_for_function_response 0 "grep -q \"VID: $vlan_id\" /proc/net/vlan/$if_name" &&
    log "$tc_name: wait_for_function_response - Parent device IS VALID at OS level - interface $if_name" ||
    raise "wait_for_function_response - Parent device NOT VALID at OS level - interface $if_name" -l "$tc_name" -tc

pass
| true |
1360724c2b5dd8aef70c22ac6b899c10cae383fc | Shell | tamionv/tvn_problem_making_scripts | /src/lib/evaluate_src_test | UTF-8 | 1,498 | 3.9375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Tamio-Vesa Nakajima
# Evaluates the binary $1 with test $2, returning the message in $message, the time used in $timeUsed and the points in $points
# include the configuration
# Defaults overwritten by problemconfig; empty values signal a bad config.
problemname=""
timelimit=""
# shellcheck disable=SC1091
source problemconfig
if [ -z "$problemname" ] || [ -z "$timelimit" ] ; then
    echo Bad config file
    exit 1
fi
binary=$1
testname=$2
# run_src_test writes the program's stdout to $output_location and prints
# the elapsed time (seconds, possibly fractional) on its own stdout.
output_location=$(mktemp)
timeUsed=$(./lib/run_src_test "$output_location" "$binary" "$testname")
# bc -l prints 1 when over the limit, 0 otherwise; (( )) turns that
# into a shell truth value.
if (( $(echo "$timeUsed > $timelimit" | bc -l))) ; then
    # Set the return values
    # timeUsed is already set
    message=TLE
    points=0
elif [ ! -e "$output_location" ] ; then
    message="MissingOutput"
    points=0
else
    # Make a stage
    stage=$(mktemp -d)
    # Move output into stage
    mv "$output_location" "$stage/$problemname.out"
    # Copy the evaluator into the stage
    cp eval/eval.bin "$stage/eval.bin"
    # Copy the ok file into the stage
    cp "tests/$testname.ok" "$stage/$problemname.ok"
    # Create temporary files to hold the points and the eval message
    pointsFile=$(mktemp)
    messageFile=$(mktemp)
    # Enter the stage and evaluate, storing the results in $pointsFile and $messageFile
    # (eval.bin prints points on stdout and its verdict message on stderr).
    curr_file=$PWD
    cd "$stage" || exit
    ./eval.bin > "$pointsFile" 2> "$messageFile"
    cd "$curr_file" || exit
    # Set the return values
    # timeUsed is already set
    points=$(cat "$pointsFile")
    message=$(cat "$messageFile")
fi
echo "$message" "$timeUsed" "$points"
| true |
223b5370f0a21a37f1d855beebbac0f4a5b121f0 | Shell | YRWYCTB/mysql_health_check | /shell/check_mysql_master.sh | UTF-8 | 801 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Health check for a local MySQL instance: prints replication status for a
# slave, a liveness/master report otherwise.  Exit 2 means "slave" or "dead".
port=3336
user="root"
password="passwd"   # NOTE(review): hardcoded credential -- move to a config file
comm="/usr/local/mysql57/bin/mysql -u$user -h127.0.0.1 -P$port -p$password"
# "show slave status" prints nothing on a master, so a non-zero line count
# means this instance is configured as a replication slave.
slave_info=$($comm -e "show slave status" | wc -l)
# Liveness probe: prints "1" if the server answers queries.
value=$($comm -Nse "select 1")
# Is this instance a slave?
if [ "$slave_info" -ne 0 ]
then
    echo "MySQL $port Instance is Slave........"
    $comm -e "show slave status\G" | egrep -w "Master_Host|Master_User|Master_Port|Master_Log_File|Read_Master_Log_Pos|Relay_Log_File|Relay_Log_Pos|Relay_Master_Log_File|Slave_IO_Running|Slave_SQL_Running|Exec_Master_Log_Pos|Relay_Log_Space|Seconds_Behind_Master"
    exit 2
fi
# Is MySQL alive at all?  (Quoted: unquoted [ -z $value ] is fragile.)
if [ -z "$value" ]
then
    exit 2
fi
echo "MySQL $port Instance is Master........"
$comm -e "select * from information_schema.PROCESSLIST where user='tian' and COMMAND like '%Dump%'"
| true |
760d9160ec761d45842f62daf5be7ea5b2eacfbc | Shell | YogSottot/bitrix-env-rpm | /SOURCES/bitrix-env/opt/webdir/bin/menu/02_local.sh | UTF-8 | 1,865 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# manage localhost options
# Shared helpers (menus, colors, network info) come from 02_local/functions.sh.
PROGNAME=$(basename $0)
PROGPATH=$(dirname $0)
[[ -z $DEBUG ]] && DEBUG=0
. $PROGPATH/02_local/functions.sh || exit 1
logo=$(get_logo)
# get_text variables
# Menu strings (CFGL00xx) are loaded from the matching .txt file, if present.
[[ -f $PROGPATH/${PROGNAME%.sh}.txt ]] && \
. $PROGPATH/${PROGNAME%.sh}.txt
# --- submenu launchers --------------------------------------------------
# Thin wrappers that delegate to the dialog scripts under $submenu_dir
# (defined by 02_local/functions.sh).

# Interactive hostname configuration.
configure_hostname() {
    "$submenu_dir"/01_configure_hostname.sh
}

# Network configuration; $1 chooses "dhcp" or "manual" (the default).
configure_net() {
    type=${1:-manual}
    "$submenu_dir"/02_configure_net.sh "$type"
}

# Power control; $1 chooses "reboot" (the default) or "halt".
shutdown_server() {
    type=${1:-reboot}
    "$submenu_dir"/04_shutdown_server.sh "$type"
}

# Run the system update dialog.
update_server() {
    "$submenu_dir"/06_update_server.sh
}

# Switch between stable and beta package repositories.
beta_version() {
    "$PROGPATH"/01_hosts/10_change_repository.sh
}
# print host menu
# Interactive main loop: draw the menu, read a selection, dispatch, repeat.
submenu() {
    # Menu labels come from the CFGL00xx strings loaded at startup.
    submenu_00="$CFGL0000"
    submenu_01="$CFGL0001"
    submenu_02="$CFGL0002"
    submenu_03="$CFGL0003"
    submenu_04="$CFGL0004"
    submenu_05="$CFGL0005"
    submenu_06="$CFGL0006"
    submenu_07="$CFGL0009"
    menu_logo="$CFGL0007"
    SUBMENU_SELECT=
    # Redraw until the user makes a selection (reset after each dispatch).
    until [[ -n "$SUBMENU_SELECT" ]]; do
        clear
        echo -e "\t\t\t" $logo
        # NOTE(review): $menu_log is never assigned ($menu_logo is, above);
        # this line probably meant $menu_logo -- confirm before changing.
        echo -e "\t\t\t" $menu_log
        echo
        get_local_network $LINK_STATUS
        menu_list="\n\t$submenu_01\n\t$submenu_02"
        menu_list="$menu_list\n\t$submenu_03\n\t$submenu_04"
        menu_list="$menu_list\n\t$submenu_05\n\t$submenu_06"
        menu_list="$menu_list\n\t$submenu_07\n\t$submenu_00"
        print_menu
        print_message "$CFGL0008" '' '' SUBMENU_SELECT
        # Dispatch on the user's choice; unknown input redisplays the menu.
        case "$SUBMENU_SELECT" in
            "1") configure_hostname ;;
            "2") configure_net dhcp ;;
            "3") configure_net manual ;;
            "4") shutdown_server reboot ;;
            "5") shutdown_server halt ;;
            "6") update_server ;;
            "7") beta_version ;;
            "0") exit ;;
            *) error_pick; SUBMENU_SELECT=;;
        esac
        SUBMENU_SELECT=
    done
}
submenu
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.