blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
951b22a667bbca433fe9a66faccf9b76740b5bb2 | Shell | delkyd/alfheim_linux-PKGBUILDS | /star-cshl/PKGBUILD | UTF-8 | 855 | 2.84375 | 3 | [] | no_license | # Maintainer: Charles Vejnar <ce@vejnar.org>
# Arch Linux package recipe for STAR (RNA-seq aligner), built from the
# upstream GitHub release tarball.
pkgname=star-cshl
pkgver=2.5.3a
pkgrel=1
pkgdesc="STAR aligns RNA-seq reads to a reference genome using uncompressed suffix arrays"
arch=("i686" "x86_64")
url="https://github.com/alexdobin/STAR"
license=("GPL3")
source=("https://github.com/alexdobin/STAR/archive/${pkgver}.tar.gz")
sha1sums=("e7535fec05b76619ab851e63e85e163b4c972ed3")
# Build both binaries: the default STAR (preserved as STARshort) and
# STARlong, the variant for longer reads.  `make clean` in between keeps
# the two builds' objects from mixing.
build() {
cd "$srcdir/STAR-${pkgver}/source"
make STAR
# Rename before `make clean` wipes the build tree.
mv STAR STARshort
make clean
make STARlong
}
# Install the short-read binary under the canonical name /usr/bin/STAR,
# plus STARlong, the default parameter file and the PDF manual.
package() {
cd "$srcdir/STAR-${pkgver}/source"
install -Dm755 STARshort "$pkgdir/usr/bin/STAR"
install -Dm755 STARlong "$pkgdir/usr/bin/STARlong"
install -Dm644 parametersDefault "$pkgdir/usr/share/doc/star-cshl/parameters"
cd "$srcdir/STAR-${pkgver}/doc"
install -Dm644 STARmanual.pdf "$pkgdir/usr/share/doc/star-cshl/STARmanual.pdf"
}
| true |
c34ca9f5abf05ad39815f38cb76fab38de4ba36d | Shell | mrBliss/dotfiles | /.oh-my-zsh/platform/kul.zsh | UTF-8 | 908 | 2.578125 | 3 | [] | no_license | # KULeuven specific options (ssh'ed into a linux workstation)
# KULeuven workstation shell configuration (sourced by oh-my-zsh).
# Use emacsclient
export EDITOR='emacsclient -t -a emacsserver'
export ALTERNATE_EDITOR='emacsserver'
alias em='emacsclient -t -a emacsserver'
alias emg='emacsclient -c -n -a emacsserver-gui'
# No root access so put $HOME/bin on the path
#export PATH="$HOME/bin/bin.linux/bin:$HOME/.cabal/bin:$PATH"
export PATH="$HOME/local/bin:$HOME/.cabal/bin:$PATH"
# Run Visual Paradigm
alias vpuml=/localhost/packages/visual_paradigm/VP_Suite5.3/launcher/run_vpuml
# KUL specific tmux config
alias tmux='tmux -f ~/.tmux.kul.conf'
# Make git use Emacs
export GIT_EDITOR=$EDITOR
# Enable the locally installed mosh
export PERL5LIB=/home/s0202013/local/lib/perl/5.14.2
export LD_LIBRARY_PATH=/home/s0202013/local/lib:$LD_LIBRARY_PATH
# Seriously ncmpcpp?
# NOTE(review): this shadows the netcat binary `nc` in interactive shells.
alias nc=ncmpcpp
# Stream music with VLC
alias streamvlc='VLC -I ncurses http://localhost:8000'
| true |
adb3495bf08e48e658746abce3f91352df2a6fe9 | Shell | daemanos/bin | /sup | UTF-8 | 512 | 3.765625 | 4 | [] | no_license | #!/usr/bin/bash
# Usage: sup [ID] WHERE
# Print an error message to stderr and terminate the script with status 1.
die() {
  printf '%s\n' "$1" >&2
  exit 1
}
# Parse arguments: "sup WHERE" instantiates the default template,
# "sup ID WHERE" a named one.
case $# in
1)
id=default
where="$1"
;;
2)
id="$1"
where="$2"
;;
*)
# Bug fix: this arm was `?)`, which only matches a single character, so
# argument counts of 10 or more fell through the case with id/where unset.
die "usage: sup [ID] WHERE"
;;
esac
# The template is a hidden directory named .<ID>.sup in the cwd.
if [ ! -d ".$id.sup" ]; then
die "error: no such template: $id"
fi
# Refuse to clobber an existing target directory.
if [ -d "$where" ]; then
die "error: target directory already exists"
fi
# Copy the template with permissions/timestamps intact, then run and
# remove its optional one-shot ./init hook.
cp -R --preserve=all ".$id.sup" "$where"
cd "$where"
if [ -x init ]; then
./init
rm init
fi
| true |
f8f93e69a62bd748495811fe237cbaa518c62b58 | Shell | HamilcarR/Echeyde | /headers/del_ref.bash | UTF-8 | 84 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Strip every '&' character, in place, from all C headers in the current
# directory.
# Bug fix: the original used `sed -ie`, which GNU sed parses as -i with
# backup suffix "e" (leaving stray "<file>e" backups) rather than -i plus -e.
for file in "$(pwd)"/*.h; do
[ -e "$file" ] || continue # no *.h present: skip the literal glob pattern
sed -i -e 's/&//g' "$file"
done
| true |
41bfb6ca996841388686294d6f34e73de34b6888 | Shell | tnrork/t-n- | /mp3indir.sh | UTF-8 | 479 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Interactive YouTube-to-MP3 downloader: prompt (in Turkish) for a song
# name of up to six words, scrape the YouTube results page with lynx, then
# download the first hit's audio as MP3 with youtube-dl.
#mkdir ~/indirilenMP3
# Clear the screen and read up to six search words into $bir..$alti.
menu(){
clear
echo "------------------"
echo " MAIN MENU "
echo " "
echo " "
echo -n "indirmek istediginiz sarkinin adini yaziniz > "
read bir iki uc dort bes alti
}
# Dump the results page and collect /watch?v= links into t.txt.
# NOTE(review): requires lynx; empty trailing words become bare '+' signs
# in the query string.
search(){
lynx --dump https://www.youtube.com/results?search_query=$bir+$iki+$uc+$dort+$bes+$alti | awk '/http/{print $2}' | grep '/watch?v' > t.txt
}
# Download the first collected link and extract its audio as MP3.
download(){
youtube-dl --extract-audio --audio-format mp3 $(cat t.txt | head -1)
}
menu
search
download
| true |
2c927c9d39874adb9aaf81bbb3949c733b87ebe2 | Shell | Baldurrr/Prometheus | /alertmanager-install.sh | UTF-8 | 4,366 | 3.3125 | 3 | [] | no_license | #!/bin/sh
# Install Prometheus Alertmanager v0.21.0 system-wide, register it as a
# systemd service, drop a default alert-rule file for Prometheus and
# optionally install the Grafana alertmanager datasource plugin.
# Must be run as root (writes under /etc, /usr/local/bin, /etc/systemd).
# NOTE(review): the shebang is /bin/sh but `read -p` below is a bashism;
# run with bash or confirm /bin/sh is bash on the target host.
RED=`tput setaf 1`
GREEN=`tput setaf 2`
BLUE=`tput setaf 4`
YELLOW=`tput setaf 3`
WHITE=`tput setaf 7`
RESET=`tput sgr0`
# Bug fix: the banner was printed with echo "..." and contained unescaped
# backticks, which the shell treated as command substitution.  A quoted
# heredoc prints it verbatim.
printf '%s' "${GREEN}"
cat <<'BANNER'
###########################################################################
 _ _ #
 /\ | | | | #
 / \ | | ___ _ __| |_ _ __ ___ __ _ _ __ __ _ __ _ ___ _ __ #
 / /\ \ | |/ _ \ '__| __| | '_ ` _ \ / _` | '_ \ / _` |/ _` |/ _ \ '__| #
 / ____ \| | __/ | | |_ | | | | | | (_| | | | | (_| | (_| | __/ | #
 /_/ \_\_|\___|_| \__| |_| |_| |_|\__,_|_| |_|\__,_|\__, |\___|_| #
 __/ | #
 |___/ #
###########################################################################
BANNER
printf '%s\n' "${RESET}"
echo -e "Pulling alertmanager.tar \n"
wget https://github.com/prometheus/alertmanager/releases/download/v0.21.0/alertmanager-0.21.0.linux-amd64.tar.gz -P /tmp
# Bug fix: the tarball is downloaded into /tmp, so extract it from there
# (the original ran tar on the bare filename in the current directory).
tar xvf /tmp/alertmanager-0.21.0.linux-amd64.tar.gz -C /tmp/
cd /tmp/alertmanager-*/
echo -e "Copying files \n"
cp alertmanager /usr/local/bin/
cp amtool /usr/local/bin/
echo -e "Creating dir /etc/alertmanager\n"
mkdir /etc/alertmanager
echo -e "Creating dir /var/lib/alertmanager\n"
mkdir /var/lib/alertmanager
echo -e "Creating alertmanager.yml \n"
# Minimal config: route every alert to one e-mail receiver via Gmail SMTP.
# Quoted delimiter: nothing in this document should be shell-expanded.
tee /etc/alertmanager/alertmanager.yml <<'EOF'
global:
  smtp_smarthost: 'smtp.gmail.com:587'
  smtp_from: 'root@root.com'
  smtp_require_tls: false

route:
  receiver: 'alert-mails'
  group_wait: 30s
  group_interval: 1m
  repeat_interval: 30m

receivers:
- name: 'alert-mails'
  email_configs:
  - to: 'root@root.com'
EOF
echo -e "Creating user alertmanager \n"
# Dedicated system account with no home/shell; hand it the config, the data
# directory and the two binaries.
useradd --no-create-home --shell /bin/false alertmanager
chown alertmanager:alertmanager /etc/alertmanager/ -R
chown alertmanager:alertmanager /var/lib/alertmanager/ -R
chown alertmanager:alertmanager /usr/local/bin/alertmanager
chown alertmanager:alertmanager /usr/local/bin/amtool
echo -e "Creating alertmanager.service \n"
# Quoted delimiter keeps the "\" line continuations literal in the unit
# file; systemd understands them.
tee /etc/systemd/system/alertmanager.service <<'EOF'
[Unit]
Description=AlertManager
Wants=network-online.target
After=network-online.target

[Service]
User=alertmanager
Group=alertmanager
Type=simple
ExecStart=/usr/local/bin/alertmanager \
  --config.file /etc/alertmanager/alertmanager.yml \
  --storage.path /var/lib/alertmanager/

[Install]
WantedBy=multi-user.target
EOF
echo -e "Enabling alertmanager service \n"
systemctl daemon-reload
systemctl enable alertmanager.service
service alertmanager start
#/etc/prometheus/prometheus.yml
echo -e "Creating defaults alerts.yml : \n"
echo -e "Alert: InstanceDown \n"
echo -e "Alert: DiskFull \n"
read -p "Enter a group name: " groupname
# $groupname must expand, but the Go-template references must reach the
# file literally.
# Bug fix: {{ $labels.* }} / {{ $value }} were unescaped inside an unquoted
# heredoc, so the (unset) shell variables expanded to nothing, producing
# broken templates such as "{{ .instance }}".
tee /etc/prometheus/rules/alerts.yml <<EOF
groups:
- name: $groupname
  rules:
  - alert: InstanceDown
    expr: up == 0
    for: 1m
    labels:
      severity: critical
    annotations:
      summary: "The server {{ \$labels.instance }} is down"
      description: "The job: {{ \$labels.job }} report that {{ \$labels.instance}} is down since 1 min."
  - alert: DiskFull
    expr: node_filesystem_free_bytes{mountpoint ="/stockage",instance="192.168.195.89:9100"} / 1024 / 1024 / 1024 < 20
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: "20Go left on disk {{ \$labels.instance }}"
      description: "Actually at {{ \$value }}"
EOF
read -p "Install camptocamp plugin for grafana ? (y or n)" pluginchoice
if [ "$pluginchoice" = "y" ] ; then
echo -e "Pulling grafana plugin camptocamp-prometheus-alertmanager-datasource\n"
grafana-cli plugins install camptocamp-prometheus-alertmanager-datasource
# NOTE(review): the Grafana systemd unit is usually named "grafana-server";
# confirm "grafana" resolves on the target distribution.
systemctl restart grafana
elif [ "$pluginchoice" = "n" ] ; then
echo -e "Not installing plugin\n"
fi
echo -e "End of configuration \n"
echo -e "Some lines need to be added in prometheus.yml config file: :)\n"
# Bug fix: this snippet was printed via echo with "\" continuations inside
# double quotes, which collapsed it onto a single line; print it verbatim.
cat <<'EOF'
alerting:
  alertmanagers:
  - static_configs:
    - targets:
      - localhost:9093
    scheme: http
    timeout: 10s
rule_files:
  - 'rules/*'
EOF
echo -e "\n#######"
| true |
818ed0f455e94630c82e16116e007fef5fb04814 | Shell | delkyd/alfheim_linux-PKGBUILDS | /steamkit/PKGBUILD | UTF-8 | 1,070 | 2.734375 | 3 | [] | no_license | # Maintainer: Jakob Gahde <j5lx@fmail.co.uk>
# Arch Linux package recipe for SteamKit2.  The built assembly is
# disassembled, strong-name signed with a freshly generated key and
# reassembled before being registered in the GAC.
pkgname=steamkit
pkgver=1.8.0
pkgrel=1
pkgdesc="A .NET library designed to interoperate with Valve's Steam network"
arch=('any')
url="https://github.com/SteamRE/SteamKit"
license=('LGPL2.1')
depends=('mono' 'protobuf-net')
# Mono >= 4.4 segfaults during the preparations for signing the assembly
makedepends=('mono<4.4')
options=('!strip')
source=("https://github.com/SteamRE/SteamKit/archive/SteamKit_${pkgver}.tar.gz")
md5sums=('9e88ec62a9c9013f8e4096b19e388deb')
build() {
cd "${srcdir}/SteamKit-SteamKit_${pkgver}/SteamKit2/SteamKit2"
xbuild SteamKit2.csproj /p:Configuration=Release
cd "bin/Release"
# Disassemble, generate a 1024-bit strong-name key, reassemble signed.
monodis SteamKit2.dll --output=SteamKit2.il
sn -k 1024 SteamKit2.snk
ilasm /dll /key:SteamKit2.snk SteamKit2.il
}
package() {
cd "${srcdir}/SteamKit-SteamKit_${pkgver}/SteamKit2/SteamKit2/bin/Release"
install -Dm644 SteamKit2.dll "${pkgdir}/usr/lib/SteamKit/SteamKit2.dll"
install -Dm644 SteamKit2.dll.mdb "${pkgdir}/usr/lib/SteamKit/SteamKit2.dll.mdb"
# Register the signed assembly in the Global Assembly Cache under $pkgdir.
gacutil -i SteamKit2.dll -root "${pkgdir}/usr/lib"
}
| true |
09289361aca3f913f2ed67997c55861a17e4b12f | Shell | tapark/ft_services | /srcs/mysql/health_check.sh | UTF-8 | 164 | 2.6875 | 3 | [] | no_license | #!/bin/sh
# Container healthcheck: both the telegraf agent and the mysqld server must
# appear in the process table, otherwise report unhealthy (exit 1).  The
# `exit 0` following this block reports healthy.
count_procs() {
ps | grep "$1" | grep -v grep | wc -l
}
if [ "$(count_procs telegraf)" -eq 0 ]
then
exit 1
fi
if [ "$(count_procs mysqld)" -eq 0 ]
then
exit 1
fi
exit 0 | true |
6d611b1fc0b6a37712791a4688ed41b331f22dc1 | Shell | icersong/openmediavault-elfinder | /install.sh | UTF-8 | 1,295 | 2.59375 | 3 | [] | no_license | # elfinder module
# Download and install the elFinder web file manager under /usr/share,
# wire it into OpenMediaVault's web UI (menu entry, nginx/php-fpm configs),
# then flush OMV caches.  Must be run as root.
wget https://github.com/icersong/elFinder/archive/master.zip -O elfinder.zip
unzip elfinder.zip
mv elFinder-master /usr/share/elfinder
cp -f /usr/share/elfinder/php/connector.minimal.php-lite \
/usr/share/elfinder/php/connector.minimal.php
# Expose /media as the file-manager root; trash dir (with thumbnail cache)
# must be writable by the web-server user.
ln -s /media /usr/share/elfinder/files
mkdir /media/.trash
mkdir /media/.trash/.tmb
chown -R www-data:www-data /media/.trash
# elfinder menu for omv
wget https://github.com/icersong/openmediavault-elfinder/archive/master.zip \
-O openmediavault-elfinder.zip
unzip openmediavault-elfinder.zip
cd openmediavault-elfinder-master
mkdir /var/www/openmediavault/js/omv/module/admin/service/elfinder
cp var/www/openmediavault/js/omv/module/admin/service/elfinder/Elfinder.js \
/var/www/openmediavault/js/omv/module/admin/service/elfinder/
cp var/www/openmediavault/images/elfinder.* /var/www/openmediavault/images/
cp -r usr/share/openmediavault/locale/* /usr/share/openmediavault/locale/
# Generate the nginx and php-fpm site fragments, then restart both daemons.
./usr/share/openmediavault/mkconf/nginx.d/90-elfinder
./usr/share/openmediavault/mkconf/php5fpm.d/90-elfinder
systemctl restart php7.3-fpm.service
systemctl restart nginx.service
# clean cache & update omv locale
# NOTE(review): set -e only takes effect from this point on; failures in
# the commands above are ignored.
set -e
. /etc/default/openmediavault
. /usr/share/openmediavault/scripts/helper-functions
omv_purge_internal_cache
omv_install_locale
| true |
41570d5011bbadf83353340223c5f60762c0efef | Shell | oponcea/ceph-uprev-stx-integ | /base/dnsmasq-config/files/init | UTF-8 | 1,216 | 3.8125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# SysV init script for dnsmasq (DNS forwarder / DHCP server).
# Supports start|stop|status|restart|reload with LSB-style status codes.
if [ -f /etc/centos-release ]; then
DAEMON=/usr/sbin/dnsmasq
else
DAEMON=/usr/bin/dnsmasq
fi
NAME=dnsmasq
DESC="DNS forwarder and DHCP server"
PIDFILE="/var/run/dnsmasq.pid"
# Nothing to do if the binary is not installed.
test -f $DAEMON || exit 0
case "$1" in
start)
echo -n "starting $DESC: $NAME... "
test -d /var/lib/misc/ || mkdir /var/lib/misc/
# NOTE(review): $ARGS is never set in this file — presumably meant to be
# sourced from a defaults file; confirm.
start-stop-daemon -S -x $DAEMON --pidfile $PIDFILE -- $ARGS
echo "done."
echo -n "Refresh hosts cache"
nscd -i hosts
echo "done."
;;
stop)
echo -n "stopping $DESC: $NAME... "
start-stop-daemon -K -x $DAEMON --pidfile $PIDFILE
rm -f $PIDFILE
echo "done."
;;
status)
echo -n "dnsmasq "
# -t: test only — report whether a matching process would be signalled.
start-stop-daemon -q -K -t -x $DAEMON --pidfile $PIDFILE
RET=$?
if [ "$RET" = "0" ]; then
PID=`cat $PIDFILE`
echo "($PID) is running"
else
echo "is not running"
# For lsb compliance return 3 if process not running
exit 3
fi
;;
restart)
echo "restarting $DESC: $NAME... "
$0 stop
$0 start
echo "done."
;;
reload)
# HUP asks dnsmasq to re-read its host files without a restart.
echo -n "reloading $DESC: $NAME... "
killall -HUP $(basename ${DAEMON})
echo "done."
;;
*)
echo "Usage: $0 {start|stop|status|restart|reload}"
exit 1
;;
esac
exit 0
| true |
d5d1d2adc9084afffc2272af48ae18890ad057af | Shell | jaemuzzin/grail_exec | /ensembling/get_ensemble_predictions.sh | UTF-8 | 3,029 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env bash
# This script assumes GraIL predection scores on the validation and test set are already saved.
# It also assumes that scored head/tail replaced triplets are also stored.
# If any of those is not present, run the corresponding script from the following setup commands.
##################### SET UP #####################
# python test_auc.py -d WN18RR -e saved_grail_exp_name --hop 3 -t valid
# python test_auc.py -d WN18RR -e saved_grail_exp_name --hop 3 -t test
# python test_auc.py -d NELL-995 -e saved_grail_exp_name --hop 2 -t valid
# python test.py -d NELL-995 -e saved_grail_exp_name --hop 2 -t test
# python test_auc.py -d FB15K237 -e saved_grail_exp_name --hop 1 -t valid
# python test_auc.py -d FB15K237 -e saved_grail_exp_name --hop 1 -t test
# python test_ranking.py -d WN18RR -e saved_grail_exp_name --hop 3
# python test_ranking.py -d NELL-995 -e saved_grail_exp_name --hop 2
# python test_ranking.py -d FB15K237 -e saved_grail_exp_name --hop 1
##################################################
# Arguments
# Dataset
DATASET=$1
# KGE model to be used in ensemble
KGE_MODEL=$2
KGE_SAVED_MODEL_PATH="../experiments/kge_baselines/${KGE_MODEL}_${DATASET}"
# score pos validation triplets with KGE model
python score_triplets_kge.py -d $DATASET --model $KGE_MODEL -f valid -init $KGE_SAVED_MODEL_PATH
# score neg validation triplets with KGE model
python score_triplets_kge.py -d $DATASET --model $KGE_MODEL -f neg_valid_0 -init $KGE_SAVED_MODEL_PATH
# train the ensemble model
python blend.py -d $DATASET -em2 $KGE_MODEL --do_train -ne 500
# Score the test pos and neg triplets with KGE model
python score_triplets_kge.py -d $DATASET --model $KGE_MODEL -f test -init $KGE_SAVED_MODEL_PATH
python score_triplets_kge.py -d $DATASET --model $KGE_MODEL -f neg_test_0 -init $KGE_SAVED_MODEL_PATH
# Score the test pos and neg triplets with ensemble model
python blend.py -d $DATASET -em2 $KGE_MODEL --do_scoring -f test
python blend.py -d $DATASET -em2 $KGE_MODEL --do_scoring -f neg_test_0
# Compute auc with the ensemble model scored pos and neg test files
python compute_auc.py -d $DATASET -m grail_with_${KGE_MODEL}
# Compute auc with the KGE model model scored pos and neg test files
python compute_auc.py -d $DATASET -m $KGE_MODEL
# Score head/tail replaced samples with KGE model
python score_triplets_kge.py -d $DATASET --model $KGE_MODEL -f ranking_head -init $KGE_SAVED_MODEL_PATH
python score_triplets_kge.py -d $DATASET --model $KGE_MODEL -f ranking_tail -init $KGE_SAVED_MODEL_PATH
# Score head/tail replaced samples with ensemble model
python blend.py -d $DATASET -em2 $KGE_MODEL --do_scoring -f ranking_head
python blend.py -d $DATASET -em2 $KGE_MODEL --do_scoring -f ranking_tail
# Compute ranking metrics for ensemble model with the scored head/tail replaced samples
python compute_rank_metrics.py -d $DATASET -m grail_with_${KGE_MODEL}
# Compute ranking metrics for KGE model with the scored head/tail replaced samples
python compute_rank_metrics.py -d $DATASET -m $KGE_MODEL | true |
6e1bdbe78a544ebc451aabeaa5f3c3136bb81559 | Shell | davidandreoletti/dotfiles | /.oh-my-shell/bash/completion/default.sh | UTF-8 | 1,168 | 3.109375 | 3 | [] | no_license | # Load bash completions
# Note: List all completions routine with "complete -p"
# Completion for installed homebrew packages, with completion profile.d support
HOMEBREW_PROFILED_COMPLETION_FILE=$(homebrew_package_path_prefix "/../etc/profile.d/bash_completion.sh")
if [[ -r "$HOMEBREW_PROFILED_COMPLETION_FILE" ]]; then
dot_if_exists "$HOMEBREW_PROFILED_COMPLETION_FILE"
else
for COMPLETION in "$homebrew_package_path_prefix/../etc/bash_completion.d/"*; do
dot_if_exists "$COMPLETION"
done
fi
# Completion for installed homebrew packages, without completion profile.d support
HOMEBREW_FZF_COMPLETION_DIR=$(homebrew_package_path_prefix "/fzf/shell/completion.bash")
dot_if_exists "$HOMEBREW_FZF_COMPLETION_DIR"
HOMEBREW_FZF_COMPLETION_DIR=$(homebrew_package_path_prefix "/fzf/shell/completion.bash")
dot_if_exists "$HOMEBREW_FZF_COMPLETION_DIR"
# Enables:
# - CTRL-T as fzf file chooser.
# -- Eg: vim <ctrl-t>
HOMEBREW_FZF_KEYBINDINGS_DIR=$(homebrew_package_path_prefix "/fzf/shell/key-bindings.bash")
dot_if_exists "$HOMEBREW_FZF_KEYBINDINGS_DIR"
# Make Bash complete the `g` alias just like it does `git`.
complete -o default -o nospace -F _git g
| true |
0cb96e63312fcb0c69e39c8e8654b08fee3320a3 | Shell | RRsubES/tbtniv | /cp2ftp.sh | UTF-8 | 423 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Upload a single file to the FTP server described by the config in
# $FTP_CFG (default bleiz.cfg); the config must define FTP_ADR, FTP_USER,
# FTP_PW and FTP_DIR.
FTP_CFG=${FTP_CFG:-bleiz.cfg}
if [ $# -ne 1 ] || [ "$1" == "-h" ]; then
echo "copie le fichier donne en argument" >&2
echo "dans le ftp defini par ${FTP_CFG}." >&2
echo "usage: $(basename $0) file" >&2
exit 1
fi
# Non-interactive ftp session: authenticate, cd to the target directory,
# switch to binary mode and upload the file.
function cp2ftp {
# $1 is the file to copy
source "${FTP_CFG}"
ftp -in ${FTP_ADR}<<EOF
quote user ${FTP_USER}
quote pass ${FTP_PW}
cd "${FTP_DIR}"
binary
put "${1}"
quit
EOF
}
cp2ftp "$1"
| true |
6ef1ca3d687f1a8f9481399f0ff7e5ae884e2371 | Shell | bizflycloud/internship-2020 | /KhanhNT/Linux/tc-tool/limit-down.sh | UTF-8 | 969 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Shape per-host download bandwidth on $IF for LAN hosts 192.168.0.1-255.
# Ingress traffic is mirrored onto an ifb device so it can be rate-limited
# as egress.  Must run as root.
IF="ens3"
echo "$IF"
ifb="ifb3"
echo "$ifb"
lan="192.168.0."
down="100000"
# NOTE(review): $up is never used below — upload shaping looks unfinished.
up="100000"
TC=$(which tc)
# NOTE(review): the ifb module parameter is spelled "numifbs" in mainline
# kernels — confirm "numifb=1" has any effect on the target kernel.
modprobe ifb numifb=1
ip link add $ifb type ifb
ip link set dev $ifb up
## Limit incoming traffic
### Clean interface
$TC qdisc del dev $IF handle ffff: ingress
$TC qdisc del root dev $ifb
$TC qdisc del root dev $IF
#$TC qdisc add dev $IF root handle 1: htb default 999
$TC qdisc add dev $IF handle ffff: ingress
### Redirect ingress ens3 to egress ifb3
$TC filter add dev $IF parent ffff: protocol ip u32 match u32 0 0 action mirred egress redirect dev $ifb
$TC qdisc add dev $ifb root handle 1: htb default 10
$TC qdisc add dev $IF root handle 1: htb default 10
# One htb class plus a dst-IP filter per LAN host.
# NOTE(review): classes are attached to parent 1:1, but no class 1:1 is
# created above (only root qdisc 1:) — verify these tc commands succeed.
for i in $(seq 1 255); do
$TC class add dev $ifb parent 1:1 classid 1:$i htb rate ${down}kbit
$TC class add dev $IF parent 1:1 classid 1:$i htb rate ${down}kbit
$TC filter add dev $ifb protocol ip parent 1: prio 1 u32 match ip dst $lan$i/32 flowid 1:$i
done
| true |
308750e1537001918479077cc0a3df029a00e5d9 | Shell | d-cole/MutationAccumulation_PreprocAnalysis | /pipeLineScripts/autoSortSam.sh | UTF-8 | 244 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Coordinate-sort every *_readgroup.bam under the prefix given as $1 with
# Picard SortSam, writing <name>_readgroup_sorted.bam alongside each input.
# Fix: expansions are quoted so paths with spaces survive, and the glob is
# iterated directly instead of via an indexed array.
LOC=$1
for bam in "$LOC"*_readgroup.bam; do
[ -e "$bam" ] || continue # no matches: skip the literal glob pattern
java -Xmx4g -jar ~/tools/picard-tools-1.96/SortSam.jar INPUT="$bam" OUTPUT="${bam/.bam/_sorted.bam}" SORT_ORDER=coordinate
done
| true |
ea79a700ed5697cc36b65416e4e769e3d244357d | Shell | Claercio/sqlectron-gui | /scripts/upgrade-local-app.sh | UTF-8 | 332 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Important!
# ----------------------------
# This script only works on Mac
# Replaces the installed SQLECTRON.app in ~/Applications with the freshly
# built copy under releases/ (run from the repository root) and opens it.
APP_NAME="SQLECTRON.app"
echo " ==> Removing older app"
rm -rf ~/Applications/$APP_NAME
echo " ==> Copying new app"
cp -R releases/SQLECTRON-darwin-x64/SQLECTRON.app ~/Applications
echo " ==> Opening new app"
open ~/Applications/$APP_NAME
| true |
545f3d8e46e1e43a5e62d68110ccd36b83f523bf | Shell | ZeroCry/pithos-app | /images/cassandra/rootfs/usr/sbin/start-telegraf.sh | UTF-8 | 647 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# -*- mode: sh; -*-
# File: start-telegraf.sh
# Time-stamp: <2018-06-07 16:05:44>
# Copyright (C) 2018 Gravitational Inc
# Description:
# Create the telegraf user in the in-cluster InfluxDB, grant it access to
# the k8s database, then run telegraf in the foreground (container
# entrypoint).  Requires INFLUXDB_TELEGRAF_USERNAME and
# INFLUXDB_TELEGRAF_PASSWORD in the environment (nounset makes their
# absence fatal).
# set -o xtrace
set -o nounset
set -o errexit
set -o pipefail
# create telegraf user
curl -XPOST "http://influxdb.kube-system.svc:8086/query?u=root&p=root" \
--data-urlencode "q=CREATE USER ${INFLUXDB_TELEGRAF_USERNAME} WITH PASSWORD '${INFLUXDB_TELEGRAF_PASSWORD}'"
curl -XPOST "http://influxdb.kube-system.svc:8086/query?u=root&p=root" \
--data-urlencode "q=GRANT ALL on k8s to ${INFLUXDB_TELEGRAF_USERNAME}"
# start telegraf
/usr/bin/telegraf --config /etc/telegraf/telegraf.conf
| true |
108ddb8ca30ba471826769a1d25d104c72777785 | Shell | VictorGarritano/WassersteinRetrieval | /get_embeddings.sh | UTF-8 | 404 | 2.609375 | 3 | [] | no_license | echo "Start downloading embeddings.."
wget "https://conceptnet.s3.amazonaws.com/downloads/2017/numberbatch/numberbatch-17.06.txt.gz"
echo "Embeddings downloaded. Start unzipping."
gunzip numberbatch-17.06.txt.gz
echo "Embeddings unzipped. Start filtering English and French words."
python extract_embeddings_conceptNet.py
echo "Done. Cleaning.."
rm numberbatch-17.06.txt
echo "Done!"
echo
echo
echo
| true |
bda27cfa2cb9bf9d7e90a746c68072332df1e293 | Shell | lanrenags/lanrenags-skripte | /idler.sh | UTF-8 | 246 | 2.921875 | 3 | [] | no_license | PROZ=`cat /proc/cpuinfo | grep processor | wc -l`
# Spawn one ./idler worker pinned to each CPU core, unless idlers are
# already running.  $PROZ (core count from /proc/cpuinfo) is set on the
# line preceding this block.
IDLER=`ps aux | grep idler | wc -l`
i=0
# The grep matches its own process, so a count of 1 means "none running".
if [ ! $IDLER -gt 1 ]; then
while [ $i -lt $PROZ ]
do
nice taskset -c $i ./idler &
i=`expr $i + 1`
done
else
echo "Idler laeuft schon"
fi
| true |
56c37b0079d35ccf21c60c033ea46d36fdbce5c9 | Shell | ngctnnnn/Knapsack-solution | /output/createOutput.sh | UTF-8 | 441 | 3.0625 | 3 | [] | no_license | #!/bin/sh
# Create "Output 1".."Output 12" in the cwd and distribute the files from
# the directory given as $1 into them, eight per folder, renamed
# "test <n>.txt".
# Bug fixes: folders were created as "Output 0<n>" but files were moved
# into "Output <n>", and the mv destination used "~" inside quotes (tilde
# is not expanded there), so every mv failed.  Folders and destinations now
# share the same relative "Output <n>" names.
# TODO(review): confirm whether the zero-padded "Output 0<n>" naming was
# the intended scheme; the unpadded form is used here for consistency.
count=0
while [ $count -lt 12 ]; do
count=$(($count+1))
mkdir "Output $count"
done
counter=0
cntfolder=1
for file in "$1"/*; do
if [ $counter -eq 8 ]; then
counter=1
cntfolder=$(($cntfolder+1))
else
counter=$(($counter+1))
fi
mv "$file" "Output $cntfolder/test $counter.txt"
done
exit 0
| true |
30db631c30cd333dcb6a8f3d4a4a73053466d069 | Shell | zzjl20/MassBank-data | /.scripts/find_nonascii.sh | UTF-8 | 156 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# Report non-ASCII content in the file given as $1: print offending lines
# with line numbers, then a warning.  Fix: $1 is quoted so paths with
# spaces or glob characters work.
if [ -e "$1" ]; then
# LC_ALL=C makes the byte-range class match raw bytes regardless of locale.
if LC_ALL=C grep -P -n '[^\x00-\x7f]' "$1"; then
echo "Warning: File $1 contains non ASCII characters."
fi
fi
| true |
331597ae1a6bab32e8e37db418ac45fd425d4d37 | Shell | qqqqqqq645/qwr | /asdf/fwhile.sh | UTF-8 | 90 | 2.859375 | 3 | [] | no_license | i=1
# Sum the integers 1..100 and print the total (5050).
# Bug fix: the loop condition tested "$1" (the script's first argument)
# instead of the counter "$i", so the loop never terminated correctly.
# i is (re)initialised here so the block is self-contained.
i=1
sum=0
while [ "$i" -lt 101 ]
do
sum=$((sum + i))
i=$((i + 1))
done
echo "$sum"
| true |
b8cdf39fc16ca46f235f278ee2dc3d08d664e088 | Shell | memazouni/guide-to-develop-on-dragonchain | /smart-contract-templates/shell_contract/contract.sh | UTF-8 | 503 | 3.75 | 4 | [] | no_license | #!/bin/sh
# Get input from stdin
# Dragonchain smart-contract entrypoint: read the whole payload from stdin
# (stdout is the chain response, stderr is logged), then print a figlet
# banner of the payload's first 8 characters.
input=""
# NOTE(review): this appends the two characters backslash+n, not a real
# newline; the awk below then strips the final character.  The round trip
# relies on the later echo re-interpreting "\n" (as dash/busybox /bin/sh
# echo does) — confirm on the target image.
while read -r line; do
input="$input$line\n"
done
# Trim the extra trailing newline on our input
input=$(echo "$input" | awk '{print substr($0, 0, length($0) - 1)}')
# Now do something with the input
echo "Welcome to Dragonchain"
echo "This is a log" >&2
# Use awk to get the first 8 characters from the beginning of the input
short=$(echo "$input" | awk '{print substr ($0, 0, 8)}')
# Print an ASCII art banner of these trimmed characters
figlet $short
| true |
cd5ff8a554f3f43444baebf52dd7e487941e65a7 | Shell | echonest/discogs-xml2db | /get_latest_dumps.sh | UTF-8 | 671 | 3.0625 | 3 | [] | no_license | #/bin/bash
# Fetch the four most recent Discogs data dumps (artists/labels/masters/
# releases): scrape the listing page for matching filenames, write their
# URLs to $D_TMP, then download any not already present.
# Requires: wget, ack.
USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22"
ACCEPT="Accept-Encoding: gzip, deflate"
D_URL="http://www.discogs.com/data/"
D_TMP=/tmp/discogs.urls
D_PATTERN="discogs_\d+_(artists|labels|masters|releases).xml.gz"
# With --test, only probe the URLs (wget --spider) instead of downloading.
TEST=""
[[ "$1" == '--test' ]] && TEST='--spider -S'
echo "" > $D_TMP
for f in `wget -c --user-agent="$USER_AGENT" --header="$ACCEPT" -qO- $D_URL | ack -io "$D_PATTERN" | sort | uniq | tail -n 4` ; do
echo $D_URL$f >> $D_TMP
done
wget -c --user-agent="$USER_AGENT" --header="$ACCEPT" --no-clobber --input-file=$D_TMP $TEST --append-output=$D_TMP.log
| true |
18555e688525a566a5387370f97c390cdd5ee402 | Shell | ithoq/misc_tools | /check_disk_space.sh | UTF-8 | 624 | 3.625 | 4 | [] | no_license | #!/bin/bash
# E-mail $ADMIN when the given partition's df usage meets or exceeds the
# threshold.  Usage: check_disk_space.sh <partition> <percent-threshold>
if [ $# -lt 2 ]; then
echo "Usage: $0 <partition to check> <alert treshold>"
exit 1
fi
ADMIN="zioproto@gmail.com"
ALERT=$2
check_part=$1
df -H > /tmp/df.out
# Drop the header and pseudo filesystems; keep "use% device" pairs.
cat /tmp/df.out | grep -vE '^Filesystem|tmpfs|cdrom' | awk '{ print $5 " " $1 }' | while read output;
do
usep=$(echo $output | awk '{ print $1}' | cut -d'%' -f1 )
partition=$(echo $output | awk '{ print $2 }' )
if [ "$partition" = "$check_part" ]; then
if [ "$usep" -ge "$ALERT" ]; then
# Bug fix: the subject string was unquoted, so mail parsed "Server" as an
# extra recipient address instead of as part of the subject.
echo "Fine dello spazio \"$partition ($usep%)\" su $(hostname) il $(date)" | mail -a "From: script check space" -s "Space-Central Server" "$ADMIN"
fi
fi
done
| true |
7251b015b188c2c0f4993a018e90f2e715f9efad | Shell | austintoddj/codewars | /Bash/remove-anchor-from-url.sh | UTF-8 | 313 | 3.171875 | 3 | [] | no_license | #!/bin/bash
#
#Complete the function/method so that it returns the url with anything after the anchor (#) removed.
#
#Examples:
# returns 'www.codewars.com'
# removeUrlAnchor('www.codewars.com#about')
#
# returns 'www.codewars.com?page=1'
# removeUrlAnchor('www.codewars.com?page=1')

# Strip the fragment: drop the first '#' and everything after it.
# Bug fix: the previous `echo $1 | cut -f1 -d "#"` word-split and
# glob-expanded the unquoted URL; parameter expansion avoids both and
# spawns no extra processes.
remove_url_anchor() {
printf '%s\n' "${1%%#*}"
}
remove_url_anchor "$1"
ff6077eaf3c022b6ffe4d01e9b3affa36acbb4a0 | Shell | djpm05/Arduino_HMAC | /gen_command.sh | UTF-8 | 124 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
PSK="MYUNIQUEPSK"
echo -n $1"~"
echo -n $1 | openssl dgst -sha256 -hmac $PSK | tr '[[:lower:]]' '[[:upper:]]'
| true |
aa5e9dcdf0219feda9d70e359cc081185fbeaf8f | Shell | cpauliat/my-oci-scripts | /oci_misc/OCI_resource_manager_stack_update_variables.sh | UTF-8 | 2,076 | 4.1875 | 4 | [] | no_license | #!/bin/bash
# --------------------------------------------------------------------------------------------------------------------------
# This script updates the variables of an OCI Resource Manager stack using OCI CLI
#
# Note: OCI tenant and region given by an OCI CLI PROFILE
# Author : Christophe Pauliat
# Platforms : MacOS / Linux
#
# Versions
# 2021-09-10: Initial Version
# --------------------------------------------------------------------------------------------------------------------------
# ---------------- main
OCI_CONFIG_FILE=~/.oci/config
# ---------------- functions
# Print the help text (with a sample OCI CLI profile) to stdout and exit 1.
# The heredoc delimiter is intentionally unquoted so $0 expands to the
# script name in the usage line.
usage()
{
cat << EOF
Usage: $0 OCI_PROFILE stack_ocid config_file.json
How to use this script:
- First, use script OCI_resource_manager_stack_get_config.sh to get current variables of a stack and save output to a file
- Modify the JSON file created to match your needs
- Finally use this file in this script to update the variables of the stack
note: OCI_PROFILE must exist in ~/.oci/config file (see example below)
[EMEAOSCf]
tenancy = ocid1.tenancy.oc1..aaaaaaaaw7e6nkszrry6d5hxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
user = ocid1.user.oc1..aaaaaaaayblfepjieoxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
fingerprint = 19:1d:7b:3a:17:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx
key_file = /Users/cpauliat/.oci/api_key.pem
region = eu-frankfurt-1
EOF
exit 1
}
# ---------------- main
# -- Check usage
if [ $# -ne 3 ]; then usage; fi
PROFILE=$1
STACK_ID=$2
JSON_FILE=$3
# -- Check if oci is installed
# NOTE(review): `command -v oci` is the more portable check than `which`.
which oci > /dev/null 2>&1
if [ $? -ne 0 ]; then echo "ERROR: oci not found !"; exit 2; fi
# -- Check if the PROFILE exists
grep "\[$PROFILE\]" $OCI_CONFIG_FILE > /dev/null 2>&1
if [ $? -ne 0 ]; then echo "ERROR: profile $PROFILE does not exist in file $OCI_CONFIG_FILE !"; exit 3; fi
# -- Check if the file exists
if [ ! -f $JSON_FILE ]; then
echo "ERROR: file $JSON_FILE does not exist or is not readable !"
exit 4
fi
# -- update Stack configuration
# Pushes the (pre-edited) JSON variable file to the Resource Manager stack.
oci --profile $PROFILE resource-manager stack update --stack-id $STACK_ID --from-json file://$JSON_FILE
5810934abef14939724712b636f80563b14c83ce | Shell | wildcs/WeNeCo | /etc/install/repair.sh | UTF-8 | 2,589 | 3.484375 | 3 | [] | no_license | #!/bin/bash
#
# ,--.
# .---. ,--.'| ,----..
# /. ./| ,--,: : | / / \
# .--'. ' ; ,`--.'`| ' : | : : ,---.
# /__./ \ : | | : : | | . | ;. / ' ,'\
# .--'. ' \' . ,---. : | \ | : ,---. . ; /--` / / |
# /___/ \ | ' ' / \ | : ' '; | / \ ; | ; . ; ,. :
# ; \ \; : / / |' ' ;. ; / / || : | ' | |: :
# \ ; ` |. ' / || | | \ |. ' / |. | '___' | .; :
# . \ .\ ;' ; /|' : | ; .'' ; /|' ; : .'| : |
# \ \ ' \ |' | / || | '`--' ' | / |' | '/ :\ \ /
# : ' |--" | : |' : | | : || : / `----'
# \ \ ; \ \ / ; |.' \ \ / \ \ .'
# '---" `----' '---' `----' `---`
#
# Web Network Configuration
#
# Repair Script
# SOURCES
source_dir=$(dirname $(readlink -f $0))
source "$source_dir/common.sh"
source "$source_dir/patch.sh"
# SHOW REPAIR MENU
# Interactive repair menu.  Relies on helpers sourced from common.sh /
# patch.sh (display_logo, log_ne/log_ok/log_failed, install_dependencies,
# set_permissions, patch_sudoers, cleanup_setup) and on the $gn/$nc color
# variables.
# NOTE(review): if stdin hits EOF, `read` leaves $answer empty and the loop
# spins forever through the "UNKNOWN CHOICE" branch — confirm acceptable.
function repair_weneco(){
display_logo
echo -e "${gn}------------------------------ ${nc}"
echo -e "${gn} WeNeCo Repair Assistant ${nc}"
echo -e "${gn}------------------------------${nc}"
echo -e " 1) (Re)install dependencies"
echo -e " 2) Set WeNeCo file-permissions"
echo -e " 3) Patch sudoers"
echo -e " 8) (Re)start networking"
echo -e " 9) exit"
while true;
do
echo -n "Select your choice: "
read answer
if [[ $answer == "1" ]]; then
install_dependencies
elif [[ $answer == "2" ]]; then
set_permissions
elif [[ $answer == "3" ]]; then
# "force" re-applies the sudoers patch even if already present.
patch_sudoers "force"
elif [[ $answer == "8" ]]; then
log_ne "restarting network services"
eval "sudo bash $weneco_dir/script/restart_network.sh" && log_ok || log_failed
elif [[ $answer == "9" ]]; then
cleanup_setup
break
else
echo -e "UNKNOWN CHOICE"
sleep 1s
fi
done
}
# ONLY START WITH MAIN-SETUP-SCRIPT
# $main is expected to be exported by the parent setup.sh (via common.sh).
# NOTE(review): $main is unquoted — if it is unset, the test becomes
# `[ != setup.sh ]` and errors; confirm setup.sh always sets it.
if [ $main != "setup.sh" ]; then
install_error "Please run 'setup.sh'"
fi
| true |
5ddc1a247fa11e4ea845483be46dd5045ea85b69 | Shell | sfrehse/dependencies | /picosat-936/setup.sh | UTF-8 | 712 | 3.828125 | 4 | [] | no_license | #!/bin/sh
# Fetch, unpack and install PicoSAT 936.  Expects $build and $package_dir
# to be exported by the calling build harness; the helper functions
# install_cmake_files / cmake_build_install come from that harness too.
if [ -z "$build" ] ; then
echo '$build is undefined'
exit 1
fi
if [ -z "$package_dir" ] ; then
echo '$package_dir is undefined'
exit 1
fi
package=picosat
version=936
source=$package-$version.tar.gz
build_dir=$build/$package-$version
url=http://fmv.jku.at/$package/$source
# Download the release tarball (reused if already present) and unpack it
# into $build.  With duplicate=remove, any existing build tree is wiped.
download_unpack() {
if [ "$duplicate" = "remove" ]; then
rm -rf $build_dir
fi
mkdir -p $(dirname $build_dir) &&
cd $(dirname $build_dir) &&
[ -f $source ] || wget -O $source $url &&
tar -xf $source
}
# Drop the harness's CMake glue into the unpacked source tree.
pre_build() {
cd $build_dir &&
install_cmake_files
}
# Configure/build/install through the harness; requires $target to be set.
build_install() {
if [ -z "$target" ] ; then
echo '$target is undefined'
exit 1
fi
cd $build_dir &&
cmake_build_install
}
| true |
95d9644a4a10b42d544c21ec54789302d88bf5d6 | Shell | aramisf/discs | /redes1/2trab/ns_example/run.sh | UTF-8 | 3,964 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# This script must do:
#-> To generate an average delivery rate graphic:
#-> To generate an average delay graphic
# -> Achar p/ cada mensagem
# Atraso = Tempo de chegada - Tempo de criacao da msg
# -> Calcular media para todas as mensagens
#-> Repetir cada simulacao 3 vezes com sementes diferentes para
# o gerador de numeros aleatorios e calcular a media
# Variaveis globais
CONTINUE="nao"
COUNT=0
bc_args='-l'
# Validate the command line.  If $1 is an existing trace file, export the
# globals used by the rest of the script (ARQ, ARQ_LINES, ID_MSG, ADDER)
# and set CONTINUE=sim; otherwise ask (in Portuguese) whether to process
# every .tr file in the directory instead.
check_args () {
if [ -n "$1" ] && [ -f "$1" ]; then
echo "Meus Parametros: $@"
export ARQ=$1
export ARQ_LINES=$(wc -l $ARQ|cut -d' ' -f1)
echo "LINHAS: $ARQ_LINES"
# Highest value of column 12 = id of the last message in the trace.
export ID_MSG=$(sort -k12 -n $ARQ|tail -1|cut -d' ' -f12)
echo "$ID_MSG mensagens"
# ADDER = the digits of the file name, used as the x-axis value in plots.
export ADDER=${ARQ//[^0-9]/}
echo "Adder: $ADDER"
export CONTINUE="sim"
else
read -p "Calcular taxa de entrega para todos os arquivos .tr neste diretorio?[S/n] " ANSW
if test -z "$ANSW" || [[ "$ANSW" =~ [Ss].* ]]; then
CONTINUE="sim"
echo "aceito"
else
CONTINUE="nao"
echo "rejeitado"
fi
fi
}
# Compute delivery statistics for the trace file in $ARQ and export them:
# RECEBIDOS (lines flagged 'r'), PERDIDOS (flagged 'd'), ENVIADOS (their
# sum) and TAXA (delivery rate, percent, via bc).
calcula_taxa () {
local flags
flags=$(cut -d" " -f1 ${ARQ})
export RECEBIDOS=$(printf '%s\n' "$flags" | grep -c r)
export PERDIDOS=$(printf '%s\n' "$flags" | grep -c d)
export ENVIADOS=$((RECEBIDOS + PERDIDOS))
export TAXA=$(echo "(${RECEBIDOS} / ${ENVIADOS}) * 100" | bc -l)
}
# Print the delivery statistics computed by calcula_taxa and append
# "<adder> <taxa>" / "<taxa> <adder>" data points for the gnuplot scripts.
exibe_taxa () {
echo -e "Exibindo valores para o arquivo $ARQ:\n"
echo "Número de pacotes recebidos = $RECEBIDOS"
echo "Número de pacotes perdidos = $PERDIDOS"
echo "Número de pacotes enviados = $ENVIADOS"
echo "Taxa de entrega = $TAXA %"
echo $ADDER $TAXA >> taxa.plot
echo $TAXA $ADDER >> tax.plot
}
# With a trace file selected ($ARQ set), compute and display its delivery
# rate; otherwise re-invoke this script once per file under tr_files/.
organiza_taxa () {
if [ -n "$ARQ" ]; then
calcula_taxa
exibe_taxa
else
for arq in tr_files/*; do
$0 $arq
done
fi
}
# Calcula a latencia de banda, recebe 2 parametros, o arquivo .tr e o numero de
# transmissores/receptores correspondente. Ex. se o arquivo gerado contem 30
# transmissores, entao deve-se chamar esta funcao assim:
# $ latencia $ARQ $VARS
# No caso, podemos colocar este valor como segundo parametro do script
# Sample the trace: for each of 10 evenly spaced message ids (multiples of
# ID_MSG/10), collect that message's trace lines (the id is the last field
# on the line) into latencias/<i>.tr.
gera_ListadeLatencia () {
if test ! -d latencias; then
mkdir latencias
fi
for ((i=0; i < 10; i++)); do
# Match the packet id anchored at the end of the line.
grep " $((i*($ID_MSG/10)))$" $ARQ > latencias/$i.tr
echo -n +
done
echo
#head -$VARS $ARQ >| $TMP
#RECEBIDOS=($(grep ^r $ARQ|head -$VARS))
#exit 0
}
#
# Contagem amostral
#
calcula_latencia () {
gera_ListadeLatencia
for input_file in latencias/*
do
tempo_inicio=$(cut -d' ' -f2 $input_file | head -1)
tempo_fim=$(cut -d' ' -f2 $input_file | tail -1)
latencia[$count]=$(echo $tempo_fim - $tempo_inicio|bc $bc_args)
# who=$(tail -1 $input_file | cut -d' ' -f12)
# echo $VARS $latencia[$count] >> latencia.in
echo -n .
count=$(($count + 1))
done
echo
#
# Calculando latencia
#
soma=0
for((i=0; i<$count; i++)); do
soma=$(echo ${latencia[i]} + $soma | bc)
done
latencia_total=$(echo $soma / $count | bc $bc_args)
# gravando as latencias
export lt=$(sed 's@\.@0&@g' <<< $latencia_total)
echo $ADDER $lt >> latencia.plot
echo $lt $ADDER >> lat.plot
echo latencia $lt ms
}
criner () {
[ -d latencias ] && rm -r latencias
#[ -f latencia.plot ] && rm latencia.plot
#[ -f taxa.plot ] && rm taxa.plot
}
harry_plotter () {
gnuplot taxa.gnu
gnuplot tax.gnu
gnuplot latencia.gnu
gnuplot lat.gnu
}
### MAIN
main () {
check_args $@
if [[ "$CONTINUE" == "sim" ]]; then
organiza_taxa
calcula_latencia
else
echo "Abortado"
exit 1
fi
}
criner
main $@
harry_plotter
| true |
5d9bd3ac914180226ce8d51f8f070596e8f7bccc | Shell | ku54713/fireFoam-2.4.x | /solver/installVersion.sh | UTF-8 | 617 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# adds appropriate file and rule to wmake rules directory
destination=$WM_DIR/rules/General/
file=version2
if [ -e $destination/$file ]
then
echo "file $file exists in $destination"
else
if [ -e $file -a -d $destination ]
then
echo "installing $file in $destination"
cp -rip $file $destination
fi
fi
file=standard
if [ -e $destination/$file ]
then
if ! grep -q "fireFoam" "$destination/$file" ;then
echo "adding rule to $destination/$file"
echo "#fireFoam version stamping" >> $destination/$file
echo "include \$(GENERAL_RULES)/version2" >> $destination/$file
fi
fi
| true |
8a8899f009b22e49c9a8b4c497acc606101ace49 | Shell | pyrochat/mooc-led | /generation/open-pdf.sh | UTF-8 | 1,449 | 3.984375 | 4 | [] | no_license | #!/bin/bash
# ##########
#
# open-pdf.sh ⇒ ouvre les fichiers PDF des cours pour vérifier qu’ils sont OK manuellement
#
#
# Nicolas Jeanmonod, février 2016
#
# ##########
function OPENPDF
{
if [[ "$OSTYPE" == darwin14 ]]; then
open $PDF_FILE
elif [[ "$OSTYPE" == linux-gnu ]]; then
xdg-open $PDF_FILE
fi
}
function DO_ALL
{
pwd
CSS_FILE=../../statiques/style.css
MD_FILE=$CODE.md
HTML_FILE=$CODE.html
PDF_FILE=$CODE.pdf
echo "ouverture de $PDF_FILE"
OPENPDF
}
if [[ "$#" == "0" ]]; then
# Si aucun argument n’est indiqué, on transforme tous les chapitres.
cd ../cours/
INFOS=`find . -name infos.yaml`
INFOS=(${INFOS//:/ })
for INFO in "${INFOS[@]}"
do
echo $INFO
STATUT=`awk -F 'statut:[ ]+' '{ print $2 }' $INFO`
STATUT=`echo $STATUT`
echo $STATUT
if [[ "$STATUT" == *"Pas publié"* ]]; then
:
else
CODE=`awk -F 'code:[ ]+' '{ print $2 }' $INFO`
CODE=`echo $CODE`
echo $CODE
DIR=$(dirname "${INFO}")
echo "${DIR}"
cd ${DIR}
DO_ALL
cd ..
fi
echo "*****"
done
else
# Si un argument est indiqué, on ne transforme que ce chapitre.
CHAP_NB=$1
cd ../cours/$CHAP_NB
CODE=`awk -F 'code:[ ]+' '{ print $2 }' infos.yaml`
CODE=`echo $CODE`
echo $CODE
DO_ALL
fi
| true |
ff165b2199d0e0847702bcbd55d9fb831bb57695 | Shell | apperian/isign | /apple/provisions.sh | UTF-8 | 2,230 | 4.09375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
usage() {
echo "./provisions -p [PATH_TO_NEW_PROVISIONING_PROFILE] -c \"CERT NAME: MUST BE IN KEYCHAIN\" ipa_file"
exit
}
while getopts ":p:c:" opt; do
case $opt in
p ) PRO_PROFILE=$OPTARG ;;
c ) CERT_NAME=$OPTARG ;;
/? ) usage
esac
done
shift $(($OPTIND - 1))
if [[ -z "$@" ]]; then
usage
else
APP=$@
fi
verify_args() {
if [[ ! -e $APP ]]; then
echo "$APP does not exist"
exit 1
elif [[ ! -e $PRO_PROFILE ]]; then
echo "$PRO_PROFILE does not exist"
exit 1
elif [[ -z $CERT_NAME ]]; then
echo "Must specify a certificate to use"
exit 1
fi
}
is_app() {
[[ $APP =~ \.app$ ]]
}
is_ipa() {
[[ $APP =~ \.ipa$ ]]
}
setup_dir() {
STAGE_DIR=./stage
ENTITLEMENTS_FILE=$STAGE_DIR/Entitlements.plist
if [[ -e $STAGE_DIR ]]; then
rm -r $STAGE_DIR
fi
mkdir $STAGE_DIR
if is_app; then
cp -r $APP $STAGE_DIR
APP_NAME=$(basename $APP)
PAYLOAD_DIR=""
APP_DIR=$STAGE_DIR/$APP_NAME
elif is_ipa; then
unzip -qu $APP -d $STAGE_DIR
PAYLOAD_DIR=$STAGE_DIR/Payload
APP_DIR=$PAYLOAD_DIR/*.app
else
echo "Must provide either an .app or .ipa file"
exit 1
fi
}
copy_profile() {
cp "$PRO_PROFILE" $APP_DIR/embedded.mobileprovision
}
create_entitlements() {
/usr/libexec/PlistBuddy -x -c "print :Entitlements " /dev/stdin <<< $(security cms -D -i ${APP_DIR}/embedded.mobileprovision) > $ENTITLEMENTS_FILE
}
sign_app() {
if [ -e $APP_DIR/Frameworks ]; then
for dylib in "$APP_DIR/Frameworks/*"
do
echo "signing $dylib"
# entitlements are irrelevant to dylibs
/usr/bin/codesign -f -s "$CERT_NAME" $dylib
done
fi
echo "signing $APP_DIR";
/usr/bin/codesign -f -s "$CERT_NAME" --entitlements $ENTITLEMENTS_FILE $APP_DIR 2>/dev/null
}
package_app() {
if is_ipa; then
(cd $STAGE_DIR; zip -qr out.ipa Payload)
echo "Re-provisioned ipa at $STAGE_DIR/out.ipa"
else
echo "Re-provisioned app at $APP_DIR"
fi
}
verify_args
setup_dir
copy_profile
create_entitlements
sign_app
package_app
| true |
e5277b41b754e74127ad474f3341e4342d4d5b49 | Shell | ehlersmj4814/jamfScripts | /ESETConfig.sh | UTF-8 | 5,381 | 3.296875 | 3 | [] | no_license | #!/bin/bash
####################################################################################################
#
# ABOUT THIS PROGRAM
#
# NAME
# ESETConfig.sh
#
# SYNOPSIS
# sudo ESETConfig.sh
#
# DESCRIPTION
# This script unloads ESET, imports ESET Configuration File that should be uploaded from jamf in /var/ESET/, and reloads ESET.
#
####################################################################################################
#
# HISTORY
#
# Version: 1.1
#
# - Created by Sound Mac Guy
# https://soundmacguy.wordpress.com/2018/12/04/hello-eset-endpoint-antivirus-deployment-management-and-migrating-from-scep/
# - Modified by Matt Ehlers on Jan, 28, 2020
#
####################################################################################################
#
# DEFINE VARIABLES & READ IN PARAMETERS
#
####################################################################################################
# HARDCODED VALUES SET HERE
# Path to the ESET Endpoint Antivirus installer package file
pkgpath=$4
# Path to exported settings file you wish to import
settingsfile=$5
# Set the variable below to "yes" if you are going to apply your own user settings to the ESET GUI (e.g. notifications/alerts)
replaceguicfg="yes"
# Path to the directory containing your custom ESET user GUI configuration
guicfgpath=$6
# Do not edit these variables
loggedInUser=$( scutil <<< "show State:/Users/ConsoleUser" | awk -F': ' '/[[:space:]]+Name[[:space:]]:/ { if ( $2 != "loginwindow" ) { print $2 }} ' )
esetapp="/Applications/ESET Endpoint Antivirus.app/Contents/MacOS"
####################################################################################################
#
# SCRIPT CONTENTS - DO NOT MODIFY BELOW THIS LINE
#
####################################################################################################
#!/bin/bash
# Prevent ESET GUI launching after install until we set it up)
if [[ "$replaceguicfg" == "yes" ]]; then
if [[ ! -e "/Library/Application Support/ESET/esets/cache" ]]; then
/bin/mkdir -p "/Library/Application Support/ESET/esets/cache"
fi
/usr/bin/touch "/Library/Application Support/ESET/esets/cache/do_not_launch_esets_gui_after_installation"
fi
# Install ESET from package filename specified in Jamf Policy $4 parameter
echo "Installing base package: ESET"
/usr/sbin/installer -pkg "$pkgpath" -tgt /
# Import configuration
echo "Importing Config Settings, License, and GUI Settings"
/bin/launchctl unload "/Library/LaunchDaemons/com.eset.esets_daemon.plist"
"$esetapp"/esets_daemon --import-settings "$settingsfile"
"$esetapp"/esets_daemon --wait-respond --activate key=GUN9-XET2-AEDS-237J-XEA4
/bin/launchctl load "/Library/LaunchDaemons/com.eset.esets_daemon.plist"
# Generate new user settings LaunchAgent and script
if [[ "$replaceguicfg" == "yes" ]]; then
if [[ ! -e "$guicfgpath" ]]; then
/bin/mkdir -p "$guicfgpath"
fi
# Generate the gui.cfg file - edit the values below the following line as necessary for your environment
/bin/cat << EOF > "$guicfgpath/gui.cfg"
[gui]
std_menu_enabled=no
splash_screen_enabled=no
dock_icon_enabled=no
tray_icon_enabled=yes
tooltips_enabled=no
alert_display_enabled=no
scheduler_show_all_tasks=no
filter_log_event=30
filter_log_threat=30
filter_log_nod=30
filter_log_parental=30
filter_log_devctl=30
filter_log_webctl=30
show_hidden_files=no
desktop_notify_enabled=no
desktop_notify_timeout=5
context_menu_enabled=no
context_menu_type=0
silent_mode_enabled=no
scan_last_profile=""
scan_last_incl=""
scan_last_excl=""
scan_last_time=0
scan_last_infected="(null)"
scan_last_vdb=""
hidden_windows="system_update,enabled:no;media_newdevice,enabled:no;"
prot_stat_display=81
[scan_smart]
av_scan_read_only=no
shutdown_after_scan=no
ctl_incl=""
ctl_excl=""
[scan_deep]
av_scan_read_only=no
shutdown_after_scan=no
ctl_incl=""
ctl_excl=""
[scan_menu]
av_scan_read_only=no
shutdown_after_scan=no
ctl_incl=""
ctl_excl=""
EOF
# Generate the script to apply user specific GUI settings
/bin/cat << EOF > "$guicfgpath/gui.sh"
#!/bin/bash
# Check if we've already applied our configuration and exit if so
if [[ -e ~/.esets/configapplied ]]; then
/usr/bin/open "/Applications/ESET Endpoint Antivirus.app"
exit 0
fi
/bin/mkdir -p ~/.esets
/bin/cp -f "$guicfgpath/gui.cfg" ~/.esets/
/usr/bin/touch ~/.esets/configapplied
/usr/bin/open "/Applications/ESET Endpoint Antivirus.app"
exit 0
EOF
/bin/chmod +x "$guicfgpath/gui.sh"
# Replace ESET's GUI LaunchAgent with our own that will run the above script
/bin/cat << EOF > "/Library/LaunchAgents/com.eset.esets_gui.plist"
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.eset.esets_gui</string>
<key>ProgramArguments</key>
<array>
<string>$guicfgpath/gui.sh</string>
</array>
<key>KeepAlive</key>
<false/>
<key>RunAtLoad</key>
<true/>
</dict>
</plist>
EOF
# Set up the GUI now if a user is logged in
if [[ "$loggedInUser" != "" ]]; then
/bin/mkdir -p /Users/"$loggedInUser"/.esets
/bin/cp -f "$guicfgpath/gui.cfg" /Users/"$loggedInUser"/.esets
/usr/bin/touch /Users/"$loggedInUser"/.esets/configapplied
/usr/sbin/chown -R "$loggedInUser":staff /Users/"$loggedInUser"/.esets
sudo -u "$loggedInUser" /usr/bin/open "/Applications/ESET Endpoint Antivirus.app"
fi
fi
exit 0 | true |
50385ac2b750ca9e7f1f825ee87c46f444fa01a9 | Shell | Tompkinsss/Mimir | /scripts/execute/slurm/tianhe2/gen_join.sh | UTF-8 | 744 | 2.53125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
scratchdir=/HOME/nudt_tgao_1/BIGDATA/
homedir=/HOME/nudt_tgao_1/
n=102400000000
m=12800000000
a=1.0
dirname=102.4b-12.8b-10-$a
node=128
proc=3072
output=$scratchdir/join/$dirname/
statfile=$scratchdir/join/$dirname-stat/
echo $statfile
export MIMIR_DBG_ALL=1
export MIMIR_OUTPUT_STAT=0
jobname=gen-join
label=empty
exe=gen_words
params="$n $output \
--stat-file $statfile \
--zipf-n $m --zipf-alpha $a \
--len-mean 8 --len-std 0 \
-withvalue --val-len 1 \
-disorder -exchange -idxunique"
statout=empty
scriptdir=/HOME/nudt_tgao_1/Mimir/scripts/execute/slurm/
$scriptdir/run.job.sh config.bigdata.h $jobname $label $node $proc $exe "$params" $statout $1
| true |
04314ec29edcbd4701efa098a2de76c2d9874096 | Shell | eludom/snippits | /bash/sort_csv.sh | UTF-8 | 1,323 | 3.875 | 4 | [] | no_license | function sort_csv() {
# Sort a csv file
#
# Perform a sort of a csv file on a single numberic column
#
# Inputs
local INPUT_FILE=$1 # file to sort
local SORT_ON=$2 # numerc field to sort
local HEADER_LINES=${3:-1} # number of header lines
# local vars
local SORT_THIS=`TMPDIR=. mktemp`
local SAVED_HEADERS=`TMPDIR=. mktemp`
local MOVE_SUCCEEDED=0 # track if we can restore from the tmp file
local RESTORE_ORIGINAL=1 # if anything goes wrong, restore original
# local CSV_HEADER_LINE=$((1+LINES_BEFORE_HEADER))
# | tee >(head -1 > $STATS_FILE) | tail -n+2 | sort -gr -k $sort_field -t,
sed -n "1,${HEADER_LINES}p" <$INPUT_FILE > $SAVED_HEADERS
mv $INPUT_FILE $SORT_THIS && \
MOVE_SUCCEEDED=1 && \
mv $SAVED_HEADERS $INPUT_FILE && \
sort -t, -gr -k $SORT_ON <(tail -n+$((1+HEADER_LINES)) $SORT_THIS) >> $INPUT_FILE && \
RESTORE_ORIGINAL=0
if ((RESTORE_ORIGINAL)); then
# do this in case anything went wrong
>&2 echo "sort_csv: something went wrong."
if ((MOVE_SUCCEEDED)); then
>&2 echo "sort_csv: restoring $INPUT_FILE from $SORT_THIS"
rm -f $INPUT_FILE
mv $SORT_THIS $INPUT_FILE
>&2 echo "sort_csv: restore succeeded"
fi
fi
}
| true |
2b304ba0317b68c7d909afb408846332ceb8e45d | Shell | xginn8/aur-bldr | /utils.sh | UTF-8 | 1,763 | 3.359375 | 3 | [] | no_license | #!/bin/zsh
get_version_info () {
if ! grep -q "^pkgname=.*-git" PKGBUILD && ! grep -q "url=.https://github.com" PKGBUILD ; then
if ! grep -q "^_github_url=.none.$" PKGBUILD ; then
exit 1
else
fi
fi
local URL
local VERSION
URL=$(awk -F= '/url=.*github.com.*/{print $2}' PKGBUILD | sed -e 's@https://github.com@https://api.github.com/repos@' -e "s/[\'\"]//g" -e 's@$@/releases/latest@' | head -n1)
VERSION=$(curl -H "authorization: bearer ${GH_TOKEN}" -qs -XGET -H "Accept: application/vnd.github.v3+json" "${URL}" | jq -r '.tag_name | sub("^v";"")' 2>/dev/null)
if [ $? -ne 0 ]; then
URL=$(awk -F= '/url=.*github.com.*/{print $2}' PKGBUILD | sed -e 's@https://github.com@https://api.github.com/repos@' -e "s/[\'\"]//g" -e 's@$@/tags@' | head -n1)
echo "${URL}"
VERSION=$(curl -H "authorization: bearer ${GH_TOKEN}" -qs -XGET -H "Accept: application/vnd.github.v3+json" "${URL}" | jq -Sr '.[].name | sub("^v";"") | match("^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$"; "g") | .string' | head -n1)
fi
local OLD_VERSION
local OLD_PKGREL
OLD_VERSION=$(awk -F= '/^pkgver=[0-9a-z\.]*$/{print $2}' PKGBUILD)
autoload is-at-least
OLD_PKGREL=$(awk -F= '/^pkgrel=[0-9\.]*$/{print $2}' PKGBUILD)
if ! grep -q "^pkgname=.*-git$" PKGBUILD && [ -n "${VERSION}" ] && ! is-at-least "${VERSION}" "${OLD_VERSION}";
then
echo "${AUR_PACKAGE}=${VERSION}"
else
echo "${AUR_PACKAGE}=${OLD_VERSION}"
fi
}
yay -G "${AUR_PACKAGE}" > /dev/null 2>&1
cd "${AUR_PACKAGE}"
get_version_info
| true |
00b20ae6a57ca488a9cd0722385e181606f150c5 | Shell | zvickery/dns-proxy | /conf/startup.sh | UTF-8 | 1,244 | 2.625 | 3 | [] | no_license | #!/usr/bin/env bash
DOMAIN1=${DOMAIN1:-'nhl.com'}
DOMAIN2=${DOMAIN2:-'mlb.com'}
DOMAIN3=${DOMAIN3:-'mlb.tv'}
DOMAIN4=${DOMAIN4:-'nhl.tv'}
DOMAIN5=${DOMAIN5:-'espn.com'}
CLIENT_IP=${CLIENT_IP:-127.0.0.1}
LOCAL_IP=${LOCAL_IP:-127.0.0.1}
PUBLIC_IP=${PUBLIC_IP:-127.0.0.1}
EXTERNAL_DNS=${EXTERNAL_DNS:-8.8.8.8}
sed -i s/DOMAIN1/${DOMAIN1}/g /etc/sniproxy.conf
sed -i s/DOMAIN2/${DOMAIN2}/g /etc/sniproxy.conf
sed -i s/DOMAIN3/${DOMAIN3}/g /etc/sniproxy.conf
sed -i s/DOMAIN4/${DOMAIN4}/g /etc/sniproxy.conf
sed -i s/DOMAIN5/${DOMAIN5}/g /etc/sniproxy.conf
sed -i '/\*/s/\./\\\./g' /etc/sniproxy.conf
sed -i s/DOMAIN1/${DOMAIN1}/g /etc/bind/zones.override
sed -i s/DOMAIN2/${DOMAIN2}/g /etc/bind/zones.override
sed -i s/DOMAIN3/${DOMAIN3}/g /etc/bind/zones.override
sed -i s/DOMAIN4/${DOMAIN4}/g /etc/bind/zones.override
sed -i s/DOMAIN5/${DOMAIN5}/g /etc/bind/zones.override
sed -i s/CLIENT_IP/${CLIENT_IP}/g /etc/bind/named.conf.local
sed -i s/LOCAL_IP/${LOCAL_IP}/g /etc/bind/named.conf.local
sed -i s/PUBLIC_IP/${PUBLIC_IP}/g /etc/bind/named.conf.local
sed -i s/PUBLIC_IP/${PUBLIC_IP}/g /etc/bind/db.override
sed -i s/EXTERNAL_DNS/${EXTERNAL_DNS}/g /etc/bind/named.conf.options
/etc/init.d/named start
exec /usr/local/sbin/sniproxy -f
| true |
fe1abe5238808f010606a9d21139550afa42d2ec | Shell | justinlevi/capacity4more | /build/travis/bin/install_drush.sh | UTF-8 | 429 | 2.703125 | 3 | [] | no_license | #!/bin/sh
set -e
# ---------------------------------------------------------------------------- #
#
# Installs Drush.
#
# ---------------------------------------------------------------------------- #
# Install Drush.
cd $TRAVIS_BUILD_DIR
composer global require drush/drush 6.*
phpenv rehash
# Create the Drush alias.
mkdir -p ~/.drush
cp $TRAVIS_BUILD_DIR/build/travis/config/aliases.drushrc.php ~/.drush/
drush --version
| true |
7df27babe229ae80b33175d65865acb17302bd70 | Shell | niallj/ImperialNESS-Sep14 | /synthetic_NEMD/2d_sims/eqlm/equilibrate.sh | UTF-8 | 420 | 2.625 | 3 | [
"MIT"
] | permissive | #PBS -N equilibrate
#PBS -l select=1:ncpus=1
#PBS -q tng
EXEC=$HOME/ImperialNESS-Sep14/synthetic_NEMD/bin/sllod_2d.exe
#copy the input file from the directory in which the script was executed to the temporary storage
cp $PBS_O_WORKDIR/input.dat $TMPDIR
#run the sllod executable
$EXEC
#copy the files generated from the temportary directory back to wherever we started from
cp -r $TMPDIR $PBS_O_WORKDIR/equilibrated
| true |
16aa35bf3a4308ced69883de3d1054315854b72f | Shell | kmmmullins/scripts | /devdeploy-v2.sh | UTF-8 | 3,384 | 3.59375 | 4 | [] | no_license | #!/bin/bash
#
# release for idd svn projects (using svn.mit.edu repository)
#
#
#
DATE=`date +%m%d%y%H%M%S`
#echo $DATE
DATEFMT="%H:%M:%S %m/%d/%Y"
p=`pwd`
d=`dirname $p`
app=`basename $p`
wappdir=`basename $d`
#export SVN_SSH="ssh -i /var/local/etc/keystores/sais.private -l isdasnap"
LOGFILE=/home/kmullins/logs/devdeploy.log
KERBSTATUS=`klist 2>/dev/null | grep Default | wc -l`
############################################
#
# Use kerb id to authenticate to svn
#
###########################################
if [ $KERBSTATUS -lt 1 ]
then
echo `date +"${DATEFMT}" ` "Username: "
read USERNAME
else
KERBNAME=`klist 2>/dev/null | grep Default | awk '{print $3}' | awk -F"@" '{print $1}'`
echo " "
echo "Authenticating with Kerb id $KERBNAME "
echo "+---------------------------------------------------------+"
USERNAME=$KERBNAME
fi
############################################
#
# SVN info, status and update
#
############################################
if [ $wappdir == "webapps" ];
then
echo " "
echo "Application Name is : $app"
echo "$app working copy SVN repository is ..."
status_cmd="svn info "
SVN_SSH="ssh -q -l $USERNAME" ${status_cmd} | grep URL
echo " "
status_cmd="svn status -u "
SVN_SSH="ssh -q -l $USERNAME" ${status_cmd} | awk '{print $1}' | grep "C"
if [ $? -lt 1 ]; then
echo " "
echo "ERROR - Conflict in svn repo for $app .... ${status_cmd}"
echo " "
exit
else
# echo " "
echo "No conflicts in $app working directory"
echo " "
fi
update_cmd="svn update "
SVN_SSH="ssh -q -l $USERNAME" ${update_cmd}
if [ $? -ne 0 ]
then
echo "problem with svn update"
exit
fi
else
echo "You must be in the application's directory in order to release it"
echo "\" $app \": is NOT a valid application."
exit
fi
############################################
#
# Ant deploy
#
############################################
echo " "
echo "Starting Build .............."
echo " "
ant deploy
if [ $? -ne 0 ]
then
echo "problem with ant deploy"
exit
fi
##########################################################
#
# Create Tags
#
#########################################################
if [ $wappdir == "webapps" ];
then
info_cmd="svn info"
#echo `date +"${DATEFMT}" ` "Username: "
#read USERNAME
#USERNAME=kmullins
REVCMD=`$info_cmd | grep Revision`
SVNREV=${REVCMD:10:5}
#echo "***** ${SVNREV} *****"
export TAGNAME="$app-$SVNREV-$DATE"
#echo ${TAGNAME}
echo "******** Creating Tag ***********"
copy_cmd="svn copy . svn+ssh://svn.mit.edu/idd/$app/tags/$TAGNAME -m \"new-$app-release\" "
SVN_SSH="ssh -q -l $USERNAME" ${copy_cmd}
echo " "
#echo "********* list **********"
list_cmd="svn list svn+ssh://svn.mit.edu/idd/$app/tags/"
SVN_SSH="ssh -q -l $USERNAME" ${list_cmd} | grep $TAGNAME
if [ $? -ne 0 ]
then
echo "There is a problem creating tag $TAGNAME"
exit
else
echo " "
echo " The Tagname for the ${app} deployment is ${TAGNAME} "
echo " "
echo "The new deployment of ${app} was successful and a new tag ... ${TAGNAME} was created" | mail -s "Deployed ${app} & new Tag is ${TAGNAME}" adm-deploy@mit.edu
exit
fi
else
echo "You must be in the application's directory in order to release it"
echo "\" $app \": is NOT a valid application."
exit
fi
| true |
e3e8aaf8457bbfab38451cfa85f6257d511b6291 | Shell | atharv-kopparthi/Raspberry-Pi-Wifi-Repeater | /sta.sh | UTF-8 | 1,613 | 3.53125 | 4 | [] | no_license | #!/bin/bash
w_if="$(iw dev | grep Interface | awk '{print $2}' | cut -d/ -f1)"
if [ -z "${w_if}" ] ; then
echo "Not found wireless interface in $(uname -a | awk '{print $2}' | cut -d/ -f1)"
exit
fi
WLAN_IF="interface ${w_if}"
WLAN_IP="static ip_address=10.0.0.1"
if [ -z "$1" ] && [ -z "$2" ]
# No ssid, no pass
then
echo "Connect using current information"
else
# have ssid, no pass
if !([ -z "$1" ]) && [ -z "$2" ] ; then
echo "Connecting to Open SSID $1"
a="\"$1\""
b="NONE"
sudo sed -i -e "s/\(ssid=\).*/\1$a/" /etc/wpa_supplicant/wpa_supplicant.conf
sudo sed -i -e "s/\(key_mgmt=\).*/\1$b/" /etc/wpa_supplicant/wpa_supplicant.conf
else
#have ssid. have pass
echo "Connecting to ssid:$1, pass:$2"
a="\"$1\""
b="\"$2\""
c="WPA-PSK"
sudo sed -i -e "s/\(ssid=\).*/\1$a/" /etc/wpa_supplicant/wpa_supplicant.conf
sudo sed -i -e "s/\(psk=\).*/\1$b/" /etc/wpa_supplicant/wpa_supplicant.conf
sudo sed -i -e "s/\(key_mgmt=\).*/\1$c/" /etc/wpa_supplicant/wpa_supplicant.conf
fi
fi
sudo iptables -t nat -F POSTROUTING
sudo iptables -F FORWARD
sudo sed -i "/${WLAN_IF}/d" /etc/dhcpcd.conf
sudo sed -i "/${WLAN_IP}/d" /etc/dhcpcd.conf
sudo service wpa_supplicant stop
sudo service hostapd stop
sudo service udhcpd stop
sudo service dhcpcd stop
sudo killall -9 wpa_supplicant
sudo killall -9 udhcpd
sudo ifconfig ${w_if} up
sudo ip addr flush dev ${w_if}
sudo service wpa_supplicant restart
sudo wpa_supplicant -c /etc/wpa_supplicant/wpa_supplicant.conf -i ${w_if} &
sudo service dhcpcd start
sudo update-rc.d hostapd disable
sudo update-rc.d udhcpd disable
sudo update-rc.d dhcpcd enable
| true |
334ba7f2f1ae4eaaff4ed771fc2403d2972813b9 | Shell | SethTisue/coursier | /project/generate-standalone-launcher.sh | UTF-8 | 409 | 3.078125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
cd "$(dirname "$0")/.."
if [ ! -e cli/target/scala-2.11/proguard/coursier-standalone.jar ]; then
echo "Generating proguarded JAR..." 1>&2
sbt cli/proguard:proguard
fi
cat > coursier-standalone << EOF
#!/bin/sh
exec java -noverify -cp "\$0" coursier.cli.Coursier "\$@"
EOF
cat cli/target/scala-2.11/proguard/coursier-standalone.jar >> coursier-standalone
chmod +x coursier-standalone
| true |
86593b08fce52791533b8f6191a7cf9f645febc1 | Shell | microscum/WebLogic_Notes | /practices/tune/practice04-01/resources/setUserOverrides.sh | UTF-8 | 1,673 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# ------------------------------------------------------------------------
# -- DISCLAIMER:
# -- This script is provided for educational purposes only. It is NOT
# -- supported by Oracle World Wide Technical Support.
# -- The script has been tested and appears to work as intended.
# -- You should always run new scripts on a test instance initially.
# --
# ------------------------------------------------------------------------
#This script sets new custom variables for starting domains
#AdminServer uses default settings, while managed servers use custom settings
if [ "${SERVER_NAME}" != "AdminServer" ]; then
##############################################################################
# Comment out what is not needed and uncomment settings for the current step #
##############################################################################
#export USER_MEM_ARGS="-Xms50m -Xmx50m -XX:MaxPermSize=256m"
#export USER_MEM_ARGS="-Xms90m -Xmx90m -XX:MaxPermSize=256m"
#Set GC logging
#JAVA_OPTIONS+=" -XX:+PrintCommandLineFlags"
#JAVA_OPTIONS+=" -XX:+PrintGC"
#JAVA_OPTIONS+=" -XX:+PrintGCDetails"
#JAVA_OPTIONS+=" -XX:+PrintGCTimeStamps"
#JAVA_OPTIONS+=" -Xloggc:/tmp/gc.log"
#JAVA_OPTIONS+=" -verbose:gc"
#Set GC type
#JAVA_OPTIONS+=" -XX:+UseSerialGC"
#JAVA_OPTIONS+=" -XX:+UseParallelGC"
#JAVA_OPTIONS+=" -XX:+UseParallelOldGC"
#JAVA_OPTIONS+=" -XX:+UseParNewGC"
#JAVA_OPTIONS+=" -XX:+UseConcMarkSweepGC"
#JAVA_OPTIONS+=" -XX:+UseG1GC"
#JAVA_OPTIONS+=" -XX:+UnlockCommercialFeatures"
#JAVA_OPTIONS+=" -XX:+FlightRecorder"
#export JAVA_OPTIONS
fi
| true |
227e8c01714918a407381664cf283edecd0c9fdc | Shell | javpicorel/memory_tool | /release | UTF-8 | 3,636 | 2.703125 | 3 | [] | no_license | #!/bin/bash
export RM="rm -rf"
set -x
RELEASE=$PWD/../release
if [ -e $RELEASE ]; then
echo "$RELEASE directory exists"
exit
fi
rsync -a $PWD/ $RELEASE/
cd $RELEASE/
$RM TODO all_haps.py asm_regress asm_regress_all *.trace.cfg db.debug.cfg interactive postgres-debug.cfg regress_workarea regress_workarea_ckpt todo.txt
$RM doxygen.config.template run_workload
$RM experimental lordalyzer order order_hw protocol_engines_stand_alone sordalyzer sordperconsalyzer stridalyzer trace-dgp nucalyze nucalyzer
$RM debug.cfg insert-copyright splash.debug.cfg simprint.h
$RM simics
$RM components/Consort components/DGP components/Decode components/DirTracker components/EnhancedSordTraceMemory components/FcmpCache components/InorderTraceFeeder components/MRCReflector components/MRP components/MemHarness components/Network components/PrefetchBuffer components/PrefetchListener components/RandomFeeder components/Rdram components/RealExecute components/SMTFetch components/SimicsFeeder components/SimicsMPTraceFeeder components/SimicsMemory components/SimicsTraceFeeder components/SordManager components/SordPredictor components/SordReceiver components/SordTraceMemory components/TRUSS_BWFetch components/TraceFeeder components/Truss* components/VirtutechTraceFeeder components/X86SimicsFeeder components/CacheTraceMemory components/CmpMCNetworkInterface
$RM components/PowerTracker
$RM components/OrderHW components/PerfectPlacement
$RM components/ReuseDistance components/MissClassifier components/SMTFetch components/SVB
$RM components/SimplePrefetchController components/SpatialPrefetcher
$RM components/StreamController components/FastSpatialPrefetcher components/StridePrefetcher
$RM components/uArch/CoreModel/bbv.?pp
$RM components/CMOB
$RM components/TMS*
$RM components/uArch/CoreModel/purge.cpp
cd components
cd Common
$RM OneWayMux.hpp OneWayMuxImpl.*
$RM SetAssoc*.hpp
$RM JustMissedYou.hpp
$RM Slices/MRCMessage.?pp
$RM Slices/PrefetchCommand.?pp
$RM Slices/PrefetchMessage.?pp
$RM Slices/PredictorMessage.?pp
$RM Slices/PerfectPlacementSlice.?pp
$RM Slices/ReuseDistanceSlice.?pp
$RM Transports/PrefetchTransport.?pp
$RM Transports/PredictorTransport.?pp
cd ..
cd ..
cd components
cd Cache
$RM FCMPCacheControllerImpl.*
cd ..
cd ..
cd core
$RM doxygen.config
$RM fast_alloc.?pp
$RM debug.cfg
cd ..
cd simulators
$RM chimaera cyclops dirtest echidna enhsorder eyeball gorgon gorgon16 hydra hydricache incubus kraken mimic mrptracer networktest octopus platt ptap simicsmptracer simicstracer siren siren8 slooo sorder sphinx sphinx8 succubus tracenhsorder tracer unikrak unimp unimp2 vampire vampire16 wonderpus x86hydra x86siren x86sphinx
$RM OrderHWFlex TSEFlex TraceSGPFlex TraceCMTFlex TraceDSMCMPFlex Trace2SGPFlex minotaur minotaur2 SGPDSMFlex SGPDSMFlex.OoO SGP2DSMFlex.OoO OrderHWCMPFlex TMSCMPFlex.OoO TraceTMSCMPFlex CMTNUCAFlex.OoO TraceCMPPerfFlex TraceCMPPrivateFlex
$RM CMTFlex*
cd ..
$RM stat-manager/stats_db.out stat-manager/cache-excel.rpt stat-manager/time-breakdown.rpt stat-manager/*stats_o
$RM run_job.faq
mv -f makefile.pristine makefile.defs
# For all files, remove CMU only blocks
FILES=`find . -name "*.hpp" -o -name "*.cpp"`
for file in $FILES; do
sed -e "/CMU-ONLY-BLOCK-BEGIN/,/CMU-ONLY-BLOCK-END/ d" < $file > $file.tmp
sed -e "/ CMU-ONLY / d" < $file.tmp > $file
rm $file.tmp
done
# For all directories, remove .svn directory
DIRS=`find . -name ".svn"`
for dir in $DIRS; do
$RM $dir
done
# chmod a-x Makefile cmu-copyright
# chmod a-x *.*
# chmod a-x */*.*
# chmod a-x */*/*.*
# chmod a-x */*/*/*.*
# chmod a-x */*/*/*/*.*
# chmod a+x simulators/*.OoO
# chmod a-x simulators/*/*
| true |
b52dee638bfb8adae5411542f35a3cfc3e83e42f | Shell | jdelkins/puttyclip | /rdb/mk_pcodes.sh | UTF-8 | 1,974 | 3.625 | 4 | [
"MIT"
] | permissive | #!/bin/ksh
TMP=/tmp/mk_x
[ "$1" = "" ] && LOCAL=0.66-rdb
[ -e ${TMP}1 -o -e ${TMP}2 -o -e ${TMP}3 -o -e ${TMP}4 -o -e ${TMP}5 -o -e x.ed ] &&
{ echo Temp file exists ; exit 1; }
: > x.ed
j=0.45
echo 0i > x.ed
expand PuTTY-Codes-$j.txt >> x.ed
echo . >> x.ed
for i in 0.49 0.50 0.52 0.53 0.54 0.58 0.63 ${LOCAL}
do
GL="`git log -1 $i --format=$i\ --\ %ai 2>/dev/null`"
if [ "$GL" != "" ]
then echo "VERSION $GL"
else echo "VERSION $i"
fi
x="$i"
[ "$x" = "0.66-rdb" ] && x=RDB
expand PuTTY-Codes-$j.txt |
awk > ${TMP}1 -v i=$x \
'{
str=$0
if (str != "") str=substr($0 " ",1,76) i
print str;
}'
expand PuTTY-Codes-$i.txt |
awk > ${TMP}2 -v i=$x \
'{
str=$0
if (str != "") str=substr($0 " ",1,76) i
print str;
}'
diff -e ${TMP}1 ${TMP}2 >> x.ed
diff -e PuTTY-Codes-$i.txt PuTTY-Codes-$j.txt | grep '^..........' > ${TMP}5
if [ -s ${TMP}5 ]
then
{
echo
echo BEFORE VERSION $i
cat ${TMP}5
[ -f ${TMP}3 ] && cat ${TMP}3
} > ${TMP}4
mv ${TMP}4 ${TMP}3
fi
j=$i
done
echo 1i >> x.ed
cat >> x.ed <<\!
ANSI and VTxxx codes understood by PuTTY
The basic emulation for PuTTY is that of an eight bit VT102 with
cherry picked features from other ANSI terminals. As such it only
understands seven bit ECMA-35 but adds onto that numerous 8 bit
"codepages" and UTF-8.
PuTTY Releases
!
for i in 0.45 0.49 0.50 0.52 0.53 0.54 0.58 0.63 ; do echo ' '$i `git log -1 $i --format=%ai | sed 's/..:.*//'` ; done >> x.ed
echo >> x.ed
echo . >> x.ed
echo wq PuTTY-Codes.txt >> x.ed
ed < x.ed
{
echo
echo Items changed from previous versions
echo ------------------------------------
cat ${TMP}3
} >> PuTTY-Codes.txt
rm ${TMP}1 ${TMP}2 ${TMP}3 ${TMP}5 x.ed
| true |
8b69cdcbc5faca750f915ff8d0b4533a981a88cc | Shell | lovefcaaa/snk.dev-assistant | /data/snippets/github.com/MephistoMMM/mdts/src/mdts/build.sh | UTF-8 | 474 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Usage:
# ./build.sh <app_name>
DOCKER_USER=mpsss
PRO_ROOT=${PWD}
if [ "$1" == "" ] || [ "$2" == "" ]; then
echo "Error: Lack Argument <app_dir> <app_name>"
exit 1
fi
PLAYER_DIR=$1
PLAYER_NAME=$2
if [ "$3" == "" ]; then
VERSION="latest"
else
VERSION=$3
fi
cd $PLAYER_DIR
go build -o app
cd $PRO_ROOT
docker build --build-arg PLAYER=$PLAYER_DIR -t $PLAYER_NAME:$VERSION .
docker tag $PLAYER_NAME:$VERSION $DOCKER_USER/$PLAYER_NAME:$VERSION
| true |
75c2716d3944a2f54a420effff9ee550effd8f9d | Shell | denanokk-hmt/sureshotsystem | /CGI/SESSION/SESSION_CHECK | UTF-8 | 1,143 | 3.25 | 3 | [] | no_license | #!/bin/bash -vx
#
########################################
# SESSION_CHECK.CGI
#
# Arguments
# $1:session_id
# $2:session lifetime (default 15min)
#
# Written by hiramatsu
########################################
#基本設定
SYSD=/home/sureshotsystem
#exec 2> ${SYSD}/LOG/LOG.$(basename $0).$(date +%Y%m%d).$(date +%H%M%S).$$
tmp=/tmp/tmp_$$
LANG=ja_JP.UTF-8
PATH=/home/:/home/UTL:/usr/local/bin:/home/TOOL/open-usp-tukubai-2014061402/COMMANDS:$PATH
SESSION_FILED=${SYSD}/SESSION
LIFETIME_MIN=15
#COOKIE_LIFETIME_MIN=15
#######################################
#Set SESSION LIFE TIME
S=$2
SESS_LIFE=${2:-"$LIFETIME_MIN"}
#######################################
#Get session
SESS=$(session_get $1)
#######################################
#Check the session
if [ -n "${SESS}" ] ; then
if [ $(find "${SESSION_FILED}" -mmin -"${SESS_LIFE}" -name "${SESS}" -type f) ]; then
SESS_STATUS='live'
else
SESS_STATUS='expire'
fi
else
SESS_STATUS='none'
fi
echo ${SESS_STATUS} > $tmp-sess-status
#######################################
#Return session status
echo ${SESS_STATUS}
rm -f $tmp-*
exit 0
| true |
f10d97b8aa0d42c80a51d735dd1c97d31a83e0e9 | Shell | dkuspawono/puppet | /modules/puppetmaster/templates/puppet-merge.erb | UTF-8 | 6,246 | 4.21875 | 4 | [] | no_license | #!/bin/sh
# TODO
# - add locking with flock so that multiple users can't run this script simultaneously.
set -e
# Colorized output helpers
RED=$(tput bold; tput setaf 1)
GREEN=$(tput bold; tput setaf 2)
MAGENTA=$(tput bold; tput setaf 9)
RESET=$(tput sgr0)
FORCE=0
USAGE=0
QUIET=0
BASEDIR="/var/lib/git/operations/puppet"
CONFFILE="/etc/puppet-merge.conf"
usage="$(basename ${0}) [-y|--yes] [SHA1]
Fetches changes from origin and from all submodules.
Shows diffs between HEAD and FETCH_HEAD, including diffs
of submodule changes.
If the changes are acceptable, HEAD will be fast-forwarded
to FETCH_HEAD.
It also runs the conftool merge if necessary.
SHA1 equals HEAD if not specified
"
TEMP=$(getopt -o yhq --long yes,help,quiet -n "$0" -- "$@")
if [ $? != 0 ] ; then echo "Terminating..." >&2 ; exit 1 ; fi
eval set -- "$TEMP"
while true; do
case "$1" in
-y|--yes) FORCE=1; shift ;;
-h|--help) USAGE=1; shift ;;
-q|--quiet) QUIET=1; shift ;;
--) shift ; break ;;
*) echo "Internal error!"; exit 1 ;;
esac
done
if [ $USAGE -eq 1 ]; then
echo "$usage" && exit 0;
fi
if [ -f $CONFFILE ]; then
. $CONFFILE
fi
sha1=${1:-}
git_user='gitpuppet'
# We might be running this as root, or as gitpuppet (from an ssh session to a backend)
running_user=`whoami`
# Default to /var/lib/git/operations/puppet unless $1 is specified
cd "${BASEDIR}"
echo "Fetching new commits from $(git config --get remote.origin.url)"
if [ $running_user = $git_user ]; then
cd ${BASEDIR} && git fetch
else
su - $git_user -c "cd ${BASEDIR} && git fetch"
fi
if [ -z ${sha1} ]; then
fetch_head_sha1=$(git rev-parse FETCH_HEAD)
else
fetch_head_sha1=$sha1
fi
submodule_changes="$(git diff --submodule=log HEAD..${fetch_head_sha1} | grep -e '^Submodule ')" || true
# Exit if there are no changes to merge.
if [ -z "$(git diff HEAD..${fetch_head_sha1})" -a -z "${submodule_changes}" ]; then
echo "No changes to merge."
exit 0
fi
echo "HEAD is $(git rev-parse HEAD), FETCH_HEAD is ${fetch_head_sha1}. Reviewing changes..."
# If there are no submodule changes, then just
# show diff of operations/puppet superproject
if [ -z "${submodule_changes}" ]; then
if [ $QUIET -eq 0 ]; then
git diff --color HEAD..${fetch_head_sha1} | cat
fi
else
if [ $QUIET -eq 0 ]; then
# Else there are submodule changes.
# Do some fancy stuff to show diffs
# of submodule changes.
# clone the puppet working directory to a temp directory, excludiing private/
tmpdir=$(mktemp -d /tmp/puppet-merge.XXXXXXXXXX)
git clone --quiet "${BASEDIR}" "${tmpdir}"
# merge and update submodules in $tmpdir
(cd "${tmpdir}" && \
git merge --quiet --ff-only "${fetch_head_sha1}" && \
git submodule update --quiet --init)
# show the diff between the $BASEDIR puppet directory and the $tmpdir puppet directory
if [ $QUIET -eq 0 ]; then
diff -uNr -x "private" -x ".git" "${BASEDIR}" "${tmpdir}" || true
fi
# We've shown the diff so we are done with $tmpdir. Remove it.
rm -rf "${tmpdir}"
fi
fi
# If cool, merge it!
echo ""
echo ""
echo "------------------------------"
if [ $QUIET -eq 0 ]; then
git log HEAD..${fetch_head_sha1} --format="${MAGENTA}%cn${RESET}: %s (%h)"
fi
ncommiters=$(git log HEAD..${fetch_head_sha1} --format=%ce | sort -u | grep -v 'gerrit@wikimedia.org' | wc -l)
expect="yes"
if [ $ncommiters -ne 1 ]; then
echo "${RED}WARNING${RESET}: Revision range includes commits from multiple committers!"
expect="multiple"
fi
confirmed=0
if [ $FORCE -eq 1 ]; then
confirmed=1
else
echo -n "Merge these changes? (${expect}/no)? "
read answer
if [ "x${answer}" = "x${expect}" ]; then
confirmed=1
elif [ "x${expect}" = "xyes" ] && [ "x${answer}" = "xy" ]; then
confirmed=1
fi
fi
if [ "${confirmed}" -eq "1" ]; then
echo "Merging ${fetch_head_sha1}..."
cmd="git merge --ff-only ${fetch_head_sha1}"
echo "${cmd}"
if [ $running_user = $git_user ]; then
cd ${BASEDIR} && ${cmd}
else
su - $git_user -c "cd ${BASEDIR} && ${cmd}"
fi
# If there were submodule changes update and init them
if [ -n "${submodule_changes}" ]; then
echo "Updating submodules..."
cmd="git submodule update --init --no-fetch"
echo "${cmd}"
if [ $running_user = $git_user ]; then
cd ${BASEDIR} && ${cmd}
else
su - $git_user -c "cd ${BASEDIR} && ${cmd}"
fi
fi
# git clean to remove any untracked
# (submodule) files and directories
echo "Running git clean to clean any untracked files."
cmd="git clean -dffx -e /private/"
echo "${cmd}"
if [ $running_user = $git_user ]; then
cd ${BASEDIR} && ${cmd}
else
su - $git_user -c "cd ${BASEDIR} && ${cmd}"
fi
echo "HEAD is now $(git rev-parse HEAD)."
cd -
else
echo "Aborting merge."
cd -
exit 1
fi
<%- if @servers.has_key?(@fqdn) -%>
# Note: The "true" command is passed on purpose to show that the command passed
# to the SSH sessions is irrelevant. It's the SSH forced command trick on the
# worker end that does the actual work. Note that the $sha1 however is important
if [ -z ${sha1} ]; then # Only loop through the other servers if called without sha1
<%- @servers.each do |frontend, workers| -%>
<%- workers.map{ |w| w['worker']}.select{|name| name != @fqdn}.each do |worker| -%>
if [ $running_user = $git_user ]; then
ssh -t -t <%= worker -%> true ${fetch_head_sha1} 2>&1
else
su - $git_user -c "ssh -t -t <%= worker -%> true ${fetch_head_sha1} 2>&1"
fi
if [ $? -eq 0 ]; then
echo "${GREEN}OK${RESET}: puppet-merge on <%= worker %> succeded"
else
echo "${RED}ERROR${RESET}: puppet-merge on <%= worker %> failed"
fi
<%- end -%>
<%- end -%>
# avoid a syntax error if this list is empty
true
fi
# conftool-merge does need to run from >1 frontend, avoid running a second time
if [ $running_user != $git_user ]; then
echo "Now running conftool-merge to sync any changes to conftool data"
/usr/local/bin/conftool-merge
fi
<%- end -%>
| true |
c03bd36445f4a6f0f7d11c87cb22348bd8e5e8b1 | Shell | dualsight/corepay | /index.js | UTF-8 | 3,428 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
":" //; exec /usr/bin/env node "$0" "$@"
const config = require('./lib/config')
const screen = require('./lib/screen')
const glob = require('glob')
const path = require('path')
const Loki = require('lokijs')
const lfsa = require('lokijs/src/loki-fs-structured-adapter')
const adapter = new lfsa()
const autosaveInterval = 1000
const loki = new Loki(
path.join(__dirname, '.', 'storage', 'database', 'index.db'),
{
adapter,
autoload: true,
autoloadCallback: init,
autosave: true,
autosaveInterval
}
)
const states = require('./lib/states')
const enabledCores = []
let server
let ready
let shuttingDown
function init () {
if (!config) process.exit(1)
server = require('./lib/server').server
const sequence = []
const files = glob.sync(path.join(__dirname, '.', 'core', '*', 'index.js'))
require('./lib/db')(loki)
screen.success('Initialized DB')
screen.info('Loading asset cores...')
for (const file of files) {
const coreIdentifier = path.basename(path.dirname(file))
const core = require(path.resolve(file))
if (config.cores[coreIdentifier].enabled) {
enabledCores.push(coreIdentifier)
sequence.push(
core.boot()
.then(version => {
if (version) {
screen.success('[\u2714]', coreIdentifier, '->', version)
} else {
screen.error('[\u2757]', coreIdentifier, '->', version)
}
return version
})
)
} else {
screen.warn('[\u2718]', coreIdentifier)
}
}
Promise.all(sequence)
.then((results) => {
if (results.some(r => !r)) {
screen.error('Failed to boot some asset cores! Terminating...')
process.exit(1)
} else {
screen.info('Done loading asset cores')
server.listen(config.server.port, () => {
screen.info('Bound HTTP server to port', config.server.port)
for (const core of enabledCores) {
states.running[core] = false
}
ready = true // allow graceful exit execution
if (process.send) process.send('ready') // for PM2
})
}
})
.catch(err => {
screen.error(err)
})
}
function gracefulExit (signal) {
if (shuttingDown) {
return
} else shuttingDown = true
let retry = true
// stall until `ready = true`
setInterval(() => {
if (retry && ready) {
retry = false
screen.warn('Exit sequence initiated by signal:', signal)
server.close((err) => {
screen.warn(
`[1/2] ${err ? 'Ungracefully' : 'Gracefully'} killed HTTP server on port`,
config.server.port
)
setTimeout(() => {
for (const core of enabledCores) {
states.shuttingDown[core] = true
}
setInterval(() => {
if (
!enabledCores
.map(core => states.readyToShutdown[core])
.includes(undefined)
) {
screen.warn('[2/2] Shut down all asset cores')
screen.warn('Exit.')
process.exit(err ? 1 : 0)
}
}, 1000)
}, 1000 + autosaveInterval)
})
}
}, 1000)
}
process.on('SIGTERM', () => gracefulExit('SIGTERM'))
process.on('SIGINT', () => gracefulExit('SIGINT'))
process.on('message', (msg) => {
if (msg === 'shutdown') gracefulExit('SIGINT')
})
| true |
cc3a0d716c4502c8cf358f6b92a2817f1e2b7a3e | Shell | Deep-Spark/DeepSparkHub | /nlp/language_model/bert/tensorflow/base/init.sh | UTF-8 | 2,450 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright (c) 2023, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
cd ../../../../../../data/model_zoo/
echo "check pretrained model..."
extract=1
pretrained_model_files=("bert_config.json" "model.ckpt-28252.data-00000-of-00001" "model.ckpt-28252.index" "model.ckpt-28252.meta" "vocab.txt")
for file in ${pretrained_model_files[*]}
do
if [[ ! -f bert_pretrain_tf_ckpt/${file} ]]; then
echo "bert_pretrain_tf_ckpt"/${file}" not exist"
extract=0
fi
done
if [[ $extract -eq 0 ]]; then
tar zxvf bert_pretrain_ckpt_tf.tar.gz
fi
cd ../datasets
echo "check datasets..."
files=("bert_pretrain_tf_records/train_data/part-00015-of-00500" "bert_pretrain_tf_records/train_data/part-00014-of-00500" "bert_pretrain_tf_records/train_data/part-00013-of-00500" "bert_pretrain_tf_records/train_data/part-00012-of-00500" "bert_pretrain_tf_records/train_data/part-00011-of-00500" "bert_pretrain_tf_records/train_data/part-00010-of-00500" "bert_pretrain_tf_records/train_data/part-00009-of-00500" "bert_pretrain_tf_records/train_data/part-00008-of-00500" "bert_pretrain_tf_records/train_data/part-00007-of-00500" "bert_pretrain_tf_records/train_data/part-00006-of-00500" "bert_pretrain_tf_records/train_data/part-00005-of-00500" "bert_pretrain_tf_records/train_data/part-00004-of-00500" "bert_pretrain_tf_records/train_data/part-00003-of-00500" "bert_pretrain_tf_records/train_data/part-00002-of-00500" "bert_pretrain_tf_records/train_data/part-00000-of-00500" "bert_pretrain_tf_records/train_data/part-00001-of-00500" "bert_pretrain_tf_records/eval_data/eval_tfrecord")
extract=1
for file in ${files[*]}
do
if [[ ! -f ${file} ]]; then
echo ${file}" not exist"
extract=0
fi
done
if [[ $extract -eq 0 ]]; then
echo "tar zxvf bert_pretrain_tf_records.tar.gz"
tar zxvf bert_pretrain_tf_records.tar.gz
fi
| true |
a76b03a5838444e29d53bf3b98bcd6aa62b69b0e | Shell | JinfengChen/10x | /bin/contig_gragh/fairchild_graph_57G_chr7_bwa_merge/Step0_map_bwa.sh | UTF-8 | 1,182 | 2.59375 | 3 | [] | no_license | #!/bin/bash
#SBATCH --nodes=1
#SBATCH --ntasks=24
#SBATCH --mem=40G
#SBATCH --time=40:00:00
#SBATCH --output=Step0_map_bwa.sh.stdout
#SBATCH -p intel
#SBATCH --workdir=./
#sbatch --array 1-47 Step0_map_blasr_array.sh
start=`date +%s`
CPU=$SLURM_NTASKS
if [ ! $CPU ]; then
CPU=2
fi
N=$SLURM_ARRAY_TASK_ID
if [ ! $N ]; then
N=1
fi
echo "CPU: $CPU"
echo "N: $N"
module load blasr/5.2
module load bwa/0.7.15
#ref=Fairchild.fasta_contig.fasta
#ref=Fairchild_chr5.fasta_contig.fasta
#ref=Fairchild_chrUN.fasta_contig.fasta
#ref=Fairchild_chr8.fasta_contig.fasta
#ref=fairchild_graph_57G_chr7_bwa_merge/Final_Ref.fasta
#reads=pool_asm.test.10kb.fasta_contig.fasta
#reads=test1a.fq_split_asm_merge.10kb.fasta_contig.fasta
ref=Final_Ref.fasta
#run split fasta before run this shell
#perl fastaDeal.pl --cuts 1000 $reads
bwa index $ref
python /rhome/cjinfeng/BigData/software/bin/fasta2fastq.py $ref $ref\.fq
bwa mem -t $CPU $ref $ref\.fq > $ref\.sam
perl /rhome/cjinfeng/BigData/00.RD/Assembly/10xgenomics/tools/ncomms15324-s10/sam2blasr.pl $ref\.sam $ref\.m1
end=`date +%s`
runtime=$((end-start))
echo "Start: $start"
echo "End: $end"
echo "Run time: $runtime"
echo "Done"
| true |
5b0e7a82ae77bf15f6790db9adaf4e5efb115ad5 | Shell | travisschilling/bash-function-menu | /menu_script.bash | UTF-8 | 3,645 | 4.34375 | 4 | [] | no_license | #!/bin/bash
#Travis Schilling
#12/2/16
#This is a menu system that executes basic commands based on user input.
#initialize varaibles
calYear='1'
calMonth='12'
menuNum='0'
viFile='1'
newDir='a'
grepResult='b'
emailUser='c'
fileNewness='Y'
menuList()
# display a list of options for the user
{
until [ "$menuNum" = "9" ]
do
echo
echo "Welcome to Travis' main menu"
echo
echo "1 -- Display users currently logged in"
echo "2 -- Display a calendar for a specific month and year"
echo "3 -- Display the current directory path"
echo "4 -- Change directory"
echo "5 -- Long listing of visible files in the current directory"
echo "6 -- Display current time and date and calendar"
echo "7 -- Start the vi editor"
echo "8 -- Email a file to a user"
echo "9 -- Quit"
echo
# prompt for input
echo -n "Please select a number: "
read menuNum
case $menuNum in
1)
usersOnline
;;
2)
userCalendar
;;
3)
currentDirectory
;;
4)
changeDirectory
;;
5)
longList
;;
6)
dateCal
;;
7)
startVi
;;
8)
emailer
;;
9)
#exit program due to condition being fulfilled in loop
;;
*)
echo
#error message if 1-9 not entered
echo ""$menuNum" is out of the range 1-9. Going back to menu."
echo
pressKey
esac
done
}
pressKey()
# prompts user for input before going back to menu
{
echo
read -n 1 -s -p "Press any key to continue..."
clear
}
usersOnline()
# show users currently logged into the system
{
echo
who | more
pressKey
}
userCalendar()
# display user-specified month and year
{
echo "Enter a year between 1 and 9999: "
read calYear
if [ $calYear -le 9999 -a $calYear -ge 1 ] ; then
echo "Enter a month between 1 and 12: "
read calMonth
if [ $calMonth -le 12 -a $calMonth -ge 1 ] ; then
cal "$calMonth" "$calYear"
pressKey
else
echo "Out of range value. Going back to menu."
pressKey
fi
else
echo "Out of range value. Going back to menu."
pressKey
fi
}
currentDirectory()
# displays current directory
{
echo
pwd
pressKey
}
changeDirectory()
{
# changes current directory to desired different directory
echo "What directory do you want to go to?"
read -e newDir
if [ "$newDir" == " " ]
then
cd ~/
else
eval cd "$newDir"
pwd
pressKey
fi
}
longList()
# displays detailed view of files in current directory
{
echo
ls -l | more
pressKey
}
dateCal()
# displays current date, time, and calendar month
{
echo
date
echo
cal
pressKey
}
startVi()
#opens up vi to edit existing named file or create new named file
{
echo "Are you creating a new file? Type Y or N: "
read fileNewness
case $fileNewness in
y|Y)
echo "Enter a new file name: "
read viFile
vi "$viFile"
pressKey
;;
n|N)
echo "Enter an existing text file name to edit in vi: "
read viFile
if [[ $(file "$viFile" | grep "ASCII") ]];
then
vi "$viFile"
pressKey
else
echo "Error: "$viFile" is not a text file."
pressKey
fi
;;
*)
echo "Input not Y or N. Heading back to menu."
pressKey
esac
}
emailer()
{
echo "This program will allow you to send an email to a user."
echo "You can also send a file in the email."
echo "Enter a valid username to send the email to: "
# prompt for subject name
read emailUser
if grep "$emailUser" /etc/passwd
then
#prompt for subject of the email
echo "Enter the subject of the email: "
read emailMessage
echo
#prompt for file name
echo -n "Enter the name of the file to be attached: "
read emailFile
if [[ $(file "$emailFile" | grep "ASCII") ]];
then
mail -s "$emailMessage" "$emailUser"<"$emailFile"
echo
echo "Email sent successfully."
else
echo "That is not a valid file."
fi
else echo "That is not a valid user in the system."
fi
}
menuList
| true |
1a4a3d0c0b3172a172f2ee0eb289543f6c525c69 | Shell | Firestar99/studyTEI2 | /src/create | UTF-8 | 472 | 4.09375 | 4 | [] | no_license | #!/bin/bash
scriptDir="$(dirname "$0")"
projectName=$1
if [[ $projectName == "" ]]
then
echo "No project dir specified!"
exit 1
fi
projectPath="$scriptDir/$projectName"
if [ -d $projectPath ]
then
echo "Project Directory $projectPath not empty!"
exit 1
fi
echo "Creating Project Named $projectName"
mkdir -p $projectPath
mkdir -p $projectPath/src
pathToScript="$(realpath --relative-to=$projectPath $scriptDir)"
ln -s $pathToScript/build $projectPath/build
| true |
b1d421157ecaeb9948fdb8fa62aee9925f75f6a0 | Shell | ausfestivus/cloud-scripts | /install_confluence.sh | UTF-8 | 2,227 | 3.53125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# install Confluence
# variables
# Currently all static here, will be overidable by CLI in the future.
echo "Reading global variables...." >&2
# local variables
export RESPONSEFILE="response.varfile"
export ATLASSIANROOT=/opt
#export ATLASSIANVAR=$ATLASSIANROOT/var/atlassian
# preflights
# check that /opt is mounted/exists.
if [ ! -e $ATLASSIANROOT ] # if /opt doesnt exist
then
echo "$ATLASSIANROOT was not found. Dying."; exit
else
echo "$ATLASSIANROOT was found. Continuing."
fi
# check that /opt/var/atlassian exists.
# if [ ! -e $ATLASSIANVAR ] # if /opt/var/atlassian doesnt exist
# then
# echo "$ATLASSIANVAR was not found. Creating it."
# sudo mkdir -p $ATLASSIANVAR || exit # create our symlink in /var/atlassian to /opt/var/atlassian
# # ln [OPTION]... [-T] TARGET LINK_NAME
# sudo ln -s /opt/var/atlassian /var/atlassian || exit
# else
# echo "$ATLASSIANVAR already exists. Continuing."
# fi
# check that our DB server is available
# TODO
# be in the users homedir
cd ~ || exit
# create our varfile contents for the install
cat > ~/$RESPONSEFILE <<EOL
executeLauncherAction$Boolean=true
app.install.service$Boolean=true
sys.confirmedUpdateInstallationString=false
existingInstallationDir=/opt/Confluence
sys.languageId=en
sys.installationDir=/opt/atlassian/confluence
EOL
# download the binary
# Base URL for download can always be found under https://www.atlassian.com/software/confluence/download
#wget -q https://www.atlassian.com/software/confluence/downloads/binary/atlassian-confluence-6.7.0-x64.bin
# 20180403 updated to 6.8.0
#wget -q https://www.atlassian.com/software/confluence/downloads/binary/atlassian-confluence-6.8.0-x64.bin
# 20180701 updated to 6.10.0
#wget -q https://www.atlassian.com/software/confluence/downloads/binary/atlassian-confluence-6.10.0-x64.bin
# 20180722 updated to 6.10.1
wget -q https://www.atlassian.com/software/confluence/downloads/binary/atlassian-confluence-6.10.1-x64.bin
# fix its perms
sudo chmod 755 ./atlassian-confluence-6.10.1-x64.bin
# run it as root with the answer file
sudo ./atlassian-confluence-6.10.1-x64.bin -q -varfile response.varfile
# drop our DB config into place
# CLI to retrieve the connection string for a DB?
| true |
05f0f2f2f4c63d0422d42a069363cddc657221fe | Shell | SUSE-Cloud/automation | /scripts/jenkins/cloud/ansible/roles/ardana_qe_tests/templates/tests/getput.sh.j2 | UTF-8 | 1,727 | 3.625 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Wrapper script to execute getput in a venv
#
# Usage: getput.sh
set -o pipefail
work_dir={{ ardana_qe_test_work_dir }}
test_log="$work_dir/getput.log"
subunit_log="$work_dir/getput.subunit"
venv_dir={{ ardana_qe_test_venv }}
source $venv_dir/bin/activate
subunit_output="$venv_dir/bin/subunit-output"
creds="$HOME/service.osrc"
cname="container-auto"
oname="object-auto"
osize="4k"
rtime="30"
tests="p,g,d"
# minimum expected IOPS. these are REAL low...
put_iops=10
get_iops=20
# download getput from github to venv_dir and make executable
cd $venv_dir
wget https://raw.githubusercontent.com/markseger/getput/master/getput
chmod +x $venv_dir/getput
# run test
$subunit_output --inprogress getput > $subunit_log
# WARNING - because of Nagel, 1K SES GETs always too slow!
command="$venv_dir/getput --creds $creds -c$cname -o$oname -s$osize -r$rtime -t$tests --insecure"
$command | tee $test_log
res=$?
echo "getput completed with status $res"
# do some brute force IOPS checks and append failures to log
# also being a little silly to save fractional part of IOPS so we can report full value
success=1
iops=`grep $osize $test_log | grep put | awk '{print $10}'`
if (( `echo $iops | cut -f1 -d'.'` < $put_iops )); then
echo "FAIL -- Test: $osize PUT Minimal Value: $put_iops Reported: $iops" >> $test_log
success=0
fi
iops=`grep $osize $test_log | grep get | awk '{print $10}'`
if (( `echo $iops | cut -f1 -d'.'` < $get_iops )); then
echo "FAIL -- Test: $osize GET Minimal Value: $get_iops Reported: $iops" >> $test_log
success=0
fi
if (( $success == 1 )); then
$subunit_output --success getput >> $subunit_log
else
$subunit_output --fail getput >> $subunit_log
fi
exit $res
| true |
42b1336b466b8b18142cf892d8dc297a3050f267 | Shell | GeoTecINIT/nativescript-task-dispatcher | /ci-tools/wait-for-emulator.sh | UTF-8 | 750 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Install AVD files
echo "y" | $ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager --install 'system-images;android-27;google_apis;x86'
# Create emulator
echo "no" | $ANDROID_HOME/cmdline-tools/latest/bin/avdmanager create avd -n xamarin_android_emulator -k 'system-images;android-27;google_apis;x86' --force
$ANDROID_HOME/emulator/emulator -list-avds
echo "Starting emulator"
# Start emulator in background
nohup $ANDROID_HOME/emulator/emulator -avd xamarin_android_emulator -no-snapshot > /dev/null 2>&1 &
$ANDROID_HOME/platform-tools/adb wait-for-device shell 'while [[ -z $(getprop sys.boot_completed | tr -d '\r') ]]; do sleep 1; done; input keyevent 82'
$ANDROID_HOME/platform-tools/adb devices
echo "Emulator started"
| true |
fd7ae262811986b57be2f3a91c34074de1db718a | Shell | kidaak/lhcbsoft | /DBASE/Det/XmlConditions/cmt/remove_extension.sh | UTF-8 | 304 | 3.59375 | 4 | [] | no_license | #!/bin/bash
if [ ! -n "$1" ] ; then
echo Usage: $0 path_to_process
exit 1
fi
for f in `find $1 -type f -name \*.xml` ; do
mv $f ${f%.xml}
done
for f in `grep -r -l 'href *= *".\+\.xml#.*"' $1` ; do
mv $f $f~
sed 's/\(href *= *".\+\)\.xml\(#.*"\)/\1\2/' $f~ > $f
rm -f $f~
done
| true |
b2d12d8e4ee16257ea148a5eba6df38fdad7ad18 | Shell | zimskyzeng/Tools | /Bash/deal_input.sh | UTF-8 | 1,073 | 4.125 | 4 | [] | no_license | #!/bin/bash
# =====================================================
# 处理参数输入返回固定格式
#
# 提取规则: 1
# 返回: 1
# echo ${INPUT} | sed -r -n '/^[0-9]+$/p'
# 提取规则: 1-10
# 返回: 1,2,3,4,5,6,7,8,9,10
# echo ${INPUT} | sed -r -n '/^[0-9]+-[0-9]+$/p'
# 提取规则: 1,2,3,4,5
# 返回: 1,2,3,4,5
# echo ${INPUT} | sed -r -n '/^[0-9]+,[0-9]+$/p'
#
# =====================================================
INPUT=$1
RET=""
# 打印输入参数
echo "Input: $INPUT"
if [ -n "$(echo ${INPUT} | sed -r -n '/^[0-9]+$/p')" ] ; then
RET=${INPUT}
elif [ -n "$(echo ${INPUT} | sed -r -n '/^[0-9]+-[0-9]+$/p')" ] ; then
INPUT_FROM="$( echo ${INPUT} | sed -r 's/^([0-9]+)-[0-9]+$/\1/g')"
echo "INPUT_FROM: $INPUT_FROM"
INPUT_END="$( echo ${INPUT} | sed -r 's/^[0-9]+-([0-9]+)$/\1/g')"
echo "INPUT_END: $INPUT_END"
# 循环遍历生成返回值
for ((i=${INPUT_FROM};i<=${INPUT_END};i++)) ; do
RET+=${i},
done
elif [ -n "$(echo ${INPUT} | sed -r -n '/^[0-9]+(,[0-9]+)+$/p')" ] ; then
RET=${INPUT}
fi
echo "RET: $RET"
| true |
d0b6b226de9bba5a014f1c64f4580f0deb6295ff | Shell | panda-zxs/hula | /push.sh | UTF-8 | 503 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/bash
git remote remove origin
git remote add origin https://panda-zxs:$PASSWD@github.com/panda-zxs/hula.git
git config user.email "45396622zxs@gmail.com"
git config user.name "panda-zxs"
git config pull.rebase false
git checkout -- .
git fetch origin
git pull origin master
cd $(dirname $0)
# 执行次数
count=$(node ./commit/ran.js)
echo $count
# 循环执行
for((i=0;i<$count;i++))
do
node ./commit/code.js
git add . --all
msg=$(node ./commit/msg.js)
git commit -m "$msg"
git push origin master
done
| true |
66cd1370dc1e54676554d0e7ad19379dc99b5a5b | Shell | gram7gram/Munhauzen | /bin/prepare-raw.sh | UTF-8 | 893 | 3.796875 | 4 | [] | no_license | #!/bin/bash
SRC_DIR="/Users/master/Projects/MunhauzenDocs/Elements/PICTURES_FINAL"
OBB_PATH="/Users/master/Projects/Munhauzen/obb"
function prepare() {
part=$1
echo "|- $part"
inputDir="$SRC_DIR$part"
outputPartDir="$OBB_PATH$part"
outputDir="$outputPartDir/raw/images"
mkdir -p $outputDir
# rm -f $outputDir/*
cd $inputDir
for file in *.jpg; do
newFile="${file// /_}"
if [[ "$newFile" != "$file" ]]; then
echo "Renaming: $file => $newFile"
mv "$inputDir/$file" "$inputDir/$newFile"
fi
done
cd $inputDir
for file in *.jpg; do
echo "|-- $file"
currentFile="$inputDir/$file"
convert $currentFile -quality 80 -colors 256 +profile "icc" "$outputDir/$file"
test $? -gt 0 && exit 1
done
}
prepare "/Part_demo"
prepare "/Part_1"
prepare "/Part_2" | true |
e6b28947d9a93b4e165581306d68cfb89840feb0 | Shell | easyfun/efungame | /scripts/p2p/create_db_sh/cms.sh | UTF-8 | 7,556 | 2.53125 | 3 | [] | no_license | #!/bin/bash
#################################################################
#password=123456
l=0
m=0
n=0
#mysql_exec="mysql -uroot -p$password"
mysql_exec="mysql "
db=cms
$mysql_exec -e "create database ${db}"
$mysql_exec $db -e "CREATE TABLE t_cms_banner(
id int(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
ostype tinyint NOT NULL DEFAULT 0 COMMENT '系统类型:0 all,1 android,2 ios, 3 pc 4 wap',
bannertype tinyint NOT NULL DEFAULT 1 COMMENT '类型 1:首页banner',
usertype tinyint NOT NULL DEFAULT 0 COMMENT '用户类型:0-所有 1:新手,2-老手',
channel tinyint NOT NULL DEFAULT 0 COMMENT '渠道维度,0 所有,1选择渠道',
version tinyint NOT NULL DEFAULT 0 COMMENT '版本号维度,0所有,1选择版本',
name varchar(128) NOT NULL DEFAULT '' COMMENT 'banner名称',
img varchar(250) DEFAULT '' COMMENT 'banner图片',
back_color varchar(8) DEFAULT '#FFFFFF' COMMENT '图片背景色',
url varchar(250) DEFAULT '' COMMENT '链接',
start_time datetime NOT NULL COMMENT '开始时间',
end_time datetime NOT NULL COMMENT '结束时间',
sort int(11) DEFAULT 0 COMMENT '排序',
status tinyint DEFAULT 0 COMMENT '状态,0,正常,1,下架',
creator varchar(32) DEFAULT '' COMMENT '创建人',
update_user varchar(32) DEFAULT '' COMMENT '修改人',
create_time datetime not null default '0000-00-00 00:00:00' comment '创建时间',
update_time datetime not null default '0000-00-00 00:00:00' comment '修改时间',
PRIMARY KEY (id),
KEY ostype (ostype),
KEY bannertype (bannertype),
KEY usertype (usertype),
KEY channel (channel),
KEY version (version),
KEY name (name),
KEY url (url),
KEY start_time (start_time),
KEY end_time (end_time),
KEY status (status),
KEY create_time (create_time)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='banner';"
$mysql_exec $db -e "CREATE TABLE t_cms_banner_channel (
id bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
banner_id int(11) NOT NULL COMMENT 'banner表id',
channel varchar(64) NOT NULL COMMENT '渠道号',
status tinyint DEFAULT 0 COMMENT '状态,0,正常,1,下架',
create_time datetime not null default '0000-00-00 00:00:00' comment '创建时间',
PRIMARY KEY (id),
KEY banner_id (banner_id),
KEY channel (channel),
KEY status (status),
KEY create_time (create_time)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='banner渠道表';"
$mysql_exec $db -e "CREATE TABLE t_cms_banner_version (
id bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
banner_id int(11) NOT NULL COMMENT 'banner表id',
version varchar(12) NOT NULL COMMENT '版本号',
status tinyint DEFAULT 0 COMMENT '状态,0,正常,1,下架',
create_time datetime not null default '0000-00-00 00:00:00' comment '创建时间',
PRIMARY KEY (id),
KEY banner_id (banner_id),
KEY version (version),
KEY status (status),
KEY create_time (create_time)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='banner版本号表';"
$mysql_exec $db -e "CREATE TABLE t_cms_splash_screen
(
id int NOT NULL AUTO_INCREMENT COMMENT 'id',
ostype tinyint NOT NULL DEFAULT 0 COMMENT '系统类型:0 all,1 android,2 ios ',
channel tinyint NOT NULL DEFAULT 0 COMMENT '渠道维度,0 所有,1选择渠道',
name varchar(64) not null default '' COMMENT '名称',
img varchar(250) not null default '' COMMENT '图片地址',
url varchar(250) not null default '' COMMENT 'url',
start_time datetime not null default '0000-00-00 00:00:00' COMMENT '开始时间',
end_time datetime not null default '0000-00-00 00:00:00' COMMENT '结束时间',
status tinyint DEFAULT 0 COMMENT '状态,0,正常,1,下架',
sort INT NOT NULL DEFAULT 0 COMMENT '排序,越大越靠前',
creator varchar(32) DEFAULT 'admin' COMMENT '创建人',
create_time datetime not null default '0000-00-00 00:00:00' comment '创建时间',
update_user varchar(32) COMMENT '修改人',
update_time datetime not null default '0000-00-00 00:00:00' comment '创建时间',
PRIMARY KEY (id),
key(ostype),
key(channel),
key(name),
key(img),
key(url),
key(start_time),
key(end_time),
key(status),
key(sort),
key(create_time)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT '闪屏表';"
$mysql_exec $db -e "CREATE TABLE t_cms_splash_screen_channel
(
id int(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
splash_screen_id int not null COMMENT '闪屏id',
channel varchar(32) not null COMMENT '渠道号',
status tinyint DEFAULT 0 COMMENT '状态,0,正常,1,删除',
create_time datetime not null default '0000-00-00 00:00:00' comment '创建时间',
PRIMARY KEY (id),
key(splash_screen_id),
key(channel),
key(status),
key(create_time)
)ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT '闪屏渠道表';"
$mysql_exec $db -e "CREATE TABLE t_cms_upload_log (
id bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
filename varchar(250) COMMENT '文件名称',
filetype varchar(32) COMMENT '文件类型',
filesize int default 0 COMMENT '文件大小',
creator varchar(32) COMMENT '创建人',
upload_log text COMMENT '上传成功后文件路径',
remark varchar(250) not null COMMENT '备注',
create_time datetime not null default '0000-00-00 00:00:00' comment '创建时间',
PRIMARY KEY (id),
KEY filename (filename),
KEY filetype (filetype),
KEY remark (remark),
KEY create_time (create_time)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='文件上传记录表';"
$mysql_exec $db -e "CREATE TABLE t_cms_articles (
id bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
title varchar(250) NOT NULL COMMENT '资讯标题',
secondtitle varchar(250) NOT NULL default '' COMMENT '资讯副标题',
type tinyint NOT NULL default 1 COMMENT '资讯栏目,1-媒体报道,2公司资讯3-行业资讯4-公告',
source varchar(128) default '' COMMENT '资讯来源',
sourceurl varchar(250) default '' COMMENT '资讯来源URL',
thumbnail varchar(250) default '' COMMENT '缩略图',
summary text COMMENT '摘要',
keyword varchar(250) not null default '' COMMENT '关键字',
content longtext not null COMMENT '内容',
readtimes int not null default 0 COMMENT '阅读次数',
status tinyint not null default 0 COMMENT '状态0上架 1-下架',
sort int not null default 0 COMMENT '排序越大越靠前',
creator varchar(32) not null default 'admin' COMMENT '发布人',
create_time datetime not null default '0000-00-00 00:00:00' comment '发布时间',
update_user varchar(32) default '' COMMENT '修改人',
update_time datetime not null default '0000-00-00 00:00:00' comment '修改时间',
PRIMARY KEY (id),
KEY title (title),
KEY type (type),
KEY status (status),
KEY create_time (create_time)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='资讯中心';"
| true |
00f3d9b09c02041187b62cc71394b709660a5bac | Shell | pascalgrimaud/vm | /linuxmint/gitConfig.sh | UTF-8 | 195 | 3.203125 | 3 | [] | no_license | #!/bin/bash
if [ "$#" -ne 2 ]; then
echo "Usage: $0 <username> <email>" >&2
cat ~/.gitconfig
exit 1
fi
git config --global user.name $1
git config --global user.email $2
cat ~/.gitconfig
| true |
e347c68c88dfa13fa9a62b63063b55cbffbbeb60 | Shell | marimo-KD/dotfiles | /rofi/rofi_system.sh | UTF-8 | 393 | 3.46875 | 3 | [] | no_license | #!/bin/bash
declare -A list=(
['Suspend']='systemctl suspend'
['Poweroff']='systemctl poweroff'
['Reboot']='systemctl reboot'
['Logout']='i3-msg exit'
)
if [[ ${1##* } == 'yes' ]]; then
eval ${list[${1%% *}]}
elif [[ ${1##* } == 'no' ]]; then
echo ${!list[@]} | sed 's/ /\n/g'
elif [[ -n $1 ]]; then
echo "$1 / no"
echo "$1 / yes"
else
echo ${!list[@]} | sed 's/ /\n/g'
fi
| true |
1a4ffb0bd03552918dbd235e2fc6f546cab2ae9d | Shell | pld-linux/ocs-inventory-ng-client | /ocs-inventory-ng-client.cron | UTF-8 | 1,049 | 3.359375 | 3 | [] | no_license | #!/bin/bash
NAME=ocsinventory-agent
[ -f /etc/sysconfig/$NAME ] || exit 0
source /etc/sysconfig/$NAME
export PATH
i=0
while [ $i -lt ${#OCSMODE[*]} ]
do
if [ ${OCSMODE[$i]:-none} == cron ]; then
OPTS=
if [ ! -z "${OCSPAUSE[$i]}" ]; then
OPTS="--wait ${OCSPAUSE[$i]}"
fi
if [ "${OCSNOSOFTWARE[$i]}" = '1' ] || [ "${OCSNOSOFTWARE[$i]}" = 'yes' ]; then
OPTS="$OPTS --nosoftware"
fi
if [ "${OCSFORCE[$i]}" = '1' ] || [ "${OCSFORCE[$i]}" = 'yes' ]; then
OPTS="$OPTS --force"
fi
if [ ! -z "${OCSTAG[$i]}" ]; then
OPTS="$OPTS --tag=${OCSTAG[$i]}"
fi
if [ "${OCSSERVER[$i]}" = 'zlocal' ]; then
# Local inventory
OPTS="$OPTS --local=/var/lib/$NAME"
elif [ ! -z "${OCSSERVER[$i]}" ]; then
# Remote inventory
OPTS="$OPTS --lazy --server=${OCSSERVER[$i]}"
fi
OPTS="$OPTS --logfile=/var/log/ocsinventory-agent/ocsinventory-agent.log "
echo "[$(date '+%c')] Running $NAME $OPTS"
/usr/bin/$NAME $OPTS
fi
((i++))
done
echo "[$(date '+%c')] End of cron job ($0)"
| true |
af7ba0a8be29071915ab49f4fd5b300fe889cb93 | Shell | xopxop/homegit | /init/system/04 | UTF-8 | 167 | 3.03125 | 3 | [] | no_license | #!/bin/bash
RED='\033[0;31m'
YELLOW='\033[1;33m'
echo "${RED}Q: What command gives you the time since your system was last booted?\n"
echo "${YELLOW}A: uptime"
uptime
| true |
8e24932e4e4ba4fe7465667ecb4e0ca1b3e51e16 | Shell | jgarte/dotfiles-12 | /install.sh | UTF-8 | 2,423 | 2.796875 | 3 | [] | no_license | #!/bin/bash
mkdir -p ~/bin
sudo apt-get install git
git config --global core.editor vim
git submodule update -i
rm -rf ~/.vim
ln -sf ~/dotfiles/.vim ~/.vim
ln -sf ~/dotfiles/.vimrc ~/.vimrc
ln -sf ~/dotfiles/.bashrc ~/.bashrc
ln -sf ~/dotfiles/.inputrc ~/.inputrc
ln -sf ~/dotfiles/.tmux.conf ~/.tmux.conf
# tmux, Vim and such
sudo apt-get -y install `cat ~/dotfiles/packages.txt`
if [ ! -e ~/bin/hub ]; then
wget --progress=dot:mega -c https://github.com/github/hub/releases/download/v2.2.9/hub-linux-amd64-2.2.9.tgz
tar zvxf hub-linux-amd64-2.2.9.tgz
ln -sf ~/dotfiles/hub-linux-amd64-2.2.9/bin/hub ~/bin/hub
fi
if [ ! -e /usr/local/bin/docker-compose ]; then
sudo curl -L https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
fi
if which gdm; then
sudo apt-get -y install `cat ~/dotfiles/packages-gui.txt`
# Firefox preferences
sudo cp firefox-preferences.js /usr/lib/firefox/browser/defaults/preferences/
# set up Right Alt as the compose key for accented letters
# set up Caps Lock as an additional Ctrl
gsettings set org.gnome.desktop.input-sources xkb-options "['compose:ralt', 'ctrl:nocaps']"
# gedit creates no backup files with ~ suffix
gsettings set org.gnome.gedit.preferences.editor create-backup-copy false
gsettings set org.compiz.grid:/org/compiz/profiles/unity/plugins/grid/ put-bottomleft-key "<Control><Alt>1"
gsettings set org.compiz.grid:/org/compiz/profiles/unity/plugins/grid/ put-bottom-key "<Control><Alt>2"
gsettings set org.compiz.grid:/org/compiz/profiles/unity/plugins/grid/ put-bottomright-key "<Control><Alt>3"
gsettings set org.compiz.grid:/org/compiz/profiles/unity/plugins/grid/ put-left-key "<Control><Alt>4"
gsettings set org.compiz.grid:/org/compiz/profiles/unity/plugins/grid/ put-center-key "<Control><Alt>5"
gsettings set org.compiz.grid:/org/compiz/profiles/unity/plugins/grid/ put-right-key "<Control><Alt>6"
gsettings set org.compiz.grid:/org/compiz/profiles/unity/plugins/grid/ put-topleft-key "<Control><Alt>7"
gsettings set org.compiz.grid:/org/compiz/profiles/unity/plugins/grid/ put-top-key "<Control><Alt>8"
gsettings set org.compiz.grid:/org/compiz/profiles/unity/plugins/grid/ put-topright-key "<Control><Alt>9"
ln -fs ~/dotfiles/user-dirs.dirs ~/.config/user-dirs.dirs
xdg-user-dirs-update
fi
| true |
750c888e30a41f6f0e870c673b5c834f6d7d31c1 | Shell | ericfernandesferreira/scripts | /arch/2-kernel.sh | UTF-8 | 802 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# Instalando as dependências
pacman -S xmlto docbook-xsl kmod inetutils bc
# Script que compila o kernel e remove o original
cd /home/backup/kernel
cp linux-4.1.3.tar.xz /usr/src
cd /usr/src
tar xvf linux-4.1.3.tar.xz
rm linux-4.1.3.tar.xz
cd linux-4.1.3
cp /home/backup/kernel/configs/grinder-arch /usr/src/linux-4.1.3/.config
make -j5 && make modules_install
cp arch/x86_64/boot/bzImage /boot/vmlinuz-4.1.3
cp System.map /boot/System.map-4.1.3
ln -sf /boot/System.map-4.1.3 /boot/System.map
mkinitcpio -k 4.1.3-grinder -c /etc/mkinitcpio.conf -g /boot/initramfs-4.1.3.img
# Removendo o Kernel antigo
rm -rf /lib/modules/4.1.2-2-ARCH
rm /boot/initramfs-linux.img
rm /boot/initramfs-linux-fallback.img
rm /boot/vmlinuz-linux
# Atualizando o grub
grub-mkconfig -o /boot/grub/grub.cfg
| true |
18363aa338c9e199e2279da286f08f0e2e8832aa | Shell | SHREYASINGH29/StronglyLiveVariableAnalysis | /test.sh | UTF-8 | 253 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -x
LLVM_HOME=/usr/lib/llvm-12
CC=$LLVM_HOME/bin/clang
OPT=$LLVM_HOME/bin/opt
mkdir -p _build
pushd _build
cmake ..
make
popd
$CC -S -emit-llvm -o test.ll test.c
$OPT -instnamer -load _build/*/*LVA* -lva test.ll
#rm -rf _build test.bc
| true |
684075fe23b31059479eb5144ea90f7b7dbdab6e | Shell | mcarifio/sshit | /bin/key-comment.sh | UTF-8 | 440 | 3.78125 | 4 | [] | no_license | #!/usr/bin/env bash
me=$(readlink -f ${BASH_SOURCE})
keyfile=${1:?'expecting a keyfile name, typically ${USER}_${host}_rsa; this is the private key filename'}
if [[ -z "$(dirname ${keyfile})" ]] ; then
pathname=$(readlink -f ${KEY_D:-~/.ssh/keys.d}/${keyfile})
else
pathname=$(readlink -f ${keyfile})
fi
# Extract the comment in the key file (the -C argument)
ssh-keygen -l -f ${pathname} | cut -f3- -d' '|sed s/\(RSA\)//g 2>&1
| true |
636c6f226cbe1e1326c8be70618b89fbb041be41 | Shell | theopolis/build-anywhere | /overlay/scripts/anywhere-setup-fuzzing.sh | UTF-8 | 420 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env bash
if [[ ! "x$0" = "x-bash" ]]
then BASH_SOURCE=$0
fi
SCRIPT=$(readlink -f "$BASH_SOURCE")
SCRIPTPATH=$(dirname "$SCRIPT")
INSTALLPATH=$(dirname "$SCRIPTPATH")
EXTRA_CFLAGS="-fPIC -g -fsanitize=address -fno-omit-frame-pointer -fsanitize-coverage=edge,indirect-calls,trace-cmp,trace-div,trace-gep"
EXTRA_LDFLAGS="-fuse-ld=lld"
. $SCRIPTPATH/anywhere-setup.sh "$EXTRA_LDFLAGS" "$EXTRA_CFLAGS"
| true |
9b53cae231e63114238ec7b0a9def175dcd80d0d | Shell | BahaaAlhagar/new-server-setup | /stack-scripts/functions/debian-based/php.sh | UTF-8 | 2,299 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env bash
# Author: Randall Wilk <randall@randallwilk.com>
##############################################
# Install the latest version of PHP.
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#############################################
function install_php() {
print_info "Installing PHP"
local PHP="php"
if [[ ${IS_UBUNTU} = false ]]; then
# Debian needs this for the newest php version
apt-get install -y apt-transport-https lsb-release ca-certificates
wget -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg
echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/php.list
update_system
local PHP="php7.2"
fi
apt-get install -y ${PHP}-fpm ${PHP}-common ${PHP}-bcmath ${PHP}-gd ${PHP}-mbstring ${PHP}-xmlrpc ${PHP}-mysql ${PHP}-imagick ${PHP}-xml ${PHP}-zip
}
##############################################
# Configure PHP.
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#############################################
function configure_php() {
# php.ini
sed -i -e "s/upload_max_filesize = 2M/upload_max_filesize = 64M/" /etc/php/7.2/fpm/php.ini
sed -i -e "s/memory_limit = 128M/memory_limit = 512M/" /etc/php/7.2/fpm/php.ini
sed -i -e "s/;cgi.fix_pathinfo=1/cgi.fix_pathinfo=0/" /etc/php/7.2/fpm/php.ini
# php-fpm config
sed -i -e "s/user = .*/user = $FTP_USER_NAME/" /etc/php/7.2/fpm/pool.d/www.conf
sed -i -e "s/group = .*/group = $FTP_USER_NAME/" /etc/php/7.2/fpm/pool.d/www.conf
sed -i -e "s/;listen.owner = .*/listen.owner = $FTP_USER_NAME/" /etc/php/7.2/fpm/pool.d/www.conf
sed -i -e "s/;listen.group = .*/listen.group = $FTP_USER_NAME/" /etc/php/7.2/fpm/pool.d/www.conf
restart_php_fpm
}
##############################################
# Restart the PHP-FPM service.
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#############################################
function restart_php_fpm() {
systemctl restart php7.2-fpm
}
##############################################
# Run setup and install of PHP.
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#############################################
function setup_php() {
install_php
configure_php
} | true |
9b1d19cc4f193aaa9f8526648f1df8615df465da | Shell | aboychen/xxd | /ztss_build.sh | UTF-8 | 268 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
PJ=$(cd `dirname $0`; pwd)
SRV=$PJ/server/src/tss_server
OLD_GOPATH=$GOPATH
export GOPATH=$PJ/server
cd $SRV
case $1 in
static)
go install -ldflags="-w -extldflags '-static'"
;;
*)
go install
;;
esac
export GOPATH=$OLD_GOPATH | true |
c0407c995e7065506c45eb47637f4ac0e9fbad84 | Shell | ryanyun/raspberrypi-camera | /rpicamera.sh | UTF-8 | 1,738 | 3.75 | 4 | [] | no_license | #!/bin/sh
VLC_USER=pi
start()
{
# v4l2-ctl -d /dev/video1 --list-formats
v4l2-ctl -d /dev/video0 --set-fmt-video=width=1920,height=1080,pixelformat=1 2> /dev/null
v4l2-ctl -d /dev/video0 --set-ctrl=focus_auto=0 2> /dev/null
v4l2-ctl -d /dev/video0 --set-ctrl=focus_absolute=0 2> /dev/null
# check if camera settings command was succesful (success: error code $? = 0)
if [ $? -eq 0 ]
then
echo "Starting camera for recording at `date +"%T"`"
else
echo "Error: camera not found at /dev/video0" >&2
fi
sudo -u $VLC_USER cvlc --no-audio v4l2:///dev/video0:chroma=MJPG:width=1920:height=1080:fps=30 --sout="#std{access=file,fps=30,mux=ogg,noaudio,dst=go.mpg}" -vvv > /dev/null 2>&1 &
}
stream()
{
# open vlc and connect to network rtsp://192.168.1.1:8554/ and livestream video
v4l2-ctl -d /dev/video0 --set-fmt-video=width=1920,height=1080,pixelformat=1 2> /dev/null
v4l2-ctl -d /dev/video0 --set-ctrl=focus_auto=0 2> /dev/null
v4l2-ctl -d /dev/video0 --set-ctrl=focus_absolute=0 2> /dev/null
if [ $? -eq 0 ]
then
echo "Starting camera for live streaming"
else
echo "Error: camera not found at /dev/video0" >&2
fi
sudo -u $VLC_USER cvlc --no-audio v4l2:///dev/video0:chroma=MJPG:width=1920:height=1080:fps=30 --sout="#rtp{sdp=rtsp://:8554/}" -vvv > /dev/null 2>&1 &
}
stop()
{
echo "Stopping camera recording at `date +"%T"`"
kill -9 $(pidof vlc) >/dev/null 2>&1
}
case "$1" in
start)
start
;;
stream)
stream
;;
stop)
stop
;;
restart)
stop
start
;;
*)
echo "Usage: $0 {start|stream|stop|restart}"
;;
esac
exit 0 | true |
9419a0fa0ee7af727db8150b25fcaeb2fcf13750 | Shell | batect/batect-cache-init-image | /scripts/build_and_push.sh | UTF-8 | 1,163 | 4.28125 | 4 | [
"Apache-2.0"
] | permissive | #! /usr/bin/env bash
set -euo pipefail
IMAGE_MANIFEST_TAG=$1
ARCHITECTURES=(amd64 arm64 arm)
function main() {
for architecture in "${ARCHITECTURES[@]}"; do
buildForArchitecture "$architecture"
done
for architecture in "${ARCHITECTURES[@]}"; do
pushImage "$architecture"
done
createManifest
pushManifest
echo
echoHeader "Done."
}
function buildForArchitecture() {
local architecture=$1
echoHeader "Building for $architecture..."
./scripts/build_image.sh "$IMAGE_MANIFEST_TAG-$architecture" "$architecture"
}
function pushImage() {
local architecture=$1
echoHeader "Pushing image for $architecture..."
docker push "$IMAGE_MANIFEST_TAG-$architecture"
echo
}
function createManifest() {
echoHeader "Creating manifest..."
docker manifest create "$IMAGE_MANIFEST_TAG" "${ARCHITECTURES[@]/#/$IMAGE_MANIFEST_TAG-}"
echo
}
function pushManifest() {
echoHeader "Pushing manifest..."
docker manifest push "$IMAGE_MANIFEST_TAG"
echo
}
function echoHeader() {
local text=$1
echo "-------------------------------------------"
echo "$text"
echo "-------------------------------------------"
}
main
| true |
c4952469a5ada289c28708290ac539100b3147aa | Shell | yh392261226/my_customs | /bin/_domove | UTF-8 | 568 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env bash
#################################
## Desc: 已下载图片记录、去重、分文件夹处理
## Author: 杨浩
## 使用方法:bbb
## 时 间:2021-01-09
#################################
##图片文件夹路径
PICPATH=$HOME/Pictures/down_pics/
if [ "" != "$1" ]; then
tmptype=''
tmptype=$(basename $(dirname $1) | sed 's,[[:digit:]],,g')
if [ ! -d ${PICPATH}duplicates_${tmptype}/ ]; then
mkdir ${PICPATH}duplicates_${tmptype}/
fi
mv $1 ${PICPATH}duplicates_${tmptype}/
else
echo "Usage: $0 filename"
fi | true |
2621ea2d6cc978cdd0991dd6c76e80f088666a1a | Shell | DavidSche/davidche.tools | /scripts/centos/change_hosts.sh | UTF-8 | 1,269 | 3.265625 | 3 | [
"Apache-2.0"
] | permissive | # !/bin/sh
in_ip=${1}
in_url=${2}
local_ip="127.0.0.1"
#更改host
updateHost()
{
# read
inner_host=`cat /etc/hosts | grep ${in_url} | awk '{print $1}'`
if [ ${inner_host} = ${in_ip} ];then
echo "${inner_host} ${in_url} ok"
else
if [ ${inner_host} != "" ];then
echo " change is ok "
else
inner_ip_map="${in_ip} ${in_url}"
echo ${inner_ip_map} >> /etc/hosts
if [ $? = 0 ]; then
echo "${inner_ip_map} to hosts success host is `cat /etc/hosts`"
fi
echo "shuld appand "
fi
fi
}
# hostName
updateHostName()
{
inner_hostName=`hostname`
inner_local_ip=${local_ip}
inner_host_count=`cat /etc/hosts | grep ${inner_hostName} | awk '{print $1}' |grep -c ${local_ip}`
inner_host=`cat /etc/hosts | grep ${inner_hostName} | awk '{print $1}'`
if [ ${inner_host_count} != 0 ]; then
return
fi
if [ ${inner_host} = ${inner_local_ip} ];then
echo "127.0.0.1 ${inner_hostName} already add "
else
if [ ${inner_host}="" ]; then
inner_ip_map="${inner_local_ip} ${inner_hostName}"
echo ${inner_ip_map} >> /etc/hosts
if [ $?=0 ];then
echo " ${inner_ip_map} to add hosts success `cat /etc/hosts`"
fi
fi
fi
}
main() { updateHost updateHostName } main | true |
dd0ba44077fed0a875e1696efd98ad57847a2e61 | Shell | zzzj1233/shell | /03-exit/01-exit.sh | UTF-8 | 312 | 3.109375 | 3 | [] | no_license | # linux有一个内置变量$?,可以查看上个命令的退出状态码
# echo $?
# 自定义程序可以使用exit退出并且返回指定状态码,默认返回0
# 状态码可以用于if判断
# 常用状态码
# 0 ok
# 1 一般性未知错误
if grep zzzj /etc/passwd
then
exit 0
else
exit 1
fi | true |
448ca55503739830b22665148c7b8edeaea89018 | Shell | Lenivaya/dotfiles | /bin/zzz | UTF-8 | 892 | 4 | 4 | [] | no_license | #!/usr/bin/env bash
# If -f, then put computer to sleep.
# Otherwise, lock screen and turn off monitor.
function betterlockscreen_command {
# betterlockscreen -l dim -- --layout-pos="ix-1000:iy+1000" --time-size="25" --verif-text="" >/dev/null
betterlockscreen -l dim --off "$1" -- --verif-text="" >/dev/null
}
function lock_screen {
pgrep "betterlockscreen" || betterlockscreen_command
}
function lock_screen_sleep {
pgrep "betterlockscreen" || betterlockscreen_command 10
}
function print_usage {
bin=$(basename "$0")
echo "Usage: $bin [-f]"
echo
echo " $bin # put display to sleep"
echo " $bin -f # put computer to sleep"
}
case $1 in
-h)
print_usage
;;
-f)
echo "Going to sleep..."
lock_screen
systemctl suspend
;;
*)
echo "Shutting my eyes..."
lock_screen_sleep
# sleep 5
# xset dpms force off
;;
esac
| true |
3e5d50001a21ee8d85bf8e5dbf0136b9162fe55d | Shell | alreece45/docker-images | /mysql/bootstrap/ubuntu_trusty/test/run | UTF-8 | 198 | 3.46875 | 3 | [] | no_license | #!/bin/sh
pass() {
echo "Pass: " $*
}
fail() {
echo "FAIL: " $*
}
for test in tests/*
do
$test
if [ $? -eq 0 ]
then
pass $test
else
fail $test
fi
done
| true |
be66ce7214a2a7780d221ba7f2e04d7d8ef6056d | Shell | hdweiss/high-performance-computing | /dgemm_lab/fetchBlocks.sh | UTF-8 | 384 | 3.09375 | 3 | [] | no_license | #!/bin/bash
EXP_FILES='experiments/blocks*'
MATRIX_SIZE='^2*3*8/1024'
FLOPS_CALC='flop/time/1024/1024'
for file in $EXP_FILES
do
ER_OUT="$(mktemp)"
er_print -func $file 2> /dev/null > $ER_OUT
echo $file | awk -F. {' printf "%i ", $2'};
cat $ER_OUT | grep block_mm | awk {' time=$2; flop=$4; printf "%.1f " , '$FLOPS_CALC};
printf "\n"
rm -f $ER_OUT
done
| true |
d3b23468f4801a2008c2b3b5b0de86eb84c02c3e | Shell | 746kva/SysProg | /vvklv2.sh | UTF-8 | 880 | 3.71875 | 4 | [] | no_license | #!/bin/sh
echo
echo "USER SEARCH"
echo "Developer: Vladimir Kolov"
echo "With this program you can:"
echo "1) Find the user"
echo "2) Show user's UID"
echo "3) Find the groups to which he belongs"
echo
answer()
{
echo "Repeat? (Y/n)"
read item
case "$item" in
y|Y) echo "OK, repeat..."
echo
;;
n|N) echo "OK, exit..."
echo
exit 0
;;
*) echo "ERROR"
echo
answer
;;
esac
}
while true;
do
echo -n "Enter username: "
read user
if cat /etc/shadow | grep "$user";
then
id -u "$user"
id -ng "$user"
id -nG "$user"
answer
continue
else echo "User is not found"
fi
answer
done
echo | true |
7cd7727ef3d94965416229f0a2f22b8581088506 | Shell | aperezca/CMS-global-pool-monitor | /queries/multi-core_factories.sh | UTF-8 | 1,592 | 3.171875 | 3 | [] | no_license | #!/bin/sh
source /etc/profile.d/condor.sh
# Check Idle, Running and Held multicore pilots at T1s to evaluate glidein pressure on each site compared to pledged slots
# Antonio Perez-Calero Yzquierdo Sep. 2015, May 2016, May, July 2017
# factory infos
WORKDIR="/home/aperez"
OUTDIR="/crabprod/CSstoragePath/aperez"
#CERN_factory=`host -t CNAME cmsgwms-factory-prod.cern.ch |awk '{print $6}'`
CERN_factory_1="vocms0805.cern.ch"
CERN_factory_2="vocms0206.cern.ch"
UCSD_factory="gfactory-1.t2.ucsd.edu:9614"
FNAL_factory="cmsgwms-factory.fnal.gov"
GOC_factory="glidein.grid.iu.edu"
sitelist_1="$WORKDIR/entries/T1_sites"
sitelist_2="$WORKDIR/entries/T2_sites"
date_s=`date -u +%s`
for factory in $CERN_factory_1 $CERN_factory_2 $UCSD_factory $FNAL_factory $GOC_factory; do
condor_q -g -pool $factory -const '(GlideinFrontendName == "CMSG-v1_0:cmspilot")' -af JobStatus GlideinEntryName;
done |sort |uniq -c >$WORKDIR/status/all_pilots_factories
for site in `cat $sitelist_1 $sitelist_2 |sort`; do
#echo $site
idle=0
running=0
held=0
cat $WORKDIR/status/all_pilots_factories |grep $site >$WORKDIR/status/all_pilots_fact_$site
while read -r line; do
#echo $line
num=$(echo $line |awk '{print $1}')
status=$(echo $line |awk '{print $2}')
if [[ $status -eq 1 ]]; then let idle+=num; fi
if [[ $status -eq 2 ]]; then let running+=num; fi
if [[ $status -eq 5 ]]; then let held+=num; fi
done<$WORKDIR/status/all_pilots_fact_$site
rm $WORKDIR/status/all_pilots_fact_$site
echo $date_s $idle $running $held >>$OUTDIR/out/factories_$site
#echo $date_s $idle $running $held
done
| true |
9a98a0e86306d892c0b1be47e9e1682d48fcb8b9 | Shell | falconer502/RseqFlow | /rseqflow2/ExpressionEstimation/ExpressionEstimation.sh | UTF-8 | 28,825 | 3.625 | 4 | [] | no_license | #!/bin/bash
USAGE=$'Usage:
ExpressionEstimation.sh {-f <r1.fastq.gz> or -1 <r1.fastq.gz> -2 <r2.fastq.gz>} [options]
Required arugments:
-f/--fastq <reads.fastq.gz> fastq or fastq.gz file for single end data
-- OR --
-1/--read1 <reads.fastq.gz> the first read fastq file for paired end data
-2/--read2 <reads.fastq.gz> the second read fastq file for paired end data
-a/--annotation <annotation.gtf> reference annotation in GTF format
-o/--output-prefix <outputPrefix> prefix of output files
Optional arguments:
-c/--transcriptome [ ref.fa ] transcriptome reference sequences
--mp [ MAX,MIN ] both integers. bowtie2 option to set the mistch penalty, default is 6,2
--score-min [ Function,a,b ] bowtie2 option to set a function of read length for the minimum alignment score
necessary for an alignment to be considered valid.
Default is L,0,-0.6 which is defined as Linear function:
f(x) = 0 + -0.6 * x where x is the read length.
Available function types are constant (C), linear (L), square-root (S), and
natural log (G). The parameters are specified as F,B,A - the function type,
the constant term, and the coefficient separated by commas with no whitespace.
The constant term and coefficient may be negative and/or floating-point numbers.
For more info see the Bowtie2 manual.
--tSam [ alignment.sam ] alignments to transcriptome in SAM format
--cleanup delete temporary files
-h/--help print this usage message'
## 2013-06-11 -- "unique" is the only option offered right now but "proportion" and "random" may be used at a later time
## Bowtie2 takes too long to run in order to obtain multiple mappings that make the random and proportion options worthwhile, hence they are being removed for the time being.
# Removed proportion and random from Usage for now, unique is the default, but bowtie2 maps reads using the "best" alignment so
# it doesn't guarantee uniqueness, it means only that it takes the best alignment which may or not be unique.
# The "XS:" field in the samfile will be present if a read was multimapped and absent if a read was uniquely mapped
#-u/--unique use data of reads mapped to only one gene.
#-p/--proportion assign reads according to proportion of genes expression level.
#-r/--random assign reads randomly.
echo ""
echo "You are running: $VERSION"
echo ""
if [ $# -eq 0 ]; then
echo "No arguments or options!"
echo "$USAGE"
exit 1
fi
is_unique=true
is_proportion=false
is_random=false
is_cleanup=false
MP='6,2'
Score_Min='L,0,-0.6'
TOOL='bowtie2'
OUTPUT='Expression_output'
declare -a ARGS
ARGS=($@)
for ((i=0;i<$#;++i))
do
if [[ ${ARGS[i]} = '-f' || ${ARGS[i]} = '--fastq' ]]; then # Reads Input Fastq file (single end data)
FASTQ=${ARGS[(i+1)]}
i=($i+1)
elif [[ ${ARGS[i]} = '-1' || ${ARGS[i]} = '--read1' ]]; then # Reads Input Fastq file (paired end data)
READ1=${ARGS[(i+1)]}
i=($i+1)
elif [[ ${ARGS[i]} = '-2' || ${ARGS[i]} = '--read2' ]]; then # Reads Input Fastq file (paired end data)
READ2=${ARGS[(i+1)]}
i=($i+1)
elif [[ ${ARGS[i]} = '-c' || ${ARGS[i]} = '--transcriptome' ]]; then # Transcriptome Reference Sequences
TRANSCRIPTOME=${ARGS[(i+1)]}
i=($i+1)
elif [[ ${ARGS[i]} = '-a' || ${ARGS[i]} = '--annotation' ]]; then # Reference Annotation
ANNOTATION=${ARGS[(i+1)]}
i=($i+1)
elif [[ ${ARGS[i]} = '-o' || ${ARGS[i]} = '--output-prefix' ]]; then # Output file Prefix
OUTPUT=${ARGS[(i+1)]}
i=($i+1)
elif [[ ${ARGS[i]} = '--mp' ]]; then # max mismatch penalty
MP=${ARGS[(i+1)]}
i=($i+1)
elif [[ ${ARGS[i]} = '--score-min' ]]; then # minimum alignment score
Score_Min=${ARGS[(i+1)]}
i=($i+1)
## 2013-06-11 -- "unique" is the only option offered right now but "proportion" and "random" may be used at a later time
elif [[ ${ARGS[i]} = '-u' || ${ARGS[i]} = '--unique' ]]; then # Just base on reads uniquely mapped to one gene
is_unique=true
# elif [[ ${ARGS[i]} = '-p' || ${ARGS[i]} = '--proportion' ]]; then # Assign Reads base on Gene Expression Level Proportion
# is_proportion=true
# elif [[ ${ARGS[i]} = '-r' || ${ARGS[i]} = '--random' ]]; then # Assign Reads Randomly
# is_random=true
elif [ ${ARGS[i]} = '--tSam' ]; then #input transcriptome sam file
TrantoSam=${ARGS[(i+1)]}
i=($i+1)
elif [ ${ARGS[i]} = '--cleanup' ]; then
is_cleanup=true
elif [[ ${ARGS[i]} = '-h' || ${ARGS[i]} = '--help' ]]; then # Help Information
echo "$USAGE"
exit 0
else
UNKNOWN=${ARGS[i]}
echo "No switch encountered: $UNKNOWN"
echo "$USAGE"
exit 1
fi
done
########################################Check Required Arguments#########################################
if [[ -z $TrantoSam ]]; then
if [[ -z $FASTQ && -z $READ1 && -z $READ2 || -z $TRANSCRIPTOME || -z $ANNOTATION ]]; then
echo "Error: required input files not specified!"
echo "$USAGE"
exit 1
fi
if [[ -n $FASTQ && -n $READ1 ]] || [[ -n $FASTQ && -n $READ2 ]]; then
echo "Error: single end data and paired end data can't be given together!"
echo "If you want to run with single end data, try option: -f <reads.fastq.gz>"
echo "If you want to run with paired end data, try options: -1 <read1.fastq.gz> -2 <read2.fastq.gz>"
exit 1
fi
if [ -z $FASTQ ] && [[ -z $READ1 || -z $READ2 ]]; then
echo "Error: for paired end data, read1 and read2 must be given together!"
echo "If you want to run with paired end data, try options: -1 <read1.fastq.gz> -2 <read2.fastq.gz>"
exit 1
fi
else
if [[ -z $ANNOTATION ]]; then
echo "Error: you input a sam file without the annotation file!"
echo "Try option: -a <annotation.gtf>"
exit 1
fi
fi
if [ -z $OUTPUT ]; then
echo "Error: missing output prefix! Please give the prefix of output files."
echo "Try this option to specify the output prefix: - --output-prefix < out.prefix >"
exit 1
fi
##########################################Check Method##########################################
is_only_alignment=false
## 2013-06-11 -- "unique" is the only option offered right now but "proportion" and "random" may be used at a later time
#if ! ($is_unique || $is_proportion || $is_random); then
# echo "Warning: you didn't choose any analysis to do, so only alignment will be done."
# echo "try one of the following options:"
# echo "-u/--unique #Description: use data of reads mapped to only one gene."
# echo "-p/--proportion #Description: assign reads according to proportion of genes expression level."
# echo "-r/--random #Description: assign reads randomly."
# is_only_alignment=true
# #exit 1
#fi
########################################Check input files#######################################
echo "Checking input files..."
if [[ -z $TrantoSam ]]; then
echo "Check_for_reference_annotation_withoutGenome.py -t $TRANSCRIPTOME -a $ANNOTATION"
Check_for_reference_annotation_withoutGenome.py -t $TRANSCRIPTOME -a $ANNOTATION
ERR=$?
if [ $ERR -ne 0 ]; then
echo "Error: input files failed to pass the check. There are some errors in the input files, please check them!"
exit 1
fi
#fi
if [ -z $FASTQ ]; then
if [[ -n $READ1 ]]; then
echo "Check_for_reads_file.py -r $READ1"
Check_for_reads_file.py -r $READ1
ERR=$?
if [ $ERR -ne 0 ]; then
echo "Error: input files failed to pass the check. There are some errors in the input files, please check them!"
exit 1
fi
echo "Check_for_reads_file.py -r $READ2"
Check_for_reads_file.py -r $READ2
ERR=$?
if [ $ERR -ne 0 ]; then
echo "Error: input files failed to pass the check. There are some errors in the input files, please check them!"
exit 1
fi
fi
else
echo "Check_for_reads_file.py -r $FASTQ"
Check_for_reads_file.py -r $FASTQ
ERR=$?
if [ $ERR -ne 0 ]; then
echo "Error: input files failed to pass the check. There are some errors in the input files, please check them!"
exit 1
fi
fi
fi
#####################################################################Alignment#####################################################################
if [[ -n $TrantoSam ]]; then
echo "You input a sam file, so alignment will be skipped."
echo "Check_for_transcriptomeSam.py -s $TrantoSam"
Check_for_transcriptomeSam.py -s $TrantoSam
ERR=$?
if [ $ERR -ne 0 ]; then
echo "Error: input files failed to pass the check. There are some errors in the input files, please check them!"
exit 1
fi
else
#--------------------------------------------------Bowtie2:Alignment to Transcriptome------------------------------------------------------#
TranId=${TRANSCRIPTOME%.fa} #get full path of filename without extension
TrantoIndexSamp=${TranId##*/} #get basename of file
TrantoSam="$OUTPUT"_Bowtie2_transcriptome.sam
# Check if bowtie2 indexes already exist in reference path, if not run bowtie2-build
# Can keep reference index so it doesn't need to be created every time if mapping multiple samples.
echo "Checking for existing bowtie2 indexes for $TRANSCRIPTOME"
if [[ -f $TranId.1.bt2 && -f $TranId.2.bt2 && -f $TranId.3.bt2 && -f $TranId.4.bt2 && -f $TranId.rev.1.bt2 && -f $TranId.rev.2.bt2 ]]; then
echo "Existing bowtie2 indexes found for $TRANSCRIPTOME"
TrantoIndex=$TranId
else
echo "Start building bowtie2 indexes for $TRANSCRIPTOME"
TrantoIndex=$TrantoIndexSamp
echo "bowtie2-build $TRANSCRIPTOME $TrantoIndex"
bowtie2-build $TRANSCRIPTOME $TrantoIndex
ERR=$?
if [ $ERR -ne 0 ]; then
echo "Errors when building bowtie2 index for $TRANSCRIPTOME, stopping the pipeline!"
if [ -f $TrantoIndexSamp ]; then
rm $TrantoIndexSamp* -f
fi
exit 1
fi
fi
echo "Starting alignment with bowtie2"
# Output: $TrantoSam ($OUTPUT"_Bowtie2_transcriptome.sam)
if [[ -n $FASTQ ]]; then
echo "bowtie2 -x $TrantoIndex -U $FASTQ --mp $MP --score-min $Score_Min --sam-no-hd -S $TrantoSam"
bowtie2 -x $TrantoIndex -U $FASTQ --mp $MP --score-min $Score_Min --sam-no-hd -S $TrantoSam
else
echo "bowtie2 -x $TrantoIndex -1 $READ1 -2 $READ2 --mp $MP --score-min $Score_Min --sam-no-hd -S $TrantoSam"
bowtie2 -x $TrantoIndex -1 $READ1 -2 $READ2 --mp $MP --score-min $Score_Min --sam-no-hd -S $TrantoSam
fi
if [ $ERR -ne 0 ]; then
echo "Errors when running bowtie2, stopping the pipeline!"
#rm $TrantoIndexSamp* -f
exit 1
fi
if [[ $is_cleanup && -f $TrantoIndexSamp ]]; then
rm $TrantoIndexSamp* -f
fi
echo "bowtie2 has finished!"
fi
if $is_only_alignment; then
exit 0
fi
#############################################################Split input files by chrom############################################################
echo "Split files by chromosome..."
#-------split sam file------#
if [[ -n $TrantoSam ]]; then
# Output: $OUTPUT"_chrList_sam.txt
echo "SplitByChromosome_for_transcriptomeSamFile.py -i $TrantoSam -p $OUTPUT -o "$OUTPUT"_chrList_sam.txt"
SplitByChromosome_for_transcriptomeSamFile.py -i $TrantoSam -p $OUTPUT -o "$OUTPUT"_chrList_sam.txt
ERR=$?
if [ $ERR -ne 0 ]; then
echo "Errors when running SplitByChromosome_for_transcriptomeSamFile.py, stopping the pipeline!"
exit 1
fi
else
echo "$TrantoSam: file not found, stopping the pipeline!"
exit 1
fi
#-------split fa file-------#
# Reference file is being split up, not the sample fasta
#if [[ -n $TRANSCRIPTOME ]]; then
# # Output: $OUTPUT"_chrList_fa.txt
# echo "SplitByChromosome_for_transcriptomeSequenceFaFile.py -i $TRANSCRIPTOME -p $OUTPUT -o "$OUTPUT"_chrList_fa.txt"
# SplitByChromosome_for_transcriptomeSequenceFaFile.py -i $TRANSCRIPTOME -p $OUTPUT -o "$OUTPUT"_chrList_fa.txt
# ERR=$?
# if [ $ERR -ne 0 ]; then
# echo "Errors when running SplitByChromosome_for_transcriptomeSequenceFaFile.py, stopping the pipeline!"
# exit 1
# fi
#else
# echo "$TRANSCRIPTOME: file not found, stopping the pipeline!"
# exit 1
#fi
#------split gtf file-------#
if [[ -n $ANNOTATION ]]; then
# Output: $OUTPUT"_chrList_gtf.txt
echo "SplitByChromosome_for_annotationGtfFile.py -i $ANNOTATION -p $OUTPUT -o "$OUTPUT"_chrList_gtf.txt"
SplitByChromosome_for_annotationGtfFile.py -i $ANNOTATION -p $OUTPUT -o "$OUTPUT"_chrList_gtf.txt
ERR=$?
if [ $ERR -ne 0 ]; then
echo "Errors when running SplitByChromosome_for_annotationGtfFile.py, stopping the pipeline!"
exit 1
fi
else
echo "$ANNOTATION: file not found, stopping the pipeline!"
exit 1
fi
############################################################Build Index and combination############################################################
# Read the chromosome list produced by the earlier split step (one name per
# whitespace-separated word; the unquoted $chrList expansion relies on that).
chrList=$(cat "$OUTPUT"_chrList_sam.txt)
MAP_LOG="$OUTPUT"_mapping_info.log
# Build parallel per-chromosome filename arrays, indexed 0..n-1.
i=0
for chr in $chrList
do
    AN[$i]="$OUTPUT"_"$chr"_annotation.gtf
    #TS[$i]="$OUTPUT"_"$chr"_sequence.fa
    EC[$i]="$OUTPUT"_ExonCombination_"$chr".txt
    EI[$i]="$OUTPUT"_ExonIndex_"$chr".txt
    JC[$i]="$OUTPUT"_JunctionCombination_"$chr".txt
    JI[$i]="$OUTPUT"_JunctionIndex_"$chr".txt
    CSAM[$i]="$OUTPUT"_"$chr"_alignment.sam
    USAM[$i]="$OUTPUT"_"$chr"_uniqGene.sam
    MUL[$i]="$OUTPUT"_"$chr"_reads_multiGene.txt
    GEU[$i]="$OUTPUT"_"$chr"_GeneExpressionLevel_unique.txt
    EEU[$i]="$OUTPUT"_"$chr"_ExonExpressionLevel_unique.txt
    JEU[$i]="$OUTPUT"_"$chr"_JunctionExpressionLevel_unique.txt
    #RFG[$i]="$OUTPUT"_ReadsFromGene_"$chr".fa
    #TRR[$i]="$OUTPUT"_RR_trans_"$chr".txt
    #GRR[$i]="$OUTPUT"_RR_chrs_"$chr".txt
    #EL[$i]="$OUTPUT"_ExonLength_"$chr".txt
    #RTSAM[$i]="$OUTPUT"_transcriptome_ReadsFromGene_"$chr".sam
    #RGSAM[$i]="$OUTPUT"_genome_ReadsFromGene_"$chr".sam
    ## 2013-06-11 -- "unique" is the only option offered right now but "proportion" and "random" may be used at a later time
    # GEP[$i]="$OUTPUT"_"$chr"_GeneExpressionLevel_proportion.txt
    # EEP[$i]="$OUTPUT"_"$chr"_ExonExpressionLevel_proportion.txt
    # JEP[$i]="$OUTPUT"_"$chr"_JunctionExpressionLevel_proportion.txt
    # GEP_Merge[$i]="$OUTPUT"_"$chr"_GeneExpressionLevel_proportion_merge.txt
    # EEP_Merge[$i]="$OUTPUT"_"$chr"_ExonExpressionLevel_proportion_merge.txt
    # JEP_Merge[$i]="$OUTPUT"_"$chr"_JunctionExpressionLevel_proportion_merge.txt
    # GER[$i]="$OUTPUT"_"$chr"_GeneExpressionLevel_random.txt
    # EER[$i]="$OUTPUT"_"$chr"_ExonExpressionLevel_random.txt
    # JER[$i]="$OUTPUT"_"$chr"_JunctionExpressionLevel_random.txt
    # GER_Merge[$i]="$OUTPUT"_"$chr"_GeneExpressionLevel_random_merge.txt
    # EER_Merge[$i]="$OUTPUT"_"$chr"_ExonExpressionLevel_random_merge.txt
    # JER_Merge[$i]="$OUTPUT"_"$chr"_JunctionExpressionLevel_random_merge.txt
    #CHR[$i]=$chr
    # Numeric increment.  The original wrote `i=$i+1`, which built the string
    # "0+1+1+..." and only worked because array subscripts are evaluated
    # arithmetically; increment as a number instead.
    i=$((i+1))
done
# Remove the intermediate chromosome-list files unless the user keeps temporaries.
if $is_cleanup; then
#	rm -f "$OUTPUT"_chrList_sam.txt "$OUTPUT"_chrList_fa.txt "$OUTPUT"_chrList_gtf.txt
    # -f before the operands: GNU rm tolerates trailing options, BSD rm does not.
    rm -f "$OUTPUT"_chrList_sam.txt "$OUTPUT"_chrList_gtf.txt
fi
l=${#AN[@]}     # number of chromosomes collected above
echo "Build Exon and Junction Index..."
#-------------build Exon Index-------------#
# For every chromosome: derive exon combinations from the annotation, then
# build the exon index used later for expression-level counting.  Each helper
# command is echoed before it runs so the log shows the exact invocation, and
# any non-zero exit aborts the whole pipeline.
for ((i=0;i<l;++i))
do
    # Output: ${EC[i]} ("$OUTPUT"_ExonCombination_"$chr".txt)
    echo "ExonCombination.py -g ${AN[i]} -o ${EC[i]}"
    ExonCombination.py -g ${AN[i]} -o ${EC[i]}
    ERR=$?
    if [ $ERR -ne 0 ]; then
      echo "Errors when running ExonCombination.py, stopping the pipeline!"
      exit 1
    fi
    # Output: ${EI[i]} ("$OUTPUT"_ExonIndex_"$chr".txt)
    echo "ExonIndex.py -g ${AN[i]} -e ${EC[i]} -o ${EI[i]}"
    ExonIndex.py -g ${AN[i]} -e ${EC[i]} -o ${EI[i]}
    ERR=$?
    if [ $ERR -ne 0 ]; then
      echo "Errors when running ExonIndex.py, stopping the pipeline!"
      exit 1
    fi
done
#-----------build Junction Index-----------#
# Same pattern for splice junctions: combinations first, then the index.
for ((i=0;i<l;++i))
do
    # Output: ${JC[i]} ("$OUTPUT"_JunctionCombination_"$chr".txt)
    echo "JunctionCombination.py -g ${AN[i]} -o ${JC[i]}"
    JunctionCombination.py -g ${AN[i]} -o ${JC[i]}
    ERR=$?
    if [ $ERR -ne 0 ]; then
      echo "Errors when running JunctionCombination.py, stopping the pipeline!"
      exit 1
    fi
    # Output: ${JI[i]} ("$OUTPUT"_JunctionIndex_"$chr".txt)
    echo "JunctionIndex.py -g ${AN[i]} -j ${JC[i]} -o ${JI[i]}"
    JunctionIndex.py -g ${AN[i]} -j ${JC[i]} -o ${JI[i]}
    ERR=$?
    if [ $ERR -ne 0 ]; then
      echo "Errors when running JunctionIndex.py, stopping the pipeline!"
      exit 1
    fi
done
###################Split Result, one is based on uniquely mapped reads and another is based on multiple mapped reads###############################
echo "start to get reads mapping information"
# Output: $MAP_LOG ("$OUTPUT"_mapping_info.log)
# NOTE(review): unlike the steps below, this command's exit status is not
# checked -- confirm whether a failure here should stop the pipeline.
echo "Get_ReadsMappingInformation.py -s $TrantoSam -l $MAP_LOG"
Get_ReadsMappingInformation.py -s $TrantoSam -l $MAP_LOG
echo "Start to split alignment results..."
# Split each per-chromosome alignment into reads mapped uniquely to one gene
# (SAM output) and reads hitting multiple genes (text list).
for ((i=0;i<l;++i))
do
    # Output: ${USAM[i]} and ${MUL[i]} ("$OUTPUT"_"$chr"_uniqGene.sam, "$OUTPUT"_"$chr"_reads_multiGene.txt)
    echo "SamSplitEvenly_and_Randomly_gencode_modify.py -s ${CSAM[i]} -g ${AN[i]} -u ${USAM[i]} -m ${MUL[i]}"
    SamSplitEvenly_and_Randomly_gencode_modify.py -s ${CSAM[i]} -g ${AN[i]} -u ${USAM[i]} -m ${MUL[i]}
    ERR=$?
    if [ $ERR -ne 0 ]; then
      echo "Errors when running SamSplitEvenly_and_Randomly_gencode_modify.py, stopping the pipeline!"
      exit 1
    fi
done
# The per-chromosome annotations and raw alignments are no longer needed.
if $is_cleanup; then
    rm ${AN[@]} -f
    rm ${CSAM[@]} -f
fi
##############################################################UniqueMap ExpressionLevel############################################################
# Unique-read expression levels are computed whenever ANY estimation mode is
# requested, because the (currently disabled) proportion/random modes build
# on them.  Note: the ( ... ) runs the flag tests in a subshell; harmless here.
if ( $is_unique || $is_proportion || $is_random ); then
    echo "Start to estimate expression level by using reads uniquely mapped to only one gene..."
    for ((i=0;i<l;++i))
    do
        # Output: ${GEU[i]} ("$OUTPUT"_"$chr"_GeneExpressionLevel_unique.txt)
        echo "GeneExpressionLevel.py -u ${USAM[i]} -i ${EI[i]} -c ${EC[i]} -l ${MAP_LOG} -o ${GEU[i]}"
        GeneExpressionLevel.py -u ${USAM[i]} -i ${EI[i]} -c ${EC[i]} -l ${MAP_LOG} -o ${GEU[i]}
        ERR=$?
        if [ $ERR -ne 0 ]; then
          echo "Errors when running GeneExpressionLevel.py, stopping the pipeline!"
          exit 1
        fi
        # Output: ${EEU[i]} ("$OUTPUT"_"$chr"_ExonExpressionLevel_unique.txt)
        echo "ExonExpressionLevel.py -u ${USAM[i]} -i ${EI[i]} -l ${MAP_LOG} -o ${EEU[i]}"
        ExonExpressionLevel.py -u ${USAM[i]} -i ${EI[i]} -l ${MAP_LOG} -o ${EEU[i]}
        ERR=$?
        if [ $ERR -ne 0 ]; then
          echo "Errors when running ExonExpressionLevel.py, stopping the pipeline!"
          exit 1
        fi
        # Output: ${JEU[i]} ("$OUTPUT"_"$chr"_JunctionExpressionLevel_unique.txt)
        echo "JunctionExpressionLevel.py -u ${USAM[i]} -i ${JI[i]} -l ${MAP_LOG} -o ${JEU[i]}"
        JunctionExpressionLevel.py -u ${USAM[i]} -i ${JI[i]} -l ${MAP_LOG} -o ${JEU[i]}
        ERR=$?
        if [ $ERR -ne 0 ]; then
          echo "Errors when running JunctionExpressionLevel.py, stopping the pipeline!"
          exit 1
        fi
    done
    if $is_unique; then
        # Merge the per-chromosome tables into genome-wide files: copy the
        # line containing "Strand" (presumably the column header) once from
        # the first table, then append every non-header row from all tables
        # (-h suppresses file-name prefixes in multi-file grep output).
        GEU_Merge_Whole="$OUTPUT"_whole_GeneExpressionLevel_unique.txt
        EEU_Merge_Whole="$OUTPUT"_whole_ExonExpressionLevel_unique.txt
        JEU_Merge_Whole="$OUTPUT"_whole_JunctionExpressionLevel_unique.txt
        #echo "#Unique method: remove the multi-mapped reads directly" >$GEU_Merge_Whole
        grep 'Strand' ${GEU[0]} >>$GEU_Merge_Whole
        grep -v 'Strand' -h ${GEU[@]} >>$GEU_Merge_Whole
        #echo "#Unique method: remove the multi-mapped reads directly" >$EEU_Merge_Whole
        grep 'Strand' ${EEU[0]} >>$EEU_Merge_Whole
        grep -v 'Strand' -h ${EEU[@]} >>$EEU_Merge_Whole
        #echo "#Unique method: remove the multi-mapped reads directly" >$JEU_Merge_Whole
        grep 'Strand' ${JEU[0]} >>$JEU_Merge_Whole
        grep -v 'Strand' -h ${JEU[@]} >>$JEU_Merge_Whole
        echo "Expression Level Estimation (Unique Method) Done!"
    fi
fi
#####################################################MultipleMap ExpressionLevel and Merge two parts###############################################
## 2013-06-11 -- "unique" is the only option offered right now but "proportion" and "random" may be used at a later time
#if $is_proportion; then
# echo "Start to estimation expression level by assigning reads according to proportion of genes expression level..."
# GEP_Merge_Whole="$OUTPUT"_whole_GeneExpressionLevel_proportion.txt
# EEP_Merge_Whole="$OUTPUT"_whole_ExonExpressionLevel_proportion.txt
# JEP_Merge_Whole="$OUTPUT"_whole_JunctionExpressionLevel_proportion.txt
# for ((i=0;i<l;++i))
# do
# # Output: ${GEP[i]} ("$OUTPUT"_"$chr"_GeneExpressionLevel_proportion.txt)
# echo "GeneExpressionLevel_proportionAssign.py -m ${MUL[i]} -i ${EI[i]} -c ${EC[i]} -u ${GEU[i]} -l $MAP_LOG -o ${GEP[i]}"
# GeneExpressionLevel_proportionAssign.py -m ${MUL[i]} -i ${EI[i]} -c ${EC[i]} -u ${GEU[i]} -l $MAP_LOG -o ${GEP[i]}
# ERR=$?
# if [ $ERR -ne 0 ]; then
# echo "Errors when running GeneExpressionLevel_proportionAssign.py, stopping the pipeline!"
# exit 1
# fi
#
# # Output: ${EEP[i]} ("$OUTPUT"_"$chr"_ExonExpressionLevel_proportion.txt)
# echo "ExonExpressionLevel_proportionAssign.py -m ${MUL[i]} -i ${EI[i]} -u ${GEU[i]} -l $MAP_LOG -o ${EEP[i]}"
# ExonExpressionLevel_proportionAssign.py -m ${MUL[i]} -i ${EI[i]} -u ${GEU[i]} -l $MAP_LOG -o ${EEP[i]}
# ERR=$?
# if [ $ERR -ne 0 ]; then
# echo "Errors when running ExonExpressionLevel_proportionAssign.py, stopping the pipeline!"
# exit 1
# fi
#
# # Output: ${JEP[i]} ("$OUTPUT"_"$chr"_JunctionExpressionLevel_proportion.txt)
# echo "JunctionExpressionLevel_proportionAssign.py -m ${MUL[i]} -i ${JI[i]} -u ${GEU[i]} -l $MAP_LOG -o ${JEP[i]}"
# JunctionExpressionLevel_proportionAssign.py -m ${MUL[i]} -i ${JI[i]} -u ${GEU[i]} -l $MAP_LOG -o ${JEP[i]}
# ERR=$?
# if [ $ERR -ne 0 ]; then
# echo "Errors when running JunctionExpressionLevel_proportionAssign.py, stopping the pipeline!"
# exit 1
# fi
#
# # Output: ${GEP_Merge[i]} ("$OUTPUT"_"$chr"_GeneExpressionLevel_proportion_merge.txt)
# echo "Merge_unique_mulitple.py -u ${GEU[i]} -m ${GEP[i]} -o ${GEP_Merge[i]} -t gene"
# Merge_unique_mulitple.py -u ${GEU[i]} -m ${GEP[i]} -o ${GEP_Merge[i]} -t gene
#
# # Output: ${EEP_Merge[i]} ("$OUTPUT"_"$chr"_ExonExpressionLevel_proportion_merge.txt)
# echo "Merge_unique_mulitple.py -u ${EEU[i]} -m ${EEP[i]} -o ${EEP_Merge[i]} -t exon"
# Merge_unique_mulitple.py -u ${EEU[i]} -m ${EEP[i]} -o ${EEP_Merge[i]} -t exon
#
# # Output: ${JEP_Merge[i]} ("$OUTPUT"_"$chr"_JunctionExpressionLevel_proportion_merge.txt)
# echo "Merge_unique_mulitple.py -u ${JEU[i]} -m ${JEP[i]} -o ${JEP_Merge[i]} -t junction"
# Merge_unique_mulitple.py -u ${JEU[i]} -m ${JEP[i]} -o ${JEP_Merge[i]} -t junction
# done
#
# #echo "#Proportion method: assigns the multi-mapped reads according to the proportion of gene expression level" >$GEP_Merge_Whole
# grep 'Strand' ${GEP_Merge[0]} >>$GEP_Merge_Whole
# grep -v 'Strand' -h ${GEP_Merge[@]} >>$GEP_Merge_Whole
# #echo "#Proportion method: assigns the multi-mapped reads according to the proportion of gene expression level" >$EEP_Merge_Whole
# grep 'Strand' ${EEP_Merge[0]} >>$EEP_Merge_Whole
# grep -v 'Strand' -h ${EEP_Merge[@]} >>$EEP_Merge_Whole
# #echo "#Proportion method: assigns the multi-mapped reads according to the proportion of gene expression level" >$JEP_Merge_Whole
# grep 'Strand' ${JEP_Merge[0]} >>$JEP_Merge_Whole
# grep -v 'Strand' -h ${JEP_Merge[@]} >>$JEP_Merge_Whole
#
# if $is_cleanup; then
# rm ${GEP[@]} ${EEP[@]} ${JEP[@]} -f
# rm ${GEP_Merge[@]} ${EEP_Merge[@]} ${JEP_Merge[@]} -f
# fi
# echo "Expression Level Estimation (Proportion Method) Done!"
#fi
#
#if $is_random; then
# echo "Start to estimation expression level by assigning reads randomly..."
# GER_Merge_Whole="$OUTPUT"_whole_GeneExpressionLevel_random.txt
# EER_Merge_Whole="$OUTPUT"_whole_ExonExpressionLevel_random.txt
# JER_Merge_Whole="$OUTPUT"_whole_JunctionExpressionLevel_random.txt
# for ((i=0;i<l;++i))
# do
# # Output: ${GER[i]} and ${EER[i]} ("$OUTPUT"_"$chr"_GeneExpressionLevel_random.txt, "$OUTPUT"_"$chr"_ExonExpressionLevel_random.txt)
# echo "Gene_Exon_ExpressionLevel_randomAssign.py -m ${MUL[i]} -i ${EI[i]} -c ${EC[i]} -l ${MAP_LOG} -g ${GER[i]} -e ${EER[i]}"
# Gene_Exon_ExpressionLevel_randomAssign.py -m ${MUL[i]} -i ${EI[i]} -c ${EC[i]} -l ${MAP_LOG} -g ${GER[i]} -e ${EER[i]}
# ERR=$?
# if [ $ERR -ne 0 ]; then
# echo "Errors when running Gene_Exon_ExpressionLevel_randomAssign.py, stopping the pipeline!"
# exit 1
# fi
#
# # Output: ${JER[i]} ("$OUTPUT"_"$chr"_JunctionExpressionLevel_random.txt)
# echo "JunctionExpressionLevel_randomAssign.py -m ${MUL[i]} -i ${JI[i]} -l ${MAP_LOG} -j ${JER[i]}"
# JunctionExpressionLevel_randomAssign.py -m ${MUL[i]} -i ${JI[i]} -l ${MAP_LOG} -j ${JER[i]}
# ERR=$?
# if [ $ERR -ne 0 ]; then
# echo "Errors when running JunctionExpressionLevel_randomAssign.py, stopping the pipeline!"
# exit 1
# fi
#
# # Output: ${GER_Merge[i]} ("$OUTPUT"_"$chr"_GeneExpressionLevel_random_merge.txt)
# echo "Merge_unique_mulitple.py -u ${GEU[i]} -m ${GER[i]} -o ${GER_Merge[i]} -t gene"
# Merge_unique_mulitple.py -u ${GEU[i]} -m ${GER[i]} -o ${GER_Merge[i]} -t gene
#
# # Output: ${EER_Merge[i]} ("$OUTPUT"_"$chr"_ExonExpressionLevel_random_merge.txt)
# echo "Merge_unique_mulitple.py -u ${EEU[i]} -m ${EER[i]} -o ${EER_Merge[i]} -t exon"
# Merge_unique_mulitple.py -u ${EEU[i]} -m ${EER[i]} -o ${EER_Merge[i]} -t exon
#
# # Output: ${JER_Merge[i]} ("$OUTPUT"_"$chr"_JunctionExpressionLevel_random_merge.txt)
# echo "Merge_unique_mulitple.py -u ${JEU[i]} -m ${JER[i]} -o ${JER_Merge[i]} -t junction"
# Merge_unique_mulitple.py -u ${JEU[i]} -m ${JER[i]} -o ${JER_Merge[i]} -t junction
# done
#
# #echo "#Random method: assigns the multi-mapped reads randomly" >$GER_Merge_Whole
# grep 'Strand' ${GER_Merge[0]} >>$GER_Merge_Whole
# grep -v 'Strand' -h ${GER_Merge[@]} >>$GER_Merge_Whole
# #echo "#Random method: assigns the multi-mapped reads randomly" >$EER_Merge_Whole
# grep 'Strand' ${EER_Merge[0]} >>$EER_Merge_Whole
# grep -v 'Strand' -h ${EER_Merge[@]} >>$EER_Merge_Whole
# #echo "#Random method: assigns the multi-mapped reads randomly" >$JER_Merge_Whole
# grep 'Strand' ${JER_Merge[0]} >>$JER_Merge_Whole
# grep -v 'Strand' -h ${JER_Merge[@]} >>$JER_Merge_Whole
#
# if $is_cleanup; then
# rm ${GER[@]} ${EER[@]} ${JER[@]} -f
# rm ${GER_Merge[@]} ${EER_Merge[@]} ${JER_Merge[@]} -f
# fi
# echo "Expression Level Estimation (Random Method) Done!"
#fi
#############################################################Done####################################################################################################
# Final cleanup.  The original had two back-to-back `if $is_cleanup` blocks
# with the same guard; they are merged into one.
if $is_cleanup; then
    # -f goes before the operands for BSD rm compatibility.
    rm -f ${GEU[@]}
    rm -f ${EEU[@]}
    rm -f ${JEU[@]}
    # NOTE(review): TS is never assigned above (its assignment is commented
    # out), so this expands to nothing and the rm is a no-op -- kept for
    # parity with the original; confirm whether TS was meant to be populated.
    rm -f ${TS[@]}
    rm -f ${USAM[@]} ${MUL[@]} ${MAP_LOG}
    rm -f ${EI[@]} ${EC[@]} ${JI[@]} ${JC[@]}
fi
#rm ${GEU[@]} ${EEU[@]} ${JEU[@]} -f
echo "Expression Level Estimation Done!"
#####################################################################################################################################################################
| true |
529a113ad6ed04cbad76690107d06875d26ab1a2 | Shell | qchantel/42 | /docker/02_bonus/02_init_test/test.sh | UTF-8 | 450 | 3.828125 | 4 | [] | no_license | #ask for a container type and create it with -it --rm
# Prompt for an image type; run it interactively, or let the user bail out
# by answering exactly "No".
echo "<< If you want to run a container with -it --rm options, enter its type, if not, just type No or exit the program >>"
read -r contType   # -r: keep backslashes in the answer literal
if [ "$contType" = "No" ]
then
	echo "it's a no !"
	# `return` is only valid inside a function or a sourced script; in an
	# executed script it merely prints an error and falls through.  exit is
	# what was intended here.
	exit 1
else
	echo "Please, enter your container desired name:"
	read -r contName
	# Quote the user-supplied values so names containing spaces cannot split
	# into extra docker arguments.
	# NOTE(review): the prompt mentions --rm but the command uses
	# --restart on-failure (the two are mutually exclusive) -- confirm intent.
	docker run --restart on-failure --name "$contName" -it "$contType" sh
fi
echo "<< End of script, everything seems fine =) >>"
| true |
59f92484c80c73ccda783da327c28d7ff852893a | Shell | jawdypus/scripts | /mysql/create_new_user.sh | UTF-8 | 1,052 | 3.859375 | 4 | [] | no_license | #!/bin/bash
#
# Check the bash shell script is being run by root
#
if [[ $EUID -ne 0 ]]; then
  echo "This script must be run as root" 1>&2
  exit 1
fi
#
# Check the expect package is installed; try to install it when missing.
# dpkg-query prints "... ok installed" for installed packages, so a zero
# grep count means "not installed".
#
if [ $(dpkg-query -W -f='${Status}' expect 2>/dev/null | grep -c "ok installed") -eq 0 ]; then
	echo "Can't find expect. Trying install it..."
	aptitude -y install expect &> /dev/null
fi
#
# Input data about the new user (passwords are read with -s: no echo)
#
echo -n -e "Enter your current mysql root password: "; read -s root_passwd
echo -n -e "\nEnter username: "; read username
echo -n -e "Enter host: "; read host
echo -n -e "Enter password: "; read -s password
echo -e "\n\nThis might take a while... \n"
# Drive an interactive mysql session through expect: authenticate as root,
# create the user, grant global privileges, flush privilege tables.
# NOTE(review): root_passwd/username/host/password are interpolated directly
# into the expect program and the generated SQL -- quotes or backslashes in
# any of them will break or alter the statements; sanitize before use.
USER=$(expect -c "
set timeout 3
spawn mysql -u root -p
expect \"Enter password: \"
send \"$root_passwd\r\"
expect \"mysql>\"
send \"create user $username@'$host' identified by '$password';\r\"
expect \"mysql>\"
send \"grant all privileges on *.* to $username@'$host';\r\"
expect \"mysql>\"
send \"flush privileges;\r\"
expect eof
")
# NOTE(review): this clobbers the standard USER environment variable with the
# expect transcript, and the echo merely discards it; a different variable
# name would be safer (kept as-is: documentation-only change).
echo "$USER" &> /dev/null
exit 0
| true |
37c507e659e167507b766fef1d5b730475434b8b | Shell | darky83/Scripts | /OpenVAS/Debian10/OpenVAS11-Debian10-Install-Script.sh | UTF-8 | 1,842 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# --------------------------------------------------------------------- #
# Date: 20191127 #
# Script: OpenVAS11-Debian10-Install-Script.sh #
# #
# Description: Installs OpenVAS 11 on Debian 10 #
# #
# Work in progress #
# --------------------------------------------------------------------- #
# Install files
# https://community.greenbone.net/t/gvm-11-stable-initial-release-2019-10-14/3674
# Pinned source tarballs for each component of the GVM 11 stack.
GVMLIBS_URL="https://github.com/greenbone/gvm-libs/archive/v11.0.0.tar.gz"
OPENVAS_URL="https://github.com/greenbone/openvas/archive/v7.0.0.tar.gz"
OSPD2_URL="https://github.com/greenbone/ospd/archive/v2.0.0.tar.gz"
OSPD1_URL="https://github.com/greenbone/ospd-openvas/archive/v1.0.0.tar.gz"
GVMD_URL="https://github.com/greenbone/gvmd/archive/v9.0.0.tar.gz"
GSA_URL="https://github.com/greenbone/gsa/archive/v9.0.0.tar.gz"
PGVM_URL="https://github.com/greenbone/python-gvm/archive/v1.0.0.tar.gz"
GVMTOOLS_URL="https://github.com/greenbone/gvm-tools/archive/v2.0.0.tar.gz"
OPENVASSMB_URL="https://github.com/greenbone/openvas-smb/archive/v1.0.5.tar.gz"
# --------------------------------------------------------------------- #
# check if we run Debian 10
# --------------------------------------------------------------------- #
# Release string from e.g. "10.3"; $() instead of legacy backticks.
OSVERSION=$(cat /etc/debian_version)
# Prefix-match "10.*".  The previous regex (=~ .*'10.'.*) matched the literal
# text "10." anywhere in the string, so versions such as "110.2" also passed.
if [[ $OSVERSION == 10.* ]]; then
  echo "Good you are running Debian 10"
else
  echo "ERROR: You are not running Debian 10"
  echo "ERROR: Unsupported system, stopping now"
  echo "^^^^^^^^^^ SCRIPT ABORTED ^^^^^^^^^^"
  exit 1
fi
# --------------------------------------------------------------------- #
# -p: do not abort when the work directory is left over from a previous run.
mkdir -p /tmp/gvm11
| true |
abb1b7bb13b5c40f92ed1f78934cb572031079d5 | Shell | KaOSx/apps | /pim-sieve-editor/PKGBUILD | UTF-8 | 1,046 | 2.578125 | 3 | [] | no_license |

# include global config
# Pulls repo/arch-specific settings (e.g. $_kdever, $_mirror) used below.
source ../_buildscripts/${current_repo}-${_arch}-cfg.conf
pkgname=pim-sieve-editor
pkgver=${_kdever}   # version tracks the KDE release set from the global config
pkgrel=1
pkgdesc="Application to assist with editing IMAP Sieve filters."
arch=('x86_64')
url='https://github.com/KDE/pim-sieve-editor'
license=('LGPL')
depends=('kdbusaddons' 'kcrash' 'kbookmarks' 'kiconthemes' 'kio'
         'kpimtextedit' 'kmailtransport' 'messagelib' 'pimcommon' 'libksieve' 'kuserfeedback')
makedepends=('extra-cmake-modules' 'kdoctools' 'boost')
groups=('kdepim')
source=($_mirror/${pkgname}-${_kdever}.tar.xz)
# The checksum is looked up from the shared KDE checksum list instead of
# being pinned in this file.
md5sums=(`grep ${pkgname}-${_kdever}.tar.xz ../kde-sc.md5 | cut -d" " -f1`)
# Out-of-source CMake build with tests disabled.
build() {
  mkdir -p build
  cd build
  cmake ../${pkgname}-${pkgver} \
    -DCMAKE_INSTALL_PREFIX=/usr \
    -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_INSTALL_LIBDIR=lib \
    -DKDE_INSTALL_USE_QT_SYS_PATHS=ON \
    -DKDE_INSTALL_QMLDIR=/usr/lib/qt5/qml \
    -DKDE_INSTALL_PLUGINDIR=/usr/lib/qt5/plugins \
    -DBUILD_TESTING=OFF
  make
}
# Install the build results into the packaging root.
package() {
  cd build
  make DESTDIR=${pkgdir} install
}
| true |
8d24c09ae75dc8664667a0a61145bed82cd6ec43 | Shell | Hyeramoon/MedicareQuality_SQL_Hive | /loading_and_modelling/load_data_lake.sh | UTF-8 | 2,002 | 2.609375 | 3 | [] | no_license | #! /bin/bash
# Stage everything in a scratch directory under $HOME.
mkdir -p ~/tempdata
cd ~/tempdata || exit 1
# Load zip file from CMS source and unzip it.
# The URL must be quoted: unquoted, the '&' in the query string terminated
# the wget command (running it in the background) and executed
# 'filename=...' as a plain variable assignment.  -O saves directly to
# hospital.zip, removing the need to rename the server-generated file.
wget -O hospital.zip "https://data.medicare.gov/views/bg9k-emty/files/Nqcy71p9Ss2RSBWDmP77H1DQXcyacr2khotGbDHHW_s?content_type=application%2Fzip%3B%20charset%3Dbinary&filename=Hospital_Revised_Flatfiles.zip"
unzip hospital.zip
# Rename the datasets to stable, shell-friendly names.
mv "Hospital General Information.csv" "hospitals.csv"
mv "Timely and Effective Care - Hospital.csv" "effective_care.csv"
mv "Readmissions and Deaths - Hospital.csv" "readmissions.csv"
mv "Measure Dates.csv" "Measures.csv"
mv "hvbp_hcahps_05_28_2015.csv" "surveys_responses.csv"
# Strip the header (first) line from each CSV in place.
for f in hospitals.csv effective_care.csv readmissions.csv Measures.csv surveys_responses.csv; do
	tail -n +2 "$f" > "$f.tmp" && mv "$f.tmp" "$f"
done
# Create the "hospital_compare" directory tree in HDFS and upload one CSV
# per subdirectory, so each dataset lives in its own directory.
hdfs dfs -mkdir /user/w205/hospital_compare
hdfs dfs -mkdir /user/w205/hospital_compare/hospitals
hdfs dfs -put hospitals.csv /user/w205/hospital_compare/hospitals
hdfs dfs -mkdir /user/w205/hospital_compare/care
hdfs dfs -put effective_care.csv /user/w205/hospital_compare/care
hdfs dfs -mkdir /user/w205/hospital_compare/readmissions
hdfs dfs -put readmissions.csv /user/w205/hospital_compare/readmissions
hdfs dfs -mkdir /user/w205/hospital_compare/measures
hdfs dfs -put Measures.csv /user/w205/hospital_compare/measures
hdfs dfs -mkdir /user/w205/hospital_compare/surveys
hdfs dfs -put surveys_responses.csv /user/w205/hospital_compare/surveys
| true |
a8a663313d927204d6e092dc8b149231c1ab2398 | Shell | mkucenski/misc | /zsh_custom.zsh | UTF-8 | 533 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | export PATH="$HOME/Scripts:$PATH"
# Prepend personal tool directories to the search paths.
export PATH="$HOME/Development/opt/bin:$HOME/Development/opt/sbin:$PATH"
export MANPATH="$HOME/Development/opt/share/man:$MANPATH"
# Per-OS tweaks, dispatched on the kernel name reported by uname.
UNAME="$(uname)"
case "$UNAME" in
  FreeBSD)
    # NOTE(review): stdout is discarded, so eval receives nothing -- the
    # agent starts but SSH_AUTH_SOCK/SSH_AGENT_PID are not exported;
    # probably meant `eval "$(ssh-agent)" > /dev/null` -- confirm.
    eval $(ssh-agent > /dev/null)
    ;;
  Linux)
    echo "Linux!"
    ;;
  Darwin)
    export PATH=/opt/local/bin:$PATH
    ;;
  *)
    echo "Unknown Operating System!"
    ;;
esac
# Print a fortune when the command is installed.  `command -v` replaces the
# non-builtin, less portable `which`.
if command -v fortune > /dev/null; then echo; fortune -a "$HOME/Development/MyGitHub/misc/fortunes/"; echo; fi
| true |
b40539f6afb7b994b3cf1c912c3d55673c84f0fe | Shell | ITLinuxCL/ansible-zimbra | /templates/zimbra.fact.j2 | UTF-8 | 399 | 3.03125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Emit a JSON fact describing the local Zimbra installation.
ZIMBRA_PROVISIONED=false
if [ -f "/opt/zimbra/bin/zmprov" ]; then
	# Provisioned means zmprov can read the config of this host; test the
	# command directly instead of checking $? afterwards.
	if /opt/zimbra/bin/zmprov -l gs "$(/opt/zimbra/bin/zmhostname)" 1>/dev/null 2>&1; then
		ZIMBRA_PROVISIONED=true
	fi
fi
# Package version with everything after the first "_" stripped.  The stderr
# redirect belongs INSIDE the command substitution; attached to the (silent)
# assignment, as before, rpm errors still leaked to the caller.
ZIMBRA_VERSION=$(rpm -q --queryformat '%{VERSION}' zimbra-core 2>/dev/null | cut -d"_" -f1)
cat <<EOF
{
  "version": "$ZIMBRA_VERSION",
  "provisioned": $ZIMBRA_PROVISIONED
}
EOF | true |
133598321882e05bb7ffa624900d558ae1f3fb1e | Shell | petronny/aur3-mirror | /gsh/PKGBUILD | UTF-8 | 422 | 2.515625 | 3 | [] | no_license | # Contributor: Alexander Tsamutali <astsmtl@yandex.ru>
pkgname=gsh
pkgver=0.3
pkgrel=1
pkgdesc="A tool to aggregate several remote shells into one."
arch=('i686' 'x86_64')
url="http://guichaz.free.fr/gsh/"
license=('GPL')
depends=('python')
source=("http://guichaz.free.fr/gsh/files/$pkgname-$pkgver.tar.bz2")
md5sums=('e15fbc254f9e8bcb4eb44071d846de98')
# Installs via distutils straight into the packaging root; there is no
# separate compile step, so everything happens in build().
build() {
  cd "$srcdir/$pkgname-$pkgver"
  python setup.py install --root="$pkgdir"
}
| true |
82c7605964fa1c2cf3a86d237a9faa66bf9060ed | Shell | MVCionOld/tp-seminars | /seminar-01/bash-scripting/task-6.sh | UTF-8 | 119 | 3.375 | 3 | [] | no_license | #!/bin/bash
# List the entries of the directory given as $1 (default: the current
# directory, matching the old `ls $1` behaviour when $1 was empty) as a
# zero-based "N) name" numbered list.
# A glob replaces the `ls` parse: names containing whitespace or glob
# characters are no longer split into bogus entries.
DIR=${1:-.}
ITERATOR=0
for FILE in "$DIR"/*
do
    # Empty directory: the unmatched pattern stays unexpanded; -L also keeps
    # broken symlinks (which ls would list) in the output.
    [ -e "$FILE" ] || [ -L "$FILE" ] || continue
    echo "$ITERATOR) ${FILE##*/}"
    ITERATOR=$((ITERATOR + 1))
done
| true |
7a51fac25231f0213d241de7fdad27a23e08cf4e | Shell | molleweide/dorothy | /commands/is-user-in-group | UTF-8 | 440 | 3.375 | 3 | [
"LicenseRef-scancode-public-domain",
"Unlicense"
] | permissive | #!/usr/bin/env bash
source "$DOROTHY/sources/strict.bash"
# confirm the groups command is available
# vscode flatpak doesn't have it, so the vscode flatpak terminal will fail
# with "groups: command not found"
if command-missing groups; then
	exit 2
fi

# prepare
group="$1"
user="${2-}"

# Succeed (exit 0) exactly when the group appears in the user's group list.
# The original piped through `grep -vq`, which inverted the result: it exited
# 0 when the user was NOT in the group, contradicting this command's name.
# -w matches the group as a whole word, so e.g. "adm" does not match "admin";
# -- protects group names that begin with a dash.
# The if/else works around: groups: '': no such user
if test -n "$user"; then
	groups "$user" | grep -qw -- "$group"
else
	groups | grep -qw -- "$group"
fi
| true |
b8fab2e926b2868dba3a0a6e7bc25d691bb6418f | Shell | AcYut/segway-lite | /util/dotml-1.2/generate-svg-graphics.bash | UTF-8 | 4,111 | 4.0625 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
#
# file: generate-svg-graphics.bash
# author: Martin Loetzsch
#
# Generates for each DotML "graph" element in a XML document a SVG graphic and a
# CSS style sheet containing the width and the height of the graphic.
#
# First, the XSL style sheet dotml2dot.xsl is applied to the source XML file.
# It generates for each graph a notation that is understood by the 'dot' tool.
# Then, the dot tool generates the SVG graphics from the output of the XSLT
# transformation. The file names for the .svg files are taken from the "file-name"
# attribute of the DotML "graph" element in the source XML file.
#
# As at the moment most of the HTML browsers can't scale SVG graphics automatically,
# a CSS file that only contains the width and the height for each .svg file is also
# generated.
#
# Usage: generate-svg-graphics.bash SOURCE_XML_FILE OUTPUT_DIR
# SOURCE_XML_FILE can contain one or more elements "graph" from the dotml namespace
# at any position. OUTPUT_DIR is the directory where the images are stored.
#
# The following environment variables have to be set:
# - "DOT": the executable Graphviz dot tool. If not defined, "dot" is used.
# - "DOTML_DIR": the directory of "dotml2dot.xsl"
# - "DOTML_XSLT": a parameterized call of a XSLT processor of your choice.
# Use the string "(INPUT)" for as a placeholder for the input XML file
# and the string "(XSL)" for the stylesheet.
# Examples:
# export DOTML_XSLT="Xalan -XD -Q -IN (INPUT) -XSL (XSL)"
# export DOTML_XSLT="MyXSLTProcessor (INPUT) (XSL)"
# Set the parameters of the XSLT processor such that the output is written to stdout
#
# Validation ladder: each nested if checks one precondition (arguments,
# paths, environment variables) and prints a specific diagnostic on failure.
if test $1'x' = 'x'; then
    echo "generate-svg-graphics.bash: Parameter 1 (input XML file) is missing."
else
    if !(test -e $1); then
      echo "generate-svg-graphics.bash: Input file $1 does not exist"
    else
      if test $2'x' = 'x'; then
	  echo "generate-svg-graphics.bash: Parameter 2 (output directory) is missing."
      else
	if !(test -d $2); then
	  echo "generate-svg-graphics.bash: Output directory $2 does not exist"
	else
	  if !(test -e $DOTML_DIR/dotml2dot.xsl); then
	    echo "generate-svg-graphics.bash: Environment variable DOTML_DIR (path to DotML stylesheets) was not set correct. (Can't find dotml2dot.xsl there.)"
	  else
	    if test "$DOTML_XSLT"'x' = 'x'; then
	      echo "generate-svg-graphics.bash: Environment variable DOTML_XSLT (executable XSLT processor with parameters) was not set."
	    else
	      # Fall back to plain "dot" when $DOT is not set.
	      if test $DOT'x' = 'x'; then
		  export DOT=dot
	      fi
	      # Backslash-escape the slashes in the paths so they can be
	      # spliced into sed s/// programs, then substitute the real
	      # input/stylesheet into the DOTML_XSLT command template.
	      input=$(echo $1 | sed "s/\//\\\\\//g")
	      dotml_dir=$(echo $DOTML_DIR | sed "s/\//\\\\\//g")
	      output_dir=$(echo $2 | sed "s/\//\\\\\//g")
	      xslt=$(echo $DOTML_XSLT | sed "s/(INPUT)/$input/;s/(XSL)/$dotml_dir\/dotml2dot.xsl/;")
	      # The XSLT output is rewritten by sed into a small shell script
	      # (one section per graph) and piped into bash: it writes the
	      # .dot file, regenerates the .svg only when the .dot changed
	      # (diff), and extracts width/height into a .size.css file.
	      $xslt \
	        | sed -n \
	      "s/\"/\\\\\\\"/g; \
               s/^[ ]*digraph/echo \"digraph/; \
               s/<dot-filename>\([^<]*\)<\/dot-filename>/ \
                   \" > $output_dir\/\1.dot.new \; \
                   echo $output_dir\/\1.dot \; \
                   if ! test -f $output_dir\/\1.dot\; then echo new > $output_dir\/\1.dot\; fi\; \
                   diff -q $output_dir\/\1.dot $output_dir\/\1.dot.new >\& \/dev\/null \
                      || \(echo $output_dir\/\1.svg \; \
                          \$DOT -Tsvg -o $output_dir\/\1.svg $output_dir\/\1.dot.new\; \
                          echo $output_dir\/\1.size.css \; \
                          mv $output_dir\/\1.dot.new $output_dir\/\1.dot\; \
                          echo .svg-size-\$(echo \1 | sed \"s\/.*\\\\\/\/\/\;\"\
                             ){\$(cat $output_dir\/\1.svg \
                                  | grep \"<svg\" \
                                  | sed -n \"s\/.*<svg .*width=\\\\\"\\\\\(.*\\\\\)\\\\\".*height=\\\\\"\\\\\(.*\\\\\)\\\\\"\/\
                                            width:\\\1\;height:\\\2\;\/\;p\;\" \;\
                             )} > $output_dir\/\1.size.css\)/;\
               p;" \
	        | bash
	    fi
	  fi
	fi
      fi
    fi
fi
| true |
0c88f01302fb2734ed0dcde25516366d467b1c6b | Shell | unclecannon/iot-link-ios | /.github/script/archive.sh | UTF-8 | 1,711 | 2.859375 | 3 | [] | no_license | #!/bin/sh
#bin/bsah - l
# (sic: the line above looks like a typo'd remnant of "#!/bin/bash -l";
#  kept unchanged -- the real shebang is on the first line of the file)
# Log the local and remote branches for the CI build output.
git branch
echo "本地branch"
git branch -r
echo "远程branch"
# rb: image-version branch name from the CI environment; rc: short commit
# hash; rt: the most recent tag with everything up to the first "v" stripped
# (e.g. v1.2.3 -> 1.2.3).
rb=$GIT_BRANCH_IMAGE_VERSION
rc=$(git rev-parse --short HEAD)
rtt=$(git describe --tags `git rev-list --tags --max-count=1`)
rt=${rtt#*v}
echo $rb
echo $rc
echo $rt
if [ $1 == 'Debug' ]; then
    # Open-source (Debug) build: version = branch + short commit hash.
    sed -i "" "s/LinkAPP_VERSION.*/LinkAPP_VERSION = $rb+git.$rc/g" Source/LinkApp/Supporting\ Files/LinkAppCommon.xcconfig
else
    # Public (release) build: version = latest tag; also set the localized
    # bundle display names.
    sed -i "" "s/LinkAPP_VERSION.*/LinkAPP_VERSION = $rt/g" Source/LinkApp/Supporting\ Files/LinkAppCommon.xcconfig
    sed -i "" "s/CFBundleName.*/CFBundleName = \"腾讯连连\";/g" Source/LinkApp/Supporting\ Files/Resource/zh-Hans.lproj/InfoPlist.strings
    sed -i "" "s/CFBundleName.*/CFBundleName = \"TencentLink\";/g" Source/LinkApp/Supporting\ Files/Resource/en.lproj/InfoPlist.strings
fi
# Show the resulting config for the build log.
cat Source/LinkApp/Supporting\ Files/LinkAppCommon.xcconfig
#rm -rf Podfile.lock
#/usr/local/bin/pod install --verbose --no-repo-update
#sudo gem install cocoapods --pre
/usr/local/bin/pod --version
#/usr/local/bin/pod install --verbose
/usr/local/bin/pod update --verbose
BUILD_TYPE=$1
# Remove any ipa from a previous run, then archive and export.
rm *.ipa
xcodebuild clean -workspace TIoTLinkKit.xcworkspace -scheme LinkApp -configuration Release
xcodebuild archive -workspace TIoTLinkKit.xcworkspace -scheme LinkApp -configuration Release -archivePath LinkApp.xcarchive -UseModernBuildSystem=NO
# Debug exports with the development provisioning plist, release otherwise.
if [ $1 == 'Debug' ]; then
    xcodebuild -exportArchive -archivePath LinkApp.xcarchive -exportOptionsPlist .github/script/ExportOptionsDevelop.plist -exportPath ./
else
    xcodebuild -exportArchive -archivePath LinkApp.xcarchive -exportOptionsPlist .github/script/ExportOptionsRelease.plist -exportPath ./
fi
mv *.ipa LinkApp.ipa
| true |
b64cd9166406d5da5247b2053d6b0b7aca980823 | Shell | jrfferreira/dotfiles | /start.sh | UTF-8 | 1,518 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Abort the bootstrap on the first unhandled failure.
set -e

# ANSI colour codes, interpreted by `echo -e` below.
GREEN='\e[32m'
NC='\e[0m' # No Color

# Ask the user whether to install $1; on "y"/"Y", source the installer
# script at $2.  Any other answer (including plain Enter) skips the step.
confirm_and_install() {
    echo -e -n "Install ${GREEN}$1${NC} [Y/n]? "
    # -r: keep backslashes in the reply literal instead of treating them
    # as escape characters.
    read -r choice
    if [[ $choice =~ ^[Yy]$ ]]; then
        # Quote the path so installers in directories with spaces work.
        source "$2"
    fi
}
# Arch Linux host: refresh pacman, remap CapsLock to Ctrl, and install the
# base toolchain non-interactively.
if [[ "$OSTYPE" == "linux-gnu" ]]; then
    # optimizing and updating pacman
    sudo pacman-optimize && sync
    sudo pacman -Syu
    setxkbmap -option 'ctrl:nocaps'
    # essentials
    sudo pacman --noconfirm -S make
    sudo pacman --noconfirm -S gcc
    sudo pacman --noconfirm -S jq
    sudo pacman --noconfirm -S gconf
    # ssh-agent
    sudo pacman --noconfirm -S keychain
    # common request libs
    sudo pacman --noconfirm -S unixodbc
    confirm_and_install "snapd" ./scripts/snapd.sh
elif [[ "$OSTYPE" == "darwin"* ]]; then
    # macOS host: Xcode CLI tools and Homebrew.  set +e temporarily disables
    # abort-on-error around these steps -- NOTE(review): presumably because
    # they exit non-zero when already installed; confirm that ignoring their
    # failures is intended.
    set +e
    xcode-select --install
    sudo xcodebuild -license accept
    /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
    set -e
    brew tap homebrew/cask
    brew tap homebrew/cask-versions
    brew install coreutils
fi
# Cross-platform steps: each asks before sourcing its installer script.
confirm_and_install "User apps, themes and fonts" ./scripts/user_apps.sh
#confirm_and_install "pkgfile" ./scripts/pkgfile.sh
confirm_and_install "docker" ./scripts/docker.sh
confirm_and_install "zsh" ./scripts/zsh.sh
#confirm_and_install "emacs" ./scripts/emacs.sh
if [[ "$OSTYPE" == "linux-gnu" ]]; then
    confirm_and_install "i3wm" ./scripts/i3wm.sh
fi
confirm_and_install "languages (Go, Python, JS)" ./scripts/languages.sh
#confirm_and_install "nordvpn CLI" ./scripts/vpn.sh
| true |
724b70b6b528e258c7eec90ea87f68951184be60 | Shell | EllieZheng/et-mei | /bin/customizedscripts/homolumo.sh | UTF-8 | 719 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Usage: ./homolumo.sh c4h6_b_t_tda_field $lowlimit $uplimit $grid $n_homo $n_lumo
# Collects the HOMO-LUMO transition block (match line plus ten lines of
# context) from every field-dependent output file "<name>_<field>.out" into
# one joint summary file.
lowlimit="$2"   # first field value
uplimit="$3"    # last field value (inclusive)
grid="$4"       # field step size
cc="$5"         # occupied-orbital index (HOMO)
virt="$6"       # virtual-orbital index (LUMO)
out="joint_${1}_from_${2}_to_${3}.homolumo"
# -n replaces the doubly-negated `! -z` tests: all six arguments required.
if [[ -n "$2" && -n "$3" && -n "$4" && -n "$5" && -n "$6" ]];then
	awk 'BEGIN {printf("HOMO-LUMO transitions\n")}' > "$out"
	echo "point calculation begins."
	for(( i=lowlimit; i<=uplimit; i=i+grid ));
	do
		# printf instead of echo: plain bash echo does not interpret "\n",
		# so the original wrote the literal characters \n\n into the file.
		printf '\n\n========Field=%s========\n' "$i" >> "$out"
#		awk -v name="$1" 'BEGIN {print name}' >> "$out"
		# $cc/$virt are used instead of repeating the positional $5/$6.
		grep -B 10 "cc. $cc a' --- Virt. $virt a'" "${1}_${i}.out" >> "$out"
		echo "${1}_${i} done."
	done
else
	echo "Not enough suffix provided."
fi
| true |
92a1ba15f2d63093ef46c0ede91ec35b297b1643 | Shell | maxvalor/cazel | /scripts/cazel | UTF-8 | 1,149 | 3.734375 | 4 | [] | no_license | #!/bin/bash
export CAZEL_LIBS_PATH="/usr/lib/cazel/"
source $CAZEL_LIBS_PATH/commands/sync.sh
source $CAZEL_LIBS_PATH/commands/remove.sh
source $CAZEL_LIBS_PATH/commands/build.sh
source $CAZEL_LIBS_PATH/commands/clean.sh
source $CAZEL_LIBS_PATH/commands/exec.sh
# Print the command-line usage summary to stdout.
function showCommands()
{
	cat <<'USAGE'
Usage: cazel <command> <target>

Available commands:
  sync    - Synchronize depends from server.
  build   - Build target project by cmake.
  auto    - execute sync and build command.
  exec    - execute a binary file.
  clean   - Clean obj files.
USAGE
}
# Shell-completion hook: reserved for future use, currently a no-op success.
function commandComplete()
{
	# to be implemented
	return 0
}
# Dispatch the first CLI argument to the matching cazel subcommand; the
# remaining arguments are forwarded to it.  The original wrote each case arm
# as `"cmd"):` -- the trailing ":" was a stray no-op colon command, removed
# here -- and forwarded an unquoted $@, which split arguments on whitespace;
# "$@" preserves them.  The "unkown" typo in the error message is fixed.
function main()
{
	logInit

	local command=$1
	shift

	case "$command" in
	sync)
		commandCazelSync "$@"
		;;
	build)
		commandCazelBuild "$@"
		;;
	auto)
		# "auto" is simply sync followed by build.
		commandCazelSync "$@"
		commandCazelBuild "$@"
		;;
	exec)
		commandCazelExec "$@"
		;;
	clean)
		commandCazelClean "$@"
		;;
	info)
		# reserved: not implemented yet
		;;
	help)
		showCommands
		;;
	*)
		echo "unknown command, use \"cazel help\" to get help."
		;;
	esac
}
# "$@" (quoted) forwards each CLI argument intact, even with embedded spaces.
main "$@"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.