blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
abb06dda1b3a043adcb44f402c953ddf3c44eb5b
|
Shell
|
chlordk/PostgreSQL-DBA-tools
|
/dba_scripts/table_sizes.sh
|
UTF-8
| 1,752
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# SHOWS TABLES AND SIZES FOR ALL TABLES
# SHOULD BE RUN AS SUPERUSER
#
# Lists every user table in a database with owner, filenode, row estimate,
# sizes and tablespace, via a psql here-doc query.
PORT=""
USER=""
TBL=""
# Print usage and exit 1.
usage() {
echo "Usage: $0 -d <dbname> [-t <table> -p <port> -U <user>]"
exit 1
}
# -p/-U are stored together with their flag so they can be passed to psql verbatim.
while getopts "d:p:t:U:u" OPT;
do case "${OPT}" in
d) DBNAME=$OPTARG
;;
p) PORT="-p $OPTARG"
;;
t) TBL="$OPTARG"
;;
U) USER="-U $OPTARG"
;;
u) usage
;;
[?]) usage
esac;
done
# A database name is mandatory.
if [ "$DBNAME" = "" ]
then
usage
# NOTE(review): unreachable — usage() already exits with status 1.
exit 1
fi
# $PORT/$USER are intentionally unquoted: they expand to "-p N" / "-U name".
# $TBL is interpolated into the LIKE pattern below (empty matches all tables).
# NOTE(review): the JOIN on pg_index appears to emit one row per index of each
# table — confirm whether DISTINCT/aggregation was intended.
psql $PORT $USER $DBNAME <<_CODE_
SELECT n.nspname as schema,
c.relname as table,
a.rolname as owner,
d.oid as directory,
c.relfilenode as filename,
c.reltuples::integer,
pg_size_pretty(pg_relation_size( quote_ident( n.nspname ) || '.' || quote_ident( c.relname ) )) as size,
pg_size_pretty(pg_relation_size( quote_ident( n.nspname ) || '.' || quote_ident( c.relname ) )
+ pg_relation_size( i.indexrelid ) ) as total_size,
pg_relation_size( quote_ident( n.nspname ) || '.' || quote_ident( c.relname ) ) as size_bytes,
pg_total_relation_size( quote_ident( n.nspname ) || '.' || quote_ident( c.relname ) ) as total_size_bytes,
CASE WHEN c.reltablespace = 0
THEN 'pg_default'
ELSE (SELECT t.spcname
FROM pg_tablespace t WHERE (t.oid = c.reltablespace) )
END as tablespace
FROM pg_class c
JOIN pg_namespace n ON (n.oid = c.relnamespace)
JOIN pg_index i ON (i.indrelid = c.oid )
JOIN pg_authid a ON ( a.oid = c.relowner ),
pg_database d
WHERE d.datname = current_database()
AND relname NOT LIKE 'pg_%'
AND relname NOT LIKE 'information%'
AND relname NOT LIKE 'sql_%'
AND relname LIKE '%$TBL%'
AND relkind = 'r'
ORDER BY 9 DESC, 1, 2;
_CODE_
| true
|
d007238e47b79e7719f696cad32738ac30c8861e
|
Shell
|
rjmeats/AWS-Trials
|
/AWSTrials/CLI/misc/ssm.sh
|
UTF-8
| 417
| 2.71875
| 3
|
[] |
no_license
|
# Demo: query AWS SSM Parameter Store via the CLI.
# aws_env_setup.sh provides credentials/region configuration.
. ../aws_env_setup.sh
echo
echo $SHELL at $(date)
echo
echo "Describe parameters:"
echo
aws ssm describe-parameters
echo
echo "List Amazon Linux Parameter Store namespaces:"
echo
echo "for the default region"
echo
# NB Doesn't run properly when run direct from Bash - not clear why, puts a C:\..... prefix onto path !
powershell 'aws ssm get-parameters-by-path --path "/aws/service/ami-amazon-linux-latest"'
| true
|
2dab1ae5d83a54edcd1ebfc1cc66eba7b4e3eb4e
|
Shell
|
geos333/dotfiles
|
/.themes/Dark/panel
|
UTF-8
| 6,440
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh
# lemonbar status panel for bspwm: fonts, singleton guard, fifo plumbing.
FONT="-xos4-terminesspowerline-medium-r-normal--12-120-72-72-c-60-iso10646-1"
FONT2="Awesome:size=11"
ICONS="-xos4-terminusicons2mono-medium-r-normal--12-120-72-72-m-60-iso8859-1"
ICONS2="-wuncon-siji-medium-r-normal--10-100-75-75-c-80-iso10646-1"
BG="#1F201D"
# Icon variables (CCLOCK, CCPU, CWIFI, BAT*, icon_*) come from this file.
source $(dirname $0)/panel_icons
# Refuse to start a second panel instance.
if xdo id -a "$PANEL_WM_NAME" > /dev/null ; then
printf "%s\n" "The panel is already running." >&2
exit 1
fi
# Kill all child subshells when the panel exits.
trap 'trap - TERM; kill 0' INT TERM QUIT EXIT
[ -e "$PANEL_FIFO" ] && rm "$PANEL_FIFO"
mkfifo "$PANEL_FIFO"
# Reserve screen space for the bar.
bspc config top_padding $PANEL_HEIGHT
#bspc subscribe report > "$PANEL_FIFO" &
# Feed window-manager state changes (monitor section only) into the fifo,
# prefixed with "W".
bspc subscribe |\
grep -oE "[Mm][^TM]*[TML]" --line-buffered |\
while read line; do echo W$line; done > "$PANEL_FIFO" &
{
# Map a window class name ($1) to a Font Awesome glyph for the panel.
# Unknown classes fall back to a generic terminal icon.
function window_icon {
  case "$1" in
    urxvt|xterm)     echo -e "\uf120" ;;  # terminal
    chromium|Chrome) echo -e "\uf268" ;;  # chrome
    firefox)         echo -e "\uf269" ;;
    desktop)         echo -e "\uf108" ;;
    nautilus)        echo -e "\uf07b" ;;  # folder
    atom)            echo -e "\uf121" ;;  # code
    *)               echo -e "\uf17c" ;;  # fallback (tux)
  esac
}
# Panel clock segment.
#   $1 = HOUR -> "${CCLOCK} HH:MM AM/PM"
#   $1 = DATE -> "YYYY-MM-DD"
function timeday {
case $1 in
HOUR)
hour=$(date "+%H")
min=$(date "+%M")
part="AM"
# date +%H is zero-padded, so this string comparison behaves numerically.
if [[ "$hour" > 11 ]]; then
part="PM"
fi
# FIX: removed dead code — the original computed a 12-hour value (mdhr),
# immediately clobbered it with a debug assignment (mdhr=11), and never
# used it in the output; an unused `timed` variable is dropped too.
# TODO(review): the echo likely *intended* to show the 12-hour value —
# confirm before changing the displayed format.
echo -e "${CCLOCK} $hour:$min $part"
;;
DATE)
DATE=$(date +'%Y-%m-%d')
echo "$DATE"
esac
}
# Build "<icon> <name>" for the currently focused window (via xdotool/xprop).
# Empty WM_CLASS means the desktop has focus; chrome variants are unified.
function focus () {
wnd_focus=$(xdotool getwindowfocus)
wnd_title=$(xprop -id $wnd_focus WM_CLASS | grep -Po "\".*?\"" | head -1 | grep -Po "[^\"]*" )
if [[ "$wnd_title" == '' ]]; then
wnd_title='Desktop'
elif [[ "$wnd_title" == 'google-chrome' ]]; then
wnd_title='Chrome'
elif [[ "$wnd_title" == 'google-chrome-unstable' ]]; then
wnd_title='Chrome'
# FIX: was "$wnd_tittle" (typo), so this branch compared an empty string
# and Telegram windows never got their friendly name.
elif [[ "$wnd_title" == "telegram-desktop" ]]; then
wnd_title='Telegram'
fi
#echo -e "${wnd_title}"
echo -e "$(window_icon $wnd_title) $wnd_title"
}
# Panel segment: distro icon (icon_arch from the sourced panel_icons) + name.
function Distro () {
  printf '%s\n' "%{T2}${icon_arch}%{T1} ArchLinux"
}
# Battery segment: charging icon, or a level icon by remaining percentage,
# followed by "N%". Reads state via acpi.
function batery () {
status=$(acpi -b | awk '{print $3}' | cut -d ',' -f1)
batery=$(acpi -b | grep -P -o '[0-9]+(?=%)')
if [ $status == "Charging" ]; then
icon=$CAC
else
# FIX: the original double-ended ranges had holes (exactly 90, 70, 50,
# 30, 15 and the 8-14 band matched no branch, leaving $icon unset/stale).
# The elif chain already bounds each branch from above, so only the lower
# bounds are needed; the final else catches everything at or below 15.
if [ $batery -gt 90 ]; then
icon=$BAT100
elif [ $batery -gt 70 ]; then
icon=$BAT70
elif [ $batery -gt 50 ]; then
icon=$BAT50
elif [ $batery -gt 30 ]; then
icon=$BAT30
elif [ $batery -gt 15 ]; then
icon=$BAT15
else
icon=$BAT7
fi
fi
echo -e "${icon} ${batery}%"
}
# CPU/memory segment.
#   MEM  -> "${CCPU} used/totalMb"  (from free -m)
#   TEMP -> "<t>°C"                 (thermal_zone0 millidegrees / 1000 via bc)
function cpu () {
case $1 in
MEM)
mem=$(free -m | grep Mem: | awk '{printf $3 "/" $2 "Mb"}')
echo -e "${CCPU} ${mem}"
;;
TEMP)
temp=$(echo "scale=1; " `cat /sys/class/thermal/thermal_zone0/temp` "/1000" | bc)
echo "${temp}°C"
;;
esac
}
# Home-partition "free/total" disk space segment.
# NOTE(review): the grep after `tail -1` filters a single already-selected
# line — presumably redundant; verify before removing.
function drives () {
DISK=$( df -h /home | tail -1 | grep 'Filesystem\|/home*' | awk '{printf $4 "/" $2}')
echo -e "%{T2}${icon_home}%{T1} ${DISK} "
}
# Network segment: wifi SSID if associated, else the name of an interface in
# "state UP", else "Down".
function net () {
local _GETIWL=$(iwgetid -r)
local _GETETH=$(ip a | grep "state UP" | awk '{ORS=""}{print $2}' | cut -d ':' -f 1)
# Prefer the SSID; fall back to the wired interface, then the literal "Down".
local _status=${_GETIWL:-$_GETETH}
local _status2="${_status:-Down}"
echo -e "${CWIFI} ${_status2}"
}
# Volume segment from pulseaudio-ctl full-status: "Mute" when muted,
# otherwise "${CVOLUME} N%".
# (The awk split takes field 1 = volume and field 2 = mute flag of the
# last status line.)
function volume () {
VOLUME=$(pulseaudio-ctl full-status | awk '{split($0, array, " ")} END{print array[1]}')
MUTE=$(pulseaudio-ctl full-status | awk '{split($0, array, " ")} END{print array[2]}')
if [ "$MUTE" == "yes" ]; then
echo "Mute"
else
#echo -e $(pTextUnderline ${WHITE} ${BLUE} "${VOLUME}%")
echo -e "${CVOLUME} $VOLUME%"
fi
}
# MPD segment: truncated "artist - title" while playing, "Paused", or
# " No Sound" when mpd is idle/stopped.
function music (){
# The "#"-containing status line starts with "[playing]"/"[paused]".
local stat="$(mpc status | grep \# | awk '{print $1}')"
local artist=$(mpc -f %artist% current)
local musicname=$(mpc -f %title% current)
local cmd=""
if [ "${stat}" ] && [ "${stat}" = "[playing]" ] ; then
# Truncate to 10 chars of artist + 15 of title to fit the bar.
cmd="${artist:0:10} - ${musicname:0:15}"
elif [ "${stat}" ] && [ "${stat}" = "[paused]" ] ; then
cmd="Paused"
else
cmd=" No Sound"
fi
echo -e "${CSOUND} ${cmd}"
}
# Status line producer: every 0.5s emit one "S"-prefixed line; lemonbar
# markup centers the music segment (%{c}) and right-aligns the rest (%{r}).
while :; do
echo "S %{F#917154} \
%{c} $(music) \
%{r} \
$(net) | $(cpu MEM) | $(drives) | $(volume) | $(batery) | $(Distro) | $(timeday HOUR)"
sleep .5
done
} > "$PANEL_FIFO" &
source $(dirname $0)/panel_colors
# panel_bar formats fifo lines for lemonbar; lemonbar's stdout carries click
# commands which are executed by the trailing sh/eval pipeline.
$(dirname $0)/panel_bar < "$PANEL_FIFO" | lemonbar \
-a 32 \
-n "$PANEL_WM_NAME" \
-g 1366x22+0+0 \
-f ${FONT} \
-f ${FONT2} \
-f ${ICONS} \
-f ${ICONS2} \
-F "$COLOR_DEFAULT_FG" \
-B "$COLOR_DEFAULT_BG" | sh | while read line; do eval "$line"; done &
# Wait (up to ~1s) for the bar window to appear, then stack it just above
# the root window so it does not cover fullscreen clients.
wid=$(xdo id -a "$PANEL_WM_NAME")
tries_left=20
while [ -z "$wid" -a "$tries_left" -gt 0 ] ; do
sleep 0.05
wid=$(xdo id -a "$PANEL_WM_NAME")
tries_left=$((tries_left - 1))
done
[ -n "$wid" ] && xdo above -t "$(xdo id -N Bspwm -n root | sort | head -n 1)" "$wid"
wait
|
86122c3c1bccb45ce71722d1f60fa3686511cdaf
|
Shell
|
neelimapp/napalm_custom_test
|
/napalm_custom_test/test_commit_confirm.sh
|
UTF-8
| 534
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Run the napalm commit-confirm test suite against a Cisco IOS device.
# Exits 1 if any test fails, 0 otherwise.
RETURN_CODE=0
PYTEST="/home/gituser/VENV/napalm_auto_test/bin/py.test"
# Exit on the first test failure and set RETURN_CODE = 1
# FIX: continuation backslashes were missing after the py.test invocations,
# so the subsequent lines began with a bare '&&' — a shell syntax error —
# and the chain (and the final || fallback) never applied.
echo "Starting tests...good luck:" \
&& echo \
&& echo "---- Commit Confirm Methods ----" \
&& echo "Cisco IOS" \
&& $PYTEST -s -v test_napalm_cfg.py::test_commit_confirm --test_device ios \
&& $PYTEST -s -v test_napalm_cfg.py::test_commit_confirm_noconfirm --test_device ios \
&& $PYTEST -s -v test_napalm_cfg.py::test_commit_confirm_revert --test_device ios \
|| RETURN_CODE=1
exit $RETURN_CODE
| true
|
e113ad3cd372bc98754e86dc7e6e47a41ab4a05c
|
Shell
|
lianshufeng/docker
|
/native/install_k8s.sh
|
UTF-8
| 2,425
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
# For now only compatible with CentOS 7 and above.
#
#
# Install docker (without docker-compose) via the companion install script.
install_docker(){
echo "install docker "
curl -fsSL https://raw.githubusercontent.com/lianshufeng/docker/master/native/install_docker_no_compose.sh | sh
}
# Upgrade to docker-ce via the companion update script (currently unused).
update_docker(){
echo "update docker "
curl -fsSL https://raw.githubusercontent.com/lianshufeng/docker/master/native/update_docker.sh | sh
}
# Install Kubernetes (kubelet/kubeadm/kubectl) from the Aliyun mirror repo,
# with SELinux set to permissive as kubeadm requires.
install_k8s(){
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
# Aliyun mirror
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Set SELinux in permissive mode (effectively disabling it)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
systemctl enable --now kubelet
}
# Enable bridged traffic to pass through iptables (required by kube-proxy/CNI).
set_Firewall(){
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
}
# System configuration: disable swap, open Kubernetes ports, disable SELinux.
config_os(){
# Disable swap (kubelet refuses to run with swap enabled)
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Firewall: control-plane ports (apiserver, etcd, kubelet, scheduler,
# controller-manager), NodePort range and flannel udp ports.
firewall-cmd --add-port=6443/tcp --permanent
firewall-cmd --add-port=2379-2380/tcp --permanent
firewall-cmd --add-port=10250/tcp --permanent
# FIX: was "--permanen" (missing 't'), so firewall-cmd rejected the flag
# and the 10251 rule was never persisted.
firewall-cmd --add-port=10251/tcp --permanent
firewall-cmd --add-port=10252/tcp --permanent
firewall-cmd --add-port=1025/tcp --permanent
firewall-cmd --add-port=30000-32767/tcp --permanent
firewall-cmd --add-port=8285/udp --permanent
firewall-cmd --add-port=8472/udp --permanent
firewall-cmd --reload
# Disable SELinux permanently
setenforce 0
echo "SELINUX=disabled" > /etc/selinux/config
echo "SELINUXTYPE=targeted" >> /etc/selinux/config
}
# Pre-pull Kubernetes images from a China-reachable mirror and retag them
# as k8s.gcr.io/* so kubeadm finds them locally (currently unused).
# Image list from: kubeadm config images list
pull_image(){
# FIX: the original used a bash array under a #!/bin/sh shebang, which
# fails on POSIX shells such as dash; a plain word list is portable.
for imageName in \
  kube-apiserver:v1.15.2 \
  kube-controller-manager:v1.15.2 \
  kube-scheduler:v1.15.2 \
  kube-proxy:v1.15.2 \
  pause:3.1 \
  etcd:3.3.10 \
  coredns:1.3.1
do
  docker pull gcr.azk8s.cn/google-containers/$imageName
  docker tag gcr.azk8s.cn/google-containers/$imageName k8s.gcr.io/$imageName
  docker rmi gcr.azk8s.cn/google-containers/$imageName
done
}
# Execution order: sysctl/firewall prep, OS config, docker, then Kubernetes.
set_Firewall
config_os
install_docker
#update_docker
install_k8s
#pull_image
| true
|
b3cbdd163f19e4e80bf980a732424fa17aac981d
|
Shell
|
melonpan0130/Shell_Program
|
/03_setvar.sh
|
UTF-8
| 109
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrates shell variable assignment, expansion inside double quotes,
# and capturing command output into a variable.
name="Eunju"
echo "$name"
fruits="apple"
echo "I like $fruits"
today=$(date)
echo "Today is $today"
| true
|
6d86a36b40964507ea4a72c2725f7528465e8544
|
Shell
|
jitsi/jitsi-videobridge
|
/resources/graceful_shutdown.sh
|
UTF-8
| 3,467
| 4.28125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# 1. The script issues shutdown command to the bridge over REST API.
# If HTTP status code other than 200 is returned then it exits with 1.
# 2. If the code is ok then it checks if the bridge has exited.
# 3. If not then it polls bridge statistics until participant count drops to 0.
# 4. Gives some time for the bridge to shutdown. If it does not quit after that
# time then it kills the process. If the process was successfully killed 0 is
# returned and 1 otherwise.
#
# Arguments:
# "-p"(mandatory) the PID of jitsi Videobridge process
# "-h"("http://localhost:8080" by default) REST requests host URI part
# "-t"("25" by default) number of seconds we wait for the bridge to shutdown
# gracefully after participant count drops to 0
# "-s"(disabled by default) enable silent mode - no info output
#
# NOTE: script depends on the tool jq, used to parse json
#
# Initialize arguments
hostUrl="http://localhost:8080"
timeout=25
verbose=1
# Parse arguments
OPTIND=1
while getopts "p:h:t:s" opt; do
case "$opt" in
p)
pid=$OPTARG
;;
h)
hostUrl=$OPTARG
;;
t)
timeout=$OPTARG
;;
s)
verbose=0
;;
esac
done
shift "$((OPTIND-1))"
# Try the pid file, if no pid was provided as an argument.
# for systemd we use different pid file in a subfolder
if [ "$pid" = "" ] ;then
if [ -f /var/run/jitsi-videobridge.pid ]; then
pid=`cat /var/run/jitsi-videobridge.pid`
else
pid=`cat /var/run/jitsi-videobridge/jitsi-videobridge.pid`
fi
fi
#Check if PID is a number
re='^[0-9]+$'
if ! [[ $pid =~ $re ]] ; then
echo "error: PID is not a number" >&2; exit 1
fi
# Returns local participant count by calling JVB REST statistics API and extracting
# participant count from JSON stats text returned.
function getParticipantCount {
# Total number of participants minus the remote (octo) participants
curl -s "$hostUrl/colibri/stats"| jq '.participants - .octo_endpoints'
}
# Emit an informational message to stdout, unless silent mode (-s) set
# verbose=0.
function printInfo {
  if [[ "$verbose" == "1" ]]; then
    echo "$@"
  fi
}
# Emit an error message to stderr (always, regardless of silent mode).
function printError {
  >&2 echo "$@"
}
# Ask the bridge to enter graceful shutdown; only the HTTP status matters.
shutdownStatus=`curl -s -o /dev/null -H "Content-Type: application/json" -d '{ "graceful-shutdown": "true" }' -w "%{http_code}" "$hostUrl/colibri/shutdown"`
if [ "$shutdownStatus" == "200" ]
then
printInfo "Graceful shutdown started"
# Poll every 10s until all local participants have left.
participantCount=`getParticipantCount`
while [[ $participantCount -gt 0 ]] ; do
printInfo "There are still $participantCount participants"
sleep 10
participantCount=`getParticipantCount`
done
sleep 5
# Escalation: give the process $timeout more seconds, then TERM it.
if ps -p $pid > /dev/null 2>&1
then
printInfo "It is still running, lets give it $timeout seconds"
sleep $timeout
if ps -p $pid > /dev/null 2>&1
then
printError "Bridge did not exit after $timeout sec - killing $pid"
kill $pid
fi
fi
# check for 3 seconds if we managed to kill
for I in 1 2 3
do
if ps -p $pid > /dev/null 2>&1
then
sleep 1
fi
done
# Last resort: SIGKILL; give up with exit 1 if even that fails.
if ps -p $pid > /dev/null 2>&1
then
printError "Failed to kill $pid"
printError "Sending force kill to $pid"
kill -9 $pid
if ps -p $pid > /dev/null 2>&1
then
printError "Failed to force kill $pid, giving up."
exit 1
fi
fi
# Clean up stale pid files (both classic and systemd locations).
rm -f /var/run/jitsi-videobridge.pid
rm -f /var/run/jitsi-videobridge/jitsi-videobridge.pid
printInfo "Bridge shutdown OK"
exit 0
else
printError "Invalid HTTP status for shutdown request: $shutdownStatus"
exit 1
fi
| true
|
682f2ec27f869b27d2f60ff47cde1038b23740d0
|
Shell
|
KaOSx/main
|
/dconf/PKGBUILD
|
UTF-8
| 806
| 2.921875
| 3
|
[] |
no_license
|
# Arch-style PKGBUILD for dconf (GNOME's low-level configuration backend),
# built out-of-tree with meson/ninja.
pkgname=dconf
pkgver=0.40.0
_pkgver=0.40
pkgrel=8
pkgdesc="A low-level configuration system."
arch=('x86_64')
url="https://wiki.gnome.org/Projects/dconf"
license=('LGPL2.1')
depends=('glib2')
makedepends=('gobject-introspection' 'intltool' 'docbook-xsl' 'python3' 'meson' 'ninja')
source=("https://download.gnome.org/sources/dconf/${_pkgver}/${pkgname}-${pkgver}.tar.xz")
sha256sums=('cf7f22a4c9200421d8d3325c5c1b8b93a36843650c9f95d6451e20f0bcb24533')
prepare() {
cd ${pkgname}-${pkgver}
# Comment the tests subdir out of meson.build to skip the test suite.
sed -e "s|subdir('tests')|#subdir('tests')|" -i meson.build
}
build() {
mkdir -p build
cd build
meson setup ../${pkgname}-${pkgver} \
  --prefix=/usr \
  --buildtype=release \
  --libexecdir=/usr/lib/dconf \
  -D vapi=false
ninja
}
package() {
cd build
DESTDIR=${pkgdir} ninja install
}
| true
|
e65e6ba1e7ceb085f9e5a0ba8bd00f426ed8213c
|
Shell
|
zimmicz/volby-cz-parser
|
/bin/extract_data.sh
|
UTF-8
| 1,005
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Download election-result XML files listed in an input file, in parallel.
# Usage: extract_data.sh --input-file|-i <file> --work-dir|-w <dir>
# The input file has "<url> <name>" per line; each is fetched to <dir>/<name>.xml.
CUR_DIR=$(dirname "$0")
source "${CUR_DIR}/../etc/settings.env"
# Input parameters
# FIX: `[[ $# > 0 ]]` performs a *string* comparison; use -gt for arithmetic.
# Expansions below are quoted so paths with spaces survive.
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
    -i|--input-file)
    INPUT_FILE="$2"
    shift
    ;;
    -w|--work-dir)
    WORK_DIR="$2"
    shift
    ;;
    *)
    echo "Usage: `basename $0` --input-file|-i [input_file] --work-dir|-w [working_directory]"
    exit 1
    ;;
esac
shift
done
# /Input parameters
if [[ -z $INPUT_FILE ]]
then
echo "Please specify --input-file"
exit 1
fi
if [[ -z $WORK_DIR ]]
then
echo "Please specify --work-dir"
exit 1
fi
mkdir "${WORK_DIR}"
if [[ ! $? -eq 0 ]]
then
echo "${WORK_DIR} creation failed"
exit 1
fi
# Column 1 = URL, column 2 = output basename.
parallel -a "${INPUT_FILE}" --colsep ' ' --gnu wget --quiet --no-check-certificate {1} -O "${WORK_DIR}"/{2}.xml
# Optionally fetch the foreign (abroad) results file from settings.env.
if [[ -n $FOREIGN_RESULTS_URL ]]
then
wget --quiet --no-check-certificate -O "${WORK_DIR}"/foreign.xml "$FOREIGN_RESULTS_URL"
fi
| true
|
8d19123b8012b44fc2b461724b83c80a7e67e90d
|
Shell
|
Allypost/bash-scripts
|
/choose-sound-profile-output
|
UTF-8
| 1,546
| 3.828125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# rofi menu to switch the PulseAudio sound-card output profile.
entries=()
message=''
# List selectable output profiles from `pactl list cards`, one per line,
# formatted "<description>\t[profile: <name>]". Input-only and unavailable
# profiles are filtered out.
function list-entries() {
pactl list cards |
grep -P '\t\toutput:' |
grep -Ev "input" |
grep -Ev '\Wavailable: no\W' |
cut -d':' -f2- |
awk -F ' Output ' '{print $1}' |
awk -F ': ' '{print $2 "\t" "[profile: " $1 "]"}'
}
# Rebuild the global entries[] array from list-entries output, splitting on
# newlines only (the IFS juggling preserves spaces inside each entry).
function update_entries() {
SAVEIFS="$IFS" # Save current IFS
IFS=$'\n' # Change IFS to new line
entries="$(list-entries)"
entries=($entries)
IFS="$SAVEIFS" # Restore IFS
}
# Pipe the current entries into rofi; prints the user's selection on stdout.
function display_rofi() {
for entry in "${entries[@]}"; do
echo -e "$entry"
done | rofi_config
}
# Shared rofi invocation: dmenu mode, full-width top bar, Alt+a bound to
# kb-custom-19 (exit code 28, used as "refresh" by the caller). Consumes
# and resets the global $message.
function rofi_config() {
_message="Alt+a - Refresh $message"
message=''
rofi \
-i \
-theme slate-slim \
-no-custom \
-location 1 \
-theme-str '
window {
font: "Noto Sans 10";
padding: 0;
width: 100%;
}
listview {
lines: 5;
}
' \
-mesg "$_message" \
-p 'Sound profile' \
-dmenu \
-kb-custom-19 'Alt+a' \
$@
}
# Extract the profile id from an entry line shaped
# "<description>\t[profile: <id>]" (the format produced by list-entries).
function get-profile() {
  printf '%s\n' "$1" | sed --regexp-extended "s|.*?\t\[profile\: (.+?)\]$|\1|"
}
# Apply the given output profile ($1) to the sound card.
# NOTE(review): always targets the *first* card listed by pactl; multi-card
# setups would need the card id carried in the menu entry.
function set-sound-profile-to() {
CARD_ID="$(
pactl list short cards |
head -n1 |
cut -f1
)"
pactl set-card-profile "$CARD_ID" "output:$1"
}
update_entries
# Event loop: exit code 0 applies the chosen profile, 28 (Alt+a via
# kb-custom-19) refreshes the entry list, anything else aborts.
while true; do
chosen="$(display_rofi)"
exit_code="$?"
chosen="$(get-profile "$chosen")"
case "$exit_code" in
0)
set-sound-profile-to "$chosen"
exit
;;
28)
update_entries
message="| Synced"
;;
*)
exit 1
;;
esac
done
| true
|
157847c4fbfc8127c4f218865a2ec802974d51aa
|
Shell
|
18hb/dotfiles
|
/bashrc
|
UTF-8
| 336
| 2.796875
| 3
|
[] |
no_license
|
#if [ -f ~/dotfiles/bashrc ]; then
# . ~/dotfiles/bashrc
#fi
export LANG=ja_JP.UTF-8
alias ls='ls -G'
alias ll='ls -al'
alias vi='vim'
alias tmux='tmux -2'
#source /etc/bash_completion.d/git
# Count how many times this rc file has been sourced in the session.
if [ "$USER_RCFILE_LOADED" = "" ]; then
export USER_RCFILE_LOADED=1
else
# FIX(idiom): use arithmetic expansion instead of spawning the external
# `expr` in deprecated backticks on every shell startup.
export USER_RCFILE_LOADED=$((USER_RCFILE_LOADED + 1))
fi
| true
|
1a36c209e32563a7a8adbb64b8f276105b4d558e
|
Shell
|
Tubbz-alt/anarel-manage
|
/recipes/external/yaml-cpp/build.sh
|
UTF-8
| 369
| 2.65625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash -x
# conda-build script for yaml-cpp: pin a known-good commit, cmake build,
# install the shared library and headers into $PREFIX.
echo "######## env ############"
env
echo "#########################"
# reset to master branch as of Feb 22, 2017, 738 commits to master,
# whereas 681 to 0.5.3 branch that still uses boost
git reset --hard bedb28fdb4fd52d97e02f6cb946cae631037089e
mkdir build
cd build
cmake -DBUILD_SHARED_LIBS=ON ..
make
# -P preserves symlinks so the versioned .so names stay intact.
cp -P libyaml-cpp* $PREFIX/lib/
cp -r ../include/yaml-cpp $PREFIX/include
| true
|
9c137a863f94fdd260012e10bd7dd19a85c5b5e1
|
Shell
|
mlangbehn/ursula
|
/bin/ursula
|
UTF-8
| 427
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run an ansible playbook against an environment directory.
# $1: environment dir (contains hosts and optional ssh_config); $2: playbook.
env=$1
playbook=$2
hosts=$1/hosts
ssh_config=$1/ssh_config
# Multiplex SSH connections for speed (ControlMaster/ControlPersist).
export ANSIBLE_SSH_ARGS="-o ControlMaster=auto -o ControlPath=~/.ssh/ursula-%l-%r@%h:%p -o ControlPersist=yes "
# Use the environment's ssh_config if present.
if [ -e $ssh_config ]; then
export ANSIBLE_SSH_ARGS="$ANSIBLE_SSH_ARGS -F $ssh_config"
fi
export ANSIBLE_NOCOWS=1
ansible-playbook \
--inventory-file $hosts \
--user root \
--module-path ./library \
--connection ssh \
$playbook
| true
|
954b038d227c51af299a372826e9be78941b77a3
|
Shell
|
enixdark/gitops-demo
|
/03_Install_Helm/01_install_helm.sh
|
UTF-8
| 533
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Install Helm (client + Tiller) on a minikube cluster.
# 0. Install minikube
#
# Follow the instructions at: https://kubernetes.io/docs/tasks/tools/install-minikube/
# 1. Start minikube
minikube start
# 2. Install the Helm Client
curl -LO https://git.io/get_helm.sh
chmod 700 get_helm.sh
./get_helm.sh
# 3. Install Tiller (server side) with a cluster-admin service account
kubectl -n kube-system create sa tiller
kubectl create clusterrolebinding tiller-cluster-rule \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:tiller
helm init --skip-refresh --upgrade --service-account tiller --history-max 10
| true
|
a5f73866771557b83ca69a4d4a81717dee6bb2ec
|
Shell
|
RyanMcG/dotfiles
|
/bin/myredshift
|
UTF-8
| 249
| 2.765625
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Run redshift with per-host gamma/color-temperature settings; any extra
# arguments are forwarded to redshift.
set -e
HOSTN=`hostname`
# Defaults for hosts not matched below.
GAMMA=1
HI=6500
LO=5000
case $HOSTN in
uecf4bb44a7f953ee8483) # Amazon Ubuntu Laptop
;;
zenbook)
GAMMA="0.9:0.95:0.8"
HI=5200
LO=4000
;;
esac
# FIX: "$@" must be quoted so forwarded arguments containing spaces are
# passed through as single words.
redshift -m randr -g $GAMMA -l 38:-122 -t "$HI:$LO" "$@"
| true
|
70991319650ed94ab41431d7c141504f513b86d0
|
Shell
|
lafiosca/lard-example-crud-api
|
/setup/create-cognito-stacks.sh
|
UTF-8
| 1,004
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy the development and production Cognito CloudFormation stacks.
# "set -e" makes it so if any step fails, the script aborts:
set -e
# Fail early with a message if uuidgen is missing (under set -e, the
# successful echo is negated by `!`, which aborts the script).
uuidgenPath=`which uuidgen` || ! echo 'uuidgen not found in path'
echo "Using ${uuidgenPath}"
# Run from the directory containing this script; stack names and template
# paths come from definitions.sh.
cd "${BASH_SOURCE%/*}"
source ./definitions.sh
echo
echo "Creating Cognito resource stacks for ${ProjectName}"
echo
echo "1. Creating development Cognito resource stack ${DevCognitoStack}"
aws cloudformation deploy \
--template-file $CognitoTemplateFile \
--stack-name $DevCognitoStack \
--capabilities CAPABILITY_NAMED_IAM \
--parameter-overrides \
ParentStack=$DevStack \
UserPoolSmsExternalId=`uuidgen`
echo
echo "2. Creating production Cognito resource stack ${ProdCognitoStack}"
aws cloudformation deploy \
--template-file $CognitoTemplateFile \
--stack-name $ProdCognitoStack \
--capabilities CAPABILITY_NAMED_IAM \
--parameter-overrides \
ParentStack=$ProdStack \
UserPoolSmsExternalId=`uuidgen`
echo
echo "Done creating Cognito resource stacks for ${ProjectName}"
| true
|
b109f3be7f39f8ccb1f7e11c69f3b8b03578b38c
|
Shell
|
hsivonen/packed_simd
|
/ci/android-install-ndk.sh
|
UTF-8
| 735
| 2.59375
| 3
|
[
"MIT",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env sh
# Copyright 2016 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# Download and unpack the Android NDK into ./ndk for CI builds.
set -ex
ANDROID_NDK_URL=https://dl.google.com/android/repository
ANDROID_NDK_ARCHIVE=android-ndk-r25b-linux.zip
curl -fO "$ANDROID_NDK_URL/$ANDROID_NDK_ARCHIVE"
unzip -q $ANDROID_NDK_ARCHIVE
rm $ANDROID_NDK_ARCHIVE
mv android-ndk-* ndk
# NOTE(review): after the mv above the glob no longer matches anything;
# with -f this is a harmless no-op, presumably kept as belt-and-braces.
rm -rf android-ndk-*
| true
|
81323356bd5efc3c2cf3941b5b858368be961e3c
|
Shell
|
platform-project/nojitsu
|
/bin/nojitsu
|
UTF-8
| 588
| 3
| 3
|
[] |
no_license
|
#!/bin/sh
# Project: nojitsu
# Description: System of nothingness
# Paradigm: Spirit of absence. No thought. No code.
# Version: 0.0
# Author: The Platform Authors <platform@entilda.com>
# Homepage: https://platform-project.github.io/nojitsu
#
# Recite each mantra and discard it to /dev/null, in keeping with the
# spirit of absence. Produces no visible output.
nojitsu(){
for mantra in \
	"nojitsu: spirit of absence" \
	"nojitsu: spirit of nought" \
	"nojitsu: system of nothingness" \
	"nojitsu: no form" \
	"nojitsu: no code" \
	"nojitsu: no blame" \
	"nojitsu: no fault"
do
	echo "$mantra" > /dev/null
done
}
nojitsu
| true
|
1b42917384d8f4d754d44fe50e6f7758222ceb13
|
Shell
|
mgborges/varCalling
|
/exoma_23.sh
|
UTF-8
| 6,275
| 3
| 3
|
[] |
no_license
|
## Processing for exome data
## This uses only single-subject calls
## By Benilton Carvalho - Nov/14
## Altered by Murilo - Jun/16
# Batch configuration and tool/reference locations; everything used by the
# functions below is exported because they run in GNU parallel subshells.
BATCH=exoma23
THREADS=10
DATAPATH=/home/bioinf/exoma/exoma23/raw
export TARGETS=/home/benilton/exoma23/agilentV5UTR_Targets.list
export BAITS=/home/benilton/exoma23/agilentV5UTR_Targets.list
export REF=/home/bioinf/ref/Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/genome.fa
export PICARD="java -jar -Xmx20G /home/benilton/bin/picard-tools-1.137/picard.jar"
export FASTQC=/home/benilton/bin/FastQC/fastqc
export BWA="/home/benilton/bin/bwa-0.7.12/bwa"
export GATK="java -jar /home/benilton/bin/GATK/GenomeAnalysisTK.jar"
export KNOWN1="/home/benilton/gatkbundle/Mills_and_1000G_gold_standard.indels.hg19.sites.vcf"
export KNOWN2="/home/benilton/gatkbundle/1000G_phase1.indels.hg19.sites.vcf"
function picard_hsmetrics
{
# arg1: input BAM
# arg2: output dir
# Hybrid-selection (target coverage) metrics for one BAM via Picard.
OUTFILE=${2}/$(basename ${1}).metrics
${PICARD} CalculateHsMetrics VERBOSITY=WARNING BAIT_INTERVALS=${BAITS} TARGET_INTERVALS=${TARGETS} INPUT=${1} OUTPUT=${OUTFILE} PER_TARGET_COVERAGE=${OUTFILE}.unit REFERENCE_SEQUENCE=${REF}
}
function run_fastqc
{
## arg1/arg2: fastq files; arg3: output dir
OUTDIR=$3
${FASTQC} ${1} ${2} --outdir ${OUTDIR}
}
function bwa_align_md
{
## Arg 1: fq1
## Arg 2: fq2
## Arg 3: number of threads
## Arg 4: output dir
# Derive the sample name by stripping barcode/read-number suffixes.
sn=$(basename ${1})
sn=${sn%_A*}
sn=${sn%_T*}
sn=${sn%_C*}
sn=${sn%_G*}
sn=${sn%_R*}
# Flowcell.lane read-group id taken from the first fastq header line.
fcln=$(zcat ${1} | head -n 1 | cut -f3-4 -d:)
fcln=${fcln/\:/\.}
lane=$(zcat ${1} | head -n 1 | cut -f4 -d:)
bamout="${4}/${sn}.$lane.bam"
samout="${4}/${sn}.$lane.sam"
header="@RG\\tID:${fcln}\\tSM:${sn}\\tPL:ILLUMINA\\tLB:${sn}\\tCN:LaCTAD"
date >> ${4}/${sn}.date
# Align, coordinate-sort, mark duplicates, index, validate.
${BWA} mem -M -t ${3} -R ${header} ${REF} ${1} ${2} > ${samout}
${PICARD} SortSam INPUT=${samout} OUTPUT=${bamout}.withdups.bam SO=coordinate
rm ${samout}
${PICARD} MarkDuplicates INPUT=${bamout}.withdups.bam OUTPUT=${bamout} METRICS_FILE=${bamout}.metrics
rm ${bamout}.withdups.bam
${PICARD} BuildBamIndex INPUT=${bamout}
${PICARD} ValidateSamFile INPUT=${bamout} OUTPUT=${bamout}.validation VALIDATE_INDEX=true
date >> ${4}/${sn}.date
}
function run_gatk {
## Arg 1: sample (this will be looked for at {sample}.bam
# GATK3 workflow: indel realignment then base-quality recalibration.
# NOTE(review): removes intermediates (input BAM, realigned BAM) as it goes.
INPUTBAM="${1}"
INTERVALS="${INPUTBAM}.intervals"
REALNBAM="${INPUTBAM}.realn.bam"
RECALCSV="${INPUTBAM}.recal.csv"
RECALBAM="${INPUTBAM}.recal.bam"
VCF="results/$(basename ${INPUTBAM}).g.vcf"
${GATK} -T RealignerTargetCreator -R ${REF} -I ${INPUTBAM} -o ${INTERVALS} -L ${TARGETS} -ip 200
${GATK} -T IndelRealigner -R ${REF} -I ${INPUTBAM} -targetIntervals ${INTERVALS} -o ${REALNBAM} -compress 0 --disable_bam_indexing -L ${TARGETS} -ip 200
rm ${INPUTBAM}
${PICARD} BuildBamIndex INPUT=${REALNBAM}
${GATK} -T BaseRecalibrator -R ${REF} -I ${REALNBAM} -o ${RECALCSV} -knownSites ${KNOWN1} -knownSites ${KNOWN2} -L ${TARGETS} -ip 200
${GATK} -T PrintReads -R ${REF} -I ${REALNBAM} -BQSR ${RECALCSV} -o ${RECALBAM}
rm ${REALNBAM}
${PICARD} BuildBamIndex INPUT=${RECALBAM}
}
function run_gatk_MERGED {
## Arg 1: sample (this will be looked for at {sample}.bam
# For merged BAMs: indel realignment then HaplotypeCaller in GVCF mode.
# The && guards the rm so the input survives a failed realignment.
INPUTBAM="${1}"
INTERVALS="${INPUTBAM}.intervals"
REALNBAM="${INPUTBAM}.realn.bam"
VCF="results/$(basename ${INPUTBAM}).g.vcf"
${GATK} -T RealignerTargetCreator -R ${REF} -I ${INPUTBAM} -o ${INTERVALS} -L ${TARGETS} -ip 200
${GATK} -T IndelRealigner -R ${REF} -I ${INPUTBAM} -targetIntervals ${INTERVALS} -o ${REALNBAM} -compress 0 --disable_bam_indexing -L ${TARGETS} -ip 200 &&
rm ${INPUTBAM}
${PICARD} BuildBamIndex INPUT=${REALNBAM}
${GATK} -T HaplotypeCaller -R ${REF} -I ${REALNBAM} -o ${VCF} -L ${TARGETS} -ip 200 -ERC GVCF --variant_index_type LINEAR --variant_index_parameter 128000
gzip ${VCF}
}
function mergegz
{
# Concatenate per-lane gzipped fastqs into one R1/R2 pair per sample.
# arg1: directory containing the .gz files (output goes to its parent).
f=$1
cd $f
filename=`ls *gz | awk -F "_R" '{print $1}' | uniq`
for file in $filename
do
zcat $file*R1* | gzip > ../$file.R1.fastq.gz
zcat $file*R2* | gzip > ../$file.R2.fastq.gz
done
cd ..
}
function findadapters
{
# Detect adapter sequences in a fastq pair with AdapterRemoval.
# arg1: R1 fastq (R2 name derived by substitution); arg2: output dir.
r1=$1
r2=`echo $r1 | sed 's/R1/R2/g' | sed 's/trim\_1/trim\_2/'`
filename=`echo $(basename ${r1}) | awk -F '_R' '{print $1}'`
AdapterRemoval --identify-adapters --file1 $r1 --file2 $r2 > $2/$filename.AdapterRemoval
}
function trimadapters
{
# Trim adapters with trim_galore and rename outputs to *_trim_*.fastq.gz.
# arg1: R1 fastq; arg2: working directory (reports are moved to ../qc).
r1=$1
r2=`echo $r1 | sed 's/R1/R2/g'`
filename=`echo $(basename ${r1}) | awk -F '_R' '{print $1}'`
cd $2
~murilo/trim_galore_zip/trim_galore --gzip --length 100 --paired $r1 $r2 --path_to_cutadapt /home/murilo/cutadapt-1.10/bin/cutadapt
mv $filename"_R1_val_1.fq.gz" $filename"_R1_trim_1.fastq.gz"
mv $filename"_R2_val_2.fq.gz" $filename"_R2_trim_2.fastq.gz"
mv $filename*_trimming_report.txt ../qc
cd ..
}
function mergeBAM {
# Merge a sample's recalibrated BAMs across batches, index the result and
# delete the inputs (the && guards indexing on merge success).
cd bams
sample=$1
bam=$sample.merged.bam
files=`ls /home/bioinf/exoma/exoma23*/bams/$sample*recal.bam | awk '{print "INPUT=" $1}' | uniq`
$PICARD MergeSamFiles $files OUTPUT=$bam 2> $bam.MergeSamFiles.log &&
${PICARD} BuildBamIndex INPUT=${bam}
filesToRemove=`echo $files | sed 's/INPUT=//g'`
rm $filesToRemove
cd ..
}
# Export the functions so GNU parallel worker shells can call them.
export -f picard_hsmetrics
export -f run_fastqc
export -f bwa_align_md
export -f run_gatk
export -f findadapters
export -f trimadapters
export -f mergeBAM
export -f run_gatk_MERGED
# Pipeline: QC raw reads, detect+trim adapters, re-QC trimmed reads,
# align, coverage metrics, GATK per-sample processing.
mkdir -p ${BATCH}/qc/aln ${BATCH}/bams ${BATCH}/results
cd ${BATCH}
FQ1=$(ls ${DATAPATH}/*R1*gz)
FQ2=$(ls ${DATAPATH}/*R2*gz)
parallel --xapply -j ${THREADS} run_fastqc ::: ${FQ1} ::: ${FQ2} ::: qc
parallel -j ${THREADS} findadapters ::: ${FQ1} ::: qc
THREADS=6
parallel -j ${THREADS} trimadapters ::: ${FQ1} ::: raw
# Switch to the trimmed fastqs for the remainder of the pipeline.
FQ1=$(ls ${DATAPATH}/*R1*trim*gz)
FQ2=$(ls ${DATAPATH}/*R2*trim*gz)
THREADS=10
parallel --xapply -j ${THREADS} run_fastqc ::: ${FQ1} ::: ${FQ2} ::: qc
parallel -j ${THREADS} findadapters ::: ${FQ1} ::: qc
THREADS=7
# 2 concurrent alignments x 7 bwa threads each.
parallel --xapply -j 2 bwa_align_md ::: ${FQ1} ::: ${FQ2} ::: ${THREADS} ::: bams
THREADS=10
parallel -j ${THREADS} picard_hsmetrics ::: $(ls bams/*bam) ::: qc/aln
parallel -j ${THREADS} run_gatk ::: $(ls bams/*bam)
| true
|
d186f2fe2567e999e8382381992d79a00b2f06b6
|
Shell
|
gzchenhj/config
|
/监控web目录/web_ng1.sh
|
UTF-8
| 288
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Tamper check for the nginx web root: verify md5sums against a baseline db
# and check the file count against the expected 1849.
md5=`md5sum -c /scripts/check_md5sum.db|grep -i false|wc -l`
f=`find /usr/local/nginx/html/ -type f |wc -l`
if [ $md5 -ne 0 ]
then
# NOTE(review): this echoes the command *text*, not its output — presumably
# a hint for the operator to run it by hand; confirm intent.
echo "md5sum -c /scripts/check_md5sum.db|grep -i false"
elif [ $f -ne 1849 ]
then
echo "webfile is change" :
else
echo "webfile is ok"
fi
| true
|
62359ce9c7da8a31fa308ecdd178a33d6d3d54ab
|
Shell
|
derekteay/docker-multi-runtime-container
|
/node_test.sh
|
UTF-8
| 1,094
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Script to test node.js runtime: scaffold an Express app and run it briefly.
echo "=============== Node.js =============="
echo "[*] Creating myapp directory..."
echo
# Create directory and cd to it
mkdir myapp
cd myapp
# Create the package.json file
echo "[*] Creating package.json file..."
echo
echo '{
"name": "myapp",
"version": "1.0.0",
"description": "",
"main": "app.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "",
"license": "ISC"
}' > package.json
# Install Express silently
echo "[*] Installing Express..."
echo
npm install express --save > "/dev/null" 2>&1
# Create a sample app.js file
echo "[*] Creating test app.js file..."
echo
echo "const express = require('express')" >> app.js
echo "const app = express()" >> app.js
echo "app.get('/', (req, res) => res.send('Hello World!'))" >> app.js
echo "app.listen(3000, () => console.log('Hello World! Example app listening on port 3000!'))" >> app.js
# Run app.js in the background, sleep to make sure we get an output to the console
echo "[*] Running app.js..."
echo
node app.js &
sleep 3
echo
| true
|
4d34cdd8571ed8222b970b4b10c9a95a1acbf1f1
|
Shell
|
mhinkka/articles
|
/Assessing Big Data SQL Frameworks for Analyzing Event Logs/scripts-aws/test.sh
|
UTF-8
| 3,741
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
#Usage: test.sh <frameworks to test> <test names> <number of events> <run name> <number of repeats>
#Example: . test.sh "presto hive postgresql impala spark spark-caching" "flows" "100000 1000000 10000000 100000000" test 3
# Cluster endpoints consumed by the per-framework test scripts.
export HDFS_NAMENODE_URL=hdfs://10.0.207.57:8020
export IMPALA_DAEMON_ADDRESS=10.0.192.216:21000
export HIVE_DATABASE_URL=jdbc:hive2://localhost:10000
processTemplate()
{
# Render a test template: on lines containing "${TEST", substitute the
# ${TEST_*} placeholders with values from the current environment.
# $1: template file; $2: output file (overwritten).
templateFile=$1
outputFile=$2
# FIX: expansions quoted so file paths containing spaces survive.
rm -f "$outputFile"
sed < "$templateFile" > "$outputFile" \
"/\${TEST/ {
s:\${TEST_ROOT}:$TEST_ROOT:
s:\${TEST_FRAMEWORK_ROOT}:$PWD:
s:\${TEST_DATA_FILE}:$TEST_DATA_FILE:
s:\${TEST_DATA_HDFS_PATH}:$TEST_DATA_HDFS_PATH:
s:\${TEST_TEMP_RESULT_PATH}:$TEMP_FRAMEWORK_RESULT_PATH:
s:\${TEST_RESULT_PATH}:$RESULT_PATH/results:
}"
}
printfWithTime()
{
# Log "<rfc-3339 timestamp> \t<message>" to stdout.
# FIX: the original expanded "$@" *inside the printf format string*, so
# '%' in a message was interpreted as a conversion and multiple arguments
# made printf repeat the format. Pass the message as data instead.
printf '%s \t%s\n' "$(date --rfc-3339=seconds)" "$*"
}
# Trace all commands for debugging.
set -x
# Positional arguments with defaults:
# $1 frameworks, $2 test names, $3 event counts, $4 run name, $5 repeats.
export TEST_FRAMEWORKS=$1
if [ -z "$1" ]; then
export TEST_FRAMEWORKS="postgresql"
fi
export ALL_TEST_NAMES=$2
if [ -z "$2" ]; then
export ALL_TEST_NAMES="flows"
fi
export ALL_NUM_EVENTS=$3
if [ -z "$3" ]; then
export ALL_NUM_EVENTS="100"
fi
export RUN_NAME=$4
if [ -z "$4" ]; then
export RUN_NAME="unnamed"
fi
export NUM_REPEATS=$5
if [ -z "$5" ]; then
export NUM_REPEATS=1
fi
for testName in $ALL_TEST_NAMES; do
export TEST_NAME=$testName
for numEvents in $ALL_NUM_EVENTS; do
export NUM_EVENTS=$numEvents
export ROOT_RESULT_PATH=$TEST_ROOT/results/$RUN_NAME-$TEST_NAME-$NUM_EVENTS
export MEASUREMENT_RESULTS_FILE=$ROOT_RESULT_PATH/measurements.txt
export TEMP_RESULT_PATH=/tmp/results
export TEST_DATA_FILE=$ROOT_RESULT_PATH/test.csv
rm -fr $ROOT_RESULT_PATH
mkdir -p $ROOT_RESULT_PATH
sudo rm -fr $TEMP_RESULT_PATH
mkdir -p $TEMP_RESULT_PATH
sudo chmod 777 $TEMP_RESULT_PATH
printfWithTime "Copying test data consisting of $NUM_EVENTS events."
head -$NUM_EVENTS $TEST_ROOT/testdata/test.csv > $TEST_DATA_FILE
touch $MEASUREMENT_RESULTS_FILE
sudo chmod a+w $MEASUREMENT_RESULTS_FILE
printfWithTime "Running test named: $testName" >> $MEASUREMENT_RESULTS_FILE
printfWithTime "Number of events: $numEvents\n" >> $MEASUREMENT_RESULTS_FILE
frameworkId=0
for framework in $TEST_FRAMEWORKS; do
frameworkId=$(($frameworkId + 1))
printfWithTime "Starting to test framework: $framework (id: $frameworkId) at $(date --rfc-3339=seconds)"
export RESULT_FRAMEWORK_DIRECTORY_NAME=$framework-$frameworkId
export TEMP_FRAMEWORK_RESULT_PATH=$TEMP_RESULT_PATH/$RESULT_FRAMEWORK_DIRECTORY_NAME
export RESULT_PATH=$ROOT_RESULT_PATH/$RESULT_FRAMEWORK_DIRECTORY_NAME
mkdir -p $RESULT_PATH
mkdir -p $TEMP_FRAMEWORK_RESULT_PATH
sudo chmod 777 $TEMP_FRAMEWORK_RESULT_PATH
printfWithTime "Starting testing framework: $framework (id: $frameworkId)" | tee -a $MEASUREMENT_RESULTS_FILE
case "$framework" in
hive)
(cd hive && . ./test.sh) 2>&1 | tee $RESULT_PATH/log.txt
;;
postgresql)
(cd postgresql && . ./test.sh) 2>&1 | tee $RESULT_PATH/log.txt
;;
spark)
(cd spark && . ./test.sh my.Tester) 2>&1 | tee $RESULT_PATH/log.txt
;;
spark-parquet)
(cd spark && . ./test.sh my.TesterParquet) 2>&1 | tee $RESULT_PATH/log.txt
;;
spark-caching)
(cd spark && . ./test.sh my.TesterCaching 1) 2>&1 | tee $RESULT_PATH/log.txt
;;
impala)
(cd impala && . ./test.sh) 2>&1 | tee $RESULT_PATH/log.txt
;;
presto)
(cd presto && . ./test.sh) 2>&1 | tee $RESULT_PATH/log.txt
;;
esac;
sudo rm -fr $TEMP_FRAMEWORK_RESULT_PATH
printfWithTime "Finished testing framework: $framework\n" >> $MEASUREMENT_RESULTS_FILE
done;
sudo rm -f $TEST_DATA_FILE
done;
done;
set +x
| true
|
e81ee1f63a201de950058b0766ff1f32fd976e11
|
Shell
|
david-driscoll/omnisharp-server-roslyn-binaries
|
/build.sh
|
UTF-8
| 1,272
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
# Build script: rebuild the bundled omnisharp-roslyn server, vendor its
# artifacts plus the KRE runtimes under lib/server, regenerate TypeScript
# bindings, then commit/publish a new npm patch release.
echo update submodules...
git submodule update --init --recursive
git submodule foreach git pull origin master
# Build the server from the submodule.
pushd omnisharp-roslyn
./build.sh
popd
# Replace the vendored server artifacts wholesale.
rm -rf lib/server
mkdir -p lib/server
cp -a omnisharp-roslyn/artifacts/build/omnisharp/* lib/server
# Fetch NuGet and install the Windows CLR runtime into the server's approot.
curl -LO http://nuget.org/nuget.exe
mono nuget.exe install kre-clr-win-x86 -Prerelease -OutputDirectory lib/server/approot/packages
# Sanity-check that both expected runtime packages ended up in approot.
if [ ! -d "lib/server/approot/packages/kre-clr-win-x86.1.0.0-beta3" ]; then
echo 'ERROR: Can not find kre-clr-win-x86.1.0.0-beta3 in output exiting!'
exit 1
fi
if [ ! -d "lib/server/approot/packages/kre-mono.1.0.0-beta3" ]; then
echo 'ERROR: Can not find kre-mono.1.0.0-beta3 in output exiting!'
exit 1
fi
# Overlay the patched launcher scripts shipped in this repo.
cp -f omnisharp.cmd.patch lib/server/omnisharp.cmd
cp -f omnisharp.patch lib/server/omnisharp
chmod +x lib/server/omnisharp
# Bootstrap kvm (the ASP.NET K runtime manager) if it is not installed yet.
if ! type kvm > /dev/null 2>&1; then
curl -sSL https://raw.githubusercontent.com/aspnet/Home/release/kvminstall.sh | sh && source ~/.k/kvm/kvm.sh
fi
export KRE_FEED=https://www.nuget.org/api/v2
kvm install 1.0.0-beta3
kvm use 1.0.0-beta3
# Regenerate TypeScript bindings against the freshly built server.
pushd src/OmniSharp.TypeScriptGeneration
kpm restore
k run ../../lib/server
popd
# Commit, bump the npm patch version, publish, and push.
git commit -am "updated omnisharp server"
npm version patch -m "updating to %s"
npm publish
git push origin master
| true
|
8b435e27b1545b10f753914e84b6c7ce9e246021
|
Shell
|
c4pone/dotfiles
|
/scripts/install-docker-compose.sh
|
UTF-8
| 250
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
# Install docker-compose 1.17.0 system-wide, but only when no docker-compose
# is already available on PATH.
if ! command -v docker-compose >/dev/null 2>&1; then
  release_url="https://github.com/docker/compose/releases/download/1.17.0/docker-compose-$(uname -s)-$(uname -m)"
  sudo curl -L "$release_url" -o /usr/local/bin/docker-compose
  sudo chmod +x /usr/local/bin/docker-compose
fi
| true
|
058f76726b8e972da903543c00eefa5effffb49d
|
Shell
|
jdeblese/slackware-s6
|
/etc/rc.d/rc.ntpd
|
UTF-8
| 1,364
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Start/stop/restart ntpd.
# s6 wrapper: s6_start/s6_stop/s6_signal come from this sourced library.
. /etc/s6-init/s6shfunc
# Start ntpd:
# Supervises the 'ntp' service via s6, then syncs the system time back to a
# localtime hardware clock to disable the kernel's 11-minute RTC update mode.
ntpd_start() {
echo -n "Starting NTP daemon..."
s6_start ntp
echo
# The kernel is now mocking around with the the hardware clock if
# ntpd is running, so if the hardware clock (wall clock) is set to
# 'localtime' execute hwclock --localtime --systohc to disable the
# 11 minute mode kernel function:
if [ -x /sbin/hwclock ]; then
# Check for a broken motherboard RTC clock (where ioports for rtc are
# unknown) to prevent hwclock causing a hang:
if ! grep -q -w rtc /proc/ioports ; then
CLOCK_OPT="--directisa"
fi
if ! grep -q "^UTC" /etc/hardwareclock 2> /dev/null ; then
echo "Saving system time to the hardware clock (localtime)."
/sbin/hwclock $CLOCK_OPT --localtime --systohc
fi
fi
}
# Stop ntpd:
ntpd_stop() {
echo -n "Stopping NTP daemon..."
s6_stop ntp
echo ""
}
# Restart ntpd:
ntpd_restart() {
# send TERM signal, s6 will automatically restart
s6_signal -t ntp
}
# Check if ntpd is running
# Exits 1 when stopped so callers can use the status code.
# NOTE(review): presence of the pidfile is taken as proof of a live daemon;
# a stale pidfile would report "running" incorrectly.
ntpd_status() {
if [ -e /var/run/ntpd.pid ]; then
echo "ntpd is running."
else
echo "ntpd is stopped."
exit 1
fi
}
# Dispatch on the requested action.
case "$1" in
'start')
ntpd_start
;;
'stop')
ntpd_stop
;;
'restart')
ntpd_restart
;;
'status')
ntpd_status
;;
*)
echo "usage $0 start|stop|restart|status"
esac
| true
|
27279c87b71e660fd7129f0816b3076a8fdf09b4
|
Shell
|
daemanos/bin
|
/forward
|
UTF-8
| 1,262
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# forward: watch a directory for new files and send them to a remote server
#
# Usage: forward [-r] LOCAL_DIR ADDRESS [REMOTE_DIR]
# Arguments:
#   LOCAL_DIR   the local directory to watch
#   ADDRESS     the address of a server to forward to
#   REMOTE_DIR  the remote directory to forward to (same as LOCAL_DIR if
#               not given)
#
# Options:
#   -r          watch directories recursively
if [ "$1" = "-r" ]; then
  recurse="-r"
  shift
fi

# die MESSAGE: print MESSAGE to stderr and abort.
die() {
  echo "$1" >&2
  exit 1
}

# Default case is '*' — the previous '?)' only matched a single character,
# which happened to work for $# but was semantically wrong.
case "$#" in
  2)
    remotedir="$1"
    ;;
  3)
    remotedir="$3"
    ;;
  *)
    die "usage: forward [-r] LOCAL_DIR ADDRESS [REMOTE_DIR]"
    ;;
esac
localdir="$1"
address="$2"

# Verify connectivity and that both directories exist before watching.
if ! ssh -q "$address" exit; then
  die "error: couldn't connect to host '$address'"
fi
if [ ! -d "$localdir" ]; then
  die "error: no such local directory: $localdir"
fi
if ! ssh "$address" test -d "$remotedir"; then
  die "error: no such remote directory: $remotedir"
fi
# $recurse is intentionally unquoted: empty when -r was not given.
# read -r keeps backslashes in filenames intact.
inotifywait $recurse -m -e moved_to -e close_write "$localdir" --format "%w%f" 2>/dev/null | while read -r line; do
  fn="${line##$localdir/}"
  # The local path is an ordinary argv argument and must NOT be shell-quoted
  # (the old "'...'" wrapping made scp look for a file literally named
  # 'path'); only the remote path is expanded by a shell on the server, so
  # only it needs %q quoting.
  scp -- "$localdir/$fn" "$address:$(printf "%q" "$remotedir/")"
done
| true
|
6e49345a8f03af4a9ea7ed2f3a6ba70972383417
|
Shell
|
jlucktay/dotfiles-chezmoi
|
/bin/executable_get-path.sh
|
UTF-8
| 638
| 3.671875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Print the entries of $PATH twice: once in original order, once sorted
# case-insensitively, with colored section headers.
set -euo pipefail
shopt -s globstar nullglob
IFS=$'\n\t'
# Terminal color escapes (white background / black foreground).
background_white="$(tput setab 7)"
foreground_black="$(tput setaf 0)"
reset_colours="$(tput sgr0)"
echo "$background_white${foreground_black}PATH unsorted:$reset_colours"
# The here-string keeps this loop in the current shell, so sorted_path is
# still populated after 'done'.  The loop body runs once: read -a splits
# the whole single-line $PATH on ':' into split_path.
while IFS=':' read -ra split_path; do
# Split off sorted $PATH array for later output
IFS=$'\n' mapfile -t sorted_path < <(sort -f <<< "${split_path[*]}")
# Show unsorted $PATH now
for i in "${split_path[@]}"; do
echo "$i"
done
done <<< "$PATH"
echo
echo "$background_white${foreground_black}PATH sorted:$reset_colours"
for i in "${sorted_path[@]}"; do
echo "$i"
done
| true
|
b08d44b8d1f664661a80586f9d1621be5db85234
|
Shell
|
dtxbcsy/daodao_step1
|
/run_one.sh
|
UTF-8
| 543
| 2.546875
| 3
|
[] |
no_license
|
# Fetch the daodao.com search page for one city and extract attractions,
# hotels and restaurants from the saved page.
#
# Usage: run_one.sh <city name>
DATAPATH=/search/zf/bh1/data
# URL-encode the city name.  printf '%s' instead of echo: echo can mangle
# backslashes or treat a leading '-' as an option; $(…) replaces backticks.
city=$(printf '%s' "$1" | python url_encode.py)
url="http://www.daodao.com/Search?q=$city"
#COOKIE_DATA=cookie
#wget --load-cookies=$COOKIE_DATA --save-cookies=$COOKIE_DATA --keep-session-cookies $url -O $DATAPATH/$city -U "Chrome/32.0.1700.102 Safari/537.36"
# Download through the proxy wrapper, then parse attractions (jingdian),
# hotels (jiudian) and restaurants (canguan) from the saved page.  All
# expansions quoted so unexpected characters in $city cannot split words.
sh /search/zf/proxy/wget.sh "$url" "$DATAPATH/$city"
python get_jingdian.py "$DATAPATH/$city" "$city" > "$DATAPATH/$city.jingdian"
python get_jiudian.py "$DATAPATH/$city" "$city" > "$DATAPATH/$city.jiudian"
python get_canguan.py "$DATAPATH/$city" "$city" > "$DATAPATH/$city.canguan"
| true
|
eaaa5ee32331b51e41a6985ceefaaefbc7b185f7
|
Shell
|
zhengjia/dotfiles
|
/bash_profile
|
UTF-8
| 1,127
| 3.359375
| 3
|
[] |
no_license
|
# Personal ~/.bash_profile: history, rake wrapper, Finder helpers, rvm,
# sourced dotfiles, ulimits, and Google Cloud SDK hooks.
shopt -s histappend
# rake: run through 'bundle exec' when the cwd has a Gemfile and bundler
# is installed; otherwise fall back to the plain rake binary.
rake() {
if [[ -e ./Gemfile ]] && which bundle; then
#echo "using bundle exec"
bundle exec rake "$@"
else
#echo "not using bundle exec"
command rake "$@"
fi
}
# cd into current directory in finder
# ff [N]: print the POSIX path of Finder window N (default: frontmost),
# or of the Desktop when no window is open.  macOS only (osascript).
function ff { osascript -e 'tell application "Finder"'\
-e "if (${1-1} <= (count Finder windows)) then"\
-e "get POSIX path of (target of window ${1-1} as alias)"\
-e 'else' -e 'get POSIX path of (desktop as alias)'\
-e 'end if' -e 'end tell'; };\
function cdff { cd "`ff $@`"; };
# Load rvm if present (hard-coded user path).
if [[ -s /Users/zjia/.rvm/scripts/rvm ]] ; then source /Users/zjia/.rvm/scripts/rvm ; fi
# in 10.8 bashrc isn't sourced
[[ -r ~/.bashrc ]] && source ~/.bashrc
[[ -r ~/.bash_env ]] && source ~/.bash_env
[[ -r ~/.bash_alias ]] && source ~/.bash_alias
# Homebrew-installed bash completion, if available.
if [ -f $(brew --prefix)/etc/bash_completion ]; then
. $(brew --prefix)/etc/bash_completion
fi
# Raise file-descriptor and process limits for this session.
ulimit -n 65536
ulimit -u 2048
# The next line updates PATH for the Google Cloud SDK.
source '/Users/zjia/google-cloud-sdk/path.bash.inc'
# The next line enables bash completion for gcloud.
source '/Users/zjia/google-cloud-sdk/completion.bash.inc'
| true
|
2ab356b4b8c01379158d2b669766fb8d8c92c596
|
Shell
|
djtran/dotfiles
|
/polybar/.config/polybar/music.sh
|
UTF-8
| 184
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Polybar module: while music is playing, print " : Artist - Title"
# (no trailing newline); print nothing otherwise.
status=$(playerctl status)
if [[ $status == "Playing" ]]; then
    artist=$(playerctl metadata xesam:artist)
    title=$(playerctl metadata xesam:title)
    printf '%s' " : $artist - $title"
fi
| true
|
9f2fa649087a549d6784dff3628888f4dbcf910f
|
Shell
|
tzmartin/dotfiles
|
/bootstrap.sh
|
UTF-8
| 396
| 3.0625
| 3
|
[] |
no_license
|
# Load ~/.extra, ~/.bash_prompt, ~/.exports, ~/.aliases and ~/.functions
# ~/.extra can be used for settings you don’t want to commit
# Each file is sourced only if it exists and is readable.
for file in ~/dotfiles/.{extra,bash_prompt,exports,aliases,functions}; do
[ -r "$file" ] && source "$file"
#echo "Installing ... $file"
done
unset file
# init z https://github.com/rupa/z
. ~/dotfiles/z/z.sh
# zsh-style precmd hook: record every visited directory in z's database.
# NOTE(review): bash does not call precmd automatically — presumably a
# prompt framework invokes it; confirm.
function precmd () {
_z --add "$(pwd -P)"
}
| true
|
262ad2946a153006613341d05522f2f4deb88f80
|
Shell
|
aaardalannn/Performance
|
/runMe.sh
|
UTF-8
| 673
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
#https://stackoverflow.com/questions/31328300/python-logging-module-logging-timestamp-to-include-microsecond/31335874
# Throughput experiment: generate log lines as fast as possible and measure
# the stream rate with pv.  Clears previous artifacts first.
rm -vf logGenerator.log nBytes.txt fluent-bit_output.log foo.pdf nBytes_fb.txt fluent-bit_output.log
touch logGenerator.log
touch fluent-bit_output.log
# Generator runs in the background; pv reports the rate at which its
# output file grows.  NOTE(review): tail -f never exits, so the script
# blocks here until interrupted.
python log_generator.py --logFile 'logGenerator.log' --iterations 10000000 &
tail -f logGenerator.log | pv > /dev/null
# end=$((SECONDS+600))
# while [ $SECONDS -lt $end ]; do
# # Do what you want.
# # sleep 0.1
# wc -l < logGenerator.log >> nBytes.txt
# wc -l < fluent-bit_output.log >> nBytes_fb.txt
# # echo HI
# done
# python plot_2.py
# evince foo.pdf
# # fg
| true
|
c5f20c22e1775745216b74f9579c5aa3f0206677
|
Shell
|
MonkeyDHero/Learning-IO-Access-Patterns-to-improve-prefetching-in-SSDs-
|
/Data Download Scripts/download_all_subtraces_of_388.sh
|
UTF-8
| 1,823
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# This script can only be run from the computer on which it was
# originally downloaded.
#
# This auto-generated script was downloaded from http://iotta.snia.org.
# It will download the 2 subtraces of MSR Cambridge Traces
#
echo "Downloading the 2 subtraces of MSR Cambridge Traces" 1>&2
# Write the site's session cookies to a private temp file; both wget and
# curl read them from here.  The heredoc is quoted ('EOF') so nothing in
# the cookie data is expanded.
cookies=$(mktemp)
cat >> $cookies << 'EOF'
# Netscape HTTP Cookie File
# http://curl.haxx.se/rfc/cookie_spec.html
# This file was generated by iotta.snia.org! Edit at your own risk.
.iotta.snia.org TRUE / FALSE 0 infodigest 1be17021d643167953a68a72a7b2282d8e11b4a5
.iotta.snia.org TRUE / FALSE 0 legal true
.iotta.snia.org TRUE / FALSE 0 id 628743
EOF
# Pick a downloader.  Exactly one of useWGET/useCURL is set (the other
# stays unset) — checkForError/downloadFile must account for that.
if which wget >/dev/null 2>&1; then
useWGET=true
elif which curl >/dev/null 2>&1; then
useCURL=true
else
echo "Couldn't find either wget or curl. Please install one of them" 1>&2
exit 1
fi
# checkForError STATUS FILE
# Inspect the downloader's exit STATUS (and, for wget, the downloaded FILE)
# and set the global 'delete' to true when the partial FILE is unusable and
# should be removed, or false when the download may be resumed later.
#   wget: 2/3/5/7/8 are fatal classes of errors; an empty file is unusable.
#   curl: 22 (-f HTTP error) and 36 (bad resume) are fatal.
checkForError() {
delete=false
# ${var:-false}: only one of useWGET/useCURL is ever assigned; an unset
# flag used to expand to an *empty command*, which bash treats as success,
# silently taking the wrong branch.
if ${useWGET:-false} && (( $1 == 2 || $1 == 3 || $1 == 5 || $1 == 7 || $1 == 8 ))
then
delete=true
elif ${useWGET:-false} && [ ! -s "$2" ]
then
delete=true
elif ${useCURL:-false} && (( $1 == 22 || $1 == 36 ))
then
delete=true
fi
}
# downloadFile FILE TRACE_ID
# Download one trace archive from iotta.snia.org into FILE using whichever
# downloader was detected (wget or curl), resuming partial downloads.
# On failure, consults checkForError to decide whether the partial file is
# kept for resuming, then exits the script with status 1.
downloadFile() {
file=$1
id=$2
url="http://server3.iotta.snia.org/traces/$id/download?type=file&sType="
# ${flag:-false}: an unset flag must read as false, not as an empty
# (always-successful) command.
if ${useWGET:-false}; then
wget -q --load-cookies="$cookies" -O "$file" -c "${url}wget"
elif ${useCURL:-false}; then
curl -s -f -b "$cookies" -o "$file" -C - -L "${url}curl"
fi
# Capture the downloader's status once.  The old code re-read $? inside the
# else-branch, by which point it held the status of the [ test itself, so
# checkForError never saw the real exit code.
rv=$?
if [ $rv -eq 0 ]; then
echo "Finished Downloading $file"
else
checkForError $rv "$file"
if $delete; then
echo "There was an error downloading the file ($file)"
rm -f "$file"
else
echo "$file was partially downloaded"
fi
echo "Stopping..."
exit 1
fi
}
# Download both MSR Cambridge sub-traces (SNIA trace ids 386 and 387);
# downloadFile exits the script itself on any failure.
downloadFile "msr-cambridge1.tar" 386
downloadFile "msr-cambridge2.tar" 387
echo "Finished All Downloads"
# Remove the temporary cookie file.
rm -f $cookies
| true
|
46b8f47bc3f3c280f5dd99100ab1ce6883a3ea59
|
Shell
|
nicosommi/dotfiles
|
/install-cp.sh
|
UTF-8
| 632
| 2.625
| 3
|
[] |
no_license
|
# Copy (not symlink) the dotfiles from ~/dotfiles into place and wire up
# git templates, ctags hooks, and the fish configuration.
# NOTE(review): BLUE/NORMAL are expected to be defined by the caller's
# environment; unset they expand to empty and the output is uncolored.
printf "${BLUE}Installing nico's dotfiles...${NORMAL}\n"
cp ~/dotfiles/zsh/.zshrc ~/.zshrc
cp ~/dotfiles/vim/.vimrc ~/.vimrc
cp ~/dotfiles/tmux/.tmux.conf ~/.tmux.conf
# Git hooks templates (ctags)
git config --global init.templatedir '~/.git_template'
mkdir -p ~/.git_template
cp -R git_templates/* ~/.git_template
cp ~/dotfiles/git_templates/.gitconfig ~/.gitconfig
# Enables git ctags command
git config --global alias.ctags '!.git/hooks/ctags'
# NOTE(review): this second copy overwrites the git_templates/.gitconfig
# installed above — confirm which of the two files is the intended source.
cp ~/dotfiles/.gitconfig ~/.gitconfig
printf "${BLUE}Configuring fish${NORMAL}\n"
mkdir -p ~/.config
cp -R ~/dotfiles/fish/. ~/.config/fish
printf "${BLUE}Finished.${NORMAL}\n"
| true
|
9cfd6984f1001459f860a033f6f8aa4b89103b59
|
Shell
|
OpenSH4/qboxhd
|
/src/initramfs/etc/init.d/loop_images.sh
|
UTF-8
| 566
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# @brief Display forever up to 3 images using <program> waiting <sleep>
# seconds between each one.
# Usage: images_loop.sh <sleep> <program> <image0> <image1> [image2]
#
# Copyright (c) 2010 Duolabs Spa
#
# Author: Pedro Aguilar (pedro@duolabs.com)
#
# '=' instead of '==' in [ ]: '==' is a bashism and not guaranteed to work
# under a strict POSIX /bin/sh (e.g. dash).  All expansions are quoted so
# odd filenames cannot word-split.
if [ "$1" = "" -o "$2" = "" -o "$3" = "" -o "$4" = "" ]; then
exit 1
fi
SLEEP=$1
PROG=$2
IMG_A=$3
IMG_B=$4
IMG_C=$5
# Cycle through the images forever; the third image is optional.
while [ "1" ]; do
"$PROG" "/etc/images/$IMG_A"
sleep "$SLEEP"
"$PROG" "/etc/images/$IMG_B"
sleep "$SLEEP"
if [ "$IMG_C" != "" ]; then
"$PROG" "/etc/images/$IMG_C"
sleep "$SLEEP"
fi
done
| true
|
b26feeeebd993e4abb96e7fc371ee656a5e2a853
|
Shell
|
ronin13/Scripts
|
/getimap
|
UTF-8
| 654
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch mail with getmail for several accounts, guarded by mount,
# duplicate-run, and disk-space checks, with desktop notifications.
#exit
# The wormole mount must be present before fetching.
if ! mount | grep -q wormole;then
echo "Booh.... "
exit
fi
# Bail out if a getmail process is already running.  'grep -v grep' must
# filter *before* the final quiet match: piping the (empty) output of a
# 'grep -q' onward, as before, made this check always false.
if ps auxww | grep getmail | grep -v grep | grep -q .;then
exit
fi
echo "#########################$(date)####################" >> ~/logs/getmail.log
# Percentage used on the home filesystem, without the trailing '%'.
left=$(/bin/df -h | grep home | head -1 | awk '{ print $5 }' | tr -d '%')
# Numeric comparison: the previous '[[ $left > 90 ]]' compared strings
# lexicographically, so e.g. "100" sorted before "90" and never triggered.
# ${left:-0} guards against an empty df result.
if [[ ${left:-0} -gt 90 ]];then
echo "Low on space -- quitting" >> ~/logs/getmail.log
exit 1
fi
notify-send "Mailman" "....Fetching mails."
# setlock -X -n: fail immediately instead of waiting if the lock is held.
setlock -X -n /tmp/locks/getmail getmail -n -q --rcfile=getmailrc.gmail --rcfile=getmailrc.yahoo --rcfile=getmailrc.wnohang --rcfile=getmailrc.gmail2
[[ $? == 0 ]] && notify-send "Mailman" ".. Done "
|
5bf280372e9c31b4ccd6def851972109b5fc39f7
|
Shell
|
vignestraining/Assignment
|
/Assignment -2/Elasticsearch.sh
|
UTF-8
| 1,114
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
#pulling elasticsearch image
sudo docker pull docker.elastic.co/elasticsearch/elasticsearch:7.9.1
# Vignes.txt file contains containerid that can be used for health check
# 'docker run -d' prints the new container's id on stdout, captured here.
sudo docker run -itd -p 9200:9200 -p 9300:9300 --name elasticsearch -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.9.1 /bin/bash > Vignes.txt
echo "Successfully created container: `tail -1 Vignes.txt`"
#Health check of the created container
# Iterate over every recorded container id and dump its runtime state via
# 'docker inspect' Go templates.
cat Vignes.txt | while read CONTAINERID
do
echo "ContainerID : $CONTAINERID"
echo "running Status:"
sudo docker inspect --format="{{.State.Running}}" $CONTAINERID 2> /dev/null
echo "Started at:"
sudo docker inspect --format="{{.State.StartedAt}}" $CONTAINERID
echo "PID:"
sudo docker inspect --format="{{.State.Pid}}" $CONTAINERID
echo "Paused Status:"
sudo docker inspect --format="{{.State.Paused}}" $CONTAINERID
echo "restarting Status:"
sudo docker inspect --format="{{.State.Restarting}}" $CONTAINERID
echo "Network IP:"
sudo docker inspect --format="{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" $CONTAINERID
done
| true
|
7481d15820224a2aa27a615e53073ed7fb80a88a
|
Shell
|
marcingorecki/RPiCam
|
/etc/init.d/cam_master
|
UTF-8
| 797
| 3
| 3
|
[] |
no_license
|
#! /bin/sh
### BEGIN INIT INFO
# Provides:          skeleton
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Example initscript
# Description:       This file should be used to construct scripts to be
#                    placed in /etc/init.d.
### END INIT INFO
# Author: Foo Bar <foobar@baz.org>
#
# Please remove the "Author" lines above and replace them
# with your own name if you copy and modify this script.
# Do NOT "set -e"
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Description of the service"
NAME=cam_master
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
# NOTE(review): unlike a normal init script this ignores start/stop/restart
# arguments and PIDFILE entirely — it unconditionally launches the camera
# server in the background on every invocation.  Confirm this is intended.
cd /home/pi/cammg/RPiCam
python server.py &
| true
|
a09524e1c9297e72df8d8b38e2d0e0de76998a56
|
Shell
|
shun-getto-systems/docker-wrapper-commands
|
/bin/logs
|
UTF-8
| 402
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Show the trailing container logs of the configured wrapper target.
#
#   $1                          tail line spec (defaults to -100)
#   DOCKER_WRAPPER_LOGS_TARGET  preferred wrapper command asked for logs
#   DOCKER_WRAPPER_APP_TARGET   fallback wrapper command
docker_wrapper_logs(){
    local line=${1:--100}
    shift
    # Prefer the dedicated logs target, fall back to the app target.
    local target=${DOCKER_WRAPPER_LOGS_TARGET:-$DOCKER_WRAPPER_APP_TARGET}
    if [ -n "$target" ]; then
        "$target" logs | tail "$line"
    else
        echo '$DOCKER_WRAPPER_LOGS_TARGET or $DOCKER_WRAPPER_APP_TARGET is empty'
    fi
}
docker_wrapper_logs "$@"
| true
|
c168b0fb3b5cef12f30da34b62d322ffbe4847a1
|
Shell
|
cornell-zhang/uptune
|
/conda/conda-build.sh
|
UTF-8
| 887
| 3.640625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Build the conda package for several Python versions, convert the resulting
# linux-64 packages to all other supported platforms, and upload everything
# to anaconda.org.
# adjust the Python versions you would like to build
array=( 3.5 3.6 3.7 3.8)
echo "Building conda package ..."
conda config --add channels conda-forge
conda config --add channels powerai
# building conda packages
for i in "${array[@]}"
do
conda-build --python "$i" -c conda-forge .
done
CONDA_PATH=$HOME/anaconda3
# convert package to other platforms
platforms=( osx-64 linux-32 linux-64 win-32 win-64 )
# The -name pattern must be quoted: unquoted, the shell would expand
# '*.tar.bz2' against the *current* directory before find ever saw it.
# $file and paths are quoted so spaces cannot split arguments.
find "$CONDA_PATH/conda-bld/linux-64/" -name '*.tar.bz2' | while read -r file
do
echo "$file"
# conda convert --platform all $file -o $HOME/conda-bld/
for platform in "${platforms[@]}"
do
conda convert --platform "$platform" "$file" -o "$CONDA_PATH/conda-bld/"
done
done
# upload packages to conda
find "$CONDA_PATH/conda-bld/" -name '*.tar.bz2' | while read -r file
do
echo "$file"
anaconda upload "$file"
done
echo "Building conda package done!"
| true
|
c2498376493a15b0eb76db38ebcc5dc7b25bef1b
|
Shell
|
devkinetic/ddev
|
/containers/ddev-dbserver/files/migrate_file_to_volume.sh
|
UTF-8
| 2,076
| 4.0625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Trace commands, stop on any error (including inside pipelines), and treat
# unset variables as errors.
set -x
set -eu
set -o pipefail
# This script is used to migrate a ddev bind-mounted database to a docker-volume mounted database
# It is actually just for the initial migration of v1.0.0-era databases to (hopefully) v1.1.0
# docker-volume-mounted databases, around 2018-08-02. It should end up being not useful within a few
# months.
#
# Run this command in the project directory:
# docker run -t -u "$(id -u):$(id -g)" -e SNAPSHOT_NAME=<migration_snapshot_name -v "$PWD/.ddev:/mnt/ddev_config" -v "$HOME/.ddev/<projectname>/mysql:/var/lib/mysql" --rm --entrypoint=/migrate_file_to_volume.sh drud/ddev-dbserver:<your_version>
# The snapshot name selects the output directory under .ddev/db_snapshots.
if [ -z "${SNAPSHOT_NAME:-}" ] ; then
echo "SNAPSHOT_NAME environment variable must be set"
exit 1
fi
OUTDIR="/mnt/ddev_config/db_snapshots/${SNAPSHOT_NAME}"
SOCKET=/var/tmp/mysql.sock
mkdir -p $OUTDIR
# Refuse to run when the expected bind-mounted datadir is absent.
if [ ! -d "/var/lib/mysql/mysql" ]; then
echo "No mysql bind-mount directory was found, aborting"
exit 2
fi
function serverwait {
for i in {60..0};
do
if mysqladmin ping -uroot --socket=$SOCKET >/dev/null 2>&1; then
return 0
fi
# Test to make sure we got it started in the first place. kill -s 0 just tests to see if process exists.
if ! kill -s 0 $pid 2>/dev/null; then
echo "MariaDB initialization startup failed"
return 2
fi
echo "MariaDB initialization startup process in progress... Try# $i"
sleep 1
done
return 1
}
sudo chmod -R ugo+rw /var/lib/mysql /var/log/mysql*
# Using --skip-grant-tables here because some old projects may not have working
# --user root --password root
mysqld --skip-networking --skip-grant-tables --socket=$SOCKET 2>&1 &
pid=$!
if ! serverwait ; then
echo "Failed to get mysqld running"
exit 2
fi
# Capture mariabackup's real status: '|| rv=$?' both records the failure code
# and keeps 'set -e' from aborting before we can report it.  The old code
# tested $? and then ran 'exit $?', which exited with the status of the
# [ test itself (0), reporting success on failure.
rv=0
mariabackup --backup --target-dir=$OUTDIR --user root --socket=$SOCKET 2>&1 || rv=$?
if [ "$rv" != 0 ] ; then echo "Failed mariabackup command."; exit "$rv"; fi
# Wait for mysqld to exit.  wait returns mysqld's (non-zero, signal-induced)
# status, which would trip 'set -e' before the final message — hence || true.
kill -s TERM "$pid" && wait "$pid" || true
echo "migration in: $OUTDIR"
| true
|
ce3815a26f6b51282ab7894e9d82e1652bc11688
|
Shell
|
Sh3llSh0ck3d/mamutools
|
/printline.sh
|
UTF-8
| 1,554
| 4.25
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Catline
# http://cixtor.com/
# https://github.com/cixtor/mamutools
# http://en.wikipedia.org/wiki/Cat_(Unix)
#
# The cat program is a standard Unix utility that concatenates and lists files. The
# name is an abbreviation of catenate, a synonym of concatenate. The Single Unix
# Specification specifies that when the "cat" program is given files in a sequence
# as arguments, it will output their contents to the standard output in the same
# sequence. It mandates the support of one option flag, u (unbuffered), by which
# each byte is written to standard output without buffering as it is read. Many
# operating systems do this by default and ignore the flag.
#
# main FILEPATH LINE [LENGTH]
# Print LENGTH lines of FILEPATH starting at line LINE (one line when LENGTH
# is omitted).  Returns non-zero and prints a colored error for bad input.
main() {
    local FILEPATH=$1
    local LINE=$2
    local LENGTH=$3
    # Guard clauses replace the old deeply nested if/else ladder.
    if [ "${FILEPATH}" = "" ]; then
        echo -e "\e[0;91mError.\e[0m You should specify a valid file path."
        return 1
    fi
    if [ "${LINE}" = "" ]; then
        echo -e "\e[0;91mError.\e[0m You should specify a valid line number as the first parameter."
        return 1
    fi
    if ! [[ "${LINE}" =~ ^[0-9]+$ ]]; then
        echo -e "\e[0;91mError.\e[0m The line number specified is not numeric."
        return 1
    fi
    # Warn about LENGTH only when one was actually supplied but is not
    # numeric; previously omitting LENGTH (a documented usage) printed the
    # "not valid" warning as well.
    if [ "${LENGTH}" != "" ] && ! [[ "${LENGTH}" =~ ^[0-9]+$ ]]; then
        LENGTH=""
        echo -e "\e[0;91mError.\e[0m The number of lines to show is not valid, you will see only one:"
    fi
    if [ "${LENGTH}" != "" ]; then
        # Window of LENGTH lines starting at LINE.
        head -n $(( LINE + LENGTH - 1 )) "$FILEPATH" | tail -n "$LENGTH"
    else
        head -n "$LINE" "$FILEPATH" | tail -n 1
    fi
}
main "$@"
| true
|
ddcb90c78134579a13b6b19bd7cfdae00854e43f
|
Shell
|
milhnl/dotfiles
|
/XDG_BIN_HOME/submerge
|
UTF-8
| 1,276
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# submerge FILE: if FILE has no English subtitle stream and a sibling
# "<name>.en.srt" exists, mux the .srt into output.mkv as an eng subtitle.
set -eu
# checksub FILE: copy only the subtitle streams to a null muxer — exits
# non-zero when FILE has no subtitles.  NOTE(review): currently unused here.
checksub() { ffmpeg -i "$1" -c copy -map 0:s -f null - -v 0 -hide_banner; }
#They both give the exact same result, but I trust JSON a bit more
# First streams() implementation: scrape ffprobe's human-readable "Stream #"
# lines into "index<TAB>type<TAB>codec<TAB>(lang)" rows with sed.
# It is deliberately shadowed by the JSON-based version below.
streams() {
ffprobe "$1" 2>&1 1>/dev/null | sed '
/^ *Stream #/!d;
s/^ *Stream #//;
s/: Audio: /: audio: /;
s/: Video: /: video: /;
s/: Subtitle: /: subtitle: /;
s/: Data: /: data: /;
s/^\([0-9]*:[0-9]*\)\[[0-9a-fx]*\]/\1/;
s/\([0-9]*:[0-9]*\)\((.*)\|\): \([a-z]*\): \([^ ,]*\).*/\1\t\3\t\4\t\2/;
s/(\(.*\))$/\1/;
'
}
# ffjson FILE: full ffprobe metadata as JSON.
ffjson() {
ffprobe -hide_banner -v quiet -of json -show_format -show_streams -i "$1"
}
# JSON-based streams(): same tab-separated rows, built with jq.  Being
# defined second, this is the implementation actually used below.
streams() {
ffjson "$1" \
| jq -r '.streams | .[] | "0:\(.index)\t\(.codec_type)\t\(
.codec_name // "none")\t\(.tags.language // .tags.LANGUAGE //
"")"' 2>/dev/null
}
# mergesub VIDEO SRT: copy all audio/video streams and add the subtitle
# file as a new eng-tagged stream, writing output.mkv (overwritten).
mergesub() {
#TODO handle multiple container types
#TODO handle multiple pre-existing subtitles
ffmpeg -i file:"$1" -i file:"$2" \
-map 0:a -c:a copy \
-map 0:v -c:v copy \
-map 1:0 -c:s copy \
-metadata:s:0 language=eng \
-y output.mkv
}
# Merge only when no English subtitle stream exists and the .srt is present.
if ! streams "$1" | grep -q 'subtitle.*\seng' && [ -e "${1%.*}.en.srt" ]; then
mergesub "$1" "${1%.*}.en.srt"
fi
| true
|
b8000fb9539353ec25c4d41de53ed0d5f9ea65fb
|
Shell
|
sitya/heroku-buildpack-php
|
/bin/web.sh
|
UTF-8
| 977
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Copy this file to your PHP applications /app folder and put the line below into your Procfile:
# web: /app/web.sh
#
# Fail fast
set -o pipefail
set -eu
export PATH="$HOME/.heroku/php/bin:$HOME/.heroku/php/sbin:$PATH"
APP_CONFIG=/app/config
ROOT=/app/.heroku/php
# This is your properly configured shibboleth2.xml
cp ${APP_CONFIG}/shibboleth2.xml ${ROOT}/etc/shibboleth/shibboleth2.xml
# There are the key and cert used by Shibboleth
cp ${APP_CONFIG}/*.pem ${ROOT}/etc/shibboleth/
# Optionally here you can download the cert your federation's metadata is signed with
# wget -O ${ROOT}/etc/shibboleth/federaton-signer.crt https://federation.example.org/signer.crt
# Start the shibboleth daemon (-w 30: wait up to 30s for initialization).
${ROOT}/sbin/shibd -p ${ROOT}/var/run/shibd.pid -w 30
# config/httpd.conf is your configured apache config file in /app/config folder
# web means that your DocumentRoot is /app/web
/app/vendor/bin/heroku-php-apache2 -c config/httpd.conf web
# Stream shibboleth logs to stdout so they reach the Heroku log drain.
tail -f ${ROOT}/var/log/shibboleth/*.log
| true
|
000421659254755e7badc7f9d603a555c3dedc25
|
Shell
|
bytemine/univention-openvpn
|
/univention-openvpn/94openvpn4ucs.inst
|
UTF-8
| 30,477
| 2.65625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
## joinscript api: bindpwdfile
VERSION=1
SERVICE="OpenVPN"
export UNIVENTION_APP_IDENTIFIER="openvpn4ucs-2.0"
. /usr/share/univention-lib/base.sh
. /usr/share/univention-lib/ldap.sh
#. /usr/share/univention-lib/umc.sh
. /usr/share/univention-join/joinscripthelper.lib
joinscript_init
NEWKEY="/etc/openvpn/sitetosite.newkey"
eval "$(ucr shell)"
ucs_addServiceToLocalhost "$SERVICE" "$@"
ucs_registerLDAPExtension "$@" \
--schema /usr/lib/openvpn-int/misc/univention-openvpn.schema \
--acl /usr/lib/openvpn-int/misc/63openvpn-sitetosite.acl \
--udm_syntax /usr/lib/openvpn-int/misc/univention-openvpn-schema.py \
--udm_hook /usr/lib/openvpn-int/misc/univention-openvpn.py \
--ucsversionstart 4.4-1 --ucsversionend 5.99-0
eabas="cn=openvpn,cn=custom attributes,cn=univention,$ldap_base"
udm container/cn remove "$@" \
--dn "${eabas}" >/dev/null 2>&1
univention-directory-manager container/cn create "$@" --ignore_exists \
--position "cn=custom attributes,cn=univention,$ldap_base" \
--set name=openvpn || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-UserAddress,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set ldapMapping='univentionOpenvpnUserAddress' \
--set objectClass='univentionOpenvpn' \
--set name='UniventionOpenvpn-UserAddress' \
--set shortDescription="OpenVPN user's address" \
--set longDescription='Assign IP addresses to users.' \
--set translationShortDescription='"de_DE" "Adresse des OpenVPN Benutzers"' \
--set translationLongDescription='"de_DE" "Weise Benutzern eine feste IP Adresse zu."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='User VPN' \
--set groupPosition='2' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnuseraddress' \
--set syntax='openvpnUserandAddress' \
--set mayChange='1' \
--set multivalue='1' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='univentionOpenVpn' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-FixedAddresses,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set ldapMapping='univentionOpenvpnFixedAddresses' \
--set objectClass='univentionOpenvpn' \
--set name='UniventionOpenvpn-FixedAddresses' \
--set shortDescription='OpenVPN fixed addresses' \
--set longDescription='Use fixed IP addresses for users.' \
--set translationShortDescription='"de_DE" "Feste Adressen"' \
--set translationLongDescription='"de_DE" "Benutze feste IP Adressen für Benutzer."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='User VPN' \
--set groupPosition='2' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnfixedaddresses' \
--set syntax='boolean' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
# dualfactorauth flag is gone
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-Dualfactorauth,${eabas}" >/dev/null 2>&1
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-Duplicate,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set ldapMapping='univentionOpenvpnDuplicate' \
--set objectClass='univentionOpenvpn' \
--set name='UniventionOpenvpn-Duplicate' \
--set shortDescription='OpenVPN duplicate' \
--set longDescription='Allow multiple clients with the same common name to connect simultaneously.' \
--set translationShortDescription='"de_DE" "OpenVPN Mehrfachverbindung"' \
--set translationLongDescription='"de_DE" "OpenVPN erlaubt mehrere gleichzeitige Verbindungen mit gleichem Zertifikat (CN im Zertifikat)."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='User VPN' \
--set groupPosition='2' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnDuplicate' \
--set syntax='boolean' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-Redirect,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set default='1' \
--set ldapMapping='univentionOpenvpnRedirect' \
--set objectClass='univentionOpenvpn' \
--set name='UniventionOpenvpn-Redirect' \
--set shortDescription='OpenVPN redirect gateway' \
--set longDescription='OpenVPN will establish itself as the default gateway for the client. All internet traffic will be redirected through the virtual private network.' \
--set translationShortDescription='"de_DE" "OpenVPN Standard-Gateway Umleitung"' \
--set translationLongDescription='"de_DE" "OpenVPN setzt sich beim Client als Standard-Gateway ein. Jeglicher Internetverkehr wird über das virtuelle private Netz geleitet."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='User VPN' \
--set groupPosition='2' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnRedirect' \
--set syntax='boolean' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-NetIPv6,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set default='fdda:354e:65b6:b242::/64' \
--set ldapMapping='univentionOpenvpnNetIPv6' \
--set objectClass='univentionOpenvpn' \
--set name='UniventionOpenvpn-NetIPv6' \
--set shortDescription='OpenVPN transfer network IPv6' \
--set longDescription='OpenVPN will use this as transfer network within the virtual private network.' \
--set translationShortDescription='"de_DE" "OpenVPN Transfernetzwerk für IPv6"' \
--set translationLongDescription='"de_DE" "OpenVPN verwendet dieses Transfernetzwerk innerhalb des virtuellen privaten Netzes."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='User VPN' \
--set groupPosition='2' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnTransfernetworkIPv6' \
--set syntax='string' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='1' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-Masquerade,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set default='0' \
--set ldapMapping='univentionOpenvpnMasquerade' \
--set objectClass='univentionOpenvpn' \
--set name='UniventionOpenvpn-Masquerade' \
--set shortDescription='Masquerade' \
--set longDescription='Traffic coming from above Network will appear to be coming from this host.' \
--set translationShortDescription='"de_DE" "Maskierung"' \
--set translationLongDescription='"de_DE" "Datenverkehr aus obigem Netz scheint von diesem Host zu kommen."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='User VPN' \
--set groupPosition='2' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set fullWidth='1' \
--set CLIName='openvpnMasquerade' \
--set syntax='boolean' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-Net,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set default='10.153.175.0/24' \
--set ldapMapping='univentionOpenvpnNet' \
--set objectClass='univentionOpenvpn' \
--set name='UniventionOpenvpn-Net' \
--set shortDescription='(* required) OpenVPN transfer network IPv4 (default: /24)' \
--set longDescription='OpenVPN will use this as transfer network within the virtual private network.' \
--set translationShortDescription='"de_DE" "(* Plichtfeld) OpenVPN Transfernetzwerk (Default: /24)"' \
--set translationLongDescription='"de_DE" "OpenVPN verwendet dieses Transfernetzwerk innerhalb des virtuellen privaten Netzes."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='User VPN' \
--set groupPosition='2' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnTransfernetwork' \
--set syntax='string' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='1' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-Port,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set default='1194' \
--set ldapMapping='univentionOpenvpnPort' \
--set objectClass='univentionOpenvpn' \
--set name='UniventionOpenvpn-Port' \
--set shortDescription='(* required) OpenVPN port' \
--set longDescription='OpenVPN will listen on this port.' \
--set translationShortDescription='"de_DE" "(* Pflichtfeld) OpenVPN Port"' \
--set translationLongDescription='"de_DE" "OpenVPN empfängt Verbindungen auf diesem Port."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='User VPN' \
--set groupPosition='2' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnPort' \
--set syntax='integer' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='1' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-Address,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set ldapMapping='univentionOpenvpnAddress' \
--set objectClass='univentionOpenvpn' \
--set name='UniventionOpenvpn-Address' \
--set shortDescription='(* required) OpenVPN server address' \
--set longDescription='This address is used by clients to connect to the OpenVPN server. The server itself always listens on all available interfaces. This is useful if the actual OpenVPN server is in a private network behind a firewall which uses port-forwarding to pass VPN connections.' \
--set translationShortDescription='"de_DE" "(* Pflichtfeld) OpenVPN Serveradresse"' \
--set translationLongDescription='"de_DE" "Diese Adresse wird von Klienten benutzt um den OpenVPN Server zu erreichen. Der Server selber lauscht allerdings immmer auf allen verfügbaren Schnittstellen. Dies macht es möglich, den OpenVPN Server in einem lokalen Netz hinter einer Firewall, welche Port-Weiterleitung einsetzt um den OpenVPN Server von aussen erreichbar zu machen, zu betreiben."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='User VPN' \
--set groupPosition='2' \
--set default='0.0.0.0' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnAddress' \
--set syntax='string' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-Active,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set ldapMapping='univentionOpenvpnActive' \
--set objectClass='univentionOpenvpn' \
--set name='UniventionOpenvpn-Active' \
--set shortDescription='OpenVPN server active' \
--set longDescription='In order to start the OpenVPN service on this computer, this option has to be enabled.' \
--set translationShortDescription='"de_DE" "OpenVPN Server aktiviert"' \
--set translationLongDescription='"de_DE" "OpenVPN Server aktiviert. Um den OpenVPN Dienst auf diesem Rechner laufen zu lassen, muss diese Option aktiviert werden."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='User VPN' \
--set groupPosition='2' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnActive' \
--set syntax='boolean' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
# extension of user objects
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-Account,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--set module="users/user" \
--set ldapMapping='univentionOpenvpnAccount' \
--set objectClass='univentionOpenvpnUser' \
--set name='UniventionOpenvpn-Account' \
--set shortDescription='OpenVPN account' \
--set longDescription='OpenVPN account for users' \
--set translationShortDescription='"de_DE" "OpenVPN Account"' \
--set translationLongDescription='"de_DE" "OpenVPN Account für Benutzer. Wenn diese Option bei einem Konto gesetzt wird, so wird für den Nutzer ein Zertifikat generiert und zusammen mit den anderen notwendigen Dateien, einschliesslich einer Client Konfiguration fuer OpenVPN, als Zip-Datei im Heimatverzeichnis den Nutzers abgelegt. Pro aktiviertem Server wird ein solches Paket erzeugt."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnAccount' \
--set syntax='boolean' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
# extension of computer objects for sitetosite
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-Secret,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set ldapMapping='univentionOpenvpnSecret' \
--set objectClass='univentionOpenvpnSitetoSite' \
--set name='UniventionOpenvpn-Secret' \
--set shortDescription='OpenVPN site-to-site secret' \
--set longDescription='The secret used for site-to-site VPNs.' \
--set translationShortDescription='"de_DE" "Geheimnis für Site-to-Site VPNs"' \
--set translationLongDescription='"de_DE" "Dieses Geheimnis wird verwendet, um bei einem Site-to-Site VPN die Verbindung zu verschlüsseln."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='Site-to-Site VPN' \
--set groupPosition='3' \
--set default='secret' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnSecret' \
--set syntax='TextArea' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-RemoteAddress,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set ldapMapping='univentionOpenvpnRemoteAddress' \
--set objectClass='univentionOpenvpnSitetoSite' \
--set name='UniventionOpenvpn-RemoteAddress' \
--set shortDescription='OpenVPN internal remote address' \
--set longDescription='Defines the fixed IP for the remote endpoint, which is only used inside the virtual transfer network.' \
--set translationShortDescription='"de_DE" "Interne Adresse der OpenVPN Gegenstelle"' \
--set translationLongDescription='"de_DE" "Definiert die feste IP der Gegenstelle, welche nur innerhalb des virtuellen Transfernetzwerks verwendet wird."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='Site-to-Site VPN' \
--set groupPosition='3' \
--set default='10.153.176.2' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnRemoteAddress' \
--set syntax='string' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-LocalAddress,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set ldapMapping='univentionOpenvpnLocalAddress' \
--set objectClass='univentionOpenvpnSitetoSite' \
--set name='UniventionOpenvpn-LocalAddress' \
--set shortDescription='OpenVPN internal local address' \
--set longDescription='Defines the fixed IP for the local endpoint, which is only used inside the virtual transfer network.' \
--set translationShortDescription='"de_DE" "OpenVPN interne lokale Adresse"' \
--set translationLongDescription='"de_DE" "Definiert die feste IP für den lokalen Endpunkt, welche nur innerhalb des virtuellen Transfernetzwerks verwendet wird."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='Site-to-Site VPN' \
--set groupPosition='3' \
--set default='10.153.176.1' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnLocalAddress' \
--set syntax='string' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-SitetoSitePort,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set default='1195' \
--set ldapMapping='univentionOpenvpnSitetoSitePort' \
--set objectClass='univentionOpenvpnSitetoSite' \
--set name='UniventionOpenvpn-SitetoSitePort' \
--set shortDescription='OpenVPN site-to-site port' \
--set longDescription='OpenVPN will listen on this port when using site-to-site.' \
--set translationShortDescription='"de_DE" "OpenVPN Site-to-Site Port"' \
--set translationLongDescription='"de_DE" "OpenVPN empfängt Verbindungen auf diesem Port, wenn Site-to-Site verwendet wird."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='Site-to-Site VPN' \
--set groupPosition='3' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnSitetoSitePort' \
--set syntax='integer' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='1' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-Remote,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set ldapMapping='univentionOpenvpnRemote' \
--set objectClass='univentionOpenvpnSitetoSite' \
--set name='UniventionOpenvpn-Remote' \
--set shortDescription='OpenVPN remote address' \
--set longDescription='The remote VPN endpoint address for site-to-site vpns. This can be an IPv4 address or a resolvable DNS name.' \
--set translationShortDescription='"de_DE" "Adresse der OpenVPN Gegenstelle"' \
--set translationLongDescription='"de_DE" "Die Adresse der OpenVPN Gegenstelle. Diese wird für Site-to-Site VPNs verwendet."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='Site-to-Site VPN' \
--set groupPosition='3' \
--set default='0.0.0.0' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnRemote' \
--set syntax='hostOrIP' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-SitetoSiteActive,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set ldapMapping='univentionOpenvpnSitetoSiteActive' \
--set objectClass='univentionOpenvpnSitetoSite' \
--set name='UniventionOpenvpn-SitetoSiteActive' \
--set shortDescription='OpenVPN site-to-site active.' \
--set longDescription='In order to start the OpenVPN service in site-to-site mode on this computer, this option has to be enabled. IMPORTANT: you need a valid license to activate this option.' \
--set translationShortDescription='"de_DE" "OpenVPN Site-to-Site aktiviert"' \
--set translationLongDescription='"de_DE" "OpenVPN Site-to-Site aktiviert. Um den OpenVPN Dienst im Site-to-Site Modus auf diesem Rechner laufen zu lassen, muss diese Option aktiviert werden. WICHTIG: eine gueltige Lizenz ist fuer die Aktivierung erforderlich."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set groupName='Site-to-Site VPN' \
--set groupPosition='3' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnSitetoSiteActive' \
--set syntax='boolean' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-License,${eabas}" >/dev/null 2>&1
univention-directory-manager settings/extended_attribute create "$@" --ignore_exists \
--position "cn=openvpn,cn=custom attributes,cn=univention,$ldap_base" \
--append module="computers/domaincontroller_master" \
--append module="computers/domaincontroller_slave" \
--append module="computers/domaincontroller_backup" \
--append module="computers/memberserver" \
--set ldapMapping='univentionOpenvpnLicense' \
--set objectClass='univentionOpenvpnSitetoSite' \
--set name='UniventionOpenvpn-License' \
--set shortDescription='<i>(see</i> <b>active users</b> <i>app for license details)</i><hr/>OpenVPN4UCS license key (required to activate commercial features)' \
--set longDescription='Activates commercial features like: user amount >5, site-to-site VPN - available through bytemine.net.' \
--set translationShortDescription='"de_DE" "<i>(Lizenzdetails in der</i> <b>aktive Benutzer</b> <i>Applikation)</i><hr>OpenVPN4UCS Lizenzschlüssel (für kommerzielle Funktionen benötigt)"' \
--set translationLongDescription='"de_DE" "Aktiviert kommerzielle Funktionen: Benutzeranzahl >5, Site-to-Site VPN - zu beziehen über bytemine.net."' \
--set tabAdvanced='1' \
--set tabName='OpenVPN4UCS' \
--set default='' \
--set overwriteTab='0' \
--set valueRequired='0' \
--set CLIName='openvpnLicense' \
--set syntax='TextArea' \
--set fullWidth='1' \
--set mayChange='1' \
--set multivalue='0' \
--set deleteObjectClass='0' \
--set doNotSearch='0' \
--set hook='None' || die
# initial site2site secret generation
if [ ! -f ${NEWKEY} ]; then
openvpn --genkey --secret "${NEWKEY}"
eval "$(ucr shell server/role ldap/hostdn)"
udm computers/${server_role} modify "$@" --dn "${ldap_hostdn}" \
--set openvpnSecret="$(cat ${NEWKEY})"
fi
# present in versions < 0.7
udm settings/extended_attribute remove "$@" \
--dn "cn=UniventionOpenvpn-Tun,${eabas}" >/dev/null 2>&1
name="ldapper-m-$hostname"
umask 077
create_machine_password >/etc/ldapper-m.secret
pw=$(cat /etc/ldapper-m.secret)
udm users/ldap remove "$@" --ignore_not_exists \
--dn "uid=$name,cn=users,$ldap_base" > /dev/null 2>&1
udm users/ldap create "$@" \
--position "cn=users,$ldap_base" \
--set username=$name \
--set lastname=$name \
--set password=$pw \
--set overridePWHistory=1 \
--set overridePWLength=1
udm groups/group create "$@" \
--position "cn=groups,$ldap_base" \
--set name="VPN Admins" \
--set users="uid=Administrator,cn=users,$ldap_base"
rdy2gobas="/var/www/readytogo"
templates=/usr/lib/openvpn-int/templates
mkdir -p "${rdy2gobas}"
cat > "${rdy2gobas}/.htaccess" <<-ENDOFHTACCESS
ErrorDocument 404 /readytogo/notfound.html
ENDOFHTACCESS
cp ${templates}/download.head "${rdy2gobas}/notfound.html"
echo "<p>This page does not exist. Please talk to your VPN admin who should consider a license issue.</p>" >> "${rdy2gobas}/notfound.html"
cat ${templates}/download.tail >> "${rdy2gobas}/notfound.html"
cat > /etc/apache2/sites-available/openvpn4ucs2.conf <<-ENDOFSITECFG
<Location /readytogo>
Options -Indexes
</Location>
ENDOFSITECFG
chmod 0600 /etc/apache2/sites-available/openvpn4ucs2.conf
for dir in /var/www/readytogo/*/
do
sed -i "/AuthLDAPBindPassword/c\AuthLDAPBindPassword ${pw}" $dir.htaccess
sed -i "/AuthLDAPBindDN/c\AuthLDAPBindDN \"uid=$name,cn=users,$ldap_base\"" $dir.htaccess
done
a2ensite openvpn4ucs2
/usr/lib/openvpn-int/display_users/create_site_cfg
a2ensite openvpn4ucs
/etc/init.d/apache2 restart
stop_udm_cli_server
#umc_init
#umc_operation_create "openvpn4ucs-all" "OpenVPN4UCS" "" "openvpn4ucs/*"
#umc_policy_append "default-umc-all" "openvpn4ucs-all"
joinscript_save_current_version
exit 0
| true
|
f41370ed57a046571a86e2ef78b62ddc52f1a25a
|
Shell
|
alphagov-mirror/flooddata
|
/scripts/stations.sh
|
UTF-8
| 2,214
| 3.765625
| 4
|
[
"MIT",
"OGL-UK-2.0"
] |
permissive
|
#!/bin/bash
#
# process new unzipped stations xml files into an equivalent tsv
#
set -e
cd ${FLOODDATA:?}
mkdir -p cache www www/stations
export tmpfile=/tmp/stations.$$
export header=/tmp/stations-header
find files/ENT_7001 -name \*.xml |
while read xml
do
tsv=cache/$xml.tsv
if [ "$xml" -nt "$tsv" ]
then
#
# convert XML to TSV
# - filter information deemed sensitive
#
mkdir -p $(dirname "$tsv")
(
set -x
xsltproc templates/stations.xsl $xml
) |
#
# remove gates
#
grep -v 'Gate Position' |
#
# fixup groundwater locations
#
awk -F$'\t' '
BEGIN { OFS=FS }
{
if ($7 ~ /Groundwater/) {
$4 = gensub(/^(..)([0-9][0-9])...([0-9][0-9])...$/, "\\1\\2\\3", "", $4);
$4 = gensub(/^(..)([0-9][0-9])..([0-9][0-9])..$/,"\\1\\2\\3", "", $4);
$4 = gensub(/^(..)([0-9][0-9]).([0-9][0-9]).$/,"\\1\\2\\3", "", $4);
}
print;
}' > $tsv
#
# capture header
#
head -1 $tsv > $header
#
# add to daily file
#
cat $tsv |
awk -F$'\t' '
$1 ~ /^20[0-9][0-9]-[0-9][0-9]-[0-9][0-9]/ {
day=$1;
sub(/T.*$/,"",day);
gsub(/-/,"",day);
file="www/stations/" day ".tsv";
print >> file;
print file
}'
fi
done |
#
# de-dupe updated daily files
#
sort -u |
while read file
do
echo "updating $file .." >&2
(
cat $header
cat $file
) |
sort -n |
uniq > $tmpfile
mv $tmpfile $file
done
#
# generate short feed of latest results
#
ls -1 www/stations/*.tsv |
tail -2 | (
cat $header
xargs cat |
tail -10000
) |
sort -n |
uniq > www/stations.tsv
#
# gzip latest stations.tsv
# - for backwards compatibility with the big file made for the #floodhack hackday
#
gzip -c < www/stations.tsv > www/stations.tsv.gz
| true
|
7884af8c78212346521d5413c4458dd439656e52
|
Shell
|
ViViDboarder/drone-webdav
|
/push.sh
|
UTF-8
| 1,661
| 3.734375
| 4
|
[] |
no_license
|
#! /bin/bash
set -e
ARGS=()
# Use WEBDAV_USERNAME as default, if provided.
if [ -z "$PLUGIN_USERNAME" ] && [ -n "$WEBDAV_USERNAME" ]; then
PLUGIN_USERNAME="$WEBDAV_USERNAME"
fi
# Use WEBDAV_PASSWORD as default, if provided.
if [ -z "$PLUGIN_PASSWORD" ] && [ -n "$WEBDAV_PASSWORD" ]; then
PLUGIN_PASSWORD="$WEBDAV_PASSWORD"
fi
# If username and password are provided, add auth
if [ -n "$PLUGIN_USERNAME" ] && [ -n "$PLUGIN_PASSWORD" ]; then
ARGS+=(--user "${PLUGIN_USERNAME}:${PLUGIN_PASSWORD}")
fi
# Use a proxy, if one is specified
if [ -n "$PLUGIN_PROXY_URL" ]; then
ARGS+=(--proxy "${PLUGIN_PROXY_URL}")
fi
# If a timeout is specified, make use of it.
if [ -n "$PLUGIN_TIMEOUT" ]; then
ARGS+=(--max-time "${PLUGIN_TIMEOUT}")
fi
# Set PLUGIN_ATTEMPTS to one if nothing else is specified
if [ -z "$PLUGIN_ATTEMPTS" ]; then
PLUGIN_ATTEMPTS=1
fi
# Repeat the upload as long as specified.
while [ "${PLUGIN_ATTEMPTS}" -gt 0 ]; do
# Uploading the file
# shellcheck disable=SC2086
curl \
$PLUGIN_CUSTOM_ARGUMENTS \
--fail-with-body \
--show-error \
--silent \
"${ARGS[@]}" \
--upload-file "$PLUGIN_FILE" \
"$PLUGIN_DESTINATION" && {
# Terminate the script as soon as the upload is successful
echo "[INFO] Upload was successful."
exit 0
}
# Show messages in case uploads have failed
[ "$PLUGIN_ATTEMPTS" -gt 1 ] && {
echo "[INFO] Upload failed. Attempting a new upload, if possible."
}
sleep 5
PLUGIN_ATTEMPTS=$((PLUGIN_ATTEMPTS-1))
done
# Returns an error because the upload was not successful
echo "[ERROR] All upload attempts have failed."
exit 1
| true
|
96a88646bcf1fe57fbd79d3d87531b81facc1f15
|
Shell
|
dreamer/dotfiles
|
/scripts/firefox-nightly
|
UTF-8
| 498
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
download () {
pushd ~/Downloads/
curl -L --output firefox-nightly.tar.bz2 \
"https://download.mozilla.org/?product=firefox-nightly-latest-l10n-ssl&os=linux64&lang=en-GB"
echo "Unpacking…"
tar xjf firefox-nightly.tar.bz2
mkdir -p ~/opt
mv firefox ~/opt/firefox-nightly
popd
}
run () {
pushd ~/opt/firefox-nightly
echo ./firefox -P nightly "$@"
./firefox -P nightly "$@"
popd
}
main () {
if [[ ! -d ~/opt/firefox-nightly ]] ; then
download
fi
run "$@"
}
main "$@"
| true
|
56fec21e356529111d600d57198715ed38bed315
|
Shell
|
stroomdev10/gaffer-doc
|
/scripts/updateVersions.sh
|
UTF-8
| 1,333
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
if [ -z "$1" -o -z "$2" ]; then
echo "Usage: ./updateVersions.sh <new gaffer version> <new gaffer-tools version>"
exit 1
fi
git reset --hard
git clean -fd
git checkout master
git pull
gafferVersion=$1
gafferToolsVersion=$2
git checkout -b updating-versions-$gafferVersion-$gafferToolsVersion
mvn -q org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.parent.version
oldGafferVersion=`mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=gaffer.version | grep -v '\['`
oldGafferToolsVersion=`mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=gaffer-tools.version | grep -v '\['`
sed -i '' "s/gaffer.version>$oldGafferVersion</gaffer.version>$gafferVersion</g" pom.xml
sed -i '' "s/gaffer-tools.version>$oldGafferToolsVersion</gaffer-tools.version>$gafferToolsVersion</g" pom.xml
sed -i '' "s/version>$oldGafferVersion</version>$gafferVersion</g" pom.xml
sed -i '' "s/gaffer2:$oldGafferVersion/gaffer2:$gafferVersion/g" NOTICES
sed -i '' "s/gaffer-tools:$oldGafferToolsVersion/gaffer-tools:$gafferToolsVersion/g" NOTICES
sed -i '' "s/>Version $oldGafferVersion</>Version $gafferVersion</g" docs/README.md
git add .
git commit -a -m "Updated versions"
git push -u origin updating-versions-$gafferVersion-$gafferToolsVersion
| true
|
b4042e81144e1f58f0d8794f054314ab9fb9d290
|
Shell
|
yayankov/Operating-systems-FMI
|
/shell-script/forExam/29.sh
|
UTF-8
| 392
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
if [ ${1} = "-n" ]; then
N=${2}
shift 2
else
N=10
fi
temp=$(mktemp)
for FILE; do
while read line; do
timestamp=$(echo ${line} | awk -F ' ' '{print $1,$2}')
IDF=$(echo ${FILE} | sed -E "s/.log$//")
data=$(echo ${line} | cut -d ' ' -f3-)
echo "${timestamp} ${IDF} ${data}" >> ${temp}
done < <(cat ${FILE} | tail -n ${N})
done
cat ${temp} | sort -n
rm -- ${temp}
| true
|
a9e672da72a7bfeb44b1e1284abfd05ac213a49a
|
Shell
|
attilioborri/bash_utilties
|
/YouTube/YoutubeDownloadMP3FromList
|
UTF-8
| 304
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Open List.txt
cat List.txt | while read line; do
youtube-dl -f 140 $line # Download audio (m4a)
done
# Convert M4A to MP3
for f in *.m4a
do
name=`echo "$f" | sed -e "s/.m4a$//g"`
ffmpeg -i "$f" -vn -ar 44100 -ac 2 -ab 128k -f mp3 "$name.mp3"
done
# Delete temp files
rm *.m4a
| true
|
f80a44f92f77f61c95bd3df5ac95d9083422b8d7
|
Shell
|
pheanex/BachelorThesis
|
/Scripts/iperf-multiple-clients
|
UTF-8
| 768
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# author kmanna
# date 03.06.2014
# Launch parallel iperf UDP throughput tests from this VM to every other VM
# in a numbered range. Assumes iperf servers have been started on the
# targets beforehand, e.g.:  iperf -s -p PORT
export vmstart=$1
export vmend=$2
export self=$3
export send_count=$4
export bw=$5

# All five positional arguments are mandatory.
if [[ -z "$vmstart" || -z "$vmend" || -z "$self" || -z "$send_count" || -z "$bw" ]]
then
	echo "Error: usage: $0 <vmstartnr> <vmendnr> <id-of-own-vm> <send_count(t)> <sending bw in Mbit>" >&2
	exit 1
fi

for target in $(seq $vmstart $vmend)
do
	# Never probe ourselves.
	if [[ $target -eq $self ]]; then
		continue
	fi
	# Each sender uses a port derived from its own VM id so parallel
	# clients on different VMs do not collide.
	server_port=$((5000+$self))
	report_file="query/testdata/iperf-clientlog_172.16.40.2${target}"
	iperf -u -c "172.16.40.2${target}" -p "$server_port" -t "$send_count" -i 1 -b "${bw}M" &> $report_file &
done
| true
|
a254852535f3151151aa6c9cffb77574f87affae
|
Shell
|
mystatus/covid19-reports
|
/packages/server/generate-dev-certs.sh
|
UTF-8
| 2,531
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a throwaway development CA, a self-signed server certificate,
# and three test user certificates (root admin, org admin, basic user),
# each bundled into a passwordless PKCS#12 file under server/certs/user/.
# Abort on the first failing openssl invocation.
set -e

# NOTE(review): server/keys is created but never written below — confirm it
# is still needed before removing. -p makes reruns idempotent.
mkdir -p server/keys

export ORGANIZATION="COVID Reports Dev"
export ORGANIZATIONAL_UNIT="COVID Reports Dev"
export CERTIFICATE_AUTHORITY_NAME="coviddevca"
export SERVER_CA_NAME="coviddevlocal"

# Create one test user's key, CSR, CA-signed certificate, and PKCS#12
# bundle (empty export password).
#   $1 - basename for the generated files
#   $2 - certificate common name
create_user_cert() {
    local name=$1
    local cn=$2
    openssl genrsa -out "server/certs/user/${name}.key" 2048
    openssl req -new -key "server/certs/user/${name}.key" -subj "/C=US/O=${ORGANIZATION}/OU=${ORGANIZATIONAL_UNIT}/OU=USER/CN=${cn}" -out "server/certs/user/${name}.csr"
    openssl x509 -req -in "server/certs/user/${name}.csr" -CA server/certs/ca.crt -CAkey server/certs/ca.key -CAcreateserial -out "server/certs/user/${name}.crt"
    openssl pkcs12 -export -out "server/certs/user/${name}.p12" -inkey "server/certs/user/${name}.key" -in "server/certs/user/${name}.crt" -certfile server/certs/ca.crt -passout pass:
}

# Create Certificate Authority (self-signed, valid one year)
rm -rf server/certs
mkdir server/certs
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -subj "/C=US/O=${ORGANIZATION}/OU=${ORGANIZATIONAL_UNIT}/CN=${CERTIFICATE_AUTHORITY_NAME}" -keyout server/certs/ca.key -out server/certs/ca.crt

# Create Server Keys (self-signed, valid one year)
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -subj "/C=US/O=${ORGANIZATION}/OU=${ORGANIZATIONAL_UNIT}/CN=${SERVER_CA_NAME}" -keyout server/certs/server.key -out server/certs/server.crt

# Create User Keys
mkdir server/certs/user
create_user_cert rootadmin "TEST.USER.ROOTADMIN.0000000001"
create_user_cert orgadmin  "TEST.USER.ORGADMIN.0000000002"
create_user_cert user      "TEST.USER.BASIC.0000000003"
| true
|
aa306fd4b338b070eccb35536f9e8de18f8b9fe1
|
Shell
|
Eventdisplay/Eventdisplay_AnalysisScripts_CTA
|
/utilities/prepareAnalysis.sh
|
UTF-8
| 3,340
| 3.3125
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
#
# prepare analysis package into a new directory:
# - links hyper array evndisp files
# - links for sub array directories
# - analysis files
# - code compilation
#
# Usage: ./prepareAnalysis.sh <data set>
# Requires $CTA_USER_DATA_DIR to be set in the environment.
if [ ! -n "$1" ]; then
echo "
./prepareAnalysis.sh <data set>
"
exit
fi
DSET="$1"
# Target directory that will hold the prepared analysis package.
DDIR="$CTA_USER_DATA_DIR/analysis/AnalysisData/${DSET}"
# NOTE(review): 'return' is only valid inside a function or a sourced
# script; at top level bash reports an error — 'exit' was likely intended.
mkdir -p "$DDIR" || return
# Map the data-set name onto the cluster path holding its hyper-array
# EVNDISP files. Order matters: each "-NSB5x" variant must be tested
# before its plain counterpart, which would otherwise match first.
gethyperarraylink()
{
    if [[ ${DSET} == *"LaPalma-20deg-NSB5x"* ]]; then
        echo "/lustre/fs21/group/cta/prod5-grid/Prod5b_LaPalma_AdvancedBaseline_NSB5x_20deg_DL1"
    elif [[ ${DSET} == *"LaPalma-20deg"* ]]; then
        echo "/lustre/fs24/group/cta/prod5b/CTA-ProdX-Download-DESY/Prod5b_LaPalma_AdvancedBaseline_NSB1x"
    elif [[ ${DSET} == *"LaPalma-40deg-NSB5x"* ]]; then
        echo "/lustre/fs21/group/cta/prod5-grid/Prod5b_LaPalma_AdvancedBaseline_NSB5x_40deg_DL1"
    elif [[ ${DSET} == *"LaPalma-40deg"* ]]; then
        echo "/lustre/fs24/group/cta/prod5b/CTA-ProdX-Download-DESY/Prod5b_LaPalma_AdvancedBaseline_NSB1x_40deg"
    elif [[ ${DSET} == *"LaPalma-60deg-NSB5x"* ]]; then
        echo "/lustre/fs21/group/cta/prod5-grid/Prod5b_LaPalma_AdvancedBaseline_NSB5x_60deg_DL1"
    elif [[ ${DSET} == *"LaPalma-60deg"* ]]; then
        echo "/lustre/fs24/group/cta/prod5b/CTA-ProdX-Download-DESY/Prod5b_LaPalma_AdvancedBaseline_NSB1x_60deg"
    elif [[ ${DSET} == *"Paranal-20deg-NSB5x"* ]]; then
        echo "/lustre/fs22/group/cta/users/maierg/analysis/AnalysisData/prod5-Paranal-20deg-NSB5x-sq10-LL/S.hyperarray/EVNDISP"
    elif [[ ${DSET} == *"Paranal-20deg"* ]]; then
        echo "/lustre/fs22/group/cta/users/maierg/analysis/AnalysisData/prod5-Paranal-20deg-sq10-LL/S.hyperarray/EVNDISP"
    elif [[ ${DSET} == *"Paranal-40deg-NSB5x"* ]]; then
        echo "/lustre/fs21/group/cta/prod5-grid/Prod5b_Paranal_AdvancedBaseline_NSB5x_40deg_DL1"
    elif [[ ${DSET} == *"Paranal-40deg"* ]]; then
        echo "/lustre/fs21/group/cta/prod5-grid/Prod5b_Paranal_AdvancedBaseline_NSB1x_40deg_DL1"
    elif [[ ${DSET} == *"Paranal-60deg-NSB5x"* ]]; then
        echo "/lustre/fs21/group/cta/prod5-grid/Prod5b_Paranal_AdvancedBaseline_NSB5x_60deg_DL1"
    elif [[ ${DSET} == *"Paranal-60deg"* ]]; then
        echo "/lustre/fs21/group/cta/prod5-grid/Prod5b_Paranal_AdvancedBaseline_NSB1x_60deg_DL1"
    fi
}
# Create the N./S. hyperarray directory inside $DDIR and (re)link its
# EVNDISP entry to the production path for this data set.
linkhyperarray()
{
    cd "$DDIR"
    # LaPalma data sets are the northern site; everything else southern.
    if [[ ${DSET} == *"LaPalma"* ]]; then
        mkdir -v -p "N.hyperarray" || return
        cd "N.hyperarray"
    else
        mkdir -v -p "S.hyperarray" || return
        cd "S.hyperarray"
    fi
    # NOTE(review): the bare call below prints the link to stdout only;
    # the captured call on the next line is the one actually used.
    gethyperarraylink
    HLINK=$(gethyperarraylink)
    rm -f EVNDISP
    ln -s "$HLINK" EVNDISP
}
# Link the per-subarray EVNDISP productions for the matching site.
linksubarrays()
{
    if [[ ${DSET} == *"LaPalma"* ]]; then
        ./linkEvndispProduction.sh "${DSET}" "${DSET}" \
            ../prod5/subArray.prod5.North-Alpha.list North
        ./linkEvndispProduction.sh "${DSET}" "${DSET}" \
            ../prod5/subArray.prod5.North-Alpha-sub.list North
    else
        ./linkEvndispProduction.sh "${DSET}" "${DSET}" \
            ../prod5/subArray.prod5.South-AlphaC8aj-BetaPlus.list South
        ./linkEvndispProduction.sh "${DSET}" "${DSET}" \
            ../prod5/subArray.prod5.South-AlphaC8aj-BetaPlus-sub.list South
    fi
}
# Compile the analysis binaries for this data set.
install()
{
    cd ../install || return
    ./prepareProductionBinaries.sh "${DSET}" main
}
# Each step runs in its own subshell so its 'cd' calls do not leak into
# the following steps.
(
linkhyperarray
)
(
linksubarrays
)
(
install
)
| true
|
8db5aa2ba19e9e71cca8a3c8d6a052853a32e66b
|
Shell
|
godane/abs
|
/extra/p7zip/PKGBUILD
|
UTF-8
| 1,450
| 2.921875
| 3
|
[] |
no_license
|
# $Id: PKGBUILD 85904 2010-07-22 04:49:57Z dgriffiths $
# Contributor: Thayer Williams <thayer@archlinux.org>
# Contributor: Hugo Doria <hugo@archlinux.org>
# Contributor: TuxSpirit<tuxspirit@archlinux.fr> 2007/11/17 21:22:36 UTC
# Maintainer: Daniel J Griffiths <ghost1227@archlinux.us>

pkgname=p7zip
pkgver=9.13
pkgrel=2
pkgdesc="A command-line port of the 7zip compression utility"
arch=('i686' 'x86_64')
license=('GPL')
url="http://p7zip.sourceforge.net"
depends=('gcc-libs' 'bash')
source=(http://downloads.sourceforge.net/sourceforge/${pkgname}/${pkgname}_${pkgver}_src_all.tar.bz2)
md5sums=('8ddb5053db3b1f2696407d01be145779')
options=(!emptydirs)

build() {
	cd "${srcdir}/${pkgname}_${pkgver}"

	# Pick the makefile matching the target architecture (Arch64 fix).
	if [ "$CARCH" == "x86_64" ]; then
		cp makefile.linux_amd64 makefile.machine
	else
		cp makefile.linux_x86_ppc_alpha_gcc_4.X makefile.machine
	fi

	# Install under /usr instead of the upstream default /usr/local.
	sed -i "s|usr/local|usr|g" makefile
	# makepkg runs build() with errexit, so '|| return 1' is obsolete.
	make all3 OPTFLAGS="${CXXFLAGS}"
}

package() {
	cd "${srcdir}/${pkgname}_${pkgver}"

	# BUG FIX: DEST_SHARE_DOC previously pointed at an external URL
	# ("http://www.bugaco.com/7zip"), which 'make install' treats as a
	# filesystem path. Install the docs into the package tree instead.
	make install DEST_HOME="${pkgdir}/usr" \
		DEST_MAN="${pkgdir}/usr/share/man" \
		DEST_SHARE_DOC="${pkgdir}/usr/share/doc/p7zip"
	mkdir -p "${pkgdir}/usr/share/doc/p7zip/DOCS"
	install -m555 bin/7z.so "${pkgdir}/usr/lib/p7zip/"

	# The generated wrappers hard-code ${pkgdir}; rewrite them to the
	# final /usr prefix.
	sed -i "s|${pkgdir}/usr|/usr|g" "${pkgdir}/usr/bin/7z"
	sed -i "s|${pkgdir}/usr|/usr|g" "${pkgdir}/usr/bin/7za"
	sed -i "s|${pkgdir}/usr|/usr|g" "${pkgdir}/usr/bin/7zr"

	# Install mc's virtual filesystem
	install -Dm755 contrib/VirtualFileSystemForMidnightCommander/u7z \
		"${pkgdir}/usr/lib/mc/extfs.d/u7z"
}
| true
|
0878018e65aface5dc59c405331f83ac2c06dd9f
|
Shell
|
haugene/dotfiles
|
/bin/openprs
|
UTF-8
| 2,788
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
##############################################################################
# openprs
# -----------
# Get the openprs for all of a users github repos
#
# :authors: Jess Frazelle, @jfrazelle
# :date: 8 June 2015
# :version: 0.0.1
##############################################################################
set -e
set -o pipefail

# An API token is required for authenticated GitHub requests.
if [[ -z "$GITHUB_TOKEN" ]]; then
	echo "Set the GITHUB_TOKEN env variable." >&2
	# BUG FIX: 'return' is invalid at script top level (bash prints
	# "can only return from a function" and only stops here because of
	# set -e); 'exit 1' states the intent directly.
	exit 1
fi

URI=https://api.github.com
API_VERSION=v3
API_HEADER="Accept: application/vnd.github.${API_VERSION}+json"
AUTH_HEADER="Authorization: token ${GITHUB_TOKEN}"
DEFAULT_PER_PAGE=100
# Highest page number found in the most recent paginated response.
LAST_PAGE=1
# get the last page from the headers
get_last_page(){
header=${1%%" rel=\"last\""*}
header=${header#*"rel=\"next\""}
header=${header%%">;"*}
LAST_PAGE=$(echo ${header#*"&page="} | bc 2>/dev/null)
}
# Fetch one page of open pull requests for a repository and print a short
# report for each. Also refreshes the global LAST_PAGE from the response's
# Link header so the caller can walk the remaining pages.
#   $1 - "owner/name" repository slug
#   $2 - page number to fetch
get_pulls(){
	local repo=$1
	local page=$2
	# send the request (-i keeps the response headers in the output)
	local response=$(curl -i -sSL -H "${AUTH_HEADER}" -H "${API_HEADER}" "${URI}/repos/${repo}/pulls?per_page=${DEFAULT_PER_PAGE}&page=${page}&state=open")
	# seperate the headers and body into 2 variables;
	# the header section ends at the first CR-only line.
	local head=true
	local header=
	local body=
	while read -r line; do
		if $head; then
			if [[ $line = $'\r' ]]; then
				head=false
			else
				header="$header"$'\n'"$line"
			fi
		else
			body="$body"$'\n'"$line"
		fi
	done < <(echo "${response}")
	get_last_page "${header}"
	local length=$(echo $body | jq length)
	echo "${repo} has ${length} open PRs"
	if [[ "$length" -gt 0 ]]; then
		# base64-encode each PR object so it survives the word
		# splitting of the unquoted for-loop below.
		local pulls=$(echo $body | jq --raw-output '.[] | {number: .number, html_url: .html_url, title: .title, body: .body, created_at: .created_at, author: .base.repo.owner.login} | @base64')
		for p in $pulls; do
			pull="$(echo $p | base64 --decode)"
			local number=$(echo $pull | jq --raw-output '.number')
			local html_url=$(echo $pull | jq --raw-output '.html_url')
			local title=$(echo $pull | jq --raw-output '.title')
			local body=$(echo $pull | jq --raw-output '.body')
			local created_at=$(echo $pull | jq --raw-output '.created_at')
			local author=$(echo $pull | jq --raw-output '.author')
			# <<- strips leading tabs; date -d is GNU-only.
			cat <<-EOF
			${repo}#${number}: ${title}
			`date -d"${created_at}" +'%A, %B %d, %Y at %r'`
			${html_url}
			@${author}: ${body}
			---
			EOF
		done
	else
		return 0
	fi
}
# List the authenticated user's repositories and report the open pull
# requests of each, following pagination via LAST_PAGE.
main(){
	local repo_json
	repo_json=$(curl -sSL -H "${AUTH_HEADER}" -H "${API_HEADER}" "${URI}/user/repos?per_page=${DEFAULT_PER_PAGE}")
	local repo_list
	repo_list=$(echo $repo_json | jq --raw-output '.[].full_name | tostring')
	local repo page
	for repo in $repo_list; do
		page=1
		get_pulls "${repo}" "${page}"
		# get_pulls refreshed LAST_PAGE; walk any remaining pages.
		if [ -n "$LAST_PAGE" ] && [ "$LAST_PAGE" -ge "$page" ]; then
			for page in $(seq $((page + 1)) 1 ${LAST_PAGE}); do
				echo "On page ${page} of ${LAST_PAGE}"
				get_pulls "${repo}" "${page}"
			done
		fi
	done
}

main
| true
|
a84481a7e5024fbb5c5b17df84e816aeb8156953
|
Shell
|
cristian1604/docker-server
|
/apache/scripts/install-php-cs-fixer.sh
|
UTF-8
| 321
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Install php-cs-fixer globally via composer and ensure the composer global
# bin directory is on PATH in the user's shell rc file.
SHELL_BIN="${SHELL##*/}" # strip the directory part, e.g. /bin/zsh -> zsh
SHELLRC="$HOME/.${SHELL_BIN}rc"

composer global require friendsofphp/php-cs-fixer

# Append the PATH export only once; -q suppresses the matched line.
if grep -q ".composer/vendor/bin" "$SHELLRC"; then
	echo "Composer vendors installed"
else
	echo "" >> "$SHELLRC"
	# BUG FIX: single quotes keep $PATH and $HOME unexpanded, so the rc
	# file re-evaluates them at every shell startup instead of freezing
	# the values that happened to be set when this installer ran.
	echo 'export PATH="$PATH:$HOME/.composer/vendor/bin"' >> "$SHELLRC"
fi
| true
|
45117f80335d8872cb114f6c13e1b22ca8791acd
|
Shell
|
ef-ctx/platforms
|
/java/install
|
UTF-8
| 768
| 2.71875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -e
# Copyright 2016 tsuru authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Provision the Java platform for tsuru: install JDK/Maven/Tomcat and wire
# Tomcat's webapps directory to the application's deploy directory.
SOURCE_DIR=/var/lib/tsuru
# Provides ${CURRENT_DIR} and ${USER} used below (assumed — defined in
# the sourced config, not visible here).
source ${SOURCE_DIR}/base/rc/config
apt-get update
apt-get install -y --no-install-recommends \
        openjdk-7-jdk \
        maven \
        tomcat7
cp $SOURCE_DIR/java/Procfile $SOURCE_DIR/default/Procfile
# Replace Tomcat's webapps directory with a link to the app directory.
rm -rf /var/lib/tomcat7/webapps
ln -s ${CURRENT_DIR} /var/lib/tomcat7/webapps
mkdir -p /usr/share/tomcat7/common/classes /usr/share/tomcat7/server/classes /usr/share/tomcat7/shared/classes
chown -R ${USER}:${USER} /etc/tomcat7 /var/lib/tomcat7 /var/cache/tomcat7 /var/log/tomcat7 /usr/share/tomcat7
# Move Tomcat off port 8080 so the platform proxy can use it.
sed -i 's/8080/8888/' /etc/tomcat7/server.xml
| true
|
7caa709024c6ae6f3284cfc0c6977ec466669a9a
|
Shell
|
broadinstitute/PANOPLY
|
/panda/panda-src/bin/obsolete/get_split_samples.sh
|
UTF-8
| 735
| 2.90625
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2020 The Broad Institute, Inc. All rights reserved.
#
# Fetch the attributes of every sample in a FireCloud workspace (via the
# fissfc CLI) and collect them into filled_samples.tsv with a single
# header row. Expects $wkspace, $project and $src from config.sh.
echo -e "\n---------------------------"
echo -e "get split samples ---------"
echo -e "---------------------------\n"

source config.sh
source $src/tedmint-lib.sh

# One sample id per line.
fissfc sample_list -w $wkspace -p $project -t sample > samples_list.txt
declare -a samples
samples=(`cat "samples_list.txt"`)

# Truncate the accumulator files before the loop appends to them.
> filled_samples_0.tsv
> filled_samples.tsv

# Each attr_get emits a header line plus the sample's attribute row.
for s in "${samples[@]}"
do
  fissfc attr_get \
    -w $wkspace \
    -p $project \
    -t sample \
    -e $s >> filled_samples_0.tsv
done

# Keep one header, then every non-header row.
head -n 1 filled_samples_0.tsv > filled_samples.tsv
sed '/^entity:sample_id/d' < filled_samples_0.tsv >> filled_samples.tsv

rm samples_list.txt filled_samples_0.tsv
| true
|
ea5ab64b61366d1e6f5600eed31fa0a24a247c64
|
Shell
|
enterstudio/bokeh
|
/scripts/dev_environment
|
UTF-8
| 1,754
| 4.09375
| 4
|
[] |
permissive
|
#!/bin/bash
# CLI user interface: print usage and exit when invoked with -h.
if [ "$1" == "-h" ]; then
  # Typos fixed in the help text: "were" -> "where", "defauls" -> "defaults".
  usage="$(basename "$0") [-h] -- program to install all the bokeh dependencies

where:
    -h  show this help text
    -e  ENVIRONMENT where you want to install the dependencies, defaults to bokeh
    -b  install BUILD dependencies, defaults to true
    -r  install RUN dependencies, defaults to true
    -t  install TEST (and examples) dependencies, defaults to true
    -a  install ADDITIONAL image diff-related packages, defaults to false
"
  echo "$usage"
  exit 0
fi
# defaults; each may be overridden by the matching CLI flag below
env=bokeh
build=true
run=true
test=true
add=false

# handling of arguments: -e/-b/-r/-t take a value, -a is a boolean switch
while getopts e:b:r:t:a option
do
case "${option}" in
e) env=${OPTARG};;
b) build=${OPTARG};;
r) run=${OPTARG};;
t) test=${OPTARG};;
a) add=true;;
esac
done
# TODO: check if env exists
# Read a list-valued key from ../conda.recipe/meta.yaml via conda-build's
# MetaData API and print it as one space-separated line (spaces inside
# individual entries are stripped). $1 - key path, e.g. "requirements/build".
function get_value {
echo $(cat <<EOF | python -
from conda_build.metadata import MetaData
print(" ".join([s.replace(" ", "") for s in MetaData("../conda.recipe").get_value("$1")]))
EOF
)
}
# conda-install the given packages into $env, adding a "-c <name>" pair for
# every channel listed under the recipe's extra/channels key (the sed call
# rewrites the leading/space separators into " -c ").
function conda_install {
channels=$(echo $(get_value 'extra/channels') | sed -e 's/^\| \+/ -c /g')
conda install -n $env $channels --yes $@
}
# Install each requested dependency group from the conda recipe.
# (Typo "dependecies" fixed to "dependencies" in the status messages.)
if [ "$build" == "true" ]; then
    conda_install $(get_value "requirements/build")
    echo "BUILD dependencies installed."
fi

if [ "$run" == "true" ]; then
    conda_install $(get_value "requirements/run")
    echo "RUN dependencies installed."
fi

if [ "$test" == "true" ]; then
    conda_install $(get_value "test/requires")
    echo "TEST (and examples) dependencies installed."
fi

if [[ "$add" == "true" ]]; then
    conda_install boto
    echo "Image diff-related dependencies installed."
fi
| true
|
f9a2a9197e9c3d300a5552a93e8362bb8947de72
|
Shell
|
ilaishai/ArchConfigs
|
/Files/.scripts/media/decreaseVol
|
UTF-8
| 114
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Lower the volume of every PulseAudio sink by 5%.
# BUG FIX: the previous 'cut -b 12' read a fixed byte column and broke for
# sink indices >= 10 (or different pacmd indentation); awk extracts the
# index as the last field of each "index:" line instead.
for sink in $(pacmd list-sinks | awk '/index:/ {print $NF}')
do
    pactl set-sink-volume "$sink" -5%
done
| true
|
61d91cab0c9d6bbd1b0ea1fa47b4f48f15af3cea
|
Shell
|
gharper/scripts
|
/moreutils.sh
|
UTF-8
| 1,988
| 3.828125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Uses https://joeyh.name/code/moreutils/
# Cheat-sheet printer: emits a short description plus example invocations
# for each moreutils tool. Produces no side effects beyond stdout.
# NOTE(review): "nchronic" in the chronic example below looks like a typo
# for "chronic" — confirm before changing the printed text.
printf "\nchronic: runs a command quietly unless it fails\n"
printf "\tnchronic noisyscript.sh"
printf "\ncombine: combine the lines in two files using boolean operations\n"
printf "\tcombine file1 and file2\n"
printf "\tcombine file1 not file2\n"
printf "\tcombine file1 or file2\n"
printf "\tcombine file1 xor file2\n"
printf "\nerrno: look up errno names and descriptions\n"
printf "\terrno 2\n"
printf "\terrno -l\n"
printf "\nifdata: get network interface info without parsing ifconfig output\n"
printf "\tifdata -p en0\n"
printf "\tifdata -pa en0 \t# ipv4 address\n"
printf "\tifdata -pn en0 \t# netmask\n"
printf "\nifne: run a program if the standard input is not empty\n"
printf "\tfind . -name core | ifne mail -s 'Core files found' root\n"
printf "\nisutf8: check if a file or standard input is utf-8\n"
printf "\nparallel: run multiple jobs at once\n"
printf "\tparallel sh -c 'echo hi; sleep 2; echo bye' -- 1 2 3\n"
printf "\npee: tee standard input to pipes\n"
printf "\techo 'Hello World' | tee file1 file2 \t# Standard use of tee\n"
printf "\techo 'Hello World' | pee cat cat \t# Like tee, but pipe to another command\n"
printf "\nsponge: soak up standard input and write to a file\n"
printf "\tsort file1 > file1_sorted \t# Normally would need to write to a new file\n"
printf "\tsort file1 | sponge file1 \t# Writes back to same file\n"
printf "\nts: timestamp standard input\n"
printf "\tping -c 2 localhost | ts\n"
printf "\nvidir: edit a directory in your text editor\n"
printf "\t# Deleting lines = deleting files, editing lines = renaming or moving, etc\n"
printf "\nvipe: insert a text editor into a pipe\n"
printf "\techo 'Hello World' | vipe\n \t# Lets you edit the pipe data mid stream"
printf "\nzrun: automatically uncompress arguments to command\n"
printf "\t# A quick way to run a command that does not itself support compressed files, without manually uncompressing the files\n"
| true
|
42ba2d0b7b8fd97660d5a3d764a2687c92d9d10e
|
Shell
|
BackofenLab/IntaRNA-benchmark
|
/intarna-benchmark-sge.sh
|
UTF-8
| 1,389
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Grid Engine job wrapper around the IntaRNA benchmark driver (calls.py).
# The #$ lines below are SGE directives, not comments to edit casually.
#$ -N intaRNA-benchmark
#$ -cwd
#$ -pe smp 24
#$ -R y
#$ -l h_vmem=1G
#$ -o /scratch/bi03/gelhausr/intaRNA/IntaRNA-benchmark/sge-out/
#$ -j y
#$ -M gelhausr@informatik.uni-freiburg.de
#$ -m a

# This script will require a conda environment with:
# - the necessary dependencies of intaRNA
# - python3 | pandas
export PATH="/scratch/bi03/gelhausr/miniconda3/bin/:$PATH"
cd /scratch/bi03/gelhausr/intaRNA/IntaRNA-benchmark
source activate intarna-benchmark

# Variables (defaults; overridable via the flags parsed below)
scriptsPath="./bin/"
intaRNAbinary="../intaRNA/src/bin/"
inputPath="./input/"
outputPath="./output/"
intaRNACall=""
callID=""
withED=false

# Handling input: -s/-b/-i/-o/-a/-c take values, -e toggles ED mode.
while getopts "h?s:b:i:o:a:c:e" opt; do
    case "$opt" in
    h|\?)
        exit 0
        ;;
    s)  scriptsPath=$OPTARG
        ;;
    b)  intaRNAbinary=$OPTARG
        ;;
    i)  inputPath=$OPTARG
        ;;
    o)  outputPath=$OPTARG
        ;;
    a)  intaRNACall=$OPTARG
        ;;
    c)  callID=$OPTARG
        ;;
    e)  withED=true
        ;;
    esac
done

# Enforce callID: it names this benchmark run and must be unique.
if [ "$callID" == "" ]
then
  echo "No callID specified. Please specify a callID using -c <callID>"
  exit;
fi

# Run benchmark; -e forwards the "with ED values" flag to calls.py.
if [ "$withED" == true ]
then
  python3 $scriptsPath/calls.py -b "$intaRNAbinary" -i "$inputPath" -o "$outputPath" -c "$callID" "$intaRNACall" -e
else
  python3 $scriptsPath/calls.py -b "$intaRNAbinary" -i "$inputPath" -o "$outputPath" -c "$callID" "$intaRNACall"
fi
| true
|
a002e3e78d3d1fabd1ba87fb1702157dc657caf2
|
Shell
|
HashDataInc/docs
|
/build.sh
|
UTF-8
| 738
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build a PDF for every book directory (any subdirectory with a book.json)
# and then build the combined gitbook website into _book/.
set -euo pipefail
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd ${DIR}
rm -rf _book
# One-time setup of node/gitbook dependencies.
if [ ! -e node_modules ]; then
    npm install markdown-parser2
    gitbook install --log error
fi
for d in */; do
    # Skip directories that are not gitbook books.
    if [ ! -f ${d}/book.json ]; then
        continue
    fi
    echo "Generate PDF for book ${d}"
    # Regenerate the book's SUMMARY.md in place, then clean up the helper.
    cp ./gen_summary.js ${d}
    cd ${d} && node gen_summary.js && rm gen_summary.js && cd ..
    if [ ! -e ${d}/node_modules ]; then
        gitbook install --log error ${d}
    fi
    gitbook pdf --log warn ${d} "${d}$(basename ${d}).pdf"
done
echo "Generate website"
node gen_summary.js
gitbook build --log warn
# Strip build inputs from the published site, keeping only the top book.json.
find _book -name "SUMMARY.md" | xargs rm -f
find _book -name "book.json" | xargs rm -f
cp book.json _book/
| true
|
536f23449f1291f251aa3638dce945829a08673f
|
Shell
|
Chillimeat/natto-nemesis
|
/Add_Japanese_Support.sh
|
UTF-8
| 1,454
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
##################################################################################################################
# Install fcitx5 input-method packages (Arch Linux) and register the
# environment variables in the user's ~/.xprofile.
echo "################################################################"
echo "#########        Adding Japanese Support!       ################"
echo "################################################################"

echo "Installing Fcitx"

sudo pacman -S fcitx5-im --noconfirm --needed
sudo pacman -S fcitx5-mozc --noconfirm --needed
sudo pacman -S fcitx5-rime --noconfirm --needed
yay -S fcitx5-skin-arc --noconfirm --needed

echo "Adding xprofile to home directory"
homedir=$( getent passwd "$USER" | cut -d: -f6 )
# BUG FIX: the previous here-doc wrapped the export lines in literal double
# quotes (one of them unbalanced) and duplicated two exports, producing an
# invalid ~/.xprofile. The lines below are written exactly as the shell
# should read them.
cat <<EOT >> "$homedir/.xprofile"
export GTK_IM_MODULE=fcitx
export QT_IM_MODULE=fcitx
export XMODIFIERS=@im=fcitx
INPUT_METHOD DEFAULT=fcitx5
GTK_IM_MODULE DEFAULT=fcitx5
QT_IM_MODULE DEFAULT=fcitx5
XMODIFIERS DEFAULT=@im=fcitx5
EOT

echo "################################################################"
echo "#########                 DONE!                 ################"
echo "################################################################"

#NO Longer needed but keeping for reference
#sudo pacman -S fcitx5 --noconfirm --needed
#sudo pacman -S fcitx5-configtool --noconfirm --needed
#sudo pacman -S fcitx5-gtk --noconfirm --needed
#sudo pacman -S fcitx5-qt5 --noconfirm --needed
#yay -S fcitx5-arc-git --noconfirm --needed
| true
|
d69936a064cefee9690a00dda519f92029f0ad0d
|
Shell
|
dsvi/NotesTree
|
/PKGBUILD
|
UTF-8
| 1,052
| 2.6875
| 3
|
[
"Zlib"
] |
permissive
|
# Fixer-upper: Eli Schwartz <eschwartz@archlinux.org>
# Contributor: baltic <1000Hz.radiowave at google mail>

pkgname=notes-tree
pkgver=1.0
pkgrel=2
pkgdesc="Note taking (knowledge base) app with tree like notes structure"
arch=('i686' 'x86_64' 'aarch64')
url="https://bitbucket.org/baltic/notestree/src"
license=('ZLIB')
depends=('qt5-svg' 'qt5-webkit' 'boost-libs')
makedepends=('boost')
source=("https://bitbucket.org/baltic/notestree/downloads/$pkgname-$pkgver.tar.xz")
# noextract because the tarball has no versioned top-level directory;
# prepare() extracts it into one by hand.
noextract=("$pkgname-$pkgver.tar.xz")
sha256sums=('ad81cd9e064e9640783a260e4f19f30e971a7fd5f22ed06272c7c7b04d1ef711')

prepare() {
	# versioned source extraction is nice, but makepkg can't automagically guess
	# how to do this when the source archive doesn't use it.
	mkdir -p "$pkgname-$pkgver"
	tar xf "$pkgname-$pkgver.tar.xz" -C "$pkgname-$pkgver"
}

build() {
	cd "$srcdir/$pkgname-$pkgver"
	qmake-qt5
	make
}

package() {
	cd "$srcdir/$pkgname-$pkgver"
	make INSTALL_ROOT="$pkgdir" install
	install -Dm644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
| true
|
142d1893f8b4152455adfc1346713bd355bae529
|
Shell
|
sipb/homeworld
|
/tools/shellcheck-all.sh
|
UTF-8
| 700
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run shellcheck over every shell script tracked by git; exit non-zero if
# any script fails.
set -euo pipefail

cd "$(git rev-parse --show-toplevel)"

failed=""
while IFS="" read -r -d '' script
do
	# A file counts as shell if its shebang names sh/bash or its name
	# ends in .sh/.bash.
	if grep -Eq '^#!(.*/|.*env +)(sh|bash)' "$script" || [[ "$script" =~ \.(ba)?sh$ ]]
	then
		# Suppressed checks:
		#   SC1091 - sourced configuration files are not known statically
		#            (TODO(#431): re-enable)
		#   SC2015 - overzealous rule that forbids reasonable code
		#   SC2016 - fires on user-facing hints to set variables and on
		#            nested code snippets passed to shells
		shellcheck -e SC1091,SC2015,SC2016 "$script" || failed=failed
	fi
done < <(git ls-files -z)

# Non-empty 'failed' means at least one script had findings.
test -z "$failed"
| true
|
55232b71e4eaee4603d9b9a3270b7b7184a4c855
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/whysynth/PKGBUILD
|
UTF-8
| 682
| 2.59375
| 3
|
[] |
no_license
|
# Maintainer: Sean Bolton <sean at smbolton dot com>

pkgname=whysynth
pkgver=20120903
pkgrel=2
pkgdesc="A versatile softsynth plugin for the DSSI Soft Synth Interface"
url="http://www.smbolton.com/whysynth.html"
depends=('alsa-lib' 'liblo' 'gtk2' 'fftw')
makedepends=('dssi')
license=('GPL')
source=(http://www.smbolton.com/whysynth/${pkgname}-${pkgver}.tar.bz2)
md5sums=('b521b63ade13b09062a64c46d2eabee2')
arch=('i686' 'x86_64')

build() {
	cd "${srcdir}/${pkgname}-${pkgver}"
	# makepkg runs build()/package() with errexit, so the obsolete
	# '|| return 1' suffixes have been dropped.
	./configure --prefix=/usr
	make
}

package() {
	cd "${srcdir}/${pkgname}-${pkgver}"
	make DESTDIR="${pkgdir}" install
	# Arch packages must not ship libtool archives.
	find "${pkgdir}/usr/lib" -name '*.la' -exec rm {} \;
}
| true
|
1fe2710afd513cd9fce9caba019665e3cab5c509
|
Shell
|
hugobarona/serverless-cognitive
|
/etc/deploy.sh
|
UTF-8
| 2,145
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# End-to-end Azure deployment of the serverless vision demo: resource
# group, storage (with blob containers + CORS), a Computer Vision account,
# and a Function App wired to both.

# CHANGE THESE VALUES!
# Note. Pick a location where Vision API is available
resGroup="Demo.ServerlessVision"
location="northeurope"

# CHANGE THESE VALUES IF YOU WISH
# $RANDOM suffix keeps the globally-unique storage/function names fresh.
suffix="$RANDOM"
storeAcct="visiondemostore$suffix"
functionName="visiondemofunc$suffix"

echo "### Creating resource group"
az group create -n $resGroup -l $location

echo "### Creating Storage account"
az storage account create -n $storeAcct -g $resGroup -l $location --sku Standard_LRS
storeKey=`az storage account keys list -n $storeAcct -g $resGroup --query "[0].value" -o tsv`

echo "### Creating Blob containers"
# photo-in: readable blobs; photo-out: publicly listable container.
az storage container create -n "photo-in" --account-name $storeAcct --account-key $storeKey --public-access blob
az storage container create -n "photo-out" --account-name $storeAcct --account-key $storeKey --public-access container

echo "### Configuring CORS"
az storage cors add --account-name $storeAcct --account-key $storeKey --methods GET --origins "*" --allowed-headers "*" --exposed-headers "*" --services b

echo "### Creating Vision API account"
az cognitiveservices account create -n "visionapi" -g $resGroup -l $location --sku F0 --yes --kind ComputerVision
apiKey=`az cognitiveservices account keys list -n "visionapi" -g $resGroup --query "key1" -o tsv`

echo "### Creating Function App"
az functionapp create -g $resGroup -c $location -n $functionName -s $storeAcct --os-type Windows --runtime node --deployment-source-url "https://github.com/benc-uk/serverless-cognitive.git"

echo "### Configuring Function App"
az functionapp config appsettings set -g $resGroup -n $functionName --settings VISION_API_KEY=$apiKey VISION_API_REGION=$location FUNCTIONS_WORKER_RUNTIME=node WEBSITE_NODE_DEFAULT_VERSION=8.11.1

echo ""
echo "##################################################################################"
echo ""
echo "Deployment complete!"
echo ""
echo "Access the camera here: https://$functionName.azurewebsites.net/api/cameraFunction"
echo "View results here: http://code.benco.io/serverless-cognitive/viewer/?sa=$storeAcct"
echo ""
echo "##################################################################################"
| true
|
b2d0820f03935980912000c3b2cd1cbc84ec47d2
|
Shell
|
ple-utt239/macports-ports
|
/audio/sinsy/files/sinsy_demo
|
UTF-8
| 456
| 2.703125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Synthesize the bundled sample song with Sinsy, play it, and clean up.
# @PREFIX@ is substituted by the MacPorts port at install time.
SINSY="@PREFIX@/bin/sinsy"
VOICE_DIR="@PREFIX@/lib/sinsy/voice/hts_voice_nitech_jp_song070_f001-0.90"
VOICE_FILE="${VOICE_DIR}/nitech_jp_song070_f001.htsvoice"
DIC_DIR="@PREFIX@/lib/sinsy/dic"
# Per-invocation temp path ($$ = PID of this shell).
WAVFILE="/tmp/sinsy-demo-$$.wav"
INFILE="${VOICE_DIR}/SAMPLE.xml"
# Echo the command for the user, then run it. All expansions are quoted so
# an install prefix containing spaces cannot word-split the arguments.
echo "${SINSY} -m ${VOICE_FILE} -o ${WAVFILE} -x ${DIC_DIR} ${INFILE}"
"${SINSY}" -m "${VOICE_FILE}" -o "${WAVFILE}" -x "${DIC_DIR}" "${INFILE}" && \
    afplay "${WAVFILE}"
rm -f "${WAVFILE}"
exit 0
| true
|
db7bd14629960a874fbae0d6ed60c18338ddcf8c
|
Shell
|
JiaqiGao/Advanced_Networks-P3
|
/trace_script
|
UTF-8
| 787
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Capture per-website packet traces: for each site, open it in netsurf
# while tshark records 12 seconds of traffic to a CSV, then kill the
# browser after 20 seconds and move to the next site.
# BUG FIX: the shebang was /bin/sh, but 'declare -a' is bash-only.
declare -a websites=("www.youtube.com" "www.yahoo.com" "www.facebook.com" "www.reddit.com" "www.instructure.com" "www.stackoverflow.com" "www.linkedin.com" "www.irs.gov" "www.nytimes.com" "www.cnn.com" "www.salesforce.com" "www.okta.com" "www.wikipedia.org" "www.imgur.com" "www.dropbox.com" "www.etsy.com" "www.hulu.com" "www.quizlet.com" "www.homedepot.com" "www.netflix.com")

count=1
hour=$1    # run number passed on the command line; names the output dir
mkdir -p "day2/run${hour}"

for w in "${websites[@]}"
do
    # NOTE(review): the '&' placement backgrounds the browse+capture and
    # the delayed pkill independently — preserved as written; confirm the
    # intended sequencing before restructuring.
    netsurf "$w" && tshark -i wlan0 -a duration:12 -T fields -e frame.number -e _ws.col.Time -e _ws.col.Source -e _ws.col.Destination -e _ws.col.Protocol -e frame.len -e _ws.col.Info -E header=y -E separator=, -E quote=d -E occurrence=f > "day2/run${hour}/packet${count}.csv" & sleep 20 && pkill netsurf & count=$((count+1))
done
| true
|
65e23e8308446291f24badbdc86729ece138ec06
|
Shell
|
petronny/aur3-mirror
|
/anyterm/anytermd.rc.d
|
UTF-8
| 822
| 3.5625
| 4
|
[] |
no_license
|
#! /bin/bash
### /etc/rc.d/anytermd: Initscript for Anyterm

. /etc/conf.d/anytermd
. /etc/rc.conf
. /etc/rc.d/functions

# PID of a running anytermd, if any (-o %PPID excludes this script).
PID=`pidof -o %PPID /usr/sbin/anytermd`

case "$1" in
  start)
    stat_busy "Starting Anyterm Daemon"
    # Only start when no instance is already running.
    [ -z "$PID" ] && /usr/sbin/anytermd \
        $ANYTERMD_OPTIONS \
        -c "$ANYTERMD_COMMAND" &> /dev/null
    if [ $? -gt 0 ]; then
      stat_fail
    else
      stat_done; add_daemon anytermd
      # BUG FIX: $PID was captured before the daemon was launched, so the
      # pidfile recorded an empty/stale value. Re-read it after startup.
      PID=`pidof -o %PPID /usr/sbin/anytermd`
      echo $PID > /var/run/anytermd.pid
    fi ;;
  stop)
    stat_busy "Stopping Anyterm Daemon"
    [ ! -z "$PID" ] \
        && kill $PID &>/dev/null
    if [ $? -gt 0 ]; then
      stat_fail
    else
      stat_done; rm_daemon anytermd; fi ;;
  restart)
    $0 stop
    sleep 1
    $0 start ;;
  *)
    echo "usage: $0 {start|stop|restart}"
esac
exit 0
### /etc/rc.d/anytermd: Initscript for Anyterm
| true
|
eccd0a1e0a09566af1f243bec1a2c042e75fc629
|
Shell
|
Reseed-Farm/regen-s2-ard
|
/scripts/s2-ard.sh
|
UTF-8
| 2,430
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Drive the Sentinel-2 ARD docker container: copy the tile data, optional
# config and AOI into it, run the pre-processing, then copy the results
# back out and clean the container.
#pull Regen Network Sentinel-2 ARD
#docker pull regennetwork/s2-ard
# run Regen Network Sentinel-2 ARD
#docker run --name s2-ard -dit ard
#docker run -dit -v "C:\Users\sambe\Documents\regen\grasslands_projects\s2-ard\src:/app/" --name s2-ard ard
docker restart s2-ard
start=$SECONDS
# parse named argument options --tile, --config and --aoi
while :; do
    case $1 in
        -t|--tiles)
            if [ "$2" ]; then
                TILES=$2
                echo "Data Directory : $TILES"
                shift
            else
                echo 'ERROR: "--tile" requires a non-empty option argument.'
                exit 1
            fi
            ;;
        -c|--config)
            if [ "$2" ]; then
                CONFIG=$2
                echo "Config File : $CONFIG"
                shift
            else
                echo 'ERROR: "--config" requires a non-empty option argument.'
                exit 1
            fi
            ;;
        -a|--aoi)
            if [ "$2" ]; then
                AOI=$2
                echo "AOI File : $AOI"
                shift
            else
                echo 'ERROR: "--aoi" requires a non-empty option argument.'
                exit 1
            fi
            ;;
        *)
            break
    esac
    shift
done
# copy config and aoi into running container;
# TILES is rewritten to the in-container /work path after copying.
if [ -d $TILES ]
then
    echo "Copying data directory"
    docker cp $TILES s2-ard:work
    TILES="/work/"`basename "$TILES"`
else
    echo "Data directory invalid"
fi
if [ -z "$CONFIG" ]
then
    echo "No CONFIG file copied"
else
    echo "Copying config.yml file"
    docker cp $CONFIG s2-ard:app/config.yml
fi
if [ -z "$AOI" ]
then
    echo "No AOI file copied"
else
    echo "Copying AOI file"
    docker cp $AOI s2-ard:app/aoi.geojson
fi
# execute pre-processing of the data product (tile or batch)
docker exec -it s2-ard bash -c "python /app/ard.py --tiles "$TILES""
# copy output files/folders to host from s2-ard container
echo "Copying files from docker container"
docker cp s2-ard:output $PWD
# remove files/folder from work and output directory on container
docker exec s2-ard sh -c 'rm -rf /output/* /work/*'
docker stop s2-ard
# Report elapsed wall-clock time.
duration=$((SECONDS-start))
minutes=$((duration/60))
echo "Time: $minutes minutes $((duration%60)) seconds"
| true
|
62c4e63e12f644eb88ed7c96b4d253bea5134877
|
Shell
|
Novabutter/Ubuntu-12.04.2-LTS-Binary-Check
|
/og-binaries-Ubuntu-12.04.2.sh.orig
|
UTF-8
| 802
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
##################### RUN AS ROOT ####################
# Record an MD5 baseline of every system, admin, and user binary into
# binaryVerifyOG.txt for later integrity comparison.
OUT=binaryVerifyOG.txt

# Print "path md5sum" for every path given as an argument.
# (The original repeated this loop three times and also computed an unused
# 'binary' variable on every iteration.)
hash_files() {
	for f in "$@"
	do
		echo "$f $(md5sum "$f" | cut -d " " -f1)"
	done
}

echo "--- System Binaries ---" > "$OUT"
echo "" >> "$OUT"
hash_files /bin/* >> "$OUT"

echo "" >> "$OUT"
echo "--- Admin Binaries ---" >> "$OUT"
echo "" >> "$OUT"
hash_files /sbin/* >> "$OUT"

echo "" >> "$OUT"
echo "--- User Binaries ---" >> "$OUT"
echo "" >> "$OUT"
hash_files /usr/bin/* >> "$OUT"
| true
|
5af7c58f5fb7e892897a5d132c713c33baa2943f
|
Shell
|
LabNeuroCogDevel/preproc_pipelines
|
/pipes/template
|
UTF-8
| 403
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Skeleton for a preprocessing pipeline definition. Fill in the PIPE_*
# metadata and the two hook functions; wantonlyoneid/has_finalout are
# assumed to be provided by the framework that sources this file.
PIPE_FOR="INITIALS of who pipeline is written for"
PIPE_DESC80="80 char or less desc"
PIPE_DESC="
This pipeline processes rest data
"
# External tools/files this pipeline depends on.
PIPE_DEPENDS=()
PIPE_VERSION="YYYYMMDD"
# Files whose presence marks a subject as finished.
FINALOUT=()

# Return 0 when the given subject id already has all final outputs.
function check_complete {
 wantonlyoneid $@ || return 1
 has_finalout $1 || return 1
 return 0
}

# Run the pipeline for a single subject id inside its own directory.
function run_pipeline {
 wantonlyoneid $@ || return 1
 id=$1
 [ ! -d $id ] && mkdir $id
 cd $id
}
| true
|
9a6dcf31ab2045688154a839d492bb6eff1a531a
|
Shell
|
faycute/tools
|
/aws/update_users_name/update_users_name.sh
|
UTF-8
| 510
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Rename every IAM user whose name contains $TARGET_STRING, replacing it with
# $NEW_STRING.  PROFILE, TARGET_STRING and NEW_STRING are expected to be
# defined in env.sh next to this script.
SCRIPT_DIR=$(cd $(dirname $0); pwd)
. $SCRIPT_DIR/env.sh
# All user names in the account, whitespace-separated (deliberate word-split
# in the for-loop below).
USER_NAMES=$(aws iam list-users --output text --query Users[].UserName --profile $PROFILE)
for old_user_name in $USER_NAMES
do
if [[ $old_user_name == *$TARGET_STRING* ]]; then
# Replace every occurrence of the target substring in the name.
new_user_name=$(echo $old_user_name | sed -e "s/$TARGET_STRING/$NEW_STRING/g")
aws iam update-user --user-name $old_user_name --new-user-name $new_user_name --profile $PROFILE
echo Update success! From: $old_user_name To: $new_user_name
fi
done
| true
|
db83a326be44e593a73428a831ad36707928b72a
|
Shell
|
notalex/level-up-coder
|
/level-up
|
UTF-8
| 835
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Work-session tracker: prompt for a task and a duration, show a progress bar
# for that long, then update accumulated minutes/hours, rank and title, and
# persist the new state back into ./db/user-config.sh.
set -e
# progress-bar, fib-index-value and the state vars (username, prevMins,
# totalHours) come from these sourced files.
source ./lib/progress-bar.sh
source ./db/user-config.sh
source ./lib/fib-helper.sh
read -rp "What will you work on? " task
read -rp "for how many minutes? " currentMins
# Block for the session's duration (progress-bar takes seconds).
progress-bar $(($currentMins * 60))
# Accumulate minutes and carry over into hours past 60.
hours=$totalHours
mins=$(($currentMins + $prevMins))
if [ $((60 - $mins)) -le 0 ]; then
hours=$(($totalHours + 1))
mins=$(($mins - 60))
fi
# fib-index-value prints two space-separated fields used as rank and XP.
rankWithHours=$(fib-index-value $hours)
rank=$(echo $rankWithHours | cut -d ' ' -f 1)
levelUpXP=$(echo $rankWithHours | cut -d ' ' -f 2)
# The rank-th title counted from the end of db/titles.
title=$(cat db/titles | tail -$rank | head -1)
echo "===* $title $username (Rank $rank) *==="
echo "worked for $currentMins mins on: ${task}"
echo "XP reqd to level up: $(($levelUpXP * 60 - $mins)) ($levelUpXP)"
# Rewrite the state file; the embedded newlines in the format string are
# intentional (one assignment per line).
printf "username='$username'
prevMins=$mins
totalHours=$hours" > ./db/user-config.sh
| true
|
e08256aef219bd6439a2b6f0663ec811a44392f4
|
Shell
|
olsydor/summer-school
|
/Task_2/Task2.sh
|
UTF-8
| 401
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Dump the moodle MySQL database to a timestamped .sql file, starting the
# MySQL service first if it is not running.

#Constants
#DB_Constants
DB_NAME=moodle
DB_USER=moodleuser

#generation file name: <db>_<ddmmYYYY_HHMM>.sql
underscore="_"
# FIX: this was curdatetime='date +%d%m%Y_%H%M' (a literal string, not the
# command's output); it must be a command substitution.
curdatetime=$(date +%d%m%Y_%H%M)
filename=$DB_NAME$underscore$curdatetime".sql"

# pgrep | wc -l counts mysql processes; exactly one is expected when healthy.
UP=$(pgrep mysql | wc -l);
if [ "$UP" -ne 1 ];
then
	echo "MySQL is down.";
	sudo service mysql start
else
	echo "All is well.";
fi

#make dump
# FIX: the redirect was `> $filename.sql"` -- a stray quote plus a doubled
# ".sql" extension ($filename already ends in .sql).
# NOTE(review): the password on the command line is visible in `ps`; consider
# a ~/.my.cnf credentials file instead.
mysqldump -u $DB_USER -p12345678 $DB_NAME > "$filename"
| true
|
01db31c26c67bb42f4ae893c254d923e35787ab4
|
Shell
|
Bertoni94/bash
|
/examen2.sh
|
UTF-8
| 857
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive admin menu (Spanish prompts): create a group, create a user,
# or move a user into a group.  Repeats until the operator answers "S".
# Fixes: the while-loop used invalid syntax (`while [ ... ]:` with the body
# before `do`), `continuar=text` assigned the literal string "text" instead
# of $text, and option 2 ran "sudo use radd" (typo for useradd).
continuar=C
while [ "$continuar" = C ]
do
	echo "Presione \n 1 para crear un grupo \n 2 para agregar un usuario \n 3 crear un
usuario a un grupo"
	read var1
	if [ "$var1" = 1 ]
	then
		echo "escriba el nombre del grupo"
		read var2
		sudo addgroup $var2
		echo "se a creado tu grupo"
		echo "C para continuar, S para salir"
		read text
		continuar=$text
	fi
	if [ "$var1" = 2 ]
	then
		echo "escriba el nombre del usuario"
		read var3
		sudo useradd $var3
		echo "se a creado el usuario"
		echo "C para continuar, S para salir"
		read text
		continuar=$text
	fi
	if [ "$var1" = 3 ]
	then
		echo -n "escriba el nombre de un usuario"
		read var4
		echo -n "escriba el nombre del grupo"
		read var5
		# addgroup <user> <group> adds an existing user to a group (Debian).
		sudo addgroup $var4 $var5
		#mkdir /home/edgar/$var5
		sudo mv /home/$var4 /home/edgar/$var5
		echo "C para continuar, S para salir"
		read text
		continuar=$text
	fi
done
| true
|
ce20b668ff948914e94a83211e7ee030d2902558
|
Shell
|
barryb/cloudtools
|
/host-recipes/rds-backup/initial-config.sh
|
UTF-8
| 1,492
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/sh
# The script updates installed packages and adds any required ones for backing up the RDS server.
# Some initial config, including installing git and the cloud credentials needs to be done prior to this
# ideally, by using the user_data file for amazon cloud servers
# All output from the provisioning steps is appended to $LOGFILE via the
# subshell redirection at the bottom.
LOGFILE="/tmp/setup.log"
(
echo "Initial Host Setup - $(date -R)!"
# Don't bother installing gem docs
echo "gem: --no-ri --no-rdoc" >> ~/.gemrc
SCRIPT_DIR=/usr/local/scripts
# NOTE(review): $REPOS_PATH is not set in this script -- presumably exported
# by the caller/user_data; confirm.
CT_PATH=$REPOS_PATH/cloudtools
mkdir -p "$SCRIPT_DIR/rds-backup"
# Symlink every backup helper script into the scripts directory.
for f in $(ls -d /usr/local/repos/cloudtools/host-recipes/rds-backup/scripts/*); do
ln -s $f /usr/local/scripts/rds-backup;
done
# ln -s $CT_PATH/scripts/* $SCRIPT_DIR/rds-backup
# Add relevant ssh public keys for access
cat /usr/local/repos/cloudtools/public_keys/bb_*id_rsa.pub >> ~ec2-user/.ssh/authorized_keys
# Remove ls coloring
echo "unalias ls" >> ~ec2-user/.bash_profile
yum -y update
# Need Mysqldump for backing update
yum -y install mysql51
yum -y install rubygems
gem update --system
# The following are required for the nokogiri gem
yum install -y gcc make ruby-devel libxml2 libxml2-devel libxslt libxslt-devel
gem install right_aws
gem sources -a http://gemcutter.org/
gem install cloudfiles
# Prevent ssh timeouts for routers that dump idle sessions too quickly
echo "ClientAliveInterval 60" >> /etc/ssh/sshd_config
/etc/init.d/sshd restart
echo "Initial Setup Done - $(date -R)!"
) >> ${LOGFILE}
| true
|
1ad558890d5b78d35d638a80ce91651ef759faac
|
Shell
|
Sifchen/CodeBeispieleStudium
|
/unix/ueb01/lister.sh
|
UTF-8
| 876
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh
# Authors: Minf103095 (Daniel Pigotow), Minf103430 (Michael Smirnov)
# Last changed 19.04.2019 13:14
# The script first creates a subdirectory named "info" in its own directory.
# If that subdirectory already exists this is silently ignored (no error).
# It then writes all files in the current directory with the .txt extension,
# sorted in ascending alphabetical order, into info/mytextfiles.txt -- the
# file is created if missing and overwritten otherwise.
# The error produced when no matching files exist may be ignored here.
mkdir info 2> /dev/null # redirect to the null device to discard the "exists" error
# ls -X groups by extension; since every match is *.txt the output is plain
# alphabetical order.
ls -X *.txt > ./info/mytextfiles.txt
| true
|
f518df4565f3d1a0858231793845b4a255f0cd9b
|
Shell
|
bopopescu/ops-1
|
/centos7-init/3.system.sh
|
UTF-8
| 976
| 2.8125
| 3
|
[] |
no_license
|
## selinux
# Install the prepared selinux configuration file.
cp -v selinux /etc/selinux/config

## sudo
# Append each hardening option to /etc/sudoers unless an identical line is
# already present (grep-then-append keeps repeated runs idempotent).
sudof="/etc/sudoers"
sudoer_opt1='Defaults logfile=/var/log/sudo.log'
sudoer_opt2='Defaults !syslog'
sudoer_opt3='Defaults timestamp_timeout=120'
# FIX: was misspelled "sudoer_ops4", so $sudoer_opt4 below expanded empty and
# the !tty_tickets option was never applied.
sudoer_opt4='Defaults !tty_tickets'
for sudo_opt in "$sudoer_opt1" "$sudoer_opt2" "$sudoer_opt3" "$sudoer_opt4"
do
	grep "^$sudo_opt" $sudof > /dev/null 2>&1
	[ $? -ne 0 ]&& echo "$sudo_opt" >> $sudof
done

## sshd
# Same idempotent append for the sshd hardening options.
sshf="/etc/ssh/sshd_config"
for ssh_opt in 'UseDNS no' 'PermitRootLogin no'
do
	grep "^$ssh_opt" $sshf > /dev/null 2>&1
	[ $? -ne 0 ]&& echo "$ssh_opt" >> $sshf
done

# Application directory layout under /home/q (world-writable by design here).
for dir in cache cmstpl data lua php pkgs system tools
do
	mkdir -pv "/home/q/$dir"
	chmod 777 "/home/q/$dir"
done

# Postfix: restrict to IPv4, enable at boot, restart to pick up the change.
sed -i 's/^inet_protocols = all/inet_protocols = ipv4/g' /etc/postfix/main.cf
systemctl enable postfix
systemctl restart postfix

# Route root's mail to the monitoring address, exactly once.
grep 'mon@panda.tv' /etc/aliases > /dev/null 2>&1
[ $? -eq 0 ] &&exit 0
echo 'root:echomon@panda.tv' >> /etc/aliases
newaliases
| true
|
b78426daafcb2c74f64fc6b95bac6540b045de13
|
Shell
|
plesner/script
|
/bash/prompt.sh
|
UTF-8
| 3,935
| 3.78125
| 4
|
[] |
no_license
|
# Configuration for the custom bash prompt exported at the bottom of this file.

# The default user which we won't bother to show.
_prompt_default_user="plesner"
# The default machine which we won't bother to show.
_prompt_default_hostname="hackenschmidt"
# Mappings from names to the abbreviations to use in the pwd.
# Each entry is "<segment>=<abbrev>"; _prompt_get_abbrev_map turns these into
# sed substitutions applied to $PWD.
_prompt_path_segment_abbrevs=(
Documents=D
neutrino=n
)
# How long are we willing to wait for the slower commands to yield a result
# before bailing?  (Passed verbatim to timeout(1), so "0.01s" = 10ms.)
_prompt_slow_command_timeout=0.01s
# The 64 base64 alphabet characters, in order, separated by single spaces
# (the trailing backslashes are string-internal line continuations).
_prompt_base64_chars="\
A B C D E F G H I J \
K L M N O P Q R S T \
U V W X Y Z a b c d \
e f g h i j k l m n \
o p q r s t u v w x \
y z 0 1 2 3 4 5 6 7 \
8 9 + /"
# Look up the i'th base64 character (0-based).  Despite the name this does
# not encode arbitrary data -- it merely indexes into the table above.
function _prompt_base64_encode {
  local field=$(( $1 + 1 ))
  echo $_prompt_base64_chars | cut -d ' ' -f $field
}
# Prints " <user>" when running as someone other than the configured default
# user; prints nothing (empty string) for the default user.
function _prompt_get_user {
  [[ "${USER}" == "${_prompt_default_user}" ]] || printf ' %s' "$USER"
}
# Prints " <host>" when on a machine other than the configured default host;
# prints nothing on the default machine.
function _prompt_get_hostname {
  [[ "${HOSTNAME}" == "${_prompt_default_hostname}" ]] || printf ' %s' "$HOSTNAME"
}
# Returns the abbreviation mapping converted into a set of options to sed that
# perform the appropriate translations.  Each "<name>=<abbrev>" entry becomes
# an "-e s|/<name>|/<abbrev>|g" expression.
function _prompt_get_abbrev_map {
for abbrev in ${_prompt_path_segment_abbrevs[*]}
do
echo $abbrev | sed -e "s|\(.*\)=\(.*\)|-e s\|/\1\|/\2\|g|g"
done
}
# Returns the compacted version of the current working directory: $HOME is
# replaced by "~", then each configured path segment is abbreviated.
function _prompt_get_pwd {
home_pwd=$(echo $PWD | sed -e "s|$HOME|~|g")
abbrev_map="$(_prompt_get_abbrev_map)"
# $abbrev_map is intentionally unquoted: word-splitting turns it back into
# separate -e <expr> arguments for sed.
result=$(echo $home_pwd | sed $abbrev_map)
if [ -n "$result" ];
then printf ' %s' "$result"
fi
}
# Returns a compact representation of the status of the current git branch.
# Each `git status --porcelain` line (untracked "??" entries excluded) is cut
# down to its two status letters, then identical pairs are counted with
# uniq -c and lower-cased, e.g. yielding "2m" for two modified files.
# Empty when the repo is clean or the status command hits the timeout.
function _prompt_get_branch_status {
status=$(timeout $_prompt_slow_command_timeout git status --porcelain | \
grep -v "??" | \
sed "s|\(..\).*|\1|g" | \
sort | \
uniq -c | \
sed "s| ||g" | \
tr "[:upper:]" "[:lower:]")
echo $status | sed "s| ||g"
}
# Returns the current git branch and status if we're in a git repo, otherwise
# the empty string.  A dirty tree appends "@<status>" to the branch name.
function _prompt_get_git_branch {
output=$(timeout $_prompt_slow_command_timeout git branch 2>&1 | \
grep "*" | \
sed "s/^\* \(.*\)$/\1/g")
if [ -n "$output" ]; then
status=$(_prompt_get_branch_status)
if [ -n "$status" ]; then
output="$output@$status"
fi
printf ' %s' "$output"
fi
}
# Works just like date but trims leading zeros off the result (so the value
# can be used as a decimal number / table index).
function _prompt_trim_date {
date $* | sed "s|^0*\(..*\)$|\1|g"
}
# Returns a compact representation of the current time: day-of-month, hour
# and minute each mapped to a single base64 character (all three values fit
# in the 0..63 range of the table).
function _prompt_get_time {
# Just grab the least significant part of the date.
day=$(_prompt_base64_encode $(_prompt_trim_date "+%d"))
hour=$(_prompt_base64_encode $(_prompt_trim_date "+%H"))
min=$(_prompt_base64_encode $(_prompt_trim_date "+%M"))
printf '%s' "$day$hour$min"
}
# Build the whole prompt. It would be nicer if the color codes could be broken
# out somehow but they can't be inserted dynamically, the terminal needs to
# know which are printing and non-printing directly from the string.
# ANSI colors: 35 magenta (time), 33 yellow (user), 31 red (host),
# 36 cyan (pwd), 34 blue (git branch); \[...\] marks non-printing spans.
export PS1='\[\033[00m\]\[\033[35m\]$(_prompt_get_time)\[\033[33m\]$(_prompt_get_user)\[\033[31m\]$(_prompt_get_hostname)\[\033[36m\]$(_prompt_get_pwd)\[\033[34m\]$(_prompt_get_git_branch)\[\033[00m\] '
| true
|
e70fbee2b8fe2d8c0a62a83f5d7d7c7e28601219
|
Shell
|
arthur-flam/apiADP
|
/scrap.sh
|
UTF-8
| 392
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch flight data day-by-day for the window [7 days ago, +3 months), load
# each day's XML, and archive successfully loaded files to Cloud Storage
# under today's date.

# FIX: was `mkdir data`, which errors on every run after the first; -p makes
# the script re-runnable.
mkdir -p data

from=$(date +"%Y-%m-%d" --date="7 days ago")
today=$(date +"%Y-%m-%d")
to=$(date +"%Y-%m-%d" -d "3 months")

current=$from
while [ "$to" != "$current" ] ;
do
	./getFlights.sh "$current"
	file=data/flights-$current.xml
	# Only archive files that loaded cleanly.
	./load.py "$file" && \
	gsutil cp "$file" gs://adp-flights/$today/$current.xml
	current=$(date +"%Y-%m-%d" -d "$current + 1 day");
done
| true
|
c5b29a4c10674d41d807effbcb0017d068b94ded
|
Shell
|
bnavetta/night-kitchen
|
/scripts/release.sh
|
UTF-8
| 528
| 3.53125
| 4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Release helper: stamps the version into packaging metadata, commits, tags
# and pushes; the pushed tag triggers the GitHub release workflow.
set -eou pipefail

# Always operate from the repository root.
cd "$(git rev-parse --show-toplevel)"

# Refuse to release from a dirty working tree.
if [ -n "$(git status --porcelain)" ]; then
	echo "Working directory is not clean!" >&2
	exit 1
fi

# Exactly one argument: the version to release.
if [ $# -ne 1 ]; then
	echo "Usage: $0 <version>" >&2
	exit 1
fi
version="$1"

echo "Releasing v$version"

# Stamp the version into the Arch package and the Cargo metadata.
sed -i "s/^pkgver=.*/pkgver='$version'/" packaging/arch/PKGBUILD
cargo bump "$version"

# Commit, tag and push everything.
git add .
git commit -m "Release v$version"
git tag "v$version"
git push
git push --tags
| true
|
69d710f50d5f22fb90fa6a3a191b258f67642270
|
Shell
|
cnicol-gwlogic/surface-water-network
|
/ci/do_install.sh
|
UTF-8
| 624
| 3.796875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Build-and-cache helper: downloads, builds and installs a CMake project into
# $HOME/.local, skipping the build when a cached install already exists.
set -e

# This script is called from another install.sh script, which must export
# URL, ZIP, ZIPDIR and BIN before sourcing/invoking it.
if [ -z ${URL+x} ]; then
echo "This script cannot be called directly" 1>&2
exit 1
fi
REPODIR=$(pwd)
PREFIX=$HOME/.local
# NOTE(review): this tests $PREFIX/bin/$BIN as a *directory*; if $BIN is a
# plain executable the cache check never fires -- confirm intent (-f/-x?).
if [ -d "$PREFIX/bin/$BIN" ]; then
echo "Using cached install $PREFIX/bin/$BIN"
else
echo "Building $PREFIX/bin/$BIN"
# Download and unpack the sources, then build out-of-tree with CMake using
# the repo-provided CMakeLists.txt and module path, and install to $PREFIX.
wget -nv --show-progress $URL -O $ZIP
unzip -q $ZIP
cd $ZIPDIR
cp $REPODIR/ci/$BIN/CMakeLists.txt .
mkdir build
cd build
cmake -DCMAKE_INSTALL_PREFIX=$PREFIX -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_MODULE_PATH=$REPODIR/ci/cmake ..
make -j2
make install
fi
| true
|
7ab267960648b41094f08ee23efc305e16052be5
|
Shell
|
mvilera/vsftpd-test
|
/create-user.sh
|
UTF-8
| 2,196
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Created by Marco Vilera.
# Interactively creates a system account with optional SSH and FTP access:
# builds the chrooted vsftpd directory layout, generates an RSA keypair under
# /keys, installs the public key, and records the account in /userlist.file.
set -e
# Prompt for the new account's details.
echo "Please enter the USERNAME and press [ENTER]: "
read USERNAME
echo "Please enter the PASSWORD and press [ENTER]: "
read -s PASSWORD
echo "Do you wish the new user have SSH access? answer [yes/no] then press [ENTER]: "
read SHELL_ALLOWED
echo "Do you wish the new user have FTP access? answer [yes/no] then press [ENTER]: "
read FTP
USERLIST=/userlist.file
FTP_ROOT=/home/$USERNAME/ftp
FTP_FILES_DIR=/home/$USERNAME/ftp/files
USERADD_PARAMETERS="-U $USERNAME --create-home"
FTPLIST=/ftp.list
# Skip everything when the user already exists.
if ! id -u $USERNAME > /dev/null 2>&1; then
# Decide whether the account gets a login shell (case-insensitive "yes").
shopt -s nocasematch
if [[ "$SHELL_ALLOWED" == "yes" ]]; then
CHOOSEN_SHELL="/bin/bash"
else
CHOOSEN_SHELL="/sbin/nologin"
fi
shopt -u nocasematch
USERADD_PARAMETERS="$USERADD_PARAMETERS --shell $CHOOSEN_SHELL"
# Create the user and set its password.
useradd $USERADD_PARAMETERS
echo "$USERNAME:$PASSWORD" | chpasswd
# Directory layout for the vsftpd chroot: the chroot root must not be
# writable by the user; files/ below it is the writable area.
mkdir -p $FTP_FILES_DIR
chown nobody:nogroup $FTP_ROOT
chmod a-w $FTP_ROOT
chown $USERNAME:$USERNAME $FTP_FILES_DIR
# Generate a passphrase-less pub/priv keypair.
ssh-keygen -t rsa -q -N "" -f /keys/$USERNAME
# Grant SSH access via the public key.
mkdir -p /home/$USERNAME/.ssh
touch /home/$USERNAME/.ssh/authorized_keys
cat /keys/$USERNAME.pub >> /home/$USERNAME/.ssh/authorized_keys
chown $USERNAME:$USERNAME /home/$USERNAME/.ssh/authorized_keys
chmod 600 /home/$USERNAME/.ssh/authorized_keys
# Add the user to the FTP deny list when FTP access is not granted.
shopt -s nocasematch
if [[ "$FTP" == "no" ]]; then
grep -q -F "$USERNAME" $FTPLIST || echo "$USERNAME" >> $FTPLIST
fi
shopt -u nocasematch
# Record the user with its creation parameters in the list file (no dupes).
if [ -f $USERLIST ]; then
grep -q -F "$USERNAME:$PASSWORD" $USERLIST || echo "$USERNAME:$PASSWORD:$CHOOSEN_SHELL:$FTP" >> $USERLIST
else
echo "$USERNAME:$PASSWORD:$CHOOSEN_SHELL:$FTP" >> $USERLIST
fi
echo "User created succesfully."
else
echo "User already exists."
fi
| true
|
d64ee3bca3253668acdf77a82be429e78513b4b7
|
Shell
|
keiouok/atcoder
|
/make_dir.sh
|
UTF-8
| 463
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Create zero-padded contest directories (001..200) and seed each with the
# problem templates a.py .. f.py copied from ../a.py; existing files and
# directories are left untouched.  Finally mirror this script one level up.
MIN_NUM=1
MAX_NUM=200
GAP=1

for dir in $(seq -w $MIN_NUM $GAP $MAX_NUM)
do
	# Create the directory only when missing.
	[ -e $dir ] || mkdir $dir
	for name in a b c d e f
	do
		# Never overwrite an existing solution file.
		if [[ ! -f ./$dir/$name.py ]]; then
			cp ../a.py ./$dir/$name.py
		fi
	done
done

cp ./make_dir.sh ../make_dir.sh
echo "[Finished] making directory from $MIN_NUM to $MAX_NUM."
echo "[Finished] making a - f.py under the directories if they don't exist."
echo "[Finished] copying this file to ../make_dir.sh the same as this."
| true
|
5d33399db18e941a6adacc830f6c05c506db7740
|
Shell
|
spiralofhope/compiled-website
|
/scripts/edit.sh
|
UTF-8
| 1,990
| 3.09375
| 3
|
[] |
no_license
|
:<<'heredoc'
FIXME: I will eventually change my setup to automatically start the compiled website daemon.
When that is done, this file will have to kill the old process and start a new one.
- If you get a file deletion error, manually kill the ruby process.. then re-save main.rb and things should work out.
-- This is probably a very old note and doesn't apply to anything recent.
heredoc
# Resolve this script's real location and load the project configuration.
# Commands are backslash-prefixed throughout to bypass shell aliases.
__FILE__=$( \readlink -f $0 )
working=$( \dirname $__FILE__ )/../../
\. $working/compiled-website.ini
# If I'm not using zsh, I'd need to do this:
# working=$( \dirname $__FILE__ )/../../
# Project directory layout: git checkout, sources, rendered output.
repo=$working/git
src=$working/src
live=$working/live
# ---
\cd $working
# Open the rendered pages for review in the browser (backgrounded).
#\firefox \
\palemoon \
-new-tab "file://$working/live/compiled-website-to-do.html" \
-new-tab "file://$working/live/compiled-website-bugs.html" \
-new-tab "file://$working/live/sandbox.html" \
-new-tab "file://$working/live/index.html" &
#links -g ...
#midori &
#netsurf &
# I can't start autotest first and then load this stuff into that very-previous instance. It'll open in the very first instance of geany. Sigh.
# Open the main working set of sources in a dedicated geany instance.
\geany --new-instance \
"$repo/CHANGELOG.markdown" \
"$working/compiled-website.txt" \
"$live/css/common.css" \
"$repo/rb/header_and_footer.rb" \
"$src/w/compiled-website-to-do.asc" \
"$src/w/compiled-website-bugs.asc" \
"$src/w/sandbox.asc" \
"$repo/rb/lib/lib_main.rb" \
"$repo/rb/tests/tc_main.rb" \
"$repo/CHANGELOG.markdown" \
&
echo $working
# TODO: My autotest script is still to be prepared and made public.
/l/shell-random/git/live/autotest.sh "$working/git/rb/main.rb" --nodebug
# Sync the examples from my live website into the git repository.
\cd $working
\cp --force $src/w/compiled-website-demo.asc $repo/examples/demo.asc
\cp --force $live/compiled-website-demo.html $repo/examples/demo.html
# TODO: Kill just the ruby pid on exit.
# It's not the pid of autotest.sh, it has to be determine from the /tmp pid files.
\killall ruby
| true
|
04dabff061fa919c5936336a5f0ec4f16db69b6a
|
Shell
|
KaOSx/apps
|
/ack/PKGBUILD
|
UTF-8
| 654
| 2.546875
| 3
|
[] |
no_license
|
# do not build on server, perl issues
# PKGBUILD for ack, a Perl-based grep replacement, built from the CPAN
# source tarball with ExtUtils::MakeMaker.
pkgname=ack
pkgver=3.7.0
pkgrel=1
pkgdesc="A Perl-based grep replacement, aimed at programmers with large trees of heterogeneous source code"
arch=('x86_64')
url="https://beyondgrep.com/"
license=('GPL' 'PerlArtistic')
depends=('perl-file-next')
options=('!emptydirs')
groups=('programming')
source=("https://search.cpan.org/CPAN/authors/id/P/PE/PETDANCE/${pkgname}-v${pkgver}.tar.gz")
md5sums=('7daaf78636f9cbbebf42a6898d89f50f')
build() {
cd ${pkgname}-v${pkgver}
# Non-interactive Makefile.PL configure; install into vendor (system) dirs.
PERL_MM_USE_DEFAULT=1 perl Makefile.PL INSTALLDIRS=vendor
make
}
package() {
cd ${pkgname}-v${pkgver}
# Stage the build into the package root.
make DESTDIR=${pkgdir} install
}
| true
|
68fe59a876793dd301b51c3bc0aada81270bf144
|
Shell
|
Seiderlord/ParalleleProgrammierung
|
/week12/jobEX1.sh
|
UTF-8
| 1,306
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Grid Engine job: build EX1.c with -O1 (with and without auto-vectorization),
# time both binaries for several problem sizes, then sample vectorization-
# related raw hardware counters with perf for each binary.
# Executes job in the queue "std.q" unless you have special requirements.
#$ -q std.q
# Changes to the current working directory before performing any further action
#$ -cwd
# Name of your job. Unless you use the -o and -e options, output will
# go to a unique file name.ojob_id for each job.
#$ -N ResEx1
# Redirect output stream to this file.
#$ -o ResEx1.dat
# Join the error stream to the output stream.
#$ -j yes
# Parallel environment for using OpenMP, allocates 8 cores on a single node
#$ -pe openmp 8
#$ -l h_vmem=2G
# Set up any environment variables
#export OMP_NUM_THREADS=8
# Use gcc 8.2.0 as the default gcc
module load gcc/8.2.0
# Baseline build: -O1 without tree-vectorization.
gcc -std=c99 -O1 -fopenmp EX1.c -o EX1_1.out
./EX1_1.out 1000
./EX1_1.out 500
./EX1_1.out 100
# Vectorized build for comparison.
gcc -std=c99 -O1 -ftree-vectorize -fopenmp EX1.c -o EX1_2.out
./EX1_2.out 1000
./EX1_2.out 500
./EX1_2.out 100
# NOTE(review): rXXC7 are raw, CPU-specific event codes (presumably
# FP_ARITH/SIMD sub-events) -- confirm against the target CPU's event list.
perf stat -e r01C7 ./EX1_1.out 1000
perf stat -e r02C7 ./EX1_1.out 1000
perf stat -e r04C7 ./EX1_1.out 1000
perf stat -e r08C7 ./EX1_1.out 1000
perf stat -e r10C7 ./EX1_1.out 1000
perf stat -e rF1C7 ./EX1_1.out 1000
perf stat -e r01C7 ./EX1_2.out 1000
perf stat -e r02C7 ./EX1_2.out 1000
perf stat -e r04C7 ./EX1_2.out 1000
perf stat -e r08C7 ./EX1_2.out 1000
perf stat -e r10C7 ./EX1_2.out 1000
perf stat -e rF1C7 ./EX1_2.out 1000
| true
|
127474acb9caad30bcf83238cd832b50118c90b8
|
Shell
|
openstack/neutron-vpnaas-dashboard
|
/devstack/plugin.sh
|
UTF-8
| 2,131
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
# plugin.sh - DevStack plugin.sh dispatch script neutron-vpnaas-dashboard

VPNAAS_DASHBOARD_DIR=$(cd $(dirname $BASH_SOURCE)/.. && pwd)
VPNAAS_ENABLED_DIR=$VPNAAS_DASHBOARD_DIR/neutron_vpnaas_dashboard/enabled
HORIZON_ENABLED_DIR=$DEST/horizon/openstack_dashboard/local/enabled

# Install the dashboard package into the DevStack environment (editable).
function install_neutron_vpnaas_dashboard {
    setup_develop $VPNAAS_DASHBOARD_DIR
}

# Enable the dashboard panels in Horizon and compile translation catalogs.
function configure_neutron_vpnaas_dashboard {
    cp -a $VPNAAS_ENABLED_DIR/_[0-9]*.py $HORIZON_ENABLED_DIR
    # NOTE: If locale directory does not exist, compilemessages will fail,
    # so check for an existence of locale directory is required.
    if [ -d $VPNAAS_DASHBOARD_DIR/neutron_vpnaas_dashboard/locale ]; then
        (cd $VPNAAS_DASHBOARD_DIR/neutron_vpnaas_dashboard; \
         DJANGO_SETTINGS_MODULE=openstack_dashboard.settings $PYTHON ../manage.py compilemessages)
    fi
}

# check for service enabled
if is_service_enabled neutron-vpnaas-dashboard; then

    if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
        # Set up system services
        # no-op
        :

    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
        # Perform installation of service source
        echo_summary "Installing Neutron VPNaaS Dashboard"
        install_neutron_vpnaas_dashboard

    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        # Configure after the other layer 1 and 2 services have been configured
        # (fixed summary-message typo: was "Configurng")
        echo_summary "Configuring Neutron VPNaaS Dashboard"
        configure_neutron_vpnaas_dashboard

    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
        # no-op
        :
    fi

    if [[ "$1" == "unstack" ]]; then
        # Remove enabled file(s)
        for _enabled_file in $VPNAAS_ENABLED_DIR/_[0-9]*.py; do
            _enabled_basename=$(basename $_enabled_file .py)
            rm -f $HORIZON_ENABLED_DIR/${_enabled_basename}.py*
            rm -f $HORIZON_ENABLED_DIR/__pycache__/${_enabled_basename}.*pyc
        done
    fi

    if [[ "$1" == "clean" ]]; then
        # Remove state and transient data
        # Remember clean.sh first calls unstack.sh
        # no-op
        :
    fi
fi
| true
|
bd9bf6b1884906f90b17e9b54782fea99f8b0fb1
|
Shell
|
kxtry/android-stress-script
|
/script/mytail.sh
|
UTF-8
| 2,946
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# Poll an Android device over adb (network target in $1) roughly once a
# minute and append memory, process, file-descriptor and uptime diagnostics
# for the com.commaai.* app to data/<target>/tail.txt.  Optional $2 selects
# extras: "temp" (thermal zones) and/or "frame" (brokenflow log tail).
path_current=`pwd`
path_script=$(cd "$(dirname "$0")"; pwd)
path_data=$path_script/../data
target=$1
param=$2
if [ "$target" == "" ]; then
echo "should like mytail.sh 192.168.30.25 temp,frame"
exit 1
fi
# Previous-iteration values used to detect app restarts and device reboots.
elapseLast=0
pidLast=0
while true
do
run_time=$(date "+%Y%m%d%H%M")
if [ ! -d "$path_data/${target}" ]; then
mkdir -p "$path_data/${target}"
fi
echo "--------[${run_time}]----------------" >> $path_data/${target}/tail.txt
# Re-establish the adb connection, then sample free memory and top twice.
/bin/bash $path_script/connect.sh $*
adb -s "$target" shell "free -h" >> $path_data/${target}/tail.txt
# use crontab to replace it.
# if [ $? -ne 0 ]; then
# echo "bad echo and disconnect" >> $path_data/${target}/tail.txt
# adb disconnect
# fi
adb -s "$target" shell "top -n 1|grep com.commaai." >> $path_data/${target}/tail.txt
sleep 1
adb -s "$target" shell "top -n 1|grep com.commaai." >> $path_data/${target}/tail.txt
# Open-file count for the app's ps owner column (field 1 of ps -ef).
app=$(adb -s "$target" shell "ps -ef|grep com.commaai|grep -v grep"|awk '{print $1}')
if [ $? -eq 0 ]; then
adb -s "$target" shell "lsof|grep $app|wc -l" | xargs echo "$app - filecount:" >> $path_data/${target}/tail.txt
fi
adb -s "$target" shell "ps -ef|grep com.commaai|grep -v grep" >> $path_data/${target}/tail.txt
# Track the app pid across iterations to spot exits and restarts.
pid=$(adb -s "$target" shell "ps -ef|grep com.commaai|grep -v grep"|awk '{print $2}')
if [ $? -eq 0 ]; then
echo "app current pidLast:${pidLast} - pid:${pid}" >> $path_data/${target}/tail.txt
if [ "$pid" == "" ]; then
if [ "$pidLast" != "$pid" ]; then
echo "app exit now:$pid" >> $path_data/${target}/tail.txt
fi
else
if [ "$pidLast" != "0" ]; then
if [ "$pidLast" != "$pid" ]; then
echo "app restart now:$pid" >> $path_data/${target}/tail.txt
adb -s "$target" shell "stat /proc/$pid" >> $path_data/${target}/tail.txt
fi
fi
adb -s "$target" shell "dumpsys meminfo $pid" >> $path_data/${target}/tail.txt
fi
pidLast=${pid}
fi
adb -s "$target" shell "lsof|wc -l" | xargs echo "filecount:" >> $path_data/${target}/tail.txt
# Device uptime; a value lower than last iteration means the device rebooted.
adb -s "$target" shell "cat /proc/uptime" >> $path_data/${target}/tail.txt
elapse=$(adb -s "$target" shell "cat /proc/uptime"|awk '{print $1}')
if [ "${elapse}" != "" ]; then
echo "systerm current value last:${elapseLast} - now:${elapse}" >> $path_data/${target}/tail.txt
# bc compares the float uptimes; prints 1 when the value decreased.
if [ `echo "${elapseLast} > ${elapse}" | bc` -eq 1 ];then
echo "systerm restart now last:${elapseLast} - now:${elapse}" >> $path_data/${target}/tail.txt
fi
elapseLast=${elapse}
fi
# Optional extras requested via $2.
temp=$(echo $param | grep "temp")
if [ "$temp" != "" ];then
adb -s "$target" shell "cat /sys/class/thermal/thermal_zone*/temp" >> $path_data/${target}/tail.txt
fi
frame=$(echo $param | grep "frame")
if [ "$frame" != "" ];then
adb -s "$target" shell "tail -n 2 /storage/emulated/0/Log/brokenflow.txt" >> $path_data/${target}/tail.txt
fi
sleep 50
done
| true
|
b6675517804df322ba2bc7e43eaf697e70fef442
|
Shell
|
pujita96/tf-devstack
|
/rhosp/undercloud/02_deploy_as_stack.sh
|
UTF-8
| 637
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Undercloud deployment wrapper: loads the RHOSP environment definition,
# relaxes SSH host-key checking, exports the local interface MTU and hands
# off to the RHEL-version-specific deploy script.
my_file="$(readlink -e "$0")"
my_dir="$(dirname $my_file)"

if [ -f ~/rhosp-environment.sh ]; then
   source ~/rhosp-environment.sh
else
   echo "File ~/rhosp-environment.sh not found"
   # FIX: was a bare `exit`, which returned the echo's status (0) and let
   # callers believe the deployment succeeded despite the missing file.
   exit 1
fi

# ssh config to do not check host keys and avoid garbadge in known hosts files
mkdir -p ~/.ssh
chmod 700 ~/.ssh
cat <<EOF >~/.ssh/config
Host *
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null
EOF
chmod 644 ~/.ssh/config

cd $my_dir

# MTU of the undercloud's local interface, consumed by the deploy script.
# (Replaced legacy backticks with $( ).)
export local_mtu=$(/sbin/ip link show $undercloud_local_interface | grep -o "mtu.*" | awk '{print $2}')

#Specific part of deployment
source $my_dir/${RHEL_VERSION}_deploy_as_stack.sh
| true
|
87839698ec041c00bfa2132896c55eb71226a4c1
|
Shell
|
henrypbriffel/virtual-environments
|
/images/linux/scripts/installers/homebrew-validate.sh
|
UTF-8
| 458
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
################################################################################
##  File:  homebrew-validate.sh
##  Desc:  Validate the Homebrew can run after reboot without extra configuring
################################################################################

# Validate the installation: `command -v brew` also prints the resolved path
# to stdout, exactly as before.
echo "Validate the Homebrew can run after reboot"
command -v brew || {
    echo "brew executable not found after reboot"
    exit 1
}
| true
|
9cde5a7194f2f508e2376d795e509a078a588b46
|
Shell
|
Garophel/dotfiles
|
/scripts/util/volume-ctrl.sh
|
UTF-8
| 800
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Adjust the default audio sink via ponymix and refresh the i3blocks volume
# widget afterwards.

# PulseAudio sink index; only used by the commented-out pactl variants that
# this script previously drove.
pasink=0

# Print the accepted sub-commands.
usage() {
	echo "$0 <command>"
	echo ' up'
	echo ' down'
	echo ' mute'
}

# A sub-command is mandatory.
if [ -z "$1" ]; then
	echo 'Provide a command!'
	usage
	exit 1
fi

case "$1" in
	up)
		#pactl set-sink-mute $pasink false; pactl set-sink-volume $pasink +5%
		ponymix unmute >/dev/null
		ponymix increase 5 >/dev/null
		;;
	down)
		#pactl set-sink-mute $pasink false; pactl set-sink-volume $pasink -5%
		ponymix unmute >/dev/null
		ponymix decrease 5 >/dev/null
		;;
	mute)
		#pactl set-sink-mute $pasink toggle
		ponymix toggle >/dev/null
		;;
	set-mute)
		#pactl set-sink-mute $pasink 1
		ponymix mute >/dev/null
		;;
esac

# Tell i3blocks to re-render its volume block.
pkill -RTMIN+1 i3blocks
| true
|
69ef6cf62fdae219dab559f45c89a39102a6f30e
|
Shell
|
XuJianxu/dm
|
/tests/openapi/run.sh
|
UTF-8
| 2,087
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Integration test for the DM OpenAPI "source" endpoints: brings up a
# two-master / two-worker cluster, then exercises create/list/delete source
# calls (helpers come from the sourced test_prepare and the client/ dir).
set -eu
cur=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source $cur/../_utils/test_prepare
export PATH=$PATH:$cur/client/
WORK_DIR=$TEST_DIR/$TEST_NAME
# Drop and recreate the test schema on both upstream MySQL instances.
function prepare_database() {
run_sql 'DROP DATABASE if exists openapi;' $MYSQL_PORT1 $MYSQL_PASSWORD1
run_sql 'CREATE DATABASE openapi;' $MYSQL_PORT1 $MYSQL_PASSWORD1
run_sql 'DROP DATABASE if exists openapi;' $MYSQL_PORT2 $MYSQL_PASSWORD2
run_sql 'CREATE DATABASE openapi;' $MYSQL_PORT2 $MYSQL_PASSWORD2
}
# Exercise the source-related OpenAPI cases end to end.
function test_source() {
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>START TEST OPENAPI: SOURCE"
prepare_database
# create source succesfully
openapi_source_check "create_source1_success"
# recreate source will failed
openapi_source_check "create_source_failed"
# get source list success
openapi_source_check "list_source_success" 1
# delete source success
openapi_source_check "delete_source_success" "mysql-01"
# after delete source, source list should be empty
openapi_source_check "list_source_success" 0
# re delete source failed
openapi_source_check "delete_source_failed" "mysql-01"
# send request to not leader node
openapi_source_check "list_source_with_redirect" 0
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>TEST OPENAPI: SOURCE SUCCESS"
}
# Start two masters and two workers, waiting for each to come online.
function run() {
make install_test_python_dep
# run dm-master1
run_dm_master $WORK_DIR/master1 $MASTER_PORT1 $cur/conf/dm-master1.toml
check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT1
# join master2
run_dm_master $WORK_DIR/master2 $MASTER_PORT2 $cur/conf/dm-master2.toml
check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT2
# run dm-worker1
run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
# run dm-worker2
run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
test_source
}
cleanup_data openapi
cleanup_process
run
cleanup_process
echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>"
| true
|
582cad5ac9e71549c63ef48aefd595b8110a43b0
|
Shell
|
skak/skaktop-home
|
/skript/google
|
UTF-8
| 272
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Google-search all arguments in the w3m text browser; with no arguments or
# a lone --help, print usage instead.
case "$*" in
	""|--help)
		echo "Usage:"
		echo "$0 <search term and as many spaces as you would like goes here>..."
		echo "This will yield results in w3m."
		exit 0
		;;
	*)
		w3m http://www.google.ca/search?q="$*"
		exit 0
		;;
esac
exit 0
| true
|
b1c81fd81688de435410999d88c6f3923a82c660
|
Shell
|
mshokrnezhad/DRA_in_D2D
|
/script_ng.sh
|
UTF-8
| 389
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the node generator and run it 10 times per network size n, tagging
# each result file with the current n and iteration number.
g++ -g node_generator_v01.cpp -o ng.out
#g++ -g node_generator_v02.cpp -o ng.out
#g++ -g node_generator_v03.cpp -o ng.out

for n in $(seq 60 60); do
	for itr in $(seq 1 10); do
		# Marker lines let the result files be split per run afterwards.
		echo "n=$n,iteration=$itr"
		echo "n=$n,iteration=$itr" >> R01_LoN.txt
		echo "n=$n,iteration=$itr" >> R01_LoS.txt
		echo "n=$n,iteration=$itr" >> R02_NN.txt
		./ng.out $n
		sleep 1
	done
done
| true
|
e5e3037589a0e0975323bb70a4457fe075e40793
|
Shell
|
gerardaus/dotfiles
|
/bin/dwm-brightness
|
UTF-8
| 372
| 3.75
| 4
|
[] |
no_license
|
#!/bin/zsh
#
# Set brightness with xbacklight, but never go below 1 (as that's "off").
#
# NOTE(review): despite the header this drives `light`, not xbacklight, and
# the "down" branch clamps to 0 rather than 1 -- confirm which is intended.

# Increment to use.
incr=2
# Current brightness level as reported by light -G.
cur=$(light -G)

case "$1" in
"up")
# From fully off jump straight to level 1, otherwise add the increment.
if [[ $cur -eq 0 ]]; then
light -S 1
else
light -A $incr
fi
;;
"down")
# At (or below) zero stay at 0, otherwise subtract the increment.
if [[ $cur -le 0 ]]; then
light -S 0
else
light -U $incr
fi
;;
*)
echo "Unsupported: \"$1\""
exit 1
esac

# Print the resulting brightness level.
light -G
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.