blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
60400c19619272dd6478e7baec9c50841a9b9248 | Shell | peter1000/ArchLinux_packages_p | /0__UNMAINTAINED__0/libgksu_p/PKGBUILD | UTF-8 | 2,381 | 2.71875 | 3 | [] | no_license | # Maintainer: peter1000 <https://github.com/peter1000>
# Contributor: Jan de Groot <jgc@archlinux.org>
_srcname=libgksu
pkgname=${_srcname}_p
pkgver=2.0.12
pkgrel=6
pkgdesc="gksu authorization library"
arch=('x86_64')
url="http://www.nongnu.org/gksu/index.html"
license=(GPL)
depends=('gconf' 'gtk2' 'libgnome-keyring' 'libgtop' 'startup-notification')
makedepends=('intltool' 'gtk-doc')
options=('!emptydirs')
install=libgksu.install
provides=("libgksu=${pkgver}" "${pkgname}=${pkgver}")
conflicts=('libgksu')
source=(http://people.debian.org/~kov/gksu/${_srcname}-${pkgver}.tar.gz
libgksu-2.0.0-fbsd.patch
libgksu-2.0.12-automake-1.11.2.patch
libgksu-2.0.12-fix-make-3.82.patch
libgksu-2.0.12-notests.patch
libgksu-2.0.12-revert-forkpty.patch
libgksu-2.0.7-libs.patch
libgksu-2.0.7-polinguas.patch)
sha1sums=('81a541ccfe9ea278dd3e2a80b3287f02f6eb88f8'
'd32f24f03a728903de2f3b0a449d7787561fa622'
'0ce8c11f323436521d2f6e714aa6634a92e68059'
'82e4b3db63d80211c7caaba3456542f449ed9707'
'ea24e5c3edf0caac9363a9dfb9e0e00af0653d44'
'cfdf41006e2ba5054aec17f85bdcf3777e62c16f'
'8cb3c4f814c5799b828a202c4200673344ed0df8'
'c1281e894e42646f2b337a222137275cc4b4b487')
# build(): apply the accumulated distro patches (FreeBSD portability,
# library/linking fixes, po/LINGUAS handling, forkpty revert, make-3.82 and
# automake-1.11.2 compatibility, test removal), regenerate the autotools
# files, then configure and compile libgksu.
build() {
cd "${srcdir}/${_srcname}-${pkgver}"
patch -Np1 -i "${srcdir}/libgksu-2.0.0-fbsd.patch"
patch -Np1 -i "${srcdir}/libgksu-2.0.7-libs.patch"
patch -Np1 -i "${srcdir}/libgksu-2.0.7-polinguas.patch"
patch -Np1 -i "${srcdir}/libgksu-2.0.12-revert-forkpty.patch"
patch -Np0 -i "${srcdir}/libgksu-2.0.12-fix-make-3.82.patch"
patch -Np1 -i "${srcdir}/libgksu-2.0.12-notests.patch"
patch -Np1 -i "${srcdir}/libgksu-2.0.12-automake-1.11.2.patch"
# automake insists these files exist even if empty
touch NEWS README
intltoolize --force --copy --automake
autoreconf -fi
./configure --prefix=/usr --sysconfdir=/etc \
--localstatedir=/var --disable-static --disable-schemas-install
make ${MAKEFLAGS}
}
# package(): install into $pkgdir, drop static archives, and merge the
# per-schema GConf files into a single schemas file (direct schema
# installation was disabled at configure time with
# --disable-schemas-install).
package() {
cd "${srcdir}/${_srcname}-${pkgver}"
make GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL=1 DESTDIR="${pkgdir}" install
# static libraries are not shipped
rm -f ${pkgdir}/usr/lib/*.a
install -m755 -d "${pkgdir}/usr/share/gconf/schemas"
gconf-merge-schema "${pkgdir}/usr/share/gconf/schemas/${_srcname}.schemas" --domain libgksu ${pkgdir}/etc/gconf/schemas/*.schemas
# the merged file replaces the individual schema files
rm -f ${pkgdir}/etc/gconf/schemas/*.schemas
}
| true |
300a9ef496409374cde0224ce72254979bac327e | Shell | cross-dev/arch-repo-server | /e2e/retrieve-package.bats | UTF-8 | 765 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bats
# End-to-end test: serve an empty .tar.xz package through arch-repo-server
# and verify the HTTP status, Content-Type header, and that the archive is
# extractable. Helpers get_http_status/get_content_type presumably come
# from lib.sh — TODO confirm.
load ${BATS_TEST_DIRNAME}/lib.sh
url="http://localhost:41268/repo/os/arch/package.tar.xz"
setup() {
# build a minimal repo tree containing an empty tar.xz, then start the
# server in the background serving that tree
local tmpdir=$(mktemp -d ${BATS_TMPDIR}/XXXXXXXX)
mkdir -p ${tmpdir}/repo/os/arch/
tar cJf ${tmpdir}/repo/os/arch/package.tar.xz --files-from /dev/null
arch-repo-server -C ${tmpdir} &>/dev/null &
}
@test 'Correct MIME type in the XZ response' {
run get_http_status ${url}
[ "$status" -eq "0" ]
[ "$output" == "200" ]
run get_content_type ${url}
[ "$status" -eq "0" ]
[ "$output" == "application/x-xz-compressed-tar" ]
}
@test 'Archive is extractable' {
local tmpdir=$(mktemp -d ${BATS_TMPDIR}/XXXXXXX)
run tar xJf <(curl -s ${url}) -C ${tmpdir}
echo $output
[ "$status" -eq "0" ]
}
| true |
86f8600c6e505cf9750ac957a2fa18d621d2231c | Shell | umeboshi2/paella | /vagrant/salt/roots/salt/webdev/files/build-nodejs.sh | UTF-8 | 421 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# Build a nodejs .deb with the node-debian tool and install it when the
# build produced the expected package file.
echo "Preparing to build nodejs"
pushd /var/tmp/make-nodejs
pushd node-debian
# NODE_VERSION is expected in the environment (set by the caller) — TODO confirm
node_version=$NODE_VERSION
arch=`dpkg --print-architecture`
echo "Node Version: $node_version, arch $arch"
./build.sh $node_version
echo "Build of nodejs-${node_version} is complete."
# node-debian names its output nodejs_<version>-1_<arch>.deb
node_deb=nodejs_$node_version-1_$arch.deb
# install only if build.sh actually produced the package
if [ -f $node_deb ]; then
echo "Installing $node_deb"
dpkg -i $node_deb
fi
popd
popd
| true |
4064d432c9a7ac6f19242bbff81b48ac46d76055 | Shell | runeio/platform | /runeio/generic/dir-packages/rune-lib/jshn.sh | UTF-8 | 182 | 3.078125 | 3 | [] | no_license |
# Merge two JSON structures using OpenWrt's jshn helper.
# "jshn -r" dumps a JSON document as shell statements; the first line of the
# second dump is skipped (presumably a re-initialisation statement that
# would wipe the first struct's state — TODO confirm) and the combined
# script is eval'd into the current shell.
jshn_append_json_struct () {
msg01D=`jshn -r "${1}"`
# drop the leading line of the second struct's dump
msg02D=`jshn -r "${2}" | tail -n +2`
msg0D=`echo -e "${msg01D}\n${msg02D}\n"`
eval "$msg0D"
}
| true |
36b782f843e9365c5cb4b829aaf21174ecd5373b | Shell | albertomendozamx/git-hook-deploy | /post-receive | UTF-8 | 858 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Git post-receive hook: deploy whichever branch was just pushed.
#   master      -> force-checkout into LOCAL_SOURCE, then rsync to PRODUCTION
#   development -> force-checkout the development branch into LOCAL_SOURCE only
REPO="deploy-test"
BANK_REPOS="/media/source/code"
PRODUCTION_BRANCH="master"
PRODUCTION="path_ssh"
DEVELOPMENT_BRANCH="development"
LOCAL_SOURCE="path_tmp_source"
# post-receive gets "<old-sha> <new-sha> <refname>" per ref on stdin; only
# read when stdin is a pipe (not a terminal).
if ! [ -t 0 ]; then
read -a ref
fi
# ${ref[2]} is the refname (refs/heads/<branch>); split on '/' and take the
# last component as the branch name
IFS='/' read -ra REF <<< "${ref[2]}"
branch_received="${REF[2]}"
case "$branch_received" in
"$PRODUCTION_BRANCH")
echo "==========DEPLOY PRODUCTION=============="
git --work-tree=$LOCAL_SOURCE --git-dir=$BANK_REPOS/$REPO.git checkout -f
# --delete-after removes destination files only after the transfer finishes;
# .gitignore is never shipped
rsync -rv --update --delete-after --exclude ".gitignore" $LOCAL_SOURCE $PRODUCTION
echo "==========END DEPLOY PRODUCTION=========="
;;
"$DEVELOPMENT_BRANCH")
echo "==========DEPLOY DEVELOPMENT=============="
git --work-tree=$LOCAL_SOURCE --git-dir=$BANK_REPOS/$REPO.git checkout -f development
echo "==========END DEPLOY DEVELOPMENT=========="
;;
esac
89e6cab3a038f9fd35684c742ef7694f18ff3660 | Shell | fusionpbx/fusionpbx-install.sh | /debian/resources/nftables.sh | UTF-8 | 1,134 | 2.578125 | 3 | [] | no_license | #!/bin/sh
#move to script directory so all relative paths work
cd "$(dirname "$0")"
#add the includes
. ./config.sh
. ./colors.sh
. ./environment.sh
#send a message
verbose "Configuring nftables"
# Accept loopback and established/related traffic, then open SSH (22),
# HTTP (80), HTTPS (443/7443), SIP signalling (5060-5091 tcp+udp), RTP
# media (16384-32768 udp), ICMP echo and OpenVPN (1194/udp). Finally
# DSCP-mark outgoing VoIP traffic: 0x2e (EF) for media, 0x1a (AF31) for
# signalling.
#run nftables commands
nft add rule ip filter INPUT iifname "lo" counter accept
nft add rule ip filter INPUT ct state related,established counter accept
nft add rule ip filter INPUT tcp dport 22 counter accept
nft add rule ip filter INPUT tcp dport 80 counter accept
nft add rule ip filter INPUT tcp dport 443 counter accept
nft add rule ip filter INPUT tcp dport 7443 counter accept
nft add rule ip filter INPUT tcp dport 5060-5091 counter accept
nft add rule ip filter INPUT udp dport 5060-5091 counter accept
nft add rule ip filter INPUT udp dport 16384-32768 counter accept
nft add rule ip filter INPUT icmp type echo-request counter accept
nft add rule ip filter INPUT udp dport 1194 counter accept
nft add rule ip mangle OUTPUT udp sport 16384-32768 counter ip dscp set 0x2e
nft add rule ip mangle OUTPUT tcp sport 5060-5091 counter ip dscp set 0x1a
nft add rule ip mangle OUTPUT udp sport 5060-5091 counter ip dscp set 0x1a
85fb93c803974739fd58a72042e1b788e0318e9b | Shell | Daniel-Houston/danielhouston_net | /sldi_p1/intersect | UTF-8 | 608 | 4.28125 | 4 | [] | no_license | #!/bin/bash
# If $1 and $2 are file names, output the lines they have in common.
# You may not assume that these files have been sorted.
# You may output each line only once, and the order does not matter.

# Check usage:
if [[ ! ( -n "$1" && -n "$2") ]]
then
    echo "usage: `basename $0` file1 file2"
    exit 1
fi

# Print lines in common:
# comm(1) requires sorted input, so feed it lexically sorted, de-duplicated
# copies of both files through process substitution. This fixes two defects
# of the previous temp-file approach: the fixed names ./temp1 and ./temp2
# clobbered existing files and raced between concurrent runs in the same
# directory, and a line duplicated in both inputs was printed more than
# once, violating the "each line only once" requirement above.
comm -12 <(sort -u "$1") <(sort -u "$2") | sort -n
| true |
340474b5ad8fb889e4cd5a7dd2db56cc30f6f760 | Shell | CodeViewDevops/CodeView-Security-Toolkit | /lib/Services/Apache/include.sh | UTF-8 | 1,630 | 3.71875 | 4 | [] | no_license | #=== FUNCTION ================================================================
# NAME: fix_apache_exec_by_name
# DESCRIPTION: Disable file execution by name.
# PARAMETERS: --
# RETURNS: 0 = success ; 1 = failure
#===============================================================================
fix_apache_exec_by_name () {
tmp_file=`mktemp /tmp/correction.XXXXXX`
# backupFile and $apacheFile are provided by the calling toolkit — TODO confirm
backupFile "$apacheFile" "data"
#sed '/<Directory/,/<\/Directory/s/^\([ \t]\{0,\}AddHandler[ \t]\{1,\}cgi-script[ \t]\{1,\}.*\.cgi.*\)/#\1/' $apacheFile > $tmp_file
# Comment out every AddHandler directive mapping .shtml (SSI) or cgi-script,
# so Apache no longer executes files based purely on their extension.
sed 's/^[ \t]\{0,\}AddHandler.*\.shtml/# &/g; s/^[ \t]\{0,\}AddHandler.*\cgi\-script.*/# &/g' $apacheFile > $tmp_file
mv $tmp_file $apacheFile
return 0
}
#=== FUNCTION ================================================================
# NAME: fix_perm_on_cert_files
# DESCRIPTION: Fix the permissions on certifies files.
# PARAMETERS: --
# RETURNS: 0 = success
#===============================================================================
fix_perm_on_cert_files () {
# Apache2 (Debian) : /etc/ssl/certs
# Apache1 (Debian) : /etc/apache/ssl.crt/
# Suse Enterprise 10 : /etc/apache2/ssl.crt
# Red hat Enterprise e Fedora : /etc/pki/tls/certs
# Slackware : /etc/apache/ssl.crt
cert_dirs="/etc/ssl/certs /etc/apache/ssl.crt /etc/apache2/ssl.crt /etc/pki/tls/certs /etc/apache/ssl.crt"
backupFiles=""
# collect every *.crt from whichever candidate directories exist
for dir in $cert_dirs; do
[ ! -d "$dir" ] && continue
backupFiles="$backupFiles `ls -R ${dir}/*.crt 2> /dev/null`"
done
# backupFile is supplied by the surrounding toolkit — TODO confirm
backupFile "$backupFiles" "data"
# $backupFiles is intentionally unquoted: it holds a space-separated list;
# strip write permission from group/other on all collected certificates
chmod -R go-w $backupFiles
return 0
}
| true |
a5dc954c6a4c497d07d869817d636c6e66b22fb5 | Shell | czq7966/gitlabimporter | /src/main/java/nd/sdp/gerrit/plugins/gitlabimporter/gitlabimporter.sh | UTF-8 | 8,247 | 3.09375 | 3 | [] | no_license |
####默认参数
#_old_gerrit_server=${_old_gerrit_server:-sdpgerrit-admin}
#_remote_server=${_remote_server:-sdpgitlab}
#_local_server=${_local_server:-gerrit-server}
#_local_server_admin=${_local_server_admin:-gerrit-admin}
#_local_gerrit_site=${_local_gerrit_site:-/home/gerrit/site}
##_local_server=${_local_server:-gerrit-server-debug}
##_local_server_admin=${_local_server_admin:-gerrit-admin-debug}
##_local_gerrit_site=${_local_gerrit_site:-/home/nd/gerrit}
#_local_git_dir=${_local_git_dir:-${_local_gerrit_site}/git}
_local_bak_dir=${_local_bak_dir:-${_local_gerrit_site}/bak}
_local_admin_group="Administrators"
_local_non_interactive_group="Non-Interactive Users"
_local_predefined_group=("${_local_admin_group}" "${_local_non_interactive_group}")
_local_group_subfix_owner="_owner"
_local_group_subfix_read="_read"
_local_group_subfix_push="_push"
_local_group_subfix_review="_review"
_local_group_subfix_verified="_verified"
_local_group_subfix_submit="_submit"
_local_project_gropus_subfix="${_local_group_subfix_owner} ${_local_group_subfix_read} ${_local_group_subfix_push} ${_local_group_subfix_review} ${_local_group_subfix_verified} ${_local_group_subfix_submit}"
_local_project_group_description=("项目的所有者" "有下载代码的权限" "有上传代码的权限" "有审核代码的权限" "有验证代码的权限" "有提交代码入库的权限")
##########################函数####################
########################################## System-related ########################################
# Flush Gerrit's caches via the admin SSH connection.
# $1 = cache name; defaults to --all when omitted. Echoes the command output.
function gerrit_flush_caches()
{
__name=${1:---all}
__result=`ssh ${_local_server_admin} "gerrit flush-caches ${__name}"`
echo $__result
}
########################################## Project-related ########################################
# Check whether a project exists in the local Gerrit git directory.
# $1 = project name (".git" suffix optional). Prints 1 if present, 0 if not.
function gerrit_exist_project()
{
__repo="${1%.git}"
__objects_dir="${_local_git_dir}/${__repo}.git/objects"
# the remote "test -d" is silent on success; "0" is only echoed when the
# objects directory is missing
__probe=`ssh ${_local_server} test -d "${__objects_dir}" || echo 0`
if [ "$__probe" != "0" ]; then
echo 1
else
echo 0
fi
}
# Check whether a project exists on the remote (Gitlab) server.
# $1 = project name; prints 1 when "git ls-remote" succeeds, 0 when it
# reports a "fatal:" error.
function gerrit_remote_exist_project()
{
__name=${1%.git}.git
__fatal="fatal:"
# ls-remote emits "fatal: ..." (captured via 2>&1) when the repo is missing
__result=`ssh ${_local_server} git ls-remote ${_remote_server}:${__name} 2>&1`
__result=$(echo $__result | grep "${__fatal}")
if [ "$__result" == "" ]; then
echo 1
else
echo 0
fi
}
# Create a Gerrit project; $1 = project name (".git" suffix optional).
# Prints 1 on success, or "0 <message>" when the project already exists.
function gerrit_create_project()
{
__name=${1%.git}
__result=$(gerrit_exist_project ${__name})
if [ "${__result}" == "1" ]; then
# message: "project already exists: <name>"
echo 0 "项目已存在:${__name}"
else
__result=`ssh ${_local_server_admin} "gerrit create-project \"${__name}\""`
# re-check existence to report actual success or failure
__result=$(gerrit_exist_project ${__name})
__=$(gerrit_flush_caches)
echo $__result
fi
}
# Mirror a project from the remote server; $1 = project name.
# Creates the bare project on Gerrit, then fetches every branch and tag
# from the remote. Prints "1 <output>" on success, "0 <reason>" otherwise.
function gerrit_mirror_project_from_remote()
{
__name=${1%.git}
__result=($(gerrit_exist_project ${__name}))
if [ "${__result}" == "1" ]; then
# message: "local project already exists: <name>"
echo 0 "本地项目已存在:${__name}"
else
__remote_project=${__name}.git
__local_project=${_local_git_dir}/${__name}.git
__result=$(gerrit_remote_exist_project "${__name}")
if [ "${__result}" == "0" ]; then
# message: "remote project does not exist: <name>"
echo 0 "远程项目不存在:${__name}"
else
__msg=`ssh ${_local_server_admin} "gerrit create-project \"${__name}.git\""`
# mirror all heads and tags from the remote into the new bare repo
__msg=`ssh ${_local_server} "cd \"${__local_project}\"; git fetch ${_remote_server}:${__remote_project} +refs/heads/*:refs/heads/* +refs/tags/*:refs/tags/*"`
__result=($(gerrit_exist_project ${__name}))
__=$(gerrit_flush_caches)
echo "$__result" "$__msg"
fi
fi
}
# Refresh an already-mirrored project from the remote; $1 = project name.
# Prints "0 <reason>" when the local project is missing; otherwise fetches
# all heads/tags and flushes caches (no success value is printed).
function gerrit_update_project_from_remote()
{
__name=${1%.git}
__result=($(gerrit_exist_project ${__name}))
if [ "${__result}" != "1" ]; then
# message: "local project does not exist: <name>"
echo 0 "本地项目不存在:${__name}"
else
__remote_project=${__name}.git
__local_project=${_local_git_dir}/${__name}.git
ssh ${_local_server} "cd ${__local_project} ; git fetch ${_remote_server}:${__remote_project} +refs/heads/*:refs/heads/* +refs/tags/*:refs/tags/*"
__=$(gerrit_flush_caches)
fi
}
########################################## Group-related ########################################
# Check whether a Gerrit group exists; $1 = group name.
# Prints 1 when "gerrit ls-groups -q" echoes the name back, 0 otherwise.
function gerrit_exist_group()
{
__name=$1
# NOTE: the remote command previously carried a stray trailing single
# quote ( ...\"${__name}\"'" ), which left the remote shell with an
# unterminated quote and made the lookup fail; it has been removed.
__result=`ssh ${_local_server_admin} "gerrit ls-groups -q \"${__name}\""`
if [ "$__result" == "${__name}" ]; then
echo 1
else
echo 0
fi
}
# Create a group. $1 = group name, $2 = owner group (optional),
# $3 = description (optional; defaults to the group name).
# Prints 1 when the group exists after the attempt, 0 otherwise.
function gerrit_create_group()
{
__name=$1
__owner=$2
__description=$3
# __owner=${__owner:-"--owner Administrators"}
__description=${__description:-"${__name}"}
# only pass --owner when an owner group was supplied
if [ "$__owner" != "" ]; then
__owner=" --owner \"${__owner}\""
fi
if [ "${__name}" != "" ]; then
ssh ${_local_server_admin} "gerrit create-group ${__owner} --description \"${__description}\" \"${__name}\""
fi
__result=$(gerrit_exist_group $__name)
echo $__result
}
# Add a user account to a Gerrit group.
# $1 = group name, $2 = user. Prints 1 when the add was attempted,
# 0 when either argument is missing.
function gerrit_set_members_user()
{
__group_name=$1
__member=$2
# guard: both arguments are required
if [ -z "${__group_name}" ] || [ -z "${__member}" ]; then
echo 0
else
ssh ${_local_server_admin} "gerrit set-members --add \"${__member}\" \"${__group_name}\""
echo 1
fi
}
# Include one group inside another.
# $1 = containing group, $2 = group to include. Prints 1 when the include
# was attempted, 0 when an argument is missing.
function gerrit_set_members_group()
{
__name=$1
__group=$2
if [ "${__name}" != "" -a "${__group}" != "" ]; then
ssh ${_local_server_admin} "gerrit set-members --include \"${__group}\" \"${__name}\""
echo 1
else
echo 0
fi
}
# Look up a group's UUID; $1 = group name.
# Prints the UUID, or nothing when the group does not exist.
function gerrit_get_group_uuid()
{
__name=$1
__uuid=
__exist=$(gerrit_exist_group "$__name")
if [ "$__exist" == "1" ]; then
# the verbose listing prints "<name> <uuid> ..."; take the second field.
# NOTE: a stray trailing single quote inside the remote command string
# (which broke quoting on the remote shell) has been removed.
__group=(`ssh ${_local_server_admin} "gerrit ls-groups -v -q \"${__name}\""`)
__uuid=${__group[1]}
fi
echo "$__uuid"
}
# Create the six per-project permission groups; $1 = project name.
# Groups are named <project><suffix> (owner/read/push/review/verified/
# submit). The owner group is created first with no owner; every later
# group is owned by it. Prints the result of the last create attempt.
function gerrit_batch_create_group_by_project()
{
__name=${1%.git}
__groups=$_local_project_gropus_subfix
__group=
__group_owner=
__group_desc=
__result=0
__i=0
if [ "${__name}" != "" ]; then
for v in $__groups
do
__group=${__name}$v
# description text appended with "(<name> project)" in Chinese
__group_desc=${_local_project_group_description[$__i]}"($__name 项目)"
__result=$(gerrit_create_group "$__group" "$__group_owner" "$__group_desc")
# from the second iteration on, the owner group owns the new groups
__group_owner=${__name}${_local_group_subfix_owner}
# accumulates as the string "0+1+1..." but is evaluated arithmetically
# when used as an array index above, so indexing still works
__i=${__i}+1
done
fi
__=$(gerrit_flush_caches)
echo $__result
}
# Make the project's "<name>_read" group include the other five project
# groups, so membership in any project role implies read access.
# $1 = project name. Prints the result of the last include attempt.
function gerrit_batch_include_group_by_project()
{
__name=${1%.git}
__group_read=${__name}${_local_group_subfix_read}
__groups=$_local_project_gropus_subfix
__group=
__result=0
__i=0
if [ "${__name}" != "" ]; then
for v in $__groups
do
# skip the read group itself
if [ "${v}" != "${_local_group_subfix_read}" ]; then
__group=${__name}$v
__result=$(gerrit_exist_group "$__group")
if [ "${__result}" == "1" ]; then
__result=$(gerrit_set_members_group "$__group_read" "$__group")
fi
fi
done
fi
echo $__result
}
###########################################################################################################
# Mirror a project from the remote server and initialise its permissions.
# Expected variables (passed on the command line / environment):
#   _remote_server       SSH alias of the remote Gitlab host
#   _local_server        SSH alias of the Gerrit host
#   _local_server_admin  SSH alias of the Gerrit admin account
#   _local_gerrit_site   Gerrit installation directory
#   _local_git_dir       Gerrit git repository directory
#   _import_projet       repository name to import from Gitlab
#   _current_user        user to be granted the owner role
function plugin_gerrit_import_project()
{
__name=${_import_projet%.git}
__result=($(gerrit_mirror_project_from_remote "$__name"))
__result2=""
__result3=""
if [ "$__result" == "1" ]; then
# create the six permission groups and wire them into the read group
__result=($(gerrit_batch_create_group_by_project "$__name"))
if [ "$__result" == "1" ]; then
__=($(gerrit_batch_include_group_by_project "$__name"))
fi
__result2=""
if [ "${__result}" == "1" -a "${_current_user}" != "" ]; then
__group=${__name}${_local_group_subfix_owner}
__result2=$(gerrit_set_members_user "${__group}" "${_current_user}")
if [ "${__result2}" == "0" ]; then
# message: "failed to set user <user> as <group> role"
__result2="设置用户 ${_current_user} 为 ${__group} 角色失败!"
fi
fi
fi
echo "${__result[*]} ${__result2} ${__result3} "
}
# Execute the statement passed in from the command line
eval $@
| true |
a66bd965f115de904e55e0d3ded430a187b1cb19 | Shell | ialexey/dotfiles | /bin/git_prompt_info | UTF-8 | 681 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# Print a zsh-theme-style git prompt segment: "[<branch><dirty-dot>" plus
# suffix for the current repository, or nothing outside a git repo.
ZSH_THEME_GIT_PROMPT_PREFIX="%{$reset_color%}%{%F{white}%}["
ZSH_THEME_GIT_PROMPT_SUFFIX="]"
ZSH_THEME_GIT_PROMPT_DIRTY="%{%F{red}%}●%{%F{white}%}%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_CLEAN="]%{$reset_color%} "
function git_prompt_info() {
local ref
# prefer the symbolic branch name; fall back to the short commit hash
# (detached HEAD); print nothing when not inside a repository
ref=$(command git symbolic-ref HEAD 2> /dev/null) || \
ref=$(command git rev-parse --short HEAD 2> /dev/null) || return 0
echo "$ZSH_THEME_GIT_PROMPT_PREFIX${ref#refs/heads/}$(git_dirty)$ZSH_THEME_GIT_PROMPT_SUFFIX"
}
function git_dirty() {
# any tracked change (untracked files ignored) marks the prompt dirty
[[ $(git status --untracked-files=no --porcelain 2> /dev/null | wc -l | awk "{print $1}") -gt 0 ]] && echo "$ZSH_THEME_GIT_PROMPT_DIRTY"
}
git_prompt_info
07c1ad0b3b259582a07b5687d5b86be661c1b856 | Shell | hygull/unix-ssc | /scripts/07_num_greatness.sh | UTF-8 | 1,839 | 3.59375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#------------- Description ------------------------
# *Date of creation : 19 Feb 2017
# *Aim of script : To print the greatest number among 3 numbers
# *Created by : Rishikesh Agrawani
#------------- Script -----------------------------
echo -n "Enter the value of 1st integer number : "
read a
echo -n "Enter the value of 2nd integer number : "
read b
echo -n "Enter the value of 3rd integer number : "
read c
echo "Checking just wait ..."
# Flattened decision instead of nested if/else:
#   a wins only when it beats both others;
#   otherwise b wins when it is not smaller than a and beats c;
#   in every remaining case c is greatest (ties resolve to the later
#   value, exactly as before).
if [ $a -gt $b ] && [ $a -gt $c ]
then
    echo "$a is greatest"
elif [ $a -le $b ] && [ $b -gt $c ]
then
    echo "$b is greatest"
else
    echo "$c is greatest"
fi
# ------------ Ouptut -----------------------------
# admins-MacBook-Pro-3:scripts admin$ chmod +x 07_num_greatness.sh
# admins-MacBook-Pro-3:scripts admin$ bash 07_num_greatness.sh
# Enter the value of 1st integer number : 12
# Enter the value of 2nd integer number : 10
# Enter the value of 3rd integer number : 13
# Checking just wait ...
# 13 is greatest
# admins-MacBook-Pro-3:scripts admin$ bash 07_num_greatness.sh
# Enter the value of 1st integer number : 14
# Enter the value of 2nd integer number : 11
# Enter the value of 3rd integer number : 1
# Checking just wait ...
# 14 is greatest
# admins-MacBook-Pro-3:scripts admin$ bash 07_num_greatness.sh
# Enter the value of 1st integer number : 2
# Enter the value of 2nd integer number : 3
# Enter the value of 3rd integer number : 67
# Checking just wait ...
# 67 is greatest
# admins-MacBook-Pro-3:scripts admin$ bash 07_num_greatness.sh
# Enter the value of 1st integer number : 12
# Enter the value of 2nd integer number : 56
# Enter the value of 3rd integer number : 56
# Checking just wait ...
# 56 is greatest
| true |
d53e1fccd9c8909c190beff851f14433c48aa9cb | Shell | EmmetCaulfield/linux-misc | /usr/local/bin/hdmi-mode | UTF-8 | 2,184 | 4.5 | 4 | [] | no_license | #!/bin/bash
# Video output that every add/remove operation targets
vif='HDMI1'
# usage(fd, rc): print the short usage text to file descriptor fd
# (default 2 = stderr) and exit with status rc (default 1).
function usage() {
def_out=2
def_ret=1
fd=${1:-$def_out} # Write to stderr by default
rc=${2:-$def_ret} # Return 1 to host by default
cat <<EOF >&$fd
USAGE: $(basename $0) -a <hor> <ver> [freq]
or: $(basename $0) -r <hor> <ver> [freq]
or: $(basename $0) -r <name>
or: $(basename $0) -h
Uses xrandr and cvt to add (-a) or remove (-r) a VESA modeline
for video mode <hor>x<ver>@[freq] Hz to/from interface '$vif'
EOF
exit $rc
}
# Print the long help text to stdout, then exit 0 via usage(1, 0).
function show_help() {
cat <<EOF
$(basename $0) uses 'xrandr' to add/remove a VESA modeline to/from video
interface '$vif', given the horizontal and vertical resolution, <hor>
and <ver>, respectively, and, optionally, the scan frequency, [freq]
(see below). The parameters are passed to 'cvt' to determine the modeline
from the parameter values.
This script was written to simplify the process of manipulating video
modes while trying to get a HDTV to display output from the HDMI port
on a laptop computer.
Note that the VESA mode added must, in general, be one for which an
EDID code exists. This means that the common native panel resolution
of 1366x768 is unlikely to work, and will be silently converted into
1388x768 by 'cvt'.
EOF
usage 1 0
}
# Derive the xrandr mode name. With 2-3 args (<hor> <ver> [freq]) ask cvt
# for the modeline and take its quoted name field; with a single arg treat
# it as an already-known mode name. Surrounding quotes are stripped.
function modename() {
if [ $# -lt 1 -o $# -gt 3 ]; then
usage
fi
if [ $# -gt 1 ]; then
# cvt prints: Modeline "WxH_F" <timings...> — field 2 is the name
name=$(cvt $@ | awk '/Modeline/ {print $2}')
else
name=$1
fi
echo $name | sed 's/"//g'
}
# Create the VESA mode computed by cvt and attach it to $vif.
# Args: <hor> <ver> [freq]
function addmode() {
if [ $# -lt 2 -o $# -gt 3 ]; then
usage
fi
name=$(modename $@)
# strip the literal word "Modeline" and the quotes around the name so the
# remaining fields can feed xrandr --newmode directly
xrandr --newmode $( \
cvt $@ | awk '/Modeline/ {$1=""; gsub("\"", "", $2); print $0}' \
) && xrandr --addmode $vif $name
}
# Detach a mode from $vif (when attached) and delete it.
# Args: <hor> <ver> [freq] or a single <name>.
function delmode() {
if [ $# -lt 1 -o $# -gt 3 ]; then
usage
fi
name=$(modename $@)
# field count of the matching xrandr line distinguishes the two states:
# 3 fields appears to mean the mode exists but is not attached, 2 fields
# that it is attached to $vif and must be --delmode'd first — TODO confirm
n=$(xrandr | awk "/$name/ {print NF}")
if [ $n -eq 3 ]; then
xrandr --rmmode $name
elif [ $n -eq 2 ]; then
xrandr --delmode $vif $name \
&& xrandr --rmmode $name
fi
}
# Dispatch on the first flag: -h = help, -a = add mode, -r = remove mode;
# anything else prints usage and exits non-zero.
op=$1; shift
if [ "x$op" = "x-h" ]; then
show_help
elif [ "x$op" = "x-a" ]; then
addmode $@
# show the freshly added mode for confirmation
xrandr | grep $(modename $@)
elif [ "x$op" = "x-r" ]; then
delmode $@
else
usage
fi
| true |
5b7161ea56fdf9fd18a456c91a1b4658c3572f8a | Shell | ramon349/quip_classification | /u24_lymphocyte/scripts/borbConv.sh | UTF-8 | 662 | 3.453125 | 3 | [
"BSD-3-Clause"
] | permissive | source ../conf/variables.sh
SCRIPTDIR="${BASE_DIR}/scripts"
# Convert the JSON outputs (regular and binary folders) to BORB format with
# process_json.js, staging *.json copies in a per-folder temp directory.
declare -a arr=("$JSON_OUTPUT_FOLDER" "$BINARY_JSON_OUTPUT_FOLDER")
echo "Starting borb conversion"
for i in "${arr[@]}"
do
echo "Looking at ${i} directory"
input_path=${i}
temp_path="${i}/temp"
output_path="${i}/BORB_FILES"
mkdir -p "${temp_path}"
mkdir -p "${output_path}"
cd "${input_path}"
ls
# stage only the .json payloads for conversion
cp *.json "${temp_path}"
cd "${temp_path}"
echo "---- on temp dir ---- "
ls
# metadata files are not converted; drop the staged meta* copies
rm ./meta*
cd "${SCRIPTDIR}"
echo "Working on ${temp_path}"
node process_json.js "${temp_path}" "${output_path}"
done
echo "Finished borb conversion"
| true |
e6ebebf62f0a7a6d6d68596798841643cfa1d6e9 | Shell | zepto/pkgbuilds | /mac-robber/PKGBUILD | UTF-8 | 790 | 2.78125 | 3 | [] | no_license | # Maintainer: Josiah Gordon <josiahg@gmail.com>
pkgname=mac-robber
pkgver=1.02
pkgrel=1
pkgdesc="Digital investigation tool for mounted file systems."
arch=(i686 x86_64)
url="http://www.sleuthkit.org/mac-robber/desc.php"
license=('GPL')
source=(http://downloads.sourceforge.net/project/$pkgname/$pkgname/$pkgver/$pkgname-$pkgver.tar.gz)
md5sums=('6d6d99aa882a46b2bc5231d195fdb595')
sha256sums=('5895d332ec8d87e15f21441c61545b7f68830a2ee2c967d381773bd08504806d')
# build(): compile mac-robber with the upstream Makefile.
build() {
cd "$srcdir/$pkgname-$pkgver"
make
}
# package(): install the binary under /usr/bin and ship the README and
# CHANGES documents under /usr/share/mac-robber.
package() {
cd "$srcdir/$pkgname-$pkgver"
install -dm755 $pkgdir/usr/{bin,share/$pkgname}
install -Dm755 $pkgname $pkgdir/usr/bin/$pkgname
install -Dm644 README $pkgdir/usr/share/$pkgname/README
install -Dm644 CHANGES $pkgdir/usr/share/$pkgname/CHANGES
}
# vim:set ts=2 sw=2 et:
| true |
94153d4a4b2b9effc2083d35fa5c0937963e09be | Shell | paulozava/shell_scripts | /study/requisicao2.sh | UTF-8 | 322 | 3.15625 | 3 | [] | no_license | #!/bin/bash
if [ -z $1 ]
then
while [ -z $requisicao ]
do
read -p "Você esqueceu de colocar o parâmetro (GET, PUT, POST, DELETE): " requisicao
letra_maiuscula=$(echo $requisicao| awk '{ print toupper($1) }')
done
else
letra_maiuscula=$(echo $1 | awk '{ print toupper($1) }')
fi
if [[
| true |
7e6eef403755d62cb643ef93bfcbc3161891757e | Shell | ma3ki/startupscripts | /publicscript/mailsystem_centos7/setup_scripts/_04_rspamd.sh | UTF-8 | 5,774 | 2.984375 | 3 | [] | no_license | #!/bin/bash -ex
# Install and configure the rspamd spam filter and its redis backend;
# later sections generate per-domain DKIM/ARC keys and expose the web UI.
source $(dirname $0)/../config.source
echo "---- $0 ----"
#-- configure the rspamd yum repository and install rspamd + redis
curl https://rspamd.com/rpm-stable/centos-7/rspamd.repo > /etc/yum.repos.d/rspamd.repo
rpm --import https://rspamd.com/rpm-stable/gpg.key
yum install -y rspamd redis
mkdir /etc/rspamd/local.d/keys
cat <<'_EOL_'> /etc/rspamd/local.d/options.inc
filters = "chartable,dkim,spf,surbl,regexp,fuzzy_check";
check_all_filters = true;
_EOL_
cat <<'_EOL_'> /etc/rspamd/local.d/milter_headers.conf
#use = ["x-spamd-result","x-rspamd-server","x-rspamd-queue-id","authentication-results","x-spam-level","x-virus"];
use = ["authentication-results"];
authenticated_headers = ["authentication-results"];
_EOL_
cat <<_EOL_> /etc/rspamd/local.d/redis.conf
servers = "${REDIS_SERVER}";
_EOL_
cat <<'_EOL_'> /etc/rspamd/local.d/actions.conf
reject = null;
add_header = 2.0 ;
greylist = null;
_EOL_
cat <<'_EOL_'> /etc/rspamd/local.d/greylist.conf
enabled = false
_EOL_
cat <<'_EOL_'> /etc/rspamd/local.d/phishing.conf
openphish_enabled = true;
phishtank_enabled = true;
_EOL_
cat <<_EOL_> /etc/rspamd/local.d/antivirus.conf
clamav {
action = "reject";
type = "clamav";
servers = "/var/run/clamd.scan/clamd.sock";
symbol = "CLAM_VIRUS";
patterns {
#symbol_name = "pattern";
JUST_EICAR = "^Eicar-Test-Signature$";
}
}
_EOL_
usermod -aG clamscan _rspamd
usermod -aG virusgroup _rspamd
cat <<'_EOL_'> /etc/rspamd/local.d/url_reputation.conf
enabled = true;
# Key prefix for redis - default "Ur."
key_prefix = "Ur.";
# Symbols to insert - defaults as shown
symbols {
white = "URL_REPUTATION_WHITE";
black = "URL_REPUTATION_BLACK";
grey = "URL_REPUTATION_GREY";
neutral = "URL_REPUTATION_NEUTRAL";
}
# DKIM/DMARC/SPF allow symbols - defaults as shown
foreign_symbols {
dmarc = "DMARC_POLICY_ALLOW";
dkim = "R_DKIM_ALLOW";
spf = "R_SPF_ALLOW";
}
# SURBL metatags to ignore - default as shown
ignore_surbl = ["URIBL_BLOCKED", "DBL_PROHIBIT", "SURBL_BLOCKED"];
# Amount of samples required for scoring - default 5
threshold = 5;
#Maximum number of TLDs to update reputation on (default 1)
update_limit = 1;
# Maximum number of TLDs to query reputation on (default 100)
query_limit = 100;
# If true, try to find most 'relevant' URL (default true)
relevance = true;
_EOL_
#-- DKIM
mkdir -p ${WORKDIR}/keys
for domain in ${DOMAIN_LIST}
do
rspamadm dkim_keygen -d ${domain} -s default -b 1024 > ${WORKDIR}/keys/${domain}.keys
head -16 ${WORKDIR}/keys/${domain}.keys > /etc/rspamd/local.d/keys/default.${domain}.key
chmod 600 /etc/rspamd/local.d/keys/default.${domain}.key
chown _rspamd. /etc/rspamd/local.d/keys/default.${domain}.key
done
cat <<'_EOL_'> /etc/rspamd/local.d/dkim_signing.conf
# メーリングリストや転送の対応
allow_hdrfrom_mismatch = true;
sign_local = true;
# subdomain の sign 対応
use_esld = false;
try_fallback = false;
_EOL_
for domain in ${DOMAIN_LIST}
do
cat <<-_EOL_>> /etc/rspamd/local.d/dkim_signing.conf
domain {
${domain} {
path = "/etc/rspamd/local.d/keys/\$selector.\$domain.key";
selector = "default";
}
}
_EOL_
done
cat <<_EOL_>> /etc/rspamd/local.d/dkim_signing.conf
#-- メーリングリストを使用しない場合
sign_headers = '(o)from:(o)sender:(o)reply-to:(o)subject:(o)date:(o)message-id:(o)to:(o)cc:(o)mime-version:(o)content-type:(o)content-transfer-encoding:resent-to:resent-cc:resent-from:resent-sender:resent-message-id:(o)in-reply-to:(o)references:list-id:list-owner:list-unsubscribe:list-subscribe:list-post';
_EOL_
cat <<'_EOL_'> /etc/rspamd/local.d/arc.conf
# メーリングリストや転送の対応
allow_hdrfrom_mismatch = true;
sign_local = true;
use_domain = "envelope";
# subdomain の sign 対応
use_esld = false;
try_fallback = false;
_EOL_
for domain in ${DOMAIN_LIST}
do
cat <<-_EOL_>> /etc/rspamd/local.d/arc.conf
domain {
${domain} {
path = "/etc/rspamd/local.d/keys/\$selector.\$domain.key";
selector = "default";
}
}
_EOL_
done
cat <<_EOL_>> /etc/rspamd/local.d/arc.conf
#-- メーリングリストを使用しない場合
sign_headers = "(o)from:(o)sender:(o)reply-to:(o)subject:(o)date:(o)message-id:(o)to:(o)cc:(o)mime-version:(o)content-type:(o)content-transfer-encoding:resent-to:resent-cc:resent-from:resent-sender:resent-message-id:(o)in-reply-to:(o)references:list-id:list-owner:list-unsubscribe:list-subscribe:list-post:dkim-signature";
_EOL_
cat <<_EOL_> /etc/rspamd/local.d/history_redis.conf
servers = ${REDIS_SERVER}:6379;
key_prefix = "rs_history";
nrows = 10000;
compress = true;
subject_privacy = false;
_EOL_
cat <<_EOL_> /etc/rspamd/local.d/mime_types.conf
bad_extensions = {
ace = 4,
arj = 4,
bat = 2,
cab = 3,
com = 2,
exe = 1,
jar = 2,
lnk = 4,
scr = 4,
};
bad_archive_extensions = {
pptx = 0.1,
docx = 0.1,
xlsx = 0.1,
pdf = 0.1,
jar = 3,
js = 0.5,
vbs = 4,
};
archive_extensions = {
zip = 1,
arj = 1,
rar = 1,
ace = 1,
7z = 1,
cab = 1,
};
_EOL_
#-- set the web interface password (hashed with rspamadm pw)
web_passwd=$(rspamadm pw -p ${ROOT_PASSWORD})
cat <<_EOL_> /etc/rspamd/local.d/worker-controller.inc
password = "${web_passwd}";
enable_password = "${web_passwd}";
_EOL_
#-- enable and start redis and rspamd
systemctl enable redis rspamd
systemctl start redis rspamd
#-- nginx に設定追加
mkdir -p /etc/nginx/conf.d/https.d
cat <<'_EOL_' > /etc/nginx/conf.d/https.d/rspamd.conf
location ^~ /rspamd {
location /rspamd/ {
proxy_pass http://127.0.0.1:11334/;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
_EOL_
| true |
5104e4ca97833857a86b6a1ae8c901c170882fa1 | Shell | acm-uic/reachable | /pingSubDomains.sh | UTF-8 | 508 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Probe each CS subdomain once and report the ones that do not respond.
#the following are subdomains to ping and check if it's down
array=(
"acm.cs.uic.edu"
"macserve.cs.uic.edu"
"linux.cs.uic.edu"
"malware.cs.uic.edu"
"hans.cs.uic.edu"
"brink.cs.uic.edu"
"sigbuild.cs.uic.edu"
"dvorak.cs.uic.edu"
"cuda.cs.uic.edu"
"siggame.cs.uic.edu"
"wics.cs.uic.edu"
"littlebell.cs.uic.edu"
)
# "${array[@]}" expands each entry as its own word, and the exit status of
# ping is tested directly instead of capturing unused output into "_" and
# inspecting $? afterwards.
for subDomain in "${array[@]}"
do
    # -c 1: single probe; output is discarded — only the status matters
    if ! ping -c 1 "$subDomain" &> /dev/null
    then
        echo "failed on $subDomain"
    fi
done
| true |
cfd59d08811e2a6049178249303d548212c378f2 | Shell | Shushsa/infra | /workstation/scripts/update-base-image.sh | UTF-8 | 1,192 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Rebuild KIRA's docker base image when its sources changed. Dependent
# images (tools-image, validator) are deleted first so they are rebuilt
# against the fresh base; progress-touch.sh ticks feed a progress counter.
exec 2>&1
set -e
set -x
START_TIME_IMAGE_UPDATE="$(date -u +%s)"
# load KIRA_* environment variables; discard any profile chatter
source "/etc/profile" &> /dev/null
echo "------------------------------------------------"
echo "| STARTED: BASE IMAGE UPDATE v0.0.1 |"
echo "------------------------------------------------"
# image-updated.sh prints "True"/"False"; any failure maps to "error"
BASE_IMAGE_EXISTS=$($WORKSTATION_SCRIPTS/image-updated.sh "$KIRA_DOCKER/base-image" "base-image" || echo "error")
if [ "$BASE_IMAGE_EXISTS" == "False" ] ; then
$WORKSTATION_SCRIPTS/delete-image.sh "$KIRA_DOCKER/tools-image" "tools-image" #1
$WORKSTATION_SCRIPTS/delete-image.sh "$KIRA_DOCKER/validator" "validator" #2 (+1)
echo "INFO: Updating base image..."
$WORKSTATION_SCRIPTS/update-image.sh "$KIRA_DOCKER/base-image" "base-image" #6 (+4)
elif [ "$BASE_IMAGE_EXISTS" == "True" ] ; then
# nothing to rebuild; still advance the progress counter by all 6 ticks
$KIRA_SCRIPTS/progress-touch.sh "+6" #6
echo "INFO: base-image is up to date"
else
echo "ERROR: Failed to test if base image exists"
exit 1
fi
echo "------------------------------------------------"
echo "| FINISHED: BASE IMAGE UPDATE v0.0.1 |"
echo "| ELAPSED: $(($(date -u +%s)-$START_TIME_IMAGE_UPDATE)) seconds"
echo "------------------------------------------------"
| true |
47be868bfa80ead8bde50a86b05962b4831deee9 | Shell | tzahola/flac2m4a | /flac2m4a | UTF-8 | 4,582 | 3.90625 | 4 | [] | no_license | #!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
parallel_flags=()
while (( $# > 0 )); do
case "$1" in
-o|--output)
shift
export output="$1"
shift
;;
-p|--parallel)
shift
parallel_flags+=(-j 8)
;;
-h|--help)
echo "flacconv (-o|--output) output_dir [-p|--parallel] [--] input ..."
exit
;;
--)
shift
break
;;
*)
break
esac
done
if [[ "${output:-}" == "" ]]; then
echo "output_dir missing!"
exit 1
elif [[ $# == 0 ]]; then
echo "No input specified!"
exit 2
fi
targets=()
# visit FROM TO PARENT_TARGET
# Recursively walks FROM and prints makefile rules (to stdout) that:
#   * recreate the directory tree under TO,
#   * copy artwork files (jpg/jpeg/png/gif),
#   * decode each .flac/.fla to .wav, encode to gapless .m4a, copy the tags,
#     and finally delete the intermediate .wav.
# Appends every generated target name to the global 'targets' array.
# The wav/m4a/rm rules depend on their neighbouring tracks so that the
# --gapless-before/--gapless-after input files still exist when needed.
function visit() {
    local from="$1"
    local to="$2"
    local parent_target="$3"
    local files=()
    if [[ -d "$from" ]]; then
        # Directory: emit a mkdir rule, recurse into subdirs, gather files.
        local target="dir_${#targets[@]}"
        targets+=("$target")
        local name="$(basename "$from")"
        printf -- '%s : %s\n' "$target" "$parent_target"
        printf -- '\tmkdir -p %q\n' "$to/$name"
        while IFS= read -r -d '' dir; do
            visit "$dir" "$to/$name" "$target"
        done < <(find "$from" -maxdepth 1 -mindepth 1 -type d -print0)
        while IFS= read -r -d '' file; do
            files+=("$file")
        done < <(find "$from" -maxdepth 1 -mindepth 1 -type f -print0)
        to="$to/$name"
    elif [[ -f "$from" ]]; then
        files+=("$from")
    else
        # Neither a directory nor a regular file: nothing to do.
        return
    fi
    local flacs=()
    for file in "${files[@]}"; do
        if [[ "$file" =~ ^.*\.(jpe?g|png|gif)$ ]]; then
            # Artwork is simply copied alongside the converted audio.
            local target="artwork_${#targets[@]}"
            targets+=("$target")
            printf -- '%s : %s\n' "$target" "$parent_target"
            printf -- '\tcp %q %q\n' "$file" "$to/$(basename "$file")"
        elif [[ "$file" =~ ^.*\.flac?$ ]]; then
            flacs+=("$file")
        fi
    done
    # Track order matters for gapless encoding, so sort NUL-delimited names.
    local flacs_sorted=()
    while IFS= read -r -d '' flac; do
        flacs_sorted+=("$flac")
    done < <(for flac in "${flacs[@]}"; do printf -- '%s\0' "$flac"; done | sort -z)
    local wavs=()
    local wav_targets=()
    for file in "${flacs_sorted[@]}"; do
        local wav_target="flac2wav_${#targets[@]}"
        wav_targets+=("$wav_target")
        targets+=("$wav_target")
        printf -- '%s : %s\n' "$wav_target" "$parent_target"
        local name="$(basename "$file")"
        local wav="$to/${name%.*}.wav"
        wavs+=("$wav")
        printf -- '\tflac -s -d -o %q %q\n' "$wav" "$file" # need to convert FLAC to WAV first, because `afconvert` (in macOS 10.14.4) fails for FLAC input if the output is unconstrained VBR AAC/M4A
    done
    local m4a_targets=()
    for (( i=0; i < ${#flacs_sorted[@]}; i++ )); do
        local m4a_target="wav2m4a_${#targets[@]}"
        m4a_targets+=("$m4a_target")
        targets+=("$m4a_target")
        # Depend on neighbouring WAVs too: afconvert reads them below.
        printf -- '%s : %s' "$m4a_target" "${wav_targets[$i]}"
        if [[ $i != 0 ]]; then
            printf -- ' %s' "${wav_targets[$(($i - 1))]}"
        fi
        if [[ $i != $((${#flacs_sorted[@]} - 1)) ]]; then
            printf -- ' %s' "${wav_targets[$(($i + 1))]}"
        fi
        printf -- '\n'
        local name="$(basename "${flacs_sorted[$i]}")"
        local m4a="$to/${name%.*}.m4a"
        printf -- '\tafconvert'
        printf -- ' %q' "${wavs[$i]}" "$m4a" -d aac -s 3 -u vbrq 90
        if [[ $i != 0 ]]; then
            printf -- ' %q' --gapless-before "${wavs[$(($i - 1))]}"
        fi
        if [[ $i != $((${#flacs_sorted[@]} - 1)) ]]; then
            printf -- ' %q' --gapless-after "${wavs[$(($i + 1))]}"
        fi
        printf -- '\n'
        printf -- '\tflactags2m4atags %q %q >/dev/null\n' "${flacs_sorted[$i]}" "$m4a"
    done
    for (( i=0; i < ${#flacs_sorted[@]}; i++ )); do
        # Remove each WAV only after its own and its neighbours' M4As exist.
        local rm_wav_target="rm_wav_${#targets[@]}"
        targets+=("$rm_wav_target")
        printf -- '%s : %s' "$rm_wav_target" "${m4a_targets[$i]}"
        if [[ $i != 0 ]]; then
            printf -- ' %s' "${m4a_targets[$(($i - 1))]}"
        fi
        if [[ $i != $((${#flacs_sorted[@]} - 1)) ]]; then
            printf -- ' %s' "${m4a_targets[$(($i + 1))]}"
        fi
        printf -- '\n'
        printf -- '\trm -- %q\n' "${wavs[$i]}"
    done
}
# Generate the whole makefile on stdout and pipe it straight into make.
# sed doubles every '$' so make does not treat the %q-quoted shell text as
# make-variable references.
{
    for input in "$@"; do
        visit "$input" "$output" ""
    done
    printf -- '.PHONY : all'
    printf -- ' %s' "${targets[@]}"
    # NOTE(review): this printf has no argument, so it only emits a single
    # space (the format is re-applied with an empty %s) -- looks like a leftover.
    printf -- ' %s'
    printf -- '\nall :'
    printf -- ' %s' "${targets[@]}"
    printf -- '\n.DEFAULT_GOAL := all\n'
} | sed 's/\$/$$/g' | make -f - "${parallel_flags[@]}"
| true |
511b62703ad73e377ac271de3095722b634405c7 | Shell | ccFiona/data-analyse | /1.48 src/local_src/transform/run.sh | UTF-8 | 3,197 | 2.546875 | 3 | [] | no_license | ############################################################################
##
## Copyright (c) 2013 hunantv.com, Inc. All Rights Reserved
## $Id: transform.sh,v 0.0 2016-03-17 (Thu) 15:24:21 <tangye> Exp $
##
############################################################################
#
###
# # @file transform.sh
# # @author <tangye><<tangye@mgtv.com>>
# # @date 2016-03-17 (Thu) 15:24:21
# # @brief Move locally computed intermediate results into the input
# #        directories consumed by the daily / second-part reports.
# ##
# NOTE(review): the shebang below is not on line 1 of the file, so it is
# inert; invoke this script explicitly via sh/bash.
#!/bin/bash
##############
# prepare local results to reports input
# $1 is "today" (YYYYMMDD); everything moved below belongs to the previous day.
today=$1
yesterday=`date -d "1 day ago $today" +%Y%m%d`
intermediate_result=../../../data/intermediate_result
# NOTE(review): syn_path is defined but never used below -- confirm removable.
syn_path=../../../../daily_report/$yesterday
###########
# first part data
###########
########
# mv start & active_user
mv $intermediate_result/start_$yesterday ../../../../stat_4.x/ott_start_$yesterday
mv $intermediate_result/active_user_$yesterday ../../../../stat_4.x/activeUser_store_$yesterday
########
# dispatch vv (per-version 4.11 .. 4.15, then the "All" variants)
sh dispatchVV.sh $intermediate_result/vv_$yesterday $yesterday 4.11
sh dispatchVV.sh $intermediate_result/vv_$yesterday $yesterday 4.12
sh dispatchVV.sh $intermediate_result/vv_$yesterday $yesterday 4.13
sh dispatchVV.sh $intermediate_result/vv_$yesterday $yesterday 4.14
sh dispatchVV.sh $intermediate_result/vv_$yesterday $yesterday 4.15
sh dispatchVVAll.sh $intermediate_result/vv_$yesterday $yesterday 4.12
sh dispatchVVAll.sh $intermediate_result/vv_$yesterday $yesterday 4.11
sh dispatchVVAll.sh $intermediate_result/vv_$yesterday $yesterday 4.13
sh dispatchVVAll.sh $intermediate_result/vv_$yesterday $yesterday 4.14
sh dispatchVVAll.sh $intermediate_result/vv_$yesterday $yesterday 4.15
#######
# vv uuid (the input file is deleted afterwards, unlike other intermediates)
sh dispatchVVUuid.sh $intermediate_result/vv_uuid_$yesterday $yesterday 4.11
sh dispatchVVUuid.sh $intermediate_result/vv_uuid_$yesterday $yesterday 4.12
sh dispatchVVUuid.sh $intermediate_result/vv_uuid_$yesterday $yesterday 4.13
sh dispatchVVUuid.sh $intermediate_result/vv_uuid_$yesterday $yesterday 4.14
sh dispatchVVUuid.sh $intermediate_result/vv_uuid_$yesterday $yesterday 4.15
rm $intermediate_result/vv_uuid_$yesterday
#######
# user detail vv
mv $intermediate_result/vv_user_detail_$yesterday ../../../../user_tracking/
#########
# second part data
#########
second_part_path=../../../../second_part
#########
# pv
mv $intermediate_result/search_pv_$yesterday $second_part_path/pv/data/pv_$yesterday
########
# continue play
mv $intermediate_result/continue_play_$yesterday $second_part_path/continuePlay/data
mv $intermediate_result/simple_continue_play_$yesterday $second_part_path/continuePlay/data
########
# search
mv $intermediate_result/search_$yesterday $second_part_path/search/data/search_$yesterday
mv $intermediate_result/pv_$yesterday $second_part_path/search/data/detail_pv_$yesterday
#######
# channelaver
mv $intermediate_result/mh_$yesterday $second_part_path/mh/data
#######
# ad (currently disabled)
#mv $intermediate_result/ad_$yesterday $second_part_path/ad/data
#######
# play hour
mv $intermediate_result/vv_hour_$yesterday $second_part_path/playHour/data/play_hour_$yesterday
########
# mv dau
mv $intermediate_result/watch_uv_$yesterday $second_part_path/dau/data
## vim: set ts=2 sw=2: #
| true |
51c4e643fe5c9f8f1edf2df99cec2f93f5941711 | Shell | SmartPorridge/TNN | /examples/armlinux/build_aarch64.sh | UTF-8 | 355 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Cross-compile the TNN arm-linux example for aarch64.
CC=aarch64-linux-gnu-gcc
CXX=aarch64-linux-gnu-g++
# Location (relative to the build dir used below) of the TNN library output.
TNN_LIB_PATH=../../scripts/build_aarch64_linux/

# Build the TNN library itself first.
cd ../../scripts
./build_aarch64_linux.sh
cd -

mkdir -p build
cd build

# Pass the library path via the variable defined above (it was previously
# duplicated as a hard-coded literal while the variable went unused).
cmake .. \
    -DCMAKE_C_COMPILER=$CC \
    -DCMAKE_CXX_COMPILER=$CXX \
    -DCMAKE_BUILD_TYPE=Release \
    -DTNN_LIB_PATH=$TNN_LIB_PATH

make -j4
| true |
e64d92b5a7271aa9ff76f4388e170d58a02bef6b | Shell | anandray/devops | /scripts/supernode-scripts/azure/wolkaz/wolk-startup-scripts/scripts/nagios/plugins/check_api.sh | UTF-8 | 447 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# Nagios-style syntax check for api.php: lint the file with `php -l` and map
# the result onto the standard Nagios plugin exit codes.
STATE_OK=0
STATE_WARNING=1
STATE_CRITICAL=2
STATE_UNKNOWN=3

# Count php's success line: 1 => file parses cleanly, 0 => syntax error (or
# php/file missing). -n skips php.ini so the lint run is deterministic.
API_CHECK=$(php -ln "/var/www/vhosts/mdotm.com/httpdocs/ads/api.php" | grep 'No syntax errors detected in' | wc -l)
case "${API_CHECK}" in
	0) echo "API NOT OK. /httpdocs/ads/api.php"; exit ${STATE_CRITICAL}
	;;
	1) echo "API is OK."; exit ${STATE_OK}
	;;
	# Previously there was no default case, so an unexpected count fell
	# through and the plugin exited 0 (a false OK). Report UNKNOWN instead,
	# per the Nagios plugin guidelines.
	*) echo "API is in an unknown state."; exit ${STATE_UNKNOWN}
	;;
esac
| true |
5e4c15fcd97cc6e631e83230fc1ebb0e594c477f | Shell | southpawgeek/perlweeklychallenge-club | /challenge-119/cheok-yin-fung/bash/ch-2.sh | UTF-8 | 702 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# The Weekly Challenge - 119
# Task 2 Sequence without 1-on-1
# Usage: $ chmod +x ch-2.sh
#        $ ./ch-2.sh N
#
# Prints the N-th member (0-based, arr[0] is a placeholder) of the increasing
# sequence of numbers whose digits are only 1, 2, 3 and which never contain
# two consecutive 1s.

N=$1

# Seed: index 0 is a placeholder; 1, 2 and 3 are the one-digit members.
arr=(0 1 2 3)

x=0
y=0

# Keep growing until index N exists (length must exceed N). The original
# comparison was '-lt', which could stop with length == N and then print a
# bogus 0 for the unset arr[N].
while [ ${#arr[@]} -le "$N" ]
do
    y=${#arr[@]}
    # h intentionally runs up to y INCLUSIVE: by the time h reaches y the
    # array has already grown past that index, so element y is expanded in
    # this same pass and the next pass resumes at x+1 = y+1.
    for (( h = x + 1; h <= y; h++ ))
    do
        # A member ending in digit 1 must not be extended with another 1.
        if (( arr[h] % 10 != 1 )) ;
        then arr+=( $(( arr[h] * 10 + 1 )) ) ;
        fi
        arr+=( $(( arr[h] * 10 + 2 )) )
        arr+=( $(( arr[h] * 10 + 3 )) )
    done
    x=$y
done

echo "${arr[$N]}"

# ref:
# https://stackoverflow.com/questions/19417015/how-to-copy-an-array-in-bash
# https://linuxhandbook.com/bash-arrays/
# http://rosettacode.org/wiki/Loops/For_with_a_specified_step#Bourne_Shell
# Bash Guide for Beginners
# Classic Shell Scripting
| true |
5374ff88be2319e06c05fa1b3240b02f6430fff0 | Shell | lx308033262/my-script | /checksum_cdn.sh | UTF-8 | 803 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Verify CDN consistency: fetch every URL listed in api.txt once via normal
# DNS and then via pinned edge hosts, comparing MD5 sums of the responses.
# Prints 0 on success; logs mismatches to /tmp/cdn_error.txt and exits nonzero.
### api.txt content like https://www.baidu.com/xxx.html
dir=$(dirname "$0")
file=${dir}/api.txt
for url in $(cat "$file")
do
# Hostname (for --resolve) and basename (used to name the temp files).
host=$(echo "$url" | awk -F/ '{print $3}')
prefix=$(echo "$url" | awk -F/ '{print $NF}')
# Baseline fetch through normal DNS resolution.
curl -s "$url" > "/tmp/${prefix}-0.txt"
# Edge-node addresses to pin. NOTE(review): these look like placeholders, and
# only 2 entries exist while the loop below indexes up to hosts[3] (empty
# --resolve targets) -- confirm the intended host list.
hosts=('host1' 'host2')
md50=
for i in {0..3}
do
if [ "$i" -gt 0 ];then
curl -s "$url" --resolve "${host}:443:${hosts[$i]}" > "/tmp/${prefix}-${i}.txt"
fi
# MD5 of this fetch; i=0 is the reference value (replaces the old eval-based
# md5${i}/md5mid indirection, which is what ${!var} / direct compare does).
md5=$(md5sum "/tmp/${prefix}-${i}.txt" | awk '{print $1}')
if [ "$i" -eq 0 ];then
md50=$md5
elif [ "${md50}" != "${md5}" ] ;then
echo $url ${md50} ${md5} ${hosts[$i]} >> /tmp/cdn_error.txt
echo $url ${md50} ${md5} ${hosts[$i]}
# The script previously used 'exit 500'; shell exit codes are 0-255, so the
# observed status was 500 % 256 = 244. Keep that status, but explicitly.
exit 244
fi
done
done
echo 0
| true |
01b49b329ff4386810d5a09e406b938fe2274b50 | Shell | bluejumper/dotfiles | /.zshrc | UTF-8 | 2,522 | 2.671875 | 3 | [
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | # zsh settings
# Get set, umask!
umask 027
# Set name of the theme to load.
ZSH_THEME="duly-oneline"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
plugins=(git)
# oh-my-zsh installation.
export ZSH=~/.oh-my-zsh
source $ZSH/oh-my-zsh.sh
# -- preferences
#bindkey -e # zsh emacs-mode.
bindkey -v # zsh vi-mode!
export KEYTIMEOUT=10 # Delay between entering from 'viins' to 'vicmd'.
# Re-bind keys that vi-mode leaves unbound.
bindkey -M viins '^[[3~' delete-char # Delete
bindkey -M viins '^[[H' beginning-of-line # Home
bindkey -M viins '^[[F' end-of-line # End
# -- environment
# ZSH_CUSTOM=/path/to/new-custom-folder
export PATH=$PATH:~/bin # User binaries.
export PATH=$PATH:~/go/bin # Golang, binaries installed with 'go get'.
export PATH=$PATH:~/.cabal/bin # Haskell, binaries installed with cabal.
export GOPATH=~/go
export EDITOR="nvim"
# Use gpg-agent as the authentication agent for SSH communications.
unset SSH_AGENT_PID
# Only point SSH_AUTH_SOCK at gpg-agent's socket when this shell was not
# started by gpg-agent itself (gnupg_SSH_AUTH_SOCK_by holds the starter PID).
if [ "${gnupg_SSH_AUTH_SOCK_by:-0}" -ne $$ ]; then
  export SSH_AUTH_SOCK="${XDG_RUNTIME_DIR}/gnupg/S.gpg-agent.ssh"
  # export SSH_AUTH_SOCK="$(gpgconf --list-dirs agent-ssh-socket)"
fi
# -- utility directories
# Named directories: usable at the prompt as ~mount, ~repo, ~src, ~sh.
hash -d mount=/mnt/user/${USER}/
hash -d repo=${HOME}/repo/
hash -d src=${HOME}/src/
hash -d sh=${HOME}/sh/
# -- aliases
alias minecraft="~sh/minecraft.sh"
alias rgr="ranger"
alias la="ls -lAh --color"
alias lss="ls -sh1 --color"
alias duu="du -hd 1"
alias ip="ip -c"
alias hd="hexdump -C"
alias bc="bc -q"
alias udc="udisksctl"
#alias ku="kubectl"
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# ssh
# export SSH_KEY_PATH="~/.ssh/rsa_id"
| true |
10bc6e5cd5c0595d9850c6b19a0386d56adb8e84 | Shell | GochoMugo/gh-pages | /lib/deps.sh | UTF-8 | 222 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#
# Installs any dependencies that we require to build the site
#
# Currently there are none, so this script is a no-op that just logs.
# Stop on error
set -e
# utilities
# Provides the 'log' helper used below; DATA_DIR must be set by the caller.
source "${DATA_DIR}/utils.sh"
# script variables
LOG_TITLE="deps"
log "nothing to install" 0
| true |
c72d445872e8b9c600a6abcad8bd4ea0b2292d4b | Shell | blakewrege/WMU-CS2230-master | /resources/Linux_install_script.sh | UTF-8 | 5,516 | 3.828125 | 4 | [] | no_license | if ! ps o command= $$ | grep -q 'bash'; then echo "Please run with bash, not sh"; exit 1; fi; if grep -r $'\r' "$0" > /dev/null; then echo "Incorrect file endings detected. Fixing..."; tr -d '\r' < "$0" > "$0~"; mv "$0~" "$0"; chmod +x "$0"; echo "Please re-run this file"; exit 0; fi;
# NOTE: Use $HOME instead of tilde (~) when in quotes
# Fail fast and echo every line so users can follow installer progress.
set -e # Abort if any command fails
set -o verbose # Show commands being run
# Do NOT run as sudo, it will ask for passwords when needed
# Edit this line to desired, or leave to use .local in HOME directory.
# Path can't have spaces or energia can't compile
INSTALL_PATH_LINUX="$HOME/.local"
INSTALL_PATH_MAC="$HOME/Applications"
# Here was the default before
#INSTALL_PATH_LINUX="/opt"
# Change this to where you downloaded the tgz file
# (macOS downloads a .dmg, Linux a .tgz)
if [ "$(uname)" == "Darwin" ]; then
    DOWNLOADED_PATH="$HOME/Downloads/energia.dmg"
else
    DOWNLOADED_PATH="$HOME/Downloads/energia.tgz"
fi
# Check for needed commands
if ! which unzip > /dev/null; then
    printf "\033[0;31m unzip command not found, please install it\033[0m\n"
    exit 1
fi
# Handle if file not found
# NOTE(review): if the user answers "n" here, the script continues and the
# unpack step below will fail on the missing archive -- confirm intended.
if [ ! -f "$DOWNLOADED_PATH" ]; then
    echo "Unable to find energia at '$DOWNLOADED_PATH'"
    echo "Either move it there or update this script"
    echo
    read -p "Do you want to download it? (y/N) " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        # Download Appropriate Version for Architecture
        printf "\033[0;32m>> Downloading Energia ...\033[0m\n"
        mkdir -p ~/Downloads
        if [ "$(uname)" == "Darwin" ]; then
            curl -# -o ~/Downloads/energia.dmg -kL https://yakko.cs.wmich.edu/~sphinx/energia/energia-0101E0017-macosx-signed.dmg
            DOWNLOADED_PATH="$HOME/Downloads/energia.dmg"
        else
            if [ `getconf LONG_BIT` = "64" ]
            then
                # 64-bit Download
                wget -O ~/Downloads/energia.tgz https://yakko.cs.wmich.edu/~sphinx/energia/energia-0101E0017-linux64.tgz
            else
                # 32-bit Download
                wget -O ~/Downloads/energia.tgz https://yakko.cs.wmich.edu/~sphinx/energia/energia-0101E0017-linux.tgz
            fi
            DOWNLOADED_PATH="$HOME/Downloads/energia.tgz"
        fi
    fi
fi
# Clear out any previous Linux installation so the unpack below starts clean.
# (The original test used '! -f', which is always true for a directory, so
# the "Removing old installation" branch ran even on a fresh system; '-d' is
# the correct test and rm -rf remains idempotent either way.)
if [ -d "$INSTALL_PATH_LINUX/energia" ]; then
    printf "\033[0;32m>> Removing old installation ...\033[0m\n"
    rm -rf "$INSTALL_PATH_LINUX/energia"
fi
mkdir -p "$INSTALL_PATH_LINUX"
printf "\033[0;32m>> Unpacking ...\033[0m\n"
if [ "$(uname)" == "Darwin" ]; then
    # macOS: mount the dmg, copy the app bundle, then unmount.
    hdiutil attach "$DOWNLOADED_PATH" -quiet
    cp -R "/Volumes/energia-0101E0017-macosx-signed/Energia.app" "$INSTALL_PATH_MAC"
    hdiutil unmount "/Volumes/energia-0101E0017-macosx-signed"
else
    # Linux: extract and normalize the versioned directory name to "energia".
    tar -xf "$DOWNLOADED_PATH" -C "$INSTALL_PATH_LINUX" && mv "$INSTALL_PATH_LINUX"/energia* "$INSTALL_PATH_LINUX/energia"
fi
# Append /bin to PATH
printf "\033[0;32m>> Appending PATH ...\033[0m\n"
if [ "$(uname)" == "Darwin" ]; then
    # '$a\' forces a trailing newline at end of file before appending.
    # NOTE(review): this macOS branch builds paths from $INSTALL_PATH_LINUX,
    # but Energia.app was copied to $INSTALL_PATH_MAC above -- looks wrong;
    # confirm.
    sed -i '' -e '$a\' ~/.bash_profile
    echo 'export PATH='"$INSTALL_PATH_LINUX"'/Energia.app/Contents/Resources/Java/hardware/tools/msp430/bin:$PATH' >> ~/.bash_profile
    echo 'export PATH='"$INSTALL_PATH_LINUX"'/Energia.app/Contents/Resources/Java/hardware/tools/msp430/mspdebug:$PATH' >> ~/.bash_profile
    export PATH="$INSTALL_PATH_LINUX/Energia.app/Contents/Resources/Java/hardware/tools/msp430/bin:$PATH"
    export PATH="$INSTALL_PATH_LINUX/Energia.app/Contents/Resources/Java/hardware/tools/msp430/mspdebug:$PATH"
else
    sed -i -e '$a\' ~/.bashrc
    echo 'export PATH='"$INSTALL_PATH_LINUX"'/energia/hardware/tools/msp430/bin:$PATH' >> ~/.bashrc
    export PATH="$INSTALL_PATH_LINUX/energia/hardware/tools/msp430/bin:$PATH"
    printf "\033[0;32m>> Please enter your password to update udev and groups ...\033[0m\n"
    # Add Rule to udev
    printf "\033[0;32m>> Adding Rule to udev ...\033[0m\n"
    echo 'ATTRS{idVendor}=="0451", ATTRS{idProduct}=="f432", MODE="0660", GROUP="plugdev"' | sudo tee /etc/udev/rules.d/46-TI_launchpad.rules > /dev/null
    if sudo restart udev &> /dev/null; then
        echo
    else
        # Use service instead of restart command, if above failed
        sudo service udev restart
    fi
    # Add User to dialout
    printf "\033[0;32m>> Adding you to dialout group ...\033[0m\n"
    # Ignore errors (caused by already in group)
    # NOTE(review): $SUDO_USER is normally empty here because the script is
    # meant to run WITHOUT sudo (see header) -- confirm the intended user.
    if sudo adduser $SUDO_USER dialout &> /dev/null; then
        echo
    fi
fi
# Download Libemb
# Fetches and builds the libemb support library into the msp430 toolchain.
printf "\033[0;32m>> Downloading Libemb ...\033[0m\n"
if [ "$(uname)" == "Darwin" ]; then
    curl -# -o ~/Downloads/libemb.zip -kL https://github.com/chrissphinx/libemb/archive/master.zip
else
    wget -O ~/Downloads/libemb.zip https://github.com/chrissphinx/libemb/archive/master.zip
fi
# Make & Install Libemb
printf "\033[0;32m>> Installing Libemb ...\033[0m\n"
cd ~/Downloads
unzip -q -u libemb.zip
cd libemb-master
if [ "$(uname)" == "Darwin" ]; then
    INSTDIR="$INSTALL_PATH_MAC/Energia.app/Contents/Resources/Java/hardware/tools/msp430/msp430" TARCH=MSP430 make install --silent
else
    INSTDIR="$INSTALL_PATH_LINUX/energia/hardware/tools/msp430/msp430" TARCH=MSP430 make install --silent
fi
cd
# Cleanup
# Uncomment if you want to remove old files
#rm -rf "$DOWNLOADED_PATH" ~/Downloads/libemb-master ~/Downloads/libemb.zip
# Finished
printf "\033[0;32m>> Done!\033[0m\n"
printf "\033[0;31m Please logout or restart your computer\n to access the msp430 toolchain\n Or you could try just running: source ~/.bashrc Or: source ~/.bash_profile on a mac\033[0m\n"
997c33b42b1c380b3a1bb43382c0718258a8d349 | Shell | epcim/salty-whales | /build.d/50-salt-cloud | UTF-8 | 150 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# Update salt-cloud providers/profiles; skip cleanly when the binary is absent.
set -eo pipefail
set -xv

# The original test was inverted: it printed "No salt-cloud found" when the
# command WAS found, then ran 'salt-cloud -u' unconditionally (failing under
# 'set -e' on hosts without it). Bail out successfully instead.
if ! type -a salt-cloud >/dev/null; then
  echo "[$0] No salt-cloud found. Not updating." >&2
  exit 0
fi

salt-cloud -u
| true |
d412620623142a2bc328c9b270001d7502161cae | Shell | OCEANOFANYTHINGOFFICIAL/git-lfs | /docker/run_dockers.bsh | UTF-8 | 3,807 | 4.03125 | 4 | [
"MIT",
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
# Usage:
#  ./run_dockers.bsh # Run all the docker images
#  ./run_dockers.bsh centos_6 centos_7 # Run only CentOS 6 & 7 image
#  ./run_dockers.bsh centos_6 -- bash #Runs bash in the CentOS 6 docker
#
# Special Environments Variables
#  REPO_HOSTNAME - Override the hostname for all the repos generated/tested
#  DOCKER_AUTOPULL - Default 1. If set to 0, it will not build docker images
#                    before running
#  AUTO_REMOVE - Default 1. If set to 0, it will not automatically delete the
#                docker instance when done. This can be useful for a post mortem
#                analysis. Just make sure you clean up the docker instances
#                manually
set -eu
#Mingw32 auto converts /drive/dir/blah to drive:\dir\blah ... Can't have that.
if [[ `uname` == MINGW* ]]; then
  MINGW_PATCH='/'
else
  MINGW_PATCH=''
fi
CUR_DIR=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd)
REPO_DIR=$(cd ${CUR_DIR}/..; pwd)
PACKAGE_DIR=${REPO_DIR}/repos
#Pre-create the repo output dirs; '|| :' tolerates pre-existing paths.
mkdir -p ${PACKAGE_DIR}/centos || :
mkdir -p ${PACKAGE_DIR}/debian || :
#If you are not in docker group and you have sudo, default value is sudo
: ${SUDO=`if ( [ ! -w /var/run/docker.sock ] && id -nG | grep -qwv docker && [ "${DOCKER_HOST:+dh}" != "dh" ] ) && command -v sudo > /dev/null 2>&1; then echo sudo; fi`}
function split_image_name()
{ #$1 - image name like "centos_7"
  #Splits $1 on '_' into the global array IMAGE_INFO, e.g. (centos 7).
  #(NOTE: despite the old comment here, this does NOT set IMAGE_NAME;
  # the caller's loop variable already holds it.)
  local IFS=_
  IMAGE_INFO=($1)
}
# Parse Arguments
IMAGES=()
PRUNE=
while [[ $# > 0 ]]; do
  if [ "$1" = "--prune" ]; then
    PRUNE=t
  elif [ "$1" == "--" ]; then
    shift
    # Everything after "--" becomes the command run inside the container.
    DOCKER_CMD="${@}"
    break
  else
    IMAGES+=("$1")
  fi
  shift
done
if [[ ${#IMAGES[@]} == 0 ]]; then
  # If you change this list, change script/upload as well.
  IMAGES=(centos_7 centos_8 debian_9 debian_10 debian_11)
fi
mkdir -p "${PACKAGE_DIR}"
#Run docker to build packages
for IMAGE_NAME in "${IMAGES[@]}"; do
  split_image_name "${IMAGE_NAME}" #set IMAGE_INFO, e.g. (centos 7)
  #Auto pull docker unless DOCKER_AUTOPULL=0
  if [[ ${DOCKER_AUTOPULL-1} != 0 ]]; then
    $SUDO docker pull gitlfs/build-dockers:${IMAGE_NAME}
  fi
  #It CAN'T be empty () with set -u... So I put some defaults in here
  OTHER_OPTIONS=("-t")
  if tty >/dev/null; then
    OTHER_OPTIONS+=("-i")
  fi
  if [ "${AUTO_REMOVE-1}" == "1" ]; then
    OTHER_OPTIONS+=("--rm")
  fi
  #A non-empty <image>.key enables signing via the preloaded gpg-agent container.
  if [ -s ${CUR_DIR}/${IMAGE_NAME}.key ]; then
    CONTAINER_NAME=git-lfs-gpg ${CUR_DIR}/gpg-agent_preload.bsh
    OTHER_OPTIONS+=("--volumes-from" "git-lfs-gpg")
    OTHER_OPTIONS+=("-v" "${CUR_DIR}/${IMAGE_NAME}.key:${MINGW_PATCH}/tmp/${IMAGE_NAME}.key")
    OTHER_OPTIONS+=("-e" "$(docker exec git-lfs-gpg cat ${MINGW_PATCH}/tmp/gpg-agent/gpg_agent_info)")
    #Do I need this? Or can I get away with hardcoding???
    #GPG_AGENT_INFO=/tmp/gpg-agent/S.gpg-agent:1:1
  fi
  #UID/GID the build should chown artifacts to; fall back to the sudo caller
  #when running as root.
  FINAL_UID=$(id -u)
  FINAL_GID=$(id -g)
  if [[ $FINAL_UID == 0 ]]; then
    FINAL_UID=${SUDO_UID-}
  fi
  if [[ $FINAL_GID == 0 ]]; then
    FINAL_GID=${SUDO_GID-}
  fi
  echo Compiling LFS in docker image ${IMAGE_NAME}
  IMAGE_REPO_DIR="${PACKAGE_DIR}"/"${IMAGE_INFO[0]}"/"${IMAGE_INFO[1]}"
  $SUDO docker run "${OTHER_OPTIONS[@]}" ${DOCKER_OTHER_OPTIONS-} \
         -e USER=root \
         -e REPO_HOSTNAME=${REPO_HOSTNAME:-git-lfs.github.com} \
         -e FINAL_UID=${FINAL_UID} \
         -e FINAL_GID=${FINAL_GID} \
         -v "${MINGW_PATCH}${REPO_DIR}:/src" \
         -v "${MINGW_PATCH}${IMAGE_REPO_DIR}:/repo" \
         gitlfs/build-dockers:${IMAGE_NAME} ${DOCKER_CMD-}
  if [ -n "$PRUNE" ]
  then
    $SUDO docker rmi -f "gitlfs/build-dockers:${IMAGE_NAME}"
  fi
done
echo "Docker run completed successfully!"
| true |
4e854a0dd085520cf9e9d1ea33d7a1246fffad6c | Shell | cvar-upm/cvg_quadrotor_swarm | /installation/addSubmodules.sh | UTF-8 | 28,378 | 3.25 | 3 | [
"BSD-3-Clause",
"MIT"
] | permissive | #!/bin/bash
echo "Configuring stack submodules .."
CONFIG_SUBMODULES_FILE=$1
#Reading configuration of which submodules include
. $CONFIG_SUBMODULES_FILE
#Adquire bitbucket info
echo "Acquiring bitbucket user info"
echo -n " -Bitbucket username: "
read bitbucketUsername
echo -n " -Bitbucket password: "
read -s bitbucketPassword
echo ""
#Loop for git submodule init
echo "Adding submodules"
#mav_tools
if [[ ${mav_tools} && $mav_tools = true ]]
then
MODULE_PATH=stack/droneDrivers/driversPlatforms/driverAsctecPelican/mav_tools
REPO_URL=https://bitbucket.org/joselusl/mav_tools.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#asctec_drivers
if [[ ${asctec_drivers} && $asctec_drivers = true ]]
then
MODULE_PATH=stack/droneDrivers/driversPlatforms/driverAsctecPelican/asctec_drivers
REPO_URL=https://bitbucket.org/joselusl/asctec_drivers.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#driverPelicanROSModule
if [[ ${driverPelicanROSModule} && $driverPelicanROSModule = true ]]
then
MODULE_PATH=stack/droneDrivers/driversPlatforms/driverAsctecPelican/driverPelicanROSModule
REPO_URL=https://bitbucket.org/ramon_suarez_fernandez/driverpelicanrosmodule.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#okto_driver
if [[ ${okto_driver} && $okto_driver = true ]]
then
MODULE_PATH=stack/droneDrivers/driversPlatforms/driverMikrokopterOkto/okto_driver
REPO_URL=https://bitbucket.org/Vision4UAV/okto_driver.git
REPO_BRANCH=catkin
./installation/addBitbucketPrivRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH $bitbucketUsername $bitbucketPassword > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#driverOktoROSModule
if [[ ${driverOktoROSModule} && $driverOktoROSModule = true ]]
then
MODULE_PATH=stack/droneDrivers/driversPlatforms/driverMikrokopterOkto/driverOktoROSModule
REPO_URL=https://bitbucket.org/joselusl/driveroktorosmodule.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#ardrone_autonomy
if [[ ${ardrone_autonomy} && $ardrone_autonomy = true ]]
then
MODULE_PATH=stack/droneDrivers/driversPlatforms/driverParrotARDrone/ardrone_autonomy
REPO_URL=https://github.com/AutonomyLab/ardrone_autonomy.git
REPO_BRANCH=indigo-devel
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#driverParrotARDroneROSModule
if [[ ${driverParrotARDroneROSModule} && $driverParrotARDroneROSModule = true ]]
then
MODULE_PATH=stack/droneDrivers/driversPlatforms/driverParrotARDrone/driverParrotARDroneROSModule
REPO_URL=https://bitbucket.org/joselusl/driverparrotardronerosmodule.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#driver_camera_ueye
if [[ ${driver_camera_ueye} && $driver_camera_ueye = true ]]
then
MODULE_PATH=stack/droneDrivers/driversSensors/driver_camera_ueye/driver_camera_ueye
REPO_URL=https://bitbucket.org/jespestana/driver_camera_ueye.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#driver_camera_ueye_ROSModule
if [[ ${driver_camera_ueye_ROSModule} && $driver_camera_ueye_ROSModule = true ]]
then
MODULE_PATH=stack/droneDrivers/driversSensors/driver_camera_ueye/driver_camera_ueye_ROSModule
REPO_URL=https://bitbucket.org/jespestana/driver_camera_ueye_rosmodule.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#px-ros-pkg
if [[ ${px_ros_pkg} && $px_ros_pkg = true ]]
then
MODULE_PATH=stack/droneDrivers/driversSensors/driver_px4flow/px-ros-pkg
REPO_URL=https://github.com/cvg/px-ros-pkg.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#driver_px4flow_interface_ROSModule
if [[ ${driver_px4flow_interface_ROSModule} && $driver_px4flow_interface_ROSModule = true ]]
then
MODULE_PATH=stack/droneDrivers/driversSensors/driver_px4flow/driver_px4flow_interface_ROSModule
REPO_URL=https://bitbucket.org/jespestana/driver_px4flow_interface_rosmodule.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#droneObstacleProcessorVisualMarks
if [[ ${droneObstacleProcessorVisualMarks} && $droneObstacleProcessorVisualMarks = true ]]
then
MODULE_PATH=stack/droneEnvironmentUnderstanding/droneObstacleProcessorVisualMarks/droneObstacleProcessorVisualMarks
REPO_URL=https://bitbucket.org/joselusl/droneobstacleprocessorvisualmarks.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#droneObstacleProcessorVisualMarksROSModule
if [[ ${droneObstacleProcessorVisualMarksROSModule} && $droneObstacleProcessorVisualMarksROSModule = true ]]
then
MODULE_PATH=stack/droneEnvironmentUnderstanding/droneObstacleProcessorVisualMarks/droneObstacleProcessorVisualMarksROSModule
REPO_URL=https://bitbucket.org/joselusl/droneobstacleprocessorvisualmarksrosmodule.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#robotLocalizationROSModule
if [[ ${robotLocalizationROSModule} && $robotLocalizationROSModule = true ]]
then
MODULE_PATH=stack/droneEnvironmentUnderstanding/robotLocalizationROSModule
REPO_URL=https://bitbucket.org/ramon_suarez_fernandez/robotlocalizationrosmodule.git
REPO_BRANCH=master
./installation/addBitbucketPrivRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH $bitbucketUsername $bitbucketPassword > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#dronePBVSPositionMidLevelController
if [[ ${dronePBVSPositionMidLevelController} && $dronePBVSPositionMidLevelController = true ]]
then
MODULE_PATH=stack/droneHighLevelControl/dronePBVSPositionMidLevelController
REPO_URL=https://bitbucket.org/jespestana/dronepbvspositionmidlevelcontroller.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#droneSpeedPositionMidLevelController
if [[ ${droneSpeedPositionMidLevelController} && $droneSpeedPositionMidLevelController = true ]]
then
MODULE_PATH=stack/droneHighLevelControl/droneSpeedPositionMidLevelController
REPO_URL=https://bitbucket.org/jespestana/dronespeedpositionmidlevelcontroller.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#droneTrajectoryController
if [[ ${droneTrajectoryController} && $droneTrajectoryController = true ]]
then
MODULE_PATH=stack/droneHighLevelControl/droneTrajectoryController
REPO_URL=https://bitbucket.org/jespestana/dronetrajectorycontroller.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#droneTrajectoryControllerROSModule
if [[ ${droneTrajectoryControllerROSModule} && $droneTrajectoryControllerROSModule = true ]]
then
MODULE_PATH=stack/droneHighLevelControl/droneTrajectoryControllerROSModule
REPO_URL=https://bitbucket.org/jespestana/dronetrajectorycontrollerrosmodule.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#droneTrajectoryStateMachine
if [[ ${droneTrajectoryStateMachine} && $droneTrajectoryStateMachine = true ]]
then
MODULE_PATH=stack/droneHighLevelControl/droneTrajectoryStateMachine
REPO_URL=https://bitbucket.org/jespestana/dronetrajectorystatemachine.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#droneLoggerROSModule
if [[ ${droneLoggerROSModule} && $droneLoggerROSModule = true ]]
then
MODULE_PATH=stack/droneLogging/droneLoggerROSModule
REPO_URL=https://bitbucket.org/jespestana/droneloggerrosmodule.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
#dronePelicanLoggerROSModule
if [[ ${dronePelicanLoggerROSModule} && $dronePelicanLoggerROSModule = true ]]
then
MODULE_PATH=stack/droneLogging/dronePelicanLoggerROSModule
REPO_URL=https://bitbucket.org/jespestana/dronepelicanloggerrosmodule.git
REPO_BRANCH=master
./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
echo " -Added package in: $MODULE_PATH"
fi
# The per-module "check flag / add repo" blocks below were collapsed into
# a single data-driven helper.  Behavior is unchanged: a module is added
# only when its enable flag is set to "true", the helper script's output
# is discarded, and the same per-module summary line is printed.
#
# add_module FLAG_NAME MODULE_PATH REPO_URL [priv]
#   FLAG_NAME   name of the variable that enables this module
#   MODULE_PATH checkout location inside the stack tree
#   REPO_URL    git repository to clone (branch is always master)
#   priv        pass "priv" for private Bitbucket repositories, which
#               additionally need $bitbucketUsername / $bitbucketPassword
add_module()
{
    # Indirect expansion: $1 holds the *name* of the enable-flag variable.
    local flag_value=${!1}
    if [[ ${flag_value} && $flag_value = true ]]
    then
        # These assignments intentionally stay global, matching the
        # side effects of the original inline blocks.
        MODULE_PATH=$2
        REPO_URL=$3
        REPO_BRANCH=master
        if [ "${4:-}" = "priv" ]
        then
            ./installation/addBitbucketPrivRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH $bitbucketUsername $bitbucketPassword > /dev/null
        else
            ./installation/addPublRepo.sh $REPO_BRANCH $REPO_URL $MODULE_PATH > /dev/null
        fi
        echo " -Added package in: $MODULE_PATH"
    fi
}
add_module lib_cvglogger stack/droneLogging/lib_cvglogger https://bitbucket.org/jespestana/lib_cvglogger.git
add_module lib_cvgloggerROS stack/droneLogging/lib_cvgloggerROS https://bitbucket.org/jespestana/lib_cvgloggerros.git
add_module droneMidLevelAutopilot stack/droneMidLevelControl/droneMidLevelAutopilot https://bitbucket.org/jespestana/dronemidlevelautopilot.git
add_module droneMidLevelAutopilotROSModule stack/droneMidLevelControl/droneMidLevelAutopilotROSModule https://bitbucket.org/jespestana/dronemidlevelautopilotrosmodule.git
add_module droneIARC14MissionSchedulerROSModule stack/droneMissionPlanning/droneIARC14MissionSchedulerROSModule https://bitbucket.org/jespestana/droneiarc14missionschedulerrosmodule.git
add_module droneMissionPlanner stack/droneMissionPlanning/droneMissionPlanner https://bitbucket.org/joselusl/dronemissionplanner.git
add_module droneMissionPlannerROSModule stack/droneMissionPlanning/droneMissionPlannerROSModule https://bitbucket.org/joselusl/dronemissionplannerrosmodule.git
add_module arucoEye stack/dronePerception/droneArucoEye/arucoEye https://bitbucket.org/joselusl/arucoeye.git
add_module droneArucoEye stack/dronePerception/droneArucoEye/droneArucoEye https://bitbucket.org/joselusl/dronearucoeye.git
add_module droneArucoEyeROSModule stack/dronePerception/droneArucoEye/droneArucoEyeROSModule https://bitbucket.org/joselusl/dronearucoeyerosmodule.git
add_module lib_aruco stack/dronePerception/droneArucoEye/lib_aruco https://bitbucket.org/joselusl/lib_aruco.git
add_module droneComputerVisionIARC14 stack/dronePerception/droneComputerVisionIARC14/droneComputerVisionIARC14 https://bitbucket.org/Vision4UAV/dronecomputervisioniarc14.git priv
add_module droneComputerVisionIARC14ROSModule stack/dronePerception/droneComputerVisionIARC14/droneComputerVisionIARC14ROSModule https://bitbucket.org/joselusl/dronecomputervisioniarc14rosmodule.git
add_module droneCV2DTo3DROSModule stack/dronePerception/droneCV2DTo3DROSModule https://bitbucket.org/joselusl/dronecv2dto3drosmodule.git
add_module droneEKFStateEstimator stack/droneSelfLocalization/droneOdometryPoseEstimator/droneEKFStateEstimator https://bitbucket.org/jespestana/droneekfstateestimator.git
add_module droneEKFStateEstimatorROSModule stack/droneSelfLocalization/droneOdometryPoseEstimator/droneEKFStateEstimatorROSModule https://bitbucket.org/jespestana/droneekfstateestimatorrosmodule.git
add_module droneVisualMarkersLocalizer stack/droneSelfLocalization/droneVisualMarkersLocalizer/droneVisualMarkersLocalizer https://bitbucket.org/joselusl/dronevisualmarkerslocalizer.git
add_module droneVisualMarkersLocalizerROSModule stack/droneSelfLocalization/droneVisualMarkersLocalizer/droneVisualMarkersLocalizerROSModule https://bitbucket.org/joselusl/dronevisualmarkerslocalizerrosmodule.git
add_module referenceFramesROS stack/droneSelfLocalization/droneVisualMarkersLocalizer/referenceFramesROS https://bitbucket.org/joselusl/referenceframesros.git
add_module droneOktoSimulatorROSModule stack/droneSimulators/droneSimulator/droneOktoSimulatorROSModule https://bitbucket.org/jespestana/droneoktosimulatorrosmodule.git
add_module dronePelicanSimulatorROSModule stack/droneSimulators/droneSimulator/dronePelicanSimulatorROSModule https://bitbucket.org/jespestana/dronepelicansimulatorrosmodule.git
add_module droneSimulator stack/droneSimulators/droneSimulator/droneSimulator https://bitbucket.org/jespestana/droneSimulator.git
add_module droneSimulatorROSModule stack/droneSimulators/droneSimulator/droneSimulatorROSModule https://bitbucket.org/joselusl/dronesimulatorrosmodule.git
add_module droneVisualMarkersEyeSimulator stack/droneSimulators/droneVisualMarkersEyeSimulator/droneVisualMarkersEyeSimulator https://bitbucket.org/joselusl/dronevisualmarkerseyesimulator.git
add_module droneVisualMarkersEyeSimulatorROSModule stack/droneSimulators/droneVisualMarkersEyeSimulator/droneVisualMarkersEyeSimulatorROSModule https://bitbucket.org/joselusl/dronevisualmarkerseyesimulatorrosmodule.git
add_module droneSimulatorGridROSModule stack/droneSimulators/simulatorsIARC14/droneSimulatorGridROSModule https://bitbucket.org/joselusl/dronesimulatorgridrosmodule.git
add_module droneSimulatoriRobotCVROSModule stack/droneSimulators/simulatorsIARC14/droneSimulatoriRobotCVROSModule https://bitbucket.org/jespestana/dronesimulatorirobotcvrosmodule.git priv
add_module robotSimulatorROSModule stack/droneSimulators/simulatorsIARC14/robotSimulatorROSModule https://bitbucket.org/ramon_suarez_fernandez/robotsimulatorrosmodule.git priv
add_module droneModuleInterfaceROS stack/droneStackBasics/droneModuleInterfaceROS https://bitbucket.org/jespestana/dronemoduleinterfaceros.git
add_module droneModuleROS stack/droneStackBasics/droneModuleROS https://bitbucket.org/joselusl/dronemoduleros.git
add_module droneMsgsROS stack/droneStackBasics/droneMsgsROS https://bitbucket.org/joselusl/dronemsgsros.git
add_module droneBrainROSModule stack/droneSupervising/droneBrainROSModule https://bitbucket.org/jespestana/dronebrainrosmodule.git
add_module droneIARC14BrainROSModule stack/droneSupervising/droneIARC14BrainROSModule https://bitbucket.org/jespestana/droneiarc14brainrosmodule.git
add_module droneTrajectoryPlanner stack/droneTrajectoryPlanning/droneTrajectoryPlanner https://bitbucket.org/joselusl/dronetrajectoryplanner.git
add_module droneTrajectoryPlannerROSModule stack/droneTrajectoryPlanning/droneTrajectoryPlannerROSModule https://bitbucket.org/joselusl/dronetrajectoryplannerrosmodule.git
add_module droneYawCommanderROSModule stack/droneYawCommanding/droneYawCommanderROSModule https://bitbucket.org/joselusl/droneyawcommanderrosmodule.git
add_module droneArchitectureRvizInterfaceROSModule stack/HMI/droneArchitectureRvizInterfaceROSModule https://bitbucket.org/ramon_suarez_fernandez/dronearchitecturervizinterfacerosmodule.git
add_module droneEkfSimulatorRvizROSModule stack/HMI/droneEkfSimulatorRvizROSModule https://bitbucket.org/ramon_suarez_fernandez/droneekfsimulatorrvizrosmodule.git
add_module droneInterfaceROSModule stack/HMI/droneInterfaceROSModule https://bitbucket.org/joselusl/droneinterfacerosmodule.git
add_module droneSimulatorRvizROSModule stack/HMI/droneSimulatorRvizROSModule https://bitbucket.org/ramon_suarez_fernandez/dronesimulatorrvizrosmodule.git priv
add_module robotSimulatorRvizROSModule stack/HMI/IARC14/robotSimulatorRvizROSModule https://bitbucket.org/ramon_suarez_fernandez/robotsimulatorrvizrosmodule.git priv
add_module lib_cvgekf stack/libraries/lib_cvgekf https://bitbucket.org/joselusl/lib_cvgekf.git
add_module lib_cvgthread stack/libraries/lib_cvgthread https://bitbucket.org/jespestana/lib_cvgthread.git
add_module lib_cvgutils stack/libraries/lib_cvgutils https://bitbucket.org/jespestana/lib_cvgutils.git
add_module lib_newmat11 stack/libraries/lib_newmat11 https://bitbucket.org/joselusl/lib_newmat11.git
add_module lib_pose stack/libraries/lib_pose https://bitbucket.org/joselusl/lib_pose.git
add_module lib_pugixml stack/libraries/lib_pugixml https://bitbucket.org/joselusl/lib_pugixml.git
add_module referenceFrames stack/libraries/referenceFrames https://bitbucket.org/joselusl/referenceframes.git
add_module videoPublisherROSModule stack/utils/videoPublisherROSModule https://bitbucket.org/joselusl/videopublisherrosmodule.git
| true |
cb0aa28752e2ca8d3f06a1876ce4abf004b1f3c9 | Shell | corersky/mola-public | /ansible/roles/clusterdock/files/clean.sh | UTF-8 | 265 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Cleaning script for clustertest
# Created by michal@maxian.sk
DIST="cloudera"
echo "Removing ${DIST} containers."
# List exited containers as "<id> <image> <name>" so the distro string can
# actually match.  (fixed: `docker ps -a -q` prints bare hex IDs, which
# never contain "${DIST}", so the original grep matched nothing.)
CONTAINERS=$(docker ps -a -f status=exited --format '{{.ID}} {{.Image}} {{.Names}}' | grep "${DIST}" | awk '{print $1}')
# Quote the expansion: with several IDs the unquoted form passed multiple
# words to `[` and broke the test.
[ -n "${CONTAINERS}" ] && echo "${CONTAINERS}" | xargs docker rm
| true |
b93f148e996b1f2a2865fba83528dc19acc04ff4 | Shell | keithdadkins/drone-deploy | /templates/packer/build-scripts/bootstrap_drone.sh | UTF-8 | 1,394 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
DRONE_CLI_VERSION=${DRONE_CLI_VERSION:-}
DRONE_SERVER_DOCKER_IMAGE=${DRONE_SERVER_DOCKER_IMAGE:-}
DRONE_AGENT_DOCKER_IMAGE=${DRONE_AGENT_DOCKER_IMAGE:-}
set -euo pipefail
printf "\n\n\n\n***** INSTALLING DRONE *****\n\n\n\n"
# fail if env vars are not set
var_fail_message="must be set in order to continue... exiting."
# (fixed: the first message named DRONE_AWS_VERSION although the check is
# for DRONE_CLI_VERSION)
[ -z "$DRONE_CLI_VERSION" ] && echo "DRONE_CLI_VERSION $var_fail_message" && exit 1
[ -z "$DRONE_SERVER_DOCKER_IMAGE" ] && echo "DRONE_SERVER_DOCKER_IMAGE $var_fail_message" && exit 1
[ -z "$DRONE_AGENT_DOCKER_IMAGE" ] && echo "DRONE_AGENT_DOCKER_IMAGE $var_fail_message" && exit 1
# set our $aws command (just a shortcut instead of writing out a long docker run command everytime)
# Returns "docker" when running as root, "sudo docker" otherwise.
get_docker_cmd(){
  local docker_cmd='docker'
  [ "$EUID" -eq 0 ] && docker_cmd="sudo docker"
  echo "$docker_cmd"
}
docker="$(get_docker_cmd)"
# NOTE(review): $aws is defined but never used in this script --
# presumably kept for parity with sibling build scripts; confirm before
# removing.
aws="$docker run --rm aws-cli -- aws"
# pre-pull drone images
$docker pull "$DRONE_SERVER_DOCKER_IMAGE"
$docker pull "$DRONE_AGENT_DOCKER_IMAGE"
# install drone cli
curl -L "https://github.com/drone/drone-cli/releases/download/v$DRONE_CLI_VERSION/drone_linux_amd64.tar.gz" | tar zx
sudo install -t /usr/local/bin drone
# (fixed: "chmod +x /usr/local/bin drone" marked the directory and the
# extracted ./drone file, not the installed binary)
sudo chmod +x /usr/local/bin/drone
# test drone cli install
if drone -v > /dev/null 2>&1; then
  echo "installed drone cli"
else
  echo "Error installing drone cli... exiting." && exit 1
fi
| true |
3c71ad205c1b3b686f97f446a0f32dc6d354e13a | Shell | KRobesky/iipzy-docs | /iipzy-pi-config/ApplianceInitialSetup.sh | UTF-8 | 13,358 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Get image from https://www.raspberrypi.org/downloads/raspbian/ -- lite
#
# Use DISKPART to initialize sd card
#
# diskpart
# >list disk
# >select disk n
# >list disk
# >clean
# >list disk
# >create partition primary
# >list disk
# >exit
#
# Use balenaEtcher to flash image to micro sd.
#
# ====================================
# Enable SSH using connected keyboard and monitor/
# see https://www.raspberrypi.org/documentation/remote-access/ssh/
# ====================================
#
# sudo systemctl enable ssh
# sudo systemctl start ssh
# - note address.
# ip addr
#
# ====================================
# After this, use ssh
# ====================================
#
# ====================================
# Install git
# ====================================
#
# fix up <username:password>@<git-repository> in the git clone request below.
#
# sudo apt-get update
#
# sudo apt-get install git -y
#
# git config --global user.name "User Name"
# git config --global user.email email@x.y
#
# pwd
# /home/pi
# mkdir /home/pi/iipzy-service-a
# cd /home/pi/iipzy-service-a
# git init
# git remote add origin http://192.168.1.65/Bonobo.Git.Server/iipzy-pi
#
# Get this install script
#
# git clone http://<username:password>@<git-repository>/iipzy-configs-private.git
#
# ====================================
# Run this script
# ====================================
#
# /bin/bash /home/pi/iipzy-service-a/iipzy-configs-private/iipzy-pi-config/ApplianceInitialSetup.sh
#
echo ====================================
echo Set timezone UTC
echo ====================================
#
sudo timedatectl set-timezone UTC
#
echo ====================================
echo Create iipzy folders
echo ====================================
#
mkdir /home/pi/iipzy-service-b
mkdir /home/pi/iipzy-sentinel-web-a
mkdir /home/pi/iipzy-sentinel-web-b
mkdir /home/pi/iipzy-sentinel-admin-a
mkdir /home/pi/iipzy-sentinel-admin-b
mkdir /home/pi/iipzy-updater-a
mkdir /home/pi/iipzy-updater-b
mkdir /home/pi/iipzy-updater-config
cd /home/pi/iipzy-service-a
#
echo ====================================
echo Install unzip
echo ====================================
#
# -y keeps the install non-interactive like every other apt-get call in
# this script.  (fixed: "apt install unzip" without -y prompts and stalls
# an unattended run.)
sudo apt-get install -y unzip
#
echo ====================================
echo Install node.js
echo ====================================
# Install Node.js on Raspberry Pi - from https://www.w3schools.com/nodejs/nodejs_raspberrypi.asp
#
# With the Raspberry Pi properly set up, login in via SSH, and update your Raspberry Pi system packages to their latest versions.
#
# Update your system package list:
#
sudo apt-get update -y
#
# Upgrade all your installed packages to their latest version:
#
sudo apt-get dist-upgrade -y
#
# Doing this regularly will keep your Raspberry Pi installation up to date.
#
# To download and install newest version of Node.js, use the following command:
#
# NOTE(review): setup_10.x pins the NodeSource repo to Node.js 10 (EOL);
# confirm this is still the intended runtime before reusing this script.
curl -sL https://deb.nodesource.com/setup_10.x | sudo -E bash -
#
# Now install it by running:
#
sudo apt-get install -y nodejs
#
# Check that the installation was successful, and the version number of Node.js with:
#
node -v
#
npm config set package-lock false
#
echo ====================================
echo Install static web server
echo ====================================
#
sudo npm install -g serve
#
echo ====================================
echo Create directories.
echo ====================================
#
#
# - create /var/log/iipzy so that directory is writable by non-root
#
sudo mkdir /var/log/iipzy
sudo chown pi:pi /var/log/iipzy
#
# - create /etc/iipzy
#
# NOTE(review): mode 777 lets any local user rewrite the appliance
# config -- confirm this is intentional for this device.
sudo mkdir /etc/iipzy
sudo chmod 777 /etc/iipzy
echo '{"serverAddress":"iipzy.net:8001"}' > /etc/iipzy/iipzy.json
#
echo ====================================
echo Install Sentinel
echo ====================================
#
cd /home/pi/iipzy-service-a
git clone http://<username:password>@<git-repository>/iipzy-shared.git
git clone http://<username:password>@<git-repository>/iipzy-pi.git
#
# install iipzy-pi stuff
#
cd /home/pi/iipzy-service-a
#
cd /home/pi/iipzy-service-a/iipzy-shared
npm i
cd /home/pi/iipzy-service-a/iipzy-pi
npm i
#
echo ====================================
echo Install Sentinel-Web
echo ====================================
#
cd /home/pi/iipzy-sentinel-web-a
git clone http://<username:password>@<git-repository>/iipzy-shared.git
git clone http://<username:password>@<git-repository>/iipzy-sentinel-web.git
#
# install iipzy-sentinel-web stuff
#
cd /home/pi/iipzy-sentinel-web-a/iipzy-shared
npm i
cd /home/pi/iipzy-sentinel-web-a/iipzy-sentinel-web
npm i
#
echo ====================================
echo Build Sentinel-Web
echo ====================================
#
npm run build
#
# - test
#
# npm start
#
echo ====================================
echo Install Sentinel Admin
echo ====================================
#
cd /home/pi/iipzy-sentinel-admin-a
git clone http://<username:password>@<git-repository>/iipzy-shared.git
git clone http://<username:password>@<git-repository>/iipzy-sentinel-admin.git
#
# install iipzy-sentinel-admin stuff
#
cd /home/pi/iipzy-sentinel-admin-a/iipzy-shared
npm i
cd /home/pi/iipzy-sentinel-admin-a/iipzy-sentinel-admin
npm i
#
# - test
#
# npm start
#
echo ====================================
echo Install Updater
echo ====================================
#
cd /home/pi/iipzy-updater-a
git clone http://<username:password>@<git-repository>/iipzy-shared.git
git clone http://<username:password>@<git-repository>/iipzy-updater.git
#
# install iipzy-updater stuff
#
cd /home/pi/iipzy-updater-a/iipzy-shared
npm i
cd /home/pi/iipzy-updater-a/iipzy-updater
npm i
#
# - test
#
# npm start
#
echo ====================================
echo Install network monitoring tools
echo ====================================
#
# For network monitor, promiscuous mode
#
# the file /etc/network/interfaces...
#
# # interfaces(5) file used by ifup(8) and ifdown(8)
#
# # Please note that this file is written to be used with dhcpcd
# # For static IP, consult /etc/dhcpcd.conf and 'man dhcpcd.conf'
#
# # Include files from /etc/network/interfaces.d:
# source-directory /etc/network/interfaces.d
#
# auto eth0
# iface eth0 inet manual
# up ifconfig eth0 promisc up
# down ifconfig eth0 promisc down
#
sudo cp /home/pi/iipzy-service-a/iipzy-pi/src/extraResources/interfaces /etc/network/interfaces
#
# For Bonjour monitoring in iipzy-pi
#
cd /home/pi/iipzy-service-a/iipzy-pi
#
# - install libpcap-dev
#
sudo apt-get install libpcap-dev -y
#
npm i pcap
sudo apt-get install arp-scan -y
sudo apt-get install nbtscan -y
sudo apt-get install avahi-utils -y
#
# For cpu monitoring
#
sudo apt-get install sysstat -y
#
echo ====================================
echo Build and install iperf3
echo ====================================
#
# - build iperf3 - see https://software.es.net/iperf/building.html
#
cd /home/pi
git clone http://<username:password>@<git-repository>/iperf3.git
cd /home/pi/iperf3
sudo chmod 777 /usr/local/lib
sudo chmod 777 /usr/local/bin
sudo chmod 777 /usr/local/include
sudo chmod 777 /usr/local/share/man
sudo ./configure --disable-shared
sudo make
sudo make install
#
# - test
#
# NOTE(review): bare "iperf3" prints usage and exits non-zero;
# "iperf3 -v" may be the intended smoke test -- confirm.
iperf3
#
echo ===================================
echo Install Sentinel services.
echo ===================================
#
cd /home/pi/iipzy-service-a/iipzy-pi
sudo cp src/extraResources/iipzy-pi-a.service /etc/systemd/system/.
sudo cp src/extraResources/iipzy-pi-b.service /etc/systemd/system/.
sudo systemctl enable iipzy-pi-a
sudo systemctl start iipzy-pi-a
sudo systemctl status iipzy-pi-a
#
# ? iipzy-pi.service - Node.js iipzy-pi
# Loaded: loaded (/etc/systemd/system/iipzy-pi.service; enabled; vendor preset: enabled)
# Active: active (running) since Sat 2019-07-27 00:11:01 BST; 5s ago
# Main PID: 25911 (node)
# Tasks: 12 (limit: 4035)
# Memory: 18.7M
# CGroup: /system.slice/iipzy-pi.service
# +-25911 /usr/bin/node /home/pi/iipzy-service/iipzy-pi/src/index.js
# +-25925 ping google.com
#
# Jul 27 00:11:02 raspberrypi iipzy-pi[25911]: 2019-07-27 00:11:02.973+01:00 info [cfg ] ...get, key=pingTarget, val=undefined
# Jul 27 00:11:02 raspberrypi iipzy-pi[25911]: 2019-07-27 00:11:02.973+01:00 info [ping] ping.constructor: title = pingPlot, target = google.com, duration = 0, interval 5
# Jul 27 00:11:03 raspberrypi iipzy-pi[25911]: 2019-07-27 00:11:03.067+01:00 info [send] ...ipcSend.emit: event = ipc_007, data = true
# Jul 27 00:11:03 raspberrypi iipzy-pi[25911]: 2019-07-27 00:11:03.068+01:00 info [ewtr] addEvent: event = ipc_007, data = true
# Jul 27 00:11:04 raspberrypi iipzy-pi[25911]: 2019-07-27 00:11:04.001+01:00 info [rjmg] RemoteJobManager.run - before GET
# Jul 27 00:11:04 raspberrypi iipzy-pi[25911]: 2019-07-27 00:11:04.589+01:00 info [ewtr] ...check queue, curts = 1564182664589, eventts = 1564182662584
# Jul 27 00:11:05 raspberrypi iipzy-pi[25911]: 2019-07-27 00:11:05.010+01:00 info [rjmg] RemoteJobManager.run - before GET
# Jul 27 00:11:06 raspberrypi iipzy-pi[25911]: 2019-07-27 00:11:06.018+01:00 info [rjmg] RemoteJobManager.run - before GET
# Jul 27 00:11:06 raspberrypi iipzy-pi[25911]: 2019-07-27 00:11:06.590+01:00 info [ewtr] ...check queue, curts = 1564182666590, eventts = 1564182662584
# Jul 27 00:11:07 raspberrypi iipzy-pi[25911]: 2019-07-27 00:11:07.024+01:00 info [rjmg] RemoteJobManager.run - before GET
#
echo ===================================
echo Install Sentinel-web services
echo ===================================
#
cd /home/pi/iipzy-sentinel-web-a/iipzy-sentinel-web
sudo cp src/extraResources/iipzy-sentinel-web-a.service /etc/systemd/system/.
sudo cp src/extraResources/iipzy-sentinel-web-b.service /etc/systemd/system/.
sudo systemctl enable iipzy-sentinel-web-a
sudo systemctl start iipzy-sentinel-web-a
sudo systemctl status iipzy-sentinel-web-a
#
echo ===================================
echo Install Sentinel Admin services
echo ===================================
#
cd /home/pi/iipzy-sentinel-admin-a/iipzy-sentinel-admin
sudo cp src/extraResources/iipzy-sentinel-admin-a.service /etc/systemd/system/.
sudo cp src/extraResources/iipzy-sentinel-admin-b.service /etc/systemd/system/.
sudo systemctl enable iipzy-sentinel-admin-a
sudo systemctl start iipzy-sentinel-admin-a
sudo systemctl status iipzy-sentinel-admin-a
#
echo ===================================
echo Install Updater services
echo ===================================
#
cd /home/pi/iipzy-updater-a/iipzy-updater
sudo cp src/extraResources/iipzy-updater-a.service /etc/systemd/system/.
sudo cp src/extraResources/iipzy-updater-b.service /etc/systemd/system/.
sudo systemctl enable iipzy-updater-a
sudo systemctl start iipzy-updater-a
sudo systemctl status iipzy-updater-a
#
echo ===================================
echo Verify installation
echo ===================================
# check iipzy logs directory
#
ls -l /var/log/iipzy/
#
# you should see something like...
#
# total 3404
# -rw-r--r-- 1 pi pi 1745931 Oct 3 00:31 iipzy-pi-2019-10-03-00.log
# -rw-r--r-- 1 pi pi 1719438 Oct 3 00:31 iipzy-pi.log
# -rw-r--r-- 1 pi pi 3114 Oct 3 00:31 iipzy-updater-2019-10-03-00.log
# -rw-r--r-- 1 pi pi 3114 Oct 3 00:31 iipzy-updater.log
#
echo ===================================
echo Remove secret stuff
echo ===================================
#
# (fixed: a stray "cp" token made rm also try to remove a file named
# "cp" in the current directory)
sudo rm -r -f /home/pi/iipzy-service-a/iipzy-configs-private
#
# check that services are running
# ps -Af | grep iipzy
# pi 8000 1 23 00:21 ? 00:00:05 /usr/bin/node /home/pi/iipzy-service-a/iipzy-pi/src/index.js
# pi 8409 787 0 00:22 pts/0 00:00:00 grep --color=auto iipzy
#
echo ===================================
echo Change password
echo ===================================
#
echo "pi:iipzy" | sudo chpasswd
#
echo ===================================
echo reboot
echo ===================================
#
sudo reboot
#
# ====================================
#
# Before shipping AND/OR making an image.
#
# - stop services. Note which of "a" or "b" service is active (e.g., "iipzy-pi-a" vs "iipzy-pi-b")
#
# ps -Af | grep iipzy
# pi 1026 1 0 14:43 ? 00:00:05 /usr/bin/node /home/pi/iipzy-updater-b/iipzy-updater/src/index.js
# pi 2161 1 2 15:02 ? 00:01:04 /usr/bin/node /home/pi/iipzy-service-b/iipzy-pi/src/index.js
# pi 4924 27819 0 15:51 pts/0 00:00:00 grep --color=auto iipzy
#
# sudo systemctl stop iipzy-updater-b
# sudo systemctl stop iipzy-pi-b
#
# - remove state files from /etc/iipzy
#
# rm -r -f /etc/iipzy/*
#
# - initialize /etc/iipzy/iipzy.json
#
# echo '{"serverAddress":"iipzy.net:8001"}' > /etc/iipzy/iipzy.json
#
# - remove log files from /var/logs/iipzy/.
#
# rm -r -f /var/log/iipzy/*
#
# - change password
#
# echo "pi:iipzy" | sudo chpasswd
#
# - zero out to minimize compressed size. THIS TAKES A LONG TIME. ~30 minutes
#
# sudo apt-get autoremove -y
# sudo apt-get clean -y
# cat /dev/zero >> zero.file;sync;rm zero.file;date
#
# sudo shutdown
#
# ====================================
#
# Create archive of pi image
#
# ====================================
#
# Use Win32DiskImager to copy image from micro-sd card --> iipzy-server\RPI-images\iipzypi.img
#
# Use 7-zip to compress the .img file.
#
| true |
422a0ddf252dbad311bd03fed5b126ed2440a29a | Shell | tsjazil/linker-script | /linker | UTF-8 | 261 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Prompt for a filename pattern, let the user pick a matching file,
# upload it to 0x0.st, and put the resulting URL on the clipboard.
# NOTE(review): find searches the current directory, but the upload path
# is built from $HOME -- assumes the script is run from $HOME; confirm.
file=$(printf '' | dmenu -i -p "Enter filename")
# Bail out quietly if either dmenu prompt was cancelled / left empty.
[ -n "$file" ] || exit 1
dir=$(find -type f -iname "$file" | dmenu -i -p "select" -l 10 )
[ -n "$dir" ] || exit 1
# find prints "./sub/file"; strip the leading "." so "$HOME$dirc" forms
# an absolute path.  Parameter expansion (unlike the old unquoted
# `echo | cut -c 2-`) also keeps paths containing spaces intact.
dirc=${dir#.}
url=$(curl -F "file=@$HOME$dirc" 0x0.st)
echo "$url" | xclip -selection c
notify-send "Link on Clipboard"
| true |
ede7afdbdecc2a9ffb11e9eaeb705b08cde084a8 | Shell | thebridge0491/vm_templates_sh | /scripts/freebsd/userifc.sh | UTF-8 | 1,632 | 3.140625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/sh -eux
## scripts/userifc.sh
## Installs an X11 desktop (Xfce by default, LXQt on request) on FreeBSD,
## then enables the matching display manager and session services.
# Tolerate individual command failures: the shebang's -e would otherwise
# abort the whole provisioning run on the first failing pkg call.
set +e
# Desktop choice comes from the first script argument (default: xfce).
export CHOICE_DESKTOP=${1:-xfce}
# Prefer aria2c as the download tool when it is available.
if command -v aria2c > /dev/null ; then
FETCH_CMD=${FETCH_CMD:-aria2c} ;
fi
# Refresh the catalog and bring installed packages up to date.
pkg update ; pkg fetch -dy --available-updates ; pkg upgrade -y
# Source the package lists (pkgs_displaysvr_xorg, pkgs_deskenv_*).
. /root/init/freebsd/distro_pkgs.ini
case $CHOICE_DESKTOP in
lxqt) pkgs_var="$pkgs_displaysvr_xorg $pkgs_deskenv_lxqt" ;;
*) pkgs_var="$pkgs_displaysvr_xorg $pkgs_deskenv_xfce" ;;
esac
# Fetch everything first, then install in a second pass.
for pkgX in $pkgs_var ; do
pkg fetch -Udy $pkgX ;
done
for pkgX in $pkgs_var ; do
pkg install -Uy $pkgX ;
done
sleep 3
# Enable the display manager that matches the chosen desktop:
# sddm for LXQt, lightdm for everything else.
case $CHOICE_DESKTOP in
lxqt) sysrc sddm_enable="YES" ;;
*) #mv /usr/local/etc/lightdm /usr/local/etc/lightdm.old ;
sysrc lightdm_enable="YES" ;;
esac
sleep 3 ; chmod 1777 /tmp
# config xorg
# Use the vt console driver and enable Synaptics touchpad support.
sh -c 'cat >> /boot/loader.conf' << EOF
kern.vty=vt
hw.psm.synaptics_support="1"
EOF
# NOTE(review): FreeBSD normally takes locale settings from login
# classes (/etc/login.conf); /etc/profile.conf is not a standard file --
# confirm something actually reads it.
sh -c 'cat >> /etc/profile.conf' << EOF
LANG=en_US.UTF-8 ; export LANG
CHARSET=UTF-8 ; export CHARSET
EOF
# Session services required by the desktop environment.
sysrc dbus_enable="YES"
sysrc hald_enable="YES"
#sysrc mixer_enable="YES"
sysrc moused_enable="YES"
# enable touchpad tapping
# (BSD sed: -i '' edits in place without keeping a backup file.)
sed -i '' '/MatchIsTouchpad/a \ \ \ \ \ \ \ \ Option "Tapping" "on"' \
/usr/local/share/X11/xorg.conf.d/10-evdev.conf
sed -i '' '/MatchIsTouchpad/a \ \ \ \ \ \ \ \ Option "Tapping" "on"' \
/usr/local/share/X11/xorg.conf.d/40-libinput.conf
#libinput list-devices ; xinput --list
#xinput list-props XX [; xinput disable YY] # by id, list-props or disable
#xinput set-prop <deviceid|devicename> <deviceproperty> <value>
# update XDG user dir config
export LANG=en_US.UTF-8 ; export CHARSET=UTF-8
sh -c "echo 'BIN=bin' >> /usr/local/etc/xdg/user-dirs.defaults"
xdg-user-dirs-update
| true |
38831a32b52efb6ff4e09b1b7d21268159b61b7a | Shell | rkks/scripts | /bash/fortunes.sh | UTF-8 | 2,400 | 3.703125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# DETAILS: prints funny motd (message of the day) messages.
# Print on the login by calling this script from /etc/motd.tail or elsewhere by
# calling this script directly. It also helps creates forture db out of txt file
#
# CREATED: 04/06/18 13:38:31 PDT
# MODIFIED: 26/Apr/2018 16:30:31 PDT
# REVISION: 1.0
#
# AUTHOR: Ravikiran K.S., ravikirandotks@gmail.com
# LICENCE: Copyright (c) 2018, Ravikiran K.S.
#set -uvx # Warn unset vars, Verbose (echo each command), Enable debug mode
# Fortune options
# /usr/games/fortune -f // print list of files searched for quote
# /usr/games/fortune -c // show cookie from which quote was picked
# /usr/games/fortune -e // consider all files of equal size
#
# Steps to publish own quotes:
# - Write quotes into plain txt file in below format. default-file-name:fortune
# <quote>
# - <person>
# % // quote separator
# Example:
# A day for firm decisions!!!!! Or is it?
# - unknown
# %
# - Refer to /usr/share/games/fortunes/fortunes for more
# - Create index file: $ strfile -c % <your-fortune-file> <your-fortune-file.dat>
# - Move both text and index files to /usr/share/games/fortunes/
PATH="/usr/games:/usr/bin:/usr/sbin:.:/auto/opt/bin:/bin"
# Print a brief usage summary for this script on stdout.
usage()
{
cat << 'USAGE_EOF'
Usage: fortunes.sh [-h|]
Options:
 -h - print this help
USAGE_EOF
}
# Print a random fortune wrapped in a cowsay speech bubble.
# NOTE(review): `exec` on the left side of a pipeline runs in a subshell, so it
# does not replace this shell; confirm the `exec` here is intentional.
function tell_fortune() { exec /usr/games/fortune | /usr/games/cowsay -n; return 0; }
# Build the strfile index ($1.dat) for a plain-text fortune database $1,
# using '%' as the cookie separator.
function fortune_convert() { strfile -c % $1 $1.dat; return 0; }
# Each shell script has to be independently testable.
# It can then be included in other files for functions.
# Entry point: parse command-line options with getopts and act on them.
# With no options at all, the default action is to print a fortune.
# Supported: -h (show usage).  Always exits the shell (never returns).
main()
{
PARSE_OPTS="h"
local opts_found=0
while getopts ":$PARSE_OPTS" opt; do
case $opt in
# Any recognized single-character option: record it as opt_<char>=1 and
# stash its argument (if any) in optarg_<char>.
[a-zA-Z0-9])
# NOTE(review): `log` and $EINVAL are not defined in this file — presumably
# provided by a sourced shell library; confirm they are in scope standalone.
log DEBUG "-$opt was triggered, Parameter: $OPTARG"
local "opt_$opt"=1 && local "optarg_$opt"="$OPTARG"
;;
# Unknown option.
\?)
echo "Invalid option: -$OPTARG"; usage; exit $EINVAL
;;
# Option that requires an argument was given without one.
:)
echo "[ERROR] Option -$OPTARG requires an argument";
usage; exit $EINVAL
;;
esac
# Consume the parsed option and restart getopts for the remaining args.
shift $((OPTIND-1)) && OPTIND=1 && local opts_found=1;
done
# No options at all: print a fortune.
if ((!opts_found)); then
tell_fortune;
fi
((opt_h)) && { usage; }
exit 0;
}
if [ "fortunes.sh" == "$(basename $0)" ]; then
main $*
fi
# VIM: ts=4:sw=4:sts=4:expandtab
| true |
39201ea5645f7bd98d8191a7dd56f59f512e4288 | Shell | AndreRH/Wine-Weekly-Newsletter-Tools | /stat.gen | UTF-8 | 6,236 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Copyright, Brian Vincent, 2002-2006
# Copyright, André Hentschel, 2014
THISISSUE="378"
#THISMODIFER="th"
THISPERLDATE="2014-10-24"
LASTPERLDATE="2014-10-10"
IFSTMP=$IFS
IFS='-' read -ra PERLTMP <<< "$THISPERLDATE"
IFS=$IFSTMP
THISFORMALDATE="${PERLTMP[1]}/${PERLTMP[2]}/${PERLTMP[0]}"
perl getAppChanges.pl ${LASTPERLDATE} ${THISPERLDATE} ${THISISSUE} > appdb.txt
# Map an issue number to its English ordinal suffix (1 -> "st", 2 -> "nd",
# 3 -> "rd", everything else -> "th").  The trailing TWO digits are checked
# first so that 11, 12 and 13 correctly yield "th"; the original tested only
# the last digit and produced "11st", "12nd" and "13rd".
ordinal_suffix () {
    case "$1" in
        *11 | *12 | *13) echo "th" ;;
        *1) echo "st" ;;
        *2) echo "nd" ;;
        *3) echo "rd" ;;
        *)  echo "th" ;;
    esac
}
THISMODIFER=$(ordinal_suffix "${THISISSUE}")
#Autho info
AUTHORURL="http://www.dawncrow.de/wine/"
AUTHORNAME="André Hentschel"
#Directory to put wwn files
WWNDIR="wwn"
WINEDEVELARCHIVE="http://www.winehq.com/pipermail/wine-devel/"
# This is the name of the Hypermail archive to download from the web server.
LANG=C
LASTMONTH=`date --date="$(date +%Y-%m-15) -1 month" +"%B"`
THISMONTH=`date +"%B"`
LASTMONTHY=`date --date="$(date +%Y-%m-15) -1 month" +"%Y"`
THISMONTHY=`date +"%Y"`
echo "Downloading last month's archive with wget..."
wget --no-check-certificate -c ${WINEDEVELARCHIVE}${LASTMONTHY}-${LASTMONTH}.txt.gz
gunzip -c ${LASTMONTHY}-${LASTMONTH}.txt.gz > ${LASTMONTHY}-${LASTMONTH}.txt
echo "Downloading this month's archive with wget..."
wget --no-check-certificate -c ${WINEDEVELARCHIVE}${THISMONTHY}-${THISMONTH}.txt.gz
gunzip -f ${THISMONTHY}-${THISMONTH}.txt.gz > ${THISMONTHY}-${THISMONTH}.txt
echo "Invoking kcstats2 to generate stats..."
echo "./kcstats2 ${LASTMONTHY}-${LASTMONTH}.txt ${THISMONTHY}-${THISMONTH}.txt > ${WWNDIR}/junk.tmp"
./kcstats2 ${LASTMONTHY}-${LASTMONTH}.txt ${THISMONTHY}-${THISMONTH}.txt > ${WWNDIR}/junk.tmp
echo "Removing all references to " ..."
sed -e "s/"//g" ${WWNDIR}/junk.tmp > ${WWNDIR}/junk2.tmp
echo "Removing all testbot mail stats ..."
sed -e "/testbot at winehq\.org/d" ${WWNDIR}/junk2.tmp > ${WWNDIR}/junk3.tmp
# You might want to use sed to get rid of annoying misspelled or
# mangled names. I have a file called stat.sed that looks like this:
#
# s/=?iso-8859-1?q?Sylvain=20Petreolle?=/Sylvain\ Petreolle/g
# s/(thomas.mertes)/Thomas Mertes/g
echo "Building up skeleton..."
cat <<EOF > ${WWNDIR}/kcstats${THISISSUE}.txt
<?xml version="1.0" ?>
<kc>
<title>Wine Traffic</title>
<author contact="${AUTHORURL}">${AUTHORNAME}</author>
EOF
echo "<issue num=\"${THISISSUE}\" date=\"${THISFORMALDATE}\" />" >> ${WWNDIR}/kcstats${THISISSUE}.txt
echo "<intro> <p>This is the ${THISISSUE}${THISMODIFER} issue of the World Wine News publication." >> ${WWNDIR}/kcstats${THISISSUE}.txt
echo "Its main goal is to inform you of what's going on around Wine. Wine is an open source implementation of the Windows API on top of X and Unix. Think of it as a Windows compatibility layer. Wine does not require Microsoft Windows, as it is a completely alternative implementation consisting of 100% Microsoft-free code, but it can optionally use native system DLLs if they are available. You can find more info at <a href=\"http://www.winehq.org\">www.winehq.org</a></p> </intro>" >> ${WWNDIR}/kcstats${THISISSUE}.txt
echo "Invoking sed to find instances of Unicode chars..."
sed -f s-sed.stat ${WWNDIR}/junk3.tmp >> ${WWNDIR}/kcstats${THISISSUE}.txt
#rm ${WWNDIR}/junk.tmp ${WWNDIR}/junk2.tmp ${WWNDIR}/junk3.tmp
echo "" >> ${WWNDIR}/kcstats${THISISSUE}.txt
cat appdb.txt >> ${WWNDIR}/kcstats${THISISSUE}.txt
echo "</kc>" >> ${WWNDIR}/kcstats${THISISSUE}.txt
echo "We still might have Unicode problems though..."
grep "utf" ${WWNDIR}/kcstats${THISISSUE}.txt
grep "iso" ${WWNDIR}/kcstats${THISISSUE}.txt
grep "ISO" ${WWNDIR}/kcstats${THISISSUE}.txt
# This is a modification for me when I do the Wine Weekly News.
# A bunch of the people post from more than on email account.
# Ideally I'd modify the stats script to take that into account,
# but it's easy enough to fix up by hand. To save a few minutes
# I grep for the common offenders.
echo "Looking for known dupes..."
P=`grep Stefan ${WWNDIR}/kcstats${THISISSUE}.txt`;grep Stefan ${WWNDIR}/kcstats${THISISSUE}.txt | wc -l | grep 2 && echo $P
P=`grep Shear ${WWNDIR}/kcstats${THISISSUE}.txt`;grep Shear ${WWNDIR}/kcstats${THISISSUE}.txt | wc -l | grep 2 && echo $P
P=`grep Eric ${WWNDIR}/kcstats${THISISSUE}.txt`;grep Eric ${WWNDIR}/kcstats${THISISSUE}.txt | wc -l | grep 2 && echo $P
P=`grep Vincent ${WWNDIR}/kcstats${THISISSUE}.txt`;grep Vincent ${WWNDIR}/kcstats${THISISSUE}.txt | wc -l | grep 2 && echo $P
P=`grep Marcus ${WWNDIR}/kcstats${THISISSUE}.txt`;grep Marcus ${WWNDIR}/kcstats${THISISSUE}.txt | wc -l | grep 2 && echo $P
P=`grep Ove ${WWNDIR}/kcstats${THISISSUE}.txt`;grep Ove ${WWNDIR}/kcstats${THISISSUE}.txt | wc -l | grep 2 && echo $P
P=`grep Bill ${WWNDIR}/kcstats${THISISSUE}.txt`;grep Bill ${WWNDIR}/kcstats${THISISSUE}.txt | wc -l | grep 2 && echo $P
P=`grep Sylvain ${WWNDIR}/kcstats${THISISSUE}.txt`;grep Sylvain ${WWNDIR}/kcstats${THISISSUE}.txt | wc -l | grep 2 && echo $P
P=`grep Raphael ${WWNDIR}/kcstats${THISISSUE}.txt`;grep Raphael ${WWNDIR}/kcstats${THISISSUE}.txt | wc -l | grep 2 && echo $P
P=`grep Dmitry ${WWNDIR}/kcstats${THISISSUE}.txt`;grep Dmitry ${WWNDIR}/kcstats${THISISSUE}.txt | wc -l | grep 2 && echo $P
P=`grep Diego ${WWNDIR}/kcstats${THISISSUE}.txt`;grep Diego ${WWNDIR}/kcstats${THISISSUE}.txt | wc -l | grep 2 && echo $P
grep Hearn ${WWNDIR}/kcstats${THISISSUE}.txt | awk -f s-awk.stat
grep Dimitrie ${WWNDIR}/kcstats${THISISSUE}.txt | awk -f s-awk.stat
echo "Finished generating mailing stats and wwn skeleton."
# This script can be used to parse Hypermail archives, or even normal
# mbox files (you'll have to modify it slightly, probably just where
# the lynx commands are). It might not work out of the box for you,
# but it works for me to create the stats for the Wine Weekly News.
# This script relies on Zack's kcstats program, I refer to it as
# "kcstats2" since he modified it last year. It's hardcoded, so
# you'll need to make sure it's called properly. It also requires
# stat.sed file. I'd suggest using it, but at the least you'll need
# to create the file and put something like "s/ignore me//g" as a
# single line entry.
# Also, you'll want to modify the lynx commands to point to the proper
# web server.
| true |
dca23ce3c473d591a52d46c5c0876b9ff3b9aa8a | Shell | ats5515/MarathonOnlineJudge-judge | /judge/type/normal/run_testcase.sh | UTF-8 | 1,601 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# Work from the script's own directory and trace every command for debugging.
cd `dirname $0`
set -x
# Positional parameters: test-case index, generator seed, time limit (s),
# memory limit.
IDX=$1
SEED=$2
TL=$3
ML=$4
# Command line used to run the contestant's program (built by run_cmd.sh).
RUN_CMD=$(./run_cmd.sh main)
mkdir -p ./lower
mkdir -p ./result
# Per-run report files consumed by the judging front end.
STATUS="./result/status.txt"
ERR="./result/err.txt"
SCORE="./result/score.txt"
TIME="./result/time.txt"
MEMORY="./result/memory.txt"
# Initial verdict is WJ ("waiting for judge"); the other files start empty.
echo "WJ" > $STATUS
>${ERR}
>${SCORE}
>${TIME}
>${MEMORY}
# Record an internal (judge-side) error verdict "IE" and abort the run.
# $1 - human-readable reason, written to the error report file.
# Relies on $STATUS / $ERR being set by the surrounding script.
throw_internal_error () {
    echo -n "IE" > "$STATUS"
    # Quoting keeps the message and file paths intact (no word splitting
    # or glob expansion, which the original unquoted $1/$ERR allowed).
    echo "$1" > "$ERR"
    exit
}
# Record a wrong-answer verdict "WA" and abort the run.
# $1 - optional detail message, written to the error report file.
# Relies on $STATUS / $ERR being set by the surrounding script.
throw_wrong_answer () {
    echo -n "WA" > "$STATUS"
    # Quoting keeps the message and file paths intact (no word splitting
    # or glob expansion, which the original unquoted $1/$ERR allowed).
    echo "$1" > "$ERR"
    exit
}
# Apply the memory limit to the "judge" cgroup, then generate this case's input.
./setup_cgroup.sh $ML
./generator ${SEED} > input.txt || throw_internal_error "generator error"
# Assume Accepted until something fails below.
state="AC"
# Give the wall-clock timeout one extra second beyond the scored time limit.
TIMEOUT=$(($TL+1))
# Build the sandbox around the input and program files; SWITCH_CMD is the
# wrapper command that enters the sandbox.
SWITCH_CMD=$(./setup_sandbox.sh "*.class" input.txt main)
usleep 100000
# Wall-clock measurement in nanoseconds around the sandboxed run.
start_time=$(date +%s%N)
# Run the submission inside the judge cgroup; any non-zero exit (including
# `timeout` killing it) marks a runtime error for now — TLE/MLE are
# re-classified from the measurements below.
sudo cgexec -g pids,cpuset,memory:judge $SWITCH_CMD sh -c "cd sand; timeout $TIMEOUT $RUN_CMD < input.txt > output.txt 2> stderr.txt " || state="RE"
end_time=$(date +%s%N)
# Pull output/stderr back out of the sandbox and tear it down.
./rm_sandbox.sh output.txt stderr.txt
# Elapsed time in milliseconds; peak memory from the cgroup accounting.
time_elapsed=$((($end_time - $start_time)/1000000))
memory_used=$(cat '/sys/fs/cgroup/memory/judge/memory.max_usage_in_bytes')
# Keep at most the first 1024 bytes of the contestant's stderr.
cat stderr.txt > $ERR
MAXBYTES=1024
if [ $(wc -c < $ERR) -gt $MAXBYTES ]; then
truncate -s $MAXBYTES $ERR
fi
echo -n $time_elapsed > $TIME
echo -n $memory_used > $MEMORY
# Peak usage reaching the cgroup limit means the limit was hit: MLE.
ML_num=$(cat '/sys/fs/cgroup/memory/judge/memory.limit_in_bytes')
if [ $ML_num -le $memory_used ]; then
echo -n "MLE" > $STATUS
exit
fi
# Exceeding the scored time limit (TL seconds = TL*1000 ms): TLE.
if [ $(($TL * 1000)) -le $time_elapsed ]; then
echo -n "TLE" > $STATUS
exit
fi
# Otherwise score the output; a non-zero judge exit means wrong answer.
if [ $state = "AC" ]; then
./judge input.txt < output.txt > $SCORE 2> judgeerr.txt || throw_wrong_answer
echo -n "AC" > $STATUS
else
echo -n "RE" > $STATUS
fi
| true |
1178611eb50287d377e31a9ec457994f9b0251e1 | Shell | dineshkumares/favicon-scraper | /postprocessing/filter1.sh | UTF-8 | 130 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Scan directory $1 for zero-length files: append each empty file's name to
# empty.txt and delete it.
# Require a target directory; the original silently misbehaved without one.
[[ -d ${1-} ]] || exit 0
# Globbing replaces the `ls $1` parsing, which broke on filenames containing
# whitespace or glob characters.
for path in "$1"/*
do
    # With no matches the glob stays literal; skip the non-existent entry.
    [[ -e $path ]] || continue
    fname=${path##*/}
    if [[ ! -s $path ]]
    then
        echo "$fname" >> empty.txt
        rm -- "$path"
    fi
done
| true |
92b80ff5a8b2b018dc194dbd33d2fa9920d4fd8a | Shell | lisosia/bin_local | /reduce_mt | UTF-8 | 242 | 2.625 | 3 | [] | no_license | #!/usr/bin/env bash
# sim with .param a=GAUSS without trannoise
# Post-process simulator measurement files test.mt1 .. test.mtN: there are two
# lines in ./alters per run, so N is half the line count of that file.
lines=`wc -l ./alters | cut -d " " -f 1`
n=`echo "$lines / 2" | bc`
for i in $(seq 1 $n );
do
# skipn/comp2 are local helper tools not present here: presumably skipn drops
# the first 4 header lines and comp2 combines columns 4 and 5 — TODO confirm.
cat test.mt$i | skipn 4 |cut -f 4,5 | comp2;
# Tag each result line with its run index.
printf "\t%d\n" $i ;
done
| true |
8e33c1358c65d69da5a446cec9dab8b885b253d0 | Shell | marian-nmt/marian-examples | /wmt2017-transformer/scripts/download-files.sh | UTF-8 | 674 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/bin/bash -v
# Download the WMT17 En-De parallel training data, unpack it, concatenate the
# three corpora into corpus.de / corpus.en, and remove the intermediate files.
mkdir -p data
cd data

# Archives that together form the En-De training set for WMT17.
archives="http://www.statmt.org/europarl/v7/de-en.tgz
http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz
http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz"

# Fetch each archive (-nc skips files already downloaded) ...
for url in $archives; do
    wget -nc "$url"
done

# ... then extract them in the same order.
for url in $archives; do
    tar -xf "${url##*/}"
done

# Stitch the per-source files into one combined corpus per language.
for lang in de en; do
    cat "europarl-v7.de-en.$lang" "commoncrawl.de-en.$lang" \
        "training/news-commentary-v12.de-en.$lang" > "corpus.$lang"
done

# Drop everything except the combined corpus files.
rm -r europarl-* commoncrawl.* training/ *.tgz
cd ..
| true |
c974b14b7409e7ff32a1e19c00d884efed55160a | Shell | dmamontov/rp-pppoe | /go | UTF-8 | 830 | 3.25 | 3 | [] | no_license | #!/bin/sh
# LIC: GPL
#***********************************************************************
#
# go
#
# Quick-start shell script to set up PPPoE
#
# Copyright (C) 2000 Roaring Penguin Software Inc.
#
# $Id$
#***********************************************************************
# Figure out directory of script
MYDIR=`dirname $0`
cd $MYDIR/src
echo "Running ./configure..."
./configure $@
if [ "$?" != 0 ] ; then
echo "Oops! It looks like ./configure failed."
exit 1
fi
echo "Running make..."
make
if [ "$?" != 0 ] ; then
echo "Oops! It looks like make failed."
exit 1
fi
echo "Running make install..."
make install
if [ "$?" != 0 ] ; then
echo "Oops! It looks like make install failed."
exit 1
fi
for i in a a a a a a a a a a a a a a a a a a a a a a a a a a a a ; do
echo ""
done
sh ../scripts/pppoe-setup
| true |
b36b51290e1ddf52d04c7d3fa70e2aa084574b6a | Shell | crdant/pcf-on-gcp | /teardown.sh | UTF-8 | 13,602 | 3.21875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# teardown PCF on GCP
# currently handles only the resources that prepare.sh creates, and will fail due to dependencies if resources
# created by OpsManager (or otherwise) that depend on these prerequisites still exist
BASEDIR=`dirname $0`
. "${BASEDIR}/lib/util.sh"
. "${BASEDIR}/lib/env.sh"
prepare_env
. "${BASEDIR}/lib/customization_hooks.sh"
. "${BASEDIR}/personal.sh"
. "${BASEDIR}/lib/setup.sh"
# Delete every VM in the project tagged "pcf-vms" (the BOSH-managed instances).
# Deletions are launched in parallel via `&` and are NOT waited on here.
vms () {
# delete all bosh managed VMs
for instance in `gcloud compute --project "${PROJECT}" instances list --filter='tags.items:pcf-vms' --uri`; do
gcloud compute --project "${PROJECT}" instances delete $instance --quiet &
done
}
stackdriver () {
echo "Preparing for GCP Stackdriver Nozzle installation..."
# prepare for the stackdriver nozzle
echo "Setting up service account stackdriver-nozzle-${SUBDOMAIN_TOKEN}"
gcloud iam --project "${PROJECT}" service-accounts delete "stackdriver-nozzle-${SUBDOMAIN_TOKEN}" --quiet
rm "${KEYDIR}/${PROJECT}-stackdriver-nozzle-${SUBDOMAIN_TOKEN}.json"
}
service_broker () {
gcloud sql --project="${PROJECT}" instances delete `cat "${WORKDIR}/gcp-service-broker-db.name"` --quiet
rm "${KEYDIR}/gcp-service-broker-db-server.crt" "${KEYDIR}/gcp-service-broker-db-client.key" "${KEYDIR}/gcp-service-broker-db-client.crt"
gcloud iam service-accounts delete service-broker-${SUBDOMAIN_TOKEN}@${PROJECT}.iam.gserviceaccount.com --quiet
}
cloud_foundry () {
gcloud dns record-sets transaction start -z "${DNS_ZONE}" --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml" --quiet
gcloud dns record-sets transaction remove -z "${DNS_ZONE}" --name "mysql.${SUBDOMAIN}" --ttl "${DNS_TTL}" --type A "10.0.15.98" "10.0.15.99" --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml"
gcloud dns record-sets transaction execute -z "${DNS_ZONE}" --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml"
}
products () {
service_broker
cloud_foundry
}
blobstore () {
# drop cloud storage buckets
gsutil rm -r gs://buildpacks-pcf-${SUBDOMAIN_TOKEN}
gsutil rm -r gs://droplets-pcf-${SUBDOMAIN_TOKEN}
gsutil rm -r gs://packages-pcf-${SUBDOMAIN_TOKEN}
gsutil rm -r gs://resources-pcf-${SUBDOMAIN_TOKEN}
}
ops_manager () {
# remove from DNS
OPS_MANAGER_ADDRESS=`gcloud compute --project "${PROJECT}" --format json addresses describe "pcf-ops-manager-${SUBDOMAIN_TOKEN}" --region "${REGION_1}" | jq --raw-output ".address"`
gcloud dns record-sets transaction start -z "${DNS_ZONE}" --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml" --quiet
gcloud dns record-sets transaction remove -z ${DNS_ZONE} --name "${OPS_MANAGER_FQDN}" --ttl ${DNS_TTL} --type A ${OPS_MANAGER_ADDRESS} --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml"
gcloud dns record-sets transaction execute -z ${DNS_ZONE} --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml"
# release public IP
gcloud compute --project "${PROJECT}" addresses delete "pcf-ops-manager-${SUBDOMAIN_TOKEN}" --region ${REGION_1} --quiet
# drop Ops Manager
gcloud compute --project "${PROJECT}" instances delete "pcf-ops-manager-${OPS_MANAGER_VERSION_TOKEN}-${SUBDOMAIN_TOKEN}" --zone ${AVAILABILITY_ZONE_1} --quiet
gcloud compute --project "${PROJECT}" images delete "pcf-ops-manager-${OPS_MANAGER_VERSION_TOKEN}" --quiet
rm ${KEYDIR}/ubuntu-key ${KEYDIR}/ubuntu-key.pub
}
load_balancers () {
# tear down load balancers
# TCP Routing
gcloud compute --project "${PROJECT}" forwarding-rules delete "${TCP_LOAD_BALANCER_NAME}" --region ${REGION_1} --quiet
gcloud compute --project "${PROJECT}" target-pools delete "${TCP_LOAD_BALANCER_NAME}" --region ${REGION_1} --quiet
gcloud compute --project "${PROJECT}" addresses delete "${TCP_LOAD_BALANCER_NAME}" --region ${REGION_1} --quiet
# Websockets
gcloud compute --project "${PROJECT}" forwarding-rules delete "${WS_LOAD_BALANCER_NAME}" --region ${REGION_1} --quiet
gcloud compute --project "${PROJECT}" target-pools delete "${WS_LOAD_BALANCER_NAME}" --region ${REGION_1} --quiet
gcloud compute --project "${PROJECT}" addresses delete "${WS_LOAD_BALANCER_NAME}" --region ${REGION_1} --quiet
# HTTP(S)
gcloud compute --project "${PROJECT}" forwarding-rules delete --global "${HTTP_LOAD_BALANCER_NAME}-forwarding-rule" "${HTTP_LOAD_BALANCER_NAME}-forwarding-rule2" --quiet
gcloud compute --project "${PROJECT}" target-https-proxies delete "pcf-router-https-proxy-${SUBDOMAIN_TOKEN}" --quiet
gcloud compute --project "${PROJECT}" target-http-proxies delete "pcf-router-http-proxy-${SUBDOMAIN_TOKEN}" --quiet
gcloud compute --project "${PROJECT}" ssl-certificates delete "pcf-router-ssl-cert-${SUBDOMAIN_TOKEN}" --quiet
gcloud compute --project "${PROJECT}" url-maps delete "${HTTP_LOAD_BALANCER_NAME}" --quiet
gcloud compute --project "${PROJECT}" backend-services remove-backend "${HTTP_LOAD_BALANCER_NAME}" --global --instance-group "pcf-instances-${AVAILABILITY_ZONE_1}-${SUBDOMAIN_TOKEN}" --instance-group-zone "${AVAILABILITY_ZONE_1}" --quiet
gcloud compute --project "${PROJECT}" backend-services remove-backend "${HTTP_LOAD_BALANCER_NAME}" --global --instance-group "pcf-instances-${AVAILABILITY_ZONE_2}-${SUBDOMAIN_TOKEN}" --instance-group-zone "${AVAILABILITY_ZONE_2}" --quiet
gcloud compute --project "${PROJECT}" backend-services remove-backend "${HTTP_LOAD_BALANCER_NAME}" --global --instance-group "pcf-instances-${AVAILABILITY_ZONE_3}-${SUBDOMAIN_TOKEN}" --instance-group-zone "${AVAILABILITY_ZONE_3}" --quiet
gcloud compute --project "${PROJECT}" backend-services delete "${HTTP_LOAD_BALANCER_NAME}" --global --quiet
gcloud compute --project "${PROJECT}" http-health-checks delete "pcf-http-router-health-check-${SUBDOMAIN_TOKEN}" --quiet
gcloud compute --project "${PROJECT}" addresses delete "${HTTP_LOAD_BALANCER_NAME}" --global --quiet
# SSH
gcloud compute --project "${PROJECT}" forwarding-rules delete "${SSH_LOAD_BALANCER_NAME}" --region ${REGION_1} --quiet
gcloud compute --project "${PROJECT}" target-pools delete "${SSH_LOAD_BALANCER_NAME}" --region ${REGION_1} --quiet
gcloud compute --project "${PROJECT}" addresses delete "${SSH_LOAD_BALANCER_NAME}" --region ${REGION_1} --quiet
# remove the instance group that they load balancers depend on
gcloud compute --project "${PROJECT}" instance-groups unmanaged delete "pcf-instances-${AVAILABILITY_ZONE_1}-${SUBDOMAIN_TOKEN}" --zone ${AVAILABILITY_ZONE_1} --quiet
gcloud compute --project "${PROJECT}" instance-groups unmanaged delete "pcf-instances-${AVAILABILITY_ZONE_2}-${SUBDOMAIN_TOKEN}" --zone ${AVAILABILITY_ZONE_2} --quiet
gcloud compute --project "${PROJECT}" instance-groups unmanaged delete "pcf-instances-${AVAILABILITY_ZONE_3}-${SUBDOMAIN_TOKEN}" --zone ${AVAILABILITY_ZONE_3} --quiet
}
dns () {
# clear out the records first so we can remove the zone (apparenlty it won't let me do it)
gcloud dns record-sets transaction start -z "${DNS_ZONE}" --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml" --quiet
# HTTP/S router
HTTP_ADDRESS=`gcloud compute --project "${PROJECT}" --format json addresses describe "${HTTP_LOAD_BALANCER_NAME}" --global | jq --raw-output ".address"`
gcloud dns record-sets transaction remove -z ${DNS_ZONE} --name "*.${PCF_APPS_DOMAIN}" --ttl 300 --type A ${HTTP_ADDRESS} --quiet --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml"
gcloud dns record-sets transaction remove -z ${DNS_ZONE} --name "*.${PCF_SYSTEM_DOMAIN}" --ttl 300 --type A ${HTTP_ADDRESS} --quiet --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml"
# ssh router
SSH_ADDRESS=`gcloud compute --project "${PROJECT}" --format json addresses describe "${SSH_LOAD_BALANCER_NAME}" --region ${REGION_1} | jq --raw-output ".address"`
gcloud dns record-sets transaction remove -z ${DNS_ZONE} --name "ssh.${PCF_SYSTEM_DOMAIN}" --ttl 300 --type A ${SSH_ADDRESS} --quiet --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml"
# websockets router
WS_ADDRESS=`gcloud compute --project "${PROJECT}" --format json addresses describe "${WS_LOAD_BALANCER_NAME}" --region ${REGION_1} | jq --raw-output ".address"`
gcloud dns record-sets transaction remove -z ${DNS_ZONE} --name "doppler.${PCF_SYSTEM_DOMAIN}" --ttl 300 --type A ${WS_ADDRESS} --quiet --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml"
gcloud dns record-sets transaction remove -z ${DNS_ZONE} --name "loggregator.${PCF_SYSTEM_DOMAIN}" --ttl 300 --type A ${WS_ADDRESS} --quiet --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml"
# tcp router
TCP_ADDRESS=`gcloud compute --project "${PROJECT}" --format json addresses describe "${TCP_LOAD_BALANCER_NAME}" --region ${REGION_1} | jq --raw-output ".address"`
gcloud dns record-sets transaction remove -z ${DNS_ZONE} --name "tcp.${SUBDOMAIN}" --ttl 300 --type A ${TCP_ADDRESS} --quiet --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml"
gcloud dns record-sets transaction execute -z ${DNS_ZONE} --quiet --transaction-file="${WORKDIR}/dns-transaction-${DNS_ZONE}.xml"
gcloud dns managed-zones delete ${DNS_ZONE} --quiet
}
security () {
# remove VCAP SSH from metadata provided to all boxen, this will not preserve keys that were added in different ways (FIX!)
gcloud compute --project=${PROJECT} project-info remove-metadata --keys sshKeys --quiet
rm ${KEYDIR}/vcap-key ${KEYDIR}/vcap-key.pub
# remove permissions os the key will delete
gcloud projects remove-iam-policy-binding ${PROJECT} --member "serviceAccount:${SERVICE_ACCOUNT}" --role "roles/editor" --no-user-output-enabled
gcloud projects remove-iam-policy-binding ${PROJECT} --member "serviceAccount:${SERVICE_ACCOUNT}" --role "roles/iam.serviceAccountActor" --no-user-output-enabled
gcloud projects remove-iam-policy-binding ${PROJECT} --member "serviceAccount:${SERVICE_ACCOUNT}" --role "roles/compute.instanceAdmin" --no-user-output-enabled
gcloud projects remove-iam-policy-binding ${PROJECT} --member "serviceAccount:${SERVICE_ACCOUNT}" --role "roles/compute.networkAdmin" --no-user-output-enabled
gcloud projects remove-iam-policy-binding ${PROJECT} --member "serviceAccount:${SERVICE_ACCOUNT}" --role "roles/compute.storageAdmin" --no-user-output-enabled
gcloud projects remove-iam-policy-binding ${PROJECT} --member "serviceAccount:${SERVICE_ACCOUNT}" --role "roles/storage.admin" --no-user-output-enabled
# delete the key
KEYID=`jq --raw-output '.private_key_id' "${key_file}" `
gcloud iam service-accounts --project "${PROJECT}" keys delete "${KEYID}" --iam-account "${SERVICE_ACCOUNT}" --no-user-output-enabled
# delete the service account
gcloud iam service-accounts delete pcf-deployment-${SUBDOMAIN_TOKEN}@${PROJECT}.iam.gserviceaccount.com --quiet
# get rid of the saved passwords
passwords
}
passwords () {
rm "${PASSWORD_LIST}"
}
network () {
# remove the firewall rules I added based on my earlier experimentation
gcloud compute --project "${PROJECT}" firewall-rules delete "pcf-access-bosh-${SUBDOMAIN_TOKEN}" --quiet
gcloud compute --project "${PROJECT}" firewall-rules delete "pcf-access-cloud-controller-${SUBDOMAIN_TOKEN}" --quiet
# remove firewall rule for the IPSec AddOn
gcloud compute --project "${PROJECT}" firewall-rules delete "pcf-ipsec-${SUBDOMAIN_TOKEN}" --quiet
# remove necessary firewall rules
gcloud compute --project "${PROJECT}" firewall-rules delete "pcf-allow-internal-traffic-${SUBDOMAIN_TOKEN}" --quiet
gcloud compute --project "${PROJECT}" firewall-rules delete "pcf-access-opsmanager-${SUBDOMAIN_TOKEN}" --quiet
gcloud compute --project "${PROJECT}" firewall-rules delete "pcf-access-load-balancers-${SUBDOMAIN_TOKEN}" --quiet
gcloud compute --project "${PROJECT}" firewall-rules delete "pcf-access-tcp-load-balancers-${SUBDOMAIN_TOKEN}" --quiet
# remove the a network
gcloud compute --project "${PROJECT}" networks subnets delete "pcf-services-${REGION_1}-${SUBDOMAIN_TOKEN}" --region ${REGION_1} --quiet
gcloud compute --project "${PROJECT}" networks subnets delete "pcf-tiles-${REGION_1}-${SUBDOMAIN_TOKEN}" --region ${REGION_1} --quiet
gcloud compute --project "${PROJECT}" networks subnets delete "pcf-deployment--${REGION_1}-${SUBDOMAIN_TOKEN}" --region ${REGION_1} --quiet
gcloud compute --project "${PROJECT}" networks subnets delete "pcf-infra-${REGION_1}-${SUBDOMAIN_TOKEN}" --region ${REGION_1} --quiet
gcloud compute --project "${PROJECT}" networks delete "pcf-${SUBDOMAIN_TOKEN}" --quiet
}
START_TIMESTAMP=`date`
START_SECONDS=`date +%s`
prepare_env
echo "Started tearing down Cloud Foundry installation in Google Cloud Platform project ${PROJECT} at ${START_TIMESTAMP}..."
setup
# With explicit component arguments, tear down ONLY those components and stop;
# with no arguments, fall through to the full teardown sequence below.
if [ $# -gt 0 ]; then
    while [ $# -gt 0 ]; do
        case $1 in
            network)
                network
                ;;
            security)
                security
                ;;
            vms)
                vms
                ;;
            # "load_balanacers" is kept as an alias for backward compatibility
            # with the original typo; the correct spelling now works too.
            load_balancers | load_balanacers | lbs | balancers)
                load_balancers
                ;;
            dns)
                dns
                ;;
            blobstore)
                blobstore
                ;;
            products)
                products
                ;;
            ops_manager | manager | om)
                ops_manager
                ;;
            * )
                echo "Unrecognized option: $1" 1>&2
                exit 1
                ;;
        esac
        shift
    done
    # Exit AFTER the loop so every requested component is processed; the
    # original `exit` sat inside the loop and silently ignored all but the
    # first argument.
    exit
fi
# Full teardown: components are removed in reverse order of their creation-time
# dependencies (VMs first, the shared network last).
vms
products
blobstore
ops_manager
dns
load_balancers
security
network
# Report wall-clock duration of the whole teardown.
END_TIMESTAMP=`date`
END_SECONDS=`date +%s`
ELAPSED_TIME=`echo $((END_SECONDS-START_SECONDS)) | awk '{print int($1/60)":"int($1%60)}'`
echo "Finished tearing down Cloud Foundry installation in Google Cloud Platform project ${PROJECT} at ${END_TIMESTAMP} (elapsed time ${ELAPSED_TIME})."
| true |
39a26c223ae44912084948002db0e995616d3c86 | Shell | kotaru23/setup | /ubuntu/docker_setup.bash | UTF-8 | 709 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Install Docker CE on Ubuntu from Docker's own apt repository, then run two
# containers to verify the installation.
# Remove older version
sudo apt-get remove docker docker-engine docker.io
sudo apt-get update
# install package to allow apt to use a repository over HTTPS
sudo apt-get install \
apt-transport-https \
ca-certificates \
curl \
software-properties-common
# add Docker's official GPG key
# NOTE(review): apt-key is deprecated on current Ubuntu releases — confirm
# this still works on the target distribution.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
# set up stable repository
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
# install docker CE
sudo apt-get update
sudo apt-get install docker-ce
# check
sudo docker run hello-world
# install
sudo docker run ubuntu
| true |
f58a4b6c41ddc75540c10da34f8604400b0feebb | Shell | rufig/spf | /src/compile | UTF-8 | 456 | 2.984375 | 3 | [] | no_license | #!/bin/sh
#
# $Id$
#
# Compile SP-Forth for linux
# The default options can be overridden in ./compile.ini
cd ${0%/*} # go to the itself file location
# Remove any stale object file from a previous build.
rm spf4.o
make -C posix # generate posix/config.auto.f
cd ..
# Bootstrap: the pre-built spf4orig compiles src/spf.f and emits spf4.o;
# "1 HALT" makes the Forth system exit after compilation.
echo Wait a bit while compiling...
echo 1 HALT | ./spf4orig src/spf.f
cd src
# Compilers built with --enable-default-pie need -no-pie for this 32-bit link.
gcc -v 2>&1 | grep -F --silent -- '--enable-default-pie' && gcc_nopie="-no-pie"
gcc -o spf4 spf4.o -Wl,forth.ld -ldl -lpthread -v -m32 -fno-pie $gcc_nopie
# Install the freshly linked binary next to the sources' parent directory.
mv spf4 ..
| true |
0108efe0e2c3beb664d76491c7176523662caf46 | Shell | igroff/approuter | /templates/wait_for_service | UTF-8 | 493 | 3.328125 | 3 | [] | no_license | #! /usr/bin/env bash
# Poll `perpok` until the perp-supervised service reports healthy, or give up
# after roughly TIMEOUT seconds and exit with perpok's last non-zero status.
#
# Args: $1 perp base dir, $2 uptime (seconds) perpok requires, $3 service name.
# The bare-word defaults (perp_base, ...) are template placeholders only.
TIMEOUT=10
CHECK_INTERVAL=1
RUN_TIME=0
PERP_BASE=${1-perp_base}
SECONDS_TO_WAIT=${2-seconds_to_wait}
INSTANCE_NAME=${3-instance_name}

perpok -b "${PERP_BASE}" -u "${SECONDS_TO_WAIT}" "${INSTANCE_NAME}"
OK_TEST=$?

while [ "${OK_TEST}" != 0 ]; do
    if [ "${RUN_TIME}" -gt "${TIMEOUT}" ]; then
        exit "${OK_TEST}"
    fi
    sleep "${CHECK_INTERVAL}"
    # Shell arithmetic replaces the original `echo | bc` pipeline: no external
    # dependency on bc and no subshell per iteration.
    RUN_TIME=$((RUN_TIME + CHECK_INTERVAL))
    perpok -b "${PERP_BASE}" -u "${SECONDS_TO_WAIT}" "${INSTANCE_NAME}"
    OK_TEST=$?
done
| true |
2179dc8e70a61c740f5da42b2c8e3a937f340be1 | Shell | pizosc/PR-CTICA-17---SCRIPT-APROVISIONAMENT-VAGRANT | /bootstrap.sh | UTF-8 | 1,365 | 3.125 | 3 | [] | no_license | !/bin/bash
# Provision a LAMP-style box: Apache + PHP, the Adminer DB frontend, and a
# MySQL server reachable from outside with a non-interactive root password.
apt-get update
# First, install Apache.
apt-get install -y apache2
# Install the PHP libraries.
apt-get install -y php libapache2-mod-php php-mysql
# Restart Apache.
sudo systemctl restart apache2
# Go to the html document root.
cd /var/www/html
# Download Adminer with wget.
wget https://github.com/vrana/adminer/releases/download/v4.3.1/adminer-4.3.1-mysql.php
# Rename the downloaded php file to adminer.php.
mv adminer-4.3.1-mysql.php adminer.php
# Refresh the package index.
apt-get update
# Install the debconf packages.
apt-get -y install debconf-utils
# Preseed the database root password so the mysql-server install is
# non-interactive.
DB_ROOT_PASSWD=root
debconf-set-selections <<< "mysql-server mysql-server/root_password password $DB_ROOT_PASSWD"
debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $DB_ROOT_PASSWD"
# Install mysql.
apt-get install -y mysql-server
# Make mysqld listen on all interfaces instead of localhost only.
sed -i -e 's/127.0.0.1/0.0.0.0/' /etc/mysql/mysql.conf.d/mysqld.cnf
# Restart mysql.
sudo systemctl restart mysql
# Grant the mysql root user access from any host with the same password.
mysql -uroot mysql -p$DB_ROOT_PASSWD <<< "GRANT ALL PRIVILEGES ON *.* TO root@'%' IDENTIFIED BY '$DB_ROOT_PASSWD'; FLUSH PRIVILEGES;"
# Finally, create a test database.
mysql -uroot mysql -p$DB_ROOT_PASSWD <<< "CREATE DATABASE test;"
| true |
637ed3455c81de4eec45718ac268a4c9afa99d87 | Shell | jameshilliard/NCS_CS_1.1L.10.20_consumer | /NCS_CS_1.1L.10.20_consumer/userspace/private/apps/smd/scripts/smd.sh | UTF-8 | 1,410 | 2.859375 | 3 | [] | no_license | #!/bin/sh
# smd.sh -- start/stop script for the Actiontec control layer.
# "start" also wires up the board's link to the QTN Wi-Fi module over the
# host0 interface (link-local 169.254.1.x addressing) and serves the
# module's firmware over TFTP.
case "$1" in
start)
/bin/init_user.sh NO_WAIT_DATACENTER
#cp /etc/group.manu /etc/group
#cp /etc/passwd.manu /etc/passwd
# echo "Starting CMS smd..."
# /bin/smd
#echo "Starting CMS smd..."
#sleep 20
#killall acsd nas eapd lld2d wlevt wlmngr dsldiagd mcpd dnsproxy ssk smd
# Only accept the QTN module's traffic (169.254.1.2) via host0, and only
# emit our side of the link (169.254.1.1) out of host0.
echo "add ebtables rule for QTN"
ebtables -A INPUT -p ARP --arp-ip-src 169.254.1.2 --in-if ! host0 -j DROP
ebtables -A INPUT -p IPv4 --ip-src 169.254.1.2 --in-if ! host0 -j DROP
ebtables -A OUTPUT -p ARP --arp-ip-src 169.254.1.1 --out-if ! host0 -j DROP
ebtables -A OUTPUT -p IPv4 --ip-src 169.254.1.1 --out-if ! host0 -j DROP
echo "set 169.254.1.1 to interface bcmsw, up interface host0 and add route"
ifconfig bcmsw 169.254.1.1 netmask 255.255.255.248
ifconfig bcmsw up
ifconfig host0 up
# Replace any stale network route with a direct host route to the module.
route del -net 169.254.1.0 netmask 255.255.255.248
route add -host 169.254.1.2 host0
# Board register poke -- purpose not documented here; TODO confirm.
echo "sb 10700067 c0"
sb 10700067 c0
# Serve the firmware images (copied to /var/tmp) over TFTP on the QTN link.
echo "start tftpd"
cp -rf /etc/firmware/* /var/tmp
/usr/bin/tftpd -p 69 -a 169.254.1.1 -s
echo "Starting Actiotnec control layer..."
/bin/ctl_start.sh
/bin/mp_burnin.sh &
exit 0
;;
stop)
echo "Stopping Actiontec control layer..."
# echo "Stopping CMS smd..."
# /bin/send_cms_msg -r 0x1000080D 20
exit 0
;;
*)
echo "$0: unrecognized option $1"
exit 1
;;
esac
| true |
b3fcae0bfe042d4611739dda1233ae8f51aee48d | Shell | bkalapar/deep-learning | /playbooks/templates/dist_driver_infiniband.j2 | UTF-8 | 1,063 | 2.65625 | 3 | [] | no_license | #!/bin/bash
#The main driver for distributed tensorflow. It will facilitate the
#usage of --ps_hosts, --worker_hosts, --task_index, and --job_name
# This is an Ansible/Jinja2 template.  The {% ... %} blocks below expand the
# inventory groups "parameter" (parameter servers) and "worker" into
# comma-separated host:port lists using each host's InfiniBand address, and
# pick this host's --job_name/--task_index from whichever group it is in.
# NOTE: the "{%-" markers strip the preceding whitespace/newline, so the
# rendered mpirun invocation ends up as a single command line.
TRAIN_MODEL=$1
source /opt/DL/tensorflow/bin/tensorflow-activate
export MPI_PATH_ENABLED=1
mpirun -np 1 python $TRAIN_MODEL
{%- if groups['parameter'] %}
--ps_hosts=
{%- for host_name in groups['parameter'] %}
{{ hostvars[host_name]['infiniband_addr'] }}:2222
{%- if groups['parameter']|length -1 != groups['parameter'].index(host_name) %}
,
{%- endif %}
{%- endfor %}
{%- endif %}
{%- if groups['worker'] %}
--worker_hosts=
{%- for host_name in groups['worker'] %}
{{ hostvars[host_name]['infiniband_addr'] }}:2222
{%- if groups['worker']|length -1 != groups['worker'].index(host_name) %}
,
{%- endif %}
{%- endfor %}
{%- endif %}
{% if inventory_hostname in groups['worker'] %}
--job_name=worker --task_index={{ groups['worker'].index(inventory_hostname) }}
{% elif inventory_hostname in groups['parameter'] %}
--job_name=ps --task_index={{ groups['parameter'].index(inventory_hostname) }}
{% endif %}
| true |
060ed02fc8c4cdf88068f959f8e851cb97ddeb3a | Shell | lukedeo/random-tools | /makeproject | UTF-8 | 10,223 | 4.28125 | 4 | [] | no_license | #!/usr/bin/env bash
# makefile generator
#
# TODO:
# - Make this handle ROOT dictionary generation rules
set -eu
# _usage: print the one-line synopsis (basename of $0 plus options).
_usage() {
echo "usage: ${0##*/} [-hrdn] [-p <python library>] [-e <exe prefix>]"
}
# Prefix used for generated executables when none is supplied via -e.
DEFAULT_PREFIX=test-
# _help: print the synopsis followed by the full option reference.
_help() {
_usage
cat <<EOF
Make a project template. If no prefix is given use "$DEFAULT_PREFIX".
If no prefix is given but a python library name is given, only make
a python library.
Requires an empty directory.
Options:
-h: get help
-r: use ROOT
-d: use HDF5
-n: use ndhist (requires HDF5)
-p <library name>: make python lib
-e <executable prefix>: prefix for all executables.
EOF
}
# __________________________________________________________________
# top level run script
# _run: top-level driver.  Parses options, validates the requested feature
# combination, then writes the makefile section by section and generates
# example source files.  Returns nonzero (or exits) on any usage error.
_run() {
local opt
local ADD_ROOT=''
local ADD_HDF=''
local ADD_NDHIST=''
local PYTHON_LIB=''
local PREFIX=''
while getopts ":hrdnp:e:" opt $@; do
case $opt in
h) _help; return 1;;
r) ADD_ROOT=1 ;;
d) ADD_HDF=1 ;;
n) ADD_NDHIST=1 ;;
p) PYTHON_LIB=${OPTARG} ;;
e) PREFIX=${OPTARG} ;;
# handle errors
\?) _usage; echo "Unknown option: -$OPTARG" >&2; exit 1;;
:) _usage; echo "Missing argument for -$OPTARG" >&2; exit 1;;
*) _usage; echo "Unimplemented option: -$OPTARG" >&2; exit 1;;
esac
done
# various checks
# With neither -e nor -p we still build an executable, under DEFAULT_PREFIX.
if [[ ! $PREFIX && ! $PYTHON_LIB ]] ; then
PREFIX=$DEFAULT_PREFIX
echo "WARNING: setting default prefix to '$PREFIX'"
fi
# ndhist is built on HDF5, so -n implies -d.
if [[ $ADD_NDHIST && ! $ADD_HDF ]] ; then
ADD_HDF=1
echo "WARNING: turned on HDF5 because ndhist was enabled"
fi
# The exe prefix and python lib name share the src/ namespace; identical
# names would make the object filters below collide.
if [[ $PREFIX == $PYTHON_LIB ]] ; then
echo "ERROR: python lib and prefix can't be the same..." >&2
echo " maybe add '-' to the end of prefix?" >&2
exit 1
fi
shift $(($OPTIND - 1))
if [[ $@ ]] ; then
_usage
echo "ERROR: should get no args, got '$@', quitting..." >&2
return 1
fi
# Refuse to scaffold into a non-empty directory.
if _files_exist ; then
_usage
echo "ERROR: found files in current dir, quitting..." >&2
return 1
fi
# actually build the project starting here
local MAKEFILE=makefile
_head_comment >| $MAKEFILE
_section "Basic Setup" >> $MAKEFILE
_make_base >> $MAKEFILE
# add objects to be built
_section "Add Top Level Objects" >> $MAKEFILE
if [ $PYTHON_LIB ] ; then
_add_python_objects $PYTHON_LIB >> $MAKEFILE
fi
if [ $PREFIX ] ; then
_add_exe_objects $PREFIX >> $MAKEFILE
fi
# add libraries to paths
if [[ $PYTHON_LIB || $ADD_ROOT || $ADD_HDF || $ADD_NDHIST ]] ; then
_section "Add Libraries" >> $MAKEFILE
fi
if [ $ADD_ROOT ] ; then
_add_root >> $MAKEFILE
fi
if [ $ADD_HDF ] ; then
_add_hdf >> $MAKEFILE
fi
if [ $ADD_NDHIST ] ; then
_add_ndhist >> $MAKEFILE
fi
if [ $PYTHON_LIB ] ; then
_add_python >> $MAKEFILE
fi
# first rule is the call to `all`, then add other build rules
_add_all_call >> $MAKEFILE
_section "Add Build Rules" >> $MAKEFILE
if [ $PYTHON_LIB ] ; then
_add_python_build $PYTHON_LIB >> $MAKEFILE
fi
if [ $PREFIX ] ; then
_add_exe_build >> $MAKEFILE
fi
_add_compile_rule >> $MAKEFILE
_add_dep_gen >> $MAKEFILE
# write example source files
_make_obj_dummy tools
if [ $PYTHON_LIB ] ; then
_make_py_dummy $PYTHON_LIB tools
fi
if [ $PREFIX ] ; then
_make_src_dummy ${PREFIX}main tools
fi
}
# _files_exist: succeed (0) when the current directory contains any entry,
# dot-files included; fail (1) when it is empty.  The shopt changes run in
# the command-substitution subshell, so they do not leak into this shell.
_files_exist () {
files=$(shopt -s nullglob dotglob; echo *)
[[ -n $files ]] && return 0
return 1
}
# _section: emit a two-line makefile section banner: a rule line followed
# by "# <title>" taken from the first argument.
_section() {
printf '%s\n' '# _______________________________________________________________' "# $1"
}
# _head_comment: emit the generated-file banner (generator name, timestamp,
# author).  Written first, via truncation (>|), by _run.
_head_comment() {
cat <<EOF
# makefile for generic c++ project
# generated with \`${0##*/}\` on $(date)
# Author: Dan Guest <dguest@cern.ch>
EOF
}
# _make_base: emit the common makefile skeleton -- directory layout, vpath
# search paths, compiler flags, and the generic object list.  Note every
# literal makefile "$" is written as "\$" so the heredoc does not expand it.
_make_base() {
cat <<EOF
# --- set dirs
BUILD := build
SRC := src
INC := include
DICT := dict
OUTPUT := bin
LIB := lib
# set search path
vpath %.cxx \$(SRC)
vpath %.hh \$(INC)
vpath %.h \$(INC)
vpath %Dict.h \$(DICT)
vpath %Dict.cxx \$(DICT)
# --- set compiler and flags (roll c options and include paths together)
CXX ?= g++
CXXFLAGS := -O2 -Wall -fPIC -I\$(INC) -g -std=c++11
LIBS := # blank, more will be added below
LDFLAGS := # blank, more will be added below
# ---- define objects from files in the SRC directory
GEN_OBJ_SRC := \$(wildcard \$(SRC)/*.cxx)
GEN_OBJ := \$(notdir \$(GEN_OBJ_SRC:%.cxx=%.o))
# this list may be manipulated in other segments further down
GEN_OBJ_PATHS := \$(GEN_OBJ:%=\$(BUILD)/%)
# --- all top level (added further down)
ALL_TOP_LEVEL :=
EOF
}
# _add_hdf: locate the HDF5 install via h5c++ and add its include/lib paths.
_add_hdf() {
cat <<EOF
# --- add HDF5
HDF_INFO := \$(shell h5c++ -showconfig | grep 'Installation point:')
HDF_PATH := \$(strip \$(shell echo \$(HDF_INFO) | cut -d ':' -f 2 ))
ifndef HDF_PATH
\$(error "couldn't find HDF, quitting")
endif
CXXFLAGS += -I\$(HDF_PATH)/include
LIBS += -L\$(HDF_PATH)/lib -Wl,-rpath,\$(HDF_PATH)/lib
LIBS += -lhdf5_cpp -lhdf5
EOF
}
# _add_ndhist: add ndhist flags via its ndhist-config helper.
_add_ndhist() {
cat <<EOF
LIBS += \$(shell ndhist-config --libs)
CXXFLAGS += \$(shell ndhist-config --cflags)
EOF
}
# _add_python: add python3 build flags; PY_LDFLAGS also pulls in LDFLAGS,
# which is why it must be defined after the other library sections.
_add_python() {
cat <<EOF
# --- python config
PY_CONFIG := python3-config
PY_FLAGS := \$(shell \$(PY_CONFIG) --includes)
PY_LIBS := -L\$(shell \$(PY_CONFIG) --prefix)/lib
PY_LIBS += \$(shell \$(PY_CONFIG) --libs)
# define these last because they inherit other LDFLAGS
PY_LDFLAGS := \$(LDFLAGS)
PY_LDFLAGS += \$(PY_LIBS)
PY_LDFLAGS += -shared
EOF
}
# _add_root: add ROOT flags via root-config.
_add_root() {
cat <<EOF
# --- load in root config
ROOTCFLAGS := \$(shell root-config --cflags)
ROOTLIBS := \$(shell root-config --libs)
# ROOTLIBS += -lCore -lTree -lRIO
ROOTLDFLAGS := \$(shell root-config --ldflags)
CXXFLAGS += \$(ROOTCFLAGS)
LDFLAGS += \$(ROOTLDFLAGS)
LIBS += \$(ROOTLIBS)
EOF
}
# _add_all_call: emit the default `all` target; it must be the FIRST rule
# in the makefile, so _run writes it after variables but before rules.
_add_all_call() {
cat <<EOF
# --- first call here
all: \$(ALL_TOP_LEVEL)
EOF
}
# _add_exe_objects ($1 = exe prefix): declare the executables (one per
# \$(EXE_PREFIX)*.cxx) and remove their objects from the generic list.
_add_exe_objects() {
local PREFIX=$1
cat <<EOF
# --- stuff used for the c++ executable
EXE_PREFIX := ${PREFIX}
ALL_EXE_SRC := \$(wildcard \$(SRC)/\$(EXE_PREFIX)*.cxx)
ALL_EXE := \$(notdir \$(ALL_EXE_SRC:%.cxx=%))
ALL_EXE_PATHS := \$(ALL_EXE:%=\$(OUTPUT)/%)
# filter out the general objects
GEN_OBJ_PATHS := \$(filter-out \$(BUILD)/\$(EXE_PREFIX)%.o,\$(GEN_OBJ_PATHS))
# add to all top level
ALL_TOP_LEVEL += \$(ALL_EXE_PATHS)
EOF
}
# _add_exe_build: emit the executable link rule.  "$^"/"$<" are left
# unescaped on purpose -- the shell passes them through literally.
_add_exe_build() {
cat <<EOF
# build exe
\$(OUTPUT)/\$(EXE_PREFIX)%: \$(GEN_OBJ_PATHS) \$(BUILD)/\$(EXE_PREFIX)%.o
@mkdir -p \$(OUTPUT)
@echo "linking $^ --> \$@"
@\$(CXX) -o \$@ $^ \$(LIBS) \$(LDFLAGS)
EOF
}
# _add_python_objects ($1 = module name): declare the python extension
# objects and remove the module object from the generic list.
_add_python_objects() {
PYLIB=$1
cat <<EOF
# --- python top level objects
PY_OBJ := ${PYLIB}.o
PY_OBJ_PATH := \$(PY_OBJ:%=\$(BUILD)/%)
PY_SRC_PATH := \$(PY_OBJ:%.o=\$(SRC)/%.cxx)
PY_SO := \$(LIB)/${PYLIB}.so
# filter out the general objects
GEN_OBJ_PATHS := \$(filter-out \$(BUILD)/${PYLIB}.o,\$(GEN_OBJ_PATHS))
# add to all top level
ALL_TOP_LEVEL += \$(PY_SO)
EOF
}
# _add_python_build ($1 = module name): compile and link rules for the
# shared-object python module.
_add_python_build(){
PYLIB=$1
cat <<EOF
# python object compile
\$(PY_OBJ_PATH): \$(PY_SRC_PATH)
@echo compiling python object \$@
@mkdir -p \$(BUILD)
@\$(CXX) -c \$(CXXFLAGS) \$(PY_FLAGS) $< -o \$@
# python linking
\$(PY_SO): \$(GEN_OBJ_PATHS) \$(PY_OBJ_PATH)
@mkdir -p \$(LIB)
@echo "linking $^ --> \$@"
@\$(CXX) -o \$@ $^ \$(LIBS) \$(PY_LDFLAGS)
EOF
}
# _add_compile_rule: generic .cxx -> .o compile rule.
_add_compile_rule() {
cat <<EOF
# compile rule
\$(BUILD)/%.o: %.cxx
@echo compiling $<
@mkdir -p \$(BUILD)
@\$(CXX) -c \$(CXXFLAGS) $< -o \$@
EOF
}
# _add_dep_gen: automatic dependency (.d) generation, plus clean targets.
_add_dep_gen() {
cat <<EOF
# use auto dependency generation
ALLOBJ := \$(GEN_OBJ)
DEP := \$(BUILD)
ifneq (\$(MAKECMDGOALS),clean)
ifneq (\$(MAKECMDGOALS),rmdep)
include \$(ALLOBJ:%.o=\$(DEP)/%.d)
endif
endif
DEPTARGSTR = -MT \$(BUILD)/\$*.o -MT \$(DEP)/\$*.d
\$(DEP)/%.d: %.cxx
@echo making dependencies for $<
@mkdir -p \$(DEP)
@\$(CXX) -MM -MP \$(DEPTARGSTR) \$(CXXFLAGS) \$(PY_FLAGS) $< -o \$@
# clean
.PHONY : clean rmdep all
CLEANLIST = *~ *.o *.o~ *.d core
clean:
rm -fr \$(CLEANLIST) \$(CLEANLIST:%=\$(BUILD)/%) \$(CLEANLIST:%=\$(DEP)/%)
rm -fr \$(BUILD) \$(DICT) \$(OUTPUT)
rmdep:
rm -f \$(DEP)/*.d
EOF
}
# _make_src_dummy ($1 = exe name, $2 = object name, default "testobj"):
# write an example main() that calls testfunc from the dummy object.
_make_src_dummy() {
mkdir -p src/
local EXE_NAME=$1
local TEST_OBJ=${2-testobj}
cat <<EOF >| src/${EXE_NAME}.cxx
#include "${TEST_OBJ}.hh"
#include <cstdio>
#include <cstdlib>
int main(int argc, char* argv[]) {
int in = 0;
if (argc > 1) in = atoi(argv[1]);
int testint = testfunc(in);
printf("bonjour et %i\n", testint);
return 0;
}
EOF
}
# _make_obj_dummy ($1 = object name): write an example source/header pair
# defining testfunc.  The include guard is the upcased name (with "-"
# mapped to "_") plus "_HH".
_make_obj_dummy() {
mkdir -p src/
mkdir -p include/
local OBJ_NAME=$1
cat <<EOF >| src/${OBJ_NAME}.cxx
#include "${OBJ_NAME}.hh"
#include <stdexcept>
int testfunc(int in) {
if (in == 42) {
throw std::runtime_error("You a nerd");
}
return in + 1;
}
EOF
local GUARD=$(tr '[:lower:]-' '[:upper:]_' <<< $OBJ_NAME)_HH
cat <<EOF >| include/${OBJ_NAME}.hh
#ifndef ${GUARD}
#define ${GUARD}
int testfunc(int in = 0);
#endif
EOF
}
# _make_py_dummy ($1 = module name, $2 = object name, default "testobj"):
# write an example CPython extension module exposing testfunc as "test".
# NOTE(review): the m_name field is hard-coded to "testpy" rather than
# ${MODNAME}; the import name comes from PyInit_${MODNAME}, so this is
# cosmetic, but worth confirming.
_make_py_dummy() {
mkdir -p src/
local MODNAME=$1
local TEST_OBJ=${2-testobj}
cat <<EOF >| src/${MODNAME}.cxx
#include <Python.h>
#include "${TEST_OBJ}.hh"
#include <stdexcept>
static PyObject* py_alg(PyObject *self, PyObject *args)
{
const char* echo_this;
int some_int = 0;
bool ok = PyArg_ParseTuple(args,"s|i:alg", &echo_this, &some_int);
if (!ok) return NULL;
try {
int newret = testfunc(some_int);
printf("hi %s %i\n", echo_this, newret);
return Py_BuildValue("i", newret);
}
catch (const std::exception& e) {
PyErr_SetString(PyExc_Exception, e.what());
return NULL;
}
}
static PyMethodDef methods[] = {
{"test", py_alg, METH_VARARGS,
"don't ask, read the source"},
{NULL, NULL, 0, NULL} /* sentinel */
};
static struct PyModuleDef ${MODNAME} = {
PyModuleDef_HEAD_INIT,
"testpy", /* name of module */
"this be testpy", /* module documentation, may be NULL */
-1, /* size of per-interpreter state of the module,
or -1 if the module keeps state in global variables. */
methods
};
extern "C" {
PyMODINIT_FUNC PyInit_${MODNAME}(void)
{
return PyModule_Create(&${MODNAME});
}
}
EOF
}
# Dispatch to _run; "$@" must be quoted so arguments containing spaces or
# glob characters reach getopts intact (unquoted $@ re-splits them).
_run "$@"
| true |
f7da4d80ea4524ca67e01682e86eb05df822084b | Shell | christie-ga/IMSGCexomechip | /src/basic_qc/scripts/qc.sh | UTF-8 | 2,470 | 3.453125 | 3 | [] | no_license | #!/bin/bash -E
set -e

# qc.sh -- driver for the exome-chip QC pipeline.  Reads paths and per-step
# toggles from a meta file ($1) and runs each enabled step in sequence.
#
# Fix: the original used `alias plink='plink --noweb --silent
# --allow-no-sex'`, but aliases are not expanded in non-interactive bash
# shells, so the flags were silently ignored.  A shell function (exported
# for child bash shells) does what the alias was meant to do.
plink() { command plink --noweb --silent --allow-no-sex "$@"; }
export -f plink

## Define variables from Meta File
META=$1
OUTPUT_DIR=$(grep "Output Directory:" "$META" | awk '{print $3}')
PHENO=$(grep "Phenotype File:" "$META" | awk '{print $3}')
COHORT=$(grep "Cohort File:" "$META" | awk '{print $3}')
GAP_SUMMARY=$(grep "GAP Summary File:" "$META" | awk '{print $4}')
PROJECT_NAME=$(grep "Project Name:" "$META" | awk '{print $3}')

## Define directories
wd=$(grep "Script Directory:" "$META" | awk '{print $3}')
scrd=${wd}/scripts/
eigd=${wd}/eigenstrat/
hlad=${wd}/hlaImputation/
cond=${wd}/controls/
supd=${wd}/suppl/

# Verify every required directory exists before doing any work; report all
# missing ones, then bail (same order and messages as before).
missing=0
for d in "$scrd" "$supd" "$eigd" "$hlad" "$cond"; do
if [ ! -d "$d" ]; then
echo "$d does not exist"; missing=1
fi
done
if [ $missing -eq 1 ]; then
exit 1
fi

## Initialize Log File
LOG=$(sh "${scrd}makeLogFile.sh" "$META")
i=1

## Check for Errors: with `set -e` plus this ERR trap (and -E on the
## shebang), any failing command logs a message and aborts the pipeline.
function die(){
echo "Exited pipeline." >> "$LOG"
echo "Exited pipeline. See $LOG for more details."
exit 1
}
trap die ERR

# run_step <script> <flag>: run one pipeline stage when its flag is "1",
# passing the meta file, the stage number and the log file.  The stage
# counter only advances for stages that actually ran.  (Uses $((i+1))
# instead of `let i++`, whose nonzero status can trip `set -e`.)
run_step() {
if [ "$2" -eq 1 ]; then
sh "${scrd}$1" "$META" $i "$LOG"
i=$((i+1))
fi
}

## Execute shell scripts based on options in meta file
CHECK_INPUT=1 # Set to 1 by default
run_step checkInput.sh "$CHECK_INPUT"
SAMPLE_QC=$(grep "SAMPLE_QC:" "$META" | awk '{print $2}')
run_step sampleQC.sh "$SAMPLE_QC"
RELATEDNESS_CHECK=$(grep "RELATEDNESS_CHECK:" "$META" | awk '{print $2}')
run_step relatednessCheck.sh "$RELATEDNESS_CHECK"
SEX_CHECK=$(grep "SEX_CHECK:" "$META" | awk '{print $2}')
run_step sexCheck.sh "$SEX_CHECK"
MENDEL_CHECK=$(grep "MENDEL_CHECK:" "$META" | awk '{print $2}')
run_step mendelCheck.sh "$MENDEL_CHECK"
PCA_ROUND1=$(grep "PCA_ROUND1:" "$META" | awk '{print $2}')
run_step pcaRound1.sh "$PCA_ROUND1"
SITE_QC=$(grep "SITE_QC:" "$META" | awk '{print $2}')
run_step siteQC.sh "$SITE_QC"

date=$(date +"%m/%d/%Y %T")
echo "Finished pipeline ... $date" >> "$LOG"
echo "---------------------------------------------------------" >> "$LOG"
d5932ee81657926ad692c45445d5e07c3de4ca69 | Shell | stick-fish/basicScripts | /zone_transfer.sh | UTF-8 | 336 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# zone_transfer.sh -- attempt a DNS zone transfer (AXFR) from every
# authoritative name server of a domain; "has address" records are
# appended to a file named after the domain.
# Usage: ./zone_transfer.sh <domain>
if [ -z "$1" ]
then
echo "***DNS TRANSFER SCRIPT***"
echo "Enter script name followed by hostname to check"
echo "Example --> $0 <hostname>"
else
domain=$1
# Fix: the original looped over EVERY word of `host -t ns` output, so the
# literal words "name" and "server" were also tried as name servers.  Only
# take the last field of lines that actually report a name server.
for ns in $(host -t ns "$domain" | awk '/name server/ {print $NF}'); do
host -l "$domain" "$ns" | grep "has address" | sed s/"has address"/" "/ >> "$domain"
done
echo "| -- Output saved to $domain --|"
fi
| true |
8f8f46b1c9362aa4fcdc74bc34a1d2755fc7ba95 | Shell | Vivaracho/vboxsf | /scripts/guest-build | UTF-8 | 3,743 | 4 | 4 | [] | no_license | #!/bin/bash
# guest-build -- build the VirtualBox guest-addition kernel modules against
# an android_x86 tree and package them (see functions below).

work_dir=work
guest=$work_dir/src
android_product_dir=$1/out/target/product/
kernel_out_dir=obj/kernel

# Fix: usage() was defined AFTER its first call site, so running with no
# argument printed "usage: command not found" and the script carried on.
# Define it before the argument check.
usage() {
echo "Usage: guest-build <path/to/android_x86>"
exit 1
}

if [ -z "$1" ]; then
usage
fi
pwd=`pwd`
script_dir="$( cd "$( dirname "$0" )" && pwd )"
work_dir=`readlink -m $work_dir`
android_dir=`readlink -m $1`
kernel_src_dir=$android_dir/kernel
# $1 - guest directory
# $2 - android arch
# create_tar_gz: stage the built .ko modules, helper scripts and (when
# present) the arch's prebuilt binaries into a temp dir, produce a .tar.gz,
# then rewrap the same payload as a makeself self-extracting .run installer.
create_tar_gz() {
local archive_name
local guest_name
guest_name=`basename $1`
archive_name=$work_dir/$guest_name-$2
temp_dir=`mktemp -d -p "$work_dir"`
if [[ ! "$temp_dir" || ! -d "$temp_dir" ]]; then
echo "Could not create temp dir"
return
fi
cp {vboxsf.ko,vboxvideo.ko,vboxguest.ko} $temp_dir
mkdir $temp_dir/src
mkdir $temp_dir/scripts
cp $script_dir/../src/vboxsf.sh $temp_dir/src
cp $script_dir/../src/init.vboxsf.rc $temp_dir/src
cp $script_dir/inst $temp_dir/scripts
# Optional prebuilt pieces from the android product tree.
if [ -f $android_product_dir/$2/system/lib/libinputflinger.so ]; then
cp $android_product_dir/$2/system/lib/libinputflinger.so $temp_dir
fi
if [ -f $android_product_dir/$2/system/bin/mount.vboxsf ]; then
cp $android_product_dir/$2/system/bin/mount.vboxsf $temp_dir
fi
# echo -e "vboxsf.ko\nvboxvideo.ko\nvboxguest.ko\n$script_dir/../src/init.vboxsf.rc\n$script_dir/../src/vboxsf.sh\n$script_dir/../scripts/inst" | \
# cpio -o --quiet --owner=0:0 --no-absolute-filenames -O $archive_name.tar -H tar
tar c --owner=0 --group=0 -zf $archive_name.tar.gz -C $temp_dir ./
rm -rf $temp_dir
# if [ -f $archive_name.tar ]; then
# rm -f $archive_name.tar.gz 2> /dev/null
# gzip -9 $archive_name.tar
# fi
# create self extracting archive
temp_dir=`mktemp -d -p "$work_dir"`
if [[ ! "$temp_dir" || ! -d "$temp_dir" ]]; then
echo "Could not create temp dir"
return
fi
tar xfz $archive_name.tar.gz -C $temp_dir
$script_dir/../makeself/makeself.sh $temp_dir $work_dir/$guest_name-android-$2.run "Android VirtualBox Guest Additions" scripts/inst
rm -rf $temp_dir
}
# Based on idea from https://github.com/coyun/android-virtualbox/issues/18
# $1 - guest directory
# $2 - android arch
# $3 - module output dir
# build_guest: run the guest module build in $1 against the android kernel
# tree; only packages the result when all three modules were produced.
build_guest() {
echo "Building $1 ..."
cd $1
make BUILD_TARGET_ARCH=$2 KERN_DIR=$kernel_src_dir KERN_VER=$kver O=$3
if [ -f vboxguest.ko -a -f vboxsf.ko ]; then
if [ -f vboxvideo.ko ]; then
echo "Build ok"
create_tar_gz $1 $2
fi
else
echo "Build failed"
fi
cd $cur_dir
}
# $1 - android arch: x86, x86_64
# $2 - vbox arch: x86, amd64
# build_all_guests: build every guest source tree for one arch (skipping
# arches with no kernel output), then collect the resulting .run installers
# into a single ISO image.
build_all_guests() {
local kernel_arch_dir
local mount_vboxsf
kernel_arch_dir=$android_product_dir/$1
mount_vboxsf=$kernel_arch_dir/system/bin/mount.vboxsf
if [ ! -d $kernel_arch_dir ]; then
echo "Android_x86 installation has no kernel built for $1 arch, skipping"
return
fi
for g in $guests; do
build_guest $g $2 $kernel_arch_dir/$kernel_out_dir
done
# create iso image with the self extracting archives
temp_dir=`mktemp -d -p "$work_dir"`
if [[ ! "$temp_dir" || ! -d "$temp_dir" ]]; then
echo "Could not create temp dir"
return
fi
cp $work_dir/vboxguest-*-android-*.run $temp_dir
genisoimage -J -R -o $work_dir/VBoxGuestAdditionsAndroid.iso $temp_dir
rm -rf $temp_dir
}
# Main sequence: sanity-check the android tree, detect the kernel version,
# locate the guest driver sources, then build both architectures.
if [ ! -f $kernel_src_dir/Android.mk ]; then
usage
fi
cur_dir=`pwd`
cd $android_dir/kernel
kver=`make kernelversion`
cd $cur_dir
if [ x$kver = x ]; then
echo "Unable to detect kernel version"
usage
fi
echo "Kernel version $kver detected"
guests=`ls -d $guest/vboxguest* 2> /dev/null`
if [ ! -d $guest ]; then
echo "No VirtualBox guest driver sources installed. Use the getguest script to extract"
echo "guest driver sources from the VBoxGuestAdditions.iso image."
usage
fi
build_all_guests x86 x86
build_all_guests x86_64 amd64
| true |
8bbc4800cbc26a0b08de869a58dffcb276f7d366 | Shell | lucastorri/irclog | /dist.sh | UTF-8 | 340 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# dist.sh -- build a distributable zip of this Play application: run
# `play dist`, unpack the versioned zip it produces, flatten it into dist/,
# add the project's bin/ scripts, and re-zip without the version suffix.
cd `dirname $0`
rm -rf dist
mkdir dist
play dist
cd `dirname "$0"`/dist
# Versioned archive produced by play, e.g. myapp-1.0.zip.
# NOTE(review): the unescaped "." in `grep .zip` matches any character, so
# this presumes no other similarly named files exist in dist/.
ZIP=`ls -1 | grep .zip`
# Directory name inside the archive (name with ".zip" stripped).
UNZIP_DIR=`echo $ZIP | sed 's/\(.*\)\.zip*/\1/'`
# Final name without the version part, e.g. myapp.zip.
DIST_ZIP=`echo $ZIP | sed 's/\(.*\)-.*/\1.zip/'`
unzip $ZIP
rm $ZIP
mv $UNZIP_DIR/* .
rmdir $UNZIP_DIR
mv lib lib-web
rm start
cp -r ../bin .
# Ship conf/ inside bin/.
mv conf bin
zip -r $DIST_ZIP *
cd -
| true |
6dcff13051520af706296ae5a55479e0c912442b | Shell | monishbairagi/CSE_5TH_SEM | /OS/Example_Shell_Programs/g.sh | UTF-8 | 161 | 3.25 | 3 | [] | no_license | echo "enter three numbers";
# Read the three numbers announced by the prompt above, then report the
# largest: a wins when it beats both others, otherwise compare b with c.
read a
read b
read c
if [ "$a" -gt "$b" ] && [ "$a" -gt "$c" ]
then
echo "$a is big"
elif [ "$b" -gt "$c" ]
then
echo "$b is big"
else
echo "$c is big"
fi
| true |
ab00849ea58067c83dd39f6db3c36562566b65ad | Shell | tov/CASS | /bin/countdown-timer.sh | UTF-8 | 375 | 3.375 | 3 | [] | no_license | #!/bin/sh
# Count down `duration` seconds, printing the remaining time every `step`
# seconds (to stderr), padded so the values tile evenly across the terminal.
. "$(dirname "$0")/.CASS"
eval "$(getargs duration step)"
# Field width: digits of the full duration plus one separator column...
width=$(printf %d $duration | wc -c)
(( ++width ))
# ...then widened until the terminal width is an exact multiple, so each
# printed line holds a whole number of fields.
while (( COLUMNS % width )); do
    (( ++width ))
done
while (( duration > 0 )); do
    # Final tick: shrink the step so we never sleep past zero.
    # Fix: this was `step=duration`, which stores the literal string
    # "duration"; arithmetic below still resolved it, but `sleep $step`
    # then failed with "invalid time interval 'duration'".
    if (( duration < step )); then
        step=$duration
    fi
    printf >&2 "%-${width}d" $duration
    sleep $step
    duration=$(( duration - step ))
done
echo
| true |
c024e797d46494efdaa90106c7a86db65450b9c7 | Shell | shiftycats/bashrc | /bash.d/10-bindings.sh | UTF-8 | 429 | 2.75 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Use vi-style line editing in bash.
set -o vi
# Ctrl-J/Ctrl-K search history for lines starting with the current input;
# Ctrl-H/Ctrl-L move the cursor left/right.
bind "'\C-j': history-search-backward"
bind "'\C-k': history-search-forward"
bind "'\C-h': backward-char"
bind "'\C-l': forward-char"
# Get last command with `!!<space>`.
bind Space:magic-space
# Change cursor depending on the mode.
bind "set show-mode-in-prompt on"
bind "set vi-cmd-mode-string \1\e[1 q\2"
bind "set vi-ins-mode-string \1\e[5 q\2"
# Restore cursor.
# Strip any previously appended sequence from PS0 first, so re-sourcing
# this file does not stack duplicates.
PS0="${PS0/\\e[5 q/}"
PS0+="\e[5 q"
506c9693c24dcb7850556e529cfc58bfeb446c07 | Shell | agon-lelouet/ioc-tp2 | /ledbp/rmdev~ | UTF-8 | 107 | 2.640625 | 3 | [] | no_license | #!/bin/sh
# rmdev -- unload a driver module and remove its device node.
# Usage: rmdev <module>
module=$1
# Refuse to run without a module name; the bare rmmod error was cryptic.
if [ -z "$module" ]; then
echo "usage: $0 <module>" >&2
exit 1
fi
/sbin/rmmod "$module" || exit 1
rm -f "/dev/$module"
echo "=> Device /dev/$module removed"
| true |
84e69589cf16a12fc39885133d6f6af29d97159f | Shell | jgrumboe/random-logger | /entrypoint.sh | UTF-8 | 1,530 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Emit fake Spring Boot log lines at random intervals.
#   $1 = minimum delay in ms, $2 = maximum delay in ms,
#   $3 = optional number of lines to emit (default -1 = run forever).
n=-1
c=0
if [ -n "$3" ]
then
n=$3
fi
# c counts emitted lines; with n=-1 the equality never holds, so the loop
# runs forever.
while [ $n -ne $c ]
do
# Random wait between $1 and $2 milliseconds; bc converts ms to seconds.
WAIT=$(shuf -i $1-$2 -n 1)
sleep $(echo "scale=4; $WAIT/1000" | bc)
# Pick one of the five log templates below.
I=$(shuf -i 1-5 -n 1)
# Build default Spring Boot logline according to https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-logging-format
D=`date +"%Y-%m-%d %H:%M:%S.000"`
case "$I" in
"1") echo "$D ERROR 12345 --- [main] org.random.logger : An error is usually an exception that has been caught and not handled."
;;
"2") echo "$D INFO 12345 --- [main] org.random.logger : This is less important than debug log and is often used to provide context in the current task."
;;
"3") echo "$D WARN 12345 --- [main] org.random.logger : A warning that should be ignored is usually at this level and should be actionable."
;;
"4") echo "$D DEBUG 12345 --- [main] org.random.logger : This is a debug log that shows a log that can be ignored."
;;
"5") echo "$D ERROR 12345 --- [main] org.random.logger : Exception in thread \"main\" java.lang.RuntimeException: Something has gone wrong, aborting!
at com.myproject.module.MyProject.badMethod(MyProject.java:22)
at com.myproject.module.MyProject.oneMoreMethod(MyProject.java:18)
at com.myproject.module.MyProject.anotherMethod(MyProject.java:14)
at com.myproject.module.MyProject.someMethod(MyProject.java:10)
at com.myproject.module.MyProject.main(MyProject.java:6)"
;;
esac
c=$(( c+1 ))
done
| true |
c167a09942bf6309bc3cfc134f5eefa768ecd319 | Shell | nullnilaki/RISCos_INST | /inst_mkstuff | UTF-8 | 3,850 | 3.9375 | 4 | [] | no_license | #!/bin/sh -x
#
# $Header: inst_mkstuff.sh,v 2.1.1.2.1.2 90/07/11 18:23:00 hawkes Exp $
#
# ---------------------------------------------------
# | Copyright (c) 1989 MIPS Computer Systems, Inc. |
# | All Rights Reserved. |
# ---------------------------------------------------
# RISC/os installer fragment: on a scratch install it optionally creates
# the crash-dump directory, interactively configures basic networking
# files under /mnt/etc, and fixes up the /mnt/dev/usr device node.
# NOTE(review): written for the old RISC/os Bourne shell -- `echo "\n"`,
# `expr`, and `[ "$var" -eq "" ]` all rely on that shell's behaviour and
# are deliberately left untouched.
# Ensure that the environment has been set
case "$Instenv" in
"") . inst_env ;;
esac
# check if install scratch
if [ "$Install" = "scratch" -a "$Os" = "y" ]
then
# See if we should make the /usr/adm/crash crash directory
if [ -d /mnt/usr/adm/crash ]
then
echo ""
else
section "making special directories"
echo "Default crash directory is /usr/adm/crash"
Defcrash=${Defcrash=/mnt/usr/adm/crash}
ask "Set name of crash directory" "$Defcrash"
Defcrash=$Ans
ask "Should we create this directory for kernel core dumps" y y n
case $Ans in
y) mkdir $Defcrash ;;
*) echo "Not created" ;;
esac
echo "\n"
fi
# ask user if they wish to change swap partition configuration
echo ""
ask "Do you wish to configure the network" n y n
if [ "$Ans" != "n" ]
then
if [ -f /mnt/etc/local_hostname -a -f /mnt/etc/local_domainname ]
then
echo ""
else
section "making special network files"
fi
# See if we should make the local_hostname file
if [ -f /mnt/etc/local_hostname ]
then
# determine if it has junk in it
grep "no_hostname_set" /mnt/etc/local_hostname > /dev/null 2> /dev/null
HOSTNAME=$?
if [ `expr $HOSTNAME` -eq 0 ]
then
# remove it if it does and allow for a good one to be setup
touch /mnt/etc/local_hostname
rm /mnt/etc/local_hostname
fi
fi
# now build a good local_hostname if it does not exist
if [ -f /mnt/etc/local_hostname ]
then
echo ""
else
if [ "$Hostname" -eq "" ]
then
Hostname=${Hostname=no_hostname}
fi
ask "Set hostname" "$Hostname"
Hostname=$Ans
if [ "$Netmask" -eq "" ]
then
Netmask=${Netmask=0xffff0000}
fi
ask "Set netmask" "$Netmask"
Netmask=$Ans
if [ "$Broadcast" -eq "" ]
then
Broadcast=${Broadcast=255.255.255.0}
fi
ask "Set broadcast address" "$Broadcast"
Broadcast=$Ans
if [ "$NetAddr" -eq "" ]
then
NetAddr=${NetAddr=127.1.0.0}
fi
ask "Set net address" "$NetAddr"
NetAddr=$Ans
ask "Should we create the /etc/local_hostname file" y y n
case $Ans in
y) cat >/mnt/etc/local_hostname <<===EOF===
$Hostname
netmask $Netmask broadcast $Broadcast
===EOF===
;;
*) echo "Not Created" ;;
esac
echo ""
echo "$Hostname $NetAddr"
ask "Should we add the above entry to the /etc/hosts file" y y n
case $Ans in
y) cat >>/mnt/etc/hosts <<===EOF===
$NetAddr $Hostname
===EOF===
;;
*) echo "Not Added" ;;
esac
echo "\n"
fi
# See if we should make the local_domainname file
if [ -f /mnt/etc/local_domainname ]
then
echo ""
else
Domainname=${Domainname=mips.com}
ask "Set domain name" "$Domainname"
Domainname=$Ans
ask "Should we create the /etc/local_domainname file" y y n
case $Ans in
y) cat >/mnt/etc/local_domainname <<===EOF===
$Domainname
===EOF===
;;
*) echo "Not created" ;;
esac
echo "\n"
fi
# ensure that miniroot /dev/usr is same as target /mnt/dev/usr
# get current target dev usr
# (`set` loads the ls -l fields into $1..; $5/$6 are major,minor numbers)
set `ls -l /mnt/dev/usr`
USRpart=`expr $6 % 16`
USRmajor=`expr $5 : '\(.*\),'`
# get current minroot dev usr
set `ls -l /dev/usr`
Tmp_part=`expr $6 % 16`
Tmp_major=`expr $5 : '\(.*\),'`
# determine if scratch or update
if [ "$Install" = "scratch" -a "$Os" = "y" ]
then
# handle as scratch
# check if same and correct if need be
if [ "$USRpart" != "$Tmp_part" ]
then
rm -f /mnt/dev/usr
ln /mnt/dev/dsk/$TGdsk3$TGusrCont${TGusrDrive}s$Tmp_part /mnt/dev/usr
fi
fi
fi
fi
| true |
5467bfd4be7ddf2d7a89c4bed2bd2ce1ba1187e9 | Shell | sudiptac/chalice | /results/plots/AES/seq_access_key_bound_script | UTF-8 | 603 | 2.84375 | 3 | [] | no_license | #!/bin/bash
grep -inr "\[++\]" $1 > scripts/leak
while read line; do
access=$(echo $line | cut -f10 -d" ")
byte=$(echo $line | cut -f8 -d" ")
echo $access
byte=$(echo $byte | rev | cut -c 2- | rev)
echo $byte
echo $line >> temp/record_byte_leak_from_hit_sequence_${access}_${byte}
done <scripts/leak
grep -inr "\[--\]" $1 > scripts/leak
while read line; do
access=$(echo $line | cut -f10 -d" ")
byte=$(echo $line | cut -f8 -d" ")
echo $access
byte=$(echo $byte | rev | cut -c 2- | rev)
echo $byte
echo $line >> temp/record_byte_leak_from_miss_sequence_${access}_${byte}
done <scripts/leak
| true |
e481a915386c145f80b229faafa2b765968d84a4 | Shell | pandeybk/dockerfiles | /dante/docker-entrypoint.sh | UTF-8 | 1,712 | 3.96875 | 4 | [] | no_license | #!/bin/sh
kill_sockd() {
killall -9 sockd >/dev/null 2>&1
sleep 2
killall -9 sockd >/dev/null 2>&1
}
onexit() {
echo 'Terminating sockd process by signal (SIGINT, SIGTERM, SIGKILL, EXIT)' 2>&1
kill_sockd
echo 'sockd proces terminated' 2>&1
exit 0
}
trap onexit SIGINT SIGTERM SIGKILL EXIT
# Print message to the stderr if ENV variable DEBUG set to 'true'
debug() {
local MESSAGE=$1
echo -e ${MESSAGE} 2>&1
}
## Creating dante socks server config in /etc/sockd.conf
create_dante_config() {
debug "Generating dante config in /etc/sockd.conf"
cat >/etc/sockd.conf <<EOL
logoutput: stderr
internal: ${SOCKS_LISTEN} port = ${SOCKS_PORT}
external: ${SOCKS_GATEWAY}
# auth with user login, passwd
socksmethod: ${SOCKS_AUTH}
client pass {
from: 0.0.0.0/0 to: 0.0.0.0/0
log: error # connect disconnect iooperation
}
socks pass {
from: 0.0.0.0/0 to: 0.0.0.0/0
command: bind connect udpassociate
log: error # connect disconnect iooperation
}
# generic pass statement for incoming connections/packets
# because something about no support for auth with bindreply udpreply ?
socks pass {
from: 0.0.0.0/0 to: 0.0.0.0/0
command: bindreply udpreply
log: error # connect disconnect iooperation
}
EOL
}
create_dante_config
# if SOCKS_AUTH is 'username' and both SOCKS_USER and SOCKS_PASSWORD defined, create that user
if [[ "${SOCKS_AUTH}" == "username" && "${SOCKS_USERNAME}" != "" && "${SOCKS_PASSWORD}" != "" ]]; then
adduser -D -s /bin/false ${SOCKS_USERNAME}
echo "${SOCKS_USERNAME}:${SOCKS_PASSWORD}" | chpasswd
fi
if [ "$@" != "" ]; then
exec "$@"
else
while true
do
sockd -f /etc/sockd.conf
debug 'Some issue with sockd, restarting'
kill_sockd
done
fi
| true |
66d9c07914492606bb860a46796c7cf8e24e85f6 | Shell | dedetmix/gmt5sar2stamps_sbas | /mt_prep_gmtsar_sbas | UTF-8 | 1,991 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# NI, 08.02.2018 (noorlaila@geodesy.its.ac.id)
# mt_prep gmtsar for SBAS configuration
############ set parameters ################################################
region="11000/15000/10500/11500"
R=3
A=2
ov_R=50
ov_A=200
crop=/home/isya/APPS/ciloto/Sentinel1/batch_asc/stack_diff/crop
threshold="0.5"
raw=/home/isya/APPS/ciloto/Sentinel1/batch_asc/raw
raw_orig=/home/isya/APPS/ciloto/Sentinel1/batch_asc/raw_orig
topo=/home/isya/APPS/ciloto/Sentinel1/batch_asc/topo
SLC=/home/isya/APPS/ciloto/Sentinel1/batch_asc/raw
heading=auto # for descending, for ascending: -12.00707218611660e
master_date=20160123
suffix=F2
############################################################################
# file input (must be put on PS folder):
# date_no_master.txt
# intf_list.in
rm -r -f SMALL_BASELINES patch_reg cands_*
mkdir patch_reg
cd patch_reg
devide_region.sh $region $R $A $ov_R $ov_A
cd ..
mkdir SMALL_BASELINES
cd SMALL_BASELINES
matlab -nojvm -nosplash -nodisplay < $STAMPS/matlab/sb_parms_initial.m > sb_parms_initial.log
cd ..
rm -r -f SMALL_BASELINES/PATCH_*
ln -f -s $topo/master.PRM .
# read azimuth heading angle from the master image
if [ $heading == auto ]; then
heading=$(grep platformHeading $raw_orig/*$master_date*.xml | awk -F">||<" '{print $3}')
fi
p=1
while read region
do
mt_extract_info_gmtsar_sbas $raw $raw_orig $topo $SLC $heading $master_date
mt_extract_cands_gmtsar_sbas $crop $threshold $region $suffix
mkdir -p cands_$p
mv cands_old.grd cands_"$p"/.
mv cands0.grd cands_"$p"/.
mv cands.grd cands_"$p"/.
patch=$(echo PATCH_$p)
mkdir SMALL_BASELINES/$patch
mv pscands* SMALL_BASELINES/$patch/.
ln -s -f $PWD/ifgday.1.in SMALL_BASELINES/$patch/.
cd SMALL_BASELINES/$patch
correct_forum_sbas
cd ../..
(( p++ ))
done < patch_reg/PATCH.loc
rsync -av patch_reg/PATCH_* SMALL_BASELINES/.
cp patch_reg/width.txt SMALL_BASELINES/.
cp patch_reg/len.txt SMALL_BASELINES/.
cd SMALL_BASELINES
ln -f -s ../*.in .
ls -d PATCH_* > patch.list
cd ..
| true |
d02bf83005ca1f8f01dfa949b4d5d2dfd5525a74 | Shell | rroseselavy42/dotfiles | /bashrc | UTF-8 | 1,569 | 2.515625 | 3 | [] | no_license | function o()
{
xdg-open "$1"
}
function chc()
{
xdg-open "$HOME/ownCloud/Palante Tech Shared/Administrative/Bookkeeping/hours worked charts 2015.ods"
}
function cr()
{
gpg --quiet --no-tty -d "$HOME/ownCloud/Palante Tech Shared/Credentials/"$1'_credentials.txt.gpg' 2> /dev/null
}
function cro()
{
gpg -do '/tmp/'$1'_credentials.txt' "$HOME/ownCloud/Palante Tech Shared/Credentials/"$1'_credentials.txt.gpg'
vi '/tmp/'$1'_credentials.txt'
}
function cre() {
# gpg -eso "$HOME/ownCloud/Palante Tech Shared/Credentials/"$1'_credentials.txt.gpg' -r 05431C1FC47B97F5 -r 0A279E082B64B3CA -r 246DB6ED051F78D6 -r C1983031ABC56AB1 -r A5C77224F7958C93 '/tmp/'$1'_credentials.txt'
gpg -eso "$HOME/ownCloud/Palante Tech Shared/Credentials/"$1'_credentials.txt.gpg' -r 47AF31A6DE800B77 -r 05431C1FC47B97F5 -r 0A279E082B64B3CA -r 246DB6ED051F78D6 -r C1983031ABC56AB1 -r A5C77224F7958C93 '/tmp/'$1'_credentials.txt'
rm /tmp/*_credentials.txt
}
function crg() {
gpg --quiet -d "$HOME/ownCloud/Palante Tech Shared/Credentials/"$1'_credentials.txt.gpg' | grep git -A 1
}
# Shortcuts to the shared ownCloud folders.
# NOTE(review): globs do not expand in assignments — the pattern is stored
# literally and only globs where the variable is used unquoted; confirm intended.
export pts=~/ownCloud/Shared/Palante*Shared
export cl=~/ownCloud/Shared/Palante*Shared/Current*Projects
# me — open a background VNC session to host B1RNML2 at 80% scale with all
# output discarded. NOTE(review): the echoed string is consumed by -autopass
# (password read from stdin) — confirm.
function me() {
    echo "hextile" | ssvncviewer -scale .8 -autopass -encodings "zywrle tight hextile copyrect" B1RNML2 &> /dev/null &
}
export TERM=xterm-256color
alias gam="/home/jessie/bin/gam/gam"
# Add RVM to PATH for scripting. Make sure this is the last PATH variable change.
export PATH="$PATH:$HOME/.rvm/bin"
# Function form of the gam alias so it also works where aliases are not expanded.
gam() { "/home/jessie/bin/gam/gam" "$@" ; }
| true |
da9486d4154c0be82928f4da02a3bf0ff5778cb6 | Shell | Undomyr/aryalinux | /applications/texlive.sh | UTF-8 | 6,376 | 2.5625 | 3 | [] | no_license | #!/bin/bash
set -e
set +h
. /etc/alps/alps.conf
. /var/lib/alps/functions
SOURCE_ONLY=n
DESCRIPTION="br3ak Most of TeX Live can be built from source without a pre-existingbr3ak installation, but xindy (forbr3ak indexing) needs working versions of <span class=\"command\"><strong>latex</strong> and <span class=\"command\"><strong>pdflatex</strong> when configure is run,br3ak and the testsuite and install for <span class=\"command\"><strong>asy</strong> (for vector graphics) willbr3ak fail if TeX has not already been installed. Additionally,br3ak biber is not provided within thebr3ak texlive source.br3ak"
SECTION="pst"
VERSION=20160523
NAME="texlive"
#REC:gs
#REC:fontconfig
#REC:freetype2
#REC:gc
#REC:graphite2
#REC:harfbuzz
#REC:icu
#REC:libpaper
#REC:libpng
#REC:poppler
#REC:python2
#REC:ruby
#REC:xorg-server
cd $SOURCE_DIR
URL=ftp://tug.org/texlive/historic/2016/texlive-20160523b-source.tar.xz
if [ ! -z $URL ]
then
wget -nc http://ftp.lfs-matrix.net/pub/blfs/conglomeration/texlive/texlive-20160523b-source.tar.xz || wget -nc ftp://tug.org/texlive/historic/2016/texlive-20160523b-source.tar.xz || wget -nc http://mirrors-usa.go-parts.com/blfs/conglomeration/texlive/texlive-20160523b-source.tar.xz || wget -nc ftp://ftp.lfs-matrix.net/pub/blfs/conglomeration/texlive/texlive-20160523b-source.tar.xz || wget -nc http://mirrors-ru.go-parts.com/blfs/conglomeration/texlive/texlive-20160523b-source.tar.xz || wget -nc http://ftp.osuosl.org/pub/blfs/conglomeration/texlive/texlive-20160523b-source.tar.xz || wget -nc ftp://ftp.osuosl.org/pub/blfs/conglomeration/texlive/texlive-20160523b-source.tar.xz
wget -nc http://ftp.osuosl.org/pub/blfs/conglomeration/texlive/texlive-20160523b-texmf.tar.xz || wget -nc http://mirrors-ru.go-parts.com/blfs/conglomeration/texlive/texlive-20160523b-texmf.tar.xz || wget -nc http://mirrors-usa.go-parts.com/blfs/conglomeration/texlive/texlive-20160523b-texmf.tar.xz || wget -nc ftp://tug.org/texlive/historic/2016/texlive-20160523b-texmf.tar.xz || wget -nc ftp://ftp.lfs-matrix.net/pub/blfs/conglomeration/texlive/texlive-20160523b-texmf.tar.xz || wget -nc http://ftp.lfs-matrix.net/pub/blfs/conglomeration/texlive/texlive-20160523b-texmf.tar.xz || wget -nc ftp://ftp.osuosl.org/pub/blfs/conglomeration/texlive/texlive-20160523b-texmf.tar.xz
wget -nc http://www.linuxfromscratch.org/patches/downloads/texlive/texlive-20160523b-source-upstream_fixes-2.patch || wget -nc http://www.linuxfromscratch.org/patches/blfs/svn/texlive-20160523b-source-upstream_fixes-2.patch
wget -nc http://www.linuxfromscratch.org/patches/blfs/svn/texlive-20160523b-texmf-upstream_fixes-1.patch || wget -nc http://www.linuxfromscratch.org/patches/downloads/texlive/texlive-20160523b-texmf-upstream_fixes-1.patch
TARBALL=`echo $URL | rev | cut -d/ -f1 | rev`
if [ -z $(echo $TARBALL | grep ".zip$") ]; then
DIRECTORY=`tar tf $TARBALL | cut -d/ -f1 | uniq | grep -v "^\.$"`
tar --no-overwrite-dir -xf $TARBALL
else
DIRECTORY=$(unzip_dirname $TARBALL $NAME)
unzip_file $TARBALL $NAME
fi
cd $DIRECTORY
fi
whoami > /tmp/currentuser
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
cat >> /etc/ld.so.conf << EOF
# Begin texlive 2016 addition
/opt/texlive/2016/lib
# End texlive 2016 addition
EOF
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
export TEXARCH=$(uname -m | sed -e 's/i.86/i386/' -e 's/$/-linux/') &&
patch -Np1 -i ../texlive-20160523b-source-upstream_fixes-2.patch &&
mkdir texlive-build &&
cd texlive-build &&
../configure \
--prefix=/opt/texlive/2016 \
--bindir=/opt/texlive/2016/bin/$TEXARCH \
--datarootdir=/opt/texlive/2016 \
--includedir=/opt/texlive/2016/include \
--infodir=/opt/texlive/2016/texmf-dist/doc/info \
--libdir=/opt/texlive/2016/lib \
--mandir=/opt/texlive/2016/texmf-dist/doc/man \
--disable-native-texlive-build \
--disable-static --enable-shared \
--with-system-cairo \
--with-system-fontconfig \
--with-system-freetype2 \
--with-system-gmp \
--with-system-graphite2 \
--with-system-harfbuzz \
--with-system-icu \
--with-system-libgs \
--with-system-libpaper \
--with-system-libpng \
--with-system-mpfr \
--with-system-pixman \
--with-system-poppler \
--with-system-xpdf \
--with-system-zlib \
--with-banner-add=" - BLFS" &&
make "-j`nproc`" || make
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
. /etc/alps/alps.conf
pushd $SOURCE_DIR
wget -nc http://aryalinux.org/releases/2016.11/blfs-systemd-units-20160602.tar.bz2
tar xf blfs-systemd-units-20160602.tar.bz2
cd blfs-systemd-units-20160602
make install-strip &&
make texlinks &&
ldconfig &&
mkdir -pv /opt/texlive/2016/tlpkg/TeXLive/ &&
install -v -m644 ../texk/tests/TeXLive/* /opt/texlive/2016/tlpkg/TeXLive/
cd ..
rm -rf blfs-systemd-units-20160602
popd
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
tar -xf ../../texlive-20160523b-texmf.tar.xz -C /opt/texlive/2016 --strip-components=1 &&
pushd /opt/texlive/2016 &&
patch -Np1 -i /sources/texlive-20160523b-texmf-upstream_fixes-1.patch &&
popd
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
mktexlsr &&
fmtutil-sys --all &&
mtxrun --generate
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
# Drop the mpost line from texmf.cnf and rebuild all TeX format files.
# Fix: the generated script previously ended with a trailing "&&", which is a
# bash syntax error ("unexpected end of file") and made `bash -e` fail.
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
sed -i '/^mpost,/d' /opt/texlive/2016/texmf-dist/web2c/texmf.cnf &&
fmtutil-sys --all
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
if [ ! -z $URL ]; then cd $SOURCE_DIR && cleanup "$NAME" "$DIRECTORY"; fi
register_installed "$NAME" "$VERSION" "$INSTALLED_LIST"
| true |
78a20bb77c06d17bb166c51e8b1572e9657c9a26 | Shell | forbesmyester/pipeawesome | /tests/pipeawesome.bats | UTF-8 | 708 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env bats
# Run the soup_temperature spec and compare the pipeline's stdout with the
# recorded expected output.
@test "pipeawesome simple" {
    EXPECTED=$( cat ./tests/pipeawesome/soup_temperature.expected.txt )
    RESULT=$( ./target/debug/pipeawesome -p ./tests/pipeawesome/soup_temperature.paspec.json -o OUTPUT=- )
    # print both values for debugging on failure
    echo "RESULT = $RESULT"
    echo "EXPECTED = $EXPECTED"
    [ "$RESULT" = "$EXPECTED" ]
}
# Run the pad_to_5 spec, feeding the recorded input on stdin (FAUCET=-), and
# compare the pipeline's stdout with the recorded expected output.
@test "pipeawesome with loops" {
    INPUT=$( cat ./tests/pipeawesome/pad_to_5.input.txt )
    EXPECTED=$( cat ./tests/pipeawesome/pad_to_5.expected.txt )
    RESULT=$( echo "$INPUT" | ./target/debug/pipeawesome -p ./tests/pipeawesome/pad_to_5.paspec.json -i FAUCET=- -o OUTPUT=- )
    # print both values for debugging on failure
    echo "RESULT = $RESULT"
    echo "EXPECTED = $EXPECTED"
    [ "$RESULT" = "$EXPECTED" ]
}
| true |
b02e4d406665745ddcd5fc45550fa32cca5eda6a | Shell | dilawar/Scripts | /hibernate_when_battery_is_low.sh | UTF-8 | 2,213 | 3.796875 | 4 | [] | no_license | #!/bin/sh
set -x
# http://unix.stackexchange.com/questions/84437/how-do-i-make-my-laptop-sleep-when-it-reaches-some-low-battery-threshold
###########################################################################
#
# Usage: system-low-battery
#
# Checks if the battery level is low. If “low_threshold” is exceeded
# a system notification is displayed, if “critical_threshold” is exceeded
# a popup window is displayed as well. If “OK” is pressed, the system
# shuts down after “timeout” seconds. If “Cancel” is pressed the script
# does nothing.
#
# This script is supposed to be called from a cron job.
#
###########################################################################
# This is required because the script is invoked by cron. Dbus information
# is stored in a file by the following script when a user logs in. Connect
# it to your autostart mechanism of choice.
#
# #!/bin/sh
# touch $HOME/.dbus/Xdbus
# chmod 600 $HOME/.dbus/Xdbus
# env | grep DBUS_SESSION_BUS_ADDRESS > $HOME/.dbus/Xdbus
# echo 'export DBUS_SESSION_BUS_ADDRESS' >> $HOME/.dbus/Xdbus
# exit 0
#
# User should be able to run pm-suspend command without password. Add the
# following to sudoer file using visudo command.
# dilawar ALL=(ALL) NOPASSWD: ALL
# let crontab know whom to talk to
export DISPLAY=:0
# Pull in the session DBus address saved at login (see header comment) so
# notify-send can reach the user's desktop from a cron job.
if [ -r ~/.dbus/Xdbus ]; then
  . ~/.dbus/Xdbus
fi
# Battery thresholds (percent) and countdown before suspending (seconds).
low_threshold=20
critical_threshold=15
timeout=15
suspend_cmd='/usr/sbin/pm-suspend'
# Current charge percentage and charging state from sysfs (battery BAT0).
level=$(cat /sys/class/power_supply/BAT0/capacity)
state=$(cat /sys/class/power_supply/BAT0/status)
# Nothing to do unless the battery is actually discharging.
if [ x"$state" != x'Discharging' ]; then
    exit 0
fi
# Countdown helper (run in the background): wait $timeout seconds, then
# suspend unless the battery stopped discharging in the meantime.
do_shutdown() {
    sleep $timeout
    # Close the confirmation dialog if one was opened; guard so kill is not
    # called with no arguments when $zenity_pid is unset.
    [ -n "$zenity_pid" ] && kill $zenity_pid 2>/dev/null
    # Fix: re-read the charging state from sysfs. The value captured before
    # the countdown is stale (always 'Discharging' at this point), so the
    # abort check could never trigger even if the charger was plugged in
    # while we slept.
    state=$(cat /sys/class/power_supply/BAT0/status)
    if [ x"$state" != x'Discharging' ]; then
        exit 0
    else
        sudo $suspend_cmd
    fi
}
# Between critical and low thresholds: just notify the user.
if [ "$level" -gt $critical_threshold ] && [ "$level" -lt $low_threshold ]; then
    notify-send "Battery level is low: $level%"
fi
# Below critical: urgent notification, then start the suspend countdown in
# the background; SIGHUP cancels the countdown via the trap.
if [ "$level" -lt $critical_threshold ]; then
    notify-send -u critical -t 20000 "Battery level is low: $level%" \
        'The system is going to hibernate in 15 seconds.'
    do_shutdown &
    shutdown_pid=$!
    trap 'kill $shutdown_pid' 1
    # NOTE(review): $zenity_pid is never assigned anywhere in this script, so
    # this expands to a plain `wait` (waits for the countdown itself) and the
    # cancel branch below appears to be dead code — confirm.
    if ! wait $zenity_pid; then
        kill $shutdown_pid 2>/dev/null
    fi
fi
exit 0
| true |
cb7ab6e4e4c3ff945788e551f0d73df228ca5863 | Shell | syrkis/bridger | /scripts/counts.sh | UTF-8 | 749 | 2.765625 | 3 | [] | no_license | #! /usr/bin/zsh
# For every gzipped review file: if no word-count output exists yet, build a
# (count, WORD) table for it; otherwise append the total word count to the
# existing output file.
for F in $(ls /home/common/datasets/amazon_review_data_2018/reviews/); do
    if [ ! -f "/home/$(whoami)/bridger/data/counts/$(echo $F | cut -d '.' -f 1).out" ]; then
        echo "begin $F"
        date
        # Extract the reviewText field, uppercase, squeeze every non-letter
        # run to a single space, put one word per line, then count unique
        # words and sort ascending by count.
        zcat /home/common/datasets/amazon_review_data_2018/reviews/$F | jq '.["reviewText"]' | tr 'a-z' 'A-Z' | tr -sC 'A-Z' ' ' | tr -s ' ' '\n' | sort | uniq -c | sort -n > /home/$(whoami)/bridger/data/counts/$(echo $F | cut -d '.' -f 1).out
        echo "JUST DID $F"
        date
    else
        echo "ALREADY DONE $F"
        # Sum the count column and append the total to the same .out file.
        # NOTE(review): this appends another total line on every rerun — confirm.
        cat /home/$(whoami)/bridger/data/counts/$(echo $F | cut -d '.' -f 1).out | sed 's/^ *//' | cut -d ' ' -f 1 | paste -sd+ | bc >> /home/$(whoami)/bridger/data/counts/$(echo $F | cut -d '.' -f 1).out
        date
    fi
done
echo "GOD BLESS THE MACHINE"
| true |
22381984262be0c5cdfab85d6785ba53cd09568f | Shell | zzy-program/08_tools | /ftrace/tracer.sh | UTF-8 | 504 | 3.5 | 4 | [] | no_license | #!/bin/bash
DPATH="/sys/kernel/debug/tracing"
PID=$1
echo "ftrace test"
if [ $# -ne 1 ]
then
echo Usage: tracer_graph.sh pid
exit 1
fi
# flush existing trace data
echo nop > $DPATH/current_tracer
# reset ftrace filter
echo > $DPATH/set_ftrace_filter
# disable trace
echo 0 > $DPATH/tracing_on
# set function tracer
echo function > $DPATH/current_tracer
# write current process id to set_ftrace_pid file
echo $PID > $DPATH/set_ftrace_pid
# enable the current tracer
echo 1 > $DPATH/tracing_on
| true |
7b539c6faaa92805bf900386b91b39f13dd582e3 | Shell | fischuu/Pipeline-lncRNA | /rules/featureCounts_quantify_stringmerge.smk | UTF-8 | 964 | 2.5625 | 3 | [] | no_license | # vim: set filetype=sh :
rule featureCounts_quantify_stringmerge:
"""
Quantify the reads against stringmerge output (featureCounts).
"""
input:
bam="%s/%s/BAM/{samples}.bam" % (config["project-folder"], config["species"]),
gtf="%s/%s/GTF_merged/merged_STRG.gtf" % (config["project-folder"], config["species"])
output:
"%s/%s/GTF/Stringmerge_fc/{samples}_stringmerge_fc.txt" % (config["project-folder"], config["species"])
log:
"%s/%s/logs/featureCounts/featureCounts_sm.{samples}.log" % (config["project-folder"], config["species"])
benchmark:
"%s/%s/benchmark/featureCounts/featureCounts_sm.{samples}.benchmark.tsv" % (config["project-folder"], config["species"])
threads: 12
shell:"""
featureCounts -p \
-T {threads} \
-a {input.gtf} \
-o {output} \
{input.bam} 2> {log}
"""
| true |
6a8c844121068c2eb7dc0244c35fb2cbad98c7dc | Shell | telepresencebot2/virsh_settings | /launch-gpu-1.sh | UTF-8 | 1,197 | 3.03125 | 3 | [] | no_license | #!/bin/bash
configfile=gpu-1.cfg
vfiobind() {
dev="$1"
vendor=$(cat /sys/bus/pci/devices/$dev/vendor)
device=$(cat /sys/bus/pci/devices/$dev/device)
if [ -e /sys/bus/pci/devices/$dev/driver ]; then
echo $dev > /sys/bus/pci/devices/$dev/driver/unbind
fi
echo $vendor $device > /sys/bus/pci/drivers/vfio-pci/new_id
}
modprobe vfio-pci
# Bind every PCI device listed in the config file to vfio-pci before starting
# the VM; lines beginning with '#' are skipped.
cat $configfile | while read line;do
    echo $line | grep ^# >/dev/null 2>&1 && continue
    vfiobind $line
done
# Boot the Windows guest with the passed-through devices.
# NOTE(review): host devices 83:00.1 and 02:00.1 are both assigned
# bus=root.1,addr=00.1 (duplicate guest slot) — confirm this is intended.
sudo qemu-system-x86_64 -enable-kvm -M q35 -m 8192 -cpu host \
    -smp 4,sockets=1,cores=4,threads=1 \
    -bios /usr/share/qemu/bios.bin -vga none \
    -device ioh3420,bus=pcie.0,addr=1c.0,multifunction=on,port=1,chassis=1,id=root.1 \
    -device vfio-pci,host=83:00.0,bus=root.1,addr=00.0,multifunction=on,x-vga=on \
    -device vfio-pci,host=83:00.1,bus=root.1,addr=00.1 \
    -device vfio-pci,host=84:00.0,bus=pcie.0 \
    -device vfio-pci,host=02:00.1,bus=root.1,addr=00.1 \
    -drive file=/media/windows/gpu-1.img,id=disk,format=raw -device ide-hd,bus=ide.0,drive=disk \
    -drive file=/home/elerek/images/en_windows_enterprise_10.iso,id=isocd -device ide-cd,bus=ide.1,drive=isocd \
    -boot menu=on
exit 0
| true |
70695403ab9b44ebdccd3e968a80dc17b1365283 | Shell | sh6103/server-file | /init_script.sh | UTF-8 | 820 | 3.1875 | 3 | [] | no_license | #!/bin/sh
# This script will be executed as the user 'postgres'
# echo 'Restoring the initial db to namayeyar raw database...'
# pg_restore --dbname=namayeyar --verbose --clean < /docker-entrypoint-initdb.d/init_namayeyar.custom
echo "Creating user ${NAMAYEYAR_DB_USER}..."
psql --command="CREATE USER ${NAMAYEYAR_DB_USER} WITH PASSWORD '${NAMAYEYAR_DB_PASS}';" postgres postgres
echo "Getting the createdb permission to user ${NAMAYEYAR_DB_USER} for tests..."
psql --command="ALTER USER ${NAMAYEYAR_DB_USER} CREATEDB;"
echo "Creating database ${NAMAYEYAR_DB}..."
psql --command="create database ${NAMAYEYAR_DB};" postgres postgres
echo "GRANT ALL PRIVILEGES ON DATABASE ${NAMAYEYAR_DB} TO ${NAMAYEYAR_DB_USER}"
psql --command="GRANT ALL PRIVILEGES ON DATABASE ${NAMAYEYAR_DB} TO ${NAMAYEYAR_DB_USER};" postgres postgres
| true |
ebc3d904ef4131d1d84a076df8fe7816e5303e69 | Shell | crest-cassia/oacis_docker | /test/dump_restore/db_backup.sh | UTF-8 | 898 | 3.359375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
. ./test/base.sh
# Create a busybox data container exposing the OACIS results directory as a
# host bind mount, start OACIS with its volumes, dump the oacis_development
# Mongo database inside the container, and verify the timestamped dump shows
# up on the host side of the mount (the function's exit status).
function db_dump_restore() {
  mkdir .oacis_docker_tmp_dir
  docker create --name ${OACIS_DATA_CONTAINER_NAME} -v `pwd`/.oacis_docker_tmp_dir:/home/oacis/oacis/public/Result_development busybox
  docker run --name ${OACIS_CONTAINER_NAME} -p ${PORT}:3000 -d --volumes-from ${OACIS_DATA_CONTAINER_NAME} ${OACIS_IMAGE}
  # give the container time to boot before exec'ing into it
  sleep 20
  datetime=`date +%Y%m%d-%H%M`
  docker exec -it ${OACIS_CONTAINER_NAME} bash -c "cd /home/oacis/oacis/public/Result_development; if [ ! -d db ]; then mkdir db; fi; cd db; mongodump --db oacis_development; mv dump dump-$datetime; chown -R oacis:oacis /home/oacis/oacis/public/Result_development/db"
  test -d .oacis_docker_tmp_dir/db/dump-$datetime/oacis_development
}
# Run the check and remember its status, then loosen permissions inside the
# container (apparently so the rm -rf below can remove docker-created files)
# and clean up the temporary directory.
db_dump_restore
rc=$?
docker exec -it ${OACIS_CONTAINER_NAME} bash -c "chmod 777 -R /home/oacis/oacis/public/Result_development"
rm -rf .oacis_docker_tmp_dir
exit $rc
| true |
5c7c1bbb018a5622e1f1a664611f6849993468c0 | Shell | nvkelso/natural-earth-vector | /tools/wikidata/update.sh | UTF-8 | 4,694 | 3.546875 | 4 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/bash
set -Eeuo pipefail
# check correct start with file existence
if [ ! -f tools/wikidata/fetch_wikidata.py ];
then
    echo "ERROR: Start from the project root !    ./tools/wikidata/update.sh "
    exit 1
fi

# Positional parameters; nei_path/neo_path are fixed in/out locations.
mode=$1
nei_letter_case=$2
nei_path=.
neo_path=x_tempshape
ne_shapepath=$3
ne_shapefile=$4

mkdir -p ${neo_path}/${ne_shapepath}
logmd=${neo_path}/update.md

# Fix: the parameter echo below previously misnumbered the arguments
# (it labelled neo_path as "3" and shifted ne_shapepath/ne_shapefile to
# "4"/"5", although they are $3 and $4).
echo " "
echo "##########  /tools/wikidata/update.sh  parameters:"
echo " 1: mode            : ${mode}"
echo " 2: nei_letter_case : ${nei_letter_case}"
echo " 3: ne_shapepath    : ${ne_shapepath}"
echo " 4: ne_shapefile    : ${ne_shapefile}"
#echo "   nei_path        : ${nei_path}"
echo "    neo_path        : ${neo_path}"
echo " "
# Fetch wikidata labels for the input shapefile's features: print the layer
# summary (ogrinfo), then run fetch_wikidata.py, which writes the new labels
# to <shapefile>.new_names.csv under ${neo_path}.
function fetch_names {
    echo " "
    echo " Fetch wikidata labels "
    echo " ================================="
    ogrinfo -al -so ${nei_path}/${ne_shapepath}/${ne_shapefile}.shp
    python3 tools/wikidata/fetch_wikidata.py -input_shape_name ${nei_path}/${ne_shapepath}/${ne_shapefile}.shp \
                    -input_lettercase ${nei_letter_case} \
                    -output_csv_name ${neo_path}/${ne_shapepath}/${ne_shapefile}.new_names.csv
    echo " created : ${neo_path}/${ne_shapepath}/${ne_shapefile}.new_names.csv "
    echo " "
}
# Apply the fetched wikidata labels to the shapefile: requires the input .shp
# and the .new_names.csv produced by fetch_names. Writes the updated shapefile
# plus change-log and summary CSVs under ${neo_path}, appends the summary to
# the markdown report ($logmd) and prints the name_en/NAME_EN changes.
function write_names {
    echo " "
    echo " Write shapefile with wikidata labels "
    echo " ================================="
    echo " shapefile info :  ${neo_path}/${ne_shapepath}/${ne_shapefile} "
    # Preconditions: input shapefile and the fetched CSV must both exist.
    if [ ! -f ${nei_path}/${ne_shapepath}/${ne_shapefile}.shp ];
    then
        echo "ERROR: ${nei_path}/${ne_shapepath}/${ne_shapefile}.shp not exist! STOP "
        exit 1
    fi
    if [ ! -f ${neo_path}/${ne_shapepath}/${ne_shapefile}.new_names.csv ];
    then
        echo "ERROR: ${neo_path}/${ne_shapepath}/${ne_shapefile}.new_names.csv not exist! STOP "
        echo "hint: You should run the fetch part first! "
        exit 1
    fi
    # Write the new labels into a copy of the shapefile plus audit CSVs.
    python3 tools/wikidata/write_wikidata.py -input_shape ${nei_path}/${ne_shapepath}/${ne_shapefile}.shp \
                -input_lettercase ${nei_letter_case} \
                -input_csv ${neo_path}/${ne_shapepath}/${ne_shapefile}.new_names.csv \
                -output_shape ${neo_path}/${ne_shapepath}/${ne_shapefile}.shp \
                -output_csvlog ${neo_path}/${ne_shapepath}/${ne_shapefile}.changes_log.csv \
                -output_csvsumlog ${neo_path}/${ne_shapepath}/${ne_shapefile}.summary_log.csv
    # Append the summary to the markdown report and render both CSVs as md.
    echo " " >> $logmd
    echo "### ${neo_path}/${ne_shapepath}/${ne_shapefile}" >> $logmd
    echo " " >> $logmd
    csvtomd ${neo_path}/${ne_shapepath}/${ne_shapefile}.summary_log.csv >> $logmd
    csvtomd ${neo_path}/${ne_shapepath}/${ne_shapefile}.changes_log.csv > ${neo_path}/${ne_shapepath}/${ne_shapefile}.changes_log.csv.md
    csvtomd ${neo_path}/${ne_shapepath}/${ne_shapefile}.summary_log.csv > ${neo_path}/${ne_shapepath}/${ne_shapefile}.summary_log.csv.md
    echo " "
    echo "show only name_en/NAME_EN changes : ${neo_path}/${ne_shapepath}/${ne_shapefile} "
    echo "---------------------"
    # grep exits non-zero on no match; '|| true' keeps set -e from aborting.
    cat ${neo_path}/${ne_shapepath}/${ne_shapefile}.changes_log.csv.md | grep MODvalue | grep name_en || true
    cat ${neo_path}/${ne_shapepath}/${ne_shapefile}.changes_log.csv.md | grep MODvalue | grep NAME_EN || true
    echo " "
    cat ${neo_path}/${ne_shapepath}/${ne_shapefile}.summary_log.csv.md
    echo " "
    echo " (write) created shape and audit files:"
    echo " -------------------"
    ls -Gga ${neo_path}/${ne_shapepath}/${ne_shapefile}*
    echo ""
}
# Copy the generated shapefile components (shp/dbf/shx/prj/cpg) from
# ${neo_path} back over the working ${ne_shapepath} directory.
function copy_names {
    echo " "
    echo " Copy shape + audit files "
    echo " =============================== "
    cp -v ${neo_path}/${ne_shapepath}/${ne_shapefile}*.{shp,dbf,shx,prj,cpg} ${ne_shapepath}/
}
# Dispatch on the requested mode; any unknown value aborts with a hint.
case "$mode" in
    fetch)
        fetch_names
        ;;
    write)
        write_names
        ;;
    fetch_write)
        fetch_names
        write_names
        ;;
    copy)
        copy_names
        ;;
    all)
        fetch_names
        write_names
        copy_names
        ;;
    *)
        echo "Unknown mode! the first parameter should be:[fetch/write/fetch_write/copy/all]"
        exit 1
        ;;
esac
exit
27a3e3de568ec910aa5ec94b4e1928df64311e28 | Shell | tong-tf/docker | /aosp/run.sh | UTF-8 | 295 | 3.421875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Print the accepted build targets for this script.
usage()
{
	echo "run.sh 1404|1604"
	echo "1404 to build ubuntu1404"
	echo "1604 to build ubuntu1604"
}
# Build the AOSP image for the requested Ubuntu base; anything else prints
# the usage text.
case $1 in
	1404)
		docker build -t tong/aosp:1404 -f Dockerfile_aosp1404 .
	;;
	1604)
		docker build -t tong/aosp:1604 -f Dockerfile_aosp1604 .
	;;
	*)
		usage
	;;
esac
| true |
51534ba74f0bce69c2a20fc93c6ae50f3299da32 | Shell | weliveindetail/astpp | /examples/lambda/test.sh | UTF-8 | 689 | 3.40625 | 3 | [] | no_license | #!/bin/bash
#set -x
# Remove ASTs from a previous run.
rm -r generic.*.ast plain.*.ast
# Generate ASTs
clang++ -std=c++11 -fsyntax-only -fno-color-diagnostics -Xclang -ast-dump plain.cpp > plain.in.ast
clang++ -std=c++14 -fsyntax-only -fno-color-diagnostics -Xclang -ast-dump generic.cpp > generic.in.ast
# Size (in lines) of the diff between the two raw AST dumps.
lines_before=$(diff -u generic.in.ast plain.in.ast | wc -l)
echo "Lines before: ${lines_before}"
# Run the post-processing
python3 ../../astpp -o generic.out.ast generic.in.ast
python3 ../../astpp -o plain.out.ast plain.in.ast
lines_after=$(diff -u generic.out.ast plain.out.ast | wc -l)
echo "Lines after: ${lines_after}"
# Post-processing should shrink the diff between the two dumps.
if [ "${lines_before}" -gt "${lines_after}" ]; then
    echo "Works"
else
    echo "Fails"
fi
| true |
e1edf792a05c8f5c72d4a3cff9a01b9f80fe0318 | Shell | kskuhlman/Bob | /Client-tools/Push to i and Build all.sh | UTF-8 | 2,169 | 4.28125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright 2017 S4i Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Calls other scripts to effectively push files to the build directory and then perform a `make all`.
# $1 = The path to the project folder (Eclipse-formatted in OS-specific path nomenclature)
# $2 = Name of the build settings file
#
function realpath {
local p=$(cd "$1" && echo "$PWD")
echo "${p}"
}
localScriptDir="${0%/*}"
# Validate arguments
if (( $# != 2 )); then
echo "Incorrect number of incoming parameters; expected path to project directory and name of build settings file."
echo "Exiting script."
exit 1
fi
buildSettingsDir="$1"
buildSettingsFile="$2"
# If using Windows, generate Windows-friendly .buildsettings path for display purposes and insure the actual path is in Cygwin format.
if [[ "$(uname -s)" == CYGWIN* ]]; then
buildSettingsDir=$(cygpath -u "${buildSettingsDir}")
buildSettings="${buildSettingsDir}/${buildSettingsFile}"
buildSettingsDisplay=$(cygpath -w "${buildSettings}")
else
buildSettings="${buildSettingsDir}/${buildSettingsFile}"
buildSettingsDisplay=$buildSettings
fi
if [[ ! -d "${buildSettingsDir}" ]]; then
echo "The build settings (project) directory '${buildSettingsDir}' does not exist. Exiting script."
exit 1
fi
if [[ ! -f "${buildSettings}" ]]; then
echo "The build settings file '${buildSettingsDisplay}' does not exist. Has it been set up yet? Exiting script."
exit 1
fi
source "${buildSettings}"
# Push code to i
"./Push to i.sh" "${buildSettingsDir}" "${buildSettingsFile}"
echo
# Build all
"./Build all.sh" "${buildSettingsDir}" "${buildSettingsFile}"
| true |
de5e6e9dfe6c4d6c3278d69f7bbe7a5034456637 | Shell | vasthemas/Sequence_aligner | /seq_aligner.sh | UTF-8 | 2,045 | 3.859375 | 4 | [] | no_license | #!/bin/bash
# Align Broad Smart Seq2 Data
echo "Alignment Start"
echo "------------------------------------"
# Positional parameters:
#   $1 - Meta data: list of barcodes/samples, one per line
#   $2 - results directory (a results/ subfolder is created inside it)
#   $3 - file listing the flow-cell data directories, one per line
#   $4 - STAR genome index directory (--genomeDir)
#   $5 - thread count for STAR (--runThreadN)
Meta_data=$1
results_dir=$2
data_dir=$3
genome_dir=$4
threads=$5
echo " "
mkdir "${results_dir}/results"
cp $1 "${results_dir}/results"
cd "${results_dir}/results"
#Read Meta file and go through each sample
while read p; do
  echo "$p"
  #Make folder for sample name
  mkdir "${results_dir}/results/${p}"
  #copy files over to folder
  # Pattern matching this sample's per-lane files, e.g. <sample>.unmapped.1
  regrex="${p}.unmapped.[0-9]"
  echo "Organizing Data by Samples from each flow cell given"
  # Gather the sample's files from every flow-cell directory listed in $data_dir.
  # NOTE(review): `cd $d` changes the cwd each iteration — if the listed paths
  # (or ${results_dir}) are relative this breaks after the first one; confirm
  # they are absolute.
  while read d; do
    #Flowcell #1
    cd $d
    # cd /home/vasanthchandrasekhar_g_harvard_e/scData/data/H2KH7BGXG/get.broadinstitute.org/pkgs/SN0203980
    ls | grep "$regrex" | xargs -I '{}' cp '{}' "${results_dir}/results/${p}"
  done < $data_dir
  #Flowcell #2
  # cd /home/vasanthchandrasekhar_g_harvard_e/scData/data/H2JKVBGXG/get.broadinstitute.org/pkgs/SN0203975
  # ls | grep "$regrex" | xargs -I '{}' cp '{}' "${results_dir}/results/${p}"
  #Merge files from different lanes
  cd "${results_dir}/results/${p}"
  echo "Unzipping and merging lanes"
  gunzip *
  # Concatenate all lanes into one R1 and one R2 fastq per sample.
  merge_R1="merge_${p}.1.unmapped.fastq"
  merge_R2="merge_${p}.2.unmapped.fastq"
  cat *unmapped.1.fastq > "$merge_R1"
  cat *unmapped.2.fastq > "$merge_R2"
  #Align sequences using STAR
  echo "Using STAR to Align"
  #mkdir "${results_dir}/results/${p}_star"
  STAR --runThreadN $threads \
  --genomeDir $genome_dir \
  --readFilesIn "$merge_R1" "$merge_R2" \
  --outFileNamePrefix "${results_dir}/results/${p}/${p}_star/" \
  --quantMode GeneCounts \
  --outSAMtype BAM SortedByCoordinate
  # Keep the per-sample gene-count table under a sample-prefixed name.
  mv "${results_dir}/results/${p}/${p}_star/ReadsPerGene.out.tab" ${p}_ReadsPerGene.out.tab
  cd "${results_dir}/results"
done <$Meta_data
echo "out of loop"
#Creating counts
#cd "${results_dir}/results"
#mkdir "${results_dir}/results/counts"
#for x in */*ReadsPerGene.out.tab; do s=`basename $x | cut -f1 -d.`; echo $s; cat $x | tail -n +5 | cut$
| true |
2886055639a21622269fd974b8e3adae9462b6dc | Shell | VGP/vgp-assembly | /pipeline/qv/_submit_qv.sh | UTF-8 | 823 | 3.140625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Require at least the sample id; a second argument (job id) is optional.
if [ -z $1 ]; then
	echo "Usage: ./_submit_qv.sh <sample_id> [jobid_to_set_dependency]"
	exit -1
fi
sample=$1
# Link the sample's position-sorted BAM (and index) under the fixed names the
# downstream script uses, unless something is already present.
if ! [ -e aligned.bam ]; then
	ln -s $sample/outs/possorted_bam.bam aligned.bam
	ln -s $sample/outs/possorted_bam.bam.bai aligned.bam.bai
fi
mkdir -p logs
# sbatch resource settings for the genomecov job.
cpus=4
mem=4g
name=$sample.genomecov
script=$VGP_PIPELINE/qv/genomecov.sh
args=$sample
walltime=2-0
log=logs/$name.%A_%a.log
# Optional dependency: only start after job $2 finishes successfully.
if ! [ -z $2 ]; then
	wait_for="--dependency=afterok:$2"
fi
# Echo the sbatch command, then submit and record the job id in genomecov_jid.
echo "\
sbatch --partition=norm -D $PWD $wait_for --cpus-per-task=$cpus --job-name=$name --mem=$mem --time=$walltime --error=$log --output=$log $script $args"
sbatch --partition=norm -D $PWD $wait_for --cpus-per-task=$cpus --job-name=$name --mem=$mem --time=$walltime --error=$log --output=$log $script $args > genomecov_jid
| true |
71affc8f89c7f84031460abc682d510d3e78acbf | Shell | KNMI/VERCE | /guse_workflows_script/simulation/specfem_fullsimulation.rsl | UTF-8 | 1,242 | 3 | 3 | [
"MIT"
] | permissive | #!/bin/bash -l
module load verce
# Generate solver inputs from the QuakeML event, station list and solver
# configuration.
python verce-hpc-pe/src/bulk_inputgen.py quakeml stations solverconf
date
echo -------------------------------------------------------
echo " decomposing mesh..."
date
python verce-hpc-pe/src/bulk_decompose.py jsonout_inputgen solverconf
echo "mesh decomposition completed..."
date
# Extract the per-simulation working paths and the run id from the
# input-generation JSON output.
SIMULATION_PATHS=$(python verce-hpc-pe/src/PEDataExtractor.py jsonout_inputgen)
RUN_ID=$(python verce-hpc-pe/src/PEDataExtractor.py jsonout_inputgen runId)
echo $RUN_ID
arr=(`echo $SIMULATION_PATHS`);
# For each simulation path: stage OUTPUT_FILES/bin, then run the two
# SPECFEM3D stages (database generation, solver) under MPI from its bin/.
for i in "${arr[@]}"
do
   :
   pwd
   mkdir -p $i/../OUTPUT_FILES
   mkdir -p $i/../bin
   cp -r $RUN_ID/OUTPUT_FILES/DATABASES_MPI $i/../OUTPUT_FILES
   echo $i -------------------------------------------------------
   cd $i
   cd ../
   # runs database generation
   echo
   echo " running database generation..."
   echo
   cd bin/
   date
   mpiexec xgenerate_databases
   cd ../
   echo "database generation completed..."
   date
   # runs simulation
   echo
   echo " running solver..."
   echo
   cd bin/
   date
   mpiexec xspecfem3D
   cd ../
   #-------------------------------------------
   echo "SPECFEM3D completed!"
   echo `date`
   date
   echo "see results in directory: OUTPUT_FILES/"
   cd ../
   cd ../
done
# Merge provenance records from the decomposition and input-generation steps.
python verce-hpc-pe/src/PEProvenanceMerge.py provout_decompose provout_inputgen
| true |
c885956e04a518849d6d725299f20a8d876403da | Shell | DldFw/ecbrates | /scripts/get_rates.sh | UTF-8 | 1,233 | 4.15625 | 4 | [] | no_license | #!/bin/bash
# ECB euro foreign-exchange reference-rate endpoints: historical and latest,
# each available as zipped CSV or XML.
BASE=https://www.ecb.europa.eu/stats/eurofxref
HIST_ZIP=$BASE/eurofxref-hist.zip
LAST_ZIP=$BASE/eurofxref.zip
HIST_XML=$BASE/eurofxref-hist.xml
LAST_XML=$BASE/eurofxref-daily.xml
# Output goes to a data/ directory next to this script.
THIS_DIR=`dirname $0`
OUT_DIR=$THIS_DIR/data
# Empty when xmllint is not installed (checked before formatting below).
XMLLINT=`which xmllint`
# Print usage and terminate the script with status 1.
print_help()
{
	echo "Usage: $0 <hist|daily> <xml|zip>"
	echo "       hist(orical) or daily data"
	echo "       xml or zipped csv"
	echo "If xmllint is found - then it will be used to format xml"
	exit 1
}
# First argument selects the data set (hist|daily), second the format (xml|zip).
TYPE=$1
FORMAT=$2

# Validate both arguments; anything else prints usage and exits.
# Two separate [ ] tests joined with && replace the obsolescent `-a`
# binary operator of test(1).
if [ x"$TYPE" != x"hist" ] && [ x"$TYPE" != x"daily" ]
then
	print_help
fi
if [ x"$FORMAT" != x"xml" ] && [ x"$FORMAT" != x"zip" ]
then
	print_help
fi
# Create the output directory on first run.
if [ ! -d $OUT_DIR ]
then
	mkdir -v $OUT_DIR
fi
if [ $FORMAT == "xml" ]
then
	# Pick the historical or daily XML endpoint.
	data=$LAST_XML
	if [ $TYPE == "hist" ]
	then
		data=$HIST_XML
	fi
	# The ?$RANDOM query string presumably defeats intermediate caches — confirm.
	if [ -z $XMLLINT ]
	then
		echo "Not using xmllint"
		curl -s $data?$RANDOM -o $OUT_DIR/$TYPE.$FORMAT
	else
		echo "Using xmllint for formatting"
		curl -s $data?$RANDOM | $XMLLINT --format -o $OUT_DIR/$TYPE.$FORMAT -
	fi
else
	# zip: historical or daily zipped CSV, saved as-is.
	data=$LAST_ZIP
	if [ $TYPE == "hist" ]
	then
		data=$HIST_ZIP
	fi
	curl -s $data?$RANDOM -o $OUT_DIR/$TYPE.$FORMAT
fi
ls -l $OUT_DIR
| true |
e2ab51c3556a07154900194e42cabc0a3aa18ffc | Shell | Bfirdevs/Project3Collective | /PetInsurance/deploy.sh | UTF-8 | 867 | 2.796875 | 3 | [] | no_license | # deploy.sh
#!/bin/sh
# Install kubernetes and set config
sudo apt-get install google-cloud-sdk
# set the path of the google cloud directory
source $HOME/google-cloud-sdk/path.bash.inc
#install kubectl. Pipe yes to confirm any prompts
yes | gcloud components install kubectl
#build the docker container image
# this will need to be changed to reflect the name of the project, image and version
docker build -t gcr.io/projectthreeie/petinsurance:v1.0 .
#push the image up to the cloud container registry
# this will need to be changed to match the name set above
gcloud docker -- push gcr.io/projectthreeie/petinsurance:v1.0
# deletes all pods in order to make sure problematic pods go away
# can probably be removed if you're not having pods that get stuck
kubectl delete pods --all
# applies the new deployment to the kubernetes cluster
kubectl apply -f k8s.yml
| true |
a8a2b378f56ddf09dd456645acde00e30d005c58 | Shell | dthas/dals | /back_end/daily.sh | UTF-8 | 1,801 | 2.859375 | 3 | [] | no_license | #===========================================================================
# daily.sh
# Copyright (C) 2017 Free Software Foundation, Inc.
# Originally by ZhaoFeng Liang <zhf.liang@outlook.com>
#
#This file is part of DTHAS_ALS.
#
#DTHAS_ALS is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#DTHAS_ALS is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with DTHAS_ALS; If not, see <http://www.gnu.org/licenses/>.
#===========================================================================
#!/bin/bash
# Daily log-processing pipeline: fetch the day's log files, load them into the
# database via the 'als' tool, then publish the web UI files.
# Each step redirects both stdout and stderr into its own log file.
#-----------------------------------------------------------------
# Fetch the log files (sourced so it runs in this shell's environment).
#-----------------------------------------------------------------
source script/getlogfiles_daily.sh &> getlogfile_daily.log
#-----------------------------------------------------------------
# Write the log files into the database (builds a temporary table).
#-----------------------------------------------------------------
dst/als 2 &> als_2.log
#-----------------------------------------------------------------
# Copy the temporary table into the real data table.
#-----------------------------------------------------------------
dst/als 3 &> als_3.log
#-----------------------------------------------------------------
# Copy the files under the web directory to /var/www/log_check.
#-----------------------------------------------------------------
cp src/web/*.* /var/www/log_check
# NOTE(review): 777 makes these world-writable; confirm whether the web
# server actually needs write access before tightening to 755.
chmod 777 /var/www/log_check/*
| true |
98509213cdaf61b93e8220240f09ce63d4300dc5 | Shell | jairo-ab/dotfiles | /home/bin/deploy.sh | UTF-8 | 1,748 | 3.640625 | 4 | [
"WTFPL"
] | permissive | #!/usr/bin/env bash
#
# Arquivo: deploy.sh
CSSDEST=''
case $1 in
bulma)
CSS='css/bulma.min.css'
URL='https://api.github.com/repos/jgthms/bulma/releases/latest'
;;
bootstrap)
CSS='dist/css/bootstrap.min.css'
JS='dist/js/bootstrap.bundle.min.js'
URL='https://api.github.com/repos/twbs/bootstrap/releases/latest'
;;
jquery)
JS='dist/jquery.min.js'
URL='https://api.github.com/repos/jquery/jquery/releases/latest'
;;
fontawesome)
CSSDEST='fontawesome.min.css'
CSS='css/all.min.css'
URL='https://api.github.com/repos/FortAwesome/Font-Awesome/releases/latest'
;;
plyr)
JS='dist/plyr.min.js'
CSS='dist/plyr.css'
URL='https://api.github.com/repos/sampotts/plyr/releases/latest'
;;
*)
echo "Uso: $(basename $0) [bulma|jquery|bootstrap|fontawesome|plyr]"
exit
;;
esac
VERSION=$(curl -s $URL | jq -r '.tag_name')
NAME="${1}-${VERSION}"
DOWNLOAD=$(curl -s $URL | jq -r '.tarball_url')
[ ! -f /tmp/${NAME}.tar.gz ] && curl -s -L -o /tmp/${NAME}.tar.gz $DOWNLOAD
OLDNAME=$(tar -tf /tmp/${NAME}.tar.gz | head -1 | cut -f1 -d"/")
tar xzf /tmp/${NAME}.tar.gz -C /tmp
[ ! -d css ] && mkdir css
[ ! -d js ] && mkdir js
if [ ! -z $CSS ] && [ -f /tmp/$OLDNAME/$CSS ]
then
if [ "$1" == "fontawesome" ]; then
cp -r /tmp/$OLDNAME/webfonts .
fi
if [ "$1" == "bulma" ]; then
curl -s -L -o 'js/navbar.js' 'https://gist.githubusercontent.com/sistematico/42298a16909a40e7f735d1f91103d99a/raw/6117587f5cb62eac3c87aa6368e6aaadc579bd2c/navbar.js'
fi
cp /tmp/$OLDNAME/$CSS css/$CSSDEST
fi
if [ ! -z $JS ] && [ -f /tmp/$OLDNAME/$JS ]
then
cp /tmp/$OLDNAME/$JS js/
fi
| true |
f8d59b2ced2551ed11340dc861e9c814692fd7dc | Shell | kf106/libra-scripts | /balances.sh | UTF-8 | 323 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# script to query the Libra CLI addresses for their balances
# find the Libra terminal
libra="$(comm -12 <(xdotool search --name 'libra$' | sort) <(xdotool search --class 'Gnome-terminal' | sort))"
for i in {0..99}
do
xdotool windowactivate $libra
xdotool type "q b $i"
xdotool key KP_Enter;
done
| true |
d781d558ec38c1c75b087d7bd2d84ac36faef984 | Shell | secoya/hablar.js | /tools/build.sh | UTF-8 | 402 | 3.171875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Abort the build on the first failing command.
set -e
# Absolute path of the package root (parent of this script's directory).
PKGROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && echo "$PWD")
# Prefer locally installed node tools (tsc, jison) over global ones.
PATH=$PKGROOT/node_modules/.bin:$PATH
# Build: compile the TypeScript project into dist/, then generate a CommonJS
# parser module from each jison grammar.
main() {
	# Start from a clean output tree.
	rm -rf "$PKGROOT/dist"
	tsc --project "$PKGROOT/tsconfig.json"
	mkdir "$PKGROOT/dist/parsers/grammars"
	for name in constraint expression text; do
		jison src/parsers/grammars/$name.jison -m commonjs -o dist/parsers/grammars/$name.js
	done
}
main "$@"
| true |
4a29fa0c47425e377d8b0f8412f6a7ca1ac083a7 | Shell | mudox/gitz | /gitz.sh | UTF-8 | 586 | 3.75 | 4 | [] | no_license | #!/usr/bin/env bash
# Directory containing this script; used to locate the companion main.py.
# NOTE(review): when this file is sourced, $0 is the invoking shell, so
# THIS_PATH may not point at the script's own directory -- verify.
THIS_PATH=$(dirname "$0")
# gz: run the gitz tool. With no arguments, runs the default "start" command;
# otherwise forwards all arguments to main.py. The tool's output is handed to
# __gitz_handle_result, which may change the caller's working directory.
gz() {
  local ret
  if [[ $# == 0 ]]; then
    ret=$(python3 "${THIS_PATH}"/main.py start)
  else
    ret=$(python3 "${THIS_PATH}"/main.py "$@")
  fi
  __gitz_handle_result "$ret"
}
# gza: like gz, but the no-argument form runs "start --all" instead of "start".
gza() {
  local ret
  if [[ $# == 0 ]]; then
    ret=$(python3 "${THIS_PATH}"/main.py start --all)
  else
    ret=$(python3 "${THIS_PATH}"/main.py "$@")
  fi
  __gitz_handle_result "$ret"
}
# Interpret the result string emitted by gitz. A result of the form
# "cd:<path>" is a request to change the caller's working directory to <path>;
# anything else is ignored.
__gitz_handle_result() {
  # Match a literal "cd:" prefix. The original used [[ $1 =~ '^cd:' ]], but in
  # bash a quoted =~ pattern is matched as a LITERAL string (including the
  # '^'), so the prefix test never succeeded. A glob prefix match is correct.
  if [[ $1 == cd:* ]]; then
    local cdto
    cdto="${1:3}"    # strip the 3-character "cd:" prefix
    printf "\e[34mcd to: %s ...\n" "$cdto"
    cd "$cdto" || return
  fi
}
| true |
9152558a15142406b82284f22a81dbf7e40376ca | Shell | mathias-dotfiles/.zsh | /zshrc | UTF-8 | 1,265 | 3.171875 | 3 | [] | no_license | ### Variable Declarations and Properties
# Enable colored output in BSD/macOS ls and advertise 24-bit color support.
export CLICOLOR=1
export COLORTERM=truecolor
### END Variables Declarations
### ZSH Options
# Enable vi mode
bindkey -v
# Typing a bare directory name cd's into it.
setopt AUTO_CD
# Record timestamp and duration with each history entry.
setopt EXTENDED_HISTORY
### End ZSH Options
### ZSH prompt
# Load zsh's version-control-info helper and allow ${...} expansion in prompts.
autoload -Uz vcs_info
setopt prompt_subst
# Runs before each prompt: choose the git-branch colour for vcs_info based on
# whether the working tree is dirty (colour 160, red) or clean (034, green),
# then refresh $vcs_info_msg_0_ for use in RPROMPT.
precmd_vcs_info() {
    VCS_STATUS=$(command git status --porcelain 2> /dev/null)
    # The original wrote `]] then` with no separator before `then`; the
    # explicit `;` is the unambiguous, portable form of the same test.
    if [[ -n $VCS_STATUS ]]; then
        zstyle ':vcs_info:git:*' formats '%F{160}(%b)%f'
    else
        zstyle ':vcs_info:git:*' formats '%F{034}(%b)%f'
    fi
    vcs_info
}
# Register the hook so zsh calls it before drawing every prompt.
precmd_functions+=(precmd_vcs_info)
# Set up the prompt (with git branch name)
# Green check / red cross depending on the previous command's exit status (%?).
PMPT_LAST_CMD_STATUS='%(?.%B%F{034}✔%f%b.%B%F{124}✘%f%b) '
# User name plus the last two components of the working directory.
PMPT_CURRENT_DIR='%F{136}%n%f in %2~'
PMPT_IS_PRIVILEGED='%B%F{033}>> %f%b'
# Single quotes + prompt_subst: these expand at display time, every prompt.
PROMPT='${PMPT_LAST_CMD_STATUS}${PMPT_CURRENT_DIR} ${PMPT_IS_PRIVILEGED}'
# Right-hand prompt shows the git segment computed by precmd_vcs_info.
RPROMPT='$vcs_info_msg_0_'
### END ZSH Prompt
### Navigation helpers

# prototype [name]: with no argument, list the prototypes directory;
# with an argument, jump into that prototype's folder.
function prototype() {
    local prot_dir="$HOME/Documents/Carreira/Projetos/Desenvolvimento/Prototipos"
    if [ "$#" -ge 1 ]; then
        cd "$prot_dir/$1"
    else
        ls "$prot_dir"
    fi
}

# aulas <course>: jump into the given course's class-notes folder.
function aulas() {
    cd "$HOME/Documents/Carreira/UESC/Aulas/$1"
}
### END Navigation

### Aliases
alias ls="ls -G"
alias ll="ls -l -G"
### END Aliases
| true |
277c384a7da7c24a6f902ac0dc7ff5ff2fc7bf7e | Shell | edevyatkin/linux-from-scratch | /5/10.sh | UTF-8 | 287 | 2.546875 | 3 | [] | no_license | #!/bin/bash
#Tcl
cd $LFS/sources
tar -xf tcl8.6.0-src.tar.gz
cd tcl8.6.0
cd unix
./configure --prefix=/tools
make -j4
TZ=UTC make test
make install
chmod -v u+w /tools/lib/libtcl8.6.so
make install-private-headers
ln -sv tclsh8.6 /tools/bin/tclsh
cd $LFS/sources
rm -rf tcl8.6.0
| true |
bc2b8c91289f984fa94b55ef1bd94dcf8ef61c19 | Shell | frap/atea-dotfiles | /bin/dotfiles | UTF-8 | 13,181 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
[[ "$1" == "source" ]] ||
if [[ "$1" == "-h" || "$1" == "--help" ]]; then
cat <<HELP
Usage: $(basename "$0")
See the README for documentation.
https://github.com/frap/atea-dotfiles
Copyright (c) 2020 "Gas" Andrés Gasson
Licensed under the MIT license.
HELP
exit
fi
###########################################
# GENERAL PURPOSE EXPORTED VARS / FUNCTIONS
###########################################
# Resolve the real location of this script, following chains of symlinks,
# so $__dir ends up as the physical directory containing the actual file.
SOURCE="${BASH_SOURCE[0]}"
# resolve $SOURCE until the file is no longer a symlink
while [ -h "$SOURCE" ]; do
  # Physical (-P) directory of the current link; used as the base when the
  # symlink target below turns out to be relative.
  DIR="$(cd -P "$(dirname "$SOURCE")" && pwd)"
  SOURCE="$(readlink "$SOURCE")"
  # if $SOURCE was a relative symlink, we need to resolve it relative
  # to the path where the symlink file was located
  [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
# Final physical directory of the fully-resolved script file.
__dir="$(cd -P "$(dirname "$SOURCE")" && pwd)"
# Where the magic happens.
export DOTFILES=~/.local/dotfiles
# Load colours and helpers first so they can be used in base theme
source "${DOTFILES}/themes/colours.theme.bash"
function e_header() { printf "\n%4b${echo_purple} %-70s${echo_reset_color}\n" "\U0001F916" "$@"; }
function e_install() { printf "%4b${echo_purple} %-60s${echo_reset_color}\n" "🏗 " "$@"; }
function e_success() { echo -e "${echo_cyan}✔ $@${echo_reset_color}"; }
function e_error() { echo -e "${echo_red}✗ $@ ${echo_reset_color}"; }
# function e_error() { printf "%b${echo_red} %s${echo_reset_color}\n" "❌" "$@"; }
function e_excep() { printf "\n%4b${echo_red} %-60s${echo_reset_color}\n" "🧨" "$@"; }
function e_arrow() { printf "${echo_yellow}%4b ${echo_cyan}%-60s${echo_reset_color}\n" "➜" "$@"; }
function e_info() { printf "${echo_yellow}%4b %-60s${echo_reset_color}\n" "∴" "$@"; }
function e_data() { printf "${echo_green}%4b %-60s${echo_reset_color}\n" "➜" "$@"; }
function e_line() { printf "${echo_yellow}%4b %-60s${echo_reset_color}\n" "\U262F" "$@"; }
function e_sep() { printf "${echo_cyan}%4b %-60s${echo_reset_color}\n" "\U1F4CD" "--------------------------------------------------------"; }
function e_question() { printf "${echo_purple}%4b %-60s${echo_reset_color}\n" "\U00002049" "$@"; }
# bash helpers
is_empty() {
local var=$1
[[ -z $var ]]
}
is_not_empty() {
local var=$1
[[ -n $var ]]
}
function is_file() {
local file=$1
[[ -f $file ]]
}
function exists() {
local file=$1
test -e $1
}
function is_dir() {
local dir=$1
[[ -d $dir ]]
}
is_not_dir() {
local dir=$1
[[ ! -d $dir ]]
}
function is_link() {
local dir=$1
[[ -L $dir ]]
}
function matches_regex() {
local filepath=$1
local regex=$2
[[ $filepath =~ $regex ]]
}
# OS detection
function is_osx() {
[[ "$OSTYPE" =~ ^darwin ]] 2>/dev/null || return 1
}
function is_ubuntu() {
[[ "$(cat /etc/issue 2>/dev/null)" =~ Ubuntu ]] || return 1
}
function is_ubuntu_desktop() {
dpkg -l ubuntu-desktop >/dev/null 2>&1 || return 1
}
function is_redhat() {
[[ "$(cat /etc/redhat-release 2>/dev/null)" =~ "Red Hat" ]] || return 1
}
function is_oraclelinux() {
[[ "$(cat /etc/oracle-release 2>/dev/null)" =~ "Oracle Linux" ]] || return 1
}
function is_ateatsp() {
exists "/home/atearoot" || return 1
}
function is_custtsp() {
exists "/atea/home/thirdparty" || return 1
}
if exists /etc/redhat-release; then
redhat_version="$(sed 's/^.\+ release \([.0-9]\+\).*/\1/' /etc/redhat-release | awk -F. '{print $1}')"
fi
function is_linux() {
is_ubuntu || is_redhat || is_oraclelinux
}
function get_os() {
for os in linux osx oraclelinux redhat ubuntu ateatsp custtsp; do
is_$os
[[ $? == ${1:-0} ]] && echo $os
done
}
# Service detection
function has_gls() {
hash gls 2>/dev/null || return 1
}
function has_exa() {
hash exa 2>/dev/null || return 1
}
function has_nvm() {
exists ~/.nvm || return 1
}
function has_docker() {
hash docker 2>/dev/null || return 1
}
function has_java() {
hash java 2>/dev/null || return 1
}
function has_rg() {
hash rg 2>/dev/null || return 1
}
# rust version of find
function has_fd() {
hash fd 2>/dev/null || return 1
}
function has_clojure() {
hash clojure 2>/dev/null || return 1
}
function has_tomcat() {
exists /opt/tomcat_latest/bin || return 1
}
function has_systemd() {
exists /etc/systemd || return 1
}
# Oracle DB shit
XE18pattern='18c'
function has_oracledb() {
exists /etc/profile.d/oracle_env.sh || return 1
}
function has_sqlcl() {
exists /opt/sqlcl/bin/sql || return 1
}
function has_sqlplus() {
hash sqlplus 2>/dev/null || return 1
}
function has_18xe() {
has_oracledb && [[ ${ORACLE_HOME} =~ $XE18pattern ]] || return 1
}
function has_gpg_agent() {
hash gpg-connect-agent 2>/dev/null || return 1
}
function has_govc() {
hash govc 2>/dev/null || return 1
}
function has_ssh_agent() {
hash ssh-agent 2>/dev/null || return 1
}
function has_microk8s() {
hash microk8s 2>/dev/null || return 1
}
function has_kubectl() {
hash kubectl 2>/dev/null || return 1
}
function has_app() {
for app in clojure docker exa gls govc gpg_agent java kubectl ld nvm rg ssh_agent systemd tomcat oracledb sqlcl sqlplus 18xe; do
has_$app
[[ $? == ${1:-0} ]] && echo $app
done
}
# Remove an entry from $PATH
# Based on http://stackoverflow.com/a/2108540/142339
# Print $PATH with every directory given as an argument removed.
# The value is wrapped in sentinel colons so each entry can be matched as
# ":dir:"; matched entries are deleted and the sentinels stripped again.
function path_remove() {
  local entry result
  result=":$PATH:"
  for entry in "$@"; do
    result="${result//:$entry:/:}"
  done
  result="${result#:}"
  result="${result%:}"
  echo "$result"
}
# Print $MANPATH with every directory given as an argument removed, using the
# same sentinel-colon technique as path_remove.
function manpath_remove() {
  local entry result
  result=":$MANPATH:"
  for entry in "$@"; do
    result="${result//:$entry:/:}"
  done
  result="${result#:}"
  result="${result%:}"
  echo "$result"
}
# Display a fancy multi-select menu.
# Inspired by http://serverfault.com/a/298312
function prompt_menu() {
local exitcode prompt choices nums i n
exitcode=0
if [[ "$2" ]]; then
_prompt_menu_draws "$1"
read -t $2 -n 1 -sp "Pour éditer cette liste, appuyez sur n'importe quelle touche dans les $2 secondes. "
exitcode=$?
echo ""
fi 1>&2
if [[ "$exitcode" == 0 ]]; then
prompt="Options de bascule (séparer les options avec des espaces, ENTRER quand fait): "
while _prompt_menu_draws "$1" 1 && read -rp "$prompt" nums && [[ "$nums" ]]; do
_prompt_menu_adds $nums
done
fi 1>&2
_prompt_menu_adds
}
function _prompt_menu_iter() {
local i sel state
local fn=$1
shift
for i in "${!menu_options[@]}"; do
state=0
for sel in "${menu_selects[@]}"; do
[[ "$sel" == "${menu_options[i]}" ]] && state=1 && break
done
$fn $state $i "$@"
done
}
function _prompt_menu_draws() {
e_header "$1"
_prompt_menu_iter _prompt_menu_draw "$2"
}
function _prompt_menu_draw() {
local modes=(error success)
if [[ "$3" ]]; then
e_${modes[$1]} "$(printf "%2d) %s\n" $(($2 + 1)) "${menu_options[$2]}")"
else
e_${modes[$1]} "${menu_options[$2]}"
fi
}
function _prompt_menu_adds() {
_prompt_menu_result=()
_prompt_menu_iter _prompt_menu_add "$@"
menu_selects=("${_prompt_menu_result[@]}")
}
function _prompt_menu_add() {
local state i n keep match
state=$1
shift
i=$1
shift
for n in "$@"; do
if [[ $n =~ ^[0-9]+$ ]] && ((n - 1 == i)); then
match=1
[[ "$state" == 0 ]] && keep=1
fi
done
[[ ! "$match" && "$state" == 1 || "$keep" ]] || return
_prompt_menu_result=("${_prompt_menu_result[@]}" "${menu_options[i]}")
}
# Given strings containing space-delimited words A and B, "setdiff A B" will
# return all words in A that do not exist in B. Arrays in bash are insane
# (and not in a good way).
# From http://stackoverflow.com/a/1617303/142339
function setdiff() {
  # Compute the set difference A \ B of two space-delimited word lists.
  #
  # Usage: setdiff [1] "wordsA" "wordsB"
  #   - A leading literal "1" enables debug output on stderr.
  #   - With string arguments, the inputs are split into LOCAL arrays and the
  #     result is echoed to stdout.
  #   - With NO string arguments, the caller's GLOBAL setdiffA/setdiffB
  #     arrays are used and the result is left in the global setdiffC array
  #     (nothing is echoed in that mode).
  local debug skip a b
  if [[ "$1" == 1 ]]; then
    debug=1
    shift
  fi
  if [[ "$1" ]]; then
    # String mode: shadow the globals with locals built from the arguments.
    local setdiffA setdiffB setdiffC
    setdiffA=($1)
    setdiffB=($2)
  fi
  setdiffC=()
  for a in "${setdiffA[@]}"; do
    skip=
    # Keep "a" only if it appears nowhere in B.
    for b in "${setdiffB[@]}"; do
      [[ "$a" == "$b" ]] && skip=1 && break
    done
    [[ "$skip" ]] || setdiffC=("${setdiffC[@]}" "$a")
  done
  # Debug: print each array's name, element count and contents to stderr.
  [[ "$debug" ]] && for a in setdiffA setdiffB setdiffC; do
    echo "$a ($(eval echo "\${#$a[*]}")) $(eval echo "\${$a[*]}")" 1>&2
  done
  [[ "$1" ]] && echo "${setdiffC[@]}"
}
# If this file was being sourced, exit now.
[[ "$1" == "source" ]] && return
###########################################
# INTERNAL DOTFILES "INIT" VARS / FUNCTIONS
###########################################
e_header 'Dotfiles - "Gas" Andrés Gasson - http://frap.github.io/atea-dotfiles'
# Initialise.
init_file=$DOTFILES/caches/init/selected
function init_files() {
local i f dirname oses os opt remove
dirname="$(dirname "$1")"
f=("$@")
menu_options=()
menu_selects=()
for i in "${!f[@]}"; do menu_options[i]="$(basename "${f[i]}")"; done
if [[ -e "$init_file" ]]; then
# Read cache file if possible
IFS=$'\n' read -d '' -r -a menu_selects <"$init_file"
else
# Otherwise default to all scripts not specifically for other OSes
oses=($(get_os 1))
for opt in "${menu_options[@]}"; do
remove=
for os in "${oses[@]}"; do
[[ "$opt" =~ (^|[^a-z])$os($|[^a-z]) ]] && remove=1 && break
done
[[ "$remove" ]] || menu_selects=("${menu_selects[@]}" "$opt")
done
fi
prompt_menu "Exécuter les scripts d'initialisation?" $prompt_delay
# Write out cache file for future reading.
rm "$init_file" 2>/dev/null
for i in "${!menu_selects[@]}"; do
echo "${menu_selects[i]}" >>"$init_file"
echo "$dirname/${menu_selects[i]}"
done
}
function init_do() {
e_header "Sourcing de ce fichier $(basename "$2")"
source "$2"
}
# Copy files.
function copy_header() { e_header "Copier des fichiers dans le répertoire d'accueil"; }
function copy_test() {
if [[ -e "$2" && ! "$(cmp "$1" "$2" 2>/dev/null)" ]]; then
echo "même fichier"
elif [[ "$1" -ot "$2" ]]; then
echo "nouveau fichier de destination"
fi
}
function copy_do() {
e_success "Copie ~/$1."
cp "$2" ~/
}
# Link files.
function link_header() { e_header "Relier des fichiers dans le répertoire d'accueil"; }
function link_test() {
[[ "$1" -ef "$2" ]] && echo "même fichier"
}
function link_do() {
e_success "Linking ~/$1."
ln -sf ${2#$HOME/} ~/
}
# Copy, link, init, etc.
function do_stuff() {
local base dest skip
local files=($DOTFILES/$1/*)
[[ $(declare -f "$1_files") ]] && files=($($1_files "${files[@]}"))
# No files? abort.
if ((${#files[@]} == 0)); then return; fi
# Run _header function only if declared.
[[ $(declare -f "$1_header") ]] && "$1_header"
# Iterate over files.
for file in "${files[@]}"; do
base="$(basename $file)"
dest="$HOME/$base"
# Run _test function only if declared.
if [[ $(declare -f "$1_test") ]]; then
# If _test function returns a string, skip file and print that message.
skip="$("$1_test" "$file" "$dest")"
if [[ "$skip" ]]; then
e_error "Sauter ~/$base, $skip."
continue
fi
# Destination file already exists in ~/. Back it up!
if [[ -e "$dest" ]]; then
e_arrow "La sauvegarde de ~/$base."
# Set backup flag, so a nice message can be shown at the end.
backup=1
# Create backup dir if it doesn't already exist.
[[ -e "$backup_dir" ]] || mkdir -p "$backup_dir"
# Backup file / link / whatever.
mv "$dest" "$backup_dir"
fi
fi
# Do stuff.
"$1_do" "$base" "$file"
done
}
# Enough with the functions, let's do stuff.
export prompt_delay=5
# Ensure that we can actually, like, compile anything.
if [[ ! "$(type -P gcc)" ]] && is_osx; then
e_error "XCode or the Command Line Tools for XCode must be installed first."
exit 1
fi
# If Git is not installed, install it (Ubuntu only, since Git comes standard
# with recent XCode or CLT)
if [[ ! "$(type -P git)" ]] && is_ubuntu; then
e_header "Installer de Git"
sudo apt-get -qq install git-core
fi
if [[ ! "$(type -P git)" ]] && is_redhat; then
e_header "Installer de Git"
sudo yum install git-core
fi
# If Git isn't installed by now, something exploded. We gots to quit!
if [[ ! "$(type -P git)" ]]; then
e_error "Git doit être installé. Il n'est pas. L'abandon."
exit 1
fi
# Initialise.
if [[ ! -d $DOTFILES ]]; then
# $DOTFILES directory doesn't exist? Clone it!
new_dotfiles_install=1
prompt_delay=10
e_header "Le téléchargement de dotfiles"
git clone --recursive git://github.com/${github_user:-frap}/atea-dotfiles.git $DOTFILES
# setup ssh directory
chmod 0600 $DOTFILES/link/.ssh/config
chmod 0700 $DOTFILES/link/.ssh
chmod 0700 $DOTFILES/link
cd $DOTFILES
elif [[ "$1" != "restart" ]]; then
# Make sure we have the latest files.
e_header "Mise à jour de dotfiles"
cd $DOTFILES
prev_head="$(git rev-parse HEAD)"
git pull
git submodule update --init --recursive --quiet
if [[ "$(git rev-parse HEAD)" != "$prev_head" ]]; then
e_header "Modifications détectées, redémarrage du script"
exec "$0" "restart"
fi
fi
# Add binaries into the path
[[ -d $DOTFILES/bin ]] && PATH=$DOTFILES/bin:$PATH
export PATH
# Tweak file globbing.
shopt -s dotglob
shopt -s nullglob
# Create caches dir and init subdir, if they don't already exist.
mkdir -p "$DOTFILES/caches/init"
# If backups are needed, this is where they'll go.
backup_dir="$DOTFILES/backups/$(date "+%Y_%m_%d-%H_%M_%S")/"
backup=
# Execute code for each file in these subdirectories.
do_stuff "copy"
do_stuff "link"
do_stuff "init"
# Alert if backups were made.
if [[ "$backup" ]]; then
e_info "\nLes sauvegardes ont été déplacées vers ~/${backup_dir#$HOME/}"
fi
# All done!
e_header "Tout est fait!"
| true |
3d758761bdaaee87030dbc5bd6dae5f177ba9c0e | Shell | eiselekd/lang | /sh/b5.sh | UTF-8 | 133 | 2.546875 | 3 | [] | no_license | #!/bin/sh
# Demo of bash slicing ${arr[@]:offset:length} on arrays and "$@".
# NOTE(review): declare/arrays/slicing are bash features; the file's /bin/sh
# shebang only works where sh is bash -- consider #!/bin/bash.
declare -a ar
ar=(1 2 3 4 5)
# Every element after the first (length = count-1): prints "2 3 4 5".
echo "${ar[@]:1:$((${#ar[@]}-1))}"
ar=(1)
# One-element array: a slice starting past the end is empty -> blank line.
echo "${ar[@]:1:$((${#ar[@]}-1))}"
# A positional-parameter slice from offset 0 includes $0 (the script name).
echo "${@:0:1}"
| true |
b238ed0c9fa5527969cbc37dfa3b6d34006ca1a9 | Shell | Nerfan/dotfiles | /i3/scripts/monitorpoll | UTF-8 | 843 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Poll for an external HDMI monitor and switch the display between the laptop
# panel (eDP-1) and the HDMI-1 output as the cable is plugged in or removed.
# default monitor is eDP-1
MONITOR=eDP-1
# functions to switch from eDP-1 to HDMI and vice versa
# Switch to HDMI-1 at 1920x1080 and turn the laptop panel off, then run the
# setwallpaper script (presumably to restore the background after the mode
# change -- script not visible here).
function ActivateHDMI {
    echo "Switching to HDMI-1"
    xrandr --output HDMI-1 --mode 1920x1080 --output eDP-1 --off
    MONITOR=HDMI-1
    ~/.config/i3/scripts/setwallpaper
}
# Switch back to the laptop panel (auto-detected mode) and turn HDMI-1 off.
function DeactivateHDMI {
    echo "Switching to eDP-1"
    xrandr --output HDMI-1 --off --output eDP-1 --auto
    MONITOR=eDP-1
    ~/.config/i3/scripts/setwallpaper
}
# functions to check if HDMI is connected and in use
# True when this script last switched the display to the HDMI output.
function HDMIActive {
    [ "$MONITOR" = "HDMI-1" ]
}
# True when xrandr reports an HDMI-1 output that is actually connected.
# The original negated a search for "disconnected", which also reported
# "connected" when no HDMI-1 line existed at all, and let grep's output leak
# to the terminal; matching the connected line directly with -q fixes both.
function HDMIConnected {
    xrandr | grep -q "^HDMI-1 connected"
}
# actual script: once per second, reconcile which output is active with
# whether the HDMI cable is currently connected.
while :; do
    if ! HDMIActive && HDMIConnected; then
        ActivateHDMI
    fi
    if HDMIActive && ! HDMIConnected; then
        DeactivateHDMI
    fi
    sleep 1s
done
| true |
fa18cfb8fec15fe65fff32f0c99a5a8674fba63a | Shell | sloev/dotfiles | /.zshrc | UTF-8 | 2,043 | 2.84375 | 3 | [] | no_license | # exports
export TERM="xterm-256color"
export PATH="/opt/local/bin:/opt/local/sbin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin"
export PYENV_VIRTUALENV_DISABLE_PROMPT=1
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
# sourcings
if [[ ! -d ~/.zplug ]]; then
git clone https://github.com/zplug/zplug ~/.zplug
source ~/.zplug/init.zsh && zplug update --self
fi
source ~/.zplug/init.zsh
autoload colors && colors
#plugins
zplug "plugins/git", from:oh-my-zsh
zplug "lib/history", from:oh-my-zsh
zplug "lib/completion", from:oh-my-zsh
zplug "lib/key-bindings", from:oh-my-zsh
zplug "zsh-users/zsh-syntax-highlighting", from:github, defer:3
# Install plugins if there are plugins that have not been installed
if ! zplug check --verbose; then
printf "Install? [y/N]: "
if read -q; then
echo; zplug install
fi
fi
# prompt
setopt prompt_subst
ZSH_THEME_GIT_PROMPT_PREFIX="%{$fg[yellow]%}["
ZSH_THEME_GIT_PROMPT_SUFFIX="%{$fg[yellow]%}] %{$reset_color%}"
ZSH_THEME_GIT_PROMPT_DIRTY="%{$fg[red]%}!"
ZSH_THEME_GIT_PROMPT_CLEAN=""
# Emit the themed git-branch segment (via oh-my-zsh's git_prompt_info) when
# the current directory is inside a git work tree, otherwise emit nothing.
my_git_prompt() {
  # rev-parse prints the repo root when inside a repo and nothing otherwise;
  # -d on the empty result is false, so this doubles as an "in a repo?" test.
  if [[ -d $(git rev-parse --show-toplevel 2>/dev/null) ]]; then
    echo $(git_prompt_info)
  else
    echo ''
  fi
}
local PGIT='$(my_git_prompt)'
PROMPT="${PGIT}%{$fg[magenta]%}%n%{$reset_color%}@%{$fg[blue]%}%m %{$fg[yellow]%}%1~ %{$reset_color%} > "
export PS1=$PROMPT
export CLICOLOR=1
export LSCOLORS=ExFxBxDxCxegedabagacad
# functions
function delpyc() {
echo "recursively removing .pyc files from this directory"
pwd
find . -name "*.pyc" -exec rm '{}' ';'
}
# alias
#
alias colorize="ccze -A | less +G -R"
alias ls='ls -GFh'
alias fix_zplug="echo 'fixing zplug' && rm ~/.zplug/zcompdump && exec zsh"
# init zplug
zplug load
export PATH="/Users/me/.pyenv/bin:$PATH"
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
alias journal="python ~/Documents/journal/journal.py"
| true |
6535c61fb19e797dd631c88a2cc0bbb6d3da359b | Shell | bodii/test-code | /shell/test8/reset.sh | UTF-8 | 621 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# Demo of blocking and restoring SIGINT (signal 2): the first "critical"
# phase ignores Ctrl+C, the second phase accepts it again.
# SIGINT can be ignored with an empty-string action; a ':' command also works:
# trap '' 2
# trap : 2
trap '' INT
echo
echo "Doing Critical Operation, cannot be interrupted..."
# Sleep ~20 seconds to simulate work, drawing a progress bar.
echo -en "\t"
COUNT=20
while [ $COUNT -gt 0 ]
do
    # print the progress bar
    echo -n "##"
    sleep 1
    (( COUNT-- ))
done
echo
echo "Critical operation is finished, can receive SIGINT now."
# Restore SIGINT to its default disposition. '-' resets explicitly and
# portably; the original bare `trap INT` relied on a bash-only shorthand
# for the same reset.
trap - INT
echo "Press CTRL+C!"
echo -en "\t"
COUNT=20
while [ $COUNT -gt 0 ]
do
    echo -n "##"
    sleep 1
    (( COUNT-- ))
done
echo
echo "Done."
exit 0
| true |
62b7011d4964b88236cb40681acdb8b327d1fed8 | Shell | kiranlakhani20/revert-commit | /commits2.sh | UTF-8 | 208 | 3.1875 | 3 | [] | no_license | string=""
# Create 350 files named randfile30001..randfile30350 and commit each one
# individually with the message "<n>:randfile_<n>".
x=1
j=30000
while [ $x -le 350 ]
do
	n=$((j+x))
	# Run the commands directly instead of building a string and eval-ing
	# it; echo the equivalent command line for progress visibility.
	echo "touch randfile$n && git add . && git commit -m '$n:randfile_$n'"
	touch "randfile$n" && git add . && git commit -m "$n:randfile_$n"
	# The $[ ... ] arithmetic form is deprecated; use standard $(( ... )).
	x=$((x+1))
done
# NOTE(review): $push is never assigned anywhere in this script, so both of
# these lines are no-ops. Define e.g. push="git push" above if a final push
# is actually intended.
echo $push
eval "$push"
| true |
05962aa5caed61d5ef8306907a6e98294100cb14 | Shell | dromi/Config-Files | /scripties/til_fetch | UTF-8 | 1,276 | 3.859375 | 4 | [] | no_license | #!/bin/bash
# Print a random "Today I Learned" headline from reddit.com/r/todayilearned,
# caching the subreddit JSON locally for 6 hours to avoid hammering the API.
set -e

reddit_url="https://www.reddit.com/r/todayilearned.json"
cache_dir="$HOME/.til_cache"
# Cache lifetime in seconds. (The original wrote `cache_ttl=(60*60*6)`, which
# creates a one-element ARRAY holding the literal string "60*60*6" and only
# worked by accident inside (( )); compute the scalar up front instead.)
cache_ttl=$((60*60*6)) # 6 hours

# Refresh the cache when it is missing or older than the TTL.
if [ ! -f "$cache_dir" ]; then
    touch "$cache_dir"
    response=$(wget -qO- "$reddit_url")
    echo "$response" > "$cache_dir"
elif (( ($(date +%s) - $(stat -L --format %Y "$cache_dir")) > cache_ttl ))
then
    response=$(wget -qO- "$reddit_url")
    echo "$response" > "$cache_dir"
else
    response=$(cat "$cache_dir")
fi

# Select the post array and its length from the listing. Quoting $response
# avoids word-splitting/glob expansion of the JSON before it reaches jq.
children=$(echo "$response" | jq ".data.children")
dist=$(echo "$response" | jq ".data.dist")

# Pick one post at random.
selected_idx=$(shuf -i 0-$((dist-1)) -n 1)
selected=$(echo "$children" | jq ".[$selected_idx].data.title")

# Strip the surrounding JSON quotes.
selected=${selected:1:-1}

# Strip common leading boilerplate, in order: "TIL ", "TIL: ", "that ",
# "of ", "about " (each checked once, in the same order as the original).
for prefix in "TIL " "TIL: " "that " "of " "about "; do
    if [[ $selected == "$prefix"* ]]; then
        selected=${selected:${#prefix}}
    fi
done

# Capitalize the first letter and print.
echo ${selected^}
| true |
6ce99adc3ac672d2d338ddf8011767dd68fec851 | Shell | mreyoud/dotfiles | /profile | UTF-8 | 2,456 | 2.734375 | 3 | [] | no_license | #!/bin/sh
# Bail out early for non-interactive shells: $- contains 'i' only when the
# shell is interactive.
case "$-" in *i*) ;; *) return;; esac
unset LC_ALL LANGUAGE LC_CTYPE MAILCHECK
# Brace expansion sets both names at once (bash/zsh extension, not POSIX sh).
export {NOSPLASH,NOWELCOME}=1
export {EDITOR,VISUAL}=vi
export {PAGER,MANPAGER}=less
# Pick the installed en_US UTF-8 locale name.
# NOTE(review): grep may match several locales, yielding a multi-line LANG --
# consider piping through `head -n1`.
export LANG=$(locale -a | grep -Ei "en.us.utf")
export TERMINAL=st
export READER=zathura
export FILE=ranger
export BROWSER="w3m '%s' &"
LOCAL_PREFIX="$HOME/.local"
export XDG_CONFIG_HOME="${HOME}/.config"
export XDG_CACHE_HOME="${HOME}/.cache"
export XDG_DATA_HOME="${LOCAL_PREFIX}/share"
export XDG_BIN_HOME="${LOCAL_PREFIX}/bin"
export XDG_LIB_HOME="${LOCAL_PREFIX}/lib"
# xdg spec forgot some stuff...
export XDG_MAN_HOME="${LOCAL_PREFIX}/man"
export XDG_INCLUDE_HOME="${LOCAL_PREFIX}/include"
export GOPATH=$HOME/workspace
export PATH=$XDG_BIN_HOME:$GOPATH/bin:$PATH
export MANPATH=$XDG_MAN_HOME:${MANPATH}
export LD_LIBRARY_PATH=$XDG_LIB_HOME:$LD_LIBRARY_PATH
export LIBRARY_PATH=$XDG_LIB_HOME:$LIBRARY_PATH
export C_INCLUDE_PATH=$XDG_INCLUDE_HOME:$C_INCLUDE_PATH
export CPLUS_INCLUDE_PATH=$XDG_INCLUDE_HOME:$CPLUS_INCLUDE_PATH
export CFLAGS="-I$XDG_INCLUDE_HOME $CFLAGS"
export CXXFLAGS="-I$XDG_INCLUDE_HOME $CXXFLAGS"
export LDFLAGS="-L$XDG_LIB_HOME -Wl,-rpath,$XDG_LIB_HOME $LDFLAGS"
export LD_RUNPATH="$XDG_LIB_HOME:$LD_RUNPATH"
export PKG_CONFIG_PATH="$XDG_LIB_HOME/pkgconfig:$PKG_CONFIG_PATH"
export ACLOCAL_FLAGS="-I $XDG_DATA_HOME/aclocal/"
unset LOCAL_PREFIX
# more enforcement of the xdg thing
export GTK2_RC_FILES="${XDG_CONFIG_HOME}/gtk-2.0"
export GTK3_RC_FILES="${XDG_CONFIG_HOME}/gtk-3.0"
export MPLAYER_HOME="${XDG_CONFIG_HOME}/mplayer"
export LESSKEY="${XDG_CONFIG_HOME}/less/keys"
export SCREENRC="${XDG_CONFIG_HOME}/screen/screenrc"
export INPUTRC="${XDG_CONFIG_HOME}/readline/inputrc"
export VIMINIT=":source ${XDG_CONFIG_HOME}"/vim/vimrc
export WGETRC="$XDG_CONFIG_HOME/wgetrc"
export ICEAUTHORITY="${XDG_CACHE_HOME}/ICEauthority"
export HISTFILE="${XDG_CACHE_HOME}/bash/history"
export LESSHISTFILE="${XDG_CACHE_HOME}/less/history"
# highlighting inside manpages and elsewhere
export LESS_TERMCAP_mb=$'\E[01;31m' # begin blinking
export LESS_TERMCAP_md=$'\E[01;38;5;74m' # begin bold
export LESS_TERMCAP_me=$'\E[0m' # end mode
export LESS_TERMCAP_se=$'\E[0m' # end standout-mode
export LESS_TERMCAP_so=$'\E[38;5;246m' # begin standout-mode - info box
export LESS_TERMCAP_ue=$'\E[0m' # end underline
export LESS_TERMCAP_us=$'\E[04;38;5;146m' # begin underline
[[ -f $HOME/.bashrc ]] && . $HOME/.bashrc
| true |
1760d9aa31a25184883e1ac867a2d787fa7afc6c | Shell | tnqn/antrea | /hack/generate-helm-release.sh | UTF-8 | 3,500 | 3.984375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Copyright 2022 Antrea Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eo pipefail
# Print all arguments to stderr (used for usage/help/error text so that it
# does not pollute stdout).
function echoerr {
    >&2 echo "$@"
}
_usage="Usage: $0 [--mode (dev|release)] --out <DIR>
Package the Antrea chart and the Flow Aggregator chart into chart archives.
Environment variable VERSION must be set.
--out <DIR> Output directory for chart archives
--help, -h Print this message and exit
You can set the HELM environment variable to the path of the helm binary you want us to
use. Otherwise we will download the appropriate version of the helm binary and use it."
# Print the full usage text to stderr.
function print_usage {
    echoerr "$_usage"
}
# Print a short pointer to --help to stderr.
function print_help {
    echoerr "Try '$0 --help' for more information."
}
# Defaults: dev mode, no output directory (--out is mandatory, checked later).
MODE="dev"
OUT=""

# Consume flags pairwise; `shift 2` assumes each option's value is present.
while [[ $# -gt 0 ]]
do
key="$1"

case $key in
--mode)
# NOTE(review): MODE is parsed here but never referenced later in this
# script — confirm whether --mode is still needed or used by a caller.
MODE="$2"
shift 2
;;
--out)
OUT="$2"
shift 2
;;
-h|--help)
print_usage
exit 0
;;
*) # unknown option
echoerr "Unknown option $1"
exit 1
;;
esac
done
# VERSION drives both --app-version and --version for `helm package`;
# it must be exported by the caller.
if [ -z "$VERSION" ]; then
echoerr "Environment variable VERSION must be set"
print_help
exit 1
fi

# --out is mandatory: it is where the packaged chart archives are moved.
if [ "$OUT" == "" ]; then
echoerr "--out is required to provide output path"
print_help
exit 1
fi

# Absolute directory containing this script, used to locate sibling files.
THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# Provides the verify_helm function used below.
source $THIS_DIR/verify-helm.sh

# Use the caller-supplied HELM binary if it is set and runnable; otherwise
# fall back to verify_helm to locate/download a suitable helm.
if [ -z "$HELM" ]; then
HELM="$(verify_helm)"
elif ! $HELM version > /dev/null 2>&1; then
echoerr "$HELM does not appear to be a valid helm binary"
print_help
exit 1
fi
ANTREA_CHART="$THIS_DIR/../build/charts/antrea"
# create a backup file before making changes.
# note that the backup file will not be included in the release: .bak files are
# ignored as per the .helmignore file.
cp "$ANTREA_CHART/Chart.yaml" "$ANTREA_CHART/Chart.yaml.bak"
# Stamp the prerelease annotation from the PRERELEASE environment variable
# (expected to be set by the caller); yq -i edits Chart.yaml in place.
yq -i '.annotations."artifacthub.io/prerelease" = strenv(PRERELEASE)' "$ANTREA_CHART/Chart.yaml"
# Point the image at the fully-qualified registry; sed -i.bak leaves
# values.yaml.bak behind, which is restored below.
sed -i.bak 's=antrea/antrea-ubuntu=projects.registry.vmware.com/antrea/antrea-ubuntu=g' "$ANTREA_CHART/values.yaml"
$HELM package --app-version $VERSION --version $VERSION $ANTREA_CHART
mv "antrea-$VERSION.tgz" "$OUT/antrea-chart.tgz"
# Restore the pristine chart files so the working tree is left unchanged.
mv "$ANTREA_CHART/Chart.yaml.bak" "$ANTREA_CHART/Chart.yaml"
mv "$ANTREA_CHART/values.yaml.bak" "$ANTREA_CHART/values.yaml"
# Same backup / annotate / rewrite / package / restore sequence as the
# Antrea chart above, applied to the Flow Aggregator chart.
FLOW_AGGREGATOR_CHART="$THIS_DIR/../build/charts/flow-aggregator"
cp "$FLOW_AGGREGATOR_CHART/Chart.yaml" "$FLOW_AGGREGATOR_CHART/Chart.yaml.bak"
yq -i '.annotations."artifacthub.io/prerelease" = strenv(PRERELEASE)' "$FLOW_AGGREGATOR_CHART/Chart.yaml"
sed -i.bak 's=antrea/flow-aggregator=projects.registry.vmware.com/antrea/flow-aggregator=g' "$FLOW_AGGREGATOR_CHART/values.yaml"
$HELM package --app-version $VERSION --version $VERSION $FLOW_AGGREGATOR_CHART
mv "flow-aggregator-$VERSION.tgz" "$OUT/flow-aggregator-chart.tgz"
# Restore the pristine chart files so the working tree is left unchanged.
mv "$FLOW_AGGREGATOR_CHART/Chart.yaml.bak" "$FLOW_AGGREGATOR_CHART/Chart.yaml"
mv "$FLOW_AGGREGATOR_CHART/values.yaml.bak" "$FLOW_AGGREGATOR_CHART/values.yaml"
| true |
3ec7f5099b9b48f6cc431e017fcdcb9c9826fe7d | Shell | yuki7070/mikan | /test.sh | UTF-8 | 1,611 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# try EXPECTED INPUT
# Compile INPUT with ./mikan, assemble with gcc, run the binary, and compare
# its exit status against EXPECTED. Exits the whole test run on failure.
try() {
    expected="$1"
    input="$2"

    # Fail fast if compilation or assembly errors out. Without these checks
    # a failed build would silently execute the stale ./tmp binary left over
    # from the previous case and report a bogus result.
    if ! ./mikan "$input" > tmp.s; then
        echo "compiler failed on: $input"
        exit 1
    fi
    if ! gcc -o tmp tmp.s; then
        echo "assembly failed on: $input"
        exit 1
    fi

    ./tmp
    actual="$?"

    if [ "$actual" = "$expected" ]; then
        echo "$input => $actual"
    else
        echo "$expected expected, but got $actual"
        exit 1
    fi
}
# Each case: expected process exit status, then the source fed to ./mikan.

# Local variables and arithmetic on them.
try 10 "int main() { int abc = 10; return abc; }"
try 30 "int main() { int abc = 10; int def = 20; return abc+def; }"
# if/else branches (taken and not taken), and if without else.
try 10 "int main() { int a = 10; if (5 > 1) { a = 10; } else { a = 5; } return a; }"
try 5 "int main() { int a = 10; if (1 > 5) { a = 10; } else { a = 5; } return a; }"
try 10 "int main() { int a = 5; if (5 > 1) { a = 10;} return a; }"
# Function definitions, calls, and parameters; block-scoped locals.
try 10 "int test() { int a = 7; int b = 3; return a + b; } int main() { return test(); }"
try 20 "int main() { int a = 10; if (a > 5) { int b = 10; a = a + b; } else { int b = 5; a = a + b; } return a; }"
try 6 "int main() { int a = 1; if (a > 5) { int b = 10; a = a + b; } else { int b = 5; a = a + b; } return a; }"
try 10 "int test(int a, int b) { return a + b; } int main() { int a = 7; int b = 3; return test(a, b); }"
# Pointers: dereference, address-of.
try 10 "int main() { int *a; *a = 10; return *a; }"
try 10 "int main() { int x = 10; int *y; y = &x; return *y;}"
# sizeof for int (4) and pointer (8) types.
try 4 "int main() { int x; x = sizeof(x); return x; }"
try 8 "int main() { int *x; return sizeof(x); }"
# Array declaration, recursion (fibonacci), and pointer arithmetic.
try 10 "int main() { int x[10]; int y = 10; return y; }"
try 55 "int a(int n) { int b = 0; if (n == 0) { b = 0; } if (n == 1) { b = 1; } if (n == 2) { b = 1;} if (n > 2) { b = a(n-1) + a(n-2); } return b; } int main() { return a(10); }"
try 4 "int main() { int a[2]; *a = 1; *(a+1) = 2; *(a+2) = 4; return *(a+2); }"
echo OK
bda70d116490e4ec16af1a27cc7da6f4ee6630c4 | Shell | joamatab/install_new_computer | /vpn-up | UTF-8 | 389 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# NetworkManager connection name of the VPN to raise/lower.
VPN_NAME="vpno"
# ESSID of the wireless network at the time the script starts.
ESSID=$(iwgetid -r)

# NetworkManager dispatcher-style arguments: interface name and event.
interface=$1 status=$2
case $status in
up|vpn-down)
# NOTE(review): this greps the current iwgetid output for the ESSID that
# was just read above, so it effectively only checks that we are still
# associated with some wireless network — confirm the intent.
if iwgetid | grep -qs ":\"$ESSID\""; then
nmcli connection up id "$VPN_NAME"
fi
;;
down)
if iwgetid | grep -qs ":\"$ESSID\""; then
# Only bring the VPN down if it is currently among the active connections.
if nmcli connection show --active | grep "$VPN_NAME"; then
nmcli connection down id "$VPN_NAME"
fi
fi
;;
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.