blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d539b8e6d2b2830d00813200b142a909225b166a | Shell | wuchenyv1990/cloud-k8s | /BuildScript/build-images.sh | UTF-8 | 679 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# After `mvn clean install`, copy each service jar from ~/.m2/repository into
# the docker/ context next to this script and (re)build its Docker image.
THIS_DIR=$(dirname "$(readlink -f "$0")")
mvn_repo=~/.m2/repository
prj_repo_base=${mvn_repo}/com/wuchenyv1990
prj_version=1.0-SNAPSHOT
image_version=1.0
project=("svc-a" "svc-b" "svc-c" "svc-d")
# Quote the array expansion so element boundaries survive (SC2068).
for prj in "${project[@]}"; do
  jar_file=${prj}-${prj_version}.jar
  release=${prj_repo_base}/${prj}/${prj_version}/${jar_file}
  cp "${release}" "${THIS_DIR}"/docker/"${jar_file}"
  # Best-effort removal of a stale image; it may not exist on the first run.
  docker rmi "${prj}":${image_version}
  # Fix: a space is required before the continuation backslash, otherwise
  # the --build-arg value and the following -t option are glued into the
  # single word 'release_jar=<jar>-t' and the build is mis-parsed.
  docker build "${THIS_DIR}"/docker/ --build-arg release_jar="${jar_file}" \
    -t "${prj}":${image_version}
  rm "${THIS_DIR}"/docker/"${jar_file}"
done
| true |
95986454f4e1f06f86645ffdc6f32da470d51d2c | Shell | tmp1121/dotfiles | /.shell/linux | UTF-8 | 335 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Linux-specific aliases plus kubectx PATH/completion setup.
alias l='ls'
alias ll='ls -l --color'
alias lll='ls -al --color'
alias gcc32='gcc -m32 -fno-stack-protector'
alias swapsearch='grep VmSwap /proc/*/status | sort -k 2 -r | head'
# kubectx is added to PATH as a directory of scripts, so test with -d
# (the original -f regular-file test can never match a directory).
if [ -d "$HOME/.kubectx" ]; then
    export PATH=$HOME/.kubectx:$PATH
    # Fix: the completions live under $HOME/.kubectx; the original sourced
    # /.kubectx/... at the filesystem root, which does not exist.
    . "$HOME/.kubectx/completion/kubectx.bash"
    . "$HOME/.kubectx/completion/kubens.bash"
fi
| true |
c65020c896c49efc5d8a380e45e543a0f058bf49 | Shell | cpusoft/rpstir | /bin/rpki-object/create_object/tests/empty_manifest.sh.in | UTF-8 | 1,728 | 2.578125 | 3 | [] | permissive | #!/bin/sh -e
@SETUP_ENVIRONMENT@
# Builds a minimal RPKI object tree: a self-signed root CA certificate, an
# EE certificate for the manifest, and a manifest whose fileList is empty.
# `run`, `fatal`, `gen_key` and `create_object` are presumably provided by
# @SETUP_ENVIRONMENT@ / the rpstir test harness -- TODO confirm.
OUTDIR="$TESTS_BUILDDIR/empty_manifest"
rm -rf "$OUTDIR"
mkdir "$OUTDIR"
TEST_LOG_NAME=empty_manifest
TEST_LOG_DIR="$OUTDIR"
# 2048-bit RSA key for the root CA.
run "gen_key-root" gen_key "$OUTDIR/root.p15" 2048 \
|| fatal "gen_key root.p15 failed"
# Self-signed root CA certificate claiming all IPv4/IPv6/AS resources.
run "create_object-root" create_object CERT \
outputfilename="$OUTDIR/root.cer" \
subjkeyfile="$OUTDIR/root.p15" \
type=CA \
selfsigned=true \
serial=1 \
issuer="root" \
subject="root" \
notbefore=120101010101Z \
notafter=490101010101Z \
sia="r:rsync://example.com/rpki/,m:rsync://example.com/rpki/empty_manifest.mft" \
ipv4="0.0.0.0/0" \
ipv6="::/0" \
as=0-4294967295 \
|| fatal "create_object root.cer failed"
# Key for the end-entity certificate that will sign the manifest.
run "gen_key-mft-ee" gen_key "$OUTDIR/empty_manifest.mft.ee.p15" 2048 \
|| fatal "gen_key empty_manifest.mft.ee.p15 failed"
# EE certificate issued by the root; resources are inherited from the parent.
run "create_object-mft-ee" create_object CERT \
outputfilename="$OUTDIR/empty_manifest.mft.ee.cer" \
parentcertfile="$OUTDIR/root.cer" \
parentkeyfile="$OUTDIR/root.p15" \
subjkeyfile="$OUTDIR/empty_manifest.mft.ee.p15" \
type=EE \
notbefore=120101010101Z \
notafter=490101010101Z \
serial=1 \
subject=empty_manifest-mft-ee \
crldp=rsync://example.com/rpki/invalid.crl \
aia=rsync://example.com/rpki/root.cer \
sia="s:rsync://example.com/rpki/empty_manifest.mft" \
ipv4=inherit \
ipv6=inherit \
as=inherit \
|| fatal "create_object empty_manifest.mft.ee.cer failed"
# The manifest under test: note the deliberately empty fileList.
run "create_object-mft" create_object MANIFEST \
outputfilename="$OUTDIR/empty_manifest.mft" \
EECertLocation="$OUTDIR/empty_manifest.mft.ee.cer" \
EEKeyLocation="$OUTDIR/empty_manifest.mft.ee.p15" \
thisUpdate=20120101010101Z \
nextUpdate=20490101010101Z \
manNum=1 \
fileList="" \
|| fatal "create_object empty_manifest.mft failed"
| true |
037c58455dd57068878c739c4ae0c335787d72f5 | Shell | datasets-at/mi-dsapid | /copy/var/zoneinit/includes/31-dsapid.sh | UTF-8 | 1,859 | 2.734375 | 3 | [
"MIT"
] | permissive |
# Zoneinit include: seed /data with a default dsapid configuration and user
# list on first boot, then start the service.
# NOTE(review): `log` is presumably provided by the zoneinit framework that
# sources this file -- confirm.
log "creating /data directory"
if [[ ! -e /data ]]; then
mkdir /data
mkdir /data/files
fi
if [[ ! -e /data/config.json ]]; then
log "creating initial configuration"
# Unquoted EOF delimiter: ${HOSTNAME} below is expanded when the file is
# written, baking the zone's hostname into base_url.
cat > /data/config.json << EOF
{
"log_level": "info",
"base_url": "http://${HOSTNAME}/",
"mount_ui": "/opt/dsapid/ui",
"listen": {
"http": {
"address": "0.0.0.0:80",
"ssl": false
}
},
"datadir": "/data/files",
"users": "/data/users.json",
"sync": [
{
"name": "official joyent dsapi",
"active": false,
"type": "dsapi",
"provider": "joyent",
"source": "https://datasets.joyent.com/datasets",
"delay": "24h"
},
{
"name": "official joyent imgapi",
"active": false,
"type": "imgapi",
"provider": "joyent",
"source": "https://images.joyent.com/images",
"delay": "24h"
},
{
"name": "datasets.at",
"active": false,
"type": "dsapi",
"provider": "community",
"source": "http://datasets.at/api/datasets",
"delay": "24h"
}
]
}
EOF
fi
if [[ ! -e /data/users.json ]]; then
log "creating initial users list and seed it with joyent uuids"
# Static seed data; no shell expansion occurs in this document's content.
cat > /data/users.json << EOF
[
{
"uuid": "352971aa-31ba-496c-9ade-a379feaecd52",
"name": "sdc",
"type": "system",
"provider": "joyent"
},
{
"uuid": "684f7f60-5b38-11e2-8eae-6b88dd42e590",
"name": "sdc",
"type": "system",
"provider": "joyent"
},
{
"uuid": "a979f956-12cb-4216-bf4c-ae73e6f14dde",
"name": "sdc",
"type": "system",
"provider": "joyent"
},
{
"uuid": "9dce1460-0c4c-4417-ab8b-25ca478c5a78",
"name": "jpc",
"type": "system",
"provider": "joyent"
}
]
EOF
fi
log "force correct ownership of /data directory"
chown -R dsapid:dsapid /data
log "starting dsapid"
# SmartOS service management: enable the dsapid SMF service.
/usr/sbin/svcadm enable dsapid
| true |
c4e658d6403764eb977420f94f9c063e48ce2d13 | Shell | bavery22/toolchain-base-container | /scripts/build_containers.codi | UTF-8 | 672 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
set -x
set -o pipefail

# Removes the old codi image (if any), then rebuilds it from
# dockerfiles/Dockerfile.codi with the repository root as build context.
#
# Example:
#
#   ./scripts/build_containers.base

# Default the Docker Hub namespace when CI did not provide one.
if [ -z "${DOCKERHUB_REPO}" ]; then
    DOCKERHUB_REPO="crops"
fi

echo "TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG"

TOPDIR=$(git rev-parse --show-toplevel)

# Remove the existing image so the subsequent build starts from scratch.
Q=$(docker images -q "${DOCKERHUB_REPO}/codi")
if [ -n "$Q" ]; then
    echo "Removing codi image"
    docker rmi -f "$Q"
fi

cd "${TOPDIR}/dockerfiles"

# Fix: query the namespaced image name. The build tags the image as
# ${DOCKERHUB_REPO}/codi, so a bare "codi" repository never exists and the
# original check was unconditionally true.
Q=$(docker images -q "${DOCKERHUB_REPO}/codi")
if [ -z "$Q" ]; then
    echo "Build codi image"
    docker build -t "${DOCKERHUB_REPO}/codi" -f Dockerfile.codi ..
fi
| true |
47f289b7d29f22554ee3174651f7408d103762a7 | Shell | mwhittaker/vms | /jekyll/install_jekyll.sh | UTF-8 | 339 | 2.6875 | 3 | [] | no_license | #! /bin/bash
# For information on how to use Jekyll with GitHub pages, refer to
# https://help.github.com/articles/setting-up-your-github-pages-site-locally-with-jekyll/

# Clone the GitHub Pages site and install/refresh its Jekyll dependencies.
main() {
    git clone git@github.com:mwhittaker/mwhittaker.github.io || return 1
    # Guard the cd (SC2164): without it a failed clone would let the
    # gem/bundle commands run in whatever the current directory is.
    cd mwhittaker.github.io || return 1
    gem install jekyll bundler
    bundle install
    bundle update
}

main
| true |
5092b2e661cdfb2dc37f2680f88d796d1dd19081 | Shell | carinalyn123/tensornet | /configure.sh | UTF-8 | 1,708 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# NOTE(review): this script uses bash-only features ([[ ]], pushd/popd,
# BASH_SOURCE, read -p) but the file's shebang is /bin/sh -- it should be
# run with bash.

readonly WORKSPACE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly THIS_FILE="${WORKSPACE_DIR}/$(basename "${BASH_SOURCE[0]}")"

pushd "$WORKSPACE_DIR" > /dev/null

# command -v is the portable replacement for `which`.
readonly PYTHON_PATH="$(command -v python)"

# Abort unless the installed tensorflow is exactly 2.2.0.
function check_tf_version()
{
    echo "checking tensorflow version installed..."
    local tf_version
    tf_version=$(python -c "import tensorflow as tf; print(tf.version.VERSION)")
    if [[ "x${tf_version}" != "x2.2.0" ]]; then
        echo "tensorflow version is ${tf_version}, please use 2.2.0 instead"
        exit 1
    fi
    echo "tensorflow version installed is ${tf_version}"
}

# Prompt for the local openmpi install path and symlink its include/ and
# lib/ directories into thirdparty/openmpi/.
function link_mpi_thirdparty()
{
    read -p "please give us your openmpi install path:" mpi_path
    echo "using openmpi include path:$mpi_path/include"
    echo "using openmpi lib path:$mpi_path/lib"
    rm -rf thirdparty/openmpi/include
    ln -s "${mpi_path}/include" thirdparty/openmpi/
    rm -rf thirdparty/openmpi/lib
    ln -s "${mpi_path}/lib" thirdparty/openmpi/
}

# Symlink the installed tensorflow shared libraries into
# thirdparty/tensorflow/lib/ so the build can link against them.
function link_tf_thirdparty()
{
    local tf_include_path
    local tf_lib_path
    tf_include_path=$(python -c "import tensorflow as tf;print(tf.sysconfig.get_include())")
    tf_lib_path=$(python -c "import tensorflow as tf;print(tf.sysconfig.get_lib())")
    echo "using tensorflow lib path:${tf_lib_path}"
    rm thirdparty/tensorflow/lib/*
    mkdir -p thirdparty/tensorflow/lib/
    ln -s "${tf_lib_path}"/lib* thirdparty/tensorflow/lib/
    # The pywrap library lives under python/ and needs a lib-prefixed name.
    ln -sf "${tf_lib_path}/python/_pywrap_tensorflow_internal.so" thirdparty/tensorflow/lib/lib_pywrap_tensorflow_internal.so
}

function main()
{
    echo "using python:${PYTHON_PATH}"
    check_tf_version
    link_mpi_thirdparty
    link_tf_thirdparty
    echo "configure done"
}

main $@
popd > /dev/null
exit 0
| true |
e19b06a30833854493fcc740467c21bd56ab6cc4 | Shell | alvroox/ASIX-M06 | /P15/Proyecto01.sh | UTF-8 | 1,174 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# Interactive, root-only helper: create a local user with an initial
# password and force a password change at first login.
clear
echo "USER PROGRAM CREATOR"
echo "-------------------------"
# Display the UID
echo "Your UID is ${UID}"
# Display the username
USER_NAME=$(id -un)
echo "Your username is ${USER_NAME}"
echo "-------------------------"
# Only root (UID 0) may create accounts.
if [[ "${UID}" -eq 0 ]]
then
	echo -n "Enter LOGIN name [ENTER]: "
	read loginName
	echo -n "Enter REAL name [ENTER]: "
	read realName
	# NOTE(review): plain `read` echoes the password to the terminal;
	# consider `read -s` if silent input is desired.
	echo -n "Enter PASSWORD [ENTER]: "
	read password
	echo "-------------------------"
	# Create the user with a hashed (-1: MD5 crypt) initial password.
	if useradd -m -p "$(openssl passwd -1 "${password}")" -s /bin/bash "${loginName}" 2> /dev/null; then
		echo "User inserted succeeded!"
		# Expire the password so it must be changed at first login.
		if passwd -e "${loginName}" 1> /dev/null; then
			echo "Password will change when login"
			echo "-------------------------"
			# grep -E replaces the deprecated `cat | egrep`; the name is
			# quoted so it cannot word-split or glob (SC2086).
			grep -E -- "${loginName}" /etc/shadow
			echo "User is in the folder"
			machine=$(hostname)
			echo "-------------------------"
			# Show Machine Name Name Password
			echo host: "$machine" user: "$loginName" pswd: "$password"
		else
			echo "Something went wrong"
		fi
	else
		echo "User inserted failed"
	fi
else
	echo 'You are not root.'
fi
| true |
e692900e46a98b8b043acc74c7bdac6f67afa9dd | Shell | krishnakumarkp/cloudnative_go | /bootstrap.sh | UTF-8 | 359 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env bash
# Vagrant provisioner: prepare the box, install Docker via the convenience
# script, and let the vagrant user talk to the Docker daemon.

# create project folder
sudo mkdir "/files"

# Refresh and upgrade the base system, then install Docker prerequisites.
sudo apt-get update
sudo apt-get upgrade
sudo apt install -y apt-transport-https ca-certificates curl software-properties-common

# Fetch Docker's installer and wait until the download has landed on disk.
sudo curl -fsSL https://get.docker.com -o get-docker.sh
until [ -f ./get-docker.sh ]; do sleep 1; done
sudo sh ./get-docker.sh

# Allow the vagrant user to use Docker without sudo.
sudo usermod -aG docker vagrant
3e17bfefc1992d5a9d67df218da5ea53734fac6a | Shell | loaonline/letsencryptrenewscript | /certificate-renew | UTF-8 | 1,077 | 3.828125 | 4 | [] | no_license | #!/bin/bash
#
# $Id$
#
# Renew Let's Encrypt SSL cert and assemble pem for pound.
#
DOMAINNAME="www.mikeroberts.ca"
NOTIFYEMAIL="postmaster"
LECONFIGDIR="/etc/letsencrypt"
LEINSTALLDIR="/opt/letsencrypt"
# NOTE(review): this is a directory path but it is passed to `tail` below
# as if it were a log file -- confirm the intended log location.
LELOG="/opt/letsencrypt"
LECOMMAND="$LEINSTALLDIR/letsencrypt-auto"
LEARGS="--config $LECONFIGDIR/cli.ini -d $DOMAINNAME certonly"

# Sanity-check the installation before doing anything.
[ -d "$LEINSTALLDIR" ] || exit 1
[ -d "$LECONFIGDIR" ] || exit 1
[ -x "$LECOMMAND" ] || exit 1

COMMAND=$(basename "$LECOMMAND")
# Guard the cd (SC2164): running letsencrypt from the wrong directory
# would be silently wrong.
cd "$LEINSTALLDIR" || exit 1

echo "executing letsencrypt: $LECOMMAND $LEARGS"
# Test the command directly instead of inspecting $? afterwards.
# LEARGS is deliberately unquoted: it holds multiple arguments.
if ! ./"$COMMAND" $LEARGS; then
	ERRORLOG=$(tail "$LELOG")
	echo -e "failure renewing cert. \n \n" $ERRORLOG | mail -s "Lets Encrypt Cert Alert" $NOTIFYEMAIL
else
	echo "success renewing cert."
	# certs are ready so package and restart pound
	cd "$LECONFIGDIR/live/$DOMAINNAME" || exit 1
	# package up PEM for webserver and restart
	cat fullchain.pem privkey.pem > for_pound_ssl.pem
	chmod 400 for_pound_ssl.pem
	echo "for_pound_ssl.pem cert package created."
	service pound restart
	echo "pound restarted."
fi
exit 0
| true |
ad27093c2c64cc6b381068ee5a6ecc316c1da18c | Shell | abhineetraj/Bash-Scripts | /slack-notification.sh | UTF-8 | 1,419 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Poll RabbitMQ queue depths and post a Slack alert for every queue whose
# message count exceeds its configured threshold.

# Post $msg (and the payload file, if readable) to the Slack webhook.
slack_notify()
{
# Fix: the trailing "(dummy)" annotation after the assignment was a bash
# syntax error; keep it as a comment instead. (Dummy webhook URL.)
url=https://hooks.slack.com/services/T012PD7UB/B0115126/123453232
curl -X POST -H 'Content-type: application/json' --data '{ "text": "'"$msg"'" }' "$url"
json=$(cat /var/local/adm/payload) && curl -s -d "payload=$json" "$url"
}

# Each line of the config file is "<queue-name>,<threshold>".
for line in $(cat /var/local/adm/queuename)
do
qname=$(echo "$line" | cut -d',' -f1);
threshold1=$(echo "$line" | cut -d',' -f2);
# Current depth of the queue as reported by rabbitmqctl (second column).
threshold2=$(/usr/sbin/rabbitmqctl list_queues | grep "$qname" | grep -v pid | awk '{print $2}');
if [ "$threshold2" -gt "$threshold1" ]; then
msg="Queue $qname has $threshold2 messages crossing the threshold of $threshold1"
slack_notify
fi
done
cat /var/local/adm/payload
cat: cat: No such file or directory
{
"attachments": [
{
"fallback": "RabbitMQ-Alert",
"callback_id": "wopr_game",
"color": "#3AA3E3",
"attachment_type": "default",
"actions": [
{
"name": "game",
"text": "RabbitMQ-Alert",
"style": "danger",
"type": "button",
"value": "war",
"confirm": {
"title": "Are you sure?",
"text": "Thanks for acknowledging"
}
}
]
}
]
}
| true |
c1f5cd107e37632751d9767f0286695e5d67f9d8 | Shell | bastou2316/cuda | /BUILDER_Symlink/public/symlink_data.sh | UTF-8 | 1,012 | 2.765625 | 3 | [] | no_license | #!/bin/bash
#
# version : 0.0.1
# author : cedric.bilat@he-arc.ch
# ---------------------------------------
# Init
#----------------------------------------
# Abort on the first failing command.
set -e
# ---------------------------------------
# Projects
#----------------------------------------
projet1=BilatTools_CPP
projet2=BilatTools_Cuda
projet3=BilatTools_Cuda_Image
projet4=BilatTools_Image
projet5=BilatTools_OMP
projet6=BilatTools_OpenCV
# ---------------------------------------
# Project lists (exported for the scripts that source this file)
#----------------------------------------
export listProjet=" $projet1 $projet4 $projet5 $projet6"
export listProjetCuda="$projet1 $projet2 $projet3 $projet5 $projet6"
export listProjetJNI=""
# TODO: not handled yet
export listProjetLinux=""
# listProjet : include list for non-CUDA projects
# listProjetCuda : include list for CUDA projects
# Warning: the lists are exclusive! That is why the same project may appear
# in both lists.
# ---------------------------------------
# end
#----------------------------------------
| true |
9926306bbf82a8a68912ae1b8a43830c2782c8a5 | Shell | brettpalmberg/pgadmin4-s3-sidecar | /watch | UTF-8 | 1,623 | 4.1875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Sync an S3 prefix to a local directory on start, periodically (and on
# exit) sync local changes back. Set TRACE for xtrace debugging.
[[ "$TRACE" ]] && set -x

function usage {
  cat <<-EOF
Usage: $PROGNAME [OPTIONS] <local-path> <remote-path>
Sync s3 directory locally and backup changed files on exit
--force-restore restore even if local directory is not empty
eg: $PROGNAME /data s3://bucket/dir
EOF
}

# Print a message to stderr and exit non-zero.
function error_exit {
  echo "${1:-"Unknown Error"}" 1>&2
  exit 1
}

# The assignment preserves getopt's exit status, so a bad option aborts.
PARSED_OPTIONS=$(getopt -n "$0" -o f --long "force-restore" -- "$@") || exit 1
eval set -- "$PARSED_OPTIONS"
while true; do
  case "$1" in
    -f|--force-restore)
      FORCE_RESTORE="true"
      shift;;
    --)
      shift
      break;;
  esac
done

PROGNAME=$0
LOCAL=$1
REMOTE=$2

# Allow pointing the aws CLI at a custom S3-compatible endpoint.
if [ "$ENDPOINT_URL" ]; then
  AWS="aws --endpoint-url $ENDPOINT_URL"
else
  AWS=aws
fi

# Pull the remote prefix into the (normally empty) local directory.
function restore {
  if [ "$(ls -A "$LOCAL")" ]; then
    # Fix: the original condition was inverted -- --force-restore *caused*
    # the "not empty" error -- and ${FORCE_RESTORE:false} is substring
    # expansion, not a default. Refuse a non-empty target unless forced.
    if [[ "${FORCE_RESTORE:-false}" != 'true' ]]; then
      error_exit "local directory is not empty"
    fi
  fi
  echo "restoring $REMOTE => $LOCAL"
  if ! $AWS s3 sync "$REMOTE" "$LOCAL"; then
    error_exit "restore failed"
  fi
}

# One-shot push of local changes to S3; failure is reported but tolerated.
function backup {
  echo "backup $LOCAL => $REMOTE"
  if ! $AWS s3 sync "$LOCAL" "$REMOTE" --delete; then
    echo "backup failed" 1>&2
    return 1
  fi
}

# Final push on shutdown: retry until it succeeds, then exit.
function final_backup {
  echo "backup $LOCAL => $REMOTE"
  while ! $AWS s3 sync "$LOCAL" "$REMOTE" --delete; do
    echo "backup failed, will retry" 1>&2
    sleep 1
  done
  exit 0
}

# Main loop: sleep in the background (so traps fire promptly) and run a
# periodic backup when BACKUP_INTERVAL is configured.
function idle {
  echo "ready"
  while true; do
    sleep ${BACKUP_INTERVAL:-42} &
    wait $!
    [ -n "$BACKUP_INTERVAL" ] && backup
  done
}

restore

trap final_backup SIGHUP SIGINT SIGTERM
trap "backup; idle" USR1

idle
| true |
236efd79033afe62c4dd1ebcd1e995cc5cd3618e | Shell | kdave/xfstests | /tests/xfs/543 | UTF-8 | 2,000 | 3.265625 | 3 | [] | no_license | #! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022 Oracle. All Rights Reserved.
#
# FS QA Test No. 543
#
# Regression test for xfsprogs commit:
#
# 99c78777 ("mkfs: prevent corruption of passed-in suboption string values")
#
# _begin_fstest, _require_*, $TEST_DIR, $tmp and $seqres come from the
# fstests harness sourced via common/preamble.
. ./common/preamble
_begin_fstest auto quick mkfs

_cleanup()
{
rm -f $TEST_DIR/fubar.img
cd /
rm -r -f $tmp.*
}

# Import common functions.
# . ./common/filter

# real QA test starts here

# Modify as appropriate.
_supported_fs xfs
_require_test
_require_xfs_mkfs_cfgfile

# Set up a configuration file with an exact block size and log stripe unit
# so that mkfs won't complain about having to correct the log stripe unit
# size that is implied by the provided data device stripe unit.
cfgfile=$tmp.cfg
cat << EOF >> $tmp.cfg
[block]
size=4096
[data]
su=2097152
sw=1
EOF

# Some mkfs options store the user's value string for processing after certain
# geometry parameters (e.g. the fs block size) have been settled.  This is how
# the su= option can accept arguments such as "8b" to mean eight filesystem
# blocks.
#
# Unfortunately, on Ubuntu 20.04, the libini parser uses an onstack char[]
# array to store value that it parse, and it passes the address of this array
# to the parse_cfgopt.  The getstr function returns its argument, which is
# stored in the cli_params structure by the D_SU parsing code.  By the time we
# get around to interpreting this string, of course, the stack array has long
# since lost scope and is now full of garbage.  If we're lucky, the value will
# cause a number interpretation failure.  If not, the fs is configured with
# garbage geometry.
#
# Either way, set up a config file to exploit this vulnerability so that we
# can prove that current mkfs works correctly.
# Sparse 1G image to run mkfs against (-N: presumably a dry run, since the
# test only exercises option parsing -- confirm against mkfs.xfs(8)).
$XFS_IO_PROG -f -c "truncate 1g" $TEST_DIR/fubar.img
options=(-c options=$cfgfile -l sunit=8 -f -N $TEST_DIR/fubar.img)
$MKFS_XFS_PROG "${options[@]}" >> $seqres.full ||
echo "mkfs failed"

# success, all done
echo Silence is golden
status=0
exit
| true |
71e8500e0fb10d04b4dd369612592650846a48b6 | Shell | deansx/coding-tests | /tools/gg | UTF-8 | 3,644 | 4.21875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# USAGE: gg [-c] [-nox] C++_src_files ...
#
# DESCRIPTION:
# Script to compile and link (or, optionally, just compile) a list of C++ source
# files. If the script successfully links the files, it attempts to execute the
# result.
#
# Files may be either fully specified "hi.cc" or "hi.cpp" or just the root name
# may be provided "hi" and the script will attempt to create the correct filename
#
# ARGUMENTS:
# [-c] - Tells the script not to bother with the link and execute steps. It just
# attempts to compile the list of files
# NOTE: Arguments are not order dependent. "-c" may be specified anywhere
# in the argument list
#
# [-nox] - Tells the script not to bother with the execute step. It just
# attempts to compile and link the list of files
# NOTE: Arguments are not order dependent. "-nox" may be specified anywhere
# in the argument list
#
# C++_file ... - a space separated list of C++ source files. The script accepts
# both ".cc" and ".cpp" extensions.
# NOTE: If no extension is specified, the script attempts to append the
# correct extension to the base filename
#
# RETURNS:
# 7 - represents failure of the script
#
#
# STATUS: Prototype
# VERSION: 1.00
# CODER: Dean Stevens
#
# LICENSE: The MIT License (MIT)
# See LICENSE.txt in the root (sim) directory of this project.
# Copyright (c) 2014 Spinnaker Advisory Group, Inc.
#
# Optional extra g++ include-dir and -D flags; kept empty here, with the
# previously used values preserved above as examples.
#IDIRS="-I../desim/ -I ../../desim/ -I../util/ -I../../util/"
IDIRS=""
#DEFS="-DTEST_HARNESS"
DEFS=""
# Clean up the old a.exe
if [ -f "a.exe" ]
then
rm -fv a.exe
fi
# Will hold the list of files to compile/link
FLST=""
# The default case is to compile and link. Initialize accordingly
CMPLNK="-o a.exe"
# Flag determining whether to execute the result, or not
NOX=0
# Append one filename (plus a trailing space separator) to the global
# space-separated compile/link list held in FLST.
#
# Arguments:
#   $1 - full leaf name of the file to add
# Returns:
#   nothing; mutates the global FLST
addf() {
  local new_file=$1
  FLST="${FLST}${new_file} "
}
# Classify each argument: a flag, an explicit .cc/.cpp file, or a base name
# to which a recognized extension is appended.
for FIL in "$@"
do
# Test for the compile only flag.
if [[ "$FIL" == "-c" ]]
then
CMPLNK="-c"
# Test for the no_execute flag.
elif [[ "$FIL" == "-nox" ]]
then
NOX=1
# Process the file arguments one at a time
elif [ -f "$FIL" ]
then
# See if the caller used a recognized extension. If so, add it to the list
if [[ "$FIL" == *.cc || "$FIL" == *.cpp ]]
then
addf "$FIL"
else
echo "'$FIL' NOT RECOGNIZED AS A C++ FILE"
exit 7
fi
# See if basename + a recognized extension exists as a file. If it does add it to
# the list
elif [ -f "$FIL.cc" ]
then
addf "$FIL.cc"
elif [ -f "$FIL.cpp" ]
then
addf "$FIL.cpp"
else
echo "ERROR: No C++ Source file found for: \"$FIL\" - Exiting..."
exit 7
fi
done
# remove trailing whitespace characters from the list
FLST="${FLST%"${FLST##*[![:space:]]}"}"
if [[ $CMPLNK == "-c" ]]
then
echo -e "\nAttempt to compile: $FLST ...\n"
else
echo -e "\nAttempt to compile and link: $FLST ...\n"
fi
echo ""
# IDIRS, DEFS and FLST are intentionally unquoted: each holds multiple
# space-separated arguments (filenames with spaces are not supported).
g++ -g -std=c++11 $IDIRS $DEFS $CMPLNK $FLST
if [ $? -eq 0 ]
then
if [[ $CMPLNK == "-c" ]]
then
echo "NOTE: \"$FLST\" Compiled!"
else
echo "NOTE: \"$FLST\" Compiled & Linked!"
if [ $NOX -eq 0 ]
then
echo -e "\nExecuting...\n"
# NOTE(review): invoked without ./ -- relies on '.' being in PATH
# (the .exe suffix suggests a Cygwin environment); confirm.
a.exe
fi
fi
else
echo "ERROR: >=One of \"$FLST\" FAILED to Compile / Link."
fi
71371a1805a9321e51ce2a374e68132082377c0d | Shell | chamilaadhi/consolidated-wum-create-script | /generate.sh | UTF-8 | 539 | 3.46875 | 3 | [] | no_license | #!/bin/bash
allUpdatePath=/Users/chamila/updates/wilkinson/4.6.0
rm -rf updates
mkdir updates
rm -rf collection
mkdir collection
rm -rf unzip
mkdir unzip
fileItemString=$(cat list.txt |tr "\n" " ")
fileItemArray=($fileItemString)
#echo ${fileItemArray[*]}
for i in "${fileItemArray[@]}"
do
:
cp $allUpdatePath/$i updates/
done
unzip updates/\*.zip -d unzip/
echo "\nCopy extracted content to aggregated location...\n"
for file in */*/ ;
do
echo $file;
cp -rf $file/carbon.home/ collection/
done
sh remove-content.sh | true |
9ad36e7a5816a063440c66d4329a7b82df8ae1d2 | Shell | The3File/dotfiles | /.Scripts/mdtopdf | UTF-8 | 915 | 3.703125 | 4 | [] | no_license | #!/usr/bin/env bash
tomd(){
case ${1##*.} in
doc|pdf)
lowriter --convert-to docx "$1"
pandoc -t markdown "${1%.*}.docx" -o "${1%.*}.md"
rm "${1}x"
;;
docx) pandoc -t markdown "$1" -o "${1%.*}.md" ;;
*) err="${1##*.}"; return 1
esac
}
tex(){
if [[ $(sed 1q $1) = "%math" ]]; then
pdflatex -shell-escape "$1" 1>/dev/null
else
pdflatex -shell-escape "$1" 1>/dev/null
fi
clean=( gnuploterrors log aux )
for ext in "${clean[@]}"; do
[[ -f "${1%.*}.$ext" ]] && rm "${1%.*}.$ext"
done
}
main(){
case $1 in
-d) pandoc -t docx "$2" -o "${2%.*}.docx" ;;
-o) [[ -r "${2%.*}.pdf" ]] && xdg-open "${2%.*}.pdf" & disown ;;
-t) tex "$2" ;;
--to-md) tomd "$2" || echo "failed (can't convert $err to md)" ;;
*) [[ -e $1 ]] && pandoc -V geometry:margin=1in -t latex "$1" -o "${1%.*}.pdf" || echo failed ;;
esac
}
main "$@"
| true |
6944e5da980136f83b8078af43d7f089068176c5 | Shell | StoryMonster/git_helper | /exec/githelper | UTF-8 | 229 | 3.1875 | 3 | [] | no_license | #!/bin/bash
script_location=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
if [ $1 == "status" ]; then
python3 $script_location/../src/gitHelper.py $@
else
python3 $script_location/../src/gitHelper.py $@ | less -r
fi
| true |
47fea678c1b81f5310ad9d8e00e6057a855c7f09 | Shell | supunkamburugamuve/twister2 | /util/mvn/execute-deploy.sh | UTF-8 | 3,978 | 3.28125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -eu

# Deploy twister2 maven artifacts built by bazel.
# Usage: execute-deploy.sh <mvn-goal> <version-name> [extra maven args...]
readonly MVN_GOAL="$1"
readonly VERSION_NAME="$2"
shift 2
# Remaining arguments are forwarded verbatim to every mvn invocation.
readonly EXTRA_MAVEN_ARGS=("$@")
# Locate a bazel build artifact, checking bazel-bin/ first and falling back
# to bazel-genfiles/. Prints the relative path (no trailing newline) on
# stdout; exits non-zero when the artifact is missing.
bazel_output_file() {
  local library=$1
  local output_file=bazel-bin/$library
  if [[ ! -e $output_file ]]; then
    output_file=bazel-genfiles/$library
  fi
  if [[ ! -e $output_file ]]; then
    # Fix: report on stderr. Callers capture stdout via $(...), so an error
    # message printed there would be handed to mvn as if it were a path.
    echo "Could not find bazel output file for $library" >&2
    exit 1
  fi
  echo -n "$output_file"
}
# Build one jar+pom(+sources+javadoc) set with bazel and hand it to mvn.
# Arguments:
#   $1 - bazel package directory (e.g. twister2/common/src/java)
#   $2 - target base name; artifacts are lib$2.jar, lib$2-src.jar, ...
deploy_library() {
  local library="$1/lib$2.jar"
  local pomfile="$1/pom.xml"
  local source="$1/lib$2-src.jar"
  local javadoc="$1/$2-javadoc.jar"
  bazel build --define=pom_version="$VERSION_NAME" \
    $library $pomfile $source $javadoc
  printf "\nGenerating %s %s\n\n" "$1" "$MVN_GOAL"
  # The :+ expansion passes the extra args only when the array is non-empty,
  # avoiding an unbound-variable error under `set -u` on older bash.
  mvn $MVN_GOAL \
    -Dfile=$(bazel_output_file $library) \
    -DpomFile=$(bazel_output_file $pomfile) \
    -Dsources=$(bazel_output_file $source) \
    -Djavadoc=$(bazel_output_file $javadoc) \
    "${EXTRA_MAVEN_ARGS[@]:+${EXTRA_MAVEN_ARGS[@]}}"
}
#todo due to an unknown reason proto libraries generated by native rules has a suffix speed. Hence taking two args to address this issue temporary
# Variant of deploy_library for protobuf targets whose built jar name does
# not follow the lib<name>.jar convention.
# Arguments:
#   $1 - bazel build target for the library
#   $2 - bazel target for the pom file
#   $3 - path of the built jar relative to bazel-bin/bazel-genfiles
deploy_proto_library() {
  local library=$1
  local pomfile=$2
  local buildfile=$3
  bazel build --define=pom_version="$VERSION_NAME" \
    $library $pomfile --action_env=JAVA_HOME
  mvn $MVN_GOAL \
    -Dfile=$(bazel_output_file $buildfile) \
    -DpomFile=$(bazel_output_file $pomfile) \
    "${EXTRA_MAVEN_ARGS[@]:+${EXTRA_MAVEN_ARGS[@]}}"
}
# Each stanza below publishes one module: (bazel package dir, target base
# name) handed to deploy_library, which deploys lib<name>.jar with its pom,
# sources and javadoc.
# APIs
deploy_library \
  twister2/api/src/java \
  api-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/checkpointing \
  checkpointing-api-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/comms \
  comms-api-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/config \
  config-api-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/data \
  data-api-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/dataset \
  dataset-api-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/exceptions \
  exceptions-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/net \
  network-api-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/resource \
  resource-api-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/scheduler \
  scheduler-api-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/compute \
  task-api-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/tset \
  tset-api-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/util \
  api-utils-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/faulttolerance \
  fault-tolerance-api-java
deploy_library \
  twister2/api/src/java/edu/iu/dsc/tws/api/driver \
  driver-api-java
# End of APIs
# Core runtime, tooling and compatibility modules.
deploy_library \
  twister2/common/src/java \
  common-java
deploy_library \
  twister2/comms/src/java \
  comms-java
deploy_library \
  twister2/connectors/src/java \
  connector-java
deploy_library \
  twister2/data/src/main/java \
  data-java
deploy_library \
  twister2/examples/src/java \
  examples-java
deploy_library \
  twister2/executor/src/java \
  executor-java
deploy_library \
  twister2/master/src/java \
  master-java
deploy_library \
  twister2/resource-scheduler/src/java \
  resource-scheduler-java
deploy_library \
  twister2/task/src/main/java \
  task-java
deploy_library \
  twister2/taskscheduler/src/java \
  taskscheduler-java
deploy_library \
  twister2/compatibility/storm \
  twister2-storm
deploy_library \
  twister2/compatibility/beam \
  twister2-beam
deploy_library \
  twister2/checkpointing/src/java \
  checkpointing-java
deploy_library \
  twister2/proto \
  jproto-java
deploy_library \
  twister2/proto/utils \
  proto-utils-java
deploy_library \
  twister2/compatibility/harp \
  twister2-harp
deploy_library \
  twister2/tools/local-runner/src/java \
  local-runner-java
deploy_library \
  twister2/tset/src/java \
  tset-java
| true |
ac12e0c430d4a956603f13b49ac4ea1046f5ebc8 | Shell | harshavardhana/vagrant-minio-cluster | /files/keys/install-sshkey.sh | UTF-8 | 2,065 | 3.375 | 3 | [] | no_license | #!/bin/bash
# create or install key for gitlab access
# Reuses a key pair from the shared Vagrant folder when available, otherwise
# generates a fresh 4096-bit RSA pair and stashes it there; the same pair is
# then installed for both the vagrant user and root.
SSHPRIVKEYFILENAME=id_rsa
SSHPUBKEYFILENAME=id_rsa.pub
SSHKEYPATH=/vagrant/files/keys/conextrade-dev
LOCALSSHPATH=/home/vagrant/.ssh
ROOTSSHPATH=/root/.ssh
# delete keys if exists
if [ -f "/home/vagrant/.ssh/id_rsa" ]
then
sudo rm $LOCALSSHPATH/id_rsa
fi
# delete keys if exists
if [ -f "/home/vagrant/.ssh/id_rsa.pub" ]
then
sudo rm $LOCALSSHPATH/id_rsa.pub
fi
# Prefer the persisted key pair; fall back to generating a new one.
if [ -f "$SSHKEYPATH/$SSHPRIVKEYFILENAME" ] && [ -f "$SSHKEYPATH/$SSHPUBKEYFILENAME" ]
then
echo "ssh key $SSHKEYPATH/$SSHPRIVKEYFILENAME found"
sudo cp $SSHKEYPATH/$SSHPRIVKEYFILENAME $LOCALSSHPATH/$SSHPRIVKEYFILENAME
sudo cp $SSHKEYPATH/$SSHPUBKEYFILENAME $LOCALSSHPATH/$SSHPUBKEYFILENAME
else
echo "ssh key $LOCALSSHPATH/$SSHPRIVKEYFILENAME created"
# -N "" creates the key without a passphrase.
sudo ssh-keygen -t rsa -C "developer@conextrade.com" -b 4096 -N "" -f $LOCALSSHPATH/$SSHPRIVKEYFILENAME
if [ ! -d "$SSHKEYPATH" ]
then
mkdir -p $SSHKEYPATH
fi
# Persist the freshly generated pair for future provisioning runs.
sudo cp $LOCALSSHPATH/$SSHPRIVKEYFILENAME $SSHKEYPATH/$SSHPRIVKEYFILENAME
sudo cp $LOCALSSHPATH/$SSHPUBKEYFILENAME $SSHKEYPATH/$SSHPUBKEYFILENAME
echo "ssh key $SSHKEYPATH/$SSHPRIVKEYFILENAME saved"
fi
# Standard ssh permissions: private key 600, public key 644.
sudo chown vagrant $LOCALSSHPATH/$SSHPRIVKEYFILENAME
sudo chgrp vagrant $LOCALSSHPATH/$SSHPRIVKEYFILENAME
sudo chmod 600 $LOCALSSHPATH/$SSHPRIVKEYFILENAME
sudo chown vagrant $LOCALSSHPATH/$SSHPUBKEYFILENAME
sudo chgrp vagrant $LOCALSSHPATH/$SSHPUBKEYFILENAME
sudo chmod 644 $LOCALSSHPATH/$SSHPUBKEYFILENAME
# add known host
/vagrant/files/bashshell/add-known-host.sh vcs.corp.conextrade.com
# install same keys for root
if [ ! -d "$ROOTSSHPATH" ]
then
sudo mkdir $ROOTSSHPATH
fi
sudo cp /home/vagrant/.ssh/* $ROOTSSHPATH
sudo chown root $ROOTSSHPATH/$SSHPRIVKEYFILENAME
sudo chgrp root $ROOTSSHPATH/$SSHPRIVKEYFILENAME
sudo chmod 600 $ROOTSSHPATH/$SSHPRIVKEYFILENAME
sudo chown root $ROOTSSHPATH/$SSHPUBKEYFILENAME
sudo chgrp root $ROOTSSHPATH/$SSHPUBKEYFILENAME
sudo chmod 644 $ROOTSSHPATH/$SSHPUBKEYFILENAME
# add key to authorized_keys
# NOTE(review): appends unconditionally, so repeated provisioning runs add
# duplicate entries -- confirm whether that is acceptable.
cat $LOCALSSHPATH/$SSHPUBKEYFILENAME >> $LOCALSSHPATH/authorized_keys
| true |
5769dca1af93dc9843b411fc4c3d369f275127d0 | Shell | truelsy/acro | /apm | UTF-8 | 396 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Manage the app under the `forever` process supervisor.
#APPHOME=$HOME/Documents/workspace_nodejs/acro
case "$1" in
start)
	forever start -l $PWD/logs/forever.log -a -w ./bin/www
	;;
stop)
	forever stop ./bin/www
	;;
restart)
	forever restart ./bin/www
	;;
list)
	forever list
	;;
*)
	# Fix: the usage line was corrupted (garbled text with an unbalanced
	# quote), which made the entire script a bash syntax error.
	echo "Usage: $0 {start|stop|restart|list}"
	exit 1
esac
exit 0
| true |
7d06767c7be554daebce4fe2469599dbd47aa6ff | Shell | dpb587/datapact | /test/out | UTF-8 | 795 | 2.828125 | 3 | [] | no_license | #!/bin/bash
set -eu

# Exercise the datapact resource's /opt/resource/out inside its container.
# args: branch result-dir pipeline repository-dir
# Start from a clean scratch directory.
! [ -e $HOME/tmp/datapact-out ] || rm -fr $HOME/tmp/datapact-out
#git clone -b "$1" --single-branch "file://$4" $HOME/tmp/datapact-out/repository/.datapact/git
mkdir -p $HOME/tmp/datapact-out/result
cp -r "$2"/* $HOME/tmp/datapact-out/result/
cd $HOME/tmp/datapact-out

# Select the resource named after the branch from the pipeline file, inject
# the repository param, and feed the resulting JSON to the resource's `out`
# script on the container's stdin; then follow the container's logs.
jq \
  --arg branch "$1" \
  '.resources | map(select(.name == $branch))[0] * {"params":{"repository":"repository"}}' \
  < "$3" \
  | docker run -i -a stdin \
    -e ATC_EXTERNAL_URL \
    -e BUILD_ID=${BUILD_ID:-one-off} \
    -v $HOME/tmp/datapact-out/result:/tmp/build/put/result \
    -v $4:/tmp/build/put/repository/.datapact/git \
    --workdir /tmp/build/put \
    dpb587/datapact-resource \
    /opt/resource/out \
  | xargs docker logs -f
| true |
cea48312958d3640bc07b9ff08c7ddf5eae3a445 | Shell | h3nnn4n/dots | /.config/zshrc.d/30-git-utils.sh | UTF-8 | 940 | 3.234375 | 3 | [] | no_license | unalias gb
# True when the current directory is inside a git repository with at least
# one commit (rev-parse HEAD fails on an empty repo).
function is_in_git_repo() {
  git rev-parse HEAD > /dev/null 2>&1
}

# Fuzzy-pick one or more files from `git status`; prints the selected paths.
function gf() {
  is_in_git_repo &&
    git -c color.status=always status --short |
    fzf --height 40% -m --ansi --nth 2..,.. | awk '{print $2}'
}

# Fuzzy-pick branches (local and remote); prints bare branch names with the
# remotes/<name>/ prefix stripped.
function gbb() {
  is_in_git_repo &&
    git branch -a -vv --color=always | grep -v '/HEAD\s' |
    fzf --height 40% --ansi --multi --tac | sed 's/^..//' | awk '{print $1}' |
    sed 's#^remotes/[^/]*/##'
}

# Check out the branch chosen via gbb.
function gb() {
  gbb | xargs git checkout
}

# Fuzzy-pick tags, newest version first.
function gt() {
  is_in_git_repo &&
    git tag --sort -version:refname |
    fzf --height 40% --multi
}

# Fuzzy-pick commits from the graph log; prints the selected commit hashes.
function gl() {
  is_in_git_repo &&
    git log --date=short --format="%C(green)%C(bold)%cd %C(auto)%h%d %s (%an)" --graph |
    fzf --height 40% --ansi --no-sort --reverse --multi | grep -o '[a-f0-9]\{7,\}'
}

# Fuzzy-pick a remote; prints its name.
function gr() {
  is_in_git_repo &&
    git remote -v | awk '{print $1 " " $2}' | uniq |
    fzf --height 40% --tac | awk '{print $1}'
}
| true |
03529f7c9ef386bbe98243aa201c6fbb4749777c | Shell | Sebas-h/dotfiles | /sync.sh | UTF-8 | 1,000 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Copy the live dotfiles from their installed locations into this repo.

# define dotfiles paths
nvim="$HOME/.config/nvim/init.vim"
zsh="$HOME/.zshrc"
tmux="$HOME/.tmux.conf"
alacritty="$HOME/.config/alacritty/alacritty.yml"
karabiner="$HOME/.config/karabiner/karabiner.json"
hammerspoon_init="$HOME/.hammerspoon/init.lua"
hammerspoon_hyper="$HOME/.hammerspoon/hyper_plus.lua"
starship="$HOME/.config/starship.toml"
# Fix: inside double quotes a backslash is kept literally, so the escaped
# space produced a path containing 'Application\ Support' that never
# matches the real directory. A plain space is fine inside quotes.
firefox="$HOME/Library/Application Support/Firefox/Profiles/horx90mb.default-1555412934393/chrome/userChrome.css"

# current dotfiles dir
dotfilesdir="$HOME/projects/dotfiles"

# Declare an array of string with type
declare -a StringArray=("$nvim" "$zsh" "$tmux" "$alacritty" "$karabiner" "$hammerspoon_init" "$hammerspoon_hyper" "$starship" "$firefox")

# Iterate the string array using for loop
for val in "${StringArray[@]}"; do
    # $val must be quoted everywhere: the firefox path contains spaces.
    test -f "$val" && echo "OK -> $val"
    # remove old dotfile if exists
    [ -f "$val" ] && rm "$dotfilesdir/${val##*/}"
    # copy over new dotfile
    [ -f "$val" ] && cp "$val" "$dotfilesdir/${val##*/}"
done
| true |
586fd7c1ab689ada60ac7b4b6868575a4b3cd31c | Shell | Doccos/BackupScript | /scripts/backupFrom.sh | UTF-8 | 4,046 | 3.5 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Maximum number of rsync attempts per backed-up directory.
MAXRSYNC=10
TIME=$(date +%s)
#echo $TIME
if [ -n "$1" ] ; then
SERVER="$1"
else
echo "Error: Usage $0 <fqdn-hostname>"
exit
fi
#IST PORT GESETZT
if ! [ -n "$2" ]; then echo "Kein Port gesetzt"; exit; fi
#IST PORT EINE NUMMER
if [ $2 -gt 0 ]; then PORT=$2 ; else echo "Port ist keine Nummer";exit; fi
#IST 4. Parameter gesetzt (NAME)
if [ -n "$3" ]; then NAME=$3 ; else echo "Kein Name gesetzt"; exit; fi
if [ -n "$4" ]; then SYNCDAY=$4 ; else SYNCDAY=30; fi
if [ -n "$5" ]; then SYNCMONTH=$5 ; else SYNCMONTH=0; fi
if [ -n "$6" ]; then SYNCYEAR=$6 ; else SYNCYEAR=0; fi
#echo $4
###BEVOR BACKUP REMOTE SCRIPT AUSFUEHREN
#ssh -t -i /root/.ssh/backup.identity -p $PORT $SERVER "sh -c '( ( /opt/backup/beforebackup.sh asdasdaddddd ) )'"
DATAPATH=$8
SC=10
while [ $SC -gt 0 ]
do
ssh -x -p $PORT $SERVER " /opt/backupScript/scripts/beforeBackupFrom.sh $NAME.$TIME </dev/null >/dev/null 2>&1 & "
echo "---------------------------"
echo "Backup: $NAME"
echo "---------------------------"
echo "Starte BeforBackup Script"
SC=$?
sleep 1
done
SCC=1
while [ $SCC -gt 0 ]
do
sleep 2
STATUS=$(ssh -p $PORT $SERVER cat /opt/backupScript/tmp/$NAME.$TIME 2>&1)
echo "Warte bis BeforeBackup Script fertig ist Status: $STATUS Counter: $SCC"
SCC=$[$SCC+1];
if [ $STATUS -eq 1 ]; then
SCC=0;
fi;
if [ $SCC -gt 400 ]; then
echo "BEFOR BACKUP ERROR KILL MYSELF NOW $SCC"
SCC=0;
exit;
fi;
done
for ZEILE in $7
do
# echo $ZEILE
BACKUPDIR=$DATAPATH/$NAME'/akt'$ZEILE
# echo $BACKUPDIR
if ! [ -d $BACKUPDIR ] ; then
mkdir -p $BACKUPDIR
fi
COUNTER=$MAXRSYNC
while [ $COUNTER -gt 0 ]
do
rsync -e "ssh -p $PORT" -avz --numeric-ids --delete --delete-excluded --ignore-errors $SERVER:$ZEILE/ $BACKUPDIR/
if [ $? = 24 -o $? = 0 ] ; then
echo "Rsync Erfolgreich!"
COUNTER=0
fi
COUNTER=$[$COUNTER-1]
done
done
if ! [ -d $DATAPATH/$NAME'/daily' ] ; then
mkdir -p $DATAPATH/$NAME'/daily'
fi
#####Ueberpruefe ob tmp ordner exisitiert
if ! [ -d /opt/backupScript/tmp ] ; then
mkdir -p /opt/backupScript/tmp
fi
########################################
MONAT=$(date +%-m)
JAHR=$(date +%Y)
MONAT=$(( ($MONAT * ($JAHR * 12)) ))
ORDNERBEZ=$(date +"%Y.%m.%d-%H.%M")
cp -al $DATAPATH/$NAME'/akt' $DATAPATH/$NAME'/daily/'$ORDNERBEZ
touch $DATAPATH/$NAME'/daily/'$ORDNERBEZ
#####Check Letzter Monat
if [ -f "/opt/backup/tmp/lastmon" ]; then LASTM=$(tail /opt/backupScript/tmp/lastmon) ; else LASTM=0 ; fi
if [ -f "/opt/backup/tmp/lastyear" ]; then LASTY=$(tail /opt/backupScript/tmp/lastyear) ; else LASTY=0 ; fi
if [ $SYNCMONTH -gt 0 ] && ! [ $LASTM -eq $MONAT ]
then
if ! [ -d $DATAPATH/$NAME'/month' ] ; then
mkdir -p $DATAPATH/$NAME'/month'
fi
cp -al $DATAPATH/$NAME'/akt' $DATAPATH/$NAME'/month/'$ORDNERBEZ
touch $DATAPATH/$NAME'/month/'$ORDNERBEZ
echo $MONAT > /opt/backup/tmp/lastmon.$NAME
fi
if [ $SYNCYEAR -gt 0 ] && ! [ $LASTY -eq $JAHR ]
then
if ! [ -d $DATAPATH/$NAME'/year' ] ; then
mkdir -p $DATAPATH/$NAME'/year'
fi
cp -al $DATAPATH/$NAME'/akt' $DATAPATH/$NAME'/year/'$ORDNERBEZ
touch $DATAPATH/$NAME'/year/'$ORDNERBEZ
echo $JAHR > /opt/backup/tmp/lastyear.$NAME
fi
#echo $LASTM
#echo $MONAT
#rsync -e "ssh -i /root/.ssh/backup.identity -p $PORT" -avz $SERVER:/var/test $SERVER:/var/tmp $DATAPATH
####AFTER BACKUP
SC=10
while [ $SC -gt 0 ]
do
ssh -x -p $PORT $SERVER " /opt/backupScript/scripts/afterBackupFrom.sh $NAME.$TIME </dev/null >/dev/null 2>&1 & "
echo "Starte AfterBackup Script"
#echo $?
SC=$?
sleep 1
done
find "$DATAPATH/$NAME/daily/" -mindepth 1 -maxdepth 1 -type d -mtime +$SYNCDAY -exec rm -R {} \;
if [ $SYNCMONTH -gt 0 ]
then
find "$DATAPATH/$NAME/month/" -mindepth 1 -maxdepth 1 -type d -mtime +$[$SYNCMONTH * 31] -exec rm -R {} \;
fi
if [ $SYNCYEAR -gt 0 ]
then
find "$DATAPATH/$NAME/year/" -mindepth 1 -maxdepth 1 -type d -mtime +$[$SYNCYEAR * 366] -exec rm -R {} \;
fi
| true |
0aa972ce8c9133a364626a47d0840a43d8aba472 | Shell | PrimeRevenue/derailed | /.docker-dev/entrypoint.sh | UTF-8 | 229 | 2.640625 | 3 | [] | no_license | #!/bin/sh
# Abort on the first failing command.
set -e

# Make sure the bundle is satisfied before doing anything else.
echo "Installing gems if needed"
if ! bundle check; then
  bundle install
fi

# Migrate an existing database; fall back to a full setup on a fresh one.
echo "Creating / Migrating database if needed"
if ! bundle exec rake db:migrate 2>/dev/null; then
  bundle exec rake db:setup
fi

# Hand control over to whatever command the container was started with.
echo "Running: $@"
exec "$@"
| true |
9f860b0fe9d553206e6751306bcd36101fc1982f | Shell | ssh24/jumbo-benchmark | /script.sh | UTF-8 | 537 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Positional arguments: credentials, management server, app name, copy count.
USER=$1
PASSWORD=$2
SERVER=$3
APP=$4
COPIES=$5
COUNT=1

# login
# Pass dynamic values as printf *arguments* rather than inside the format
# string, so a '%' in a value cannot corrupt the output; quote every
# expansion so values containing spaces survive.
# NOTE: a password on the command line is visible in `ps`; acceptable for a
# benchmark script, but keep in mind.
printf '\n>> Logging on to %s as %s ...\n' "$SERVER" "$USER"
apic login -u "$USER" -p "$PASSWORD" -s "$SERVER"

# get orgs
ORG=$(apic orgs -s "$SERVER")

# create copies of lb app
while [ "$COPIES" -gt 0 ]
do
	printf '\n>> Publish # %s\n' "$COUNT"
	# publish an app
	apic apps:publish -a "$APP" -o "$ORG" -s "$SERVER"
	COUNT=$((COUNT + 1))
	COPIES=$((COPIES - 1))
done

# logout
printf '\n>> Logging out of %s ...\n' "$SERVER"
apic logout -s "$SERVER"
| true |
a1213b3f58984dd7b3c5cdb3d49106b0ddd4405b | Shell | lucaswannen/source_code_classification_with_CNN | /dataset_v2/bash/9709271.txt | UTF-8 | 430 | 2.890625 | 3 | [] | no_license | #!/bin/bash
filename="tmp_list_filenames"
line_index=0
cat $filename | while read processed_data_filename; do
line_index=$(($line_index + 1))
var="haha$line_index"
done
echo "thevar:$var"
thevar:
$var
while [ $line_index -lt 3 ]; do
#!/bin/bash
filename="tmp_list_filenames"
line_index=0
while [ $line_index -lt 3 ]; do
line_index=$(($line_index + 1))
var="haha$line_index"
done
echo "thevar:$var"
thevar:haha4
| true |
1a21b45a961a0280d5de41f5b41156550d4eb1e3 | Shell | truenas/ports | /security/arti/files/arti.in | UTF-8 | 801 | 3.421875 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# $FreeBSD$
#
# PROVIDE: arti
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# Add these lines to /etc/rc.conf.local or /etc/rc.conf
# to enable this service:
#
# arti_enable (bool):   Set to NO by default.
#                       Set it to YES to enable arti.
# arti_config (path):   Set to %%PREFIX%%/etc/arti/arti.cf
#                       by default.
# arti_user (str):      Arti daemon user. Default: %%USER%%
# arti_pidfile (str):   Arti pid file. Default: /var/run/arti/arti.pid
. /etc/rc.subr
name=arti
rcvar=arti_enable
load_rc_config $name
# Defaults, applied only when rc.conf did not set the variable
# (":=" also covers set-but-empty for arti_enable).
: ${arti_enable:="NO"}
: ${arti_config="%%PREFIX%%/etc/arti/arti.toml"}
: ${arti_pidfile="/var/run/arti/arti.pid"}
# Supervise the arti binary via daemon(8): output goes to
# /var/log/arti/arti.log, the pid file to $arti_pidfile (see daemon(8)
# for the -c/-f/-o/-P flags).
command=/usr/sbin/daemon
arti=%%PREFIX%%/bin/${name}
command_args="-c -f -o /var/log/arti/arti.log -P $arti_pidfile $arti proxy -c $arti_config"
run_rc_command "$1"
| true |
161df2e3854b5c5ec2f587552637bbb9b97c80c0 | Shell | savnik/bo-wifi-speaker | /install-1.sh | UTF-8 | 521 | 2.96875 | 3 | [] | no_license | #!/bin/bash
echo "INSTALL Script 1"
sudo apt-get update -y
sudo apt-get upgrade -y
sudo apt-get install build-essential git autoconf automake libtool libdaemon-dev libasound2-dev libpopt-dev libconfig-dev avahi-daemon libavahi-client-dev libssl-dev -y

# Comment out the on-board audio line in /boot/config.txt.
# BUG FIXES versus the original:
#  - `$infile = "..."` was not a valid assignment (leading '$', spaces);
#  - the variable was assigned as $patern but expanded as $pattern;
#  - sed was run without -i, so it only printed and never edited the file.
infile="/boot/config.txt"
pattern="dtparam=audio=on"
sudo sed -i "s/^${pattern}/#${pattern}/" "$infile"

# Add line to end of file.
# BUG FIX: `sudo echo ... >> file` performs the redirection in the
# *unprivileged* shell; route the append through `sudo tee -a` instead.
echo "dtoverlay=iqaudio-dacplus" | sudo tee -a /boot/config.txt > /dev/null

echo "END OF INSTALL SCRIPT 1... REBOOT"
sudo reboot
| true |
1aa4e8cc937fa76bbc4b7541a8820a96105c4a09 | Shell | EarlyMssElectronicLibrary/spindle | /bin/deliver | UTF-8 | 8,646 | 3.328125 | 3 | [] | no_license | #!/bin/sh
read -r -d '' HELP <<-'EOF'
For a PACKAGE_DIR of the correct structure, validate the data and prepare the
delivery package. The INPUT dir must contain at its root a 'data' directoy.
All image files must be within the data diretory. No requirements are made as
to the directory structure in which the files are stored.
This command will run:
* `verify_all_filenames`
* `verify_all_metadata`
* `create_manifest`
* `verify_package`
Execution ceases when the first error is encountered.
The following shows a valid PACKAGE_DIR structure. The PACKAGE_DIR may not contain
any log files or a manifest.
.
└── data
├── Processed_Images
│ ├── 0015_000001_KTK_pseudo_MB365UV-MB625Rd.tif
│ ├── 0015_000001_KTK_pseudo_MB365UV-VIS.tif
│ ├── ...
│ ├── 0020_000018_KTK_txpseudo_WBRBB47-MB625Rd.tif
│ ├── 0020_000018_KTK_txratio_TX940IR-MB940IR.tif
│ └── 0020_000018_KTK_txsharpie_WBRBB47-MB625Rd.tif
└── Processed_Images_JPEG
├── 0015_000001_KTK_pseudo_MB365UV-MB625Rd.jpg
├── 0015_000001_KTK_pseudo_MB365UV-VIS.jpg
├── ...
├── 0020_000018_KTK_txpseudo_WBRBB47-MB625Rd.jpg
├── 0020_000018_KTK_txratio_TX940IR-MB940IR.jpg
└── 0020_000018_KTK_txsharpie_WBRBB47-MB625Rd.jpg
This script wll generate all delivery logs and, if any errors are found, the
`ERRORS_FOUND.txt` file. When this script is complete, a valid delivery
package will have the following structure.
.
├── DLVRY_filenames.log
├── DLVRY_metadata.log
├── DLVRY_package.log
├── data
│ ├── Processed_Images
│ │ ├── 0015_000001_KTK_pseudo_MB365UV-MB625Rd.tif
│ │ ├── 0015_000001_KTK_pseudo_MB365UV-VIS.tif
│ │ ├── ...
│ │ ├── 0020_000018_KTK_txpseudo_WBRBB47-MB625Rd.tif
│ │ ├── 0020_000018_KTK_txratio_TX940IR-MB940IR.tif
│ │ └── 0020_000018_KTK_txsharpie_WBRBB47-MB625Rd.tif
│ └── Processed_Images_JPEG
│ ├── 0015_000001_KTK_pseudo_MB365UV-MB625Rd.jpg
│ ├── 0015_000001_KTK_pseudo_MB365UV-VIS.jpg
│ ├── ...
│ ├── 0020_000018_KTK_txpseudo_WBRBB47-MB625Rd.jpg
│ ├── 0020_000018_KTK_txratio_TX940IR-MB940IR.jpg
│ └── 0020_000018_KTK_txsharpie_WBRBB47-MB625Rd.jpg
└── manifest-md5s.txt
A non-valid package will contain the file `ERRORS_FOUND.txt`:
.
├── DLVRY_filenames.log
├── DLVRY_metadata.log
├── DLVRY_package.log
├── ERRORS_FOUND.txt
├── data
│ ├── Processed_Images
│ │ ├── 0015_000001_KTK_pseudo_MB365UV-MB625Rd.tif
│ │ ├── 0015_000001_KTK_pseudo_MB365UV-VIS.tif
│ │ ├── ...
│ │ ├── 0020_000018_KTK_txpseudo_WBRBB47-MB625Rd.tif
│ │ ├── 0020_000018_KTK_txratio_TX940IR-MB940IR.tif
│ │ └── 0020_000018_KTK_txsharpie_WBRBB47-MB625Rd.tif
│ └── Processed_Images_JPEG
│ ├── 0015_000001_KTK_pseudo_MB365UV-MB625Rd.jpg
│ ├── 0015_000001_KTK_pseudo_MB365UV-VIS.jpg
│ ├── ...
│ ├── 0020_000018_KTK_txpseudo_WBRBB47-MB625Rd.jpg
│ ├── 0020_000018_KTK_txratio_TX940IR-MB940IR.jpg
│ └── 0020_000018_KTK_txsharpie_WBRBB47-MB625Rd.jpg
└── manifest-md5s.txt
EOF
### TEMPFILES
# From:
#   http://stackoverflow.com/questions/430078/shell-script-templates
# create a default tmp file name (one per process via $$)
tmp=${TMPDIR:-/tmp}/prog.$$
# delete any existing temp files on HUP/INT/QUIT/PIPE/TERM, then exit 1
trap "rm -f $tmp.?; exit 1" 1 2 3 13 15
# then do
#   ...real work that creates temp files $tmp.1, $tmp.2, ...
# FUNCTIONS

# Function to clean and mark this package with errors
#
# Usage: delivery_failure VALIDATING_FLAG ERRORS_FLAG MESSAGE
#
# Deletes VALIDATING_FLAG, writes MESSAGE to ERRORS_FLAG, then hands the
# message to the sourced `fail` helper (defined in spindle_functions).
delivery_failure() {
  df_validating_flag=$1
  df_errors_flag=$2
  msg="$3"
  # Quote the flag paths so a package dir containing spaces does not
  # word-split (the original left these expansions unquoted).
  rm -f "$df_validating_flag"
  echo "$msg" > "$df_errors_flag"
  fail "$msg"
}
# Function to clean up and print a success message
#
# Usage: delivery_success VALIDATING_FLAG MESSAGE
#
# Removes VALIDATING_FLAG and reports MESSAGE via the sourced `success`
# helper (defined in spindle_functions).
delivery_success() {
  ds_validating_flag=$1
  msg="$2"
  # Quoted so paths with spaces survive (was unquoted).
  rm -f "$ds_validating_flag"
  success "$msg"
}
#### USAGE AND ERRORS
cmd=`basename $0 .sh`
export SPINDLE_COMMAND=$cmd
# spindle_functions presumably defines the helpers used throughout this
# script (message, warning, error, fail, success, version, help,
# package_dir, data_dir, ...) — they are not defined in this file.
source `dirname $0`/spindle_functions
# Print command-line usage.
usage() {
   echo "Usage: $cmd [options] PACKAGE_DIR"
   echo ""
   echo "OPTIONS"
   echo ""
   echo " -C Clean old logs"
   echo " -T Tar the resulting archive"
   echo " -h Display help message"
   echo " -v Display Spindle version"
   echo ""
   echo "WARNING: Tar option '-T' will generate an archive the same size"
   echo " as the package directory. Make sure you have adequate"
   echo " free disk space."
   echo ""
}
this_dir=`dirname $0`
this_dir=`(cd $this_dir; pwd)`
# the scripts we'll run (expected to live next to this script)
VERIFY_ALL_FILENAMES=$this_dir/verify_all_filenames
VERIFY_ALL_METADATA=$this_dir/verify_all_metadata
CREATE_MANIFEST=$this_dir/create_manifest
VERIFY_PACKAGE=$this_dir/verify_package
### CONSTANTS
### VARIABLES
# the package dir (filled in from $1 later)
PACKAGE_DIR=
# the data directory (derived from PACKAGE_DIR later)
DATA_DIR=
### OPTIONS
# -h help, -v version, -C clobber stale logs, -T tar the result.
while getopts ":hvCT" opt; do
  case $opt in
    h)
      usage
      version
      help
      exit 0
      ;;
    v)
      version
      exit 0
      ;;
    C)
      CLOBBER_LOGS=true
      ;;
    T)
      TAR=true
      ;;
    \?)
      echo "ERROR Invalid option: -$OPTARG" >&2
      echo ""
      usage
      exit 1
      ;;
  esac
done
# Drop the parsed options so $1 is the PACKAGE_DIR argument.
shift $((OPTIND-1))
### THESCRIPT
# grab package directoy and confirm it exists (package_dir/data_dir and
# the message/error helpers come from spindle_functions)
PACKAGE_DIR=`package_dir $1`
if [ $? -ne 0 ]; then
  error "Error finding package directory"
fi
message "PACKAGE_DIR is $PACKAGE_DIR"
# make sure there's a data directory in PACKAGE_DIR
DATA_DIR=`data_dir $PACKAGE_DIR`
if [ $? -ne 0 ]; then
  error "Error finding data directory"
fi
## ARTIFACTS OF PREVIOUS RUNS
# Files a previous run may have left behind; their presence either blocks
# the run or, with -C, gets them deleted.
ARTIFACTS="DLVRY_filenames.log DLVRY_metadata.log DLVRY_package.log VALIDATING.txt ERRORS_FOUND.txt manifest-md5s.txt"
# errors found file
ERRORS_FOUND_FLAG=$PACKAGE_DIR/ERRORS_FOUND.txt
# flag to show we started the process
VALIDATING_FLAG=$PACKAGE_DIR/VALIDATING.txt
# Before beginning, look for artifacts
leftovers=
for x in $ARTIFACTS
do
  test_file=$PACKAGE_DIR/$x
  if [ -f $test_file ]; then
    if [ "$CLOBBER_LOGS" ]; then
      warning "Deleting old file $test_file"
      rm -f $test_file
    else
      leftovers="$leftovers $x"
    fi
  fi
done
x=
# Any stale file (without -C) aborts the run via delivery_failure.
if [ "$leftovers" ]; then
  for x in $leftovers
  do
    error_no_exit "`format_code_message OLD_FILE_FOUND $x`"
  done
  delivery_failure $VALIDATING_FLAG $ERRORS_FOUND_FLAG "Stale files found"
fi
# Mark the package as in-progress.
touch $VALIDATING_FLAG
## RUN EACH SCRIPT
# Each stage aborts the whole delivery on its first failure.
# * `verify_all_filenames`
$VERIFY_ALL_FILENAMES $PACKAGE_DIR
if [ $? -ne 0 ]; then
  delivery_failure $VALIDATING_FLAG $ERRORS_FOUND_FLAG "filename validation failure"
fi
# * `verify_all_metadata`
$VERIFY_ALL_METADATA $PACKAGE_DIR
if [ $? -ne 0 ]; then
  delivery_failure $VALIDATING_FLAG $ERRORS_FOUND_FLAG "metadata validation failure"
fi
# * `create_manifest`
$CREATE_MANIFEST $PACKAGE_DIR
if [ $? -ne 0 ]; then
  delivery_failure $VALIDATING_FLAG $ERRORS_FOUND_FLAG "manifest generation failure"
fi
# * `verify_package`
$VERIFY_PACKAGE $PACKAGE_DIR
if [ $? -ne 0 ]; then
  delivery_failure $VALIDATING_FLAG $ERRORS_FOUND_FLAG "package validation failure"
fi
delivery_success $VALIDATING_FLAG "Package preparation complete"
# Optionally (-T) tar the whole package next to it and emit an md5.
if [ "$TAR" ]; then
  message "Tarring archive"
  cd $PACKAGE_DIR
  folder_name=`pwd`
  folder_name=`basename $folder_name`
  archive=$folder_name.tar
  cd ..
  tar cvf $archive $folder_name/**
  archive_size=`ls -l $archive | awk '{ print $5 }'`
  message "Generated `pwd`/$archive; $archive_size bytes (`translate_bytes $archive_size`)"
  md5_command=`whichMd5`
  if [ "$md5_command" ]; then
    message "Generating md5 for $archive"
    $md5_command $archive > $archive.md5
    message "`cat $archive.md5`"
    message "Generated `pwd`/$archive.md5"
  fi
fi
### EXIT
# http://stackoverflow.com/questions/430078/shell-script-templates
rm -f $tmp.?
trap 0
exit 0
| true |
5fb01482120d4fa652b9cc42d616e40e11b01e58 | Shell | lisasasasa/Network-Administration-and-System-Administration | /shellscript/jugde.sh | UTF-8 | 2,144 | 3.9375 | 4 | [] | no_license | #! /usr/bin/env bash
# Usage:
#   ./judge PROBLEM_ID C_FILE
# TODO: retrieve the arguments
# Note that you can use the variable name you like :)
prob_id=${1}
c_file=${2}
echo "------ JudgeGuest ------"
echo "Problem ID: ${prob_id}"
echo "Test on: ${c_file}"
echo "------------------------"
# TODO: create a temporary directory in current directory
# Hint: `man mktemp`
MY_TEMP_DIR=`mktemp -d`
MY_TEMP_FILE=`mktemp`
# TODO: compile the source C code, the output executable,
# should output to your temporary directory
gcc -std=c99 -O2 ${c_file} -o ${MY_TEMP_DIR}/123 2>${MY_TEMP_FILE}
# TODO: check the compile status
# NOTE(review): only exit status 1 is treated as a compile error; gcc can
# return other non-zero codes — confirm whether `-ne 0` was intended.
if [[ "$?" -eq 1 ]]; then
	echo "Compile Error"
	rm ${MY_TEMP_FILE}
	rm -fr ${MY_TEMP_DIR}
	exit
fi
# TODO: run a loop, keep trying download testdatas
case_id=0
while true; do
	# TODO: download input and output file
	wget --no-check-certificate "https://judgegirl.csie.org/downloads/testdata/${prob_id}/${case_id}.in" -O "${MY_TEMP_DIR}/${case_id}.in" 2>${MY_TEMP_DIR}/in.txt
	wget --no-check-certificate "https://judgegirl.csie.org/downloads/testdata/${prob_id}/${case_id}.out" -O "${MY_TEMP_DIR}/${case_id}.out" 2>${MY_TEMP_DIR}/out.txt
	# TODO: `break` if get a 404
	# NOTE(review): $? here is the status of the *second* wget only
	# (the .out file); a failed .in download is not detected.
	if [[ $? -eq 8 ]]; then
		break
	fi
	# TODO: execute with the input file
	# `time -p` writes its report to stderr, captured in time_<case>.
	{ time -p ${MY_TEMP_DIR}/123 < ${MY_TEMP_DIR}/${case_id}.in > ${MY_TEMP_DIR}/ans_${case_id}; } 2>${MY_TEMP_DIR}/time_${case_id}
	a=`diff ${MY_TEMP_DIR}/ans_${case_id} ${MY_TEMP_DIR}/${case_id}.out`
	#time -p ${MY_TEMP_DIR}/123 < ${MY_TEMP_DIR}/${case_id}.in > ${MY_TEMP_DIR}/ans_${case_id} 2>${MY_TEMP_DIR}/time_${case_id}
	# Extract the wall-clock seconds; t=1 when the run exceeded the 1 s limit.
	run_time=`cat "${MY_TEMP_DIR}/time_${case_id}" | grep "real" | awk '{print $2}' | bc`
	t=`echo " $run_time > 1 " | bc`
	#echo "$run_time"
	# TODO: finish those conditions
	if [[ $t -eq 1 ]]; then
		echo -e "${case_id}\tTime Limit Exceeded"
	elif [[ -z "$a" ]]; then
		echo -e "${case_id}\tAccepted"
	else
		echo -e "${case_id}\tWrong Answer"
	fi
	case_id=$((case_id+1))
done
# TODO: remove the temporary directory
rm ${MY_TEMP_FILE}
rm -fr ${MY_TEMP_DIR} | true |
8b513cfc1ee07d3439a9a1213e7bf56f5fd08744 | Shell | alangrafu/lodspk-bundle | /utils/modules/detail-component.sh | UTF-8 | 860 | 3.796875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Resolve the directory this script lives in.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# NOTE: currently unused; kept for compatibility with callers/siblings.
components=( types services uris )

cd "$DIR"

# Components dir: $1 = component group, $2 = component name.
mainDir=$DIR"/../../components/"$1
componentDir=$mainDir"/"$2
queryDir=$componentDir"/queries"

if [ ! -d "$mainDir" ]
then
  echo "ERROR: "$1"' dir doesn't exist. $mainDir" >&2
  exit 1
fi

if [ ! -d "$componentDir" ]
then
  echo "ERROR: "$2"' dir ($componentDir) doesn't exist." >&2
  exit 1
fi

cd "$componentDir"
views=`ls *.template`

# In certain cases queries may not exist but that's fine.
# BUG FIX: the original re-tested $componentDir here (always true at this
# point), so a missing queries/ directory made the `cd` below fail; test
# the actual query directory instead.
if [ -d "$queryDir" ]
then
  cd "$queryDir"
  models=`find . -iname "*.query" |sed -e 's/^\.\///g'`
fi

# Emit "views" followed by one tab-indented template name per line,
# then the same for "models" (echo -e expands the stored \n\t escapes).
echo -n "views"
for i in $views
do
 NEWLINEVIEW="\n\t$i"
 LIST=$LIST$NEWLINEVIEW
done
echo -e $LIST

echo -n "models"
for i in $models
do
 NEWLINEMODEL="\n\t$i"
 MODELLIST=$MODELLIST$NEWLINEMODEL
done
echo -e $MODELLIST
| true |
1f2cf9f4900998f7c9b357eb0469e1e4a81a9b4c | Shell | koljanos/wmr | /script/wmr_alarm.sh | UTF-8 | 1,109 | 3.453125 | 3 | [] | no_license | #!/bin/sh
# All four positional parameters are required; otherwise print usage.
if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ] || [ -z "$4" ]; then
 echo "Alarm file for wmr-oregon weather logger"
 echo "(c) 2012 Den68 <idg68@yandex.ru>"
 echo "Global download URL: http://code.google.com/p/wmr/"
 echo ""
 echo "Use format:"
 echo "(SENSOR TYPE) (NUM OF SENSOR) (CURENT STATUS) (ALARM STATUS)"
 echo "Examle:"
 echo "TEMP 1 -7 MIN"
 echo "TEMP 1 28 MAX"
 echo "Run only wmr-oregon software is alarm enabled"
 echo "cpopy & edit all sensor named script in ./etc/wmr/script/ to /etc/wmr/script/"
 echo "copy this script to /usr/bin/wmr_alarm.sh"
 echo "edit /etc/wmr/wmr.conf - lines: ALARMENABLE 1 & ALARMBIN /usr/bin/wmr_alarm.sh"
 exit
fi

# Dispatch on the sensor type; the per-sensor alarm scripts live in
# /etc/wmr/script_alarm and their file names encode sensor number and/or
# alarm status, exactly as in the original if/elif chain.
case "$1" in
 TEMP|HUMIDITY)
  /etc/wmr/script_alarm/WMR_${1}_${2}_${4}.sh $3
  exit
  ;;
 PRESSURE|RAIN|WIND|UV|WATER)
  /etc/wmr/script_alarm/WMR_${1}_${4}.sh $3
  exit
  ;;
 BATTERY)
  /etc/wmr/script_alarm/WMR_${1}.sh ${4} ${2} ${3}
  ;;
esac
exit 0
| true |
11811bb347b29d6ffa643eb6d365055b29964ddc | Shell | DEFRA/ffc-demo-development | /scripts/build | UTF-8 | 1,118 | 3.1875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"OGL-UK-3.0"
] | permissive | #!/usr/bin/env sh
##
# Build script
#
# Build the services managed by this project.
##
set -e

# Resolve the directory containing this script, then treat its parent as
# the project root (pure-POSIX dirname via parameter expansion).
projectRoot=$(a="/$0"; a=${a%/*}; a=${a:-.}; a=${a#/}/; cd "$a/.." || return; pwd)

(
  cd "${projectRoot}"
  cd ..

  printf "\nBuilding services\n"

  # Every service keeps its compose files at
  # <service>/docker-compose.yaml + <service>/docker-compose.override.yaml,
  # so one loop replaces the six copy-pasted invocations. Extra build
  # arguments are forwarded, now safely quoted as "$@".
  for service in \
    ffc-demo-web \
    ffc-demo-claim-service \
    ffc-demo-payment-service \
    ffc-demo-payment-service-core \
    ffc-demo-calculation-service \
    ffc-demo-payment-web
  do
    docker-compose \
      -f "${service}/docker-compose.yaml" \
      -f "${service}/docker-compose.override.yaml" \
      build "$@"
  done
)
| true |
88a4d0e360ea78b60c7e719392680c089f904dde | Shell | b33lz3bubTH/dotfiles | /.local/bin/volume | UTF-8 | 178 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Status-bar volume segment: query pamixer for the current level and mute
# state; print a mute glyph when muted, otherwise a level glyph chosen by
# the external `percentage` helper (defined elsewhere; presumably it maps
# 0-100 onto one of the three icons passed to it — TODO confirm).
vol=`pamixer --get-volume`
if [[ `pamixer --get-mute` == "true" ]]; then
    echo -n "ﱝ $vol% "
else
    echo -n "$(percentage $vol 奔 墳 ) $vol% "
fi
| true |
1f8c6b71721a23ac03a0342b91b89f46a06f0d91 | Shell | rdk/p2rank | /update.sh | UTF-8 | 235 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e # fail fast

# Record the current commit before pulling. `git rev-parse HEAD` is
# plumbing and always yields just the hash, unlike scraping the first line
# of `git log`, whose output varies with user configuration (format
# options, aliases, decorations).
HEAD1=$(git rev-parse HEAD)

echo
echo GIT:
echo

git pull

HEAD2=$(git rev-parse HEAD)

# Rebuild only when the pull actually moved HEAD.
if [ "$HEAD1" != "$HEAD2" ]; then
    echo
    echo GRADLE:
    echo
    ./gradlew clean assemble
fi
| true |
db9eec1e42b38061c2962736670f1a31cad7419d | Shell | martingri/CorvaxHome | /vagrant/scripts/install-phpmyadmin.sh | UTF-8 | 578 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Stage the bundled phpMyAdmin tarball into the vagrant home and unpack it.
cp /MLab-Base/vagrant/mlab-base/packages/phpMyAdmin-latest.tar.gz /home/vagrant/phpMyAdmin-latest.tar.gz
cd /home/vagrant/
tar xzf phpMyAdmin-latest.tar.gz
# NOTE(review): the version is hard-coded even though the tarball is named
# "latest" — this breaks silently whenever the bundled tarball is updated.
mv phpMyAdmin-4.0.4.1-english phpmyadmin
# Install the Apache virtual host and the phpMyAdmin configuration.
sudo cp /MLab-Base/vagrant/mlab-base/manifests/files/php-admin.virtual-host /etc/apache2/sites-available/phpmyadmin
cp /MLab-Base/vagrant/mlab-base/manifests/files/php-admin.config.inc.php /home/vagrant/phpmyadmin/config.inc.php
cd /etc/apache2/sites-available/
sudo a2ensite phpmyadmin
# Append "Listen 8080" after the last line of ports.conf ('$ a\' = sed append).
sudo sed -i '$ a\Listen 8080' /etc/apache2/ports.conf
sudo service apache2 restart | true |
f774652d9646e050eab77b6d19be43cda6118077 | Shell | upb-uc4/hlf-network | /scripts/startNetwork/setupPeers.sh | UTF-8 | 1,654 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Launch the four Fabric peer pods and wait for readiness, retrying the
# wait with increasing patience. msg/header and CONTAINER_TIMEOUT come
# from the sourced util.sh / env.sh.
source ./scripts/util.sh
source ./scripts/env.sh
header "Peers"
msg "Starting org1-peer1"
kubectl create -f k8s/org1/peer1-org1.yaml
msg "Starting org1-peer2"
kubectl create -f k8s/org1/peer2-org1.yaml
msg "Starting org2-peer1"
kubectl create -f k8s/org2/peer1-org2.yaml
msg "Starting org2-peer2"
kubectl create -f k8s/org2/peer2-org2.yaml
# NOTE(review): each ( ... ) group's exit status is that of its *last*
# command only, so a failure of any but the final `kubectl wait` does not
# trigger the `||` fallback — confirm whether `set -e` inside the groups
# was intended.
(
msg "Waiting for pods"
kubectl wait --for=condition=ready pod -l app=peer1-org1 --timeout=${CONTAINER_TIMEOUT} -n hlf
kubectl wait --for=condition=ready pod -l app=peer2-org1 --timeout=${CONTAINER_TIMEOUT} -n hlf
kubectl wait --for=condition=ready pod -l app=peer1-org2 --timeout=${CONTAINER_TIMEOUT} -n hlf
kubectl wait --for=condition=ready pod -l app=peer2-org2 --timeout=${CONTAINER_TIMEOUT} -n hlf
) || (
msg "Waiting error, wait longer for pods"
sleep 2
kubectl wait --for=condition=ready pod -l app=peer1-org1 --timeout=${CONTAINER_TIMEOUT} -n hlf
kubectl wait --for=condition=ready pod -l app=peer2-org1 --timeout=${CONTAINER_TIMEOUT} -n hlf
kubectl wait --for=condition=ready pod -l app=peer1-org2 --timeout=${CONTAINER_TIMEOUT} -n hlf
kubectl wait --for=condition=ready pod -l app=peer2-org2 --timeout=${CONTAINER_TIMEOUT} -n hlf
) || (
msg "Another waiting error, wait even longer for pods"
sleep 10
kubectl wait --for=condition=ready pod -l app=peer1-org1 --timeout=${CONTAINER_TIMEOUT} -n hlf
kubectl wait --for=condition=ready pod -l app=peer2-org1 --timeout=${CONTAINER_TIMEOUT} -n hlf
kubectl wait --for=condition=ready pod -l app=peer1-org2 --timeout=${CONTAINER_TIMEOUT} -n hlf
kubectl wait --for=condition=ready pod -l app=peer2-org2 --timeout=${CONTAINER_TIMEOUT} -n hlf
)
| true |
5a4527f68c396da26023e45ec523e2c317a916f9 | Shell | Xanewok/stretch-lang | /pack-me.sh | UTF-8 | 1,434 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Packs files to a format specified by the course supervisor
# (stage sources into igor_matuszewski/, generate a university Makefile,
# zip the directory, then clean up).
rm igor_matuszewski.zip 2>/dev/null
rm -rdf igor_matuszewski
mkdir igor_matuszewski 2>/dev/null
cp -R app/ igor_matuszewski/
cp -R src/ igor_matuszewski/
cp -R test/ igor_matuszewski/
cp -R syntax/ igor_matuszewski/
cp Setup.hs igor_matuszewski/
cp stack.yaml igor_matuszewski/
cp stretch-lang.cabal igor_matuszewski/
cp package.yaml igor_matuszewski/
cp README_pl.md igor_matuszewski/README
cp -R good/ igor_matuszewski/
cp -R bad/ igor_matuszewski/
cp integration-tests.sh igor_matuszewski/
# Generate a Makefile to be used on a `students` university machine.
# Note: `echo -e "\t..."` emits the literal tab that Makefile recipes
# require; the \$ escapes keep $(CABAL)/$(GHC) as make variables.
rm Makefile 2>/dev/null
echo "#!/bin/bash" >> Makefile
echo "GHC_BIN=/home/students/inf/PUBLIC/MRJP/ghc-8.2.2/bin" >> Makefile
echo "GHC=\$(GHC_BIN)/ghc" >> Makefile
echo "CABAL=\$(GHC_BIN)/cabal" >> Makefile
echo "build:" >> Makefile
echo -e "\t\$(CABAL) sandbox init" >> Makefile
echo -e "\t\$(CABAL) install --only-dependencies -w \$(GHC)" >> Makefile
echo -e "#\t\$(CABAL) configure --enable-tests -w \$(GHC)" >> Makefile
echo -e "\t\$(CABAL) configure -w \$(GHC)" >> Makefile
echo -e "\t\$(CABAL) build" >> Makefile
echo -e "\tcp dist/build/stretchi/stretchi ./interpreter" >> Makefile
echo "" >> Makefile
echo "clean:" >> Makefile
echo -e "\trm ./interpreter" >> Makefile
cp Makefile igor_matuszewski/
zip -r igor_matuszewski.zip igor_matuszewski
rm -rd igor_matuszewski
rm Makefile
| true |
0fd2b7d4be758f1cef64548eebb0b41be20fb794 | Shell | ztalbot2000/homebridge-cmd4 | /Extras/Cmd4Scripts/Examples/wakeonlan.sh | UTF-8 | 4,472 | 3.96875 | 4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/bash
#
# This Cmd4 example demonstrates a script that can be used for a wakeonlan
# scenario. It is a port of the cmdSwitch2 example and is more for a Windows
# PC.
#
# Your Cmd4 .homebridge/.config.json file would have a state_cmd like:
#    state_cmd: ".homebridge/Cmd4Scripts/Examples/wakeonlan.sh"
#    state_cmd_suffix: "192.168.2.66 dc:a6:32:40:de:7c"
#
# Testing from the shell prompt:
#     ./wakeonlan.sh Get HTPC On 192.168.2.66 dc:a6:32:40:de:7c
#  or
#     ./wakeonlan.sh Set HTPC On 1 192.168.2.66 dc:a6:32:40:de:7c
#  or
#     ./wakeonlan.sh Set HTPC On 0 192.168.2.66 dc:a6:32:40:de:7c

# Exit immediately if a command exits with a non-zero status
set -e

# "Get": report whether the accessory is currently reachable.
if [ "$1" = "Get" ]; then
   # Cmd4 passes the IP (from state_cmd_suffix) as the fourth parameter
   # and the MAC address as the fifth parameter of a Get command.
   ip="${4}"
   macAddress="${5}"

   # ping is *expected* to fail when the accessory is down, so suspend
   # `set -e` around it and inspect the status ourselves. We deliberately
   # report '0' instead of a failing exit status so the value is visible.
   set +e

   # On OSX the summary string is returned differently than on linux;
   # normalize before matching.
   ping -c 2 -W 1 "${ip}" | sed -E 's/2 packets received/2 received/g' | grep -i '2 received' >> /dev/null
   rc=$?

   # Exit immediately if a command exits with a non-zero status
   set -e

   if [ "$rc" = "0" ]; then
      # The target answered: report '1' (On, like a binary number) to Cmd4.
      echo "1"
      exit 0
   else
      # The target did not answer: report '0' (Off); the Get itself
      # still succeeds.
      echo "0"
      exit 0
   fi
fi

# "Set": switch the accessory on (wake-on-LAN) or off (remote shutdown).
if [ "$1" = "Set" ]; then
   # $2 is the accessory name, $3 the characteristic ('On'), $4 is '1'
   # for On and '0' for Off. Cmd4 passes the IP as the fifth and the MAC
   # address as the sixth parameter of a Set command.
   ip="${5}"
   macAddress="${6}"

   # Handle the Set 'On' attribute of the accessory.
   if [ "$3" = "On" ]; then
      # BUG FIX: under `set -e` a plain `cmd; rc=$?` aborts the script
      # before rc can be read whenever cmd fails (the Get branch handled
      # this with set +e, this branch did not). Capturing the status in
      # the `||` branch keeps `set -e` from firing.
      rc=0
      if [ "$4" = "1" ]; then
         # Execute the on command.
         wakeonlan -i ${ip} ${macAddress} >> /dev/null 2>&1 || rc=$?
      else
         # Execute the off command.
         # The password is hard coded here; we use the default Raspberry
         # Pi credentials as an example. How you handle this unencrypted
         # password is up to you. Note that this command only works
         # against a Windows host, as in the cmdSwitch2 example:
         #    net rpc shutdown -I "${ip}" -U user%password
         net rpc shutdown -I "${ip}" -U pi%raspberry >> /dev/null 2>&1 || rc=$?
      fi

      # Propagate the on/off command's status back to Cmd4.
      if [ "$rc" = "0" ]; then
         exit 0
      else
         exit $rc
      fi
   fi
fi

# The proper arguments to this script were not passed to it, so end with a
# distinctive failure exit status.
exit 66
| true |
12f449c4a2b324bb0c4cb9d6509c7a83a98ce151 | Shell | jvff/mestrado-referencias | /commit.bash | UTF-8 | 376 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Latest BibTeX title from ref.bib, and the latest reference key from
# ref.txt (the hold-space trick prints the line *before* each "---"
# separator, with any trailing ':' stripped).
title="$(sed -ne 's/\ttitle={\(.*\)},\?$/\1/p' ref.bib | tail -n 1)"
ref="$(sed -ne '/^--*$/{g;s/:$//;p};h' ref.txt | tail -n 1)"

file="$(mktemp)"
# Always remove the temp file — the original leaked it on the
# "Repeated reference!" early exit and whenever vim/git failed.
trap 'rm -f "$file"' EXIT

# Refuse to commit a key that appears more than once.
# (grep -c counts matching lines directly; was: grep | wc -l)
if [ "$(grep -c "^$ref:$" ref.txt)" -gt 1 ]; then
    echo "Repeated reference!"
    exit 1
fi

cat > "$file" << EOF
Resumo da referência $ref
- $title
EOF

# Let the user edit the commit message, then commit everything with it.
vim "$file"
git commit -a -F "$file"
| true |
8fb2f01e027f7a09a7b4a0e44bffc2b391aa1700 | Shell | pcbadger/random | /scripts/ironport-cross-ref1.sh | UTF-8 | 807 | 2.75 | 3 | [] | no_license | #mxchecker.sh
# Cross-reference each value from INPUT1 against two other IronPort data
# files and append "<whois-line>|<interface-hit-count>" per value to OUTPUT.
# NOTE(review): many commented-out experiments are retained below from the
# original exploratory session; the live path is only the two CROSSCHECK
# assignments and the two echos.
INPUT1="OUTPUT/day3-ironport-ip-out.csv"
INPUT2="day3-ironportauth2.txt"
INPUT3="OUTPUT/day3-ironport-whois-out-sorted2.csv"
OUTPUT="OUTPUT/ironport-cross-ref-out4.csv"
#while read f1
#do
for i in `cat ${INPUT1}`
do
#IP=`echo $i | cut -d \| -f 2`
#SOURCE=`echo $i | cut -d \| -f 1`
#COUNTRY=`echo $i | cut -d \| -f 3`
#echo $IP
# Count "interface" lines in INPUT2 mentioning this value.
CROSSCHECK1=`cat $INPUT2 | grep interface | grep $i | wc -l`
#CROSSCHECK1=`cat $INPUT2 | grep interface | grep $i | cut -d " " -f 15`
# Matching whois line(s) from INPUT3.
CROSSCHECK2=`cat $INPUT3 | grep $i`
# for UNIT in $CROSSCHECK1;
# do
# BLAH=`echo $UNIT | sed 's/ /,/'` ;
# echo $BLAH
# done
#echo "$CROSSCHECK1"
#echo "$i: $MXOUT"
#CHECK2=`cat $CHECK1 | cut -f 1`
#echo "$COUNTRY|$IP|$CROSSCHECK1|$SOURCE"
echo "$i"
echo "$CROSSCHECK2|$CROSSCHECK1" >> "$OUTPUT"
done
#< etrn-mx3.csv
| true |
a70b0e543705fbec79ae8cf94793a6260c8c79f7 | Shell | Jack28/router | /masquerade-internet/conf.sh | UTF-8 | 334 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Some useful information about the profile.
comment=""

# Pull in profile settings from ./config — presumably where
# $internet_interface (used below) is defined.
source config

# start: enable kernel IPv4 forwarding and NAT (masquerade) all traffic
# leaving through the internet-facing interface.
function run()
{
    sysctl -w net.ipv4.ip_forward=1
    iptables -t nat -A POSTROUTING -o $internet_interface -j MASQUERADE
}

# stop
# ("nur" is "run" reversed — the inverse operation: disable forwarding and
# delete the masquerade rule added by run.)
function nur()
{
    sysctl -w net.ipv4.ip_forward=0
    iptables -t nat -D POSTROUTING -o $internet_interface -j MASQUERADE
}
| true |
75e46fc3d84f1e0ac62be5aea1ab8398ff728b17 | Shell | arlengur/gwtreact | /tools/qos-install-parent/qos-broker-federation/src/main/resources/requester.sh | UTF-8 | 560 | 2.96875 | 3 | [] | no_license | if [ "$1" = '-help' ] ; then
echo ""
echo "Usage:"
echo "requester.sh <parameters>"
echo "Example:"
echo "requester.sh opposite.host=qligent_server_host current.host=current_agent_host"
echo ""
exit 1
fi
# Resolve this script's absolute path, then keep just its directory as
# the requester home.
ABSOLUTE_PATH=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)/`basename "${BASH_SOURCE[0]}"`
ABSOLUTE_PATH=`dirname $ABSOLUTE_PATH`
REQUESTER_HOME=$ABSOLUTE_PATH
# Turn every key=value argument into a JVM -Dkey=value system property
# (accumulated into D_PARAMS for the java invocation below).
while [[ ! -z $1 ]]
do
	D_PARAMS=$D_PARAMS" "-D$1
	shift
done
# Classpath: the requester home itself plus every jar in its lib/ dir.
CLASSPATH=$REQUESTER_HOME:$REQUESTER_HOME/lib/*
java $D_PARAMS -cp "$CLASSPATH" com.tecomgroup.qos.broker.federation.Bootstrap | true |
9add7f8a2cea1fc7140f192d11d84594f22d9ddd | Shell | just22/utils | /set_display | UTF-8 | 1,221 | 3.28125 | 3 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/sh
# File: set_display
# Purpose: External monitors management
# Author: Alessandro DE LAURENZIS <just22.adl@gmail.com>
# Version: 1.0
# Date: Somewhere in time...
# Args: <disp1> <disp2> <pos|only|restore> where "pos" is any xrandr
# position flag (e.g. above/below/left-of/right-of).
if [ "$#" != 3 ]; then
	echo "Usage: $0 <disp1> <disp2> <pos|only|restore>"
	exit 1
fi
# Silence everything from here on (all diagnostics discarded).
exec >/dev/null 2>&1
DISPLAY_1=$1
DISPLAY_2=$2
ACTION=$3
# only:    disable disp1, enable disp2 alone.
# restore: reset everything to the preferred automatic layout.
# default: disp1 primary, disp2 positioned via --$ACTION relative to disp1.
if [ "$ACTION" == only ]; then
	xrandr --output $DISPLAY_1 --off --output $DISPLAY_2 --auto
elif [ "$ACTION" == restore ]; then
	#sudo wsconsctl display.focus=4; xrandr --output $DISPLAY_1 --auto --output $DISPLAY_2 --off
	xrandr --auto
else
	xrandr --output $DISPLAY_1 --auto --primary --output $DISPLAY_2 --auto --$ACTION $DISPLAY_1
fi
# Repaint the root window gradient (solarized tones).
hsetroot -add '#073642' -add '#586E75' -gradient 0
# Restart desktop components that cache screen geometry.
if pgrep -x openbox; then
	openbox --restart
	feh --bg-scale ~/Images/Wallpapers/apple_keyboard.jpg
fi
if pgrep -x bbpager; then
	pkill bbpager; bbpager&
fi
if pgrep -x tint2; then
	pkill tint2
	# NOTE(review): the stray ':' after each ')' is the no-op builtin run
	# before tint2 — harmless, but probably unintended.
	case "$ACTION" in
		above|below):
			tint2 -c ~/.config/tint2/dual_aboveBelow.tint2rc&
			;;
		*):
			tint2&
			;;
	esac
fi
sleep 2
pkill conky; conky-statusbar&
| true |
f401f166a5deecc7ba0c2fce79daefc5fb8969b6 | Shell | mmickan/puppet-vault | /files/ssh-install-script | UTF-8 | 2,303 | 4.28125 | 4 | [] | no_license | #!/bin/bash
#
# This is the default script which installs or uninstalls an RSA public key
# to/from authorized_keys files in a typical linux machine, with a patch for
# https://github.com/hashicorp/vault/issues/1285 applied.
#
# If the platform differs or if the binaries used in this script are not available
# in target machine, use the 'install_script' parameter with 'roles/' endpoint to
# register a custom script (applicable for Dynamic type only).
#
# Vault server runs this script on the target machine with the following params:
#
# $1:INSTALL_OPTION: "install" or "uninstall"
#
# $2:PUBLIC_KEY_FILE: File name containing public key to be installed. Vault server
# uses UUID as name to avoid collisions with public keys generated for other requests.
#
# $3:AUTH_KEYS_FILE: Absolute path of the authorized_keys file.
# Currently, vault uses /home/<username>/.ssh/authorized_keys as the path.
#
# [Note: This script will be run by Vault using the registered admin username.
# Notice that some commands below are run as 'sudo'. For graceful execution of
# this script there should not be any password prompts. So, disable password
# prompt for the admin username registered with Vault.
set -e
# Storing arguments into variables, to increase readability of the script.
INSTALL_OPTION=$1
PUBLIC_KEY_FILE=$2
AUTH_KEYS_FILE=$3
# Delete the public key file and the temporary file
function cleanup
{
rm -f "$PUBLIC_KEY_FILE" temp_$PUBLIC_KEY_FILE
}
# 'cleanup' will be called if the script ends or if any command fails.
trap cleanup EXIT
# Return if the option is anything other than 'install' or 'uninstall'.
if [ "$INSTALL_OPTION" != "install" ] && [ "$INSTALL_OPTION" != "uninstall" ]; then
exit 1
fi
# Create the .ssh directory and authorized_keys file if it does not exist
SSH_DIR=$(dirname $AUTH_KEYS_FILE)
sudo mkdir -p "$SSH_DIR"
sudo touch "$AUTH_KEYS_FILE"
# Remove the key from authorized_keys file if it is already present.
# This step is common for both install and uninstall.
sudo grep -vFf "$PUBLIC_KEY_FILE" "$AUTH_KEYS_FILE" > temp_$PUBLIC_KEY_FILE || true
cat temp_$PUBLIC_KEY_FILE | sudo tee "$AUTH_KEYS_FILE"
# Append the new public key to authorized_keys file
if [ "$INSTALL_OPTION" == "install" ]; then
cat "$PUBLIC_KEY_FILE" | sudo tee --append "$AUTH_KEYS_FILE"
fi
| true |
45e0c9f613cf52d2e8ee971aff9b75540d19b188 | Shell | vitek/configs | /bashrc | UTF-8 | 2,123 | 3.03125 | 3 | [] | no_license | # -*- mode: shell-script; -*-
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
unset LESS
if [ -t /etc/gentoo-release ]; then
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
fi
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# Nice editor for small configs and commit messages
export EDITOR=vim
export PAGER=less
for bcfile in ~/.bash.d/*; do
if [ -a "$bcfile" ]; then
source "$bcfile"
fi
done
if [[ "$INSIDE_EMACS" = 'vterm' ]] \
&& [[ -n ~/.bashrc_vterm ]] \
&& [[ -f ~/.bashrc_vterm ]]; then
source ~/.bashrc_vterm
fi
export LIBOVERLAY_SCROLLBAR=0
export GTK_OVERLAY_SCROLLING=0
# https://ubuntuforums.org/showthread.php?t=2390362
export QT_AUTO_SCREEN_SCALE_FACTOR=0
alias e="run-emacsclient"
if [ -f ~/.bashrc_local ]; then
. ~/.bashrc_local
fi
| true |
6068de5204e7cfd72f5b3c84cf7ad6fa2d679cc5 | Shell | kkoomen/dotfiles-i3 | /update.sh | UTF-8 | 2,093 | 3.890625 | 4 | [] | no_license | #!/bin/bash
DOTFILES_DIR=~/dotfiles # dotfiles directory
BACKUP_DIR=~/dotfiles_old # old dotfiles backup directory
USERNAME=$(echo $USER)
ORANGE='\033[0;33m'
NC='\033[0m'
# list of files to symlink
FILES=(
# files to symlink
.bashrc
.bash_aliases
.bash_functions
.bash_profile
.vimrc
.xinitrc
.Xresources
.config/gtk-3.0/gtk.css
.config/i3/config
.config/i3/statusbar.conf
.config/termite/config
.weechat/weechat.conf
.weechat/buffers.conf
.mpd/mpd.conf
.ncmpcpp/config
.ssh/config
.ssh/id_rsa.pub
.config/mpv/mpv.conf
.npmrc
.gitconfig
# directories to symlink
.vim/
.screenlayout/
)
# files that must exist
TOUCH_FILES=(
.mpd/mpdstate
.mpd/mpd.pid
.mpd/mpd.db
.mpd/mpd.log
)
# directories that must exist
TOUCH_DIRS=(
.ssh/
.weechat/
.ncmpcpp/
.mpd/playlists/
.config/mpv/
.config/gtk-3.0/
.config/i3/
.config/termite/
)
# :: SETUP PROCESS
# re-create dotfiles_old in homedir if necessary
echo "[Setup] Creating $BACKUP_DIR to backup any existing dotfiles in /home/$USERNAME"
rm -rf $BACKUP_DIR > /dev/null 2>&1 && mkdir $BACKUP_DIR
# copy any existing dotfiles to backup folder
for file in "${FILES[@]}"; do
symlink_file=${file%/} # removes trailing slash
if [[ -L ~/$symlink_file ]]; then
rm -f ~/$symlink_file
elif [[ -e ~/$symlink_file ]]; then
mv ~/$file $BACKUP_DIR
fi
done
# touch directories
for directory in "${TOUCH_DIRS[@]}"; do
if [[ ! -d ~/$directory ]]; then
printf "[Setup] touching directory: ${ORANGE}~/$directory${NC}\n"
mkdir -p ~/$directory
fi
done
# touch files
for file in "${TOUCH_FILES[@]}"; do
if [[ ! -e ~/$file ]]; then
printf "[Setup] touching file: ${ORANGE}$file${NC}\n"
touch ~/$file
fi
done
# symlink everything from the new dotfiles directory.
for file in "${FILES[@]}"; do
symlink_file=${file%/} # removes trailing slash
printf "[Setup] Creating symlink to ${ORANGE}~/$symlink_file${NC} in home directory.\n"
ln -s $DOTFILES_DIR/$symlink_file ~/$symlink_file
done
# permissions
chmod 400 ~/.ssh/id_rsa
echo "[Setup] done."
| true |
340536cc61351b52b5d229585134f1d661c98345 | Shell | JoeUno/InClass | /daysalive/daysalive | UTF-8 | 321 | 2.765625 | 3 | [] | no_license | #!/bin/bash
#days_alive
#lab 5
#dkp
#today
EDATE=$(date +%s)
#dob
SDATE=$(date -d '06/07/1988' +%s)
#sec alive
SALIVE=$(( $EDATE - $SDATE))
DALIVE=$(($SALIVE/86400))
TENDAYS=$(($SDATE + 864000000))
TDATE=$(date -d '@'$TENDAYS'' +%m/%d/%y)
echo " You have been alive "$DALIVE "days"
echo "10000 days for you is " $TDATE
| true |
49f9078fff9e3751bd621fcf3e28f82e08bf9601 | Shell | fishercht1995/SFS | /SFS-port-OpenLambda/openlambda/default-ol/lambda/var/lib/dpkg/info/dash.postinst | UTF-8 | 3,659 | 3.796875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -e
# $1: dfile, $2: link target, $3: distrib
replace_with_link() {
dfile=$1; ltarget=$2; distrib=$3
temp=$dfile.tmp
# Safely create a symlink to $ltarget at $dfile, first
# making a backup of $dfile (the file being diverted)
# in $distrib (if specified).
#
# The cp / ln -s / mv dance avoids having to live without
# $dfile (think: /bin/sh) for even a moment, so applications
# running in parallel can continue without trouble.
# See dash.preinst for details.
if [ -n "$distrib" ] && [ -e "$dfile" ]; then
cp -dp "$dfile" "$distrib"
fi
ln -sf "$ltarget" "$temp"
mv -f "$temp" "$dfile"
}
claim_binsh() {
dfile=$1 ltarget=$2 distrib=${3:-$dfile.distrib}
diverter=$(dpkg-divert --listpackage $dfile)
truename=$(dpkg-divert --truename $dfile)
if [ "$diverter" = dash ]; then
# good.
return
fi
if [ "$diverter" = LOCAL ]; then
# The sysadmin wants it this way. Who am I to argue?
return
fi
if [ "$diverter" != bash ]; then
# Let dpkg-divert error out; we are not taking
# over the diversion, unless we added it
# ourselves on behalf of bash.
dpkg-divert --package dash --remove $dfile
echo "This should never be reached"
exit 1
fi
dpkg-divert --package bash --remove $dfile
dpkg-divert --package dash --divert $distrib --add $dfile
# remove the old equivalent of $distrib, if it existed.
if [ -n "$truename" ]; then
rm -f "$truename"
fi
replace_with_link $dfile $ltarget $distrib
}
unclaim_binsh() {
dfile=$1 ltarget=$2 distrib=${3:-$dfile.distrib}
diverter=$(dpkg-divert --listpackage $dfile)
truename=$(dpkg-divert --truename $dfile)
if [ "$diverter" != dash ]; then
# good.
return
fi
# Donate the diversion and sh symlink to the bash package.
ltarget=$(echo $ltarget | sed s/dash/bash/)
dpkg-divert --package dash --remove $dfile
dpkg-divert --package bash --divert $distrib --add $dfile
if [ -n "$truename" ]; then
rm -f "$truename"
fi
replace_with_link $dfile $ltarget $distrib
}
initial_binsh_setup() {
dfile=$1 ltarget=$2 distrib=${3:-$dfile.distrib} ashfile=$4
diverter=$(dpkg-divert --listpackage $dfile)
truename=$(dpkg-divert --truename $dfile)
if [ -z "$diverter" ]; then
# good.
return
fi
if [ "$diverter" = ash ]; then
dpkg-divert --package ash --remove $dfile
dpkg-divert --package dash --divert $distrib --add $dfile
if [ "$truename" != "$distrib" ] && [ -e "$truename" ]; then
mv "$truename" "$distrib"
fi
replace_with_link $dfile $ltarget
return
fi
if
[ -h $dfile ] &&
[ -f $dfile ] &&
[ -f $ashfile ] &&
cmp $dfile $ashfile
then
replace_with_link $dfile $ltarget
fi
}
add_shell() {
if ! type add-shell > /dev/null 2>&1; then
return
fi
add-shell /bin/dash
}
debconf=
if [ -f /usr/share/debconf/confmodule ]; then
. /usr/share/debconf/confmodule
debconf=yes
fi
if [ "$1" = configure ] && [ -z "$2" ]; then
initial_binsh_setup /bin/sh dash '' /bin/ash
initial_binsh_setup /usr/share/man/man1/sh.1.gz dash.1.gz \
/usr/share/man/man1/sh.distrib.1.gz \
/usr/share/man/man1/ash.1.gz
add_shell
elif [ "$1" = configure ] && dpkg --compare-versions "$2" lt 0.4.18; then
add_shell
fi
if [ $debconf ]; then
db_get dash/sh
if [ "$RET" = true ]; then
claim_binsh /bin/sh dash
claim_binsh /usr/share/man/man1/sh.1.gz dash.1.gz \
/usr/share/man/man1/sh.distrib.1.gz
else
unclaim_binsh /bin/sh dash
unclaim_binsh /usr/share/man/man1/sh.1.gz dash.1.gz \
/usr/share/man/man1/sh.distrib.1.gz
fi
fi
# Automatically added by dh_installmenu/11.1.4ubuntu1
if [ "$1" = "configure" ] && [ -x "`which update-menus 2>/dev/null`" ]; then
update-menus
fi
# End automatically added section
| true |
5a13bd568c19d5c2599babfbc7caae1358d089b1 | Shell | shchae7/dnn_verification | /scripts/manage.sh | UTF-8 | 612 | 3.59375 | 4 | [] | no_license | set -x
# Resolve the project root (the parent of this script's directory) unless
# the caller already exported PROJECT_DIR.
PROJECT_DIR=${PROJECT_DIR:-$(
cd "$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/.."
pwd
)}
echo "$PROJECT_DIR"
mkdir -p "$PROJECT_DIR/verifiers"
# Usage: manage.sh install <tool>...
if [ "$1" = "install" ]; then
# Drop the "install" sub-command itself before looping; the old loop over
# "$@" also saw "install" and always reported it as an unsupported tool.
shift
for tool in "$@"; do
if [ "$tool" = "reluplex" ]; then
echo "Installing Reluplex"
sh "$PROJECT_DIR/scripts/installation/install_reluplex.sh"
elif [ "$tool" = "reluval" ]; then
echo "Installing ReluVal"
sh "$PROJECT_DIR/scripts/installation/install_reluval.sh"
else
echo "Unsupported Tool Specified"
fi
done
fi | true |
0f2075ef1b07cb8b0dd31ad0a99bd9a70e491182 | Shell | MarkAYoder/BeagleBoard-exercises | /setup/unFirewall.sh | UTF-8 | 583 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Run on bone to undo the restrictive firewall: this *deletes* (-D) the
# ACCEPT rules that limited input to 137.112.41.*, 192.168.7.* and
# 10.0.4.*, and resets the INPUT policy back to ACCEPT.
iptables --policy INPUT ACCEPT
iptables -D INPUT -s 137.112.41.0/24 -j ACCEPT
iptables -D INPUT -s 192.168.7.0/24 -j ACCEPT
iptables -D INPUT -s 10.0.4.0/24 -j ACCEPT
iptables -D INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# Remove the rules that opened the web server (HTTP/HTTPS) to all addresses.
iptables -D INPUT -p tcp -m tcp --dport 80 -j ACCEPT
iptables -D INPUT -p tcp -m tcp --dport 443 -j ACCEPT
# Switch -A to -D to delete rule
# iptables -D INPUT -s 192.168.7.0/24 -j ACCEPT
| true |
c5300c90dccece51f1e470ba1deb493b08321625 | Shell | seL4/isabelle | /Admin/cronjob/plain_identify | UTF-8 | 1,222 | 3.71875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
#
# Plain identify job for Isabelle + AFP
#
set -e
source "$HOME/.bashrc"
# Force a plain, reproducible environment for Mercurial and text tools.
export LANG=C
export HGPLAIN=
REPOS_DIR="$HOME/cronjob/plain_identify_repos"
ISABELLE_REPOS_SOURCE="https://isabelle.sketis.net/repos/isabelle"
AFP_REPOS_SOURCE="https://isabelle.sketis.net/repos/afp-devel"
# setup_repos NAME SOURCE: clone SOURCE into $REPOS_DIR/NAME (without a
# working copy) unless a clone already exists there.
function setup_repos ()
{
  local NAME="$1"
  local SOURCE="$2"
  mkdir -p "$REPOS_DIR"
  if [ ! -d "$REPOS_DIR/$NAME" ]; then
    "${HG:-hg}" clone --noupdate -q "$SOURCE" "$REPOS_DIR/$NAME"
  fi
}
# identify_repos NAME: pull the latest changesets and print
# "NAME version: <short changeset id>" for the repository tip.
function identify_repos ()
{
  local NAME="$1"
  "${HG:-hg}" pull -R "$REPOS_DIR/$NAME" -q
  local ID="$("${HG:-hg}" tip -R "$REPOS_DIR/$NAME" --template "{node|short}")"
  echo "$NAME version: $ID"
}
setup_repos "Isabelle" "$ISABELLE_REPOS_SOURCE"
setup_repos "AFP" "$AFP_REPOS_SOURCE"
# The log file name encodes the date plus seconds-since-midnight (zero
# padded) so repeated runs on the same day sort chronologically.
NOW="$(date --rfc-3339=ns)"
LOG_DIR="$HOME/cronjob/log/$(date -d "$NOW" "+%Y")"
LOG_SECONDS="$(($(date -d "$NOW" +"%s") - $(date -d 'today 00:00:00' "+%s")))"
LOG_NAME="plain_identify_$(date -d "$NOW" "+%Y-%m-%d").$(printf "%05d" "$LOG_SECONDS").log"
mkdir -p "$LOG_DIR"
# Write the whole report into the per-year log directory.
{
  echo -n "isabelle_identify: "
  date -d "$NOW" "+%a %b %-d %H:%M:%S %Z %Y"
  echo
  identify_repos "Isabelle"
  identify_repos "AFP"
} > "$LOG_DIR/$LOG_NAME"
| true |
70ce469d1fca5a6bbe3590c276e412607080cb5e | Shell | rkc7h/ithriv | /stop.sh | UTF-8 | 871 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Stop Angular, Postgres, Elasticsearch, and Flask (and kill any associated processes).
# --------------------------------------------------------------------------
# Set the home directory
export HOME_DIR=$(pwd)
BACKEND_PATH="${HOME_DIR}/backend"
DATABASE_PATH="/usr/local/var/postgres"
# Print the shutdown banner for one service.
announce() {
echo -e "\n\n*** Stopping $1... ***\n\n"
}
# Kill whatever process is listening on the given TCP port (if any).
kill_port() {
lsof -t -i "tcp:$1" -s tcp:listen | xargs kill
}
# Stop Angular
announce "frontend app"
kill_port 4200
# Stop PostgreSQL
announce "postgresql"
pkill -f postgres
pg_ctl stop -m immediate -D $DATABASE_PATH
# Stop ElasticSearch
announce "elasticsearch"
pkill -f elasticsearch
# Kill any remaining server processes
announce "backend app"
for port in 5000 9200 5432; do
kill_port "$port"
done
| true |
03d1d2a94cceabd1f82fc394c5ec7d99b0b81d0f | Shell | stevenodb/simple-io-bench | /bench.sh | UTF-8 | 2,880 | 4.21875 | 4 | [] | no_license | #!/usr/bin/env bash
file=bigfile
copy="${file}.copy"
# size of bigfile is expressed as power of 2.
EXP=32
# Terminal styling: bold/normal via tput, colors as raw ANSI escape codes.
BOLD=$(tput bold)
NORMAL=$(tput sgr0)
RED='\033[0;31m'
NC='\033[0m' # No Color
# calculate_rate LABEL DD_OUTPUT: extract the "(N bytes/sec)" figure from
# dd's status summary and print it as GiB/s next to LABEL.
function calculate_rate {
  rate=$(echo -n "$2" | sed -nE 's#^.*\(([0-9]+) bytes/sec.*$#\1#p' | awk 'END { gb = $1 / 1024**3 ; printf("%.2f GiB/s\n", gb)}')
  echo -e "$1 ${RED}$rate${NC}"
}
# Grow $file to 2^EXP bytes of non-null content: start from a 16-byte seed
# and double it EXP-4 times (16 * 2^(EXP-4) = 2^EXP), showing progress on
# every other iteration and on the last one.
function create_content {
  END=$((EXP-4))
  echo -n "wubba lubba dub " > ${file}
  for i in $(seq $END);
  do
    cat ${file} ${file} > ${file}.2; mv ${file}.2 ${file};
    if (( $i & 1 )) || (($i == $END)); then
      echo -ne "\r"
      echo -n "progress: $(du -kh ${file} | cut -f1)";
    fi
  done
}
# Print a bold message (echo -e also interprets \n etc. in the argument).
function message {
  echo -e "${BOLD}$1${NORMAL}"
}
function humanize {
local result=$(echo "$1" | awk '{ byte = $1 /1024**2 ; print byte " MiB" }')
echo "$result"
}
# Offer to delete the benchmark files (rm -i prompts for each file).
function cleanup {
  if [ -f ${file} ]; then
    message "\nClean up?"
    rm -i ${file} ${file}.*
  fi
}
# Create ${file} unless a previous run already left one behind.
function create_file {
  if [ ! -f ${file} ]; then
    size=$(humanize $((2**EXP)))
    message "\nCreating ${file} of size ${size} bytes with non-null content..."
    create_content
  else
    message "\nReusing existing file."
  fi
}
# Pre-authorize sudo now so the cache purge later does not stop to prompt.
function spin_up_sudo {
  if [[ $EUID -ne 0 ]]; then
    message "\nNeed sudo rights to purge disk caches after writing."
    sudo -v
  fi
}
# Copy benchmark: a first dd to /dev/null runs un-timed (presumably to
# warm the cache -- TODO confirm), then the second dd's summary output is
# parsed for the throughput figure.
function do_copy {
  message "\nCopying ${file} to ${copy}..."
  message "$ dd if=${file} bs=1024k of=${copy}"
  dd if=${file} bs=1024k of=/dev/null > /dev/null 2>&1
  calculate_rate "COPY rate:" "${RED}$(dd if=${file} bs=1024k of=${copy} 2>&1)${NC}"
}
# Drop the disk caches ('purge' is the macOS command) so the next read is cold.
function do_purge {
  message "\nPurging disk caches..."
  message "$ sudo purge"
  sudo purge
}
# Read benchmark: the first pass is a cold read (caches were just purged),
# the second pass measures the cached read rate.
function do_read {
  message "\nWe read the file back in..."
  message "$ dd if=${file} bs=1024k of=/dev/null"
  calculate_rate "READ rate:" "$(dd if=${file} bs=1024k of=/dev/null 2>&1)"
  calculate_rate "CACHED READ rate" "$(dd if=${file} bs=1024k of=/dev/null 2>&1)"
}
clear
cat <<'_EOF'
%{-------------------------------------------------------------------------+
| ___ _____ ____ _ _ |
| |_ _| / / _ \ | __ ) ___ _ __ ___| |__ _ __ ___ __ _ _ __| | __ |
| | | / / | | | | _ \ / _ \ '_ \ / __| '_ \| '_ ` _ \ / _` | '__| |/ / |
| | | / /| |_| | | |_) | __/ | | | (__| | | | | | | | | (_| | | | < |
| |___/_/ \___/ |____/ \___|_| |_|\___|_| |_|_| |_| |_|\__,_|_| |_|\_\ |
| |
| Simple I/O Benchmark that creates, copies and reads a big file |
| Steven Op de beeck, 2018. |
+-------------------------------------------------------------------------%}
_EOF
cleanup
create_file
sync
echo "" ; ls -lh ${file}*
spin_up_sudo
do_copy
do_purge
do_read
echo "" ; ls -lh ${file}*
cleanup | true |
530a172f54594759bb3626192b992bd0d7723e5b | Shell | lucasdc99/minishell1_2018 | /bonus/script.sh | UTF-8 | 277 | 3.359375 | 3 | [] | no_license | #!/bin/sh
# Compare ./mysh against tcsh: feed both shells the same command line,
# then diff their combined output and their exit statuses.
echo "Entrez votre test: "
read -r COMMAND
# Quote $COMMAND so globs and whitespace reach both shells untouched.
echo "$COMMAND" | ./mysh > text.txt 2>&1
echo $? > status1.txt
echo "$COMMAND" | tcsh > text2.txt 2>&1
echo $? > status2.txt
# The test passes only when both the output and the exit status match.
if diff text.txt text2.txt && diff status1.txt status2.txt
then
echo "Le programme fonctionne !"
fi | true |
d31460c97fdf976d114403d83a79f9590a90f895 | Shell | JuserZhang/iso-builder | /pyisobuilder_professional/common | UTF-8 | 3,018 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# chroot_do CHROOT_PATH CMD...: run CMD inside the chroot with a clean,
# minimal root environment (env -i) and a non-interactive Debian frontend.
chroot_do(){
	CHROOT_PATH=$1
	# Abort if no chroot path was given.
	[[ -z ${CHROOT_PATH} ]] && exit 101
	shift
	sudo chroot "${CHROOT_PATH}" /usr/bin/env -i \
		HOME=/root \
		USERNAME=root \
		USER=root \
		LOGNAME=root \
		LC_ALL=C \
		PATH=/sbin:/bin:/usr/bin:/usr/sbin \
		DEBIAN_FRONTEND=noninteractive \
		"$@"
}
# postchroot CHROOT_PATH: tear the chroot back down — kill any processes
# still using it, unmount the pseudo filesystems, and strip host-specific
# files (hostname, DNS, dbus machine-id) so the tree is clean again.
postchroot(){
	CHROOT_PATH=$1
	[[ -z ${CHROOT_PATH} ]] && exit 101
	sudo fuser -k ${CHROOT_PATH}
	sudo chroot ${CHROOT_PATH} umount /proc/sys/fs/binfmt_misc || true
	sudo chroot ${CHROOT_PATH} umount /proc
	sudo chroot ${CHROOT_PATH} umount /sys
	sudo chroot ${CHROOT_PATH} umount /dev/pts
	sudo rm -rf ${CHROOT_PATH}/tmp/* ${CHROOT_PATH}/root/.bash_history
	sudo rm -f ${CHROOT_PATH}/etc/hosts
	sudo rm -f ${CHROOT_PATH}/etc/hostname
	sudo rm -f ${CHROOT_PATH}/etc/resolv.conf
	sudo umount ${CHROOT_PATH}/dev
	[[ -f "${CHROOT_PATH}/var/lib/dbus/machine-id" ]] && sudo rm -f ${CHROOT_PATH}/var/lib/dbus/machine-id
	# Undo the initctl/policy-rc.d service-start blockers added by prechroot.
	[[ -f "${CHROOT_PATH}/sbin/initctl" ]] && sudo rm -f ${CHROOT_PATH}/sbin/initctl
	[[ -f "${CHROOT_PATH}/usr/sbin/policy-rc.d" ]] && sudo rm -f ${CHROOT_PATH}/usr/sbin/policy-rc.d
	sudo chroot ${CHROOT_PATH} dpkg-divert --rename --remove /sbin/initctl
	#[[ -f "${CHROOT_PATH}/lib/systemd/systemd-udevd" ]] && sudo rm -f ${CHROOT_PATH}/lib/systemd/systemd-udevd
	#sudo chroot ${CHROOT_PATH} dpkg-divert --rename --remove /lib/systemd/systemd-udevd
}
# prechroot CHROOT_PATH: prepare the chroot — copy the host's hosts/DNS
# config in, bind-mount /dev and mount proc/sys/devpts, generate a dbus
# machine-id, and block service starts (initctl -> /bin/true, policy-rc.d
# exiting 101) so package scripts cannot launch daemons inside the chroot.
prechroot(){
	CHROOT_PATH=$1
	[[ -z ${CHROOT_PATH} ]] && exit 101
	sudo cp /etc/hosts ${CHROOT_PATH}/etc/
	sudo rm ${CHROOT_PATH}/etc/resolv.conf -f
	sudo cp /etc/resolv.conf ${CHROOT_PATH}/etc/
	sudo mount --bind /dev ${CHROOT_PATH}/dev
	sudo chroot ${CHROOT_PATH} mount -t proc none /proc
	sudo chroot ${CHROOT_PATH} mount -t sysfs none /sys
	sudo chroot ${CHROOT_PATH} mount -t devpts none /dev/pts
	sudo chroot ${CHROOT_PATH} dbus-uuidgen | sudo tee ${CHROOT_PATH}/var/lib/dbus/machine-id
	sudo chroot ${CHROOT_PATH} dpkg-divert --local --rename --add /sbin/initctl
	sudo chroot ${CHROOT_PATH} ln -s /bin/true /sbin/initctl
	# Try to fix udevd still run problem
	sudo chroot ${CHROOT_PATH} pkill udevd || true
	#sudo chroot ${CHROOT_PATH} dpkg-divert --local --rename --add /lib/systemd/systemd-udevd
	#echo -e "#!/bin/sh\nexit 101" | sudo tee ${CHROOT_PATH}/lib/systemd/systemd-udevd
	echo -e "#!/bin/sh\nexit 101" | sudo tee ${CHROOT_PATH}/usr/sbin/policy-rc.d
	sudo chmod +x ${CHROOT_PATH}/usr/sbin/policy-rc.d
}
# Source a file if it exists; with a leading "-exec" flag, run it with
# bash instead of sourcing it.
#   source_if_exist <file>        -> source <file>
#   source_if_exist -exec <file>  -> bash <file>
source_if_exist(){
local runner target
if [[ "$1" == "-exec" ]];then
runner="bash"
# The file is the argument *after* the flag; the old code tested the
# flag itself with -f, so -exec mode never executed anything.
target="$2"
else
runner="source"
target="$1"
fi
if [ -f "$target" ];then ${runner} "$target";fi
}
# chroot_source_if_exist FILE: if FILE exists, copy it into the chroot's
# /tmp, make it executable, and run it inside the chroot with the full
# prechroot/postchroot setup and teardown around it.
# NOTE(review): relies on CHROOT_PATH being set by the caller — confirm.
chroot_source_if_exist(){
	BASENAME=$(basename $1)
	if [[ -f "$1" ]];then
		sudo cp "$1" ${CHROOT_PATH}/tmp/${BASENAME}
		sudo chmod +x ${CHROOT_PATH}/tmp/${BASENAME}
		prechroot ${CHROOT_PATH}
		chroot_do ${CHROOT_PATH} /tmp/${BASENAME}
		postchroot ${CHROOT_PATH}
	fi
}
| true |
a6a9fe7e3e524938a7b70cc1c58d4caf4f8e1740 | Shell | tarmiste/lfspkg | /archcore/svnsnap/packages/gnome-code-assistance/repos/extra-x86_64/PKGBUILD | UTF-8 | 1,277 | 2.765625 | 3 | [] | no_license | # $Id: PKGBUILD 308233 2017-10-18 23:14:07Z heftig $
# Maintainer: Jan Alexander Steffens (heftig) <jan.steffens@gmail.com>
pkgname=gnome-code-assistance
pkgver=3.16.1+6+ga46522a
pkgrel=1
pkgdesc="Code assistance services for GNOME"
arch=(i686 x86_64)
url="https://wiki.gnome.org/Projects/CodeAssistance"
license=(GPL3)
depends=(python-dbus python-gobject)
makedepends=(intltool gobject-introspection llvm clang ruby-dbus gjs vala libgee go-pie python-pylint
             gnome-common git)
optdepends=('clang: Assistance for C and C++'
            'ruby-dbus: Assistance for Ruby and CSS'
            'gjs: Assistance for JavaScript'
            'vala: Assistance for Vala'
            'libgee: Assistance for Vala'
            'go: Assistance for Go'
            'python-pylint: Assistance for Python')
groups=(gnome-extra)
# Pin the exact git commit that gets built.
_commit=a46522ac93b0ff046a80b3672a51e2d20ac8d295 # master
source=("git+https://git.gnome.org/browse/gnome-code-assistance#commit=$_commit")
sha256sums=('SKIP')
# Derive the version from git: strip the leading "v" from the tag and turn
# dashes into "+" (e.g. v3.16.1-6-ga46522a -> 3.16.1+6+ga46522a).
pkgver() {
  cd $pkgname
  git describe --tags | sed 's/^v//;s/-/+/g'
}
prepare() {
  cd $pkgname
  # Regenerate the autotools build system without running configure yet.
  NOCONFIGURE=1 ./autogen.sh
}
build() {
  cd $pkgname
  ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var \
    --libexecdir=/usr/lib
  make
}
package(){
  cd $pkgname
  make DESTDIR="$pkgdir" install
}
| true |
d8b18f590f9c4970a449b51928c31f8d175b4d3c | Shell | hagermohamed531/database_engine | /showdb.sh | UTF-8 | 436 | 3.109375 | 3 | [] | no_license | #!/bin/bash
#show all databases stored under ~/dbstorage, then prompt for navigation
clear
#cd ~/dbstorage
echo ""
green=$(tput setaf 4)
reset=$(tput sgr0)
echo "       ${green}press 8 :to exit${reset}"
echo "       ${green}press 9 :main menu${reset}"
echo ""
echo "available database : "
echo " "
#read dbname
a=$(ls ~/dbstorage)
echo "---------"
echo "$a "
echo "---------"
echo ""
read -r choice
# case with a quoted selector: an empty or multi-word reply no longer makes
# the old unquoted '[ $choice == 9 ]' test fail with a syntax error.
case "$choice" in
9)
clear
dbengine.sh
;;
8)
exit
;;
*)
showdb.sh
;;
esac
| true |
6a62731875780867dce55ebbafe1cdb4a421a08b | Shell | g23guy/sca-appliance-addon | /scripts/Configuration-Scripts-Build.sh | UTF-8 | 1,442 | 3.34375 | 3 | [] | no_license | #!/bin/bash -e
#
# This script is executed at the end of appliance creation. Here you can do
# one-time actions to modify your appliance before it is ever used, like
# removing files and directories to make it smaller, creating symlinks,
# generating indexes, etc.
#
# The 'kiwi_type' variable will contain the format of the appliance
# (oem = disk image, vmx = VMware, iso = CD/DVD, xen = Xen).
#
# NOTE(review): DPASS is the MySQL root password baked into the image in
# clear text — consider injecting it at first boot instead.
DPASS='linux'
USE_HOSTNAME='localhost'
# read in some variables
. /studio/profile
# read in KIWI utility functions
. /.kconfig
#======================================
# Creating Self-Signed HTTPS Keys
#--------------------------------------
# One-year self-signed certificate/key pair for the Apache HTTPS vhost.
openssl req -new -x509 -days 365 -keyout /etc/apache2/ssl.key/sca.key -out /etc/apache2/ssl.crt/sca.crt -nodes -subj '/O=SCA Appliance/OU=Supportconfig Analysis Appliance/CN=localhost'
#======================================
# Configure default services
#--------------------------------------
# Services to turn on
for i in 'apache2.service' 'vsftpd.service' 'sshd.service' 'after-local.service'
do
echo "* Enabling systemd Service: $i "
systemctl enable $i
done
# MySQL still ships as a SysV init script here, so enable it via chkconfig.
for i in mysql
do
echo "* Enabling SysV Service: $i "
chkconfig $i on
done
#======================================
# MySQL Configuration
#--------------------------------------
# Start MySQL once during image build so the root password can be set.
echo "* Starting MySQL "
/etc/init.d/mysql start
sleep 2
/usr/bin/mysqladmin -u root password $DPASS
echo "* MySQL Password Set "
sleep 1
| true |
ebaefbf2042975bfb116d0dc23ec6639d464c3f5 | Shell | resol341/bioinfostuff | /bulk_seq/scripts/STAR_create_count_mtx.sh | UTF-8 | 571 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env bash
# Build one gene-by-sample count matrix from STAR *_ReadsPerGene.out.tab
# files. samples.txt lists one sample name per line; the result is written
# to tmp/final_count_table.txt with a "Gene" + sample-name header row.
set -euo pipefail
# Fail early with a clear message instead of hanging on an empty 'paste'.
[[ -s samples.txt ]] || { echo "samples.txt is missing or empty" >&2; exit 1; }
mkdir -p tmp
touch tmp/tmp.out
# One column per sample: drop STAR's 4 summary lines, keep column 4
# (strand-specific counts — confirm this matches the library strandedness)
# and prepend the sample name as a one-line header.
count_files=()
while IFS= read -r sample; do
[[ -n "$sample" ]] || continue
echo "$sample"
tail -n +5 "${sample}_ReadsPerGene.out.tab" | cut -f4 > "tmp/${sample}.count"
sed -i "1s/^/${sample}\n/" "tmp/${sample}.count"
count_files+=("tmp/${sample}.count")
done < samples.txt
# Join all per-sample columns side by side (the array form is safe even if
# a sample name contains characters special to the shell).
paste "${count_files[@]}" > tmp/tmp.out
#generate the gene list column (assumes gene order is identical in every
#STAR output file, as the original script did)
line=$(head -n 1 samples.txt)
tail -n +5 "${line}_ReadsPerGene.out.tab" | cut -f1 > tmp/geneids.txt
sed -i '1s/^/Gene\n/' tmp/geneids.txt
paste tmp/geneids.txt tmp/tmp.out > tmp/final_count_table.txt
| true |
d55365b370cea032ccbed45000d43472512f82e4 | Shell | calnation/laradock | /mediawiki/runnables/init.sh | UTF-8 | 561 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Guard: this script must be run by bash proper, not a shell without BASH_SOURCE.
if [ "x$BASH_SOURCE" == "x" ]; then echo '$BASH_SOURCE not set'; exit 1; fi
# Wait before configuring — presumably to give dependent services (e.g. a
# database container) time to come up. TODO confirm.
sleep 10
# disable settings so we can ask the installer to setup database
#mv LocalSettings.php LocalSettings.php.shadow
# re-enable settings
#mv LocalSettings.php.shadow LocalSettings.php
# Run the permissions helper (see runnables/permit.sh).
bash $MW_INSTALL_PATH/runnables/permit.sh
#if [ "$MW_DB_TYPE" = "sqlite" ]; then
#bash $MW_INSTALL_PATH/runnables/setup_sqlite.sh
#elif [ "$MW_DB_TYPE" = "mysql" ]; then
#bash $MW_INSTALL_PATH/runnables/setup_mysql.sh
#fi
#
# Run the update helper (see runnables/update.sh), then start php-fpm as
# the container's foreground process.
bash $MW_INSTALL_PATH/runnables/update.sh
php-fpm
| true |
302594d70ea2efda7084a5c7cd2b9f4411ae3ae1 | Shell | akas8959/atlas-lb | /contrib/maven/jenkins/ubunto-repo/repo_builder.sh | UTF-8 | 2,057 | 3.71875 | 4 | [] | no_license | #!/bin/bash
#set -x
REPO=rs-repo
TOPDIR=""
# Pick the deployment target directory from the first argument (qa|prod).
if [ "$1" == "qa" ]; then
TOPDIR=/var/lib/jenkins/repo/ubuntu-repo
elif [ "$1" == "prod" ]; then
TOPDIR=/var/www/prod_repo/ubuntu
elif [ ! "$1" ]; then
echo "Need to know where to deploy! i.e. qa or prod"
exit 1
fi
if [ ! "$TOPDIR" ]; then
echo "Specify either qa or prod. i.e repo_builder.sh qa"
exit 1
fi
RELEASES="lucid precise trusty"
CATEGORIES="main"
ARCHES="amd64"
cd ${TOPDIR}
# serialize this
# NOTE(review): "$?" is inside double quotes, so it is expanded when the
# trap is *installed* (always 0), not when the trap fires — single quotes
# would defer the expansion.
lockfile-create LOCKFILE
trap "lockfile-remove LOCKFILE; exit $?" INT TERM EXIT
echo "PID $$ updating"
umask 002
# Move incoming .debs into the pool, bucketed by first letter and by the
# package name (the part of the filename before the first underscore).
for category in ${CATEGORIES}; do
for d in `ls ${REPO}/incoming/${category}`; do
firstletter=`echo ${d} | cut -b1`
package=`echo ${d} | cut -d_ -f1`
echo "Putting $d in ${REPO}/pool/${category}/${firstletter}/${package}"
mkdir -p ${REPO}/pool/${category}/${firstletter}/${package}
mv ${REPO}/incoming/${category}/${d} ${REPO}/pool/${category}/${firstletter}/${package}
done
done
#apt-ftparchive generate apt-ftparchive.conf
cd ${REPO}
# Regenerate the Packages index (plain, .gz and .bz2) for every release,
# then rebuild and re-sign the Release file in each dists/<release>.
for release in ${RELEASES}; do
apt-ftparchive packages --db cache/packages_all.db pool/${CATEGORIES} > Packages
rm dists/${release}/${CATEGORIES}/binary-${ARCHES}/Packages.gz
rm dists/${release}/${CATEGORIES}/binary-${ARCHES}/Packages.bz2
cp Packages dists/${release}
cp Packages dists/${release}/${CATEGORIES}/binary-${ARCHES}
gzip -9 dists/${release}/${CATEGORIES}/binary-${ARCHES}/Packages
cp Packages dists/${release}
cp Packages dists/${release}/${CATEGORIES}/binary-${ARCHES}
bzip2 -9 dists/${release}/${CATEGORIES}/binary-${ARCHES}/Packages
cp Packages dists/${release}
cp Packages dists/${release}/${CATEGORIES}/binary-${ARCHES}
rm Packages
pushd dists/${release}
rm -f Release Release.gpg
apt-ftparchive release . -o APT::FTPArchive::Release::Origin="Rackspace Cloud" -o APT::FTPArchive::Release::Codename=${release}> Release
gpg --batch -abs -o Release.gpg Release
popd
done
echo "PID $$ done"
cd ${TOPDIR}
lockfile-remove LOCKFILE
| true |
c7c96ce1d2eccfeb89612da376b02f95f094b8b6 | Shell | worriless/hello_serverless | /deploy.sh | UTF-8 | 713 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Package the source + site-packages into a zip and deploy it as the
# HelloServerless AWS Lambda function (create on first run, update after).
# virtualenv env
# source env/bin/activate
# pip install -r requirements.txt
# -f keeps the very first run (no dist/ yet) from erroring out here.
rm -Rf dist
cp -rf src dist
cp -rf env/lib/python3.6/site-packages/* dist
# Abort instead of zipping the wrong directory if dist/ is missing.
cd dist || exit 1
rm -f ./hello_serverless.zip
zip -r hello_serverless.zip .
string=$(aws lambda list-functions)
if [[ $string = *"HelloServerless"* ]]; then
aws lambda update-function-code --region "$AWSREGION" --function-name HelloServerless --zip-file fileb://hello_serverless.zip
else
aws lambda create-function --region "$AWSREGION" --function-name HelloServerless --zip-file fileb://hello_serverless.zip --role "arn:aws:iam::$AWSACCOUNTID:role/lambda_basic_execution" --handler hello_serverless.handler --runtime python3.6 --timeout 15 --memory-size 512
fi | true |
43acd8175ea7a478251e82240bcb2967605f8c9b | Shell | derp-all-day/Pastebin-private-paste-scraper | /pastebin.sh | UTF-8 | 2,745 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Enumerates every 8-character [a-z0-9] pastebin id, appending hits to the
# output file given as $1; $2 optionally resumes from a given id.
if [[ $# -eq 0 ]] ; then
echo "USAGE: $0 [OutFile] [Start (Default: aaaaaaaa)]"
exit 1
fi
# Random 32-char name for the curl cookie jar.
# NOTE(review): writing under /bin/.cookies is unusual and needs root —
# a per-user or temp directory would be expected here.
cookieJar=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1);
# When resuming, stay "flag=false" (skip) until the start id is reached.
if [[ ! -z $2 ]]; then
flag="false";
start="$2";
echo -e "\e[1m\e[33mResuming position:\e[39m $2..."
else
start="aaaaaaaa";
flag="true";
fi;
# Alphabet for each of the 8 id positions: a-z then 0-9.
chars=();for l in {{a..z},{0..9}} ; do chars+=( $l ); done;
for a in ${chars[@]}; do
for b in ${chars[@]}; do
for c in ${chars[@]}; do
for d in ${chars[@]}; do
for e in ${chars[@]}; do
for f in ${chars[@]}; do
for g in ${chars[@]}; do
for h in ${chars[@]}; do
# NOTE(review): this is csh syntax; in bash, 'set' here resets the
# positional parameters to (check = <id>), so from the first iteration on
# "$1" becomes the literal string "check" — the '>> $1' and '$0 $1'
# lines below then use the wrong file/argument. Likely unintended.
set check = "$a$b$c$d$e$f$g$h"
if [[ "$a$b$c$d$e$f$g$h" == "$start" ]]; then
flag="true";
echo "";
fi;
if [[ "$flag" == "true" ]]; then
u="https://pastebin.com/$a$b$c$d$e$f$g$h";
tput cuu 1 && tput el
echo -e "\e[1m\e[32mScanning:\e[39m $u";
ws=$(curl --silent "https://pastebin.com/$a$b$c$d$e$f$g$h" -c "/bin/.cookies/$cookieJar" -H 'user-agent: Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3' -H '_ga: GA1.2.1892489665.1576113637' -H '__gads: ID=1f4ce52383ca3f5c:T=1576113637:S=ALNI_MZG1OhID9NzqHUkZvucN64o0MVxyQ' -H 'OB-USER-TOKEN: 1ccd0e33-34c8-4f16-93ba-b06533505258' -H '__cfduid: da20816563ca2a5659b8c96a3f43db14c1580181290' -H '_gid: GA1.2.1565653877.1580181291' -H 'DigiTrust.v1.identity: eyJpZCI6ImVvQXlVcmxZM0xKSE1kSUdUZllzS292cUVGMFJDa0RZMmtjK0N0SUlUS2hPR0J1MTNRcS80aXFNUFpDcWlEYm5XSlZUcndMTGtTcjZDS2w3VnBuamU3Nkl2QVpjYXYrZHNLZmEzTHlQZFZyTk9aVVMyTkRNL0FNQ0lMZUhiTkpWWHBaT2ZobGxQa0J0UDU4SU95Q3FObWlpaEpZS1Z5VzV5cWJEMnVIcGFRKzUwUmhncDBBQ0l5bTh1bWFFZlR2QmxNQlpxZUE1UGtGaEJ5Sm5KNCthMHdRSDBqYjhTekw0b3lpTlVtN2dENDNiRlJIamRsd0JybXZQbWdCSjd6LzZDRGQyVWFJTnFneVoyL1QrbkUyZmt5U1FDc24wWndYQXc2dGxHdkdkbHB1aVZpeDJtVkRSWVVpSnNaZVZOUjVneHdCUTZtVnl4MFQ4MmZkUFJyenJ3QT09IiwidmVyc2lvbiI6MiwicHJvZHVjZXIiOiIxQ3JzZFVOQW82IiwicHJpdmFjeSI6eyJvcHRvdXQiOmZhbHNlfSwia2V5diI6NH0%3D' -H 'PM-UL-Sync: {"2":1580356991439}' -H 'PHPSESSID: vugbuthu05nh79irnu3t36o9t2' -H 'visitorGeo: US' -H 'cf_clearance: 292737e3265aec81f622d6ed876d5acfe53805ea-1580300397-0-250' -H 'InstiSession: {"id":"173beb87-a96d-4757-85d8-461d01e521cd","referrer":"pastebin.com","campaign":{"source":null,"medium":null,"campaign":null,"term":null,"content":null}}')
x=$(echo $ws | grep 'Unlisted paste, only people with this link can see this paste.')
band=$(echo $ws | grep "Pastebin.com - Access Denied Warning");
captcha=$(echo $ws | grep "captcha-bypass")
# A captcha page stops the run; re-exec from the current id after ENTER.
if [[ ! -z $captcha ]]; then
echo -e "\e[1m\e[31mCaptcha thrown Press ENTER to resume once taken care of..."
read -p "";
$0 $1 "$a$b$c$d$e$f$g$h"
exit;
fi;
# A temporary block backs off for 30 minutes before continuing.
if [[ ! -z $band ]]; then
echo -e "\e[1m\e[31mTemporarilly blocked...retrying in 30 minutes...";
sleep 1800;
echo "";
fi;
if [[ ! -z $x ]]; then
tput cuu 1 && tput el
echo -e "\e[1m\e[32mMatch found:\e[39m\e[34m $u";
echo "$u" >> $1;
echo "";
fi;
sleep $(((RANDOM % 3) + 1));
fi;
done;
done;
done;
done;
done;
done;
done;
done;
exit;
| true |
e77f1b846ef2ef21dd1782e78ca59c34d2a7e3df | Shell | pogopaule/dotfiles | /zshrc | UTF-8 | 8,343 | 2.96875 | 3 | [] | no_license | # uncomment following line and line at bottom of this file to profile startup time of oh-my-zsh
# zmodload zsh/zprof
# If you come from bash you might have to change your $PATH.
# Prepend ~/bin and /usr/local/bin so personal and locally-built tools win.
export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH="$HOME/.oh-my-zsh"
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
# ZSH_THEME="robbyrussell" # commented out in favour of starship
# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in $ZSH/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment one of the following lines to change the auto-update behavior
# zstyle ':omz:update' mode disabled # disable automatic updates
# zstyle ':omz:update' mode auto # update automatically without asking
# zstyle ':omz:update' mode reminder # just remind me to update when it's time
# Uncomment the following line to change how often to auto-update (in days).
# zstyle ':omz:update' frequency 13
# Uncomment the following line if pasting URLs and other text is messed up.
# DISABLE_MAGIC_FUNCTIONS="true"
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# You can also set it to another string to have that shown instead of the default red dots.
# e.g. COMPLETION_WAITING_DOTS="%F{yellow}waiting...%f"
# Caution: this setting can cause issues with multiline prompts in zsh < 5.7.1 (see #5765)
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# https://github.com/Aloxaf/fzf-tab#configure
# disable sort when completing `git checkout`
zstyle ':completion:*:git-checkout:*' sort false
# set descriptions format to enable group support
zstyle ':completion:*:descriptions' format '[%d]'
# set list-colors to enable filename colorizing
zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS}
# preview directory's content with exa when completing cd
zstyle ':fzf-tab:complete:cd:*' fzf-preview 'exa -1 --color=always $realpath'
# switch group using `,` and `.`
zstyle ':fzf-tab:*' switch-group ',' '.'
# text color
# see https://github.com/Aloxaf/fzf-tab/wiki/Configuration#default-color
zstyle ':fzf-tab:*' default-color $'\033[30m'
# see https://man.archlinux.org/man/fzf.1.en#color=
zstyle ':fzf-tab:*' fzf-flags --color=light
# https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/web-search
# Adds a "jira <query>" search shortcut pointing at the company Jira.
export ZSH_WEB_SEARCH_ENGINES=(jira "https://enersis.atlassian.net/browse/")
# Which plugins would you like to load?
# Standard plugins can be found in $ZSH/plugins/
# Custom plugins may be added to $ZSH_CUSTOM/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
# NOTE: plugins must be declared before oh-my-zsh.sh is sourced below.
plugins=(tmux httpie ripgrep fd git docker docker-compose gradle vi-mode npm ssh-agent tmuxinator gh fzf-tab web-search aws asdf zoxide)
# https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/ssh-agent#lazy
# Lazy mode: the agent/identities are loaded on first ssh use, not at shell start.
zstyle :omz:plugins:ssh-agent lazy yes
source $ZSH/oh-my-zsh.sh
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# Use vim everywhere (git commit messages, crontab -e, etc.).
export EDITOR='vim'
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# git
# gll: decorated, colorized one-line graph of all branches/remotes/tags.
alias gll="git log --graph --branches --remotes --tags --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset'"
alias grema="git rebase master"
alias gbd="git branch --delete"
# gnews: fetch the current branch and list commits not yet pulled.
alias gnews='git fetch origin "$(git_current_branch)" && git log "$(git_current_branch)"..origin/"$(git_current_branch)"'
# gscope/gtype: distinct conventional-commit scopes/types used in history.
alias gscope="git log --pretty=oneline | sed -E 's/^.*\((.*)\):.*$/\1/' | sed '/[0-9a-f]* .*/d' | sort | uniq"
alias gtype="git log --pretty=oneline | sed -E 's/[0-9a-f]{40} (.*)\(.*/\1/' | sed '/[0-9a-f]* .*/d' | sort | uniq"
# append aliases
# zsh-only global aliases: these expand anywhere on the line, e.g. `cmd H`.
alias -g H='| head'
alias -g T='| tail'
alias -g F='| fzf'
alias -g L="| less"
alias -g J="| jq"
alias -g Y="| xclip -selection clip"
# add confirmation
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
# vagrant
alias vs='vagrant ssh'
alias vu='vagrant up'
alias vh='vagrant halt'
alias vdd='vagrant destroy'
# docker
alias dps='docker ps'
alias dpsa='docker ps -a'
# npm
alias nr='npm run'
# better cli
# Debian/Ubuntu package fd as "fdfind"; expose it under its upstream name.
alias fd="fdfind"
alias cat='bat'
alias du="ncdu --color dark -rr -x --exclude .git --exclude node_modules"
alias ls="exa --oneline --icons"
alias tree="exa --tree --long --level=2"
alias lg="lazygit"
alias la="exa -la --icons"
alias lt="exa -la --tree --icons"
# misc
alias gw='./gradlew'
alias v="vim"
alias clr="clear"
# Deliberately break plain `cd` as a reminder to use zoxide's z/zi instead.
alias cd="echo 'Use zoxide!'"
bindkey '^R' history-incremental-search-backward
# In vi-mode, typing "jk" quickly leaves insert mode (like ESC).
bindkey 'jk' vi-cmd-mode
# Ctrl-F opens zoxide's interactive directory picker.
bindkey -s '^F' "zi\n"
# configure BAT
export BAT_THEME=ansi-light
# configure vi-mode plugin https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/vi-mode#settings
export VI_MODE_RESET_PROMPT_ON_MODE_CHANGE=true
export VI_MODE_SET_CURSOR=true
# configure FZF
# Use fd (Debian name: fdfind) as the finder, including hidden and ignored
# files but never the .git directory itself.
export FZF_DEFAULT_COMMAND='fdfind --type f --hidden --follow --no-ignore --exclude .git'
export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
export FZF_ALT_C_COMMAND='fdfind --type d --hidden --follow --no-ignore --exclude .git'
export FZF_DEFAULT_OPTS='--color light'
# fzf completion hooks: list path/dir candidates for "cmd **<TAB>".
_fzf_compgen_path() {
  fdfind --hidden --follow --no-ignore --exclude ".git" . "$1"
}
_fzf_compgen_dir() {
  fdfind --type d --hidden --follow --no-ignore --exclude ".git" . "$1"
}
# WSL 2 specific settings.
if grep -q "microsoft" /proc/version &>/dev/null; then
  # Requires: https://sourceforge.net/projects/vcxsrv/ (or alternative)
  # Point X clients at the Windows-side X server (host IP from the route table).
  export DISPLAY="$(/sbin/ip route | awk '/default/ { print $3 }'):0"
fi
# zsh settings that should not be committed to git
source ~/.zshrc.local
# https://github.com/halcyon/asdf-java#install-1
source ~/.asdf/plugins/java/set-java-home.zsh
# https://starship.rs/guide/#%F0%9F%9A%80-installation
eval "$(starship init zsh)"
# Separate each command by horizontal line https://superuser.com/a/846133/1684299
setopt promptsubst
# Prepend a terminal-width line of "·" (middle dots) above the prompt.
export PS1=$'${(r:$COLUMNS::\u00b7:)}'$PS1
# https://github.com/junegunn/fzf#using-git
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# https://github.com/junegunn/fzf/wiki/Configuring-fuzzy-completion#zsh
export FZF_COMPLETION_TRIGGER=''
bindkey '^T' fzf-completion
# $fzf_default_completion is set by the fzf script sourced above; this
# restores plain TAB completion after fzf rebinds ^I.
bindkey '^I' $fzf_default_completion
# uncomment following line and line at the very top of this file to profile startup time of oh-my-zsh
# zprof
| true |
cce1977f3b21393ed6500bd90a8f9cfd6ec5f2c9 | Shell | textarcana/jq-cookbook | /recipes.sh | UTF-8 | 21,809 | 3.84375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Trace mode: print each command before it runs, so readers following
# along can match commands to their output.
set -x
# jq Recipes
# _ _____ _
# (_) | __ \ (_)
# _ __ _ | |__) | ___ ___ _ _ __ ___ ___
# | | / _` | | _ / / _ \ / __|| || '_ \ / _ \/ __|
# | || (_| | | | \ \| __/| (__ | || |_) || __/\__ \
# | | \__, | |_| \_\\___| \___||_|| .__/ \___||___/
# _/ | | | | |
# |__/ |_| |_|
#
# I'm assuming you are following along on the command line as you read
# this: typing or pasting each command in as you go, then taking a few
# minutes to examine the output and/or the files created by the
# commands you have run. I've included the output of some commands for
# reference.
#
# The first thing that's needed in order to experiment with jq is ---
# some data! So now I will create a data file with a few lines of log
# entries. Each log entry contains a severity indicator (wrapped in
# [square brackets]) followed by a message.
# Create the sample data set: one multi-line echo writes four log lines
# (one DEBUG, two ERROR, one INFO) into example.log.
echo "[DEBUG] foo
[ERROR] bar
[ERROR] baz
[INFO] boz" > example.log
# That creates a file called example.log in your current working
# directory.
#
# Now you can use cat to verify the contents of example.log
cat example.log
# And you should see that example.log contains the following lines of
# text:
#
# [DEBUG] foo
# [ERROR] bar
# [ERROR] baz
# [INFO] boz
#
# Now, this is a toy data set but it has the advantage of being easy
# to reason about while learning! Believe me, these four lines
# actually contain enough variation to keep things interesting for the
# rest of this article =D
#
# Now that I have a logfile to analyze, I have many choices as to what
# type of analysis to perform!
#
# First off, why don't I read each line of the file into an
# array. That would be much easier to work with programmatically than a
# plain text file. So I can save myself some coding time later by
# performing this one-time transformation on my log data right now!
# Read the whole file as one string (--slurp --raw-input) and split it on
# newlines into a JSON array. rtrimstr("\n") first drops the file's
# trailing newline; without it, split("\n") appends a spurious empty
# string as a fifth array element, which later turns into a bogus
# {"severity": "null"} record in severity_index.json.
jq --slurp --raw-input --raw-output 'rtrimstr("\n") | split("\n")' example.log > log_lines.json
# That command created a new file: log_lines.json which contains the
# same information as our original log, but formatted as a JSON array
# ready for loading into ANY programmatic environment.
#
# Now when I take a look at the newly generated JSON file:
# Pretty-print the new file; jq re-parses it, so this also proves the
# generated JSON is valid.
jq . log_lines.json
# I see that it contains a JSON array like this:
#
# [
# "[DEBUG] foo",
# "[ERROR] bar",
# "[ERROR] baz",
# "[INFO] boz"
# ]
#
# Note here a key advantage of working with my own generated "toy"
# data set: it is trivially possible to verify that I (the writer) and
# you (the reader) are definitely working with the exact same data
# set. This confidence in consistency is an invaluable advantage when
# learning the fundamentals of a Turing-complete DSL like jq!
#
# Another thing to note is that I am using jq rather than cat to read
# the contents of JSON files. cat simply reads text files to the
# screen. Everything is plain text as far as cat is concerned -- cat
# has no concept of JSON! So if you have invalid JSON for some reason,
# cat will happily show it to you and never warn you about potential
# problems.
#
# On the other hand, jq CAN NOT output invalid JSON. The output from
# jq is always serialized directly into JSON from an object in memory.
#
# In other words: jq never "just prints" a JSON string. Rather, jq
# ALWAYS validates all JSON before attempting to print it out. JSON
# that cannot be validated causes jq to print nothing and exit with an
# error!
#
# So just by using jq to print out my JSON, I am implicitly testing
# that my JSON is valid!
# Data Analysis
# _____ _ _ _
# | __ \ | | /\ | | (_)
# | | | | __ _| |_ __ _ / \ _ __ __ _| |_ _ ___ _ ___
# | | | |/ _` | __/ _` | / /\ \ | '_ \ / _` | | | | / __| / __|
# | |__| | (_| | || (_| | / ____ \| | | | (_| | | |_| \__ \ \__ \
# |_____/ \__,_|\__\__,_| /_/ \_\_| |_|\__,_|_|\__, |___/_|___/
# __/ |
# |___/
#
# Now that the plain text log has been converted to JSON, I can do
# some data mining.
#
# A very common question when looking at an application log is: how
# many and what kind of errors are in this log? So I'll do that
# analysis now on my toy data set...
# Split each "[SEVERITY] message" line on spaces and build a record with
# explicit "severity" and "message" fields. (Messages containing spaces
# would be truncated to their first word -- fine for this toy data.)
jq 'map(split(" ") | {severity: "\(.[0])", message: "\(.[1])"})' log_lines.json > severity_index.json
# I've now created a new file: severity_index.json, which contains an
# array of hashes. Each hash has two keys: severity and message.
#
# When I view my newly created index file:
# View the structured records derived from the raw log lines.
jq . severity_index.json
# Then I see that it contains an array of JSON records; representing
# the original log lines as structured data:
#
# [
# {
# "severity": "[DEBUG]",
# "message": "foo"
# },
# {
# "severity": "[ERROR]",
# "message": "bar"
# },
# {
# "severity": "[ERROR]",
# "message": "baz"
# },
# {
# "severity": "[INFO]",
# "message": "boz"
# }
# ]
#
# Now if I want a count of log lines by severity, I can use the
# following expression:
# Group the records by severity, then map each group to a one-entry
# object {"<severity>": <number of records in that group>}.
jq 'group_by(.severity) | map({"\(.[0].severity)" : length})' severity_index.json > totals.json
# When I view the newly generated index of total messages by severity:
jq . totals.json
# Then I can see that it's again an array of JSON records; this time
# providing a lookup table of total message counts:
#
# [
# {
# "[DEBUG]": 1
# },
# {
# "[ERROR]": 2
# },
# {
# "[INFO]": 1
# }
# ]
#
# Now the output at this point is JSON but I could be terser if I just
# wanted human-readable output. jq provides a LOT of control over
# output formats!
#
# Here's the same query against severity_index.json, but formatted as
# human-readable plain text. Note I've moved the numbers to the left
# hand side of the output, so that they line up with the left edge of
# the screen. This is helpful when formatting data for humans to
# read.
# Same aggregation rendered as plain text: -r emits raw strings and the
# final ".[]" unwraps the array so each "<count> <severity>" gets its own line.
jq -r 'group_by(.severity) | map("\(length) \(.[0].severity)") | .[]' severity_index.json > totals.txt
# I'll use cat to view the generated file this time, because now I'm
# dealing with plain text and not JSON:
cat totals.txt
# And I can see that totals.txt is suitable for including in an email
# or echo'ing into an IRC chat room!
#
# 1 [DEBUG]
# 2 [ERROR]
# 1 [INFO]
# diff for JSON
# _ _ __ __ __ _ _____ ____ _ _
# | (_)/ _|/ _| / _| | |/ ____|/ __ \| \ | |
# __| |_| |_| |_ | |_ ___ _ __ | | (___ | | | | \| |
# / _` | | _| _| | _/ _ \| '__| _ | |\___ \| | | | . ` |
# | (_| | | | | | | || (_) | | | |__| |____) | |__| | |\ |
# \__,_|_|_| |_| |_| \___/|_| \____/|_____/ \____/|_| \_|
#
# One of the most exciting applications of jq is in testing data set
# integrity. Historically, data integrity testing has meant working
# with large, relatively static data sets that have been transformed
# and filtered through a complex Extract-Transform-Load pipeline
# (ETL).
#
# However in the Web/Mobile century, every application is having an
# ongoing API conversation with external services. The artifacts
# exchanged during these API transactions constitute a relatively
# small, rapidly changing data set.
#
# jq's "does one thing well" approach to JSON processing makes it
# possible to use a single tool to do both large-scale data integrity
# testing; as well as small-but-rapidly-iterating analysis of API
# transaction artifacts!
#
# First I will create a second data set containing only the non-error
# lines from my original example.log file:
# Build the second data set: drop the [ERROR] records and sort, so the
# later comparisons cannot rely on element order.
jq 'map(select(.severity != "[ERROR]")) | sort' severity_index.json > for_comparison.json
# I've sorted the keys in the second data set just to make the point
# that using the diff command isn't that useful when dealing with JSON
# data sets.
#
# When I view the file I will be using for comparison with my existing
# severity_index.json data set:
# Inspect the comparison file: only the DEBUG and INFO records remain.
jq . for_comparison.json
# Then I can see that I'm dealing with an array of two records.
#
# [
# {
# "severity": "[DEBUG]",
# "message": "foo"
# },
# {
# "severity": "[INFO]",
# "message": "boz"
# }
# ]
#
# The simplest test I'd ever want to perform is just finding out if
# two data sets are the same. Here's how jq lets me do that:
# --slurp wraps both input documents in one array; --exit-status makes jq
# exit non-zero when the comparison is false, so CI can fail on a mismatch.
jq --slurp --exit-status '.[0] == .[1]' severity_index.json for_comparison.json
# Note the use of the --exit-status flag, which tells jq to return a
# bad exit status if my equality test returns false. With the
# --exit-status flag in place, the code above is sufficient to create
# a Jenkins job that fails if two JSON files are not exactly identical!
#
# So much for exact equality. Moving on to the more interesting
# question of performing a useful diff on two JSON documents.
#
# I can figure out which keys are in the first document but not in the
# second document by using the subtraction operator:
# Array subtraction: elements of the first document that do not appear in
# the second (here: the two [ERROR] records filtered out earlier).
jq --slurp '.[0] - .[1]' severity_index.json for_comparison.json
# This lists out only the error keys, since those are the keys that I
# previously filtered out of the for_comparison.json document:
#
# [
# {
# "severity": "[ERROR]",
# "message": "bar"
# },
# {
# "severity": "[ERROR]",
# "message": "baz"
# }
# ]
#
# Now I'll set up my data files so that I can try a slightly more complex diff.
#
# I want to see the diff "go in both directions" so to speak --- that
# is, I want to see what happens when BOTH documents contain at least
# one key/value pair that isn't in the other document.
#
# This means I need to generate some test data. Adding new entries to
# existing JSON documents works like this in jq:
# Prepend one new DEBUG record to the comparison data, so each document
# now has at least one element the other lacks.
jq '[{severity: "[DEBUG]", message: "hello world!"}] + .' for_comparison.json > advanced_comparison.json
# I'm choosing to prepend to the data set here because I want to drive
# home my point about diff not being a good tool for this sort of
# analysis (even on very small data sets like this one). The "rules"
# for outputting JSON documents are too fast-and-loose for a tool like
# diff which was designed for logfile and plain text analysis.
#
# An interesting capability of jq is the concurrent
# application of multiple filters to the input stream while still
# returning the output as a single JSON document. So if I want to
# produce a third JSON document showing the difference between the two
# documents under comparison, I can do that like so:
# Apply both subtractions in one pass: "missing" holds elements only in
# the first document, "added" elements only in the second.
jq --slurp '{missing: (.[0] - .[1]), added: (.[1] - .[0])}' severity_index.json advanced_comparison.json > an_actual_diff.json
# Now I have created a new file: an_actual_diff.json. It contains a
# JSON object with two keys: "missing" and "added." Just like a diff!
jq . an_actual_diff.json
# And the output should look like:
#
# {
# "missing": [
# {
# "severity": "[ERROR]",
# "message": "bar"
# },
# {
# "severity": "[ERROR]",
# "message": "baz"
# }
# ],
# "added": [
# {
# "severity": "[DEBUG]",
# "message": "hello world!"
# }
# ]
# }
#
# Now I can easily create different reports. For instance I can easily
# say how many keys present in the original file, were missing from
# the comparison file:
# Turn the diff into a one-line human-readable summary via string
# interpolation of the "missing" array's length.
jq '.missing | length | "\(.) keys were not found."' an_actual_diff.json
# This should give you output like "2 keys were not found." Again,
# this sort of output is perfect for echo'ing into a chatroom or
# including in an automated notification email.
# API Testing
# _____ _____ _______ _ _
# /\ | __ \_ _| |__ __| | | (_)
# / \ | |__) || | | | ___ ___| |_ _ _ __ __ _
# / /\ \ | ___/ | | | |/ _ \/ __| __| | '_ \ / _` |
# / ____ \| | _| |_ | | __/\__ \ |_| | | | | (_| |
# /_/ \_\_| |_____| |_|\___||___/\__|_|_| |_|\__, |
# __/ |
# |___/
#
# Earlier I said that a jq expression with the --exit-status flag
# enabled is sufficient to fail a Jenkins job if a JSON document
# doesn't meet expectations.
#
# JSON documents come from all kinds of sources but typically when
# someone says "API testing" they mean that they want to craft URLs
# based on some existing, written RESTful API specification, then
# retrieve JSON documents from a remote host by requesting those URLs
# and downloading the (JSON) responses. As a final step, validation is
# performed that demonstrates the JSON document returned by an API
# query matches what one might expect based upon the API specification.
#
# Using curl to retrieve JSON responses is beyond the scope of this
# article. But do consider that there are very many places where Web
# and mobile applications expose their API data --- making a curl
# request is just the tip of the iceberg! Other options include HAR
# capture with Chrome Inspector, network traffic inspection via
# Charles or WireShark and investigation of JSON documents
# cached in your browser / filesystem.
#
# In any case, the larger point is that comparing JSON documents and
# then failing the build is easy with jq!
#
# Now I will examine some common use cases that come up when testing
# JSON documents retrieved from a remote API.
#
# Just imagine for a moment that the two files we've created (and been
# working with) are the results of two calls to different instances of
# the same API: "foohost/v1/severity_index" and
# "barhost/v2/severity_index" for the sake of pretending =D
#
# So in this imaginary API test scenario I would first go through the
# one-time step of retrieving the remote responses and saving them to
# files, something like:
#
# curl foohost/v1/severity_index > severity_index.json
# curl barhost/v2/severity_index > advanced_comparison.json
#
# Then the two API responses are saved in two files, and I can compare
# the two files as just I have been doing above.
#
# Now back to looking at the useful comparisons that jq can perform
# against two JSON documents that (should) have commonalities with
# regard to structure --- such as two "identical" responses from two
# different implementations of the same application server!
#
# The first snag one is likely to run into in data testing is... data
# dependencies that result in fragile tests. An API smoke
# test has to be dependent to some extent on the data returned (that's
# the whole point). But it sucks to have to break the build because
# someone updated the UI copy or because a cached query was
# updated.
#
# Often I can attain a sufficient level of data independence by simply
# validating that a JSON document has a top-level structure that
# matches the API specification.
#
# Here's an example of how to "diff" the top-level keys in a JSON
# document, ignoring the values of those keys:
# Compare only the top-level "severity" values (deduplicated with unique),
# ignoring the messages -- a structure-level diff of the two documents.
jq --slurp '{missing_keys: (([.[0][].severity]) - ([.[1][].severity]) | unique), added_keys: (([.[1][].severity]) - ([.[0][].severity]) | unique)}' severity_index.json advanced_comparison.json
# And I can turn that into a test that causes Jenkins to fail the
# build when the file being compared does not use all the same
# top-level keys as the original file.
# CI-friendly form: exit non-zero unless every severity present in the
# first document also appears in the second.
jq --slurp --exit-status '([.[0][].severity] | unique) - ([.[1][].severity] | unique) == []' severity_index.json advanced_comparison.json
# JSONp with jq
# _ _____ ____ _ _ _ _ _ _
# | |/ ____|/ __ \| \ | | (_) | | | (_)
# | | (___ | | | | \| |_ __ __ ___| |_| |__ _ __ _
# _ | |\___ \| | | | . ` | '_ \ \ \ /\ / / | __| '_ \ | |/ _` |
# | |__| |____) | |__| | |\ | |_) | \ V V /| | |_| | | | | | (_| |
# \____/|_____/ \____/|_| \_| .__/ \_/\_/ |_|\__|_| |_| | |\__, |
# | | _/ | | |
# |_| |__/ |_|
#
# In order to serve JSON documents to client-side JavaScript
# applications, it is convenient to be able to transfer documents
# between hosts on the Internet, outside the limitations imposed by
# the Same-Origin Policy.
#
# JSONp is one such means for transferring JSON documents across
# domains.
#
# The JSONp specification is quite simple. All I need to do to be
# compliant is to wrap a JSON document in a JavaScript function
# call. The JavaScript function in turn must be defined on the client
# side.
#
# On first encounter, JSONp can sound complex. But in practice a client side
# JavaScript implementation with jQuery looks like this:
#
# var response;
#
# var jsonpHelper = function(data) {
# response = data;
# };
#
# $.ajax({
# url: 'foohost/v1/severity_index',
# dataType: 'jsonp',
# jsonp: false,
# jsonpCallback: 'jsonpHelper'
# });
#
# And that's it.
#
# Once this code executes in the browser, the "response" global
# variable gets "hydrated" with all the data that was in the JSONp
# file from the remote host. This is a general solution for
# cross-domain JSON transfer.
#
# So this is a very convenient way to provide a JSON transaction
# capability in the client, without necessarily changing any
# configuration on the remote host.
#
# Here is how I would go about crafting a JSONp response whose payload
# is the severity_index.json file I generated, above.
# Wrap the whole document in a jsonpHelper(...) call; --raw-output emits
# the result as plain (already minified) JavaScript, not a JSON string.
jq --raw-output '"jsonpHelper(\(.));"' severity_index.json > jsonp_severity_index.json
# The newly generated file doesn't have any line breaks --- it is
# already minified and ready to serve over http. Now ANY host I choose
# can serve this JSONp response along with JavaScript code above; and
# my data can be loaded into ANY client-side process running in the
# browser!
#
# Although everything-on-one-line formatting is useful for Web
# servers, it does make it hard to read the generated code!
# Optionally I can prettify the generated JSONp code with uglifyjs,
# resulting human-readable JavaScript that I can visually verify has
# got the right data:
# Pretty-print the generated JSONp with uglifyjs; reading the file via
# redirection avoids a useless extra "cat" process in the pipeline.
uglifyjs -b < jsonp_severity_index.json
# jsonpHelper([ {
# severity: "[DEBUG]",
# message: "foo"
# }, {
# severity: "[ERROR]",
# message: "bar"
# }, {
# severity: "[ERROR]",
# message: "baz"
# }, {
# severity: "[INFO]",
# message: "boz"
# } ]);
#
# Schema Validation
# _____ _
# / ____| | |
# | (___ ___ | |__ ___ _ __ ___ __ _
# \___ \ / __|| '_ \ / _ \| '_ ` _ \ / _` |
# ____) || (__ | | | || __/| | | | | || (_| |
# |_____/ \___||_| |_| \___||_| |_| |_| \__,_|
#
#
# __ __ _ _ _ _ _
# \ \ / / | |(_) | | | | (_)
# \ \ / /__ _ | | _ __| | __ _ | |_ _ ___ _ __
# \ \/ // _` || || | / _` | / _` || __|| | / _ \ | '_ \
# \ /| (_| || || || (_| || (_| || |_ | || (_) || | | |
# \/ \__,_||_||_| \__,_| \__,_| \__||_| \___/ |_| |_|
#
# Here is a recipe that dumps the schemas (as determined by jq) from a
# JSON document.
#
# I have formatted the schemas as valid jq queries, so that I can feed
# them back into my test harness easily.
# For every path in the document, render a bracket-style jq query:
# numeric path components become [N], string components ["name"].
jq --raw-output 'paths | map(. as $item | type | if . == "number" then "[\($item)]" else "[\"" + $item + "\"]" end) | join("") | "." + .' severity_index.json > schema_dump.json
# The output from this command is a list of all the jq queries which
# are valid against the current document:
cat schema_dump.json
# Should produce output like this:
#
# .[0]
# .[0]["severity"]
# .[0]["message"]
# .[1]
# .[1]["severity"]
# .[1]["message"]
# .[2]
# .[2]["severity"]
# .[2]["message"]
# .[3]
# .[3]["severity"]
# .[3]["message"]
#
# This is a very general schema because it doesn't specify the types
# (or any characteristics) of the data *in* the fields. But since I
# have this list of all possible queries against my document, I can
# use that list to generate a script that will drill down to each end
# node in the document, producing a record of the current value of
# each node.
# Turn each schema query into a runnable "jq '<query> | type' ..." command
# line; @sh shell-quotes the query so it survives as a single argument.
jq -R -r '. + " | type" | @sh "jq \(.) severity_index.json" | .' schema_dump.json > get_types.sh
# Now get_types.sh is a runnable set of jq queries that will produce
# the type of each field for every path in the schema dump!
sh get_types.sh > schema_types.txt
jq -s '.' schema_types.txt > schema_types.json
# Fix: drop the trailing newline before splitting. Plain split("\n") left a
# spurious empty string as the last array element, which later produced an
# extra '' line and misaligned the paste(1) of queries against types.
jq -s --raw-input 'rtrimstr("\n") | split("\n")' schema_dump.json > clean_schema_dump.json
# Now I can create a config file off which I can generate the
# validation script:
jq -s --raw-output '.[][] | @sh' schema_types.json > clean_schema_types.txt
jq -s --raw-output '.[][] | @sh' clean_schema_dump.json > clean_schema_dump.txt
# One line per path: "'<query>' '<type>'".
paste -d' ' clean_schema_dump.txt clean_schema_types.txt > detailed_schemas.txt
# At this point I have a pretty complete schema specification. It
# would be helpful if the specification were organized as highly
# structured data. Here's how I would structure it:
# Rewrite each "'<query>' '<type>'" line into an executable assertion:
#   1. the space between the quoted fields becomes  | type == "
#   2. the trailing quote is adjusted to close the type comparison
#   3. the whole line is wrapped in  jq '...' severity_index.json
perl -lwpe "s{' '}{ | type == \"}; s{'$}{\"'}; s{^(.*)$}{jq \$1 severity_index.json}" detailed_schemas.txt > validation.sh
# Now validation.sh is an explicit set of automated checks for fields
# and data types of fields.
#
# There is more that could be done in the way of validation. The first
# thing I want at this point is a way to limit the depth of the tree I
# am validating. Just because an API returns a bazillion levels of
# hierarchy shouldn't mean I have to test all of them!
#
# Probably I can use the jq path command to do that...
| true |
712f549a6e6304313a81bfcd1aa1ca7a9050dac5 | Shell | aalok-thakkar/egs-artifact | /prosynth/get_time.sh | UTF-8 | 311 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Print per-benchmark Prosynth runtimes for one rule size.
# Usage: get_time.sh <rulesize>
# Output: "<benchmark>, <runtime>" per line; just "<benchmark>," when the
# benchmark has no (finished) log.
filelist="../benchmarklist.txt"
rulesize=$1
# Read one benchmark name per line (the old `for b in $(cat ...)` form
# word-split the whole file and broke on any unusual whitespace).
while IFS= read -r bench; do
	file="logs/benchmarks/$rulesize/$bench/log.txt"
	# A Datalog rule (":-") in the log marks a completed run; -q gives us
	# just the exit status and -s stays silent when the log is missing, so
	# the old `2>&1 3>&1 > /dev/null` redirection dance was unnecessary.
	if grep -qs ":-" "$file"; then
		runtime=$(sed -nE "s/Total runtime: *?(.*)/\1/p" "$file")
		# ':+' keeps the original "no trailing space" output when sed finds nothing.
		echo "$bench,${runtime:+ $runtime}"
	else
		echo "$bench,"
	fi
done < "$filelist"
| true |
b1063329f7095adf314ae5f0ec5573fceea7d051 | Shell | hanwenzhu/mlops | /program/mlperf-inference-submission/_run_truncate_accuracy_log.sh | UTF-8 | 442 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/bash
# Truncate oversized MLPerf accuracy logs before submission, keeping the
# originals in a backup directory next to the results tree.
# Required env (set by CK): CK_ENV_MLPERF_INFERENCE_RESULTS, CK_ENV_MLPERF_INFERENCE,
# CK_ENV_COMPILER_PYTHON_FILE, CK_MLPERF_SUBMITTER. Optional: CLEAN_MLPERF_BACKUP=YES.

# ':?' aborts with an error if the results dir is unset/empty, so the
# 'rm -rf' below can never resolve to a path outside the results tree
# (previously an unset variable made it "/../backup").
MLPERF_BACKUP_DIR="${CK_ENV_MLPERF_INFERENCE_RESULTS:?CK_ENV_MLPERF_INFERENCE_RESULTS must be set}/../backup"

if [ "${CLEAN_MLPERF_BACKUP}" == "YES" ] ; then
  echo "Cleaning backup dir ${MLPERF_BACKUP_DIR}"
  rm -rf -- "${MLPERF_BACKUP_DIR}"
  echo ""
fi

"${CK_ENV_COMPILER_PYTHON_FILE}" "${CK_ENV_MLPERF_INFERENCE}/tools/submission/truncate_accuracy_log.py" \
        --input "${CK_ENV_MLPERF_INFERENCE_RESULTS}" \
        --submitter "${CK_MLPERF_SUBMITTER}" \
        --backup "${MLPERF_BACKUP_DIR}"
| true |
b9e73511da9b799763b313f74492e314d2c7a802 | Shell | daxingyou/gong2clientutil | /.svn/pristine/b9/b9e73511da9b799763b313f74492e314d2c7a802.svn-base | UTF-8 | 1,065 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# SysV-style control script for the gametools Grails app, supervised by
# start-stop-daemon. Usage: gametools {start|stop|restart}
APP_NAME=gametools
APP_HOME=/home/sa/clientutil/gametools
TMP_DIR=/var/tmp
LOG_DIR=$APP_HOME/target
# Fix: the original referenced $APP_PID in start/stop without ever
# defining it, so the "already running" check and the stop action never
# actually used the pid file.
APP_PID=$APP_HOME/$APP_NAME.pid
##########
if [ ! -d "$LOG_DIR" ]; then
	mkdir -p "$LOG_DIR"
fi
##########
DAEMON=/usr/local/bin/start-stop-daemon
# APP_CMDS/DAEMON_OPTS are intentionally left unquoted at the call sites:
# the daemon expects them word-split into separate arguments.
APP_CMDS="grails \
	-Dserver.port=8090 \
	run-app"
DAEMON_OPTS="--name=$APP_NAME \
	--chdir=$APP_HOME \
	--pidfile=$APP_PID"
##########
# start: launch the app in the background; refuse if a pid file exists.
start () {
	if [ -f "$APP_PID" ]; then
		return 1
	fi
	$DAEMON --start --background $DAEMON_OPTS --exec "$JAVA_HOME/jre/bin/java" -- $APP_CMDS
};
# stop: signal via the pid file, wait up to 15s, then remove the pid file.
stop () {
	$DAEMON --stop --quiet --pidfile "$APP_PID" --retry 15
	rm -f "$APP_PID"
};
##########
case "$1" in
	start)
		echo -n 'Starting App Server: ';
		start;
		echo 'done.';
		;;
	stop)
		echo -n 'Stopping App Server: ';
		stop;
		echo 'done.';
		;;
	restart)
		echo -n 'Restarting App Server: ';
		stop;
		sleep 3s;
		start;
		echo 'done.';
		;;
	*)
		echo "Usage $0 start|stop|restart"
		exit 1;
		;;
esac;
| true |
81057a7a15b82fccad3817a7c25e7fda2539abe2 | Shell | titzer/virgil | /test/asm.bash | UTF-8 | 421 | 3.453125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Resolve the directory containing this script, following any chain of
# symlinks, so the suite can be launched from anywhere.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
  DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
  SOURCE="$(readlink "$SOURCE")"
  # A relative symlink target is interpreted relative to the link's directory.
  [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
# common.bash defines print_line and the CYAN/NORM color codes.
# Fix: quote "$DIR" here and below -- the unquoted form broke when the
# checkout path contained spaces.
. "$DIR/common.bash" asm
# Run the assembler tests for each supported architecture.
for d in asm/x86 asm/x86-64; do
    print_line
    echo "${CYAN}$d${NORM}"
    (cd "$DIR/$d" && ./test.bash)
done
| true |
b27a9d6f524ca3b6c9e657994dfb5fbd61e0fa4a | Shell | subscriptions-project/swg-js | /build_and_run_e2e.sh | UTF-8 | 340 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Build all binaries, in parallel, and fail fast if any build breaks
# (the bare `wait` used before silently ignored build failures).
build_failures=0
npx vite build -- --target=classic & classic_pid=$!
npx vite build -- --target=gaa & gaa_pid=$!
npx vite build -- --target=basic & basic_pid=$!
# Wait on each pid individually so every build's exit status is checked.
wait "$classic_pid" || ((build_failures++))
wait "$gaa_pid" || ((build_failures++))
wait "$basic_pid" || ((build_failures++))
[ "$build_failures" -eq 0 ] || exit 1

# Run all test configurations. Report failure if any run fails.
status=0
# "$@" (not unquoted $*) forwards caller arguments containing spaces intact.
npx gulp e2e "$@" || ((status++))
npx gulp e2e --env=all_experiments_enabled "$@" || ((status++))
exit $status
| true |
84c3a7c3109f16f5040fd4a22f4d4d89d8e3dd0c | Shell | ManchersterByTheSea/openBilibili | /go-common/build/unit_test.sh | UTF-8 | 8,821 | 3.625 | 4 | [] | no_license | #!/bin/bash
# GitLab / Sven endpoints used by the CI helper functions below.
CI_SERVER_URL="http://git.bilibili.co"
CI_UATSVEN_URL="http://uat-sven.bilibili.co"
# NOTE(review): a private access token is committed in plain text here --
# it should be injected from CI secret variables instead.
CI_PRIVATE_TOKEN="WVYk-ezyXKq-C82v-1Bi"
CI_PROJECT_ID="682"
# Commit under test; PULL_PULL_SHA is provided by the CI environment.
CI_COMMIT_SHA=${PULL_PULL_SHA}
# Overall exit status accumulated by the test loop (0 = success).
exitCode=0
# get packages
dirs=(dao)
declare packages
# Collect package directories under $1 that match "library" or one of
# ${dirs[@]}, and that contain at least one .go file. Each hit is appended
# to the global ${packages} as "go-common/<path> ".
# $1: repo-relative root to scan (e.g. "app")
function GetPackages(){
# Build an extended-regex alternation: library|<root>/dao(/.*)*|...
reg="library"
length=${#dirs[@]}
for((i=0;i<length;i++))
do
reg+="|$1/"${dirs[i]}"(/.*)*"
done
for value in `find $1 -type d |grep -E ${reg}`
do
# Only keep directories that actually contain Go sources.
len=`ls ${value}| grep .go | wc -l`
if [[ ${len} -gt 0 ]];then
packages+="go-common/"${value}" "
fi
done
}
# upload data to apm
# $1: SvenURL
# $2: file result.out path
# Upload one package's UT artifacts (result.out, cover.html, coverage.dat)
# to the APM endpoint and return the server-reported result code.
# $1: SvenURL (upload endpoint with query parameters)
# $2: directory containing the artifact files
# Reads ${pkg} from the caller's loop for the warning message.
function Upload () {
if [[ ! -f "$2/result.out" ]] || [[ ! -f "$2/cover.html" ]] || [[ ! -f "$2/coverage.dat" ]]; then
echo "==================================WARNING!======================================"
echo "No test found!~ 请完善如下路径测试用例: ${pkg} "
exit 1
fi
json=$(curl $1 -H "Content-type: multipart/form-data" -F "html_file=@$2/cover.html" -F "report_file=@$2/result.out" -F "data_file=@$2/coverage.dat")
# Empty response body means the upload itself failed.
if [[ "${json}" = "" ]]; then
echo "shell.Upload curl $1 fail"
exit 1
fi
# Unpack the JSON envelope returned by the server.
msg=$(echo ${json} | jq -r '.message')
data=$(echo ${json} | jq -r '.data')
code=$(echo ${json} | jq -r '.code')
if [[ "${data}" = "" ]]; then
echo "shell.Upload curl $1 fail,data return null"
exit 1
fi
echo "=============================================================================="
if [[ ${code} -ne 0 ]]; then
echo -e "返回 message(${msg})"
echo -e "返回 data(${data})\n\n"
fi
# Propagate the server's result code to the caller.
return ${code}
}
# GoTest execute go test and go tool
# $1: pkg
# Run `go test` for one package and render the coverage profile as HTML.
# $1: package import path.
# Side effects: writes result.out, cover.out and cover.html in the CWD.
function GoTest(){
    # Quote the package argument so an unusual path does not get word-split.
    go test -v "$1" -coverprofile=cover.out -covermode=set -convey-json -timeout=60s > result.out
    go tool cover -html=cover.out -o cover.html
}
# BazelTest execute bazel coverage and go tool
# $1: pkg
# Run the Bazel coverage target for one package and convert the coverage
# data to HTML.
# $1: package path prefixed with "go-common/" (as produced by GetPackages)
function BazelTest(){
# "go-common/app/x" -> "//app/x:go_default_test": replacing "go-common"
# with "/" yields the leading "//" of a Bazel label.
pkg=${1//go-common//}":go_default_test"
# Repo-relative path without the "go-common/" prefix.
path=${1//go-common\//}
bazel coverage --instrumentation_filter="//${path}[:]" --test_env=DEPLOY_ENV=uat --test_timeout=60 --test_env=APP_ID=bazel.test --test_output=all --cache_test_results=no --test_arg=-convey-json ${pkg} > result.out
# An empty result.out means bazel produced no test output at all.
if [[ ! -s result.out ]]; then
echo "==================================WARNING!======================================"
echo "No test case found,请完善如下路径测试用例: ${pkg} "
exit 1
else
echo $?
# Strip monkey-patching helper files from the coverage profile.
cat bazel-out/k8-fastbuild/testlogs/${path}/go_default_test/coverage.dat | grep -v "/monkey.go" > coverage.dat
go tool cover -html=coverage.dat -o cover.html
fi
}
# UTLint check the *_test.go files in the pkg
# $1: pkg
# Sanity-check that the package contains *_test.go files holding at least
# one test function and one convey-style assertion; aborts the run otherwise.
# $1: package path prefixed with "go-common/"
function UTLint()
{
path=${1//go-common\//}
declare -i numCase=0
declare -i numAssertion=0
# NOTE(review): parses `ls` output; works because Go file names contain no
# whitespace, but `find` would be more robust.
files=$(ls ${path} | grep -E "(.*)_test\.go")
# ${#files} is the LENGTH of the joined string; zero length <=> no files.
if [[ ${#files} -eq 0 ]];then
echo "shell.UTLint no *_test.go files in pkg:$1"
exit 1
fi
for file in ${files}
do
# Count "func TestXxx(t *testing.T) {" definitions and "So(...)" assertions.
numCase+=`grep -c -E "^func Test(.+)\(t \*testing\.T\) \{$" ${path}/${file}`
numAssertion+=`grep -c -E "^(.*)So\((.+)\)$" ${path}/${file}`
done
if [[ ${numCase} -eq 0 || ${numAssertion} -eq 0 ]];then
echo -e "shell.UTLint no test case or assertion in pkg:$1"
exit 1
fi
echo "shell.UTLint pkg:$1 succeeded"
}
# upload path to apm
# $1: SvenURL
# $2: file result.out path
# Upload the app directory/owner listing (path.out) to the APM endpoint.
# $1: upload URL
# $2: directory containing path.out
function UpPath() {
    # Quote both expansions so URLs/paths with special characters are not
    # word-split or glob-expanded.
    curl "$1" -H "Content-type: multipart/form-data" -F "path_file=@$2/path.out"
}
# Build path.out: a JSON array mapping every app directory (up to depth 3)
# that contains a CONTRIBUTORS.md to its comma-separated owner list, read
# from the "# Owner" section of that file.
function ReadDir(){
# get go-common/app all dir path
PathDirs=`find app -maxdepth 3 -type d`
value=""
for dir in ${PathDirs}
do
if [[ -d "$dir" ]];then
for file in `find ${dir} -maxdepth 1 -type f |grep "CONTRIBUTORS.md"`
do
owner=""
# Path suffix after the "go-common" component.
substr=${dir#*"go-common"}
# Collect names listed under "# Owner" until a blank line or the next
# "#" heading ends the section.
while read line
do
if [[ "${line}" = "# Owner" ]];then
continue
elif [[ "${line}" = "" ]]|| [[ "${line}" = "#"* ]];then
break
else
owner+="${line},"
fi
done < ${file}
# ${owner%,} drops the trailing comma from the owner list.
value+="{\"path\":\"go-common${substr}\",\"owner\":\"${owner%,}\"},"
done
fi
done
# delete "," at the end of value
value=${value%,}
echo "[${value}]" > path.out
}
# start work
# Main workflow: discover changed packages, find the merge request for this
# commit, lint/test/upload every package, then upload the ownership listing.
# $1: repo-relative root to scan for packages
function Start(){
GetPackages $1
if [[ ${packages} = "" ]]; then
echo "shell.Start no change packages"
exit 0
fi
#Get gitlab result
gitMergeRequestUrl="${CI_SERVER_URL}/api/v4/projects/${CI_PROJECT_ID}/repository/commits/${CI_COMMIT_SHA}/merge_requests?private_token=${CI_PRIVATE_TOKEN}"
gitCommitUrl="${CI_SERVER_URL}/api/v4/projects/${CI_PROJECT_ID}/repository/commits/${CI_COMMIT_SHA}/statuses?private_token=${CI_PRIVATE_TOKEN}"
mergeJson=$(curl -s ${gitMergeRequestUrl})
commitJson=$(curl -s ${gitCommitUrl})
# Both queries returning non-empty arrays means an MR exists for the commit.
if [[ "${mergeJson}" != "[]" ]] && [[ "${commitJson}" != "[]" ]]; then
merge_id=$(echo ${mergeJson} | jq -r '.[0].iid')
exitCode=$?
if [[ ${exitCode} -ne 0 ]]; then
echo "shell.Start curl ${gitMergeRequestUrl%=*}=*** error .return(${mergeJson})"
exit 1
fi
username=$(echo ${mergeJson} | jq -r '.[0].author.username')
authorname=$(echo ${commitJson} | jq -r '.[0].author.username')
else
echo "Test not run, maybe you should try create a merge request first!"
exit 0
fi
#Magic time
# Magic may exit 0 here if the MR carries a "+skiput" note.
Magic
#Normal process
# Lint, bazel-test and upload every discovered package in turn.
for pkg in ${packages}
do
svenUrl="${CI_UATSVEN_URL}/x/admin/apm/ut/upload?merge_id=${merge_id}&username=${username}&author=${authorname}&commit_id=${CI_COMMIT_SHA}&pkg=${pkg}"
echo "shell.Start ut lint pkg:${pkg}"
UTLint "${pkg}"
echo "shell.Start Go bazel test pkg:${pkg}"
BazelTest "${pkg}"
Upload ${svenUrl} $(pwd)
exitCode=$?
if [[ ${exitCode} -ne 0 ]]; then
echo "shell.Start upload fail, status(${exitCode})"
exit 1
fi
done
# upload all dirs
ReadDir
pathUrl="${CI_UATSVEN_URL}/x/admin/apm/ut/upload/app"
UpPath ${pathUrl} $(pwd)
# NOTE(review): "finshed" is a typo in user-visible output.
echo "UpPath has finshed...... $(pwd)"
return 0
}
# Check determine whether the standard is up to standard
#$1: commit_id
# Query the UT gate for commit $1, print the verdict, and set the global
# exitCode to 1 when the coverage/pass-rate thresholds are not met.
# $1: commit id
function Check(){
# Ask the server to publish the report back to the Gitlab MR first.
curl "${CI_UATSVEN_URL}/x/admin/apm/ut/git/report?project_id=${CI_PROJECT_ID}&merge_id=${merge_id}&commit_id=$1"
checkURL="${CI_UATSVEN_URL}/x/admin/apm/ut/check?commit_id=$1"
json=$(curl -s ${checkURL})
code=$(echo ${json} | jq -r '.code')
if [[ ${code} -ne 0 ]]; then
echo -e "curl ${checkURL} response(${json})"
exit 1
fi
# Unpack the gate metrics from the JSON payload.
package=$(echo ${json} | jq -r '.data.package')
coverage=$(echo ${json} | jq -r '.data.coverage')
passRate=$(echo ${json} | jq -r '.data.pass_rate')
standard=$(echo ${json} | jq -r '.data.standard')
increase=$(echo ${json} | jq -r '.data.increase')
tyrant=$(echo ${json} | jq -r '.data.tyrant')
lastCID=$(echo ${json} | jq -r '.data.last_cid')
# "tyrant" == "true" means the thresholds were NOT reached; the string is
# executed as a command, so it must be exactly "true" or "false".
if ${tyrant}; then
echo -e "\t续命失败!\n\t大佬,本次执行结果未达标哦(灬ꈍ ꈍ灬),请再次优化ut重新提交🆙"
echo -e "\t---------------------------------------------------------------------"
printf "\t%-14s %-14s %-14s %-14s\n" "本次覆盖率(%)" "本次通过率(%)" "本次增长量(%)" 执行pkg
printf "\t%-13.2f %-13.2f %-13.2f %-12s\n" ${coverage} ${passRate} ${increase} ${package}
echo -e "\t(达标标准:覆盖率>=${standard} && 通过率=100% && 同比当前package历史最高覆盖率的增长率>=0)"
echo -e "\t---------------------------------------------------------------------"
exitCode=1
else
echo -e "\t恭喜你,续命成功,可以请求MR了"
fi
}
# Magic ignore method Check()
function Magic(){
url="http://git.bilibili.co/api/v4/projects/${CI_PROJECT_ID}/merge_requests/${merge_id}/notes?private_token=${CI_PRIVATE_TOKEN}"
json=$(curl -s ${url})
for comment in $(echo ${json} | jq -r '.[].body')
do
if [[ ${comment} == "+skiput" ]]; then
exit 0
fi
done
}
# run
# Entry point: $1 is the directory root to scan for changed packages.
Start $1
echo -e "【我们万众一心】:"
# Evaluate the coverage/pass-rate gate for this commit (may set exitCode=1).
Check ${CI_COMMIT_SHA}
echo -e "本次执行详细结果查询地址请访问:http://sven.bilibili.co/#/ut?merge_id=${merge_id}&&pn=1&ps=20"
# Exit with the gate verdict so CI fails the job when the gate failed.
if [[ ${exitCode} -ne 0 ]]; then
echo -e "执行失败!!!请解决问题后再次提交。具体请参考:http://info.bilibili.co/pages/viewpage.action?pageId=9841745"
exit 1
else
echo -e "执行成功."
exit 0
fi
| true |
8a77b6ca888417f5155b73b92a9197884759ec8c | Shell | pivotal/paving | /ci/scripts/update-ci.sh | UTF-8 | 419 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -euo pipefail

# Absolute directory of this script, independent of the caller's CWD.
WORKING_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
echo "$WORKING_DIR"

# Verify the required tools are on PATH. `command -v` is the portable
# lookup, and a brace group (instead of the original subshell) makes the
# failure path explicit: `exit 1` terminates the script directly rather
# than relying on `set -e` to propagate the subshell's status.
command -v ytt >/dev/null || {
  echo "This requires ytt to be installed" >&2
  exit 1
}
command -v fly >/dev/null || {
  echo "This requires fly to be installed" >&2
  exit 1
}

echo "Setting CI pipeline..."
fly -t platform-automation sp -p paving-ci -c "${WORKING_DIR}/../pipelines/pipeline.yml" \
  --check-creds
| true |
a322d18d9376484fbc57cce3872ed42631060f1b | Shell | sonipl/my-scripts | /setup-bond.sh | UTF-8 | 1,928 | 3 | 3 | [] | no_license | ##Input VALUE###
# Interactively collect all bonding parameters. `read -r` prevents
# backslash mangling in user input.
echo "Enter Primary Interface: " ;read -r INTF1
echo "Enter Secondory Interface:";read -r INTF2
echo "Enter VLAN ID:";read -r VLANID
echo "Enter IP Address :";read -r IPADR
echo "Enter PREFIX example 23, 24 :"; read -r PREFX
echo "Enter Gateway :"; read -r GWY
echo "INTERFACES $INTF1 and $INTF2 , VLAN = $VLANID IP = $IPADR Mask=$PREFX GW=$GWY"
echo "Creating bond with above values, Press enter if values are correct or CTRL+C for exit";read -r ans
##########
WKDIR="/home/psoni/etc/sysconfig/network-scripts"
#WKDIR="/etc/sysconfig/network-scripts"
WKFILE1="$WKDIR/ifcfg-$INTF1"
WKFILE2="$WKDIR/ifcfg-$INTF2"
WKFILE3="$WKDIR/ifcfg-bond0"
WKFILE4="$WKDIR/ifcfg-bond0.$VLANID"
##########
# Write one slave-interface config file. A here-document replaces the long
# chain of echo appends; variable expansion inside it is unchanged.
# $1: destination file   $2: interface name
write_slave_config () {
    cat > "$1" <<EOF
TYPE=Ethernet
NAME=$2
DEVICE=$2
ONBOOT=yes
MASTER=bond0
SLAVE=yes
EOF
}
echo "Creating interface ifcfg-$INTF1......"; sleep 5
write_slave_config "$WKFILE1" "$INTF1"
##########
echo "Creating interface ifcfg-$INTF2......"; sleep 5
write_slave_config "$WKFILE2" "$INTF2"
##########
echo "Creating Bond0 interface......"; sleep 5
# BUGFIX: the bonding options key is BONDING_OPTS. The original wrote
# BONDING_OPS, which the RHEL network scripts silently ignore, leaving the
# bond with default mode/miimon settings instead of mode=1 miimon=100.
cat > "$WKFILE3" <<EOF
DEVICE=bond0
NAME=bond0
TYPE=bond
BONDING_MASTER=yes
ONBOOT=yes
BONDING_OPTS="mode=1 miimon=100"
EOF
##########
echo "Creating Bond interface for VLAN $VLANID......"; sleep 5
cat > "$WKFILE4" <<EOF
TYPE=vlan
DEVICE=bond0.$VLANID
PHYSDEV=bond0
ONBOOT=yes
VLAN=yes
VLAN_ID=$VLANID
IPADDR=$IPADR
PREFIX=$PREFX
GATEWAY=$GWY
EOF
##########
c62bcd57e23cbcde5cf529764758dbe78a0fdb8f | Shell | TeachAtTUM/backup-scripts | /dump-mysql.sh | UTF-8 | 925 | 2.796875 | 3 | [] | no_license | if (( $# != 1 ))
then
    echo "Please supply a backup directory" >&2
    exit 1
fi

MYSQL_USER="backup"
DUMP_DIR=$1

# -p: do not fail if the directory already exists (e.g. on re-runs).
mkdir -p "$DUMP_DIR"

# Dump each schema to <dir>/<name>.sql. A single loop keeps the database
# list in one place instead of nine near-identical command lines.
# Groups: edxapp, E-Commerce, MySQL users, Analytics.
for db in edxapp edxapp_csmh dashboard reports xqueue ecommerce mysql analytics analytics-api; do
    mysqldump -u "$MYSQL_USER" "$db" > "$DUMP_DIR/$db.sql"
done

# Mongo
#mongodump -o $DUMP_DIR/mongo
# Contains MONGO_PASSWORD=my_password
. ~/.appsecrets
# Feed the password on stdin so it never appears in `ps` output.
echo "$MONGO_PASSWORD" | mongodump -o "$DUMP_DIR/mongo" --authenticationDatabase admin -u admin -p
| true |
dee817ab47e36c51bf965e4fe3d76aca859d12e7 | Shell | clipos-archive/clipos4_portage-overlay-clip | /app-clip/clip-install-rm/files/_debian/prerm | UTF-8 | 420 | 3 | 3 | [] | no_license | #!/bin/sh
# Copyright 2018 ANSSI
# Distributed under the terms of the GNU General Public License v2
# Remove clip_install from the crontab
# Strip clip_install entries from root's crontab (grep -v keeps every
# other line) and swap the rewritten file into place.
/bin/grep -v clip_install /etc/cron/crontab/root > /etc/cron/crontab/root.new
/bin/mv /etc/cron/crontab/root.new /etc/cron/crontab/root
# If the crontab is now empty, remove it entirely. `test -s` (size > 0) is
# the direct check; the original parsed `ls -s` block counts through awk.
if [ ! -s /etc/cron/crontab/root ]
then
	# File root is empty
	/bin/rm /etc/cron/crontab/root
fi
| true |
d8360ce0cf3dfbfa4dbfd4dd3631d957669efa69 | Shell | BearFather/BearFather-s-Admin-panel | /old/scripts/drop.cmd | UTF-8 | 1,134 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Resolve the script's own directory and load site-specific settings
# (expected to define $w1..$w9, $bkloc, $dbtloc, $dbloc and $dbploc --
# confirm against settings.web).
dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "$dir/settings.web"

# drop.lck acts as a crude mutex: skip the whole run while a previous
# invocation is still active.
if [ ! -e "$dir/drop.lck" ]
then
	TIME=`date +%m%d%g`
	touch "$dir/drop.lck"

	# Stage every configured world ($w1..$w9) from the backup location,
	# dropping the rdiff-backup metadata. Indirect expansion (${!var})
	# replaces the nine duplicated if-blocks of the original.
	for i in 1 2 3 4 5 6 7 8 9; do
		wvar="w$i"
		world="${!wvar}"
		if [ -n "$world" ]; then
			cp -r "$bkloc/$world/" "$dbtloc/"
			rm -r "$dbtloc/$world/rdiff-backup-data"
		fi
	done

	# Pack the staged worlds, rotate the old archive out of the Dropbox
	# folder and move the new one in.
	rar -df -ep1 a "$dbtloc/worlds_${TIME}.rar" "$dbtloc"/*
	"$dbploc/dropbox.py" start
	rm "$dbloc"/worlds_*.rar
	mv "$dbtloc/worlds_${TIME}.rar" "$dbloc/"
	rm "$dir/drop.lck"
fi
| true |
003f5de8a5f492b4b51f608e047a52159a994aba | Shell | kissthink/c0moshack | /dotfiles/zshrc | UTF-8 | 3,178 | 2.703125 | 3 | [] | no_license | # Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="afowler"
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# Set to this to use case-sensitive completion
# CASE_SENSITIVE="true"
# Comment this out to disable weekly auto-update checks
# DISABLE_AUTO_UPDATE="true"
# Uncomment following line if you want to disable colors in ls
# DISABLE_LS_COLORS="true"
# Uncomment following line if you want to disable autosetting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment following line if you want red dots to be displayed while waiting for completion
COMPLETION_WAITING_DOTS="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
plugins=(git)
source $ZSH/oh-my-zsh.sh
# Customize to your needs...
export PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin:/usr/local/git/bin:/opt/local/bin
ZSH_THEME_GIT_PROMPT_PREFIX=" on %{$fg[magenta]%}"
ZSH_THEME_GIT_PROMPT_SUFFIX="%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_DIRTY="%{$fg[green]%}!"
ZSH_THEME_GIT_PROMPT_UNTRACKED="%{$fg[green]%}?"
ZSH_THEME_GIT_PROMPT_CLEAN=""
# -------------------------------------------------------------------
# Git aliases
# -------------------------------------------------------------------
alias ga='git add -A'
alias gp='git push'
alias gl='git log'
alias gs='git status'
alias gd='git diff'
alias gm='git commit -m'
alias gma='git commit -am'
alias gb='git branch'
alias gc='git checkout'
alias gra='git remote add'
alias grr='git remote rm'
alias gpu='git pull'
alias gcl='git clone'
alias gta='git tag -a -m'
alias gf='git reflog'
# leverage an alias from the ~/.gitconfig
alias gh='git hist'
alias glg1='git lg1'
alias glg2='git lg2'
alias glg='git lg'
# -------------------------------------------------------------------
# Capistrano aliases
# -------------------------------------------------------------------
alias capd='cap deploy'
# -------------------------------------------------------------------
# OTHER aliases
# -------------------------------------------------------------------
alias cl='clear'
# -------------------------------------------------------------------
# FUNCTIONS
# -------------------------------------------------------------------
# return my IP address
function myip() {
ifconfig lo0 | grep 'inet ' | sed -e 's/:/ /' | awk '{print "lo0 : " $2}'
ifconfig en0 | grep 'inet ' | sed -e 's/:/ /' | awk '{print "en0 (IPv4): " $2 " " $3 " " $4 " " $5 " " $6}'
ifconfig en0 | grep 'inet6 ' | sed -e 's/ / /' | awk '{print "en0 (IPv6): " $2 " " $3 " " $4 " " $5 " " $6}'
ifconfig en1 | grep 'inet ' | sed -e 's/:/ /' | awk '{print "en1 (IPv4): " $2 " " $3 " " $4 " " $5 " " $6}'
ifconfig en1 | grep 'inet6 ' | sed -e 's/ / /' | awk '{print "en1 (IPv6): " $2 " " $3 " " $4 " " $5 " " $6}'
} | true |
d71b4d251c0916816fb405d46a395c862e9e7c22 | Shell | jesuslou/LouEngine | /tools/projects/packerTool/generate_project_release.sh | UTF-8 | 395 | 3.53125 | 4 | [
"BSD-3-Clause",
"MIT",
"Zlib"
] | permissive | #!/bin/bash
# Create (if needed) and enter the out-of-source build directory, then run
# CMake for a Release-only configuration with tests disabled.
current_dir=$(pwd)
project_dir="${current_dir}/project-release"
if [ ! -d "${project_dir}" ]; then
	mkdir "${project_dir}"
fi
cd "${project_dir}"

# On macOS an extra -G flag is appended to the CMake invocation; it is
# intentionally left unquoted below so it disappears when empty.
OSX_FLAG=""
PLATFORM=$(uname)
case "${PLATFORM}" in
	Darwin) OSX_FLAG="-G" ;;
esac

cmake "${current_dir}" -DCMAKE_CONFIGURATION_TYPES="Release" -DENABLE_TESTS=0 -DCOPY_BINARY=1 ${OSX_FLAG}
read -p "Press any key to continue..."
| true |
45269f0d83600814007c86a7abf3d2ceeca93a55 | Shell | vito-trianni/more | /user/user_scripts/create_behavior_controller.sh | UTF-8 | 15,180 | 4.40625 | 4 | [] | no_license | #!/bin/bash
#
# Author: Carlo Pinciroli <cpinciro@ulb.ac.be>
# Arne Brutschy <arne.brutschy@ulb.ac.be>
# Eliseo Ferrante <eferrant@ulb.ac.be>
# Nithin Mathews <nmathews@ulb.ac.be@ulb.ac.be>
#
# This script creates a new controller using the sample_bt_controller
# from user nithin as template.
#
AWK=
SED=
###############################################################################
# Displays a welcome message.
#
# Print the banner and a short usage explanation for this script.
function welcome_message ( ) {
    # A quoted here-document emits the exact same lines (including the
    # surrounding blank lines) as the original chain of echo calls.
    cat <<'EOF'

+++++ [ CREATE A NEW BEHAVIOR CONTROLLER ] +++++

This script helps you creating a new BEHAVIOR controller. Please select
the robot type you want to create the new BEHAVIOR controller for.

EOF
}
###############################################################################
# Checks that the needed programs are present.
#
# Locate awk and sed, storing their absolute paths in the globals AWK and
# SED. Aborts the whole script if either tool is missing.
function check_requirements ( ) {
    # `command -v` is the portable tool lookup; the original spawned a
    # subshell around the deprecated `which` and invoked it twice per tool.
    # Exit code normalized from the non-portable `exit -1` to `exit 1`.
    AWK=$(command -v awk) || {
        echo "[FATAL] Unable to execute the script: awk has not been found."
        exit 1
    }
    SED=$(command -v sed) || {
        echo "[FATAL] Unable to execute the script: sed has not been found."
        exit 1
    }
}
###############################################################################
# Checks if a subdirectory with the passed name already exists.
# PARAM : $1 The subdirectory name to check
# RETURN: 0 If the subdirectory exists, 1 otherwise.
#
# Report whether a filesystem entry named $1 already exists. Prints a
# warning and returns 0 when it does; returns 1 silently otherwise.
function directory_exists ( ) {
    if [ ! -e "$1" ]; then
        return 1
    fi
    echo
    echo "A directory called $1 already exists, choose a different name."
    return 0
}
###############################################################################
# Creates a subdirectory with the passed name.
# PARAM $1: The directory name.
#
# Create directory $1, aborting the whole script with a diagnostic when
# mkdir fails (e.g. missing permissions or an existing entry).
function create_directory ( ) {
    echo "Creating directory $1..."
    if ! mkdir "$1"; then
        echo "Unexpected error while creating directory $1" 1>&2
        echo "Check if you have the right permissions."
        exit 1
    fi
    echo "Success."
}
###############################################################################
# Asks the user to type the wanted controller tag name until a name of a
# non-existing subdirectory is given. Then the name is returned.
# PARAM $1: The robot type
# RETURN: The controller tag name.
#
# Prompt (repeatedly) for the controller XML tag name until a non-empty
# name is given whose directory does not exist yet, then print the result.
# PARAM $1: The robot type (used to build the default name)
# NOTE(review): directory_exists echoes its warning to stdout, which the
# caller's command substitution captures into the result -- confirm that a
# rejected first attempt cannot pollute the returned tag name.
function get_controller_tag_name ( ) {
local tagname
local controller_name="bt_$1_new_controller"
read -ep "Insert controller XML tag name [$controller_name]: " tagname
# An empty answer accepts the suggested default.
if [ x"$tagname" = x ]; then
tagname="$controller_name"
fi
while ( test "x$tagname" = x || directory_exists "$tagname" ); do
read -ep "Insert controller XML tag name: " tagname
done
echo $tagname
}
###############################################################################
# Capitalizes initial character
# PARAM $1: string to be capitalzed
#
# Capitalize the initial character of the argument string(s) and print the
# result to stdout. Multiple arguments are first joined with single spaces.
capitalize_ichar ()
{
    local joined="$@"
    local head=${joined:0:1}
    local tail=${joined:1}
    head=$(echo "$head" | tr a-z A-Z)
    echo "${head}${tail}"
}
###############################################################################
# Convert the xml tag name of the controller to a possible controller class name
# PARAM $1: xml tag name
#
# Convert an underscore-separated XML tag name ("foo_bar") into a
# CamelCase class name ("FooBar") and print it to stdout.
# PARAM $1: xml tag name
convert_tag_to_classname ()
{
    # Split on underscores, restoring IFS immediately afterwards.
    local old_ifs="$IFS"
    IFS="_"
    local -a parts=( $1 )
    IFS="$old_ifs"
    # BUGFIX: the accumulator is now initialized and local. The original
    # appended to an uninitialized global "class", so a second call would
    # concatenate onto the result of the first one.
    local class=""
    local part
    for part in "${parts[@]}"
    do
        class=${class}`capitalize_ichar $part`
    done
    echo "$class"
}
###############################################################################
# Asks the user to type the wanted controller class name until a name of a
# non-existing subdirectory is given. Then the name is returned.
# PARAM $1: xml tag name
# RETURN: The controller class name.
#
# Prompt for the controller class name, suggesting a default of
# "C<Robot>NewController" derived from the global REQ_ROBOT_TYPE, and
# print the chosen (or default) name.
# PARAM $1: xml tag name (currently unused by the body -- kept for callers)
function get_controller_class_name ( ) {
local classname
local robot
# Map the robot type to the CamelCase prefix used in class names.
if [ $REQ_ROBOT_TYPE == footbot ]; then
robot="FootBot"
fi
if [ $REQ_ROBOT_TYPE == handbot ]; then
robot="HandBot"
fi
if [ $REQ_ROBOT_TYPE == eyebot ]; then
robot="EyeBot"
fi
local controller_name="C"$robot"NewController"
read -ep "Insert controller class name [$controller_name]: " classname
# An empty answer accepts the suggested default.
if [ x"$classname" = x ]; then
classname="$controller_name"
fi
echo $classname
}
###############################################################################
# Copies all the files in .template to the given subdirectory.
# PARAM $1: The controller tag name.
# PARAM $2: The controller class name.
# PARAM $3: The desired robot type.
# PARAM $4: The root behavior tag name
# PARAM $5: The root behavior class name
#
# Copy the template controller into the new directory, rename the files and
# rewrite class/tag names (and robot names) inside them with sed, then
# register the new controller in Makefile.am.
# PARAM $1: The controller tag name (also the target directory).
# PARAM $2: The controller class name.
# PARAM $3: The desired robot type.
# PARAM $4: The root behavior tag name.
# PARAM $5: The root behavior class name.
# Uses the globals EXPERIENCED_USER, AHSSINSTALLDIR, SED and AWK.
function populate_structure ( ) {
# NOTE(review): robottype is assigned but the body keeps using $3 directly.
local robottype=$3
# Experienced users get the bare template; others get the one with
# example states and transitions (see check_user_experience).
if [[ $EXPERIENCED_USER == "1" ]]
then
path_to_template="$AHSSINSTALLDIR/user/.template/controllers/sample_bt_advanced_controller"
else
path_to_template="$AHSSINSTALLDIR/user/.template/controllers/sample_bt_controller"
fi
echo "Populating controller directory..."
if [ ! -e $path_to_template ]; then
echo "Error: The controller template directory $path_to_template" 1>&2
echo "does not exist. This should not happen. I'm sorry, but someone messed up." 1>&2
exit 1
fi
echo "Using $path_to_template as template..."
cp -rf $path_to_template/* "$1"
if [ ! $? -eq 0 ]; then
echo "Unexpected error while creating files in directory $1" 1>&2
echo "Check if you have the right permissions." 1>&2
exit 1
else
local upcase
# Remove the .svn files that came from the template dir
find "$1" -name .svn | xargs rm -rf
# Rename copied files
echo "Renaming behavior controller and root behavior files..."
cd "$1"
mv sample_bt_controller.h "$1.h"
mv sample_bt_controller.cpp "$1.cpp"
mv sample_behavior.h "$4.h"
mv sample_behavior.cpp "$4.cpp"
# Retouch the controller files with the chosen names for the controller
echo "Fixing the code..."
upcase="`echo $2 | tr '[:lower:]' '[:upper:]'`"
$SED -i "s/CBTFootBootSampleController/$2/g" "$1.h" "$1.cpp"
$SED -i "s/CBTFOOTBOOTSAMPLECONTROLLER/$upcase/g" "$1.h"
$SED -i "s/sample_behavior/$4/g" "$1.h"
$SED -i "s/CBTFootbotSampleBehavior/$5/g" "$1.cpp"
$SED -i "s/sample_bt_controller/$1/g" "$1.h" "$1.cpp" Makefile.am
$SED -i "s/demo_bt_behavior/$4/g" "$4.cpp"
# NOTE(review): $? here only reflects the last sed call above.
if [ ! $? -eq 0 ]; then
echo "Unexpected error while creating files (.h and .cpp) $1" 1>&2
echo "Check if you have the right permissions." 1>&2
exit 1
else
local upcase
# Retouch the behavior files with the chosen names
upcase="`echo $5 | tr '[:lower:]' '[:upper:]'`"
$SED -i "s/CBTFootbotSampleBehavior/$5/g" "$4.h" "$4.cpp"
$SED -i "s/CBTFOOTBOTSAMPLEBEHAVIOR/$upcase/g" "$4.h"
$SED -i "s/sample_behavior/$4/g" "$4.h" "$4.cpp"
$SED -i "s/sample_behavior/$4/g" "$4.h" "$4.cpp" Makefile.am
# The template is footbot-flavored; rewrite robot names for other robots.
if [ $3 == eyebot ]; then
$SED -i "s/FootBot/EyeBot/g" "$4.h" "$4.cpp" "$1.h" "$1.cpp"
$SED -i "s/Footbot/Eyebot/g" "$4.h" "$4.cpp" "$1.h" "$1.cpp"
$SED -i "s/footbot/eyebot/g" "$4.h" "$4.cpp" "$1.h" "$1.cpp"
$SED -i "s/FOOTBOT/EYEBOT/g" "$4.h" "$4.cpp" "$1.h" "$1.cpp"
fi
if [ $3 == handbot ]; then
$SED -i "s/FootBot/HandBot/g" "$4.h" "$4.cpp" "$1.h" "$1.cpp"
$SED -i "s/Footbot/Handbot/g" "$4.h" "$4.cpp" "$1.h" "$1.cpp"
$SED -i "s/footbot/handbot/g" "$4.h" "$4.cpp" "$1.h" "$1.cpp"
$SED -i "s/FOOTBOT/HANDBOT/g" "$4.h" "$4.cpp" "$1.h" "$1.cpp"
fi
cd ..
# Fix the compilation environment
echo "Fixing the compilation environment..."
# Append the new controller to the SIMULATION list in Makefile.am.
$AWK "/^SIMULATION =/ {print \$0,\" $1\"} ! /^SIMULATION =/ {print \$0}" Makefile.am | $SED "s/  / /g" > Makefile.am.new
mv Makefile.am.new Makefile.am
fi
fi
echo "Success."
}
###############################################################################
# Associate controller.
#
# Run the associate_controller.sh helper for the new controller.
# PARAM $1: The controller tag name.
# PARAM $2: The robot type.
function associate_controller ( ) {
# NOTE(review): this tests the exit status of whatever command ran BEFORE
# this function was called, not of the association itself -- looks like a
# latent bug; confirm the intended check.
if [ ! $? -eq 0 ]; then
echo "Error: Unexpected error while associating controller." 1>&2
exit 1
else
${AHSSINSTALLDIR}/user/user_scripts/associate_controller.sh $1 $2
fi
}
###############################################################################
# Displays the real robot info.
#
# Print instructions for building the new controller on a real robot.
function real_robot_info ( ) {
    # Quoted delimiter: the ${AHSSINSTALLDIR} placeholder is printed
    # literally, exactly as the original single-quoted echo did.
    cat <<'EOF'

In order to compile your new controller on a real robot,
use the script 'build_real_robot_framework.sh' in your
${AHSSINSTALLDIR} directory.

The script fetches the toolchain, builds the required
libraries and the actual controller.

EOF
}
###############################################################################
# Ask user about his experience with the BT
#
# Ask whether ARGoS should generate a behavior with example states and
# transitions. Sets the global EXPERIENCED_USER: "0" when the user answers
# "y" (wants the example template), "1" otherwise.
check_user_experience ()
{
read -ep "Do you want ARGoS to generate a behavior with example states and transitions? (y/n) [n]: " yn
if [ x"$yn" == "xy" ]; then
EXPERIENCED_USER="0"
else
EXPERIENCED_USER="1"
fi
}
###############################################################################
# Ask user about his experience with the BT
# PARAM $1: The controller tag name.
# PARAM $2: The root behavior tag name.
# Abort the whole script when the controller file name ($1) and the root
# behavior file name ($2) are identical, since both files would collide.
check_controller_behavior_filename ()
{
    if [ "$1" != "$2" ]; then
        return 0
    fi
    echo
    echo "[ERROR] You cannot choose the same filename for a controller ($1) and"
    echo "its root behavior ($2)! Please run the script again."
    echo
    exit 1
}
###############################################################################
# Asks the user to type the wanted root behavior filename. This name is returned.
# RETURN: The behavior files name.
#
# Prompt for the root behavior file name, falling back to the suggested
# default when the answer is empty, and print the result.
# RETURN: The behavior file name (on stdout).
function get_behavior_source_name ( ) {
local sourcename
local behavior_name="new_root_behavior"
read -ep "Insert you root behavior file name [$behavior_name]: " sourcename
# An empty answer accepts the suggested default.
if [ x"$sourcename" = x ]; then
sourcename="$behavior_name"
fi
echo $sourcename
}
###############################################################################
# Asks the user to type the wanted behavior class name. This name is returned.
# PARAM $1: The behavior file name.
# RETURN: The behavior class name.
#
# Prompt for the root behavior class name, suggesting a default of
# "CBT<Robot><CamelCasedFileName>" built from the global REQ_ROBOT_TYPE and
# the behavior file name, and print the chosen (or default) name.
# PARAM $1: The behavior file name.
function get_behavior_class_name ( ) {
local classname
#local current_robot=`capitalize_ichar $REQ_ROBOT_TYPE`
local current_robot
# Map the robot type to the CamelCase prefix used in class names.
if [ $REQ_ROBOT_TYPE == footbot ]; then
current_robot="FootBot"
fi
if [ $REQ_ROBOT_TYPE == handbot ]; then
current_robot="HandBot"
fi
if [ $REQ_ROBOT_TYPE == eyebot ]; then
current_robot="EyeBot"
fi
# Get rid of punctuations and capitalize the initial letters
local tag=`convert_tag_to_classname $1`
local behavior="$tag"
local behavior_name="CBT${current_robot}${behavior}"
read -ep "Insert your root behavior class name [$behavior_name]: " classname
# An empty answer accepts the suggested default.
if [ x"$classname" = x ]; then
classname="$behavior_name"
fi
echo $classname
}
###############################################################################
# Checks if the robot is supported by the behavioral toolkit
# PARAM $1: The desired robot type.
#
# Abort with an error if the requested robot type ($1) is one of the
# robots that the Behavioral Toolkit does not support.
function check_supported_robot() {
    case "$1" in
        sbot)
            echo "Sorry. The SBot is not supported by the Behavioral Toolkit (yet?)." 1>&2
            exit 1
            ;;
        rbot)
            echo "Sorry. The RBot is not supported by the Behavioral Toolkit (yet?)." 1>&2
            exit 1
            ;;
        epuck)
            echo "Sorry. The EPuck is not supported by the Behavioral Toolkit (yet?)." 1>&2
            exit 1
            ;;
    esac
}
###############################################################################
# Displays the real robot info.
# PARAM $1: The controller tag name.
#
# Optionally create an experiment XML for the new controller: ask for a
# file name and a (validated) user directory, copy the template experiment
# there, and rewrite the controller name and username inside it with sed.
# PARAM $1: The controller tag name.
# Uses the globals AHSSINSTALLDIR, AHSSUSERNAME, USERNAME and SED.
function create_experiment ( ) {
read -ep "Do you want to create also an experiment XML associated with this BEHAVIOR controller [$1]? (y/n) [n]: " YN
if [ x"$YN" == "xy" ]; then
local experiment_xml="example_bt_experiment.xml"
read -ep "Input the experiment XML name: [$experiment_xml]: " experiment_xml_input
# An empty answer accepts the suggested default.
if [ x"$experiment_xml_input" = x ]; then
experiment_xml_input="$experiment_xml"
fi
# Keep asking until a username with an existing user directory is given.
while [ "${AHSSUSERNAME}" == "" ]; do
read -ep "Please enter your username [${USERNAME}]: " AHSSUSERNAME
# check if the user wants to use his unix username
if [ "${AHSSUSERNAME}" == "" ]; then
AHSSUSERNAME=${USERNAME}
fi
# see if the requested user directory actually exists
if [ ! -d ${AHSSINSTALLDIR}/user/${AHSSUSERNAME} ]; then
echo "This user does not exist!"
AHSSUSERNAME=
else
break
fi
done
local experiment_folder="${AHSSINSTALLDIR}/user/${AHSSUSERNAME}"
read -ep "To which folder shall the experiment XML be added: [$experiment_folder]: " experiment_folder_input
if [ x"$experiment_folder_input" = x ]; then
experiment_folder_input="$experiment_folder"
fi
if [ ! -d $experiment_folder_input ] ; then
echo "[ERROR] $experiment_folder_input does not exist!"
exit 1
else
# Instantiate the template: swap in the controller tag and the username.
cp $AHSSINSTALLDIR/user/.template/example_behavioral_toolkit.xml $experiment_folder_input/$experiment_xml_input
$SED -i "s/bt_example_footbot_controller/$1/g" $experiment_folder_input/$experiment_xml_input
$SED -i "s/nithin/$AHSSUSERNAME/g" $experiment_folder_input/$experiment_xml_input
echo "File $experiment_folder_input/$experiment_xml_input was properly created. Use it to test your new behavioral toolkit controller."
fi
fi
}
###############################################################################
# Main program
###############################################################################
# Main program: validate the environment, source the shared helpers, walk
# the user through all prompts, then create and register the controller.
function main ( ) {
local tagname
local classname
# check if we got a basedir
if [ "${AHSSINSTALLDIR}" == "" ]; then
echo "Error: You need to set the AHSSINSTALLDIR environment variable!" 1>&2
exit 1
fi
if [ ! -e ${AHSSINSTALLDIR}/user/user_scripts/ahss_script_functions ]; then
echo "Error: Your AHSSINSTALLDIR environment variable is not set properly!" 1>&2
exit 1
fi
# source the common script functions
# (presumably defines default_checks, default_user_checks,
# check_controller_directory and get_robot_type -- confirm in that file)
source ${AHSSINSTALLDIR}/user/user_scripts/ahss_script_functions
# do some basic checks
default_checks
default_user_checks
# check if we're in the right directory
check_controller_directory
welcome_message
check_requirements
# get_robot_type sets the global REQ_ROBOT_TYPE.
get_robot_type 0
robottype=${REQ_ROBOT_TYPE}
check_supported_robot "$robottype"
# Collect all names interactively (defaults offered for each one).
controller_filename=`get_controller_tag_name $robottype`
controller_classname=`get_controller_class_name $controller_filename`
rootbehavior_filename=`get_behavior_source_name`
rootbehavior_classname=`get_behavior_class_name $rootbehavior_filename`
check_controller_behavior_filename "$controller_filename" "$rootbehavior_filename"
create_directory "$controller_filename"
create_experiment "$controller_filename"
check_user_experience
populate_structure "$controller_filename" "$controller_classname" "$robottype" "$rootbehavior_filename" "$rootbehavior_classname"
associate_controller "$controller_filename" "$robottype"
real_robot_info
}
# Execution starts here
main | true |
00c1fa618e9684848d765e83353dbb3c558d0a7d | Shell | capralifecycle/jenkins-slave-wrapper | /jenkins/test-dind.sh | UTF-8 | 272 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -e
# All docker CLI calls below talk to the daemon we spawn here, over this
# unix socket.
export DOCKER_HOST=unix:///docker.sock
# Spawn Docker daemon
/run-docker.sh test &
# Wait til Docker is available
# Poll for up to ~10 seconds; `docker info` succeeds once the daemon is up.
# NOTE(review): if the daemon never comes up, this loop just falls through
# and the `docker run` below fails instead -- confirm that is intended.
for x in $(seq 1 10); do
if docker info; then
break
fi
sleep 1
done
# Spawn hello world container
docker run --rm hello-world
| true |
0177d93e9277ef6f1cd20f0a92eadc94679eaf45 | Shell | Nuukem/Unifi-Scripts | /EdgeOs/Pushover on L2TP VPN Connection/notify-on-vpn-state-change.sh | UTF-8 | 2,105 | 3.84375 | 4 | [] | no_license | #!/bin/vbash
# This script goes in /config/scripts/post-config.d
# Sends a Pushover notification whenever an L2TP remote-access VPN
# session connects or disconnects.

# Variables you'll need to change
IPSegment='10.0.' # The IP address segment your VPN is located on (i.e. '10.0.' or '192.168.1.')
PUSHOVER_USER_KEY="ENTER_YOUR_USER_KEY"
PUSHOVER_APP_KEY="ENTER_YOUR_API_APP_KEY"
CLIENT="ENTER_YOUR_CLIENT_LOCATION"
USE_HTML_FORMAT=1 # SET THIS TO 1 IF YOU ARE USING HTML IN YOUR MESSAGE BELOW.
USE_MONOSPACE_FORMAT=0 # SET THIS TO 1 IF YOU ARE USING MONOSPACE FORMAT.

#################################################################################
### Don't change anything beyond this point unless you know what you're doing ###
#################################################################################
echo "Starting..."

# Include some of the vyatta commands we'll need
source /opt/vyatta/etc/functions/script-template
run=/opt/vyatta/bin/vyatta-op-cmd-wrapper

# Init the temp files
touch /tmp/temp.vpnconnections
touch /tmp/temp.vpnconnections2

# Grab the full list of VPN connections
$run show vpn remote-access > /tmp/temp.vpnfulllist

# Parse out just the user and ip address.
# (Reads the file directly instead of the previous `cat | grep` pipeline.)
grep "$IPSegment" /tmp/temp.vpnfulllist | awk -F' ' '{printf "%s %s\n", $1, $5}' > /tmp/temp.vpnconnections

# Check if they differ from the last time we checked
if ! cmp -s /tmp/temp.vpnconnections /tmp/temp.vpnconnections2
then
    # Someone connected to/disconnected from the VPN! Send the notification
    echo "VPN Activity detected! Sending notification..."
    connInfo=$(</tmp/temp.vpnfulllist)
    if [ "$connInfo" = "No active remote access VPN sessions" ];
    then
        TITLE="VPN :: User DISCONNECTED FROM $CLIENT"
    else
        TITLE="VPN :: User CONNECTED to $CLIENT"
    fi
    MESSAGE=$connInfo
    # NOTE(review): the original wget line was truncated mid-parameter
    # ("...monospace=$USE_MONOSPACE_FORM"); reconstructed here with the
    # standard Pushover message parameters (token, user, message, title,
    # html, monospace). The message is not URL-encoded — confirm the
    # connection listing never contains characters that break the POST.
    wget https://api.pushover.net/1/messages.json --post-data="token=$PUSHOVER_APP_KEY&user=$PUSHOVER_USER_KEY&message=$MESSAGE&title=$TITLE&html=$USE_HTML_FORMAT&monospace=$USE_MONOSPACE_FORMAT"
    echo "Done!"
    # Back up this run so we can compare later
    cp /tmp/temp.vpnconnections /tmp/temp.vpnconnections2
else
    echo "No differences"
fi
8eb508bd28374196f761ef89deec50c09f063ca6 | Shell | babywyrm/sysadmin | /grep/pre_rip/base_.sh | UTF-8 | 1,075 | 3.1875 | 3 | [] | no_license | #!/bin/sh
# both regex and html follow, sorry not sorry
# alias rgg='rg -i --pre-glob '*.{pdf,xlsx,xls,docx,doc,pptx,ppt,html,epub}' --pre rgpre' # note the pre-glob, super important
# ubuntu requirements:
# sudo apt-get poppler-utils
# termux requirements:
# pkg install poppler
#
# ripgrep --pre filter: prints a plain-text rendering of the file named
# in $1 on stdout so rg can search inside document/archive formats.
case "$1" in
*.pdf)
 # PDF -> text via poppler-utils.
 exec pdftotext "$1" -
 ;;
*.xlsx)
 # Sheet XML: newline after each value/formula tag, then strip remaining
 # tags and runs of non-printable characters.
 exec unzip -qc "$1" *.xml | sed -e 's/<\/[vf]>/\n/g; s/<[^>]\{1,\}>//g; s/[^[:print:]\n]\{1,\}//g'
 ;;
*.docx)
 # Word XML: newline after each paragraph tag, strip tags/non-printables.
 exec unzip -qc "$1" word/document.xml | sed -e 's/<\/w:p>/\n/g; s/<[^>]\{1,\}>//g; s/[^[:print:]\n]\{1,\}//g'
 ;;
*.pptx)
 # Slide XML: newline after each text run, strip tags/non-printables.
 exec unzip -qc "$1" ppt/slides/*.xml | sed -e 's/<\/a:t>/\n/g; s/<[^>]\{1,\}>//g; s/[^[:print:]\n]\{1,\}//g'
 ;;
*.doc)
 # Legacy binary formats: fall back to extracting printable strings.
 exec strings -d -15 "$1"
 ;;
*.xls)
 exec strings "$1"
 ;;
*.ppt)
 exec strings -d "$1"
 ;;
*.html)
 # Newline after every closing tag (crude tag-aware line splitting).
 exec cat "$1" | sed 's/<\/[^>]*>/\n/g'
 ;;
*.epub)
 exec unzip -qc "$1" *.{xhtml,html} | sed 's/<\/[^>]*>/\n/g'
 ;;
*)
 # Unknown extension: sniff the content type with file(1) instead.
 case $(file "$1") in
 *Zstandard*)
  # Zstandard-compressed data arrives on stdin; decompress it.
  exec pzstd -cdq
  ;;
 *)
  # Anything else: pass stdin through untouched.
  exec cat
  ;;
 esac
 ;;
esac
| true |
dece2b3012b4d039b063020c0d69cd71269f5511 | Shell | diegofps/ngd_docker_images | /images/bigdata2/src/hadoop_primary_start.sh | UTF-8 | 528 | 2.890625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
STARTED_LOCK="/hadoop/started.lock"

# Format the namenode only on the very first start; the lock file marks a
# node that is already initialized (reformatting would wipe HDFS metadata).
if [ -e "$STARTED_LOCK" ]; then
    echo "Master node already initialized, skipping namenode format"
else
    # Bug fix: this branch previously printed the same "already
    # initialized" message even though it performs the first-time format.
    echo "Initializing master node: formatting namenode"
    hdfs namenode -format alpha -clusterid 249bfd46-a641-4ccd-8a02-82667bae653e
    touch "$STARTED_LOCK"
fi

# Bring up the HDFS/YARN/MapReduce daemons for the primary node.
hdfs --daemon start namenode
yarn --daemon start resourcemanager
yarn --daemon start nodemanager
yarn --daemon start proxyserver
mapred --daemon start historyserver
echo "Hadoop primary started"
| true |
1f66a432506a504e01f307e62bb56397afb1c53a | Shell | Mad-ness/openstack-installs | /scripts/smoke_oscloud.sh | UTF-8 | 974 | 3.5 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Run the script on any of the utility containers doing the source /root/openrc before.
#
function runcmd() {
    # Run a CLI command and print PASSED/FAILED next to a label.
    #   $1 - command line to execute (word-split on purpose)
    #   $2 - human-readable description
    # Returns the command's exit status.
    cmd="$1"
    msg="$2"
    echo -n "${msg} (${cmd}) ... "
    # Bug fix: the original `2>&1 >/dev/null` only silenced stdout and
    # leaked stderr to the terminal; the intent is to discard both.
    # shellcheck disable=SC2086  # $cmd must word-split into a command line
    $cmd >/dev/null 2>&1
    retcode=$?
    if [[ $retcode -ne 0 ]]; then
        echo "FAILED"
    else
        echo "PASSED"
    fi
    return $retcode
}
# Smoke-test the main OpenStack service endpoints with read-only CLI
# calls; each line prints PASSED/FAILED. Assumes credentials are already
# sourced (e.g. /root/openrc), per the header comment above.
runcmd "openstack user list" "Getting a list of users"
runcmd "openstack user list --os-cloud=default" "Getting a list of users in the default cloud"
runcmd "openstack endpoint list" "Listing endpoints"
runcmd "openstack compute service list" "Listing compute services"
runcmd "openstack network agent list" "Listing neutron agents"
runcmd "openstack volume service list" "Listing volume services"
runcmd "openstack image list" "Listing images"
runcmd "openstack flavor list" "Listing flavors"
runcmd "openstack floating ip list" "Listing floating ips"
runcmd "openstack catalog list" "Listing catalog"
| true |
56522cfaf3ee3815c5e8f139b9aca2a02beddf5c | Shell | HICgroup/MpdFrameworkGSI | /real-flow/example-resolutions.sh | UTF-8 | 576 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Positional arguments: input ROOT file, output file, DCA corrections file.
INFILE=$1
OUTFILE=$2
DCAFILE=$3
# NOTE(review): SBATCH directives are only honored when they appear before
# the first executable line of a batch script; placed here, after the
# assignments above, they are ignored by sbatch — confirm and move up.
#SBATCH --time=0:15:00
#SBATCH -D /tmp
PROJECT_DIR=/lustre/nyx/hades/user/parfenov/mpd_new/real-flow/
#. /lustre/nyx/hades/user/parfenov/Soft/MPDRoot/build/config.sh
# Load the HADES/hydra2 environment from CVMFS.
source /cvmfs/hades.gsi.de/install/5.34.34/hydra2-4.9n/defall.sh
cd $PROJECT_DIR
# Derive the output directory and the input's basename (no directory, no
# extension) for naming the per-job log files.
OUTDIR=${OUTFILE%/*}
AFTERSLASHNAME=${INFILE##*/}
BASENAME=${AFTERSLASHNAME%.*}
# Run the ROOT macro in batch mode; stdout/stderr are appended to
# per-input .OUT/.ERR logs placed next to the output file.
root -l -b -q "main_resolutions.C(\"${INFILE}\",\"${OUTFILE}\",\"${DCAFILE}\")" 1>> ${OUTDIR}/${BASENAME}_rec_calc.OUT 2>> ${OUTDIR}/${BASENAME}_rec_calc.ERR
#mv ${BASENAME}_res_out.root OUTDIR/.
| true |
ac95534fb7aefb4cb477dcdd407893b03e6b2652 | Shell | peacemakr-io/peacemakr-core-crypto | /bin/release-golang.sh | UTF-8 | 1,922 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -ex

function usage {
    # Show how to invoke this release script.
    cat <<'USAGE'
Usage: ./bin/release-golang.sh [path to peacemakr-api folder] [optional: release]
for example, ./bin/release-golang.sh ~/peacemakr/peacemakr-api release
USAGE
}
# At most two positional args are accepted: the peacemakr-api checkout
# path and an optional "release" flag.
if [[ "$#" -gt 2 ]]; then
    echo "Illegal use"
    usage
    exit 1
fi
function get_crypto_file_linux {
    # Export the Go sources baked into the corecrypto image as a
    # musl-linked tarball inside the target directory ($1).
    local target_dir="$1"
    docker run corecrypto:latest tar -czvf - -C /go/src peacemakr > "${target_dir}/peacemakr-core-crypto-go-musl.tar.gz"
}
function get_crypto_file_mac {
    # Build the native macOS core-crypto library with CMake and package
    # it — together with the OpenSSL headers and the Go FFI sources —
    # into a tarball in out_dir. Staging happens under /tmp/peacemakr.
    out_dir=${1}
    build_type=${2}    # e.g. "CMAKE_BUILD_TYPE=RELEASE", passed as -D${build_type}
    mkdir -p /tmp/peacemakr/crypto/include/openssl
    mkdir -p build
    pushd build
    # Configure against Homebrew's openssl@1.1 and install into staging.
    cmake .. -DOPENSSL_ROOT_DIR=/usr/local/opt/openssl@1.1 -D${build_type} -DCMAKE_INSTALL_PREFIX=/tmp/peacemakr/crypto
    make check-peacemakr-core-crypto install
    cp -R /usr/local/opt/openssl@1.1/include/openssl /tmp/peacemakr/crypto/include/
    cp -R ../src/ffi/go/src/peacemakr/crypto/ /tmp/peacemakr/crypto/
    tar -czvf ${out_dir}/peacemakr-core-crypto-go-macos.tar.gz -C /tmp peacemakr
    popd
    # Drop the staging directory now that the tarball exists.
    rm -rf /tmp/peacemakr
}
# Default to a debug build unless the second argument is "release".
BUILD_ARG="CMAKE_BUILD_TYPE=DEBUG"
if [[ "${2}" == "release" ]]; then
    BUILD_ARG="CMAKE_BUILD_TYPE=RELEASE"
fi
# Build the dependency and main corecrypto images, then extract the Linux
# (musl) and macOS artifacts into the peacemakr-api checkout (${1}).
docker build -t corecrypto-dependencies:latest . -f docker/go-dependencies.Dockerfile --build-arg=${BUILD_ARG}
docker build -t corecrypto:latest . -f docker/go.Dockerfile --build-arg=${BUILD_ARG}
get_crypto_file_linux ${1}
get_crypto_file_mac ${1} ${BUILD_ARG}
# Unpack both tarballs into the API repo's crypto/ folder: macOS layout
# first, then the musl shared objects layered on top.
pushd "${1}"
rm -rf crypto/*
tar -xzvf peacemakr-core-crypto-go-macos.tar.gz
cp -R peacemakr/crypto ./
tar -xzvf peacemakr-core-crypto-go-musl.tar.gz
cp peacemakr/crypto/lib/*.so ./crypto/lib
rm -rf crypto/lib/cmake
rm -rf peacemakr peacemakr-core-crypto-go-macos.tar.gz peacemakr-core-crypto-go-musl.tar.gz
# Placeholder Go package files — presumably so these non-Go directories
# survive Go tooling/packaging; confirm against the consuming repo.
echo "package keeplib" > crypto/lib/keep.go
echo "package keeppeacemakr" > crypto/include/peacemakr/keep.go
echo "package keepopenssl" > crypto/include/openssl/keep.go
popd
| true |
36496018648a0594d5b77f38e0544853a9f2e4b5 | Shell | Nuthi-Sriram/Shell-Programming | /SourceCodes/sumOfNatural.sh | UTF-8 | 145 | 3.3125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Read an upper bound n from stdin and print 1 + 2 + ... + n.

# sum_to N — print the sum of the first N natural numbers.
# (Replaces the original backtick `seq` pipeline with a builtin
# arithmetic loop; no external process needed.)
sum_to() {
    local n=$1 total=0 i
    for (( i = 1; i <= n; i++ )); do
        total=$(( total + i ))
    done
    echo "$total"
}

echo "Enter a number upto which you want to find the sum of"
read -r n
sum_to "$n"
| true |
0af0ce7bd696c6d9eaf15da6c65b6af55d663697 | Shell | chelroot/domru94 | /dird | UTF-8 | 335 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# For every camera directory, create today's (DD.MM) photo folder and
# append a link to it on that camera's index page.
date_stamp=$(date '+%d.%m')

for cam in 19-8 19-9 19-10 19-23 230820943-1 vzonov1 vzonov2; do
    mkdir /var/www/html/foto/"$cam"/"$date_stamp"
    printf '<a href=%s>%s</a><br />\n' "$date_stamp" "$date_stamp" >> /var/www/html/foto/"$cam"/index.html
done
| true |
e58f1c53dbd88e9e94f5bebe39cd4bcfeb17be0a | Shell | ljani/PKGBUILDs | /alarm/distcc-clang/PKGBUILD | UTF-8 | 539 | 2.734375 | 3 | [] | no_license | # Maintainer: Kevin Mihelich <kevin@archlinuxarm.org>
# Temporary package to trial building with clang via distcc
pkgname=distcc-clang
pkgver=1
pkgrel=1
pkgdesc='distcc symlinks for clang'
arch=('x86_64')
url='https://github.com/distcc/distcc'
license=('GPL')
depends=('distcc')
# Install clang/clang++ shims that point at distcc, mirroring the layout
# the distcc package uses so masqueraded compiles go through distcc.
package() {
  # Symlinks
  install -d "$pkgdir/usr/lib/distcc/bin"
  for bin in clang clang++; do
    ln -sf "/usr/bin/distcc" "$pkgdir/usr/lib/distcc/bin/$bin"
    # Additional symlinks are needed, see FS#57978
    ln -sf "/usr/bin/distcc" "$pkgdir/usr/lib/distcc/$bin"
  done
}
| true |
0406272ce4cc0aaad8e74822ef961c764604474a | Shell | code-golf/code-golf | /langs/zig/zig | UTF-8 | 249 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/bin/sh -e
# code-golf language wrapper: "version" prints the compiler version;
# otherwise the program source arrives on stdin, is compiled, and run
# with the remaining argv entries. Runs under `sh -e` (see shebang), so
# any failing step aborts the wrapper.
[ "$1" = "version" ] && exec /usr/local/bin/zig version
# NOTE(review): builds in the shared /tmp with fixed names (code.zig,
# code), so concurrent invocations would clobber each other — confirm
# each run is sandboxed.
cd /tmp
# Compile
cat - > code.zig
/usr/local/bin/zig build-exe --global-cache-dir . -fstrip -freference-trace --color on code.zig
rm code.zig
# Execute
shift
exec ./code "$@"
| true |
0405c851fefa037442ff8b595856d15bc734bbf7 | Shell | janlt/SOE | /soeapi/c_test/big_store_1.sh | UTF-8 | 446 | 2.734375 | 3 | [] | no_license | STORE_NAME=$1
VALUE_SIZE=$2
NUM_OPS=$3

# Attach/prepare the store once before the write loop. ($STORE_NAME is
# assigned from $1 above.) Quoting fix: all user-supplied values were
# previously unquoted and subject to word-splitting/globbing.
bin/c_test_soe -x sysprog -o KURCZE -z SSSSS -c "$STORE_NAME" -A -m 2

KEY=KEY5DDDDDXXXOOOOOOOOOOOOOOXXXXX
VALUE="VALUE5__________________________________"
COUNTER=100

# Endless writer: once per second, issue NUM_OPS writes of VALUE_SIZE
# bytes under a fresh counter-suffixed key.
while true
do
    COUNTER=$((COUNTER + 1))
    C_KEY=$KEY$COUNTER
    C_VALUE=$VALUE$COUNTER
    printf '%s   %s   %s\n' "$C_KEY" "$C_VALUE" "$COUNTER"
    bin/c_test_soe -x sysprog -o KURCZE -z SSSSS -c "$STORE_NAME" -C -n "$NUM_OPS" -N "$VALUE_SIZE" -k "$C_KEY" -m 2
    sleep 1
done
| true |
26f879fed77d63e3f3a41e7973ffad2b5ff8ab3b | Shell | avolab/episystem-workshop | /scripts/install_scripts/2-install_miniconda.sh | UTF-8 | 226 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Install miniconda
# Runs the Miniconda3 installer that must already be downloaded to $HOME.
installdir="$HOME"
installpath="${installdir}/Miniconda3-latest-Linux-x86_64.sh"

# Fail fast (to stderr) when the installer archive is absent.
# (Replaces the fragile `test && echo && exit` chain with an explicit if.)
if [[ ! -e "$installpath" ]]; then
    echo "$installpath not found, exiting" >&2
    exit 1
fi

# Quoting fix: cd/bash previously used unquoted paths.
cd "$installdir" || exit 1
bash "$installpath"
| true |
d372d35093f38c19b47ecaa66157f600ace0e496 | Shell | formatme/dedipanel | /src/DP/GameServer/SteamServerBundle/Resources/views/sh/install.sh.twig | UTF-8 | 3,986 | 3.75 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Copyright (C) 2010-2013 Kerouanton Albin, Smedts Jérôme
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
CSP_FILE='cspromod_b108.zip'
CSP_DLURL="http://dl.redline-hosting.net/public/$CSP_FILE"

# Usage: $0 <use_steamcmd: 1|0> <game-or-appid[.mod]> <success-marker-file>
if [ $# -eq 3 ]; then
    # Installation via steamCmd
    if [ $1 -eq 1 ]; then
        # "$2" is "<appid>.<mod>"; split into the app id and optional mod.
        steamapp=$2
        JEU=${steamapp%.*}
        MOD=${steamapp#*.}

        if [ ! -e {{ installDir }}steamcmd/ ]; then
            echo "Download steamcmd" >> {{ installDir }}install.log
            mkdir {{ installDir }}steamcmd/ && cd {{ installDir }}steamcmd/
            # Download steamcmd first.
            wget http://media.steampowered.com/client/steamcmd_linux.tar.gz
            # Unpack, then delete the downloaded archive.
            # Bug fix: the old command was `rm -f tar xvfz steamcmd_linux.tar.gz`,
            # which also tried to delete files literally named "tar" and "xvfz".
            tar xvfz steamcmd_linux.tar.gz && rm -f steamcmd_linux.tar.gz
            cd {{ installDir }}steamcmd/
            chmod 755 ./steamcmd.sh && chmod u+x ./steam.sh
        fi

        cd {{ installDir }}steamcmd/
        # Launch the game install.
        echo "Game install" >> {{ installDir }}install.log
        # Bug fix: `[ -n $MOD ]` was unquoted, so the test was always true
        # even when MOD was empty.
        if [ -n "$MOD" ]; then
            ./steamcmd.sh +login anonymous +force_install_dir {{ installDir }} +app_update $JEU +app_set_config $JEU mod $MOD +app_update $JEU +quit
        else
            ./steamcmd.sh +login anonymous +force_install_dir {{ installDir }} +app_update $JEU validate +quit
        fi

        # Verify the install completed by probing for the expected file.
        if [ -e "{{ installDir }}/$3" ]; then
            echo "Install ended" >> {{ installDir }}install.log
        else
            echo "Install failed"
        fi
    # Installation via hldsupdatetool
    else
        cd {{ installDir }}
        # For cspromod we install Counter-Strike Source and overlay the
        # mod from an archive afterwards.
        if [ "$2" = "cspromod" ]; then
            JEU="Counter-Strike Source"
        else
            JEU="$2"
        fi
        # Download hldsupdatetool.bin.
        echo "DL hldsupdatetool.bin" > install.log
        wget http://storefront.steampowered.com/download/hldsupdatetool.bin
        # Make it executable and run it to fetch the steam updater.
        chmod 750 ./hldsupdatetool.bin && ./hldsupdatetool.bin <<< "yes"
        # It is no longer needed; remove it.
        rm -f hldsupdatetool.bin
        # Run steam (twice) so it can self-update its executable.
        echo "Steam updating" >> install.log
        ./steam
        sleep 1
        ./steam
        sleep 1
        # Launch the game install.
        echo "Game install" >> install.log
        ./steam -command update -game "$JEU" -dir . -verify_all -retry
        # For cspromod, fetch the mod archive and unpack it on top.
        # Bug fix: this previously tested "$1" (the installer-type flag,
        # numeric per the branch above), so the overlay could never run;
        # the game name is "$2", matching the check earlier in this branch.
        if [ "$2" = "cspromod" ]; then
            mkdir orangebox && mv hl2 orangebox/
            ./steam -command update -game "orangebox" -dir . -verify_all -retry
            cd orangebox
            wget "$CSP_DLURL"
            unzip -u "$CSP_FILE"
            rm -f "$CSP_FILE"
        fi
        echo "Install ended" >> install.log
    fi
else
    echo "Usage: $0 game"
fi
| true |
c445ba46dbe737aa6cf129b202243ef49b4cecd9 | Shell | guvox/desktop-settings | /community/openbox-basic/scripts/manjaro-polybar-edit | UTF-8 | 1,717 | 3.90625 | 4 | [] | no_license | #!/usr/bin/env bash
# manjaro-polybar-edit: An Manjaro polybar config file editor
# Copyright (C) 2017 Nathaniel <natemaia10@gmail.com>
# Constants: polybar config root, dialog title/icon, and the base zenity
# checklist invocation (expanded unquoted later so it word-splits into
# separate arguments).
readonly PPATH="$HOME/.config/polybar"
readonly TITLE="Manjaro Polybar Edit"
readonly ICON="--window-icon=/usr/share/icons/manjaro/maia/48x48.png"
readonly CHECKLIST="zenity $ICON --width=450 --height=500 --list --checklist --multiple"
readonly HELP="manjaro-polybar-edit:
\tA script to edit selected polybar configs
\nOptions:\n\t-h --help show this message
\tNo other options are supported.\n\nPolybar files must be in $PPATH
Checkmarked configs will be opened in a text editor"
# Print help and quit when the arguments are -h/--help.
case "$@" in
    -h|--help) echo -e "$HELP" ; exit 0
esac
# Candidate files: everything under PPATH (following symlinks), up to
# four directory levels deep.
readonly FILES=($(find -L "$PPATH" -maxdepth 4 -type f))
edit_Configs() {
    # Offer every polybar config/script in a zenity checklist and open
    # the check-marked ones with the first available opener/terminal.
    for f in "${FILES[@]}"; do
        if [[ $f = *config ]] || [[ $f = *conf ]] || [[ $f = *.sh ]] || [[ -x $f ]]; then
            NAME=$(sed "s|${HOME}/.config||" <<< "$f")
            LIST="$LIST FALSE $NAME"
        fi
    done
    MSG="<big>Select Polybar Configs to edit</big>\n"
    # $CHECKLIST and $LIST are deliberately unquoted so they word-split
    # into separate zenity arguments (paths must not contain whitespace).
    ANSWER=$($CHECKLIST --title="$TITLE" --text="$MSG" --column="Select" --column="File" $LIST --separator=" ")
    if [[ $? == 1 ]]; then
        # zenity exits with 1 when the dialog is cancelled.
        exit 0
    else
        # Bug fix: each editor branch used to `break` after launching the
        # first file, so only one of the check-marked configs was ever
        # opened despite the --multiple checklist (and the help text).
        for name in $ANSWER; do
            FULL="$HOME/.config/$name"
            if hash exo-open &>/dev/null; then
                (exo-open "$FULL" &)
            elif hash termite &>/dev/null; then
                (termite -e "$EDITOR $FULL" &>/dev/null)
            elif hash st &>/dev/null; then
                (st -e $EDITOR $FULL &)
            elif hash urxvt &>/dev/null; then
                (urxvt -e "$EDITOR $FULL" &)
            fi
        done
    fi
}

edit_Configs
exit 0
| true |
eb04fd0ba8ebd661c3949710146eeb2fc8fcd5a1 | Shell | paulredmond/dotfiles | /zshrc | UTF-8 | 6,784 | 2.671875 | 3 | [] | no_license | # Uncomment to profile zsh startup
# See the bottom of the file
# @see https://blog.askesis.pl/post/2017/04/how-to-debug-zsh-startup-time.html
# zmodload zsh/zprof
# Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
export VISUAL="subl -n"
export EDITOR="$VISUAL"
export TERM="xterm-256color"
# export RBENV_VERSION=1.9.3
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
#ZSH_THEME="af-magic"
ZSH_THEME="miloshadzic"
#ZSH_THEME="bureau"
# Set to this to use case-sensitive completion
# CASE_SENSITIVE="true"
# Word-wise cursor movement for arrow-key escape sequences.
bindkey "[C" forward-word
bindkey "[D" backward-word
# Comment this out to disable weekly auto-update checks
# DISABLE_AUTO_UPDATE="true"
# Uncomment following line if you want to disable colors in ls
# DISABLE_LS_COLORS="true"
# Uncomment following line if you want to disable autosetting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment following line if you want red dots to be displayed while waiting for completion
# COMPLETION_WAITING_DOTS="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
plugins=(git docker docker-compose zsh-autosuggestions artisan)
source $ZSH/oh-my-zsh.sh
# Stop spelling checks of command arguments. Only consider commands.
unsetopt correctall && setopt correct
# Customize to your needs...
# export PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/sbin:./bin:./vendor/bin:$HOME/bin:$HOME/.dotfiles/bin:$PATH
# zsh completion for docker-compose
fpath=(~/.dotfiles/.zsh/completion $fpath)
autoload -Uz compinit && compinit -i
# Default Aliases
# (nocorrect/noglob disable zsh autocorrect/globbing for these commands.)
alias ack='nocorrect ack'
alias mongod='nocorrect mongod'
alias rake='noglob rake'
alias mate='nocorrect mate'
alias p='nocorrect pstorm'
alias rspec='nocorrect rspec'
alias f='fnm'
alias fmn='fnm'
# Installed Brew Packages that override built-in commands
alias cat='bat'
# alias find='fd'
# Copy the previous command line to the clipboard.
alias lc="fc -ln -1 | tr -d '\n' | pbcopy"
alias ....='cd ../../..'
alias lah='ls -lah'
alias console='nocorrect console'
alias zshrc="${VISUAL:-${EDITOR:-vim}} ~/.zshrc"
alias vim="nvim"
# PHP
alias phpcbf='nocorrect phpcbf'
alias c='composer'
alias ci='composer install'
alias ct='composer test'
alias cl='composer lint'
alias ca='composer analyse'
alias iphp='psysh'
alias art='php artisan'
alias sail='bash vendor/bin/sail'
alias tinker='php artisan tinker'
alias mfs='php artisan migrate:fresh --seed'
alias t='phpunit'
alias clearlog='truncate -s 0 $(git rev-parse --show-toplevel)/storage/logs/laravel.log'
alias clearlogs='truncate -s 0 $(git rev-parse --show-toplevel)/storage/logs/*.log'
# NPM
alias npm-exec='PATH=$(npm bin):$PATH'
alias yi='yarn install'
alias yw='yarn install && yarn run watch'
# Git aliases
alias g='git'
alias gs='git status'
alias gdc='git diff --cached'
alias gst='git status'
alias wip='git commit -am "WIP"'
# Docker aliases
alias d='docker'
alias dc='docker-compose'
alias dm='docker-machine'
alias dmnative='echo "Switching to native docker" && eval $(docker-machine env -u)'
alias docker-cleanup='docker network prune && docker system prune'
# http://unix.stackexchange.com/questions/22615/how-can-i-get-my-external-ip-address-in-bash/81699#81699
alias ip='dig +short myip.opendns.com @resolver1.opendns.com'
# Local config
# Source machine-specific overrides when present.
if [[ -e $HOME/.zshrc.local ]]
then
source $HOME/.zshrc.local
fi
#
# Link a local composer repository
#
# After, you can require the project normally:
# `composer require [vendor]/composer-package`
#
# If `composer require` doesn't work the first time, you may need to add @dev:
# `composer require [vendor]/composer-package @dev`
#
# See: https://calebporzio.com/bash-alias-composer-link-use-local-folders-as-composer-dependancies/
#
# $1 - filesystem path registered as a "path"-type composer repository
#      in the current project's composer.json.
composer-link() {
composer config repositories.local '{"type": "path", "url": "'$1'"}' --file composer.json
}
function phplink()
{
    # Switch the brew-linked PHP to the requested version, e.g.
    # `phplink 7.4`; with no argument, link the unversioned `php` formula.
    local current_version current_package current_version_installed
    local new_version new_version_package new_version_installed result

    current_version=$(php -r 'echo PHP_MAJOR_VERSION.".".PHP_MINOR_VERSION;')
    current_package="php@$current_version"
    current_version_installed=$(brew list | grep "${current_package}")

    new_version="${1}"
    if [ -z "${new_version}" ]
    then
        new_version_package="php"
    else
        new_version_package="php@${1}"
    fi

    # Refuse to switch to a formula brew does not have installed.
    new_version_installed=$(brew list | grep "${new_version_package}")
    if [ -z "$new_version_installed" ]
    then
        echo "You requested ${new_version_package}, but it seems to be missing from Brew?"
        return 1
    fi

    echo "The current version in use: ${current_version_installed}"
    echo "Switching to ${new_version_package}"

    brew unlink php && brew link --overwrite --force "${new_version_package}"
    result=$?
    # Bug fix: this previously tested the undefined `$ret` (so failures
    # were never reported) and ended with `return result` (non-numeric).
    if [ "$result" -ne 0 ]
    then
        echo "Brew had an issue unlinking ${current_package} or linking ${new_version_package}"
        return $result
    fi
}
# Start a simple web server from any directory
function serve()
{
    # Prefer Python 3's http.server; fall back to the legacy Python 2
    # SimpleHTTPServer module only when python3 is unavailable.
    if command -v python3 >/dev/null 2>&1; then
        python3 -m http.server 8000
    else
        python -m SimpleHTTPServer 8000
    fi
}
function findprocess()
{
    # Search the process list for $1.
    # Quoting fix: "$1" was unquoted (word-splitting/glob hazard); `--`
    # protects patterns starting with a dash. Note the grep process
    # itself may still show up in the output.
    ps aux | grep -- "$1"
}
function myprocess()
{
    # Show every `ps -ef` line mentioning the current user name
    # (owner column or elsewhere on the line).
    ps -ef | grep $USER
}
function corscheck()
{
    # Send a CORS preflight (OPTIONS) request to the URL in $1 and show
    # only the response headers, so Access-Control-Allow-* values can be
    # inspected.
    curl -I \
    -H "Origin: http://example.com" \
    -H "Access-Control-Request-Method: POST" \
    -H "Access-Control-Request-Headers: X-Requested-With" \
    -X OPTIONS \
    $1
}
### Added by the Heroku Toolbelt
export PATH="/usr/local/heroku/bin:$PATH"
#if hash docker-machine 2>/dev/null; then
#  eval "$(docker-machine env default)"
#fi
# export NVM_DIR="$HOME/.nvm"
# [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
# [ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# added by travis gem
[ -f /Users/paul/.travis/travis.sh ] && source /Users/paul/.travis/travis.sh
# Hyper Terminal
# Force a consistent UTF-8 locale across all LC_* categories.
export LANG="en_US.UTF-8"
export LC_COLLATE="en_US.UTF-8"
export LC_CTYPE="en_US.UTF-8"
export LC_MESSAGES="en_US.UTF-8"
export LC_MONETARY="en_US.UTF-8"
export LC_NUMERIC="en_US.UTF-8"
export LC_TIME="en_US.UTF-8"
export LC_ALL="en_US.UTF-8"
export PATH="$HOME/.yarn/bin:$PATH"
# Uncomment to profile zsh startup
# See the top of the file
# @see https://blog.askesis.pl/post/2017/04/how-to-debug-zsh-startup-time.html
# zprof
# fnm
# Put fnm on PATH (two possible install locations) and load its env hooks.
if [ -d "$HOME/.fnm" ]
then
export PATH=$HOME/.fnm:$PATH
eval "`fnm env`"
elif [ -d "$HOME/.local/share/fnm" ]
then
export PATH="$HOME/.local/share/fnm:$PATH"
eval "`fnm env`"
else
echo "Warn: FNM path not found!"
fi
#THIS MUST BE AT THE END OF THE FILE FOR SDKMAN TO WORK!!!
export SDKMAN_DIR="/Users/$USER/.sdkman"
[[ -s "/Users/$USER/.sdkman/bin/sdkman-init.sh" ]] && source "/Users/$USER/.sdkman/bin/sdkman-init.sh"
| true |
f30dae4c03d22e8c9147ef765cfa7625f856cc56 | Shell | Orientsoft/moop-volume-service | /build.sh | UTF-8 | 257 | 2.8125 | 3 | [] | no_license | TAG=`git rev-parse --short HEAD`
REGISTRY=registry.datadynamic.io/moop
IMAGE=moop-volume-service

# Build the image tagged with the current git short hash ($TAG is set
# above) and push it only when the build succeeds.
# (Replaces the fragile `cmd; if [ $? -ne 0 ]` pattern with a direct if,
# and quotes the image reference.)
if docker build -t "$REGISTRY/$IMAGE:$TAG" -f Dockerfile .; then
    echo "success"
    docker push "$REGISTRY/$IMAGE:$TAG"
else
    echo "fail"
fi
| true |
8b7eda20cef40f871b0abd39d95d95b1e4004832 | Shell | vfreex/level-ip | /tests/test-runner | UTF-8 | 242 | 2.75 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Run scapy's UTscapy test suite (from the local virtualenv) against a
# freshly started lvl-ip userspace network stack.
PYTHON="python2.7"
UTSCAPY="venv/lib/python2.7/site-packages/scapy/tools/UTscapy.py"
# Kill the background stack whenever the script exits or errors.
function cleanup {
    kill "$stack_pid"
}
trap cleanup EXIT ERR
# Start the stack in the background and give it time to come up.
../lvl-ip 1>/dev/null &
stack_pid="$!"
sleep 3
# Forward all script arguments to UTscapy (e.g. the test-set file).
"$PYTHON" "$UTSCAPY" "$@"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.