blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
67815396fd22cd8021a01f99d1f8a931e60c7cca
|
Shell
|
HectorAnadon/SeaLionsDetectionClassification
|
/run.sh
|
UTF-8
| 915
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash -l
# SLURM batch script: runs `make_datasets.py combine` on a cluster node
# with CUDA/cuDNN and an Anaconda/TensorFlow environment loaded.
# The -l above is required to get the full environment with modules
# Set the allocation to be charged for this job
# not required if you have set a default allocation
#SBATCH -A edu17.DD2438
# The name of the script
#SBATCH -J combine
# Email notifications at job start and end
#SBATCH --mail-type=BEGIN,END
# 8 hours wall-clock time will be given to this job
#SBATCH -t 8:00:00
# Number of nodes
#SBATCH --nodes=1
#SBATCH -e error_file_make_datasets_combine_jun17.e
#SBATCH -o output_file_make_datasets_combine_jun17.o
# load the anaconda module
module add cudnn/5.1-cuda-8.0
module load anaconda/py35/4.2.0
# if you need the tensorflow environment:
source activate tensorflow
# install python dependencies into the user site-packages
pip install --user -r requirements3.txt
# execute the program
# (on Beskow use aprun instead)
#mpirun -np 1 python make_datasets.py neg
python make_datasets.py combine
# to deactivate the Anaconda environment
source deactivate
| true
|
cba6b61e694b88644aea628313dfd8eca62cfe7e
|
Shell
|
blakwurm/blakwurm-site-builder
|
/publish.sh
|
UTF-8
| 428
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Publish script: commits and pushes this source repo, then copies the
# generated site (resources/public) plus CNAME into the sibling
# ../blakwurm.github.com checkout, commits and pushes that too.
# Assumes it is run from the repo root and the sibling checkout exists.
# NOTE(review): no error checking — if a git/cp/cd step fails the script
# keeps going; consider `set -e` and quoting the relative paths.
echo Committing this repo
git add .
git commit
git push
echo copying to blakwurm.github.com repo
# wipe the previously published content before copying the new build
rm -rf ../blakwurm.github.com/*
cp CNAME ../blakwurm.github.com
cd resources/public
cp -r * ../../../blakwurm.github.com
cd ../../
echo commiting blakwurm.github.com
cd ../blakwurm.github.com
git add .
git commit -m "automated commit from https://github.com/blakwurm/blakwurm-site-builder"
git push
cd ../blakwurm
| true
|
4088ed2e6b1e8930e2b6be2b5ea1e2b7092833e9
|
Shell
|
lgarreta/puj-lnxing
|
/clases/07-Scripts/code/while.sh
|
UTF-8
| 60
| 2.75
| 3
|
[] |
no_license
|
# Demo: count from 0 to 9 with a shell loop, printing each value.
i=0
until [ "$i" -ge 10 ]
do
  echo i is $i
  i=$((i + 1))
done
| true
|
79870852ba65cb5d1416a9719b49c90a1ced5abe
|
Shell
|
ryjen/dotfiles
|
/collections/ansible_collections/ryjen/dotfiles/roles/git/files/dotfiles/.local/share/git/hooks/scripts/functions
|
UTF-8
| 646
| 3.765625
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
# Shared helpers for git hook scripts: colored status output and a
# helper that lists staged files, optionally filtered by extension.
# NOTE(review): the `function name() {}` form is a bashism; a strict
# POSIX /bin/sh may reject these definitions — confirm target shells.
# Route all stdout to stderr so hook output is always shown to the user.
exec 1>&2
# h1 <text...>: print a bold section heading.
function h1() {
printf "\n\x1B[1;39m> %s\n" "$@"
}
# pass <text...>: print a check-mark marker and message.
function pass() {
printf "\x1B[1;33m✔\x1B[0m %s\n" "$@"
}
# fail <text...>: print a cross marker and abort the hook (exit 1).
function fail() {
printf "\x1B[1;31m✖\x1B[0m %s\n" "$@"
exit 1
}
# failsafe <text...>: print a cross marker but keep running.
function failsafe() {
printf "\x1B[1;31m✖\x1B[0m %s\n" "$@"
}
# commit_files [ext...]: echo the staged (Added/Copied/Modified) files.
# With no arguments: print every staged file and exit the hook with 0.
# With extension arguments: print only staged files whose name ends in
# one of the given extensions.
commit_files() {
if [ $# -eq 0 ] ; then
echo $(git diff-index --name-only --diff-filter=ACM --cached HEAD --)
exit 0
fi
extensions=''
# build an alternation like "(js)|(py)|" from the arguments
for extension in "$@"
do
extensions="${extensions}(${extension})|"
done
# strip the trailing "|" and anchor the pattern to the file extension
regex="\.(${extensions%?})$"
echo $(git diff-index --name-only --diff-filter=ACM --cached HEAD -- | grep -P "$regex")
}
| true
|
f7639f703a26a34fefa534fc2922386e36b83797
|
Shell
|
cms-sw/cmssw
|
/DPGAnalysis/Skims/python/archiveold
|
UTF-8
| 840
| 3.78125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Move old skim output files (*.root, *.log, *.py) into $ARCHDIR.
# Usage: archiveold <older than (sec)>
# File names are expected to carry a 10-digit epoch timestamp in the
# second "_"-separated field (e.g. Skim_1234567890_...).
if [ "$1" = "" ]; then
    # NOTE: `=` instead of the original `==`, which is not POSIX sh.
    echo "Usage: $0 <older than (sec)>"
    exit 1
fi
ARCHDIR=archive
current=$(date +%s)
olderthan=$((current - $1))
echo $current $olderthan

# archive_old <glob>: move every file matching <glob> whose embedded
# timestamp is <= $olderthan into $ARCHDIR.  Replaces three duplicated
# `for file in \`ls ...\`` loops, which broke on no-match and on
# unusual file names.
archive_old() {
    for file in $1; do
        # an unmatched glob stays literal — skip it
        [ -e "$file" ] || continue
        time=$(echo "$file" | awk -F _ '{print $2}' | cut -c 1-10)
        if [ "$time" -le "$olderthan" ]; then
            echo "Moving file $file in $ARCHDIR"
            mv "$file" ${ARCHDIR}/
        fi
    done
}

archive_old '*SM*.root'
archive_old 'SkimSM*.log'
archive_old 'SkimSM*.py'
# archive all ig files.....
mv *.ig ${ARCHDIR}/
| true
|
2e086f7da28b772fcb34be745e98e285b91aafca
|
Shell
|
XiaYi1002/luci-app-b-wool
|
/root/usr/share/b-wool/update_client.sh
|
UTF-8
| 1,614
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Self-update script for the luci-app-b-wool package: clones the latest
# sources from GitHub and copies the LuCI controller, CBI model, view
# and support files into place, logging progress (Chinese text) to the
# web-visible status page $LOG_HTM.
NAME=b-wool
LOG_HTM=/www/b-wool.htm
# uci_get_by_name <section> <option> [default]: read a named UCI option
# from the $NAME config, echoing the default if unset.
uci_get_by_name() {
local ret=$(uci get $NAME.$1.$2 2>/dev/null)
echo ${ret:=$3}
}
# uci_get_by_type <type> <option> [default]: read an option from the
# first UCI section of the given type.
uci_get_by_type() {
local ret=$(uci get $NAME.@$1[0].$2 2>/dev/null)
echo ${ret:=$3}
}
# cancel [message]: print an optional message and abort with status 1.
cancel() {
if [ $# -gt 0 ]; then
echo "$1"
fi
exit 1
}
# Update the application
echo "实验性功能" >$LOG_HTM 2>&1
jd_dir2=$(uci_get_by_type global jd_dir)
cd $jd_dir2
rm -rf luci-app-b-wool
echo "开始拉取文件..." >>$LOG_HTM 2>&1
GIT_CURL_VERBOSE=1 git clone https://github.com/XiaYi1002/luci-app-b-wool.git >>$LOG_HTM 2>&1
if [ $? -eq 0 ];then
# clone succeeded: install the freshly pulled files over the old ones
echo "云端文件下载成功..开始更新..." >>$LOG_HTM 2>&1
cp -Rf $jd_dir2/luci-app-b-wool/luasrc/controller/* /usr/lib/lua/luci/controller/
chmod -R 644 /usr/lib/lua/luci/controller/b-wool.lua
cp -Rf $jd_dir2/luci-app-b-wool/luasrc/model/cbi/b-wool/* /usr/lib/lua/luci/model/cbi/b-wool/
chmod -R 644 /usr/lib/lua/luci/model/cbi/b-wool/
cp -Rf $jd_dir2/luci-app-b-wool/luasrc/view/b-wool/* /usr/lib/lua/luci/view/b-wool/
chmod -R 644 /usr/lib/lua/luci/view/b-wool/
cp -Rf $jd_dir2/luci-app-b-wool/root/usr/share/b-wool/* /usr/share/b-wool/
chmod -R 755 /usr/share/b-wool/
echo "更新完毕...若功能异常...重启下设备...实在不行就卸载重装..." >>$LOG_HTM 2>&1
echo "清理垃圾..." >>$LOG_HTM 2>&1
# drop the clone and clear the LuCI caches so new code is picked up
rm -rf $jd_dir2/luci-app-b-wool
rm -rf /tmp/luci-modulecache
rm -f /tmp/luci-indexcache
else
# clone failed: log the failure and clean up any partial checkout
echo "文件拉取失败...建议开启梯子进行更新..." >>$LOG_HTM 2>&1
rm -rf $jd_dir2/luci-app-b-wool
fi
echo "任务已完成" >>$LOG_HTM 2>&1
| true
|
2afefcded50cf536be3c73eaa45d18cedbc30335
|
Shell
|
HMBSbige/kms-server
|
/hotbird64-mass-build/make_hurd
|
UTF-8
| 1,491
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build vlmcsd for GNU/Hurd x86 (glibc), install the binaries and man
# pages locally, and copy the results to the distribution server.
export VLMCSD_VERSION=$(git describe)
export VERBOSE=3
export DNS_PARSER=OS
# run from the directory this script lives in
cd "$( dirname "$0" )"
make -C .. clean
BINDIR="../bin"
MANDIR="../man"
cd ../src
# -B: unconditional rebuild; -j1: single job (required by -flto=jobserver setup here)
MAKEFLAGS="-B -j1"
export CC=gcc
# size-oriented compile flags: LTO, no exceptions/unwind tables, merged constants
CF="-flto=jobserver -pipe -fwhole-program -fno-common -fno-exceptions -fno-stack-protector -fno-unwind-tables -fno-asynchronous-unwind-tables -fmerge-all-constants"
LF="-fuse-ld=gold -lresolv -Wl,-z,norelro,--hash-style=gnu,--build-id=none"
make $MAKEFLAGS MULTI_NAME=$BINDIR/vlmcsdmulti-hurd-x86-glibc PROGRAM_NAME=$BINDIR/vlmcsd-hurd-x86-glibc CLIENT_NAME=$BINDIR/vlmcs-hurd-x86-glibc CFLAGS="$CF" LDFLAGS="$LF" allmulti
cd $BINDIR
# strip the binaries down further (sstrip: super-strip)
sstrip -z vlmcs-* vlmcsd-* vlmcsdmulti-*
# install binaries and the KMS data file
cp -af ../etc/vlmcsd.kmd /usr/local/sbin
cp -af vlmcsd-hurd-x86-glibc /usr/local/sbin/vlmcsd
cp -af vlmcs-hurd-x86-glibc /usr/local/bin/vlmcs
cd $MANDIR
# Copy man pages
mkdir -p /usr/local/man/man1 2>/dev/null
mkdir -p /usr/local/man/man5 2>/dev/null
mkdir -p /usr/local/man/man8 2>/dev/null
mkdir -p /usr/local/man/man7 2>/dev/null
cp -af vlmcs.1 vlmcsdmulti.1 /usr/local/man/man1/
cp -af vlmcsd.7 /usr/local/man/man7/
cp -af vlmcsd.8 /usr/local/man/man8/
rm -f vlmcsdmulti vlmcsd vlmcs 2>/dev/null
cp -af vlmcsd.ini.5 /usr/local/man/man5/
bzip2 -f -9 /usr/local/man/man5/vlmcsd.ini.5 /usr/local/man/man1/vlmcs.1 /usr/local/man/man1/vlmcsdmulti.1 /usr/local/man/man7/vlmcsd.7 /usr/local/man/man8/vlmcsd.8
cd $BINDIR
# Copy everything to distribution server
scp -p * root@ubuntu64.internal:x/binaries/Hurd/intel/
| true
|
afa9735da16f62aa19305ffe3881561f5ef0bc01
|
Shell
|
nadflop/ece364-prelabs
|
/Prelab09/getProjectsByStudent.bash
|
UTF-8
| 719
| 2.890625
| 3
|
[] |
no_license
|
#! /bin/bash
########################################################
# Author: Nur Nadhira Aqilah Binti Mohd Shah
# Email: mohdshah@purdue.edu
# ID: ee364g02
# Date: 3/16/2019
########################################################
# Given a student name ($1), print the sorted unique project names the
# student is involved in, by joining students.dat -> circuits/*.dat ->
# projects.dat.
base=~ee364/DataFolder/Prelab09
ProjPath=${base}/maps/projects.dat
CircPath=${base}/circuits
StudPath=${base}/maps/students.dat
# look up the student's id: field 2 of the '|'-delimited students file,
# with whitespace stripped
id=$(grep -h "$1" $StudPath | cut -d'|' -f2 | tr -d '[:space:]')
#circ=($(grep -l "$id" $CircPath/*.dat | cut -d'_' -f 2 | cut -d'.' -f 1))
#IFS=$'\n' ; echo "${circ[*]}" | grep -h "${circ[*]}" $ProjPath | tr -s ' ' | cut -d" " -f3 | sort -u
#IFS=$OLDIFS
# find circuit files mentioning the id, extract the circuit number from
# the filename (between '_' and '.'), then pull matching project names
# (column 3) from projects.dat
grep -h "$(grep -l "$id" $CircPath/*.dat | cut -d'_' -f 2 | cut -d'.' -f 1)" $ProjPath | tr -s ' ' | cut -d" " -f3 | sort -u
| true
|
de02f21ba66566c6088e62b6582999bc94fbd871
|
Shell
|
italobrito19/sh
|
/script-tbs.txt
|
UTF-8
| 2,010
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Tablespace usage check (Nagios-style): reads a pre-generated report
# /tmp/TBS/tbluse_<SID>.tmp and prints OK/WARNING/CRITICAL based on the
# percent-of-use thresholds 85 / 95.
# Usage: script-tbs <SID> <TABLESPACE>
# Exit codes: 0 = OK, 1 = WARNING, 2 = CRITICAL or usage error.
SID=$1
TBS=$2
if [ -z "$SID" ]; then
    echo "Inserir SID do banco"
    exit 2
elif [ -z "$TBS" ]; then
    echo "Inserir o nome da tablespace"
    exit 2
else
    # NOTE(review): USO and TAM both read column 2 of the report; TAM
    # was probably meant to read a different (size) column — confirm
    # against the report generator.
    USO=($(grep -w -- "$TBS" "/tmp/TBS/tbluse_$SID.tmp" | awk '{print $2}'))
    TAM=($(grep -w -- "$TBS" "/tmp/TBS/tbluse_$SID.tmp" | awk '{print $2}'))
    if [ "$USO" -ge 95 ]; then
        echo "CRITICAL: tbs $TBS $TAM % of use"
        exit 2
    elif [ "$USO" -ge 85 ]; then
        echo "WARNING: tbs $TBS $TAM % of use"
        exit 1
    else
        echo "OK : tbs $TBS $TAM % of use"
    fi
fi
# Second copy of the same check, originally written for ksh and pasted
# below the bash version.  Bug fixes: the stray command "x" that followed
# the shebang is removed, and the file's final "f" is corrected to "fi"
# (the unterminated if made the whole file a shell syntax error).
#!/bin/ksh
SID=$1
TBS=$2
if [ -z "$SID" ]; then
    echo "Inserir SID do banco"
    exit 2
elif [ -z "$TBS" ]; then
    echo "Inserir o nome da tablespace"
    exit 2
else
    USO=$(grep -w -- "$TBS" "/tmp/TBS/tbluse_$SID.tmp" | awk '{print $2}')
    TAM=$(grep -w -- "$TBS" "/tmp/TBS/tbluse_$SID.tmp" | awk '{print $2}')
    if [ "$USO" -ge 95 ]; then
        echo "CRITICAL: tbs $TBS $TAM % of use"
        exit 2
    elif [ "$USO" -ge 85 ]; then
        echo "WARNING: tbs $TBS $TAM % of use"
        exit 1
    else
        echo "OK : tbs $TBS $TAM % of use"
    fi
fi
| true
|
704d9cdaac2c536a37298c9953468747e4791865
|
Shell
|
carlblomqvist/dotfiles
|
/system/etc/acpi/handler.sh
|
UTF-8
| 3,686
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Default acpi script that takes an entry for all actions
# Dispatcher invoked by acpid: $1 is the event group/action, $2 the
# device/button id; later fields carry event data (see the ac_adapter
# and battery arms below).  Unhandled events are logged via logger.
case "$1" in
cd/play)
case "$2" in
CDPLAY)
logger 'PlayPauseButton pressed'
# /usr/bin/sudo -u carlb playerctl play-pause
#/usr/bin/amixer set Speaker on
#/usr/bin/amixer set Headphone on
sudo -u carlb amixer set Master toggle
;;
*)
logger "ACPI action undefined: $2"
;;
esac
;;
button/mute)
case "$2" in
MUTE)
logger 'MuteButton pressed'
/usr/bin/amixer set Speaker on
/usr/bin/amixer set Headphone on
#/usr/bin/sudo -u carlb amixer set Master toggle
;;
*)
logger "ACPI action undefined: $2"
;;
esac
;;
button/volumeup)
case "$2" in
VOLUP)
logger 'VolumeUp pressed'
# /usr/bin/sudo -u carlb amixer set Master 5+
;;
*)
logger "ACPI action undefined: $2"
;;
esac
;;
button/volumedown)
case "$2" in
VOLDN)
logger 'VolumeDown pressed'
# /usr/bin/sudo -u carlb amixer set Master 5-
;;
*)
logger "ACPI action undefined: $2"
;;
esac
;;
video/brightnessup|video/brightnessdown)
case "$2" in
BRTDN)
#backlight -10%
logger 'BrightnessDownButton pressed'
;;
BRTUP)
logger 'BrightnessUpButton pressed'
#backlight +10%
;;
*)
logger "ACPI action undefined: $2"
;;
esac
;;
button/power)
case "$2" in
PBTN|PWRF)
logger 'PowerButton pressed'
;;
*)
logger "ACPI action undefined: $2"
;;
esac
;;
button/sleep)
case "$2" in
SLPB|SBTN)
logger 'SleepButton pressed'
;;
*)
logger "ACPI action undefined: $2"
;;
esac
;;
# $4 carries the adapter status word — 00000000 unplugged, 00000001 plugged
ac_adapter)
case "$2" in
AC|ACAD|ADP0)
case "$4" in
00000000)
logger 'AC unpluged'
;;
00000001)
logger 'AC pluged'
;;
esac
;;
*)
logger "ACPI action undefined: $2"
;;
esac
;;
battery)
case "$2" in
BAT0)
case "$4" in
00000000)
logger 'Battery online'
;;
00000001)
logger 'Battery offline'
;;
esac
;;
CPU0)
;;
*) logger "ACPI action undefined: $2" ;;
esac
;;
# lid events put open/close in $3, not $2
button/lid)
case "$3" in
close)
logger 'LID closed'
;;
open)
logger 'LID opened'
# re-apply the Swedish keyboard layout after resume-on-open
setxkbmap se
;;
*)
logger "ACPI action undefined: $3"
;;
esac
;;
*)
logger "ACPI group/action undefined: $1 / $2"
;;
esac
# vim:set ts=4 sw=4 ft=sh et:
| true
|
6d9da592a7876a2a346a060e31a5a01cd414d1f3
|
Shell
|
calllivecn/testing
|
/move_cursur.sh
|
UTF-8
| 617
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive cursor mover: reads single keys (vi-style h/j/k/l or the
# arrow keys) and emits the matching ANSI cursor-movement escape.
# q or Enter quits; Ctrl-C restores terminal settings via the trap.
# getch: read up to 8 raw bytes from the terminal with echo and
# line-buffering disabled (enough to capture an arrow-key sequence).
getch(){
stty -icanon -echo
dd if=$(tty) bs=8 count=1 2> /dev/null
stty icanon echo
}
# move <key>: translate a key into a cursor-movement escape.
# "[A".."[D" are matched against the arrow-key bytes as captured by
# getch — NOTE(review): assumes the leading ESC is handled by the
# comparison as written; confirm on the target terminal.
move(){
if [ "$1"x = "j"x -o "$1"x = "[B"x ];then
echo -en "\033[B"
elif [ "$1"x = "k"x -o "$1"x = "[A"x ];then
echo -en "\033[A"
elif [ "$1"x = "h"x -o "$1"x = "[D"x ];then
echo -en "\033[D"
elif [ "$1"x = "l"x -o "$1"x = "[C"x ];then
echo -en "\033[C"
else
echo -n "h,j,k,l or q->quit"
fi
}
# quit: restore canonical terminal mode before exiting (SIGINT handler).
quit(){
stty icanon echo
exit 0
}
trap quit SIGINT
# main loop: read one key per iteration; quit on "q" or empty (Enter).
while :
do
me=$(getch)
if [ "$me"x = "q"x -o "$me"x = ""x ];then
echo exit.
break
fi
move $me
done
| true
|
a159ae05f75f9da0503c2db2d5d18b7ac0a301f2
|
Shell
|
schmaun/ad-blocker.sh
|
/ad-blocker.sh
|
UTF-8
| 3,433
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
#================================================================================
# (C)2013 dMajo
# Title : ad-blocker.sh
# Version : V1.02.0018
# Author : dMajo (http://forum.synology.com/enu/memberlist.php?mode=viewprofile&u=69661)
# Description : Script to block add-banner servers, dns based
# Dependencies: Syno DNSServer package, sed, wget
# Usage : sh ad-blocker.sh
#================================================================================
# Version history:
# 2013.09.01 - 1.00.0001: Initial release
# 2013.09.08 - 1.00.0004: Fix: changed include target to support views
# 2013.09.12 - 1.00.0005: Added automatic zone file generation and some basic error handling
# 2014.03.29 - 1.01.0013: Added dependencies check
# 2014.03.30 - 1.02.0017: Script reorganized
# 2014.04.06 - 1.02.0018: Fix: fixed serial number in zone file generation
#================================================================================
# Define dirs
RootDir="/var/packages/DNSServer/target"
ZoneDir="${RootDir}/named/etc/zone"
ZoneDataDir="${ZoneDir}/data"
ZoneMasterDir="${ZoneDir}/master"
cd "${ZoneDataDir}"
# Check if needed dependencies exists
Dependencies="chown date grep mv rm sed wget"
MissingDep=0
for NeededDep in $Dependencies; do
    if ! hash "$NeededDep" >/dev/null 2>&1; then
        printf "Command not found in PATH: %s\n" "$NeededDep" >&2
        MissingDep=$((MissingDep+1))
    fi
done
if [ $MissingDep -gt 0 ]; then
    printf "Minimum %d commands are missing in PATH, aborting\n" "$MissingDep" >&2
    exit 1
fi
# Download the blacklist.
# Bug fix: "-O temp_dnslist" was inside the quoted URL, making wget
# fetch a bogus URL containing a space; the option now follows the URL.
# Abort if the download fails so we don't wipe the existing zone data.
wget "https://raw.githubusercontent.com/JanChristiansen/ad-blocker.sh/master/dnsconfig" -O temp_dnslist || {
    printf "Download of blacklist failed, aborting\n" >&2
    exit 1
}
# Modify Zone file path from "null.zone.file" to "/etc/zone/master/null.zone.file" in order to comply with Synology bind implementation
rm -f ad-blocker.new
sed -e 's/null.zone.file/\/etc\/zone\/master\/null.zone.file/g' "temp_dnslist" > ad-blocker.new
rm "temp_dnslist"
chown -R nobody:nobody ad-blocker.new
if [ -f ad-blocker.new ] ; then
    rm -f ad-blocker.db
    mv ad-blocker.new ad-blocker.db
fi
# Include the new zone data (idempotently: only append the include line
# if it is not already present)
if [ -f ad-blocker.db ] && [ -f null.zone.file ]; then
    grep -q 'include "/etc/zone/data/ad-blocker.db";' null.zone.file || echo 'include "/etc/zone/data/ad-blocker.db";' >> null.zone.file
    # Rebuild master null.zone.file with a YYYYMMDDNN serial for today
    cd "${ZoneMasterDir}"
    rm -f null.zone.file
    Now=$(date +"%Y%m%d")
    echo '$TTL 86400 ; one day' >> null.zone.file
    echo '@ IN SOA ns.null.zone.file. mail.null.zone.file. (' >> null.zone.file
    # echo ' 2013091200 ; serial number YYYYMMDDNN' >> null.zone.file
    echo ' '${Now}'00 ; serial number YYYYMMDDNN' >> null.zone.file
    echo ' 86400 ; refresh 1 day' >> null.zone.file
    echo ' 7200 ; retry 2 hours' >> null.zone.file
    echo ' 864000 ; expire 10 days' >> null.zone.file
    echo ' 86400 ) ; min ttl 1 day' >> null.zone.file
    echo ' NS ns.null.zone.file.' >> null.zone.file
    echo ' A 127.0.0.1' >> null.zone.file
    echo '* IN A 127.0.0.1' >> null.zone.file
fi
# Reload the server config after modifications
${RootDir}/script/reload.sh
exit 0
| true
|
336767443faaa720f5e4aa98ad6d434ae3e9361c
|
Shell
|
ericavonb/clever-helper-scripts
|
/mgo.sh
|
UTF-8
| 1,170
| 3.625
| 4
|
[] |
no_license
|
#! /bin/bash
# Connect to a Clever MongoDB instance.
# Options:
#   -l [port]  connect to localhost (optionally on a given port); skips VPN check
#   -w         write-prod host
#   -r         read-prod host
#   -c <db>    database name (default: clever)
# With no host option, falls back to "mongodb<arg|jefff>-dev.ops.clever.com".
DB='clever'
VPN="true"
while getopts ":l:wrc:" OPTION; do
case "$OPTION" in
"l")
# -l declares a required argument, but the port is optional in
# practice.  This eval compares the word getopts consumed with
# $OPTARG: if -l actually swallowed the next token as its
# "argument", treat -l as bare localhost and rewind OPTIND so the
# token is re-parsed.  NOTE(review): fragile trick — confirm it
# handles all orderings before changing.
if eval [[ "$"$(($OPTIND-1))"" = "$OPTARG" ]]; then
URL="localhost"
((--OPTIND))
else
URL="--port $OPTARG localhost"
fi
VPN="false"
;;
"w")
URL="mongodb-write-prod.ops.clever.com"
;;
"r")
URL="mongodb-read-prod.ops.clever.com"
;;
"c")
DB="$OPTARG"
;;
":")
# missing required argument: -l with nothing after it means localhost;
# -c with nothing is an error
if [[ "$OPTARG" == "l" ]]; then
VPN="false"
URL="localhost"
else
echo "no database given with -c option." >&2
fi
;;
"?")
echo "Invalid option: -$OPTARG" >&2
;;
esac
done
shift $(( OPTIND - 1 ));
# Remote hosts require the VPN; vpn-check.sh prints true/false.
if [ $VPN == "true" ] && [ $(/Users/ericavonbuelow/dev/scripts/vpn-check.sh) == "false" ]; then
echo "ERROR: Please connect to the VPN before connecting to that database."
exit 0
fi
# Default URL: mongodb<first positional arg, or "jefff">-dev.ops.clever.com
mongo ${URL:-"mongodb"`echo ${1:-'jefff'}'-dev'`".ops.clever.com"}/$DB
| true
|
87aa4956bf9aeb867eea48641cf7a55b8dfeff82
|
Shell
|
DavorKandic/sh_scripting_practice
|
/f-looping.sh
|
UTF-8
| 898
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Various ways for writing for-loops
# Teaching demo: each section prints a label and then runs one loop style.
clear
echo "First way (for i in 1 2 3 4 5):"
for i in 1 2 3 4 5
do
echo "Hello!"
done
echo
echo "Second way (for i in {1..5}):"
for i in {1..5}
do
echo "Holla!"
done
echo
echo "Third way (with step/increment):"
for i in {0..20..2} # {start..end..step}
do
echo $i
done
echo
echo "Fourth way (c-style, using double brackets!):"
for (( i=0; i<5; i++ ))
do
echo $i
done
echo
echo "Fifth way (iterating over array):"
arr=("Batman" "Superman" "Spiderman" "Aquaman" "Flash")
for i in ${arr[@]}
do
echo "$i is superhero"
done
echo
echo "Filtering with 'if' and using 'break':"
# stop the loop entirely once i exceeds 5
for (( i=0; i<=10; i++ ))
do
if [ $i -gt 5 ]
then
break
fi
echo $i
done
echo
echo "Skipping iterations using 'continue':"
# skip just the iterations where i is 3 or 7
for (( i=0; i<=10; i++ ))
do
if [ $i -eq 3 ] || [ $i -eq 7 ]
then
continue
fi
echo "Value: $i is filtered through 'if'"
done
echo
| true
|
36fa499a54f0cb58147f96aa0e6737d88e5379a2
|
Shell
|
eightpigs/dotfiles
|
/profiles.d/zsh
|
UTF-8
| 7,023
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/zsh
# plugin manager
# unplugged: https://github.com/mattmc3/zsh_unplugged
# -----------------------------------------------------------------------------
ZPLUGINDIR=$HOME/.zsh/plugins
# clone a plugin, find an init.zsh, source it, and add it to your fpath
function plugin-load () {
local giturl="$1"
# nested expansion: strip the directory part, then a trailing ".git"
local plugin_name=${${giturl##*/}%.git}
local plugindir="${ZPLUGINDIR:-$HOME/.zsh/plugins}/$plugin_name"
# clone if the plugin isn't there already
if [[ ! -d $plugindir ]]; then
command git clone --depth 1 --recursive --shallow-submodules $giturl $plugindir
if [[ $? -ne 0 ]]; then
echo "plugin-load: git clone failed for: $giturl" >&2 && return 1
fi
fi
# symlink an init.zsh if there isn't one so the plugin is easy to source
# (the "(N)" glob qualifier makes unmatched patterns expand to nothing)
if [[ ! -f $plugindir/init.zsh ]]; then
local initfiles=(
# look for specific files first
$plugindir/$plugin_name.plugin.zsh(N)
$plugindir/$plugin_name.zsh(N)
$plugindir/$plugin_name(N)
$plugindir/$plugin_name.zsh-theme(N)
# then do more aggressive globbing
$plugindir/*.plugin.zsh(N)
$plugindir/*.zsh(N)
$plugindir/*.zsh-theme(N)
$plugindir/*.sh(N)
)
if [[ ${#initfiles[@]} -eq 0 ]]; then
echo "plugin-load: no plugin init file found" >&2 && return 1
fi
command ln -s ${initfiles[1]} $plugindir/init.zsh
fi
# source the plugin
source $plugindir/init.zsh
# modify fpath
fpath+=$plugindir
[[ -d $plugindir/functions ]] && fpath+=$plugindir/functions
}
# load all plugins listed in the global $plugins array (set by caller),
# then unset the temporaries
function plugins-load() {
for repo in $plugins; do
# plugin-load https://github.com/${repo}.git
plugin-load git@github.com:${repo}.git
done
unset repo
unset plugins
}
# if you want to compile your plugins you may see performance gains
function plugin-compile () {
local plugindir="${ZPLUGINDIR:-$HOME/.zsh/plugins}"
autoload -U zrecompile
local f
for f in $plugindir/**/*.zsh{,-theme}(N); do
zrecompile -pq "$f"
done
}
# pull the latest commit in every cloned plugin
# ("(/)" restricts the glob to directories; ${d:h:t} is the repo name)
function plugin-update () {
local plugindir="${ZPLUGINDIR:-$HOME/.zsh/plugins}"
for d in $plugindir/*/.git(/); do
echo "Updating ${d:h:t}..."
command git -C "${d:h}" pull --ff --recurse-submodules --depth 1 --rebase --autostash
done
}
# https://github.com/unixorn/awesome-zsh-plugins
plugins=(
# core plugins
mafredri/zsh-async
zsh-users/zsh-autosuggestions
zsh-users/zsh-completions
zsh-users/zsh-history-substring-search
# user plugins
le0me55i/zsh-extract
)
plugins-load
# options
# https://zsh.sourceforge.io/Doc/Release/Concept-Index.html#Concept-Index
# -----------------------------------------------------------------------------
setopt PROMPT_SUBST
# auto cd when input dir path
setopt AUTO_CD
setopt AUTO_PUSHD
# /u/l/s/z => /usr/local/share/zsh/
setopt COMPLETE_IN_WORD
setopt AUTO_LIST
setopt AUTO_MENU
# setopt MENU_COMPLETE
# history
HISTSIZE=99999
SAVEHIST=99999
HISTFILE=~/.zsh_history
setopt APPEND_HISTORY
setopt HIST_EXPIRE_DUPS_FIRST
setopt HIST_FCNTL_LOCK
setopt SHARE_HISTORY
setopt INC_APPEND_HISTORY
setopt HIST_FIND_NO_DUPS
# history unique
# setopt HIST_IGNORE_ALL_DUPS
# setopt HIST_IGNORE_DUPS
autoload -U compinit && compinit
autoload -U promptinit && promptinit
autoload -U colors && colors
autoload -Uz vcs_info
# remove core dump file when crashes
limit coredumpsize 0
# auto complete
# https://gist.github.com/chriscchau/5829283
# https://zsh.sourceforge.io/Doc/Release/Completion-System.html
# -----------------------------------------------------------------------------
# activate approximate completion, but only after regular completion (_complete)
zstyle ':completion:::::' completer _complete _approximate
# limit to 2 errors
zstyle ':completion:*:approximate:*' max-errors 2
zstyle ':completion:*' verbose yes
zstyle ':completion:*' menu select
zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*' select-prompt '%SSelect: lines: %L matches: %M [%p]'
# to try first simple completion and, if that generates no matches, case-insensitive completion:
zstyle ':completion:*' matcher-list '' 'm:{a-zA-Z}={A-Za-z}'
zstyle ':completion:*:*:default' force-list always
# complete group
zstyle ':completion:*:matches' group 'yes'
zstyle ':completion:*' group-name ''
zstyle ':completion:*:options' description 'yes'
zstyle ':completion:*:options' auto-description '%d'
zstyle ':completion:*:descriptions' format $'\e[01;33m -- %d --\e[0m'
zstyle ':completion:*:messages' format $'\e[01;35m -- %d --\e[0m'
zstyle ':completion:*:warnings' format $'\e[01;31m -- No Matches Found --\e[0m'
zstyle ':completion:*:corrections' format $'\e[01;32m -- %d (errors: %e) --\e[0m'
# path complete
zstyle ':completion:*' expand 'yes'
zstyle ':completion:*' squeeze-shlashes 'yes'
# kill complete
zstyle ':completion:*:*:kill:*' menu yes select
zstyle ':completion:*:*:*:*:processes' force-list always
zstyle ':completion:*:processes' command 'ps -au$USER'
# Version-Control
# https://zsh.sourceforge.io/Doc/Release/User-Contributions.html#Version-Control-Information
zstyle ':vcs_info:*' actionformats '%F{5}[%F{2}%b%F{3}|%F{1}%a%F{5}]%f '
zstyle ':vcs_info:*' formats '%F{3}[%F{2}%b%F{3}]%f '
zstyle ':vcs_info:*' enable git
precmd () { vcs_info }
# prompt
# https://www.masterzen.fr/2009/04/19/in-love-with-zsh-part-one/
# https://zsh.sourceforge.io/Doc/Release/Prompt-Expansion.html
# https://en.wikipedia.org/wiki/ANSI_escape_code
# https://chrisyeh96.github.io/2020/03/28/terminal-colors.html
# -----------------------------------------------------------------------------
# eg. ~/Workspace/git/dotfiles [master] →
PROMPT='%{$fg[blue]%}%~ %{$fg[blue]%}${vcs_info_msg_0_}%(?.%{$fg_bold[blue]%}→.%{$fg_bold[red]%}→) %{$reset_color%}'
# https://www.masterzen.fr/2009/04/19/in-love-with-zsh-part-one/
# let's complete known hosts and hosts from ssh's known_hosts file
hosts=($(( ( [ -r .ssh/known_hosts ] && awk '{print $1}' .ssh/known_hosts | tr , '\n');) | sort -u) )
zstyle ':completion:*' hosts $hosts
# keymaps
# https://zsh.sourceforge.io/Doc/Release/Zsh-Line-Editor.html#Keymaps
# -----------------------------------------------------------------------------
# use emacs key
bindkey -e
# shift-tab: go backward in menu (invert of tab)
bindkey '^[[Z' reverse-menu-complete
# double esc: insert sudo to line start
sudo-command-line() {
[[ -z $BUFFER ]] && zle up-history
[[ $BUFFER != sudo\ * ]] && BUFFER="sudo $BUFFER"
zle end-of-line # cursor to end
}
zle -N sudo-command-line
bindkey "\e\e" sudo-command-line
# edit command line
autoload -U edit-command-line
zle -N edit-command-line
bindkey '^x^e' edit-command-line
# only the past commands matching the current line up to the current cursor position will be shown when Up or Down keys are pressed.
autoload -U up-line-or-beginning-search
autoload -U down-line-or-beginning-search
zle -N up-line-or-beginning-search
zle -N down-line-or-beginning-search
bindkey "^[[A" up-line-or-beginning-search
bindkey "^[[B" down-line-or-beginning-search
| true
|
2ec91fa94233b8bd6c321ef360f0c84052edaeae
|
Shell
|
AramisLi/MovieGetter-Server
|
/src/test/test1.sh
|
UTF-8
| 163
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Small bash scratch script: array length, arithmetic, command
# substitution, a loop over array elements, and a network fetch.
array=(a b c d)
echo "${#array[@]}"   # element count (4)
echo "${#array[*]}"   # same count via the other subscript form
echo $((2 + 5))       # arithmetic expansion instead of legacy `expr`
echo "$(date)"
# Bug fix: the original `for i in array` looped over the literal word
# "array"; expand the array's elements instead.
for i in "${array[@]}"
do
    echo "${i}"
done
curl www.baidu.com
| true
|
5be4da22776ce4d4dab15d5cbf14f8fb6401be5e
|
Shell
|
fluffypony/monerodo
|
/setup_pool_wallet.sh
|
UTF-8
| 3,849
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
#MONERODO script to setup pool wallet
# Interactively creates a new pool wallet, records its name/password in
# an upstart .conf so simplewallet starts on boot, and rewrites the
# poolAddress entry in config.json.
echo "This script configures your Monerodo with a new pool wallet."
echo "In order for your Monerodo to work properly, your pool wallet password is stored in a configuration file"
echo "It is HIGHLY recommended that your pool wallet is different than your primary wallet."
echo "Your pool wallet is where pool fees are deposited and from where pool payouts are made"
echo "You will now create a new pool wallet. Please remember the name and password for your wallet."
echo ""
echo "When you are done creating the wallet, type exit and hit enter"
echo ""
echo "Press enter to continue"
read input
./monero_simplewallet.sh
clear
echo "You have now created a new pool wallet."
# Loop until the user confirms the wallet name and password.
test_add=0
while ((test_add == 0))
do
    echo "For your reference, these are the available wallets in your wallet directory."
    echo "------------------------------------------------"
    cd /monerodo/wallets/
    dir *.bin
    echo "------------------------------------------------"
    echo ""
    echo "Please enter the name of your pool wallet and then press enter - example: mypoolwallet.bin"
    read poolwallet
    echo ""
    echo "Please enter the password for your poolwallet and then press enter"
    read poolpass
    echo "You have entered $poolwallet and $poolpass. Are these correct? (y)yes or (n)o"
    read info_correct
    case "$info_correct" in
        # Bug fix: the original ran `echo "..." read input` (no command
        # separator), printing the words "read input" and never pausing.
        n) echo "please try again. Press enter to continue"; read input ;;
        y) test_add=1 ;;
    esac
    clear
    #The below check was commented out due to the poor performance of the refresh. Could work back in thanks to new optimizations.
    #echo "We will now test your information to confirm that your pool wallet works properly"
    #echo "Simplewallet will load and attempt to open your wallet using the information you provided"
    #echo "If the wallet loads correctly, please type exit to exit simplewallet and return to setup"
    #echo "If the wallet does not load correctly, please press ctrl+c to return to this menu and try again"
    #echo ""
    #/home/bob/bitmonero/build/release/bin/simplewallet --wallet-file $poolwallet --password $poolpass --daemon-host $monerodo_ip;;
    #echo "-----------------------------------------------"
    #echo "Did the wallet load properly? please enter (y)es or (n)o"
    #read poolrun
    #case "$poolrun" in
    # n) test_new_wallet=0;;
    # y) test_new_wallet=(test_new_wallet+1);;
    #esac
    #clear
done
clear
echo "You have succesfully created a pool wallet. We will now create the .conf file that will load simplewallet on boot."
echo "Press enter to continue. At some point during the process, you will be asked to enter your UNIX password."
read input2
# WRITE CONF FILE AND MOVE TO /etc/init/
sudo service mos_monerowallet stop
rm /home/bob/monerodo/conf_files/mos_monerowallet.conf
# NOTE(review): $current_ip is never set in this script — confirm it is
# exported by the caller, otherwise --daemon-host gets an empty value.
echo -e "start on started mos_bitmonero \n\
stop on stopping mos_bitmonero \n\
console log \n\
respawn \n\
respawn limit 10 10 \n\
exec simplewallet --daemon-host $current_ip --rpc-bind-port 8082 --rpc-bind-ip 127.0.0.1 --wallet-file /monerodo/wallets/$poolwallet --password $poolpass \n\
" > /home/bob/monerodo/conf_files/mos_monerowallet.conf
# NOTE(review): the copy below runs from the current directory (left at
# /monerodo/wallets by the loop), not from conf_files — verify the path.
sudo cp mos_monerowallet.conf /etc/init/
# modify pool address in config.json in local monerodo directory and copy to pool directory
old_pool="$(awk '{print;}' /monerodo/pool_add.txt)"
new_pool="$(awk '{print;}' /monerodo/wallets/$poolwallet.address.txt)"
# Bug fix: the original used single quotes (so $new_pool was written
# literally) and omitted -i (so config.json was never modified).
sudo sed -i "s/.*poolAddress.*/\"poolAddress\": \"$new_pool\",/" config.json
#Old style, left in incase newstyle bugs out
#if [[ $old_pool != $new_pool ]]; then
# sed -i -e 's/$old_pool/$new_pool/g' /home/bob/monerodo/conf_files/config.json
# sudo cp /home/bob/monerodo/conf_files/config.json /monerodo/sam_pool/
# rm /home/bob/monerodo/conf_files/pool_add.txt
# echo $new_pool > /home/bob/monerodo/conf_files/pool_add.txt
# sudo cp /home/bob/monerodo/conf_files/pool_add.txt /monerodo/
#fi
| true
|
b23d37907a575435067acf1a8c2016a51ab25290
|
Shell
|
bgoonz/UsefulResourceRepo2.0
|
/MY_REPOS/WEB-DEV-TOOLS-HUB/MAIN/3_bash-utilities/dotfiles-master/dotfiles-master/chromium.sh
|
UTF-8
| 2,065
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
# Chromium developer helper functions (build / run / deploy shortcuts).
# NOTE(review): `$argv` (in cr) and `$PWD/(git ...)` (in dtb) are
# fish/zsh idioms and will not behave as intended under plain bash —
# confirm which shell sources this file.
# usage:
# after `git pull`, a full build is now `depsbcr`
# and after changes.. a `bcr` will recompile and relaunch chrome.
# deps: sync checkout dependencies with NaCl disabled.
function deps () {
# --reset drops local changes. often great, but if making changes inside v8, you don't want to use --reset
env GYP_DEFINES=disable_nacl=1 gclient sync --delete_unversioned_trees --reset
}
# hooks: re-run gclient hooks only (no sync).
function hooks () {
env GYP_DEFINES=disable_nacl=1 gclient runhooks
}
# b: build the `chrome` target in out/Default from anywhere in the repo.
function b () {
local dir=./$(git rev-parse --show-cdup)/out/Default
# autoninja will automatically determine your -j number based on CPU cores
local cmd="ninja -C $(realpath $dir) -j900 -l 60 chrome"
echo " > $cmd"
eval "$cmd"
if [ $? -eq 0 ]; then
printf "\n✅ Chrome build complete!\n"
fi
}
# dtb: build DevTools frontend in out/Default.
function dtb () {
local dir_default=$(grealpath $PWD/(git rev-parse --show-cdup)out/Default/)
local cmd="autoninja -C "$dir_default""
echo " > $cmd"
eval $cmd
}
# you can also add any extra args: `cr --user-data-dir=/tmp/lol123"
# cr: launch the locally built Chromium (macOS bundle path).
function cr () {
local dir=$(git rev-parse --show-cdup)/out/Default
local cmd="./$dir/Chromium.app/Contents/MacOS/Chromium $argv"
echo " > $cmd"
eval "$cmd"
}
# dtcr: launch a pinned Chromium with the locally built DevTools frontend.
function dtcr () {
local crpath="$HOME/chromium-devtools/devtools-frontend/third_party/chrome/chrome-mac/Chromium.app/Contents/MacOS/Chromium"
local dtpath=$(realpath out/Default/resources/inspector)
local cmd="$crpath --custom-devtools-frontend=file://$dtpath --user-data-dir=$HOME/chromium-devtools/dt-chrome-profile"
echo " > $cmd"
eval $cmd
}
# gom: start the goma distributed-compile client.
function gom () {
# these probably dont make sense for everyone.
export GOMAMAILTO=/dev/null
export GOMA_ENABLE_REMOTE_LINK=yes
goma_ctl ensure_start
}
# Composite shortcuts: build then run, optionally syncing deps first.
function dtbcr () {
if dtb; then
dtcr
fi
}
function bcr () {
if b; then
cr
fi
}
function depsb () {
if deps; then
gom
b
fi
}
function depsbcr () {
if deps; then
gom
bcr
fi
}
function hooksbcr () {
if hooks; then
gom
bcr
fi
}
| true
|
c402cda2b52a97d220d11b2a2527b189f07b6a05
|
Shell
|
kawish1/site
|
/source/friends/maktab/scripting/prevnext.sh
|
UTF-8
| 761
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build a links table for lecture videos.
# Usage: prevnext.sh <infile> <outfile>
# Splits the input into id / chapter-lecture / description columns, then
# rotates the chapter column by one line in each direction to produce
# "next" and "previous" columns, and pastes everything back together.
infile=$1;
outfile=$2
#The first column of input file is the videoid.
awk '{print $1}' $infile > videoid.txt
# The second and third columns contain chapter and lecture numbers.
awk '{print $2 " " $3} ' $infile > chapcol.txt
# everything after column 3 is the free-text description
awk '{$1=$2=$3=""; print $0}' $infile > description.txt
# now put the first line of chapcol.txt at its last line and save it as next.txt
ed -s chapcol.txt << EOF
1m\$
w next.txt
EOF
#Now put the last line of chapcol.txt as its first line and save it as prev.txt
ed -s chapcol.txt << EOF
\$m0
w prev.txt
EOF
# now we have all columns for the final links file. Paste them.
paste videoid.txt chapcol.txt next.txt prev.txt description.txt > $outfile
# clean up the intermediate column files
rm videoid.txt chapcol.txt next.txt prev.txt description.txt
| true
|
1d853bd5ea88ecb34b2274caa32e39f95feba9d0
|
Shell
|
ryantanaka/pegasus
|
/release-tools/tutorial_vm/scripts/aws/cloud-init.sh
|
UTF-8
| 2,342
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Install and configure cloud-init for the Pegasus tutorial VM image:
# writes the main cloud.cfg, a default-user drop-in, and a growpart
# drop-in.  Heredoc bodies below are written verbatim to the files.
set -e
yum -y install cloud-init
# Main cloud-init configuration: module lists and system info.
cat > /etc/cloud/cloud.cfg << EOT
users:
 - default
disable_root: 1
ssh_pwauth: 0
locale_configfile: /etc/sysconfig/i18n
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
resize_rootfs_tmp: /dev
ssh_deletekeys: 0
ssh_genkeytypes: ~
syslog_fix_perms: ~
cloud_init_modules:
 - migrator
 - bootcmd
 - write-files
 - growpart
 - resizefs
 - set_hostname
 - update_hostname
 - update_etc_hosts
 - rsyslog
 - users-groups
 - ssh
cloud_config_modules:
 - mounts
 - locale
 - set-passwords
 - yum-add-repo
 - package-update-upgrade-install
 - timezone
 - puppet
 - chef
 - salt-minion
 - mcollective
 - disable-ec2-metadata
 - runcmd
cloud_final_modules:
 - rightscale_userdata
 - scripts-per-once
 - scripts-per-boot
 - scripts-per-instance
 - scripts-user
 - ssh-authkey-fingerprints
 - keys-to-console
 - phone-home
 - final-message
system_info:
 distro: rhel
 paths:
 cloud_dir: /var/lib/cloud
 templates_dir: /etc/cloud/templates
 ssh_svcname: sshd
EOT
# Drop-in: the "tutorial" default user with passwordless sudo.
cat > /etc/cloud/cloud.cfg.d/00-default_user.cfg << EOT
system_info:
 default_user:
 name: tutorial
 lock_passwd: false
 gecos: Pegasus Tutorial User
 groups: [wheel, adm, systemd-journal]
 sudo: ["ALL=(ALL) NOPASSWD:ALL"]
 shell: /bin/bash
EOT
# Drop-in: grow the root partition automatically on first boot.
cat > /etc/cloud/cloud.cfg.d/01-growpart.cfg << EOT
#cloud-config
#
# growpart entry is a dict, if it is not present at all
# in config, then the default is used ({'mode': 'auto', 'devices': ['/']})
#
# mode:
# values:
# * auto: use any option possible (any available)
# if none are available, do not warn, but debug.
# * growpart: use growpart to grow partitions
# if growpart is not available, this is an error.
# * off, false
#
# devices:
# a list of things to resize.
# items can be filesystem paths or devices (in /dev)
# examples:
# devices: [/, /dev/vdb1]
#
# ignore_growroot_disabled:
# a boolean, default is false.
# if the file /etc/growroot-disabled exists, then cloud-init will not grow
# the root partition. This is to allow a single file to disable both
# cloud-initramfs-growroot and cloud-init's growroot support.
#
# true indicates that /etc/growroot-disabled should be ignored
#
growpart:
 mode: auto
 devices: ['/']
 ignore_growroot_disabled: false
EOT
| true
|
24391a813234402a1b99c8aca2eb2669c60405fc
|
Shell
|
simhaonline/le-cms
|
/verify-http.sh
|
UTF-8
| 2,101
| 3.859375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Interactively verify a domain's HTTP/HTTPS setup:
#   1. http://DOMAIN must permanently redirect (301) to HTTPS.
#   2. https://DOMAIN must answer 200 with a valid certificate.
#
# curl's header output is captured in le-cms_httpstatus; curl's own error
# messages (DNS failures, TLS errors, ...) go to the --stderr capture file
# httpstatus.  The original script parsed the wrong file for curl errors
# and left one capture file behind per section; both are fixed here.
printf "\nReady to check HTTP and HTTPS sites.\n\n"
read -p "Please enter the domain name, then press enter: " DOMAIN
## Format DOMAIN in all lowercase (use %s so user input is never treated
## as a printf format string)
DOMAIN=$(printf '%s' "$DOMAIN" | tr "{A-Z}" "{a-z}")
## Confirm domain name
printf "\n"
read -n1 -rsp "Is this the correct domain? $DOMAIN [Y|N] " CONFIRM
## Format response in all uppercase
CONFIRM=$(printf '%s' "$CONFIRM" | tr "{y}" "{Y}")
if [[ $CONFIRM == Y ]]
then
  # check if http permanently redirects to https
  curl -sSLI --stderr httpstatus "http://$DOMAIN" > le-cms_httpstatus
  awk 'BEGIN {
  RS="\n"
  }
  /^HTTP/{
    if ((NR == 1) && ($2 == 301)) {
      printf("%s %s\n", "\nHTTP redirecting to HTTPS - OK: status", $2)
    } else if (NR != 1) {
      # ignore other HTTP sections
    } else {
      printf("%s %s\n", "\nHTTP redirecting to HTTPS - FAILED: status", $2 ", expecting 301")
    }
  } ' le-cms_httpstatus
  # check for typical errors; curl writes them to its --stderr capture file
  # (httpstatus), not to the header output, so parse that file
  awk 'BEGIN {
  RS="\n"
  }
  /^curl/{
    if ((NR == 1) && ($2 == "(6)")) {
      printf("%s\n", "\nHTTP redirecting to HTTPS - FAILED: Could not resolve host")
    } else if ((NR == 1) && ($2 == "(60)")) {
      printf("%s\n", "\nHTTP redirecting to HTTPS - OK")
    }
  } ' httpstatus
  # remove both capture files before the next check
  rm -f le-cms_httpstatus httpstatus
  # check if https is responding
  curl -sSI --stderr httpstatus "https://$DOMAIN" > le-cms_httpstatus
  awk 'BEGIN {
  RS="\n"
  }
  /^HTTP/{
    if ((NR == 1) && ($2 == 200)) {
      printf("%s %s\n", "HTTPS Responding - OK: status", $2)
    } else if (NR != 1) {
      # ignore other HTTP sections
    } else {
      printf("%s %s\n", "HTTPS Responding - FAILED", $2)
    }
  } ' le-cms_httpstatus
  # check for typical errors reported by curl itself
  awk 'BEGIN {
  RS="\n"
  }
  /^curl/{
    if ((NR == 1) && ($2 == "(35)")) {
      printf("%s\n", "HTTPS Responding - FAILED")
    } else if ((NR == 1) && ($2 == "(6)")) {
      printf("%s\n", "HTTPS responding - FAILED: Could not resolve host")
    } else if ((NR == 1) && ($2 == "(60)")) {
      printf("%s\n", "HTTPS responding, but certificate is invalid for your domain name - FAILED")
    }
  } ' httpstatus
  # clean up both capture files (the original removed only one of them)
  rm -f le-cms_httpstatus httpstatus
else
  printf "\nCancelling...\n"
  exit 0
fi
| true
|
87cb90c410a4b2e835fb06e0352682e0e55b304b
|
Shell
|
Amund211/dotfiles
|
/scripts/audiostream_discord.sh
|
UTF-8
| 1,867
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/sh
# Build a PulseAudio routing graph for streaming application audio plus the
# microphone to Discord: a null "application_sink" receives app audio, which
# is looped both to the speakers and into "application_mix_sink" together
# with the mic; that mix is then remapped into a selectable source.
set -eu
# Front jack
output_sink='alsa_output.pci-0000_09_00.4.analog-stereo'
# Screen speakers
#output_sink='alsa_output.pci-0000_07_00.1.hdmi-stereo-extra2'
# Denoised mic
mic_source='denoised'
# Raw mic
# Broken atm, need to make it a sink
# mic_sink='alsa_input.usb-Razer_Inc_Razer_Seiren_Mini_UC2045L03206312-00.mono-fallback'
### Application mix ######################
# Sink that we can play application audio to
pacmd load-module module-null-sink sink_name=application_mix_sink
pacmd update-sink-proplist application_mix_sink device.description='"application mix sink"'
pacmd load-module module-null-sink sink_name=application_sink
pacmd update-sink-proplist application_sink device.description='"Application sink"'
# Mix the mic and application audio
# pacmd load-module module-combine-sink sink_name=application_mix_sink slaves=application_sink,$mic_sink
# pacmd update-sink-proplist application_mix_sink device.description='"application mix sink"'
# Loop back the application audio to our main sink (so we can hear it)
pacmd load-module module-loopback latency_msec=20 source=application_sink.monitor sink=$output_sink
# Loop back the application audio to the mix sink (so we can hear it)
pacmd load-module module-loopback latency_msec=20 source=application_sink.monitor sink=application_mix_sink
# Loop back the mic audio to the mix
pacmd load-module module-loopback latency_msec=20 source=$mic_source sink=application_mix_sink
# Remap the sink to a source so we can play it back for others
pacmd load-module module-remap-source master=application_mix_sink.monitor source_name=application_mix_source
pacmd update-source-proplist application_mix_source device.description='"Application mix source"'
### Application mix ######################
# NOTE(review): source_volume is currently unused -- the pactl call that
# would apply it is commented out below.
source_volume='125%'
# pactl set-source-volume application_mix_source "$source_volume"
| true
|
f70705edb326dc934ff51589c3bf67520c269d81
|
Shell
|
bywen/shell
|
/user_count.sh
|
UTF-8
| 353
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Count the regular (non-system) users on this machine, i.e. the
# /etc/passwd entries whose numeric UID (field 3) is 1000 or higher,
# and report the count (or say there are none).
n=$(awk -F: '$3 >= 1000 { c++ } END { print c + 0 }' /etc/passwd)
if [ "$n" -gt 0 ]; then
  echo "普通用户有:$n"
else
  echo "没有普通用户"
fi
| true
|
7988b50f6109270ce3e62a26bae8bd2e3881526c
|
Shell
|
JASalorte/TFG---Emergiendo-con-el-Sol---Docker
|
/BackupFiles.sh
|
UTF-8
| 525
| 3.6875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Back up the extracted data of a running Docker container:
#   BackupFiles.sh <container-id> <save-directory>
# Runs the container's extract script, copies /extract out of the
# container into the given directory, then removes it from the container.
IDCONTAINER=$1
SAVEDIRECTORY=$2

# The container must be running (docker ps -q only lists running ones).
CONTAINEREXIST=$(docker ps -q | grep "$IDCONTAINER")

if [ "$CONTAINEREXIST" == "" ]; then
    echo El contenedor no existe.
    # exit non-zero so callers can detect the failure (the original
    # exited with status 0 here)
    exit 1
fi

# The destination directory must already exist...
if [ ! -d "$SAVEDIRECTORY" ]; then
    echo El directorio debe existir.
    exit 1
fi

# ...and be readable and writable by us.
if [ ! -r "$SAVEDIRECTORY" ] || [ ! -w "$SAVEDIRECTORY" ]; then
    echo No tenemos permiso para escribir en ese directorio.
    exit 1
fi

docker exec "$IDCONTAINER" /home/admin/extract.sh
docker cp "$IDCONTAINER":/extract "$SAVEDIRECTORY"
docker exec "$IDCONTAINER" rm -rdf /extract
| true
|
6b71a9bf19fa918c6a3f8cb20a41f81045538858
|
Shell
|
hornet-gt/hornet
|
/compiler-util/uf_graph_dowload.sh
|
UTF-8
| 797
| 4.03125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Download SuiteSparse/UF sparse-matrix (MatrixMarket) graphs:
#   ./uf_graph_download <directory to download> <market graph link 1> ...
# Each .tar.gz is fetched, its .mtx file moved into the target directory,
# and all intermediate files removed.

# Abort with the failing step's status if the previous command failed.
# The status must be captured on entry: the original did "exit $?" after
# an echo, which always exited with the echo's status (0).
function error {
   local status=$?
   if [ $status -ne 0 ]; then
      echo "! Error: "$@
      exit $status
   fi
}
if [ "$1" = "--help" ]; then
    echo -e "\n./uf_graph_download <directory to download> <market graph link 1> <market graph link 2> ...\n"
    exit
fi

dataset_dir=$1

for link in $@; do
    # the first argument is the target directory, not a download link
    if [ $link == $1 ]; then
        continue
    fi
    wget $link -P "$dataset_dir"
    error "wget"
    # last path component of the URL, e.g. graph.tar.gz
    file_name=`echo $link | rev | cut -d'/' -f 1 | rev`
    tar xf "$dataset_dir/$file_name" -C "$dataset_dir"
    error "tar xf" $file_name
    # strip the ".tar.gz" suffix (7 characters)
    graph_name=${file_name::-7}
    mv "$dataset_dir/$graph_name/$graph_name.mtx" "$dataset_dir"
    error "mv" $graph_name
    rm "$dataset_dir/$file_name"
    error "rm" $graph_name
    rm -r "$dataset_dir/$graph_name"
    error "rm -r" $graph_name
done
| true
|
83f4a05cc183f5fb2818e8dc7321e1b83ac0c363
|
Shell
|
ignisphaseone/aporeto
|
/samples/problem1/bash_example.sh
|
UTF-8
| 2,465
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# Example argument-parsing script: writes a list of US states to the file
# given via --create-file=<name>, optionally prompting before overwrite.
# Flags: --no-prompt (overwrite silently), --verbose (progress messages).
# If we don't have any args, or if we have help args, print the help message.
# NOTE(review): [ -z $1 ] is unquoted -- an argument containing spaces
# would break the test; [ -z "$1" ] would be safer.
if [ -z $1 ] || [ "$1" == '-h' ] || [ "$1" == '--help' ]; then
echo 'usage: bash_example.sh --create-file=<filename> [--no-prompt] [--verbose]'
exit 1
fi
# Check every argument. If any of them match what we're looking for, set a flag.
for var in "$@"; do
case $var in
--verbose)
VERBOSE=true
;;
--no-prompt)
NOPROMPT=1
;;
--create-file=*)
# If you find something that starts with `--create-file=`, then use bash string splitting to split it
# based on the =.
IFS='='; arrIN=($var); unset IFS;
# If you split and get an empty string, reprint usage.
if [ -z "${arrIN[1]}" ]; then
echo 'usage: bash_example.sh --create-file=<filename> [--no-prompt] [--verbose]'
exit 1
fi
# If you split and get a string of any sort (aka here), go time.
OFILENAME="${arrIN[1]}"
;;
esac
done
# No --create-file= argument was seen at all: reprint usage.
if [ -z $OFILENAME ]; then
echo 'usage: bash_example.sh --create-file=<filename> [--no-prompt] [--verbose]'
exit 1
fi
# File name check. Does it exist?
if [ -f $OFILENAME ]; then
# Verbose check.
# NOTE(review): [ $VERBOSE ] tests whether the expansion is non-empty;
# it works because VERBOSE is either unset or "true".
if [ $VERBOSE ]; then
echo 'File already exists'
fi
# Prompt loop. Only break on break (y) or exit (n).
while true; do
if [ "$NOPROMPT" = 1 ]; then
break;
fi
read -p "File exists. Overwrite (y/n) ? " choice
case $choice in
y)
rm $OFILENAME
if [ $VERBOSE ]; then
echo "File removed"
fi
break
;;
n)
exit 1
;;
esac
done
fi
# Do the thing!  (unquoted STATES delimiter, but the payload has no $)
cat << STATES >$OFILENAME
Alabama
Alaska
Arizona
Arkansas
California
Colorado
Connecticut
Delaware
Florida
Georgia
Hawaii
Idaho
Illinois
Indiana
Iowa
Kansas
Kentucky
Louisiana
Maine
Maryland
Massachusetts
Michigan
Minnesota
Mississippi
Missouri
Montana
Nebraska
Nevada
New Hampshire
New Jersey
New Mexico
New York
North Carolina
North Dakota
Ohio
Oklahoma
Oregon
Pennsylvania
Rhode Island
South Carolina
South Dakota
Tennessee
Texas
Utah
Vermont
Virginia
Washington
West Virginia
Wisconsin
Wyoming
STATES
# If verbose, report status of thing.
if [ $VERBOSE ]; then
echo 'File created'
fi
| true
|
2f33287367340c4658ee9820fba4ac8ab08cd857
|
Shell
|
VPlusJetsAnalyzers/VPlusJets
|
/test/Limits/hwwcrabmylimit.sh
|
UTF-8
| 860
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate per-mass-point crab job files for limit setting:
#   $1 = analysis suffix used in datacard names, $2 = optional datacard dir.
# For every datacard_<suffix>[-_]M=<mass>0.txt found, a matching
# combine_crab_<suffix>_M=<mass>.sh/.cfg pair is produced from templates.
if [ $# -lt 2 ]
then
DIR=.
else
DIR=$2
fi
# Only proceed if at least one matching datacard exists (stat -t is a
# cheap existence test for the glob).
if stat -t ${DIR}/datacard_${1}[-_]M=??0.txt >/dev/null 2>&1
then
for datacard in ${DIR}/datacard_${1}[-_]M=??0.txt
do
DIR=`dirname $datacard`
# NOTE(review): file/suffix are computed but suffix is never used below.
file=`basename $datacard .txt`
suffix=${file##datacard_}
# Pull the numeric mass out of the "M=<digits>" part of the file name.
mass=`ls $datacard | egrep -o "M=[0-9]+" | egrep -o "[0-9]+"`
echo "text2workspace.py -m $mass $datacard -b -o wkspace_${1}_M=${mass}.root"
# text2workspace.py -m $mass $datacard -b -o wkspace_${1}_M=${mass}.root
# Instantiate the crab templates with the per-mass file names.
sed "s#model.root#wkspace_${1}_M=${mass}.root#g;s#log.txt#limit_${1}_M=${mass}.log#g" combine_crab.sh >combine_crab_${1}_M=${mass}.sh
sed "s#model.root#wkspace_${1}_M=${mass}.root#g;s#log.txt#limit_${1}_M=${mass}.log#g;s#combine_crab#combine_crab_${1}_M=${mass}#g" combine_crab.cfg >combine_crab_${1}_M=${mass}.cfg
done
fi
| true
|
8020bcafb3eb0487133325b6cd6bcde75120e6f9
|
Shell
|
jxsr713/coding
|
/tools/ddt_gen_json.sh
|
UTF-8
| 2,411
| 3.828125
| 4
|
[] |
no_license
|
#########################################################################
# File Name:.sh
# Author: jxsr713
# mail: jxsr713@163.com
# Created Time: Wed Jun 9 15:10:45 2021
#########################################################################
#!/bin/bash
# Generate JSON spec files for tests under ./runtest/ by running the
# gen_spec_json.py helper on each matching test file, then normalise the
# output indentation and copy the results into ./runtest/spec/.
#get current path
path=`pwd`
echo "current path:$path"
py_path=/workspace/tools/
#get current file location
CUR_DIR="$( cd "$( dirname "$0" )" && pwd )"
show_json=0
echo "Current command path: $CUR_DIR"
src_path="./runtest/"
# Print usage and exit non-zero.
help(){
cat << HELP
Usage:
$0 [-s <source file>] [-m <module name>]
-s: a test files under ./runtest/
-i: show json files
-p: full path for test files
-d: show domain
-f: feature
-m: module name,the module used to match <module>_* files under ./runtest/
domain and feature
mce: domain=>Memory Feature=>MCE
SAF: domain==>CPU feature==>SAF
HELP
exit 1
}
if [ $# -eq 0 ]; then
help
fi
#opt list -- extra flags forwarded to gen_spec_json.py
OPT_LIST=""
while getopts "s:m:id:f:p:" opt; do
case $opt in
p)
module=$OPTARG
;;
s)
module="$src_path/$OPTARG"
;;
m)
module="$src_path/${OPTARG}_*"
;;
i)
show_json=1
;;
d)
domain="${OPTARG}"
OPT_LIST="${OPT_LIST} -m $domain"
;;
f)
feature="${OPTARG}"
OPT_LIST="${OPT_LIST} -f $feature"
;;
*)
help
exit 0
;;
esac
done
# Dump every generated .json in the given directory when -i was passed.
show_spec(){
if [ $show_json -eq 0 ]; then
return
fi
if [ $# -eq 0 ]; then
return
fi
dir=$1
echo "@@@@@$dir@@@@@@"
for spec in ${dir}/*.json
do
echo "========SHOW ${spec##*/}======="
echo "`cat $spec`"
done
}
if [ "${module}" == "" ]; then
help
fi
echo "${module}"
if [ -e "${module}" ]; then
echo ${module}
else
help
fi
files=`ls ${module} -l | awk '{ print $NF }' `
# iterate over all the test files
for itm in $files; do
echo "============Get $itm============"
#get the dir name
#dir=`echo $itm | awk -F / ' { print $NF} ' `
dir=./json/${itm##*/}
rm $dir -rf
#echo $dir
echo "python3 ${py_path}/gen_spec_json.py -s $itm $OPT_LIST "
python3 ${py_path}/gen_spec_json.py -s $itm $OPT_LIST
# list the new json files
ls $dir -l
#tranlsate 4 ' ' to 2 ' '
#the json in repo is 2 space
# Convert leading 4-space indentation to 2 spaces; the generator tool
# emits JSON indented with 2 spaces, matching the repo convention.
sed -i "s/^ / /g" $dir/*.json
echo "Copy new files into ./runtest/spec/ from $dir"
cp $dir/* ./runtest/spec/
show_spec $dir
echo "============ DONE ============="
done
exit
| true
|
fadc8ed0c19fe6d960698d6d24de83ce5c8385f7
|
Shell
|
cytoscape/cytoscape-admin-scripts
|
/release-generator/workflow-build-upload.sh
|
UTF-8
| 543
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
# Build the Cytoscape release from the release branch and upload the
# resulting installers.  Each critical step aborts the script on failure.
BUILD_DIR="build"
# New branch for release
BRANCH="release/3.5.0"
# Remember where we started so we can return for the upload step.
start_dir=`pwd`
./prepare.sh $BUILD_DIR
./build-all-core-apps.sh $BUILD_DIR $BRANCH
cd ./${BUILD_DIR}/cytoscape
# Switch the checkout to the release branch and bring it up to date.
./cy.sh switch $BRANCH || { echo Failed to switch; exit 1; }
./cy.sh pull || { echo Failed to pull; exit 1; }
mvn clean install || { echo Failed to build new; exit 1; }
# Build the platform installers from the packaging module.
cd gui-distribution/packaging || { echo Failed to change dir; exit 1; }
mvn clean install || { echo Failed to build new installer; exit 1; }
cd $start_dir
./upload.sh $BUILD_DIR
| true
|
b9fa75c97c138c68666817192e22e82aa25336c9
|
Shell
|
commonsense-org/scanner
|
/shim.sh
|
UTF-8
| 232
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CSV shim around check.sh: $1 is a 1-based field index, $2 a CSV line.
# Extracts field $1 (splitting on commas and double quotes), echoes the
# original line plus a comma, then runs check.sh on the extracted path;
# the trailing newline is only printed when check.sh succeeds.
# Resolve the directory this script lives in.
pushd `dirname $0` > /dev/null
SCRIPTPATH=`pwd`
popd > /dev/null
# This might not work so well on messy csv's
path=`echo $2 | awk -v x=${1} -F ',|"' '{print $x}'`
echo -n $2,
${SCRIPTPATH}/check.sh -c "${path}" && echo
| true
|
b1412589216bb2347b265c31330335eee95d078b
|
Shell
|
asylum119/my-scripts
|
/Bluetooth Scanner/Bluetooth Scanner.sh
|
UTF-8
| 2,590
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
####### Shell script by Brett Peters #######
####### - asylum119@hotmail.com #######
####### #######
############# #############
########### ###########
######### #########
####### #######
##### #####
### ###
# #
#
# scan for avaliable Bluetooth Devices and log MAC and Nameto file
# (repeats forever: dedupes the log, shows it on screen, and beeps via
# sox when a new device appears)
#
# set terminal title
printf "\e]2;Local Bluetooth Scanner\a"
# change working directory
cd ~/Desktop
# log file name
log_file="Bluetooth Devices"
# generate log file
touch "$log_file"
# screen formatting
magenta_txt="\e[95m"; bold_txt=$(tput bold); reset_txt=$(tput sgr0)
# screen first run text
echo "Scanning Bluetooth Devices..."
# hide screen cursor
tput civis
# infinite loop
while true
do
# count log enteries
log_count=$(wc -l < "$log_file")
# scan bluetooth devices and log to file
hcitool scan >> "$log_file"
# remove leading white spaces from log file
sed -i "s/^[ \t]*//" "$log_file"
# remove duplicate lines from log file
perl -i -ne 'print if ! $x{$_}++' "$log_file"
# delete empty lines from log file
sed -i '/^$/d' "$log_file"
# remove unwanted entries from log file
sed -i '/Scanning/d' "$log_file"
# display log file on screen
clear
echo -e "${bold_txt}${magenta_txt}- - - - - - - - - - - - - - - -\nScanning For Bluetooth Devices\n- - - - - - - - - - - - - - - -\n${reset_txt}${bold_txt}"
cat "$log_file"
# "play" beep when new log entry
# NOTE(review): new_count is only assigned inside this sox check, so on
# systems without sox the device-count messages below never appear.
if dpkg -l | grep -E '^ii' | grep -q "sox"; then
new_count=$(wc -l < "$log_file")
if [ "$new_count" -gt "$log_count" ]; then
play -q -n synth 0.1 sin 880 || echo -e "\a"; sleep 1; play -q -n synth 0.1 sin 880 || echo -e "\a"; sleep 1; play -q -n synth 0.1 sin 880 || echo -e "\a"; sleep 2; play -q -n synth 0.1 sin 880 || echo -e "\a"; sleep 1; play -q -n synth 0.1 sin 880 || echo -e "\a"; sleep 1; play -q -n synth 0.1 sin 880 || echo -e "\a"
fi
fi
# display on screen number of logged bluetooth devices
if [ "$new_count" = 1 ]; then
echo -e "\n${bold_txt}${magenta_txt}$((${new_count})) Bluetooth Device${reset_txt}"
fi
if [ "$new_count" -gt 1 ]; then
echo -e "\n${bold_txt}${magenta_txt}$((${new_count})) Bluetooth Devices${reset_txt}"
fi
# slow the script down a bit
sleep 30
done
# NOTE(review): everything below is unreachable -- the while-true loop
# above never exits.
echo "Something went wrong..."
sleep 5
exit
| true
|
3bc3e238de3d9ab61406838e834ce0fc531ff007
|
Shell
|
3l-d1abl0/bash-playground
|
/h4x/subbruter.sh
|
UTF-8
| 284
| 2.5625
| 3
|
[] |
no_license
|
#! /bin/bash
# Brute-force subdomains of $1 with shuffledns, then merge the results
# into the existing recon files under ~/Recon/<domain>/ and produce a
# deduplicated subdomain list.
dir=~/Recon/$1
# Resolve candidate subdomains for the target domain.
echo $1 | ~/go/bin/shuffledns -w ~/go/bin/subdomains.txt -r ~/dnsvalidator/resolvers.txt -v -silent -o $1_brute;
mv $1_brute $dir/$1_brute;
# Append the new findings to the unfiltered pool, then dedupe.
cat $dir/$1_brute >> $dir/$1_unfilter_subdomains;
cat $dir/$1_unfilter_subdomains | sort -u > $dir/$1_subdmains;
| true
|
4b98d1d9886c0e736caa0754aca5fbc4218e12c2
|
Shell
|
rtfb/rtfblog
|
/scripts/run-sqlite-tests.sh
|
UTF-8
| 338
| 2.515625
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Run the Go test suite against a throwaway SQLite database: apply the
# migrations to test.db, run the tests with coverage, then remove the
# database while preserving the tests' exit status.
export RTFBLOG_DB_DRIVER=sqlite3
export RTFBLOG_DB_TEST_URL="test.db"
migrate -path=db/sqlite/migrations -database="sqlite3://$RTFBLOG_DB_TEST_URL" up
echo "Running tests on $RTFBLOG_DB_DRIVER..."
go test -covermode=count -coverprofile=profile.cov -v ./src/...
# Capture the test status before cleanup so it survives the rm below.
exit_status=$?
rm -r $RTFBLOG_DB_TEST_URL
exit $exit_status
| true
|
65938b423dfcda73b6c7899035909d4eba7425ae
|
Shell
|
calvinchengx/bashbuildingblocks
|
/loop.sh
|
UTF-8
| 888
| 4.0625
| 4
|
[] |
no_license
|
# Teaching examples of shell loops: a summing for-loop, a for+case file
# classifier, a factorial while-loop, and a line-by-line file reader.
# A simple for loop
sum=0
for i in 1 2 3 4
do
sum=$(($sum+$i))
done
echo "The sum of $i numbers is: $sum"
# Combine the use of a for-loop with a case statement
# NOTE(review): iterating over $(ls) breaks on file names with spaces;
# a plain glob (for filename in *) would be safer.
for filename in $(ls)
do
# Take extension available in a filename
ext=${filename##*\.}
case "$ext" in
c) echo "$filename : C source file" ;;
o) echo "$filename : Object file" ;;
sh) echo "$filename : Shell script" ;;
txt) echo "$filename : Text file" ;;
*) echo "$filename : Unknown file type/Not processed" ;;
esac
done
# Compute n! iteratively from a number read on stdin.
echo "Enter the number"
read no
fact=1
i=1
while [ $i -le $no ]
do
fact=$(($fact * $i))
i=$(($i+1))
done
echo "The factorial of $no is $fact"
# Display contents of existing file
echo -e "Enter the absolute path of the file name you want to read"
read file
exec <$file # redirects stdin to a file
while read line
do
echo $line
done
| true
|
7b8263334c4c0bb2108d316184e14c7ac3e78d0a
|
Shell
|
regen-network/regen-ledger
|
/images/regen-sandbox/setup/data.sh
|
UTF-8
| 1,248
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Seed the regen-ledger sandbox with example data-module state: anchor and
# attest a dataset, define a resolver, and register content to it.
# utils.sh is expected to provide ADDR1 and the log_response helper.
source $(dirname $0)/utils.sh
TX_FLAGS="--from $ADDR1 --yes --fees 5000uregen"
echo "INFO: Anchoring dataset: regen:13toVgf5UjYBz6J29x28pLQyjKz5FpcW3f4bT5uRKGxGREWGKjEdXYG.rdf"
regen tx data anchor regen:13toVgf5UjYBz6J29x28pLQyjKz5FpcW3f4bT5uRKGxGREWGKjEdXYG.rdf $TX_FLAGS | log_response
echo "INFO: Attesting dataset: regen:13toVgf5UjYBz6J29x28pLQyjKz5FpcW3f4bT5uRKGxGREWGKjEdXYG.rdf"
regen tx data attest regen:13toVgf5UjYBz6J29x28pLQyjKz5FpcW3f4bT5uRKGxGREWGKjEdXYG.rdf $TX_FLAGS | log_response
echo "INFO: Defining resolver http://resolver.mydataservice.com"
regen tx data define-resolver "http://resolver.mydataservice.com" $TX_FLAGS | log_response
echo "INFO: Registering dataset to resolver http://resolver.mydataservice.com"
# Build the content descriptor in a temp dir that is removed on exit.
TEMPDIR=$(mktemp -d)
trap "rm -rf $TEMPDIR" 0 2 3 15
cat > $TEMPDIR/content.json <<EOL
{
"content_hashes": [
{
"graph": {
"hash": "YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWE=",
"digest_algorithm": "DIGEST_ALGORITHM_BLAKE2B_256",
"canonicalization_algorithm": "GRAPH_CANONICALIZATION_ALGORITHM_URDNA2015",
"merkle_tree": "GRAPH_MERKLE_TREE_NONE_UNSPECIFIED"
}
}
]
}
EOL
regen tx data register-resolver 1 $TEMPDIR/content.json $TX_FLAGS | log_response
| true
|
48c41d4d2f99528c0c5df53da4c1a39c74f7f060
|
Shell
|
testcurry/shellCases
|
/testCase.sh
|
UTF-8
| 209
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the Chinese weekday name for a day number 1-7 given as $1;
# any other input is rejected as invalid.
day=$1
if [[ "$day" =~ ^[1-7]$ ]]; then
  names=(周一 周二 周三 周四 周五 周六 周日)
  echo "${names[day - 1]}"
else
  echo "输入非法"
fi
| true
|
0193b55a125c4498f22814f64b57e4071da778be
|
Shell
|
mishamx/docker-yii2-app-advanced
|
/builder
|
UTF-8
| 1,272
| 3.640625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Create a fresh yii2-app-advanced project in /app, initialise it for the
# requested environment (fb = frontend+backend, fba = + API), and copy the
# result into /var/www/html, skipping builder-only files.
LINE="========================================================================="
PROJECT="mishamx/docker-yii2-app-advanced"
MODE="FBA"
STABILITY="dev"
# Copy everything from /app into the web root except builder/infra files.
function basecopy {
APP_PATH="/app/*"
echo ""
echo "Copy files"
for f in $APP_PATH;
do
if [[ $f = '/app/environments' ]] || \
[[ $f = '/app/Dockerfile-builder' ]] || \
[[ $f = '/app/.github' ]] || \
[[ $f = '/app/builder' ]] || \
[[ $f = '/app/dockerize' ]] || \
[[ $f = '/app/README.md' ]] || \
[[ $f = '/app/init' ]]
then
echo "Ignore $f"
else
echo "Copy $f"
cp -R $f /var/www/html/
fi
done
}
# Select the environment mode from the first argument.
case "$1" in
"fb")
echo "Frontend & Backend"
MODE="FB"
;;
"fba")
echo "Frontend & Backend & API"
MODE="FBA"
;;
*)
echo "Use: $0 <fb|fba>"
exit 0
;;
esac
echo $LINE
echo "Create project $PROJECT"
echo $LINE
rm -rf /app
composer create-project --prefer-dist --stability=$STABILITY $PROJECT /app
echo "Init $MODE"
echo $LINE
cd /app
chmod +x /app/init
# Initialise the yii2 environment non-interactively.
/app/init --env=$MODE --overwrite=y
echo $LINE
echo "Copy files"
echo $LINE
basecopy
echo $LINE
echo ""
|
0554e3adc00cd80890aaa8f4ed05374b0e95b2f6
|
Shell
|
mrbitsdcf/mrbits.com.br
|
/publish.sh
|
UTF-8
| 232
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash

# Log a message prefixed with a timestamp: "[YYYY-mm-dd HH:MM:SS] - msg".
get_date () {
  printf '[%s] - %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$1"
}

# Generate the static site with hugo, then push it to the web server.
get_date "Generating pages"
hugo
get_date "Publishing pages"
rsync -vae ssh public/* ps-web-1:/var/www/html/mrbits.com.br/
get_date "Enjoy your new site"
| true
|
c8432d7b04b06ad7f9ba6bc68a00dda908685585
|
Shell
|
petronny/aur3-mirror
|
/hcfpcimodem-utils/PKGBUILD
|
UTF-8
| 929
| 2.828125
| 3
|
[] |
no_license
|
# Contributor: Giovanni Scafora <giovanni@archlinux.org>
# Arch Linux PKGBUILD for the Linuxant HCF softmodem userspace tools.
pkgname=hcfpcimodem-utils
pkgver=1.20full
pkgrel=1
pkgdesc="Linux drivers for the Conexant HCF Softmodem family (userspace tools and libraries)"
arch=('i686')
url="http://www.linuxant.com/drivers/hcf/"
license=('GPL' 'custom')
install=hcfpcimodem-utils.install
depends=('perl' 'cpio')
source=(http://www.linuxant.com/drivers/hcf/full/archive/hcfpcimodem-${pkgver}/hcfpcimodem-${pkgver}.tar.gz)
md5sums=('808b7b879a513205485ec7c903f1a35d')
build() {
cd "${srcdir}/hcfpcimodem-${pkgver}"
# Install straight into the package root via the vendor makefile.
make PREFIX="${pkgdir}/usr" ROOT="${pkgdir}" install || return 1
# Record the installed package version for the vendor tooling.
echo "TAR hcfpcimodem-${pkgver}" > ${pkgdir}/etc/hcfpcimodem/package
# Fix paths in scripts
sed -e "s|$pkgdir/usr/sbin:||" -e "s|$pkgdir||g" -i $pkgdir/usr/sbin/hcfpci{config,modconflicts,stop}
# Install license for non-GPL'ed components
install -D -m644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE" || return 1
}
| true
|
55ab03b1424cb6089b20e9bcdb94dc088c1fbc8c
|
Shell
|
termux/termux-packages
|
/packages/clamav/build.sh
|
UTF-8
| 1,808
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Termux package recipe for ClamAV (built with the Termux build system,
# which sources this file and calls the termux_step_* hooks below).
TERMUX_PKG_HOMEPAGE=https://www.clamav.net/
TERMUX_PKG_DESCRIPTION="Anti-virus toolkit for Unix"
TERMUX_PKG_LICENSE="GPL-2.0"
TERMUX_PKG_MAINTAINER="@termux"
TERMUX_PKG_VERSION=1.1.0
TERMUX_PKG_REVISION=1
TERMUX_PKG_SRCURL=https://www.clamav.net/downloads/production/clamav-$TERMUX_PKG_VERSION.tar.gz
TERMUX_PKG_SHA256=a30020d99cd467fa5ea0efbd6f4f182efebf62a9fc62fc4a3a7b2cc3f55e6b74
TERMUX_PKG_DEPENDS="json-c, libandroid-support, libbz2, libc++, libcurl, libiconv, libxml2, ncurses, openssl, pcre2, zlib"
TERMUX_PKG_BREAKS="clamav-dev"
TERMUX_PKG_REPLACES="clamav-dev"
TERMUX_PKG_EXTRA_CONFIGURE_ARGS="
-DAPP_CONFIG_DIRECTORY=$TERMUX_PREFIX/etc/clamav
-DBYTECODE_RUNTIME=interpreter
-DENABLE_CLAMONACC=OFF
-DENABLE_MILTER=OFF
-DENABLE_TESTS=OFF
-Dtest_run_result=0
-Dtest_run_result__TRYRUN_OUTPUT=
"
TERMUX_PKG_RM_AFTER_INSTALL="
share/man/man5/clamav-milter.conf.5
share/man/man8/clamav-milter.8
share/man/man8/clamonacc.8
"
TERMUX_PKG_CONFFILES="
etc/clamav/clamd.conf
etc/clamav/freshclam.conf"
# Build a small static shim providing syncfs() (missing on Android) and
# link it in, then set up Rust for the clamav-rs components.
termux_step_pre_configure() {
local _lib="$TERMUX_PKG_BUILDDIR/_syncfs/lib"
rm -rf "${_lib}"
mkdir -p "${_lib}"
pushd "${_lib}"/..
$CC $CFLAGS $CPPFLAGS "$TERMUX_PKG_BUILDER_DIR/syncfs.c" \
-fvisibility=hidden -c -o ./syncfs.o
$AR cru "${_lib}"/libsyncfs.a ./syncfs.o
popd
LDFLAGS+=" -L${_lib} -l:libsyncfs.a"
termux_setup_rust
TERMUX_PKG_EXTRA_CONFIGURE_ARGS+=" -DRUST_COMPILER_TARGET=$CARGO_TARGET_NAME"
}
# Instantiate the shipped config templates with the Termux prefix.
termux_step_post_make_install() {
for conf in clamd.conf freshclam.conf; do
sed "s|@TERMUX_PREFIX@|$TERMUX_PREFIX|" \
"$TERMUX_PKG_BUILDER_DIR"/$conf.in \
> "$TERMUX_PREFIX"/etc/clamav/$conf
done
unset conf
}
# Ship empty database/log directories in the package.
termux_step_post_massage() {
mkdir -p "$TERMUX_PKG_MASSAGEDIR/$TERMUX_PREFIX"/var/lib/clamav
mkdir -p "$TERMUX_PKG_MASSAGEDIR/$TERMUX_PREFIX"/var/log/clamav
}
| true
|
33a4b522f6fda84c33e9812d541fe45e97272825
|
Shell
|
jitsi/libjitsi
|
/resources/gpg-wrap.sh
|
UTF-8
| 164
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Thin gpg wrapper for non-interactive (CI) signing: feeds the passphrase
# from $GPG_PASSPHRASE on fd 0 through a loopback pinentry.  "--version"
# is passed straight through so build tooling can probe the gpg version.
if [ "$1" == "--version" ]; then
  gpg "$@"
else
  # "$@" must be quoted so arguments containing spaces survive, and the
  # here-string is quoted so the passphrase is passed verbatim.
  gpg --pinentry-mode loopback --batch --passphrase-fd 0 --no-tty "$@" <<< "$GPG_PASSPHRASE"
fi
| true
|
205a3c7ee1bdb197f58cad48b398ac6f1df6b406
|
Shell
|
ascander/old-dotfiles
|
/bin/git-rank-loc
|
UTF-8
| 231
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Print a ranked list of contributor names, based on lines of code
# (blames every tracked file; counts surviving lines per author).
git ls-files | \
  xargs -n1 git blame --line-porcelain | \
  grep '^author' | \
  sed -n 's/^author //p' | \
  sort -f | \
  uniq -ic | \
  sort -nr
# Note: the grep also matches author-mail/author-time/author-tz lines, but
# the sed only prints lines beginning exactly "author ", filtering them out.
| true
|
13dd0731c688a60e0dd07187942674126c9152de
|
Shell
|
szepeviktor/debian-server-tools
|
/tools/ghurl
|
UTF-8
| 832
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Display GitHub download URL for a local file.
#
# VERSION :1.0.0
# DATE :2018-05-18
# AUTHOR :Viktor Szépe <viktor@szepe.net>
# URL :https://github.com/szepeviktor/debian-server-tools
# LICENSE :The MIT License (MIT)
# BASH-VERSION :4.2+
# DEPENDS :apt-get install git jq
# LOCATION :/usr/local/bin/ghurl
set -e
LOCAL_FILE="$1"
# The file must exist and be readable (set -e aborts otherwise).
test -r "$LOCAL_FILE"
# Derive "user/repo" from the origin remote URL: strip everything up to
# the colon, then drop a trailing ".git".
ORIGIN_URL="$(git config --get remote.origin.url)"
USER_REPO="${ORIGIN_URL#*:}"
USER_REPO="${USER_REPO%.git}"
# TODO Should look like "[.A-Za-z0-9_-]\+\.[.A-Za-z0-9_-]\+"
# Path of the file relative to the repository root, as GitHub expects.
REPO_ROOT="$(git rev-parse --show-toplevel)"
RELATIVE_PATH="$(realpath --relative-to="$REPO_ROOT" "$LOCAL_FILE")"
# Ask the GitHub contents API for the raw download URL.
wget -q -O- "https://api.github.com/repos/${USER_REPO}/contents/${RELATIVE_PATH}" \
  | jq -r ".download_url" \
  | grep -F 'https://'
| true
|
0138cea3af14747005ab5e0b968a43bf894d855e
|
Shell
|
cytrus77/Debian_Stuff
|
/linux/networkMonitor
|
UTF-8
| 280
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# netmonitor
#
# Endlessly ping a well-known host every 10 minutes and restart the
# networking service whenever the ping fails.
echo "network monitoring script started"
while true
do
ping www.wp.pl -c 1
if [ $? -ne 0 ]
then
echo "Ping failed."
# Reload unit files first, then bounce the network stack.
systemctl daemon-reload
systemctl restart networking.service
fi
sleep 10m
done
# NOTE(review): unreachable -- the loop above never exits.
exit 0
| true
|
f77dc0c3650de2b0cca8dd5c3196d02dfe76785b
|
Shell
|
samupl/misc
|
/encoding_detect.sh
|
UTF-8
| 544
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# bash encoding detection.
# author: samu
# irc: samu @ irc.pirc.pl
#
# This should basically detect users terminal encoding.
# NOTE: I did this for a test. Then I've written it in python, for my needs, so I am not going to develop this script anymore.
# Trick: print 8 Polish characters, then ask the terminal for the cursor
# position (ESC[6n). A UTF-8 terminal advances 8 columns; a single-byte
# (ISO) terminal advances more, since each character is multiple bytes.
echo -ne "\nżółćżółć"
# Save terminal settings and switch to raw mode so the reply can be read.
oldstty=$(stty -g)
stty raw -echo min 0
echo -en "\033[6n" > /dev/tty
# The reply looks like ESC[row;colR -- split on ';' and stop at 'R'.
IFS=';' read -r -d R -a pos
stty $oldstty
# pos[0] is "ESC[row": skip the 2-byte prefix; both values are 1-based.
row=$((${pos[0]:2} - 1))
col=$((${pos[1]} - 1))
clear
if [ "$col" == 8 ]; then
echo "UTF";
else
echo "ISO";
fi
| true
|
3b80e990851a81c69eab203251b1c071c0a09095
|
Shell
|
bcsedlon/growmat
|
/garchive
|
UTF-8
| 704
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
# Flush pending growmat CSV logs from the ramdisk into per-sensor archive
# directories, prefixing each archived file with today's date and then
# removing it from the ramdisk.
sync
d="$(date +'%Y%m%d')"
ramdisk=/home/pi/growmat/growmat/ramdisk
archive=/home/pi/growmat/archives

for file in "$ramdisk"/*.csv
do
	filename="${file##*/}"
	# When the glob matches nothing it stays literal; skip that case.
	# (Quoting both sides fixes the original's unquoted comparison, which
	# re-globbed the literal "*.csv" against the current directory.)
	if [ "$filename" != "*.csv" ]; then
		newfilename="$d-$filename"
		filenameWithoutExtension="${filename%.*}"
		mkdir -p "$archive/$filenameWithoutExtension"
		cat "$ramdisk/$filename" >> "$archive/$filenameWithoutExtension/$newfilename"
		rm -f "$ramdisk/$filename"
	fi
done
|
a3bddefefcd6fd2c4df6e97e4cf8304e306de7be
|
Shell
|
siglabsoss/prototype
|
/simulink/continuous_rx.sh
|
UTF-8
| 369
| 2.671875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Continuously run one GNU Radio capture and one Octave processing pass in
# parallel, preserving the previous capture files before each round.
while true; do
cd ../gnuradio
# Keep a copy of the last round's raw captures.
cp drive_test_200.raw drive_test_200_previous.raw
cp drive_test_202.raw drive_test_202_previous.raw
echo "file copied"
# Start the GNU Radio flowgraph in the background.
./dual_drive_test.py &
GR_PID=$!
cd ../simulink
# Process the previous captures with Octave, also in the background.
octave --eval 'o_dual_drive_test' &
O_PID=$!
echo $GR_PID
echo $O_PID
# Barrier: wait for both jobs before starting the next round.
wait $GR_PID
echo "Gnuradio is finished"
wait $O_PID
echo "octave is finished"
done
| true
|
9f945ecd63e872280044d3ca0013aa8be183213c
|
Shell
|
tyamahori/laravel-docker-env
|
/docker/formac/formac
|
UTF-8
| 2,217
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Developer helper for the laravel-docker-for-mac compose project.
# Dispatches on $1 to setup/up/down/logs/shell/artisan/composer/test
# subcommands; anything unrecognised is executed inside the PHP container.
# Project name
COMPOSE_PROJECT_NAME="laravel-docker-for-mac"
# Container names
NGINX_NAME="${COMPOSE_PROJECT_NAME}-nginx"
APP_NAME="${COMPOSE_PROJECT_NAME}-php"
# Commands for running inside the containers
RUN_NGINX="docker exec -it ${NGINX_NAME}"
RUN_APP="docker exec -it ${APP_NAME}"
# Paths inside the containers
REMOTE_LARAVEL_DIR="/opt/laravel"
REMOTE_VENDOR_DIR=$REMOTE_LARAVEL_DIR/vendor
# Paths on the local host
LOCAL_LARAVEL_DIR="../../laravel"
LOCAL_VENDOR_DIR=$LOCAL_LARAVEL_DIR/vendor
case "$1" in
"setup")
# Full first-time setup: rebuild containers, wait for MySQL, migrate,
# then copy vendor/ and IDE helper files back to the host.
rm -rf $LOCAL_VENDOR_DIR
docker-compose down -v
docker-compose up -d --build
$RUN_APP dockerize -timeout 60s -wait tcp://laravel-docker-for-mac-mysql:3306
$RUN_APP ./artisan storage:link
$RUN_APP ./artisan ide-helper:generate
$RUN_APP ./artisan ide-helper:models --nowrite
$RUN_APP ./artisan migrate
docker cp $APP_NAME:$REMOTE_VENDOR_DIR $LOCAL_LARAVEL_DIR
docker cp $APP_NAME:$REMOTE_LARAVEL_DIR/_ide_helper.php $LOCAL_LARAVEL_DIR
docker cp $APP_NAME:$REMOTE_LARAVEL_DIR/_ide_helper_models.php $LOCAL_LARAVEL_DIR
docker image prune -f
;;
"copy-vendor-dir")
rm -rf $LOCAL_VENDOR_DIR
docker cp $APP_NAME:$REMOTE_VENDOR_DIR $LOCAL_LARAVEL_DIR
;;
"up")
docker-compose up -d
;;
"down")
docker-compose down
;;
"logs")
docker-compose logs -f
;;
"php")
docker exec -it $APP_NAME /bin/bash
;;
"nginx")
docker exec -it $NGINX_NAME /bin/bash
;;
"artisan")
# Forward the remaining arguments to artisan inside the container.
$RUN_APP ./artisan ${@:2}
;;
"test")
$RUN_APP ./vendor/bin/phpunit
;;
"composer")
# Run composer in the container, then sync vendor/ back to the host.
rm -rf $LOCAL_VENDOR_DIR
$RUN_APP composer ${@:2}
docker cp $APP_NAME:$REMOTE_VENDOR_DIR $LOCAL_LARAVEL_DIR
;;
"cache-clear")
$RUN_APP ./artisan cache:clear
$RUN_APP ./artisan config:clear
$RUN_APP ./artisan route:clear
$RUN_APP ./artisan view:clear
;;
"test-github-actions")
# CI variant: no TTY (-i only), fresh env file and dependencies.
RUN_APP="docker exec -i ${APP_NAME}"
docker-compose up -d --build
$RUN_APP cp .env.example .env
$RUN_APP composer install
$RUN_APP chmod -R 777 storage
$RUN_APP ./artisan key:generate
$RUN_APP dockerize -timeout 60s -wait tcp://laravel-docker-for-mac-mysql:3306
$RUN_APP ./artisan migrate
$RUN_APP ./vendor/bin/phpunit
;;
*)
# Default: run the given command inside the PHP container.
$RUN_APP ${@:1}
;;
esac
| true
|
1ce1e7a657eff0d44b6aa3cd8b008ce4e29b923e
|
Shell
|
ivoszbg/chrootlinux
|
/scripts/chroot/install.sh
|
UTF-8
| 355
| 2.515625
| 3
|
[] |
no_license
|
# Install the "linux" chroot launcher from the SD card into /system/bin on
# an Android device (requires root and a remountable /system partition).
sd_part="/dev/block/vold/179:1"
kit=$sd_part/linux
img=$kit/linux.img

# Define bin BEFORE it is used in PATH: the original referenced $bin while
# it was still unset, which left an empty (current-directory) PATH entry.
export bin=/system/bin
export PATH=$bin:/usr/bin:/usr/sbin:/bin:$PATH
export TERM=screen
export HOME=/root

echo "Installing script ..."
# Remount root and /system read-write; "remount,rw" must be a single
# option word (the original's "remount, rw" split it into two arguments).
mount -o remount,rw /
busybox mount -o rw,remount /system
# Abort if the source directory is missing, instead of copying whatever
# file named "linux" happens to be in the current directory.
cd /sdcard/linux/ || exit 1
cp linux /system/bin
chmod 777 /system/bin/linux
cd - > /dev/null
| true
|
f7e215b8478318f0db71db8cba4bbe4084b96f55
|
Shell
|
Seegnify/EAGLE
|
/src/examples/mnist/download.sh
|
UTF-8
| 487
| 3.03125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2019 Greg Padiasek
# Distributed under the terms of the the 3-Clause BSD License.
# See the accompanying file LICENSE or the copy at
# https://opensource.org/licenses/BSD-3-Clause
#
# Download and unpack one MNIST archive unless the unpacked file already
# exists.
# $1 - base file name (e.g. train-images-idx3-ubyte)
# Returns non-zero if the download fails; removes any partial .gz so a
# retry starts clean (the original left a truncated archive behind,
# which then broke gunzip). All expansions of $1 are quoted (SC2086).
get_mnist()
{
  if [ ! -f "$1" ]; then
    if ! wget "http://yann.lecun.com/exdb/mnist/$1.gz"; then
      rm -f "$1.gz"
      return 1
    fi
    gunzip "$1.gz"
  fi
}
# If not already downloaded
# Fetch the four standard MNIST archives (train/test images and labels).
get_mnist train-images-idx3-ubyte
get_mnist train-labels-idx1-ubyte
get_mnist t10k-images-idx3-ubyte
get_mnist t10k-labels-idx1-ubyte
| true
|
0aa39b75800a7b09fd4b5927b8acaeae0843a941
|
Shell
|
saturnism/kubernetes
|
/hack/update-api-reference-docs.sh
|
UTF-8
| 1,736
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generates updated api-reference docs from the latest swagger spec.
# Usage: ./update-api-reference-docs.sh <absolute output path>
set -o errexit
set -o nounset
set -o pipefail

# Resolve the repo root and output locations; REPO_DIR may be overridden
# by the environment, OUTPUT by the first argument.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
REPO_DIR=${REPO_DIR:-"${PWD}/${KUBE_ROOT}"}
DEFAULT_OUTPUT_PATH="${REPO_DIR}/docs/api-reference"
OUTPUT=${1:-${DEFAULT_OUTPUT_PATH}}
echo "Generating api reference docs at ${OUTPUT}"

V1_PATH="${OUTPUT}/v1/"
V1BETA1_PATH="${OUTPUT}/extensions/v1beta1"
SWAGGER_PATH="${REPO_DIR}/api/swagger-spec/"
echo "Reading swagger spec from: ${SWAGGER_PATH}"

# Fix: quote all path expansions (SC2086) so paths containing spaces
# survive mkdir and the docker -v bind-mount arguments.
mkdir -p "${V1_PATH}"
mkdir -p "${V1BETA1_PATH}"

# Generate the docs inside the gen-swagger-docs container, writing into
# the mounted output directory.
docker run --rm -v "${V1_PATH}":/output:z -v "${SWAGGER_PATH}":/swagger-source:z gcr.io/google_containers/gen-swagger-docs:v3 \
  v1 \
  https://raw.githubusercontent.com/kubernetes/kubernetes/master/pkg/api/v1/register.go

docker run --rm -v "${V1BETA1_PATH}":/output:z -v "${SWAGGER_PATH}":/swagger-source:z gcr.io/google_containers/gen-swagger-docs:v3 \
  v1beta1 \
  https://raw.githubusercontent.com/kubernetes/kubernetes/master/pkg/apis/extensions/v1beta1/register.go

# ex: ts=2 sw=2 et filetype=sh
| true
|
724189723b19384e9635b9e31ef881efa2b3b903
|
Shell
|
toshke/ci-cd-code-demo
|
/codedeploy/scripts/stop_server
|
UTF-8
| 141
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Stop the demo container if it is currently running.
# Fix: the original grepped `docker ps` output, which also matches the
# string anywhere in the line (image names, substrings of other container
# names); use docker's own name filter and -q to get just the ID.
container=$(docker ps -q --filter "name=demo-container")
if [ -n "$container" ]; then
  # stop container
  docker stop demo-container
fi
| true
|
ffc2292776a9c6777a4bea91602976bc64a89757
|
Shell
|
Anaconda-Platform/ae5-tini
|
/install_tini.sh
|
UTF-8
| 967
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Build and push "<image>-tini" variants of the AE5 editor/app images and
# repoint the corresponding Kubernetes deployments at them.
set -e
# tini must be present locally (see download_tini.sh).
if [ ! -f tini ]; then
  echo "tini must be downloaded first."
  echo "please run download_tini.sh or download manually."
  exit 1
fi
chmod +x tini
for pod in workspace deploy; do
  # Pick the env var holding the image reference for this pod type.
  # NOTE(review): istring is assigned but never used below — confirm
  # whether it is leftover or consumed by something sourced elsewhere.
  if [ $pod == workspace ]; then
    istring=/ae-editor:
    var=ANACONDA_PLATFORM_IMAGES_EDITOR
  else
    istring=/ae-app:
    var=ANACONDA_PLATFORM_IMAGES_APP
  fi
  depl=deployment/anaconda-enterprise-ap-$pod
  # Current image reference as VAR=image from the deployment's env.
  env=$(kubectl set env $depl --list | grep ^$var=)
  current_image=$(echo $env | sed 's@^[^=]*=@@')
  # Strip any existing "-tini" suffix to get the base image name.
  img=$(echo $current_image | sed 's@-tini$@@')
  echo "Building tini add-on for $img"
  # Rebase the local Dockerfile on the base image, then build/push the
  # -tini variant.
  sed -i "s@^FROM .*@FROM $img@" Dockerfile
  docker build -t $img-tini .
  docker push $img-tini
  echo $env
  # current_image == img only when no -tini suffix was stripped, i.e. the
  # deployment is not yet using the -tini image — repoint it then.
  if [ "$current_image" == "$img" ]; then
    kubectl set env $depl $var=$img-tini
    kubectl set env $depl --list | grep ^$var=
  else
    echo "$depl does not need to be updated"
  fi
done
| true
|
57dd1e471d9ae05a582b7694eedaf54ab56f63ed
|
Shell
|
RichW1/Cybersecurity-Bootcamp
|
/1-Lesson-Plans/06-Bash-Scripting-and-Programming/Day 2/Activities/06_STU_Lists_and_Loops/Solved/for_loops.sh
|
UTF-8
| 810
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Solved exercise: lists and for loops.

# Create Variables
# Fix: store the lists as real Bash arrays instead of relying on unquoted
# word-splitting of command substitutions (SC2207); file names with
# spaces would otherwise be split apart.
nums=({0..9})
states=('Nebraska' 'California' 'Texas' 'Hawaii' 'Washington')
mapfile -t ls_out < <(ls)
mapfile -t execs < <(find /home -type f -perm 777 2>/dev/null)

# Create For Loops
# Create a loop that prints only 3, 5 and 7
for num in "${nums[@]}"; do
  if [ "$num" = 3 ] || [ "$num" = 5 ] || [ "$num" = 7 ]; then
    echo "$num"
  fi
done

# Create a loop that looks for 'Hawaii'
for state in "${states[@]}"; do
  if [ "$state" == 'Hawaii' ]; then
    echo "Hawaii is the best!"
  else
    echo "I'm not a fan of Hawaii."
  fi
done

# Create a `for` loop that prints out each item in your variable that holds the output of the `ls` command.
for x in "${ls_out[@]}"; do
  echo "$x"
done

# Bonus
# Create a for loop to print out execs on one line for each entry
for exec in "${execs[@]}"; do
  echo "$exec"
done
| true
|
8beb1481d67cb94137f87481a369b30e41f890b0
|
Shell
|
ManuInNZ/iot-edge-image-cleanup
|
/module/run.sh
|
UTF-8
| 1,479
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
echo 'starting module'
# sleep time provided in environment variables. If not provided, pick reasonable defaults
SleepTime="${SleepTime:=24}"
SleepUnit="${SleepUnit:=h}"
# ensure sleep unit is a valid value, otherwise default to hours
case $SleepUnit in
  s|h|m|d)
    ;;
  *)
    echo '********** Invalid or Missing SleepUnit. if SleepUnit env variable was provided, it must be s, m, d, or h.. defaulting to h ***********'
    SleepUnit=h
    ;;
esac
echo 'sleep time between runs: ' $SleepTime $SleepUnit
echo 'environment variables'
export
#run forever
# Prune loop: talks to the Docker Engine API directly over its Unix
# socket; the filters query parameters are URL-encoded JSON
# ({"dangling": ["false"]} for images, {} for the others).
while :
do
  echo 'removing unused docker images at ' $(date)
  # here's the magic.. Call the /images/prune API, with dangling=false to prune all unused images (not just <none>:<none> ones)
  curl -X POST -s --unix-socket /var/run/docker.sock http://localhost/images/prune?filters=%7B%22dangling%22%3A%20%5B%22false%22%5D%7D
  echo 'removing stopped containers at ' $(date)
  curl -X POST -s --unix-socket /var/run/docker.sock http://localhost/containers/prune?filters=%7B%7D
  echo 'removing unused networks at ' $(date)
  curl -X POST -s --unix-socket /var/run/docker.sock http://localhost/networks/prune?filters=%7B%7D
  echo 'removing unused volumes at ' $(date)
  curl -X POST -s --unix-socket /var/run/docker.sock http://localhost/volumes/prune?filters=%7B%7D
  echo 'sleeping for ' $SleepTime $SleepUnit
  # sun's getting real low, big guy... go to sleep
  sleep ${SleepTime}${SleepUnit}
done
| true
|
95f1b2f37cf9fe1120ff5addbf5795598307f508
|
Shell
|
jsonn/pkgsrc
|
/mail/gld/files/gld.sh
|
UTF-8
| 646
| 3.21875
| 3
|
[] |
no_license
|
#!@RCD_SCRIPTS_SHELL@
#
# $NetBSD: gld.sh,v 1.5 2005/01/19 15:48:41 xtraeme Exp $
#
# PROVIDE: gld
# BEFORE: mail
# REQUIRE: DAEMON LOGIN mysqld

. /etc/rc.subr

name="gld"
rcvar=$name
command="@PREFIX@/bin/${name}"
required_files="@PKG_SYSCONFDIR@/$name.conf"
pidfile="@VARBASE@/run/${name}.pid"
# NOTE(review): extra_commands normally lists rc subcommands; here it
# names the tunable variable — confirm intent.
extra_commands="gld_waitmysql_seconds"
: ${gld_waitmysql_seconds:=5}
start_precmd="waitmysql $gld_waitmysql_seconds"

# Refresh the pidfile for every action except "stop" and "status".
# Fix: the original used `[ "$1" != stop -o "$1" != status ]`, which is
# always true (a value cannot differ from both at once was the intent,
# but -o makes it differ from *either*), so the pidfile was rewritten
# even while stopping; AND the two tests instead.
if [ "$1" != "stop" ] && [ "$1" != "status" ]; then
	echo $(check_process $command) > $pidfile
fi

# Sleep a fixed number of seconds so MySQL can come up before gld starts.
# $1 - seconds to wait
waitmysql()
{
	_sec=$1
	echo "$name: waiting for MySQL ${_sec} seconds..."
	sleep ${_sec}
}

load_rc_config $name
run_rc_command "$1"
| true
|
d75ce64b85421401dd7dfa8ef7dadf7a17c03851
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/afx2usb-linux-git/PKGBUILD
|
UTF-8
| 1,077
| 2.734375
| 3
|
[] |
no_license
|
# Maintainer: sekret, mail=$(echo c2VrcmV0QHBvc3Rlb
# Arch Linux PKGBUILD for the git version of afx2usb-linux: installs a
# udev rule, the Axe-FX II USB bootloader firmware, and licenses.
_pkgname=afx2usb-linux
pkgname=$_pkgname-git
pkgver=0.r4.9f2ef7d
pkgrel=1
pkgdesc="Install or uninstall USB bootloader firmware for Fractal Audio Systems Axe-FX II audio processor."
arch=('any')
url="https://github.com/TieDyedDevil/afx2usb-linux"
license=('MIT' 'custom')
depends=('fxload')
makedepends=('git')
provides=("$_pkgname")
conflicts=("$_pkgname")
source=("$_pkgname::git+$url.git"
        '55-fractalaudio.rules')
md5sums=('SKIP'
         'b6f4ec51dfff47ea9181e0f25f22b300')

# VCS version: 0.r<commit count>.<short hash>.
pkgver() {
  cd "$_pkgname"
  printf "0.r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}

package() {
  # udev rule granting access to the Fractal Audio device.
  install -Dm644 55-fractalaudio.rules "$pkgdir/usr/lib/udev/rules.d/55-fractalaudio.rules"
  cd "$_pkgname"
  install -Dm644 loader/axefx2load.hex "$pkgdir/usr/share/usb/FractalAudio/axefx2/axefx2load.hex"
  install -Dm644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
  install -Dm644 loader/FAS-License "$pkgdir/usr/share/licenses/$pkgname/FAS-License"
}
# vim:set ts=2 sw=2 et:
| true
|
0cb8faf12aefcf26bd8183aac9dd746cd9b568da
|
Shell
|
ballock/btrfs-backup
|
/btrfs-backup
|
UTF-8
| 1,268
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/sh
# Back up btrfs subvolumes (home/root/www and every LXC container rootfs)
# to an external disk labelled "sp6-backup" via btrfs-subvolume-backup.
export PATH=${PATH}:/usr/local/bin:/usr/local/sbin

# Start from a clean state; ignore failures if nothing was mounted.
umount /btrfs >/dev/null 2>&1 || true
umount /mnt >/dev/null 2>&1 || true

# /mnt = backup target disk, /btrfs = source filesystem; bail out (and
# undo the first mount) if either mount fails.
mount -t btrfs LABEL=sp6-backup /mnt || exit 1
if ! mount -t btrfs UUID=76c2d8f7-278d-428c-9a42-571216cc0773 /btrfs; then
  umount /mnt
  exit 1
fi

cd /btrfs
# Each failure is reported (Polish messages) but does not stop the run.
btrfs-subvolume-backup --local snapshots --last "" \@sp6-home /mnt/snapshots || echo Blad backupu home.
btrfs-subvolume-backup --local snapshots --last "" \@sp6-root /mnt/snapshots || echo Blad backupu root.
btrfs-subvolume-backup --local snapshots --last "" \@sp6-www /mnt/snapshots || echo Blad backupu www.

# Back up every LXC container's rootfs; the first run per container uses
# --init and is marked with a sentinel file so later runs are incremental.
cd /var/lib/lxc
for i in *; do
  if [ -d $i ]; then
    if [ ! -f $i/btrfs-initially-synced ]; then
      btrfs-subvolume-backup --init --local /btrfs/snapshots --last "" $i/rootfs /mnt/snapshots && touch $i/btrfs-initially-synced
    else
      btrfs-subvolume-backup --local /btrfs/snapshots --last "" $i/rootfs /mnt/snapshots
    fi
  fi
done

sleep 10
umount /mnt || echo "Blad odmontowania dysku zewnetrznego. Odmontuj /mnt recznie (umount /mnt) po zamknieciu aplikacji korzystajacych z niego."
umount /btrfs || echo "Blad odmontowania dysku serwera. Odmontuj /btrfs recznie (umount /btrfs) po zamknieciu aplikacji korzystajacych z niego."
echo Zrobione.
| true
|
991c57820bce577055ab93de21bb1fc78098c7d0
|
Shell
|
DevKhaira/docker-android
|
/src/utils.sh
|
UTF-8
| 964
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Helpers run against an Android emulator/device via adb.

# Block until the device reports sys.boot_completed=1 (poll once/second).
BOOT ()
{
  A=$(adb wait-for-device shell getprop sys.boot_completed | tr -d '\r')
  while [[ $A != "1" ]]; do
    sleep 1;
    A=$(adb wait-for-device shell getprop sys.boot_completed | tr -d '\r')
  done;
}

# Download a pinned Google Play Services APK (x86, minAPI 23).
Get_Google_Play_Services ()
{
  wget "https://www.apklinker.com/wp-content/uploads/uploaded_apk/5b51570a214a8/com.google.android.gms_12.8.74-040700-204998136_12874026_MinAPI23_(x86)(nodpi)_apklinker.com.apk"
}

# Install/upgrade (-r) the downloaded APK on the device.
Update_Google_Play_Services ()
{
  adb install -r "$PWD/com.google.android.gms_12.8.74-040700-204998136_12874026_MinAPI23_(x86)(nodpi)_apklinker.com.apk"
}

# Turn off all UI animations (useful for stable UI tests).
Disable_animations ()
{
  # this is for demonstration what other amazing staff can be done here
  adb shell "settings put global window_animation_scale 0.0"
  adb shell "settings put global transition_animation_scale 0.0"
  adb shell "settings put global animator_duration_scale 0.0"
}

BOOT
Get_Google_Play_Services
Update_Google_Play_Services
Disable_animations
| true
|
1efd5171542d15a7068011e9c3df55c64281a237
|
Shell
|
viyoriya/lfs_9
|
/lfs/pkgscripts/gcc
|
UTF-8
| 1,663
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# LFS package script: download, build and install GCC ($GCC_VER from
# pkg.conf) system-wide, run the standard toolchain sanity checks, and
# register the package. The whole build is timed and tee'd to a log.
set -e
. /etc/pkg/pkg.conf
. /var/lib/pkg/functions

# Package name is taken from this script's own filename.
name=$(basename $0)
version=$GCC_VER
url=http://ftp.gnu.org/gnu/gcc/gcc-$version/gcc-$version.tar.xz

fetch $url
rm -fr $WORK_DIR/$name-$version
tar -xvf $SOURCE_DIR/$(basename $url) -C $WORK_DIR
cd $WORK_DIR/$name-$version
{ time \
{
  # On x86_64, install 64-bit libraries into /usr/lib (not /usr/lib64).
  case $(uname -m) in
    x86_64)
      sed -e '/m64=/s/lib64/lib/' \
          -i.orig gcc/config/i386/t-linux64
    ;;
  esac
  rm -f /usr/lib/gcc
  mkdir -v build
  cd build
  SED=sed \
  ../configure --prefix=/usr \
               --enable-languages=c,c++ \
               --disable-multilib \
               --disable-bootstrap \
               --with-system-zlib
  make
  make install
  # Remove a known-problematic fixed header, fix ownership, and set up
  # the conventional cpp/cc symlinks and the LTO linker plugin.
  rm -rf /usr/lib/gcc/$(gcc -dumpmachine)/$GCC_ROOT/include-fixed/bits/
  chown -v -R root:root \
      /usr/lib/gcc/*linux-gnu/$GCC_ROOT/include{,-fixed}
  ln -svf ../usr/bin/cpp /lib
  ln -svf gcc /usr/bin/cc
  install -v -dm755 /usr/lib/bfd-plugins
  ln -sfv ../../libexec/gcc/$(gcc -dumpmachine)/$GCC_ROOT/liblto_plugin.so \
          /usr/lib/bfd-plugins/
  # Toolchain sanity check (per the LFS book): compile a dummy program
  # and verify linker paths, startfiles, headers and libc resolution.
  echo 'int main(){}' > dummy.c
  cc dummy.c -v -Wl,--verbose &> dummy.log
  readelf -l a.out | grep ': /lib'
  grep -o '/usr/lib.*/crt[1in].*succeeded' dummy.log
  grep -B4 '^ /usr/include' dummy.log
  grep 'SEARCH.*/usr/lib' dummy.log |sed 's|; |\n|g'
  grep "/lib.*/libc.so.6 " dummy.log
  grep found dummy.log
  rm -v dummy.c a.out dummy.log
  # Move gdb auto-load python helpers out of /usr/lib.
  mkdir -pv /usr/share/gdb/auto-load/usr/lib
  mv -v /usr/lib/*gdb.py /usr/share/gdb/auto-load/usr/lib
}
} 2>&1 | tee -a $LOG_DIR/$(basename $0).log
# Abort if the build pipeline (first stage of the tee pipe) failed.
if [ $PIPESTATUS -ne 0 ]; then exit 1; fi;
rm -fr $WORK_DIR/$name-$version
registerpkg $(basename $0) $version
exit 0
| true
|
77ec5b95e440bdf46b6847355235d08fb9e6c867
|
Shell
|
DanWilkerson/rcs
|
/.bashrc
|
UTF-8
| 222
| 2.78125
| 3
|
[] |
no_license
|
# Editor / helper aliases.
alias vim="nvim"
alias watch_and=$HOME/watch_and/watch_and.sh

# ws NAME — create a detached tmux session NAME with a small (10%)
# bottom pane, then attach to it.
# Fix: quote "$1" so session names containing spaces or glob characters
# are passed through intact (SC2086).
function ws {
  tmux new -d -s "$1" &&
  # tmux doesn't really seem to care what is passed in
  tmux split-window -p 10 -t "${1}.0";
  tmux attach -t "$1";
}
| true
|
653ea320056e346448211d3ecca18cb74b22528f
|
Shell
|
cs2dsb/rust-web-deploy-scripts
|
/deployments/test_deployment/apps/user-apps.sh
|
UTF-8
| 606
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Deploy the hello-world sample app and register it as a systemd service.

# Exit when any command fails
set -o errexit
# Exit when an undeclared variable is used
set -o nounset
# Exit when a piped command returns a non-zero exit code
set -o pipefail

# Check we have have root privs
if [[ $EUID -ne 0 ]]; then
  echo "This script must be run as root" 1>&2
  exit 1
fi

# Run relative to this script's own directory.
cd "${BASH_SOURCE%/*}"

"../deploy/user-application.sh" "../apps/hello-world" "/opt/apps/hello-world" true
"../deploy/systemd-service.sh" "hello-world" "/opt/apps/hello-world" "hello-world"

# You can allow the port externally for debugging if necessary
# "../deploy/allow-port.sh" 8080
| true
|
783a8f470dfe7fd8ec5c9cd2c683a88ac5e0aae1
|
Shell
|
NavarreteLeandro/Cartesian-Robot
|
/ardup
|
UTF-8
| 881
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
# Build the AVR firmware (ATmega328p, 16 MHz) and flash it to an Arduino
# on /dev/ttyACM0 via avrdude.
# Library translation units compiled alongside main.c:
files=(uart uartInterpreter interrupts GPIO timers stepper i2c i2c_interpreter)
#files=(uart uartInterpreter interrupts GPIO timers stepper)

if [ ! -d build ]; then
  echo "Creating build/ dir..."
  mkdir build
fi
if [ -d build ]; then
  echo 'Cleaning previous build files...'
  rm -rf build/*
fi

avr-gcc -std=c11 -Os -DF_CPU=16000000UL -mmcu=atmega328p -c -o ./build/main.o main.c
echo 'Compiled main.c'
# Compile each library source (zsh: $files iterates over array elements).
for filename in $files; do
  avr-gcc -std=c11 -Os -DF_CPU=16000000UL -mmcu=atmega328p -c -o ./build/"$filename".o ./lib/"$filename".c
  echo Compiled .lib/AVRDuino/"$filename".c
done

# Link all objects, convert the ELF to Intel HEX, and flash.
avr-gcc -std=c11 -mmcu=atmega328p ./build/*.o -o ./build/main
echo 'linked .o files'
avr-objcopy -O ihex -R .eeprom ./build/main ./build/main.hex
echo 'converted linked output to .hex'
avrdude -F -V -c arduino -p ATMEGA328P -P /dev/ttyACM0 -b 115200 -U flash:w:./build/main.hex
| true
|
716e884245891ea876a76087dc16ca090b753ad5
|
Shell
|
derpferpmerp/astroUNLCustom
|
/main.sh
|
UTF-8
| 852
| 2.875
| 3
|
[] |
no_license
|
# Rebuild the astroUNL site: fetch the sources, bundle the Ruffle flash
# emulator alongside them, and generate an HTML wrapper page per .swf.
rm -rf rfflsrc
rm -rf astrosrc
mkdir astrosrc
cd astrosrc
git clone "https://github.com/derpferpmerp/astroUNL.git"
cd ../
mkdir rfflsrc
cd rfflsrc
# Self-hosted Ruffle build (pinned nightly), unpacked into the site dir.
curl -O -L "https://github.com/ruffle-rs/ruffle/releases/download/nightly-2021-06-08/ruffle-nightly-2021_06_08-web-selfhosted.zip"
unzip "ruffle-nightly-2021_06_08-web-selfhosted.zip"
rm -rf "ruffle-nightly-2021_06_08-web-selfhosted.zip"
mv * ../astrosrc/astroUNL
cd ../astrosrc/astroUNL
# Regenerate the index page that links every generated wrapper.
rm -rf "ALL-MAIN.html"
touch "ALL-MAIN.html"
echo "<div id=\"container\"> </div><body><strong>All SWFS</strong><p>" >> "ALL-MAIN.html"
for file in *.swf; do
  if [ -f "$file" ]; then
    # mainexec.py emits the per-SWF wrapper page on stdout.
    python3 ../../mainexec.py "$file" > "$file.html"
    echo "<a href=\"${file}.html\" style='color:blue;'>${file}</a><br>" >> "ALL-MAIN.html"
    echo "Completed File $file"
  fi
done
echo "</p></body>" >> "ALL-MAIN.html"
| true
|
27a6a9d82cca6d30d1b3ad2d106581050ccb06c2
|
Shell
|
IoT-Ken/bworkshop
|
/dockerinstall/dockerinstall.sh
|
UTF-8
| 1,876
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# _______ ______ ______ __ ___ _______ .______ __ .__ __. _______.___________. ___ __ __
# | \ / __ \ / || |/ / | ____|| _ \ | | | \ | | / | | / \ | | | |
# | .--. | | | | | ,----'| ' / | |__ | |_) | | | | \| | | (----`---| |----` / ^ \ | | | |
# | | | | | | | | | | < | __| | / | | | . ` | \ \ | | / /_\ \ | | | |
# | '--' | `--' | | `----.| . \ | |____ | |\ \----. | | | |\ | .----) | | | / _____ \ | `----.| `----.
# |_______/ \______/ \______||__|\__\ |_______|| _| `._____| |__| |__| \__| |_______/ |__| /__/ \__\ |_______||_______|
# Author: Ken Osborn
# Version: 1.0
# Last Update: 18-Jul-19
# Purpose: Install Docker if not already installed
# Tested On: Ubuntu 18.04.2 LTS
# Set current Package dir variable and change into
# Fix: the nested echo/backtick dance was a no-op; dirname alone suffices.
dirname=$(dirname "$0")
cd "$dirname"
echo "This is the script: execute phase" >> /tmp/campaign.log
################################################################################
## Install Docker if it is not present
################################################################################
# Fix: use `command -v` instead of `which` + `$?` (portable, SC2230).
if command -v docker >/dev/null 2>&1; then
  echo "Docker is already installed, no need to install Docker" >> /tmp/campaign.log
else
  echo "Docker is not installed, installing Docker" >> /tmp/campaign.log
  yes | sudo apt-get update
  yes | sudo apt install docker.io
  # Fix: start the service *before* reporting on it — the original logged
  # "service started successfully" based on the apt install exit code and
  # only ran systemctl start afterwards.
  sudo systemctl start docker
  if [ $? -eq 0 ]; then
    echo "Docker service started successfully" >> /tmp/campaign.log
  else
    echo "Docker service start failed" >> /tmp/campaign.log
  fi
  sudo systemctl enable docker
fi
| true
|
c683374c49ef810f6ece2309640ce40c9c6f2ed0
|
Shell
|
devdavidkarlsson/CEFExtensionSample
|
/build-linux.sh
|
UTF-8
| 1,210
| 2.90625
| 3
|
[
"MIT-0"
] |
permissive
|
#!/usr/bin/env bash
# Build the CEF3SimpleSample Linux binary from scratch and assemble a
# self-contained bin/ directory (binary + CEF resources + libs + html).
mkdir -p bin # for silencing the next line, in case the folder does not exist
rm -r bin
mkdir -p build
touch build/touched # for silencing the next line, in case the folder was just created
rm -r build/*
cd build

# Compile the platform-independent sources (GTK2 flags from pkg-config).
echo "Building CEF3SimpleSample::Independente"
g++ -c ../CEF3SimpleSample/Independente/*.cpp \
    -I ../CEF3SimpleSample/Independente \
    -I ../CEF3SimpleSample/CEF/Linux \
    -I ../CEF3SimpleSample/CEF/Linux/include \
    `pkg-config --cflags --libs gtk+-2.0`
# This object belongs to the helper-process target, not the main binary.
rm "Helper Process Main.o"

echo "Building CEF3SimpleSample::Linux"
g++ -c ../CEF3SimpleSample/Linux/*.cpp \
    -I ../CEF3SimpleSample/Independente \
    -I ../CEF3SimpleSample/CEF/Linux \
    -I ../CEF3SimpleSample/CEF/Linux/include \
    `pkg-config --cflags --libs gtk+-2.0`

# Link against the prebuilt CEF library + wrapper; embed rpath "." so the
# binary finds libcef.so next to itself at runtime.
echo "Linking..."
g++ *.o \
    -o CEF3SimpleSample \
    `pkg-config --cflags --libs gtk+-2.0` \
    -L ../CEF3SimpleSample/CEF/Linux/lib \
    -ldl \
    -lcef_dll_wrapper \
    -lcef \
    -Wl,-R. -Wl,-R/usr/lib

# Stage the runtime layout into bin/ and clean up the build tree.
echo "Copying files around..."
rm *.o
mkdir bin
mv CEF3SimpleSample bin/
cp -r ../CEF3SimpleSample/CEF/Linux/Resources/* bin/
cp -r ../CEF3SimpleSample/CEF/Linux/lib/*.so bin/
cp -r ../CEF3SimpleSample/Independente/html bin/html
mv bin ..
cd ..
rm -r build
| true
|
c3b1eeb99470c807b99e2e47c655bcd8bc2aee9d
|
Shell
|
G7495x/CSE-VTU-Labs-2010-Syllabus
|
/(10CSL77) Networks Lab/01/1.sh
|
UTF-8
| 296
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the NS-2 simulation for bandwidths 0.2..2.0 (step 0.2) and plot
# packets dropped vs bandwidth with xgraph, then replay in nam.
echo 'TitleText: "Bandwidth vs Packets Dropped"'>1.xg
echo 'XUnitText: "Bandwidth"'>>1.xg
echo 'YUnitText: "Packets Dropped"'>>1.xg
echo ''>>1.xg
for i in {1..10}
do
  # Bandwidth for this run: i * 0.2 (bc for fractional arithmetic).
  a=$(echo "$i*0.2"|bc -l)
  ns 1.tcl $a
  # Append "<bandwidth> <dropped>" where the awk script counts drops
  # from the generated trace file 1.tr.
  echo $a $(awk -f 1.awk 1.tr)>>1.xg
done
xgraph -lw 2 -bg white 1.xg &
# NOTE(review): "nam 1" — presumably the nam trace file is "1.nam";
# confirm the intended argument.
nam 1
| true
|
386a4bef3bdd02d37537d72b1ee85999bbd63aeb
|
Shell
|
alamandor/Scripts
|
/get-site
|
UTF-8
| 160
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Alec Greenaway
# Script to download offline html version of supplied url
# $1 - URL to mirror
if [[ $# -eq 0 ]]
then
  # Fix: usage errors belong on stderr.
  echo "Need to supply a URL" >&2
  exit 1
fi
# Fix: quote the URL (SC2086) so query strings containing &, ? or spaces
# survive; -p fetches page requisites, -k rewrites links for offline use.
wget -p -k "$1"
| true
|
536bd01382167c5d45df1f2e4a391a6435de86d4
|
Shell
|
NutiNaguti/order-management.containerize
|
/build.command
|
UTF-8
| 329
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the order-management backend and frontend images, then start the
# stack with docker-compose.
# coreutils provides `realpath`, used below (macOS lacks it natively).
brew install coreutils
# Absolute directory of this script, so it can be run from anywhere.
BASEDIR=$(dirname $(realpath "$0"))
echo $BASEDIR
cd $BASEDIR/order-management
docker build -t order-management.back -f Dockerfile .
cd $BASEDIR/order-management/order-management.front
docker build -t order-management.front -f Dockerfile .
cd $BASEDIR/order-management
docker-compose up -d
| true
|
58ea96e8e030e75d41cab5ca50a7b97576f9d993
|
Shell
|
ShalokShalom/apps
|
/rssguard/PKGBUILD
|
UTF-8
| 935
| 2.828125
| 3
|
[] |
no_license
|
# Arch Linux PKGBUILD for RSS Guard (qmake/WebEngine build).
pkgname=rssguard
pkgver=3.9.2
pkgrel=1
pkgdesc="Simple (yet powerful) feed reader, able to fetch the most known feed formats, including RSS/RDF and ATOM."
arch=('x86_64')
url="https://github.com/martinrotter/rssguard"
license=('GPL')
depends=('qtwebengine')
makedepends=('qt5-tools')
source=("https://github.com/martinrotter/rssguard/archive/${pkgver}.tar.gz"
        "https://github.com/martinrotter/rssguard/commit/139e014d2af6a6e156098ba486dbbe2681018fab.diff")
md5sums=('0d7f039049829113aa6138560e92f2e6'
         '8d6381cb63a44af1108f925f4b71ba8d')

prepare() {
  cd ${pkgname}-${pkgver}
  # Upstream patch currently disabled; source entry kept for checksums.
  #patch -p1 -i ${srcdir}/139e014d2af6a6e156098ba486dbbe2681018fab.diff
}

# Out-of-tree qmake build with the WebEngine backend enabled.
build() {
  mkdir -p build
  cd build
  /usr/lib/qt5/bin/qmake ../${pkgname}-${pkgver}/build.pro -r \
      CONFIG+=release PREFIX=/usr \
      INSTALL_ROOT=${pkgdir}/ \
      USE_WEBENGINE=true
  make
}

package() {
  cd build
  make INSTALL_ROOT=${pkgdir}/ install
}
| true
|
6f91808199ee602eeeec02ea1b1d5e61baa962df
|
Shell
|
devinit/ddw-r-scripts
|
/shell-scripts/crs-to-postgres/create.crs.schema.and.grant.privilege.to.user.sh
|
UTF-8
| 1,142
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Create a database, a "crs" schema inside it, and grant all table
# privileges in that schema to the "postgres" and "boss" users.
# $1 - name of the database to create

database_name=$1

echo "Database name: $database_name"

# Create the database
echo "Attempting to create database: $database_name"
echo "CREATE DATABASE $database_name;"
echo "CREATE DATABASE $database_name;" | psql -d postgres -e

echo "Schema name : crs"
echo "=========="

# Create the schema
echo "Creating schema: crs"
# Fix: the announced SQL now matches what is executed — the original
# echoed "CREATE SCHEMA crs;" but actually ran "CREATE SCHEMA IF NOT
# EXISTS crs;". Also quote "$database_name" when handing it to psql.
echo "CREATE SCHEMA IF NOT EXISTS crs;"
echo "CREATE SCHEMA IF NOT EXISTS crs;" | psql -d "$database_name" -e

# Give all schema priviledges to the user 'postgres'
echo "Granting all priviledges on all tables to user 'postgres' in schema: $database_name.crs"
echo "GRANT ALL ON ALL TABLES IN SCHEMA crs TO postgres;"
echo "GRANT ALL ON ALL TABLES IN SCHEMA crs TO postgres;" | psql -d "$database_name" -e

# Give all schema priviledges to the user 'boss'
echo "Granting all priviledges on all tables to user 'boss' in schema: $database_name.crs"
echo "GRANT ALL ON ALL TABLES IN SCHEMA crs TO boss;"
echo "GRANT ALL ON ALL TABLES IN SCHEMA crs TO boss;" | psql -d "$database_name" -e
| true
|
f481a85217979f792aaac0d64beb80856da9d051
|
Shell
|
DOSSANTOSDaniel/X2GO_Server_install
|
/inst_x2go.sh
|
UTF-8
| 1,678
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Installer for (comments translated from French):
#  - the X2GO server
#  - the XFCE4 desktop environment
#  - the Iceweasel browser
#  - the xfce4-terminal terminal
#
#"HALHOUL Billel" halhoul.billel.pro@gmail.com
#"DOS SANTOS Daniel" daniel.massy91@gmail.com
#Source: https://wiki.x2go.org/doku.php/doc:installation:x2goserver
clear
echo -e "\n Début de l'installation \n"
apt-get install dirmngr -y
clear
# Ask which Debian release this is and write the matching X2GO apt source.
while [ : ]
do
  echo ''
  echo "Quelle est votre version de Debian ?"
  echo '------------------------------------'
  echo '[1] Debien stretch'
  echo '[2] Debien jessie'
  echo ''
  read -p " votre choix : " ch
  # -z: true if the field is empty; also reject anything but 1 or 2.
  if [ -z "$ch" ] || [[ "$ch" != [1-2] ]]
  then
    echo -e "\n Argument non pris en charge !" "seuls arguments autorisés de [1] à [2] ! \n"
    exit 1
  fi
  case $ch in
    1) echo "deb http://packages.x2go.org/debian stretch extras main" > /etc/apt/sources.list.d/x2go.list
       echo "deb-src http://packages.x2go.org/debian stretch extras main" >> /etc/apt/sources.list.d/x2go.list
       break
       ;;
    2) echo "deb http://packages.x2go.org/debian/ jessie main contrib" > /etc/apt/sources.list.d/x2go.list
       echo "deb-src http://packages.x2go.org/debian jessie main contrib" >> /etc/apt/sources.list.d/x2go.list
       break
       ;;
    # NOTE(review): `error` is not defined in this script; this arm is
    # unreachable because invalid input exits above — confirm intent.
    *) error "choix non proposé !"
  esac
done
apt-get update
sleep 1
clear
# X2GO archive keyring + key, then the server and desktop packages.
apt-get install x2go-keyring -y
apt-key adv --recv-keys --keyserver keys.gnupg.net E1F958385BFE2B6E
apt-get update
sleep 1
clear
apt-get install x2goserver -y
apt-get install x2goserver-xsession -y
apt-get install xfce4 -y
apt-get install iceweasel -y
apt install xfce4-terminal -y
sleep 1
clear
echo -e "\n Installation terminée, si le service X2GO n'a pas démarré taper 'service x2goserver start' \n"
service --status-all | grep x2goserver
|
3e253528ef3df3947c0b30c3d2ad6cea80734297
|
Shell
|
adrianshort/planningalerts
|
/python_scrapers/createCGI.sh
|
UTF-8
| 294
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Regenerate the CGI scraper scripts and stage any new files in svn.
#echo Removing contents of directory cgi-bin
#svn rm --force ../cgi-bin/*
echo Running generateCGIScripts
python generateCGIScripts.py
# Add the regenerated scripts (svn ignores files already under control).
svn add ../cgi-bin/*
#echo Committing changes to svn
#(cd ../cgi-bin ; svn commit -m "Removing and regenerating directory cgi-bin")
echo Done
| true
|
bb9ceea8a9ef34e1dc61de9d8c3b3483c71bff3e
|
Shell
|
Attamusc/dotfiles-legacy
|
/system/aliases.zsh
|
UTF-8
| 1,443
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
# Shell aliases (zsh): navigation, editor shortcuts, macOS
# Spotlight/desktop toggles, and burl-based HTTP verb helpers.

# Easier navigation
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias .....="cd ../../../.."
alias ~="cd ~"
alias -- -="cd -"
alias cdot="cd $HOME/.dotfiles"

# Shortcuts
alias m="mvim"
alias v="vim"
alias o="open"
alias oo="open ."

# Open Mou from the command line
alias md="open -a Mou"

# ls (exa-backed listings; `kl` uses the k listing tool)
alias l="exa -lah"
alias ll="exa -l"
alias la='exa -a'
alias kl="k --almost-all"

# ---Spotlight Stuff---
# Disable/Enable spotlight indexing
alias spotoff="sudo mdutil -a -i off"
alias spoton="sudo mdutil -a -i on"

# Hide/Show spotlight menubar icon
alias spothide='sudo mv /System/Library/CoreServices/Search.bundle /System/Library/CoreServices/Search.bundle.bak && killall SystemUIServer'
alias spotshow='sudo mv /System/Library/CoreServices/Search.bundle.bak /System/Library/CoreServices/Search.bundle && killall SystemUIServer'

# Hide/show all desktop icons (useful when presenting)
alias hidedesktop="defaults write com.apple.finder CreateDesktop -bool false && killall Finder"
alias showdesktop="defaults write com.apple.finder CreateDesktop -bool true && killall Finder"

# Short webkit2png - don't want to add it to the PATH...
alias w2p='/usr/local/Cellar/webkit2png/0.5/bin/webkit2png'

# Map HTTP verbs to use @visionmedia's awesome
# burl utility
alias GET='burl GET'
alias HEAD='burl -I'
alias POST='burl POST'
alias PUT='burl PUT'
alias PATCH='burl PATCH'
alias DELETE='burl DELETE'
alias OPTIONS='burl OPTIONS'
| true
|
50d3001cae835f29da60f363b0a091d11cdff7cf
|
Shell
|
mtask/duplicity-ansible
|
/files/backup.sh
|
UTF-8
| 1,345
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
## Remote backup script. Requires duplicity.
# Reads `sources` (array), `destination` (backend://path), `encrypt`,
# `gpg_homedir` and `gpg_encrypt_key` from the config file. Each source
# gets a full backup every 30 days, keeps 12 full chains, and prunes
# incrementals of all but the newest 6 fulls.
. /etc/backups/backup.config

# Split destination into the local path portion and the backend scheme.
dest_local_path=${destination##*://}
backend=${destination%://*}

for src in "${sources[@]}"
do
  full_dest="$dest_local_path"/"$src"
  mkdir -p "$full_dest"
  if [[ "$encrypt" == 1 ]]
  then
    # GPG-encrypted variant of the backup + retention commands.
    duplicity --gpg-options="--homedir=$gpg_homedir" --encrypt-key="$gpg_encrypt_key" \
        --verbosity notice \
        --full-if-older-than 30D \
        --num-retries 3 \
        --archive-dir /root/.cache/duplicity \
        --log-file /var/log/duplicity.log \
        "$src" "$backend://$full_dest"
    duplicity --gpg-options="--homedir=$gpg_homedir" --encrypt-key="$gpg_encrypt_key" remove-all-but-n-full 12 --force "$backend://$full_dest"
    duplicity --gpg-options="--homedir=$gpg_homedir" --encrypt-key="$gpg_encrypt_key" remove-all-inc-of-but-n-full 6 --force "$backend://$full_dest"
  else
    # Unencrypted variant with identical schedule and retention.
    duplicity --verbosity notice \
        --no-encryption \
        --full-if-older-than 30D \
        --num-retries 3 \
        --archive-dir /root/.cache/duplicity \
        --log-file /var/log/duplicity.log \
        "$src" "$backend://$full_dest"
    duplicity remove-all-but-n-full 12 --force "$backend://$full_dest"
    duplicity remove-all-inc-of-but-n-full 6 --force "$backend://$full_dest"
  fi
done
| true
|
a32b5bca7bd9752cbbae6878fa798edbba76329d
|
Shell
|
oracle/bosh-oracle-cpi-release
|
/ci/tasks/download-cpi.sh
|
UTF-8
| 707
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Download a candidate bosh-oracle-cpi release tarball from OCI object
# storage into ./candidate/.
# Requires: oracle_* environment variables and dev-version-semver/number.
set -e

#Inputs
version=`cat dev-version-semver/number`

#Outputs
# Fix: the original used ${pwd} — an unset lowercase variable that
# expands to nothing — so the directory was created at /candidate
# instead of ./candidate. Use $PWD.
mkdir -p "${PWD}/candidate"

# Create OCI config
echo "Creating oci config..."
OCI_DIR="$HOME/.oci"
OCI_API_KEY="$OCI_DIR/oci_api_key.pem"
OCI_CONFIG="$OCI_DIR/config"
mkdir -p "${OCI_DIR}"
# The API key and config contents come from the environment (CI secrets).
cat > "${OCI_API_KEY}" <<EOF
${oracle_apikey}
EOF
cat > "$OCI_CONFIG" <<EOF
[DEFAULT]
user=${oracle_user}
tenancy=${oracle_tenancy}
region=${oracle_region}
key_file=$OCI_API_KEY
fingerprint=${oracle_fingerprint}
EOF
chmod 600 "${OCI_API_KEY}"
chmod 600 "${OCI_CONFIG}"

cpi="bosh-oracle-cpi-dev-${version}.tgz"

# Download CPI
oci os object get -ns "${oracle_namespace}" -bn "${oracle_bucket}" --name "${cpi}" --file "${cpi}"
mv "${cpi}" candidate/
| true
|
55a9715d2e75e6d788e993950606a135f14f1fec
|
Shell
|
Ljj12345678/X-MOL
|
/FT_to_generation/slurm/pack_model.sh
|
GB18030
| 965
| 3.953125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
###############################
#FileName:backupLogTimer.sh
# (Original comments were GB18030-encoded and arrived garbled; they are
# translated/reconstructed in English below — confirm against upstream.)
#Function: periodically archive subdirectories of a given output folder
#Version:0.1
#Authon:xueya
#Date:2014.06.26
###############################
# Exactly one argument (the output directory name) is required.
if [[ $# != 1 ]];then
  echo "output"
  exit -1
fi
# Absolute path of the directory to watch.
path=`pwd`/$1
echo "current1 path :${path}"
# Loop forever: every 10 minutes, tar up each subdirectory (removing the
# originals) with a timestamped archive name.
while true
do
  # List entries under the watched directory.
  fileList=`ls ${path} 2>/dev/null`
  for pFile in $fileList
  do
    current_path=${path}/${pFile}
    # Only directories get archived.
    if [[ -d "${current_path}" ]];then
      # Timestamp becomes part of the archive name.
      currentTime=`date +%Y%m%d%H%M%S`
      tarFileName="${current_path}_${currentTime}.tar"
      echo "pack files to $tarFileName"
      cd $path
      # --remove-files deletes the source directory after packing.
      tar -cvf ${tarFileName} ${pFile} --remove-files
      cd -
    fi
  done
  # Wait 10 minutes before the next sweep.
  sleep 10m
done
| true
|
bdce72a34fe7f7be056e940205dcea766f1a4616
|
Shell
|
Andrei1911/CEPSparker
|
/Mac/zxpBuild.command
|
UTF-8
| 2,424
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#
# Build a code-signed ZXP file
#
# Assembles the extension payload (css/CSXS/html/js/jsx/shared) into
# build/<name>, signs it with ZXPSignCmd using the configured cert, and
# produces build/<name>.<version>.zxp.
export scriptDir=`dirname "$0"`
cd "$scriptDir"
export timestampServer="http://timestamp.globalsign.com/scripts/timstamp.dll"
export scriptDir=`pwd`
export projectHomeDir=`dirname "$scriptDir"`
export devtoolsDir="$projectHomeDir/devtools"
export buildDir="$projectHomeDir/build"
export buildSettingsDir="$projectHomeDir/BuildSettings"
"$scriptDir/clean.command"

# Preconditions: configured project, ZXPSignCmd present, certificate set.
if [ ! -f "$projectHomeDir/BuildSettings/ExtensionDirName.txt" ]; then
  echo "This is an unconfigured CEPSparker directory. Nothing to build."
  exit
fi
if [ ! -f "$devtoolsDir/ZXPSignCmd" ]; then
  echo "Need to download ZXPSignCmd first. See 'devtools/downloadZXPSignCmd' scripts"
  exit
fi
# Provides $certfile and $password.
. "$buildSettingsDir/certinfo.command"
if [ ! -f "$buildSettingsDir/$certfile" ]; then
  echo "Need to provide a certificate file, or create a self-signed one first. See devtools/makeSelfSignedCert.command"
  exit
fi

# Extension name and version are read from one-line settings files.
export EXTENSION_DIRNAME=`head -n 1 "$projectHomeDir/BuildSettings/ExtensionDirName.txt"`
if [ "$EXTENSION_DIRNAME" == "" ]; then
  echo "Cannot determine directory name for extension. No file ExtensionDirName.txt or file is empty"
  exit
fi
export EXTENSION_VERSION=`head -n 1 "$projectHomeDir/BuildSettings/ExtensionVersion.txt"`
if [ "$EXTENSION_VERSION" == "" ]; then
  echo "Cannot determine version for extension. No file ExtensionVersion.txt or file is empty"
  exit
fi
if [ ! -d "$buildDir" ]; then
  mkdir "$buildDir"
fi
export EXTENSION_HOMEDIR="$buildDir/$EXTENSION_DIRNAME"
"$scriptDir/clearPlayerDebugMode.command"
"$scriptDir/adjustVersionInManifest.command"

# Stage a fresh copy of the extension payload.
rm -rf "$EXTENSION_HOMEDIR"
mkdir "$EXTENSION_HOMEDIR"
cp -R "$projectHomeDir/css" "$EXTENSION_HOMEDIR/css"
cp -R "$projectHomeDir/CSXS" "$EXTENSION_HOMEDIR/CSXS"
cp -R "$projectHomeDir/html" "$EXTENSION_HOMEDIR/html"
cp -R "$projectHomeDir/js" "$EXTENSION_HOMEDIR/js"
cp -R "$projectHomeDir/jsx" "$EXTENSION_HOMEDIR/jsx"
cp -R "$projectHomeDir/shared_js_jsx" "$EXTENSION_HOMEDIR/shared_js_jsx"
cd "$EXTENSION_HOMEDIR"
# Strip macOS metadata (Finder files and extended attributes) before signing.
find . -name ".DS_Store" | while read a; do rm "$a"; done
find . -name "__MACOSX" | while read a; do rm -rf "$a"; done
xattr -cr .
cd "$projectHomeDir/build"
# Sign with a trusted timestamp and version-stamp the output filename.
"$devtoolsDir/ZXPSignCmd" -sign "$EXTENSION_DIRNAME" "$EXTENSION_DIRNAME.zxp" "$buildSettingsDir/$certfile" "$password" -tsa $timestampServer
mv "$EXTENSION_DIRNAME.zxp" "$EXTENSION_DIRNAME.$EXTENSION_VERSION.zxp"
rm -rf "$EXTENSION_HOMEDIR"
| true
|
235df33dffb74e85926f827c0cd12d16b710dbd7
|
Shell
|
martinandrovich/hric-system
|
/assets/scripts/setup-opensim.bash
|
UTF-8
| 2,144
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# > opensim installation script
# version: 1.0.0
# last modified: 08/05/2020
# Clones opensim-core, builds its dependency superproject, then builds and
# installs OpenSim itself into ~/opensim.
# -------------------------------------------------------------------------------------------------------
# > sudo test
if [ "$EUID" -eq 0 ]
then echo "This script should NOT be run as root; run as current user and only enter password when asked."
exit
fi
# -------------------------------------------------------------------------------------------------------
# > information
echo -e "\n\e[104mOpenSim setup script [v1.0.0]\e[49m\n"
read -p "Install OpenSim on this system? [Y/n] " -n 1 -r
echo
# Anything other than n/N (including plain Enter) proceeds with the install.
if [[ $REPLY =~ ^[Nn]$ ]]; then exit; fi
# -------------------------------------------------------------------------------------------------------
# directories
dir_name="opensim"
dir_source=~/$dir_name-source
dir_build=~/$dir_name-build
dir_install=~/$dir_name
dir_dep_source=$dir_source/dependencies
dir_dep_build=~/$dir_name-dependencies-build
dir_dep_install=~/$dir_name-dependencies-install
# variables
build_type="RelWithDebInfo"
java_wrapping=false
python_wrapping=false
python_version=3
# download OpenSim
echo -e "\nDownloading OpenSim source...\n"
git clone https://github.com/opensim-org/opensim-core.git $dir_source
# build dependencies
# flags_dep is an array with all options
echo -e "\nBuilding OpenSim dependecies...\n"
flags_dep=(
-DCMAKE_INSTALL_PREFIX=$dir_dep_install
-DCMAKE_BUILD_TYPE=$build_type
)
mkdir -p $dir_dep_build && cd $dir_dep_build
cmake $dir_dep_source "${flags_dep[@]}"
make -j4
# build OpenSim
echo -e "\nBuilding OpenSim...\n"
flags_opensim=(
-DCMAKE_INSTALL_PREFIX=$dir_install
-DOPENSIM_DEPENDENCIES_DIR=$dir_dep_install
-DCMAKE_BUILD_TYPE=$build_type
-DBUILD_TESTING=false
-DOPENSIM_COPY_DEPENDENCIES=true
-DBUILD_JAVA_WRAPPING=$java_wrapping
-DBUILD_PYTHON_WRAPPING=$python_wrapping
-DOPENSIM_PYTHON_VERSION=$python_version
)
mkdir -p $dir_build && cd $dir_build
cmake $dir_source "${flags_opensim[@]}"
make -j4
# install OpenSim
echo -e "\nInstalling OpenSim...\n"
make install
# clean-up
# NOTE(review): these commented commands are missing the '$' sigil
# (should be "$dir_build" etc.) -- fix before ever re-enabling them.
# rm -rf dir_build
# rm -rf dir_source
# rm -rf dir_dep_build
# rm -rf dir_dep_install
| true
|
3af5b4f10f068f844f2d7476df5c21aa0c00c886
|
Shell
|
AnrDaemon/samba4-ads
|
/home/git/git-shell-commands/list-commands
|
UTF-8
| 200
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/sh
### list-commands
##+ - List available commands
# Scan each regular file directly under ~/git-shell-commands and print the
# self-description lines they advertise ("### "/"##+ " marker comments),
# stripping the marker prefix from every matched line.
find "$HOME/git-shell-commands" -mindepth 1 -maxdepth 1 -xdev -type f \
  -execdir grep -E '^##[+#] (.+)$' "{}" \; \
  | sed -Ee 's/^##[+#] //;'
| true
|
6af93b932954758baba4e3bf83cc4bd142e0eb51
|
Shell
|
iAlbertTran/GithubActivity
|
/HW4/doswap.sh
|
UTF-8
| 925
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Bring up a new "web2" container from the given image, swap it in for the
# running "web1" behind the proxy, remove the old container, and rename
# web2 -> web1 so this script can be run again later.
#
# $1 - image to deploy as the replacement web container.
#
# FIX: the original shebang was "#/bin/bash" (missing '!'), so it was a
# plain comment and the script ran under whatever shell invoked it.
echo "Bringing up the new image $1..."
docker run -d -P --name web2 --net ecs189_default "$1"
echo "...Done!"
# Flip the proxy configuration from web1 to web2.
echo "Now swapping out the old web for the new one..."
docker exec ecs189_proxy_1 /bin/bash /bin/swap2.sh
# Drop the now-unused web1 so only two containers stay running.
echo "Removing the old web and doing some cleanup..."
ID=$(docker ps -aqf "name=web1")
docker rm -f "$ID"
echo "...Done!"
# Rename so a future invocation again finds a container called web1.
docker rename web2 web1
| true
|
6ea0758c4681aee84a2f331f34878eeca7ecefe0
|
Shell
|
bmwiedemann/openSUSE
|
/packages/l/logrotate/logrotate-all
|
UTF-8
| 736
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/sh
# Wrapper around logrotate that merges vendor config (/usr/etc) with admin
# config (/etc), letting /etc override /usr/etc file-by-file.
set -eu
# $configs accumulates a space-separated list of config paths; it is
# deliberately expanded unquoted at the end so each path becomes its own
# argument to logrotate.
configs=
# Only read /usr/etc/logrotate.conf if /etc/logrotate.conf does not exist
if ! [ -e /etc/logrotate.conf ]; then
configs="$configs /usr/etc/logrotate.conf"
else
configs="$configs /etc/logrotate.conf"
fi
# Then read in all of {/usr,}/etc/logrotate.d/*, with /etc/ overriding /usr/etc/.
dirs=
[ -d /usr/etc/logrotate.d ] && dirs="/usr/etc/logrotate.d"
[ -d /etc/logrotate.d ] && dirs="$dirs /etc/logrotate.d"
if [ -n "$dirs" ]; then
# %P prints paths relative to the searched dir, so identically named files
# in both trees collapse to one entry via sort -u.
for confname in $(find $dirs -type f -printf "%P\n" | sort -u); do
if [ -e "/etc/logrotate.d/$confname" ]; then
configs="$configs /etc/logrotate.d/$confname"
else
configs="$configs /usr/etc/logrotate.d/$confname"
fi
done
fi
exec /usr/sbin/logrotate $configs
| true
|
606c1712511385d346321f1b3994f6dc3f6522d8
|
Shell
|
pcingola/SnpEff
|
/scripts_build/genesTxtColumnNames.sh
|
UTF-8
| 1,015
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#-------------------------------------------------------------------------------
# Convert the gene names in order to be used in an R script
#
# Usage: cat snpEff_genes.txt | ./scripts/genesTxtColumnNames.sh > genes.txt
#
# Once in R, you can:
#	- Load this table:
#			data <- read.csv("genes.txt", sep= "\t", header=TRUE);
#
#	- Access the data:
#			data$countINTRON
#
#	- Add missing or empty columns:
#			if( is.null(data$countINTRON ) { data$countINTRON <- 0 * (1:length(data$geneId) ); }
#
#																Pablo Cingolani
#-------------------------------------------------------------------------------
# Reads the snpEff genes report from stdin (the bare "cat") and rewrites the
# header row into valid R column names; data rows are unaffected because the
# replaced strings only occur in the header.
# NOTE(review): this pipeline ends with a trailing continuation backslash,
# which suggests the original file had more sed stages that were cut off
# here -- verify against the upstream script before relying on the output.
cat \
	| grep -v "^# The following"\
	| sed "s/Bases affected (/bases/g" \
	| sed "s/Length (/len/g" \
	| sed "s/Count (/count/g" \
	| sed "s/Total score (/score/g" \
	| sed "s/)//g" \
	| sed "s/#GeneId/geneId/" \
	| sed "s/GeneName/geneName/" \
	| sed "s/BioType/bioType/" \
	| sed "s/_PRIME//g" \
	| sed "s/SPLICE_SITE_//g" \
	| sed "s/SYNONYMOUS_CODING/SYN/g" \
| true
|
5fe62bf0b7ef7bf0a8d74c43749cd33dbddf8503
|
Shell
|
cCurses/alpharepo
|
/testing/pipe-vs-redirect
|
UTF-8
| 7,626
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
#+ marker sof +#
# concept:
# - test rig to get running times of scanning directory hierarchies
# - the nested find loops are fed by sorted lists using pipes or redirects
# - the overall purpose of this test is to discover which is faster
# results:
# - counter to what I expected, the pipe form is *always* faster than the
# redirect form. sometimes the difference is quite small, other times the
# difference is quite large
# - ergo, there's something going on that I don't fully grasp
#+ setup here +#
# - be explicit for clarity
# - single dimension indexed array
declare -a _ARRAY_PATHNAMES
# sort keys match the find -printf formats used by the scan functions:
# field 4 is %p (pathname) in the 4-field directory listing, and
# field 11 is %p in the 11-field file listing.
_OPTIONS_SORT_DIRECTORIES="sort --key=4"
_OPTIONS_SORT_FILES="sort --key=11"
# print all find %TZ timestamps in UTC
export TZ=UTC
#- setup here -#
#+ function definitions here +#
function _parse_parameters()
{
#+ _parse_parameters +#
# Parses the command line: -P/--pipe or -R/--redirect selects which scan
# implementation $_FUNCTION will dispatch to, and -p/--pathname collects
# one or more existing directories into _ARRAY_PATHNAMES. Exits 1 on any
# invalid option, parameter, or pathname.
# variables:
# #
# 1
# 2
# @
# _ARRAY_PATHNAMES (single dimension indexed array)
# _FUNCTION
# binaries:
# find
#+ parameter defaults +#
unset _ARRAY_PATHNAMES
unset _FUNCTION
#- parameter defaults -#
while [[ $# -gt 0 ]]
do
case $1 in
( -P \
| --pipe )
_FUNCTION=_pathnames_scan_pipe
;;
( -R \
| --redirect )
_FUNCTION=_pathnames_scan_redirect
;;
( -p \
| --pathname )
# - ${parameter:offset:length}
# - "-z string" = true if the length of string is zero
if [[ ${2:0:1} == - ]] \
|| [[ -z $2 ]]
then
echo "pathname not specified"
exit 1
else
# - keep adding paths until another option is specified or until
# we've reached the end of the commandline
# - globs are now supported. refer to bash(1) section
# "Pattern Matching" for specifics
# - ${parameter:offset:length}
# - "-z string" = true if the length of string is zero
until [[ ${2:0:1} == - ]] \
|| [[ -z $2 ]]
do
# - simplistic test to ensure only directories end up in the array
# - use the "find" binary rather than the bash "-d" conditional
# expression since it's considerably more robust
# - "-n string" = true if the length of string is non-zero
if [[ -n "$(find "$2" -type d -maxdepth 0 2> /dev/null)" ]]
then
_ARRAY_PATHNAMES+=( "$2" )
else
echo "invalid pathname \"$2\" specified"
exit 1
fi
shift
done
fi
;;
( -* )
echo "invalid option \"$1\" specified"
exit 1
;;
( * )
echo "invalid parameter \"$1\" specified"
exit 1
;;
esac
shift
done
# debug summary of what was parsed
echo " \${#_ARRAY_PATHNAMES[@]}: \"${#_ARRAY_PATHNAMES[@]}\""
echo " \${_ARRAY_PATHNAMES[@]}: \"${_ARRAY_PATHNAMES[@]}\""
echo " \$_FUNCTION: \"$_FUNCTION\""
#- _parse_parameters -#
}
function _pathnames_scan_pipe()
{
  #+ _pathnames_scan_pipe +#
  # Emit a sorted report of every non-empty directory under the paths in
  # _ARRAY_PATHNAMES: a directory mtime header, then a stat-style line per
  # regular file directly inside it. Sort options come from the globals
  # _OPTIONS_SORT_DIRECTORIES / _OPTIONS_SORT_FILES (intentionally
  # expanded unquoted so they word-split into a command).
  # This is the "pipe" variant: each find's output is piped into sort
  # before being fed to the read loop via a here-string.
  # FIX: -maxdepth is a global option and is now given before -type so GNU
  # find does not emit its "option after non-option argument" warning
  # (previously only hidden by the 2>/dev/null redirect).
  while read -r \
    _SD_MTIM_YMD _SD_MTIM_HMS _SD_MTIM_UTC _SD_NAME
  do
    # strip microseconds from the directory mtime
    _SD_MTIM_HMS=${_SD_MTIM_HMS/.*/}
    # $_SD_NAME absolutely *must* be doublequoted to preserve special chars
    printf "%66s %s %s %s %s\n" \
      $_SD_MTIM_YMD $_SD_MTIM_HMS $_SD_MTIM_UTC "$_SD_NAME"
    while read -r \
      _ST_DEV _ST_INO _ST_MODE _ST_NLINK _ST_UID _ST_GID _ST_SIZE \
      _ST_MTIM_YMD _ST_MTIM_HMS _ST_MTIM_UTC _ST_NAME
    do
      # "non-empty" directories may contain neither directories nor regular
      # files (symlinks, device specials, ...); read then yields empty fields
      if [[ -n $_ST_NAME ]]
      then
        # strip microseconds from the file mtime
        _ST_MTIM_HMS=${_ST_MTIM_HMS/.*/}
        # $_ST_NAME absolutely *must* be doublequoted to preserve special chars
        printf "%4s %10s %s %4s %4s %3s %14s %s %s %s %s\n" \
          $_ST_DEV $_ST_INO $_ST_MODE $_ST_NLINK $_ST_UID $_ST_GID $_ST_SIZE \
          $_ST_MTIM_YMD $_ST_MTIM_HMS $_ST_MTIM_UTC "$_ST_NAME"
      fi
    done <<< \
    "$(find "$_SD_NAME" \
      -maxdepth 1 \
      -type f \
      -printf "%D %i %M %n %U %G %s %TY-%Tm-%Td %TH:%TM:%TS %TZ %p\n" 2> /dev/null \
      | $_OPTIONS_SORT_FILES)"
    # NOTE: the doublequotes around the $() command substitution are mandatory
  done <<< \
  "$(find "${_ARRAY_PATHNAMES[@]}" \
    -type d \
    ! -empty \
    -printf "%TY-%Tm-%Td %TH:%TM:%TS %TZ %p\n" \
    | $_OPTIONS_SORT_DIRECTORIES)"
  # NOTE: the doublequotes around the $() command substitution are mandatory
  #- _pathnames_scan_pipe -#
}
function _pathnames_scan_redirect()
{
  #+ _pathnames_scan_redirect +#
  # Identical report to _pathnames_scan_pipe (directory mtime headers plus
  # stat-style file lines, sorted via the _OPTIONS_SORT_* globals), but the
  # "redirect" variant: sort reads each find's output through a process
  # substitution <(find ...) instead of a pipe -- the alternative being
  # timed by this test rig.
  # FIX: -maxdepth is a global option and is now given before -type so GNU
  # find does not emit its "option after non-option argument" warning
  # (previously only hidden by the 2>/dev/null redirect).
  while read -r \
    _SD_MTIM_YMD _SD_MTIM_HMS _SD_MTIM_UTC _SD_NAME
  do
    # strip microseconds from the directory mtime
    _SD_MTIM_HMS=${_SD_MTIM_HMS/.*/}
    # $_SD_NAME absolutely *must* be doublequoted to preserve special chars
    printf "%66s %s %s %s %s\n" \
      $_SD_MTIM_YMD $_SD_MTIM_HMS $_SD_MTIM_UTC "$_SD_NAME"
    while read -r \
      _ST_DEV _ST_INO _ST_MODE _ST_NLINK _ST_UID _ST_GID _ST_SIZE \
      _ST_MTIM_YMD _ST_MTIM_HMS _ST_MTIM_UTC _ST_NAME
    do
      # "non-empty" directories may contain neither directories nor regular
      # files (symlinks, device specials, ...); read then yields empty fields
      if [[ -n $_ST_NAME ]]
      then
        # strip microseconds from the file mtime
        _ST_MTIM_HMS=${_ST_MTIM_HMS/.*/}
        # $_ST_NAME absolutely *must* be doublequoted to preserve special chars
        printf "%4s %10s %s %4s %4s %3s %14s %s %s %s %s\n" \
          $_ST_DEV $_ST_INO $_ST_MODE $_ST_NLINK $_ST_UID $_ST_GID $_ST_SIZE \
          $_ST_MTIM_YMD $_ST_MTIM_HMS $_ST_MTIM_UTC "$_ST_NAME"
      fi
    done <<< \
    "$($_OPTIONS_SORT_FILES \
      <(find "$_SD_NAME" \
        -maxdepth 1 \
        -type f \
        -printf "%D %i %M %n %U %G %s %TY-%Tm-%Td %TH:%TM:%TS %TZ %p\n" 2> /dev/null))"
    # NOTE: the doublequotes around the $() command substitution are mandatory
  done <<< \
  "$($_OPTIONS_SORT_DIRECTORIES \
    <(find "${_ARRAY_PATHNAMES[@]}" \
      -type d \
      ! -empty \
      -printf "%TY-%Tm-%Td %TH:%TM:%TS %TZ %p\n"))"
  # NOTE: the doublequotes around the $() command substitution are mandatory
  #- _pathnames_scan_redirect -#
}
#- function definitions here -#
#+ start here +#
_parse_parameters "$@"
# - do the thing
$_FUNCTION
#- start here -#
#- marker eof -#
exit 0
| true
|
5d197fe562d74e27be7aefb1376e889b4536dcbf
|
Shell
|
thouters/scriptbox
|
/perhaps.sh
|
UTF-8
| 70
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Read all of stdin; if it is non-empty, feed it to the command given as
# arguments. With empty input, run nothing at all (hence "perhaps").
IN="$(cat)"
if [[ ! -z "$IN" ]]
then
    # FIX: "$@" keeps each argument a single word -- the original unquoted
    # $@ re-split arguments containing whitespace and expanded glob chars.
    # printf avoids echo's option-parsing when $IN starts with -n/-e.
    printf '%s\n' "$IN" | "$@"
fi
| true
|
47410f961e9de2da60b68996a1f10891a9b204d9
|
Shell
|
jklafka/language-modeling
|
/Analysis/EM/em_optimization.sh
|
UTF-8
| 424
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the EM cost sweep and plot the per-barycenter final costs.
# $1 $2 $3 - forwarded unchanged to get_all_costs.py and costs_plotting.py.
python3 get_all_costs.py "$1" "$2" "$3" > all_costs.txt
## grep commands that wrangle the final cost for each barycenter into costs.txt
## find every line that's just a number: get that line and the one before it
## now take only the final decimal number and the following integer
# FIX: POSIX ERE has no \d escape, so the original '^\d' / '0\.\d+'
# patterns were grep-build dependent; use explicit [0-9] classes.
# (Also reads the file directly instead of piping it through cat.)
grep -B 1 -E '^[0-9]' all_costs.txt |
  grep -o -A 1 -E '0\.[0-9]+' > costs.txt
python3 costs_plotting.py "$1" "$2" "$3" --printing=True
| true
|
dd999cf09352bebd8092aa8d953e4c9cd35d75b3
|
Shell
|
liquanzhou/ops_doc
|
/Service/puppet/puppet-server.txt
|
ISO-8859-13
| 1,192
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
### before installmust modify the hostname and reboot the machine ###
############### attention #################
#######################################################################
# Installs a Puppet 2.7 master on a RHEL/CentOS host: ruby + facter +
# puppet from tarballs, autosigning enabled for all agents, firewall and
# SELinux disabled. Assumes it is run as root.
### install ruby
groupadd puppet
useradd -g puppet -s /bin/false -M puppet
yum install -y ruby
### time sync
# install ntp only if it is not already present, then sync the clock once
rpm -qa|grep ^ntp >/dev/null 2>&1
if [ $? != 0 ]
then
yum install -y ntp
fi
ntpdate cn.pool.ntp.org
### install puppet
wget http://downloads.puppetlabs.com/puppet/puppet-2.7.9.tar.gz
wget http://downloads.puppetlabs.com/facter/facter-1.6.4.tar.gz
tar fxz facter-1.6.4.tar.gz
cd facter-1.6.4
ruby install.rb
cd ..
tar fxz puppet-2.7.9.tar.gz
cd puppet-2.7.9
ruby install.rb
# ship the bundled redhat configs and register the master as a SysV service
cp conf/redhat/* /etc/puppet/
cp /etc/puppet/server.init /etc/init.d/puppetmasterd
chmod 755 /etc/init.d/puppetmasterd
chkconfig --add puppetmasterd
chkconfig --level 35 puppetmasterd on
### edit autosign
# append autosign settings after the ssldir line; "*" autosigns every agent
# certificate request (convenient but trusts the whole network)
sed -i '/ssldir/ a autosign=true' /etc/puppet/puppet.conf
sed -i '/autosign/ a autosign=\/etc\/puppet\/autosign\.conf' /etc/puppet/puppet.conf
echo "*" > /etc/puppet/autosign.conf
service puppetmasterd start
# open the host up so agents can connect
/etc/init.d/iptables stop
setenforce 0
| true
|
94de2510e5eadd0cddcc839ff76349788c8f89be
|
Shell
|
mapbox/mason
|
/scripts/minjur/a2c9dc871369432c7978718834dac487c0591bd6/script.sh
|
UTF-8
| 1,893
| 3.34375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Mason build recipe for minjur (OSM -> GeoJSON converter), pinned to a
# specific upstream commit. mason.sh supplies the mason_* helpers and the
# MASON_ROOT/MASON_PREFIX variables, then mason_run drives the callbacks
# defined below.
MASON_NAME=minjur
MASON_VERSION=a2c9dc871369432c7978718834dac487c0591bd6
MASON_LIB_FILE=bin/minjur
. ${MASON_DIR}/mason.sh
# Fetch and unpack the pinned minjur tarball; the second argument to
# mason_download is its expected checksum.
function mason_load_source {
mason_download \
https://github.com/mapbox/minjur/tarball/a2c9dc871369432c7978718834dac487c0591bd6 \
b24a45f64ae0b75e2fbcbb6b87e04192b63b3014
mason_extract_tar_gz
export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapbox-minjur-a2c9dc8
}
# Stage build-time dependencies: a pinned libosmium checkout (header-only,
# fetched directly) plus mason-packaged libraries linked into .link/.
function mason_prepare_compile {
echo ${MASON_ROOT}/.build
cd ${MASON_ROOT}
OSMIUM_INCLUDE_DIR=${MASON_ROOT}/osmcode-libosmium-372d29a/include
curl --retry 3 -f -# -L "https://github.com/osmcode/libosmium/tarball/372d29a34d8b3f571ea7172d527730d3d5200dab" -o osmium.tar.gz
tar -xzf osmium.tar.gz
cd $(dirname ${MASON_ROOT})
${MASON_DIR}/mason install boost 1.57.0
${MASON_DIR}/mason link boost 1.57.0
${MASON_DIR}/mason install boost_libprogram_options 1.57.0
${MASON_DIR}/mason link boost_libprogram_options 1.57.0
${MASON_DIR}/mason install protobuf 2.6.1
${MASON_DIR}/mason link protobuf 2.6.1
${MASON_DIR}/mason install zlib 1.2.8
${MASON_DIR}/mason link zlib 1.2.8
${MASON_DIR}/mason install expat 2.1.0
${MASON_DIR}/mason link expat 2.1.0
${MASON_DIR}/mason install osmpbf 1.3.3
${MASON_DIR}/mason link osmpbf 1.3.3
${MASON_DIR}/mason install bzip 1.0.6
${MASON_DIR}/mason link bzip 1.0.6
}
# Configure with CMake against the linked dependencies and install the
# three produced binaries into the mason prefix.
function mason_compile {
mkdir build
cd build
CMAKE_PREFIX_PATH=${MASON_ROOT}/.link \
cmake \
-DCMAKE_BUILD_TYPE=Release \
-DOSMIUM_INCLUDE_DIR=${OSMIUM_INCLUDE_DIR} \
..
make
mkdir -p ${MASON_PREFIX}/bin
mv minjur ${MASON_PREFIX}/bin/minjur
mv minjur-mp ${MASON_PREFIX}/bin/minjur-mp
mv minjur-generate-tilelist ${MASON_PREFIX}/bin/minjur-generate-tilelist
}
function mason_clean {
make clean
}
mason_run "$@"
| true
|
e13e6b344f7f960532b1498bcaf4f13444123edd
|
Shell
|
liushuchun/TensorRTClassifier
|
/app/trt_gen_tool/run_docker.sh
|
UTF-8
| 237
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Re-create the trt_gen_tool container and run the generator inside it,
# forwarding this script's arguments to run_locally.sh.
container_name=trt_gen_tool
container_image=dev/tensorrt
# Collapse all script arguments into one string for the inner command line.
params="$*"
# Drop any stale container with the same name before starting a fresh one.
docker rm -f ${container_name}
nvidia-docker run -ti \
    --name ${container_name} \
    -v $(pwd)/../../:/work \
    ${container_image} \
    /bin/bash -c "cd /work/app/trt_gen_tool && ./run_locally.sh ${params}"
| true
|
c21ddd622ca49b0fb9bead0d80bf6109962da3d8
|
Shell
|
ASzot/Dot-Files
|
/cron/daily.sh
|
UTF-8
| 699
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
# Daily cron job: commit/push the personal wiki, prune old downloads, and
# regenerate the daily report in ~/Downloads/daily.
# Push wiki
# && chain: skip commit/push entirely if cd fails, and skip push if there
# was nothing to commit
cd ~/me && git add -A && git commit -m "Daily update" && git push
# Clean up downloads folder.
# removes top-level entries older than one day
find ~/Downloads/* -maxdepth 1 -mtime +1 -exec rm -rf {} \;
# rm -f ~/Downloads/BeFocused.csv
# Fetch the BeFocused data
# open -n /Users/andrewszot/Library/Mobile\ Documents/com\~apple\~Automator/Documents/export_befocused_csv.app
rm ~/Downloads/daily/*
# Generate
/Users/andrewszot/miniconda3/bin/python ~/.dot-files/cron/get_daily.py
# Remove the auxiliary latex files.
rm ~/Downloads/daily/*.aux
rm ~/Downloads/daily/*.out
rm ~/Downloads/daily/*.log
# Generate stats vis.
# /Users/andrewszot/miniconda3/bin/python ~/.dot-files/cron/vis_stats.py
# show the finished report folder in Finder (macOS)
open ~/Downloads/daily
| true
|
6eb1a59cdb7783e986b0585df9ca382b8f8b5a9e
|
Shell
|
thirdorderharmonic/jamf-extensions
|
/ard_field1.sh
|
UTF-8
| 331
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
# This attribute returns the ARD Computer Info field 1 from the Computer Information attributes in the Apple Remote Desktop settings of a machine.
# Only report when the Remote Desktop preferences file actually exists;
# otherwise emit nothing at all.
ard_plist="/Library/Preferences/com.apple.RemoteDesktop.plist"
if [ -f "$ard_plist" ]; then
    echo "<result>$(/usr/bin/defaults read /Library/Preferences/com.apple.RemoteDesktop Text1)</result>"
fi
| true
|
b12ea9d4f241327a21e0ff73241cd84aa0fdc368
|
Shell
|
an2e9ingC/linux_env_settings
|
/py_install.sh
|
UTF-8
| 768
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build CPython from source into /usr/local/Python-<version> and point the
# current user's shell at it via ~/.bashrc.
# Set Python version to be installed
export python_version=3.8.7
# Install dependency package
sudo apt install libffi-dev
# Download Python tarball
wget -P/tmp https://www.python.org/ftp/python/${python_version}/Python-${python_version}.tgz
# Build and install Python
cd /tmp
tar xvf Python-${python_version}.tgz
cd Python-${python_version}
./configure --prefix=/usr/local/Python-${python_version} --with-openssl=/usr
make -j2
sudo make install
# Set environment for using new Python
echo "" >> ~/.bashrc
echo "# Set environment for Python" >> ~/.bashrc
echo "export LD_LIBRARY_PATH=/usr/local/Python-${python_version}/lib:\$LD_LIBRARY_PATH" >> ~/.bashrc
echo "export PATH=/usr/local/Python-${python_version}/bin:\$PATH" >> ~/.bashrc
# NOTE(review): sourcing here only affects this script's own shell, not
# the invoking terminal -- the user still needs a new shell (or to source
# ~/.bashrc themselves) for the new Python to appear on PATH.
source ~/.bashrc
| true
|
c558a7db3336a1147c4b0c7f53969ac137f969ff
|
Shell
|
danpawlik/openstack-helm-deployment
|
/scripts/check-service-replication.sh
|
UTF-8
| 1,052
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Ensure key OpenStack control-plane deployments are scaled to one replica
# per cluster node.
#
# Env:
#   NAMESPACE         - k8s namespace to inspect (default: openstack)
#   SCALE_SERVICES    - "true" to actually scale under-replicated services
#   REPLICATION_COUNT - target replica count; defaults to the node count
NAMESPACE=${NAMESPACE:-'openstack'}
SCALE_SERVICES=${SCALE_SERVICES:-'true'}
OS_SERVICES="glance-api keystone-api neutron-server nova-api-metadata nova-api-osapi"
REPLICATION_COUNT=${REPLICATION_COUNT:-''}
if [ -z "${REPLICATION_COUNT}" ]; then
  REPLICATION_COUNT=$(kubectl get nodes --no-headers | wc -l)
fi
for service in $OS_SERVICES;
do
  SERVICE_COUNT=$(kubectl -n "${NAMESPACE}" get deployment "${service}" -o json | jq .status.replicas)
  # FIX: the original "-le ... && != ..." pair is simply a strict less-than.
  if [ "${SERVICE_COUNT}" -lt "${REPLICATION_COUNT}" ]; then
    echo "Service is not scaled."
    if [ "${SCALE_SERVICES}" = "true" ]; then
      echo "Scaling service: ${service}"
      kubectl -n "${NAMESPACE}" scale deployment "${service}" --replicas="${REPLICATION_COUNT}"
      # FIXME: or maybe this one when k8s is 1.15:
      #kubectl -n "${NAMESPACE}" scale deployment "${service}"--min="${REPLICATION_COUNT}" --max=$((REPLICATION_COUNT * 2)) --cpu-percent=80
    fi
  fi
done
| true
|
68ad18c29293ec353875ff595425a73e18785e81
|
Shell
|
devonpmack/cli-github-repo-creator
|
/git-create
|
UTF-8
| 1,410
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# originally made by https://gist.github.com/robwierzbowski/5430952/
# Updated to work in 2019 by devonpmack
# Create and push to a new github repo from the command line.
# generate a personal access token here: https://github.com/settings/tokens
GTOKEN="ACCESS_TOKEN_HERE"
# Gather constant vars
CURRENTDIR=${PWD##*/}
# must run git config user.name "devonpmack"
GITHUBUSER=$(git config user.name)
DESCRIPTION=""
# Get user input (-r keeps backslashes in typed names literal)
echo "New repo name (enter for ${PWD##*/}):"
read -r REPONAME
echo "Git Username (enter for ${GITHUBUSER}):"
read -r USER
echo "Private? (y/n)"
read -r PRIVATE
if [ "$PRIVATE" = "y" ]; then
  PRIVATE="true"
else
  PRIVATE="false"
fi
echo "Creating repository..."
# Curl some json to the github API oh damn we so fancy
curl -u ${USER:-${GITHUBUSER}:${GTOKEN}} https://api.github.com/user/repos -d "{\"name\": \"${REPONAME:-${CURRENTDIR}}\", \"description\": \"${DESCRIPTION}\", \"private\": ${PRIVATE}, \"has_issues\": true, \"has_downloads\": true, \"has_wiki\": false}"
# Set the freshly created repo to the origin and push
# You'll need to have added your public key to your github account
# FIX: the original test `[ $(git remote)="origin" ]` was one non-empty
# string (always true), and a syntax error with several remotes; actually
# check whether an "origin" remote exists before removing it.
if git remote | grep -qx origin; then
  git remote rm origin
fi
git remote add origin git@github.com:${USER:-${GITHUBUSER}}/${REPONAME:-${CURRENTDIR}}.git
git push --set-upstream origin master
echo Link to repo: https://github.com/${USER:-${GITHUBUSER}}/${REPONAME:-${CURRENTDIR}}
| true
|
3c244902c1f50e2ec5eac3e4a349cfc03a748703
|
Shell
|
raza-naqui/sql-load-runner
|
/runSqlFile.sh
|
UTF-8
| 632
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Run a SQL*Plus script against the database described by a properties file.
#
# $1 - DB config properties file (key=value; keys may use '.' separators)
# $2 - SQL file to execute
# $3 - log file capturing SQL*Plus output
# $4 - customer name, forwarded to the SQL script
if [ "$#" -ne 4 ]; then
echo "Incorrect Syntax!"
echo "Please pass the required parameters DB_CONFIG_PROPERTIES_FILE, SQL_FILE, LOG_FILE, CUST_NUM"
# FIX: bare "exit" returned the status of the last echo (0), so callers
# saw success on a usage error.
exit 1
fi
dbConfigFile=$1
sqlFile=$2
logFile=$3
custName=$4
if [ -f "$dbConfigFile" ]
then
# Turn each "a.b.c=value" property into a shell variable a_b_c=value.
# NOTE: eval trusts the config file contents; values are not sanitized.
while IFS='=' read -r key value
do
key=$(echo "$key" | tr '.' '_')
eval ${key}=\${value}
done < "$dbConfigFile"
#Connect SQL Plus here
DB_CONNECTION_URL=${DB_USER}/${DB_PASSWORD}@${DB_HOST}/${DB_SERVICE_NAME}
sqlplus -S "$DB_CONNECTION_URL" @${sqlFile} "$custName" > "${logFile}"
else
echo "The DB Config Properties File [$dbConfigFile] not found."
# FIX: signal failure to the caller instead of exiting 0.
exit 1
fi
| true
|
d9e94225c9a9aa502a1dabf78568a9c1197c41ef
|
Shell
|
wrideout/dotzsh
|
/zshrc
|
UTF-8
| 4,007
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#
# zshrc
#
# In order to work, be sure to make a hard link from this file to ~/.zshrc.
# This link allows for the .zsh directory to be used as a git repository, for
# easy changes and updates. Alternatively, if you don't care to use git, you
# could just make this file your ~/.zshrc.
#
# See git log for changes.
#
# William Rideout
#
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh
# Use Antigen
# source ~/.zsh/antigen/antigen.zsh
# antigen use oh-my-zsh
# antigen bundle python
# antigen bundle safe-paste
# antigen bundle cp
# antigen bundle extract
# antigen bundle brew
# antigen bundle osx
# antigen theme robbyrussell
# antigen theme https://github.com/caiogondim/bullet-train-oh-my-zsh-theme bullet-train
# NOTE(review): 'antigen apply' still runs even though the 'source
# antigen.zsh' line above is commented out -- this will error unless
# antigen is loaded elsewhere; confirm or comment this line out too.
antigen apply
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="robbyrussell"
# ZSH_THEME="af-magic"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git svn python safe-paste cp extract)
source $ZSH/oh-my-zsh.sh
# User configuration
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# vim command line editing
bindkey -v
# Reduce the lag between modes
export KEYTIMEOUT=1
# Modal marker for the prompt, showing the current vim-style mode
# function zle-line-init zle-keymap-select
# {
# NORMAL="%{$fg_bold[white]%} [% NORMAL]% %{$reset_color%}"
# INSERT="%{$fg_bold[yellow]%} [% INSERT]% %{$reset_color%}"
# RPS1="${${KEYMAP/vicmd/$NORMAL}/(main|viins)/$INSERT}"
# zle reset-prompt
# }
# zle -N zle-line-init
# zle -N zle-keymap-select
# Command history lookup
bindkey "^R" history-incremental-search-backward
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# ls variants (GNU coreutils --color)
alias ls='ls --color=auto'
alias ll='ls -l'
alias la='ls -a'
alias lt='ls -lhtr'
alias lla='ls -l -a'
alias llh='ls -l -h'
alias lls='ls -l -S'
# Stop vim from searching for X11 binaries
# alias vim='~/linux/bin/vim -X'
# alias vim='vim -X'
# Source any local zsh configurations, if they exist
if [ -f ~/.zsh_local ]; then
source ~/.zsh_local
fi
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
| true
|
0eefd87ec8c560a830d9f238059bfd1b51faf317
|
Shell
|
webclinic017/MLServices
|
/bin/start.sh
|
UTF-8
| 374
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# Prepare the Python path for the ML services and launch their Redis
# instance, unless the configured pidfile shows it is already running.
base_dir=$(pwd)
lib_python=${base_dir}/lib
redis_dir=${base_dir}/etc/redis
redis_conf=ml_service.conf
# Make the project's lib/ importable by the Python services.
export PYTHONPATH=$PYTHONPATH:${lib_python}
# The pidfile path declared in the redis config doubles as the
# "already running" marker.
redis_pidfile=$(awk '/pidfile/ {print $2}' ${redis_dir}/${redis_conf})
if [ ! -e ${redis_pidfile} ]; then
cd ${redis_dir}
redis-server ${redis_conf}
fi
| true
|
74ab1a860f124e499dc662bd61315095635c3f0d
|
Shell
|
m0420/MeridianJB
|
/Meridian/Meridian/bootstrap/create-meridian-bootstrap.sh
|
UTF-8
| 521
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Assemble the spongebob bootstrap tarball from the freshly built binaries.
# FIX: resolve the script directory to an ABSOLUTE path -- the original
# kept it relative, so after `cd $baseDir` the final
# `mv spongebob-bootstrap.tar $currDir` resolved against the wrong
# directory unless the script was invoked via an absolute path.
currDir=$(cd "$(dirname "$0")" && pwd)
spongebobDir=$currDir/../..
baseDir=$currDir/spongebob-bootstrap
# amfid_payload.dylib
cp "$spongebobDir"/amfid/bin/* "$baseDir"/spongebob/
# pspawn_hook.dylib
cp "$spongebobDir"/pspawn_hook/bin/* "$baseDir"/usr/lib/
# jailbreakd
cp "$spongebobDir"/jailbreakd/bin/* "$baseDir"/spongebob/jailbreakd/
# remove all .DS_Store files
find "$baseDir" -name '.DS_Store' -delete
# create tar archive
# COPYFILE_DISABLE stops macOS tar from adding ._* AppleDouble entries
cd "$baseDir" || exit 1
COPYFILE_DISABLE=1 tar -cf spongebob-bootstrap.tar ./*
mv spongebob-bootstrap.tar "$currDir"
| true
|
e4b203ee76d8d64209cae03b08a8e85f9427de40
|
Shell
|
kulpree/ops-pipeline
|
/test/integration/jenkins/bats/sonar_works.bats
|
UTF-8
| 646
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats
# Integration checks that SonarQube is installed and can analyze a sample
# Maven project using sonar-runner. Requires network access and sudo.
@test "sonarqube is installed" {
run which sonarqube
[ "$status" -eq 0 ]
}
@test "sonarqube is working" {
# work in a throwaway directory; fixtures are downloaded fresh each run
dir=$( mktemp -d )
cd $dir
wget -q https://github.com/SonarSource/sonar-examples/archive/master.zip
unzip -q master.zip
wget -q http://repo1.maven.org/maven2/org/codehaus/sonar/runner/sonar-runner-dist/2.4/sonar-runner-dist-2.4.zip
unzip -q sonar-runner-dist-2.4.zip
# NOTE(review): '||' and '&&' chain left-to-right, so 'sleep 30' also runs
# when 'status' already succeeded -- presumably intended as a settle
# delay, but confirm.
sudo sonarqube status || sudo sonarqube start && sleep 30
cd sonar-examples-master/projects/languages/java/sonar-runner/java-sonar-runner-simple/
run ../../../../../../sonar-runner-2.4/bin/sonar-runner
[ "$status" -eq 0 ]
}
| true
|
bb75cf1850028fd7cc29755b39d03c76f4aef7f3
|
Shell
|
raulbcs/vagrant-template
|
/provision/shell/bootstrap.sh
|
UTF-8
| 1,412
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
# Vagrant provisioner: copies the puppet tree out of the shared folder,
# installs git and librarian-puppet if missing, resolves puppet modules,
# and applies the main manifest.
# Directory in which librarian-puppet should manage its modules directory
# Windows Virtual Box has 260 character limit on the file path length; we need to run puppet from
# the non-mapped directory, or we'll hit this limit when librarin builds its cache.
PUPPET_SRC='/vagrant/provision/puppet'
PUPPET_DIR='/tmp/puppet'
rm -rf $PUPPET_DIR
cp -R $PUPPET_SRC $PUPPET_DIR
# NB: librarian-puppet might need git installed. If it is not already installed
# in your basebox, this will manually install it at this point using apt or yum
GIT=/usr/bin/git
APT_GET=/usr/bin/apt-get
YUM=/usr/sbin/yum
if [ ! -x $GIT ]; then
if [ -x $YUM ]; then
yum -q -y install git
elif [ -x $APT_GET ]; then
apt-get -q -y install git
else
echo "No package installer available. You may need to install git manually."
fi
fi
apt-get update
# Install the gems only on first run; afterwards just update modules.
if [ `gem query --local | grep librarian-puppet | wc -l` -eq 0 ]; then
# @see https://github.com/rodjek/librarian-puppet/issues/70
# librarian-puppet was replaced with librarian-puppet-maestrodev
gem install librarian-puppet-maestrodev --no-ri --no-rdoc
gem install puppet --no-ri --no-rdoc
cd $PUPPET_DIR && librarian-puppet install --clean
else
cd $PUPPET_DIR && librarian-puppet update
fi
# now we run puppet
puppet apply --modulepath=$PUPPET_DIR/modules/ $PUPPET_DIR/manifests/main.pp
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.