blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
fb311b1cd1de01bb1ce0fe67b8b0554a8d6b581d
|
Shell
|
EtiennePasteur/Subfixx
|
/lang-handler.sh
|
UTF-8
| 270
| 3.25
| 3
|
[] |
no_license
|
findSubtitle() {
    # Look next to $SEARCHING_FILE for "<base>.<lang>.srt" and print the bare
    # subtitle file name on stdout when it exists; print nothing otherwise.
    # $1 - language code (e.g. "fr", "en").
    # NOTE: callers invoke this via $(...), so the FOUND flag is only visible
    # inside that subshell — rely on the stdout result, not the flag.
    local FILNAME="${SEARCHING_FILE%%.*}"   # strip everything after the first dot
    FOUND=false
    if [ -f "$FILNAME.$1.srt" ]; then
        FOUND=true   # fix: the flag was declared but never set on success
        SHORT_NAME=$(basename -- "$FILNAME")
        echo "$SHORT_NAME.$1.srt"
    fi
}
# Collect the subtitle files that actually exist for the preferred languages.
# fix: the original appended the command substitution unconditionally, which
# inserted empty array elements whenever a language had no subtitle file.
SUBTITLES=()
for lang in fr en; do
    sub=$(findSubtitle "$lang")
    if [ -n "$sub" ]; then
        SUBTITLES+=("$sub")
    fi
done
| true
|
7972938fc263ba4e9d84a717d72cd3d943d2cace
|
Shell
|
arkovask/skriptimine2
|
/praks8/yl1
|
UTF-8
| 364
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Sum the even numbers between 0 and 10 and print the result.
summa=0
for (( arv=0; arv<11; arv++ )); do
    # keep only the even values of arv
    if (( arv % 2 == 0 )); then
        summa=$(( summa + arv ))
    fi
done
echo "Paaris arvude summa vahemikust 1 kuni 10 on " $summa
# End of script
| true
|
e36f83a6114cbfdfab735dcbcd55c3380060f379
|
Shell
|
raj-maurya/Competitive-Programming
|
/shell_script/shell4.sh
|
UTF-8
| 109
| 2.796875
| 3
|
[] |
no_license
|
clear
#this is demo!to print a file!!
# Print the file named by the first script argument and confirm on success.
# fix: with no argument the bare `cat` read stdin and blocked; guard on the
# argument count, and quote "$1" so paths with spaces work.
if [ $# -ge 1 ] && cat -- "$1"
then
    echo -e "\n\nFile $1. found successfully echoed"
fi
| true
|
dfe211e41f3ae2538351e6d214753a9aa2f7943c
|
Shell
|
joeytribbiani7/experiment
|
/Assignment2_proper/owner_ch.sh
|
UTF-8
| 433
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# Create ./owner_dic containing sample.txt and hand ownership of the tree to
# the current user, reporting each step.
if mkdir owner_dic
then
    echo "Directory has been created"
else
    echo "Directory already exists"
fi
cd owner_dic || exit 1
# fix: `touch` succeeds even when the file already exists, so the original
# "already exists" branch was unreachable — test for existence first.
if [ ! -e sample.txt ]
then
    touch sample.txt
    echo "File has been created"
else
    echo "File already exists"
fi
cd ..
user="$(whoami)"
# quote $user so unusual usernames do not word-split
if chown -R "$user" ./owner_dic
then
    echo "OwnerShip has been changed to the user - $user"
else
    echo "OwnerShip couldn't be changed"
fi
| true
|
bd7d2a693fa60f0ea42c7ba794746a9159d21d9d
|
Shell
|
ojbfive/ziti
|
/quickstart/docker/image/entryRouter.sh
|
UTF-8
| 2,313
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Generate PKI material for this host's ziti router, render its YAML config,
# register it with the fabric controller, and start it.
# Expects ziti.env to define ZITI_PKI, ZITI_HOME, ca_name, intermediate_name,
# fabric_controller_name and fabric_controller_uri.
. ziti.env
router_name=$(hostname)
echo "Generating a key, a server certificate and client certificate for ${router_name}"
echo ".......... Generating key for ${router_name}"
ziti pki create key --pki-root="${ZITI_PKI}" --ca-name="${intermediate_name}" --key-file="${router_name}"
echo ".......... Generating server certificate for ${router_name}"
ziti pki create server --pki-root="${ZITI_PKI}" --ca-name="${intermediate_name}" --key-file="${router_name}" --server-file="${router_name}-server" --dns "localhost" --dns "${router_name}" --ip "127.0.0.1"
echo ".......... Generating client certificate for ${router_name}"
ziti pki create client --pki-root="${ZITI_PKI}" --ca-name="${intermediate_name}" --key-file="${router_name}" --client-file="${router_name}-client" --client-name="${router_name}"
echo "Creating a file with the root CA, intermediate and the identity cert - some processes require the full chain to be supplied"
cat "${ZITI_PKI}/${intermediate_name}/certs/${router_name}-server.chain.pem" "${ZITI_PKI}/${ca_name}/certs/${ca_name}.cert" > "${ZITI_PKI}/${intermediate_name}/certs/${router_name}-full-chain.pem"
# NOTE(review): the file built above is "${router_name}-full-chain.pem", but
# the identity "ca:" entry below references "${intermediate_name}-full-chain.pem"
# — confirm that file is produced elsewhere or that the name is intentional.
cat > $ZITI_HOME/${router_name}.yml <<RouterConfigHereDoc
v: 2
identity:
ca: "${ZITI_PKI}/${intermediate_name}/certs/${intermediate_name}-full-chain.pem"
key: "${ZITI_PKI}/${intermediate_name}/keys/${router_name}.key"
cert: "${ZITI_PKI}/${intermediate_name}/certs/${router_name}-client.cert"
server_cert: "${ZITI_PKI}/${intermediate_name}/certs/${router_name}-full-chain.pem"
trace:
path: "$ZITI_HOME/${router_name}.trace"
ctrl:
endpoint: "tls:${fabric_controller_name}:6262"
link:
listener: "tls:0.0.0.0:6000"
advertise: "tls:${router_name}:6000"
listeners:
- binding: transport
address: tls:0.0.0.0:7000
options:
retransmission: true
randomDrops: false
drop1InN: 500
RouterConfigHereDoc
# register the router and start it...
echo "registering router ${router_name} with fabric controller at ${fabric_controller_uri}"
ziti-fabric create router -e "${fabric_controller_uri}" "${ZITI_PKI}/${intermediate_name}/certs/${router_name}-client.cert"
echo "starting router ${router_name}:"
echo "ziti-router run $ZITI_HOME/${router_name}.yml > $ZITI_HOME/${router_name}.log 2>&1 &"
# runs in the foreground (the backgrounded form above is only echoed)
ziti-router run $ZITI_HOME/${router_name}.yml
| true
|
a18558ff03db4d8dff1d4e8b74136272fb127ac1
|
Shell
|
cbuahin/ci-tools
|
/darwin/run-nrtests.zsh
|
UTF-8
| 2,917
| 3.84375
| 4
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env zsh
#
# run-nrtests.zsh - Runs numerical regression test
#
# Date Created: 04/01/2020
# Updated: 08/21/2020
#
# Author: See AUTHORS
#
# Dependencies:
# python -m pip install -r requirements.txt
#
# Environment Variables:
# PROJECT
# BUILD_HOME - relative path
# TEST_HOME - relative path
# PLATFORM
# REF_BUILD_ID
#
# Arguments:
# 2 - (SUT_BUILD_ID) - optional argument
#
# check that env variables are set
# NOTE: this script relies on zsh-only syntax ([[ -v VAR ]], ${0:a:h},
# top-level `return`) and must be run with zsh, not bash.
REQUIRED_VARS=(PROJECT BUILD_HOME TEST_HOME PLATFORM REF_BUILD_ID)
for i in ${REQUIRED_VARS}; do
[[ ! -v ${i} ]] && { echo "ERROR: ${i} must be defined"; return 1 }
done
# determine project root directory
CUR_DIR=${PWD}
# ${0:a:h} = absolute directory of this script (zsh path modifiers)
SCRIPT_HOME=${0:a:h}
cd ${SCRIPT_HOME}/../../
PROJ_DIR=${PWD}
# change current directory to test suite
cd ${TEST_HOME}
# use passed argument or generate a "unique" identifier
if [ ! -z "$1" ]; then
SUT_BUILD_ID=$1
else
SUT_BUILD_ID=$RANDOM
fi
# check if app config file exists
# ([ -a FILE ] is a plain existence test; generate the config only if missing)
if [ ! -a "./apps/${PROJECT}-${SUT_BUILD_ID}.json" ]; then
mkdir -p "apps"
${SCRIPT_HOME}/app-config.zsh "${PROJ_DIR}/${BUILD_HOME}/bin/Release" \
${PLATFORM} ${SUT_BUILD_ID} > "./apps/${PROJECT}-${SUT_BUILD_ID}.json"
fi
# build list of directories containing tests
TESTS=$( find ./tests -mindepth 1 -type d -follow | paste -sd " " - )
# build nrtest execute command
NRTEST_EXECUTE_CMD='nrtest execute'
TEST_APP_PATH="./apps/${PROJECT}-${SUT_BUILD_ID}.json"
TEST_OUTPUT_PATH="./benchmark/${PROJECT}-${SUT_BUILD_ID}"
# build nrtest compare command
NRTEST_COMPARE_CMD='nrtest compare'
REF_OUTPUT_PATH="benchmark/${PROJECT}-${REF_BUILD_ID}"
RTOL_VALUE='0.01'
ATOL_VALUE='1.E-6'
# if present clean test benchmark results
if [ -d "${TEST_OUTPUT_PATH}" ]; then
rm -rf "${TEST_OUTPUT_PATH}"
fi
# perform nrtest execute
echo "INFO: Creating SUT ${SUT_BUILD_ID} artifacts"
NRTEST_COMMAND="${NRTEST_EXECUTE_CMD} ${TEST_APP_PATH} ${TESTS} -o ${TEST_OUTPUT_PATH}"
echo $NRTEST_COMMAND
eval ${NRTEST_COMMAND}
RESULT=$?
if [ "$RESULT" -ne 0 ]; then
echo "WARNING: nrtest execute exited with errors"
fi
# perform nrtest compare
# NOTE(review): REF_BUILD_ID was already asserted non-empty by the
# REQUIRED_VARS loop above, so this guard should never trigger — confirm.
if [ -z "${REF_BUILD_ID}" ]; then
echo "WARNING: no ref benchmark found comparison not performed"
RESULT=1
else
echo "INFO: Comparing SUT artifacts to REF ${REF_BUILD_ID}"
NRTEST_COMMAND="${NRTEST_COMPARE_CMD} ${TEST_OUTPUT_PATH} ${REF_OUTPUT_PATH} --rtol ${RTOL_VALUE} --atol ${ATOL_VALUE}"
eval ${NRTEST_COMMAND}
RESULT=$?
fi
# Stage artifacts for upload
cd ./benchmark
if [ "$RESULT" -eq 0 ]; then
echo "INFO: nrtest compare exited successfully"
mv receipt.json ${PROJ_DIR}/upload/receipt.json
else
echo "INFO: nrtest exited abnormally"
tar -zcf benchmark-${PLATFORM}.tar.gz ./${PROJECT}-${SUT_BUILD_ID}
mv benchmark-${PLATFORM}.tar.gz ${PROJ_DIR}/upload/benchmark-${PLATFORM}.tar.gz
fi
# return user to current dir
cd ${CUR_DIR}
return $RESULT
| true
|
be7b0a50b659eaa5cdbfabb7a0a795745d957b27
|
Shell
|
HortonworksUniversity/Operations-Labs
|
/build/security/ambari-bootstrap-master/extras/ranger/prep-mysql.sh
|
UTF-8
| 501
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Prepare a local MySQL server for Ambari/Ranger: install packages, enable and
# start the service, register the JDBC driver with Ambari, then grant remote
# root access on this host's FQDN and set the root password.
mypass="${mypass:-BadPass#1}"   # default password; override via the mypass env var
db_root_password="${mypass}"
sudo yum -y -q install mysql-server mysql-connector-java
sudo chkconfig mysqld on
sudo service mysqld start
sudo ambari-server setup --jdbc-db=mysql --jdbc-driver=/usr/share/java/mysql-connector-java.jar
# NOTE(review): `mysql -u root` with no password assumes a fresh install where
# root has no password yet — confirm for re-runs.
cat << EOF | mysql -u root
GRANT ALL PRIVILEGES ON *.* to 'root'@'$(hostname -f)' WITH GRANT OPTION;
SET PASSWORD FOR 'root'@'$(hostname -f)' = PASSWORD('${db_root_password}');
FLUSH PRIVILEGES;
exit
EOF
| true
|
7684cab224d123422556c6c0f0f61eae281605d1
|
Shell
|
yoursunny/OpenWrt-packages
|
/named-data/nfd-service/files/nfd.service
|
UTF-8
| 2,975
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh /etc/rc.common
# OpenWrt procd init script for the NDN Forwarding Daemon (NFD).
# NOTE(review): the functions below use [[ ]] and &> (bash-isms) under
# /bin/sh — relies on the target BusyBox ash supporting them; confirm.
START=51
STOP=51
USE_PROCD=1
EXTRA_COMMANDS='authorize connect'
EXTRA_HELP='
authorize
Authorize default identity in $HOME keychain to register prefixes.
connect [save] FACE ROUTE ROUTE ...
Create face and add routes.
Optionally "save" to UCI (need "uci commit" manually).
'
# Generated NFD configuration and management-certificate locations.
NFDCONFDIR=/var/etc/ndn
NFDCONFFILE=$NFDCONFDIR/nfd.conf
LUAHELPER='lua /usr/lib/lua/named-data/nfd-init.lua'
MGMTCERTID=/localhost/mgmt
MGMTCERTFILE=$NFDCONFDIR/mgmt.ndncert
# Run nfd with a dedicated HOME (keychain location); keep the caller's HOME
# around for the authorize/connect commands.
OLDHOME=$HOME
export HOME=/var/lib/ndn/nfd
start_service() {
# Render nfd.conf from UCI via the Lua helper, make sure the management
# certificate exists, then launch nfd under procd supervision.
mkdir -p $NFDCONFDIR
>$NFDCONFFILE
# The helper emits CONFEDIT lines; turn each into an infoedit invocation.
$LUAHELPER | sed -n '/CONFEDIT/ s|CONFEDIT|infoedit -f '$NFDCONFFILE'|p' | sh
if ! ndnsec cert-dump -i $MGMTCERTID >$MGMTCERTFILE 2>/dev/null; then
ndnsec delete $MGMTCERTID &>/dev/null
ndnsec key-gen -te $MGMTCERTID >$MGMTCERTFILE
fi
# NOTE(review): same condition as above — this only fires if regeneration
# still failed; it then signs with $CACERTID, which is not defined in this
# file. Confirm the intended fallback path.
if ! ndnsec cert-dump -i $MGMTCERTID >$MGMTCERTFILE 2>/dev/null; then
ndnsec key-gen -te $MGMTCERTID >$MGMTCERTFILE.req
ndnsec cert-gen -S 19700101000000 -E 20390802035434 -s $CACERTID \
-i local-mgmt-authorize $MGMTCERTFILE.req >$MGMTCERTFILE
ndnsec cert-install -I $MGMTCERTFILE 2>/dev/null
rm -f $MGMTCERTFILE.req
fi
procd_open_instance
procd_set_param command /usr/bin/nfd -c $NFDCONFFILE
procd_set_param env HOME=$HOME
procd_set_param respawn 300 5 5
procd_set_param stderr 1
procd_close_instance
}
service_started() {
# Wait up to 10s for nfd to answer `nfdc status`, then run the Lua helper's
# INITCMD lines, logging their output via syslog.
local TIMEOUT=10
while ! nfdc status &>/dev/null; do
TIMEOUT=$((TIMEOUT-1))
if [[ $TIMEOUT -eq 0 ]]; then
exit 1
fi
sleep 1
done
$LUAHELPER | sed -n '/INITCMD/ s|INITCMD ||p' \
| sh 2>&1 | logger -s -t nfd-init -p daemon.notice
}
service_triggers() {
# Reload this service whenever the "nfd" UCI configuration changes.
procd_add_reload_trigger "nfd"
}
authorize() {
# Ensure the caller's ($OLDHOME) keychain has a default identity, creating a
# fresh /localhost/operator-<epoch> identity when none exists.
# NOTE(review): the certificate-exchange steps below are commented out, so
# this command currently only guarantees a default identity — confirm intent.
local IDENTITY=$(HOME=$OLDHOME ndnsec get-default 2>/dev/null)
if [[ -z $IDENTITY ]]; then
IDENTITY=/localhost/operator-$(date +%s)
HOME=$OLDHOME ndnsec key-gen -te $IDENTITY >/dev/null
fi
# local REQ=/tmp/$(date +%s)
# HOME=$OLDHOME ndnsec cert-dump $(HOME=$OLDHOME ndnsec get-default -c) >$REQ.ndncertreq
# ndnsec cert-gen -s /localhost/root -i local-rib-authorize $REQ.ndncertreq >$REQ.ndncert
# HOME=$OLDHOME ndnsec cert-install $REQ.ndncert 2>/dev/null
# rm -f $REQ.ndncertreq $REQ.ndncert
# echo 'Authorizing '$IDENTITY' in '$OLDHOME' for prefix registration.' \
# | logger -s -t nfd-service -p user.notice
}
connect() {
# connect [save] FACE ROUTE...
# Create a permanent face toward FACE and register each ROUTE through it
# (cost 100). With "save", the face and routes are also written to UCI
# (uncommitted); otherwise $SAVE is `true`, making the set/add_list calls
# no-ops.
local SAVE=$1
local SECTION=
if [[ $SAVE == 'save' ]]; then
shift
SAVE=uci
SECTION='nfd.'$(uci add nfd face)
else
SAVE=true
fi
$SAVE set $SECTION.remote=$1
# Pull the new face id out of `nfdc face create` output ("... id=N ...").
local FACEID=$(nfdc face create $1 persistency permanent \
| tr ' ' '\n' | sed -n '/id=/ s|id=||p')
if [[ $FACEID == '' ]]; then
exit 1
fi
shift
while [[ $# -gt 0 ]]; do
$SAVE add_list $SECTION.route=$1
if ! nfdc route add $1 $FACEID cost 100 >/dev/null; then
exit 1
fi
shift
done
}
| true
|
36ea2e52600c6f8ab09eb6378b91ab47fef9d411
|
Shell
|
rickroty/igatemonitor
|
/logging/run.sh
|
UTF-8
| 281
| 2.59375
| 3
|
[] |
no_license
|
# Keep a container named "logging" running: when none is active, remove any
# exited leftover with the same name, then start a fresh instance.
if [ -z "$(docker ps -q -f name=logging)" ]; then
    if [ -n "$(docker ps -aq -f status=exited -f name=logging)" ]; then
        # free the name by removing the stale stopped container
        docker rm logging
    fi
    docker run -d --name logging -p 24224:24224 -v /home/pi/projects/igatemonitor:/host -it logging:latest
fi
| true
|
3a16c784a26709078f6c056c10580d9fa961518b
|
Shell
|
muhammadali448/scriptProgramsOS
|
/scriptingPrograms/script2.sh
|
UTF-8
| 470
| 3.5
| 4
|
[] |
no_license
|
# Interactive command menu: repeats until the user chooses 'q' — or until
# stdin reaches EOF (see the read guard below).
a=1
clear
while test $a -eq 1
do
    echo " Commands Menu "
    echo "================================"
    echo "1. Who is logged in"
    echo "2. Date Time"
    echo "q Quit"
    echo "================================"
    echo -n "Enter your choice:"
    # fix: on EOF `read` fails but the original loop spun forever;
    # -r additionally keeps backslashes literal
    read -r b || break
    case "$b" in
    1)
        echo "Who is logged in:"
        echo "$(whoami)"
        sleep 2
        clear
        ;;
    2)
        echo
        echo "Date Time:"; date
        sleep 2
        clear
        ;;
    q)
        exit 0
        ;;
    *)
        echo
        echo "Wrong Option...Try again"
        sleep 2
        clear
        ;;
    esac
done
| true
|
424c3916351850ca79e8e126211adeafb1830d0e
|
Shell
|
j-kan/lj-spectral
|
/dsaupd/run-ksample.sh
|
UTF-8
| 670
| 2.578125
| 3
|
[] |
no_license
|
# Run the k-means sampling binary for Nev=8 and Nev=16; each run logs to its
# own directory and mails the tail of the log when finished.
run_sample() {
    # $1 - number of eigenvectors (selects the binary and eigenvector file)
    DIR=sample-297987-Lrw-Nev-$1
    echo $DIR
    mkdir $DIR
    echo "======> Started ", $(date) > $DIR/output.txt
    ./kmsample297987-$1.exe -g usample.pairs -d Lrw-$1-eigenvectors.txt -o $DIR --normalize RW -r 10 >> $DIR/output.txt
    echo "======> Finished ", $(date) >> $DIR/output.txt
    # fix: the original used `< \`tail ...\``, which redirects stdin from a
    # file *named by* tail's output (and had a stray "DIR=..." fused onto the
    # same line); the intent is to mail the last 12 log lines.
    tail -12 $DIR/output.txt | email -s $DIR jkan@iconstructs.com
}
run_sample 8
run_sample 16
| true
|
42435a6a1dbc94278f9836c7d16d318e434b1fa2
|
Shell
|
juliangrosshauser/dotfiles
|
/zshenv
|
UTF-8
| 1,011
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
# Login environment shared across machines.
case "$OSTYPE" in
  darwin*)
    # Work around MANPATH not being initialised on macOS.
    export MANPATH="$MANPATH"
    # Default to the clang toolchain.
    export CC=clang
    export CXX=clang++
    # Keep `brew install` from triggering a full Homebrew update.
    export HOMEBREW_NO_AUTO_UPDATE=1
    ;;
esac

export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export EDITOR=vim
export VISUAL=vim
export PAGER=less

# Go toolchain and workspace, when present.
[[ -d /usr/local/go/bin ]] && export PATH="/usr/local/go/bin:$PATH"
[[ -d "$HOME/dev/go" ]] && export GOPATH="$HOME/dev/go"
[[ -n "$GOPATH" ]] && export PATH="$GOPATH/bin:$PATH"

# Rust binaries installed by cargo.
[[ -d "$HOME/.cargo/bin" ]] && export PATH="$HOME/.cargo/bin:$PATH"

# java_home prints the path to a JDK home directory; pick a specific
# version with `java_home -v 1.x`.
[[ -f /usr/libexec/java_home ]] && export JAVA_HOME="$(/usr/libexec/java_home)"

case "$OSTYPE" in
  linux*)
    # gpg needs the tty to show its passphrase prompt.
    export GPG_TTY=$(tty)
    ;;
esac
| true
|
f0e0469d59c94f202b957eb6f15ac6bfbe0200f3
|
Shell
|
appdess/modernapps-masterclass
|
/tkg/tito-app/demo-magic/tito-demo-deploy.sh
|
UTF-8
| 1,845
| 3.15625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
########################
# include the magic
# (demo-magic.sh provides pe = print-and-execute, p = print-only, and the
# GREEN/CYAN color variables used below)
########################
. ./demo-magic.sh
########################
# Configure the options
########################
#
# speed at which to simulate typing. bigger num = faster
#
# TYPE_SPEED=20
#
# custom prompt
#
# see http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/bash-prompt-escape-sequences.html for escape sequences
#
DEMO_PROMPT="${GREEN}➜ ${CYAN}\W "
# hide the evidence
clear
# Export Workload Cluster kubeconfig
pe "cd /home/ubuntu/clusterapi/demo"
pe "export KUBECONFIG=/home/ubuntu/clusterapi/out/workload-cluster-01/kubeconfig"
# check if the cluster is healthy and show in VMC (Showing a "Ready" status)
pe "kubectl get nodes"
# Show Cluster Networking integration
pe " kubectl get pods -n=default"
# Show Cluster Storage integration
pe "kubectl get pvc --all-namespaces"
# show the new LB for the app
pe "cat tito-lb.yaml"
pe "kubectl apply -f tito-lb.yaml"
# show the deployment of the app and apply it
pe "cat tito-deployment.yaml"
pe "kubectl apply -f tito-deployment.yaml"
# check the state of the deployment:
pe "kubectl get deploy -n tito-app"
pe "kubectl get svc -n tito-app"
# show the pods which have been created by our deployment - we filter them by their label "vmc-nginx". You will notice that each pod got its own IP from our overlay-network (Calico). This is K8s internal networking and not accessible from the outside.
pe "kubectl get pods -o wide -n tito-app"
# Switch the DNS on the DC to the right IP, shutdown the VM
p "looks good? Let´s adjust the DNS"
# Scale the Deployment up
pe "kubectl scale deployment titofe --replicas=10 -n tito-app"
pe "kubectl get pods -o wide -n tito-app"
# cleanup the stuff
pe "kubectl delete ns tito-app"
# show a prompt so as not to reveal our true nature after
# the demo has concluded
p ""
| true
|
a8e150826ac4c9d04bf56470a19428ae7e034c03
|
Shell
|
distroy/profiles
|
/zsh/install
|
UTF-8
| 380
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Copyright (C) distroy
#
# Install the zsh profile: append loader lines to ~/.zshrc and clone oh-my-zsh.
cd "$(dirname "${BASH_SOURCE[0]}")"
SOURCE_DIR="$PWD"
WRITE_FILE=$HOME/.zshrc
# zshrc
# ${SOURCE_DIR/#$HOME/\$HOME} rewrites a leading $HOME prefix to the literal
# string "$HOME", so the generated rc file stays portable across home dirs.
cat >>$WRITE_FILE <<EOF
# ld zshrc
# LD_CD_ROOT_DIR=
# LD_CD_MIN_DEPTH=1
# LD_CD_MAX_DEPTH=1
source ${SOURCE_DIR/#$HOME/\$HOME}/zshrc
source ${SOURCE_DIR/#$HOME/\$HOME}/zshrc.key
EOF
# NOTE(review): clones into $SOURCE_DIR and fails if ./oh-my-zsh already
# exists — re-runs also append duplicate lines to ~/.zshrc. Confirm intended.
git clone https://github.com/robbyrussell/oh-my-zsh.git oh-my-zsh
| true
|
8b612fb54e8db7e7e78f44e1db181ac3e3fad243
|
Shell
|
lavecoral/DaFlow
|
/docker/images/hadoop/base/export_container_ip.sh
|
UTF-8
| 908
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#############################################################################################################
## COPIED FROM                                                                                             ##
## https://github.com/apache/incubator-hudi/blob/master/docker/hoodie/hadoop/base/export_container_ip.sh   ##
##                                                                                                         ##
#############################################################################################################
# Determine the container's first non-loopback IPv4 address (trying en0 then
# eth0) and export it as MY_CONTAINER_IP.
# NOTE: exporting only affects this process and its children — the script
# must be sourced for the variable to reach the caller.
interfaces=( "en0" "eth0" )
ipAddr=""
for interface in "${interfaces[@]}"
do
    # fix: a bare `head` keeps up to 10 lines; -n 1 guarantees one address
    ipAddr=$(ifconfig "${interface}" | grep -Eo 'inet (addr:)?([0-9]+\.){3}[0-9]+' | grep -Eo '([0-9]+\.){3}[0-9]+' | grep -v '127.0.0.1' | head -n 1)
    if [[ -n "$ipAddr" ]]; then
        break
    fi
done
echo "Container IP is set to : $ipAddr"
export MY_CONTAINER_IP=${ipAddr}
| true
|
036198fea645e67dd8dacdde4a5b11dfc4069421
|
Shell
|
chouer19/RlClient2
|
/waypoints/rm.sh
|
UTF-8
| 63
| 2.515625
| 3
|
[] |
no_license
|
# Delete the numbered waypoint files 1.txt through 15.txt in the current
# directory (rm reports any that are missing).
for idx in {1..15}; do
    rm "$idx.txt"
done
| true
|
69dd4b8f8cb4fa290a2fb9a099c3d5d6427a6846
|
Shell
|
wenlizhe/shell_test
|
/code/bak/remote_ctrl/killall.sh
|
UTF-8
| 979
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Server control script: run ./killall.sh remotely on the selected hosts.
# ajianzheng
#
# Source function library.
#. /etc/rc.d/init.d/functions
source common.sh   # provides GetHost and VALID_HOSTS
SCRIPT_NAME=$0
ALL_PARAM=0
# Print usage and abort.
function help() {
    echo "---------------------------------------------------------"
    echo -e "\033[32m" "Usage: ${SCRIPT_NAME} {all|4|5|6|7|8}" "\033[0m"
    echo "---------------------------------------------------------"
    exit 1
}
# fix: the original `if [ $# -eq 0 ]; then help;` was never closed with `fi`,
# a syntax error that prevented the whole script from running.
if [ $# -eq 0 ]; then help; fi
# Validate each argument and collect the host list.
for host in "$@"
do
    case "${host}" in
    "4"|"5"|"6"|"7"|"8")
        HOSTS=${HOSTS}" "${host}
        ;;
    "all")
        ALL_PARAM=1
        ;;
    *)
        echo "INVALID HOST:" $host
        help
        ;;
    esac
done
if [ $ALL_PARAM -eq 1 ]; then HOSTS=$VALID_HOSTS; fi
for host in $HOSTS
do
    echo -e "\033[32m""-- Host: $host ------------------" "\033[0m"
    STR_HOST=$(GetHost $host)
    ssh -n $STR_HOST "cd confclient;./killall.sh; exit"
done
echo -e "\033[32m" "-=done!=-" "\033[0m"
| true
|
9541cb16758b4b8ff6a08d7d55ce2fc2f9f641a4
|
Shell
|
kazoo135/shell-script-tutorials
|
/positional-args1.sh
|
UTF-8
| 360
| 3.0625
| 3
|
[] |
no_license
|
# using Positional arguments first example
# Interactively create a file, rename it, and display its contents.
echo "To change your file name, first enter the source file"
# -r keeps backslashes literal; bail out quietly if no name was entered,
# otherwise the unquoted redirections below become ambiguous
read -r sourcefile
[ -n "$sourcefile" ] || exit 0
echo "Now begin typing your content us crtl-d to stop writing"
cat > "$sourcefile"
echo "To change enter new name for file"
read -r destfile
[ -n "$destfile" ] || exit 0
mv -- "$sourcefile" "$destfile"
echo "Here is the contents of your file"
cat < "$destfile"
echo "There you go"
| true
|
2fc95a7bc41a270a05b16444d166092323ddab8a
|
Shell
|
drumsco/dotfiles
|
/home/.zashrc
|
UTF-8
| 5,878
| 2.796875
| 3
|
[] |
no_license
|
# Settings shared between bash and zsh.
# LANG is set here rather than in .zashenv, because setting it there would
# override the LANG handling done by DirDiff.vim
# (https://github.com/tmsanrinsha/DirDiff.vim/commit/84f11927ae9a915cd7a0d330a1abba26a9982e32).
# If errors like the following appear:
# perl: warning: Setting locale failed.
# perl: warning: Please check that your locale settings:
# LANGUAGE = "en_US:",
# LC_ALL = (unset),
# LANG = "ja_JP.UTF-8"
# are supported and installed on your system.
# perl: warning: Falling back to the standard locale ("C").
# then on Ubuntu run:
# sudo locale-gen ja_JP.UTF-8
export LANG='ja_JP.UTF-8'
# alias {{{
# ============================================================================
# Aliases that shadow same-named commands are defined here so that shell
# scripts and the like never pick them up by accident.
if ls --version | grep GNU 1>/dev/null 2>&1; then
alias ls='ls --color=auto -F'
if [ -f ~/.config/dircolors ]; then
if type dircolors > /dev/null 2>&1; then
eval $(dircolors ~/.config/dircolors)
elif type gdircolors > /dev/null 2>&1; then
eval $(gdircolors ~/.config/dircolors)
fi
fi
else
export LSCOLORS=exfxcxdxbxegedabagacad
alias ls='ls -G -F'
fi
alias cp='cp -ip'
alias mv='mv -i'
# NOTE(review): command_exists is expected to come from an earlier startup
# file — confirm.
if command_exists rmtrash; then
alias rm=rmtrash
fi
alias diff='diff -u'
# Keep the caller's environment under sudo; see
# [su / sudo | SanRinsha](http://sanrinsha.lolipop.jp/blog/2012/05/su%E3%83%BBsudo.html)
alias sudo='sudo -E '
if grep --help 2>&1 | grep color 1>/dev/null 2>&1; then
alias grep='grep --color=auto'
fi
# PATH setup {{{1
# ------------------------------------------------------------------------------
# if [[ `uname` = Darwin ]]; then
# # Prefer binaries installed via homebrew.
# # Setting this in .zashenv made vim resolve python to the homebrew one,
# # which caused errors, so it was set here instead.
# export PATH=/usr/local/bin:$PATH
# # And prefer self-built binaries over those.
# export PATH=$HOME/local/bin:$PATH
# fi
# Man {{{1
# ============================================================================
# Use vim's :Man as the pager for man.
# http://vim.wikia.com/wiki/Using_vim_as_a_man-page_viewer_under_Unix
# (cygwin lacked col; mac printed a warning too...)
if [[ `uname` == Linux || `uname` == Darwin ]]; then
export MANPAGER="/bin/sh -c \"col -b -x | \
$EDITOR -c 'setlocal ft=man nonumber nomod nomodifiable nolist' -c 'noremap q :q<CR>' \
-c 'nmap K :Man <C-R>=expand(\\\"<cword>\\\")<CR><CR>' -\""
fi
# `man man` showed more than man / fell back to less, so this stays disabled
# function man() {
# vim -c 'Ref man '$1 -c 'winc j' -c 'q'
# }
# tmux {{{1
# ============================================================================
# `test -z $TMUX` tells whether we are already inside tmux.
# `tm` attaches to an existing tmux session, creating one if none exists.
# -2 forces 256-color support.
# On mac, reattach-to-user-namespace is used so the clipboard is reachable.
#
# e.g.
# https://gist.github.com/1462391
# https://github.com/ChrisJohnsen/tmux-MacOSX-pasteboard
# http://yonchu.hatenablog.com/entry/20120514/1337026014
# tm — attach to the running tmux session, or start a new one with extra
# per-platform configuration appended on the fly.
function tm {
# (used when inside neither tmux nor screen)
if tmux has-session >/dev/null 2>&1; then
# a session exists: attach to it
tmux -2 attach
else
# make clipboard copy work on Mac
if [[ $OSTYPE == darwin* ]] && hash reattach-to-user-namespace 2>/dev/null; then
# on OS X force tmux's default command to spawn a shell in the user's namespace
tmux_config='set-option -g default-command "reattach-to-user-namespace -l $SHELL"'
# with reattach-to-user-namespace set, copy-mode no longer reaches the
# clipboard by itself, so pipe the selection through copy-pipe
tmux_config="$tmux_config\n"'bind-key -t vi-copy y copy-pipe "reattach-to-user-namespace pbcopy"'
fi
# Append the extra config to ~/.tmux.conf contents; the embedded Japanese
# line below is a tmux-config comment ("scroll back to the shell prompt")
# inside the string and must stay as-is.
tmux_config=$(cat $HOME/.tmux.conf)"
$tmux_config
# シェルのプロンプトをさかのぼる
bind @ copy-mode \; send-keys ? C-u "$(whoami)"@ C-m n
"
tmux -2 -f <(echo "$tmux_config") new-session
fi
}
# perl {{{1
# ==============================================================================
# List installed perl modules; drop . (the current directory) from @INC first.
alias pl="find `perl -e 'print "@INC"' | sed -e 's/ .$//'` -type f -name \"*.pm\""
# gisty {{{1
# ==============================================================================
export GISTY_DIR="$HOME/gists"
export GISTY_SSL_VERIFY="NONE"
# pyenv {{{1
# ==============================================================================
# Plays badly with virtualenv, so disabled.
# if which pyenv 1>/dev/null 2>&1; then
# export PYENV_ROOT="$HOME/.pyenv"
# pathmunge "$PYENV_ROOT/bin"
# eval "$(pyenv init -)"
# fi
# mac {{{1
# ==============================================================================
# NOTE(review): $os is expected to be set by an earlier startup file — confirm.
if [ "$os" = mac ]; then
function launchctl-reload()
{
launchctl unload $1
launchctl load $1
}
fi
# .zashrc.cygwin {{{1
# ============================================================================
if [[ `uname` = CYGWIN* && -f ~/.zashrc.cygwin ]]; then
. ~/.zashrc.cygwin
fi
# .zashrc.local {{{1
# ============================================================================
if [ -f ~/.zashrc.local ]; then
. ~/.zashrc.local
fi
if [ $SHLVL -eq 1 ]; then # when neither tmux nor screen is running
# Run logout when the connection drops, so .bash_logout etc. still execute.
trap logout HUP
fi
# vim:ft=sh
| true
|
3cfb6c6bbb16dd69feaafcc5da9829b2785b38f4
|
Shell
|
amustafa/dotfiles
|
/neovim/install.sh
|
UTF-8
| 976
| 2.8125
| 3
|
[] |
no_license
|
#if [ "`uname -s`" = Darwin ]; then
# brew install neovim
#else
# python download_latest_neovim.py
#fi
#pyenv shell use nvim
# Install the Python packages the neovim setup relies on.
pip install neovim
pip install flake8
pip install jedi
pip install yapf
pip install tox
# Install locations: the appimage under ~/opt, config under ~/.config/nvim.
NVIM_APP_DIR=${HOME}/opt
NVIM_CONFIG_HOME=${HOME}/.config/nvim
# NOTE(review): NVIM_DATA_DIR is defined but never used below — confirm.
NVIM_DATA_DIR=${HOME}/.local
if [ -e nvim.appimage ]; then
mkdir -p $NVIM_APP_DIR/nvim/bin
chmod u+x nvim.appimage
mv nvim.appimage $NVIM_APP_DIR/nvim/bin
ln -s $NVIM_APP_DIR/nvim/bin/nvim.appimage $HOME/bin/nvim
# else USED WHEN DOWNLOADING
# tar xzvf nvim-macos.tar.gz
# rm nvim-macos.tar.gz
# mv nvim-osx64 $NVIM_APP_DIR/nvim
# ln -s $NVIM_APP_DIR/nvim/bin/nvim $HOME/bin
fi
# Install vim.plug
curl -fLo ~/.local/share/nvim/site/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
mkdir -p $NVIM_CONFIG_HOME
# Link this repo's vim configuration into the nvim config directory.
ln -s `pwd`/vimrcs $NVIM_CONFIG_HOME/vimrcs
ln -s `pwd`/init.vim $NVIM_CONFIG_HOME/init.vim
| true
|
9bf39269104d586acd30aaab4c6547a59c3ddc2a
|
Shell
|
masarakki/dotfiles
|
/install/ruby
|
UTF-8
| 211
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# Install the newest stable (numeric-only) ruby via rbenv and make it the
# global default; a no-op when that version is already installed.
# fix: exit gracefully instead of cascading errors when rbenv is missing
command -v rbenv >/dev/null 2>&1 || exit 0
ruby_version=$(rbenv install -l | grep -E "^[ 0-9\.]+$" | sort -V | tail -n 1)
if [ -z "$(rbenv versions | grep "$ruby_version")" ]; then
    rbenv install "$ruby_version"
    rbenv global "$ruby_version"
fi
| true
|
f179a0df9a4c4044cd1623e971a9f96b23a2eee5
|
Shell
|
ReedRichard/k8s-rabbit-pod-autoscaler
|
/autoscale.sh
|
UTF-8
| 3,305
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# RabbitMQ-driven Kubernetes pod autoscaler.
# namespace/deployment are (re)assigned per queue entry inside the polling
# loop below; getCurrentPods reads them as globals.
namespace=""
deployment=""
getCurrentPods() {
    # Print the deployment's desired replica count (parsed out of
    # `kubectl describe deploy`), retrying once after 3 seconds on an empty
    # response; prints "" when both attempts fail.
    # fix: the original duplicated the whole kubectl pipeline for the retry —
    # consolidated into a loop with local variables.
    local attempt current
    for attempt in 1 2; do
        current=$(kubectl -n $namespace describe deploy $deployment | \
                  grep desired | awk '{print $2}' | head -n1)
        if [[ $current != "" ]]; then
            echo $current
            return 0
        fi
        # If kube api request fails, retry after 3 seconds
        [[ $attempt -eq 1 ]] && sleep 3
    done
    echo ""
}
notifySlack() {
    # Post $1 as a Slack message; a silent no-op when SLACK_HOOK is unset.
    [ -n "$SLACK_HOOK" ] || return 0
    curl -s --retry 3 --retry-delay 3 -X POST --data-urlencode 'payload={"text": "'"$1"'"}' $SLACK_HOOK > /dev/null
}
# AUTOSCALING holds ';'-separated entries of the form
# "minPods|maxPods|mesgPerPod|namespace|deployment|queueName"; strip all
# whitespace, then split into an array of entries.
autoscalingNoWS=$(echo "$AUTOSCALING" | tr -d "[:space:]")
IFS=';' read -ra autoscalingArr <<< "$autoscalingNoWS"
# Poll forever: for each queue, compute required pods as
# queueMessages / mesgPerPod, clamp to [minPods, maxPods], and scale the
# deployment when the current replica count differs.
while true; do
for autoscaler in "${autoscalingArr[@]}"; do
IFS='|' read minPods maxPods mesgPerPod namespace deployment queueName <<< "$autoscaler"
# %2f is the URL-encoded "/" default vhost
queueMessagesJson=$(curl -s -S --retry 3 --retry-delay 3 -u $RABBIT_USER:$RABBIT_PASS \
$RABBIT_HOST:15672/api/queues/%2f/$queueName)
if [[ $? -eq 0 ]]; then
queueMessages=$(echo $queueMessagesJson | jq '.messages')
# bc performs the integer division; errors (e.g. non-numeric input) leave
# requiredPods empty and are reported below
requiredPods=$(echo "$queueMessages/$mesgPerPod" | bc 2> /dev/null)
if [[ $requiredPods != "" ]]; then
currentPods=$(getCurrentPods)
if [[ $currentPods != "" ]]; then
if [[ $requiredPods -ne $currentPods ]]; then
desiredPods=""
# Flag used to prevent scaling down or up if currentPods are already min or max respectively.
scale=0
if [[ $requiredPods -le $minPods ]]; then
desiredPods=$minPods
# If currentPods are already at min, do not scale down
if [[ $currentPods -eq $minPods ]]; then
scale=1
fi
elif [[ $requiredPods -ge $maxPods ]]; then
desiredPods=$maxPods
# If currentPods are already at max, do not scale up
if [[ $currentPods -eq $maxPods ]]; then
scale=1
fi
else
desiredPods=$requiredPods
fi
if [[ $scale -eq 0 ]]; then
kubectl scale -n $namespace --replicas=$desiredPods deployment/$deployment 1> /dev/null
if [[ $? -eq 0 ]]; then
echo "Scaled $deployment to $desiredPods pods ($queueMessages msg in RabbitMQ)"
notifySlack "Scaled $deployment to $desiredPods pods ($queueMessages msg in RabbitMQ)"
else
echo "Failed to scale $deployment pods."
notifySlack "Failed to scale $deployment pods."
fi
fi
fi
else
echo "Failed to get current pods number for $deployment."
notifySlack "Failed to get current pods number for $deployment."
fi
else
echo "Failed to calculate required pods for $deployment."
notifySlack "Failed to calculate required pods for $deployment."
fi
else
echo "Failed to get queue messages from $RABBIT_HOST for $deployment."
notifySlack "Failed to get queue messages from $RABBIT_HOST for $deployment."
fi
sleep 3
done
sleep $INTERVAL
done
| true
|
801eacc8f2409c0441ff99978437e4dc2f28225d
|
Shell
|
SenseGrow/qca9377a
|
/WLAN-AIO/rootfs/wlan-load.sh
|
UTF-8
| 3,638
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
#
# WFA-start-usb-single.sh : Start script for Wi-Fi Direct Testing.
# Loads the WLAN driver for a detected QCA9377 device (USB/PCIe/SDIO),
# discovers the resulting network interface, starts wpa_supplicant on both
# the p2p and station interfaces, and applies TCP tuning sysctls.
#
# Parameters
#
TOPDIR=`pwd`
MODULE_PATH=${TOPDIR}/lib/modules
WPA_SUPPLICANT=${TOPDIR}/sbin/wpa_supplicant
WPA_CLI=${TOPDIR}/sbin/wpa_cli
SIGMA_DUT=${TOPDIR}/sbin/sigma_dut
IW=${TOPDIR}/sbin/iw
WFA_SCRIPTS_PATH=${TOPDIR}/home/atheros/Atheros-P2P/scripts
P2P_ACT_FILE=${WFA_SCRIPTS_PATH}/p2p-action.sh
P2P_DEV_CONF=${WFA_SCRIPTS_PATH}/p2pdev_dual.conf
WLAN_ACT_FILE=${WFA_SCRIPTS_PATH}/wlan-action.sh
WLAN_DEV_CONF=${WFA_SCRIPTS_PATH}/empty.conf
WPA_SUPPLICANT_ENTROPY_FILE=${WFA_SCRIPTS_PATH}/entropy.dat
ETHDEV=eth0
WLANPHY=
WLANDEV=
P2PDEV=p2p0
#
# some sanity checking
#
USER=`whoami`
if [ $USER != "root" ]; then
echo You must be 'root' to run the command
exit 1
fi
#
# detect the device existence
# Right now we assume this notebook has only one mmc bus
DEVICE_USB=`lsusb | grep "0cf3:9378"`
DEVICE_PCI=`lspci | grep "Atheros Communications Inc. Device 003e (rev 30)"`
DEVICE_PCI1=`lspci | grep "Qualcomm Atheros Device 003e (rev 30)"`
DEVICE_SDIO=`dmesg | grep "SDIO"`
if [ "$DEVICE_PCI" = "" -a "$DEVICE_PCI1" = "" -a "$DEVICE_USB" = "" -a "$DEVICE_SDIO" = "" ]; then
echo You must insert device before running the command
exit 2
fi
# disable rfkill
rfkill unblock all
#
# install driver
#
echo "=============Install Driver..."
insmod $MODULE_PATH/compat.ko
#insmod $MODULE_PATH/compat_firmware_class.ko 2> /dev/null
insmod $MODULE_PATH/cfg80211.ko
insmod $MODULE_PATH/wlan.ko
sleep 3
#
# detect the network device
# Walk /sys/class/net and match the USB product id, PCI device id, or SDIO
# device id to find the interface our driver created.
#
if [ "$WLANDEV" = "" -a -e /sys/class/net ]; then
for dev in `ls /sys/class/net/`; do
if [ -e /sys/class/net/$dev/device/idProduct ]; then
USB_PID=`cat /sys/class/net/$dev/device/idProduct`
if [ "$USB_PID" = "9378" ]; then
WLANDEV=$dev
fi
fi
if [ -e /sys/class/net/$dev/device/device ]; then
PCI_DID=`cat /sys/class/net/$dev/device/device`
if [ "$PCI_DID" = "0x003e" ]; then
WLANDEV=$dev
fi
fi
# NOTE(review): this block re-reads the same .../device/device attribute as
# the PCI check above, just comparing SDIO ids — could be merged.
if [ -e /sys/class/net/$dev/device/device ]; then
SDIO_DID=`cat /sys/class/net/$dev/device/device`
if [ "$SDIO_DID" = "0x0509" ] || [ "$SDIO_DID" = "0x0504" ]; then
WLANDEV=$dev
fi
fi
if [ -e /sys/class/net/$dev/phy80211/name ]; then
WLANPHY=`cat /sys/class/net/$dev/phy80211/name`
fi
done
if [ "$WLANDEV" = "" ]; then
echo Fail to detect wlan device
exit 3
fi
fi
if [ "$WLANDEV" = "" ]; then
WLANDEV=wlan0
WLANPHY=phy0
fi
#${IW} dev ${WLANDEV} interface add ${P2PDEV} type managed
sleep 1
#iwconfig $WLANDEV power off
#iwconfig $P2PDEV power off
#
# wlan device detected and configure ethernet
#
echo WLAN_DEV:$WLANDEV
echo P2P_DEV:$P2PDEV
echo ETH_device: $ETHDEV
#ifconfig $ETHDEV 192.168.250.40
#
# Start wpa_supplicant
# One process drives both interfaces (-N starts the second interface spec).
#
echo "=============Start wpa_supplicant..."
echo "Start Command : ${WPA_SUPPLICANT} -Dnl80211 -i ${P2PDEV} -c ${P2P_DEV_CONF} -N -Dnl80211 -i ${WLANDEV} -c ${WLAN_DEV_CONF} -e ${WPA_SUPPLICANT_ENTROPY_FILE}"
${WPA_SUPPLICANT} -Dnl80211 -i ${P2PDEV} -c ${P2P_DEV_CONF} -N -Dnl80211 -i ${WLANDEV} -c ${WLAN_DEV_CONF} -e ${WPA_SUPPLICANT_ENTROPY_FILE} &
sleep 1
#
# Other configuraiton
#
# NOTE(review): the two lines below are duplicated verbatim right after —
# looks like an accidental copy-paste; confirm before removing.
echo "Setting System Configuration........."
tcp_userconfig=`grep -c tcp_use_userconfig /etc/sysctl.conf`
echo "Setting System Configuration........."
tcp_userconfig=`grep -c tcp_use_userconfig /etc/sysctl.conf`
if [ $tcp_userconfig -eq 0 ]
then
# NOTE(review): in `sudo echo ... >> file` the redirection is performed by
# the *current* shell, not by sudo — works here only because the script
# already requires root.
sudo echo "net.ipv4.tcp_use_userconfig = 1" >> /etc/sysctl.conf
sudo echo "net.ipv4.tcp_delack_seg = 10" >> /etc/sysctl.conf
fi
sudo echo 1 > /proc/sys/net/ipv4/tcp_use_userconfig
sudo echo 10 > /proc/sys/net/ipv4/tcp_delack_seg
echo "=============Done!"
| true
|
65ba7557c108ecd5e38e2f682ce85715b4389544
|
Shell
|
matthieudelaro/docker-nginx-akeneo
|
/start.sh
|
UTF-8
| 1,060
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Bring up the Akeneo PIM docker-compose stack and report the web endpoint.
# MACHINE_NAME may be provided by .env; when set, docker-machine is used.
MACHINE_NAME=
source .env
DOCKER_EXEC="docker exec akeneo_pim_app "
# print_msg MESSAGE — printf wrapper that appends a newline.
function print_msg()
{
    printf "$1\n"
}
# This is a hack around mounting Linux volumes on docker-machines properly
# @see https://github.com/docker/machine/issues/3234#issuecomment-202596213
if [ -n "${MACHINE_NAME}" ] && [ "`uname`" == 'Linux' ]; then
    ./prep_machine.sh $MACHINE_NAME
fi
## ------------------------------------
## Bring up the infrastructure
## ---------------------------------------
print_msg "====== Staring Akeneo Service ..."
# fix: the original tested -z, which ran `docker-machine env ""` precisely
# when NO machine name was configured; the env hookup only makes sense when a
# machine name IS set (mirrors the -z/else split further down).
if [ -n "${MACHINE_NAME}" ]; then
    # Ensure we are using the right docker connections
    eval $(docker-machine env "${MACHINE_NAME}")
fi
docker-compose up -d || exit 22
echo "${MACHINE_NAME}"
if [ -z "${MACHINE_NAME}" ]; then
    WEB_APP_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' akeneo_pim_app)
else
    WEB_APP_IP=$(docker-machine ip "${MACHINE_NAME}")
fi
print_msg "Web service listening at http://${WEB_APP_IP}/"
print_msg "Done"
| true
|
345df411f440fa13539be4c03337dd721bf8ca66
|
Shell
|
iforgotband/kubler
|
/dock/kubler/images/busybox/build.sh
|
UTF-8
| 847
| 3
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#
# Kubler phase 1 config, pick installed packages and/or customize the build
#
_packages="sys-apps/busybox"
#
# This hook is called just before starting the build of the root fs
#
configure_rootfs_build()
{
update_use 'sys-apps/busybox' '+make-symlinks +static'
}
#
# This hook is called just before packaging the root fs tar ball, ideal for any post-install tasks, clean up, etc
#
finish_rootfs_build()
{
# log dir, root home dir
mkdir -p "${_EMERGE_ROOT}"/var/log "${_EMERGE_ROOT}"/root
# busybox crond setup
mkdir -p "${_EMERGE_ROOT}"/var/spool/cron/crontabs
chmod 0600 "${_EMERGE_ROOT}"/var/spool/cron/crontabs
# kick openrc init stuff
rm -rf "${_EMERGE_ROOT}"/etc/init.d/
# eselect now uses a hard coded readlink path :/
ln -sr "${_EMERGE_ROOT}"/bin/readlink "${_EMERGE_ROOT}"/usr/bin/readlink
}
| true
|
b1d664af306cf4b564ff946e7596e882a4376631
|
Shell
|
michaelwan/GitDeploy
|
/script/dev/release.sh
|
UTF-8
| 1,003
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/sh
DATE=`date +%d%m%Y`
export DATE
cd ${GIT_SERVER_REPOSITORY}
#stash any local changes
git stash
#check if there is any local changes
STASH_LIST="$(git stash list)"
#this if will output a error msg when STASH_LIST is not empty, but the operation is still correct.
if [ -z ${STASH_LIST:-""} ]
then
#pull changes from repo
git pull $GIT_REMOTE_NAME $GIT_REMOTE_BRANCH
exit 0
else
#any local changes should be reported.
git stash show -p > DIRTY_CHANGE_${DATE}_RELEASE.patch
git stash clear
#output error messages
exec 1>&2
echo "Dirty change found and put in DIRTY_CHANGE_${DATE}_RELEASE.patch"
echo "release continued, please refer to DIRTY_CHANGE_${DATE}_RELEASE.patch to retify"
#pull in changes from central.
git pull $GIT_REMOTE_NAME $GIT_REMOTE_BRANCH
mail -s "DIRTY_CHANGE_${DATE}_RELEASE.patch file exists in ${GIT_SERVER_REPOSITORY}" ${ALERT_EMAIL} < ${GIT_SERVER_REPOSITORY}/DIRTY_CHANGE_${DATE}_RELEASE.patch
exit 1
fi
| true
|
c16d529b87f3e53b371ffe5ebb3894b04aa352b1
|
Shell
|
ubuntu1213/shell
|
/12/test13.sh
|
UTF-8
| 506
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Check if either a directory or file exists
#
item_name=$HOME/sentinel
echo
echo "The item being checked: $item_name"
echo
if [ -e $item_name ]
then #Item does exist
echo "The item, $item_name, does exist."
echo "But is it a file?"
echo
if [ -f $item_name ]
then #Item is a file
echo "Yes, $item_name is a file."
else #Item is not a file
echo "No, $item_name is not a file."
fi
else #Item does not exist
echo "The item, $item_name, does not exist."
echo "Nothing to update"
fi
| true
|
9c858264576c6ef237b689b7e2ac64cfeaf3812c
|
Shell
|
krishnawattamwar/botler_server
|
/uat/monitoring/serverDown.sh
|
UTF-8
| 530
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh
SERVERIP_PROD="Botler Prod"
NOTIFYEMAIL="itsupport@graymatrix.com"
#NOTIFYEMAIL1="krishna.wattamwar@graymatrix.com"
COUNT_PROD=$(curl http://10.198.0.4 -k -s -f -o /dev/null --connect-timeout 30 && echo "SUCCESS" || echo "FAIL")
MSG=""
if [ $COUNT_PROD != "SUCCESS" ];
then
MSG="${MSG}$SERVERIP_PROD Server is down on $(date +" %d/%m/%Y at %R")\n"
else
echo "SERVERIP_PROD is working" > /dev/null 2>&1
fi
if [ "${MSG}" != "" ];
then
echo "Hello Team, \n\n${MSG} \n\nRegards, \nTeam ITSupport" | mail -s "Botler Prod server is down" $NOTIFYEMAIL
fi
| true
|
ae8809ee42ae5dd9522204d6383624dfe9667dda
|
Shell
|
fengchuiguo1994/sprite-pipeline
|
/scripts/HiCorrector_1.2/example/run_export_norm_data.sh
|
UTF-8
| 1,376
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
checkMakeDirectory(){
echo -e "checking directory: $1"
if [ ! -e "$1" ]; then
echo -e "\tmakedir $1"
mkdir -p "$1"
fi
}
# export_norm_data <input raw matrix file> <#rows/columns> <has header line in input file?> <has header column input file?> <memory size (MB)> <input bias vector file> <fixed row sum after normalization> <output normalized matrix file>
cmd="$PWD/../bin/export_norm_data"
output_dir="$PWD/output_ic" # output directory. You may modify this output directory
# input parameters
total_mem="1" # memory used for loading data (in MegaBytes)
total_rows=1000 # total number of rows in the input contact matrix
input_mat_file="$PWD/contact.matrix" # input contact matrix file, each line is a row, numbers are separated by TAB char
has_header_line=0 # input file doesn't have header line
has_header_column=0 # input file doesn't have header column
bias_factor_file="$output_dir/contact.matrix.bias" # input file consists of a vector of bias factors
row_sum_after_norm=10
output_file="$output_dir/contact.matrix.norm" # output file consists of a vector of bias factors
# run the command
echo "$cmd $input_mat_file $total_rows $has_header_line $has_header_column $total_mem $bias_factor_file $row_sum_after_norm $output_file"
$cmd $input_mat_file $total_rows $has_header_line $has_header_column $total_mem $bias_factor_file $row_sum_after_norm $output_file
| true
|
4c2af894df26483d376cf76dc6425b9eb772ba53
|
Shell
|
druidfi/docker-images
|
/misc/s3-sync/entrypoint.sh
|
UTF-8
| 628
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# shellcheck disable=SC2086
DATA_PATH="/data/"
: "${ACCESS_KEY:?"ACCESS_KEY env variable is required"}"
: "${SECRET_KEY:?"SECRET_KEY env variable is required"}"
REGION=${REGION:-eu-central-1}
S3CMD_BIN="/usr/bin/s3cmd --region=$REGION"
S3CMD_BIN="$S3CMD_BIN --access_key=$ACCESS_KEY"
S3CMD_BIN="$S3CMD_BIN --secret_key=$SECRET_KEY"
if [ "$1" = 'conf' ]; then
echo -e "\n\nsc3cmd:" "$S3CMD_BIN" "\n\n"
cat /root/.s3cfg
exit 0
fi
: "${S3_PATH:?"S3_PATH env variable is required"}"
echo "Job started: $(date)"
$S3CMD_BIN sync --recursive --no-preserve $DATA_PATH $S3_PATH
echo "Job finished: $(date)"
| true
|
6d19ce188b80aee74819c926cab8082a363de224
|
Shell
|
hermish/toronto-data
|
/io/import.sh
|
UTF-8
| 191
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Downloads and unzips the data, address found in the source file
location=$(sed -n '1p' < literals/source.txt)
curl "$location" -o data/dinesafe.zip
unzip data/dinesafe -d data
| true
|
d1f2ef168395153449b30622fa3345b070ba0263
|
Shell
|
kryptonat/settings-and-dot-files
|
/setup.sh
|
UTF-8
| 243
| 2.640625
| 3
|
[] |
no_license
|
#/bin/bash
set -x
dotfiles=$(cd $(dirname $0) && pwd)
ln -s $dotfiles/bashrc ~/.bashrc || true
ln -s $dotfiles/bash_profile ~/.bash_profile || true
ln -s $dotfiles/bash_aliases ~/.bash_aliases || true
| true
|
2de6f3172be122fa835fa7afdeedf4784529b7b1
|
Shell
|
Pankaj-Ra/ECEP_C-CPP
|
/ECEP/LinuxSystem/Templates/Function/function_3.sh
|
UTF-8
| 247
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
clear
function check()
{
if [ -e "./$1" ]
then
echo "Succeeded"
return 0
else
echo "Failed"
return 1
fi
}
echo -n "Enter the name of a file: "
read X
if check $X
then
echo $X exists!
else
echo $X does not exists!
fi
| true
|
a26d619ae67b5eb3e2bbf6b11c3defe60db47555
|
Shell
|
ziyiliunian/shell-test
|
/40-md5校验文件.sh
|
UTF-8
| 202
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
#仅仅是循环得出md5码···==
#by dr
read -p "请输入文件名或者目录名" file_dr
for i in $(ls $file_dr)
do
md5sum "$i" >> ./md5.txt
done
echo "文件md5存为md5.txt"
| true
|
fad3c360a9ded765539c2432f54338895d5f4cf2
|
Shell
|
ciena-blueplanet/ember-prop-types
|
/.travis/maybe-publish-gh-pages.sh
|
UTF-8
| 794
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
source $(npm root -g)/pr-bumper/.travis/is-bump-commit.sh
if isBumpCommit
then
echo "Skipping pr-bumper gh-pages publish step for version bump commit"
exit 0
fi
VERSION=`node -e "console.log(require('./package.json').version)"`
TMP_GH_PAGES_DIR=.gh-pages-demo
# We only want to deploy to gh-pages from "master"
if [ "${TRAVIS_BRANCH}" != "master" ]
then
echo "Skipping pr-bumper gh-pages publish step for branch [${TRAVIS_BRANCH}]"
exit 0
fi
ember build --prod
git clone https://${GITHUB_TOKEN}@github.com/${TRAVIS_REPO_SLUG} ${TMP_GH_PAGES_DIR} > /dev/null 2>&1
cd ${TMP_GH_PAGES_DIR}
git checkout gh-pages
git rm -rf *
cp -r ../dist/* .
git add --all
git commit -m "[pr-bumper] Automated gh-pages commit of [${VERSION}]"
git push origin gh-pages > /dev/null 2>&1
| true
|
7e9e0ef30f95bd54d12d57ad00ad1148937ad2b2
|
Shell
|
langmead-lab/reference_flow-experiments
|
/scripts/bash_exp/mass_buildfq_from_vcf.sh
|
UTF-8
| 1,161
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
STORAGE=/net/langmead-bigmem-ib.bluecrab.cluster/storage
SCRATCH=/home-1/bsolomo9\@jhu.edu/scratch/bsolomo9/genome_relaxation/
#RELAX=/scratch/groups/blangme2/naechyun/relaxing/
RELAX=/home-1/bsolomo9\@jhu.edu/genome_relaxation/
DATE=$(date +"%m-%d-%Y")
RANDSIZE=100
RANDSET=${SCRATCH}/1000G_R${RANDSIZE}_${DATE}.txt
RANDSTORAGE=${STORAGE}/bsolomo9/1000G_R${RANDSIZE}_${DATE}
mkdir -p $RANDSTORAGE
# Run script to select a random set
touch $RANDSET
python getRand.py $SCRATCH/phase3_names.txt ${RANDSIZE} > $RANDSET
# Run script to generate updated reference genome with VCF and produce -hapA, -hapB, and .var files prefixed by out-prefix
while read -r NAME
do
python $RELAX/scripts/update_genome.py --ref $STORAGE/indexes/hs37d5.fa --vcf $STORAGE/naechyun/1000Genomes/ALL.chr9.phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes.vcf --chrom 9 --out-prefix ${RANDSTORAGE}/$NAME --name $NAME --include-indels
done < $RANDSET
#/scratch/groups/blangme2/naechyun/software/mason-0.1.2-Linux-x86_64/bin/mason \
# illumina $RELAX/na12878/indels/na12878-chr9-indel-hapB.fa \
# -N 1000000 -sq -n 100 -hs 0 -hi 0 -hn 1 -mp \
# -o na12878-chr9-phase3_hapB_indel-1M.fq
| true
|
4416587d169186443aa7a6d4fa62aaf2215c7b12
|
Shell
|
abhidarbey/hyperledger-brooklyn-sawtooth
|
/scripts/accounts.sh
|
UTF-8
| 1,596
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2018 by Blockchain Technology Partners Limited
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#set -x # DEBUG
##
# Create Seth accounts using the Sawtooth Seth CLI command.
#
# Usage: accounts.sh container-name [quantity]
# Environment:
# PREFIX - The string prefix to identify the accounts
##
CONTAINER_NAME=$1
TOTAL=${2:-16}
PREFIX=${PREFIX:-test}
n=0
while [ $n -lt ${TOTAL} ] ; do
alias=$(printf "%s-%03d" ${PREFIX} $n)
docker exec --workdir /data ${CONTAINER_NAME} \
bash -c "openssl ecparam -genkey -name secp256k1 | openssl ec -out ${alias}.pem" > /dev/null
docker exec --workdir /data ${CONTAINER_NAME} \
seth account import ${alias}.pem ${alias} > /dev/null
docker exec --workdir /data ${CONTAINER_NAME} \
seth account create --nonce=0 --wait ${alias}
n=$((n + 1))
done
| true
|
d16b770f14eed4267005983a106f9fc4a19cdd92
|
Shell
|
anjingbin/starccm
|
/postgresql/src/bin/pgaccess/pgaccess.sh
|
UTF-8
| 239
| 2.65625
| 3
|
[
"PostgreSQL",
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
PATH_TO_WISH='@WISH@'
PGACCESS_HOME='@PGACCESSHOME@'
PGLIB='@PGLIB@'
PGPORT="${PGPORT:-@DEF_PGPORT@}"
export PATH_TO_WISH
export PGACCESS_HOME
export PGLIB
export PGPORT
exec "${PATH_TO_WISH}" "${PGACCESS_HOME}/main.tcl" "$@"
| true
|
07d519b19edc60970a474a5d014044c407bf2c57
|
Shell
|
dannil76/krillo
|
/bin/get_token.sh
|
UTF-8
| 298
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
TOKEN=$(curl -s -X POST \
-H "Content-Type: application/json" \
-H "Accept: application/json" \
-d '{"username": "demo", "password": "demo1234!"}' \
http://krillo22.caupo.se/rest/V1/integration/admin/token)
# Get rid of quotes
echo $TOKEN | sed -e "s/^\"//" -e "s/\"$//"
exit 0
| true
|
8c97e5f4f6c9822c7fceaee4a5e14a7b39b76f62
|
Shell
|
rowanpang/rtl8192su
|
/fsIns.sh
|
UTF-8
| 481
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/bash
MODDIR="/lib/modules/$(uname -r)/kernel"
drvWIFI="$MODDIR/drivers/net/wireless"
rtl="$drvWIFI/realtek/rtlwifi"
rtl8192su="$rtl/rtl8192su"
KMOD_SRC="./rtlwifi"
mkdir -p $rtl8192su
bak=".bak.`date +%Y%m%d-%H%M%S`"
install -v --suffix=$bak $KMOD_SRC/rtlwifi.ko $rtl
install -v --suffix=$bak $KMOD_SRC/rtl_usb.ko $rtl
install -v --suffix=$bak $KMOD_SRC/rtl8192s/rtl8192s-common.ko $rtl8192su
install -v --suffix=$bak $KMOD_SRC/rtl8192su/rtl8192su.ko $rtl8192su
| true
|
0a444c9d19703e682ff000999cb593e75fcbe2a5
|
Shell
|
Mik317/Openframe-Image
|
/install.sh
|
UTF-8
| 2,182
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Be VERY Careful. This script may be executed with admin privileges.
echo "Installing glslViewer..."
if ! [ -z "$TRAVIS" ]; then
echo "TRAVIS env, don't install"
exit 0
fi
os=$(uname)
arq=$(uname -m)
# does glslViewer already exist?
if hash glslViewer 2>/dev/null; then
echo "glslViewer already installed."
exit 0
fi
if [ $os == "Linux" ]; then
# on Linux distributions
# sudo apt-get update
# do we want to upgrade? this could take a damn long time.
# sudo apt-get upgrade
if [ $arq == "armv7l" ]; then
# on RaspberryPi 2/3/4
rev=$(cat /proc/cpuinfo | grep 'Revision' | awk '{print $3}' | sed 's/^1000//')
declare -A pi4
pi4[a03111]=1
pi4[b03111]=1
pi4[c03111]=1
if [ ${pi4[$rev]} == 1 ]; then
sudo apt-get install libegl1-mesa-dev libgbm-dev libgles2-mesa-dev
git clone --depth=1 --branch=master https://github.com/patriciogonzalezvivo/glslViewer glslViewer
cd glslViewer
make
sudo make install
else
sudo apt-get install glslviewer
fi
elif [ $arq == "armv6l" ]; then
# on RaspberryPi A/B
sudo apt-get install glslviewer
else
sudo apt-get install git-core cmake xorg-dev libglu1-mesa-dev
git clone https://github.com/glfw/glfw.git
cd glfw
cmake .
make
sudo make install
cd ..
git clone --depth=1 --branch=master https://github.com/patriciogonzalezvivo/glslViewer glslViewer
cd glslViewer
make
sudo make install
fi
elif [ $os == "Darwin" ]; then
# ON MacOX
echo "osx"
if [ ! -e /usr/local/bin/brew ]; then
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
brew update
# do we really want to upgrade? this could take a damn long time.
brew upgrade
brew tap homebrew/versions
brew install glfw3 pkg-config
git clone --depth=1 --branch=master https://github.com/patriciogonzalezvivo/glslViewer glslViewer
rm -rf !$/.git
cd glslViewer
make
make install
fi
| true
|
1c9aecd4b66cf14e3a815a25057dec851f551426
|
Shell
|
petronny/aur3-mirror
|
/sdlmess-svn/PKGBUILD
|
UTF-8
| 2,354
| 2.8125
| 3
|
[] |
no_license
|
# Contributor: Anton Shestakov <engored*ya.ru>
# Maintainer: Anton Shestakov <engored*ya.ru>
pkgname=sdlmess-svn
pkgver=20688
pkgrel=1
pkgdesc='A port of the popular Multiple Emulator Super System using SDL, SVN version.'
url='http://www.mess.org/'
license=('custom:MAME License')
arch=('i686' 'x86_64')
depends=('sdl>=1.2.11' 'sdl_ttf' 'libxinerama' 'gconf' 'zlib' 'expat')
makedepends=('svn' 'python2')
[ "$CARCH" = 'i686' ] && makedepends=('svn' 'nasm')
conflicts=('sdlmess')
provides=('sdlmess')
source=(sdlmess.sh)
md5sums=('141069e7019da5e33414dc8d4c421150')
install=sdlmess.install
_svntrunk=svn://dspnet.fr/mame/trunk/
_svnmod=mame
build() {
cd "$srcdir"
if [ -d "$_svnmod/.svn" ]; then
(cd "$_svnmod" && svn up -r "$pkgver")
else
svn co "$_svntrunk" --config-dir ./ -r "$pkgver" "$_svnmod"
fi
msg 'SVN checkout done or server timeout'
msg 'Starting make...'
rm -rf "$srcdir/$_svnmod-build"
cp -r "$srcdir/$_svnmod" "$srcdir/$_svnmod-build"
cd "$srcdir/$_svnmod-build"
# These changes allow GCC 4.2+ to compile SDLMESS
sed -e 's|CCOMFLAGS += -Werror|CCOMFLAGS += |' \
-e 's|-Wno-unused-functions|-Wno-unused|' \
-i makefile
# Adjusting make options according to target architecture
if [ "$CARCH" == 'x86_64' ]; then
echo 'Compiling for AMD64...'
make TARGET=mess AMD64=1 PTR64=1 SUFFIX64='' BUILD_ZLIB=0 BUILD_EXPAT=0 PYTHON=python2 ARCHOPTS="$CFLAGS"
elif [ "$CARCH" == 'i686' ]; then
echo 'Compiling for i686...'
make TARGET=mess I686=1 BUILD_ZLIB=0 BUILD_EXPAT=0 ARCHOPTS="$CFLAGS"
else
echo 'Compiling for i386...'
make TARGET=mess PM=1 BUILD_ZLIB=0 BUILD_EXPAT=0 ARCHOPTS="$CFLAGS"
fi
}
package() {
cd "$srcdir/$_svnmod-build"
# Installing the wrapper script
install -Dm755 "$srcdir/sdlmess.sh" "$pkgdir/usr/bin/sdlmess"
# Installing binaries
install -Dm755 mess "$pkgdir/usr/share/sdlmess/sdlmess"
# Installing extra bits
install -d "$pkgdir/usr/share/sdlmess/artwork"
install -m644 artwork/* "$pkgdir/usr/share/sdlmess/artwork/"
install -d "$pkgdir/usr/share/sdlmess/hash"
install -m644 hash/* "$pkgdir/usr/share/sdlmess/hash/"
install -d "$pkgdir/usr/share/sdlmess/keymaps"
install -m644 keymaps/* "$pkgdir/usr/share/sdlmess/keymaps/"
# The license
install -Dm644 docs/license.txt "$pkgdir/usr/share/licenses/custom/sdlmess/license.txt"
}
| true
|
93ce607805025d582e30af845310f866e180c990
|
Shell
|
enmata/cmpsite
|
/testing/testing_wrapper-remote-tls.sh
|
UTF-8
| 537
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
echo "--Creating virtual environment--"
cd testing
virtualenv -q testing_venv
source testing_venv/bin/activate
echo "--Installing dependencies--"
pip3 install -q -r requirements_testing_wrapper_remote.txt
echo "--Setting execution environment--"
export DJANGO_BASE_URL="https://192.168.99.107:443/secure"
#Disabling TLS certificate validation due a self-signed certificate its used
export CURL_CA_BUNDLE=""
echo "--Running tests--"
python3 testing_requests.py
echo "--Cleaning up--"
deactivate
rm -rf testing_venv
cd ..
| true
|
8ba6ff08907e74ed8b26d78fa1734a8cef0aef27
|
Shell
|
Panagiotis-INS/Mini_Projects
|
/Rick Script/Rick.sh
|
UTF-8
| 169
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
##
#code version
enc=cGFkZGluZ3BhZGRpbmdwYWRkaW5nIGh0dHBzOi8vd3d3LnlvdXR1YmUuY29tL3dhdGNoP3Y9ZFF3NHc5V2dYY1EgcGFkZGlucGFkZGluZ3BhZGRpbmcK
xdg-open `echo $enc |base64 -d |cut -d " " -f 2`;
exit 0;
#
#command version
xdg-open `echo "cGFkZGluZ3BhZGRpbmdwYWRkaW5nIGh0dHBzOi8vd3d3LnlvdXR1YmUuY29tL3dhdGNoP3Y9ZFF3NHc5V2dYY1EgcGFkZGlucGFkZGluZ3BhZGRpbmcK" |base64 -d | cut -d " " -f 2`
| true
|
427306f86873a2f308b5b89f076edd65bda548ff
|
Shell
|
LeDarkSide/packages
|
/cloud/script.bash
|
UTF-8
| 6,821
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# ----------------------------
# ------- data.bash ---------
# ---- autor : Leo Sudreau ---
# ---- version : 0.8 ---------
# ----------------------------
#
# Le scripts à pour but de synchroniser différents comptes en lignes, permettant d'avoir tout ces dossier en ligne au lancement de l'environement de travail
# Pour ceci le script comprendra plusieurs fonctions :
#
# - Une fonction qui definira le dossier ou les différents dossiers seronts installés
# - Une fonction qui synchronisera, dans le cas ou les dossiers sont deja installé
# - différentes fonctions pour les différents services de stockages en lignes
#
# Le scripts sera en communiquation avec le project DarkSide, mais sera aussi si l'utilisateur le veut totalement utilisable seul sous la forme de commande.
#
# A la creation de chaque dossier syncronisé, un fichier text contenant une liste de ces dossiers se mettra à jour, il permettra la mise a jour regulieres des dossiers.
# Debut Variable global
default_location=~/Data # L'emplacement par default
install_location=$default_location # L'emplacement final d'instalation des dossiers
date=$( date +%Y%m%d ) # date courante
log_location=/var/tmp/LeDarkSide
# Fin variable grobal
# ----- Function folder ------
#
# Cette fonction permet de determiner le chemin d'instalation du dossier "data"
#
# La fonction comprend un paramètre :
# location : le chemin absolue voulu, si aucun n'es saisie lors de l'instalation, un chemin par default ( ~/Data ) existe et sera utilisé.
#
# ----- Function folder ------
function folder () { #location
echo "Debut de la fonction folder "
if [ -d "$1" ] || [ -d "$default_location" ];
then
echo " dossier déjâ installé"
install_location=$default_location
else
if [ -n $1 ]
then
echo " creation dossier par default"
mkdir $default_location
install_location=$default_location
else
echo " creation dossier utilisateur"
install_location=$1
mkdir "$install_location"
fi
fi
if [ -d "$log_location" ]
then
echo " Le dossier temporaire situé au $log_location est déjà crée"
else
echo " création du dossier temporaire ..."
echo " le chemin est $log_location"
mkdir $log_location
fi
echo "Fin de la fonction folder "
}
# ----- Function github ------
#
# Cette fonction permet de clonner un dossier github.
# La fonction comprend trois paramètres :
# folder_name : le nom de dossier qui va etre crée.
# adresse : L'adresse github permettant le clonage.
# branche : La branche voulu, ce paramatre est optionnel, si aucune branche n'est écrite, la fonction clonera la branche master du git.
# Si le git est deja cloné, il sera mise à jour.
#
# ----- Function github ------
function github() { # folder_name,adresse,branche
echo "Debut de la fonction github "
if [ -d "$install_location/$1" ]
then
if check $install_location/$1
then
echo " Le git $1 existe déjà"
echo " Mise à jour du git ..."
cd $install_location/$1 | git pull
log $install_location/$1 "git update" $2
else
echo " Un dossier existe déja,cependant n'es pas un git"
fi
else
if [ -n $3 ]
then
git clone "$2" "$install_location/$1"
log $install_location/$1 git $2
else
git clone -b "$3" "$2" "$install_location"
log $install_location/$1 git $2 $3
fi
fi
echo "Fin de la fonction github "
}
# ----- Function log ------
#
# Cette fonction enrengistre tout les actions faites par le scripts
# La fonction comprend 4 paramètres
# dossier : chemin absolue du dossier
# type : type d'action en fonction du service utilisé et du type action faites, clonage ou mise à jour
# adresse : l'adresse qui à permis le clonage
# branche : Si le service utilisé comprend un systeme de branche, la branche selectionnée
#
# Chaque ligne de log est composée de la maniere suivante :
#
# [ date : date de l'action ] [ dossier : chemin absolue du dossier] [ type : type de service utilisé] [ adresse : adresse de clonage] [ branche : branche utilisé si besoin ]
#
# ----- Function log ------
function log() { # dossier , type , adresse , branche
echo "Debut de la fonction log "
if [ -n $3 ]
then
echo " [ date : $date ] [ dossier : $1 ] [ type : $2 ] [ adresse : $3 ] "
echo " [ date : $date ] [ dossier : $1 ] [ type : $2 ] [ adresse : $3 ] " >> $log_location/datalog.txt
else
echo " [ date : $date ] [ dossier : $1 ] [ type : $2 ] [ adresse : $3 ] [ branche : $4 ] "
echo " [ date : $date ] [ dossier : $1 ] [ type : $2 ] [ adresse : $3 ] [ branche : $4 ] " >> $log_location/datalog.txt
fi
echo "Fin de la fonction log "
}
# ----- Function clean ------
# Fonction provisoir pour les test
# ----- Function clean ------
function clean() {
rm $log_location/datalog.txt
rm -r $install_location
}
# ----- Function svn ------
#
# Cette fonction permet de clonner un dossier svn.
# La fonction comprend deux paramètres :
# dossier : chemin absolue du dossier
# adresse : L'adresse svn permettant le clonage.
# users : Nom de l'utilisateur voulant accédes au dossier svn
# Si le git est deja cloné, il sera mise à jour.
#
# ----- Function svn ------
function svn() {
if [ -d "$install_location/$1" ]
then
echo " Le svn $1 existe déjà"
echo " Mise à jour du svn ..."
cd $install_location/$1 | svn update
log $install_location/$1 "svn update" $2
else
svn checkout --username "$3" --password "svn!$3" "$2" "$install_location/$1"
log $install_location/$1 svn $2
fi
}
# ----- Function webserver ------
#
# Cette fonction permet de clonner le contenu d'un serveur web, il comprend les server HTTP, FTP et HTTPS
# La fonction comprend deux paramètres :
# dossier : chemin absolue du dossier
# adresse : L'adresse permettant le clonage.
# ----- Function webserver ------
#function webserver() {}
# ----- Function check ------
#
# Cette fonction verifira si le dossier trouvé et présent de le fichier log
# Si c'est le cas, il fera juste mis à jour.
# sinon, il ne fera rien
# la fonction comprend un parametre :
# Dosser : adresse absolue du dossier trouvé
# ----- Function check ------
function check() {
if [ -d $1/.git ]
then
return 0
else
return 1
fi
}
# Main
echo "Valeur des variables globals : " ;echo -e
echo "----------------------------------"
echo " default_location = $default_location"
echo " install_location = $install_location"
echo " date = $date"
echo " log_location = $log_location"
echo "----------------------------------" ;echo -e
folder
github IUT https://github.com/LinkIsACake/IUT.git
github OTHERS https://github.com/LinkIsACake/OTHERS.git
github TEST https://github.com/LinkIsACake/OTHERS.git
| true
|
2d9902f13f2d83a293208e349643b94d7ef9b774
|
Shell
|
ilibx/magenta-os
|
/kernel/lib/version/buildid.sh
|
UTF-8
| 652
| 3.234375
| 3
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2016 The Fuchsia Authors
# Copyright (c) 2015 Travis Geiselbrecht
#
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT
eval `date -u +'BYR=%Y BMON=%-m BDOM=%-d BHR=%-H BMIN=%-M'`
chr () {
printf \\$(($1/64*100+$1%64/8*10+$1%8))
}
b36 () {
if [ $1 -le 9 ]; then echo $1; else chr $((0x41 + $1 - 10)); fi
}
id=$(printf '%c%c%c%c%c\n' `chr $((0x41 + $BYR - 2011))` `b36 $BMON` `b36 $BDOM` `b36 $BHR` `b36 $(($BMIN/2))`)
if [[ $# -eq 1 ]]; then
cat > "$1" <<END
#ifndef __BUILDID_H
#define __BUILDID_H
#define ${id}
#endif
END
fi
| true
|
2a4829804b3930a07bcb0aba263d0a9e47ee4799
|
Shell
|
espnet/espnet
|
/egs2/googlei18n_lowresource/tts1/local/data.sh
|
UTF-8
| 4,007
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -e
set -u
set -o pipefail
log() {
local fname=${BASH_SOURCE[1]##*/}
echo -e "$(date '+%Y-%m-%dT%H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}
SECONDS=0
stage=1
stop_stage=3
threshold=35
sex=both
lang=es_ar
openslr_id=61
nj=40
log "$0 $*"
. utils/parse_options.sh
if [ $# -ne 0 ]; then
log "Error: No positional arguments are required."
exit 2
fi
. ./path.sh || exit 1;
. ./cmd.sh || exit 1;
. ./db.sh || exit 1;
if [ -z "${GOOGLEI18N}" ]; then
log "Fill the value of 'GOOGLEI18N' of db.sh"
exit 1
fi
mkdir -p ${GOOGLEI18N}
db_root=${GOOGLEI18N}
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
log "stage -1: download data from openslr"
if [[ "${sex}" == female ]]; then
local/download_and_unzip.sh "${db_root}" "https://www.openslr.org/resources/${openslr_id}/${lang}_female.zip" ${lang}_female.zip
wget -O local/line_index_female.tsv "https://www.openslr.org/resources/${openslr_id}/line_index_female.tsv"
mv local/line_index_female.tsv local/index.tsv
elif [[ "${sex}" == male ]]; then
local/download_and_unzip.sh "${db_root}" "https://www.openslr.org/resources/${openslr_id}/${lang}_male.zip" ${lang}_male.zip
wget -O local/line_index_male.tsv "https://www.openslr.org/resources/${openslr_id}/line_index_male.tsv"
mv local/line_index_male.tsv local/index.tsv
else
# local/download_and_unzip.sh "${db_root}" "https://www.openslr.org/resources/${openslr_id}/${lang}_male.zip" ${lang}_male.zip
# local/download_and_unzip.sh "${db_root}" "https://www.openslr.org/resources/${openslr_id}/${lang}_female.zip" ${lang}_female.zip
wget -O local/line_index_female.tsv "https://www.openslr.org/resources/${openslr_id}/line_index_female.tsv"
wget -O local/line_index_male.tsv "https://www.openslr.org/resources/${openslr_id}/line_index_male.tsv"
cat local/line_index_male.tsv local/line_index_female.tsv > local/index.tsv
fi
fi
if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
log "stage 1: prepare crowdsourced data"
mkdir -p data
mkdir -p data/${lang}
log "generate utt2spk"
awk -F '[_\t]' '{print $1 "_" $2 "_" $3 " " $1 "_" $2}' local/index.tsv > data/${lang}/utt2spk
log "generate text"
cp local/index.tsv data/${lang}/text
log "generate wav.scp"
awk -F "\t" -v db=${db_root} '{print $1 " " db}' local/index.tsv > data/${lang}/wav.scp
log "sorting"
sort data/${lang}/utt2spk -o data/${lang}/utt2spk
sort data/${lang}/wav.scp -o data/${lang}/wav.scp
sort data/${lang}/text -o data/${lang}/text
utils/utt2spk_to_spk2utt.pl data/${lang}/utt2spk > data/${lang}/spk2utt
utils/validate_data_dir.sh --no-feats data/${lang}
fi
if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
log "stage 2: scripts/audio/trim_silence.sh"
# shellcheck disable=SC2154
scripts/audio/trim_silence.sh \
--cmd "${train_cmd}" \
--nj "${nj}" \
--fs 44100 \
--win_length 2048 \
--shift_length 512 \
--threshold "${threshold}" \
data/${lang} data/${lang}/log
utils/fix_data_dir.sh data/${lang}
fi
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
log "stage 3: split for development set"
utils/subset_data_dir.sh data/${lang} 500 data/dev-test-${lang}
utils/subset_data_dir.sh data/dev-test-${lang} 250 data/dev_${lang}
utils/copy_data_dir.sh data/dev-test-${lang} data/test_${lang}
utils/filter_scp.pl --exclude data/dev_${lang}/wav.scp
data/dev-test-${lang}/wav.scp > data/test_${lang}/wav.scp
utils/fix_data_dir.sh data/test_${lang}
utils/copy_data_dir.sh data/${lang} data/train_${lang}
utils/filter_scp.pl --exclude data/dev-test-${lang}/wav.scp \
data/${lang}/wav.scp > data/train_${lang}/wav.scp
utils/fix_data_dir.sh data/train_${lang}/wav.scp
fi
log "Successfully finished. [elapsed=${SECONDS}s]"
| true
|
5731afb40fb66f23749b1ab91ff5d9052b130509
|
Shell
|
ContainerDroid/gnu
|
/bin/run
|
UTF-8
| 1,631
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Repack a Nexus 5 (hammerhead) boot image with a custom GNU initramfs and
# push a root filesystem to the device over adb/rsync.
# Requires helper binaries under ./bin (unpackbootimg, mkbootimg, adb-setup...)
# plus adb and rsync on the host; the device must expose rsyncd on port 1337.
device='0b0a41d90297661a'
cmdline='console=ttyHSL0,115200,n8 androidboot.hardware=hammerhead androidboot.selinux=permissive user_debug=31 msm_watchdog_v2.enable=1'
#bootimage='android-boot.img'
bootimage='recovery.img'
rootfs='alarm'
# Derived paths: e.g. recovery.img -> recovery/recovery.img-zImage etc.
bootimage_folder=${bootimage%.img}
kernel=${bootimage_folder}/${bootimage}-zImage
ramdisk=${bootimage%.img}/${bootimage}-ramdisk.gz
ramdisk_folder=${ramdisk%.gz}
# Clean up, make sure we're starting fresh
rm -rf ${bootimage_folder} && mkdir -p ${bootimage_folder}
rm -rf gnu-initramfs.cpio.gz
# Prepare gnu-boot.img using kernel from ${bootimage} and gnu-initramfs
./bin/unpackbootimg -i ${bootimage} -o ${bootimage_folder}
./bin/unpackinitramfs ${ramdisk}
./bin/mkinitramfs gnu-initramfs
# Load addresses / cmdline match the hammerhead stock boot image layout.
./bin/mkbootimg \
--pagesize 2048 \
--ramdiskaddr 0x2900000 \
--cmdline "${cmdline}" \
--base 0x00008000 \
--board "panda" \
--kernel ${kernel} \
--ramdisk gnu-initramfs.cpio.gz \
-o gnu-boot.img
# Start copying files over adb
# adb-setup presumably forwards the rsync port to the device -- TODO confirm.
./bin/adb-setup ${device}
adb -s ${device} shell mkdir -p /data/gnu/
rsync -avr --progress --stats ${rootfs} rsync://localhost:1337/data/gnu/
rsync -avr --progress --stats ${ramdisk_folder} rsync://localhost:1337/data/gnu/android/
# Older adb-push based transfer, kept for reference:
#adb -s ${device} push alarm/init /data/gnu/
#adb -s ${device} shell rm -rf /data/gnu/android/ && mkdir -p /data/gnu/android/
#adb -s ${device} push ${ramdisk_folder} /data/gnu/android/
#adb -s ${device} shell mkdir /data/gnu/android/{dev,proc,sys,system}
#adb -s ${device} shell chmod -R 0500 /data/gnu/android/
#adb -s ${device} shell chmod -R 0755 /data/gnu/system/
#adb -s ${device} shell chown -R root /data/gnu/system
| true
|
9f4b90712fc93b05058098ec89e78289f60b6591
|
Shell
|
4lm/ClairMeta
|
/.travis/bintray.sh
|
UTF-8
| 2,030
| 3.9375
| 4
|
[] |
permissive
|
#!/bin/bash
# Upload a Debian package to Bintray and publish it.
# Usage: bintray.sh <package.deb>
# Required env: BINTRAY_USER, BINTRAY_TOKEN, BINTRAY_ORG, BINTRAY_REPO,
#               DISTRIBUTION
set -x
API=https://api.bintray.com
PACKAGE_DESCRIPTOR=bintray-package.json
DEB=$1
PCK_NAME=$(dpkg-deb -f ${DEB} Package)
PCK_VERSION=$(dpkg-deb -f ${DEB} Version)+${DISTRIBUTION}
PCK_DESC=$(dpkg-deb -f ${DEB} Description)
FILE_TARGET_PATH=$(basename $DEB)
main() {
	CURL="curl -u${BINTRAY_USER}:${BINTRAY_TOKEN} -H Content-Type:application/json -H Accept:application/json"
	# Create the package entry only when it is missing, then upload+publish.
	if ! check_package_exists; then
		echo "The package ${PCK_NAME} does not exist. It will be created"
		create_package
	fi
	deploy_deb
}
# Returns 0 when the package already exists on Bintray (HTTP 200).
# NOTE(review): the original captured the *stdout* of `[ ... ]` in backticks
# (always empty) and then did `return $empty`, which always returned the
# status of the preceding echo (0); compare the HTTP code directly instead.
check_package_exists() {
	echo "Checking if package ${PCK_NAME} exists..."
	local http_code
	http_code=$(${CURL} --write-out %{http_code} --silent --output /dev/null -X GET ${API}/packages/${BINTRAY_ORG}/${BINTRAY_REPO}/${PCK_NAME})
	echo "Package ${PCK_NAME} exists? HTTP ${http_code}"
	[ "${http_code}" -eq 200 ]
}
# Create the package, preferring a bintray-package.json descriptor when present.
create_package() {
	echo "Creating package ${PCK_NAME}..."
	if [ -f "${PACKAGE_DESCRIPTOR}" ]; then
		data="@${PACKAGE_DESCRIPTOR}"
	else
		data="{
		\"name\": \"${PCK_NAME}\",
		\"desc\": \"auto\",
		\"desc_url\": \"auto\",
		\"labels\": [\"python3\"],
		\"licenses\": [\"BSD 3-Clause\"],
		\"vcs_url\": \"https://github.com/Ymagis/ClairMeta\"
		}"
	fi
	${CURL} -X POST -d "${data}" ${API}/packages/${BINTRAY_ORG}/${BINTRAY_REPO}
}
# Upload the .deb, then publish it only when the upload succeeded.
deploy_deb() {
	if upload_content; then
		echo "Publishing ${DEB}..."
		${CURL} -X POST ${API}/content/${BINTRAY_ORG}/${BINTRAY_REPO}/${PCK_NAME}/${PCK_VERSION}/publish -d "{ \"discard\": \"false\" }"
	else
		echo "[SEVERE] First you should upload your deb ${DEB}"
	fi
}
# Returns 0 when the content upload was accepted (HTTP 201).
# Same exit-status fix as check_package_exists above.
upload_content() {
	echo "Uploading ${DEB}..."
	local http_code
	http_code=$(${CURL} --write-out %{http_code} --silent -T ${DEB} "${API}/content/${BINTRAY_ORG}/${BINTRAY_REPO}/${PCK_NAME}/${PCK_VERSION}/pool/main/${DISTRIBUTION}/${PCK_NAME}/${FILE_TARGET_PATH};deb_distribution=${DISTRIBUTION};deb_component=main;deb_architecture=i386,amd64")
	echo "DEB ${DEB} uploaded? HTTP ${http_code}"
	[ "${http_code}" -eq 201 ]
}
main "$@"
| true
|
31c061d487a27f331675f2bc83562cb1b940715a
|
Shell
|
osvaldofonseca/client
|
/scripts/peering-config
|
UTF-8
| 1,066
| 3.296875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
# Runtime layout for the PEERING client. $progdir is set by the caller
# (the sourcing script) -- TODO confirm against the entry-point script.
export varrun="$progdir/var"
# BIRD control socket and configuration tree.
export bird_sock="$varrun/bird.ctl"
export bird_cfgs="$progdir/configs/bird"
export bird_routes="$bird_cfgs/route-announcements"
export bird_filters="$bird_cfgs/prefix-filters"
# OpenVPN tunnel definitions, one .conf per mux.
export openvpn_cfgs="$progdir/configs/openvpn"
# Cached "mux device" mapping and the user-provided prefix list.
export mux2dev_db="$varrun/mux2dev.txt"
export prefix_db="$progdir/prefixes.txt"
# Kernel routing table number used for PEERING routes.
export bird_kernel_table=151
# Populate the global associative array mux2dev (mux name -> tunnel device)
# from $mux2dev_db, which holds one "mux device" pair per line.
load_mux2dev () {
	declare -gA mux2dev
	# -r keeps backslashes literal; the redirect path is quoted so the
	# function also works when $mux2dev_db contains spaces.
	while read -r fmux fdev ; do
		mux2dev[$fmux]=$fdev
	done < "$mux2dev_db"
}
# Print a status message and terminate the script successfully.
# $1: message (quoted so embedded whitespace is preserved verbatim).
term () {
	echo "$1"
	exit 0
}
# Print an error message and terminate the script with failure status 1.
# $1: message (quoted so embedded whitespace is preserved verbatim).
die () {
	echo "$1"
	exit 1
}
mkdir -p $varrun
# Export the helpers so subshells spawned by other scripts can use them.
export -f load_mux2dev
export -f term
export -f die
# Rebuild the mux->device cache from the OpenVPN configs when it is
# missing or empty (each .conf carries a "dev <tunN>" line).
if [ ! -s $mux2dev_db ] ; then
echo "rebuilding $mux2dev_db"
for fn in $(ls $openvpn_cfgs/*.conf) ; do
name=$(basename $fn)
name=${name%%.conf}
echo -n "$name " >> $mux2dev_db
grep -Ee "^dev " $fn | cut -d " " -f 2 >> $mux2dev_db
done
fi
# The prefix list is user-supplied and mandatory: abort without it.
if [ ! -s $prefix_db ] ; then
echo "error: $prefix_db not found."
die "list the prefixes you will announce in $prefix_db"
fi
# vim: ft=sh
| true
|
d0174d62cc5099e6345d937dea64ab6e2ddcb549
|
Shell
|
alexec/argo-cloudops
|
/images/cdk/build.sh
|
UTF-8
| 574
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build and push the CDK Docker image for a given CDK version.
set -e
# Positional arguments: CDK version to bake in and the target image repo.
cdk_version=$1
repo=$2
# Print invocation help: script name followed by its two required arguments.
usage() {
    printf '%s\n' "$0 CDK_VERSION REPO"
}
# Both arguments are mandatory.
if [ -z $cdk_version ]; then
usage
exit 1
fi
if [ -z $repo ]; then
usage
exit 1
fi
# Stage the build context in a scratch directory so sed -i edits below
# never touch the checked-in sources.
build_dir=$TMPDIR/docker-cdk
rm -rf $build_dir
mkdir -p $build_dir
cp Dockerfile $build_dir
cp requirements.txt $build_dir
cp ../shared/setup.sh $build_dir
cd $build_dir
# NOTE(review): `sed -i ''` is the BSD/macOS form; GNU sed would treat ''
# as a filename -- confirm this script only runs on macOS.
sed -i '' "s/{{CDK_VERSION}}/$cdk_version/g" Dockerfile requirements.txt
# Tag with both the explicit version and latest, then push both tags.
tags="-t $repo:$cdk_version -t $repo:latest"
docker build . --no-cache $tags
docker push $repo:$cdk_version
docker push $repo:latest
| true
|
4e7149ba65344495571910e6133f86da1a7c2213
|
Shell
|
hanifr/cpu-health-monit
|
/uninstall.sh
|
UTF-8
| 860
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
VERSION=1.0
# printing greetings
echo "MoneroOcean mining uninstall script v$VERSION."
echo "(please report issues to support@moneroocean.stream email with full output of this script with extra \"-x\" \"bash\" option)"
echo
# The installation lives under $HOME, so it must be set and exist.
if [ -z $HOME ]; then
echo "ERROR: Please define HOME environment variable to your home directory"
exit 1
fi
if [ ! -d $HOME ]; then
echo "ERROR: Please make sure HOME directory $HOME exists"
exit 1
fi
echo "[*] Removing cpu health monitoring"
# Only touch systemd when we can sudo non-interactively (-n fails fast
# instead of prompting for a password).
if sudo -n true 2>/dev/null; then
sudo systemctl stop cpu_track.service
sudo systemctl disable cpu_track.service
rm -f /etc/systemd/system/cpu_track.service
sudo systemctl daemon-reload
sudo systemctl reset-failed
fi
# Drop the autostart line added by the installer and kill any live process.
sed -i '/cpu_track/d' $HOME/.profile
killall -9 health_track
echo "[*] Removing $HOME/cpu_track directory"
rm -rf $HOME/cpu_track
echo "[*] Uninstall complete"
| true
|
3eafa4d74ee8784e99dc9f1ec0ffafe7cc04c022
|
Shell
|
gtfierro/conix-network-filter
|
/get-remote-tcpdump.sh
|
UTF-8
| 117
| 2.546875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Stream a remote tcpdump capture into the local FIFO /tmp/pcap, so a local
# tool (e.g. wireshark -k -i /tmp/pcap) can read it live.
# Usage: get-remote-tcpdump.sh <ssh-host> <remote-interface>
host=$1
iface=$2
# Reuse an existing FIFO; the original unconditionally ran mkfifo, which
# fails when /tmp/pcap is left over from a previous run.
if [ ! -p /tmp/pcap ]; then
	mkfifo /tmp/pcap
fi
ssh "$host" "sudo sh -c 'tcpdump -i $iface -s0 -U -n -w -'" > /tmp/pcap
| true
|
015b1f358701e127f022e502c199c5a42b142439
|
Shell
|
Mayar0-0/Laravel-FCM
|
/scripts/make-documentation.sh
|
UTF-8
| 1,307
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate the API documentation with Doctum, downloading/refreshing the
# doctum.phar tool into the build directory as needed.
set -e
REPOSITORY_ROOT="$(dirname $0)/../"
BUILD_ROOT="$(realpath ${REPOSITORY_ROOT}/build)"
# Fetch the latest 5.x doctum.phar into the build directory, replacing any
# cached copy, and make it executable.
function downloadRelease() {
    local phar="${BUILD_ROOT}/doctum.phar"
    rm -f "${phar}"
    curl -# -o "${phar}" -O https://doctum.long-term.support/releases/5/doctum.phar
    chmod +x "${phar}"
}
# Verify the cached doctum.phar against the published sha256 checksum and
# re-download it on mismatch (or when no cached copy exists).
function checkRelease() {
    if [ -f "${BUILD_ROOT}/doctum.phar" ]; then
        curl -s -o "${BUILD_ROOT}/doctum.phar.sha256" -O https://doctum.long-term.support/releases/5/doctum.phar.sha256
        cd "${BUILD_ROOT}/"
        # Capture the checksum result immediately. The original tested $?
        # only after `cd -`, so it always saw cd's (successful) status and
        # never re-downloaded a corrupted phar; worse, under `set -e` a
        # failing sha256sum aborted the whole script before the check.
        local sha_status=0
        sha256sum --check --strict doctum.phar.sha256 || sha_status=$?
        cd - > /dev/null
        if [ "${sha_status}" != "0" ]; then
            downloadRelease
        else
            echo 'You are using the latest 5.x.x release of Doctum.'
        fi
    else
        downloadRelease
    fi
}
# Run Doctum over the project config, then strip generator artifacts
# (placeholder files and version markers) from the rendered doc/ tree.
function buildDocumentation() {
    "${BUILD_ROOT}/doctum.phar" update --ignore-parse-errors -vvv --force "${REPOSITORY_ROOT}/scripts/doctum.php"
    find "${REPOSITORY_ROOT}doc" -type f -name ".delete-me" -delete
    rm "${REPOSITORY_ROOT}doc/renderer.index"
    rm "${REPOSITORY_ROOT}doc/PROJECT_VERSION"
    rm "${REPOSITORY_ROOT}doc/DOCTUM_VERSION"
}
echo "Using build root: ${BUILD_ROOT}"
# Ensure the build directory exists before caching the phar into it.
if [ ! -d ${BUILD_ROOT} ]; then
mkdir ${BUILD_ROOT}
fi
checkRelease
buildDocumentation
| true
|
1df7ba0173b19f49fca95936269180c2e16ec861
|
Shell
|
ElitCoder/kobla-base
|
/build.sh
|
UTF-8
| 147
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
# Check dependencies
# NOTE(review): no shebang/`set -e` here, so a failing check_dep.sh does not
# stop the build -- confirm whether that is intentional.
./check_dep.sh
# Build
# First run: create the build directory and let meson configure it; later
# runs skip straight to ninja.
if [ ! -d build ]; then
mkdir -p build && cd build
meson ..
cd ..
fi
cd build
ninja
cd ..
| true
|
00c124aa2fdd0fc8064660ea70358e6b1c64f1ad
|
Shell
|
wackyvik/docker-confluence
|
/image/support-files/confluence-init.sh
|
UTF-8
| 7,462
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Prerequisities and checks start.
# --- Add /etc/hosts records
if [ -f /etc/hosts.install ]; then
/bin/cat /etc/hosts.install >>/etc/hosts
fi
# --- Fix file permissions.
# Data and install trees: dirs 750, files 640, owned by confluence.
/usr/bin/find /var/atlassian/confluence -type d -exec /bin/chmod 750 '{}' ';'
/usr/bin/find /var/atlassian/confluence -type f -exec /bin/chmod 640 '{}' ';'
/usr/bin/find /usr/local/atlassian/confluence -type d -exec /bin/chmod 750 '{}' ';'
/usr/bin/find /usr/local/atlassian/confluence -type f -exec /bin/chmod 640 '{}' ';'
/bin/chmod 755 /var/atlassian
/bin/chmod 755 /usr/local/atlassian
# Startup scripts must stay executable for the confluence user.
/bin/chmod 750 /usr/local/atlassian/confluence/bin/*
/bin/chown root:root /var/atlassian
/bin/chown root:root /usr/local/atlassian
/bin/chown -R confluence:confluence /var/atlassian/confluence
/bin/chown -R confluence:confluence /usr/local/atlassian/confluence
# --- Clean up the logs.
# Recreate the logs dir if a stale non-directory entry occupies its path.
if [ ! -d /var/atlassian/confluence/logs ]; then
/bin/rm -f /var/atlassian/confluence/logs >/dev/null 2>&1
/bin/mkdir /var/atlassian/confluence/logs
/bin/chown confluence:confluence /var/atlassian/confluence/logs
/bin/chmod 750 /var/atlassian/confluence/logs
fi
# Keep the legacy "log" symlink pointing at "logs".
if [ ! -e /var/atlassian/confluence/log ]; then
/bin/ln -s /var/atlassian/confluence/logs /var/atlassian/confluence/log
/bin/chown -h confluence:confluence /var/atlassian/confluence/log
fi
cd /var/atlassian/confluence/logs
# Rotate: gzip every not-yet-compressed log, stamping it with date+time.
for logfile in $(/usr/bin/find /var/atlassian/confluence/logs -type f | /bin/grep -Eiv '\.gz$'); do
/usr/bin/gzip ${logfile}
/bin/mv ${logfile}.gz ${logfile}-$(/usr/bin/date +%d%m%Y-%H%M%S).gz
done
# Expire rotated logs older than a week.
for logfile in $(/usr/bin/find /var/atlassian/confluence/logs -type f -mtime +7); do
/bin/echo "Startup logfile ${logfile} is older than 7 days. Removing it."
/bin/rm -f ${logfile}
done
# --- Prepare environment variables.
# Render conf/server.xml from its template. Each *_ESCAPED variable is the
# raw env value with sed-replacement metacharacters (\ / ( ) &) escaped so
# it can be substituted safely on the sed RHS below; the CONFIGURE_* flags
# additionally have all whitespace stripped before comparison.
if [ -f /usr/local/atlassian/confluence/conf/server.xml.template ]; then
export CONFLUENCE_DB_DRIVER_ESCAPED=$(/bin/echo ${CONFLUENCE_DB_DRIVER} | sed s/'\\'/'\\\\'/g | sed s/'\/'/'\\\/'/g | sed s/'('/'\\('/g | sed s/')'/'\\)'/g | sed s/'&'/'\\&'/g)
export CONFLUENCE_DB_URL_ESCAPED=$(/bin/echo ${CONFLUENCE_DB_URL} | sed s/'\\'/'\\\\'/g | sed s/'\/'/'\\\/'/g | sed s/'('/'\\('/g | sed s/')'/'\\)'/g | sed s/'&'/'\\&'/g)
export CONFLUENCE_DB_USER_ESCAPED=$(/bin/echo ${CONFLUENCE_DB_USER} | sed s/'\\'/'\\\\'/g | sed s/'\/'/'\\\/'/g | sed s/'('/'\\('/g | sed s/')'/'\\)'/g | sed s/'&'/'\\&'/g)
export CONFLUENCE_DB_PASSWORD_ESCAPED=$(/bin/echo ${CONFLUENCE_DB_PASSWORD} | sed s/'\\'/'\\\\'/g | sed s/'\/'/'\\\/'/g | sed s/'('/'\\('/g | sed s/')'/'\\)'/g | sed s/'&'/'\\&'/g)
export CONFLUENCE_FE_NAME_ESCAPED=$(/bin/echo ${CONFLUENCE_FE_NAME} | sed s/'\\'/'\\\\'/g | sed s/'\/'/'\\\/'/g | sed s/'('/'\\('/g | sed s/')'/'\\)'/g | sed s/'&'/'\\&'/g)
export CONFLUENCE_FE_PORT_ESCAPED=$(/bin/echo ${CONFLUENCE_FE_PORT} | sed s/'\\'/'\\\\'/g | sed s/'\/'/'\\\/'/g | sed s/'('/'\\('/g | sed s/')'/'\\)'/g | sed s/'&'/'\\&'/g)
export CONFLUENCE_FE_PROTO_ESCAPED=$(/bin/echo ${CONFLUENCE_FE_PROTO} | sed s/'\\'/'\\\\'/g | sed s/'\/'/'\\\/'/g | sed s/'('/'\\('/g | sed s/')'/'\\)'/g | sed s/'&'/'\\&'/g)
export CONFIGURE_FRONTEND_ESCAPED=$(/bin/echo ${CONFIGURE_FRONTEND} | sed s/'\\'/'\\\\'/g | sed s/'\/'/'\\\/'/g | sed s/'('/'\\('/g | sed s/')'/'\\)'/g | sed s/'&'/'\\&'/g | sed -r s/'[ ]+'/''/g)
export CONFIGURE_SQL_DATASOURCE_ESCAPED=$(/bin/echo ${CONFIGURE_SQL_DATASOURCE} | sed s/'\\'/'\\\\'/g | sed s/'\/'/'\\\/'/g | sed s/'('/'\\('/g | sed s/')'/'\\)'/g | sed s/'&'/'\\&'/g | sed -r s/'[ ]+'/''/g)
# Unless frontend/proxy support is requested, drop the proxyName/proxyPort/
# scheme attributes from the template's Connector element.
if [ "${CONFIGURE_FRONTEND_ESCAPED}" != "TRUE" -a "${CONFIGURE_FRONTEND_ESCAPED}" != "true" ]; then
/bin/sed -r s/'proxyName="[^"]+" proxyPort="[^"]+" scheme="[^"]+" '//g /usr/local/atlassian/confluence/conf/server.xml.template >/usr/local/atlassian/confluence/conf/server.xml.template.2
/bin/mv /usr/local/atlassian/confluence/conf/server.xml.template.2 /usr/local/atlassian/confluence/conf/server.xml.template
fi
# Unless a JNDI datasource is requested, comment out the jdbc/confluence
# Resource block in the template.
if [ "${CONFIGURE_SQL_DATASOURCE_ESCAPED}" != "TRUE" -a "${CONFIGURE_SQL_DATASOURCE_ESCAPED}" != "true" ]; then
/bin/sed -r s/'<Resource name="jdbc\/confluence"'/'<!-- <Resource name="jdbc\/confluence" '/g /usr/local/atlassian/confluence/conf/server.xml.template | /bin/sed -r s/'validationQuery="Select 1" \/>'/'validationQuery="Select 1" \/> -->'/g >/usr/local/atlassian/confluence/conf/server.xml.template.2
/bin/mv /usr/local/atlassian/confluence/conf/server.xml.template.2 /usr/local/atlassian/confluence/conf/server.xml.template
fi
# Substitute the %PLACEHOLDER% tokens and install the final server.xml.
/bin/cat /usr/local/atlassian/confluence/conf/server.xml.template | /bin/sed s/'\%CONFLUENCE_DB_DRIVER\%'/"${CONFLUENCE_DB_DRIVER_ESCAPED}"/g \
| /bin/sed s/'\%CONFLUENCE_DB_URL\%'/"${CONFLUENCE_DB_URL_ESCAPED}"/g \
| /bin/sed s/'\%CONFLUENCE_DB_USER\%'/"${CONFLUENCE_DB_USER_ESCAPED}"/g \
| /bin/sed s/'\%CONFLUENCE_DB_PASSWORD\%'/"${CONFLUENCE_DB_PASSWORD_ESCAPED}"/g \
| /bin/sed s/'\%CONFLUENCE_FE_NAME\%'/"${CONFLUENCE_FE_NAME_ESCAPED}"/g \
| /bin/sed s/'\%CONFLUENCE_FE_PORT\%'/"${CONFLUENCE_FE_PORT_ESCAPED}"/g \
| /bin/sed s/'\%CONFLUENCE_FE_PROTO\%'/"${CONFLUENCE_FE_PROTO_ESCAPED}"/g \
>/usr/local/atlassian/confluence/conf/server.xml
/bin/chown confluence:confluence /usr/local/atlassian/confluence/conf/server.xml
/bin/chmod 640 /usr/local/atlassian/confluence/conf/server.xml
/bin/rm -f /usr/local/atlassian/confluence/conf/server.xml.template
fi
# Render bin/setenv.sh from its template with the JVM heap bounds.
if [ -f /usr/local/atlassian/confluence/bin/setenv.sh.template ]; then
export JAVA_MEM_MAX_ESCAPED=$(/bin/echo ${JAVA_MEM_MAX} | sed s/'\\'/'\\\\'/g | sed s/'\/'/'\\\/'/g | sed s/'('/'\\('/g | sed s/')'/'\\)'/g | sed s/'&'/'\\&'/g)
export JAVA_MEM_MIN_ESCAPED=$(/bin/echo ${JAVA_MEM_MIN} | sed s/'\\'/'\\\\'/g | sed s/'\/'/'\\\/'/g | sed s/'('/'\\('/g | sed s/')'/'\\)'/g | sed s/'&'/'\\&'/g)
/bin/cat /usr/local/atlassian/confluence/bin/setenv.sh.template | /bin/sed s/'\%JAVA_MEM_MIN\%'/"${JAVA_MEM_MIN_ESCAPED}"/g \
| /bin/sed s/'\%JAVA_MEM_MAX\%'/"${JAVA_MEM_MAX_ESCAPED}"/g \
>/usr/local/atlassian/confluence/bin/setenv.sh
/bin/chown confluence:confluence /usr/local/atlassian/confluence/bin/setenv.sh
/bin/chmod 750 /usr/local/atlassian/confluence/bin/setenv.sh
/bin/rm -f /usr/local/atlassian/confluence/bin/setenv.sh.template
fi
# --- Prerequisities finished, all clear for takeoff.
# --- Environment variables.
export APP=confluence
export USER=confluence
export CONF_USER=confluence
export BASE=/usr/local/atlassian/confluence
export CATALINA_HOME="/usr/local/atlassian/confluence"
export CATALINA_BASE="/usr/local/atlassian/confluence"
export LANG=en_US.UTF-8
# --- Start Confluence
# Run Tomcat in the foreground (-fg) as the confluence user so the container
# stays attached to the process; raise the open-file limit first.
/usr/bin/su -m ${USER} -c "ulimit -n 63536 && cd $BASE && $BASE/bin/start-confluence.sh -fg"
| true
|
2273d9ad79577d5a8cf71d6c8ac681bb75b5db74
|
Shell
|
im-ant/IFT6135-rep-learning
|
/A3-generative-models/arg-job_q3_gan.script
|
UTF-8
| 1,292
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# ============================================================================
# Script submitted to sbatch to run job on Mila cluster
#
# ============================================================================
set -e
# ===========================
# Variable set-up
# Training variables
NUM_EPOCHS=100 # set to 100
N_CRITIC_UPDATES=5
LP_COEFF=10 # default: 10
LR="1e-4" # default: "1e-4"
# Data variables
DATA_ROOT=$SLURM_TMPDIR
LOG_DIR=$logpath
# Logging / print variables
PRINT_FREQ=1000
LOG_FREQ=100
IMG_LOG_FREQ=1 # Write image to tensorboard
# ===========================
# Experimental set-up
# (1.1) Load packages
module load python/3.7
module load cuda/10.1 cuda/10.1/cudnn/7.6
# (1.2) Load environment
source $HOME/venvs/torchRL/bin/activate
nvidia-smi
# ===========================
# Launch job
python -u q3_solution.py --num_epochs $NUM_EPOCHS \
--n_critic_updates $N_CRITIC_UPDATES \
--lp_coeff $LP_COEFF \
--lr $LR \
--data_root $DATA_ROOT \
--log_dir $LOG_DIR \
--print_freq $PRINT_FREQ \
--log_freq $LOG_FREQ \
--img_log_freq $IMG_LOG_FREQ \
| true
|
d62ae87aa209df1fbfea856a46db8209976c36cc
|
Shell
|
ramp-eu/STAR
|
/src/ROSE-AP/docker/uploadcsv.sh
|
UTF-8
| 1,175
| 2.84375
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Seed a FIWARE Orion context broker with CSV-backed entities, then push
# updates in a loop; on the third iteration, subscribe QuantumLeap to all
# entity changes. Requires $FIWAREHOST plus the createcsv.sh/updatecsv.sh
# helpers alongside this script.
# NOTE(review): the original started with a bare
#   sed -e "s/FIWAREHOST/$FIWAREHOST/g"
# with no input file, which only copied stdin to stdout (and can hang when
# run interactively); removed as a leftover.
echo "$FIWAREHOST"
sleep 5
ip=$FIWAREHOST
id="StitchJob"
csvfile="app/data.csv"
echo DOING : curl create entities
# make sure you execute this line only once, comment if you need to update existing entities
./createcsv.sh "$csvfile"
echo DOING : curl get entities
curl --location --request GET "$ip:1026/v2/entities" # | json_pp
x=1
# Push an update every second, forever.
while true
do
echo "Welcome $x times"
x=$(( x + 1 ))
sleep 1
echo DOING : upload csv
./updatecsv.sh "$csvfile"
echo DOING : curl get entities only jobid
curl --location --request GET "$ip:1026/v2/entities/$id/attrs/JobID" #| json_pp
# One-shot: register the QuantumLeap subscription on the third pass only
# (x has already been incremented, so this fires exactly once).
if ((x == 3 )); then
curl --location --request POST "$ip:1026/v2/subscriptions/" --header 'Content-Type: application/json' --data-raw '{
"description": "Notify QuantumLeap of all sensor changes",
"subject": {
"entities": [
{
"idPattern": ".*",
"type": "csv_value"
}
],
"condition": { "attrs": [] }
},
"notification": {
"http": {
"url": "http://quantumleap:8668/v2/notify"
},
"attrs": [],
"metadata": ["dateCreated", "dateModified"]
}
}'
fi
done
| true
|
d616aaf2a01ae3a4a1b13a7c8f8f3a2b82ce9f97
|
Shell
|
yanghoon/backup-test
|
/test-rclone-rcat.sh
|
UTF-8
| 494
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Round-trip test for `rclone rcat`: stream a tarball of the current
# directory into object storage, pull it back, extract it, and diff the
# extraction against the working tree.
RCLONE=s3-demo:zcp-backup-gitea
FILE=rcat-test.tgz
# Extraction target, e.g. .tmp/rcat-test for rcat-test.tgz.
DIST=".tmp/${FILE%.*}"
# Paths to leave out of both the tarball and the final diff.
echo -e ".git\n.tmp\n.gitignore" > .exclude
## https://rclone.org/commands/rclone_rcat/
## https://blog.ls-al.com/tar-to-object-storage-using-rclone/
# Stream tar directly to the remote (no local intermediate file).
time tar --exclude-from=.exclude -zcvpf - . | rclone rcat $RCLONE/$FILE -vv
rclone lsl $RCLONE --include '*.tgz'
rm -rf $DIST && mkdir -p $DIST
rclone copy $RCLONE/$FILE .tmp -vv
tar -zxpf .tmp/$FILE -C $DIST
# An empty diff means the round trip preserved every file.
diff -NEwburq . $DIST | grep -v -f .exclude
| true
|
f20c15c2f6da2a5da713c71864d6ef567a6e657f
|
Shell
|
nickgarkusha/xcode-continuous-integration
|
/examples/mapbox/mapbox-gl-native-ios_open_iframework.sh
|
UTF-8
| 1,630
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Post-Integration Script
# mapbox_gl_native_ios_open_iframework
# Opens the built iOS framework package and logs its version metadata
# (Info.plist) after an Xcode Server integration.
# MARK: - Update PATH
echo $PATH
export PATH=/usr/local/bin:.:$PATH
echo $PATH
# set verbose
set -v
# MARK: - project specific build commands
# Log the explicit path to the Workspace
echo xed ${XCS_PRIMARY_REPO_DIR}/platform/ios/ios.xcworkspace
cd ${XCS_PRIMARY_REPO_DIR}
pwd
open build/ios/pkg
echo ${XCS_PRIMARY_REPO_DIR}/build/ios/pkg
cat build/ios/pkg/version.txt
echo open ${XCS_PRIMARY_REPO_DIR}/build/ios/pkg/documentation/index.html
cd ${XCS_PRIMARY_REPO_DIR}/build/ios/pkg
# Version info from Mapbox.framework
# plutil -- property list utility
# plutil -help
# man plutil
# Dump the plist as JSON so individual keys can be read with `json`.
plutil -convert json dynamic/Mapbox.framework/Info.plist -o Info.json -r
# `json` depends on https://www.npmjs.com/package/json
# npm install -g json
cat Info.json | json CFBundleIdentifier
cat Info.json | json CFBundleShortVersionString
cat Info.json | json CFBundleVersion
cat Info.json | json MGLSemanticVersionString
cat Info.json | json MGLCommitHash
# make script to move Mapbox.framework, docs, and license
SHORT_VERSION="$(cat Info.json | json CFBundleShortVersionString)"
echo "mv pkg/ ~/Downloads/${SHORT_VERSION}" > ../mv-pkg.sh
# Same keys again via plutil directly, for the integration log.
plutil -p dynamic/Mapbox.framework/Info.plist | grep CFBundleIdentifier
plutil -p dynamic/Mapbox.framework/Info.plist | grep CFBundleShortVersionString
plutil -p dynamic/Mapbox.framework/Info.plist | grep CFBundleVersion
plutil -p dynamic/Mapbox.framework/Info.plist | grep MGLSemanticVersionString
plutil -p dynamic/Mapbox.framework/Info.plist | grep MGLCommitHash
plutil -p dynamic/Mapbox.framework/Info.plist
| true
|
1b6c96a51c80eb6942a255068848eda732eae70b
|
Shell
|
Ruxton/shell_config
|
/.profile.d/android.bash
|
UTF-8
| 3,838
| 3.984375
| 4
|
[] |
no_license
|
# droidshot: capture a screenshot from the connected Android device via adb.
# $1: output filename; when omitted, auto-names droidshot-DD-MM-YYYY-NNN.png,
#     picking the next free 3-digit counter for today's date.
function droidshot() {
local formatter="%03d"
local file="$1"
local n=1
local fn=$(printf "${formatter}" $n)
if [[ "$file" == "" ]]; then
file="droidshot-"$(date "+%d-%m-%Y-")
files=`ls ${file}* 2>/dev/null|awk '{print $1}'`
ext=".png"
# Bump the counter past every existing file that matches today's prefix.
for i in $files; do
if [[ "${file}${fn}${ext}" == "${i}" ]]; then
n=$(( n + 1 ))
fi
fn=$(printf "${formatter}" $n)
done
file="${file}${fn}${ext}"
fi
echo "Screenshotting to ${file}.."
# screencap writes a PNG to stdout; adb mangles it with CRLF line endings,
# so strip the \r\n -> \n translation with perl before saving.
adb shell screencap -p 2> /dev/null | perl -pe 's/\x0D\x0A/\x0A/g' > $file
}
# demu: launch an Android emulator AVD in the background at half scale.
# $1: AVD name (default: Nexus-4.2.2)
function demu() {
  local avd="$1"
  if [[ "$avd" == "" ]]; then
    avd="Nexus-4.2.2"
  fi
  # NOTE(review): the original wrote `emulator ... & > /dev/null`, which
  # backgrounds the emulator and then runs a separate empty command with the
  # redirection; the redirect must come before `&` to silence the emulator.
  emulator -avd ${avd} -scale 0.5 > /dev/null &
}
# createkeystore: interactively create an RSA release keystore.
# $1=keystore_file $2=alias
function createkeystore() {
  # Quote both arguments so keystore paths/aliases with spaces survive.
  keytool -genkey -v -keystore "$1" -alias "$2" -keyalg RSA -keysize 2048 -validity 10000
}
# generate_fb_keyhash: print the Facebook key hash for an Android keystore
# (base64 of the SHA-1 of the exported signing certificate).
# $1: key alias (default: androiddebugkey)
# $2: keystore path (default: $HOME/.android/debug.keystore)
function generate_fb_keyhash() {
  local key_alias="${1:-androiddebugkey}"
  # NOTE(review): the original assigned key_alias/key_store but then
  # hard-coded the debug keystore in the keytool call, ignoring both
  # arguments; it also quoted "~", which prevents tilde expansion.
  local key_store="${2:-$HOME/.android/debug.keystore}"
  keytool -exportcert -alias "$key_alias" -keystore "$key_store" | openssl sha1 -binary | openssl base64
}
# droid_icon_copy: $icon_name $target
# Copies <DENSITY>/<icon>.png (upper-case source dirs MDPI/HDPI/...) into the
# matching <target>/drawable-<density>/ resource folder for each density.
function icon_copy() {
local icon=$1
local resource_folders="mdpi hdpi xhdpi xxhdpi"
echo "Looking for ${icon}.."
for res in $resource_folders; do
# Source directories are the upper-cased density names.
local uc_res=`echo $res|tr '[a-z]' '[A-Z]'`
echo "Copying $uc_res to $res"
cp $uc_res/$1.png $2/drawable-$res/$1.png
done
}
# droid: (deploy/compile/run) Deploy, compile or run android projects with maven.
# Each argument maps to a set of maven goals; all requested goals are run in
# a single mvn invocation.
function droid() {
  if [ $# -lt 1 ]; then
    cat <<EOF
Usage: droid (deploy|compile|run)
deploy: Deploy android project to device with maven (mvn android:deploy)
run: Deploy and run an android project on device with maven (mvn android:deploy android:run)
compile: Compile android project with maven (mvn clean install)
EOF
    return 0
  fi
  local mvn_args=""
  local action
  # NOTE(review): the original iterated `for action in $ARGS` where ARGS was
  # an array; unsubscripted $ARGS expands to only the first element, so any
  # action after the first was silently ignored. Iterate "$@" instead.
  for action in "$@"; do
    case "$action" in
      compile) mvn_args="$mvn_args clean install";;
      deploy) mvn_args="$mvn_args android:deploy";;
      run) mvn_args="$mvn_args android:deploy android:run";;
    esac
  done
  if [[ "$mvn_args" != "" ]]; then
    # Intentionally unquoted: mvn_args must word-split into separate goals.
    mvn $mvn_args
  fi
}
# build_droid_icons: SVG 2 PNG at Droid Sizes.
# $1: input directory of .svg files (default: .)
# $2: output root; PNGs land in $2/res/drawable-<density>/ (default: $1)
function build_droid_icons() {
  local ICON_SIZES=(xxhdpi xhdpi hdpi mdpi)
  local INPUT_DIR="${1:-.}"
  local OUTPUT_DIR="${2:-$INPUT_DIR}"
  local size path
  ## Setup folders
  for size in "${ICON_SIZES[@]}"; do
    path="${OUTPUT_DIR}/res/drawable-${size}"
    echo "Creating directory ${path}"
    mkdir -p "$path"
  done
  # NOTE(review): the original word-split `$(find ...)`, which breaks on
  # paths containing spaces; iterate NUL-delimited instead.
  local f
  while IFS= read -r -d '' f; do
    echo "Processing $f"
    process_droid_icon "$f" "${OUTPUT_DIR}"
  done < <(find "${INPUT_DIR}" -name "*.svg" -type f -print0)
}
# process_droid_icon: render one SVG to a PNG per Android density using the
# macOS Inkscape CLI. $1: path to the .svg, $2: output root (PNGs land in
# $2/res/drawable-<density>/). ICON_DPI pairs with ICON_SIZES by index.
function process_droid_icon {
ICON_SIZES=(xxhdpi xhdpi hdpi mdpi)
ICON_DPI=(360 180 133.75 90)
inkscape="/Applications/Inkscape.app/Contents/Resources/bin/inkscape -z"
file=$(basename $1)
filename="${file/.svg}.png"
for (( i = 0; i < ${#ICON_SIZES[@]}; i++ )); do
echo "Processing ${ICON_SIZES[$i]}..."
path="$2/res/drawable-${ICON_SIZES[$i]}"
# -d sets the export DPI, -e the PNG output path; output is discarded.
$inkscape -d ${ICON_DPI[$i]} -e $path/$filename $1 >& /dev/null
done
}
# adbrestart: restart the Android Debug Bridge server.
function adbrestart() {
  local adb_bin=$ANDROID_HOME/platform-tools/adb
  $adb_bin kill-server
  $adb_bin start-server
}
function __apk_selector() {
    # Prompt the user (message in $1, optional) to pick an apk from
    # target/*.apk via the shared __selector helper; the chosen basename is
    # stored by __selector into $selected_apk.
    if [ "$1" == "" ]
    then
        local prompt="Please select an apk"
    else
        local prompt=$1
    fi
    # Build the newline-separated basename list with a glob instead of
    # parsing `ls` output (which mangles unusual filenames).
    # (The original's unused `most_recent_apk` local has been dropped.)
    local apk
    local apk_list=""
    for apk in target/*.apk; do
        [[ -e "$apk" ]] || continue     # glob matched nothing
        apk_list="${apk_list}${apk_list:+$'\n'}${apk##*/}"
    done
    __selector "${prompt}" "selected_apk" "" "${apk_list}"
}
| true
|
37ed8146aba22268afc4d3a0986783f3257e2053
|
Shell
|
zessx/sass-init
|
/sass-init.sh
|
UTF-8
| 537
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# sass-init.sh: a shell script to generate a Sass architecture from a Manifest file
# $1: manifest
# $2: syntax (scss | sass)
# $3: prefix (prefix | no-prefix)
# For every `@import 'name'` line in the manifest, create the matching
# partial file (and its directories) so the tree mirrors the manifest.
for i in `grep -P '^\s*@import\s+([\x27\x22]).+\1' $1 | sed -r 's/.*([\x27\x22])(.+)\1.*/\2/'`;
do
	DIR=`dirname $i`
	FIL=`basename $i`
	# Extension defaults to .scss unless "sass" was explicitly requested.
	EXT=".sass"
	if [ -z "$2" ] || [ "$2" != "sass" ]
	then
		EXT=".scss"
	fi
	# Partial prefix defaults to "_" unless "no-prefix" was explicitly
	# requested. Fixed: the original used `&&` here, so passing "prefix"
	# wrongly produced un-prefixed files; this now mirrors the $2 logic.
	PRE=""
	if [ -z "$3" ] || [ "$3" != "no-prefix" ]
	then
		PRE="_"
	fi
	FIL="${DIR}/${PRE}${FIL}${EXT}"
	mkdir -p $DIR
	touch $FIL
done
| true
|
1294703ebadf04b2bea763650a1a0dc278cdae3f
|
Shell
|
bstrahija/dotfiles
|
/osx
|
UTF-8
| 13,457
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# ==============================================================================
# OSX customizations
#
# These are only that I currently use, but here are some more:
# * http://mths.be/osx (main source of modifications)
# * http://secrets.blacktree.com/
# * http://www.defaults-write.com/
# * https://twitter.com/defaultswrite
# * https://github.com/ptb/Mac-OS-X-Lion-Setup/blob/master/setup.sh
# * https://github.com/isao/shell/blob/master/osx-defaults.sh
# * https://github.com/ymendel/dotfiles/tree/master/osx
# * https://github.com/karmi/dotfiles/tree/master/mac
# * https://github.com/josh-/dotfiles/blob/master/osx
# * https://gist.github.com/johan/6108880
#
# Most of the functionalities can be modified through similar commands like
# these, they will work for system and third-party apps.
# ==============================================================================
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `.osx` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# ==============================================================================
# General UI/UX
# ==============================================================================
# Set computer name (as done via System Preferences → Sharing)
# Set variable either from ~/.extra (or some other sourced file) or use default one
[ -n "$DOTFILES_COMPUTER_NAME" ] && DOTFILES_COMPUTER_NAME="$DOTFILES_COMPUTER_NAME" || DOTFILES_COMPUTER_NAME="localhost"
sudo scutil --set ComputerName "$DOTFILES_COMPUTER_NAME"
sudo scutil --set HostName "$DOTFILES_COMPUTER_NAME"
sudo scutil --set LocalHostName "$DOTFILES_COMPUTER_NAME"
sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.smb.server NetBIOSName -string "$DOTFILES_COMPUTER_NAME"
# Disable menu bar transparency
defaults write NSGlobalDomain AppleEnableMenuBarTransparency -bool false
# Set appearance to Graphite
defaults write NSGlobalDomain AppleAquaColorVariant -int 6
# Always show scrollbars
defaults write NSGlobalDomain AppleShowScrollBars -string "Always"
# Disable opening and closing window animations
defaults write NSGlobalDomain NSAutomaticWindowAnimationsEnabled -bool false
# Set sidebar icon size to small
defaults write NSGlobalDomain NSTableViewDefaultSizeMode -int 1
# Increase window resize speed for Cocoa applications
# Caution: this will change setting globally for every application
# To change it on per-app basis, instead `NSGlobalDomain`
# use application identifier e.g. `com.apple.finder`
defaults write NSGlobalDomain NSWindowResizeTime -float 0.001
# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
# Save to disk (not to iCloud) by default
defaults write NSGlobalDomain NSDocumentSaveNewDocumentsToCloud -bool false
# Display ASCII control characters using caret notation in standard text views
# Try e.g. `cd /tmp; unidecode "\x{0000}" > cc.txt; open -e cc.txt`
defaults write NSGlobalDomain NSTextShowsControlCharacters -bool true
# Disable Resume system-wide
defaults write NSGlobalDomain NSQuitAlwaysKeepsWindows -bool false
# Set Help Viewer windows to non-floating mode
defaults write com.apple.helpviewer DevMode -bool true
# Fix for the ancient UTF-8 bug in QuickLook (http://mths.be/bbo)
echo "0x08000100:0" > ~/.CFUserTextEncoding
# Reveal IP address, hostname, OS version, etc. when clicking the clock
# in the login window
sudo defaults write /Library/Preferences/com.apple.loginwindow AdminHostInfo HostName
# Disable smart quotes as they’re annoying when typing code
defaults write NSGlobalDomain NSAutomaticQuoteSubstitutionEnabled -bool false
# Disable smart dashes as they’re annoying when typing code
defaults write NSGlobalDomain NSAutomaticDashSubstitutionEnabled -bool false
# Disable day of the week in menu bar clock
defaults write com.apple.menuextra.clock DateFormat -string 'HH:mm'
# ==============================================================================
# Periferals, accessibility and input
# ==============================================================================
# Enable full keyboard access for all controls
# (e.g. enable Tab in modal dialogs)
defaults write NSGlobalDomain AppleKeyboardUIMode -int 3
# Disable auto-correct
defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false
# Disable press-and-hold for keys in favor of key repeat
defaults write NSGlobalDomain ApplePressAndHoldEnabled -bool false
# Set mouse tracking speed (used in conjuction with SmoothMouse and LCC)
# defaults write NSGlobalDomain com.apple.mouse.scaling -float 0
# Set scrolling speed
defaults write NSGlobalDomain com.apple.scrollwheel.scaling -float 0.3125
# Set doubleclick threshold
defaults write NSGlobalDomain com.apple.mouse.doubleClickThreshold -float 0.5
# Set scrolling direction to standard behavior
defaults write NSGlobalDomain com.apple.swipescrolldirection -bool false
# Set faster key repeat and delay until repeat
defaults write NSGlobalDomain InitialKeyRepeat -int 15
defaults write NSGlobalDomain KeyRepeat -int 2
# Change shortcut for "Cycle through windows" and "Move focus to next window"
# to ⌘“ (works with Croatian keyboard)
defaults write NSGlobalDomain NSUserKeyEquivalents -dict-add "Cycle Through Windows" "@\U201C"
/usr/libexec/PlistBuddy -c "Set :AppleSymbolicHotKeys:27:value:parameters:0 8220" ~/Library/Preferences/com.apple.symbolichotkeys.plist
/usr/libexec/PlistBuddy -c "Set :AppleSymbolicHotKeys:27:value:parameters:1 10" ~/Library/Preferences/com.apple.symbolichotkeys.plist
# ==============================================================================
# Screen
# ==============================================================================
# Require password immediately after sleep or screen saver begins
defaults write com.apple.screensaver askForPassword -int 1
defaults write com.apple.screensaver askForPasswordDelay -int 0
# Save screenshots to the desktop
defaults write com.apple.screencapture location -string "$HOME/Desktop"
# Save screenshots in PNG format
defaults write com.apple.screencapture type -string "png"
# Disable shadow in screenshots
defaults write com.apple.screencapture disable-shadow -bool true
# Enable subpixel font rendering on non-Apple LCDs
defaults write NSGlobalDomain AppleFontSmoothing -int 2
# ==============================================================================
# Finder
# ==============================================================================
# Open home directory by default
defaults write com.apple.finder NewWindowTarget PfHm
# Disable window animations and Get Info animations
defaults write com.apple.finder DisableAllAnimations -bool true
# Show icons for external hard drives, servers, and removable media on the desktop
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowMountedServersOnDesktop -bool true
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true
# Show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
# Disable the warning when changing a file extension
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
# Show status bar
defaults write com.apple.finder ShowStatusBar -bool true
# Allow text selection in Quick Look
defaults write com.apple.finder QLEnableTextSelection -bool true
# Display full POSIX path as Finder window title
defaults write com.apple.finder _FXShowPosixPathInTitle -bool true
# When performing a search, search the current folder by default
defaults write com.apple.finder FXDefaultSearchScope -string "SCcf"
# Avoid creating .DS_Store files on network volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
# Disable disk image verification
defaults write com.apple.frameworks.diskimages skip-verify -bool true
defaults write com.apple.frameworks.diskimages skip-verify-locked -bool true
defaults write com.apple.frameworks.diskimages skip-verify-remote -bool true
# Enable snap-to-grid for icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
# Use list view in all Finder windows by default
defaults write com.apple.finder FXPreferredViewStyle -string "Nlsv"
# Enable AirDrop over Ethernet and on unsupported Macs running Lion
defaults write com.apple.NetworkBrowser BrowseAllInterfaces -bool true
# Show the ~/Library folder
chflags nohidden ~/Library
# Enable spring loading for directories
defaults write NSGlobalDomain com.apple.springing.enabled -bool true
# Remove the spring loading delay for directories
defaults write NSGlobalDomain com.apple.springing.delay -float 0
# Expand the following File Info panes:
# “General”, “Open with”, and “Sharing & Permissions”
defaults write com.apple.finder FXInfoPanesExpanded -dict \
General -bool true \
OpenWith -bool true \
Privileges -bool true
# Add some convenient keyboard shortcuts
defaults write com.apple.finder NSUserKeyEquivalents -dict-add "Back" "@\U2190"
defaults write com.apple.finder NSUserKeyEquivalents -dict-add "Forward" "@\U2192"
# ==============================================================================
# Dock
# ==============================================================================
# Enable spring loading for all Dock items
defaults write com.apple.dock enable-spring-load-actions-on-all-items -bool true
# Don't show indicator lights for open applications in the Dock
defaults write com.apple.dock show-process-indicators -bool false
# Don’t animate opening applications from the Dock
defaults write com.apple.dock launchanim -bool false
# Use scale animation when minimzing applications
defaults write com.apple.dock mineffect -string "scale"
# Automatically hide and show the Dock
defaults write com.apple.dock autohide -bool true
# Make Dock icons of hidden applications translucent
defaults write com.apple.dock showhidden -bool true
# ==============================================================================
# Terminal
# ==============================================================================
# Only use UTF-8 in Terminal.app
defaults write com.apple.Terminal StringEncodings -array 4
# Add some convenient keyboard shortcuts
defaults write com.apple.Terminal NSUserKeyEquivalents -dict-add "Return to Default Size" "@~/"
defaults write com.apple.Terminal NSUserKeyEquivalents -dict-add "Select Next Tab" "^\U21E5"
defaults write com.apple.Terminal NSUserKeyEquivalents -dict-add "Select Previous Tab" "^$\U21E5"
# ==============================================================================
# TextEdit
# ==============================================================================
# Use plain text mode for new TextEdit documents
defaults write com.apple.TextEdit RichText -int 0
# Open and save files as UTF-8 in TextEdit
defaults write com.apple.TextEdit PlainTextEncoding -int 4
defaults write com.apple.TextEdit PlainTextEncodingForWrite -int 4
# Change monospace typeface to Consolas
# N.B. This is commented out as there is a possibility that system
# doesn't have this typeface and we want to avoid potential problems
# defaults write com.apple.TextEdit NSFixedPitchFont -string "Consolas"
# defaults write com.apple.TextEdit NSFixedPitchFontSize -int 16
# ==============================================================================
# Mac App Store
# ==============================================================================
# Enable the WebKit Developer Tools in the Mac App Store
defaults write com.apple.appstore WebKitDeveloperExtras -bool true
# Enable Debug Menu in the Mac App Store
defaults write com.apple.appstore ShowDebugMenu -bool true
# ==============================================================================
# Messages
# ==============================================================================
# Disable smart quotes as it’s annoying for messages that contain code
defaults write com.apple.messageshelper.MessageController SOInputLineSettings -dict-add "automaticQuoteSubstitutionEnabled" -bool false
# Disable continuous spell checking
defaults write com.apple.messageshelper.MessageController SOInputLineSettings -dict-add "continuousSpellCheckingEnabled" -bool false
# ==============================================================================
# Full cleanup
# ==============================================================================
# Fill keyboard shortcuts GUI with custom defined shortcuts
defaults write com.apple.universalaccess com.apple.custommenu.apps -array "NSGlobalDomain" "com.apple.finder" "com.apple.Terminal" "com.apple.mail"
# Kill affected applications
for app in "Address Book" "Dashboard" "Dock" "Finder" "iTunes" "Messages" "SystemUIServer" "Terminal";
do
killall "$app" > /dev/null 2>&1
done
echo "Done. Note that some of these changes require a logout/restart to take effect."
| true
|
2ba88423b8f590bfbbeefd67173eb4d52a081448
|
Shell
|
Aniverse/inexistence
|
/00.Installation/script/zuozhong
|
UTF-8
| 4,361
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# https://github.com/Aniverse/inexistence
# Author: Aniverse
################################################################################################
script_update=2020.03.29
script_version=r11002
outputpath="/log/torrents"
################################################################################################
black=$(tput setaf 0); red=$(tput setaf 1); green=$(tput setaf 2); yellow=$(tput setaf 3); blue=$(tput setaf 4)
magenta=$(tput setaf 5); cyan=$(tput setaf 6); white=$(tput setaf 7); bold=$(tput bold); normal=$(tput sgr0); jiacu=${normal}${bold}
[[ -z $1 ]] && { echo -e "\n${red}${bold}WARNING${jiacu} You must input a path to your file${normal}\n" ; exit 1 ; }
piece_size=$2
[[ -z $piece_size ]] && piece_size=24
#[[ $3 == -d ]] && DeBUG=1
[[ ! $(which mktorrent) ]] && { echo -e "No mktorrent found in PATH!" ; exit 1; }
################################################################################################
mkdir -p $outputpath
filepath=`echo "$1"`
file_title=$(basename "$filepath")
file_title_clean="$(echo "$file_title" | tr '[:space:]' '.')"
file_title_clean="$(echo "$file_title_clean" | sed s'/[.]$//')"
file_title_clean="$(echo "$file_title_clean" | tr -d '(')"
file_title_clean="$(echo "$file_title_clean" | tr -d ')')"
file_title_clean="` echo "$file_title_clean" | sed 's/\//\./' `"
# Ask for Tracker
echo -e "
01) ${cyan}Create a new torrent with empty announce${normal}
02) ${cyan}Create a new torrent and specify an announce${normal}
11) ${cyan}Create a new torrent for HD-Torrents${normal}
12) ${cyan}Create a new torrent for Classix-Unlimited${normal}
99) ${cyan}Create a new torrent for public trackers${normal}"
echo -ne "${yellow}${bold}Which tracker would you like to use?${normal} (Default: ${cyan}01${normal}) "; read -e responce
case $responce in
01 | 1 ) ANNOUNCE="-a \"\""
echo -e "The script will create a new torrent with empty announce" ;;
02 | 2 ) echo "${yellow}${bold}" ; read -e -p "Input your tracker announce: ${normal}${blue}" TRACKERA ; echo "${normal}" ; ANNOUNCE="-a $TRACKERA"
echo -e "The script will create a new torrent with the announce you input" ;;
11 ) ANNOUNCE="-a http://hdts-announce.ru/announce.php"
echo -e "The script will create a new torrent with HD-Torrents' announce" ;;
12 ) ANNOUNCE="-a http://classix-unlimited.co.uk/announce.php"
echo -e "The script will create a new torrent with Classix-Unlimited' announce" ;;
99 ) ANNOUNCE="-a udp://tracker.coppersurfer.tk:6969/announce -a http://open.kickasstracker.com:80/announce -a http://bt.dl1234.com:80/announce -a udp://tracker.safe.moe:6969/announce -a udp://9.rarbg.to:2710/announce -a udp://tracker.piratepublic.com:1337/announce -a http://tracker.opentrackr.org:1337/announce -a http://retracker.telecom.by:80/announce -a https://open.acgnxtracker.com:443/announce -a udp://tracker.xku.tv:6969/announce -a udp://thetracker.org:80/announce -a udp://bt.xxx-tracker.com:2710/announce -a http://0d.kebhana.mx:443/announce -a http://share.camoe.cn:8080/announce -a udp://inferno.demonoid.pw:3418/announce -a udp://tracker.cypherpunks.ru:6969/announce"
echo -e "The script will create a new torrent with public trackers' announce" ;;
"" | * ) newtorrent=Yes; ANNOUNCE="-a \"\"" ;;
esac
echo
starttime=$(date +%s)
#[[ $DeBUG == 1 ]] && echo "ANNOUNCE=$ANNOUNCE"
mktorrent -v -p -l $piece_size $ANNOUNCE -o "${outputpath}/$file_title_clean.torrent" "$filepath"
if [ ! $? -eq 0 ];then exit 1; else
endtime=$(date +%s)
timeused=$(( $endtime - $starttime ))
clear
echo -e "${bold}Done. Created torrent is stored in ${yellow}\"${outputpath}\"${normal}"
if [[ $timeused -gt 60 && $timeused -lt 3600 ]]; then
timeusedmin=$(expr $timeused / 60)
timeusedsec=$(expr $timeused % 60)
echo -e "${bold}Time used ${timeusedmin} min ${timeusedsec} sec${normal}"
elif [[ $timeused -ge 3600 ]]; then
timeusedhour=$(expr $timeused / 3600)
timeusedmin=$(expr $(expr $timeused % 3600) / 60)
timeusedsec=$(expr $timeused % 60)
echo -e "${bold}Time used ${timeusedhour} hour ${timeusedmin} min ${timeusedsec} sec${normal}"
else
echo -e "${bold}Time used ${timeused} sec${normal}"
fi
echo
fi
# Development helper (defined but never called by this script): truncates and
# re-opens the installed `zuozhong` script for editing, clears any generated
# .torrent files, then re-runs the script on a dummy file.
# NOTE(review): `echo -n > $(which zuozhong)` destroys the installed script
# before editing -- intentionally destructive, for the author's workflow only.
debug_used() {
echo -n > $(which zuozhong) ; nano $(which zuozhong)
rm -f *torrent
touch 1
zuozhong 1
}
| true
|
b8994c837d87d51f46da19e5aae3ac401b27d917
|
Shell
|
samip5/archlinux-docker
|
/pacstrap-docker
|
UTF-8
| 822
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# out PREFIX FORMAT [ARGS...] - printf helper; the first two arguments are
# joined into the format string, the remaining arguments are its operands.
out() {
  printf "$1 $2\n" "${@:3}"
}
# error FORMAT [ARGS...] - like out, prefixed with "==> ERROR:"; the trailing
# redirection on the definition sends every invocation's output to stderr.
error() {
  out "==> ERROR:" "$@"
} >&2
# die FORMAT [ARGS...] - print an error and abort the script with status 1.
die() {
  error "$@"
  exit 1
}
# Usage: pacstrap-docker <newroot> [packages...]   (packages default to "base")
(( $# )) || die "No root directory specified"
newroot=$1; shift
# Everything after the root path is the package list; fall back to "base".
pacman_args=("${@:-base}")
# pacman needs root to create device nodes and set ownership in the new root.
if [[ $EUID -ne 0 ]]; then
die "This script must be run as root"
fi
[[ -d $newroot ]] || die "%s is not a directory" "$newroot"
printf 'Creating install root at %s\n' "$newroot"
# Minimal filesystem skeleton pacman expects: package cache, local db, logs.
mkdir -m 0755 -p "$newroot"/var/{cache/pacman/pkg,lib/pacman,log} "$newroot"/{dev,run,etc}
mkdir -m 1777 -p "$newroot"/tmp
mkdir -m 0555 -p "$newroot"/{sys,proc}
# gnupg install script needs /dev/null
ln -s /dev/null "$newroot"/dev/null
printf 'Installing packages to %s \n' "$newroot"
# -r: operate on the alternate root; -Sy: refresh databases then install.
if ! pacman -r "$newroot" -Sy --noconfirm "${pacman_args[@]}"; then
die 'Failed to install packages to new root'
fi
# Remove the temporary /dev/null symlink created above.
rm "$newroot"/dev/null
| true
|
e60b122894b4f92cfbdc71ba340b3bd9a4299ce5
|
Shell
|
pradiptapks/nfv-sdn-troubleshooting
|
/ovn-dvr1.sh
|
UTF-8
| 2,502
| 3.296875
| 3
|
[] |
no_license
|
echo "Enter OVN Controller Node IP"; read ovn_node;
echo "Enter OVN Compute Node IP"; read comp_node;
echo "Enter Neutron External Network ID"; read ext_id;
echo "Enter External OVS Bridge name"; read br_name;
file=/tmp/ovn-dvr-content.txt
>$file
echofun() {
echo -e "\n\n$1" | tee -a $file
}
source /home/stack/overcloudrc
source /home/stack/stackrc
if [ ! `openstack server list | grep $ovn_node | awk '{print $8}'| cut -d \= -f 2` ]; then
echo "Controller doesn't exist." $ovn_node
exit 1
else
run_cmd(){
host_name=`eval 'ssh heat-admin@'$ovn_node' sudo hostname -f'`
NB=`eval 'ssh heat-admin@'$ovn_node' sudo grep ^[^#] /var/lib/config-data/puppet-generated/neutron/etc/neutron/plugins/ml2/ml2_conf.ini | grep connection= | cut -d = -f 2 | grep 6641'`
SB=`eval 'ssh heat-admin@'$ovn_node' sudo grep ^[^#] /var/lib/config-data/puppet-generated/neutron/etc/neutron/plugins/ml2/ml2_conf.ini | grep connection= | cut -d = -f 2 | grep 6642'`
LR_LIST=`eval 'ssh heat-admin@'$ovn_node' sudo ovn-nbctl --db=$NB lr-list | cut -d \( -f 2| cut -d \) -f 1'`
LRP_BIND=`eval 'ssh heat-admin@'$ovn_node' sudo ovn-sbctl --db=$SB find Port_Binding type=chassisredirect | grep options | cut -d \" -f 2'`
echofun "[heat-admin@'$host_name'~]$ $1 "
eval 'ssh heat-admin@'$ovn_node' sudo $1' | tee -a $file
}
fi
if [ ! `openstack server list | grep $comp_node | awk '{print $8}'| cut -d \= -f 2` ]; then
echo "Compute Node doesn't exist" $comp_node
exit 1
else
ovn_trace(){
comp_name=`eval 'ssh heat-admin@'$comp_node' sudo hostname -f'`
br_mac=`eval 'ssh heat-admin@'$comp_node' sudo ifconfig br-ex | grep ether | awk '{print $2}''`
br_ip=`eval 'ssh heat-admin@'$comp_node' sudo ifconfig br-ex | grep netmask | awk '{print $2}''`
echofun "[heat-admin@'$comp_name'~]$ $1 "
}
fi
run_cmd "sleep 10"
source /home/stack/overcloudrc
for i in `openstack network list -c ID -f value` `openstack router list -c ID -f value`
do
run_cmd "ovn-nbctl --db=$NB show neutron-$i"
done
run_cmd "ovn-sbctl --db=$SB list chassis | grep -A1 hostname"
run_cmd "ovn-nbctl --db=$NB list Logical_Router_port"
run_cmd "ovn-nbctl --db=$NB lr-list"
run_cmd "ovn-nbctl --db=$NB lrp-list $LR_LIST"
run_cmd "ovn-sbctl --db=$SB show"
run_cmd "ovn-sbctl --db=$SB list Port_Binding"
run_cmd "ovn-sbctl --db=$SB find Port_Binding type=chassisredirect"
run_cmd "ovn-nbctl --db=$NB lrp-get-gateway-chassis $LRP_BIND"
run_cmd "ovn-nbctl --db=$NB lr-nat-list $LR_LIST"
run_cmd "ovn-nbctl --db=$NB list Gateway_Chassis"
| true
|
acd56a832218e15107a83eb4bef7b6f3bc850ef1
|
Shell
|
metral/rax_config
|
/ubuntu_12.04/setup_xentools.sh
|
UTF-8
| 351
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Citrix XenServer tools by forcing the Ubuntu 12.04 installer path
# (xs-tools 6.2.0 only supports up to 12.04 -- see comment below).
wget http://boot.rackspace.com/files/xentools/xs-tools-6.2.0.iso
mkdir xentmp
# Loop-mount the ISO so its bundled installer tree becomes accessible.
mount -o loop xs-tools-6.2.0.iso xentmp
pushd xentmp/Linux
# Force install (as 12.04 even though this is 14.04)
# since xenserver tools only supports upto ubuntu 12.04
os_minorver="04" ./install.sh -d "ubuntu" -m "12" -n
popd
# Lazy unmount (-l): detach even if something still holds the mount busy.
umount -l xentmp
rm -rf xentmp
| true
|
dbe763f45bb959e4d45be622eecef4d0c6d81a3c
|
Shell
|
Tesora/tesora-config
|
/install_puppet.sh
|
UTF-8
| 6,071
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Copyright 2013 OpenStack Foundation.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Test condition to install puppet 3
PUPPET_VERSION=${PUPPET_VERSION:-3}
if [ "$PUPPET_VERSION" = '3' ]; then
THREE=yes
echo "Running in 3 mode"
fi
#
# Distro identification functions
# note, can't rely on lsb_release for these as we're bare-bones and
# it may not be installed yet)
# True when this host looks like Fedora: yum is installed and a release
# file mentions "Fedora".
function is_fedora {
    # grep the release files directly instead of `cat | grep` (UUOC).
    [ -f /usr/bin/yum ] && grep -q -e "Fedora" /etc/*release
}
# True when this host is RHEL/CentOS 6: yum installed, release file names
# "Red Hat" or "CentOS", and declares release 6.
function is_rhel6 {
    # grep the release files directly instead of `cat | grep` (UUOC).
    [ -f /usr/bin/yum ] && \
        grep -q -e "Red Hat" -e "CentOS" /etc/*release && \
        grep -q 'release 6' /etc/*release
}
# True when this host is RHEL/CentOS 7: yum installed, release file names
# "Red Hat" or "CentOS", and declares release 7.
function is_rhel7 {
    # grep the release files directly instead of `cat | grep` (UUOC).
    [ -f /usr/bin/yum ] && \
        grep -q -e "Red Hat" -e "CentOS" /etc/*release && \
        grep -q 'release 7' /etc/*release
}
# True when apt-get is present, i.e. a Debian/Ubuntu-style system.
function is_ubuntu {
    test -f /usr/bin/apt-get
}
#
# Distro specific puppet installs
#
function setup_puppet_fedora {
yum update -y
# NOTE: we preinstall lsb_release to ensure facter sets
# lsbdistcodename
yum install -y redhat-lsb-core git puppet
mkdir -p /etc/puppet/modules/
if [ "$THREE" != 'yes' ]; then
gem install hiera hiera-puppet
ln -s /usr/local/share/gems/gems/hiera-puppet-* /etc/puppet/modules/
fi
# Puppet expects the pip command named as pip-python on
# Fedora, as per the packaged command name. However, we're
# installing from get-pip.py so it's just 'pip'. An easy
# work-around is to just symlink pip-python to "fool" it.
# See upstream issue:
# https://tickets.puppetlabs.com/browse/PUP-1082
ln -fs /usr/bin/pip /usr/bin/pip-python
}
function setup_puppet_rhel7 {
local epel_pkg="http://dl.fedoraproject.org/pub/epel/beta/7/x86_64/epel-release-7-0.2.noarch.rpm"
local puppet_pkg="https://yum.puppetlabs.com/el/7/products/x86_64/puppetlabs-release-7-10.noarch.rpm"
# install EPEL
rpm -qi epel-release &> /dev/null || rpm -Uvh $epel_pkg
# NOTE: we preinstall lsb_release to ensure facter sets lsbdistcodename
yum install -y redhat-lsb-core git puppet
rpm -ivh $puppet_pkg
# see comments in setup_puppet_fedora
ln -s /usr/bin/pip /usr/bin/pip-python
}
function setup_puppet_rhel6 {
local epel_pkg="http://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"
local puppet_pkg="http://yum.puppetlabs.com/el/6/products/x86_64/puppetlabs-release-6-6.noarch.rpm"
# install EPEL
rpm -qi epel-release &> /dev/null || rpm -Uvh $epel_pkg
# NOTE: for RHEL (not CentOS) enable the optional-rpms channel (if
# not already enabled)
# yum-config-manager --enable rhel-6-server-optional-rpms
# NOTE: we preinstall lsb_release to ensure facter sets lsbdistcodename
yum install -y redhat-lsb-core git puppet
rpm -ivh $puppet_pkg
# ensure we stick to supported puppet 2 versions
cat > /etc/yum.repos.d/puppetlabs.repo <<"EOF"
[puppetlabs-products]
name=Puppet Labs Products El 6 - $basearch
baseurl=http://yum.puppetlabs.com/el/6/products/$basearch
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs
enabled=1
gpgcheck=1
EOF
if [ "$THREE" != 'yes' ]; then
echo 'exclude=puppet-2.8* puppet-2.9* puppet-3* facter-2*' >> /etc/yum.repos.d/puppetlabs.repo
fi
yum update -y
}
function setup_puppet_ubuntu {
lsbdistcodename=`lsb_release -c -s`
if [ $lsbdistcodename != 'trusty' ] ; then
rubypkg=rubygems
else
rubypkg=ruby
THREE=yes
fi
# NB: keep in sync with openstack_project/files/00-puppet.pref
if [ "$THREE" == 'yes' ]; then
PUPPET_VERSION=3.*
PUPPETDB_VERSION=2.*
FACTER_VERSION=2.*
else
PUPPET_VERSION=2.7*
PUPPETDB_VERSION=1.*
FACTER_VERSION=1.*
fi
cat > /etc/apt/preferences.d/00-puppet.pref <<EOF
Package: puppet puppet-common puppetmaster puppetmaster-common puppetmaster-passenger
Pin: version $PUPPET_VERSION
Pin-Priority: 501
Package: puppetdb puppetdb-terminus
Pin: version $PUPPETDB_VERSION
Pin-Priority: 501
Package: facter
Pin: version $FACTER_VERSION
Pin-Priority: 501
EOF
puppet_deb=puppetlabs-release-${lsbdistcodename}.deb
wget http://apt.puppetlabs.com/$puppet_deb -O $puppet_deb
dpkg -i $puppet_deb
rm $puppet_deb
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get --option 'Dpkg::Options::=--force-confold' \
--assume-yes dist-upgrade
DEBIAN_FRONTEND=noninteractive apt-get --option 'Dpkg::Options::=--force-confold' \
--assume-yes install -y --force-yes puppet git $rubypkg
}
#
# pip setup
#
function setup_pip {
# Install pip using get-pip
local get_pip_url=https://bootstrap.pypa.io/get-pip.py
local ret=1
if [ -f ./get-pip.py ]; then
ret=0
elif type curl >/dev/null 2>&1; then
curl -O $get_pip_url
ret=$?
elif type wget >/dev/null 2>&1; then
wget $get_pip_url
ret=$?
fi
if [ $ret -ne 0 ]; then
echo "Failed to get get-pip.py"
exit 1
fi
if is_rhel6; then
yum erase -y python-setuptools
rm -rf /usr/lib/python2.6/site-packages/setuptools*
fi
python get-pip.py
pip install -U setuptools
}
setup_pip
if is_fedora; then
setup_puppet_fedora
elif is_rhel6; then
setup_puppet_rhel6
elif is_rhel7; then
setup_puppet_rhel7
elif is_ubuntu; then
setup_puppet_ubuntu
else
echo "*** Can not setup puppet: distribution not recognized"
exit 1
fi
| true
|
6c52750fcc6cb73100b705a527d04bf37c9b41ed
|
Shell
|
crzmp3/linux_lab2
|
/weather.sh
|
UTF-8
| 219
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Poll meteo.by forever and print the current temperature each cycle.
# Requires: httpie (`http`), a config.ini containing a `timeout=<seconds>`
# line, and write access to the working dir (url.txt / url_new.txt scratch).
while true; do
# Fetch the page body only (-b) into a scratch file.
http -b http://meteo.by > url.txt
# Keep the temperature block: the <p class="t "> tag plus the 7 lines after it.
grep -n -A 7 '<p class="t ">' url.txt > url_new.txt
# Line 7 of that block carries the value; field 2 is the temperature itself.
# NOTE(review): tied to the site's exact markup -- fragile if the page changes.
sed -n 7p url_new.txt | awk '{print $2}'
# Re-read the interval every cycle so edits to config.ini take effect live.
sleep $(awk -F "=" '/timeout/ {print $2}' config.ini)
done
| true
|
1735ee819411379052a21fa2335270dddc89d230
|
Shell
|
B-Translator/old_btr_server
|
/modules/custom/btrCore/data/get/mandriva.sh
|
UTF-8
| 552
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Export the .po translation files for our languages from the Mandriva
# "soft" subversion repository into the mandriva data directory.
echo "===== GETTING MANDRIVA ====="
cd "$(dirname "$0")"
. ./inc.sh
change_dir mandriva
### get a list of .po files for our languages
svn_url=http://svn.mandriva.com/svn/soft
svn ls -R $svn_url > svn_mandriva.txt
# Turn "en fr de" into "en\|fr\|de" for the grep alternation below.
langs=$(echo $languages | sed -e 's/ /\\|/g')
cat svn_mandriva.txt | grep -e "\($langs\)\.po" > svn_mandriva_po.txt
### export them from the svn repository
# -r so backslashes in paths are not mangled by read.
# Fixed: the original had a stray duplicated filename after `done <`,
# which is a bash syntax error and made the loop unrunnable.
while read -r file
do
    dir=$(dirname "$file")
    mkdir -p "$dir"
    svn export $svn_url/"$file" "$file"
done < svn_mandriva_po.txt
### cleanup
rm svn_mandriva.txt
| true
|
baaa3c7d5ff5d9eb48bc7f80536a78b136a614ac
|
Shell
|
wang-xinzhi/junto
|
/bin/junto
|
UTF-8
| 1,338
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
JARS=`echo $JUNTO_DIR/lib/*.jar $JUNTO_DIR/target/*.jar $JUNTO_DIR/lib_managed/compile/*.jar | tr ' ' ':'`
SCALA_LIB="$HOME/.sbt/boot/scala-2.9.1/lib/scala-library.jar"
JARS_MANAGED=
if [ -e $JUNTO_DIR/lib_managed ]
then
JARS_MANAGED=`find $JUNTO_DIR/lib_managed -name '*.jar' -print | tr '\n' ':'`
fi
CP=$JUNTO_DIR/target/classes:$JARS:$JARS_MANAGED:$SCALA_LIB:$CLASSPATH
if [ -z $JAVA_MEM_FLAG ]
then
JAVA_MEM_FLAG=-Xmx2g
fi
JAVA_COMMAND="java $JAVA_MEM_FLAG -classpath $CP"
CMD=$1
shift
# Print the command summary for the junto launcher.
help()
{
    printf '%s\n' \
        'Junto 1.2.x commands:' \
        'build build Junto with SBT' \
        'config run Junto on the specified config file' \
        'extract extract distributions from Junto output' \
        'run run the main method of a given class' \
        'Include --help with any option for more information'
}
if [ $CMD = 'build' ]; then
if test -f ~/.sbtconfig; then
. ~/.sbtconfig
fi
java -Dfile.encoding=UTF8 -Xmx1536M -Xss1M -XX:+CMSClassUnloadingEnabled -XX:MaxPermSize=256m ${SBT_OPTS} -jar $JUNTO_DIR/bin/sbt-launch-*.jar "$@"
else
CLASS=
case $CMD in
config) CLASS=upenn.junto.app.JuntoConfigRunner;;
extract) CLASS=upenn.junto.app.OutputExtractor;;
run) CLASS=$1; shift;;
help) help; exit 1;;
*) echo "Unrecognized command: $CMD"; help; exit 1;;
esac
$JAVA_COMMAND $CLASS $*
fi
| true
|
0801ed52a306205cafa057be9a64cf8e3887d00e
|
Shell
|
brydavis/fizzbuzz
|
/main.sh
|
UTF-8
| 265
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# fizzbuzz N: print the FizzBuzz sequence for 0..N inclusive, one per line.
# Note: the sequence deliberately starts at 0 (matching the original), so
# the first line is "FizzBuzz" since 0 is divisible by both 3 and 5.
fizzbuzz () {
    local i
    # C-style loop instead of spawning `seq`; arithmetic via (( )) not test.
    for (( i = 0; i <= $1; i++ )); do
        if (( i % 15 == 0 )); then      # divisible by both 3 and 5
            echo "FizzBuzz"
        elif (( i % 3 == 0 )); then
            echo "Fizz"
        elif (( i % 5 == 0 )); then
            echo "Buzz"
        else
            echo "$i"
        fi
    done
}
fizzbuzz 100
| true
|
c3af921307d0404125a8615e087f0e61f3a326ee
|
Shell
|
jbyck/dotfiles
|
/heroku/install.sh
|
UTF-8
| 194
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Heroku
#
# This installs the Heroku toolbelt if the `heroku` command is not already
# on PATH. `command -v` is the portable existence check; the original
# `test ! $(which heroku)` only worked by accident (with heroku absent it
# ran `test !`, a one-argument test that happens to be true).
if ! command -v heroku >/dev/null 2>&1
then
  echo "  Installing Heroku for you."
  # NOTE(review): piping a remote script straight into sh runs untrusted
  # code; kept as-is because it is the vendor's documented install method.
  wget -qO- https://toolbelt.heroku.com/install.sh | sh
fi
exit 0
| true
|
05de23a1c41747674a4d82d9472524b195b33db6
|
Shell
|
jacobkahn/dotfiles
|
/zsh/aliases.zsh
|
UTF-8
| 996
| 3.125
| 3
|
[] |
no_license
|
# Easier navigation: .., ..., ...., .....
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias .....="cd ../../../.."
##### Shortcuts #####
# Navigation
alias d="cd ~/Documents/Dropbox"
alias dl="cd ~/Downloads"
alias dt="cd ~/Desktop"
alias p="$PROJECTS"
# Chrome
alias chrome='/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome'
alias canary='/Applications/Google\ Chrome\ Canary.app/Contents/MacOS/Google\ Chrome\ Canary'
# Editors
alias e='emacs -nw'
##### Utilities #####
# Merge PDF files
# Usage: `mergepdf -o output.pdf input{1,2,3}.pdf`
alias mergepdf='/System/Library/Automator/Combine\ PDF\ Pages.action/Contents/Resources/join.py'
# Print each PATH entry on a separate line
alias path='echo -e ${PATH//:/\\n}'
# Trash/rm - use trash instead of rm on macOS
if [[ "$OSTYPE" == "darwin"* ]]; then
alias rm='trash'
fi
# Reload the terminal profile
alias reload="source ~/.zshrc"
# Modify my dotfiles in my editor
alias dotfiles="$EDITOR $DOTFILES"
| true
|
f016e6b70a66efb1cd0559d1f0feb0be76a42f4b
|
Shell
|
espasov/read-mapping-code-amoA
|
/read_mapping_amoA.sh
|
UTF-8
| 8,681
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# code for performing read-mapping of metagenomic reads to specific amoA sequences to determine relative abundance of amoA genes for Nitrospira
# The code is modified from Jackson Tsuji's ATLAS co-assembly code available at https://github.com/jmtsuji/atlas-extensions/blob/master/co-assembly.sh
#read_mapping_amoA.sh
#Written by Emilie Spasov, Neufeld Lab, Univeristy of Waterloo, May 2018
# Fail fast: abort on errors, unset variables, and mid-pipeline failures.
set -e
set -u
set -o pipefail
#see chapter 12 pg 397 in Vince Buffalo's Bioinformatics Data skills for bash scripting background
if [ "$#" -lt 5 ] #check if less than 5 arguments
then
echo "error: too few arguments, you provided $#, 5 required"
echo "useage: read_mapping_amoA.sh altas_directory guide_filepath mapping_directory mapping_references_seq threads"
exit 1
fi
echo "script name: $0"
echo "atlas directory: $1"
echo "guide filepath: $2"
echo "mapping directory: $3"
echo "mapping reference sequences: $4"
echo "threads: $5"
# Atlas_output_dir: Path to the base directory where ATLAS files were output.
# guide_filepath: TSV file with two columns: mapping_names (names of coassembly runs); read_mapping_samples (comma-separated names of previously run ATLAS samples to read map for binning)
# mapping_dir: path to directory where want mapping files to go
# mapping_ref: fasta file with the reference sequences to map to
# threads: number of threads to run
# Set variables from user input:
ATLAS_DIR=$1
GUIDE_FILEPATH=$2
MAPPING_DIR=$3
MAPPING_REF=$4
THREADS=$5
# Get GUIDE_FILEPATH params
# tail -n +2 skips the TSV header row; the two tab-delimited columns become
# parallel bash arrays indexed by coassembly run.
mapping_names=($(tail -n +2 ${GUIDE_FILEPATH} | cut -d $'\t' -f 1))
read_mapping_samples=($(tail -n +2 ${GUIDE_FILEPATH} | cut -d $'\t' -f 2))
#Emilie's first run inputs:
#ATLAS_DIR=/Hippodrome/espasov/WWTP_metagenomics2017/180129_atlas_espasov_r1.0.22_bash_WWTP_full2/
#GUIDE_FILEPATH=/Hippodrome/espasov/WWTP_metagenomics2017/downstream_analysis_atlas/read_mapping_amoA/RBC_sample_names.tsv
#MAPPING_DIR=/Hippodrome/espasov/WWTP_metagenomics2017/downstream_analysis_atlas/read_mapping_amoA
#MAPPING_REF=/Hippodrome/espasov/WWTP_metagenomics2017/downstream_analysis_atlas/read_mapping_amoA/comammox_amoA_DNA_cluster99_clusternames.fasta
#THREADS=12
function read_map_to_coassemblies {
# Description: iteratively maps read_mapping_samples to curated amoA sequences (RBC groups) like done within ATLAS
# GLOBAL Params: OUTPUT_DIR; THREADS; mapping_names (array); read_mapping_samples (array)
# Return: writes files/directories to disk
# Per sample: bbwrap.sh aligns QC'd reads to MAPPING_REF (SAM out),
# pileup.sh derives coverage statistics, samtools converts/sorts to BAM.
echo "Read mapping to supplied sequences..."
# Manually add additional settings needed for scripts.
# TODO - pull these settings (at least MEMORY) from the .yaml file!
local MEMORY=65 #TODO
for i in $(seq 1 ${#mapping_names[@]}); do
# Set counter to be based on zero, not one
local j=$((${i}-1))
# Make relevant directories for storing output
mkdir -p ${MAPPING_DIR}/contig_stats
mkdir -p ${MAPPING_DIR}/logs
# Temporarily change the internal fields separator (IFS) to parse comma separators. See Vince Buffalo's "Bioinformatics Data Skills" (1st Ed.) chapter 12, pg 407 and corresponding Github page README at https://github.com/vsbuffalo/bds-files/tree/master/chapter-12-pipelines (accessed Nov 19, 2017)
local OFS="$IFS"
IFS=,
# Get names of individual samples provided for that coassembly name
local mapping_sample_IDs=(${read_mapping_samples[${j}]}) #read_mapping_samples is a comma separated file with the names of the samples
# Fix the IFS
IFS="$OFS"
echo "${mapping_sample_IDs[@]}"
# Read map iteratively for each mapping ID
for mapping in ${mapping_sample_IDs[@]}; do
# TODO - pull more settings from .yaml file (these are FIXED right now)
# Each external command is echoed first so the log records the exact invocation.
echo "rule align_reads_to_amoA_seq (${mapping}):"
echo "bbwrap.sh nodisk=t ref=${MAPPING_REF} in1=${ATLAS_DIR}/${mapping}/sequence_quality_control/${mapping}_QC_R1.fastq.gz,${ATLAS_DIR}/${mapping}/sequence_quality_control/${mapping}_QC_se.fastq.gz in2=${ATLAS_DIR}/${mapping}/sequence_quality_control/${mapping}_QC_R2.fastq.gz,null outm=${MAPPING_DIR}/${mapping}.sam threads=${THREADS} trimreaddescriptions=t pairlen=1000 pairedonly=f mdtag=t xstag=fs nmtag=t sam=1.3 local=t ambiguous=best secondary=t ssao=t maxsites=10 -Xmx${MEMORY}G 2> ${MAPPING_DIR}/logs/contig_coverage_stats_${mapping}.log"
echo ""
bbwrap.sh nodisk=t ref=${MAPPING_REF} in1=${ATLAS_DIR}/${mapping}/sequence_quality_control/${mapping}_QC_R1.fastq.gz,${ATLAS_DIR}/${mapping}/sequence_quality_control/${mapping}_QC_se.fastq.gz in2=${ATLAS_DIR}/${mapping}/sequence_quality_control/${mapping}_QC_R2.fastq.gz,null outm=${MAPPING_DIR}/${mapping}.sam threads=${THREADS} trimreaddescriptions=t pairlen=1000 pairedonly=f mdtag=t xstag=fs nmtag=t sam=1.3 local=t ambiguous=best secondary=t ssao=t maxsites=10 -Xmx${MEMORY}G 2> ${MAPPING_DIR}/logs/contig_coverage_stats_${mapping}.log
echo ""
echo "rule pileup (${mapping}):"
echo "pileup.sh in=${MAPPING_DIR}/${mapping}.sam threads=${THREADS} -Xmx${MEMORY}G covstats=${MAPPING_DIR}/contig_stats/postfilter_coverage_stats_${mapping}.txt hist=${MAPPING_DIR}/contig_stats/postfilter_coverage_histogram_${mapping}.txt basecov=${MAPPING_DIR}/contig_stats/postfilter_base_coverage_${mapping}.txt.gz concise=t physcov=t secondary=f 2>> ${MAPPING_DIR}/logs/contig_coverage_stats_${mapping}.log"
echo ""
pileup.sh in=${MAPPING_DIR}/${mapping}.sam threads=${THREADS} -Xmx${MEMORY}G covstats=${MAPPING_DIR}/contig_stats/postfilter_coverage_stats_${mapping}.txt hist=${MAPPING_DIR}/contig_stats/postfilter_coverage_histogram_${mapping}.txt basecov=${MAPPING_DIR}/contig_stats/postfilter_base_coverage_${mapping}.txt.gz concise=t physcov=t secondary=f 2>> ${MAPPING_DIR}/logs/contig_coverage_stats_${mapping}.log
echo ""
echo "rule convert_sam_to_bam (${mapping}):"
echo "samtools view -@ ${THREADS} -u ${MAPPING_DIR}/${mapping}.sam | samtools sort -m 4G -@ ${THREADS} -T ${MAPPING_DIR}/${mapping}_tmp -o ${MAPPING_DIR}/${mapping}.bam -O bam"
echo ""
samtools view -@ ${THREADS} -u ${MAPPING_DIR}/${mapping}.sam | samtools sort -m 4G -@ ${THREADS} -T ${MAPPING_DIR}/${mapping}_tmp -o ${MAPPING_DIR}/${mapping}.bam -O bam
echo ""
done
done
echo ""
}
function map_new_feature_counts {
# Description: runs featureCounts with multi-mapped files as input
# GLOBAL Params: MAPPING_DIR; THREADS; mapping_names (array); read_mapping_samples (array)
# Return: writes files/directories to disk
echo "Getting feature counts for individual samples onto reference sequences..."
for i in $(seq 1 ${#mapping_names[@]}); do
# Set counter to be based on zero, not one
local j=$((${i}-1))
# Make relevant directories for storing output
local fc_output_dir="${MAPPING_DIR}/feature_counts"
mkdir -p "${fc_output_dir}"
# Temporarily change the internal fields separator (IFS) to parse comma separators. See Vince Buffalo's "Bioinformatics Data Skills" (1st Ed.) chapter 12, pg 407 and corresponding Github page README at https://github.com/vsbuffalo/bds-files/tree/master/chapter-12-pipelines (accessed Nov 19, 2017)
local OFS="$IFS"
IFS=,
# Get names of individual samples provided for that coassembly name
local mapping_sample_IDs=(${read_mapping_samples[${j}]}) #read_mapping_samples is a comma separated file with the names of the samples
# Fix the IFS
IFS="$OFS"
# Build array of BAM file locations.
# BUGFIX: the original used `[ $i == 1 ]` / `elif [ $i > 1 ]` to choose
# between initialising and appending. Inside `[ ]` the `>` is an output
# redirection, so each pass created a stray file named "1" in the working
# directory instead of comparing. It also clobbered the outer loop's $i/$j.
# Initialising once and appending unconditionally is the intended behaviour.
local bam_filepaths=()
local mapping
for mapping in "${mapping_sample_IDs[@]}"; do
bam_filepaths+=("${MAPPING_DIR}/${mapping}.bam")
done
# Run featureCounts (the command is echoed first so logs show the exact call)
echo "rule run_feature_counts_multi_mapping:"
echo "featureCounts --minOverlap 1 -p -F SAF -T ${THREADS} --primary -M --fraction -R CORE -a ${MAPPING_DIR}/*.saf -o ${fc_output_dir}/gene_counts.txt ${bam_filepaths[@]} 2> ${MAPPING_DIR}/logs/featurecounts.log"
echo ""
featureCounts --minOverlap 1 -p -F SAF -T ${THREADS} --primary -M --fraction -R CORE -a ${MAPPING_DIR}/*.saf -o ${fc_output_dir}/gene_counts.txt ${bam_filepaths[@]} 2> ${MAPPING_DIR}/logs/featurecounts.log
echo ""
done
}
# Top-level driver: records wall-clock start/end times around the two
# pipeline stages (read mapping, then feature counting).
function main {
# Get date and time of start
start_time=$(date)
#run all functions
read_map_to_coassemblies
map_new_feature_counts
end_time=$(date)
echo "Started read mapping at ${start_time} and finished at ${end_time}."
echo ""
}
main
| true
|
551679ae32f2ffdc406bb545b975b845b5ad286d
|
Shell
|
KomarovEA/Zabbix-ELK
|
/Task4/logstash-install.sh
|
UTF-8
| 1,068
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
### This script installing Logstash agent on Tomcat's VM ###
### Input parameters: <Elasticsearch + Kibana server's address> <Tomcat + Logstash agent VM's address> ###
cd ~
#Install Java
yum -y -d1 install java-1.8.0-openjdk-devel
yum -y -d1 install net-tools
# Install Logstash
rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
# Write the Elastic 7.x yum repo definition (literal here-doc content).
cat <<EOFLR > /etc/yum.repos.d/logstash.repo
[logstash-7.x]
name=Elastic repository for 7.x packages
baseurl=https://artifacts.elastic.co/packages/7.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
EOFLR
yum -y -d1 install logstash
# Pipeline config: tail all Tomcat logs, ship to Elasticsearch on the host
# passed as $1 (expanded here, at install time), and echo to stdout.
cat <<EOFLSTMC > /etc/logstash/conf.d/tomcat8.conf
input {
file {
path => "/opt/tomcat/logs/*"
start_position => "beginning"
}
}
output {
elasticsearch {
hosts => ["$1:9200"]
}
stdout { codec => rubydebug }
}
EOFLSTMC
# set rights for logstash to read tomcat's logs
chmod 644 /opt/tomcat/logs/*
chmod 745 /opt/tomcat/logs/
systemctl enable logstash.service
systemctl restart logstash.service
| true
|
180707cd42f84ef6162b75489fdef65a0850bcd3
|
Shell
|
shizonic/sensible-shell
|
/.local/share/kyrat/lib/core.sh
|
UTF-8
| 5,771
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Kyrat config root; overridable via the environment.
KYRAT_HOME=${KYRAT_HOME:-${HOME}/.config/kyrat}
# Command names kept in variables so callers/tests can override them.
BASE64=base64
BASH=bash
CAT=cat
GUNZIP=gunzip
GZIP=gzip
SSH=ssh
# PATH needs to be updated since GNU Coreutils is required in OSX environments
GNUBIN="/usr/local/opt/coreutils/libexec/gnubin"
# Candidate writable dirs on the REMOTE host; \$HOME is escaped on purpose
# so it expands remotely, not here.
BASE_DIRS=("/tmp" "\$HOME")
# Exit codes used by the remote bootstrap snippet.
NOT_EXISTING_COMMAND=111
NO_WRITABLE_DIRECTORY=112
#######################################
# Concatenate files to standard output.
#
# Each argument naming a regular file is streamed to stdout in order;
# arguments that exist but are not regular files produce a warning on
# stderr; missing paths are silently skipped.
#
# Globals:
#   None
# Arguments:
#   files ($@) : the program arguments.
# Returns:
#   None
# Output:
#   The file contents.
#######################################
function _concatenate_files(){
    local entry
    for entry in "$@"; do
        if [[ -f "${entry}" ]]; then
            cat "${entry}"
        elif [[ -e "${entry}" ]]; then
            echo >&2 "Warn: ${entry} is not a file."
        fi
    done
}
#######################################
# Ensure the kyrat config layout exists under $KYRAT_HOME, then parse the
# ssh arguments and open the remote session.
#
# Globals:
#   KYRAT_HOME (RO) : Kyrat home location.
# Arguments:
#   args ($@) : The ssh arguments.
# Returns:
#   None
# Output:
#   None
#######################################
function kyrat(){
    # BUGFIX: quote the expansion — an unquoted $KYRAT_HOME containing
    # whitespace (or glob characters) would split into multiple mkdir args.
    local subdir
    for subdir in bashrc.d inputrc.d vimrc.d tmux.conf.d; do
        mkdir -p "${KYRAT_HOME}/${subdir}"
    done
    _parse_args "$@"
    _execute_ssh
}
#######################################
# Split the command line at the first "--": everything before it is
# collected into SSH_OPTS, everything after becomes COMMANDS. With no
# arguments at all, a plain ssh session is started instead.
#
# Globals:
#   SSH (RO) : ssh command.
#   SSH_OPTS (WO) : The ssh options.
#   COMMANDS (WO) : The ssh command to invoke remotely.
# Arguments:
#   args ($@) : The ssh arguments.
# Returns:
#   None
# Output:
#   None
#######################################
function _parse_args(){
    [[ -z "$@" ]] && { $SSH; return $?; }
    SSH_OPTS=()
    while (( $# > 0 )); do
        [[ "$1" == "--" ]] && { shift; break; }
        SSH_OPTS+=("$1")
        shift
    done
    COMMANDS=("$@")
}
#######################################
# Run ssh session with all config files
# in $KYRAT_HOME.
#
# Globals:
#   KYRAT_HOME (RO) : Kyrat home location.
#   BASE64 (RO) : base64 command.
#   GZIP (RO) : gzip command.
#   GUNZIP (RO) : gunzip command.
# Arguments:
#   args ($@) : The ssh arguments.
# Returns:
#   NOT_EXISTING_COMMAND : if one of the required commands
#                          does not exist.
# Output:
#   None
#######################################
function _execute_ssh(){
# Local prerequisites: base64 + gzip are used to pack the rc files.
command -v $BASE64 >/dev/null 2>&1 || { echo >&2 "kyrat requires $BASE64 to be installed locally. Aborting."; return $NOT_EXISTING_COMMAND; }
command -v $GZIP >/dev/null 2>&1 || { echo >&2 "kyrat requires $GZIP to be installed locally. Aborting."; return $NOT_EXISTING_COMMAND; }
# The whole remote bootstrap is generated as one string and run via bash -c.
local remote_command="$(_get_remote_command)"
$SSH -t "${SSH_OPTS[@]}" -- "$BASH -c '$remote_command'"
}
#######################################
# Compose and return the remote command
# to be executed inside the ssh session.
#
# Globals:
#   KYRAT_HOME (RO) : Kyrat home location.
#   BASE64 (RO) : base64 command.
#   GZIP (RO) : gzip command.
#   GUNZIP (RO) : gunzip command.
#   COMMANDS (RO?) : ssh commands to execute (if any).
# Arguments:
#   None
# Returns:
#   NOT_EXISTING_COMMAND : if one of the required commands
#                          does not exist.
#   NO_WRITABLE_DIRECTORY : if no writable directories could
#                           be found in the remote host.
# Output:
#   The composed remote command to execute in the ssh session.
#######################################
function _get_remote_command(){
# Pack each rc family (base file + .d fragments) as gzip|base64 so it can be
# embedded in the single-quoted remote command line.
local rc_script="$(_concatenate_files "$KYRAT_HOME"/bashrc "$KYRAT_HOME"/bashrc.d/* | $GZIP | $BASE64)"
local inputrc_script="$(_concatenate_files "$KYRAT_HOME"/inputrc "$KYRAT_HOME"/inputrc.d/* | $GZIP | $BASE64)"
local vimrc_script="$(_concatenate_files "$KYRAT_HOME"/vimrc "$KYRAT_HOME"/vimrc.d/* | $GZIP | $BASE64)"
local tmux_conf="$(_concatenate_files "$KYRAT_HOME"/tmux.conf "$KYRAT_HOME"/tmux.conf.d/* | $GZIP | $BASE64)"
local commands_opt=""
[[ -z "${COMMANDS[@]}" ]] || commands_opt="-c \"${COMMANDS[@]}\""
# Unescaped $vars below expand NOW (locally); \$-escaped ones expand on the
# remote host. The heredoc body is emitted verbatim as the remote script.
$CAT <<EOF
[[ -e /etc/motd ]] && $CAT /etc/motd || { [[ -e /etc/update-motd.d ]] && command -v run-parts &> /dev/null && run-parts /etc/update-motd.d/; }
[[ -d "$GNUBIN" ]] && PATH="$GNUBIN:\$PATH";
for tmp_dir in ${BASE_DIRS[@]}; do [[ -w "\$tmp_dir" ]] && { base_dir="\$tmp_dir"; break; } done;
[[ -z "\$base_dir" ]] && { echo >&2 "Could not find writable temp directory on the remote host. Aborting."; exit $NO_WRITABLE_DIRECTORY; };
command -v $BASE64 >/dev/null 2>&1 || { echo >&2 "kyrat requires $BASE64 command on the remote host. Aborting."; exit $NOT_EXISTING_COMMAND; };
command -v $GUNZIP >/dev/null 2>&1 || { echo >&2 "kyrat requires $GUNZIP command on the remote host. Aborting."; exit $NOT_EXISTING_COMMAND; };
kyrat_home="\$(mktemp -d kyrat-XXXXX -p "\$base_dir")";
trap "rm -rf "\$kyrat_home"; exit" EXIT HUP INT QUIT PIPE TERM KILL;
[[ -e \${HOME}/.bashrc ]] && echo "source \${HOME}/.bashrc" > "\${kyrat_home}/bashrc";
echo "${rc_script}" | $BASE64 -di | $GUNZIP >> "\${kyrat_home}/bashrc";
echo "${inputrc_script}" | $BASE64 -di | $GUNZIP > "\${kyrat_home}/inputrc";
echo "${vimrc_script}" | $BASE64 -di | $GUNZIP > "\${kyrat_home}/vimrc";
echo "${tmux_conf}" | $BASE64 -di | $GUNZIP > "\${kyrat_home}/tmux.conf";
VIMINIT="let \\\$MYVIMRC=\\"\${kyrat_home}/vimrc\\" | source \\\$MYVIMRC" INPUTRC="\${kyrat_home}/inputrc" TMUX_CONF="\${kyrat_home}/tmux.conf" $BASH --rcfile "\${kyrat_home}/bashrc" -i ${commands_opt};
EOF
}
| true
|
81bb9532a864456f139f6046065857e90b457b07
|
Shell
|
dcode/docked_crits
|
/docker-entrypoint.sh
|
UTF-8
| 1,210
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# First-boot initialisation for a CRITs container, then run the dev server.
# NOTE(review): ADMIN_EMAIL and ADMIN_USERNAME are referenced below but never
# set in this script — presumably injected via the container environment;
# confirm against the Dockerfile/compose file.
ADMIN_ORG="ACME, Inc"
ADMIN_FIRSTNAME="Wiley"
ADMIN_LASTNAME="Coyote"
# One-time setup: only runs when no database.py config exists yet.
if [ ! -f /data/crits/crits/config/database.py ]; then
# Copy the example config, generate a random 50-char SECRET_KEY, sed-escape
# it, then patch SECRET_KEY and MONGO_HOST in place (single && chain).
cp /data/crits/crits/config/database_example.py /data/crits/crits/config/database.py && \
SC=$(cat /dev/urandom | LC_CTYPE=C tr -dc 'abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)' | fold -w 50 | head -n 1) && \
SE=$(echo ${SC} | sed -e 's/\\/\\\\/g' | sed -e 's/\//\\\//g' | sed -e 's/&/\\\&/g') && \
sed -i -e "s/^\(SECRET_KEY = \).*$/\1\'${SE}\'/1" /data/crits/crits/config/database.py && \
sed -i -e "s/^\(MONGO_HOST = \).*$/\1\os.environ['MONGODB_PORT_27017_TCP_ADDR']/1" /data/crits/crits/config/database.py # need to change the mongo host to the docker image name
#
# Creates default info, if it already exists, it skips by default
python /data/crits/manage.py create_default_collections
# Add a CRITS admin user, not sure what happens if it exists
python /data/crits/manage.py users --adduser \
--administrator \
--email ${ADMIN_EMAIL} \
--firstname ${ADMIN_FIRSTNAME} \
--lastname ${ADMIN_LASTNAME} \
--organization ${ADMIN_ORG} \
--username ${ADMIN_USERNAME}
fi
# Replace this shell with the Django dev server (PID 1 in the container).
exec python /data/crits/manage.py runserver 0.0.0.0:8080
| true
|
d624ba5ae64d542737803e01b659c8b7dad5ae6d
|
Shell
|
humberaquino/ogahunt
|
/web/scripts/utils/grab-version
|
UTF-8
| 207
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Emit the project version from the VERSION file.
# With the argument "sha1", append a short commit hash: v<ver>_<sha>;
# otherwise print "v<ver>" without a trailing newline.
ver=$(cat VERSION | tr -d '\n')

if [ "$1" = "sha1" ]; then
  sha=$(git rev-parse HEAD | head -c 8 | tr -d '\n')
  echo "v${ver}_${sha}"
else
  echo -n "v$ver"
fi
| true
|
6f873495b685c2bcf3f83503abba0a064c21b1f7
|
Shell
|
Chollohookah/ShishasCrawler
|
/cron-script.sh
|
UTF-8
| 1,500
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Truncate the previous crawl output files before re-scraping.
echo "" > $HOME/data/zuloshisha.json
echo "" > $HOME/data/bengalas.json
echo "" > $HOME/data/hispacachimba.json
echo "" > $HOME/data/medusa.json
echo "" > $HOME/data/tgs.json
echo "" > $HOME/data/bakkali.json
# Run each site spider in turn, pausing one minute between runs.
/usr/local/bin/scrapy runspider $HOME/ShishasCrawler/cachimbosa/cachimbosa/spiders/paginas/zuloshishas_spider.py -o $HOME/data/zuloshisha.json
sleep 1m
/usr/local/bin/scrapy runspider $HOME/ShishasCrawler/cachimbosa/cachimbosa/spiders/paginas/bengala_spider.py -o $HOME/data/bengalas.json
sleep 1m
/usr/local/bin/scrapy runspider $HOME/ShishasCrawler/cachimbosa/cachimbosa/spiders/paginas/hispacachimba_spider.py -o $HOME/data/hispacachimba.json
sleep 1m
/usr/local/bin/scrapy runspider $HOME/ShishasCrawler/cachimbosa/cachimbosa/spiders/paginas/medusa_spider.py -o $HOME/data/medusa.json
sleep 1m
/usr/local/bin/scrapy runspider $HOME/ShishasCrawler/cachimbosa/cachimbosa/spiders/paginas/tgs_spider.py -o $HOME/data/tgs.json
sleep 1m
/usr/local/bin/scrapy runspider $HOME/ShishasCrawler/cachimbosa/cachimbosa/spiders/paginas/bakkali_spider.py -o $HOME/data/bakkali.json
sleep 1m
# NOTE(review): this $? only reflects the preceding `sleep 1m` (always 0),
# so scrapy failures are never detected here — presumably the intent was to
# check each crawl's exit status; confirm before changing.
if [ "$?" != "0" ]; then
echo "[Error] scrawleamiento fallado"
exit 1
fi
echo "[Success] scrawl multiple exitoso"
echo "[INFO] iniciando subida base de datos"
# Push the freshly crawled JSON into the database; $1 is forwarded verbatim.
python3 $HOME/ShishasCrawler/cachimbosa/cachimbosa/scripts/file_exporter.py $1
if [ "$?" != "0" ]; then
echo "[Error] Guardado en base de datos fallado"
exit 1
fi
echo "[Success] guardado en base de datos exitoso"
|
e45fe892e4db986d0b602e8429de91c3098ef778
|
Shell
|
davidlennick/killswitch-examples
|
/ids-example/elk/kibana/upload-dash.sh
|
UTF-8
| 447
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
kibana_url=http://127.0.0.1:5601
# wait for kibana
# curl -s -o /dev/null -w ''%{http_code}'' $kibana_url
# Poll the status endpoint every 2s until Kibana answers HTTP 200.
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' $kibana_url/status)" != "200" ]];
do
sleep 2;
done
# Retry the saved-objects import until curl succeeds (Kibana may still be
# warming up even after /status returns 200).
until curl -X POST -H "kbn-xsrf: true" \
"$kibana_url/api/saved_objects/_import" \
--form file=@/kibana_export.ndjson
do
sleep 2
echo Retrying dashboard upload...
done
echo Uploaded dashboard
| true
|
e84292d4094d024f70dc151d6c3e4503b9ff3174
|
Shell
|
MichaelCurrin/logos
|
/download_logos.sh
|
UTF-8
| 959
| 3.40625
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash -e
# Download logos.
# NOTE(review): the `-e` in the shebang only applies when the script is run
# directly; invoking it as `bash download_logos.sh` drops it — consider an
# explicit `set -e` in the body.
cd assets/img
# Comments include the name on shields.io in case you use logo field there.
LOGOS=(
ansible
c
circleci
cmake
css3
dart
deno
docker
eslint
git
github
githubactions
gnubash # gnu-bash
go
graphql
html5
java
javascript
jekyll
kubernetes
markdown
mysql
netlify
nginx
node-dot-js # node.js
npm
php
postgresql
python
react
ruby
sqlite
terraform
typescript
visualstudiocode
vue-dot-js
yarn
)
# FIXME If the curl returns 404, this won't stop the script so this must be handled still.
# (A `curl -f` would make HTTP errors return non-zero and so surface here.)
for LOGO in ${LOGOS[@]}; do
# Useful for rapid development - skip existing files and do not try and update. Remove this step to updates.
[ -f "$LOGO.svg" ] && continue
echo $LOGO
curl -O "https://simpleicons.org/icons/$LOGO.svg"
# Tag the <svg> root with an id and force paths to use currentColor.
# (`sed -i ''` is the BSD/macOS in-place form.)
sed -i '' \
"s/\"img\"/\"img\" id=\"logo-$LOGO\"/g
s/path/path fill=\"currentColor\"/g" \
"$LOGO.svg"
done
| true
|
ca84efca0c0f2ffaa20160d8a64d25478d6ea3b1
|
Shell
|
Langhalsdino/balena-stress-test
|
/stress-test/entrypoint.sh
|
UTF-8
| 127
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run pyperformance forever, numbering each pass.
count=0
# `[ True ]` is true because the string is non-empty: intentional infinite loop.
while [ True ]; do
echo "Running performance test #$count"
pyperformance run
((++count))
done
| true
|
eef722f6ca80a5c58b98c8307b0757f497914682
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/ryzom-client/PKGBUILD
|
UTF-8
| 2,794
| 2.765625
| 3
|
[] |
no_license
|
# Maintainer: PitBall
pkgname=ryzom-client
pkgver=0.12.0.r8921
pkgrel=1
pkgdesc="Ryzom is a Free to Play MMORPG .This version is for playing on an official server"
arch=('i686' 'x86_64')
url="http://www.ryzom.com/"
license=('AGPL3')
depends=('ryzom-data' 'curl' 'freealut' 'libvorbis' 'libjpeg' 'giflib' 'rrdtool'
'boost' 'lua53bind' 'libsquish' 'libxrandr' 'libxcursor'
'hicolor-icon-theme' 'openssl-1.0')
conflicts=('ryzom-client-latest-hg' 'ryzom-client-hg')
makedepends=('mercurial' 'cpptest' 'cmake' 'bison' 'mesa')
provides=('libnel' 'ryzom' 'ryzomcore')
_hg_name='ryzomcore'
install=install #branch=compatibility
source=( "hg+https://bitbucket.org/ryzom/${_hg_name}#branch=compatibility-develop"
'ryzom.sh')
md5sums=('SKIP'
'7f9befd9b4f864938648880375ff423e')
# Derive MAJOR.MINOR.PATCH from code/CMakeLists.txt plus the hg revision.
pkgver() {
cd "$_hg_name"
printf "%s.%s.%s.r%s" \
"$(grep -o -P "NL_VERSION_MAJOR [0-9]+" code/CMakeLists.txt | \
awk '{print $2}' | head -n 1)" \
"$(grep -o -P "NL_VERSION_MINOR [0-9]+" code/CMakeLists.txt | \
awk '{print $2}' | head -n 1)" \
"$(grep -o -P "NL_VERSION_PATCH [0-9]+" code/CMakeLists.txt | \
awk '{print $2}' | head -n 1)" \
"$(hg identify -n)"
}
# Create the build dir and patch a missing libxml include before building.
prepare() {
mkdir -p $srcdir/$_hg_name/build
cd $srcdir/$_hg_name
sed '/o_xml.h/i#include <libxml/tree.h>' -i \
code/nel/include/nel/logic/logic_state.h
}
# Configure the client-only build (no server/tools) against openssl-1.0.
build() {
cd $srcdir/$_hg_name
cmake -Hcode -Bbuild -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release \
-DWITH_RYZOM_SERVER=OFF -DWITH_RYZOM_CLIENT=ON -DWITH_PCH=OFF \
-DWITH_RYZOM_TOOLS=OFF -DWITH_RYZOM_STEAM=OFF -DWITH_RYZOM_PATCH=OFF \
-DWITH_NEL_TOOLS=OFF -DWITH_NEL_TESTS=OFF -DWITH_NEL_SAMPLES=OFF \
-DWITH_LUA53=ON -DWITH_LUA51=OFF -DWITH_LUA52=OFF \
-DCMAKE_INSTALL_PREFIX=/usr -DWITH_GCC_FPMATH_BOTH=ON \
-DRYZOM_ETC_PREFIX=/etc/ryzom -DRYZOM_SHARE_PREFIX=/usr/share/ryzom \
-DRYZOM_BIN_PREFIX=/usr/bin -DRYZOM_GAMES_PREFIX=/usr/bin \
-DOPENSSL_INCLUDE_DIR="/usr/include/openssl-1.0/" \
-DOPENSSL_SSL_LIBRARY="/usr/lib/openssl-1.0/libssl.so" \
-DOPENSSL_CRYPTO_LIBRARY="/usr/lib/openssl-1.0/libcrypto.so"
cmake --build build
}
# Install, wire up the launcher script, and adapt the default client config
# for the official servers (no custom patch server, no appzone).
package() {
cd "$srcdir/$_hg_name/build"
make DESTDIR="$pkgdir/" install
sed 's/\/usr\/bin\/ryzom_client/ryzom/' \
-i ${pkgdir}/usr/share/applications/ryzom_client.desktop
install -Dm755 ${srcdir}/ryzom.sh ${pkgdir}/usr/bin/ryzom
#correct config file for playing on an official server
sed -r -e 's|^(PatchServer\s*=\s*).*|\1"";|' \
-e '/PatchServer/aPatchWanted = 0;' \
-e 's|^(PatchletUrl\s*=\s*).*|\1"";|' \
-e '/appzone.xml/d' \
-i ${pkgdir}/etc/ryzom/client_default.cfg
}
| true
|
04ee39603495f9ad74ae6fceb05c726d64c180a0
|
Shell
|
fanta12138/sstk
|
/ssc/articulations/get_model_ids_with_articulations.sh
|
UTF-8
| 281
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
# Fetch articulation annotations and extract the bare model ids.
# $1 optionally overrides the service base URL.
BASE_URL=${1:-http://ec2-18-188-96-100.us-east-2.compute.amazonaws.com/articulations/}
wget -O articulations.json ${BASE_URL}/articulation-annotations/list\?format\=json
# Pull each modelId, strip quotes, keep the part after the first '.'.
jq '.[]["modelId"]' articulations.json | sed -e 's/"//g' | cut -d'.' -f2 > articulatedModelIds.txt
| true
|
80f30ebbc6ed09ce126d47da44d2ffabaeba060a
|
Shell
|
billybissic/Magnificent-Control-Panel
|
/scripts/install-package.sh
|
UTF-8
| 8,218
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
###########
#
# MIT License
#
# Copyright (c) 2018 Billy Bissic
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
############
## Purpose : Preps server for package by creating back ups and removing files,
## then installs the package specified in the arguments.
ME="install-package.sh"
showHelp() {
	# Render the usage text as a single here-doc instead of a chain of
	# echo calls; ${ME} is the only expansion inside the block.
	cat <<EOF
 
HELP
---------------------------------------------------------------------
Available Commands: 
---------------------------------------------------------------------
 Basic use ./${ME} -m install -f [PACKAGENAME.BUILD.#.#] -r [RELEASE_NUMBER] -d [INSTALL_PATH] -l [PACKAGE_STAGE_PATH]
-h : Shows this help document.
-v : Optional, Allows verbose output during script runtime.
MODES: -i, -b, -p, one of the three are required. See below for details.
-m : Sets the installer to install mode. Runs back up then installs the specified package.
 Options are: install, backup, rollback.
-b : Sets the installer to back up applications in production.
 Explicitly running this mode will just perform a backup.
 This gets called automatically under the install mode.
-p : Sets the installer to roll back to a previous release.
-d : Required, Sets the install destination directory.
-l : Required, Staging location for the installation package.
-f : Required, Sets name of the package to install.
-r : Required, Sets build revision number of the package to be installed.
-s : Optional, Sets the script run to silent, no possible output except errors
 overrides verbose when used together.
----------------------------------------------------------------------
EOF
}
# A POSIX variable
OPTIND=1         # Reset in case getopts has been used previously in the shell.
# Initialize our own variables:
BITBUCKET_BUILD_NUMBER=""
BB_AUTH_STRING=""
#BITBUCKET_REPO_OWNER=""
#BITBUCKET_REPO_SLUG=""
BUILD_NAME=""
BACKUP_DIRECTORY=""
INSTALL_DIRECTORY=""
STAGING_LOCATION=""
VERBOSE=0
INSTALL=0
SILENT=0
BACKUP=0
PREVIOUS=0
# NOTE(review): in this optstring h, v, b, p and q are declared as options
# that take an argument (trailing ':'), which contradicts the help text that
# documents them as plain flags — confirm before changing the CLI surface.
while getopts ":h:v:m:b:p:f:r:d:l:q:" opt; do
    case "${opt}" in
    h|\?)
        showHelp
        exit 0
        ;;
    v)  VERBOSE=1
        ;;
    # BUGFIX: the original tests were `if [${OPTARG} -eq "install" ]` —
    # missing space after `[` (bash looked for a command literally named
    # "[install") and `-eq` is an *integer* comparison. The mode flags were
    # therefore never set. String equality with `=` is the intended test.
    m)  if [ "${OPTARG}" = "install" ]
        then
          INSTALL=1
        elif [ "${OPTARG}" = "backup" ]
        then
          BACKUP=1
        elif [ "${OPTARG}" = "rollback" ]
        then
          PREVIOUS=1
        fi
        ;;
    b)  BACKUP=1
        ;;
    p)  PREVIOUS=1
        ;;
    #o) BITBUCKET_REPO_OWNER=${OPTARG}
    #   ;;
    f)  BUILD_NAME=${OPTARG}
        ;;
    r)  BITBUCKET_BUILD_NUMBER=${OPTARG}
        ;;
    d)  INSTALL_DIRECTORY=${OPTARG}   # directory in which the contents of the package will be installed to.
        ;;
    l)  STAGING_LOCATION=${OPTARG}    # can be used to specify staging area for the new package or backup location for the package to rollback to.
        ;;
    #s) BITBUCKET_REPO_SLUG=${OPTARG}
    #   ;;
    q)  SILENT=1
        ;;
    esac
done
# Drop the parsed options (and a stray "--" terminator, if present).
shift $((OPTIND-1))
[ "${1:-}" = "--" ] && shift
echo "verbose=${VERBOSE}, build_name="${BUILD_NAME}" build_number='${BITBUCKET_BUILD_NUMBER}', Leftovers: $@"
BACKUP_ARCHIVE_NAME=""
# NOTE(review): BACKUP_ARCHIVE_NAME is never assigned a real value anywhere
# in this script, so tar below receives an empty archive path — confirm where
# the archive name is meant to come from.

# Print a backup error plus the standard exit banner, then abort the script.
_backup_fail() {
  echo "[ERROR] ${ME} $1"
  echo "[INFO] Exiting..."
  exit 1
}

# Copy the deployed site (icons, html, js, css, assets/) into
# $BACKUP_DIRECTORY, archive the backup, then delete the old files so the
# new package can be installed cleanly. Any failed step aborts the script.
backupFiles() {
  if [ ! -d "$BACKUP_DIRECTORY" ]
  then
    mkdir "$BACKUP_DIRECTORY" || _backup_fail "Not able to create ${BACKUP_DIRECTORY}; can not proceed."
  fi

  cd "$INSTALL_DIRECTORY"
  cp *.ico "$BACKUP_DIRECTORY"  || _backup_fail "Not able to copy icon files to ${BACKUP_DIRECTORY}; can not proceed."
  cp *.html "$BACKUP_DIRECTORY" || _backup_fail "Not able to copy html files to ${BACKUP_DIRECTORY}; can not proceed."
  cp *.js "$BACKUP_DIRECTORY"   || _backup_fail "Not able to copy js files to ${BACKUP_DIRECTORY}; can not proceed."
  cp *.css "$BACKUP_DIRECTORY"  || _backup_fail "Not able to copy css files to ${BACKUP_DIRECTORY}; can not proceed."
  cp -R assets/ "$BACKUP_DIRECTORY" || _backup_fail "Not able to copy assets directory to ${BACKUP_DIRECTORY}; can not proceed."

  # NOTE(review): as in the original, we end up in the *parent* of
  # $BACKUP_DIRECTORY here, so the rm commands below operate on that parent —
  # confirm that directory is the install directory in every deployment.
  cd "$BACKUP_DIRECTORY"
  cd ..

  # BUGFIX: the original ran `tar -xvzf` (extract); creating the backup
  # archive requires `-c`.
  tar -cvzf "$BACKUP_ARCHIVE_NAME" "$BACKUP_DIRECTORY" || _backup_fail "Not able to compress back up directory to archive. Can not proceed."

  echo "[INFO] All files backed up successfully. Removing old files now..."
  # If there were no errors compressing the files, remove the files
  rm *.ico       || _backup_fail "not able to remove icon files. Can not proceed."
  rm *.html      || _backup_fail "not able to remove html files. Can not proceed."
  rm *.js        || _backup_fail "not able to remove js files. Can not proceed."
  rm *.css       || _backup_fail "not able to remove css files. Can not proceed."
  rm -rf assets/ || _backup_fail "not able to remove assets directory. Can not proceed."
}
# Copy the staged package contents into the install directory and hand
# ownership to the web server user.
installFiles() {
echo "[INFO] Copying files to the install directory."
# cd to package contents
cd $STAGING_LOCATION
if [ $? -eq 1 ]
then
echo "[ERROR] ${ME} The staging directory ${STAGING_LOCATION} does not exist. Can not proceed."
echo "[INFO] Exiting..."
exit 1
fi
cp * "${INSTALL_DIRECTORY}"
if [ $? -eq 1 ]
then
echo "[ERROR] ${ME} Was not able to move files over to the installation directory ${INSTALL_DIRECTORY}. Can not proceed."
echo "[INFO] Exiting..."
exit 1
fi
# NOTE(review): cwd is still $STAGING_LOCATION at this point, so these
# chgrp/chown recurse over the staging contents, not the install directory —
# confirm which was intended.
chgrp -R www-data *
chown -R www-data *
}
# Rollback is not implemented yet; this is a placeholder.
rollBackInstall() {
echo "[INFO] Beginning rollback"
}
# Check to ensure the required flags are passed in.
# `${VAR:+x}` expands to "x" only when VAR is set AND non-empty, so each
# -z test rejects both missing and empty values.
if [ -z ${BUILD_NAME:+x} ]
then
echo " "
echo "[ERROR] ${ME} Package name is required."
showHelp
echo " "
echo "[INFO] Exiting..."
exit 1
fi
if [ -z ${INSTALL_DIRECTORY:+x} ]
then
echo " "
echo "[ERROR] ${ME} Install directory is required."
showHelp
echo " "
echo "[INFO] Exiting..."
exit 1
fi
if [ -z ${STAGING_LOCATION:+x} ]
then
echo " "
echo "[ERROR] ${ME} Staging location is required."
showHelp
echo " "
echo "[INFO] Exiting..."
exit 1
fi
if [ -z ${BITBUCKET_BUILD_NUMBER:+x} ]
then
echo " "
echo "[ERROR] ${ME} Build number is required."
showHelp
echo " "
echo "[INFO] Exiting..."
exit 1
fi
# Dispatch on the mode flags set during option parsing.
if [ $INSTALL -eq 1 ]
then
backupFiles
installFiles
fi
if [ $BACKUP -eq 1 ]
then
backupFiles
fi
if [ $PREVIOUS -eq 1 ]
then
rollBackInstall
fi
| true
|
6a518deb7e5c60cef9a6814ae10a7ffa3a698b0c
|
Shell
|
The-Peso-G/babel
|
/scripts/integration-tests/e2e-create-react-app.sh
|
UTF-8
| 1,084
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#==============================================================================#
# SETUP #
#==============================================================================#
# Start in scripts/integration-tests/ even if run from root directory
cd "$(dirname "$0")" || exit
source utils/local-registry.sh
source utils/cleanup.sh
# Echo every command being executed
set -x
# Clone create-react-app
git clone --depth=1 https://github.com/facebook/create-react-app.git tmp/create-react-app
cd tmp/create-react-app || exit
#==============================================================================#
# TEST #
#==============================================================================#
# Serve locally-built Babel packages from a verdaccio registry, rewrite
# create-react-app's Babel deps (top-level and per lerna package) to use
# them, then run its test suite in CI mode.
startLocalRegistry "$PWD"/../../verdaccio-config.yml
yarn install
node "$PWD"/../../utils/bump-babel-dependencies.js
yarn lerna exec -- node "$PWD"/../../utils/bump-babel-dependencies.js
yarn install
# Test
CI=true yarn test
# Stop the local registry / remove temp files (from utils/cleanup.sh).
cleanup
| true
|
8cef5c19206018edf5993aa45dffae6e53fd2df9
|
Shell
|
bdecoste/istio_proxy_build_image
|
/src/lightstep/bootstrap.sh
|
UTF-8
| 309
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Regenerate the autotools build system from scratch.
# In case this source is being cross-compiled.
make maintainer-clean 2> /dev/null
rm -rf autom4te.cache 2> /dev/null
# macOS installs GNU libtoolize as "glibtoolize".
LIBTOOLIZE=libtoolize
if [ `uname` = 'Darwin' ]; then
LIBTOOLIZE=glibtoolize
fi
${LIBTOOLIZE} --copy --automake
aclocal -I m4
autoheader
autoconf
automake --copy --add-missing
| true
|
da1e5239ff9f767a37c696d93c1063eb9b902538
|
Shell
|
kmadejski/launchpad
|
/payload/dev/solr/entrypoint.bash
|
UTF-8
| 593
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
mkdir -p /ezsolr/server/ez
# First run only: seed solr.xml plus the eZ config template, and strip the
# schemaless update chain from solrconfig.xml.
if [ ! -f /ezsolr/server/ez/solr.xml ]; then
cp /opt/solr/server/solr/solr.xml /ezsolr/server/ez
cp /opt/solr/server/solr/configsets/basic_configs/conf/{currency.xml,solrconfig.xml,stopwords.txt,synonyms.txt,elevate.xml} /ezsolr/server/ez/template
sed -i.bak '/<updateRequestProcessorChain name="add-unknown-fields-to-the-schema">/,/<\/updateRequestProcessorChain>/d' /ezsolr/server/ez/template/solrconfig.xml
fi
# NOTE(review): `-f` runs Solr in the foreground, so the create_core line
# below is only reached after the Solr process exits — confirm whether the
# two lines should be reordered or Solr backgrounded.
/opt/solr/bin/solr -s /ezsolr/server/ez -f
/opt/solr/bin/solr create_core -c collection1 -d /ezsolr/server/ez/template
| true
|
f405df8054f6189eb9e3152d8a59c2b026048dbd
|
Shell
|
LaloVene/Bash-Course
|
/11-Declare-Command.sh
|
UTF-8
| 179
| 3.203125
| 3
|
[] |
no_license
|
#! /bin/bash

# Dump every variable (with its attributes) visible in the current shell
declare -p

# Ordinary global variable
my_variable="Hello World"

# Read-only variable: any later assignment to it will fail
declare -r my_read_variable="Hello World"
| true
|
68cb796ddfc2520ecaee0eb25bf32af5565c451e
|
Shell
|
wicadmin/Goldenorb
|
/ext-rooter-basic/files/usr/lib/rooter/modeswitch.sh
|
UTF-8
| 12,681
| 3.515625
| 4
|
[] |
no_license
|
. /lib/functions/procd.sh
# Maximum number of modem slots tracked in the uci "modem" config.
MODCNT=6
ROOTER=/usr/lib/rooter
ROOTER_LINK="/tmp/links"
modeswitch="/usr/bin/usb_modeswitch"
# Log a message to syslog under the usb-modeswitch tag.
log() {
logger -t "usb-modeswitch" "$@"
}
# Read the given files (or stdin), drop trailing whitespace on each line and
# turn every remaining whitespace run into a single underscore (GNU sed '\+').
sanitize() {
	sed -e 's/[[:space:]]\+$//' -e 's/[[:space:]]\+/_/g' "$@"
}
# Read vendor/product/manufacturer/serial attributes for the hotplugged USB
# device out of sysfs into the globals uVid, uPid, uMa, uPr, uSe.
# $DEVPATH comes from the hotplug environment; when the idVendor file is not
# at that level, fall back to the parent directory.
find_usb_attrs() {
local usb_dir="/sys$DEVPATH"
[ -f "$usb_dir/idVendor" ] || usb_dir="${usb_dir%/*}"
uVid=$(cat "$usb_dir/idVendor")
uPid=$(cat "$usb_dir/idProduct")
uMa=$(sanitize "$usb_dir/manufacturer")
uPr=$(sanitize "$usb_dir/product")
uSe=$(sanitize "$usb_dir/serial")
}
# Banner helpers: draw boxed log output via log().
display_top() {
log "*****************************************************************"
log "*"
}
display_bottom() {
log "*****************************************************************"
}
# Log one message line inside the banner box.
display() {
local line1=$1
log "* $line1"
log "*"
}
#
# Save Interface variables
#
save_variables() {
	# Persist the interface bookkeeping counters to /tmp/variable.file in
	# the same KEY="value" layout the original echo chain produced.
	cat > /tmp/variable.file <<EOF
MODSTART="$MODSTART"
WWAN="$WWAN"
USBN="$USBN"
ETHN="$ETHN"
WDMN="$WDMN"
BASEPORT="$BASEPORT"
EOF
}
#
# delay until ROOter Initialization done
#
bootdelay() {
if [ ! -f /tmp/bootend.file ]; then
log "Delay for boot up"
sleep 10
while [ ! -f /tmp/bootend.file ]; do
sleep 1
done
sleep 10
fi
}
#
# return modem number based on port number
# 0 is not found
#
# Scans the modem slots in /etc/config/modem for one whose recorded USB
# device path matches $1 and stores the slot number in the global
# `retresult` (0 when no slot matches).
find_device() {
	DEVN=$1
	COUNTER=1
	while [ $COUNTER -le $MODCNT ]; do
		EMPTY=$(uci get modem.modem$COUNTER.empty)
		# Quote the uci output: an unset option yields an empty string,
		# which made the old unquoted tests abort with a syntax error.
		if [ "$EMPTY" = "0" ]; then
			DEVS=$(uci get modem.modem$COUNTER.device)
			if [ "$DEVN" = "$DEVS" ]; then
				retresult=$COUNTER
				return
			fi
		fi
		let COUNTER=COUNTER+1
	done
	retresult=0
}
#
# check if all modems are inactive or empty
# delete all if nothing active
#
# If any slot is still active, returns immediately; otherwise wipes every
# slot back to "empty", resets the shared counters and saves them.
check_all_empty() {
	COUNTER=1
	while [ $COUNTER -le $MODCNT ]; do
		EMPTY=$(uci get modem.modem$COUNTER.empty)
		if [ $EMPTY -eq 0 ]; then
			ACTIVE=$(uci get modem.modem$COUNTER.active)
			if [ $ACTIVE -eq 1 ]; then
				# At least one modem is still in use - keep the config.
				return
			fi
		fi
		let COUNTER=COUNTER+1
	done
	# No active modem left: recreate every slot as an empty section.
	COUNTER=1
	while [ $COUNTER -le $MODCNT ]; do
		uci delete modem.modem$COUNTER
		uci set modem.modem$COUNTER=modem
		uci set modem.modem$COUNTER.empty=1
		let COUNTER=COUNTER+1
	done
	uci set modem.general.modemnum=1
	uci commit modem
	MODSTART=1
	WWAN=0
	USBN=0
	ETHN=1
	WDMN=0
	BASEPORT=0
	# A built-in eth1 shifts the ethernet interface numbering by one.
	if
		ifconfig eth1
	then
		if [ -e "/sys/class/net/eth1/device/bInterfaceProtocol" ]; then
			ETHN=1
		else
			ETHN=2
		fi
	fi
	save_variables
	display_top; display "No Modems present"; display_bottom
}
#
# Add Modem and connect
#
# Hotplug "add" path: filter out non-modem devices, run usb_modeswitch (or
# the MBIM configuration switch), detect the resulting protocol and spawn
# the matching connection script for the assigned modem slot.
if [ "$ACTION" = add ]; then
	bootdelay
	find_usb_attrs
	# Interface-level events (names containing ":") are ignored; only the
	# device-level event drives detection.
	if echo $DEVICENAME | grep -q ":" ; then
		exit 0
	fi
	if [ -z $uMa ]; then
		log "Ignoring Unnamed Hub"
		exit 0
	fi
	UPR=${uPr}
	CT=`echo $UPR | tr '[A-Z]' '[a-z]'`
	if echo $CT | grep -q "hub" ; then
		log "Ignoring Named Hub"
		exit 0
	fi
	if [ $uVid = 1d6b ]; then
		log "Ignoring Linux Hub"
		exit 0
	fi
	# protofind.lua classifies the device from the kernel's USB device dump.
	cat /sys/kernel/debug/usb/devices > /tmp/wdrv
	lua $ROOTER/protofind.lua $uVid $uPid 0
	retval=$?
	# 99 = webcam, 98 = printer, 97 = mass storage (possibly an unswitched modem).
	if [ -e /etc/config/mjpg-streamer ]; then
		if [ $retval -eq 99 ]; then
			log "Start MJPEG Streamer $DEVICENAME"
			/etc/init.d/mjpg-streamer start
			uci delete mjpg-streamer.camera
			uci set mjpg-streamer.camera=mjpg-stream
			uci set mjpg-streamer.camera.idv=$DEVICENAME
			uci commit mjpg-streamer
			exit 0
		fi
	fi
	if [ -e /etc/config/p910nd ]; then
		if [ $retval -eq 98 ]; then
			# Check if lp device is plugged in and p910nd is not already started
			log "USB Printer device plugged in, starting p910nd"
			/etc/init.d/p910nd start
			uci delete p910nd.printer
			uci set p910nd.printer=printer
			uci set p910nd.printer.idv=$DEVICENAME
			uci commit p910nd
			exit 0
		fi
	fi
	if [ $retval -eq 97 ]; then
		# Storage-mode device: only continue if usb-mode.json knows how to
		# mode-switch this vid:pid into a modem.
		if grep "$uVid:$uPid" /etc/usb-mode.json > /dev/null ; then
			log "Modem found"
		else
			log "Found USB Storage"
			exit 0
		fi
	fi
	# /tmp/usbwait serialises modem bring-up; wait for the previous one.
	if [ -f /tmp/usbwait ]; then
		log "Delay for previous modem"
		while [ -f /tmp/usbwait ]; do
			sleep 5
		done
	fi
	echo "1" > /tmp/usbwait
	source /tmp/variable.file
	source /tmp/modcnt
	MODCNT=$MODCNTX
	reinsert=0
	find_device $DEVICENAME
	if [ $retresult -gt 0 ]; then
		# Device path already has a slot: either it is still active (ignore)
		# or this is a re-insert of the same modem.
		ACTIVE=$(uci get modem.modem$retresult.active)
		if [ $ACTIVE = 1 ]; then
			rm -f /tmp/usbwait
			exit 0
		else
			IDP=$(uci get modem.modem$retresult.uPid)
			IDV=$(uci get modem.modem$retresult.uVid)
			if [ $uVid = $IDV -a $uPid = $IDP ]; then
				reinsert=1
				CURRMODEM=$retresult
			else
				display_top; display "Reinsert of different Modem not allowed"; display_bottom
				rm -f /tmp/usbwait
				exit 0
			fi
		fi
	fi
	log "Add : $DEVICENAME: Manufacturer=${uMa:-?} Product=${uPr:-?} Serial=${uSe:-?} $uVid $uPid"
	if [ $MODSTART -gt $MODCNT ]; then
		display_top; display "Exceeded Maximun Number of Modems"; display_bottom
		exit 0
	fi
	if [ $reinsert = 0 ]; then
		CURRMODEM=$MODSTART
	fi
	idV=$uVid
	idP=$uPid
	FILEN=$uVid:$uPid
	display_top; display "Start of Modem Detection and Connection Information"
	display "Product=${uPr:-?} $uVid $uPid"; display_bottom
	# Prefer MBIM configuration switching when umbim is available, except
	# for the vid:pid pairs below that must use usb_modeswitch instead.
	cat /sys/kernel/debug/usb/devices > /tmp/prembim
	lua $ROOTER/mbimfind.lua $uVid $uPid
	retval=$?
	rm -f /tmp/prembim
	if [ ! -e /sbin/umbim ]; then
		retval=0
	fi
	if [ $idV = 1199 -a $idP = 0fff ]; then
		retval=0
	fi
	if [ $idV = 12d1 -a $idP = 157d ]; then
		retval=0
	fi
	if [ $idV = 12d1 -a $idP = 15ec ]; then
		retval=0
	fi
	if [ $idV = 12d1 -a $idP = 1597 ]; then
		retval=0
	fi
	if [ $idV = 1199 -a $idP = 9013 ]; then
		#echo 1 >/sys/bus/usb/devices/$DEVICENAME/bConfigurationValue
		retval=0
	fi
	if [ $idV = 12d1 -a $idP = 15c1 ]; then
		#echo 2 >/sys/bus/usb/devices/$DEVICENAME/bConfigurationValue
		retval=0
	fi
	if [ $retval -eq 1 ]; then
		# MBIM-capable: select the right USB configuration directly.
		if [ $idV = 1199 -a $idP = 9051 ]; then
			display_top; display "Found 340U Modem at $DEVICENAME"; display_bottom
			echo 1 >/sys/bus/usb/devices/$DEVICENAME/bConfigurationValue
		else
			display_top; display "Found MBIM Modem at $DEVICENAME"; display_bottom
			echo 2 >/sys/bus/usb/devices/$DEVICENAME/bConfigurationValue
		fi
	else
		# Otherwise run usbmode (usb_modeswitch) via procd if a switch data
		# file exists for this device.
		if grep "$FILEN" /etc/usb-mode.json > /dev/null ; then
			procd_open_service "usbmode"
			procd_open_instance
			procd_set_param command "/sbin/usbmode" -s
			procd_close_instance
			procd_close_service
		else
			display_top; display "This device does not have a switch data file"
			display "Product=${uPr:-?} $uVid $uPid"; display_bottom
		fi
	fi
	# Give the device time to re-enumerate, then read its post-switch ids.
	sleep 10
	usb_dir="/sys$DEVPATH"
	idV="$(sanitize "$usb_dir/idVendor")"
	idP="$(sanitize "$usb_dir/idProduct")"
	display_top; display "Switched to : $idV:$idP"; display_bottom
	if [ $idV = 2357 -a $idP = 9000 ]; then
		sleep 10
	fi
	cat /sys/kernel/debug/usb/devices > /tmp/wdrv
	lua $ROOTER/protofind.lua $idV $idP 1
	retval=$?
	display_top; display "ProtoFind returns : $retval"; display_bottom
	rm -f /tmp/wdrv
	# /tmp/drv (written by protofind) reports how many serial ports the
	# driver created; advance the shared port base accordingly.
	if [ $reinsert = 0 ]; then
		BASEP=$BASEPORT
		if [ -f /tmp/drv ]; then
			source /tmp/drv
			BASEPORT=`expr $PORTN + $BASEPORT`
		fi
	fi
	rm -f /tmp/drv
	FORCE=$(uci get modem.modeminfo$CURRMODEM.ppp)
	if [ -n $FORCE ]; then
		if [ $FORCE = 1 -a $retval -ne 0 ]; then
			log "Forcing PPP mode"
			if [ $idV = 12d1 ]; then
				retval=10
			else
				retval=11
			fi
			log "Forced Protcol Value : $retval"
		fi
	fi
	if [ $idV = 12d1 -a $idP = 15c1 ]; then
		retval=27
	fi
	if [ $idV = 13b1 -a $idP = 0041 ]; then
		retval=0
	fi
	if [ $retval -ne 0 ]; then
		# Record everything about the slot so other scripts can drive it.
		log "Found Modem$CURRMODEM"
		if [ $reinsert = 0 ]; then
			uci set modem.modem$CURRMODEM.empty=0
			uci set modem.modem$CURRMODEM.uVid=$uVid
			uci set modem.modem$CURRMODEM.uPid=$uPid
			uci set modem.modem$CURRMODEM.idV=$idV
			uci set modem.modem$CURRMODEM.idP=$idP
			uci set modem.modem$CURRMODEM.device=$DEVICENAME
			uci set modem.modem$CURRMODEM.baseport=$BASEP
			uci set modem.modem$CURRMODEM.maxport=$BASEPORT
			uci set modem.modem$CURRMODEM.proto=$retval
			uci set modem.modem$CURRMODEM.maxcontrol=/sys$DEVPATH/descriptors
			find_usb_attrs
			uci set modem.modem$CURRMODEM.manuf=$uMa
			uci set modem.modem$CURRMODEM.model=$uPr
			uci set modem.modem$CURRMODEM.serial=$uSe
			uci set modem.modem$CURRMODEM.celltype="-"
		fi
		uci set modem.modem$CURRMODEM.active=1
		uci set modem.modem$CURRMODEM.connected=0
		uci commit modem
	fi
	if [ $reinsert = 0 -a $retval != 0 ]; then
		MODSTART=`expr $MODSTART + 1`
		save_variables
	fi
	#
	# Handle specific modem models
	#
	case $retval in
	"0" )
		#
		# ubox GPS module
		#
		if [ $idV = 1546 ]; then
			if echo $uPr | grep -q "GPS"; then
				SYMLINK="gps0"
				BASEX=`expr 1 + $BASEP`
				ln -s /dev/ttyUSB$BASEX /dev/${SYMLINK}
				display_top ; display "Hotplug Symlink from /dev/ttyUSB$BASEX to /dev/${SYMLINK} created"
				display_bottom
			fi
		fi
		rm -f /tmp/usbwait
		exit 0
		;;
	"1" )
		log "Connecting a Sierra Modem"
		ln -s $ROOTER/connect/create_connect.sh $ROOTER_LINK/create_proto$CURRMODEM
		$ROOTER_LINK/create_proto$CURRMODEM $CURRMODEM &
		;;
	"2" )
		log "Connecting a QMI Modem"
		ln -s $ROOTER/connect/create_connect.sh $ROOTER_LINK/create_proto$CURRMODEM
		$ROOTER_LINK/create_proto$CURRMODEM $CURRMODEM &
		;;
	"3" )
		log "Connecting a MBIM Modem"
		ln -s $ROOTER/connect/create_connect.sh $ROOTER_LINK/create_proto$CURRMODEM
		$ROOTER_LINK/create_proto$CURRMODEM $CURRMODEM &
		;;
	"6"|"4"|"7"|"24"|"26"|"27" )
		log "Connecting a Huawei NCM Modem"
		ln -s $ROOTER/connect/create_connect.sh $ROOTER_LINK/create_proto$CURRMODEM
		$ROOTER_LINK/create_proto$CURRMODEM $CURRMODEM &
		;;
	"5" )
		log "Connecting a Hostless Modem or Phone"
		ln -s $ROOTER/connect/create_hostless.sh $ROOTER_LINK/create_proto$CURRMODEM
		$ROOTER_LINK/create_proto$CURRMODEM $CURRMODEM &
		;;
	"10"|"11"|"12"|"13"|"14"|"15" )
		log "Connecting a PPP Modem"
		ln -s $ROOTER/ppp/create_ppp.sh $ROOTER_LINK/create_proto$CURRMODEM
		$ROOTER_LINK/create_proto$CURRMODEM $CURRMODEM
		;;
	"9" )
		log "PPP HSO Modem"
		rm -f /tmp/usbwait
		;;
	esac
fi
#
# Remove Modem
#
# Hotplug "remove" path: deactivate the slot, tear down its network
# interface and kill every helper process spawned for it; also handles
# webcam/printer unplug for the optional services.
if [ "$ACTION" = remove ]; then
	find_usb_attrs
	# Ignore interface-level events; only device-level removal matters.
	if echo $DEVICENAME | grep -q ":" ; then
		exit 0
	fi
	find_device $DEVICENAME
	if [ $retresult -gt 0 ]; then
		IDP=$(uci get modem.modem$retresult.idP)
		IDV=$(uci get modem.modem$retresult.idV)
		# Pre-switch vid equal to post-switch vid means the device never
		# mode-switched; nothing was connected, so no teardown is needed.
		if [ $uVid = $IDV ]; then
			exit 0
		else
			uci set modem.modem$retresult.active=0
			uci set modem.modem$retresult.connected=0
			uci commit modem
			if [ -e /etc/config/mwan3 ]; then
				ENB=$(uci get mwan3.wan$retresult.enabled)
				if [ ! -z $ENB ]; then
					uci set mwan3.wan$retresult.enabled=0
					uci commit mwan3
				fi
			fi
			# NOTE(review): this reads modem$CURRMODEM, but CURRMODEM is not
			# set on the remove path - $retresult looks intended; confirm.
			SMS=$(uci get modem.modem$CURRMODEM.sms)
			if [ $SMS = 1 ]; then
				if [ -e /usr/lib/sms/stopsms ]; then
					/usr/lib/sms/stopsms $CURRMODEM
				fi
			fi
			# Reset the wan section back to a harmless DHCP placeholder.
			ifdown wan$retresult
			uci delete network.wan$retresult
			uci set network.wan$retresult=interface
			uci set network.wan$retresult.proto=dhcp
			uci set network.wan$retresult.ifname=" "
			uci set network.wan$retresult.metric=$retresult"0"
			uci commit network
			# Kill the per-modem helpers and drop their symlinks.
			killall -9 getsignal$retresult
			rm -f $ROOTER_LINK/getsignal$retresult
			killall -9 reconnect$retresult
			rm -f $ROOTER_LINK/reconnect$retresult
			killall -9 create_proto$retresult
			rm -f $ROOTER_LINK/create_proto$retresult
			killall -9 processsms$retresult
			rm -f $ROOTER_LINK/processsms$retresult
			killall -9 con_monitor$retresult
			rm -f $ROOTER_LINK/con_monitor$retresult
			killall -9 mbim_monitor$retresult
			rm -f $ROOTER_LINK/mbim_monitor$retresult
			$ROOTER/signal/status.sh $retresult "No Modem Present"
			$ROOTER/log/logger "Disconnect (Removed) Modem #$retresult"
			display_top; display "Remove : $DEVICENAME : Modem$retresult"; display_bottom
			check_all_empty
			rm -f /tmp/usbwait
			rm -f /tmp/mdown$retresult
			rm -f /tmp/msimdata$retresult
			rm -f /tmp/msimnum$retresult
			echo "0" > /tmp/modgone
		fi
	else
		# Not a modem slot: maybe the webcam or printer was unplugged.
		IDV=$(uci get mjpg-streamer.camera.idv)
		if [ ! -z $IDV ]; then
			if [ $DEVICENAME = $IDV ]; then
				uci delete mjpg-streamer.camera
				uci commit mjpg-streamer
				/etc/init.d/mjpg-streamer stop
				log "Stop MJPEG-Streamer"
			fi
		fi
		IDV=$(uci get p910nd.printer.idv)
		if [ ! -z $IDV ]; then
			if [ $DEVICENAME = $IDV ]; then
				uci delete p910nd.printer
				uci commit p910nd
				if [ ! -d /sys$DEVPATH/*/lp0 -a -f /var/run/p9100d.pid ]; then
					log "USB Printer device unplugged, stopping p910nd"
					/etc/init.d/p910nd stop
					# p910nd does not seem to remove .pid file when stopped, removing it manually
					rm /var/run/p9100d.pid
				fi
			fi
		fi
	fi
fi
# Webcam motion events are only logged; kept as an `if` so the script's
# final exit status stays 0 when ACTION is something else.
if [ "$ACTION" = "motion" ]; then
	logger webcam motion event
fi
| true
|
503c6ca23bde658c57e69b5f89739819726f0554
|
Shell
|
horizontalz/Scripts
|
/DO-postflight
|
UTF-8
| 3,774
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# Standard settings for images.
# Script is meant to be run as a postflight script in a .pkg file. Also installs startup settings script as a Launchd item which is inside the package /Contents/Resources directory.
# Installer conventions: $1 = package path, $3 = target volume root.
# Checks the system default user template for the presence of
# the Library/Preferences directory. If the directory is not found,
# it is created.
for USER_TEMPLATE in "/System/Library/User Template"/*
do
	if [ ! -d "${USER_TEMPLATE}"/Library/Preferences ]
	then
		mkdir -p "${USER_TEMPLATE}"/Library/Preferences
	fi
	if [ ! -d "${USER_TEMPLATE}"/Library/Preferences/ByHost ]
	then
		mkdir -p "${USER_TEMPLATE}"/Library/Preferences/ByHost
	fi
done
##### Begin Declare Variables Used by Script #####
# Declare 'defaults'.
defaults="/usr/bin/defaults"
# Declare directory variables.
PKG_DIR="$1/Contents/Resources"
SCRIPTS_DIR="$3/Library/Scripts/PAUSD"
LAUNCHD_DIR="$3/Library/LaunchDaemons"
PRIVETC_DIR="$3/private/etc"
PREFS_DIR="$3/Library/Preferences"
USERPREFS_DIR="$3/System/Library/User Template/English.lproj/Library/Preferences"
NONLOC_USERPREFS_DIR="$3/System/Library/User Template/Non_localized/Library/Preferences"
ROOT="$3/"
UPDATE_DYLD="$3/usr/bin/update_dyld_shared_cache" # Set variable to location of update_dyld_shared_cache command on target volume.
##### End Declare Variables Used by Script #####
##### Begin Preference Setting #####
# These settings can be set on the target volume before startup.
# Run update_dyld_shared_cache
$UPDATE_DYLD -universal_boot -root $ROOT
# Display login window as Name and Password.
$defaults write "${PREFS_DIR}/com.apple.loginwindow" SHOWFULLNAME -bool false
#Starts the Flurry screensaver over the login window when idle for 60 seconds
$defaults write "${PREFS_DIR}/com.apple.screensaver" loginWindowIdleTime -int 60
$defaults write "${PREFS_DIR}/com.apple.screensaver" loginWindowModulePath "/System/Library/Screen Savers/Flurry.saver"
# Set Safari Preferences.
$defaults write "${USERPREFS_DIR}/com.apple.Safari" HomePage "http://www.pausd.org/"
$defaults write "${USERPREFS_DIR}/com.apple.Safari" ShowStatusBar -bool YES
# Set Finder Preferences.
$defaults write "${USERPREFS_DIR}/com.apple.finder" ShowMountedServersOnDesktop -bool YES
$defaults write "${USERPREFS_DIR}/com.apple.finder" ShowHardDrivesOnDesktop -bool YES
$defaults write "${USERPREFS_DIR}/com.apple.finder" ShowStatusBar -bool YES
# Enables Double Click Title bar to Minimize Window
$defaults write "${NONLOC_USERPREFS_DIR}/.GlobalPreferences" AppleMiniaturizeOnDoubleClick -bool TRUE
# No .ds-store files on Network Shares
$defaults write "${PREFS_DIR}/com.apple.desktopservices" DSDontWriteNetworkStores true
# Globally Set Expanded Print Dialogue Box.
$defaults write "${PREFS_DIR}/.GlobalPreferences" PMPrintingExpandedStateForPrint -bool TRUE
# Globally Set Always Show Scroll Bars.
$defaults write "${PREFS_DIR}/.GlobalPreferences" AppleShowScrollBars -string Always
# Disable Mouse reverse scrolling.
# $defaults write "${USERPREFS_DIR}/.GlobalPreferences" com.apple.swipescrolldirection -bool false
# Set Dark UI (only applied on OS X 10.10, which introduced the option).
VersionCheck=$(sw_vers -productVersion | cut -c 1-5)
VERSION=10.10
if [[ "$VERSION" == "$VersionCheck" ]]
then
	$defaults write "${USERPREFS_DIR}/.GlobalPreferences" AppleInterfaceStyle -string Dark
fi
# Disable Time Machine Offers.
$defaults write "${PREFS_DIR}/com.apple.TimeMachine" DoNotOfferNewDisksForBackup -bool YES
# Disable Time Machine AutoBackup
$defaults write "${PREFS_DIR}/com.apple.TimeMachine" AutoBackup 0
# Firewall Settings | 0 = Off | 1 = On For Specific Services | 2 = On For Essential Services
$defaults write "${PREFS_DIR}/com.apple.alf" globalstate -int 0
##### End Preferences Setting #####
exit 0
| true
|
f1d9628d895d09161bdf5ab32419f3681cb3f53c
|
Shell
|
BasketaksSjargong/dotfiles
|
/wallpapers/wallpaper_time.sh
|
UTF-8
| 610
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Change the wallpaper according to time of day
# Script Source: http://www.reddit.com/user/orfix
# Wallpapers Source: http://imgur.com/a/VZ9H2
# Current hour (00-23) selects one of the numbered wallpapers.
TIME=$(date +"%H")
FOLDER='/home/ivar/dotfiles/wallpapers'
export DISPLAY=:0.0 # run "env | grep DISPLAY"
# Map the hour to an image number (every hour is covered), then apply it.
case ${TIME} in
  0[5-7])        img=2 ;;
  0[8-9]|1[0-1]) img=3 ;;
  1[2-6])        img=4 ;;
  1[7-9]|20)     img=5 ;;
  21)            img=6 ;;
  2[2-3]|0[0-4]) img=7 ;;
esac
feh --bg-scale ${FOLDER}/${img}.png
| true
|
fd659b73b5d713b7e71d7cfb41ecf434e677995a
|
Shell
|
kasthack-labs/smart_parser
|
/tools/robots/dlrobot/tests/declaration_link_long/run.sh
|
UTF-8
| 265
| 2.84375
| 3
|
[] |
no_license
|
# Regression test: crawl the test web server and diff the extracted
# declaration links against the committed .found_links fixtures.
DUMMY=$1
WEB_ADDR=$2
set -e
function check_folder() {
	local folder=$1
	# tr -d '\r' normalises Windows line endings before comparing with git.
	python3 ../declaration_link/test.py --web-addr $WEB_ADDR --start-page $folder/sved.html | tr -d '\r' > $folder.found_links
	# Non-zero exit (test failure) if the freshly generated file differs.
	git diff --exit-code $folder.found_links
}
check_folder admkrsk
| true
|
bf5701b29834f8d7ac6e98ce70681080f80b884c
|
Shell
|
colinblack/game_server
|
/fmh5/tools/lastdaycoin_rank.sh
|
UTF-8
| 1,687
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a JSON ranking of shops by yesterday's coin income, one entry per
# database shard, and write it to the web root for the game client.
########### Database connection settings ################
#HOSTNAMES=("10.214.195.42" "10.214.194.181" "10.214.197.39" "10.214.194.72")
HOSTNAMES=("192.168.100.237")
PORT="3306"
USERNAME="root"
PASSWORD="1234"
DBNAME="sg17_s"
########## Other configuration #########
#PERDBNUM=10
#DBNUM=40
PERDBNUM=3
DBNUM=3
LIMITNUM=100
lastDayCoinRank="/data/release/sg17/s1/webroot/rankdata/lastdaycoin_rank.json"
########################### Shop ranking by yesterday's coin income #####################
########## Unix timestamp for the start of yesterday (used as row id)
MYDATE=$(date -d -1day +%Y%m%d);
STATICTS=$(date -d ${MYDATE} +%s)
########## Join shopSellCoin with base per shard and emit JSON via awk
echo '{"rank":[' > ${lastDayCoinRank}
count=0
for db in ${HOSTNAMES[@]};
do
	MYSQL="mysql -h${db} -P${PORT} -u${USERNAME} -p${PASSWORD}"
	for ((index=0;index<${PERDBNUM};index++))
	do
		select_sql="select base.uid,base.level,value,base.accthumbsup from (select uid,value from shopSellCoin where id = ${STATICTS} order by value desc limit ${LIMITNUM})a join base on(a.uid=base.uid)"
		dbnum=$[count*PERDBNUM+index];
		result="$($MYSQL sg17_s$dbnum -e "$select_sql")"
		echo "serverid $dbnum $result" | awk -F ' ' 'BEGIN{flag=0}{
			if($1=="serverid") {
				print "{"
				print "\"serverid\":"$2 ","
				print "\"lastdaycoin_rank\":["
			}
			else{
				if(flag==1)
					print ","
				print"{\"uid\":"$1 ",\"level\":"$2 ",\"lastdaycoin\":" $3 ",\"accthumbsup\":"$4 "}"
				flag=1;
			}
		}' >> ${lastDayCoinRank}
		if [ $dbnum == $[DBNUM - 1] ]; then
			echo ']}' >> ${lastDayCoinRank}
		else
			echo ']},' >> ${lastDayCoinRank}
		fi
	done
	count=$[count+1];
done
echo ']}' >>${lastDayCoinRank}
| true
|
fb4c17425f438a812cfbd0c67ca49bd8bb51868b
|
Shell
|
shawnallen85/ansible-fedora-workstation
|
/setup-ansible.sh
|
UTF-8
| 378
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install a pinned Ansible build on Fedora after confirming with the user.
# dnf needs root, so refuse to continue otherwise.
if (( $EUID != 0 )); then
    echo "Please run as root"
    # Non-zero status so callers can detect the failure
    # (previously a bare `exit`, which reported success).
    exit 1
fi
# Ask for confirmation; loop until a recognisable answer is given.
while true; do
    read -r -p "Do you wish to install Ansible? " yn
    case $yn in
        [Yy]* ) break;;
        [Nn]* ) exit;;
        * ) echo "Please answer Yes or No.";;
    esac
done
# /etc/os-release supplies VERSION_ID (e.g. 31) to select the Fedora build.
source /etc/os-release
ANSIBLE_VERSION=2.9.2-1.fc${VERSION_ID}
dnf install ansible-${ANSIBLE_VERSION} -y
| true
|
0005799c28fbc85010089685f678b2061c51d160
|
Shell
|
Sy2n0/magpieCTF-2021
|
/challenges/web-exploitation/sweatin-in-latex/source/cleanpdfdir.sh
|
UTF-8
| 122
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Purge the generated PDFs once the cache directory exceeds 100 MB.
DIR=/var/www/html/pdf/
# `du -sm` prints "<megabytes>\t<path>"; anchor the match so digits that
# may appear in the path can never be mistaken for the size (the previous
# unanchored \d+ matched any digit run in the line).
SIZE=$(du -sm "$DIR" | grep -oP "^\d+")
# Default to 0 when du failed, so an unreadable dir never triggers deletion.
if [[ ${SIZE:-0} -gt 100 ]]; then
	rm -r -- "$DIR"*.pdf
fi
| true
|
3f58b39b714415f7d276b5c18fb6a62fa67e24df
|
Shell
|
jcwillox/ds-client
|
/tests/run_test_results.sh
|
UTF-8
| 382
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Compile the Java sources and run the stage-2 scheduler tests against them.
shopt -s globstar
# ensure we are in the same directory as the script
DIR=$(dirname "$0")
cd "$DIR" || exit
echo "INFO: compiling java files"
# globstar lets ** recurse, compiling every .java under src/main into cwd.
javac "$(realpath ../src/main)"/**/*.java -d .
export NO_LOGGING=true
echo "INFO: running tests"
# -n: no summary rewrite, -c: config dir; extra args are forwarded.
./test_results -n -c S2testConfigs/ "java main.Main" "$@"
rm -rf ./main && echo "INFO: removed generated class files"
| true
|
2b89eb86fdb684566dd0085b888a8169fbe1b162
|
Shell
|
sbelharizi/ProvisionOnDemand
|
/aws/VM/docker/create.sh
|
UTF-8
| 511
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision the docker VM on AWS by delegating to the shared terraform
# create script (sourced so its variables stay visible here).
echo "let's start !"
#echo $POD_PROJECT_LOCATION/common/utils/terraform_script/create.sh
. $POD_PROJECT_LOCATION/common/utils/terraform_script/create.sh
#terraform init
#terraform plan -out plan.terraform
#echo "APPLICATION DU PLAN TERRAFORM"
#terraform apply "plan.terraform"
# Source the external IPs from the terraform outputs so we can retrieve
# them and, if needed, push packages to the associated VMs.
#source terraform_outputs.sh
echo "liste des adresses ip : $TF_VAR_POD_GCP_EXTERNAL_IP"
| true
|
f67fe0dbdc36363bb16fe69e7a8003950615fd3f
|
Shell
|
FdLSifu/pulseicon
|
/pulse/ConfigurePulse_x86_64.sh
|
UTF-8
| 24,063
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Copyright (c) 2017-2020 by Pulse Secure, LLC. All rights reserved
INSTALLDIR=/usr/local/pulse
LOG=$INSTALLDIR/postinstall.log
# Home of the user who invoked sudo; the connection store lives there.
HOMEDIR=$(eval echo ~$SUDO_USER)
# Redirect the stdout/stderr into postinstall log
echo "Starting Post Install Script " > $LOG
# no-same-owner is required to get root permission
# OS families grouped by the webkitgtk flavour their UI build links against.
WEBKITGTK_1_SUPPORTED_OSTYPE_VERSION=( UBUNTU_14 UBUNTU_15 UBUNTU_16_17_18 UBUNTU_19 FEDORA CENTOS_6 DEBIAN_8_9 THINPRO_7 )
WEBKITGTK_1_MINIMAL_SUPPORTED_OSTYPE_VERSION=( UBUNTU_14 CENTOS_6 UNSUPPORTED )
WEBKITGTK_3_SUPPORTED_OSTYPE_VERSION=( CENTOS_7 RHEL_7 )
PACKAGE_TYPE_RPM=1
PACKAGE_TYPE_DEB=2
SCRNAME=`basename $0`
readMeEchoMsg="Please refer /usr/local/pulse/README for instructions to launch the Pulse Client"
SUPPORTED_OSTYPES_LIST=( CENTOS_6 CENTOS_7 CENTOS_8 UBUNTU_14 UBUNTU_15 UBUNTU_16_17_18 UBUNTU_19 FEDORA FEDORA_27 FEDORA_30 FEDORA_31 FEDORA_32 RHEL_7 RHEL_8 DEBIAN_8_9 DEBIAN_10 THINPRO_7 UNSUPPORTED)
#RPM Based
CENTOS_6_DEPENDENCIES=( glibc \
nss-softokn-freebl \
zlib \
glib-networking \
webkitgtk \
xulrunner\
libXmu)
CENTOS_6_DEPENDENCIES_WITH_VERSION=( glibc \
nss \
zlib \
glib-networking \
webkitgtk \
xulrunner \
libXmu)
CENTOS_7_DEPENDENCIES=( glibc \
nss-softokn-freebl \
zlib \
glib-networking \
webkitgtk3)
CENTOS_7_DEPENDENCIES_WITH_VERSION=( glibc \
nss \
zlib \
glib-networking \
webkitgtk3)
FEDORA_DEPENDENCIES=( glibc \
nss-softokn-freebl \
zlib \
glib-networking \
webkitgtk- \
xulrunner \
mozjs17)
FEDORA_DEPENDENCIES_WITH_VERSION=( glibc \
nss \
zlib \
glib-networking \
webkitgtk \
xulrunner \
mozjs17)
FEDORA_32_DEPENDENCIES=( glibc \
nss-softokn-freebl \
zlib \
glib-networking)
FEDORA_32_DEPENDENCIES_WITH_VERSION=( glibc \
nss \
zlib \
glib-networking)
FEDORA_31_DEPENDENCIES=( glibc \
nss-softokn-freebl \
zlib \
glib-networking)
FEDORA_31_DEPENDENCIES_WITH_VERSION=( glibc \
nss \
zlib \
glib-networking)
FEDORA_30_DEPENDENCIES=( glibc \
nss-softokn-freebl \
zlib \
glib-networking)
FEDORA_30_DEPENDENCIES_WITH_VERSION=( glibc \
nss \
zlib \
glib-networking)
RHEL_7_DEPENDENCIES=( glibc \
nss-softokn-freebl \
zlib \
glib-networking \
webkitgtk3)
RHEL_7_DEPENDENCIES_WITH_VERSION=( glibc \
nss \
zlib \
glib-networking \
webkitgtk3-2.4.9-5.el7)
#Debian Based
UBUNTU_14_DEPENDENCIES=( libc6 \
libwebkitgtk-1 \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
UBUNTU_14_DEPENDENCIES_WITH_VERSION=( libc6 \
libwebkitgtk-1.0-0 \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
UBUNTU_15_DEPENDENCIES=( libc6 \
libwebkitgtk-1 \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
UBUNTU_15_DEPENDENCIES_WITH_VERSION=( libc6 \
libwebkitgtk-1.0-0 \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
UBUNTU_16_17_18_DEPENDENCIES=( libc6 \
webkitgtk \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
UBUNTU_16_17_18_DEPENDENCIES_WITH_VERSION=( libc6 \
libwebkitgtk-1.0-0 \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
THINPRO_7_DEPENDENCIES=( libc6 \
webkitgtk \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
THINPRO_7_DEPENDENCIES_WITH_VERSION=( libc6 \
libwebkitgtk-1.0-0 \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
UBUNTU_19_DEPENDENCIES=( libc6 \
libgtk2.0-0 \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
UBUNTU_19_DEPENDENCIES_WITH_VERSION=( libc6 \
libgtk2.0-0 \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
DEBIAN_8_9_DEPENDENCIES=( libc6 \
webkitgtk-1 \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
DEBIAN_8_9_DEPENDENCIES_WITH_VERSION=( libc6 \
libwebkitgtk-1.0-0 \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
DEBIAN_10_DEPENDENCIES=( libc6 \
libgtk2.0-0 \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
DEBIAN_10_DEPENDENCIES_WITH_VERSION=( libc6 \
libgtk2.0-0 \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
# Turn SUPPORTED_OSTYPES_LIST into named readonly index constants
# (CENTOS_6=0, CENTOS_7=1, ...) so OS types can be referenced by name.
tam=${#SUPPORTED_OSTYPES_LIST[@]}
for ((i=0; i < $tam; i++)); do
	name=${SUPPORTED_OSTYPES_LIST[i]}
	declare -r ${name}=$i
done
#determine the OS TYPE
determine_os_type() {
if [ -f /etc/centos-release ]; then
OS_MAJOR_VERSION=$(cat /etc/centos-release | grep -o '.[0-9]'| head -1|sed -e 's/ //')
if [ $OS_MAJOR_VERSION = 6 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$CENTOS_6]}
elif [ $OS_MAJOR_VERSION = 7 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$CENTOS_7]}
elif [ $OS_MAJOR_VERSION = 8 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$CENTOS_8]}
else
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UNSUPPORTED]}
fi
elif [ -f /etc/fedora-release ]; then
release=$(cat /etc/fedora-release | cut -d' ' -f3)
if [ $release -eq "27" ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$FEDORA_27]}
elif [ $release -eq "30" ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$FEDORA_30]}
elif [ $release -eq "31" ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$FEDORA_31]}
elif [ $release -eq "32" ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$FEDORA_32]}
else
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$FEDORA]}
fi
elif [ -f /etc/redhat-release ]; then
OS_MAJOR_VERSION=$(cat /etc/redhat-release | grep -o '.[0-9]'| head -1|sed -e 's/ //')
if [ $OS_MAJOR_VERSION = 7 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$RHEL_7]}
elif [ $OS_MAJOR_VERSION = 8 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$RHEL_8]}
else
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UNSUPPORTED]}
fi
else
OSNAME=$(lsb_release -d |grep -o "Ubuntu")
if [ "X$OSNAME" != "X" ]; then
UBUNTU_VER=$(lsb_release -d | grep -o '.[0-9]*\.'| head -1|sed -e 's/\s*//'|sed -e 's/\.//')
if [ $UBUNTU_VER = 14 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UBUNTU_14]}
elif [ $UBUNTU_VER = 15 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UBUNTU_15]}
elif [ $UBUNTU_VER = 16 ] || [ $UBUNTU_VER = 17 ] || [ $UBUNTU_VER = 18 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UBUNTU_16_17_18]}
elif [ $UBUNTU_VER = 19 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UBUNTU_19]}
else
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UNSUPPORTED]}
fi
else
OSNAME=$(lsb_release -d | grep -o "ThinPro")
if [ "X$OSNAME" != "X" ]; then
THINPRO_MAJOR_VERSION=$(lsb_release -d | grep -o '[0-9]'| head -1|sed -e 's/ //')
if [ $THINPRO_MAJOR_VERSION = 7 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$THINPRO_7]}
else
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UNSUPPORTED]}
fi
else
if [ -f /etc/debian_version ]; then
DEBIAN_MAJOR_VERSION=$(cat /etc/debian_version | grep -o '[0-9]'| head -1|sed -e 's/ //')
DEB_VER=$(lsb_release -sr)
if [ $DEBIAN_MAJOR_VERSION = 8 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$DEBIAN_8_9]}
elif [ $DEBIAN_MAJOR_VERSION = 9 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$DEBIAN_8_9]}
elif [ $DEB_VER = 10 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$DEBIAN_10]}
else
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UNSUPPORTED]}
fi
else
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UNSUPPORTED]}
fi
fi
fi
fi
}
# Select the pulseUi build matching this distro's webkitgtk flavour,
# then delete the builds (and bundled libraries) that do not apply.
install_binaries() {
	if [[ ${WEBKITGTK_3_SUPPORTED_OSTYPE_VERSION[@]} =~ ${OS_TYPE} ]] ; then
		mv $INSTALLDIR/pulseUi_centos_7_x86_64 $INSTALLDIR/pulseUi
		mv $INSTALLDIR/libpulseui.so_centos_7_x86_64 $INSTALLDIR/libpulseui.so
	elif [[ ${WEBKITGTK_1_MINIMAL_SUPPORTED_OSTYPE_VERSION[@]} =~ ${OS_TYPE} ]] ; then
		mv $INSTALLDIR/pulseUi_centos_6_x86_64 $INSTALLDIR/pulseUi
		mv $INSTALLDIR/libpulseui.so_centos_6_x86_64 $INSTALLDIR/libpulseui.so
	else
		mv $INSTALLDIR/pulseUi_Ubuntu_16_x86_64 $INSTALLDIR/pulseUi
		mv $INSTALLDIR/libpulseui.so_Ubuntu_16_x86_64 $INSTALLDIR/libpulseui.so
	fi
	#Remove other binaries which are not for this distribution.
	rm $INSTALLDIR/pulseUi_* $INSTALLDIR/libpulseui.so_*
	# The bundled webkitgtk-1 libraries are only needed on Fedora 27.
	if [ $OS_TYPE != ${SUPPORTED_OSTYPES_LIST[$FEDORA_27]} ]; then
		if [ -f /usr/local/pulse/libjavascriptcoregtk-1.0.so.0.16.19 ]; then
			sudo rm /usr/local/pulse/libjavascriptcoregtk-1.0.so.0.16.19
		fi
		if [ -f /usr/local/pulse/libwebkitgtk-1.0.so.0.22.17 ]; then
			sudo rm /usr/local/pulse/libwebkitgtk-1.0.so.0.22.17
		fi
	fi
	# The remaining bundled libraries are only needed on the distros listed
	# in the condition below; everywhere else they are removed if present.
	if [[ ($OS_TYPE != ${SUPPORTED_OSTYPES_LIST[$UBUNTU_19]}) && ($OS_TYPE != ${SUPPORTED_OSTYPES_LIST[$DEBIAN_10]}) && ($OS_TYPE != ${SUPPORTED_OSTYPES_LIST[$FEDORA_30]}) && ($OS_TYPE != ${SUPPORTED_OSTYPES_LIST[$FEDORA_31]}) && ($OS_TYPE != ${SUPPORTED_OSTYPES_LIST[$FEDORA_32]}) && ($OS_TYPE != ${SUPPORTED_OSTYPES_LIST[$RHEL_8]}) && ($OS_TYPE != ${SUPPORTED_OSTYPES_LIST[$CENTOS_8]}) ]]; then
		if [ -f /usr/local/pulse/libwebkitgtk-1.0.so.0 ]; then
			sudo rm /usr/local/pulse/libwebkitgtk-1.0.so.0
		fi
		if [ -f /usr/local/pulse/libjavascriptcoregtk-1.0.so.0 ]; then
			sudo rm /usr/local/pulse/libjavascriptcoregtk-1.0.so.0
		fi
		if [ -f /usr/local/pulse/libicui18n.so.60.2 ]; then
			sudo rm /usr/local/pulse/libicui18n.so.60.2
		fi
		if [ -f /usr/local/pulse/libicuuc.so.60.2 ]; then
			sudo rm /usr/local/pulse/libicuuc.so.60.2
		fi
		if [ -f /usr/local/pulse/libicudata.so.60.2 ]; then
			sudo rm /usr/local/pulse/libicudata.so.60.2
		fi
		if [ -f /usr/local/pulse/libjpeg.so.8 ]; then
			sudo rm /usr/local/pulse/libjpeg.so.8
		fi
		# BUG FIX: was `[ /usr/local/pulse/libwebp.so.6 ]` - a non-empty
		# string test that is always true; the missing -f made rm run (and
		# print an error) even when the library was absent.
		if [ -f /usr/local/pulse/libwebp.so.6 ]; then
			sudo rm /usr/local/pulse/libwebp.so.6
		fi
	fi
}
# Unpack the client payload, install the desktop entry and the man page.
handle_common_installation() {
	# --no-same-owner keeps extraction working when tar runs unprivileged.
	tar --no-same-owner -xzf /usr/local/pulse/pulse.tgz -C /usr/local/pulse >/dev/null
	# setuid root: pulsesvc must run privileged when started by a normal user.
	chmod +rws /usr/local/pulse/pulsesvc
	mv /usr/local/pulse/pulseUi.desktop /usr/share/applications
	#Remove the 32 bit libsoup lib added as part of 32 bit previous installation.
	if [ -f /usr/local/pulse/libsoup-2.4.so.1 ]; then
		rm /usr/local/pulse/libsoup-2.4.so.1
	fi
	mkdir -p /usr/local/share/man/man1/
	if [ -f /usr/local/pulse/pulse.1.gz ]; then
		mv /usr/local/pulse/pulse.1.gz /usr/local/share/man/man1/
	fi
}
# Maintainer-script entry point.  Invoked from a Debian "prerm" script it
# stops the client and removes the installation; otherwise (deb install)
# it only fixes ownership of the files the invoking user needs.
handle_uninstallation() {
	if [ "X$SCRNAME" = "XConfigurePulse_x86_64.sh" ]; then
		PKG=$PACKAGE_TYPE_RPM
	else
		PKG=$PACKAGE_TYPE_DEB
		UNINSTALL=`echo $SCRNAME | grep -i prerm`
		if [ "X$UNINSTALL" != "X" ]; then
			PID_PULSEUI=$(pidof pulseUi)
			PID_PULSESVC=$(pidof pulsesvc)
			# BUG FIX: the previous `kill ... 2&>/dev/null` passed a literal
			# "2" argument to kill (signalling PID 2).  Use a real
			# redirection and only signal when a PID was actually found.
			[ -n "$PID_PULSEUI" ] && kill -s SIGKILL $PID_PULSEUI >/dev/null 2>&1
			[ -n "$PID_PULSESVC" ] && kill -s SIGKILL $PID_PULSESVC >/dev/null 2>&1
			rm -rf /usr/local/pulse/*
			rmdir /usr/local/pulse
			rm -f /usr/share/applications/pulseUi.desktop
			rm -f /usr/local/share/man/man1/pulse.1.gz
			# Package managers set DEBIAN_FRONTEND=noninteractive; keep the
			# user's connection store in that case instead of prompting.
			if [ "${DEBIAN_FRONTEND}" = "noninteractive" ]
			then
				REPLY=n
			else
				read -p "Do you want to clean up the configuration? [Yy/Nn] " -n 1 -r
				echo # (optional) move to a new line
			fi
			if [[ $REPLY =~ ^[Yy]$ ]]
			then
				rm -f $HOMEDIR/.pulse_secure/pulse/.pulse_Connections.txt
			fi
			exit
		fi
		chown $USER: /usr/local/pulse/PulseClient_x86_64.sh
		chown $USER: /usr/local/pulse/version.txt
		chown $USER: /usr/local/pulse/pulse.tgz
	fi
}
check_missing_dependencies() {
if [ $OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$UNSUPPORTED]} ]; then
return
fi
isRpmBased=0
isDebBased=0
dependencyListName=${OS_TYPE}_DEPENDENCIES
dependencyListNameWithVersion=${OS_TYPE}_DEPENDENCIES_WITH_VERSION
if [[ ($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$CENTOS_6]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$CENTOS_7]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$CENTOS_8]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$FEDORA]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$FEDORA_27]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$FEDORA_30]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$FEDORA_31]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$FEDORA_32]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$RHEL_7]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$RHEL_8]}) ]]; then
isRpmBased=1
elif [[ ($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$UBUNTU_14]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$UBUNTU_15]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$UBUNTU_16_17_18]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$UBUNTU_19]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$DEBIAN_8_9]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$THINPRO_7]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$DEBIAN_10]}) ]]; then
isDebBased=1
fi
if [ $isRpmBased = 1 ]; then
eval "depListArr=(\${${dependencyListName}[@]})"
eval "depListArrWithVersion=(\${${dependencyListNameWithVersion}[@]})"
tam=${#depListArr[@]}
PKGREQ=""
for ((i=0; i < $tam; i++)); do
depPkgName=${depListArr[i]}
curPkgVar=`rpm -qa | grep -i $depPkgName | grep -i "x86_64"`
if [ "X$curPkgVar" = "X" ]; then
echo "$depPkgName is missing in the machine" > $LOG
PKGREQ="$PKGREQ ${depListArrWithVersion[i]}"
fi
done
if [ "X" != "X$PKGREQ" ]; then
# Install respective packages based on the current installation
echo ""
echo "Please execute below commands to install missing dependent packages "
for i in `echo $PKGREQ`
do
if [ $OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$FEDORA]} ]; then
echo "dnf install $i"
else
echo "yum install $i"
fi
done
echo ""
echo "OR"
echo "You can install the missing dependency packages by running the below script "
echo " /usr/local/pulse/PulseClient_x86_64.sh install_dependency_packages"
echo ""
fi
if [ $OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$FEDORA_27]} ]; then
if [[ -f /usr/local/pulse/libjavascriptcoregtk-1.0.so.0.16.19 && -f /usr/local/pulse/libwebkitgtk-1.0.so.0.22.17 ]]; then
if [[ ! -f /usr/local/pulse/libjavascriptcoregtk-1.0.so && ! -f /usr/local/pulse/libjavascriptcoregtk-1.0.so.0 ]]; then
sudo ln -s /usr/local/pulse/libjavascriptcoregtk-1.0.so.0.16.19 /usr/local/pulse/libjavascriptcoregtk-1.0.so
sudo ln -s /usr/local/pulse/libjavascriptcoregtk-1.0.so.0.16.19 /usr/local/pulse/libjavascriptcoregtk-1.0.so.0
fi
if [[ ! -f /usr/local/pulse/libwebkitgtk-1.0.so && ! -f /usr/local/pulse/libwebkitgtk-1.0.so.0 ]]; then
sudo ln -s /usr/local/pulse/libwebkitgtk-1.0.so.0.22.17 /usr/local/pulse/libwebkitgtk-1.0.so
sudo ln -s /usr/local/pulse/libwebkitgtk-1.0.so.0.22.17 /usr/local/pulse/libwebkitgtk-1.0.so.0
fi
fi
elif [[ ($OS_TYPE == ${SUPPORTED_OSTYPES_LIST[$FEDORA_30]}) || ($OS_TYPE == ${SUPPORTED_OSTYPES_LIST[$FEDORA_31]}) || ($OS_TYPE == ${SUPPORTED_OSTYPES_LIST[$FEDORA_32]}) || ($OS_TYPE == ${SUPPORTED_OSTYPES_LIST[$RHEL_8]}) || ($OS_TYPE == ${SUPPORTED_OSTYPES_LIST[$CENTOS_8]}) ]]; then
if [[ -f /usr/local/pulse/libwebkitgtk-1.0.so.0 && \
-f /usr/local/pulse/libjavascriptcoregtk-1.0.so.0 && \
-f /usr/local/pulse/libicui18n.so.60.2 && \
-f /usr/local/pulse/libicuuc.so.60.2 && -f /usr/local/pulse/libicudata.so.60.2 && \
-f /usr/local/pulse/libjpeg.so.8 && -f /usr/local/pulse/libwebp.so.6 ]]; then
if [[ ($OS_TYPE != ${SUPPORTED_OSTYPES_LIST[$CENTOS_8]}) || ($OS_TYPE != ${SUPPORTED_OSTYPES_LIST[$RHEL_8]}) ]]; then
sudo ln -s /usr/local/pulse/libicui18n.so.60.2 /usr/local/pulse/libicui18n.so
sudo ln -s /usr/local/pulse/libicui18n.so.60.2 /usr/local/pulse/libicui18n.so.60
sudo ln -s /usr/local/pulse/libicuuc.so.60.2 /usr/local/pulse/libicuuc.so
sudo ln -s /usr/local/pulse/libicuuc.so.60.2 /usr/local/pulse/libicuuc.so.60
sudo ln -s /usr/local/pulse/libicudata.so.60.2 /usr/local/pulse/libicudata.so
sudo ln -s /usr/local/pulse/libicudata.so.60.2 /usr/local/pulse/libicudata.so.60
fi
fi
if [[ ($OS_TYPE == ${SUPPORTED_OSTYPES_LIST[$CENTOS_8]}) || ($OS_TYPE == ${SUPPORTED_OSTYPES_LIST[$UBUNTU_19]}) ]]; then
sudo rm /usr/local/pulse/libicui18n.so.60.2
sudo rm /usr/local/pulse/libicuuc.so.60.2
sudo rm /usr/local/pulse/libicudata.so.60.2
fi
fi
echo $readMeEchoMsg # end of rpm based
elif [ $isDebBased = 1 ]; then
eval "depListArr=(\${${dependencyListName}[@]})"
eval "depListArrWithVersion=(\${${dependencyListNameWithVersion}[@]})"
tam=${#depListArr[@]}
PKGREQ=""
for ((i=0; i < $tam; i++)); do
depPkgName=${depListArr[i]}
curPkgVar=`dpkg-query -f '${binary:Package}\n' -W | grep -i $depPkgName | grep -i "amd64"`
if [ "X$curPkgVar" = "X" ]; then
PKGREQ="$PKGREQ ${depListArrWithVersion[i]}"
fi
done
if [ "X$PKGREQ" != "X" ]; then
echo "Please execute below commands to install missing dependent packages manually"
for i in `echo $PKGREQ`
do
echo "apt-get install $i"
done
echo ""
echo "OR"
echo "You can install the missing dependency packages by running the below script "
echo " /usr/local/pulse/PulseClient_x86_64.sh install_dependency_packages"
echo ""
fi
echo $readMeEchoMsg
echo ""
### UBUNTU 19 and DEBIAN 10 changes
if [[ ($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$UBUNTU_19]}) || ($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$DEBIAN_10]}) ]]; then
if [[ -f /usr/local/pulse/libwebkitgtk-1.0.so.0 && \
-f /usr/local/pulse/libjavascriptcoregtk-1.0.so.0 && \
-f /usr/local/pulse/libicui18n.so.60.2 && \
-f /usr/local/pulse/libicuuc.so.60.2 && -f /usr/local/pulse/libicudata.so.60.2 && \
-f /usr/local/pulse/libjpeg.so.8 ]]; then
sudo ln -s /usr/local/pulse/libicui18n.so.60.2 /usr/local/pulse/libicui18n.so
sudo ln -s /usr/local/pulse/libicui18n.so.60.2 /usr/local/pulse/libicui18n.so.60
sudo ln -s /usr/local/pulse/libicuuc.so.60.2 /usr/local/pulse/libicuuc.so
sudo ln -s /usr/local/pulse/libicuuc.so.60.2 /usr/local/pulse/libicuuc.so.60
sudo ln -s /usr/local/pulse/libicudata.so.60.2 /usr/local/pulse/libicudata.so
sudo ln -s /usr/local/pulse/libicudata.so.60.2 /usr/local/pulse/libicudata.so.60
fi
fi
if [ $OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$UBUNTU_19]} ]; then
sudo rm /usr/local/pulse/libjpeg.so.8
fi
else
echo "Unsupported version $OS_TYPE"
fi
}
update_build_info()
{
    # Fill in the BUILD_VERSION / BUILD_NUMBER placeholders in the bundled
    # about.html page using values parsed from the installed version.txt.
    #
    # Expected version.txt format (examples):
    #   "Version: 5.3R3 comment"   -> version is the 2nd field
    #   "Build Number: 1234"       -> build number is the 3rd field
    #
    # Globals: reads INSTALLDIR; sets BUILD_VERSION, BUILD_NUMBER.
    local version_file="${INSTALLDIR}/version.txt"
    local about_file="${INSTALLDIR}/html/about.html"

    # NR==1 guards against version.txt unexpectedly containing more than one
    # matching line, which would yield a multi-line value and break the sed
    # substitutions below.
    BUILD_VERSION=$(grep "Version: " "$version_file" | awk 'NR==1 {print $2}')
    BUILD_NUMBER=$(grep "Build Number: " "$version_file" | awk 'NR==1 {print $3}')

    if [ "X$BUILD_VERSION" != "X" ]; then
        # BUGFIX: the original used 'sed -ie', which GNU sed parses as -i with
        # backup suffix 'e', leaving a stray about.htmle file in the install
        # directory. '-i -e' edits in place with no backup.
        sed -i -e "s/BUILD_VERSION/${BUILD_VERSION}/g" "$about_file"
    fi
    if [ "X$BUILD_NUMBER" != "X" ]; then
        sed -i -e "s/BUILD_NUMBER/${BUILD_NUMBER}/g" "$about_file"
    fi
}
#Main
# Top-level driver: the functions below are defined earlier in this file and
# run unconditionally, in order, every time the script is invoked.
determine_os_type           # presumably sets OS_TYPE consumed by the checks above -- TODO confirm (definition not visible here)
handle_common_installation  # NOTE(review): defined earlier in file; behavior not visible from this chunk
install_binaries            # NOTE(review): defined earlier in file; behavior not visible from this chunk
handle_uninstallation       # NOTE(review): defined earlier in file; behavior not visible from this chunk
check_missing_dependencies  # reports missing rpm/deb dependency packages and fixes up bundled webkit/icu libs
update_build_info           # substitutes BUILD_VERSION/BUILD_NUMBER placeholders into html/about.html
# NOTE(review): the lines below are dataset-viewer boilerplate accidentally
# appended to the script; as bare text they are a shell syntax error ('|' at
# the start of a line), so they are commented out rather than removed.
# | true
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.