blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b6d52783d14d53e4d8e8dc1e10050184cef5d8ab
|
Shell
|
ripley57/CW_Tools
|
/tools/bash/arrays/demo06.sh
|
UTF-8
| 291
| 3.09375
| 3
|
[] |
no_license
|
# Description: Add new entries to an existing array.
declare -a Unix=('Debian' 'Red hat' 'Ubuntu' 'Suse' 'Fedora' 'UTS' 'OpenLinux');
echo
echo "BEFORE:"
# Quote the expansion so elements containing spaces or glob characters
# print verbatim (SC2086).
for t in "${Unix[@]}"
do
  echo "$t"
done
# Append two elements; "${Unix[@]}" keeps the existing elements intact.
Unix=("${Unix[@]}" "AIX" "HP-UX")
echo
echo "AFTER:"
for t in "${Unix[@]}"
do
  echo "$t"
done
| true
|
cd982110537b91ebe4c1594cdb7134375e2c9258
|
Shell
|
glowinthedark/qtpysmtpdaemon
|
/debian-build/build_deb.sh
|
UTF-8
| 997
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Build a Debian package for cutepiesmtpdaemon and install it locally.
# Expects the app sources one directory up from this script ($srcdir).
cd "$(dirname "$0")"
appname=cutepiesmtpdaemon
appdir=build
srcdir=..
# Parse the version out of the "VERSION = '...'" line in the Python source.
last_version=$(grep "VERSION =" $srcdir/$appname.py | cut -d "=" -f2 | tr -d " '")
# Set the version in the Debian control file.
sed -i "/Version:/c\Version: $last_version" control
echo Version updated to: $last_version
# Create the Debian package file-system layout.
mkdir -p $appname/{DEBIAN,usr}
mkdir -p $appname/usr/bin
mkdir -p $appname/usr/share/{applications,pixmaps,$appname}
mkdir -p $appname/usr/share/$appname/{data,lib}
# Copy metadata, launcher, desktop entry, icon and sources into place.
cp control $appname/DEBIAN/control
cp $appname.sh $appname/usr/bin/$appname
chmod +x $appname/usr/bin/$appname
cp $appname.desktop $appname/usr/share/applications/$appname.desktop
cp $srcdir/icons/$appname.png $appname/usr/share/pixmaps/$appname.png
cp $srcdir/{cutepiesmtpdaemon_py3.py,valid_encodings.py,cutesmtp_icons.py,LICENSE.txt} $appname/usr/share/$appname/
# Build the .deb and install it (gdebi alternative kept for reference).
dpkg --build $appname/ $appname-$last_version.deb
echo Installing package...
sudo dpkg -i $appname-$last_version.deb
#sudo gdebi $appname-$last_version.deb
exit 0
| true
|
cd064995763544143d0cc34dfb0eb787332760f9
|
Shell
|
MediaServe/zabbix-templates
|
/lvm/zlvm
|
UTF-8
| 4,319
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Zabbix LVM pool monitoring script
# https://github.com/MediaServe/zabbix-templates
# MediaServe International B.V.
# Thomas Lobker
# Print an error message (when one argument is given) or the full usage
# text, then exit 1. Serves both fatal-error and usage-help paths.
help () {
if [ ${#} -eq 1 ]; then
echo "ERROR: ${1}" 1>&2
exit 1
fi
echo "Usage: zlvm (-d [type]) | (-s|-u|-m [path])"
echo "Requires one method with one argument, output will be JSON for discovery or a float for other values"
echo
echo " -d [type] Discovery of available paths of the specified 'type'"
echo
echo " group: for a list of available logical volume groups"
echo " pool: for a list of available pools in all logical volume groups"
echo " volume: for a list of all available logical volumes"
echo
echo " -s [path] Get the total size of the specified 'path'"
echo " -u [path] Get the total usage (in percent) of the specified 'path'"
echo " -m [path] Get the total usage (in percent) of metadata of the specified 'path'"
echo
echo " Path of group, pool or logical volume: group[/pool[/volume]]"
echo " Example: vg0/lvol1/lv0"
echo
echo "Examples:"
echo " zlvm -d pool"
echo " zlvm -s vg0/pool1"
exit 1
}
# Strip leading and trailing whitespace from the joined arguments and
# print the result on stdout.
trim () {
	local s="$*"
	local lead="${s%%[![:space:]]*}"   # run of leading whitespace
	s="${s#"$lead"}"
	local tail="${s##*[![:space:]]}"   # run of trailing whitespace
	echo "${s%"$tail"}"
}
# Emit Zabbix low-level-discovery JSON for LVM objects.
#   $1 - object type: "group", "pool" or "volume"
# Builds a jq filter producing {#ZLVM_*} LLD macros from the vgs/lvs
# JSON reports; exits 0 on success, falls through to help otherwise.
discovery () {
case ${1} in
"group")
# FILTER="[ { \"{#ZLVM_GROUP}\": .[\"report\"][0][\"vg\"][].vg_name } ]"
FILTER="[(.[\"report\"][0][\"vg\"][] | { \"{#ZLVM_GROUP}\": .vg_name })]"
value=$( ${VGS} --nosuffix --units b --reportformat json -o vg_name | ${JQ} -Mca "${FILTER}" )
trim "${value}"
exit 0 ;;
"pool")
# Pools are the LVs whose pool_lv field is empty (top-level LVs).
# FILTER="{data: [(.[\"report\"][0][\"lv\"][] | select(.\"pool_lv\" == \"\"))]}"
FILTER="[(.[\"report\"][0][\"lv\"][] | select(.\"pool_lv\" == \"\") | { \"{#ZLVM_GROUP}\": .vg_name, \"{#ZLVM_POOL}\": .lv_name })]"
value=$( ${LVS} --nosuffix --units b --reportformat json -o lv_name,vg_name,pool_lv | ${JQ} -Mca "${FILTER}" )
trim "${value}"
exit 0 ;;
"volume")
# Volumes are the LVs that live inside a pool (non-empty pool_lv).
# FILTER="{data: [(.[\"report\"][0][\"lv\"][] | select((.\"pool_lv\" != \"\")))]}"
FILTER="[(.[\"report\"][0][\"lv\"][] | select(.\"pool_lv\" != \"\") | { \"{#ZLVM_GROUP}\": .vg_name, \"{#ZLVM_POOL}\": .pool_lv, \"{#ZLVM_VOLUME}\": .lv_name })]"
value=$( ${LVS} --nosuffix --units b --reportformat json -o lv_name,vg_name,pool_lv | ${JQ} -Mca "${FILTER}" )
trim "${value}"
exit 0 ;;
esac
help "No valid discovery type specified"
}
# Resolve an LVM path ("group[/pool[/volume]]") to one report value.
#   $1 - slash-separated LVM path
#   $2 - metric name: "size", "usage" or "metadata"
# Prints the trimmed value and exits 0; falls through to help on a bad path.
status () {
	if [ ${#} -eq 2 ]; then
		# Split the path on '/' into a private array.  Fix: the original
		# read into PATH, clobbering the shell's command search path for
		# the rest of the run (it only worked because every external tool
		# is invoked through an absolute path).
		while IFS='/' read -ra lvm_path; do
			case ${#lvm_path[@]} in
			1)
				# A volume group: only a total size is defined.
				case "${2}" in
					"size") options="vg_size" ;;
				esac
				value=$( ${VGS} --no-headings --nosuffix --units b --select "vg_name = ${lvm_path[0]}" -o "${options}" )
				trim "${value}"
				exit 0 ;;
			2)
				# A pool (or plain LV) inside a group.
				case "${2}" in
					"size") options="lv_size" ;;
					"usage") options="data_percent" ;;
					"metadata") options="metadata_percent" ;;
				esac
				value=$( ${LVS} --no-headings --nosuffix --units b --select "vg_name = ${lvm_path[0]} && lv_name = ${lvm_path[1]}" -o "${options}" )
				trim "${value}"
				exit 0 ;;
			3)
				# A volume inside a pool; metadata is pool-level only.
				case "${2}" in
					"size") options="lv_size" ;;
					"usage") options="data_percent" ;;
				esac
				value=$( ${LVS} --no-headings --nosuffix --units b --select "vg_name = ${lvm_path[0]} && pool_lv = ${lvm_path[1]} && lv_name = ${lvm_path[2]}" -o "${options}" )
				trim "${value}"
				exit 0 ;;
			esac
		done <<< "${1}"
		help "No valid logical volume path specified"
	fi
}
# Locate the external tools (NOTE(review): `command -v` is the modern,
# portable alternative to `which`).
LVS=`which lvs`
VGS=`which vgs`
# Check if LVM tools is installed
if [ ! -x "${LVS}" ] || [ ! -x "${VGS}" ]; then
help "Unable to execute LVM tools, please install the 'lvm2' package first"
fi
JQ=`which jq`
# Check if JQ is installed
if [ ! -x "${JQ}" ]; then
help "Unable to execute JSON processor, please install the 'jq' package first"
fi
# Check if the program is running with root privileges
if [ ${EUID} -ne 0 ]; then
help "LVM tools requires root privileges"
fi
# Check if the user provided two arguments (one flag plus its value)
if [ ${#} -lt 2 ]; then
help
fi
# Check the command line arguments; each handler exits on success.
while getopts ":d:s:u:m:" opt; do
case ${opt} in
d) discovery "${OPTARG}" ;;
s) status "${OPTARG}" "size" ;;
u) status "${OPTARG}" "usage" ;;
m) status "${OPTARG}" "metadata" ;;
esac
done
# Display usage information and exit
help
| true
|
810191f017ddfe6dc5e28d2194ef91ad2996687b
|
Shell
|
alundiak/lanbash
|
/lb_cdlb.sh
|
UTF-8
| 1,387
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# "mvn clean package" - build and packaging into WAR
# OR
# "mvn clean install" (with packaging and pointing artifact to local maven repo)
# http://maven.apache.org/guides/introduction/introduction-to-the-lifecycle.html
# "mvn clean package -o" - packaging into WAR but wothout trying to download artifacts again
# "mvn clean install -o" - packaging and installation to maven repo into WAR but wothout trying to download artifacts again
# "mvn clean package|install -o -DskipTests=true" - packaging|installing with test skipping
# "mvn clean test -o" - run only tests
# More details:
# http://maven.apache.org/scm/plugins/index.html
# Read a yes/no answer from stdin.  On "yes" print "Continue ..." and
# return; on "no" or anything unrecognised print a message and terminate
# the whole script.
function UserConfirm() {
	read answer
	if [[ "$answer" == [yY] || "$answer" == YES || "$answer" == yes || "$answer" == Yes ]]; then
		echo "Continue ..."
	elif [[ "$answer" == [nN] || "$answer" == NO || "$answer" == no || "$answer" == No ]]; then
		echo "Aborted"
		exit
	else
		echo "You should confirm in proper way 'y|Y|YES|yes|Yes' OR 'n|N|no|NO|No'"
		exit
	fi
}
# Stop/start/restart the local Glassfish domain after user confirmation.
# NOTE(review): despite the "Tomcat" name, every action targets
# Glassfish's asadmin — confirm whether the name is historical.
#   $1 - gf_stop | gf_start | gf_restart
# Requires $GLASSFISH to point at the directory containing asadmin.
function TomcatAdminister(){
echo "Are you sure you want to '$1' with local Glassfish server (y|n) ?"
UserConfirm
case $1 in
gf_stop)
$GLASSFISH/asadmin stop-domain domain1
;;
gf_start)
$GLASSFISH/asadmin start-domain domain1
;;
gf_restart)
$GLASSFISH/asadmin stop-domain domain1
$GLASSFISH/asadmin start-domain domain1
;;
*)
echo "You should provide a type of action with Local Glassfish";
exit
;;
esac
}
| true
|
2602923b7bb17ee205793bc9d25be4b5be1ad026
|
Shell
|
agmangas/mlops-poc
|
/install-minio.sh
|
UTF-8
| 1,779
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install the MinIO operator via Helm and apply an example tenant.
# Env overrides: MINIO_NAMESPACE, MINIO_OP_TREEISH (operator git tag).
set -e
set -x
: ${MINIO_NAMESPACE:="minio-operator"}
: ${MINIO_OP_TREEISH:="v4.2.10"}
CURR_DIR="$(cd "$( dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd )"
TMP_DIR=$(python3 -c "import tempfile; print(tempfile.gettempdir());")
MINIO_OP_REPO_PATH=${TMP_DIR}/minio-operator
# Clone the operator chart into a temp dir; clean it up on any exit.
rm -fr ${MINIO_OP_REPO_PATH}
trap 'rm -fr ${MINIO_OP_REPO_PATH}' EXIT
git clone \
--depth 1 \
--branch ${MINIO_OP_TREEISH} \
https://github.com/minio/operator.git \
${MINIO_OP_REPO_PATH}
helm install \
--namespace ${MINIO_NAMESPACE} \
--create-namespace \
--generate-name \
--set tenants=null \
${MINIO_OP_REPO_PATH}/helm/minio-operator
# Apply the example tenant (Vagrant path takes precedence when present).
if [ -d "/vagrant/minio-tenant" ]; then
kubectl apply -k /vagrant/minio-tenant/tenant-tiny-custom
else
kubectl apply -k ${CURR_DIR}/minio-tenant/tenant-tiny-custom
fi
set +x
GREEN='\033[0;32m'
RESET='\033[0m'
# NOTE(review): the unquoted EOF below expands $(kubectl ...) and
# $(hostname) at assignment time, so those commands RUN here and their
# output is embedded into the help text — confirm that is intentional
# (escape as \$(...) if the literal command should be shown instead).
HELP=$(cat << EOF
To access the MinIO Operator console, forward the port for the console service:
kubectl --namespace ${MINIO_NAMESPACE} port-forward --address=0.0.0.0 svc/console 9090:9090
The console web app will now be available on http://localhost:9090. You can get a JWT token for authentication with:
kubectl get secret $(kubectl get serviceaccount console-sa --namespace ${MINIO_NAMESPACE} -o jsonpath="{.secrets[0].name}") --namespace ${MINIO_NAMESPACE} -o jsonpath="{.data.token}" | base64 --decode
To access the example Tenant, forward the port for the Tenant minio service:
kubectl --namespace tenant-tiny port-forward --address=0.0.0.0 svc/minio 8080:80
You can test access to the example Tenant with the MinIO client:
docker run --rm -it --entrypoint=/bin/bash minio/mc -c "mc alias set tiny http://$(hostname):8080 minio minio123 && mc --debug tree tiny"
EOF
)
echo -e "${GREEN}${HELP}${RESET}"
| true
|
e4fc1753ae9698812dbb1b968db041f80978b515
|
Shell
|
syyunn/CausalMBRL
|
/scripts/eval_rl_fixed_unobserved.sh
|
UTF-8
| 807
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Evaluate a trained RL model on the WShapesRL-FixedUnobserved env.
# Positional arguments:
#   $1 num_obj   $2 name     $3 encoder  $4 cmap  $5 seed
#   $6 loss      $7 mode     $8 emb      $9 steps
echo Running on $HOSTNAME
num_obj=$1
name=$2
encoder=$3
cmap=$4
seed=$5
loss=$6
mode=$7
emb=$8
steps=$9
dir="models_"$emb
#env=WShapesRL-Observed-$mode-$num_obj-$cmap-v0
#env=WShapesRL-Unobserved-Train-$num_obj-$cmap-v0
env=WShapesRL-FixedUnobserved-Train-$num_obj-$cmap-v0
save=$dir"/FixedUnobserved/"$name"_"$seed"/"
name=$name"_"$loss"_"$encoder"_"$num_obj"_"$cmap
echo $name
# Recurrent architectures (LSTM / RIM / SCOFF) need the --recurrent flag.
extras=""
if [[ $name == *"LSTM"* ]]; then
extras="--recurrent"
fi
if [[ $name == *"RIM"* ]]; then
extras="--recurrent"
fi
if [[ $name == *"SCOFF"* ]]; then
extras="--recurrent"
fi
echo $extras
# NLL-trained models are additionally fine-tuned during evaluation.
if [[ $name == *"NLL"* ]]; then
extras=$extras" --finetune"
fi
python ./test_planning.py --save-folder $save""$name --save $dir \
--num-eval 1000 --num-steps $steps \
--env-id $env --random $extras
| true
|
68446f268537a22f5b70655abb73f36088aec50e
|
Shell
|
Gentux/configurations
|
/bash/install.sh
|
UTF-8
| 287
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Symlink the bash dotfiles from this directory into $HOME.
# Relies on create_or_replace_symlink defined in ../function.sh.
# (Quoting added throughout so paths containing spaces survive — SC2086.)
CURRENT_PATH=$(dirname "$(realpath "$0")")
source "${CURRENT_PATH}/../function.sh"
create_or_replace_symlink "${CURRENT_PATH}/bashrc" ~/.bashrc
create_or_replace_symlink "${CURRENT_PATH}/bash_aliases" ~/.bash_aliases
create_or_replace_symlink "${CURRENT_PATH}/dircolors" ~/.dircolors
| true
|
348eddc61f0ea89a995d820a28b8fb31d182e049
|
Shell
|
Salix-OS/lxc-salix
|
/rcs.patch
|
UTF-8
| 2,951
| 3.1875
| 3
|
[] |
no_license
|
--- rc.S.orig 2012-02-15 11:22:53.000000000 +0100
+++ rc.S 2012-02-15 14:17:18.000000000 +0100
@@ -6,6 +6,10 @@
#
# Tweaked for Salix by George Vlahavas <vlahavas~at~gmail~dot~com>
+# Tweaked for Salix container by Frédéric Galusik <fredg~at~salixos~dot~org>
+# based on the tweak for a slackware container
+# by ponce <matteo.bernardini@gmail.com>
+
# modified for colorized screen messages and local needs
. /etc/shell-colors
@@ -13,8 +17,14 @@
echo
echo -e "${BOLDYELLOW}Initializing.${COLOR_RESET}"
+# a check for a container variable is made to jump unneeded sections
+CONTAINER=yes
+
PATH=/sbin:/usr/sbin:/bin:/usr/bin
+# container check
+if [ ! $CONTAINER ]; then
+
# Try to mount /proc:
echo -e "${BOLDCYAN}Mounting proc filesystem:${COLOR_RESET}"
/sbin/mount -v proc /proc -n -t proc 2> /dev/null
@@ -255,17 +265,28 @@
read junk;
fi # Done checking root filesystem
+fi # end container check
+
# Any /etc/mtab that exists here is old, so we delete it to start over:
/bin/rm -f /etc/mtab*
+
+# container check
+if [ ! $CONTAINER ]; then
+
# Remounting the / partition will initialize the new /etc/mtab:
echo -e "${BOLDCYAN}Creating /etc/mtab.${COLOR_RESET}"
/sbin/mount -w -o remount /
+fi # end container check
+
# Read in the correct / filesystem complete with arguments so mount will
# show them correctly. This does not stop those arguments from functioning
# but does prevent a small bug with /etc/mtab.
/bin/grep ' / ' /proc/mounts | grep -v "^rootfs" > /etc/mtab
+# container check
+if [ ! $CONTAINER ]; then
+
# Fix /etc/mtab to list sys and proc if they were not yet entered in
# /etc/mtab because / was still mounted read-only:
if [ -d /proc/sys ]; then
@@ -345,6 +366,8 @@
# mounted read-write.
/sbin/swapon -a 2> /dev/null
+fi # end container check
+
# Clean up some temporary files:
rm -f /var/run/* /var/run/*/* /var/run/*/*/* /etc/nologin \
/etc/dhcpc/*.pid /etc/forcefsck /etc/fastboot \
@@ -372,7 +395,7 @@
# if the first line of that file begins with the word 'Linux'.
# You are free to modify the rest of the file as you see fit.
if [ -x /bin/sed ]; then
- /bin/sed -i "{1s/^Linux.*/$(/bin/uname -sr)\./}" /etc/motd
+ /bin/sed -i "{1s/^Linux.*/$(/bin/uname -sr) lxc container\./}" /etc/motd
fi
# If there are SystemV init scripts for this runlevel, run them.
@@ -380,6 +403,9 @@
/bin/sh /etc/rc.d/rc.sysvinit
fi
+# container check
+if [ ! $CONTAINER ]; then
+
# Run serial port setup script:
# CAREFUL! This can make some systems hang if the rc.serial script isn't
# set up correctly. If this happens, you may have to edit the file from a
@@ -388,6 +414,8 @@
/bin/sh /etc/rc.d/rc.serial start
fi
+fi # end container check
+
# Carry an entropy pool between reboots to improve randomness.
if [ -f /etc/random-seed ]; then
echo -e "${BOLDWHITE}Using /etc/random-seed to initialize /dev/urandom.${COLOR_RESET}"
@@ -401,3 +429,5 @@
fi
chmod 600 /etc/random-seed
+#
+# vim: set ft=sh:
| true
|
9f09d375e47a8aa42144ec326172506b7d682ba8
|
Shell
|
LUXEsoftware/SeedingAlgorithm
|
/runSeeding3Or4Hits.sh
|
UTF-8
| 5,608
| 2.828125
| 3
|
[] |
no_license
|
#! /bin/zsh
# Run the 3-or-4-hit seeding algorithm over signal(+background) samples
# for a range of signal-track multiplicities and bunch crossings.
#   $1 - "E" selects the e+laser setup, anything else the g+laser setup.
postfix=""
needFitList="F"
needELaser=${1}
alias python3=/usr/local/bin/python3.9
# needFit "F" enables the track fit; the postfix tags the output files.
for needFitIter in ${needFitList}; do
if [[ ${needFitIter} == "F" ]]
then
postfix="WithFit3or4HitsTracksAndDistanceCut"
needFit=1
else
postfix="WithoutFit3or4HitsTracksAndDistanceCut"
needFit=0
fi
echo "!!!!!!#### The postfix to the root file: "${postfix}" and needFit: "${needFit}
#signalTracksList="1 5 10 20 30 50 80 100 130 150 170 185 200 220"
#250 300 500 750
signalTracksList="1000 1500 2000 2500 3000 3500"
#signalTracksList="1 4 7 10 13 16"
bxList="1 2 3 4"
particleList="Positron"
### working with the signal+background case, 250 300 3000 3500
### 500 750 1000 1500 2000
for signalTracks in 250 300 500 750 1000 1500 2000 2500 3000; do
echo "###############################################################"
echo "########## working for nTracks: ${signalTracks} case ##########"
echo "###############################################################"
for bx in 1; do
echo "######!!! BX: "${bx}" !!!#########"
for particle in ${particleList}; do
echo "#####!!! Particle : "${particle}" !!!####"
if [[ $1 == "E" ]]
then
#### e+laser setup: run once signal-only (-s) and once signal+background
python3 findSeed.py -l BkgEBeam_Signal${particle}hics3000nm_jeti40_122020_9550dac4_BX${bx}_SignalTracks${signalTracks}_trackInfoClean.txt -s -f ${needFit} -p ${particle}
python3 findSeed.py -l BkgEBeam_Signal${particle}hics3000nm_jeti40_122020_9550dac4_BX${bx}_SignalTracks${signalTracks}_trackInfoClean.txt -f ${needFit} -p ${particle}
#### change the root file name prefix
mv seedingInformationFiles/seedingInformation_BkgEBeam_Signal${particle}hics3000nm_jeti40_122020_9550dac4_BX${bx}_SignalTracks${signalTracks}_trackInfoClean_VariableEnergyCut_OnlySignal_${particle}Side.root seedingInformationFiles/seedingInformation_BkgEBeam_Signal${particle}hics3000nm_jeti40_122020_9550dac4_BX${bx}_SignalTracks${signalTracks}_trackInfoClean_VariableEnergyCut_OnlySignal_${particle}Side_${postfix}.root
mv seedingInformationFiles/seedingInformation_BkgEBeam_Signal${particle}hics3000nm_jeti40_122020_9550dac4_BX${bx}_SignalTracks${signalTracks}_trackInfoClean_VariableEnergyCut_SignalAndBackground_${particle}Side.root seedingInformationFiles/seedingInformation_BkgEBeam_Signal${particle}hics3000nm_jeti40_122020_9550dac4_BX${bx}_SignalTracks${signalTracks}_trackInfoClean_VariableEnergyCut_SignalAndBackground_${particle}Side_${postfix}.root
else
#### g+laser setup
python3 findSeedGLaser.py -l BkgGBeam_Signal${particle}bppp3000nmOr5000nm_BX${bx}_SignalTracks${signalTracks}_trackInfoClean.txt -s -f ${needFit} -p ${particle}
python3 findSeedGLaser.py -l BkgGBeam_Signal${particle}bppp3000nmOr5000nm_BX${bx}_SignalTracks${signalTracks}_trackInfoClean.txt -f ${needFit} -p ${particle}
#### change the root file name prefix
mv seedingInformationFiles/seedingInformation_BkgGBeam_Signal${particle}bppp3000nmOr5000nm_BX${bx}_SignalTracks${signalTracks}_trackInfoClean_VariableEnergyCut_OnlySignal_${particle}Side.root seedingInformationFiles/seedingInformation_BkgGBeam_Signal${particle}bppp3000nmOr5000nm_BX${bx}_SignalTracks${signalTracks}_trackInfoClean_VariableEnergyCut_OnlySignal_${particle}Side_${postfix}.root
mv seedingInformationFiles/seedingInformation_BkgGBeam_Signal${particle}bppp3000nmOr5000nm_BX${bx}_SignalTracks${signalTracks}_trackInfoClean_VariableEnergyCut_SignalAndBackground_${particle}Side.root seedingInformationFiles/seedingInformation_BkgGBeam_Signal${particle}bppp3000nmOr5000nm_BX${bx}_SignalTracks${signalTracks}_trackInfoClean_VariableEnergyCut_SignalAndBackground_${particle}Side_${postfix}.root
fi
done
done
done
# # ### work with the background only file
# ### work with the new background only file
# if [[ $1 == "E" ]]
# then
# for bx in 1 2 3 4; do
# for particle in ${particleList}; do
# python3 findSeed.py -l ePlusLaserBkgKaptonWindowNewSamplesMarch62021_DividedByBX${bx}_trackInfoClean.txt -f ${needFit} -p ${particle}
# mv seedingInformationFiles/seedingInformation_ePlusLaserBkgKaptonWindowNewSamplesMarch62021_DividedByBX${bx}_trackInfoClean_VariableEnergyCut_SignalAndBackground_${particle}Side.root seedingInformationFiles/seedingInformation_ePlusLaserBkgKaptonWindowNewSamplesMarch62021_DividedByBX${bx}_trackInfoClean_VariableEnergyCut_SignalAndBackground_${particle}Side_${postfix}.root
# done
# done
# else
# for bx in 1 2 3 4; do
# python3 findSeedGLaser.py -l EBeamOnlyWIS_DividedByBX${bx}_trackInfoClean.txt -f ${needFit}
# mv seedingInformationFiles/seedingInformation_EBeamOnlyWIS_DividedByBX${bx}_trackInfoClean_VariableEnergyCut_SignalAndBackground_PositronSide.root seedingInformationFiles/seedingInformation_EBeamOnlyWIS_DividedByBX${bx}_trackInfoClean_VariableEnergyCut_SignalAndBackground_PositronSide_${postfix}.root
# done
# fi
done
| true
|
e4559a21f751047289f3e0d6f2213f99fadfcce7
|
Shell
|
manvantar/Shell_Programming_FOR_LOOP
|
/range_ofPrime_for.sh
|
UTF-8
| 311
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print all prime numbers in an inclusive range read from the user.
read -p "Please enter the starting number to check prime or not: " n
read -p "Please enter the ending range: " m
for ((a=n; a<=m; a++))
do
	# Fix: numbers below 2 (0 and 1) are not prime; the original
	# printed them because the trial-division loop never ran.
	if [ "$a" -lt 2 ]; then
		continue
	fi
	flag=0
	# Trial division up to a/2; first divisor found marks non-prime.
	for ((i=2; i<=a/2; i++))
	do
		if [ $((a % i)) -eq 0 ]; then
			flag=1
			break
		fi
	done
	if [ $flag -eq 0 ]; then
		echo -n "$a "
	fi
done
| true
|
6c3aa093aefe603d1f4caca395f396ed9ac3e17b
|
Shell
|
luanxiangming/performance
|
/start_test.sh
|
UTF-8
| 3,209
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run every JMeter test plan in $TEST_PLAN (from env.sh) against each
# user-load count passed on the command line, generate aggregate
# reports, then post-process the results with node.
source ./env.sh
# The last command-line argument supplies the (array of) load counts.
for arg in "$@"
do
LOAD_COUNT=($arg)
done
testLen=${#TEST_PLAN[@]}
loadLen=${#LOAD_COUNT[@]}
# Build comma-separated lists for the node post-processing step.
testPlans=${TEST_PLAN[0]}
loadCounts=${LOAD_COUNT[0]}
for ((i=1;i<testLen;i++))
do testPlans=$testPlans","${TEST_PLAN[i]}
done
for ((i=1;i<loadLen;i++))
do loadCounts=$loadCounts","${LOAD_COUNT[i]}
done
mkdir $JMETER_RESULT
# Cartesian product: every plan at every load level.
for ((i=0;i<testLen;i++))
do
for ((j=0;j<loadLen;j++))
do
logFile=`echo ${TEST_PLAN[i]}\_${LOAD_COUNT[j]}users\_${JMETER_LOAD_TIME_MIN}min.jtl`
aggregateFile=`echo ${TEST_PLAN[i]}\_${LOAD_COUNT[j]}users\_${JMETER_LOAD_TIME_MIN}min_aggregate.jtl`
#errorlogFile=`echo ${TEST_PLAN[i]}\_${LOAD_COUNT[j]}users\_${JMETER_LOAD_TIME_MIN}min\_error.jtl`
serverPerf=`echo ${TEST_PLAN[i]}\_${LOAD_COUNT[j]}users\_${JMETER_LOAD_TIME_MIN}min\_server.jtl`
serverPerfGraph=`echo ${TEST_PLAN[i]}\_${LOAD_COUNT[j]}users\_${JMETER_LOAD_TIME_MIN}min\_server_graph.png`
serverPerfMem=`echo ${TEST_PLAN[i]}\_${LOAD_COUNT[j]}users\_${JMETER_LOAD_TIME_MIN}min\_mem.jtl`
serverPerfMemGraph=`echo ${TEST_PLAN[i]}\_${LOAD_COUNT[j]}users\_${JMETER_LOAD_TIME_MIN}min\_mem_graph.png`
printf "$(date),${TEST_PLAN[i]}_${LOAD_COUNT[j]}users_${JMETER_LOAD_TIME_MIN}min,${LOAD_COUNT[j]},$JMETER_HOST,$JMETER_LOAD_TIME," >>$JMETER_RESULT/script.txt
echo "====${TEST_PLAN[i]}_${LOAD_COUNT[j]}users_${JMETER_LOAD_TIME_MIN}min start running ==="
# Non-GUI JMeter run; -J properties feed host/port/load and CSV data
# files into the test plan.
JVM_ARGS="-Xms1024m -Xmx2048m" sh $JMETER_PATH -n -t $JMETER_SRC/${TEST_PLAN[i]}.jmx -l $JMETER_RESULT/$aggregateFile -e -o $JMETER_RESULT/VQS -JreportPath=$JMETER_RESULT/$logFile -JthreadsCount=${LOAD_COUNT[j]} -Jhost=$JMETER_HOST -Jport=$JMETER_PORT -JholdLoad=$JMETER_LOAD_TIME -JhttpProtocol=$HTTP_PROTOCOL \
-Jdev_var=$JMETER_TEST_DATA/DEV_VAR.csv \
-Jprd_var=$JMETER_TEST_DATA/PRD_VAR.csv \
-Jstg_var=$JMETER_TEST_DATA/STG_VAR.csv \
-Jliveshow_list=$JMETER_TEST_DATA/liveshow_list.csv \
-Jlogin_dev=$JMETER_TEST_DATA/login_dev.csv \
-Jlogin_prd=$JMETER_TEST_DATA/login_prd.csv \
-Jlogin_stg=$JMETER_TEST_DATA/login_stg.csv \
-Jlogin_vipjr=$JMETER_TEST_DATA/login_vipjr.csv \
-Jcreate_prd=$JMETER_TEST_DATA/account_prd.csv \
-Jcreate_stg=$JMETER_TEST_DATA/account_stg.csv \
-Jcreate_dev=$JMETER_TEST_DATA/account_dev.csv \
-Jjoin_room=$JMETER_TEST_DATA/join_room.csv \
-Jroom_stg=$JMETER_TEST_DATA/create_room.csv \
-Jsend_groupmsg=$JMETER_TEST_DATA/send_groupmsg.csv
# Generate the CSV aggregate report from the raw jtl log.
java -jar $JMETER_CMD_RUNNER_PATH --tool Reporter --generate-csv $JMETER_RESULT/$aggregateFile --input-jtl $JMETER_RESULT/$logFile --plugin-type AggregateReport
echo "==== ${TEST_PLAN[i]}_${LOAD_COUNT[j]}users_${JMETER_LOAD_TIME_MIN}min finished ==="
echo "$(date),${TEST_PLAN[i]}" >> $JMETER_RESULT/script.txt
done
done
END_TIME=`date +%s`
node $SCRIPT/resultAnalyse.js $testPlans $loadCounts $JMETER_RESULT $JMETER_LOAD_TIME_MIN
# node --print_code_verbose $SCRIPT/perf.js $JMETER_RESULT $MYSQL_HOST $MYSQL_USERNAME $MYSQL_PASSWORD $MYSQL_DATABASE $TIMESTAMP $END_TIME
| true
|
8dfa50888a4835efdc9926821554f38da06059c6
|
Shell
|
pesedr/dotfiles
|
/env.sh
|
UTF-8
| 1,149
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/zsh
# Personal zsh environment: dotfile aliases, Go/nvm/Java setup,
# fzf integration, completion fpath and rbenv.
# dotfiles
alias dotedit='vim ~/Projects/env.sh'
alias dotsource='source ~/.zshrc'
alias vimedit='vim ~/.vimrc'
alias github='cd ~/Projects/go/src/github.com'
alias pesgit='cd ~/Projects/go/src/github.com/pesedr'
# Go
export GOPATH=$HOME/Projects/go
export PATH=$PATH:$(go env GOPATH)/bin
# nvm
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && . "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# Java
export PATH="/usr/local/bin:$PATH"
export JAVA_HOME="`/usr/libexec/java_home`"
# fzf keybindings/completion, if installed
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# zsh completion search path (oh-my-zsh plugins + system completions)
fpath=(/usr/local/share/zsh-completions /Users/rodrigoalonso/.oh-my-zsh/plugins/osx /Users/rodrigoalonso/.oh-my-zsh/plugins/brew /Users/rodrigoalonso/.oh-my-zsh/plugins/python /Users/rodrigoalonso/.oh-my-zsh/plugins/pip /Users/rodrigoalonso/.oh-my-zsh/plugins/colorize /Users/rodrigoalonso/.oh-my-zsh/plugins/git /Users/rodrigoalonso/.oh-my-zsh/functions /Users/rodrigoalonso/.oh-my-zsh/completions /usr/local/share/zsh/site-functions /usr/share/zsh/site-functions /usr/share/zsh/5.3/functions)
eval "$(rbenv init -)"
| true
|
4cb9de9c47a626c7633f95eb6a6c35963d1fe3b4
|
Shell
|
doublesongsong/galaxy
|
/optools/galaxy
|
UTF-8
| 3,109
| 3.65625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
#
# /etc/init.d/galaxy
# Subsystem file for "galaxy" server
#
# chkconfig: 2345 95 05
# description: galaxy server daemon
#
# processname: galaxy
# Location of the agent tarball to deploy onto this node.
PROD_ADDR=ftp://xxxxxx/tmp/galaxy.tar.gz
. /etc/rc.d/init.d/functions
AGENT_IP=`hostname -i`
AGENT_HOME=/home/galaxy/agent
RETVAL=0
# Prepare the node for the galaxy agent: core-dump capture, cgroup
# hierarchies, fstab mounts, the galaxy user and the agent directories.
# (Fix: the original used C-style "//" comment lines, which the shell
# executes as a command named "//" and prints an error for each.)
init() {
	# config core dump dir
	mkdir -p /home/disk2/coresave
	echo '/home/disk2/coresave/core.%e.%p.%t' | tee /proc/sys/kernel/core_pattern
	chmod 777 -R /home/disk2/coresave
	# mount cgroup controllers (errors ignored if already mounted)
	CGROUP_ROOT=/cgroups
	mkdir -p $CGROUP_ROOT/cpu && mount -t cgroup -ocpu none $CGROUP_ROOT/cpu >/dev/null 2>&1
	mkdir -p $CGROUP_ROOT/memory && mount -t cgroup -omemory none $CGROUP_ROOT/memory >/dev/null 2>&1
	mkdir -p $CGROUP_ROOT/cpuacct && mount -t cgroup -ocpuacct none $CGROUP_ROOT/cpuacct >/dev/null 2>&1
	mkdir -p $CGROUP_ROOT/freezer && mount -t cgroup -ofreezer none $CGROUP_ROOT/freezer >/dev/null 2>&1
	# mount everything listed in fstab
	mount -a
	/usr/sbin/adduser galaxy >/dev/null 2>&1
	mkdir -p $AGENT_HOME/work_dir
	# Rotate the previous gc dir aside and delete it in the background.
	GC_DIR_TO_REMOVE=$AGENT_HOME/gc_dir_to_remove
	test -e $AGENT_HOME/gc_dir && mv -f $AGENT_HOME/gc_dir $GC_DIR_TO_REMOVE
	test -e $GC_DIR_TO_REMOVE && nohup rm -rf $GC_DIR_TO_REMOVE>/dev/null 2>&1 &
	mkdir -p $AGENT_HOME/gc_dir
	mkdir -p $AGENT_HOME/log
	# Silence kernel console logging.
	echo 0 > /proc/sys/kernel/printk
}
# Fetch and unpack the agent tarball into $AGENT_HOME, then size the
# agent's CPU/memory shares from this machine's hardware and record the
# local ssd/disk mount points for bind-mounting.
download_pkg() {
cd $AGENT_HOME
wget -O tmp.tar.gz $PROD_ADDR
tar -zxvf tmp.tar.gz
# Give the agent 78% of the CPUs, expressed in millicores.
cpu_count=`cat /proc/cpuinfo | grep processor | wc -l`
cpu_share_count=`echo $(($cpu_count * 78 / 100 *1000))`
sed -i "s/--agent_millicores_share=.*/--agent_millicores_share=$cpu_share_count/" conf/galaxy.flag
# Leave 2 GiB of RAM to the system; hand the rest (in bytes) to the agent.
mem_size=`free -g | grep Mem | awk '{print (int($2) - 2) * 1024 * 1024 *1024 }'`
sed -i "s/--agent_mem_share.*/--agent_mem_share=$mem_size/" conf/galaxy.flag
echo "--agent_ip=$AGENT_IP" >> conf/galaxy.flag
df -h | grep ssd | awk '{print $6}' >> conf/mount_bind.template
df -h | grep disk | awk '{print $6}' >> conf/mount_bind.template
}
# Initialize the node, fetch the agent package and launch the agent
# under the babysitter supervisor.
start() {
init;
download_pkg;
cd $AGENT_HOME
./bin/babysitter bin/galaxy-agent.conf start
}
# Stop the agent via babysitter, then after a grace period force-kill
# any leftover babysitter/agent processes.
stop() {
cd $AGENT_HOME
./bin/babysitter bin/galaxy-agent.conf stop >/dev/null 2>&1
sleep 2
# NOTE(review): ps|grep matching is fragile (can match unrelated
# processes); pgrep -f would be safer.
baby_pid=`ps -ef | grep babysitter | grep galaxy-agent | awk '{print $2}'`
if [ ! -z "$baby_pid" -a "$baby_pid" != " " ];then
kill -9 $baby_pid
echo "clean babysitter"
fi
agent_pid=`ps -ef | grep agent | grep galaxy.flag | grep -v initd | awk '{print $2}'`
if [ ! -z "$agent_pid" -a "$agent_pid" != " " ];then
for pid in $agent_pid;
do
kill -9 $pid
done
fi
}
# Standard SysV init entry points.
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
stop
start
;;
reload)
echo "reload"
;;
condrestart)
echo "condrestart"
;;
status)
# Probe the agent's listening port; nc's exit code becomes the status.
nc -z 0.0.0.0 8221 >/dev/null 2>&1
RETVAL=$?
;;
*)
echo $"Usage: $0 {start|stop|restart|reload|condrestart|status}"
RETVAL=1
esac
exit $RETVAL
| true
|
d1d4ac5c214e9ddd245a89244a82e70a95c1d011
|
Shell
|
juanmasg/zshrc
|
/jobselect.zsh
|
UTF-8
| 705
| 3.25
| 3
|
[] |
no_license
|
# zle widget: read one key and turn it into a job-control command.
#   0       -> list jobs
#   1-9     -> foreground job N
#   other   -> report "no such job"
jobselect() {
# read -k 1 "argno?job: "
read -k 1 argno
case $argno in
0)
BUFFER="jobs"
;;
"^]")
# NOTE(review): this pattern matches the literal two characters "^]",
# not the Ctrl-] key (read -k returns the raw control character) —
# confirm whether this branch is reachable as intended.
#BUFFER="fg"
joblist
;;
[0-9])
BUFFER="fg $argno"
;;
*)
echo "\nNo such job: $argno"
return
;;
esac
zle .accept-line
return
}
# Pick an action based on how many background jobs exist:
# none -> do nothing, one -> fg it, several -> list them.
joblist() {
case ${#jobstates} in
0)
#echo "\nNo background jobs."
return
;;
1)
BUFFER="fg"
;;
*)
BUFFER="jobs"
;;
esac
zle .accept-line
}
# Register the widgets and bind Alt-j (Alt-j Alt-j for the job list).
zle -N jobselect
zle -N joblist
bindkey '^[j' jobselect
bindkey '^[j^[j' joblist
| true
|
0ac29b73d7c2484303233f3db829298acbf322df
|
Shell
|
strigo/ami-bakery-linuxdesktop
|
/scripts/install-xfce.sh
|
UTF-8
| 426
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Install the Xfce desktop and configure an xsession for the ubuntu user.
apt-get install -y xfce4 xfce4-goodies
echo "xfce4-session" > /home/ubuntu/.xsession
# Environment needed so Xfce sessions find the xubuntu data/config dirs.
cat <<EOF > /home/ubuntu/.xsessionrc
export XDG_SESSION_DESKTOP=xubuntu
export XDG_DATA_DIRS=/usr/share/xfce4:/usr/share/xubuntu:/usr/local/share:/usr/share:/var/lib/snapd/desktop:/usr/share
export XDG_CONFIG_DIRS=/etc/xdg/xdg-xubuntu:/etc/xdg:/etc/xdg
EOF
chown ubuntu:ubuntu /home/ubuntu/.xsessionrc /home/ubuntu/.xsession
| true
|
474083c6897bf756f527249d00ece3232505bd44
|
Shell
|
nuug/videogruppa
|
/tools/audio_split
|
UTF-8
| 613
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Date: 2009-12-10
# Author: Ole Kristian Lien
# License: GNU General Public License
#
# Splits an audio file into a left- and a right-channel file.
# Usage: audio_split <audio-file>

# Validate arguments first.  (Fix: the original derived EXT/NAME from
# "$1" before checking that an argument was actually given.)
if [ -z "$1" ]; then
	echo "Usage: $0 <audio-file>"
	exit 1
fi

EXT=$(echo "$1" | awk -F . '{print $NF}')   # extension = text after the last '.'
NAME=$(basename "$1" ".$EXT")               # base name without the extension

./require sox || { exit 1; }

# sox FAIL formats: can't open output file `1-left.mp2': SoX was compiled without MP3 encoding support
# cannot encode to mp{2,3}...
echo -n " * Splitting up audio to a left- and right-file..."
sox "$1" -c 1 "$NAME-left.wav" mixer -l 2> /dev/null
sox "$1" -c 1 "$NAME-right.wav" mixer -r 2> /dev/null
echo -e "OK!"
| true
|
d243a8f11f0130a0c7e02233cc383257111b69ab
|
Shell
|
WangKaiwh/Learn_Record
|
/Bash_shell/parameter/shift.sh
|
UTF-8
| 222
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Walk the positional parameters with shift, reporting known options.
# The string after -n must be wrapped in double quotes (otherwise an
# empty $1 makes the test malformed).
while [ -n "$1" ]; do
case "$1" in
-a) echo "-a option";;
-b) echo "-b option";;
-c) echo "-c option";;
*) echo "$1 is not an option"
esac
# Drop $1 and shift the remaining arguments left by one.
shift
done
| true
|
bcfd0895d81150e095039996aff2a355ac8ab08b
|
Shell
|
Hacker0x01/mocha
|
/scripts/travis-after-script.sh
|
UTF-8
| 277
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# syncs Karma test bundles to S3 if $S3 is set
# Fix: `[ ${S3} ]` is an unquoted expansion (SC2086) — it errors out if
# S3 contains whitespace; use an explicit non-empty test instead.
if [ -n "${S3}" ]
then
  mkdir -p ".karma/${TRAVIS_JOB_NUMBER}"
  cp ./mocha.js ".karma/${TRAVIS_JOB_NUMBER}/mocha.js"
  aws s3 sync ".karma/${TRAVIS_JOB_NUMBER}" "s3://mochajs/karma-bundles/${TRAVIS_JOB_NUMBER}"
fi
| true
|
2fda261ea4375ed7f535432b71ac6eac3301863b
|
Shell
|
suresh16s/CodeInClub
|
/Function_pro/conversion.sh
|
UTF-8
| 583
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash -x
degF(){
c=`(echo 'scale=2;9 / 5' | bc)`
d=`(echo 'scale=2;'$degC' * '$c'' | bc)`
e=`(echo $d + 32 | bc)`
echo $e
}
degC(){
c=`(echo 'scale=2;5 / 9' | bc)`
d=`(echo $degF - 32 | bc)`
e=`(echo 'scale=2;'$d' * '$c'' | bc)`
echo $e
}
# Interactive menu: pick a conversion direction, read the temperature
# into the matching global ($degC or $degF), apply a rough upper-bound
# sanity check, then call the conversion function of the same name.
echo -e "1:degC to degF\n2:degF to degC"
read ch
case $ch in
"1")
echo "Temp in celcius:"
read degC
if [ $degC -lt 101 ]
then
degF
else
echo "Invalid tempreture...."
fi;;
"2")
echo "Temp in fahrenheit:"
read degF
if [ $degF -lt 213 ]
then
degC
else
echo "Invalid tempreture...."
fi;;
*)
echo "Invalid choice....";;
esac
| true
|
0c5f0ae68c5b171cb497c2c03baabfc9a87c7ccc
|
Shell
|
mxmlnkn/BTWPlots
|
/crawl.sh
|
UTF-8
| 1,808
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Crawl the 2017 Bundestag election results from bundeswahlleiter.de and
# convert each constituency's HTML results table into 2017/<wk>.csv.

# Extract all <a href="..."> targets from an HTML file, URL-encoding spaces.
getUrls() { sed 's|<a href="|\nKEEP!!|g' "$1" | sed '/^KEEP!!/!d; s/KEEP!!//; s/".*$//g; s/ /%20/g'; }

mkdir -p '2017'
wget -q -O a 'https://www.bundeswahlleiter.de/bundestagswahlen/2017/ergebnisse.html'
# One page per federal state ("Land"), each linking its constituencies.
for land in $( getUrls a | sed -n '/ergebnisse\/.*land-.*\.html/p' ); do
url='https://www.bundeswahlleiter.de/bundestagswahlen/2017/'"$land"
wget -q -O a "$url"
for wk in $( getUrls a | sed -nr '/land-[0-9]+\/wahlkreis-[0-9]+.html/p' ); do
wget -q -O a "${url%/*}/$wk"
wk=${wk#*wahlkreis-}
wk=${wk%.html}
echo "Wahlkreis $wk"
# Cut out the results table, then flatten the HTML into semicolon CSV:
# one row per <tr>, captions/headers prefixed with '#'.
sed -n '/<table class="tablesaw table-stimmen"/,/<\/table>/p' a > "$wk.tmp"
cat "$wk.tmp" |
sed 's/^[\ \t]*//g' |
tr -d '\r\n' |
sed 's/<\/TR[^>]*>/\n/Ig
s|<caption>|# |Ig
s|</caption>|\n|Ig
s|<tbody[^>]*>||Ig
s|</tbody[^>]*>|\n|Ig
s|</thead[^>]*>|\n|Ig
s|<th[^>]*>|# |Ig
s/<\/\?\(TABLE\|TR\)[^>]*>//Ig' |
sed 's/^<T[DH][^>]*>\|<\/\?T[DH][^>]*>$//Ig
s|</T[DH][^>]*><T[DH][^>]*>|;|Ig' > "2017/$wk.csv"
if [ "$( cat "2017/$wk.csv" | wc -l )" -lt 5 ]; then
# Fix: the red ANSI escape was missing its terminating 'm' (\e[31m),
# so the warning printed garbage instead of turning red.
echo -e "\e[31mSomething went wrong when parsing '$url' to '2017/$wk.csv'\e[0m"
fi
rm "$wk.tmp"
done
done
# cat 18.tmp |
# # 'grep' -i -e '</\?TABLE\|</\?TD\|</\?TR\|</\?TH' | # only keep lines corresponding to the table
# sed 's/^[\ \t]*//g' | # remove beginning whitespaces
# tr -d '\r\n' | # remove all line breaks
# sed 's/<\/TR[^>]*>/\n/Ig' | # put each <tr> on a new line
# sed 's/<\/\?\(TABLE\|TR\)[^>]*>//Ig' |
# sed 's/^<T[DH][^>]*>\|<\/\?T[DH][^>]*>$//Ig' |
# sed 's/<\/T[DH][^>]*><T[DH][^>]*>/,/Ig'
| true
|
c09ab398d21c53266c95386f326a5a8e4b25ee98
|
Shell
|
gma/nesta-regression
|
/bin/add-theme
|
UTF-8
| 665
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# Add a Nesta theme as a git submodule and symlink it into each release.
# Usage: add-theme <repo-url>
ROOT="$(cd $(dirname $0)/..; pwd)"

## Functions

# Print usage to stderr and abort.
# (Fix: in the original, usage was invoked from the argument check
# BEFORE it was defined, producing "command not found" and letting the
# script continue with an empty THEME.)
usage()
{
	echo "Usage: $(basename $0) <repo-url>" 1>&2
	exit 1
}

# Register the theme repository as a git submodule under themes/.
clone-theme()
{
	git submodule add $REPO themes/$THEME
}

# Symlink the theme into every release's themes directory (strip the
# conventional "nesta-theme-" prefix from the link name).
symlink-into-releases()
{
	local release
	for release in releases/*; do
		mkdir -p $release/themes
		local dest="$release/themes/${THEME#nesta-theme-}"
		if [ ! -e $dest ]; then
			ln -s "../../../themes/$THEME" "$dest"
		fi
	done
}

## Main program

[ -n "$DEBUG" ] && set -x

# Derive the theme name from the repository URL.
REPO="$1"
[ -z "$REPO" ] && usage
THEME="${REPO##*/}"
THEME="${THEME%.git}"

cd "$ROOT"
mkdir -p "$ROOT/themes"
clone-theme
symlink-into-releases
| true
|
d5184301cd9360be64bd5d52e553fd1c6cacae5f
|
Shell
|
ynotradio/site
|
/bin/deploy.sh
|
UTF-8
| 352
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# deploy.sh
# Upload the local "public" tree to the remote web host over FTP,
# driven by the CI environment variables FTP_USER / FTP_PASS / FTP_HOST.
set -e
# lftp provides the recursive mirror upload used below.
sudo apt-get install -y lftp
# deployment via ftp upload. Using FTPS for that
# --reverse mirrors local -> remote; functions/main_fns.php is excluded
# so the server-side copy of that helper is never overwritten.
lftp -c "set net:max-retries 2;set net:reconnect-interval-base 5;set net:reconnect-interval-multiplier 1; open ftp://$FTP_USER:$FTP_PASS@$FTP_HOST:21; cd public; mirror --reverse --parallel=20 --verbose --exclude functions/main_fns.php; quit;"
| true
|
710f6bc042cddfc291a26ce7347c275a1e110bac
|
Shell
|
sergiobaro/bash-scripts
|
/bash-idioms/loops.sh
|
UTF-8
| 245
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstration of the common bash loop forms.

# Brace expansion; the leading zero pads each number to two digits.
for n in {01..10}; do
  printf '%s\n' "$n"
done

echo

# Classic C-style counting loop (prints 0 through 9).
for (( n = 0; n < 10; n++ )); do
  printf '%s\n' "$n"
done

# for ((;;))
# do
#   printf 'forever'
# done

echo

# "for" without an "in" list iterates over the positional parameters.
for positional; do
  printf '%s\n' "$positional"
done

echo

# Word-splitting the output of ls (shown for demonstration only;
# a glob would be safer in real scripts).
for entry in $(ls); do
  printf '%s\n' "$entry"
done
| true
|
4eb7c968d613b1416d1a455912d7d05ab228e792
|
Shell
|
TakGlobus/src_share
|
/001_get_multi_laads.bash
|
UTF-8
| 785
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Bulk-download LAADS archive directories listed in a control file.
#
# $1: control file; one "date,directory-name" entry per line.
#     * Point: do not put any space around the ','.
# $2: base output directory, relative to the current directory.

filename=$1                  # control file with date,directory pairs
target_basedir=`pwd`/$2
myappkey="A167C61A-10CF-11E9-BA61-CBB570C49BBF"   # LAADS DAAC app key

# Bug fix: the original echoed ${target_dir} before it was ever set;
# report the base download directory instead.
echo " Download 2 path ${target_basedir} "

# Mirror recursively, skip robots.txt, reject HTML/tmp files, and drop
# the first 3 URL path components so files land under the target dir.
# (Deliberately unquoted below so the options word-split.)
option1="-e robots=off -m -np -R .html,.tmp -nH --cut-dirs=3"

while IFS= read -r line
do
  dirname=`echo $line | cut -d ',' -f 1`
  getdir=`echo $line | cut -d ',' -f 2`
  httpsdir="https://ladsweb.modaps.eosdis.nasa.gov${getdir}"
  target_dir=${target_basedir}/$dirname
  mkdir -p "${target_dir}"
  echo " #### Download directory  ${getdir} #### "
  echo " #### Download 2 path ${target_dir} #### "
  # Bug fixes: the useless backticks around wget (which would try to
  # execute its output) are gone, and the loop now reads from the
  # control file given as $1 — the original redirect target was lost.
  wget ${option1} ${httpsdir} --header "Authorization: Bearer ${myappkey}" -P "$target_dir"
  sleep 1
done < "./$filename"
echo NORMAL END
| true
|
ba3903730722b1a74a622ef54ff1fe5619d5613d
|
Shell
|
alanzhang88/SONegativeCommentDetection
|
/models/CNN/run_experiments.sh
|
UTF-8
| 944
| 2.65625
| 3
|
[] |
no_license
|
# Grid search over CNN hyper-parameters: invoke testParam.py once per
# combination and tag every run with a sequential experiment number.

num_filters=(32 64 128 256)
filter_sizes=("3,4,5")
# filter_sizes[1]="4"
dropouts=(0.1 0.3 0.5)
learning_rates=(0.001 0.002 0.003)
batch_sizes=(64)
activations=('tanh' 'softmax' 'relu' 'sigmoid')

count=0
for nf in "${num_filters[@]}"; do
  for fs in "${filter_sizes[@]}"; do
    for dropout in "${dropouts[@]}"; do
      for rate in "${learning_rates[@]}"; do
        for act in "${activations[@]}"; do
          for bs in "${batch_sizes[@]}"; do
            count=$((count + 1))
            # python testParam.py -nf $nf -fs $fs -dp $dropout -lr $rate -a $act -bs $bs count | tail -n 1 | tee -a ./experiments2/log_$nf+$fs+$dropout+$rate+$act+$bs.txt
            python testParam.py -nf $nf -fs $fs -dp $dropout -lr $rate -a $act -bs $bs -c $count
          done
        done
      done
    done
  done
done

# cd ./experiments
# python plot_graph.py
| true
|
5dcda3f2e968f25d98f69dd5cacf07b255b10741
|
Shell
|
minewhat/mapr-azure
|
/azure-wrapper.sh
|
UTF-8
| 1,269
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Wrapper around the cluster deployment scripts.
#
# Assumptions: all helper scripts were downloaded to this directory.
#
# WARNING: Azure's template file upload CLEARS the execute bit on every
# uploaded file, so the helpers must be chained with "sh <script>".
#
# Azure does not yet resolve the hostnames assigned during resource
# creation, so cluster formation relies on a generated hosts file:
# hosts are named <base><n> for n in 0..cluster_size-1, and IPs are
# <prefix><m> where m is the host index plus <first_ip>.
#
# USAGE :
#   $0 [ <basename> ] [ <cluster_size> ] [ <IP_subnet_prefix> ] [ <first_ip> ]
#
# EXAMPLE :
#   $0 testnode 4 10.0.0. 10
#     -> a 4-node cluster testnode0..testnode3 (at 10.10.10.[10-13])

THIS=$(readlink -f "$0")
BINDIR=$(dirname "$THIS")
HOSTNAME=$(hostname)

# Default base name: drop the digits after "node" in our own hostname,
# then put "node" back (e.g. "foonode3" -> "foonode").
CLUSTER_HOSTNAME_BASE="${HOSTNAME%node*}node"

sh "$BINDIR/gen-cluster-hosts.sh" ${1:-$CLUSTER_HOSTNAME_BASE} ${2:-3} ${3:-} ${4:-}

sh "$BINDIR/prepare-disks.sh"
sh "$BINDIR/prepare-node.sh"

#sh $BINDIR/deploy-mapr-ami.sh

exit 0
| true
|
01b586f191cc4fd560ce123c73b96bd25d20b19b
|
Shell
|
SandileMP/testPHP
|
/docker/ffmpeg/video1.sh
|
UTF-8
| 4,030
| 3.40625
| 3
|
[] |
no_license
|
#! /bin/sh
# Poll the PHP backend for a new interview job, then stitch the job's
# recorded video segments (listed in an ffmpeg concat .txt file) into a
# single file, producing both .webm and .mp4, and report success/failure
# back to the backend.
#
# NOTE(review): readarray, arrays, [[ ]] and ${var:off:len} below are
# bashisms under a "#! /bin/sh" shebang — this only works where sh is bash.
folder=/app
cdate=$(date +"%Y-%m-%d-%H:%M")
# Backend endpoints; start/complete/error are printf templates (%d/%s).
host="http://php/logins/"
checkUrl="$host/console/interview/getNew"
startUrl="$host/console/interview/start?interview_id=%d"
completeUrl="$host/console/interview/complete?interview_id=%d&invite_id=%d"
errorUrl="$host/console/interview/error?interview_id=%d&invite_id=%d&log=%s"
echo $checkUrl;
# Ask the backend for the next pending interview (JSON response).
output="$(curl $checkUrl)";
echo $output;
interviewId=$(echo ${output} | jq -r '.interview_id');
inviteId=$(echo ${output} | jq -r '.invite_id');
webmFile=$(echo ${output} | jq -r '.webm');
file=$(echo ${output} | jq -r '.textFile');
mp4File=$(echo ${output} | jq -r '.mp4File');
# NOTE(review): jqCommand is only printed, never passed to jq.
jqCommand=".streams | map(.codec_name) | join(\",\")";
echo "$jqCommand";
echo $cdate;
echo $interviewId;
# Nothing to do if the backend returned no job.
if [ -z "$interviewId" ]; then
exit 0;
fi
cd $folder;
if [ ! -f "$file" ]; then
# error file not exist
outputResult="$(curl $(printf $errorUrl $interviewId $inviteId 'txtFileNotFound'))";
echo $outputResult;
exit 0;
fi
echo $file;
if [ "${file##*.}" = "txt" ]; then
# Tell the backend processing has started.
outputResult="$(curl $(printf $startUrl $interviewId))";
echo $outputResult;
# Load the concat list; each row is one segment entry.
readarray rows < $file;
for row in "${rows[@]}"; do
echo $row;
done
number_of_lines=$(wc -l < "$file");
echo "$number_of_lines";
# Probe every segment to classify the job: all-h264 (isMP4) vs mixed.
isMP4=0;
isMix=0;
for row in "${rows[@]}"; do
echo "$row";
# Extract the segment filename from the row.
# NOTE(review): assumes ffmpeg concat syntax like: file 'name.webm'
# — the 11-char offset and quote stripping depend on that exact
# format; confirm against the backend's textFile layout.
inputname=${row:11:-1};
inputname="${inputname%%\'*}";
echo $inputname;
# Keep an untouched copy of each original segment.
cp $inputname original_$inputname;
jsonresponse="$(/usr/local/bin/ffprobe -i $inputname -hide_banner -show_format -show_streams -v quiet -print_format json)";
echo $jsonresponse;
codecs="$(echo $jsonresponse| jq -r '.streams|map(.codec_name)')";
echo $codecs;
if [[ $codecs == *"h264"* ]]; then
isMP4=1;
else
# Only mark as mixed if no h264 segment was seen yet.
if [ ! "$isMP4" -eq "1" ]; then
isMix=1;
fi
fi
# NOTE(review): redirecting stdin into a for-loop has no effect;
# probably a leftover from an earlier while-read version.
done < $file;
echo "isMP4: $isMP4";
echo "isMix: $isMix";
if [ "$isMP4" -eq "1" ]; then
echo "its MP4";
# h264 path: remux each segment to .mp4 (stream copy, no re-encode).
for row in "${rows[@]}"; do
echo "$row";
inputnameq=${row:11:-1};
inputnameq="${inputnameq%%\'*}";
echo $inputnameq;
echo "converting to mp4 each";
/usr/local/bin/ffmpeg -y -i $inputnameq -c:v copy partial_${inputnameq%%.*}.mp4;
echo "conversion done";
mv partial_${inputnameq%%.*}.mp4 ${inputnameq%%.*}.mp4;
done < $file;
echo "updating txt file";
# Point the concat list at the freshly remuxed .mp4 segments.
sed -i -e "s/\.webm/\.mp4/g" $file;
cat $file;
echo "concating mp4 files";
/usr/local/bin/ffmpeg -y -f concat -safe 0 -i $file partial_${file%%.*}.mp4;
mv partial_${file%%.*}.mp4 ${file%%.*}.mp4;
echo "mp4 to webm";
# Also produce the .webm the backend expects.
/usr/local/bin/ffmpeg -i ${file%%.*}.mp4 partial_${file%.*};
mv partial_${file%.*} ${file%.*};
echo "Done";
outputResult="$(curl $(printf $completeUrl $interviewId $inviteId))";
echo $outputResult;
echo "exit";
exit 0;
else
# Non-h264 path: normalize stream order in each segment, concat, then
# encode one .mp4 (padding dimensions to even numbers for the encoder).
for row in "${rows[@]}";
do
echo "$row";
inputname=${row:11:-1};
inputname="${inputname%%\'*}";
echo $inputname;
echo "$(/usr/local/bin/ffmpeg -y -i $inputname -map 0:a -map 0:v -c copy fixed_$inputname)";
mv fixed_$inputname $inputname;
done < $file;
echo "concat";
/usr/local/bin/ffmpeg -y -f concat -safe 0 -i $file partial_${file%.*};
mv partial_${file%.*} ${file%.*};
echo "convert to mp4";
/usr/local/bin/ffmpeg -y -i ${file%.*} -crf 23 -vf pad="width=ceil(iw/2)*2:height=ceil(ih/2)*2" partial_${file%%.*}.mp4;
mv partial_${file%%.*}.mp4 ${file%%.*}.mp4;
echo "Done";
outputResult="$(curl $(printf $completeUrl $interviewId $inviteId))";
echo $outputResult;
echo "exit";
exit 0;
fi
fi
# The textFile had an unexpected extension: report failure.
outputResult="$(curl $(printf $errorUrl $interviewId $inviteId 'txtFileNotFound'))";
echo $outputResult;
exit 1;
| true
|
587fa654600f5017815e504dc01bc5d1377bd811
|
Shell
|
diremy/whizzytex
|
/tags/initial/whizzytex
|
UTF-8
| 10,030
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh
### Configuration
# time in milliseconds to sleep when the pool is empty.
typeset -i WYSIPAUSE=100
TEXERRORMAXLINES=50
TEXERRORAROUND=20
# whether Whysitex should send itself a STOP signal (then it must be waken up)
# or just sleep for a while when done and no slicing is available
STOP=true
# name (or full path) of the dump package file (without the extension)
DUMP=whizzytex
# Signals telling gv and xdvi to refresh
SIGDVI=SIGUSR1
SIGPS=SIGHUP
### End of manual configuration
case $# in
0) echo 'Need a file name'; exit 1;;
esac
COMMAND="$0 $*"
TOUCH=false
MAKE=defaultmake
DO=loop
DOMARKS=false
VIEW=ps
VIEWCOMMAND=gv
WATCHFILES=
DEBUG=false
while true
do
case $1 in
-kill) DO=kill; shift;;
-wakeup) DO=wakeup; shift;;
-format) DO=format; shift;;
-pre) MAKE="$2"; shift 2;;
-marks) DOMARKS=true; shift;;
-nostop) STOP=false; shift;;
-marksonly) DO=marks; shift;;
-watch) WATCHFILES="$*"; shift $#;;
-debug) DEBUG=true; shift;;
-dvi)
VIEW=dvi;
case $2 in
.) VIEWCOMMAND=dview;;
*) VIEWCOMMAND=$2;;
esac
shift 2;;
-ps)
case $2 in
.) VIEWCOMMAND=gv;;
*) VIEWCOMMAND=$2;;
esac
shift 2;;
-*)
echo 'Unrecognized argument '"$1"'
Usage whizzytex:
whizzytex <option> ... <option> <file>
where <option> is
-kill
-format
-marks
-pre <preproces-command>
-dvi <view-command>
-ps <view-command>
' 1>&2
exit 1;;
*) break;;
esac
done
HERE=$(pwd)
NAME=$(basename $1 .tex)
FORMAT=\&$NAME
WYSI=_whizzy_$NAME
SLICE=$WYSI.new
LOG=$WYSI.gol
ERR=$WYSI.err
# errlog MSG...: diagnostics to stderr (kept off stdout, which is parsed
# by the editor driving WhizzyTeX).
errlog () { echo "$*" 1>&2; }
# log: print an excerpt of the TeX log around the first error line
# (lines starting with '!'), then return false so callers can chain
# "cmd || log".
log () {
if [ -f $LOG ]
then
# NOTE(review): $LOG is passed to head as a file operand, which makes
# head ignore the grep output on its stdin — possibly
# "| head -$TEXERRORMAXLINES" (no operand) was intended. Confirm.
grep -B 3 -A $TEXERRORAROUND -e '^!' $LOG | head -$TEXERRORMAXLINES $LOG;
fi
false;
}
errlog "$COMMAND"
LOCK=.$WYSI.tex
if [ -f $NAME.tex ]
then
:
else
echo "File $NAME.tex does not exist"
exit 1
fi
# cleaning: remove all WhizzyTeX work files for this document, ask the
# previewer (pid stored in $WYSI.id) to quit, and drop the lock file.
cleaning () {
[ -f $WYSI.id ] && kill -QUIT $(cat $WYSI.id)
rm -f $WYSI.* $NAME.fmt
rm -rf $WYSI
rm -f $LOCK
errlog "Cleaning WYSI=$WYSI, LOCK=$LOCK"
}
# trap cleaning EXIT
# suicide: like cleaning, but additionally SIGKILLs the daemon process
# recorded in $LOCK and exits this script with status 2. Used when the
# session must be torn down unconditionally (fatal errors, -kill).
suicide () {
[ -f $WYSI.id ] && kill -QUIT $(cat $WYSI.id)
rm -f $WYSI.* $NAME.fmt
rm -rf $WYSI
if [ -f $LOCK ]
then
# Read the pid before removing the lock so a concurrent instance
# cannot grab it in between.
PID=$(cat $LOCK)
rm -f $LOCK
kill -KILL $PID
fi
errlog "Killing WYSI=$WYSI, LOCK=$LOCK"
exit 2
}
# To fix: does not work
trap cleansuicide 3
# wakeup: resume the (possibly self-STOPped) daemon whose pid is stored
# in the lock file — counterpart of the "kill -STOP $$" in the main loop.
wakeup () {
kill -CONT $(cat $LOCK)
}
# Making format
# format: build the precompiled TeX format ($NAME.fmt) for the document,
# redefining \documentclass on the fly so the $DUMP support package is
# loaded automatically before the document's own preamble.
format () {
initex \&latex '\nonstopmode\let\Documentclass\documentclass\renewcommand{\documentclass}[2][]{\Documentclass[#1]{#2}\usepackage{'"$DUMP"'}}\input{'"$NAME.tex"'}'
}
SECTION='\\\(chapter\|section\|subsection\)'
# intersect: restrict $WYSI.sec and $WYSI.pag to the records they have
# in common, by comparing each file against their combined sorted union
# with comm (column 3 = lines unique to the union, i.e. shared records).
intersect () {
sort $WYSI.sec $WYSI.pag > $WYSI.spg
sort -n -t : -u $WYSI.pag $WYSI.sec | sort | comm -1 -3 - $WYSI.spg > $WYSI.sec.1
sort -n -t : -u $WYSI.sec $WYSI.pag | sort | comm -1 -3 - $WYSI.spg > $WYSI.pag.1
mv $WYSI.pag.1 $WYSI.pag
mv $WYSI.sec.1 $WYSI.sec
}
marks () {
grep -n -e "^$SECTION" $NAME.tex | \
sed -e 's/^\([0-9]*\):'"$SECTION"'/\1:\2@/' -e 's/"/\\"/' \
> $WYSI.sec
initex \&$NAME '\WysitexInput{'"$NAME"'}'
intersect
( echo "(setq whizzytex-marks '("; \
( join -t @ $WYSI.pag $WYSI.sec | \
sed -e 's/^[0-9]*:\([^@]*\)@\([^@]*\)@\(.*\)$/("\\\1\3"."\\WhizzyTeX\2")/' -e 's/\\/\\\\/'); \
echo "))" \
) >> $WYSI.tmp
cat $WYSI.tmp
}
batchmarks () {
grep -n -e "^$SECTION" $NAME.tex | \
sed -e 's/^\([0-9]*\):'"$SECTION"'/\1:\2@/' -e 's/"/\\"/g' \
-e 's/\\/\\\\/g' \
> $WYSI.sec
if initex \&$NAME '\WysitexInput{'"$NAME"'}'
then
cp $NAME.dvi $NAME.dview
if $1
then
intersect
( echo "(setq whizzy-marks '("; \
( join -t @ $WYSI.pag $WYSI.sec | \
sed -e 's/^[0-9]*:\([^@]*\)@\([^@]*\)@\(.*\)$/("\\\\\1\3"."\\\\WhizzyTeX\2")/' \
); \
echo "))" \
) > $WYSI.tmp
mv $WYSI.tmp $WYSI.pos
fi
if [ -f $NAME.toc ]; then cp $NAME.toc $WYSI.toc; fi
if grep 'LaTeX Warning: Citation' $NAME.log
then
bibtex $NAME;
if [ -f $NAME.bbl ]; then cp $NAME.bbl $WYSI.bbl; fi
true
fi
else
rm -f $WYSI.pag; false
fi
}
############################################################################
# wdiff
# wordify: read text on stdin and emit one whitespace-separated word per
# line on stdout (used by wdiff to diff a changed line at word level).
#
# Rewritten: the original tr/sed pipeline embedded unprintable control
# characters (mangled in transit); this is the printable equivalent —
# squeeze every run of whitespace into a single newline.
wordify () {
tr -s '[:space:]' '\n'
}
# wdiff FST SND: if the two files differ by a single changed/added hunk,
# re-diff that hunk word-by-word and print a compact one-line report
# ("<Error in Line N Word M : new-words >"); return false when the
# difference is not that simple so the caller falls back to a full log.
wdiff () {
FST=$1
SND=$2
DIF=$WYSI.dif
diff $FST $SND > $DIF
# Exactly one hunk header, at most 3 lines total, and the hunk is a
# change (c) or addition (a) — otherwise give up.
if [ $(grep -v -e '^[-><]' $DIF | wc -l) -eq 1 ] && \
[ $(wc -l < $DIF) -lt 3 ] && \
grep -e '^[1-9][0-9,]*[ac]' $DIF > $DIF.lin
then
# Extract the old (<) and new (>) sides of the hunk...
sed $DIF -n -e '/^< /s/^< //p' > $FST.lin
sed $DIF -n -e '/^> /s/^> //p' > $SND.lin
# ...split each side into one word per line, and diff the words.
wordify < $FST.lin > $FST.wrd
wordify < $SND.lin > $SND.wrd
diff $FST.wrd $SND.wrd > $DIF.wrd
if [ $(grep -v -e '^[-><]' $DIF.wrd | wc -l) -eq 1 ] && \
grep -e '^[1-9][0-9,]*[ac]' $DIF.wrd > /dev/null
then
# Flatten the report onto a single space-separated line.
(
echo '<Error in Line'
cat $DIF.lin
echo Word
grep -e '^[0-9]' $DIF.wrd
echo ':'
sed $DIF.wrd -n -e 's/^> \(.*\)$/\1/p';
echo '>'
) | tr '\n' ' '
echo
else
false
fi
else
false
fi
}
############################################################################
case $DO in
kill) suicide && exit 0 || exit 1;;
wakeup) wakeup && exit 0 || exit 1;;
format) format && exit 0 || exit 1;;
marks) marks && exit 0 || exit 1;;
loop)
# To ensure that only one deamon is running on the spool file.
if [ -f $LOCK ] && kill -CONT $(cat $LOCK) 2>/dev/null
then
echo 'Remove running process first'
exit 1
else
echo $$ > $LOCK
fi
;;
esac
if [ -f $NAME.fmt -a $NAME.tex -ot $NAME.fmt ]
then
:
else
echo -n '<Initial formating '
if format >$LOG 2>$ERR && [ -f $NAME.fmt ]
then
echo 'succeeded>'
else
echo 'failed>'
echo '<*** Fatal error: could not build initial format ***>'
[ -f $ERR ] && cat $ERR
log
echo '<Exiting>'
suicide
fi
fi
# Initial file
echo '\begin{document}[Initial empty page]\end{document}' > $WYSI.nil
# Texing...
echo $VIEWCOMMAND 1>&2
ANTIDATE=$(date +%m%d%H%M.%S)
case $VIEW in
ps)
preview () {
dvips -o $WYSI._ps $WYSI.dvi 2>/dev/null && \
{ if [ $WYSI._ps -nt $WYSI.ps ]; then true; \
else touch -t $ANTIDATE $WYSI._ps; fi; \
mv $WYSI._ps $WYSI.ps; }
}
view () {
$VIEWCOMMAND $WYSI.ps &
}
SIG=$SIGPS
;;
dvi)
preview () { mv $WYSI.dvi $WYSI.dview; }
view () {
$VIEWCOMMAND $WYSI.dview &
}
SIG=$SIGDVI
PSBUG=0
;;
esac
newfiles () {
mv $(find $HERE/$WYSI -type f -print || suicide) $HERE/ 2>/dev/null
}
defaultmake () { mv $SLICE $WYSI.tex 2>/dev/null; true; }
preprocess () {
if $MAKE $WYSI.tex >$LOG 2>$ERR && [ -f $WYSI.tex ]
then
true
else
echo '<Preprocessing failed>'
[ -f $ERR ] && cat $ERR
log
false
fi
}
process () {
rm -f $WYSI.aux
echo -n '<Recompiling '
if { initex $FORMAT $WYSI && preview; } > $LOG
then
echo 'compilation succeeded>'
ln -f $WYSI.tex $WYSI.xet
else
echo 'recompilation failed>'
if [ -f $WYSI.xet ] && wdiff $WYSI.xet $WYSI.tex
then
:
else
log
echo 'l.'$[ $(wc -l < $WYSI.tex) - 1 ]' '
fi
echo '<Continuing>'
false
fi
}
# debugging information
if $DEBUG; then
errlog "NAME=$NAME"
errlog "WYSI=$WYSI"
errlog "DOMARKS=$DOMARKS"
fi
# Initial run
# to make sure the dvi file exists
initex $FORMAT $WYSI.nil > $LOG && preview || log
# To give it a chance to see citations and other global information.
if [ -f $NAME.bbl ]; then cp $NAME.bbl $WYSI.bbl; fi
if [ -f $NAME.toc ]; then cp $NAME.toc $WYSI.toc; fi
# process $WYSI.tex is present. Will override $WYSI.dvi if it succeeds
mkdir $WYSI 2>/dev/null
if newfiles; then preprocess && process; fi
# lauch the previewer
if view 2>$ERR
then
ID=$!
echo $ID > $WYSI.id
sleep 1
if kill -CONT $ID
then
:
else
echo '<Fatal error: viewing process terminated prematurely>'
suicide
fi
else
echo '<Fatal error: viewing process failed>'
cat $ERR
suicide
fi
whole () {
# if head $WYSI.tex | grep '%DOMARKS'
# then DOMARKS=true
# else DOMARKS=false
# fi
# if $DOMARKS && [ ! -f $WYSI.pag ]
# then
echo '<Recompiling whole document>'
if batchmarks $1 </dev/null >$LOG.pag
then
echo '<Whole document updated>'
if $1; then echo '<Pages and sections updated>'; fi
else
echo '<Whole document recompilation failed>'
fi
# fi
}
whole $DOMARKS
# The loop watching changes
PSDATE=0
while true
do
REFORMATED=false
if [ $NAME.fmt -ot $NAME.tex ]
then
echo '<Reformating>'
touch -r $NAME.tex $NAME.fmt
mv -f $NAME.fmt $WYSI.fmt
if format > $LOG
then
echo '<Reformating succeeded>'
REFORMATED=true
whole $DOMARKS
# if $DOMARKS
# then
# echo '<Recomputing pages and sections>'
# if batchmarks </dev/null >$LOG.pag
# then
# echo '<Pages and sections updated>'
# else
# echo '<Pages and sections are ignored>'
# fi
# fi
else
mv -f $WYSI.fmt $NAME.fmt
echo '<Reformatting failed>'
log
echo '<Contuning with the old format>'
fi
fi
if newfiles && preprocess || $REFORMATED
then
if process
then
kill -$SIG $ID \
|| (echo '<Fatal error: could not refresh! Exiting>'; \
suicide; exit 2)
else
echo '<Continuing>'
fi
else
if $STOP; then kill -STOP $$; else usleep $WYSIPAUSE; fi
kill -CONT $ID || (suicide; exit 2)
fi
done
exit 0
| true
|
f38519e6f3b91d83022bb9e763d2dd6ede14c619
|
Shell
|
rock3125/wsd-training
|
/create_td/scripts/train.sh
|
UTF-8
| 250
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the WSD training-set creation tool with every jar in lib/ on
# the classpath. Requires JAVA_HOME; all arguments are forwarded to the
# Java main class.

if [ "$JAVA_HOME" == "" ]; then
  echo "JAVA_HOME not set" 1>&2
  exit 1
fi

# Bug fix: the original assigned the script directory to HOME, silently
# overwriting the user's exported $HOME for the JVM and every child
# process; use a dedicated variable instead.
SCRIPT_DIR=$(dirname "$0")

# Colon-separated classpath from all bundled jars.
CP=$(echo "$SCRIPT_DIR"/lib/*.jar | tr ' ' ':')

"$JAVA_HOME/bin/java" -cp "$CP" \
  -XX:+UseG1GC \
  industries.vocht.wsd_trainingset_creation.Main "$@"
| true
|
4c5be0127f645100fe4d4ead6bb73d37788312b9
|
Shell
|
deepakmench18/Assignment_1
|
/day6problem/forloop/power2.sh
|
UTF-8
| 145
| 3.109375
| 3
|
[] |
no_license
|
# Read N from stdin and print the first N powers of two (2, 4, 8, ...),
# one per line.

# print_powers N: emit 2^1 .. 2^N using a running product.
# Perf fix: the original recomputed each power from scratch with a
# nested loop (O(N^2) multiplications); the running product is O(N)
# and produces identical output.
print_powers() {
  local n=${1:-0} power=1 k
  for (( k = 1; k <= n; k++ )); do
    power=$(( power * 2 ))
    echo "$power"
  done
}

echo "Enter Number"
read num
print_powers "$num"
| true
|
1cd1e6ff807092199317ffa13913873f2e389054
|
Shell
|
grumpyoldgit/wonderlamp
|
/fs/root/.bashrc
|
UTF-8
| 127
| 2.609375
| 3
|
[] |
no_license
|
# Prepend the Node.js toolchain to PATH when it is installed.
if [ -d /nodejs/bin ]; then
  PATH="/nodejs/bin:$PATH"
fi

# Colourized directory listings.
alias ls='ls --color'
| true
|
3a72286fc4e06e7e04dc6087de99e054eccaccb2
|
Shell
|
orange-cloudfoundry/bosh-go-cpi-cloudstack
|
/packages/cloudstack_cpi/packaging
|
UTF-8
| 277
| 2.75
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash -eu

# Detect the current OS (linux/darwin/...) to select the matching
# pre-built CPI tarball.
ostype=$(uname | tr '[:upper:]' '[:lower:]')

# Unpack the BOSH CloudStack CPI binary into the package's bin/
# directory and rename it to what the job templates expect.
mkdir -p "${BOSH_INSTALL_TARGET}/bin"
tar zxf "bosh-cpi-cloudstack_${ostype}.tar.gz" --strip 1 -C "${BOSH_INSTALL_TARGET}/bin"
mv "${BOSH_INSTALL_TARGET}/bin/bosh-cpi-cloudstack" "${BOSH_INSTALL_TARGET}/bin/cloudstack_cpi"
| true
|
2aac11b76559b6c39272ef8c3ac81a2c3a6ab2ba
|
Shell
|
blacknon/tar2package
|
/dockerfiles/tar2deb/changelog.sh
|
UTF-8
| 493
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Prepend a new entry to the deb template's debian/changelog, built from
# the @@@NAME@@@/@@@VERSION@@@/... placeholders, keeping prior entries.
set -e
# Directory containing this script (works from any cwd; the ${(%):-%N}
# fallback covers zsh, where BASH_SOURCE is unset).
_THIS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]:-${(%):-%N}}")"; pwd)"
# Preserve the existing changelog so it can be appended after the new entry.
cp "${_THIS_DIR}/deb-template/debian/changelog" /tmp/changelog
{
# Epoch timestamp keeps each generated version unique.
echo "@@@NAME@@@ (@@@VERSION@@@-$(date +%s)) trusty; urgency=medium"
echo
echo "@@@CHANGELOG@@@" | sed 's/^/  /'
echo
echo " -- @@@AUTHOR@@@ <@@@EMAIL@@@>  $(date --rfc-2822)"
echo
cat /tmp/changelog
} > "$_THIS_DIR/changelog.new"
# Atomically replace the template changelog with the new version.
mv "$_THIS_DIR/changelog.new" "${_THIS_DIR}/deb-template/debian/changelog"
rm /tmp/changelog
| true
|
d0182fc9192d3cf175825267b5120331986b6542
|
Shell
|
dtuanon/challenge1
|
/Py_code/run_queries
|
UTF-8
| 248
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run one search query against the wiki index and collate the per-handle
# result files into a single file, prefixed with the match count.
#
# $1: query term   $2: wiki size / results subdirectory   $3: extra flags
count=$(python query.py --query $1 --wiki_size $2 $3)
outfile=../query_results/"$2/$1_results.txt"
# NOTE(review): the glob does not expand inside the assignment —
# $result_files holds the literal pattern and only expands where it is
# used unquoted below.
result_files="../query_results/$2/"*Handle*
cat $result_files > $outfile
rm $result_files
# Prepend the count as the first line of the collated results.
sed -i "1 s/^/$count\n/" $outfile
echo $count
| true
|
e3d8746647331b39c19c4fa0866b259446cbdb9c
|
Shell
|
pedrocarrega/CN
|
/kubernets/spark-deploy/write-spark3.sh
|
UTF-8
| 1,267
| 2.59375
| 3
|
[] |
no_license
|
# Generate Query3.py: a PySpark job computing, for each (user, category),
# the user's "brand loyalty" — the share of their purchases in that
# category that went to their most-bought brand — averaged over all
# pairs. Reads dataset.csv from the given GCS bucket and writes CSV
# output back to it.
#
# $1: name of the GCS bucket holding dataset.csv
# (The Portuguese notes inside the generated file say: you must download
# the CSV first; goal: average brand loyalty per user within a category.)
BUCKET_NAME=$1
echo "from pyspark.sql import SparkSession
from pyspark.sql.types import DateType
from pyspark.sql.types import DoubleType
from pyspark.sql.functions import asc
from pyspark.sql.functions import sum
from pyspark.sql.functions import date_format
from pyspark.sql.functions import avg
from pyspark.sql.functions import col
from pyspark.sql.functions import max
from pyspark.sql.functions import count
spark = SparkSession \\
.builder \\
.appName(\"PySpark example\") \\
.getOrCreate()
#IMPORTANT: TENS DE FAZER DOWNLOAD DO CSV
# GOAL: Media de fidelidade a uma marca dentro de uma categoria, para cada user
df = spark \\
.read \\
.option(\"header\", \"false\") \\
.csv(\"gs://$BUCKET_NAME/dataset.csv\")
max_by_brand = df \\
.select(col(\"_c3\").alias(\"category_id\"),col(\"_c4\").alias(\"category_code\"), col(\"_c5\").alias(\"brand\"), col(\"_c7\").alias(\"user_id\")) \\
.filter(col(\"brand\").isNotNull()) \\
.groupBy(\"user_id\", \"category_id\", \"brand\") \\
.count() \\
.groupBy(\"user_id\", \"category_id\") \\
.agg(max(\"count\").alias(\"max\"), sum(\"count\").alias(\"total\")) \\
.agg(avg(col(\"max\") / col(\"total\")))
max_by_brand.write.format('csv').save(\"gs://$BUCKET_NAME/output\")
spark.stop()
" > Query3.py
| true
|
aa26965753b1e88b70b8e722221717218ac9ff4f
|
Shell
|
jungcheolkwon/blueprint
|
/F5/blueprint_f5bigip_transit/scripts/test.sh
|
UTF-8
| 1,638
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
#edited by JC
#j.kwon@f5.com
##https://github.com/F5Networks/terraform-provider-bigip/tree/master/examples/as3
##https://clouddocs.f5.com/products/big-iq/mgmt-api/v6.0/HowToSamples/bigiq_public_api_wf/t_bigiq_public_api_workflows.html
##https://clouddocs.f5.com/products/big-iq/mgmt-api/v6.0/ApiReferences/bigiq_public_api_ref/r_ip_pool_state.html
#
# Push the AS3 declaration (as3.yaml) to each BIG-IP instance in the
# transit hub resource group, authenticating with a short-lived token.

dir=$(pwd)
name="$(cat ~/.ssh/.user)"          # BIG-IP admin user
password="$(cat ~/.ssh/.password)"  # BIG-IP admin password
prefix=$(cd /tf/caf/landingzones/landingzone_vdc_demo && terraform output prefix)
rg=$prefix-hub-network-transit

for i in bigip1-0 bigip2-0
do
  # Bug fix: the original if/else ran byte-identical commands in both
  # branches; one body per instance is equivalent.
  ip=$(az vm show -d -g $rg -n $i --query publicIps -o tsv)
  # Authenticate against the iControl REST API and extract the token.
  token=$(curl -sk -H "Content-Type: application/json" -X POST -d '{"username":"'$name'","password":"'$password'","loginProviderName":"tmos"}' https://$ip:8443/mgmt/shared/authn/login | jq -r .token.token)
  # POST the AS3 declaration.
  # NOTE(review): "test/x-yaml" looks like a typo for "text/x-yaml" —
  # confirm against the AS3 endpoint before changing it.
  curl -sk -H "Content-Type: test/x-yaml" -H "X-F5-Auth-Token: $token" -X POST --data-binary @as3.yaml https://$ip:8443/mgmt/shared/appsvcs/declare | jq -r .
  echo -e "\033[32m...Application Services 3 Extension is working on $i... \033[0m "
done
| true
|
50cece5acdcddab2e6eb12c4ba215b91f7b50755
|
Shell
|
remoteit/docs
|
/.gitbook/assets/get-pi-status-filtered (1).sh
|
UTF-8
| 3,247
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# remote.it Bulk Management Status Script (Raspberry Pi).
#
# $1 - jobID used for status updates
# $2 - API server
#
# Clears status columns A-E in the remote.it portal, then reports:
#   Status A = free disk space, non-zero-size partitions only
#   Status B = kernel version (uname)
#   Status C = system uptime since last boot
#   Status D = running connectd processes
#   Status E = installed connectd package version
# Every reported value is also appended to $0.log for local debugging.

TOOL_DIR="/usr/bin"
NOTIFIER="connectd_task_notify"
JobId="$1"
API="$2"

# Status <column> <text>: push one status value to the remote.it portal.
Status()
{
  ret=$(${TOOL_DIR}/$NOTIFIER "$1" "$JobId" "$API" "$2")
}

# Clear all status columns A-E in remote.it portal
Status a ""
Status b ""
Status c ""
Status d ""
Status e ""

# Status A: disk usage, filtered to rows whose 3rd column is non-zero.
# Perf/cleanup fix: the original spawned awk once per df line and left
# temp files in /tmp; one awk pass does the same work. The "$1=$1"
# rebuild collapses runs of spaces, matching the original per-line
# unquoted-echo output.
diskfree="$(df | awk '$3 != "0" { $1 = $1; print }')"
echo "$diskfree" > "$0.log"
Status a "$diskfree"

# Status B: Linux kernel version.
fwversion="Kernel: $(uname -a | awk '{ print $3 }')"
echo "$fwversion" >> "$0.log"
Status b "$fwversion"

# Status C: system uptime since last boot.
uptime="$(uptime)"
echo "$uptime" >> "$0.log"
Status c "$uptime"

# Status D: running connectd processes.
nsvcs="$(ps ax | grep connectd | grep -v grep)"
echo "$nsvcs" >> "$0.log"
Status d "$nsvcs"

# Status E: connectd package version per dpkg.
cversion="connectd $(dpkg -s connectd | grep Version)"
echo "$cversion" >> "$0.log"
Status e "$cversion"

# Finalize the job; no further updates are accepted after this.
Status 1 "Job complete"

# Use this line in case of error, and add desired message
# Status 2 "Job Failed"
| true
|
f0f567a63e1bb071f417de46ffbe9e1e813e8d33
|
Shell
|
mikdusan/zig
|
/ci/zinc/build_aarch64_macos
|
UTF-8
| 402
| 2.5625
| 3
|
[
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
#!/bin/sh
# Cross-compile zig for aarch64-macos with the staged release compiler,
# linking against the pre-built static LLVM under /deps.
set -xe

RELEASE_STAGING="$DRONE_WORKSPACE/_release/staging"
TARGET=aarch64-macos-none
MCPU=apple_a14
INSTALL_PREFIX="$DRONE_WORKSPACE/$TARGET"
SEARCH_PREFIX="/deps/$TARGET"

# Stripped, stage1-enabled release build installed per-target.
"$RELEASE_STAGING/bin/zig" build \
  --prefix "$INSTALL_PREFIX" \
  --search-prefix "$SEARCH_PREFIX" \
  -Dstatic-llvm \
  -Drelease \
  -Dstrip \
  -Dtarget="$TARGET" \
  -Dcpu="$MCPU" \
  -Denable-stage1
| true
|
9bdefb7fd5fc75874b8a06412a4f134796fc6968
|
Shell
|
theRealCarneiro/Dotfiles
|
/.local/bin/randbg
|
UTF-8
| 262
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Pick a random wallpaper every minute; SIGUSR1 forces an immediate swap
# (the signal interrupts the sleep, and the trap changes the wallpaper).

wall=/home/gabriel/.config/wallpaper

# Choose one random image, remember it, and apply it.
toggle() {
	choice=$(find /home/gabriel/Bibliotecas/Imagens/Wallpapers -type f | shuf | head -n1)
	echo $choice > $wall
	xwallpaper --zoom "$choice"
}

trap "toggle" USR1

# Sleep in the background and wait on it so the USR1 trap can fire
# between ticks; rotate the wallpaper after each completed minute.
while :; do
	sleep 60 &
	wait $! &&
	toggle
done
| true
|
b6a1fb43190fa85f8c5ee67a830f70bd0d581216
|
Shell
|
pvavercak/cxx20-modules-examples
|
/test
|
UTF-8
| 4,792
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Run various build tests (in source, install, etc) by essentially executing
# the commands shown in README.md files.
#
usage="usage: ./test c++-compiler [c++-compiler-option...]"
owd=`pwd`
trap "{ cd $owd; exit 1; }" ERR
set -o errtrace # Trap in functions.
if [ $# -eq 0 ]; then
echo "$usage" 1>&2
exit 1
fi
b=(b config.cxx=\""$*"\")
set -x
##
##
cd hello-simple/
"${b[@]}"
./hello
"${b[@]}" clean
cd "$owd"
##
##
cd hello-module/
"${b[@]}"
"${b[@]}" test
"${b[@]}" clean
cd "$owd"
##
##
cd hello-partition/
"${b[@]}"
"${b[@]}" test
"${b[@]}" clean
cd "$owd"
##
##
cd hello-header-import/
"${b[@]}"
"${b[@]}" test
"${b[@]}" clean
cd "$owd"
##
##
#ti="all-importable"
ti="all-importable std-importable@false"
cd hello-header-translate/
"${b[@]}" config.cxx.translate_include="$ti"
"${b[@]}" config.cxx.translate_include="$ti" test
"${b[@]}" config.cxx.translate_include="$ti" clean
cd "$owd"
##
##
cd hello-library-module/
"${b[@]}" create: config/, cc
cd config/
b configure: ../libhello-format-module/@libhello-format-module/
b configure: ../libhello-module/@libhello-module/
b configure: ../hello-library-module/@hello-library-module/
b hello-library-module/
b test: hello-library-module/
cd ..
rm -rf config/
rm -rf /tmp/install
"${b[@]}" config.install.root=/tmp/install install: libhello-format-module/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib config.install.root=/tmp/install install: libhello-module/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib hello-library-module/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib test: hello-library-module/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib clean: hello-library-module/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib clean: libhello-module/
"${b[@]}" clean: libhello-format-module/
rm -rf /tmp/install
cd "$owd"
##
##
cd hello-library-header/
"${b[@]}" create: config/, cc
cd config/
b configure: ../libhello-format-header/@libhello-format-header/
b configure: ../libhello-header/@libhello-header/
b configure: ../hello-library-header-import/@hello-library-header-import/
b hello-library-header-import/
b test: hello-library-header-import/
b configure: config.cxx.translate_include=all-importable \
../hello-library-header-translate/@hello-library-header-translate/
b hello-library-header-translate/
b test: hello-library-header-translate/
cd ..
rm -rf config/
#ti="all-importable"
ti="all-importable std-importable@false"
rm -rf /tmp/install
"${b[@]}" config.install.root=/tmp/install install: libhello-format-header/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib config.install.root=/tmp/install install: libhello-header/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib hello-library-header-import/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib test: hello-library-header-import/
#
# @@ Unexpected module export from mapper.
#
#"${b[@]}" config.cc.loptions=-L/tmp/install/lib config.cxx.translate_include="$ti" hello-library-header-translate/
#"${b[@]}" config.cc.loptions=-L/tmp/install/lib config.cxx.translate_include="$ti" test: hello-library-header-translate/
#"${b[@]}" config.cc.loptions=-L/tmp/install/lib config.cxx.translate_include="$ti" clean: hello-library-header-translate/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib clean: hello-library-header-import/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib clean: libhello-header/
"${b[@]}" clean: libhello-format-header/
rm -rf /tmp/install
cd "$owd"
##
##
# @@ GCC ICE
if false; then
cd hello-utility-library-module/
"${b[@]}" create: config/, cc
cd config/
b configure: ../libhello-utility-module/@libhello-utility-module/
b configure: ../hello-utility-library-module/@hello-utility-library-module/
b hello-utility-library-module/
b test: hello-utility-library-module/
cd ..
rm -rf config/
rm -rf /tmp/install
"${b[@]}" config.install.root=/tmp/install install: libhello-utility-module/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib hello-utility-library-module/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib test: hello-utility-library-module/
"${b[@]}" config.cc.loptions=-L/tmp/install/lib clean: hello-utility-library-module/
"${b[@]}" clean: libhello-utility-module/
rm -rf config/
cd "$owd"
fi
| true
|
c96f910b6d46b8a232c03e343fd49043333ab544
|
Shell
|
franklines/elastic-kibana-k8s
|
/generateData.sh
|
UTF-8
| 889
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Author: Franklin E.
# Desc: Populate the "linux_books" ElasticSearch index with Linux book
#       records fetched from the OpenLibrary.org API.
# API Info: https://openlibrary.org/developers/api

BOOK_API_ENDPOINT="https://openlibrary.org";
ELASTICSEARCH_HOST="http://localhost:9200";

# Create the target index (ES returns an error body if it already
# exists, which is ignored).
function indexInit()
{
	curl -s -X PUT "${ELASTICSEARCH_HOST}/linux_books";
}

# Fetch the first 100 "linux" search hits, then index each book
# document under its primary ISBN.
function generateData()
{
	# Perf fix: fetch the search results once instead of re-querying the
	# API on every loop iteration (the response is identical each time).
	local search
	search=$(curl -s "${BOOK_API_ENDPOINT}/search.json?q=linux");

	for i in {0..99};
	do
		ISBN=$(printf '%s' "$search" | python -c "import sys, json; print(json.load(sys.stdin)['docs'][${i}]['isbn'][0])");
		echo "ISBN: ${ISBN}";
		BOOK=$(curl -s "${BOOK_API_ENDPOINT}/api/books?bibkeys=ISBN:${ISBN}&format=json");
		echo "BOOK: ${BOOK}";
		curl -s -X POST -H 'Content-Type: application/json' -d "${BOOK}" "${ELASTICSEARCH_HOST}/linux_books/json/${ISBN}";
	done
}

indexInit;
generateData;
| true
|
f0f67199e78c561a72100aaae0459e5703c7a6cd
|
Shell
|
Frankiefir1/openmediavault-openvpn
|
/usr/share/openmediavault/confdb/create.d/conf.service.openvpn.sh
|
UTF-8
| 2,762
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# @license   http://www.gnu.org/licenses/gpl.html GPL Version 3
# @author    Volker Theile <volker.theile@openmediavault.org>
# @author    OpenMediaVault Plugin Developers <plugins@omv-extras.org>
# @copyright Copyright (c) 2009-2013 Volker Theile
# @copyright Copyright (c) 2013-2019 OpenMediaVault Plugin Developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

set -e

. /usr/share/openmediavault/scripts/helper-functions

# EasyRSA release to download when not yet installed.
version="3.0.6"

SERVICE_XPATH_NAME="openvpn"
SERVICE_XPATH="/config/services/${SERVICE_XPATH_NAME}"

# Seed the configuration database with default settings on first run.
# Each here-doc line is "<key> <default>"; a missing default means the
# key is created empty.
if ! omv_config_exists "${SERVICE_XPATH}"; then
	omv_config_add_node "/config/services" "${SERVICE_XPATH_NAME}"
	while read -r key value; do
		omv_config_add_key "${SERVICE_XPATH}" "${key}" "${value}"
	done <<EOF
enable 0
port 1194
protocol udp
deviceovpn tun
compression 1
duplicate_cn 0
pam_authentication 0
extra_options
loglevel 2
vpn_network 10.8.0.0
vpn_mask 255.255.255.0
gateway_interface
default_gateway 1
default_route 1
client_to_client 0
dns
dns_domains
wins
public_address
EOF
	omv_config_add_node "${SERVICE_XPATH}" "clients" ""
fi

# The daemon runs under a dedicated system group; create it if missing.
if ! omv_group_id_exists openvpn; then
	addgroup --quiet --system openvpn
fi

# Install EasyRSA if this exact version is not present yet.
if [ ! -f "/opt/EasyRSA-${version}/easyrsa" ]; then
	wget https://github.com/OpenVPN/easy-rsa/releases/download/v${version}/EasyRSA-unix-v${version}.tgz -P /opt/
	tar xf /opt/EasyRSA-unix-v${version}.tgz -C /opt
	rm -rf /opt/EasyRSA-unix-v${version}.tgz
fi

exit 0
| true
|
c0077f190fd89d8de9f90f6d82c28883d4499d28
|
Shell
|
kdnfgc/.dotfiles
|
/zsh/.zshrc
|
UTF-8
| 2,490
| 2.765625
| 3
|
[] |
no_license
|
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc. Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
### Added by Zinit's installer
# Bootstrap the zinit plugin manager on first run (clones it into ~/.zinit/bin).
if [[ ! -f $HOME/.zinit/bin/zinit.zsh ]]; then
print -P "%F{33}▓▒░ %F{220}Installing %F{33}DHARMA%F{220} Initiative Plugin Manager (%F{33}zdharma/zinit%F{220})…%f"
command mkdir -p "$HOME/.zinit" && command chmod g-rwX "$HOME/.zinit"
command git clone https://github.com/zdharma/zinit "$HOME/.zinit/bin" && \
print -P "%F{33}▓▒░ %F{34}Installation successful.%f%b" || \
print -P "%F{160}▓▒░ The clone has failed.%f%b"
fi
source "$HOME/.zinit/bin/zinit.zsh"
autoload -Uz _zinit
(( ${+_comps} )) && _comps[zinit]=_zinit
### End of Zinit's installer chunk
#Plugins
# Prompt theme plus syntax highlighting and fish-style autosuggestions.
zinit light romkatv/powerlevel10k
zinit light zsh-users/zsh-syntax-highlighting
zinit light zsh-users/zsh-autosuggestions
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
#History
# NOTE(review): SAVEHIST is set but HISTSIZE is not, so the in-memory
# history keeps zsh's default size — confirm this is intentional.
HISTFILE=~/.zsh_history
SAVEHIST=1000
#VI Mode
#bindkey -v
#export KEYTIMEOUT=1
# Use vim keys in tab complete menu:
#bindkey -v 'p' vi-backward-char
#bindkey -v 'x' vi-down-line-or-history
#bindkey -v 'k' vi-up-line-or-history
#bindkey -v 'y' vi-forward-char
#Navigate terminal with vim keys
# Ctrl-P/X/K/Y move left/down/up/right; Ctrl-B/W jump by word.
bindkey '^P' backward-char
bindkey '^X' down-line-or-history
bindkey '^K' up-line-or-history
bindkey '^Y' forward-char
bindkey '^H' backward-delete-char
bindkey '^B' backward-word
bindkey '^W' forward-word
#Fixes backspace bug
#bindkey -v '^?' backward-delete-char
#bindkey -v menuselect
#Kitty Completion
autoload -Uz compinit
compinit
# Completion for kitty
kitty + complete setup zsh | source /dev/stdin
#Sources Cargo & .profile
source ~/.profile
export PATH="$PATH:/home/kdn/flutter/bin"
#source ~/.cargo/env
#Aliases
# Git shorthands plus a few system/cargo conveniences.
alias ls='ls --color'
alias init='git init'
alias push='git push'
alias commit='git commit'
alias add='git add'
alias clone='git clone'
alias pull='git pull'
alias status='git status'
alias spec='pfetch'
alias off='poweroff'
alias rb='sudo reboot'
alias new='cargo new'
alias build='cargo build'
alias run='cargo run'
alias check='cargo check'
alias carup='cargo update'
| true
|
d70c9f22f4902b8a368534667fe4aad76520ba21
|
Shell
|
jleck/elasticbeanstalk-nginx-php
|
/build
|
UTF-8
| 3,894
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# AWS Elastic Beanstalk Nginx/PHP-FPM Configuration
#
# Converts an AMI instance from Apache to Nginx/PHP-FPM, with optional
# addon packages (names under addons/ or git:// URLs) built in between.
#
# @author James Leckenby <me@jleck.co.uk>
# @link http://jleck.co.uk
# @copyright 2013 James Leckenby
# @license MIT License
# @version 1.0

# Global variables
FILE=${0##*/}
VERSION=3.0.0

# Parse options.
# FIX: long option "version" was misspelled "verion", so --version was
# rejected by getopt.
ARGS=$(/usr/bin/getopt -o a:hv -l addons:,help,version -n "$FILE" -- "$@")
eval set -- "$ARGS"

# Loop options and set flags
while true; do
  case $1 in
    -a|--addons) shift
      ADDONS=$1;;
    -v|--version) echo "$VERSION"
      exit 0;;
    -h|--help) echo "Usage: $FILE [options]
       $FILE --version

Convert AMI instance from Apache to Nginx/PHP-FPM. With support for deployment hooks, Varnish and more through addon packages

Options:
  -a|--addons    comma seperated list of addons to install
  -h|--help      show this output
  -v|--version   show script version
"
      exit 0;;
    *) break;;
  esac
  shift
done

# Output header
echo -e '# AWS Elastic Beanstalk Nginx/PHP-FPM Configuration\n# Copyright 2013 James Leckenby'
if [[ $ADDONS ]]; then
  echo -e "# $ADDONS\n"
  # Trailing comma plus IFS=',' lets the unquoted for-loops below split
  # the list.
  ADDONS=${ADDONS},
  IFS=','
else
  echo -e '# No addons\n'
fi
/bin/sleep 5

# Download files and folders
echo "Downloading build files and folders"
/bin/rm -rf /tmp/build
/usr/bin/git clone git://github.com/jleck/elasticbeanstalk-nginx-php.git /tmp/build > /dev/null 2>&1
if [ ! -f '/tmp/build/build' ]; then
  echo 'Error downloading main folders'
  exit 1
fi

# Loop comma seperated options
if [[ $ADDONS ]]; then
  for ADDON in $ADDONS; do
    # Clone GIT URL; URLs are sanitised to a dotted directory name.
    if [ "${ADDON:0:6}" = 'git://' ]; then
      # FIX: the sanitised name used to be assigned to PATH, clobbering
      # the shell's command search path for the rest of the script.
      ADDON_DIR=${ADDON////.}
      /usr/bin/git clone "$ADDON" "/tmp/build/addons/$ADDON_DIR" > /dev/null 2>&1
      ADDON=$ADDON_DIR
    fi
    # Check folder exists
    if [ ! -f "/tmp/build/addons/$ADDON/build" ]; then
      echo "Unable to locate addon: $ADDON"
      exit 1
    fi
  done
fi

# Remove packages
echo 'Removing unneeded packages'
/usr/bin/yum -q -y remove httpd* nginx* php* > /dev/null 2>&1

# Install Nginx
echo 'Installing Nginx'
/usr/bin/yum -q -y install nginx
if [ $? -ne 0 ]; then
  echo 'Error trying to install Nginx'
  exit 1
fi
/bin/rm -rf /etc/nginx/conf.d/*

# Install PHP 5.4 (with httpd 2.4 as dependency)
echo 'Installing PHP 5.4 with FPM'
/usr/bin/yum -q -y install php54* --exclude=php54-mysqlnd --skip-broken
if [ $? -ne 0 ]; then
  echo 'Error trying to install PHP'
  exit 1
fi

# Merge folders
echo 'Merging main folders'
/bin/cp -rf /tmp/build/etc /
/bin/cp -rf /tmp/build/opt /

# Install addons
if [[ $ADDONS ]]; then
  for ADDON in $ADDONS; do
    # Correct addon path (URLs were cloned to dotted names above)
    if [ "${ADDON:0:6}" = 'git://' ]; then
      ADDON=${ADDON////.}
    fi
    # Run addon build
    /bin/bash "/tmp/build/addons/$ADDON/build"
    if [ $? -ne 0 ]; then
      echo "Addon returned error: $ADDON"
      exit 1
    fi
  done
fi

# Updating packages
echo 'Updating packages'
/usr/bin/yum -q -y update

# Take ownership
echo 'Correcting permissions'
/bin/chown -R root:root /etc/nginx/conf.d \
  /opt/elasticbeanstalk \
  /var/log/nginx \
  /var/log/php-fpm

# Clear unneeded files
echo 'Clearing unneeded files'
/bin/rm -rf /etc/httpd \
  /opt/elasticbeanstalk/var/log/* \
  /tmp/build \
  /var/log/httpd \
  /var/log/nginx/* \
  /var/log/php-fpm/*

# Create symbolic link for logs
/bin/ln -s /var/log/nginx /var/log/httpd

# Clear autostarts
echo 'Clearning autostarts'
/sbin/chkconfig nginx off
/sbin/chkconfig php-fpm off

# End script
# NOTE(review): the script deletes itself and wipes shell history on
# success; preserved as original behaviour but this (and the odd
# "with my permissions" suffix) should be confirmed as intentional.
echo 'Successfully built! with my permissions'
/bin/rm -f "$(readlink -f "$0")"
history -c
exit 0
| true
|
21cdcd56163cd6131e05cea0e2c8f42858c73291
|
Shell
|
widelec-BB/vagrant-webdev-vm
|
/build.sh
|
UTF-8
| 340
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Rebuild the Vagrant box from a clean slate and package it. The VM is
# halted and booted once in between because apt-get upgrade during
# provisioning may require the VirtualBox guest additions to be
# reinstalled on the next boot.

# Run a command and abort the build with its exit status on failure.
step()
{
	"$@" || exit
}

# remove old build
rm -f package.box
step vagrant destroy -f

# create and provision
step vagrant up

# reboot for VBox additions
step vagrant halt
step vagrant up

# stop and package
step vagrant halt
step vagrant package
| true
|
def2f4a825faf50f5bf776a9317f3d62472f67b1
|
Shell
|
rebelm/stigs
|
/files/redhat7/rhel_07_010340.sh
|
UTF-8
| 242
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# STIG RHEL-07-010340: users must authenticate for privilege escalation.
# Strips NOPASSWD directives from sudoers configuration.

# Remove NOPASSWD from /etc/sudoers (only from non-comment lines)
sed -i -e '/^[^#].*NOPASSWD.*/d' /etc/sudoers

# Remove NOPASSWD lines in sudoers.d directory.
# FIX: iterating over $(find ...) word-split filenames containing
# whitespace; -exec passes each path intact (and batches with "+").
find /etc/sudoers.d/ -type f -exec sed -i -e '/^[^#].*NOPASSWD.*/d' {} +
| true
|
2f1c42d59fa1a813f9df8857772e6e8e0c5987f5
|
Shell
|
Zygimantass/keep-ecdsa
|
/scripts/install-celo.sh
|
UTF-8
| 3,755
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install/deploy the keep-ecdsa Celo contracts and (optionally) build the
# client. See help() below for usage and environment variables.
set -euo pipefail

LOG_START='\n\e[1;36m'  # new line + bold + cyan
LOG_END='\n\e[0m'       # new line + reset
DONE_START='\n\e[1;32m' # new line + bold + green
DONE_END='\n\n\e[0m'    # new line + reset

KEEP_ECDSA_PATH=$(realpath "$(dirname "$0")"/../)
KEEP_ECDSA_SOL_PATH=$(realpath "$KEEP_ECDSA_PATH"/solidity)

# Defaults, can be overwritten by env variables/input parameters
KEEP_CELO_PASSWORD=${KEEP_CELO_PASSWORD:-"password"}
NETWORK_DEFAULT="local"
CONTRACT_OWNER_CELO_ACCOUNT_PRIVATE_KEY=${CONTRACT_OWNER_CELO_ACCOUNT_PRIVATE_KEY:-""}

# Print usage and exit non-zero.
help() {
  echo -e "\nUsage: ENV_VAR(S) $0" \
    "--network <network>" \
    "--contracts-only"
  echo -e "\nEnvironment variables:\n"
  echo -e "\tKEEP_CELO_PASSWORD: The password to unlock local Celo accounts to set up delegations." \
    "Required only for 'local' network. Default value is 'password'"
  echo -e "\tCONTRACT_OWNER_CELO_ACCOUNT_PRIVATE_KEY: Contracts owner private key on Celo. Required for non-local network only"
  echo -e "\nCommand line arguments:\n"
  echo -e "\t--network: Celo network for keep-core client." \
    "Available networks and settings are specified in 'truffle.js'"
  echo -e "\t--contracts-only: Should execute contracts part only." \
    "Client installation will not be executed.\n"
  exit 1 # Exit script after printing help
}

# Transform long options to short ones so getopts can handle them.
for arg in "$@"; do
  shift
  case "$arg" in
    "--network") set -- "$@" "-n" ;;
    "--contracts-only") set -- "$@" "-m" ;;
    "--help") set -- "$@" "-h" ;;
    *) set -- "$@" "$arg" ;;
  esac
done

# Parse short options
OPTIND=1
while getopts "n:mh" opt; do
  case "$opt" in
    n) network="$OPTARG" ;;
    m) contracts_only=true ;;
    h) help ;;
    ?) help ;; # Print help in case parameter is non-existent
  esac
done
# FIX: use builtin arithmetic instead of spawning the deprecated expr.
shift $((OPTIND - 1)) # remove options from positional parameters

# Overwrite default properties
NETWORK=${network:-$NETWORK_DEFAULT}
CONTRACTS_ONLY=${contracts_only:-false}

printf "${LOG_START}Network: $NETWORK ${LOG_END}"

# Run script.
printf "${LOG_START}Starting installation...${LOG_END}"

cd "$KEEP_ECDSA_SOL_PATH"

printf "${LOG_START}Installing NPM dependencies...${LOG_END}"
npm install
npm link @keep-network/keep-core

# Local networks need their accounts unlocked before migration.
if [ "$NETWORK" == "local" ]; then
  printf "${LOG_START}Unlocking celo accounts...${LOG_END}"
  KEEP_ETHEREUM_PASSWORD=$KEEP_CELO_PASSWORD \
    npx truffle exec scripts/unlock-eth-accounts.js --network "$NETWORK"
fi

printf "${LOG_START}Migrating contracts...${LOG_END}"
npm run clean
CONTRACT_OWNER_CELO_ACCOUNT_PRIVATE_KEY=$CONTRACT_OWNER_CELO_ACCOUNT_PRIVATE_KEY \
  npx truffle migrate --reset --network "$NETWORK"

printf "${LOG_START}Copying contract artifacts...${LOG_END}"
rm -rf artifacts
cp -r build/contracts artifacts

npm link

if [ "$CONTRACTS_ONLY" = false ]; then
  printf "${LOG_START}Building keep-ecdsa client...${LOG_END}"
  cd "$KEEP_ECDSA_PATH"

  # solc doesn't support symbolic links that are made in `node_modules` by `npm link`
  # command. We need to update the `--allow-paths` value to be the parent directory
  # that is assumed to contain both current project and dependent project.
  # Ref: https://github.com/ethereum/solidity/issues/4623
  TMP_FILE=$(mktemp /tmp/Makefile-ethereum.XXXXXXXXXX)
  sed 's/--allow-paths ${solidity_dir}/--allow-paths $(realpath ${SOLIDITY_DIR}\/..\/..\/)/g' pkg/chain/gen/ethereum/Makefile >"$TMP_FILE"
  mv "$TMP_FILE" pkg/chain/gen/ethereum/Makefile

  TMP_FILE=$(mktemp /tmp/Makefile-celo.XXXXXXXXXX)
  sed 's/--allow-paths ${solidity_dir}/--allow-paths $(realpath ${SOLIDITY_DIR}\/..\/..\/)/g' pkg/chain/gen/celo/Makefile >"$TMP_FILE"
  mv "$TMP_FILE" pkg/chain/gen/celo/Makefile

  go generate ./...
  go build -a -o keep-ecdsa .
fi

printf "${DONE_START}Installation completed!${DONE_END}"
| true
|
663a3ab57e3c85a350186ce862779655c710a2ef
|
Shell
|
Spottybadrabbit/blobio
|
/cli/bio-put
|
UTF-8
| 1,259
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Synopsis:
#	Put files to blobio server.
# Usage:
#	bio-put <file1> <file2> ...
#	find . -type f -print | bio-put
# Environment:
#	BLOBIO_SERVICE
#	BLOBIO_PUT_SERVICE
#	BLOBIO_ALGORITHM
#
PROG=$(basename "$0")

die()
{
	echo "$PROG: ERROR: $@" >&2
	exit 1
}

# FIX: these checks previously tested the literal strings
# "BLOBIO_SERVICE"/"BLOBIO_ALGORITHM" (always non-empty), so a missing
# variable was never detected.
test -n "$BLOBIO_SERVICE" || die 'env variable not defined: BLOBIO_SERVICE'
SERVICE=${BLOBIO_PUT_SERVICE:=$BLOBIO_SERVICE}

test -n "$BLOBIO_ALGORITHM" || die 'env variable not defined: BLOBIO_ALGORITHM'
ALGORITHM=$BLOBIO_ALGORITHM

# Digest one file, put its blob if the server lacks it, and write a two
# line record to stdout: the path, then ">UDIG" for a newly stored blob
# or "@UDIG" for one the server already had.
put_file()
{
	FILE="$1"
	test -d "$FILE" && die "$FILE is a directory"
	test -r "$FILE" || die "can't read file $FILE"

	DIGEST=$(blobio eat --algorithm "$ALGORITHM" --input-path "$FILE")
	STATUS=$?
	# FIX: the old check "-n $DIGEST -o $STATUS != 0" was inverted, so a
	# failed "blobio eat" (non-zero status, empty digest) slipped through.
	test $STATUS = 0 && test -n "$DIGEST" || die "blobio eat failed"
	UDIG=$ALGORITHM:$DIGEST

	# FIX: honour BLOBIO_PUT_SERVICE via $SERVICE — it was computed above
	# but every call previously used $BLOBIO_SERVICE directly.
	blobio eat --udig "$UDIG" --service "$SERVICE"
	STATUS=$?
	case $STATUS in
	1)
		# Server does not have the blob yet: put it.
		blobio put --udig "$UDIG" --input-path "$FILE" \
				--service "$SERVICE"
		test $? = 0 || die "blobio put failed: $FILE"
		cat <<END
$FILE
>$UDIG
END
		;;
	0)
		# Blob already stored.
		cat <<END
$FILE
@$UDIG
END
		;;
	*)
		die "blobio eat: unexpected exit status"
		;;
	esac
}

case $# in
0)
	# No arguments: read file paths from stdin, one per line.
	while read -r F; do
		put_file "$F"
	done
	;;
*)
	while [ "$1" ]; do
		put_file "$1"
		shift
	done
	;;
esac
| true
|
95018af0e2338698084f09aeab2cbd43fd55d448
|
Shell
|
alexclewontin/docker-snap
|
/spread/image/create-image.sh
|
UTF-8
| 3,931
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Builds an Ubuntu Core 16 test image with a first-boot setup service.
# Arguments: $1 - snap channel (default "candidate"); $2 - extra snap to
# bake into the image (optional). Requires root (kpartx/mount).
set -e
if [ $(id -u) -ne 0 ] ; then
echo "ERROR: needs to be executed as root"
exit 1
fi
channel=candidate
if [ ! -z "$1" ] ; then
channel=$1
fi
snap=
if [ ! -z "$2" ] ; then
snap=$2
fi
model=pc
arch=amd64
image_name=ubuntu-core-16.img
ubuntu_image_extra_args=
if [ ! -z "$snap" ] ; then
ubuntu_image_extra_args="--extra-snaps $snap"
fi
# Build the raw image from the signed model assertion ($model.model).
ubuntu-image \
--channel $channel \
-o $image_name \
--image-size 4G \
$ubuntu_image_extra_args \
$model.model
# Map the image's partitions as loop devices, then mount the writable
# partition by label.
# NOTE(review): the fixed 0.5s sleep is a race with udev/device-mapper
# node creation — consider polling for the device instead.
kpartx -a $image_name
sleep 0.5
loop_path=`findfs LABEL=writable`
tmp_mount=`mktemp -d`
mount $loop_path $tmp_mount
# Migrate all systemd units from core snap into the writable area. This
# would be normally done on firstboot by the initramfs but we can't rely
# on that because we are adding another file in there and that will
# prevent the initramfs from transitioning any files.
core_snap=$(find $tmp_mount/system-data/var/lib/snapd/snaps -name "core_*.snap")
tmp_core=`mktemp -d`
mount $core_snap $tmp_core
mkdir -p $tmp_mount/system-data/etc/systemd
cp -rav $tmp_core/etc/systemd/* \
$tmp_mount/system-data/etc/systemd/
umount $tmp_core
rm -rf $tmp_core
# system-user assertion which gives us our test:test user we use to
# log into the system
mkdir -p $tmp_mount/system-data/var/lib/snapd/seed/assertions
cp test-user.assertion $tmp_mount/system-data/var/lib/snapd/seed/assertions
# Disable console-conf for the first boot
mkdir -p $tmp_mount/system-data/var/lib/console-conf/
touch $tmp_mount/system-data/var/lib/console-conf/complete
# Create systemd service which is running on firstboot and sets up
# various things for us.
mkdir -p $tmp_mount/system-data/etc/systemd/system
cat << 'EOF' > $tmp_mount/system-data/etc/systemd/system/devmode-firstboot.service
[Unit]
Description=Run devmode firstboot setup
After=snapd.service snapd.socket

[Service]
Type=oneshot
ExecStart=/writable/system-data/var/lib/devmode-firstboot/run.sh
RemainAfterExit=yes
TimeoutSec=3min
EOF
# Enable the unit by hand (symlink into multi-user.target.wants), since
# systemctl cannot run against the offline image.
mkdir -p $tmp_mount/system-data/etc/systemd/system/multi-user.target.wants
ln -sf /etc/systemd/system/devmode-firstboot.service \
$tmp_mount/system-data/etc/systemd/system/multi-user.target.wants/devmode-firstboot.service
# The first-boot payload: waits for snapd to settle, creates the known
# system user, re-enables console-conf and reboots. Quoted 'EOF' keeps
# the script literal (no expansion at image-build time).
mkdir $tmp_mount/system-data/var/lib/devmode-firstboot
cat << 'EOF' > $tmp_mount/system-data/var/lib/devmode-firstboot/run.sh
#!/bin/bash

set -e

# Don't start again if we're already done
if [ -e /writable/system-data/var/lib/devmode-firstboot/complete ] ; then
exit 0
fi

echo "Start devmode-firstboot $(date -Iseconds --utc)"

if [ "$(snap managed)" = "true" ]; then
echo "System already managed, exiting"
exit 0
fi

# no changes at all
while ! snap changes ; do
echo "No changes yet, waiting"
sleep 1
done

while snap changes | grep -qE '(Do|Doing) .*Initialize system state' ; do
echo "Initialize system state is in progress, waiting"
sleep 1
done

if [ -n "$(snap known system-user)" ]; then
echo "Trying to create known user"
snap create-user --known --sudoer
fi

# Enable console-conf again
rm /writable/system-data/var/lib/console-conf/complete

# Mark us done
touch /writable/system-data/var/lib/devmode-firstboot/complete

# Reboot the system as its now prepared for the user
reboot
EOF
chmod +x $tmp_mount/system-data/var/lib/devmode-firstboot/run.sh
# Tear down: unmount the writable partition and release the loop mappings.
umount $tmp_mount
kpartx -d $image_name
rm -rf $tmp_mount
| true
|
f2f2d83b03267af040e17e7c2e4574c03014402b
|
Shell
|
EtlamGit/cubian-packages
|
/oracle-java7-jre/DEBIAN/prerm
|
UTF-8
| 841
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh -e
# Debian maintainer script: runs before oracle-java7-jre is removed or
# deconfigured. Cleans unpacked jars and deregisters alternatives/binfmt.
basedir=/usr/lib/jvm/java-7-oracle-1.7.0.51
basediralias=/usr/lib/jvm/java-7-oracle

# jar_packs is currently empty; kept as a hook for pack200-compressed jars
# that would have been unpacked at install time.
jar_packs=''
jre_tools='ControlPanel java java_vm javaws jcontrol keytool pack200 policytool rmid rmiregistry unpack200 orbd servertool tnameserv'

for i in $jar_packs; do
    jar=$(echo "$i" | sed 's/\.pack$/.jar/')
    rm -f "$basedir/$jar"
done

# Remove the generated class data sharing archive.
rm -f "$basedir/jre/lib/i386/client/classes.jsa"

if [ "$1" = "remove" ] || [ "$1" = "deconfigure" ]; then
    for i in $jre_tools; do
        update-alternatives --remove "$i" "$basediralias/jre/bin/$i"
    done

    # FIX: use the POSIX "command -v" instead of "which" to detect
    # update-binfmts.
    if command -v update-binfmts >/dev/null 2>&1; then
        # try to remove and ignore the error
        if [ -e /var/lib/binfmts/oracle-java7 ]; then
            update-binfmts --package oracle-java7 \
                --remove jar /usr/bin/jexec || true
        fi
    fi
    update-alternatives --remove jexec "$basediralias/jre/lib/jexec"
fi
| true
|
fa185fdf884770de41d328f6e2ba44b41f406b12
|
Shell
|
psahanap/jenkinsrepo
|
/finaljenkins.sh
|
UTF-8
| 2,235
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Script for automatically running the AWS deployments from Jenkins using CFTs, Jenkins and Git
# The source code and script is stored in Git repo and run from Jenkins
#source ~/.bash_profile

AWS=/usr/local/bin/aws
STACK=test--SG-creation
REGION=ap-south-1

# Create the stack if it does not exist yet, otherwise update it in place.
$AWS cloudformation describe-stacks --stack-name "$STACK" --region "$REGION"
status=$?
echo "exit status is $status"
if [ "$status" -eq 0 ]
then
    echo "Updating the Stack Now"
    $AWS cloudformation update-stack --stack-name "$STACK" --template-body file:///opt/jenkins/subbranch/test/sg\ template.json.txt --parameters file:///opt/jenkins/subbranch/test/parameters.json --region "$REGION"
else
    echo "Creating the Stack Now"
    $AWS cloudformation create-stack --stack-name "$STACK" --template-body file:///opt/jenkins/subbranch/test/sg\ template.json.txt --parameters file:///opt/jenkins/subbranch/test/parameters.json --region "$REGION"
fi

# Poll stack events until exactly two AWS::CloudFormation::Stack events
# exist (start + completion for the stack resource itself).
while true
do
    $AWS cloudformation describe-stack-events --stack-name "$STACK" --region "$REGION" | grep -iE 'ResourceStatus|ResourceType' | sed 's|[",:]||g' | grep -E 'ResourceType.*AWSCloudFormationStack' > lines
    cat lines
    nooflines=$(wc -l < lines)
    echo "if number output lines are 2 ,then break;otherwise print until its 2."
    if [ "$nooflines" -ne 2 ]
    then
        echo "Stack Creation is in Progress,Below are the events happening now"
        $AWS cloudformation describe-stack-events --stack-name "$STACK" --region "$REGION" | grep -iE 'ResourceStatus|ResourceType|LogicalResourceId|PhysicalResourceId'
    else
        # The redundant inner re-check of the same condition was removed.
        echo "Stack Creation Completed"
        break
    fi
done

# Report resource status until the grep pipeline indicates completion.
while true
do
    $AWS cloudformation describe-stack-resources --stack-name "$STACK" --region "$REGION" | grep -iE 'ResourceStatus|ResourceType' | sed 's|[",:]||g' | grep -E 'ResourceStatus.*_COMPLETE|ResourceType.*'
    # FIX: the status was stored in "rsatus" but tested as "$rstatus"
    # (unset), making "[ -ne 0 ]" error out so the loop always broke on
    # the first pass. $? here is the last grep's exit status.
    # NOTE(review): the success/retry branches below look inverted
    # (grep success breaks, grep failure reports success) — preserved as
    # written; confirm the intended CloudFormation polling semantics.
    rstatus=$?
    echo "$rstatus"
    if [ "$rstatus" -ne 0 ]
    then
        echo "Resources are Created Successfully, below are the details"
        $AWS cloudformation describe-stack-resources --stack-name "$STACK" --region "$REGION" | grep -iE 'ResourceStatus|ResourceType|LogicalResourceId'
    else
        break
    fi
done
| true
|
853b6bc634cda860238a3a54e81787796721a1a9
|
Shell
|
gylzbk/aihome_codes
|
/mozart/output/molib/app/etc/init.d/S04bsa.sh
|
UTF-8
| 444
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# SysV-style init script for the Broadcom BSA Bluetooth server: powers
# the radio on/off through rfkill and launches/kills bsa_server_mips.

bsa_start()
{
	echo -n "Starting bsa server ..."
	# Unblock the radio before launching the server.
	echo 1 > /sys/class/rfkill/rfkill0/state
	sleep 1
	bsa_server_mips -all=0 -r 14 -d /dev/ttyS1 -u /var/run/ -p /lib/firmware/BCM_bt_firmware.hcd &
}

bsa_stop()
{
	echo -n "Stopping bsa server ..."
	killall bsa_server_mips
	# Block the radio again.
	echo 0 > /sys/class/rfkill/rfkill0/state
}

case "$1" in
	start)
		bsa_start
		;;
	stop)
		bsa_stop
		;;
	restart|reload)
		;;
	*)
		echo "Usage: $0 {start|stop|restart}"
		exit 1
esac

exit $?
| true
|
3049c5afcd32e571856c90827334d8f15cfdabf9
|
Shell
|
g0dlight/environment
|
/.bash_profile
|
UTF-8
| 600
| 2.578125
| 3
|
[] |
no_license
|
# Login-shell profile: prompt colours, git-aware PS1, PATH and aliases.

NOCOLOR="\[\033[0m\]"
BLACK="\[\033[0;30m\]"
RED="\[\033[0;31m\]"
GREEN="\[\033[0;32m\]"
YELLOW="\[\033[0;33m\]"
BLUE="\[\033[0;34m\]"
PURPLE="\[\033[0;35m\]"
CYAN="\[\033[0;36m\]"
WHITE="\[\033[0;37m\]"

GIT_BRANCH=""
# FIX: the git-prompt path was hard-coded to /Users/orel; use $HOME so the
# profile works for any account.
if [ -f "$HOME/.git-prompt.sh" ]; then
    source "$HOME/.git-prompt.sh"
    # Single quotes defer expansion: __git_ps1 runs each time the prompt
    # is drawn, not once at login.
    GIT_BRANCH='$(__git_ps1 "(%s)")'
    GIT_BRANCH="$YELLOW$GIT_BRANCH$NOCOLOR"
fi

export PATH="$HOME/Library/Python/3.6/bin:$PATH"
export PS1="\u@\h:\W$GIT_BRANCH\$ "
export SUDO_PS1="\u@\h:\W\$ "

alias ll='ls -la'
alias updatedb='sudo /usr/libexec/locate.updatedb'
alias ssh='ssh -o ServerAliveInterval=30'
| true
|
03b85be5260bc0159f70857790a0a2abd2bdd997
|
Shell
|
MERobinson/pbs_scripts
|
/call_chip_mut_beb.sh
|
UTF-8
| 12,537
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# default arg
BASEDIR=$WORK
WORKDIR=$PWD
GENOME=mm10
DELIM=","
PLATFORM="ILLUMINA"
# default sample info fields
FQ1_IDX=1
FQ2_IDX=2
SMID_IDX=3
FCID_IDX=4
FCLN_IDX=5
BCID_IDX=6
CSID_IDX=7
# help message
USAGE="$(basename "$0") [-gbwdhv] -s <sample_info>
purpose:
--pipeline to align, clean, generate metrics, and call mut in ChIP-seq samples
required arguments:
-si|--sample_info : delimited file containing sample info,
# see sample info format spec below
optional arguments:
-g|--genome : genome version [hg38,mm10] (default = mm10)
-b|--basedir : home directory (default = \$WORK)
-w|--workdir : working directory to output all files (default = \$PWD)
-d|--delim : delimiter used in sample info file (default = ",")
-sl|--sample_list : comma separated list of sample names - specifies a subset
to analyse from sample info (default = all samples)
-sc|--seq_centre : optional sequencing centre flag for readgroup (default = none)
-pl|--platform : platform [ILLUMINA,SOLID,LS454,HELICOS,PACBIO] (default = ILLUMINA)
-po|--pon : panel of normals (default = none)
-v|--verbose : print additional information to STDOUT [0,1] (default = 0)
-h|--help : print this help message and exit
example:
$(basename "$0") -g hg38 -si sample_info.csv
sample info format:
--the sample info file can be any delimited text file with the following columns:
-FASTQ_read_1 - file name of forward reads in FASTQ format (incl extension) [required]
-FASTQ_read_2 - file name of reverse reads in FASTQ format (incl extension) [optional]
-SMID - unique name for each biological samples [optional]
- used as output file prefix
- if not provided, will use FASTQ filename + assay type
-FCID - flow cell ID for each run [optional]
-FCLN - lane of flow cell for each library sample [optional]
-INID - ID of the sequencing instrument [optional]
-BCID - barcode index for each library sample [optional]
-CSID - control sample ID [required for variant calling]
- SMID of control to compare against in mut calling
- if none provided, no variant calling will be run
- control sample CSID fields should be left blank
--If not provided, FCID, FCLN, PLID and BCID will be extracted from the
readname assuming standard illumina format, i.e.:
@<INID>:<run_number>:<FCID>:<FCLN>:<tile>:<x-pos>:<y-pos> <read>:<filtered>:<control>:<BCID>"
# parse arg
while [[ $# -gt 1 ]]; do
key=$1
case $key in
-si|--sample_info)
SAMPLE_INFO=$2
shift
;;
-g|--genome)
GENOME=$2
shift
;;
-b|--basedir)
BASEDIR=$2
shift
;;
-w|--workdir)
WORKDIR=$2
shift
;;
-d|--delim)
DELIM=$2
shift
;;
-sl|--sample_list)
SAMPLE_LIST=$2
shift
;;
-sc|--seq_centre)
SEQ_CENTRE=$2
shift
;;
-pl|--platform)
PLATFORM=$2
shift
;;
-po|--pon)
PON=$2
shift
;;
-v|--verbose)
VERBOSITY=$2
shift
;;
-h|--help)
echo "$USAGE"
exit 1
;;
*)
echo "Error: Undefined argument provided"
echo "$USAGE"
exit 1
;;
esac
shift
done
# check required arg
if [[ -z $SAMPLE_INFO ]]; then
echo "Error: no sample info provided"
echo "$USAGE"; exit 1
else
SAMPLE_INFO=$(realpath $WORKDIR/$SAMPLE_INFO)
fi
if [[ ! -e $SAMPLE_INFO ]]; then
echo "Sample info file not found, input path: $SAMPLE_INFO"
echo "$USAGE"; exit 1
fi
# get unique sample names
if [[ -z $SAMPLE_LIST ]]; then
SAMPLES=$(tail -n +2 $SAMPLE_INFO | cut -d $DELIM -f $SMID_IDX | sort | uniq)
else
IFS=$DELIM read -r -a SAMPLES <<< "$SAMPLE_LIST"
fi
# set genome resources
if [[ $GENOME = mm10 ]]; then
DBSNP=$BASEDIR/Resources/Mus_musculus/gatk_mm10bundle/mgp.v5.merged.snps_all.dbSNP142.vcf.gz
REFFA=$BASEDIR/Resources/Mus_musculus/gatk_mm10bundle/Mus_musculus_mm10
INDEX=$BASEDIR/Resources/Mus_musculus/gatk_mm10bundle/bwa_index/genome.fa
elif [[ $GENOME = hg38 ]]; then
DBSNP=$BASEDIR/Resources/Homo_sapiens/gatk_hg38bundle/Homo_sapiens_assembly38.dbsnp.vcf.gz
COSMIC=$BASEDIR/Resources/Homo_sapiens/gatk_hg38bundle/cosmic_coding_noncoding.vcf.gz
REFFA=$BASEDIR/Resources/Homo_sapiens/gatk_hg38bundle/Homo_sapiens_assembly38
INDEX=$BASEDIR/Resources/Homo_sapiens/gatk_hg38bundle/bwa_index/hg38bundle
else
echo "Genome version not recognised"; echo "$USAGE"; exit 1
fi
REFFA_BASE=$(basename "$REFFA")
DBSNP_BASE=$(basename "$DBSNP")
INDEX_BASE=$(basename "$INDEX")
# check if PON and COSMIC provided
if [[ ! -z $PON ]]; then
PONARG="-PON $PON"
fi
if [[ ! -z $COSMIC ]]; then
COSMIC=$(realpath "$COSMIC")
COSMICBASE=$(basename "$COSMIC")
COSMICARG="--cosmic $COSMICBASE"
fi
# setup output directories
mkdir -p $WORKDIR/vcf
mkdir -p $WORKDIR/bam
mkdir -p $WORKDIR/logs
mkdir -p $WORKDIR/qc/fastqc
mkdir -p $WORKDIR/qc/metrics
# parse sample info and run
unset VAR_DEPEND
for SMID in ${SAMPLES[@]}; do
FASTQ1=$(awk -F $DELIM -v smid_idx="$SMID_IDX" -v smid="$SMID" -v field="$FQ1_IDX" \
'$smid_idx==smid {print $field}' "$SAMPLE_INFO")
FASTQ2=$(awk -F $DELIM -v smid_idx="$SMID_IDX" -v smid="$SMID" -v field="$FQ2_IDX" \
'$smid_idx==smid {print $field}' "$SAMPLE_INFO")
FCIDS=$(awk -F $DELIM -v smid_idx="$SMID_IDX" -v smid="$SMID" -v field="$FCID_IDX" \
'$smid_idx==smid {print $field}' "$SAMPLE_INFO")
FCLNS=$(awk -F $DELIM -v smid_idx="$SMID_IDX" -v smid="$SMID" -v field="$FCLN_IDX" \
'$smid_idx==smid {print $field}' "$SAMPLE_INFO")
BCIDS=$(awk -F $DELIM -v smid_idx="$SMID_IDX" -v smid="$SMID" -v field="$BCID_IDX" \
'$smid_idx==smid {print $field}' "$SAMPLE_INFO")
CSIDS=$(awk -F $DELIM -v smid_idx="$SMID_IDX" -v smid="$SMID" -v field="$CSID_IDX" \
'$smid_idx==smid {print $field}' "$SAMPLE_INFO")
for IDX in ${!FASTQ1[@]}; do
FQ1=${FASTQ1[$IDX]}
FQ2=${FASTQ2[$IDX]}
FCID=${FCIDS[$IDX]}
FCLN=${FCLNS[$IDX]}
BCID=${BCIDS[$IDX]}
CSID=${CSIDS[$IDX]}
# check if paired end
if [[ -z "$FQ2" ]]; then
PE=0
else
PE=1
PE_FQ2SAM_ARG="FASTQ2=$FQ2"
PE_BWAMEM_ARG="-p"
fi
# if readgroup info not in sample info file, extract from read name
READNAME=$(gzip -dc $WORKDIR/raw_data/$FQ1 | head -n 1)
if [[ -z $FCID ]]; then
if [[ $VERBOSITY > 0 ]]; then echo "Extracting FCID from read name"; fi
FCID=$(echo $READNAME | cut -d ":" -f 3)
fi
if [[ -z $FCLN ]]; then
if [[ $VERBOSITY > 0 ]]; then echo "Extracting FCLN from read name"; fi
FCLN=$(echo $READNAME | cut -d ":" -f 4)
fi
if [[ -z $BCID ]]; then
if [[ $VERBOSITY > 0 ]]; then echo "Extracting BCID from read name"; fi
BCID=$(echo $READNAME | cut -d ":" -f 10)
fi
RGID=$FCID.$FCLN # read group ID
PU=$FCID.$FCLN.$BCID # platform unit
# output if verbose
if [[ $VERBOSITY > 0 ]]; then
echo "Sample Info parsed for sample $SMID:
-FASTQ r1 = $FQ1
-FASTQ r2 = $FQ2
-Flow cell ID = $FCID
-Flow cell lane = $FCLN
-Barcode = $BCID
-Control condition = $CSID"
fi
# reset variables
unset BAM_LIST
unset DEPEND
if [[ ! -e $WORKDIR/bam/$SMID.$RGID.bam ]]; then
# align and clean each readgroup
ALIGNJOB=$(cat <<- EOS | qsub -N $PU.aln -
#!/bin/bash
#PBS -l select=1:mem=40gb:ncpus=20
#PBS -l walltime=50:00:00
#PBS -j oe
#PBS -q med-bio
#PBS -o $WORKDIR/logs/$SMID.$PU.alignment.log.txt
module load picard/2.6.0
module load java/jdk-8u66
module load bio-bwa/0.7.10
module load fastqc/0.11.2
# copy data to scratch
cp $WORKDIR/raw_data/$FQ1 .
if [[ $PE = 1 ]]; then cp $WORKDIR/raw_data/$FQ2 .; fi
cp -rL $INDEX* .
cp -rL $REFFA* .
# FASTQC
fastqc --noextract $FQ1 $FQ2
cp *fastqc.zip $WORKDIR/qc/fastqc/
# convert FASTQ to SAM
java -Xmx32G -jar /apps/picard/2.6.0/picard.jar FastqToSam \
FASTQ=$FQ1 \
OUTPUT=$SMID.$RGID.unaligned.bam \
READ_GROUP_NAME=$RGID \
SAMPLE_NAME=$SMID \
LIBRARY_NAME=$FCID.$BC \
PLATFORM_UNIT=$PU \
PLATFORM=$PLATFORM $PE_FQ2SAM_ARG
# mark adapters
java -Xmx32G -jar /apps/picard/2.6.0/picard.jar MarkIlluminaAdapters \
I=$SMID.$RGID.unaligned.bam \
O=$SMID.$RGID.markadapters.bam \
M=$SMID.$RGID.markadapters.metrics.txt \
TMP_DIR=./picard_tmp/
cp $SMID.$RGID.markadapters.metrics.txt $WORKDIR/qc/metrics/
# convert uBAM to interleaved FASTQ
java -Xmx32G -jar /apps/picard/2.6.0/picard.jar SamToFastq \
I=$SMID.$RGID.markadapters.bam \
FASTQ=$SMID.$RGID.interleaved.fq \
CLIPPING_ATTRIBUTE=XT \
CLIPPING_ACTION=2 \
INTERLEAVE=true \
NON_PF=true \
TMP_DIR=picard_tmp
# align
bwa mem -M -t 20 $PE_BWAMEM_ARG \
$INDEX_BASE \
$SMID.$RGID.interleaved.fq > \
$SMID.$RGID.aligned.sam
# merge uBAM and aligned
java -Xmx32G -jar /apps/picard/2.6.0/picard.jar MergeBamAlignment \
R=$REFFA_BASE.fa \
UNMAPPED_BAM=$SMID.$RGID.markadapters.bam \
ALIGNED_BAM=$SMID.$RGID.aligned.sam \
O=$SMID.$RGID.merged.bam \
CREATE_INDEX=true \
ADD_MATE_CIGAR=true \
CLIP_ADAPTERS=false \
CLIP_OVERLAPPING_READS=true \
INCLUDE_SECONDARY_ALIGNMENTS=true \
MAX_INSERTIONS_OR_DELETIONS=-1 \
PRIMARY_ALIGNMENT_STRATEGY=MostDistant \
ATTRIBUTES_TO_RETAIN=XS \
TMP_DIR=picard_tmp
# mark duplicates
java -Xmx32G -jar /apps/picard/2.6.0/picard.jar MarkDuplicates \
I=$SMID.$RGID.merged.bam \
O=$SMID.$RGID.bam \
M=$SMID.$RGID.markduplicates.metrics.txt \
TMP_DIR=./picard_tmp \
CREATE_INDEX=true
cp $SMID.$RGID.markduplicates.metrics.txt $WORKDIR/qc/metrics/
# alignment metrics
java -Xmx32G -jar /apps/picard/2.6.0/picard.jar \
CollectAlignmentSummaryMetrics \
R=$REFFA_BASE.fa \
I=$SMID.$RGID.bam \
O=$SMID.$RGID.alignmentsummary.metrics.txt
cp $SMID.$RGID.alignmentsummary.metrics.txt $WORKDIR/qc/metrics/
# copy clean BAM back
cp $SMID.$RGID.bam* $WORKDIR/bam/
ls -lhAR
EOS
)
fi
# record job ID & bam name for merge job
DEPEND="$DEPEND,afterok:$ALIGNJOB"
BAM_LIST="$BAM_LIST $SMID.$RGID.bam"
done
# remove leading comma/space
DEPEND=${DEPEND#*,}
BAM_LIST=${BAM_LIST#* }
# merge all read groups per sample
MERGEJOB=$(cat <<- EOS | qsub -N $SMID.merge -
#!/bin/bash
#PBS -l walltime=20:00:00
#PBS -l select=1:mem=20gb:ncpus=1
#PBS -j oe
#PBS -W depend=$DEPEND
#PBS -q med-bio
#PBS -o $WORKDIR/logs/$SMID.mergebam.runinfo.txt
module load samtools/1.2
cp $WORKDIR/bam/$SMID* .
if [[ ${#FASTQ1[@]} > 1 ]]; then
samtools merge $SMID.bam $BAM_LIST
else
mv $BAM_LIST $SMID.bam
fi
samtools index $SMID.bam
cp $SMID.bam* $WORKDIR/bam/
ls -lhAR
EOS
)
# add all merge jobs to dependency list
VAR_DEPEND="$VAR_DEPEND,afterok:$MERGEJOB"
done
# remove leading comma
VAR_DEPEND=${VAR_DEPEND#*,}
# call variants
# Second pass: for every sample that has a matched control, submit PBS jobs
# for peak calling (MACS2) and somatic variant calling (MuTect2).
for SMID in ${SAMPLES[@]}; do
# Look up this sample's control-sample ID in the sample-info sheet.
CSID=$(awk -F $DELIM -v smid_idx="$SMID_IDX" -v smid="$SMID" -v field="$CSID_IDX" \
'$smid_idx==smid {print $field}' "$SAMPLE_INFO" | uniq)
# Samples without a control condition are skipped entirely.
if [[ -z $CSID ]]; then
continue
fi
# Peak calling
if [[ -e $WORKDIR/peaks/$SMID.macs2.narrowPeak ]]; then
echo "$SMID.macs2.narrowPeak already exsists, peak calling not run"
else
# NOTE(review): this section appears truncated/corrupted. The MACS heredoc
# below never issues a macs2 command, the FRAGL line inside it contains an
# unterminated $( ... ), and because the only EOS/')'/'fi' appear at the
# very end, the whole "# Variant calling" MT2JOB section is swallowed into
# the MACS heredoc as literal text. Lines were almost certainly lost
# between the FRAGL line and "# Variant calling" — recover them from
# version control before running this script.
MACSJOB=$(cat <<- EOS | qsub -N $SMID.MACS -
#!/bin/bash
#PBS -l walltime=10:00:00
#PBS -l select=1:mem=20gb:ncpus=1
#PBS -j oe
#PBS -W depend=afterok:$MERGEJOB
#PBS -q med-bio
#PBS -o $WORKDIR/logs/$SMID.peakCalling.runinfo.txt
module load samtools/1.2
module load macs/2.1.0
REGEX="^altd <- \\([0-9]\\)"
FRAGL=$(cat ${SMID}_predictd.R | grep "^altd <- ()"
# Variant calling
if [[ -e $WORKDIR/vcf/$SMID.mutect2.vcf ]]; then
echo "$SMID.mutect2.vcf already exists, variant calling not run"
else
MT2JOB=$(cat <<- EOS | qsub -N $SMID.MT2 -
#!/bin/bash
#PBS -l walltime=40:00:00
#PBS -l select=1:mem=40gb:ncpus=1
#PBS -j oe
#PBS -W depend=afterok:$MERGEJOB
#PBS -q med-bio
#PBS -o $WORKDIR/logs/$SMID.variantCalling.runinfo.txt
module load picard/2.6.0
module load gatk/3.6
module load java/jdk-8u66
module load samtools/1.2
# copy control and test samples and ref accross
cp $WORKDIR/bam/$SMID.bam* .
cp $WORKDIR/bam/$CSID.bam* .
cp $WORKDIR/vcf/$PON* .
cp -rL $REFFA* .
cp -rL $DBSNP* .
cp -rL $COSMIC* .
# call
java -jar /apps/gatk/3.6/GenomeAnalysisTK.jar \
-T MuTect2 \
-R $REFFA_BASE.fa \
-I:normal $CSID.bam \
-I:tumor $SMID.bam \
--dbsnp $DBSNP_BASE \
-o $SMID.mutect2.vcf $PONARG $COSMICARG
ls -lhAR
cp $SMID.mutect2.vcf* $WORKDIR/vcf/
EOS
)
fi
done
| true
|
38dbda1ebe5ae099b84cd7f5aff4ad4cf6bf33cd
|
Shell
|
StrasbourgEurometropole/Strasbourg-Next
|
/docker/2_shutdown-services-and-backups.sh
|
UTF-8
| 423
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Stop the ems-stack Docker services, then dump the MySQL database to a
# timestamped backup file. Expects a .env file in the current directory
# defining MYSQL_USER, MYSQL_DB and DB_BACKUPS_PATH, one VAR=value per
# line with no quoting, comments or embedded spaces (values are
# word-split by the `export $(cat .env)` below).
echo -e "\n\n \e[35m1# Récupération des variables d'environnements\e[0m"
cat .env
export $(cat .env)

echo -e "\n\n \e[35m2# Arrêt des services\e[0m"
docker stack rm ems-stack

echo -e "\n\n \e[35m3# Backup de la BDD\e[0m"
# Compute the dump path once so the announced name and the actual file can
# never differ (the original evaluated $(date ...) twice, which could
# straddle a minute boundary and produce mismatching names).
DUMP_FILE="$DB_BACKUPS_PATH/${MYSQL_DB}_$(date +%Y%m%d-%H%M).sql"
echo -e "Export du dump $DUMP_FILE"
# --password with no value makes mysqldump prompt interactively.
mysqldump -u "$MYSQL_USER" --password --opt "$MYSQL_DB" > "$DUMP_FILE"
| true
|
a2633734aed51b04d98933bd6d482e2e793ab0aa
|
Shell
|
kauezatarin/Domus
|
/DosmusRaspberryScripts/installDotnetCore.sh
|
UTF-8
| 576
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Install the .NET Core 2.1 runtime for ARM under /opt/dotnet and expose
# the `dotnet` binary on the PATH via a symlink. Safe to re-run: the old
# install tree and symlink are replaced. (User-facing messages kept in
# the original Portuguese.)
cd ~/ || exit 1
sudo apt-get install curl libunwind8 gettext

echo "Baixando .NET Core 2.1.x Runtime"
curl -sSL -o dotnet.tar.gz https://dotnetcli.blob.core.windows.net/dotnet/Runtime/release/2.1/dotnet-runtime-latest-linux-arm.tar.gz

echo "Extraindo instalação para /opt/dotnet"
# -f: do not fail on the very first run, when /opt/dotnet does not exist
# yet (the original `rm -r` printed an error in that case).
sudo rm -rf /opt/dotnet
sudo mkdir -p /opt/dotnet && sudo tar zxf dotnet.tar.gz -C /opt/dotnet

echo "Criando link simbolico para dotnet"
# -f: replace an existing link so a re-run does not abort on "File exists".
sudo ln -sf /opt/dotnet/dotnet /usr/local/bin

echo "Limpando arquivos temporarios"
sudo rm dotnet.tar.gz

echo "Instalação finalizada."
dotnet --info
| true
|
e0c4b7abe15b74d0d3bacf0326c835f5cee3d9bd
|
Shell
|
pygos/init-scripts
|
/scripts/setntpdate.sh
|
UTF-8
| 779
| 3.875
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# resolve DOMAIN [SERVER]
#
# Print the address record(s) for DOMAIN, querying SERVER when given,
# using whichever of dig(1) or drill(1) is installed. Exits the whole
# script with status 1 when neither tool is available.
resolve() {
	local domain="$1"
	local server="$2"

	if [ -x "$(command -v dig)" ]; then
		if [ -z "$server" ]; then
			dig +short "$domain"
		else
			dig +short "@$server" "$domain"
		fi
		return $?
	fi

	if [ -x "$(command -v drill)" ]; then
		# drill has no +short mode: keep only the answer lines for the
		# domain and take the 5th tab-separated field (the address).
		# cut splits on TAB by default, which avoids the bash-only
		# $'\t' construct the original used under #!/bin/sh.
		if [ -z "$server" ]; then
			drill "$domain" | grep "^$domain." | cut -f5
		else
			drill "@$server" "$domain" | grep "^$domain." |\
				cut -f5
		fi
		return $?
	fi

	# No resolver tool available at all.
	exit 1
}
# try_update
#
# Read NTP server addresses from stdin, one per line, and run ntpdate
# against each until one succeeds. Returns 0 on the first success, 1
# when every address failed (or stdin was empty).
try_update() {
	# -r: do not let read mangle backslashes in the input.
	while read -r ip; do
		if ntpdate -bu "$ip"; then
			return 0
		fi
	done
	return 1
}
# NTP pool to resolve, and the public fallback DNS server used when the
# system resolver cannot resolve it.
pool="pool.ntp.org"
dns="1.1.1.1"
# try default DNS server first
resolve "$pool" "" | try_update
[ $? -eq 0 ] && exit 0
# try fallback public dns server
# Confirm the fallback server is reachable before querying through it.
ping -q -c 1 "$dns" || exit 1
resolve "$pool" "$dns" | try_update
exit $?
| true
|
1af1b5ad01de536ed393bfb3078701d43f3eddff
|
Shell
|
fanpiao/JIT-Cloud-shield
|
/研发代码/识别文件标签并在HDFS上创建新路径.txt
|
UTF-8
| 1,460
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Scan every file in $Folder_A, read its numeric label (the part of the
# file name between the last "_" and the extension), append the label to
# a log file, and upload the file into the HDFS directory matching that
# label:  1 -> /normal   0 -> /slow   2 -> /DDOS   3 -> /BP   4 -> /ZK
JAVA_HOME=/usr/local/jdk1.8.0_121
CLASSPATH=.:$JAVA_HOME/lib.tools.jar
PATH=$JAVA_HOME/bin:$PATH
export JAVA_HOME CLASSPATH PATH
export HADOOP_HOME=/root/bigdata/hadoop
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

Folder_A="/root/test/"
Output_file="/root/extra/output.txt"
Input_file="/root/extra/output.txt"

# Truncate the label log.
: > "$Output_file"

for file_a in "${Folder_A}"/*
do
	temp_file=$(basename "$file_a")
	FileName=${temp_file%.*}      # file name without its extension
	filename=${temp_file##*_}     # "<label>.<ext>" after the last "_"
	Filename=${filename%.*}       # the numeric label itself
	echo "$Filename" >> "$Output_file"
	if [ "$Filename" -eq 1 ];
	then
		hadoop fs -mkdir -p "/normal/$FileName"
		hadoop fs -put "$file_a" "/normal/$FileName/$temp_file"
	fi
	if [ "$Filename" -eq 0 ];
	then
		hadoop fs -mkdir -p "/slow/$FileName"
		hadoop fs -put "$file_a" "/slow/$FileName/$temp_file"
	fi
	if [ "$Filename" -eq 2 ];
	then
		hadoop fs -mkdir -p "/DDOS/$FileName"
		hadoop fs -put "$file_a" "/DDOS/$FileName/$temp_file"
	fi
	if [ "$Filename" -eq 3 ];
	then
		hadoop fs -mkdir -p "/BP/$FileName"
		hadoop fs -put "$file_a" "/BP/$FileName/$temp_file"
	fi
	if [ "$Filename" -eq 4 ];
	then
		# Fixed: the original created /ZK/$temp_file while the put below
		# targets /ZK/$FileName/, so the upload directory was never
		# created for label-4 files; now consistent with the other four
		# branches.
		hadoop fs -mkdir -p "/ZK/$FileName"
		hadoop fs -put "$file_a" "/ZK/$FileName/$temp_file"
	fi
done
| true
|
5d52c9d6ef4893c020180d2331af0e099aff5193
|
Shell
|
KazAoyama/KaigoSystem
|
/E-LIFE/SYSTEM_SETTEI/CGI/back/._MASTER_POPUP_UNIT.TOUROKU.20140130.155300
|
UTF-8
| 6,040
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# MASTER_POPUP_UNIT.TOUROKU : unit registration CGI
#
# Written by S.Otsubo
# Tables updated:
#   UNIT_NAME                  (only when a brand-new unit is created)
#   SHISETSU_TATEYA_FLOOR_UNIT (facility/building/floor/unit link)
# Exchanges data directly with the screen (CGI form POST).
#--------------------------------------------------------------
# Logging: load site settings, then redirect stderr into a dated trace
# log (set -xv makes the trace verbose).
source /home/hands/E-LIFE/SYSTEM_SETTEI/CGI/SYSTEM_SETTEI.INI &> /dev/null
source /home/hands/.bashrc &> /dev/null
mkdir -p ${log_dir}/$(date +%Y%m%d)
exec 2> ${log_dir}/$(date +%Y%m%d)/LOG.$(basename ${0}).$HOSTNAME.$(date +%Y%m%d) ; set -xv
#rm -f $tmp-*
#------------------------------------------------------
# Read the POST body and turn it into sorted "name value" pairs
# (cgi-name / name-source below are usp Tukubai commands).
dd bs=${CONTENT_LENGTH} |
cgi-name -d_ -i_ |
LANG=C sort |
sed 's/null//g' > $tmp-name
#cookie-name |
#LANG=C sort > ${tmp}-cookie
#------------------------------------------------------
#--------------------------------------------------------
# Pull the form values into shell variables; timestamp for this update.
eval $(name-source $tmp-name)
sdaytime=$(date +%Y%m%d%H%M%S)
#--------------------------------------------------------
function error_exit {
	# Send a minimal text/plain CGI response carrying the error message,
	# clean up this request's temp files, and abort the script.
	message="$1"
	{
		echo -e 'Content-type:text/plain; \n\n'
		echo "${message}"
	}
	rm -f $tmp-*
	exit 1
}
function error_unlock {
	# Release every table lock taken by this request, then fail via
	# error_exit with the given message.
	message="$1"
	cat ${tmp}-taisyou |
	self 1 |
	while read File;do
		# Fixed: locks are created as .../TBL/SHISETSU_MASTER/<table>.LOCK
		# (see the lockfile call in the main flow and the success-path
		# removal); the old path .../TBL/MASTER_POPUP_UNIT/<table>.lock
		# never matched, so a failed request left its locks behind and
		# blocked every later update.
		rm -f ${home_dir}/TBL/SHISETSU_MASTER/${File}.LOCK
	: ;done
	error_exit ${message}
}
#--------------------------------------------------------------
#--------------------------------------------------------------
# Define the update targets (written to a small control file).
cat << FIN > ${tmp}-taisyou
SHISETSU_TATEYA_FLOOR_UNIT 5 6 1 4 7
UNIT_NAME 3 4 1 1 5
FIN
# Control-file columns: 1:table name 2:delete-flag field 3:update-time
# field 4:key FROM 5:key TO
# 6:column count
# SHISETSU_TATEYA_FLOOR_UNIT layout:
# 1:facility ID 2:building ID 3:floor 4:unit ID 5:delete flag
# 6:update time 7:user ID
# -
# UNIT_NAME layout:
# 1:unit ID 2:unit name 3:delete flag 4:update time 5:user ID
# When ${shinki_flg}=false this is name maintenance of an existing unit,
# so SHISETSU_TATEYA_FLOOR_UNIT is not updated.
if [ "${shinki_flg}" == "false" ] ; then
grep -v SHISETSU_TATEYA_FLOOR_UNIT ${tmp}-taisyou > ${tmp}-taisyou_new
mv ${tmp}-taisyou_new ${tmp}-taisyou
fi
#--------------------------------------------------------------
#--------------------------------------------------------
# Create today's input (audit) directory if it does not exist yet.
[ -e ${input_dir}/${today} ] || mkdir ${input_dir}/${today}
#--------------------------------------------------------
#--------------------------------------------------------------
# Validate the input data.
# User ID must be present.
[ -z "${userid}" -o "${userid}" = "_" ] && error_exit "ログインユーザが不明です"
#--------------------------------------------------------------
# ------------------------------------------
# Validate the form values
# (required-field checks are already done in JavaScript on the client).
# Half-width spaces are converted to full-width spaces;
# newline escapes are converted to full-width spaces.
unit_name_syuusei="$(echo "${Unit_name}" | tr " " " " | sed 's/\\n/ /g')"
# Unit names are limited to 10 characters.
# The check is applied to the converted string.
[ "${#unit_name_syuusei}" -gt 10 ] && error_exit "ユニット名は10文字までです"
[ "${#unit_name_syuusei}" -eq 0 ] && error_exit "ユニット名は10文字までです"
# ------------------------------------------
# ------------------------------------------
# Build the update data.
# Take one lock file per target table (3 retries, 10s lease).
cat ${tmp}-taisyou |
self 1 |
while read File ; do
lockfile -1 -r 3 -l 10 ${home_dir}/TBL/SHISETSU_MASTER/${File}.LOCK
[ "${?}" != "0" ] && : > ${tmp}-err
done
[ -e ${tmp}-err ] && error_unlock "再度登録してください"
# If no unit ID was supplied, allocate a new one (max existing ID + 1,
# zero-padded to 5 digits; rows with delete flag 9 are ignored).
if [ "${unit_key}" = "" -o "${unit_key}" = "_" ] ; then
last_unit_id="$([ -s ${home_dir}/TBL/SHISETSU_MASTER/UNIT_NAME ] && (awk '$3!="9"' ${home_dir}/TBL/SHISETSU_MASTER/UNIT_NAME | LANG=C sort -k1,1 -k4,4 | getlast 1 1 | tail -1 | self 1) || echo "00000")"
new_unit_id="$(echo "${last_unit_id}" | awk '{new_no=int($1+1); printf("%05d",new_no)}')"
# Store the newly allocated ID in unit_key.
unit_key=${new_unit_id}
fi
# Build the new records.
# input: the one-line update record per table
echo "${shisetsu_key} ${tateya_key} ${Floor} ${unit_key} 1 ${sdaytime} ${userid}" > ${tmp}-input_SHISETSU_TATEYA_FLOOR_UNIT
echo "${unit_key} ${unit_name_syuusei} 1 ${sdaytime} ${userid}" > ${tmp}-input_UNIT_NAME
# pompa: merged master = existing rows + new row, newest wins per key
cat ${tmp}-taisyou |
delf 6 |
while read File d_flg u_flg s_flg_from s_flg_to ; do
cat ${home_dir}/TBL/SHISETSU_MASTER/${File} ${tmp}-input_${File} |
LANG=C sort -k${s_flg_from},${s_flg_to} -k${u_flg},${u_flg} |
getlast ${s_flg_from} ${s_flg_to} > ${tmp}-pompa_${File}
done
# Column-count sanity checks on each input record
# (retu = column count, gyo = line count; Tukubai commands).
cat ${tmp}-taisyou |
self 1 6 |
while read File retu_no; do
[ "$(retu ${tmp}-input_${File})" != "${retu_no}" ] && : > $tmp-err
[ "$(retu ${tmp}-input_${File} | gyo)" != "1" ] && : > $tmp-err
[ "$(awk 'NF!="'${retu_no}'"' ${tmp}-input_${File} | gyo)" != "0" ] && : > $tmp-err
done
[ -e $tmp-err ] && error_unlock "列数エラー"
# Apply the update.
cat ${tmp}-taisyou |
self 1 |
while read Name ; do
# input: archive the update record for audit
cp -p ${tmp}-input_${Name} ${input_dir}/${today}/$(basename ${0}).$(date +%Y%m%d%H%M%S).${userid}.${HOSTNAME}
[ "${?}" != "0" ] && : > ${tmp}-err
# pompa: install the merged master table
cp -p ${tmp}-pompa_${Name} ${home_dir}/TBL/SHISETSU_MASTER/${Name}
[ "${?}" != "0" ] && : > ${tmp}-err
done
[ -e ${tmp}-err ] && error_unlock "更新に失敗しました"
# Remove the lock files.
cat ${tmp}-taisyou |
self 1 |
while read File; do
rm ${home_dir}/TBL/SHISETSU_MASTER/${File}.LOCK
done
# ------------------------------------------
# Reaching this point means success: return "ok" to the client.
echo -e 'Content-type:text/plain; \n\n' |
cat - <(echo "result ok")
rm -f ${tmp}-*
exit 0
| true
|
756ea9b0a53bfb4586ef03b2a88b65e12ff14b23
|
Shell
|
nysol/doc
|
/olddoc/tutorial/mcmd/en/exercise/mcalday.sh
|
UTF-8
| 586
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
#=====================================================
# MCMD bash script - Lesson 11: Create calculated fields
# Exercise
#=====================================================
# Fixed: the shebang was "#/bin/bash" (missing "!"), which made the first
# line an ordinary comment instead of an interpreter directive.

# Variables
inPath="tutorial_en"

# Command: select the customer/date columns, sort them, and add a
# "dayDiff" column holding the number of days between today and each
# row's date (mcut/msortf/mcal are NYSOL MCMD commands).
mcut f=customer,date i=${inPath}/dat.csv |
msortf f=customer,date |
mcal c='diffday(today(),$d{date})' a="dayDiff" o=outdat/mcaldayout.csv

# Alternative formulations kept from the tutorial:
#mcal c='diff(today(),$d{date},"day")' a="dayDiff" o=outdat/mcaldayout.csv
#mcal c='diff($d{date},0d20010102,"day")' a="dayDiff" o=outdat/mcaldayout.csv
#=====================================================
| true
|
0b47cc3965b65ecb02c858efa98ed9d4006a3b20
|
Shell
|
FauxFaux/debian-control
|
/s/sddm/sddm_0.18.0-1_amd64/postrm
|
UTF-8
| 1,908
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/sh
# Debian maintainer script (postrm) for sddm. On purge it removes runtime
# state, the sddm system user/group and a dangling display-manager service
# link; on aborted install/upgrade it restores the displaced
# default-display-manager file.
set -e
DEFAULT_DISPLAY_MANAGER_FILE=/etc/X11/default-display-manager
DEFAULT_SERVICE=/etc/systemd/system/display-manager.service
case "$1" in
purge)
# Drop sysv-init links and every cache/state/log location sddm uses.
update-rc.d sddm remove > /dev/null
if [ -d /var/cache/sddm ]; then rm -r /var/cache/sddm; fi
if [ -d /var/lib/sddm ]; then rm -r /var/lib/sddm; fi
if [ -d /var/run/sddm ]; then rm -r /var/run/sddm; fi
if [ -d /var/log/sddm ]; then rm -r /var/log/sddm; fi
if [ -d /run/sddm ]; then rm -r /run/sddm; fi
if [ -f /var/log/sddm.log ]; then rm /var/log/sddm.log; fi
# Remove the dedicated system user/group; failure is reported but not
# fatal (set -e is bypassed via ||).
if getent passwd sddm >/dev/null; then
if [ -x /usr/sbin/deluser ]; then
deluser --system sddm >&2 ||
echo "Could not remove sddm user." >&2
fi
fi
if getent group sddm >/dev/null; then
if [ -x /usr/sbin/delgroup ]; then
delgroup --system sddm >&2 ||
echo "Could not remove sddm group." >&2
fi
fi
if [ ! -e "$DEFAULT_DISPLAY_MANAGER_FILE" ] &&
[ -h "$DEFAULT_SERVICE" ] && [ ! -e "$DEFAULT_SERVICE" ]; then
# Dangling symlink, no other display-manager installed
rm "$DEFAULT_SERVICE"
fi
;;
abort-install|abort-upgrade)
# roll back displacement of default display manager file
if [ -e "$DEFAULT_DISPLAY_MANAGER_FILE.dpkg-tmp" ]; then
# FIXME - redo this part uses shell-lib.sh from xfree86
#observe "rolling back change of default X display manager"
mv "$DEFAULT_DISPLAY_MANAGER_FILE.dpkg-tmp" "$DEFAULT_DISPLAY_MANAGER_FILE"
fi
;;
esac
# Automatically added by dh_installdebconf/11.3.5
if [ "$1" = purge ] && [ -e /usr/share/debconf/confmodule ]; then
. /usr/share/debconf/confmodule
db_purge
fi
# End automatically added section
exit 0
| true
|
5355c64d31e736e9715063d875ba22f3e89c8c82
|
Shell
|
mypaceshun/dotfiles
|
/setup.sh
|
UTF-8
| 1,007
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Link this repository's dotfiles into $HOME. Existing real files (not
# symlinks) are moved to ./backup first. Also appends an include of
# ~/.gitconfig.local to ~/.gitconfig exactly once, and runs setup.zsh
# when zsh is available.
BASEDIR=$(cd "$(dirname "${BASH_SOURCE:-$0}")"; pwd)
DOT_FILES=(.bash* .vim* .zsh* .gitconfig)

for file in "${DOT_FILES[@]}"
do
  # A plain file (not already a symlink) is backed up before replacement.
  if [ -f "$HOME/$file" -a ! -L "$HOME/$file" ]; then
    # make backup directory
    if [ ! -e "${BASEDIR}/backup" ]; then
      echo "backup directory created"
      mkdir "${BASEDIR}/backup"
    fi
    echo "$file moved to backup directory because $file exists"
    mv -fv "$HOME/$file" "${BASEDIR}/backup/$file"
  fi
  if [ ! -e "$HOME/$file" ]; then
    echo "make symbolic link $file"
    ln -sf "${BASEDIR}/$file" "$HOME/$file"
  fi
done

# rewrite gitconfig: add the local include exactly once.
GITCONFIG=$HOME/.gitconfig
if [ -f "$GITCONFIG" ]; then
  # -q: only the exit status is wanted; the original also printed the
  # matching line to stdout on every run.
  if ! grep -qE "~/.gitconfig.local" "$GITCONFIG"; then
    echo "rewrite $GITCONFIG"
    cat << EOF >>"$GITCONFIG"
[include]
path = ~/.gitconfig.local
EOF
  fi
fi

# load setup.zsh for zsh
if type zsh >/dev/null 2>&1; then
  ZSH_SETUP=${BASEDIR}/setup.zsh
  if [ -e "${ZSH_SETUP}" ]; then
    zsh "${ZSH_SETUP}"
  fi
fi
| true
|
3bb099b1148012c80212969fa6b88098dc27be9e
|
Shell
|
dladams/lbne-larrel
|
/larrel
|
UTF-8
| 16,323
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
# David Adams
# October, 2014
#
# Script to build an LBNE release.
#
# Option parsing: each flag enables one stage of the checkout / build /
# tag workflow (see the -h output further down for the full list).
THISDIR=$(dirname "$0")
OLDPROD=larsoft
NEWPROD=lbnecode
TOPDIR=$(pwd)
HISTFILE=$TOPDIR/history
COMHISTFILE=$TOPDIR/comhistory
# Stage/option flags, all off (empty) by default.
DBG=
HELP=
README=
ENVCHK=
REMDIR=
REMALL=
DIRMAK=
CHKOUT=
VERUPD=
BUILD=
QUAL=e7:prof
# Fixed: this initialized "Test=" while the -t flag and all later checks
# use TEST.
TEST=
OLDVERS=
NEWVERS=
COMMIT=
ENDFLOW=
PUSHBRA=
PUSHTAG=
UPSPROD=
SHOW=
while getopts 'ghHerRdcvbtClpPuo:n:q:s:' flag; do
  case "${flag}" in
    g) DBG='true' ;;
    h) HELP='true' ;;
    H) README='true' ;;
    e) ENVCHK='true' ;;
    r) REMDIR='true' ;;
    R) REMALL='true' ;;
    d) DIRMAK='true' ;;
    c) CHKOUT='true' ;;
    v) VERUPD='true' ;;
    b) BUILD='true' ;;
    t) TEST='true' ;;
    C) COMMIT='true' ;;
    l) COMMIT='true'; ENDFLOW='true' ;;
    p) PUSHBRA='true'; PUSHTAG='true' ;;
    P) PUSHTAG='true' ;;
    u) UPSPROD='true' ;;
    o) OLDVERS="${OPTARG}" ;;
    n) NEWVERS="${OPTARG}" ;;
    q) QUAL="${OPTARG}" ;;
    s) SHOW="${OPTARG}" ;;
    # Fixed: the error message was a bare string (executed as a command,
    # yielding "command not found"); it must be echoed before exiting.
    *) echo "ERROR: Unexpected option ${flag}"; exit 1 ;;
  esac
done
# Early-exit modes: -H (show docs), -R (wipe everything), -s (show info).
# For option -H, display README.md and exit.
if [ -n "$README" ]; then
cat $THISDIR/README.md
exit 0
fi
# For option -R, remove everything and exit.
if [ -n "$REMALL" ]; then
# NOTE(review): LINE is only assigned further down (after the help text),
# so this echo prints an empty separator on the -R and -s paths — TODO
# confirm whether LINE should be defined before these early exits.
echo -e $LINE
echo Removing development area.
rm -rf workdir
rm -f $HISTFILE
rm -f $COMHISTFILE
rm -rf oldversion.txt
rm -rf newversion.txt
rm -rf ups*.log
exit 0
fi
# For option -s ARG, show ARG and exit.
if [ -n "$SHOW" ]; then
echo $LINE
echo Show options:
if ! cd $TOPDIR/workdir/srcs/lbnecode 1>/dev/null 2>&1; then
echo First use option -c to check out $NEWPROD.
exit 1
fi
if [ "$SHOW" = help ]; then
echo " gitstatus - local git status"
echo " gitdiff - local git diffs"
echo " localbranches - list branches in local checkout"
echo "remotebranches - list branches in remote repository"
echo " localtags - list tags in local repository"
echo " remotetags - list tags in remote repository"
else if [ "$SHOW" = remotetags ]; then
git ls-remote --tags origin | grep -v "\^{}"$
else if [ "$SHOW" = localtags ]; then
git show-ref --tags
else if [ "$SHOW" = remotebranches ]; then
git branch -r
else if [ "$SHOW" = localbranches ]; then
git branch
# NOTE(review): this echo looks stray — it prints under -s localbranches
# but the "changes" heading belongs to the gitstatus branch below.
echo Local repository changes:
else if [ "$SHOW" = gitstatus ]; then
echo "----------------------------"
git status
echo "----------------------------"
else if [ "$SHOW" = gitdiff ]; then
echo "----------------------------"
git diff
echo "----------------------------"
else if [ "$SHOW" = q ]; then
echo "----------------------------"
echo QUAL=$QUAL
echo "----------------------------"
else
echo ERROR: Invalid show option: $SHOW
echo ERROR: Use $0 -s help to list all options.
exit 1
fi; fi; fi; fi; fi; fi; fi; fi
exit 0
fi
# Determine which stage we process through.
# Each STAGEn is non-empty when any flag of that stage or later is set.
STAGE4=$BUILD$TEST$COMMIT$ENDFLOW$PUSHBRA$PUSHTAG
STAGE3=$CHKOUT$VERUPD$STAGE4
STAGE2=$UPSPROD$STAGE3
STAGE1=$REMDIR$REMALL$OLDVERS$NEWVERS$DIRMAK$STAGE2
if [ -n "$DBG" ]; then
echo Stage 1: $STAGE1
echo Stage 2: $STAGE2
echo Stage 3: $STAGE3
echo Stage 4: $STAGE4
fi
if [ -z "$STAGE1" ]; then
HELP='true'
fi
# Show help and exit.
if [ -n "$HELP" ]; then
echo "Usage: $0 [-hgerRdcbtlp] [-o OLDVERS][-n NEWVERS][-q QUAL]"
echo " -h: Help (this message)."
echo " -H: Display instructions (README.md)."
echo " -g: Enable debugging messgages."
echo " -e: Check environment"
echo " -r: Remove existing development area."
echo " -R: Remove everything and exit."
echo " -d: Create new development area."
echo " -c: Check out product source."
echo " -v: Start flow and update version in checked-out code."
echo " -b: Build product."
echo " -t: Test product."
echo " -C: Commit local changes."
echo " -l: Commit local changes and end flow."
echo " -p: Push changes to development and release branches."
echo " -P: Push changes only to release branch (used when push to development fails)."
echo " -u: Make UPS product"
echo " -o OLDVERS: Set the version for the old product (larsoft)"
echo " -n NEWVERS: Set the version for the new product (lbnecode)"
exit 0
fi
# Section separator used by the remaining stages (expanded with echo -e).
LINE="\n===================================="
# Source the initialization script.
echo -e $LINE
echo Initializing
source $THISDIR/lbneinit.sh
# For option -e, we display some of the environment.
if [ -n "$ENVCHK" ]; then
echo -e $LINE
echo Checking environment
for NAME in setup git mrb mrbsetenv; do
if ! type $NAME >/dev/null 2>&1; then
echo ERROR: Initialization failed: $NAME not found.
exit 1
else
echo Found command $NAME
fi
done
fi
# For option -r, we remove any previous builds.
if [ -n "$REMDIR" ]; then
echo -e $LINE
echo Removing development area.
rm -rf workdir
rm -f $HISTFILE
rm -f $COMHISTFILE
fi
# Retrieve the old (larsoft) version.
# For option -o, the value is taken from the command line; otherwise it
# is read from oldversion.txt, written on a previous run. Supplying -o
# with a value that conflicts with the stored one is an error.
echo -e $LINE
echo Setting old product version
OLDVERSFILENAME=$TOPDIR/oldversion.txt
if [ -r $OLDVERSFILENAME ]; then
  OLDVERSFILE=`cat $OLDVERSFILENAME`
  if [ -n "$OLDVERSFILE" -a "$OLDVERS" != "$OLDVERSFILE" ]; then
    if [ -z "$OLDVERS" ]; then
      OLDVERS=$OLDVERSFILE
    else
      echo "ERROR: Requested old version does not match previous value:"
      echo "  $OLDVERS != $OLDVERSFILE"
      exit 1
      # (Unreachable statements that re-assigned OLDVERS after this
      # exit were removed.)
    fi
  fi
else
  if [ -n "$OLDVERS" ]; then
    echo $OLDVERS >$OLDVERSFILENAME
    echo setoldversion >> $HISTFILE
  fi
fi
echo "Old product and version: $OLDPROD $OLDVERS"

# Retrieve the new (lbnecode) version.
# For option -n, the value is taken from the command line; same
# persistence rules as above, stored in newversion.txt.
echo -e $LINE
echo Setting new product version
NEWVERSFILENAME=$TOPDIR/newversion.txt
if [ -r $NEWVERSFILENAME ]; then
  NEWVERSFILE=`cat $NEWVERSFILENAME`
  if [ -n "$NEWVERSFILE" -a "$NEWVERS" != "$NEWVERSFILE" ]; then
    if [ -z "$NEWVERS" ]; then
      NEWVERS=$NEWVERSFILE
    else
      echo "ERROR: Requested new version does not match previous value:"
      echo "  $NEWVERS != $NEWVERSFILE"
      exit 1
      # (Unreachable statements that re-assigned NEWVERS after this
      # exit were removed.)
    fi
  fi
else
  if [ -n "$NEWVERS" ]; then
    echo $NEWVERS >$NEWVERSFILENAME
    echo setnewversion >> $HISTFILE
  fi
fi

# Both versions are mandatory from this point on.
if [ -z "$OLDVERS" ]; then
  echo "Use -o to set old version."
  exit 1
fi
if [ -z "$NEWVERS" ]; then
  echo "Use -n to set new version."
  exit 1
fi
echo "New product and version: $NEWPROD $NEWVERS"
# Add header to the command history file.
# Every executed command from here on is also appended to $COMHISTFILE.
echo >>$COMHISTFILE
echo "# "`date` >>$COMHISTFILE
echo "# $0 $*" >>$COMHISTFILE
# For -r, create the working directory.
if [ ! -r workdir ]; then
echo -e $LINE
echo Creating workdir
COM="mkdir workdir"
echo $COM >> $COMHISTFILE
if ! $COM; then
echo ERROR: Unable to create workdir
exit 1
fi
echo createdworkdir >> $HISTFILE
fi
# Move to the working directory.
# (Commands are stored in COM so they can be logged before execution.)
COM="cd $TOPDIR/workdir"
echo $COM >>$COMHISTFILE
$COM
if [ -n "$DIRMAK" -a -z "$UPSPROD" ]; then
echo -e $LINE
echo Creating development area.
if [ -z "$OLDVERS" ]; then
echo "ERROR: Use -o to set old version."
exit 1
fi
if [ -z "$QUAL" ]; then
"ERROR: Product qualifier must defined."
exit 1
fi
if [ -r srcs ]; then
echo "ERROR: Development area already exists"
exit 1
fi
COM="mrb newDev -v $OLDVERS -q $QUAL"
echo $COM >>$COMHISTFILE
if ! $COM; then
echo "ERROR: Command failed: $COM"
exit 1
fi
echo creatednewdev >> $HISTFILE
fi
# Exit stage 1.
if [ -z "$STAGE2" ]; then
echo -e $LINE
if [ -n "$DBG" ]; then echo Exiting before stage 2; fi
echo Done
exit 0
fi
# Check that development are exists or we are doing UPS build.
if [ ! -r srcs -a -z "$UPSPROD" ]; then
echo "ERROR: Use -d to create development area"
exit 1
fi
echo Development area: `pwd`
# Find the current platform (slf5, slf6, ...)
# Or we can use this:
#  setup cetpkgsupport
#  get-directory-name os
PLATFORM=
#SLF5=`lsb_release -a | grep 5.4`
#SLF6=`lsb_release -a | grep 6.4`
#if test -n "$SLF5" -a -z "$SLF6"; then
#PLATFORM=slf5
#else if test -z "$SLF5" -a -n "$SLF6"; then
#PLATFORM=slf6
#fi; fi
PLATFORM=`get-directory-name os`
# For option -u, build UPS product.
# Creates a fresh per-platform/qualifier release directory, checks out the
# tagged product there, builds it and packages the tarball.
if [ -n "$UPSPROD" ]; then
echo -e $LINE
echo Building UPS product $NEWPROD $NEWVERS
if test -z "$PLATFORM"; then
echo Platform could not be determined.
exit 1
fi
# Qualifier colons are not filesystem-safe; map e.g. e7:prof -> e7-prof.
RELDIR=`echo reldir-$PLATFORM-$QUAL | sed 's/:/-/g'`
echo Release directory: $RELDIR
if [ -r $RELDIR ]; then
echo Release directory already exists.
echo Delete workdir/$RELDIR it before to rebuilding release.
exit 1
fi
rm -rf $RELDIR
if ! mkdir $RELDIR; then
echo "ERROR: Unable to to create release directory."
exit 1
fi
COM="cd $RELDIR"
echo $COM >>$COMHISTFILE
$COM
echo $RELDIR >>$HISTFILE
echo MRB_SOURCE=$MRB_SOURCE
COM="mrb newDev -v $OLDVERS -q $QUAL"
echo $COM>>$COMHISTFILE
if ! $COM; then
echo "ERROR: Unable to make development area for $OLDVERS"
exit 1
fi
echo MRB_SOURCE=$MRB_SOURCE
# Source the localProducts setup that mrb newDev just created.
for DIR in localProducts*; do
echo " $DIR"
COM="source $DIR/setup"
$COM
echo $COM>>$COMHISTFILE
done
COM="cd $MRB_SOURCE"
$COM
echo $COM>>$COMHISTFILE
UPSLOG=$TOPDIR/workdir/$RELDIR/upscheckout.log
COM="mrb gitCheckout -t $NEWVERS $NEWPROD"
echo Checking out $NEWPROD $NEWVERS. Log is $UPSLOG.
#if ! mrb gitCheckout -t $NEWVERS $NEWPROD >$LOG 2>&1; then
echo $COM>>$COMHISTFILE
if ! $COM >>$UPSLOG 2>&1; then
echo "ERROR: Unable to check out $NEWPROD $NEWVERS"
exit 1
fi
# mrb gitCheckout can fail without a non-zero status; scan its log too.
if grep ^error: $UPSLOG; then
echo "ERROR: Error checking out $NEWPROD $NEWVERS:"
grep ^error: $UPSLOG
exit 1
fi
echo MRB_SOURCE=$MRB_SOURCE
COM="cd $MRB_BUILDDIR"
$COM
echo $COM>>$COMHISTFILE
COM=mrbsetenv
$COM
echo $COM>>$COMHISTFILE
UPSLOG=$TOPDIR/workdir/$RELDIR/upsbuild.log
echo Building. Log is $UPSLOG
mrb b >$UPSLOG 2>&1
UPSLOG=$TOPDIR/workdir/$RELDIR/upsmake.log
echo Making product. Log is $UPSLOG
mrb makePackage >$UPSLOG 2>&1
echo -e $LINE
if [ -n "$DBG" ]; then echo Exiting after product build; fi
ls $TOPDIR/workdir/$RELDIR/build_*/$NEWPROD-*.tar*
echo Done
exit 0
fi
# Exit stage 2.
if [ -z "$STAGE3" ]; then
echo -e $LINE
if [ -n "$DBG" ]; then echo Exiting before stage 3; fi
echo Done
exit 0
fi
# Set up larsoft.
echo -e $LINE
echo Setting up $OLDPROD
if [ -n "$DBG" ]; then echo "=====Command: setup $OLDPROD $OLDVERS -q $QUAL"; fi
COM="setup $OLDPROD $OLDVERS -q $QUAL"
echo $COM >>$COMHISTFILE
if ! $COM; then
echo Setup failed for $OLDPROD $OLDVERS
exit 1
fi
# Set up installed products (lbnecode).
echo -e $LINE
echo Setting up local products
for DIR in localProducts*; do
echo " $DIR"
if [ -n "$DBG" ]; then echo "=====Command: source $DIR/setup"; fi
COM="source $DIR/setup"
echo $COM >>$COMHISTFILE
$COM
done
echo MRB_SOURCE=$MRB_SOURCE
COM="cd $MRB_SOURCE"
echo $COM >>$COMHISTFILE
$COM
# For option -c, check out lbnecode.
# Otherwise move to the area where it was previously checked out.
if [ -n "$CHKOUT" ]; then
echo -e $LINE
echo Checking out $NEWPROD
if [ -r $NEWPROD ]; then
echo "ERROR: $NEWPROD is already checked out."
exit 1
fi
COM="mrb gitCheckout $NEWPROD"
echo $COM >>$COMHISTFILE
if ! $COM; then
echo ERROR: Checkout failed
exit 1
fi
echo checkout >> $HISTFILE
COM="cd $NEWPROD"
echo $COM >>$COMHISTFILE
$COM
echo "Listing branches:"
git branch
else
echo -e $LINE
echo Moving to product area.
COM="cd $NEWPROD"
echo $COM >>$COMHISTFILE
if ! $COM; then
echo ERROR: Product area not found.
echo "ERROR: Use larrsoft -c to check out $NEWPROD"
exit 1
fi
fi
# For option -v, start a gitflow release.
# The marker "startflow" in $HISTFILE guards against starting twice.
if [ -n "$VERUPD" ]; then
echo -e $LINE
echo Starting flow
if grep startflow $HISTFILE 1>/dev/null 2>&1; then
echo ERROR: Flow is already started.
exit 1
fi
if true; then
echo
echo Switching to master
COM="git checkout master"
$COM
fi
echo
echo Starting flow.
COM="git flow release start $NEWVERS"
echo $COM >>$COMHISTFILE
if ! $COM; then
echo "ERROR: Error creating gitflow release."
exit 1
fi
echo startflow >> $HISTFILE
echo "Listing branches:"
git branch
fi
# For option -v, update the lbnecode versions of lbnecode and larsoft.
if [ -n "$VERUPD" ]; then
echo -e $LINE
echo Setting $NEWPROD version to $NEWVERS
# NOTE(review): this greps for "versionupdate2" but the marker written
# below is "versionupdate", so this already-updated guard can never
# fire — TODO confirm the intended marker name.
if grep versionupdate2 $HISTFILE 1>/dev/null 2>&1; then
echo ERROR: Version has already been updated.
exit 1
fi
COM="mrb uv $NEWPROD $NEWVERS"
echo $COM >>$COMHISTFILE
$COM
echo versionupdate >> $HISTFILE
echo
echo Local repository changes:
echo "----------------------------"
git diff ups/product_deps
echo "----------------------------"
fi
# Exit stage 3.
if [ -z "$STAGE4" ]; then
echo -e $LINE
if [ -n "$DBG" ]; then echo Exiting before stage 4; fi
echo Done
exit 0
fi
# Do MRB setup.
# Move into the build directory and load the mrb build environment.
echo -e $LINE
echo Setting up for build or test...
COM="cd $MRB_BUILDDIR"
echo $COM >>$COMHISTFILE
$COM
COM=mrbsetenv
echo $COM >>$COMHISTFILE
if ! $COM; then
echo ERROR: mrbsetenv failed.
exit 1
fi
# For option -b, build lbnecode.
# Outcome is recorded in $HISTFILE as buildpassed/buildfailed.
if [ -n "$BUILD" ]; then
echo -e $LINE
echo Building...
COM="mrb build"
echo $COM >>$COMHISTFILE
if ! $COM; then
echo Build failed.
echo buildfailed >> $HISTFILE
exit 1
fi
echo buildpassed >> $HISTFILE
fi
# For option -t, test lbnecode.
# Outcome is recorded in $HISTFILE as testpassed/testfailed.
if [ -n "$TEST" ]; then
echo -e $LINE
echo Testing...
COM="mrb test"
echo $COM >>$COMHISTFILE
if ! $COM; then
echo Test failed.
echo testfailed >> $HISTFILE
exit 1
fi
echo testpassed >> $HISTFILE
fi
# For option -C (and -l), commit changes to the release branch.
# The "committed" marker in $HISTFILE makes this idempotent.
if [ -n "$COMMIT" ]; then
echo -e $LINE
echo Committing flow...
if grep committed $HISTFILE 1>/dev/null 2>&1; then
echo Commit is already done.
else
COM="cd $MRB_SOURCE/$NEWPROD"
echo $COM >>$COMHISTFILE
$COM
echo
echo Show branches:
git branch
echo
echo Committing...
COM="git commit -a -m Set_package_version_to_$NEWVERS"
echo $COM >>$COMHISTFILE
if ! $COM; then
echo ERROR: Git commit failed.
exit 1
fi
echo committed >> $HISTFILE
fi
fi
# For option -l, end gitflow release:
# --copy changes to develop and master branches,
# --create tag, and
# --delete release branch.
if [ -n "$ENDFLOW" ]; then
echo -e $LINE
echo Finishing flow...
cd $MRB_SOURCE/$NEWPROD
echo "***************"
# Capture the current git editor and change the value to something
# that always succeeds without changing the file.
# NOTE(review): if no global core.editor was set, OLDEDITOR is empty
# and $COMED2 runs "git config --global core.editor" with no value,
# which prints the setting instead of restoring it. Also the failure
# path below restores twice (explicit command plus $COMED2) — TODO
# confirm and simplify.
OLDEDITOR=`git config --global core.editor`
COMED1="git config --global core.editor true"
COMED2="git config --global core.editor $OLDEDITOR"
COM="git flow release finish -m Ended_flow_for_${NEWPROD}_$NEWVERS $NEWVERS"
$COMED1
echo $COM >>$COMHISTFILE
if ! $COM; then
echo ERROR: Finishing flow failed.
git config --global core.editor $OLDEDITOR
$COMED2
exit 1
fi
$COMED2
echo "%%%%%%%%%%%%%%%"
echo flowfinished >> $HISTFILE
fi
# DRYRUN can be set to --dry-run for rehearsing pushes; it is cleared
# immediately, so pushes below are real.
DRYRUN="--dry-run"
DRYRUN=
# For option -p, push the branch changes back to the repository.
if [ -n "$PUSHBRA" ]; then
echo -e $LINE
echo Pushing branches...
COM="cd $MRB_SOURCE/$NEWPROD"
echo $COM >>$COMHISTFILE
$COM
echo
echo master...
COM="git push $DRYRUN origin master"
echo $COM >>$COMHISTFILE
if ! $COM; then
echo ERROR: Branch push to master failed.
exit 1
fi
echo
echo develop...
COM="git push $DRYRUN origin develop"
echo $COM >>$COMHISTFILE
if ! $COM; then
echo ERROR: Branch push to develop failed.
echo Continuing after error...
fi
echo branchespushed >> $HISTFILE
fi
# For option -p or -P, push the new tag back to the repository.
if [ -n "$PUSHTAG" ]; then
echo -e $LINE
echo Pushing tags...
COM="cd $MRB_SOURCE/$NEWPROD"
echo $COM >>$COMHISTFILE
$COM
COM="git push $DRYRUN --tags"
echo $COM >>$COMHISTFILE
if ! $COM; then
echo ERROR: Tag push failed.
exit 1
fi
echo tagpushed >> $HISTFILE
fi
echo -e $LINE
if [ -n "$DBG" ]; then echo Exiting after all stages; fi
# NOTE(review): prints "Done 3" — the trailing "3" looks like leftover
# debug output; TODO confirm.
echo Done 3
| true
|
4c557537afda8db79c08b744d2076405ac1158ed
|
Shell
|
xadflores/UpsilonAna_Run2
|
/UpperLimit/Code_2S/run_Ws.sh
|
UTF-8
| 408
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# For each centrality bin, run the Raa2S_Workspace ROOT macro on the
# matching PbPb and pp fit files and rename the canvases it produces.
# dir
dir2=WS
#for cat in 0cent20 20cent60 60cent100 100cent160 0cent160; do
for cat in 0cent10; do
# Locate the input fit files; assumes exactly one file matches each glob
# (ls output with multiple matches would break the macro arguments).
ppfile=`ls $dir2/FIT*PP*root`
pbpbfile=`ls $dir2/FIT*PbPb*$cat*root`
# Escaped parens/quotes/commas build the literal ROOT macro invocation
# Raa2S_Workspace.C("<pbpb>","<pp>","<out>").
nice root -l -b -q Raa2S_Workspace.C\(\"${pbpbfile}\"\,\"${ppfile}\"\,\"$dir2/WS_combo2S_${cat}.root\"\)
# The macro writes fixed canvas names; move them to per-bin names.
mv c1.pdf $dir2/c1_${cat}.pdf
mv c2.pdf $dir2/c2_${cat}.pdf
mv cpoi.pdf $dir2/cpoi_${cat}.pdf
done
| true
|
791db11fb6f7a6f1fa7ecba400bae5dfae24997e
|
Shell
|
jairot/yogcheck
|
/start2.sh
|
UTF-8
| 787
| 2.984375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Launch the application under gunicorn (intended to run under supervisor,
# hence the exec and no daemonization).
NAME="yogcheck" # Name of the application
DJANGODIR=/home/azureuser/yogcheck/ # Django project directory
USER=azureuser #the user to run as
GROUP=azureuser
NUM_WORKERS=3 # how many worker processes should Gunicorn spawn
#DJANGO_SETTINGS_MODULE=settings # which settings file should Django use
echo "Starting $NAME"
# Activate the virtual environment
cd $DJANGODIR
source bin/activate
#export DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE
export PYTHONPATH=$DJANGODIR:$PYTHONPATH
# Create the run directory if it doesn't exist
# Programs meant to be run under supervisor should not daemonize themselves (do not use --daemon)
# exec replaces this shell so supervisor tracks gunicorn's PID directly.
# NOTE(review): entry point is hello:app, not a Django WSGI module -- confirm.
exec bin/gunicorn \
-b 127.0.0.1:4000 \
--name $NAME \
--workers $NUM_WORKERS \
--user=$USER --group=$GROUP \
--log-level=debug hello:app
| true
|
a0d4671e5b387de630a96967dcaf8e6349d257a6
|
Shell
|
Amar1729/bin
|
/nix-list
|
UTF-8
| 1,472
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# Query installed pkgs in nix store of the current user's profile
# see `man nix-store' for more info
#
# I wrote a little `depth' helper that makes viewing the --tree output a little better
op="$1"
# Default to listing the closure (-R) when no operation is given.
[[ -z "$op" ]] && op="-R"
# Find store paths whose name matches "$1-<version>" (fd must be installed).
_name () {
fd --maxdepth 1 --type d "$1-?[^-]*$" /nix/store
}
# Args:
# -R - shows list of installed pkgs
# --tree - shows list of installed pkgs as ascii art tree output
_default () {
nix-store -q $1 /nix/var/nix/profiles/per-user/$USER/profile
}
# Allows filtering the tree by how deep pkgs are
# Minimum 0 (just outputs user manifest)
# Depth 1 will show top-level pkgs (i.e. probably the ones the user installed with nix-env -i)
_depth () {
# Build a grep pattern matching tree lines at exactly the requested depth,
# then colour the first (root) line red via sed.
# NOTE(review): _gsed is defined but never invoked below -- confirm intent.
esc=$(printf '\033')
_gsed () { gsed -e "1 s|^\(.*\)$|${esc}[31m\1${esc}[0m|"; }
expr='^'
if [[ $1 -eq 0 ]]; then
expr+='/'
elif [[ $1 -eq 1 ]]; then
expr+='+'
# make sure to print first line too
expr="\($expr\)\|\(^/\)"
else
# Each extra level of depth is an optional "|   " tree-drawing prefix.
for i in $(seq 1 $(($1-1))); do
expr+='\(|\s*\)\?'
done
expr+='+'
# make sure to print first line too
expr="\($expr\)\|\(^/\)"
fi
# display results (including first line) inside less if necessary
_default --tree | grep $expr | less -FX
}
if [[ "$op" == "--depth" ]]; then
[[ -z "$2" ]] && echo "Require arg to --depth" && exit 1
_depth $2
elif [[ "$op" == "--name" ]]; then
_name $2
else
_default $op
fi
| true
|
bb4d4f963855b76b4a5edf99756b93966ac5ce66
|
Shell
|
schiermi/bashmeetup
|
/demo/params.sh
|
UTF-8
| 365
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo: how $*, "$*", $@ and "$@" differ when iterating over the
# positional parameters.  The unquoted forms are intentionally left
# unquoted -- word splitting is the point of the demonstration.
#IFS=$'\n\t'
fmt="%-4s i:%2i >%s<\n"
echo "Anzahl Parameter: $#"
echo
# Unquoted $*: every parameter is word-split on IFS.
i=0
for p in $*
do
printf "${fmt}" '$*' $((i++)) "${p}"
done
echo
# "$*": all parameters joined into a single word (separated by IFS[0]).
i=0
for p in "$*"
do
printf "${fmt}" '"$*"' $((i++)) "${p}"
done
echo
# Unquoted $@: behaves like unquoted $* (word-split).
i=0
for p in $@
do
printf "${fmt}" '$@' $((i++)) "${p}"
done
echo
# "$@": each parameter preserved as its own word -- usually what you want.
i=0
for p in "$@"
do
printf "${fmt}" '"$@"' $((i++)) "${p}"
done
| true
|
8cabf5325595fa4e77bff4b794462e515f940e1d
|
Shell
|
dawson2016/script-mess
|
/mail.sh
|
UTF-8
| 1,088
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Zabbix alert script: convert the subject/body to GB2312 when possible,
# append the current top-3 CPU consumers for "cpuload" alerts, and send
# the result with mail(1).
to_email_address="$1" # recipient email address (first argument from Zabbix)
message_subject_utf8="$2" # mail subject (second argument from Zabbix)
message_body_utf8="$3" # mail body (third argument from Zabbix)
# Try to transcode the subject to GB2312; fall back to UTF-8 on failure.
message_subject_gb2312=`iconv -t GB2312 -f UTF-8 << EOF
$message_subject_utf8
EOF`
[ $? -eq 0 ] && message_subject="$message_subject_gb2312" || message_subject="$message_subject_utf8"
# Same transcoding attempt for the body.
message_body_gb2312=`iconv -t GB2312 -f UTF-8 << EOF
$message_body_utf8
EOF`
[ $? -eq 0 ] && message_body="$message_body_gb2312" || message_body="$message_body_utf8"
# For CPU-load alerts, attach the three processes using the most CPU.
echo $3 | grep cpuload &>/dev/null
[ $? -eq 0 ] && cpu_max1=`ps aux | grep -v ^'USER' | sort -rn -k3 | awk '{print $1"\t"$3}' | head -3 | sed -n '1p'` && cpu_max2=`ps aux | grep -v ^'USER' | sort -rn -k3 | awk '{print $1"\t"$3}' | head -3 | sed -n '2p'`&& cpu_max3=`ps aux | grep -v ^'USER' | sort -rn -k3 | awk '{print $1"\t"$3}' | head -3 | sed -n '3p'`|| cpu_max=''
echo "$message_body" "$cpu_max1" "$cpu_max2" "$cpu_max3"| mail -s "$message_subject" $1
| true
|
1ca49ce666b62f03a1aee1d6b86b4a5f1fc6cc8e
|
Shell
|
kei-sato/homebrew-tasktoday
|
/tasktoday
|
UTF-8
| 12,104
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# tasktoday: a tab-separated daily task list manager.
# NOTE(review): uses BSD/macOS `date -jf` throughout -- not portable to GNU date.
set -e
USAGE="
Recommend:
alias tt=tasktoday
Usage:
$0 (ad|in|st|en|ca|clear|ed|path|help) [options...]
Example:
- add a task with its estimated time 1 hour
tt ad -e 1 house keep
- insert a task (its estimated time is default to 0.5 hour)
tt in fit
- start a task
tt st
- end a task, and start next task (default)
tt en
- cancel current task
tt ca
- remove current task
tt rm
- edit tasks
tt ed
"
# echo to stderr.
eecho() { echo "$@" 1>&2; }
# print message to stderr and exit non-zero.
abort() { eecho "$@"; exit 1; }
# yellow echo
yecho() {
color=3
echo -e $(tput setaf $color)"$*"$(tput op);
}
# Print $1 normalized via bc if it looks like a number; empty input is
# accepted silently; non-numbers warn on stderr and return 1.
validateNumber() {
[[ -z $1 ]] && return
[[ $1 =~ ^([-][0-9]+)?[0-9]*([.][0-9]+)?$ ]] || { eecho "$1 is not a number"; return 1; }
bc <<< "$1"
}
while getopts h option; do
case $option in
h|\?) abort "$USAGE"
esac
done
shift $((OPTIND-1))
FNAME_FORMAT="%Y%m%d"
TIME_FORMAT="%H:%M"
TT_HOME="$HOME/.tasktoday"
TASKS_DIR="$TT_HOME/tasks"
NOW="$(date +"%s")"
# "Today" rolls over at 05:00 (5 hours are subtracted before formatting),
# so late-night work still lands in the previous day's file.
TODAY="$(date -jf %s $((NOW-5*60*60)) +"$FNAME_FORMAT")"
TODAY_FILE="$TASKS_DIR/$TODAY"
LINK_FILE="$TASKS_DIR/today"
# 1-based field indexes into each tab-separated task record.
INDEX_ESTIMATED=1
INDEX_TITLE=2
INDEX_START=3
INDEX_END=4
INDEX_REPEAT=5
INDEX_TAGS=6
# Print the title of the first task in today's file (fails if no tasks).
getTitle() {
[[ -s "$TODAY_FILE" ]] || return 1
head -n1 "$TODAY_FILE" | cut -d$'\t' -f"$INDEX_TITLE"
}
# Print tasks of the requested state ($1 = remain|current|finished) as a
# table, preserving file order, followed by today's estimated end time.
getTasks() {
[[ -s "$TODAY_FILE" ]] || return 1
[[ $1 != "remain" && $1 != "current" && $1 != "finished" ]] && return 1
# awk buckets every record by state keyed on its line number; only the
# bucket named by $1 is printed (line number first, for stable sorting).
CMD='{
if ($'$INDEX_START' == "" && $'$INDEX_END' == "")
remain[NR] = $0
else if ($'$INDEX_END' == "")
current[NR] = $0
else
finished[NR] = $0
} END {
for (k in '"$1"') {
print k, '"$1"'[k]
}
}'
{
colNames
awk -F $'\t' "$CMD" < "$TODAY_FILE" | sort -n | cut -f 2- -d' '
} | column -t -s$'\t'
eecho "today goal: $(estimatedEndTime)"
}
# Convert an hour count (may be fractional) to whole seconds.
calcSecWithHour() {
NUM=$(validateNumber "$1")
[[ -n $NUM ]] && printf %.0f $(bc <<< "scale=0; $NUM * 3600")
}
# Sum the estimated hours of all unfinished tasks; if the current task has
# already overrun its estimate, the overrun is added on top.
estimatedRemainTime() {
REMAIN_TIME=$(awk -F $'\t' 'BEGIN { remain = 0 } { if ($'$INDEX_END' == "") remain += $'$INDEX_ESTIMATED' } END { print remain }' < "$TODAY_FILE")
line=$(getCurrentTaskLine)
if [[ -n $line ]]; then
ESTIMATED=$(cut -d$'\t' -f"$INDEX_ESTIMATED" <<< "$line")
ESTIMATED_SEC=$(calcSecWithHour $ESTIMATED)
START=$(cut -d$'\t' -f"$INDEX_START" <<< "$line")
START_SEC=$(date -jf $TIME_FORMAT "$START" +"%s")
NOW_SEC=$(date +"%s")
ELAPSED_SEC=$((NOW_SEC - START_SEC))
# add additional time which is elapsed over estimated time
if [[ $ELAPSED_SEC -gt $ESTIMATED_SEC ]]; then
ADDITIONAL_SEC=$((ELAPSED_SEC - ESTIMATED_SEC))
REMAIN_TIME=$(bc <<< "scale=2; $REMAIN_TIME + $ADDITIONAL_SEC / 3600")
fi
fi
echo $REMAIN_TIME
}
# Print HH:MM that is $1 hours before epoch-seconds $2 (defaults: 1h, now).
calcStartTime() {
DURATION_HOUR=${1:-1}
END_SEC=${2:-$(date +"%s")}
DURATION_SEC=$(calcSecWithHour $DURATION_HOUR)
echo $(date -jf "%s" "$((END_SEC - DURATION_SEC))" +"$TIME_FORMAT")
}
# Print HH:MM that is $1 hours after epoch-seconds $2 (defaults: 1h, now).
calcEndTime() {
DURATION_HOUR=${1:-1}
START_SEC=${2:-$(date +"%s")}
DURATION_SEC=$(calcSecWithHour $DURATION_HOUR)
echo $(date -jf "%s" "$((START_SEC + DURATION_SEC))" +"$TIME_FORMAT")
}
# Project when today's remaining work will finish (HH:MM).
estimatedEndTime() {
ESTIMATED_REMAIN_HOUR=$(estimatedRemainTime)
# set start to the start of current task if current task exists
# otherwise current time
START_SEC=$(getCurrentTaskStartSec)
START_SEC=${START_SEC:-$(date +"%s")}
calcEndTime "$ESTIMATED_REMAIN_HOUR" "$START_SEC"
}
# Print the number of lines in file $1; prints 0 when the file is
# empty or does not exist.  All whitespace is stripped from wc's output.
countLine() {
  local target=$1
  if [[ ! -s "$target" ]]; then
    echo 0
    return
  fi
  wc -l < "$target" | tr -d '[[:space:]]'
}
# Interactively browse tasks from the previous working day and from one
# week ago (union, de-duplicated by title) so old tasks can be re-added.
# NOTE(review): the chosen line number is read but never used to copy a
# task into today's file -- this function looks unfinished; confirm.
addFromOldTasks() {
TODAY_SEC=$(date +"%s")
# On Mondays look back to Friday, otherwise to yesterday.
if [[ $(date +"%u") -eq 1 ]]; then
PREVIOUS_DAY_SEC=$((TODAY_SEC - 3*24*3600))
else
PREVIOUS_DAY_SEC=$((TODAY_SEC - 24*3600))
fi
PREVIOUS_DAY=$(date -jf "%s" $PREVIOUS_DAY_SEC +"$FNAME_FORMAT")
WEEK_AGO=$(date -jf "%s" $((TODAY_SEC - 7*24*3600)) +"$FNAME_FORMAT")
PREVIOUS_DAY_FILE=$TASKS_DIR/$PREVIOUS_DAY
WEEK_AGO_FILE=$TASKS_DIR/$WEEK_AGO
TMPFILE=$(mktemp)
{
[[ -s $PREVIOUS_DAY_FILE ]] && cat "$PREVIOUS_DAY_FILE"
[[ -s $WEEK_AGO_FILE ]] && cat "$WEEK_AGO_FILE"
} > "$TMPFILE"
[[ -s $TMPFILE ]] || abort "history not found"
while [[ $(countLine "$TMPFILE") -gt 0 ]]; do
# De-duplicate by title (field 2), then number each line for selection.
# BUGFIX: the numbering awk program was missing its closing brace
# ('{print NR")\t"$0'), which made awk fail with a syntax error.
awk '{ a[$2] = $0 } END { for(k in a) { print a[k] } }' < "$TMPFILE" | sort | awk '{print NR")\t"$0}' | column -t -s$'\t'
read -rn1 -p "input number of the line (or quit if not a number): " LINE_NUM <&3
[[ ${LINE_NUM} -ge 0 ]] || break
done
}
# Add a task to today's file.
#   -e HOURS  estimated duration (default 0.5)
#   -r VALUE  repeat field
#   -t VALUE  tags field
#   -i        insert after the last finished task instead of appending
# Remaining arguments form the title.
addTask() {
unset LINE_NUM
while getopts e:r:t:i option; do
case $option in
e) ESTIMATED="$OPTARG";;
r) REPEAT="$OPTARG";;
t) TAGS="$OPTARG";;
i) LINE_NUM=$(getLastFinishedTaskLineNum);;
esac
done
shift $((OPTIND-1))
[[ $# -eq 0 ]] && abort "title not found"
TITLE="$*"
: "${ESTIMATED:=0.5}"
: "${REPEAT:=}"
: "${TAGS:=}"
START=""
END=""
# Shorthand: "." and ".5" both mean half an hour.
[[ $ESTIMATED = '.' || $ESTIMATED = '.5' ]] && ESTIMATED=0.5
RECORD="$ESTIMATED\t$TITLE\t$START\t$END\t$REPEAT\t$TAGS"
# Without -i, append at the end of the file.
: ${LINE_NUM:=$(wc -l < "$TODAY_FILE")}
insertTask "$LINE_NUM" "$RECORD"
printTasksWithGoal
}
# Insert record $2 after line number $1 of today's file (0 = at the top).
insertTask() {
LINE_NUM=${1:-0}
[[ $LINE_NUM -ge 0 ]] || return
NEW_LINE=$2
[[ -z "$NEW_LINE" ]] && return
TMPFILE=$(mktemp)
{
# "head -N" keeps the first N lines; LINE_NUM*-1 builds that -N flag.
[[ $LINE_NUM -gt 0 ]] && head $((LINE_NUM*-1)) "$TODAY_FILE"
echo -e "$NEW_LINE"
tail +$((LINE_NUM+1)) "$TODAY_FILE"
} > "$TMPFILE"
mv "$TMPFILE" "$TODAY_FILE"
}
# Print the line number of the last task that has a start time, scanning
# from the top; 0 when the file is empty or no task has started.
getLastFinishedTaskLineNum() {
[[ -s "$TODAY_FILE" ]] || { echo 0; return; }
LINE_NUM=0
while read -r line; do
START=$(cut -d$'\t' -f"$INDEX_START" <<< "$line")
[[ -z $START ]] && { echo $LINE_NUM; return; }
((LINE_NUM++))
done < "$TODAY_FILE"
echo $LINE_NUM
}
# Print the record of the task that is started but not yet ended
# (the "current" task); prints nothing if there is none.
getCurrentTaskLine() {
[[ -s "$TODAY_FILE" ]] || return
while read -r line; do
END=$(cut -d$'\t' -f"$INDEX_END" <<< "$line")
if [[ -z $END ]]; then
START=$(cut -d$'\t' -f"$INDEX_START" <<< "$line")
if [[ -n $START ]]; then
echo "$line"
return
fi
fi
done < "$TODAY_FILE"
}
# Print the current task's start time as epoch seconds (empty if none).
getCurrentTaskStartSec() {
line=$(getCurrentTaskLine)
[[ -z "$line" ]] && return
START=$(cut -d$'\t' -f"$INDEX_START" <<< "$line")
START_SEC=$(date -jf $TIME_FORMAT "$START" +"%s")
echo "$START_SEC"
}
# Show the current task's title, elapsed time, start time and projected
# end time on stderr (in yellow); silent when no task is running.
printCurrentTask() {
line=$(getCurrentTaskLine)
[[ -z "$line" ]] && return
TITLE=$(cut -d$'\t' -f"$INDEX_TITLE" <<< "$line")
ESTIMATED=$(cut -d$'\t' -f"$INDEX_ESTIMATED" <<< "$line")
START=$(cut -d$'\t' -f"$INDEX_START" <<< "$line")
START_SEC=$(date -jf $TIME_FORMAT "$START" +"%s")
ESTIMATED_END=$(calcEndTime "$ESTIMATED" "$START_SEC")
NOW_SEC=$(date +"%s")
ELAPSED_SEC=$((NOW_SEC - START_SEC))
# A negative elapsed time means the task started before midnight.
[[ $ELAPSED_SEC -lt 0 ]] && ELAPSED_SEC=$((ELAPSED_SEC + 24*60*60))
# Render the elapsed seconds as HH:MM by offsetting from 00:00 today.
ZERO_SEC=$(date -jf "$TIME_FORMAT" "00:00" +"%s")
ELAPSED=$(date -jf %s $((ZERO_SEC + ELAPSED_SEC)) +"$TIME_FORMAT")
1>&2 yecho "<< $TITLE >>"$'\n'"now $ELAPSED from $START will end $ESTIMATED_END"$'\n'
}
# Start the first not-yet-ended task, stamping its start time with the
# current clock (or, with "$1" hours, backdating the start by that much).
# If a task is already running, interactively offer to restart it.
startTask() {
[[ -s "$TODAY_FILE" ]] || { abort "no tasks"; }
ELAPSED=$(validateNumber "$1")
STARTED=false
TMPFILE=$(mktemp)
printCurrentTask
# fd 3 keeps the terminal available for the confirmation prompt while
# stdin is consumed by the file-reading loop.
exec 3<&0
while read -r line; do
$STARTED && { echo "$line"; continue; }
END=$(cut -d$'\t' -f"$INDEX_END" <<< "$line")
if [[ -z $END ]]; then
START=$(cut -d$'\t' -f"$INDEX_START" <<< "$line")
if [[ -n $START ]]; then
read -rn1 -p "restart? [y/N]: " SHOULD_RESTART <&3
eecho
[[ ${SHOULD_RESTART} = "y" ]] || {
echo "$line"
STARTED=true
continue
}
fi
START=$(date +"$TIME_FORMAT")
[[ -n $ELAPSED ]] && START=$(calcStartTime "$ELAPSED" "$(date +%s)")
# insert tabs if not enough
while [[ $(tr -cd $'\t' <<< "$line" | wc -c) -lt $((INDEX_START-2)) ]]; do line="$line"$'\t'; done
# create new line
line="$(echo -n "$line" | cut -d$'\t' -f -$((INDEX_START-1)))"$'\t'$START
echo "$line"
STARTED=true
else
echo "$line"
fi
done < "$TODAY_FILE" > "$TMPFILE"
exec 0<&3 3<&-
mv "$TMPFILE" "$TODAY_FILE"
printTasksWithGoal
}
# usage: tasktoday (en|end) [time_in_hour]
# End the first unfinished task, stamping its end time (or recomputing
# start/end from a given duration in hours), then automatically start the
# next task at the same instant.
endTask() {
[[ -s "$TODAY_FILE" ]] || { abort "no tasks"; }
DURATION=$(validateNumber "$1")
END_CREATED=false
START_CREATED=false
TMPFILE=$(mktemp)
while read -r line; do
$START_CREATED && { echo "$line"; continue; }
START=$(cut -d$'\t' -f"$INDEX_START" <<< "$line")
END=$(cut -d$'\t' -f"$INDEX_END" <<< "$line")
# auto start next task
if $END_CREATED; then
START=$PREV_END
# "cut -f -2" keeps fields 1..INDEX_START-1; the start time is re-appended.
line="$(echo -n "$line" | cut -d$'\t' -f $(((INDEX_START-1) * -1)))"$'\t'$START
echo "$line"
START_CREATED=true
# the first not end task
elif [[ -z $END ]]; then
END=$(date +"$TIME_FORMAT")
# if start is empty and not first task on the day, then set previous end time for the start time
[[ -z $START ]] && START=$PREV_END
# calc start time with specified value if it is passed
if [[ -n $DURATION ]]; then
if [[ -n $START ]]; then
START_SEC=$(date -jf $TIME_FORMAT "$START" +"%s")
END=$(calcEndTime "$DURATION" "$START_SEC")
else
END_SEC=$(date -jf $TIME_FORMAT "$END" +"%s")
START=$(calcStartTime "$DURATION" "$END_SEC")
fi
fi
# if duration is not specified, and it's the first task on the day, calc start time with estimated time
if [[ -z $START ]]; then
END_SEC=$(date -jf $TIME_FORMAT "$END" +"%s")
ESTIMATED=$(cut -d$'\t' -f"$INDEX_ESTIMATED" <<< "$line")
START=$(calcStartTime "$ESTIMATED" "$END_SEC")
fi
# start time would be never expected to be zero here
[[ -z $START ]] && { eecho "start not found"; exit 1; }
# create new line
line="$(echo -n "$line" | cut -d$'\t' -f $(((INDEX_START-1) * -1)))"$'\t'$START$'\t'$END
echo "$line"
END_CREATED=true
else
echo "$line"
fi
PREV_END=$END
done < "$TODAY_FILE" > "$TMPFILE"
mv "$TMPFILE" "$TODAY_FILE"
printTasksWithGoal
}
# Cancel the current task: blank out its start (and end) fields so it
# returns to the "remaining" state.
cancelTask() {
[[ -s "$TODAY_FILE" ]] || { abort "no tasks"; }
CANCELED=false
TMPFILE=$(mktemp)
while read -r line; do
$CANCELED && { echo "$line"; continue; }
START=$(cut -d$'\t' -f"$INDEX_START" <<< "$line")
END=$(cut -d$'\t' -f"$INDEX_END" <<< "$line")
if [[ -z $END && -n $START ]]; then
# create new line
# Keep fields 1..INDEX_START-1, then append two empty fields.
line="$(echo -n "$line" | cut -d$'\t' -f $(((INDEX_START-1) * -1)))"$'\t'$'\t'
echo "$line"
CANCELED=true
else
echo "$line"
fi
done < "$TODAY_FILE" > "$TMPFILE"
mv "$TMPFILE" "$TODAY_FILE"
printTasksWithGoal
}
# Delete the first not-yet-finished task (running or pending) from the file.
removeTask() {
[[ -s "$TODAY_FILE" ]] || { abort "no tasks"; }
REMOVED=false
TMPFILE=$(mktemp)
while read -r line; do
$REMOVED && { echo "$line"; continue; }
END=$(cut -d$'\t' -f"$INDEX_END" <<< "$line")
if [[ -z $END ]]; then
# ignore the first 'not finished' line
REMOVED=true
else
echo "$line"
fi
done < "$TODAY_FILE" > "$TMPFILE"
mv "$TMPFILE" "$TODAY_FILE"
printTasksWithGoal
}
# Print the tab-separated header row for the task table.
colNames() {
  printf 'ESTIMATE\tTITLE\tSTART\tEND\tREPEAT\tTAGS\n'
}
# Print the whole task file as an aligned table with a header row.
printAllTasks() {
{
colNames
cat "$TODAY_FILE"
} | column -t -s$'\t'
}
# Table plus the projected end-of-day time (on stderr).
printTasksWithGoal() {
printAllTasks
eecho "today goal: $(estimatedEndTime)"
}
######################### main #########################
mkdir -p "$TASKS_DIR"
touch "$TODAY_FILE"
# overwrite if new file created
[[ -s "$TODAY_FILE" ]] || ln -sf "$TODAY_FILE" "$LINK_FILE"
# With no arguments, just show the current state.
[[ $# -eq 0 ]] && {
if [[ -s "$TODAY_FILE" ]]; then
printCurrentTask
printTasksWithGoal
else
echo "no tasks"
fi
exit
}
# Subcommand dispatch; an unrecognized word is treated as a task title.
CMD="$1"
shift
case $CMD in
ad|add) addTask "$@";;
ca|can|cancel) cancelTask "$@";;
del|delete|rm|remove) removeTask "$@";;
clear) :> "$TODAY_FILE";;
en|end) endTask "$@";;
ed|edi|edit) ${EDITOR:-vim} "$TODAY_FILE";;
fin|finished) getTasks finished;;
file|path) echo "$TODAY_FILE";;
in|ins|insert) addTask -i "$@";;
remain) getTasks remain;;
st|sta|start) startTask "$@";;
help) abort "$USAGE";;
*) addTask "$CMD" "$@"
esac
| true
|
d77b69f4b1d2c7bc074246e5adaaeabad3c77f66
|
Shell
|
tkdrob/docker-open-dns-ip-updater
|
/ddclient-init.sh
|
UTF-8
| 622
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint helper: seed a default ddclient config on first run,
# then sanity-check the config by launching ddclient once.
# Search for custom config file, if it doesn't exist, copy the default one
if [ ! -f /config/ddclient.conf ]; then
echo "Creating config file. Please do not forget to enter your info in ddclient.conf."
cp /root/ddclient/ddclient.conf /config/ddclient.conf
chmod a+w /config/ddclient.conf
# Exit non-zero so the container restarts after the user edits the file.
exit 1
fi
# Lets test if the config file is valid, this should result in error we will check for it
# NOTE(review): -foreground makes ddclient block; this line only returns
# when ddclient exits -- confirm that is the intended behaviour here.
/usr/sbin/ddclient -verbose -foreground -file /config/ddclient.conf
loadstatus=$?
if [ $loadstatus -ne 0 ]; then
echo "Could not load /config/ddclient.conf it looks corrupt, please check it." >&2
exit 1
fi
| true
|
d1929fa42c67b55c6a140f9edb83e89dc6d2107d
|
Shell
|
kraffield-eb/dotfiles
|
/new-mac-setup.sh
|
UTF-8
| 1,263
| 3.53125
| 4
|
[] |
no_license
|
# Install Homebrew if missing, otherwise refresh its package index.
getbrew () {
which -s brew
if [[ $? != 0 ]] ; then
echo "homebrew isn't installed."
echo "installing homebrew..."
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
else
echo "updating homebrew"
brew update
fi
}
# Install the commonly-used language toolchains via Homebrew.
install_languages () {
brew install python
brew install python3
brew install go
brew install java
brew install node
brew install swift
}
# Clone the dotfiles repository into ~/dotfiles and symlink the shell and
# vim configuration files into $HOME.
getDotfiles () {
  mkdir -p ~/dotfiles
  git clone https://github.com/mistahchris/dotfiles.git ~/dotfiles
  # create a symlink for .vimrc and .bash_profile in ~
  ln -s ~/dotfiles/.bash_profile ~/.bash_profile
  # BUGFIX: was "~/dotfiles.vimrc" (missing slash), which pointed the link
  # at a nonexistent file instead of the repo's .vimrc.
  ln -s ~/dotfiles/.vimrc ~/.vimrc
}
# Bootstrap a fresh machine: Homebrew, core CLI tools, and dotfiles.
install_basics () {
echo "sweet, a new machine! installing the basics..."
getbrew
which -s git || brew install git
brew install vim
brew install tmux
brew install the_silver_searcher
echo "getting your config files from github"
getDotfiles
}
# Install GUI applications via Homebrew Cask.
install_apps () {
brew cask install google-chrome
brew cask install iterm2
brew cask install lastpass
brew cask install spectacle
brew cask install slack
brew cask install atom
brew cask install evernote
}
|
a7af15ae9d612d8ca192ace6108ba42fd4a0e30f
|
Shell
|
rabbitear/AnchorageBusSkill
|
/publish.sh
|
UTF-8
| 602
| 3.34375
| 3
|
[] |
no_license
|
# Package the ./lambda directory into index.zip and upload it to the
# "anchorgeBus" AWS Lambda function.
if [ -e index.zip ]
then
echo "[+] Deleting old index.zip."
rm index.zip
fi
# The sources must live in ./lambda relative to the cwd.
if [ -d lambda ]
then
cd lambda
else
echo "[*] WARN: could not find lambda directory in cwd."
exit 1
fi
# Zip everything recursively into ../index.zip.
# BUGFIX: the old failure branch had "cd .." AFTER "exit 1" -- dead code.
if ! 7z a -r ../index.zip
then
echo "[*] Something went wrong while zipping."
exit 1
fi
cd ..
echo "[+] Trying to upload code, please wait..."
# Upload; warn (but do not fail) if the update was rejected.
if aws lambda update-function-code --function-name anchorgeBus \
--zip-file fileb://index.zip
then
echo "[+] All done!! Thanks for using the publish script."
else
echo "[*] WARNING, code NOT updated!"
fi
| true
|
0a356f6db3a6def15263525d91f0a600f5ec167b
|
Shell
|
pratikga/Vagrant_scripts
|
/bash_scripts/array
|
UTF-8
| 300
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo: populating and expanding a Bash indexed array.
Name=("Pratik" "Gana" "Varapu")
# Single element by index.
echo "The value of the sring is ${Name[1]}"
# All elements, space-separated.
echo "The value of all the srings are ${Name[@]}"
| true
|
4cfdbb6c3e97e705edeee2a41425143515c5a4b1
|
Shell
|
nalysann/init
|
/network/13
|
UTF-8
| 513
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/zsh
# Thanks to the previous question and the reverse DNS find the name of your host
# Pipeline: take en0's IPv4 address, reverse-resolve it with nslookup, and
# extract the hostname field.  (macOS-specific: en0, BSD ifconfig output.)
ifconfig en0 | grep 'inet ' | awk '{print $2}' | xargs nslookup | grep name | awk '{print $4}'
# alternatively we can use `ifconfig en0 | grep 'inet ' | awk '{print $2}' | xargs host | grep name | awk '{print $5}'`
# alternatively we can use `ifconfig en0 | grep 'inet ' | awk '{print $2}' | xargs dig -x | grep in-addr | sed -n 2p | awk '{print $5}'`
# without using the previous question we can just use `hostname`
| true
|
b75fc57abfd216f1476622d4c6bc000e505064f2
|
Shell
|
scottpunshon/dotfiles
|
/dotfiles/bash_prompt
|
UTF-8
| 379
| 3.15625
| 3
|
[] |
no_license
|
# vim: ft=sh
# Two-line shell prompt; the hostname is shown only over SSH.
_PROMPT_USER='\033[92m\u\033[0m' # bright green
_PROMPT_HOST='\h'
_PROMPT_PWD='\033[94m\w\033[0m' # bright blue
# First segment: plain user locally, user@host for remote sessions.
_PROMPT_WHO="${_PROMPT_USER}"
if [ -n "${SSH_TTY}" ]; then
  _PROMPT_WHO="${_PROMPT_USER}@${_PROMPT_HOST}"
fi
PS1="
${_PROMPT_WHO} ${_PROMPT_PWD}
\$ "
unset _PROMPT_USER _PROMPT_HOST _PROMPT_PWD _PROMPT_WHO
export PS1
| true
|
92916ce468d16fd60d30ce8315340995b65d95b5
|
Shell
|
Ponce/slackbuilds
|
/multimedia/Gem/Gem.SlackBuild
|
UTF-8
| 3,138
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Slackware build script for <Gem>
#
# Michales (clavisound) Michaloudes korgie@gmail.com <2017>
# 20220214 bkw: Modified by SlackBuilds.org:
# - updated for v0.94, as 0.93.3 won't build on Slackware 15.0.
# - moved pkg-config stuff to proper place (/usr/lib64 on x86_64).
cd $(dirname $0) ; CWD=$(pwd)
PRGNAM=Gem
VERSION=${VERSION:-0.94}
BUILD=${BUILD:-1}
TAG=${TAG:-_SBo}
PKGTYPE=${PKGTYPE:-tgz}
# Auto-detect architecture unless the caller overrides $ARCH.
if [ -z "$ARCH" ]; then
case "$( uname -m )" in
i?86) ARCH=i586 ;;
arm*) ARCH=arm ;;
*) ARCH=$( uname -m ) ;;
esac
fi
# Template hook: print the final package filename and stop.
if [ ! -z "${PRINT_PACKAGE_NAME}" ]; then
echo "$PRGNAM-$VERSION-$ARCH-$BUILD$TAG.$PKGTYPE"
exit 0
fi
TMP=${TMP:-/tmp/SBo}
PKG=$TMP/package-$PRGNAM
OUTPUT=${OUTPUT:-/tmp}
# Per-arch compiler flags and library dir suffix (lib vs lib64).
if [ "$ARCH" = "i586" ]; then
SLKCFLAGS="-O2 -march=i586 -mtune=i686"
LIBDIRSUFFIX=""
elif [ "$ARCH" = "i686" ]; then
SLKCFLAGS="-O2 -march=i686 -mtune=i686"
LIBDIRSUFFIX=""
elif [ "$ARCH" = "x86_64" ]; then
SLKCFLAGS="-O2 -fPIC"
LIBDIRSUFFIX="64"
else
SLKCFLAGS="-O2"
LIBDIRSUFFIX=""
fi
set -e
# The upstream archive name contains brackets/parens; accept either the
# literal or the percent-encoded download name.
ZIPFILE="$CWD/Gem[v0.94](Sources).dek"
[ -e "$ZIPFILE" ] || ZIPFILE="$CWD/Gem%5Bv0.94%5D%28Sources%29.dek"
rm -rf $PKG
mkdir -p $TMP $PKG $OUTPUT
cd $TMP
rm -rf $PRGNAM
unzip $ZIPFILE
cd $PRGNAM
chown -R root:root .
# Normalize permissions to 755/644 across the source tree.
find -L . \
\( -perm 777 -o -perm 775 -o -perm 750 -o -perm 711 -o -perm 555 \
-o -perm 511 \) -exec chmod 755 {} \+ -o \
\( -perm 666 -o -perm 664 -o -perm 640 -o -perm 600 -o -perm 444 \
-o -perm 440 -o -perm 400 \) -exec chmod 644 {} \+
# I think useless
# patch externals/Gem/configure.ac < $CWD/change_gem_configure_file.patch
# Cannot manage to compile with v4l2
# sed -i "s|linux/videodev\.h|libv4l1-videodev.h|" configure || exit 1
./autogen.sh
CFLAGS="$SLKCFLAGS" \
CXXFLAGS="$SLKCFLAGS -std=c++11 -fpermissive" \
./configure \
--prefix=/usr \
--libdir=/usr/lib${LIBDIRSUFFIX} \
--includedir=/usr/include \
--sysconfdir=/etc \
--localstatedir=/var \
--mandir=/usr/man \
--docdir=/usr/doc/$PRGNAM-$VERSION \
--with-pd=/usr/include/pd \
--without-v4l2 \
--build=$ARCH-slackware-linux
# --without-v4l2 or compilation fails
make
# This is the normal destination, but it goes to $PKG/usr/lib${LIBDIRSUFFIX}/pd/extra/usr/lib${LIBDIRSUFFIX}/$PRGNAM
# make install DESTDIR=$PKG/usr/lib${LIBDIRSUFFIX}/pd/extra
# will go to $PKG/usr/lib${LIBDIRSUFFIX}/$PRGNAM
make install-strip DESTDIR=$PKG
# On 64-bit, relocate pkg-config files from lib to lib64.
if [ -n "$LIBDIRSUFFIX" ] ; then
mv $PKG/usr/lib/pkgconfig $PKG/usr/lib$LIBDIRSUFFIX
fi
rmdir $PKG/usr/lib 2>/dev/null || true # does nothing on 32-bit.
mkdir -p $PKG/usr/doc/$PRGNAM-$VERSION
# link the plugin to the proper folder
mkdir -p $PKG/usr/lib${LIBDIRSUFFIX}/pd/extra
cd $PKG/usr/lib${LIBDIRSUFFIX}/pd/extra
ln -s ../../Gem ./
cd $PKG/usr/lib${LIBDIRSUFFIX}/$PRGNAM/
# Move the shipped documentation into the standard doc directory.
mv COPYING.txt ChangeLog GemPrimer.pdf README.txt gem.known_bugs.txt \
gem.release_notes.txt gem.todo.txt \
$PKG/usr/doc/$PRGNAM-$VERSION
cat $CWD/$PRGNAM.SlackBuild > $PKG/usr/doc/$PRGNAM-$VERSION/$PRGNAM.SlackBuild
mkdir -p $PKG/install
cat $CWD/slack-desc > $PKG/install/slack-desc
cd $PKG
/sbin/makepkg -l y -c n $OUTPUT/$PRGNAM-$VERSION-$ARCH-$BUILD$TAG.$PKGTYPE
| true
|
d00b4a511f1327e18967e0ea2429bceeaf1f3015
|
Shell
|
nathan8299/OSX_MediaCenter_MountainLion
|
/scripts/install_plex_server_plexwatchweb.sh
|
UTF-8
| 1,702
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
echo "#------------------------------------------------------------------------------"
echo "# Install plexWebWatch for Plex Media Server"
echo "#------------------------------------------------------------------------------"
# https://github.com/ecleese/plexWatchWeb
# Pull in color variables (GREEN/RED/BLUE/RESET, $col) and other settings.
source ../config.sh
# Preconditions: Plex Media Server and PlexWatch must already be installed.
# NOTE(review): these printf format strings contain no %s placeholders, so
# the color/status arguments after them are not interpolated -- confirm the
# intended format (likely something like '%s%*s%s\n').
if [ -e /Applications/Plex\ Media\ Server.app ] ; then
printf 'Plex Server found\n' "$GREEN" $col '[OK]' "$RESET"
else
printf 'Plex Server not installed, something went wrong\n' "$RED" $col '[FAIL]' "$RESET"
echo -e "${BLUE} --- press any key to continue --- ${RESET}"
read -n 1 -s
exit
fi
if [ -d /Users/PlexWatch/plexWatch ] ; then
printf 'PlexWatch found\n' "$GREEN" $col '[OK]' "$RESET"
else
printf 'PlexWatch not installed, something went wrong\n' "$RED" $col '[FAIL]' "$RESET"
echo -e "${BLUE} --- press any key to continue --- ${RESET}"
read -n 1 -s
exit
fi
# Create the web root, expose it through the OS X Server default site,
# and fetch the plexWatchWeb sources.
[ -d /Users/PlexWatch/Sites/plexWatchWeb ] || mkdir -p /Users/PlexWatch/Sites/plexWatchWeb
sudo chown `whoami` /Users/PlexWatch/Sites/plexWatchWeb
sudo ln -s /Users/PlexWatch/Sites/plexWatchWeb /Library/Server/Web/Data/Sites/Default/plexwatch
cd /Users/PlexWatch/Sites/plexWatchWeb
git clone https://github.com/ecleese/plexWatchWeb
# PlexWatch
# Perl module dependencies required by PlexWatch.
sudo cpan install Time::Duration
sudo cpan install Time::ParseDate
sudo cpan install Net::Twitter::Lite::WithAPIv1_1
sudo cpan install Net::OAuth
sudo cpan install Mozilla::CA
sudo cpan install JSON
echo "#------------------------------------------------------------------------------"
echo "# Install plexWebWatch for Plex Media Server - Complete"
echo "#------------------------------------------------------------------------------"
| true
|
e5a54b79753b696c81d0a3783c846b6aa43572c2
|
Shell
|
adelgadop/hpc-workshop-wrf
|
/pc_setup_scripts/git_download_and_run.sh
|
UTF-8
| 814
| 3.625
| 4
|
[
"MIT-0",
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# AWS ParallelCluster post-install hook.  Post-install args are a
# comma-separated list: "<github_repo_url>,<setup_command>".  The head node
# clones the repo into the shared folder; every node then runs the setup
# command from inside the clone.
#Load Parallelcluster environment variables
. /etc/parallelcluster/cfnconfig
github_repo=$(echo ${cfn_postinstall_args}| cut -d ',' -f 1 )
setup_command=$(echo ${cfn_postinstall_args}| cut -d ',' -f 2 )
shared_folder=$(echo $cfn_shared_dir | cut -d ',' -f 1 )
# BUGFIX: diagnostic said "ARUMENTS".
echo "ARGUMENTS $cfn_postinstall_args"
echo "REPO: ${github_repo}"
echo "SETUP COMMAND: ${setup_command}"
echo "SHARED FOLDER: ${shared_folder}"
# Repo directory name, e.g. ".../hpc-workshop.git" -> "hpc-workshop".
dir_name=$(basename -s .git ${github_repo})
case ${cfn_node_type} in
MasterServer)
echo "I am Master node"
# Only the head node clones; compute nodes see it via the shared mount.
cd ${shared_folder}
git clone ${github_repo}
;;
ComputeFleet)
echo "I am a Compute node"
;;
esac
cd ${shared_folder}/${dir_name}
# Run the setup script with tracing, logging to /tmp/setup.log, and
# propagate its exit status as ours.
bash -x ${setup_command} >/tmp/setup.log 2>&1
exit $?
| true
|
4cb019e4680c0b79cb47368a223b045a37cccb2f
|
Shell
|
shellingford330/A-Tour-of-Go
|
/bin/init
|
UTF-8
| 125
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a new Go source file from the personal template.
# Usage: init <filename>
if test $# -eq 1
then
# Quote "$1" so filenames containing spaces work.
cat ~/Documents/go/bin/template.go > "$1"
else
# BUGFIX: report misuse on stderr and exit non-zero (previously the
# message went to stdout and the script exited 0).
echo "(Error): Please set a file name." >&2
exit 1
fi
| true
|
00815fa528dde88def21058106b5610bb9856c79
|
Shell
|
schopman/NoTube-ClioPatria-services
|
/data/dbpedia/fetch.sh
|
UTF-8
| 806
| 3.265625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Download DBpedia 3.6 dumps and recompress them from bzip2 to gzip.
# Fetch $1.bz2 from the DBpedia 3.6 area $2 and recompress it as $1.gz.
fixDBpediaFile ( ) {
: ${1?}
ZIPPED=$1.bz2
wget http://downloads.dbpedia.org/3.6/$2/$ZIPPED
bzip2 -cd $ZIPPED | gzip > $1.gz
rm $ZIPPED
echo "* fixed: $1"
}
# English-language dataset files live under the "en" directory.
fixDBpediaDataset ( ) {
: ${1?}
fixDBpediaFile $1 en
}
# Cross-dataset alignment files live under the "links" directory.
fixDBpediaAlignment ( ) {
: ${1?}
fixDBpediaFile $1 links
}
# The ontology is fetched uncompressed (plain .owl).
FILE=dbpedia_3.6.owl
ZIPPED=$FILE.bz2
wget http://downloads.dbpedia.org/3.6/$ZIPPED
bzip2 -cd $ZIPPED > $FILE
rm $ZIPPED
fixDBpediaAlignment freebase_links.nt
# mkdir categories
# cd categories
# fixDBpediaDataset skos_categories_en.nt
# fixDBpediaDataset article_categories_en.nt
# fixDBpediaDataset category_labels_en.nt
# cd ..
mkdir infobox
cd infobox
fixDBpediaDataset mappingbased_properties_en.nt
# Extract just the "influenced" triples into their own gzipped file.
zgrep influenced mappingbased_properties_en.nt.gz | gzip > mappingbased_props_influences.nt.gz
cd ..
| true
|
4256364d304e6928d6572862dc40b0e4548f3463
|
Shell
|
ashhher3/pyDatasets
|
/bin/get_physionet_challenge2012_variables.sh
|
UTF-8
| 179
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Collect the distinct variable names used in the PhysioNet Challenge 2012
# data (sets A and B) into $OUTDIR/variables-from-data.txt.
# Usage: get_physionet_challenge2012_variables.sh DATADIR OUTDIR
DATADIR=$1
OUTDIR=$2
CURRDIR=$(pwd)
# Abort instead of silently running in the wrong directory (previously an
# empty/invalid DATADIR fell through to $HOME).
cd "$DATADIR" || exit 1
# Each record line is "time,VARIABLE,value": keep the middle field,
# de-duplicate, and write the sorted list of variable names.
cat set-a/*.txt set-b/*.txt | sed 's/.*,\(.*\),.*$/\1/g' | sort | uniq > "$OUTDIR/variables-from-data.txt"
cd "$CURRDIR"
| true
|
e29d5f84f0bfd0a95159d03695635acea10f4d9b
|
Shell
|
LuiGGi629/dotfiles
|
/install.sh
|
UTF-8
| 9,095
| 3.6875
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env zsh
set -Eeuxo pipefail
######### Pre-checks #########
# Detect platform.
if [ "$(uname -s)" != "Darwin" ]; then
    echo "These dotfiles only targets macOS."
    exit 1
fi
# Check current shell interpreter.
# NOTE(review): under `set -e`/pipefail, a non-matching grep aborts the
# script before the $? check below -- the error message may be unreachable.
ps -p $$ | grep "zsh"
if [ $? != 0 ]; then
    echo "These dotfiles were only tested with Zsh shell."
    exit 1
fi
# Check if SIP is going to let us mess with some part of the system.
if [[ "$(csrutil status | grep --quiet "disabled"; echo $?)" -ne 0 ]]; then
    echo "System Integrity Protection (SIP) is enabled."
else
    echo "System Integrity Protection (SIP) is disabled."
fi
######### Sudo keep-alive #########
# Source: https://gist.github.com/cowboy/3118588
# Ask for the administrator password upfront.
# Ignore the following error returns within GitHub actions workflows:
#   sudo: a terminal is required to read the password; either use the -S option to
#   read from standard input or configure an askpass helper
sudo --validate || true
# Update existing `sudo` time stamp until script has finished.
while true; do sleep 60; sudo --non-interactive true; kill -0 "$$" || exit; done 2> /dev/null &
######### Basic dependencies #########
# TODO: install git here.
######### Dotfiles install #########
# Search local dotfiles
DOT_FILES=$(command find ./dotfiles -maxdepth 1 -not -path './dotfiles' -not -name '\.DS_Store')
# zsh-specific loop: ${(f)VAR} splits the find output on newlines.
for FILEPATH (${(f)DOT_FILES}); do
    SOURCE="${PWD}/$FILEPATH"
    TARGET="${HOME}/$(basename "${FILEPATH}")"
    # Link files
    # Back up any pre-existing real file before replacing it with a symlink.
    if [ -e "${TARGET}" ] && [ ! -L "${TARGET}" ]; then
        mv "$TARGET" "$TARGET.dotfiles.bak"
    fi
    ln -sf "${SOURCE}" "$(dirname "${TARGET}")"
done
######### System upgrades #########
# Update all macOS packages.
sudo softwareupdate --install --all
######### Brew install #########
# Check if homebrew is already installed
# This also install xcode command line tools
if test ! "$(command -v brew)"
then
    # Install Homebrew without prompting for user confirmation.
    # See: https://github.com/Homebrew/install/pull/139
    CI=true /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
fi
brew analytics off
# Refresh our local copy of package index.
brew update
# Upgrade Python ourselves instead of relying to the common "brew upgrade"
# below. This way we fix the following issue:
#   Error: The `brew link` step did not complete successfully
#   The formula built, but is not symlinked into /usr/local
#   Could not symlink bin/2to3
#   Target /usr/local/bin/2to3 already exists. You may want to remove it:
#   rm '/usr/local/bin/2to3'
brew upgrade python
brew link --overwrite python
# Fetch latest packages.
brew upgrade
# Add services.
brew tap homebrew/services
# Load package lists to install.
# Defines $BREW_PACKAGES and $CASK_PACKAGES consumed just below.
source ./packages.sh
# Install brew packages.
for PACKAGE (${(f)BREW_PACKAGES}) brew install --formula "$PACKAGE"
# Install cask packages.
for PACKAGE (${(f)CASK_PACKAGES}) brew install --cask "$PACKAGE"
# htop-osx requires root privileges to correctly display all running processes.
sudo chown root:wheel "$(brew --prefix)/bin/htop"
sudo chmod u+s "$(brew --prefix)/bin/htop"
# Activate auto MAC Address spoofing.
sudo brew services start spoof-mac
######### Mac App Store packages #########
# Install Mac App Store CLI and upgrade all apps.
brew install mas
mas upgrade
# Remove Pages and GarageBand.
sudo rm -rf /Applications/GarageBand.app
sudo rm -rf /Applications/Pages.app
# Install Numbers and Keynotes
mas install 409183694
mas install 409203825
# Install 1Password.
mas install 1333542190
open -a "1Password 7"
# Activate Safari extension.
# Source: https://github.com/kdeldycke/kevin-deldycke-blog/blob/main/content/posts/macos-commands.md
pluginkit -e use -i com.agilebits.onepassword7.1PasswordSafariAppExtension
# WiFi Explorer Lite
mas install 1408727408
# Open apps so I'll not forget to login
open -a Dropbox
open -a adguard
# Spark - Email App by Readdle
mas install 1176895641
# Microsoft Remote Desktop
mas install 1295203466
# Install QuickLooks plugins
# Source: https://github.com/sindresorhus/quick-look-plugins
brew install --cask epubquicklook
brew install --cask qlcolorcode
brew install --cask qlimagesize
brew install --cask qlmarkdown
brew install --cask qlstephen
brew install --cask qlvideo
brew install --cask quicklook-json
brew install --cask suspicious-package
# Fix "QL*.qlgenerator cannot be opened because the developer cannot be verified."
# Strip the quarantine attribute Gatekeeper puts on downloaded plugins.
xattr -cr ~/Library/QuickLook/QLColorCode.qlgenerator
xattr -cr ~/Library/QuickLook/QLMarkdown.qlgenerator
xattr -cr ~/Library/QuickLook/QLStephen.qlgenerator
# Clear plugin cache
qlmanage -r
qlmanage -r cache
# Install and configure Google Cloud Storage bucket mount point.
brew install gcsfuse
mkdir -p "${HOME}/gcs"
GOOGLE_APPLICATION_CREDENTIALS=~/.google-cloud-auth.json gcsfuse --implicit-dirs backup-imac-restic ./gcs
# Mount doesn't work as macOS doesn't let us register a new filesystem plugin.
# See: https://github.com/GoogleCloudPlatform/gcsfuse/issues/188
# sudo ln -s /usr/local/sbin/mount_gcsfuse /sbin/
# mount -t gcsfuse -o rw,user,keyfile="${HOME}/.google-cloud-auth.json" backup-imac-restic "${HOME}/gcs"
# Configure swiftbar.
defaults write com.ameba.SwiftBar PluginDirectory "~/.swiftbar"
defaults write com.ameba.SwiftBar SUHasLaunchedBefore 1
wget -O "${HOME}/.swiftbar/btc.17m.sh" https://github.com/matryer/bitbar-plugins/raw/master/Cryptocurrency/Bitcoin/bitstamp.net/last.10s.sh
sed -i "s/Bitstamp: /Ƀ/" "${HOME}/.swiftbar/btc.17m.sh"
wget -O "${HOME}/.swiftbar/brew-services.7m.rb" https://github.com/matryer/bitbar-plugins/raw/master/Dev/Homebrew/brew-services.10m.rb
chmod +x ${HOME}/.swiftbar/*.{sh,py,rb}
open -a SwiftBar
# Open Tor Browser at least once in the background to create a default profile.
# Then close it after a while to not block script execution.
open --wait-apps -g -a "Tor Browser" & sleep 20s; killall "firefox"
# Show TorBrowser bookmark toolbar.
TB_CONFIG_DIR=$(command find "${HOME}/Library/Application Support/TorBrowser-Data/Browser" -maxdepth 1 -iname "*.default")
tee -a "$TB_CONFIG_DIR/xulstore.json" <<-EOF
{"chrome://browser/content/browser.xhtml": {
"PersonalToolbar": {"collapsed": "false"}
}}
EOF
# Set TorBrowser bookmarks in toolbar.
# Source: https://yro.slashdot.org/story/16/06/08/151245/kickasstorrents-enters-the-dark-web-adds-official-tor-address
# CSV-ish records: url,title,keyword1,keyword2 (consumed further below).
BOOKMARKS="
https://protonirockerxow.onion,ProtonMail,ehmwyurmkort,eqeiuuEyivna
http://piratebayztemzmv.onion,PirateBay,nnypemktnpya,dvzeeooowsgx
"
TB_BOOKMARK_DB="$TB_CONFIG_DIR/places.sqlite"
# Remove all bookmarks from the toolbar.
sqlite3 -echo -header -column "$TB_BOOKMARK_DB" "DELETE FROM moz_bookmarks WHERE parent=(SELECT id FROM moz_bookmarks WHERE guid='toolbar_____'); SELECT * FROM moz_bookmarks;"
# Add bookmarks one by one.
for BM_INFO (${(f)BOOKMARKS})
do
BM_URL=$(echo $BM_INFO | cut -d',' -f1)
BM_TITLE=$(echo $BM_INFO | cut -d',' -f2)
BM_GUID1=$(echo $BM_INFO | cut -d',' -f3)
BM_GUID2=$(echo $BM_INFO | cut -d',' -f4)
sqlite3 -echo -header -column "$TB_BOOKMARK_DB" "INSERT OR REPLACE INTO moz_places(url, hidden, guid, foreign_count) VALUES('$BM_URL', 0, '$BM_GUID1', 1); INSERT OR REPLACE INTO moz_bookmarks(type, fk, parent, title, guid) VALUES(1, (SELECT id FROM moz_places WHERE guid='$BM_GUID1'), (SELECT id FROM moz_bookmarks WHERE guid='toolbar_____'), '$BM_TITLE', '$BM_GUID2');"
done
sqlite3 -echo -header -column "$TB_BOOKMARK_DB" "SELECT * FROM moz_bookmarks; SELECT * FROM moz_places;"
# Force installation of uBlock origin
wget https://addons.mozilla.org/firefox/downloads/latest/ublock-origin/addon-607454-latest.xpi -O "$TB_CONFIG_DIR/extensions/uBlock0@raymondhill.net.xpi"
# Open IINA at least once in the background to let it register its Safari extension.
# Then close it after a while to not block script execution.
# This also pop-up a persistent, but non-blocking dialog:
# "XXX.app is an app downloaded from the Internet. Are you sure you want to open it?"
open --wait-apps -g -a "IINA" & sleep 20s; killall "IINA"
# Clean things up.
brew cleanup
brew services cleanup
# Use latest pip.
python -m pip install --upgrade pip
# Install & upgrade all global python modules
for p (${(f)PYTHON_PACKAGES}) python -m pip install --upgrade "$p"
# Install Visual Studio Code extensions.
for ext (${(f)VSCODE_PLUGINS}) code --install-extension "$ext"
# Generate pip and poetry completion.
python -m pip completion --zsh > ~/.zfunc/_pip
poetry completions zsh > ~/.zfunc/_poetry
_MPM_COMPLETE=source_zsh mpm > ~/.zfunc/_mpm
# Force Neovim plugin upgrades
nvim -c "try | call dein#update() | finally | qall! | endtry"
# Install zinit
sh -c "$(curl -fsSL https://raw.githubusercontent.com/zdharma/zinit/master/doc/install.sh)"
# Fix "zsh compinit: insecure directories" error.
sudo chown -R $(whoami) /usr/local/share/zsh /usr/local/share/zsh/site-functions
chmod u+w /usr/local/share/zsh /usr/local/share/zsh/site-functions
# Force zinit self-upgrade.
zinit self-update
zinit update
# Configure everything.
source ./macos-config.sh
| true
|
2b6b329996c8f0a9c683f7405d81eb43905eaa6e
|
Shell
|
akshayuprabhu/labWork3rdSem
|
/UNIX/Assignments/Assignment_3/sum_of_cube.sh
|
UTF-8
| 413
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Prints every number from 1 to 999 whose digits' cubes sum to the number
# itself (narcissistic numbers of order 3: 1, 153, 370, 371, 407).
echo "This program displays numbers from 1 to 999 whose sum of cube of digits is equal to the number"

# sum_of_digit_cubes NUM -> prints the sum of the cubes of NUM's digits.
# Pure shell arithmetic; the original forked `expr` and `python` per digit.
sum_of_digit_cubes() {
  local n=$1 digit sum=0
  while (( n > 0 )); do
    digit=$(( n % 10 ))
    sum=$(( sum + digit * digit * digit ))
    n=$(( n / 10 ))
  done
  echo "$sum"
}

for (( num = 1; num < 1000; num++ )); do
  if [ "$(sum_of_digit_cubes "$num")" -eq "$num" ]; then
    echo "$num"
  fi
done
echo Done
| true
|
5cb29fe107d61feda3cbeb847485cc8a7757bc72
|
Shell
|
Bjay1435/capstone
|
/rootfs/var/lib/dpkg/info/openssh-server.postrm
|
UTF-8
| 2,011
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
set -e
# Automatically added by dh_installdeb
dpkg-maintscript-helper mv_conffile /etc/pam.d/ssh /etc/pam.d/sshd 1:4.7p1-4~ -- "$@"
# End automatically added section
# Automatically added by dh_systemd_enable
if [ "$1" = "remove" ]; then
if [ -x "/usr/bin/deb-systemd-helper" ]; then
deb-systemd-helper mask ssh.socket >/dev/null
fi
fi
if [ "$1" = "purge" ]; then
if [ -x "/usr/bin/deb-systemd-helper" ]; then
export _DEB_SYSTEMD_HELPER_PURGE=1
deb-systemd-helper disable ssh.socket >/dev/null
deb-systemd-helper unmask ssh.socket >/dev/null
fi
fi
# End automatically added section
# Automatically added by dh_systemd_enable
if [ "$1" = "remove" ]; then
if [ -x "/usr/bin/deb-systemd-helper" ]; then
deb-systemd-helper mask ssh.service >/dev/null
fi
fi
if [ "$1" = "purge" ]; then
if [ -x "/usr/bin/deb-systemd-helper" ]; then
export _DEB_SYSTEMD_HELPER_PURGE=1
deb-systemd-helper disable ssh.service >/dev/null
deb-systemd-helper unmask ssh.service >/dev/null
fi
fi
# End automatically added section
# Automatically added by dh_installdebconf
if [ "$1" = purge ] && [ -e /usr/share/debconf/confmodule ]; then
. /usr/share/debconf/confmodule
db_purge
fi
# End automatically added section
case $1 in
purge)
# Remove all non-conffiles that ssh might create, so that we
# can smoothly remove /etc/ssh if and only if the user
# hasn't dropped some other files in there. Conffiles have
# already been removed at this point.
rm -f /etc/ssh/ssh_host_key /etc/ssh/ssh_host_key.pub
rm -f /etc/ssh/ssh_host_rsa_key /etc/ssh/ssh_host_rsa_key.pub
rm -f /etc/ssh/ssh_host_dsa_key /etc/ssh/ssh_host_dsa_key.pub
rm -f /etc/ssh/ssh_host_ecdsa_key /etc/ssh/ssh_host_ecdsa_key.pub
rm -f /etc/ssh/ssh_host_ed25519_key /etc/ssh/ssh_host_ed25519_key.pub
rm -f /etc/ssh/sshd_config
rm -f /etc/ssh/sshd_not_to_be_run
rmdir --ignore-fail-on-non-empty /etc/ssh
if which deluser >/dev/null 2>&1; then
deluser --quiet sshd > /dev/null || true
fi
;;
esac
exit 0
| true
|
4da2c619db633c876767033c0d7fb4a3b4755e06
|
Shell
|
anchitjain1234/irproject2
|
/installation.sh
|
UTF-8
| 884
| 3.015625
| 3
|
[] |
no_license
|
# Interactive installer for this project's dependencies.
# Each prompt loops until the user answers yes ([Yy]*) or no ([Nn]*).

# Scrapy 0.24: register the (legacy) archive.scrapy.org apt repository and
# its signing key, then install from it.
while true; do
    read -p "Do you want to install Scrapy?" yn
    case $yn in
        [Yy]* ) sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 627220E7;echo 'deb http://archive.scrapy.org/ubuntu scrapy main' | sudo tee /etc/apt/sources.list.d/scrapy.list;sudo apt-get update && sudo apt-get install scrapy-0.24 ; break ;;
        [Nn]* ) break;;
        * ) echo "Please answer yes or no.";;
    esac
done

# Beautiful Soup 4 (Python 2 package name: python-bs4).
while true; do
    read -p "Do you want to install Beautiful Soup?" yn
    case $yn in
        [Yy]* ) sudo apt-get install python-bs4;break;;
        [Nn]* ) break;;
        * ) echo "Please answer yes or no.";;
    esac
done

# Matplotlib for Python 3. Note this last prompt uses `exit` (not `break`)
# in both branches, so the script terminates here either way.
while true; do
    read -p "Do you want to install Matplotlib?" yn
    case $yn in
        [Yy]* ) sudo apt-get install python3-matplotlib; exit ;;
        [Nn]* ) exit;;
        * ) echo "Please answer yes or no.";;
    esac
done
| true
|
2e97302579a87b60e4c9a2495c4f62c4c27d4633
|
Shell
|
NathanFaught/datahub
|
/metadata-ingestion/scripts/codegen.sh
|
UTF-8
| 381
| 2.671875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] |
permissive
|
#!/bin/bash
# Regenerate the Python metadata classes from the built Avro schema.
set -euxo pipefail

# Destination package for the generated code (wiped and recreated below).
OUTDIR=./src/datahub/metadata

# Note: this assumes that datahub has already been built with `./gradlew build`.
DATAHUB_ROOT=..

# Copy the built schema next to this script, regenerate the bindings from it,
# then remove the temporary schema copy.
cp $DATAHUB_ROOT/metadata-events/mxe-schemas/src/renamed/avro/com/linkedin/mxe/MetadataChangeEvent.avsc .
rm -r $OUTDIR || true
python scripts/avro_codegen.py MetadataChangeEvent.avsc $OUTDIR
rm MetadataChangeEvent.avsc
| true
|
99f5298571b2cdfe5a775ad3687ffc5a5482570a
|
Shell
|
vixie/mhdb
|
/mhdb-build.sh
|
UTF-8
| 266
| 3.34375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/sh
# Rebuild the mhdb message index for one MH mail folder.
# Usage: mhdb-build.sh +folder
# Prepends the mhdb source dir so its mhpath/mhdb-add binaries are found first.
PATH=/home/vixie/src/mhdb:$PATH
# The single argument must be an MH folder name (leading '+').
case "$1" in
+*) folder=$1 ;;
*) echo usage: $0 +folder; exit 1
esac
path=`mhpath $folder`
if [ ! -d $path ]; then
    echo $0: not a folder: $folder
    exit 1
fi
# Drop any stale index, then feed every message path in the folder to
# mhdb-add on stdin ('-') to rebuild it from scratch.
rm -f $path/mhindex.db
mhpath all $folder | mhdb-add -
exit
| true
|
e1aa6f17cdc422e6f91f7d2ece2959939da4647f
|
Shell
|
piotr-yuxuan/nile
|
/packer/scripts/installer-ssm-agent.sh
|
UTF-8
| 413
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Download and install the AWS SSM agent .deb on a Debian-based host.
# Options are set here (not in the shebang) so they survive `bash script.sh`.
set -xe

# check if it runs as root
[ "$(id -u)" != "0" ] && echo "ERROR: The script needs to be executed as root user.." && exit 1

DEB_FILE=/tmp/amazon-ssm-agent.deb
DEPLOYMENT_REGION=eu-west-1

# BUGFIX: the original passed both -O (followed by a stray
# "amazon-ssm-agent.db" argument, which curl treated as a second URL) and
# -o; download with a single -o to the intended path, failing on HTTP errors.
curl -fSL "https://s3.${DEPLOYMENT_REGION}.amazonaws.com/amazon-ssm-${DEPLOYMENT_REGION}/latest/debian_amd64/amazon-ssm-agent.deb" -o "$DEB_FILE"

sudo apt install "$DEB_FILE"
rm -f "$DEB_FILE"
| true
|
f9d14fc2b91f474e360e604bd3fbae32d910b0da
|
Shell
|
ChadwickCSP/AdventureQuest
|
/support/install-nvm.command
|
UTF-8
| 1,820
| 2.515625
| 3
|
[] |
no_license
|
brew update
brew install nvm
mkdir -p ~/.nvm
INSTALL_LOCATION="$(brew --prefix nvm)"
echo '' >> ~/.bash_profile
echo 'export NVM_DIR="$HOME/.nvm"' >> ~/.bash_profile
echo "[ -s \"$INSTALL_LOCATION/nvm.sh\" ] && . \"$INSTALL_LOCATION/nvm.sh\" # This loads nvm" >> ~/.bash_profile
echo "[ -s \"$INSTALL_LOCATION/etc/bash_completion.d/nvm\" ] && . \"$INSTALL_LOCATION/etc/bash_completion.d/nvm\" # This loads nvm bash_completion" >> ~/.bash_profile
echo '' >> ~/.bashrc
echo 'export NVM_DIR="$HOME/.nvm"' >> ~/.bashrc
echo "[ -s \"$INSTALL_LOCATION/nvm.sh\" ] && . \"$INSTALL_LOCATION/nvm.sh\" # This loads nvm" >> ~/.bashrc
echo "[ -s \"$INSTALL_LOCATION/etc/bash_completion.d/nvm\" ] && . \"$INSTALL_LOCATION/etc/bash_completion.d/nvm\" # This loads nvm bash_completion" >> ~/.bashrc
echo '' >> ~/.bash_rc
echo 'export NVM_DIR="$HOME/.nvm"' >> ~/.bash_rc
echo "[ -s \"$INSTALL_LOCATION/nvm.sh\" ] && . \"$INSTALL_LOCATION/nvm.sh\" # This loads nvm" >> ~/.bash_rc
echo "[ -s \"$INSTALL_LOCATION/etc/bash_completion.d/nvm\" ] && . \"$INSTALL_LOCATION/etc/bash_completion.d/nvm\" # This loads nvm bash_completion" >> ~/.bash_rc
echo '' >> ~/.zshrc
echo 'export NVM_DIR="$HOME/.nvm"' >> ~/.zshrc
echo "[ -s \"$INSTALL_LOCATION/nvm.sh\" ] && . \"$INSTALL_LOCATION/nvm.sh\" # This loads nvm" >> ~/.zshrc
echo "[ -s \"$INSTALL_LOCATION/etc/bash_completion.d/nvm\" ] && . \"$INSTALL_LOCATION/etc/bash_completion.d/nvm\" # This loads nvm bash_completion" >> ~/.zshrc
echo '' >> ~/.zprofile
echo 'export NVM_DIR="$HOME/.nvm"' >> ~/.zprofile
echo "[ -s \"$INSTALL_LOCATION/nvm.sh\" ] && . \"$INSTALL_LOCATION/nvm.sh\" # This loads nvm" >> ~/.zprofile
echo "[ -s \"$INSTALL_LOCATION/etc/bash_completion.d/nvm\" ] && . \"$INSTALL_LOCATION/etc/bash_completion.d/nvm\" # This loads nvm bash_completion" >> ~/.zprofile
| true
|
fae58608de7470960fb939d03fbaa557ea6e5128
|
Shell
|
baojiweicn/Misstar-Tools
|
/appstore/R3/ss/script/ss
|
UTF-8
| 12,118
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh /etc/rc.common
#----------------------------------------------------------------
# Shell Name:ss
# Description:Plug-in startup script
# Author:Starry
# E-mail: starry@misstar.com
# Time:2016-11-06 02:30 CST
# Copyright © 2016 Misstar Tools. All rights reserved.
#----------------------------------------------------------------*/
START=95
SERVICE_USE_PID=1
SERVICE_WRITE_PID=1
SERVICE_DAEMONIZE=1
. /etc/misstar/scripts/MTbase
EXTRA_COMMANDS=" status version dnsstatus dnsconfig"
EXTRA_HELP=" status Get shadowsocks status
dnsstatus Get dns status
version Get Misstar Tools Version"
wan_mode=`ifconfig | grep pppoe-wan | wc -l`
if [ "$wan_mode" = '1' ];then
wanip=$(ifconfig pppoe-wan | grep "inet addr:" | cut -d: -f2 | awk '{print$1}')
else
wanip=$(ifconfig eth0.2 | grep "inet addr:" | cut -d: -f2 | awk '{print$1}')
fi
#lanip=$(ifconfig br-lan | grep "inet addr:" | cut -d: -f2 | awk '{print$1}')
lanip=$(uci get network.lan.ipaddr)
redip=$lanip
CONFIG=/etc/misstar/applications/ss/config/shadowsocks.json
DNSCONF=/etc/misstar/applications/ss/config/dns2socks.conf
chnroute=/etc/misstar/applications/ss/config/chnroute.conf
chnroute_user=/etc/misstar/applications/ss/config/chnroute_customize.conf
iplist=/etc/misstar/applications/ss/config/iplist
APPPATH=/etc/misstar/applications/ss/bin/ss-redir
LOCALPATH=/etc/misstar/applications/ss/bin/ss-local
DNSPATH=/etc/misstar/applications/ss/bin/dns2socks
appname=misstar
ss_getconfig() {
local_ip=0.0.0.0
id=$(uci get $appname.ss.id)
ss_server_ip=$(uci get $appname.$id.ss_server)
#ss_server_ip=`nslookup $ss_server_ip | grep 'Address 1' | grep -v '127.0.0.1' | awk '{print $3}'`
ss_server_port=$(uci get $appname.$id.ss_server_port)
ss_server_password=$(uci get $appname.$id.ss_password)
ss_server_method=$(uci get $appname.$id.ss_method)
ssr_enable=$(uci get $appname.$id.ssr_enable)
ssr_protocol=$(uci get $appname.$id.ssr_protocol)
ssr_obfs=$(uci get $appname.$id.ssr_obfs)
rm -rf $CONFIG
if [ "$ssr_enable" = '0' ];then
echo -e '{\n "server":"'$ss_server_ip'",\n "server_port":'$ss_server_port',\n "local_port":'1081',\n "local_address":"'$local_ip'",\n "password":"'$ss_server_password'",\n "timeout":600,\n "method":"'$ss_server_method'"\n}' > $CONFIG
echo -e '{\n "server":"'$ss_server_ip'",\n "server_port":'$ss_server_port',\n "local_port":'1082',\n "local_address":"'$local_ip'",\n "password":"'$ss_server_password'",\n "timeout":600,\n "method":"'$ss_server_method'"\n}' > $DNSCONF
fi
if [ "$ssr_enable" = '1' ];then
APPPATH=/etc/misstar/applications/ss/bin/ssr-redir
LOCALPATH=/etc/misstar/applications/ss/bin/ssr-local
echo -e '{\n "server":"'$ss_server_ip'",\n "server_port":'$ss_server_port',\n "local_port":'1081',\n "local_address":"'$local_ip'",\n "password":"'$ss_server_password'",\n "timeout":600,\n "method":"'$ss_server_method'",\n "protocol":"'$ssr_protocol'",\n "obfs":"'$ssr_obfs'"\n}' > $CONFIG
echo -e '{\n "server":"'$ss_server_ip'",\n "server_port":'$ss_server_port',\n "local_port":'1082',\n "local_address":"'$local_ip'",\n "password":"'$ss_server_password'",\n "timeout":600,\n "method":"'$ss_server_method'",\n "protocol":"'$ssr_protocol'",\n "obfs":"'$ssr_obfs'"\n}' > $DNSCONF
fi
}
dnsconfig(){
killall $DNSPATH
killall pdnsd
iptables -t nat -D PREROUTING -s $lanip/24 -p udp --dport 53 -j DNAT --to $redip -m comment --comment "misstar-dnsred" &> /dev/null
MTlog 1 "Start DNS Process..."
dns_mode=$(uci get misstar.ss.dns_mode)
if [ "$dns_mode" = 'pdnsd' ];then
/etc/misstar/applications/ss/script/pdnsd start
if [ $? -eq 0 ];then
MTlog 1 "Done! DNS started with "$dns_mode" Mode."
else
MTlog 3 "DNS Process start failed,Exiting..."
exit
fi
elif [ "$dns_mode" = 'dns2socks' ];then
DNS_SERVER=$(uci get $appname.ss.dns_server)
DNS_SERVER_PORT=$(uci get $appname.ss.dns_port)
service_start $DNSPATH 127.0.0.1:1082 $DNS_SERVER:$DNS_SERVER_PORT 127.0.0.1:15353
if [ $? -eq 0 ];then
MTlog 1 "Done! DNS started with "$dns_mode" Mode."
else
MTlog 3 "DNS Process start failed,Exiting..."
exit
fi
else
MTlog 3 "Get DNS mode Error,Exiting..."
exit
fi
Dnsred=$(uci get $appname.ss.dns_red_enable)
if [ "$Dnsred" == '1' ];then
Dnsredid=$(uci get $appname.ss.dns_red_ip)
if [ "$Dnsredid" != 'lanip' ];then
redip=$Dnsredid
fi
iptables -t nat -I PREROUTING -s $lanip/24 -p udp --dport 53 -j DNAT --to $redip -m comment --comment "misstar-dnsred" &> /dev/null
fi
}
# get_jump_mode MODE -> prints the iptables jump flag to use:
# "-j" for mode 0, "-g" (goto) for anything else.
get_jump_mode(){
    if [ "$1" = "0" ]; then
        echo "-j"
    else
        echo "-g"
    fi
}
# get_action_chain MODE -> prints the target chain name for an ACL mode:
# 0 -> RETURN (direct), 1 -> SHADOWSOCK (proxy). Prints nothing for any
# other value, matching the original case statement with no default arm.
get_action_chain() {
    if [ "$1" = "0" ]; then
        echo "RETURN"
    elif [ "$1" = "1" ]; then
        echo "SHADOWSOCK"
    fi
}
start()
{
vsftpd_enable=$(uci get misstar.ss.enable)
if [ "$vsftpd_enable" = '0' ];then
echo "service ss is disabeld!"
exit
fi
AreadyRunning=$(ps | grep ss-redir | grep -v grep | wc -l)
if [ "$AreadyRunning" != '0' ];then
echo "SS is aleady running,Exit..."
exit
fi
ss_getconfig
insmod ipt_REDIRECT 2>/dev/null
chmod +x /etc/misstar/applications/ss/bin/ss-redir
chmod +x /etc/misstar/applications/ss/bin/ss-local
service_start $LOCALPATH -c $DNSCONF
dnsconfig
#创建CHAIN
MTlog 1 "Add iptables rules... "
iptables -t nat -N SHADOWSOCKS
iptables -t nat -A SHADOWSOCKS -d 0.0.0.0/8 -j RETURN
iptables -t nat -A SHADOWSOCKS -d $lanip/24 -j RETURN
iptables -t nat -A SHADOWSOCKS -d $wanip/16 -j RETURN
iptables -t nat -A SHADOWSOCKS -d $ss_server_ip -j RETURN
iptables -t nat -N SHADOWSOCK
# lan access control
cat /etc/misstar/applications/ss/config/LanCon.conf | awk -F ',' '{print $1}' | while read line
do
mac=$line
proxy_mode=$(cat /etc/misstar/applications/ss/config/LanCon.conf | grep $line | awk -F ',' '{print $4}')
iptables -t nat -A SHADOWSOCKS -m mac --mac-source $mac $(get_jump_mode $proxy_mode) $(get_action_chain $proxy_mode)
done
# default acl mode
ss_acl_default_mode=$(uci get misstar.ss.ss_acl_default_mode)
[ -z "$ss_acl_default_mode" ] && ( ss_acl_default_mode=1;uci set misstar.ss.ss_acl_default_mode=1;uci commit misstar)
iptables -t nat -A SHADOWSOCKS -p tcp -j $(get_action_chain $ss_acl_default_mode)
id=$(uci get $appname.ss.id)
ss_mode=$(uci get $appname.$id.ss_mode)
case $ss_mode in
"gfwlist")
service_start $APPPATH -b 0.0.0.0 -c $CONFIG
if [ $? -eq 0 ];then
MTlog 1 "Start Shadowsocks as GFWlist Mode. Done"
else
MTlog 3 "Shadowsocks Process start failed,Exiting..."
exit
fi
start_ss_rules_gfwlist
;;
"whitelist")
service_start $APPPATH -b 0.0.0.0 -c $CONFIG
if [ $? -eq 0 ];then
MTlog 1 "Start Shadowsocks as whitelist Mode. Done"
else
MTlog 3 "Shadowsocks Process start failed,Exiting..."
exit
fi
start_ss_rules_whitelist
;;
"gamemode")
service_start $APPPATH -b 0.0.0.0 -u -c $CONFIG
if [ $? -eq 0 ];then
MTlog 1 "Start Shadowsocks as Game Mode. Done"
else
MTlog 3 "Shadowsocks Process start failed,Exiting..."
exit
fi
start_ss_rules_whitelist
start_ss_udp
;;
"wholemode")
service_start $APPPATH -b 0.0.0.0 -c $CONFIG
if [ $? -eq 0 ];then
MTlog 1 "Start Shadowsocks as Whole Mode. Done"
else
MTlog 3 "Shadowsocks Process start failed,Exiting..."
exit
fi
start_ss_rules
;;
esac
#apply iptables
#全局模式
ss_mode=$(uci get $appname.$id.ss_mode)
iptablenu=$(iptables -t nat -L PREROUTING | awk '/KOOLPROXY/{print NR}')
if [ '$iptablenu' != '' ];then
iptablenu=`expr $iptablenu - 2`
else
iptablenu=2
fi
[ "$ss_mode" == "wholemode" ] ||[ "$ss_mode" == "whitelist" ] || [ "$ss_mode" == "gamemode" ] && iptables -t nat -I PREROUTING $iptablenu -p tcp -j SHADOWSOCKS
# ipset 黑名单模式
[ "$ss_mode" == "gfwlist" ] && iptables -t nat -I PREROUTING 2 -p tcp -m set --match-set gfwlist dst -j SHADOWSOCKS
#ln -s /etc/misstar/applications/ss/config/pac.conf /tmp/etc/dnsmasq.d/
#ipset
cat /etc/misstar/applications/ss/config/pac_customize.conf /etc/misstar/applications/ss/config/pac.conf | while read line
do
echo "server=/.$line/127.0.0.1#15353" >> /tmp/etc/dnsmasq.d/pac_customize.conf
echo "ipset=/.$line/gfwlist" >> /tmp/etc/dnsmasq.d/pac_customize.conf
done
/etc/init.d/dnsmasq restart
}
start_ss_rules_whitelist()
{
sed -e "s/^/-A nogfwnet &/g" -e "1 i\-N nogfwnet hash:net" $chnroute | ipset -R -!
sed -e "s/^/-A nogfwnet &/g" -e "1 i\-N nogfwnet hash:net" $chnroute_user | ipset -R -!
iptables -t nat -A SHADOWSOCK -p tcp -m set ! --match-set nogfwnet dst -j REDIRECT --to-ports 1081
#iptables -t nat -A PREROUTING -s $lanip/24 -p udp --dport 53 -j DNAT --to $lanip
MTlog 1 "Done!"
}
start_ss_rules()
{
iptables -t nat -A SHADOWSOCK -p tcp -j REDIRECT --to-ports 1081
MTlog 1 "Done!"
}
start_ss_rules_gfwlist()
{
MTlog 1 "Add iptables rules... "
ipset -N gfwlist iphash -!
iptables -t nat -A SHADOWSOCK -p tcp -m set --match-set gfwlist dst -j REDIRECT --to-port 1081
MTlog 1 "Done!"
}
start_ss_udp()
{
MTlog 1 "Add iptables UDP rules... "
ip rule add fwmark 0x01/0x01 table 300
ip route add local 0.0.0.0/0 dev lo table 300
iptables -t mangle -N SHADOWSOCKS
iptables -t mangle -A SHADOWSOCKS -d 0.0.0.0/8 -j RETURN
iptables -t mangle -A SHADOWSOCKS -d 127.0.0.1/16 -j RETURN
iptables -t mangle -A SHADOWSOCKS -d $lanip/16 -j RETURN
iptables -t mangle -A SHADOWSOCKS -d $wanip/16 -j RETURN
iptables -t mangle -A SHADOWSOCKS -d $ss_server_ip -j RETURN
iptables -t mangle -A PREROUTING -p udp -j SHADOWSOCKS
iptables -t mangle -A SHADOWSOCKS -p udp -m set ! --match-set nogfwnet dst -j TPROXY --on-port 1081 --tproxy-mark 0x01/0x01
MTlog 1 "Done!"
chmod -x /opt/filetunnel/stunserver
killall -9 stunserver
}
stop()
{
MTlog 1 "Stopping ss service..."
# Client Mode
#service_stop /usr/bin/ss-local
# Proxy Mode
killall ss-redir
killall ss-local
killall ssr-redir
killall ssr-local
killall $DNSPATH
killall pdnsd
# Tunnel
#service_stop /usr/bin/ss-tunnel
stop_ss_rules
MTlog 1 "Done!"
}
stop_ss_rules()
{
MTlog 1 "Delete iptables rules... "
iptables -t nat -S | grep -E 'SHADOWSOCK|SHADOWSOCKS'| sed 's/-A/iptables -t nat -D/g'|sed 1,2d > clean.sh && chmod 777 clean.sh && ./clean.sh && rm clean.sh
ip rule del fwmark 0x01/0x01 table 300 &> /dev/null
ip route del local 0.0.0.0/0 dev lo table 300 &> /dev/null
iptables -t mangle -D PREROUTING -p udp -j SHADOWSOCKS &> /dev/null
iptables -t nat -D PREROUTING -p tcp -j SHADOWSOCKS &> /dev/null
iptables -t mangle -F SHADOWSOCKS &> /dev/null
iptables -t mangle -X SHADOWSOCKS &> /dev/null
iptables -t nat -F SHADOWSOCK &> /dev/null
iptables -t nat -X SHADOWSOCK &> /dev/null
iptables -t nat -F SHADOWSOCKS &> /dev/null
iptables -t nat -X SHADOWSOCKS &> /dev/null
ipset destroy nogfwnet &> /dev/null
ipset destroy gfwlist &> /dev/null
iptables -t nat -D PREROUTING -s $lanip/24 -p udp --dport 53 -j DNAT --to $redip -m comment --comment "misstar-dnsred" &> /dev/null
MTlog 1 "Done!"
MTlog 1 "Remove Cache files..."
rm -rf /tmp/etc/dnsmasq.d/pac_customize.conf
/etc/init.d/dnsmasq restart
MTlog 1 "Done!"
chmod +x /opt/filetunnel/stunserver
rm -rf $CONFIG
rm -rf $DNSCONF
}
status()
{
status=`ps | grep -E "ss-redir|ssr-redir" | grep -v 'grep' | grep -v script | grep -v '{' | wc -l`
if [ "$status" == "1" ];then #进程存在,已运行
id=$(uci get misstar.ss.id)
DNS_PORT=1082
http_status=`curl -s -w %{http_code} https://www.google.com.hk/images/branding/googlelogo/1x/googlelogo_color_116x41dp.png -k -o /dev/null --socks5 127.0.0.1:1082`
if [ "$http_status" == "200" ];then
echo -e "2\c" #翻墙正常
else
echo -e "3\c"
fi
else
echo -e "1\c"
fi
}
dnsstatus()
{
    # DNS health probe: prints "0" when resolution fails or resolves to an
    # address listed in $iplist (presumably a poisoned/blacklisted address
    # set — confirm against how $iplist is populated), "1" when healthy.
    status=`resolveip www.youtube.com | wc -l`
    if [ "$status" = "0" ]; then
        # No address at all -> DNS broken.
        echo -e "0\c"
    elif [ "$status" = "1" ]; then
        ip=`resolveip www.youtube.com`
        # BUGFIX: the old code captured grep's *output* (the matched line)
        # and compared it to the literal string "1", which was effectively
        # never true. Use grep's exit status to test list membership.
        if grep -q "$ip" "$iplist"; then
            echo -e "0\c"
        else
            echo -e "1\c"
        fi
    else
        # Multiple addresses returned -> treat as healthy.
        echo -e "1\c"
    fi
}
restart()
{
MTlog 1 "Restarting ss service..."
stop
sleep 3
start
}
| true
|
ee8472ab4a9723fabcda7e53909d3e809fc2599f
|
Shell
|
VinceBLOT/SyliusDockerDeployer
|
/deploy/scripts/update-remote-env.sh
|
UTF-8
| 171
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Push the local .env file(s) to the remote deployment target.
# NOTE(review): ${BASH_SOURCE%/*} is a bashism and expands empty under plain
# /bin/sh, so DIR resolution likely only works when invoked with bash —
# confirm how this script is launched.
DIR="$( cd "$( dirname "${BASH_SOURCE%/*}" )" >/dev/null 2>&1 && pwd )/deploy/scripts"
# functions.sh provides the rscp helper used below.
. "$DIR/functions.sh"
echo "Update remote .env files"
# NOTE(review): rscp presumably copies the given path to the remote host —
# verify in functions.sh.
rscp .env
exit 0
| true
|
be969e5e2ec3feebdcc654e5efa13c94b3192a89
|
Shell
|
bpan-org/bpan
|
/lib/cmd/config.bash
|
UTF-8
| 851
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
config:default() (
echo --help
)
config:usage() (
echo "$app [<$app-opts>] $cmd <key> [<value>]"
)
config:options() (
echo "f,file= Config file to use"
echo "l,local Use './.bpan/config'"
echo "g,global Use '\$BPAN_ROOT/config'"
echo "all Get all values for a key"
echo "list List all keys and values"
)
config:main() (
  # Resolve which config file to operate on from the option flags, then
  # delegate to the ini:* helpers (provided by the bpan runtime).
  opts=()
  if [[ ${option_file-} ]]; then
    # Explicit --file wins over --local/--global.
    opts+=(--file="$option_file")
  elif $option_local; then
    # --local only makes sense inside a package repo; bail out otherwise.
    +git:in-repo || error \
      "'bpan config --local' can only be used inside a BPAN package repo"
    opts+=(--file="$(+git:top-dir)"/.bpan/config)
  elif $option_global; then
    opts+=(--file="$root"/config)
  fi
  # Dispatch: --list dumps everything; one arg reads a key; two args set it.
  # Note: with no recognized arg count, this silently does nothing.
  if $option_list; then
    ini:list "${opts[@]}"
  elif [[ $# -eq 1 ]]; then
    ini:get "${opts[@]}" "$@"
  elif [[ $# -eq 2 ]]; then
    ini:set "${opts[@]}" "$@"
  fi
)
| true
|
74b61a6e658c17aadd71d2cced56345291511364
|
Shell
|
maniaxcz/Shell-Scripting
|
/until.sh
|
UTF-8
| 73
| 2.875
| 3
|
[] |
no_license
|
# Demonstrates an `until` loop counter. The original `for $count in` was a
# syntax error at runtime ($ on the loop variable, empty word list); rewritten
# as the until-loop the filename implies.
# count_up N -> prints the integers 1..N, one per line.
count_up() {
  local i=1
  until [ "$i" -gt "$1" ]; do
    echo "$i"
    i=$(( i + 1 ))
  done
}
count_up 10
| true
|
ba068d144f1ab94abebd0bc99b94c1815e14d731
|
Shell
|
mnabila/dotfiles
|
/scripts/dmenu_ffmpeg
|
UTF-8
| 5,509
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# dmenu_ffmpeg
# Copyright (c) 2021 M. Nabil Adani <nblid48[at]gmail[dot]com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# required
# - ffmpeg
# - rofi
# - libpulse
# - xorg-xdpyinfo
# - jq
# - pulseaudo/pipewire-pulse
DMENU="rofi -dmenu -i"
VIDEO="$HOME/Videos/record"
AUDIO="$HOME/Music/record"
recordid="/tmp/recordid"
function getInputAudio() {
pactl list | grep "Name" | grep "alsa" | awk '{print $2}' | $DMENU -p "Input Audio " -theme-str 'window {width: 30%;} listview {lines: 5;}'
}
function audioVideo() {
filename="$VIDEO/video-$(date '+%y%m%d-%H%M-%S').mp4"
dimensions=$(xdpyinfo | grep dimensions | awk '{print $2;}')
audio=$(getInputAudio)
if [ -n "$audio" ]; then
notify-send "Start Recording" "With:\nVideo On\nAudio On"
ffmpeg -y -f x11grab -framerate 30 -s $dimensions \
-i :0.0 -f pulse -i $audio -ac 1 \
-c:v libx264 -pix_fmt yuv420p -preset veryfast -q:v 1 \
-c:a aac $filename &
echo $! >$recordid
fi
}
function video() {
filename="$VIDEO/video-$(date '+%y%m%d-%H%M-%S').mp4"
dimensions=$(xdpyinfo | grep dimensions | awk '{print $2;}')
notify-send "Start Recording" "With:\nVideo On\nAudio Off"
ffmpeg -y -f x11grab -framerate 30 -s $dimensions \
-i :0.0 -f lavfi -i anullsrc=channel_layout=stereo:sample_rate=44100 \
-c:v libx264 -pix_fmt yuv420p -preset veryfast -q:v 1 $filename &
echo $! >$recordid
}
function audio() {
filename="$AUDIO/audio-$(date '+%y%m%d-%H%M-%S').mp3"
audio=$(getInputAudio)
if [ -n "$audio" ]; then
notify-send "Start Recording" "With:\nVideo Off\nAudio On"
ffmpeg -f pulse -i $audio -ac 1 -acodec libmp3lame -ab 128k $filename &
echo $! >$recordid
fi
}
function stream() {
    # Stream the desktop (scaled to 480p) to an RTMP endpoint.
    # $1 - platform name (used in the notification only)
    # $2 - full RTMP URL including the stream token
    output=$2
    platform=$1
    dimensions=$(xdpyinfo | grep dimensions | awk '{print $2;}')
    audio=$(getInputAudio)
    if [ -n "$audio" ]; then
        notify-send "Start Streaming On $platform" "With:\nVideo On\nAudio On"
        ffmpeg -y -f x11grab -framerate 23 -s $dimensions \
            -i :0.0 -f pulse -i $audio -ac 1 \
            -c:v libx264 -pix_fmt yuv420p -preset veryfast -q:v 1 \
            -b:v 500k -b:a 128k \
            -vf scale=854x480 \
            -f flv $output &
        # BUGFIX: store the background ffmpeg PID ($!) like audioVideo/
        # video/audio do; the old code wrote "$1" (the platform name), so
        # stoprecord could never kill a streaming session by PID.
        echo $! >$recordid
    fi
}
function getStreamToken() {
$DMENU -p "Stream" -mesg "Insert $1 Token" -lines 0
}
function startStreaming() {
platform="$1"
streamurl="$2"
token=$(getStreamToken "$platform")
if [ -z "$token" ]; then
exit
else
stream "$platform" "$streamurl$token"
fi
}
function streamOnFacebook() {
startStreaming "Facebook" "rtmps://live-api-s.facebook.com:443/rtmp/"
}
function streamOnNimoTv() {
startStreaming "Nimo TV" "rtmp://txpush.rtmp.nimo.tv/live/"
}
function streamOnTwitch() {
startStreaming "Twitch" "rtmp://sin.contribute.live-video.net/app/"
}
function streamOnYoutube() {
startStreaming "Youtube" "rtmp://a.rtmp.youtube.com/live2/"
}
function streamOnVimeo() {
startStreaming "Vimeo" "rtmps://rtmp-global.cloud.vimeo.com:443/live/"
}
function stoprecord() {
if [ -f $recordid ]; then
kill -15 $(cat $recordid)
rm $recordid
fi
sleep 5
if [ "$(pidof ffmpeg)" != "" ]; then
pkill ffmpeg
fi
}
function endrecord() {
OPTIONS='["Yes", "No"]'
select=$(echo $OPTIONS | jq -r ".[]" | $DMENU -p "Record" -mesg "Stop Recording" -theme-str 'window {width: 30%;} listview {lines: 2;}')
[ "$select" == "Yes" ] && stoprecord
}
function startrecord() {
OPTIONS='''
[
["難 Audio Video", "audioVideo"],
[" Video Only", "video"],
[" Audio Only", "audio"],
[" Stream On Facebook", "streamOnFacebook"],
["壘 Stream On Nimo TV", "streamOnNimoTv"],
["既 Stream On Twitch", "streamOnTwitch"],
[" Stream On Youtube", "streamOnYoutube"],
[" Stream On Vimeo", "streamOnVimeo"]
]
'''
select=$(echo $OPTIONS | jq -r ".[][0]" | $DMENU -p "Record" -theme-str 'window {width: 30%;} listview {lines: 5;}')
eval $(echo $OPTIONS | jq -r ".[] | select(.[0] == \"$select\") | .[1]")
}
function createSaveFolder() {
if [ ! -d $VIDEO ]; then
mkdir -p $VIDEO
fi
if [ ! -d $AUDIO ]; then
mkdir -p $AUDIO
fi
}
createSaveFolder
if [ -f $recordid ]; then
endrecord
else
startrecord
fi
| true
|
ffd49e0998274970a74e99906ce2dd3b3c62f77f
|
Shell
|
LyndonXu/Project
|
/user_sdk/common/rmoldbackup.sh
|
UTF-8
| 161
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# Prune old backup files matching the prefix given in $1: keep the first
# files in sorted-name order and delete (after echoing) the rest.
# NOTE(review): cnt starts at 1 and is incremented *before* the check, so
# deletion starts at the 10th file, i.e. 9 files are kept — confirm whether
# the intent was to keep 10.
# NOTE(review): ((cnt++)) is a bashism under #!/bin/sh; works only where
# /bin/sh is bash — confirm the target platform.
filelist=`ls ${1}* | sort`
cnt=1;
for filetmp in ${filelist};
do
    ((cnt++))
    if [ "${cnt}" -gt 10 ];then
        echo ${filetmp}
        rm ${filetmp}
    fi
done
| true
|
e8e4e05333444d00c86380964b2933cca67ae72a
|
Shell
|
k-husmann/tools
|
/shell/create_git_repo.sh
|
UTF-8
| 388
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Initialize a git repository in the temp directory and push it to my own
# server. I should have created a repository there on the server already with
# ``git init ~/repos/the_project_name --bare``.
cd /tmp
git init $1
cd $1
echo "hurray" > README.rst
git add README.rst
git commit -m "Added readme"
git remote add origin ssh://vanrees.org/~/repos/$1
git push origin master
| true
|
0b9785dd1b3a04939b28adf962bef9f4be5dc606
|
Shell
|
evernym/indy-sdk
|
/vcx/libvcx/build_scripts/android/libsodium/build.sh
|
UTF-8
| 1,577
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build libsodium for Android inside a Docker container and copy the
# resulting libsodium_<arch>.zip archive back to the host working directory.
#
# Usage: build.sh TARGET_ARCH TARGET_API CROSS_COMPILE
#   TARGET_ARCH    target architecture, e.g. x86 or arm
#   TARGET_API     Android API level, e.g. 21
#   CROSS_COMPILE  toolchain triple, e.g. i686-linux-android

TARGET_ARCH=$1
TARGET_API=$2
CROSS_COMPILE=$3

# Argument validation. Diagnostics go to stderr; the original echoed the
# literal word "STDERR" to stdout instead of redirecting.
if [ -z "${TARGET_ARCH}" ]; then
    echo "Missing TARGET_ARCH argument" >&2
    echo "e.g. x86 or arm" >&2
    exit 1
fi

if [ -z "${TARGET_API}" ]; then
    echo "Missing TARGET_API argument" >&2
    echo "e.g. 21" >&2
    exit 1
fi

if [ -z "${CROSS_COMPILE}" ]; then
    echo "Missing CROSS_COMPILE argument" >&2
    echo "e.g. i686-linux-android" >&2
    exit 1
fi

# Download build inputs only when not already cached in the working directory.
if [ ! -f "android-ndk-r16b-linux-x86_64.zip" ] ; then
    echo "Downloading android-ndk-r16b-linux-x86_64.zip"
    wget -q https://dl.google.com/android/repository/android-ndk-r16b-linux-x86_64.zip
else
    echo "Skipping download android-ndk-r16b-linux-x86_64.zip"
fi

if [ ! -f "libsodium-1.0.12.tar.gz" ] ; then
    echo "Downloading libsodium-1.0.12.tar.gz"
    # BUG FIX: the original command was "wget -q wget https://..." -- the
    # stray second "wget" token made wget also try to fetch a URL literally
    # named "wget", producing a spurious error on every fresh download.
    wget -q https://github.com/jedisct1/libsodium/releases/download/1.0.12/libsodium-1.0.12.tar.gz
else
    echo "Skipping download libsodium-1.0.12.tar.gz"
fi

# Build the image, run the build, then copy the produced zip out of the
# exited container and remove it. Quoting added around expansions.
sudo docker build -t sodium-android:latest . --build-arg target_arch="${TARGET_ARCH}" --build-arg target_api="${TARGET_API}" --build-arg cross_compile="${CROSS_COMPILE}"
sudo docker run sodium-android:latest && \
docker_id=$(sudo docker ps -a | grep sodium-android:latest | grep Exited | tail -n 1 | cut -d ' ' -f 1) && \
docker_image_id=$(sudo docker image ls | grep sodium-android | perl -pe 's/\s+/ /g' | cut -d ' ' -f 3) && \
sudo docker cp "${docker_id}":/home/sodium_user/libsodium_"${TARGET_ARCH}".zip . && \
sudo docker rm "${docker_id}" > /dev/null
#sudo docker rmi ${docker_image_id} > /dev/null
| true
|
b9b42fb6ff2dee20cbd7ff028269237c44de174d
|
Shell
|
dankamongmen/snare
|
/sbin/snare-logdump
|
UTF-8
| 317
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/sh
# Sends a no-op message to snare, ensuring that it's alive and addressable
# via the ctlserver at $SNARECTL.
# If CONF is not already present in the environment, fall back to the
# default crosier configuration path.
set | grep ^CONF= > /dev/null || CONF=/usr/local/etc/crosier/crosier.conf
# Source the configuration; presumably it defines CROSIER and SNARECTL,
# which are used below -- TODO confirm. Bail out if it cannot be read.
. $CONF || exit 1
echo "Running $CROSIER $SNARECTL log_dump < /dev/null..."
# exec replaces this shell with the crosier invocation; stdin comes from
# /dev/null so log_dump receives no input.
exec $CROSIER $SNARECTL log_dump < /dev/null
| true
|
ae912c854003ad07b9f00222c514a2eb6142770b
|
Shell
|
juhanikataja/SAPPORO-service
|
/src/run_workflow.sh
|
UTF-8
| 1,520
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
function run_wf() {
  # Dispatch to the engine-specific runner selected in run_order.yml.
  # Unknown engine names fall through and do nothing, as before.
  case "${execution_engine}" in
    cwltool)  run_cwltool ;;
    nextflow) run_nextflow ;;
    toil)     run_toil ;;
  esac
}
function run_cwltool() {
  # Execute the workflow with cwltool on the sapporo Docker network,
  # recording lifecycle states (RUNNING -> COMPLETE/EXECUTOR_ERROR) in
  # the status file. Engine stdout/stderr are captured to log files.
  echo "RUNNING" >$status
  # BUG FIX: previously "COMPLETE" was echoed unconditionally after the
  # `|| echo "EXECUTOR_ERROR"` fallback, clobbering the error state on a
  # failed run. Use if/else so the final status reflects the outcome.
  if cwltool --custom-net=sapporo-network --outdir $run_dir $workflow $workflow_parameters 1>$stdout 2>$stderr; then
    echo "COMPLETE" >$status
  else
    echo "EXECUTOR_ERROR" >$status
  fi
  exit 0
}
function run_nextflow() {
    # TODO: nextflow execution is not implemented yet; ':' is a shell no-op
    # placeholder so the function body is syntactically valid.
    :
}
function run_toil() {
    # TODO: toil execution is not implemented yet; ':' is a shell no-op
    # placeholder so the function body is syntactically valid.
    :
}
function cancel() {
  # Route a cancellation request (delivered via SIGUSR1) to the handler
  # for the engine currently in use. Unknown engines are ignored.
  case "${execution_engine}" in
    cwltool)  cancel_cwltool ;;
    nextflow) cancel_nextflow ;;
    toil)     cancel_toil ;;
  esac
}
function cancel_cwltool() {
    # Cancellation handler for cwltool: exits this script cleanly.
    # NOTE(review): no process is actually killed here; presumably the
    # running cwltool child dies with the script -- confirm against caller.
    exit 0
}
function cancel_nextflow() {
    # TODO: nextflow cancellation is not implemented yet; ':' is a no-op.
    :
}
function cancel_toil() {
    # TODO: toil cancellation is not implemented yet; ':' is a no-op.
    :
}
# =============
# Entry point: locate the run directory for the UUID given as $1 and launch
# the workflow with the execution engine named in run_order.yml.
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
RUN_BASE_DIR=$(cd "${SCRIPT_DIR}/.." && pwd)

uuid=$1
# Runs are sharded into subdirectories keyed by the first two UUID chars.
run_dir=$RUN_BASE_DIR/run/$(echo "${uuid}" | cut -c 1-2)/${uuid}
cd "$run_dir"
output_dir="${run_dir}/output"
run_order="${run_dir}/run_order.yml"
workflow="${run_dir}/workflow"
workflow_parameters="${run_dir}/workflow_parameters"
status="${run_dir}/status.txt"
pid_info="${run_dir}/run.pid"
upload_url="${run_dir}/upload_url.txt"
stdout="${run_dir}/stdout.log"
stderr="${run_dir}/stderr.log"
execution_engine=$(yq -r '.execution_engine_name' "${run_order}")
# BUG FIX: the trap previously wrote to ${status_file}, a variable that is
# never defined (the variable set above is ${status}), so on HUP/INT/QUIT/
# TERM the status file was never updated to SYSTEM_ERROR.
trap 'echo "SYSTEM_ERROR" > ${status}' 1 2 3 15
# SIGUSR1 (10) requests cancellation of the running workflow.
trap 'cancel' 10
run_wf
| true
|
d0a05c885b8735d9f1453806cbacb2cd5e965705
|
Shell
|
cyber-dojo-retired/porter
|
/sh/scratch.sh
|
UTF-8
| 762
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Scratch notes for setting up and exercising the cyber-dojo storer->saver
# porter: fetch the porting script, pull the images, and prepare data dirs.
curl -O https://raw.githubusercontent.com/cyber-dojo/porter/master/port_cyber_dojo_storer_to_saver.sh
chmod 700 port_cyber_dojo_storer_to_saver.sh
docker pull cyberdojo/storer
docker pull cyberdojo/saver
docker pull cyberdojo/porter
# Data directories; the uid:gid values presumably match the users inside
# the storer/saver images -- TODO confirm against the image Dockerfiles.
sudo mkdir /cyber-dojo
sudo chown 19663:65533 /cyber-dojo
sudo mkdir /porter
sudo chown 19664:65533 /porter
# To extract an id2 subset...eg 02
# A throwaway container mounts the katas data-container; tar streams the
# "02" subtree out of it into the current directory, then it is archived.
docker run --detach -it --name temp --volumes-from cyber-dojo-katas-DATA-CONTAINER alpine sh
docker exec temp tar -c -f - -C /usr/src/cyber-dojo/katas 02 | tar -x -f - -C .
tar -zcf 02.tgz 02
docker rm -f temp
# to shell into a container that can see the data-container
docker run --rm -it --user storer --volumes-from cyber-dojo-katas-DATA-CONTAINER cyberdojo/storer sh
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.