blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
47760fceba5de1efb89b71696581aa4681513720
|
Shell
|
veegit/dotfiles
|
/scripts/git-push
|
UTF-8
| 174
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# git-push: force-push the current branch to origin, refusing to push master.

# Current branch name (e.g. "feature/foo"); prints "HEAD" when detached.
branch=$(git rev-parse --abbrev-ref HEAD)

# Guard rail: never force-push master.
if [ "$branch" = "master" ]; then
  echo "You are in master-- can't push" >&2
  # Exit non-zero so callers/CI can detect the refusal (original exited 0).
  exit 1
fi

echo "$branch"
git push origin "$branch" -f
| true
|
ff637ad6e2e05baa21ad19b7875d83e192d2d909
|
Shell
|
RunzeWang797/ACIT-4640
|
/midterm/section3/files/setup/app_setup.sh
|
UTF-8
| 2,120
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision the Hichat chat app on host A01023366 over ssh: install the
# required packages, create the app user, clone the app, deploy config
# files, then start the services.

USER_NAME="hichat"

# use to install the services that we need
install_services(){
  ssh A01023366 'sudo yum upgrade -y'
  ssh A01023366 'sudo yum install git -y'
  ssh A01023366 'sudo yum install nodejs -y'
  ssh A01023366 'sudo yum install mongodb-server -y'
  ssh A01023366 'sudo systemctl enable mongod && sudo systemctl start mongod'
  ssh A01023366 'sudo yum install nginx -y'
  echo "############################################ finished install############################################ "
}

# use to create the todo user and set the password and change the permission
create_user(){
  # Double quotes so the *local* $USER_NAME expands before the command is
  # sent. The original single-quoted chpasswd line expanded an unset
  # remote variable and fed ":disabled" to chpasswd.
  ssh A01023366 "sudo useradd $USER_NAME"
  ssh A01023366 "echo $USER_NAME:disabled | sudo chpasswd"
  echo "############################################ User created ############################################ "
}

# use to download from Tim's github
get_app(){
  # /app sits in the filesystem root, so creating it needs sudo (the
  # original plain mkdir fails for a non-root login).
  ssh A01023366 'sudo mkdir -p /app'
  ssh A01023366 'sudo git clone https://github.com/wayou/Hichat.git /app'
  echo "############################################ APP Cloned############################################ "
}

# use to move the files to correct places
move_configs(){
  # NOTE(review): scp targets host "$USER_NAME" (hichat) while every ssh
  # call targets A01023366 — presumably an ssh-config alias; verify.
  scp -r ./database.js "$USER_NAME":~
  scp -r ./A01023366.service "$USER_NAME":~
  scp -r ./nginx.conf "$USER_NAME":~
  ssh A01023366 "sudo rm /etc/nginx/nginx.conf"
  ssh A01023366 "sudo cp ~/nginx.conf /etc/nginx/"
  echo "############################################ move configs done############################################ "
}

# use to restart service
service_start(){
  ssh A01023366 'sudo systemctl daemon-reload'
  ssh A01023366 'sudo systemctl enable nginx'
  ssh A01023366 'sudo systemctl start nginx'
  echo "############################################ service started############################################ "
}

# npm install and launch the app
connect(){
  ssh A01023366 "cd /app && sudo npm install"
  # NOTE(review): `iojs server` runs in the foreground, so this ssh call
  # blocks until the app exits; service_start below only runs after that.
  ssh A01023366 "cd /app && sudo iojs server"
  echo "############################################finished############################################"
}

install_services
create_user
get_app
move_configs
connect
service_start
| true
|
f260eb2db2d9e968fa0d6998a1d0cdf929dc6448
|
Shell
|
kek91/scripts
|
/shell/sysinfo/sysinfo
|
UTF-8
| 2,136
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Inspired by http://linuxcommand.org/
# sysinfo — write an HTML system report (OS, uptime, disk usage, /var/www
# usage, htop snapshot) to a file (default ~/sysinfo.html).

TITLE="System information for $HOSTNAME"
RIGHT_NOW=$(date +"%x %r %Z")
TIME_STAMP="Updated on $RIGHT_NOW"

# Distro identification.
system_info() {
  echo "<h2>OS</h2>"
  echo "<pre>"
  lsb_release -a
  echo "</pre>"
}

show_uptime() {
  echo "<h2>Uptime</h2>"
  echo "<pre>"
  uptime
  echo "</pre>"
}

drive_space() {
  echo "<h2>Disk usage</h2>"
  echo "<pre>"
  df -h
  echo "</pre>"
}

# Per-vhost dir/file/block counts for everything under /var/www.
home_space() {
  echo "<h2>Usage by vhosts</h2>"
  echo "<pre>"
  format="%8s%10s%10s %-s\n"
  printf "$format" "Dirs" "Files" "Blocks" "Directory"
  printf "$format" "----" "-----" "------" "---------"
  dir_list="/var/www/*"
  for home_dir in $dir_list; do
    total_dirs=$(find "$home_dir" -type d | wc -l)
    total_files=$(find "$home_dir" -type f | wc -l)
    # du -s prints "<blocks>\t<path>"; keep only the count. The original
    # passed the raw du output unquoted, supplying 3 arguments to a
    # 4-specifier format and relying on accidental word-splitting to fill
    # the Directory column.
    total_blocks=$(du -s "$home_dir" | cut -f1)
    printf "$format" "$total_dirs" "$total_files" "$total_blocks" "$home_dir"
  done
  echo "</pre>"
} # end of home_space

# htop snapshot rendered to HTML via aha ('q' quits htop immediately).
render_htop() {
  echo "<h2>htop</h2>"
  echo q | htop | aha --line-fix
}

# Assemble the full page on stdout.
write_page() {
  cat << _EOF_
<html>
<head>
<title>$TITLE</title>
<style>
body,html{font-family:verdana,sans-serif;}
h1{font-family:verdana,sans-serif;font-size:30px;}
h2{font-family:verdana,sans-serif;font-size:20px;}
</style>
</head>
<body>
<h1>$TITLE</h1>
<p>$TIME_STAMP</p>
$(system_info)
$(show_uptime)
$(drive_space)
$(home_space)
$(render_htop)
</body>
</html>
_EOF_
}

usage() {
  echo "usage: sysinfo [[[-f file ] [-i]] | [-h]]"
}

interactive=
filename=~/sysinfo.html
while [ "$1" != "" ]; do
  case $1 in
    -f | --file ) shift
      filename=$1
      ;;
    -i | --interactive ) interactive=1
      ;;
    -h | --help ) usage
      exit
      ;;
    * ) usage
      exit 1
      ;;
  esac
  shift
done

# Quote the target: the path may contain spaces.
write_page > "$filename"
| true
|
41f622bc91b83f605f931fa5836a74171c661ce5
|
Shell
|
nsidc/cumulus-dashboard
|
/tunnel.sh
|
UTF-8
| 2,532
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# tunnel.sh — open an SSM port-forwarding session to the sandbox Cumulus
# ECS cluster host, then an ssh tunnel binding local port 443 to the API
# Gateway host, so the dashboard URL works from this machine.
# Requires: aws CLI, jq, an /etc/hosts entry (checked below), sudo.

# Sandbox deployment settings.
AWS_PROFILE=sandbox
LOCAL_PORT=6789
EC2_KEY=~/.ssh/nsidc-sb.pem
PREFIX=nsidc-cumulus-sbx
# Look up the API Gateway id of the "<prefix>-archive" REST API and build
# the hostname the dashboard expects.
API_SUBDOMAIN=$(aws --profile=${AWS_PROFILE} apigateway get-rest-apis | jq -r ".[] | map(select(.name == \"${PREFIX}-archive\")) | .[].id")
HOST=${API_SUBDOMAIN}.execute-api.us-west-2.amazonaws.com
# The HTTPS tunnel only helps if ${HOST} resolves to localhost.
if ! grep -E "^127.0.0.1\s*${HOST}$" /etc/hosts > /dev/null
then
echo -e "Add the following line to /etc/hosts and try again:\n"
echo -e "127.0.0.1\t${HOST}"
exit 1
fi
# First running instance tagged as the ECS cluster host.
INSTANCE_ID=$(aws --profile=${AWS_PROFILE} ec2 describe-instances --max-items 1 --filters "Name=tag-value,Values=${PREFIX}-CumulusECSCluster" "Name=instance-state-name,Values=running" | jq -r '.Reservations[0].Instances[0].InstanceId')
echo "Opening tunnel with values:"
echo "AWS_PROFILE=${AWS_PROFILE}"
echo "LOCAL_PORT=${LOCAL_PORT}"
echo "EC2_KEY=${EC2_KEY}"
echo "HOST=${HOST}"
echo "PREFIX=${PREFIX}"
# Snapshot SSM sessions that already exist so cleanup() can tell ours
# apart from someone else's and leave theirs alone.
PRIOR_SESSION_IDS=$(aws --profile=${AWS_PROFILE} ssm describe-sessions \
--state Active \
--filters key=Target,value=${INSTANCE_ID} \
--query 'Sessions[*].SessionId' \
--output text)
# Forward local ${LOCAL_PORT} to port 22 on the instance, in the background.
aws --profile=${AWS_PROFILE} ssm start-session \
--target ${INSTANCE_ID} \
--document-name AWS-StartPortForwardingSession \
--parameters portNumber=22,localPortNumber=${LOCAL_PORT} &
# Give the background session time to register before re-listing.
sleep 5
SESSION_IDS=$(aws --profile=${AWS_PROFILE} ssm describe-sessions \
--state Active \
--filters key=Target,value=${INSTANCE_ID} \
--query 'Sessions[*].SessionId' \
--output text)
# On Ctrl-C: terminate only the sessions this run created (those absent
# from the prior snapshot), then exit.
function cleanup() {
echo ""
for SESSION_ID in ${SESSION_IDS}; do
if [[ "${PRIOR_SESSION_IDS}" =~ "${SESSION_ID}" ]]; then
echo ""
else
echo "Cleaning up ssm session ${SESSION_ID}"
aws --profile=${AWS_PROFILE} ssm terminate-session --session-id "${SESSION_ID}" > /dev/null
fi
done
exit 0
}
trap cleanup SIGINT
echo ""
echo "This script uses \`sudo ssh\` to bind your host's port 443 to port 443 on the SSM host."
echo "You may be prompted for your password."
read -p "Press ^C to exit, or Enter to continue. " continue
# ssh through the forwarded port; binding local 443 requires sudo.
sudo ssh -f -N -p ${LOCAL_PORT} -L 443:${HOST}:443 -i ${EC2_KEY} ec2-user@127.0.0.1
echo ""
sleep 1
echo ""
echo "Open the following URL in your browser to access the dashboard:"
echo "https://${HOST}/sbx/dashboard/${PREFIX}-dashboard/index.html"
echo ""
echo "Press ^C to close the tunnel."
# Keep a foreground process alive (up to 24h) so the SIGINT trap can fire.
sleep 86400
| true
|
e0379d81614466f62aba2055bc71ffe11409d69a
|
Shell
|
isurudevj/razor-crest
|
/Kafka/kafka_manage.sh
|
UTF-8
| 1,814
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Helper for running a local Kafka/Zookeeper stack on Kubernetes.
# Usage: kafka_manage.sh {start|clean|topic|ssh|logs|port-forward}

cmd=$1

# Remove every Kafka/Zookeeper deployment and service.
clean() {
  kubectl delete -f dc-kafka.yml
  kubectl delete -f sc-kafka.yml
  kubectl delete -f dc-zookeeper.yml
  kubectl delete -f sc-zookeeper.yml
}

# Apply all manifests, wait for the kafka pod to reach the Running phase,
# then forward its client port to localhost.
start() {
  kubectl apply -f dc-kafka.yml
  kubectl apply -f sc-kafka.yml
  kubectl apply -f dc-zookeeper.yml
  kubectl apply -f sc-zookeeper.yml
  local phase
  phase=$(kubectl get pods --selector app=local-kafka --output jsonpath="{.items[0].status.phase}")
  until [ "$phase" = "Running" ]; do
    echo "Service local-kafka is still in $phase status, waiting for port forward"
    sleep 2;
    phase=$(kubectl get pods --selector app=local-kafka --output jsonpath="{.items[0].status.phase}")
  done
  kubectl port-forward service/local-kafka 29092
}

# Create topic $1 with $2 partitions inside the kafka pod.
topic() {
  local name=$1
  local partitions=$2
  local pod
  pod=$(kubectl get pods --selector app="local-kafka" --output jsonpath="{.items[0].metadata.name}")
  kubectl exec "$pod" -- kafka-topics --create --topic "$name" --partitions "$partitions" --bootstrap-server localhost:29092
}

# Open an interactive shell in the first pod labelled app=$1.
# (Note: this function name shadows the ssh binary within this script.)
ssh() {
  local svc=$1
  local pod
  echo "$svc"
  pod=$(kubectl get pods --selector app="${svc}" --output jsonpath="{.items[0].metadata.name}")
  echo "$pod"
  kubectl exec -it "$pod" -- /bin/bash
}

# Tail the logs of the first pod labelled app=$1.
logs() {
  local svc=$1
  local pod
  echo "$svc"
  pod=$(kubectl get pods --selector app="${svc}" --output jsonpath="{.items[0].metadata.name}")
  echo "$pod"
  kubectl logs -f "$pod"
}

# Forward the kafka service port without (re)deploying anything.
port_forward() {
  kubectl port-forward service/local-kafka 29092
}

case $cmd in
  ssh)
    shift; ssh "$@" ;;
  logs)
    shift; logs "$@" ;;
  port-forward)
    shift; port_forward ;;
  clean)
    clean ;;
  start)
    start ;;
  topic)
    shift; topic "$@" ;;
  *)
    echo "Sorry didn't understand" ;;
esac
| true
|
cef9ae041d93ddad95f38d21d3681141b81051f9
|
Shell
|
douglasmg7/txt
|
/linux/archlinux/asus/30-xorg.sh
|
UTF-8
| 2,375
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Arch Linux Xorg setup (Asus laptop): video drivers, X server, suckless
# st + dwm built from source with local configs, fonts and X utilities.

# Identify the graphics card.
# lspci -v | grep -A1 -e VGA -e 3D
printf "\nInstalling video driver...\n"
sudo pacman -S xf86-video-intel vulkan-intel mesa --noconfirm
printf "\nInstalling display server..."
sudo pacman -S xorg xorg-server --noconfirm
printf "\nInstalling Xstart..."
sudo pacman -S xorg-xinit --noconfirm
printf "\nCreating AUR directory..."
mkdir -p ~/aur
# Sources are built under ~/src; create it so the clones below cannot fail.
mkdir -p ~/src
# st.
# Required by dwm.
printf "\nCloning st (Simple terminal)..."
git clone https://git.suckless.org/st ~/src/st
# Abort instead of running make in the wrong directory if the clone failed.
cd ~/src/st || exit 1
printf "\nCreating symbolic link to st configuration."
ln -s ~/dotfiles/st/config_asus.h ~/src/st/config.h
printf "\nCleaning, compiling and installing st..."
make clean
make
sudo make install
# dwm.
printf "\nCloning dwm (Dynamic Windows Manager)..."
git clone https://git.suckless.org/dwm ~/src/dwm
cd ~/src/dwm || exit 1
printf "\nCreating symbolic link to dwm configuration."
ln -s ~/dotfiles/dwm/config_asus.h ~/src/dwm/config.h
printf "\nCleaning, compiling and installing dwm..."
make clean
make
sudo make install
printf "\nCreating Xserver config file...\n"
cat > ~/.xserverrc << EOF
#!/bin/sh
exec /usr/bin/Xorg -nolisten tcp "\$@" vt\$XDG_VTNR
EOF
printf "\nCreating symbolic link for .xinitrc...\n"
ln -s ~/dotfiles/xinitrc_asus ~/.xinitrc
# .Xresources
printf "\nCreating symbolic link for .Xresources...\n"
ln -s ~/dotfiles/Xresources ~/.Xresources
printf "\nInstalling xserver fonts..."
sudo pacman -S ttf-dejavu ttf-inconsolata --noconfirm
printf "\nInstalling dmenu..."
sudo pacman -S dmenu --noconfirm
# To show current wifi on dwm menu.
printf "\nInstalling iw...\n"
sudo pacman -S iw --noconfirm
printf "\nInstalling slock..."
sudo pacman -S slock --noconfirm
# Required by xorg-server.
printf "\nInstalling libinput to set inputs like touchpad...\n"
sudo pacman -S xf86-input-libinput --noconfirm
# To config touchpad.
# $ xinput list
# $ xinput list-props device
# $ xinput set-prop <device> <option-number> <setting>
# Keyboard layout setting (not needed, already on .xinitrc).
# $ startx
# List current config.
# $ setxkbmap -query
# Set keyboard layout.
# $ setxkbmap -model abnt2 -layout br -option
# Blank option, so compose:ralt will not be set.
# Start xserver using ~/.xinitrc.
# startx
# Start xserver using a dwm windows manager, not using ~/.xinitrc.
# startx /usr/bin/dwm
# Quit xserver.
# pkill -15 Xorg
| true
|
de0b95d109f024ef2951b4800d0bc28445d05eaa
|
Shell
|
akashmahakode/benchmarking_file_systems
|
/ceph/filegen.sh
|
UTF-8
| 303
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# filegen.sh COUNT SIZE — create COUNT files of SIZE 'a' bytes each inside
# ./some_dir, named <hostname>-file<i>. Used for filesystem benchmarking.
count="$1"
size="$2"
fn=$(hostname)
#mkdir some_dir;
# Guard the cd: without it the files would be scattered in the cwd.
cd some_dir || exit 1
i=1
while [ "$i" -le "$count" ]; do
  # Very large files are written in two halves (halve size, append twice —
  # same total bytes) so perl never builds one enormous string.
  if [ "$size" -gt 5000000000 ]; then
    size=$((size / 2))
    perl -e 'print "a" x '"$size" >> "$fn-file$i"
  fi
  perl -e 'print "a" x '"$size" >> "$fn-file$i"
  i=$((i + 1))
done
cd ..
| true
|
cdcd00fe87ec6241eb390dc3590bc8fba66097a7
|
Shell
|
slava-github/slava-scripts
|
/lib/screenlayout/work-mon.sh
|
UTF-8
| 1,885
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# work-mon.sh — xrandr screen-layout switcher for the work setup: either
# laptop panel only (LVDS1) or two external HDMI monitors, saving and
# restoring window positions around the switch. Output goes to a log file.

# Cache of the last xrandr state; used to skip work when nothing changed.
xrl=/tmp/xrandr.last
# Wait (up to ~10s) until `wmctrl -d` reports the given geometry string,
# i.e. the window manager has picked up the new screen size.
function waitWM {
data=$1;
# while wmctrl -d|grep -vq "$data";do sleep 1;done;
for (( i=1 ; $i<10 ; i++ )) ; do
if wmctrl -d|grep "$data";then
i=100
else
sleep 1
fi;
done;
# i stays 10 only on timeout (success jumps it past 100): dump the
# desktop list for debugging.
if [ "$i" == "10" ];then
wmctrl -d
fi
}
# Record the current xrandr state (for the change check above) and stop.
function saExit {
xrandr -q >$xrl
exit
}
# Whole body runs in a subshell so all output lands in the log file at
# the bottom; positional args ($1 "force", $2 "single") pass through.
(
FILE=/tmp/winpos.slava
set -e
echo -en "\n\nStart "
date
set -o xtrace
# Previous vs current xrandr output; exit early if unchanged, unless
# invoked with "force".
c=`cat $xrl || true`
n=`xrandr -q`
if [ "$c" = "$n" ] && ( [ -z "$1" ] || [ "$1" != "force" ] );then exit;fi
# Save current window positions before rearranging screens.
winpos save stdout >$FILE.tmp
# Laptop-only mode: forced via "single", or when LVDS1 is the only
# connected output.
if ( [ -n "$2" ] && [ "$2" = "single" ] && (xrandr -q|grep -q 'LVDS1 connected') ) || ( (xrandr -q|grep -c ' connected'|grep -q 1) && (xrandr -q|grep -q 'LVDS1 connected') );then
test -e $FILE || mv $FILE.tmp $FILE
xrandr --output HDMI2 --off
waitWM "1280x999"
xrandr --output DP3 --off --output DP2 --off --output DP1 --off --output TV1 --off --output HDMI2 --off --output HDMI1 --off --output LVDS1 --mode 1280x800 --pos 0x0 --rotate normal --output VGA1 --off
waitWM "1280x800"
xfce4-panel --restart
sleep 3
wmctrl -c Gigolo
saExit
# Dual-monitor mode: both HDMI1 and HDMI2 connected. First enable HDMI1
# alone, then extend with HDMI2 to the right.
elif xrandr -q|grep -Ec 'HDMI(1|2) connected'|grep -q 2;then
xrandr --output DP3 --off --output DP2 --off --output DP1 --off --output TV1 --off --output HDMI2 --off --output HDMI1 --mode 1280x1024 --pos 0x0 --rotate normal --output LVDS1 --off --output VGA1 --off
waitWM "1280x999"
xrandr --output DP3 --off --output DP2 --off --output DP1 --off --output TV1 --off --output HDMI2 --mode 1280x1024 --pos 1280x0 --rotate normal --output HDMI1 --mode 1280x1024 --pos 0x0 --rotate normal --output LVDS1 --off --output VGA1 --off
waitWM "2560x999"
xfce4-panel --restart
# Restore the saved window positions, then archive the save file.
test -e $FILE || saExit
winpos restore stdin < $FILE
mkdir -p /tmp/slava.bkp
mv $FILE /tmp/slava.bkp/winpos.`date +%Y%m%d-%H%M%S`
wmctrl -c Gigolo
saExit
fi
test -e $FILE || mv $FILE.tmp $FILE
) >>/home/slava/.local/log/mon-chng.log 2>&1
| true
|
7f994ce32a96a11a02b3b446e0338513645445e5
|
Shell
|
11philip22/scripts
|
/autogit.sh
|
UTF-8
| 151
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Commit everything in the working tree and push to origin/master.
# Commit message: all arguments joined, or a timestamp when none given.
if [[ $# -eq 0 ]]; then
  msg=$(date '+%F_%H:%M:%S')
else
  # "$*" joins the arguments into one word; the original unquoted $@ was
  # subject to word-splitting/globbing.
  msg="$*"
fi
git add .
git commit -m "${msg}"
git push origin master
| true
|
e1b484985e86fa4dd4e6d51eb08352bc6659bdc1
|
Shell
|
ymxl85/MRs-based-test-suite-for-APR
|
/original/Angelix/pt2/rds/7/oracle3
|
UTF-8
| 3,025
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Test oracle: compare the output of the patched ./pt2 against the golden
# reference for one of 60 fixed fuzz inputs, selected by the test number
# in $1. Under `set -e`, a non-empty diff makes the script exit non-zero.
set -euo pipefail
# Directory containing this script; the golden binary lives relative to it.
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# Run both binaries on the same input string and diff their outputs.
# ANGELIX_RUN lets the repair harness substitute its own runner for the
# candidate binary; the golden one always runs under angelix-eval.
test-pt2 () {
diff <(${ANGELIX_RUN:-angelix-eval} ./pt2 "$1") <(angelix-eval $SCRIPT_DIR/../golden/pt2 "$1")
}
# Map the test number to its (heavily escaped) fixed input string.
case "$1" in
1)
test-pt2 "0G~%[a[czf"
;;
2)
test-pt2 "kH"
;;
3)
test-pt2 "Ocm>0W)<@09\\"
;;
4)
test-pt2 "8gU()d.\`X?_Cs6q7ev]]"
;;
5)
test-pt2 "|F:Iq&1!V[,Z{>i."
;;
6)
test-pt2 "Q73RD|/UIGV"
;;
7)
test-pt2 "A"
;;
8)
test-pt2 "x\\"
;;
9)
test-pt2 "y_xFiG<"
;;
10)
test-pt2 "Zy+o(%XuRd5]Ml~i6"
;;
11)
test-pt2 "(V{rE|gqjWJAmcE"
;;
12)
test-pt2 "\"A,&uENZ7 "
;;
13)
test-pt2 "8JJ"
;;
14)
test-pt2 "?tL2j{]RD"
;;
15)
test-pt2 ".^L|91~.mua/y.NG@V"
;;
16)
test-pt2 "EcR;2\\7G!"
;;
17)
test-pt2 "[J:H~UJWd"
;;
18)
test-pt2 "Q/::E973jf:"
;;
19)
test-pt2 "+@=|sQs}gIpAwKjx\"S?"
;;
20)
test-pt2 "rnh\"-Xq>s@@wmm6?IB-"
;;
21)
test-pt2 "iY!I\$IAu["
;;
22)
test-pt2 "Cn%58^pVjAArMt"
;;
23)
test-pt2 "+^d/4mkR\\"
;;
24)
test-pt2 "0yTYa_r( <~-2EaI"
;;
25)
test-pt2 "BlS"
;;
26)
test-pt2 "nG#}o70S>-aLY"
;;
27)
test-pt2 "l.N3L<P:+Y&vwWn"
;;
28)
test-pt2 "ynPz5EG:\`:78j{\`lX\y,"
;;
29)
test-pt2 "^l #R"
;;
30)
test-pt2 "!tijJbY"
;;
31)
test-pt2 ",|o@+"
;;
32)
test-pt2 "c,n5*jULx"
;;
33)
test-pt2 "Ul12"
;;
34)
test-pt2 "^\`K_N0^;M42<VfMjm6|"
;;
35)
test-pt2 "&uK<L=2O"
;;
36)
test-pt2 "M"
;;
37)
test-pt2 "w?N@X(VFBD8qPZ"
;;
38)
test-pt2 "9y,T\`K xG~!"
;;
39)
test-pt2 "Wi\"1O^x}-z81tUz(e"
;;
40)
test-pt2 "&73apfz}S"
;;
41)
test-pt2 "%S&jU1GqkD&qK"
;;
42)
test-pt2 "\"\$N0EQSVTcKLqMZ)<k2"
;;
43)
test-pt2 "Oe8gb "
;;
44)
test-pt2 "#Sp\\:_"
;;
45)
test-pt2 "U0ebZrQq; N|3_C%pf;"
;;
46)
test-pt2 "=xLMW"
;;
47)
test-pt2 "B"
;;
48)
test-pt2 "7UvO%^Fo"
;;
49)
test-pt2 "lW%6 "
;;
50)
test-pt2 "V(XIP&"
;;
51)
test-pt2 "W2=\wh\y=[C%,<"
;;
52)
test-pt2 "55m"
;;
53)
test-pt2 "/pz.H50SfNVC_d.v{j/"
;;
54)
test-pt2 "q6M]fY"
;;
55)
test-pt2 "iKMm<\$3B]#]x%nz08/_"
;;
56)
test-pt2 "5-8&1a_d]x*E"
;;
57)
test-pt2 "a eII"
;;
58)
test-pt2 "|7GFrn1{))*2~|&"
;;
59)
test-pt2 "9tWf> }s)bgDr"
;;
60)
test-pt2 "x*PzcYW~\`B91"
;;
esac
| true
|
2f9b2fd693979b0b1ef17f7850489cd261f4f19d
|
Shell
|
Dysonsun/tools
|
/bag_covert/record_bag.zsh
|
UTF-8
| 1,878
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env zsh
###############################################################################
#2020/9/18 sundong
# Record rosbag topic sets for the localization stack: `pose` records the
# pose-related topics, `all` additionally records the calibrated lidar
# cloud; `stop` ends a recording gracefully.
###############################################################################
# BASH_SOURCE is a bash-ism that is empty under zsh (this script's shell),
# which made DIR resolve to the current directory; fall back to $0 so the
# setup file is found no matter where the script is invoked from.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]:-$0}" )" && pwd )"
source "${DIR}/install_isolated/setup.zsh"

# Record pose topics, unless a `rosbag record` is already running.
function pose() {
  NUM_PROCESSES="$(pgrep -c -f "rosbag record")"
  if [ "${NUM_PROCESSES}" -eq 0 ]; then
    rosbag record -o pose --split --duration=20m -b 2048 \
      /gpsdata \
      /GPSmsg \
      /GPSmsg_fix \
      /gps_distance \
      /imudata \
      /insvelocity \
      /vehiclestate_GPS \
      /lidar_odometry_for_mapping \
      /gps_by_lidar_odometry \
      /lidar_odometry_to_earth \
      /sensor_fusion_output \
      /lidar_preciseodometry_to_earth
  fi
}

# Record everything pose() records plus the calibrated point cloud.
function all() {
  NUM_PROCESSES="$(pgrep -c -f "rosbag record")"
  if [ "${NUM_PROCESSES}" -eq 0 ]; then
    rosbag record -o pose --split --duration=20m -b 2048 \
      /gpsdata \
      /GPSmsg \
      /GPSmsg_fix \
      /gps_distance \
      /imudata \
      /insvelocity \
      /vehiclestate_GPS \
      /lidar_odometry_for_mapping \
      /gps_by_lidar_odometry \
      /lidar_odometry_to_earth \
      /sensor_fusion_output \
      /lidar_preciseodometry_to_earth \
      /lidar_cloud_calibrated
  fi
}

# SIGINT lets rosbag close the bag file cleanly.
function stop() {
  pkill -SIGINT -f record
}

function help() {
  echo "Usage:"
  echo "$0 [pose] Record pose to data/bag."
  echo "$0 [all] Record all data to data/bag."
  echo "$0 stop Stop recording."
  echo "$0 help Show this help message."
}

case $1 in
  pose)
    shift
    pose $@
    ;;
  all)
    shift
    all $@
    ;;
  stop)
    shift
    stop $@
    ;;
  help)
    shift
    help $@
    ;;
  *)
    all $@
    ;;
esac
| true
|
f3fbc404fe820f4918627feac0f77530dc12e328
|
Shell
|
outtersg/guili
|
/util.versions.test
|
UTF-8
| 2,228
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2019-2020 Guillaume Outters
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Self-test for vmax: feed fixed version/constraint sets through the
# `tester` harness (from util.test.sh); vmax itself is defined elsewhere
# in guili.
vmax_test()
{
. "$SCRIPTS/util.test.sh"
tester _vmax_tester 5.6 ">= 5.4" "< 6" "< 5.7" "< 6"
tester _vmax_tester 5.6.99 -p 99 ">= 5.4" "< 6" "< 5.7" "< 6"
tester _vmax_tester "" ">= 5.4"
tester _vmax_tester 5.6.99 -p 99 ">= 5.4" "< 6" "< 5.7.0.0" "< 6"
tester _vmax_tester 5.7.0.99 -p 99 "< 5.7.1" "< 5.7.1.0"
# N.B.: if one day this is implemented with negative values (-99 for the
# previous version, -1 being reserved for the GM, -2 for the beta and -3
# for the alpha), should < 5.7.0 < 5.7.0.0 yield 5.7.0.-99 or 5.7.0.0.-99?
}
# Run `vmax` with the remaining args and compare against the expected
# value ($1); report on stderr and return 126 on mismatch.
_vmax_tester()
{
local att="$1" ; shift
res="`vmax "$@"`"
[ "$res" != "$att" ] || return 0
echo "vmax $*: $res au lieu de $att" >&2
return 126
}
# Self-test for aliasVersion: expand an alias pattern for version 3.14.15.
aliasVersion_test()
{
. "$SCRIPTS/util.test.sh"
tester _aliasVersion_tester "truc_x:truc_xxxx:xx:trucxy:trucx:trucx.x:truc_xx" 3.14.15 "truc_3:truc_x31415:314:trucxy:truc3:truc3.14:truc_314"
}
# Run aliasVersion on guili_alias=$1 / version=$2 and compare against the
# expected expansion ($3); report on stderr and return 126 on mismatch.
_aliasVersion_tester()
{
guili_alias="$1"
version="$2"
aliasVersion x
[ "$guili_alias" != "$3" ] || return 0
(
echo "aliasVersion $1 avec $2:"
echo " $guili_alias"
echo "au lieu de"
echo " $3"
) >&2
return 126
}
| true
|
6609ce46f1c41276ba4f2c070d6ca49d9112308c
|
Shell
|
NetBSD/pkgsrc
|
/net/avahi/files/avahidaemon.sh
|
UTF-8
| 391
| 2.984375
| 3
|
[] |
no_license
|
#!@RCD_SCRIPTS_SHELL@
#
# $NetBSD: avahidaemon.sh,v 1.2 2008/12/20 23:51:39 ahoka Exp $
#
# PROVIDE: avahidaemon
# REQUIRE: DAEMON
# KEYWORD: shutdown
#
# rc.d control script for the Avahi daemon. The @...@ tokens are
# substituted by pkgsrc at package-build time.
# Pull in the rc.subr framework when the host provides it.
if [ -f /etc/rc.subr ]
then
. /etc/rc.subr
fi
name="avahidaemon"
rcvar=${name}
command="@PREFIX@/sbin/avahi-daemon"
required_files="@PKG_SYSCONFDIR@/avahi-daemon.conf"
# -D: detach and run in daemon mode.
avahidaemon_flags="-D"
load_rc_config $name
run_rc_command "$1"
| true
|
ab5297131e05c9e27eb22971096b64e38fcbeada
|
Shell
|
ipogudin/gauge
|
/scripts/conductor
|
UTF-8
| 319
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Sub-command dispatcher for `builder conductor`.

# Print usage for the conductor sub-command family.
conductor_help()
{
  echo "Usage: builder conductor [sub-command]"
  # Fixed typo: "use on of" -> "use one of".
  echo "You must use one of the following sub-commands:"
  echo " help - to print this help message"
}

# Dispatch sub-command $1; anything unrecognized gets an error message
# plus the help text.
conductor()
{
  case $1 in
    help)
      conductor_help
      ;;
    *)
      # Fixed typo: "ansent" -> "absent".
      echo "Unknown or absent command."
      echo
      conductor_help
      ;;
  esac
}
| true
|
763f30ca77069c838fbcb81f2ef74706926d7467
|
Shell
|
KalinaKar/Code-Academy
|
/shell/shell_kaliimitko2.sh
|
UTF-8
| 345
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Build the kaliimitko test binary when it is missing, run it, and report
# whether it exited cleanly.
TEST_FILE=kaliimitko

if [ ! -f "$TEST_FILE" ]; then
  echo "Binary file is not available, creating..." && gcc -g kaliimitko.c -o $TEST_FILE
fi

# Print a verdict for the exit status handed in as $1. (The original read
# the global $STATUS and silently ignored its argument.)
check_status() {
  if [ "$1" -ne "0" ]
  then
    echo "ERROR! OH NO, IT IS GONNA BLOW!"
  else
    echo "ALL GOOD, MOVE ALONG!"
  fi
}

./$TEST_FILE
check_status "$?"
exit 0
| true
|
3d399bad268a56164176fa4acdd04caf18aa409b
|
Shell
|
isaiah-king/acme-freight-erp
|
/.bluemix/pipeline-TEST.sh
|
UTF-8
| 322
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# CI test stage: install Node 4.4 via nvm, install dependencies, then run
# the coverage target (local-only when no Coveralls token is configured).
npm config delete prefix
curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.31.2/install.sh | bash
. ~/.nvm/nvm.sh
nvm install 4.4
npm install
# Quote the expansion: the original unquoted -z test misbehaves if the
# token is unset-with-spaces, and is fragile under word-splitting.
if [ -z "${COVERALLS_REPO_TOKEN}" ]; then
  npm run localcoverage
  echo No Coveralls token specified, skipping coveralls.io upload
else
  npm run coverage
fi
| true
|
fefded2a68c02293e05fe1ef5b0feef592d39837
|
Shell
|
dv01-inc/semantic-release-gcr
|
/scripts/gcloudlogin.sh
|
UTF-8
| 365
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Install the Google Cloud SDK if absent, then authenticate a service
# account from the base64-encoded $GCLOUD_KEY / $GCLOUD_EMAIL env vars.
if [ ! -d "$HOME/google-cloud-sdk/bin" ]; then rm -rf "$HOME/google-cloud-sdk"; curl https://sdk.cloud.google.com | bash; fi
# Add gcloud to $PATH
source "$HOME/google-cloud-sdk/path.bash.inc"
gcloud version
# Authentication flow
# printf avoids echo's flag/backslash pitfalls for arbitrary key data;
# base64 ignores the trailing newline either way.
printf '%s\n' "$GCLOUD_KEY" | base64 --decode > gcloud.json
gcloud auth activate-service-account "$GCLOUD_EMAIL" --key-file gcloud.json
| true
|
67a72563dbab6a6472963e0c6e4eaed26ae60046
|
Shell
|
seanpdoyle/dotfiles-local
|
/osx/defaults
|
UTF-8
| 3,644
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Sets reasonable OS X defaults.
#
# Or, in other words, set shit how I like in OS X.
#
# The original idea (and a couple settings) were grabbed from:
# https://github.com/mathiasbynens/dotfiles/blob/master/.osx
#
# Run ./set-defaults.sh and you'll be good to go.
# Disable the sound effects on boot
sudo nvram SystemAudioVolume=" "
# Map Caps Lock to Control
# NOTE(review): 1452 is Apple's USB vendor id; 544/591/610 are presumably
# the product ids of the specific keyboards in use — confirm via ioreg.
for productId in 544 591 610; do
defaults -currentHost write -g "com.apple.keyboard.modifiermapping.1452-$productId-0" -array "<dict><key>HIDKeyboardModifierMappingDst</key><integer>2</integer><key>HIDKeyboardModifierMappingSrc</key><integer>0</integer></dict>"
done
# Menu bar: disable transparency
defaults write NSGlobalDomain AppleEnableMenuBarTransparency -bool false
# Menu bar: hide the Time Machine, Volume, User, and Bluetooth icons
defaults write com.apple.systemuiserver dontAutoLoad -array \
"/System/Library/CoreServices/Menu Extras/TimeMachine.menu" \
"/System/Library/CoreServices/Menu Extras/Volume.menu" \
"/System/Library/CoreServices/Menu Extras/User.menu"
defaults write com.apple.systemuiserver menuExtras -array \
"/System/Library/CoreServices/Menu Extras/Bluetooth.menu" \
"/System/Library/CoreServices/Menu Extras/AirPort.menu" \
"/System/Library/CoreServices/Menu Extras/Battery.menu" \
"/System/Library/CoreServices/Menu Extras/Clock.menu"
# Disable Dashboard
defaults write com.apple.dashboard mcx-disabled -bool true
# Don’t show Dashboard as a Space
defaults write com.apple.dock dashboard-in-overlay -bool true
# Don’t automatically rearrange Spaces based on most recent use
defaults write com.apple.dock mru-spaces -bool false
# Remove the auto-hiding Dock delay
defaults write com.apple.dock autohide-delay -float 0
# Automatically hide and show the Dock
defaults write com.apple.dock autohide -bool true
###############################################################################
# Terminal & iTerm 2 #
###############################################################################
# Only use UTF-8 in Terminal.app
defaults write com.apple.terminal StringEncodings -array 4
# Set ~/src as home directory
defaults write com.apple.terminal NSNavLastRootDirectory ~/src
# Don’t display the annoying prompt when quitting iTerm
defaults write com.googlecode.iterm2 PromptOnQuit -bool false
# Disable press-and-hold for keys in favor of key repeat.
defaults write -g ApplePressAndHoldEnabled -bool false
# Use AirDrop over every interface. srsly this should be a default.
defaults write com.apple.NetworkBrowser BrowseAllInterfaces 1
# Always open everything in Finder's list view. This is important.
defaults write com.apple.Finder FXPreferredViewStyle Nlsv
# Show the ~/Library folder.
chflags nohidden ~/Library
# Set a really fast key repeat.
# NOTE(review): 0 is faster than System Preferences allows; some apps are
# known to mis-handle very low values — confirm this still works on the
# target macOS version.
defaults write NSGlobalDomain KeyRepeat -int 0
# Disable “natural” (Lion-style) scrolling
defaults write NSGlobalDomain com.apple.swipescrolldirection -bool false
# Always show scrollbars
defaults write NSGlobalDomain AppleShowScrollBars -string "Always"
# Hide Safari's bookmark bar.
defaults write com.apple.Safari ShowFavoritesBar -bool false
# Set up Safari for development.
defaults write com.apple.Safari IncludeInternalDebugMenu -bool true
defaults write com.apple.Safari IncludeDevelopMenu -bool true
defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true
defaults write com.apple.Safari "com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled" -bool true
defaults write NSGlobalDomain WebKitDeveloperExtras -bool true
| true
|
1ea9a94b0eb4f01fc4d4f92aaa40de5d939606d1
|
Shell
|
Vinotha16/WIN_ROLLBACK
|
/templates/linux_audit/rhel7/lockout_532_audit.fact
|
UTF-8
| 362
| 2.984375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# ** AUTO GENERATED **
# 5.3.2 - Ensure lockout for failed password attempts is configured (Automated)
# grep -Eq succeeds iff some auth line loads pam_faillock/pam_unix in
# either PAM file; its exit status replaces the original `| wc -l` count
# (same PASSED/FAILED verdicts, fewer processes, stops at first match).
if sudo grep -Eq '^\s*auth\s+\S+\s+pam_(faillock|unix)\.so' /etc/pam.d/system-auth /etc/pam.d/password-auth; then
  echo "{ \"lockout_532_audit\" : \"PASSED\" }"
else
  echo "{ \"lockout_532_audit\" : \"FAILED\" }"
fi
| true
|
8aadf0497b73a98829b3d452e7821b0d0ef6e40a
|
Shell
|
ptechen/k8s_deploy_step
|
/kubernetes/kube-controller-manager.sh
|
UTF-8
| 3,072
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the kube-controller-manager launcher script and register it with
# supervisord (one program entry per host, suffixed by part of the host's
# IP), then reload supervisor to start it.
# Write the launcher. The single-quoted block is emitted literally so the
# flags reach the file untouched.
echo '#!/bin/sh
./kube-controller-manager \
--cluster-cidr 172.3.0.1/16 \
--leader-elect true \
--log-dir /data/logs/kubernetes/kube-controller-manager \
--master http://127.0.0.1:8080 \
--service-account-private-key-file ./certs/ca-key.pem \
--service-cluster-ip-range 192.168.0.0/16 \
--root-ca-file ./certs/ca.pem \
--v 2
' > /opt/kubernetes/server/bin/kube-controller-manager.sh
chmod +x /opt/kubernetes/server/bin/kube-controller-manager.sh
mkdir -p /data/logs/kubernetes/kube-controller-manager
# Build a host-unique suffix from eth0's IPv4 address: take the address,
# keep the last two octets, and join them with '-' (e.g. 10.4.7.21 ->
# "7-21").
# NOTE(review): `tr -d "add:"` deletes the characters 'a', 'd' and ':'
# (apparently to cope with ifconfig-style "addr:" prefixes) — verify it
# is still needed with `ip addr` output.
localIP=$(ip addr|grep eth0|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "add:")
localip=${localIP%/*}
backip=$(echo $localip|awk -F. '{ print $3"."$4 }')
back_ip=${backip//./-}
# Supervisor program definition; ${back_ip} expands (outside the quotes)
# into the program name, the rest is literal.
echo '[program:kube-controller-manager-'${back_ip}']
command=/opt/kubernetes/server/bin/kube-controller-manager.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
' > /etc/supervisord.d/kube-conntroller-manager.ini
supervisorctl update
sleep 3
|
781a7aee58481c71a45722af0ebbd7ab11719fbf
|
Shell
|
BaiChaYuLu/host_manager
|
/Optimizing_Configuration/apache_secure/mod_gzip.sh
|
UTF-8
| 1,103
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Open the gzip compression: load mod_deflate in httpd.conf (if not
# already loaded) and append the standard DEFLATE filter block.

# Locate the Apache config. head -n1 guards against find returning
# several paths (the original could produce a multi-word CONF_PATH).
CONF_PATH=$(find / -name 'httpd.conf' -a -type f | head -n 1)
[ -d /usr/local/backup ] || mkdir -p /usr/local/backup
[ -d /usr/local/resault ] || mkdir -p /usr/local/resault

## edit the configuration file
# Abort when the deflate module is missing. The original
# `test || echo msg || exit 1` never reached exit 1 because echo succeeds.
MOD_DEFLATE=$(find / -name 'mod_deflate.so' | head -n 1)
if [ -z "$MOD_DEFLATE" ]; then
  echo "mod_deflate.so is not exist"
  exit 1
fi

# The original `if [ grep ... | grep ... ]` was a broken test invocation
# (always false, so the sed ran every time); test the pipeline's exit
# status directly instead.
if grep "mod_deflate.so" "$CONF_PATH" | grep -q "^LoadModule"; then
  echo "mod_deflate.so is load"
else
  # Original find pattern had a trailing space ("mod_deflate.so ") and
  # would have matched nothing; reuse the path found above.
  sed -e "/mod_include.so/a LoadModule deflate_module $MOD_DEFLATE" -i "$CONF_PATH"
fi

cat <<EOF>> "$CONF_PATH"
# Insert filter
SetOutputFilter DEFLATE
# Netscape 4.x has some problems...
BrowserMatch ^Mozilla/4 gzip-only-text/html
# Netscape 4.06-4.08 have some more problems
BrowserMatch ^Mozilla/4\.0[678] no-gzip
# MSIE masquerades as Netscape, but it is fine
BrowserMatch \bMSIE !no-gzip !gzip-only-text/html
# Don't compress images
SetEnvIfNoCase Request_URI \.(?:gif|jpe?g|png)$ no-gzip dont-vary
# Make sure proxies don't deliver the wrong content
# Header append Vary User-Agent env=!dont-vary
EOF
| true
|
a8e387179d5256274a0215ecc20d4d9c116b696b
|
Shell
|
dglyzin/quizgen
|
/accounting/broadcaster.sh
|
UTF-8
| 549
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# For every user listed in users_wpw.txt whose 8th field matches the
# group given as $1, copy the class notebook into their home directory
# and chown it to them.
onlygroup=$1
input="users_wpw.txt"
while IFS= read -r line
do
  user=$(echo "$line" | awk '{print $1}')
  group=$(echo "$line" | awk '{print $8}')
  # Quote both operands: the original unquoted [ ] was a syntax error on
  # empty/whitespace fields.
  if [ "$onlygroup" = "$group" ]
  then
    # NOTE(review): $word1 is never assigned anywhere in this script, so
    # this prints a blank line; kept for output compatibility.
    echo $word1
    echo cp /home/clusterhome/dglyzin/class2-2020-11-02.ipynb /home/clusterhome/"$user"/
    cp /home/clusterhome/dglyzin/class2-2020-11-02.ipynb /home/clusterhome/"$user"/
    echo chown "$user":regular /home/clusterhome/"$user"/class2-2020-11-02.ipynb
    chown "$user":regular /home/clusterhome/"$user"/class2-2020-11-02.ipynb
  fi
done < "$input"
| true
|
ad917ff6acbfb0c1700ed766b45e4e60978a4096
|
Shell
|
Garuda-1/ITMO-OS
|
/lab4/task6/override.sh
|
UTF-8
| 303
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Display a clock that refreshes in place once a second until the process
# receives SIGTERM, then print a farewell message and exit cleanly.

MSG="monka_omega"
MODE="RUN"

# SIGTERM handler: flip the main loop's state to its termination branch.
terminate()
{
  MODE="TERM"
}

# Publish this process's PID so another script can signal it.
echo $$ > .pid
trap 'terminate' SIGTERM

# Tick until terminate() switches MODE away from RUN.
while :
do
  if [ "$MODE" = "RUN" ]; then
    # Wipe the current line and rewrite the timestamp in place.
    echo -ne "\r \r$(date)"
    sleep 1
  elif [ "$MODE" = "TERM" ]; then
    echo -ne "\nExecution interrupted. Exiting...\n"
    exit 0
  fi
done
| true
|
468c66cb0bdc6afe5d5f4c1da570692e98cffab4
|
Shell
|
cha63506/OneClickInstall
|
/web/Executables/tools/check-previous-version.sh
|
UTF-8
| 1,627
| 3.265625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# (c) Copyright Ascensio System Limited 2010-2015
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# You can contact Ascensio System SIA by email at sales@onlyoffice.com
MAIL_SERVER_ID="";
DOCUMENT_SERVER_ID="";
COMMUNITY_SERVER_ID="";
# Succeed (status 0) iff the given command name resolves in this shell.
command_exists () {
  command -v "$1" > /dev/null 2>&1
}
# Install sudo with whichever package manager is present (apt-get or yum),
# then verify the install; on failure, emit the installer's machine-readable
# stop marker and quit.
install_sudo () {
  if command_exists apt-get ; then
    apt-get install sudo
  elif command_exists yum ; then
    yum install sudo
  fi
  if command_exists sudo ; then
    echo "sudo successfully installed"
  else
    echo "command sudo not found"
    echo "INSTALLATION-STOP-ERROR[5]"
    # NOTE(review): exits 0 even on failure — presumably the caller keys off
    # the INSTALLATION-STOP-ERROR[5] marker rather than the exit status; confirm.
    exit 0;
  fi
}
# Make sure sudo is available before the docker probes below use it.
if ! command_exists sudo ; then
    install_sudo;
fi
# When docker is present, look up containers left over from a previous
# onlyoffice installation; the IDs stay empty when nothing matches.
if command_exists docker ; then
    MAIL_SERVER_ID=$(sudo docker ps -a | grep 'onlyoffice-mail-server' | awk '{print $1}');
    DOCUMENT_SERVER_ID=$(sudo docker ps -a | grep 'onlyoffice-document-server' | awk '{print $1}');
    COMMUNITY_SERVER_ID=$(sudo docker ps -a | grep 'onlyoffice-community-server' | awk '{print $1}');
fi
# Report findings in the bracketed form the calling installer parses.
echo "MAIL_SERVER_ID: [$MAIL_SERVER_ID]"
echo "DOCUMENT_SERVER_ID: [$DOCUMENT_SERVER_ID]"
echo "COMMUNITY_SERVER_ID: [$COMMUNITY_SERVER_ID]"
echo "INSTALLATION-STOP-SUCCESS"
| true
|
efd019fa12725dd6245e8722bc6ca4b0124192f3
|
Shell
|
scalecube/scalecube-vaultenv
|
/scripts/test-secret-input.sh
|
UTF-8
| 121
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Read one line of secrets from stdin, echo it back for verification, then
# idle forever (one timestamp per second) so the process stays alive.
# BUGFIX: use read -r — without it, backslashes inside the secret are
# interpreted as escapes and the echoed value no longer matches the input.
read -r secrets
echo ">>> echoing secrets from process input: [$secrets]"
while true; do date; sleep 1; done
| true
|
4ad9fa8e0ca247992c08eb85efa561bf71bd88e4
|
Shell
|
sihunqu123/dotfiles
|
/script/sb.sh
|
UTF-8
| 62,608
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
echo "in sb launcher"
# Fail fast: abort on the first failing command / failing pipeline stage.
set -o errexit
set -o pipefail
#set -o nounset
######################################################################################
### If you failed to run this script, pls enable below 2 lines to enable debugging ###
######################################################################################
# set -v
# set -o xtrace
# The _grep/sed aliases defined below must expand in this non-interactive shell.
shopt -s expand_aliases
# Render a duration given in nanoseconds as a human-readable string, e.g.
# "1 days 2 hours 3 minutes 4 seconds 5 milliseconds". Units whose value is
# zero are omitted, except milliseconds, which are always printed and are
# followed by a newline.
function nano2Readable {
  local -r nanos=$1
  # work in milliseconds from here on
  local -r total_ms=$(( nanos / 1000000 ))
  local -r total_s=$(( total_ms / 1000 ))
  local -r days=$(( total_s / 60 / 60 / 24 ))
  local -r hours=$(( total_s / 60 / 60 % 24 ))
  local -r minutes=$(( total_s / 60 % 60 ))
  local -r seconds=$(( total_s % 60 ))
  local -r millis=$(( total_ms % 1000 ))
  if (( days > 0 )); then printf '%d days ' "$days"; fi
  if (( hours > 0 )); then printf '%d hours ' "$hours"; fi
  if (( minutes > 0 )); then printf '%d minutes ' "$minutes"; fi
  if (( seconds > 0 )); then printf '%d seconds ' "$seconds"; fi
  printf '%d milliseconds\n' "$millis"
}
# Split the path string held in the GLOBAL variable `theStr` and echo each part.
# NOTE(review): takes no arguments — callers must set `theStr` beforehand; the
# results are published via the globals thePath, theFile, theNameOnly and
# theDotExtension.
function getPathNameInfo {
# directory part incl. trailing '/', via PCRE lookahead (requires GNU grep -P)
thePath=$(echo ${theStr} | grep -oP ".*\/(?=[^/]+)") && echo "path: ${thePath}"
# file name = everything after the directory prefix
theFile=${theStr#${thePath}} && echo "filename: ${theFile}"
# name without the (last) extension; extension = remainder incl. the dot
theNameOnly=${theFile%.*} && echo "nameOnly(without Extension): ${theNameOnly}"
theDotExtension=${theFile#${theNameOnly}} && echo "dotExtension: ${theDotExtension}"
}
declare -r uname=$(uname)
declare currentOS=""
# to make `grep` compatible with all platforms: everything else in this file
# calls `_grep`, which must always resolve to a GNU-flavoured grep
# (plain grep on Linux/Cygwin, ggrep on macOS).
case "$uname" in
  (*Linux*) currentOS='Linux';
    echo "OS is Linux"
    alias _grep="grep"
    ;;
  (*Darwin*) currentOS='Darwin';
    echo "OS is MacOS."
    if command -v ggrep &> /dev/null
    then
      echo "[checked] GNU utils is install, ready to run"
      # BUGFIX: _grep was never defined on macOS, so `_grep --version` below
      # (and every later `_grep` call in this file) died with
      # "command not found" under `set -o errexit`.
      alias _grep="ggrep"
      alias grep="ggrep"
      alias sed="gsed"
    else
      echo "Error: pls make sure ggrep(the GNU grep) is install. Tips: run
  brew install coreutils findutils gnu-tar gnu-sed gawk gnutls gnu-indent gnu-getopt
For details, pls refer to:
  https://apple.stackexchange.com/questions/69223/how-to-replace-mac-os-x-utilities-with-gnu-core-utilities"
      exit 2;
    fi
    ;;
  (*CYGWIN*) currentOS='CYGWIN';
    echo "OS is CYGWIN"
    alias _grep="grep"
    ;;
  (*) echo 'error: unsupported platform.'; exit 2; ;;
esac;
# sanity check: _grep must now be usable (also prints the grep version)
_grep --version
echo "currentOS is :${currentOS}"
# Set magic variables for current file & dir
declare -r __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
declare -r __file="${__dir}/$(basename "${BASH_SOURCE[0]}")"
declare -r __base="$(basename ${__file} .sh)"
declare -r __root="$(cd "$(dirname "${__dir}")" && pwd)" # <-- change this as it depends on your app
declare -r githubPath="/opt/github/Atlas/sourcecode"
declare -r tmpPath="${githubPath}/tmp"
host_githubPath=${HOST_GITHUBPATH:-/media/sf_github} # change to a path with enough ACL if u r are Mac user.
host_side_githubPath=${HOST_SIDE_GITHUBPATH:-/d/github} # change to a path with enough ACL if u r are Mac user.
declare -r nodeCommonUtilsPath="${githubPath}/node-common-utils"
declare -r nodeRestClientPath="${githubPath}/node-rest-client"
declare -r nodeCommonUtilsLibPath="node_modules/@atlas/node-common-utils"
declare -r nodeRestClientLibPath="node_modules/@atlas/node-rest-client"
# the namespace for kubectl cluster
# declare -r namespace_default=default
namespace_default=${CONTAINER_NAMESPACE:-default}
CONTAINER_INDEX=${CONTAINER_INDEX:-0}
domain_default=${CLUSTER_DOMAIN:-k3d.atlashcl.com}
# BUGFIX: was ${RUN_IN_BACKGROUND:false} — without the '-' that is a substring
# expansion (offset 0), so the intended default "false" was never applied and
# the variable stayed empty when unset.
RUN_IN_BACKGROUND=${RUN_IN_BACKGROUND:-false}
declare -r minioUser=minioadmin
declare -r minioPwd="miniopassword"
# project key -> "project_name podNamePrefix" (split on whitespace by callers)
declare -A projectNameMap=(
  ["api"]="api-gateway atlas-api-gateway-"
  ["files"]="files-microservice atlas-files-"
  ["batch"]="batch-microservice atlas-batch-"
  ["worker"]="worker-microservice atlas-worker-"
  ["atlas-ui"]="atlas-ui atlas-uiui-"
)
# echo "${projectNameMap[api]}"
# pArr=(${projectNameMap[api]})
# # declare -p pArr
# project_name=${pArr[0]}
# podName=${pArr[1]}
declare -r arg1="${1:-}"
declare -r arg2="${2:-}"
echo arg1: ${arg1} arg2: ${arg2} __dir: ${__dir} __file: ${__file} __base: ${__base} __root: ${__root}
## if [ "${1:-}" = "" ] || [ "${2:-}" = "" ]; then
# A project name is mandatory; method and param are optional (see usage).
if [ "${1:-}" = "" ]; then
  echo "usage: ${__file} project_name [method]"
  exit 107
fi
declare -r _project=$1
# url=${URL_base}${2}" "${3:-}" "${4:-}" "${5:-}" "${6:-}" "${7:-}" "${8:-}" "${9:-}
# echo url: ${url}
shift 1
declare -r _method=${1:-}
# BUGFIX: the second `shift 1` was unconditional; with `set -o errexit` it
# killed the whole script whenever the optional method/param arguments were
# omitted, because shifting past the end of $@ returns non-zero.
shift || true
declare -r _param=${1:-}
#set -x
#${__dir}/testRunner.sh "$@"
#ret=$?
#set +x
# echo "res: ${response_text}"
#
# reload Atlas-UI to the docker container
# @param 1: the method to do
#
# Push a freshly built atlas-ui bundle into the running UI pod.
# Requires ${linux_project_path}/dist to exist; prunes old webpack
# hot-update artefacts both locally (keep newest 3) and in the pod.
function reloadAtlasUI {
  local -r project_name="atlas-ui"
  local -r linux_project_path="${githubPath}/${project_name}"
  local -r container_project_path=/usr/local/site/${project_name}
  local -r container_project_path2=/usr/local/site/atlas-file-download-ui
  local -r containerName="atlas-uiui-${CONTAINER_INDEX}"
  if [ -e ${linux_project_path}/dist/ ]; then
    echo "${linux_project_path}/dist/ exists"
  else
    echo "Error: ${linux_project_path}/dist/ doesn't exists!!! Pls make sure generating build result in this folder."
    exit 2
  fi
  # keep only the three newest hot-update artefacts locally
  ls -A1t ${linux_project_path}/dist/app/*.hot-update.* |tail -n +4 | xargs -I % -L 1 rm -rfv % && true
  ls -A1t ${linux_project_path}/dist/public/*.hot-update.* |tail -n +4 | xargs -I % -L 1 rm -rfv % && true
  # docker exec -it ${containerName} sh -c 'rm -rfv ${container_project_path}/*.hot-update.*'
  # BUGFIX: these two commands used single quotes, so ${container_project_path}
  # was (not) expanded by the remote shell instead of locally and the old
  # hot-update files were never actually removed from the pod.
  kubectl exec -it -n ${namespace_default} ${containerName} -- sh -c "rm -rfv ${container_project_path}/*.hot-update.*"
  kubectl exec -it -n ${namespace_default} ${containerName} -- sh -c "rm -rfv ${container_project_path2}/*.hot-update.*"
  cp -fRp ${linux_project_path}/{public/locales,dist/app/}
  cp -fRp ${linux_project_path}/{public/locales,dist/public/}
  # find ${linux_project_path}/dist/ -maxdepth 1 -mindepth 1 -print |_grep -v "\.swp" |xargs -L 1 -I % docker cp % ${containerName}:${container_project_path}/
  # push both bundles into the pod in parallel (background jobs)
  find ${linux_project_path}/dist/app -maxdepth 1 -mindepth 1 -print | _grep -v "\.swp" |xargs -L 1 -I % kubectl cp % ${namespace_default}/${containerName}:${container_project_path}/ &
  find ${linux_project_path}/dist/public -maxdepth 1 -mindepth 1 -print | _grep -v "\.swp" |xargs -L 1 -I % kubectl cp % ${namespace_default}/${containerName}:${container_project_path2}/ &
}
#
# reload files-svc, also copy linux src to host(overwrite), and then to the docker container
# @param 1: the method to do
# @param 2: whether need to launch with --inspect-brk. default --inspect
#
# Manage the files-microservice pod.
# @param 1: method — "tar" (pull node_modules out of the pod),
#           "push" (push local node_modules into the pod),
#           anything else: sync sources and restart under the node inspector
# @param 2: isBrk — "true"/"1" to launch with --inspect-brk instead of --inspect
function filesSVC {
  local -r project_key="files"
  local -r project_name="${project_key}-microservice"
  local -r linux_project_path="${githubPath}/${project_name}"
  local -r host_project_path="${host_githubPath}/${project_name}"
  local -r host_side_project_path="${host_side_githubPath}/${project_name}"
  local -r container_project_path=/usr/local/site/${project_name}
  local -r containerName="atlas-${project_key}-0"
  local -r method=${1}
  local -r isBrk=${2}
  if [ "${method:-}" = "tar" ]; then # to tar node_modules/ files from container
    kubectl exec -it ${containerName} -- sh -c "
      cd ${container_project_path} &&
      tar cf n.tar node_modules/
    "
    docker cp ${containerName}:${container_project_path}/n.tar ${linux_project_path}/
    tar xf n.tar
    cp -fRpv n.tar ${host_project_path}/
    echo -e -n "please run:\n tar xf ${host_side_project_path}/n.tar -C ${host_side_project_path}/\nin host's bash terminal.\n"
  elif [ "${method:-}" = "push" ]; then # push local node_moudles to conainer
    (\
      cd ${linux_project_path} && \
      tar cf n2.tar node_modules/ \
    )
    kubectl cp ${linux_project_path}/n2.tar ${namespace_default}/${containerName}:${container_project_path}/
    kubectl exec -n ${namespace_default} -it ${containerName} -- sh -c "
      cd ${container_project_path} &&
      tar xf n2.tar
    "
    cp -fRpv ${linux_project_path}/n2.tar ${host_project_path}/
    echo -e -n "please run:\n tar xf ${host_side_project_path}/n2.tar -C ${host_side_project_path}/\nin host's bash terminal.\n"
  else # to sync and restart
    # BUGFIX: inspectVal was declared `local -r`, so the re-assignment below
    # hit "readonly variable" and (with set -o errexit) aborted the script
    # whenever a break-on-start launch was requested.
    local inspectVal="inspect"
    if [ "${isBrk:-}" = "true" ] || [ "${isBrk:-}" = "1" ]; then
      inspectVal="inspect-brk"
    fi
    echo "about to reload debug "
    set +x
    local -r pathsToMove=(
      src
      package.json
      package-lock.json
      config
    );
    for item in "${pathsToMove[@]}"; do
      cp -fRpv "${linux_project_path}/${item}" "${host_project_path}/"
      kubectl cp "${linux_project_path}/${item}" ${namespace_default}/${containerName}:"${container_project_path}/"
    done;
    ## find ${linux_project_path} -mindepth 1 -maxdepth 1 | _grep -Ev "/(src|.json|config|.git|node_modules)$" |xargs -L 1 -I % sh -c "
    ##  cp -fRpv % ${host_project_path} &&
    ##  kubectl cp % ${namespace_default}/${containerName}:${container_project_path}/
    ## "
    echo "=================================="
    find ${nodeCommonUtilsPath}/ -mindepth 1 -maxdepth 1 | _grep -P "(\/lib|src|.json)$"
    echo "=================================="
    # sync the node-common-utils sources into the project (local, host & pod)
    find ${nodeCommonUtilsPath}/ -mindepth 1 -maxdepth 1 | _grep -P "(\/lib|src|.json)$" |xargs -L 1 -I % sh -c "
      mkdir -p ${linux_project_path}/${nodeCommonUtilsLibPath}/ && cp -fRpv % ${linux_project_path}/${nodeCommonUtilsLibPath}/ &&
      mkdir -p ${host_project_path}/${nodeCommonUtilsLibPath}/ && cp -fRpv % ${host_project_path}/${nodeCommonUtilsLibPath}/ &&
      kubectl cp % ${namespace_default}/${containerName}:${container_project_path}/${nodeCommonUtilsLibPath}/
    "
    # start project in container
    # BUGFIX: the computed ${inspectVal} was ignored (node was hard-coded to
    # --inspect); also pass -n ${namespace_default} like the other kubectl calls.
    kubectl exec -it -n ${namespace_default} ${containerName} -- bash -c "source /etc/profile
      cd ${container_project_path}/ && \
      export NODE_ENV=production && \
      svc -d /service/${project_name}/ && \
      node --${inspectVal}=0.0.0.0:9229 .
    "
  fi
}
#
# reload api-gatway, also copy linux src to host(overwrite), and then to the docker container
# @param 1: the method to do
# @param 2: whether need to launch with --inspect-brk. default --inspect
#
# Sync api-gateway sources (plus node-common-utils) into the host share and
# the docker container, then restart it under the node inspector.
# NOTE(review): $1 is not read here — only $2 (isBrk) is used; confirm callers
# pass (method, isBrk) like the sibling reload* functions.
function reloadAPI_gateway {
  local -r linux_project_path="${githubPath}/api-gateway"
  local -r host_project_path="${host_githubPath}/api-gateway"
  local -r container_project_path="/usr/local/site/api-gateway"
  local -r containerName="atlas-doad-0"
  local -r isBrk=${2}
  local inspectVal="inspect"
  if [ "${isBrk:-}" = "true" ] || [ "${isBrk:-}" = "1" ]; then
    inspectVal="inspect-brk"
  fi
  echo "about to reload debug "
  # stop the supervised service first so files can be replaced safely
  docker exec -it ${containerName} svc -d /service/api-gateway
  cp -fRpv ${linux_project_path}/common ${linux_project_path}/server ${linux_project_path}/package.json ${linux_project_path}/package-lock.json ${linux_project_path}/config ${host_project_path}/
  docker cp ${linux_project_path}/common ${containerName}:${container_project_path}/
  docker cp ${linux_project_path}/server ${containerName}:${container_project_path}/
  docker cp ${linux_project_path}/config ${containerName}:${container_project_path}/
  docker cp ${linux_project_path}/package.json ${containerName}:${container_project_path}/
  docker cp ${linux_project_path}/package-lock.json ${containerName}:${container_project_path}/
  # sync node-common-utils into the project's node_modules (local, host & container)
  find ${nodeCommonUtilsPath}/ -mindepth 1 -maxdepth 1 |_grep -P "(\/lib|src|.json)$" |xargs -L 1 -I % sh -c "
    mkdir -p ${linux_project_path}/${nodeCommonUtilsLibPath}/ && cp -fRpv % ${linux_project_path}/${nodeCommonUtilsLibPath}/ &&
    mkdir -p ${host_project_path}/${nodeCommonUtilsLibPath}/ && cp -fRpv % ${host_project_path}/${nodeCommonUtilsLibPath}/ &&
    docker cp % ${containerName}:${container_project_path}/${nodeCommonUtilsLibPath}/
  "
  # relaunch in the foreground under the inspector
  docker exec -it ${containerName} bash -c "source /etc/profile
    cd ${container_project_path}/ && \
    export NODE_ENV=production && \
    svc -d /service/api-gateway/ && \
    node --${inspectVal}=0.0.0.0:9229 .
  "
}
#
# Copy files in given folder of given container to given path in the host
# @param 1: containerName - the name of the container
# @param 2: fromPath - the absolute source path in the container
# @param 3: toPath - the relative/absolute target path in the host
# @example copyFolderFromContainer2Host atlas-ldapsync-0 /abc/d ./asdb default
#
function copyFolderFromContainer2Host {
  local -r containerName=${1}
  local -r fromPath=${2}
  local -r toPath=${3}
  echo "copyFolderFromContainer2Host - containerName: ${containerName}, fromPath: ${fromPath}, toPath: ${toPath}"
  mkdir -p "${toPath}"
  # Stream a tar of the remote directory over the exec channel and unpack it
  # locally — avoids `kubectl cp` quirks with permissions/symlinks.
  kubectl exec -n ${namespace_default} ${containerName} -- sh -c "cd ${fromPath} && tar cf - ./" | tar xf - -C "${toPath}/"
  # kubectl exec atlas-ldapsync-0 -- sh -c "cd /usr/local/site/ldapsync-microservice/config/ && tar cf - ./" | tar xf - -C ./config/
}
#
# reload ldapsync, also copy linux src to host(overwrite), and then to the docker container
# @param 1: the method to do
# @param 2: whether need to launch with --inspect-brk. default --inspect
#
# Restart the ldapsync-microservice pod under the node inspector, after
# syncing node-common-utils into it.
# @param 2: isBrk — "true"/"1" to launch with --inspect-brk
function reloadLdapsync {
  # copyFolderFromContainer2Host atlas-ldapsync-0 /abc/d ./asdb
  # exit 0
  local -r project_key="ldapsync"
  local -r project_name="${project_key}-microservice"
  local -r linux_project_path="${githubPath}/${project_name}"
  local -r host_project_path="${host_githubPath}/${project_name}"
  local -r container_project_path="/usr/local/site/${project_name}"
  local -r containerName="atlas-${project_key}-0"
  local -r isBrk=${2}
  local inspectVal="inspect"
  if [ "${isBrk:-}" = "true" ] || [ "${isBrk:-}" = "1" ]; then
    inspectVal="inspect-brk"
  fi
  echo "about to reload debug "
  # first stop project in container to avoid "failed to delete"
  kubectl exec -it ${containerName} -- svc -d /service/${project_name}
  # cp -fRpv ${host_project_path}/
  # array=( \
  #  ${linux_project_path}/src \
  #  ${linux_project_path}/test \
  #  ${linux_project_path}/package.json \
  #  ${linux_project_path}/package-lock.json \
  #  ${linux_project_path}/config \
  # ); for item in "${array[@]}"; do cp -fRpv /opt/github/Atlas/sourcecode/wispr-node-buildtools/sandbox/buildroot/git/master/docker/cdma/service/startup-action/common.yml \
  # /opt/github/Atlas/sourcecode/wispr-node-buildtools/sandbox/buildroot/git/master/docker/${item}/service/startup-action/ && \
  # cp -fRpv /opt/github/Atlas/sourcecode/wispr-node-buildtools/sandbox/buildroot/git/master/docker/cdma/service/template/usr/local/site/config/fluent-bit/conf.d/common.conf \
  # /opt/github/Atlas/sourcecode/wispr-node-buildtools/sandbox/buildroot/git/master/docker/${item}/service/template/usr/local/site/config/fluent-bit/conf.d/ ; done;
  local -r pathsToMove=(
    src
    test
    package.json
    package-lock.json
    config
  );
  # NOTE(review): unlike the sibling reload functions, this loop only ECHOES
  # the cp / kubectl cp commands and never executes them — looks like a
  # leftover dry-run; confirm whether the actual copy is intended here.
  for item in "${pathsToMove[@]}"; do
    echo "cp ${linux_project_path}/${item} to ${host_project_path}/"
    echo "kubectl cp ${linux_project_path}/${item} ${namespace_default}/${containerName}:${container_project_path}/"
  done;
  # sync node-common-utils
  find ${nodeCommonUtilsPath}/ -mindepth 1 -maxdepth 1 |_grep -P "(\/lib|src|.json)$" |xargs -L 1 -I % sh -c "
    mkdir -p ${linux_project_path}/${nodeCommonUtilsLibPath}/ && cp -fRpv % ${linux_project_path}/${nodeCommonUtilsLibPath}/ &&
    mkdir -p ${host_project_path}/${nodeCommonUtilsLibPath}/ && cp -fRpv % ${host_project_path}/${nodeCommonUtilsLibPath}/ &&
    kubectl cp % ${namespace_default}/${containerName}:${container_project_path}/${nodeCommonUtilsLibPath}/
  "
  # start project in container
  kubectl exec -it ${containerName} -- bash -c "source /etc/profile
    cd ${container_project_path}/ && \
    export NODE_ENV=production && \
    svc -d /service/${project_name}/ && \
    node --${inspectVal}=0.0.0.0:9229 .
  "
}
#
# reload a project in a contianer, also copy linux src to host(overwrite), and then to the docker container
# @param 1: the method to do
# @param 2: whether need to launch with --inspect-brk. default --inspect
# @param 3: project_key
# @param 4: whether need to sync node-common-utils; default: false
# @param 5: paths to sync
# @param 6: whether need to add 'microservice' as project_name. default: true
#
# Generic pod reloader for a microservice.
# @param 1: method — "tar" | "config" | "push" | anything else: sync & restart
# @param 2: isBrk — "true"/"1" to launch with --inspect-brk
# @param 3: project_key (pod is atlas-<key>-<CONTAINER_INDEX>)
# @param 4: whether to sync node-common-utils/node-rest-client (default false)
# @param 5: whitespace-separated list of paths to sync
# @param 6: append "-microservice" to the project name (default true)
function reloadMicroSVC {
  # copyFolderFromContainer2Host atlas-ldapsync-0 /abc/d ./asdb default
  # exit 0
  set -o errexit
  local -r isSyncNodeCommonUtils="${4:-false}"
  local -r isAppendMicroSVC="${6:-true}"
  local -r project_key="${3}"
  local project_name="${project_key}"
  if [ "${isAppendMicroSVC:-}" = "true" ] || [ "${isAppendMicroSVC:-}" = "1" ]; then
    project_name="${project_key}-microservice"
  fi
  local -r linux_project_path="${githubPath}/${project_name}"
  local -r host_project_path="${host_githubPath}/${project_name}"
  # BUGFIX: host_side_project_path was referenced in the "please run" hints
  # below but never defined in this function, so they printed empty paths.
  local -r host_side_project_path="${host_side_githubPath}/${project_name}"
  local -r container_project_path="/usr/local/site/${project_name}"
  local -r containerName="atlas-${project_key}-${CONTAINER_INDEX}"
  local -r method=${1}
  local -r isBrk=${2}
  # intentionally unquoted: $5 is a whitespace-separated list of paths
  local -r pathsToMove=($5)
  if [ "${method:-}" = "tar" ]; then # to tar node_modules/ files from container
    docker exec -it ${containerName} sh -c "
      cd ${container_project_path} &&
      tar cf n.tar node_modules/
    "
    docker cp ${containerName}:${container_project_path}/n.tar ${linux_project_path}/
    tar xf n.tar
    cp -fRpv n.tar ${host_project_path}/
    echo -e -n "please run:\n tar xf ${host_side_project_path}/n.tar -C ${host_side_project_path}/\nin host's bash terminal.\n"
  elif [ "${method:-}" = "config" ]; then # copy the config from conainer to local
    copyFolderFromContainer2Host ${containerName} ${container_project_path}/config/ ${linux_project_path}/config
    mkdir -p ${host_project_path}/
    cp -fRpv ${linux_project_path}/config ${host_project_path}/
  elif [ "${method:-}" = "push" ]; then # push local node_moudles to conainer
    set -x
    (\
      cd ${linux_project_path} && \
      tar cf n2.tar node_modules/ \
    )
    set +x
    kubectl cp ${linux_project_path}/n2.tar ${namespace_default}/${containerName}:${container_project_path}/
    kubectl exec -it -n ${namespace_default} ${containerName} -- sh -c "
      cd ${container_project_path} &&
      tar xf n2.tar
    "
    mkdir -p ${host_project_path}/
    cp -fRpv ${linux_project_path}/n2.tar ${host_project_path}/
    echo -e -n "please run:\n tar xf ${host_side_project_path}/n2.tar -C ${host_side_project_path}/\nin host's bash terminal.\n"
  else # to sync and restart
    local inspectVal="inspect"
    if [ "${isBrk:-}" = "true" ] || [ "${isBrk:-}" = "1" ]; then
      inspectVal="inspect-brk"
    fi
    set -v
    set -o xtrace
    echo "about to reload debug "
    # first stop project in container to avoid "failed to delete"
    kubectl exec -it -n ${namespace_default} ${containerName} -- svc -d /service/${project_name}
    for item in "${pathsToMove[@]}"; do
      cp -fRpv "${linux_project_path}/${item}" "${host_project_path}/"
      kubectl cp "${linux_project_path}/${item}" ${namespace_default}/${containerName}:"${container_project_path}/"
    done;
    # sync node-common-utils
    if [ "${isSyncNodeCommonUtils:-}" = "true" ] || [ "${isSyncNodeCommonUtils:-}" = "1" ]; then
      find ${nodeCommonUtilsPath}/ -mindepth 1 -maxdepth 1 |_grep -P "(\/lib|src|.json)$" |xargs -L 1 -I % sh -c "
        mkdir -p ${linux_project_path}/${nodeCommonUtilsLibPath}/ && cp -fRpv % ${linux_project_path}/${nodeCommonUtilsLibPath}/ &&
        mkdir -p ${host_project_path}/${nodeCommonUtilsLibPath}/ && cp -fRpv % ${host_project_path}/${nodeCommonUtilsLibPath}/ &&
        kubectl cp % ${namespace_default}/${containerName}:${container_project_path}/${nodeCommonUtilsLibPath}/
      "
      find ${nodeRestClientPath}/ -mindepth 1 -maxdepth 1 |_grep -P "(\/lib|src|.json)$" |xargs -L 1 -I % sh -c "
        mkdir -p ${linux_project_path}/${nodeRestClientLibPath}/ && cp -fRpv % ${linux_project_path}/${nodeRestClientLibPath}/ &&
        mkdir -p ${host_project_path}/${nodeRestClientLibPath}/ && cp -fRpv % ${host_project_path}/${nodeRestClientLibPath}/ &&
        kubectl cp % ${namespace_default}/${containerName}:${container_project_path}/${nodeRestClientLibPath}/
      "
    fi
    ## local libPaths=''
    ## libPaths=(
    ##   "@atlas/js-logger"
    ##   "bunyan"
    ## )
    if [ "${project_key:-}" != "api-gateway" ] && [ "${libPaths:-}" != "" ]; then
      for item in "${libPaths[@]}"; do
        libPath="node_modules/${item}"
        mkdir -p ${linux_project_path}/${libPath}/ &&
        mkdir -p ${host_project_path}/${libPath}/ && cp -fRpv ${linux_project_path}/${libPath}/* ${host_project_path}/${libPath}/
        parentPath=$(echo ${container_project_path}/${libPath} | grep -oP ".*\/(?=[^/]+)")
        kubectl cp ${linux_project_path}/${libPath} ${namespace_default}/${containerName}:${parentPath}
      done;
    fi
    # start project in container
    ## use the deadloop echo to prevent disconnected by the loadbalancer
    # the always false statement `if(false && ...)`
    # BUGFIX: `A && B || C` parses as `(A && B) || C`, so the intended
    # always-false guard still fired whenever project_key was
    # "worker-microservice"; braces restore the intended grouping. Also use
    # namespace_default — ${namespace} was undefined here.
    if (( 8 == 2 )) && { [ "${project_key:-}" = "worker" ] || [ "${project_key:-}" = "worker-microservice" ]; }; then
      kubectl exec -it -n ${namespace_default} ${containerName} -- bash -c "source /etc/profile
        cd ${container_project_path}/ && \
        export NODE_ENV=production && \
        svc -d /service/${project_name}/ && \
        chown -R mdrop:mdrop ./ && \
        chown -R mdrop:mdrop /var/log/${project_name} && \
        setuidgid mdrop /usr/local/site/nodejs/bin/0x .
      "
      exit 0
    fi
    if [ "${RUN_IN_BACKGROUND:-}" = "true" ]; then
      kubectl exec -it -n ${namespace_default} ${containerName} -- bash -c "source /etc/profile
        cd ${container_project_path}/ && \
        export NODE_ENV=production && \
        svc -d /service/${project_name}/ && \
        echo '' > /var/log/${project_name}/${project_name}.log && \
        chown -R mdrop:mdrop ./ && \
        chown -R mdrop:mdrop /var/log/${project_name} && \
        svc -du /service/${project_name}/
      "
    else
      kubectl exec -it -n ${namespace_default} ${containerName} -- bash -c "source /etc/profile
        cd ${container_project_path}/ && \
        export NODE_ENV=production && \
        svc -d /service/${project_name}/ && \
        echo '' > /var/log/${project_name}/${project_name}.log && \
        chown -R mdrop:mdrop ./ && \
        chown -R mdrop:mdrop /var/log/${project_name} && \
        setuidgid mdrop node --${inspectVal}=0.0.0.0:9229 . &
        while :; do sleep 59; echo -n ' ' >&2; done &
        sleep 9
        tail -f /var/log/${project_name}/${project_name}.log
      "
    fi
  fi
}
# Re-own the service directory and log directory as mdrop:mdrop in every pod
# of the api/files/batch/worker projects.
function chown4Mdrop {
  local -r arr=(
    'api'
    'files'
    'batch'
    'worker'
  );
  for item in "${arr[@]}"; do
    # projectNameMap value is "project_name podNamePrefix" — split on whitespace
    pArr=(${projectNameMap[$item]})
    declare -p pArr
    project_name=${pArr[0]}
    podNamePrefix=${pArr[1]}
    echo "chown ${project_name} in ${podNamePrefix}"
    #echo "kubectl cp ${linux_project_path}/${item} ${namespace_default}/${containerName}:${container_project_path}/"
    (
      # BUGFIX: the old pattern "${podNamePrefix}\d+" never matched — plain
      # grep has no \d escape (that needs PCRE), so the loop was a no-op;
      # [0-9] matches the pod index in every grep flavour.
      IFS=$'\n'; for row in $(kubectl get pods -o wide --namespace=${namespace_default} |grep "${podNamePrefix}[0-9]"); do
        podName=$(echo ${row} |awk '{print $1}');
        container_project_path="/usr/local/site/${project_name}"
        echo ${podName} for project_name: ${project_name};
        kubectl exec -n ${namespace_default} ${podName} -- sh -c "chown -R mdrop:mdrop /var/log/${project_name} ${container_project_path}"
      done
    )
  done;
}
#
# create a default bucket in minio container
#
# docker exec -it -u 0 atlas-operation /usr/local/src/scripts/operation/admin.js createFilesBuckets
# Create the default cobrand bucket on the minio container, downloading the
# minio client (mc) first if it is not already present locally.
function createBucket {
  local -r linux_mcPath=/opt/shared/mc
  local -r container_project_path=/opt/shared
  local -r containerName="atlas-minio-0"
  # first, check if the mc exits in ${linux_mcPath}
  if [ -e ${linux_mcPath} ]; then
    echo "${linux_mcPath} already exists"
  else
    echo "${linux_mcPath} does not exists, will download it now..."
    # BUGFIX: the old code read the unrelated globals theStr/thePath (always
    # empty) and used the undefined command `$(unknown)` as the output name;
    # derive directory and file name from linux_mcPath itself instead.
    local -r filePath=${linux_mcPath%/*}
    local -r fileName=${linux_mcPath##*/}
    # pls make sure aria2c is install
    if command -v aria2c &> /dev/null
    then
      aria2c --check-certificate=false --dir=${filePath} --out=${fileName} -x 8 -s 8 https://dl.min.io/client/mc/release/linux-amd64/mc
    else
      echo "if the download is too slow, pls install aria2c and then try again."
      # single-thread download is too slow.
      (\
        cd ${filePath} && \
        wget https://dl.min.io/client/mc/release/linux-amd64/mc
      )
    fi
  fi
  docker exec -it -u 0 ${containerName} mkdir -p ${container_project_path}/
  docker cp ${linux_mcPath} ${containerName}:${container_project_path}/
  docker exec -it ${containerName} bash -c "
    cd ${container_project_path}/ && \
    chmod -R 777 ./* && \
    ./mc config host add minio http://minio.service.ext.atlas.com:9000 ${minioUser} ${minioPwd} && \
    ./mc mb minio/local.atlas.com.cobrand && \
    ./mc ls minio
  "
  local -r ret=$?
  # BUGFIX: the old check tested the undefined variable `exitStatus` (which
  # always evaluated to 0), so failures were reported as success; test the
  # captured `ret` instead.
  # NOTE(review): with `set -o errexit` a failing docker exec aborts earlier,
  # so ret is effectively always 0 here — confirm whether that is intended.
  if (( ret == 0 )); then
    echo "create bucket:local.atlas.com.cobrand successfully!"
  else
    echo "Error: create bucket failed! Pls make sure the minioKey and minioSecret are correct!"
  fi
}
# add default users
# Run the operation container's admin script to seed default users, with the
# node inspector listening on 9241 (break-on-start when isBrk is truthy).
function addDefaultUser {
  # NOTE(review): $1 (method) is captured but never used here — confirm.
  local -r method=${1}
  local -r isBrk=${2}
  local inspectVal="inspect"
  if [ "${isBrk:-}" = "true" ] || [ "${isBrk:-}" = "1" ]; then
    inspectVal="inspect-brk"
  fi
  docker exec -it -u 0 atlas-operation sh -c "node --${inspectVal}=0.0.0.0:9241 /usr/local/src/scripts/operation/admin.js addDefaultUsers"
}
# change the session timeout to a longer time
# the unit of this SESSION_INACTIVE_TIMEOUT is:
# expire
# Expiration time of the item. If it's equal to zero, the item will never expire. You can also use Unix timestamp or a number of seconds starting from current time, but in the latter case the number of seconds may not exceed 2592000 (30 days).
# refer: https://stackoverflow.com/questions/6027517/can-the-time-to-live-ttl-for-a-memcached-key-be-set-to-infinite
function longerSession {
  # Patch SESSION_INACTIVE_TIMEOUT to 0 ("never expire") directly inside the
  # authen pod's utils.js, then restart the supervised service (-du).
  # docker exec -it atlas-authen-0 sed -i 's/SESSION_INACTIVE_TIMEOUT =[^;]\+;/SESSION_INACTIVE_TIMEOUT = 999999;/g' /usr/local/site/authen-microservice/src/common/utils.js
  # docker exec -it atlas-authen-0 sed -i 's/SESSION_INACTIVE_TIMEOUT =[^;]\+;/SESSION_INACTIVE_TIMEOUT = 0;/g' /usr/local/site/authen-microservice/src/common/utils.js
  # docker exec -it atlas-authen-0 svc -du '/service/authen-microservice/'
  kubectl exec -it -n ${namespace_default} atlas-authen-0 -- sed -i 's/SESSION_INACTIVE_TIMEOUT =[^;]\+;/SESSION_INACTIVE_TIMEOUT = 0;/g' /usr/local/site/authen-microservice/src/common/utils.js
  kubectl exec -it -n ${namespace_default} atlas-authen-0 -- svc -du '/service/authen-microservice/'
}
# removeOperation, including container, images
# Remove everything cleanAll removes (atlas containers, services, configs,
# volumes) and additionally delete the operation image(s).
function removeOperation {
  # The whole teardown was a byte-for-byte copy of cleanAll — delegate to it
  # instead of duplicating (cleanAll is defined later in this file; bash
  # resolves function names at call time, and both are defined before the
  # dispatcher runs).
  cleanAll
  # additionally drop every image whose name contains "operation"
  docker images |grep operation |awk '{print $3}' |uniq |xargs -L 1 -I % docker rmi --force %
}
# cleanAll, including container, svc, volume
# cleanAll, including container, svc, volume
function cleanAll {
  ## cancel pipefail, since below may failed when all are clean
  set +o pipefail
  # remove all atlas container
  echo "about to stop all atlas container.."
  docker ps -a |grep atlas |awk '{print $NF;}' |xargs -L 1 -I % docker container rm -f %
  echo "===========result - docker ps -a"
  docker ps -a
  # remove all service
  echo "about to delete all atlas service..."
  docker service ls |awk '{if(NR > 1) print $1;}' |xargs -L 1 -I % docker service rm %
  echo "===========result - docker service ls"
  docker service ls
  # remove all config
  echo "about to delete all atlas config..."
  docker config ls |awk '{if(NR > 1) print $1;}' |xargs -L 1 -I % docker config rm %
  echo "===========result - docker config ls"
  docker config ls
  echo "about to clean all atlas volume..."
  # NOTE(review): NR>-9 is always true — presumably meant to skip the header
  # row (NR>1)? Behavior kept as-is; confirm.
  docker ps -a |grep atlas |awk '{ if(NR>-9) print $NF;}' |xargs -L 1 -I % docker rm --force %
  docker volume ls |grep atlas |awk '{print $NF;}' |xargs -L 1 -I % docker volume rm %
  echo "===========result - docker volume ls"
  docker volume ls
}
#
# reload adminUI(citadel-config-panel), also copy linux src to linux host
# @param 1: the method to do
#
# # to sync AdminUI production
# # pls ensure that u r running `npm run serve` in the root folder of citadel-control-panel
# find /opt/github/Atlas/sourcecode/citadel-control-panel/dist/admin -mindepth 0 -maxdepth 1 -type f |egrep "*\.(map|js|html)$" |xargs -L 1 -I % docker cp % atlas-uiui-0:/usr/local/site/citadel-control-panel/admin/
# Sync the citadel-control-panel (admin UI) into the UI pod, or ("tar")
# pull the whole remote project tree down into ${tmpPath} for reference.
function adminUI {
  local -r project_name="citadel-control-panel"
  local -r linux_project_path="${githubPath}/${project_name}"
  local -r tmp_linux_project_path_admin="${tmpPath}/${project_name}/admin"
  local -r container_project_path=/usr/local/site/${project_name}
  local -r containerName="atlas-uiui-0"
  local -r method=${1}
  if [ "${method:-}" = "tar" ]; then # to tar node_modules/ files from container
    docker exec -it ${containerName} sh -c "
      cd '/usr/local/site/' &&
      tar cf n.tar ${project_name}/
    "
    docker cp ${containerName}:/usr/local/site/n.tar ${tmpPath}/
    (\
      cd ${tmpPath} && \
      tar xf n.tar \
    )
    echo -e -n "Succeed in syncing remote files to local ${tmpPath}!"
  else # to sync and restart
    # NOTE(review): this branch uses kubectl while the "tar" branch uses
    # docker — confirm both are still valid against the current cluster.
    echo "about to reload debug "
    set -x
    # first cp the build result template from container to .tmp
    cp -fRpv ${tmp_linux_project_path_admin}/{app,index.html} ${linux_project_path}/.tmp/
    mkdir -p ${linux_project_path}/.tmp/assets
    # then continue to sync some resources from container to .tmp/assets
    cp -fRpv ${tmp_linux_project_path_admin}/assets/{fonts,libs} ${linux_project_path}/.tmp/assets/
    # then sync some resources from local src
    cp -fRpv ${linux_project_path}/src/app/i18n ${linux_project_path}/.tmp/app/
    cp -fRpv ${linux_project_path}/src/assets/{env.js,theme.js,theme.scss} ${linux_project_path}/.tmp/assets/
    # generate some default cobrand configs
    # local.atlas.com.cobrand
    # for fileFullPath in ${linux_project_path}/.tmp/assets/{env.js,theme.scss,theme.js}; do
    #   echo "filePullPath: ${fileFullPath}"
    #
    # done
    ## local -r cobrandStr="local.atlas.com.cobrand"
    local -r cobrandStr="k3d.atlashcl.com.cobrand"
    cp -fRpv ${linux_project_path}/.tmp/assets/env.js ${linux_project_path}/.tmp/assets/${cobrandStr}.env.js
    cp -fRpv ${linux_project_path}/.tmp/assets/theme.scss ${linux_project_path}/.tmp/assets/${cobrandStr}.theme.css
    cp -fRpv ${linux_project_path}/.tmp/assets/theme.js ${linux_project_path}/.tmp/assets/${cobrandStr}.theme.js
    # NOTE(review): the next line repeats the ${cobrandStr}.env.js copy from
    # three lines above — appears redundant; confirm before removing.
    cp -fRpv ${linux_project_path}/.tmp/assets/env.js ${linux_project_path}/.tmp/assets/${cobrandStr}.env.js
    # remote old files
    # docker exec -it atlas-uiui-0 rm -rfv ${container_project_path}/admin
    kubectl exec -n ${namespace_default} -it atlas-uiui-0 -- rm -rfv ${container_project_path}/admin
    # copy local new files to remote
    # docker cp ${linux_project_path}/.tmp/ ${containerName}:${container_project_path}/admin
    kubectl cp ${linux_project_path}/.tmp/ ${namespace_default}/${containerName}:${container_project_path}/admin
  fi
}
# Temporarily disable (or restore) jasmine spec files in citadel-control-panel
# by renaming them to *.tiantcbak; $1 = "rename" to disable, anything else to
# restore. Two whitelisted create-customer specs are always kept enabled.
function renameSpec {
  cd /opt/github/Atlas/sourcecode/citadel-control-panel
  local -r method=${1}
  # intentionally a plain string: iterated unquoted below to word-split
  local -r paths='src/app src/components'
  if [ "${method:-}" = "rename" ]; then # to rename to tiantcbak
    function renameOnePath {
      local -r doPath=${1}
      for item in $(find ${doPath} -mindepth 0 -maxdepth 999 -name "*.spec.js"); do
        # echo "item: ${item}"
        case "$item" in
          # ;& falls through: both whitelisted specs hit the same "skip" arm
          (src/app/citadel/cobrand-admin/create-customer.controller.spec.js);&
          (src/app/citadel/cobrand-admin/create-customer.controller.spec.js1) echo "${item} will skip ---------"
          ;;
          (*)
            # echo "${item}, won't skip"
            mv -v "${item}" "${item}.tiantcbak"
          ;;
        esac
      done
    }
    for p in ${paths}; do
      renameOnePath ${p}
    done
  else # to recover
    function recoverOnePath {
      local -r doPath=${1}
      for item in $(find ${doPath} -mindepth 0 -maxdepth 999 -name "*.tiantcbak"); do
        # echo "item: ${item}"
        # strip the .tiantcbak suffix to restore the original name
        mv -v "${item}" "${item%.tiantcbak}"
        #echo "${item%.tiantcbak}"
      done
    }
    for p in ${paths}; do
      recoverOnePath ${p}
    done
  fi
}
function syncDockerRepo {
  # Keep the two docker repo checkouts in sync: rebase the main docker repo
  # against origin and force-push to the "mine" remote, then hard-reset the
  # base/docker checkout onto mine/master. Each step runs in a subshell so
  # the caller's cwd is untouched.
  local -r repo_root="${githubPath}/wispr-node-buildtools"
  (
    cd "${repo_root}/sandbox/buildroot/git/master/docker" && \
    git pull origin master --rebase
    git push -f -u mine master
  )
  (
    cd "${repo_root}/sandbox/buildroot/git/master/base/docker" && \
    git fetch mine
    git reset --hard mine/master
  )
}
function syncOperation {
  # Copy the local operation scripts to the shared host folder and into the
  # running atlas-operation docker container (vim .swp files excluded).
  # find /opt/github/Atlas/sourcecode/wispr-node-buildtools/sandbox/buildroot/git/master/docker/operation/scripts/operation/ -maxdepth 1 -mindepth 1 -print |grep -v "\.swp" |
  local -r project_name="wispr-node-buildtools"
  local -r linux_project_path="${githubPath}/${project_name}"
  local -r linux_operation_path="${linux_project_path}/sandbox/buildroot/git/master/docker/operation/scripts/operation"
  local -r container_project_path=/usr/local/src/scripts/operation/
  local -r containerName="atlas-operation"
  cp -fRpv ${linux_operation_path} ${host_githubPath}/
  echo "sync local into operation docker start.... linux_operation_path: ${linux_operation_path}"
  # _grep is a project wrapper defined elsewhere in this file.
  find ${linux_operation_path}/ -maxdepth 1 -mindepth 1 -print |_grep -v "\.swp" |xargs -L 1 -I % docker cp % ${containerName}:${container_project_path}/
  echo "sync local into operation docker done"
}
function debugSB {
  # Stage the sandbox entry files into the shared host folder, then run
  # sandbox.js under the Node inspector, paused and listening on port 9246.
  local -r sandbox_src="${githubPath}/wispr-node-buildtools/sandbox"
  local -r sandbox_dst="${host_githubPath}/sandbox"
  cp -fRpv "${sandbox_src}"/package* "${sandbox_dst}/" \
    && cp -fRpv "${sandbox_src}/sandbox.js" "${sandbox_dst}/"
  (
    cd "${sandbox_src}" && \
    node --inspect-brk=0.0.0.0:9246 ./sandbox.js -c init
  )
}
function debugBuild {
  # Copy build_docker_images (minus node_modules) into the shared host folder
  # and run build.js under the Node inspector, paused on port 9240.
  local -r linux_project_path="${githubPath}/wispr-node-buildtools/build_docker_images"
  local -r host_project_path="${host_githubPath}/build_docker_images"
  mkdir -p ${host_project_path}
  (\
  cd ${linux_project_path} && \
  ls -A1 |grep -v node_modules |xargs -L 1 -I % cp -fRpv % ${host_project_path}/
  node --inspect-brk=0.0.0.0:9240 ./build.js
  )
}
function collectLog {
  # "collect": concatenate the ssl/doad/files container logs into one file;
  # any other argument: truncate those logs inside the containers.
  local -r method=${1}
  local -r output_file="${host_githubPath}/logs/all.txt"
  if [ "${method:-}" = "collect" ]; then
    echo "" > ${output_file}
    echo "ssl:" >> ${output_file}
    docker exec -it -u 0 atlas-ssl-0 cat /var/log/nginx/access.log >> ${output_file}
    echo "doad:" >> ${output_file}
    docker exec -it -u 0 atlas-doad-0 cat /var/log/api-gateway/api-gateway.log >> ${output_file}
    echo "files:" >> ${output_file}
    docker exec -it -u 0 atlas-files-0 cat /var/log/files-microservice/files-microservice.log >> ${output_file}
  else # to clear
    docker exec -it -u 0 atlas-ssl-0 sh -c 'echo "" > /var/log/nginx/access.log'
    docker exec -it -u 0 atlas-doad-0 sh -c 'echo "" > /var/log/api-gateway/api-gateway.log'
    docker exec -it -u 0 atlas-files-0 sh -c 'echo "" > /var/log/files-microservice/files-microservice.log'
  fi
}
# upload the mariadb jar file to setup pod
function doSetup {
  # Poll (up to ${limit} times, 5s apart) until the atlas-setup pod reports
  # 1/1 Ready, then POST the mariadb connector payload to its upload API.
  local -i COUNT=0
  local -ri limit=50
  local rows=""
  set +o errexit
  # set -o pipefail
  while [[ ${rows-} == "" ]]; do
    echo "check if atlas-setup pod is ready..."
    # Bare arithmetic assignment is valid because COUNT is `local -i`.
    COUNT=COUNT+1
    rows=$(kubectl get pods -n ${namespace_default} |grep atlas-setup |grep '1/1');
    echo "rows: ${rows}"
    if [ "${rows:-}" = "" ] && (( COUNT >= limit )); then
      echo "Error: atlas-setup is not ready yet, while maximum retry times exceeded. Exiting..."
      # Dump a describe of every atlas-setup pod to aid debugging before bailing.
      IFS=$'\n'; for row in $(kubectl get pods -n ${namespace_default} |grep atlas-setup); do
        podName=$(echo ${row} |awk '{print $1}')
        echo -en "\n==========describe of pod: ${podName}==========="
        kubectl describe -n ${namespace_default} "pod/${podName}"
      done
      exit 1;
    else
      sleep 5;
    fi
  done
  # NOTE(review): this repeats `set +o errexit` from above — possibly meant to
  # be `set -o errexit` to re-enable it; confirm intent.
  set +o errexit
  sleep 20
  echo "atlas-setup pod is ready, uploading jar file..."
  function uploadJar {
    # Sends the locally prepared mariadb.json payload to the setup service.
    curl -i 'http://setup.atlas.com/api/uploads' \
    -H 'Connection: keep-alive' \
    -H 'Pragma: no-cache' \
    -H 'Cache-Control: no-cache' \
    -H 'Accept: application/json, text/plain, */*' \
    -H 'Content-Type: application/json;charset=UTF-8' \
    -H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36' \
    -H 'crossorigin: true' \
    -H 'Origin: http://setup.atlas.com' \
    -H 'Referer: http://setup.atlas.com/' \
    -H 'Accept-Language: en-US,en;q=0.9,zh;q=0.8,zh-CN;q=0.7,zh-TW;q=0.6' \
    --data-binary "@/home/tiantc/script/mariadb.json" \
    --compressed \
    --insecure
  }
  # One upload attempt per matched pod row, with a single retry on failure.
  # NOTE(review): ${row} is not used inside uploadJar — the loop just repeats
  # the same upload once per matching row.
  IFS=$'\n'; for row in ${rows}; do
    uploadJar
    retStatus=$?
    if (( retStatus != 0 )); then
      uploadJar
    fi
  done
}
# update the atlas images in k3s
function updateK3sAtlas {
  # Reinstall the Atlas helm release on k3s: uninstall, wait for every pod to
  # terminate, purge leftover PVCs/PVs, then re-apply the secret and chart.
  dockerPath="/opt/github/Atlas/sourcecode/wispr-node-buildtools/sandbox/buildroot/git/master/docker"
  helm uninstall atlas && true
  local -i COUNT=0
  local -ri limit=5
  local -i podLeftCount=2
  set -o pipefail
  # Loop until `kubectl get pods` prints only its header line (no pods left).
  while (( ${podLeftCount-} != 1 )); do
    # FIX: COUNT was never incremented, so the retry limit could never trip
    # and the loop would spin forever when pods failed to terminate.
    COUNT+=1
    if (( COUNT > limit )); then
      echo "Error: some pods is not removed yet, while maximum retry times exceeded. Exiting..."
      kubectl get pods -n ${namespace_default}
      # FIX: previously only printed the error and kept looping.
      exit 1
    fi
    echo "waiting for all pods terminated..."
    sleep 20
    rows=$(kubectl get pods -n ${namespace_default} | tee /dev/tty)
    # refer:
    # https://superuser.com/questions/543235/how-to-redirect-multiple-bash-commands-to-a-variable-and-screen
    # FIX: ${rows} must be quoted — unquoted, echo collapsed the newlines and
    # `wc -l` always reported 1, so the wait loop exited immediately.
    podLeftCount=$(echo "${rows}" | wc -l)
    kubectl get rc,statefulsets,svc,deployment,pods,pvc,cronjob,job -A --show-kind --show-labels |grep persistentvolumeclaim |awk '{print $2;}' |xargs -L 1 -I % kubectl delete % && true
  done
  # Delete every leftover PersistentVolume.
  array=(\
  pv \
  ); for resourceType in "${array[@]}"; do kubectl get -n "${namespace_default}" "${resourceType}" -o wide | awk '{if(NR>1) print $1;}' |xargs -L 1 -I % kubectl delete -n "${namespace_default}" ${resourceType}/%; done
  echo "all pods deleted"
  kubectl get svc,statefulset,pods -n ${namespace_default}
  # TODO: should use `-n ${namespace_default}` instead of `-A`?
  kubectl get rc,statefulsets,svc,deployment,pods,pvc,cronjob,job -A --show-kind --show-labels |grep persistentvolumeclaim |awk '{print $2;}' |xargs -L 1 -I % kubectl delete % && true
  sleep 5
  set +o pipefail
  cd ${dockerPath}/helm/k3d-data
  # /opt/github/Atlas/sourcecode/wispr-node-buildtools/sandbox/buildroot/git/master/docker/helm/k3d-data/values.yaml
  # TODO: replace the ftpserver: false to true
  cd ${dockerPath}/helm
  echo "install k3d secret..."
  kubectl apply -f k3d-data/atlas-k3d-secret.yml
  echo "install atlas via helm..."
  helm install atlas ./atlas -f k3d-data/values.yaml
  echo "do Setup..."
  doSetup
}
# update the atlas images in k3d
function updateK3dAtlas {
  # Reinstall the Atlas helm release on k3d: uninstall, wait for every pod to
  # terminate, purge leftover PVCs/PVs, refresh the local registry connection,
  # push base images, then re-apply the secret and chart.
  dockerPath="/opt/github/Atlas/sourcecode/wispr-node-buildtools/sandbox/buildroot/git/master/docker"
  helm uninstall atlas && true
  local -i COUNT=0
  local -ri limit=5
  local -i podLeftCount=2
  set -o pipefail
  # Loop until `kubectl get pods` prints only its header line (no pods left).
  while (( ${podLeftCount-} != 1 )); do
    # FIX: COUNT was never incremented, so the retry limit could never trip
    # and the loop would spin forever when pods failed to terminate.
    COUNT+=1
    if (( COUNT > limit )); then
      echo "Error: some pods is not removed yet, while maximum retry times exceeded. Exiting..."
      kubectl get pods -n ${namespace_default}
      # FIX: previously only printed the error and kept looping.
      exit 1
    fi
    echo "waiting for all pods terminated..."
    sleep 20
    rows=$(kubectl get pods -n ${namespace_default} | tee /dev/tty)
    # refer:
    # https://superuser.com/questions/543235/how-to-redirect-multiple-bash-commands-to-a-variable-and-screen
    # FIX: ${rows} must be quoted — unquoted, echo collapsed the newlines and
    # `wc -l` always reported 1, so the wait loop exited immediately.
    podLeftCount=$(echo "${rows}" | wc -l)
    kubectl get rc,statefulsets,svc,deployment,pods,pvc,cronjob,job -A --show-kind --show-labels |grep persistentvolumeclaim |awk '{print $2;}' |xargs -L 1 -I % kubectl delete % && true
  done
  # Delete every leftover PersistentVolume.
  array=(\
  pv \
  ); for resourceType in "${array[@]}"; do kubectl get -n "${namespace_default}" "${resourceType}" -o wide | awk '{if(NR>1) print $1;}' |xargs -L 1 -I % kubectl delete -n "${namespace_default}" ${resourceType}/%; done
  echo "all pods deleted"
  kubectl get svc,statefulset,pods -n ${namespace_default}
  # TODO: should use `-n ${namespace_default}` instead of `-A`?
  kubectl get rc,statefulsets,svc,deployment,pods,pvc,cronjob,job -A --show-kind --show-labels |grep persistentvolumeclaim |awk '{print $2;}' |xargs -L 1 -I % kubectl delete % && true
  sleep 5
  set +o pipefail
  # Bounce the local registry's network attachment so image pushes resolve.
  docker network disconnect k3d-atlas registry.local
  docker network connect k3d-atlas registry.local
  sleep 1
  cd ${dockerPath}/helm/k3d-data
  ./push-images.sh base
  cd ${dockerPath}/helm
  # /opt/github/Atlas/sourcecode/wispr-node-buildtools/sandbox/buildroot/git/master/docker/helm/k3d-data/values.yaml
  # TODO: replace the ftpserver: false to true
  kubectl apply -f k3d-data/atlas-k3d-secret.yml
  helm install atlas ./atlas -f k3d-data/values.yaml
  doSetup
}
# post-install tweaks for a fresh k3d cluster: allow all CORS on the gateway
# and speed up the ldapsync schedules, restarting both services via svc.
function initK3d {
  kubectl exec -it -n ${namespace_default} atlas-api-gateway-0 -- sed -i 's/CORS_ALLOW_ALL: false/CORS_ALLOW_ALL: true/g' /usr/local/site/api-gateway/config/production.yml
  kubectl exec -it -n ${namespace_default} atlas-api-gateway-0 -- svc -du '/service/api-gateway/'
  # Append fast arrange/sync schedules to the ldapsync config; the backslash
  # continuations are part of the quoted sed script and must stay flush-left.
  kubectl exec -it -n ${namespace_default} atlas-ldapsync-0 -- sed -i "\$a arrangeSchedule: '*/30 * * * * *'\
\n\
syncSchedule: '*/10 * * * * *'" /usr/local/site/ldapsync-microservice/config/production.yml && \
  kubectl exec -it -n ${namespace_default} atlas-ldapsync-0 -- svc -du /service/ldapsync-microservice/
  kubectl exec -it -n ${namespace_default} atlas-ldapsync-0 -- cat /usr/local/site/ldapsync-microservice/config/production.yml
}
# describePod[debugPod]
function describePod {
  # Dump `kubectl describe` for every pod that is neither Running nor
  # Completed. stdout is captured into ./log.txt; the `2>&1 > log.txt`
  # ordering deliberately keeps stderr on the terminal.
  IFS=$'\n'
  for row in $(kubectl get pods -A |awk '{if($1 != "NAMESPACE" && $4 != "Running" && $4 != "Completed") print $0;}'); do
    namespace=$(echo ${row} | awk '{print $1}')
    name=$(echo ${row} | awk '{print $2}')
    podStatus=$(echo ${row} | awk '{print $4}')
    # plain echo (no -e): prints a literal "\n\n" separator, as before
    echo "\n\n"
    echo illPod- ${namespace}, ${name}, ${podStatus}
    kubectl describe -n ${namespace} pod/${name} --show-events=true
  done 2>&1 > log.txt
}
function login {
  # Log in as bpadmin@ops.<domain> (hard-coded dev-cluster password), extract
  # data[0].token from the JSON response with an inline python one-liner, and
  # cache it in ./token.txt for the other curl helpers.
  curl -k "https://api.${domain_default}/api/authentication/login" \
  -H "authority: api.${domain_default}" \
  -H 'pragma: no-cache' \
  -H 'cache-control: no-cache' \
  -H 'accept: application/json, text/plain, */*' \
  -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36' \
  -H 'crossorigin: true' \
  -H 'content-type: application/json;charset=UTF-8' \
  -H 'sec-fetch-site: cross-site' \
  -H 'sec-fetch-mode: cors' \
  -H 'sec-fetch-dest: empty' \
  -H 'accept-language: en-US,en;q=0.9,zh;q=0.8,zh-CN;q=0.7,zh-TW;q=0.6' \
  --data-binary '{"username":"bpadmin@ops.'${domain_default}'","password":"passw0rd"}' \
  --compressed \
  --insecure | python -c "import sys, json; print(json.load(sys.stdin)['data'][0]['token'])" \
  |& tee ./token.txt
}
# declare pvtTime=()
# Shared log file where the timing helpers append "<start>,<end>" pairs of
# nanosecond timestamps (one line per run).
declare pvtTime="./pvtTime.txt"
# PVT version
function curlPVT {
  # Fire ${totalRequests} batchMoveAsync requests back-to-back against the
  # files API and append "<start>,<end>" nanosecond timestamps to ${pvtTime}.
  # Requires a token previously cached in ./token.txt by login().
  # in seconds
  local -r totalTime=2
  local -r totalRequests=4
  # local -r intervalSeconds=`echo "scale=3; ${totalTime}/${totalRequests}" | bc -l | awk '{printf "%.3f\n", $0}'`
  local -r intervalSeconds=0.001
  # local -r token="$(login)"
  local -r token=`cat ./token.txt`
  local -r start=`date +%s%N`
  for i in `seq 1 1 ${totalRequests}`
  do
    echo "about to curl $i"
    curl -k "https://api.${domain_default}/api/files/batchMoveAsync" \
    -H "authority: api.${domain_default}" \
    -H 'pragma: no-cache' \
    -H 'cache-control: no-cache' \
    -H 'accept: application/json, text/plain, */*' \
    -H "authorization: ${token}" \
    -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36' \
    -H 'crossorigin: true' \
    -H 'content-type: application/json;charset=UTF-8' \
    -H 'sec-fetch-site: cross-site' \
    -H 'sec-fetch-mode: cors' \
    -H 'sec-fetch-dest: empty' \
    -H 'accept-language: en-US,en;q=0.9,zh;q=0.8,zh-CN;q=0.7,zh-TW;q=0.6' \
    --data-binary '{"objects":["/bpadmin@ops.'${domain_default}'/a.txt","/bpadmin@ops.'${domain_default}'/b.txt"],"newPosition":"/bpadmin@ops.'${domain_default}'/abc'${i}'/"}' \
    --compressed \
    --insecure
    # FIX: ${i} used to sit inside the single-quoted payload and was sent as
    # the literal text "abc${i}/"; the quote is now closed so every request
    # targets a distinct abc<i>/ destination folder.
    ## sleep ${intervalSeconds}
    # --insecure | python -c "import sys, json; print(json.load(sys.stdin)['data'])" \
    # |& tee ./batchId.txt
  done
  local -r end=`date +%s%N`
  echo "${start},${end}" |& tee -a ${pvtTime}
}
function mvStatus {
  # Query the status of a previously started batch move (id cached in
  # ./batchId.txt) and append "<start>,<end>" nanosecond timestamps to
  # ${pvtTime}. Performs a fresh login first.
  # in seconds
  echo login start
  # login first
  login
  echo login is done
  local -r totalTime=2
  local -r totalRequests=1
  # local -r intervalSeconds=`echo "scale=3; ${totalTime}/${totalRequests}" | bc -l | awk '{printf "%.3f\n", $0}'`
  local -r intervalSeconds=0.001
  # echo "intervalSeconds: ${intervalSeconds}"
  # local -r token="$(login)"
  local -r token=`cat ./token.txt`
  local -r start=`date +%s%N`
  local -r batchId=`cat ./batchId.txt`
  curl "https://api.${domain_default}/api/files/getBatchStatus?serverType=atlas&batchId=${batchId}" \
  -H 'authority: api.'${domain_default}'' \
  -H 'pragma: no-cache' \
  -H 'cache-control: no-cache' \
  -H 'accept: application/json, text/plain, */*' \
  -H "authorization: ${token}" \
  -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36' \
  -H 'origin: https://email.'${domain_default}'' \
  -H 'sec-fetch-site: same-site' \
  -H 'sec-fetch-mode: cors' \
  -H 'sec-fetch-dest: empty' \
  -H 'referer: https://email.'${domain_default}'/' \
  -H 'accept-language: en-US,en;q=0.9,zh;q=0.8,zh-CN;q=0.7,zh-TW;q=0.6' \
  --compressed \
  --insecure
  local -r end=`date +%s%N`
  # totalTimeCost=`expr $end - $start`
  # echo Execution time was `nano2Readable ${totalTimeCost}`, intervalSeconds: ${intervalSeconds}.
  echo "${start},${end}" |& tee -a ${pvtTime}
}
function addTestUser {
  # Create ${totalUsers} test accounts (test01..test100) under customer 1 via
  # the admin API, each with a fixed quota and password "passw0rd".
  local -ir totalUsers=100
  login
  local -r token=`cat ./token.txt`
  for i in `seq 1 1 ${totalUsers}`
  do
    # zero-pad single-digit indices so names sort naturally (test01, test02, ...)
    index="${i}"
    if (( i < 10 )); then
      index="0${i}"
    fi
    curl 'https://api.'${domain_default}'/api/admin/customers/1/users' \
    -H 'authority: api.'${domain_default}'' \
    -H 'pragma: no-cache' \
    -H 'cache-control: no-cache' \
    -H 'accept: application/json, text/plain, */*' \
    -H "authorization: ${token}" \
    -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36' \
    -H 'content-type: application/json;charset=UTF-8' \
    -H 'origin: https://email.'${domain_default}'' \
    -H 'sec-fetch-site: same-site' \
    -H 'sec-fetch-mode: cors' \
    -H 'sec-fetch-dest: empty' \
    -H 'referer: https://email.'${domain_default}'/' \
    -H 'accept-language: en-US,en;q=0.9,zh;q=0.8,zh-CN;q=0.7,zh-TW;q=0.6' \
    --data-binary '{"email":"test'${index}'@ops.'${domain_default}'","quota":5242880,"language":"en-us","timezone":"Asia/Hong_Kong","profile":{"firstName":"test","lastName":"user'${index}'"},"permissions":{"imap":"enabled","pop3":"enabled","smtp":"enabled"},"password":"passw0rd"}' \
    --compressed \
    --insecure
  done
}
function curlPVT_multiCore {
  # Run curlPVT via multipleCore, then compute the wall-clock span from the
  # earliest start to the latest end timestamp the workers appended to
  # ${pvtTime} (each line is "<start>,<end>" in nanoseconds).
  echo "" > ${pvtTime}
  echo login start
  # login first
  login
  echo login is done
  # NOTE(review): no fan-out prefix here, so curlPVT runs once with args
  # "2 3"; prepend integers (e.g. `multipleCore 2 isDone curlPVT 2 3`) to
  # parallelize.
  multipleCore isDone curlPVT 2 3
  local -r array=(`cat ./pvtTime.txt`)
  declare -p array
  # FIX: was `local declarestartArray=()` — the word "declare" was fused into
  # the variable name, so startArray below was created as an implicit global.
  local startArray=()
  local endArray=()
  for item in ${array[@]}; do
    local tmpArray=(`echo $item | tr ',' ' '`)
    startArray+=(${tmpArray[0]})
    endArray+=(${tmpArray[1]})
  done
  declare -p startArray
  declare -p endArray
  echo "====try to sort"
  # Numeric sort so the min/max picks below are correct.
  startArray_sorted=( $( printf "%s\n" "${startArray[@]}" | sort -n ) )
  endArray_sorted=( $( printf "%s\n" "${endArray[@]}" | sort -n ) )
  declare -p startArray_sorted
  declare -p endArray_sorted
  # total span = latest end - earliest start
  totalTimeCost2=`expr ${endArray_sorted[${#endArray_sorted[@]} - 1]} - ${startArray_sorted[0]}`
  echo Execution time was `nano2Readable ${totalTimeCost2}`.
}
function doFunction {
  # Demo worker for multipleCore: logs its arguments, sleeps 2 seconds, and
  # appends "<start>,<end>" nanosecond timestamps to the shared ${pvtTime} file.
  local -r begin=$(date +%s%N)
  echo "in doFunction" "$@"
  sleep 2
  echo "in doFunction" "$@" done
  local -r finish=$(date +%s%N)
  echo "${begin},${finish}" |& tee -a ${pvtTime}
}
function multipleCore {
  # Recursive fan-out helper. Each leading integer argument N forks N
  # background copies of the remaining arguments, recursing until the
  # sentinel "isDone" (or "finalRound") is reached, at which point the named
  # function is invoked with whatever arguments follow it.
  #
  # Example: multipleCore 2 2 isDone doFunction a b
  #   -> runs `doFunction a b` in 2*2 = 4 background jobs.
  #
  # Improvement: arguments are now `local` so recursion cannot clobber the
  # caller's variables, and the seq subprocess is replaced by a C-style loop.
  local argu1=$1
  shift 1
  if [ "${argu1:-}" = "isDone" ] || [ "${argu1:-}" = "finalRound" ]; then
    local functionName=$1
    shift 1
    ${functionName} "$@"
  else
    local -i i
    for (( i = 1; i <= argu1; i++ )); do
      multipleCore "$@" &
    done
    # Wait for all background jobs so the parent (the caller) knows when
    # every fan-out branch has completed.
    wait
  fi
}
function mysqlConnect {
  # Open an interactive mysql root shell inside pod "atlas-<name>-${CONTAINER_INDEX}"
  # (CONTAINER_INDEX is defined elsewhere in this script).
  local -r name=$1
  local -r podName="atlas-${name}-${CONTAINER_INDEX}"
  # Read the root password from the operation pod's env.yml and trim whitespace.
  local -r pwd=`kubectl exec -n ${namespace_default} atlas-operation-0 -- cat /usr/local/site/config/env.yml |grep root-password | cut -d':' -f2 | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//'`
  echo "pwd is: ${pwd}"
  # Write the password into a defaults file inside the target container so it
  # is not passed on the mysql command line.
  kubectl exec -n ${namespace_default} -c "${name}" "${podName}" -- bash -c "echo -ne '[client]\npassword=${pwd}\n' > /tmp/pwd.cnf"
  kubectl exec -n ${namespace_default} -c "${name}" "${podName}" -- cat /tmp/pwd.cnf
  kubectl exec -n ${namespace_default} -c "${name}" -it "${podName}" -- sh -c "mysql --defaults-extra-file=/tmp/pwd.cnf --user=root --host=localhost"
}
function atlasUIServe {
  # Prepare the atlas-ui project for webpack-based serving: inject "serve"
  # npm scripts into package.json and ensure webpack dev-dependencies exist.
  local -r project_name="atlas-ui"
  local -r linux_project_path="${githubPath}/${project_name}"
  local -r packageJson="${linux_project_path}/package.json"
  # add webpack script
  # NOTE(review): sed is invoked WITHOUT -i, so the edited package.json is
  # only printed to stdout and the file itself is not changed — confirm
  # whether `sed -i` was intended.
  sed -e '/^\s*"scripts":\s*{$/,/^\s*"start":\s*/s/{$/&\n "serve": "webpack --progress", \n "serve-public": "webpack --progress --config .\/webpack-public.config.js",/g' "${packageJson}"
  # install necessary dependencies if not present (_grep is a project wrapper)
  # NOTE(review): npm i runs in the current working directory, not in
  # ${linux_project_path}; a `cd` may be missing here.
  if ! _grep 'webpack-bundle-analyzer' "${packageJson}" ; then
    npm i --save-dev webpack-bundle-analyzer
  fi
  if ! _grep 'webpack-cli' "${packageJson}" ; then
    npm i --save-dev webpack-cli
  fi
}
function collectFilesLog {
  # Extract the latest performance summary block from files.log into per-run
  # <name>.txt / <name>.csv files, then aggregate all p*.txt summaries into
  # a single s.csv.
  local -r name=$1
  local -r allSummaryCSV='/opt/github/Atlas/sourcecode/results/s.csv'
  local -r sourcePath='/opt/github/Atlas/sourcecode/files.log'
  local -r targetSummary="/opt/github/Atlas/sourcecode/results/${name}.txt"
  local -r targetCSV="/opt/github/Atlas/sourcecode/results/${name}.csv"
  local -r tmpFile='/opt/github/Atlas/sourcecode/tmp.log'
  # grep -Pzao reads the file as one NUL-joined blob so (?s) patterns can
  # span lines; this grabs the LAST "allData ... The file summary_" section.
  grep -Pzao "(?s)allData((?!allData).)+The file summary_(?!.*The file summary_.*)" ${sourcePath} > ${tmpFile}
  grep -Pzao "(?s)(?<=csvStr{\s\s)[^}]+(?=})" ${tmpFile} > ${targetCSV}
  grep -Pzao "(?s)resultMap.+submitCost.length: \d+\s+" ${tmpFile} > ${targetSummary}
  # find /opt/github/Atlas/sourcecode/results/ -mindepth 0 -maxdepth 0
  # "shortest time": "176, longest time is
  echo "generating summary.csv"
  echo "submit_short,submit_long,submit_mean,schedule_short,schedule_long,schedule_mean,file_short,file_long,filemean,total_short,total_long,total_mean" > ${allSummaryCSV}
  for file in /opt/github/Atlas/sourcecode/results/p*.txt; do
    echo "file: ${file}"
    local shortestTimes=(`grep -Pzao "(?s)(?<=shortest time)[^\d]+\K\d+," ${file} | tr ',' ' '`)
    local longestTimes=(`grep -Pzao "(?s)(?<=longest time is)[^\d]+\K\d+," ${file} | tr ',' ' '`)
    local meanTimes=(`grep -Pzao "(?s)(?<=mean time is)[^\d]+\K\d+," ${file} | tr ',' ' '`)
    # four metric groups per file: submit / schedule / file / total
    for index in `seq 0 1 3`
    do
      echo -n "${shortestTimes[${index}]},${longestTimes[${index}]},${meanTimes[${index}]}," >> ${allSummaryCSV}
    done
    echo "" >> ${allSummaryCSV}
  done
  # clean the tailing comma
  sed -i 's/,$//g' ${allSummaryCSV}
}
# tail logs in container
function tailLog {
  # Follow the files-microservice log; the doad variant is kept for reference.
  # docker exec -it atlas-doad-0 tail -f /var/log/api-gateway/api-gateway.log
  docker exec -it atlas-files-0 tail -f /var/log/files-microservice/files-microservice.log
}
# Dispatch on ${_project} (parsed earlier in this script) to the matching
# helper; most projects accept an alias, and ${_method}/${_param} are
# forwarded verbatim.
if [ "${_project:-}" = "api-gateway" ] || [ "${_project:-}" = "api" ]; then
  # reloadAPI_gateway ${_method} ${_param}
  reloadMicroSVC ${_method} ${_param} api-gateway true " common server package.json package-lock.json config " false
elif [ "${_project:-}" = "ldapsync" ] || [ "${_project:-}" = "syncldapsync" ]; then
  reloadLdapsync ${_method} ${_param}
elif [ "${_project:-}" = "batch" ] || [ "${_project:-}" = "batch-microservice" ]; then
  reloadMicroSVC ${_method} ${_param} batch true " src stub package.json package-lock.json config "
elif [ "${_project:-}" = "worker" ] || [ "${_project:-}" = "synWorker" ]; then
  # reloadWorker ${_method} ${_param}
  reloadMicroSVC ${_method} ${_param} worker true " src package.json package-lock.json config "
elif [ "${_project:-}" = "files-microservice" ] || [ "${_project:-}" = "files-svc" ] || [ "${_project:-}" = "files" ]; then
  # filesSVC ${_method} ${_param}
  reloadMicroSVC ${_method} ${_param} files true " src package.json package-lock.json config "
elif [ "${_project:-}" = "mailbox-microservice" ] || [ "${_project:-}" = "mailbox-svc" ] || [ "${_project:-}" = "mailbox" ]; then
  reloadMicroSVC ${_method} ${_param} mailbox true " src package.json package-lock.json config "
elif [ "${_project:-}" = "renameSpec" ] || [ "${_project:-}" = "specUT" ]; then
  renameSpec ${_method} ${_param}
elif [ "${_project,,}" = "adminui" ] || [ "${_project:-}" = "admin" ] || [ "${_project:-}" = "citadel-control-panel" ]; then
  adminUI ${_method} ${_param}
elif [ "${_project:-}" = "atlas-ui" ] || [ "${_project,,}" = "atlasui" ]; then
  reloadAtlasUI ${_method} ${_param}
elif [ "${_project:-}" = "createBucket" ] || [ "${_project:-}" = "bucket" ]; then
  createBucket ${_method} ${_param}
elif [ "${_project:-}" = "defaultUser" ] || [ "${_project:-}" = "addDefaultUser" ]; then
  addDefaultUser ${_method} ${_param}
elif [ "${_project:-}" = "addUser" ] || [ "${_project:-}" = "addTestUser" ]; then
  addTestUser ${_method} ${_param}
elif [ "${_project:-}" = "longerSession" ] || [ "${_project:-}" = "session" ]; then
  longerSession ${_method} ${_param}
elif [ "${_project:-}" = "clean" ] || [ "${_project:-}" = "cleanAll" ]; then
  cleanAll ${_method} ${_param}
elif [ "${_project:-}" = "operation" ] || [ "${_project:-}" = "removeOperation" ]; then
  removeOperation ${_method} ${_param}
elif [ "${_project:-}" = "syncOperation" ]; then
  # FIX: the alias test was a duplicate of the same string ("syncOperation"
  # twice), so the redundant check is dropped.
  syncOperation ${_method} ${_param}
elif [ "${_project:-}" = "docker" ] || [ "${_project:-}" = "syncDocker" ]; then
  syncDockerRepo ${_method} ${_param}
elif [ "${_project:-}" = "sandbox" ] || [ "${_project:-}" = "debugSB" ]; then
  debugSB ${_method} ${_param}
elif [ "${_project:-}" = "build" ] || [ "${_project:-}" = "debugBuild" ]; then
  debugBuild ${_method} ${_param}
elif [ "${_project:-}" = "collectLog" ] || [ "${_project:-}" = "collect" ]; then
  collectLog ${_method} ${_param}
elif [ "${_project:-}" = "initK3d" ] || [ "${_project:-}" = "init" ]; then
  initK3d ${_method} ${_param}
elif [ "${_project:-}" = "doSetup" ] || [ "${_project:-}" = "setup" ]; then
  doSetup ${_method} ${_param}
elif [ "${_project:-}" = "updateK3dAtlas" ] || [ "${_project:-}" = "k3dimage" ]; then
  updateK3dAtlas ${_method} ${_param}
elif [ "${_project:-}" = "updateK3sAtlas" ] || [ "${_project:-}" = "k3simage" ]; then
  # FIX: the alias was "k3dimage", which the branch above already consumes,
  # making this alias unreachable; "k3simage" now selects the k3s variant.
  updateK3sAtlas ${_method} ${_param}
elif [ "${_project:-}" = "login" ] || [ "${_project:-}" = "doLogin" ]; then
  login ${_method} ${_param}
elif [ "${_project:-}" = "curlPVT" ] || [ "${_project:-}" = "pvt" ]; then
  # multipleCore 2 2 2 isDone doFunction 2 3
  curlPVT_multiCore ${_method} ${_param}
elif [ "${_project:-}" = "curlStatus" ] || [ "${_project:-}" = "status" ]; then
  # multipleCore 2 2 2 isDone doFunction 2 3
  mvStatus ${_method} ${_param}
elif [ "${_project:-}" = "chown" ] || [ "${_project:-}" = "chown4Mdrop" ]; then
  chown4Mdrop
elif [ "${_project:-}" = "mysql" ] || [ "${_project:-}" = "mysqlConnect" ]; then
  mysqlConnect ${_method} ${_param}
elif [ "${_project:-}" = "collectFilesLog" ] || [ "${_project:-}" = "collectFiles" ]; then
  collectFilesLog ${_method} ${_param}
elif [ "${_project:-}" = "log" ] || [ "${_project:-}" = "tailLog" ]; then
  tailLog ${_method} ${_param}
elif [ "${_project:-}" = "test" ] || [ "${_project:-}" = "testFunction" ]; then
  # exercises the multipleCore fan-out with the demo worker
  echo "" > ${pvtTime}
  multipleCore 2 2 2 isDone doFunction 2 3
  wait
  echo "all are done"
  ls -AlpkFih ./
  echo "about to cat"
  cat ${pvtTime}
else
  echo "invalid project"
  exit 1
fi
echo "ret: ${ret}"
exit ${ret}
# to prune docker system
# docker system prune -f
# to clean up obsolete images
# PS: remember to replace your latest tag: master-HCLATLAS-file-service-HCLATLAS-file-service-1585030927927 -> "1585030927927"
# docker images |grep -P "atlas\/" |grep -P "master-HCLATLAS-file-service-HCLATLAS-file-service-" |grep -v "1585030927927" |awk '{ print $3;}' |xargs -L 1 -I % docker rmi --force %
# to build wisper images on HCLATLAS-file-service
# ./build.js -b master -w HCLATLAS-file-service -d HCLATLAS-file-service
# tailLog
# docker exec -it atlas-doad-0 tail -f /var/log/api-gateway/api-gateway.log
# install webpack
# npm i webpack-cli webpack-bundle-analyzer --save-dev
# way to mimic out of disk space {
# docker exec -it -u 0 atlas-files-0 sh -c "df -h && ls -AlpkFih /var/log/files-microservice && du -sh /var/log/files-microservice" && docker inspect atlas-files-0 |vim -
# docker cp /media/sf_github/tmp.tar atlas-files-0:/var/log/files-microservice/
# {{=<% %>=}}
# Image: "atlas/files:{{& env.version}}"
# ExposedPorts:
# 9254/tcp: {}
# HostConfig:
# Privileged: true
# NetworkMode: "{{& site.clusterName}}_vlan"
# RestartPolicy:
# Name: "no"
# PortBindings:
# 9254/tcp:
# - HostPort: "9254"
# Binds:
# - "sized_vol:/var/log/files-microservice/"
#}
# scrollBar in atlas {
#
#console.info(`scrollHeight: ${listScrollHeight}, scrollTop: ${listScrollTop}, clientHeight: ${listClientHeight}, offsetTop: ${listOffsetTop}, lastScrollTop: ${lastScrollTop}, previousAnchorPagesInSight: ${JSON.stringify(previousAnchorPagesInSight)}`);
#
#const allAnchors = document.getElementsByClassName('page-anchor');
#// at the beginning of every page(e.g. before item0, item20, item40, there will be an Anchor for page1, 2, 3)
#// and when this anchor is shown(visible) in current client window, it's anchorsInSight.
#// And if anchorsInSight is available after scrolling, then we'll switch to the page of that anchor.
#}
# to generate csv from json for i18n
# node /opt/github/Atlas/sourcecode/Atlas-Documents/tutorials/BlackboxScripts/UITranslationFormatConvert/index.js -ffi json -fpi /opt/github/Atlas/sourcecode/atlas-ui/public/locales/en-us/common.json -ffo csv -fpo /media/sf_github/tmp/a.csv
# docker path
# /opt/github/Atlas/sourcecode/wispr-node-buildtools/sandbox/buildroot/git/master/docker
# /opt/github/Atlas/sourcecode/wispr-node-buildtools/sandbox/buildroot/git/master/base/docker
# buildRoot: /opt/github/Atlas/sourcecode/wispr-node-buildtools/build_docker_images/../sandbox/buildroot/git/master, DATABAG_REPO_NAME: databag
# buildRoot: /opt/github/Atlas/sourcecode/wispr-node-buildtools/build_docker_images/../sandbox/buildroot/git/master, DOCKER_REPO_NAME: docker
# baseDockerPath: /opt/github/Atlas/sourcecode/wispr-node-buildtools/build_docker_images/../sandbox/buildroot/git/master/base, DOCKER_REPO_NAME: docker
# base: master, docker: master
# docker cp /opt/github/Atlas/sourcecode/wispr-node-buildtools/sandbox/buildroot/git/master/docker/operation/scripts/operation/performance.yml atlas-operation:/usr/local/src/scripts/operation/
# vi /usr/local/src/scripts/operation/performance.yml
# ssh -i ~/.ssh/mountain2.pem ubuntu@moutain02.atlahcl.com
# find the slowest request
#cat batch-microservice.log.0 |grep -oPi 'elapsed":\d+' |cut -d':' -f2 |sort -r -n | vi -
# way to trigger ldapsync now
# docker exec -it atlas-ldapsync-0 \
# sed -i "\$a arrangeSchedule: '*/30 * * * * *'\
# \n\
# syncSchedule: '*/10 * * * * *'" /usr/local/site/ldapsync-microservice/config/production.yml && \
# svc -du /service/ldapsync-microservice/
## kubectl version
# kubectl exec -it -n default atlas-ldapsync-0 -- \
# sed -i "\$a arrangeSchedule: '*/30 * * * * *'\
# \n\
# syncSchedule: '*/10 * * * * *'" /usr/local/site/ldapsync-microservice/config/production.yml && \
# kubectl exec -it -n default atlas-ldapsync-0 -- svc -du /service/ldapsync-microservice/
#######################To Allow all CORS[cors][allowCors]###############################
#docker exec -it atlas-doad-1 sed -i 's/CORS_ALLOW_ALL: false/CORS_ALLOW_ALL: true/g' /usr/local/site/api-gateway/config/production.yml && \
#docker exec -it atlas-doad-1 svc -du '/service/api-gateway/'
#
#kubectl exec -it atlas-api-gateway-0 -- sed -i 's/CORS_ALLOW_ALL: false/CORS_ALLOW_ALL: true/g' /usr/local/site/api-gateway/config/production.yml && \
#kubectl exec -it atlas-api-gateway-0 -- svc -du '/service/api-gateway/'
#######################To reload Atlas-ui(Over)###############################
# export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
| true
|
3ffc3bacb7f528cc04d8d15498b35b67263766ec
|
Shell
|
tranphuquy19/packer-templates
|
/centos/script/01_desktop.sh
|
UTF-8
| 1,964
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash

# Install a minimal GNOME + X11 desktop on CentOS 7/8 (Packer provisioner).
# Runs only when $DESKTOP is truthy; picks dnf on CentOS >= 8, yum otherwise.

# Check if script is running as a desktop installation.
# If not, exit without configuring UI
# FIX: the pattern used to end in "ON])$" — the stray "]" meant the literal
# value "ON" never matched (only "ON]").
if [[ ! "${DESKTOP:-}" =~ ^(true|yes|on|1|TRUE|YES|ON)$ ]]; then
  exit
fi

# CentOS version info - e.g., CentOS 7.8.2003 (v7.8 released March 2020)
# full_version=$(< /etc/centos-release tr -dc '0-9.')
major_version=$(< /etc/centos-release tr -dc '0-9.'|cut -d \. -f1)
# minor_version=$(< /etc/centos-release tr -dc '0-9.'|cut -d \. -f2)
# date_stamp=$(< /etc/centos-release tr -dc '0-9.'|cut -d \. -f3)

if [ "$major_version" -ge 8 ] ; then
  pkg_cmd="dnf"
else
  pkg_cmd="yum"
fi

# Minimal - gnome + X11
$pkg_cmd install -y gdm gnome-terminal gnome-classic-session gnome-shell-extension-dash-to-dock gnome-tweak-tool gedit
$pkg_cmd install -y xorg-x11-server-Xorg xorg-x11-drivers
# $pkg_cmd groupinstall -y 'GNOME'
# $pkg_cmd groupinstall -y 'x11' # Core Gnome and x11
# $pkg_cmd install -y gnome-shell-extension-dash-to-dock gnome-tweak-tool

echo "==> Set default session as graphical desktop"
systemctl set-default graphical.target

if [ -d /etc/xdg/autostart/ ]; then
  echo "==> Disabling screen blanking"
  NODPMS_CONFIG=/etc/xdg/autostart/nodpms.desktop
  {
    echo "[Desktop Entry]"
    echo "Type=Application"
    echo "Name=nodpms"
    echo "Comment="
    echo "Exec=xset -dpms s off s noblank s 0 0 s noexpose"
    echo "Hidden=false"
    echo "NoDisplay=false"
    echo "X-GNOME-Autostart-enabled=true"
  } >> "$NODPMS_CONFIG"

  echo "==> Disabling screensaver"
  IDLE_DELAY_CONFIG=/etc/xdg/autostart/idle-delay.desktop
  {
    echo "[Desktop Entry]"
    echo "Type=Application"
    echo "Name=idle-delay"
    echo "Comment="
    echo "Exec=gsettings set org.gnome.desktop.session idle-delay 0"
    echo "Hidden=false"
    echo "NoDisplay=false"
    echo "X-GNOME-Autostart-enabled=true"
  } >> "$IDLE_DELAY_CONFIG"
fi

echo "==> Disable Wayland"
GDM_CONFIG=/etc/gdm/custom.conf
sed -i 's/#WaylandEnable/WaylandEnable/' "$GDM_CONFIG"
| true
|
950df83e8776bde34eec8025a913d652730e4fe6
|
Shell
|
zerodayz/openstack-gitrepos
|
/setup.sh
|
UTF-8
| 163
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash

# Regenerate the gitrepos list next to this script, then clone every repo it
# lists (one URL per line) into the current directory.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
python "$DIR/generate-repos.py"
# FIX: `for p in $(cat ...)` word-split the file and left $p unquoted; read
# line-by-line and quote the URL instead.
while IFS= read -r repo; do
  [ -n "$repo" ] || continue
  git clone "$repo"
done < "$DIR/gitrepos"
| true
|
7708244935485cf8a1c37b90f50d78ea7412d3c7
|
Shell
|
sancus-project/sancus-lua-web-example
|
/src/makefile_gen.in
|
UTF-8
| 239
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh

# list: turn the arguments (or stdin when no arguments are given) into a
# sorted, pipe-joined-then-expanded, backslash-continued, tab-indented
# make-style list.
list() {
	if [ $# -gt 0 ]; then
		printf '%s\n' "$*"
	else
		cat
	fi | tr -s ' ' | tr ' ' '\n' | sort -V | tr '\n' '|' \
		| sed -e 's,|$,,' -e 's,|, \\\n\t,g'
}

# listall: like list, but items come from `find` over the given paths,
# excluding directories and editor backup/swap files.
listall() {
	find "$@" ! -type d | grep -v -e '~$' -e '\.swp$' -e '\.orig$' | list
}
| true
|
1d80d17d3bdc8bcfc43d95bdbf36c938eeac33d1
|
Shell
|
Mrmaxmeier/stuff
|
/pamixer-notify.sh
|
UTF-8
| 383
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash

# Adjust PulseAudio volume via pamixer (up|down|mute) and show a short,
# transient desktop notification with the resulting level.

notify() {
	notify-send -t 1000 --hint=int:transient:1 "$1" \
		"Current is $(pamixer --get-volume-human)" --icon=multimedia-volume-control
}

case "$1" in
	up)
		pamixer -i 2
		notify "Volume +2%"
		;;
	down)
		pamixer -d 2
		notify "Volume -2%"
		;;
	mute)
		pamixer -t
		notify "Toggle mute"
		;;
	*)
		echo "wat"
		exit 1
		;;
esac

exit 0
| true
|
4014990f1ed315d335b5a81f0a540291083d05c0
|
Shell
|
ggerganov/whisper.cpp
|
/examples/whisper.nvim/whisper.nvim
|
UTF-8
| 2,034
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# INSTRUCTIONS
#
#  This simple script is called by Neovim to capture audio from the microphone and transcribe it with Whisper.
#  In order for this to work, you need to clone the whisper.cpp repo and build the 'stream' tool
#
#    git clone https://github.com/ggerganov/whisper.cpp
#    cd whisper.cpp
#    make stream
#
#  Also, make sure the current script is in your PATH env variable. You should be able to run the following command:
#
#    whisper.nvim
#
#  Next, export the path to the whisper.cpp repository via the WHISPER_CPP_HOME env variable:
#
#    export WHISPER_CPP_HOME=/path/to/whisper.cpp
#
#  Finally, add the following lines to your ~/.config/nvim/init.vim:
#
#    inoremap <C-G>  <C-O>:!whisper.nvim<CR><C-O>:let @a = system("cat /tmp/whisper.nvim \| tail -n 1 \| xargs -0 \| tr -d '\\n' \| sed -e 's/^[[:space:]]*//'")<CR><C-R>a
#    nnoremap <C-G>       :!whisper.nvim<CR>:let @a = system("cat /tmp/whisper.nvim \| tail -n 1 \| xargs -0 \| tr -d '\\n' \| sed -e 's/^[[:space:]]*//'")<CR>"ap
#    vnoremap <C-G> c<C-O>:!whisper.nvim<CR><C-O>:let @a = system("cat /tmp/whisper.nvim \| tail -n 1 \| xargs -0 \| tr -d '\\n' \| sed -e 's/^[[:space:]]*//'")<CR><C-R>a
#
#  This allows you to press Ctrl-G in order to capture audio from the microphone and transcribe it.
#  When you are done speaking - press Ctrl-C
#

# the Whisper model to use
model="base.en"

# export the path to the whisper.cpp repo in the WHISPER_CPP_HOME env variable
# https://github.com/ggerganov/whisper.cpp
# FIX: fail fast (reporting through the file Neovim reads) if the cd fails,
# instead of silently running the checks from an arbitrary directory.
cd "${WHISPER_CPP_HOME}" || {
    echo "whisper.nvim: cannot cd to WHISPER_CPP_HOME=${WHISPER_CPP_HOME}" > /tmp/whisper.nvim
    exit 1
}

if [ ! -f ./stream ] ; then
    echo "whisper.nvim: the 'stream' executable was not found! WHISPER_CPP_HOME=${WHISPER_CPP_HOME}" > /tmp/whisper.nvim
    exit 2
fi

if [ ! -f ./models/ggml-${model}.bin ] ; then
    echo "whisper.nvim: the '$model' model was not found! WHISPER_CPP_HOME=${WHISPER_CPP_HOME}" > /tmp/whisper.nvim
    exit 2
fi

# fine-tune the parameters according to your machine specs
# FIX: use the configured ${model} instead of the hard-coded base.en path,
# so changing the `model` variable above actually takes effect.
./stream -t 8 -m ./models/ggml-${model}.bin --step 350 --length 10000 -f /tmp/whisper.nvim 2> /dev/null

exit 0
| true
|
dd4bc5c53670be99de7b5dd2395eff351eabf535
|
Shell
|
deepio/dot_files
|
/.functions
|
UTF-8
| 3,431
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# Strip extended attributes and EXIF/embedded metadata from every file in
# the current directory, then delete exiftool's "*_original" backups.
function cleanmeta() {
xattr -c * && exiftool -all= * && rm *_original
}
# Uninstall every package installed in the active Python environment.
function cleanpip() {
pip freeze | xargs pip uninstall -y
}
# Switch the current repository's "origin" remote from a GitHub HTTPS URL
# to the equivalent SSH URL (git@github.com:user/repo.git).
function http2ssh() {
  # First URL printed by `git remote -v`, e.g. https://github.com/user/repo.git
  REPO_URL=$(git remote -v | awk '{print $2; exit}')
  # user = text between "com/" and the next "/"; repo = text after the
  # user segment, up to ".git".
  GIT_USER=${REPO_URL#*com/}
  GIT_USER=${GIT_USER%%/*}
  GIT_REPO=${REPO_URL#*${GIT_USER}/}
  GIT_REPO=${GIT_REPO%%.git*}

  if [ -z "$REPO_URL" ]; then
    echo "-- ERROR: Could not identify Repo url."
    echo "   It is possible this repo is already using SSH instead of HTTPS."
    return 1
  fi
  if [ -z "$GIT_USER" ]; then
    echo "-- ERROR: Could not identify User."
    return 1
  fi
  if [ -z "$GIT_REPO" ]; then
    echo "-- ERROR: Could not identify Repo."
    return 1
  fi

  NEW_URL="git@github.com:$GIT_USER/$GIT_REPO.git"
  echo "Changing repo url from"
  echo "  '$REPO_URL'"
  echo "    to "
  echo "  '$NEW_URL'"
  echo ""

  git remote set-url origin "$NEW_URL"
  echo "Success"
}
# Drop into a throwaway Kali Linux container with the current directory
# mounted at /pwd; --privileged because many pentest tools need raw
# network/device access.
function kali() {
docker run --rm -v "$PWD:/pwd" --privileged -it d33pi0/kali:nightly
}
# Wipe ALL local Docker state: stop and remove every container, then
# remove every image, volume and network (order matters: containers must
# go before the resources they reference). All docker output is
# discarded; the two echos are the only feedback.
function killdocker() {
echo 'This is the Docker Master Obliterator ;)'
{
docker stop --time 1 $(docker ps -aq)
docker rm --force $(docker ps -aq)
docker rmi --force $(docker image ls -q)
docker volume rm --force $(docker volume ls -q)
docker network rm $(docker network ls -q)
} &> /dev/null
echo 'Done.'
}
# Print the PIDs of processes whose `ps` line matches the given pattern
# (case-insensitive). Shadows the system pidof(8), which only matches
# exact program names.
function pidof() {
# Fix: exclude the pipeline's own transient grep process so the function
# does not report itself as a match; "--" guards patterns starting with "-".
ps -A | grep -i -- "$@" | grep -v grep | awk '{print $1}'
}
# Re-run the given python command whenever any *.py file changes.
# Requires ag (the_silver_searcher) to list files and entr to watch them.
function runpy() {
# Phasing out run in favor of entr.
# EG:
# ls *.go | entr sh -c "go build; ./nanny"
ag -g py$ | entr -c python "$@"
}
# Re-run `go run <args>` whenever any *.go file changes.
function rungo() {
ag -g go$ | entr -c go run "$@"
}
# Launch the legacy Docker Toolbox quickstart terminal on macOS.
function startdt() {
bash --login '/Applications/Docker/Docker Quickstart Terminal.app/Contents/Resources/Scripts/start.sh'
}
# Endless pomodoro announcer via the macOS `say` command:
# 25 minutes of work, then a 5 minute break, forever (Ctrl-C to stop).
# (meh, better than googling/installing some free app from some random place.)
function pomodoro() {
  while true; do
    say "Beep Boop, start working"
    sleep 1500   # 25 minute work block
    say "Boop, Take a break"
    sleep 300    # 5 minute break
  done
}
# Scaffold a new Python project: poetry skeleton + venv, dev dependencies
# (sphinx, m2r, coverage) and a sphinx docs/ tree wired for ReadTheDocs
# with markdown sources.
# NOTE(review): assumes a single project-name argument; "$@" is used in
# several places where "$1" is meant (cd / sphinx -p would misbehave with
# multiple args) — confirm before generalizing.
function createpy() {
poetry new "$@"
cd "$@"
python3 -m venv env
source env/bin/activate
# Remove the email address from the toml file.
# (gsed = GNU sed; needed for -E/-i semantics on macOS.)
gsed -Ei "s/( <[a-zA-Z0-9.@_\-]+>)//" ./pyproject.toml
pip install poetry
poetry add --dev sphinx
poetry add --dev git+https://github.com/crossnox/m2r@dev
poetry add --dev coverage
mkdir docs/
cd docs/
sphinx-quickstart \
--no-sep \
-p "$@" \
-a deepio \
-r 0.0.1 \
-l en
# # Remove the old extensions... ?
# gsed -i "s/extensions\ \=\ \[//" ./conf.py
# gsed -Ei "s/^]$//" ./conf.py
# Using this method because it's more transparent, allowing others to read what's going on.
# Append custom sphinx config: install m2r on ReadTheDocs builds, enable
# markdown + autodoc, accept both .md and .rst sources.
echo "" >> ./conf.py
echo "" >> ./conf.py
echo "" >> ./conf.py
echo "# -- Custom Options -------------------------------------------------" >> ./conf.py
echo "" >> ./conf.py
echo "import os" >> ./conf.py
echo "import subprocess" >> ./conf.py
echo "if os.environ.get('READTHEDOCS') == 'True':" >> ./conf.py
echo "  subprocess.check_output(['pip', 'install', 'm2r'])" >> ./conf.py
echo "" >> ./conf.py
echo "extensions = extensions + ['m2r', 'sphinx.ext.autodoc']" >> ./conf.py
echo "autosectionlabel_prefix_document = True" >> ./conf.py
echo "source_suffix = ['.md', '.rst']" >> ./conf.py
mv index.rst index.md
# RESET if you need it
# rm -rf _* conf.py index.rst index.md make.bat Makefile
cd ..
mv README.rst README.md
# Travel back.
cd ..
}
| true
|
5d5f71f9bbcc1495e2f076f4d11a9c28ec6010b4
|
Shell
|
kaizendevsio/MinnyCasinoAffiliate
|
/oneclick.portal.sh
|
UTF-8
| 988
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# One-click redeploy of the MinnyCasinoAffiliate web portal: stop the
# front-end service, replace the checkout with a fresh clone of the
# release repository, then bring the service back up.

stop_frontend() {
    clear
    #echo "Stopping API Server ..."
    #sudo systemctl stop MinnyCasinoAffiliateWebApi.service
    #sudo systemctl status MinnyCasinoAffiliateWebApi.service
    echo "Stopping Front-End Server ..."
    sudo systemctl stop MinnyCasinoAffiliateWebPortal.service
    #sudo systemctl status MinnyCasinoAffiliateWebPortal.service
}

refresh_checkout() {
    clear
    echo "Git Hard Reset.."
    echo ""
    echo "Removing existing directories: API / Portal"
    sudo rm -r webapp_netcore
    echo "Git Hard Reset.."
    echo ""
    echo "Git cloning: release-master"
    sudo git clone https://github.com/kaizendevsio/Release.MinnyCasinoAffiliate.git webapp_netcore
    echo "Git clone successful.."
}

start_frontend() {
    #echo "Starting API Server ..."
    #sudo systemctl start MinnyCasinoAffiliateWebApi.service
    #sudo systemctl status MinnyCasinoAffiliateWebApi.service
    echo "Starting Front-End Server ..."
    sudo systemctl start MinnyCasinoAffiliateWebPortal.service
    #sudo systemctl status MinnyCasinoAffiliateWebPortal.service
    clear
    echo "Deployment Done. Have a good day :)"
}

stop_frontend
refresh_checkout
start_frontend
| true
|
a80e78666d4431898480eab4d2a6ed2e20442445
|
Shell
|
Eitol/go-env-installer
|
/install/deps/install_go.sh
|
UTF-8
| 737
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install Go ${GOVER} system-wide (GOROOT under /usr/local) and prepare
# a GOPATH workspace under /opt/go; the downloaded tarball is archived
# on the user's NAS share afterwards.

## CONFIG
USR=$(whoami)
SHARED_FOLDER=/home/${USR}/NAS

## GO CONFIG
GOVER='1.11.1'
GOPATH=/opt/go
GO_INSTALL_FOLDER=/usr/local
GOROOT=${GO_INSTALL_FOLDER}/go

# Install deps
# Fix: the original ran "apt update -ybuild-essentials" (a mangled flag),
# tried to install the non-existent "build-essentials" package (the Debian
# package is "build-essential"), and ran apt install without sudo.
sudo apt update -y
sudo apt dist-upgrade -y
sudo apt install -y git make build-essential

# Install go
sudo mkdir -p "${GOPATH}/src" "${GOPATH}/pkg" "${GOPATH}/bin"
curl -O "https://dl.google.com/go/go${GOVER}.linux-amd64.tar.gz"
tar xvf "go${GOVER}.linux-amd64.tar.gz"
sudo mv go "${GO_INSTALL_FOLDER}"
sudo chown -R root:root "${GOROOT}"
sudo chown -R "${USR}" "${GOPATH}"
mv "go${GOVER}.linux-amd64.tar.gz" "${SHARED_FOLDER}"

# NOTE(review): these exports only affect this script's own process; add
# the same lines to the shell profile for a persistent environment.
export GOROOT=${GOROOT}
export GOPATH=${GOPATH}
export PATH=$PATH:${GOROOT}/bin:${GOPATH}/bin
| true
|
a26eee23bba8fbb12364ac50b11b87fac5e008ac
|
Shell
|
Shanfan/garden-windows-ci
|
/tasks/write-windows2016fs-opsfile/run
|
UTF-8
| 377
| 2.890625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
set -eu

# Read the BOSH release version out of release-tarball/release.tgz (the
# /version field of the embedded release.MF manifest) and emit an
# ops-file pinning the windows2016fs release to that version.
pushd release-tarball
# `tar -O` streams release.MF to stdout; `bosh int --path` extracts the field.
release_version=$(bosh int --path /version <(tar -xzf release.tgz -O release.MF))
popd

echo "Found version: $release_version"

cat << EOF > windows2016fs-opsfile/specified-windows2016fs-version.yml
---
- type: replace
  path: /releases/name=windows2016fs?
  value:
    name: windows2016fs
    version: $release_version
EOF
| true
|
fcb180f127e54c69c106bce92570d25b19bb80b4
|
Shell
|
AntonioRodrigo92/Script_Project
|
/atualiza_musicas.sh
|
UTF-8
| 1,033
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Merge song entries from an input file (lines "title:artist") into
# musicas.txt (lines "MUS_<n>:title:artist"): for each input line, update
# the matching musicas.txt entry in place, or append it with a fresh
# MUS_<n> id. The input file name is read from stdin.
read file
i=1
d=$(cat $file | wc -l)
b=$(($d+1))
# Outer loop: one iteration per line of the input file.
while [ $i -lt $b ]; do
titulo1=$(cat $file | cut -d ':' -f1 | head -$i | tail -1)
artista1=$(cat $file | cut -d ':' -f2 | head -$i | tail -1)
ii=1
dd=$(cat musicas.txt | wc -l)
bb=$((dd+1))
templinefile=$(cat $file | head -$i | tail -1)
# Inner loop: scan every current line of musicas.txt.
while [ $ii -lt $bb ]; do
templinemusicas=$(cat musicas.txt | head -$ii | tail -1)
titulo2=$(cat musicas.txt | cut -d ':' -f2 | head -$ii | tail -1)
artista2=$(cat musicas.txt | cut -d ':' -f3 | head -$ii | tail -1)
# Existing entry with the same title:artist -> rewrite that line,
# keeping its MUS_<n> id.
# NOTE(review): the sed pattern is the raw line text; lines containing
# sed metacharacters (/, ., *) would misbehave - confirm input is plain.
if [ "$(cat musicas.txt | head -$ii | tail -1 | grep "$titulo1:$artista1" | wc -l)" -eq "1" ]; then
mus=$(cat musicas.txt | cut -d ':' -f1 | head -$ii | tail -1)
sed -i "s/$templinemusicas/$mus:$templinefile/" musicas.txt
fi
# No entry at all for this title:artist -> append with the next id and
# grow the inner-loop bounds so the new line is not re-scanned past.
if [ "$(grep "$titulo1:$artista1" musicas.txt | wc -l)" -ne "1" ]; then
aux=$(cat musicas.txt | wc -l)
aux=$(($aux+1))
dd=$aux
bb=$(($dd+1))
echo "MUS_$aux:$templinefile" >> musicas.txt
fi
ii=$(($ii+1))
done
i=$(($i+1))
done
| true
|
256bc81858b2ced01d482f171731269bbefcba6e
|
Shell
|
jiangbiaoah/oilserver
|
/server_comm/logsplit.sh
|
UTF-8
| 434
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
# Rotate the service's nohup output: copy nohup.out into
# logfold/<YYYY-MM>/log_<YYYYMMDD> (dated for yesterday) and truncate it.
# This script must live in the project directory (/root/server_comm),
# not in the repository root.
cd /root/server_comm

archive_root="logfold"
[ -d "$archive_root" ] || mkdir "./$archive_root"

month_dir=$(date +"%Y-%m")
[ -d "./$archive_root/$month_dir" ] || mkdir "./$archive_root/$month_dir"

yesterday=$(date -d "1 day ago" +"%Y%m%d")
cp ./nohup.out "./$archive_root/$month_dir/log_$yesterday"

# Empty the live log in place so the still-running process keeps
# appending to the same file.
cat /dev/null > nohup.out
| true
|
bfa93793c1fa364b47219c808dfbc0156546a25a
|
Shell
|
dustyleary/langproto
|
/lang.3/runtests.sh
|
UTF-8
| 441
| 3.4375
| 3
|
[] |
no_license
|
# Run every test*.ss script in this directory with green/red colored
# status output; abort with exit code 1 on the first failing test.
cd $(dirname $0)

# Only set up colors when tput is available (e.g. skip on dumb terminals;
# the color variables stay empty otherwise).
if [ -n "$(which tput)" ]; then
green="`tput sgr0; tput setaf 2`"
GREEN="`tput setaf 2; tput bold`"
RED="`tput setaf 1; tput bold`"
colors_reset="`tput sgr0`"
fi

passed="${GREEN}# OK"

for f in ./test*.ss ; do
# Print the test name, run it, report OK; on failure print FAILED and stop.
{ echo "$green# $f $colors_reset" && $f && echo -e "$passed"; } || { echo -e "$RED# FAILED: $f\n"; exit 1; }
echo -e "$colors_reset"
done

echo -e "$GREEN#\n# ALL TESTS PASSED\n#$colors_reset\n"
| true
|
0465affb618e56d9d8be5d6dab2c3ec099df70ca
|
Shell
|
sergeimonakhov/encryptbackup
|
/mysqlback.sh
|
UTF-8
| 478
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh
# Dump a single MySQL database ("$1") to a gzipped file in /tmp named
# <db><YYYY-MM-DD>.sql.gz, printing "ok <size>" or "ERROR!!!".

DATE=$(date +%F)
DIR=/tmp
BASES="$1"
#MYSQL_USER
USER=root
#MYSQL_PASS
# NOTE(review): -p$PASSWD exposes the password in the process list;
# prefer ~/.my.cnf or --defaults-extra-file.
PASSWD="qwerty"

# Database backup function
# Fix: expansions are now quoted so database names containing shell
# metacharacters cannot break the command line or the output path.
backupDB() {
echo -n "Dumping $BASES..."
((mysqldump -u "$USER" -h localhost -p"$PASSWD" "$BASES" | gzip -c > "$DIR/$BASES$DATE.sql.gz") \
&& echo -n "ok "; du -h "$DIR/$BASES$DATE.sql.gz" | awk '{print($1)}') \
|| echo "ERROR!!!"
}

if [ -n "$BASES" ];then
echo "\n## Dumping MySQL Databases ##"
backupDB
exit 0
fi
| true
|
62423c8c9370d6e8f597f704d3dacb852fd2ea43
|
Shell
|
mahongquan/termux-rootfs
|
/termux-rootfs/data/data/com.termux/files/usr/etc/basic_environment.sh
|
UTF-8
| 2,985
| 2.953125
| 3
|
[] |
no_license
|
###############################################################
##
## Basic environment for Termux
##
## Should be sourced by any shell's profile.
##
###############################################################

##
## Set the strict file mode creation mask so
## only user with Termux UID can access them.
##
umask 077

##
## Choose home directory
##
## If shell is running under proot as regular user, then
## PROOTED_SHELL will be set so we will not change the
## home directory if root user is emulated (for example with
## fakeroot).
##
if [ "$(id -u)" = "0" ] && [ -z "${PROOTED_SHELL}" ]; then
export HOME="/data/data/com.termux/files/root"
else
export HOME="/data/data/com.termux/files/home"
fi

##
## Basic environment variables
##
## Do not touch if you are not know what you are
## doing.
##
export EDITOR="${PREFIX}/bin/nano"
export GOPATH="${HOME}/.go"
export LD_LIBRARY_PATH="${PREFIX}/lib"
export MAIL="${PREFIX}/var/mail/${USER}"
export PATH="${HOME}/.bin:${HOME}/.local/bin:${PREFIX}/bin"
export SDEXT_STORAGE="/data/data/com.termux/files/sdext"
export TERMINFO="${PREFIX}/share/terminfo"
## UID that owns the Termux files tree (used by tools that need to know
## the app's numeric id).
export TERMUX_UID=$(stat -c '%u' "/data/data/com.termux/files")
export TMPDIR="${PREFIX}/tmp"

##
## This variable points to the executable of your
## current shell.
##
## Variable 'SHELL' should be set in profile and
## in *rc file (bash.bashrc or etc).
##
export SHELL=$(readlink "/proc/$$/exe")

##
## Setup private bin directory for user, so it
## can store custom software.
##
if [ ! -e "${HOME}/.local/bin" ]; then
mkdir -p "${HOME}/.local/bin" > /dev/null 2>&1
fi

##
## Android-related variables
##
## Do not touch if you are not know what you are
## doing.
##
export ANDROID_DATA="${PREFIX}/var/lib/android"
export ANDROID_ROOT="/system"
export BOOTCLASSPATH="/system/framework/core-libart.jar:/system/framework/conscrypt.jar:/system/framework/okhttp.jar:/system/framework/core-junit.jar:/system/framework/bouncycastle.jar:/system/framework/ext.jar:/system/framework/framework.jar:/system/framework/telephony-common.jar:/system/framework/voip-common.jar:/system/framework/ims-common.jar:/system/framework/apache-xml.jar:/system/framework/org.apache.http.legacy.boot.jar:/system/framework/sec_edm.jar:/system/framework/sagearpolicymanager.jar:/system/framework/timakeystore.jar:/system/framework/fipstimakeystore.jar:/system/framework/secocsp.jar:/system/framework/commonimsinterface.jar:/system/framework/imsmanager.jar:/system/framework/sprengine.jar:/system/framework/smartbondingservice.jar:/system/framework/knoxvpnuidtag.jar:/system/framework/sec_sdp_sdk.jar:/system/framework/sec_sdp_hidden_sdk.jar:/system/framework/simageis.jar"
##
## If shell is running under termux-chroot then the
## external storage will be /mnt/emulated/0
##
if [ -e "/.termux-chroot" ]; then
export EXTERNAL_STORAGE="/mnt/storage/emulated/0"
fi

##
## This is needed for LVM programs to prevent warnings
## about leaked descriptors.
##
## Fix: the variable must be exported - a plain assignment is invisible
## to the LVM child processes, so the setting previously had no effect.
##
export LVM_SUPPRESS_FD_WARNINGS="true"
| true
|
1094339a1f307ac4041c2fb96a082ff27c7dfb33
|
Shell
|
Gabelbombe/infra-ci
|
/scripts/tf.sh
|
UTF-8
| 331
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -euo pipefail

# Run terraform (via docker-compose) with credentials pulled from the
# given AWS CLI profile.
# Usage: tf.sh <aws_profile> [terraform args...]
if [[ $# -lt 1 ]]; then
  echo "usage: $0 <aws_profile>"
  exit 1
fi

PROFILE=$1
shift

# Fix: forward the remaining arguments with "$@" instead of $*, so
# terraform arguments containing spaces keep their word boundaries.
AWS_ACCESS_KEY_ID=$(aws configure --profile "$PROFILE" get aws_access_key_id) \
AWS_SECRET_ACCESS_KEY=$(aws configure --profile "$PROFILE" get aws_secret_access_key) \
  docker-compose run terraform "$@"
| true
|
149fcdc7b9ee85e951eb0c1cec3766971c1ed55d
|
Shell
|
ffremont/Downloader
|
/player.sh
|
UTF-8
| 1,694
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Simple dialog-based video library: pick a file, then play it or mark
# it as watched (moved into ./vus).
# see http://linuxcommand.org/lc3_adv_dialog.php
# xfce4-terminal -e "bash -c 'player.sh'"
PLAYER_CMD=(vlc)

# Split only on newline/backspace so filenames containing spaces survive
# the unquoted expansions in the loops below.
SAVEIFS=$IFS
IFS=$(echo -en "\n\b")

# lire (player) ou marquer comme lu (mv)
# Offer "play" / "mark as watched" for the selected file ($1).
showAction () {
FILENAME=$1
actionCmd=(dialog --clear --cancel-label "Annuler" --output-fd 1 --title "Actions" --radiolist "Veuillez choisir une action" 10 35 8)
actionOptions=(0 "Lire la vidéo" "ON" 1 "Marquer comme lu" "OFF")
actionChoices=$("${actionCmd[@]}" "${actionOptions[@]}")
if [[ ! -z "$actionChoices" ]]; then
echo "actionChoices : ${actionChoices} of ${FILENAME}"
if [ "$actionChoices" -eq 0 ] ; then
# lire
# Fix: run the player directly; the original wrapped it in $(...),
# which executed the player's stdout as a command.
"${PLAYER_CMD[@]}" "${FILENAME}"
elif [ "$actionChoices" -eq 1 ] ; then
# lu
# Fix: -p so an already-existing "vus" directory is not an error.
mkdir -p vus
mv "$FILENAME" vus/
fi
fi
}

# Main loop: build a radiolist of files (newest first) until cancelled.
while [ 1 ]
do
i=0
options=()
for LINE in `ls -A1t`; do
if [ -f "$LINE" ]
then
options[i]=$i
i=$((i+1))
options[i]="$LINE"
i=$((i+1))
# Pre-select the first (most recent) entry.
if [ $i == 2 ]
then
options[i]="ON"
else
options[i]="OFF"
fi
i=$((i+1))
fi
done

cmd=(dialog --clear --cancel-label "Annuler" --output-fd 1 --title "'Vidéothèque'" --radiolist "'Veuillez choisir un fichier'" 30 55 30)
choices=$("${cmd[@]}" "${options[@]}")

if [[ -z "$choices" ]]; then
break;
else
# options[n] holds the tag, options[n+1] the filename.
showAction "${options[${choices}+1]}"
fi
done

clear
IFS=$SAVEIFS
| true
|
10b3ab70ae4788e1e671ed8a988ac476a9298d90
|
Shell
|
jasonwwl/docker-php
|
/build.sh
|
UTF-8
| 365
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the hyperf/$1 PHP image and push it to the Aliyun registry
# tagged "<variant>-<version>" (i.e. "$1-$2").

TAG="$1-$2"

docker build -t docker-php "./hyperf/$1"

# NOTE(review): -p puts the password on the command line (visible in ps);
# prefer `docker login --password-stdin`.
docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" shiyuehehu-registry.cn-beijing.cr.aliyuncs.com

echo php-hyperf:$TAG
# Fix: quote the tag/path expansions so unexpected characters in the
# arguments cannot split or glob the docker command lines.
docker tag docker-php:latest "shiyuehehu-registry.cn-beijing.cr.aliyuncs.com/common/php-hyperf:$TAG"
docker push "shiyuehehu-registry.cn-beijing.cr.aliyuncs.com/common/php-hyperf:$TAG"
| true
|
b69bf36ca7789e35787d16b20b1c14195ad9e8a4
|
Shell
|
RazvanRanca/iGEM-Modelling
|
/sensitivity.sh
|
UTF-8
| 1,073
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Parameter sweep driver for a KaSim model. The single model file in
# directory $1 must contain exactly one template token "{start-end-step}";
# for each value in seq(start, step, end) the token is substituted, the
# simulation is run $2 times, and outputs land in rez<value>/<run>.out.
dir=${1:-"t1"}
runs=${2:-1}
echo "Running with: "$dir" "$runs
cd $dir
rm *~
rm -rf rez*
#echo $runs
# Exactly one file may contain the '}' of the template token.
noFiles=$(grep -l '}' * | wc -l)
if [ $noFiles != 1 ]
then
echo MORE THAN 1 FILE MATCHING PATTERN
exit
fi
file=$(grep -l '}' *)
echo $file
noMatches=$(grep -c '}' $file)
if [ $noMatches != 1 ]
then
echo MORE THAN 1 PATTERN IN $file
exit
fi
# Work on a copy named "source"; $file is regenerated per value below.
mv $file "source"
# pat = "{n1-n2-n3}" -> n1 = start, n2 = end, n3 = step.
pat=$(grep -Eoh '[{][0-9.]+-[0-9.]+-[0-9.]+}' "source")
p1=$(echo $pat | grep -Eo '^[{][0-9.]+')
n1=${p1:1}
p2=${pat:${#p1}+1}
n2=$(echo $p2 | grep -Eo '^[0-9.]+')
p3=${pat:${#p1}+${#n2}+2}
n3=$(echo $p3 | grep -Eo '^[0-9.]+')
vals=$(seq $n1 $n3 $n2)
count=0
for v in $vals
do
rezDir="rez$v"
#rm -rf $rezDir
mkdir $rezDir
count=$(($count+1))
# Regenerate the model file with the template token replaced by $v.
sed s/$pat/$v/ "source" > $file
for i in $(seq 1 $runs)
do
echo $dir, $count, $v, $i
fl=$rezDir"/"$i".out"
../KaSim_3 -i 1_TCA.ka -i 2_NapC.ka -i 5.ka -t 2.5E6 -p 1000 -o model.temp
# Strip the first column character (KaSim's leading comment marker).
cat model.temp | cut -c 2- > $fl
rm model.temp
done
rm $file
done
# Restore the original (templated) model file.
mv "source" $file
#java -jar ../MeanPlotViewer-v0.0.1.jar
| true
|
0e2cea32116d1ed4752e84f0a1f4ac14eeb7ad61
|
Shell
|
tjlygdx/Utopia
|
/service.sh
|
UTF-8
| 1,843
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# utopia Startup script for the utopia Server
#
# chkconfig: - 85 12
# description: Open source detecting system
# processname: utpia
# Date: 2015-12-16
# Version: 1.0.0
# Site: http://www.utpia.deppon.com
# Author: utopia group

# RHEL init helpers: daemon/success/failure/echo_success/echo_failure.
. /etc/init.d/functions

export PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/opt/deppon/bin
base_dir=$(dirname $0)
PROC_NAME="utopia"
# Presence of this lock file marks the service as running.
lockfile=/var/lock/subsys/${PROC_NAME}

# Start the Django dev server and the log handler unless already running.
start() {
utopia_start=$"Starting ${PROC_NAME} service:"
if [ -f $lockfile ];then
echo "utopia is running..."
success "$utopia_start"
else
daemon python $base_dir/manage.py runserver 0.0.0.0:8000 &>> /tmp/utopia.log 2>&1 &
daemon python $base_dir/log_handler.py &> /dev/null 2>&1 &
sleep 1
echo "$utopia_start"
nums=0
# NOTE(review): the success check below expects 3 matches for the 2
# programs started (presumably runserver's autoreloader spawns a child
# process) - confirm against the deployed Django configuration.
for i in manage.py log_handler.py;do
ps aux | grep "$i" | grep -v 'grep' && let nums+=1 &> /dev/null
done
if [ "x$nums" = "x3" ];then
success "utopia_start"
touch "$lockfile"
echo
else
failure "$utopia_start"
echo
fi
fi
}

# Kill both python processes (SIGKILL) and drop the lock file.
stop() {
echo -n $"Stopping ${PROC_NAME} service:"
if [ -e $lockfile ];then
ps aux | grep -E 'manage.py|log_handler.py' | grep -v grep | awk '{print $2}' | xargs kill -9 &> /dev/null
ret=$?
if [ $ret -eq 0 ]; then
echo_success
echo
rm -f "$lockfile"
else
echo_failure
echo
fi
else
echo_success
echo
fi
}

restart(){
stop
start
}

# See how we were called.
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
restart
;;
*)
echo $"Usage: $0 {start|stop|restart}"
exit 2
esac
| true
|
b48068f16b30579f13aa08725b048a815082093e
|
Shell
|
tskkst51/lplTrade
|
/bu/scripts/testResults.sh
|
UTF-8
| 921
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash

## Run bin/test.py against every directory under ./test, appending all
## output to a dated log file in the working directory.

wp=$(pwd)
testCmd="${wp}/bin/test.py"

# Fix: the original tested the (still-undefined) $testPath here;
# validate the test runner we just resolved instead.
if [[ ! -e $testCmd ]]; then
echo "$testCmd" not found!
exit 1
fi

host=$(hostname -s)

# The virtualenv lives in a host-specific sibling directory.
if [[ $host == "ML-C02C8546LVDL" ]]; then
activateDir="/lplW"
else
activateDir="/venv"
fi

activateCmd=$(dirname $wp)
activateCmd+="${activateDir}/bin/activate"

py3=$(dirname $wp)
py3+="${activateDir}/bin/python3"

echo "Paths:"
echo $activateCmd
echo $testCmd
echo $py3
# NOTE(review): $stocksPath is never defined in this script - confirm it
# is meant to come from the environment.
echo $stocksPath
echo

. $activateCmd || echo activation failed

# Execute script to populate source library path
$HOME/bin/lplt.sh

dt=$(date "+%d%m%Y")
testPaths=$(ls test)
log="${wp}/testOut_${dt}"

for testDir in $testPaths; do
testPath="test/${testDir}"
echo $testPath
cmd="$py3 $testCmd -c $HOME/profiles/et.json -w $testPath -p ${testPath}/profiles/active.json"
# Fix: append to the dated log file; the original wrote to the
# undefined variable $l, and also invoked "$HOME/bin/sh" here, which
# would start an interactive shell and stall the loop (removed).
$cmd >> "$log"
done

exit 0
| true
|
ce70e0b82f94c1043d643060e06656e6943e2314
|
Shell
|
rgarner/lcc-site-allocations-data
|
/bin/extract_sites.sh
|
UTF-8
| 1,599
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
set -ex

RAW=data/raw
OUTPUT=data/output

# extract <n> <pdf name> <page spec>: pull the allocation tables out of
# one area PDF with tabula, keeping only data rows (lines that start
# with a digit or "HLA"), into $OUTPUT/<n>.csv.
extract() {
tabula "$RAW/$2" -p "$3" --spreadsheet | grep '^[0-9]\|^HLA' > "$OUTPUT/$1.csv"
}

extract 1 "1. Aireborough A3 Inc Site Schedule.pdf" 1-4,6-9
extract 2 "2. City Centre A3 Inc Site Schedule.pdf" 2-13
extract 3 "3. East Leeds A3 Inc Site Schedule.pdf" 2-7
extract 4 "4. Inner Area A3 Inc Site Schedule.pdf" 2-17
extract 5 "5. North Leeds A3 Inc Site Schedule.pdf" 2-14
extract 6 "6. Outer North East A3 Inc Site Schedule.pdf" 2-16
extract 7 "7. Outer North West A3 Inc Site Schedule.pdf" 2-7
extract 8 "8. Outer South A3 Inc Site Schedule.pdf" 2-9
extract 9 "9. Outer South East A3 Inc Site Schedule.pdf" 2-10
extract 10 "10. Outer South West A3 Inc Site Schedule.pdf" 2-20
extract 11 "11. Outer West A3 Inc Site Schedule.pdf" 2-16

# Stitch the per-area files into one CSV with a header row, then clean up.
echo 'SHLAA Ref,Address,Area ha,_something_,Capacity,I&O RAG,Settlement Hierarchy,Green/Brown,Reason' > $OUTPUT/sites.csv
cat $OUTPUT/{1..11}.csv >> $OUTPUT/sites.csv
rm $OUTPUT/{1..11}.csv
| true
|
67de12cda705a11d9590b786070dc81354994741
|
Shell
|
FokinAleksandr/dotfiles
|
/.macos
|
UTF-8
| 6,925
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash

# ~/.macos — https://mths.be/macos
# Idempotent macOS preferences setup; most settings are written with
# `defaults` and picked up after the killall restarts at the bottom
# (some need a logout/restart, as noted in the final echo).

echo "making system modifications:"

# Close any open System Preferences panes, to prevent them from overriding
# settings we’re about to change
osascript -e 'tell application "System Preferences" to quit'

###############################################################################
# General UI/UX                                                               #
###############################################################################

# set F1 F2
defaults write -g com.apple.keyboard.fnState -bool true

# Set computer name (as done via System Preferences → Sharing)
#sudo scutil --set ComputerName "yetto"
#sudo scutil --set HostName "yetto"
#sudo scutil --set LocalHostName "yetto"

# Save to disk (not to iCloud) by default
# defaults write NSGlobalDomain NSDocumentSaveNewDocumentsToCloud -bool false

# Disable automatic capitalization as it’s annoying when typing code
defaults write NSGlobalDomain NSAutomaticCapitalizationEnabled -bool false

# Disable smart dashes as they’re annoying when typing code
defaults write NSGlobalDomain NSAutomaticDashSubstitutionEnabled -bool false

# Disable automatic period substitution as it’s annoying when typing code
defaults write NSGlobalDomain NSAutomaticPeriodSubstitutionEnabled -bool false

# Disable smart quotes as they’re annoying when typing code
defaults write NSGlobalDomain NSAutomaticQuoteSubstitutionEnabled -bool false

# Disable auto-correct
defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false

# Always show scrollbars like in windows
defaults write -g AppleShowScrollBars -string "Always"

###############################################################################
# Trackpad, mouse, keyboard, Bluetooth accessories, and input                 #
###############################################################################

# Disable mouse acceleration
# NOTE(review): written without a type flag; confirm macOS accepts this
# untyped (-float -1 is the commonly documented form).
defaults write -g com.apple.mouse.scaling -1

# Enable full keyboard access for all controls
# (e.g. enable Tab in modal dialogs)
defaults write NSGlobalDomain AppleKeyboardUIMode -int 3

# Disable press-and-hold for keys in favor of key repeat
defaults write NSGlobalDomain ApplePressAndHoldEnabled -bool false

# Set a blazingly fast keyboard repeat rate
defaults write NSGlobalDomain KeyRepeat -int 1
defaults write NSGlobalDomain InitialKeyRepeat -int 15

###############################################################################
# Screen                                                                      #
###############################################################################

# Save screenshots to the Documents
defaults write com.apple.screencapture location -string "${HOME}/Documents"

###############################################################################
# Finder                                                                      #
###############################################################################

# Finder: allow quitting via ⌘ + Q; doing so will also hide desktop icons
defaults write com.apple.finder QuitMenuItem -bool true

# Set default location for new Finder windows
# For other paths, use `PfLo` and `file:///full/path/here/`
defaults write com.apple.finder NewWindowTarget -string "PfLo"
defaults write com.apple.finder NewWindowTargetPath -string "file://${HOME}"

# Finder: show hidden files by default
defaults write com.apple.finder AppleShowAllFiles -bool true

# Finder: show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true

# Finder: show status bar
defaults write com.apple.finder ShowStatusBar -bool true

# Finder: show path bar
defaults write com.apple.finder ShowPathbar -bool true

# Display full POSIX path as Finder window title
defaults write com.apple.finder _FXShowPosixPathInTitle -bool true

# Search in folder
defaults write com.apple.finder FXDefaultSearchScope -string "SCcf"

# Keep folders on top when sorting by name
defaults write com.apple.finder _FXSortFoldersFirst -bool true

# Disable the warning when changing a file extension
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false

# Avoid creating .DS_Store files on network or USB volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
defaults write com.apple.desktopservices DSDontWriteUSBStores -bool true

# Use list view in all Finder windows by default
# Four-letter codes for the other view modes: `icnv`, `clmv`, `glyv`
defaults write com.apple.finder FXPreferredViewStyle -string "Nlsv"

# Disable the warning before emptying the Trash
defaults write com.apple.finder WarnOnEmptyTrash -bool false

# Expand the following File Info panes:
# “General”, “Open with”, and “Sharing & Permissions”
defaults write com.apple.finder FXInfoPanesExpanded -dict \
General -bool true \
OpenWith -bool true \
Privileges -bool true

###############################################################################
# Dock, Dashboard, and hot corners                                            #
###############################################################################

# Set the icon size of Dock items to 60 pixels
defaults write com.apple.dock tilesize -int 60

# Wipe all (default) app icons from the Dock
# This is only really useful when setting up a new Mac, or if you don’t use
# the Dock to launch apps.
defaults write com.apple.dock persistent-apps -array

# Don’t show recent applications in Dock
defaults write com.apple.dock show-recents -bool false

###############################################################################
# Terminal                                                                    #
###############################################################################

# Enable “focus follows mouse” for Terminal.app and all X11 apps
# i.e. hover over a window and start typing in it without clicking first
defaults write com.apple.terminal FocusFollowsMouse -bool true
defaults write org.x.X11 wm_ffm -bool true

###############################################################################
# Activity Monitor                                                            #
###############################################################################

# Show the main window when launching Activity Monitor
defaults write com.apple.ActivityMonitor OpenMainWindow -bool true

# Visualize CPU usage in the Activity Monitor Dock icon
defaults write com.apple.ActivityMonitor IconType -int 5

# Show all processes in Activity Monitor
defaults write com.apple.ActivityMonitor ShowCategory -int 0

# Sort Activity Monitor results by CPU usage
defaults write com.apple.ActivityMonitor SortColumn -string "CPUUsage"
defaults write com.apple.ActivityMonitor SortDirection -int 0

# Restart the affected apps so the new defaults take effect now.
killall Dock
killall Finder

echo "Done. Note that some of these changes require a logout/restart to take effect."
| true
|
dd74f59829767123a7853c0e0faea130101d8adf
|
Shell
|
cbm-fles/flesnet
|
/contrib/install-docker
|
UTF-8
| 1,110
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash

# Install Docker Engine on a Debian Jessie or Stretch
# from https://docs.docker.com/engine/installation/linux/docker-ce/debian/

echo "*** Uninstall old versions"
# Runs before `set -e` on purpose: removal fails harmlessly when the old
# packages were never installed.
sudo apt-get remove docker docker-engine docker.io

set -e

# SET UP THE REPOSITORY
echo "*** Install prerequisites"
sudo apt-get update

# Install packages to allow apt to use a repository over HTTPS
# NOTE(review): no -y flags, so each install step prompts; presumably
# intended for interactive use.
sudo apt-get install \
apt-transport-https \
ca-certificates \
curl \
gnupg2 \
software-properties-common

echo "*** Add Docker’s official GPG key"
echo "*** Expected fingerprint: 9DC8 5822 9FC7 DD38 854A E2D8 8D81 803C 0EBF CD88"
# $ID from /etc/os-release selects the debian/ubuntu download path.
curl -fsSL https://download.docker.com/linux/$(. /etc/os-release; echo "$ID")/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88

echo "*** Add Stable repository to apt sources"
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") \
$(lsb_release -cs) \
stable"

echo "*** Install Docker CE"
sudo apt-get update
sudo apt-get install docker-ce

echo "*** Verify install"
sudo docker run hello-world
| true
|
638f42f6c53cadcb20ecf13d2c28980cf9d9395b
|
Shell
|
blake-wilson/nixpkgs
|
/pkgs/games/keen4/builder.sh
|
UTF-8
| 755
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -o nounset

# Nix builder for Commander Keen 4: unpack the game data into the store
# and generate a wrapper script that runs the game via DOSBox from a
# writable per-user directory (~/.keen4).

# shellcheck source=/dev/null
source "${stdenv}/setup"

mkdir -p "${out}/share/keen4"
unzip -j "$dist" -d "${out}/share/keen4"

mkdir -p "${out}/bin"

# Everything between <<EOF ... EOF is the generated wrapper. Unescaped
# expansions (${out}, ${dosbox}, $SHELL) are filled in at build time;
# escaped ones (\$HOME, \$i, ...) are left for run time.
cat > "${out}/bin/keen4" <<EOF
#! $SHELL -e

if test -z "\$HOME"; then
echo "HOME directory not set"
exit 1
fi

# Game wants to write in the current directory, but of course we can't
# let it write in the Nix store. So create symlinks to the game files
# in ~/.keen4 and execute game from there.
mkdir -p \$HOME/.keen4
cd \$HOME/.keen4
ln -sf "${out}/share/keen4/"* .

"${dosbox}/bin/dosbox" ./KEEN4E.EXE -fullscreen -exit || true

# Cleanup the symlinks.
for i in *; do
if test -L "\$i"; then
rm "\$i"
fi
done
EOF

chmod +x "${out}/bin/keen4"
| true
|
4233ed371d790317b7d421e7b5d6e40f248f8fbe
|
Shell
|
mohammedsuhail83/bash-script
|
/case-statment.sh
|
UTF-8
| 380
| 3.609375
| 4
|
[] |
no_license
|
#! /bin/bash
# Tiny interactive menu: prompt for a letter and run the matching
# system command (hostname / uptime / who / pwd).
echo
echo please choose one of the options below
echo 'a=to print the server name'
echo 'b=to print the load average and uptime'
echo 'c=to check who are loggedin the server'
echo 'd=to print the current directory'
# Fix: -r keeps backslashes in the user's input literal instead of
# treating them as escape characters.
read -r choice
case $choice in
a)
hostname
;;
b)
uptime
;;
c)
who
;;
d)
pwd
;;
*)
echo invalid input, please check again
esac
| true
|
c25cd777f9849fd93f30d16481e615e3a1761c0f
|
Shell
|
webconn/ausrv_solution2017
|
/run_tests.sh
|
UTF-8
| 570
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Run a recognizer binary ($1) over positive tests in $2/YES (must
# accept) and negative tests in $2/NO (must reject). Prints PASS/FAIL
# and exits non-zero on failure.

BINARY=$1
TESTS_DIR=$2
RESULT="PASS"

if [[ $# -lt 2 ]]; then
echo "Usage: $0 prog_binary tests_dir"
exit 1
fi

echo "-> Run positive tests"
# Fix: glob instead of parsing `ls` so filenames with spaces stay intact.
for path in "$TESTS_DIR"/YES/*; do
f=${path##*/}
if ! "$BINARY" "$path"; then
echo "  Test $f failed!"
RESULT="FAIL"
fi
done
echo ""

echo "-> Run negative tests"
for path in "$TESTS_DIR"/NO/*; do
f=${path##*/}
if "$BINARY" "$path"; then
echo "  Test $f failed!"
RESULT="FAIL"
fi
done
echo ""

echo "Result: $RESULT"
# Fix: report failure through the exit status; the original always
# exited 0, which made the script useless in CI pipelines.
[[ "$RESULT" == "PASS" ]] || exit 1
| true
|
6f7ec62914b3da7d1b027925de0482a32f5c582b
|
Shell
|
mtfuller/CS-4504-Project
|
/run
|
UTF-8
| 371
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch one of the project's Java programs from the build output dir.
# Usage: run (main|router)

app_name=$1

BUILD_DIR="./out/"
#
MAIN_CLASS="Main"
#
SERVER_ROUTER_CLASS="edu.kennesaw.cs4504.services.TCPServerRouter"

if [ "$app_name" = "main" ] ; then
java -cp $BUILD_DIR $MAIN_CLASS
elif [ "$app_name" = "router" ] ; then
# Fix: the original echoed the undefined variable $SERVER_ROUTER_PATH,
# which always printed an empty line.
echo $SERVER_ROUTER_CLASS
java -cp $BUILD_DIR $SERVER_ROUTER_CLASS
else
echo "Could not find the program: " $app_name
fi
| true
|
1f176547ef3b1fab39bf42a82ad5f02c18b023b9
|
Shell
|
imma/wtf
|
/exec/tf-plan
|
UTF-8
| 327
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash

# Refresh terraform modules and write a plan to ".plan"; if the plan is
# a no-op, delete the plan file so callers can treat its absence as
# "nothing to do". Relies on helper scripts from the surrounding repo
# ("script/profile", "normalize", "sub") - TODO confirm their contracts.
function main {
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source normalize

terraform get
terraform plan -out .plan "$@"
# "terraform show" prints exactly this sentence for an empty plan.
if [[ "$(terraform show .plan)" == "This plan does nothing." ]]; then
rm -f .plan
fi
}

source sub "$BASH_SOURCE" "$@"
| true
|
66f17fa3f9486fa1a68eec23fa6e38c8a87dba31
|
Shell
|
leuty/primary_data_SMP_Feedback
|
/simualtions/gen_boundaries/0_get_data/job
|
UTF-8
| 680
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -l
#
#SBATCH --job-name=get_data
#SBATCH --output=job.out
#SBATCH --time=01:00:00
#SBATCH --ntasks=1
#SBATCH --partition=xfer

# Stage ERA-Interim boundary data (May-Aug 2006) and auxiliary COSMO
# files for the ifs2lm / lm2lm processing chain.

module unload xalt

# Fix: the original tested ../input/output/ifs2lm but created
# ../input/ifs2lm, so the existence check never matched the directory
# it was guarding (harmless only because of mkdir -p).
if [ ! -d ../input/ifs2lm ] ; then mkdir -p ../input/ifs2lm ; fi

# Unpack each monthly archive, renaming caf* files to cas* on the fly.
# NOTE(review): extraction targets ./input while the mkdir above targets
# ../input - confirm which location the downstream steps expect.
find /project/pr04/ERAinterim/2006/ERAINT_2006_{05..08}.tar.gz -exec tar --transform 's/caf/cas/' -xf {} -C input \;

cp /project/pr04/davidle/results_clim/lm_c/24h/2006/lffd2006050100.nc ../input/lffd2006050100_lm_c.nc
cp /project/pr04/davidle/results_clim/lm_f/24h/2006/lffd2006050100.nc ../input/lffd2006050100_lm_f.nc
cp /project/pr04/extpar-HPC/extp2_EUROPE_MCH_12km.nc ../1_ifs2lm/
cp /project/pr04/extpar-HPC/extpar_2km_europe.nc ../3_lm2lm/
| true
|
ef1cf7fc6dd9223f5393269f295f110dd998f4c9
|
Shell
|
seanbreckenridge/dotfiles
|
/.config/zsh/functions/which-cat
|
UTF-8
| 626
| 4.34375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
#runs which, and prints the contents of the function/script
# If `which` resolves to a readable UTF-8 text file, cat it; if readable
# but binary, describe it with file(1). For aliases/functions (or
# lookup failures) print `which`'s own output instead.
function which-cat() {
local COMMAND_OUTPUT USER_INPUT
USER_INPUT="${1:?Must provide a command to lookup}"
if COMMAND_OUTPUT="$(which "${USER_INPUT}")"; then
# if the file is readable
if [[ -r "${COMMAND_OUTPUT}" ]]; then
# A clean utf-8 -> utf-8 round trip through iconv means the file is
# text and safe to dump to the terminal.
if iconv --from-code="utf-8" --to-code="utf-8" "${COMMAND_OUTPUT}" >/dev/null 2>&1; then
command cat "${COMMAND_OUTPUT}"
else
file "${COMMAND_OUTPUT}"
fi
else
# error finding command, or its an alias/function
printf '%s\n' "${COMMAND_OUTPUT}"
fi
else
# which failed: its captured message goes to stderr.
printf '%s\n' "${COMMAND_OUTPUT}" >&2
fi
}
| true
|
66d85e381d4dd160414e9c8cc34556e2a7f50cbb
|
Shell
|
Riey/gentoo-emuera
|
/games-util/emuera-launcher/files/emuera
|
UTF-8
| 140
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
GAME_DIR=${1:-$PWD}
pushd "$GAME_DIR"
ln -s /usr/lib/emuera/Emuera.exe .#Emuera.exe
wine .#Emuera.exe
rm .#Emuera.exe
popd
| true
|
35e980fa7d49f5553500a7dee1f618ba37f1972f
|
Shell
|
totte/desktop
|
/konsole/PKGBUILD
|
UTF-8
| 851
| 2.765625
| 3
|
[] |
no_license
|
#Contributions from Arch: https://projects.archlinux.org/svntogit/packages.git/tree/trunk?h=packages/konsole
source ../kdeapps.conf
pkgname=konsole
pkgver=${_kdever}
pkgrel=1
pkgdesc="KDE's terminal emulator"
arch=('x86_64')
url='http://kde.org/applications/system/konsole/'
screenshot=('https://www.kde.org/images/screenshots/konsole.png')
license=('LGPL')
depends=('kdelibs4support' 'knotifyconfig' 'kpty')
makedepends=('extra-cmake-modules' 'kdoctools' 'python3')
groups=('kde')
conflicts=('kde-baseapps-konsole<4.14.3-2')
replaces=('kde-baseapps-konsole')
source=("$_mirror/${pkgname}-$_kdever.tar.xz")
sha256sums=(`grep ${pkgname}-$_kdever.tar.xz ../checksums.txt | cut -d " " -f1`)
prepare() {
mkdir -p build
}
build() {
cd build
cmake_kf5 ../${pkgname}-${pkgver}
make
}
package() {
cd build
make DESTDIR="${pkgdir}" install
}
| true
|
929d3ddf132ad25893c37e227093f216df1d2d14
|
Shell
|
goffinet/netascode
|
/Installation_components/docker_compose_setup.sh
|
UTF-8
| 512
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
echo ""
echo "Installation and checking docker-compose..."
sudo curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
docker-compose --version
echo ""
echo "Ensuring firewalld is stopped and disabled"
sudo systemctl stop firewalld
sudo systemctl disable firewalld
sudo systemctl restart docker
| true
|
d1b89c20c6708f590ced442dfe15df838fe519c3
|
Shell
|
ravikiran529/shell_scripts
|
/password.sh
|
UTF-8
| 819
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
#To create new user:
usage(){
cat << EOF
Usage format:
====================================================
password.sh [-u] <USER_NAME> [-p] <PASSWD>
options:
u - Pass Username
p - Set Password
====================================================
EOF
exit 0
}
while getopts ":u:p:h" options; do
case "${options}" in
u)
USER_NAME=${OPTARG}
;;
p)
PASSWD=${OPTARG}
;;
h)
usage
;;
*)
usage
;;
esac
done
if [ -z "${USER_NAME}" ] || [ -z "${PASSWD}" ]
then
echo "Username and password fields should not be empty."
usage
fi
useradd ${USER_NAME} -p ${PASSWD}
if [ "$?" == "0" ]
then
echo "New User: ${USER_NAME} created successfully!"
else
echo "User creation failed..Try another name."
fi
| true
|
be8ef70b7e684a24e09f68118014815b15e9322d
|
Shell
|
aryabartar/BSc-HWc
|
/Operating Systems Lab/HW2/q1.sh
|
UTF-8
| 241
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
read -p "Input: " INPUT1 INPUT2
if [ $INPUT1 -gt 10 ]; then
echo "Value is greater than 10!"
elif [ $INPUT1 -gt $INPUT2 ]; then
echo "First number is greater than second one."
else
echo "Second number is greater!"
fi
| true
|
2f1ca2ddf754383f93beda489c9d2fce52861d74
|
Shell
|
bundai223/new_dotfiles
|
/scripts/install_neovim.sh
|
UTF-8
| 295
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
BASEPATH=/usr/src
sudo yum -y install libtool autoconf automake cmake gcc gcc-c++ make pkgconfig unzip
cd $BASEPATH
git clone https://github.com/neovim/neovim.git
cd neovim
if [ -e build ]; then
rm -r build
fi
make clean
make CMAKE_BUILD_TYPE=Release
make && make install
| true
|
c8a0dbeadce6089f89084ea50305737b14f695c1
|
Shell
|
YMC-GitHub/vscode-snippet-shell
|
/scripts/common-function.sh
|
UTF-8
| 586
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
function hasFile() {
local result="false"
if [[ "${1}" && -e "${1}" ]]; then
result="true"
fi
echo "$result"
}
function hasDir() {
local result="false"
if [[ "${1}" ]]; then
if [ -d "${1}" ]; then
result="true"
fi
fi
echo "$result"
}
function getFiles() {
local dir_or_file=""
for files in $(ls $1); do
dir_or_file=$1"/"$files
if [ -d $dir_or_file ]; then #是目录的话递归遍历
getFiles $dir_or_file
else
echo "$dir_or_file"
fi
done
}
# getFiles "./src"
# file -usage
# ./scripts/common-function.sh
| true
|
e07517de7b46960081ebd1ecd81734221576fe4d
|
Shell
|
mustssr/RoadToDevOps
|
/01-installation-scripts/08-EFK/start-filebeat.sh
|
UTF-8
| 2,415
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
[ -f /etc/timezone ] || echo "Asia/Shanghai" > /etc/timezone
function check_server_in_host() {
server=$1
grep ${server} /etc/hosts &> /dev/null
if [ $? -eq 0 ];then
echo -e "\033[33m${server} 主机信息:\033[0m"
grep "${server}" /etc/hosts
else
echo -e "\033[31m/etc/hosts中未定义${server}主机\n\033[0m"
exit
fi
}
check_server_in_host elasticsearch
check_server_in_host kibana
echo # 换行,美观
######################################################
log_dir=/data/myapp/logs
inner_log_dir=/data/logs
host=`hostname`
current_dir=`pwd`
image=docker.elastic.co/beats/filebeat:7.9.1
[ -d ${current_dir}/filebeat ] || mkdir -p ${current_dir}/filebeat
if [ ! -f "${current_dir}/filebeat/filebeat.yml" ];then
echo -e "\033[33m写入配置文件:${current_dir}/filebeat/filebeat.yml\033[0m"
cat > ${current_dir}/filebeat/filebeat.yml << EOF
filebeat.inputs:
- type: log
enabled: true
paths:
- ${inner_log_dir}/log1.log
fields:
type: "log1"
multiline:
pattern: '^[[:space:]]' # 所有的空白行,合并到前面不是空白的那行
negate: false
match: after
timeout: 15s
max_lines: 500
# 打开以下注释和output中的注释,即可配置多目录日志采集
# - type: log
# enabled: true
# paths:
# - /data/logs/log2.log
# fields:
# type: "log2"
setup.kibana:
host: "kibana:5601"
setup.dashboards.enabled: false
setup.ilm.enabled: false
setup.template.name: "${host}" #顶格,和output对齐
setup.template.pattern: "${host}-*" #顶格,和output对齐
output.elasticsearch:
hosts: ["elasticsearch:9200"]
indices:
- index: "${host}-log1-%{+yyyy.MM.dd}" #指定index name
when.equals:
fields.type: "log1"
# 打开以下注释和input中的注释,即可配置多目录日志采集
# - index: "${host}-log2-%{+yyyy.MM.dd}" #指定index name
# when.equals:
# fields.type: "log2"
EOF
fi
echo -e "\033[33mdocker启动filebeat\033[0m"
echo "容器名:${host}-filebeat"
docker run \
--network host \
-d \
--name ${host}-filebeat \
--hostname ${host} \
-v /etc/localtime:/etc/localtime \
-v /etc/timezone:/etc/timezone \
-v ${log_dir}:${inner_log_dir} \
-v ${current_dir}/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml \
--restart always \
${image}
echo # 换行,美观
| true
|
2cb6a1e9a1c839243fb6637554760b149464cc55
|
Shell
|
a809704431/shell-auto-install
|
/script/nodejs.sh
|
UTF-8
| 1,343
| 3.65625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
get_nodejs() {
test_package node-v8.9.3-linux-x64.tar.xz http://shell-auto-install.oss-cn-zhangjiakou.aliyuncs.com/package/node-v8.9.3-linux-x64.tar.xz 32948a8ca5e6a7b69c03ec1a23b16cd2
if [ $language -eq 1 ];then
echo "node-v8.9.3-linux-x64.tar.xz 下载完成"
else
echo "node-v8.9.3-linux-x64.tar.xz Download completed"
fi
}
install_nodejs() {
get_nodejs
tar -xf package/node-v8.9.3-linux-x64.tar.xz
mv node-v8.9.3-linux-x64 /usr/local/nodejs
ln -s /usr/local/nodejs/bin/node /usr/local/bin/node
ln -s /usr/local/nodejs/bin/npm /usr/local/bin/npm
echo "nodejs" >> conf/installed.txt
[ $language -eq 1 ] && echo "nodejs安装完毕,使用node -v 查看版本" || ehco "nodejs installation is completed,Use node -v to check the version"
}
remove_nodejs() {
rm -rf /usr/local/nodejs
rm -rf /usr/local/bin/node
rm -rf /usr/local/bin/npm
[ $language -eq 1 ] && echo "nodejs已卸载" || ehco "nodejs Uninstalled"
}
info_nodejs() {
if [ $language -eq 1 ];then
echo "名字:nodejs
版本:8.9.3
作者:book
介绍:安装nodejs
提示:无
使用:无"
else
echo "Name:nodejs
version:8.9.3
Author:book
Introduction:Install nodejs
Prompt:none
use:none"
fi
}
| true
|
0fe3d4a40d4efb71badc3abb13a7c1a4020f8caf
|
Shell
|
maksru/42
|
/PISCINE_C/j09/ex11/where_am_i.sh
|
UTF-8
| 138
| 2.703125
| 3
|
[] |
no_license
|
RET=$(ifconfig | grep '\<inet\>' | cut -d ' ' -f2 | grep -v 127)
a=${?}
if [ ${a} -ne 0 ]; then echo "Je suis perdu!"; else echo $RET; fi
| true
|
cd8b452ec708f32438c7f6bb879867f9007d7341
|
Shell
|
nguyenminhhieu12041996/casper-node
|
/utils/nctl-metrics/prometheus.sh
|
UTF-8
| 385
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#: Run a prometheus instance that collects metrics from a local nctl network.
cd $(dirname $0)
PROMETHEUS_TAG=prom/prometheus
echo "Genarating config."
./gen_prometheus_config.py > prometheus.yml
echo "Starting prometheus."
exec docker run \
-i \
--rm \
--net=host \
-p 9090:9090 \
-v $(pwd)/prometheus.yml:/etc/prometheus/prometheus.yml \
${PROMETHEUS_TAG}
| true
|
6862302b91f7e922487ade11f62a6f55fa8ac856
|
Shell
|
Sanketh1220/fundoo-notes-angular-frontend
|
/deploy.sh
|
UTF-8
| 696
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
BUNDLE_FILE_BASE=fundoo-frontend
BUNDLE_FILE=$BUNDLE_FILE_BASE-$BUILD_NUMBER.tar.gz
SERVER_IP=18.119.109.3
APPS_HOME=/home/ec2-user/apps
FOLDER_NAME=fd-frontend
EC2_SSH_KEY=/var/lib/jenkins/jenkins_key.pem
rm -rf *.tar.gz
npm install
npm run build
cd dist/fundoo-notes-app
tar czf $BUNDLE_FILE *
scp -o StrictHostKeyChecking=no -i $EC2_SSH_KEY $BUNDLE_FILE ec2-user@$SERVER_IP:$APPS_HOME/$BUNDLE_FILE_BASE.tar.gz
ssh -o StrictHostKeyChecking=no -i $EC2_SSH_KEY ec2-user@$SERVER_IP << 'ENDSSH'
BUNDLE_FILE=fundoo-frontend.tar.gz
APPS_HOME=/home/ec2-user/apps
FOLDER_NAME=fd-frontend
cd $APPS_HOME
rm -rf $FOLDER_NAME
mkdir $FOLDER_NAME
tar -xf $BUNDLE_FILE -C $FOLDER_NAME
ENDSSH
| true
|
8f8c8a18d6b744a1e86c1954ec1ee5781200f8a9
|
Shell
|
kelubi/DevOps
|
/scripts/rsync_api.sh
|
UTF-8
| 2,036
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
#Script:bshare_rsync.sh
#Author:damon
#DATE:2013/07/15
#REV:1.0
#Action:package rsync function into an api
SYNC_USER=""
SYNC_USER_ID=""
SYNC_PORT=
SYNC_AUTH="ssh -p $SYNC_PORT -i $SYNC_USER_ID"
RSYNC="/usr/bin/rsync"
function move_file(){
#3 parametes for this function
#move files by rsync
CP="/bin/cp"
REMOTE_IP=$1
MV_SOURCE=$2
MV_TARGET=$3
MV_LOG="$(dirname $0)/$FUNCNAME.log"
#commond to run
$SYNC_AUTH $SYNC_USER@$REMOTE_IP "sudo $RSYNC -PHXaz --stats --delete $MV_SOURCE $MV_TARGET" >/dev/null 2>&1
#return result and record in log
if [ $? -eq 0 ]; then
echo "[Successful] $FUNCNAME $MV_SOURCE to $MV_TARGET on $REMOTE_IP"
echo "`date +'%Y-%m-%d %H:%M:%S'` [Successful] $FUNCNAME $MV_SOURCE to $MV_TARGET on $REMOTE_IP">> $MV_LOG
else
echo "[Error] $FUNCNAME $MV_SOURCE to $MV_TARGET on $REMOTE_IP"
echo "`date +'%Y-%m-%d %H:%M:%S'` [Error] $FUNCNAME $MV_SOURCE to $MV_TARGET on $REMOTE_IP" >> $MV_LOG
fi
}
function send_file(){
#2 parametes for this function
RSYNC_SOURCE=$1
RSYNC_TARGET=$2
MV_LOG="$(dirname $0)/$FUNCNAME.log"
$RSYNC -PHXaz --rsh "$SYNC_AUTH" --stats --delete --rsync-path "sudo rsync" $RSYNC_SOURCE $SYNC_USER@$RSYNC_TARGET >/dev/null 2>&1
if [ $? -eq 0 ]; then
echo "[Successful] $FUNCNAME $RSYNC_SOURCE to $RSYNC_TARGET"
echo "`date +'%Y-%m-%d %H:%M:%S'` [Successful] $FUNCNAME $RSYNC_SOURCE to $RSYNC_TARGET">> $MV_LOG
else
echo "[Error] $FUNCNAME $RSYNC_SOURCE to $RSYNC_TARGET"
echo "`date +'%Y-%m-%d %H:%M:%S'` [Error] $FUNCNAME $RSYNC_SOURCE to $RSYNC_TARGET" >> $MV_LOG
fi
}
function service(){
#3 parametes for this function
REMOTE_IP=$1
S_SHELL=$2
S_CMD=$3
#commond to run
$SYNC_AUTH $SYNC_USER@$REMOTE_IP "sudo bash $S_SHELL $S_CMD" >/dev/null 2>&1
if [ $? -eq 0 ];then
echo "[Successful] $S_SHELL $S_CMD on $REMOTE_IP"
fi
}
#$1 is function name,running as function $2 $3 ...
#echo $*
eval $*
| true
|
112419e8e7a716fd46d2c81a34c1f93df6457e45
|
Shell
|
dice-project/DICE-Monitoring
|
/bootstrap/provision.sh
|
UTF-8
| 563
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Obtain chef client
wget https://packages.chef.io/files/stable/chef/12.18.31/ubuntu/14.04/chef_12.18.31-1_amd64.deb
dpkg -i chef_12.18.31-1_amd64.deb
# Obtain chef cookbooks
wget https://github.com/dice-project/DICE-Chef-Repository/archive/develop.tar.gz
tar -xf develop.tar.gz
# Run chef
cd DICE-Chef-Repository-develop
cp /vagrant/dmon.json .
chef-client -z \
-j dmon.json \
-o recipe[dice_common::host],recipe[apt::default],recipe[java::default],recipe[dmon::default],recipe[dmon::elasticsearch],recipe[dmon::kibana],recipe[dmon::logstash]
| true
|
424bd0af98ea9dced9d25738295cba47ad1b7e73
|
Shell
|
manbobo2002/gcp-migrate-instance
|
/instance-script.sh
|
UTF-8
| 683
| 2.671875
| 3
|
[] |
no_license
|
sudo screen -S image-transfer
image_disk="wp-image-disk"
bucket="custom-image-storage-20200610"
cat <<EOF >> script.sh
sudo mkdir /mnt/tmp
sudo mkfs.ext4 -F /dev/disk/by-id/google-temporary-disk
sudo mount -o discard,defaults /dev/disk/by-id/google-temporary-disk /mnt/tmp
sudo mkdir /mnt/${image_disk}
ls /dev/disk/by-id/
sudo mount /dev/disk/by-id/"google-${image_disk}-part1" /mnt/${image_disk}
sudo umount /mnt/${image_disk}/
sudo dd if=/dev/disk/by-id/google-${image_disk} of=/mnt/tmp/disk.raw bs=4096
cd /mnt/tmp
sudo tar czvf myimage.tar.gz disk.raw
/mnt/tmp/myimage.tar.gz
gsutil mb gs://${bucket}
gsutil cp /mnt/tmp/myimage.tar.gz gs://${bucket}
exit
EOF
sh script.sh
| true
|
5d12cc1f381414e7b3619db7d087ba1a0759bde9
|
Shell
|
LASER-UMASS/JavaRepair-replication-package
|
/src/statistical-tests-execution/RQ4/reproduce-rq4.sh
|
UTF-8
| 3,081
| 2.71875
| 3
|
[] |
no_license
|
#####################################
# PURPOSE: This is the driver script used to generate results for RQ4
# INPUT: path to directory containing patch quality assesment scores for RQ4 patches (../../../results/RQ4/patch-quality-assessment/)
# OUTPUT: plots and results presented in paper
# CMD TO RUN: bash reproduce-rq4.sh ../../../results/RQ4/patch-quality-assessment/
# DEPENDENCIES: this script requires consolidate.py and combineresults.py scripts to pre-process input data,
# and freq-distribution.R and computePearson.R to generate the plots
#####################################
DIRPATH=$1
python consolidate.py $DIRPATH/gp_rq4_es3_quality.csv 1 > gp_rq4_es3_consolidated.csv
python consolidate.py $DIRPATH/gp_rq4_es6_quality.csv 1 > gp_rq4_es6_consolidated.csv
cat gp_rq4_es3_consolidated.csv > gp_rq4_es3_es6_consolidated.csv
cat gp_rq4_es6_consolidated.csv >> gp_rq4_es3_es6_consolidated.csv
sort gp_rq4_es3_es6_consolidated.csv > gp_rq4_es3_es6_consolidated_sorted.csv
python combineresults.py gp_rq4_es3_es6_consolidated_sorted.csv
mv results.txt gp_rq4_overall.csv
python addFailingTest.py gp_rq4_overall.csv
mv triggering_test_results.csv gp_rq4_overall.csv
rm gp_rq4_es3_consolidated.csv
rm gp_rq4_es6_consolidated.csv
rm gp_rq4_es3_es6_consolidated.csv
rm gp_rq4_es3_es6_consolidated_sorted.csv
python consolidate.py $DIRPATH/par_rq4_es3_quality.csv 2 > par_rq4_es3_consolidated.csv
python consolidate.py $DIRPATH/par_rq4_es6_quality.csv 2 > par_rq4_es6_consolidated.csv
cat par_rq4_es3_consolidated.csv > par_rq4_es3_es6_consolidated.csv
cat par_rq4_es6_consolidated.csv >> par_rq4_es3_es6_consolidated.csv
sort par_rq4_es3_es6_consolidated.csv > par_rq4_es3_es6_consolidated_sorted.csv
python combineresults.py par_rq4_es3_es6_consolidated_sorted.csv
mv results.txt par_rq4_overall.csv
python addFailingTest.py par_rq4_overall.csv
mv triggering_test_results.csv par_rq4_overall.csv
rm par_rq4_es3_consolidated.csv
rm par_rq4_es6_consolidated.csv
rm par_rq4_es3_es6_consolidated.csv
rm par_rq4_es3_es6_consolidated_sorted.csv
python consolidate.py $DIRPATH/trp_rq4_es3_quality.csv 3 > trp_rq4_es3_consolidated.csv
python consolidate.py $DIRPATH/trp_rq4_es6_quality.csv 3 > trp_rq4_es6_consolidated.csv
cat trp_rq4_es3_consolidated.csv > trp_rq4_es3_es6_consolidated.csv
cat trp_rq4_es6_consolidated.csv >> trp_rq4_es3_es6_consolidated.csv
sort trp_rq4_es3_es6_consolidated.csv > trp_rq4_es3_es6_consolidated_sorted.csv
python combineresults.py trp_rq4_es3_es6_consolidated_sorted.csv
mv results.txt trp_rq4_overall.csv
python addFailingTest.py trp_rq4_overall.csv
mv triggering_test_results.csv trp_rq4_overall.csv
rm trp_rq4_es3_consolidated.csv
rm trp_rq4_es6_consolidated.csv
rm trp_rq4_es3_es6_consolidated.csv
rm trp_rq4_es3_es6_consolidated_sorted.csv
cat gp_rq4_overall.csv > rq4_results.csv
tail --lines=+2 par_rq4_overall.csv >> rq4_results.csv
tail --lines=+2 trp_rq4_overall.csv >> rq4_results.csv
rm gp_rq4_overall.csv
rm par_rq4_overall.csv
rm trp_rq4_overall.csv
Rscript computePearson.R
Rscript freq-distribution.R
| true
|
9c5b6eff7ffb2b8692b24598503d0aaf46a0185d
|
Shell
|
softecspa/puppet-softec
|
/files/bin/root_device
|
UTF-8
| 3,697
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
set -u
###############################################################
# Lorenzo Cocchi <lorenzo.cocchi@softecspa.it> #
# #
# Script per identificare il device su cui e' risiede la `/` #
# ex.: /dev/sda, /dev/cciss/c0d0 #
# #
# Utilizzato principalmente come Puppet fatc #
# NOTE: il comando `lvdisplay` richiede i privilegi di r00t #
###############################################################
#############
# functions #
#############
die() {
local msg="$1"
local e=${2-1}
echo "${msg}" >&2
exit ${e}
}
lv_maps() {
# identifica, dal Logical Volume name, il device del Physical Volume
# ex.: /dev/mapper/root = /dev/sda1
local lv_name=$1
local fn_name="lv_maps()"
if [ $# -ne 1 ]; then
echo "${fn_name} usage: ${fn_name} Logical Volume name"
return 1
fi
pv_device=$(
lvdisplay ${lv_name} --maps | \
egrep 'Physical volume' | awk '{ print $3 }' 2>/dev/null
)
if [ -z "${pv_device}" ]; then
return 1
else
echo ${pv_device}
return 0
fi
}
sanitize() {
# ripulisce il device dalle partizioni,
# ex.: /dev/sda1 = /dev/sda
local dev_partition=$1
local device=""
# sed regexp
local ide_scsi_dev="(/dev/s[h,d][a-z]{1,})[0-9]{1,}"
local hp_dev="(/dev/cciss/c[0-9]{1,}d[0-9]{1,})p[0-9]{1,}"
local virt_dev="(/dev/x?vd[a-z]{1,})[0-9]{1,}"
local fn_name="sanitize()"
if [ $# -ne 1 ]; then
echo "${fn_name} usage: ${fn_name} device partition, ex.: /dev/sda1"
return 1
fi
# software raid
if (echo ${dev_partition} | egrep -q '/dev/md'); then
echo ${dev_partition}
return 0
fi
# ide (pata), sata, scsi
if (echo ${dev_partition} | egrep -q '/dev/[h,s]d'); then
device=$(
echo ${dev_partition} | sed -r "s#${ide_scsi_dev}#\1#g"
)
fi
# hp controller
if (echo ${dev_partition} | egrep -q '/dev/cciss'); then
device=$(
echo ${dev_partition} | sed -r "s#${hp_dev}#\1#g"
)
fi
# virt device, ex.: kvm domU
if (echo ${dev_partition} | egrep -q '/dev/x?vd'); then
device=$(
echo ${dev_partition} | sed -r "s#${virt_dev}#\1#g"
)
fi
if [ -z "${device}" ] || [ "x${dev_partition}" == "x${device}" ]; then
return 1
else
echo ${device}
return 0
fi
}
########
# main #
########
CACHE_FILE="/var/cache/$(basename $0).cache"
# se il file di cache e' + vecchio di 2h, lo rimuovo
find ${CACHE_FILE} -mmin +120 -delete 2>/dev/null
# se il file di cache esiste ed ha dimensione maggiore di 0
# restituisco il valore da questo file
if [ -s ${CACHE_FILE} ]; then
cat ${CACHE_FILE}
exit $?
fi
ROOT_DEVICE="$(mount | egrep -v rootfs | egrep -w '/' | awk '{ print $1 }' 2>/dev/null)"
[ $? -ne 0 ] && die "Cannot identify root mount point"
# / su LVM
if (echo ${ROOT_DEVICE} | egrep -q '/dev/mapper'); then
DEV=$(lv_maps ${ROOT_DEVICE})
else
DEV=${ROOT_DEVICE}
fi
[ $? -ne 0 ] && die "Cannot lv_maps() root device"
# / su controller HP
if (echo ${DEV} | egrep -q '/dev/cciss'); then
DEV=$(sanitize ${DEV})
fi
# UUID
if (echo ${DEV} | egrep -q '[a-z0-9]{8}-[az0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}'); then
DEV=$(blkid -U ${DEV##*/})
fi
[ $? -ne 0 ] && die "Cannot sanitize() root device"
# se DEV non e' vuoto
if [ ! -z "${DEV}" ]; then
echo ${DEV} > ${CACHE_FILE}
echo ${DEV}
exit $?
else
die "Cannot lv_maps() root device"
fi
| true
|
9270dbdb154d902d58ac2ae67e5406774a2930ef
|
Shell
|
perry-mitchell/dotfiles
|
/install/brew-cash.sh
|
UTF-8
| 325
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
if isMac; then
return
fi
brew tap caskroom/versions
brew tap caskroom/cask
brew tap caskroom/fonts
apps=(
firefox
flux
font-fira-code
google-chrome
google-chrome-canary
macdown
opera
screenflow
sourcetree
transmit
virtualbox
visual-studio-code
vlc
)
brew cask install "${apps[@]}"
| true
|
220214749ec08115220ca5f672207fad6b147814
|
Shell
|
bigman9/summary
|
/shell/max.sh
|
UTF-8
| 955
| 2.796875
| 3
|
[] |
no_license
|
#!bin/bash
# mysql env #
##
IP="10.0.0.35"
PORT="3306"
DB_M="MILLIONS"
USER="developer"
PASSWD="welcomesxn"
LAST_VER_SQL="select aggreation_time from APP_VERSION_NEWS_AGGREGATION where is_current='1' order by aggreation_time desc limit 1"
CONNECT="mysql -h${IP} -P${PORT} -u${USER} -p${PASSWD} -D ${DB_M}"
##
# mysql sql #
##
DB_S="MILLIONS_CRAWLER"
MAX_VER_SQL="select max(last_news_id) from APP_VERSION_NEWS_AGGREGATION"
MAX_VER=`${CONNECT} -e "${MAX_VER_SQL}"`
MAX_VER=`echo ${MAX_VER}| awk -F' ' '{print $2}'`
CONNECT_S="mysql -h${IP} -P${PORT} -u${USER} -p${PASSWD} -D ${DB_S}"
MAX_NEWS_SQL="select max(id) from APP_SOURCE_DATA_NEWS_CN"
MAX_NEWS=`${CONNECT_S} -e "${MAX_NEWS_SQL}"`
MAX_NEWS=`echo ${MAX_NEWS}| awk -F' ' '{print $2}'`
MAX_VER=`expr ${MAX_VER} + 800`
if [[ ${MAX_NEWS} -ge ${MAX_VER} ]]; then
sh /home/hadoop/spark-jars/cluster/scripts/choice.sh
echo "${MAX_NEWS} > ${MAX_VER} over 800"
else
echo "The corpus is not rich enough"
fi
| true
|
62665a50e1f3dde20a7e457852887384a114ec9d
|
Shell
|
xinghun61/infra
|
/3pp/readline/install.sh
|
UTF-8
| 553
| 2.953125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -e
set -x
set -o pipefail
PREFIX="$1"
DEPS_PREFIX="$2"
export CCFLAGS="-I$DEPS_PREFIX/include"
export CFLAGS="-I$DEPS_PREFIX/include"
export LDFLAGS="-L$DEPS_PREFIX/lib"
./configure --enable-static --disable-shared \
--prefix "$PREFIX" \
--host "$CROSS_TRIPLE" \
--with-curses
make install -j $(nproc)
(cd $PREFIX/include && ln -s ./readline/readline.h readline.h)
| true
|
9f3d46cdf1d8964fbd14410f2b3c2905fda6f271
|
Shell
|
seam/build
|
/checkout.sh
|
UTF-8
| 3,055
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/sh
usage()
{
cat << EOF
usage: $0 options
This script will check out Seam 3.
OPTIONS:
-a When performing a fetch use --all to retrieve all remotes
-b Build and install parent, tools and bom modules
-h Show this usage message
-d Destination directory, otherwise the PWD is used
-m Checkout (clone) in manager mode (SSH mode) (default is read-only)
-v Be more verbose
-c Don't run git fetch if the repository has already been cloned
-p Perform a git pull origin for each of the modules
EOF
}
work()
{
if [ "$READONLY" -eq "1" ]
then
GITBASE="git://github.com/seam"
else
GITBASE="git@github.com:seam"
fi
if [ "$VERBOSE" -eq "0" ]
then
GITARGS=""
fi
if [ -d $DESTINATION ]
then
echo "Using existing destination directory $DESTINATION"
else
echo "Creating destination directory $DESTINATION"
mkdir $DESTINATION
fi
for repo in $REPOS
do
update=0
unset gitcmd
url="$GITBASE/$repo.git"
repodir=$DESTINATION/$repo
if [ -d $repodir ]
then
if [ "$GITFETCH" -eq "1" ]; then
echo "Updating $repo"
if [ "$FETCHALL" -eq "1" ]; then
gitcmd="git $GITARGS --git-dir=$DESTINATION/$repo/.git fetch --all"
else
gitcmd="git $GITARGS --git-dir=$DESTINATION/$repo/.git fetch"
fi
update=1
$gitcmd
else
echo "Skipping existing cloned repository $DESTINATION/$repo"
update=1
fi
fi
if [ "$PULL" -eq "1" ]; then
cd $DESTINATION/$repo
status=$(git status --porcelain)
if [ -z "$status" ]; then
echo "Pulling $repo"
gitcmd="git $GITARGS pull"
$gitcmd
update=1
else
echo "Local changes, no pull occurred"
fi
fi
if [ "$update" -eq "0" ]; then
echo "Cloning $repo"
gitcmd="git clone $GITARGS $url $DESTINATION/$repo"
$gitcmd
fi
done
if [ "$BUILD" -eq "1" ]
then
echo "Building Seam parent, tools and bom modules"
cd build/parent
mvn clean install
cd -
cd build/tools
mvn clean install
cd -
cd dist
mvn clean install -N
cd -
fi
}
DESTINATION=`pwd`
READONLY=1
VERBOSE=0
GITBASE=
GITARGS=
GITFETCH=1
FETCHALL=0
BUILD=0
PULL=0
RUN=1
REPOS="parent build dist examples catch config drools faces international jcr jms mail persistence remoting rest security servlet social solder validation ticket-monster wicket"
while getopts “aphmd:bcv” OPTION
do
case $OPTION in
a)
FETCHALL=1
;;
h)
usage
RUN=0
;;
d)
DESTINATION=$OPTARG
;;
c)
GITFETCH=0
;;
m)
READONLY=0
;;
b)
BUILD=1
;;
v)
VERBOSE=1
;;
p)
PULL=1
;;
[?])
usage;
RUN=0
;;
esac
done
if [ "$RUN" -eq "1" ]
then
work;
fi
| true
|
b639627bf32f79500b542baf1aaefdfd8d7fe3cb
|
Shell
|
hades13/apv5sdk-v15
|
/apv5sdk-v15/autelan/base-files/files_AQ2000CN/usr/sbin/ethportstatus
|
UTF-8
| 2,601
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/sh
VERSION="$(basename $0) ver.1.0"
USAGE="Usage: $(basename $0) <eth0|eth1> <port1|port2|...> <s=[0|10|100]> <d=[0|1]>"
DESCRIPTION="Description: get or set the specified port's link status,auto-negotiation status,speed mode and duplex mode,
s means speed(speed=0 means auto-negotiation),d means duplex(duplex=0 means half duplex,duplex=1 means full duplex),
if only have eth and port args,$(basename $0) will do get operation"
if [ $# -lt 2 ];then
echo "${USAGE}" >&2
echo "${DESCRIPTION}" >&2
exit 1
fi
ETH=
PORT=
case "${1}" in
eth0 | eth1)
ETH="${1}"
;;
*)
echo "${USAGE}" >&2
echo "${DESCRIPTION}" >&2
exit 1
;;
esac
case "${2}" in
port1 | port2 | port3 | port4)
PORT=`echo "${2}" | grep -o '[0-9]'`
;;
*)
echo "${USAGE}" >&2
echo "${DESCRIPTION}" >&2
exit 1
;;
esac
PHYCONTROLREGVAL=`ethreg -i ${ETH} -p $((${PORT} - 1)) 0 | awk '{print $5}'`
PORTSTATUSREGVAL=`ethreg -i ${ETH} -p 15 $((${PORT} * 0x100 + 0x100)) | awk '{print $5}'`
SPEEDMODE=$((${PORTSTATUSREGVAL} & 0x03))
DUPLEXMODE=$((${PORTSTATUSREGVAL} & 0x40))
LINKSTATUS=$((${PORTSTATUSREGVAL} & 0x100))
if [ $# -eq 2 ];then
echo "${1} ${2} status: "
if [ ${LINKSTATUS} -eq $((0x100)) ]; then
echo "link status: up"
else
echo "link status: down"
fi
if [ $((${PHYCONTROLREGVAL} & 0x1000)) -eq $((0x1000)) ]; then
echo "auto-negotiation: enabled"
else
echo "auto-negotiation: disabled"
fi
case ${SPEEDMODE} in
0 )
echo "speed: 10 Mbps"
;;
1 )
echo "speed: 100 Mbps"
;;
2 )
echo "speed: reserved"
;;
3 )
echo "speed: speed mode error"
;;
esac
case ${DUPLEXMODE} in
0 )
echo "duplex: half duplex"
;;
* )
echo "duplex: full duplex"
;;
esac
else
FLAG=0
opt=
val=
shift 2
while [ $# -ge 1 ]; do
opt=`echo $1|awk -F "=" '{print $1}'`
val=`echo $1|awk -F "=" '{print $2}'`
case "${opt}" in
s)
case ${val} in
0|10|100 )
FLAG=1
SPEEDMODE=${val}
;;
* )
echo "${USAGE}" >&2
echo "${DESCRIPTION}" >&2
exit 1
;;
esac
;;
d)
case ${val} in
half )
DUPLEXMODE=0
;;
full )
DUPLEXMODE=1
;;
* )
echo "${USAGE}" >&2
echo "${DESCRIPTION}" >&2
exit 1
;;
esac
;;
*)
echo "${USAGE}" >&2
echo "${DESCRIPTION}" >&2
exit 1
;;
esac
shift
done
if [ ${DUPLEXMODE} -ne 0 ]; then
DUPLEXMODE=1
fi
if [ ${FLAG} -ne 1 ]; then
case ${SPEEDMODE} in
0 )
SPEEDMODE=10
;;
1 )
SPEEDMODE=100
;;
* )
echo "${USAGE}" >&2
echo "${DESCRIPTION}" >&2
exit 1
;;
esac
fi
ethreg -i ${ETH} -f $((${PORT} - 1))=${SPEEDMODE} -d ${DUPLEXMODE}
fi
| true
|
7146bf31dd933614ffaf1420e6925d3ddbbef0cb
|
Shell
|
qb00091/appointments
|
/db/bin/make-manager
|
UTF-8
| 182
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
if [ -z "$1" ] || [ -z "$2" ]; then exit 1; fi
psql events -c "insert into admins (uid, rid) values ($1, $2)"
if [ "$?" -eq 0 ]; then echo "User $1 is now admin of $2"; fi
| true
|
b5d1b29164a331c3b9684310780d437a42fcfb6b
|
Shell
|
macisamuele/json-trait-rs
|
/scripts/bump-all-submodules.sh
|
UTF-8
| 616
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -euo pipefail -o posix -o functrace
git submodule foreach --recursive 'git fetch origin && git reset --hard origin/master'
if [[ ! -f .gitmodules ]]; then
echo "No registered submodules"
exit 0
fi
submodules=$(awk '$1 ~ /path/ {print $3}' .gitmodules)
if [[ ${#submodules} -eq 0 ]]; then
echo "No registered submodules"
exit 0
fi
# shellcheck disable=SC2086
if git diff --exit-code ${submodules}; then
# No differences have been identified
exit 0
else
echo "Submodules have been bumped, please make sure to commit them \`git add ${submodules}\`"
exit 1
fi
| true
|
6fe59c5d4253a7ed489129eea754321f66f440a9
|
Shell
|
conda-forge/jupyter_contrib_nbextensions-feedstock
|
/recipe/post-link.sh
|
UTF-8
| 252
| 2.78125
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# We redirect stderr & stdout to conda's .messages.txt; for details, see
# http://conda.pydata.org/docs/building/build-scripts.html
{
"${PREFIX}/bin/jupyter-contrib-nbextension" install --sys-prefix
} >>"${PREFIX}/.messages.txt" 2>&1
| true
|
c5600fe77a022fb72b83648b8fc57feb2326135b
|
Shell
|
austintgriffith/logger
|
/docker/build.sh
|
UTF-8
| 542
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
#we need to move the package.json file in so we can npm install during the build
FILE="package.json"
if [ -f $FILE ];
then
echo "File $FILE exists."
else
echo "File $FILE does not exist, copying..."
cp ../$FILE $FILE
fi
#copy in package json if it is different
cmp -s package.json ../package.json > /dev/null
if [ $? -eq 1 ]; then
echo "package.json has updated... copying...";
cp ../package.json .
else
echo "package.json is unchanged.";
fi
echo "Building base logger container...";
docker build -t logger .
| true
|
554ccc5ed69fd0b331d94cefc501ab324ece4c94
|
Shell
|
grayasm/git-main
|
/tutorial/martin_streicher_rpm/01_build/part1.sh
|
UTF-8
| 428
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
set -x
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if [ `basename $DIR` != "01_build" ]; then
echo "run this script from: 01_build"
exit
fi
rmdir -v BUILD RPMS SOURCES SPECS SRPMS
mkdir -pv BUILD RPMS SOURCES SPECS SRPMS
cd SOURCES
wget http://ftp.halifax.rwth-aachen.de/gnu/wget/wget-1.18.tar.gz
cd ..
cp part1.spec SPECS/wget.spec
rpmbuild -v -bb --clean SPECS/wget.spec
find . -iname '*.rpm'
| true
|
6f5b2179475e2d6dd4f770f4e6e26d822a58f0a9
|
Shell
|
fabianlee/blogcode
|
/bash/test_heredoc.sh
|
UTF-8
| 1,357
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Examples with heredoc
#
# https://tldp.org/LDP/abs/html/here-docs.html
# https://linuxize.com/post/bash-heredoc/
# https://stackoverflow.com/questions/2500436/how-does-cat-eof-work-in-bash
# https://stackoverflow.com/questions/7316107/how-to-split-strings-over-multiple-lines-in-bash
# escaping characters in heredoc, https://www.baeldung.com/linux/heredoc-herestring
# http://www.linuxcommand.org/lc3_man_pages/readh.html
echo ""
echo "*** do not strip tabs ***"
cat <<EOF
a
b
c
EOF
echo ""
echo "*** strip tabs ***"
cat <<-EOF
a
b
c
EOF
echo ""
echo "*** put into variable ***"
read -r -d '' myheredoc1 <<EOF
a
b
c
EOF
echo "$myheredoc1"
echo ""
echo "*** with variables and subshell ***"
greeting="hello, world!"
cat <<EOF
$greeting
I am ${USER} in directory $(pwd)
$(for i in $(seq 1 9); do echo "hello $i"; done)
if you want a dollar sign, then \$escape it
last line
EOF
echo ""
echo "*** put into variable with subshell ***"
greeting="hello, world!"
# -r do not allow backslashes to escape, -d delimiter
read -r -d '' myheredoc2 <<EOF
$greeting
I am ${USER} in directory $(pwd)
$(for i in $(seq 1 9); do echo "hello $i"; done)
EOF
echo "$myheredoc2"
echo ""
echo "*** append to file /tmp/appendint.txt ***"
datestr=$(date +"%D %T")
cat <<EOF >> /tmp/appendint.txt
appended at $datestr
EOF
| true
|
480426256664dfcb0fc4d3cdf2c9c3f369d97499
|
Shell
|
mission-systems-pty-ltd/snark
|
/actuators/universal_robots/test/ur-arm/test
|
UTF-8
| 1,646
| 3.578125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Integration test for `ur-arm wait`: checks its exit status in three
# scenarios -- no status publisher running, target never reached (timeout),
# and target reached -- for both joint angles and tool pose.
config=config.json
prefix=ur5
# Port the fake status publisher will listen on, taken from config.json.
publisher_port=$( name-value-get --from json $prefix/publisher/data/port < $config )
timeout=0.1
angle_tolerance=0.0001
# Canned status packet of a stationary arm; its angles/pose serve as the
# "reachable" targets below.
status_sample=status_sample_stationary.bin
real_target_angles=$( cat $status_sample | ur-arm-status --fields=arm/angles )
real_target_pose=$( cat $status_sample | ur-arm-status --fields=tool/pose )
# An arbitrary target the stationary sample can never satisfy.
fake_target=1,2,3,4,5,6
# Refuse to run if something is already listening on the publisher port.
if (( `netstat --all | grep tcp | grep LISTEN | grep $publisher_port | wc -l` )); then
echo "$( readlink -f `basename $0` ): required port $publisher_port is already in use, try changing publisher port in config.json" >&2
exit 1
fi
# Kill all child processes (the background publishers) on interruption.
function bye { pkill -P $$; }
trap 'bye' INT HUP TERM
# Run the three scenarios for one target kind.
#   $1 - field name ("angles" or "pose")
#   $2 - a target value that the canned sample actually satisfies
# Prints "<name>/<scenario>/status=<code>" lines for the test harness.
function test_for
{
name="$1"
real_target="$2"
ur-arm wait --$name=$fake_target --config=$config --prefix=$prefix --timeout=$timeout --tolerance=$angle_tolerance
status=$?
echo "$name/no_publisher/status=$status"
# Publish the canned status in a loop; the fake target is never reached.
while :; do cat $status_sample; sleep 0.01; done | io-publish -s $( ur-arm-status --packet-size ) tcp:$publisher_port 2>/dev/null &
ur-arm wait --$name=$fake_target --config=$config --prefix=$prefix --timeout=$timeout --tolerance=$angle_tolerance
status=$?
echo "$name/timed_out/status=$status"
pkill -P $$
# Publish again; this time wait for a target the sample does satisfy.
while :; do cat $status_sample; sleep 0.01; done | io-publish -s $( ur-arm-status --packet-size ) tcp:$publisher_port 2>/dev/null &
ur-arm wait --$name=$real_target --config=$config --prefix=$prefix --timeout=$timeout --tolerance=$angle_tolerance
status=$?
echo "$name/success/status=$status"
pkill -P $$
}
test_for angles $real_target_angles
test_for pose $real_target_pose
| true
|
d3ba6d2269e2cbfe514dc5d7880266e39487ae1c
|
Shell
|
md-jamal/arm
|
/C_NOTES/0ule/7th/shell/9while/5while.sh
|
UTF-8
| 268
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the numbers 1 through 10, but skip 3.
for ((n = 1; n <= 10; n++)); do
    # Skip this iteration entirely for the excluded value.
    if ((n == 3)); then
        continue
    fi
    echo "$n"
done
exit 0
| true
|
ac538de4d14d804eef6c98d2f8a4232658ce5b7a
|
Shell
|
danybmx/dotfiles-workspace
|
/setup-ubuntu.sh
|
UTF-8
| 2,300
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Ubuntu workstation bootstrap: editor, Docker, git identity, language
# toolchains (Java/Node/Go/PHP) and personal dotfiles.
set -e

# UPGRADE
sudo apt update -y
sudo apt upgrade -y

# EDITOR -- install neovim and point the vi/vim/editor alternatives at it.
if ! [ -x "$(command -v nvim)" ]; then
  sudo apt install -y neovim python3-neovim
  sudo update-alternatives --install /usr/bin/vi vi /usr/bin/nvim 61
  sudo update-alternatives --set vi /usr/bin/nvim
  sudo update-alternatives --install /usr/bin/vim vim /usr/bin/nvim 60
  sudo update-alternatives --set vim /usr/bin/nvim
  sudo update-alternatives --install /usr/bin/editor editor /usr/bin/nvim 60
  sudo update-alternatives --set editor /usr/bin/nvim
fi

# DOCKER -- official apt repository plus the docker-compose binary.
if ! [ -x "$(command -v docker)" ]; then
  sudo apt install -y \
    apt-transport-https \
    ca-certificates \
    curl \
    gnupg-agent \
    software-properties-common
  curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
  sudo add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
   bionic \
   stable"
  sudo apt update -y
  sudo apt install -y docker-ce docker-ce-cli containerd.io
  sudo usermod -aG docker "$USER"
  # BUG FIX: was "$(uname-s)" (missing space), which ran a non-existent
  # command and produced an invalid download URL; the release asset is
  # named docker-compose-<OS>-<arch>.
  sudo curl -L \
    "https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" \
    -o /usr/local/bin/docker-compose
  sudo chmod a+x /usr/local/bin/docker-compose
fi

# GITCONFIG
git config --global user.name "Daniel Rodriguez Gil"
git config --global user.email "daniel@dpstudios.es"

# TOOLS
sudo apt install -y zip silversearcher-ag direnv tmuxinator

# Java -- via sdkman
curl -s "https://get.sdkman.io" | bash
# NOTE(review): this echo looks like a leftover debug line; the 'source'
# below is what actually loads sdkman into this shell.
echo $HOME/.sdkman/bin/sdkman-init.sh
source $HOME/.sdkman/bin/sdkman-init.sh
sdk install java 13.0.2.hs-adpt
sdk install maven
sdk install gradle

# Node -- via nvm
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | bash
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
nvm install 12
npm install -g prettier \
  eslint \
  eslint-config-airbnb \
  intelephense \
  javascript-typescript-langserver \
  typescript \
  typescript-language-server \
  vls

# Go
sudo apt install golang

# PHP
sudo apt install -y php php-soap php-mysql php-gd php-xdebug php-xml php-curl composer

# DOTFILES
git clone git@github.com:danybmx/dotfiles-workspace.git ~/.dotfiles
sh ~/.dotfiles/install.sh
| true
|
064dc84560abe4e60aabcd54d18b1fbc9cf364e0
|
Shell
|
mkjubran/HM_MV_CNN
|
/TryMe_HMNoTexture_Function
|
UTF-8
| 1,237
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Encode/decode a test clip with the original HM (no texture), then
# extract motion vectors at the requested block resolution and stage
# them for MATLAB visualisation.
#   $1 = QP, $2 = number of frames to encode, $3 = MV block size (4/8/16),
#   $4 = input file (currently unused; see commented assignments below).
clear
#QP=$1
#NumberFrames=$2
#mbmapXxX=$3
#InputFile=$4
echo %#######################################################################
echo MV QP=$1 , MV Resolution=$2
echo %#######################################################################
# Remove the .265 video produced by a previous run.
rm ./HM_MVX_NoTexture/vid/box.265
# Remove the extracted MVs from a previous run.
rm ./mbmap/mv_HM.bin
# Encode and decode with the original HM (yuv -> bin -> decoded yuv).
cd ./HM_MVX_NoTexture/bin
rm mv.bin
rm mv_bug.dat
sudo ./TAppEncoderStatic -c ../cfg/encoder_Jubran.cfg --InputFile="../vid/box.yuv" --QP=$1 --FramesToBeEncoded=$2 --MaxCUSize=16 --MaxPartitionDepth=1 --BitstreamFile="../vid/box.bin"
sudo ./TAppDecoderStatic --BitstreamFile="../vid/box.bin" --ReconFile="box_decoded.yuv"
cp mv.bin ../../mbmap/mv_dec.bin
# Extract the MVs at the requested block resolution (only 4, 8 or 16
# are supported by mbmap).
cd ../../mbmap
if [ $3 -eq 4 ] || [ $3 -eq 8 ] || [ $3 -eq 16 ]
then
./mbmap -h 240 -w 320 -$3 -o mv_HM.bin mv_dec.bin
else
# BUG FIX: the error message had a typo ("Reso;ution") and the script
# used the non-portable 'exit -1' (which wraps to 255); exit 1 is the
# conventional failure status.
echo "mbmap: MV Resolution is not identified"
exit 1
fi
# Stage the MVs where the MATLAB post-processing picks them up.
mv ./mv_HM.bin /media/sf_VisualizeMVX/MVbins/HMMVNoTexture_QP_$1_MVRes_$3.bin
| true
|
91d64c763939837ce51b1f32af6d316931ddd337
|
Shell
|
olevole/jetpack
|
/images/example.showenv/showenv.sh
|
UTF-8
| 829
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh
# Demo app for a jetpack/rkt-style pod: prints its invocation details,
# environment, mounts, files, and pod metadata, and records timestamps
# across runs in the /opt/data volume.
set -e
cat <<EOF
Program: $0
Args: $*
I am: $(id)
Work dir: $(pwd)
Hostname: $(hostname)
Environment:
EOF
env | sort
# Record the run time on the persistent volume; echoed back below.
date > /opt/data/date.txt
cat <<EOF

Mounts:
EOF
mount
cat <<EOF

Files:
EOF
ls -la
echo
echo -n 'date.txt: '
cat /opt/data/date.txt
# pre-start-id.txt is written by the pod's pre-start hook.
echo -n 'pre-start id: '
cat /opt/data/pre-start-id.txt
# post-stop-id.txt only exists after a previous run has stopped.
if test -f /opt/data/post-stop-id.txt ; then
echo -n 'post-stop id: '
cat /opt/data/post-stop-id.txt
else
echo 'post-stop id: NONE (first run?)'
fi
# Query pod/app metadata through the ac-mdc metadata client.
cat <<EOF

Metadata:
- Pod UUID: $(ac-mdc uuid)
- Image ID: $(ac-mdc image-id)
- Image Timestamp (image annotation): $(ac-mdc app-annotation timestamp)
- IP Address (pod annotation): $(ac-mdc annotation ip-address)
- Render: $(ac-mdc expand '[Hello from Pod {{.UUID}} running app {{.AppImageID}} at {{.PodAnnotation "ip-address"}}]')
EOF
| true
|
c738c428607a6137dd3a7c8c83864bf5ac0e588a
|
Shell
|
TreesLab/NCLtk
|
/bin/generate_Sequence.Path.sh
|
UTF-8
| 727
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# Build a "Sequence.Path" table that maps each chromosome FASTA file in
# a genome folder to its absolute path (tab-separated, with a header).
genome=$1
# No argument: print usage and quit.
if [[ -z "$genome" ]]; then
echo ""
echo "Usage:"
echo "./generate_Sequence.Path.sh [genome folder]"
echo ""
echo "Example:"
echo "./generate_Sequence.Path.sh /desktop/h19"
echo ""
exit
fi
# Absolute path of the genome folder.
BASEDIR=$(readlink -f $genome)
# Header row: "Chromosome name<TAB>path".
cat <(echo "Chromosome name""\t""path") > path_header.tmp
# Column 1: file names with the .fa suffix stripped.
ls $genome | awk '{print $1}' | sed 's/\.fa//g' > Sequence.Path.tmp1
# Column 2: absolute path of each file (BASEDIR prepended via awk var).
ls $genome | awk '{print aa "/"$1}' aa=$(echo $BASEDIR) > Sequence.Path.tmp2
# Join the two columns with tabs and prepend the header.
paste Sequence.Path.tmp1 Sequence.Path.tmp2 | tr ' ' \\t > Sequence.Path.tmp3
cat path_header.tmp Sequence.Path.tmp3 > Sequence.Path
# Clean up intermediates.
rm -r -f path_header.tmp
rm -r -f Sequence.Path.tmp1
rm -r -f Sequence.Path.tmp2
rm -r -f Sequence.Path.tmp3
| true
|
44dfc00775a37b823b576f9ed4d568f981472b29
|
Shell
|
photomoose/plantduino-legacy
|
/Arduino/Scripts/plantduino-command-listener
|
UTF-8
| 358
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh /etc/rc.common
# OpenWrt init script for the plantduino command listener.
# START=99 schedules it after all other services at boot.
START=99
# Launch the listener as a background process.
start() {
echo "Starting plantduino-command-listener"
python /root/command-listener.py &
}
# Find the listener's PID (the [p] bracket trick stops grep from
# matching its own process line) and terminate it.
stop() {
pid=$(ps | grep '[p]ython /root/command-listener.py' | awk '{ print $1 }')
echo "Stopping plantduino-command-listener"
kill $pid
echo "Stopped plantduino-command-listener"
}
| true
|
aa09ebe4950ae0ed5d413ad6cfdfb42531a365f7
|
Shell
|
obal3588/DHALSIM
|
/ICS_topologies/ky3/launch_dhalsim_experiment.sh
|
UTF-8
| 628
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# There should be a DHALSIM configuration file with .yaml extension in the same folder. Also a .inp and .cpa files
# with the same name in this folder
# Launch one DHALSIM experiment.
#   $1 - topology name (expects $1.yaml in this folder)
#   $2 - index of the simulated week
topology_name=$1
week_index=$2
folder_name=$topology_name"_topology"
# Create log folder
if [ ! -d "logs" ]; then\
mkdir "logs";\
fi
# Output folder to store the experiment results
if [ ! -d "output" ]; then\
mkdir "output";\
fi
# Re-create the plant database and hand ownership to the mininet user.
rm -rf plant.sqlite; sudo python init.py; sudo chown mininet:mininet plant.sqlite
# Kill any ENIP server instances left over from a previous run.
sudo pkill -f -u root "python -m cpppo.server.enip"
chmod +x *.sh
# Clean up stale mininet state before starting the experiment.
sudo mn -c
sudo python automatic_run.py -c $topology_name".yaml" $week_index
| true
|
abf460675c5bbd0f5b72cc04cc2261fda1754bb0
|
Shell
|
StamLab/stampipes
|
/scripts/sentry/sentry-lib.bash
|
UTF-8
| 1,091
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# This file is a library intended for use with http://sentry.io
# Source it near the top of a script to install sentry-cli's bash hook,
# which reports unhandled script errors to Sentry.
# Example usage in your script:
: <<USAGE_EXAMPLE
#!/bin/bash
source sentry-lib.bash

the_rest_of_your_script
USAGE_EXAMPLE

# Configuration:
# One of two ways:
# 1) Create $HOME/.sentryclirc
# 2) Set ENV variable
# These require a DSN which can be found at https://sentry.io
# https://sentry.io/settings/[your_org]/projects/[your_project]/keys/
: <<RC_EXAMPLE
[auth]
token=redacted
dsn=redacted

[defaults]
org=your-org-goes-here
project=your-project-goes-here
RC_EXAMPLE

: <<VARS_EXAMPLE
export SENTRY_DSN=<your-dsn-goes-here>
VARS_EXAMPLE

# Guard against direct execution.
# BUG FIX: the fallback used to be a subshell '( ... exit 1 )'; 'exit'
# inside a subshell only terminates the subshell, so direct execution
# printed the error and then kept running anyway.  A brace group runs
# in the current shell and really exits.
(return 0 2>/dev/null) || {
echo >&2 "Error: You should source this, like 'source \"$0\"', not run this directly. See comments for usage."
exit 1
}

# Install the error hook only when sentry-cli exists and is configured.
if ! command -v sentry-cli >/dev/null ; then
echo >&2 "WARNING: sentry-cli not available on your PATH"
elif ! [[ -s "$HOME/.sentryclirc" ]] && [[ "$SENTRY_DSN" == "" ]] ; then
echo >&2 "WARNING: sentry-cli improperly configured"
else
eval "$(sentry-cli bash-hook)"
fi
| true
|
21a695b45f7b05410be3e31b15b11ef8cd87d12b
|
Shell
|
godane/archpwn
|
/repo/tunneling/ctunnel/PKGBUILD
|
UTF-8
| 650
| 2.78125
| 3
|
[] |
no_license
|
# Contributor: Francesco Piccinno <stack.box@gmail.com>
# PKGBUILD for ctunnel: proxies TCP or UDP connections through a
# cryptographic tunnel (part of the archpwn tunneling group).
pkgname=ctunnel
pkgver=0.3
pkgrel=1
pkgdesc="Proxy forward TCP or UDP connections via a cryptographic tunnel"
arch=('i686' 'x86_64')
url="http://nardcore.org/ctunnel"
# NOTE(review): 'updateurl' is not a standard PKGBUILD variable -- it
# looks like metadata consumed by an external update-checker tool.
updateurl="http://nardcore.org/ctunnel/download=>ctunnel-"
license=('GPL')
depends=('openssl')
groups=('archpwn' 'archpwn-tunneling')
md5sums=('ef2ca33b9e1ad261741dc892ec86c6e6')
source=(http://nardcore.org/ctunnel/$pkgname-$pkgver.tar.gz)
# Compile ctunnel and install the binary and its man page into pkgdir.
build() {
cd $srcdir/$pkgname-$pkgver
make || return 1
install -Dm 750 src/$pkgname $pkgdir/usr/bin/$pkgname || return 1
install -Dm 640 $pkgname.1 $pkgdir/usr/share/man/man1/$pkgname.1 || return 1
}
| true
|
b14380b757411de834f8c6f6cdc5d1b4c25ad3ea
|
Shell
|
ChristophHaag/scripts
|
/touchjava
|
UTF-8
| 340
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/bash
# Create a minimal Java class skeleton.
# Usage: touchjava <Classname> [directory]
# Writes <Classname>.java containing an empty main() into the given
# directory (default: the current directory).
if [ -z "$1" ];
then
echo "Call as $0 <Classname> [directory]"
exit 1
fi
# Java convention: class names start with an uppercase letter.
case ${1:0:1} in
[[:lower:]])
echo "$1 should begin with an uppercase letter."
exit
;;
esac
# Target path: ./<Class>.java or <dir>/<Class>.java.
if [ -z "$2" ];
then
P="$1.java"
else
P="$2/$1.java"
fi
echo 'class' "$1" '{
public static void main(String[] args) {
}
}' > "$P"
| true
|
17202e935aac207c27f78df4bb795f405155ab34
|
Shell
|
jensp/Arch-Linux-on-i586
|
/core/dhcpcd/PKGBUILD
|
UTF-8
| 1,490
| 2.96875
| 3
|
[] |
no_license
|
# Maintainer: Jens Pranaitis <jens@chaox.net>
# Contributor: Ronald van Haren <ronald.archlinux.org>
# Contributor: Tom Killian <tom.archlinux.org>
# Contributor: Judd Vinet <jvinet.zeroflux.org>
# PKGBUILD for dhcpcd (i586-capable Arch): builds the DHCP client
# daemon, adds a /usr/sbin compat symlink and a conf.d file, and
# extracts the BSD license text from config.h.
pkgname=dhcpcd
pkgver=5.1.1
pkgrel=1
pkgdesc="RFC2131 compliant DHCP client daemon"
url="http://roy.marples.name/dhcpcd/"
arch=('i586' 'i686' 'x86_64')
license=('BSD')
groups=('base')
depends=('glibc' 'sh')
backup=('etc/conf.d/dhcpcd' 'etc/dhcpcd.conf')
options=('emptydirs') # We Need the Empty /var/lib/dhcpcd Directory
source=("http://roy.marples.name/downloads/$pkgname/$pkgname-$pkgver.tar.bz2" \
'dhcpcd.conf.d')
md5sums=('7bb70885ffb1789634a20431ae4d5bbd'
'372d33485556982b64a97f301e17c5dd')
build() {
cd ${srcdir}/${pkgname}-${pkgver}
# configure variables
./configure --libexecdir=/usr/lib/dhcpcd --dbdir=/var/lib/dhcpcd
# Build
make || return 1
make DESTDIR=${pkgdir} install || return 1
# Create Binary Symlink
install -d ${pkgdir}/usr/sbin || return 1
ln -sf /sbin/dhcpcd ${pkgdir}/usr/sbin/dhcpcd || return 1
# Install Configuration File used in /etc/rc.d/network
install -D -m644 ../dhcpcd.conf.d $pkgdir/etc/conf.d/$pkgname || return 1
# Install License (the first 26 lines of config.h carry the BSD header)
install -d $pkgdir/usr/share/licenses/$pkgname || return 1
awk '{if(FNR<27)print $0}' ${srcdir}/${pkgname}-${pkgver}/config.h \
>> ${pkgdir}/usr/share/licenses/${pkgname}/LICENSE || return 1
# Set Options in /etc/dhcpcd.conf
echo noipv4ll >> ${pkgdir}/etc/dhcpcd.conf || return 1 # Disable ip4vall
}
| true
|
0a2a7141410b88b7b50721304727a252f46d5519
|
Shell
|
liboyangbj/eeprom
|
/write_in.sh
|
UTF-8
| 2,086
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Program a board-identification EEPROM: prompt the operator for a
# serial number and MAC address, build an .eep image with eepmake,
# blank the EEPROM, then flash the new image.  Supports Raspberry Pi
# and Radxa ROCK Pi carriers (detected via the device tree).

# i2c/EEPROM access needs root.
if [ "$(id -u)" != "0" ]; then
	echo "This script must be run as root" 1>&2
	exit 1
fi

# Remove artefacts of a previous run.
rm -rf num.txt write_in.eep read_out.eep read_out.txt

# Identify the board from the device-tree model string.
MODELA=`cat /proc/device-tree/model`
MODELB='Raspberry Pi'
MODELC='ROCK PI'
echo "Board $MODELA"

FILENAME=num.txt # collects the scanned serial number and MAC address
# BUG FIX: was '[ ! -d $FILENAME ]', which tests for a *directory* and
# is true even when the file already exists; -e tests plain existence.
if [ ! -e "$FILENAME" ];then
	touch $FILENAME
else
	echo dir exist
fi

read -p "Please scan the serial number:" serialnum
# BUG FIX: operands are now quoted.  Unquoted 'test -n $serialnum'
# degenerates to 'test -n' (always true) when the scan is empty, so an
# empty serial number used to be reported as a success.
if test -w "$FILENAME" && test -n "$serialnum"
then
	echo $serialnum > $FILENAME
	echo "Write the successfully"
else
	echo "Write the failure"
fi

read -p "Please scan the MAC address:" MACnum
# Same quoting fix as above for the MAC address.
if test -w "$FILENAME" && test -n "$MACnum"
then
	echo $MACnum >> $FILENAME
	echo "Write the successfully"
else
	echo "Write the failure"
fi

GENERAT=write_in.eep # .eep image that will be written to the EEPROM
./eepmake eeprom_settings.txt $GENERAT -c $FILENAME
if [ $? = 0 ]
then
	echo "Generate $GENERAT file succeeded"
else
	echo "Failed to generate $GENERAT file"
	exit 1
fi

EMPTY=blank.eep # all-zero image used to erase the EEPROM first
# BUG FIX: same '-d' vs '-e' confusion as above.
if [ ! -e "$EMPTY" ];then
	touch $EMPTY
else
	echo dir exist
fi

# Erase the EEPROM.  Device and size differ per board; the ROCK Pi 4
# driver currently only supports at24c02, i.e. 256 bytes.
if [[ "$MODELA" == *$MODELB* ]];then
	echo "正在写入树莓派"
	dd if=/dev/zero ibs=1k count=4 of=$EMPTY
	./eepflash.sh -w -f=$EMPTY -t=24c256 -d=0
elif [[ "$MODELA" == *$MODELC* ]]; then
	echo "正在写入ROCK"
	dd if=/dev/zero ibs=1 count=256 of=blank.eep
	./eepflash.sh -w -f=$EMPTY -t=24c256 -d=2 -a=50
fi
if [ $? = 0 ];
then
	echo "EEPROM cleared successfully"
else
	echo "Failed to empty EEPROM"
	exit 1
fi

# Flash the freshly generated image.
if [[ "$MODELA" == *$MODELB* ]]
then
	echo "正在写入树莓派"
	./eepflash.sh -w -f=$GENERAT -t=24c256 -d=0
elif [[ "$MODELA" == *$MODELC* ]]
then
	echo "正在写入ROCK"
	./eepflash.sh -w -f=$GENERAT -t=24c256 -d=2 -a=50
fi
if [ $? = 0 ]
then
	echo "Write the EEPROM succeeded"
else
	echo "Failed to write the EEPROM"
	exit 1
fi
| true
|
c9ba3fb0e5bde251454d5ed5e95643a6afe1f6fc
|
Shell
|
mlaradji/misc
|
/lxc/port_forward.sh
|
UTF-8
| 740
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Port forward 1080 and 10433 to ports 80 and 443 of web-server
# NOTE(review): the comment above says 10433 but the rules below
# actually forward 10443.
Server_ip="192.168.0.10"
web_server_ip="192.168.10.100"
interface="wlp3s0"
# Back up the current rules before changing anything.
sudo iptables-save > /home/mohamed/.iptables.bak
# Allow the kernel to route packets between interfaces.
sudo sysctl net.ipv4.ip_forward=1
sudo iptables -t nat -A PREROUTING -i "$interface" -p tcp -d "$Server_ip" --dport 1080 -j DNAT --to-destination "$web_server_ip":80
sudo iptables -t nat -A PREROUTING -i "$interface" -p tcp -d "$Server_ip" --dport 10443 -j DNAT --to-destination "$web_server_ip":443
#sudo iptables -t nat -A PREROUTING -p tcp --dport 1080 -j DNAT --to "$web_server_ip":80
#sudo iptables -t nat -A PREROUTING -p tcp --dport 10443 -j DNAT --to "$web_server_ip":443
# Rewrite the source address on the way out so replies route back here.
sudo iptables -t nat -A POSTROUTING -j MASQUERADE
| true
|
a9438fd05f772d1f151a95b43e4e4d3b75f1bd54
|
Shell
|
RobbtZhang/vue-webhook
|
/vue-front.sh
|
UTF-8
| 445
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Webhook deploy script for the blog front end: reset the git checkout
# to origin/master, build, and replace the running Docker container.
WORK_PATH='/usr/projects/blog-c'
cd $WORK_PATH
echo "先清除老代码"
# Discard local changes and untracked files so the pull is clean.
git reset --hard origin/master
git clean -f
echo "拉取最新代码"
git pull origin master
echo "编译"
yarn build
echo "开始执行构建"
docker build -t blog-c:1.0 .
echo "停止旧容器并删除旧容器"
docker stop blog-c-container
docker rm blog-c-container
echo "启动新容器"
docker container run -p 80:80 --name blog-c-container -d blog-c:1.0
| true
|
0f06af10c1e148a830a00ffda0fc85344c1fdfc0
|
Shell
|
hydazz/linux-scripts
|
/disabler.sh
|
UTF-8
| 3,830
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# disabler.sh -- load or unload macOS launchd jobs for managed services
# (Sophos, JAMF, NoMAD, FamilyZone) via launchctl.
# ~~~~~~~~~~~~~~~~~~~~~~~
# set colours
# ~~~~~~~~~~~~~~~~~~~~~~~
red='\033[1;31m' # red
green='\033[1;32m' # Green
bold='\033[1;37m' # white bold
nc='\033[0m' # no colour
# ~~~~~~~~~~~~~~~~~~~~~~~
# get parameters from user
# ~~~~~~~~~~~~~~~~~~~~~~~
# Print the usage text and abort the script with status 1.
helpFunction() {
echo ""
echo -e "${bold}Usage: $0 -s <service> -f <load/unload>"
echo -e "\t-s ervice: Service to load or unload, jamf, sophos, nomad"
echo -e "\t-f unction: load or unload the services${nc}"
exit 1
}
while getopts "s:f:" opt; do
case "${opt}" in
s) service="${OPTARG}" ;; # space-separated list of service names
f) function="${OPTARG}" ;; # "load" or "unload"
?) helpFunction ;;
esac
done
# print helpFunction in case parameters are empty
if [ -z "${service}" ] || [ -z "${function}" ]; then
echo -e "${red}>>> ERROR: ${bold}Some or all of the parameters are empty${nc}"
helpFunction
fi
# Map the requested function to a present-participle label used in
# status messages (clfunc).
if [ "${function}" = "load" ]; then
clfunc="Loading"
elif [ "${function}" = "unload" ]; then
clfunc="Unloading"
else
# NOTE(review): an unsupported function only prints an error here; the
# script continues with clfunc unset rather than exiting.
echo -e "${red}>>> ERROR: ${bold}${function} is not a supported function"
echo -e "Supported functions: load, unload${nc}"
fi
# ~~~~~~~~~~~~~~~~~~~~~~~
# define loader function
# ~~~~~~~~~~~~~~~~~~~~~~~
# loader -- announce the current action, then run `launchctl <load|unload> -w`
# over every plist path in $list (as the user) and $list_sudo (via sudo).
# Reads globals: clfunc, clserv, function, list, list_sudo.
loader() {
	echo -e "${green}>>> ${bold}${clfunc} ${clserv}${nc}"
	for item in ${list}; do
		launchctl "${function}" -w "${item}" &>/dev/null
	done
	for item in ${list_sudo}; do
		sudo launchctl "${function}" -w "${item}" &>/dev/null
	done
}
# Process each requested service: validate the name, collect its
# launchd plists, and load/unload them via loader().
for i in ${service}; do
	# ~~~~~~~~~~~~~~~~~~~~~~~
	# validate supplied parameters
	# ~~~~~~~~~~~~~~~~~~~~~~~
	# Collect unknown service names (and count them) for the summary
	# error printed after the loop.
	if ! { [ "${i}" = "sophos" ] || [ "${i}" = "jamf" ] || [ "${i}" = "nomad" ] || [ "${i}" = "familyzone" ]; }; then
		if [ -z "${unknown}" ]; then
			unknown="${i}"
			number="1"
		else
			unknown="${unknown}, ${i}"
			# BUG FIX: this was number="((number + 1))", which stored the
			# literal string instead of incrementing and broke the -gt
			# comparison after the loop.
			number=$((number + 1))
		fi
	fi
	# ~~~~~~~~~~~~~~~~~~~~~~~
	# startup
	# ~~~~~~~~~~~~~~~~~~~~~~~
	if [ "${i}" = "sophos" ]; then
		clserv="Sophos"
		list=$(find /Library/LaunchAgents -iname "*sophos*")
		list_sudo=$(find /Library/LaunchDaemons -iname "*sophos*")
		if [ -n "${list}" ] || [ -n "${list_sudo}" ]; then
			loader
			# Unloading alone leaves running processes behind; kill them.
			if [ "${function}" = "unload" ]; then
				echo -e "${green}>>> ${bold}Killing all Sophos processes${nc}"
				pgrep "[sS]ophos" | sudo xargs kill
			fi
		else
			echo -e "${red}>>> ERROR: ${bold}${clserv} is not installed${nc}"
		fi
	fi
	if [ "${i}" = "jamf" ]; then
		clserv="JAMF"
		list=$(find /Library/LaunchAgents -iname "*jamf*")
		list_sudo=$(find /Library/LaunchDaemons -iname "*jamf*")
		if [ -n "${list}" ] || [ -n "${list_sudo}" ]; then
			# Drop JAMF's cached settings before unloading/loading.
			[[ -f /Library/Application\ Support/JAMF/.jmf_settings.json ]] &&
				sudo rm /Library/Application\ Support/JAMF/.jmf_settings.json
			loader
		else
			echo -e "${red}>>> ERROR: ${bold}${clserv} is not installed${nc}"
		fi
	fi
	if [ "${i}" = "nomad" ]; then
		clserv="NoMad"
		# NOTE(review): the per-user LaunchAgents path is hard-coded to
		# one account (hydea22) -- confirm before wider use.  Also,
		# list_sudo is not reset here, so it may carry over plists from
		# a previously processed service.
		list=$(
			find /Library/LaunchAgents -iname "*nomad*"
			find /Users/hydea22/Library/LaunchAgents -iname "*nomad*"
		)
		if [ -n "${list}" ] || [ -n "${list_sudo}" ]; then
			loader
		else
			echo -e "${red}>>> ERROR: ${bold}${clserv} is not installed${nc}"
		fi
	fi
	if [ "${i}" = "familyzone" ]; then
		clserv="FamilyZone"
		list=$(
			find /Library/LaunchAgents -iname "*familyzone*"
			find /Library/LaunchAgents -iname "*fz*"
		)
		list_sudo=$(
			find /Library/LaunchDaemons -iname "*familyzone*"
			find /Library/LaunchDaemons -iname "*fz*"
		)
		if [ -n "${list}" ] || [ -n "${list_sudo}" ]; then
			loader
		else
			echo -e "${red}>>> ERROR: ${bold}${clserv} is not installed${nc}"
		fi
	fi
done
# Report any unrecognised service names collected in the loop above,
# choosing a singular or plural message based on the count.
if [ "${number}" = "1" ]; then
echo -e "${red}>>> ERROR: ${bold}${unknown} is not a supported service"
echo -e "Supported services: sophos, jamf, nomad${nc}"
elif [[ "${number}" -gt "1" ]]; then
echo -e "${red}>>> ERROR: ${bold}${unknown} are not supported services"
echo -e "Supported services: sophos, jamf, nomad${nc}"
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.