blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
45645d7e6edb2098d91da0a29ff2cbd0d730b3d5 | Shell | Appdynamics/api-commandline-tool | /helpers/recursiveSource.sh | UTF-8 | 292 | 3.734375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Recursively source every *.sh file beneath the given directory.
# Arguments: directory path ("$*" joins all args with a space, so callers
#            that word-split a path still work when quoted consistently).
# Globals:   calls debug() for logging; sourced files run in this shell.
recursiveSource() {
    if [ -d "$*" ]; then
        debug "Sourcing plugins from $*"
        # Quote "$*" so the glob still works when the path contains spaces.
        for file in "$*"/* ; do
            if [ -f "$file" ] && [ "${file##*.}" = "sh" ] ; then
                . "$file"
            fi
            if [ -d "$file" ] ; then
                # Quote the recursion argument; the original word-split
                # here and broke on subdirectories with spaces in the name.
                recursiveSource "$file"
            fi
        done
    fi
}
| true |
35a31826bd2c3931b3815108c77b2a84e97f4efc | Shell | bowmanmc/tall.tail | /test/access.sh | UTF-8 | 252 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Simulate a growing access log: forever append one pseudo-random line
# from $SRC to $OUT, then sleep 0-4 seconds between appends.
SRC=/tmp/nasa.txt
OUT=/tmp/access.log
while :
do
    # Pick the first line that wins a coin flip (roughly random, biased
    # toward the top of the file). UUOC removed: awk reads $SRC directly.
    awk 'BEGIN { srand() } rand() >= 0.5 { print; exit }' "$SRC" >> "$OUT"
    #cat $SRC | sort -R | head -n 1 >> $OUT
    t=$((RANDOM % 5))
    echo "Sleeping for $t"   # typo fix: message previously read "Slepping"
    sleep "$t"
done
| true |
d908e8f66c55ba22f0c1ab738c0cb1b296c14683 | Shell | moorefu/docker-apisix | /centos/entrypoint.sh | UTF-8 | 1,410 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# Paths used by the APISIX container entrypoint.
LUAJIT_DIR=/usr/local/openresty/luajit
ADDONS_DIR=/usr/local/addons
ADDONS_SPEC=addons-master-0.rockspec
INIT_DIR=/docker-entrypoint-init.d
# Run the init.d scripts first, then install the addons rockspec.
# Source a single candidate file into the current shell if its name ends
# in .sh; any other file is only reported as ignored.
# $1 - path to the candidate file.
run_file(){
    echo "found file $1"
    if [[ "$1" == *.sh ]]; then
        echo "[INIT] $0: running $1"
        . "$1"
    else
        echo "[INIT] $0: ignoring $1"
    fi
}
# Run every *.sh in $INIT_DIR (via run_file) unless its md5 already
# appears in $INIT_DIR/.cache, then regenerate the cache so unchanged
# scripts are skipped on the next container start.
# Globals: INIT_DIR; depends on run_file().
run_init(){
    echo "Starting init scripts from '${INIT_DIR}':"
    local fn
    # Glob instead of parsing `ls` output (handles spaces, no subshell);
    # also fixes the original's hardcoded /docker-entrypoint-init.d path
    # so the loop and the cache consistently use $INIT_DIR.
    for fn in "${INIT_DIR}"/*.sh; do
        [ -e "$fn" ] || continue
        # execute script if it didn't execute yet or if it was changed
        grep "$(md5sum "$fn")" "${INIT_DIR}/.cache" 2> /dev/null || run_file "$fn"
    done
    # clear cache
    if [ -e "${INIT_DIR}/.cache" ]; then
        rm "${INIT_DIR}/.cache"
    fi
    # regenerate cache (per-file md5sum: the original piped an empty list
    # into `xargs md5sum`, which then blocked reading stdin)
    for fn in "${INIT_DIR}"/*.sh; do
        [ -e "$fn" ] || continue
        md5sum "$fn" >> "${INIT_DIR}/.cache"
    done
    echo "Init finished"
    echo
}
# Install the rockspec's dependencies unless the spec file is unchanged
# since the last run (md5 cached in $ADDONS_DIR/.cache).
# Globals: ADDONS_DIR, ADDONS_SPEC, LUAJIT_DIR, LUAROCKS_OPTS (optional
#          extra flags; intentionally left unquoted so it may hold several).
run_rocks(){
    local spec="${ADDONS_DIR}/${ADDONS_SPEC}"
    echo "Starting install rockspec from '${spec}':"
    # UUOC removed: grep reads the cache file directly; stderr silenced so
    # a missing cache simply takes the install branch.
    grep "$(md5sum "${spec}")" "${ADDONS_DIR}/.cache" 2> /dev/null \
        || luarocks install $LUAROCKS_OPTS --lua-dir="${LUAJIT_DIR}" "${spec}" --tree="${ADDONS_DIR}/deps" --only-deps --local
    md5sum "${spec}" > "${ADDONS_DIR}/.cache"
    echo "Install rockspec finished"
}
# Initialize APISIX and its etcd data, then start OpenResty in the
# foreground ('daemon off;' keeps the process as PID 1 for the container).
run(){
    /usr/bin/apisix init && /usr/bin/apisix init_etcd && /usr/local/openresty/bin/openresty -p /usr/local/apisix -g 'daemon off;'
}
# Entrypoint sequence: user init scripts, addon dependencies, then server.
run_init
run_rocks
run
| true |
e07525f4f988794044e3715269781b7b8cdcdffe | Shell | jcaamano/kubevirt | /automation/travisci-test.sh | UTF-8 | 1,490 | 3.109375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-dco-1.1"
] | permissive | #!/bin/bash -e
# CI gate (runs under `bash -e`, see shebang): regenerate code and fail via
# `false` if the working tree is dirty afterwards, i.e. generated files
# were not committed.
make generate
if [[ -n "$(git status --porcelain)" ]] ; then
    echo "It seems like you need to run 'make generate'. Please run it and commit the changes"
    git status --porcelain; false
fi
# Detect binary files: `git grep -c ''` counts lines in all files while
# `-cI` skips binaries, so any '<'-prefixed diff line marks a binary file
# (docs images, swagger-ui, vendor and assets are allowed exceptions).
if diff <(git grep -c '') <(git grep -cI '') | egrep -v -e 'docs/.*\.png|swagger-ui' -e 'vendor/*' -e 'assets/*' | grep '^<'; then
    echo "Binary files are present in git repostory."; false
fi
# Same dirty-tree check after a full build.
make
if [[ -n "$(git status --porcelain)" ]] ; then
    echo "It seems like you need to run 'make'. Please run it and commit the changes"; git status --porcelain; false
fi
make build-verify # verify that we set version on the packages built by bazel
# The make bazel-test might take longer then the current timeout for a command in Travis-CI of 10 min, so adding a keep alive loop while it runs
while sleep 9m; do echo "Long running job - keep alive"; done & LOOP_PID=$!
if [[ $TRAVIS_REPO_SLUG == "kubevirt/kubevirt" && $TRAVIS_CPU_ARCH == "amd64" ]]; then
  make goveralls
else
  make bazel-test
fi
# Stop the keep-alive loop once the long job finished.
kill $LOOP_PID
make build-verify # verify that we set version on the packages built by go(goveralls depends on go-build target)
make apidocs
make client-python
make manifests DOCKER_PREFIX="docker.io/kubevirt" DOCKER_TAG=$TRAVIS_TAG # skip getting old CSVs here (no QUAY_REPOSITORY), verification might fail because of stricter rules over time; falls back to latest if not on a tag
make olm-verify
if [[ $TRAVIS_CPU_ARCH == "amd64" ]]; then
  make prom-rules-verify
fi
| true |
55d16779851c5272c93afb60c798d49af1a4e341 | Shell | hspark-umn/GoNative4Abed | /CreateFileInfoFile.sh | UTF-8 | 149 | 2.875 | 3 | [] | no_license | cd image
# Count .bmp files and record the count in FileInfo.TXT.
# NOTE(review): $IMAGE_DIR is never set in this script — presumably
# exported by a caller; if unset, `ls -1` lists the current directory
# (./image after the preceding `cd image`) — TODO confirm.
count=0
# NOTE(review): parses `ls` output and the regex ".bmp$" matches any
# character before "bmp" (unescaped dot); `ls | grep -c` would be simpler.
for IM in `ls -1 $IMAGE_DIR | egrep ".bmp$"`
do
count=$((count+1))
done
cd ..
# Fixed-format line consumed downstream; only $count varies.
echo "0 image image / bmp 1 7 $count" > FileInfo.TXT
| true |
1018f194bf16b3750fff774bcfccd57ca3fbfdc4 | Shell | jondong/dotfiles | /bin/set-proxy | UTF-8 | 4,043 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Detect the platform once (honors a pre-set $PLATFORM). uname output is
# truncated to 6 chars so every CYGWIN_NT-* flavor maps to "Cygwin".
if [ -z $PLATFORM ]; then
    platformName=$(uname)
    PLATFORM=${platformName:0:6}
    if [ $PLATFORM = 'CYGWIN' ]; then
        PLATFORM='Cygwin'
    fi
    unset platformName
fi
# urlencode <string> — percent-encode everything except the RFC 3986
# unreserved characters [A-Za-z0-9.~_-]; writes the result to stdout
# with no trailing newline.
urlencode() {
    local old_lc_collate=${LC_COLLATE:-}
    LC_COLLATE=C   # byte-wise character ranges in the case pattern
    local length="${#1}"
    local i c
    for (( i = 0; i < length; i++ )); do
        c="${1:i:1}"
        case $c in
            # printf '%s' instead of printf "$c": data must never be used
            # as a printf format string (a stray backslash or '%' would
            # otherwise be interpreted instead of printed).
            [a-zA-Z0-9.~_-]) printf '%s' "$c" ;;
            # "'$c" yields the character's code point per POSIX printf.
            *) printf '%%%02X' "'$c" ;;
        esac
    done
    LC_COLLATE=$old_lc_collate
}
# urldecode <string> — reverse of urlencode: '+' becomes a space and
# every %XX escape is expanded; the result goes to stdout.
urldecode() {
    local decoded
    decoded=${1//+/ }           # form-encoding: '+' means space
    decoded=${decoded//%/\\x}   # rewrite %XX as \xXX escapes...
    printf '%b' "$decoded"      # ...and let printf '%b' expand them
}
# Rewrite every ProxyCommand line in ~/.ssh/config to tunnel through the
# given SOCKS5 proxy.
# $1 - "host:port" of the SOCKS proxy.
# NOTE(review): GNU `sed -i --` in-place syntax (BSD sed would need -i '');
# assumes $1 contains no sed metacharacters — TODO confirm callers.
update_ssh_config() {
    local ssh_config_file="${HOME}/.ssh/config"
    if [ -f $ssh_config_file ]; then
        sed -i -- 's/ProxyCommand.*/ProxyCommand nc -X5 -x '"$1"' %h %p/g' $ssh_config_file
    fi
}
# Interactively collect proxy settings, then write ~/.proxyrc,
# ~/bin/git-proxy, patch ~/.ssh/config and (on Linux) /etc/apt/apt.conf.
# Relies on the read builtin leaving answers in $REPLY where no variable
# name is given; ${REPLY:=default} both defaults and assigns.
update_proxy_settings () {
    read -e -p "Use proxy? [y/N] " -n 1
    has_proxy=${REPLY:=n}
    # ${var,,} lowercases the answer (bash 4+).
    if [ ${has_proxy,,} = 'n' ]; then
        echo "No need to configure proxy settings."
        return 0
    fi
    read -p "HTTP proxy host: " -r http_host
    read -p "HTTP proxy port: " -r http_port
    read -p "HTTP proxy username (if any): " -r http_user
    if [ $http_user ]; then
        read -p "HTTP proxy password (if any): " -r -s http_passwd
        echo ""
    fi
    read -p "Socks proxy host (Press <Enter> if same with HTTP proxy host): " -r
    socks_host=${REPLY:=$http_host}
    # NOTE(review): missing -r and no variable here; answer lands in REPLY.
    read -p "Socks proxy port: (default 1080)"
    socks_port=${REPLY:=1080}
    read -p "No proxy domains(separated by comma. Press <Enter> to use default settings): " -r
    no=${REPLY:=intel.com,.intel.com,10.0.0.0/8,192.168.0.0/16,localhost,127.0.0.0/8,134.134.0.0/16}
    # Build the credential part of the proxy URL; the password is
    # percent-encoded so special characters survive inside the URL.
    if [ ${http_user} ]; then
        http_credential=${http_user}
        if [ ${http_passwd} ]; then
            encoded_passwd=$(urlencode ${http_passwd})
            http_credential="${http_user}:${encoded_passwd}"
        fi
    fi
    if [ ${http_credential} ]; then
        http="http://${http_credential}@${http_host}:${http_port}/"
    else
        http="http://${http_host}:${http_port}/"
    fi
    socks="socks://${socks_host}:${socks_port}/"
    echo "HTTP proxy: ${http:-"undefined."}, Socks proxy: ${socks:-"undefined."}, No proxy: ${no}"
    # Generate ~/.proxyrc exporting the common *_proxy variables.
    proxy_file="${HOME}/.proxyrc"
    echo "Update ${proxy_file}."
    echo "#!/usr/bin/env bash" > ${proxy_file}
    echo "export http_proxy='${http}'" >> ${proxy_file}
    echo -e "export https_proxy='${http}'\nexport ftp_proxy='${http}'" >> ${proxy_file}
    echo -e "export npm_config_proxy='${http}'\nexport npm_config_https_proxy='${http}'" >> ${proxy_file}
    echo "export socks_proxy='${socks}'" >> ${proxy_file}
    echo "export no_proxy='${no}'" >> ${proxy_file}
    echo -e "\nGIT_PROXY_COMMAND=${HOME}/bin/git-proxy" >> ${proxy_file}
    echo "GIT_PROXY_IGNORE=" >> ${proxy_file}
    # Set git-proxy
    bin_dir="${HOME}/bin"
    mkdir -p ${bin_dir}
    git_proxy_file="${bin_dir}/git-proxy"
    echo "Regenerate ${git_proxy_file}."
    echo "#!/usr/bin/env bash" > ${git_proxy_file}
    echo -e "nc -X 5 -x ${socks_host}:${socks_port} \$*" >> ${git_proxy_file}
    chmod +x ${git_proxy_file}
    update_ssh_config "${socks_host}:${socks_port}"
    # Linux only: install apt proxy config and prompt the user to keep the
    # proxy env vars across sudo via visudo.
    if [ $PLATFORM = "Linux" ]; then
        apt_conf_file="/tmp/apt.conf"
        echo "Acquire::http::proxy \"${http}\";" > ${apt_conf_file}
        echo "Acquire::https::proxy \"${http}\";" >> ${apt_conf_file}
        echo "Acquire::ftp::proxy \"${http}\";" >> ${apt_conf_file}
        echo "Acquire::socks::proxy \"${socks}\";" >> ${apt_conf_file}
        echo "Replace /etc/apt/apt.conf"
        sudo cp ${apt_conf_file} /etc/apt
        rm ${apt_conf_file}
        echo -e "\nNOTICE: visudo will be start and you need to add the following lines to your /etc/sudoer:"
        echo -e '\nDefaults\tenv_keep="http_proxy https_proxy ftp_proxy socks_proxy"\n'
        read -e -p 'Press <Enter> to continue...' -n 0
        export EDITOR=vi
        sudo visudo
        fi
}
update_proxy_settings
| true |
42d82febffd2e514b2b8b2c02983c7e081e45a1f | Shell | Jarpos/jarm-c | /build.sh | UTF-8 | 265 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Configure and build the project into ./build with CMake (Debug config).
current_dir=$(pwd)
script_dir=$(dirname $0)
if [ $script_dir = '.' ]; then
    script_dir="$current_dir"
fi
# NOTE(review): script_dir is computed above but never used afterwards,
# and this cd is a no-op (we are already in $current_dir).
cd "${current_dir}"
mkdir -p ./build
# .gitkeep keeps the otherwise-ignored build directory in version control.
touch ./build/.gitkeep
cd "${current_dir}/build"
cmake ../
cmake --build . --config Debug --target all
| true |
0a87b40f99b138c929dd4bfe9a0c9b63b9836032 | Shell | jrubinator/jrubin-cli | /scripts/git/gc | UTF-8 | 208 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# checkout or commit: dispatch to "git checkout" unless the first argument
# is a leading-dash option other than -b (e.g. -m, -a), which implies
# "git commit". No arguments at all also means commit.
flag=${1-}                       # empty when invoked with no arguments
command="checkout"
if [ -n "$flag" ]; then          # quoted test: safe for any argument value
    if [[ "$flag" =~ ^- && "$flag" != '-b' ]]; then
        command="commit"
    fi
else
    command="commit"
fi
git "$command" "$@"
| true |
23cafe21fe0d127f187d3b0465658744c8237886 | Shell | kyma-project/test-infra | /prow/images/cleaner/cleaner.sh | UTF-8 | 781 | 3.671875 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Fail fast: abort on any command or pipeline stage failure.
set -e
set -o pipefail
# Both env vars are mandatory for gcloud auth below.
if [ -z "$CLOUDSDK_CORE_PROJECT" ]; then
    echo "Environment variable CLOUDSDK_CORE_PROJECT is empty"
    exit 1
fi
if [ -z "$GOOGLE_APPLICATION_CREDENTIALS" ]; then
    echo "Environment variable GOOGLE_APPLICATION_CREDENTIALS is empty"
    exit 1
fi
echo "Authenticating to Google Cloud..."
gcloud config set project "${CLOUDSDK_CORE_PROJECT}"
gcloud auth activate-service-account --key-file "${GOOGLE_APPLICATION_CREDENTIALS}"
# Get list of ssh-keys, remove header line and print only first column which is key
out=$(gcloud compute os-login ssh-keys list | sed '1 d' | awk -F "\\t" '{print $1}')
# Unquoted ${out} intentionally word-splits into one key id per iteration.
for id in ${out}; do
    echo "Removing key ${id} ..."
    gcloud compute os-login ssh-keys remove --key "${id}"
done;
echo "DONE"
330327ac5ae7c020e94ff620067ed2244c44b3e8 | Shell | m-natsume/equlipse | /openstack-install/sh/05.add-ctrl-tolxd.sh | UTF-8 | 271 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#!/bin/bash
# Deploy each application listed in file $1 onto LXD containers on the
# machines named in $2 (also written as TAG= into inventory.sh).
# NOTE(review): this second shebang line is redundant (the real one
# precedes it) and acts only as a comment.
sed -i "s/^TAG=.*/TAG=$2/g" inventory.sh
# NOTE(review): TARGETS is computed here but never used below — verify
# whether it was meant to replace $MACHINES.
TARGETS=$(./inventory.sh --list|jq .sys.hosts[] -r)
MACHINES=$2
APPLIST=$1
for i in $(echo ${MACHINES})
do
 echo "=== $i ==="
 # Bare `read` pauses for an operator keypress before each machine.
 read
 # xargs -t echoes each juju command before running it; % is the app name.
 cat ${APPLIST}| xargs -I% -t juju add-unit % --to lxd:${i}
done
| true |
dd766734f313ecd14e722a163650674931f6bb00 | Shell | zhaowenjun333/workspace | /linux/software/crontab/elastic_imp.sh | UTF-8 | 11,783 | 3.359375 | 3 | [] | no_license | #!/bin/sh
# Notes:
# 1. Before use, adjust the connection settings and the JDK path below.
# 2. Usage: elastic_imp.sh user 10000 — "user" is the index name,
#    10000 is the number of rows imported per loop iteration.
# NOTE(review): shebang says /bin/sh but the script uses bashisms
# (BASH_SOURCE, arrays, ${var//}) — it must run under bash.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
bin=${DIR}/../bin
lib=${DIR}/../lib
# JDBC connection parameters.
jdbc_url="jdbc:postgresql://10.163.15.131:5432/wwxiu"
jdbc_user="postgres"
jdbc_pass="pk2014"
# Elasticsearch parameters.
elas_cluster="wawatest"
elas_host="10.173.35.136"
elas_port="9300"
elas_url="http://test.wawachina.cn:9200"
# Adjust the JDK path here.
export JAVA_HOME=/opt/wwxiu/elasticsearch/jdk1.8.0_91
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$JAVA_HOME/bin:$PATH
# Log file path (one file per day).
curdate=`date +%Y-%m-%d`
#curdate="2016-06-15"
logfile="logs/ela-imp-${curdate}.log"
# Rows imported per loop iteration (overridable via $2, see setLimitNum).
limitnum=10000
cur_index_name=""
cur_import_sql=""
# Cached SQL template; unlike cur_import_sql it is never rewritten
# inside the loop, so placeholder substitution always starts fresh.
cur_import_sql_cache=""
max_id=-1
# System error flag, 100 = max_id query failed (currently unused).
sys_error=0
# Query Elasticsearch for the current maximum document id (sort by id
# descending, size 1). The raw response is cached in ./temp_file, parsed
# with awk, and the resulting id (0 when the index is empty) is echoed to
# stdout for command-substitution callers. Chinese strings below are
# runtime log messages and are kept verbatim.
# Globals read: elas_url, cur_index_name, logfile; written: elas_cur_max_id,
# sys_error (100 on curl failure).
function queryCurElasticMaxId(){
    # Default the parsed max index id to 0.
    elas_cur_max_id=0
    #sh rm -rf temp_file
    #echo "从elasticsearch中按ID倒序查询max_id,并缓存到临时文件temp_file中">>${logfile}
    cur_els_url=${elas_url}"/"${cur_index_name}
    echo "查询最大ID的cur_els_url=${cur_els_url}">>${logfile}
    curl -o temp_file -XPOST ''${cur_els_url}'/_search?pretty' -d '{"query":{"bool":{"must":[{"range":{"id":{"gt":"1"}}}],"must_not":[],"should":[]}},"from":0,"size":1,"sort":{ "id" : {"order" : "desc"}},"aggs":{}}'
    last_line=`echo $?`
    if [ $last_line -eq 0 ]
    then
        echo "查询最新max_id成功">>${logfile}
    else
        echo "查询最新max_id失败">>${logfile}
        sys_error=100
    fi
    #echo "开始解析临时文件,获取字段名称和值">>${logfile}
    # Parse the temp file: grab the field name and value of the "id" line.
    arr=(`awk '/"id"/{print $1,$NF}' temp_file`)
    col_name=${arr[0]}
    col_value=${arr[1]}
    #echo "检索的字段名称col_name=$col_name">>${logfile}
    #echo "检索的字段数值col_value=$col_value">>${logfile}
    if [ -z "$col_value" ]
    then
        echo "当前最大索引ID为空,设为0">>${logfile}
        elas_cur_max_id=0
    else
        # Strip a trailing comma left over from the JSON response.
        elas_cur_max_id=${col_value%%,}
    fi
    echo "查询最新max_id=${elas_cur_max_id}">>${logfile}
    echo $elas_cur_max_id
}
# Import one batch from the database into Elasticsearch.
# Arguments, in order: $1 - loop iteration number (for logging only),
# $2 - current max id (exclusive lower bound for the batch).
# Pipes a JDBC-importer JSON config into the xbib JDBCImporter; the SQL
# comes from setCurImportSQL with placeholders already substituted.
function importDataFromDB(){
    # Max id for this batch, taken from the second argument.
    cur_max_id=$2
    echo "第$1次导入,limitnum=${limitnum},max_id=${cur_max_id}">>${logfile}
    cur_import_sql=$(setCurImportSQL ${cur_max_id} ${limitnum})
    echo "第$1次导入,执行导入时cur_import_sql=${cur_import_sql}">>${logfile}
    # Run the import.
    echo "开始导入数据。。。">>${logfile}
    echo '{
        "type" : "jdbc",
        "jdbc" : {
            "url" : "'${jdbc_url}'",
            "user" : "'${jdbc_user}'",
            "password" : "'${jdbc_pass}'",
            sql : "'${cur_import_sql}'",
            locale:"zh_CN",
            elasticsearch : {
                 "cluster" : "'${elas_cluster}'",
                 "host" : "'${elas_host}'",
                 "port" : '${elas_port}'
            },
            index : "'${cur_index_name}'",
            type : "'${cur_index_name}'"
        }
    }
    ' | java \
        -cp "${lib}/*" \
        -Dlog4j.configurationFile=${bin}/log4j2.xml \
        org.xbib.tools.Runner \
        org.xbib.tools.JDBCImporter
}
# Import loop: repeatedly pull batches from the DB into Elasticsearch
# until the max id stored in Elasticsearch stops growing.
# Globals read: cur_index_name, cur_import_sql, logfile.
function startLoopImport(){
    echo " ">>${logfile}
    echo "开始循环导入">>${logfile}
    # Current import index.
    #cur_index_name=$1
    # Current import SQL fragments (legacy parameters, now globals).
    #cur_import_sql_a=$2
    #cur_import_sql_b=$3
    echo "当前导入索引cur_index_name=${cur_index_name}">>${logfile}
    echo "当前导入cur_import_sql=${cur_import_sql}">>${logfile}
    # Ask Elasticsearch for the current maximum id (sorted descending).
    cur_loop_max_id=$(queryCurElasticMaxId $cur_index_name)
    # Loop iteration counter.
    cur_loop_num=0;
    # Max id observed in the previous iteration; -1 forces at least one pass.
    pre_loop_max_id=-1
    echo "循环导入前,pre_loop_max_id=${pre_loop_max_id},cur_loop_max_id=${cur_loop_max_id}">>${logfile}
    # Keep importing while the stored max id grew during the last pass.
    while [ $pre_loop_max_id -lt $((cur_loop_max_id)) ];
    do
        # Bump the iteration counter.
        let ++cur_loop_num;
        echo " ">>${logfile}
        # Run one batch; arguments: iteration number, current max id.
        echo "**********************开始第${cur_loop_num}次导入 `date '+%Y-%m-%d %H:%M:%S'`**********************">>${logfile}
        echo "第$cur_loop_num次导入,pre_loop_max_id=${pre_loop_max_id},cur_loop_max_id=${cur_loop_max_id}">>${logfile}
        importDataFromDB $cur_loop_num $cur_loop_max_id
        # Remember this pass's max id, then re-query the new one.
        pre_loop_max_id=${cur_loop_max_id}
        cur_loop_max_id=$(queryCurElasticMaxId)
        echo "结束本次导入时,pre_loop_max_id=${pre_loop_max_id},cur_loop_max_id=${cur_loop_max_id}">>${logfile}
        echo "**********************结束第${cur_loop_num}次导入 `date '+%Y-%m-%d %H:%M:%S'`**********************">>${logfile}
        echo " ">>${logfile}
        #sleep 1;
    done
    return 0;
}
# Build cur_import_sql by splicing an id predicate and a LIMIT clause
# between the two SQL fragments cur_import_sql_a / cur_import_sql_b.
# $1 - exclusive lower bound for ${cur_table_id}; $2 - LIMIT row count.
# Sets the globals sql_max_id, sql_limitnum and cur_import_sql.
function setCurImportSQLBySplit(){
    sql_max_id=" ${cur_table_id}>${1}"
    sql_limitnum="limit ${2}"
    cur_import_sql="${cur_import_sql_a}${sql_max_id} and ${cur_import_sql_b} ${sql_limitnum}"
}
# Substitute the param_max_id / param_limitnum placeholders in the cached
# SQL template and print the resulting statement.
# $1 - current max id (exclusive lower bound); $2 - LIMIT row count.
# Reads the global cur_import_sql_cache (the pristine template — the
# cur_import_sql global is overwritten on every loop iteration).
function setCurImportSQL(){
    sql_max_id=$1
    sql_limitnum=$2
    cur_import_sql=${cur_import_sql_cache//param_max_id/${sql_max_id}}
    cur_import_sql=${cur_import_sql//param_limitnum/${sql_limitnum}}
    # Quoted: unquoted, a template containing '*' (e.g. "select *") would
    # be glob-expanded against the cwd and whitespace runs collapsed.
    echo "${cur_import_sql}"
}
# Set the per-iteration import batch size from $1; keep the current
# default (global limitnum) when no argument is given. The Chinese
# strings are runtime log messages appended to ${logfile}.
function setLimitNum(){
    # Quoted with a default: an absent argument now tests cleanly instead
    # of relying on an unquoted [ -z ] accidentally evaluating to true.
    if [ -z "${1-}" ]
    then
        echo "第一个参数每次循环最大导入数量参数为空,取默认值为${limitnum}。(设置示例**.sh 100)">>${logfile}
    else
        limitnum=$1
        echo "设置每次循环最大导入数量为limitnum=${limitnum}">>${logfile}
    fi
}
# Per-run log header.
# NOTE(review): ${index_name} is never assigned anywhere in this script,
# so it expands empty in the banner below — $1 was probably intended.
echo " ">>${logfile}
echo "=======================================开始本次导入索引${index_name} `date '+%Y-%m-%d %H:%M:%S'`=======================================">>${logfile}
# Batch size: $2 is the optional second CLI argument.
setLimitNum $2
# sql_appad is a test index, kept for reference.
# param_max_id / param_limitnum are placeholders substituted at run time.
sql_appad="select id as _id,id,picture,url from fo_app_ad faa where id>param_max_id and deleted = false order by id asc limit param_limitnum"
sql_user="select id as _id,id, picture, display_name from fo_user fu where id>param_max_id and deleted = false order by id asc limit param_limitnum"
sql_child="select id as _id,id, picture, display_name from fo_child fu where id>param_max_id and deleted = false order by id asc limit param_limitnum"
sql_topic="select ftp.id as _id,ftp.id, ppic.picture, ftp.title, ftp.content, ftp.create_time from fo_topic_post ftp left join "
sql_topic=${sql_topic}"(select post_id,picture from fo_topic_post_picture where id in (select min(id) from fo_topic_post_picture where deleted = false group by post_id)) "
sql_topic=${sql_topic}"as ppic on ftp.id = ppic.post_id where ftp.id>param_max_id and ftp.deleted = false order by ftp.id asc limit param_limitnum"
sql_childrecord="select c.id as _id, c.id, d.picture, c.content, c.create_time from ( "
sql_childrecord=${sql_childrecord}"select id,content,create_time from fo_child_record where deleted = false and purview=1 and id>param_max_id order by id limit param_limitnum"
sql_childrecord=${sql_childrecord}") as c left join ( "
sql_childrecord=${sql_childrecord}"select id,child_record_id,picture from fo_child_record_picture where id in( "
sql_childrecord=${sql_childrecord}"select a.id from ( "
sql_childrecord=${sql_childrecord}"select min(id) as id ,t.child_record_id from fo_child_record_picture t where t.child_record_id in( "
sql_childrecord=${sql_childrecord}"select id from fo_child_record where deleted = false and purview=1 and id>param_max_id order by id limit param_limitnum "
sql_childrecord=${sql_childrecord}") group by t.child_record_id "
sql_childrecord=${sql_childrecord}") as a "
sql_childrecord=${sql_childrecord}") "
sql_childrecord=${sql_childrecord}") as d on c.id = d.child_record_id order by c.id "
# Dispatch on the index name given as $1; each arm installs the index
# name plus SQL template, then runs the import loop.
case $1 in
    appad)
        echo "获取索引参数值为 appad ">>${logfile}
        cur_index_name="appad"
        cur_import_sql=${sql_appad}
        cur_import_sql_cache=${sql_appad}
        echo "配置文件中import_sql=${cur_import_sql} ">>${logfile}
        startLoopImport
    ;;
    child)
        echo "获取索引参数值为 child ">>${logfile}
        cur_index_name="child"
        cur_import_sql=${sql_child}
        cur_import_sql_cache=${sql_child}
        echo "配置文件中import_sql=${cur_import_sql} ">>${logfile}
        startLoopImport
    ;;
    user)
        echo "获取索引参数值为 user ">>${logfile}
        cur_index_name="user"
        cur_import_sql=${sql_user}
        cur_import_sql_cache=${sql_user}
        echo "配置文件中import_sql=${cur_import_sql} ">>${logfile}
        startLoopImport
    ;;
    childrecord)
        echo "获取索引参数值为 childrecord ">>${logfile}
        cur_index_name="childrecord"
        cur_import_sql=${sql_childrecord}
        cur_import_sql_cache=${sql_childrecord}
        echo "配置文件中import_sql=${cur_import_sql} ">>${logfile}
        startLoopImport
    ;;
    topic)
        echo "获取索引参数值为 topic ">>${logfile}
        cur_index_name="topic"
        cur_import_sql=${sql_topic}
        cur_import_sql_cache=${sql_topic}
        echo "配置文件中import_sql=${cur_import_sql} ">>${logfile}
        startLoopImport
    ;;
    all)
        # "all" re-invokes this script once per index, in this order.
        echo " index_name=all 会执行import.sh本次,依次会导入child、user、topic、childrecord">>${logfile}
        sh elastic_imp.sh child $2
        sh elastic_imp.sh user $2
        sh elastic_imp.sh topic $2
        sh elastic_imp.sh childrecord $2
    ;;
    *)
        echo "请输入导入参数"
        echo "示例:./import.sh child 10000。 child为索引名称,10000为每次导入数量。"
        echo "索引名称可为child、user、topic、childrecord、all。all代表导入所有"
esac
echo "=======================================结束本次导入索引${index_name} `date '+%Y-%m-%d %H:%M:%S'`=======================================">>${logfile}
| true |
fb13c0a73443126b23e562b764e5922c8c758221 | Shell | rafaferri44/ScriptsShell-Python | /gitclone.sh | UTF-8 | 803 | 3.8125 | 4 | [] | no_license | #!/bin/bash
# List the plugin directories on the remote git server, print them as a
# numbered menu, then hand off to organizar for the selection.
# NOTE(review): ('IP') looks like a sanitized placeholder and is NOT valid
# shell — it must be replaced with the real host before this script runs.
# NOTE(review): parses `ls -l` output over ssh (fragile with spaces).
function carpeta {
	declare -a archivo=(`ssh gestiweb@('IP') ls -l /git/web/facturascripts/ | grep ^d | rev | cut -d' ' -f1 | rev`)
	list=`echo ${#archivo[@]}`
	count=0;
	while [ $count != $list ];do
		echo -n $count-
		echo ${archivo[$count]} | cut -d"/" -f 6
		let count=$count+1
	done
	organizar
}
# Prompt for a menu index, clone the chosen plugin, then rename the clone
# to the plugin's display name taken from its facturascripts.ini.
# Reads the global array ${archivo[@]} filled by carpeta.
# NOTE(review): ('IP') is a sanitized placeholder, not valid shell.
function organizar {
	read -p "Elije el plugin a clonar -> " option
	pluginName=`echo ${archivo[$option]}`
	echo "Plugin selecionado -> $pluginName"
	git clone gestiweb@('IP'):/git/web/facturascripts/$pluginName
	mv $pluginName `cat $pluginName/facturascripts.ini | grep ^name | cut -d"=" -f2 | tr "'" " "`
}
# Confirmation prompt; anything other than S/s exits without contacting
# the server. NOTE(review): unquoted $a makes the test fail with a syntax
# error on an empty answer, and [ -o ] is deprecated — candidates to fix.
read -p "Se recojeran los datos de sus plugins,desea continuar S/n? " a
if [ $a = "S" -o $a = "s" ];then
	clear
	echo "Aqui estan todos los plugins ->"
	carpeta
else
	exit
fi
| true |
0e78a235d23d89e318f9787a0f39695480ab5416 | Shell | fishd72/Dotfiles | /.zshrc | UTF-8 | 3,646 | 3.375 | 3 | [] | no_license | #
# .zshrc
#
# @author Jeff Geerling
#
# Colors.
unset LSCOLORS
export CLICOLOR=1
export CLICOLOR_FORCE=1
# Don't require escaping globbing characters in zsh.
unsetopt nomatch
# Enable plugins.
plugins=(git brew history history-substring-search)
# Custom $PATH with extra locations.
export PATH="/Users/fishd/.pyenv/bin:$PATH:/Users/fishd/Library/TinyTeX/bin/universal-darwin"
# Bash-style time output.
export TIMEFMT=$'\nreal\t%*E\nuser\t%*U\nsys\t%*S'
# Include alias file (if present) containing aliases for ssh, etc.
if [ -f ~/.aliases ]
then
source ~/.aliases
fi
# stuff.
if [ -f ~/.zsh/agkozak-zsh-prompt/agkozak-zsh-prompt.plugin.zsh ]
then
AGKOZAK_BLANK_LINES=1
AGKOZAK_CUSTOM_SYMBOLS=( '⇣⇡' '⇣' '⇡' '+' 'x' '!' '>' '?' )
AGKOZAK_PROMPT_DIRTRIM=2
AGKOZAK_USER_HOST_DISPLAY=1
AGKOZAK_MULTILINE=0
AGKOZAK_COLORS_USER_HOST=cyan
AGKOZAK_COLORS_PATH=green
source ~/.zsh/agkozak-zsh-prompt/agkozak-zsh-prompt.plugin.zsh
else
# Nicer prompt.
export PS1=$'\n'"%F{cyan} %*%F %F{green}%2~ %F{white}$ "
fi
# Allow history search via up/down keys.
if [ -f ~/.zsh/zsh-history-substring-search/zsh-history-substring-search.zsh ]
then
source ~/.zsh/zsh-history-substring-search/zsh-history-substring-search.zsh
bindkey "$terminfo[kcuu1]" history-substring-search-up
bindkey "$terminfo[kcud1]" history-substring-search-down
fi
if [ -f ~/.zsh/zsh-autosuggestions/zsh-autosuggestions.zsh ]
then
source ~/.zsh/zsh-autosuggestions/zsh-autosuggestions.zsh
fi
# history
HISTFILE=~/.zsh_history
HISTSIZE=10000
SAVEHIST=10000
setopt EXTENDED_HISTORY # write the history file in the ":start:elapsed;command" format.
setopt HIST_REDUCE_BLANKS # remove superfluous blanks before recording entry.
setopt SHARE_HISTORY # share history between all sessions.
setopt HIST_IGNORE_ALL_DUPS # delete old recorded entry if new entry is a duplicate.
# Git upstream branch syncer.
# Usage: gsync master (checks out master, pulls upstream, pushes origin).
function gsync() {
  if [[ ! "$1" ]] ; then
    echo "You must supply a branch."
    return 0
  fi

  # Quoted "$1" so a branch argument can never word-split or glob;
  # local keeps the temp variable out of the interactive shell's scope.
  local branches
  branches=$(git branch --list "$1")
  if [ ! "$branches" ] ; then
    echo "Branch $1 does not exist."
    return 0
  fi

  git checkout "$1" && \
  git pull upstream "$1" && \
  git push origin "$1"
}
# Python fix for Ansible
export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
# Grab Ansible Vault password file
export ANSIBLE_VAULT_PASSWORD_FILE=/etc/ansible/.vaultpassword
# Delete a given line number in the known_hosts file.
# $1 - 1-based line number; any non-numeric argument takes the error path.
# NOTE(review): `sed -i ''` is the BSD/macOS in-place form; GNU sed would
# need plain -i (this is a macOS dotfile, see CLICOLOR usage above).
knownrm() {
  re='^[0-9]+$'
  if ! [[ $1 =~ $re ]] ; then
    echo "error: line number missing" >&2;
  else
    sed -i '' "$1d" ~/.ssh/known_hosts
  fi
}
if which pyenv >/dev/null; then
eval "$(pyenv init -)"
fi
if which pyenv-virtualenv-init >/dev/null; then
eval "$(pyenv virtualenv-init -)"
fi
# Function for pandoc
# md2word <input.md> <output.docx> — convert markdown to Word via pandoc.
# NOTE(review): `alias md2word=md2word` is a self-alias with no effect;
# kept to preserve the original file layout.
alias md2word=md2word
function md2word () {
    # Probe for pandoc via its exit status; '>' replaces the odd '>>'
    # append to /dev/null (identical effect on the null device).
    PANDOC_INSTALLED=$(pandoc --version > /dev/null; echo $?)
    if [ "0" = "${PANDOC_INSTALLED}" ]; then
        # Quoted operands: filenames containing spaces now survive.
        pandoc -o "$2" -f markdown -t docx "$1"
    else
        echo "Pandoc is not installed. Unable to convert document."
    fi
}
# md2pdf <input.md> <output.pdf> — convert markdown to PDF via pandoc.
# NOTE(review): `alias md2pdf=md2pdf` is a self-alias with no effect;
# kept to preserve the original file layout.
alias md2pdf=md2pdf
function md2pdf () {
    # Probe for pandoc via its exit status ('>' instead of the odd '>>').
    PANDOC_INSTALLED=$(pandoc --version > /dev/null; echo $?)
    if [ "0" = "${PANDOC_INSTALLED}" ]; then
        # Quoted operands: filenames containing spaces now survive.
        pandoc -o "$2" -f markdown -t pdf "$1"
    else
        echo "Pandoc is not installed. Unable to convert document."
    fi
}
# Include local config if present
if [ -f ~/.zshrc_local ]; then
source ~/.zshrc_local
fi
if [ -f ~/.zsh/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh ]
then
source ~/.zsh/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
fi | true |
c48f9d364ca8f9d64765fdc6a944b67a178ffa91 | Shell | jumpinjackie/mapguide-fdo-docker-build | /mg_checkout.sh | UTF-8 | 210 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Check out / update the mapguide SVN working copy, retrying every 5s
# until `svn update` succeeds (network interruptions are common here).
svn cleanup mapguide
svn update mapguide
# $? here is the status of the most recent `svn update`.
while [ $? -ne 0 ]
do
    echo "SVN update interrupted. Retrying after 5s"
    sleep 5s
    svn cleanup mapguide
    svn update mapguide
done
echo "SVN update complete"
| true |
d804ca701fd09d0c5f63b769da0c38ea7b9a47c8 | Shell | slalomsv/simple-app | /ci/deploy.sh | UTF-8 | 707 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Package the working tree, copy it to the EC2 host and unpack it into a
# fresh simple-app directory. getval comes from ci/helpers.sh.
source ./ci/helpers.sh
PACKAGE_NAME="simple-app.tar.bz2"
PUBLIC_IP=$(getval ci_vars PUBLIC_IP)
echo ">>> Building deploy package $PACKAGE_NAME"
tar -cjf "../$PACKAGE_NAME" * --exclude=id_rsa
if [ $? != 0 ]; then
    echo "Build package failed. Exiting"
    exit 1
fi
echo ">>> Transfering package to EC2 instance"
scp "../$PACKAGE_NAME" ubuntu@$PUBLIC_IP:~
if [ $? != 0 ]; then
    echo "Transfer failed. Exiting"
    exit 1
fi
# Sanity check: confirm the archive landed on the remote host.
ssh ubuntu@$PUBLIC_IP "ls -lart | grep $PACKAGE_NAME"
echo ">>> Remove and recreate simple-app directory"
ssh ubuntu@$PUBLIC_IP 'rm -rf simple-app && mkdir simple-app'
echo ">>> Extract package contents"
ssh ubuntu@$PUBLIC_IP "tar -xjf $PACKAGE_NAME -C simple-app"
| true |
4209aa99f19384bf83fb8671ae47676f01b8bfb8 | Shell | Alsan/turing-chunk07 | /build/stratos2/openstack/client/allocate_floating_ips.sh | UTF-8 | 154 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Allocate $1 OpenStack floating IPs (default 1), pausing 10s between
# requests to avoid rate limiting. Credentials come from ./demorc.
source ./demorc
num_ips=$1
if [ -z $num_ips ]; then
  num_ips=1
fi
for I in `seq 1 $num_ips`; do nova floating-ip-create; sleep 10; done
| true |
7867a82284fe60e720a3673df3aa1393f80f8d8b | Shell | ZwodahS/dotfiles | /bin/decrypt.sh | UTF-8 | 349 | 3.640625 | 4 | [] | no_license | #!/usr/bin/env bash
# decrypt <file_in> [aes|des] [extra openssl args...]
# Decrypts a base64-armored, salted OpenSSL file; defaults to AES-256-CBC.
if [ "$1" == '' ]; then
    echo 'decrypt <file_in> <algorithm[des3|aes] $others'
    exit 1
fi

FILE=$1
ALGO=aes
# POSIX deprecates [ a -o b ]; two tests joined with || are unambiguous.
if [ "$2" == 'aes' ] || [ "$2" == 'des' ]; then
    ALGO=$2
fi
shift
shift

# Map the short algorithm name onto the real openssl cipher name.
if [ "$ALGO" == 'aes' ]; then
    ALGO='aes-256-cbc'
elif [ "$ALGO" == 'des' ]; then
    ALGO='des3'
fi

# "$@" keeps each remaining argument intact; the original unquoted $*
# re-split arguments containing spaces.
openssl "$ALGO" -a -d -salt -in "$FILE" "$@"
| true |
2df6a51c820d64848d3e5e9557ba089ce6befac7 | Shell | Legun/Nautilus-Scripts | /Scripts/Archiving/Ubuntu-One-Encrypt-Decrypt.sh | UTF-8 | 6,004 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# Ubuntu One Encryption for Files and Directories.
# Script by Michael B Stevens, Oct. 2011, Version 1.
# Replace named numbers with digits,stevensfourthousand AT earthlink.net
# Please send suggested improvements.
# Suggested SCRIPTNAME: "U1_encrypt_decrypt_v1.sh"
# Loosely based on a script by Adam Buchanan.
#
# QUICK START:
# This is a gnome-nautilus-unity specific script --
# it's location is important:
# 1) Copy script to your home's "~/.gnome2/nautilus-scripts" directory.
# 2) Make script executable there:
# preferences/permission-tab/ check executable box
# or, in a terminal, chmod +x SCRIPTNAME.
# HOW TO USE:
# 1) Highlight files or directories you want to encrypt or decrypt.
# 2) Right click and choose "Scripts".
# 3) Chose this SCRIPTNAME from the drop-down list.
# (If this script is missing, you may have to display
# the scripts directory in a nautilus window at least once first.)
# 4) Choose whether to encrypt or decrypt.
# BAH -- HUMBUG!
# Too many password prompts?
# Gpg's use-agent is probably getting prompts that it doesn't need.
# Do this:
# Open ~/.gnupg/gpg.conf in an editor;
# Comment out the "use-agent" line (prefix it with "#");
# Kill off the gpg-agent service ("sudo killall -9 gpg-agent").
# Enjoy your more prompt-free environment.
# WHAT HAPPENS when you run this script?
# Encrypted stuff from anywhere on your computer
# is copied into the Ubuntu one direcory.
# Decrypted files from anywhere on your computer
# will be written into the ~/U1decrypt directory,
# which is created if necessary.
# The Ubuntu One directory will not be disturbed when you decrypt
# files and directories from it.
# All your local cloud info will migrate to these two directories,
# and the cloud never sees your decrypted information.
# The program assumes you have normal Linux facilities -- gpg, tar,
# sed, zenity dialogs, and Bash 3.2 or later and an Ubuntu One
# cloud subscription.
# Gpg public keys are not used; encryption is simply symmetrical --
# just a password is used.
# Be sure to save or remember the password, because there
# is _no_ other way back into your information.
#-----------------------------------------------------------------------
# SCRIPT:
#-----------------------------------------------------------------------
IFS=$'\t\n'
# Internal field separator, usually space-tab-newline; ' \t\n'.
# "Ubuntu One," has a space, for instance, that could cause problems.
# NOTE(review): many expansions below are unquoted and only work because
# IFS no longer contains a space — do not revert IFS without quoting.
# Find current user and Ubuntu One directory.
this_user=$(whoami)
ubuntu1="/home/$this_user/Ubuntu One"
U1decrypt="/home/$this_user/U1decrypt"
# Assure required folders are there.
if [ ! -d $ubuntu1 ]; then
  zenity --warning --text="Ubuntu One directory missing.\nExiting."
  exit 0
fi
if [ ! -d $U1decrypt ]; then
  mkdir $U1decrypt
fi
# Set direction: To encrypt or to decrypt.
direction=$(zenity \
 --list \
 --radiolist \
 --title="Encrypt or Decrypt:" \
 --column " - " \
 --column "direction" \
 'FALSE' "encrypt" \
 'FALSE' "decrypt")
pass=$(zenity --entry --hide-text \
 --text="Enter password" --title="Password:")
# encrypt / decrypt
# Nautilus hands the selection as newline-separated paths; the custom IFS
# makes the $(echo ...) word-split safely on newlines/tabs only.
if [ $direction = "encrypt" ]; then
  for this_path in $(echo "$NAUTILUS_SCRIPT_SELECTED_FILE_PATHS"); do
    # Strip everything up to the final slash (basename).
    input_file=$(echo $this_path | sed 's/.*\///g')
    if [ -d "$this_path" ]; then # path is directory
      output_path="${ubuntu1}/${input_file}.stgz"
      # Tar the directory and symmetrically encrypt the stream into U1.
      tar czf - ${input_file} |\
      gpg --passphrase ${pass} -c -o ${output_path}
    else # not directory
      output_path="${ubuntu1}/${input_file}.gpg"
      gpg --passphrase=${pass} -o ${output_path} -c ${input_file}
    fi
  done
else # decrypt
  for this_path in $(echo "$NAUTILUS_SCRIPT_SELECTED_FILE_PATHS"); do
    input_file=$(echo $this_path | sed 's/.*\///g')
    if [[ $input_file =~ \.stgz ]]; then # encrypted dir
      # Decrypt and untar directly into the local decrypt directory.
      gpg --passphrase ${pass} --decrypt ${input_file} |\
      tar xzf - --directory=${U1decrypt}
    else # file
      output_file=${input_file%%.gpg}
      output_path="${U1decrypt}/${output_file}"
      gpg --passphrase=${pass} -o ${output_path} ${input_file}
    fi
  done
fi
zenity --info --text="done."
exit 0
#-----------------------------------------------------------------------
# END
# NEED TO MODIFY SCRIPT?
# I release this under Gnu Public License, so you may modify it
# as needed. The material below may help:
# The magic terseness of pipes may not be for everyone.
# Watch out for every character in the piped
# gpg/tar commands, they're touchy and are
# tricky to get just right. You could instead go with
# something like the following that uses a temporary file
# if you intend to modify this script
# for some other purpose:
# Alternate directory encrypt
#tar czf "${input_file}.tar" $input_file
#gpg -c -o $output_path "${input_file}.tar"
# Alternate directory-tar-file decrypt
#midf="${U1decrypt}/${input_file}.temp"
#gpg -o ${midf} ${input_file}
#tar xzf ${midf} -C ${U1decrypt}
#rm $midf
# I like things quick and simple -- but
# some people prefer a password confirmation dialog.
# One could modify this script's password prompt
# with something like:
#
# Confirm entry and require confirmed password.
# while [[ -z $match ]]
# do # Loop for password.
# pass=$(zenity --entry --hide-text \
# --text="Enter password" --title="Password:")
# pass_conf=$(zenity --entry --hide-text \
# --text="Confirm password" --title="Confirm:")
# if [[ -z $pass ]] # No password.
# then
# zenity --warning \
# --text="Sorry, password required.\n\nExiting."
# exit 0
# elif [[ "$pass" = "$pass_conf" ]]
# then
# match='true'
# continue
# else
# zenity --warning \
# --text="Passwords did not match.\nClick OK to retry."
# fi
# done
| true |
1e54ecc7e463db1b3e04ee90ac40b7956685741e | Shell | giordanorn/dotfiles | /.zshrc | UTF-8 | 633 | 2.5625 | 3 | [] | no_license | #!/bin/zsh
autoload -Uz compinit promptinit
compinit
promptinit
# This will set the default prompt to the walters theme
prompt suse
# Enables auto completion
zstyle ':completion:*' menu select
# Auto copmlete custom aliases
setopt COMPLETE_ALIASES
# Auto complete sudo commands
zstyle ':completion::complete:*' gain-privileges 1
# bind emacs-like shorcuts
bindkey -e
autoload -Uz vcs_info
precmd_vcs_info() { vcs_info }
precmd_functions+=( precmd_vcs_info )
setopt prompt_subst
RPROMPT=\$vcs_info_msg_0_
# PROMPT=\$vcs_info_msg_0_'%# '
zstyle ':vcs_info:git:*' formats '%b'
[ -f "$HOME/.aliases" ] && source "$HOME/.aliases"
| true |
48f28773fc34bd6192f38dbb68d0c8681fc99576 | Shell | netarch/DRing | /src/emp/datacentre/runTestsTree_par.sh | UTF-8 | 2,132 | 3.59375 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
##Use it to run only with one traffic matrix
TOPO=$1
NPORTS=$2
NSVRS=$3
SUBFLOWS=$4
PATHS=$5
OVERSUBSCRIPTION=$6
TMatrix=$7
MAKE=$8
PARAM=$9
PARAMO=${10}
SEED=${11}
SUFFIX=${12}
NINST=3
SUFFIX2=`echo ${PARAM}| sed -e 's/\//$/g'`
tempResultFile=tempresult_$1_$2_$3_$4_$5_$6_$7_${SUFFIX2}_$9_${10}_${11}_${12}
rm -rf $tempResultFile
runsim() {
INST=$1
inputfile=garbage
make="NOMAKE"
if [[ $1 == "1" && $MAKE == "MAKE" ]]; then
make="MAKE"
fi
if [[ $TMatrix == "FILE" ]]; then
PARAM="${PARAM}_${INST}"
fi
echo "${INST}) make: $make"
resultfile=temptreettpp_${SUFFIX}_${INST}
rm -rf $resultfile
./run.sh ${TOPO} ${SUBFLOWS} ${NPORTS} ${PATHS} $inputfile ${NSVRS} 1 ${OVERSUBSCRIPTION} $make $TMatrix $PARAM ${PARAMO} ${SEED} ${SUFFIX} >> $resultfile
tput=`cat $resultfile | grep "avg_throughput" | awk '{print $2}'`
tput_10ile=`cat $resultfile | grep "10ile_throughput" | awk '{print $2}'`
link_usage=`cat $resultfile | grep "avg_link_usage" | awk '{print $2}'`
rm -rf $resultfile
echo "${INST}) $tput $tput_10ile"
echo "$tput $link_usage $tput_10ile" >> $tempResultFile
count=`cat $tempResultFile | wc -l`
echo "count: $count"
if [[ $count == "${NINST}" ]]; then
avg_tput=`cat $tempResultFile | awk '{if(NF>0){sum+=$1; cnt++;}} END{print sum/cnt}'`
avg_link_usage=`cat $tempResultFile | awk '{if(NF>0){sum+=$2; cnt++;}} END{print sum/cnt}'`
avg_tput_10ile=`cat $tempResultFile | awk '{if(NF>0){sum+=$3; cnt++;}} END{print sum/cnt}'`
rm -rf $tempResultFile
echo "Avg throughput ( $SUFFIX ): $avg_tput"
avg_tput2=`python -c "print ${avg_tput} * ${OVERSUBSCRIPTION}"`
echo "Avg throughput 2 ( $SUFFIX ): $avg_tput2"
echo "10ile throughput ( $SUFFIX ): $avg_tput_10ile"
echo "Avg Link Usage ( $SUFFIX ): $avg_link_usage"
#echo "Avg throughput: $avg_tput" >> $tempResultFile
fi
}
for INST in `seq 1 ${NINST}`;
do
runsim $INST &
pids[${INST}]=$!
if [[ $INST == "1" && $MAKE == "MAKE" ]]; then
sleep 30 #wait for makefile to finish, hacky but whatever
fi
sleep 3
done
for pid in ${pids[@]};
do
wait $pid;
done
| true |
3f7882b78d128f45c5d36de2d488fd56b8f4c2a3 | Shell | gpaOliveira/NewPythonApp | /bootstrap.sh | UTF-8 | 852 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# upgrade all
sudo add-apt-repository ppa:jonathonf/python-3.6
sudo apt-get update
sudo apt-get -y upgrade
# install basic python dependencies - https://www.digitalocean.com/community/tutorials/how-to-install-python-3-and-set-up-a-local-programming-environment-on-ubuntu-16-04
sudo apt-get install build-essential libssl-dev libffi-dev python-dev --yes
# install python - https://snakeycode.wordpress.com/2017/11/18/working-in-python-3-6-in-ubuntu-14-04/
sudo apt-get install python3.6 --yes
rm -f /home/vagrant/.bash_profile
# avoid creating .pyc files
echo 'export PYTHONDONTWRITEBYTECODE=1' >> /home/vagrant/.bash_profile
# alias to make life easier
echo 'alias python=python3.6' >> /home/vagrant/.bash_profile
# load pip
wget https://bootstrap.pypa.io/get-pip.py
sudo python3.6 get-pip.py
sudo python3.6 setup.py install
| true |
256ffe62afb321f792fdbccc436081c03eddebd0 | Shell | fooldoc/lanmp | /function/mysql.sh | UTF-8 | 11,620 | 3.15625 | 3 | [] | no_license | #!/bin/sh
Install_Boost()
{
if check_sys packageManager apt;then
#apt-get update
apt-get install -y python-dev
elif check_sys packageManager yum; then
yum -y install python-devel
fi
tar -jxvf $package_dir/$boost_pag -C $install_dir/
}
MySQL_Opt()
{
if [[ ${MemTotal} -gt 1024 && ${MemTotal} -lt 2048 ]]; then
sed -i "s#^key_buffer_size.*#key_buffer_size = 32M#" /etc/my.cnf
sed -i "s#^table_open_cache.*#table_open_cache = 128#" /etc/my.cnf
sed -i "s#^sort_buffer_size.*#sort_buffer_size = 768K#" /etc/my.cnf
sed -i "s#^read_buffer_size.*#read_buffer_size = 768K#" /etc/my.cnf
sed -i "s#^myisam_sort_buffer_size.*#myisam_sort_buffer_size = 8M#" /etc/my.cnf
sed -i "s#^thread_cache_size.*#thread_cache_size = 16#" /etc/my.cnf
sed -i "s#^query_cache_size.*#query_cache_size = 16M#" /etc/my.cnf
sed -i "s#^tmp_table_size.*#tmp_table_size = 32M#" /etc/my.cnf
sed -i "s#^innodb_buffer_pool_size.*#innodb_buffer_pool_size = 128M#" /etc/my.cnf
sed -i "s#^innodb_log_file_size.*#innodb_log_file_size = 32M#" /etc/my.cnf
sed -i "s#^performance_schema_max_table_instances.*#performance_schema_max_table_instances = 1000#" /etc/my.cnf
elif [[ ${MemTotal} -ge 2048 && ${MemTotal} -lt 4096 ]]; then
sed -i "s#^key_buffer_size.*#key_buffer_size = 64M#" /etc/my.cnf
sed -i "s#^table_open_cache.*#table_open_cache = 256#" /etc/my.cnf
sed -i "s#^sort_buffer_size.*#sort_buffer_size = 1M#" /etc/my.cnf
sed -i "s#^read_buffer_size.*#read_buffer_size = 1M#" /etc/my.cnf
sed -i "s#^myisam_sort_buffer_size.*#myisam_sort_buffer_size = 16M#" /etc/my.cnf
sed -i "s#^thread_cache_size.*#thread_cache_size = 32#" /etc/my.cnf
sed -i "s#^query_cache_size.*#query_cache_size = 32M#" /etc/my.cnf
sed -i "s#^tmp_table_size.*#tmp_table_size = 64M#" /etc/my.cnf
sed -i "s#^innodb_buffer_pool_size.*#innodb_buffer_pool_size = 256M#" /etc/my.cnf
sed -i "s#^innodb_log_file_size.*#innodb_log_file_size = 64M#" /etc/my.cnf
sed -i "s#^performance_schema_max_table_instances.*#performance_schema_max_table_instances = 2000#" /etc/my.cnf
elif [[ ${MemTotal} -ge 4096 && ${MemTotal} -lt 8192 ]]; then
sed -i "s#^key_buffer_size.*#key_buffer_size = 128M#" /etc/my.cnf
sed -i "s#^table_open_cache.*#table_open_cache = 512#" /etc/my.cnf
sed -i "s#^sort_buffer_size.*#sort_buffer_size = 2M#" /etc/my.cnf
sed -i "s#^read_buffer_size.*#read_buffer_size = 2M#" /etc/my.cnf
sed -i "s#^myisam_sort_buffer_size.*#myisam_sort_buffer_size = 32M#" /etc/my.cnf
sed -i "s#^thread_cache_size.*#thread_cache_size = 64#" /etc/my.cnf
sed -i "s#^query_cache_size.*#query_cache_size = 64M#" /etc/my.cnf
sed -i "s#^tmp_table_size.*#tmp_table_size = 64M#" /etc/my.cnf
sed -i "s#^innodb_buffer_pool_size.*#innodb_buffer_pool_size = 512M#" /etc/my.cnf
sed -i "s#^innodb_log_file_size.*#innodb_log_file_size = 128M#" /etc/my.cnf
sed -i "s#^performance_schema_max_table_instances.*#performance_schema_max_table_instances = 4000#" /etc/my.cnf
elif [[ ${MemTotal} -ge 8192 && ${MemTotal} -lt 16384 ]]; then
sed -i "s#^key_buffer_size.*#key_buffer_size = 256M#" /etc/my.cnf
sed -i "s#^table_open_cache.*#table_open_cache = 1024#" /etc/my.cnf
sed -i "s#^sort_buffer_size.*#sort_buffer_size = 4M#" /etc/my.cnf
sed -i "s#^read_buffer_size.*#read_buffer_size = 4M#" /etc/my.cnf
sed -i "s#^myisam_sort_buffer_size.*#myisam_sort_buffer_size = 64M#" /etc/my.cnf
sed -i "s#^thread_cache_size.*#thread_cache_size = 128#" /etc/my.cnf
sed -i "s#^query_cache_size.*#query_cache_size = 128M#" /etc/my.cnf
sed -i "s#^tmp_table_size.*#tmp_table_size = 128M#" /etc/my.cnf
sed -i "s#^innodb_buffer_pool_size.*#innodb_buffer_pool_size = 1024M#" /etc/my.cnf
sed -i "s#^innodb_log_file_size.*#innodb_log_file_size = 256M#" /etc/my.cnf
sed -i "s#^performance_schema_max_table_instances.*#performance_schema_max_table_instances = 6000#" /etc/my.cnf
elif [[ ${MemTotal} -ge 16384 && ${MemTotal} -lt 32768 ]]; then
sed -i "s#^key_buffer_size.*#key_buffer_size = 512M#" /etc/my.cnf
sed -i "s#^table_open_cache.*#table_open_cache = 2048#" /etc/my.cnf
sed -i "s#^sort_buffer_size.*#sort_buffer_size = 8M#" /etc/my.cnf
sed -i "s#^read_buffer_size.*#read_buffer_size = 8M#" /etc/my.cnf
sed -i "s#^myisam_sort_buffer_size.*#myisam_sort_buffer_size = 128M#" /etc/my.cnf
sed -i "s#^thread_cache_size.*#thread_cache_size = 256#" /etc/my.cnf
sed -i "s#^query_cache_size.*#query_cache_size = 256M#" /etc/my.cnf
sed -i "s#^tmp_table_size.*#tmp_table_size = 256M#" /etc/my.cnf
sed -i "s#^innodb_buffer_pool_size.*#innodb_buffer_pool_size = 2048M#" /etc/my.cnf
sed -i "s#^innodb_log_file_size.*#innodb_log_file_size = 512M#" /etc/my.cnf
sed -i "s#^performance_schema_max_table_instances.*#performance_schema_max_table_instances = 8000#" /etc/my.cnf
elif [[ ${MemTotal} -ge 32768 ]]; then
sed -i "s#^key_buffer_size.*#key_buffer_size = 1024M#" /etc/my.cnf
sed -i "s#^table_open_cache.*#table_open_cache = 4096#" /etc/my.cnf
sed -i "s#^sort_buffer_size.*#sort_buffer_size = 16M#" /etc/my.cnf
sed -i "s#^read_buffer_size.*#read_buffer_size = 16M#" /etc/my.cnf
sed -i "s#^myisam_sort_buffer_size.*#myisam_sort_buffer_size = 256M#" /etc/my.cnf
sed -i "s#^thread_cache_size.*#thread_cache_size = 512#" /etc/my.cnf
sed -i "s#^query_cache_size.*#query_cache_size = 512M#" /etc/my.cnf
sed -i "s#^tmp_table_size.*#tmp_table_size = 512M#" /etc/my.cnf
sed -i "s#^innodb_buffer_pool_size.*#innodb_buffer_pool_size = 4096M#" /etc/my.cnf
sed -i "s#^innodb_log_file_size.*#innodb_log_file_size = 1024M#" /etc/my.cnf
sed -i "s#^performance_schema_max_table_instances.*#performance_schema_max_table_instances = 10000#" /etc/my.cnf
fi
}
mysql_init_base(){
#------初始化---------
boot_stop "mysql"
add_group mysql
useradd -g mysql mysql -s /bin/false
mkdir -p $mysql_run_path
mkdir -p $mysql_run_path/data
chown -R mysql:mysql $mysql_run_path/data
rm -rf $cmake_run_path
rm -rf $mysql_run_path
rm -rf /etc/my.cnf
mkdir -p /$web_log_path/mysql
touch /$web_log_path/mysql/mysql_error.log
chown -R mysql:mysql /$web_log_path/mysql
}
mysql_install(){
mysql_init_base
#安装cmake
tar zxvf $package_dir/$cmake_pag -C $install_dir/
cd $cmake_path
error_detect "./configure --prefix=${cmake_run_path}"
error_detect "make"
error_detect "make install"
#安装mysql
tar zxvf $package_dir/$mysql_pag -C $install_dir/
cd $mysql_path
mysql_configure="${cmake_run_path}/bin/cmake ./ -DCMAKE_INSTALL_PREFIX=${mysql_run_path} -DMYSQL_DATADIR=${mysql_run_path}/data -DSYSCONFDIR=/etc -DWITH_MYISAM_STORAGE_ENGINE=1 -DWITH_INNOBASE_STORAGE_ENGINE=1 -DWITH_MEMORY_STORAGE_ENGINE=1 -DWITH_READLINE=1 -DMYSQL_UNIX_ADDR=/tmp/mysqld.sock -DMYSQL_TCP_PORT=3306 -DENABLED_LOCAL_INFILE=1 -DWITH_PARTITION_STORAGE_ENGINE=1 -DEXTRA_CHARSETS=all -DDEFAULT_CHARSET=utf8 -DDEFAULT_COLLATION=utf8_general_ci -DCMAKE_EXE_LINKER_FLAGS='-ljemalloc' -DWITH_SAFEMALLOC=OFF"
error_detect "$mysql_configure"
error_detect "make"
error_detect "make install"
#------------------初始化mysql----------------------
cd $mysql_run_path/
rm -rf my-new.cnf
rm -rf my.cnf
\cp ${mysql_conf}/my.cnf /etc/my.cnf
chmod 644 /etc/my.cnf
cd $mysql_run_path/scripts
./mysql_install_db --user=mysql --basedir=$mysql_run_path --datadir=$mysql_run_path/data
#-----------------------配置环境变量--------------------------------
mysql_bin=$mysql_run_path/bin
if [ -s /etc/profile ] && grep $mysql_bin /etc/profile;then
echo 'has'
else
cat>>/etc/profile<<EOF
PATH=\
${mysql_run_path}/bin:\
\$PATH
export PATH
EOF
fi
source /etc/profile
#------------------------注册服务设置为开机启动-------------------------
\cp $mysql_run_path/support-files/mysql.server /etc/init.d/mysql
chmod +x /etc/init.d/mysql
boot_start "mysql"
#启动mysql
service mysql start
#初始化密码
$mysql_run_path/bin/mysqladmin -u root password 'root'
}
mysql_install_80()
{
mysql_init_base
#个人服务器总内存为2G,内存不够安装8.0-----创建交换分区 并 开启-----
dd if=/dev/zero of=/swapfile bs=64M count=16
mkswap /swapfile
swapon /swapfile
Install_Boost
#安装mysql
tar zxvf $package_dir/$mysql80_pag -C $install_dir/
cd $mysql80_path
mysql_configure="cmake -DCMAKE_INSTALL_PREFIX=${mysql80_run_path} -DSYSCONFDIR=/etc -DWITH_MYISAM_STORAGE_ENGINE=1 -DWITH_INNOBASE_STORAGE_ENGINE=1 -DWITH_PARTITION_STORAGE_ENGINE=1 -DWITH_FEDERATED_STORAGE_ENGINE=1 -DEXTRA_CHARSETS=all -DDEFAULT_CHARSET=utf8mb4 -DDEFAULT_COLLATION=utf8mb4_general_ci -DWITH_EMBEDDED_SERVER=1 -DENABLED_LOCAL_INFILE=1 -DWITH_BOOST=${boost_path}"
error_detect "$mysql_configure"
error_detect "make"
error_detect "make install"
cat > /etc/my.cnf<<EOF
[client]
#password = your_password
port = 3306
socket = /tmp/mysql.sock
[mysqld]
port = 3306
socket = /tmp/mysql.sock
datadir = ${mysql80_run_path}/data
skip-external-locking
key_buffer_size = 16M
max_allowed_packet = 1M
table_open_cache = 64
sort_buffer_size = 512K
net_buffer_length = 8K
read_buffer_size = 256K
read_rnd_buffer_size = 512K
myisam_sort_buffer_size = 8M
thread_cache_size = 8
tmp_table_size = 16M
performance_schema_max_table_instances = 500
explicit_defaults_for_timestamp = true
#skip-networking
max_connections = 500
max_connect_errors = 100
open_files_limit = 65535
default_authentication_plugin = mysql_native_password
log-bin=mysql-bin
binlog_format=mixed
server-id = 1
binlog_expire_logs_seconds = 864000
early-plugin-load = ""
default_storage_engine = InnoDB
innodb_file_per_table = 1
innodb_data_home_dir = ${mysql80_run_path}/data
innodb_data_file_path = ibdata1:10M:autoextend
innodb_log_group_home_dir = ${mysql80_run_path}/data
innodb_buffer_pool_size = 16M
innodb_log_file_size = 5M
innodb_log_buffer_size = 8M
innodb_flush_log_at_trx_commit = 1
innodb_lock_wait_timeout = 50
sql_mode=ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
[mysqldump]
quick
max_allowed_packet = 16M
user=root
password=root
[mysql]
no-auto-rehash
[myisamchk]
key_buffer_size = 20M
sort_buffer_size = 20M
read_buffer = 2M
write_buffer = 2M
[mysqlhotcopy]
interactive-timeout
[mysqld_safe]
malloc-lib=/usr/lib/libjemalloc.so
EOF
#----------安装完成后可以关闭交换分区 并删除交换分区 文件----------
swapoff /swapfile
rm -rf /swapfile
#------------------初始化mysql----------------------
MySQL_Opt
cd $mysql80_run_path/
chmod 644 /etc/my.cnf
/usr/local/mysql/bin/mysqld --initialize-insecure --basedir=${mysql80_run_path} --datadir=${mysql80_run_path}/data --user=mysql
#-----------------------配置环境变量--------------------------------
mysql_bin=$mysql80_run_path/bin
if [ -s /etc/profile ] && grep $mysql_bin /etc/profile;then
echo 'has'
else
cat>>/etc/profile<<EOF
PATH=\
${mysql80_run_path}/bin:\
\$PATH
export PATH
EOF
fi
source /etc/profile
#------------------------注册服务设置为开机启动-------------------------
\cp $mysql80_run_path/support-files/mysql.server /etc/init.d/mysql
chmod +x /etc/init.d/mysql
boot_start "mysql"
#启动mysql
service mysql start
#初始化密码
$mysql80_run_path/bin/mysqladmin -u root password 'root'
}
| true |
e587414e6e4b6e1ae6e8ea2931c106c6227eed20 | Shell | jcnmedeiros/Syncovery-Dlink-Nas | /init.sh | UTF-8 | 286 | 3.265625 | 3 | [] | no_license | #!/bin/sh
# Init, run on NAS boot process
DEBUG=0
if [ -e /tmp/apkg_debug ] ; then
DEBUG=1
fi
if [ $DEBUG == 1 ] ; then
echo "APKG_DEBUG: $0 $@" > /dev/console
fi
path_des=$1
# link for images
[ ! -d /var/www/Syncovery ] && ln -sf ${path_des}/web /var/www/Syncovery
| true |
25fb1ed05b4a83c26700fd526055bea17ec58b98 | Shell | hanka/window_move | /move_to_display | UTF-8 | 606 | 3.203125 | 3 | [] | no_license | #!/bin/bash
unset x y
eval $(xwininfo -id $(xdotool getactivewindow) |
sed -n -e "s/^ \+Absolute upper-left X: \+\([0-9]\+\).*/x=\1/p" \
-e "s/^ \+Absolute upper-left Y: \+\([0-9]\+\).*/y=\1/p" )
direction=$1
display_height=1080
top_margin=52
if [ $direction == "up" ]; then
y_pos=$(($y - $display_height - $top_margin))
if [ $y_pos -ge 0 ]; then
wmctrl -r ":ACTIVE:" -e 0,$x,$y_pos,-1,-1
fi
fi
if [ $direction == "down" ]; then
y_pos=$(($display_height + $y - $top_margin))
if [ $y_pos -lt $(( 2 * $display_height )) ]; then
wmctrl -r ":ACTIVE:" -e 0,$x,$y_pos,-1,-1
fi
fi
| true |
ebf99a0451f4242f9d4f8988cd042554dc3c2699 | Shell | Ramjivan/copier | /Pi Copier Project Files OLD/New/copy-old.sh | UTF-8 | 533 | 3.28125 | 3 | [] | no_license | #!/bin/bash
#get all flesh drives in an array
media="/media/pi"
echo $(ls $media > drives.txt)
a=0
drives="/home/pi/drives.txt"
while IFS= read -r line
do
temp="$media"
temp+="/"
temp+="$line"
usb[$a]=$temp
echo ${usb[$a]}
let a+=1
done <"$drives"
#tLen=${#usb[@]}
for d in ${usb[@]} #(( d=0; i<${tLen}; d++ ));
do
cp -r "SANT VANEE 47" "$d" &
done
echo "runing" > status.txt
echo "copy started LED status blinking"
wait
echo "ready" > status.txt
echo "Copy completed"
shutdown -r now
exit 0 | true |
b45b80c3a3b1525d2a73ff99dcb410236be4760a | Shell | mntyfrsh/navcoin-core | /preinstall-pak | UTF-8 | 1,350 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/bin/sh
#
SERVICE_NAME=navcoin
ARCH=`dpkg --print-architecture`
# if odroid user does not exist then create
set +e
id -u odroid > /dev/null 2>&1
USER_EXISTS=$?
set -e
if [ ! $USER_EXISTS -eq 0 ]; then
# add odroid user without password for non-ARM arch
if [ $ARCH = "amd64" ] || [ $ARCH = "i386" ]; then
useradd -m -G sudo,ssh,users -u 6021 -s /bin/bash odroid > /dev/null 2>&1
mkdir /home/odroid/.navcoin4/ && chown -R odroid:odroid /home/odroid/.navcoin4/
else
# add odroid user and set password for ARM arch
useradd -m -G sudo,ssh,users -u 6021 -p Om16ojfOaLNA6 -s /bin/bash odroid > /dev/null 2>&1
mkdir /home/odroid/.navcoin4/ && chown -R odroid:odroid /home/odroid/.navcoin4/
fi
fi
# check if service is installed and stop for upgrade
if [ -f "/lib/systemd/system/$SERVICE_NAME.service" ] || [ -f "/usr/lib/systemd/system/$SERVICE_NAME.service" ]; then
# Stop an already running agent
# Only supports systemd and upstart
systemctl stop $SERVICE_NAME || true
fi
if [ -f "/lib/systemd/system/${SERVICE_NAME}-repair.service" ] || [ -f "/usr/lib/systemd/system/${SERVICE_NAME}-repair.service" ]; then
# Stop an already running agent
# Only supports systemd and upstart
systemctl stop ${SERVICE_NAME}-repair || true
fi
| true |
7df249d2889ee8d970f6733ef4b4678cc4220d8d | Shell | errordeveloper/cilium | /test/provision/compile.sh | UTF-8 | 6,227 | 3.5 | 4 | [
"GPL-1.0-or-later",
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
export CILIUM_DS_TAG="k8s-app=cilium"
export KUBE_SYSTEM_NAMESPACE="kube-system"
export KUBECTL="/usr/bin/kubectl"
export PROVISIONSRC="/tmp/provision"
export GOPATH="/home/vagrant/go"
export REGISTRY="k8s1:5000"
export DOCKER_REGISTRY="docker.io"
export CILIUM_TAG="cilium/cilium-dev"
export CILIUM_OPERATOR_TAG="cilium/operator"
export CILIUM_OPERATOR_GENERIC_TAG="cilium/operator-generic"
export CILIUM_OPERATOR_AWS_TAG="cilium/operator-aws"
export CILIUM_OPERATOR_AZURE_TAG="cilium/operator-azure"
export HUBBLE_RELAY_TAG="cilium/hubble-relay"
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source "${PROVISIONSRC}/helpers.bash"
function delete_cilium_pods {
echo "Executing: $KUBECTL delete pods -n $KUBE_SYSTEM_NAMESPACE -l $CILIUM_DS_TAG"
$KUBECTL delete pods -n $KUBE_SYSTEM_NAMESPACE -l $CILIUM_DS_TAG
}
cd ${GOPATH}/src/github.com/cilium/cilium
if echo $(hostname) | grep "k8s" -q;
then
# Only need to build on one host, since we can pull from the other host.
if [[ "$(hostname)" == "k8s1" && "${CILIUM_REGISTRY}" == "" ]]; then
./test/provision/container-images.sh cilium_images .
if [[ "${CILIUM_IMAGE}" == "" ]]; then
echo "building cilium container image..."
make LOCKDEBUG=1 docker-cilium-image
echo "tagging cilium image..."
docker tag cilium/cilium "${REGISTRY}/${CILIUM_TAG}"
echo "pushing cilium image to ${REGISTRY}/${CILIUM_TAG}..."
docker push "${REGISTRY}/${CILIUM_TAG}"
echo "removing local cilium image..."
docker rmi cilium/cilium:latest
else
pull_image_and_push_to_local_registry "${CILIUM_IMAGE}" "${REGISTRY}" "${CILIUM_TAG}"
fi
if [[ "${CILIUM_OPERATOR_IMAGE}" == "" ]]; then
echo "building cilium-operator image..."
make LOCKDEBUG=1 docker-operator-image
echo "building cilium-operator-aws image..."
make -B LOCKDEBUG=1 docker-operator-aws-image
echo "building cilium-operator-azure image..."
make -B LOCKDEBUG=1 docker-operator-azure-image
echo "building cilium-operator-alibabacloud image..."
make -B LOCKDEBUG=1 docker-operator-alibabacloud-image
echo "building cilium-operator-generic image..."
make -B LOCKDEBUG=1 docker-operator-generic-image
echo "tagging cilium-operator images..."
docker tag "${CILIUM_OPERATOR_TAG}" "${REGISTRY}/${CILIUM_OPERATOR_TAG}-ci"
docker tag "${CILIUM_OPERATOR_AWS_TAG}" "${REGISTRY}/${CILIUM_OPERATOR_AWS_TAG}-ci"
docker tag "${CILIUM_OPERATOR_AZURE_TAG}" "${REGISTRY}/${CILIUM_OPERATOR_AZURE_TAG}-ci"
docker tag "${CILIUM_OPERATOR_GENERIC_TAG}" "${REGISTRY}/${CILIUM_OPERATOR_GENERIC_TAG}-ci"
echo "pushing cilium/operator image to ${REGISTRY}/${CILIUM_OPERATOR_TAG}-ci..."
docker push "${REGISTRY}/${CILIUM_OPERATOR_TAG}-ci"
echo "pushing cilium/operator-aws image to ${REGISTRY}/${CILIUM_OPERATOR_AWS_TAG}-ci..."
docker push "${REGISTRY}/${CILIUM_OPERATOR_AWS_TAG}-ci"
echo "pushing cilium/operator-azure image to ${REGISTRY}/${CILIUM_OPERATOR_AZURE_TAG}-ci..."
docker push "${REGISTRY}/${CILIUM_OPERATOR_AZURE_TAG}-ci"
echo "pushing cilium/operator-generic image to ${REGISTRY}/${CILIUM_OPERATOR_GENERIC_TAG}-ci..."
docker push "${REGISTRY}/${CILIUM_OPERATOR_GENERIC_TAG}-ci"
echo "removing local cilium-operator image..."
docker rmi "${CILIUM_OPERATOR_TAG}:latest"
echo "removing local cilium-operator image..."
docker rmi "${CILIUM_OPERATOR_AWS_TAG}:latest"
echo "removing local cilium-operator image..."
docker rmi "${CILIUM_OPERATOR_AZURE_TAG}:latest"
echo "removing local cilium-operator image..."
docker rmi "${CILIUM_OPERATOR_GENERIC_TAG}:latest"
else
pull_image_and_push_to_local_registry "${CILIUM_OPERATOR_IMAGE}" "${REGISTRY}" "${CILIUM_OPERATOR_TAG}"
fi
delete_cilium_pods
if [[ "${HUBBLE_RELAY_IMAGE}" == "" ]]; then
echo "building hubble-relay image..."
make LOCKDEBUG=1 docker-hubble-relay-image
echo "tagging hubble-relay image..."
docker tag ${HUBBLE_RELAY_TAG} ${REGISTRY}/${HUBBLE_RELAY_TAG}
echo "pushing hubble-relay image to ${REGISTRY}/${HUBBLE_RELAY_TAG}..."
docker push ${REGISTRY}/${HUBBLE_RELAY_TAG}
echo "removing local hubble-relay image..."
docker rmi "${HUBBLE_RELAY_TAG}:latest"
else
pull_image_and_push_to_local_registry "${HUBBLE_RELAY_IMAGE}" "${REGISTRY}" "${HUBBLE_RELAY_TAG}"
fi
elif [[ "$(hostname)" == "k8s1" && "${CILIUM_REGISTRY}" != "" ]]; then
if [[ ${CILIUM_IMAGE} != "" ]]; then
pull_image_and_push_to_local_registry "${CILIUM_REGISTRY}/${CILIUM_IMAGE}" "${REGISTRY}" "${CILIUM_TAG}"
fi
if [[ ${CILIUM_OPERATOR_IMAGE} != "" ]]; then
pull_image_and_push_to_local_registry "${CILIUM_REGISTRY}/${CILIUM_OPERATOR_IMAGE}" "${REGISTRY}" "${CILIUM_OPERATOR_TAG}"
fi
if [[ ${HUBBLE_RELAY_IMAGE} != "" ]]; then
pull_image_and_push_to_local_registry "${CILIUM_REGISTRY}/${HUBBLE_RELAY_IMAGE}" "${REGISTRY}" "${HUBBLE_RELAY_TAG}"
fi
else
echo "Not on master K8S node; no need to compile Cilium container"
fi
else
echo "compiling cilium..."
sudo -u vagrant -H -E make SKIP_CUSTOMVET_CHECK=true LOCKDEBUG=1 SKIP_K8S_CODE_GEN_CHECK=false SKIP_DOCS=true
echo "installing cilium..."
make install
mkdir -p /etc/sysconfig/
cp -f contrib/systemd/cilium /etc/sysconfig/cilium
services=$(ls -1 ./contrib/systemd/*.*)
for svc in ${services}; do
cp -f "${svc}" /etc/systemd/system/
done
for svc in ${services}; do
service=$(echo "$svc" | sed -E -n 's/.*\/(.*?).(service|mount)/\1.\2/p')
if [ -n "$service" ] ; then
echo "installing service $service"
systemctl enable $service || echo "service $service failed"
systemctl restart $service || echo "service $service failed to restart"
fi
done
echo "running \"sudo adduser vagrant cilium\" "
sudo adduser vagrant cilium
fi
# Download all images needed for tests.
./test/provision/container-images.sh test_images .
| true |
4f604440f8dd79e7244e560f5d3cd1b05bb56b95 | Shell | pigetnet/bootstrap | /piget/install | UTF-8 | 703 | 3.5625 | 4 | [] | no_license | #!/bin/bash
echo " -------------------- PIGET INSTALL --------------------- "
echo " "
if [ $(id -u) -ne 0 ]; then
echo "ERROR : Script must be run as root."
echo "ACTION : sudo /boot/bootstrap/piget "
exit 1
fi
echo "--> Checking if your Raspberry Pi is connected to the internet"
ping -c1 www.google.com && internet=1 || internet=0
if [[ $internet == 0 ]]
then
echo "ERROR : You must be connected to internet to bootstrap your Raspberry Pi"
exit 1
else
echo "Copy install script"
wget https://raw.githubusercontent.com/pigetnet/install_SCRIPT/master/install -O /tmp/install
if [ $? -eq 0 ];then
chmod +x /tmp/install
/tmp/install
else
echo "Error downloading install script"
fi
fi | true |
b165a9a898f98afe779a4c16340a9d8fdd1145d6 | Shell | SQLAdm/azure | /compute/vm/cli/linuxVM.sh | UTF-8 | 775 | 3.0625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
vnName="vm1"
resourceGroup=""
cat <<EOF > cloud-init.txt
#cloud-config
package_upgrade: true
packages:
- stress
runcmd:
- sudo stress --cpu 1
EOF
az vm create \
--resource-group $resourceGroup \
--name $vnName \
--image UbuntuLTS \
--custom-data cloud-init.txt \
--generate-ssh-keys
VMID=$(az vm show \
--resource-group $resourceGroup \
--name $vnName \
--query id \
--output tsv)
az monitor metrics alert create \
-n "Cpu80PercentAlert" \
--resource-group $resourceGroup \
--scopes $VMID \
--condition "max percentage CPU > 80" \
--description "Virtual machine is running at or greater than 80% CPU utilization" \
--evaluation-frequency 1m \
--window-size 1m \
--severity 3 | true |
181502b8ea7dd3a483fc83f7532d8a1c07a58aab | Shell | Draka46/A1 | /test.sh | UTF-8 | 756 | 3.703125 | 4 | [] | no_license | #!/usr/bin/env bash
# Exit immediately if any command below fails.
set -e
make
echo "Generating a test_files directory.."
mkdir -p test_files
rm -f test_files/*
echo "Generating test files.."
printf "Hello, World!\n" > test_files/ascii.input
printf "Hello, World!" > test_files/ascii2.input
printf "Hello,\x00World!\n" > test_files/data.input
printf "" > test_files/empty.input
### TODO: Generate more test files ###
echo "Running the tests.."
exitcode=0
f=test_files/*.input
echo ">>> Testing ${f}.."
file ${f} | sed 's/ASCII text.*/ASCII text/' > test_files/expected
./file ${f} > test_files/actual
if ! diff -u test_files/expected test_files/actual
then
echo ">>> Failed :-("
exitcode=1
else
echo ">>> Success :-)"
fi
exit $exitcode
| true |
7616f7facab5526da97cd340b719998a128f5c28 | Shell | shyjupv/caliper | /test_cases_cfg/common/unzip/unzip_build.sh | UTF-8 | 619 | 3.015625 | 3 | [] | no_license | #!/bin/bash
url='www.estuarydev.org/caliper'
#url='http://7xjz0v.com1.z0.glb.clouddn.com/caliper_tools'
filename='kernel-dev.tar.gz'
build_compile() {
set -e
set -x
Path=$BENCH_PATH"312.unzip"
myOBJPATH=${INSTALL_DIR}/scripts
if [ ! -d $myOBJPATH ]; then
mkdir -p $myOBJPATH
fi
download_file $download_dir $url $filename
if [ $? -ne 0 ]; then
echo 'Downloading dependency software (kernel-dev.tar.gz) error'
exit 1
fi
if [ ! -f $myOBJPATH/$filename ]; then
cp $download_dir/$filename $myOBJPATH
fi
cp $Path/* $myOBJPATH
}
build_compile
| true |
132b027dd1da039d04f396b785d3901f20f803c1 | Shell | joshgav/azure-scripts | /acs/mesos/mesos-build.sh | UTF-8 | 539 | 2.53125 | 3 | [
"MIT"
] | permissive | # Bootstrap Mesos from source
sudo apt-get update && apt-get upgrade -y
sudo apt-get install -y git
sudo apt-get install -y openjdk-7-jdk
sudo apt-get install -y autoconf libtool build-essential
sudo apt-get install -y python-dev python-boto libcurl4-nss-dev libsasl2-dev maven libapr1-dev libsvn-dev
MESOS_ROOT_DIR=~/mesos
test -d $MESOS_ROOT_DIR || git clone https://git-wip-us.apache.org/repos/asf/mesos.git $MESOS_ROOT_DIR
cd $MESOS_ROOT_DIR
./bootstrap
test -d build || mkdir build
cd $MESOS_ROOT_DIR/build
../configure
make -j8
| true |
933927e371c085b1d0f9fbc3914dd041e20493d9 | Shell | AlexxandreFS/Batocera.PLUS | /plus/opt/Rpcs3/killrpcs3 | UTF-8 | 393 | 3.109375 | 3 | [] | no_license | #!/bin/sh
##
## Batocera.PLUS
## Encerra a execução do emulador rpcs3.
##
## Código escrito por: Sérgio de Carvalho Júnior
##
################################################################################
RPCS3_PID="$(pidof -s rpcs3)"
kill -15 ${RPCS3_PID}
sleep 0.2
if [ -z "${RPCS3_PID}" ]; then
echo 'RPCS3 closed!'
else
sleep 0.5
kill -9 ${RPCS3_PID}
echo 'RPCS3 is dead!'
fi
exit 0 | true |
21de7ece2a194907a566449b71478b245b1ddbd9 | Shell | TerabyteQbt/meta | /bin/qbt | UTF-8 | 6,186 | 3.515625 | 4 | [
"Unlicense",
"LicenseRef-scancode-unknown"
] | permissive | #!/usr/bin/env bash
set -e
# USAGE: use in place or copy this script somewhere where it will be on your path.
# modify the INSTALL_PREFIX if desired. QBT stores caches in ~/.qbt, so that should work for most people.
#
# QBT DEPENDENCIES: python, jdk8, git
# SCRIPT DEPENDENCIES: download requires curl and openssl
#
# Script will try to find jdk8, warn if user name/email not set.
# Change these to update qbt version.
export META_TOOLS_COMMIT="0fae72f2ad04c8c173484f10d6264fe8b5c2619c" # the sha1 we built this from, FYI
export META_TOOLS_CV="0178f0b84a873294e87e861e368de42010f4faee" # the CV that results (multiple commits might generate the same CV)
export META_TOOLS_SHA256="3a37e2ccbcc93a524cd6a88ade7791538b6362464af4ce5b35e0a5ef3251372c" # this is the sha256sum of the build published - we are not bit-for-bit reproducible, so this could change if you build your own.
# github is as good a hosting platform as any, github user terabyte is cmyers, you can verify by keybase.io proof
export GITHUB_URL="https://github.com/TerabyteQbt/meta/releases/download/metatools-1629970749/meta_tools.release-0178f0b84a873294e87e861e368de42010f4faee.tar.gz"
# qbtbuildtool.com is run by cmyers, uses SSL, you can verify by keybase.io proof
export QBTBUILDTOOL_URL="https://qbtbuildtool.com/meta_tools.release.tar.gz" # note that if you use this, the above info might be wrong, you will want to confirm it some other way
# Set this to grab QBT from github or qbtbuildtool.com, whichever you trust more, or add your own
# I use github because I use this script to deploy qbtbuildtool.com
export PUBLIC_QBT_URL="$GITHUB_URL"
INSTALL_PREFIX="$HOME/.qbt"
META_TOOLS_PATH="$INSTALL_PREFIX/meta-tools-$META_TOOLS_CV"
QBT_BIN="$META_TOOLS_PATH/bin/qbt"
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
EXAMPLE_FILE="$SCRIPT_DIR/qbt-config.example"
# install qbt if not present
function install_qbt {
if [[ ! -x "$QBT_BIN" ]]; then
echo "Downloading QBT MetaTools ($META_TOOLS_CV)..."
TMP_BIN="$(mktemp).tar.gz"
function cleanup {
rm -f $TMP_BIN
}
trap cleanup EXIT
curl -L $PUBLIC_QBT_URL > $TMP_BIN
TEST_SHA256="$(openssl sha256 $TMP_BIN | sed 's/.* //')"
if [[ "$TEST_SHA256" != "$META_TOOLS_SHA256" ]]; then
if [[ -n "$I_AM_A_CLOWN_IGNORE_SECURITY" ]]; then
echo "ERROR: sha256 mismatch ($META_TOOLS_SHA256 does not match $TEST_SHA256) - but installing anyways!" 1>&2
else
echo "ERROR: sha256 mismatch ($META_TOOLS_SHA256 does not match $TEST_SHA256)" 1>&2
exit 1
fi
fi
mkdir -p $META_TOOLS_PATH
(cd $META_TOOLS_PATH && tar -xzf $TMP_BIN)
fi
}
# qbt requires JAVA_HOME to be set
# building java with qbt requires JAVA_X_HOME to be set where X is "1_8" or
# "1_9" or whatever QBT_ENV_JDK is set to.
# the qbt wrapper script will complain if JAVA_HOME is not set, but let's try
# to do better
function set_java_home {
    if [[ -z "$JAVA_HOME" ]]; then
        if [[ "$(uname)" == "Darwin" ]]; then
            # get java_home for a mac
            export JAVA_HOME="$(/usr/libexec/java_home -v 1.8)"
        else
            # try to get it from the path; `command -v` is quieter and more
            # portable than `which`, which also printed the path to stdout
            if ! JAVA_BIN="$(command -v java)"; then
                echo "Error setting JAVA_HOME: no java on path" 1>&2
                exit 1
            fi
            JAVA_BIN_DIRNAME="$(dirname "$JAVA_BIN")"
            if [[ "$(basename "$JAVA_BIN_DIRNAME")" == "bin" ]]; then
                # this looks right: .../bin/java implies JAVA_HOME is the parent
                export JAVA_HOME="$(dirname "$JAVA_BIN_DIRNAME")"
            fi
        fi
        # if we set it ourselves, make sure it is the right version
        if [[ ! -x "$JAVA_HOME/bin/java" ]]; then
            echo "Error setting JAVA_HOME: '$JAVA_HOME/bin/java' not executable" 1>&2
            exit 1
        fi
        # first line of `java -version` must mention 1.8
        if "$JAVA_HOME/bin/java" -version 2>&1 | head -n1 | grep -v -q 1.8; then
            echo "Error setting JAVA_HOME: '$JAVA_HOME/bin/java' not a 1.8 JDK" 1>&2
            exit 1
        fi
    fi
    # use this for JAVA_1_8_HOME
    if [[ -z "$JAVA_1_8_HOME" ]]; then
        # if java in $JAVA_HOME is a 1.8 jdk, set JAVA_1_8_HOME
        if "$JAVA_HOME/bin/java" -version 2>&1 | grep -q '"1.8'; then
            export JAVA_1_8_HOME=$JAVA_HOME
        fi
    fi
    # use this for JAVA_1_9_HOME
    if [[ -z "$JAVA_1_9_HOME" ]]; then
        # if java in $JAVA_HOME is a 1.9 jdk, set JAVA_1_9_HOME
        if "$JAVA_HOME/bin/java" -version 2>&1 | grep -q '"9'; then
            export JAVA_1_9_HOME=$JAVA_HOME
        fi
    fi
}
# check to see if config file exists - if not, place one there
# Walks up from the current directory looking for qbt-manifest, following
# .qbt-meta-location redirects when present, then seeds qbt-config from
# EXAMPLE_FILE if it is missing.
function find_or_create_config_file {
    # first, find qbt-manifest
    SEARCH_DIR="$(pwd)"
    while [[ ! -f "$SEARCH_DIR/qbt-manifest" ]]; do
        if [[ -f "$SEARCH_DIR/.qbt-meta-location" ]]; then
            NEW_SEARCH_DIR="$SEARCH_DIR/$(cat "$SEARCH_DIR/.qbt-meta-location")"
        else
            # quoting matters here: unquoted dirname broke on paths with spaces
            NEW_SEARCH_DIR="$(dirname "$SEARCH_DIR")"
        fi
        # dirname of "/" is "/" again, so this detects hitting the filesystem root
        if [[ "$NEW_SEARCH_DIR" == "$SEARCH_DIR" ]]; then
            echo "Error: could not find qbt-manifest or .qbt-meta-location while finding up" 1>&2
            exit 1
        fi
        SEARCH_DIR="$NEW_SEARCH_DIR"
    done
    if [[ ! -f "$SEARCH_DIR/qbt-config" ]]; then
        echo "NOTE: missing qbt-config file, copying example file in place. New file at: $SEARCH_DIR/qbt-config"
        cp "$EXAMPLE_FILE" "$SEARCH_DIR/qbt-config"
    fi
}
# QBT creates commits internally, so git must know the user's identity.
# If either global identity field is missing, print instructions and abort.
function warn_if_git_user_not_set {
    if [[ -n "$(git config --global user.name)" && -n "$(git config --global user.email)" ]]; then
        return
    fi
    cat << EOF 1>&2
You must configure a username and email for git. QBT creates commits
internally and will error out if this is not done. To configure git, run the
following commands:
git config --global user.name "Your Name Here"
git config --global user.email "yourname@example.com"
EOF
    exit 1
}
# Here is the main script. This is what it does:
# 1) ensure git identity is configured (qbt commits internally)
warn_if_git_user_not_set
# 2) download + verify the qbt tooling if not already installed
install_qbt
# 3) make sure JAVA_HOME / JAVA_1_8_HOME / JAVA_1_9_HOME are usable
set_java_home
# 4) locate the manifest root and seed qbt-config from the example if absent
find_or_create_config_file
# 5) forward all arguments to the real qbt binary
$QBT_BIN "$@"
| true |
a4f6acf0799df8d621b469ecb8e88ea928ce6949 | Shell | vtasio/djengu | /.djengu/.production_toolbox/caddy/vagrant_caddy.sh | UTF-8 | 851 | 3.0625 | 3 | [] | no_license | #!/bin/bash
set -e

# ANSI colors; NC resets the terminal color.  It was referenced below but
# never defined, so the reset escape expanded to nothing.
GREEN='\033[0;32m'
NC='\033[0m'

echo -e "\n${GREEN}Configuring vagrant. This may take a few minutes...${NC}\n"

vagrant up

# Host private IP (first 192.x.x.x address) and the API/origin domains from
# the prod env file, with the https:// prefix stripped for the template.
export HOST=$(hostname -I | grep -oP "192+[.]\d+[.]\d+[.]\d+")
export API_DOMAIN=$(grep -P "API_URL" env/.env.prod | sed -e "s/^API_URL=https:\/\///")
export ORIGIN_DOMAIN=$(grep -P "ORIGIN_URL" env/.env.prod | sed -e "s/^ORIGIN_URL=https:\/\///")

# Render the Caddyfile from its template using the exported variables.
envsubst < "./.djengu/.production_toolbox/caddy/Caddyfile.template" > "./.djengu/.production_toolbox/caddy/Caddyfile"

vagrant ssh -c "cd /caddy/
cp /djengu/.djengu/.production_toolbox/caddy/Caddyfile .
docker-compose -f docker-compose.caddy.yml down -v && \
docker-compose -f docker-compose.caddy.yml up --build -d"

echo -e "\n${GREEN}🚀 Vagrant is up and running. You can enter the instance using:${NC}\n"
echo "vagrant ssh"
echo
10ad1e37884b47b87e148b08766430da1e7df636 | Shell | gouthi2106/shell | /shells/average.sh~ | UTF-8 | 237 | 3.0625 | 3 | [] | no_license | #!/bin/sh
echo " please enter the first number"
read a
echo " please enter second number"
read b
echo " please enter third number"
read c
z=`expr $a + $b + $c`
average=`expr $z / 3`
echo " the average of the given numbers is: $average"
| true |
c037ca0d4c5d4c0fe425d14a2966b0f6ca08fae3 | Shell | jesuslerma/scripting | /dia1/simpleif.sh | UTF-8 | 288 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Demo of simple if-statements: current user, installed distro, and a
# trivial numeric comparison.
username=$(id -un)
if [[ $username = "hyde" ]]
then
echo "El usuario por defecto es hyde"
fi
distro=$(lsb_release --id)
# Quote the expansion so the original spacing of the lsb_release output is
# preserved instead of being collapsed by word splitting.
echo "$distro"
# NOTE(review): lsb_release separates fields with a tab and capitalises the
# distro name (e.g. "CentOS"), so this literal may never match — confirm.
if [[ $distro != 'Distributor ID: Centos' ]]
then
echo "No tienes centos instalado"
fi
if [[ 1 -eq 1 ]]
then
echo "Uno es igual a uno"
fi
| true |
102c0315014b91aabc75ea49f1f001a79cb17801 | Shell | DanielEnglisch/EV3Scripts | /scripts/ev3web.sh | UTF-8 | 467 | 2.921875 | 3 | [] | no_license |
#!/bin/bash
# EV3 Web-upload: tar up ../web and push it to the EV3 brick whose
# address is given in $1, then unpack it remotely.
echo "=== EV3 Web-upload Script v.1.5 by Xer0, peerfunk ==="
echo "=== Compressing... ==="
cd ../web || exit 1
tar -czvf upload_web.tar *
echo "=== Compress done ==="
#uploading files
echo "Copying files..."
# NOTE(security): the device password is hardcoded; prefer SSH keys.
sshpass -p 'Cisco0' scp ../web/upload_web.tar "root@$1:/var/www/html/"
# Quote the remote command: previously the `&& tar ...` half was split off
# locally, so the archive was extracted on this machine, not the brick.
sshpass -p 'Cisco0' ssh "root@$1" 'cd /var/www/html/ && tar -xvf upload_web.tar'
#nc $1 2222 < ./upload_web.tar
# Treat any non-zero exit status as a failure (the old test only caught 1),
# and exit non-zero ourselves so callers can detect it.
if [ $? -ne 0 ]
then
	echo "!!! -> Upload failed!"
	exit 1
fi
| true |
ae2337be81541b1ebe7e14d855023b7eeea7b3d4 | Shell | isis-project/isis-project | /scripts/build_BrowserAdapter.sh | UTF-8 | 296 | 3.046875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | #!/bin/bash
# Build the BrowserAdapter component using the shared build helpers.
. ./common/envsetup.sh
. ./common/makesetup.sh
export STAGING_DIR="$LUNA_STAGING"
# $(...) nests and reads better than backticks.
export QT_INSTALL_PREFIX=$("$QMAKE" -query QT_INSTALL_PREFIX)
# Forward the optional package name when provided.
if [ -n "$PACKAGE" ]; then
build_target "$ISIS_ROOT" "$1" "$2" Ubuntu stage "$PACKAGE"
else
build_target "$ISIS_ROOT" "$1" "$2" Ubuntu stage
fi
| true |
491f09302e820f25fc61d6a1004e2d177cd729a9 | Shell | Krushon/Zabbix_templates | /Asterisk/Asterisk IAX/asterisk.trunk-with-name.sh | UTF-8 | 724 | 3.421875 | 3 | [] | no_license | #!/bin/bash
# Total number of trunks: the trailing "N iax2 peers ..." summary line of the
# command output is the only line containing "iax2", and its first field is
# the count — so $1 of that line yields the total.
total=`sudo asterisk -rx 'iax2 show peers' | sed -n '/iax2/p' | awk '{print $1}'`
# Number of trunks currently in the OK (reachable) state
active=`sudo asterisk -rx 'iax2 show peers' | sed -n '/OK/p' | wc -l`
# Names of problem trunks (those reported UNREACHABLE)
offline=`sudo asterisk -rx 'iax2 show peers' | sed -n '/UNREACHABLE/p' | awk '{print $1}'`
# Compare the total against the number of active trunks and report status
if [ $active -lt $total ]
then
echo Trunks offline $offline
else
echo All trunks are online
fi
d9e701911d046a5769fac26fead08454d3bb6c23 | Shell | dnfcallan/THE-2020-PERSONALIZED-VOICE-TRIGGER-CHALLENGE-BASELINE-SYSTEM | /src/prepare_testset_data.sh | UTF-8 | 453 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# $1: test-set directory (contains trials_competitor and wav_data/)
# $2: destination directory for the generated .scp lists
test_set=$1
dest_dir=$2
mkdir -p $dest_dir
# Columns 1-3 of trials_competitor are emitted one id per line, de-duplicated,
# then each id is mapped to "<id> <test_set>/wav_data/<id>" for the SV task.
# (Note: test_set is passed into the second awk via a trailing var=value arg.)
awk '{printf("%s\n%s\n%s\n",$1,$2,$3)}' $test_set/trials_competitor | sort -u | awk '{printf("%s %s/wav_data/%s\n",$1,test_set,$1)}' test_set=$test_set > $dest_dir/wav_for_sv.scp
# Column 4 holds the wake-word utterance ids; same mapping for the wake task.
awk '{printf("%s\n",$4)}' $test_set/trials_competitor | sort -u | awk '{printf("%s %s/wav_data/%s\n",$1,test_set,$1)}' test_set=$test_set > $dest_dir/wav_for_wake.scp
cp $test_set/trials_competitor $dest_dir/
| true |
efb3c802fd528274ae50044531f1a1635246da50 | Shell | omohammed95/Upgrade | /modifPpa.sh | UTF-8 | 283 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Comment out PPA source entries so apt ignores them (e.g. before a
# release upgrade).
function DISABLE_PPAs {
	# Prefix only lines that begin with "deb" with a '#' character.
	# The previous pattern 's/^/#/' commented *every* line (contradicting
	# the stated intent) and doubled the '#' on re-runs; anchoring on ^deb
	# matches the intent and is idempotent.
	for i in docker-ce.list slack-desktop.list user.list
	do
		sed -i -E 's/^deb/#deb/' "/etc/apt/sources.list.d/$i"
		echo "files has been modified successfully"
	done
}
| true |
c018ac9dd69fed04658230bfc4f7cfa3544fd080 | Shell | CPSECapstone/LilBobbyTables-MyCRT | /scripts/clean_package_locks.sh | UTF-8 | 464 | 3.296875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Remove every package-lock.json and node_modules directory in the repo.
REPOSITORY_ROOT_DIR=$(git rev-parse --show-toplevel 2>/dev/null)
if [ -z "$REPOSITORY_ROOT_DIR" ]; then
   echo "Cannot determine LilBobbyTables-MyCRT root directory. Please run this script from within the repository."
   exit 1
fi
# Quote the path in case the checkout lives in a directory with spaces,
# and bail out if the cd fails for any reason.
cd "$REPOSITORY_ROOT_DIR" || exit 1
echo "Removing all package-locks"
find . -name "package-lock.json" -exec rm -rf '{}' +
echo "Removing all node_modules"
find . -name "node_modules" -exec rm -rf '{}' +
| true |
3566d4dbba1d359e1d50d9d94dc926ba4069da6b | Shell | kajka/automatyzacja-macos | /04--install-homebrew.sh | UTF-8 | 260 | 3 | 3 | [] | no_license | if check $(which brew); then
echo 'Homebrew already installed!'
else
echo 'Installing Homebrew...'
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew update & brew upgrade
echo 'Installation succeeded!'
fi
| true |
7e44fa6e3e014d5db8de0f648f5828ce4896491c | Shell | xsteadfastx/massdevextractor | /.test.sh | UTF-8 | 388 | 2.703125 | 3 | [] | no_license | #!/bin/sh
# Fail fast: -e aborts on errors, -u on unset vars, pipefail propagates
# pipeline failures.
# NOTE(review): the file's shebang is #!/bin/sh; `set -o pipefail` is not
# supported by all /bin/sh implementations (e.g. older dash) — confirm this
# is always run under bash or change the shebang.
set -euo pipefail

# Run each tox environment inside the tox-python container, mounting the
# project at /data; gcc is installed first for native extension builds.
for toxenv in py37 flake8 pylint mypy
do
    sudo docker run \
        --rm -t \
        -v "$PWD"/massdevextractor:/data \
        -w /data \
        -e TOX_WORK_DIR=/tmp \
        xsteadfastx/tox-python:full \
        /bin/sh -c " sudo apt-get update; sudo apt-get install -y gcc; tox -v -e $toxenv"
done
4ba78f6fce0751a2bec9bd9f0bf67fba1222a7ea | Shell | aarcro/dotfiles | /home/.vagrant.d/scripts/provision | UTF-8 | 571 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env bash
echo "Personal provision script"
if [ -f /usr/bin/apt-get ]; then
apt-get update # Maybe bad things in the package index
apt-get -y install ack-grep git vim screen
fi
if [ -f /usr/bin/yum ]; then
yum install -y git-core screen vim
fi
# Do homeshick bootstrap
if [ ! -d /home/vagrant/.homesick ]; then
su -c "git clone git://github.com/andsens/homeshick.git /home/vagrant/.homesick/repos/homeshick" vagrant
# su -c 'source /home/vagrant/.homesick/repos/homeshick/homeshick.sh && yes | homeshick clone aarcro/dotfiles' vagrant
fi
| true |
1b7ac0096014ce05f0be72711e3c4396729bfa31 | Shell | ODEX-TOS/packages | /babl/repos/extra-x86_64/PKGBUILD | UTF-8 | 736 | 2.65625 | 3 | [
"GPL-1.0-or-later",
"MIT"
] | permissive | # Maintainer: Daniel Isenmann <daniel@archlinux.org>
pkgname=babl
pkgver=0.1.82
pkgrel=1
pkgdesc="Dynamic, any to any, pixel format conversion library"
arch=(x86_64)
url="https://gegl.org/babl/"
license=(LGPL3)
depends=(glibc lcms2)
makedepends=(git meson gobject-introspection vala)
_commit='aab30293930236fab173a879f2d9aab95d45db5e' # tags/BABL_0_1_78^0
source=("git+https://gitlab.gnome.org/GNOME/babl.git#commit=$_commit")
sha256sums=('SKIP')
# Don't port to meson until babl's runtime cpu detection works there
pkgver() {
cd $pkgname
git describe --tags | sed 's/^BABL_//;s/_/./g;s/-/+/g'
}
build() {
arch-meson $pkgname build
ninja -C build
}
check() {
meson test -C build --print-errorlogs
}
package() {
DESTDIR="$pkgdir" meson install -C build
}
# vim:set sw=2 et:
| true |
1ef4f44f176caacd919fb87ed25690209bda036a | Shell | VladRyvkin/LPI-OS | /theminst.sh | UTF-8 | 3,538 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# theminst.sh - interactively install XFCE panel plugins, icon themes,
# GTK themes and wallpapers from the ThemesRes-LPI repository (openSUSE).
sleep 0.3s;
echo ""
echo "file: theminst.sh"
echo ""
echo "In this file we have themes for XFCE"
echo ""

# Packages installed via zypper before copying the theme resources.
PLUGINS=(
    cinnamon-metatheme-numix
    xfce4-panel-plugin-eyes
    xfce4-panel-plugin-battery
    xfce4-panel-plugin-cpufreq
    xfce4-panel-plugin-netload
    xfce4-panel-plugin-fsguard
    xfce4-panel-plugin-wavelan
    xfce4-panel-plugin-cpugraph
    xfce4-panel-plugin-systemload
    xfce4-panel-plugin-diskperf
    xfce4-panel-plugin-sensors
    xfce4-panel-plugin-weather
    xfce4-panel-plugin-whiskermenu
)

while true; do
    read -p "Do you wish to copy themes?" yn
    case $yn in
        [Yy]* )
            echo ""; echo "Starting theminst.sh";
            echo ""; echo "Copying themes";
            for pkg in "${PLUGINS[@]}"; do
                sudo zypper install "$pkg"
            done
            # Fetch and unpack the theme/icon bundle from GitHub.
            wget https://codeload.github.com/VladRyvkin/ThemesRes-LPI/zip/master
            unzip master
            res="$PWD/ThemesRes-LPI-master"
            # Wallpapers.  (The old command built a doubled absolute path
            # with $PWD after cd'ing, which could never exist.)
            cp -rp "$res"/backgrounds/* /usr/share/wallpapers/xfce/
            # Icon themes: extract every archive in icons/ into
            # /usr/share/icons.  Quoting fixes names with spaces such as
            # "Flat Remix.zip", which the old per-file commands split apart.
            for z in "$res"/icons/*.zip; do
                unzip "$z" -d /usr/share/icons/
            done
            cp "$res"/icons/cab_extract.png /usr/share/icons/
            cp "$res"/icons/cab_view.png /usr/share/icons/
            cp "$res"/icons/whiskermenu-manjaro.svg /usr/share/icons/
            # GTK / window-manager themes.  Numix.zip stays skipped, as it
            # was commented out in the original install list.
            for z in "$res"/themes/*.zip; do
                [ "$(basename "$z")" = "Numix.zip" ] && continue
                unzip "$z" -d /usr/share/themes/
            done
            # Clean up the downloaded archive and the extracted tree.
            rm master
            rm -rf ThemesRes-LPI-master
            echo ""; echo "Copying themes complete"; echo ""; break;;
        [Nn]* ) echo ""; echo "Close theminst.sh"; echo ""; exit;;
        * ) echo "Please answer yes or no.(y or n)";;
    esac
done
#cp -R /home/vlad/Downloads/axiom /usr/share/themes/
#cp -R /home/vlad/Downloads/axiomd /usr/share/themes/
#cp -R /home/vlad/Downloads/plane-dark /usr/share/icons/
#cp -R /home/vlad/Downloads/NumixHolo /usr/share/themes/
#cp -R /home/vlad/Downloads/Flat Remix /usr/share/icons/
#sudo zypper install conky
ef7c65cb2e16439f8df026caceb67d020f9c060b | Shell | hypermine-bc/hs-authenticator | /clean-build-install.sh | UTF-8 | 2,387 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
## reference https://stackoverflow.com/a/28938235/1851064
# Build the hypersign Keycloak plugin, deploy it (jar, themes, config,
# JBoss module) into $KCBASE, patch standalone.xml and restart Keycloak.
GREEN='\033[0;32m'
RED='\033[0;31m'
BLUE='\033[0;34m'
BLUE_BG='\033[44m'
NC='\033[0m' # No Color

if [ -z "$KCBASE" ]; then
  echo "Environment variable KCBASE is not set. Set it to proceed."
  exit 1
fi

# -e so the color escapes are interpreted (was missing on this line).
echo -e "${BLUE}Keycloak homepath is : ${KCBASE} ${NC}"

echo -e "${BLUE_BG}Building the hypersign plugin..${NC}"
./build.sh

echo -e "${BLUE_BG}Cleaning the hypersign plugin..${NC}"
rm -rf "${KCBASE}/hs-plugin-keycloak-ejb-0.2-SNAPSHOT.jar"
rm -rf "${KCBASE}/modules/hs-plugin-keycloak-ejb/"
rm -rf "${KCBASE}/standalone/configuration/hypersign.properties"

echo -e "${BLUE_BG}Coping the plugin..${NC}"
cp ./dist/hs-plugin-keycloak-ejb-0.2-SNAPSHOT.jar "${KCBASE}"

echo -e "${BLUE_BG}Dploying the hypersign theme..${NC}"
cp hs-themes/hypersign-config.ftl "${KCBASE}/themes/base/login"
cp hs-themes/hypersign.ftl "${KCBASE}/themes/base/login"
cp hs-themes/hypersign-new.ftl "${KCBASE}/themes/base/login"

echo -e "${BLUE_BG}Dploying the hypersign config file..${NC}"
cp hypersign.properties "${KCBASE}/standalone/configuration/"

echo -e "${BLUE_BG}Deploying the hypersign plugin..${NC}"
cd "${KCBASE}"
./bin/jboss-cli.sh --command="module add --name=hs-plugin-keycloak-ejb --resources=./hs-plugin-keycloak-ejb-0.2-SNAPSHOT.jar --dependencies=org.keycloak.keycloak-common,org.keycloak.keycloak-core,org.keycloak.keycloak-services,org.keycloak.keycloak-model-jpa,org.keycloak.keycloak-server-spi,org.keycloak.keycloak-server-spi-private,javax.ws.rs.api,javax.persistence.api,org.hibernate,org.javassist,org.liquibase,com.fasterxml.jackson.core.jackson-core,com.fasterxml.jackson.core.jackson-databind,com.fasterxml.jackson.core.jackson-annotations,org.jboss.resteasy.resteasy-jaxrs,org.jboss.logging,org.apache.httpcomponents,org.apache.commons.codec,org.keycloak.keycloak-wildfly-adduser"

echo -e "${BLUE_BG}Adding hs module to the keycloak configuration${NC}"
sed -i 's/<provider>module:hs-plugin-keycloak-ejb<\/provider>/''/g' $KCBASE/standalone/configuration/standalone.xml
sed -i '/<provider>classpath:\${jboss\.home\.dir}\/providers\/\*<\/provider>/a \
<provider>module:hs-plugin-keycloak-ejb<\/provider>' $KCBASE/standalone/configuration/standalone.xml

echo -e "${BLUE_BG}Running keycloak..${NC}"
# Kill anything already bound to 8080 before starting; harmless if empty.
kill -9 $(lsof -t -i:8080)
./bin/standalone.sh

# "${NC }" (with a space) was a bad substitution, so this final message
# previously never printed.
echo -e "${GREEN}****************Finish!********************${NC}"
| true |
b5fa6bc4b8a00faef10aef01087bbd8f7daa0d2e | Shell | ODEX-TOS/packages | /dkms/repos/extra-any/hook.sh | UTF-8 | 6,939 | 3.78125 | 4 | [
"GPL-1.0-or-later",
"MIT"
] | permissive | #!/bin/bash
#
# Copyright © 2018-2020, Sébastien Luttringer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Print the command about to be executed, run it with stdout discarded,
# and report (while still returning) a non-zero exit status.
run() {
  local status
  echo "==> $*"
  "$@" > /dev/null
  status=$?
  if (( status != 0 )); then
    echo "==> Warning, \`$*' returned $status"
  fi
  return $status
}
# check whether the dependencies of a module are installed
# Runs in a subshell so the sourced dkms.conf and the exits stay contained.
# For each entry in BUILD_DEPENDS, a `dkms status` line matching
# "<dep>, <ver>, <kernel>, <arch>: installed" must exist.
# $1: module name
# $2: module version
# $3: kernel version
check_dependency() { (
  source "$source_tree/$1-$2/dkms.conf"
  local mod lines line
  for mod in "${BUILD_DEPENDS[@]}"; do
    mapfile lines < <(dkms status -m "$mod" -k "$3")
    for line in "${lines[@]}"; do
      # continue 2: this dependency is satisfied, move on to the next one.
      # (The previous `break 2` left the outer loop entirely, so only the
      # first dependency was ever checked when it happened to be installed.)
      [[ "$line" =~ "$mod, "[^,]+", $3, "[^:]+': installed' ]] && continue 2
    done
    exit 1
  done
  exit 0
) }
# check whether the module should be built with this kernel version
# Reads BUILD_EXCLUSIVE_KERNEL from the module's dkms.conf (in a subshell)
# and matches the kernel version against it; an empty pattern matches all.
# $1: module name
# $2: module version
# $3: kernel version
check_buildexclusive() {
  local pattern
  pattern=$(source "$source_tree/$1-$2/dkms.conf"; printf '%s\n' "$BUILD_EXCLUSIVE_KERNEL")
  [[ "$3" =~ $pattern ]]
}
# handle actions on module addition/upgrade/removal
# Registers the module against every kernel that has a build tree under
# $install_tree (each */build/ directory names a kernel version).
# NOTE(review): if no */build/ directory exists the glob stays literal and
# a bogus "*" kernel would be registered — assumes at least one match.
# $1: module name
# $2: module version
parse_module() {
  pushd "$install_tree" >/dev/null
  local path
  for path in */build/; do
    # strip everything after the first "/" to get the kernel version
    local kver="${path%%/*}"
    dkms_register "$1" "$2" "$kver"
  done
  popd >/dev/null
}
# handle actions on kernel addition/upgrade/removal
# Registers every module that has a <name>-<version>/dkms.conf under
# $source_tree against the given kernel; the regex captures name/version.
# $1: kernel version
parse_kernel() {
  local path
  for path in "$source_tree"/*-*/dkms.conf; do
    if [[ -f "$path" && "$path" =~ ^$source_tree/([^/]+)-([^/]+)/dkms\.conf$ ]]; then
      dkms_register "${BASH_REMATCH[1]}" "${BASH_REMATCH[2]}" "$1"
    fi
  done
}
# Queue a module/version/kernel triple for later processing.  The keys of
# the DKMS_MODULES associative array act as a set, so repeated
# registrations of the same triple collapse into one entry.
# $1: module name, $2: module version, $3: kernel version
dkms_register() {
  local key="$1/$2/$3"
  DKMS_MODULES[$key]=''
}
# install registered modules
# Repeatedly sweeps DKMS_MODULES: modules whose dependencies are missing are
# postponed, and whenever any module is installed another pass is made so
# that dependency order resolves itself.  Entries left in DKMS_MODULES keep
# a human-readable failure reason for show_errors.
dkms_install() {
  local nvk mod mver kver
  local -i retry=1
  local -A dmods=()
  while (( $retry > 0 )); do
    retry=0
    for nvk in "${!DKMS_MODULES[@]}"; do
      # split the "name/version/kernel" key
      [[ "$nvk" =~ ([^/]+)/([^/]+)/(.+) ]]
      mod="${BASH_REMATCH[1]}"
      mver="${BASH_REMATCH[2]}"
      kver="${BASH_REMATCH[3]}"
      # do not build excluded modules
      if ! check_buildexclusive "$mod" "$mver" "$kver"; then
        unset DKMS_MODULES[$nvk]
        continue
      # skip modules with missing kernel headers
      elif [[ ! -d "$install_tree/$kver/build/include" ]]; then
        DKMS_MODULES[$nvk]='Missing kernel headers'
        continue
      # skip modules with missing kernel package
      elif [[ ! -d "$install_tree/$kver/kernel" ]]; then
        DKMS_MODULES[$nvk]='Missing kernel modules tree'
        continue
      # postpone modules with missing dependencies
      elif ! check_dependency "$mod" "$mver" "$kver"; then
        DKMS_MODULES[$nvk]='Missing dependency'
        continue
      fi
      # give it a try dkms
      run dkms install --no-depmod -m "$mod" -v "$mver" -k "$kver"
      # remember the kernel so depmod runs once per kernel at the end
      dmods[$kver]=''
      unset DKMS_MODULES[$nvk]
      # maybe this module was a dep of another, so we retry
      retry=1
    done
  done
  # run depmod later for performance improvments
  if (( $DKMS_DEPMOD )); then
    for kver in "${!dmods[@]}"; do
      run depmod "$kver"
    done
  fi
}
# remove registered modules when built/installed
# run depmod later for performance improvments
# Only modules that `dkms status` reports as added/built/installed are
# removed; others (and failed removals) keep a reason in DKMS_MODULES.
dkms_remove() {
  local nvk mod mver kver state
  local -A dmods=()
  for nvk in "${!DKMS_MODULES[@]}"; do
    # split the "name/version/kernel" key
    [[ "$nvk" =~ ([^/]+)/([^/]+)/(.+) ]]
    mod="${BASH_REMATCH[1]}"
    mver="${BASH_REMATCH[2]}"
    kver="${BASH_REMATCH[3]}"
    # do not remove excluded modules (n.b. display not found errors)
    if ! check_buildexclusive "$mod" "$mver" "$kver"; then
      unset DKMS_MODULES[$nvk]
      continue
    fi
    state=$(dkms status -m "$mod" -v "$mver" -k "$kver")
    if [[ "$state" =~ "$mod, $mver, $kver, "[^:]+": "(added|built|installed) ]]; then
      dmods[$kver]=''
      run dkms remove --no-depmod -m "$mod" -v "$mver" -k "$kver"
      if (( $? == 0 )); then
        unset DKMS_MODULES[$nvk]
      else
        DKMS_MODULES[$nvk]='dkms remove failed'
      fi
    else
      DKMS_MODULES[$nvk]='Not found in dkms status output'
    fi
  done
  # run depmod later for performance improvments
  if (( $DKMS_DEPMOD )); then
    for kver in "${!dmods[@]}"; do
      run depmod "$kver"
    done
  fi
}
# Report every entry still left in DKMS_MODULES together with the reason it
# could not be processed by the current $DKMS_ACTION.
show_errors() {
  local key module_version kernel
  for key in "${!DKMS_MODULES[@]}"; do
    module_version=${key%/*}
    kernel=${key##*/}
    echo "==> Unable to $DKMS_ACTION module $module_version for kernel $kernel: ${DKMS_MODULES[$key]}."
  done
}
# display hook usage and exit $1 (default 1)
# Writes to stderr; ${0##*/} is the basename of the running script.
usage() {
  cat << EOF >&2
usage: ${0##*/} <options> install|remove
options: -D Do not run depmod
EOF
  exit ${1:-1}
}
# emulated program entry point
# Reads changed paths from stdin (supplied by the alpm hook), figures out
# which modules/kernels are affected, then installs or removes them.
main() {
  [[ "$DKMS_ALPM_HOOK_DEBUG" ]] && set -x
  # prevent each dkms call from failing with authorization errors
  if (( EUID )); then
    echo 'You must be root to use this hook' >&2
    exit 1
  fi
  # parse command line options
  declare -i DKMS_DEPMOD=1
  local opt
  while getopts 'hD' opt; do
    case $opt in
      D) DKMS_DEPMOD=0;;
      *) usage;;
    esac
  done
  shift $((OPTIND - 1))
  (( $# != 1 )) && usage
  # register DKMS action
  case "$1" in
    install|remove)
      declare -r DKMS_ACTION="$1";;
    *) usage;;
  esac
  # dkms path from framework config
  # note: the alpm hooks which trigger this script use static path
  source_tree='/usr/src'
  install_tree='/usr/lib/modules'
  source /etc/dkms/framework.conf
  # check source_tree and install_tree exists
  local path
  for path in "$source_tree" "$install_tree"; do
    if [[ ! -d "$path" ]]; then
      echo "==> Missing mandatory directory: $path. Exiting!"
      return 1
    fi
  done
  # storage for DKMS modules to install/remove
  # we use associate arrays to prevent duplicate
  declare -A DKMS_MODULES
  # parse stdin paths to guess what do do
  # (paths arrive without a leading slash, hence the "/$path" prefix)
  while read -r path; do
    if [[ "/$path" =~ ^$source_tree/([^/]+)-([^/]+)/dkms\.conf$ ]]; then
      parse_module "${BASH_REMATCH[1]}" "${BASH_REMATCH[2]}"
    elif [[ "/$path" =~ ^$install_tree/([^/]+)/ ]]; then
      parse_kernel "${BASH_REMATCH[1]}"
    fi
  done
  # dispatch to dkms_install or dkms_remove, then report leftovers
  dkms_$DKMS_ACTION
  show_errors
  return 0
}

main "$@"
03e403494cbcfe16d7b3314d9f0793b2ba28cb59 | Shell | bjornreppen/Zwave2Mqtt-docker | /bin/build.sh | UTF-8 | 4,082 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# Run this to build, tag and create fat-manifest for your images
# Inspired to: https://lobradov.github.io/Building-docker-multiarch-images/

# Register quemu headers
# (binfmt_misc registration so non-native images can run under qemu)
sudo docker run --rm --privileged multiarch/qemu-user-static:register

# Pinned upstream commits used as docker build args below.
Z2M_GIT_SHA1=d084fdf4eeb8287840b28d91e5714f7e537d166b
OPENZWAVE_16_GIT_SHA1=14f2ba743ff5ce893f652cad3a86968e26f8ea10
# OPENZWAVE_14_GIT_SHA1=449f89f063effb048f5dd6348d509a6c54fd942d

# Derive the version tag from the pinned package.json of Zwave2Mqtt.
wget -O package.json https://raw.githubusercontent.com/OpenZWave/Zwave2Mqtt/${Z2M_GIT_SHA1}/package.json
LATEST=$(node -p "require('./package.json').version")
rm package.json

# Build info
REPO="robertslando"
IMAGE_NAME="zwave2mqtt"
VERSIONS="$LATEST"
TARGET_ARCHES="arm32v6 arm32v7 arm64v8 arm64-v8 amd64"
# $1: Manifest version $2: Image version $3: arch_images
# Builds and pushes a multi-arch ("fat") manifest that points the
# $1 tag at the per-arch images tagged "<arch>-$2".
createManifest() {
  # Update latest manifest
  # Remove any locally cached manifest first, otherwise --amend reuses it.
  if [ -d ~/.docker/manifests/docker.io_${REPO}_${IMAGE_NAME}-$1 ]; then
    rm -rf ~/.docker/manifests/docker.io_${REPO}_${IMAGE_NAME}-$1
  fi

  docker manifest create --amend ${REPO}/${IMAGE_NAME}:$1 $3

  # Annotate each entry with the proper os/arch/variant triplet.
  for docker_arch in ${TARGET_ARCHES}; do
    case ${docker_arch} in
      amd64 ) annotate_flags="" ;;
      arm32v6 ) annotate_flags="--os linux --arch arm --variant armv6" ;;
      arm32v7 ) annotate_flags="--os linux --arch arm --variant armv7" ;;
      arm64v8 ) annotate_flags="--os linux --arch arm64 --variant armv8" ;;
      arm64-v8 ) annotate_flags="--os linux --arch arm64 --variant v8" ;;
    esac

    echo INFO: Annotating arch: ${docker_arch} with \"${annotate_flags}\"
    docker manifest annotate ${REPO}/${IMAGE_NAME}:$1 ${REPO}/${IMAGE_NAME}:${docker_arch}-$2 ${annotate_flags}
  done

  echo INFO: Pushing ${REPO}/${IMAGE_NAME}:$1
  docker manifest push ${REPO}/${IMAGE_NAME}:$1
}
cd ..

# For every version: render a per-arch Dockerfile from Dockerfile.cross,
# build and push each arch image, then assemble the fat manifests.
for IMAGE_VERSION in ${VERSIONS}; do

  echo INFO: Building $IMAGE_VERSION version

  DOCKER_FILE="Dockerfile"
  arch_images=""
  MANIFEST_VERSION="latest"
  OPENZWAVE_GIT_SHA1=${OPENZWAVE_16_GIT_SHA1}

  # if [[ ${IMAGE_VERSION} == $LATEST ]]; then
  #   MANIFEST_VERSION="latest"
  #   OPENZWAVE_GIT_SHA1=${OPENZWAVE_14_GIT_SHA1}
  # else
  #   MANIFEST_VERSION="latest-dev"
  #   OPENZWAVE_GIT_SHA1=${OPENZWAVE_16_GIT_SHA1}
  # fi

  for docker_arch in ${TARGET_ARCHES}; do

    echo INFO: Creating Dockerfile for ${docker_arch}

    cp $DOCKER_FILE.cross $DOCKER_FILE.${docker_arch}

    # Map the docker arch name to the qemu binary and base-image arch.
    # NOTE(review): arm32v7 deliberately(?) builds on the arm32v6 base —
    # confirm this is intended and not a copy/paste slip.
    case ${docker_arch} in
      amd64 ) qemu="x86_64" build_arch="amd64";;
      arm32v6 ) qemu="arm" build_arch="arm32v6";;
      arm32v7 ) qemu="arm" build_arch="arm32v6";;
      arm64v8 ) qemu="aarch64" build_arch="arm64v8";;
      arm64-v8 ) qemu="aarch64" build_arch="arm64v8";;
      *)
        echo ERROR: Unknown target arch.
        exit 1
    esac

    # Substitute the placeholders in the generated Dockerfile.
    sed -e "s|__BUILD_ARCH__|${build_arch}|g" \
      -e "s|__QEMU__|${qemu}|g" \
      -e "s|__DOCKER_ARCH__|${docker_arch}|g" \
      -i $DOCKER_FILE.${docker_arch}

    if [[ "${qemu}" == "$(uname -m)" ]]; then
      # Same as local architecture; no need for a cross build
      sed -i "/__CROSS_/d" $DOCKER_FILE.${docker_arch}
    else
      sed -i "s/__CROSS_//g" $DOCKER_FILE.${docker_arch}
    fi

    echo INFO: Building of ${REPO}/${IMAGE_NAME}:${docker_arch}-$IMAGE_VERSION
    docker build -f $DOCKER_FILE.${docker_arch} \
      --build-arg=Z2M_GIT_SHA1=${Z2M_GIT_SHA1} \
      --build-arg=OPENZWAVE_GIT_SHA1=${OPENZWAVE_GIT_SHA1} \
      -t ${REPO}/${IMAGE_NAME}:${docker_arch}-$IMAGE_VERSION .

    echo INFO: Successfully built ${REPO}/${IMAGE_NAME}:${docker_arch}-$IMAGE_VERSION
    echo INFO: Pushing to ${REPO}/${IMAGE_NAME}
    docker push ${REPO}/${IMAGE_NAME}:${docker_arch}-$IMAGE_VERSION

    # Accumulate the pushed per-arch tags for the manifest step below.
    arch_images="${arch_images} ${REPO}/${IMAGE_NAME}:${docker_arch}-${IMAGE_VERSION}"

    rm $DOCKER_FILE.${docker_arch}
  done

  echo INFO: Creating fat manifest
  createManifest $IMAGE_VERSION $IMAGE_VERSION "$arch_images"

  # Update latest and latest-dev tag to point to latest versions
  createManifest $MANIFEST_VERSION $IMAGE_VERSION "$arch_images"

done
| true |
fab0d868e47afd4bb5270ee4cef78e64a7c7f3ef | Shell | 7thsense/elasticsearch-ecs | /docker-entrypoint.sh | UTF-8 | 1,203 | 3.53125 | 4 | [] | no_license | #!/bin/bash
set -e

# Add elasticsearch as command if needed
if [ "${1:0:1}" = '-' ]; then
	set -- elasticsearch "$@"
fi

export ES_JAVA_OPTS=-Djava.net.preferIPv4Stack=true

# ECS will report the docker interface without help, so we override that with host's private ip
# FIX: the original assignment was split across lines such that `true` ran as
# a command and STARTUP_FLAGS was never actually set; Elasticsearch CLI
# settings also use -Ekey=value, not "key: value".
# (Both branches were identical upstream; kept that way deliberately.)
if [ -f /sys/hypervisor/uuid ] && [ "$(head -c 3 /sys/hypervisor/uuid)" = ec2 ]; then
	STARTUP_FLAGS="-Ecloud.node.auto_attributes=true -Ecluster.routing.allocation.awareness.attributes=aws_availability_zone"
else
	STARTUP_FLAGS="-Ecloud.node.auto_attributes=true -Ecluster.routing.allocation.awareness.attributes=aws_availability_zone"
fi

# Drop root privileges if we are running elasticsearch
if [ "$1" = 'elasticsearch' ]; then
	# Change the ownership of /usr/share/elasticsearch/data to elasticsearch
	chown -R elasticsearch:elasticsearch /usr/share/elasticsearch/data
	# $STARTUP_FLAGS is intentionally unquoted: it carries multiple flags.
	set -- "$@" -Epath.conf=/etc/elasticsearch $STARTUP_FLAGS \
		-Enetwork.host=_site_ \
		-Expack.security.enabled=false
	exec gosu elasticsearch "$@"
else
	# As argument is not related to elasticsearch,
	# then assume that user wants to run his own process,
	# for example a `bash` shell to explore this image
	exec "$@"
fi
| true |
7d25e2948bff227bf4472022ffeb721766aad14b | Shell | escenic/ece-scripts | /usr/share/escenic/ece-scripts/ece.d/flush.sh | UTF-8 | 374 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | function flush_caches() {
if [ "${type}" != "engine" ]; then
print "You cannot flush the caches of a ${type} instance"
return
fi
print "Flushing all of ${instance}'s caches on $HOSTNAME"
local url=$(get_escenic_admin_url)/do/publication/clearallcaches
run wget $wget_opts $wget_appserver_auth \
-O - \
--post-data='confirm=Confirm' \
$url
}
| true |
b0a73d9e062a2e0d276e2ff2984a1ec87e94edf8 | Shell | leanfrancucci/CIM-ARM | /install/rw/bin/run_tariff | UTF-8 | 1,522 | 3.015625 | 3 | [] | no_license | #!/bin/sh
# Main loop for the CT8016 tariff application: read telesup/account config,
# run one-time configuration, then restart tariff.exe handling its exit codes.
PROVIDER=$1
CONFDIR=/rw/etc/peers/default
BINDIR=/rw/bin
CONF=$CONFDIR/telesup.conf
if test ! -e $CONF; then
	echo "Falta $CONF"
	exit;
fi
# Pull individual "key: value" settings out of the config files.
INTENTOS=`cat $CONFDIR/account.conf | grep -i "^attempts" | sed 's/attempts: */'/`
TIMEINTENTOS=`cat $CONFDIR/account.conf | grep -i time_between_attempts | sed 's/time_between_attempts: */'/`
SERVIDOR=`cat $CONF | grep -i servidor | sed 's/servidor: */'/`
IDEQUIPO=`cat $CONF | grep -i idequipo | sed 's/idequipo: */'/`
USUARIO=`cat $CONF | grep -i usuario | sed 's/usuario: */'/`
PASSWORD=`cat $CONF | grep -i password | sed 's/password: */'/`
ROOT=`cat $CONF | grep -i root | sed 's/root: */'/`
BYMODEM=`cat $CONF | grep -i bymodem | sed 's/bymodem: */'/`
# One-shot configuration step: run it once and remove it afterwards.
if test -d /rw/8016Config; then
	cd /rw/8016Config
	./8016Config.exe
	cd /
	rm -r /rw/8016Config
fi
cd /rw/CT8016
# Run the hardware test if the user presses a key
./HardwareTest.exe 2
while true; do
	cd /rw/CT8016
	./tariff.exe
	ERROR_LEVEL="$?"
	# If it returns error code 24, a supervision (telesup) cycle is needed
	if test $ERROR_LEVEL -eq "24"
	then
		cd /rw/bin
		./log
		./finatender
		/rw/bin/intentar default 1
		/rw/CT8016/apply_telesup
	# If it returns 25, the hardware test must be run
	elif test $ERROR_LEVEL -eq "25"
	then
		./HardwareTest.exe
	# If it returns 23 restart the loop; any other code ends here
	elif test $ERROR_LEVEL -ne "23"
	then
		exit;
	fi
	echo Exit status = $ERROR_LEVEL
	#cd /rw/bin
	#./finatender
	#./cicloatender &
done
| true |
1d6f9da997baf061e4d3398599d768bb69d9884b | Shell | thuna-garry/backupGT_server | /bin/findUnusedPort.sh | UTF-8 | 635 | 3.953125 | 4 | [] | no_license | #!/bin/bash
###########################################################
# find the first unused port in a range of ports
#    considers all ip addresses on localhost
#    defaults to range of 30000, 31000
###########################################################
portBeg=${1-30000}
portEnd=${2-31000}

# Collect every port currently in use: lsof -F field output contains
# ":PORT->" (connections) or ":PORT" at end of line (listeners); strip the
# punctuation, then sort/uniq into an array of bare port numbers.
mapfile -t arr < <( \
    sudo lsof -i -P -n -F  \
    | egrep -o ':[0-9]{1,5}->|:[0-9]{1,5}$' \
    | sed 's/[>:-]//g' \
    | sort -n \
    | uniq \
    )
usedPorts=" ${arr[*]} "    # space separated

# Scan the range and print the first port not present in the used list;
# the surrounding spaces make the grep an exact-word match.
for ((p=$portBeg; p<=$portEnd; p++)); do
    if ! grep -q " $p " <<< "$usedPorts"; then
        echo $p
        break;
    fi
done
| true |
43d328d0ec49d2f8f5445831716f37a3c665a4bf | Shell | shri314/bash-scripts | /settings/Scripts/datediff | UTF-8 | 131 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Print the difference between two dates: datediff RHS LHS [FMT]
# Relies on GNU date: epoch(RHS) seconds are subtracted from LHS, so the
# printed instant is epoch + (LHS - RHS), formatted with FMT in UTC.
# NOTE(review): `date -d` and this subtraction trick are GNU-specific —
# confirm the target systems use GNU coreutils.
RHS=$1; shift;
LHS=$1; shift;
FMT=${1-"+%F %T"}; shift;
TZ=UTC date -d "$LHS $(date -d "$RHS" "+%s") sec ago" "$FMT"
| true |
7772161688ddcb8718bc6e7199f421b9eb657d60 | Shell | jgmize/socorro | /scripts/crons/cron_libraries.sh | UTF-8 | 5,878 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# techo is echo, but with a timestamp
function techo(){
STAMP="[`date -u +'%F %T'`]"
echo "$STAMP $1"
}
# Load shared socorro shell helpers (lock/unlock, $PYTHON, $APPDIR) and
# the monitor configuration (database credentials and hosts).
. /etc/socorro/socorrorc
. /etc/socorro/socorro-monitor.conf
techo "lock cron_libraries"
NAME=`basename $0 .sh`
# Take the cron lock so two runs of this job cannot overlap.
lock $NAME
# psql reads the password from the environment.
PGPASSWORD=$databasePassword
export PGPASSWORD
DATE=`date '+%Y%m%d'`
# Report tables are partitioned per week, named reports_<monday YYYYMMDD>.
WEEK=`date -d 'last monday' '+%Y%m%d'`
TMPDIR=`mktemp -d`
# gnu date does not seem to be able to do 'last monday' with a relative date
# Optional $1 overrides DATE; walk backwards one day at a time until the
# day name starts with "Mon" to find the matching week partition.
if [ -n "$1" ]
then
  DATE=$1
  d=$DATE
  while true
  do
    if [[ "$d" == Mon* ]]
    then
      WEEK=`date -d "$d" '+%Y%m%d'`
      break
    fi
    d=`date -d "$d - 1 day"`
  done
fi
# SQL literal of the form: date 'YYYY-MM-DD', spliced into the queries below.
SQL_DATE="date '`date -d "$DATE" '+%Y-%m-%d'`'"
techo "Processing for DATE: $DATE and WEEK: $WEEK"
techo "Phase 1: start"
# Phase 1: for each product, find the 3 versions with the most crash
# reports completed in the 24h before $SQL_DATE, export their crash
# dumps from HBase into a tarball, and run the crash-data-tools
# analyses (core counts, modules, addons) over that tarball.
for I in Firefox Thunderbird SeaMonkey
do
  techo "Phase 1: Product: $I"
  techo "Running psql query for version list."
  VERSIONS=`psql -t -U $databaseUserName -h $databaseHost $databaseName -c "select version, count(*) as counts from reports_${WEEK} where completed_datetime < $SQL_DATE and completed_datetime > ($SQL_DATE - interval '24 hours') and product = '${I}' group by version order by counts desc limit 3" | awk '{print $1}'`
  for J in $VERSIONS
  do
    techo "Phase 1: Version: $J start"
    techo "Running psql query for uuids and passing to hbaseClient.py ."
    # Export every matching crash (jsonz) from HBase into one tar file.
    psql -t -U $databaseUserName -h $databaseHost $databaseName -c "select uuid from reports_${WEEK} where completed_datetime < $SQL_DATE and completed_datetime > ($SQL_DATE - interval '24 hours') and product = '${I}' and version = '${J}'" | $PYTHON ${APPDIR}/socorro/external/hbase/hbase_client.py -h $hbaseHost export_jsonz_tarball_for_ooids $TMPDIR $TMPDIR/${I}_${J}.tar > $TMPDIR/${I}_${J}.log 2>&1
    techo "per-crash-core-count.py > $TMPDIR/${DATE}_${I}_${J}-core-counts.txt"
    $PYTHON /data/crash-data-tools/per-crash-core-count.py -p ${I} -r ${J} -f $TMPDIR/${I}_${J}.tar > $TMPDIR/${DATE}_${I}_${J}-core-counts.txt
    techo "per-crash-interesting-modules.py > $TMPDIR/${DATE}_${I}_${J}-interesting-modules.txt"
    $PYTHON /data/crash-data-tools/per-crash-interesting-modules.py -p ${I} -r ${J} -f $TMPDIR/${I}_${J}.tar > $TMPDIR/${DATE}_${I}_${J}-interesting-modules.txt
    techo "per-crash-interesting-modules.py > $TMPDIR/${DATE}_${I}_${J}-interesting-modules-with-versions.txt"
    $PYTHON /data/crash-data-tools/per-crash-interesting-modules.py -v -p ${I} -r ${J} -f $TMPDIR/${I}_${J}.tar > $TMPDIR/${DATE}_${I}_${J}-interesting-modules-with-versions.txt
    techo "per-crash-interesting-modules.py > $TMPDIR/${DATE}_${I}_${J}-interesting-addons.txt"
    $PYTHON /data/crash-data-tools/per-crash-interesting-modules.py -a -p ${I} -r ${J} -f $TMPDIR/${I}_${J}.tar > $TMPDIR/${DATE}_${I}_${J}-interesting-addons.txt
    techo "per-crash-interesting-modules.py > $TMPDIR/${DATE}_${I}_${J}-interesting-addons-with-versions.txt"
    $PYTHON /data/crash-data-tools/per-crash-interesting-modules.py -v -a -p ${I} -r ${J} -f $TMPDIR/${I}_${J}.tar > $TMPDIR/${DATE}_${I}_${J}-interesting-addons-with-versions.txt
    techo "Phase 1: Version: $J end"
  done
  # NOTE(review): this is inside the product loop, so "Phase 1: end" is
  # logged once per product rather than once for the whole phase.
  techo "Phase 1: end"
done
# Phase 2: same analyses as Phase 1, but for a hand-picked list of
# Firefox versions instead of the top-3-by-volume query.
MANUAL_VERSION_OVERRIDE="24.0 25.0a2 26.0a1"
techo "Phase 2: start"
for I in Firefox
do
  techo "Phase 2: Product: $I"
  for J in $MANUAL_VERSION_OVERRIDE
  do
    # NOTE(review): message says "Phase 1" although this is Phase 2.
    techo "Phase 1: Version: $J start"
    techo "Running psql query for uuids and passing to hbaseClient.py ."
    psql -t -U $databaseUserName -h $databaseHost $databaseName -c "select uuid from reports_${WEEK} where completed_datetime < $SQL_DATE and completed_datetime > ($SQL_DATE - interval '24 hours') and product = '${I}' and version = '${J}'" | $PYTHON ${APPDIR}/socorro/external/hbase/hbase_client.py -h $hbaseHost export_jsonz_tarball_for_ooids $TMPDIR $TMPDIR/${I}_${J}.tar > $TMPDIR/${I}_${J}.log 2>&1
    techo "per-crash-core-count.py > $TMPDIR/${DATE}_${I}_${J}-core-counts.txt"
    $PYTHON /data/crash-data-tools/per-crash-core-count.py -p ${I} -r ${J} -f $TMPDIR/${I}_${J}.tar > $TMPDIR/${DATE}_${I}_${J}-core-counts.txt
    techo "per-crash-interesting-modules.py > $TMPDIR/${DATE}_${I}_${J}-interesting-modules.txt"
    $PYTHON /data/crash-data-tools/per-crash-interesting-modules.py -p ${I} -r ${J} -f $TMPDIR/${I}_${J}.tar > $TMPDIR/${DATE}_${I}_${J}-interesting-modules.txt
    techo "per-crash-interesting-modules.py > $TMPDIR/${DATE}_${I}_${J}-interesting-modules-with-versions.txt"
    $PYTHON /data/crash-data-tools/per-crash-interesting-modules.py -v -p ${I} -r ${J} -f $TMPDIR/${I}_${J}.tar > $TMPDIR/${DATE}_${I}_${J}-interesting-modules-with-versions.txt
    techo "per-crash-interesting-modules.py > $TMPDIR/${DATE}_${I}_${J}-interesting-addons.txt"
    $PYTHON /data/crash-data-tools/per-crash-interesting-modules.py -a -p ${I} -r ${J} -f $TMPDIR/${I}_${J}.tar > $TMPDIR/${DATE}_${I}_${J}-interesting-addons.txt
    techo "per-crash-interesting-modules.py > $TMPDIR/${DATE}_${I}_${J}-interesting-addons-with-versions.txt"
    $PYTHON /data/crash-data-tools/per-crash-interesting-modules.py -v -a -p ${I} -r ${J} -f $TMPDIR/${I}_${J}.tar > $TMPDIR/${DATE}_${I}_${J}-interesting-addons-with-versions.txt
    techo "Phase 2: Version: $J end"
  done
  techo "Phase 2: end"
done
# Compress any result file over 500k, publish everything produced today
# to the shared crash_analysis mount, then clean up and release the lock.
techo "find $TMPDIR -name ${DATE}\* -type f -size +500k | xargs gzip -9"
find $TMPDIR -name ${DATE}\* -type f -size +500k | xargs gzip -9
techo "mkdir /mnt/crashanalysis/crash_analysis/${DATE}"
mkdir /mnt/crashanalysis/crash_analysis/${DATE}
techo "cp $TMPDIR/${DATE}* /mnt/crashanalysis/crash_analysis/${DATE}/"
cp $TMPDIR/${DATE}* /mnt/crashanalysis/crash_analysis/${DATE}/
techo "rm -rf $TMPDIR"
rm -rf $TMPDIR
techo "unlock cron_libraries"
unlock $NAME
techo "exit 0"
exit 0
| true |
209475276c41ed317f52625b3c3b398b3eea9e85 | Shell | pry0cc/axiom | /interact/axiom-playbook | UTF-8 | 3,683 | 3.671875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Axiom environment: shared variables (color codes, config) and helper
# functions come from the interact/includes scripts.
AXIOM_PATH="$HOME/.axiom"
source "$AXIOM_PATH/interact/includes/vars.sh"
source "$AXIOM_PATH/interact/includes/functions.sh"
playbooks="$AXIOM_PATH/playbooks"
# Print the base64-encoded ASCII-art banner.
echo -e -n "${BWhite}"
echo "ICAgICAgICAgICAgICBfICAgICAgICAgICAgICAgICAgICAgICAgICAgICBfXyAgICAgICAgICAgIF9fICAgICAgICAgICAgICAgIF9fCiAgX19fXyBfXyAgX18oXylfX18gIF9fX18gX19fICAgICAgICBfX19fICAvIC9fX18gX19fICBfXy8gL18gIF9fX18gIF9fX18gIC8gL19fCiAvIF9fIGAvIHwvXy8gLyBfXyBcLyBfXyBgX18gXF9fX19fXy8gX18gXC8gLyBfXyBgLyAvIC8gLyBfXyBcLyBfXyBcLyBfXyBcLyAvL18vCi8gL18vIC8+ICA8LyAvIC9fLyAvIC8gLyAvIC8gL19fX19fLyAvXy8gLyAvIC9fLyAvIC9fLyAvIC9fLyAvIC9fLyAvIC9fLyAvICw8ClxfXyxfL18vfF8vXy9cX19fXy9fLyAvXy8gL18vICAgICAvIC5fX18vXy9cX18sXy9cX18sIC9fLl9fXy9cX19fXy9cX19fXy9fL3xffAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAvXy8gICAgICAgICAgICAvX19fXy8KQXV0aG9yOiBAcHJ5MGNj" | base64 -d
echo ""
echo -e "${Color_Off}"
# Parser state: selected playbook, input file, output file/flag, and the
# list of argument positions already consumed as option values.
module=""
input=""
outfile=""
out_flag=""
ext=""
pass=()
i=0
# Manual argument parser: walk "$@" by 1-based index; positions consumed
# as an option's value are recorded in pass[] so they are skipped when
# their own iteration comes around.
for arg in "$@"
do
    i=$((i+1))
    if [[ ! " ${pass[@]} " =~ " ${i} " ]]; then
        set=false
        # First positional argument is the input file.
        if [[ "$i" == 1 ]]; then
            input="$1"
            set=true
            pass+=($i)
        fi
        # -m <playbook>: select the playbook to run.
        if [[ "$arg" == "-m" ]]; then
            n=$((i+1))
            module=$(echo ${!n})
            echo -e "${BWhite}Selecting playbook '${BGreen}$module'${Color_Off}"
            set=true
            pass+=($i)
            pass+=($n)
        fi
        # -oG <file>: greppable output (txt extension).
        if [[ "$arg" == "-oG" ]]; then
            out_flag="-oG"
            n=$((i+1))
            ext="txt"
            outfile=$(echo ${!n})
            set=true
            pass+=($i)
            pass+=($n)
        fi
        # -oX <file>: XML output.
        if [[ "$arg" == "-oX" ]]; then
            out_flag="-oX"
            n=$((i+1))
            ext="xml"
            outfile=$(echo ${!n})
            set=true
            pass+=($i)
            pass+=($n)
        fi
        # -o <file>: plain output.
        if [[ "$arg" == "-o" ]]; then
            out_flag="-o"
            n=$((i+1))
            outfile=$(echo ${!n})
            set=true
            pass+=($i)
            pass+=($n)
        fi
        # --list/-l: print available playbooks and quit.
        if [[ "$arg" == "--list" ]] || [[ "$arg" == "-l" ]]; then
            echo -e "${BWhite}Playbooks${Color_Off}${Blue}"
            bash -c "ls ~/.axiom/playbooks" | awk '{ print $NF }'
            echo -n -e "${Color_Off}"
            exit
            # NOTE(review): the two statements below are unreachable after exit.
            set=true
            pass+=($i)
        fi
        # --help/-h: usage text (help comes from the sourced includes).
        if [[ "$arg" == "--help" ]] || [[ "$arg" == "-h" ]]; then
            help
            exit
            set=true
            pass+=($i)
        fi
        # Unrecognized arguments are accumulated in $args.
        if [[ "$set" != "true" ]]; then
            args="$args $arg"
        fi
    fi
done
# Validate required arguments: an input file and an existing playbook.
# All problems are reported before exiting so the user sees every error.
error=false
if [[ "$input" == "" ]]; then
    echo -e "${BRed}Error: ${Red}Please supply an input file by supplying it as the first argument...${Color_Off}"
    error=true
fi
if [[ "$module" == "" ]]; then
    echo -e "${BRed}Error: ${Red}Please supply a playbook using -m${Color_Off}"
    echo -e "${BWhite}Use '${Blue}'axiom-playbook <input> -m <playbook>'${Color_Off}"
    echo -e "${BWhite}Use '${Blue}axiom-playbook --list${BWhite}' to list playbooks...${Color_Off}"
    error=true
else
    # The playbook must exist as a file under ~/.axiom/playbooks.
    if [[ ! -f "$playbooks/$module" ]]; then
        echo -e "${BRed}Error: ${Red}No such playbook exists${Color_Off}"
        echo -e "${Red}Module: '$module'${Color_Off}"
        error=true
    fi
fi
[[ "$error" == "true" ]] && exit 1
# module_exist: print 1 if a module definition file exists for the given
# name under ~/.axiom/modules, otherwise print 0.  Always succeeds; the
# caller reads the echoed value via command substitution.
function module_exist() {
	local name="$1"

	if [[ -f "$HOME/.axiom/modules/$name.json" ]]
	then
		echo 1
	else
		echo 0
	fi
}
# Build a pipeline script from the playbook: each playbook line becomes a
# numbered stage whose output file feeds the next stage.  "cat input" in
# the playbook is rewritten to read the user-supplied input file.
playbook_contents=$(cat "$playbooks/$module" | sed "s/cat input/cat $input/g")
echo -e "${BWhite}Starting axiom-playbook: [ ${Blue}$playbook_contents > $outfile${BWhite} ] ${Color_Off}"
tmp="$HOME/.axiom/tmp/$(date +%s)-$RANDOM"
mkdir -p "$tmp"
# Playbook lines are pipe-separated; IFS="|" makes the for loops below
# split on pipes (and whitespace) when iterating the playbook file.
IFS="|"
total=0
for i in $(cat "$playbooks/$module");
do
    total=$((total+1))
done
counter=0
for i in $(cat "$playbooks/$module");
do
    # First word of the stage decides how it is wired into cmds.sh.
    command="$(echo $i | awk '{ print $1 }')"
    if [[ "$command" == "cat" ]]; then
        # "cat" stages write straight into the next numbered file.
        echo "$i > $((counter+1))" >> $tmp/cmds.sh
    elif [[ "$(module_exist "$command")" -eq 1 ]]; then
#        echo "MODULE EXISTS $command"
        # Axiom modules run via axiom-scan; the final stage gets the
        # user's output flag (-o/-oG/-oX), intermediate stages get -o.
        if [[ "$((counter+1))" -eq "$total" ]]; then
            echo "axiom-scan $counter -m $i --quiet $out_flag $((counter+1)) " >> $tmp/cmds.sh
        else
            echo "axiom-scan $counter -m $i --quiet -o $((counter+1)) " >> $tmp/cmds.sh
        fi
    else
        # Plain shell command: pipe the previous stage's file through it.
        echo "echo 'Command: [ $i ]'" >> $tmp/cmds.sh
        echo "cat $counter | $i > $((counter+1))" >> $tmp/cmds.sh
    fi
    counter=$((counter+1))
done
chmod +x "$tmp/cmds.sh"
cp $input "$tmp/input"
# Run the generated pipeline inside the temp dir, then copy the last
# stage's output back to the requested outfile.
start=$(pwd)
cd "$tmp"
echo ""
"$tmp/cmds.sh"
echo "$tmp"
cp -r "$tmp/$counter" "$start/$outfile"
cd "$start"
echo -e "${BGreen}Output saved successfully to '${Blue}$outfile${BGreen}'${Color_Off}"
| true |
f9c7b4affb4284344d80babe819984ed98128b55 | Shell | melange-app/melange | /mobile/android/run.sh | UTF-8 | 512 | 2.6875 | 3 | [
"MIT"
] | permissive | set -e
if [ ! -f run.sh ] ; then
echo 'can only be run from getmelange.com/mobile/android'
exit 1
fi
if [ -f assets/client/ ] ; then
rm -rf assets/client/
fi
cp -r ../../client assets/
lessc --clean-css assets/client/css/melange.less > assets/client/css/melange.css
rm assets/client/css/*.less
if [ -f assets/lib/ ] ; then
rm -rf assets/lib/
fi
cp -r ../../lib assets/
docker run -v $GOPATH/src:/src mobile /bin/bash -c 'cd /src/getmelange.com/mobile/android && ./make.bash' && ./all.bash
| true |
29cbf1a26eab3428c94500d888af4aea2ede8049 | Shell | ivis-mizuguchi/ocs-templates | /OpenHPC-v1/docker/image/bc/common/etc/vcp/rc.d/20-serf.sh | UTF-8 | 444 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
set -e

# Write /etc/sysconfig/serf for the serf agent.  Two layouts: without
# MY_HOST_IF the agent binds to an explicit PRIVATE_IP; with MY_HOST_IF
# it binds to an interface and the systemd unit is patched accordingly.
if [ -z "${MY_HOST_IF}" ]; then
    cat > /etc/sysconfig/serf <<EOF
SERF_NODE_ID=${SERF_NODE_ID}
VCCC_ID=${VCCC_ID}
VCCCTR_IPADDR=${VCCCTR_IPADDR}
PRIVATE_IP=${PRIVATE_IP}
EOF
else
    cat > /etc/sysconfig/serf <<EOF
SERF_NODE_ID=${SERF_NODE_ID}
MY_HOST_IF=${MY_HOST_IF}
VCCC_ID=${VCCC_ID}
VCCCTR_IPADDR=${VCCCTR_IPADDR}
EOF
    # Replace serf's -bind=... option with -iface=${MY_HOST_IF} in the unit.
    # NOTE(review): the single-quoted sed program keeps ${MY_HOST_IF} as a
    # literal string in the service file — confirm the unit expands it
    # (e.g. via EnvironmentFile) rather than expecting a concrete value.
    sed -i -r -e '/^ExecStart=/s%-bind=[^ \t]+%-iface=${MY_HOST_IF}%' /etc/systemd/system/serf.service
fi
| true |
78583dcdea2ddba9e88f154427f9956caf6e63c4 | Shell | nonflammable/MyScripts | /ds18b20_domoticz.sh | UTF-8 | 571 | 2.828125 | 3 | [] | no_license | #!/bin/bash
IPDOMOTICZ='localhost'
PORTDOMOTICZ=8080
DS18B20="28-80000003f7a7"
IDX=4
WAIT=10
URL="http://$IPDOMOTICZ:$PORTDOMOTICZ/json.htm?type=command¶m=udevice&idx=$IDX&nvalue=0&svalue="
DIRECTORY="/tmp/$(date +'%s')/$DS18B20"
mkdir -p $DIRECTORY
cd $DIRECTORY
while (sleep $WAIT); do
cp "/sys/bus/w1/devices/$DS18B20/w1_slave" "$DIRECTORY/czujnik"
if (cat czujnik | grep "YES"); then
temp=$( cat czujnik | grep "t" | cut -d "=" -f2)
temp=${temp::-3}.${temp:$((${#temp}-3))}
curl "$URL$temp"
fi
rm -f czujnik
done
#EoF
#Enclude;
#2017-03-21
| true |
43137ac68f8d55332c21bebe0da9975b25041ae6 | Shell | NBoetkjaer/ROBINsect | /raspPi/raspiMake.sh | UTF-8 | 750 | 2.859375 | 3 | [] | no_license | #!/bin/sh
ROBINhost=raspi
RELATIVEDIR="src/test/"
BASEDIR="\$HOME/$RELATIVEDIR"
# Stop robinsect program/service.
ssh pi@$ROBINhost "sudo systemctl stop robinsect"
# Make directory and transfer source files.
ssh pi@$ROBINhost "mkdir -p $BASEDIR"
ssh pi@$ROBINhost "cd $BASEDIR; rm -r src/"
ssh pi@$ROBINhost "cd $BASEDIR; rm -r boot/"
ssh pi@$ROBINhost "cd $BASEDIR; rm -r build/"
#ssh pi@$ROBINhost "cd $BASEDIR; rm -r config/'
scp ./* pi@$ROBINhost:${RELATIVEDIR}
scp -r ./src pi@$ROBINhost:${RELATIVEDIR}src/
scp -r ./boot pi@$ROBINhost:${RELATIVEDIR}boot/
#scp -r ./config pi@$ROBINhost:src/test/config/
ssh pi@$ROBINhost "cd $BASEDIR; ./make_script.sh"
# Start robinsect program/service.
ssh pi@$ROBINhost "sudo systemctl start robinsect" | true |
83f6b5fb718ebf8b6dcb9a8e61a0ec2fcddc0982 | Shell | Rubenmartinezlozann/act6b | /act6bej2.sh | UTF-8 | 309 | 3.140625 | 3 | [] | no_license | total=0
# Average rainfall: read "<name> <litres>" lines from precipitaciones.txt
# and print the integer mean of the second column.
total=0
contador=0
# Single pass over the file.  The original re-read the whole file with
# cat | head -N | tail -1 | awk for every line, which is O(n^2) in the
# file size; reading the second whitespace-separated field directly is
# equivalent to awk '{print $2}'.
while read -r _ litros _; do
	contador=$((contador+1))
	total=$((total+litros))
done < precipitaciones.txt
media=$((total/contador))
echo "La media de precipitaciones es $media"
7c0abf99e74c46ad3e82db18006aaad911210904 | Shell | programandala-net/fsb2 | /fsb2-tap.sh | UTF-8 | 2,315 | 3.84375 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/sh
# fsb2-tap.sh
# This file is part of fsb2
# http://programandala.net/en.program.fsb2.html
# ##############################################################
# Author and license
# Copyright (C) 2015,2020 Marcos Cruz (programandala.net)
# You may do whatever you want with this work, so long as you
# retain the copyright notice(s) and this license in all
# redistributed copies and derived works. There is no warranty.
# ##############################################################
# Description
# This program converts a Forth source file from the FSB format
# to a ZX Spectrum TAP file.
# ##############################################################
# Requirements
# fsb2:
# <http://programandala.net/en.program.fsb2.html>
# bin2code:
# <http://metalbrain.speccy.org/link-eng.htm>.
# ##############################################################
# Usage (after installation)
# fsb2-tap.sh filename.fsb
# ##############################################################
# History
# 2015-10-12: Adapted from fsb
# (http://programandala.net/en.program.fsb.html).
# ##############################################################
# Error checking
# Require exactly one argument: the .fsb source file.  Each check below
# validates one property of the path (exists, regular file, readable,
# non-empty) and exits with status 1 on failure.
if [ "$#" -ne 1 ] ; then
  echo "Convert a Forth source file from .fsb to .tap format"
  echo 'Usage:'
  echo "  ${0##*/} sourcefile.fsb"
  exit 1
fi
if [ ! -e "$1" ] ; then
  echo "Error: <$1> does not exist"
  exit 1
fi
if [ ! -f "$1" ] ; then
  echo "Error: <$1> is not a regular file"
  exit 1
fi
if [ ! -r "$1" ] ; then
  echo "Error: <$1> can not be read"
  exit 1
fi
if [ ! -s "$1" ] ; then
  echo "Error: <$1> is empty"
  exit 1
fi
# ##############################################################
# Main
# Create the .fb blocks file from the original .fsb source:
fsb2 $1
# Get the filenames:
basefilename=${1%.*}
blocksfile=$basefilename.fb
tapefile=$basefilename.tap
spectrumfilename=${basefilename##*/}
# The bin2code converter uses the host system filename as the Spectrum 10-char
# filename in the TAP file header, and it provides no option to change it.
# That's why, as a partial solution, the base filename is used instead:
# (the symlink gives bin2code the extension-less name to embed in the header)
ln -s $blocksfile $basefilename
bin2code $basefilename $tapefile
echo "\"$tapefile\" created"
# Remove the intermediate file:
rm -f $blocksfile $basefilename
# vim:tw=64:ts=2:sts=2:et:
| true |
1dc1ed9adbce0110eacda5e2f576dc353a15e57f | Shell | lotharsm/dockerized-bb | /toolchains/ps3/packages/sdl_psl1ght/build.sh | UTF-8 | 603 | 2.921875 | 3 | [] | no_license | #! /bin/sh
# Pinned upstream commit of bgK/sdl_psl1ght to fetch and build.
SDL_PSL1GHT_VERSION=d9763a92004369ba3d2384553eb84b2732be3ca9

# Resolve this package's directory and load the shared build helper
# functions (do_make_bdir, do_http_fetch, do_make, do_clean_bdir).
PACKAGE_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)
HELPERS_DIR=$PACKAGE_DIR/../..
. $HELPERS_DIR/functions.sh

do_make_bdir

do_http_fetch sdl_psl1ght "https://github.com/bgK/sdl_psl1ght/archive/${SDL_PSL1GHT_VERSION}.tar.gz" 'tar xzf'

# export PATH to please script.sh
export PATH=$PATH:$PS3DEV/bin:$PS3DEV/ppu/bin:$PS3DEV/spu/bin
# Use -e to stop on error
bash -e ./script.sh
# script.sh has compilation and installation commented out
do_make
do_make install

do_clean_bdir

# Cleanup wget HSTS
rm -f $HOME/.wget-hsts
| true |
e1096cbe4ecc0793511230d1a0431fa82da0d3ac | Shell | johnarnett/Script-Portfolio | /bash/online_check.sh | UTF-8 | 667 | 3 | 3 | [] | no_license | #!/bin/bash
# Load the environment needed by dbaccess (Informix settings).
. aprset

# First pass: ping each store host (z<id>) once; look up the store name
# from the Informix "stores" table for display.  Hosts that do not answer
# are appended to offline_stores.log for a retry below.
# NOTE(review): offline_stores.log is only ever appended to — entries
# from previous runs persist; confirm whether it should be truncated first.
for i in $(cat stores.unl)
do
   st=$(echo "select st_shp_st from stores where st_id = $i" | dbaccess atb 2>/dev/null | grep -v st_shp_st)
   echo -n $i " - " $st " - "
   ping -c 1 z$i > /dev/null
   if [ "$?" -eq 0 ] ; then
      echo "Online"
   else
      echo ""
      echo $i >> offline_stores.log
   fi
done

# Second pass: retry every logged offline store with 10 pings before
# declaring it offline for good.
if [ -f offline_stores.log ] ; then
   echo ""
   echo "Offline Stores:"
   echo ""
   for i in $(cat offline_stores.log)
   do
      st=$(echo "select st_shp_st from stores where st_id = $i" | dbaccess atb 2>/dev/null | grep -v st_shp_st)
      echo -n $i " - " $st " - "
      ping -c 10 z$i > /dev/null
      if [ "$?" -eq 0 ] ; then
         echo "Online"
      else
         echo "Offline"
      fi
   done
fi
| true |
32006ce06131dec0b0e974bc3ba43adf22aa3857 | Shell | devkitPro/pacman-packages | /switch/SDL2_mixer/PKGBUILD | UTF-8 | 1,448 | 2.640625 | 3 | [] | no_license | # Maintainer: Dave Murphy <davem@devkitpro.org>
# Contributor: Cpasjuste <cpasjuste@gmail.com>
# Contributor: carstene1ns <dev f4ke de>
pkgname=switch-sdl2_mixer
pkgver=2.0.4
pkgrel=2
pkgdesc="A sample multi-channel audio mixer library."
arch=('any')
url="https://libsdl.org/projects/SDL_mixer/"
license=("zlib")
options=(!strip libtool staticlibs)
makedepends=('switch-pkg-config' 'dkp-toolchain-vars')
depends=(
'switch-sdl2'
'switch-flac'
'switch-libvorbisidec'
'switch-libmodplug'
'switch-mpg123'
'switch-opusfile'
)
source=("${url}release/SDL2_mixer-${pkgver}.tar.gz")
sha256sums=('b4cf5a382c061cd75081cf246c2aa2f9df8db04bdda8dcdc6b6cca55bede2419')
groups=('switch-portlibs' 'switch-sdl2')
build() {
cd SDL2_mixer-$pkgver
source ${DEVKITPRO}/switchvars.sh
# patch out compiling playwave and playmus
sed 's|\$(objects)/play.*mus\$(EXE)||' -i Makefile.in
LIBS="-lm" ./configure --prefix="${PORTLIBS_PREFIX}" \
--host=aarch64-none-elf --disable-shared --enable-static \
--disable-music-cmd \
--enable-music-ogg-tremor \
--enable-music-mod-modplug
make
}
package() {
cd SDL2_mixer-$pkgver
source /opt/devkitpro/switchvars.sh
make DESTDIR="$pkgdir" install
# add our static libs
echo "Requires.private: vorbisidec libmodplug libmpg123 opusfile" >> "${pkgdir}/${PORTLIBS_PREFIX}/lib/pkgconfig/SDL2_mixer.pc"
# License
install -Dm644 "COPYING.txt" "${pkgdir}/${PORTLIBS_PREFIX}/licenses/${pkgname}/COPYING.txt"
}
| true |
4058267c7ccdfaa3f8a079721918e6d3714a6e72 | Shell | floatious/bootrr | /helpers/bootrr | UTF-8 | 242 | 3.125 | 3 | [] | no_license | #!/bin/sh
# timeout <attempts> <command> [args...]
# Retry a command once per second until it succeeds or the attempt
# budget runs out.  Returns 0 on the first success, 1 otherwise.
timeout() {
	attempts="$1"; shift
	# BUG FIX: the old code did cmd="$@" and ran unquoted $cmd, which
	# flattened the argument list and re-split it on whitespace; running
	# "$@" directly preserves each argument exactly as passed.
	i=0
	while [ "$i" -lt "$attempts" ]
	do
		"$@" && return 0
		i=$((i+1))
		sleep 1
	done
	return 1
}
# test_report_exit <result>
# Report the result (e.g. pass/fail) for the current $TEST_CASE_ID to
# LAVA and terminate the script with status 0 regardless of the result.
test_report_exit() {
	TEST_RESULT=$1
	lava-test-case ${TEST_CASE_ID} --result ${TEST_RESULT}
	exit 0
}
| true |
804a3d830e988b13073d34af56785edcc720d871 | Shell | ban-archive/script_divers | /scripts/export/export_csv.sh | UTF-8 | 5,682 | 3.171875 | 3 | [] | no_license | #!/bin/sh
# But : exporter la ban en csv format banv0
################################################################################
# ARGUMENT : $1 : repertoire dans lequel sera genere le fichier
# ARGUMENT : $2 : emprise de l export (deparmtement ou insee commune)
################################################################################
# Les donnees doivent etre dans la base ban_init en local et avoir la structure provenant
# des csv IGN
#############################################################################
# REMARQUE :
# - la base PostgreSQL, le port doivent être passés dans les variables d'environnement
# PGDATABASE et si besoin PGUSER, PGHOST et PGPASSWORD
#############################################################################
outPath=$1
municipality=$2
date=$3
if [ $# -lt 3 ]; then
echo "Usage : export_csv.sh <outPath> <municipality> <date>"
echo "Exemple : export_csv.sh /home/ban/test 90001 20190701"
exit 1
fi
#set -x
rm -f ${outPath}/ban_${municipality}_*.csv
rm -f ${outPath}/group_id_ign_${municipality}_*.csv
rm -f ${outPath}/adresse_id_ign_${municipality}_*.csv
echo "Traitement de l'insee $municipality"
echo "\set ON_ERROR_STOP 1" > ${outPath}/commandeTemp.sql
echo "\\\timing " >> ${outPath}/commandeTemp.sql
echo "drop table if exists temp;" >> ${outPath}/commandeTemp.sql
requete="
select pos.id as id_ban_position,
hn.id as id_ban_adresse,
hn.pk as hn_pk,
(
case
when hn.cia like '%\_' then substr(hn.cia,1,char_length(hn.cia)-1)
else hn.cia
end
) as cia_temp,
g.id as id_ban_group,
g.pk as g_pk,
g.fantoir as id_fantoir,
hn.number as numero,
hn.ordinal as suffixe,
g.name as nom_voie,
pt.code as code_postal,
m.name as nom_commune,
m.insee as code_insee,
group_secondaires.nom_complementaire,
(
case
when (m.insee='971' or m.insee='972') then st_x(st_transform(st_setsrid(pos.center, 4326), 4559))
when (m.insee='973') then st_x(st_transform(st_setsrid(pos.center, 4326), 2972))
when (m.insee='974') then st_x(st_transform(st_setsrid(pos.center, 4326), 2975))
when (m.insee='975') then st_x(st_transform(st_setsrid(pos.center, 4326), 4467))
when (m.insee='976') then st_x(st_transform(st_setsrid(pos.center, 4326), 4471))
when (m.insee='977' or m.insee='978') then st_x(st_transform(st_setsrid(pos.center, 4326), 4559))
else st_x(st_transform(st_setsrid(pos.center, 4326), 2154))
end
) as x,
(
case
when (m.insee='971' or m.insee='972') then st_y(st_transform(st_setsrid(pos.center, 4326), 4559))
when (m.insee='973') then st_y(st_transform(st_setsrid(pos.center, 4326), 2972))
when (m.insee='974') then st_y(st_transform(st_setsrid(pos.center, 4326), 2975))
when (m.insee='975') then st_y(st_transform(st_setsrid(pos.center, 4326), 4467))
when (m.insee='976') then st_y(st_transform(st_setsrid(pos.center, 4326), 4471))
when (m.insee='977' or m.insee='978') then st_y(st_transform(st_setsrid(pos.center, 4326), 4559))
else st_y(st_transform(st_setsrid(pos.center, 4326), 2154))
end
) as y,
st_x(pos.center) as lon,
st_y(pos.center) as lat,
pos.kind as typ_loc,
pos.source_kind as source,
date(greatest(pos.modified_at,hn.modified_at,g.modified_at)) as date_der_maj,
rank() over (partition by pos.housenumber_id, pos.kind order by pos.modified_at DESC)
from position as pos
left join housenumber as hn on pos.housenumber_id = hn.pk
left join \"group\" as g on hn.parent_id = g.pk
left join (
select hn_g_s.housenumber_id, string_agg(g.name,'|') as nom_complementaire from housenumber_group_through hn_g_s
left join \"group\" g on (g.pk = hn_g_s.group_id)
left join municipality as m on g.municipality_id = m.pk
where m.insee like '${municipality}%'
group by hn_g_s.housenumber_id
) as group_secondaires on group_secondaires.housenumber_id = hn.pk
left join postcode as pt on hn.postcode_id = pt.pk
left join municipality as m on g.municipality_id = m.pk
where m.insee like '${municipality}%'
and hn.deleted_at is null
and hn.number is not null
order by m.insee, g.pk, hn.pk"
requete=`echo ${requete}| sed "s/\n//"`
echo "create table temp as select *, substring(cia_temp from 1 for 11) || lower(substring(cia_temp from 12 )) as cle_interop from (${requete}) as a where rank = 1;" >> ${outPath}/commandeTemp.sql
echo "\COPY (select id_ban_position,id_ban_adresse,cle_interop,id_ban_group,id_fantoir,numero,suffixe,nom_voie,code_postal,nom_commune,code_insee,nom_complementaire,x,y,lon,lat,typ_loc,source,date_der_maj from temp) TO '${outPath}/ban_${municipality}_${date}.csv' CSV HEADER DELIMITER ';'" >> ${outPath}/commandeTemp.sql
echo "drop table if exists temp_group;" >> ${outPath}/commandeTemp.sql
echo "create table temp_group as select id_ban_group,g_pk from temp group by id_ban_group,g_pk;" >> ${outPath}/commandeTemp.sql
echo "\COPY (select t.id_ban_group,g.ign from temp_group t left join \"group\" g on g.pk = t.g_pk where g.ign is not null) TO '${outPath}/group_id_ign_${municipality}_${date}.csv' CSV HEADER DELIMITER ';'" >> ${outPath}/commandeTemp.sql
echo "drop table if exists temp_hn;" >> ${outPath}/commandeTemp.sql
echo "create table temp_hn as select id_ban_adresse,hn_pk from temp group by id_ban_adresse,hn_pk;" >> ${outPath}/commandeTemp.sql
echo "\COPY (select t.id_ban_adresse,h.ign from temp_hn t left join housenumber h on h.pk = t.hn_pk where h.ign is not null) TO '${outPath}/adresse_id_ign_${municipality}_${date}.csv' CSV HEADER DELIMITER ';'" >> ${outPath}/commandeTemp.sql
psql -f ${outPath}/commandeTemp.sql
if [ $? -ne 0 ]
then
echo "Erreur lors de l export du csv"
exit 1
fi
rm ${outPath}/commandeTemp.sql
echo "FIN"
| true |
f9d2134ccf43fee6976385a6233f184843635f97 | Shell | basiliocat/gebot | /roles/postgresql/install.sh | UTF-8 | 1,147 | 3.296875 | 3 | [] | no_license | #!/bin/sh
# Remember whether screen was already installed; if we install it below
# just for the daemon-start trick, it is removed again at the end.
[ ! -x /usr/local/bin/screen ] && remove_screen=1

xargs pkg install -y <<EOT
databases/postgresql92-server
sysutils/screen
EOT

mkdir -p /data/pgdata
chown -R pgsql:pgsql /data/pgdata

# Enable PostgreSQL in rc.conf (only once) with its data dir and initdb flags.
grep -Eq '^postgresql_enable=' /etc/rc.conf || cat >> /etc/rc.conf << EOT
postgresql_enable="YES"
postgresql_data="/data/pgdata"
postgresql_initdb_flags="--encoding=utf-8 --locale=en_US.UTF-8 --lc-collate=ru_RU.UTF-8"
EOT

/usr/local/etc/rc.d/postgresql initdb

# Allow password (md5) auth from all RFC1918 private networks.
cat >> /data/pgdata/pg_hba.conf << EOT
host	all	all	10.0.0.0/8	md5
host	all	all	172.16.0.0/12	md5
host	all	all	192.168.0.0/16	md5
EOT

#rootpw=`LANG=C tr -dc A-Za-z0-9 < /dev/urandom | head -c 10`
#cat >> /root/.pgpass << EOT
#localhost:*:*:pgsql:$rootpw
#EOT
#chmod 600 /root/.pgpass

# Derive the timezone name (e.g. Europe/Moscow) from the /etc/localtime
# symlink and write it into postgresql.conf.
# NOTE(review): assumes /etc/localtime is a symlink with the zone in path
# components 5-6 — verify on the target host.
tz=`readlink /etc/localtime | cut -d / -f 5,6`
sed -E -i '' -e "s~^(log_)?timezone ?=.*~\1timezone = '$tz'~" \
	/data/pgdata/postgresql.conf

# Start the server detached via screen (the rc script blocks otherwise).
screen -d -m /usr/local/etc/rc.d/postgresql start
sleep 1

[ "$remove_screen" = "1" ] && pkg delete -y sysutils/screen

echo "PostgreSQL installed"
| true |
266f87f0c3ac73ce11825bf408743e35e91602d8 | Shell | yamennassif/Linux-Settings | /bin/monitor | UTF-8 | 1,796 | 3.375 | 3 | [
"Beerware"
] | permissive | #!/usr/bin/env sh
# xrandr output names.  INTERN is assigned twice and the second value
# (eDP1) wins; NOTE(review): the LVDS1 line looks like a leftover for an
# older machine — confirm before removing.
INTERN="LVDS1"
INTERN="eDP1"
EXTERN1="DP2"
EXTERN2="DP1"
# Largest horizontal resolution offered by more than one output
# (uniq -d keeps only modes listed by several outputs).
MAXRES=`xrandr --query | awk '/^ *[0-9]*x[0-9]*/{ print $1 }' | sort | uniq -d | cut -d'x' -f 1 | sort -nr | head -1`
# Full WxH mode string with that width, shared by several outputs.
CHOOSEN_RES=`xrandr --query | grep $MAXRES | awk '/^ *[0-9]*x[0-9]*/{ print $1 }' | sort | uniq -d`
PrintUsage(){
cat << EOI
Usage:
monitor < modes | intern | extern | clone | extend | extendl | work | workl | help >
Options:
modes Shows available xrands modes
intern Use only the intern monitor
extern use only the extern monitor
clone Clone monitors
extend Extend the monitor from intern to extern
extendl Extend the monitor from intern to extern on the left
work Special setting for work-place with two monitors (one in portrait-mode)
workl Special setting for work-place with two monitors on a buggy lenovo-dockingstation
help Shows this help
EOI
}
case $1 in
modes)
xrandr -q
;;
intern)
xrandr --output $INTERN --auto --output $EXTERN1 --off --output $EXTERN2 --off
;;
extern)
xrandr --output $INTERN --off --output $EXTERN1 --auto
;;
clone)
CLONERES=`xrandr --query | awk '/^ *[0-9]*x[0-9]*/{ print $1 }' | sort | uniq -d | head -1`
xrandr --output $INTERN --mode $CHOOSEN_RES --output $EXTERN1 --same-as $CHOOSEN_RES --mode $CHOOSEN_RES
;;
extend)
xrandr --output $INTERN --auto --output $EXTERN1 --auto --right-of $INTERN
;;
extendl)
xrandr --output $INTERN --auto --output $EXTERN1 --auto --left-of $INTERN
;;
work)
xrandr --output $EXTERN2 --auto --rotate left --output $EXTERN1 --auto --left-of $EXTERN2 --output $INTERN --auto --left-of $EXTERN1
;;
workl)
xrandr --output $EXTERN1 --mode 3840x1080 --output $INTERN --auto --right-of $EXTERN1
;;
*)
PrintUsage
;;
esac
| true |
caee425213604514d177d57b768577ef43abf4c1 | Shell | IBM-Security/performance | /IAM/scripts/SDS_tuning_scripts/s_nanny_2.ksh | UTF-8 | 4,659 | 3.875 | 4 | [] | no_license | #!/bin/ksh
#DebugTrace=NO
DebugTrace=YES
if [ "${DebugTrace}" == "YES" ]; then
set -x
fi
LDAP_PORT=389
#START_SLAPD_CMD="su - ldap -c /export/home/ldap/start_slapd"
START_SLAPD_CMD="/ldapscripts/ldap_start start"
CORE_BACKUP_PATH=/var/ldap/testdata
TmpSearchResults=${CORE_BACKUP_PATH}/tmp.s_nanny_2.out
NannyLogFile=${CORE_BACKUP_PATH}/s_nanny_2.log
###############################################
# functions...
###############################################
SessionTimeStamp=''
SlapdLogDir=''
SLAPD_PID=NOT_RUNNING
MV=/usr/bin/mv
Trace()
{
if [ ${DebugTrace} == YES ]; then
echo $*
fi
}
Log()
{
Trace $*
echo $* >>${NannyLogFile}
}
InitNewBackupDir()
{
SessionTimeStamp=$(date -u '+%Y%m%d%H%M%SZ')
if [ -d ${CORE_BACKUP_PATH:-"do not save core"} ]; then
SlapdLogDir=${CORE_BACKUP_PATH}/${SessionTimeStamp}
else
SlapdLogDir=${SessionTimeStamp}
fi
# Try and create a new subdirectory for log/core/pstack... etc
(mkdir ${SlapdLogDir} 2>&1) >>${NannyLogFile}
if (( $? )); then
Log "FATAL ERROR - Can't initialize log directory!"
return 1
fi
return 0 # success
}
get_SLAPD_PID()
{
# SLAPD_PS=`ps -eo pid,comm | grep slapd`
SLAPD_PS=`ps -eo pid,comm | sed s'/^\s*//g' | grep slapd`
if [ $? -eq 0 ]; then
Trace "SLAPD_PS=${SLAPD_PS}"
typeset -i tmp_PID=${SLAPD_PS%% *}
SLAPD_PID=${tmp_PID}
Trace "SLAPD_PID=${SLAPD_PID}"
return 0
else
Trace "SLAPD_PS=${SLAPD_PS}"
SLAPD_PID=NOT_RUNNING
Trace "SLAPD_PID=${SLAPD_PID}"
return 1
fi
Log 'get_SLAPD_PID(): how did I get here?'
return 1
}
save_core()
{
if [ -f core ]; then
Trace "core file found..."
if [ -d ${CORE_BACKUP_PATH:-"do not save core"} ]; then
# save the core file...
Log "Saving core to ${SlapdLogDir}"
${MV} core ${SlapdLogDir}
# (pstack ${SlapdLogDir}/core 2>&1) >${SlapdLogDir}/pstack.out
else
Log "Deleting core file"
rm core
fi
else
Log "core file NOT found..."
fi
}
slapd_is_started()
{
cmd="ldapsearch -p ${LDAP_PORT:-389} -b cn=localhost -s base cn=* dn"
Trace ${cmd}
(${cmd} 2>&1) >${TmpSearchResults}
RC=$?
Trace "RC = ${RC}"
case ${RC} in
0) return 0 ;;
*)
if [ ${DebugTrace} == YES ]; then
cat ${TmpSearchResults}
fi
return 1 ;;
esac
Log 'slapd_is_started(): how did I get here?'
return 1
}
start_slapd()
{
if InitNewBackupDir; then
Log "Starting slapd at ${SessionTimeStamp}..."
else
return 1 # could not start
fi
Trace "${START_SLAPD_CMD:-slapd}"
(nohup ${START_SLAPD_CMD:-slapd} 2>&1) >${SlapdLogDir}/slapd.out &
# wait no more than 30 seconds for slapd to start...
let 'SEC=5'
sleep $((SEC))
while (( SEC < 30 )); do
let 'SEC=SEC+1'
Trace "$((SEC)) sec..."
if get_SLAPD_PID; then
Trace "SLAPD_PID = ${SLAPD_PID}"
if slapd_is_started; then
Trace 'start_slapd() -> 0'
return 0
fi
else
Log "FATAL ERROR - slapd won't start!"
Trace 'start_slapd() -> 1'
return 1
fi
sleep 1
done
Log "ERROR - Timed out waiting for slapd to start!"
Trace 'start_slapd() -> 2'
return 2
}
search_timeout()
{
if [ -f ${TmpSearchResults} ]; then
rm ${TmpSearchResults}
fi
cmd="ldapsearch -p ${LDAP_PORT:-389} -b cn=localhost -s base cn=* dn"
Trace "${cmd}"
(${cmd} 2>&1) >${TmpSearchResults} &
let 'SEC=0'
while (( SEC < 30 )); do
if [ -s ${TmpSearchResults} ]; then
if [ ${DebugTrace} == YES ]; then
cat ${TmpSearchResults}
fi
Trace "Search Finished in $((SEC)) sec."
rm ${TmpSearchResults}
return 1
fi
sleep 1
let 'SEC=SEC+1'
Trace "$((SEC)) sec..."
done
# search did not return in < 30 sec.
Log "Search took > 30 sec!"
return 0
}
# Forcibly terminate the slapd process recorded in SLAPD_PID and give the
# kernel a moment to reap it before the nanny loop restarts the server.
kill_slapd()
{
    # Log "pstack -F $SLAPD_PID >${SlapdLogDir}/pstack.out"
    # (pstack -F $SLAPD_PID 2>&1) >${SlapdLogDir}/pstack.out
    Log "kill -9 $SLAPD_PID"
    kill -9 $SLAPD_PID	# be more insistant
    sleep 2
}
###############################################
# main - script starts here
#
# Watchdog loop: if slapd is up but unresponsive, kill it; if it is down,
# save any core dump and restart it.  Each idle/failed pass sleeps 30 s.
###############################################
if InitNewBackupDir; then
    Log "$0 started at ${SessionTimeStamp}"
    Log "Logging to ${NannyLogFile}"
    sleep 1
else
    tail ${NannyLogFile}
    # NOTE(review): 'return' at top level only works when this file is
    # sourced; if executed directly bash prints an error and falls through
    # into the loop below — confirm intended invocation (or use 'exit 1').
    return 1	# could not start
fi
while (( 1 )); do	# loop forever
    if get_SLAPD_PID ; then		# if slapd is running...
	if search_timeout; then		# and search does not respond...
	    kill_slapd;			# then kill the server
	else
	    sleep 30			# else wait 30 sec. and try again...
	fi
    else				# else if slapd is not running...
	save_core;			# save the core if there is one
	if start_slapd; then		# if we can restart the server
	    Log "Restarted Server"	# then OK -
	else				# otherwise
	    sleep 30			# FATAL ERROR wait 30 sec. and try again...
	fi
    fi
done
| true |
8a51238830f62d8fe7a660c685675aa9acf92ca8 | Shell | morika-t/heroku-buildpack-rbenv | /bin/compile | UTF-8 | 7,699 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
# Heroku buildpack "compile" phase: install/refresh rbenv + ruby-build,
# build every ruby version the app declares, and persist the result in the
# slug.  Arguments follow the buildpack API.
BUILD_DIR=$1      # The app directory, usually /app. This will have the app source initially. Whatever is left here will be persisted.
CACHE_DIR=$2      # The contents of CACHE_DIR will be persisted between builds so we can use it to speed the builds up
ENV_DIR=$3        # An envdir directory of the app's environment variables
CACHED_RBENV_ROOT=$CACHE_DIR/rbenv
APP_DIR=/app
# Mirror the build dir to /app so compiled paths match the dyno runtime layout.
[ "$BUILD_DIR" = "$APP_DIR" ] || cp -fa $BUILD_DIR $APP_DIR
mkdir -p $APP_DIR
RBENV_ROOT=$APP_DIR/.rbenv
BUILD_RBENV_ROOT=$BUILD_DIR/.rbenv
# If REMOVE_RBENV_CACHE is set, clean up the cache
[ -n "$REMOVE_RBENV_CACHE" ] && [ -d $CACHED_RBENV_ROOT ] && echo "Removing cached copy of RBENV"
[ -n "$REMOVE_RBENV_CACHE" ] && [ -d $CACHED_RBENV_ROOT ] && rm -fr $CACHED_RBENV_ROOT
# Update any cached rbenv, remove the cache if there is an error
[ -d $CACHED_RBENV_ROOT/.git ] && (
  echo "Found existing cached copy of rbenv"
  ( cd $CACHED_RBENV_ROOT; git pull ) || rm -fr $CACHED_RBENV_ROOT
)
# Clone a new rbenv if there isn't already one cached
[ -d $CACHED_RBENV_ROOT/.git ] || (
  echo "Cloning a fresh copy of rbenv to cache"
  [ -d $CACHED_RBENV_ROOT ] && rm -fr $CACHED_RBENV_ROOT
  [ -d $CACHED_RBENV_ROOT ] || git clone https://github.com/sstephenson/rbenv.git $CACHED_RBENV_ROOT
)
# Update any cached ruby-build, remove the cache if there is an error
[ -d $CACHED_RBENV_ROOT/plugins/ruby-build/.git ] && (
  echo "Found existing cached copy of ruby-build"
  ( cd $CACHED_RBENV_ROOT/plugins/ruby-build && git pull ) || rm -fr $CACHED_RBENV_ROOT
)
# Clone a new ruby-build if there isn't already one cached
[ -d $CACHED_RBENV_ROOT/plugins/ruby-build/.git ] || (
  echo "Cloning a fresh copy of ruby-build to cache"
  [ -d $CACHED_RBENV_ROOT/plugins/ruby-build/.git ] && rm -fr $CACHED_RBENV_ROOT/plugins/ruby-build/.git
  [ -d $CACHED_RBENV_ROOT/plugins/ruby-build ] || git clone https://github.com/sstephenson/ruby-build.git $CACHED_RBENV_ROOT/plugins/ruby-build
)
# Restore any cached rbenv (this will include ruby-build)
[ -d $CACHED_RBENV_ROOT ] && echo "Copying cached copy of rbenv"
[ -d $CACHED_RBENV_ROOT ] && cp -fa $CACHED_RBENV_ROOT $RBENV_ROOT
PROFILE_ROOT=$APP_DIR/.profile.d
BUILD_PROFILE_ROOT=$BUILD_DIR/.profile.d
# Generate the bash profile to source (sets RBENV_ROOT/PATH and inits rbenv)
mkdir -p $PROFILE_ROOT
RBENV_PROFILE=$PROFILE_ROOT/rbenv
echo 'export APP_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.."' >> $RBENV_PROFILE
echo 'export RBENV_ROOT="$APP_DIR/.rbenv"' >> $RBENV_PROFILE
echo 'export PATH="$RBENV_ROOT/bin:$PATH"' >> $RBENV_PROFILE
echo 'hash -r' >> $RBENV_PROFILE
echo '[ -f $RBENV_ROOT/bin/rbenv ]' >> $RBENV_PROFILE
echo 'eval "$(rbenv init -)"' >> $RBENV_PROFILE
# Source the rbenv profile to prepare rbenv to run
source $RBENV_PROFILE
# Identify the ruby versions this app may need to run
# (from .ruby-version and/or the Gemfile's "ruby '...'" directive)
ruby_version_from_dotfile=$( ( [ -f $APP_DIR/.ruby-version ] && cat $APP_DIR/.ruby-version ) || true )
ruby_version_from_gemfile=$( grep -e '^ruby ' $APP_DIR/Gemfile | sed -e 's/^ruby //' -e 's/[\'"'"'\"]//g' || true)
# Iterate through each ruby version, building it if neccessary
for ruby_version in ${ruby_version_from_dotfile} ${ruby_version_from_gemfile} ; do
  echo "Checking for existence of rbenv ruby ${ruby_version}"
  # Generate the ruby version if it doesn't exist yet in the cache
  [ -d $RBENV_ROOT/versions/${ruby_version} ] || (
    echo "Could not find rbenv ruby ${ruby_version}, building"
    # Prepare the build environment by updating ubuntu first. Use caching between builds to improve the speed.
    grep old-releases /etc/apt/sources.list || (
      if ! grep -qe 'utopic\|trusty\|saucy\|precise\|lucid' /etc/apt/sources.list ; then
        perl -pi -e "s%archive.ubuntu.com/ubuntu%old-releases.ubuntu.com/ubuntu%g" /etc/apt/sources.list
      fi
      if ! grep -qe '^deb-src' /etc/apt/sources.list ; then
        sed -e 's/^deb /deb-src /g' < /etc/apt/sources.list >> /etc/apt/sources.list
      fi
      APT_CACHE_PATH=$CACHE_DIR/apt
      mkdir -p $APT_CACHE_PATH
      apt-get -o dir::cache::archives=$APT_CACHE_PATH update
      LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get -o dir::cache::archives=$APT_CACHE_PATH install autoconf bison build-essential libssl-dev zlib1g zlib1g-dev libreadline-dev libncurses-dev -f -y libssl-dev patch libyaml-dev
    )
    # Use ruby-build to build the ruby version. Use caching to improve build speed.
    RUBY_BUILD_CACHE_PATH=$CACHE_DIR/ruby-build.cache
    mkdir -p $RUBY_BUILD_CACHE_PATH
    RUBY_BUILD_BUILD_PATH=$CACHE_DIR/ruby-build.sources
    mkdir -p $RUBY_BUILD_BUILD_PATH
    # Point configure at whichever libreadline location this distro uses.
    if [ -f "/usr/lib/libreadline.so" ]; then
      RUBY_CONFIGURE_OPTS=--with-readline-dir="/usr/lib/libreadline.so"
    fi
    if [ -f "/usr/lib/x86_64-linux-gnu/libreadline.so" ]; then
      RUBY_CONFIGURE_OPTS=--with-readline-dir="/usr/lib/x86_64-linux-gnu/libreadline.so"
    fi
    CONFIGURE_OPTS="--disable-install-doc --with-readline=/usr/include/readline --with-openssl-dir=/usr/include/readline"
    # 2.1.0/2.1.1 need an upstream patch (piped into rbenv install --patch).
    (
      if echo ${ruby_version} | grep -qe '^2.1.[01]' ; then
        curl -fsSL https://gist.github.com/mislav/a18b9d7f0dc5b9efc162.txt
      else
        true
      fi
    ) | env RUBY_CONFIGURE_OPTS="${RUBY_CONFIGURE_OPTS}" CONFIGURE_OPTS="${CONFIGURE_OPTS}" rbenv install --keep --patch ${ruby_version}
    cat <<EOF > ~/.gemrc
---
:backtrace: ${RBENV_GEMRC_BACKTRACE:-true}
:benchmark: ${RBENV_GEMRC_jBENCHMARK:-false}
:bulk_threshold: ${RBENV_GEMRC_THRESHOLD:-1000}
:sources:
- ${RBENV_GEMRC_SOURCE:-https://rubygems.org}
:update_sources: ${RBENV_GEMRC_UPDATE_SOURCES:-true}
:verbose: ${RBENV_GEMRC_VERBOSE:-true}
:gem: ${RBENV_GEMRC_GEM:---no-ri --no-rdoc}
EOF
    echo GEMRC:
    cat ~/.gemrc
    # Test out the new ruby, updating rubygems and installing bundler along the way
    bash -c "
      echo 'Upgrading system rubygems from: '`gem --version`
      REALLY_GEM_UPDATE_SYSTEM=true gem update --system --no-ri --no-rdoc
      echo 'Upgraded system rubygems to: '`gem --version`
      source $RBENV_PROFILE
      rbenv rehash
      cd $APP_DIR
      echo 'OpenSSL check'
      ruby -r openssl -e0
      echo 'Upgrading rbenv rubygems from: '`gem --version`
      REALLY_GEM_UPDATE_SYSTEM=true gem update --system --no-ri --no-rdoc
      echo 'Upgraded rbenv rubygems to: '`gem --version`
      gem install rubygems-update --no-ri --no-rdoc
      echo 'Running rubygems-update: '`gem --version`
      rbenv rehash
      update_rubygems || true
      rbenv rehash
      echo 'Ran rubygems-update: '`gem --version`
      gem install bundler --no-ri --no-rdoc
      rbenv rehash
    "
    #bundle install --without test --path vendor/bundle --binstubs vendor/bundle/bin -j4 --deployment
    #echo "Creating cached copy of bundled vendor gems"
    #mkdir -p $CACHE_DIR/bundle
    #cp -fa $APP_DIR/vendor/bundle $CACHE_DIR/bundle
    # Make a backup copy in the cache for this ruby version for next time
    mkdir -p $CACHED_RBENV_ROOT/versions
    echo "Creating cached copy of ruby $ruby_version"
    cp -fa $RBENV_ROOT/versions/$ruby_version $CACHED_RBENV_ROOT/versions/$ruby_version
  )
done
# Overlay the built rbenv back overtop the $BUILD_DIR so it is persisted in the resultant slug
mkdir -p $BUILD_DIR
echo "Copying files to the slug build directory"
cp -fa $RBENV_ROOT $BUILD_RBENV_ROOT
mkdir -p $BUILD_PROFILE_ROOT
cp -fa $PROFILE_ROOT/* $BUILD_PROFILE_ROOT/
#if [ -d $CACHE_DIR/bundle ]; then
#  mkdir -p $BUILD_DIR/vendor/bundle
#  [ -n "${RBENV_SKIP_BUNDLE}" ] || cp -fa $CACHE_DIR/bundle/ $BUILD_DIR/vendor/bundle/
#fi
# Source the rbenv environment
rbenv rehash
cd $APP_DIR
# Export the resultant rbenv environment variables for later buildpacks to inherit
export PATH RBENV_ROOT
| true |
3f08b1d82520755dc713305a8cac4c9f3e36e585 | Shell | djobbo/Una | /scripts/programs/ganache.sh | UTF-8 | 712 | 3.578125 | 4 | [] | no_license | #!/bin/bash
echo "Starting AppImage download"
curl -s -L https://api.github.com/repos/trufflesuite/ganache/releases/latest \
| grep -E 'browser_download_url' \
| grep linux-x86_64 \
| cut -d '"' -f 4 \
| wget -qi - -O ganache.AppImage
echo "Downloaded AppImage"
chmod u+x ganache.AppImage
echo "Extracting AppImage..."
sudo ./ganache.AppImage --appimage-extract
echo "AppImage Extracted -> ./squashfs-root"
DEST="$HOME/bin/ganache"
DATE=$(date +%Y-%m-%d-%H%M)
echo $DEST
if [ -d $DEST ]; then
echo "Backing up existing Directory\n \e[90m$DEST -> $DEST.$DATE\e[39m"
mv $DEST "$DEST.$DATE"
fi
sudo mv ./squashfs-root $DEST
sudo rm ganache.AppImage
echo "Ganache Installed!" | true |
9ecf7b12b2f9984b7a08c7fee2a93f3f18fe009c | Shell | pentolone/OpenVPN | /crea-serverkey.bash | UTF-8 | 6,965 | 3.796875 | 4 | [] | no_license | #!/bin/bash
#
# crea-servverkey.bash
# Create CA certificate and server certificate
# (C) Connexx srl 2021
#
#
#
#
#############################################################################
#
#--> Global variables
#
# Global state shared by all functions below.
CNX_PRG_NAME=$0                                  # script name, used in messages
CNX_PATH_TO_OVPN=$HOME/openVPN                   # working root
CNX_PATH_TO_EASYRSA=$CNX_PATH_TO_OVPN/EasyRSA    # EasyRSA install (symlink)
CNX_PATH_TO_CA=$CNX_PATH_TO_OVPN/ca              # per-CA vars files
CNX_PATH_TO_PKI=$CNX_PATH_TO_OVPN/pki            # generated PKI trees
CNX_PATH_TO_SERVER=$CNX_PATH_TO_OVPN/servers     # assembled server bundles
CNX_CN=""                                        # CA common name (user input)
CNX_YN=""                                        # last yes/no answer
CNX_SERVERNAME="server"                          # base name for generated files
CNX_COMPLETED="N"                                # main-loop exit flag
# Ask a yes/no question until the operator answers y/Y/n/N.
# $1 - question text (printed via echo -e, so \n sequences expand).
# Result is left in the global CNX_YN.
confirm() {
	CNX_YN=""
	until case "$CNX_YN" in [yYnN]) true ;; *) false ;; esac
	do
		echo -e "$1 (Y/N) ?"
		read CNX_YN
	done
}
# Print "<program>: <message>" and terminate with status 1.
quit() {
	printf '%s: %s\n' "$CNX_PRG_NAME" "$1"
	exit 1
}
# Prompt until a non-empty common name is entered; store it in the global
# CNX_CN.  The caller is responsible for treating "q"/"Q" as quit.
select_cn() {
	CNX_CN=""
	until [ -n "$CNX_CN" ]
	do
		echo "Please enter the common name (CN) for this server; q to quit"
		read CNX_CN
	done
}
# Ask for the base file name to generate; empty input keeps the default
# "server".  Result is stored in the global CNX_SERVERNAME.
select_server_name() {
	CNX_SERVERNAME="server"
	printf '%s\n' "Please enter the name used to generate file [$CNX_SERVERNAME]; q to quit"
	read CNX_SERVERNAME
	# Re-apply the default when the operator just pressed Enter.
	: "${CNX_SERVERNAME:=server}"
}
# Ask for the VPN network address; empty input selects 10.8.0.0.
# Result is stored in the global CNX_SERVERNET.
select_server_net() {
	CNX_SERVERNET=""
	printf '%s\n' "Please enter the network used by this server [10.8.0.0]; q to quit"
	read CNX_SERVERNET
	: "${CNX_SERVERNET:=10.8.0.0}"
}
# Ask for the VPN subnet mask; empty input selects 255.255.255.0.
# Result is stored in the global CNX_SERVERSUBNET.
select_server_subnet() {
	CNX_SERVERSUBNET=""
	printf '%s\n' "Please enter the subnet used by this server [255.255.255.0]; q to quit"
	read CNX_SERVERSUBNET
	: "${CNX_SERVERSUBNET:=255.255.255.0}"
}
#
#--> Message start
#
echo "---- $CNX_PRG_NAME: Create CA certificate and key for OpenVPN SERVER ----"
#
#--> MAIN part of script
#
# Verify EasyRSA is installed and the working directories exist, then loop
# asking for CA/server parameters and generating CA + server material until
# one run completes.
if [ ! -d "$CNX_PATH_TO_EASYRSA" ]
then
	echo "Directory $CNX_PATH_TO_EASYRSA does NOT exist!"
	echo "Please download EasyRSA by doing:"
	echo
	echo "wget -P $CNX_PATH_TO_OVPN https://github.com/OpenVPN/easy-rsa/releases/download/v<Latest version>/EasyRSA-<Latest version>.tgz"
	echo "Execute tar xvf EasyRSA<Latest version>.tgz in your folder ($CNX_PATH_TO_OVPN)"
	echo "Execute ln -s EasyRSA<Latest version>.tgz EasyRSA"
	echo
	echo "Substitute latest version with the correct value (i.e. 3.0.8)"
	echo "Install EasyRSA and restart this script"
	echo "In EasyRSA folder do mv vars vars.example"
	echo
	quit "missing EasyRSA software, exiting..."
fi
if [ ! -d "$CNX_PATH_TO_PKI" ]
then
	echo "Directory $CNX_PATH_TO_PKI does NOT exist; creating..."
	mkdir $CNX_PATH_TO_PKI
fi
if [ ! -d "$CNX_PATH_TO_CA" ]
then
	echo "Directory $CNX_PATH_TO_CA does NOT exist; creating..."
	mkdir $CNX_PATH_TO_CA
fi
#
#--> LOOP until completed
#
while [ "$CNX_COMPLETED" == "N" ]
do
	# Collect parameters; "q"/"Q" at any prompt aborts the script.
	select_cn
	if [ "$CNX_CN" == "q" ] || [ "$CNX_CN" == "Q" ]
	then
		quit "exiting upon user request..."
	fi
	select_server_name
	if [ "$CNX_SERVERNAME" == "q" ] || [ "$CNX_SERVERNAME" == "Q" ]
	then
		quit "exiting upon user request..."
	fi
	select_server_net
	if [ "$CNX_SERVERNET" == "q" ] || [ "$CNX_SERVERNET" == "Q" ]
	then
		quit "exiting upon user request..."
	fi
	select_server_subnet
	if [ "$CNX_SERVERSUBNET" == "q" ] || [ "$CNX_SERVERSUBNET" == "Q" ]
	then
		quit "exiting upon user request..."
	fi
	# Recap and ask for confirmation; "n" restarts the prompt loop.
	confirm "Generating CA and CERTIFICATE, summary\nSERVER -> $CNX_SERVERNAME\nCommon Name (CN) -> $CNX_CN\nVPN network -> $CNX_SERVERNET\nsubnet -> $CNX_SERVERSUBNET"
	if [ "$CNX_YN" == "n" ] || [ "$CNX_YN" == "N" ]
	then
		continue
	fi
	# Each CA needs its own EasyRSA vars file under $CNX_PATH_TO_CA.
	CNX_PATH_TO_CFG="$CNX_PATH_TO_CA/$CNX_CN.vars"
	if [ ! -f "$CNX_PATH_TO_CFG" ]
	then
		echo "Configuration file $CNX_PATH_TO_CFG does NOT exist!"
		echo
		echo "Copy the example file (vars.example) located in $CNX_PATH_TO_EASYRSA to $CNX_PATH_TO_CFG"
		echo "cp $CNX_PATH_TO_EASYRSA/vars.example $CNX_PATH_TO_CFG"
		echo
		echo "Edit the file, uncomment the lines listed below and put the correct information"
		echo
		echo "#set_var EASYRSA_REQ_COUNTRY    "US""
		echo "#set_var EASYRSA_REQ_PROVINCE   "California""
		echo "#set_var EASYRSA_REQ_CITY       "San Francisco""
		echo "#set_var EASYRSA_REQ_ORG        "Copyleft Certificate Co""
		echo "#set_var EASYRSA_REQ_EMAIL      "me@example.net""
		echo "#set_var EASYRSA_REQ_OU         "My Organizational Unit""
		echo
		echo "Configure the file $CNX_PATH_TO_CFG and restart the script"
		echo
		quit "missing configuration file, exiting..."
	fi
	if [ ! -d "$CNX_PATH_TO_PKI/$CNX_CN" ]
	then
		echo "Directory $CNX_PATH_TO_PKI/$CNX_CN does NOT exist; creating..."
		mkdir $CNX_PATH_TO_PKI/$CNX_CN
	fi
	# Two PKI trees per CN: one for the CA, one for the server request/keys.
	CNX_PATH_TO_PKI_CA=$CNX_PATH_TO_PKI/$CNX_CN/ca
	CNX_PATH_TO_PKI_SERVER=$CNX_PATH_TO_PKI/$CNX_CN/server
	cd $CNX_PATH_TO_EASYRSA
	./easyrsa --vars=$CNX_PATH_TO_CFG --pki-dir=$CNX_PATH_TO_PKI_CA init-pki
	./easyrsa --vars=$CNX_PATH_TO_CFG --pki-dir=$CNX_PATH_TO_PKI_CA build-ca nopass
	#
	#--> Done CA
	#
	#--> Server part
	#
	# Generate the server keypair/request, sign it with the CA, create DH
	# parameters and the TLS-auth key.
	./easyrsa --vars=$CNX_PATH_TO_CFG --pki-dir=$CNX_PATH_TO_PKI_SERVER init-pki
	./easyrsa --vars=$CNX_PATH_TO_CFG --pki-dir=$CNX_PATH_TO_PKI_SERVER gen-req $CNX_SERVERNAME nopass
	./easyrsa --vars=$CNX_PATH_TO_CFG --pki-dir=$CNX_PATH_TO_PKI_CA import-req $CNX_PATH_TO_PKI_SERVER/reqs/$CNX_SERVERNAME.req $CNX_SERVERNAME
	./easyrsa --vars=$CNX_PATH_TO_CFG --pki-dir=$CNX_PATH_TO_PKI_CA sign-req server $CNX_SERVERNAME
	./easyrsa --vars=$CNX_PATH_TO_CFG --pki-dir=$CNX_PATH_TO_PKI_SERVER gen-dh
	sudo openvpn --genkey --secret $CNX_PATH_TO_PKI_SERVER/ta.key
	sudo chmod 644 $CNX_PATH_TO_PKI_SERVER/ta.key
	mv $CNX_PATH_TO_PKI_CA/issued/$CNX_SERVERNAME.crt $CNX_PATH_TO_PKI_SERVER
	# Assemble the deliverable bundle for this server.
	CNX_PATH_TO_SERVER=$CNX_PATH_TO_SERVER/$CNX_SERVERNAME
	mkdir $CNX_PATH_TO_SERVER
	CNX_SERVER_CONF=$CNX_PATH_TO_SERVER/$CNX_SERVERNAME.conf
	echo "#" > $CNX_SERVER_CONF
	echo "# Filename: `basename $CNX_SERVER_CONF`" >> $CNX_SERVER_CONF
	echo "# Automatically generated by script: `basename $CNX_PRG_NAME`" >> $CNX_SERVER_CONF
	echo "# VPN server name: $CNX_SERVERNAME" >> $CNX_SERVER_CONF
	echo "# Date: `date`" >> $CNX_SERVER_CONF
	echo "#" >> $CNX_SERVER_CONF
	# Instantiate the config template with the chosen name/network/mask.
	cat $CNX_PATH_TO_SERVER/../server.conf.example | sed -e "s/cert xxxxxx/cert $CNX_SERVERNAME/g" \
		| sed -e "s/key xxxxxx/key $CNX_SERVERNAME/g" \
		| sed -e "s/_subnet/$CNX_SERVERNET/g" \
		| sed -e "s/_mask/$CNX_SERVERSUBNET/g" >> $CNX_SERVER_CONF
	cp $CNX_PATH_TO_PKI_SERVER/../ca/ca.crt $CNX_PATH_TO_SERVER
	cp $CNX_PATH_TO_PKI_SERVER/ta.key $CNX_PATH_TO_SERVER
	cp $CNX_PATH_TO_PKI_SERVER/dh.pem $CNX_PATH_TO_SERVER
	cp $CNX_PATH_TO_PKI_SERVER/$CNX_SERVERNAME.crt $CNX_PATH_TO_SERVER
	cp $CNX_PATH_TO_PKI_SERVER/private/$CNX_SERVERNAME.key $CNX_PATH_TO_SERVER
	echo "Server configuration for '$CNX_CN' finished"
	echo "Files are located in $CNX_PATH_TO_SERVER"
	echo
	echo "In the VPN server copy files $CNX_SERVERNAME.conf, $CNX_SERVERNAME.crt, $CNX_SERVERNAME.key, ca.crt, ta.key, dh.pem in /etc/openvpn folder"
	echo "Execute chmod 400 $CNX_SERVERNAME.key ta.key, dh.pem"
	echo "Execute systemctl enable openvpn@$CNX_SERVERNAME"
	echo "Execute systemctl start openvpn@$CNX_SERVERNAME"
	CNX_COMPLETED="Y"
done
| true |
7cb9c43c934f42b8fd3171eebc1470f8455ef404 | Shell | kamilion/kamikazi-core | /buildscripts/xenial/setup-build-environment.sh | UTF-8 | 1,430 | 3.234375 | 3 | [
"WTFPL"
] | permissive | #!/bin/bash
# Turn this host into a kamikazi ISO build node: install the build toolchain
# plus the Python 2/3 Qt dependencies, create the working directories, and
# run the ISO-customizer bootstrap.  Must run as root.
if [[ $EUID -ne 0 ]]; then
   # Fixed: previously "2>&1", which merely duped stderr onto stdout and left
   # the error message on stdout; ">&2" sends the diagnostic to stderr.
   echo "You must be a root user" >&2
   exit 1
else
  echo "[kamikazi-build] Setting this node up to be an ISO builder..."
  # Core ISO/build tooling.
  apt-get install --no-install-recommends -y git gksu build-essential fakeroot make binutils g++ squashfs-tools xorriso x11-xserver-utils xserver-xephyr qemu-kvm dpkg-dev debhelper
  # Python 2 + Qt4 toolchain.
  apt-get install --no-install-recommends -y python python-dev python-qt4 pyqt4-dev-tools qt4-dev-tools qt4-linguist-tools
  # Python 3 + Qt4/Qt5 toolchain.
  apt-get install --no-install-recommends -y python3 python3-dev python3-qt4 python3-pyqt5 pyqt5-dev-tools qttools5-dev-tools
  mkdir -p /home/kamikazi-16.04/
  mkdir -p /home/minilubuntu/
  ./19-add-iso-customizer.sh
  # Might wanna comment this out if you're not doing this from lubuntu.
  # I havn't tested it on xubuntu, gnome-desktop, KDE, or Unity.
  #cp ../../latest/mods/usr/share/applications/customizer.desktop /usr/share/applications/customizer.desktop
  # -- No longer needed, fixed this when I took over the Customizer project.
  echo "[kamikazi-build] This node is now an ISO builder."
  echo "[kamikazi-build] Open a terminal, navigate to this directory."
  echo "[kamikazi-build] First run mini-rebuild.sh then build.sh."
  echo "[kamikazi-build] Then after at least one run of build.sh,"
  echo "[kamikazi-build] rebuild.sh can be used for faster rerolls."
  echo "[kamikazi-build] You can also run customizer-gui from a root terminal."
fi
| true |
7d4687f8592e612fc3b282faba4ceea62dd0a1bf | Shell | aakordas/creature_manager | /scripts/dice_test.sh | UTF-8 | 1,305 | 3.4375 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/bash
# Issue a silent GET against the local dice-rolling API.
# $1 - path/query suffix appended to the /api/v1/roll endpoint (may be empty).
make_request() {
    local suffix="$1"
    curl --silent --request GET "127.0.0.1:8080/api/v1/roll${suffix}"
}
# Exercise every supported die size through each request shape the API
# accepts (path variables vs. query parameters, with and without a count).
declare -a DICE
DICE=(4 5 6 8 10 12 20 100)
echo "Default values: "
curl --silent --request GET "127.0.0.1:8080/api/v1/roll"
echo
# Check with the variable, without queries.
echo "Variable for the sides., no queries"
for dice in ${DICE[@]}
do
    make_request "/d${dice}"
    make_request "/D${dice}"
done
echo
# Check with the variable with query for the count.
echo "Variable for the sides with count query."
for dice in ${DICE[@]}
do
    make_request "/d${dice}?count=2"
    make_request "/D${dice}?count=2"
done
echo
# Check with variable for the sides and with variable for the count.
echo "Variable for the sides and for the count."
for dice in ${DICE[@]}
do
    make_request "/d${dice}/2"
    make_request "/D${dice}/2"
done
echo
# Check without the variable with query for the sides.
echo "Query for the sides."
for dice in ${DICE[@]}
do
    make_request "?sides=${dice}"
done
echo
# Check without the variable with query for the count.
echo "Query for the count."
make_request "?count=2"
echo
# Check without the variable with query for the sides and the count.
echo "Query for the sides and the count."
for dice in ${DICE[@]}
do
    make_request "?sides=${dice}&count=2"
done
echo
| true |
8b9fa88820cbaec11f0c8a8dc169129669d5f2ed | Shell | AustinSchuh/rules_go | /tests/race/race_on_test.sh | UTF-8 | 258 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | OUTPUT="$(tests/race/race_on_tester 2>&1)"
if [ $? -eq 0 ]; then
echo success
echo "Expected failure, got success"
exit 1
fi
if [[ $OUTPUT != *"WARNING: DATA RACE"* ]]; then
echo "Expected WARNING: DATA RACE and it was not present"
exit 1
fi
exit 0 | true |
a9f6e0d0cba1af3cf958be840b2eb2cb7027e74b | Shell | olsner/m3 | /test.sh | UTF-8 | 402 | 3.328125 | 3 | [] | no_license | #!/bin/bash
set -e
# Compile-and-run harness for one m3 module: build it in a throwaway
# directory, lower through llvm-as/llc/gcc, then execute the result with the
# remaining arguments.  Pass "-" as $1 to compile stdin as module temp_test.
testdir=`mktemp -d` || exit 1
# $testdir is expanded now (double quotes), so the trap removes this exact dir.
trap "rm -fr $testdir" exit
outdir=$testdir/out
mkdir $outdir
if [ "$1" = "-" ]; then
	outdir=${testdir}
	mod=temp_test
	cat > ${testdir}/${mod}.m
else
	mod="$1"
fi
shift
# ${mod/::/__}: the compiler flattens :: in module names to __ in file names.
dist/build/m3/m3 ${testdir:+-I${testdir} -o${outdir}} ${mod}
file=${outdir}/${mod/::/__}
llvm-as ${file}.ll
llc -o ${file}.s ${file}.bc
gcc -o ${file} ${file}.s -lgmp
${file} "$@"
| true |
03a58f40be80a3a630835a525642cca126cbbed0 | Shell | arapat/aws-jupyter | /aws_jupyter/scripts/install-jupyter-tmsn.sh | UTF-8 | 557 | 2.828125 | 3 | [] | no_license | # Mount SSD
# Restart the Jupyter Notebook server on this node and print a browser URL
# built from the instance's public IP.
# ami-07ebfd5b3428b6f4d
killall jupyter-notebook
sleep 1
nohup jupyter notebook --no-browser --port=8888 < /dev/null > /dev/null 2>&1 &
# Public IP as seen by OpenDNS.
URL=$(dig +short myip.opendns.com @resolver1.opendns.com)
sleep 2
echo
echo "The Jupyter Notebook is running on the cluster at the address below."
echo
echo "Open the following address using the browser on your computer"
echo
# Take the token URL from `jupyter notebook list` and swap the local host
# part for the public IP.
echo "    http"$(jupyter notebook list | grep -Po '(?<=http).*(?=::)' | sed "s/\/.*:/\/\/$URL:/")
echo
echo "(If the URL didn't show up, please wait a few seconds and try again.)"
echo | true |
a9d5ad7691994ac10bcc0db4f4e08751263c77cf | Shell | faust64/BaD | /bench_a_disk | UTF-8 | 8,272 | 3.984375 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
CLEANUP_TMPFS=false
SETUP=false
RETRY_SUFFIXES="`seq 1 5`"
test -z "$LOG_PATH" && LOG_PATH=/var/tmp/BaD
test -z "$TARGET_PATH" && TARGET_PATH=/home/bench
test -z "$TARGET_DEVICE" && TARGET_DEVICE=/dev/sda2
while test "$1"
do
case "$1" in
-s|--setup) SETUP=true ;;
-l|--logpath) LOG_PATH="$2"; shift ;;
-d|--targetdir) TARGET_PATH="$2"; shift ;;
-e|--targetdevice) TARGET_DEVICE="$2"; shift ;;
-i|--fastinput) FAST_INPUT="$2"; shift ;;
-r|--retry) RETRY_SUFFIXES="`seq 1 '$2'`"; shift ;;
-h|--help) cat <<EOF
$0 usage:
$0 [-l /logs] |-d /test/dir ] [ -e /dev/block ] [ -i /dev/zero ] [ -r 1..n ]
EOF
exit 0
;;
*) echo discarding unrecognized option "$1" ;;
esac
shift
done
# Preconditions: must be root, directories must exist; --setup installs the
# benchmark tools and exits; otherwise prepare the fast input source.
if ! test `id -u` = 0; then
	echo must run as root >&2
	exit 1
fi
test -d "$TARGET_PATH" || mkdir -p "$TARGET_PATH" 2>/dev/null
test -d "$LOG_PATH" || mkdir -p "$LOG_PATH" 2>/dev/null
if $SETUP; then
	# --setup mode: install fio/sysstat/ioping with whichever package
	# manager exists, then exit without benchmarking.
	if test -x /bin/yum -o -x /usr/bin/yum; then
	    if ! yum -y install fio sysstat ioping; then
		echo failed installing fio, sysstat or ioping >&2
		echo in doubt, try installing epel-release >&2
		exit 1
	    fi
	elif test -x /usr/bin/apt-get -o -x /bin/apt-get; then
	    if ! DEBIAN_FRONTEND=noninteractive apt-get install -y fio sysstat ioping; then
		echo failed installing fio, sysstat or ioping >&2
		exit 1
	    fi
	else
	    echo unsupported platform
	    exit 1
	fi
	exit 0
elif ! test -d "$TARGET_PATH" -a -d "$LOG_PATH"; then
	echo "missing paths" >&2
	exit 1
elif ! test -b "$TARGET_DEVICE"; then
	# A non-block target is only acceptable in USERSPACE (file-based) mode.
	if test -z "$USERSPACE"; then
	    echo "$TARGET_DEVICE is not a block device" >&2
	    exit 1
	fi
fi
if test -z "$FAST_INPUT"; then
	# Build a 1 GiB random sample in tmpfs so write tests are not limited
	# by the input side; fall back to /dev/zero when that fails.
	echo trying to allocate 1240M of RAM mounting tmpfs
	if mkdir -p /var/tmp/fast; then
	    if mount tmpfs -t tmpfs /var/tmp/fast -o size=1240M; then
		if dd if=/dev/urandom of=/var/tmp/fast/sample bs=1M count=1032; then
		    FAST_INPUT=/var/tmp/fast/sample
		else
		    echo failed generating fast input sample >&2
		fi
		CLEANUP_TMPFS=true
	    else
		echo failed mounting tmpfs serving tests with a fast and non-sequential input >&2
	    fi
	else
	    echo failed creating fast input directory >&2
	fi
	if test -z "$FAST_INPUT"; then
	    echo falling back to /dev/zero, beware of predictibility/compressions/...
	    export FAST_INPUT=/dev/zero
	fi
fi
# CPU count drives the small-file thread sweep (2*cores + 2).
CORE_COUNT=`awk 'BEGIN{c=0}/^[Pp]rocessor[ \t]/{c=c+1}END{print c;}' /proc/cpuinfo`
MAX_THREADS=`expr $CORE_COUNT '*' 2 + 2`
# EXIT handler: stop every background monitor started below (PIDs are held
# in <NAME>_PID variables) and unmount the tmpfs if this run created it.
cleanup()
{
	for child in TOP IOPING IOSTAT1 IOSTAT2 VMSTAT SAR1 SAR2
	do
		# Indirect lookup: e.g. child=TOP reads $TOP_PID.
		eval childprocess=\$${child}_PID
		if test "$childprocess"; then
			echo "shutting down $child: $childprocess"
			( kill $childprocess || kill -9 $childprocess ) >/dev/null 2>&1
		fi
	done
	if $CLEANUP_TMPFS; then
		umount /var/tmp/fast
	fi
}
trap cleanup EXIT
# Start the background monitors; their PIDs feed cleanup() above.
top -bHd 10 >"$LOG_PATH"/top-bHd 2>&1 &
TOP_PID=$!
iostat -Ntkdx 10 >"$LOG_PATH"/iostat-Ntkdx 2>&1 &
IOSTAT1_PID=$!
iostat -t -c -d 2 -x -m "$TARGET_DEVICE" >"$LOG_PATH"/iostat.dat 2>&1 &
IOSTAT2_PID=$!
vmstat -t 10 >"$LOG_PATH"/vmstat 2>&1 &
VMSTAT_PID=$!
sar -n DEV 10 >"$LOG_PATH"/sar-n 2>&1 &
SAR1_PID=$!
sar -B 10 >/tmp/sar-B 2>&1 &
SAR2_PID=$!
ioping "$TARGET_PATH" >"$LOG_PATH"/ioping 2>&1 &
IOPING_PID=$!
if test "$USERSPACE"; then
	# USERSPACE mode: benchmark against a 4 GiB regular file instead of a
	# raw block device.
	dd if="$FAST_INPUT" of=/home/bench/idontdoblock bs=1G count=1
	for i in 2 3 4
	do
		dd if="$FAST_INPUT" of=/home/bench/idontdoblock bs=1G count=1 oflag=append conv=notrunc
	done
	TARGET_DEVICE=/home/bench/idontdoblock
else
	# Device checks
	echo checking device "$TARGET_DEVICE"
	hdparm -Tt "$TARGET_DEVICE" >"$LOG_PATH"/hdparm-Tt 2>&1
	if ! xfs_info "$TARGET_DEVICE" >"$LOG_PATH"/xfs_info 2>&1; then
		rm -f "$LOG_PATH"/xfs_info
		if ! dumpe2fs "$TARGET_DEVICE" >"$LOG_PATH"/dumpe2fs 2>&1; then
			rm -f "$LOG_PATH"/dumpe2fs
		fi
	fi
fi
# bonnie, iozone?
# Read
# Sequential/direct reads at several block sizes; caches dropped after each
# pass so retries stay independent.
echo reading from "$TARGET_DEVICE"
for i in $RETRY_SUFFIXES
do
	dd "if=$TARGET_DEVICE" of=/dev/null bs=8k count=65536 iflag=direct,fullblock >"$LOG_PATH"/dd-read-block-bs8k-count65536-directfullblock$i 2>&1
	sync ; echo 3 >/proc/sys/vm/drop_caches
done
for i in $RETRY_SUFFIXES
do
	dd "if=$TARGET_DEVICE" of=/dev/null bs=4M count=128 skip=128 iflag=direct,fullblock >"$LOG_PATH"/dd-read-block-bs4M-count128-directfullblock$i 2>&1
	sync ; echo 3 >/proc/sys/vm/drop_caches
done
for i in $RETRY_SUFFIXES
do
	dd "if=$TARGET_DEVICE" of=/dev/null bs=1G skip=1 count=1 iflag=direct,fullblock >"$LOG_PATH"/dd-read-block-bs1G-count1-directfullblock$i 2>&1
	sync ; echo 3 >/proc/sys/vm/drop_caches
done
for i in $RETRY_SUFFIXES
do
	dd "if=$FAST_INPUT" of=/dev/null bs=1G count=1 iflag=fullblock >"$LOG_PATH"/dd-read-fast-bs1G-count1-fullblock$i 2>&1
	sync ; echo 3 >/proc/sys/vm/drop_caches
done
# Write
# Same block sizes on the write side, once with O_DIRECT and once with
# fdatasync at the end.
echo writing to "$TARGET_PATH"
for i in $RETRY_SUFFIXES
do
	dd "if=$FAST_INPUT" of="$TARGET_PATH/dd-out" bs=1G count=1 oflag=direct >"$LOG_PATH"/dd-write-bs1G-count1-direct$i 2>&1
	sync ; echo 3 >/proc/sys/vm/drop_caches
done
for i in $RETRY_SUFFIXES
do
	dd "if=$FAST_INPUT" of="$TARGET_PATH/dd-out" bs=1G count=1 conv=fdatasync >"$LOG_PATH"/dd-write-bs1G-count1-fdatasync$i 2>&1
	sync ; echo 3 >/proc/sys/vm/drop_caches
done
for i in $RETRY_SUFFIXES
do
	dd "if=$FAST_INPUT" of="$TARGET_PATH/dd-out" bs=4M count=256 oflag=direct >"$LOG_PATH"/dd-write-bs4M-count256-direct$i 2>&1
	sync ; echo 3 >/proc/sys/vm/drop_caches
done
for i in $RETRY_SUFFIXES
do
	dd "if=$FAST_INPUT" of="$TARGET_PATH/dd-out" bs=4M count=256 conv=fdatasync >"$LOG_PATH"/dd-write-bs4M-count256-fdatasync$i 2>&1
	sync ; echo 3 >/proc/sys/vm/drop_caches
done
for i in $RETRY_SUFFIXES
do
	dd "if=$FAST_INPUT" of="$TARGET_PATH/dd-out" bs=8k count=65536 oflag=direct >"$LOG_PATH"/dd-write-bs8k-count65536-direct$i 2>&1
	sync ; echo 3 >/proc/sys/vm/drop_caches
done
for i in $RETRY_SUFFIXES
do
	dd "if=$FAST_INPUT" of="$TARGET_PATH/dd-out" bs=8k count=65536 conv=fdatasync >"$LOG_PATH"/dd-write-bs8k-count65536-fdatasync$i 2>&1
	sync ; echo 3 >/proc/sys/vm/drop_caches
done
rm -f "$TARGET_PATH"/dd-out
# Small Files
# Metadata / many-small-files workload via the smallfile benchmark, swept
# over thread counts and file sizes (skipped when the tool is not present).
if test -d ./smallfiles; then
	echo files and directories generation to "$TARGET_PATH"
	# Fixed: the loop read the undefined $MAX_THREAD; the limit computed
	# earlier is $MAX_THREADS (2*cores + 2), so the sweep previously ran
	# with the wrong thread counts.
	for thread_count in `seq 2 $MAX_THREADS`
	do
		for sample_size in 128 256 1024 2048 4096 10240 102400 1024000
		do
			./smallfile/smallfile_cli.py --top "$TARGET_PATH" --response-times Y --fsync Y --file-size $sample_size --files 4096 --threads $thread_count >"$LOG_PATH"/smallfiles-${sample_size}-${thread_count} 2>&1
		done
	done
fi
# FIO
# fio workloads (skipped when fio is not installed): mixed random r/w,
# pure random write, then a multi-job scenario from a generated job file.
if test -x /bin/fio -o -x /usr/bin/fio; then
	echo FIO on "$TARGET_PATH"
	for i in $RETRY_SUFFIXES
	do
		fio --randrepeat=1 --ioengine=libaio --direct=1 --gtod_reduce=1 --name=test --filename="$TARGET_PATH"/fio-out --bs=4k --iodepth=64 --size=4G --readwrite=randrw --rwmixread=75 >"$LOG_PATH"/fio-randrw$i 2>&1
		echo 3 >/proc/sys/vm/drop_caches
	done
	for i in $RETRY_SUFFIXES
	do
		fio --randrepeat=1 --ioengine=libaio --direct=1 --gtod_reduce=1 --name=test --filename="$TARGET_PATH"/fio-out --bs=4k --iodepth=64 --size=4G --readwrite=randwrite >"$LOG_PATH"/fio-randwrite$i 2>&1
		echo 3 >/proc/sys/vm/drop_caches
	done
	# Job file simulating a writer, two mmap readers and an updater.
	cat <<EOF >4threads.fio
[global]
rw=randread
size=256m
directory=$TARGET_PATH/datafio
ioengine=libaio
iodepth=4
invalidate=1
direct=1
[bgwriter]
rw=randwrite
iodepth=32
[queryA]
iodepth=1
ioengine=mmap
direct=0
thinktime=3
[queryB]
iodepth=1
ioengine=mmap
direct=0
thinktime=5
[bgupdater]
rw=randrw
iodepth=16
thinktime=40
size=32m
EOF
	rm -rf "$TARGET_PATH"/datafio "$TARGET_PATH"/fio-out
	for i in $RETRY_SUFFIXES
	do
		mkdir "$TARGET_PATH"/datafio
		fio 4threads.fio >"$LOG_PATH"/fio-4threads$i 2>&1
		rm -rf "$TARGET_PATH"/datafio
		echo 3 >/proc/sys/vm/drop_caches
	done
	rm -f 4threads.fio
fi
if test -z "$USERSPACE"; then
	# Diskperfs
	# Plot-producing raw-device reads (block-device mode only).
	if test -d diskperf_utils; then
		echo reading from "$TARGET_DEVICE"
		timeout 30 python ./diskperf_utils/test_diskread.py "$TARGET_DEVICE" --random -m 30 -f "$LOG_PATH"/diskperf-random.plot
		echo 3 >/proc/sys/vm/drop_caches
		timeout 30 python ./diskperf_utils/test_diskread.py "$TARGET_DEVICE" -m 30 -f "$LOG_PATH"/diskperf.plot
		echo 3 >/proc/sys/vm/drop_caches
	fi
fi
# Show which monitors are still alive before the EXIT trap stops them.
ps axww | grep -E '(iotop|ioping|vmstat|sar|iostat|top) '
exit 0
| true |
db4cb08aa06a694390e6d3371f34f21ce5018fa1 | Shell | thirumurthy/dbbackup | /googledrivefiledownload.sh | UTF-8 | 504 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Download a (large) file from Google Drive with curl, working around the
# "can't scan for viruses" confirmation page for big files.
fileid="### file id ###"
filename="MyFile.csv"
# First request: collect Google's cookies (including the download-confirm
# token) while discarding the body.
curl -c ./cookie -s -L "https://drive.google.com/uc?export=download&id=${fileid}" > /dev/null
# Second request: replay the cookie jar, pass the confirmation token pulled
# from it, and save the payload as ${filename}.
# Fixed: the output option was "-o $(unknown)", which ran a non-existent
# command and gave curl an empty output name; it should use the filename
# variable defined above.
curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=$(awk '/download/ {print $NF}' ./cookie)&id=${fileid}" -o "${filename}"
# Reference : https://gist.github.com/tanaikech/f0f2d122e05bf5f971611258c22c110f
# Reference: https://stackoverflow.com/questions/48133080/how-to-download-a-google-drive-url-via-curl-or-wget/48133859#48133859
| true |
9c043b01901c2303b3fe1009fe824f18adac38d2 | Shell | emunark/holberton-system_engineering-devops | /0x05-processes_and_signals/4-to_infinity_and_beyond | UTF-8 | 112 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env bash
# To infinity and beyond
# Deliberately infinite loop: print the catchphrase every two seconds.
i=0
while :
do
	echo "To infinity and beyond"
	sleep 2
done
| true |
5f97de57745655d49bc0696045b38d7ccbbac923 | Shell | anuraggupta704/fenics-course | /src/generate_all_notebooks.sh | UTF-8 | 571 | 2.59375 | 3 | [
"CC-BY-4.0"
] | permissive | # Convert Python scripts
# Convert Python scripts
# Convert every .py under the tree into a notebook in ../notebooks, mirroring
# the directory layout.
find . -iname "*.py" -type f -exec sh -c 'echo "Converting {}" && mkdir -p ../notebooks/$(dirname {}) && python jupyter_converter.py {} ../notebooks/$(dirname {})/$(basename {} ".py").ipynb' \;
# Copy mesh files
find . -iname "*.xml" -type f -exec sh -c 'echo "Copying {}" && cp {} ../notebooks/$(dirname {})' \;
find . -iname "*.xml.gz" -type f -exec sh -c 'echo "Copying {}" && cp {} ../notebooks/$(dirname {})' \;
# Copy over required Python modules manually
cp 02_static_linear_pdes/kul/plotslopes.py ../notebooks/02_static_linear_pdes/kul
| true |
23eadf8dca65e6ccbb4591ceb776b630eceeb1bb | Shell | esa-esdl/jupyterhub-k8s | /scripts/create_clusters.sh | UTF-8 | 1,559 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env bash
# Create the JupyterHub GKE cluster: a small fixed "core" node pool for hub
# pods, plus an autoscaling "user" pool (tainted so only user pods schedule).
export CORE_CLUSTER_NAME=jupyterhub-cluster
export CORE_MACHINE_TYPE=n1-standard-2
export CORE_BOOT_DISK_TYPE=pd-standard
export CORE_NUM_NODES=1
export CORE_ZONE=europe-west3-a
export JUPYTER_USER_MACHINE_TYPE=n1-highmem-2
export JUPYTER_USER_BOOT_DISK_TYPE=pd-standard
export JUPYTER_USER_MIN_NODES=0
export JUPYTER_USER_MAX_NODES=10
export JUPYTER_USER_ZONE=europe-west3-a
export MIN_CPU_PLATFORM="Intel Broadwell"
echo "Creating the cluster and a node pool for the core pods..."
gcloud beta container clusters create $CORE_CLUSTER_NAME \
    --enable-ip-alias \
    --enable-kubernetes-alpha \
    --num-nodes $CORE_NUM_NODES \
    --zone $CORE_ZONE \
    --disk-type $CORE_BOOT_DISK_TYPE \
    --machine-type $CORE_MACHINE_TYPE \
    --min-cpu-platform "$MIN_CPU_PLATFORM" \
    --num-nodes $CORE_NUM_NODES \
    --cluster-version 1.10 \
    --node-labels hub.jupyter.org/node-purpose=core \
    --no-enable-autoupgrade \
    --no-enable-autorepair
echo "Creating a secondary autoscaling node pool for user pods..."
gcloud beta container node-pools create user-pool \
    --cluster $CORE_CLUSTER_NAME \
    --disk-type $JUPYTER_USER_BOOT_DISK_TYPE \
    --machine-type $JUPYTER_USER_MACHINE_TYPE \
    --min-cpu-platform "$MIN_CPU_PLATFORM" \
    --num-nodes 0 \
    --zone $JUPYTER_USER_ZONE \
    --enable-autoscaling \
    --min-nodes $JUPYTER_USER_MIN_NODES \
    --max-nodes $JUPYTER_USER_MAX_NODES \
    --node-labels hub.jupyter.org/node-purpose=user \
    --node-taints hub.jupyter.org_dedicated=user:NoSchedule \
    --no-enable-autoupgrade \
    --no-enable-autorepair
| true |
9c1e4e22fcf63cb2b0a8e34f8365109318d31fde | Shell | unb-libraries/CargoDock | /travis/cleanupOldImages.sh | UTF-8 | 1,281 | 3.953125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#
# Remove historical images from AWS ECR if there are more than
# $OLD_IMAGES_TO_KEEP historical images that exist in the repository.
# Requires AWS CLI credentials plus the DEPLOY_BRANCHES, TRAVIS_BRANCH,
# AWS_ACCOUNT_ID, AMAZON_ECR_REGION, SERVICE_NAME and OLD_IMAGES_TO_KEEP
# environment variables (Travis CI deployment context).
set -e
SCRIPT_DIR=$(dirname $0)
# Only clean up when the current branch appears in the comma-separated
# DEPLOY_BRANCHES list.
if [[ $DEPLOY_BRANCHES =~ (^|,)"$TRAVIS_BRANCH"(,|$) ]]; then
  AMAZON_ECR_URI="${AWS_ACCOUNT_ID}.dkr.ecr.$AMAZON_ECR_REGION.amazonaws.com"
  # Set tagStatus
  # NOTE(review): AMAZON_ECR_URI, BUILD_DATE and IMAGE_TAG are computed but
  # never used below — possibly leftovers from a related deploy script.
  BUILD_DATE=$(date '+%Y%m%d%H%M')
  IMAGE_TAG="$TRAVIS_BRANCH-$BUILD_DATE"
  # List non-current images
  echo "Cleaning Up Old Images in ECR"
  IMAGE_JSON=$(aws ecr list-images --repository-name=$SERVICE_NAME --region=$AMAZON_ECR_REGION)
  # filterOldImages.py appears to emit pipe-delimited "date|digest" lines
  # (see the cut -d\| calls below) — confirm against the helper script.
  IMAGES_TO_DEL=$(echo "$IMAGE_JSON" | python "$SCRIPT_DIR/../aws/filterOldImages.py" -b $TRAVIS_BRANCH -n $OLD_IMAGES_TO_KEEP)
  # Remove non-current images
  if [ ! -z "${IMAGES_TO_DEL// }" ]; then
    while read -r IMAGE; do
      IMAGE_DATE=$(echo $IMAGE | cut -f1 -d\|)
      IMAGE_HASH=$(echo $IMAGE | cut -f2 -d\|)
      echo "Deleting Image From $IMAGE_DATE - $IMAGE_HASH"
      aws ecr batch-delete-image --repository-name=$SERVICE_NAME --region=$AMAZON_ECR_REGION --image-ids=imageDigest=$IMAGE_HASH
    done <<< "$IMAGES_TO_DEL"
  else
    echo "No images to clean up!"
  fi
else
  echo "Not cleaning up images on branch [$TRAVIS_BRANCH]. Deployable branches : $DEPLOY_BRANCHES"
fi
| true |
4ebf9f4b6293230970eee4c035e8e0cf71cf8ab1 | Shell | KermitYao/Scripts | /sh/vms.sh | UTF-8 | 4,952 | 4.1875 | 4 | [] | no_license | #!/bin/sh
# Initialise the globals used throughout the script.
# vmsCount: number of registered VMs (getallvms output minus its header line).
# Note: getVms/setVms below share names with the functions of the same name;
# the shell keeps separate namespaces for variables and functions.
initVar(){
	vmsCount=0
	getCount=0
	vmsCount=$(($(vim-cmd vmsvc/getallvms|wc -l)-1))
	ret=Null
	vmsAll="False"
	getVms="False"
	setVms="False"
	setVmsC="False"
	setVmsO="False"
}
# Print each VM row (minus its last whitespace-separated field) followed by
# an indented power-state line; the header row of getallvms is skipped
# (getCount starts at 0).
getAll(){
	vmsInfo=$(vim-cmd vmsvc/getallvms)
	echo "${vmsInfo}"|while read info
	do
	if [ ${getCount} -gt 0 ]
	then
		echo ${info% *}
		echo "	  "$(vim-cmd vmsvc/power.getstate ${getCount}|grep 'Powered')
	fi
	# getCount doubles as the VM id passed to power.getstate; the increment
	# happens in the pipeline subshell, which is fine — it is only read here.
	let getCount++
	done
}
# getVms ID — query the power state of VM ID via ESXi's vim-cmd.
# Returns: 0 = powered on, 1 = powered off, 2 = unknown/error.
getVms(){
	vInfo=$(vim-cmd vmsvc/power.getstate $1|grep 'Powered')
	# keep only the word after the first space: "Powered on" -> "on"
	vInfo=${vInfo#* }
	if [ "${vInfo}" == 'on' ]
	then
		return 0
	elif [ "${vInfo}" == 'off' ]
	then
		return 1
	else
		return 2
	fi
}
# setVms off|on ID — drive VM ID towards the requested power state.
# Returns: 0 = already in that state, 1 = state change was issued,
#          3 = invalid first argument (not "off"/"on").
setVms(){
	if [ "$1" == "off" ]||[ "$1" == "on" ];then
		if [ "$1" == "off" ];then
			getVms $2
			if [ $? == 1 ];then
				return 0
			else
				vim-cmd vmsvc/power.off $2 >/dev/null
				return 1
			fi
		elif [ "$1" == "on" ];then
			getVms $2
			if [ $? == 0 ];then
				return 0
			else
				vim-cmd vmsvc/power.on $2 >/dev/null
				return 1
			fi
		fi
	else
		return 3
	fi
}
# Print the usage/help text (Chinese, like the rest of the tool) and exit 1.
# A single here-document replaces the chain of echo calls; $0 still expands.
getUsage(){
	cat <<EOF
Usage:
	$0 [-a ] [-g id] [-c|o id|a] [-h|?]
说明:
	[-a] 获取全部的虚拟机列表和状态.
	[-g id] 为获取指定的虚拟机的状态.
	[-c id|a] 为关闭指定的虚拟机,可选参数a则关闭所有虚拟机.
	[-o id|a] 为打开指定的虚拟机,可选参数o则打开所有虚拟机.
	[-h|?] 获取帮助信息.
	id 指的是虚拟机的序号,可以用 [-a] 参数查看id.
EOF
	exit 1
}
# setOpt "$@" — parse the command-line flags with getopts and record the
# requested action in the globals initialised by initVar.
setOpt(){
	while getopts 'ag:c:o:h' opt; do
		case "$opt" in
			a)
				vmsAll="True"
				;;
			g)
				getVms="True"
				getVmsV="$OPTARG"
				;;
			c)
				setVmsC="True"
				setVmsCV="$OPTARG"
				;;
			o)
				setVmsO="True"
				setVmsOV="$OPTARG"
				;;
			h)
				getUsage
				;;
			?)
				getUsage
				;;
		esac
	done
}
# Main dispatch: run exactly one action according to the flags parsed by
# setOpt. BUG FIX: the original tested $? in if/elif chains — the elif saw
# the exit status of the previous [ ] test instead of the function, so
# return codes 2/3 were misreported as "off"/"正在..". The status is now
# captured once in $ret and compared numerically.
initVar
setOpt $@
if [ "${vmsAll}" == "True" ];then
	getAll
elif [ "${getVms}" == "True" ];then
	getVms ${getVmsV}
	ret=$?
	if [ ${ret} -eq 0 ];then
		echo vmsID:${getVmsV} -- vmsState:on
	elif [ ${ret} -eq 1 ];then
		echo vmsID:${getVmsV} -- vmsState:off
	else
		echo vmsID:${getVmsV} -- vmsState:错误.
	fi
elif [ "${setVmsC}" == "True" ];then
	# Power off: argument "a" means every VM, otherwise a single id.
	if [ "${setVmsCV}" == "a" ];then
		for i in $(seq ${vmsCount})
		do
			setVms off $i
			ret=$?
			if [ ${ret} -eq 0 ];then
				echo vmsID:$i -- vmsState:已关闭.
			elif [ ${ret} -eq 1 ];then
				echo vmsID:$i -- vmsState:正在关闭.
			else
				echo vmsID:$i -- vmsState:错误.
			fi
		done
	else
		setVms off "${setVmsCV}"
		ret=$?
		if [ ${ret} -eq 0 ];then
			echo vmsID:${setVmsCV} -- vmsState:已关闭.
		elif [ ${ret} -eq 1 ];then
			echo vmsID:${setVmsCV} -- vmsState:正在关闭.
		else
			echo vmsID:${setVmsCV} -- vmsState:错误.
		fi
	fi
elif [ "${setVmsO}" == "True" ];then
	# Power on: argument "a" means every VM, otherwise a single id.
	if [ "${setVmsOV}" == "a" ];then
		for i in $(seq ${vmsCount})
		do
			setVms on $i
			ret=$?
			if [ ${ret} -eq 0 ];then
				echo vmsID:$i -- vmsState:已开启.
			elif [ ${ret} -eq 1 ];then
				echo vmsID:$i -- vmsState:正在开启.
			else
				echo vmsID:$i -- vmsState:错误.
			fi
		done
	else
		setVms on "${setVmsOV}"
		ret=$?
		if [ ${ret} -eq 0 ];then
			echo vmsID:${setVmsOV} -- vmsState:已开启.
		elif [ ${ret} -eq 1 ];then
			echo vmsID:${setVmsOV} -- vmsState:正在开启.
		else
			echo vmsID:${setVmsOV} -- vmsState:错误.
		fi
	fi
else
	getUsage
fi
exit 0 | true |
2c2b72d466c7115a7139199a9ae21a9df7125135 | Shell | ecs-org/ecs-appliance | /salt/appliance/ssl/prepare-ssl.sh | UTF-8 | 3,072 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | prepare_ssl () {
    # re-generate dhparam.pem if not found or less than 2048 bit
    # (the 224-byte threshold presumably corresponds to the minimum PEM size
    # of a 2048-bit parameter file — TODO confirm)
    recreate_dhparam=$(test ! -e /app/etc/dhparam.pem && echo "true" || echo "false")
    if ! $recreate_dhparam; then
        recreate_dhparam=$(test "$(stat -L -c %s /app/etc/dhparam.pem)" -lt 224 && echo "true" || echo "false")
    fi
    if $recreate_dhparam; then
        echo "no or to small dh.param found, regenerating with 2048 bit (takes a few minutes)"
        mkdir -p /app/etc
        openssl dhparam 2048 -out /app/etc/dhparam.pem
    fi
    # certificate setup
    # Precedence: env-supplied key/cert > letsencrypt (dehydrated) > snakeoil.
    use_snakeoil=true
    domains_file=/app/etc/dehydrated/domains.txt
    if test "${APPLIANCE_SSL_KEY}" != "" -a "${APPLIANCE_SSL_CERT}" != ""; then
        echo "Information: using ssl key,cert supplied from environment"
        printf "%s" "${APPLIANCE_SSL_KEY}" > /app/etc/server.key.pem
        printf "%s" "${APPLIANCE_SSL_CERT}" > /app/etc/server.cert.pem
        cat /app/etc/server.cert.pem /app/etc/dhparam.pem > /app/etc/server.cert.dhparam.pem
        use_snakeoil=false
    else
        if is_truestr "${APPLIANCE_SSL_LETSENCRYPT_ENABLED:-true}"; then
            use_snakeoil=false
            echo "Information: generate certificates using letsencrypt (dehydrated client)"
            # we need a SAN (subject alternative name) for java ssl :(
            printf "%s" "$APPLIANCE_DOMAIN $APPLIANCE_DOMAIN" > $domains_file
            # Extract dehydrated's configured account-key path; register a new
            # ACME account only when that key does not exist yet.
            ACCOUNT_KEY=$(gosu app dehydrated -e | grep "ACCOUNT_KEY=" | sed -r 's/.*ACCOUNT_KEY="([^"]+)"/\1/g')
            if test ! -e "$ACCOUNT_KEY"; then
                gosu app dehydrated --register --accept-terms
            fi
            gosu app dehydrated -c
            res=$?
            if test "$res" -eq 0; then
                echo "Information: letsencrypt was successful, using letsencrypt certificate"
            else
                sentry_entry "Appliance SSL-Setup" "Warning: letsencrypt client (dehydrated) returned error $res" warning
                # Renewal failed: only fall back to snakeoil when an existing
                # cert expires within one day (openssl -checkend 86400s).
                for i in $(cat $domains_file | sed -r "s/([^ ]+).*/\1/g"); do
                    cert_file=/app/etc/dehydrated/certs/$i/cert.pem
                    openssl x509 -in $cert_file -checkend $((1 * 86400)) -noout
                    if test $? -ne 0; then
                        use_snakeoil=true
                        sentry_entry "Appliance SSL-Setup" "Error: letsencrypt cert ($i) is valid less than a day, defaulting to snakeoil"
                    fi
                done
            fi
        fi
    fi
    if is_falsestr "${APPLIANCE_SSL_LETSENCRYPT_ENABLED:-true}"; then
        # delete domains_file to keep cron from retrying to refresh certs
        if test -e $domains_file; then rm $domains_file; fi
    fi
    if $use_snakeoil; then
        echo "Warning: couldnt setup server certificate, copy snakeoil.* to appliance/server*"
        cp /app/etc/snakeoil/ssl-cert-snakeoil.pem /app/etc/server.cert.pem
        cp /app/etc/snakeoil/ssl-cert-snakeoil.key /app/etc/server.key.pem
        cat /app/etc/server.cert.pem /app/etc/dhparam.pem > /app/etc/server.cert.dhparam.pem
    fi
}
| true |
5cd6d4dc6e84ca00eac8a16771fd1bed2047fb14 | Shell | wang-zhijun/Bash | /d | UTF-8 | 611 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# A command-line dictionary scraping the online iciba.com dictionary.
# Usage: ./d memory   -> prints the pronunciation and meanings of "memory".
wget -q "http://www.iciba.com/$1"
# Strip backspaces (col -b) and all whitespace so the regexes below match.
cat "$1" | col -b | sed 's/[ \t]*//g' > tmp
# Extract the pronunciation between the <strong>[ ... ]</strong> markers.
# BUG FIX: capture it into $proun — the original printed it with sed but then
# appended the never-assigned variable $proun to "data" (i.e. nothing).
proun=$(sed -n 's/<strong>\[<\/strong><stronglang="EN-US"xml:lang="EN-US">\(.*\)<\/strong><strong>\]<\/strong>/\1/p' tmp)
printf '%s\n' "$proun"
echo -n "$proun" >> data
# Select the <span class="label_list"> section, then on <label> lines delete
# the <label>/</label> tags (the s#...# form avoids escaping the slashes).
sed -nr '/<spanclass="label_list"/,/<\/span>/p' tmp | sed -nr '/<label>/{s#<[/]?label>##g;p}'
rm "$1" tmp
| true |
f3f1722dfc6225340438221047736c3d12116a5f | Shell | tkooda/config-public | /host/0/bash/bash.d/30-aliases.sh | UTF-8 | 2,091 | 3.4375 | 3 | [] | no_license | ## misc aliases
# Safety: prompt before overwriting/removing with cp/mv/rm.
alias cp='cp -i'
alias mv='mv -i'
alias rm='rm -i'
# Process listings as a forest (pid, user, elapsed time, args); pslw = wide.
alias psl='ps f -eopid,user,etime,args'
alias pslw='ps f -wwweopid,user,etime,args'
alias scr='screen -D -R'
alias tem='emacs -nw'
alias mroe=more
# Make the current tree world-readable (dirs also world-executable).
alias give='find . -type d -exec chmod a+rx {} \; ; find . -type f -exec chmod a+r {} \; '
# Read URIs from stdin, download each (skipping existing files) and record
# the source URI next to the download as <file>.URI.
alias geturi='while read uri; do f=`basename ${uri:?Invalid uri}` && [ ! -e "$f" ] && curl -s $uri > $f && echo $uri > $f.URI && echo success: $f || echo "failure: $f"; done'
## tkooda : 2015-03-23 : allow args
##alias gotmp='cd $( mktemp -d /tmp/gotmp.`date +%m%d%H%M`.XXXXXX )'
# gotmp [label...] — create and enter a fresh /tmp/gotmp.<ts>.<label>.XXXXXX dir.
function gotmp() { cd $( mktemp -d /tmp/gotmp.`date +%m%d%H%M`.`echo ${@// /_} |tr ' ' _`.XXXXXX; ); } ;
# Jump to the most recently created gotmp dir owned by the current user.
alias golast='for i in /tmp/gotmp.*; do [ -d "$i" ] && [ "`stat --format='%U' $i`" == "`whoami`" ] && last=$i; done; if [ -n "$last" ]; then echo $last; cd $last; fi'
alias lynx='lynx -accept_all_cookies'
alias df='df -hT'
alias tolower="tr '[A-Z]' '[a-z]'"
alias toupper="tr '[a-z]' '[A-Z]'"
# Print one random line of the input.
alias random-line="perl -e 'print [<>]->[int rand $.]'"
alias bc='bc -ql'
# "safe move": rsync then delete sources; resumable (--partial).
alias smv='rsync --remove-source-files --partial -ai'
alias si="svn ci -m ''"
alias gc='git commit -a --allow-empty-message -m ""'
alias gp='git push'
alias gcp='git commit -a --allow-empty-message -m "" && git push'
alias gits='git status'
# mkdcd NAME — make and enter a dated directory "YYYY-MM-DD.NAME".
function mkdcd { x="`date -I`.${1// /_}"; mkdir "${x}" && cd "${x}" && pwd; }
# NOTE(review): python-2-only syntax (print statement, urllib.quote); this
# fails under python3 — consider urllib.parse.quote. Left as-is here.
function urlencode { python -c "import urllib; print urllib.quote('''$1''',safe='')"; }
function cdf() { cd "${1%/*}"; pwd; } # cd to parent of file
## 2017-04-17 : tkooda : transfer.sh (public) : upload file to public server to get URL to send someone
transfer() { if [ $# -eq 0 ]; then echo -e "No arguments specified. Usage:\n transfer /tmp/test.txt"; return 1; else basefile=$(basename "$1" | sed -e 's/[^a-zA-Z0-9._-]/-/g'); curl --progress-bar --upload-file "$1" "https://transfer.sh/$basefile"; fi; }
## 2018-08-19 : tkooda : generate a decent human-readable random password
alias decentpassword='r=$(cat /dev/urandom |tr -dc abcdefghknopqrstuvwxyzCDEY379 |head -c12); echo ${r:0:4}-${r:4:4}-${r:8:4}'
| true |
a9686be6738a5710b234709106b134af1f09d0d1 | Shell | cstevens/Project-Support-Scripts | /IoTivity/gen2.sh | UTF-8 | 1,055 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Regenerate the IoTivity server sources for this project from its
# DeviceBuilder JSON description. Requires $OCFPATH to point at the
# IoTivity/DeviceBuilder checkout.
# BUG FIX: the original built each command in a string and ran it through
# `eval` — this breaks on paths containing spaces and is an injection
# hazard; the commands are now run directly with quoted arguments.
CURPWD=$(pwd)
PROJNAME=${PWD##*/}

# DeviceBuilder resolves its inputs relative to its own directory.
cd "${OCFPATH}/DeviceBuilder"
sh ./DeviceBuilder_C++IotivityServer.sh "$CURPWD/$PROJNAME.json" "$CURPWD/device_output" "oic.d.light"

# copying the introspection file to the executable folder
cp "$CURPWD/device_output/code/server_introspection.dat" "$CURPWD/bin/"

# quick fix: using the iotivity supplied oic_svr_db_server_justworks.dat file
cp "${OCFPATH}/iotivity/resource/csdk/security/provisioning/sample/oic_svr_db_server_justworks.dat" "$CURPWD/bin/server_security.dat"

# Never clobber a hand-edited generated source; always refresh the .gen copy
# so the user can merge with diff3.
if [ -e "$CURPWD/src/$PROJNAME.cpp" ]
then
    echo "It appears that you have modified the automatically generated source file. Use a tool like diff3 if you want to merge in any changes."
else
    cp "$CURPWD/device_output/code/server.cpp" "$CURPWD/src/$PROJNAME.cpp"
    cp "$CURPWD/device_output/code/server.cpp" "$CURPWD/src/$PROJNAME.cpp.gen"
fi
cd "$CURPWD"
| true |
b771303775ba2b012885d1ad5ac1c89f26df01d4 | Shell | NanYoMy/DeepSimRegistration | /scripts/retune_registration.sh | UTF-8 | 3,151 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# hyperparameter tuning.
# Sweeps the regularisation weight (--lam) per dataset and per similarity
# loss, launching one src.train_registration run per value. Toggle the three
# dataset sweeps with the flags below.
TUNE_PLATELET=true
TUNE_PHC=true
TUNE_BRAIN=true
# Check if slurm compute cluster available. Submit as slurm job if possible.
# WRAPPER_FUNC is intentionally empty for local runs, so the commands below
# execute python3 directly (it must therefore stay unquoted at use sites).
if sbatch -h &> /dev/null; then
    echo "Submitting to slurm..."
    WRAPPER_FUNC=scripts/slurm/slurm_submit_nice.sh
else
    echo "Running locally..."
    WRAPPER_FUNC=
fi
if $TUNE_PLATELET; then
    # l2
    for LAM in 0.04
    do
        $WRAPPER_FUNC python3 -m src.train_registration --dataset platelet-em --savedir ./out/platelet-em/registration/l2/$LAM/ --loss l2 --ncc_win_size 9 --lam $LAM --channels 64 128 256 --batch_size 3 --accumulate_grad_batches 2 --gpus -1 --lr 0.0001 --bnorm --dropout --distributed_backend ddp --max_steps=12500
    done
fi
if $TUNE_PHC; then
    # l2
    for LAM in 0.000078125 0.00015625 0.0003125 0.000625
    do
        $WRAPPER_FUNC python3 -m src.train_registration --dataset phc-u373 --savedir ./out/phc-u373/registration/l2/$LAM/ --loss l2 --ncc_win_size 9 --lam $LAM --channels 64 128 256 --batch_size 5 --gpus -1 --lr 0.0001 --bnorm --dropout --accumulate_grad_batches 2 --distributed_backend ddp --max_steps=10000
    done
    # ncc2
    for LAM in 2
    do
        $WRAPPER_FUNC python3 -m src.train_registration --dataset phc-u373 --savedir ./out/phc-u373/registration/ncc2/$LAM/ --loss ncc2 --ncc_win_size 9 --lam $LAM --channels 64 128 256 --batch_size 5 --gpus -1 --lr 0.0001 --bnorm --dropout --accumulate_grad_batches 2 --distributed_backend ddp --max_steps=10000
    done
    # deepsim
    for LAM in 1
    do
        $WRAPPER_FUNC python3 -m src.train_registration --dataset phc-u373 --savedir ./out/phc-u373/registration/deepsim/$LAM/ --loss deepsim --deepsim_weights ./weights/phc-u373/segmentation/weights.ckpt --lam $LAM --channels 64 128 256 --batch_size 5 --gpus -1 --lr 0.0001 --bnorm --dropout --accumulate_grad_batches 2 --distributed_backend ddp --max_steps=10000
    done
fi
if $TUNE_BRAIN; then
    # l2
    for LAM in 0.02 0.32
    do
        $WRAPPER_FUNC python3 -m src.train_registration --dataset brain-mri --loss l2 --ncc_win_size 9 --lam $LAM --channels 32 64 128 --batch_size 1 --gpus -1 --lr 0.0001 --bnorm --dropout --accumulate_grad_batches 4 --max_steps=15000
    done
    # ncc2
    for LAM in 0.25 4 8 16
    do
        $WRAPPER_FUNC python3 -m src.train_registration --dataset brain-mri --loss ncc2 --ncc_win_size 9 --lam $LAM --channels 32 64 128 --batch_size 1 --gpus -1 --lr 0.0001 --bnorm --dropout --accumulate_grad_batches 4 --max_steps=15000
    done
    # ncc2+supervised
    for LAM in 0.5 8
    do
        $WRAPPER_FUNC python3 -m src.train_registration --dataset brain-mri --loss ncc2+supervised --ncc_win_size 9 --lam $LAM --channels 32 64 128 --batch_size 1 --gpus -1 --lr 0.0001 --bnorm --dropout --accumulate_grad_batches 4 --max_steps=15000
    done
    # deepsim
    for LAM in 0.5 8
    do
        $WRAPPER_FUNC python3 -m src.train_registration --dataset brain-mri --loss deepsim --ncc_win_size 9 --deepsim_weights ./weights/brain-mri/segmentation/weights.ckpt --lam $LAM --channels 32 64 128 --batch_size 1 --gpus -1 --lr 0.0001 --bnorm --dropout --accumulate_grad_batches 4 --max_steps=15000
    done
fi | true |
59ac257ea046070660c98d307475c1308fe1abe0 | Shell | ibaiul/SysAdmin | /mysql/mysql-secure.sh | UTF-8 | 3,560 | 4 | 4 | [] | no_license | #!/bin/bash
#
# Forked from https://gist.github.com/coderua/5592d95970038944d099
#
# Automated mysql secure installation for RedHat based systems
#
# - You can set a password for root accounts.
# - You can remove root accounts that are accessible from outside the local host.
# - You can remove anonymous-user accounts.
# - You can remove the test database (which by default can be accessed by all users, even anonymous users),
#   and privileges that permit anyone to access databases with names that start with test_.
#   For details see documentation: http://dev.mysql.com/doc/refman/5.7/en/mysql-secure-installation.html
#
# Tested on CentOS 7 - MySQL 5.7.15 - MySQL 5.6.33 - (other versions may require little adapts)
#
# Usage MySQL 5.7+:
# - Setup mysql root password: systemctl start mysqld (ensures mysql has started once at least and temporary password has been created)
#   tempPass="$(grep 'temporary password' /var/log/mysqld.log | awk '{printf $NF}')"
#   sh mysql-secure.sh $tempPass 'your_new_root_password'
# - Change mysql root password: sh mysql-secure.sh 'your_old_root_password' 'your_new_root_password'
#
# Usage MySQL 5.6+:
# - Setup mysql root password: sh mysql-secure.sh 'your_new_root_password'
# - Change mysql root password: sh mysql-secure.sh 'your_old_root_password' 'your_new_root_password'
#
# SECURITY NOTE: passwords are passed on the command line and are therefore
# visible in `ps` and shell history; prefer a throwaway shell when possible.
#
# Delete package expect when script is done
# 0 - No;
# 1 - Yes.
PURGE_EXPECT_WHEN_DONE=0
#
# Check the bash shell script is being run by root
#
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root" 1>&2
    exit 1
fi
#
# Check input params
# (one argument -> set a fresh root password; two -> change an existing one)
# NOTE(review): the obsolescent `-a` test operator is kept as-is; consider
# [ ... ] && [ ... ] instead.
#
if [ -n "${1}" -a -z "${2}" ]; then
    # Setup root password
    CURRENT_MYSQL_PASSWORD=''
    NEW_MYSQL_PASSWORD="${1}"
elif [ -n "${1}" -a -n "${2}" ]; then
    # Change existens root password
    CURRENT_MYSQL_PASSWORD="${1}"
    NEW_MYSQL_PASSWORD="${2}"
else
    echo "===== Usage =========================================================================="
    echo "Setup root password MySQL 5.6: ${0} 'your_new_root_password'"
    echo "Change root password MySQL 5.6: ${0} 'your_old_root_password' 'your_new_root_password'"
    echo "Setup root password MySQL 5.7: ${0} 'your_tmp_root_password' 'your_new_root_password'"
    echo "Change root password MySQL 5.7: ${0} 'your_old_root_password' 'your_new_root_password'"
    exit 1
fi
#
# Check is expect package installed
# (expect drives the interactive mysql_secure_installation dialog below)
#
yum list installed expect
if [ $? -ne 0 ]; then
    echo "Can't find expect. Trying to install ..."
    yum install -y expect
    status=$?
    if [ $status -ne 0 ]; then
        echo "Unable to install expect. Status: $status. Exiting ..."
        exit 1
    fi
fi
#
# Execution mysql_secure_installation
# The unquoted here-doc below IS the expect program: the two password
# variables are expanded into it by the shell. Do not add anything to the
# here-doc body — it is program input, not shell code.
#
/usr/bin/expect << EOF
set timeout 3
spawn mysql_secure_installation
expect "Enter*password*root"
send "$CURRENT_MYSQL_PASSWORD\r"
expect {
    "Set root password" {
        send "y\r"
        exp_continue
    } "Change*password" {
        send "y\r"
        exp_continue
    } "New password" {
        send "$NEW_MYSQL_PASSWORD\r"
    }
}
expect "Re-enter new password"
send "$NEW_MYSQL_PASSWORD\r"
expect {
    "Remove anonymous users" {
        send "y\r"
    } "Do you wish to continue with the password provided" {
        send "y\r"
        exp_continue
    } "Change the password for root" {
        send "n\r"
        exp_continue
    }
}
expect "Disallow root login remotely"
send "y\r"
expect "Remove test database and access to it"
send "y\r"
expect "Reload privilege tables now"
send "y\r"
expect eof
EOF
# NOTE(review): expect's exit status is not checked; a failed dialog still
# lets the script finish with status 0.
if [ "${PURGE_EXPECT_WHEN_DONE}" -eq 1 ]; then
    # Uninstalling expect package
    yum remove -y expect
fi
exit 0 | true |
d8d9eae214196e07db65e74f91804fab819b81ff | Shell | Apoorvd/com.celebal.project | /DeployApacheAndFTP.sh | UTF-8 | 2,433 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Provision an Ubuntu host with a vsftpd FTP server and an Apache virtual
# host ("your_domain"). Must be run as root; review the hard-coded domain
# and port range before use.
apt-get update
apt-get upgrade -y

#Install System Firewall
apt-get install ufw -y
systemctl start ufw

#Install FTP server
apt-get install vsftpd -y

#Enable and start vsftpd service
echo "------------- On SystemD ------------- "
systemctl start vsftpd
systemctl enable vsftpd
echo "------------- On SysVInit ------------- "
service vsftpd start
#chkconfig --level 35 vsftpd on

# Keep a copy of the stock configuration, then write our own.
mv /etc/vsftpd.conf /etc/vsftpd.conf_orig

# BUG FIX: the original built the config in a string full of literal "\n"
# sequences and wrote it with an unquoted `echo`, producing one mangled
# line; a quoted here-doc writes the file verbatim. Also fixes the
# "connect_from_port_20=YE" typo (-> YES).
cat > /etc/vsftpd.conf <<'EOF'
listen=YES
listen_ipv6=NO
anonymous_enable=NO
local_enable=YES
write_enable=YES
local_umask=022
dirmessage_enable=YES
use_localtime=YES
xferlog_enable=YES
connect_from_port_20=YES
chroot_local_user=YES
secure_chroot_dir=/var/run/vsftpd/empty
pam_service_name=vsftpd
rsa_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
rsa_private_key_file=/etc/ssl/private/ssl-cert-snakeoil.key
ssl_enable=NO
pasv_enable=Yes
pasv_min_port=10000
pasv_max_port=10100
allow_writeable_chroot=YES
EOF

# Open FTP control/data ports and the passive range configured above.
ufw allow from any to any port 20,21,10000:10100 proto tcp
chown -cH root /etc/vsftpd.conf
chmod 644 /etc/vsftpd.conf   # a config file should not be executable
apt install ftp
systemctl restart vsftpd

# if u want new user uncomments the line below
#pswd="ftpuser"
#username="ftpuser"
#pass=$(perl -e 'print crypt($ARGV[0], "password")' $pswd)
#useradd -m -p "$pass" "$username"
#sudo passwd ftpuser
#then enter ur passwrd
#bash -c "echo FTP TESTING > /home/ftpuser/FTP-TEST"

# Apache config
apt install apache2 -y
ufw app list
ufw allow 'Apache'
ufw status
systemctl enable apache2

# Virtual Host
mkdir /var/www/your_domain
chown -R $USER:$USER /var/www/your_domain
chmod -R 755 /var/www/your_domain

# BUG FIX: same literal-"\n" problem as the vsftpd config. Additionally,
# the original let the shell expand ${APACHE_LOG_DIR} (unset here) instead
# of leaving it for Apache; the quoted here-doc keeps it literal.
cat > /var/www/your_domain/index.html <<'EOF'
<html>
    <head>
        <title>Welcome to Your_domain!</title>
    </head>
    <body>
        <h1>Success! The your_domain virtual host is working!</h1>
    </body>
</html>
EOF

cat > /etc/apache2/sites-available/your_domain.conf <<'EOF'
<VirtualHost *:80>
    ServerAdmin webmaster@localhost
    ServerName your_domain
    ServerAlias your_domain
    DocumentRoot /var/www/your_domain
    ErrorLog ${APACHE_LOG_DIR}/error.log
    CustomLog ${APACHE_LOG_DIR}/access.log combined
</VirtualHost>
EOF

a2ensite your_domain.conf
a2dissite 000-default.conf
apache2ctl configtest
systemctl restart apache2
| true |
ec48035eb16b69dd8370145acb56218caaff40c9 | Shell | Nv7-GitHub/vjsexample | /run.sh | UTF-8 | 340 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# needs reflex, get with:
# go get github.com/cespare/reflex
# Dev loop: build once, run the dev server and the V->JS watcher in the
# background, and rebuild styles via reflex in the foreground; Ctrl-C
# triggers cleanup of the background jobs.
sh build.sh
v run devserver/devserver.v &
PID1=$!
v -prod -skip-unused -o main.js -b js -watch . &
PID2=$!
cleanup() {
    echo "Cleaning up..."
    kill -9 $PID1
    kill -9 $PID2
    # NOTE(review): PID2+10 looks like a guess at a child process spawned by
    # the v watcher; there is no guarantee that pid exists or is related —
    # confirm, or kill the process group instead.
    kill -9 $(( $PID2 + 10 ))
    exit
}
trap cleanup INT
reflex -r 'styles.css' sh styles.sh | true |
7c9941251e60f073f828c3e2f3bdb716149387f2 | Shell | ashubits/aktu2017 | /OS Security Lab/KrShanu/lab4/cmp.sh | UTF-8 | 550 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Compare entries from two hash listings (lines of "name:hash") and report
# when the hashes match but the file names differ.
# NOTE(review): as in the original, only the values from the LAST line of
# each file survive the loops below — confirm single-entry inputs are the
# intent before relying on this.
file="/home/monika/hash1.txt"
while IFS=: read -r col1 col2
do
    echo "$col1" : "$col2"
    echo "$col2" : "$col1"
done < "$file"

file="/home/monika/hash2.txt"
while IFS=: read -r col3 col4
do
    echo "$col3" : "$col4"
    echo "$col4" : "$col3"
done < "$file"

# BUG FIX: the original comparison block had fatal errors — missing spaces
# inside [ ], "eq" instead of "=", and the undefined variables $vol2/$val4.
# Compare the name/hash pairs actually read above instead.
if [ "$col2" = "$col4" ]; then
    if [ "$col1" != "$col3" ]; then
        echo "Different files"
        printf '%s\n' "$col3,$col4"
    fi
fi
© 2018 GitHub, Inc.
Terms
Privacy
Security
Status
Help
Contact GitHub
API
Training
Shop
Blog
About
| true |
036c2e68205d553c1971b8b6f5fff5718aaae653 | Shell | crzidea/local-bin | /docker-build | UTF-8 | 109 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env bash
_dirname=`basename $PWD`
image=${_dirname/docker-/}
docker build --tag $USER/$image $@ .
| true |
47ec15731b7bb09b93dc23baa3c2f2e99f7e9d48 | Shell | salekseev/Manifests | /hdp/templates/check_status.sh.erb | UTF-8 | 692 | 3.109375 | 3 | [] | no_license | #!/bin/bash
file="/tmp/waitforhosttoregister.pid"
if [ -f $file ]; then
echo "Already Waited for agent it enough..."
else
while [ 1 ]
do
tasks=`curl -s -u admin:admin -H 'X-Requested-By: ambari' 'http://<%= scope.lookupvar('hdp::params::AMBARISERVER_HOSTNAME') %>:8080/api/v1/clusters/hadoopcluster/requests/1?fields=tasks/Tasks/*'|python /tmp/check_status.py`
if [[ $? -eq 0 ]]
then
elif [[ $? -eq <%= scope.lookupvar('hdp::params::COUNT_HOSTNAME') %> ]]
then
/bin/touch /tmp/waitforhosttoregister.pid
exit 1;
else
sleep 15
fi
done
/bin/touch /tmp/waitforhosttoregister.pid
fi
| true |
8bf9757b917473537b4999948e07641d45e1074e | Shell | KLMM-LSD/LSD-Deployment | /credential-download/copy-ssh-keys.sh | UTF-8 | 437 | 3.390625 | 3 | [] | no_license | #Temp file to work with
# Merge SSH public keys fetched from the GitHub API into this machine's
# authorized_keys, de-duplicating and dropping blank lines on the way.
FILE=github_keys.txt
#Authorized_keys file on machine
SSHFILE=~/.ssh/authorized_keys
#Grabs keys from Github API
# (tee both echoes the keys to the terminal and writes them to $FILE)
python get-ssh-keys.py | (tee $FILE;)
#Appends already authorized keys to downloaded file
# NOTE(review): errors noisily if $SSHFILE does not exist yet — consider
# `touch "$SSHFILE"` (and mkdir -p ~/.ssh) first.
cat $SSHFILE >> $FILE
#Removes all duplicate lines
sort -u -o $FILE $FILE
#Removes lines containing only whitespace
sed -i "/^\s*$/d" $FILE
#copies downloaded file to ~/.ssh/authorized_keys
cp $FILE $SSHFILE | true |
f5ce3c9b07ec83af7e32645abecc0fcfbac3d9b5 | Shell | remew/dotfiles | /bootloader.sh | UTF-8 | 758 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Upstream locations of the dotfiles repository (git clone vs tarball).
GIT_URL="https://github.com/remew/dotfiles.git"
TAR_URL="https://github.com/remew/dotfiles/archive/master.tar.gz"
# NOTE(review): DOT_PATH is defined but never used below — the script cd's
# into the literal "dotfiles" directory instead.
DOT_PATH=$HOME/dotfiles
# exists CMD — succeed (status 0) when CMD is available.
# Uses the `command -v` builtin instead of the external `which`: it is
# POSIX-specified, avoids a fork, and also resolves shell builtins and
# functions. The redundant `return $?` is dropped (a function already
# returns the status of its last command).
function exists() {
    command -v "$1" > /dev/null 2>&1
}
# Fetch the dotfiles repo into ./dotfiles, preferring git and falling back
# to downloading the master tarball with curl or wget.
function dotfiles_download() {
    if exists "git"; then
        git clone $GIT_URL
    elif exists "curl" || exists "wget"; then
        # The chosen downloader writes the tarball to stdout; the whole
        # if-block is piped into tar, which unpacks it in the current dir.
        if exists "curl"; then
            curl -L $TAR_URL
        elif exists "wget"; then
            wget -qO - $TAR_URL
        fi | tar zxv
        # GitHub tarballs unpack as <repo>-master; normalise the name.
        mv dotfiles-master dotfiles
    else
        echo "curl or wget is required"
        exit 1
    fi
}
# Banner, then fetch the repository and run its installer from inside it.
echo "==========================="
echo "remew's dotfiles bootloader"
echo "==========================="
dotfiles_download
cd dotfiles
./install.sh
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.