blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7d0729497b86fe785e2b36ee53c4fbe9b11829c3 | Shell | louiscklaw/lichee-nano-one-key-package | /overlay/init.d/S90_start_wifi | UTF-8 | 704 | 3.1875 | 3 | [] | no_license | #!/bin/sh
start() {
	printf "Starting wifi\n"
	# Load the Realtek RTL8723BS driver from the overlay partition.
	cd /overlay || return 1
	insmod r8723bs.ko
	cd -
	# Expose the firmware blob where the kernel driver looks for it.
	mkdir -p /lib/firmware/rtlwifi
	rm -f /lib/firmware/rtlwifi/rtl8723bs_nic.bin
	ln -s /overlay/rtl8723bs_nic.bin /lib/firmware/rtlwifi/rtl8723bs_nic.bin
	# Swap in the overlay wpa_supplicant config, keeping the stock one as a
	# backup. Only do the move on the first run: once /etc/wpa_supplicant.conf
	# is a symlink, moving it again would overwrite the buildroot backup.
	if [ ! -L /etc/wpa_supplicant.conf ]; then
		mv /etc/wpa_supplicant.conf /etc/wpa_supplicant.conf.buildroot
		ln -s /overlay/wpa_home/wpa_supplicant.conf /etc/wpa_supplicant.conf
	fi
	sync
	wpa_supplicant -B -i wlan0 -c /etc/wpa_supplicant.conf
	# Static network configuration (DHCP deliberately left disabled below).
	# sleep 10
	# udhcpc -i wlan0
	ifconfig wlan0 192.168.99.250 netmask 255.255.255.0 up
	route add default gw 192.168.99.1
	echo "nameserver 192.168.99.1" > /etc/resolv.conf
}
# Provide a stop action: "stop" was dispatched below but the function was
# never defined, so `S90_start_wifi stop` failed with "stop: not found".
stop() {
	printf "Stopping wifi\n"
	killall wpa_supplicant 2> /dev/null
	ifconfig wlan0 down 2> /dev/null
	rmmod r8723bs 2> /dev/null
}

case "$1" in
	start)
		start
		;;
	stop)
		stop
		;;
esac
| true |
2fef1d8beaed52bd7dbf0b0c06ef289ac8070f4a | Shell | billwyy/crysadm2 | /include/nginx.sh | UTF-8 | 1,510 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Build nginx from source and deploy the Crysadm configuration.
# Relies on installer helpers defined elsewhere: Echo_Blue, Tar_Cd, StartUp,
# and the globals ${Nginx_Ver}, ${cur_dir}, ${Default_Website_Dir}.
Install_Nginx()
{
    Echo_Blue "[+] Installing ${Nginx_Ver}... "
    # Dedicated unprivileged user/group for the nginx workers.
    groupadd www
    useradd -s /sbin/nologin -g www www
    # Unpack the source tarball and cd into it (installer helper).
    Tar_Cd ${Nginx_Ver}.tar.gz ${Nginx_Ver}
    ./configure --user=www --group=www --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module --with-http_v2_module --with-http_gzip_static_module --with-ipv6 --with-http_sub_module || return 1
    # Abort on a failed build instead of silently deploying configs for a
    # broken installation (errors were previously ignored).
    make && make install || return 1
    cd ../
    ln -sf /usr/local/nginx/sbin/nginx /usr/bin/nginx
    rm -f /usr/local/nginx/conf/nginx.conf
    cd ${cur_dir}
    # \cp bypasses any cp alias (e.g. cp -i) so existing files are replaced
    # without prompting.
    \cp conf/nginx_a.conf /usr/local/nginx/conf/nginx.conf
    \cp conf/proxy.conf /usr/local/nginx/conf/proxy.conf
    \cp conf/pathinfo.conf /usr/local/nginx/conf/pathinfo.conf
    \cp conf/proxy-pass-php.conf /usr/local/nginx/conf/proxy-pass-php.conf
    \cp conf/enable-ssl-example.conf /usr/local/nginx/conf/enable-ssl-example.conf
    mkdir -p ${Default_Website_Dir}
    chmod +w ${Default_Website_Dir}
    mkdir -p /home/wwwlogs
    chmod 777 /home/wwwlogs
    chown -R www:www ${Default_Website_Dir}
    # -p: do not fail if the vhost directory already exists (re-runs).
    mkdir -p /usr/local/nginx/conf/vhost
    \cp conf/crysadm.conf /usr/local/nginx/conf/vhost/crysadm.conf
    # Rewrite the docroot in nginx.conf when a non-default location is used.
    if [ "${Default_Website_Dir}" != "/home/wwwroot/default" ]; then
        sed -i "s#/home/wwwroot/default#${Default_Website_Dir}#g" /usr/local/nginx/conf/nginx.conf
    fi
    \cp init.d/init.d.nginx /etc/init.d/nginx
    chmod +x /etc/init.d/nginx
    echo "Run nginx Set Nginx on service"
    StartUp nginx
    /etc/init.d/nginx start
}
| true |
b0210e96269ca1d71a4727334433cdac5c8dff17 | Shell | sheikhomar/cvml | /project/launcher.sh | UTF-8 | 225 | 3.015625 | 3 | [] | no_license | #PBS -l walltime=24:00:00 -N cvml
# Run from the directory the PBS job was submitted in.
cd "$PBS_O_WORKDIR"
if [ -z "$1" ]
then
  echo "No script supplied: qsub launcher.sh -F 'myscript.py'"
  exit 1
fi
echo Starting "$1"
# Activate the conda environment, then run the requested Python script.
# "$1" is quoted so paths containing spaces are passed through intact.
source activate cvml
python "$1"
echo Script "$1" ended
| true |
83fa66d6da43ccf07375b46dabae4778fa88b5fd | Shell | alexH2456/TreePLE | /TreePLE-Web/deployment/deploy.sh | UTF-8 | 590 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env bash
# Abort the whole script with the given status when it is non-zero;
# a zero status is a no-op so the deployment continues.
exitStatus () {
    status=$1
    if [ "$status" -ne 0 ]; then
        exit "$status"
    fi
}
# Echo each deployment step, run it, and abort via exitStatus on failure.
run_step () {
    echo "$*"
    "$@"
    exitStatus $?
}

# Stop the web server, swap in the freshly built bundle, then restart.
run_step systemctl stop httpd
run_step cp -f "$1/TreePLE-Web/dist/bundle.js" /var/www/html/
run_step cp -f "$1/TreePLE-Web/dist/bundle.js.map" /var/www/html/
run_step cp -f "$1/TreePLE-Web/index.html" /var/www/html/
run_step systemctl start httpd
| true |
2845681930587b7add7a008a04145a26b721e938 | Shell | hildebro/moneysplitter | /bin/reset_db.sh | UTF-8 | 322 | 3.09375 | 3 | [] | no_license | #!/bin/sh
# Confirm before wiping the database. NOTE: the shebang is /bin/sh, so this
# must stay POSIX: the previous version used the bash-only [[ =~ ]] test,
# read -p/-n, and ${BASH_SOURCE[0]}, all of which break under dash.
printf "DANGER: Do you really want to reset your database? "
read -r REPLY
echo
case $REPLY in
    [Yy]*) ;;
    *) exit 1 ;;
esac
sudo -iu postgres psql -c 'drop database moneysplitter;'
sudo -iu postgres createdb moneysplitter
# Directory this script lives in, so init_db.py is found regardless of cwd.
DIR="$(cd "$(dirname "$0")" >/dev/null 2>&1 && pwd)"
python "$DIR/init_db.py"
| true |
bed466d278723df3d8d7cefaa1863ba2744c3067 | Shell | muntasirraihan/sdc | /sdc-os-chef/scripts/docker_run.sh | UTF-8 | 19,142 | 3.703125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Default credentials for the SDC Cassandra cluster and application user.
# NOTE(review): hard-coded credentials in a script — acceptable for a dev
# sandbox, but confirm these are overridden in any shared deployment.
CS_PASSWORD="onap123#@!"
SDC_USER="asdc_user"
SDC_PASSWORD="Aa1234%^!"
JETTY_BASE="/var/lib/jetty"
# JVM options per service; each opens a distinct remote-debug port
# (BE:4000, FE:6000, onboarding BE:4001 inside the container).
BE_JAVA_OPTIONS="-Xdebug -agentlib:jdwp=transport=dt_socket,address=4000,server=y,suspend=n -Xmx1536m -Xms1536m"
FE_JAVA_OPTIONS="-Xdebug -agentlib:jdwp=transport=dt_socket,address=6000,server=y,suspend=n -Xmx256m -Xms256m"
ONBOARD_BE_JAVA_OPTIONS="-Xdebug -agentlib:jdwp=transport=dt_socket,address=4001,server=y,suspend=n -Xmx1g -Xms1g"
SIM_JAVA_OPTIONS=" -Xmx128m -Xms128m -Xss1m"
API_TESTS_JAVA_OPTIONS="-Xmx512m -Xms512m"
UI_TESTS_JAVA_OPTIONS="-Xmx1024m -Xms1024m"
#Define this as variable, so it can be excluded in run commands on Docker for OSX, as /etc/localtime cant be mounted there.
LOCAL_TIME_MOUNT_CMD="--volume /etc/localtime:/etc/localtime:ro"
# If os is OSX, unset this, so /etc/localtime is not mounted, otherwise leave it be
if [[ "$OSTYPE" == "darwin"* ]]; then
	LOCAL_TIME_MOUNT_CMD=""
fi
# Print the command-line synopsis for this script.
function usage {
    printf '%s\n' "usage: docker_run.sh [ -r|--release <RELEASE-NAME> ] [ -e|--environment <ENV-NAME> ] [ -p|--port <Docker-hub-port>] [ -l|--local <Run-without-pull>] [ -t|--runTests <Run-with-sanityDocker>] [ -h|--help ]"
}
# Remove stale SDC containers.
#   cleanup all    - remove every SDC-related or exited container
#                    (the simulator image is deliberately excluded)
#   cleanup <name> - remove only containers whose name matches <name>
function cleanup {
    echo "performing old dockers cleanup"
    if [ "$1" == "all" ] ; then
        # IDs of SDC containers from any registry prefix, plus exited ones.
        docker_ids=`docker ps -a | egrep -v "onap/sdc-simulator" | egrep "ecomp-nexus:${PORT}/sdc|sdc|Exit" | awk '{print $1}'`
        for X in ${docker_ids}
        do
            docker rm -f ${X}
        done
    else
        echo "performing $1 docker cleanup"
        tmp=`docker ps -a -q --filter="name=$1"`
        if [[ ! -z "$tmp" ]]; then
            docker rm -f ${tmp}
        fi
    fi
}
#Prefix all dirs with WORKSPACE variable, so it doesn't use absolute path if runnning outside of VM
# Create (and open up permissions on) the host-side log directories that are
# bind-mounted into the service containers. Paths are rooted at ${WORKSPACE}
# so the script also works when run outside the VM.
function dir_perms {
    local log_root="${WORKSPACE}/data/logs"
    local sub
    for sub in \
        BE/SDC/SDC-BE \
        FE/SDC/SDC-FE \
        sdc-api-tests/ExtentReport \
        ONBOARD/SDC/ONBOARD-BE \
        sdc-api-tests/target \
        sdc-ui-tests/ExtentReport \
        sdc-ui-tests/target \
        docker_logs \
        WS; do
        mkdir -p "${log_root}/${sub}"
    done
    # Containers run as various users; world-writable keeps them all happy.
    chmod -R 777 "${log_root}"
}
# Dump the full docker log of container $1 to the host-side log directory.
function docker_logs {
    docker logs $1 > ${WORKSPACE}/data/logs/docker_logs/$1_docker.log
}
# The probe_* helpers share one pattern: execute the container's readiness
# script and, on success, report how long startup took and flip a global
# flag that monitor_docker polls.
#   $1 - container name, $2 - elapsed seconds so far

function probe_cs {
    cs_stat=false
    if docker exec $1 /var/lib/ready-probe.sh > /dev/null 2>&1; then
        echo DOCKER start finished in $2 seconds
        cs_stat=true
    fi
}

function probe_be {
    be_stat=false
    if docker exec $1 /var/lib/ready-probe.sh > /dev/null 2>&1; then
        echo DOCKER start finished in $2 seconds
        be_stat=true
    fi
}

function probe_sdc_onboard_be {
    sdc_onboard_be_stat=false
    if docker exec $1 /var/lib/ready-probe.sh > /dev/null 2>&1; then
        echo DOCKER start finished in $2 seconds
        sdc_onboard_be_stat=true
    fi
}

function probe_fe {
    fe_stat=false
    if docker exec $1 /var/lib/ready-probe.sh > /dev/null 2>&1; then
        echo DOCKER start finished in $2 seconds
        fe_stat=true
    fi
}
# Probe Elasticsearch: ask the cluster-health endpoint to wait for at least
# "yellow" status and treat HTTP 200 as ready (sets global es_stat).
#   $1 - container name (unused here), $2 - elapsed seconds so far
function probe_es {
    es_stat=false
    # BUGFIX: the URL must be quoted — the bare '&' previously backgrounded
    # the curl command and turned 'timeout=120s' into a stray shell variable
    # assignment, so the timeout parameter never reached Elasticsearch.
    health_Check_http_code=$(curl --noproxy "*" -o /dev/null -w '%{http_code}' "http://${IP}:9200/_cluster/health?wait_for_status=yellow&timeout=120s")
    if [[ "$health_Check_http_code" -eq 200 ]]
    then
        echo DOCKER start finished in $2 seconds
        es_stat=true
    fi
}
# Check whether the web simulator is already listening on TCP 8285;
# echoes its state and records it in the global sim_stat flag.
function probe_sim {
    sim_stat=false
    sim_state="not running"
    if lsof -Pi :8285 -sTCP:LISTEN -t >/dev/null ; then
        sim_stat=true
        sim_state="running"
    fi
    echo "$sim_state"
}
# Generic readiness probe: look for the "DOCKER STARTED" marker in the tail
# of container $1's log and set the global match_result flag accordingly.
#   $2 - elapsed seconds so far (reported on success)
function probe_docker {
    match_result=false
    MATCH=$(docker logs --tail 30 $1 | grep "DOCKER STARTED")
    echo MATCH is -- $MATCH
    if [[ -n "$MATCH" ]]; then
        echo DOCKER start finished in $2 seconds
        match_result=true
    fi
}
# Poll container $1 with the matching probe_* helper until it reports ready
# or TIME_OUT (900 s) elapses, then archive its docker log. The probes
# communicate back through per-service global flags (cs_stat, be_stat, ...).
function monitor_docker {
    echo monitor $1 Docker
    sleep 5
    TIME_OUT=900
    INTERVAL=20
    TIME=0
    while [ "$TIME" -lt "$TIME_OUT" ]; do
        # Dispatch on container name; anything unknown falls back to the
        # generic "DOCKER STARTED" log-marker probe.
        if [ "$1" == "sdc-cs" ]; then
            probe_cs $1 $TIME
            if [[ $cs_stat == true ]]; then break; fi
        elif [ "$1" == "sdc-es" ]; then
            probe_es $1 $TIME
            if [[ $es_stat == true ]]; then break; fi
        elif [ "$1" == "sdc-BE" ]; then
            probe_be $1 $TIME
            if [[ $be_stat == true ]]; then break; fi
        elif [ "$1" == "sdc-FE" ]; then
            probe_fe $1 $TIME
            if [[ $fe_stat == true ]]; then break; fi
        elif [ "$1" == "sdc-onboard-BE" ]; then
            probe_sdc_onboard_be $1 $TIME
            if [[ $sdc_onboard_be_stat == true ]]; then break; fi
        else
            probe_docker $1 $TIME
            if [[ $match_result == true ]]; then break; fi
        fi
        echo Sleep: $INTERVAL seconds before testing if $1 DOCKER is up. Total wait time up now is: $TIME seconds. Timeout is: $TIME_OUT seconds
        sleep $INTERVAL
        TIME=$(($TIME+$INTERVAL))
    done
    # Always capture the container log, even on timeout, to aid debugging.
    docker_logs $1
    if [ "$TIME" -ge "$TIME_OUT" ]; then
        echo -e "\e[1;31mTIME OUT: DOCKER was NOT fully started in $TIME_OUT seconds... Could cause problems ...\e[0m"
    fi
}
# Query the health endpoints of ES, BE and FE, then verify that the demo
# user exists via the BE REST API. The HTTP status of that last call is
# left in the global healthCheck_http_code and also returned.
function healthCheck {
    curl --noproxy "*" ${IP}:9200/_cluster/health?pretty=true

    echo "BE health-Check:"
    curl --noproxy "*" http://${IP}:8080/sdc2/rest/healthCheck

    echo ""
    echo ""
    echo "FE health-Check:"
    curl --noproxy "*" http://${IP}:8181/sdc1/rest/healthCheck

    echo ""
    echo ""
    # jh0003 is the designer admin user id expected by the BE API.
    healthCheck_http_code=$(curl --noproxy "*" -o /dev/null -w '%{http_code}' -H "Accept: application/json" -H "Content-Type: application/json" -H "USER_ID: jh0003" http://${IP}:8080/sdc2/rest/v1/user/demo;)
    if [[ ${healthCheck_http_code} != 200 ]]
    then
        echo "Error [${healthCheck_http_code}] while user existance check"
        return ${healthCheck_http_code}
    fi
    echo "check user existance: OK"
    return ${healthCheck_http_code}
}
# Defaults; may be overridden by the command-line options parsed below.
RELEASE=latest
LOCAL=false
RUNTESTS=false
# Host:container mappings for the JVM remote-debug ports (local runs only).
DEBUG_PORT="--publish 4000:4000"
ONBOARD_DEBUG_PORT="--publish 4001:4000"

# Hand-rolled option parser (supports long options, unlike getopts).
while [ $# -gt 0 ]; do
    case $1 in
    # -r | --release - The specific docker version to pull and deploy
    -r | --release )
          shift 1 ;
          RELEASE=$1;
          shift 1;;
    # -e | --environment - The environment name you want to deploy
    -e | --environment )
          shift 1;
          DEP_ENV=$1;
          shift 1 ;;
    # -p | --port - The port from which to connect to the docker nexus
    -p | --port )
          shift 1 ;
          PORT=$1;
          shift 1 ;;
    # -l | --local - Use this for deploying your local dockers without pulling them first
    -l | --local )
          LOCAL=true;
          shift 1;;
    # -ta - Use this for running the APIs sanity docker after all other dockers have been deployed
    -ta )
          shift 1 ;
          API_SUITE=$1;
          RUN_API_TESTS=true;
          shift 1 ;;
    # -tu - Use this for running the UI sanity docker after all other dockers have been deployed
    -tu )
          shift 1 ;
          UI_SUITE=$1;
          RUN_UI_TESTS=true;
          shift 1 ;;
    # -tad - Use this for running the DEFAULT suite of tests in APIs sanity docker after all other dockers have been deployed
    -tad | -t )
          API_SUITE="onapApiSanity";
          RUN_API_TESTS=true;
          shift 1 ;;
    # -tud - Use this for running the DEFAULT suite of tests in UI sanity docker after all other dockers have been deployed
    -tud )
          UI_SUITE="onapUiSanity";
          RUN_UI_TESTS=true;
          shift 1 ;;
    # -d | --docker - The init specified docker
    -d | --docker )
          shift 1 ;
          DOCKER=$1;
          shift 1 ;;
    # -h | --help - Display the help message with all the available run options
    -h | --help )
          usage;
          exit  0;;
    * )
          usage;
          exit  1;;
    esac
done
#Prefix those with WORKSPACE so it can be set to something other then /opt
# Pick up deployment settings from ${WORKSPACE}/opt/config when present,
# otherwise fall back to the values above / built-in defaults.
# NOTE(review): the first line's `|| echo ${DEP_ENV}` only prints the
# current value — it does not set a fallback; confirm that is intended.
[ -f ${WORKSPACE}/opt/config/env_name.txt ] && DEP_ENV=$(cat ${WORKSPACE}/opt/config/env_name.txt) || echo ${DEP_ENV}
[ -f ${WORKSPACE}/opt/config/nexus_username.txt ] && NEXUS_USERNAME=$(cat ${WORKSPACE}/opt/config/nexus_username.txt) || NEXUS_USERNAME=release
[ -f ${WORKSPACE}/opt/config/nexus_password.txt ] && NEXUS_PASSWD=$(cat ${WORKSPACE}/opt/config/nexus_password.txt) || NEXUS_PASSWD=sfWU3DFVdBr7GVxB85mTYgAW
[ -f ${WORKSPACE}/opt/config/nexus_docker_repo.txt ] && NEXUS_DOCKER_REPO=$(cat ${WORKSPACE}/opt/config/nexus_docker_repo.txt) || NEXUS_DOCKER_REPO=nexus3.onap.org:${PORT}
[ -f ${WORKSPACE}/opt/config/nexus_username.txt ] && docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO

# Host IP that the containers use to reach each other; derived from the
# route used to reach a public address.
export IP=`ip route get 8.8.8.8 | awk '/src/{ print $7 }'`
#If OSX, then use this to get IP
if [[ "$OSTYPE" == "darwin"* ]]; then
    export IP=$(ipconfig getifaddr en0)
fi
# Image prefix: registry-qualified normally, bare "onap" for --local runs.
export PREFIX=${NEXUS_DOCKER_REPO}'/onap'

if [ ${LOCAL} = true ]; then
    PREFIX='onap'
fi
echo ""
#Elastic-Search
#Elastic-Search
# Start the Elasticsearch container (data persisted under ${WORKSPACE}/data/ES).
function sdc-es {
    echo "docker run sdc-elasticsearch..."
    if [ ${LOCAL} = false ]; then
        echo "pulling code"
        docker pull ${PREFIX}/sdc-elasticsearch:${RELEASE}
    fi
    docker run -dit --name sdc-es --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --env ES_JAVA_OPTS="-Xms512m -Xmx512m" --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 $LOCAL_TIME_MOUNT_CMD --env ES_HEAP_SIZE=1024M --volume ${WORKSPACE}/data/ES:/usr/share/elasticsearch/data --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9200:9200 --publish 9300:9300 ${PREFIX}/sdc-elasticsearch:${RELEASE} /bin/sh
    echo "please wait while ES is starting..."
    monitor_docker sdc-es
}

#Init-Elastic-Search
# One-shot container that seeds Elasticsearch; exits the script on failure.
function sdc-init-es {
    echo "docker run sdc-init-elasticsearch..."
    if [ ${LOCAL} = false ]; then
        echo "pulling code"
        docker pull ${PREFIX}/sdc-init-elasticsearch:${RELEASE}
    fi
    echo "Running sdc-init-es"
    docker run --name sdc-init-es --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 $LOCAL_TIME_MOUNT_CMD --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments ${PREFIX}/sdc-init-elasticsearch:${RELEASE} > /dev/null 2>&1
    rc=$?
    docker_logs sdc-init-es
    if [[ $rc != 0 ]]; then exit $rc; fi
}

#Cassandra
# Start the Cassandra container (data persisted under ${WORKSPACE}/data/CS).
function sdc-cs {
    echo "docker run sdc-cassandra..."
    if [ ${LOCAL} = false ]; then
        docker pull ${PREFIX}/sdc-cassandra:${RELEASE}
    fi
    docker run -dit --name sdc-cs --env RELEASE="${RELEASE}" --env CS_PASSWORD="${CS_PASSWORD}" --env ENVNAME="${DEP_ENV}" --env HOST_IP=${IP} --env MAX_HEAP_SIZE="1536M" --env HEAP_NEWSIZE="512M" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 $LOCAL_TIME_MOUNT_CMD --volume ${WORKSPACE}/data/CS:/var/lib/cassandra --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9042:9042 --publish 9160:9160 ${PREFIX}/sdc-cassandra:${RELEASE} /bin/sh
    echo "please wait while CS is starting..."
    monitor_docker sdc-cs
}

#Cassandra-init
# One-shot container that creates the SDC keyspaces/users; exits on failure.
function sdc-cs-init {
    echo "docker run sdc-cassandra-init..."
    if [ ${LOCAL} = false ]; then
        docker pull ${PREFIX}/sdc-cassandra-init:${RELEASE}
    fi
    docker run --name sdc-cs-init --env RELEASE="${RELEASE}" --env SDC_USER="${SDC_USER}" --env SDC_PASSWORD="${SDC_PASSWORD}" --env CS_PASSWORD="${CS_PASSWORD}" --env ENVNAME="${DEP_ENV}" --env HOST_IP=${IP} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 $LOCAL_TIME_MOUNT_CMD --volume ${WORKSPACE}/data/CS:/var/lib/cassandra --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --volume ${WORKSPACE}/data/CS-Init:/root/chef-solo/cache ${PREFIX}/sdc-cassandra-init:${RELEASE} > /dev/null 2>&1
    rc=$?
    docker_logs sdc-cs-init
    if [[ $rc != 0 ]]; then exit $rc; fi
}
#Onboard Cassandra-init
# One-shot container that initialises the onboarding Cassandra schema;
# exits the whole script with the container's status on failure.
function sdc-cs-onboard-init {
    echo "docker run sdc-cs-onboard-init..."
    if [ ${LOCAL} = false ]; then
        docker pull ${PREFIX}/sdc-onboard-cassandra-init:${RELEASE}
    fi
    docker run --name sdc-cs-onboard-init --env RELEASE="${RELEASE}" --env CS_HOST_IP=${IP} --env SDC_USER="${SDC_USER}" --env SDC_PASSWORD="${SDC_PASSWORD}" --env CS_PASSWORD="${CS_PASSWORD}" --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 $LOCAL_TIME_MOUNT_CMD --volume ${WORKSPACE}/data/CS:/var/lib/cassandra --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --volume ${WORKSPACE}/data/CS-Init:/root/chef-solo/cache ${PREFIX}/sdc-onboard-cassandra-init:${RELEASE}
    rc=$?
    # BUGFIX: the log capture previously used the wrong container name
    # ("sdc-onboard-cs-init"), so this container's log was never archived.
    docker_logs sdc-cs-onboard-init
    if [[ $rc != 0 ]]; then exit $rc; fi
}
#Kibana
# Start the Kibana container.
# NOTE(review): unlike the other services, the `docker run` sits inside the
# pull branch, so nothing is started when --local is used — confirm whether
# that is intentional.
function sdc-kbn {
    echo "docker run sdc-kibana..."
    if [ ${LOCAL} = false ]; then
        docker pull ${PREFIX}/sdc-kibana:${RELEASE}
        docker run --detach --name sdc-kbn --env ENVNAME="${DEP_ENV}" --env NODE_OPTIONS="--max-old-space-size=200" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 $LOCAL_TIME_MOUNT_CMD --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 5601:5601 ${PREFIX}/sdc-kibana:${RELEASE}
    fi
}
#Back-End
#Back-End
# Start the catalog backend; local runs additionally expose the JVM debug
# port via ${DEBUG_PORT}.
function sdc-BE {
    echo "docker run sdc-backend..."
    if [ ${LOCAL} = false ]; then
        docker pull ${PREFIX}/sdc-backend:${RELEASE}
    else
        ADDITIONAL_ARGUMENTS=${DEBUG_PORT}
    fi
    docker run --detach --name sdc-BE --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env cassandra_ssl_enabled="false" --env JAVA_OPTIONS="${BE_JAVA_OPTIONS}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 $LOCAL_TIME_MOUNT_CMD --volume ${WORKSPACE}/data/logs/BE/:/var/lib/jetty/logs  --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 8443:8443 --publish 8080:8080 ${ADDITIONAL_ARGUMENTS} ${PREFIX}/sdc-backend:${RELEASE}
    echo "please wait while BE is starting..."
    monitor_docker sdc-BE
}

# Back-End-Init
# One-shot container seeding backend data (users, consumers); exits on failure.
function sdc-BE-init {
    echo "docker run sdc-backend-init..."
    if [ ${LOCAL} = false ]; then
        docker pull ${PREFIX}/sdc-backend-init:${RELEASE}
    fi
    docker run --name sdc-BE-init --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 $LOCAL_TIME_MOUNT_CMD --volume ${WORKSPACE}/data/logs/BE/:/var/lib/jetty/logs --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments ${PREFIX}/sdc-backend-init:${RELEASE} > /dev/null 2>&1
    rc=$?
    docker_logs sdc-BE-init
    if [[ $rc != 0 ]]; then exit $rc; fi
}

# Onboard Back-End
# Start the onboarding backend; local runs expose its debug port (4001).
function sdc-onboard-BE {
    dir_perms
    # Back-End
    echo "docker run sdc-onboard-BE ..."
    if [ ${LOCAL} = false ]; then
        docker pull ${PREFIX}/sdc-onboard-backend:${RELEASE}
    else
        ADDITIONAL_ARGUMENTS=${ONBOARD_DEBUG_PORT}
    fi
    docker run --detach --name sdc-onboard-BE --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env cassandra_ssl_enabled="false" --env SDC_CLUSTER_NAME="SDC-CS-${DEP_ENV}" --env SDC_USER="${SDC_USER}" --env SDC_PASSWORD="${SDC_PASSWORD}" --env JAVA_OPTIONS="${ONBOARD_BE_JAVA_OPTIONS}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 $LOCAL_TIME_MOUNT_CMD --volume ${WORKSPACE}/data/logs/ONBOARD:/var/lib/jetty/logs --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 8445:8445 --publish 8081:8081 ${ADDITIONAL_ARGUMENTS} ${PREFIX}/sdc-onboard-backend:${RELEASE}
    echo "please wait while sdc-onboard-BE is starting..."
    monitor_docker sdc-onboard-BE
}

# Front-End
# Start the frontend/UI container.
function sdc-FE {
    echo "docker run sdc-frontend..."
    if [ ${LOCAL} = false ]; then
        docker pull ${PREFIX}/sdc-frontend:${RELEASE}
    fi
    docker run --detach --name sdc-FE --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env JAVA_OPTIONS="${FE_JAVA_OPTIONS}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 $LOCAL_TIME_MOUNT_CMD --volume ${WORKSPACE}/data/logs/FE/:/var/lib/jetty/logs --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9443:9443 --publish 8181:8181 ${PREFIX}/sdc-frontend:${RELEASE}
    echo "please wait while FE is starting....."
    monitor_docker sdc-FE
}
# apis-sanity
# apis-sanity
# Run the API sanity-test container, but only when requested (-ta/-tad)
# and the deployed stack reports healthy.
function sdc-api-tests {
    healthCheck
    if [[ (${RUN_API_TESTS} = true) && (${healthCheck_http_code} == 200) ]]; then
        echo "docker run sdc-api-tests..."
        echo "Triger sdc-api-tests docker, please wait..."

        if [ ${LOCAL} = false ]; then
            docker pull ${PREFIX}/sdc-api-tests:${RELEASE}
        fi

        # BUGFIX: the trailing echo used to be glued onto the `docker run`
        # line, so it (and its message) was passed to the container as its
        # command instead of being printed on the host.
        docker run --detach --name sdc-api-tests --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env JAVA_OPTIONS="${API_TESTS_JAVA_OPTIONS}" --env SUITE_NAME=${API_SUITE} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 $LOCAL_TIME_MOUNT_CMD --volume ${WORKSPACE}/data/logs/sdc-api-tests/target:/var/lib/tests/target --volume ${WORKSPACE}/data/logs/sdc-api-tests/ExtentReport:/var/lib/tests/ExtentReport --volume ${WORKSPACE}/data/logs/sdc-api-tests/outputCsar:/var/lib/tests/outputCsar --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9560:9560 ${PREFIX}/sdc-api-tests:${RELEASE}
        echo "please wait while SDC-API-TESTS is starting....."
        monitor_docker sdc-api-tests
    fi
}
# ui-sanity
# ui-sanity
# Run the UI sanity-test container (plus the simulator it depends on), but
# only when requested (-tu/-tud) and the deployed stack reports healthy.
function sdc-ui-tests {
    healthCheck
    if [[ (${RUN_UI_TESTS} = true) && (${healthCheck_http_code} == 200) ]]; then
        echo "docker run sdc-ui-tets..."
        echo "Triger sdc-ui-tests docker, please wait..."

        if [ ${LOCAL} = false ]; then
            docker pull ${PREFIX}/sdc-ui-tests:${RELEASE}
        fi
        # UI tests go through the web simulator, so make sure it is up first.
        sdc-sim
        docker run --detach --name sdc-ui-tests --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env JAVA_OPTIONS="${UI_TESTS_JAVA_OPTIONS}" --env SUITE_NAME=${UI_SUITE} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 $LOCAL_TIME_MOUNT_CMD --volume ${WORKSPACE}/data/logs/sdc-ui-tests/target:/var/lib/tests/target --volume ${WORKSPACE}/data/logs/sdc-ui-tests/ExtentReport:/var/lib/tests/ExtentReport --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 5901:5901 --publish 6901:6901 ${PREFIX}/sdc-ui-tests:${RELEASE}
        echo "please wait while SDC-UI-TESTS is starting....."
        monitor_docker sdc-ui-tests
    fi
}
# SDC-Simulator
# SDC-Simulator
# Start the web simulator unless one is already listening on port 8285.
function sdc-sim {
    echo "docker run sdc-webSimulator..."
    if [ ${LOCAL} = false ]; then
        docker pull ${PREFIX}/sdc-simulator:${RELEASE}
    fi

    # probe_sim sets the global sim_stat flag.
    probe_sim
    # BUGFIX: '[ sim_stat=false ]' tested a non-empty literal string and was
    # therefore always true; compare the flag set by probe_sim instead.
    if [ "$sim_stat" = false ]; then
        docker run --detach --name sdc-sim --env JAVA_OPTIONS="${SIM_JAVA_OPTIONS}" --env ENVNAME="${DEP_ENV}" $LOCAL_TIME_MOUNT_CMD --volume ${WORKSPACE}/data/logs/WS/:/var/lib/jetty/logs --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 8285:8080 --publish 8286:8443 ${PREFIX}/sdc-simulator:${RELEASE}
        echo "please wait while SDC-WEB-SIMULATOR is starting....."
        monitor_docker sdc-sim
    fi
}
# Main dispatch: with no -d/--docker argument, deploy the full stack in
# dependency order; otherwise clean up and (re)start only the named service
# function (e.g. "-d sdc-BE" calls the sdc-BE function defined above).
if [ -z "${DOCKER}" ]; then
    cleanup all
    dir_perms
    sdc-es
    sdc-init-es
    sdc-cs
    sdc-cs-init
#    sdc-kbn
    sdc-cs-onboard-init
    sdc-onboard-BE
    sdc-BE
    sdc-BE-init
    sdc-FE
    healthCheck
    sdc-api-tests
    sdc-ui-tests
else
    cleanup ${DOCKER}
    dir_perms
    ${DOCKER}
    healthCheck
fi
| true |
f847f3f14a3ffd42752d56ff10f2223175db5024 | Shell | bfreezy/stack-cookbooks | /site-cookbooks/stack-ruby-webserver/templates/default/unicorn_initd.sh.erb | UTF-8 | 1,556 | 3.546875 | 4 | [] | no_license | #!/bin/bash
### BEGIN INIT INFO
# Provides: unicorn
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start unicorn at boot time
# Description: Enable rails service provided by unicorn
### END INIT INFO
<%# https://wiki.debian.org/LSBInitScripts %>
# god process-monitor settings; god supervises the unicorn master.
RETVAL=0
PIDFILE=/var/run/god.pid
LOGFILE=/var/log/god.log
GODCMD="bundle exec god"

# Load RVM and the application's RAILS_ENV, then pin the Ruby version and
# run from the shared god directory (both paths injected by Chef/ERB).
source /usr/local/rvm/scripts/rvm
source /etc/profile.d/rails_env.sh
rvm use <%= @ruby_version %> &> /dev/null
cd <%= @shared_god_root %>
# Init actions are delegated to god (which in turn manages unicorn).
case "$1" in
  terminate)
    # Shut down god itself (and everything it supervises).
    $GODCMD terminate
    RETVAL=0
    ;;
  start)
    # Prepare the unicorn socket directory, bounce nginx, then start the
    # unicorn master under god supervision.
    mkdir -p <%= @socket_folder %>
    chown <%= @deploy_owner %> <%= @socket_folder %>
    service nginx restart
    $GODCMD -P $PIDFILE -l $LOGFILE -c <%= @rails_root %>/god/unicorn.god start unicorn_master
    RETVAL=$?
    ;;
  stop)
    # Ask god to stop unicorn, then kill the god watcher for this config.
    $GODCMD stop unicorn_master
    sleep 3
    pkill -f 'god/unicorn.god'
    RETVAL=$?
    ;;
  restart)
    # Restart if running, otherwise fall back to a fresh start.
    # NOTE(review): relies on grep'ing human-readable status output —
    # fragile if god changes its status format.
    output=`service unicorn status`
    if echo $output | grep 'unicorn_master'
    then
      echo 'unicorn is running, restarting...'
      $GODCMD restart unicorn_master
    else
      echo 'unicorn is not running, starting it up...'
      service unicorn start
    fi
    RETVAL=$?
    ;;
  status)
    $GODCMD status unicorn_master
    RETVAL=$?
    ;;
  *)
    echo "Usage: unicorn {start|stop|restart|status}"
    exit 1
    ;;
esac
exit $RETVAL
| true |
8fb2fd7f2ac96b40c7d6172c62fde03262da5229 | Shell | afsafzal/RepairBox | /manybugs/lighttpd/lighttpd-bug-2661-2662/test.sh | UTF-8 | 1,101 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Upstream lighttpd revision whose test harness is driven by test.pl.
bugrev=2661
# $1 is the path to the instrumented executable; $2 selects the test case.
executable=$( dirname $1 )
test_id=$2
# Absolute directory of this script, so test.pl is found regardless of cwd.
here_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

#Check if coverage is being run. If so, allow a longer per-test time limit,
#since coverage-instrumented binaries run much slower.
if [ `basename $executable` = "coverage" ] ; then
    timeout=180
else
    timeout=90
fi
# Run upstream test number $1 via test.pl from inside src/, bounded by the
# $timeout chosen above. The exit status of test.pl is captured before popd
# (which would otherwise overwrite $?) and returned to the caller.
run_test()
{
    pushd src
    timeout $timeout $here_dir/test.pl $1 $bugrev
    result=$?
    popd
    return $result
}
# Map our test ids to upstream test numbers: p* are expected-pass tests,
# n* expected-fail (negative) tests. The numbering is not contiguous
# because upstream tests 10/11 are the negative cases (n1/n2) and 19 is
# skipped.
case $test_id in
    p1) run_test 1 && exit 0 ;;
    p2) run_test 2 && exit 0 ;;
    p3) run_test 3 && exit 0 ;;
    p4) run_test 4 && exit 0 ;;
    p5) run_test 5 && exit 0 ;;
    p6) run_test 6 && exit 0 ;;
    p7) run_test 7 && exit 0 ;;
    p8) run_test 8 && exit 0 ;;
    p9) run_test 9 && exit 0 ;;
    p10) run_test 12 && exit 0 ;;
    p11) run_test 13 && exit 0 ;;
    p12) run_test 14 && exit 0 ;;
    p13) run_test 15 && exit 0 ;;
    p14) run_test 16 && exit 0 ;;
    p15) run_test 17 && exit 0 ;;
    p16) run_test 18 && exit 0 ;;
    p17) run_test 20 && exit 0 ;;
    p18) run_test 21 && exit 0 ;;
    n1) run_test 10 && exit 0 ;;
    n2) run_test 11 && exit 0 ;;
esac
# Unknown test id or the selected test failed.
exit 1
| true |
9251d059d96c7427786f00dabd8e6476b3c6c33e | Shell | grahammoore/hs100 | /myip.sh | UTF-8 | 917 | 3.859375 | 4 | [] | no_license | #!/bin/sh
#
# Find the local IP by munging network tool output

# Address prefixes to exclude from the candidates (loopback 127.* and 172.*).
# NOTE(review): named "private" but 172.* also covers public space, and
# 10.* / 192.168.* are not excluded — confirm the intended filter.
private_ip='(127\.|172\.)'
# Extended-regex capture matching a dotted-quad IPv4 address.
ip_capture='(([0-9]{1,3}\.){3}[0-9]{1,3})'

# windows has ipconfig
# Resolve which network tools are available (empty string when missing).
ipconfig=`command -v ipconfig`
ifconfig=`command -v ifconfig`
ip=`command -v ip`
# linux has ip now
# Pick the first non-excluded IPv4 address using whichever tool exists.
if [ -n "$ip" ]
then
    ip addr show \
        | grep '^\s*inet[^6]' \
        | egrep -v "$private_ip" \
        | egrep -o "$ip_capture" \
        | head -n 1
# windows has ipconfig but doesn't have ifconfig
elif [ -n "$ipconfig" ] && ! [ -n "$ifconfig" ]
then
    # BUGFIX: this branch referenced the undefined variable $ip_pattern,
    # which left egrep without a pattern argument; use $ip_capture like the
    # other branches.
    ipconfig \
        | grep 'IPv4 Address' \
        | egrep -v "$private_ip" \
        | egrep -o "$ip_capture"
# osx still has ifconfig
elif [ -n "$ifconfig" ]
then
    ifconfig \
        | grep '^\s*inet[^6]' \
        | egrep -v "$private_ip" \
        | egrep -o "$ip_capture" \
        | head -n 1
else
    # BUGFIX: '2>&1' sent stderr to stdout; '>&2' sends the message to stderr.
    echo >&2 "Must have ipconfig or ip or ifconfig available to get IP address"
    exit 1
fi
| true |
5ff6dd53ce4b6ff0c1bb507bff050e46c8be4992 | Shell | 2stacks/terraform-powerdns | /templates/configure_db.sh | UTF-8 | 6,046 | 3.65625 | 4 | [
"MIT"
] | permissive | #! /bin/sh
#
# Author: Bert Van Vreckem <bert.vanvreckem@gmail.com>
#
# A non-interactive replacement for mysql_secure_installation
#
# Tested on CentOS 6, CentOS 7, Ubuntu 12.04 LTS (Precise Pangolin), Ubuntu
# 14.04 LTS (Trusty Tahr), Ubuntu 18.04 LTS (Bionic Beaver).
set -o nounset # abort on unbound variable
#{{{ Functions
# Print the synopsis; the previous text documented only the root password
# even though four arguments are required.
usage() {
cat << _EOF_
Usage: ${0} "ROOT PASSWORD" "DB USER" "USER PASSWORD" "DOMAIN NAME"

with "ROOT PASSWORD" the desired password for the database root user,
"DB USER" / "USER PASSWORD" the PowerDNS database account to create, and
"DOMAIN NAME" the DNS zone to initialise.

Use quotes if your passwords contain spaces or other special characters.
_EOF_
}
# Make sure service has started: block until something is listening on 3306.
# NOTE(review): this polls forever — there is no timeout or attempt cap.
while true; do
    netstat -an | grep 3306 > /dev/null 2>&1
    if [ $? -lt 1 ]; then  # grep exit 0 => a 3306 socket entry was found
        echo "Mariadb Service Started!"
        break
    else
        echo "Service not ready, sleeping..."
        sleep 5
    fi
done
# Predicate that returns exit status 0 if the database root password
# is set, a nonzero exit status otherwise.
# Predicate: succeeds (exit 0) when passwordless root access is refused,
# i.e. the database root password has already been set.
is_mysql_root_password_set() {
  if mysqladmin --user=root status > /dev/null 2>&1; then
    return 1
  else
    return 0
  fi
}
# Predicate that returns exit status 0 if the mysql(1) command is available,
# nonzero exit status otherwise.
# Predicate: succeeds (exit 0) when the mysql(1) client is on PATH.
# Uses the POSIX builtin `command -v` instead of the external, less
# portable `which`.
is_mysql_command_available() {
  command -v mysql > /dev/null 2>&1
}
#}}}
#{{{ Command line parsing
if [ "$#" -ne "4" ]; then
echo "Expected 2 arguments, got $#" >&2
usage
exit 2
fi
#}}}
#{{{ Variables
db_root_password="${1}"  # new password for the MySQL/MariaDB root account
db_user="${2}"           # PowerDNS application DB user to create
db_user_password="${3}"  # password for the DB user above
domain_name="${4}"       # DNS zone to seed in the pdns database
#}}}
# Script proper
# Address this host uses to reach the Internet; used below as the glue A
# record for ns1.<domain>. NOTE(review): assumes field 7 of `ip route get`
# output is the source address — holds for common iproute2 output, but
# verify on the target distribution.
dns_server="$(ip route get 1.1.1.1 | awk '{print $7; exit}')"

if ! is_mysql_command_available; then
  echo "The MySQL/MariaDB client mysql(1) is not installed"
  exit 1
fi

# Idempotency guard: if root already has a password, assume this script
# has run before and exit successfully without touching the database.
if is_mysql_root_password_set; then
  echo "Database root password already set"
  exit 0
fi
# Bootstrap the databases in one mysql session:
#   1. secure the installation (set root password, drop anonymous users,
#      restrict remote root, drop the test DB) — the non-interactive
#      equivalent of mysql_secure_installation;
#   2. create the pdns / pdnsadmin schemas and grant them to ${db_user};
#   3. create the PowerDNS gmysql backend table layout;
#   4. seed the zone ${domain_name} with SOA/NS records and an A record
#      for ns1 pointing at this host (${dns_server}).
# The heredoc is unquoted on purpose so the shell expands the ${...}
# credentials and names into the SQL.
echo "Configuring MySQL/MariaDB installation"
mysql --user=root <<_EOF_
UPDATE mysql.user SET Password=PASSWORD('${db_root_password}') WHERE User='root';
DELETE FROM mysql.user WHERE User='';
DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1');
DROP DATABASE IF EXISTS test;
DELETE FROM mysql.db WHERE Db='test' OR Db='test\\_%';
CREATE DATABASE pdns;
CREATE DATABASE pdnsadmin;
GRANT ALL ON pdns.* TO '${db_user}'@'localhost' IDENTIFIED BY '${db_user_password}';
GRANT ALL ON pdnsadmin.* TO '${db_user}'@'localhost' IDENTIFIED BY '${db_user_password}';
FLUSH PRIVILEGES;
USE pdns;
CREATE TABLE domains (
  id INT AUTO_INCREMENT,
  name VARCHAR(255) NOT NULL,
  master VARCHAR(128) DEFAULT NULL,
  last_check INT DEFAULT NULL,
  type VARCHAR(6) NOT NULL,
  notified_serial INT DEFAULT NULL,
  account VARCHAR(40) CHARACTER SET 'utf8' DEFAULT NULL,
  PRIMARY KEY (id)
) Engine=InnoDB CHARACTER SET 'latin1';
CREATE UNIQUE INDEX name_index ON domains(name);
CREATE TABLE records (
  id BIGINT AUTO_INCREMENT,
  domain_id INT DEFAULT NULL,
  name VARCHAR(255) DEFAULT NULL,
  type VARCHAR(10) DEFAULT NULL,
  content VARCHAR(64000) DEFAULT NULL,
  ttl INT DEFAULT NULL,
  prio INT DEFAULT NULL,
  change_date INT DEFAULT NULL,
  disabled TINYINT(1) DEFAULT 0,
  ordername VARCHAR(255) BINARY DEFAULT NULL,
  auth TINYINT(1) DEFAULT 1,
  PRIMARY KEY (id)
) Engine=InnoDB CHARACTER SET 'latin1';
CREATE INDEX nametype_index ON records(name,type);
CREATE INDEX domain_id ON records(domain_id);
CREATE INDEX ordername ON records (ordername);
CREATE TABLE supermasters (
  ip VARCHAR(64) NOT NULL,
  nameserver VARCHAR(255) NOT NULL,
  account VARCHAR(40) CHARACTER SET 'utf8' NOT NULL,
  PRIMARY KEY (ip, nameserver)
) Engine=InnoDB CHARACTER SET 'latin1';
CREATE TABLE comments (
  id INT AUTO_INCREMENT,
  domain_id INT NOT NULL,
  name VARCHAR(255) NOT NULL,
  type VARCHAR(10) NOT NULL,
  modified_at INT NOT NULL,
  account VARCHAR(40) CHARACTER SET 'utf8' DEFAULT NULL,
  comment TEXT CHARACTER SET 'utf8' NOT NULL,
  PRIMARY KEY (id)
) Engine=InnoDB CHARACTER SET 'latin1';
CREATE INDEX comments_name_type_idx ON comments (name, type);
CREATE INDEX comments_order_idx ON comments (domain_id, modified_at);
CREATE TABLE domainmetadata (
  id INT AUTO_INCREMENT,
  domain_id INT NOT NULL,
  kind VARCHAR(32),
  content TEXT,
  PRIMARY KEY (id)
) Engine=InnoDB CHARACTER SET 'latin1';
CREATE INDEX domainmetadata_idx ON domainmetadata (domain_id, kind);
CREATE TABLE cryptokeys (
  id INT AUTO_INCREMENT,
  domain_id INT NOT NULL,
  flags INT NOT NULL,
  active BOOL,
  content TEXT,
  PRIMARY KEY(id)
) Engine=InnoDB CHARACTER SET 'latin1';
CREATE INDEX domainidindex ON cryptokeys(domain_id);
CREATE TABLE tsigkeys (
  id INT AUTO_INCREMENT,
  name VARCHAR(255),
  algorithm VARCHAR(50),
  secret VARCHAR(255),
  PRIMARY KEY (id)
) Engine=InnoDB CHARACTER SET 'latin1';
CREATE UNIQUE INDEX namealgoindex ON tsigkeys(name, algorithm);
INSERT INTO domains (name, type) values ('${domain_name}', 'NATIVE');
INSERT INTO domainmetadata (domain_id, kind, content) VALUES (1,'SOA-EDIT-API','DEFAULT');
INSERT INTO records (domain_id, name, content, type,ttl,prio)
VALUES (1,'${domain_name}','ns1.${domain_name} hostmaster.${domain_name} 1 10380 3600 604800 3600','SOA',86400,NULL);
INSERT INTO records (domain_id, name, content, type,ttl,prio)
VALUES (1,'${domain_name}','ns1.${domain_name}','NS',86400,NULL);
INSERT INTO records (domain_id, name, content, type,ttl,prio)
VALUES (1,'ns1.${domain_name}','${dns_server}','A',300,NULL);
_EOF_
06521b7ce0f703fdce4092b7f4b4c5b06fef1fa1 | Shell | suejon/advanced-openshift-development-assignment | /Infrastructure/bin/reset_prod.sh | UTF-8 | 1,924 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Reset Production Project (initial active services: Blue)
# This sets all services to the Blue service so that any pipeline run will deploy Green
if [ "$#" -ne 1 ]; then
  echo "Usage:"
  echo "  $0 GUID"
  exit 1
fi
readonly GUID=$1
readonly PROD_NS="${GUID}-parks-prod"
# The two data backends share the same blue/green naming convention, so
# they can be handled uniformly below.
readonly BACKENDS="mlbparks nationalparks"
echo "Resetting Parks Production Environment in project ${GUID}-parks-prod to Green Services"
# Recreate the blue services without the parksmap-backend label so the
# frontend stops routing to them.
echo "Removing labels from blue services so they are no longer used as active backends"
for svc in $BACKENDS; do
  oc delete svc "${svc}-blue" -n "$PROD_NS"
done
for svc in $BACKENDS; do
  oc create -f "./Infrastructure/templates/parks-prod/${svc}-blue-svc.yaml" -n "$PROD_NS"
done
# Point every route back at its green deployment (parksmap included).
echo "Directing traffic back to green deployments"
for route in mlbparks nationalparks parksmap; do
  oc patch route "$route" --patch="{\"spec\":{\"to\":{\"name\": \"${route}-green\"}}}" -n "$PROD_NS"
done
# Mark the green services as the active backends for the parksmap frontend.
echo "Label the green services as the active backends"
for svc in $BACKENDS; do
  oc label svc "${svc}-green" type=parksmap-backend --overwrite -n "$PROD_NS"
done
| true |
25fb236f529fe344a2efa733974850145358abfa | Shell | FauxFaux/debian-control | /b/binutils/binutils-multiarch_2.31.1-5_amd64/postrm | UTF-8 | 996 | 3.65625 | 4 | [] | no_license | #! /bin/sh
set -e
this_ver=2.31.1-5; # this version
ma=x86_64-linux-gnu
triplet=x86_64-linux-gnu
# action: upgrade, abort-upgrade, remove, abort-install, disappear,
# purge, or failed-upgrade.
context=$1
# If a NEWER version's postrm failed and dpkg fell back to us, we cannot
# know what that future script intended, so refuse to guess.
if
test "$context" = failed-upgrade &&
dpkg --compare-versions "$this_ver" lt "$2"
then
# postrm of the future failed.
# Who knows what it was supposed to do? Abort.
exit 1
fi
new_ver=; # version replacing this one, if any.
case "$context" in
failed-upgrade)
new_ver=$this_ver ;;
abort-install|disappear)
new_ver= ;;
*)
new_ver=$2 ;;
esac
# Remove a dpkg diversion that was added in version $1 for file $2,
# restoring the diverted-to copy ($3, default "$2.single") back into place.
# Only undo it when the replacing version predates the version that added
# the diversion (i.e. the diversion would otherwise be orphaned), and never
# on purge (purge already removed the files).
diversion() {
local added_ver divertto file
added_ver=$1
file=$2
divertto=${3-$file.single}
if
test "$context" != purge &&
dpkg --compare-versions "$new_ver" lt "$added_ver"
then
dpkg-divert --package binutils-multiarch \
--remove --rename --divert "$divertto" "$file"
fi
}
# Undo the per-tool diversions that binutils-multiarch 2.27-8 introduced
# for the triplet-prefixed binutils front-ends.
for prog in nm objdump objcopy strings strip size \
ar ranlib addr2line gprof readelf
do
diversion 2.27-8 "/usr/bin/$triplet-$prog"
done
| true |
fa1720e87b9b5e0e7f6172161a40ae0fb1197932 | Shell | ivilab/kjb | /lib/qd_cpp/test/interactive/spq_histo.bash | UTF-8 | 1,559 | 3.515625 | 4 | [] | no_license | #!/bin/bash -ex
# $Id: spq_histo.bash 20310 2016-02-01 11:32:25Z predoehl $
# This script plots histograms of the demo SPQ paths.
# NOTE(review): expects the dist_sto_*_sink_1.txt data files (and their
# dist_det_* counterparts) to exist in the current directory -- they are
# produced by the demo programs, confirm before running standalone.
TEMP1=zzz.1.$$
TEMP2=zzz.2.$$
# Refuse to clobber leftovers from a previous run (script runs with -ex).
for g in $TEMP1 $TEMP2
do [[ -f $g ]] && echo "temp file $g exists" && exit 1
touch $g
done
# create list of data files we wish to plot.
# "sink_1" is the orange-colored paths.
for num in -3 -10 -30
do f+=("dist_sto_${num}_sink_1.txt")
done
# create fake "data" for deterministic case by just repeating same deterministic answer over and over
((n=0)) ||true
while read -r
do cat ${f[0]/sto/det}
((++n > 150)) && break # stop after 200 for better readbility though less authenticity
done <${f[0]} >$TEMP1
f+=($TEMP1) # add deterministic data file to the list
# process each file
# The awk program bins column 1 into buckets of width db=4 starting at
# mb=240 (nb=22 buckets) and prints "bin relative_frequency" pairs; the
# blank lines separate gnuplot data sets.
for g in "${f[@]}"
do printf "# %s\n" "$g"
awk -v mb=240 -v db=4 -v nb=22 -- 'BEGIN {hb=mb+nb*db; for(i=mb;i<=hb;++i) y[i]=0; } {for(i=mb;i<=hb;++i){if($1<=i){y[i]+=1;break;}} } END {for(i=mb;i<=hb;++i){print i, y[i]*0.001;} }' $g
echo
echo
done >$TEMP2
# Build the gnuplot command string incrementally, then render to SVG.
GPE="set style data histogram; "
GPE+="set style histogram; "
GPE+="set term svg; set output 'histo.svg'; "
#GPE+="unset xtics; "
#GPE+="set xtics ; "
#GPE+="set xrange [240:320]; "
#GPE+="unset mxtics; "
GPE+="set ylabel 'Bin relative frequency'; "
GPE+="plot newhistogram 'Path size', "
GPE+=" '$TEMP2' index 0 using 2:xtic(1) title 'beta = 1.5', "
GPE+=" '' index 1 using 2:xtic(1) title 'beta = 5', "
GPE+=" '' index 2 using 2:xtic(1) title 'beta = 15', "
GPE+=" '' index 3 using 2:xtic(1) title 'beta -> inf' ; "
gnuplot -e "$GPE"
rm $TEMP1 $TEMP2
| true |
1635e3b0fd836d48c718c5b6bcf89ba2b7ce6597 | Shell | magodo/docker_practice | /mysql/scripts/dual-master/agent.sh | UTF-8 | 5,711 | 3.765625 | 4 | [] | no_license | #!/bin/bash
#########################################################################
# Author: Zhaoting Weng
# Created Time: Thu 27 Dec 2018 02:26:02 PM CST
# Description:
#########################################################################
MYDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"&& pwd)"
MYNAME="$(basename "${BASH_SOURCE[0]}")"
. "$MYDIR"/common.sh
. "$MYDIR"/../config.sh
#########################################################################
# action: config
#########################################################################
# Print the help text for the "config" action to stdout.
usage_config() {
    cat << EOF
Usage: ./${MYNAME} [options] server_id
Options:
    -h|--help show this message
Arguments:
    server_id
EOF
}
#######################################
# One-time node configuration for the dual-master MySQL cluster.
# Globals:   DATA_DIR, SUPER_USER, SUPER_PASSWD (from config.sh)
# Arguments: $1 - numeric server_id, must differ between the two masters
# Outputs:   writes replication settings to $HOME/.my.cnf
#######################################
do_config() {
    # Minimal option scan: only -h/--help and the "--" terminator are
    # recognized; the first non-option argument stops the loop.
    while :; do
        case $1 in
            -h|--help)
                usage_config
                exit 1
                ;;
            --)
                shift
                break
                ;;
            *)
                break
                ;;
        esac
        shift
    done
    server_id=$1
    # rm default auto.cnf which contains same server uuid on install
    rm "$DATA_DIR"/auto.cnf
    # server config
    # NOTE(review): written to $HOME/.my.cnf, so it only takes effect for
    # servers started by this user -- confirm that matches how mysqld_safe
    # is launched in do_start.
    cat << EOF > "$HOME/.my.cnf"
[mysqld]
log_bin = $DATA_DIR/mysql-bin.log
log_bin_index = $DATA_DIR/mysql-bin.log.index
relay_log = $DATA_DIR/mysql-relay-bin
relay_log_index = $DATA_DIR/mysql-relay-bin.index
log_slave_updates = 1
server_id = $server_id
bind_address = 0.0.0.0
EOF
    # start server
    do_start
    # create user
    # The '%' host wildcard lets the peer master connect for replication.
    cat << EOF | mysql
create user '$SUPER_USER'@'%' identified by '$SUPER_PASSWD';
grant all privileges on *.* to '$SUPER_USER'@'%' with grant option;
EOF
}
#########################################################################
# action: setup
#########################################################################
# Print the help text for the "setup" action to stdout.
usage_setup() {
    cat << EOF
Usage: ./${MYNAME} [options]
Options:
    -h|--help show this message
Arguments:
    peer_hostname
EOF
}
#######################################
# Point this node's replication at its peer: read the peer's current binlog
# coordinates and CHANGE MASTER TO them, then start the slave thread.
# Globals:   SUPER_USER, SUPER_PASSWD, VIP (from config.sh)
# Arguments: $1 - hostname of the peer master
#######################################
do_setup() {
    while :; do
        case $1 in
            -h|--help)
                usage_setup
                exit 1
                ;;
            --)
                shift
                break
                ;;
            *)
                break
                ;;
        esac
        shift
    done
    peer=$1
    # Resolve both hostnames to plain IPv4 addresses; the VIP is filtered
    # out of our own address list so replication uses the real interface.
    local peer_ipv4="$(getent ahostsv4 $peer | grep "STREAM $peer" | cut -d' ' -f 1)"
    local my_ipv4="$(getent ahostsv4 "$(hostname)" | grep "STREAM" | grep -v $VIP | cut -d' ' -f 1)"
    # NOTE(review): passing -p$SUPER_PASSWD on the command line leaks the
    # password to ps(1); consider a defaults file.  Also, the unquoted
    # here-strings below rely on bash not word-splitting <<< expansions.
    master_status="$(mysql -h$peer_ipv4 -p$SUPER_PASSWD -u$SUPER_USER -B -r --vertical <<< "show master status")"
    master_bin_file="$(grep "File:" <<< $master_status | awk '{print $2}')"
    master_bin_pos="$(grep "Position:" <<< $master_status | awk '{print $2}')"
    cat << EOF | mysql
STOP SLAVE;
CHANGE MASTER TO master_host='$peer_ipv4', master_port=3306, master_user='$SUPER_USER', master_password='$SUPER_PASSWD', master_log_file='$master_bin_file', master_log_pos=$master_bin_pos;
START SLAVE;
EOF
}
#########################################################################
# action: start
#########################################################################
# Print the help text for the "start" action to stdout.
usage_start() {
    cat << EOF
Usage: ./${MYNAME} [options]
Options:
    -h|--help show this message
EOF
}
#######################################
# Start mysqld via mysqld_safe and wait (up to ~10s) until it answers ping.
# Globals:   DATA_DIR (log location)
# Returns:   0 when the server responds, 1 otherwise (error() is called
#            with mysqladmin's diagnostic output).
#######################################
do_start() {
    while :; do
        case $1 in
            -h|--help)
                usage_start
                exit 1
                ;;
            --)
                shift
                break
                ;;
            *)
                break
                ;;
        esac
        shift
    done
    # The socket directory is not created by a plain tar/package install.
    [[ -e /var/run/mysqld ]] || install -m 755 -o mysql -g root -d /var/run/mysqld
    mysqld_safe &>"$DATA_DIR"/start.log &
    # Poll quietly once per second; break as soon as the server is up.
    for i in $(seq 1 10); do
        mysqladmin -s ping && break
        sleep 1
    done
    # Final verbose check so a failure carries mysqladmin's message.
    if ! output=$(mysqladmin ping 2>&1); then
        error "$output"
        return 1
    fi
    return 0
}
#########################################################################
# action: stop
#########################################################################
# Print the help text for the "stop" action to stdout.
usage_stop() {
    cat << EOF
Usage: ./${MYNAME} [options]
Options:
    -h|--help show this message
EOF
}
#######################################
# Shut the local mysqld down via mysqladmin.
# Returns:   0 on success, 1 on failure (error() receives the diagnostic).
#######################################
do_stop() {
    while :; do
        case $1 in
            -h|--help)
                usage_stop
                exit 1
                ;;
            --)
                shift
                break
                ;;
            *)
                break
                ;;
        esac
        shift
    done
    if ! output=$(mysqladmin shutdown 2>&1); then
        error "$output"
        return 1
    fi
    return 0
}
#########################################################################
# main
#########################################################################
# Print the top-level help text to stdout.
# NOTE(review): the "config" action handled by main() is missing from this
# list -- confirm whether that is intentional (it may be internal-only).
usage() {
    cat << EOF
Usage: ./${MYNAME} [option] [action]
Options:
    -h, --help
Actions:
    setup Setup dual master DB cluster
    start Start DB cluster
    stop Stop DB cluster
EOF
}
#######################################
# Entry point: parse global options, then dispatch $1 as the action name
# (config|setup|start|stop) with the remaining arguments.
# Unknown actions abort via die() (defined in common.sh).
#######################################
main() {
    while :; do
        case $1 in
            -h|--help)
                usage
                exit 0
                ;;
            --)
                shift
                break
                ;;
            *)
                break
                ;;
        esac
        shift
    done
    local action="$1"
    shift
    case $action in
        "config")
            do_config "$@"
            ;;
        "setup")
            do_setup "$@"
            ;;
        "start")
            do_start "$@"
            ;;
        "stop")
            do_stop "$@"
            ;;
        *)
            # NOTE(review): "Unknwon" is a typo in the runtime message; left
            # unchanged here because it is user-visible output.
            die "Unknwon action: $action!"
            ;;
    esac
    exit 0
}
main "$@"
| true |
0084f16ea7170b612a895dd109778e65410263b7 | Shell | sahil-rao/deployment-sahil | /packer/mongodb-3.4/setup_mongodb.sh | UTF-8 | 1,186 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Packer provisioner: install and pin MongoDB 3.4.2 on Ubuntu trusty, then
# lay down the configuration files uploaded by the file provisioner.
# -e: stop on first failure, -u: unset vars are errors, -v: echo commands.
set -euv
# Install MongoDB 3.4.2; Pin packages
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 0C49F3730359A14518585931BC711F9BA15703C6
echo "deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.4 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.4.list
apt-get update
apt-get install -y mongodb-org=3.4.2 mongodb-org-server=3.4.2 mongodb-org-shell=3.4.2 mongodb-org-mongos=3.4.2 mongodb-org-tools=3.4.2
# "hold" prevents apt upgrades from moving off the pinned 3.4.2 build.
echo "mongodb-org hold" | sudo dpkg --set-selections
echo "mongodb-org-server hold" | sudo dpkg --set-selections
echo "mongodb-org-shell hold" | sudo dpkg --set-selections
echo "mongodb-org-mongos hold" | sudo dpkg --set-selections
echo "mongodb-org-tools hold" | sudo dpkg --set-selections
# Setup the basics
pip install pymongo
wget -qO /usr/local/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64; chmod +x /usr/local/bin/jq
# Move files uploaded by packer file provisioner
cp /tmp/executables/* /usr/local/bin/
cp /tmp/etc/mongod.conf /etc/mongod.conf
cp /tmp/etc/init/mongod.conf /etc/init/mongod.conf
cp /tmp/etc/logrotate.d/mongodb /etc/logrotate.d/mongodb
cp /tmp/etc/cron.d/mongodb /etc/cron.d/mongodb
| true |
# Interactively pick a run profile from the entries under $DIR/profile and
# publish the chosen entry name in the global PROFILE.
# NOTE(review): assumes $DIR and echo_yellow are provided by the sourcing
# script / shared helpers -- confirm against the callers.
function select_profile() {
    echo_yellow "========= Select Run Profile =========="
    profiles=($(ls "$DIR/profile"))
    for (( i=0; i<${#profiles[@]}; i++)); do
        echo "$i. ${profiles[i]}"
    done
    profile_n=""
    while [ -z "$profile_n" ]; do
        echo "Input The Profile Number:"
        # -r keeps backslashes literal.  Validate the reply before using it
        # as an array index: the original indexed the array directly, so
        # non-numeric input either crashed (arithmetic error) or silently
        # selected profile 0.
        read -r profile_n
        if ! [[ "$profile_n" =~ ^[0-9]+$ ]] || [ -z "${profiles[$profile_n]:-}" ]; then
            profile_n=""
            echo "Wrong Number, Input again!"
        fi
    done
    PROFILE=${profiles[$profile_n]}
}
12cf1c25ce7863d169ece1540767aa880f12ce33 | Shell | YellowOrz/WordFrequencyOfPapers | /count.sh | UTF-8 | 2,803 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Merge all input texts into a single working file
echo "Please input direction of txts [default: ./Papers]"
read path
if [ -z "$path" ];then
    path="./Papers"
fi
# Derive the output name from the (lowercased, letters-only) input path.
result="dictionary_"$(echo $path|tr 'A-Z' 'a-z'|sed 's/[^a-z]//g')".txt"
echo "save result to $result"
echo "" > $result
for i in $(find $path -name "*txt")
do
    cat "$i" >> $result
done
# Count word frequencies
cat $result |tr 'A-Z' 'a-z' | #uppercase to lowercase
    sed 's/[^a-z'-']/ /g'| #replace everything except letters and - with spaces
    tr -s ' ' '\n'| #turn runs of spaces into newlines (one word per line)
    sed 's/s$//g'| #strip a trailing s (crude plural folding)
    grep -v '\-$'|grep -v '^\-'| #drop words that start or end with -
    awk '{print length($0) " " $0}'| #prefix each word with its length
    grep -v -w [1-4]| #drop words of length 1-4
    awk '{if($2!="") print $2}'| #drop the length column and empty lines
    sort| #sort alphabetically
    uniq -c| #count identical words
    grep -v -w [1-9]| #drop words appearing fewer than 10 times
    sort -r -n| #sort by frequency, descending
    awk '{print $2" "$1}'>$result #output "word frequency"
# Normalize the blacklist the same way as the corpus
cat blacklist.txt|tr 'A-Z' 'a-z' | #uppercase to lowercase
    sed 's/[^a-z'-']/ /g'| #replace everything except letters and - with spaces
    tr -s ' ' '\n'| #turn runs of spaces into newlines
    awk '{print length($0) " " $0}'| #prefix each word with its length
    grep -v -w [1-4]| #drop words of length 1-4
    awk '{if($2!="") print $2}'| #drop the length column and empty lines
    sort| #sort
    uniq > temp.txt #deduplicate, then write out
cat temp.txt > blacklist.txt
rm temp.txt
# Remove blacklisted words from the result
for i in $(cat blacklist.txt)
do
    line=$(cat $result|grep -n -w $i|grep -v "-" | cut -f 1 -d ':')
    if [ -n "$line" ];then
        sed -i ''$line'd' $result
    fi
done
b3dce292e817f3523bed142c104b06a39550dfe6 | Shell | xverges/wa-cli | /wa_cli/resources/travis-cleanup.sh | UTF-8 | 624 | 3.546875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Cleans up the sandboxes that have been created to run PRs
# Relies on the following env vars:
#    WA_URL
#    WA_APIKEY
#    TRAVIS_*
#
export PYTHONUNBUFFERED=TRUE
# TRAVIS_PULL_REQUEST_BRANCH is only non-empty for PR builds, which are the
# only builds that created ad-hoc sandboxes.
if [[ -n "${TRAVIS_PULL_REQUEST_BRANCH}" ]]; then
    echo "Deleting the PR sandboxes..."
    # One sandbox exists per skill directory under test/flow.
    for skill in ./test/flow/*; do
        if [ -d "${skill}" ]; then
            skill=$(basename "$skill")
            SANDBOX_NAME=$(wa-cli sandbox name "${skill}")
            echo "Deleting sandbox '$SANDBOX_NAME'..."
            wa-cli sandbox delete "$skill"
        fi
    done
else
    echo "No ad-hoc PR skills were created for this build"
fi
| true |
853760dea39858071f8162abded81eb9bc1592f7 | Shell | MIB1700/MRaudioChopperStitcher | /MRaudioChopper | UTF-8 | 2,013 | 3.703125 | 4 | [] | no_license | #! /opt/local/bin/bash
#-x
set -o errexit
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
#takes one argument: integer value that is the amount in seconds the audio file
#is chopped to
#a positive value trims the file from the beginning and expects a folder named /shortStart
#in the pwd
#a negative value trims it from the end and expects a folder named /shortEnd in the pwd
#the resulting file is transcoded to a .aiff at 44.1kHz
#no checks are made if the files in the directory are actually soundfiles...
#no checks are made if /short[End/Start] exist
# https://github.com/MIB1700/MRaudioChopperStitcher/
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
#assign arg 1 to var time; if not set use default 12
time=${1:-12}
shopt -s nullglob # Sets nullglob
shopt -s nocaseglob # Sets nocaseglob
shopt -s extglob
mkdir "$(pwd)/shortStart/"
mkdir "$(pwd)/shortEnd/"
for file in *.@(aa|aac|aiff|aif|alac|au|m4a|mp3|ogg|oga|mogg|opus|raw|wav|wma|webm)
do
    if [[ -f "$file" && -e "$file" ]]; then
        filename=${file%.*}
        #extension=${file##*.}
        if (( "$time" > 0 )); then
            # Bug fix: this used "$(unknown)", which runs a nonexistent
            # command; its failure status aborts the whole script under
            # errexit.  The basename without extension (${filename}, set
            # just above and otherwise unused) is the intended value.
            newFile="$(pwd)/shortStart/${filename}_shortStart.aiff"
            if ! ffmpeg -y -i "$file" -ss 0 -to "$time" -ar 44100 "$newFile"; then
                echo "Unable to extrac audio from ${file}" >&2
                exit 1
            fi
        elif (( "$time" < 0 )); then
            # Bug fix: same "$(unknown)" corruption as above.
            newFile="$(pwd)/shortEnd/${filename}_shortEnd.aiff"
            if ! ffmpeg -sseof "$time" -i "$file" -ar 44100 "$newFile"; then
                echo "Unable to extrac audio from ${file}" >&2
                exit 1
            fi
        fi
        echo "$newFile"
    fi
done
rmdir "$(pwd)/shortEnd/"
rmdir "$(pwd)/shortStart/"
shopt -u nocaseglob # Unsets nocaseglob
shopt -u nullglob # Unsets nullglob
shopt -u extglob
exit 0
653612765b17f08d56187a4a43214263b4605e3a | Shell | whm/cz-ldap-utils | /usr/bin/ldap-load-db | UTF-8 | 3,145 | 3.90625 | 4 | [] | no_license | #!/bin/bash
#
# This dumps the dc=stanford,dc=edu directory.
#
# Copyright 2013 Board of Trustees, Leland Stanford Jr. University All
# rights reserved.
# -----------------------------------
# Delete files only if they are there
# Delete each named path, but only if it is an existing plain file;
# directories are reported and skipped, missing paths are ignored.
function del {
    # "$@" preserves each argument as one word (the original used an
    # unquoted $*, which re-split paths containing whitespace).
    for f in "$@"
    do
        if [ -d "$f" ]
        then
            echo "skipping directory $f"
        else
            if [ -e "$f" ]
            then
                echo "deleting $f"
                rm -- "$f"
            fi
        fi
    done
}
##############################################################################
# Main Routine
##############################################################################
# Set the configuration to use for the load
# Argument 1 selects help/manual output or (otherwise) is implicit; the
# database ID itself comes from /etc/ldap-backup.conf below.
if [ "$1" = "help" ]
then
    echo "Usage: ldap-load-db [help|manual|files|config]"
    exit 1
fi
if [ "$1" = "manual" ]
then
    pod2text $0
    exit 1
fi
# Get the database to reload
source /etc/ldap-backup.conf
if [ "$DBID" = "" ]
then
    echo "ERROR: DBID missing"
    exit 1
fi
dbDir="/var/lib/ldap/$DBID"
if [ -e $dbDir ]
then
    echo "Cleaning out $dbDir"
else
    echo "ERROR: $dbDir not found"
    exit 1
fi
# Decide on a configuration style to use
# "files" = classic slapd.conf, "config" = cn=config database; when $2 is
# absent slapadd's compiled-in default configuration is used.
if [ "$2" = "files" ]
then
    config="-f /etc/ldap/slapd-cn.conf"
fi
if [ "$2" = "config" ]
then
    config="-F /etc/ldap/slapd.d"
fi
if [ "$config" = "" ]
then
    echo "Using default configuration for load"
else
    echo "Using $config for load"
fi
cd /tmp
dbRoot="/tmp/db-${DBID}.ldif"
dbFile="${dbRoot}.gz"
if [ ! -e $dbFile ]
then
    echo "ERROR: input file not found. ($dbFile)"
    exit 1
fi
del $dbRoot
echo "Uncompressing ..."
/bin/gunzip $dbFile
if [ $? -ne 0 ]
then
    echo "ERROR: problem unzipping input file"
    exit 1
fi
# /etc/noldap is a local convention that keeps monitoring/cron from
# restarting slapd while the load is in progress.
touch /etc/noldap
/usr/sbin/service slapd stop
# Clean out the current database
del /var/lib/ldap/$DBID/*
if [ -e /var/lib/ldap/accesslog ]
then
    del /var/lib/ldap/accesslog/*
else
    mkdir /var/lib/ldap/accesslog
fi
# Empty file system cache.  This most useful for mdb loads to free
# memory, and doesn't hurt bdb loads.
sync
set +o noclobber
echo 3 > /proc/sys/vm/drop_caches
# -q skips consistency checks for speed; $DBDN comes from ldap-backup.conf.
/usr/sbin/slapadd -q $config -b $DBDN -l $dbRoot
echo "Load complete. Don't forget to start slapd."
# Documentation. Use a hack to hide this from the shell. Because of the
# above exit line, this should never be executed.
# NOTE(review): there is no exit statement above, but the heredoc trick
# still works -- "DOCS=<<..." consumes the POD as input to a null command.
DOCS=<<__END_OF_DOCS__
=head1 NAME
ldap-load-db - Load an OpenLDAP directory
=head1 SYNOPSIS
ldap-load-db dbID [help|manual|files|config]
=head1 DESCRIPTION
This script uses slapadd to load an OpenLDAP directory.  The database
directory is assumed to be in /var/lib/ldap.  The accesslog directory
will be created if needed.  The input LDIF file is assumed to be
/tmp/db-<dbID>.gz.
The load process is very careful about the input LDIF file.  It is
backed up before being uncompressed and will be restored if the
process is restarted.
=head1 OPTIONS
=over 4
=item help
A short help message.
=item manual
This documentation.
=item files
Use the /etc/ldap/slapd.conf configuration file.
=item config
Use the cn=config data base at /etc/ldap/slapd.d.
=back
=head1 AUTHOR
Bill MacAllister <bill@ca-zephyr.org>
=cut
__END_OF_DOCS__
| true |
6718b37e7d7a526abe10d8fb1d810d3b5ab1c658 | Shell | juanique/dotfiles | /bin/vcprompt-install | UTF-8 | 279 | 2.875 | 3 | [] | no_license | #!/bin/sh
# Download, compile, and copy the vcprompt command to ~/bin.
#
# AUTHOR: Geoffrey Grosenbach
# July 27 2009
# Fail fast: without this, a failed cd/clone would let the later steps
# (make, cp, and especially the final rm -rf) run from the wrong place.
set -e
sudo apt-get install mercurial
mkdir -p ~/tmp/src
cd ~/tmp/src
hg clone http://vc.gerg.ca/hg/vcprompt/
cd vcprompt
make
cp vcprompt ~/bin/vcprompt
rm -rf ~/tmp/src
| true |
93cd8b6b4d103e9f1d68a60f5526228b33724243 | Shell | mcandre/wdmycloud-ssh-certs | /lib/install-ssh-cert | UTF-8 | 1,389 | 3.703125 | 4 | [] | no_license | #!/bin/sh
unset IFS
set -euf
# Print usage to stdout.  (echo "...\n" prints a literal \n on some shells;
# kept as-is because it is existing user-visible output.)
usage() {
    echo "Usage: $0 <remote-address> <remote-password> <auxilliary-username> <local-public-key>\n"
    echo "Warning: Overwrites \$HOME/.ssh/authorized_keys on remote server\n"
    echo "Example: $0 root@wdmycloud.local welcome root ~/.ssh/id_rsa.pub"
}
# Bug fix: this was "-lt 3", which let a 3-argument call through and then
# died on the unset $4 below (set -u) instead of printing the usage text.
# The script requires all four arguments.
if [ "$#" -lt 4 ]; then
    usage
    exit 1
fi
REMOTE_ADDRESS="$1"
if [ -z "$REMOTE_ADDRESS" ]; then
    usage
    exit 1
fi
REMOTE_PASSWORD="$2"
# Bug fix: this tested the literal string "REMOTE_PASSWORD" (missing $),
# so an empty password was never caught.
if [ -z "$REMOTE_PASSWORD" ]; then
    usage
    exit 1
fi
AUXILLIARY_USERNAME="$3"
if [ -z "$AUXILLIARY_USERNAME" ]; then
    usage
    exit 1
fi
LOCAL_PUBLIC_KEY="$4"
# Bug fix: same missing-$ bug as the password check above.
if [ -z "$LOCAL_PUBLIC_KEY" ]; then
    usage
    exit 1
fi
# Install the key, then tighten ownership/permissions from the innermost
# path outward; \$HOME is escaped so it expands on the remote side.
sshpass -p "$REMOTE_PASSWORD" \
    ssh "$REMOTE_ADDRESS" mkdir -p "\$HOME/.ssh"
sshpass -p "$REMOTE_PASSWORD" \
    scp "$LOCAL_PUBLIC_KEY" "${REMOTE_ADDRESS}:\$HOME/.ssh/authorized_keys"
sshpass -p "$REMOTE_PASSWORD" \
    ssh "$REMOTE_ADDRESS" chown "${AUXILLIARY_USERNAME}:" "\$HOME/.ssh/authorized_keys"
sshpass -p "$REMOTE_PASSWORD" \
    ssh "$REMOTE_ADDRESS" chmod 0600 "\$HOME/.ssh/authorized_keys"
sshpass -p "$REMOTE_PASSWORD" \
    ssh "$REMOTE_ADDRESS" chown "${AUXILLIARY_USERNAME}:" "\$HOME/.ssh"
sshpass -p "$REMOTE_PASSWORD" \
    ssh "$REMOTE_ADDRESS" chmod 0700 "\$HOME/.ssh"
sshpass -p "$REMOTE_PASSWORD" \
    ssh "$REMOTE_ADDRESS" chown 'root:' "\$HOME"
sshpass -p "$REMOTE_PASSWORD" \
    ssh "$REMOTE_ADDRESS" chmod 0755 "\$HOME"
| true |
2b9ba075270282c7dea797bd254401fd8596f503 | Shell | hemmerling/java-myfirstci | /bat/jenkins.sh | UTF-8 | 1,636 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/bash
#
# myfirstci
#
# Trigger the Jenkins job via its remote-build token, fetching a CSRF
# crumb first so the call also works with CSRF protection enabled.
jenkinsJob="myfirstci"
jenkinsUrl="http://localhost:8080" #URL to trigger Jenkins
user="admin"
password="admin"
trigger="BUILD"
#
# The wget_* variables mirror the curl flags for the commented-out wget
# variant of the same request further below.
wget_cmd=wget #Location of wget command
wget_outputToStdout="-O-"
wget_outputDocument="--output-document -" # must be followed by SPACE and then the URL in single hyphons ( 'http://...' )
wget_post="--post-data="
wget_credentials="--auth-no-challenge --http-user=$user --http-password=$password"
curl_cmd=curl #Location of curl command
curl_verbose="--verbose"
curl_post="-XPOST"
curl_credentials="--user $user:$password"
# Work without deactivated CSRF
#$wget_cmd $wget_credentials $wget_post $wget_outputToStdout $jenkinsUrl/job/$jenkinsJob/build?token=$trigger
#$curl_cmd $curl_credentials $curl_post $curl_verbose $jenkinsUrl/job/$jenkinsJob/build?token=$trigger
# Works also with activated CSRF:
# Ask the crumb issuer for a "Header:value" pair to attach to the POST.
crumb=$($curl_cmd $curl_credentials -s $jenkinsUrl'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)')
#$curl_cmd $curl_credentials $curl_post $curl_verbose -H "$crumb" $jenkinsUrl/job/$jenkinsJob/build?token=$trigger
$curl_cmd $curl_credentials $curl_post $curl_verbose -H "$crumb" $jenkinsUrl/job/$jenkinsJob/build?token=$trigger
# Works also with activated CSRF:
#crumb=$($wget_cmd $wget_credentials $wget_outputDocument $jenkinsUrl'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)')
#wget_header=--header="$crumb"
#$wget_cmd $wget_credentials $wget_post $wget_outputToStdout $wget_header $jenkinsUrl/job/$jenkinsJob/build?token=$trigger
| true |
bd768635f1d7589872e1f25105ae15f7bf5228dd | Shell | NetBSD/pkgsrc | /regress/infra-unittests/check-portability.sh | UTF-8 | 6,903 | 3.46875 | 3 | [] | no_license | #! /bin/sh
# $NetBSD: check-portability.sh,v 1.7 2021/01/04 21:10:01 rillig Exp $
#
# Test cases for mk/check/check-portability.*.
#
set -eu
. "./test.subr"
# Runs the shell program for all files in the current directory.
# Runs the shell program for all files in the current directory.
# Captures combined output in $tmpdir/out and the exit status in the
# global $exitcode (the || arm keeps "set -eu" from aborting the test).
# Extra VAR=value arguments become environment overrides via env "$@".
check_portability_sh() {
    env PATCHDIR='patches' \
        PREFIX='/nonexistent' \
        "$@" \
        sh "$pkgsrcdir/mk/check/check-portability.sh" \
        1>"$tmpdir/out" 2>&1 \
        && exitcode=0 || exitcode=$?
}
# Runs the AWK program in standalone mode for the given file.
# Runs the AWK program in standalone mode for the given file.
# Like check_portability_sh, fills $tmpdir/out and the global $exitcode.
check_portability_awk() {
    env CK_FNAME="$1" \
        CK_PROGNAME='check-portability.awk' \
        awk -f "$pkgsrcdir/mk/check/check-subr.awk" \
        -f "$pkgsrcdir/mk/check/check-portability.awk" \
        "$1" \
        1>"$tmpdir/out" 2>&1 \
        && exitcode=0 || exitcode=$?
}
# --- Test cases ----------------------------------------------------------
# Each case uses the helpers from test.subr: test_case_begin/test_case_end,
# create_file_lines/create_file, and assert_that.
# Portable "test x = y" must pass without any diagnostics.
if test_case_begin "test ... = ..."; then
    create_file_lines 'file' \
        'if [ "$var" = value ]; then' \
        '  ...' \
        'elif test "$var" = value ]; then' \
        '  ...' \
        'fi'
    check_portability_awk 'file'
    assert_that "$tmpdir/out" --file-is-empty
    assert_that $exitcode --equals 0
    test_case_end
fi
# Non-portable "test x == y" must be flagged with the full explanation.
if test_case_begin 'test ... == ...'; then
    create_file_lines 'file' \
        'if [ "$var" == value ]; then' \
        '  ...' \
        'elif test "$var" == value ]; then' \
        '  ...' \
        'fi'
    check_portability_awk 'file'
    create_file 'expected' <<'EOF'
ERROR: [check-portability.awk] => Found test ... == ...:
ERROR: [check-portability.awk] file:1: if [ "$var" == value ]; then
ERROR: [check-portability.awk] file:3: elif test "$var" == value ]; then
Explanation:
===========================================================================
The "test" command, as well as the "[" command, are not required to know
the "==" operator. Only a few implementations like bash and some
versions of ksh support it.
When you run "test foo == foo" on a platform that does not support the
"==" operator, the result will be "false" instead of "true". This can
lead to unexpected behavior.
There are two ways to fix this error message. If the file that contains
the "test ==" is needed for building the package, you should create a
patch for it, replacing the "==" operator with "=". If the file is not
needed, add its name to the CHECK_PORTABILITY_SKIP variable in the
package Makefile.
===========================================================================
EOF
    assert_that "$tmpdir/out" --file-equals 'expected'
    assert_that $exitcode --equals 1
    test_case_end
fi
if test_case_begin 'configure patched, configure.in bad'; then
    create_file_lines 'patches/patch-aa' \
        '+++ configure 2020-05-04'
    create_file_lines 'configure' \
        '#! /bin/sh' \
        'good'
    create_file_lines 'configure.in' \
        'test a == b'
    check_portability_sh \
        'CHECK_PORTABILITY_EXPERIMENTAL=yes'
    assert_that "$tmpdir/out" --file-is-empty
    assert_that $exitcode --equals 0
    test_case_end
fi
if test_case_begin 'Makefile.in patched, Makefile.am bad'; then
    # As of 2020-05-05, Makefile.am is not checked at all since only
    # very few packages actually use that file during the build.
    create_file_lines 'patches/patch-aa' \
        '+++ Makefile.in 2020-05-05'
    create_file_lines 'Makefile.in' \
        'test a = b'
    create_file_lines 'Makefile.am' \
        'test a == b'
    check_portability_sh \
        'CHECK_PORTABILITY_EXPERIMENTAL=yes'
    assert_that "$tmpdir/out" --file-is-empty
    assert_that $exitcode --equals 0
    test_case_end
fi
if test_case_begin 'files that are usually not used for building'; then
    # The following files are mostly interesting to the upstream
    # developers and are not used during the actual build, except
    # if the package rebuilds everything using the GNU autotools.
    create_file_lines 'configure.ac' \
        'test a == b'
    create_file_lines 'Makefile.am' \
        'test a == b'
    check_portability_sh \
        'CHECK_PORTABILITY_EXPERIMENTAL=yes'
    assert_that "$tmpdir/out" --file-is-empty
    assert_that $exitcode --equals 0
    test_case_end
fi
if test_case_begin 'configure patched and still bad'; then
    create_file_lines 'patches/patch-aa' \
        '+++ configure 2020-05-04'
    create_file_lines 'configure' \
        '#! /bin/sh' \
        'test a == b'
    check_portability_sh \
        'CHECK_PORTABILITY_EXPERIMENTAL=yes'
    create_file 'expected' <<'EOF'
ERROR: [check-portability.awk] => Found test ... == ...:
ERROR: [check-portability.awk] configure:2: test a == b
Explanation:
===========================================================================
The "test" command, as well as the "[" command, are not required to know
the "==" operator. Only a few implementations like bash and some
versions of ksh support it.
When you run "test foo == foo" on a platform that does not support the
"==" operator, the result will be "false" instead of "true". This can
lead to unexpected behavior.
There are two ways to fix this error message. If the file that contains
the "test ==" is needed for building the package, you should create a
patch for it, replacing the "==" operator with "=". If the file is not
needed, add its name to the CHECK_PORTABILITY_SKIP variable in the
package Makefile.
===========================================================================
EOF
    assert_that "$tmpdir/out" --file-equals 'expected'
    assert_that $exitcode --equals 1
    test_case_end
fi
if test_case_begin 'special characters in filenames'; then
    # Ensure that the filename matching for patched files
    # does not treat special characters as shell metacharacters.
    create_file_lines 'patches/patch-aa' \
        '+++ [[[[(`" 2020-05-04'
    create_file_lines '+++ [[[[(`"' \
        '#! /bin/sh' \
        'test a = b'
    check_portability_sh \
        'CHECK_PORTABILITY_EXPERIMENTAL=yes'
    assert_that "$tmpdir/out" --file-is-empty
    assert_that $exitcode --equals 0
    test_case_end
fi
if test_case_begin 'no patches'; then
    # Ensure that no error message is printed when there are no
    # patch files.
    create_file_lines 'file' \
        '#! /bin/sh' \
        'test a = b'
    check_portability_sh \
        CHECK_PORTABILITY_EXPERIMENTAL=no
    assert_that "$tmpdir/out" --file-is-empty
    assert_that $exitcode --equals 0
    test_case_end
fi
if test_case_begin 'no experimental by default'; then
    create_file_lines 'configure.in' \
        'test a == b'
    check_portability_sh \
        'CHECK_PORTABILITY_EXPERIMENTAL=no'
    assert_that "$tmpdir/out" --file-is-empty
    assert_that $exitcode --equals 0
    test_case_end
fi
if test_case_begin 'always skip tilde files'; then
    # Projects that use GNU autoconf 2.70 are reported to include
    # backup files like 'configure~' in their distribution, for
    # whatever reason.  Since these files are not used by pkgsrc,
    # they should be ignored.
    #
    # Since the filename is not one of the well-known ones, the file
    # must start with a '#!' line to be actually recognized as a shell
    # program.
    create_file_lines 'configure~' \
        '#! /bin/sh' \
        'test a == b'
    check_portability_sh \
        'CHECK_PORTABILITY_EXPERIMENTAL=no'
    assert_that "$tmpdir/out" --file-is-empty
    assert_that $exitcode --equals 0
    test_case_end
fi
| true |
bbf666c4151115b0cf499e19995bea6c1cd034c4 | Shell | pacificclimate/data-prep-actions | /actions/vic-gen2-metadata/time-subset.sh | UTF-8 | 694 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Copy each VIC-GL NetCDF file to fast local storage, keep only its first
# timestep (cdo seltimestep), and replace the local copy with the subset.
localdir="/local_temp/lzeman/first"
for var in BASEFLOW EVAP GLAC_AREA GLAC_MBAL GLAC_OUTFLOW PET_NATVEG PREC RAINF RUNOFF SNOW_MELT SOIL_MOIST_TOT SWE TRANSP_VEG
do
  echo "$(date) Now processing $var files"
  # The glob is deliberately unquoted so *$var*.nc expands to the files.
  for file in /storage/data/projects/hydrology/dataportal/CMIP5/VICGL/*$var*.nc
  do
    echo "  Now processing $file"
    echo "    $(date) now copying $file to $localdir"
    # Quote every path expansion (SC2086); the originals were unquoted and
    # would break on any path containing whitespace or glob characters.
    base=$(basename "$file")
    cp "$file" "$localdir/$base"
    echo "    $(date) now subsetting $base"
    cdo seltimestep,1/1 "$localdir/$base" "$localdir/$base.1"
    echo "    $(date) cleaning up original and renaming subset"
    rm "$localdir/$base"
    mv "$localdir/$base.1" "$localdir/$base"
  done
done
| true |
c0c2da1ae6b1135a9f04ce58760d7063f786113f | Shell | yungcheeze/fzf_browser | /fuzzybrowse | UTF-8 | 155 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Resolve this script's own directory (following symlinks via pwd -P) so
# the helper library can be sourced relative to it.
thisDir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )
source "$thisDir"/fzf_browser.sh
# NOTE(review): no getopts runs in this file, so OPTIND is presumably set
# by option parsing inside the sourced fzf_browser.sh -- confirm; on its
# own this shift is a no-op (OPTIND starts at 1).
shift $((OPTIND-1))
fzf_browser "$@"
| true |
32d7d15ba0491f7b4bad5cd5f85bd5442ef89eba | Shell | PolinyaevaIrina7372/sp | /sp1.sh | UTF-8 | 1,478 | 3.8125 | 4 | [] | no_license | echo 'Автор: Ирина Полиняева'
# Interactive file-manager menu (prompts and messages are in Russian):
# create ("с"), delete ("у") or move ("п") a file, then ask whether to
# continue.  Note: on any error the script uses "break", i.e. it exits the
# whole menu loop rather than re-prompting -- documented as-is.
while true;
do
echo 'Введите действие: создать (с), удалить (у), переместить (п)'
read choice
# Create: refuse if the file already exists.
if [ "$choice" == "с" ]
then
echo 'Введите имя файла для создания'
read source
if [ -f $source ]
then
echo 'Ошибка: файл существует'
break
fi
touch $source
echo 'Файл создан'
# Delete: the file must exist.
elif [ "$choice" == "у" ]
then
echo 'Введите имя файла для удаления'
read source
if [ ! -f $source ]
then
echo 'Ошибка: файл не найден'
break
fi
rm $source
echo 'Файл удалён'
# Move: the destination directory must exist.
# NOTE(review): $source/$destination are unquoted throughout, so names
# containing spaces will break -- candidate fix, left unchanged here.
elif [ "$choice" == "п" ]
then
echo 'Введите имя файла для перемещения'
read source
echo 'Введите имя в которую нужно переместить файл'
read destination
if [ ! -d $destination ]
then
echo 'Ошибка: директории, в которую нужно переместить файл, не существует'
break
fi
mv $source $destination
echo 'Файл перемещен'
else
echo 'Ошибка: действие не распознано'
fi
# Ask whether to continue; accept only "да" (yes) or "нет" (no).
echo 'Продолжить? да / нет'
read continue
while [ "$continue" != "да" ] && [ "$continue" != "нет" ]
do
echo 'Введите да или нет'
read continue
done
if [ "$continue" == "нет" ]
then
break
fi
done
| true |
0c1d09efaa0449378f0f9403c90906ec6101738b | Shell | mtulio/piOS | /scripts/build/.build_functions | UTF-8 | 1,948 | 3.328125 | 3 | [] | no_license | #!/bin/bash
#
# .build_functions - Define functions to build packages, see main build script: 2-build
#
# author: Marco Tulio R Braga (https://github.com/mtulio/piOS)
# created: 04 Aug 2015
# modified: 04 Aug 2015
#
#######################################
#######################################
# Declare Globals
#TODO(mtulio) : check .build.cfg config file to check what envs to export, packs to build...
# Abort when the generated config is missing -- without it every readonly
# below would silently end up empty.
# Bug fix: the guard used to run "exit 2" inside a ( ... ) subshell, which
# only exited the subshell; the script carried on with empty settings.
[ ! -f .build.cfg ] && { echo "# Config file not found in $(basename $0). Run 2-configure to fix it."; exit 2; }
readonly TOOL_PATH="$(cat .build.cfg |grep ^'TOOL_PATH=' |awk -F'=' '{print$2}')"
readonly PATH_GCC_NAME="$(cat .build.cfg |grep ^'PATH_GCC_NAME=' |awk -F'=' '{print$2}')"
readonly PATH_GCC="$(cat .build.cfg |grep ^'PATH_GCC=' |awk -F'=' '{print$2}')"
readonly CROSS_COMPILE="$(cat .build.cfg |grep ^'CROSS_COMPILE=' |awk -F'=' '{print$2}')"
readonly ARCH="$(cat .build.cfg |grep ^'ARCH=' |awk -F'=' '{print$2}')"
readonly BOARD_VERSION="$(cat .build.cfg |grep ^'BOARD_VERSION=' |awk -F'=' '{print$2}')"
#######################################
# Build Linux Kernel
# Globals:
# ARCH
# CROSS_COMPILE
# BOARD_VERSION
# Arguments:
# None
# Returns:
# Kernel Cross-comiled : zImage
#######################################
build_kernel() {
cd ${PWD}../linux
# Config kernel for Pi2
KERNEL=kernel7
make ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} bcm2709_defconfig
# Build it
NCPU="4"
make ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} zImage modules dtbs -j${NCPU}
}
#fc_build_kernel
#######################################
# Build App: mtapp01
# Globals:
# CROSS_COMPILE
# Arguments:
# None
# Returns:
# Binary(ies) : mtapp01
#######################################
fc_build_app_mtulio01() {
OPWD="${PWD}"
PATH_APPS="${PWD}../apps"
cd ${PATH_APPS}/mtulio01
make CROSS_COMPILE=${CROSS_COMPILE}
cd ${OPWD}
}
#fc_build_app_mtulio01
| true |
73cb3f76943c7f6684737e4061bce164197ee3f5 | Shell | fancn21th/0_80_Bash_Recap | /functions/command-sub.sh | UTF-8 | 144 | 2.984375 | 3 | [] | no_license | greet() {
echo "$1 world" # only first line return
}
greeting=$(greet "howdy") #command substitution syntax
echo "greeting set to $greeting" | true |
12f7ffcdff6721f7e9cf7be61bf78787b388aeb2 | Shell | pallocate/tedit | /build/bin/build-scripts/kick | UTF-8 | 829 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
cd /home/projects/kick/kick
task="compile"
if [ $1 ]; then
task=$1
fi
DEPENDENCIES=$COROUTINES_CORE:$COROUTINES_CORE_JVM:$KROTO_PLUS_COROUTINES:$GRPC_PROTOBUF:$GRPC_CONTEXT:$GRPC_CORE:\
$GRPC_STUB:$GRPC_API:$GUAVA:$JNA:$GRPC_PROTOBUF_LITE:$PROTOBUF_JAVA:$PERFMARK:$GRPC_NETTY_SHADED:$PROTOCOL:$PEN:\
$APACHE_COMMON:$IROHA_ED25519
if [ $task = "compile" ]; then
kotlinc -cp $DEPENDENCIES -d $KICK src/jvm src/jvm_utils
elif [ $task = "run" ]; then
kotlin -cp $KICK:$DEPENDENCIES kick.utils.DumpServer $2
elif [ $task = "tests" ]; then
kotlinc -cp $KICK:$DEPENDENCIES:$JUNIT -d $KICK_TESTS -Xmulti-platform src/jvm_tests
elif [ $task = "docs" ]; then
java -cp $STDLIB:$DEPENDENCIES -jar $DOKKA src/jvm -output build/docs/kick/api
else
echo "Usage: build kick [compile|run|tests|docs]"; echo
fi
| true |
ed1a8f2f5100558bee7d57d6375db8b335906ed4 | Shell | chipster/chipster-openshift | /deploy-builds.bash | UTF-8 | 2,935 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
source scripts/utils.bash
if ! kubectl --help > /dev/null 2>&1; then
echo "Error: command 'kubectl' not found"
exit 1
fi
if ! kubectl kustomize --help > /dev/null 2>&1; then
echo "Error: command 'kubectl kustomize' not found, please update your kubectl"
exit 1
fi
export PROJECT=$(get_project)
export DOMAIN=$(get_domain)
branch="$1"
if [ -n "$branch" ]; then
echo "Error: this script doesn't have a branch parameter anymore. Please set appropriate branches in private patches. "
echo
exit 1
fi
private_config_path=" ../chipster-private/confs"
# better to do this outside repo
build_dir=$(make_temp chipster-openshift_deploy-builds)
# build_dir="build_temp"
# rm -rf build_temp
# mkdir $build_dir
echo -e "build dir is \033[33;1m$build_dir\033[0m"
base_dir="$build_dir/builds"
mkdir -p $base_dir
echo "copy Kustomize yaml files"
cp kustomize/builds/*.yaml $base_dir
echo "create base BuildConfigs and ImageStreams"
# use oc templates to put Dockerfiles to BuildConfigs and to copy ImageStreams for each build
for build_template in kustomize/builds/*/*.yaml; do
build=$(basename $build_template .yaml)
template_dir=$(dirname $build_template)
echo $build
cat $build_template \
| yq e - -o=json \
| jq .spec.source.dockerfile="$(cat $template_dir/Dockerfile | jq -s -R .)" \
> $base_dir/$build-bc.yaml
oc process -f templates/imagestreams/imagestream.yaml --local -p NAME=$build \
> $base_dir/$build-is.yaml
# modify the object in memory in the write to the same file
echo "$(cat $base_dir/kustomization.yaml | yq e - -o=json | jq '.resources += ["'$build-bc.yaml'"]' | yq e - )" > $base_dir/kustomization.yaml
echo "$(cat $base_dir/kustomization.yaml | yq e - -o=json | jq '.resources += ["'$build-is.yaml'"]' | yq e - )" > $base_dir/kustomization.yaml
done
# copy builds-mylly overlay to the build dir in case this deployment uses it
cp -r kustomize/builds-mylly $build_dir
private_all_kustomize_path="$private_config_path/chipster-all/builds"
private_kustomize_path="$private_config_path/$PROJECT.$DOMAIN/builds"
if [ -z $private_all_kustomize_path ]; then
echo "chipster-all not found"
else
echo "copy chipster-all"
mkdir -p $build_dir/chipster-all
cp -r $private_all_kustomize_path/* $build_dir/chipster-all
fi
if [ -f $private_kustomize_path/kustomization.yaml ]; then
echo "create overlay from $private_kustomize_path"
overlay_dir="$build_dir/overlay"
mkdir -p $overlay_dir
# copy the overlay to our build dir
cp -r $private_kustomize_path/* $overlay_dir
apply_dir="$overlay_dir"
else
echo "using default kustomization"
apply_dir="$base_dir"
fi
echo "apply to server $apply_dir"
apply_out="$build_dir/apply.out"
kubectl kustomize $apply_dir | oc apply -f - | tee $apply_out | grep -v unchanged
echo $(cat $apply_out | grep unchanged | wc -l) objects unchanged
echo "delete build dir $build_dir"
rm -rf $build_dir
| true |
c0e6d1ef1e1539c52ec9e3bfe3741c7d027e3729 | Shell | afaur/vibrant | /deploy | UTF-8 | 361 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
echo "Startup Webserver"
echo ""
echo "Specify the server env you like to use when starting:"
echo ""
echo "Example (test, development, staging, or production)"
echo ""
read WSERV_ENV
echo ""
echo "Server starting using env: $(echo $WSERV_ENV)"
echo ""
WSERV_ENV="$(echo $WSERV_ENV)" dub
| true |
a39c6ae2059ad39fc1f3f7760c83a24966eb8c07 | Shell | drewkeller/xscreensaver-pi-hdmi | /bin/xscreensaver-pi-hdmi-on | UTF-8 | 557 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# This script contains the commands to run when turning the display ON.
# no --quiet switch
tvservice --preferred > /dev/null
# refresh the screen using xrefresh method
fbset -depth 8
fbset -depth 16
xrefresh
#because xrefresh doesn't always work
curr_vt=`sudo fgconsole`
if [ "$curr_vt" = "1" ]; then
sudo chvt2; sleep 1; sudo chvt1
else if [[ $curr_vt =~ $-?[0-9]+$ ]]; then
sudo chvt 2; sleep 1; sudo chvt "$curr_vt"
else
sudo chvt 1; sleep 1; sudo chvt 7
fi; fi
# Using CEC command (requires libcec)
echo "on 0" | cec-client -s -d 1
| true |
f8513afba290589d7a568767a1f18c7343bcdaaa | Shell | elmomk/fun_with_scripts | /check_var.sh | UTF-8 | 110 | 2.765625 | 3 | [] | no_license | #!/bin/bash
if [ -z $1 ] && [ -z $2 ]; then
echo empty
exit 1
else
echo "not empty"
fi
echo "check last"
| true |
4f7ed60b697a1d65333fe105572de3fd1efdcf3f | Shell | jordanm/dotfiles | /bin/start-environment-terminal | UTF-8 | 350 | 3.515625 | 4 | [] | no_license | #!/bin/bash
last=""
if [[ -e $HOME/.last-environment ]]; then
last=$(< $HOME/.last-environment)
fi
name=$(zenity --entry --text="Environment:" --title="Launch Environment" --entry-text="$last")
if [[ -n $name ]]; then
path=$ENVPATH/$name
if [[ -d $path ]]; then
echo $name > $HOME/.last-environment
exec start-terminal "$name"
fi
fi
| true |
0df31d59ee7cdeb1c96864608729ecc2843e06aa | Shell | pernicgu/ccurr_velocity | /quant/impl/run_data_update.sh | UTF-8 | 1,017 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# PARSE ARGUMENT
for i in "$@"
do
case $i in
-mkt=*|--upd_mkt_data=*)
UPD_MKT_DATA="${i#*=}"
shift # past argument=value
;;
-bc=*|--upd_bc_data=*)
UPD_BC_DATA="${i#*=}"
shift # past argument=value
;;
*)
# unknown option
;;
esac
done
# SET DEFAULT
if [ ! -z $UPD_MKT_DATA ]; then
UPD_MKT_DATA = 0
fi
if [ ! -z $UPD_BC_DATA ]; then
UPD_BC_DATA = 0
fi
# CONDITIONALLY DOWNLOAD / EXTRACT NEW DATA
if [ "$UPD_MKT_DATA" ]
then
echo "Not implemnted: Would updated market data"
# COMMENT: Note, that in marketdata etc. all directories broke because of moving files.
# Rscript ../marketdata_generation/data_col.R
# Rscript ../marketdata_generation/data_proc.R
fi
if [ "$UPD_BC_DATA" ]
then
echo "Not implemnted: Would updated BC data"
#python3 ../bc_data_generation/script.py -e 01/01/2010
fi
# PREPARE DATA
python3 ./data_preprocessing/get_data_from_archive.py
python3 ./data_preprocessing/prepare_data.py -m "diff_2diff"
| true |
e136801f29c059593dd452e1501a963b090fc297 | Shell | crazyit/TechLibrary | /BigData/SparkBestPractice.code/chapter2/hadoop-spark-installer-master/projects/hadoop/remove.sh | UTF-8 | 1,185 | 3.125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
. ./../../common/log.sh
. ./../../bin/utils.sh
. ../../bin/set_env.sh
install_dir="${CLUSTER_BASEDIR_INSTALL}/hadoop"
data_dir="${CLUSTER_BASEDIR_DATA}/hadoop"
log_dir_hdfs="${CLUSTER_BASEDIR_LOG}/hdfs"
log_dir_yarn="${CLUSTER_BASEDIR_LOG}/yarn"
databasedirs=$(../../bin/getconfig.sh hadoop.datanode.databasedirs | sed 's/[,;]/ /g')
# hadoop master
masters=$(../../bin/getconfig.sh hadoop.namenode.hostnames)
m1=$(echo $masters | cut -d',' -f1 | ../../bin/nametoip.sh)
m2=$(echo $masters | cut -d',' -f2 | ../../bin/nametoip.sh)
for host in $m1 $m2; do
echo "> stop master $host"
sleep 1
ssh $host "su $CLUSTER_USER -c 'cd $install_dir; ./admin.sh stop'"
echo
done
for host in $m1 $m2; do
echo "> clean master $host"
sleep 1
ssh $host "rm -rf ${CLUSTER_BASEDIR_INSTALL}/${CLUSTER_PROJECT_HADOOP_NAME} $install_dir $data_dir $log_dir_hdfs $log_dir_yarn"
echo
done
slaves=$(../../bin/getconfig.sh hadoop.datanode.hostnames | sed 's/[,;]/\n/g' | ../../bin/nametoip.sh)
for host in $slaves; do
echo "> clean datanode $host data dir: $databasedirs"
sleep 1
ssh $host "rm -rf $databasedirs" &
wait
echo
done
| true |
960c6f284f630bd69fb70fe230ce969f572f521b | Shell | Sebatyne/utils | /scripts/noip.sh | UTF-8 | 844 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# This script can be useful to access your device behind a NAT (like an internet box)
# It will email you the public IP of your device whenever it changes.
# It can be used like a cheap "no-ip service", but without owning a DNS record.
# Just connect to your device using the public IP.
# Add this crons to a user crontab :
# Run this script once an hour (can be decreased if needed)
# 0 * * * * bash /root/noip.sh
# makes sure the home router doesn't "forget" about your device. Changes IP if necessary
# 0/4 * * * * ping -c 1 192.168.0.1 &> /dev/null
save_file=$HOME/.noip
email="email@address.domain"
current_ip=$(curl -s http://icanhazip.com)
old_ip=$(cat $save_file 2> /dev/null)
if [ "$old_ip" = "$current_ip" ]; then
exit 0
else
mail -s "ip raspberry" $email <<< "New ip is : $current_ip"
echo $current_ip > $save_file
fi
exit 0
| true |
0dc11381392784ca04d43854db96c8300b28c2dd | Shell | lucaswannen/source_code_classification_with_CNN | /dataset_v2/bash/8403336.txt | UTF-8 | 313 | 3.109375 | 3 | [] | no_license | #!/bin/sh
DAY=$(date +"%d%b%Y")
BUCKET='/home/user/Scripts/Holding/s3buckets.txt'
BLIST='/home/user/Scripts/Holding/blist.txt'
LOGDIR='/home/user/Scripts/Holding/'
USAGE=$BLIST
s3cmd ls > $BUCKET
awk '{print $3}' $BUCKET > $BLIST
while read USAGE; do
s3cmd du -H $USAGE
done < $BUCKET > $LOGDIR/S3Usage$DAY.txt
| true |
3de0f55dc854a64cdd2ec23e2c40c833a840e173 | Shell | fsiddiqi/dovetail | /build-docs.sh | UTF-8 | 4,648 | 3.78125 | 4 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] | permissive | #!/usr/bin/env bash
# Description: Build script for Project Dovetail documentation
# Author: retgits <https://github.com/retgits>
# Mod: fcenedes@tibco.com
# Last Updated: 2018-11-11
#--- Variables ---
HUGO_VERSION=0.50
GIT_ACCOUNT="TIBCOSoftware"
GIT_REPO="dovetail"
#--- Download and install prerequisites ---
prerequisites() {
wget -O hugo.tar.gz https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_${HUGO_VERSION}_Linux-64bit.tar.gz
mkdir -p hugobin
tar -xzvf hugo.tar.gz -C ./hugobin
mv ./hugobin/hugo $HOME/gopath/bin
rm hugo.tar.gz && rm -rf ./hugobin
}
#--- Get external docs ---
ext_docs() {
echo "cloning dovetail-contrib"
git clone https://github.com/TIBCOSoftware/dovetail-contrib.git
#for i in `find dovetail-contrib/activity -name \*.md` ; do filename=$(basename $(dirname $i)); cp $i docs/content/development/webui/activities/$filename.md; done;
#for i in `find dovetail-contrib/trigger -name \*.md` ; do filename=$(basename $(dirname $i)); cp $i docs/content/development/webui/triggers/$filename.md; done;
rm -rf ./dovetail-contrib
}
#--- Add readme and license ---
add_readme() {
echo "Adding readme and license files"
cp docs/content/README.md docs/public
cp docs/content/LICENSE docs/public
}
update_page_cli() {
echo "Getting the docs for the commandline tools"
#curl -o docs/content/dovetail-cli/dovetail-cli.md https://raw.githubusercontent.com/TIBCOSoftware/dovetail-cli/master/docs/dovetail-cli.md
}
#--- Update contributions page ---
update_page_contrib() {
echo "Update contributing page"
cp CONTRIBUTING.md docs/content/contributing/contributing.md
sed -i '1d' docs/content/contributing/contributing.md
sed -i '1i ---' docs/content/contributing/contributing.md
sed -i '1i weight: 9010' docs/content/contributing/contributing.md
sed -i '1i title: Contributing to Project Dovetail' docs/content/contributing/contributing.md
sed -i '1i ---' docs/content/contributing/contributing.md
}
#--- Update introduction page ---
update_page_introduction() {
cp README.md docs/content/introduction/_index.md
sed -i '1,4d' docs/content/introduction/_index.md
sed -i '5,17d' docs/content/introduction/_index.md
sed -i '1i ---' docs/content/introduction/_index.md
sed -i '1i pre: "<i class=\\"fas fa-home\\" aria-hidden=\\"true\\"></i> "' docs/content/introduction/_index.md
sed -i '1i weight: 1000' docs/content/introduction/_index.md
sed -i '1i title: Introduction' docs/content/introduction/_index.md
sed -i '1i ---' docs/content/introduction/_index.md
sed -i "s#images/eventhandlers.png#../images/eventhandlers.png#g" docs/content/introduction/_index.md
}
#--- Update page ---
update_page() {
case "$1" in
"contributing")
update_page_contrib
;;
"introduction")
update_page_introduction
;;
*)
echo "Updating all pages"
ext_docs
update_page_cli
update_page_contrib
update_page_introduction
esac
}
#--- Execute build ---
build() {
echo "Build docs site..."
cd docs && hugo
#cd ../showcases && hugo
#mv public ../docs/public/showcases
cd public/
ls -alh
cd ../../
}
gitprep() {
if [ -d "dovetail" ]; then
echo "Your working directory is dirty, please run the script again after cleaning it up"
exit 1;
fi
echo "cloning dovetail"
git clone https://github.com/TIBCOSoftware/dovetail.git
cd dovetail
echo "Deleting old publication"
cd docs
rm -rf public
mkdir public
git worktree prune
rm -rf .git/worktrees/public/
echo "Checking out gh-pages branch into public"
git worktree add -B gh-pages public origin/gh-pages
echo "Removing existing files"
rm -rf public/*
cd ../
echo $PWD
}
gitupdate() {
echo "Updating gh-pages branch"
cd docs
git add -A .
cd public
git add -A .
git commit -a -m "Publishing to gh-pages (build-doc.sh)"
if [[ $(git status -s) ]]
then
echo "The working directory is dirty. Please commit any pending changes."
exit 1;
fi
git push origin gh-pages
}
case "$1" in
"prerequisites")
prerequisites
;;
"ext-docs")
ext_docs
;;
"update-page")
update_page $2
;;
"build")
build
;;
"magic")
gitprep
update_page $2
add_readme
build
gitupdate
;;
*)
echo "The target {$1} you want to execute doesn't exist"
esac
| true |
12776a96915831189047f5ac3f2106a383c0e441 | Shell | roytje88/archinstallscripts | /tests/packer.sh | UTF-8 | 740 | 3.328125 | 3 | [] | no_license | #!/bin/bash
ips=$(ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1')
arr=""
for ip in $ips; do
# echo $ip
arr="$arr $ip IP off "
done
tempfile=`tempfile 2>/dev/null` || tempfile=/tmp/test$$
trap "rm -f $tempfile" 0 1 2 5 15
dialog --backtitle "Test" \
--radiolist "test" 0 0 5 \
$arr 2> $tempfile
retval=$?
choice=`cat $tempfile`
case $retval in
0)
mkdir -p $share
echo "$actualfolder $share none bind 0 0" >> /etc/fstab
echo "$share $choice/24(rw,no_subtree_check,nohide,sync,no_root_squash,insecure,no_auth_nlm)" >> /etc/exports
exportfs -rav
;;
1)
echo "Cancel pressed.";;
255)
echo "ESC pressed.";;
esac
# echo $arr | true |
24c04e64e584852ba40441d9d66c2673a4ef5646 | Shell | davep-github/dpw | /bin/rcs-dirty | UTF-8 | 633 | 3.515625 | 4 | [] | no_license | #!/bin/bash
source script-x
set -u
progname="$(basename $0)"
source eexec
if vsetp "${eexec_program-}" # Did the caller provide a program?
then
EEXEC_SHIFT=:
else
eexec_program=$(EExec_parse "$@")
EEXEC_SHIFT=shift
fi
for op in $eexec_program
do
$op
${EEXEC_SHIFT}
done
EExec_verbose_msg $(echo_id eexec_program)
unset eexec_program
# Or export eexec_program to propagate eexec info to a called program.
# export eexec_program
filter=cat
(($# > 0)) && [ "$1" = '-f' ] && {
filter=filter_RCS
shift
}
filter_RCS()
{
sed -rn 's|(RCS/)(.*)(,v)|\2|p'
}
EExec rlog -L -R "$@" 2>/dev/null | "${filter}"
| true |
cbee2368339ad1f52512a83ecf0885db765efd6b | Shell | daviddenton/fintrospect | /publish_site.sh | UTF-8 | 450 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
echo Publishing site for v$1
set -e
export PATH=~/.nvm/versions/node/v5.1.0/bin:$PATH
npm run setup
npm run cleanSite
rm -rf /tmp/site
git clone git@github.com:fintrospect/fintrospect.github.io.git /tmp/site
cd /tmp/site
cd -
npm run makeSite
./sbt unidoc
rm -rf /tmp/site/*
mv target/scala-2.13/unidoc target/www/api
mv -f target/www/* /tmp/site
cd /tmp/site
git add *
git commit -m "releasing $1 version of site"
git push origin master
cd -
| true |
0d498a6be07da2ca1338d2ea52bdc917e9acf9f2 | Shell | brad/aur-python33 | /PKGBUILD | UTF-8 | 3,105 | 2.5625 | 3 | [] | no_license | # Maintainer: Eric Berquist <eric DOT berquist AT gmail DOT com>
# Contributor: Rodolphe Breard <packages@what.tf>
# Contributor: Christopher Arndt <chris@chrisarndt.de>
pkgname=python33
pkgver=3.3.6
pkgrel=2
_pybasever=3.3
_pymajver=3
pkgdesc="Major release 3.3 of the Python high-level programming language"
arch=('i686' 'x86_64')
license=('custom')
url="http://www.python.org/"
depends=('expat' 'bzip2' 'gdbm' 'openssl' 'libffi' 'zlib')
makedepends=('tk' 'sqlite' 'valgrind' 'bluez-libs' 'mpdecimal' 'hardening-wrapper')
optdepends=('mpdecimal: for decimal'
'sqlite'
'tk: for tkinter'
'xz')
options=('!makeflags')
source=(http://www.python.org/ftp/python/${pkgver}/Python-${pkgver}.tar.xz
python-3.3-ssl-nosslv3.patch
python-3.3-test-expat.patch)
sha256sums=('5226e4bf7a530c3ff2bcde0c94e0e09e59a8bcde0114fe0268bc925bdabb5d3f'
'd54bc0ac72218b37c1c2f7a8f03f904a06c2270518a5f3b9e27e54578fe1fb04'
'5df423235ca68f8736a3fc7263ad90eaf85dab23941f98e3f924a31be13d0b54')
prepare() {
cd "${srcdir}/Python-${pkgver}"
patch -Np1 -i ${srcdir}/python-3.3-ssl-nosslv3.patch
patch -Np1 -i ${srcdir}/python-3.3-test-expat.patch
# FS#23997
sed -i -e "s|^#.* /usr/local/bin/python|#!/usr/bin/python|" Lib/cgi.py
# Ensure that we are using the system copy of various libraries (expat, zlib and libffi),
# rather than copies shipped in the tarball
rm -rf Modules/expat
rm -rf Modules/zlib
rm -rf Modules/_ctypes/{darwin,libffi}*
rm -rf Modules/_decimal/libmpdec
}
build() {
cd "${srcdir}/Python-${pkgver}"
export CPPFLAGS="-DOPENSSL_NO_SSL3"
./configure --prefix=/usr \
--enable-shared \
--with-threads \
--with-computed-gotos \
--enable-ipv6 \
--with-system-expat \
--with-dbmliborder=gdbm:ndbm \
--with-system-ffi \
--with-system-libmpdec \
--enable-loadable-sqlite-extensions
make
}
check() {
cd "${srcdir}/Python-${pkgver}"
# make test
LD_LIBRARY_PATH="${srcdir}/Python-${pkgver}":${LD_LIBRARY_PATH} \
"${srcdir}/Python-${pkgver}/python" -m test.regrtest -x \
test_distutils \
test_faulthandler \
test_ftplib \
test_ssl
}
package() {
cd "${srcdir}/Python-${pkgver}"
# altinstall: /usr/bin/pythonX.Y but not /usr/bin/python or /usr/bin/pythonX
make DESTDIR="${pkgdir}" altinstall maninstall
# Avoid conflicts with the main 'python' package.
rm "${pkgdir}/usr/lib/libpython${_pymajver}.so"
rm "${pkgdir}/usr/share/man/man1/python${_pymajver}.1"
# fix FS#22552
ln -sf ../../libpython${_pybasever}m.so \
"${pkgdir}/usr/lib/python${_pybasever}/config-${_pybasever}m/libpython${_pybasever}m.so"
# fix pycairo build
ln -sf python${_pybasever}m-config "${pkgdir}/usr/bin/python${_pybasever}-config"
# clean-up reference to build directory
sed -i "s|$srcdir/Python-${pkgver}:||" "$pkgdir/usr/lib/python${_pybasever}/config-${_pybasever}m/Makefile"
# license
install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
| true |
aed37de8ce7c3672e15012c99d2e91beb27e493c | Shell | mdhtr/alice-at-ubuntu | /nautilus_scripts/PDF_manipulation/PDF_smart_rename_and_split_based_on_content/3rd_quest_pdf_criteria_matching_multipage_split.sh | UTF-8 | 1,496 | 3.90625 | 4 | [] | no_license | #!/bin/bash
# NAUTILUS SCRIPT
# automatically splits pdf file to multiple pages based on search criteria while renaming the output files using the search criteria and some of the pdf text.
# read files
IFS=$'\n' read -d '' -r -a filelist < <(printf '%s\n' "$NAUTILUS_SCRIPT_SELECTED_FILE_PATHS"); unset $IFS
# process files
for file in "${filelist[@]}"; do
pagecount=`pdfinfo $file | grep "Pages" | awk '{ print $2 }'`
# MY SEARCH CRITERIA is a 10 digit long ID number that begins with number 8:
storedid=`pdftotext -f 1 -l 1 $file - | egrep '8?[0-9]{9}'`
pattern=''
pagetitle=''
datestamp=''
for (( pageindex=1; pageindex<=$pagecount; pageindex+=1 )); do
header=`pdftotext -f $pageindex -l $pageindex $file - | head -n 1`
pageid=`pdftotext -f $pageindex -l $pageindex $file - | egrep '8?[0-9]{9}'`
let "datestamp =`date +%s%N`" # to avoid overwriting with same new name
# match ID found on the page to the stored ID
if [[ $pageid == $storedid ]]; then
pattern+="$pageindex " # adds number as text to variable separated by spaces
pagetitle+="$header+"
if [[ $pageindex == $pagecount ]]; then #process last output of the file
pdftk $file cat $pattern output "$storedid $pagetitle $datestamp.pdf"
storedid=0
pattern=''
pagetitle=''
fi
else
#process previous set of pages to output
pdftk $file cat $pattern output "$storedid $pagetitle $datestamp.pdf"
storedid=$pageid
pattern="$pageindex "
pagetitle="$header+"
fi
done
done
| true |
33371650cc9b9d7673e3b98178af05ae27341063 | Shell | quartictech/infra | /kubernetes/backups/scripts/pg-backup.sh | UTF-8 | 1,620 | 3.4375 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #! /bin/bash
set -euo pipefail
# TODO - secure credentials?
# TODO - test query or something
# TODO - we should record the platform version in the filename or something
DUMP_FILE="/db.sql.gz"
RESTORE_FILE="/restore.sql.gz"
declare -a DATABASES=("eval" "qube" "catalogue")
GCS_URL="gs://${GCS_BUCKET}/postgres/postgres.$(date -u +"%Y-%m-%dT%H:%M:%SZ").${VERSION}.sql.gz"
#----------------------------------------#
# Backup
#----------------------------------------#
echo "Running pg_dumpall ..."
export PGPASSWORD=${SOURCE_POSTGRES_PASSWORD}
pg_dumpall -h ${SOURCE_POSTGRES_HOST} -U ${SOURCE_POSTGRES_USER} --clean | gzip > ${DUMP_FILE}
echo "Uploading to ${GCS_URL} ..."
gsutil cp ${DUMP_FILE} ${GCS_URL}
#----------------------------------------#
# Test restore
#----------------------------------------#
echo "Downloading from ${GCS_URL} ..."
gsutil cp ${GCS_URL} ${RESTORE_FILE}
# This ensures that the DROP statements in the output of pg_dumpall will succeed
echo "Creating databases (ERRORs here are OK)"
set +e
for db in "${DATABASES[@]}"
do
createdb -h ${TEMP_POSTGRES_HOST} -U ${TEMP_POSTGRES_USER} "$db"
done
set -e
# This is an unpleasant hack. See here: https://dba.stackexchange.com/questions/75033/how-to-restore-everything-including-postgres-role-from-pg-dumpall-backup
# and https://www.postgresql.org/message-id/200804241637.m3OGbAOe071623@wwwmaster.postgresql.org
echo "Restoring to temp Postgres instance ..."
gunzip < ${RESTORE_FILE} | egrep -v '^(CREATE|DROP) ROLE postgres;' | psql -v ON_ERROR_STOP=on -h ${TEMP_POSTGRES_HOST} -U ${TEMP_POSTGRES_USER} -d postgres -q
echo "Complete!"
| true |
2cc551a723bd258db43611e307d20d39684f1edd | Shell | exredorg/exred | /docker_builds/rpi_xbuild/prod/assets/load.sh | UTF-8 | 230 | 2.5625 | 3 | [
"MIT"
] | permissive | #! /bin/bash
echo -e "\n>> loading docker images"
docker load -i images_rpi.tar
echo -e "\n>> creating .env file"
echo "COMPOSE_PROJECT_NAME=prod" > .env
echo -e "\n>> done"
echo
echo "Run it with:"
echo "docker-compose up -d"
| true |
ffecaeaf7fc2e6596a053d6439691db27b7e6f3c | Shell | elgressy/lyn | /docker/test-network/check-versions.sh | UTF-8 | 252 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# a small script to help sanity check the versions of the different node implementations
dockerfiles=$(find . -name 'Dockerfile')
# print location of dockerfiles
echo $dockerfiles
# print variables
awk '/ENV/ && /VER|COMMIT/' $dockerfiles
| true |
63ffde1a00c2a389709e8cf7e1d550a002bfdac8 | Shell | bridgecrew-perf3/terraform-equinix-metal-ecs-anywhere | /templates/private_network.sh | UTF-8 | 936 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
VLAN_ID='${vlan_id}'
PRIVATE_IP='${private_ip}'
PRIVATE_NETWORK='${private_network}'
AWS_SUBNET_CIDR='${aws_subnet_cidr}'
# Install the prerequisites for VLANs
sudo apt update -qy
sudo apt install vlan -y
modprobe 8021q
echo "8021q" >> /etc/modules-load.d/networking.conf
# Make sure eth1 has been removed from bond0
eth1=(`ls /sys/class/net/ | egrep 'f1|eth1'`)
echo "remove interface $eth1 from bond0"
echo "-$eth1" > /sys/class/net/bond0/bonding/slaves
sed -i.bak_$(date "+%m%d%y") -r "s/(.*bond-slaves.*) $eth1/\1/" /etc/network/interfaces
sed -i "/^auto $eth1\$*/,\$d" /etc/network/interfaces
cat <<EOT >> /etc/network/interfaces
auto $eth1
iface $eth1 inet static
pre-up sleep 5
address $PRIVATE_NETWORK.$PRIVATE_IP
netmask 255.255.255.0
up route add -net $AWS_SUBNET_CIDR gw $PRIVATE_NETWORK.254 dev $eth1
EOT
# Restart eth1 interface:
ip addr flush dev $eth1
sudo ifdown $eth1 && sudo ifup $eth1 | true |
105da59f220be82916bfddcaff7fa5289baf49be | Shell | RoboticDinosaur/dotfiles | /zsh/functions/git | UTF-8 | 296 | 2.734375 | 3 | [] | no_license | #!/bin/zsh
#export GIT_AUTHOR_NAME="${FULLNAME}"
#export GIT_AUTHOR_EMAIL="${EMAIL}"
#export GIT_COMMITTER_NAME="${FULLNAME}"
#export GIT_COMMITTER_EMAIL="${EMAIL}"
#----------
git_init () {
git init \
&& git add . \
&& git commit -m "initial commit" \
&& git gc
}
alias gin=git_init
| true |
11c8846605c86a6cb111a7e6a0ac72087b227238 | Shell | wheatandcat/Peperomia | /scripts/dl_expo_bins | UTF-8 | 683 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -eo pipefail
# query expo.io to find most recent ipaUrl
IPA_URL=$(curl -sS https://expo.io/--/api/v2/versions | python -c 'import sys, json; print json.load(sys.stdin)["iosUrl"]')
# Skipping android apk dl for now
# APK_URL=$(curl -sS https://expo.io/--/api/v2/versions | python -c 'import sys, json; print json.load(sys.stdin)["androidUrl"]')
# download tar.gz
TMP_PATH_IPA=/tmp/exponent-app.tar.gz
curl -o $TMP_PATH_IPA "$IPA_URL"
# recursively make app dir
APP_PATH=bin/Exponent.app
mkdir -p $APP_PATH
# create apk (isn't stored tar'd)
# APK_PATH=bin/Exponent.apk
# curl -o $APK_PATH "$APK_URL"
# unzip tar.gz into APP_PATH
tar -C $APP_PATH -xzf $TMP_PATH_IPA | true |
3e6fc714db845e001f464e8f45aebf4eedd4d753 | Shell | cr0hn/Tarkin | /train.sh | UTF-8 | 447 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
export PYTHONPATH=$(pwd)/security-anomalies-logs-data/
. utils.sh
CHECK_INPUT_FILE=${1:-security-anomalies-logs-data/data/input-logs-example-file.csv}
check_file_existence $CHECK_INPUT_FILE
mkdir -p security-anomalies-logs-data/input-data/
LETTERSPACE_FILENAME=${2:-security-anomalies-logs-data/input-data/letterspace.pkl}
rm $LETTERSPACE_FILENAME
time cat $CHECK_INPUT_FILE | python security-anomalies-logs-data/service/train.py
| true |
18b855e96338d69df5944fac0b73c4c8b3ec4167 | Shell | KIT-CMS/sm-htt-analysis | /run_analysis.sh | UTF-8 | 2,489 | 3.703125 | 4 | [] | no_license | #!/bin/bash
set -e
# Parse arguments
ERA=$1 # options: 2016, 2017
IFS=',' read -r -a CHANNELS <<< $2
CHANNELSARG=$2
TAG="default"
source utils/bashFunctionCollection.sh
# Error handling to ensure that script is executed from top-level directory of
# this repository
for DIRECTORY in shapes datacards combine plotting utils
do
if [ ! -d "$DIRECTORY" ]; then
echo "[FATAL] Directory $DIRECTORY not found, you are not in the top-level directory of the analysis repository?"
exit 1
fi
done
ensureoutdirs
# Clean-up workspace
#./utils/clean.sh
# Create shapes of systematics
#./shapes/produce_shapes.sh $ERA $CHANNELSARG $TAG
# Apply blinding strategy
#./shapes/apply_blinding.sh $ERA
# Convert shapes to synced format
for CHANNEL in ${CHANNELS[@]}; do
logandrun ./shapes/convert_to_synced_shapes.sh $ERA $CHANNEL $TAG &
done
wait
# Write datacard
STXS_SIGNALS="stxs_stage0" # options: stxs_stage0, stxs_stage1p1
CATEGORIES="stxs_stage1p1" # options: stxs_stage0, stxs_stage1p1
JETFAKES=1 # options: 0, 1
EMBEDDING=1 # options: 0, 1
DATACARDDIR=output/datacards/${ERA}-${TAG}-smhtt-ML/${STXS_SIGNALS}
[ -d $DATACARDDIR ] || mkdir -p $DATACARDDIR
logandrun ./datacards/produce_datacard.sh ${ERA} $STXS_SIGNALS $CATEGORIES $JETFAKES $EMBEDDING ${TAG} ${CHANNELSARG}
# Combine datacards
# The following line combines datacards of different eras.
# The era name "combined" is used for the resulting datacards and can be fitted using this
# as ERA variable in the following.
#./datacards/combine_datacards.sh 2016 2017
# Build workspace
STXS_FIT="inclusive" # options: stxs_stage0, stxs_stage1p1, inclusive
logandrun ./datacards/produce_workspace.sh $ERA $STXS_FIT $TAG | tee ${ERA}_produce_workspace_${STXS_FIT}.log
# Run statistical inference
#./combine/significance.sh $ERA | tee ${ERA}_significance.log
logandrun ./combine/signal_strength.sh $ERA $STXS_FIT $DATACARDDIR/cmb/125 cmb ${TAG}
logandrun ./combine/signal_strength.sh $ERA $STXS_FIT $DATACARDDIR/cmb/125 cmb ${TAG} "robustHesse"
./combine/diff_nuisances.sh $ERA
#./combine/nuisance_impacts.sh $ERA
# Make prefit and postfit shapes
logandrun ./combine/prefit_postfit_shapes.sh ${ERA} ${STXS_FIT} ${DATACARDDIR}/cmb/125 ${TAG}
EMBEDDING=1
JETFAKES=1
logandrun ./plotting/plot_shapes.sh $ERA $TAG ${CHANNELSARG} $STXS_SIGNALS $STXS_FIT $CATEGORIES $JETFAKES $EMBEDDING
#./plotting/plot_signals.sh $ERA $STXS_SIGNALS $CATEGORIES $CHANNELS
| true |
f31f8fd73424bbb5779fa19ea5154a9fa94f3996 | Shell | edwardspbe/ProjectOllie | /olliemon | UTF-8 | 2,068 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/bin/bash -x
##############################################################################
# olliemon - task responsible for maintaining an operating environment for
# a deployed device. This task will verify that the network is
# operational, then send a heartbeat to the gateway configured in
# our config file. The heartbeat will return the required tasks
# that should be functioning on this device along with associated
# names, states and responsibilities. It will then start the task
# if not already running. Each task will operate independently
# and will in-turn send event details to the gateway as they occur.
include /opt/ollie/conf/config
function checknet()
{
ping -c1 google.ca
if [ $? -eq 0 ]
then
#all is fine, we can speak to the Internet.
echo 0 > /var/run/ollie/internet
return 0
else
#hmmm, cannot see the Internet, let's try our home server
ping -c1 ${gateway}
if [ $? -eq 0 ]
then
#we have limited intranet
echo 1 > /var/run/ollie/internet
return 1
else
#we have no network comm...?
echo 2 > /var/run/ollie/internet
return 2
fi
fi
}
function restartnet()
{
#our network connection must be down, reload the network
/etc/init.d/network restart
}
#main routine starts here...
#network testing and restart as required...
output=/tmp/config.json
net=`checknet`
if [ $net -lt 2 ]
then
#we have a network connection (possibly limited), send heartbeat accordingly
#but remember, we cannot send SMS
curl -X POST http://${gateway}/checkin -o $output
jq -c '.[]' $output | while read item; do
echo $item
done
else
#no net, restart network
restartnet
exit 1
fi
#test case 1: CMD to turn on the lights
#curl -X POST http://192.168.1.97/dev_state -d "name=SS-Door&status=3"
#curl -X POST http://192.168.1.97/dev_state -d "name=OllieAYS=1"
| true |
542a6452ffe21c7647cd8d7da3f1da8c77a749f2 | Shell | dvberkel/mathematics-articles | /thesis/lights_out/README | UTF-8 | 528 | 3.078125 | 3 | [] | no_license | #! /usr/bin/env bash
# This README file instructs how to "build" this thesis.
#
# First and foremost it depends on the thesis.class. The thesis class can be
# found at: http://code.google.com/p/persphone
#
# The following instructions build the thesis. It can be run with the following
# command:
# bash README
name=lights_out
for dir in code content image
do
if [ -f $dir/README ]
then
cd $dir
bash README
cd ..
fi
done
latex $name
bibtex $name
makeindex $name
latex $name
latex $name
dvips $name.dvi -o $name.ps
| true |
255d5457802800fca541397af351db00efb0574f | Shell | nanocad-lab/sdecc-viffto-ecc-ctrl | /automate_offline_inst_recovery.sh | UTF-8 | 12,535 | 2.765625 | 3 | [] | no_license | #!/bin/bash
#
# Author: Mark Gottscho
# mgottscho@ucla.edu
ARGC=$# # Get number of arguments excluding arg0 (the script itself). Check for help message condition.
if [[ "$ARGC" != 0 ]]; then # Bad number of arguments.
echo "Author: Mark Gottscho"
echo "mgottscho@ucla.edu"
echo ""
echo "No arguments allowed."
exit
fi
########################## FEEL FREE TO CHANGE THESE OPTIONS ##################################
ISA=rv64g # Set the target ISA; benchmarks must be disassembled for this as well
INPUT_TYPE=dynamic-split-int-float
if [[ "$INPUT_TYPE" == "static" ]]; then # Static evaluation
SPEC_BENCHMARKS="400.perlbench 401.bzip2 403.gcc 410.bwaves 416.gamess 429.mcf 433.milc 434.zeusmp 435.gromacs 436.cactusADM 437.leslie3d 444.namd 445.gobmk 447.dealII 450.soplex 453.povray 454.calculix 456.hmmer 458.sjeng 459.GemsFDTD 462.libquantum 464.h264ref 465.tonto 470.lbm 471.omnetpp 473.astar 481.wrf 482.sphinx3 483.xalancbmk" # Static -- all are working
INPUT_DIRECTORY=$MWG_DATA_PATH/swd_ecc_data/$ISA/disassembly/linux-gnu # For static
MNEMONIC_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/static/$ISA-mnemonic-hotness-static-all.csv
RD_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/static/$ISA-rd-hotness-static-all.csv
elif [[ "$INPUT_TYPE" == "dynamic-static-side-info" ]]; then # Dynamic evaluation with static side info
# TODO FIXME: this won't work with filter-joint-frequency-sort-pick-longest-pad since we did not collect the relevant data yet.
SPEC_BENCHMARKS="400.perlbench 401.bzip2 403.gcc 410.bwaves 435.gromacs 436.cactusADM 444.namd 447.dealII 450.soplex 453.povray 454.calculix 456.hmmer 458.sjeng 459.GemsFDTD 462.libquantum 464.h264ref 465.tonto 470.lbm 471.omnetpp 473.astar" # Dynamic -- working
#INPUT_DIRECTORY=$MWG_DATA_PATH/swd_ecc_data/$ISA/spike_micro17 # For dynamic
#FILE_VERSION="micro17"
INPUT_DIRECTORY=$MWG_DATA_PATH/swd_ecc_data/$ISA/spike_cases17 # For dynamic
FILE_VERSION="cases17"
MNEMONIC_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/static/$ISA-mnemonic-hotness-static-all.csv
RD_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/static/$ISA-rd-hotness-static-all.csv
elif [[ "$INPUT_TYPE" == "dynamic" ]]; then # Dynamic
SPEC_BENCHMARKS="400.perlbench 401.bzip2 403.gcc 410.bwaves 435.gromacs 436.cactusADM 444.namd 447.dealII 450.soplex 453.povray 454.calculix 456.hmmer 458.sjeng 459.GemsFDTD 462.libquantum 464.h264ref 465.tonto 470.lbm 471.omnetpp 473.astar" # Dynamic -- working
#INPUT_DIRECTORY=$MWG_DATA_PATH/swd_ecc_data/$ISA/spike_micro17 # For dynamic
#FILE_VERSION="micro17"
INPUT_DIRECTORY=$MWG_DATA_PATH/swd_ecc_data/$ISA/spike_cases17 # For dynamic
FILE_VERSION="cases17"
#MNEMONIC_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-mnemonic-hotness-dyn-all.csv
MNEMONIC_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-joint-mnemonic-reg-hotness-dyn-all.csv
RD_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-rd-hotness-dyn-all.csv
elif [[ "$INPUT_TYPE" == "dynamic-split-int-float" ]]; then # Dynamic
SPEC_INT_BENCHMARKS="400.perlbench 401.bzip2 403.gcc 456.hmmer 458.sjeng 462.libquantum 464.h264ref 471.omnetpp 473.astar"
SPEC_FLOAT_BENCHMARKS="410.bwaves 435.gromacs 436.cactusADM 444.namd 447.dealII 450.soplex 453.povray 454.calculix 459.GemsFDTD 465.tonto 470.lbm"
#INPUT_DIRECTORY=$MWG_DATA_PATH/swd_ecc_data/$ISA/spike_micro17 # For dynamic
#FILE_VERSION="micro17"
INPUT_DIRECTORY=$MWG_DATA_PATH/swd_ecc_data/$ISA/spike_cases17 # For dynamic
FILE_VERSION="cases17"
#INT_MNEMONIC_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-mnemonic-hotness-dyn-int.csv
INT_MNEMONIC_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-joint-mnemonic-reg-hotness-dyn-int.csv
INT_RD_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-rd-hotness-dyn-int.csv
#FLOAT_MNEMONIC_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-mnemonic-hotness-dyn-flt.csv
FLOAT_MNEMONIC_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-joint-mnemonic-reg-hotness-dyn-float.csv
FLOAT_RD_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-rd-hotness-dyn-flt.csv
elif [[ "$INPUT_TYPE" == "dynamic-perfect" ]]; then # Dynamic
SPEC_BENCHMARKS="400.perlbench 401.bzip2 403.gcc 410.bwaves 435.gromacs 436.cactusADM 444.namd 447.dealII 450.soplex 453.povray 454.calculix 456.hmmer 458.sjeng 459.GemsFDTD 462.libquantum 464.h264ref 465.tonto 470.lbm 471.omnetpp 473.astar" # Dynamic -- working
#INPUT_DIRECTORY=$MWG_DATA_PATH/swd_ecc_data/$ISA/spike_micro17 # For dynamic
#FILE_VERSION=micro17
INPUT_DIRECTORY=$MWG_DATA_PATH/swd_ecc_data/$ISA/spike_cases17 # For dynamic
FILE_VERSION=cases17
#MNEMONIC_HOTNESS_PREFIX=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-mnemonic-hotness-dyn
MNEMONIC_HOTNESS_PREFIX=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-joint-mnemonic-reg-hotness-dyn
RD_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-rd-hotness-dyn-all.csv # FIXME Don't use rd-based policy with dynamic-perfect SI, this is placeholder
elif [[ "$INPUT_TYPE" == "dynamic-baseline" ]]; then # Baseline, all are equally likely frequencies
SPEC_BENCHMARKS="400.perlbench 401.bzip2 403.gcc 410.bwaves 435.gromacs 436.cactusADM 444.namd 447.dealII 450.soplex 453.povray 454.calculix 456.hmmer 458.sjeng 459.GemsFDTD 462.libquantum 464.h264ref 465.tonto 470.lbm 471.omnetpp 473.astar" # Dynamic -- working
#INPUT_DIRECTORY=$MWG_DATA_PATH/swd_ecc_data/$ISA/spike_micro17 # For dynamic
#FILE_VERSION=micro17
INPUT_DIRECTORY=$MWG_DATA_PATH/swd_ecc_data/$ISA/spike_cases17 # For dynamic
FILE_VERSION=cases17
#MNEMONIC_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-mnemonic-hotness-dyn-baseline.csv
MNEMONIC_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-joint-mnemonic-reg-hotness-dyn-baseline.csv
RD_HOTNESS_FILENAME=$MWG_DATA_PATH/swd_ecc_data/$ISA/program-statistics/dynamic/$ISA-rd-hotness-dyn-baseline.csv
else
echo "ERROR, bad INPUT_TYPE: $INPUT_TYPE"
exit 1
fi
AXBENCH_INT_BENCHMARKS="" # kmeans not working
AXBENCH_FLOAT_BENCHMARKS="blackscholes fft inversek2j jmeint jpeg sobel" # kmeans not working
AXBENCH_BENCHMARKS="blackscholes fft inversek2j jmeint jpeg sobel" # kmeans not working
FAULTLINK_INT_BENCHMARKS="blowfish dhrystone matmult_int sha"
FAULTLINK_FLOAT_BENCHMARKS="whetstone"
FAULTLINK_BENCHMARKS="blowfish dhrystone matmult_int sha whetstone"
N=33
K=32
NUM_MESSAGES=1000
WORDS_PER_BLOCK=16
NUM_THREADS=$(cat /proc/cpuinfo | grep ^processor | wc -l )
CODE_TYPE=ULEL_even
#NUM_SAMPLED_ERROR_PATTERNS=1000
#NUM_SAMPLED_ERROR_PATTERNS=741 # Max for (39,32) SECDED
#NUM_SAMPLED_ERROR_PATTERNS=2556 # Max for (72,64) SECDED
#NUM_SAMPLED_ERROR_PATTERNS=14190 # Max for (45,32) DECTED
#NUM_SAMPLED_ERROR_PATTERNS=79079 # Max for (79,64) DECTED
#NUM_SAMPLED_ERROR_PATTERNS=141750 # Max for (144,128) ChipKill
NUM_SAMPLED_ERROR_PATTERNS=33 # Max for (35,32) ULEL
POLICY=filter-joint-frequency-sort-pick-longest-pad
CRASH_THRESHOLD=0.9
HASH_MODE=none
VERBOSE_RECOVERY=0
INT_BENCHMARKS=$AXBENCH_INT_BENCHMARKS
FLOAT_BENCHMARKS=$AXBENCH_FLOAT_BENCHMARKS
BENCHMARKS=$AXBENCH_BENCHMARKS
OUTPUT_DIRECTORY=$MWG_DATA_PATH/swd_ecc_data/$ISA/inst-recovery/offline-$INPUT_TYPE/$CODE_TYPE/$N,$K/hash-$HASH_MODE/$POLICY/crash-threshold-$CRASH_THRESHOLD/`date -I`
#if [[ "$MWG_MACHINE_NAME" == "hoffman" ]]; then
# NUM_THREADS=16 # Override above.
# # qsub options used:
# # -V: export environment variables from this calling script to each job
# # -N: name the job. I made these so that each job will be uniquely identified by its benchmark running as well as the output file string ID
# # -l: resource allocation flags for maximum time requested as well as maximum memory requested.
# # -M: cluster username(s) to email with updates on job status
# # -m: mailing rules for job status. b = begin, e = end, a = abort, s = suspended, n = never
# MAX_TIME_PER_RUN=18:00:00 # Maximum time of each script that will be invoked, HH:MM:SS. If this is exceeded, job will be killed.
# #MAX_MEM_PER_RUN="$((600 * $NUM_THREADS))M" # Maximum memory needed per script that will be invoked. If this is exceeded, job will be killed.
# MAX_MEM_PER_RUN="16G" # Maximum memory needed per script that will be invoked. If this is exceeded, job will be killed.
# MAILING_LIST=mgottsch # List of users to email with status updates, separated by commas
#fi
###############################################################################################
# Prepare directories
mkdir -p $OUTPUT_DIRECTORY
# Submit all the benchmarks
echo "Running..."
echo ""
if [[ "$INPUT_TYPE" == "dynamic-split-int-float" ]]; then
for BENCHMARK in $INT_BENCHMARKS; do
echo "$BENCHMARK (int)..."
INPUT_FILE="$INPUT_DIRECTORY/spike_mem_data_trace_${BENCHMARK}.txt.inst"
OUTPUT_FILE="$OUTPUT_DIRECTORY/${ISA}-${BENCHMARK}-inst-heuristic-recovery.mat"
JOB_STDOUT=$OUTPUT_DIRECTORY/${ISA}-${BENCHMARK}-inst-heuristic-recovery.stdout
JOB_STDERR=$OUTPUT_DIRECTORY/${ISA}-${BENCHMARK}-inst-heuristic-recovery.stderr
MNEMONIC_HOTNESS_FILENAME=$INT_MNEMONIC_HOTNESS_FILENAME
RD_HOTNESS_FILENAME=$INT_RD_HOTNESS_FILENAME
./swd_ecc_offline_inst_heuristic_recovery_wrapper.sh $PWD $ISA $BENCHMARK $N $K $NUM_MESSAGES $NUM_SAMPLED_ERROR_PATTERNS $WORDS_PER_BLOCK $INPUT_FILE $OUTPUT_FILE $NUM_THREADS $CODE_TYPE $POLICY $MNEMONIC_HOTNESS_FILENAME $RD_HOTNESS_FILENAME $CRASH_THRESHOLD $VERBOSE_RECOVERY $FILE_VERSION $HASH_MODE > $JOB_STDOUT 2> $JOB_STDERR
done
for BENCHMARK in $FLOAT_BENCHMARKS; do
echo "$BENCHMARK (float)..."
INPUT_FILE="$INPUT_DIRECTORY/spike_mem_data_trace_${BENCHMARK}.txt.inst"
OUTPUT_FILE="$OUTPUT_DIRECTORY/${ISA}-${BENCHMARK}-inst-heuristic-recovery.mat"
JOB_STDOUT=$OUTPUT_DIRECTORY/${ISA}-${BENCHMARK}-inst-heuristic-recovery.stdout
JOB_STDERR=$OUTPUT_DIRECTORY/${ISA}-${BENCHMARK}-inst-heuristic-recovery.stderr
MNEMONIC_HOTNESS_FILENAME=$FLOAT_MNEMONIC_HOTNESS_FILENAME
RD_HOTNESS_FILENAME=$FLOAT_RD_HOTNESS_FILENAME
./swd_ecc_offline_inst_heuristic_recovery_wrapper.sh $PWD $ISA $BENCHMARK $N $K $NUM_MESSAGES $NUM_SAMPLED_ERROR_PATTERNS $WORDS_PER_BLOCK $INPUT_FILE $OUTPUT_FILE $NUM_THREADS $CODE_TYPE $POLICY $MNEMONIC_HOTNESS_FILENAME $RD_HOTNESS_FILENAME $CRASH_THRESHOLD $VERBOSE_RECOVERY $FILE_VERSION $HASH_MODE > $JOB_STDOUT 2> $JOB_STDERR
done
else
for BENCHMARK in $BENCHMARKS; do
echo "$BENCHMARK..."
if [[ "$INPUT_TYPE" == "static" ]]; then
INPUT_FILE="$INPUT_DIRECTORY/${ISA}-${BENCHMARK}-instructions.txt"
elif [[ "$INPUT_TYPE" == "dynamic-static-side-info" ]]; then
INPUT_FILE="$INPUT_DIRECTORY/spike_mem_data_trace_${BENCHMARK}.txt.inst"
elif [[ "$INPUT_TYPE" == "dynamic" ]]; then
INPUT_FILE="$INPUT_DIRECTORY/spike_mem_data_trace_${BENCHMARK}.txt.inst"
elif [[ "$INPUT_TYPE" == "dynamic-perfect" ]]; then
INPUT_FILE="$INPUT_DIRECTORY/spike_mem_data_trace_${BENCHMARK}.txt.inst"
MNEMONIC_HOTNESS_FILENAME="${MNEMONIC_HOTNESS_PREFIX}-${BENCHMARK}.csv"
# FIXME Don't use rd-based policy with dynamic-perfect SI, this is placeholder
elif [[ "$INPUT_TYPE" == "dynamic-baseline" ]]; then
INPUT_FILE="$INPUT_DIRECTORY/spike_mem_data_trace_${BENCHMARK}.txt.inst"
fi
OUTPUT_FILE="$OUTPUT_DIRECTORY/${ISA}-${BENCHMARK}-inst-heuristic-recovery.mat"
JOB_STDOUT=$OUTPUT_DIRECTORY/${ISA}-${BENCHMARK}-inst-heuristic-recovery.stdout
JOB_STDERR=$OUTPUT_DIRECTORY/${ISA}-${BENCHMARK}-inst-heuristic-recovery.stderr
./swd_ecc_offline_inst_heuristic_recovery_wrapper.sh $PWD $ISA $BENCHMARK $N $K $NUM_MESSAGES $NUM_SAMPLED_ERROR_PATTERNS $WORDS_PER_BLOCK $INPUT_FILE $OUTPUT_FILE $NUM_THREADS $CODE_TYPE $POLICY $MNEMONIC_HOTNESS_FILENAME $RD_HOTNESS_FILENAME $CRASH_THRESHOLD $VERBOSE_RECOVERY $FILE_VERSION $HASH_MODE > $JOB_STDOUT 2> $JOB_STDERR
done
fi
| true |
459b7faa239110d69d037af16b1f7852d8b7b9be | Shell | gnaganab/Openstack-lab | /LTRCLD-1451/scripts/copy_keys.sh | UTF-8 | 268 | 2.5625 | 3 | [] | no_license | for i in {99..99};
do
rm -rf /home/tenant${i}/.ssh
mkdir -p /home/tenant${i}/.ssh
chmod 700 /home/tenant${i}/.ssh
cp ~/.ssh/id_rsa* /home/tenant${i}/.ssh/
cp ~/.ssh/config /home/tenant${i}/.ssh/ -f
chown -R tenant${i}:tenant${i} /home/tenant${i}/.ssh
done
| true |
091da58064c8083862f9f73bf0ddfae0069a8b29 | Shell | bpikap/docker-cloud-media-scripts | /setup/rclone_setup | UTF-8 | 4,157 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
. "/usr/bin/variables"
printf "\n\n\n==============================================================\n"
printf "Setup following endpoints in rclone:\n\n"
echo "- Endpoint to your cloud storage."
printf "\t- Create new remote [Press N]\n"
if [ "$(printenv ENCRYPT_MEDIA)" != "0" ]; then
printf "\t- Give it a name example gd\n"
else
printf "\t- Enter Rclone cloud endpoint: $(echo -n $(printenv RCLONE_CLOUD_ENDPOINT) | head -c -1)\n"
fi
printf "\t- Choose Google Drive [Press 7]\n"
printf "\t- If you have a client id paste it here or leave it blank\n"
printf "\t- Choose headless machine [Press N]\n"
printf "\t- Open the url in your browser and enter the verification code\n\n"
if [ "$(printenv CACHE_PROGRAM)" == "rclone" ]; then
echo "- Cache for your cloud storage."
printf "\t- Create new remote [Press N]\n"
printf "\t- Enter Rclone cloud endpoint: $(echo -n $(printenv RCLONE_CLOUD_ENDPOINT) | head -c -1)\n"
printf "\t- Choose Cache a remote [Press 5]\n"
printf "\t- Enter your Plex Server Details for optimized caching while playback or during scans.\n"
printf "\t- Choose chunk file size.\n"
printf "\t- Choose object info time. Use high value if all writes to gdrive going through cache.\n"
printf "\t- Choose maximum chunk size. Oldest ones will be deleted after limit is hit.\n\n"
fi
if [ "$(printenv MIRROR_MEDIA)" != "0" ]; then
echo "- Mirror your cloud storage to second gdrive account."
printf "\t- Create new remote [Press N]\n"
printf "\t- Give it a name example gdm\n"
printf "\t- Choose Google Drive [Press 7]\n"
printf "\t- If you have a client id paste it here or leave it blank\n"
printf "\t- Choose headless machine [Press N]\n"
printf "\t- Open the url in your browser and enter the verification code\n\n"
if [ "$(printenv ENCRYPT_MIRROR_MEDIA)" != "0" ]; then
echo "- Encryption and decryption for your mirror storage."
printf "\t- Enter Rclone mirror endpoint: $(echo -n $(printenv RCLONE_MIRROR_ENDPOINT) | head -c -1)\n"
printf "\t- Choose Encrypt/Decrypt a remote [Press 5]\n"
printf "\t- Enter the name of the remote created in mirror-storage appended with a colon (:) and the subfolder on your cloud. Example gdm:/Media or just gdm: if you have your files in root.\n"
printf "\t- Choose how to encrypt filenames. I prefer option 2 Encrypt the filenames\n"
printf "\t- Choose to either generate your own or random password. I prefer to enter my own.\n"
printf "\t- Choose to enter pass phrase for the salt or leave it blank. I prefer to enter my own.\n\n"
fi
fi
if [ "$(printenv ENCRYPT_MEDIA)" != "0" ]; then
echo "- Encryption and decryption for your cloud storage."
printf "\t- Create new remote [Press N]\n"
printf "\t- Enter Rclone cloud endpoint: $(echo -n $(printenv RCLONE_CLOUD_ENDPOINT) | head -c -1)\n"
printf "\t- Choose Encrypt/Decrypt a remote [Press 5]\n"
printf "\t- Enter the name of the remote created in cloud-storage appended with a colon (:) and the subfolder on your cloud. Example gd:/Media or just gd: if you have your files in root.\n"
printf "\t- Choose how to encrypt filenames. I prefer option 2 Encrypt the filenames\n"
printf "\t- Choose to either generate your own or random password. I prefer to enter my own.\n"
printf "\t- Choose to enter pass phrase for the salt or leave it blank. I prefer to enter my own.\n\n"
echo "- Encryption and decryption for your local storage."
printf "\t- Create new remote [Press N]\n"
printf "\t- Enter Rclone local endpoint: $(echo -n $(printenv RCLONE_LOCAL_ENDPOINT) | head -c -1)\n"
printf "\t- Choose Encrypt/Decrypt a remote [Press 5]\n"
printf "\t- Enter the encrypted folder: ${cloud_encrypt_dir}. If you are using subdirectory append it to it. Example ${cloud_encrypt_dir}/Media\n"
printf "\t- Choose the same filename encrypted as you did with the cloud storage.\n"
printf "\t- Enter the same password as you did with the cloud storage.\n"
printf "\t- Enter the same pass phrase as you did with the cloud storage.\n"
fi
printf "==============================================================\n\n\n"
rclone $rclone_config config
| true |
06e46abc50a1bf594c38e0ee308d79629ca43a77 | Shell | skoczen/dotfiles | /shellScripts/fab_complete.sh | UTF-8 | 326 | 3.390625 | 3 | [
"MIT"
] | permissive | _fab_completion() {
COMPREPLY=()
# Fab in the path?
/usr/bin/which -s fab || return 0
# Fabfile in this folder?
[[ -e fabfile.py ]] || return 0
local cur="${COMP_WORDS[COMP_CWORD]}"
tasks=$(fab --shortlist)
COMPREPLY=( $(compgen -W "${tasks}" -- ${cur}) )
}
complete -F _fab_completion fab | true |
50d530ee7703121b77b2b15b725571720e11f693 | Shell | masmullin2000/qemu-runner | /fc | UTF-8 | 3,689 | 3.78125 | 4 | [] | no_license | #!/bin/bash
set -e
RUN_AS="both"
CORES=2
THREAD="false"
RAM=1024
kernel_path=""
rootfs_path=""
tap_ctr=$((0))
TAP_DEV=""
SOCKET="0"
FIRECRACKER="firecracker"
NETWORK="no"
CMDLN=""
make_tap() {
TAPS=$(ip a | grep tap | awk '{ print $2; }' | sed 's/://g')
for t in $TAPS; do
tap="tap$tap_ctr"
if [[ $tap == $t ]]; then
tap_ctr=$((tap_ctr+1))
else
break
fi
done
sudo ip tuntap add dev "tap$tap_ctr" mode tap
sudo ip link set dev "tap$tap_ctr" up
sudo ip link set "tap$tap_ctr" master br0
}
while [ ! -z "$1" ]
do
CMDLN+="$1 "
case "$1" in
-c|--client)
RUN_AS="client"
;;
-j|--cores)
CORES="$2"
CMDLN+="$2 "
shift
;;
-t|--thread)
THREAD="true"
;;
-m|--ram)
RAM=$2
CMDLN+="$2 "
shift
;;
-k|--kernel)
kernel_path=$2
CMDLN+="$2 "
shift
;;
-r|--root)
rootfs_path=$2
CMDLN+="$2 "
shift
;;
-int|--interactive)
INTERACTIVE="yes"
;;
-i|--initramfs)
INITRAMFS=$2
CMDLN+="$2 "
shift
;;
-n|--network)
NETWORK="yes"
;;
-s|--socket)
SOCKET=$2
CMDLN+="$2 "
shift
;;
--server)
RUN_AS="server"
;;
*)
echo "Unknown Command $1"
exit
;;
esac
shift
done
if [[ "$RUN_AS" == "both" ]]; then
SOCKET="/tmp/firecracker$SOCKET.socket"
/bin/bash -c "sleep 1 && $0 -c $CMDLN" &
rm -f $SOCKET
if [[ "$INTERACTIVE" == "yes" ]]; then
$FIRECRACKER --api-sock $SOCKET
else
$FIRECRACKER --api-sock $SOCKET &
fi
exit 0
fi
if [[ "$RUN_AS" == "server" ]]; then
for i in {0..10}; do
if [[ ! -e /tmp/firecracker$i.socket ]]; then
rm -rf /tmp/firecracker$i.socket
echo -e "Socket: /tmp/firecracker$i.socket"
$FIRECRACKER --api-sock /tmp/firecracker$i.socket
rm -rf /tmp/firecracker$i.socket
exit 0
fi
done
fi
if [[ "$RUN_AS" == "client" ]]; then
SOCKET="/tmp/firecracker$SOCKET.socket"
K_STR="{ \"kernel_image_path\": \"${kernel_path}\""
if [[ "$INTERACTIVE" == "yes" ]]; then
K_STR+=",\"boot_args\": \"console=ttyS0 reboot=k panic=1\""
fi
if [[ -n "$INITRAMFS" ]]; then
K_STR+=",\"initrd_path\": \"${INITRAMFS}\""
fi
K_STR+=" }"
if [[ "$NETWORK" == "yes" ]]; then
make_tap
TAP_DEV="tap$tap_ctr"
E=$(printf '%02X' $tap_ctr)
IPADD=$(ip a show br0 | grep "inet " | awk '{ print $2; }' | sed 's/\/.*$//g' | sed "s/\.[^.]*$//")
IPADD=$(printf '%02X:' ${IPADD//./ })
IPADD+=$E
curl --unix-socket $SOCKET -i \
-X PUT 'http://localhost/network-interfaces/eth0' \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-d "{
\"iface_id\": \"eth0\",
\"guest_mac\": \"AA:BB:$IPADD\",
\"host_dev_name\": \""$TAP_DEV"\"
}"
fi
curl --unix-socket $SOCKET -i \
-X PUT 'http://localhost/boot-source' \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-d "$K_STR"
curl --unix-socket $SOCKET -i \
-X PUT 'http://localhost/drives/rootfs' \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-d "{
\"drive_id\": \"rootfs\",
\"path_on_host\": \"${rootfs_path}\",
\"is_root_device\": true,
\"is_read_only\": false
}"
RAM=$((1024*$RAM))
curl --unix-socket $SOCKET -i \
-X PUT 'http://localhost/machine-config' \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-d "{
\"vcpu_count\": $CORES,
\"mem_size_mib\": $RAM,
\"ht_enabled\": $THREAD
}"
curl --unix-socket $SOCKET -i \
-X PUT 'http://localhost/actions' \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-d "{
\"action_type\": \"InstanceStart\"
}"
fi
| true |
5c6649f2518886882cc375ea46d14c8b538cb1ef | Shell | tabital0/ull-shaka-ecosystem | /demos/end-to-end/testPatternToPackager.sh | UTF-8 | 2,366 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# Set up variables
export LOW_LATENCY_PREVIEW_DIR="${PWD}/../../low-latency-preview"
export UTILS_DIR="${PWD}/../utils"
export PORT=8080
export IP=127.0.0.1
export OUTPUT_DIR='1234'
export OUTPUT_SEG_NAME='test_pattern_live_video'
export X264_ENC='libx264 -tune zerolatency -profile:v baseline -preset ultrafast -bf 0 -refs 1 -sc_threshold 0'
export SERVER_UPLOAD_DIR='ldash'
export SERVER_PLAYOUT_DIR='ldashplay'
# Create log folder
[ -e logs ] && rm -rf logs
mkdir logs
# Create pipe
[ -e pipe0 ] && rm pipe0
mkfifo pipe0
# Set up cleanup process
trap_ctrlc() {
echo -e "\nStream has ended. Cleanup taking place."
pkill main
pkill ffmpeg
pkill packager
rm pipe0
echo "All processes have been killed ☠"
}
trap trap_ctrlc INT
echo "Launching local server! Please wait a few seconds"
# Launch the local server on port 8080
go run ${LOW_LATENCY_PREVIEW_DIR}/main.go "${LOW_LATENCY_PREVIEW_DIR}/www" 2>logs/server.log &
# Give server time to get up and running
while ! pgrep -x "main" >/dev/null
do sleep 1
done
echo "Server is up! Checkout the stream at: http://${IP}:${PORT}/${SERVER_PLAYOUT_DIR}/${OUTPUT_DIR}/manifest.mpd"
# Generate the test pattern with FFMPEG and send to pipe
ffmpeg \
-hide_banner \
-re \
-f lavfi \
-i "testsrc2=size=640x360:rate=60" \
-pix_fmt yuv420p \
-map 0:v \
-c:v ${X264_ENC} \
-g 300 \
-keyint_min 300 \
-b:v 4000k \
-vf "drawtext=fontfile=${UTILS_DIR}/OpenSans-Bold.ttf:box=1:fontcolor=black:boxcolor=white:fontsize=33':x=14:y=150:textfile=${UTILS_DIR}/text.txt'" \
-f mpegts \
pipe: > pipe0 \
2>logs/ffmpeg.log &
# Package test pattern as LL-DASH
packager \
--v=0 \
--io_block_size 65536 \
--nogenerate_sidx_in_media_segments \
in=pipe0,stream=video,init_segment='http://'${IP}':'${PORT}'/'${SERVER_UPLOAD_DIR}'/'${OUTPUT_DIR}'/'${OUTPUT_SEG_NAME}'_init.m4s',segment_template='http://'${IP}':'${PORT}'/'${SERVER_UPLOAD_DIR}'/'${OUTPUT_DIR}'/'${OUTPUT_SEG_NAME}'_$Number%05d$.m4s' \
--segment_duration 5 \
--is_low_latency_dash=true \
--utc_timings "urn:mpeg:dash:utc:http-xsdate:2014"="https://time.akamai.com/?iso" \
--mpd_output "http://${IP}:${PORT}/${SERVER_UPLOAD_DIR}/${OUTPUT_DIR}/manifest.mpd" \
2> logs/packager.log &
# Wait for all background processes to terminate or Control-C
wait | true |
718c4b328a5832f68671bf5e5bb2d10a06d683e3 | Shell | Sbai7/CMLIB | /src/utils/mkitldir | UTF-8 | 7,355 | 3.234375 | 3 | [] | no_license | #!/bin/sh
ITLDIR=$HOME/cmlib
SRCDIR=$HOME/itl/apps/cmlib
rm -r -f $ITLDIR
$SRCDIR/utils/mkarchdir $ITLDIR
rm -f $SRCDIR/[A,H,I,L,S]*/lib*/make.inc
echo Copying libraries, executables '...' this will take approx. 15 minutes
echo please wait
cd $SRCDIR;tar cf - ./AIX* ./HP* ./IRIX* ./Linux* ./SunOS* ./doc ./docs ./src|(cd $ITLDIR; tar xpf -)
cd $ITLDIR
rm -f [A,H,I,L,S]*/Version
echo Fixing IRIX '...' please wait
Idirs=`cd $ITLDIR;ls -ld IRIX* 2>/dev/null|grep drwx|cut -c55-`
#echo $Idirs
for dir in $Idirs ; do
libdirs=`cd $dir;ls -ld lib*|grep drwx|cut -c55-`
# echo $libdirs
if [ `echo $dir|grep -c 5\.3` -eq 1 ]; then
mkdir -p $ITLDIR/$dir/lib/mips2
mkdir $ITLDIR/$dir/bin
mkdir -p $ITLDIR/$dir/man/manl
cp -p $SRCDIR/nist.scripts/cmdoc $ITLDIR/$dir/bin
cp -p $SRCDIR/nist.scripts/cmsrc $ITLDIR/$dir/bin
cp -p $SRCDIR/nist.scripts/cmsrcx.irix $ITLDIR/$dir/bin/cmsrcx
cp -p $SRCDIR/nist.man/cmlib.irix5.3.l $ITLDIR/$dir/man/manl/cmlib.l
else
mkdir -p $ITLDIR/$dir/lib32/mips3
mkdir -p $ITLDIR/$dir/lib32/mips4
mkdir -p $ITLDIR/$dir/lib64/mips4
mkdir $ITLDIR/$dir/bin
mkdir -p $ITLDIR/$dir/man/manl
cp -p $SRCDIR/nist.scripts/cmdoc $ITLDIR/$dir/bin
cp -p $SRCDIR/nist.scripts/cmsrc $ITLDIR/$dir/bin
cp -p $SRCDIR/nist.scripts/cmsrcx.irix64 $ITLDIR/$dir/bin/cmsrcx
if [ `echo $dir|grep -c 6\.2` -eq 1 ]; then
cp -p $SRCDIR/nist.man/cmlib.irix64.6.2.l $ITLDIR/$dir/man/manl/cmlib.l
else
cp -p $SRCDIR/nist.man/cmlib.irix64.6.5.l $ITLDIR/$dir/man/manl/cmlib.l
fi
fi
for ldir in $libdirs ; do
if [ `echo $ldir|grep -c mips1` -eq 1 ]; then
odir=lib
lname=`echo $ldir|sed -e "s/_mips1//"`
lname=`echo $lname|sed -e "s/lib/libcmlib/"`
lname=$lname\.a
# echo $odir $lname
elif [ `echo $ldir|grep -c mips2` -eq 1 ]; then
odir=lib/mips2
lname=`echo $ldir|sed -e "s/_mips2//"`
lname=`echo $lname|sed -e "s/lib/libcmlib/"`
lname=$lname\.a
# echo $odir $lname
elif [ `echo $ldir|grep -c mips3` -eq 1 ]; then
lname=`echo $ldir|sed -e "s/_mips3//"`
odir='lib32/mips3'
lname=`echo $lname|sed -e "s/_n32//" -e "s/f90n32/f90/"`
lname=`echo $lname|sed -e "s/lib/libcmlib/"`
lname=$lname\.a
# echo $odir $lname
elif [ `echo $ldir|grep -c mips4` -eq 1 ]; then
lname=`echo $ldir|sed -e "s/_mips4//"`
if [ `echo $lname|grep -c n32` -eq 1 ]; then
odir='lib32/mips4'
lname=`echo $lname|sed -e "s/_n32//" -e "s/f90n32/f90/"`
else
odir='lib64/mips4'
fi
lname=`echo $lname|sed -e "s/lib/libcmlib/"`
lname=$lname\.a
# echo $odir $lname
fi
mv $ITLDIR/$dir/$ldir/libcm.a $ITLDIR/$dir/$odir/$lname
rm -r -f $ITLDIR/$dir/$ldir
done
done
#
# Create compatibility links for older versions of CMLIB
# Eventually these commands should be removed
#
if [ -d $ITLDIR/IRIX-IP22-5.3/lib ]; then
cd $ITLDIR/IRIX-IP22-5.3/lib
ln -s libcmlib.a libcm.a
ln -s mips2/libcmlib.a libcm_mips2.a
cd $ITLDIR
fi
if [ -d $ITLDIR/IRIX64-IP25-6.2/lib64 ]; then
cd $ITLDIR/IRIX64-IP25-6.2/lib64
ln -s mips4/libcmlib.a libcm_64.a
ln -s mips4/libcmlib.a libcm90_64.a
cd $ITLDIR
fi
if [ -d $ITLDIR/IRIX64-IP25-6.2/lib32 ]; then
cd $ITLDIR/IRIX64-IP25-6.2/lib32
ln -s mips3/libcmlib_f90.a libcm90.a
cd $ITLDIR
fi
#
echo Fixing SunOS '...' please wait
Sdirs=`cd $ITLDIR;ls -ld SunO* 2>/dev/null|grep drwx|cut -c55- 2>/dev/null`
#echo $Sdirs
for sdir in $Sdirs ; do
mv $ITLDIR/$sdir/lib $ITLDIR/$sdir/lib.tmp
mkdir $ITLDIR/$sdir/lib
mkdir $ITLDIR/$sdir/bin
mkdir -p $ITLDIR/$sdir/man/manl
cp -p $SRCDIR/nist.scripts/cmdoc $ITLDIR/$sdir/bin
cp -p $SRCDIR/nist.scripts/cmsrc $ITLDIR/$sdir/bin
if [ `echo $sdir|grep -c 4\.1` -eq 1 ]; then
cp -p $SRCDIR/nist.scripts/cmsrcx.sun4 $ITLDIR/$sdir/bin/cmsrcx
cp -p $SRCDIR/nist.man/cmlib.sun4.l $ITLDIR/$sdir/man/manl/cmlib.l
else
cp -p $SRCDIR/nist.scripts/cmsrcx.solaris2 $ITLDIR/$sdir/bin/cmsrcx
cp -p $SRCDIR/nist.man/cmlib.solaris2.l $ITLDIR/$sdir/man/manl/cmlib.l
fi
libdirs=`cd $sdir;ls -ld lib*|grep drwx|cut -c55-`
# echo $libdirs
for ldir in $libdirs ; do
ext=''
if [ "$ldir" != "lib" ]; then
if [ `echo $ldir|grep -c r8` -eq 1 ]; then
ext='8'
elif [ `echo $ldir|grep -c f90` -eq 1 ]; then
ext='_f90'
fi
if [ `ls $sdir/$ldir|grep -c libcm\.so` -eq 1 ]; then
mv $ITLDIR/$sdir/$ldir/libcm.so $ITLDIR/$sdir/lib/libcm"$ext".so
fi
if [ `ls $ITLDIR/$sdir/$ldir|grep -c libcm\.a` -eq 1 ]; then
mv $ITLDIR/$sdir/$ldir/libcm.a $ITLDIR/$sdir/lib/libcm"$ext".a
fi
rm -r -f $ITLDIR/$sdir/$ldir
fi
done
done
#
echo Fixing AIX '...' please wait
Adirs=`cd $ITLDIR;ls -ld AIX* 2>/dev/null|grep drwx|cut -c55-`
#echo $Adirs
for dir in $Adirs ; do
mv $ITLDIR/$dir/lib $ITLDIR/$dir/lib.tmp
mkdir $ITLDIR/$dir/bin
mkdir -p $ITLDIR/$dir/man/manl
cp -p $SRCDIR/nist.scripts/cmdoc $ITLDIR/$dir/bin
cp -p $SRCDIR/nist.scripts/cmsrc $ITLDIR/$dir/bin
cp -p $SRCDIR/nist.scripts/cmsrcx.aix $ITLDIR/$dir/bin/cmsrcx
cp -p $SRCDIR/nist.man/cmlib.aix.l $ITLDIR/$dir/man/manl/cmlib.l
mkdir $ITLDIR/$dir/lib
libdirs=`cd $dir;ls -ld lib*|grep drwx|cut -c55-`
# echo $libdirs
for ldir in $libdirs ; do
ext1=''
ext2=''
if [ "$ldir" != "lib" ]; then
if [ `echo $ldir|grep -c autodbl` -eq 1 ]; then
ext2='_autodbl'
fi
if [ `echo $ldir|grep -c f90` -eq 1 ]; then
ext1='_f90'
fi
mv $ITLDIR/$dir/$ldir/libcm.a $ITLDIR/$dir/lib/libcmlib"$ext1""$ext2".a
rm -r -f $ITLDIR/$dir/$ldir
fi
done
done
echo Fixing HP-UX '...' please wait
Hdirs=`cd $ITLDIR;ls -ld HP* 2>/dev/null|grep drwx|cut -c55-`
#
# Currently there is only one HP-UX version of the library
# this will need to be modified similar to AIX loops above
# if more versions are added
#echo $Hdirs
for dir in $Hdirs ; do
mv $ITLDIR/$dir/lib/libcm.a $ITLDIR/$dir/lib/libcmlib.a
mkdir $ITLDIR/$dir/bin
mkdir -p $ITLDIR/$dir/man/manl
cp -p $SRCDIR/nist.scripts/cmdoc $ITLDIR/$dir/bin
cp -p $SRCDIR/nist.scripts/cmsrc $ITLDIR/$dir/bin
cp -p $SRCDIR/nist.scripts/cmsrcx.hpux $ITLDIR/$dir/bin/cmsrcx
cp -p $SRCDIR/nist.man/cmlib.hpux.l $ITLDIR/$dir/man/manl/cmlib.l
rm -f $ITLDIR/$dir/lib/test*
rm -r $ITLDIR/$dir/lib/src
rm -r $ITLDIR/$dir/epg
done
echo Fixing Linux '...' please wait
Ldirs=`cd $ITLDIR;ls -ld Linux* 2>/dev/null|grep drwx|cut -c55-`
#
# Currently there is only one Linux version of the library
# this will need to be modified similar to AIX loops above
# if more versions are added
#echo $Hdirs
for dir in $Ldirs ; do
mkdir $ITLDIR/$dir/bin
mkdir -p $ITLDIR/$dir/man/manl
cp -p $SRCDIR/nist.scripts/cmdoc $ITLDIR/$dir/bin
cp -p $SRCDIR/nist.scripts/cmsrc $ITLDIR/$dir/bin
cp -p $SRCDIR/nist.scripts/cmsrcx.linux $ITLDIR/$dir/bin/cmsrcx
cp -p $SRCDIR/nist.man/cmlib.linux.l $ITLDIR/$dir/man/manl/cmlib.l
rm -f $ITLDIR/$dir/lib/test*
rm -r $ITLDIR/$dir/lib/src
done
| true |
3f4471bebdbc404a835e0e3d36379433c898c4a9 | Shell | Pustelto/shell_theme | /libs/helpers.zsh | UTF-8 | 3,544 | 3.65625 | 4 | [
"MIT"
] | permissive | THEME_GIT_PROMPT_PREFIX="[\u2325 "
THEME_GIT_PROMPT_SUFFIX="]"
# Render the git portion of the prompt.
# Prints nothing when HEAD cannot be resolved (not a git repo) or when
# vcs_info produced no message; otherwise emits the branch info wrapped
# in THEME_GIT_PROMPT_PREFIX/SUFFIX, bold green when the work tree is
# clean and bold red when it is dirty (as flagged by the vcs hooks).
function git_prompt_info() {
  WORKDIR_DIRTY=false

  # Bail out early outside of a git repository.
  local head_ref
  head_ref=$(git symbolic-ref HEAD 2> /dev/null)
  [[ -z "${head_ref}" ]] && return

  # Configure vcs_info, run it, and grab the formatted message.
  git_branch_info
  vcs_info
  local branch_segment="${vcs_info_msg_0_}"
  [[ -z "$branch_segment" ]] && return

  # The +vi-vcs-detect-changes hook sets WORKDIR_DIRTY while vcs_info runs.
  if [[ "$WORKDIR_DIRTY" == true ]]; then
    current_state='red'
  else
    current_state='green'
  fi
  echo "%B%F{$current_state}$THEME_GIT_PROMPT_PREFIX$branch_segment$THEME_GIT_PROMPT_SUFFIX%f%b "
}
# Configure zsh's vcs_info module to report git branch/state details.
# Side effects: resets the WORKDIR_DIRTY global and sets the
# VCS_DEFAULT_FORMAT and HOOKS globals; registers the +vi-* hook
# functions defined below with vcs_info.
function git_branch_info() {
WORKDIR_DIRTY=false
autoload -Uz vcs_info
# Only inspect git repositories; check-for-changes makes vcs_info fill
# in the staged/unstaged markers used by the hooks.
zstyle ':vcs_info:*' enable git
zstyle ':vcs_info:*' check-for-changes true
# %b = branch name, %c = staged marker, %m = misc (ahead/behind counts
# appended by the +vi-git-aheadbehind hook).
VCS_DEFAULT_FORMAT="%b%c%m"
zstyle ':vcs_info:*' formats "$VCS_DEFAULT_FORMAT"
# While a rebase/merge/etc. is in progress, also show the action name (%a).
zstyle ':vcs_info:*' actionformats "%b | %a | %c%m"
zstyle ':vcs_info:*' stagedstr " $(echo ' ^')"
# Unstaged files are hinted by the red color of the git info, so an
# additional marker is unnecessary.
# zstyle ':vcs_info:*' unstagedstr " $(echo '*')"
# Hook functions (named +vi-<hook> below) run while vcs_info builds its
# message: dirtiness detection, ahead/behind counts, untracked files.
HOOKS=(vcs-detect-changes git-aheadbehind git-untracked)
zstyle ':vcs_info:git*+set-message:*' hooks $HOOKS
}
# Get number of commits the branch is ahead or behind of its upstream.
# Taken from https://github.com/bhilburn/powerlevel9k
function +vi-git-aheadbehind() {
  local ahead behind branch_name
  local -a gitstatus
  branch_name=$(git symbolic-ref --short HEAD 2>/dev/null)
  # Commits reachable only from HEAD -> ahead; \u2191 renders as an up arrow.
  # ${ahead// /} strips the padding some wc(1) implementations emit.
  ahead=$(git rev-list "${branch_name}"@{upstream}..HEAD 2>/dev/null | wc -l)
  (( ahead )) && gitstatus+=( " \u2191${ahead// /}" )
  behind=$(git rev-list HEAD.."${branch_name}"@{upstream} 2>/dev/null | wc -l)
  (( behind )) && gitstatus+=( " \u2193${behind// /}" )
  # ${(j::)...}: zsh join flag - concatenate the array with no separator.
  hook_com[misc]+=${(j::)gitstatus}
}
# Append a " ?" marker to the staged-changes slot when the repository
# contains untracked files.
function +vi-git-untracked() {
  if [[ $(git rev-parse --is-inside-work-tree 2> /dev/null) == 'true' ]] && \
    git status --porcelain | grep '??' &> /dev/null ; then
    # This will show the marker if there are any untracked files in repo.
    # If instead you want to show the marker only if there are untracked
    # files in $PWD, use:
    #[[ -n $(git ls-files --others --exclude-standard) ]] ; then
    hook_com[staged]+=' ?'
  fi
}
# vcs_info hook: mark the working tree dirty when either the staged or the
# unstaged slot reports changes; otherwise mark it clean.
function +vi-vcs-detect-changes() {
  WORKDIR_DIRTY=false
  # Concatenating both slots is non-empty iff at least one of them is.
  if [[ -n "${hook_com[staged]}${hook_com[unstaged]}" ]]; then
    WORKDIR_DIRTY=true
  fi
}
# Prompt fragment: bold symbol rendered via the zsh %(?.ok.bad) ternary -
# green after a zero exit status, red after a non-zero one.
function return_code() {
  printf '%s\n' "%(?.%F{green}.%F{red})%B${PROMPT_SYMBOL}%b%f "
}
# Displays "<name>@<version>" scraped from ./package.json; prints nothing
# when the file is missing or empty.  Plain-text scraping: assumes the
# "name"/"version" keys each sit on their own line, as npm writes them.
function package_version() {
  local contents
  contents=$(cat package.json 2> /dev/null)
  if [[ -z "${contents}" ]]; then
    return
  fi
  local pkg_name pkg_ver
  # Take the first line mentioning the key, grab everything after the colon,
  # then strip spaces, quotes and commas.
  pkg_name=$(grep "name" package.json | head -1 | awk -F: '{ print $2 }' | sed 's/[ ",]//g')
  pkg_ver=$(grep "version" package.json | head -1 | awk -F: '{ print $2}' | sed 's/[ ",]//g')
  echo $pkg_name@$pkg_ver
}
# Prompt fragment: user@host in bold cyan, shown only for remote sessions
# (sshd exports SSH_CONNECTION for SSH logins).  A literal "false" value -
# presumably set by the user to opt out - also suppresses the segment.
function host() {
  if [[ "$SSH_CONNECTION" == false ]]; then
    return
  fi
  if [[ -n "$SSH_CONNECTION" ]]; then
    printf '%s\n' '%F{cyan}%B%n@%m%b%f '
  fi
}
# Prompt fragment: the current directory (~-abbreviated by zsh's %~) in cyan.
function dirpath() {
  printf '%s\n' '%F{cyan}%~%f '
}
# Prompt fragment: name (bold yellow) of the Firebase project the firebase
# CLI has associated with the current directory, if any.
function firebase_project_prompts() {
  if [[ -e ~/.config/configstore/firebase-tools.json ]]; then
    # Scrape the configstore JSON for an entry keyed by the quoted cwd.
    # NOTE(review): grepping JSON is fragile, and gsed implies GNU sed on
    # macOS - consider jq; verify before reuse.
    local fb_project=$(grep \"$(pwd)\" ~/.config/configstore/firebase-tools.json | cut -d" " -f2 | gsed 's/[",]//g')
    if [[ -n $fb_project ]]; then
      echo "%B%F{yellow}$fb_project%f%b "
    fi
  fi
}
| true |
672283bb3827d40b9adf17a8ca640288ef48927e | Shell | andytanoko/4.2.x_integration | /GTAS/GridTalk/rollout/application/bin/unix/postgres/pingpostgres.sh | UTF-8 | 180 | 2.65625 | 3 | [] | no_license | # this script ping the postgres server until it is alive
# Poll the bundled PostgreSQL server until pg_ctl reports it is running
# (pg_ctl status exits 0 once a server is up for the data directory).
DBSTATE=1
until [ $DBSTATE -eq 0 ]; do
  # Quote GRIDTALK_HOME so an install path containing spaces still works.
  "$GRIDTALK_HOME/pgsql/bin/pg_ctl" status -D "$GRIDTALK_HOME/db/"
  DBSTATE=$?
  # Pause between probes so a slow server start does not busy-spin the CPU.
  [ $DBSTATE -eq 0 ] || sleep 1
done
| true |
831904dd6437f51ae94abd4d30907e8f5e5a3f8b | Shell | sagarnikam123/learnNPractice | /others/language/nodejs/howTo.sh | UTF-8 | 1,591 | 3.1875 | 3 | [
"MIT"
] | permissive | # packaging nodejs app inside docker container
# create package.json
# Build, save and reload the node-web-app image (command cheat sheet).
cd app
npm install
# Build the image from the Dockerfile in the current directory.
docker build -t sagarnikam123/node-web-app .
# Map container port 8080 to host port 49160 and run detached.
docker run -p 49160:8080 -d sagarnikam123/node-web-app
# saving or exporting to tar
# save will fetch an image
# export will fetch the whole container
docker save sagarnikam123/node-web-app --output node-web-app.tar
# docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy
#docker export sagarnikam123/node-web-app --output="node-web-appExported.tar"
#docker export red_panda > node-web-app.tar
# importing or loading from .tar image
# docker import creates one image from one tarball which is not even an image (just a filesystem you want to import as an image)
# docker load creates potentially multiple images from a tarred repository (since docker save can save multiple images in a tarball).
#docker load < busybox.tar.gz
docker load --input node-web-app.tar
# Save docker image to local in tar
// Declarative Jenkins pipeline: build the node-web-app image, then archive
// it as a tarball in $HOME (companion snippet to the docker commands above).
pipeline {
    environment {
        registry = "sagarnikam123/node-web-app"
        dockerImage = ''
        outputImageTarNameWithExt = "node-web-app.tar"
    }
    agent any
    stages {
        stage('changeDir + Building image') {
            steps{
                script {
                    // Build from the app's Dockerfile; tag with the Jenkins build number.
                    dir('/home/quanta/Documents/git/learnNPractice/others/language/nodejs/app') {
                        dockerImage = docker.build registry + ":$BUILD_NUMBER"
                    }
                }
            }
        }
        stage('Save to .tar file') {
            steps{
                script {
                    // docker save writes the freshly tagged image as a tar archive.
                    sh 'docker save ' + dockerImage.imageName() + ' --output $HOME/' + outputImageTarNameWithExt
                }
            }
        }
    }
}
| true |
8906a93adc5116376a4449233c4a21f448ff7cfc | Shell | 1312543912/bash-shell | /5.sh | UTF-8 | 575 | 3.703125 | 4 | [] | no_license | #! /bin/bash
# The echo command (comments translated from Chinese)
# 1. Printing a plain string
echo "it is a sample test"
# 2. Printing escaped characters by prefixing them with \
echo "\"it sis a sample test \""
# 3. Displaying a variable. read: reads one line from standard input and
#    assigns each field of the line to a shell variable
read name
echo "$name it is test"
# 4. Printing an extra newline. \n: newline; -e: enable escape interpretation
echo -e "ok! \n"
echo "it is a test"
# 5. Printing without a trailing newline. \c: suppress the newline; -e: enable escapes
echo -e "ok ! \c"
echo "it is a test"
# 6. Redirecting echo output into a file: echo xxx > xxx
echo "it is a test" > file
# 7. Displaying the result of executing a command
echo `date`
| true |
4ee762995e662586dd9b4267d4a0750e6c8b3fab | Shell | M-Francia/pyCA-record-script | /recordscript.sh | UTF-8 | 1,696 | 3.71875 | 4 | [] | no_license | #!/bin/bash
######
# pyca-script
#
# This bash script demonstrates recording multiple sources for audio & video
# with a single command. It is intended to be used with the pyca capture
# agent by Lars Kiesow. Based on a script by Jan Koppe <jan.koppe@wwu.de>
#
# @author Mario Francia Rius <mariofranciarius@gmail.com>
# @date 2017-06-09
#
######
######
#
#
# where...
# directory: where to put the recordings, relative to CWD
# name: basename for the files
# time: length of the recording in seconds
#
######
# ARGS
DIRECTORY=$1  # where to put the recordings, relative to CWD
NAME=$2       # basename for the output files
TIME=$3       # length of the recording in seconds
# INTERNAL
pids=""     # pids for capture processes, used to check exit codes
delay_option=""   # NOTE(review): never referenced below - leftover?
######
# Capture webcam 1080
# v4l2 device 0 at 25 fps; stop after $TIME seconds.
ffmpeg -nostats -re -f v4l2 -r 25 -i /dev/video0 -s 1920x1080 -t $TIME \
    "$DIRECTORY/$NAME presenter.mp4" \
    & # run in background
pids+=" $!" # save pid
#
######
######
# Capture desktop debian
# X11 screen grab of display :0.0, offset past a 24-pixel top bar.
ffmpeg -f x11grab -r 25 -t $TIME \
    -s 1280x720 -i :0.0+0,24 -vcodec libx264 "$DIRECTORY/$NAME presentation.mp4" \
    & # run in background
pids+=" $!" # save pid
#
######
######
# Capture audio micro
# ALSA capture from the first sound card.
ffmpeg -f alsa -i hw:0 -t $TIME \
    "$DIRECTORY/$NAME presenter.mp3" \
    & # run in background
pids+=" $!" # save pid
######
######
# Streaming and video mp4 output
# timeout(1) bounds the duration here since this ffmpeg invocation has no -t.
# NOTE(review): the output path is unquoted, unlike the captures above.
timeout $TIME ffmpeg -nostats -f alsa -i hw:0 -f v4l2 -i /dev/video1 -s 1920x1080 -ar 11025 -f flv -r 30 "rtmpURL" $DIRECTORY/$NAME-streaming.mp4 \
    & # run in background
pids+=" $!" # save pid
#
######
# Wait for recordings to finish (running as background processes), then finish
# Exits with the first failing capture's status; otherwise reports each PID.
for p in $pids; do
    if wait $p; then
        echo "$p successful"
    else
        exit $?
    fi
done
| true |
7af2dda8b713b76a8e3b4643d60885cd9823c718 | Shell | JLO79/repo | /guac_install.sh | UTF-8 | 1,041 | 3.25 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# Check if user is root or sudo
if ! [ $(id -u) = 0 ]; then echo "Please run this script as sudo or root"; exit 1 ; fi
# Install the guacamole server, tomcat servlet container and protocol plugins.
apt-get install guacamole
apt-get install tomcat8 -y
apt-get install libguac-client-ssh0 libguac-client-rdp0 libguac-client-vnc0
# Fetch the web-app WAR and deploy it under tomcat.
wget http://sourceforge.net/projects/guacamole/files/current/binary/guacamole-*.war
mv guacamole-0.9.14.war guacamole.war
sudo cp guacamole.war /var/lib/tomcat8/webapps
sudo mkdir /etc/guacamole
sudo mkdir /usr/share/tomcat8/.guacamole
# Server configuration: guacd endpoint plus file-based authentication backed
# by user-mapping.xml.
cat > /etc/guacamole/guacamole.properties << EOF
guacd-hostname: localhost
guacd-port: 4822
user-mapping: /etc/guacamole/user-mapping.xml
auth-provider: net.sourceforge.guacamole.net.basic.BasicFileAuthenticationProvider
basic-user-mapping: /etc/guacamole/user-mapping.xml
EOF
ln -s /etc/guacamole/guacamole.properties /usr/share/tomcat8/.guacamole/
# Start with an empty user mapping, readable only by the tomcat user.
echo "" > /etc/guacamole/user-mapping.xml
chmod 600 /etc/guacamole/user-mapping.xml
chown tomcat8:tomcat8 /etc/guacamole/user-mapping.xml
/etc/init.d/guacd restart
/etc/init.d/tomcat8 restart
| true |
98011e7a8e3eedddb2f4221af10bcc6280c2bd56 | Shell | lovey89/guides | /in_progress/haskell/toggleedit | UTF-8 | 367 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Toggle tutorial.org between "commit" form (plain haskell src blocks) and
# "edit" form (python/runghc src blocks that org-babel can execute).
# Branch on grep's own exit status instead of testing $? afterwards.
if grep -q '#+BEGIN_SRC python :python runghc :results output' tutorial.org; then
  # runghc blocks present: rewrite them back to plain haskell blocks.
  sed -i.bak 's/#+BEGIN_SRC python :python runghc :results output/#+BEGIN_SRC haskell/' tutorial.org
  echo "Ready for commit"
else
  # plain haskell blocks: rewrite them into executable runghc blocks.
  sed -i.bak 's/#+BEGIN_SRC haskell/#+BEGIN_SRC python :python runghc :results output/' tutorial.org
  echo "Ready for edit"
fi
| true |
96dfb4d158297e063f1e81a1438e6983faf40d63 | Shell | cichlidx/ronco_et_al | /variant_calling/src/run_gatk1.sh | UTF-8 | 802 | 2.828125 | 3 | [] | no_license | # m_matschiner Wed Apr 12 00:44:13 CEST 2017
# Make the output directories if they do not exist yet.
mkdir -p ../res/gatk
mkdir -p ../log/gatk
# Get the command line arguments.
specimen_id=$1
chromosome_id=$2
# Get the name of the bam file.
bam_with_relative_path="../res/mapping/${specimen_id}.merged.sorted.dedup.realn.bam"
# Derive per-specimen, per-chromosome output paths ($() instead of backticks;
# expansions quoted so ids containing unusual characters do not word-split).
bam=$(basename "${bam_with_relative_path}")
truncated_bam=${bam%.merged.sorted.dedup.realn.bam}
out="../log/gatk/run_gatk1.${truncated_bam}.${chromosome_id}.out"
gvcf="../res/gatk/${truncated_bam}.${chromosome_id}.g.vcf.gz"
log="../log/gatk/run_gatk1.${truncated_bam}.${chromosome_id}.log"
# Submit the SLURM job only if the per-chromosome gVCF is not yet produced.
if [ ! -f "${gvcf}" ]
then
	sbatch -o "${out}" run_gatk1.slurm ../data/reference/orenil2.fasta "${bam_with_relative_path}" "${gvcf}" "${log}" "${chromosome_id}"
fi
| true |
9b5ce34975b87989e9f4bbd6ca70879a578bed5d | Shell | krosado/HWRF_LPI_TM | /HWRF_2015_real/wrappers/launcher_wrapper | UTF-8 | 1,449 | 3.140625 | 3 | [] | no_license | #!/bin/sh
######################################################################
# launcher wrapper script
######################################################################
######################################################################
# #
# Author: DTC July 2014 #
######################################################################
######################################################################
# Definitions
######################################################################
#---------------------------------------------------------------------
# Global definitions of environment variables
#---------------------------------------------------------------------
set -x
# Pull in the experiment-wide environment (presumably defines USHhwrf,
# HOMEhwrf, EXPT, START_TIME, SID, CASE, startfile - TODO confirm).
. ./global_vars.sh
# Put the HWRF python utilities on PYTHONPATH without clobbering an
# existing value.
if [ -z "$PYTHONPATH" ] ; then
  export PYTHONPATH=${USHhwrf}
else
  export PYTHONPATH=${PYTHONPATH}:${USHhwrf}
fi
# Map the generic job variables onto the names exhwrf_launch.py expects.
YMDH=$START_TIME
STID=$SID
CASE_ROOT=$CASE
######################################################################
# Main
######################################################################
export TOTAL_TASKS='1'
# Launch the HWRF workflow; any extra command-line args pass through ("$@").
${HOMEhwrf}/scripts/exhwrf_launch.py "$YMDH" "$STID" "$CASE_ROOT" "$HOMEhwrf/parm" \
    "config.EXPT=${EXPT}" "config.startfile=${startfile}" \
    "config.HOMEhwrf=$HOMEhwrf" "config.case_root=$CASE_ROOT" \
    "$HOMEhwrf/parm/hwrf_v3.7release.conf" \
    "$@"
| true |
769a93b547d139a8d15bc72282d347469961f00c | Shell | petronny/aur3-mirror | /gcaldaemon/PKGBUILD | UTF-8 | 1,311 | 2.765625 | 3 | [] | no_license | # Contributor: Nicolas Bigaouette <nbigaouette@gmail.com>
# Package identity and upstream metadata (Arch Linux PKGBUILD variables).
pkgname=gcaldaemon
pkgver=1.0_beta16
pkgrel=2
pkgdesc="OS-independent Java program that offers two-way synchronization between Google Calendar and various iCalendar compatible calendar applications."
url="http://gcaldaemon.sourceforge.net/"
depends=('java-runtime')
# ${pkgver/_/-}: the upstream zip names the version with a dash (1.0-beta16).
source=("http://downloads.sourceforge.net/sourceforge/${pkgname}/${pkgname}-linux-${pkgver/_/-}.zip" "${pkgname}.rcd")
install="${pkgname}.install"
arch=('i686' 'x86_64')
license=('GPL')
# Config file preserved across upgrades.
backup=('opt/gcaldaemon/conf/gcal-daemon.cfg')
md5sums=('da5ef2fe0e8bb2a8cf47aeecb4c5a3bc'
         'f17e580811412bf13d0f2d5c9f26c7c0')
# makepkg entry point: stage the extracted GCALDaemon tree into the package
# root, patch its launcher scripts to the final install dir, and install the
# rc.d service script.
# NOTE(review): uses the legacy $startdir/{src,pkg} layout (modern PKGBUILDs
# use $srcdir/$pkgdir) - verify against the target makepkg version.
build() {
  _pkgname=GCALDaemon
  _installdir=/opt/${pkgname}
  mkdir -p ${startdir}/pkg/{opt,etc/rc.d} || return 1
  mv ${startdir}/src/${_pkgname} ${startdir}/pkg${_installdir} || return 1
  rm -fr ${startdir}/pkg/opt/${pkgname}/dev || return 1
  cd ${startdir}/pkg${_installdir}/bin
  chmod +x *.sh
  # Point each helper script's GCALDIR at the installed location.
  sed -i "s|^GCALDIR=.*$|GCALDIR=${_installdir}|g" ${startdir}/pkg${_installdir}/bin/password-encoder.sh
  sed -i "s|^GCALDIR=.*$|GCALDIR=${_installdir}|g" ${startdir}/pkg${_installdir}/bin/standalone-start.sh
  sed -i "s|^GCALDIR=.*$|GCALDIR=${_installdir}|g" ${startdir}/pkg${_installdir}/bin/sync-now.sh
  install -m 755 ${startdir}/src/${pkgname}.rcd ${startdir}/pkg/etc/rc.d/${pkgname}
}
1332730d8b7b09020892a639b95f4b9156236816 | Shell | s-kostyaev/archzfs-apparmor | /install.sh | UTF-8 | 2,427 | 3.421875 | 3 | [] | no_license | #!/bin/sh
# Remember the launch directory; packages are collected back into it later.
CURDIR=$(pwd)
echo "$CURDIR"
# A work-directory argument is mandatory.
if [ $# -eq 0 ]
then
    echo "Usage:"
    echo "    install.sh [WORKDIR]"
    exit 1
fi
# Create the work directory on demand; quote "$1" so paths with spaces work,
# and abort immediately if we cannot enter it.
if [ -d "$1" ]
then
  cd "$1" || exit 1
else
  mkdir -p "$1"
  cd "$1" || exit 1
fi
echo "Updating linux package..."
pacman -Syy
pacman -S linux --noconfirm
echo "done"
echo "Clonning kernel repo..."
su builder -c 'git clone https://github.com/seletskiy/arch-apparmor.git'
echo "done"
cd arch-apparmor/linux-apparmor
echo "Creating kernel package..."
su builder -c 'makepkg --skipinteg -s'
echo "done"
echo "Installing kernel..."
pacman -Ud linux-apparmor-*-x86_64.pkg.tar.xz --noconfirm
pacman -Ud linux-apparmor-headers-*-x86_64.pkg.tar.xz --noconfirm
echo "done"
cd ../..
echo "Clonning zfs repo..."
git clone https://github.com/archzfs/archzfs.git --recursive
echo "done"
echo "Patching archzfs for apparmor..."
cp $CURDIR/apparmor.patch archzfs/
cp $CURDIR/edit-for-apparmor.sh archzfs/
cp $CURDIR/change-kernel-deps-ver.sh archzfs/
chown -R builder archzfs
cd archzfs/
echo "Updating versions..."
NEW_VER=`pacman -Q linux | cut -d' ' -f2 | cut -f1 -d- `
NEW_REL=`pacman -Q linux | cut -d' ' -f2 | cut -f2 -d- `
sed -i 's/^AZB_GIT_KERNEL_VERSION=.*$/AZB_GIT_KERNEL_VERSION="'$NEW_VER'"/' conf.sh
sed -i 's/^AZB_GIT_KERNEL_X32_PKGREL=.*$/AZB_GIT_KERNEL_x32_PKGREL="'$NEW_REL'"/' conf.sh
sed -i 's/^AZB_GIT_KERNEL_X64_PKGREL=.*$/AZB_GIT_KERNEL_x64_PKGREL="'$NEW_REL'"/' conf.sh
sed -i 's/^AZB_BUILD=0/AZB_BUILD=1/' build.sh
./build.sh git update
echo "done"
patch -Np1 -i apparmor.patch
./edit-for-apparmor.sh
./change-kernel-deps-ver.sh
echo "done"
cd spl-utils-git
echo "Creating spl-utils-git package..."
su builder -c 'makepkg -s'
echo "done"
echo "Installing zfs-utils-git package..."
pacman -U spl-utils-git-*x86_64.pkg.tar.xz --noconfirm
echo "done"
cd ../spl-git
echo "Creating spl-git package..."
su builder -c 'makepkg -s'
echo "done"
echo "Installing spl-git..."
pacman -U spl-git-*x86_64.pkg.tar.xz --noconfirm
echo "done"
cd ../zfs-utils-git
echo "Creating zfs-utils-git package..."
su builder -c 'makepkg -s'
echo "done"
echo "Installing zfs-utils-git package..."
pacman -U zfs-utils-git-*x86_64.pkg.tar.xz --noconfirm
echo "done"
cd ../zfs-git
echo "Creating zfs-git package..."
su builder -c 'makepkg -s'
echo "done"
echo "Installing zfs-git..."
pacman -U zfs-git-*x86_64.pkg.tar.xz --noconfirm
echo "done"
cd ../..
echo "Collecting packages"
mkdir $CURDIR/pkg
cp `find -iname "*pkg.tar.xz"` -t $CURDIR/pkg
echo "done"
| true |
23ede4d1c2db7d54de4d2a44589293f8f4aec7c1 | Shell | FauxFaux/debian-control | /c/crispy-doom/crispy-doom_5.4-3_amd64/postinst | UTF-8 | 265 | 2.625 | 3 | [] | no_license | #!/bin/sh
set -e
# Debian maintainer script: (re)register crispy-doom as an alternative for
# the generic "doom" command (priority 50) with its man page as a slave link,
# on both fresh configure and rolled-back upgrades.
if [ "$1" = "abort-upgrade" ] || [ "$1" = "configure" ]
then
	update-alternatives \
		--install /usr/games/doom doom /usr/games/crispy-doom 50 \
		--slave /usr/share/man/man6/doom.6.gz doom.6.gz /usr/share/man/man6/crispy-doom.6.gz
fi
exit 0
| true |
cf703a093a212cba76c6a3f2f0c8b8133aad8a16 | Shell | Galunid/dotfiles-1 | /scripts/rofi/power-menu | UTF-8 | 421 | 3.734375 | 4 | [] | no_license | #! /bin/sh
if [ "$1" = "--help" ] || [ "$1" = "-h" ] || [ "$1" = "help" ]; then
printf "$ power-menu
Issue a power command
"; exit
fi
command -v dmenu >/dev/null || { echo "dmenu is not installed"; exit 1; }
case "$(printf "reboot\nsuspend\nshutdown/poweroff\nlock" | dmenu -lines 4 -p power -width 20)" in
reboot) reboot;;
suspend) systemctl suspend;;
"shutdown/poweroff") poweroff;;
lock) blurlock;;
esac
| true |
65b86286d3dbad491b590767d3ac87d490e2ba23 | Shell | Vinotha16/WIN_ROLLBACK | /templates/linux_actualfacts/centos6/sshprotocol_534_actual.fact | UTF-8 | 498 | 3 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Capture the effective "protocol" line from sshd's resolved runtime config
# (sshd -T) for a root connection test against this host's own address.
cmd=$(sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" | grep '^\s*protocol')
# Emit an empty fact value (signalling non-compliant/unknown) when sshd
# reports no protocol line at all, or when sshd_config forces a Protocol
# other than 2; otherwise report the effective protocol setting as the fact.
if [ $( sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" | grep '^\s*protocol' | wc -l) -eq 0 ] || [ $(sudo grep -Ei '^\s*Protocol\s+[^2].*$' /etc/ssh/sshd_config | wc -l) -ne 0 ]; then
echo "{ \"sshprotocol_534_actual\" : \"\" }"
else
echo "{ \"sshprotocol_534_actual\" : \"${cmd}\" }"
fi
| true |
f4b3b75183d5d47129ef6c6f946a8026565cb771 | Shell | LEAF-BoiseState/WRF_HYDRO-R2 | /run_scripts/create_hydro_namelist.sh | UTF-8 | 10,050 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# *****************************************************************************
# FILE:
# AUTHOR: Matt Masarik (MM)
# VERSION: 0 2019-07-07 MM Base version
#
# PURPOSE: create hydro.namelist with routing parameters, etc.
#
# USAGE: ./create_hydro_namelist.sh <hydro_namelist> [<0 ... 5>]
#
# NOTES:
# Routing options
# (0) LSM - NOAHMP LAND SURFACE
# (1) SUB - SUBSURFACE
# (2) OVR - OVERLAND
# (3) CHL - CHANNEL
# (4) RES - LAKE / RESERVOIR
# (5) GWB - GROUNDWATER / BASEFLOW
#
# * Terrain routing = Overland flow routing, and/or Subsurface flow routing
# *****************************************************************************
# PARAMETERS - DEFAULT / USER
DX_LSM_METERS=1000              # Land Surface Model grid spacing [meters]
DX_ROUTING_TERRAIN_METERS=250   # Terrain Routing Model grid spacing [meters]
DT_ROUTING_TERRAIN_SECONDS=10   # Terrain Routing Model timestep [seconds]
DT_ROUTING_CHANNEL_SECONDS=10   # Channel Routing Model timestep [seconds]
# Input
# Positional args: $1 = hydro.namelist template path; $2.. = routing options
# (integers 0-5, at most six of them -> total arg count 1..7).
routing_options=""
routing_options_flag="false"
num_routing_options=0
hydro_namelist_path=""
if [[ "$#" -lt 1 || "$#" -gt 7 ]]; then
    prog=$(basename $0)
    echo -e "\n\tUSAGE: $prog <hydro_namelist> [<0 ... 5>]\n"
    exit 1
else
    hydro_namelist_path="$1"
    shift
    # Everything after the namelist path is the routing-option list.
    routing_options="$*"
    num_routing_options=$#
    routing_options_flag="true"
fi
# Set flag / display routing components from user input
SUB_FLAG=0   # 1 SUB: Subsurface
OVR_FLAG=0   # 2 OVR: Overland
CHL_FLAG=0   # 3 CHL: Channel
RES_FLAG=0   # 4 RES: Lakes / Reservoirs
GWB_FLAG=0   # 5 GWB: Groundwater / Base flow
echo -e "\n\tROUTING OPTIONS"
echo -e "\t---------------"
echo -e "\t 0: LSM - NoahMP Land Surface Model [selected by default]"
# Walk the requested option list: raise the matching flag and echo a summary
# line; unknown values are reported but do not abort the run.
if [ "$routing_options_flag" == "true" ]; then
    for ro in $routing_options
    do
        if [ $ro -eq 0 ]; then
            :   # null op, 0 is default selected
        elif [ $ro -eq 1 ]; then
            SUB_FLAG=1
            echo -e "\t 1: SUB - Subsurface Flow Routing"
        elif [ $ro -eq 2 ]; then
            OVR_FLAG=1
            echo -e "\t 2: OVR - Overland Flow Routing"
        elif [ $ro -eq 3 ]; then
            CHL_FLAG=1
            echo -e "\t 3: CHL - Channel Routing"
        elif [ $ro -eq 4 ]; then
            RES_FLAG=1
            echo -e "\t 4: RES - Lakes/Reservoir Routing"
        elif [ $ro -eq 5 ]; then
            GWB_FLAG=1
            echo -e "\t 5: GWB - Groundwater/baseflow Routing"
        else
            echo -e "\t ** ${ro}: BAD VALUE - Valid opts: 0-5. **"
        fi
    done
    echo -e "\n"
fi
# ----------------------------------------------------------------------------- *
# GENERAL SIMULATION PARAMETERS
# ----------------------------------------------------------------------------- *
# Specify the restart file write frequency...(minutes)
# A value of -99999 will output restarts on the first day of the month only.
rst_dt=-99999
# ----------------------------------------------------------------------------- *
# (0) LSM - NOAHMP LAND SURFACE
# ----------------------------------------------------------------------------- *
# Netcdf grid of variables passed between LSM and routing components (2d)
# (0 = no output, 1 = output)
# NOTE: No scale_factor/add_offset available
LSMOUT_DOMAIN=1
# ----------------------------------------------------------------------------- *
# (0.5) TERRAIN - SUBSURFACE / OVERLAND
# ----------------------------------------------------------------------------- *
# Restart-overwrite and terrain-grid output only make sense when subsurface
# or overland routing is active, so both toggles follow SUB_FLAG/OVR_FLAG.
lsm_rst_type=0
terrain_routing_grid_output=0
if [[ $SUB_FLAG -eq 1 ]] || [[ $OVR_FLAG -eq 1 ]]; then
    lsm_rst_type=1
    terrain_routing_grid_output=1
fi
# Reset the LSM soil states from the high-res routing restart file
# (1=overwrite, 0=no overwrite)
# NOTE: Only turn this option on if overland or subsurface routing is active!
rst_typ=$lsm_rst_type
# Netcdf grid of terrain routing variables on routing grid (2d):
# (0 = no output, 1 = output)
RTOUT_DOMAIN=$terrain_routing_grid_output
# Specify the terrain routing model timestep...(seconds)
DTRT_TER=$DT_ROUTING_TERRAIN_SECONDS
# Specify the grid spacing of the terrain routing grid...(meters)
DXRT=$DX_ROUTING_TERRAIN_METERS
# Created parameter, grid spacing of the LSM grid (meters), file: geo_em.d0x.nc
# NOTE: this is used along with DXRT to calculate AGGFACTRT
DXLSM=$DX_LSM_METERS
# Integer multiple between land model grid and terrain routing grid...(integer)
# (integer division: assumes DXLSM is an exact multiple of DXRT)
AGGFACTRT=$(( DXLSM / DXRT ))
# ----------------------------------------------------------------------------- *
# (1) SUB - SUBSURFACE
# ----------------------------------------------------------------------------- *
subrtswcrt_val=0
if [ $SUB_FLAG -eq 1 ]; then
    subrtswcrt_val=1
fi
# Switch to activate subsurface routing:
# (0=no, 1=yes)
SUBRTSWCRT=$subrtswcrt_val
# ----------------------------------------------------------------------------- *
# (2) OVR - OVERLAND
# ----------------------------------------------------------------------------- *
ovrtswcrt_val=0
if [ $OVR_FLAG -eq 1 ]; then
    ovrtswcrt_val=1
fi
# Switch to activate surface overland flow routing:
# (0=no, 1=yes)
OVRTSWCRT=$ovrtswcrt_val
# Specify overland flow routing option:
# (1=Steepest Descent - D8, 2=CASC2D - not active)
# NOTE: Currently subsurface flow is only steepest descent
rt_option=1
# ----------------------------------------------------------------------------- *
# (3) CHL - CHANNEL
# ----------------------------------------------------------------------------- *
# Channel routing enables four related outputs at once.
chanrtswcrt_val=0
chrtout_domain_val=0
chanobs_domain_val=0
frxst_pts_out_val=0
if [ $CHL_FLAG -eq 1 ]; then
    chanrtswcrt_val=1
    chrtout_domain_val=1
    chanobs_domain_val=1
    frxst_pts_out_val=1
fi
# Switch to activate channel routing:
# (0=no, 1=yes)
CHANRTSWCRT=$chanrtswcrt_val
# Specify channel routing option:
# (1=Muskingum-reach, 2=Musk.-Cunge-reach, 3=Diff.Wave-gridded)
channel_option=3
# Specify the channel routing model timestep...(seconds)
DTRT_CH=$DT_ROUTING_CHANNEL_SECONDS
# Netcdf point timeseries output at all channel points (1d):
# (0 = no output, 1 = output)
CHRTOUT_DOMAIN=$chrtout_domain_val
# Netcdf point timeseries at forecast points or gage points (defined in Routelink):
# (0 = no output, 1 = output at forecast points or gage points.)
CHANOBS_DOMAIN=$chanobs_domain_val
# Netcdf grid of channel streamflow values (2d):
# (0 = no output, 1 = output)
# NOTE: Not available with reach-based routing, hence the channel_option=3 check.
chrtout_grid_val=0
if [[ $CHL_FLAG -eq 1 ]] && [[ $channel_option -eq 3 ]]; then
    chrtout_grid_val=1
fi
CHRTOUT_GRID=$chrtout_grid_val
# ASCII text file of forecast points or gage points (defined in Routelink):
# (0 = no output, 1 = output)
frxst_pts_out=$frxst_pts_out_val
# ----------------------------------------------------------------------------- *
# (4) RES - LAKE / RESERVOIR
# ----------------------------------------------------------------------------- *
outlake_val=0
if [ $RES_FLAG -eq 1 ]; then
    outlake_val=1
fi
# Netcdf grid of lake values (1d):
# (0 = no output, 1 = output)
outlake=$outlake_val
# ----------------------------------------------------------------------------- *
# (5) GWB - GROUNDWATER / BASEFLOW
# ----------------------------------------------------------------------------- *
gwbaseswcrt_val=0
output_gw_val=0
if [ $GWB_FLAG -eq 1 ]; then
    gwbaseswcrt_val=1   # [1, 2] default=1 (exp. bucket)
    output_gw_val=1
fi
gw_restart_val=0        # [0, 1] default=0 (cold start from tbl)
# Switch to activate baseflow bucket model:
# (0=none, 1=exp. bucket, 2=pass-through)
GWBASESWCRT=$gwbaseswcrt_val    # default=1, exp bucket, when gwb on
# Specify baseflow/bucket model initialization:
# (0=cold start from table, 1=restart file)
GW_RESTART=$gw_restart_val      # default=0, cold start from tbl, when gwb on
# Netcdf GW output:
# (0 = no output, 1 = output)
output_gw=$output_gw_val
# ----------------------------------------------------------------------------- *
# MAIN - CREATE HYDRO.NAMELIST
# ----------------------------------------------------------------------------- *
# Substitute each placeholder token in the namelist template in place.
# "$hydro_namelist_path" is quoted throughout so a template path containing
# spaces does not word-split.
sed -i'' "s/rstdt/$rst_dt/g" "$hydro_namelist_path"
sed -i'' "s/lsmoutdomain/$LSMOUT_DOMAIN/g" "$hydro_namelist_path"
sed -i'' "s/lsmrsttype/$rst_typ/g" "$hydro_namelist_path"
sed -i'' "s/chrtoutdomain/$CHRTOUT_DOMAIN/g" "$hydro_namelist_path"    # this one, must precede
sed -i'' "s/rtoutdomain/$RTOUT_DOMAIN/g" "$hydro_namelist_path"        # this one ("rtoutdomain" is a substring of "chrtoutdomain")
sed -i'' "s/dtrtter/$DTRT_TER/g" "$hydro_namelist_path"
sed -i'' "s/dxrt/$DXRT/g" "$hydro_namelist_path"
sed -i'' "s/aggfactrt/$AGGFACTRT/g" "$hydro_namelist_path"
sed -i'' "s/subrtswcrt/$SUBRTSWCRT/g" "$hydro_namelist_path"
sed -i'' "s/ovrtswcrt/$OVRTSWCRT/g" "$hydro_namelist_path"
sed -i'' "s/rtoption/$rt_option/g" "$hydro_namelist_path"
sed -i'' "s/chanrtswcrt/$CHANRTSWCRT/g" "$hydro_namelist_path"
sed -i'' "s/channeloption/$channel_option/g" "$hydro_namelist_path"
sed -i'' "s/dtrtch/$DTRT_CH/g" "$hydro_namelist_path"
sed -i'' "s/chanobsdomain/$CHANOBS_DOMAIN/g" "$hydro_namelist_path"
sed -i'' "s/chrtoutgrid/$CHRTOUT_GRID/g" "$hydro_namelist_path"
sed -i'' "s/frxstptsout/$frxst_pts_out/g" "$hydro_namelist_path"
sed -i'' "s/outlakeval/$outlake/g" "$hydro_namelist_path"
sed -i'' "s/gwbaseswcrt/$GWBASESWCRT/g" "$hydro_namelist_path"
sed -i'' "s/gwrestart/$GW_RESTART/g" "$hydro_namelist_path"
sed -i'' "s/outputgw/$output_gw/g" "$hydro_namelist_path"
echo -e "\thydro.namelist path: $hydro_namelist_path\n"
exit
| true |
9a59cf9cdd0f11c81842403edd8c560f18acf373 | Shell | brianisme/updater-hubot | /scripts/airplay.sh | UTF-8 | 485 | 2.734375 | 3 | [] | no_license | #!/bin/sh
# choosespeak='select (row 1 of table 1 of scroll area 1 of tab group 1 of window "Sound" whose value of text field 1 is "'$1'")'
# echo $choosespeak
# osascript -e 'tell application "System Preferences"' -e 'reveal anchor "output" of pane id "com.apple.preference.sound"' -e 'activate' -e 'tell application "System Events"' -e 'tell process "System Preferences"' -e $choosespeak -e 'end tell' -e 'end tell' -e 'quit' -e 'end tell'
# Delegate to the AppleScript helper (the inline variant above is kept for
# reference), passing the requested output-device name as its only argument.
osascript scripts/choose_speaker.scpt "$1"
| true |
1ac43ca343d34e4bc78e2899aa4511a739348fd6 | Shell | fusionpbx/fusionpbx-install.sh | /freebsd/install.sh | UTF-8 | 1,276 | 3.09375 | 3 | [] | no_license | #!/bin/sh
#move to script directory so all relative paths work
cd "$(dirname "$0")"
#includes
. ./resources/config.sh
. ./resources/colors.sh
. ./resources/environment.sh
#Update to latest packages
verbose "Update installed packages"
pkg upgrade --yes
#Update the ports
# NOTE: "[ .$var = .'true' ]" keeps the test valid under plain sh even when
# the variable is unset (both operands then begin with a literal dot).
if [ .$portsnap_enabled = .'true' ]; then
	if [ -f /usr/ports/UPDATING ]; then
		portsnap fetch && portsnap update
		echo "/usr/ports updated"
	else
		portsnap fetch extract
		echo "/usr/ports added"
	fi
fi
#PF - Packet Filter
if [ .$firewall_enabled = .'true' ]; then
	resources/pf.sh
fi
#sngrep
if [ .$sngrep_enabled = .'true' ]; then
	resources/sngrep.sh
fi
#FusionPBX
# NOTE(review): this step is gated on $nginx_enabled rather than a dedicated
# fusionpbx flag - confirm that is intended.
if [ .$nginx_enabled = .'true' ]; then
	resources/fusionpbx.sh
fi
#NGINX web server
if [ .$nginx_enabled = .'true' ]; then
	resources/nginx.sh
fi
#FreeSWITCH
if [ .$switch_enabled = .'true' ]; then
	resources/switch.sh
fi
#Postgres
if [ .$database_enabled = .'true' ]; then
	resources/postgresql.sh
fi
#restart services
if [ .$nginx_enabled = .'true' ]; then
	service php-fpm restart
	service nginx restart
fi
if [ .$fail2ban_enabled = .'true' ]; then
	service fail2ban restart
fi
#Fail2ban
if [ .$fail2ban_enabled = .'true' ]; then
	resources/fail2ban.sh
fi
#add the database schema, user and groups
resources/finish.sh
| true |
cb5650eb736c0d912291e44bad12f6d1a6a15243 | Shell | hengqujushi/dotfiles | /shell/shellrc.d/applications.sh | UTF-8 | 2,218 | 3.3125 | 3 | [] | no_license | #------------------------------
# Program shortcuts
#------------------------------
# NOTE1: &! detaches and disowns process. The shell no longer keeps track of it.
# NOTE2: rifle always expects arguments
# NOTE3: rifle either completely detaches or runs a terminal program in-place.
# NOTE4: The following commands are aware of the type of terminal they are on. For
# example fm launches GTK file manager on any terminal except tty's or remote logins
# file openers
op() {
xdg-open "$@" &> /dev/null &!
}
rf() {
rifle "$@"
}
# terminal editor
vi() {
eval "local CMD=($EDITOR)"
${CMD[@]} "$@";
}
# emacs
ee() {
eval "local CMD=($EMACS)"
if [ "$1" ]; then sdrun ${CMD[@]} "$@"
else sdrun ${CMD[@]} "$PWD"
fi
}
# emacs project
ep() {
eval "local CMD=($EMACS)"
if [ "$1" ]; then sdrun ${CMD[@]} -e "(ab2/find-file-in-project \"$1\")"
else sdrun ${CMD[@]} -e "(ab2/find-file-in-project \"$PWD\")"
fi
}
# open magit
mg() {
eval "local CMD=($EMACS)"
if [ "$1" ]; then sdrun ${CMD[@]} -e "(abdo-vcs-main \"$1\")"
else sdrun ${CMD[@]} -e "(abdo-vcs-main \"$PWD\")"
fi
}
# open tmux session
tx() {
if [ "$TMUX" ]; then tmux new-window
else tmux_session default
fi
}
# detach from tmux
dt() {
if [ "$TMUX" ]; then tmux detach-client
fi
}
# if inside tmux close window and detach, otherwise just exit
cl() {
if [ "$TMUX" ]; then tmux unlink-window -k\; detach-client
else exit 0
fi
}
# new terminal
tm() {
eval "local CMD=($TERMCMD)"
if [ "$1" ]; then sdrun ${CMD[@]} -d "$1"
else sdrun ${CMD[@]} -d "$PWD"
fi
}
# ranger session
rg() {
eval "local CMD=($TERMCMD)"
if [ "$1" ]; then sdrun ${CMD[@]} -e ranger -d "$1"
else sdrun ${CMD[@]} -e ranger -d "$PWD"
fi
}
# vifm as an awesome dropdown
fm() {
echo "ddshow('app:vifm-dropdown', true)" | awesome-client
if [ "$1" ]; then local dir=$(realpath "$1")
else local dir=$(realpath "$PWD")
fi
# TODO: remove this once vifm is socket-activated
sleep 0.5
vifm --remote -c "cd '$dir'"
}
| true |
a642b1cd1b6b3ee11766619b45b2f654b059b605 | Shell | dipanjanS/fabric8-analytics-deployment | /openshift/env-template.sh | UTF-8 | 2,981 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | # This is a deployment configuration template with default values used to configure dev deployment of fabric8-analytics.
# It is recommended to copy this file and then modify it:
# $ cp env-template.sh env.sh
# Every value below that reads 'not-set' MUST be replaced before the
# resulting env.sh is sourced; the remaining values are sane dev defaults.
# (Required) Dev cluster
# Your dev cluster username
export OC_USERNAME='not-set'
# Your dev cluster password
export OC_PASSWD='not-set'
# Export dev cluster token
export OC_TOKEN='not-set'
# (Required) AWS credentials
export AWS_ACCESS_KEY_ID='not-set'
export AWS_SECRET_ACCESS_KEY='not-set'
# PostgreSQL/RDS password to be used
# You can generate a good password with `pwgen`:
# $ pwgen -1cs 32
export RDS_PASSWORD='not-set'
# (Required) Your OpenShift.io API token. You can find it on your profile page when you log in to https://openshift.io.
export RECOMMENDER_API_TOKEN='not-set'
# (Required) GitHub
# Comma-separated list of tokens for talking to GitHub API. Having just single token here is enough.
# You can generate a token here: https://github.com/settings/tokens
export GITHUB_API_TOKENS='not-set'
# (Required) Get your Libraries.io API token here: https://libraries.io/account
export LIBRARIES_IO_TOKEN='not-set'
# Following section describes how to setup authentication for the jobs service. Feel free to skip it, if you don't need the service.
#
# Create a new GitHub OAuth App here: https://github.com/settings/developers
# You will need to provide homepage and callback URL; for the dev cluster, use following values (replace OC_USERNAME):
# "Homepage URL" is "http://bayesian-jobs-${OC_USERNAME}-fabric8-analytics.dev.rdu2c.fabric8.io/"
# "Authorization callback URL" is "http://bayesian-jobs-${OC_USERNAME}-fabric8-analytics.dev.rdu2c.fabric8.io/api/v1/authorized"
# In return, you'll get GITHUB_OAUTH_CONSUMER_KEY and GITHUB_OAUTH_CONSUMER_SECRET from GitHub.
# Client ID is GITHUB_OAUTH_CONSUMER_KEY
# Client Secret is GITHUB_OAUTH_CONSUMER_SECRET
export GITHUB_OAUTH_CONSUMER_KEY='not-set'
export GITHUB_OAUTH_CONSUMER_SECRET='not-set'
# ----------------------------------------------------------------------------------
# Non-essential configuration options follow. You likely don't need to touch these.
# Deployment prefix
# Defaults to the cluster username set above; keep the default unless you
# deploy several stacks under one account.
export DEPLOYMENT_PREFIX=${DEPLOYMENT_PREFIX:-${OC_USERNAME}}
# Keycloak
export KEYCLOAK_URL='https://sso.openshift.io'
# Flask
export FLASK_APP_SECRET_KEY='notsosecret'
# Dev cluster
export OC_URI='devtools-dev.ext.devshift.net:8443'
export OC_PROJECT="${OC_USERNAME}-fabric8-analytics"
# AWS
export AWS_DEFAULT_REGION='us-east-1'
## RDS configuration variables are used to provision RDS instance
export RDS_ENDPOINT=''
export RDS_INSTANCE_NAME="${OC_USERNAME}-bayesiandb"
export RDS_INSTANCE_CLASS='db.t2.micro'
export RDS_DBNAME='postgres'
export RDS_DBADMIN='coreapi'
export RDS_STORAGE=5
export RDS_SUBNET_GROUP_NAME='dv peering az'
export RDS_ARN='not-set'
# URLs against which to run E2E tests
export F8A_API_URL='not-set'
export F8A_JOB_API_URL='not-set'
# Sentry URL
export SENTRY_DSN=''
| true |
7f1e3707ab3aefd0657a6135b9873fee9fa576df | Shell | capsl0cker/myapplet | /live-migration/disk_img.sh | UTF-8 | 571 | 3.65625 | 4 | [] | no_license | #!/bin/bash
#set -x
# Attach the image file ($1) to /dev/loop0, map its partitions with kpartx,
# and activate the VolGroup00 LVM volume group found inside it.
# The duplicated lvscan shows the volume state before and after activation.
setup(){
	sudo losetup /dev/loop0 $1
	sudo kpartx -av /dev/loop0
	sudo pvscan
	sudo lvscan
	sudo lvchange -ay VolGroup00
	sudo lvscan
}
# Reverse of setup(): deactivate VolGroup00, unmap the partitions and
# detach /dev/loop0.
remove(){
	sudo lvchange -an VolGroup00
	sudo kpartx -d /dev/loop0
	sudo losetup -d /dev/loop0
}
# Print the command synopsis and terminate the script (status 0).
usage(){
	echo -e "usage:"
	echo -e "$0 -i image_name:	install image"
	echo -e "$0 -r :remove image"
	exit 0
}
# Dispatch on the first flag: -i <image> attaches an image, -r detaches it;
# anything else (or wrong argument counts) falls through to usage().
main(){
# The case word is quoted for clarity; arguments passed on to helpers are
# quoted so image paths containing whitespace survive intact.
case "$1" in
	-i)
	if [ $# -lt 2 ]
	then
		usage
	else
		setup "$2"
	fi
	;;
	-r)
	if [ $# -gt 1 ]
	then
		usage
	else
		remove
	fi
	;;
	*)
	usage
esac
}
# "$@" forwards the script's arguments verbatim; the original unquoted $@
# re-split any argument containing whitespace.
main "$@"
| true |
86a9a96043546990261c3c5ace62c7fe1a3daea2 | Shell | Magisk-Modules-Repo/logcat | /system/bin/catlog | UTF-8 | 1,931 | 2.875 | 3 | [] | no_license | #!/system/bin/sh
# Capture an Android logcat for a fixed duration into /data/local/catlog.
# Optional $1: capture duration in seconds; anything not starting with a
# digit falls back to the 60 second default.
case "$1" in
	[0-9]*)
	dura=${1}
	;;
	*)
	dura=60
	;;
esac
# Tool version stamp written into the log header.
lc_ver="v17"
lc_vercode=17
# Flavour quotes; one is picked pseudo-randomly and written to the header.
msg[0]="Now just a little more only just a little more..."
msg[1]="It's not a right that I'm due, my duty that is must have been kept..."
msg[2]="Since one day you will disappear, I'll keep every part of you..."
msg[3]="Yes we are time fliers scaling the walls of time climbers, tired of playing hide and seek with time..."
msg[4]="Wherever you are in the world, I'll search for you..."
msg[5]="Mitsuha. Mitsuha. Mitsuha, your name is Mitsuha..."
msg[6]="Someone dear to me. I don't want to forget. I shouldn't forget!"
# Output file: /data/local/catlog/catlog-<timestamp>-<duration>s.log
time=$(date +%Y-%m-%d-%H-%M-%S)
path=/data/local/catlog
file=$path/catlog-${time}-${dura}s.log
# Index 0-6 into msg[]; the +100000000 keeps the value positive.
num=$(($RANDOM+100000000))
rand=$(($num%7))
# Device build properties collected for the log header.
android_sdk=`getprop ro.build.version.sdk`
build_desc=`getprop ro.build.description`
product=`getprop ro.build.product`
manufacturer=`getprop ro.product.manufacturer`
brand=`getprop ro.product.brand`
fingerprint=`getprop ro.build.fingerprint`
arch=`getprop ro.product.cpu.abi`
device=`getprop ro.product.device`
android=`getprop ro.build.version.release`
build=`getprop ro.build.id`
mkdir -p $path
# Write the human-readable header before the raw log output.
echo "--------- beginning of head">>$file
echo "Log Catcher by MlgmXyysd">>$file
echo "Version: ${lc_ver} (${lc_vercode}) (Cat log)">>$file
echo "QQ chat group 855219808">>$file
echo ${msg[$rand]}>>$file
echo "--------- beginning of system info">>$file
echo "Android version: ${android}">>$file
echo "Android sdk: ${android_sdk}">>$file
echo "Android build: ${build}">>$file
echo "Fingerprint: ${fingerprint}">>$file
echo "ROM build description: ${build_desc}">>$file
echo "Architecture: ${arch}">>$file
echo "Device: ${device}">>$file
echo "Manufacturer: ${manufacturer}">>$file
echo "Brand: ${brand}">>$file
echo "Product: ${product}">>$file
# Capture in the background, wait out the requested duration, then stop the
# capture by matching the unique filterspec in the logcat command line.
logcat -v long *:V logcatcher-catlog-mlgmxyysd:S>>$file &
sleep ${dura}s
pkill -f logcatcher-catlog-mlgmxyysd:S
| true |
fc77a8a718ffb6e912aab38736225e65384aa28a | Shell | ensonmj/dotfiles | /profile/.profile | UTF-8 | 12,135 | 3.125 | 3 | [] | no_license | #!/bin/bash
# ^ For shellcheck's happiness
# ~/.profile: executed by the command interpreter for login shells.
# This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login
# exists.
# see /usr/share/doc/bash/examples/startup-files for examples.
# the files are located in the bash-doc package.
# the default umask is set in /etc/profile; for setting the umask
# for ssh logins, install and configure the libpam-umask package.
#umask 022
# if running bash
#if [ -n "$BASH_VERSION" ]; then
# # include .bashrc if it exists
# if [ -f "$HOME/.bashrc" ]; then
# . "$HOME/.bashrc"
# fi
#fi
# Alias {{{
# enable color support of ls and also add handy aliases
if command -v dircolors &> /dev/null ; then
    test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
    alias ls='ls --color=auto'
    #alias dir='dir --color=auto'
    #alias vdir='vdir --color=auto'
    alias grep='grep --color=auto'
    alias fgrep='fgrep --color=auto'
    alias egrep='egrep --color=auto'
fi
# NOTE(review): this unconditionally redefines the `ls` alias and therefore
# drops the --color=auto variant set above when dircolors exists -- confirm
# whether colored ls output is still wanted.
alias ls='ls -F --show-control-chars'
alias la='ls -a'
alias ll='ls -l'
alias lla='ls -al'
# Open vim only to print the detected file encoding of the given file.
alias vimenc='vim -c '\''let $enc=&fileencoding | execute "!echo Encoding: $enc" | q'\'''
#alias tmux='tmux -2'
# Add an "alert" alias for long running commands.  Use like so:
#   sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# }}}
# Environment {{{
# GDK_BACKEND=wayland
# CLUTTER_BACKEND=wayland
# SDL_VIDEODRIVER=wayland
#performance acceleration for sort etc.
#export LC_ALL=C
#zsh PROMPT can be disrupted by "LC_ALL=C"
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export LESSCHARSET=utf-8
export EDITOR=vim
unset SSH_ASKPASS
# colored GCC warnings and errors
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# enable public access for X11, this should just set on ssh client side {{{
# export DISPLAY=$(ip route list default | awk '{print $3}'):0
# export DISPLAY=$(awk '/nameserver / {print $2; exit}' /etc/resolv.conf 2>/dev/null):0
# export DISPLAY=$(host `hostname` | grep -oP '(\s)\d+(\.\d+){3}' | tail -1 | awk '{ print $NF }' | tr -d '\r'):0
# export LIBGL_ALWAYS_INDIRECT=1
# }}}
# set PATH so it includes user's private bin if it exists
[[ -d $HOME/.local/bin ]] && export PATH="$HOME/.local/bin:$PATH"
# Add the bin/ directory of every package installed under ~/.opt.
if [ -d $HOME/.opt ]; then
    for SUB in $HOME/.opt/*; do
        [[ -d $SUB/bin ]] && export PATH="$SUB/bin:$PATH"
    done
fi
[[ -d /snap/bin ]] && export PATH="$PATH:/snap/bin"
[[ -d $HOME/.cabal/bin ]] && export PATH="$PATH:$HOME/.cabal/bin" #pandoc
# NOTE(review): putting "." first in PATH lets a malicious binary in the
# current directory shadow system commands -- consider dropping this line.
export PATH=.:$PATH
#tmux
if [ -z "$TMUX" ]; then
    #run this outside of tmux!
    if [ -n "$DISPLAY" ]; then
        for name in `tmux ls -F '#{session_name}' 2>/dev/null`; do
            tmux setenv -g -t $name DISPLAY $DISPLAY #set display for all sessions
        done
    fi
else
    #inside tmux!
    export TERM=screen-256color
fi
# }}}
# Load scripts {{{
# Each tool below is sourced/configured only when its install location exists.
#tmuxinator
[[ -s $HOME/.tmuxinator/scripts/tmuxinator ]] && source $HOME/.tmuxinator/scripts/tmuxinator
#nix
# [[ -e $HOME/.nix-profile/etc/profile.d/nix.sh ]] && source $HOME/.nix-profile/etc/profile.d/nix.sh
#homebrew
[[ -d $HOME/.linuxbrew ]] && eval "$($HOME/.linuxbrew/bin/brew shellenv)"
[[ -d /home/linuxbrew/.linuxbrew ]] && eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
# Prefer GNU coreutils over the system tools when brew has them installed.
if command -v brew &> /dev/null && brew list | grep coreutils > /dev/null ; then
    PATH="$(brew --prefix coreutils)/libexec/gnubin:$PATH"
    MANPATH="$(brew --prefix coreutils)/libexec/gnuman:$MANPATH"
fi
#rust
[[ -f $HOME/.cargo/env ]] && source $HOME/.cargo/env
#golang
if [ -d $HOME/go ]; then
    export GOPATH=$HOME/go
    export PATH=$PATH:$HOME/go/bin
fi
#nvm
if [ -d $HOME/.nvm ]; then
    source $HOME/.nvm/nvm.sh
    #nvm doesn't seem to set $NODE_PATH automatically
    NP=$(which node)
    BP=${NP%bin/node} #this replaces the string 'bin/node'
    LP="${BP}lib/node_modules"
    export NODE_PATH="$LP"
fi
#rbenv
if [ -d $HOME/.rbenv ]; then
    export PATH="$HOME/.rbenv/bin:$PATH"
    eval "$(rbenv init -)"
fi
#dotenv
# Export every VAR=value line from ~/.env (comments and non-assignments are
# skipped; values are sanitised before the eval).
# https://gist.github.com/mihow/9c7f559807069a03e302605691f85572
# s/\r//g remove carriage return(\r)
# "s/'/'\\\''/g" replaces every single quote with '\'', which is a trick sequence in bash to produce a quote :)
# "s/=\(.*\)/='\1'/g" converts every a=b into a='b'
[ -f $HOME/.env ] && while read -r LINE; do
    if [[ $LINE != '#'* ]] && [[ $LINE == *'='* ]]; then
        ENV_VAR=$(echo $LINE | sed -e 's/\r//g' -e "s/'/'\\\''/g")
        eval "export $ENV_VAR"
    fi
done < $HOME/.env
# }}}
# self-defined functions {{{
# Print a coarse operating-system family name derived from bash's $OSTYPE.
# Unrecognised values are reported verbatim as "unknown: <value>".
function ostype() {
  local family
  case "$OSTYPE" in
    solaris)     family="SOLARIS" ;;
    darwin*)     family="OSX" ;;
    linux*)      family="LINUX" ;;
    bsd*)        family="BSD" ;;
    msys|cygwin) family="WINDOWS" ;;
    *)           family="unknown: $OSTYPE" ;;
  esac
  printf '%s\n' "$family"
}
# env {{{
# Run a command with the VAR=value lines from ./.env prepended to its
# environment: `env_do make test` behaves like `env A=1 B=2 make test`.
# $(cat .env) is deliberately unquoted so each line becomes its own word
# (values therefore must not contain whitespace).  "$@" is quoted -- the
# original's bare $@ re-split command arguments containing spaces.
function env_do() {
  env $(cat .env) "$@"
}
# Export every VAR=value line of the env file given as $1 into the current
# shell.  Comment lines ('#'...) and lines without '=' are skipped; carriage
# returns are stripped and single quotes escaped before the eval.
function env_load() {
  # https://stackoverflow.com/questions/12916352/shell-script-read-missing-last-line
  local LINE ENV_VAR
  while read -r LINE || [ -n "$LINE" ]; do
    if [[ $LINE != '#'* ]] && [[ $LINE == *'='* ]]; then
      # printf keeps the line verbatim; the original's unquoted `echo $LINE`
      # word-split and glob-expanded it before it ever reached sed.
      ENV_VAR=$(printf '%s\n' "$LINE" | sed -e 's/\r//g' -e "s/'/'\\\''/g")
      eval "export $ENV_VAR"
    fi
  done < "$1"
}
#}}}
# path {{{
# Prepend the given directory to PATH unless it is already listed.
function path_add() {
  local entry=":${1}:"
  if [[ ":${PATH}:" != *"${entry}"* ]]; then
    # Prepending path in case a system-installed needs to be overridden
    export PATH="$1:$PATH"
  fi
}
# Safely remove the given entry from $PATH
# https://unix.stackexchange.com/a/253760/143394
# The while loop re-runs the case until no occurrence of $1 remains, so
# duplicate entries are all removed.  The case arms handle, in order:
# PATH consisting of only the entry (PATH is unset entirely), the entry at
# the front, the entry at the end, the entry in the middle, and finally
# "not present" which terminates the loop via `false`.
function path_del() {
  while case $PATH in
    "$1") unset PATH; false;;
    "$1:"*) PATH=${PATH#"$1:"};;
    *":$1") PATH=${PATH%":$1"};;
    *":$1:"*) PATH=${PATH%%":$1:"*}:${PATH#*":$1:"};;
    *) false;;
  esac
  do
    :
  done
}
#}}}
# json {{{
# Strip "//"-style comments (and any whitespace preceding them) from the
# file given as $1, then drop lines that are empty afterwards; result goes
# to stdout.  A single sed with two expressions replaces the original
# two-process pipeline, and "$1" is quoted against word splitting.
function trim_comment() {
  sed -e "s|[ \t]*//.*$||" -e "/^$/d" "$1"
}
# Concatenate the top-level arrays of two comment-stripped JSON files into a
# single array on stdout.  Relies on the sibling trim_comment() helper and
# the external `jq` tool; jq's -s (slurp) wraps both inputs in one array and
# [.[][]] flattens their elements together.
function merge_json() {
  jq -s '[.[][]]' <(trim_comment $1) <(trim_comment $2)
}
#}}}
# proxy {{{
# https://gist.github.com/yougg/5d2b3353fc5e197a0917aae0b3287d64
# Export the standard proxy environment variables (lower- and upper-case
# spellings) for HTTP/HTTPS/FTP/rsync plus a no_proxy exclusion list.
# $1 - protocol (default socks5), $2 - host, $3 - port.
function proxy() {
  local PROTO="${1:-socks5}" # socks5(local DNS), socks5h(remote DNS), http, https
  local HOST="${2:-127.0.0.1}"
  local PORT="${3:-8080}"
  local ADDR="$PROTO://$HOST:$PORT"
  export http_proxy=$ADDR https_proxy=$ADDR ftp_proxy=$ADDR rsync_proxy=$ADDR all_proxy=$ADDR
  export HTTP_PROXY=$ADDR HTTPS_PROXY=$ADDR FTP_PROXY=$ADDR RSYNC_PROXY=$ADDR ALL_PROXY=$ADDR
  # Exclude loopback and the RFC1918 private ranges.  CIDR notation replaces
  # the original brace expansions, which enumerated every single address --
  # the 10/8 list alone produced ~16.7 million words and effectively hung
  # the shell.  Modern curl (>= 7.86) and Go's net/http honour CIDR entries
  # in no_proxy.
  no_proxy="127.0.0.1,localhost,.localdomain.com"
  no_proxy="$no_proxy,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
  export no_proxy
  export NO_PROXY="$no_proxy"
}
# Undo proxy(): clear every proxy-related variable in both spellings.
function noproxy() {
  local name
  for name in http_proxy https_proxy ftp_proxy rsync_proxy all_proxy no_proxy; do
    unset "$name" "${name^^}"
  done
}
# Append http/https proxy settings for wget to ~/.wgetrc.
# $1 - proxy host (default 127.0.0.1), $2 - port (default 8080).
function wgetproxy() {
  local HOST="${1:-127.0.0.1}"
  local PORT="${2:-8080}"
  local ADDR="http://$HOST:$PORT"
  printf 'http_proxy=%s\nhttps_proxy=%s\n' "$ADDR" "$ADDR" >> ~/.wgetrc
}
# Remove the proxy lines that wgetproxy() appended to ~/.wgetrc.
function nowgetproxy() {
  sed -i -e '/http_proxy/d' -e '/https_proxy/d' ~/.wgetrc
}
# Route git traffic through a proxy: sets git's http/https proxy for the
# http(s) protocols and a netcat-based ProxyCommand for git-over-ssh.
# $1 - protocol (default http), $2 - host, $3 - port (default 1080).
# NOTE(review): http.sslverify is globally disabled below, which weakens
# TLS for ALL repositories -- confirm this is intended.
# NOTE(review): SSH_PROXY_PROTO is not declared local and leaks into the
# calling shell.
function gitproxy() {
  local PROTO="${1:-http}" # http, https
  local HOST="${2:-127.0.0.1}"
  local PORT="${3:-1080}"
  local ADDR="$PROTO://$HOST:$PORT"
  SSH_PROXY_PROTO="-X 5" # socks 5 for default
  if [ "$PROTO" == "http" -o "$PROTO" == "https" ]; then
    SSH_PROXY_PROTO="-X connect"
    # set git http(s) proxy
    git config --global http.sslverify false
    git config --global http.proxy "$ADDR"
    git config --global https.proxy "$ADDR"
  fi
  # set git ssh proxy
  local SSH_PROXY="ProxyCommand=nc $SSH_PROXY_PROTO -x $HOST:$PORT %h %p"
  git config --global core.sshCommand "ssh -o '$SSH_PROXY'"
  # only for 'github.com'
  # git config --global http.https://github.com.proxy "$ADDR"
  # replace "git://" with "ssh://"
  # git config --global url.'ssh://git@github.com/'.insteadOf 'git://github.com/'
}
# Undo gitproxy(): drop the global proxy and sshCommand settings.
# NOTE(review): http.sslverify (set to false by gitproxy) is NOT restored
# here -- confirm whether it should be unset as well.
function nogitproxy() {
  git config --global --unset http.proxy
  git config --global --unset https.proxy
  git config --global --unset core.sshCommand
  # git config --global --unset http.https://github.com.proxy
  # git config --global --remove-section url.'ssh://git@github.com/'
}
# Open an ssh connection to $1 through a SOCKS5 proxy ($2:$3, defaulting to
# 127.0.0.1:22) using netcat as the ProxyCommand.  The commented variants
# document equivalent nc/connect invocations for http and socks5, with and
# without a proxy user ($4 is only consumed by those variants).
function sshproxy() {
  local TARGET_ADDR="${1}"
  local HOST="${2:-127.0.0.1}"
  local PORT="${3:-22}"
  local USER="${4}"
  # # use 'nc' with http protocol
  # local SSH_PROXY="ProxyCommand=nc -X connect -x $HOST:$PORT %h %p"
  # # use 'nc' with http protocol and proxy user
  # local SSH_PROXY="ProxyCommand=nc -X connect -x $HOST:$PORT -P $USER %h %p"
  # use 'nc' with socks5 protocol
  local SSH_PROXY="ProxyCommand=nc -X 5 -x $HOST:$PORT %h %p"
  # # use 'connect' with http protocol
  # local SSH_PROXY="ProxyCommand=connect -H $HOST:$PORT %h %p"
  # # use 'connect' with http protocol and proxy user
  # local SSH_PROXY="ProxyCommand=connect -H $USER@$HOST:$PORT %h %p"
  # # use 'connect' with HTTP_PROXY environment
  # local SSH_PROXY="ProxyCommand=connect -h %h %p"
  # # use 'connect' with socks5 protocol
  # local SSH_PROXY="ProxyCommand=connect -S $HOST:$PORT %h %p"
  # # use 'connect' with socks5 protocol and user
  # local SSH_PROXY="ProxyCommand=connect -S $USER@$HOST:$PORT %h %p"
  # # use 'connect' with SOCKS5_SERVER environment
  # export SOCKS5_SERVER="$HOST:$PORT"
  # export SOCKS5_USER="$USER"
  # export SOCKS5_PASSWD="$PASS"
  # local SSH_PROXY="ProxyCommand=connect -s %h %p"
  # connect to ssh server over proxy
  ssh -o "$SSH_PROXY" $TARGET_ADDR
}
# }}}
# Wrapper around svn: page `svn log` through less and view `svn diff` in a
# read-only vim with the koehler colorscheme; everything else passes through.
# `command svn` bypasses this function to call the real binary.
function svn() {
  if [[ "$1" == "log" ]]; then
    # -FX tell `less` to quit if entire file fits on the first screen, not to switch to the alternate screen
    command svn "$@" | less -FX
  elif [[ "$1" == "diff" ]]; then
    command svn "$@" | dos2unix | vim - -R "+colorscheme koehler"
  else
    command svn "$@"
  fi
}
# should use colormake in github
# Wrapper around /usr/bin/make that colourises lines containing a
# /path/to/file:NN location: errors red, warnings yellow.  The exit status
# is make's own (first pipeline stage via PIPESTATUS), not sed's.
function make() {
  # All helpers are local: the original leaked pathpat/cc* into the shell.
  local pathpat ccred ccyellow ccend
  pathpat="(/[^/]*)+:[0-9]+"
  ccred=$(echo -e "\033[0;31m")
  ccyellow=$(echo -e "\033[0;33m")
  ccend=$(echo -e "\033[0m")
  /usr/bin/make "$@" 2>&1 | sed -e "/[Ee]rror[: ]/ s%$pathpat%$ccred&$ccend%g" -e "/[Ww]arning[: ]/ s%$pathpat%$ccyellow&$ccend%g"
  return ${PIPESTATUS[0]}
}
# docker {{{
# Open a zsh shell as the "vscode" user inside a running container.
# $1 - container name, $2 - working directory (defaults to the devcontainer
# convention /workspaces/<container>).  DISPLAY is forwarded for X11 apps.
function docker_attach() {
  local CONTAINER="$1"
  local WORKDIR="${2:-/workspaces/$CONTAINER}"
  docker exec -itu vscode --privileged -e DISPLAY=$DISPLAY -w $WORKDIR $CONTAINER zsh
}
#}}}
# valgrind {{{
# Run a command under valgrind with full leak checking; the valgrind report
# goes to valgrind-<name>.log and the program output (stdout+stderr) is
# teed to valgrind-<name>-output.log.
# $1 - command line to run, $2 - name used for the log files.
# $COMMAND is intentionally unquoted so a command with arguments can be
# passed as a single string.
function vgrun() {
  local COMMAND="$1"
  local NAME="$2"
  [[ -n "$COMMAND" ]] || { echo "Syntax: vgrun <command> <name>"; return; }
  [[ -n "$NAME" ]] || { echo "Syntax vgrun <command> <name>"; return; }
  valgrind \
    --leak-check=full --error-limit=no --track-origins=yes \
    --undef-value-errors=yes --log-file=valgrind-${NAME}.log \
    --read-var-info=yes \
    $COMMAND | tee valgrind-${NAME}-output.log 2>&1
}
# Same as vgrun(), but additionally follows child processes
# (--trace-children=yes) so forked/exec'd programs are instrumented too.
# $1 - command line to run, $2 - name used for the log files.
function vgtrace() {
  local COMMAND="$1"
  local NAME="$2"
  [[ -n "$COMMAND" ]] || { echo "Syntax: vgtrace <command> <name>"; return; }
  [[ -n "$NAME" ]] || { echo "Syntax vgtrace <command> <name>"; return; }
  valgrind \
    --leak-check=full --error-limit=no --track-origins=yes \
    --undef-value-errors=yes --log-file=valgrind-${NAME}.log \
    --read-var-info=yes --trace-children=yes \
    $COMMAND | tee valgrind-${NAME}-output.log 2>&1
}
# Run a command under valgrind and drop into a debugger on each error
# (--db-attach=yes); arguments are passed through verbatim via "$@".
function vgdbg() {
  [[ -n "$*" ]] || { echo "Syntax: vgrun <command>"; return; }
  valgrind \
    --leak-check=full --error-limit=no --track-origins=yes \
    --undef-value-errors=yes --read-var-info=yes --db-attach=yes \
    "$@"
}
# }}}
# }}}
# vim: foldmethod=marker
| true |
539b1f6ded932bc5ceeb3b6ca047dc855024ca18 | Shell | mirage335/gedaProduction | /laserstencil/shrinkPads.sh | UTF-8 | 1,612 | 3.53125 | 4 | [
"CC0-1.0",
"MIT",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
#$1 = inputGerberFile
#$2 = resizeDimension
resizeDimension="$2"
resizePad() {
#Basic operators, for reference.
#echo -n $(bc <<< "$1 - $resizeDimension")
#echo -n $(bc <<< "$1 * 0.6")
newDimension=$(bc <<< "$1 - $resizeDimension")
if [[ "$newDimension" < ".003" ]]
then
echo -n ".003"
else
echo -n "$newDimension"
fi
}
#Two fields.
#^\%ADD[0-9]*.\,[0-9]*\.[0-9]*.[0-9]*\.[0-9]*.*
#One field.
#^\%ADD[0-9]*.\,[0-9]*\.[0-9]*
# Stream a Gerber file ($1) line by line.  Aperture definition lines
# (%ADD...) carry one or two decimal dimensions; each dimension is passed
# through resizePad while the surrounding text (captured by the other sed
# groups) is echoed unchanged.  All other lines pass through verbatim.
processFile() {
	while read line
	do
		#Two fields.
		if echo $line | grep '^\%ADD[0-9]*.\,[0-9]*\.[0-9]*.[0-9]*\.[0-9]*.*' > /dev/null
		then
			# Groups: \1 prefix, \2 first dim, \3 separator, \4 second dim, \5 rest.
			echo -n $(echo "$line" | sed 's/\(^\%ADD[0-9]*.\,\)\([0-9]*\.[0-9]*\)\(.\)\([0-9]*\.[0-9]*\)\(.*\)/\1/g')
			resizePad $(echo "$line" | sed 's/\(^\%ADD[0-9]*.\,\)\([0-9]*\.[0-9]*\)\(.\)\([0-9]*\.[0-9]*\)\(.*\)/\2/g')
			echo -n $(echo "$line" | sed 's/\(^\%ADD[0-9]*.\,\)\([0-9]*\.[0-9]*\)\(.\)\([0-9]*\.[0-9]*\)\(.*\)/\3/g')
			resizePad $(echo "$line" | sed 's/\(^\%ADD[0-9]*.\,\)\([0-9]*\.[0-9]*\)\(.\)\([0-9]*\.[0-9]*\)\(.*\)/\4/g')
			echo "$line" | sed 's/\(^\%ADD[0-9]*.\,\)\([0-9]*\.[0-9]*\)\(.\)\([0-9]*\.[0-9]*\)\(.*\)/\5/g'
			continue
		fi
		#One field.
		if echo $line | grep '^\%ADD[0-9]*.\,[0-9]*\.[0-9]*' > /dev/null
		then
			echo -n $(echo "$line" | sed 's/\(^\%ADD[0-9]*.\,\)\([0-9]*\.[0-9]*\)\(.*\)/\1/g')
			resizePad $(echo "$line" | sed 's/\(^\%ADD[0-9]*.\,\)\([0-9]*\.[0-9]*\)\(.*\)/\2/g')
			echo "$line" | sed 's/\(^\%ADD[0-9]*.\,\)\([0-9]*\.[0-9]*\)\(.*\)/\3/g'
			continue
		fi
		#No fields.
		echo "$line"
	done < $1
}
processFile "$1" | true |
07b6462c9e48d19966856355e33b8e3e55e37c65 | Shell | rutulpatel/ansible_class | /setup | UTF-8 | 794 | 3.21875 | 3 | [] | no_license | #!/bin/bash -x
#forcefull recycling of docker containers and machine
docker-compose kill
docker-machine stop dev
echo "starting docker machine"
docker-machine start dev
# Import the "dev" machine's connection settings into this shell.
eval "$(docker-machine env dev)"
#cd'ing to home directory
cd ~/Workspace/ansible_class/project #home directory where zip folder is installed
#setting variables
home_dir="$PWD"
devip=$(docker-machine ip dev)
#this is to setup docker env
cd $home_dir/env
echo "generating ssh keys"
# Non-interactive keypair named "ansible" with an empty passphrase; `yes`
# answers the overwrite prompt if the key already exists.
yes | ssh-keygen -t rsa -f ansible -q -N ""
echo "building and starting containers"
docker-compose build && docker-compose up -d
sleep 5
# NOTE(review): the remote command is given as a local absolute path --
# presumably the same tree exists inside the container; confirm.
ssh -t -i $home_dir/env/ansible ansible@$devip -p 2200 $home_dir/ansible/pre-setup
sudo scp -r -i $home_dir/env/ansible -P 2200 $home_dir/ansible/* ansible@$devip:~/work/
echo "DEV IP address is "$devip
| true |
3e5d720145d4d67d3818ab79928e8df9dd35a72a | Shell | andrehacker/mapred | /test-stratosphere/test.sh | UTF-8 | 110 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Prints its own name and exits; everything below the early `exit` is dead
# scaffolding for resolving the script's own directory (kept for reference).
echo "$0"
exit
set -x verbose
DIR=$(dirname "$0")
# Resolve to an absolute path.  The original read `cd "$bin"`, referencing
# an undefined variable left over from the Hadoop-style snippet; $DIR is
# what was meant.
DIR=$(cd "$DIR" && pwd)
echo "$DIR"
pwd
cd "$DIR"
pwd
| true |
434af56e79a51806f95623c0b46bfa60ccdef407 | Shell | snowywhitee/Bash_app | /log.sh | UTF-8 | 681 | 3.890625 | 4 | [] | no_license | #!/bin/bash
#access log files
SCRIPTNAME=$0
# Print "<script>: <reason>" on stderr and abort with status 1.
# $1 - failure reason; defaults to "Unknown error" when omitted.
function error_exit {
    local reason="${1:-"Unknown error"}"
    printf '%s: %s\n' "$SCRIPTNAME" "$reason" 1>&2
    exit 1
}
# Abort early with a clear message if the anaconda X log is missing or
# unreadable by the current user.
if ! [[ -e "/var/log/anaconda/X.log" ]]; then
    error_exit "For some reason /var/log/anaconda/X.log doesn't exist"
elif ! [[ -r "/var/log/anaconda/X.log" ]]; then
    error_exit "Log file '/var/log/anaconda/X.log' is not readable"
fi
#YELLOW [33m
#BLUE [34m
# Expand "(WW" markers to "(Warning", drop lines without a warning marker
# (the /Warning/!d branch), and wrap the word in yellow ANSI colour codes.
warnings="$(cat /var/log/anaconda/X.log | sed -e 's/]\ (WW/]\ (Warning/g;/Warning/!d;s/Warning/\x1b[33mWarning\x1b[0m/g')"
echo -e "$warnings"
# Same treatment for "(II" informational markers, coloured blue.
info="$(cat /var/log/anaconda/X.log | sed -e 's/]\ (II/]\ (Information/g;/Information/!d;s/Information/\x1b[34mInformation\x1b[0m/g')"
echo -e "$info"
| true |
55209ec357564f9d7a534029889d40f56509b7c1 | Shell | jfifield/sbp | /segments/path_read_only.bash | UTF-8 | 501 | 3.25 | 3 | [
"MIT"
] | permissive | ### Defaults
### Defaults -- colours are user-overridable; fall back to the sbp palette.
_sbp_path_color_readonly_fg=${_sbp_path_color_readonly_fg:-$_sbp_color_white}
_sbp_path_color_readonly_bg=${_sbp_path_color_readonly_bg:-$_sbp_color_red}
# Emit a prompt segment (lock glyph on red) when the current directory is
# not writable by the user.  The _sbp_segment_new_* variables and
# _sbp_segment_new_create are part of the sbp framework defined elsewhere.
function _sbp_generate_path_read_only_segment {
  if [[ ! -w "$PWD" ]] ; then
    local command_value
    command_value=""
    _sbp_segment_new_color_bg="$_sbp_path_color_readonly_bg"
    _sbp_segment_new_color_fg="$_sbp_path_color_readonly_fg"
    _sbp_segment_new_value=" ${command_value} "
    _sbp_segment_new_create
  fi
}
| true |
f3c41838cd2255f3e91983df923385200728fbdc | Shell | woosa7/nbcc_projects | /auto_MD_simulation/autoMD/run_xmu_calc_with_GPU.sh | UTF-8 | 1,037 | 3.140625 | 3 | [] | no_license | # set GPU device number (either 0 or 1)
# $1 - GPU device index forwarded to rism3d-x (either 0 or 1, see header).
GPU_device_num=$1
echo $GPU_device_num
# do xmu calculations for each pre_rsm file in list_files
# Each iteration prepares the inputs inside ./execute, runs the 3D-RISM
# solver on the GPU, copies the resulting .xmu next to the source pdb name,
# and removes the per-run scratch files.
cat list_files | while read filename
do
	echo $filename
	cd execute
	# copy protein.pql & protein.pdb
	cp ../../../2_pql/protein.pql .
	cp ../../4_extract_pdb/pdb/"$filename" ./protein.pdb
	# create pdb and temp_protein.inp files from pre_rsm.dat
	/homes/epsilon/users/nbcc/HP36_tutorial/tools/mkrsm_from_pql_and_pdb/mkrsm_monomer/run.exe
	# create protein.inp
	cp /opt/nbcc/common/3D-RISM/header_m_BX_128_NX_128.inp ./protein.inp
	cat temp_protein.inp >> protein.inp
	# copy solvent input file
	cp /opt/nbcc/common/3D-RISM/tip3p_combined_300K.xsv ./solvent.xsv
	# perform 3D-RISM calculation
	rism3d-x protein.inp $GPU_device_num > /dev/null
	# copy xmu data
	cp protein.xmu ../xmu_data/$filename.xmu
	# delete unnecessary files
	rm -rf temp_protein.inp
	rm -rf solvent.xsv
	rm -rf protein.pql
	rm -rf protein.pdb
	rm -rf protein.inp
	rm -rf protein.xmu
	cd ..
done
| true |
a6f66e076b4e7a6b13be10631f572c3efe3df405 | Shell | happinesslijian/VM | /安装redis并使用prometheus监控/redis_exporter.sh | UTF-8 | 1,260 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Install redis_exporter and register it as a systemd service started at boot.
# Download the release tarball; keep retrying until the download succeeds.
wget https://github.com/oliver006/redis_exporter/releases/download/v1.3.2/redis_exporter-v1.3.2.linux-amd64.tar.gz
while [ $? -ne 0 ]; do
    wget https://github.com/oliver006/redis_exporter/releases/download/v1.3.2/redis_exporter-v1.3.2.linux-amd64.tar.gz
done
# Unpack and install the binary.  In the original these two steps only ran
# when the very first wget succeeded, so a download that succeeded after a
# retry was never extracted; they now always follow a successful download.
tar -zxf redis_exporter-*.tar.gz
cp -R redis_exporter-*/redis_exporter /usr/local/bin/redis_exporter
echo "---验证版本---"
redis_exporter --version
# Create the working directory (kept from the original, currently disabled).
#mkdir -p /var/lib/node_exporter
# Write the systemd unit file.
# NOTE(review): the Redis address and password are hard-coded in the unit.
cat > /usr/lib/systemd/system/redis_exporter.service <<EOF
[Unit]
Description=redis_exporter
[Service]
Restart=on-failure
ExecStart=/usr/local/bin/redis_exporter -redis.addr 127.0.0.1:6379 -redis.password 123456
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload && systemctl start redis_exporter && systemctl enable redis_exporter
echo "---验证---"
systemctl status redis_exporter
echo "---开机自启动---"
systemctl list-unit-files | grep redis_exporter
echo "---监听端口---"
ss -ntlp | grep 9121
# Clean up the downloaded archive and the extracted directory.
rm -rf ./redis_exporter-*
| true |
86628219e59e8c790e30c06c8719563e0c62e527 | Shell | SST-Author/Subgraph-Subgraph-Transitions | /run_model_on_static_graphs.sh | UTF-8 | 5,149 | 2.65625 | 3 | [] | no_license | # Usage:
# ./run_model_on_static_graphs.sh <model> <parallel> <directed>
#
# <model> -- a model name, such as Linear_AE or SST_SVM
# <parallel> -- input --parallel to get all 10 splits of a graph to run in
#   parallel. input anything else to get them to run in sequence
# <directed> -- if you want to run on the directed version of a graph, input
#   --directed otherwise leave blank.
# The four static graphs; each is evaluated on splits 0-9.
datasets="karate cora citeseer eucore"
# $1 and $3 are intentionally left unquoted below: $3 may be empty and must
# then disappear from the command line entirely, exactly as in the original
# expanded command list.
if [ "$2" = "--parallel" ];
then
    # Per dataset: launch splits 0-8 in the background and run split 9 in
    # the foreground -- the same background/foreground ordering as the
    # original 40 hand-written lines.
    for dataset in $datasets; do
        for idx in 0 1 2 3 4 5 6 7 8; do
            python runner.py --model $1 $3 --input $dataset --idx $idx &
        done
        python runner.py --model $1 $3 --input $dataset --idx 9
    done
else
    # Sequential mode: run every split of every dataset one after another.
    for dataset in $datasets; do
        for idx in 0 1 2 3 4 5 6 7 8 9; do
            python runner.py --model $1 $3 --input $dataset --idx $idx
        done
    done
fi
| true |
ad1250bfb2851438784c09a50a648576da433d7c | Shell | frakc/jira-move-task | /trial-step.sh | UTF-8 | 4,080 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Jira -> Slack release helper: configuration.  All settings are hard-coded.
jira_project_name="EB3AND"
jira_url="https://alterplay.atlassian.net"
# NOTE(review): a real-looking API token is committed in plain text here --
# it should be revoked and injected via the environment instead.
jira_token="WVhMArz7RhEaWkSvHqMk1E4F"
from_status="Team Review"
to_status="Ready for QA"
slack_webhoock="https://hooks.slack.com/services/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"
custom_jira_value="6"
custom_jira_field="customfield_11360"
changelogpath="changelog.txt"
# The validation below called `usage` without it ever being defined (bash
# printed "command not found" and carried on); define the intended helper.
usage() {
    echo "Usage: $0 -- edit the settings at the top of this script" 1>&2
    exit 1
}
if [ -z "$jira_project_name" ]; then
    echo "Jira Project Name is required."
    usage
fi
if [ -z "$jira_url" ]; then
    echo "Jira Url is required."
    usage
fi
if [ -z "$jira_token" ]; then
    echo "Jira token is required."
    usage
fi
if [ -z "$from_status" ]; then
    echo "Status of tasks for deployment is required."
    usage
fi
if [ -z "$custom_jira_field" ]; then
    echo "custom_jira_field is empty"
    usage
fi
if [ -z "$custom_jira_value" ]; then
    echo "custom_jira_value is empty."
    usage
fi
# Basic-auth token for the Jira REST API.  (The original's unused `length`
# variable was removed.)
cred="artem@alty.co:$jira_token"
token=$(echo -n "$cred" | base64)
# JQL search for all issues currently in $from_status.
query=$(jq -n \
    --arg jql "project = $jira_project_name AND status = '$from_status'" \
    '{ jql: $jql, startAt: 0, maxResults: 200, fields: [ "id" ], fieldsByKeys: false }'
);
echo "Query to be executed in Jira: $query"
tasks_to_close=$(curl -s \
    -H "Content-Type: application/json" \
    -H "Authorization: Basic $token" \
    --request POST \
    --data "$query" \
    "$jira_url/rest/api/2/search" | jq -r '.issues[].key'
)
change_log=""
echo "Tasks to transition: $tasks_to_close"
# Word splitting on $tasks_to_close is intentional: one issue key per word.
for task in ${tasks_to_close}
do
    echo "Transitioning $task"
    # The original read `[[ -ne ... && -ne ... ]]` -- a bash conditional
    # syntax error, so this branch never ran; `-n` (non-empty) was meant.
    if [[ -n "$custom_jira_field" && -n "$custom_jira_value" ]]; then
        echo "Setting $custom_jira_field of $task to $custom_jira_value"
        query=$(jq -n \
            --arg c_value "$custom_jira_value" \
            --arg c_name "$custom_jira_field" \
            '{ "fields": { ($c_name) : { "value": $c_value } } }'
        );
        curl \
            -H "Content-Type: application/json" \
            -H "Authorization: Basic $token" \
            --request PUT \
            --data "$query" \
            "$jira_url/rest/api/2/issue/$task"
    fi
    # Issue summary, collected for the changelog.
    task_title=$(curl \
        -H "Content-Type: application/json" \
        -H "Authorization: Basic $token" \
        --request GET \
        "$jira_url/rest/api/2/issue/$task" |
        jq -r '.fields.summary')
    change_log="$change_log"$'\n'"$task_title"
    # Find the transition whose target status matches $to_status.
    transition_id=$(curl -s \
        -H "Authorization: Basic $token" \
        "$jira_url/rest/api/2/issue/$task/transitions" |
        jq -r --arg t "$to_status" '.transitions[] | select( .to.name == $t ) | .id'
    )
    echo "ids: $transition_id"
    if [ -n "$transition_id" ]; then
        echo "Transitioning $task to $to_status"
        # "$transition_id" is quoted: the original's unquoted expansion broke
        # jq's argument parsing whenever multiple ids came back.
        query=$(jq -n \
            --arg ti "$transition_id" \
            '{ transition: { id: $ti } }'
        );
        echo "query: $query"
        curl \
            -H "Content-Type: application/json" \
            -H "Authorization: Basic $token" \
            --request POST \
            --data "$query" \
            "$jira_url/rest/api/2/issue/$task/transitions"
    else
        echo "No matching transitions from status '$from_status' to '$to_status' for $task"
    fi
done
# Build the Slack release message listing the transitioned issues.
release_message="Next build resolves following issues \n\`\`\`\n"
for task in ${tasks_to_close}
do
    release_message="$release_message$jira_url/browse/$task\n"
done
release_message="$release_message\n\`\`\` "
release_message="\"$release_message\""
slack_query=$(jq -n --argjson message "$release_message" '{text:$message}');
echo "======"
echo "$change_log"
echo "$change_log" > "$changelogpath"
echo "query $slack_query"
echo $(curl -X POST -H "Content-type: application/json" --data "$slack_query" $slack_webhoock) | true |
68ffb36a1c184bd0276d90e6c06ab261244d3719 | Shell | citusdata/packaging | /scripts/determine_name | UTF-8 | 305 | 3.59375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# make bash behave
set -euo pipefail
IFS=$'\n\t'
# constants
readonly success=0
readonly failure=1
# Ask the GitHub API for the authenticated user's display name; curl -f
# fails silently on HTTP errors and jq maps a missing name to "".
fullname=$(curl -sf https://api.github.com/user | jq -r '.name // empty')
[ -n "${fullname}" ] || {
  echo "$0: could not determine user name" >&2
  exit "$failure"
}
printf '%s\n' "${fullname}"
exit "$success"
| true |
9fcf441deea541c92669728d7e591f9facd59d9c | Shell | alabbas-ali/N-Gram-Classifier | /TestVagrant/deploy/install-java.sh | UTF-8 | 1,011 | 3.3125 | 3 | [] | no_license | #!/bin/bash
## Latest JDK8 version is JDK8u144 released on 26th July, 2017.
BASE_URL_8=http://download.oracle.com/otn-pub/java/jdk/8u144-b01/090f390dda5b47b9b721c7dfaa008135/jdk-8u144
# Basename of the URL, e.g. "jdk-8u144".  Parameter expansion replaces the
# original `echo | rev | cut | rev` pipeline (same result, no subprocesses).
JDK_VERSION=${BASE_URL_8##*/}
##declare -a PLATFORMS=(
##  "-linux-arm32-vfp-hflt.tar.gz"
##  "-linux-arm64-vfp-hflt.tar.gz"
##  "-linux-i586.rpm"
##  "-linux-i586.tar.gz"
##  "-linux-x64.rpm"
##  "-linux-x64.tar.gz"
##  "-macosx-x64.dmg"
##  "-solaris-sparcv9.tar.Z"
##  "-solaris-sparcv9.tar.gz"
##  "-solaris-x64.tar.Z"
##  "-solaris-x64.tar.gz"
##  "-windows-i586.exe"
##  "-windows-x64.exe"
##  "-docs-all.zip"
##)
# Only the x64 rpm (installed below) and the docs bundle are fetched.
declare -a PLATFORMS=("-linux-x64.rpm" "-docs-all.zip")
for platform in "${PLATFORMS[@]}"
do
    # -c resumes partial downloads; the cookie accepts Oracle's licence.
    wget -c --header "Cookie: oraclelicense=accept-securebackup-cookie" "${BASE_URL_8}${platform}"
    ### curl -C - -L -O -# -H "Cookie: oraclelicense=accept-securebackup-cookie" "${BASE_URL_8}${platform}"
done
yum install -y jdk-8u144-linux-x64.rpm
rm jdk-8u144-linux-x64.rpm | true |
95ad56e166839634bd05f05c402cd36477c53d10 | Shell | nd-cse-30341-fa20/cse-30341-fa20-assignments | /reading09/test_program.sh | UTF-8 | 826 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# input: emit the six test virtual addresses to stdout as raw bytes.
# The embedded Python converts each 8-bit binary pattern below into a
# single byte, so the program under test reads exactly 6 bytes.
input() {
# Heredoc is fed verbatim to python3; stdout is reopened in binary mode
# so the bytes are written unmodified.
python3 <<EOF
import os
import sys
VIRTUAL_ADDRESSES = [
    '0010 0100',
    '1000 0001',
    '0000 0101',
    '1100 1111',
    '0011 1100',
    '0001 1001',
]
with os.fdopen(sys.stdout.fileno(), 'wb') as fs:
    for virtual_address in VIRTUAL_ADDRESSES:
        virtual_address = virtual_address.replace(' ', '')
        virtual_address = int(virtual_address, 2)
        fs.write(virtual_address.to_bytes(1, byteorder='little'))
EOF
}
# output: print the expected program output, stored base64-encoded so
# it survives copy/paste intact.  Decodes to six "VA[..] -> PA[..]"
# lines, some flagged with Segmentation/Protection Fault.
output() {
base64 -d <<'EOF'
VkFbMjRdIC0+IFBBWzU0XQpWQVs4MV0gLT4gUEFbMDFdIFNlZ21lbnRhdGlvbiBGYXVsdApWQVsw
NV0gLT4gUEFbMzVdClZBW2NmXSAtPiBQQVswZl0gU2VnbWVudGF0aW9uIEZhdWx0ClZBWzNjXSAt
PiBQQVsyY10gUHJvdGVjdGlvbiBGYXVsdApWQVsxOV0gLT4gUEFbNzldCg==
EOF
}
}
# Run ./program over the binary test input and diff against the
# expected output; each changed line (prefixed < or >) is a mismatch.
printf "Testing reading09 program ... "
mismatches=$(diff <(input | ./program 2> /dev/null) <(output) | grep -E "^[><]" | wc -l)
# Six test cases, 3.0 points total -> 0.5 points per matching line.
points=$(python3 <<EOF
print("{:0.2f}".format((6 - $mismatches) * 3.0 / 6.0))
EOF
)
if (( mismatches == 0 )); then
    echo "Success"
else
    echo "Failure"
fi
echo "  Score $points"
exit $mismatches
| true |
03e9cb18fd1dcc06c3598c7fe40f049e9cf1a1a7 | Shell | Circuit-killer/resin-wifi-connect | /scripts/local-build.sh | UTF-8 | 326 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# -e: stop on the first failing step; -v: echo commands for CI logs.
set -ev

# A cross-compilation target (also the Docker image tag) is required.
if [ -z "${1:-}" ]; then
    printf 'Rust compilation target not specified\n' >&2
    exit 1
fi

TARGET=$1

# cross: run a command inside the target's build container with the
# repository mounted at /work.  $PWD and $TARGET are quoted so paths
# containing spaces cannot be word-split (ShellCheck SC2086).
cross() {
    docker run -it --rm -v "$PWD:/work" "$TARGET" "$@"
}

# Build the per-target toolchain image, then compile and strip the binary.
docker build -t "$TARGET" "scripts/docker/$TARGET"

cross cargo build --release --target="$TARGET"
cross cross-strip "target/$TARGET/release/wifi-connect"
| true |
85e345f6eeb6baff2df55ab540ef826dcdabc2dd | Shell | AlfredPianist/holberton-system_engineering-devops | /0x12-web_stack_debugging_2/0-iamsomeoneelse | UTF-8 | 137 | 3.265625 | 3 | [] | no_license | #!/usr/bin/env bash
# Script that runs the whoami command under the user passed as an argument
if [ "$1" ]
then
sudo -u "$1" whoami
fi
| true |
fc0f72371ac926f0d8a4a39e4f3e77906c1e0b6e | Shell | TheButlah/Battlecode-2018 | /examplefuncsplayer-java/run.sh | UTF-8 | 551 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# This file should build and run your code.
# It will run if you're in nodocker mode on Mac or Linux,
# or if you're running in docker.
# Compile our code.
# (Left commented out: the prebuilt jar below is run instead; uncomment
#  these lines to compile the sources against the battlecode java libs.)
#echo javac $(find . -name '*.java') -classpath ../battlecode/java
#javac $(find . -name '*.java') -classpath ../battlecode/java
# Run our code.
#echo java -Xmx40m -classpath .:../battlecode/java Player
#java -Xmx40m -classpath .:../battlecode/java Player
# Run the packaged player: -Xmx40m caps the heap at 40 MB (presumably a
# competition memory limit — confirm), -ea enables Java assertions.
# Remember to disable assertions by removing the -ea flag when not debugging!
java -Xmx40m -jar -ea Battlecode-2018-0.1-all.jar
59e8d95f3236c3ba80283db65e4cff45577d77df | Shell | ecwws/docker-spark | /worker.sh | UTF-8 | 531 | 2.890625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
export SPARK_HOME=/spark

# Pull in Spark's standard configuration and environment setup.
. "${SPARK_HOME}/sbin/spark-config.sh"
. "${SPARK_HOME}/bin/load-spark-env.sh"

# Default the master port and the worker web-UI port when the
# environment leaves them unset or empty (parameter expansion replaces
# the original `if [ "$VAR" = "" ]` blocks).
: "${SPARK_MASTER_PORT:=7077}"
: "${SPARK_WORKER_WEBUI_PORT:=8081}"

# The master host has no sensible default — refuse to start without it.
if [ -z "${SPARK_MASTER_HOST:-}" ]; then
    echo "SPARK_MASTER_HOST must be set" >&2
    exit 1
fi

# exec replaces this shell with spark-class so that, as a container
# entrypoint, the worker receives signals (e.g. SIGTERM from
# `docker stop`) directly.  The original trailing `>> /dev/stdout`
# redirect was a no-op and has been dropped.
exec /spark/bin/spark-class org.apache.spark.deploy.worker.Worker \
    --webui-port "$SPARK_WORKER_WEBUI_PORT" \
    "spark://$SPARK_MASTER_HOST:$SPARK_MASTER_PORT"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.