blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
c4abf95cd3b3c3a6dd2577c547f96cc6a428017b
|
Shell
|
luxwig/EE180D
|
/run_all.sh
|
UTF-8
| 251
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run ./cf_main once per line of testfile.list, collecting program output
# in output.txt and printing each run's exit code on the terminal.
rm -f output.txt
# `|| [[ -n "${line}" ]]` also processes a final line with no trailing newline.
while IFS='' read -r line || [[ -n "${line}" ]]; do
echo -n "RUNNING ${line}"
echo "--- ${line} ---" 1>>output.txt
# Run the test case FIRST so that $? below is cf_main's exit status.
# (The original printed "EC: $?" before invoking cf_main, so it always
# reported the status of the preceding echo, i.e. 0.)
./cf_main "${line}" 2>/dev/null 1>> output.txt
echo -e "\t EC: $?"
done < "testfile.list"
| true
|
08ffc3336b88bb31c4d416e2dd2a357bffbbd5ed
|
Shell
|
jbelmont/useful-scripts
|
/convert-to-titlecase.sh
|
UTF-8
| 285
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# Print System Clipboard, convert it to Title Case, and replace hyphens
# with spaces.
#
# gsed (GNU sed) expression:
#   s/.*/\L&/      lowercase the whole line
#   s/[a-z]*/\u&/g uppercase the first letter of each word
#   s/-/ /g        replace hyphens with spaces
#
# Piping pbpaste straight into gsed fixes the original `echo $(pbpaste)`,
# whose unquoted expansion word-split the clipboard text and let glob
# characters (*, ?) expand against the current directory before sed ran.
pbpaste | gsed -E 's/.*/\L&/;s/[a-z]*/\u&/g;s/-/ /g' | tr -d '\n'
| true
|
83c1366e133f239118d1321e4715b7b46762dc2a
|
Shell
|
northbright/bookmarks
|
/dev/acme-sh/local-copy/letsencrypt_notes.sh
|
UTF-8
| 3,468
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
# How to use "acme.sh" to set up Lets Encrypt without root permissions
# See https://github.com/Neilpang/acme.sh for more
# This assumes that your website has a webroot at "/var/www/<domain>"
# I'll use the domain "EXAMPLE.com" as an example
# When this is done, there will be an "acme" user that handles issuing,
# updating, and installing certificates. This account will have the following
# (fairly minimal) permissions:
# - Host files at http://EXAMPLE.com/.well-known/acme-challenge
# - Copy certificates to /etc/nginx/auth-acme
# - Reload your nginx server
#
# NOTE: this file is a copy/paste walkthrough, not an executable script —
# it contains interactive steps (vim, visudo) and raw nginx config blocks.
# First things first - create a user account for acme
sudo useradd -m -d /var/lib/acme -s /usr/sbin/nologin acme
sudo chmod 700 /var/lib/acme
# Create a directory for the acme account to save certs in
sudo mkdir /etc/nginx/auth-acme
# ("acme.www-data" is chown's legacy dot syntax for acme:www-data)
sudo chown acme.www-data /etc/nginx/auth-acme
# 710: the www-data group may traverse the directory but not list it
sudo chmod 710 /etc/nginx/auth-acme
# Create a directory under the webroot for acme to put webroot challenge responses
sudo mkdir -p /var/www/EXAMPLE.com/.well-known/acme-challenge
sudo chown acme.acme /var/www/EXAMPLE.com/.well-known/acme-challenge
sudo chmod 755 /var/www/EXAMPLE.com/.well-known/acme-challenge
# Also make sure the acme user has at least eXecute permissions on all parent
# directories of this directory. This will generally be true by default.
# Edit your nginx config file to publish the well-known directory on your site.
# Lets Encrypt checks on port 80, non-SSL, so you need to at least not redirect
# that location.
sudo vim /etc/nginx/sites-enabled/EXAMPLE.com
## Example config section (nginx syntax — paste into the file above, do not run):
# webroot for acme
server {
listen [::]:80;
server_name EXAMPLE.com;
location ~ /.well-known {
allow all;
root /var/www/EXAMPLE.com;
}
location / {
rewrite ^ https://EXAMPLE.com$request_uri? permanent;
}
}
# Make sure nginx is configured properly
sudo nginx -t
sudo service nginx reload
# Edit your sudoers file to allow the acme user to reload (not restart) nginx
sudo visudo
# Add the following line:
acme ALL=(ALL) NOPASSWD: /usr/sbin/service nginx reload
# Now change to the ACME user - you'll do most of the rest of this guide as them
sudo -s -u acme bash
# acme.sh keys off $HOME; point it at the acme user's home explicitly
export HOME=/var/lib/acme
cd /var/lib/acme
# Install acme.sh
git clone https://github.com/Neilpang/acme.sh.git
cd acme.sh
./acme.sh --install
# Create your first certificate (from here on is roughly what you'll repeat)
cd /var/lib/acme
# (.acme.sh/ was created in $HOME by the --install step above)
.acme.sh/acme.sh --issue -d EXAMPLE.com -w /var/www/EXAMPLE.com
# If everything went well, install your certificate
.acme.sh/acme.sh --installcert -d EXAMPLE.com \
--keypath /etc/nginx/auth-acme/EXAMPLE.com.key \
--capath /etc/nginx/auth-acme/EXAMPLE.com.ca \
--fullchainpath /etc/nginx/auth-acme/EXAMPLE.com.crt \
--reloadcmd "sudo service nginx reload"
# Drop back to your own user
exit
# Now modify your nginx config to work with the new certs
sudo vim /etc/nginx/sites-enabled/EXAMPLE.com
# Example SSL config section (nginx syntax — paste, do not run)
server {
...
ssl_certificate /etc/nginx/auth-acme/EXAMPLE.com.crt;
ssl_certificate_key /etc/nginx/auth-acme/EXAMPLE.com.key;
ssl_trusted_certificate /etc/nginx/auth-acme/EXAMPLE.com.ca;
include ssl_settings.conf;
...
}
# Test nginx
sudo nginx -t
# And reload if it worked
sudo service nginx reload
# Congrats, you have letsencrypt and nobody ran anything as root on your box.
# Don't forget to back up /var/lib/acme/.acme.sh - it has your letsencrypt account keys!
| true
|
dc16d32af8be0ab0eec750b9835ca0e90cc4862b
|
Shell
|
sergiomt/centorion
|
/vagrant-setup/zookeeper/init.d/zookeeper
|
UTF-8
| 541
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# description: Zookeeper Start Stop Restart
# processname: zookeeper
# chkconfig: 234 20 80
# Init script: runs zkServer.sh start/stop as the "hadoop" user.
JAVA_HOME=/usr/java/latest
export JAVA_HOME
ZOO_LOG_DIR=/usr/share/zookeeper/logs
export ZOO_LOG_DIR

# zk ACTION — run zkServer.sh ACTION as user "hadoop".
# Factors out the command line the original repeated four times.
zk() {
  /bin/su -s /bin/bash hadoop /usr/share/zookeeper/bin/zkServer.sh "$1"
}

# Quote "$1": an unset action previously produced an unquoted empty word.
case "$1" in
start)
  zk start
;;
stop)
  zk stop
;;
restart)
  zk stop
  zk start
;;
*)
  # The original fell through silently on unknown actions; report usage
  # instead (LSB convention: exit 2 for invalid arguments).
  echo "Usage: $0 {start|stop|restart}" >&2
  exit 2
;;
esac
exit 0
| true
|
0372b6613050bd6b7575b2c9234f1f1f1f8257c4
|
Shell
|
gorgyboy/holberton-system_engineering-devops
|
/0x0F-load_balancer/0-custom_http_response_header
|
UTF-8
| 325
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install nginx and add an X-Served-By response header carrying this
# machine's hostname, then restart the service so the change takes effect.
host_name="$(hostname)"

# Refresh package lists and install nginx non-interactively.
sudo apt-get update
sudo apt-get -y install nginx

# Serve a fixed, known payload from the default site root.
echo "Holberton School for the win!" | sudo tee /var/www/html/index.html

# Insert the custom header directive just above the "sendfile" line of
# the main nginx config, then restart nginx to apply it.
sudo sed -i "/^.*sendfile.*/i add_header X-Served-By ${host_name};" /etc/nginx/nginx.conf
sudo service nginx restart
| true
|
0aea1869f4b19513f4f7db917319cbabe53543a8
|
Shell
|
yangguoandresolution/amazon-vpc-cni-k8s
|
/scripts/entrypoint.sh
|
UTF-8
| 4,334
| 4.09375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# NOTE(jaypipes): Normally, we would prefer *not* to have an entrypoint script
# and instead just start the agent daemon as the container's CMD. However, the
# design of CNI is such that Kubelet looks for the presence of binaries and CNI
# configuration files in specific directories, and the presence of those files
# is the trigger to Kubelet that that particular CNI plugin is "ready".
#
# In the case of the AWS VPC CNI plugin, we have two components to the plugin.
# The first component is the actual CNI binary that is execve'd from Kubelet
# when a container is started or destroyed. The second component is the
# aws-k8s-agent daemon which houses the IPAM controller.
#
# As mentioned above, Kubelet considers a CNI plugin "ready" when it sees the
# binary and configuration file for the plugin in a well-known directory. For
# the AWS VPC CNI plugin binary, we only want to copy the CNI plugin binary
# into that well-known directory AFTER we have successfully started the IPAM
# daemon and know that it can connect to Kubernetes and the local EC2 metadata
# service. This is why this entrypoint script exists; we start the IPAM daemon
# and wait until we know it is up and running successfully before copying the
# CNI plugin binary and its configuration file to the well-known directory that
# Kubelet looks in.
# turn on exit on subprocess error and exit on undefined variables
# (-e: abort on any unhandled non-zero exit; -u: unset variables are errors)
set -eu
# turn on bash's job control; required so the backgrounded IPAM daemon
# can be brought back into the foreground with `fg %1` at the end
set -m
# Emit a single structured JSON log record on stdout.
#   $1 - log level ("info", "error", ...)
#   $2 - message text
# The four variables are deliberately left global (no `local`), matching
# the original script.
log_in_json() {
  LOGTYPE=$1
  MSG=$2
  FILENAME="${0##*/}"
  # %3N (millisecond precision) is a GNU date extension.
  TIMESTAMP=$(date +%FT%T.%3NZ)
  printf '{"level":"%s","ts":"%s","caller":"%s","msg":"%s"}\n' \
    "$LOGTYPE" "$TIMESTAMP" "$FILENAME" "$MSG"
}
# Check for all the required binaries before we go forward:
# the IPAM daemon itself and the gRPC health-check probe, both expected
# in the container's working directory.
if [ ! -f aws-k8s-agent ]; then
log_in_json error "Required aws-k8s-agent executable not found."
exit 1
fi
if [ ! -f grpc-health-probe ]; then
log_in_json error "Required grpc-health-probe executable not found."
exit 1
fi
# Tunables, each overridable from the environment.
AGENT_LOG_PATH=${AGENT_LOG_PATH:-"aws-k8s-agent.log"}
# Host paths (bind-mounted into this container) where Kubelet expects
# CNI binaries and configuration.
HOST_CNI_BIN_PATH=${HOST_CNI_BIN_PATH:-"/host/opt/cni/bin"}
HOST_CNI_CONFDIR_PATH=${HOST_CNI_CONFDIR_PATH:-"/host/etc/cni/net.d"}
AWS_VPC_K8S_CNI_VETHPREFIX=${AWS_VPC_K8S_CNI_VETHPREFIX:-"eni"}
# 9001 is the default MTU used for AWS ENIs (jumbo frames).
AWS_VPC_ENI_MTU=${AWS_VPC_ENI_MTU:-"9001"}
AWS_VPC_K8S_PLUGIN_LOG_FILE=${AWS_VPC_K8S_PLUGIN_LOG_FILE:-"/var/log/aws-routed-eni/plugin.log"}
AWS_VPC_K8S_PLUGIN_LOG_LEVEL=${AWS_VPC_K8S_PLUGIN_LOG_LEVEL:-"Debug"}
AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER=${AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER:-"true"}
# Check for ipamd connectivity on localhost port 50051.
# Polls the gRPC health probe roughly once per second.
#   $1 - (optional) maximum number of attempts; defaults to 30.
# Returns 0 as soon as the probe succeeds, 1 once every attempt fails.
# The original looped forever, which made the caller's
# "Timed out waiting for IPAM daemon" error path unreachable.
wait_for_ipam() {
  local max_attempts="${1:-30}"
  local attempt=0
  while [ "$attempt" -lt "$max_attempts" ]; do
    if ./grpc-health-probe -addr 127.0.0.1:50051 >/dev/null 2>&1; then
      return 0
    fi
    attempt=$((attempt + 1))
    # We sleep for 1 second between each retry (skipped after the last try).
    [ "$attempt" -lt "$max_attempts" ] && sleep 1
  done
  return 1
}
# If there is no init container, copy the required files
if [[ "$AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER" != "false" ]]; then
# Copy files
log_in_json info "Copying CNI plugin binaries ... "
PLUGIN_BINS="loopback portmap bandwidth aws-cni-support.sh"
for b in $PLUGIN_BINS; do
# Install the binary
install "$b" "$HOST_CNI_BIN_PATH"
done
fi
log_in_json info "Install CNI binary.."
install aws-cni "$HOST_CNI_BIN_PATH"
log_in_json info "Starting IPAM daemon in the background ... "
# NOTE(review): `2>&1` here redirects *tee's* stderr, not the agent's;
# the agent's stderr bypasses the log file — confirm whether
# `./aws-k8s-agent 2>&1 | tee ...` was intended.
./aws-k8s-agent | tee -i "$AGENT_LOG_PATH" 2>&1 &
log_in_json info "Checking for IPAM connectivity ... "
if ! wait_for_ipam; then
log_in_json error "Timed out waiting for IPAM daemon to start:"
cat "$AGENT_LOG_PATH" >&2
exit 1
fi
log_in_json info "Copying config file ... "
# modify the static config to populate it with the env vars
# (~ is used as the sed delimiter since the values contain slashes)
sed \
-e s~__VETHPREFIX__~"${AWS_VPC_K8S_CNI_VETHPREFIX}"~g \
-e s~__MTU__~"${AWS_VPC_ENI_MTU}"~g \
-e s~__PLUGINLOGFILE__~"${AWS_VPC_K8S_PLUGIN_LOG_FILE}"~g \
-e s~__PLUGINLOGLEVEL__~"${AWS_VPC_K8S_PLUGIN_LOG_LEVEL}"~g \
10-aws.conflist > "$HOST_CNI_CONFDIR_PATH/10-aws.conflist"
log_in_json info "Successfully copied CNI plugin binary and config file."
# Remove the legacy config file so Kubelet does not pick up both.
if [[ -f "$HOST_CNI_CONFDIR_PATH/aws.conf" ]]; then
rm "$HOST_CNI_CONFDIR_PATH/aws.conf"
fi
# Bring the aws-k8s-agent process back into the foreground
# (%1 is the agent|tee pipeline — the only background job; requires set -m)
log_in_json info "Foregrounding IPAM daemon ..."
fg %1 >/dev/null 2>&1 || { log_in_json error "failed (process terminated)" && cat "$AGENT_LOG_PATH" && exit 1; }
| true
|
b2249290dae0f1b4550a4e4290900b35af0b4e78
|
Shell
|
ekad0912/Project
|
/guessinggame.sh
|
UTF-8
| 460
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# File: guessinggame.sh
# Prompt the user for a guess and store it in the global $response.
function question {
echo "How many files in this directory?"
# -r: keep backslashes in the input literal instead of treating them
# as escape characters (the original bare `read` mangled them).
read -r response
}
# Print the number of regular files directly inside the current directory.
function hmfiles {
# $(...) replaces the deprecated backticks; the unquoted expansion is
# kept on purpose — it normalises any padding wc prints around the count.
echo $(find ./ -maxdepth 1 -type f | wc -l)
}
# Main game loop: keep asking until the guess equals the current file
# count. On the first pass $response is unset; inside [[ ]] the
# arithmetic -ne comparison treats the empty value as 0, so the loop is
# entered whenever the directory holds at least one file.
while [[ $response -ne $(hmfiles) ]]
do
question
if [[ $response -eq $(hmfiles) ]]
then
echo "Congratulations! This is correct number of files"
else
# Guess too high or too low — hint accordingly.
if [[ $response -gt $(hmfiles) ]]
then
echo "Is it too high"
else
echo "Is it too low"
fi
fi
done
| true
|
f4616dc0e23bd0b0366161f2fde793f6489e31a2
|
Shell
|
minos-org/sentry-deb
|
/debian/sentry.postinst
|
UTF-8
| 2,082
| 3.828125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
set -e
# _ensure_setting_top SETTING FILE
# Idempotently force SETTING (a "key value ..." line, keyed on its first
# three space-separated fields) into FILE:
#   - if an active line with the same key exists but differs, rewrite it;
#   - else if a commented-out ("#key ...") line exists, replace it;
#   - otherwise insert SETTING as the first line of FILE.
# Returns 1 when either argument is empty or FILE does not exist.
# All variables are prefixed _ensuresetting_var_ because POSIX sh has no
# `local`.
_ensure_setting_top()
{ #setting($1), configuration file($2)
[ -z "${1}" ] && return 1 || _ensuresetting_var_line="${1}"
[ -z "${2}" ] && return 1 || _ensuresetting_var_file="${2}"
[ ! -f "${_ensuresetting_var_file}" ] && return 1
# Regex form of the setting: single spaces match runs of spaces/tabs.
_ensuresetting_var_regex="$(printf "%s" "${_ensuresetting_var_line}" | sed 's: :[ \\t]\\+:g')"
# The "key": the first three space-separated fields of the setting.
_ensuresetting_var_setting="$(printf "%s" "${_ensuresetting_var_line}" | cut -d' ' -f1-3)"
if grep "$(printf "^%s" "${_ensuresetting_var_setting}")" "${_ensuresetting_var_file}" >/dev/null; then
# Key present: rewrite only when the full line does not already match.
if ! grep "$(printf "^%s" "${_ensuresetting_var_regex}")" "${_ensuresetting_var_file}" >/dev/null; then
sed -i -e "/^${_ensuresetting_var_setting}/ s,.*,${_ensuresetting_var_line}," "${_ensuresetting_var_file}"
fi
else
# Key absent: uncomment an existing "#key" line, or prepend the setting.
if grep "$(printf "^#%s[ \t]" "${_ensuresetting_var_setting}")" "${_ensuresetting_var_file}" >/dev/null; then
sed -i -e "/^#${_ensuresetting_var_setting}/ s,#.*,${_ensuresetting_var_line}," "${_ensuresetting_var_file}"
else
sed -i -e "1i${_ensuresetting_var_line}" "${_ensuresetting_var_file}"
fi
fi
}
case "${1}" in
configure)
[ ! -d /var/db/sentry/ ] && /var/db/sentry/sentry.pl >/dev/null 2>&1 || true
chmod -R 755 /var/db/ || true
if [ -f /etc/hosts.allow ]; then
if [ ! -f /etc/hosts.allow.backup ]; then
cp /etc/hosts.allow /etc/hosts.allow.backup
fi
_ensure_setting_top "sshd : ALL : spawn /var/db/sentry/sentry.pl -c --ip=%a : allow\n" /etc/hosts.allow
_ensure_setting_top "sshd : /var/db/sentry/hosts.deny : deny" /etc/hosts.allow
_ensure_setting_top "#Autogenerated by the sentry deb package" /etc/hosts.allow
else
printf "%s\\n" "/etc/hosts.allow doesn't exist!, leaving sentry unconfigured..."
fi
;;
abort-upgrade|abort-deconfigure|abort-remove) ;;
*) printf "%s\\n" "${0} called with unknown argument \`${1}'" 1>&2
exit 1 ;;
esac
exit 0
| true
|
4e99c0d130c0f7f17e1d923f67cad2001bf6ee55
|
Shell
|
manoj-kristhombu/commons
|
/qa/web-test-framework/wsas/runAll.sh
|
UTF-8
| 6,223
| 3.75
| 4
|
[] |
no_license
|
#!/bin/sh
# ----------------------------------------------------------------------------
# Copyright 2005-2009 WSO2, Inc. http://www.wso2.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# Script to run all tests one by one
#
# Environment Variable Prequisites
#
# JAVA_HOME Must point at your Java Development Kit installation.
#
# JAVA_OPTS (Optional) Java runtime options used when the commands
# is executed.
#
# NOTE: Borrowed generously from Apache Tomcat startup scripts.
# -----------------------------------------------------------------------------
#check for JAVA_HOME
# Record the wall-clock start time; paired with $after at the end of the
# script to compute total elapsed seconds.
before="$(date +%s)"
if [ -z "$JAVACMD" ] ; then
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
else
# NOTE(review): a bare "java" fallback always fails the [ -x ] file
# test below unless ./java exists — [ -x ] does not do PATH lookup.
JAVACMD=java
fi
fi
if [ ! -x "$JAVACMD" ] ; then
echo "Error: JAVA_HOME is not defined correctly."
echo " test framework cannot be executed $JAVACMD"
exit 1
fi
# if JAVA_HOME is not set we're not happy
if [ -z "$JAVA_HOME" ]; then
echo "You must set the JAVA_HOME variable before running test framework."
exit 1
fi
# ----- Execute The Requested Command-----------------------------------
# Start with a fresh reports directory for this run.
rm -r reports
mkdir reports
# List of test suites to run, one per line.
var=`cat wsas_test_suites.txt`
total=0
# Sum the one-integer-per-line values in reports/$1.txt into the global
# $total. A single awk pass replaces the original per-line `expr` fork
# (orders of magnitude fewer processes) and also counts a final line that
# lacks a trailing newline, which the `while read` loop silently skipped.
# `+ 0` forces numeric output (0) for an empty file, matching the
# original's initialised total.
count(){
total=$(awk '{ sum += $1 } END { print sum + 0 }' "reports/$1.txt")
}
echo ""
ffprofile=""
foundff=0
for j in $@; do
if echo "$j" | grep -q "[/]" # extrat the FF profile from the command line
then
ffprofile=${j}
foundff=1
fi
done
if [ $foundff -eq 1 ]
then
echo "$ffprofile"
echo "FireFox Profile Detected" # dispaly the FF profile detection
else
echo "WARNING: No FireFox Profile Detected"
exit 0
fi
# Run each suite listed in wsas_test_suites.txt, appending its surefire
# summary to the combined report and recording which suites had
# failures / errors / skips.
for i in $var; do
# echo $i >> reports/tmp.txt
echo $i
# A leading "#" marks a suite to be skipped.
if echo "$i" | grep -q "#\<" #check whether the test is marked for skipping
then
echo "WARNING: Skipping $i" # display the name of the test with the skip tag
continue
fi
# One arg (the FF profile): run maven online; a "-o" anywhere in the
# args switches maven to offline mode.
if test $# = 1;then
mvn clean install -DfirefoxProfileTemplate=$ffprofile -Dtest.suite=$i
else if echo "$@" | grep -q "\-o";then
echo "offline mode"
mvn clean install -o -DfirefoxProfileTemplate=$ffprofile -Dtest.suite=$i
fi
fi
# Append this suite's surefire output under a banner.
echo "\n\n##################################################
$i
###################################################">> reports/surefire_report.txt
cat target/surefire-reports/org.wso2.carbon.web.test.wsas.AllTests.txt >> reports/surefire_report.txt
# The "Tests run: N, Failures: N, Errors: N, Skipped: N" summary line;
# fields f5/f7/f9 below are the failure/error/skip counts.
egrep "Tests run:" target/surefire-reports/org.wso2.carbon.web.test.wsas.AllTests.txt > reports/tmp.txt
#list classes with Failures
cut -d " " -f5 reports/tmp.txt | cut -d "," -f1 > reports/temp.txt
count temp
if [ ${total} -gt 0 ];then
echo $i>>reports/tempFailures.txt
fi
#list classes with Errors
cut -d " " -f7 reports/tmp.txt | cut -d "," -f1 > reports/temp.txt
count temp
if [ ${total} -gt 0 ];then
echo $i>>reports/tempErrors.txt
fi
#list classes with Skips
cut -d " " -f9 reports/tmp.txt | cut -d "," -f1 > reports/temp.txt
count temp
if [ ${total} -gt 0 ];then
echo $i>>reports/tempSkips.txt
fi
rm reports/temp.txt
done
# make the final report
#-----------------------------------------------------------------------------
# Collect every per-suite summary line, then split its columns into one
# file per metric (f3=run, f5=failures, f7=errors, f9=skipped).
egrep "Tests run:" reports/surefire_report.txt > reports/tmp1.txt
cut -d " " -f3 reports/tmp1.txt | cut -d "," -f1 > reports/run.txt
cut -d " " -f5 reports/tmp1.txt | cut -d "," -f1 > reports/Failures.txt
cut -d " " -f7 reports/tmp1.txt | cut -d "," -f1 > reports/Errors.txt
cut -d " " -f9 reports/tmp1.txt | cut -d "," -f1 > reports/Skipped.txt
echo "\n\n##################################################
Final Report
##################################################">> reports/surefire_report.txt
# Total each metric (count sums reports/<name>.txt into the global $total).
count run
run=${total}
count Failures
failures=${total}
count Errors
errors=${total}
count Skipped
skipped=${total}
after="$(date +%s)"
elapsed_seconds="$(expr $after - $before)"
# NOTE(review): these echoes rely on an sh whose echo interprets \n/\t
# (e.g. dash); under bash they would print the escapes literally.
echo "\nTotal Tests Run : ${run}">> reports/surefire_report.txt
echo "Total Failures : ${failures}">> reports/surefire_report.txt
echo "Total Errors : ${errors}">> reports/surefire_report.txt
echo "Total Skipped : ${skipped}">> reports/surefire_report.txt
echo "Total Elapsed Time : ${elapsed_seconds} sec" >> reports/surefire_report.txt
echo "\n---------------------------------------------------">> reports/surefire_report.txt
# Overall verdict: errors outrank failures, which outrank skips.
if [ $errors -gt 0 ];then
echo "\t\t\tTEST ERROR!">> reports/surefire_report.txt
else if [ $failures -gt 0 ];then
echo "\t\t\tTEST FAILURE!">> reports/surefire_report.txt
else if [ $skipped -gt 0 ];then
echo "\t\t\tTESTS SKIPPED!">> reports/surefire_report.txt
else
echo "\t\t\tTEST SUCESSFULL!">> reports/surefire_report.txt
fi
fi
fi
echo "---------------------------------------------------">> reports/surefire_report.txt
#list error/failure/skipped suites
if [ $errors -gt 0 ];then
echo "\n-------------------Suites with errors--------------">> reports/surefire_report.txt
cat reports/tempErrors.txt >> reports/surefire_report.txt
fi
if [ $failures -gt 0 ];then
echo "\n-------------------Suites with failures------------">> reports/surefire_report.txt
cat reports/tempFailures.txt >> reports/surefire_report.txt
fi
if [ $skipped -gt 0 ];then
echo "\n-------------------Suites with skips--------------">> reports/surefire_report.txt
cat reports/tempSkips.txt >> reports/surefire_report.txt
fi
# Move the finished report into surefire's directory, then clean up.
mv -t target/surefire-reports reports/surefire_report.txt
rm -r reports
| true
|
f0c0df46f756b25796728fb8f1223d5a45713f40
|
Shell
|
jadedgnome/newsbeuter-dangerzone
|
/getimgs.sh
|
UTF-8
| 1,086
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
# Create a macro in your /.newsbeuter/config file
# macro o set external-url-viewer "dehtml >> ~/imagetemp/imglinks.txt" ; show-urls ; next-unread ; set external-url-viewer "urlview"
# therefore anything you type ,o (see macro support http://www.newsbeuter.org/doc/newsbeuter.html#_macro_support ) over
# has its text saved to a straight text file. Then you run this bash script on exiting newsbeuter.
# Exit if the work directory is missing — the original continued in the
# caller's cwd and could have processed an unrelated imglinks.txt there.
cd ~/imagetemp || exit
if [ -f imglinks.txt ]; then
# Extract every space/quote-delimited token that looks like an image
# link (jpeg/jpg/png/gif). One awk pass with an alternation replaces
# the four near-identical `cat | awk` pipelines of the original.
awk -F "[ \"]" '{ for (i = 1; i <= NF; i++) if ($i ~ /\.(jpe?g|png|gif)/) print $i }' imglinks.txt > imglinks2.txt
# use wget to download all those images and delete the old files.
wget -i imglinks2.txt
rm imglinks.txt imglinks2.txt
fi
| true
|
59a02945d06f077ad6738209ddd4d664d9e8d3b1
|
Shell
|
kunphiphitb/OS-AUTO-SCRIPT
|
/menu/debian8/menu
|
UTF-8
| 5,124
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Modified by kunphiphit
# Interactive admin menu (labels in Thai) for a Debian/Ubuntu VPN-SSH
# management tool: prints system info, then the numbered menu of actions.
if [[ -e /etc/debian_version ]]; then
OS=debian
RCLOCAL='/etc/rc.local'
else
echo -e "\e[33;1m IT LOOKS LIKE YOU ARE NOT RUNNING THIS INSTALLER ON DEBIAN, UBUNTU \e[0m"
exit
fi
# Gather CPU, RAM, swap and uptime details from /proc and standard tools.
cname=$( awk -F: '/model name/ {name=$2} END {print name}' /proc/cpuinfo )
cores=$( awk -F: '/model name/ {core++} END {print core}' /proc/cpuinfo )
freq=$( awk -F: ' /cpu MHz/ {freq=$2} END {print freq}' /proc/cpuinfo )
tram=$( free -m | awk 'NR==2 {print $2}' )
swap=$( free -m | awk 'NR==4 {print $2}' )
up=$(uptime|awk '{ $1=$2=$(NF-6)=$(NF-5)=$(NF-4)=$(NF-3)=$(NF-2)=$(NF-1)=$NF=""; print }')
echo -e "\e[33;1m \e[0m"
echo -e "\e[33;1m \e[0m"
echo -e "\e[032;1mCPU Model:\e[0m $cname"
echo -e "\e[032;1mNumber Of Cores:\e[0m $cores"
echo -e "\e[032;1mCPU Frequency:\e[0m $freq MHz"
echo -e "\e[032;1mTotal Amount Of RAM:\e[0m $tram MB"
echo -e "\e[032;1mTotal Amount Of Swap:\e[0m $swap MB"
echo -e "\e[032;1mSystem Uptime:\e[0m $up"
# Render the numbered menu. The option labels are Thai runtime strings
# and are reproduced unchanged below (create/renew/ban/delete accounts,
# check logins, server speed/RAM, ports, auto-reboot, logs, ...).
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m a \e[0m: \e[33;1m เมนูทั้งหมด \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 1 \e[0m: \e[33;1m สร้างบัญชี \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 2 \e[0m: \e[33;1m สร้างบัญชีแบบสุ่ม \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 3 \e[0m: \e[33;1m สร้างบัญชีทดลองใช้ \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 4 \e[0m: \e[33;1m ต่ออายุบัญชี \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 5 \e[0m: \e[33;1m เปลี่ยนรหัสผ่านบัญชี \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 6 \e[0m: \e[33;1m แบนบัญชีผู้ใช้ \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 7 \e[0m: \e[33;1m ปลดล็อคบัญชีผู้ใช้ที่ถูกแบน\e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 8 \e[0m: \e[33;1m ล็อคบัญชีผู้ใช้ \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 9 \e[0m: \e[33;1m ปลดล็อกบัญชีผู้ใช้ \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 10 \e[0m: \e[33;1m ลบบัญชีผู้ใช้ \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 11 \e[0m: \e[33;1m รายละเอียดบัญชี \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 12 \e[0m: \e[33;1m แสดงรายการผู้ใช้ \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 13 \e[0m: \e[33;1m ตรวจสอบการเข้าสู่ระบบ \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 14 \e[0m: \e[33;1m ตรวจสอบล็อกอิน \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 15 \e[0m: \e[33;1m ตัดเซสชั่นการเข้าสู่ระบบเกิน \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 16 \e[0m: \e[33;1m แสดงบัญชีผู้ใช้ที่หมดอายุแล้วทั้งหมด \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 17 \e[0m: \e[33;1m ตรวจสอบบัญชีผู้ใช้ที่หมดอายุ \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 18 \e[0m: \e[33;1m ลบบัญชีผู้ใช้ที่หมดอายุแล้ว \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 19 \e[0m: \e[33;1m ล็อคบัญชีผู้ใช้ที่หมดอายุแล้ว \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 20 \e[0m: \e[33;1m ตรวจสอบบัญชีผู้ใช้ที่ถูกล็อก \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 21 \e[0m: \e[33;1m ตรวจสอบบัญชีผู้ใช้ที่ถูกแบน \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 22 \e[0m: \e[33;1m ตรวจสอบความเร็วเซิร์ฟเวอร์ \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 23 \e[0m: \e[33;1m รายละเอียดของระบบ \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 24 \e[0m: \e[33;1m ตรวจสอบการใช้ RAM \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 25 \e[0m: \e[33;1m แก้ไขพอร์ตเซิร์ฟเวอร์ \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 26 \e[0m: \e[33;1m ตั้งค่ารีบูตอัตโนมัติ \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 27 \e[0m: \e[33;1m ดูบันทึกการติดตั้ง \e[0m"
echo -e "\e[31;1m\e[0m"
echo -e "\e[031;1m\e[0m\e[32;1m 28 \e[0m: \e[33;1m ตรวจสอบและแก้ใข VPS \e[0m"
| true
|
d5265721fdbc6cf347ab4d05f618884815ba4ecb
|
Shell
|
kaicarver/tools
|
/commands.sh
|
UTF-8
| 441
| 2.6875
| 3
|
[] |
no_license
|
# Some commands I use from my history and don't want to lose
# (a personal cheat-sheet of one-liners, not meant to run top-to-bottom)
cd ~
# status about all projects
git-sum2/git-summary -lq
# more status about list of projects
# (prints each repo name followed by its "ahead of origin" line, if any)
for r in blog leaflet clock geotools bootcamp_python_kai lapeste yuansu-react uketabs tools; do echo $r : `git -C $r status | grep ahead`; done
# push it
for r in blog leaflet clock geotools bootcamp_python_kai lapeste yuansu-react uketabs tools; do echo $r : `git -C $r push`; done
| true
|
0a921152a36dcb4a0d046dde2c79770637663bc5
|
Shell
|
frescogo/analise
|
/stats/all.sh
|
UTF-8
| 426
| 2.84375
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# ./all.sh ../jogos/Bolivar/20190405/ > ranking.md
# Post-process in vim with :%!sort -n -r -k3
# Emits a fenced markdown block of per-game stats; histogram output goes
# to <games-dir>/stats/.
echo "\`\`\`"
mkdir -p "$1/stats/"
echo "ESQ DIR PTS QDS 300+ 100+ 50-"
echo "--------------------------------------------------------------"
# "$1" is quoted: the original unquoted $1/*.txt word-split directory
# paths containing spaces before globbing.
for i in "$1"/*.txt; do
#echo $i
base=$(basename "$i" .txt)
# parse.lua writes /tmp/<base>.py, which histogram.py then consumes.
lua5.3 parse.lua "$i" /tmp
python3 histogram.py "/tmp/$base.py" "$1/stats/"
done
echo "\`\`\`"
| true
|
5b01961b7327dbc15374951a5746cd7c6b014be6
|
Shell
|
MNXANL/GRAU-LP
|
/CL_Practica/run.sh
|
UTF-8
| 344
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
# Build pipeline for the PCCTS practice compiler:
#   1. run ANTLR (PCCTS) on the grammar,
#   2. generate the DLG scanner,
#   3. compile everything and run the result if compilation succeeds.
antlr -gt mountains.g
echo ""
dlg parser.dlg scan.c
echo ""

if g++ -w -I /usr/include/pccts -o practica mountains.c scan.c err.c
then
  echo "COMPILATION SUCCESFUL; EXECUTING PROGRAM"
  echo "****************************************"
  ./practica
else
  echo ""
  echo "COMPILATION HAS FAILED"
  echo "**********************"
fi
| true
|
53d12d13c2779eb8f8a5ce3ee0b1a5e9cabe021d
|
Shell
|
lovesa/serverscripts
|
/pgpool/pgpool_remote_start
|
UTF-8
| 483
| 3.5625
| 4
|
[] |
no_license
|
#! /bin/sh
#
# Start PostgreSQL on the recovery target node
#
# Invoked by pgpool with two arguments:
#   $1 - remote host to start
#   $2 - remote PostgreSQL data directory (stored but unused below;
#        kept for pgpool's calling convention)
# Provides the `e` (log) and `error` (log + exit) helpers used below.
. /opt/serverscripts/utils/functions.sh
if [ $# -ne 2 ]
then
echo "pgpool_remote_start remote_host remote_datadir"
exit 1
fi
DEST=$1
DESTDIR=$2
# NOTE(review): PGCTL is defined but the start below goes through the
# init.d service script instead — confirm which path is intended.
PGCTL=/usr/pgsql-9.1/bin/pg_ctl
DEFLOG=/var/log/pgpool/recovery.log
ALLOWOUT=1
e "Starting postgres on $DEST..."
# Start PostgreSQL remotely, appending all output to the recovery log.
ssh $DEST "/etc/init.d/postgresql-9.1 start" >> $DEFLOG 2>&1
if [ $? -ne 0 ]; then
error "Error during service start on $DEST" 1
fi
e "Done"
exit 0
| true
|
407d486f1ef77906cc06304e8e433089e876b598
|
Shell
|
jsp1256/qBittorrent_cross_complie
|
/shell/qBittorrent_install.sh
|
UTF-8
| 4,338
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
##qBittorrent deployment/installation script
##qBittorrent_install.sh
##version: 1.3
##### Configuration #####
USB_PATH=""
SOFT_PATH=""
##### Helper functions begin #####
##### Detect the external (USB) storage mount point #####
# Sets the globals USB_PATH (mount root) and SOFT_PATH (install dir);
# exits if no mounted USB partition can be found.
config_USB_PATH(){
if [ -z "$USB_PATH" ]; then
# Find the /mnt/<name> mount root of the external USB storage from
# `df` output; the repeated grep passes de-duplicate matches.
USB_PATH=`df -h | grep -no "/mnt/[0-9_a-zA-Z]*" | grep -no "/mnt/[0-9_a-zA-Z]*" | grep -o "1:/mnt/[0-9_a-zA-Z]*" | grep -o "/mnt/[0-9_a-zA-Z]*"`
USB_SOFTPATH="$USB_PATH/opt"
if [ -z "$USB_PATH" ]; then
echo "未探测到已挂载的USB分区"
exit 0
fi
fi
SOFT_PATH=$USB_PATH/qbittorrent
}
##### Install the boot-time (OpenWrt init.d) startup script #####
## Args: $1: USB mount point
config_startup(){
# The heredoc delimiter is unquoted on purpose so $1 is expanded now,
# baking the USB path into the generated init script.
# NOTE(review): LD_LIBRARY_PATH in the generated script is hard-coded
# to /mnt/mmcblk0p1 rather than derived from $1 — confirm intended.
cat>/etc/init.d/qBittorrent<<EOF
#!/bin/sh /etc/rc.common
START=99
boot(){
start
}
start() {
cd $1/qBittorrent/bin
export HOME=/root
export LD_LIBRARY_PATH=/mnt/mmcblk0p1/qbittorrent/lib
./qbittorrent-nox &
}
EOF
chmod a+x /etc/init.d/qBittorrent
chmod a+x $1/qBittorrent/bin/qbittorrent-nox
# Register the init script to run at boot (OpenWrt rc.common "enable").
/etc/init.d/qBittorrent enable
}
##### Write the qBittorrent configuration file #####
## Args: $1: USB mount point  $2: program install directory
# Generates qBittorrent.conf under the install dir (downloads land on
# the USB drive, WebUI on port 8080) and symlinks it into /root where
# qbittorrent-nox (HOME=/root) expects to find its settings.
config_qbittorrent(){
mkdir -p $1/Downloads
mkdir -p $2/share/Settings/qBittorrent/
cat>$2/share/Settings/qBittorrent/qBittorrent.conf<<EOF
[AutoRun]
enabled=false
program=
[LegalNotice]
Accepted=true
[Network]
Cookies=@Invalid()
[Preferences]
Bittorrent\AddTrackers=false
Bittorrent\MaxRatioAction=0
Bittorrent\PeX=true
Connection\GlobalDLLimitAlt=10
Connection\GlobalUPLimitAlt=10
Downloads\PreAllocation=false
Downloads\SavePath=$1/Downloads/
Downloads\ScanDirsV2=@Variant(\0\0\0\x1c\0\0\0\0)
Downloads\TempPath=$1/Downloads/temp/
Downloads\TempPathEnabled=true
DynDNS\DomainName=changeme.dyndns.org
DynDNS\Enabled=false
DynDNS\Password=
DynDNS\Service=0
DynDNS\Username=
General\Locale=zh
General\UseRandomPort=false
MailNotification\email=
MailNotification\enabled=false
MailNotification\password=
MailNotification\req_auth=false
MailNotification\req_ssl=false
MailNotification\smtp_server=smtp.changeme.com
MailNotification\username=
WebUI\HTTPS\Enabled=false
WebUI\LocalHostAuth=true
WebUI\Port=8080
WebUI\ServerDomains=*
WebUI\UseUPnP=true
WebUI\Username=admin
EOF
rm -rf /root/Settings/qBittorrent
mkdir -p /root/Settings
# Symlink so the daemon finds its settings on the USB install dir.
ln -s $2/share/Settings/qBittorrent/ /root/Settings/qBittorrent
if [ $? -ne 0 ]; then
echo "无法创建符号链接,请确保以root身份执行程序!"
exit 0;
fi
}
##### Unpack the program archive #####
## Args: $1: program install directory
# Expects qb_release.tar.gz next to this script; extracts it to ./install
# and moves that into place at $1 (replacing any previous install).
extract_data(){
if [ -f "qb_release.tar.gz" ]; then
echo 解压qb_release.tar.gz...
tar xzf qb_release.tar.gz
if [ $? -ne 0 ]; then
echo "解压程序文件失败,请确认是否有足够的空间"
exit 0;
fi
rm -rf $1
mv install/ $1
else
echo 找不到qb_release.tar.gz,请确认其是否放置相同目录下!
exit 0
fi
}
##### Append PATH / LD_LIBRARY_PATH exports to /etc/profile #####
## Args: $1: program install directory
config_env(){
# NOTE(review): $PATH expands *now*, freezing the installer's PATH
# into /etc/profile — confirm this is intended.
echo "export PATH=$PATH:$1/bin" >> /etc/profile
echo "export LD_LIBRARY_PATH=$1/lib" >> /etc/profile
# Use the POSIX `.` builtin instead of bash's `source`: this script
# runs under #!/bin/sh, where `source` is not guaranteed to exist.
. /etc/profile
}
##### Start qbittorrent-nox in the background and print WebUI info #####
# (the original header comment said "configure environment variables",
# a copy-paste of the previous section's label)
start_qbittorrent(){
qbittorrent-nox > /dev/null 2>&1 &
# NOTE(review): $? here is the status of *launching* the background
# job (almost always 0), not of qbittorrent-nox itself.
if [ $? -ne 0 ]; then
echo "qBittorrent-nox启动失败,请手动启动"
exit 0;
else
echo "已经成功后台启动qBittorrent-nox"
# Ask OpenWrt's ubus for the LAN IPv4 address to show in the URL hint.
LAN_IP=$(ubus call network.interface.lan status | grep \"address\" | grep -oE '[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}')
if [ -z "$LAN_IP" ]; then
echo "网页访问请输入http://路由器IP地址:8080"
else
echo "网页访问请输入http://$LAN_IP:8080"
fi
echo "默认用户名:admin"
echo "默认密码:adminadmin"
fi
}
##### Helper functions end #####
##### Main entry point #####
# Detect the external storage mount point
config_USB_PATH
# Unpack the program files
extract_data $SOFT_PATH
# Configure qBittorrent
echo "正在配置qBittorrent"
config_qbittorrent $USB_PATH $SOFT_PATH
# Install the boot-time startup script
config_startup $USB_PATH
# Configure environment variables
config_env $SOFT_PATH
# Launch qBittorrent
echo "正在启动qBittorrent-nox"
start_qbittorrent
| true
|
65987a147290e7cf8cda103a86684c3b5767a84b
|
Shell
|
otusex/ku5svb
|
/packer/scripts/stage-1-kernel-update.sh
|
UTF-8
| 1,147
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Packer provisioning, stage 1: build Linux 5.9.x from source on a
# CentOS 7 guest, install it, regenerate GRUB and reboot into it.
cd /usr/src/kernels/
# update packages
yum update -y
# install bc (needed by the kernel build system)
yum install bc -y
# install repo epel-release
yum install -y epel-release
# install Dev tools
yum groupinstall 'Development Tools' -y
# Required to install the kernel from source
yum install -y openssl-devel elfutils-libelf-devel
# Install gcc 8.x for install the kernel 5.x from source
yum install -y centos-release-scl devtoolset-8
yum install -y devtoolset-8
source /opt/rh/devtoolset-8/enable
# Download source kernel
# (the archive is really .tar.xz; `tar -xf` auto-detects the compression)
curl -o kernel.tgz "https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.9.3.tar.xz"
tar -xf kernel.tgz
curl -o patch.xz https://cdn.kernel.org/pub/linux/kernel/v5.x/patch-5.9.3.xz
unxz patch.xz
cd linux-5*
# NOTE(review): -R *reverse*-applies the 5.9.3 patch to the already-5.9.3
# tree (i.e. steps the source back toward 5.9) — confirm this is intended.
patch -R -p1 < ../patch
# Use old kernel config
cp /boot/config* .config
# Accept defaults for every new config option non-interactively.
yes "" | make oldconfig
# assembly kernel, modules, headers, firmware
make -j4 && make modules -j4 && make modules_install -j4 && make headers_install ARCH=x86_64 INSTALL_HDR_PATH=/usr -j4 && make install -j 4
# Update GRUB
grub2-mkconfig -o /boot/grub2/grub.cfg && grub2-set-default 0
echo "Grub update done."
# Reboot VM
shutdown -r now
| true
|
b3a443e1045c2892c98cfc3db661905639bfb9b0
|
Shell
|
optionalg/dotfiles-60
|
/.profile
|
UTF-8
| 910
| 2.859375
| 3
|
[] |
no_license
|
# Login-shell profile: PATH additions and per-tool environment setup.
# Machine-specific (macOS + MacPorts/Homebrew paths are hard-coded).
# Prepend ~/.local/bin when present.
if [ -d "$HOME/.local/bin" ] ; then
export PATH="$HOME/.local/bin:$PATH"
fi
# CUDA toolchain, if installed.
[ -d /usr/local/cuda/bin ] && export PATH=/usr/local/cuda/bin:$PATH
# MacPorts binaries take precedence over system paths.
export PATH=/opt/local/bin:/opt/local/sbin:$PATH
# Homebrew-installed Go shims.
[ -d /usr/local/opt/go/libexec/bin ] && export PATH=$PATH:/usr/local/opt/go/libexec/bin
# FSL Setup
FSLDIR=/opt/local/fsl
if [ -d "$FSLDIR" ]; then
PATH=${FSLDIR}/bin:${PATH}
export FSLDIR PATH
. ${FSLDIR}/etc/fslconf/fsl.sh
fi
# EDITOR
export HOMEBREW_EDITOR='subl -w'
export JULIA_EDITOR='subl -w'
#pyenv setup
# export PYENV_ROOT="$HOME/.pyenv"
# export PATH="$PYENV_ROOT/bin:$PATH"
# eval "$(pyenv init -)"
# eval "$(pyenv virtualenv-init -)"
#rbenv setup
export PATH="$HOME/.rbenv/bin:$PATH"
eval "$(rbenv init -)"
# homebrew github token
[ -f ~/.config/homebrew ] && source ~/.config/homebrew
# hub alias
eval "$(hub alias -s)"
# miniconda
export PATH="/Users/Randy/miniconda3/bin:$PATH"
| true
|
316909387a7415a6badb18a6fe6b4738369bb505
|
Shell
|
kamath/ANTsDoc
|
/Tex2/runexamplesandtests.sh
|
UTF-8
| 339
| 3.359375
| 3
|
[] |
no_license
|
# Run every example script under examplesandtests/ and, when a companion
# test script exists, run it against the example's output prefix.
for ee in examplesandtests/Ex*.sh ; do
nm=$(basename "$ee")
# Output prefix: "output" plus the example name without its extension
# (parameter expansion replaces the old `echo | cut` pipeline).
op=output${nm%%.*}
bash "$ee" > /dev/null
testnm=examplesandtests/test${nm}
if [[ -s "$testnm" ]] ; then
bash "$testnm" "$op"
else
echo "$testnm does not exist"
fi
# $? here is the status of the test script (or of the echo above when no
# test exists) — NOT of the example run itself; preserved from original.
echo example $nm status $?
done
| true
|
18756a80fa67005cf161e43888b86706ddf1750b
|
Shell
|
rochakagrawal89/WLST
|
/list_server_health.sh
|
UTF-8
| 1,856
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
# Collect WebLogic server health metrics via a WLST script and write them
# to a timestamped CSV under ${ED}/list_server_health/. CSVs older than
# one day are pruned at the end. Runs on the OSB/WebLogic host.
FILE="list_server_health_$(date +%Y%m%d_%H%M%S).csv"
FILE1="list_server_health_stuck_threads_$(date +%Y%m%d_%H%M%S).csv"
ED=/u01/oracle/script/scripts/splunk
MW_HOME=/u01/Oracle/Middleware
ORACLE_HOME="${MW_HOME}/Oracle_OSB1"
WL_HOME="${MW_HOME}/wlserver_10.3"
# Setup Common Environment
WLS_NOT_BRIEF_ENV=true
. "${WL_HOME}/server/bin/setWLSEnv.sh"
CLASSPATH="${CLASSPATH}${CLASSPATHSEP}${FMWLAUNCH_CLASSPATH}${CLASSPATHSEP}${DERBY_CLASSPATH}${CLASSPATHSEP}${DERBY_TOOLS}${CLASSPATHSEP}${POINTBASE_CLASSPATH}${CLASSPATHSEP}${POINTBASE_TOOLS}"
CLASSPATH=$CLASSPATH:$ORACLE_HOME/modules/com.bea.common.configfwk_1.7.0.0.jar:$ORACLE_HOME/lib/sb-kernel-api.jar:$ORACLE_HOME/lib/sb-kernel-impl.jar:$WL_HOME/server/lib/weblogic.jar:$ORACLE_HOME/lib/alsb.jar;
export CLASSPATH
if [ "${WLST_HOME}" != "" ] ; then
WLST_PROPERTIES="-Dweblogic.wlstHome='${WLST_HOME}'${WLST_PROPERTIES}"
export WLST_PROPERTIES
fi
JVM_ARGS="-Dprod.props.file='${WL_HOME}'/.product.properties ${WLST_PROPERTIES} ${JVM_D64} ${MEM_ARGS} ${CONFIG_JVM_ARGS}"
# Run the WLST health-query script; raw output goes to a temp file.
$ORACLE_HOME/common/bin/wlst.sh ${ED}/list_server_health.py > ${ED}/list_server_health.tmp
# Strip the WLST startup banner: delete everything through the first line
# that is exactly "Test" (a sentinel printed by the .py script), then
# drop blank lines.
sed -i '0,/^Test$/d' ${ED}/list_server_health.tmp
sed '/^$/d' ${ED}/list_server_health.tmp > ${ED}/list_server_health1.tmp
# Emit the CSV header, then the data rows minus the trailing line.
echo DomainName,ServerName,OpenSocketsCurrentCount,StandbyThreadCount,Throughput,HoggingThreadCount,ExecuteThreadTotalCount,ExecuteThreadIdleCount > ${ED}/list_server_health/$FILE
sed '$d' ${ED}/list_server_health1.tmp >> ${ED}/list_server_health/$FILE
chmod 755 ${ED}/list_server_health/$FILE
rm -rf ${ED}/list_server_health1.tmp ${ED}/list_server_health.tmp
#$ORACLE_HOME/common/bin/wlst.sh ${ED}/list_server_health_stuck_threads.py > ${ED}/list_server_health_stuck_threads.csv
#sed -i '0,/^Test$/d' ${ED}/list_server_health_stuck_threads.csv
# Retention: remove CSVs older than one day.
find ${ED}/list_server_health/* -mtime +1 -delete
| true
|
b5964c259ee32a54d4664efd95cb245c498a546f
|
Shell
|
usmanC9/fuel-plugin-lma-collector
|
/contrib/tools/common.sh
|
UTF-8
| 1,088
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ensure the "fuel nodes" dump file given as $1 exists. When it is
# missing, tell the operator how to generate it and abort the script
# with exit status 1; otherwise return successfully.
function check_fuel_nodes_file {
    [ -f "$1" ] && return 0
    echo "You must first run the following command on the Fuel master node:"
    echo "    fuel nodes > $1"
    exit 1
}
# Get IPs list of online nodes from 'fuel command' output.
function get_ready_nodes {
    # "fuel nodes" command output differs form Fuel 8 and 9 for online nodes: True/False and 0/1
    # Pipeline: keep rows whose status is "ready", print fields 5 and 9 of
    # the pipe-delimited table joined by ':', strip spaces, keep rows whose
    # online flag is 1/True, then print the first field.
    # NOTE(review): field 5 is presumably the node IP and field 9 the
    # online flag — verify against the installed Fuel version's table.
    fuel nodes | grep ready | awk -F '|' -vOFS=':' '{print $5,$9 }'|tr -d ' '|grep -E ':1|:True'|awk -F ':' '{print $1}'
}
| true
|
bb6bafb79867a6c184f99dbbad77a168009e5df1
|
Shell
|
k3nno88/forcrawler
|
/run_pararius_sell.sh
|
UTF-8
| 1,374
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/zsh
#run the pararius scraper and update database accordingly
#eval "$(conda shell.zsh hook)"
#conda activate scraping_env
# Cron job: log start, connect VPN, refresh listing active/sold flags in
# MySQL, run the Scrapy spider, then log completion.
echo $(date '+[%d:%m:%Y] %H:%M'): Run pararius sell >> ~/cron.log
cd ~/pararius_sell
# Connect NordVPN before scraping.
nordvpn c
# Remember the highest row id before this run; rows inserted later
# (Id > $size) are treated as the current crop of active listings.
size=$(mysql --login-path=server -s -N scraping_data -e "select max(Id) from pararius_sell;")
echo "UPDATE ACTIVE STATUS"
# Mark every currently-active listing as sold today ...
mysql --login-path=server -s -N scraping_data -e "update pararius_sell set active = 0, SoldSince = CURDATE() where active = 1;"
# ... then deactivate single-occurrence URLs and re-activate rows above
# the remembered id. NOTE(review): this runs BEFORE the scrape, so it
# appears to act on the previous run's rows — confirm the intended order.
mysql --login-path=server -s -N scraping_data -e "update pararius_sell as p inner join (select min(Id) Id from pararius_sell where Active = 1 group by Url having count(*) = 1)pp on pp.Id = p.Id set p.Active = 0, SoldSince = CURDATE();update pararius_sell set Active = 1, SoldSince = null where Id > $size;"
echo "SCRAPE"
scrapy crawl house
#echo "DELETE AND READ ID"
#something wrong with "
#mysql --login-path=local -s -N scraping_data -e "ALTER TABLE pararius_owner DROP COLUMN Id; ALTER TABLE pararius_owner ADD COLUMN Id INT NOT NULL AUTO_INCREMENT FIRST , ADD PRIMARY KEY (Id), ADD UNIQUE INDEX Id_UNIQUE (Id ASC) VISIBLE; ALTER TABLE pararius_sell DROP COLUMN Id; ALTER TABLE pararius_sell ADD COLUMN Id INT NOT NULL AUTO_INCREMENT, ADD PRIMARY KEY (Id), ADD UNIQUE INDEX Id_UNIQUE (Id ASC) VISIBLE;"
#nordvpn d
echo $(date '+[%d:%m:%Y] %H:%M'): Finish running pararius sell >> ~/cron.log
| true
|
af074da1d15fc3446418f556e45c16d2221b42d3
|
Shell
|
basarballioz/Bash-Script-Examples
|
/password_generator.sh
|
UTF-8
| 236
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
#Bballioz
#USING OPENSSL
# Prompt for a password length and a sample count, then print that many
# random passwords (lowercase hex), one per line.
# Reads: "<length> <count>" from stdin. Returns 1 on non-numeric input.
function password(){
read -p "Please enter the length of the password and how many samples you want: " Len Count
# Both values must be positive integers.
if ! [[ "$Len" =~ ^[0-9]+$ && "$Count" =~ ^[0-9]+$ ]]; then
echo "Invalid input: length and count must be positive integers" >&2
return 1
fi
# ceil(Len/2) random bytes yield at least Len hex characters, so any
# requested length works (the old fixed 48 bytes silently capped at 96).
local bytes=$(( (Len + 1) / 2 ))
for p in $(seq 1 "$Count");
do
openssl rand -hex "$bytes" | cut -c1-"$Len"
done
}
password
| true
|
02583119c9e8faf6ec6539ade881238d08d40b56
|
Shell
|
crotwell/recFunc
|
/paper/moma/xyplot.sh
|
UTF-8
| 1,416
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a GMT PostScript plot of crustal-thickness estimates (EARS vs.
# Li et al.) versus longitude for the MOMA profile. Each psxy/pstext call
# overlays one layer; -K keeps the plot open for more layers and -O marks
# an overlay, so the call order below is significant.
gmtset PLOT_DEGREE_FORMAT -D ANNOT_FONT_PRIMARY Times-Roman
gmtset LABEL_FONT Times-Roman LABEL_FONT_SIZE 12
NAME=moma
REGION=-92/-70/20/70
FILENAME=${NAME}H.ps
/bin/rm -f ${NAME}H.ps
touch ${NAME}H.ps
# Legend symbols + labels (square/star/diamond = Li et al. model variants,
# gray circle = EARS), drawn at fixed map coordinates.
psxy -K -JX6i/-4i -R${REGION} -Ss.1i -G0 >> $FILENAME <<END
-78 58
END
pstext -O -K -JX -R >> $FILENAME <<END
-77.3 58 9 0 1 ML Li et. al. Vp=6.5 Vp/Vs=1.73
END
psxy -O -K -JX -R -Sa.1i -G0 >> $FILENAME <<END
-78 60
END
pstext -O -K -JX -R >> $FILENAME <<END
-77.3 60 9 0 1 ML Li et. al. Vp=6.6 Vp/Vs=1.80
END
psxy -O -K -JX -R -Sd.1i -G0 >> $FILENAME <<END
-78 62
END
pstext -O -K -JX -R >> $FILENAME <<END
-77.3 62 9 0 1 ML Li et. al. Vp=6.6 Vp/Vs=1.84
END
psxy -O -K -JX -R -Sc.1i -G155 >> $FILENAME <<END
-78 64
END
pstext -O -K -JX -R >> $FILENAME <<END
-77.3 64 9 0 1 ML EARS
END
# Data layers: columns are pulled out of ${NAME}.txt with perl one-liners
# (field 5 = longitude; 6/7 = EARS thickness +/- error; 14 = Li value).
perl -nae 'print "$F[4] $F[5] $F[6] $F[6]\n"' ${NAME}.txt | psxy -O -K -H2 -JX -R -B2:"Longitude":/5:"Thickness (km)":WSen -Sc.1i -G155 -Ey >> ${FILENAME}
cat ${NAME}.txt | grep 1.73 | perl -nae 'print "$F[4] $F[13]\n"' | psxy -O -K -JX -R -Ss.1i -G0 >> ${FILENAME}
cat ${NAME}.txt | grep 1.80 | perl -nae 'print "$F[4] $F[13]\n"' | psxy -O -K -JX -R -Sa.1i -G0 >> ${FILENAME}
cat ${NAME}.txt | grep 1.84 | perl -nae 'print "$F[4] $F[13]\n"' | psxy -O -K -JX -R -Sd.1i -G0 >> ${FILENAME}
# Station labels, rotated 90 degrees; final overlay (no -K) closes the plot.
perl -nae '$x=$F[4]+.2;print "$x 30 9 90 1 MC $F[1]\n"' ${NAME}.txt | pstext -O -H2 -JX -R -Y1.1i >> ${FILENAME}
| true
|
3227aef2de284c2668efdb7f4000f1d53150b483
|
Shell
|
alienrobotarmy/bashthreads
|
/threads.sh
|
UTF-8
| 2,307
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Bash multi-threading library
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !! DO NOT EDIT THIS FILE !!
# !! This is a library to be included into your source !
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# Copyright (c) 2014 Jess Mahan
#
# This library has 3 main functions:
# thread_init <max threads>
# thread_create <command> <args>
# thread_wait (final)
#
# thread_init <max_threads>
# This function MUST be called ONLY ONCE, and BEFORE any other thread calls
#
# thread_create <command> <args>
# Spawn <max threads> of <command> <args>
# You should call thread_wait immediately after this.
#
# thread_wait
# Block until we are no longer at <max threads>
# When called with an argument, blocks parent until all threads are complete
#
# Usage example:
# (This will create 64 threads of function nslkup)
#
# function nslkup {
# dig +short $1
# }
# thread_init 64
# for i in $(cat hosts.txt)
# do
# thread_create nslkup ${i}
# if [ $? -eq 1 ]; then
# thread_wait
# thread_create nslkup ${i}
# fi
# done
# thread_wait final
#
# Initialise the threading library. Must be called exactly once, before
# any other thread_* function. $1 is the maximum number of concurrent
# threads; each slot of J_THREADS_MAP starts at 0.
function thread_init {
local slot
J_THREADS_MAX_THREADS=$1
# Unique run id (epoch seconds + 0-999 random suffix) used to namespace
# this run's per-thread lock files in /tmp.
J_THREADS_ID=$(date +%s)$((RANDOM%1000))
J_THREADS_THREAD_ID=0
export J_THREADS_MAP
for (( slot = 0; slot < J_THREADS_MAX_THREADS; slot++ ))
do
J_THREADS_MAP[slot]=0
done
}
# Remove every lock file belonging to this run (namespaced by
# J_THREADS_ID, so concurrent runs are unaffected).
function thread_cleanup {
rm -f /tmp/j-threads-${J_THREADS_ID}*.lck
}
# Count the currently-running threads for this run by counting its lock
# files. The count is reported via the return status (shell exit codes
# cap at 255, which bounds usable pool sizes).
function thread_count {
local f
local x=0
# Glob the lock files directly instead of parsing `ls` output; -e skips
# the literal unmatched pattern when no lock files exist.
for f in /tmp/j-threads-${J_THREADS_ID}-*.lck
do
[ -e "$f" ] && x=$((x+1))
done
return ${x}
}
# Block until the running-thread count drops below a threshold.
# No argument: wait until at least one slot is free (count < max).
# Any argument (e.g. "final"): wait until every thread has finished.
function thread_wait {
local count=0
local waitfor=${J_THREADS_MAX_THREADS}
# NOTE(review): $1 is unquoted — an argument containing spaces would
# break the test; any single-word argument enables wait-for-all.
if [ $1 ]; then waitfor=1; fi
thread_count; count=$?
# Poll the lock-file count (thread_count) until under the threshold.
while [ ${count} -ge ${waitfor} ]
do
thread_count; count=$?
sleep 0.05
done
}
# Internal: create this thread's lock file, run the given command, then
# remove the lock when the command finishes. Executed in the background
# by thread_create; the lock's presence is what thread_count counts.
function thread_run {
touch /tmp/j-threads-${J_THREADS_ID}-${J_THREADS_THREAD_ID}.lck
# "$@" (not $*) so arguments containing whitespace survive intact.
"$@"
rm -f /tmp/j-threads-${J_THREADS_ID}-${J_THREADS_THREAD_ID}.lck
}
# Spawn a background thread running the given command if a pool slot is
# free. Returns 0 on success, 1 when the pool is full (caller should
# thread_wait and retry, as shown in the usage example above).
function thread_create {
local count=0
thread_count; count=$?
if [ ${count} -lt ${J_THREADS_MAX_THREADS} ]; then
let $((J_THREADS_THREAD_ID++))
# Forward arguments verbatim; thread_run creates/removes the lock file.
thread_run "$@" &
return 0
else
return 1
fi
}
| true
|
5950922947ed22447f1d8c9c08839cb0ae06f837
|
Shell
|
phpc0de/baksetup
|
/mini/include/check_sw.sh
|
UTF-8
| 2,930
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Author: Alpha Eva <kaneawk AT gmail.com>
# Prepare a CentOS 6/7/8 host for source builds: enable EPEL, remove
# conflicting package groups, optionally set up iptables, then install
# the build/runtime dependency list one package at a time.
# Globals read: CentOS_ver, iptables_flag, CMSG/CEND (color codes).
installDepsCentOS() {
  # Re-enable any packages excluded in yum.conf so updates are not blocked.
  [ -e '/etc/yum.conf' ] && sed -i 's@^exclude@#exclude@' /etc/yum.conf
  # Uninstall the conflicting packages
  echo "${CMSG}Removing the conflicting packages...${CEND}"
  [ -z "`grep -w epel /etc/yum.repos.d/*.repo`" ] && yum -y install epel-release
  if [ "${CentOS_ver}" == '7' ]; then
    yum -y groupremove "Basic Web Server" "MySQL Database server" "MySQL Database client"
    systemctl stop firewalld && systemctl mask firewalld.service
  elif [ "${CentOS_ver}" == '6' ]; then
    yum -y groupremove "FTP Server" "PostgreSQL Database client" "PostgreSQL Database server" "MySQL Database server" "MySQL Database client" "Web Server"
  fi
  # CentOS >= 7 with iptables requested: swap firewalld for iptables-services.
  if [ ${CentOS_ver} -ge 7 >/dev/null 2>&1 ] && [ "${iptables_flag}" == 'y' ]; then
    yum -y install iptables-services
    systemctl enable iptables.service
    systemctl enable ip6tables.service
  fi
  echo "${CMSG}Installing dependencies packages...${CEND}"
  # Install needed packages
  pkgList="deltarpm gcc gcc-c++ make cmake autoconf libjpeg libjpeg-devel libjpeg-turbo libjpeg-turbo-devel libpng libpng-devel libxml2 libxml2-devel zlib zlib-devel libzip libzip-devel glibc glibc-devel krb5-devel libc-client libc-client-devel glib2 glib2-devel bzip2 bzip2-devel ncurses ncurses-devel libaio numactl numactl-libs readline-devel curl curl-devel e2fsprogs e2fsprogs-devel krb5-devel libidn libidn-devel openssl openssl-devel net-tools libxslt-devel libicu-devel libevent-devel libtool libtool-ltdl bison gd-devel vim-enhanced pcre-devel libmcrypt libmcrypt-devel mhash mhash-devel mcrypt zip unzip ntpdate sqlite-devel sysstat patch bc expect expat-devel oniguruma oniguruma-devel libtirpc-devel nss rsync rsyslog git lsof lrzsz psmisc wget which libatomic tmux"
  # One yum call per package so a single missing package does not abort
  # the whole transaction.
  for Package in ${pkgList}; do
    yum -y install ${Package}
  done
  [ ${CentOS_ver} -lt 8 >/dev/null 2>&1 ] && yum -y install cmake3
  yum -y update bash openssl glibc
}
# Build and install dependencies that must come from source tarballs in
# ${oneinstack_dir}/src: htop (CentOS only) and ICU when the installed
# icu-config is missing/too old. Writes ~/.oneinstack as a sentinel on
# success; kills the script on failure.
# Globals read: oneinstack_dir, OS, htop_ver, icu4c_ver, icu4c_ver2,
# Ubuntu_ver, THREAD, CFAILURE/CEND, PM.
installDepsBySrc() {
  pushd ${oneinstack_dir}/src > /dev/null
  if [ "${OS}" == 'CentOS' ]; then
    # install htop
    if ! command -v htop >/dev/null 2>&1; then
      tar xzf htop-${htop_ver}.tar.gz
      pushd htop-${htop_ver} > /dev/null
      ./configure
      make -j ${THREAD} && make install
      popd > /dev/null
      rm -rf htop-${htop_ver}
    fi
  else
    echo "No need to install software from source packages."
  fi
  # Build ICU when absent, when the system version is the ancient 3.x
  # series, or on Ubuntu 20 (whose packaged ICU is known-incompatible).
  if ! command -v icu-config > /dev/null 2>&1 || icu-config --version | grep '^3.' || [ "${Ubuntu_ver}" == "20" ]; then
    tar xzf icu4c-${icu4c_ver}_${icu4c_ver2}-src.tgz
    pushd icu/source > /dev/null
    ./configure --prefix=/usr/local
    make -j ${THREAD} && make install
    popd > /dev/null
    rm -rf icu
  fi
  # lsof doubles as a proxy check that the dependency install succeeded.
  if command -v lsof >/dev/null 2>&1; then
    echo 'already initialize' > ~/.oneinstack
  else
    echo "${CFAILURE}${PM} config error parsing file failed${CEND}"
    kill -9 $$
  fi
  popd > /dev/null
}
| true
|
e4b9ccd2c5e9a9b2f210d3ca6fea04fec06bbd4a
|
Shell
|
salhen/.dotfiles
|
/.config/polybar/scripts/power.sh
|
UTF-8
| 1,710
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Polybar/i3 power menu: present lock/logout/reboot/shutdown via dmenu
# and exec the chosen action.
# The here-doc below feeds the license text to `echo ''` (which ignores
# stdin) — a trick to embed a multi-line "comment" without # prefixes.
echo ''<<LICENSE
    The MIT License(MIT)
    Copyright(c), Tobey Peters, https://github.com/tobeypeters
    Permission is hereby granted, free of charge, to any person obtaining a copy of this software
    and associated documentation files (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:
    The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
    LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
    SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
LICENSE
# Power Options - logout, reboot, shutdown
declare -a options=("lock
logout
reboot
shutdown")
# Show the menu; dmenu prints the selected entry (or nothing) on stdout.
choice=$(echo -e "${options[@]}" | dmenu -l -i -p 'System:' -nb black -sb tomato -fn 'System San Francisco Display:size=10')
#'Shutdown' 'systemctl poweroff' -b 'Reboot' 'systemctl reboot' -b 'Logout' 'i3-msg exit'"
# Dispatch; each branch replaces this process via exec. Anything else
# (including an empty/cancelled dmenu selection) exits with status 1.
case $choice in
    lock)
        exec i3lock -c '#000000'
        ;;
    logout)
        exec i3-msg exit
        ;;
    reboot)
        exec i3-sensible-terminal -e systemctl reboot
        ;;
    shutdown)
        exec i3-sensible-terminal -e systemctl poweroff
        ;;
    *)
        exit 1
        ;;
esac
| true
|
7ec58c8f3ca7e8e3a99db3a65f8b41f039722cc0
|
Shell
|
vimalmanohar/kaldi-tfmask
|
/egs/babel/s5b/run-6-semisupervised-seg.sh
|
UTF-8
| 7,272
| 3.3125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# Copyright 2014 Vimal Manohar
# Apache 2.0
# Run DNN training on untranscribed data
# This uses approx 70 hours of untranscribed data
set -e #Exit on non-zero return code from any command
set -o pipefail #Exit if any of the commands in the pipeline will
#return non-zero return code
# Source Babel/Kaldi environment and configuration. Variables such as
# $unsup_nj, $weight_threshold, $do_supervised_tuning, and the dnn_*
# settings are presumably defined in these conf files — not visible here.
. conf/common_vars.sh || exit 1;
. ./lang.conf || exit 1;
. cmd.sh
. path.sh
# Can provide different neural net structure than for supervised data
. conf/common.semisupervised.limitedLP || exit 1
#debugging stuff
echo $0 $@
# Defaults; overridable via command-line flags parsed by parse_options.sh.
train_stage=-100
decode_dir=
ali_dir=
nj=
. parse_options.sh || exit 1
# Exactly one positional argument: the untranscribed data directory.
if [ $# -ne 1 ]; then
  echo "Usage: $0 [options] <untranscribed-data-dir>"
  echo
  echo "--nj <num_jobs>    # Number of parallel jobs for decoding untranscribed data"
  echo "--decode-dir <decode_directory>    # Decode directory with posteriors and best path done"
  echo "--ali-dir <alignment_directory>    # Alignment directory"
  echo "--weight-threshold <0.7>   # Frame confidence threshold for frame selection"
  echo "--do-supervised-tuning (default: true)   # Train only the last layer at the end."
  echo "e.g.: "
  echo "$0 --decode-dir exp/dnn_sgmm_combine/decode_train_unt.seg --ali-dir exp/tri6_nnet_ali data/train_unt.seg"
  exit 1
fi
untranscribed_datadir=$1
[ -z $nj ] && nj=$unsup_nj
###############################################################################
#
# Supervised data alignment
#
###############################################################################
# Default the alignment directory when the user did not pass --ali-dir.
# BUGFIX: this previously tested the undefined variable $alidir, so a
# user-supplied --ali-dir was always clobbered with the default.
if [ -z "$ali_dir" ]; then
  # If alignment directory is not done, use exp/tri6_nnet_ali as alignment
  # directory
  ali_dir=exp/tri6_nnet_ali
fi
if [ ! -f "$ali_dir/.done" ]; then
  echo "$0: Aligning supervised training data in exp/tri6_nnet_ali"
  [ ! -f exp/tri6_nnet/final.mdl ] && echo "exp/tri6_nnet/final.mdl not found!\nRun run-6-nnet.sh first!" && exit 1
  if [ ! -f exp/tri6_nnet_ali/.done ]; then
    steps/nnet2/align.sh --cmd "$decode_cmd" \
      --use-gpu no --transform-dir exp/tri5_ali --nj $train_nj \
      data/train data/lang exp/tri6_nnet exp/tri6_nnet_ali || exit 1
    touch exp/tri6_nnet_ali/.done
  fi
else
  echo "$0: Using supervised data alignments from $ali_dir"
fi
###############################################################################
#
# Unsupervised data decoding
#
###############################################################################
datadir=$untranscribed_datadir
dirid=`basename $datadir`
decode=exp/tri5/decode_${dirid}
# First pass: SAT (tri5) decode, only to obtain fMLLR transforms — the
# ".done" sentinel files throughout make every stage resumable.
if [ ! -f ${decode}/.done ]; then
  echo ---------------------------------------------------------------------
  echo "Spawning decoding with SAT models on" `date`
  echo ---------------------------------------------------------------------
  utils/mkgraph.sh \
    data/lang exp/tri5 exp/tri5/graph |tee exp/tri5/mkgraph.log
  mkdir -p $decode
  #By default, we do not care about the lattices for this step -- we just want the transforms
  #Therefore, we will reduce the beam sizes, to reduce the decoding times
  steps/decode_fmllr_extra.sh --skip-scoring true --beam 10 --lattice-beam 4\
    --nj $nj --cmd "$decode_cmd" "${decode_extra_opts[@]}"\
    exp/tri5/graph ${datadir} ${decode} |tee ${decode}/decode.log
  touch ${decode}/.done
fi
# Second pass: DNN decode of the untranscribed data, unless the caller
# already provided a decode dir (--decode-dir) with weights/best paths.
if [ -z $decode_dir ]; then
  decode=exp/tri6_nnet/decode_${dirid}
  [ ! -f exp/tri6_nnet/final.mdl ] && echo "exp/tri6_nnet/final.mdl not found!\nRun run-6-nnet.sh first!" && exit 1
  if [ ! -f $decode/.done ]; then
    echo "$0: Decoding unsupervised data from $untranscribed_datadir using exp/tri6_nnet models"
    mkdir -p $decode
    steps/nnet2/decode.sh --cmd "$decode_cmd" --nj $nj \
      --beam $dnn_beam --lat-beam $dnn_lat_beam \
      --skip-scoring true "${decode_extra_opts[@]}" \
      --transform-dir exp/tri5/decode_${dirid} \
      exp/tri5/graph ${datadir} $decode | tee $decode/decode.log
    touch $decode/.done
  fi
  echo "$0: Getting frame posteriors for unsupervised data"
  # Get per-frame weights (posteriors) by best path
  if [ ! -f $decode/.best_path.done ]; then
    $decode_cmd JOB=1:$nj $decode/log/best_path.JOB.log \
      lattice-best-path --acoustic-scale=0.1 \
      "ark,s,cs:gunzip -c $decode/lat.JOB.gz |" \
      ark:/dev/null "ark:| gzip -c > $decode/best_path_ali.JOB.gz" || exit 1
    touch $decode/.best_path.done
  fi
  model=`dirname $decode`/final.mdl
  # Per-frame confidence weights: posterior of the best-path pdf at each
  # frame, written (gzipped) to weights.JOB.gz for frame selection later.
  $decode_cmd JOB=1:$nj $decode/log/get_post.JOB.log \
    lattice-to-post --acoustic-scale=0.1 \
    "ark,s,cs:gunzip -c $decode/lat.JOB.gz|" ark:- \| \
    post-to-pdf-post $model ark,s,cs:- ark:- \| \
    get-post-on-ali ark,s,cs:- "ark,s,cs:gunzip -c $decode/best_path_ali.JOB.gz | ali-to-pdf $model ark,s,cs:- ark:- |" "ark:| gzip -c > $decode/weights.JOB.gz" || exit 1
else
  echo "$0: Using unsupervised data from $decode_dir"
  decode=$decode_dir
  for f in $decode/weights.1.gz $decode/best_path_ali.1.gz; do
    [ ! -f $f ] && echo "$0: Expecting $f to exist. Probably need to run local/combine_posteriors.sh first." && exit 1
  done
fi
###############################################################################
#
# Semi-supervised DNN training
#
###############################################################################
mkdir -p exp/tri6_nnet_semi_supervised
# Build training examples from supervised + unsupervised data, keeping
# unsupervised frames whose confidence exceeds $weight_threshold.
if [ ! -f exp/tri6_nnet_semi_supervised/.egs.done ]; then
  local/nnet2/get_egs_semi_supervised.sh $spk_vecs_opt \
    "${egs_gpu_opts[@]}" --io-opts "$egs_io_opts" \
    --transform-dir-sup exp/tri5_ali \
    --transform-dir-unsup exp/tri5/decode_${dirid} \
    --weight-threshold $weight_threshold \
    data/train $untranscribed_datadir data/lang \
    $ali_dir $decode exp/tri6_nnet_semi_supervised || exit 1;
  touch exp/tri6_nnet_semi_supervised/.egs.done
fi
# Train the p-norm DNN on the combined examples.
if [ ! -f exp/tri6_nnet_semi_supervised/.done ]; then
  steps/nnet2/train_pnorm.sh \
    --stage $train_stage --mix-up $dnn_mixup \
    --initial-learning-rate $dnn_init_learning_rate \
    --final-learning-rate $dnn_final_learning_rate \
    --num-hidden-layers $dnn_num_hidden_layers \
    --pnorm-input-dim $dnn_input_dim \
    --pnorm-output-dim $dnn_output_dim \
    --num-epochs $num_epochs \
    --num-epochs-extra $num_epochs_extra \
    --num-iters-final $num_iters_final \
    --cmd "$train_cmd" "${dnn_gpu_parallel_opts[@]}" \
    --transform-dir exp/tri5_ali \
    --egs-dir exp/tri6_nnet_semi_supervised/egs \
    data/train data/lang $ali_dir exp/tri6_nnet_semi_supervised || exit 1
  touch exp/tri6_nnet_semi_supervised/.done
fi
# Optional final step: retrain only the last layer (learning rate 0 for
# all hidden layers) on the supervised data.
if $do_supervised_tuning; then
  # Necessary only when semi-supervised DNN is trained using the unsupervised
  # data that was decoded using only the tri6_nnet system.
  if [ ! -f exp/tri6_nnet_supervised_tuning/.done ]; then
    learning_rates="0"
    for i in `seq 1 $[dnn_num_hidden_layers-1]`; do
      learning_rates="$learning_rates:0"
    done
    learning_rates="$learning_rates:0.0008"
    steps/nnet2/update_nnet.sh \
      --learning-rates $learning_rates \
      --cmd "$train_cmd" \
      "${dnn_update_gpu_parallel_opts[@]}" \
      --num-epochs 2 --num-iters-final 5 \
      --transform-dir exp/tri5_ali \
      data/train data/lang $ali_dir \
      exp/tri6_nnet_semi_supervised exp/tri6_nnet_supervised_tuning || exit 1
    touch exp/tri6_nnet_supervised_tuning/.done
  fi
fi
| true
|
4241d6efdcb794c6f1a3d05f62456ac78b1cc0b8
|
Shell
|
Bioconductor/BBS
|
/propagation/3.17/checkPublishedPkgs-data-annotation.sh
|
UTF-8
| 963
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# Compare md5sums of locally-built Bioconductor data/annotation packages
# against the copies published on the "cobra" web server (via ssh), and
# mail the diff to the maintainer.
set -e # Exit immediately if a simple command exits with a non-zero status
export LC_COLLATE="C" # to sort the result of local pathname expansion as on cobra
LOCAL_REPO_ROOT="$HOME/PACKAGES/3.17/data/annotation"
REMOTE_REPO_ROOT="/extra/www/bioc/packages/3.17/data/annotation"
PKG_FILEPATHS="src/contrib/*.tar.gz bin/windows/contrib/4.3/*.zip bin/macosx/big-sur-x86_64/contrib/4.3/*.tgz"
# sed pattern that strips the leading directory part of each md5sum line
# so local and remote listings compare on bare filenames.
CONTRIB_DIR=" \/.*\/"
LOCAL_MD5SUMS="$LOCAL_REPO_ROOT/md5sums.local.txt"
REMOTE_MD5SUMS="$LOCAL_REPO_ROOT/md5sums.cobra.txt"
MD5SUMS_DIFF="$LOCAL_REPO_ROOT/md5sums.diff"
rm -f $LOCAL_MD5SUMS $REMOTE_MD5SUMS $MD5SUMS_DIFF
# For each glob (expanded locally in both cases), hash the local files and
# the same paths on cobra, normalising away the differing directory roots.
for filepath in $PKG_FILEPATHS; do
	md5sum $LOCAL_REPO_ROOT/$filepath | sed -r "s/$CONTRIB_DIR/ /" >>$LOCAL_MD5SUMS
	ssh webadmin@cobra md5sum $REMOTE_REPO_ROOT/$filepath | sed -r "s/$CONTRIB_DIR/ /" >>$REMOTE_MD5SUMS
done
# An empty diff means local and published repos match; mail it either way.
diff $LOCAL_MD5SUMS $REMOTE_MD5SUMS >$MD5SUMS_DIFF
mail "hpages.on.github@gmail.com" -s "Result of $0 on $HOSTNAME" <$MD5SUMS_DIFF
| true
|
b4fac6a27e31b4587283d3e3aa6d74f9c98f0b7d
|
Shell
|
mikalv/elk-kubernetes
|
/docker/elasticsearch/pre-stop-hook.sh
|
UTF-8
| 1,008
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/sh
# Kubernetes preStop hook for an Elasticsearch data pod: exclude this
# node from shard allocation, then block until all its documents have
# migrated off, so the pod can be stopped without data loss.
set -e
#set -x
CLIENT_ENDPOINT="elasticsearch-logging:9200"
echo "Prepare stopping of ${HOSTNAME} pod"
echo "Prepare to migrate data of the node"
NODE_STATS=$(curl -s -XGET "http://${CLIENT_ENDPOINT}/_nodes/${HOSTNAME}/stats")
NODE_IP=$(echo "${NODE_STATS}" | jq -r ".nodes[] | select(.name==\"${HOSTNAME}\") | .host")
echo "Move all data from node ${NODE_IP}"
# Transient cluster setting: exclude this node (by name) from allocation,
# which makes ES relocate its shards to the remaining nodes.
curl -s -XPUT "http://${CLIENT_ENDPOINT}/_cluster/settings" -d "{
	\"transient\" :{
	    \"cluster.routing.allocation.exclude._name\" : \"${HOSTNAME}\"
	}
}"
echo
echo "Wait for node to become empty"
# Poll the node's document count once per second until it reaches zero.
DOC_COUNT=$(echo "${NODE_STATS}" | jq ".nodes[] | select(.name==\"${HOSTNAME}\") | .indices.docs.count")
while [ "${DOC_COUNT}" -gt 0 ]; do
    NODE_STATS=$(curl -s -XGET "http://${CLIENT_ENDPOINT}/_nodes/${HOSTNAME}/stats")
    DOC_COUNT=$(echo "${NODE_STATS}" | jq -r ".nodes[] | select(.name==\"${HOSTNAME}\") | .indices.docs.count")
    echo "Node contains ${DOC_COUNT} documents"
    sleep 1
done
echo "Node clear to shutdown"
| true
|
3e49298ae4e9620d1dfc264d711f44dd2c672d58
|
Shell
|
darb2723/uim-shell
|
/scripts/bulk_manage/hub_command.sh
|
UTF-8
| 1,814
| 3.40625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run a Nimsoft "pu" probe-utility command against a hub. Usage:
#   hub_command.sh [hubname] <pu arguments...>
# If the first argument does not match a known hub in $HUBS, the local
# robot's own hub (from robot.cfg) is used and all arguments are passed
# through to pu.
BINDIR="${0%/*}"
# common.rc supplies shared settings, including the $HUBS table.
if [ -f "$BINDIR/common.rc" ]
then
    . "$BINDIR/common.rc"
else
    cat << EOF
Missing required common.rc file, re-install to fix.
File location: $PWD/common.rc
EOF
    exit
fi
# nimbus.rc supplies the Nimsoft credentials used below.
if [ -f "$BINDIR/nimbus.rc" ]
then
    . "$BINDIR/nimbus.rc"
else
    cd "$BINDIR"
    cat << EOF
Missing required nimbus.rc file. Expected to contain the following:
NIM_USERNAME="administrator"
NIM_PASSWD="******"
File location: $PWD/nimbus.rc
EOF
    exit
fi
ROBOT_CFG="$NM_ROOT/robot/robot.cfg"
HUB="$1"
# Resolve the target hub: explicit first argument if it names a known hub
# (then shift it off), otherwise fall back to this robot's configured hub.
if [ -z "$HUB" ]
then
    HUBROBOT=$(grep hubrobotname "$ROBOT_CFG" | sed -e "s/.* = //g")
    HUB=$(grep "hub = " "$ROBOT_CFG" | sed -e "s/.* = //g")
    shift
else
    HUB_INFO=$(echo "$HUBS" | grep "^$HUB|")
    if [ -z "$HUB_INFO" ]
    then
        HUBROBOT=$(grep hubrobotname "$ROBOT_CFG" | sed -e "s/.* = //g")
        HUB=$(grep "hub = " "$ROBOT_CFG" | sed -e "s/.* = //g")
    else
        shift
    fi
fi
# get the hub for the current robot
# $HUBS rows are pipe-delimited; peel one field at a time with
# ${VAR%%|*} (head field) / ${VAR#*|} (rest). Field order below defines
# the expected schema.
HUB_INFO=$(echo "$HUBS" | grep "^$HUB|")
HUB_NAME="${HUB_INFO%%|*}"; HUB_INFO="${HUB_INFO#*\|}"
HUB_ADDRESS="${HUB_INFO%%|*}"; HUB_INFO="${HUB_INFO#*|}"
HUB_SECURITY="${HUB_INFO%%|*}"; HUB_INFO="${HUB_INFO#*|}"
HUB_STATUS="${HUB_INFO%%|*}"; HUB_INFO="${HUB_INFO#*|}"
HUB_LICENSE="${HUB_INFO%%|*}"; HUB_INFO="${HUB_INFO#*|}"
HUB_VERSION="${HUB_INFO%%|*}"; HUB_INFO="${HUB_INFO#*|}"
HUB_IP="${HUB_INFO%%|*}"; HUB_INFO="${HUB_INFO#*|}"
HUB_MODE="${HUB_INFO%%|*}"; HUB_INFO="${HUB_INFO#*|}"
HUB_PORT="${HUB_INFO%%|*}"; HUB_INFO="${HUB_INFO#*|}"
HUB_DOMAIN="${HUB_INFO%%|*}"; HUB_INFO="${HUB_INFO#*|}"
HUB_ROBOT="${HUB_INFO%%|*}"; HUB_INFO="${HUB_INFO#*|}"
#echo pu -u "$U" -p "$P" "$HUB_ADDRESS" "$@" >&2
# Forward the remaining arguments to pu against the resolved hub address.
pu -u "$NIM_USERNAME" -p "$NIM_PASSWD" "$HUB_ADDRESS" "$@"
exit
## ## (c) MorseCode Incorporated 2015
| true
|
be0c0bb384fc9b365f78812fae9cd30db38693ae
|
Shell
|
safisher/ngs
|
/ngs_PIPELINE.sh
|
UTF-8
| 11,907
| 3.421875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# Copyright (c) 2012-2014, Stephen Fisher, Hoa Giang, and Junhyong Kim, University of
# Pennsylvania. All Rights Reserved.
#
# You may not use this file except in compliance with the Kim Lab License
# located at
#
# http://kim.bio.upenn.edu/software/LICENSE
#
# Unless required by applicable law or agreed to in writing, this
# software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License
# for the specific language governing permissions and limitations
# under the License.
##########################################################################################
# INPUT: see individual commands
# OUTPUT: see individual commands
# REQUIRES: see individual commands
##########################################################################################
##########################################################################################
# USAGE
##########################################################################################
NGS_USAGE+="Usage: `basename $0` pipeline OPTIONS sampleID -- run full pipeline\n"
##########################################################################################
# HELP TEXT
##########################################################################################
ngsHelp_PIPELINE() {
echo -e "Usage:\n\t`basename $0` pipeline [-i inputDir] [-o outputDir] [-t RNASeq | RNASeq_Stranded | RNASeq_Human | WGS] [-l readLength] -p numProc -s species [-se] sampleID"
echo -e "Input:\n\tsee individual commands"
echo -e "Output:\n\tsee individual commands"
echo -e "Requires:\n\tsee individual commands"
echo -e "OPTIONS:"
echo -e "\t-i - parent directory containing subdirectory with compressed fastq files (default: ./raw). This is the parent directory of the sample-specific directory. The sampleID will be used to complete the directory path (ie inputDir/sampleID)."
echo -e "\t-o - directory containing subdirectory with analysis files (default: ./analyzed). This is the parent directory of the sample-specific directory. The sampleID will be used to complete the directory path (ie outputDir/sampleID)."
echo -e "\t-t type - RNASeq, RNASeq_BC, or WGS (Whole Genome Sequencing) (default: RNASeq). The RNASeq_BC adds barcode selection and trimming to the standard RNASeq pipeline."
echo -e "\t-l readLength - read length (default = 100). If paired end then this is the length of one mate. Used to determine blast e-values and star library length."
echo -e "\t-p numProc - number of cpu to use."
echo -e "\t-s species - species from repository: $REPO_LOCATION."
echo -e "\t-se - single-end reads (default: paired-end)\n"
echo -e "\t-c contaminant - name of contaminant file from $REPO_LOCATION/trim to be used for trimming. Default is contaminants.fa"
echo -e "\t-f features - list of feature types for quantification. Features will be assigned hierarchically, in the order listed. Availible features are listed in the header of GTF files at $REPO_LOCATION/verse. Default is exon."
echo -e "\t-id id_attr - ID attribute from a GTF file you will use for quantification. Final gene counts will be output using this field. Should be either gene_name or gene_id. Default is gene_id."
echo -e "\t-stranded - data comes from stranded library preparation. Reads will only be counted if they align on the same strand as annotated features. Default is unstranded."
echo -e "\t-lines_sines - quantify LINE and SINE elements, separately from other features. "
echo -e "\t-chgrp group - change the unix group of a sample and it's data before syncing to the repo. Default is no change."
echo -e "\t-small - abreviated pipeline that keeps fewer large output files. The trim output files, the position sorted STAR output, and the STAR unmapped reads files are all excluded.\n"
echo -e "This will process sequencing data using either an RNASeq or WGS (Whole Genome Sequencing) pipeline. For RNASeq the modules used are: init, fastqc, blast, trim, star, verse, post, and rsync. For WGS the modules used are: init, fastqc, blast, trim, bowtie, SPAdes, post, and rsync. See individual modules for documentation."
}
##########################################################################################
# LOCAL VARIABLES WITH DEFAULT VALUES. Using the naming convention to
# make sure these variables don't collide with the other modules.
##########################################################################################
ngsLocal_PIPELINE_TYPE="RNASeq"
ngsLocal_CONTAM_NAME="contaminants.fa"
ngsLocal_PIPELINE_FEATURES="exon"
ngsLocal_PIPELINE_LINES_SINES=""
ngsLocal_PIPELINE_ID_ATTR="gene_id"
ngsLocal_PIPELINE_STRANDED=""
ngsLocal_PIPELINE_GROUP=""
small=false
noinit=false
#########################################################################################
# PROCESSING COMMAND LINE ARGUMENTS
# PIPELINE args: -i value, -o value, -t value, -p value, -s value, -se (optional), sampleID
##########################################################################################
ngsArgs_PIPELINE() {
if [ $# -lt 5 ]; then printHelp "PIPELINE"; fi
ngsLocal_PIPELINE_INITIAL_ARGS="$@" #needed to recurse for multi-barcode samples
# getopts doesn't allow for optional arguments so handle them manually
while true; do
case $1 in
-i) RAW=$2 #global
shift; shift;
;;
-o) ANALYZED=$2 #global
shift; shift;
;;
-l) READ_LENGTH=$2 #global
shift; shift;
;;
-p) NUMCPU=$2 #global
shift; shift;
;;
-s) SPECIES=$2 #global
shift; shift;
;;
-se) SE=true #global
shift;
;;
-t) ngsLocal_PIPELINE_TYPE=$2
shift; shift;
;;
-c) ngsLocal_CONTAM_NAME=$2
shift; shift;
;;
-f) ngsLocal_PIPELINE_FEATURES=$2
shift; shift;
;;
-b) ngsLocal_PIPELINE_BC=$2
shift; shift;
;;
-id) ngsLocal_PIPELINE_ID_ATTR=$2
shift; shift;
;;
-stranded) ngsLocal_PIPELINE_STRANDED="-stranded"
shift;
;;
-lines_sines) ngsLocal_PIPELINE_LINES_SINES="-lines_sines"
shift;
;;
-chgrp) ngsLocal_PIPELINE_GROUP="-g $2"
shift; shift;
;;
-small) small=true
shift;
;;
-noinit) noinit=true
shift;
;;
-*) printf "Illegal option: '%s'\n" "$1"
printHelp $COMMAND
exit 0
;;
*) break ;;
esac
done
SAMPLE=$1 #global
}
##########################################################################################
# RUNNING COMMAND ACTION
# PIPELINE does not have its own command function. Rather includes the
# command functions from the following commands: init, fastqc, blast,
# trim, star, post. blastdb, htseq, rsync. See the individual
# config files.
##########################################################################################
ngsCmd_PIPELINE() {
# Top-level driver: runs the configured command modules, in a fixed order,
# for one sample. Three pipeline types are supported via
# $ngsLocal_PIPELINE_TYPE: RNASeq, RNASeq_BC, and WGS.
if $SE; then prnCmd "# BEGIN: SINGLE-END, $ngsLocal_PIPELINE_TYPE PIPELINE"
else prnCmd "# BEGIN: PAIRED-END, $ngsLocal_PIPELINE_TYPE PIPELINE"; fi
# PIPELINE runs the command functions from the following commands. THE
# PIPELINE COMMAND IS SENSITIVE TO THE ORDER OF THE COMMAND FUNCTIONS
# BELOW. For example, INIT needs to prepare the files prior to FASTQC
# running.
# In order to change a "default" value, the ngsArgs_XXX() function
# needs to be called prior to the ngsCmd_XXX(). It is important
# that $SAMPLE is included as the last argument, every time
# ngsArgs_XXX() is called.
########################################################
### The modules below are included in all pipelines. ###
# The value of $RAW is hardcoded in ngs.sh and is used to set
# inputDir in INIT. We allow users to change this value using
# the optional inputDir argument (-i inputDir). Since INIT
# defaults to the original (hardcoded) value of $RAW, we need
# to call ngsArgs_INIT() to update the value, prior to calling
# ngsCmd_INIT().
if ! $noinit; then
ngsArgs_INIT -i $RAW $SAMPLE
ngsCmd_INIT
fi
ngsCmd_FASTQC
########################################################
if [[ "$ngsLocal_PIPELINE_TYPE" = "RNASeq" ]]; then
# RNASeq: contamination BLAST -> trim -> FastQC on trimmed reads ->
# STAR alignment -> VERSE counting -> read-count consistency check.
ngsArgs_BLAST -l $READ_LENGTH -k TATAGTGAGT -p $NUMCPU -s $SPECIES $SAMPLE
ngsCmd_BLAST
ngsArgs_TRIM -t $NUMCPU -m 20 -q 53 -rAT 26 -rN -c $ngsLocal_CONTAM_NAME $SAMPLE
ngsCmd_TRIM
# Need different args to run FastQC on the trimmed data, so adjust
# args by calling ngsArgs_FASTQC() prior to running ngsCmd_FASTQC().
ngsArgs_FASTQC -i trim -o fastqc.trim $SAMPLE
ngsCmd_FASTQC
ngsCmd_STAR
ngsArgs_VERSE $ngsLocal_PIPELINE_STRANDED $ngsLocal_PIPELINE_LINES_SINES -l $ngsLocal_PIPELINE_FEATURES -id $ngsLocal_PIPELINE_ID_ATTR -p $NUMCPU -s $SPECIES $SAMPLE
ngsCmd_VERSE
ngsLocal_PIPELINE_check_readCnts
#ngsCmd_BLASTDB
elif [[ "$ngsLocal_PIPELINE_TYPE" = "RNASeq_BC" ]]; then
# RNASeq_BC: like RNASeq but demultiplexes barcodes first; the
# intermediate barcode.trim directory is POSTed (compressed) at the end.
ngsArgs_BLAST -l $READ_LENGTH -k TATAGTGAGT -p $NUMCPU -s $SPECIES $SAMPLE
ngsCmd_BLAST
ngsArgs_BARCODE -b $ngsLocal_PIPELINE_BC $SAMPLE
ngsCmd_BARCODE
ngsArgs_TRIM -i barcode.trim -t $NUMCPU -rPoly -rN -m 10 -c $ngsLocal_CONTAM_NAME $SAMPLE
ngsCmd_TRIM
ngsArgs_FASTQC -i barcode.trim -o fastqc.barcode $SAMPLE
ngsCmd_FASTQC
ngsCmd_STAR
ngsArgs_VERSE $ngsLocal_PIPELINE_STRANDED $ngsLocal_PIPELINE_LINES_SINES -l $ngsLocal_PIPELINE_FEATURES -id $ngsLocal_PIPELINE_ID_ATTR -p $NUMCPU -s $SPECIES $SAMPLE
ngsCmd_VERSE
ngsLocal_PIPELINE_check_readCnts
ngsArgs_POST -i barcode.trim $SAMPLE
ngsCmd_POST
ngsArgs_POST -i trim $SAMPLE #Args has to be called to reset prior call
elif [[ "$ngsLocal_PIPELINE_TYPE" = "WGS" ]]; then
# WGS: BLAST -> trim (poly-A/T trimming disabled) -> FastQC ->
# Bowtie alignment -> SNP calling -> SPAdes assembly -> POST of the
# alignment output directories.
ngsArgs_BLAST -l $READ_LENGTH -k TATAGTGAGT -p $NUMCPU -s $SPECIES $SAMPLE
ngsCmd_BLAST
# disable poly-A/T trimming for WGS
if [[ $ngsLocal_CONTAM_NAME == "contaminants.fa" ]]; then
# If the contaminants file is still the default, change it to the WGS default.
ngsLocal_CONTAM_NAME="contaminantsWGS.fa"
fi
ngsArgs_TRIM -m 19 -rAT 0 -rN -c contaminantsWGS.fa $SAMPLE
ngsCmd_TRIM
ngsArgs_FASTQC -i trim -o fastqc.trim $SAMPLE
ngsCmd_FASTQC
ngsCmd_BOWTIE
ngsCmd_SNP
ngsCmd_SPADES
ngsArgs_POST -i bowtie $SAMPLE
ngsCmd_POST
ngsArgs_POST -i bowtie/SE_mapping $SAMPLE
ngsCmd_POST
#ngsArgs_POST -i trim $SAMPLE #Args has to be called to reset prior call
else
prnCmd "ERROR: Invalid PIPELINE type $ngsLocal_PIPELINE_TYPE. Valid options are 'RNASeq' and 'WGS'."
fi
########################################################
### The modules below are included in all pipelines. ###
# compress the trimmed files
if $small; then
# NOTE(review): the logged command uses $ngsLocal_PIPELINE_GROUP but the
# chgrp actually executed uses the literal group "repo-admin" -- confirm
# which is intended.
prnCmd "chgrp -R $ngsLocal_PIPELINE_GROUP $SAMPLE"
chgrp -R repo-admin $SAMPLE
# NOTE(review): the nested double quotes in the prnCmd below close the
# outer string early, so the logged text is not an exact copy of the rsync
# command that is run on the next line.
prnCmd "rsync -avh --stats --exclude "init" --exclude "unaligned_*.fq" --exclude "Unmapped.out.*" $SAMPLE $ANALYZED/"
rsync -avh --stats --exclude "init" --exclude "unaligned_*.fq" --exclude "Unmapped.out.*" $SAMPLE $ANALYZED/
else
ngsArgs_POST -i trim $ngsLocal_PIPELINE_GROUP $SAMPLE #Change group to repo-admin before syncing. Uses -i trim to reset any previous calls to args
ngsCmd_POST
# OutputDir defaults to $ANALYZED which is hardcoded in
# ngs.sh, just like inputDir and $RAW.
ngsArgs_RSYNC -o $ANALYZED $SAMPLE
ngsCmd_RSYNC
fi
########################################################
if $SE; then prnCmd "# FINISHED: SINGLE-END, $ngsLocal_PIPELINE_TYPE PIPELINE"
else prnCmd "# FINISHED: PAIRED-END, $ngsLocal_PIPELINE_TYPE PIPELINE"; fi
}
ngsLocal_PIPELINE_check_readCnts() {
# Consistency check: the number of uniquely mapped pairs reported by STAR
# must match the number of input pairs VERSE says it counted.
local mappedCnt countedCnt
mappedCnt=$(grep "Uniquely mapped reads number" $SAMPLE/star/$SAMPLE.star.stats.txt | cut -f2)
countedCnt=$(grep "TotalRead" $SAMPLE/verse/$SAMPLE.verse.summary.txt | cut -f2)
[[ $mappedCnt -eq $countedCnt ]] || prnError "Star reports $mappedCnt unique mapped pairs, but Verse reports $countedCnt input pairs"
}
| true
|
15b44cc14f0faf05d4ba1ea13bc745677b71e8f3
|
Shell
|
luafran/exercises
|
/linux_api/07_signals/print_signals.sh
|
UTF-8
| 181
| 3
| 3
|
[] |
no_license
|
# Handler for SIGQUIT (signal 3): announce that the trap fired.
trap3() {
printf '%s\n' "Signal SIGQUIT (3) trap"
}
# Handler for SIGTERM (signal 15): announce that the trap fired.
trap15() {
printf '%s\n' "Signal SIGTERM (15) trap"
}
# Print our PID so a tester can signal us: kill -QUIT $$ / kill -TERM $$.
echo "pid = $$"
# Install the handlers defined above for SIGQUIT (3) and SIGTERM (15).
trap trap3 3
trap trap15 15
# Idle forever, waking once a second so pending traps get delivered.
# (The original `while [ true ]` only tested that the string "true" is
# non-empty; the `true` builtin states the intent directly.)
while true; do
sleep 1
done
| true
|
9b32bcdd963d65f77e7f71deb86f23c0138b4230
|
Shell
|
5l1v3r1/sifter
|
/modules/webmods/widow.sh
|
UTF-8
| 2,416
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# widow.sh - sifter web module wrapping the BlackWidow crawler/fuzzer and
# injectx.py (OWASP parameter fuzzer). All output is collected under
# /opt/sifter/results/Blackwidow.

# ANSI colour codes used by the menu.
ORNG='\033[0;33m'
NC='\033[0m'
W='\033[1;37m'
LP='\033[1;35m'
YLW='\033[1;33m'
LBBLUE='\e[104m'

# Create the results directory on first run.
if [[ -d /opt/sifter/results/Blackwidow ]]; then
echo ""
else
mkdir /opt/sifter/results/Blackwidow
fi

echo -e "${ORNG}Blackwidow${NC}"
echo -e "${ORNG}***********${NC}"

options=("Crawl the target domain & fuzz all parameters (Verbose enabled)" "Fuzz all GET parameters for common OWASP Vulns (Verbose enabled)" "Back")
select opts in "${options[@]}"
do
case $opts in
"Crawl the target domain & fuzz all parameters (Verbose enabled)")
echo -e "${YLW}"
# BUGFIX: pingtest_pass.txt is a regular file, so test it with -f.
# The original -d never matched, so the saved-target hint never showed.
if [[ -f files/pingtest_pass.txt ]]; then
echo -e "${YLW}"
cat files/pingtest_pass.txt
echo -e "${NC}"
echo -e "${W}Please copy and paste in your target site${NC}"
else
echo -e "${W}Please enter your target site with 'http/s://'${NC}"
fi
read TARGET1
echo -e "${W}How many levels would you like to crawl?${NC}"
read TARGET2
echo -e "${W}Would you like to fuzz all possible parameters for OWASP vulns? (y/n)${NC}"
read TARGET3
echo -e "${LP}Running Blackwidow with the following command, 'blackwidow -u $TARGET1 -l $TARGET2 -s $TARGET3 -v y'${NC}"
sleep 5
# BUGFIX: results used to be written to "<empty>.txt" because the
# never-set $TARGET was referenced here. Build a filesystem-safe name
# from the URL (strip the scheme, replace slashes).
OUTNAME="${TARGET1#*://}"
OUTNAME="${OUTNAME//\//_}"
sudo blackwidow -u "${TARGET1}" -l "${TARGET2}" -s "${TARGET3}" -v y | tee "/opt/sifter/results/Blackwidow/${OUTNAME}.txt"
./modules/webmods/widow.sh
;;
"Fuzz all GET parameters for common OWASP Vulns (Verbose enabled)")
echo -e "${YLW}"
cat files/pingtest_pass.txt
echo -e "${NC}"
echo -e "${W}Please enter your target domain and trailing directories${NC}"
echo -e "${LP}ex. http://target.com/wp-content/uploads/${NC}"
sleep 2
read TARGETDMN
echo -e "${W}Please enter the target file & GET or POST parameters${NC}"
echo -e "${LP}ex. 'users.php?user=1&admin=true'${NC}"
sleep 2
read TARGETEXT
echo -e "${W}Running injectx script with the following argument, ${LP}'injectx.py -u ${TARGETDMN}${TARGETEXT} -v y'${NC}"
sleep 5
# BUGFIX: run the URL that was echoed above. $TARGET was never set,
# so injectx.py previously received an empty -u value, and the result
# file was named "<empty>_owaspVulns.txt".
OUTNAME="${TARGETDMN#*://}"
OUTNAME="${OUTNAME//\//_}"
sudo injectx.py -u "${TARGETDMN}${TARGETEXT}" -v y | tee "/opt/sifter/results/Blackwidow/${OUTNAME}_owaspVulns.txt"
./modules/webmods/widow.sh
;;
"Back")
cd /opt/sifter
sifter -m
;;
esac
done
| true
|
3bbef87857b700a13dea021a80c99340286c27bc
|
Shell
|
paulineauffret/pmarg_albino_project
|
/01_from_raw_to_count_matrix_and_vcf/01_1_juvenile/01_fastqc.sh
|
UTF-8
| 926
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Submit one FastQC qsub job per sample subdirectory of DATA_DIRECTORY.
# Each subdirectory must contain one raw read pair (*_R1.fastq.gz and
# *_R2.fastq.gz); for every pair a job script is assembled as
# HEADER + generated variable block + shared QC_SCRIPT, then submitted.
DATA_DIRECTORY=~/juvenile/raw #path to raw data directory (raw fastq.gz files, each pair R1 - R2 placed into one subdirectory)
QC_SCRIPT=~/juvenile/scripts/01_fastqc.qsub #path to 01_2_fastqc_raw_reads.qsub script
SCRIPT=~/juvenile/scripts #path to scripts directory
HEADER=${SCRIPT}/input_files/header.txt #path to header.txt file
#Loop into each subdirectory of DATA_DIRECTORY containing pairs of raw fastq files and run QC script
cd "$DATA_DIRECTORY" || exit 1
# Iterate over subdirectories with a glob instead of parsing `ls` output,
# which is whitespace-unsafe and would also pick up plain files.
for dir in */ ;
do
dir=${dir%/} ;
cd "$dir" ;
FILE_R1=$(ls *_R1.fastq.gz) ;
FILE_R2=$(ls *_R2.fastq.gz) ;
echo "cd ${DATA_DIRECTORY}/${dir}" >> "${SCRIPT}/tmp" ;
echo "FILE_R1=${DATA_DIRECTORY}/${dir}/${FILE_R1}" >> "${SCRIPT}/tmp" ;
echo "FILE_R2=${DATA_DIRECTORY}/${dir}/${FILE_R2}" >> "${SCRIPT}/tmp" ;
cat "$HEADER" "${SCRIPT}/tmp" "$QC_SCRIPT" > "$SCRIPT/${QC_SCRIPT##*/}_${dir}.qsub"
qsub "${SCRIPT}/${QC_SCRIPT##*/}_${dir}.qsub" ;
rm "${SCRIPT}/tmp" ;
cd .. ;
done
| true
|
0a8480e15926eaf532f1b878d536ec968a210a26
|
Shell
|
dglo/domhub-tools
|
/devel/domhub-testing/throughput-AB.sh
|
UTF-8
| 2,038
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Analyze a throughput-test results file ($1): flag DOMs whose measured
# throughput falls more than 3 sigma below the mean, then check the echo
# test, CRC, and retransmit counters. Exits 1 on any failure.
source throughput-common.sh
# Remove the scratch file on any exit path.
function atexit () {
rm -f /tmp/thrAB.$$.doms
}
trap atexit EXIT
#
# throughput-AB.sh, get results of throughput
# tests for A/B doms...
#
#
# FIXME: check interrupt, times stats -- interrupts, and times are
# (approx) per wire pair...
#
#
# for the paired doms
#
# we use 3*sigma for a cutoff (p value around 0.005)...
#
sigma=42
mean=86012
pdoms=`paired-doms`
for dom in ${pdoms}; do
# Build an awk clause matching this DOM's column-3 id, then compute
# throughput - (mean - 3*sigma) via dc; negative values mean "too slow".
# NOTE(review): the second awk is also given the file operand $1, so it
# reads the whole results file again and the per-dom filter from the
# first awk is effectively bypassed here (the unpaired loop below does
# not pass $1 to its second awk) -- confirm which behavior is intended.
cl=`printf '$3 ~ /^%s$/ { print $0; }' $dom`
awk "${cl}" $1 | awk -vmean=${mean} -vsigma=${sigma} \
'{ if (NF==12) print $4, mean, sigma, "3 * - - p"; }' $1 | \
dc | sed 's/\..*$//1' | sort -n | uniq > \
/tmp/thrAB.$$.pslowest
if (( `wc -l /tmp/thrAB.$$.pslowest | awk '{ print $1; }'` > 0 )); then
if (( `head -1 /tmp/thrAB.$$.pslowest` < 0 )); then
echo "`basename $0`: paired dom ${dom} is too slow"
exit 1
fi
fi
done
# Same 3-sigma check for unpaired DOMs (tighter distribution).
sigma=13
mean=89923
udoms=`unpaired-doms`
for dom in ${udoms}; do
cl=`printf '$3 ~ /^%s$/ { print $0; }' $dom`
awk "${cl}" $1 | awk -vmean=${mean} -vsigma=${sigma} \
'{ if (NF==12) print $4, mean, sigma, "3 * - - p"; }' | \
dc | sed 's/\..*$//1' | sort -n | uniq > \
/tmp/thrAB.$$.uslowest
if (( `wc -l /tmp/thrAB.$$.uslowest | awk '{ print $1; }'` > 0 )); then
if (( `head -1 /tmp/thrAB.$$.uslowest` < 0 )); then
echo "`basename $0`: unpaired dom ${dom}, is too slow"
cat /tmp/thrAB.$$.uslowest
exit 1
fi
fi
done
#
# check echo test error count
#
if (( `awk '{ print $5; }' $1 | sort -n -r | uniq | sed -n '1p'` > 0 )); then
echo "`basename $0`: too many echo test errors"
exit 1
fi
#
# check for crc errors...
#
if (( `awk '{ print $7; }' $1 | sort -n -r | uniq | sed -n '1p'` > 1 )); then
echo "`basename $0`: too many crc errors"
exit 1
fi
#
# check for retxs...
#
if (( `awk '{ print $6; }' $1 | sort -n -r | uniq | sed -n '1p'` > 0 )); then
echo "`basename $0`: too many retxs"
exit 1
fi
|
46135922c7eca8c9b65e72b01f1dddfa1cec0c7b
|
Shell
|
viyoriya/lfs_9
|
/blfs/pkgscripts/llvm
|
UTF-8
| 1,094
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# BLFS-style package script: download, build, install, and register LLVM.
# Package name is taken from the script's own filename; versions come from
# /etc/pkg/pkg.conf (LLVM_VER / LLVM_PARENT_VER).
set -e
. /etc/pkg/pkg.conf
. /var/lib/pkg/functions
name=$(basename $0)
version=$LLVM_VER
url=https://github.com/llvm/llvm-project/releases/download/llvmorg-$LLVM_PARENT_VER/llvm-$version.tar.xz
fetch $url
# Unpack into a clean work directory.
rm -fr $WORK_DIR/$name-$version
tar -xvf $SOURCE_DIR/$(basename $url) -C $WORK_DIR
cd $WORK_DIR/$name-$version
# Time the whole configure/build/install and tee all output (stdout+stderr)
# into the package log.
{ time \
{
mkdir -v build &&
cd build &&
CC=gcc CXX=g++ \
cmake -DCMAKE_INSTALL_PREFIX=/usr \
-DLLVM_ENABLE_FFI=ON \
-DCMAKE_BUILD_TYPE=Release \
-DLLVM_BUILD_LLVM_DYLIB=ON \
-DLLVM_LINK_LLVM_DYLIB=ON \
-DLLVM_ENABLE_RTTI=ON \
-DLLVM_TARGETS_TO_BUILD="host;AMDGPU;BPF" \
-DLLVM_BUILD_TESTS=ON \
-Wno-dev -G Ninja ..
ninja
ninja install
}
} 2>&1 | tee -a $LOG_DIR/$(basename $0).log
# $PIPESTATUS (element 0) is the exit status of the build group above,
# not of tee -- fail the script if the build failed.
if [ $PIPESTATUS -ne 0 ]; then exit 1; fi;
rm -fr $WORK_DIR/$name-$version
registerpkg $(basename $0) $version
exit 0
| true
|
39e1ef30f4ee4f7b49dec9a3927baebca4c67082
|
Shell
|
sunliang711/ss2
|
/init/tools/detectOS.sh
|
UTF-8
| 1,454
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Detect the current OS flavour/version and print an identifier such as
# ubuntu-16.10, archlinux, fedora-23, centos-6.8, debian-8 or macos.
currentOS(){
case "$(uname)" in
"Linux")
#pacman -> archlinux
if command -v pacman >/dev/null 2>&1;then
echo "archlinux"
#apt-get -> debian or ubuntu
elif command -v apt-get >/dev/null 2>&1;then
#get version info from lsb_release -a
# lsb_release -a also prints "No LSB modules are available." on
# stderr; discard that with 2>/dev/null.
lsb=$(lsb_release -a 2>/dev/null)
distributor=$(echo "$lsb" | grep 'Distributor ID' | grep -oP ':.*' | grep -oP '\w+')
if [[ "$distributor" == "Ubuntu" ]];then
# "Description: Ubuntu 16.10" -> "Ubuntu-16.10"
echo "$lsb" | grep "Description" | awk -F: '{print $2}' | awk '{print $1"-"$2}'
elif [[ "$distributor" == "Debian" ]];then
release=$(echo "$lsb" | grep 'Release' | grep -oP ':.*' | grep -oP '\d.+')
echo "$distributor-$release"
else
echo "error(not ubuntu or debian)"
fi
#yum -> centos or fedora
elif command -v yum >/dev/null 2>&1;then
#TODO
echo 'yum'
fi
;;
"Darwin")
echo "macos"
;;
*)
echo "error"
;;
esac
}
| true
|
f134fad38d6c4dfab93fa2f254116ae40c6070d1
|
Shell
|
Foxwc/CS100-RSHELL
|
/tests/multi_command.sh
|
UTF-8
| 2,556
| 2.921875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Regression tests for bin/rshell's multi-command connectors (;, &&, ||).
# Each test first runs the command line in the host shell ("Expected"
# output), then pipes the same line into bin/rshell so the two outputs can
# be compared by eye.
printf "\n\nTest 1: rmdir cat || ls\nExpected:\\n" # testing || (fail)
rmdir cat || ls
printf "rmdir cat || ls\nexit\\n" | bin/rshell
printf "\n\nTest 2: rmdir cat && ls\nExpected:\\n" # testing && (fail)
rmdir cat && ls
printf "rmdir cat && ls\nexit\\n" | bin/rshell
printf "\n\nTest 3: ls; mkdir tests && ls || cat README.md\nExpected:\\n" # testing ; -> && and && -> || (&& passes)
ls; mkdir tests && ls || cat README.md
printf "ls; mkdir tests && ls || cat README.md\nexit\\n" | bin/rshell
printf "\n\nTest 4: ls; mkdir tests || ls && cat README.md\n\nExpected:\\n" # testing ; -> || and || -> && (|| passes)
ls; mkdir tests || ls && cat README.md
printf "ls; mkdir tests || ls && cat README.md\nexit\\n" | bin/rshell
printf "\n\nTest 5: ls || exit; ls\n\nExpected:\\n" # testing || -> ; (|| passes)
ls || exit; ls
printf "ls || exit; ls\nexit\\n" | bin/rshell
printf "\n\nTest 6: ls && ls; cat makefile\n\nExpected:\\n" # testing && -> ; (&& passes)
ls && ls; cat makefile
printf "ls && ls; cat makefile\nexit\\n" | bin/rshell
printf "\n\nTest 7: rmdir frog && ls; cat makefile\n\nExpected:\\n" # testing && -> ; (&& fails)
rmdir frog && ls; cat makefile
printf "rmdir frog && ls; cat makefile\nexit\\n" | bin/rshell
printf "\n\nTest 8: rmdir frog || ls;\n\nExpected:\\n" # testing || -> ; (|| fails)
rmdir frog || ls;
printf "rmdir frog || ls;\nexit\\n" | bin/rshell
printf "\n\nTest 9: echo hello|| ls\nExpected:\\n" # testing || (pass)
echo hello || ls
printf "echo hello || ls\nexit\\n" | bin/rshell
printf "\n\nTest 10: echo hello && ls\nExpected:\\n" # testing && (pass)
echo hello && ls
printf "echo hello && ls\nexit\\n" | bin/rshell
printf "\n\nTest 11: hello; ls\nExpected:\\n" # testing ; (pass)
hello; ls
printf "hello; ls\nexit\\n" | bin/rshell
printf "\n\nTest 12: rmdir frog; ls\nExpected:\\n" # testing ; (fail)
rmdir frog; ls
printf "rmdir frog; ls\nexit\\n" | bin/rshell
printf "\n\nTest 13: ls; cat dog || ls && cat README.md\n\nExpected:\\n" # testing ; -> || and || -> && (|| fails)
ls; cat dog || ls && cat README.md
printf "ls; cat dog || ls && cat README.md\nexit\\n" | bin/rshell
printf "\n\nTest 14: ls; cat dog && ls || cat README.md\nExpected:\\n" # testing ; -> && and && -> || (&& fails)
ls; cat dog && ls || cat README.md
printf "ls; cat dog && ls || cat README.md\nexit\\n" | bin/rshell
printf "\n\nTest 15: ls; ls; ls; ls; ls; ls\n\nExpected:\\n" # testing ; -> ; -> ........ -> ;
ls; ls; ls; ls; ls; ls
printf "\n\n\\n"
printf "ls; ls; ls; ls; ls; ls\nexit\\n" | bin/rshell
printf "\n\nTesting complete\\n"
| true
|
279f69397cc01520edcd68f058fb3200be10d8eb
|
Shell
|
aleskandro/MyConfigs
|
/home/aleskandro/.zoppo/plugins/archive/functions/archive:extract:gz
|
UTF-8
| 326
| 2.765625
| 3
|
[
"WTFPL"
] |
permissive
|
# Extract a gzip archive ($1), preferring whichever tool is installed:
# gunzip, then gzip -d, then zcat. $verbose (set by the caller) adds -v.
local v
(( $verbose )) && v='-v'
if (( $+commands[gunzip] )); then
gunzip ${=v} "$1"
elif (( $+commands[gzip] )); then
gzip ${=v} -d "$1"
elif (( $+commands[zcat] )); then
# zcat cannot replace the file in place: clobber-write (>!) to the
# input name with its extension removed (${1:r}).
zcat ${=v} "$1" >! "${1:r}"
else
warn 'extract: you have to install gzip'
return 1
fi
# vim: ft=zsh sts=2 ts=2 sw=2 et fdm=marker fmr={{{,}}}
| true
|
b474fb0f27605e5de4518bed5dacdee602c99428
|
Shell
|
henrytwo/GooseAlert-Public
|
/deployAlarmServer.sh
|
UTF-8
| 546
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Deploy the GooseAlert alarm server: stop the running processes on the Pi,
# copy the ALARMSERVER tree over, then restart heartbeat.py and server.py
# in detached tmux sessions.
readonly ALARM_SERVER_USER="pi@10.8.0.9"
readonly ALARM_SERVER_PATH="/home/pi/GooseAlert/ALARMSERVER"

printf '%s\n' "Killing watchdog"
ssh "$ALARM_SERVER_USER" "tmux kill-server; sudo pkill python3;"

printf '%s\n' "Starting deployment"
scp -i ~/.ssh/id_rsa -r ALARMSERVER/* "$ALARM_SERVER_USER:$ALARM_SERVER_PATH"
printf '%s\n' "Files copied"

printf '%s\n' "Restarting server..."
ssh "$ALARM_SERVER_USER" "tmux new-session -d \"cd $ALARM_SERVER_PATH; sudo python3 heartbeat.py; read;\"; tmux new-session -d \"cd $ALARM_SERVER_PATH; sudo python3 server.py; read;\""
printf '%s\n' "Done."
| true
|
dfd093d1cdbcb425e02947d28c0c3d2e2f6ba0be
|
Shell
|
briancsparks/server-assist-server
|
/build-scripts/run-instance
|
UTF-8
| 5,025
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash -e
#
# Run an instance of the server-assist-server.
#
# 1. Gets the giant `configuration` JSON.
# 2. Invokes js-cluster/run-instance, but do not allow it to start services.
# 3. From the configuration, find all the certificates that will be needed by
# the instance, decrypts them, and copies them to the instance.
# 4. Starts services
#
eval "$(cli-shezargs $@)"
#
# Pulls a credential from S3, decrypts it and puts it on a remote host
#
# * Sets the mode to 400.
# * Copies to protected dirs, and chowns them.
#
# Args: $1 = destination pathname on the instance, $2 = project name,
# $3 = instance IP.
cpit() {
the_pathname="$1"
project="$2"
ip="$3"
the_filename="$(basename $the_pathname)"
the_dir="$(dirname $the_pathname)"
# pull-secret decrypts into a local work dir and reports it as "certdir: ...".
cert_work_dir="$(pull-secret --project="$project" $the_filename | egrep '^certdir:' | head -1 | cut -d' ' -f2)"
# The only tricky thing is to put it into scotty's user area, or into a root dir
if [[ -d $cert_work_dir ]]; then
scpix $cert_work_dir/$the_filename "$ip:~/" && rm $cert_work_dir/$the_filename
if [[ $the_dir =~ scotty ]]; then
sshix $ip "mkdir -p $the_dir && cd $the_dir && mv $HOME/$the_filename ./ && chmod 0400 $the_filename"
else
sshix $ip "sudo mkdir -p $the_dir && cd $the_dir && sudo mv $HOME/$the_filename ./ && sudo chmod 0400 $the_filename && sudo chown root:root $the_filename"
fi
fi
}
# Force js-cluster/run-instance to use my PID, so I can know what IP the instance was launched as
as_pid="$$"
# Calculate the domain name
domain_name="mobilewebassist.net"
[[ $stack != pub ]] && domain_name="mobiledevassist.net"
# Get the big configuration JSON object
configuration="/tmp/configuration-$$.json"
ra invoke `fn ~/dev 'serverassist/serverassist\.js'` configuration > $configuration
cat $configuration | jq . > ~/configuration.json
# ---------- Run via js-cluster/run-instance ----------
cd "$(fd ~/dev/ '/js-cluster$')"
./build-scripts/run-instance --skip-start --as-pid="$as_pid" --project-id="sa" --namespace="serverassist" "$orig_args"
# Get the IP, die if we cannot
instance_start="/tmp/run-instance-from-ami-${service}-${as_pid}.json"
ip="$(cat $instance_start | underscore select '.InstanceId ~ .PrivateIpAddress' --outfmt=text)"
[[ -z $ip ]] && die "no ip"
cd "${start_dir}"
# ---------- Set env ----------
[[ -n $TENABLE_IO_KEY ]] && sshix ${ip} "jsaws-set-env TENABLE_IO_KEY ${TENABLE_IO_KEY}"
if [[ $stack == pub ]]; then
[[ -n $TENABLE_IO_GROUP ]] && sshix ${ip} "jsaws-set-env TENABLE_IO_GROUP ${TENABLE_IO_GROUP}-prod"
else
[[ -n $TENABLE_IO_GROUP ]] && sshix ${ip} "jsaws-set-env TENABLE_IO_GROUP ${TENABLE_IO_GROUP}-dev"
fi
# ---------- Now, find all the certs and keys needed by the new instance ----------
# Only pertains to web instances
if [[ $service == web ]]; then
# Walk every fqdn configured for this color-stack and copy its cert,
# key, and client root cert (if present) onto the instance.
for fqdn in $(cat $configuration | jq -r ".result | .subStacks | .[\"$color-$stack\"] | .fqdns | keys | .[]"); do
echo $fqdn
# Un-comment this line to see what the JSON looks like
cat $configuration | jq -r ".result | .subStacks | .[\"$color-$stack\"] | .fqdns | .[\"$fqdn\"]"
# Get the project name
project="$(cat $configuration | jq -r ".result | .subStacks | .[\"$color-$stack\"] | .fqdns | .[\"$fqdn\"] | .projectName")"
# The certificate filename, if any
file="$(cat $configuration | jq -r ".result | .subStacks | .[\"$color-$stack\"] | .fqdns | .[\"$fqdn\"] | .certfile")"
[[ -n $file && $file != null ]] && cpit $(echo $file | jq -r '.[]') $project $ip
# The key filename, if any
file="$(cat $configuration | jq -r ".result | .subStacks | .[\"$color-$stack\"] | .fqdns | .[\"$fqdn\"] | .keyfile")"
[[ -n $file && $file != null ]] && cpit $(echo $file | jq -r '.[]') $project $ip
# The client root certificate, if any
file="$(cat $configuration | jq -r ".result | .subStacks | .[\"$color-$stack\"] | .fqdns | .[\"$fqdn\"] | .clientCert")"
[[ -n $file && $file != null ]] && cpit $(echo $file | jq -r '.[]') $project $ip
done
fi
# Finish the startup process
ra invoke `fn ~/dev/ ssh\.js$` sshRun --ip=${ip} --command="./build-scripts/on-instance/b01-start-from-snapshot" --message="${service}-sfss"
ra invoke `fn ~/dev/ ssh\.js$` sshRun --ip=${ip} --command="./build-scripts/on-instance/b02-start-services" --message="${service}-start"
# Put this into rotation
if [[ -z $no_rotation ]]; then
# Only do rotation when adding a web-tier instance
if [[ $service == web ]]; then
[[ -z $rotation ]] && rotation="next"
# Get the new instanceId
instance_id="$(cat $instance_start | jq -r '.[0] | [.. | .InstanceId?] | map(select(. != null)) | .[0]')"
# Give it the color-stack fqdn (or hq...)
fqdn="$color-$stack.$domain_name"
[[ $stack == cluster && $rotation == main ]] && fqdn="hq.$domain_name"
echo "----- Assigning new web instance $instance_id as $rotation at $fqdn"
ra invoke `fn $HOME/dev 'lib2/ec2/ec2.js'` moveEipForFqdn --instance-id="$instance_id" --fqdn="$fqdn"
set-routing --project-id=sa --stack="$stack" --color="$color" --state="$rotation"
fi
fi
|
711e1385be811aad18eec87c79a3d6d19bacca7b
|
Shell
|
klovercloud/elasticsearch-dev-example
|
/bin/kcinit.sh
|
UTF-8
| 677
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# kcinit.sh - init-container helper: seed the mounted persistent volume at
# /usr/share/elasticsearch with the Elasticsearch files shipped in the image
# under /tmp-elasticsearch, unless the volume already looks populated
# (more than 100 files).
set -e

# How many files the image ships with.
file_count=$(find /tmp-elasticsearch -type f | wc -l)
echo $file_count
if [ ! -d "/usr/share/elasticsearch" ]; then
echo '[ERROR] /usr/share/elasticsearch doesnt exists'
else
echo '[INFO] Persistant volume mounted'
file_count=$(find /usr/share/elasticsearch -type f | wc -l)
if [ "$file_count" -le 100 ]; then
echo "[INFO] Copying elasticsearch files to /usr/share/elasticsearch";
cp -a /tmp-elasticsearch/. /usr/share/elasticsearch/.
echo "[INFO] Finished copying Elasticsearch files to /usr/share/elasticsearch";
else
echo "elasticsearch files exists"
fi
file_count=$(find /usr/share/elasticsearch -type f | wc -l)
echo $file_count
fi
| true
|
3b3f4d31ead92c056e0f78371fe3c4a5d9c4b114
|
Shell
|
uber/storagetapper
|
/scripts/workflow_example.sh
|
UTF-8
| 1,953
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# End-to-end workflow example for storagetapper: builds the binary, sets up
# a short-retention Kafka topic and a MySQL test table, runs storagetapper,
# registers the cluster/table over its HTTP API, performs inserts/ALTERs/
# updates, then tears everything down and dumps the topic.
set -ex
go build
# Run a statement ($2...) against MySQL database $1 as root.
sql() {
db=$1
shift
mysql -uroot "$db" -e "$@"
}
KAFKA_RETENTION_TIME=20
TEST_TOPIC=hp-ex_svc1-ex_db1-ex_table1
KPATH=/home/kafka/bin
KPARAM="--zookeeper localhost --topic $TEST_TOPIC"
# NOTE(review): "$KPARAM" is expanded quoted below, so kafka-topics.sh
# receives all four flags as a single argument -- confirm this works with
# the wrapper in use, or drop the quotes so the flags split.
$KPATH/kafka-topics.sh "$KPARAM" --alter --config retention.ms=$((KAFKA_RETENTION_TIME * 1000))
$KPATH/kafka-topics.sh "$KPARAM" --describe
# Fresh database with ten seed rows.
sql "" "DROP DATABASE IF EXISTS ex_db1"
sql "" "RESET MASTER"
sql "" "CREATE DATABASE ex_db1"
sql "ex_db1" "CREATE TABLE ex_table1(f1 int not null primary key, ts TIMESTAMP, f3 int not null default 0)"
for i in $(seq 101 110); do
sql "ex_db1" "INSERT INTO ex_table1(f1) VALUES ($i)"
done
# Start storagetapper in the background; kill it on interrupt.
./storagetapper &
TPID=$!
trap 'kill $TPID; exit 1' 1 2 15 #SIGHUP SIGINT SIGTERM
sleep 2
# Register the cluster and table via the HTTP control API.
curl --data '{"cmd" : "add", "name" : "ex_cluster1", "host" : "localhost", "port" : 3306, "user" : "root", "pw" : ""}' http://localhost:7836/cluster
curl --data '{"cmd" : "add", "cluster" : "ex_cluster1", "service" : "ex_svc1", "db":"ex_db1", "table":"ex_table1"}' http://localhost:7836/table
sleep 12
# Exercise inserts, schema changes (ADD/DROP column), and updates.
for i in $(seq 1 10); do
sql "ex_db1" "INSERT INTO ex_table1(f1) VALUES ($i)"
done
sql "ex_db1" "ALTER TABLE ex_table1 ADD f2 varchar(32)"
for i in $(seq 11 30); do
sql "ex_db1" "INSERT INTO ex_table1(f1,f2) VALUES ($i, CONCAT('bbb', $i))"
done
sql "ex_db1" "ALTER TABLE ex_table1 DROP f2"
for i in $(seq 101 110); do
sql "ex_db1" "UPDATE ex_table1 SET f3=f3+20 WHERE f1>100 AND f1<111"
done
sleep 4
# Deregister and stop, then dump what landed on the topic.
curl --data '{"cmd" : "del", "name" : "ex_cluster1" }' http://localhost:7836/cluster
curl --data '{"cmd" : "del", "cluster" : "ex_cluster1", "service" : "ex_svc1", "db":"ex_db1", "table":"ex_table1"}' http://localhost:7836/table
kill $TPID
$KPATH/kafka-topics.sh "$KPARAM" --describe
$KPATH/kafka-topics.sh --list --zookeeper localhost:2181
$KPATH/kafka-console-consumer.sh "$KPARAM" --max-messages 50 --from-beginning
date
echo "Wait for $KAFKA_RETENTION_TIME secs before running next test"
|
27d9150a25e86e5f7696d43b1eea649f5c79b7df
|
Shell
|
nirmalya-broad/PatHCap_PL
|
/shell_scripts/old scripts/gff_parse3.sh
|
UTF-8
| 8,224
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/sh
# gff_parse3.sh -- build combined annotation (GFF) and sequence (FASTA)
# files from per-accession NCBI .gff/.fna inputs.
#
# Positional arguments (consumed with shift):
#   1: directory holding the NCBI <acc>.gff / <acc>.fna files
#   2: output directory
#   3: temp/work directory
#   4: bases to extend each CDS upstream (5')
#   5: bases to extend each CDS downstream (3')
#   remaining: accession basenames to process
NCBI_PATH=$1"/"
shift
OUT_DIR=$1"/"
shift
TEMP_DIR=$1"/"
shift
ADD5=$1
shift
ADD3=$1
shift
############################
TR_GFF_PARSE ()
{
# Pull features of type $2 (GENE_KEY) for accession $4 out of GFF $1 and
# append a ";full_tag=<gbkey>:<product>:<ID>:<strand>:<length>" field to
# each record. Output goes to $5; $6 == "Y" appends, anything else
# truncates first. $3 names the attribute holding the product/gene text.
INFILE=$1
GENE_KEY=$2
PRODUCT_KEY=$3
ACC=$4
OUTFILE=$5
APPEND=$6
# 'echo "" | sed 1d' emits nothing: this truncates OUTFILE.
if [ $APPEND == "N" ];then
echo "" | sed 1d > $OUTFILE
fi
# tr fixes CR line endings; the awk scans column 9 attributes for the
# product key, gbkey, and ID, then emits the original record plus the
# composed full_tag. A trailing sed strips '%' characters.
cat $INFILE | tr '\r' '\n' | awk -v GENE_KEY=$GENE_KEY -v ACC=$ACC '{
if($1 == ACC)
{
if($3 == GENE_KEY)
{
y=split($9, ar, ";")
for(i=1; i < y+1; i++)
{
if(ar[i] ~ /'$PRODUCT_KEY'/)
{
t=split(ar[i], pr, "=")
PRODUCT=pr[2]
}
if(ar[i] ~ /gbkey=/)
{
t=split(ar[i], pr, "=")
KEY=pr[2]
}
if(ar[i] ~ /ID=/)
{
t=split(ar[i], pr, "=")
ID=pr[2]
}
}
if(KEY=="")
print $0";full_tag="ID":"PRODUCT":"ID":"$7":"$5-$4
else
print $0";full_tag="KEY":"PRODUCT":"ID":"$7":"$5-$4
}
}
}' | sed 's/\%//g' >> $OUTFILE
}
###########################
fna_extract ()
{
# Print the FASTA sequence for one accession, re-headed as ">$3".
# $1 = multi-FASTA file, $2 = accession without version suffix,
# $3 = full accession used for the emitted header.
FNA_FILE=$1
ACC_PARSED=$2
GFF_ACC=$3
typeset -i NUM_MATCH
# Count headers matching the accession; with exactly one match a substring
# match is enough, with several an exact header match is required below.
NUM_MATCH=` awk -v flag="N" -v GFF_ACC=$GFF_ACC -v ACC_PARSED=$ACC_PARSED '{
if($1 ~ /'$ACC_PARSED'/ && $1 ~ />/ )
{
print ">"GFF_ACC
}
}' $FNA_FILE | wc -l`
# Stream the file, toggling "flag" on at the wanted header and off at the
# next header, printing sequence lines in between.
cat $FNA_FILE | tr '\r' '\n' | awk -v flag="N" -v GFF_ACC=$GFF_ACC -v ACC_PARSED=$ACC_PARSED -v NUM_MATCH=$NUM_MATCH '{
if($1 ~ />/ && flag=="Y")
flag="N"
if(flag=="Y" && $1 !~ />/)
print $1
if ($1 ~ />/ )
{
if(NUM_MATCH==1)
{
if($1 ~ /'$ACC_PARSED'/)
{
print ">"GFF_ACC
flag="Y"
}
}
if(NUM_MATCH > 1)
{
if($1 == ">"ACC_PARSED)
{
print ">"GFF_ACC
flag="Y"
}
}
}
}' | sed '/^$/d'
}
############################
# Combined output files are named after the joined accession list.
OUT_TAG=`echo $@ | sed 's/ /:/g'`
COMBINED_ALL_GFF=$OUT_DIR$OUT_TAG"_ALL.gff"
COMBINED_GENE_GFF=$OUT_DIR$OUT_TAG"_GENES.gff"
COMBINED_FNA=$OUT_DIR$OUT_TAG".fna"
COMBINED_VERS=$OUT_DIR$OUT_TAG"_fna_gff_version.txt"
# Truncate all combined outputs ('echo "" | sed 1d' emits nothing).
echo "" | sed 1d > $COMBINED_ALL_GFF
echo "" | sed 1d > $COMBINED_GENE_GFF
echo "" | sed 1d > $COMBINED_FNA
echo "" | sed 1d > $COMBINED_VERS
echo $OUT_TAG
for p in $@
do
GFF_FILE=$NCBI_PATH"/"$p".gff"
PARSED_GFF_FILE=$TEMP_DIR"/"$p"_parsed.gff"
FNA_FILE=$NCBI_PATH"/"$p".fna"
echo $GFF_FILE
# NOTE(review): this sed looks like a no-op (space -> space); the original
# may have replaced spaces with underscores or tabs -- verify the intended
# substitution survived copy/paste.
cat $GFF_FILE | sed 's/ /_/g' > $PARSED_GFF_FILE
# Sequence accessions present in the GFF (stop at an embedded ##FASTA block).
ALL_GFF_ACC=` awk '{if($1 ~ /FASTA/) exit; print $1}' $PARSED_GFF_FILE | sort | uniq | grep -v "#" `
echo $ALL_GFF_ACC
for ACC in $ALL_GFF_ACC
do
echo $ACC
mkdir $TEMP_DIR"/"
# Per-accession intermediate and output files.
TEMP_CDS_FILE=$TEMP_DIR"/"$ACC"_acc_CDS_temp.gff"
CDS_FILE=$TEMP_DIR"/"$ACC"_acc_CDS.gff"
TRNA_FILE=$TEMP_DIR"/"$ACC"_acc_tRNA.gff"
RRNA_FILE=$TEMP_DIR"/"$ACC"_acc_rRNA.gff"
NCRNA_FILE=$TEMP_DIR"/"$ACC"_acc_ncRNA.gff"
IGR_FILE=$TEMP_DIR"/"$ACC"_acc_IGR.gff"
GENE_FILE=$OUT_DIR"/"$ACC"_acc_GENES.gff"
ALL_FILE=$OUT_DIR"/"$ACC"_acc_ALL.gff"
TR_GFF_PARSE $PARSED_GFF_FILE CDS product= $ACC $TEMP_CDS_FILE N
echo $TEMP_CDS_FILE
echo $PARSED_GFF_FILE
echo "Working on $TEMP_CDS_FILE for "$ACC" to generate "$CDS_FILE"..."
# For each CDS record: look up its parent gene to recover locus_tag and
# gene name, extend the coordinates by ADD5/ADD3 according to strand, and
# splice tag/name into the full_tag field.
while read line
do
ACC_LINE=`echo $line | awk '{print $1}'`
if [ $ACC == $ACC_LINE ];then
ID=`echo $line | awk '{
print "NONE"
y=split($9, ar, ";")
for(i=1; i < y+1; i++)
{
if(ar[i] ~ /Parent=/)
{
split(ar[i], pr, "=")
print pr[2]
exit
}
}
}' | tail -1`
if [ $ID != 'NONE' ];then
tag=`cat $PARSED_GFF_FILE | grep -w $ID | awk '{
if($3 == "gene")
{
y=split($9, ar, ";")
for(i=1; i < y+1; i++)
{
if(ar[i] ~ /locus_tag=/)
{
split(ar[i], pr, "=")
print pr[2]
}
}
}
}' | head -1`
gene_name=`cat $PARSED_GFF_FILE | grep -w $ID | awk '{
if(NR==1)
print "-"
if($3 == "gene")
{
y=split($9, ar, ";")
for(i=1; i < y+1; i++)
{
if(ar[i] ~ /gene=/)
{
split(ar[i], pr, "=")
print pr[2]
}
}
}
}' | tail -1`
else
tag="-"
gene_name="-"
fi
echo $line | awk -v ADD5=$ADD5 -v ADD3=$ADD3 '{
if($7=="+")
{
$4=$4-ADD5;$5=$5+ADD3
}
if($7=="-")
{
$4=$4-ADD3;$5=$5+ADD5
}
if($4 < 0)
$4=0
print $0
}' | sed 's/full_tag=CDS/full_tag=CDS:'$tag':'$gene_name'/g'
fi
done < $TEMP_CDS_FILE > $CDS_FILE
echo "Running TR_GFF_PARSE for "$ACC"..."
# Collect the RNA feature classes; tmRNA and transcript are appended to
# the ncRNA file.
TR_GFF_PARSE $PARSED_GFF_FILE tRNA product= $ACC $TRNA_FILE N
TR_GFF_PARSE $PARSED_GFF_FILE rRNA product= $ACC $RRNA_FILE N
TR_GFF_PARSE $PARSED_GFF_FILE ncRNA gene= $ACC $NCRNA_FILE N
TR_GFF_PARSE $PARSED_GFF_FILE tmRNA gene= $ACC $NCRNA_FILE Y
TR_GFF_PARSE $PARSED_GFF_FILE transcript product= $ACC $NCRNA_FILE Y
echo $CDS_FILE
echo $TRNA_FILE
echo $RRNA_FILE
echo $NCRNA_FILE
echo $ALL_FILE
echo $GENE_FILE
# Merge all gene-level features, sorted by start, tagging each with its type.
cat $TRNA_FILE $RRNA_FILE $CDS_FILE $NCRNA_FILE | sort -k4n | awk '{type=$3;split($1, ar, ".");$1=ar[1];print $0";type="type}' | sed 's/ / /g' > $GENE_FILE
echo "" | sed 1d > $IGR_FILE
# Derive intergenic regions (IGRs) from the gaps between consecutive genes.
# NOTE(review): '-v last_name=="ORIGIN"' has a doubled '=': awk will assign
# last_name the literal value '=ORIGIN' -- likely a typo for last_name="ORIGIN".
cat $GENE_FILE | awk -v ADD5=$ADD5 -v ADD3=$ADD3 -v last_end=0 -v last_name=="ORIGIN" -v last_tag="ORIGIN" -v last_dir="|" '{
$4=$4-1;$5=$5+1
if($4 < 0)
$4=0
y=split($9, ar, ";")
type_field=ar[y]
tag_field=ar[y-1]
split(tag_field, qr, "=")
tag_name=qr[2]
split(type_field, pr, "=")
type_name=pr[2]
dir=$7
strand="+"
if(NR >= 1)
{
$3="IGR"
if(last_end < $4)
{
gsub(":", ",", last_tag)
gsub(":", ",", tag_name)
print $1, $2, $3, last_end, $4, $6, strand, $8, last_tag"/"tag_name"):"strand":"$4-last_end";"last_name"/"type_name"_"last_dir"/"dir
}
}
last_end=$5
last_name=type_field
last_tag=tag_field
last_dir=$7
}' | sed 's/ / /g' | sed 's/full_tag=/full_tag=IGR:(/g' | sed 's/type=/type=IGR:/g' > $IGR_FILE
echo $IGR_FILE
# ALL file: GFF header lines plus genes+IGRs rewritten as generic "feature"
# records with normalized ncRNA naming.
cat $PARSED_GFF_FILE | grep "#" > $ALL_FILE
cat $GENE_FILE $IGR_FILE | tr '\r' '\n' | awk -v ACC=$ACC '{$3="feature";$1=ACC; print $0}' | sed 's/ / /g' | sed 's/full_tag=transcript/full_tag=ncRNA/g' | sed 's/full_tag=tmRNA/full_tag=ncRNA/g' | sed 's/type=tmRNA/type=ncRNA/g' | sort -k4n >> $ALL_FILE
cat $ALL_FILE | tr '\r' '\n' | sed '/^$/d' >> $COMBINED_ALL_GFF
cat $GENE_FILE >> $COMBINED_GENE_GFF
# Pull the matching sequence from the .fna, insisting on exactly one
# header for the accession (sans version).
ACC_PARSED=`echo $ACC | cut -d"." -f1`
typeset -i NUM_FNA_ACC
NUM_FNA_ACC=`cat $FNA_FILE | grep ">" | grep -w $ACC_PARSED | wc -l`
if [ $NUM_FNA_ACC == 1 ];then
echo "fna_extract $FNA_FILE $ACC_PARSED $ACC >> $COMBINED_FNA"
fna_extract $FNA_FILE $ACC_PARSED $ACC >> $COMBINED_FNA
fi
if [ $NUM_FNA_ACC == 0 ];then
echo "No fasta header with $ACC_PARSED found!"
cat $FNA_FILE | grep ">"
exit
fi
if [ $NUM_FNA_ACC -gt 1 ];then
echo "$ACC_PARSED has more than one fasta header!"
cat $FNA_FILE | grep ">"
exit
fi
# making record of acc version in $COMBINED_VERS
cat $PARSED_GFF_FILE | grep "##sequence-region" >> $COMBINED_VERS
cat $FNA_FILE | grep ">" >> $COMBINED_VERS
echo $COMBINED_ALL_GFF
done
done
cp $COMBINED_ALL_GFF $TEMP_DIR
cp $COMBINED_FNA $TEMP_DIR
cp $COMBINED_VERS $OUT_DIR
echo $COMBINED_ALL_GFF
echo $COMBINED_FNA
echo $COMBINED_VERS
exit
| true
|
6b6eb3cb0096384d5a99e86d5da687d07074c7b6
|
Shell
|
onaio/ansible-kafka-stack
|
/roles/zookeeper_helper_tools/templates/zklease.sh.j2
|
UTF-8
| 4,101
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# zklease.sh (Ansible/Jinja2 template): a crude distributed lease built on a
# ZooKeeper TTL znode. Usage:
#   zklease.sh [--release] NAME [TTL_SECS] [VALUE]
# Blocks until the lease NAME is acquired (znode created/renewed with our
# VALUE, default hostname); with --release it deletes the znode instead.
ZOOKEEPER_URL="{% for host in groups['zookeeper'] %}{% if loop.index > 1%},{% endif %}{{ host }}:{{zookeeper.properties.clientPort}}{% endfor %}"
LEASE_ROOT=/bashleases
if [[ $1 == --* ]]; then
if [[ $1 == --release ]]; then
RELEASE_LEASE=1
shift 1
else
echo "Unknown option $1"
exit 1
fi
fi
LEASE_NAME="$1"
LEASE_TIME_SECS="${2:-3600}"
LEASE_VALUE="${3:-$(hostname)}"
# Allowance for clock skew between lease holders.
MAX_SKEW_SECS=10
if [[ -z ${LEASE_NAME} ]]; then
echo "No lease name specified."
exit 1
fi
# Run one zookeeper-shell command, stripping the connection banner.
zk_command() {
zookeeper-shell "${ZOOKEEPER_URL}" "$@" | grep -A 50000 "WatchedEvent state:SyncConnected" | grep -v "WatchedEvent state:SyncConnected"
}
timestamp_secs() {
date --utc +"%s"
}
renew_lease() {
# NOTE that we must renew while holding the lease
# NOTE that we must renew with the same ttl as we used on creation
RENEW_LEASE_RESULT="$(zk_command set "${LEASE_ROOT}/${LEASE_NAME}" "${LEASE_VALUE}/$(timestamp_secs)/${WINNING_LEASE_TIME_SECS}")"
}
try_acquire_lease() {
#
# Here we try to create a node in zookeeper to represent the lease - only one client can win
#
# Node data format: "<value>/<creation-timestamp>/<ttl-secs>".
zk_command create "${LEASE_ROOT}" > /dev/null
zk_command create -t "$((LEASE_TIME_SECS * 1000))" "${LEASE_ROOT}/${LEASE_NAME}" "${LEASE_VALUE}/$(timestamp_secs)/${LEASE_TIME_SECS}" > /dev/null
# Read back whichever client's node actually exists now.
WINNING_LEASE_DATA="$(zk_command get "${LEASE_ROOT}/${LEASE_NAME}")"
WINNING_LEASE_VALUE="$(echo "${WINNING_LEASE_DATA}" | awk '{split($0,s,"/"); print s[1];}')"
WINNING_LEASE_TIMESTAMP_SECS="$(echo "${WINNING_LEASE_DATA}" | awk '{split($0,s,"/"); print s[2];}')"
WINNING_LEASE_TIME_SECS="$(echo "${WINNING_LEASE_DATA}" | awk '{split($0,s,"/"); print s[3];}')"
WINNING_LEASE_REMAINING_TIME_SECS="$((WINNING_LEASE_TIMESTAMP_SECS + WINNING_LEASE_TIME_SECS - $(timestamp_secs)))"
if [[ ${LEASE_VALUE} == ${WINNING_LEASE_VALUE} ]]; then
#
# We've got the lease, see if we have time to re-acquire
# We need to re-acquire to prevent zookeeper from killing our node, resetting the ttl timer
#
# NOTE(review): inside [[ ]], '<' is a lexicographic string comparison,
# and the right-hand side is the literal string
# "WINNING_LEASE_TIME_SECS" (missing '$'), so this condition is
# effectively always true. Probably intended:
#   (( WINNING_LEASE_REMAINING_TIME_SECS - MAX_SKEW_SECS < WINNING_LEASE_TIME_SECS ))
if [[ $((WINNING_LEASE_REMAINING_TIME_SECS - MAX_SKEW_SECS)) < WINNING_LEASE_TIME_SECS ]]; then
echo "$WINNING_LEASE_REMAINING_TIME_SECS seconds remain for bash lease ${WINNING_LEASE_DATA}."
# Renew the lease to protect it for another WINNING_LEASE_TIME_SECS
renew_lease
if [[ -z ${RENEW_LEASE_RESULT} ]]; then
WINNING=1
else
echo "WARNING: could not renew bash lease ${WINNING_LEASE_DATA} - we must try to re-acquire."
WINNING=0
fi
else
echo "WARNING: could not renew expiring bash lease ${WINNING_LEASE_DATA} - ${WINNING_LEASE_REMAINING_TIME_SECS} seconds remain and we must wait to re-acquire."
WINNING=0
fi
else
WINNING=0
fi
}
release_lease() {
# NOTE that we must release while holding the lease
RELEASE_LEASE_RESULT="$(zk_command delete "${LEASE_ROOT}/${LEASE_NAME}")"
}
echo "Waiting for bash lease ${LEASE_NAME} with value ${LEASE_VALUE}..."
# Spin with a small random backoff until we hold (or, for --release, have
# deleted) the lease.
while true; do
try_acquire_lease
if [[ ${WINNING} == 0 ]]; then
echo "Did not acquire bash lease - ${WINNING_LEASE_DATA} exists for about ${WINNING_LEASE_REMAINING_TIME_SECS} more secs..."
if [[ ${RELEASE_LEASE} == 1 ]]; then
# Job's already done, don't wait
echo "Bash lease ${LEASE_NAME} not held."
exit 0
fi
sleep "$((1 + RANDOM % 3)).$((RANDOM % 10))"
else
echo "Acquired renewed bash lease ${LEASE_NAME} with value ${WINNING_LEASE_VALUE} for ${WINNING_LEASE_TIME_SECS}..."
if [[ ${RELEASE_LEASE} == 1 ]]; then
release_lease
if [[ -z ${RELEASE_LEASE_RESULT} ]]; then
echo "Released bash lease ${WINNING_LEASE_DATA}."
exit 0
fi
echo "WARNING: could not release bash lease ${WINNING_LEASE_DATA} - we must try to re-acquire."
else
exit 0
fi
fi
done
| true
|
6dcab07d909774ebf972ebfc0af02245afb6d633
|
Shell
|
kevinyu0506/gitops-demo
|
/02-create-cluster.sh
|
UTF-8
| 816
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a GKE cluster (with Workload Identity) using the settings defined
# in ./env-variables.sh (PROJECT_ID, COMPUTE_REGION, COMPUTE_ZONE,
# CLUSTER_NAME). Works both when executed and when sourced.
if [ ! -f "./env-variables.sh" ]
then
echo "Please check if the variables ./env-variables.sh exists"
# `return` only works when sourced; when the script is executed it fails,
# so fall back to exit. (The bare `return 1` previously printed an error
# and let execution continue when run directly.)
return 1 2>/dev/null || exit 1
else
. ./env-variables.sh
fi
echo "Set default project to [${PROJECT_ID}], region to [${COMPUTE_REGION}], zone to [${COMPUTE_ZONE}]..."
gcloud config set project "${PROJECT_ID}"
gcloud config set compute/region "${COMPUTE_REGION}"
gcloud config set compute/zone "${COMPUTE_ZONE}"
# APIs needed for GKE, compute, and DNS management.
gcloud services enable container.googleapis.com
gcloud services enable compute.googleapis.com
gcloud services enable domains.googleapis.com
gcloud services enable dns.googleapis.com
echo "Creating cluster [${CLUSTER_NAME}]..."
gcloud container clusters create "${CLUSTER_NAME}" \
--num-nodes=1 \
--workload-pool="${PROJECT_ID}.svc.id.goog"
gcloud container clusters get-credentials "${CLUSTER_NAME}"
| true
|
a55121fcc94c125c81ca913244fc70aa5b4853e6
|
Shell
|
Qitmeer/084testnet-data
|
/gen-data.sh
|
UTF-8
| 2,180
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# gen-data.sh -- dump per-block reward data for blocks 1..40651 into
# block-reward-0.8.4.txt (CSV) via the node CLI wrapper cli.sh.
# Usage: gen-data.sh [-f]    (-f deletes any existing output first)
force="false"
if [ "$1" == "-f" ]; then
force="true"
shift
fi
CLI=./cli.sh
if [ ! -f "$CLI" ]; then
echo "$CLI not found!"
exit
fi
FILE=./block-reward-0.8.4.txt
if [ "$force" == "true" ]; then
rm -f $FILE
fi
function get_block_by_order() {
local cli=$1
local order=$2
block=$($cli block $num|jq '.|"\(.order) \(.height) \(.pow.pow_name) \(.pow.proof_data.edge_bits) \(.difficulty) \(.hash) \(.confirmations) \(.txsvalid) \(.timestamp) \(.transactions[0].vout[0].amount) \(.transactions[0].vout[0].scriptPubKey.addresses[0])"' -r)
order=`echo $block| cut -d' ' -f1`
height=`echo $block| cut -d' ' -f2`
pow=`echo $block| cut -d' ' -f3`
edge=`echo $block| cut -d' ' -f4`
diff=`echo $block| cut -d' ' -f5`
hash=`echo $block| cut -d' ' -f6`
confirm=`echo $block| cut -d' ' -f7`
txsvalid=`echo $block| cut -d' ' -f8`
timestamp=`echo $block| cut -d' ' -f9`
amount=`echo $block| cut -d' ' -f10`
addr=`echo $block| cut -d' ' -f11`
isblue=$($cli isblue $hash)
node=`echo $(basename $cli .sh)|sed 's/^.*-//g'`
#echo "$block $isblue $node $(owner $node)"
t=`date_to_timestamp $timestamp`
t1=`timestamp_to_date $t`
#t1=`timestamp_to_date $t GMT`
#t1=`timestamp_to_date $t UTC`
echo "$order,$height,$addr,$hash,$txsvalid,$amount,$confirm,$pow,$edge,$diff,$t,$t1,$isblue"
}
function date_to_timestamp() {
if [ ! "${1:${#1}-1}" == "Z" ]; then
date_to_timestamp_1 $1
else
date_to_timestamp_2 $1
fi
}
function date_to_timestamp_1() {
local time=`echo $1|sed s/:00$/00/g`
local s=`date -j -f "%Y-%m-%dT%H:%M:%S%z" "$time" "+%s"`
echo $s
}
# 2019-12-22T00:22:14Z
function date_to_timestamp_2() {
local s=`date -j -u -f "%Y-%m-%dT%H:%M:%SZ" "$1" "+%s"`
echo $s
}
function timestamp_to_date() {
if [ "$2" == "GMT" ]; then
local date=`env TZ=GMT date -j -r "$1" '+%Y-%m-%d_%H:%M:%S_%Z'`
elif [ "$2" == "UTC" ]; then
local date=`env TZ=UTC date -j -r "$1" '+%Y-%m-%d_%H:%M:%S_%Z'`
else
local date=`date -j -r "$1" '+%Y-%m-%d_%H:%M:%S_%Z'`
fi
echo $date
}
if [ ! -f $FILE ]; then
for ((num=1; num<40652; num+=1))
do
echo $(get_block_by_order $CLI $num) >> $FILE
done
fi
| true
|
73166da2de4ff25a97e02c85d4bb0fde257b5db5
|
Shell
|
dd238/username
|
/username.sh
|
UTF-8
| 508
| 3.953125
| 4
|
[] |
no_license
|
#! /bin/bash
# username.sh
# Dylan Davis
#
# Prompt for a username until it satisfies the rules:
#   - 3 to 12 characters long
#   - starts with a lower-case letter
#   - contains only lower-case letters, digits and underscores
echo "This program accepts a max of 12 characters as username, but no less than 3."
echo "It must start with a lower case letter."
echo "Only lower case letters, digits, and underscore character can be used."
echo "Enter a username: "
read -r USRNME || exit 1

# Bug fixes: the old class '[a-z,0-9,_]' also accepted literal commas, and the
# re-prompt wrongly asked for a "twenty-character username". 'read || exit'
# also stops the previously infinite loop on EOF.
while ! echo "$USRNME" | grep -E '^[a-z][a-z0-9_]{2,11}$' > /dev/null 2>&1
do
    echo "You must enter a valid username - follow the rules!"
    echo "Enter a username: "
    read -r USRNME || exit 1
done
| true
|
920da617437cfe2bf0df3c4eef5481d4c4ab62f5
|
Shell
|
edukorg/tsuru-platform-eduk-gradle
|
/deploy
|
UTF-8
| 1,620
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash -el
SOURCE_DIR=/var/lib/tsuru
source ${SOURCE_DIR}/base/deploy
source ${SOURCE_DIR}/base/rc/config
source "$HOME/.sdkman/bin/sdkman-init.sh"
SDK_CONF="sdk.conf"
DEFAULT_GRADLE_VERSION="4.7"
if [ -f "${CURRENT_DIR}/${SDK_CONF}" ]; then
while IFS='' read -r line || [[ -n "$line" ]]; do
IFS=" "; set -- $line;
if [ -n "$1" ]; then
sdk=$1
if [ -n "$2" ]; then
version=$2
echo "Installing $sdk sdk version $version..."
sdk install $sdk $version
if [ $? -ne 0 ]; then
echo "ERROR: failed to install $sdk version $version. Aborting..."
exit 2
fi
else
echo "Installing $sdk default version..."
sdk install $sdk
if [ $? -ne 0 ]; then
echo "ERROR: failed to install $sdk default version. Aborting..."
exit 2
fi
fi
fi
done < ${CURRENT_DIR}/${SDK_CONF}
else
echo "ERROR: ${SDK_CONF} not found. Please, create it and list insed it the gradle sdks to be used. Aborting..."
exit 1
fi
if sdk current gradle | grep 'Not using any version of gradle' > /dev/null; then
sdk install gradle $DEFAULT_GRADLE_VERSION
if [ $? -ne 0 ]; then
echo "ERROR: failed to install the gradle sdk version "
exit 3
fi
fi
pushd "$CURRENT_DIR"
gradle build --console plain --no-daemon --info
if [ $? -ne 0 ]; then
echo "ERROR: failed to build the Gradle project. Aborting..."
exit 4
fi
popd
| true
|
26b0065f991e12f6522bb87148e9e5db1398aced
|
Shell
|
mputterf/Wake-and-Sync
|
/wakeandsync.sh
|
UTF-8
| 2,463
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#wol = MAC address of destination nic to wake up
#backupsrv = destination machine
#sourcedir= backup source
wol=
backupsrv=
sourcedir=
username=
#Keep track of month for once a month deletion
currentmonth=$(date +%b)
monthfile="$(dirname "$0")/.monthfile.txt"
#Check for manual override
while getopts ":mw" opt; do
case "${opt}" in
w ) manualoverride="normal";;
m ) manualoverride="monthly";;
\? ) echo "Invalid Argument. Use -w for normal backup or -m for backup and delete." >&2
exit 1;;
esac
done
function readmonthfile {
while IFS= read -r line; do
monthread=$line
done < $monthfile
}
function normalbackup {
rsync -auvvXP -o -g * $username@$backupsrv:$sourcedir
}
function monthlybackup {
rsync -auvvXP -o -g --delete-before * $username@$backupsrv:$sourcedir
}
function checkstatus {
retries=0
if [ ! $alive -eq 0 ]; then
#try pinging again
ping -c3 $backupsrv
alive=$?
#if this is the first attempt, try again, else quit the script
if [ $retries -eq 0 ]; then
retries=$((retries+1))
#sleeping 5 seconds
sleep 5
checkstatus
else
#echo exiting script
exit 0
fi
fi
}
#read monthfile
if [ -f $monthfile ]; then
readmonthfile
else
echo "$currentmonth" | tee $monthfile
monthread=$(date +%b)
fi
#echo "sending Magic Packet"
wakeonlan $wol
#pause for 60 seconds to give backup server time to boot, and check if it's online
echo "sleeping for 60 seconds"
sleep 60
#echo "pinging server"
ping -c3 $backupsrv
alive=$?
checkstatus
#cding to NAS
cd $sourcedir/
#Manual override
if [ $# -eq 1 ]; then
if [ $manualoverride = "normal" ]; then
echo "Beginning normal backup (manual)"
normalbackup
elif [ $manualoverride = "monthly" ]; then
echo "Beginning monthly backup (manual)"
monthlybackup
fi
#automatic
elif [ $# -eq 0 ]; then
if [ $currentmonth = $monthread ]; then
echo "Beginning normal backup (automatic)"
normalbackup
elif [ $currentmonth != $monthread ]; then
echo "Beginning monthly backup (automatic)"
monthlybackup
fi
fi
#sleeping 20 seconds and write month file
echo "$currentmonth" > "$monthfile"
sleep 20
#check if users are logged in. If not, shutdown.
ssh $username@$backupsrv 'userson=$(who | wc -l 2>&1);
if [ $userson -gt 0 ]; then
exit 0;
else
/sbin/shutdown -P now;
fi'
| true
|
331805d60de8305dcf0610f4b18abaaebcbda297
|
Shell
|
hlesey/phippy
|
/load-test/run.sh
|
UTF-8
| 578
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Generate sustained load against the phippy UI and API with 'hey', pausing a
# random 40-130 seconds between rounds.
UI_URL="http://phippy.clusterx.qedzone.ro:30080"
API_URL="http://phippy-api.clusterx.qedzone.ro:30080"
ITERATIONS=10000

for (( i = 1; i <= ITERATIONS; i++ )); do
    echo "Generating requests for POST /"
    echo "------------------------"
    hey -n 1000 -m POST "$UI_URL"

    echo "Generating requests for GET /"
    echo "------------------------"
    hey -n 1000 -m GET "$UI_URL"

    echo "Generating requests for api GET /trigger_error"
    echo "------------------------"
    hey -n 10 -c 2 -m POST "$API_URL/trigger_error"

    # Portability fix: 'jot' is BSD-only and absent on typical Linux load
    # boxes; bash's RANDOM gives the same uniform 40-130s pause everywhere.
    sleep $(( RANDOM % 91 + 40 ))
done
| true
|
e189a84967720d2c4000e0d5a0ed075b58cf827f
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/makro/PKGBUILD
|
UTF-8
| 856
| 2.546875
| 3
|
[] |
no_license
|
# Maintainer: James An <james@jamesan.ca>
# Contributor: Nick B <Shirakawasuna at gmail _dot_com>
# Arch Linux PKGBUILD for Makro, a KDE4 frontend for Xnee and Xbindkeys.
pkgname=makro
pkgver=1.0
pkgrel=5
pkgdesc="A simple KDE4 frontend for Xnee and Xbindkeys."
arch=('i686' 'x86_64')
url="http://www.kde-apps.org/content/show.php/Makro?content=88647"
license=('GPL')
depends=('kdelibs' 'xbindkeys')
makedepends=('cmake' 'automoc4')
source=(
"http://kde-apps.org/CONTENT/content-files/88647-$pkgname.tar.gz"
'patch'
)
md5sums=(
'43f286516fd24890b222054b3931d5fa'
'7d51848f46784b1ac768a9ba16dd14ee'
)
# Unpack-time fixup: apply the bundled patch and prepare an out-of-tree build dir.
prepare() {
cd $pkgname
mkdir -p build
patch -p1 < ../patch
}
# Build the qmake-based backend daemon.
build() {
cd $pkgname/backend
qmake
make
# NOTE(review): copies the qmake 'release' output up as 'makrod' — confirm
# 'release' is a file (not a build directory) on this Qt configuration.
cp release ../makrod
}
package() {
cd $pkgname
# Configure with KDE4's install prefix and install into the package root.
cmake -D CMAKE_INSTALL_PREFIX=`kde4-config --prefix` -D CMAKE_BUILD_TYPE=release --build build .
make DESTDIR=$pkgdir install
}
| true
|
207f119bf46b8330ee2c5bd26e57844e8c5ddfb5
|
Shell
|
cloudfoundry/buildpacks-ci
|
/tasks/interrupt-bot/deploy/task.sh
|
UTF-8
| 929
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Deploy the interrupt-bot Slack delegate to Google Cloud Run.
# Required env vars: SERVICE_ACCOUNT_KEY (GCP service-account JSON),
# SLACK_TOKEN, PAIRIST_PASSWORD.
set -eu
set -o pipefail
#shellcheck source=../../../util/print.sh
source "${PWD}/ci/util/print.sh"
function main() {
util::print::title "[task] executing"
gcloud::authenticate
gcloud::run::deploy
}
# Activate the service account. The key is fed via process substitution so it
# never lands on disk as a regular file.
function gcloud::authenticate() {
util::print::info "[task] * authenticating with gcp"
gcloud auth activate-service-account \
--key-file <(echo "${SERVICE_ACCOUNT_KEY}")
}
# Deploy the pre-built image to Cloud Run in the key's own project
# (project_id is extracted from the service-account JSON).
function gcloud::run::deploy() {
util::print::info "[task] * deploying interrupt-bot"
local project
project="$(echo "${SERVICE_ACCOUNT_KEY}" | jq -r .project_id)"
gcloud run deploy interrupt-bot \
--image gcr.io/cf-buildpacks/slack-delegate-bot:latest \
--max-instances 1 \
--memory "128Mi" \
--platform managed \
--set-env-vars "SLACK_TOKEN=${SLACK_TOKEN},PAIRIST_PASSWORD=${PAIRIST_PASSWORD}" \
--allow-unauthenticated \
--project "${project}" \
--region us-central1
}
main "${@}"
| true
|
245ef53d73931fb24c11d73e10ac019fed8fa4fb
|
Shell
|
gaankdoank/imapsync-bulk-migrate
|
/bulk-copy-mailbox.sh
|
UTF-8
| 929
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Simple script to sync imap accounts with imapsync
# Author: Carlos Egüez <speedlight>
# Version: 1.0
# The USERLIST file use the user1;pass1;user2;pass2 format, change the IFS=";" variable if needed.
# The USERLIST file is suposed to be in the same location of this script.
imapsyncpath=/usr/bin
origin=oldmailaddress
destination=newmailaddress
# Extra imapsync flags; intentionally left unquoted below so they word-split
# into separate arguments.
options="--usecache --noerrorsdump --nofoldersizes --no-modules_version --addheader --subscribeall"

# Robustness fix: without this check a missing USERLIST skipped the loop and
# still printed "Migration Complete."
if [ ! -f USERLIST ]; then
    echo "USERLIST file not found next to this script." >&2
    exit 1
fi

# read -r keeps backslashes in passwords intact.
while IFS=";" read -r u1 p1 u2 p2; do
    { echo "$u1" |egrep "^#" ; } > /dev/null && continue # skip comment lines in USERLIST
    echo "============== Migrating mail from user $u1 to $u2 =============="
    bash "$imapsyncpath/imapsync" --host1 "$origin" --host2 "$destination" --user1 "$u1" --password1 "$p1" --user2 "$u2" --password2 "$p2" $options
    echo
    echo "============== Ended migration for user $u1 in $u2 =============="
done < USERLIST
echo "Migration Complete."
exit 0
| true
|
7cf9befb50ce709db3e097d43caae62735220c18
|
Shell
|
interfect/split-end
|
/split-end
|
UTF-8
| 1,106
| 4.0625
| 4
|
[
"WTFPL"
] |
permissive
|
#!/usr/bin/env bash
# split-end.sh: split a file end-first, without using more than one part extra disk space.
set -e

if [[ "${#}" != 2 ]] ; then
    echo 1>&2 "${0}: split a file end-first, without using more than one part extra disk space."
    echo 1>&2 ""
    echo 1>&2 "Usage: ${0} SOURCE DEST-BASENAME"
    exit 1
fi

SOURCE="${1}"
DEST="${2}"

# Part size: 4 GiB expressed in 1 KiB blocks.
PART_BLOCKS=4194304
BLOCK_SIZE=1024

FILE_SIZE=$(stat -c %s "${SOURCE}")
# Bug fix: the old '/BLOCK_SIZE+1' and '/PART_BLOCKS+1' rounded UP by a whole
# unit, so sizes dividing evenly produced an extra empty part. Ceiling
# division gives exact counts.
FILE_BLOCKS=$(( (FILE_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE ))
FILE_PARTS=$(( (FILE_BLOCKS + PART_BLOCKS - 1) / PART_BLOCKS ))
# An empty source still yields one (empty) part, matching the old behaviour.
if (( FILE_PARTS == 0 )); then
    FILE_PARTS=1
fi
echo "Split into ${FILE_PARTS} parts..."

# Work backwards: copy the last part out, then truncate it off the source, so
# at most one extra part of disk space is ever in use.
CURRENT_PART=$((FILE_PARTS-1))
while [[ "${CURRENT_PART}" -ge 0 ]] ; do
    OFFSET_BLOCKS=$((CURRENT_PART*PART_BLOCKS))
    echo "Producing part ${CURRENT_PART}..."
    dd "if=${SOURCE}" "of=${DEST}.part$(printf "%05d" ${CURRENT_PART})" bs=${BLOCK_SIZE} count=${PART_BLOCKS} skip=${OFFSET_BLOCKS}
    OFFSET_BYTES=$((OFFSET_BLOCKS*BLOCK_SIZE))
    echo "Dropping part ${CURRENT_PART} from file..."
    truncate -s ${OFFSET_BYTES} "${SOURCE}"
    CURRENT_PART=$((CURRENT_PART-1))
done
echo "Split into ${FILE_PARTS} parts complete!"
| true
|
169fbe0c6ea8d72924deedf161eb5969083f4df4
|
Shell
|
yamrzou/delayed-admin
|
/tests/test_abdicate.sh
|
UTF-8
| 550
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
. tests/tools.sh
# Integration tests for abdicate.sh: it should revoke the caller's sudo
# access until the given at(1)-style time, then restore it.
# Install delayed-admin, schedule restoration in 1 minute, verify sudo is
# revoked now and restored after the deadline, then uninstall.
testAbdicate(){
sudo ./setup.sh install || fail "Installation failed"
sudo ./abdicate.sh "now+1min" || fail "Abdicate failed"
check_access_is_revoked || fail "Sudo access is not revoked"
sleep 100
check_access_is_restored || fail "Sudo access is not restored"
sudo ./setup.sh uninstall || fail "Uninstall failed"
}
# abdicate.sh must refuse to run (and leave sudo intact) when Delayed Admin
# is not installed.
testAbdicateWithoutDelayedAdmin() {
sudo ./abdicate.sh "now+1min" && fail "Abdicate did not err without Delayed Admin"
check_access_is_restored || fail "Sudo access is not restored"
}
# shunit2 discovers and runs the test* functions defined above.
. tests/shunit2
| true
|
35d1477731189244a38f500eda5a4e1029b899d2
|
Shell
|
jancelin/usernetes
|
/run.sh
|
UTF-8
| 386
| 3
| 3
|
[
"Apache-2.0",
"MIT",
"GPL-2.0-only"
] |
permissive
|
#!/bin/bash
# Clean up leftovers from crashed previous instances, then exec the task
# runner with the caller's arguments.
set -eu -o pipefail
# clean up (workaround for crash of previously running instances)
(
    if ! [[ -w $XDG_RUNTIME_DIR ]]; then
        # Bug fix: 'echo &>2' redirected the message into a file literally
        # named "2"; '>&2' sends it to stderr as intended.
        echo >&2 "XDG_RUNTIME_DIR needs to be set and writable"
        exit 1
    fi
    rootlesskit=$(realpath "$(dirname "$0")")/bin/rootlesskit
    cd "$XDG_RUNTIME_DIR"
    # docker.* must stay unquoted so the glob expands.
    "$rootlesskit" rm -rf docker docker.* containerd runc crio usernetes
)
exec "$(dirname "$0")/bin/task" "$@"
| true
|
bf87d2b25b7137e9d6f221a2fddb5a623872d4d3
|
Shell
|
edr1/spaCy
|
/ftd/create_train_data.sh
|
UTF-8
| 700
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build a spaCy-style NER training data file from a list of entity strings.
#
# Usage: ./create_train_data.sh <in_file> <out_file> <entity_type>
#   in_file     - text file with one entity per line (e.g. FTD_autores.txt)
#   out_file    - generated Python file (e.g. FTD_autores_train.dat.py)
#   entity_type - NER label attached to every line (e.g. AUTOR)

# Write: TRAIN_DATA_<TYPE>_1E = [ ("<line>",{"entities":[(0,<len>,'<TYPE>')]}), ... ]
create_train_data () {
    echo "Creating training data"
    echo "in_file: $1"
    echo "out_file: $2"
    echo "entity_type: $3"
    echo "TRAIN_DATA_$3_1E = [" > "$2"
    while IFS= read -r line
    do
        # Bug fix: the old 'echo $line | wc -L' word-split the line first,
        # collapsing whitespace runs before measuring, so the entity span
        # length could be wrong; ${#line} is the exact character count.
        tot=${#line}
        echo "(\"$line\",{\"entities\":[(0,"$tot",'$3')]})," >> "$2"
    done < "./$1"
    echo "]" >> "$2"
}

main () {
    if [ $# -eq 3 ]; then
        create_train_data "$1" "$2" "$3"
    else
        # Typo fix: "psupply" -> "supply".
        echo "Please supply 3 parameters... "
        echo "in_file=FTD_autores.txt"
        echo "out_file=FTD_autores_dat.py"
        echo "entity_type=AUTOR"
    fi
}

main "$@"
| true
|
513c73a45e72256c537f0832187d65871a136590
|
Shell
|
timehook/cli-client
|
/ci/style.sh
|
UTF-8
| 236
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Fail the build when any Go file in the tree is not gofmt-formatted, listing
# the offending files on stderr.
unformatted=$(gofmt -l .)
if [ -n "${unformatted}" ]; then
    printf >&2 'goimports failed for the following files:\n%s\n\nplease run "gofmt -w ." on your changes before committing.\n' "${unformatted}"
    exit 1
fi
| true
|
07f575df9b64e88fe3eaaea6cf2df6430fa2b789
|
Shell
|
ionSurf/interactive_animation_control
|
/run_example.sh
|
UTF-8
| 635
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Serve the current directory over HTTP on the given port and open it in
# Chrome. NOTE(review): 'open -a /Applications/...' is macOS-specific.
# Print usage and exit; invoked for bad or missing arguments.
helpFunction() {
echo ""
echo "Usage: $0 -p port"
echo -e "\t-p Defines the port to use for the temporary web server"
exit 1 # Exit script after printing help
}
#while getopts "a:b:c:" opt
while getopts "p:" opt
do
case "$opt" in
p ) port="$OPTARG" ;;
? ) helpFunction ;; # Print helpFunction in case parameter is non-existent
esac
done
# Print helpFunction in case parameters are empty
if [ -z "$port" ]
then
echo "Some or all of the parameters are empty";
helpFunction
fi
#echo "$port"
# Launch the browser in the background and the blocking python web server on
# the same port.
open -a /Applications/Google\ Chrome.app http://localhost:$port/ & python3 -m http.server $port
| true
|
2da9068fd659b5f0ec872208de5d82b1c7962c30
|
Shell
|
cqmba/rocko-trusted
|
/meta-tpmenable/recipes-tpmscripts/tpmscripts/files/signature/signature.sh
|
UTF-8
| 3,039
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# This script has the TPM create a signature over external data.
# The asymmetric key pair is generated on the TPM.
# The private key stays encrypted on the TPM and cannot be read out.
# The public key is read out and extracted for verifying the signature.
# The signature is then verified externally (from Linux) with OpenSSL.
echo "PRIM KEY"
# primary key for owner hierarchy
tpm2_createprimary -a o -o csr_prim.ctx
echo ""
echo "KEY"
# create signing key pair
tpm2_create -C csr_prim.ctx -u csr.pub -r csr.priv
echo ""
echo "LOAD KEY"
# csr.pub/csr.priv are binary blobs and cannot be processed by OpenSSL
# directly; load them into the TPM instead.
tpm2_load -C csr_prim.ctx -u csr.pub -r csr.priv -o csr_sub.ctx
echo ""
echo "HASH"
# this is not really important
# it compares the HASH output of the TPM and Linux
tpm2_hash -a o -g sha256 input_hex.dat
sha256sum input_hex.dat
echo ""
echo "SIGN"
# the TPM signs external data with the private signing key
# output the signature to the file system
tpm2_sign -c csr_sub.ctx -g sha256 -m input_hex.dat -o signature_tpm.sig
echo ""
echo "CHECK SIG"
# TPM checks its own signature
# this will succeed :-D
tpm2_verifysignature -c csr_sub.ctx -g sha256 -m input_hex.dat -s signature_tpm.sig
echo ""
echo "READ PUB KEY FOR OPENSSL CHECK"
# extract the (unencrypted) public key from the TPM
# use PEM format for OpenSSL
tpm2_readpublic -c csr_sub.ctx -f pem -o csr.pem
echo ""
echo "EXTRACT SIGNATURE"
#
# the signature file has some additional header information
# this information will be deleted to be compatible with OpenSSL
#
# THE FIRST SIX BYTES OF SIGNATURE.SIG WILL BE DELETED
# [0014 000b 0100]
#
# pi@raspberrypi:~/development/csr $ xxd signature.sig
# 00000000: 0014 000b 0100 54e6 6bb3 f16c a3ec 6968 ......T.k..l..ih
# 00000010: 77a4 bfec f453 0863 8978 5f67 8cac 5ee3 w....S.c.x_g..^.
# ...
# 000000e0: ba63 f805 7dbe cec5 a5a1 3824 fd01 b425 .c..}.....8$...%
# 000000f0: 219f 78ae 2ad1 7631 a74e 0e22 cce1 681c !.x.*.v1.N."..h.
# 00000100: 745f 1fdf fe2d t_...-
# THE DD COMMAND CAN DO THIS JOB
#
# pi@raspberrypi:~/development/csr $ dd if=signature_tpm.sig of=signature_raw.sig bs=1 skip=6
# 256+0 Datensätze ein
# 256+0 Datensätze aus
# 256 bytes copied, 0,00864755 s, 29,6 kB/s
# CHECK THE OUTPUT
#
# pi@raspberrypi:~/development/csr $ xxd signature2.sig
# 00000000: 54e6 6bb3 f16c a3ec 6968 77a4 bfec f453 T.k..l..ihw....S
# 00000010: 0863 8978 5f67 8cac 5ee3 477c d118 043f .c.x_g..^.G|...?
# ...
# 000000e0: cec5 a5a1 3824 fd01 b425 219f 78ae 2ad1 ....8$...%!.x.*.
# 000000f0: 7631 a74e 0e22 cce1 681c 745f 1fdf fe2d v1.N."..h.t_...-
dd if=signature_tpm.sig of=signature_raw.sig bs=1 skip=6
echo ""
echo "CHECK SIG WITH OPENSSL"
# use OpenSSL to check the signature
openssl dgst -sha256 -verify csr.pem -keyform pem -signature signature_raw.sig input_hex.dat
echo ""
echo "CLEANUP"
rm csr.pem
rm signature_tpm.sig
rm signature_raw.sig
rm csr.priv
rm csr.pub
rm csr_sub.ctx
rm csr_prim.ctx
echo ""
| true
|
2072229f87707c539ca3c3e0480b690a2a984ecb
|
Shell
|
steven-cruz/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/10-fizzbuzz
|
UTF-8
| 418
| 3.640625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Print the FizzBuzz sequence for 1..100, one entry per line:
# "FizzBuzz" for multiples of 15, "Fizz" for multiples of 3,
# "Buzz" for multiples of 5, otherwise the number itself.
for (( i = 1; i <= 100; i++ )); do
    # Dispatch on the residue mod 15: 0 -> both, 3/6/9/12 -> only 3 divides,
    # 5/10 -> only 5 divides, anything else -> plain number.
    case "$(( i % 15 ))" in
        0) echo "FizzBuzz" ;;
        3|6|9|12) echo "Fizz" ;;
        5|10) echo "Buzz" ;;
        *) echo "$i" ;;
    esac
done
| true
|
c8e235259aecbf1605890386ac8d1f5a29f0d672
|
Shell
|
ehles/aux_scripts
|
/mirantis/config.sh
|
UTF-8
| 2,344
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Environment configuration for fuel-devops test runs of the Contrail plugin.
# Intended to be sourced; pulls shared helpers from utils.sh next to it.
SCRIPT=`realpath $0`
SCRIPTPATH=`dirname $SCRIPT`
source ${SCRIPTPATH}/utils.sh
export WORK_PATH=~/work/dpdk/tpi76/fuel_devops/data
###############################################################################
# Local variables
###############################################################################
DEVOPS_RELEASE="3.0.2"
SRC_REPO="https://github.com/openstack/fuel-plugin-contrail.git"
SRC_PATH="${WORK_PATH}/fuel-plugin-contrail"
SRC_BRANCH="origin/stable/5.0.0"
#SRC_BRANCH="master"
DEPS_FILES=(
"https://raw.githubusercontent.com/openstack/fuel-qa/stable/mitaka/fuelweb_test/requirements.txt"
"${SRC_PATH}/plugin_test/requirement.txt"
)
###############################################################################
# Exports
###############################################################################
export ENV_NAME=80-contrail
export VENV_PATH="${WORK_PATH}/devops_venv_${DEVOPS_RELEASE}"
# Commented out: sqlite backend for fuel-devops; default DB engine is used.
#export DEVOPS_DB_NAME="${WORK_PATH}/db.sqlite"
#export DEVOPS_DB_ENGINE="django.db.backends.sqlite3"
###############################################################################
# Baremetal Access Credentials
###############################################################################
# NOTE(review): placeholder credentials — replace before use and avoid
# committing real IPMI secrets to this file.
export BM_IPMI_USER='ipmi_user'
export BM_IPMI_PASSWORD='ipmi_password'
export BM_IPMI_ADDR='ipmi_host_address'
export BM_TARGET_MACS='MAC1;MAC2'
export BM_HOST_BRIDGE_INTERFACES='eth1:10.109.0.0/24;eth2:10.109.4.0/24'
###############################################################################
# Test files and configs.
###############################################################################
export CONTRAIL_PLUGIN_PATH="${WORK_PATH}/contrail-5.0-5.0.0-1.noarch.rpm"
export CONTRAIL_PLUGIN_PACK_UB_PATH="${WORK_PATH}/contrail-install-packages_3.1.0.0-25.deb"
export ISO_PATH="/storage/downloads/MirantisOpenStack-9.0.iso"
export MIRROR='http://mirror.seed-cz1.fuel-infra.org'
export MIRROR_UBUNTU='deb http://mirror.seed-cz1.fuel-infra.org/pkgs/ubuntu/ trusty main universe multiverse | deb http://mirror.seed-cz1.fuel-infra.org/pkgs/ubuntu/ trusty-updates main universe multiverse'
export KVM_USE='True'
export DISABLE_SSL='True'
# VM sizing for the Fuel admin and slave nodes.
export ADMIN_NODE_MEMORY=4096
export ADMIN_NODE_CPU=4
export SLAVE_NODE_MEMORY=4096
export SLAVE_NODE_CPU=4
export NODE_VOLUME_SIZE=350
export NODES_COUNT=10
| true
|
2721932be5fd207fd6e70b825070f66429d99eb4
|
Shell
|
open-estuary/armor
|
/testing/test_scripts/test_setpci.sh
|
UTF-8
| 1,012
| 3.140625
| 3
|
[] |
no_license
|
###############################################################################
# This script tests the setpci tool
#Usage:
# $ sh test_setpci.sh
################################################################################
#!/bin/bash
# NOTE(review): the shebang above is not on line 1, so it has no effect; the
# script runs under whatever shell invokes it (the usage suggests 'sh').
# Skip the whole run if setpci is not installed (helper exits 1 when missing).
bash check_install.sh setpci
status=$?
if test $status -eq 1
then
exit
fi
#setpci
# Sanity check: the tool reports its version.
setpci --version
status=$?
if test $status -eq 0
then
echo "command setpci --version [PASS]"
else
echo "command setpci --version [FAIL]"
exit
fi
echo " setpci - configure PCI devices"
echo " setpci -s 0 device_id vendor_id - lists ID’s of devices in slot 0 in all buses"
# Read the device/vendor IDs of slot 0 on every bus.
setpci -s 0 device_id vendor_id
status=$?
if test $status -eq 0
then
echo "command setpci -s 0 device_id vendor_id [PASS]"
else
echo "command setpci -s 0 device_id vendor_id [FAIL]"
exit
fi
echo " setpci --dumpregs"
# Dump the table of known PCI register names.
setpci --dumpregs
status=$?
if test $status -eq 0
then
echo "command setpci --dumpregs [PASS]"
else
echo "command setpci --dumpregs [FAIL]"
exit
fi
exit
| true
|
dc76bb217ccd99db48b4be835402cee05fb7a95d
|
Shell
|
facubara/orga2
|
/TP2/entregable/tp2-bundle.v2/src/experimentos/funciones.sh
|
UTF-8
| 803
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Post-processing helpers for experiment measurements: trim outliers and
# compute mean/deviation stats per measurement file.
# Experiment output directories this script operates on.
DIR=(log mem txt ult blue hsl1ch)
# Takes a valid directory as $1: for every file in it, drop the 200 smallest
# and 200 largest samples (numeric sort ascending then descending, deleting
# the first 200 lines each time).
function quitarOutliers()
{
tmp=tmp.txt
for file in $1/*
do
echo "quitando outliers de $file en $1"
sort -g $file>$tmp
sed '1,200d' $tmp>$file
sort -g -r $file>$tmp
sed '1,200d' $tmp>$file
rm $tmp
done
}
# Takes a valid directory as $1: regenerate <file>.stats with the mean and
# deviation computed by the Python helpers promedio.py / desvio.py.
function Promedios()
{
rm $1/*.stats
for file in $1/*
do
echo "calculando promedio y varianza $file en $1"
python3 promedio.py $file>>$file.stats
python3 desvio.py $file>>$file.stats
done
}
# The functions below take no arguments; they apply the helpers above to
# every directory listed in DIR.
function promediarParaTodos()
{
for dir in ${DIR[*]};
do
Promedios $dir
done
}
function quitarOutliersParaTodos()
{
for dir in ${DIR[*]};
do
quitarOutliers $dir
done
}
# Delete previously generated "*promedio" files in every directory.
function sacarPromediosViejos()
{
for dir in ${DIR[*]};
do
rm $dir/*promedio
done
}
| true
|
ab3e4a5e4972fd662e5f2493f959d748763ce308
|
Shell
|
rahulgidde/Shellscript
|
/dictionary/RollDie.sh
|
UTF-8
| 665
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Roll a six-sided die repeatedly until some face has been rolled 10 times,
# then report the per-face counts and the highest/lowest counts seen.
declare -A die
#VARIABLE
maximum=0

# Bug fix: start every face at 0. The old code read unset entries
# (empty ${die[1]}), which broke the first min/max comparisons, and its scan
# loop only ran over ${#die[@]} (the count of faces rolled so far, not 1..6).
for (( face = 1; face <= 6; face++ ))
do
    die[$face]=0
done

# CALCULATING DICE NUMBER WHO REACH MAXIMUM AND MINIMUM COUNT
while [ $maximum -ne 10 ]
do
    number=$((1+RANDOM%6))
    die[$number]=$(( die[$number] + 1 ))
    # Rescan all six faces for the current extremes.
    maximum=${die[1]}
    minimum=${die[1]}
    for (( index=1; index<=6; index++ ))
    do
        if [ "${die[$index]}" -gt "$maximum" ]
        then
            maximum=${die[$index]}
        fi
        if [ "${die[$index]}" -lt "$minimum" ]
        then
            minimum=${die[$index]}
        fi
    done
done

#DISPLAYING DICE NUMBER WHO REACH MAXIMUM AND MINIMUM TIMES
echo "Keys : ${!die[@]}"
echo "Values: ${die[@]}"
echo "Maximun Times Reached Element Is : $maximum"
echo "Minimum Times Reached Element Is : $minimum"
| true
|
69e6652759ea5acbc4bebcdce1885c5d1b1a0cf1
|
Shell
|
hexnut/scripts
|
/moodate.sh
|
UTF-8
| 594
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Author: Stephen Collins
# Date: 2016-04-01
# Description: Instruct the cow to show system time and hostname.
# Requires: figlet, cowthink; lolcat only when run with -l.
# Global inits
M=`hostname`
ARG=$1
# Draw the cow
# Render hostname + current time into a temp file, then print it
# (rainbow-colored via lolcat when -l was passed).
function update_screen(){
clear
F=$(mktemp /tmp/moodate.XXXXXX)
figlet -f small $M >> $F
cowthink -W 30 -f default "It's" $NOW "...moo" >> $F
if [[ $ARG = '-l' ]]; then
cat $F | lolcat -p 10
else
cat $F
fi
rm -f $F
}
# Main bovine loop
# Poll every 10s; redraw only when the minute-resolution timestamp changes.
while :
do
NOW=`date +"%A %b %d %Y %I:%M%p"`
if [[ $NOW != $THEN ]]; then
update_screen
fi
THEN=$NOW
sleep 10s
done
| true
|
101ea737fae90b1ddc0e1b35565eb848ae923e80
|
Shell
|
pydata/bottleneck
|
/tools/travis/bn_setup.sh
|
UTF-8
| 636
| 2.828125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
# CI entry point for bottleneck: dispatch on TEST_RUN
# (style | sdist | doc | default in-place test run).
set -ev # exit on first error, print commands
if [ "${TEST_RUN}" = "style" ]; then
flake8
black . --check --exclude "(build/|dist/|\.git/|\.mypy_cache/|\.tox/|\.venv/\.asv/|env|\.eggs)"
else
# Install either from a freshly built sdist tarball or straight from the tree.
if [ "${TEST_RUN}" = "sdist" ]; then
python setup.py sdist
ARCHIVE=`ls dist/*.tar.gz`
pip install "${ARCHIVE[0]}"
else
pip install "."
fi
python setup.py build_ext --inplace
# Drop errexit: test/doc targets report their own failures.
set +e
if [ "${TEST_RUN}" = "doc" ]; then
make doc
else
# Workaround for https://github.com/travis-ci/travis-ci/issues/6522
python "tools/test-installed-bottleneck.py"
fi
fi
| true
|
56f38f20b2abccff9794b171a25a18b2acda7d03
|
Shell
|
foundeo/fuseless
|
/test.sh
|
UTF-8
| 1,514
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the FuseLess jars, start a local SAM API, exercise it with curl, and
# run the SAM event test. Exits non-zero on any failed step.

# Bug fix: '[[ !$LUCEE_VERSION ]]' tested the non-empty literal string
# "!<value>", which is always true, so any caller-supplied version was
# clobbered. Test for unset/empty instead.
if [[ -z "${LUCEE_VERSION:-}" ]]; then
    LUCEE_VERSION=5.3.6.61
fi

if [ -f "java/jars/lucee-light-$LUCEE_VERSION.jar" ]; then
    echo "lucee-light-$LUCEE_VERSION.jar already there, skipping download"
else
    #download lucee jar
    echo "Downloading lucee-light-$LUCEE_VERSION.jar"
    echo "https://cdn.lucee.org/lucee-light-$LUCEE_VERSION.jar"
    curl --location -o "java/jars/lucee-light-$LUCEE_VERSION.jar" "https://cdn.lucee.org/lucee-light-$LUCEE_VERSION.jar"
    cp "java/jars/lucee-light-$LUCEE_VERSION.jar" test/jars/
fi

cd java
#compile java
gradle build
cd ..
cp java/build/libs/foundeo-fuseless.jar test/jars/
cd test
gradle build

# Start the local API in the background; remember its PID for cleanup.
sam local start-api --port 3003 --debug &
SAM_PID=$!
#give it a chance to startup
echo -e "Sleeping for 5...\n"
sleep 5

echo "Running: http://127.0.0.1:3003/assert.cfm"
http_code=$(curl --verbose -s --header "Content-Type: application/json" --request POST --data '{"x":1}' -o /tmp/result.txt -w '%{http_code}' 'http://127.0.0.1:3003/assert.cfm?requestMethod=POST&requestContentType=application/json&requestBody=%7B%22x%22%3A1%7D&contentLength=7';)
echo "Finished with Status: $http_code "
echo -e "\n-----\n"
#output the result
cat /tmp/result.txt
echo -e "\n-----\n"
kill $SAM_PID

if [ "$http_code" -ne 222 ]; then
    # fail if status code is not 222 (the assert page's success status)
    exit 1
fi

echo "Testing Events"
echo -e "\n-----\n"
sam local generate-event s3 put > /tmp/test-event.json
sam local invoke FuselessTestEvent --event /tmp/test-event.json
echo -e "\n-----\n"
exit 0
| true
|
92b87143b2ec47dfb83804467e627851a41609ac
|
Shell
|
kowalskey/toolz
|
/svn-backup/backup_repos.sh
|
UTF-8
| 1,100
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Replicate each SVN repository with svnsync, then archive the local copy via
# the hot-backup script, keeping at most NUM_BACKUPS verified bz2 archives.

REPOS_ROOT=/home/svnbackup/repos
BACKUP_ROOT=/home/svnbackup/backups
NUM_BACKUPS=60

HOT_BACKUP_SCRIPT=~/bin/hot-backup.py
HOT_BACKUP_SCRIPT_ARGS="--archive-type=bz2 --num-backups=$NUM_BACKUPS --verify"

SVNSYNC=$(which svnsync 2>/dev/null)

REPOS=(repo1 repo2)

for REPO in "${REPOS[@]}"
do
    REPO_DIR="$REPOS_ROOT/$REPO"
    if [[ ! -d "$REPO_DIR" ]]
    then
        echo "$REPO_DIR does not exist or is not a directory!" >&2
        exit 1
    fi

    echo "Replicating repository $REPO.."
    "$SVNSYNC" synchronize "file://$REPO_DIR"
    if [[ $? -ne 0 ]]
    then
        # Bug fix: this used to 'exit 2', aborting every remaining repository
        # even though the message promises only to skip this one. (A stray
        # 'echo "test" >&2' debug line was also removed.)
        echo "Failed to replicate repository, skipping backup..." >&2
        continue
    fi

    echo "Backing up local repository copy..."
    BACKUP_DIR="$BACKUP_ROOT/$REPO"
    # ARGS stays unquoted on purpose: it word-splits into separate flags.
    $HOT_BACKUP_SCRIPT $HOT_BACKUP_SCRIPT_ARGS "$REPO_DIR" "$BACKUP_DIR"
    if [[ $? -eq 0 ]]
    then
        echo "Backup completed successfully!"
    else
        echo "Failed to backup repository!" >&2
    fi
done
| true
|
91545b3de5e4ef1d8c8227d4bc36427e6c51c93e
|
Shell
|
ellerbrock/gitlab-terraform
|
/templates/userdata.tpl
|
UTF-8
| 1,477
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash -ex
# EC2 user-data rendered by Terraform: install NFS support, mount the EFS
# volume for GitLab data, render /etc/gitlab/gitlab.rb, and reconfigure.
# The ${...} placeholders are interpolated by Terraform BEFORE the instance
# runs this script.
# Mirror all output to a log file, syslog, and the instance console.
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
apt-get update
echo "Installing nfs-common"
apt-get install -y nfs-common
# Use the VPC-provided Amazon DNS server so the EFS hostname resolves.
echo "nameserver 169.254.169.253" > /etc/resolv.conf
echo "Creating gitlab-data mount point"
mkdir -p ${mount_point}
# Persist the EFS NFSv4.1 mount across reboots, then mount it now.
echo "${fs_id}.efs.${region}.amazonaws.com:/ /gitlab-data nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0" >> /etc/fstab
mount ${mount_point}
echo "Creating gitlab.rb"
# NOTE(review): '>>' appends on every boot, so repeated runs duplicate this
# config block — confirm this template only runs on first boot. The backtick
# command inside the heredoc runs on the instance, not in Terraform.
cat >> /etc/gitlab/gitlab.rb <<EOF
external_url 'http://${external_url}'
postgresql['enable'] = false
gitlab_rails['db_adapter'] = "postgresql"
gitlab_rails['db_encoding'] = "utf8"
gitlab_rails['db_database'] = "${db_database}"
gitlab_rails['db_username'] = "${db_username}"
gitlab_rails['db_password'] = "${db_password}"
gitlab_rails['db_host'] = "`echo ${db_host} |cut -d: -f1`"
gitlab_rails['db_port'] = ${db_port}
gitlab_rails['redis_host'] = "${redis_host}"
gitlab_rails['redis_port'] = ${redis_port}
git_data_dirs({"default" => "/gitlab-data/git-data"})
user['home'] = '/gitlab-data/gitlab/'
gitlab_rails['uploads_directory'] = '/gitlab-data/uploads'
gitlab_rails['shared_path'] = '/gitlab-data/shared'
gitlab_rails['registry_path'] = "/gitlab-data/shared/registry"
gitlab_rails['lfs_storage_path'] = '/gitlab-data/lfs-objects'
postgres_exporter['enable'] = false
redis_exporter['enable'] = false
redis['enable'] = false
EOF
echo "Executing gitlab-ctl reconfigure"
gitlab-ctl reconfigure
| true
|
125222f61581b66237db636e7def32952f049d9b
|
Shell
|
hant0508/hant0508.github.io
|
/comp/edit.sh
|
UTF-8
| 255
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Maintain a sorted, de-duplicated word list in ./dic.txt.
#   add "word word ..." - append each space-separated word, then re-sort uniquely
#   rm  "word"          - delete every line containing the word
case "$1" in
    add)
        words=`echo "$2" | tr ' ' '\n'`
        echo "$words" >> dic.txt
        sort -u dic.txt > dic.back
        cp dic.back dic.txt
        rm dic.back
        ;;
    rm)
        sed -i "/\<$2\>/d" dic.txt
        ;;
    *)
        echo "add \"word\" or rm \"word\""
        ;;
esac
| true
|
7974166b803b9d2af0981d10f4d88e8b4e1a2d55
|
Shell
|
jfharden/template-php-docker-site
|
/config/docker/entrypoint.sh
|
UTF-8
| 3,068
| 3.609375
| 4
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
#!/bin/bash
# Docker entrypoint: render the PHP DB config and optional Apache auth
# configs from environment variables into /secrets, then scrub the env and
# hand off to the stock docker-php-entrypoint.
set -e
echo "Writing PHP config file"
echo "<?php
\$_CONF['db_name'] = \"$DB_NAME\";
\$_CONF['db_host'] = \"$DB_HOST\";
\$_CONF['db_user'] = \"$DB_USER\";
\$_CONF['db_pass'] = \"$DB_PASS\";
\$_CONF['sslmode'] = \"$SSLMODE\";
\$_CONF['sslrootcert'] = \"$SSLROOTCERT\";
?>" > /secrets/config.php
# Readable only by root and the web server group.
chmod 440 /secrets/config.php
chown root:www-data /secrets/config.php
echo "Created /secrets/config.php"
# Optional HTTP basic auth: enabled when HTPASSWD_FILE holds htpasswd content.
if [ -n "$HTPASSWD_FILE" ]; then
echo "HTPASSWD_FILE provided, setting up basic auth"
cat > /etc/apache2/conf-available/basic_auth.conf <<-EOF
<Location />
AuthType Basic
AuthName "Authorisation Required"
AuthUserFile "/secrets/htpasswd"
require valid-user
</Location>
EOF
chown www-data:www-data /etc/apache2/conf-available/basic_auth.conf
chmod 400 /etc/apache2/conf-available/basic_auth.conf
a2enconf basic_auth
echo "Created and enabled /etc/apache2/conf-available/basic_auth.conf"
echo "$HTPASSWD_FILE" > /secrets/htpasswd
chmod 440 /secrets/htpasswd
chown root:www-data /secrets/htpasswd
echo "Created /secrets/htpasswd"
else
echo "HTPASSWD_FILE not provided, basic auth disabled"
fi
# Optional OpenID Connect auth via mod_auth_openidc, driven by OPENID_* vars.
if [ "$OPENID_ENABLED" == "true" ]; then
echo "OpenID has been requested, writing apache config"
cat > /etc/apache2/conf-available/openid.conf <<-EOF
# SEE https://medium.com/@robert.broeckelmann/openid-connect-authorization-code-flow-with-aws-cognito-246997abd11a
# MetaData URL for AWS cognito is:
# https://cognito-idp.<REGION>.amazonaws.com/<USER_POOL_ID>/.well-known/openid-configuration
OIDCProviderMetadataURL $OPENID_METADATA_URL
OIDCClientID $OPENID_CLIENT_ID
OIDCClientSecret $OPENID_SECRET
# OIDCRedirectURI is a vanity URL that must point to a path protected by this module but must NOT point to any content
OIDCRedirectURI $OPENID_REDIRECT_URL
OIDCCryptoPassphrase $OPENID_CRYPTO_PASSPHRASE
<LocationMatch "^/(?!loggedout.php)">
AuthType openid-connect
Require valid-user
</LocationMatch>
EOF
# Some open connect providers aren't giving the logout uri in the metadata (looking at you AWS Cognito!) so if it's
# specified we need to include it in the OpenID config
if [ -n "$OPENID_END_SESSION_ENDPOINT" ]; then
echo " OIDCProviderEndSessionEndpoint $OPENID_END_SESSION_ENDPOINT" >> /etc/apache2/conf-available/openid.conf
fi
chmod 400 /etc/apache2/conf-available/openid.conf
a2enconf openid
else
echo "OpenID has not been enabled"
fi
echo "Changing permissions of /sessions/"
chown www-data:www-data /sessions/
chmod 770 /sessions/
# Scrub the secrets from the environment so child processes never see them.
echo "Unsetting env vars"
unset DB_NAME
unset DB_HOST
unset DB_USER
unset DB_PASS
unset SSLMODE
unset SSLROOTCERT
unset HTPASSWD_FILE
unset OPENID_ENABLED
unset OPENID_END_SESSION_ENDPOINT
unset OPENID_METADATA_URL
unset OPENID_CLIENT_ID
unset OPENID_SECRET
unset OPENID_REDIRECT_URL
unset OPENID_CRYPTO_PASSPHRASE
echo "Custom entrypoint setup complete, running docker-php-entrypoint"
exec "/usr/local/bin/docker-php-entrypoint" "$@"
| true
|
01ebbdee050d6979b026241f0df416beda97376f
|
Shell
|
Bkoul26/NTI300
|
/LDAP
|
UTF-8
| 4,550
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision an OpenLDAP + phpldapadmin server (CentOS 7) for dc=NTI310,dc=local:
# installs packages, points phpldapadmin at the directory, sets the slapd
# suffix/rootDN/rootPW, generates a self-signed TLS cert, loads the standard
# schemas and creates the base OU structure.
yum install -y git
cd /tmp
git clone https://github.com/Bkoul26/NTI300
yum install -y openldap-servers openldap-clients
cp /usr/share/openldap-servers/DB_CONFIG.example /var/lib/ldap/DB_CONFIG
chown ldap. /var/lib/ldap/DB_CONFIG
systemctl enable slapd
systemctl start slapd
yum install -y httpd phpldapadmin
# Allow Apache to talk to LDAP under SELinux.
setsebool -P httpd_can_connect_ldap on
systemctl enable httpd
systemctl start httpd
# FIX: plain ASCII quotes -- the original used typographic quotes, which sed
# received as literal characters, breaking the expression.
sed -i 's,Require local,Require local\n Require all granted,g' /etc/httpd/conf.d/phpldapadmin.conf
# Keep 'cp' un-aliased so the backup below never prompts (root's cp -i alias);
# ignore the error when no alias exists.
unalias cp 2>/dev/null
# copy original configuration file as backup
cp /etc/phpldapadmin/config.php /etc/phpldapadmin/config.php.orig
# grep for the blowfish key line; expected to end in "# Autogenerated for ldap"
grep 'config.*blowfish' /etc/phpldapadmin/config.php.orig
# FIX: ASCII quotes here too, otherwise $rightkey captured nothing.
rightkey=$( grep 'config.*blowfish' /etc/phpldapadmin/config.php.orig )
# copy pre-made conf to config.php
cp /tmp/NTI300/CONFIG /etc/phpldapadmin/config.php
# Splice the host's own blowfish secret into the pre-made config.
# NOTE(review): assumes $rightkey contains no '/' or '&' (sed replacement
# metacharacters) -- true for a hex key line, confirm if the format changes.
sed -i "s/\$config->custom->session\['blowfish'\] = '6ad4614a51893aaf046e2057e7870ae4'\; # Autogenerated for ldap-c/$rightkey/g" /etc/phpldapadmin/config.php
chown ldap:apache /etc/phpldapadmin/config.php # apache must be able to read it
systemctl restart httpd
echo "phpldapadmin should now be functional"
echo "configuring ldap + ldapadmin"
# Generate a new admin secret and its hash for olcRootPW.
newsecret=$(slappasswd -g)
newhash=$(slappasswd -s "$newsecret")
# FIX: no longer echo the clear-text secret/hash to stdout (they ended up in
# provisioning logs); the secret is readable from /root/ldap_admin_pass.
echo -n "$newsecret" > /root/ldap_admin_pass
chmod 0600 /root/ldap_admin_pass # only root can read & write
# slapd configuration: suffix, rootDN, rootPW.
# NOTE(review): olcSuffix uses dc=NTI310 while olcRootDN uses dc=nti310; DN
# matching is case-insensitive, but consider unifying the spelling.
echo -e "dn: olcDatabase={2}hdb,cn=config
changetype: modify
replace: olcSuffix
olcSuffix: dc=NTI310,dc=local
\n
dn: olcDatabase={2}hdb,cn=config
changetype: modify
replace: olcRootDN
olcRootDN: cn=ldapadm,dc=nti310,dc=local
\n
dn: olcDatabase={2}hdb,cn=config
changetype: modify
replace: olcRootPW
olcRootPW: $newhash" > db.ldif
# FIX: removed the interactive 'vim db.ldif' review step -- it blocks
# unattended provisioning runs.
ldapmodify -Y EXTERNAL -H ldapi:/// -f db.ldif
# Restrict who may read the monitor backend.
echo 'dn: olcDatabase={1}monitor,cn=config
changetype: modify
replace: olcAccess
olcAccess: {0}to * by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external, cn=auth" read by dn.base="cn=ldapadm,dc=nti310,dc=local" read by * none' > monitor.ldif
ldapmodify -Y EXTERNAL -H ldapi:/// -f monitor.ldif
# Self-signed certificate, valid one year.
openssl req -new -x509 -nodes -out /etc/openldap/certs/NTI310ldapcert.pem -keyout /etc/openldap/certs/NTI310ldapkey.pem -days 365 -subj "/C=US/ST=WA/L=Seattle/O=SCC/OU=IT/CN=NTI310.local"
chown -R ldap. /etc/openldap/certs/NTI*.pem
# FIX: the second stanza of the original wrote the *certificate* into
# olcTLSCertificateKeyFile and used 'changetype=modify' (invalid LDIF); it
# must set olcTLSCertificateFile via 'changetype: modify'.
echo -e "dn: cn=config
changetype: modify
replace: olcTLSCertificateKeyFile
olcTLSCertificateKeyFile: /etc/openldap/certs/NTI310ldapkey.pem
\n
dn: cn=config
changetype: modify
replace: olcTLSCertificateFile
olcTLSCertificateFile: /etc/openldap/certs/NTI310ldapcert.pem" > certs.ldif
ldapmodify -Y EXTERNAL -H ldapi:/// -f certs.ldif
# Verify the resulting slapd configuration.
slaptest -u
echo “WE ARE IN BUSINESS”
# Load the standard schemas.
ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif
ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/nis.ldif
ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif
# Base domain structure: domain object, manager role, People and Group OUs.
echo -e "dn: dc=NTI310,dc=local
dc: NTI310
objectClass: top
objectClass: domain
\n
dn: cn=ldapadm,dc=NTI310,dc=local
objectClass: organizationalRole
cn: ldapadm
description: LDAP Manager
\n
dn: ou=People,dc=NTI310,dc=local
objectClass: organizationalUnit
ou: People
\n
dn: ou=Group,dc=NTI310,dc=local
objectClass: organizationalUnit
ou: Group" > base.ldif
setenforce 0
# FIX: dropped -W -- it forces an interactive password prompt and conflicts
# with -y, which already supplies the password from the generated file.
ldapadd -x -D "cn=ldapadm,dc=NTI310,dc=local" -f base.ldif -y /root/ldap_admin_pass
systemctl restart httpd
#end
#cat /root/ldap_admin_pass
| true
|
d55874b3de95e1371f7d6d496033d71dba423372
|
Shell
|
violinshack/my-sfdx-simple
|
/bin/extra-org-setup.sh
|
UTF-8
| 910
| 3.84375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
##
# Performs additional app setup on org.
#
# This script:
# - Enables PermissionSet on configured workspace,
# - Loads sample data.
##
# Debug, echo every command
#set -x
# Wrapper function to capture command return codes, detect failure,
# and, if error, log message
#######################################
# Echo and run the given command; abort the whole script with the command's
# exit code if it fails.
# Arguments: the command and its arguments (joined into one string and eval'd
#            so redirections/quoting inside the caller's string still work)
# Outputs:   the command text then its own output on stdout; errors on stderr
#######################################
runCmd() {
  local cmnd="$*"
  local ret_code
  echo "$cmnd"
  eval "$cmnd"
  ret_code=$?
  if [ $ret_code != 0 ]; then
    # FIX: use a literal %s format so a '%' inside the command can no longer
    # corrupt the printf format string; diagnostics now go to stderr.
    printf "Error : [%d] when executing command: '%s'\n" "$ret_code" "$cmnd" >&2
    exit $ret_code
  fi
}
# Set perms on apps and objects
echo ""
echo "Enabling PermissionSet for App and SObject visibility..."
# TODO: enable once the permission set exists in the org
#runCmd "$HEROKU_CLIENT force:permset:assign --name [APP_NAME]"
# $LOAD_DATA is expected to be exported by the caller; anything other than
# the literal string "true" skips the sample-data import.
if [ "$LOAD_DATA" = true ]; then
  # Load the app data into the org
  echo ""
  echo "Loading data..."
  # TODO: point at the real data plan file
  #runCmd "$HEROKU_CLIENT force:data:import --plan [DATA_PLAN_FILE]"
fi
exit 0
| true
|
81e5bfe573c12e67147bac2fcb27ef364d8a86c2
|
Shell
|
cheyang/tensorflow-samples
|
/sshd/startup.sh
|
UTF-8
| 361
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: optionally rebind sshd to $SSH_PORT, set root's
# password (default "tryHPC" when $SSH_PASSWORD is unset/empty), then run
# sshd in the foreground.
SSHDIR=/home/mpirun/.ssh/
if [ -n "$SSH_PORT" ]; then
  # Rewrite the daemon's listen port and record it in mpirun's client config.
  sed -ri "s/Port 22/Port ${SSH_PORT}/g" /etc/ssh/sshd_config
  echo "Port ${SSH_PORT}" >> ${SSHDIR}/config
fi
if [[ -z "${SSH_PASSWORD}" ]]; then
  SSH_PASSWORD="tryHPC"
fi
# NOTE(review): the password is printed to the container log -- confirm wanted.
echo "Password Of User root is $SSH_PASSWORD"
echo "root:$SSH_PASSWORD" | chpasswd
# -D keeps sshd in the foreground, as required for a container entrypoint.
/usr/sbin/sshd -D
| true
|
88f4a505c03b6d2b2c8814069e4144df9c544e64
|
Shell
|
darkwhispering/Newvhost
|
/newvhost
|
UTF-8
| 7,979
| 4.1875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# title : newvhost
# description : adds a new apache virtual host and create database if wanted
# usage : No parameters. Just invoke script-name.
# version : 1.1.0
#==================================================================================
#-----------------------------------------------
# Edit these variables after your needs
#-----------------------------------------------
# User to use as owner of the new vhost tree
tSERVERUSER="root"
# Location to host all virtual hosts (recommended is the home folder of the user specified above)
# Do not include a trailing slash
tLOCATION="/var/www"
# sed-escaped variant of tLOCATION (slashes backslashed); must name the same path!
# Do not include a trailing slash
tLOCATIONSAFE="\/var\/www"
# If left empty, the script will ask for the MySQL root account password
tDBPW=""
#-----------------------------------------------
# DO NOT EDIT BELOW THIS LINE!
#-----------------------------------------------
## Check so root is running the script
if [ "$(id -u)" != "0" ]; then
  echo "This script must be run as root" 1>&2
  exit 1
fi
# Globals filled in by the functions below
tVHOST=""            # domain of the vhost being created (set_vhost)
tHOSTLOCATION=""     # $tLOCATION/$tVHOST (create_vhost)
tVHOSTDIR=""         # document root, $tHOSTLOCATION/public/ (create_vhost)
tVHOSTCONFIG=""      # apache site config path (create_vhost)
tDBUSER=""           # database user/table name, set only when a DB is made
tDIR="/usr/local/newvhost"   # where the templates live
tGENPASSWD=32        # length of the generated database password
tGENPW=""            # generated password (output of genpasswd)
# Get proper paths to binaries used
tSED=$(which sed)
tA2ENSITE=$(which a2ensite)
tCHOWN=$(which chown)
tMKDIR=$(which mkdir)
tMYSQL=$(which mysql)
tTR=$(which tr)
tHEAD=$(which head)
tCP=$(which cp)
tRM=$(which rm)
tAPACHE2CTL=$(which apache2ctl)
#-----------------------------------------------
# Functions
#-----------------------------------------------
#
# Run a command with both stdout and stderr discarded; the command's exit
# status is passed through untouched.
#
quietly() {
  "$@" &> /dev/null
}
#
# Generate a tGENPASSWD-character random password (chars A-Za-z0-9_) from
# /dev/urandom into the global tGENPW.
# NOTE(review): the trailing 'xargs' only trims surrounding whitespace; the
# tr output contains none, so it looks like a no-op -- confirm before removing.
#
function genpasswd() {
  tGENPW=$($tTR -dc A-Za-z0-9_ < /dev/urandom | $tHEAD -c ${tGENPASSWD} | xargs)
}
#
# Prompt for the fully qualified domain name of the new virtual host and
# store it in the global tVHOST; aborts the script when nothing is entered.
#
set_vhost() {
  local answer
  clear
  echo
  echo "-----------------------------------------------------------------------------"
  echo
  echo "Please type in the domain name to be used for the virtual host."
  echo
  echo "It must be a fully qualified domain name, including .com/.net or what else"
  echo "you have as domain ending. However, DO NOT include http(s)!!"
  echo
  echo "-----------------------------------------------------------------------------"
  echo
  read -r -p "Domain name ? " answer
  if [ -z "$answer" ]; then
    echo
    echo "No domain name given, exiting."
    echo
    exit 1
  fi
  tVHOST=$answer
}
#
# Ask for the MySQL root password unless tDBPW was preset in the config
# section; stores the answer in the global tDBPW and aborts on empty input.
#
get_rootpw() {
  # Nothing to do when the password was configured up front.
  [ -n "$tDBPW" ] && return
  local answer
  echo
  read -r -s -p "Database admin's password ? " answer
  if [ -z "$answer" ]; then
    echo
    echo "No password given, exiting."
    echo
    exit 1
  fi
  tDBPW=$answer
}
#
# Create the new virtual host for $tVHOST: directory layout, starter
# index.html, an Apache site config rendered from vhost.template, and an
# optional a2ensite + graceful reload.
# Globals: reads tVHOST/tLOCATION/tLOCATIONSAFE/tDIR/tSERVERUSER; sets
# tHOSTLOCATION, tVHOSTDIR, tVHOSTLOG, tVHOSTCONFIG. Exits on any conflict.
#
function create_vhost() {
  tHOSTLOCATION="$tLOCATION/$tVHOST"
  tVHOSTDIR="$tHOSTLOCATION/public/"
  tVHOSTLOG="$tHOSTLOCATION/log/"
  echo
  echo
  echo -n "Creating new virtual host for domain $tVHOST... "
  # Check if the dir for the new vhost already exists
  if [ -d "$tHOSTLOCATION" ]; then
    echo "$tHOSTLOCATION already exists, aborting."
    exit 1
  fi
  # Create the new dir for the vhost (public = docroot, log = apache logs)
  $tMKDIR -p ${tHOSTLOCATION}/{public,log}
  # $tMKDIR -p $tVHOSTDIR
  # $tMKDIR -p $tVHOSTLOG
  # Copy the index.html template to the new vhost-dir
  $tCP $tDIR/index.template $tVHOSTDIR/index.html
  # Set the proper web server permissions
  $tCHOWN -R $tSERVERUSER:$tSERVERUSER $tHOSTLOCATION
  # Put vhost config-file into place
  tVHOSTCONFIG="/etc/apache2/sites-available/${tVHOST}.conf"
  # Check if config file already exists
  if [ -f "$tVHOSTCONFIG" ]; then
    echo "$tVHOSTCONFIG already exists, aborting."
    exit 1
  fi
  # Create temporary apache config file (two-pass sed over the template)
  $tSED -e "s/CHANGEMEVHOST/${tVHOST}/g" $tDIR/vhost.template > $tDIR/vhost.template1
  $tSED -e "s/CHANGEMEHOME/${tLOCATIONSAFE}/g" $tDIR/vhost.template1 > $tDIR/${tVHOST}.conf
  # Copy apache config file to /etc/apache2/sites-available
  $tCP $tDIR/${tVHOST}.conf $tVHOSTCONFIG
  # Remove temporary config files
  $tRM $tDIR/vhost.template1
  $tRM $tDIR/${tVHOST}.conf
  printf "Done\n"
  # Activate the new virtual host?
  echo
  read -r -p "Do you want to enable the newly added virtual host ($tVHOST) ? [y/N] " tCHOICE
  case $tCHOICE in
    [yY][eE][sS]|[yY])
      echo
      echo -n "Enabling vhost... "
      # NOTE(review): piping into the 'quietly' *function* discards output,
      # but quietly ignores its stdin entirely -- confirm this is intended
      # rather than 'quietly $tA2ENSITE ...'.
      $tA2ENSITE ${tVHOST}.conf | quietly
      $tAPACHE2CTL graceful | quietly
      printf "Done\n"
      ;;
    *)
      echo
      echo "Skipping vhost enabling procedure by user choice"
      return
      ;;
  esac
}
#
# Offer to create a MySQL database for the freshly added vhost; delegates to
# get_rootpw/create_db on a "y"/"yes" answer, otherwise does nothing.
#
add_db() {
  echo
  echo "-----------------------------------------------------------------------------"
  echo
  read -r -p "Do you want to create a database to your new virtual host ($tVHOST) ? [y/N] " tDBCHOICE
  if [[ "$tDBCHOICE" == [yY][eE][sS] || "$tDBCHOICE" == [yY] ]]; then
    get_rootpw
    create_db
  else
    echo
    echo "Skipping creating a database by user choice"
    return
  fi
}
#
# Create a new MySQL database plus a same-named user with a generated 32-char
# password: prompts for the username (also used as the DB name), renders
# mysql.template and feeds it to the mysql client as root.
# Globals: reads tDBPW/tDIR; sets tDBUSER and tGENPW. Exits on conflicts.
#
function create_db() {
  echo
  echo "-----------------------------------------------------------------------------"
  echo
  echo "Type in the desired username. It will also be used as the table name."
  echo
  echo "Remember no to use a to long name. MySQL only support up to 16 character in"
  echo "the username. Anything above will break this script when trying to create"
  echo "the database user."
  echo
  echo "-----------------------------------------------------------------------------"
  echo
  read -r -p "Database usernname ? " _dbuser_
  if [ -z "$_dbuser_" ]; then
    echo
    # NOTE(review): message says "domain" but it means the database username.
    echo "No domain name given, exiting."
    echo
    exit 1
  else
    tDBUSER=$_dbuser_
    echo
    echo -n "Creating new database... "
    # Create a random password (stored in tGENPW)
    genpasswd
    # Check if the database already exists (MySQL datadir layout)
    if [ -d "/var/lib/mysql/$tDBUSER" ]; then
      echo "Database $tDBUSER already exists, aborting."
      exit 1
    fi
    # Create temporary db and db-user config files from the template
    $tSED -e "s/CHANGEMEDB/$tDBUSER/g" $tDIR/mysql.template > $tDIR/mysql.template.1
    $tSED -e "s/CHANGEMEUSER/$tDBUSER/g" $tDIR/mysql.template.1 > $tDIR/mysql.template.2
    $tSED -e "s/CHANGEMEPW/$tGENPW/g" $tDIR/mysql.template.2 > $tDIR/mysql.template.3
    # Create database table and user.
    # NOTE(review): the root password appears on the mysql command line and is
    # visible in 'ps' while it runs -- confirm acceptable in this environment.
    $tMYSQL --user=root --password=$tDBPW < $tDIR/mysql.template.3
    # Delete temporary config files
    $tRM $tDIR/mysql.template.1
    $tRM $tDIR/mysql.template.2
    $tRM $tDIR/mysql.template.3
    printf "Done\n"
  fi
}
#
# Print the closing summary: vhost locations plus, when a database was
# created (tDBUSER non-empty), its credentials -- shown only this once.
#
finished() {
  echo
  echo "-----------------------------------------------------------------------------"
  echo
  echo "# FINISHED"
  echo "Everything is done and ready for your. Please save this information securely"
  echo "This is the only time you will see it!"
  echo
  echo "# VIRTUAL HOST"
  echo "Host location: $tVHOSTDIR"
  echo "Log location: $tVHOSTLOG"
  if [[ -n "$tDBUSER" ]]; then
    echo
    echo "# DATABASE"
    echo "Database username: $tDBUSER"
    echo "Database tablename: $tDBUSER"
    echo "Database password: $tGENPW"
    echo
    echo "Please save the database password in a secure place, this is the only time you will see it!"
  fi
  echo
  echo "-----------------------------------------------------------------------------"
  echo
}
#-----------------------------------------------
# Main
#-----------------------------------------------
# Order matters: create_vhost and add_db consume the globals set by
# set_vhost; finished prints everything collected along the way.
set_vhost
create_vhost
add_db
finished
| true
|
846e2436e92b5d588fb25c16f85b38a878224186
|
Shell
|
konnov/bymc
|
/bymc/verifypa-nusmv-bdd
|
UTF-8
| 2,197
| 3.921875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Run the abstraction refinement loop with nusmv in bdd mode
#
# Parses the wrapper switches, exports them as environment variables and
# hands program + spec over to script/refinement-loop.sh with
# TARGET_MC=nusmv-bdd.
NUSMV_VERBOSE=0
MEM_LIMIT="unlimited"
TIME_LIMIT="unlimited"
MONO=""
REACH_REL=""
NO_JUSTICE=""
SOURCE=""
# parse options (ggetopt.py is a gnu-getopt shim shipped with the tool)
DIR=`dirname $0`
DIR=`cd "$DIR"; pwd`
TEMPOPTS=`"$DIR"/script/ggetopt.py -o h,w: --long help,nusmv-verbose:,rel-mono,rel-reach,no-justice,source:,limit-time:,limit-mem: -n '$@' -- "$@"`
if [ $? != 0 ] ; then echo "Terminating..." >&2 ; exit 1 ; fi
# Print usage and leave with status 1.
function help_and_quit() {
  echo "use $0 <switches> prog spec <bymc_switches>"
  echo " where the switches are"
  echo " --smt 'lib2|z3|-smt2|-in' (default, use z3 as the backend solver)"
  echo " --smt 'lib2|mysolver|arg1|arg2|arg3' (use an SMT2 solver, unsat cores are required)"
  echo " --smt 'yices' (use yices 1.x as the backend solver, DEPRECATED)"
  echo " -w|--nusmv-verbose <level>: set the verbosity level"
  echo " --rel-reach: compute reachable states first (in NuSMV)"
  echo " --rel-mono: monolithic transition relation (in NuSMV)"
  echo " --source: execute the script in NuSMV"
  echo " --limit-time: limit (in seconds) cpu time of subprocesses (ulimit -t)"
  echo " --limit-mem: limit (in MB) virtual memory of subprocesses (ulimit -v)"
  echo " -h|--help: show this help message"
  exit 1
}
eval set -- "$TEMPOPTS"
while true ; do
  case "$1" in
    -h|--help) help_and_quit ;;
    -w|--nusmv-verbose) NUSMV_VERBOSE=$2 ; shift 2 ;;
    --source) SOURCE=$2 ; shift 2 ;;
    --rel-mono) MONO="1" ; shift ;;
    --rel-reach) REACH_REL="1" ; shift ;;
    --no-justice) NO_JUSTICE="1" ; shift ;;
    --limit-time) TIME_LIMIT=$2 ; shift 2 ;;
    # convert MB to KB, the unit that ulimit -v expects
    --limit-mem) MEM_LIMIT=$((1024*$2)) ; shift 2 ;;
    --) shift ; break ;;
    *) echo "Internal error!" ;
       help_and_quit;;
  esac
done
if [ "$#" -lt 2 ]; then
  help_and_quit
fi
# Apply the resource limits to this shell and every child process.
echo ulimit -SHv $MEM_LIMIT
ulimit -SHv $MEM_LIMIT
echo ulimit -SHt $TIME_LIMIT
ulimit -SHt $TIME_LIMIT
PROG=$1
SPEC=$2
shift 2
# Remaining arguments are forwarded to bymc; unsat cores are always required.
export BYMC_FLAGS="$@ $BYMC_FLAGS -O smt.unsat.cores=1"
# run the tool
export DEPTH NUSMV_VERBOSE SOURCE MONO REACH_REL NO_JUSTICE
export TARGET_MC=nusmv-bdd
`dirname $0`/script/refinement-loop.sh "$PROG" "$SPEC"
| true
|
4f2230dfd34dd7781fa18764f3779e2cfac88bdc
|
Shell
|
AngelaZhou779/CVL_SequenceAnaylsis
|
/rmDupPicard.sh
|
UTF-8
| 489
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Mark/remove duplicate reads in every sorted BAM under ${sort_dir} with
# Picard MarkDuplicates, writing <sample>.rmd.sort.bam into ${rmd_dir}.
project_name=cvl
project_dir=/home/sarahm/cvl/storage
pic=/usr/local/picard-tools-1.131/picard.jar
sort_dir=${project_dir}/sort_dir
tmp=${project_dir}/tmp
rmd_dir=${project_dir}/rmd_dir
files=(${sort_dir}/*)
for file in "${files[@]}"
do
  base=$(basename "${file}" .sort.bam)
  # FIX: Picard options must be single KEY=VALUE tokens; the original had a
  # space after each '=' ("I= path"), which splits every option into two
  # arguments and makes MarkDuplicates reject the command line.
  java -Xmx2g -jar ${pic} MarkDuplicates I=${sort_dir}/${base}.sort.bam O=${rmd_dir}/${base}.rmd.sort.bam M=${rmd_dir}/dupstat.txt VALIDATION_STRINGENCY=SILENT REMOVE_DUPLICATES=true
done
| true
|
d2033c4d41442bbe9576c5048fa641652fe9ab5f
|
Shell
|
stuggi/neutron-operator
|
/templates/neutronapi/bin/bootstrap.sh
|
UTF-8
| 1,263
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Copyright 2020 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Bootstrap step: run the neutron database migrations.
# Required env: DatabasePassword, DatabaseHost. Optional: Database (neutron).
# FIX: shebang was '#!/bin//bash'; removed the leftover 'echo JAAA MANN'
# debug line as well.
set -ex
export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
export DatabaseHost=${DatabaseHost:?"Please specify a DatabaseHost variable."}
export Database=${Database:-"neutron"}
export DatabaseConnection="mysql+pymysql://$Database:$DatabasePassword@$DatabaseHost/$Database"
# Bootstrap and exit if KOLLA_BOOTSTRAP variable is set. This catches all cases
# of the KOLLA_BOOTSTRAP variable being set, including empty.
#if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then
# OPTS stays unquoted on purpose: it must split into two --config-file args.
OPTS="--config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini"
neutron-db-manage ${OPTS} upgrade heads
exit 0
#fi
| true
|
acd595301c23109eeaeb45e614bf822c028cb004
|
Shell
|
xlii-chl/scripts
|
/project/extend/v2/services/wordpress5/init.sh
|
UTF-8
| 384
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Append the WordPress 5 configuration defaults to the ./.wex env file.
wordpress5Init() {
  # Quoted delimiter: the block is copied literally, nothing is expanded.
  cat >> .wex <<'WPCFG'
# Wordpress 5
WP_DB_CHARSET=utf8 # Database charset
WP_DB_TABLE_PREFIX=wp_ # Used for wordpress database
WP_DEBUG_ENABLED=false # Will allow wordpress debug mode
WP_VERSION=5.6.1-php7.4-apache # Docker image tags
WPCFG
}
| true
|
3d7652fe6043d6212a192a19e569ae4037183f33
|
Shell
|
chte/links-mautic-theme
|
/build.sh
|
UTF-8
| 99
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Zip the contents of the given directory into ./<dir>.zip, replacing any
# previous archive. Usage: ./build.sh <folder>
FOLDER="$1"
# FIX: validate the argument and check 'cd'; the original ran 'cd "$1"; zip'
# unconditionally, so a missing/typo'd folder zipped the *current* directory.
if [ -z "$FOLDER" ] || [ ! -d "$FOLDER" ]; then
  echo "usage: ${0##*/} <folder>" >&2
  exit 1
fi
echo "Zipping $FOLDER"
rm -f "$FOLDER".zip
cd "$FOLDER" || exit 1
zip -r ../"$FOLDER".zip *
| true
|
7923804aa954f275388651fabdebcf7022d8be51
|
Shell
|
antenore/svntogit-community
|
/python-confuse/trunk/PKGBUILD
|
UTF-8
| 930
| 2.71875
| 3
|
[] |
no_license
|
# Maintainer: Maxime Gauduin <alucryd@archlinux.org>
# Contributor: Mubashshir <ahmubashshir@gmail.com>
# Contributor: Frederik “Freso” S. Olesen <freso.dk@gmail.com>
pkgname=python-confuse
pkgver=1.6.0
pkgrel=1
pkgdesc='Painless YAML config files for Python'
arch=(any)
url=https://github.com/beetbox/confuse
license=(MIT)
depends=(python-yaml)
makedepends=(
  git
  python-flit
  python-pip
  python-poetry
)
checkdepends=(python-nose)
# Exact upstream commit of the release tag, pinned for reproducibility.
_tag=3162e50197f256198e7d1d24048195a3429852e6
source=(git+https://github.com/beetbox/confuse.git#tag=${_tag})
sha256sums=(SKIP)
# Derive the package version from the checked-out git tag (strip leading 'v').
pkgver() {
  cd confuse
  git describe --tags | sed 's/^v//'
}
# Build a wheel with flit.
build() {
  cd confuse
  python -m flit build --format wheel
}
# Run the upstream test suite.
check() {
  cd confuse
  python -m nose
}
# Install the built wheel with pip, isolated from any user pip config,
# plus the MIT license file.
package() {
  PIP_CONFIG_FILE=/dev/null pip install --isolated --root="${pkgdir}" --ignore-installed --no-deps confuse/dist/*.whl
  install -Dm 644 confuse/LICENSE -t "${pkgdir}"/usr/share/licenses/python-confuse/
}
# vim: ts=2 sw=2 et:
| true
|
ca5107a878d351179639a0b909e04fcd87835ae1
|
Shell
|
aws/s2n-tls
|
/codebuild/bin/test_libcrypto_interning.sh
|
UTF-8
| 6,513
| 3.515625
| 4
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
#  http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Verify that libcrypto "interning" (statically absorbing and prefixing the
# libcrypto symbols inside libs2n) works across shared/static/LTO builds.
set -e
source codebuild/bin/s2n_setup_env.sh
source codebuild/bin/jobs.sh
# build 2 different version of libcrypto to make it easy to break the application if
# interning doesn't work as expected
WHICH_LIBCRYPTO=$(echo "${S2N_LIBCRYPTO:-"openssl-1.1.1"}")
# e.g. "openssl-1.1.1" -> "openssl_1_1_1"; selects the install script below
TARGET_LIBCRYPTO="${WHICH_LIBCRYPTO//[-.]/_}"
TARGET_LIBCRYPTO_PATH="${TEST_DEPS_DIR}/${WHICH_LIBCRYPTO}"
OPENSSL_1_0="$OPENSSL_1_0_2_INSTALL_DIR"
# Build the deliberately mismatched libcrypto (1.0.2) once, if not cached.
if [ ! -f $OPENSSL_1_0/lib/libcrypto.a ]; then
  ./codebuild/bin/install_openssl_1_0_2.sh $OPENSSL_1_0/src $OPENSSL_1_0 linux
fi
# Build the libcrypto under test, if not cached (awslc takes different args).
if [ ! -f $TARGET_LIBCRYPTO_PATH/lib/libcrypto.a ]; then
  if [ "$TARGET_LIBCRYPTO" == "awslc" ]; then
    ./codebuild/bin/install_${TARGET_LIBCRYPTO}.sh $TARGET_LIBCRYPTO_PATH/src $TARGET_LIBCRYPTO_PATH 0
  else
    ./codebuild/bin/install_${TARGET_LIBCRYPTO}.sh $TARGET_LIBCRYPTO_PATH/src $TARGET_LIBCRYPTO_PATH linux
  fi
fi
COMMON_FLAGS="-DCMAKE_PREFIX_PATH=$TARGET_LIBCRYPTO_PATH -DCMAKE_BUILD_TYPE=RelWithDebInfo"
LTO_FLAGS="-DS2N_LTO=on"
# use LTO-aware commands if possible
if [ -x "$(command -v gcc-ar)" ]; then
  LTO_FLAGS+=" -DCMAKE_AR=$(which gcc-ar) -DCMAKE_NM=$(which gcc-nm) -DCMAKE_RANLIB=$(which gcc-ranlib)"
fi
# Report a test failure on stdout and abort the whole script with status 1.
fail() {
  printf 'test failure: %s\n' "$1"
  exit 1
}
#######################################
# Write a minimal C program that calls into both s2n and libcrypto; used to
# verify how a downstream application links against each libcrypto build.
# Arguments: $1 - output path for the .c file
#######################################
function write_app() {
# FIX: quote the redirect target so a path containing spaces or glob
# characters cannot be split/expanded (SC2086).
cat <<EOF > "$1"
#include <s2n.h>
#include <openssl/bn.h>
int main() {
    s2n_init();
    BN_CTX_new();
    return 0;
}
EOF
}
#######################################
# Configure and compile a build directory with cmake.
# Arguments: $1 - build dir; remaining args - extra cmake cache options.
# Globals:   COMMON_FLAGS (unquoted on purpose: must word-split), JOBS.
#######################################
function build() {
  echo "=== BUILDING $1 ==="
  cmake . -B$1 $COMMON_FLAGS ${@:2}
  cmake --build $1 -- -j $JOBS
}
#######################################
# Run the "unit"-labelled ctest suite of a build directory in parallel.
# Arguments: $1 - build dir
#######################################
function tests() {
  echo "=== TESTING $1 ==="
  make -C $1 test ARGS="-j $JOBS -L unit"
}
##################
# Dynamic builds #
##################
# build a default version to test what happens without interning
build build/shared-default -DBUILD_SHARED_LIBS=on -DBUILD_TESTING=on
ldd ./build/shared-default/lib/libs2n.so | grep -q libcrypto || fail "shared-default: libcrypto was not linked"
# ensure libcrypto interning works with shared libs and no testing
build build/shared -DBUILD_SHARED_LIBS=on -DBUILD_TESTING=off -DS2N_INTERN_LIBCRYPTO=on
# s2n should not publicly depend on libcrypto
ldd ./build/shared/lib/libs2n.so | grep -q libcrypto && fail "shared: libcrypto was not interned"
# ensure libcrypto interning works with shared libs, LTO and no testing
# NOTE: interning+LTO+testing doesn't currently work
build build/shared-lto -DBUILD_SHARED_LIBS=on -DBUILD_TESTING=off -DS2N_INTERN_LIBCRYPTO=on $LTO_FLAGS
# s2n should not publicly depend on libcrypto
ldd ./build/shared-lto/lib/libs2n.so | grep -q libcrypto && fail "shared-lto: libcrypto was not interned"
# ensure libcrypto interning works with shared libs and testing
build build/shared-testing -DBUILD_SHARED_LIBS=on -DBUILD_TESTING=on -DS2N_INTERN_LIBCRYPTO=on
# s2n should not publicly depend on libcrypto
ldd ./build/shared-testing/lib/libs2n.so | grep -q libcrypto && fail "shared-testing: libcrypto was not interned"
# run the tests and make sure they all pass with the prefixed version
tests build/shared-testing
# load the wrong version of libcrypto and the tests should still pass
LD_PRELOAD=$OPENSSL_1_0/lib/libcrypto.so tests build/shared-testing
# ensure the small app will compile with both versions of openssl without any linking issues
for build in shared shared-lto; do
  # create a small app that links against both s2n and libcrypto
  write_app build/$build/app.c
  for target in $OPENSSL_1_0 $TARGET_LIBCRYPTO_PATH; do
    echo "testing $build linking with $target"
    mkdir -p $target/bin
    cc -fPIE -Iapi -I$target/include build/$build/app.c build/$build/lib/libs2n.so $target/lib/libcrypto.a -lpthread -ldl -o $target/bin/test-app
    # make sure the app doesn't crash
    LD_LIBRARY_PATH="build/$build/lib:$target/lib:$LD_LIBRARY_PATH" $target/bin/test-app
  done
done
##################
# Static builds #
##################
# ensure libcrypto interning works with static libs
# NOTE: static builds don't vary based on testing being enabled
build build/static -DBUILD_SHARED_LIBS=off -DBUILD_TESTING=on -DS2N_INTERN_LIBCRYPTO=on
tests build/static
# TODO figure out how to get static-lto+interning builds working
# ensure the small app will compile with both versions of openssl without any linking issues
for build in static; do
  # create a small app that links against both s2n and libcrypto
  write_app build/$build/app.c
  for target in $OPENSSL_1_0 $TARGET_LIBCRYPTO_PATH; do
    echo "testing $build linking with $target"
    mkdir -p $target/bin
    cc -fPIE -Iapi -I$target/include build/$build/app.c build/$build/lib/libs2n.a $target/lib/libcrypto.a -lpthread -ldl -o $target/bin/test-app
    # interned symbols carry an "s2n$" prefix; the application's own
    # libcrypto must additionally be linked in unprefixed
    nm $target/bin/test-app | grep -q 'T s2n$BN_CTX_new' || fail "$target: libcrypto symbols were not prefixed"
    nm $target/bin/test-app | grep -q 'T BN_CTX_new' || fail "$target: libcrypto was not linked in application"
    # make sure the app doesn't crash
    $target/bin/test-app
  done
done
##################
# Runtime tests #
##################
# Handshake a local s2nd/s2nc pair under an LD_PRELOADed (mismatched)
# libcrypto and report whether TLS 1.3 ("Actual protocol version: 34")
# was negotiated.
run_connection_test() {
  local TARGET="$1"
  LD_PRELOAD=$OPENSSL_1_0/lib/libcrypto.so ./build/$TARGET/bin/s2nd -c default_tls13 localhost 4433 &> /dev/null &
  local SERVER_PID=$!
  LD_PRELOAD=$OPENSSL_1_0/lib/libcrypto.so ./build/$TARGET/bin/s2nc -i -c default_tls13 localhost 4433 | tee build/client.log
  kill $SERVER_PID &> /dev/null || true
  # ensure a TLS 1.3 session was negotiated
  echo "checking for TLS 1.3"
  grep -q "Actual protocol version: 34" build/client.log
}
# without interning, the connection should fail when linking the wrong version of libcrypto
echo "running pair: TLS 1.3 failure expected"
run_connection_test shared-default && fail "TLS 1.3 handshake was expected to fail"
# with interning, the connection should succeed even though we've linked the wrong version of libcrypto
echo "running pair: TLS 1.3 success expected"
run_connection_test shared-testing || fail "TLS 1.3 handshake was expected to succeed"
echo "SUCCESS!"
| true
|
263c90e82e319a01bf540baf03a6a7304b5837d3
|
Shell
|
sabd-project/sabd-desktop
|
/install.sh
|
UTF-8
| 305
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Use this script to install packages that use native modules, to prevent
# overwriting the normal nodejs headers.
# thanks to @Kishan
# Source our vars (toolchain/arch environment for the build)
. "$PWD/setup-x64.sh"
# Make electron build dir if not exists
mkdir -p ~/.electron-gyp
# Run npm with HOME pointed at the electron-gyp cache so node-gyp downloads
# Electron's headers there instead of into the regular node header location;
# all script arguments are forwarded to npm install.
HOME=~/.electron-gyp npm install "$@"
| true
|
388ab194771962982edfaaf0a6c05c6081a0f021
|
Shell
|
dasJ/dotfiles
|
/zsh/include/keys.zsh
|
UTF-8
| 1,888
| 3.015625
| 3
|
[] |
no_license
|
###############
## Key configuration
##
## Vi-mode keybindings for zsh. Each "${terminfo[...]}" guard binds a key
## only when the current terminal actually reports it, so this file is safe
## across terminal types.
###############
# vi keybindings
bindkey -v
# No lag when switching vi modes (0.1 secs)
KEYTIMEOUT=1
# Arrow up for history
if [[ "${terminfo[kpp]}" != "" ]]; then
  bindkey "${terminfo[kpp]}" up-line-or-history
fi
# Arrow down for history
if [[ "${terminfo[knp]}" != "" ]]; then
  bindkey "${terminfo[knp]}" down-line-or-history
fi
# Arrow up with non-empty buffer: prefix search instead of plain history walk
if [[ "${terminfo[kcuu1]}" != "" ]]; then
  autoload -U up-line-or-beginning-search
  zle -N up-line-or-beginning-search
  bindkey "${terminfo[kcuu1]}" up-line-or-beginning-search
fi
# Arrow down with non-empty buffer
if [[ "${terminfo[kcud1]}" != "" ]]; then
  autoload -U down-line-or-beginning-search
  zle -N down-line-or-beginning-search
  bindkey "${terminfo[kcud1]}" down-line-or-beginning-search
fi
# Home key
if [[ "${terminfo[khome]}" != "" ]]; then
  bindkey "${terminfo[khome]}" beginning-of-line
fi
# End key
if [[ "${terminfo[kend]}" != "" ]]; then
  bindkey "${terminfo[kend]}" end-of-line
fi
# Space does history expansion
bindkey ' ' magic-space
# Ctrl+Arrow to move by word
bindkey '^[[1;5C' forward-word
bindkey '^[[1;5D' backward-word
# Move through completion backwards with Shift+Tab
if [[ "${terminfo[kcbt]}" != "" ]]; then
  bindkey "${terminfo[kcbt]}" reverse-menu-complete
fi
# Backspace to delete backwards
bindkey '^?' backward-delete-char
# Delete to delete forward
if [[ "${terminfo[kdch1]}" != "" ]]; then
  bindkey "${terminfo[kdch1]}" delete-char
else
  # terminfo has no delete entry: fall back to the common escape sequences
  bindkey "^[[3~" delete-char
  bindkey "^[3;5~" delete-char
  bindkey "\e[3~" delete-char
fi
# Ctrl+R for backward search
bindkey "^r" history-incremental-search-backward
# v command for opening in $EDITOR
bindkey -M vicmd v edit-command-line
# b command to push line
bindkey -M vicmd b push-line-or-edit
# Do nothing when using scroll wheel (some terminals emit ^Y/^E for it,
# which would otherwise be inserted or trigger widgets)
nop() {
  :
}
zle -N nop
bindkey ^Y nop
bindkey ^E nop
| true
|
252320c81376441fdc5e44f9e6d85c3395cd4ecb
|
Shell
|
mori-dev/bikeshed
|
/man2text
|
UTF-8
| 88
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# Render a man page as plain text: col -b strips the backspace overstriking
# that man uses for bold/underline. Usage: man2text <command>
# FIX: usage message now goes to stderr; "$1" is quoted so an argument with
# spaces or glob characters reaches man intact (SC2086).
[ $# -eq 1 ] || { echo 'usage: man2text command' >&2; exit 1; }
man "$1" | col -b
| true
|
cc3b05c640673d9e10349879fb0b50d752bab383
|
Shell
|
devimteam/dcos-pakages
|
/marathon-consul/build
|
UTF-8
| 873
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# DC/OS package build step: install the marathon-consul binary into the
# package tree ($PKG_PATH) and generate its systemd unit for masters.
mkdir -p "$PKG_PATH/usr/"
#cp -rp /pkg/src/marathon/target/scala-2.11/marathon-assembly-*.jar "$PKG_PATH/usr/marathon.jar"
cp -rp /pkg/src/marathon-consul "$PKG_PATH/usr/marathon-consul"
#chmod -R o+r "$PKG_PATH/usr"
chmod a+x "$PKG_PATH/usr/marathon-consul"
# FIX: '-' is not valid in a shell variable name; the original
# 'marathon-consul_service=...' line was parsed as a *command*, and the later
# "$marathon-consul_service" expanded the unset $marathon plus the literal
# text "-consul_service", so the unit file landed in the wrong place.
marathon_consul_service="$PKG_PATH/dcos.target.wants_master/dcos-marathon-consul.service"
mkdir -p "$(dirname "$marathon_consul_service")"
cat <<EOF > "$marathon_consul_service"
[Unit]
Description=Marathon-consul: bridge
[Service]
User=dcos_marathon
Restart=always
StartLimitInterval=0
RestartSec=15
LimitNOFILE=16384
PermissionsStartOnly=True
EnvironmentFile=/opt/mesosphere/environment
ExecStartPre=/bin/ping -c1 leader.mesos
ExecStart=$PKG_PATH/usr/marathon-consul \\
  --marathon-location=marathon.mesos:8080 \\
  --consul-local-agent-host=consul.marathon.mesos \\
  --sse-enabled=true
EOF
| true
|
589d9a299f2cac91082344d1a6ee01b54a913bb6
|
Shell
|
komin92/bash-scripts
|
/rsyncvar.sh
|
UTF-8
| 906
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Mirror $dir1 from the remote host $1 (root password in $2) via rsync,
# driven by expect for the password prompt; logs reachability to
# /var/log/rsynccore.log and emails on unreachable hosts.
# Usage: rsyncvar.sh <ip> <password>
tarih=$(date +%d/%m/%Y)
saat=$(date +%H:%M:%S)
zaman="$tarih $saat"
logdir="/var/log/rsynccore.log"
email="eposta@example.com"
# Reject anything that is not a dotted quad (loose: octets up to 999).
if [[ $1 =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
  echo -n
else
  echo "IP yanlis yazdin."
  exit 0
fi
# One probe ping; on failure log it, mail the admin, and stop (exit 0 so
# cron does not also report the failure).
ping -c 1 $1 > /dev/null
if [ $? -eq 0 ];then
  echo "$zaman : Calisti" >> $logdir
else
  echo "$zaman : $1 Kapali.." >> $logdir
  mail -s "RSYNC Yapilamadi!" $email <<< "$1 IP adresi calismiyor RSYNC yapilamadi"
  exit 0
fi
dir1="dir1"
dir2="dir2"
password="$2"
# Drive rsync-over-ssh with expect so the password can be fed to the prompt.
# NOTE(review): the password is interpolated into the expect script and is
# visible to anyone who can read this process -- prefer ssh keys.
/usr/bin/expect - << EOF
set timeout -1
spawn rsync -avz -l -p -t root@$1:$dir1 $dir2
expect "password:" { send "$password\n"}
expect "#"
spawn date
expect "#"
EOF
# Amale log rotate :) -- keep only the newest 1000 lines of the log.
logsay=$(cat $logdir | wc -l)
if [ $logsay -gt 1000 ];then
  cat $logdir | tail -1000 > /tmp/rotate.log
  cat /tmp/rotate.log > $logdir
  rm -rf /tmp/rotate.log
fi
| true
|
8a09c92ecfb829b55ba5de8d337d25eeae476a5c
|
Shell
|
erineyes/ESMLabs_myown
|
/scripts/build-live.sh
|
UTF-8
| 1,733
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the latest live555 sources and build release + debug static
# libraries with clang/LLVM 6.0, installing headers and archives under
# ../3rdparty/live. Run from the scripts directory (uses relative paths).
WORKING_DIR=$(pwd)
export LLVM_TOOLCHAIN=/usr/lib/llvm-6.0/bin
export CC=$LLVM_TOOLCHAIN/clang
export CXX=$LLVM_TOOLCHAIN/clang++
export AR=$LLVM_TOOLCHAIN/llvm-ar
export AS=$LLVM_TOOLCHAIN/llvm-as
export LD=$LLVM_TOOLCHAIN/ld.lld
echo "##################### BEGIN BUILD LIVE555... #####################"
mkdir -p ../3rdparty/live/include
mkdir -p ../3rdparty/live/lib
wget http://live555.com/liveMedia/public/live555-latest.tar.gz --no-check-certificate
chmod +x ./live555-latest.tar.gz
tar -xzvf ./live555-latest.tar.gz
cd live
# The two config.* Makefile fragments are expected next to this script.
cp ../config.live555-linux-64bit-clang-release .
cp ../config.live555-linux-64bit-clang-debug .
# Release build: plain-named archives plus all public headers.
./genMakefiles live555-linux-64bit-clang-release
make
mv ./BasicUsageEnvironment/libBasicUsageEnvironment.a $WORKING_DIR/../3rdparty/live/lib
mv ./groupsock/libgroupsock.a $WORKING_DIR/../3rdparty/live/lib
mv ./liveMedia/libliveMedia.a $WORKING_DIR/../3rdparty/live/lib
mv ./UsageEnvironment/libUsageEnvironment.a $WORKING_DIR/../3rdparty/live/lib
cp -rf ./BasicUsageEnvironment/include/* ./groupsock/include/* ./liveMedia/include/* ./UsageEnvironment/include/* $WORKING_DIR/../3rdparty/live/include
make clean
# Debug build: same libraries, installed with a _debug suffix.
./genMakefiles live555-linux-64bit-clang-debug
make
mv ./BasicUsageEnvironment/libBasicUsageEnvironment.a $WORKING_DIR/../3rdparty/live/lib/libBasicUsageEnvironment_debug.a
mv ./groupsock/libgroupsock.a $WORKING_DIR/../3rdparty/live/lib/libgroupsock_debug.a
mv ./liveMedia/libliveMedia.a $WORKING_DIR/../3rdparty/live/lib/libliveMedia_debug.a
mv ./UsageEnvironment/libUsageEnvironment.a $WORKING_DIR/../3rdparty/live/lib/libUsageEnvironment_debug.a
make clean
cd $WORKING_DIR
# Clean up the downloaded tarball and the source tree.
rm live555-latest.tar.gz
rm -rf live
echo "##################### END BUILD LIVE555 #####################"
|
94af509efa4753d7c0292f081196662a59a7caf9
|
Shell
|
Chen-Zhihui/SeismicPackage
|
/CPSeis/scripts/build_lam.sh
|
UTF-8
| 2,703
| 3.3125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash
#<license>
#-------------------------------------------------------------------------------
# Copyright (c) 2007 ConocoPhillips Company
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#-------------------------------------------------------------------------------
#</license>
# build lam-7.0.6 mpi with single underscore on symbols, include shared libs.
# Requires THIRDPARTY_DIR, CPSEIS_ARCH and CPSEIS_COMPILER in the environment.
temp=$(pwd)
# Restore the caller's directory and abort on common fatal signals.
# NOTE(review): signal 9 (KILL) cannot actually be trapped; listed here anyway.
trap "cd ${temp}; echo 'Aborted lam-mpi build'; exit 1" 1 2 3 6 9 11 12 15
PRODUCT=lam-7.0.6
BASE_DIR=${THIRDPARTY_DIR}/${PRODUCT}
BUILD_DIR=${BASE_DIR}/platforms/${CPSEIS_ARCH}
# Derive a compiler-agnostic platform name by dropping the last '_'-separated
# field of CPSEIS_ARCH; used below as a symlink to the real build directory.
LINKS=$(echo $CPSEIS_ARCH | awk 'BEGIN{FS="_"}{
for (i=1;i<NF-1;i++)printf("%s_",$i);
printf("%s",$(NF-1));}')
LINK_DIR=${BASE_DIR}/platforms/${LINKS}
cd ${THIRDPARTY_DIR}
# Download the tarball once; unpack if either the tarball or the tree is missing.
if [ ! -e ${PRODUCT}.tar.gz ] ; then
  # get lam-7.0.6
  wget http://lam.fries.net/download/files/${PRODUCT}.tar.gz
  tar xvfz ${PRODUCT}.tar.gz
fi
if [ ! -d ${BASE_DIR} ] ; then
  tar xvfz ${PRODUCT}.tar.gz
fi
# Pick the compiler trio that matches the configured CPSeis compiler.
if [ "$CPSEIS_COMPILER" = "gfortran" ] ; then
  F90=gfortran
  F77=gfortran
  CC=gcc
elif [ "$CPSEIS_COMPILER" = "intel91" ] ; then
  F90=ifort
  F77=ifort
  CC=icc
fi
cd ${BASE_DIR}
# -fPIC so objects can go into shared libraries; -fno-second-underscore gives
# the single-underscore Fortran symbol names mentioned in the header comment.
CFLAGS="-fPIC -O"
FFLAGS="-fPIC -fno-second-underscore -O"
export F90 CC CFLAGS FFLAGS F77
cd ${BASE_DIR}
mkdir -p ${BUILD_DIR}
# Expose the arch-specific build dir under the compiler-agnostic name.
if [ "${BUILD_DIR}" != "${LINK_DIR}" ] ; then
  ln -s ${BUILD_DIR} ${LINK_DIR}
fi
FLAGS="--prefix=${BUILD_DIR} --enable-threads --enable-shared libs=gcc \
--enable-f77 --enable-f90 --enable-cxx"
./configure ${FLAGS} && \
make clean && \
make -k
# there are errors in the lam install. Use -k to bypass errors killing make.
make -k install
cd $temp
echo "Build lam-mpi complete. Add ${BUILD_DIR}/bin to your path."
| true
|
86688e74a07d074d7b018da44b2dea1d134e0c3b
|
Shell
|
relsqui/doily
|
/make_release.sh
|
UTF-8
| 199
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Package the doily script and its default config into a versioned tarball
# under releases/.
#
# Usage: make_release.sh <release-name>
if [[ -z "$1" ]]; then
  echo "Please provide a release name."
  exit 1
fi
new_release="releases/doily-$1.tar.gz"
# tar does not create parent directories: ensure releases/ exists so a fresh
# checkout does not fail here.
mkdir -p releases
tar -czf "${new_release}" doily default.conf
ls -l "${new_release}"
| true
|
fa4e08f200687a041d2eada47d1f2c5202213030
|
Shell
|
Nickdv9/vendor_candy
|
/bootanimation/generate-bootanimation.sh
|
UTF-8
| 2,844
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Generate system/media/bootanimation.zip for the current device by rescaling
# the stock 1400x2560 animation frames either to the device resolution or to
# half their original size.
#
# Arguments:
#   $1 - device width   $2 - device height
#   $3 - "true" to halve the frames instead of fitting them to $1 x $2
CWD=`pwd`
BASEW=$1 # Device Width
BASEH=$2 # Device Height
HALF_RES=$3 # Half the size of the device resolution (true or false)
PORTW=1400 # Original bootanimation image based on unmodified width
PORTH=2560 # Original bootanimation image based on unmodified height
# Requires ImageMagick's convert; silently does nothing when it is missing.
if [ -f "/usr/bin/convert" ]; then
  # If the zip was already produced, just report its path and stop.
  if [ -f "$ANDROID_PRODUCT_OUT/system/media/bootanimation.zip" ]; then
    echo "$ANDROID_PRODUCT_OUT/system/media/bootanimation.zip"
  else
    # Start from fresh part0..part2 frame directories.
    if [ -d "$ANDROID_PRODUCT_OUT"/obj/BOOTANIMATION/bootanimation/part0 ]; then
      rm -rf "$ANDROID_PRODUCT_OUT"/obj/BOOTANIMATION/bootanimation/part*
    else
      mkdir -p "$ANDROID_PRODUCT_OUT"/obj/BOOTANIMATION/bootanimation/part{0..2}
    fi
    # Unpack the stock frames into the staging area.
    tar xvfp "$CWD/vendor/candy/bootanimation/bootanimation.tar" -C "$ANDROID_PRODUCT_OUT/obj/BOOTANIMATION/bootanimation/$TAR_FILENAME"
    # Rescale every frame in place.
    while read -r line; do
      IMAGEFILE="$(basename "${line}")"
      IMAGEFILEDIR="$(dirname "${line}")"
      # Determine original width and height of each image.
      SIZEW=$(identify -format "%wx%h" "${IMAGEFILEDIR}"/"${IMAGEFILE}" | egrep -o "[0-9]+x" | egrep -o "[0-9]+")
      SIZEH=$(identify -format "%wx%h" "${IMAGEFILEDIR}"/"${IMAGEFILE}" | egrep -o "x[0-9]+" | egrep -o "[0-9]+")
      if [ "$HALF_RES" = "true" ]; then
        # Resize the images by half
        FINALW=$(expr $SIZEW / 2)
        FINALH=$(expr $SIZEH / 2)
        # Make the conversion ('!' forces the exact geometry, ignoring aspect)
        convert "${IMAGEFILEDIR}"/"${IMAGEFILE}" -resize "$FINALW"'x'"$FINALH"'!' "${IMAGEFILEDIR}"/"${IMAGEFILE}"
      else
        # Scale each frame by the device/stock resolution ratio so frames that
        # were smaller than full screen keep their relative proportion.
        MODW=$(awk "BEGIN{print ${PORTW}/${SIZEW}}")
        NEWW=$(awk "BEGIN{print ${BASEW}/${MODW}}")
        MODH=$(awk "BEGIN{print ${PORTH}/${SIZEH}}")
        NEWH=$(awk "BEGIN{print ${BASEH}/${MODH}}")
        # Round the fractions for each image.
        FINALW=$(awk "BEGIN{print int(${NEWW}+0.5)}")
        FINALH=$(awk "BEGIN{print int(${NEWH}+0.5)}")
        # Make the conversion
        convert "${IMAGEFILEDIR}"/"${IMAGEFILE}" -resize "$FINALW"'x'"$FINALH"'!' "${IMAGEFILEDIR}"/"${IMAGEFILE}"
      fi
    done < <(find "$ANDROID_PRODUCT_OUT"/obj/BOOTANIMATION/bootanimation -type f -iname "*.png" -o -iname "*.jpg")
    # create desc.txt — geometry line (uses the *last* frame's FINALW/FINALH)
    # followed by the static part list shipped with the vendor tree.
    echo "$FINALW" "$FINALH" 60 > "$ANDROID_PRODUCT_OUT/obj/BOOTANIMATION/bootanimation/desc.txt"
    cat "$CWD/vendor/candy/bootanimation/desc.txt" >> "$ANDROID_PRODUCT_OUT/obj/BOOTANIMATION/bootanimation/desc.txt"
    # create bootanimation.zip — must be stored (-0), not compressed.
    cd "$ANDROID_PRODUCT_OUT/obj/BOOTANIMATION/bootanimation"
    if [ ! -d "$ANDROID_PRODUCT_OUT/system/media" ]; then
      mkdir -p "$ANDROID_PRODUCT_OUT/system/media"
    fi
    zip -r0 "$ANDROID_PRODUCT_OUT/system/media/bootanimation.zip" .
    echo "$ANDROID_PRODUCT_OUT/system/media/bootanimation.zip"
  fi
fi
| true
|
e9dd473b5ffea46b1691c6ad1718fd0126102432
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/dispmanx_vnc/PKGBUILD
|
UTF-8
| 702
| 2.671875
| 3
|
[] |
no_license
|
# Maintainer: graysky <graysky AT archlinux DOT us>
# PKGBUILD for dispmanx_vnc — a VCS package built straight from upstream git
# HEAD; pkgver is regenerated from the checkout on every build.
pkgname=dispmanx_vnc
pkgver=r17.8af7fcf
pkgrel=1
pkgdesc="VNC Server for Raspberry Pi using dispmanx"
arch=('armv7h' 'armv6h')
url="https://github.com/hanzelpeter/dispmanx_vnc"
license=('MIT')
depends=('libvncserver' 'raspberrypi-firmware')
makedepends=('git')
source=("git://github.com/hanzelpeter/dispmanx_vnc")
md5sums=('SKIP')

# Version scheme r<commit-count>.<short-hash>, the convention for VCS packages.
pkgver() {
  cd "$pkgname"
  printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}

build() {
  cd "$pkgname"
  make
}

# Install the server binary and the MIT license text.
package() {
  cd "$pkgname"
  install -Dm755 dispmanx_vncserver "$pkgdir"/usr/bin/dispmanx_vncserver
  install -Dm644 MIT "$pkgdir"/usr/share/licenses/$pkgname/LICENSE
}
| true
|
bc0edbe5947e768faf0960ddd19d52eeeed3fc49
|
Shell
|
gekkstah/smartMeter
|
/deployment.sh
|
UTF-8
| 2,770
| 2.984375
| 3
|
[] |
no_license
|
# deployment of $1 to pi@192.168.2.42:production/. and remote execution...
#
# Modes (selected by $1):
#   -devServer               run the server locally for development
#   -deploySmartMeter | -sm  restart the meter reader on the pi
#   -deploy2pi (default)     rsync the tree to the pi and (re)start the server
TARGETSERVER=krukas.dyndns.org
# TARGETSERVER=192.168.2.42
TARGET_SSH_PORT=42022
TARGET_HTTP_PORT=42080
case "$1" in
  '-devServer' )
    clear
    echo "starting dev environment"
    # Bounce the local apache, then run the node server in the foreground.
    sudo apachectl stop
    sudo apachectl start
    node /Volumes/docdata/johannes.mainusch/docjoe/development/smartMeter/server/djserver_eenergy.js
    exit 0
    ;;
  '-deploySmartMeter' | '-sm' )
    # the smartMeter: kill any running instance (the \$2 is escaped so awk's
    # field, not this script's argument, is expanded on the remote side),
    # rotate the log, then restart it detached.
    ssh -p $TARGET_SSH_PORT pi@$TARGETSERVER "ps aux | grep smartMeter.js | grep -v grep | awk '{ print \$2}' | xargs -t kill > /dev/null 2>&1 "
    ssh -p $TARGET_SSH_PORT pi@$TARGETSERVER 'cp myMeter.log myMeter.log.last'
    ssh -p $TARGET_SSH_PORT pi@$TARGETSERVER 'sudo node production/myMeter/myMeter.js > /dev/null 2>&1 &'
    ;;
  '-deploy2pi' | * )
    clear
    echo "I will now try to deploy to pi at " $TARGETSERVER
    # copy the files to the pi (server code and static client files).
    ssh -p $TARGET_SSH_PORT pi@$TARGETSERVER 'mkdir -p production/smartMeter';
    rsync -R -ave 'ssh -p '$TARGET_SSH_PORT . pi@$TARGETSERVER:production/smartMeter/.
    ssh -p $TARGET_SSH_PORT pi@$TARGETSERVER 'mkdir -p public_html/smartMeter';
    rsync -R -ave 'ssh -p '$TARGET_SSH_PORT client pi@$TARGETSERVER:public_html/smartMeter/.
    # link the data file into the deployed tree.
    ssh -p $TARGET_SSH_PORT pi@$TARGETSERVER 'ln -sf ~/myMeter.log production/smartMeter/data/gotResults.json';
    # the webServer
    # kill any running server
    echo "killing the currently running server..."
    ssh -p $TARGET_SSH_PORT pi@$TARGETSERVER "ps aux | grep djserver_eenergy.js | grep -v grep | awk '{ print \$2}' | xargs -t kill > /dev/null 2>&1 "
    # start the server
    echo "set the server file to executable..."
    ssh -p $TARGET_SSH_PORT pi@$TARGETSERVER 'chmod +x production/smartMeter/webServer/djserver_eenergy.js'
    ###ssh -p $TARGET_SSH_PORT pi@$TARGETSERVER 'cd production/smartMeter; ./server/djserver_eenergy.js serverport=42080 &'
    echo "start the server..."
    ssh -p $TARGET_SSH_PORT pi@$TARGETSERVER '(cd production/smartMeter; ./webServer/djserver_eenergy.js serverport=42080 ) > djserver_eenergy.log 2>&1 &'
    # test the webServer — poll the line-count endpoint a few times.
    echo "==============================="
    echo "testing the server..."
    echo "curl http://$TARGETSERVER:42080/djserver_eenergy/getnolines"
    sleep 1
    curl http://$TARGETSERVER:$TARGET_HTTP_PORT/djserver_eenergy/getnolines
    sleep 1
    curl http://$TARGETSERVER:$TARGET_HTTP_PORT/djserver_eenergy/getnolines
    sleep 1
    curl http://$TARGETSERVER:$TARGET_HTTP_PORT/djserver_eenergy/getnolines
    echo "done with testing the server..."
    echo "==============================="
    echo " "
    # edit the contag entry that limits the number of datasets...
    # 59 23 * * 0 (tail -50000 /home/pi/myMeter.log > test; mv test /home/pi/myMeter.log) > /dev/null 2&>1
    exit 0
    ;;
esac
exit 0
| true
|
97d0fd94068aad322c0d7e8040fcc3ddf05da48c
|
Shell
|
norjms/serverinstallscript
|
/install.sh
|
UTF-8
| 4,474
| 3.78125
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
#
# Media-server installation script — bootstrap checks.
#
# Use a fixed, trusted PATH so the installer is unaffected by the caller's
# environment.
PATH=/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/bin:/sbin
# Abort unless running as root. The command substitution is quoted so the
# test cannot break apart (or syntax-error) if it ever expands oddly.
if [ "$(id -u)" != "0" ]; then
  echo "Error: You must be root to run this script, please use the root user to install the software."
  exit 1
fi
#--------------------------------------------------------------------------------------------------------------------------------
# Updated to check if packages are installed to save time
# What do we need anyway
# updatecheck — install the base tool set only when something is missing.
# dpkg-query -W succeeds only if every listed package is known to dpkg, so a
# failing query triggers the update + install path; otherwise this is a no-op.
updatecheck() {
  apt-get clean
  if ! dpkg-query -W curl net-tools alsa-base alsa-utils debconf-utils git whiptail build-essential stunnel4 html2text apt-transport-https; then
    debconf-apt-progress -- apt-get update
    apt-get -y install sudo net-tools curl debconf-utils dnsutils unzip whiptail git build-essential alsa-base alsa-utils stunnel4 html2text apt-transport-https --force-yes
    #debconf-apt-progress -- apt-get upgrade -y
  fi
}
updatecheck
#--------------------------------------------------------------------------------------------------------------------------------
SECTION="Basic configuration"
# Determine the host's outward-facing IP from the route to 8.8.8.8.
serverIP=$(ip route get 8.8.8.8 | awk '{ print $NF; exit }')
# Split the IP on dots into $1..$4 — NOTE(review): this clobbers the script's
# own positional arguments, which are not used after this point.
set ${serverIP//./ }
SUBNET="$1.$2.$3."
#Begin installer scripts
whiptail --title "Welcome to the Media Server Installation Script" --msgbox "This Debian Wheezy/Jessie and Ubuntu installer will prompt for valid users and ports, defaults are suggested in () for those in doubt" 8 78
# install_* functions are provided by the companion functions.sh.
source "functions.sh"
# Checklist writes the selected tags (one per line) to the 'results' file via
# stderr redirection, which is how whiptail returns its selection.
whiptail --ok-button "Install" --title "Media Server Installation Script" --checklist --separate-output "\nIP: $serverIP\n\nChoose what you want to install:" 20 78 9 \
"Plex" "Plex Media Server " off \
"Kodi" "Kodi Media Server " off \
"SickRage" "Python Show Automation Finder" off \
"Sonarr" ".NET Show Automation Finder" off \
"Jackett" "Add custom providers to Sonarr" off \
"CouchPotato" "Video Automation Finder" off \
"HTPC Manager" "HTPC Management system" off \
"Madsonic" "Java media server" off \
"Subsonic" "Java media server" off \
"Samba" "Windows compatible file sharing " off \
"NFS Tools" "Windows compatible file sharing " off \
"Webmin" "Admin server web interface" off \
"SoftEther VPN server" "Advanced VPN solution" off \
"Varnish" "Reverse Proxy HTTP Accelerator" off \
"RUTORRENT" "RUTORRENT including nginx, PHP, and MariaDB" off \
"LEMP" "nginx, PHP, MariaDB" off 2>results
# Translate each selected tag into an ins_* flag.
while read choice
do
  case $choice in
    "Samba") ins_samba="true";;
    "Madsonic") ins_madsonic="true";;
    "Subsonic") ins_subsonic="true";;
    "Kodi") ins_kodi="true";;
    "Plex") ins_plex="true";;
    "NFS Tools") ins_nfs="true";;
    "Jackett") ins_jackett="true";;
    "SickRage") ins_sickrage="true";;
    "Sonarr") ins_sonarr="true";;
    "CouchPotato") ins_couchpotato="true";;
    "HTPC Manager") ins_htpcmanager="true";;
    "SoftEther VPN server") ins_vpn_server="true";;
    "Webmin") ins_webmin="true";;
    "RUTORRENT") ins_rutorrent="true";;
    "LEMP") ins_lemp="true";;
    "Varnish") ins_varnish="true";;
    *)
    ;;
  esac
done < results
# Run the installer for every selected component (functions from functions.sh).
if [[ "$ins_subsonic" == "true" ]]; then install_subsonic; fi
if [[ "$ins_madsonic" == "true" ]]; then install_madsonic; fi
if [[ "$ins_webmin" == "true" ]]; then install_webmin; fi
if [[ "$ins_jackett" == "true" ]]; then install_jackett; fi
if [[ "$ins_kodi" == "true" ]]; then install_kodi; fi
if [[ "$ins_plex" == "true" ]]; then install_plex; fi
if [[ "$ins_samba" == "true" ]]; then install_samba; fi
if [[ "$ins_nfs" == "true" ]]; then install_nfs; fi
if [[ "$ins_vpn_server" == "true" ]]; then install_vpn_server; fi
if [[ "$ins_sickrage" == "true" ]]; then install_sickrage; fi
if [[ "$ins_sonarr" == "true" ]]; then install_sonarr; fi
if [[ "$ins_couchpotato" == "true" ]]; then install_couchpotato; fi
if [[ "$ins_htpcmanager" == "true" ]]; then install_htpcmanager; fi
if [[ "$ins_rutorrent" == "true" ]]; then install_rutorrent; fi
if [[ "$ins_lemp" == "true" ]]; then install_lemp; fi
if [[ "$ins_varnish" == "true" ]]; then install_varnish; fi
#rm results
| true
|
00b0b45b739ac76e9029b09494028773cfdfa2b6
|
Shell
|
aminnj/redis-htcondor
|
/scripts/condor_executable.sh
|
UTF-8
| 1,220
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# getjobad ATTR — print the value of ClassAd attribute ATTR from the job-ad
# file named by $_CONDOR_JOB_AD: case-insensitive prefix match on the
# attribute name, everything after the first '=', whitespace squeezed by
# 'xargs echo'.
getjobad() {
  local attr="$1"
  grep -i "^${attr}" "$_CONDOR_JOB_AD" | cut -d '=' -f '2-' | xargs echo
}
# Locate and source a CMSSW environment from one of the known grid mount
# points, in order of preference; die if none is readable.
if [ -r "$OSGVO_CMSSW_Path"/cmsset_default.sh ]; then
  echo "sourcing environment: source $OSGVO_CMSSW_Path/cmsset_default.sh"
  source "$OSGVO_CMSSW_Path"/cmsset_default.sh
elif [ -r "$OSG_APP"/cmssoft/cms/cmsset_default.sh ]; then
  echo "sourcing environment: source $OSG_APP/cmssoft/cms/cmsset_default.sh"
  source "$OSG_APP"/cmssoft/cms/cmsset_default.sh
elif [ -r /cvmfs/cms.cern.ch/cmsset_default.sh ]; then
  echo "sourcing environment: source /cvmfs/cms.cern.ch/cmsset_default.sh"
  source /cvmfs/cms.cern.ch/cmsset_default.sh
else
  echo "ERROR! Couldn't find $OSGVO_CMSSW_Path/cmsset_default.sh or /cvmfs/cms.cern.ch/cmsset_default.sh or $OSG_APP/cmssoft/cms/cmsset_default.sh"
  exit 1
fi
# The worker reads from hadoop later; bail out early on nodes where the
# mount is not visible rather than starting a useless worker.
if ! ls /hadoop/cms/store/ ; then
  echo "ERROR! hadoop is not visible, so the worker would be useless later. dying."
  exit 1
fi
# Log the sandbox contents and host for debugging.
ls -lrth
hostname
# Unpack the shipped python environment into ./temp and run the worker.
mkdir temp
cd temp
mv ../workerenv.tar.xz .
mv ../*.py .
tar xf workerenv.tar.xz
ls -lrth
# Put the bundled python env first on PATH; C.UTF-8 avoids locale errors.
export PATH=`pwd`/workerenv/bin:$PATH
export LC_ALL=C.UTF-8
echo $PATH
echo $PYTHONPATH
which python3
which pip3
python3 -V
# Forward this job's arguments straight to the worker.
python3 worker.py $@
| true
|
4b6ac84168b2a15b0d54e2a2722f3ca7a128282a
|
Shell
|
orenchuk/OS_words_counter
|
/executer.sh
|
UTF-8
| 120
| 2.84375
| 3
|
[] |
no_license
|
# Run ./counter with 1, 2, 4 and 8 (thread count) N times each, appending
# every run's output to res.txt, with a blank line between thread counts.
N=5                # repetitions per thread count
rm -f res.txt      # -f: do not error when the file does not exist yet
for i in 1 2 4 8
do
  for ((j = 1; j <= N; j++))   # arithmetic loop instead of spawning seq
  do
    ./counter "$i" >> res.txt
  done
  echo
done
| true
|
9a492c866569802206cb118bc85868c3bc100846
|
Shell
|
duganchen/my_slackbuilds
|
/ripgrep.SlackBuild
|
UTF-8
| 1,548
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# SlackBuild for ripgrep: fetches the latest musl binary release from GitHub
# and repackages it (binary, man page, completions, docs) as a Slackware pkg.
set -e
PRGNAM=ripgrep
BUILD=${BUILD:-1}
TAG=${TAG:-dc}
TMP=${TMP:-/tmp}
PKG=$TMP/package-$PRGNAM
# Ask the GitHub API for the newest release asset matching this machine's
# architecture. NOTE(review): the embedded script needs python3 + requests.
URL=$(
python3 - << EOF
import os
from pathlib import PurePosixPath
import requests
url = 'https://api.github.com/repos/BurntSushi/ripgrep/releases'
headers = {'Accept': 'application/vnd.github.v3+json'}
machine = os.uname().machine
urls = (asset['browser_download_url'] for asset in requests.get(url, headers=headers).json()[0]['assets'])
filename = f'ripgrep-*-{machine}-unknown-linux-musl.tar.gz'
url = next(url for url in urls if PurePosixPath(url).match(filename))
print(url)
EOF
)
# Derive archive name, arch and version from the download URL
# (asset name layout: ripgrep-<version>-<arch>-unknown-linux-musl.tar.gz).
ARCHIVE=$(basename "$URL")
ARCH=$(echo "$ARCHIVE" | cut -d - -f 3)
VERSION=$(echo "$ARCHIVE" | cut -d - -f 2)
SRC=$(basename "$ARCHIVE" .tar.gz)
# Download and unpack in $TMP, starting from a clean slate.
cd "$TMP"
rm -rf "$ARCHIVE" "$PKG" "$SRC"
wget --content-disposition "$URL"
tar xf "$SRC".tar.*
# Package skeleton.
mkdir -p "$PKG/usr/bin"
mkdir -p "$PKG/usr/doc/$PRGNAM-$VERSION"
mkdir -p "$PKG/usr/man/man1"
mkdir -p "$PKG/usr/share/zsh/site-functions"
mkdir -p "$PKG/usr/share/bash-completion/completions"
mkdir -p "$PKG/usr/share/fish/vendor_completions.d"
cd "$SRC"
# Stage docs, binary, man page and shell completions.
cp -a COPYING LICENSE-MIT UNLICENSE *.md doc/*.md "$PKG/usr/doc/$PRGNAM-$VERSION"
cp rg "$PKG/usr/bin"
cp doc/rg.1 "$PKG/usr/man/man1"
gzip -9 "$PKG/usr/man/man1/rg.1"
cp complete/rg.bash "$PKG/usr/share/bash-completion/completions/rg"
cp complete/_rg "$PKG/usr/share/zsh/site-functions"
cp complete/rg.fish "$PKG/usr/share/fish/vendor_completions.d"
# Build the final Slackware package.
cd "$PKG"
/sbin/makepkg -l y -c n "$TMP/$PRGNAM-$VERSION-$ARCH-$BUILD$TAG.${PKGTYPE:-tgz}"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.