blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
5018fe697688d81761c736adae30763068abdd16 | Shell | weakish/csharp-sdk | /script/package.sh | UTF-8 | 789 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# pack <src_path> <staging_dir> <zip_output>
# Copy the build output at <src_path> into <staging_dir>, zip the staging
# dir into <zip_output>, then remove the staging dir.  The staging dir name
# becomes the top-level folder inside the resulting zip.
pack() {
    # Quote every expansion so paths containing spaces do not word-split.
    local path="$1"
    local dir="$2"
    local output="$3"
    mkdir -p "$dir"          # -p: do not fail if the dir is left over from a previous run
    rsync -avz "$path" "$dir"
    zip -r "$output" "$dir"
    rm -r "$dir"
}
# Build each SDK flavour.  Arguments: <build output dir> <staging dir> <zip name>.
# "DLLs" is the folder name expected by .NET Standard consumers, "Plugins"
# the one expected inside a Unity project.
# Storage
pack ./Storage/Storage/bin/Release/netstandard2.0/ ./DLLs LeanCloud-SDK-Storage-Standard.zip
pack ./Storage/Storage-Unity/bin/Release/netstandard2.0/ ./Plugins LeanCloud-SDK-Storage-Unity.zip
# Realtime
pack ./Realtime/Realtime/bin/Release/netstandard2.0/ ./DLLs LeanCloud-SDK-Realtime-Standard.zip
pack ./Realtime/Realtime-Unity/bin/Release/netstandard2.0/ ./Plugins LeanCloud-SDK-Realtime-Unity.zip
# LiveQuery
pack ./LiveQuery/LiveQuery/bin/Release/netstandard2.0/ ./DLLs LeanCloud-SDK-LiveQuery-Standard.zip
pack ./LiveQuery/LiveQuery-Unity/bin/Release/netstandard2.0/ ./Plugins LeanCloud-SDK-LiveQuery-Unity.zip | true |
13909ca8d2434cb7339cf29aa6993b92f66d3168 | Shell | gentoo/releng | /releases/scripts/cloud-prep.sh | UTF-8 | 2,650 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Gentoo cloud-image preparation: runs inside the image chroot/rootfs and
# configures timezone, locale, grub serial console, fstab, dhcpcd, hostname
# handling and resolv.conf, then upgrades world and scrubs the system.
# The steps are order-dependent; do not reorder casually.
# Set timezone
echo 'UTC' > /etc/timezone
# Set locale
echo 'en_US.UTF-8 UTF-8' >> /etc/locale.gen
echo 'en_US ISO-8859-1' >> /etc/locale.gen
locale-gen -q
eselect locale set en_US.utf8
# Some rootfs stuff
grep -v rootfs /proc/mounts > /etc/mtab
# This is set in rackspaces prep, might help us
echo 'net.ipv4.conf.eth0.arp_notify = 1' >> /etc/sysctl.conf
echo 'vm.swappiness = 0' >> /etc/sysctl.conf
# Let's configure our grub
# Access on both regular tty and serial console
mkdir /boot/grub
# Unquoted heredoc is fine here: the GRUB_* lines contain no $-expansions.
cat >>/etc/default/grub <<EOF
GRUB_TERMINAL='serial console'
GRUB_CMDLINE_LINUX="console=tty0 console=ttyS0,115200n8"
GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
EOF
grub-mkconfig -o /boot/grub/grub.cfg
# Rewrite the loop-device root references produced while building the image
# so the final config boots by filesystem label instead.
sed -r -i 's/loop[0-9]+p1/LABEL\=cloudimg-rootfs/g' /boot/grub/grub.cfg
sed -i 's/root=.*\ ro/root=LABEL\=cloudimg-rootfs\ ro/' /boot/grub/grub.cfg
# And the fstab
echo 'LABEL=cloudimg-rootfs / ext4 defaults 0 0' > /etc/fstab
# allow the console log
sed -i 's/#s0/s0/g' /etc/inittab
# let ipv6 use normal slaac
sed -i 's/slaac/#slaac/g' /etc/dhcpcd.conf
# don't let dhcpcd set domain name or hostname
sed -i 's/domain_name\,\ domain_search\,\ host_name/domain_search/g' /etc/dhcpcd.conf
# need to do this here because it clobbers an openrc owned file
# Quoted delimiter ("EOL") keeps $(cat ...) literal so it is evaluated at
# boot time by openrc, not now.
cat > /etc/conf.d/hostname << "EOL"
# Set to the hostname of this machine
if [ -f /etc/hostname ];then
    hostname=$(cat /etc/hostname 2> /dev/null | cut -d"." -f1 2> /dev/null)
else
    hostname="localhost"
fi
EOL
chmod 0644 /etc/conf.d/hostname
chown root:root /etc/conf.d/hostname
# set a nice default for /etc/resolv.conf
cat > /etc/resolv.conf << EOL
nameserver 8.8.8.8
nameserver 2001:4860:4860::8888
EOL
# let's upgrade (security fixes and otherwise)
USE="-build" emerge -uDNv --with-bdeps=y --jobs=2 @world
USE="-build" emerge --verbose=n --depclean
USE="-build" emerge -v --usepkg=n @preserved-rebuild
etc-update --automode -5
# Clean up portage
emerge --verbose=n --depclean
if [[ -a /usr/bin/eix ]]; then
	eix-update
fi
emaint all -f
eselect news read all
eclean-dist --destructive
# Drop the image-build USE flag line from make.conf and pin the release keyring.
sed -i '/^USE=\"\${USE}\ \ build\"$/d' /etc/portage/make.conf
echo 'PORTAGE_GPG_DIR="/var/lib/gentoo/gkeys/keyrings/gentoo/release"' >> /etc/portage/make.conf
# clean up system
passwd -d root
passwd -l root
# NOTE(review): unquoted $(find ...) word-splits; acceptable only because
# /var/log paths in the image contain no whitespace.
for i in $(find /var/log -type f); do truncate -s 0 $i; done
# remove foreign manpages
find /usr/share/man/ -mindepth 1 -maxdepth 1 -path "/usr/share/man/man*" -prune -o -exec rm -rf {} \;
# fine if this fails, aka non-hardened
if [[ -x /usr/sbin/migrate-pax ]]; then
	echo 'migraging pax'
	/usr/sbin/migrate-pax -m
fi
| true |
1c10d673762ed9ad098a6c6f4a0f55fc819f6f15 | Shell | jrmarino/libnvpair | /test-libnvpair_json/json_07_nested_arrays.ksh | UTF-8 | 1,888 | 2.796875 | 3 | [] | no_license | #!/usr/local/bin/ksh93
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2014, Joyent, Inc.
#
# Locate and source the shared test helpers next to this script.
DIR=$(dirname $0)
. ${DIR}/json_common.ksh
# Expected JSON output.  The unquoted heredoc plus trailing backslashes
# collapse the block into one long line; do not re-indent these lines or
# the byte-for-byte comparison against print_json's output will fail.
BASELINE="$(cat <<EOF
{\
"event_store":{\
"name":"Occurences",\
"events":[\
{"time":489715200,"desc":"inception"},\
{"time":1057708800,"desc":"maturation"},\
{"time":1344816000,"desc":"migration"},\
{"time":1405296000,"desc":"integration"},\
{}\
]\
},\
"first level":[\
{"second_level_0":[{\
"sl0_a":true,\
"sl0_b":"aaaa"\
},\
{"x":1234}\
],\
"second_level_1":[{}],\
"second_level_2":[\
{"alpha":"a"},\
{"beta":"b"},\
{"gamma":"c"},\
{"delta":"d"},\
{"order":["a","b","c","d"]}\
]\
}\
]\
}
EOF)"
# Drive the print_json helper with a command script (quoted delimiter: the
# commands are passed through literally) and capture its JSON output.
OUTPUT="$(${DIR}/print_json <<'EOF'
add_object "event_store";
add_string "name" "Occurences";
add_object_array "events";
add_uint32 "time" "489715200";
add_string "desc" "inception";
next;
add_uint32 "time" "1057708800";
add_string "desc" "maturation";
next;
add_uint32 "time" "1344816000";
add_string "desc" "migration";
next;
add_uint32 "time" "1405296000";
add_string "desc" "integration";
next;
end;
end;
add_object_array "first level";
add_object_array "second_level_0";
add_boolean "sl0_a";
add_string "sl0_b" "aaaa";
next;
add_int32 "x" "1234";
end;
add_object_array "second_level_1";
end;
add_object_array "second_level_2";
add_string "alpha" "a";
next;
add_string "beta" "b";
next;
add_string "gamma" "c";
next;
add_string "delta" "d";
next;
add_string_array "order" "a" "b" "c" "d";
end;
end;
EOF)"
# complete (from json_common.ksh) compares OUTPUT to BASELINE and reports.
complete
| true |
8006fca7f468033b034b221ce5eafdb87ff3c052 | Shell | ian47vr/penislicker420 | /cowfc.sh | UTF-8 | 20,023 | 4.03125 | 4 | [] | no_license | #!/bin/bash
DISCONTINUED="false"
# BUG FIX: the original test was `[ $DISCONTINUED="true" ]`, which expands to
# the single non-empty word "false=true" — a test that is ALWAYS true, so the
# script exited unconditionally regardless of the flag.  Compare properly.
if [ "$DISCONTINUED" = "true" ] ; then
echo "Sorry, but this script has been discontinued. Please use the installer at https://github.com/EnergyCube/dwc_network_installer instead."
exit 1
fi
echo "##### WARNING!!!!! ##### - Recently, Wiimmfi has undergone some changes which makes it so that their servers are more secure from hackers."
echo "Having said that, this means that the CoWFC fork will not be getting the security patch, as it is unclear how it is possible. For the time being, you accept that you run your own server with a chance that hackers will be able to execute code over the MKW network."
echo "This might mean that hackers can in theory, brick consoles. Pressing ENTER will confirm that you accept the risks."
# Both reads store into the default REPLY variable; only the second answer
# (the typed confirmation) is actually checked below.
read -rp "Please press ENTER to accept the risk: "
read -rp "Just in case you were trigger-happy, I'll need you to type ACCEPT.: "
if [ "$REPLY" != "ACCEPT" ] ; then
echo "Verification FAILED!"
exit 2
fi
# DWC Network Installer script by kyle95wm/beanjr - re-written for CoWFC
# Warn Raspberry Pi users - probably a better way of doing this
# NOTE(review): detecting a Pi by the existence of /home/pi is heuristic at
# best; any machine with that directory is rejected.
if [ -d /home/pi/ ] ; then
echo "THIS SCRIPT IS NOT SUPPORTED ON RASPBERRY PI!"
echo "Please use the older script here: https://github.com/kyle95wm/dwc_network_installer"
exit 1
fi
# Check if we already installed the server (marker file created at the end
# of a successful run).
if [ -f /etc/.dwc_installed ] ; then
echo "You already installed CoWFC. There is no need to re-run it.
Perhaps some time down the road we can offer an uninstall option.
For now, if you wish to uninstall everything, just nuke your system.
You shouldn't have anything else on it anyways."
# NOTE(review): exit codes are 8-bit; 999 is reported to the caller as 231.
exit 999
fi
# ensure running as root: re-exec the whole script under sudo, preserving args.
if [ "$(id -u)" != "0" ]; then
exec sudo "$0" "$@"
fi
# We will test internet connectivity using ping.
# BUG FIX: output used to be redirected to "/dev/nul" (missing the final "l"),
# which silently created a regular file named "nul" in the working directory
# instead of discarding ping's output.
if ping -c 4 google.com >/dev/null ; then
echo "Internet is OK"
else
echo "Internet connection test failed!"
exit 1
fi
# We'll assume the user is from an English locale
if [ ! -f /var/www/.locale-done ] ; then
if ! locale-gen en_US.UTF-8 ; then
# locale-gen missing/failed: pull in the base English language pack first.
apt-get install -y language-pack-en-base
fi
fi
export LANG=en_US.UTF-8
export LC_ALL=en_US.UTF-8
# Marker so the locale step is skipped on re-runs.
touch /var/www/.locale-done
# Variables used by the script in various sections to pre-fill long commands
C1="0" # Retry counter for the CoWFC git clone loop
C2="0" # Retry counter for the dwc_network_server_emulator clone loop
IP="" # Used for user input
interface="" # Used for user input
mod1="proxy" # This is a proxy mod that is dependent on the other 2
mod2="proxy_http" # This is related to mod1
mod3="php7.1"
# Self-updater state: temp download path and canonical upstream script URL.
UPDATE_FILE="$0.tmp"
UPDATE_BASE="https://raw.githubusercontent.com/kyle95wm/cowfc_installer/master/cowfc.sh"
# Functions
# update: self-updater.  Downloads the canonical script from UPDATE_BASE and,
# if it differs from the running copy, replaces $0 and re-execs it with -s
# (which suppresses a second update check in the new process).
function update {
# The following lines will check for an update to this script if the -s switch
# is not used.
# Original code by Dennis Simpson
# Modified by Kyle Warwick-Mathieu
echo "Checking if script is up to date, please wait"
wget -nv -O "$UPDATE_FILE" "$UPDATE_BASE" >& /dev/null
# `!` binds only to diff; the [ -s ] guard ensures we never install an
# empty download (e.g. when the fetch failed).
if ! diff "$0" "$UPDATE_FILE" >& /dev/null && [ -s "$UPDATE_FILE" ]; then
mv "$UPDATE_FILE" "$0"
chmod +x "$0"
echo "$0 updated"
"$0" -s
exit
else
rm "$UPDATE_FILE" # If no updates are available, simply remove the file
fi
}
# create_apache_vh_nintendo: write Apache virtual hosts for Nintendo's Wi-Fi
# Connection domains, reverse-proxying them to the local emulator services
# (nas/naswii/dls1/conntest -> :9000, gamestats -> :9002, sake -> :8000).
function create_apache_vh_nintendo {
# This function will create virtual hosts for Nintendo's domains in Apache
echo "Creating Nintendo virtual hosts...."
touch /etc/apache2/sites-available/gamestats2.gs.nintendowifi.net.conf
touch /etc/apache2/sites-available/gamestats.gs.nintendowifi.net.conf
touch /etc/apache2/sites-available/nas-naswii-dls1-conntest.nintendowifi.net.conf
touch /etc/apache2/sites-available/sake.gs.nintendowifi.net.conf
cat >/etc/apache2/sites-available/gamestats2.gs.nintendowifi.net.conf <<EOF
<VirtualHost *:80>
ServerAdmin webmaster@localhost
ServerName gamestats2.gs.nintendowifi.net
ServerAlias "gamestats2.gs.nintendowifi.net, gamestats2.gs.nintendowifi.net"
ProxyPreserveHost On
ProxyPass / http://127.0.0.1:9002/
ProxyPassReverse / http://127.0.0.1:9002/
</VirtualHost>
EOF
cat >/etc/apache2/sites-available/gamestats.gs.nintendowifi.net.conf <<EOF
<VirtualHost *:80>
ServerAdmin webmaster@localhost
ServerName gamestats.gs.nintendowifi.net
ServerAlias "gamestats.gs.nintendowifi.net, gamestats.gs.nintendowifi.net"
ProxyPreserveHost On
ProxyPass / http://127.0.0.1:9002/
ProxyPassReverse / http://127.0.0.1:9002/
</VirtualHost>
EOF
cat >/etc/apache2/sites-available/nas-naswii-dls1-conntest.nintendowifi.net.conf <<EOF
<VirtualHost *:80>
ServerAdmin webmaster@localhost
ServerName naswii.nintendowifi.net
ServerAlias "naswii.nintendowifi.net, naswii.nintendowifi.net"
ServerAlias "nas.nintendowifi.net"
ServerAlias "nas.nintendowifi.net, nas.nintendowifi.net"
ServerAlias "dls1.nintendowifi.net"
ServerAlias "dls1.nintendowifi.net, dls1.nintendowifi.net"
ServerAlias "conntest.nintendowifi.net"
ServerAlias "conntest.nintendowifi.net, conntest.nintendowifi.net"
ProxyPreserveHost On
ProxyPass / http://127.0.0.1:9000/
ProxyPassReverse / http://127.0.0.1:9000/
</VirtualHost>
EOF
# BUG FIX: this heredoc is unquoted, so ${APACHE_LOG_DIR} used to be expanded
# by *this* shell (where it is empty), writing the broken directive
# "CustomLog /access.log combined".  Escaping the $ keeps the literal Apache
# runtime variable (defined in /etc/apache2/envvars) in the generated config.
cat >/etc/apache2/sites-available/sake.gs.nintendowifi.net.conf <<EOF
<VirtualHost *:80>
ServerAdmin webmaster@localhost
ServerName sake.gs.nintendowifi.net
ServerAlias sake.gs.nintendowifi.net *.sake.gs.nintendowifi.net
ServerAlias secure.sake.gs.nintendowifi.net
ServerAlias secure.sake.gs.nintendowifi.net *.secure.sake.gs.nintendowifi.net
ProxyPass / http://127.0.0.1:8000/
CustomLog \${APACHE_LOG_DIR}/access.log combined
</VirtualHost>
EOF
echo "Done!"
echo "enabling...."
# NOTE(review): the glob only reaches a2ensite literally when it does not
# match files in the current directory — confirm this behaves on all setups.
a2ensite *.nintendowifi.net.conf
service apache2 restart
}
# create_apache_vh_wiimmfi: same virtual hosts as the Nintendo set, but for
# the wiimmfi.de domains used by patched game discs.
function create_apache_vh_wiimmfi {
# This function will create virtual hosts for Wiimmfi's domains in Apache
echo "Creating Wiimmfi virtual hosts...."
touch /etc/apache2/sites-available/gamestats2.gs.wiimmfi.de.conf
touch /etc/apache2/sites-available/gamestats.gs.wiimmfi.de.conf
touch /etc/apache2/sites-available/nas-naswii-dls1-conntest.wiimmfi.de.conf
touch /etc/apache2/sites-available/sake.gs.wiimmfi.de.conf
cat >/etc/apache2/sites-available/gamestats2.gs.wiimmfi.de.conf <<EOF
<VirtualHost *:80>
ServerAdmin webmaster@localhost
ServerName gamestats2.gs.wiimmfi.de
ServerAlias "gamestats2.gs.wiimmfi.de, gamestats2.gs.wiimmfi.de"
ProxyPreserveHost On
ProxyPass / http://127.0.0.1:9002/
ProxyPassReverse / http://127.0.0.1:9002/
</VirtualHost>
EOF
cat >/etc/apache2/sites-available/gamestats.gs.wiimmfi.de.conf <<EOF
<VirtualHost *:80>
ServerAdmin webmaster@localhost
ServerName gamestats.gs.wiimmfi.de
ServerAlias "gamestats.gs.wiimmfi.de, gamestats.gs.wiimmfi.de"
ProxyPreserveHost On
ProxyPass / http://127.0.0.1:9002/
ProxyPassReverse / http://127.0.0.1:9002/
</VirtualHost>
EOF
cat >/etc/apache2/sites-available/nas-naswii-dls1-conntest.wiimmfi.de.conf <<EOF
<VirtualHost *:80>
ServerAdmin webmaster@localhost
ServerName naswii.wiimmfi.de
ServerAlias "naswii.wiimmfi.de, naswii.wiimmfi.de"
ServerAlias "nas.wiimmfi.de"
ServerAlias "nas.wiimmfi.de, nas.wiimmfi.de"
ServerAlias "dls1.wiimmfi.de"
ServerAlias "dls1.wiimmfi.de, dls1.wiimmfi.de"
ServerAlias "conntest.wiimmfi.de"
ServerAlias "conntest.wiimmfi.de, conntest.wiimmfi.de"
ProxyPreserveHost On
ProxyPass / http://127.0.0.1:9000/
ProxyPassReverse / http://127.0.0.1:9000/
</VirtualHost>
EOF
# BUG FIX: escape the $ so the literal Apache runtime variable
# ${APACHE_LOG_DIR} lands in the config instead of being expanded (to empty)
# by this shell — see create_apache_vh_nintendo.
cat >/etc/apache2/sites-available/sake.gs.wiimmfi.de.conf <<EOF
<VirtualHost *:80>
ServerAdmin webmaster@localhost
ServerName sake.gs.wiimmfi.de
ServerAlias sake.gs.wiimmfi.de *.sake.gs.wiimmfi.de
ServerAlias secure.sake.gs.wiimmfi.de
ServerAlias secure.sake.gs.wiimmfi.de *.secure.sake.gs.wiimmfi.de
ProxyPass / http://127.0.0.1:8000/
CustomLog \${APACHE_LOG_DIR}/access.log combined
</VirtualHost>
EOF
echo "Done!"
echo "enabling...."
a2ensite *.wiimmfi.de.conf
service apache2 restart
}
# apache_mods: enable the reverse-proxy modules ($mod1/$mod2) and PHP 7.1
# ($mod3).  mod_php conflicts with the event MPM, so if enabling php7.1
# fails we disable mpm_event and retry once.
function apache_mods {
    a2enmod "$mod1" "$mod2"
    service apache2 restart
    if ! a2enmod "$mod3" ; then
        a2dismod mpm_event
        a2enmod "$mod3"
        service apache2 restart
    fi
    service apache2 restart
}
# dns_config: interactively append spoofing entries to /etc/dnsmasq.conf so
# nintendowifi.net and wiimmfi.de resolve to this server, then restart dnsmasq.
function dns_config {
# This function will configure dnsmasq
echo "----------Lets configure DNSMASQ now----------"
sleep 3s
# Decided to take this step out, as doing so will create what's known as an open resolver.
# Having an open resolver is a security risk and is not a good idea.
# This means that DNS will be restricted to ONLY looking up Nintendo domains.
#echo "Adding Google DNS (8.8.8.8) to config"
# We add Google's DNS server to our server so that anyone with our DNS server can still resolve hostnames to IP
# addresses outside our DNS server. Useful for Dolphin testing
#cat >>/etc/dnsmasq.conf <<EOF
#server=8.8.8.8
#EOF
#sleep 2s
echo "What is your EXTERNAL IP?"
echo "NOTE: If you plan on using this on a LAN, put the IP of your Linux system instead"
echo "It's also best practice to make this address static in your /etc/network/interfaces file"
echo "your LAN IP is"
hostname -I | cut -f1 -d' '
echo "Your external IP is:"
curl -4 -s icanhazip.com
echo "Please type in either your LAN or external IP"
read -re IP
cat >>/etc/dnsmasq.conf <<EOF # Adds your IP you provide to the end of the DNSMASQ config file
address=/nintendowifi.net/$IP
address=/wiimmfi.de/$IP
EOF
clear
ifconfig
read -rp "Please type your primary interfaces's name (e.g - eth0): " interface
# NOTE(review): this writes interface="eth0" (with quotes) into dnsmasq.conf;
# dnsmasq's documented syntax is interface=eth0 — verify dnsmasq accepts the
# quoted form on the target distro.
cat >>/etc/dnsmasq.conf <<EOF
interface="$interface"
EOF
clear
echo "DNSMasq setup completed!"
clear
service dnsmasq restart
clear
}
# install_required_packages: add the ondrej/php PPA (rebooting once after the
# first add, marker file /var/www/.php71-added), then install PHP 7.1 and the
# rest of the server stack (apache2, python2.7, twisted, dnsmasq, git, curl).
function install_required_packages {
# Add PHP 7.1 repo
if [ ! -f "/var/www/.php71-added" ] ; then
echo "Adding the PHP 7.1 repository. Please follow any prompts."
if ! add-apt-repository ppa:ondrej/php ; then
# add-apt-repository itself may be missing on minimal installs.
apt-get install --force-yes software-properties-common python-software-properties -y
add-apt-repository ppa:ondrej/php
fi
sleep 2s
echo "Creating file to tell the script you already added the repo"
touch "/var/www/.php71-added"
echo "I will now reboot your server to free up resources for the next phase"
# The script is expected to be re-run after this reboot; the marker file
# above makes the second pass fall through to the install branch.
reboot
exit
else
echo "The PHP 7.1 repo is already added. If you believe this to ben an error, please type 'rm -rf /var/www/.php71-added' to remove the file which prevents the repository from being added again."
fi
# Fix dpkg problems that happened somehow
dpkg --configure -a
echo "Updating & installing PHP 7.1 onto your system..."
apt-get update
apt-get install --force-yes php7.1 -y
# Install the other required packages
apt-get install --force-yes apache2 python2.7 python-twisted dnsmasq git curl -y
}
# config_mysql: non-interactively install mysql-server with the hard-coded
# root password "passwordhere", install the PHP DB drivers, then create the
# cowfc database and its first admin user.
# SECURITY(review): the root password is hard-coded, passed on the command
# line (visible in `ps`), and the admin password is echoed back in clear
# text; the INSERT below is built by string interpolation and is injectable
# through the username/rank prompts.
function config_mysql {
echo "We will now configure MYSQL server."
# Pre-seed debconf so apt never prompts for the root password.
debconf-set-selections <<< 'mysql-server mysql-server/root_password password passwordhere'
debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password passwordhere'
apt-get -y install mysql-server
# We will now set the new mysql password in the AdminPage.php file.
# Do not change "passwordhere", as this will be the base for replacing it later
# The below sed command has NOT been tested so we don't know if this will work or not.
#sed -i -e 's/passwordhere/passwordhere/g' /var/www/html/_site/AdminPage.php
# Next we will install two more packages to make mysql and sqlite work with PHP
apt-get install --force-yes php7.1-mysql -y
apt-get install --force-yes sqlite php7.1-sqlite3 -y
# Now we will set up our first admin user
echo "Now we're going to set up our first Admin Portal user."
read -rp "Please enter the username you wish to use: " firstuser
read -rp "Please enter a password: " password
# bcrypt-hash is a helper shipped in the CoWFC repo cloned earlier.
hash=$(/var/www/CoWFC/SQL/bcrypt-hash "$password")
echo "We will now set the rank for $firstuser"
echo "At the moment, this does nothing. However in later releases, we plan to restrict who can do what."
echo "1: First Rank"
echo "2: Second Rank"
echo "3: Third Rank"
read -rp "Please enter a rank number [1-3]: " firstuserrank
echo "That's all the informatio I'll need for now."
echo "Setting up the cowfc users database"
echo "create database cowfc" | mysql -u root -ppasswordhere
echo "Now importing dumped cowfc database..."
mysql -u root -ppasswordhere cowfc < /var/www/CoWFC/SQL/cowfc.sql
echo "Now inserting user $firstuser into the database with password $password, hashed as $hash."
echo "insert into users (Username, Password, Rank) values ('$firstuser','$hash','$firstuserrank');" | mysql -u root -ppasswordhere cowfc
}
# re: optionally configure Google reCaptcha for the admin login by patching
# the placeholder keys in /var/www/html/config.ini; when declined, flips
# recaptcha_enabled to 0 instead.
function re {
echo "For added security, we recommend setting up Google's reCaptcha.
However, not many people would care about this, so we're making it optional.
Feel free to press the ENTER key at the prompt, to skip reCaptcha setup, or 'y' to proceed with recaptcha setup."
read -rp "Would you like to set up reCaptcha on this server? [y/N]: " recaptchacontinue
# NOTE(review): only a lowercase bare "y" enables setup; "Y"/"yes" fall
# through to the disable branch.
if [ "$recaptchacontinue" == y ] ; then
echo "In order to log into your Admin interface, you will need to set up reCaptcha keys. This script will walk you through it"
echo "Please make an account over at https://www.google.com/recaptcha/"
# Next we will ask the user for their secret key and site keys
read -rp "Please enter the SECRET KEY you got from setting up reCaptcha: " secretkey
read -rp "Please enter the SITE KEY you got from setting up reCaptcha: " sitekey
echo "Thank you! I will now add your SECRET KEY and SITE KEY to /var/www/html/_admin/Auth/Login.php"
# Replace SECRET_KEY_HERE with the secret key from our $secretkey variable
#sed -i -e "s/SECRET_KEY_HERE/$secretkey/g" /var/www/html/_admin/Auth/Login.php
sed -i -e "s/SECRET_KEY_HERE/$secretkey/g" /var/www/html/config.ini
# Replace SITE_KEY_HERE with the site key from our $sitekey variable
#sed -i -e "s/SITE_KEY_HERE/$sitekey/g" /var/www/html/_admin/Auth/Login.php
sed -i -e "s/recaptcha_site = SITE_KEY_HERE/recaptcha_site = $sitekey/g" /var/www/html/config.ini
else
sed -i -e "s/recaptcha_enabled = 1/recaptcha_enabled = 0/g" /var/www/html/config.ini
fi
}
# set-server-name: interactively pick a display name for the server and
# write it into the web UI's config.ini (empty input keeps "CoWFC").
function set-server-name {
    echo "This recent CoWFC update allows you to set your server's name"
    echo "This is useful if you want to whitelabel your server, and not advertise it as CoWFC"
    read -rp "Please enter the server name, or press ENTER to accept the default [CoWFC]: " servernameconfig
    # Branch-swapped form of the original -z test: only patch config.ini when
    # the operator actually typed something.
    if [ -n "$servernameconfig" ] ; then
        echo "Setting server name to $servernameconfig"
        sed -i -e "s/name = 'CoWFC'/name = '$servernameconfig'/g" /var/www/html/config.ini
    else
        echo "Using CoWFC as the server name."
    fi
}
# add-cron: ensure an @reboot cron entry exists that launches the master
# server via /start-altwfc.sh, creating the helper script and log dir first.
function add-cron {
echo "Checking if there is a cron available for $USER"
# If the entry is already in $USER's crontab, grep succeeds and the whole
# setup block is skipped.  (A missing crontab makes `crontab -l` fail,
# which also takes the setup branch.)
if ! crontab -l -u "$USER" |grep "@reboot sh /start-altwfc.sh >/cron-logs/cronlog 2>&1" ; then
echo "No cron job is currently installed"
echo "Working the magic. Hang tight!"
cat > /start-altwfc.sh <<EOF
#!/bin/sh
cd /
chmod 777 /var/www/dwc_network_server_emulator -R
cd var/www/dwc_network_server_emulator
python master_server.py
cd /
EOF
chmod 777 /start-altwfc.sh
mkdir -p /cron-logs
if ! command -v crontab ; then
apt-get install cron -y
fi
echo "Creating the cron job now!"
# NOTE(review): `crontab <file>` replaces the user's whole crontab; any
# pre-existing unrelated entries are lost.
echo "@reboot sh /start-altwfc.sh >/cron-logs/cronlog 2>&1" >/tmp/alt-cron
crontab -u "$USER" /tmp/alt-cron
echo "Done!"
fi
}
# install_website: wipe the Apache docroot, copy in the CoWFC web UI from the
# cloned repo, and create the (world-writable) gpcm.db used by the emulator.
function install_website {
# First we will delete evertyhing inside of /var/www/html
rm -rf /var/www/html/*
# Let's download the HTML5 template SBAdmin so that the Admin GUI looks nice
# Download the stuff
#wget https://github.com/BlackrockDigital/startbootstrap-sb-admin/archive/gh-pages.zip -O sb-admin.zip
#unzip sb-admin.zip
#if [ $? != "0" ] ; then
# apt-get --force-yes install unzip -y
# unzip sb-admin.zip
#fi
# Copy required directories and files to /var/www/html
#cp /var/www/startbootstrap-sb-admin-gh-pages/css/ /var/www/html/ -R && cp /var/www/startbootstrap-sb-admin-gh-pages/js /var/www/html/ -R && cp /var/www/startbootstrap-sb-admin-gh-pages/scss/ /var/www/html/ -R && cp /var/www/startbootstrap-sb-admin-gh-pages/vendor/ /var/www/html/ -R
# We'll download and install the main template next
#wget https://html5up.net/landed/download -O html5up-landed.zip
#unzip html5up-landed.zip -d landed
# We could put varous cp commands here to copy the needed files
# Then we will copy the website files from our CoWFC Git
cp /var/www/CoWFC/Web/* /var/www/html -R
chmod 777 /var/www/html/bans.log
# Let's restart Apache now
service apache2 restart
echo "Creating gpcm.db file"
touch /var/www/dwc_network_server_emulator/gpcm.db
# SECURITY(review): 777 on the whole emulator tree is very permissive.
chmod 777 /var/www/dwc_network_server_emulator/ -R
}
# MAIN
# Call update function
if [ "$1" != "-s" ]; then # If there is no -s argument then run the updater
update # This will call our update function
fi
#echo "******************************************* WARNING!*******************
#*****************************************************************************
#IT HAS BEEN DISCOVERED THAT BUILDS ON THE LATEST UBUNTU UPDATES WILL FAIL!
#*****************************************************************************
#"
#read -p "Press [ENTER] to continue at your own risk, or ctrl+c to abort."
# First we will check if we are on Ubuntu - this isn't 100% going to work,
# but if we're running Debian, it should be enough for what we need this check
# to do.
if [ -f /etc/lsb-release ] ; then
if grep -q "14.04" /etc/lsb-release || grep -q "16.04" /etc/lsb-release ; then
CANRUN="TRUE"
elif [ -f /var/www/.aws_install ] ; then
CANRUN="TRUE"
else
echo "It looks like you are not running on a supported OS."
echo "Please open an issue and request support for this platform."
fi
fi
# Determine if our script can run
if [ "$CANRUN" == "TRUE" ] ; then
# Our script can run since we are on Ubuntu
# Put commands or functions on these lines to continue with script execution.
# The first thing we will do is to update our package repos but let's also make sure that the user is running the script in the proper directory /var/www
if [ "$PWD" == "/var/www" ] ; then
apt-get update
# Let's install required packages first.
install_required_packages
# Then we will check to see if the Gits for CoWFC and dwc_network_server_emulator exist
if [ ! -d "/var/www/CoWFC" ] ; then
echo "Git for CoWFC does not exist in /var/www/"
# Retry the clone up to 5 times (C1 counts 0..4); give up afterwards.
while ! git clone https://github.com/kyle95wm/CoWFC.git && [ "$C1" -le "4" ] ; do
echo "GIT CLONE FAILED! Retrying....."
(( C1=C1+1 ))
done
if [ "$C1" == "5" ] ; then
echo "Giving up"
exit 1
fi
fi
if [ ! -d "/var/www/dwc_network_server_emulator" ] ; then
echo "Git for dwc_network_server_emulator does not exist in /var/www"
#git clone https://github.com/mh9924/dwc_network_server_emulator.git
# Same 5-attempt retry loop as above, tracked by C2.
while ! git clone https://github.com/kyle95wm/dwc_network_server_emulator.git && [ "$C2" -le "4" ] ; do
echo "GIT CLONE FAILED! Retrying......"
(( C2=C2+1 ))
done
if [ "$C2" == "5" ] ; then
echo "Giving up"
exit 1
fi
echo "Setting proper file permissions"
chmod 777 /var/www/dwc_network_server_emulator/ -R
fi
# Configure DNSMASQ
dns_config
# Let's set up Apache now
create_apache_vh_nintendo
create_apache_vh_wiimmfi
apache_mods # Enable reverse proxy mod and PHP 7.1
install_website # Install the web contents for CoWFC
config_mysql # We will set up the mysql password as "passwordhere" and create our first user
re # Set up reCaptcha
add-cron #Makes it so master server can start automatically on boot
set-server-name # Set your server's name
#a fix to fix issue: polaris-/dwc_network_server_emulator#413
cat >>/etc/apache2/apache2.conf <<EOF
HttpProtocolOptions Unsafe LenientMethods Allow0.9
EOF
echo "Thank you for installing CoWFC."
# BUG FIX: the URL previously contained "§ion=Dashboard" — an HTML-entity
# mangling where "&sect" was collapsed into the section sign.  Restored to
# the intended "&section" query parameter ("&" is literal inside quotes).
echo "If you wish to access the admin GUI, please go to http://$IP/?page=admin&section=Dashboard"
read -rp "Please hit the ENTER key to reboot now, or press ctrl+c and reboot whenever it is convenient for you: [ENTER] " rebootenterkey
if [ -z "$rebootenterkey" ] ; then
reboot
fi
# Let's make our hidden file so that our script will know that we've already installed the server
# This will prevent accidental re-runs
# NOTE(review): if the user pressed ENTER above, `reboot` was already issued
# before this marker is written — the touch may race the shutdown and the
# marker may never be created.
touch /etc/.dwc_installed
reboot
exit 0
# DO NOT PUT COMMANDS UNDER THIS FI
fi
else
# CANRUN was never set to TRUE: unsupported platform.
echo "Sorry, you do not appear to be running a supported Opperating System."
echo "Please make sure you are running Ubuntu 14.04, and try again!"
exit 1
fi
| true |
cd0ce8874c5e15c8716dfe6a90ef3ab652a7e22b | Shell | apbarrero/gmockbuild | /gmockbuild.sh | UTF-8 | 2,511 | 4.21875 | 4 | [] | no_license | #!/bin/bash
# Configuration: pick the shared-library suffix for the current platform.
os=$(uname -s)

case $os in
    "Darwin")
        lib_suffix="dylib"
        ;;
    "Linux")
        lib_suffix="so"
        ;;
    *)
        # BUG FIX: the original pattern was `?)`, which matches exactly ONE
        # character and therefore never fired for multi-character uname
        # output such as "FreeBSD" — unsupported platforms fell through
        # silently with lib_suffix unset.
        echo "Platform $os not supported"
        exit 1
        ;;
esac

# Defaults (overridable via command-line options parsed below)
gmock_version='1.7.0'
workdir=$(mktemp -d /tmp/tmp.XXXXX)
prefix=/usr/local
shared="yes"
update_ldconf=false
# usage: print the help text to stdout.  $gmock_version and $prefix expand to
# the current defaults at the time usage is called.
function usage
{
cat <<EOF
Usage: $0 [options]
-h print this help message.
-v <version> gmock version to build (Default: $gmock_version)
-p <path> provide installation prefix path (Default: $prefix).
-s build static libraries, default is shared.
-l update ldconfig cache to include <prefix>/lib path.
Option is ignored if -s is present.
(Requires root privileges).
EOF
}
# Options parsing
while getopts "hv:p:sl" OPTION
do
case $OPTION in
h)
usage
exit 0
;;
v)
gmock_version=$OPTARG
;;
p)
prefix=$OPTARG
;;
s)
# Static build: flip the flag and switch the suffix we install by.
shared="no"
lib_suffix="a"
;;
l)
update_ldconf=true
;;
?)
# getopts reports invalid options by setting OPTION to '?'.  The bare
# `?` pattern matches any single character, which works here only
# because all valid single-char options are handled above; `\?)` would
# be the unambiguous spelling.
usage
exit
;;
esac
done
# Get GoogleMock sources
# NOTE(review): googlecode.com has been shut down for years; this download
# URL almost certainly no longer works and needs migrating to GitHub.
curl https://googlemock.googlecode.com/files/gmock-$gmock_version.zip > $workdir/gmock-$gmock_version.zip || exit 1
unzip -d $workdir $workdir/gmock-$gmock_version.zip || exit 1
gmocksrcdir=$workdir/gmock-$gmock_version
# Apply patch if available (patches/ is resolved relative to the CWD the
# script was launched from, not the script's own directory).
gmock_patch="$(pwd)/patches/gmock-$gmock_version.patch"
if [ -f "$gmock_patch" ]; then
pushd $gmocksrcdir
patch -p0 -i $gmock_patch
popd
fi
# Build the libraries (shared by default, static with -s)
cd "$gmocksrcdir" || exit 1
mkdir build && cd build || exit 1
# BUG FIX: the original line was `local cmake_options=""`; `local` is only
# valid inside a function, so bash aborted the statement with an error and
# cmake_options was left unset by accident rather than by design.
cmake_options=""
if [ "$shared" == "yes" ]; then
    cmake_options="-DBUILD_SHARED_LIBS=ON"
fi
# Intentionally unquoted: when empty it must disappear entirely from argv.
cmake $cmake_options ..
make
# Install: create <prefix>/include and <prefix>/lib, copy the built
# libraries (matched by the platform suffix chosen above) and both the gmock
# and bundled gtest headers, then remove the workspace.
subdirs=( include lib )
for subdir in "${subdirs[@]}"
do
test -d $prefix/$subdir || mkdir -p $prefix/$subdir
done
find . -name "lib*.$lib_suffix" -exec cp {} $prefix/lib/ \;
includes=( $gmocksrcdir/include $gmocksrcdir/gtest/include )
for inc in "${includes[@]}"
do
cp -r $inc/* $prefix/include/
done
# Remove workspace directory
rm -rf $workdir
# Update ldconfig cache (only meaningful for shared builds; requires root).
# NOTE(review): `-a` inside [ ] is obsolescent; `[ x ] && [ y ]` is preferred.
if [ "$shared" == "yes" -a "$update_ldconf" == "true" ]; then
ldconf_dir="$prefix/lib"
# Append the lib dir to /etc/ld.so.conf only if not already present.
grep $ldconf_dir /etc/ld.so.conf
if [ "$?" -ne "0" ]; then
echo $ldconf_dir >> /etc/ld.so.conf
fi
ldconfig
fi
| true |
58b0f96aba627508932a97e228c6f836836a2876 | Shell | vixtor-qm/utility-scripts | /restartGPU.sh | UTF-8 | 215 | 2.765625 | 3 | [] | no_license | #! /bin/sh
# Workaround for not working nvidia cuda after suspend
# check which process uses driver currently
# NOTE(review): in the grep pattern the "." matches any character; use
# "nvidia.uvm" literally or escape it if precision matters.
lsof | grep nvidia.uvm
# Leftover scaffolding from a pm-utils hook (resume|thaw); currently the
# module is always reloaded unconditionally.
#case $1 in
# resume|thaw)
rmmod nvidia_uvm
modprobe nvidia_uvm
#;;
#esac | true |
7d846ddc84c58fcc2e9279ed008004e0ec217e24 | Shell | hengne/UFHiggsPattuplizer13TeV | /Utilities/make_grid.sh | UTF-8 | 482 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# Create (and with --go also submit) CRAB jobs for every dataset directory
# under the given input directory.
# Usage: make_grid.sh [--go] <inputDir>
GO=0; if [[ "$1" == "--go" ]]; then GO=1; shift; fi

if [ -z "$1" ]; then
echo "Please pass an input directory!"
exit 1;
fi
inputDir=$1; shift;
# Quoted and checked: previously a bad path left us iterating the wrong dir.
cd "${inputDir}" || exit 1
# BUG FIX: the old loop iterated `$(ls .)`, which breaks on whitespace and
# also yields plain files; worse, when `cd $d` failed the trailing `cd ../`
# still ran, drifting the script up the directory tree.  Iterate directories
# via the glob and skip anything we cannot enter.
for d in */
do
    d=${d%/}                      # strip the trailing slash so crab_$d matches the old naming
    cd "$d" || continue
    # Only create jobs when no crab_* working dir exists yet.
    n=$(ls -1d crab_* 2>/dev/null | wc -l)
    if [[ "$n" == "0" ]]; then
        if [[ "$GO" == "1" ]]; then crab -cfg crab.cfg -USER.ui_working_dir=crab_$d -create -submit;
        else
            crab -cfg crab.cfg -USER.ui_working_dir=crab_$d -create;
        fi
    fi
    cd ../
done
cd ../
| true |
cc4878589fc7ce4b96784cb022aee713a3f3d7ef | Shell | kentron/exp | /bin/pclo | UTF-8 | 242 | 3.546875 | 4 | [] | no_license | #!/bin/sh
# pclo: print (recursively) every descendant PID of the process whose PID is
# stored in the pid file passed as $1.
PID_FILE=$1
# NOTE(review): unquoted expansion — breaks if the pid-file path has spaces.
PID=$(cat ${PID_FILE})
# children_of PID — print the direct child PIDs of PID, one per line.
# Exit status is pgrep's (non-zero when there are no children).
children_of () {
    PID=$1
    # Quote the expansion so an empty/odd PID is passed through verbatim
    # instead of silently dropping the -P argument.
    pgrep -P "$PID"
}
# children_of_rec PID — print every descendant PID of PID, depth-first.
children_of_rec () {
    PID=$1
    # $(children_of ...) runs in a subshell, so its own PID=... assignment
    # cannot clobber this function's PID mid-loop.
    for p in $(children_of "$PID") ; do
        echo "$p"
        children_of_rec "$p"
    done
}
# Walk the process tree starting at the PID read from the pid file.
children_of_rec $PID
| true |
6ced11fda81b2ddc83d9bd38f81475cfc72cc420 | Shell | tbzrawh/cloud-native-brewery | /appdynamics/appd_extensions/start-appd.sh | UTF-8 | 1,527 | 3 | 3 | [] | no_license | #!/bin/bash
# Select the AppDynamics extension and tier for this container based on the
# PERSONALITY env var, then assemble the machine-agent JVM properties from
# the CONTROLLER_*/ACCOUNT_*/APP_NAME environment and launch the agent.
if [ "${PERSONALITY}" == "local-apache" ]; then
mv /tmp/ApacheMonitor ${MACHINE_AGENT_HOME}monitors/
TIER=FrontendUI
fi
if [ "${PERSONALITY}" == "local-nginx" ]; then
mv /tmp/NginxMonitor ${MACHINE_AGENT_HOME}monitors/
TIER=AdminUI
fi
if [ "${PERSONALITY}" == "local-activemq" ]; then
mv /tmp/ActiveMQMonitor ${MACHINE_AGENT_HOME}monitors/
TIER=ProdQueue
fi
# Short hostname (strip everything after the first dot).
HOST=${HOSTNAME%%.*}
# ${ACCOUNT_NAME%%_*}: keep only the part before the first underscore.
MA_PROPERTIES="-Dappdynamics.controller.hostName=${CONTROLLER_HOST}"
MA_PROPERTIES+=" -Dappdynamics.controller.port=${CONTROLLER_PORT}"
MA_PROPERTIES+=" -Dappdynamics.controller.ssl.enabled=${CONTROLLER_SSL}"
MA_PROPERTIES+=" -Dappdynamics.agent.accountName=${ACCOUNT_NAME%%_*}"
MA_PROPERTIES+=" -Dappdynamics.agent.accountAccessKey=${ACCESS_KEY}"
MA_PROPERTIES+=" -Dappdynamics.agent.applicationName=${APP_NAME}"
MA_PROPERTIES+=" -Dappdynamics.agent.tierName=${TIER}"
MA_PROPERTIES+=" -Dappdynamics.agent.nodeName=${HOST}"
# Optional HTTP(S) proxy: "x..." comparison is the classic guard against an
# unset/empty variable.
if [ "x${PROXY_HOST}" != "x" ]; then
MA_PROPERTIES+=" -Dappdynamics.http.proxyHost=${PROXY_HOST}"
MA_PROPERTIES+=" -Dappdynamics.https.proxyHost=${PROXY_HOST}"
fi
if [ "x${PROXY_PORT}" != "x" ]; then
MA_PROPERTIES+=" -Dappdynamics.http.proxyPort=${PROXY_PORT}"
MA_PROPERTIES+=" -Dappdynamics.https.proxyPort=${PROXY_PORT}"
fi
# Enable Server Visibility (sim.enabled=true) and disable Docker monitoring.
# NOTE(review): the original comment said "Disable SIM", but the flag is
# explicitly set to true below.
MA_PROPERTIES+=" -Dappdynamics.sim.enabled=true -Dappdynamics.docker.enabled=false"
# Start Machine Agent (MA_PROPERTIES is deliberately unquoted so the
# accumulated flags word-split into separate JVM arguments).
${MACHINE_AGENT_HOME}jre/bin/java ${MA_PROPERTIES} -jar ${MACHINE_AGENT_HOME}machineagent.jar | true |
d3ade02579a72d77ad3984b80ad0ec74fb690990 | Shell | alex312/install-scripts | /base.sh | UTF-8 | 1,514 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# Install a tiny "x" launcher that runs its arguments detached and silenced.
# Fix vs. original: appends were unguarded, so re-running could duplicate
# lines; paths are now quoted.
function install_x() {
    if [ ! -d "$HOME/bin" ]; then
        mkdir "$HOME/bin"
    fi
    echo '#!/bin/bash' >>"$HOME/bin/x"
    echo '"$@" 1>/dev/null 2>&1 &' >>"$HOME/bin/x"
    chmod +x "$HOME/bin/x"
}
command -v x >/dev/null 2>&1 || install_x
# Install or refresh the `ll` alias in ~/.bashrc.
if grep -q "alias ll=" "$HOME/.bashrc"; then
    sed -i '/^alias ll=/c\alias ll="ls -alFh"' "$HOME/.bashrc"
else
    sed -i '$a\alias ll="ls -alFh"' "$HOME/.bashrc"
fi
# Download directory ("下载" = "Downloads"); make sure it exists before
# wget -O writes into it.
DL_DIR=$HOME/下载
mkdir -p "$DL_DIR"
wget 'http://kdl1.cache.wps.com/ksodl/download/linux/a21//wps-office_10.1.0.5707~a21_amd64.deb' -O "$DL_DIR/wps-office.deb"
sudo dpkg -i "$DL_DIR/wps-office.deb"
sudo apt-get install -f
# BUGFIX: the original line was missing -O, so the local path was treated as
# a second URL and the fonts package was never saved where dpkg expected it.
wget 'http://kdl.cc.ksosoft.com/wps-community/download/fonts/wps-office-fonts_1.0_all.deb' -O "$DL_DIR/wps-office-fonts.deb"
sudo dpkg -i "$DL_DIR/wps-office-fonts.deb"
sudo apt-get install -f
wget 'http://cdn2.ime.sogou.com/dl/index/1509619794/sogoupinyin_2.2.0.0102_amd64.deb?st=nwMOF9j7R0arpmasfKw9JA&e=1511109222&fn=sogoupinyin_2.2.0.0102_amd64.deb' \
    -O "$DL_DIR/sogoupinyin.deb"
sudo dpkg -i "$DL_DIR/sogoupinyin.deb"
sudo apt-get install -f
# Remove bundled applications that are not wanted on this machine.
sudo apt-get purge -y libreoffice-common unity-webapps-common rhythmbox brasero \
    simple-scan gnome-mahjongg empathy aisleriot gnome-mines \
    cheese transmission-common gnome-orca webbrowser-app gnome-sudoku \
    landscape-client-ui-install
sudo apt-get autoremove
sudo apt-get autoclean
# Install shfmt if it is not already on the PATH.
command -v shfmt ||
    {
        sudo wget 'https://github.com/mvdan/sh/releases/download/v2.0.0/shfmt_v2.0.0_linux_amd64' -O /usr/bin/shfmt
        sudo chmod +x /usr/bin/shfmt
    }
85abda9981c9499d998e759bb97751e7a23610ab | Shell | askformore/b-dotfiles | /b-vim/vim.config/install-vim-for-docker.sh | UTF-8 | 2,330 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# ====================================================
# Copyright (C)2015 All rights reserved.
#
# Author        : bbxytl
# Email         : bbxytl@gmail.com
# File Name     : install-vim.sh
# Last Modified : 2015-10-30 14:54
# Describe      : symlink the vim configuration into $HOME; pass --complex
#                 to enable the heavyweight bundle list (YCM etc.).
#
# Log           :
#
# ====================================================

# Link $1 to $2 only when the source exists.
lnif(){
    if [ -e "$1" ];then
        ln -sf "$1" "$2"
    fi
}
# Install the "simple" variant by default.
COMPLEX=false
if [ $# -ge 1 ];then
    # BUGFIX: the original test was `[ $1="--complex" ]`, a single non-empty
    # word that is always true, so any argument enabled complex mode and the
    # error branch was unreachable.
    if [ "$1" = "--complex" ];then
        COMPLEX=true
    else
        echo "Error arg! no arg to rum simple , arg: --complex to use ycm and so on !"
        exit
    fi
fi
BASEDIR=$(dirname "$0")
cd "$BASEDIR"
CURRENT_DIR=$(pwd)
echo " Step 1: setting tu symlinks----------Vim"
if $COMPLEX;then
    bundlesfile=$CURRENT_DIR/vimrc.bundles_complex
else
    bundlesfile=$CURRENT_DIR/vimrc.bundles
fi
# -f: do not fail when a previous install never created these files.
rm -f "$HOME/.vimrc" "$HOME/.vimrc.bundles" "$HOME/.vimrc.config_base" "$HOME/.vimrc.config_filetype"
lnif "$CURRENT_DIR/vimrc" "$HOME/.vimrc"
lnif "$bundlesfile" "$HOME/.vimrc.bundles"
lnif "$CURRENT_DIR/vimrc.config_base" "$HOME/.vimrc.config_base"
lnif "$CURRENT_DIR/vimrc.config_filetype" "$HOME/.vimrc.config_filetype"
# (original listed tags_list twice — once is enough)
rm -f "$HOME/.vim/tags_list"
# Pick the ctags list matching the host OS.
SYS_VERSION=$(uname -s)
if [ "$SYS_VERSION" = 'Darwin' ];then
    lnif "$CURRENT_DIR/tags_list_of_cpp/tags_list_mac" "$HOME/.vim/tags_list"
elif [ "$SYS_VERSION" = 'Linux' ];then
    lnif "$CURRENT_DIR/tags_list_of_cpp/tags_list_linux" "$HOME/.vim/tags_list"
fi
# Replace bundled colorschemes with the project's patched copies (keep backups).
if [ -e "$HOME/.vim/bundle/molokai/colors/molokai.vim" ];then
    mv "$HOME/.vim/bundle/molokai/colors/molokai.vim" "$HOME/.vim/bundle/molokai/colors/molokai.vim.bk"
    cp "$CURRENT_DIR/project_vimrc/molokai.vim" "$HOME/.vim/bundle/molokai/colors/molokai.vim"
fi
if [ -e "$HOME/.vim/bundle/vim-syntax-match/colors/syntaxmatch.vim" ];then
    mv "$HOME/.vim/bundle/vim-syntax-match/colors/syntaxmatch.vim" "$HOME/.vim/bundle/vim-syntax-match/colors/syntaxmatch.vim.bk"
    cp "$CURRENT_DIR/project_vimrc/syntaxmatch.vim" "$HOME/.vim/bundle/vim-syntax-match/colors"
fi
# Patch the system-wide vim 8.0 C/C++ syntax files (keep backups).
if [ -d /usr/share/vim/vim80/syntax ];then
    sudo mv /usr/share/vim/vim80/syntax/c.vim /usr/share/vim/vim80/syntax/c.vim.bk
    sudo cp "$CURRENT_DIR/project_vimrc/c.vim" /usr/share/vim/vim80/syntax/c.vim
    sudo mv /usr/share/vim/vim80/syntax/cpp.vim /usr/share/vim/vim80/syntax/cpp.vim.bk
    sudo cp "$CURRENT_DIR/project_vimrc/cpp.vim" /usr/share/vim/vim80/syntax/cpp.vim
fi
03fbd52f462c9aec6ded7fdda88e5d4028fd325e | Shell | isabella232/sigmod2014-contest-graphblas | /cpp/scripts/rebuild.sh | UTF-8 | 539 | 3.515625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Fail fast and echo every command as it runs.
set -e -x

# Build type resolution order: first CLI argument, then the BUILD_TYPE
# environment variable, then "Release".
BUILD_TYPE=${1:-${BUILD_TYPE:-Release}}
build_type_lc=$(echo $BUILD_TYPE | tr '[:upper:]' '[:lower:]')
PRINT_RESULTS=${PRINT_RESULTS:-1}

# The C++ tree lives one directory above this script.
cpp_root=$(dirname "$0")/..
build_dir=cmake-build-$build_type_lc

# Recreate the build directory from scratch, configure, and build.
cd "$cpp_root"
rm -rf "$build_dir"
mkdir "$build_dir"
cd "$build_dir"
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DPRINT_RESULTS=$PRINT_RESULTS ..
make -j$(nproc)
82825b76ccf3d692bd382345609e92e450178203 | Shell | davidpengsun/Insight-AdFlow | /kafka/tmp/spawn_kafka_streams.sh | UTF-8 | 270 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Spawn $1 tmux windows inside session $2, each running the Kafka producer.
NUM_SPAWNS=$1
SESSION=$2
PRODUCER_DIR="/home/ubuntu/insight/kafka"
tmux new-session -s "$SESSION" -n bash -d
for ID in $(seq 1 "$NUM_SPAWNS");
do
    echo "$ID"
    tmux new-window -t "$ID"
    # BUGFIX: the command was single-quoted, so the literal string
    # '$PRODUCER_DIR/producer.sh' was typed into the new pane, where
    # PRODUCER_DIR is undefined and expands empty. Expand it here instead.
    tmux send-keys -t "$SESSION:$ID" "bash $PRODUCER_DIR/producer.sh" C-m
done
40abb39c7d09b7768356086d5328183a45b1dcb7 | Shell | terjr/nrfjprog.sh | /nrfjprog.sh | UTF-8 | 2,842 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Usage text; `read -d ''` slurps the whole here-doc into $USAGE.
read -d '' USAGE <<- EOF
nrfprog.sh

This is a loose shell port of the nrfjprog.exe program distributed by Nordic,
which relies on JLinkExe to interface with the JLink hardware.

usage:

nrfjprog.sh action hexfile

where action is one of
  --reset
  --pin-reset
  --erase-all
  --flash
  --flash-softdevice
EOF

TOOLCHAIN_PREFIX=arm-none-eabi
# assume the tools are on the system path
TOOLCHAIN_PATH=
OBJCOPY=$TOOLCHAIN_PATH$TOOLCHAIN_PREFIX-objcopy
OBJDUMP=$TOOLCHAIN_PATH$TOOLCHAIN_PREFIX-objdump

JLINK_OPTIONS="-device nrf51822 -if swd -speed 1000"

HEX=$2
# assume there's an out and bin file next to the hexfile
OUT=${HEX/.hex/.out}
BIN=${HEX/.hex/.bin}

# NOTE: deliberately unquoted below — $JLINK holds the command plus options
# and relies on word splitting.
JLINK="JLinkExe $JLINK_OPTIONS"
JLINKGDBSERVER="JLinkGDBServer $JLINK_OPTIONS"

# the script commands come from Makefile.posix, distributed with nrf51-pure-gcc
TMPSCRIPT=/tmp/tmp_$$.jlink
TMPBIN=/tmp/tmp_$$.bin

if [ "$1" = "--reset" ]; then
    echo ""
    echo "resetting..."
    echo "------------"
    echo ""
    echo "r" > "$TMPSCRIPT"
    echo "g" >> "$TMPSCRIPT"
    echo "exit" >> "$TMPSCRIPT"
    $JLINK "$TMPSCRIPT"
    rm "$TMPSCRIPT"
elif [ "$1" = "--pin-reset" ]; then
    echo "resetting with pin..."
    # Pulse the reset via the RESET pin configuration register.
    echo "w4 40000544 1" > "$TMPSCRIPT"
    echo "r" >> "$TMPSCRIPT"
    echo "exit" >> "$TMPSCRIPT"
    $JLINK "$TMPSCRIPT"
    rm "$TMPSCRIPT"
elif [ "$1" = "--erase-all" ]; then
    echo ""
    echo "perfoming full erase..."
    echo "-----------------------"
    echo ""
    # NVMC: enable erase (CONFIG=2), then ERASEALL=1, wait, reset.
    echo "w4 4001e504 2" > "$TMPSCRIPT"
    echo "w4 4001e50c 1" >> "$TMPSCRIPT"
    echo "sleep 100" >> "$TMPSCRIPT"
    echo "r" >> "$TMPSCRIPT"
    echo "exit" >> "$TMPSCRIPT"
    $JLINK "$TMPSCRIPT"
    rm "$TMPSCRIPT"
elif [ "$1" = "--flash" ]; then
    echo ""
    echo "flashing $BIN..."
    echo "------------------------------------------"
    echo ""
    FLASH_START_ADDRESS=$($OBJDUMP -h "$OUT" -j .text | grep .text | awk '{print $4}')
    echo "r" > "$TMPSCRIPT"
    echo "loadbin $BIN $FLASH_START_ADDRESS" >> "$TMPSCRIPT"
    echo "r" >> "$TMPSCRIPT"
    echo "g" >> "$TMPSCRIPT"
    echo "exit" >> "$TMPSCRIPT"
    $JLINK "$TMPSCRIPT"
    rm "$TMPSCRIPT"
elif [ "$1" = "--flash-softdevice" ]; then
    echo ""
    echo "flashing softdevice $HEX..."
    echo "------------------------------------------"
    echo ""
    $OBJCOPY -Iihex -Obinary "$HEX" "$TMPBIN"
    # Write to NVMC to enable erase, do erase all, wait for completion. reset
    echo "w4 4001e504 2" > "$TMPSCRIPT"
    echo "w4 4001e50c 1" >> "$TMPSCRIPT"
    echo "sleep 100" >> "$TMPSCRIPT"
    echo "r" >> "$TMPSCRIPT"
    # Write to NVMC to enable write. Write mainpart, write UICR. Assumes device is erased.
    # BUGFIX: this used `>` and truncated the script, silently discarding the
    # erase sequence above; append instead so the erase actually runs.
    echo "w4 4001e504 1" >> "$TMPSCRIPT"
    echo "loadbin $TMPBIN 0" >> "$TMPSCRIPT"
    echo "r" >> "$TMPSCRIPT"
    echo "g" >> "$TMPSCRIPT"
    echo "exit" >> "$TMPSCRIPT"
    $JLINK "$TMPSCRIPT"
    rm "$TMPSCRIPT"
    rm "$TMPBIN"
else
    echo "$USAGE"
fi
45f12e7268495038a22f6b133d03375303bfa7fd | Shell | aravindvnair99/Motorola-Moto-E-condor-unbrick | /Source Codes and Scripts/repack_ramdisk | UTF-8 | 337 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Repacks a ramdisk for use inside a boot.img
# Optionally taking an output to repack the ramdisk into.
# BUGFIXES: usage error now exits non-zero (was exit 0), cd is checked,
# and arguments are quoted so paths with spaces work.
if [ -z "$1" ]
then
    echo "Usage: repack_ramdisk <ramdiskDirectory> [outputFile]" >&2
    exit 1
fi
cd "$1" || exit 1
# newc = SVR4 cpio format expected by the Android boot loader.
find . | cpio -o -H newc | gzip > ../new-ramdisk.cpio.gz
if [ -z "$2" ]
then
    exit 0
else
    mv ../new-ramdisk.cpio.gz "../$2"
fi
b479c8bf6e74daebf329758cd179b6f8efcf11db | Shell | elc1798/aptproxyget | /apt-proxy-get.sh | UTF-8 | 1,103 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# Manage the APT proxy fragment at /etc/apt/apt.conf.d/01proxy:
#   -c  show it if present, -a  install it, -r  remove it, -h  help.
proxy_conf="/etc/apt/apt.conf.d/01proxy"
while getopts "carh" flag; do
    case "$flag" in
        c)
            if [ -e "$proxy_conf" ]; then
                echo "$proxy_conf exists."
                echo "Contents: "
                cat "$proxy_conf"
            else
                echo "$proxy_conf does not currently exist"
            fi
            ;;
        a)
            # Install the saved proxy rules.
            sudo cp ~/.aptproxyget/01proxy.aptproxyget.data "$proxy_conf"
            ;;
        r)
            if [ -e "$proxy_conf" ]; then
                sudo rm "$proxy_conf"
            else
                echo "$proxy_conf does not exist"
            fi
            ;;
        h)
            echo "Commands:"
            echo "  -c  Checks $proxy_conf existence and outputs the contents of the file"
            echo "  -a  Adds $proxy_conf and the rules for the NYCBOE proxy"
            echo "  -r  Removed $proxy_conf if it exists"
            echo "  -h  Displays this message"
            ;;
    esac
done
4e2471496358fdbf5d80e42f29601e2fd8ec0fa7 | Shell | 0intro/hare | /sys/src/cmd/uem/tests/bg_scale_run.rc | UTF-8 | 3,461 | 3.25 | 3 | [] | no_license | #!/bin/rc
#
# a UEM Scale Test Run
#
# to run on the BG:
# R_MAX=4 # or the max number of tasks
# rc bg_run 0xFFDFF
# 1 is for the BG
if(~ $#BG 0) BG=1
if(~ $BG 0) O=8
if(~ $BG 0) CMDPATH=$O.
if not CMDPATH=''
if(~ $#R_MIN 0) R_MIN=1
if(~ $#R_MAX 0){
if(~ $BG 1) R_MAX=1
if not R_MAX=8
}
#R_MAX=16
#R_MAX=256
#R_MAX=2048
#TESTNUM=''
# fake the IO node's IP
IO=128.0.1
IP=11.$IO
fn enum_tests {
TESTNUM=`{awk 'BEGIN {
for (i = '$R_MAX'; i >= '$R_MIN'; i/=2) printf "%d ",i
exit }'}
#`# shut up the code coloring bug...
}
if(~ $BG 0) {
I_MAX=1
J_MAX=1
K_MAX=4
}
if not {
I_MAX=4
J_MAX=4
K_MAX=4
}
fn enum_srv {
SRVNUM=`{awk 'BEGIN {
for (i=0; i<'$I_MAX'; i++)
for (j=0; j<'$J_MAX'; j++)
for (k=1; k<='$K_MAX'; k++)
printf "%d.%d.%d ", i, j, k
exit }'}
#`# shut up the code coloring bug...
}
if(~ $#* 0) DEBUG='0xFFDFF'
if not DEBUG=$1
if(~ $#TESTNUM 0) enum_tests
enum_srv
echo -n 'TESTNUM= '
echo $TESTNUM
echo 'making output directories'
LOCALDIR=bg_scale
mkdir -p $LOCALDIR
TESTDIR=/tmp/testbed/sbg
mkdir -p $TESTDIR
if(~ $BG 0){
#rc tidy
echo '############ starting next '$IP' ############'
#cpunode=0 treeip=$IO profile.pbg $DEBUG
cpunode=0 treeip=$IO profile.pbg $DEBUG
mount -c /srv/io /n/io
# generate the fake CPU nodes and mount them
for(i in $SRVNUM){
echo '############ starting next '11.$i' ############'
cpunode=1 treeip=$i profile.pbg $DEBUG
}
}
if not {
echo '############ starting next '$IP' ############'
# cpunode=0 treeip=$IO rc profile.anl $DEBUG
# srvfs $IP /
# mount -c /srv/$IP /n/$IP
# mount -c /srv/io /n/$IP
mount -c /srv/io /n/io
# cpunode=0 treeip=$IO profile.anl $DEBUG
for(i in $SRVNUM){
9fs 11.$i
# echo '############ starting next '11.$i' ############'
# cpunode=1 treeip=$i profile.anl $DEBUG
}
}
for(i in $TESTNUM){
# clean out the previous run and leave the last one in case it dies
# rc tidy
# echo -n 'Serving CPU Nodes='
# echo $SRVNUM
# if(~ $BG 0){
# rc tidy
# echo '############ starting next '$IP' ############'
# cpunode=0 treeip=$IO profile.pbg $DEBUG
#
# # generate the fake CPU nodes and mount them
# for(i in $SRVNUM){
# echo '############ starting next '11.$i' ############'
# cpunode=1 treeip=$i profile.pbg $DEBUG
# }
# }
# if not {
# echo '############ starting next '$IP' ############'
# # cpunode=0 treeip=$IO rc profile.anl $DEBUG
# # srvfs $IP /
# # mount -c /srv/$IP /n/$IP
# mount -c /srv/io /n/$IP
# mount -c /srv/io /n/io
# # cpunode=0 treeip=$IO profile.anl $DEBUG
#
# for(i in $SRVNUM){
# 9fs 11.$i
# # echo '############ starting next '11.$i' ############'
# # cpunode=1 treeip=$i profile.anl $DEBUG
# }
# }
echo '############ starting next '$i' ############'
#mk tidy
rc bg_scale.rc $i $DEBUG
# echo '############ processing /'$i'/ ############'
# if(~ $#* 0 1) {
# mv $TESTDIR/time_$i^.dat $LOCALDIR/time_$i^_sav.dat
# $CMDPATH^time-stat -i $LOCALDIR/time_$i^_sav.dat -o $LOCALDIR/time_$i^.out
# }
# if not {
# mv $TESTDIR/time_$i^.dat $LOCALDIR/time_$i^_$1^_sav.dat
# $CMDPATH^time-stat -i $LOCALDIR/time_$i^_$2^_sav.dat -o $LOCALDIR/time_$i^_$2^.out
# }
# echo ''
}
#mk tidy
| true |
c3092d0022059c5dd63283253e5038e2a9c22b04 | Shell | Ash515/turbinia | /tools/gcp_init/start-wrapper.sh | UTF-8 | 960 | 3.78125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# To be run as root as a start up script. This does the minimum required to
# bootstrap copying the files from GCS and starting a Turbinia worker. It
# assumes that the virtualenv containing Turbinia, and the Turbinia config is
# already created and configured. Variables in this file and in
# start-turbinia-common.sh must be configured appropriately.
mount_point="/mnt/turbinia"
scripts_dir="$mount_point/scripts"
user="turbinia"
home_dir="/home/$user"
bucket="turbinia"
if [ ! -d $mount_point ] ; then
echo "Creating mount point $mount_point"
mkdir $mount_point
chown $user $mount_point
fi
if ! mount | grep $mount_point >/dev/null 2>&1 ; then
echo "Mounting GCS FUSE $bucket at $mount_point"
su - $user -c "GOOGLE_APPLICATION_CREDENTIALS=/home/turbinia/turbinia-service-account-creds.json gcsfuse $bucket $mount_point"
fi
su - $user -c "bash $scripts_dir/update-scripts.sh"
su - $user -c "$home_dir/start-turbinia-worker.sh"
| true |
0fd540dffce86b73b734d725fbd176fcc5d878a1 | Shell | alay3168/XGTestProjects | /publish_shell/scripts/build_body.sh | UTF-8 | 681 | 3.421875 | 3 | [] | no_license | #!/bin/bash
source /etc/profile
# Print the joined arguments to stdout highlighted in bold yellow.
# (%b interprets backslash escapes exactly like `echo -e` did.)
yellow_echo() {
    local msg="$*"
    printf '%b\n' "\e[1;33m-- ${msg} \e[0m"
}
# Print the joined arguments to stdout highlighted in bold green.
# (%b interprets backslash escapes exactly like `echo -e` did.)
green_echo() {
    local msg="$*"
    printf '%b\n' "\e[1;32m-- ${msg} \e[0m"
}
# Print the joined arguments to stdout highlighted in bold red.
# (%b interprets backslash escapes exactly like `echo -e` did.)
red_echo() {
    local msg="$*"
    printf '%b\n' "\e[1;31m-- ${msg} \e[0m"
}
yellow_echo "Start to build BodyDetect binary"
#Build BodyDetect
# BUGFIX: failures were only printed, never propagated — the script always
# exited 0, hiding broken builds from CI. Track the status and exit with it.
cd /workspace/BodyDetect || exit 1
chmod +x ./codegen && ./codegen
rm -rf build
mkdir -p build && cd build && cmake .. && make -j
build_rc=$?
if [ $build_rc -ne 0 ]; then
red_echo "Build BodyDetect failed"
else
green_echo "Build BodyDetect successfully"
fi
cd /workspace
yellow_echo "Finish building BodyDetect binary"
exit $build_rc
fb236f8bd5310c5a48287345c9b19b163ce714a1 | Shell | hrionda/Basic-UNIX-Scripts | /SysAdminScripts/VirtualHostStep.bash | UTF-8 | 1,117 | 2.765625 | 3 | [] | no_license | #CentOS
#cat /root/hrion001.csv | cut -d, -f8 | sort -u > fruits.txt
# Create an SSL Apache virtual host named $1 listening on port $2 (CentOS
# httpd layout): config fragment + document root. The heredoc is expanded,
# so $1/$2 are substituted into the written config.
CreateVirtualHost() {
	touch /etc/httpd/conf.d/hrion001/$1.conf
	mkdir /var/www/$1
	cat<<EOF > /etc/httpd/conf.d/hrion001/$1.conf
Listen $2
<VirtualHost *:$2>
ServerName localhost
DocumentRoot /var/www/$1/
SSLEngine on
SSLCertificateFile /root/hrion001-selfsigned.crt
SSLCertificateKeyFile /root/hrion001-selfsigned.key
</VirtualHost>
EOF
}
#PantherID = 56756(97)
# Create one vhost per fruit, on sequential ports starting at 8897.
PNUM=8897
# FIX: read -r so backslashes in input are kept literal; quote expansions.
while IFS=, read -r FRUIT
do
	CreateVirtualHost "$FRUIT" "$PNUM"
	PNUM=$((PNUM+1))
done <"fruits.txt"
systemctl restart httpd
#Fedora
# Fedora variant of CreateVirtualHost — same as the CentOS version above but
# without the extra "Listen $2" line. NOTE(review): this redefinition
# replaces the earlier function for the loop that follows.
CreateVirtualHost() {
	touch /etc/httpd/conf.d/hrion001/$1.conf
	mkdir /var/www/$1
	cat<<EOF > /etc/httpd/conf.d/hrion001/$1.conf
<VirtualHost *:$2>
ServerName localhost
DocumentRoot /var/www/$1/
SSLEngine on
SSLCertificateFile /root/hrion001-selfsigned.crt
SSLCertificateKeyFile /root/hrion001-selfsigned.key
</VirtualHost>
EOF
}
#PantherID = 56756(97)
# Fedora pass: sequential ports starting at 9997.
PNUM=9997
# FIX: read -r so backslashes in input are kept literal; quote expansions.
while IFS=, read -r FRUIT
do
	CreateVirtualHost "$FRUIT" "$PNUM"
	PNUM=$((PNUM+1))
done <"fruits.txt"
systemctl restart httpd
| true |
b8091c2977dc78cbf8fd90100834f24f10eb509e | Shell | sushengyang/NLP | /Corpus + Training/main.sh | UTF-8 | 1,244 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env bash
# This script uses the plaintext tweet data instead of converting the XML
# ones since the latter takes too long and may cause the file manager to
# become unresponsive due to the creation of >400,000 new files

# echo "Extracting corpus... "
# tar xvf data/corpus.tar.gz
# echo "Done."

# echo -n "Converting XML files to single file with plaintext tweets... "
# ./scripts/plaintextify.rb
# echo "Done."

# Stage 1: normalise raw tweets into data/tweets.plaintext.normal.
echo -n "Normalising plaintext tweet data... "
./scripts/normalise.rb data/tweets.plaintext data/tweets.plaintext.normal
echo "Done."

# Stage 2: POS-tag with TwitIE (must run from the tagger's directory).
echo -n "Adding part-of-speech tags... "
cd twitie-tagger/
java -jar twitie_tag.jar ../tagger_and_parser/POS\ Tagger/Training\ POS\ Models/finalpos.tagger ../data/tweets.plaintext.normal > ../data/tweets.plaintext.normal.pos
cd ..
echo "Done."

# Stage 3: parse the POS-tagged data with the Stanford lexicalised parser.
# NOTE(review): -cp "*" picks up jars from the current directory — this must
# be run from the directory containing the Stanford parser jars.
echo -n "Parsing POS-tagged data... "
java -cp "*" -mx1g edu.stanford.nlp.parser.lexparser.LexicalizedParser -sentences newline -tokenized -tagSeparator _ -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer -tokenizerMethod newCoreLabelTokenizerFactory -loadFromSerializedFile tagger_and_parser/Parser/serialized_models/SerializedModel10 data/tweets.plaintext.normal.pos > data/tweets.plaintext.normal.pos.parsed
echo "Done."
3650490e6e1f3182e02ad13169007a3787568a7e | Shell | gocom/dotfiles | /bin/timing | UTF-8 | 1,624 | 4.125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Program name (basename of $0) and version, used by usage/--version output.
app_name="${0##*/}"
app_version="0.0.0"
# Print the help text to stdout. The here-doc expands $app_name.
usage () {
  cat <<EOF
Time HTTP responses in seconds.

Usage:

  $ $app_name [options] [benchmark|request] [sequences] <url>

Options:

  -h, --help      Print this message
  -v, --version   Print version number

Commands:

  $ $app_name benchmark [sequences] <url>

    Run a benchmark specified number of sequences.

  $ $app_name request <url>

    Run a single request and prints raw results in columns.

Examples:

  $ $app_name https://localhost.test/example
EOF
}
# Perform one HTTP request and print curl's raw timing fields on a single
# line: http_code, namelookup, connect, pretransfer, starttransfer, total,
# size_download, speed_download. Extra arguments are passed through to curl.
request () {
  local curl_args=(
    -L
    --output /dev/null
    --silent
    --write-out '%{http_code} %{time_namelookup} %{time_connect} %{time_pretransfer} %{time_starttransfer} %{time_total} %{size_download} %{speed_download}'
  )
  curl "${curl_args[@]}" "$@" || return 1
}
# Run a benchmark: N requests (first argument if numeric, default 10) against
# the remaining arguments, printing one formatted row per request.
main () {
  local results sequence sequences

  # Leading numeric argument is the sequence count.
  if [[ "${1:-}" =~ ^[0-9]+$ ]]; then
    sequences="$(seq 1 "$1")" || exit 1
    shift
  else
    sequences="$(seq 1 10)"
  fi

  # Header row matching the awk column widths below.
  printf '%6s %-3s %-8s %-8s %-8s %-8s %-8s %-6s %-6s'"\\n" \
    "#" "STA" "DNS" "TCP" "TSL" "Start" "Total" "kB" "MB/s"

  for sequence in $sequences; do
    results="$(request "$@")" || exit 1
    # Truncate each field to its column width; bytes -> kB, bytes/s -> MB/s.
    echo "$sequence $results" | awk -F '[[:space:]]{2,}' '{printf("%06d %-3s %-8s %-8s %-8s %-8s %-8s %-6s %-6s\n", substr($1,0,6), substr($2,0,3), substr($3,0,8), substr($4,0,8), substr($5,0,8), substr($6,0,8), substr($7,0,8), substr($8/1024,0,6), substr($9/1024/1024,0,6))}'
  done
}
case "${1:-}" in
""|-h|--help) usage ;;
-v|--version) echo "$app_version" ;;
benchmark) main "${@:2}" ;;
request) request "${@:2}" ;;
*) main "$@" ;;
esac
| true |
a3f37e62d05ad06c62719043434030a7cb0b762d | Shell | esimov/diagram | /watch | UTF-8 | 625 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Detect the host OS so we know which image opener to use.
unameOut="$(uname -s)"
case "${unameOut}" in
    Linux*)     machine=Linux;;
    Darwin*)    machine=Mac;;
    CYGWIN*)    machine=Cygwin;;
    MINGW*)     machine=MinGw;;
    *)          machine="UNKNOWN:${unameOut}"
esac

# Watch ./output and open every newly written PNG with the platform viewer.
# BUGFIX: $file was unquoted in the open calls, breaking on names with
# spaces/globs. (Cygwin/MinGw/unknown platforms still have no opener.)
inotifywait -e close_write,moved_to,create -m ./output |
while read -r directory events file; do
    if [[ $file == *.png ]]; then
        if [[ ${machine} == "Linux" ]]; then
            xdg-open "./output/$file"
        elif [[ ${machine} == "Mac" ]]; then
            open "./output/$file"
        fi
    fi
done
18ec0d5b3cfc6e9bb8d54e569bf0268835b5c1dd | Shell | yusufgogus50/malfs-milis | /talimatname/temel-ek/kernel-lts/kernel-lts.kur-kos | UTF-8 | 393 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Refresh the /boot/kernel symlink to the newest installed kernel, rebuild
# the initramfs for the newest module tree, and regenerate the GRUB config.
cd /boot
if [ -f kernel ];then
	rm kernel
fi
# newest entries by modification time (last line of ls -Art)
newest_modules=$(ls -Art /lib/modules/ | tail -n 1)
newest_kernel=$(ls -Art /boot/kernel* | tail -n 1)
module_ver=$(basename "$newest_modules")
kernel_name=$(basename "$newest_kernel")
ln -s "$kernel_name" kernel
# keep the previous initramfs around ("eski" = "old")
if [ -f initramfs ];then
	mv initramfs initramfs_eski
fi
dracut -N --force --xz --omit systemd /boot/initramfs "$module_ver"
grub-mkconfig -o /boot/grub/grub.cfg
| true |
2775b20ec956558d6988101b0c408698ca2ee6e7 | Shell | slaclab/cryoem-pipeline | /pipeline.sh | UTF-8 | 32,440 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/bin/bash -e
# module loads for programs — every *_VERSION can be overridden from the
# environment; *_LOAD is the matching `module load` argument.
IMOD_VERSION=${IMOD_VERSION:-4.10.42}
IMOD_LOAD="imod/${IMOD_VERSION}"
EMAN2_VERSION="20190603"
EMAN2_LOAD="eman2/${EMAN2_VERSION}"
MOTIONCOR2_VERSION=${MOTIONCOR2_VERSION:-1.2.3}
MOTIONCOR2_LOAD="motioncor2/${MOTIONCOR2_VERSION}"
#MOTIONCOR2_VERSION="1.2.2"
#MOTIONCOR2_LOAD="motioncor2/${MOTIONCOR2_VERSION}"
CTFFIND4_VERSION=${CTFFIND4_VERSION:-4.1.10}
CTFFIND4_LOAD="ctffind/${CTFFIND4_VERSION}"
RELION_VERSION=${RELION_VERSION:-3.0.4}
RELION_LOAD="relion/${RELION_VERSION}"
# BUGFIX: was ${IMAGEMAGICK_VERSION:=-6.8.9} — the ':=-' operator made the
# default "-6.8.9" (leading dash), yielding "imagemagick/-6.8.9".
IMAGEMAGICK_VERSION=${IMAGEMAGICK_VERSION:-6.8.9}
IMAGEMAGICK_LOAD="imagemagick/$IMAGEMAGICK_VERSION"
# GENERATE
# force redo of all files
MODE=${MODE:-spa} # spa | tomo
TASK=${TASK:-all}
FORCE=${FORCE:-0}
NO_FORCE_GAINREF=${NO_FORCE_GAINREF:-0}
NO_PREAMBLE=${NO_PREAMBLE:-0}
# SCOPE PARAMS
CS=${CS:-2.7}
KV=${KV:-300}
APIX=${APIX}
SUPERRES=${SUPERRES:-0}
PHASE_PLATE=${PHASE_PLATE:-0}
AMPLITUDE_CONTRAST=${AMPLITUDE_CONTRAST:-0.1}
# MOTIONCOR2 PARAMETERS
BFT=${BFT:-150}
FMDOSE=${FMDOSE}
PATCH=${PATCH:-5 5}
THROW=${THROW:-0}
TRUNC=${TRUNC:-0}
ITER=${ITER:-10}
TOL=${TOL:-0.5}
OUTSTACK=${OUTSTACK:-0}
INFMMOTION=${INFMMOTION:-1}
GPU=${GPU:-0}
GPU_OFFSET=${GPU_OFFSET:-0}
FMREF=${FMREF:-0}
INITDOSE=${INITDOSE:-0}
# PICKING — min/max default to ±20% of the particle size
PARTICLE_SIZE=${PARTICLE_SIZE:-150}
PARTICLE_SIZE_MIN=${PARTICLE_SIZE_MIN:-$(echo $PARTICLE_SIZE | awk '{ print $1*0.8 }')}
PARTICLE_SIZE_MAX=${PARTICLE_SIZE_MAX:-$(echo $PARTICLE_SIZE | awk '{ print $1*1.2 }')}
# LOCAL COPY
LOCAL=${LOCAL}
# usage
# Print the help text. Text fixes vs. original: --force's short flag is -F
# (not -f, which getopts rejects), plus "specified"/"micrograph"/"or" typos.
usage() {
  cat <<__EOF__
Usage: $0 MICROGRAPH_FILE
Mandatory Arguments:
  [-a|--apix FLOAT]            use specified pixel size
  [-d|--fmdose FLOAT]          use specified fmdose in calculations
Optional Arguments:
  [-g|--gainref GAINREF_FILE]  use specified gain reference file
  [-b|--basename STR]          output files names with specified STR as prefix
  [-k|--kev INT]               input micrograph was taken with INT keV microscope
  [-s|--superres]              input micrograph was taken in super-resolution mode (so we should half the number of pixels)
  [-p|--phase-plate]           input micrograph was taken using a phase plate (so we should calculate the phase)
  [-P|--patch STRING]          use STRING patch settings for motioncor2 alignment
  [-e|--particle-size INT]     pick particles with size INT
  [-F|--force]                 reprocess all steps (ignore existing results).
  [-m|--mode [spa|tomo]]       pipeline to use: single particle analysis or tomography
  [-t|--task sum|align|pick|all]  what to process; sum the stack, align the stack; just particle pick or all
__EOF__
}
# determine what to run
# Parse CLI arguments (long options are remapped to short ones first), then
# dispatch each micrograph to the spa or tomo pipeline.
main() {
  # map long arguments to short
  for arg in "$@"; do
    shift
    case "$arg" in
      "--help") set -- "$@" "-h";;
      "--gainref") set -- "$@" "-g";;
      "--basename") set -- "$@" "-b";;
      "--force") set -- "$@" "-F";;
      "--apix") set -- "$@" "-a";;
      "--fmdose") set -- "$@" "-d";;
      "--kev") set -- "$@" "-k";;
      "--superres") set -- "$@" "-s";;
      "--phase-plate") set -- "$@" "-p";;
      "--patch") set -- "$@" "-P";;
      "--particle-size") set -- "$@" "-e";;
      "--mode") set -- "$@" "-m";;
      "--task") set -- "$@" "-t";;
      *) set -- "$@" "$arg";;
    esac
  done

  # BUGFIX: the optstring was missing "P:", so the -P produced by the
  # --patch remap above was rejected as an unknown option.
  while getopts "Fhspm:t:g:b:a:d:k:P:e:" opt; do
    case "$opt" in
    g) GAINREF_FILE="$OPTARG";;
    b) BASENAME="$OPTARG";;
    a) APIX="$OPTARG";;
    d) FMDOSE="$OPTARG";;
    k) KV="$OPTARG";;
    s) SUPERRES=1;;
    p) PHASE_PLATE=1;;
    P) PATCH="$OPTARG";;
    e) PARTICLE_SIZE="$OPTARG";;
    F) FORCE=1;;
    m) MODE="$OPTARG";;
    t) TASK="$OPTARG";;
    h) usage; exit 0;;
    ?) usage; exit 1;;
    esac
  done

  # NOTE(review): this is a plain string of the remaining args, not an array;
  # micrograph paths therefore must not contain whitespace.
  MICROGRAPHS=${@:$OPTIND}
  # >&2 echo "MICROGRAPHS: ${MICROGRAPHS}"
  if [ ${#MICROGRAPHS[@]} -lt 1 ]; then
    echo "Need input micrograph MICROGRPAH_FILE to continue..."
    usage
    exit 1
  fi

  if [ -z $APIX ]; then
    echo "Need pixel size [-a|--apix] to continue..."
    usage
    exit 1
  fi

  # fmdose is only required for tasks that touch the raw stack
  if [[ -z $FMDOSE && ( "$TASK" == "all" || "$TASK" == "align" || "$TASK" == "sum" ) ]]; then
    echo "Need fmdose [-d|--fmdose] to continue..."
    usage
    exit 1
  fi

  for MICROGRAPH in ${MICROGRAPHS}; do
    # strip ./
    if [[ "$MICROGRAPH" = ./* ]]; then MICROGRAPH="${MICROGRAPH:2}"; fi
    if [ "$MODE" == "spa" ]; then
      >&2 echo "MICROGRAPH: ${MICROGRAPH}"
      do_spa
    elif [ "$MODE" == "tomo" ]; then
      do_tomo
    else
      echo "Unknown MODE $MODE"
      usage
      exit 1
    fi
  done
}
# Single-particle-analysis driver for one $MICROGRAPH. Runs the sub-tasks
# selected by $TASK (align/sum/pick/preview/all) and temporarily suppresses
# FORCE for the gain-reference step when NO_FORCE_GAINREF is set.
do_spa()
{
  if [ ${NO_PREAMBLE} -eq 0 ]; then
    do_prepipeline
    if [[ "$TASK" == "align" || "$TASK" == "sum" || "$TASK" == "all" ]]; then
      # save/restore FORCE around the gainref conversion
      local force=${FORCE}
      if [ ${NO_FORCE_GAINREF} -eq 1 ]; then
        FORCE=0
      fi
      do_gainref
      FORCE=$force
    fi
  else
    # still need to determine correct gainref
    local force=${FORCE}
    FORCE=0
    GAINREF_FILE=$(process_gainref "$GAINREF_FILE") || exit $?
    FORCE=$force
  fi

  # start doing something!
  echo "single_particle_analysis:"
  if [[ "$TASK" == "align" || "$TASK" == "all" ]]; then
    do_spa_align
  fi

  if [[ "$TASK" == "sum" || "$TASK" == "all" ]]; then
    do_spa_sum
  fi

  if [[ "$TASK" == "pick" || "$TASK" == "all" ]]; then
    # get the assumed pick file name
    if [ -z $ALIGNED_FILE ]; then
      ALIGNED_FILE=$(align_file ${MICROGRAPH}) || exit $?
    fi
    do_spa_pick
  fi

  if [[ "$TASK" == "preview" || "$TASK" == "all" ]]; then
    echo " - task: preview"
    local start=$(date +%s.%N)
    # need to guess filenames
    if [ "$TASK" == "preview" ]; then
      ALIGNED_DW_FILE=$(align_dw_file ${MICROGRAPH}) || exit $?
      #echo "ALIGNED_DW_FILE: $ALIGNED_DW_FILE"
      ALIGNED_CTF_FILE=$(align_ctf_file "${MICROGRAPH}") || exit $?
      #echo "ALIGNED_CTF_FILE: $ALIGNED_CTF_FILE"
      PARTICLE_FILE=$(particle_file ${ALIGNED_DW_FILE}) || exit $?
      #echo "PARTICLE_FILE: $PARTICLE_FILE"
      SUMMED_CTF_FILE=$(sum_ctf_file "${MICROGRAPH}") || exit $?
      # remove the _sum bit if SUMMED_FILE defined
      if [ ! -z $SUMMED_FILE ]; then
        SUMMED_CTF_FILE="${SUMMED_CTF_FILE%_sum_ctf.mrc}_ctf.mrc"
        #>&2 echo "SUMMED CTF: $SUMMED_CTF_FILE"
      fi
      #echo "SUMMED_CTF_FILE: $SUMMED_CTF_FILE"
    fi
    local PREVIEW_FILE=$(generate_preview) || exit $?
    echo "   files:"
    dump_file_meta "${PREVIEW_FILE}" || exit $?
    local duration=$( awk '{print $2-$1}' <<< "$start $(date +%s.%N)" )
    echo "   duration: $duration"
    echo "   executed_at: " $(date --utc +%FT%TZ -d @$start)
  fi
}
# Tomography pipeline — not implemented yet; print a marker and abort
# with a distinctive exit status (255).
do_tomo()
{
  printf '%s\n' "TODO"
  exit 255
}
# Emit the YAML "pre-pipeline:" section: the scope/processing parameters in
# effect and the metadata of the input micrograph.
do_prepipeline()
{
  echo "pre-pipeline:"
  # other params
  echo " - task: input"
  echo "   data:"
  echo "     apix: ${APIX}"
  echo "     fmdose: ${FMDOSE}"
  echo "     astigmatism: ${CS}"
  echo "     kev: ${KV}"
  echo "     amplitude_contrast: ${AMPLITUDE_CONTRAST}"
  echo "     super_resolution: ${SUPERRES}"
  echo "     phase_plate: ${PHASE_PLATE}"
  # input micrograph
  echo " - task: micrograph"
  echo "   files:"
  dump_file_meta "${MICROGRAPH}" || exit $?
}
# If a gain reference was supplied, convert it (dm4 -> mrc) via
# process_gainref, report timing, and update GAINREF_FILE to the converted
# path. No-op when GAINREF_FILE is empty.
do_gainref()
{
  if [ ! -z "$GAINREF_FILE" ]; then
    # gainref
    echo " - task: convert_gainref"
    local start=$(date +%s.%N)
    GAINREF_FILE=$(process_gainref "$GAINREF_FILE") || exit $?
    local duration=$( awk '{print $2-$1}' <<< "$start $(date +%s.%N)" )
    echo "   duration: $duration"
    echo "   executed_at: " $(date --utc +%FT%TZ -d @$start)
    echo "   files:"
    dump_file_meta "${GAINREF_FILE}" || exit $?
  fi
}
###
# process the micrograph by aligning and creating the ctfs and previews for the MICROGRAPH
###
# Side effects: sets ALIGNED_FILE / ALIGNED_DW_FILE / ALIGNED_CTF_FILE and
# the PROCESSED_ALIGN_* summary variables; prints YAML task records.
do_spa_align() {
  >&2 echo
  >&2 echo "Processing align for micrograph $MICROGRAPH..."

  echo " - task: align_stack"
  local start=$(date +%s.%N)
  ALIGNED_FILE=$(align_stack "$MICROGRAPH" "$GAINREF_FILE") || exit $? #"./aligned/motioncor2/$MOTIONCOR2_VERSION")
  local duration=$( awk '{print $2-$1}' <<< "$start $(date +%s.%N)" )
  echo "   duration: $duration"
  echo "   executed_at: " $(date --utc +%FT%TZ -d @$start)
  echo "   files:"
  dump_file_meta "${ALIGNED_FILE}" || exit $?
  # motioncor2 also writes a dose-weighted companion next to the aligned file
  ALIGNED_DW_FILE="${ALIGNED_FILE%.mrc}_DW.mrc"
  dump_file_meta "${ALIGNED_DW_FILE}" || exit $?

  echo " - task: align_data"
  local file=$(motioncor_file ${ALIGNED_FILE}) || exit $?
  if [ ! -e $file ]; then
    >&2 echo "motioncor2 data file $file does not exist"
    exit 4
  fi
  echo "   source: $file"
  echo "   data:"
  # parse_motioncor emits an associative-array definition that eval creates
  local align=$(parse_motioncor ${ALIGNED_FILE}) || exit $?
  eval $align || exit $?
  for k in "${!align[@]}"; do
    if [ "$k" == 'frames' ]; then
      echo "     $k: ${align[$k]}"
    else
      printf "     $k: %.2f\n" ${align[$k]}
    fi
  done
  PROCESSED_ALIGN_FIRST1=${align[first1]}
  PROCESSED_ALIGN_FIRST3=${align[first3]}
  PROCESSED_ALIGN_FIRST5=${align[first5]}
  PROCESSED_ALIGN_FIRST8=${align[first8]}
  PROCESSED_ALIGN_ALL=${align[all]}

  # create a file that relion can read
  local star=$(create_motioncor_star ${ALIGNED_FILE}) || exit $?

  echo " - task: ctf_align"
  local start=$(date +%s.%N)
  # we always bin down the aligned file if superres, so we need to prevent the ctf from using the wrong apix value
  local orig_superres=${SUPERRES}
  SUPERRES=0
  local outdir="aligned/motioncor2/$MOTIONCOR2_VERSION/ctffind4/$CTFFIND4_VERSION"
  ALIGNED_CTF_FILE=$(process_ctffind "$ALIGNED_FILE" "$outdir") || exit $?
  SUPERRES=${orig_superres}
  local duration=$( awk '{print $2-$1}' <<< "$start $(date +%s.%N)" )
  echo "   duration: $duration"
  echo "   executed_at: " $(date --utc +%FT%TZ -d @$start)
  echo "   files:"
  dump_file_meta "${ALIGNED_CTF_FILE}" || exit $?

  echo " - task: ctf_align_data"
  local ctf_file=$(ctffind_file $ALIGNED_CTF_FILE) || exit $?
  echo "   source: $ctf_file"
  local ctf_data=$(parse_ctffind $ALIGNED_CTF_FILE) || exit $?
  eval $ctf_data || exit $?
  echo "   data:"
  for k in "${!ctf[@]}"; do
    echo "     $k: ${ctf[$k]}"
  done
  PROCESSED_ALIGN_RESOLUTION=${ctf[resolution]}
  PROCESSED_ALIGN_RESOLUTION_PERFORMANCE=${ctf[resolution_performance]}
  PROCESSED_ALIGN_ASTIGMATISM=${ctf[astigmatism]}
  PROCESSED_ALIGN_CROSS_CORRELATION=${ctf[cross_correlation]}
}
# Compute the output path for the CTF of a summed stack.
# $1: micrograph filename (overridden by $BASENAME when that is non-empty)
# $2: optional output directory (defaults to the versioned imod/ctffind tree)
# Prints e.g. "<outdir>/<name>_sum_ctf.mrc" (input extension stripped).
sum_ctf_file()
{
  local src="${BASENAME:-$1}"
  local ext="${src##*.}"
  local dir="${2:-summed/imod/$IMOD_VERSION/ctffind4/$CTFFIND4_VERSION}"
  echo "$dir/${src%.${ext}}_sum_ctf.mrc"
}
# Compute the output path for the CTF of an aligned stack.
# $1: micrograph filename (overridden by $BASENAME when that is non-empty)
# $2: optional output directory (defaults to the versioned motioncor2/ctffind tree)
# Prints e.g. "<outdir>/<name>_aligned_ctf.mrc" (input extension stripped).
align_ctf_file()
{
  local src="${BASENAME:-$1}"
  local ext="${src##*.}"
  local dir="${2:-aligned/motioncor2/$MOTIONCOR2_VERSION/ctffind4/$CTFFIND4_VERSION}"
  echo "$dir/${src%.${ext}}_aligned_ctf.mrc"
}
###
# create the ctf and preview images for the summed stack of the MICROGRAPH. creating the sum temporarily if necessary
###
# Side effects: sets SUMMED_CTF_FILE and the PROCESSED_SUM_* variables; the
# temporary summed stack (and its log) is deleted once the CTF exists.
do_spa_sum() {
  >&2 echo
  >&2 echo "Processing sum for micrograph $MICROGRAPH..."

  SUMMED_CTF_FILE=$(sum_ctf_file "$MICROGRAPH") || exit $?
  # check for the SUMMED_CTF_FILE, do if not exists
  if [ -e $SUMMED_CTF_FILE ]; then
    >&2 echo
    >&2 echo "sum ctf file $SUMMED_CTF_FILE already exists"
  fi

  local summed_file_log=""
  # work out the summed average fo teh stack if necessary
  local create_summed_file=1
  if [ ! -z $SUMMED_FILE ]; then
    create_summed_file=0
    >&2 echo "using summed micrograph $SUMMED_FILE..."
  fi
  # only build a temporary sum when none was supplied and the CTF is
  # missing/empty or a redo was forced
  if [[ -z $SUMMED_FILE && ( $FORCE -eq 1 || ! -e $SUMMED_CTF_FILE || ! -s $SUMMED_CTF_FILE ) ]]; then
    echo " - task: sum"
    local start=$(date +%s.%N)
    local file=$(basename -- "$SUMMED_CTF_FILE") || exit $?
    local tmpfile="/tmp/${file%_ctf.mrc}.mrc"
    SUMMED_FILE=$(process_sum "$MICROGRAPH" "$tmpfile" "$GAINREF_FILE") || exit $?
    summed_file_log="${SUMMED_FILE%.mrc}.log"
    local duration=$( awk '{print $2-$1}' <<< "$start $(date +%s.%N)" )
    echo "   duration: $duration"
    echo "   executed_at: " $(date --utc +%FT%TZ -d @$start)
  fi

  echo " - task: ctf_summed"
  local start=$(date +%s.%N)
  if [[ $FORCE -eq 1 || ! -e "$SUMMED_CTF_FILE" || ! -s $SUMMED_CTF_FILE ]]; then
    local outdir=$(dirname "$SUMMED_CTF_FILE") || exit $?
    SUMMED_CTF_FILE=$(process_ctffind "$SUMMED_FILE" "$outdir") || exit $?
    # drop the temporary sum we created above (keep a user-supplied one)
    if [ $create_summed_file -eq 1 ]; then
      #>&2 echo "DELETING $SUMMED_FILE"
      rm -f "$SUMMED_FILE"
    fi
  fi
  local duration=$( awk '{print $2-$1}' <<< "$start $(date +%s.%N)" )
  echo "   duration: $duration"
  echo "   executed_at: " $(date --utc +%FT%TZ -d @$start)
  echo "   files:"
  dump_file_meta "${SUMMED_CTF_FILE}" || exit $?
  if [ "$summed_file_log" != "" ]; then
    rm -f $SUMMED_FILE $summed_file_log || exit $?
  fi

  echo " - task: ctf_summed_data"
  local ctf_file=$(ctffind_file $SUMMED_CTF_FILE) || exit $?
  echo "   source: $ctf_file"
  local ctf_data=$(parse_ctffind $SUMMED_CTF_FILE) || exit $?
  eval $ctf_data
  echo "   data:"
  for k in "${!ctf[@]}"; do
    echo "     $k: ${ctf[$k]}"
  done
  PROCESSED_SUM_RESOLUTION=${ctf[resolution]}
  PROCESSED_SUM_RESOLUTION_PERFORMANCE=${ctf[resolution_performance]}
}
# Particle-pick the dose-weighted aligned micrograph. Sets PARTICLE_FILE and
# PROCESSED_NUMBER_PARTICLES; prints the YAML particle_pick record.
do_spa_pick()
{
  # use DW file?
  >&2 echo
  if [ -z $ALIGNED_DW_FILE ]; then
    ALIGNED_DW_FILE=$(align_dw_file "$MICROGRAPH") || exit $?
  fi
  >&2 echo "Processing particle picking for micrograph $ALIGNED_DW_FILE..."

  echo " - task: particle_pick"
  local start=$(date +%s.%N)
  PARTICLE_FILE=$(particle_pick "$ALIGNED_DW_FILE") || exit $?
  local duration=$( awk '{print $2-$1}' <<< "$start $(date +%s.%N)" )
  echo "   duration: $duration"
  echo "   executed_at: " $(date --utc +%FT%TZ -d @$start)
  echo "   files:"
  dump_file_meta "${PARTICLE_FILE}" || exit $?
  echo "   data:"
  # 11 non-particle lines
  local particles=$(wc -l ${PARTICLE_FILE} | awk '{print $1-11}') || exit $?
  PROCESSED_NUMBER_PARTICLES=$particles
  echo "     particles: " $particles
}
# Expand a command template: re-evaluates $1 through the shell so variable
# references and $( ) substitutions embedded in the template string are
# resolved (used to render the MotionCor2 command line in align_stack).
# NOTE(review): eval on arbitrary input is shell-injection prone; only
# trusted, locally-built templates must ever be passed here.
function gen_template() {
eval "echo \"$1\""
}
process_gainref()
{
    # Resolve the gain reference to use for alignment and echo its path on
    # stdout: a .dm4 gainref is converted to .mrc (and binned 2x down when
    # not running in super-resolution mode); an .mrc gainref is used as-is.
    #   $1 - input gainref file (.dm4 or .mrc)
    #   $2 - output directory (default: .); ignored for absolute inputs
    # Globals read: FORCE, IMOD_LOAD, SUPERRES
    # Exits 4 if the input gainref does not exist.
    local input=$1
    local outdir=${2:-.}
    local output
    # Absolute inputs keep their own directory; relative ones go under $outdir.
    # (The non-dm4 default of "$outdir/$filename" reconstructs a span that was
    # garbled in extraction as "$(unknown)" - TODO confirm against upstream.)
    if [[ ${input:0:1} == "/" ]]; then outdir=""; output="$input"; else mkdir -p $outdir; output="$outdir/$(basename -- "$input")"; fi
    >&2 echo
    local filename=$(basename -- "$input")
    local extension="${filename##*.}"
    if [ ! -e "$input" ]; then
        >&2 echo "gainref file $input does not exist!"
        exit 4
    fi
    # BUGFIX: the original used [[ "$extension" -eq "dm4" ]]. Inside [[ ]],
    # -eq forces arithmetic evaluation, so both operands evaluated to 0 and
    # the branch was taken for every extension. String equality needs ==.
    if [[ "$extension" == "dm4" ]]; then
        output="$outdir/${input%.$extension}.mrc"
        # strip all ./'s
        if [[ "$output" = ././* ]]; then output="${output:4}"; fi
        if [[ "$output" = ./* ]]; then output="${output:2}"; fi
        if [[ $FORCE -eq 1 || ! -e $output ]]; then
            >&2 echo "converting gainref file $input to $output..."
            module load ${IMOD_LOAD} || exit $?
            # assume $input is always superres, so scale down if not
            dm2mrc "$input" "$output" 1>&2 || exit $?
            if [[ "$SUPERRES" == "0" ]]; then
                >&2 echo "binning gain ref for non-superres $SUPERRES"
                >&2 newstack -bin 2 "$output" "/tmp/${filename%.$extension}.mrc" && mv "/tmp/${filename%.$extension}.mrc" "$output" || exit $?
            fi
        else
            >&2 echo "gainref file $output already exists"
        fi
    # TODO: this needs testing
    elif [[ "$extension" == 'mrc' && ! -e $output ]]; then
        >&2 echo "Error: output gainref file $output does not exist"
    fi
    >&2 echo "OUT: $output"
    echo $output
}
align_file()
{
    # Pure string mapping from a movie path to its aligned-output .mrc path.
    # The global BASENAME, when non-empty, overrides the input name; $2
    # optionally overrides the destination directory (default:
    # aligned/motioncor2/$MOTIONCOR2_VERSION).
    local src="$1"
    if [ -n "${BASENAME}" ]; then
        src="${BASENAME}"
    fi
    local dest_dir=${2:-aligned/motioncor2/$MOTIONCOR2_VERSION}
    local leaf
    leaf=$(basename -- "$src")
    local ext="${leaf##*.}"
    echo "$dest_dir/${leaf%.${ext}}_aligned.mrc"
}
align_dw_file()
{
    # Dose-weighted companion of the aligned output:
    # X_aligned.mrc -> X_aligned_DW.mrc.
    local base
    base=$(align_file "$1")
    echo "${base%_aligned.mrc}_aligned_DW.mrc"
}
# Motion-correct a movie stack with MotionCor2 and echo the aligned .mrc path.
#   $1 - input movie (.mrc or .tif)
#   $2 - gain reference file (may be empty)
#   $3 - output directory (default aligned/motioncor2/$MOTIONCOR2_VERSION)
# Globals read: LOCAL (optional staging dir), FORCE, GPU, GPU_OFFSET and the
# many MotionCor2 tuning parameters interpolated into the command template.
# Exits non-zero if any sub-step fails.
align_stack()
{
# given a set of parameters, run motioncor on the input movie stack
local input=$1
local gainref="$2"
local outdir=${3:-aligned/motioncor2/$MOTIONCOR2_VERSION}
>&2 echo
mkdir -p $outdir
local output=$(align_file $input $outdir) || exit $?
if [ -e $output ]; then
>&2 echo "aligned file $output already exists"
fi
local micrograph=$input
# copy locally if requested
# Staging to $LOCAL (e.g. node-local scratch) avoids reading the movie over
# shared storage during alignment.
if [[ $LOCAL != "" && -d $LOCAL ]]; then
local this=$(basename -- $micrograph)
>&2 echo "executing: cp -f $input $LOCAL/$this"
>&2 cp -f $input $LOCAL/$this || exit $?
micrograph=$LOCAL/$this
fi
if [[ $FORCE -eq 1 || ! -e $output ]]; then
local extension="${input##*.}"
>&2 echo "aligning $extension stack $micrograph to $output, using gainref file $gainref..."
local gpu=$(($GPU+$GPU_OFFSET))
# Command template: the embedded $( ) fragments below are expanded later by
# gen_template/eval, choosing -InMrc/-InTiff, optional -Gain, pixel size
# halving for super-resolution, and -FtBin accordingly.
local cmd="
MotionCor2 \
$(if [ "$extension" == 'mrc' ]; then echo '-InMrc'; else echo '-InTiff'; fi) '$micrograph' \
$(if [ ! '$gainref' == '' ]; then echo -Gain \'$gainref\'; fi) \
-OutMrc $output \
-LogFile ${output%.${extension}}.log \
-FmDose $FMDOSE \
-kV $KV \
-Bft $BFT \
-PixSize $(echo $APIX | awk -v superres=$SUPERRES '{ if( superres=="1" ){ print $1/2 }else{ print $1 } }') \
-FtBin $(if [ $SUPERRES -eq 1 ]; then echo 2; else echo 1; fi) \
-Patch $PATCH \
-Throw $THROW \
-Trunc $TRUNC \
-InitDose $INITDOSE \
-FmRef $FMREF \
-Iter $ITER \
-Tol $TOL \
-OutStack $OUTSTACK \
-InFmMotion $INFMMOTION \
-Gpu $gpu \
-GpuMemUsage 0.95
"
align_command=$(gen_template "$cmd") || exit $?
>&2 echo "executing:" $align_command
module load ${MOTIONCOR2_LOAD} || exit $?
eval $align_command 1>&2 || exit $?
fi
# Remove the staged local copy after alignment.
if [[ $LOCAL != "" && -d $LOCAL ]]; then
local this=$(basename -- $micrograph)
>&2 echo "executing: rm -f $LOCAL/$this"
>&2 rm -f $LOCAL/$this || exit $?
fi
echo $output
}
process_ctffind()
{
# do a ctf of the input mrc
local input=$1
local outdir=${2:-.}
>&2 echo
if [ ! -e "$input" ]; then
>&2 echo "input micrograph $input not found"
exit 4
fi
local filename=$(basename -- "$input")
local extension="${filename##*.}"
local output="$outdir/${filename%.${extension}}_ctf.mrc"
mkdir -p $outdir
local apix=${APIX}
if [[ $SUPERRES -eq 1 ]]; then
apix=$(echo $apix | awk '{ print $1/2 }') || exit $?
fi
if [ -e $output ]; then
>&2 echo "output ctf file $output already exists"
fi
if [[ $FORCE -eq 1 || ! -e $output || ! -s $output ]]; then
>&2 echo "ctf'ing micrograph $input to $output..."
module load ${CTFFIND4_LOAD} || exit $?
# phase plate?
if [ $PHASE_PLATE -eq 0 ]; then
ctffind > ${output%.${extension}}.log << __CTFFIND_EOF__
$input
$output
$apix
$KV
$CS
0.1
512
30
4
1000
50000
200
no
no
yes
100
no
no
__CTFFIND_EOF__
else
ctffind > ${output%.${extension}}.log << __CTFFIND_EOF__
$input
$output
$apix
$KV
$CS
0.1
512
30
4
1000
50000
200
no
no
yes
100
yes
0
1.571
0.1
no
__CTFFIND_EOF__
fi
fi
if [ $? -ne 0 ]; then
exit 1
fi
echo $output
}
generate_image()
{
    # Render a preview image from an .mrc file via IMOD's mrc2tif, optionally
    # low-pass filtering first with `clip filter`. Echoes the output path.
    #   $1 - input mrc
    #   $2 - output directory (default: .)
    #   $3 - optional low-pass cutoff for clip filter (empty = no filtering)
    #   $4 - output format, jpg (default) or tif
    # Globals read: FORCE, IMOD_LOAD. Exits 4 on missing input/output.
    local input=$1
    local outdir=${2:-.}
    local lowpass=${3:-}
    local format=${4:-jpg}
    >&2 echo
    if [ ! -e $input ]; then
        >&2 echo "input micrograph $input not found"
        # BUGFIX: was a bare `exit`, which returned 0 on this error path;
        # sibling functions signal missing files with status 4.
        exit 4
    fi
    local filename=$(basename -- "$input")
    local extension="${filename##*.}"
    local output="$outdir/${filename%.${extension}}.${format}"
    mkdir -p $outdir
    if [ -e $output ]; then
        >&2 echo "preview file $output already exists"
    fi
    if [[ $FORCE -eq 1 || ! -e $output ]]; then
        >&2 echo "generating preview of $input to $output..."
        module load ${IMOD_LOAD} || exit $?
        tmpfile=$input
        if [ "$lowpass" != "" ]; then
            # Filter into a scratch copy under /tmp; removed again below.
            # (Reconstructs a span garbled in extraction as "$(unknown)" -
            # assumes the original staged under /tmp by filename - TODO confirm.)
            tmpfile="/tmp/$filename"
            echo "clip filter -l $lowpass $input $tmpfile" 1>&2
            clip filter -l $lowpass $input $tmpfile 1>&2 || exit $?
        fi
        local opts=""
        if [ "${format}" == "jpg" ]; then
            opts="-j"
        fi
        echo "mrc2tif ${opts} $tmpfile $output" 1>&2
        mrc2tif ${opts} $tmpfile $output 1>&2 || exit $?
        if [ "$lowpass" != "" ]; then
            echo "rm -f $tmpfile" 1>&2
            rm -f $tmpfile || exit $?
        fi
    fi
    if [ ! -e $output ]; then
        >&2 echo "could not generate preview file $output!"
        exit 4
    fi
    >&2 echo "done"
    echo $output
}
# Produce a gain-corrected sum of a movie stack: average all frames with
# IMOD's avgstack, then multiply by the gain reference with `clip mult`.
#   $1 - input movie stack
#   $2 - output summed .mrc
#   $3 - gain reference .mrc
#   $4 - output directory (accepted but currently unused - see note below)
# Globals read: FORCE, IMOD_LOAD. Echoes the output path.
process_sum()
{
local input=$1
local output=$2
# TODO: what if no gainref?
local gainref=$3
# NOTE(review): $outdir is never referenced after this line - confirm
# whether it was meant to prefix $output.
local outdir=${4:-.} #-$(sum_ctf_file ${MICROGRAPH})}
local filename=$(basename -- "$output")
local extension="${filename##*.}"
local log="${output%.${extension}}.log"
>&2 echo
if [ ! -e $input ]; then
>&2 echo "input micrograph $input not found"
# NOTE(review): bare `exit` returns 0 here, unlike the exit 4 used by
# sibling functions for missing inputs - confirm intent.
exit
fi
if [ -e $output ]; then
>&2 echo "output micrograph $output already exists"
fi
if [[ $FORCE -eq 1 || ! -e $output ]]; then
>&2 echo "summing stack $input to $output..."
local tmpfile=$(mktemp /tmp/pipeline-sum.XXXXXX) || exit $?
module load ${IMOD_LOAD} || exit $?
>&2 echo "avgstack $input $tmpfile /"
avgstack > $log << __AVGSTACK_EOF__
$input
$tmpfile
/
__AVGSTACK_EOF__
>&2 echo clip mult -n 16 $tmpfile \'$gainref\' \'$output\'
module load ${IMOD_LOAD} || exit $?
clip mult -n 16 $tmpfile "$gainref" "$output" 1>&2 || exit $?
rm -f $tmpfile || exit $?
fi
echo $output
}
particle_file()
{
    # Map a micrograph path to its relion autopick star-file path.
    # NOTE: unlike align_file, the full input path (not just its basename)
    # is appended under $2 (default: particles).
    local mic="$1"
    local out_dir=${2:-particles}
    local suffix="${mic##*.}"
    printf '%s\n' "$out_dir/${mic%.${suffix}}_autopick.star"
}
# Pick particles from a micrograph with relion_autopick (LoG mode) and echo
# the star-file path.
#   $1 - input micrograph
#   $2 - output directory (default: particles)
# Globals read: FORCE, RELION_LOAD, PARTICLE_SIZE_MIN/MAX, APIX.
# Exits 4 when the input is missing.
particle_pick()
{
local input=$1
local dirname=${2:-particles}
>&2 echo
if [ ! -e $input ]; then
>&2 echo "input micrograph $input not found"
exit 4
fi
# NOTE(review): particle_file is called without $dirname, so for a non-default
# $2 the echoed path and relion's --odir would disagree - confirm.
local output=$(particle_file "$input") || exit $?
if [ -e $output ]; then
>&2 echo "particle file $output already exists"
fi
if [[ $FORCE -eq 1 || ! -e $output ]]; then
>&2 echo "particle picking from $input to $output..."
>&2 echo module load ${RELION_LOAD}
module load ${RELION_LOAD} || exit $?
local cmd="relion_autopick --i $input --odir $dirname/ --pickname autopick --LoG --LoG_diam_min $PARTICLE_SIZE_MIN --LoG_diam_max $PARTICLE_SIZE_MAX --angpix $APIX --shrink 0 --lowpass 15 --LoG_adjust_threshold -0.1"
>&2 echo $cmd
$cmd 1>&2 || exit $?
fi
echo $output
}
generate_file_meta()
{
    # Emit one shell-evalable line for $1:
    #   file="..." checksum=... size=... modify_timestamp=... create_timestamp=...
    # The md5 is cached in "$1.md5" and only recomputed when FORCE=1 or the
    # cache is missing. Symlinks are resolved before stat'ing. Exits 4 when
    # the file does not exist.
    local target="$1"
    if [ -h "$target" ]; then
        target=$(realpath "$target") || exit $?
    fi
    if [ ! -e "$target" ]; then
        >&2 echo "file $target does not exist!"
        exit 4
    fi
    local cache="$1.md5"
    if [ -e "$cache" ]; then
        >&2 echo "md5 checksum file $cache already exists..."
    fi
    >&2 echo "calculating checksum and stat for $target..."
    local checksum=""
    if [[ $FORCE -eq 1 || ! -e $cache ]]; then
        # tee persists the checksum cache as a side effect.
        checksum=$(md5sum "$1" | tee "$cache" | cut -d ' ' -f 1) || exit $?
    else
        checksum=$(awk '{print $1; exit}' "$cache") || exit $?
    fi
    # Single stat call: size / mtime / birth-time, '/'-separated (GNU stat).
    local meta mtime btime bytes
    meta=$(stat -c "%s/%y/%w" "$target") || exit $?
    bytes=${meta%%/*}
    mtime=$(date --utc -d "$(echo $meta | cut -d '/' -f 2)" +%FT%TZ) || exit $?
    btime=${meta##*/}
    # Filesystems without birth-time support report '-'; fall back to mtime.
    if [ "$btime" == "-" ]; then btime=$mtime; fi
    echo "file=\"$1\" checksum=$checksum size=$bytes modify_timestamp=$mtime create_timestamp=$btime"
}
dump_file_meta()
{
    # Print YAML-ish metadata (path, checksum, size, timestamps) for file $1.
    # generate_file_meta's key=value output is eval'ed into locals here.
    if [ ! -e "$1" ]; then
        >&2 echo "File '$1' does not exist."
        exit 4
    fi
    echo " - path: $1"
    local out=$(generate_file_meta "$1") || exit $?
    eval "$out"
    printf ' %s: %s\n' "checksum" "$checksum"
    printf ' %s: %s\n' "size" "$size"
    printf ' %s: %s\n' "modify_timestamp" "$modify_timestamp"
    printf ' %s: %s\n' "create_timestamp" "$create_timestamp"
}
generate_preview()
{
    # Compose the 2x2 preview sheet for the current MICROGRAPH:
    #   top row    - particle-picked aligned image | summed-CTF spectrum
    #   bottom row - aligned (DW) image            | aligned-CTF spectrum
    # annotated with particle count, acquisition timestamp, resolution and
    # drift statistics. Echoes the output jpg path.
    #   $1 - output directory (default: previews)
    # Reads globals set by earlier tasks (ALIGNED_DW_FILE, PARTICLE_FILE,
    # SUMMED_CTF_FILE, ALIGNED_CTF_FILE, PROCESSED_* ...). Exits 4 when a
    # required intermediate is missing.
    local outdir=${1:-previews}
    mkdir -p $outdir
    local filename=$(basename -- "$MICROGRAPH") || exit $?
    if [ ! -z ${BASENAME} ]; then
        filename="${BASENAME}.jpg"
    fi
    local extension="${filename##*.}"
    # FIX: this assignment was garbled in extraction ("$outdir/$(unknown)");
    # rebuilt as the .jpg named after the micrograph/BASENAME - TODO confirm.
    local output="$outdir/${filename%.${extension}}.jpg"
    # create a preview of the image
    # create the picked preview
    #local picked_preview=/tmp/tst.jpg
    local picked_preview=$(mktemp /tmp/pipeline-picked-XXXXXXXX.jpg) || exit $?
    if [ -e "$picked_preview" ]; then
        >&2 echo "particle picked preview file $picked_preview already exists..."
    fi
    if [ ! -e "$ALIGNED_DW_FILE" ]; then
        >&2 echo "aligned file $ALIGNED_DW_FILE not found..."
        exit 4
    fi
    local aligned_jpg=$(generate_image "$ALIGNED_DW_FILE" /tmp 0.05) || exit $?
    if [ ! -e "$PARTICLE_FILE" ]; then
        >&2 echo "particle file $PARTICLE_FILE not found..."
        exit 4
    fi
    module load ${IMAGEMAGICK_LOAD} || exit $?
    #if [ ! -e "$picked_preview" ]; then
    # Split the star file on newlines only while building the draw commands.
    local origifs=$IFS
    IFS=$'\n'
    local cmd="convert -flip -negate '$aligned_jpg' -strokewidth 3 -stroke yellow -fill none "
    local size=$( echo "$PARTICLE_SIZE * $APIX" | awk '{ print $1 }' ) || exit $?
    local i=0
    # One yellow circle per picked particle (skip star-file header/metadata lines).
    for l in $(cat $PARTICLE_FILE | grep -vE '(_|\#|^ $)' ); do
        local shape=$(echo $l | awk -v size=$size '{print "circle " $1 "," $2 "," $1 + size/2 "," $2 }')
        #cmd="${cmd} -strokewidth 3 -stroke yellow -fill none -draw \" $shape \" "
        cmd="${cmd} -draw \"$shape\" "
        i=$((i+1))
        #if [ $i -gt 10 ]; then
        #  break;
        #fi
    done
    cmd="${cmd} $picked_preview"
    IFS=$origifs
    #>&2 echo $cmd
    eval $cmd || exit $?
    #fi
    PROCESSED_NUMBER_PARTICLES=${PROCESSED_NUMBER_PARTICLES:-$i}
    # get a timestamp of when file was created
    local timestamp=$(TZ=America/Los_Angeles date +"%Y-%m-%dT%H:%M:%SZ" -r ${MICROGRAPH}) || exit $?
    # create the top half
    if [ ! -e "$SUMMED_CTF_FILE" ]; then
        >&2 echo "summed ctf file $SUMMED_CTF_FILE not found..."
        exit 4
    fi
    SUMMED_CTF_PREVIEW=$(generate_image "${SUMMED_CTF_FILE}" "/tmp" "" "tif" ) || exit $?
    local top=$(mktemp /tmp/pipeline-top-XXXXXXXX.jpg)
    local res="$(printf '%.1f' ${PROCESSED_SUM_RESOLUTION:-0.0})Å ($(echo ${PROCESSED_SUM_RESOLUTION_PERFORMANCE:-0.0} | awk '{printf( "%2.0f", $1*100 )}')%)" || exit $?
    #>&2 echo "ctf res $res"
    convert \
        -resize '512x512^' -extent '512x512' $picked_preview \
        -flip ${SUMMED_CTF_PREVIEW} \
        +append -font DejaVu-Sans -pointsize 24 -fill SeaGreen1 -stroke SeaGreen3 -strokewidth 1 -draw "text 13,502 \"~$PROCESSED_NUMBER_PARTICLES pp\"" \
        +append -font DejaVu-Sans -pointsize 24 -fill yellow -stroke orange -strokewidth 1 -draw "text 529,502 \"${timestamp}\"" \
        +append -font DejaVu-Sans -pointsize 24 -fill yellow -stroke orange -strokewidth 1 -draw "text 864,502 \"$res\"" \
        $top \
        || exit $?
    rm -f $picked_preview $SUMMED_CTF_PREVIEW || exit $?
    # create the bottom half
    if [ ! -e "$ALIGNED_CTF_FILE" ]; then
        >&2 echo "aligned ctf file $ALIGNED_CTF_FILE not found..."
        exit 4
    fi
    ALIGNED_CTF_PREVIEW=$(generate_image "${ALIGNED_CTF_FILE}" "/tmp" "" "tif") || exit $?
    local bottom=$(mktemp /tmp/pipeline-bottom-XXXXXXXX.jpg) || exit $?
    local res="$(printf '%.1f' ${PROCESSED_ALIGN_RESOLUTION:-0.0})Å ($(echo ${PROCESSED_ALIGN_RESOLUTION_PERFORMANCE:-0.0} | awk '{printf( "%2.0f", $1*100)}')%)"
    local ctf="cs $(printf '%.2f' ${PROCESSED_ALIGN_ASTIGMATISM:-0.0}) cc $(printf '%.2f' ${PROCESSED_ALIGN_CROSS_CORRELATION:-0.0})"
    local drift="$(printf "%.2f" ${PROCESSED_ALIGN_FIRST1:-0.0}) "/" $(printf "%.2f" ${PROCESSED_ALIGN_FIRST5:-0.0}) "/" $(printf "%.2f" ${PROCESSED_ALIGN_ALL:-0.0})"
    # >&2 echo "RES: $res DRIFT: $drift"
    convert \
        -resize '512x512^' -extent '512x512' \
        $aligned_jpg \
        ${ALIGNED_CTF_PREVIEW} \
        +append -font DejaVu-Sans -pointsize 24 -fill orange -stroke orange2 -strokewidth 1 -draw "text 274,30 \"$drift\"" \
        +append -font DejaVu-Sans -pointsize 24 -fill orange -stroke orange2 -strokewidth 1 -draw "text 529,30 \"$ctf\"" \
        +append -font DejaVu-Sans -pointsize 24 -fill orange -stroke orange2 -strokewidth 1 -draw "text 864,30 \"$res\"" \
        $bottom \
        || exit $?
    # clean files
    rm -f $aligned_jpg $ALIGNED_CTF_PREVIEW || exit $?
    # Stack the two halves vertically into the final sheet.
    convert $top $bottom \
        -append $output \
        || exit $?
    rm -f $top $bottom || exit $?
    if [ ! -e $output ]; then
        exit 4
    fi
    echo $output
}
ctffind_file()
{
    # Path of the ctffind text results that accompany a ctf .mrc: same
    # location, extension swapped for .txt.
    local src=$1
    local leaf
    leaf=$(basename -- "$src")
    local ext="${leaf##*.}"
    printf '%s\n' "${src%.${ext}}.txt"
}
# Parse a ctffind results .txt into a shell-evalable associative-array
# declaration, printed on stdout (callers do: eval "$(parse_ctffind file)").
# Keys: apix, defocus_1/2, astigmatism, phase_shift, cross_correlation and -
# when the fit converged - resolution plus resolution_performance
# (2*apix/resolution, i.e. fraction of Nyquist achieved). Exits 4 when the
# data file is missing.
parse_ctffind()
{
local input=$1
local datafile=$(ctffind_file "$input") || exit $?
if [ ! -e $datafile ]; then
>&2 echo "ctf data file $datafile does not exist"
exit 4
fi
# Column layout comes from ctffind's one-data-line output; 'inf' resolution
# means the estimate failed, so those keys are omitted.
cat $datafile | awk '
/# Pixel size: / { apix=$4; next } \
!/# / { defocus_1=$2; defocus_2=$3; astig=$4; phase_shift=$5; cross_correlation=$6; resolution=$7; next } \
END { \
if (resolution=="inf") {
print "declare -A ctf; ctf[apix]="apix " ctf[defocus_1]="defocus_1 " ctf[defocus_2]="defocus_2 " ctf[astigmatism]="astig " ctf[phase_shift]="phase_shift " ctf[cross_correlation]="cross_correlation;
} else {
resolution_performance= 2 * apix / resolution;
print "declare -A ctf; ctf[apix]="apix " ctf[defocus_1]="defocus_1 " ctf[defocus_2]="defocus_2 " ctf[astigmatism]="astig " ctf[phase_shift]="phase_shift " ctf[cross_correlation]="cross_correlation " ctf[resolution]="resolution " ctf[resolution_performance]="resolution_performance;
}
}'
}
motioncor_file()
{
    # Locate the MotionCor2 per-frame shift log ("...log0-Patch-Full.log")
    # that belongs to a movie/aligned file. The log's base name varies with
    # the acquisition naming scheme, so alternatives are probed on disk.
    local src=$1
    local ext="${src##*.}"
    local leaf
    leaf=$(basename -- "$src")
    # Default: the log name simply appends to the full input name.
    local shifts="${src}.log0-Patch-Full.log"
    case "$leaf" in
    FoilHole_*)
        # FoilHole_* inputs usually drop the movie extension first ...
        shifts="${src%.$ext}.log0-Patch-Full.log"
        if [ ! -e "$shifts" ]; then
            # ... but sometimes keep an intermediate .mrc in the name.
            shifts="${src%.$ext}.mrc.log0-Patch-Full.log"
        fi
        ;;
    *)
        if [[ "$ext" == "mrc" && ! -e "$shifts" ]]; then
            shifts="${src%.$ext}.log0-Patch-Full.log"
        fi
        ;;
    esac
    echo $shifts
}
# Summarize frame drift from a MotionCor2 shift log into a shell-evalable
# associative-array declaration on stdout. For each consecutive frame pair
# the Euclidean shift distance is computed; keys first1/first3/first5/first8
# hold (averaged) early-frame drift, 'all' the mean over all frames and
# 'frames' the frame count. Exits 4 when the log file is missing.
parse_motioncor()
{
local input=$1
local datafile=$(motioncor_file "$input")
if [ ! -e $datafile ]; then
>&2 echo "motioncor2 data file $datafile does not exist"
exit 4
fi
# Log columns: frame-number, x-shift, y-shift (header lines start with '# ').
cat $datafile | grep -vE '^$' | awk '
!/# / {
if( $1 > 1 ){
x=$2; y=$3;
dx=lastx-x; dy=lasty-y;
n=sqrt((dx*dx)+(dy*dy));
drifts[$1-1]=n;
} lastx=$2; lasty=$3; next; }
END {
for (i = 1; i <= length(drifts); ++i) {
if( i <= 3 ){ first3 += drifts[i] }
if( i <= 5 ){ first5 += drifts[i] }
if( i <= 8 ){ first8 += drifts[i] }
all += drifts[i]
}
f1 = sprintf("%.4f", drifts[1]);
f3 = sprintf("%.5f", first3/3);
f5 = sprintf("%.4f", first5/5);
f8 = sprintf("%.4f", first8/8);
a = sprintf("%.4f", all/length(drifts));
print "declare -A align; align[first1]="f1 " align[first3]="f3 " align[first5]="f5 " align[first8]="f8 " align[all]="a " align[frames]="length(drifts)+1;
}'
# print " - "lastx "-"x" ("dx*dx")\t" lasty "-"y" ("dy*dy"):\t" n;
}
# Write a relion motion-correction .star file next to the MotionCor2 shift
# log: a data_general block with acquisition metadata followed by the
# per-frame global shifts copied from the log.
#   $1 - movie/aligned file used to locate the shift log
# Globals read: MICROGRAPH, GAINREF_FILE, SUPERRES, APIX, FMDOSE, INITDOSE,
# KV, IMOD_LOAD. Echoes the star-file path; exits 4 when the log is missing.
create_motioncor_star()
{
local input=$1
local datafile=$(motioncor_file "$input")
if [ ! -e $datafile ]; then
>&2 echo "motioncor2 data file $datafile does not exist"
exit 4
fi
local output="${datafile%.log0-Patch-Full.log}.star"
>&2 echo "creating alignment star file $output"
# Super-resolution movies are recorded at 2x and binned down during alignment.
local binning=1
if [ $SUPERRES -eq 1 ]; then
binning=2
fi
# IMOD's `header` reports the stack dimensions (columns, rows, sections).
module load ${IMOD_LOAD}
local info=$(header ${MICROGRAPH} | grep 'Number of columns,')
local x=$(echo $info | awk '{print $7}')
local y=$(echo $info | awk '{print $8}')
local z=$(echo $info | awk '{print $9}')
cat <<EOF > $output
data_general
_rlnImageSizeX ${x}
_rlnImageSizeY ${y}
_rlnImageSizeZ ${z}
_rlnMicrographMovieName ${MICROGRAPH}
_rlnMicrographGainName ${GAINREF_FILE}
_rlnMicrographBinning ${binning}
_rlnMicrographOriginalPixelSize ${APIX}
_rlnMicrographDoseRate ${FMDOSE}
_rlnMicrographPreExposure ${INITDOSE}
_rlnVoltage ${KV}
_rlnMicrographStartFrame 1
_rlnMotionModelVersion 0
data_global_shift
loop_
_rlnMicrographFrameNumber #1
_rlnMicrographShiftX #2
_rlnMicrographShiftY #3
EOF
# Skip the 3 header lines of the shift log; the rest are frame shifts.
cat $datafile | tail -n +4 >> $output
echo >> $output
#>&2 cat $output
echo $output
}
# Abort on any unhandled command failure from here on.
set -e
# Entry point: dispatch to main with the script's command-line arguments.
main "$@"
| true |
34df817ad7c08d8f9894c0ed2e71cfb83bdb444f | Shell | atpathak/ATLAS | /Physics_Analysis/LFV_area/LFVlephad/share/script/git_all.sh | UTF-8 | 284 | 3.1875 | 3 | [] | no_license | #/bin/sh
(
# Walk up from the current directory until at least one Htt2016* entry is
# found, then run the supplied git command in every matching directory.
n=$(ls Htt2016* 2>/dev/null | wc -l)
while [ "$n" -eq 0 ]
do
    cd ..
    n=$(ls Htt2016* 2>/dev/null | wc -l)
    # Give up once the filesystem root is reached.
    # BUGFIX: quoted $(pwd) and POSIX '=' - the original's unquoted `==`
    # test is undefined under the script's #/bin/sh shebang.
    if [ "$(pwd)" = "/" ]; then echo "Cannot find Htt2016*"; exit 1 ;fi
done
for i in Htt2016*
do
    echo "--- In $i"
    # BUGFIX: "$@" keeps each git argument as one word; the original's
    # unquoted $* broke arguments containing spaces (e.g. commit messages).
    ( cd "$i" && git "$@" )
    echo "---"
done
)
| true |
225b88e3c2d7f1428202eb499862df3050a1f50e | Shell | taherbohari/k8s-test | /beginner/deploy-ingress-controller | UTF-8 | 349 | 3 | 3 | [] | no_license | #!/bin/bash -e
# Fail fast with a descriptive message when the release name is not supplied
# via the environment (:? aborts the script when unset/empty).
INGRESS_NAME=${INGRESS_NAME:?"Please provide ingress controller name"}
echo -e "\n## Deploying Nginx Ingress Controller"
echo "# Add nginx helm repository"
helm repo add nginx-stable https://helm.nginx.com/stable
helm repo update
echo "# Installing nginx ingress controller"
# NOTE(review): `helm install --name` and the `stable/` chart repository are
# Helm v2 era - confirm the target cluster still uses Helm v2.
helm install --name ${INGRESS_NAME} stable/nginx-ingress
| true |
f05e3c143f5a0db1b23e32841783677db6889075 | Shell | SaitoYoshiko/fibonacci | /linux-study/myfirst-shell/quiz.sh | UTF-8 | 265 | 3.125 | 3 | [] | no_license | !#/bin/bash
# Interactive one-question quiz (prompt and answers kept verbatim).
read -p "日本で二番目に高い山は槍ヶ岳でしょうか?[y/n]" answer
# BUGFIX: $answer is quoted - unquoted, an empty reply (e.g. EOF or plain
# Enter) made the test expand to `[ = "n" ]`, a runtime syntax error.
if [ "$answer" = "n" ]; then
  echo "正解です! 日本で二番目に高い山は北岳です"
else
  echo "残念! 日本で二番目に高い山は北岳です"
fi
| true |
2ca866f7f00014408531ad233d55d073bf0744e5 | Shell | JeffersonLab/alarms-filter | /scripts/list-filters.sh | UTF-8 | 812 | 2.890625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Refuse to start without the Kafka bootstrap servers list.
[ -z "$BOOTSTRAP_SERVERS" ] && echo "BOOTSTRAP_SERVERS environment required" && exit 1;
# Resolve the application home relative to this script's real location.
CWD=$(readlink -f "$(dirname "$0")")
APP_HOME=$CWD/..
# Locate the single versioned jar for each runtime dependency.
# (Modernized: $( ) command substitution instead of legacy backticks.)
FILTER_JAR=$(ls $APP_HOME/lib/alarms-filter*)
CLIENTS_JAR=$(ls $APP_HOME/lib/kafka-clients-*)
JACK_CORE=$(ls $APP_HOME/lib/jackson-core-*)
JACK_BIND=$(ls $APP_HOME/lib/jackson-databind-*)
JACK_ANN=$(ls $APP_HOME/lib/jackson-annotations-*)
SLF4J_API=$(ls $APP_HOME/lib/slf4j-api-*)
SLF4J_IMP=$(ls $APP_HOME/lib/slf4j-log4j*)
LOG4J_IMP=$(ls $APP_HOME/lib/log4j-*)
# NOTE(review): $LOG4J_CONF is never assigned in this script, leaving an
# empty classpath entry - confirm whether it should be set or removed.
RUN_CP="$FILTER_JAR:$CLIENTS_JAR:$SLF4J_API:$SLF4J_IMP:$LOG4J_IMP:$LOG4J_CONF:$JACK_CORE:$JACK_BIND:$JACK_ANN"
java -Dlog.dir=$APP_HOME/logs -Dlog4j.configuration="file://$APP_HOME/config/log4j-client.properties" -cp $RUN_CP org.jlab.alarms.client.CommandConsumer $BOOTSTRAP_SERVERS filter-commands | true |
4b28f058e019aae857199afeb5e17487932c5ef5 | Shell | JosephBrendler/joetoo | /dev-util/joetoolkit/files/utility_archive/00_crypt_storage.start | UTF-8 | 1,473 | 3.484375 | 3 | [
"MIT"
] | permissive | #! /bin/bash
# Load shared helpers (message_n, d_message, color variables, etc.).
source /usr/local/sbin/script_header_brendlefly
VERBOSE=$TRUE
verbosity=3
# (To Do) check and (if necessary) start lagging encrypted storage
# Iterate over every block device blkid reports as a LUKS container.
for x in $(blkid | grep -i luks | cut -d: -f1)
do
device="$x"
# Derive the crypt-mapping name from the device node (e.g. /dev/sda2 -> eda2).
# NOTE(review): the sed substitution assumes /dev/sdXN naming - confirm it
# matches this site's dm-crypt naming convention.
name=$(echo $device | sed 's:/dev/s:e:')
message_n "For device [${BYon}${device}${Boff}], crypt-name [${LBon}${name}${Boff}] is: " 1
status=$(cryptsetup status $name | grep active | awk '{print $3}')
echo -e "${BWon}${status}${Boff}"
if [[ ! "$status" == "active" ]]
then
d_message "About to mount thumbdrive and run command ${BYon}\"cryptsetup -d /mnt/thumb/crypt/dat luksOpen $device $name\"${Boff}" 1
mount /mnt/thumb ## need to have this in fstab, with no-auto
# Unlock with the key file kept on the thumb drive, then activate LVM.
cryptsetup -d /mnt/thumb/crypt/dat luksOpen $device $name
vgscan
vgchange -ay
# mount encrypted storage (should have been activated by /etc/conf.d/dmcrypt) using entry in /etc/fstab
d_message "About to run command ${BYon}\"mount -a\"${Boff}"
mount -a
umount /mnt/thumb
/usr/bin/logger -p cron.notice "$0 found $device, $name not started, and attempted to correct"
else
/usr/bin/logger -p cron.notice "$0 found $device, $name already started"
fi
done
# Summarize the resulting LVM and mount state for the log.
d_message "------------------[ Here is what's up ]-------------------"
echo ">>> running pvs <<<"
pvs && echo
echo ">>> running vgs <<<"
vgs && echo
echo ">>> running lvs <<<"
lvs && echo
echo ">>> running \"mount | grep /dev/mapper\" <<<"
mount | grep /dev/mapper && echo
| true |
bfac36d74fa6e9465c45cb2d2d9d735965d7a15e | Shell | gentoo/genkernel | /defaults/unlock-zfs.sh | UTF-8 | 2,119 | 3.59375 | 4 | [] | no_license | #!/bin/sh
# Genkernel initramfs helpers (good_msg/bad_msg, run, logging, ...).
. /etc/initrd.defaults
. /etc/initrd.scripts
GK_INIT_LOG_PREFIX=${0}
# When invoked over the remote-unlock SSH session, tag log lines with the
# client address for traceability.
if [ -n "${SSH_CLIENT_IP}" ] && [ -n "${SSH_CLIENT_PORT}" ]
then
GK_INIT_LOG_PREFIX="${0}[${SSH_CLIENT_IP}:${SSH_CLIENT_PORT}]"
fi
# The 'dozfs' boot parameter makes init write this environment file; without
# it we cannot know which encrypted root to unlock.
if [ -f "${ZFS_ENC_ENV_FILE}" ]
then
. "${ZFS_ENC_ENV_FILE}"
else
bad_msg "${ZFS_ENC_ENV_FILE} does not exist! Did you boot without 'dozfs' kernel command-line parameter?"
exit 1
fi
# Validate the environment, then prompt (via `zfs load-key`) until the
# encrypted root dataset's key is loaded or another session has already
# unlocked it (signalled by ZFS_ENC_OPENED_LOCKFILE). Exits 1 on any
# unrecoverable condition so the caller does not loop on bad input.
main() {
	if ! hash zfs >/dev/null 2>&1
	then
		bad_msg "zfs program is missing. Was initramfs built without --zfs parameter?"
		exit 1
	elif ! hash zpool >/dev/null 2>&1
	then
		bad_msg "zpool program is missing. Was initramfs built without --zfs parameter?"
		exit 1
	elif [ -z "${ROOTFSTYPE}" ]
	then
		bad_msg "Something went wrong. ROOTFSTYPE is not set!"
		exit 1
	elif [ "${ROOTFSTYPE}" != "zfs" ]
	then
		bad_msg "ROOTFSTYPE of 'zfs' required but '${ROOTFSTYPE}' detected!"
		exit 1
	elif [ -z "${REAL_ROOT}" ]
	then
		bad_msg "Something went wrong. REAL_ROOT is not set!"
		exit 1
	fi
	# REAL_ROOT is pool/dataset; %%/* extracts the pool name for zpool.
	if [ "$(zpool list -H -o feature@encryption "${REAL_ROOT%%/*}" 2>/dev/null)" != 'active' ]
	then
		bad_msg "Root device ${REAL_ROOT} is not encrypted!"
		exit 1
	fi
	local ZFS_ENCRYPTIONROOT="$(get_zfs_property "${REAL_ROOT}" encryptionroot)"
	if [ "${ZFS_ENCRYPTIONROOT}" = '-' ]
	then
		bad_msg "Failed to determine encryptionroot for ${REAL_ROOT}!"
		exit 1
	fi
	local ZFS_KEYSTATUS=
	while true
	do
		# Another unlock session (e.g. local console vs. SSH) may win the race.
		if [ -e "${ZFS_ENC_OPENED_LOCKFILE}" ]
		then
			good_msg "${REAL_ROOT} device meanwhile was opened by someone else."
			break
		fi
		zfs load-key "${ZFS_ENCRYPTIONROOT}"
		ZFS_KEYSTATUS="$(get_zfs_property "${REAL_ROOT}" keystatus)"
		if [ "${ZFS_KEYSTATUS}" = 'available' ]
		then
			run touch "${ZFS_ENC_OPENED_LOCKFILE}"
			good_msg "ZFS device ${REAL_ROOT} opened"
			break
		else
			bad_msg "Failed to open ZFS device ${REAL_ROOT}"
			# We need to stop here with a non-zero exit code to prevent
			# a loop when invalid keyfile was sent.
			exit 1
		fi
	done
	if [ "${ZFS_KEYSTATUS}" = 'available' ]
	then
		# Kill any running load-key prompt.
		run pkill -f "load-key" >/dev/null 2>&1
	fi
}
# Unlock the encrypted root, then report success to the caller.
main
exit 0
| true |
7a073e2aac8400eb22fc584ea99e1e7f996905c1 | Shell | argoproj/argo-cd | /test/remote/entrypoint.sh | UTF-8 | 274 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Abort on the first failing command.
set -e
# When running as root with a host uid supplied, create a matching user so
# files written to mounted volumes keep the caller's ownership.
if test "$(id -u)" == "0" -a "${USER_ID}" != ""; then
    useradd -u ${USER_ID} -d /home/user -s /bin/bash ${USER_NAME:-default}
    chown -R "${USER_NAME:-default}" ${GOCACHE}
fi
# Expose the Go toolchain to the command run below.
# NOTE(review): consider `exec "$@"` at the end so signals reach the command
# directly rather than the wrapping shell - confirm before changing.
export PATH=$PATH:/usr/local/go/bin:/go/bin
export GOROOT=/usr/local/go
5e038f60f08c55d48904cfcb3c398962d567cc78 | Shell | gphalkes/t3window | /testsuite/showtestdiff.sh | UTF-8 | 210 | 2.96875 | 3 | [] | no_license | #!/bin/bash
DIR="`dirname \"$0\"`"
. "$DIR"/_common.sh
if [ $# -eq 0 ] ; then
fail "Usage: showtestdiff.sh <test> [<subtest>]"
fi
setup_TEST "$1"
dwdiff -Pc -C3 $TEST/recording $TEST/recording.new
exit 0
| true |
0e88f3f5c0b5f9e1f6c6288111dc9d845024240f | Shell | novatakm/tool | /util/calc_datesubstr.sh | UTF-8 | 348 | 3.984375 | 4 | [] | no_license | #!/usr/bin/env bash
# function calc_date() {
# expr \( `date -d"$1" +%s` - `date -d"$2" +%s` \) / 86400
# }
function calc_date() {
expr \( $( date +%s --date "$1" ) - $( date +%s --date "$2" ) \) / 86400
}
function usage() {
echo "Usage: $(basename $0) datestr datestr"
exit 1
}
if [ $# -ne 2 ]; then
usage
fi
calc_date $1 $2 | true |
6da2ebde1bd7225fc742200edf5f3a0b6eaca1fc | Shell | jatin-reglobe/material2 | /scripts/publish-npm.sh | UTF-8 | 679 | 4.1875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Script to publish the build artifacts to a npm repository.
# Builds will be automatically published once new changes are made to the repository.
set -e -o pipefail
# Go to the project root directory
cd $(dirname $0)/..
# Create a release of the current module.
. $(dirname $0)/build.sh
echo "Start creating release of the current module"
# check for successful building
if [ $? -eq 0 ]; then
echo "Creating release of the current module success"
else
echo "Error is not recoverable: exiting now"
exit
fi
buildDir="dist/releases"
echo "npm publish ${buildDir} ${1} ${2}"
npm publish ${buildDir} $1 $2
echo "Finished publishing build artifacts to npm"
| true |
29904989c5f8d4716710ba5c8a67d20f108a87a7 | Shell | davidherczeg/TourLoop | /deploy.sh | UTF-8 | 961 | 3.375 | 3 | [] | no_license | #!/bin/bash
# get production version of front end (if not uploaded already)
cd client
npm install && npm run build
cd ../
# skip this step by pre-downloading edmonton-OSM-data.xml into raw-data from:
# https://drive.google.com/drive/u/0/folders/1csxY4bgFG6Vt3tK8NuExde1NCZFQ3Dan
# get the original datafile
# if not already there
if [ ! -f ./raw-data/edmonton-OSM-data.xml ]; then
cd raw-data
echo "Database file not found... downloading from overpass API"
./download.sh
cd ../
fi
# rebuild the database (prepare for it at least)
echo "generating db files, this may take a while..."
cd database
pip3 install neo4j polyline vincenty || true
./rebuild-database-files.sh
cd ../
# docker up the services
echo "Starting db and backend containers"
docker-compose up -d --build
# auto-add the neo4j indexes
echo "Adding neo4j indexes for improved performance"
sleep 30s
cd database
python3 index_creator.py
# if that failed, wait a bit then retry
| true |
77e851d834c9fb8afdd052e2310ffeef179ed324 | Shell | nadz-goldman/freeton-rustnode-ansible | /roles/monitoring_agent/files/scripts/ton-election-date.sh | UTF-8 | 898 | 3.390625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -eE
# export ton environments
. ton-env.sh
# Abort unless the required tool/config variables are defined.
ton-check-env.sh TON_CLI
ton-check-env.sh TON_CLI_CONFIG
# Query the elector contract for the active election id (mainnet-style
# elector via runget); the result lands in the ELECTIONS_START global.
get_election_date ()
{
ELECTIONS_START=$($TON_CLI -c $TON_CLI_CONFIG runget $ELECTOR_ADDR active_election_id | grep 'Result:' | sed 's/Result:[[:space:]]*//g' | tr -d \"[])
}
# rustcup have unique elector
# Same query against the rustnet elector ABI; the JSON result is parsed
# with jq into ELECTIONS_START.
get_election_date_rustcup ()
{
ELECTION_RESULT=`$TON_CLI -c $TON_CLI_CONFIG run $ELECTOR_ADDR active_election_id {} --abi $TON_CONTRACT_ELECTOR_ABI`
ELECTIONS_START=$(echo $ELECTION_RESULT | awk -F'Result: ' '{print $2}' | jq -r '.value0' )
}
# get elector address
ELECTOR_ADDR="-1:$($TON_CLI -c $TON_CLI_CONFIG getconfig 1 | grep 'p1:' | sed 's/Config p1:[[:space:]]*//g' | tr -d \")"
if [ $TON_IS_RUSTNET -eq 1 ]; then
get_election_date_rustcup
else
get_election_date
fi
# No active election: emit the sentinel value for the monitoring agent.
if [ -z $ELECTIONS_START ]; then
echo "-1";
exit
fi
echo "$ELECTIONS_START" | true |
6ed568ed99e10ac2c8fbb85517dd1c8fbe5cac30 | Shell | Favorite-Icons-Of-Internet/Favorite-Icons-Of-Internet | /steps_1_and_2.sh | UTF-8 | 438 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# step 1 - import latest alexa rankings
# Stream the Alexa top-1M zip and decompress it on the fly into a CSV.
curl -s http://s3.amazonaws.com/alexa-static/top-1m.csv.zip | funzip > top-1m.csv
php step1.php < top-1m.csv
# step 2 - get all currently ranked domains and send jobs to the queue
rm -rf /tmp/.step2/
mkdir -p /tmp/.step2/
# Shard the domain list into 1000-line job files (5-char suffixes).
php step2.php | split -a 5 -l 1000 - /tmp/.step2/job_
for i in $( ls /tmp/.step2/); do
enqueue FaviconPipelineDomains </tmp/.step2/$i
done
rm -rf /tmp/.step2/
| true |
290fbe784be13bdc774cccda1b9928b1736bcebf | Shell | nzvincent/nzvincent-github | /docker/migrate-docker-images/migrate-images.sh | UTF-8 | 1,278 | 3.625 | 4 | [] | no_license | #!/bin/bash
# This script create docker container / images tar files to be used to load into another docker host.
# Author: nzvincent@gmail.com | Vincent Pang
IMG=images.txt
CON=containers.txt
PS=ps-out.txt
LOG=log-migration.log
LOAD=load-script.txt
# Start the generated load script fresh on each run.
echo "" > ${LOAD}
# Snapshot current images/containers, both to screen and to the text files.
docker images | tee ${IMG}
docker ps -as | tee ${PS}
docker ps --format "{{.ID}},{{.Image}},{{.Names}}" | tee ${CON}
echo "" >> ${LOG}
echo "`date` - Migration start " >> ${LOG}
cat ${IMG} >> ${LOG}
cat ${CON} >> ${LOG}
echo "" | tee -a ${LOG}
echo "Saving container into tar files..." | tee -a ${LOG}
# One CSV record per container: id,image,name.
while read con; do
cid=`echo ${con} | awk -F, '{print $1}'`
cimg=`echo ${con} | awk -F, '{print $2}'`
cname=`echo ${con} | awk -F, '{print $3}'`
# Convert name to lower case and migration image name and commit to images
mname="migrate-`echo ${cname}|tr '[:upper:]' '[:lower:]'`:29dec2017"
echo ${mname}
#docker commit ${cname} ${mname}
# NOTE(review): with `docker commit` commented out above, the tagged image
# ${mname} may not exist and `docker save` below would fail - confirm.
# Save images to tar files
tarname="${mname}.tar.gz"
echo "Saving image file to ${tarname}" | tee -a ${LOG}
docker save ${mname} > ${tarname}
# Generate load images
echo "docker load < ${tarname}" | tee -a ${LOAD}
echo "" | tee -a ${LOG}
done < ${CON}
echo "" | tee -a ${LOG}
cat ${LOAD} >> ${LOG}
echo "" | tee -a ${LOG}
docker images | tee -a ${LOG}
| true |
622904b5812b0d9ad228178996a3baf2f53f6c06 | Shell | liodopolus/scriptcol-current | /archiv/archiv-alpha/lftp_kde-4.sc | UTF-8 | 260 | 2.84375 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/sh
# getting KDE4-Source
VERSION=4.0.72
WDIR=kde-$VERSION
mkdir -p $WDIR
cd $WDIR
# Write an lftp batch script; the '!' here-doc delimiter is unquoted, so
# $VERSION is substituted into the mirror command below.
cat > scriptfile << !
lftp ftp://ftp.fu-berlin.de/pub/unix/X11/gui/kde/unstable \\
lftp -c mirror -n $VERSION .
!
lftp -f scriptfile
# NOTE(review): lftp ran in the foreground, so this `wait` is a no-op.
wait
rm scriptfile
echo "finished"
| true |
32359d6c3174f0ac42e3399304946dc448ba319b | Shell | electroducer/acholi4kaldi | /local/make_flists_swahili.sh | UTF-8 | 3,790 | 3.890625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -u
# Borrowed heavily from gp script of same name
set -o errexit
set -o pipefail
#######################################
# Extract the directory value from a --name=DIR style argument and
# print its absolute path.  Exits the whole script if the value does
# not name an existing directory.
# Arguments: $1 - option string of the form --name=DIR
# Outputs:   absolute path of DIR on stdout
#######################################
function read_dirname () {
  # Parameter expansion replaces the legacy `expr` call: drop
  # everything up to and including the first '='.
  local dir_name=${1#*=}
  [ -d "$dir_name" ] || { echo "Argument '$dir_name' not a directory" >&2; \
    exit 1; }
  local retval
  # Quote the path so directories with spaces resolve correctly; keep
  # the assignment separate from `local` so a cd failure is not masked.
  retval=$(cd "$dir_name" 2>/dev/null && pwd) || retval=""
  echo "$retval"
}
# Name of this script, used in the usage message.
PROG=`basename $0`;
# Usage text; printed with `echo -e` so the embedded \n / \t expand.
usage="Usage: $PROG <arguments> <acholi|luganda|uenglish>\n
Prepare train, dev, eval file lists for a language.\n\n
Required arguments:\n
--list-dir=DIR\t\tPlace where speaker lists are stored\n
--work-dir=DIR\t\tPlace to write the files (in a subdirectory within lang)\n
";
# Require at least the two --*-dir options plus the language argument.
if [ $# -lt 3 ]; then
echo -e $usage; exit 1;
fi
# Walk the argument list; any argument that is not a recognised option
# is taken to be the language code and stored in L.
while [ $# -gt 0 ];
do
case "$1" in
--help) echo -e $usage; exit 0 ;;
--work-dir=*)
# read_dirname validates the value and echoes its absolute path.
WDIR=`read_dirname $1`; shift ;;
--list-dir=*)
LIST_PATH=`read_dirname $1`; shift ;;
*) L=$1; shift ;;
esac
done
# Source the environment file, if present, to put required tools on PATH.
[ -f path.sh ] && . path.sh # Sets the PATH to contain necessary executables
# Scratch directory; the trap removes it on any exit path.
tmpdir=$(mktemp -d /tmp/kaldi.XXXX);
trap 'rm -rf "$tmpdir"' EXIT
# Directory to write file lists & transcripts
ODIR=$WDIR/$L/local/data
# ODIR_TR=${ODIR}_trimmed
mkdir -p $ODIR
# mkdir -p $ODIR_TR
echo "Saving files to $ODIR"
# echo "Saving trimmed files to $ODIR_TR"
echo "Getting lists from $LIST_PATH"
echo "Creating files for all speakers..."
# Get all unique speakers from the speaker list
# in order to divide them up into sets
# cut -d" " -f2 $DATA_PATH/$L/speakers/all_spk.txt | sed -e 's/[^0-9]$//' \
# | sort | uniq > $tmpdir/uniq_spk
#
# # Get the recording numbers from the original mp3 files
# ls $DATA_PATH/$L/mp3 | sed -e "s/.mp3$//" \
# > $tmpdir/all_recs
# NOTE(review): $DATA_PATH is never assigned by the option parser in
# this script -- presumably it is exported by path.sh or the calling
# environment; confirm before running standalone.
# Get the list of transcript files
trans=$tmpdir/trans.list
ls -1 $DATA_PATH/$L/trl/*.trl > $trans
# Just need to clean up the transcripts
# NOTE(review): $tmpdir/${L}.trans is not created anywhere above (the
# step that built it is commented out) -- presumably the cleaning
# script produces it from the .trl list; verify.
local/clean_trans_swahili.sh $tmpdir/${L}.trans
# Sanity check: select only transcript files that have recordings
# and transcripts that are not blank
# then combine them into a single file
# sed 's/$/./' $tmpdir/all_recs > $tmpdir/all_recs_dot
# sed '/^[0-9_]\+\s\+$/d' $(grep -f $tmpdir/all_recs_dot $trans) \
# > $tmpdir/${L}.trans
# Any blank lines (ID only) would be deleted here
# Clean the transcriptions (see cleaning script for details)
# clean_trans_acholi.pl $tmpdir/${L}.trans \
# > $tmpdir/${L}_clean.trans
# Some changes for acholi
# After cleaning, there still might blanks and junk
# awk '$0=NF' $tmpdir/${L}_clean.trans > $tmpdir/nwords
# paste $tmpdir/nwords $tmpdir/${L}_clean.trans \
# | egrep "^2[^0-9]" | egrep "(junk|<fil>)" \
# | cut -f2 | cut -d" " -f1 > $tmpdir/junk_ids
# Make a list of all wav files (for each utt)
ls $DATA_PATH/$L/wav/*.wav > $ODIR/${L}_wav.flist
# Make the master scp file for all wavs
# (strip the directory and the .wav suffix to get per-utterance IDs)
sed -e "s?.*/??" -e 's?.wav$??' $ODIR/${L}_wav.flist \
> $tmpdir/basenames_wav
paste $tmpdir/basenames_wav $ODIR/${L}_wav.flist | sort -k1,1 \
> $tmpdir/${L}_wav.scp
# We're going to make a copy of the IDs for now
cut -f1 $tmpdir/${L}_wav.scp > $tmpdir/basenames_wav2
# Now for the hard part. Find the intersection of speaker data,
# transcription data, and wav data
# This gets the intersection of wavs and transcripts
# (transcript IDs are prefixed with "SA" so they match the wav basenames)
cut -d" " -f1 $tmpdir/${L}.trans | awk '{print "SA" $0}' | sort \
| join $tmpdir/basenames_wav2 - > $tmpdir/basenames
# Keep only the wavs in the final list
join $tmpdir/basenames $tmpdir/${L}_wav.scp \
> $ODIR/wav.scp
# Keep only the transcripts in the final list
cat $tmpdir/${L}.trans | awk '{print "SA" $0}' | sort \
| join $tmpdir/basenames - \
> $ODIR/text
# Keep only the speaker ids in the final list
# (the speaker ID is the utterance ID up to the first underscore)
sed 's/_.*//' $tmpdir/basenames | paste $tmpdir/basenames - \
> $ODIR/utt2spk
# And the speakers to utterances
utt2spk_to_spk2utt.pl $ODIR/utt2spk \
> $ODIR/spk2utt || exit 1;
echo "Done."
| true |
2e27356ed53f34c6b2366d4eef6aca81b1df6c7e | Shell | WSULib/combine-vm | /install_scripts/elasticsearch.sh | UTF-8 | 1,357 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
echo "---- Installing Elasticsearch ------------------------------------------------"

#### GET ENVARS #################################################
# First argument: the shared provisioning directory that holds
# config/envvars and the config files installed below.
SHARED_DIR=$1

if [ -f "$SHARED_DIR/config/envvars" ]; then
. "$SHARED_DIR/config/envvars"
printf "found your local envvars file. Using it."
else
. "$SHARED_DIR/config/envvars.default"
printf "found your template file. Using its default values."
fi
#################################################################

# The package name/version is used in several places; keep it in one spot.
ES_DEB=elasticsearch-5.5.2.deb

cd /tmp/ || exit 1
wget "https://artifacts.elastic.co/downloads/elasticsearch/${ES_DEB}"
wget "https://artifacts.elastic.co/downloads/elasticsearch/${ES_DEB}.sha1"

# Verify the download: compare our SHA-1 against the published one.
generated=$(sha1sum "${ES_DEB}" | awk '{print $1}')
provided=$(cat "${ES_DEB}.sha1")

if [ "$generated" == "$provided" ]; then
printf "SHA matches"
else
printf "SHA comparison doesn't match. Check to see if you're downloading the correct Elasticsearch"
exit 1
fi

# Install Elasticsearch
dpkg -i "${ES_DEB}"
cp "$SHARED_DIR/config/files/elasticsearch.yml" /etc/elasticsearch/elasticsearch.yml
cp "$SHARED_DIR/config/files/jvm.options" /etc/elasticsearch/jvm.options

# Enable Elasticsearch upon boot
/bin/systemctl daemon-reload
/bin/systemctl enable elasticsearch.service

# Start up elasticsearch
service elasticsearch stop
service elasticsearch start
| true |
6f61bea7d3b5a95bf231be69775d4fd190c086a0 | Shell | chendo/oh-my-zsh | /modules/command-not-found/init.zsh | UTF-8 | 184 | 2.609375 | 3 | [
"MIT"
] | permissive | #
# Displays installation information for not found commands.
#
# Authors:
# Joseph Jon Booker <joe@neoturbine.net>
#
# Debian/Ubuntu's command-not-found package ships this hook, which
# suggests installable packages when an unknown command is typed.
# Load it only when the file is actually present on this system.
if [[ -f '/etc/zsh_command_not_found' ]]; then
  . '/etc/zsh_command_not_found'
fi
| true |
2a09b31182ef9431ef918e6b207b5424283966b2 | Shell | benjamin-asdf/dotfiles-legacy | /home/.local/bin/team-cmd-scripts/install-team-cmd-scripts.sh | UTF-8 | 593 | 3.6875 | 4 | [] | no_license | #!/bin/sh
# Run this once to add this folder to your PATH.
# It will make it so you can run the scripts from anywhere.

# Guard: must be run from inside TeamCommands/scripts.  Use a command
# group, not a subshell -- `exit` inside ( ... ) only leaves the
# subshell, so the original form kept going and polluted ~/.bashrc
# even when run from the wrong directory.
pwd | grep -q '\/TeamCommands\/scripts$' || { echo "Enter TeamCommand/scripts folder first."; exit 1; }

printf "\nexport COSDIR=\"C:/ClashOfStreamers\"" >> "$HOME/.bashrc"
printf "\nPATH=\"\$PATH:%s\"\nexport PATH" "$(pwd)" >> "$HOME/.bashrc" && . "$HOME/.bashrc"
printf "Added %s to your path in %s. You might want to adjust this file.\n" "$(pwd)" "$HOME/.bashrc"
[ -d "$COSDIR" ] || printf "%s is not a directory. This looks like a wrong setup. Fix the path." "$COSDIR"
| true |
887692abc160f40c390db9e1f075b86382defc85 | Shell | Sanaqamar123/ProgramConstruct | /Array/arrayPrimeFactor.sh | UTF-8 | 259 | 3.859375 | 4 | [] | no_license | #!/bin/bash
# Print the prime factorisation of a user-supplied number.
declare -a value
value=()

# prime_factors N
# Emit each prime factor of N (with multiplicity) on its own line,
# using trial division and dividing N down as factors are found.
# The original loop printed every divisor of N (e.g. 12 -> 2 3 4 6 12),
# not its prime factors; this fixes that.
prime_factors() {
  local n=$1 p=2
  while (( n > 1 )); do
    if (( n % p == 0 )); then
      printf '%s\n' "$p"
      (( n /= p ))
    else
      (( p++ ))
    fi
  done
}

echo "Enter number : "
# -r keeps backslashes literal; on EOF (no input) skip the rest
# gracefully instead of erroring on an empty number.
if IFS= read -r num; then
  # Proper array append (value+="x" would only string-append to
  # element 0 of the array).
  mapfile -t value < <(prime_factors "$num")
  for i in "${value[@]}"; do
    echo "Prime Factor of a $num is $i"
  done
  echo "Prime Factor of a $num in array is : ${value[*]}"
fi
| true |
c669eb52ef81070c0862f621f09d4d3f3a5564ae | Shell | quynhlab/KodeKloud_Tasks | /Tasks_21-30/TASK_26-Linux_Nginx_as_Reverse_Proxy.sh | UTF-8 | 73,471 | 3.609375 | 4 | [] | no_license |
-----------------------------------------------------------------------------------------------------------------
Start 2020-07-29 || 11:22 PM
Finished 2020-07-30 || 03:18 AM
-----------------------------------------------------------------------------------------------------------------
TASK 26 - Linux Nginx as Reverse Proxy
REQUIREMENTS:
Nautilus system admin team is planning to deploy a front end application for their backup utility on Nautilus Backup Server, so that they can manage the backups of different websites from a graphical user interface. They have shared requirements to set up the same; please accomplish the tasks as per detail given below:
a. Install Apache Server on Nautilus Backup Server and configure it to use 5001 port (do not bind it to 127.0.0.1 only, keep it default i.e let Apache listen on server IP, hostname, localhost, 127.0.0.1 etc).
b. Install Nginx webserver on Nautilus Backup Server and configure it to use 8099.
c. Configure Nginx as a reverse proxy server for Apache.
d. There is a sample index file /home/index.html on Jump Host, copy that file to Apache document root.
e. Make sure to start Apache and Nginx services.
f. You can test final changes using curl command, e.g curl http://<backup server IP or Hostname>:8099.
-----------------------------------------------------------------------------------------------------------------
CONTENT:
I. REFERENCES
II. STEPS - SUMMARY
III. COMMANDS USED
IV. STEPS - BREAKDOWN
-----------------------------------------------------------------------------------------------------------------
I. REFERENCES
https://community.kodekloud.com/t/linux-nginx-as-reverse-proxy/2089
https://community.kodekloud.com/t/linux-nginx-as-reverse-proxy-failed/2140
https://community.kodekloud.com/t/linux-nginx-as-reverse-proxy-task/2192/3
https://community.kodekloud.com/t/linux-nginx-reverse-proxy/2510
-----------------------------------------------------------------------------------------------------------------
II. STEPS - SUMMARY
# There are actually a lot of steps involved in this lab.
# An entirely new set of tasks - definitely a draining one.
1. Connect via SSH to the backup server.
2. Install the httpd and nginx and configure each to listen to the required ports.
3. Enable and start nginx service. Verify that httpd and nginx is active.
4. Return to the jumphost, then copy the index.html from jumphost to the Backup server.
5. Verify that everything is working as they should be -- use curl.
-----------------------------------------------------------------------------------------------------------------
III. COMMANDS USED
# 1. Connect via SSH to the backup server.
sshpass -p '******' ssh -o StrictHostKeyChecking=no clint@172.16.238.16
# 2. Install the httpd and nginx and configure each to listen to the required ports. Restart httpd.
sudo yum install httpd -y
# As suggested in the various articles in KKC, the listening port can be updated
# in the httpd.conf file -- located in /etc/httpd/conf
# As always, I first check and verify that file is in the specified directory.
ls -l /etc/httpd/conf
cd /etc/httpd/conf
cat httpd.conf
# once verified, update the file to use 5001 as port
sudo vi httpd.conf
Listen 5001
# Restart httpd
# sudo systemctl restart httpd
# Once httpd is good, proceed with installing nginx.
# Now there's an entire website that has the complete steps for the nginx installation.
# I initially followed the steps provided in the internet but decided to just follow the
# the steps that I somehow piece-up together from the different KKC articles.
sudo yum install epel-release -y
sudo yum install -y nginx
# Now one of the steps provided in the website was to first create a backup of the config file.
# This is probably a best practice just in case I mess up the original.
# However, I did not backup the config file for httpd. Will probably keep this in mind
# and do this as an additional step next for security next time.
sudo cp /etc/nginx/nginx.conf /etc/nginx/nginx.conf.bak
# again, it always good to make sure that the backup file was created.
ls -l /etc/nginx/
# after verifying, update the specified file.
sudo vi /etc/nginx/nginx.conf
# now the nginx.conf is a very long list of configurations, but there are some lines
# that need to be changed.
# 1ST - Change 'user nginx' to 'user apache'
user apache
# 2ND - The server section must look something like this.
# Note that the 8099 is the required listening port of the nginx.
server {
listen 8099 default_server;
listen [::]:8099;
server_name 172.16.238.16;
# 3RD - the location section must have the root
# the root should be set to '/var/www/html'
# note that the 5001 is the required listening port of the apache.
location
root /var/www/html;
proxy_pass http://127.0.0.1:5001/;
# 5001 is apache port
proxy_redirect off;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
}
# an additional requirement was to create a proxy.conf.
# now for this one, I really don't have an idea as to why this nginx.conf is needed.
# I did not also bothered to read it up in the internet as I was focused on finishing the task.
# Will definitely brush up on the use of this file next time I encounter a similar task.
sudo vi /etc/nginx/conf.d/proxy.conf
# now the proxy.conf configuration was just provided in the article.
# I did not read it up. Will just research this next time I get a similar task.
# the proxy.conf should have the following configuration:
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
# 3. Enable and start nginx service. Verify that httpd and nginx is active.
# This was actually one of the hard parts. I was stuck here for quite some time.
# I couldn't seem to start the nginx service. Luckily there were some troubleshooting steps
# provided in the KKC article. Some of those troubleshooting steps/commands were:
nginx -t
journalctl -xe | grep emerg
# Both of the steps above were actually helpful in pointing out the part that I got wrong.
# Turns out that there were just some incorrect syntax which I managed to change and I was able
# to start the service.
# Still, it took me quite a number of tries before succeeding on this part.
sudo systemctl enable nginx
sudo systemctl start nginx
# As always, verify that the services are active and running
sudo systemctl status httpd
sudo systemctl status nginx
# 4. Return to the jumphost, then copy the index.html from jumphost to the Backup server.
# Check first the file in the directory.
ls -l /home/
# Copy the file from the jumphost to the Backup server.
# Just like the other labs, the jumphost user doesn't have permission on the
# specified backup server directory.
# So I just copied the index.html into a temporary directory inside the Backup server.
# Once it is copied, i proceeded to the Backup server, and using the backup server user,
# I moved the file to the specified directory.
sudo scp -r /home/index.html clint@172.16.238.16:/tmp
# enter jumhost password ******
# enter backup server password ******
sshpass -p '******' ssh -o StrictHostKeyChecking=no clint@172.16.238.16
ls -l /tmp/
sudo mv /tmp/index.html /var/www/html/
# verify if the file was moved into the correct directory within the backup server.
ls -l /var/www/html/
# 5. Verify that everything is working as they should be -- use curl.
curl http://172.16.238.16:8099
curl http://172.16.238.16:5001
-----------------------------------------------------------------------------------------------------------------
IV. STEPS - BREAKDOWN
# Definitely an exhausting lab. But got so many valuable learnings here.
# Though I was not able to absorb all of them. Might need to redo this in my personal lab.
# Maybe in the EC2 instances or in my VirtualBox.
thor@jump_host /$ sshpass -p '******' ssh -o StrictHostKeyChecking=no clint@172.16.238.16Warning: Permanently added '172.16.238.16' (ECDSA) to the list of known hosts.
[clint@stbkp01 ~]$
[clint@stbkp01 ~]$ sudo yum install httpd -y
We trust you have received the usual lecture from the local System
Administrator. It usually boils down to these three things:
#1) Respect the privacy of others.
#2) Think before you type.
#3) With great power comes great responsibility.
[sudo] password for clint:
Loaded plugins: fastestmirror, ovl
Loading mirror speeds from cached hostfile * base: mirror.alpix.eu
* extras: mirror.ratiokontakt.de
* updates: mirror.softaculous.com
Resolving Dependencies
--> Running transaction check
---> Package httpd.x86_64 0:2.4.6-93.el7.centos will be installed
--> Processing Dependency: httpd-tools = 2.4.6-93.el7.centos for package: httpd-2.4.6-93.el7.centos.x86_64
--> Processing Dependency: system-logos >= 7.92.1-1 for package: httpd-2.4.6-93.el7.centos.x86_64
--> Processing Dependency: /etc/mime.types for package: httpd-2.4.6-93.el7.centos.x86_64
--> Processing Dependency: libaprutil-1.so.0()(64bit) for package: httpd-2.4.6-93.el7.centos.x86_64
--> Processing Dependency: libapr-1.so.0()(64bit) for package: httpd-2.4.6-93.el7.centos.x86_64
--> Running transaction check
---> Package apr.x86_64 0:1.4.8-5.el7 will be installed
---> Package apr-util.x86_64 0:1.5.2-6.el7 will be installed
---> Package centos-logos.noarch 0:70.0.6-3.el7.centos will be installed
---> Package httpd-tools.x86_64 0:2.4.6-93.el7.centos will be installed
---> Package mailcap.noarch 0:2.1.41-2.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================================
Package Arch Version Repository Size
================================================================================================
Installing:
httpd x86_64 2.4.6-93.el7.centos base 2.7 M
Installing for dependencies:
apr x86_64 1.4.8-5.el7 base 103 k
apr-util x86_64 1.5.2-6.el7 base 92 k
centos-logos noarch 70.0.6-3.el7.centos base 21 M
httpd-tools x86_64 2.4.6-93.el7.centos base 92 k
mailcap noarch 2.1.41-2.el7 base 31 k
Transaction Summary
================================================================================================
Install 1 Package (+5 Dependent packages)
Total download size: 24 M
Installed size: 32 M
Downloading packages:
(1/6): apr-util-1.5.2-6.el7.x86_64.rpm | 92 kB 00:00:00
(2/6): apr-1.4.8-5.el7.x86_64.rpm | 103 kB 00:00:00
(3/6): httpd-2.4.6-93.el7.centos.x86_64.rpm | 2.7 MB 00:00:00
(4/6): httpd-tools-2.4.6-93.el7.centos.x86_64.rpm | 92 kB 00:00:00
(5/6): mailcap-2.1.41-2.el7.noarch.rpm | 31 kB 00:00:00
(6/6): centos-logos-70.0.6-3.el7.centos.noarch.rpm | 21 MB 00:00:00
------------------------------------------------------------------------------------------------
Total 33 MB/s | 24 MB 00:00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : apr-1.4.8-5.el7.x86_64 1/6
Installing : apr-util-1.5.2-6.el7.x86_64 2/6
Installing : httpd-tools-2.4.6-93.el7.centos.x86_64 3/6
Installing : centos-logos-70.0.6-3.el7.centos.noarch 4/6
Installing : mailcap-2.1.41-2.el7.noarch 5/6
Installing : httpd-2.4.6-93.el7.centos.x86_64 6/6
Verifying : mailcap-2.1.41-2.el7.noarch 1/6
Verifying : apr-util-1.5.2-6.el7.x86_64 2/6
Verifying : httpd-2.4.6-93.el7.centos.x86_64 3/6
Verifying : apr-1.4.8-5.el7.x86_64 4/6
Verifying : httpd-tools-2.4.6-93.el7.centos.x86_64 5/6
Verifying : centos-logos-70.0.6-3.el7.centos.noarch 6/6
Installed:
httpd.x86_64 0:2.4.6-93.el7.centos
Dependency Installed:
apr.x86_64 0:1.4.8-5.el7 apr-util.x86_64 0:1.5.2-6.el7
centos-logos.noarch 0:70.0.6-3.el7.centos httpd-tools.x86_64 0:2.4.6-93.el7.centos
mailcap.noarch 0:2.1.41-2.el7
Complete!
[clint@stbkp01 ~]$ ls -l /etc/httpd/conf
total 28
-rw-r--r-- 1 root root 11753 Nov 27 2019 httpd.conf
-rw-r--r-- 1 root root 13064 Apr 2 13:14 magic
[clint@stbkp01 ~]$
[clint@stbkp01 ~]$ cd /etc/httpd/conf
[clint@stbkp01 conf]$
[clint@stbkp01 conf]$ cat httpd.conf
#
# This is the main Apache HTTP server configuration file. It contains the
# configuration directives that give the server its instructions.
# See <URL:http://httpd.apache.org/docs/2.4/> for detailed information.
# In particular, see
# <URL:http://httpd.apache.org/docs/2.4/mod/directives.html>
# for a discussion of each configuration directive.
#
# Do NOT simply read the instructions in here without understanding
# what they do. They're here only as hints or reminders. If you are unsure
# consult the online docs. You have been warned.
#
# Configuration and logfile names: If the filenames you specify for many
# of the server's control files begin with "/" (or "drive:/" for Win32), the
# server will use that explicit path. If the filenames do *not* begin
# with "/", the value of ServerRoot is prepended -- so 'log/access_log'
# with ServerRoot set to '/www' will be interpreted by the
# server as '/www/log/access_log', where as '/log/access_log' will be
# interpreted as '/log/access_log'.
#
# ServerRoot: The top of the directory tree under which the server's
# configuration, error, and log files are kept.
#
# Do not add a slash at the end of the directory path. If you point
# ServerRoot at a non-local disk, be sure to specify a local disk on the
# Mutex directive, if file-based mutexes are used. If you wish to share the
# same ServerRoot for multiple httpd daemons, you will need to change at
# least PidFile.
#
ServerRoot "/etc/httpd"
#
# Listen: Allows you to bind Apache to specific IP addresses and/or
# ports, instead of the default. See also the <VirtualHost>
# directive.
#
# Change this to Listen on specific IP addresses as shown below to
# prevent Apache from glomming onto all bound IP addresses.
#
#Listen 12.34.56.78:80
Listen 80
#
# Dynamic Shared Object (DSO) Support
#
# To be able to use the functionality of a module which was built as a DSO you
# have to place corresponding `LoadModule' lines at this location so the
# directives contained in it are actually available _before_ they are used.
# Statically compiled modules (those listed by `httpd -l') do not need
# to be loaded here.
#
# Example:
# LoadModule foo_module modules/mod_foo.so
#
Include conf.modules.d/*.conf
#
# If you wish httpd to run as a different user or group, you must run
# httpd as root initially and it will switch.
#
# User/Group: The name (or #number) of the user/group to run httpd as.
# It is usually good practice to create a dedicated user and group for
# running httpd, as with most system services.
#
User apache
Group apache
# 'Main' server configuration
#
# The directives in this section set up the values used by the 'main'
# server, which responds to any requests that aren't handled by a
# <VirtualHost> definition. These values also provide defaults for
# any <VirtualHost> containers you may define later in the file.
#
# All of these directives may appear inside <VirtualHost> containers,
# in which case these default settings will be overridden for the
# virtual host being defined.
#
#
# ServerAdmin: Your address, where problems with the server should be
# e-mailed. This address appears on some server-generated pages, such
# as error documents. e.g. admin@your-domain.com
#
ServerAdmin root@localhost
#
# ServerName gives the name and port that the server uses to identify itself.
# This can often be determined automatically, but we recommend you specify
# it explicitly to prevent problems during startup.
#
# If your host doesn't have a registered DNS name, enter its IP address here.
#
#ServerName www.example.com:80
#
# Deny access to the entirety of your server's filesystem. You must
# explicitly permit access to web content directories in other
# <Directory> blocks below.
#
<Directory />
AllowOverride none
Require all denied
</Directory>
#
# Note that from this point forward you must specifically allow
# particular features to be enabled - so if something's not working as
# you might expect, make sure that you have specifically enabled it
# below.
#
#
# DocumentRoot: The directory out of which you will serve your
# documents. By default, all requests are taken from this directory, but
# symbolic links and aliases may be used to point to other locations.
#
DocumentRoot "/var/www/html"
#
# Relax access to content within /var/www.
#
<Directory "/var/www">
AllowOverride None
# Allow open access:
Require all granted
</Directory>
# Further relax access to the default document root:
<Directory "/var/www/html">
#
# Possible values for the Options directive are "None", "All",
# or any combination of:
# Indexes Includes FollowSymLinks SymLinksifOwnerMatch ExecCGI MultiViews
#
# Note that "MultiViews" must be named *explicitly* --- "Options All"
# doesn't give it to you.
#
# The Options directive is both complicated and important. Please see
# http://httpd.apache.org/docs/2.4/mod/core.html#options
# for more information.
#
Options Indexes FollowSymLinks
#
# AllowOverride controls what directives may be placed in .htaccess files.
# It can be "All", "None", or any combination of the keywords:
# Options FileInfo AuthConfig Limit
#
AllowOverride None
#
# Controls who can get stuff from this server.
#
Require all granted
</Directory>
#
# DirectoryIndex: sets the file that Apache will serve if a directory
# is requested.
#
<IfModule dir_module>
DirectoryIndex index.html
</IfModule>
#
# The following lines prevent .htaccess and .htpasswd files from being
# viewed by Web clients.
#
<Files ".ht*">
Require all denied
</Files>
#
# ErrorLog: The location of the error log file.
# If you do not specify an ErrorLog directive within a <VirtualHost>
# container, error messages relating to that virtual host will be
# logged here. If you *do* define an error logfile for a <VirtualHost>
# container, that host's errors will be logged there and not here.
#
ErrorLog "logs/error_log"
#
# LogLevel: Control the number of messages logged to the error_log.
# Possible values include: debug, info, notice, warn, error, crit,
# alert, emerg.
#
LogLevel warn
<IfModule log_config_module>
#
# The following directives define some format nicknames for use with
# a CustomLog directive (see below).
#
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
LogFormat "%h %l %u %t \"%r\" %>s %b" common
<IfModule logio_module>
# You need to enable mod_logio.c to use %I and %O
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
</IfModule>
#
# The location and format of the access logfile (Common Logfile Format).
# If you do not define any access logfiles within a <VirtualHost>
# container, they will be logged here. Contrariwise, if you *do*
# define per-<VirtualHost> access logfiles, transactions will be
# logged therein and *not* in this file.
#
#CustomLog "logs/access_log" common
#
# If you prefer a logfile with access, agent, and referer information
# (Combined Logfile Format) you can use the following directive.
#
CustomLog "logs/access_log" combined
</IfModule>
<IfModule alias_module>
#
# Redirect: Allows you to tell clients about documents that used to
# exist in your server's namespace, but do not anymore. The client
# will make a new request for the document at its new location.
# Example:
# Redirect permanent /foo http://www.example.com/bar
#
# Alias: Maps web paths into filesystem paths and is used to
# access content that does not live under the DocumentRoot.
# Example:
# Alias /webpath /full/filesystem/path
#
# If you include a trailing / on /webpath then the server will
# require it to be present in the URL. You will also likely
# need to provide a <Directory> section to allow access to
# the filesystem path.
#
# ScriptAlias: This controls which directories contain server scripts.
# ScriptAliases are essentially the same as Aliases, except that
# documents in the target directory are treated as applications and
# run by the server when requested rather than as documents sent to the
# client. The same rules about trailing "/" apply to ScriptAlias
# directives as to Alias.
#
ScriptAlias /cgi-bin/ "/var/www/cgi-bin/"
</IfModule>
#
# "/var/www/cgi-bin" should be changed to whatever your ScriptAliased
# CGI directory exists, if you have that configured.
#
<Directory "/var/www/cgi-bin">
AllowOverride None
Options None
Require all granted
</Directory>
<IfModule mime_module>
#
# TypesConfig points to the file containing the list of mappings from
# filename extension to MIME-type.
#
TypesConfig /etc/mime.types
#
# AddType allows you to add to or override the MIME configuration
# file specified in TypesConfig for specific file types.
#
#AddType application/x-gzip .tgz
#
# AddEncoding allows you to have certain browsers uncompress
# information on the fly. Note: Not all browsers support this.
#
#AddEncoding x-compress .Z
#AddEncoding x-gzip .gz .tgz
#
# If the AddEncoding directives above are commented-out, then you
# probably should define those extensions to indicate media types:
#
AddType application/x-compress .Z
AddType application/x-gzip .gz .tgz
#
# AddHandler allows you to map certain file extensions to "handlers":
# actions unrelated to filetype. These can be either built into the server
# or added with the Action directive (see below)
#
# To use CGI scripts outside of ScriptAliased directories:
# (You will also need to add "ExecCGI" to the "Options" directive.)
#
#AddHandler cgi-script .cgi
# For type maps (negotiated resources):
#AddHandler type-map var
#
# Filters allow you to process content before it is sent to the client.
#
# To parse .shtml files for server-side includes (SSI):
# (You will also need to add "Includes" to the "Options" directive.)
#
AddType text/html .shtml
AddOutputFilter INCLUDES .shtml
</IfModule>
#
# Specify a default charset for all content served; this enables
# interpretation of all content as UTF-8 by default. To use the
# default browser choice (ISO-8859-1), or to allow the META tags
# in HTML content to override this choice, comment out this
# directive:
#
AddDefaultCharset UTF-8
<IfModule mime_magic_module>
#
# The mod_mime_magic module allows the server to use various hints from the
# contents of the file itself to determine its type. The MIMEMagicFile
# directive tells the module where the hint definitions are located.
#
MIMEMagicFile conf/magic
</IfModule>
#
# Customizable error responses come in three flavors:
# 1) plain text 2) local redirects 3) external redirects
#
# Some examples:
#ErrorDocument 500 "The server made a boo boo."
#ErrorDocument 404 /missing.html
#ErrorDocument 404 "/cgi-bin/missing_handler.pl"
#ErrorDocument 402 http://www.example.com/subscription_info.html
#
#
# EnableMMAP and EnableSendfile: On systems that support it,
# memory-mapping or the sendfile syscall may be used to deliver
# files. This usually improves server performance, but must
# be turned off when serving from networked-mounted
# filesystems or if support for these functions is otherwise
# broken on your system.
# Defaults if commented: EnableMMAP On, EnableSendfile Off
#
#EnableMMAP off
EnableSendfile on
# Supplemental configuration
#
# Load config files in the "/etc/httpd/conf.d" directory, if any.
IncludeOptional conf.d/*.conf
[clint@stbkp01 conf]$
[clint@stbkp01 conf]$ sudo vi httpd.conf
[clint@stbkp01 conf]$
[clint@stbkp01 conf]$ sudo systemctl restart httpd
[clint@stbkp01 conf]$
[clint@stbkp01 conf]$ sudo yum install epel-release -y
Loaded plugins: fastestmirror, ovl
Loading mirror speeds from cached hostfile
* base: mirror.alpix.eu
* extras: mirror.ratiokontakt.de
* updates: mirror.softaculous.com
Resolving Dependencies
--> Running transaction check
---> Package epel-release.noarch 0:7-11 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================================
Package Arch Version Repository Size
================================================================================================
Installing:
epel-release noarch 7-11 extras 15 k
Transaction Summary
================================================================================================
Install 1 Package
Total download size: 15 k
Installed size: 24 k
Downloading packages:
epel-release-7-11.noarch.rpm | 15 kB 00:00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : epel-release-7-11.noarch 1/1
Verifying : epel-release-7-11.noarch 1/1
Installed:
epel-release.noarch 0:7-11
Complete!
[clint@stbkp01 conf]$
[clint@stbkp01 conf]$ sudo yum install -y nginx
Loaded plugins: fastestmirror, ovl
Loading mirror speeds from cached hostfile
epel/x86_64/metalink | 32 kB 00:00:00
* base: mirror.alpix.eu
* epel: mirrors.n-ix.net
* extras: mirror.ratiokontakt.de
* updates: mirror.softaculous.com
epel | 4.7 kB 00:00:00
(1/3): epel/x86_64/updateinfo | 1.0 MB 00:00:00
(2/3): epel/x86_64/group_gz | 95 kB 00:00:00
(3/3): epel/x86_64/primary_db | 6.9 MB 00:00:00
Resolving Dependencies
--> Running transaction check
---> Package nginx.x86_64 1:1.16.1-1.el7 will be installed
--> Processing Dependency: nginx-all-modules = 1:1.16.1-1.el7 for package: 1:nginx-1.16.1-1.el7.x86_64
--> Processing Dependency: nginx-filesystem = 1:1.16.1-1.el7 for package: 1:nginx-1.16.1-1.el7.x86_64
--> Processing Dependency: nginx-filesystem for package: 1:nginx-1.16.1-1.el7.x86_64
--> Processing Dependency: openssl for package: 1:nginx-1.16.1-1.el7.x86_64
--> Processing Dependency: redhat-indexhtml for package: 1:nginx-1.16.1-1.el7.x86_64
--> Processing Dependency: libprofiler.so.0()(64bit) for package: 1:nginx-1.16.1-1.el7.x86_64
--> Running transaction check
---> Package centos-indexhtml.noarch 0:7-9.el7.centos will be installed
---> Package gperftools-libs.x86_64 0:2.6.1-1.el7 will be installed
---> Package nginx-all-modules.noarch 1:1.16.1-1.el7 will be installed
--> Processing Dependency: nginx-mod-http-image-filter = 1:1.16.1-1.el7 for package: 1:nginx-all-modules-1.16.1-1.el7.noarch
--> Processing Dependency: nginx-mod-http-perl = 1:1.16.1-1.el7 for package: 1:nginx-all-modules-1.16.1-1.el7.noarch
--> Processing Dependency: nginx-mod-http-xslt-filter = 1:1.16.1-1.el7 for package: 1:nginx-all-modules-1.16.1-1.el7.noarch
--> Processing Dependency: nginx-mod-mail = 1:1.16.1-1.el7 for package: 1:nginx-all-modules-1.16.1-1.el7.noarch
--> Processing Dependency: nginx-mod-stream = 1:1.16.1-1.el7 for package: 1:nginx-all-modules-1.16.1-1.el7.noarch
---> Package nginx-filesystem.noarch 1:1.16.1-1.el7 will be installed
---> Package openssl.x86_64 1:1.0.2k-19.el7 will be installed
--> Processing Dependency: openssl-libs(x86-64) = 1:1.0.2k-19.el7 for package: 1:openssl-1.0.2k-19.el7.x86_64
--> Processing Dependency: make for package: 1:openssl-1.0.2k-19.el7.x86_64
--> Running transaction check
---> Package make.x86_64 1:3.82-24.el7 will be installed
---> Package nginx-mod-http-image-filter.x86_64 1:1.16.1-1.el7 will be installed
--> Processing Dependency: gd for package: 1:nginx-mod-http-image-filter-1.16.1-1.el7.x86_64
--> Processing Dependency: libgd.so.2()(64bit) for package: 1:nginx-mod-http-image-filter-1.16.1-1.el7.x86_64
---> Package nginx-mod-http-perl.x86_64 1:1.16.1-1.el7 will be installed
--> Processing Dependency: perl >= 5.006001 for package: 1:nginx-mod-http-perl-1.16.1-1.el7.x86_64
--> Processing Dependency: perl(:MODULE_COMPAT_5.16.3) for package: 1:nginx-mod-http-perl-1.16.1-1.el7.x86_64
--> Processing Dependency: perl(Exporter) for package: 1:nginx-mod-http-perl-1.16.1-1.el7.x86_64
--> Processing Dependency: perl(XSLoader) for package: 1:nginx-mod-http-perl-1.16.1-1.el7.x86_64
--> Processing Dependency: perl(constant) for package: 1:nginx-mod-http-perl-1.16.1-1.el7.x86_64
--> Processing Dependency: perl(strict) for package: 1:nginx-mod-http-perl-1.16.1-1.el7.x86_64
--> Processing Dependency: perl(warnings) for package: 1:nginx-mod-http-perl-1.16.1-1.el7.x86_64
--> Processing Dependency: libperl.so()(64bit) for package: 1:nginx-mod-http-perl-1.16.1-1.el7.x86_64
---> Package nginx-mod-http-xslt-filter.x86_64 1:1.16.1-1.el7 will be installed
--> Processing Dependency: libxslt.so.1(LIBXML2_1.0.11)(64bit) for package: 1:nginx-mod-http-xslt-filter-1.16.1-1.el7.x86_64
--> Processing Dependency: libxslt.so.1(LIBXML2_1.0.18)(64bit) for package: 1:nginx-mod-http-xslt-filter-1.16.1-1.el7.x86_64
--> Processing Dependency: libexslt.so.0()(64bit) for package: 1:nginx-mod-http-xslt-filter-1.16.1-1.el7.x86_64
--> Processing Dependency: libxslt.so.1()(64bit) for package: 1:nginx-mod-http-xslt-filter-1.16.1-1.el7.x86_64
---> Package nginx-mod-mail.x86_64 1:1.16.1-1.el7 will be installed
---> Package nginx-mod-stream.x86_64 1:1.16.1-1.el7 will be installed
---> Package openssl-libs.x86_64 1:1.0.2k-16.el7_6.1 will be updated
---> Package openssl-libs.x86_64 1:1.0.2k-19.el7 will be an update
--> Running transaction check
---> Package gd.x86_64 0:2.0.35-26.el7 will be installed
--> Processing Dependency: libpng15.so.15(PNG15_0)(64bit) for package: gd-2.0.35-26.el7.x86_64
--> Processing Dependency: libjpeg.so.62(LIBJPEG_6.2)(64bit) for package: gd-2.0.35-26.el7.x86_64
--> Processing Dependency: libpng15.so.15()(64bit) for package: gd-2.0.35-26.el7.x86_64
--> Processing Dependency: libjpeg.so.62()(64bit) for package: gd-2.0.35-26.el7.x86_64
--> Processing Dependency: libfreetype.so.6()(64bit) for package: gd-2.0.35-26.el7.x86_64
--> Processing Dependency: libfontconfig.so.1()(64bit) for package: gd-2.0.35-26.el7.x86_64
--> Processing Dependency: libXpm.so.4()(64bit) for package: gd-2.0.35-26.el7.x86_64
--> Processing Dependency: libX11.so.6()(64bit) for package: gd-2.0.35-26.el7.x86_64
---> Package libxslt.x86_64 0:1.1.28-5.el7 will be installed
---> Package perl.x86_64 4:5.16.3-295.el7 will be installed
--> Processing Dependency: perl(Socket) >= 1.3 for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(Scalar::Util) >= 1.10 for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl-macros for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(threads::shared) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(threads) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(Time::Local) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(Time::HiRes) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(Storable) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(Socket) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(Scalar::Util) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(Pod::Simple::XHTML) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(Pod::Simple::Search) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(Getopt::Long) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(Filter::Util::Call) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(File::Temp) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(File::Spec::Unix) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(File::Spec::Functions) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(File::Spec) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(File::Path) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(Cwd) for package: 4:perl-5.16.3-295.el7.x86_64
--> Processing Dependency: perl(Carp) for package: 4:perl-5.16.3-295.el7.x86_64
---> Package perl-Exporter.noarch 0:5.68-3.el7 will be installed
---> Package perl-constant.noarch 0:1.27-2.el7 will be installed
---> Package perl-libs.x86_64 4:5.16.3-295.el7 will be installed
--> Running transaction check
---> Package fontconfig.x86_64 0:2.13.0-4.3.el7 will be installed
--> Processing Dependency: fontpackages-filesystem for package: fontconfig-2.13.0-4.3.el7.x86_64
--> Processing Dependency: dejavu-sans-fonts for package: fontconfig-2.13.0-4.3.el7.x86_64
---> Package freetype.x86_64 0:2.8-14.el7 will be installed
---> Package libX11.x86_64 0:1.6.7-2.el7 will be installed
--> Processing Dependency: libX11-common >= 1.6.7-2.el7 for package: libX11-1.6.7-2.el7.x86_64
--> Processing Dependency: libxcb.so.1()(64bit) for package: libX11-1.6.7-2.el7.x86_64
---> Package libXpm.x86_64 0:3.5.12-1.el7 will be installed
---> Package libjpeg-turbo.x86_64 0:1.2.90-8.el7 will be installed
---> Package libpng.x86_64 2:1.5.13-7.el7_2 will be installed
---> Package perl-Carp.noarch 0:1.26-244.el7 will be installed
---> Package perl-File-Path.noarch 0:2.09-2.el7 will be installed
---> Package perl-File-Temp.noarch 0:0.23.01-3.el7 will be installed
---> Package perl-Filter.x86_64 0:1.49-3.el7 will be installed
---> Package perl-Getopt-Long.noarch 0:2.40-3.el7 will be installed
--> Processing Dependency: perl(Pod::Usage) >= 1.14 for package: perl-Getopt-Long-2.40-3.el7.noarch
--> Processing Dependency: perl(Text::ParseWords) for package: perl-Getopt-Long-2.40-3.el7.noarch
---> Package perl-PathTools.x86_64 0:3.40-5.el7 will be installed
---> Package perl-Pod-Simple.noarch 1:3.28-4.el7 will be installed
--> Processing Dependency: perl(Pod::Escapes) >= 1.04 for package: 1:perl-Pod-Simple-3.28-4.el7.noarch
--> Processing Dependency: perl(Encode) for package: 1:perl-Pod-Simple-3.28-4.el7.noarch
---> Package perl-Scalar-List-Utils.x86_64 0:1.27-248.el7 will be installed
---> Package perl-Socket.x86_64 0:2.010-5.el7 will be installed
---> Package perl-Storable.x86_64 0:2.45-3.el7 will be installed
---> Package perl-Time-HiRes.x86_64 4:1.9725-3.el7 will be installed
---> Package perl-Time-Local.noarch 0:1.2300-2.el7 will be installed
---> Package perl-macros.x86_64 4:5.16.3-295.el7 will be installed
---> Package perl-threads.x86_64 0:1.87-4.el7 will be installed
---> Package perl-threads-shared.x86_64 0:1.43-6.el7 will be installed
--> Running transaction check
---> Package dejavu-sans-fonts.noarch 0:2.33-6.el7 will be installed
--> Processing Dependency: dejavu-fonts-common = 2.33-6.el7 for package: dejavu-sans-fonts-2.33-6.el7.noarch
---> Package fontpackages-filesystem.noarch 0:1.44-8.el7 will be installed
---> Package libX11-common.noarch 0:1.6.7-2.el7 will be installed
---> Package libxcb.x86_64 0:1.13-1.el7 will be installed
--> Processing Dependency: libXau.so.6()(64bit) for package: libxcb-1.13-1.el7.x86_64
---> Package perl-Encode.x86_64 0:2.51-7.el7 will be installed
---> Package perl-Pod-Escapes.noarch 1:1.04-295.el7 will be installed
---> Package perl-Pod-Usage.noarch 0:1.63-3.el7 will be installed
--> Processing Dependency: perl(Pod::Text) >= 3.15 for package: perl-Pod-Usage-1.63-3.el7.noarch
--> Processing Dependency: perl-Pod-Perldoc for package: perl-Pod-Usage-1.63-3.el7.noarch
---> Package perl-Text-ParseWords.noarch 0:3.29-4.el7 will be installed
--> Running transaction check
---> Package dejavu-fonts-common.noarch 0:2.33-6.el7 will be installed
---> Package libXau.x86_64 0:1.0.8-2.1.el7 will be installed
---> Package perl-Pod-Perldoc.noarch 0:3.20-4.el7 will be installed
--> Processing Dependency: perl(parent) for package: perl-Pod-Perldoc-3.20-4.el7.noarch
--> Processing Dependency: perl(HTTP::Tiny) for package: perl-Pod-Perldoc-3.20-4.el7.noarch
--> Processing Dependency: groff-base for package: perl-Pod-Perldoc-3.20-4.el7.noarch
---> Package perl-podlators.noarch 0:2.5.1-3.el7 will be installed
--> Running transaction check
---> Package groff-base.x86_64 0:1.22.2-8.el7 will be installed
---> Package perl-HTTP-Tiny.noarch 0:0.033-3.el7 will be installed
---> Package perl-parent.noarch 1:0.225-244.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================================
Package Arch Version Repository Size
================================================================================================
Installing:
nginx x86_64 1:1.16.1-1.el7 epel 562 k
Installing for dependencies:
centos-indexhtml noarch 7-9.el7.centos base 92 k
dejavu-fonts-common noarch 2.33-6.el7 base 64 k
dejavu-sans-fonts noarch 2.33-6.el7 base 1.4 M
fontconfig x86_64 2.13.0-4.3.el7 base 254 k
fontpackages-filesystem noarch 1.44-8.el7 base 9.9 k
freetype x86_64 2.8-14.el7 base 380 k
gd x86_64 2.0.35-26.el7 base 146 k
gperftools-libs x86_64 2.6.1-1.el7 base 272 k
groff-base x86_64 1.22.2-8.el7 base 942 k
libX11 x86_64 1.6.7-2.el7 base 607 k
libX11-common noarch 1.6.7-2.el7 base 164 k
libXau x86_64 1.0.8-2.1.el7 base 29 k
libXpm x86_64 3.5.12-1.el7 base 55 k
libjpeg-turbo x86_64 1.2.90-8.el7 base 135 k
libpng x86_64 2:1.5.13-7.el7_2 base 213 k
libxcb x86_64 1.13-1.el7 base 214 k
libxslt x86_64 1.1.28-5.el7 base 242 k
make x86_64 1:3.82-24.el7 base 421 k
nginx-all-modules noarch 1:1.16.1-1.el7 epel 19 k
nginx-filesystem noarch 1:1.16.1-1.el7 epel 21 k
nginx-mod-http-image-filter x86_64 1:1.16.1-1.el7 epel 30 k
nginx-mod-http-perl x86_64 1:1.16.1-1.el7 epel 39 k
nginx-mod-http-xslt-filter x86_64 1:1.16.1-1.el7 epel 29 k
nginx-mod-mail x86_64 1:1.16.1-1.el7 epel 57 k
nginx-mod-stream x86_64 1:1.16.1-1.el7 epel 84 k
openssl x86_64 1:1.0.2k-19.el7 base 493 k
perl x86_64 4:5.16.3-295.el7 base 8.0 M
perl-Carp noarch 1.26-244.el7 base 19 k
perl-Encode x86_64 2.51-7.el7 base 1.5 M
perl-Exporter noarch 5.68-3.el7 base 28 k
perl-File-Path noarch 2.09-2.el7 base 26 k
perl-File-Temp noarch 0.23.01-3.el7 base 56 k
perl-Filter x86_64 1.49-3.el7 base 76 k
perl-Getopt-Long noarch 2.40-3.el7 base 56 k
perl-HTTP-Tiny noarch 0.033-3.el7 base 38 k
perl-PathTools x86_64 3.40-5.el7 base 82 k
perl-Pod-Escapes noarch 1:1.04-295.el7 base 51 k
perl-Pod-Perldoc noarch 3.20-4.el7 base 87 k
perl-Pod-Simple noarch 1:3.28-4.el7 base 216 k
perl-Pod-Usage noarch 1.63-3.el7 base 27 k
perl-Scalar-List-Utils x86_64 1.27-248.el7 base 36 k
perl-Socket x86_64 2.010-5.el7 base 49 k
perl-Storable x86_64 2.45-3.el7 base 77 k
perl-Text-ParseWords noarch 3.29-4.el7 base 14 k
perl-Time-HiRes x86_64 4:1.9725-3.el7 base 45 k
perl-Time-Local noarch 1.2300-2.el7 base 24 k
perl-constant noarch 1.27-2.el7 base 19 k
perl-libs x86_64 4:5.16.3-295.el7 base 689 k
perl-macros x86_64 4:5.16.3-295.el7 base 44 k
perl-parent noarch 1:0.225-244.el7 base 12 k
perl-podlators noarch 2.5.1-3.el7 base 112 k
perl-threads x86_64 1.87-4.el7 base 49 k
perl-threads-shared x86_64 1.43-6.el7 base 39 k
Updating for dependencies:
openssl-libs x86_64 1:1.0.2k-19.el7 base 1.2 M
Transaction Summary
================================================================================================
Install 1 Package (+53 Dependent packages)
Upgrade ( 1 Dependent package)
Total download size: 19 M
Downloading packages:
Delta RPMs disabled because /usr/bin/applydeltarpm not installed.
(1/55): centos-indexhtml-7-9.el7.centos.noarch.rpm | 92 kB 00:00:00
(2/55): dejavu-fonts-common-2.33-6.el7.noarch.rpm | 64 kB 00:00:00
(3/55): fontconfig-2.13.0-4.3.el7.x86_64.rpm | 254 kB 00:00:00
(4/55): dejavu-sans-fonts-2.33-6.el7.noarch.rpm | 1.4 MB 00:00:00
(5/55): fontpackages-filesystem-1.44-8.el7.noarch.rpm | 9.9 kB 00:00:00
(6/55): gd-2.0.35-26.el7.x86_64.rpm | 146 kB 00:00:00
(7/55): freetype-2.8-14.el7.x86_64.rpm | 380 kB 00:00:00
(8/55): gperftools-libs-2.6.1-1.el7.x86_64.rpm | 272 kB 00:00:00
(9/55): groff-base-1.22.2-8.el7.x86_64.rpm | 942 kB 00:00:00
(10/55): libX11-1.6.7-2.el7.x86_64.rpm | 607 kB 00:00:00
(11/55): libX11-common-1.6.7-2.el7.noarch.rpm | 164 kB 00:00:00
(12/55): libXau-1.0.8-2.1.el7.x86_64.rpm | 29 kB 00:00:00
(13/55): libXpm-3.5.12-1.el7.x86_64.rpm | 55 kB 00:00:00
(14/55): libjpeg-turbo-1.2.90-8.el7.x86_64.rpm | 135 kB 00:00:00
(15/55): libpng-1.5.13-7.el7_2.x86_64.rpm | 213 kB 00:00:00
(16/55): libxcb-1.13-1.el7.x86_64.rpm | 214 kB 00:00:00
(17/55): libxslt-1.1.28-5.el7.x86_64.rpm | 242 kB 00:00:00
(18/55): make-3.82-24.el7.x86_64.rpm | 421 kB 00:00:00
warning: /var/cache/yum/x86_64/7/epel/packages/nginx-1.16.1-1.el7.x86_64.rpm: Header V3 RSA/SHA256 Signature, key ID 352c64e5: NOKEY
Public key for nginx-1.16.1-1.el7.x86_64.rpm is not installed
(19/55): nginx-1.16.1-1.el7.x86_64.rpm | 562 kB 00:00:00
(20/55): nginx-filesystem-1.16.1-1.el7.noarch.rpm | 21 kB 00:00:00
(21/55): nginx-mod-http-image-filter-1.16.1-1.el7.x86_64.rpm | 30 kB 00:00:00
(22/55): nginx-mod-http-perl-1.16.1-1.el7.x86_64.rpm | 39 kB 00:00:00
(23/55): nginx-mod-http-xslt-filter-1.16.1-1.el7.x86_64.rpm | 29 kB 00:00:00
(24/55): nginx-mod-mail-1.16.1-1.el7.x86_64.rpm | 57 kB 00:00:00
(25/55): nginx-mod-stream-1.16.1-1.el7.x86_64.rpm | 84 kB 00:00:00
(26/55): nginx-all-modules-1.16.1-1.el7.noarch.rpm | 19 kB 00:00:00
(27/55): openssl-1.0.2k-19.el7.x86_64.rpm | 493 kB 00:00:00
(28/55): openssl-libs-1.0.2k-19.el7.x86_64.rpm | 1.2 MB 00:00:00
(29/55): perl-Carp-1.26-244.el7.noarch.rpm | 19 kB 00:00:00
(30/55): perl-Encode-2.51-7.el7.x86_64.rpm | 1.5 MB 00:00:00
(31/55): perl-Exporter-5.68-3.el7.noarch.rpm | 28 kB 00:00:00
(32/55): perl-5.16.3-295.el7.x86_64.rpm | 8.0 MB 00:00:00
(33/55): perl-File-Path-2.09-2.el7.noarch.rpm | 26 kB 00:00:00
(34/55): perl-File-Temp-0.23.01-3.el7.noarch.rpm | 56 kB 00:00:00
(35/55): perl-Filter-1.49-3.el7.x86_64.rpm | 76 kB 00:00:00
(36/55): perl-Getopt-Long-2.40-3.el7.noarch.rpm | 56 kB 00:00:00
(37/55): perl-HTTP-Tiny-0.033-3.el7.noarch.rpm | 38 kB 00:00:00
(38/55): perl-PathTools-3.40-5.el7.x86_64.rpm | 82 kB 00:00:00
(39/55): perl-Pod-Escapes-1.04-295.el7.noarch.rpm | 51 kB 00:00:00
(40/55): perl-Pod-Perldoc-3.20-4.el7.noarch.rpm | 87 kB 00:00:00
(41/55): perl-Pod-Simple-3.28-4.el7.noarch.rpm | 216 kB 00:00:00
(42/55): perl-Pod-Usage-1.63-3.el7.noarch.rpm | 27 kB 00:00:00
(43/55): perl-Scalar-List-Utils-1.27-248.el7.x86_64.rpm | 36 kB 00:00:00
(44/55): perl-Socket-2.010-5.el7.x86_64.rpm | 49 kB 00:00:00
(45/55): perl-Text-ParseWords-3.29-4.el7.noarch.rpm | 14 kB 00:00:00
(46/55): perl-Storable-2.45-3.el7.x86_64.rpm | 77 kB 00:00:00
(47/55): perl-Time-HiRes-1.9725-3.el7.x86_64.rpm | 45 kB 00:00:00
(48/55): perl-Time-Local-1.2300-2.el7.noarch.rpm | 24 kB 00:00:00
(49/55): perl-constant-1.27-2.el7.noarch.rpm | 19 kB 00:00:00
(50/55): perl-macros-5.16.3-295.el7.x86_64.rpm | 44 kB 00:00:00
(51/55): perl-parent-0.225-244.el7.noarch.rpm | 12 kB 00:00:00
(52/55): perl-podlators-2.5.1-3.el7.noarch.rpm | 112 kB 00:00:00
(53/55): perl-libs-5.16.3-295.el7.x86_64.rpm | 689 kB 00:00:00
(54/55): perl-threads-1.87-4.el7.x86_64.rpm | 49 kB 00:00:00
(55/55): perl-threads-shared-1.43-6.el7.x86_64.rpm | 39 kB 00:00:00
------------------------------------------------------------------------------------------------
Total 15 MB/s | 19 MB 00:00:01
Retrieving key from file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
Importing GPG key 0x352C64E5:
Userid : "Fedora EPEL (7) <epel@fedoraproject.org>"
Fingerprint: 91e9 7d7c 4a5e 96f1 7f3e 888f 6a2f aea2 352c 64e5
Package : epel-release-7-11.noarch (@extras)
From : /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Updating : 1:openssl-libs-1.0.2k-19.el7.x86_64 1/56
Installing : fontpackages-filesystem-1.44-8.el7.noarch 2/56
Installing : 2:libpng-1.5.13-7.el7_2.x86_64 3/56
Installing : freetype-2.8-14.el7.x86_64 4/56
Installing : dejavu-fonts-common-2.33-6.el7.noarch 5/56
Installing : dejavu-sans-fonts-2.33-6.el7.noarch 6/56
Installing : fontconfig-2.13.0-4.3.el7.x86_64 7/56
Installing : gperftools-libs-2.6.1-1.el7.x86_64 8/56
Installing : libXau-1.0.8-2.1.el7.x86_64 9/56
Installing : libxcb-1.13-1.el7.x86_64 10/56
Installing : centos-indexhtml-7-9.el7.centos.noarch 11/56
Installing : libjpeg-turbo-1.2.90-8.el7.x86_64 12/56
Installing : libxslt-1.1.28-5.el7.x86_64 13/56
Installing : 1:make-3.82-24.el7.x86_64 14/56
Installing : 1:openssl-1.0.2k-19.el7.x86_64 15/56
Installing : libX11-common-1.6.7-2.el7.noarch 16/56
Installing : libX11-1.6.7-2.el7.x86_64 17/56
Installing : libXpm-3.5.12-1.el7.x86_64 18/56
Installing : gd-2.0.35-26.el7.x86_64 19/56
Installing : groff-base-1.22.2-8.el7.x86_64 20/56
Installing : 1:perl-parent-0.225-244.el7.noarch 21/56
Installing : perl-HTTP-Tiny-0.033-3.el7.noarch 22/56
Installing : perl-podlators-2.5.1-3.el7.noarch 23/56
Installing : perl-Pod-Perldoc-3.20-4.el7.noarch 24/56
Installing : 1:perl-Pod-Escapes-1.04-295.el7.noarch 25/56
Installing : perl-Encode-2.51-7.el7.x86_64 26/56
Installing : perl-Text-ParseWords-3.29-4.el7.noarch 27/56
Installing : perl-Pod-Usage-1.63-3.el7.noarch 28/56
Installing : 4:perl-libs-5.16.3-295.el7.x86_64 29/56
Installing : 4:perl-macros-5.16.3-295.el7.x86_64 30/56
Installing : 4:perl-Time-HiRes-1.9725-3.el7.x86_64 31/56
Installing : perl-Exporter-5.68-3.el7.noarch 32/56
Installing : perl-constant-1.27-2.el7.noarch 33/56
Installing : perl-Socket-2.010-5.el7.x86_64 34/56
Installing : perl-Time-Local-1.2300-2.el7.noarch 35/56
Installing : perl-Carp-1.26-244.el7.noarch 36/56
Installing : perl-Storable-2.45-3.el7.x86_64 37/56
Installing : 1:perl-Pod-Simple-3.28-4.el7.noarch 38/56
Installing : perl-PathTools-3.40-5.el7.x86_64 39/56
Installing : perl-Scalar-List-Utils-1.27-248.el7.x86_64 40/56
Installing : perl-File-Temp-0.23.01-3.el7.noarch 41/56
Installing : perl-File-Path-2.09-2.el7.noarch 42/56
Installing : perl-threads-shared-1.43-6.el7.x86_64 43/56
Installing : perl-threads-1.87-4.el7.x86_64 44/56
Installing : perl-Filter-1.49-3.el7.x86_64 45/56
Installing : perl-Getopt-Long-2.40-3.el7.noarch 46/56
Installing : 4:perl-5.16.3-295.el7.x86_64 47/56
Installing : 1:nginx-filesystem-1.16.1-1.el7.noarch 48/56
Installing : 1:nginx-mod-mail-1.16.1-1.el7.x86_64 49/56
Installing : 1:nginx-mod-stream-1.16.1-1.el7.x86_64 50/56
Installing : 1:nginx-mod-http-xslt-filter-1.16.1-1.el7.x86_64 51/56
Installing : 1:nginx-mod-http-image-filter-1.16.1-1.el7.x86_64 52/56
Installing : 1:nginx-1.16.1-1.el7.x86_64 53/56
Installing : 1:nginx-mod-http-perl-1.16.1-1.el7.x86_64 54/56
Installing : 1:nginx-all-modules-1.16.1-1.el7.noarch 55/56
Cleanup : 1:openssl-libs-1.0.2k-16.el7_6.1.x86_64 56/56
Verifying : perl-HTTP-Tiny-0.033-3.el7.noarch 1/56
Verifying : fontconfig-2.13.0-4.3.el7.x86_64 2/56
Verifying : 1:nginx-mod-mail-1.16.1-1.el7.x86_64 3/56
Verifying : 1:nginx-filesystem-1.16.1-1.el7.noarch 4/56
Verifying : perl-Pod-Perldoc-3.20-4.el7.noarch 5/56
Verifying : perl-threads-shared-1.43-6.el7.x86_64 6/56
Verifying : 4:perl-Time-HiRes-1.9725-3.el7.x86_64 7/56
Verifying : 1:perl-Pod-Escapes-1.04-295.el7.noarch 8/56
Verifying : groff-base-1.22.2-8.el7.x86_64 9/56
Verifying : perl-Exporter-5.68-3.el7.noarch 10/56
Verifying : perl-constant-1.27-2.el7.noarch 11/56
Verifying : perl-PathTools-3.40-5.el7.x86_64 12/56
Verifying : 2:libpng-1.5.13-7.el7_2.x86_64 13/56
Verifying : 1:nginx-mod-http-perl-1.16.1-1.el7.x86_64 14/56
Verifying : freetype-2.8-14.el7.x86_64 15/56
Verifying : perl-Socket-2.010-5.el7.x86_64 16/56
Verifying : fontpackages-filesystem-1.44-8.el7.noarch 17/56
Verifying : 1:perl-parent-0.225-244.el7.noarch 18/56
Verifying : perl-Pod-Usage-1.63-3.el7.noarch 19/56
Verifying : 4:perl-libs-5.16.3-295.el7.x86_64 20/56
Verifying : perl-Encode-2.51-7.el7.x86_64 21/56
Verifying : perl-File-Temp-0.23.01-3.el7.noarch 22/56
Verifying : 1:perl-Pod-Simple-3.28-4.el7.noarch 23/56
Verifying : libX11-1.6.7-2.el7.x86_64 24/56
Verifying : libX11-common-1.6.7-2.el7.noarch 25/56
Verifying : libxcb-1.13-1.el7.x86_64 26/56
Verifying : 1:make-3.82-24.el7.x86_64 27/56
Verifying : perl-Time-Local-1.2300-2.el7.noarch 28/56
Verifying : 4:perl-macros-5.16.3-295.el7.x86_64 29/56
Verifying : 4:perl-5.16.3-295.el7.x86_64 30/56
Verifying : libXpm-3.5.12-1.el7.x86_64 31/56
Verifying : 1:nginx-mod-stream-1.16.1-1.el7.x86_64 32/56
Verifying : perl-Carp-1.26-244.el7.noarch 33/56
Verifying : libxslt-1.1.28-5.el7.x86_64 34/56
Verifying : 1:openssl-1.0.2k-19.el7.x86_64 35/56
Verifying : perl-Storable-2.45-3.el7.x86_64 36/56
Verifying : dejavu-sans-fonts-2.33-6.el7.noarch 37/56
Verifying : perl-Scalar-List-Utils-1.27-248.el7.x86_64 38/56
Verifying : gd-2.0.35-26.el7.x86_64 39/56
Verifying : libjpeg-turbo-1.2.90-8.el7.x86_64 40/56
Verifying : 1:nginx-all-modules-1.16.1-1.el7.noarch 41/56
Verifying : 1:nginx-mod-http-xslt-filter-1.16.1-1.el7.x86_64 42/56
Verifying : 1:openssl-libs-1.0.2k-19.el7.x86_64 43/56
Verifying : centos-indexhtml-7-9.el7.centos.noarch 44/56
Verifying : 1:nginx-mod-http-image-filter-1.16.1-1.el7.x86_64 45/56
Verifying : dejavu-fonts-common-2.33-6.el7.noarch 46/56
Verifying : perl-podlators-2.5.1-3.el7.noarch 47/56
Verifying : libXau-1.0.8-2.1.el7.x86_64 48/56
Verifying : perl-File-Path-2.09-2.el7.noarch 49/56
Verifying : perl-threads-1.87-4.el7.x86_64 50/56
Verifying : gperftools-libs-2.6.1-1.el7.x86_64 51/56
Verifying : perl-Filter-1.49-3.el7.x86_64 52/56
Verifying : perl-Getopt-Long-2.40-3.el7.noarch 53/56
Verifying : perl-Text-ParseWords-3.29-4.el7.noarch 54/56
Verifying : 1:nginx-1.16.1-1.el7.x86_64 55/56
Verifying : 1:openssl-libs-1.0.2k-16.el7_6.1.x86_64 56/56
Installed:
nginx.x86_64 1:1.16.1-1.el7
Dependency Installed:
centos-indexhtml.noarch 0:7-9.el7.centos dejavu-fonts-common.noarch 0:2.33-6.el7
dejavu-sans-fonts.noarch 0:2.33-6.el7 fontconfig.x86_64 0:2.13.0-4.3.el7
fontpackages-filesystem.noarch 0:1.44-8.el7 freetype.x86_64 0:2.8-14.el7
gd.x86_64 0:2.0.35-26.el7 gperftools-libs.x86_64 0:2.6.1-1.el7
groff-base.x86_64 0:1.22.2-8.el7 libX11.x86_64 0:1.6.7-2.el7
libX11-common.noarch 0:1.6.7-2.el7 libXau.x86_64 0:1.0.8-2.1.el7
libXpm.x86_64 0:3.5.12-1.el7 libjpeg-turbo.x86_64 0:1.2.90-8.el7
libpng.x86_64 2:1.5.13-7.el7_2 libxcb.x86_64 0:1.13-1.el7
libxslt.x86_64 0:1.1.28-5.el7 make.x86_64 1:3.82-24.el7
nginx-all-modules.noarch 1:1.16.1-1.el7 nginx-filesystem.noarch 1:1.16.1-1.el7
nginx-mod-http-image-filter.x86_64 1:1.16.1-1.el7 nginx-mod-http-perl.x86_64 1:1.16.1-1.el7
nginx-mod-http-xslt-filter.x86_64 1:1.16.1-1.el7 nginx-mod-mail.x86_64 1:1.16.1-1.el7
nginx-mod-stream.x86_64 1:1.16.1-1.el7 openssl.x86_64 1:1.0.2k-19.el7
perl.x86_64 4:5.16.3-295.el7 perl-Carp.noarch 0:1.26-244.el7
perl-Encode.x86_64 0:2.51-7.el7 perl-Exporter.noarch 0:5.68-3.el7
perl-File-Path.noarch 0:2.09-2.el7 perl-File-Temp.noarch 0:0.23.01-3.el7
perl-Filter.x86_64 0:1.49-3.el7 perl-Getopt-Long.noarch 0:2.40-3.el7
perl-HTTP-Tiny.noarch 0:0.033-3.el7 perl-PathTools.x86_64 0:3.40-5.el7
perl-Pod-Escapes.noarch 1:1.04-295.el7 perl-Pod-Perldoc.noarch 0:3.20-4.el7
perl-Pod-Simple.noarch 1:3.28-4.el7 perl-Pod-Usage.noarch 0:1.63-3.el7
perl-Scalar-List-Utils.x86_64 0:1.27-248.el7 perl-Socket.x86_64 0:2.010-5.el7
perl-Storable.x86_64 0:2.45-3.el7 perl-Text-ParseWords.noarch 0:3.29-4.el7
perl-Time-HiRes.x86_64 4:1.9725-3.el7 perl-Time-Local.noarch 0:1.2300-2.el7
perl-constant.noarch 0:1.27-2.el7 perl-libs.x86_64 4:5.16.3-295.el7
perl-macros.x86_64 4:5.16.3-295.el7 perl-parent.noarch 1:0.225-244.el7
perl-podlators.noarch 0:2.5.1-3.el7 perl-threads.x86_64 0:1.87-4.el7
perl-threads-shared.x86_64 0:1.43-6.el7
Dependency Updated:
openssl-libs.x86_64 1:1.0.2k-19.el7
Complete!
[clint@stbkp01 conf]$ sudo cp /etc/nginx/nginx.conf /etc/nginx/nginx.conf.bak
[clint@stbkp01 conf]$
[clint@stbkp01 conf]$ ls -l /etc/nginx/
total 80
drwxr-xr-x 2 root root 4096 Oct 3 2019 conf.d
drwxr-xr-x 2 root root 4096 Oct 3 2019 default.d
-rw-r--r-- 1 root root 1077 Oct 3 2019 fastcgi.conf
-rw-r--r-- 1 root root 1077 Oct 3 2019 fastcgi.conf.default
-rw-r--r-- 1 root root 1007 Oct 3 2019 fastcgi_params
-rw-r--r-- 1 root root 1007 Oct 3 2019 fastcgi_params.default
-rw-r--r-- 1 root root 2837 Oct 3 2019 koi-utf
-rw-r--r-- 1 root root 2223 Oct 3 2019 koi-win
-rw-r--r-- 1 root root 5231 Oct 3 2019 mime.types
-rw-r--r-- 1 root root 5231 Oct 3 2019 mime.types.default
-rw-r--r-- 1 root root 2471 Oct 3 2019 nginx.conf
-rw-r--r-- 1 root root 2471 Jul 29 18:35 nginx.conf.bak
-rw-r--r-- 1 root root 2656 Oct 3 2019 nginx.conf.default
-rw-r--r-- 1 root root 636 Oct 3 2019 scgi_params
-rw-r--r-- 1 root root 636 Oct 3 2019 scgi_params.default
-rw-r--r-- 1 root root 664 Oct 3 2019 uwsgi_params
-rw-r--r-- 1 root root 664 Oct 3 2019 uwsgi_params.default
-rw-r--r-- 1 root root 3610 Oct 3 2019 win-utf
[clint@stbkp01 conf]$
[clint@stbkp01 conf]$ sudo vi /etc/nginx/nginx.conf
[clint@stbkp01 conf]$
[clint@stbkp01 conf]$ sudo vi /etc/nginx/conf.d/proxy.conf
[clint@stbkp01 conf]$
[clint@stbkp01 conf]$ sudo systemctl enable nginx
Created symlink from /etc/systemd/system/multi-user.target.wants/nginx.service to /usr/lib/systemd/system/nginx.service.
[clint@stbkp01 conf]$ sudo systemctl start nginx
[clint@stbkp01 conf]$
[clint@stbkp01 conf]$ sudo systemctl status httpd
● httpd.service - The Apache HTTP Server
Loaded: loaded (/usr/lib/systemd/system/httpd.service; disabled; vendor preset: disabled)
Active: active (running) since Wed 2020-07-29 18:23:22 UTC; 3min 47s ago
Docs: man:httpd(8)
man:apachectl(8)
Main PID: 199 (httpd)
Status: "Total requests: 0; Current requests/sec: 0; Current traffic: 0 B/sec"
CGroup: /docker/e8ec55b17f9b1fc0ae9f8c464e3ebd67095664ab0c6c6da3d17cd669ca42c426/system.slice/httpd.service
├─199 /usr/sbin/httpd -DFOREGROUND
├─200 /usr/sbin/httpd -DFOREGROUND
├─201 /usr/sbin/httpd -DFOREGROUND
├─202 /usr/sbin/httpd -DFOREGROUND
├─203 /usr/sbin/httpd -DFOREGROUND
└─204 /usr/sbin/httpd -DFOREGROUND
Jul 29 18:26:41 stbkp01 systemd[1]: httpd.service: got READY=1
Jul 29 18:26:41 stbkp01 systemd[1]: httpd.service: got STATUS=Total requests: 0; Current r...sec
Jul 29 18:26:51 stbkp01 systemd[1]: Got notification message for unit httpd.service
Jul 29 18:26:51 stbkp01 systemd[1]: httpd.service: Got notification message from PID 199 (...ec)
Jul 29 18:26:51 stbkp01 systemd[1]: httpd.service: got READY=1
Jul 29 18:26:51 stbkp01 systemd[1]: httpd.service: got STATUS=Total requests: 0; Current r...sec
Jul 29 18:27:01 stbkp01 systemd[1]: Got notification message for unit httpd.service
Jul 29 18:27:01 stbkp01 systemd[1]: httpd.service: Got notification message from PID 199 (...ec)
Jul 29 18:27:01 stbkp01 systemd[1]: httpd.service: got READY=1
Jul 29 18:27:01 stbkp01 systemd[1]: httpd.service: got STATUS=Total requests: 0; Current r...sec
Hint: Some lines were ellipsized, use -l to show in full.
[clint@stbkp01 conf]$ sudo systemctl status nginx
● nginx.service - The nginx HTTP and reverse proxy server
Loaded: loaded (/usr/lib/systemd/system/nginx.service; enabled; vendor preset: disabled)
Active: active (running) since Wed 2020-07-29 18:27:01 UTC; 10s ago
Process: 413 ExecStart=/usr/sbin/nginx (code=exited, status=0/SUCCESS)
Process: 412 ExecStartPre=/usr/sbin/nginx -t (code=exited, status=0/SUCCESS)
Process: 411 ExecStartPre=/usr/bin/rm -f /run/nginx.pid (code=exited, status=0/SUCCESS)
Main PID: 414 (nginx)
CGroup: /docker/e8ec55b17f9b1fc0ae9f8c464e3ebd67095664ab0c6c6da3d17cd669ca42c426/system.slice/nginx.service
├─414 nginx: master process /usr/sbin/nginx
├─415 nginx: worker process
└─416 nginx: worker process
Jul 29 18:27:01 stbkp01 systemd[1]: Forked /usr/sbin/nginx as 413
Jul 29 18:27:01 stbkp01 systemd[1]: nginx.service changed start-pre -> start
Jul 29 18:27:01 stbkp01 systemd[413]: Executing: /usr/sbin/nginx
Jul 29 18:27:01 stbkp01 systemd[1]: Child 413 belongs to nginx.service
Jul 29 18:27:01 stbkp01 systemd[1]: nginx.service: control process exited, code=exited status=0
Jul 29 18:27:01 stbkp01 systemd[1]: nginx.service got final SIGCHLD for state start
Jul 29 18:27:01 stbkp01 systemd[1]: Main PID loaded: 414
Jul 29 18:27:01 stbkp01 systemd[1]: nginx.service changed start -> running
Jul 29 18:27:01 stbkp01 systemd[1]: Job nginx.service/start finished, result=done
Jul 29 18:27:01 stbkp01 systemd[1]: Started The nginx HTTP and reverse proxy server.
Hint: Some lines were ellipsized, use -l to show in full.
[clint@stbkp01 conf]$
[clint@stbkp01 conf]$ exit
logout
Connection to 172.16.238.16 closed.
thor@jump_host /$
thor@jump_host /$ ls -l /home/
total 12
drwx------ 2 ansible ansible 4096 Oct 15 2019 ansible
-rw-r--r-- 1 root root 35 Jul 29 18:20 index.html
drwx------ 1 thor thor 4096 Jul 29 18:21 thor
thor@jump_host /$
thor@jump_host /$ sudo scp -r /home/index.html clint@172.16.238.16:/tmp
We trust you have received the usual lecture from the local System
Administrator. It usually boils down to these three things:
#1) Respect the privacy of others.
#2) Think before you type.
#3) With great power comes great responsibility.
[sudo] password for thor:
The authenticity of host '172.16.238.16 (172.16.238.16)' can't be established.
ECDSA key fingerprint is SHA256:4NrUFfYJy95oM4CueeNl9mJhrGHeNxNMTaOAX+LIuUY.
ECDSA key fingerprint is MD5:89:74:db:35:73:8e:4d:00:6b:48:c0:03:ba:c4:c7:9e.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '172.16.238.16' (ECDSA) to the list of known hosts.
clint@172.16.238.16's password:
Permission denied, please try again.
clint@172.16.238.16's password:
index.html 100% 35 36.3KB/s 00:00
thor@jump_host /$
thor@jump_host /$ sshpass -p '******' ssh -o StrictHostKeyChecking=no clint@172.16.238.16
Last login: Wed Jul 29 18:21:43 2020 from jump_host.linuxnginxconfig_app_net
[clint@stbkp01 ~]$
[clint@stbkp01 ~]$ ls -l /tmp/
total 24
-rw-r--r-- 1 clint clint 35 Jul 29 18:27 index.html
-rwx------ 28 root root 836 Aug 1 2019 ks-script-rnBCJB
drwx------ 3 root root 4096 Jul 29 18:23 systemd-private-39dd61ce342847a9bd998e2bf666e267-httpd.service-XDmu4T
drwx------ 3 root root 4096 Jul 29 18:27 systemd-private-39dd61ce342847a9bd998e2bf666e267-nginx.service-z3FzcZ
-rwxr-xr-x 1 root root 179 Jul 29 18:20 test2-telnet.sh
-rwxr-xr-x 1 root root 179 Jul 29 18:20 test-telnet.sh
-rw------- 28 root root 0 Aug 1 2019 yum.log
[clint@stbkp01 ~]$
[clint@stbkp01 ~]$ sudo mv /tmp/index.html /var/www/html/
[sudo] password for clint:
[clint@stbkp01 ~]$
[clint@stbkp01 ~]$ ls -l /var/www/html/
total 4
-rw-r--r-- 1 clint clint 35 Jul 29 18:27 index.html
[clint@stbkp01 ~]$
[clint@stbkp01 ~]$ curl http://172.16.238.16:8091
Welcome to xFusionCorp Industries![clint@stbkp01 ~]$
[clint@stbkp01 ~]$
[clint@stbkp01 ~]$ curl http://172.16.238.16:8080
Welcome to xFusionCorp Industries![clint@stbkp01 ~]$
[clint@stbkp01 ~]$
[clint@stbkp01 ~]$ nginx -t
nginx: [alert] could not open error log file: open() "/var/log/nginx/error.log" failed (13: Permission denied)
2020/07/29 18:29:21 [warn] 457#0: the "user" directive makes sense only if the master process runs with super-user privileges, ignored in /etc/nginx/nginx.conf:5
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
2020/07/29 18:29:21 [emerg] 457#0: open() "/run/nginx.pid" failed (13: Permission denied)
nginx: configuration file /etc/nginx/nginx.conf test failed
[clint@stbkp01 ~]$
[clint@stbkp01 ~]$ sudo su -
[root@stbkp01 ~]#
[root@stbkp01 ~]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@stbkp01 ~]#
[root@stbkp01 ~]# curl http://172.16.238.16:8091
Welcome to xFusionCorp Industries![root@stbkp01 ~]#
[root@stbkp01 ~]#
[root@stbkp01 ~]# curl http://172.16.238.16:8080
Welcome to xFusionCorp Industries![root@stbkp01 ~]#
[root@stbkp01 ~]#
[root@stbkp01 ~]# Connection to host01 closed by remote host.
Connection to host01 closed.
The environment has expired.
Please refresh to get a new environment.
-----------------------------------------------------------------------------------------------------------------
############################################################################################################
####### ### ### ####### ####### ######### ####### ####### ### ###
#### #### ### ### #### #### #### #### ######### #### #### #### #### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ###
####### ### ### ### ### ######## ####### ####### ### ###
####### ### ### ### ### ######## ####### ####### ### ###
### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ###
######### ######### #### #### #### #### ######### ######### ######### ### ###
####### ####### ####### ####### ######### ####### ####### ### ###
############################################################################################################
| true |
fa7c3e0a12d0ddc1c7f8e7cb0dc4fa0b87d552dd | Shell | petronny/aur3-mirror | /amule-adunanza/PKGBUILD | UTF-8 | 1,421 | 2.671875 | 3 | [] | no_license | # Mantainer: Federico 'baghera' Chiacchiaretta <federico.chia at gmail.com>
# PKGBUILD metadata for the AdunanzA-patched aMule build (Fastweb, Italy).
pkgname=amule-adunanza
pkgver=2010.1
_amulever=2.2.6
pkgrel=1
pkgdesc="An aMule version patched for use with Adunanza Network (Fastweb - Italy)"
url="http://amule-adunanza.sourceforge.net/"
license=('GPL')
arch=('i686' 'x86_64')
depends=('wxgtk' 'gd' 'geoip' 'libupnp' 'crypto++')
conflicts=('amule' 'amule-adnza-svn' 'amule-adnza')
# Fix: double quotes so ${_amulever} actually expands; the original single
# quotes made the package provide the literal string 'amule=${_amulever}'.
provides=("amule=${_amulever}")
source=(http://downloads.sourceforge.net/${pkgname}/aMule-AdunanzA/Stable/aMule-AdunanzA-${pkgver}-${_amulever}.tar.bz2
svn_fixes.patch
wxfont.patch
gcc-4.7.patch)
md5sums=('9156f5c8f7f70269e3c328c05146a3e7'
'9833f5eacfa485e6bf9a4f624b1be231'
'92556fd14c4b4180791e942140ec1d5a'
'f2f923bfec2f8805251ea3561a5d539a')

# Build aMule with the AdunanzA patch set applied on top of the release tarball.
build() {
cd ${srcdir}/aMule-AdunanzA-${pkgver}-${_amulever}
# Apply patch from revision 277 to 282
patch -Np1 -i ${srcdir}/svn_fixes.patch || return 1
# Apply patch for new wxgtk
patch -Np0 -i ${srcdir}/wxfont.patch || return 1
# Patch for compilation against gcc 4.7
# Consistency fix: check this patch's status like the two above.
patch -Np0 -i ${srcdir}/gcc-4.7.patch || return 1
./configure --prefix=/usr \
--enable-upnp \
--enable-geoip \
--enable-cas \
--enable-wxcas \
--enable-amule-daemon \
--enable-amulecmd \
--enable-webserver \
--enable-amule-gui \
--enable-alc \
--enable-alcc \
--disable-debug \
--enable-optimize
make || return 1
make DESTDIR=${pkgdir} install
}
| true |
06d90ed3fc02f7bd820a9f3deeb8e4682b35ee0d | Shell | solalsabbah/admsys_init | /scripts/03 | UTF-8 | 87 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# List the contents of a directory sorted by size (largest first).
# Usage: ./03 <directory>
if [ $# -ne 1 ]
then
# Fix: report misuse on stderr with a non-zero status (the original printed
# the usage text to stdout and exited 0, so callers could not detect errors).
echo "Usage: ./03 <directory>" >&2
exit 1
fi
# Quote the argument and use -- so paths with spaces or a leading dash work.
ls -S -- "$1"
| true |
929da8b2dd146a50e0d0390305aec80e8aac4239 | Shell | Shreekkanth/RedHatToolsInstaller | /Ansible_Tower/Playbook_Examples/MONITORING/role_nagios_client-master/files/plugins/check_open_files | UTF-8 | 568 | 3.953125 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/bash
# Nagios plugin: report how many files a given user has open and map the
# count onto OK/WARNING/CRITICAL thresholds.
PLUGINDIR=$(dirname "$0")
# Provides the Nagios exit-state constants (STATE_OK, STATE_WARNING, ...).
. "$PLUGINDIR"/utils.sh

# External tool paths.
# NOTE(review): lsof is more commonly installed as /usr/sbin/lsof or
# /usr/bin/lsof — confirm the path on the monitored hosts.
path_SUDO=/bin/sudo
path_LSOF=/sbin/lsof
path_WC=/bin/wc

if [ -z "$1" ]; then
	echo "Usage: $0 username"
	echo "Username: Username to check for open files"
	exit $STATE_UNKNOWN
else
	USER=$1
fi

# Count open files for the user.
# Fix: the original ran the literal command "path_SUDO" (missing '$'),
# which always failed and yielded a count of 0.
LSOF=$("$path_SUDO" "$path_LSOF" -u "$USER" | "$path_WC" -l)
echo "$1 has $LSOF files open"

# Thresholds: <3000 OK, 3000-7999 WARNING, >=8000 CRITICAL.
# Fix: "-fe" was a typo for "-ge" (invalid test operator aborted the elif).
if [[ $LSOF -lt 3000 ]]; then
	exit $STATE_OK
elif [[ ( $LSOF -ge 3000 ) && ( $LSOF -lt 8000 ) ]]; then
	exit $STATE_WARNING
else
	exit $STATE_CRITICAL
fi
cf57558cce0a05f79e56bcd223f7a26b50d8eeab | Shell | Danas1/avm-docking-station-entrega | /bin/device_added.sh | UTF-8 | 2,328 | 3.8125 | 4 | [] | no_license | #!/bin/bash
# udev-style hook run when the AVM device is attached: mount it, log the
# event, notify listeners over named pipes, rsync AVM* files into the backup
# directory, unmount, and signal the uploader process.
AVMDIR="/mnt/AVM" # Mount point used for the AVM device
BACKUPDIR="/home/pi/backups/" # Directory where the backups are stored.
LOGDIR="/home/pi/dockingstation.log"
PIPECOMM="/home/pi/pipes/pipecomm" # Named pipe to the user-notification process
PIPEUP="/home/pi/pipes/pipeup" # Named pipe to the upload process
# PRIOR SETUP
# Check whether the directory where the AVM will be mounted exists.
if [ ! -d $AVMDIR ]; then
# Create the directory if it does not already exist
mkdir $AVMDIR
fi
# Check whether the directory where the data will be stored exists.
if [ ! -d $BACKUPDIR ]; then
mkdir $BACKUPDIR
fi
sleep 5
#########
# The actual functionality starts here
#######
mount -v /dev/avm_link $AVMDIR
# Verify that the device was mounted correctly.
if [ $? -eq 0 ]; then
# The device was mounted correctly; log it (message text is user-facing Spanish).
echo "$(date +"%Y/%m/%d %T") AVM conectado" >> $LOGDIR
# Tell the user process that files will start uploading, if it is listening.
if [ -p $PIPECOMM ]; then
echo 'start_backup' > $PIPECOMM
fi
else
echo "AVM no fue conectado de forma correcta"
#exit 1
fi
# Add --remove-source-files to delete the files from the USB drive.
rsync -Pra --out-format="%t %n" --include=AVM* --exclude=* "$AVMDIR/" $BACKUPDIR \
| grep -B 1 -e "100%" \
| grep "AVM*" \
| sed -e "s+AVM+ Transferencia AVM->Docking Station AVM+" \
>> $LOGDIR
# RSYNC options:
# -P: progress information per file (useful for debugging).
# -r: recurse into the source directory (AVMDIR).
# -a: archive mode, keeps permissions of the transferred files.
# -vv: verbose output, useful for debugging.
# --remove-source-files: delete source files once transferred successfully.
# --include=AVM* --exclude=*: only transfer files whose name starts with AVM.
###
# grep:
# -B 1: print one preceding line, to know which files reached 100%.
# grep "AVM" keeps the file-name lines.
# sed rewrites the file name into a log-friendly transfer message.
umount $AVMDIR
# Tell the user process the files were transferred, if it is connected.
# NOTE(review): the systemctl result below is never checked — presumably a
# leftover liveness probe; confirm whether it should gate the pipe write.
systemctl is-active --quiet bluetooth-comm.service
if [ -p $PIPECOMM ]; then
echo "end_backup" > $PIPECOMM
fi
# Signal the upload process if it is active.
if [ -p $PIPEUP ]; then
echo 'upload_files' > $PIPEUP
fi
exit 0
| true |
dc5fb53420cbfc5532924fac289413bcaed961cd | Shell | lancetipton/kegerator | /scripts/generateSSHKey.sh | UTF-8 | 821 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Generate a dedicated keg SSH keypair, install it under ~/.kegConfig,
# authorize it for local login, and (on macOS) enable Remote Login.
# Setup the paths for creating the ssh key
cd ~/
KEG_HOME=$(pwd)
KEG_KEY=keg-ssh
KEG_KEY_PATH="$KEG_HOME/.kegConfig/$KEG_KEY"

# Create the key
# ssh-keygen -b 2048 -t rsa -f /tmp/keg-ssh -q -N ""
ssh-keygen -b 2048 -t rsa -f "/tmp/$KEG_KEY" -q -N ""

# Move the key into the .kegConfig directory
# Fix: create the destination directories first; the original assumed
# ~/.kegConfig (and ~/.ssh below) already existed and failed otherwise.
mkdir -p "$KEG_HOME/.kegConfig"
mv "/tmp/$KEG_KEY" "$KEG_KEY_PATH"
mv "/tmp/$KEG_KEY.pub" "$KEG_KEY_PATH.pub"

# Update the key's permissions (private key must not be world-readable)
chmod 400 "$KEG_KEY_PATH"

# Add the public key to the authorized_keys file
mkdir -p "$KEG_HOME/.ssh"
cat "$KEG_KEY_PATH.pub" >> "$KEG_HOME/.ssh/authorized_keys"

# If mac, turn on remote login.
# Fix: guard the macOS-only systemsetup call so the script also works on
# Linux (the original ran it unconditionally, as the comment implied it
# should not).
if [ "$(uname)" = "Darwin" ]; then
	sudo systemsetup -setremotelogin on
fi
a2bbb228e4e7daa6bfdf299ccd62b1cab5463aec | Shell | biofangyao/wanglab.code | /hisat2-lincRNA.script | UTF-8 | 3,871 | 2.65625 | 3 | [] | no_license | #!/bin/bash
#PBS -k o
#PBS -l nodes=1:ppn=8:dc,vmem=32gb,walltime=120:00:00
#PBS -M zhenyisong@gmail.com
#PBS -m abe
#PBS -N HISAT2
#PBS -j oe
#PBS -e /home/zhenyisong/data/cardiodata/SRP082390
#PBS -o /home/zhenyisong/data/cardiodata/SRP082390
##
## qsub /home/zhenyisong/data/wanglilab/wangcode/hisat2-lincRNA.script
##
## PBS batch job: HISAT2/StringTie lncRNA quantification pipeline for the
## SRP082390 paired-end, stranded RNA-seq data (mm10 + NONCODE lncRNA GTF).
##---
## discarded!!!
## #python -m HTSeq.scripts.count -f bam -r name -s no $base.sorted.bam $gtf > $base.txt
##---
#---
# unique mapping read
# see the hisat2 manual:
# https://ccb.jhu.edu/software/hisat2/manual.shtml
# SAM output
# see the discussion
# https://www.researchgate.net/post/How_can_I_get_uniquely_mapped_reads_from_Tophatv2012
#---
#---
# see PMID: 27560171
# Title:Transcript-level expression analysis of RNA-seq experiments with HISAT, StringTie and Ballgown.
#---
#---
# this raw data from Wang yibin'lab is strandness
# use this to infer strandness:
# infer_experiment.py -r mm10_RefSeq.bed -i hisat2/SRR4044044.sam
# mm10_RefSeq.bed data is from http://rseqc.sourceforge.net
# or the bed foramt can be obtained from GFF file
# This is PairEnd Data
# Fraction of reads explained by "1++,1--,2+-,2-+": 0.0096
# Fraction of reads explained by "1+-,1-+,2++,2--": 0.9558
# Fraction of reads explained by other combinations: 0.0346
# --rna-strandness RF
# see http://bowtie-bio.sourceforge.net/manual.shtml
# RF
# FR
#----
#---
# non-code RNA reference dataset is downloaded from NON-CODE database
# its bed format was transformed by using Galaxy program
# BED_to_GFF3 converter, otherwise it will fail.
# I found the GTF file in NONCODE database. all-versions
#
#---
source /etc/profile
##----------------------------------------------------------
## old index file was broken, I do not know why.
## I used the command below to re-build the genome
##----------------------------------------------------------
# Tool and reference-data locations (cluster-specific absolute paths).
hisat2='/data/software/bin/hisat2-2.0.4'
stringtie='/home/zhenyisong/bin/stringtie'
preDE='/usr/bin/prepDE.py'
mm10_genome='/home/zhenyisong/data/genome/mm10_Bowtie1Index/mm10.fa'
mm10_index='/home/zhenyisong/data/genome/hisat2/mm10'
mergelist='mergelist.txt'
merge_gtf='stringtie_merged.gtf'
ucsc_mm10_GTF='/home/zhenyisong/data/genome/mm10_Bowtie1Index/genes.gtf'
NONCODE_GTF='/home/zhenyisong/data/genome/lncRNA/NONCODE2016_mouse_mm10_current_lncRNA.gtf'
## cd $mm10_index
## discard to build the index.
## cd /home/zhenyisong/data/genome/hisat2/new_mm10
## $hisat2/hisat2-build -f -p 8 $mm10_genome genome
## cat $ucsc_mm10_GTF $NONCODE_GTF > mm10_lnc.protein.all.gtf
# Combined protein-coding + lncRNA annotation produced by the cat above.
GTF='/home/zhenyisong/data/genome/lncRNA/mm10_lnc.protein.all.gtf'
fastq='/home/zhenyisong/data/cardiodata/SRP082390'
## if unpiared the data, -U parameter will be used
##shopt -s nullglob
# Pair up *_1.fastq / *_2.fastq mates by position in the two globs.
cd $fastq
files1=(*_1.fastq)
files2=(*_2.fastq)
len=${#files1[@]}
cd /home/zhenyisong/data/cardiodata/SRP082390/hisat2
#---
# this is dangerous!!!!
#---
#rm -rf *
# I discard the -G parameter
#
# Per-sample assembly: the alignment steps are commented out (BAMs already
# exist); StringTie assembles each sample's transcripts guided by the GTF.
for (( i=0; i<${len}; i++ ));
do
forward=${files1[$i]}
backward=${files2[$i]}
base=${forward%_1.fastq}
#$hisat2/hisat2 -p 8 --dta --fr --rna-strandness RF -x $mm10_index/genome -1 $fastq/$forward -2 $fastq/$backward -S $base.sam
#samtools view -Sb $base.sam |samtools sort -@ 8 - $base
$stringtie -p 8 -G $GTF -o $base.gtf -l $base $base.bam
#$stringtie -p 8 -o $base.gtf -l $base $base.bam
done
# Merge all per-sample assemblies into one consensus annotation.
ls S*.gtf > $mergelist
stringtie --merge -p 8 -G $GTF -o $merge_gtf $mergelist
#stringtie --merge -p 8 -o $merge_gtf $mergelist
# NOTE: everything below this exit is intentionally disabled scratch code
# (comparison and ballgown quantification steps kept for reference).
exit 0
gffcompare -G -r $wholeRNAgtf $hisat2lncRNA
cuffcompare -i $hisat2lncRNA -r $wholeRNAgtf
for (( i=0; i<${len}; i++ ));
do
forward=${files1[$i]}
base=${forward%_1.fastq}
$stringtie -e -B -p 8 -G $merge_gtf -o ballgown/$base/$base.gtf $base.bam
done
$preDE -i ballgown
##sebnif -g Mouse -r genes.gtf stringtie_merged.gtf
c8cb2b0e3d6eb8005977a87fc96f6fc4fa43fd52 | Shell | talhajilal/aws | /kernel_verificatio_script.sh | UTF-8 | 416 | 3.640625 | 4 | [] | no_license | echo " Please enter kernel version for comparison i.e"
# Compare the running kernel version (uname -r) against an expected baseline
# version read from stdin; both are .el6 version strings.
# NOTE(review): the comparison is plain lexicographic string ordering, not a
# true version sort — it can misorder e.g. 2.6.9 vs 2.6.10; confirm this is
# acceptable for the kernels being checked.
echo "2.6.32-573.el6.x86_64"
read -r expected
received=$(uname -r)
# Strip the ".el6.<arch>" suffix so only the leading version part is compared.
exp_ver=${expected//\.el6.*/}
re_ver=${received//\.el6.*/}
echo "$exp_ver"
# Fix: the original echoed the undefined variable $ew_ver (typo for $re_ver).
echo "$re_ver"
# The length check confirms the suffix was actually stripped from the running
# version, i.e. it really is an .el6 kernel string.
# Fix: compare against $exp_ver — the original used the undefined $ex_ver,
# so the comparison was always against the empty string.
if [ ${#re_ver} -lt ${#received} ] && [ "$re_ver" \> "$exp_ver" ]
then
echo "Installed Version is higher than Expected."
echo "Passed."
else
echo "== FAILED == Received output is not as expected."
fi
73949acbfe2adac1b3eeebef218f93bec79d4d2b | Shell | luancheng12/checkinpanel | /api_json52toml.sh | UTF-8 | 578 | 2.75 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# shellcheck disable=SC2188
# The no-op here-doc below is metadata parsed by the qinglong panel
# (cron schedule + task name); keep its contents byte-for-byte intact.
<<'COMMENT'
cron: 45 0-23/1 * * *
new Env('JSON5toTOML 工具');
COMMENT
# Inside a qinglong container: refresh the checkinpanel repo and its config
# template, then run every *utils_json52toml.pl converter found under the
# current directory tree.
json52toml() {
    if [[ -f /ql/config/config.sh ]]; then
        # Ensure .pl scripts are picked up when pulling the repo.
        sed -i '/^RepoFileExtensions/c RepoFileExtensions="js pl py sh ts"' /ql/config/config.sh
        ql repo https://github.com/Oreomeow/checkinpanel.git "api_|ck_|ins_" "^checkin" "^notify|^utils" "master"
        cp -f /ql/repo/Oreomeow_checkinpanel_master/check.sample.toml /ql/config/check.template.toml
    fi
    find . -type f -name '*utils_json52toml.pl' -exec perl {} \;
}
json52toml
| true |
0843d43b671f4450eef497cb151c5af098fbda37 | Shell | alexcstark/stark-dotfiles | /.zshrc | UTF-8 | 2,984 | 3.078125 | 3 | [] | no_license | # If you come from bash you might have to change your $PATH.
export PATH=$HOME/bin:/usr/local/bin:$PATH
export PATH=$JAVA_HOME/bin:$PATH

# Path to your oh-my-zsh installation.
# Fix: this must be an absolute path under the home directory; the original
# used $USER (the bare username), which produced a relative, non-existent
# path and broke the `source $ZSH/oh-my-zsh.sh` line below.
export ZSH=$HOME/.oh-my-zsh

# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
ZSH_THEME="bullet-in"

# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in ~/.oh-my-zsh/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )

# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"

# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
HYPHEN_INSENSITIVE="true"

# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"

# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git)

source $ZSH/oh-my-zsh.sh

# User configuration

# use vim as the visual editor
export VISUAL=vim
export EDITOR="$VISUAL"

# zmv allows us to do some cool rename things
autoload -U zmv

# syntax highlighter
source /usr/local/share/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh

unsetopt correct_all

[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh

# make enter send return and not ^M TODO - figure out where this comes from
stty sane;
stty icrnl;

# Preserve Insert Cursor shape in nvim using iterm
export NVIM_TUI_ENABLE_CURSOR_SHAPE=1
# Change directory and, on success, list the new directory's contents
# (including dotfiles).
# Fix: only run `ls` if the cd succeeded, so a failed cd propagates its
# non-zero status instead of being masked by ls's exit code.
function cd {
  builtin cd "$@" && ls -a;
}
# "git pull origin master" helper: bring master up to date with a rebasing
# pull, then rebase the current branch on top of it, stashing and restoring
# any uncommitted working-tree changes around the branch switch.
function gplom {
  BRANCH=$(git rev-parse --abbrev-ref HEAD)
  # Already on master: a plain rebasing pull is all that is needed.
  if [[ "$BRANCH" == "master" ]]; then
    git pull origin master --rebase;
    return
  fi
  # Non-empty porcelain output means there are uncommitted changes to stash.
  GS_OUTPUT=$(git status --porcelain)
  if [[ -n "$GS_OUTPUT" ]]; then
    echo "stashing detected changes"
    git stash;
  fi
  git checkout master;
  git pull origin master --rebase --autostash;
  # `checkout -` returns to the previous (feature) branch.
  git checkout -;
  git rebase master;
  # Restore the stashed changes on the original branch.
  if [[ -n "$GS_OUTPUT" ]]; then
    git stash pop;
  fi
}
# Commit shorthand.
# Fix: forward all arguments quoted ("$@") instead of only $1 and $2
# unquoted, so flags and commit messages containing spaces survive intact
# (backward compatible: two-argument calls behave as before).
function gc {
  git commit "$@";
}

# Status shorthand.
function gs {
  git status;
}

# Edit this zsh configuration.
function prof {
  vim ~/.zshrc;
}

# Reload this zsh configuration into the current shell.
function reprof {
  source ~/.zshrc;
}
# Segments shown by the bullet-train prompt theme, in order.
BULLETTRAIN_PROMPT_ORDER=(
  time
  context
  dir
  git
)

# awk column-extraction shortcuts: awkN prints the Nth whitespace-separated
# field; awklast prints the final field of each line.
alias awk1="awk '{print \$1}'"
alias awk2="awk '{print \$2}'"
alias awk3="awk '{print \$3}'"
alias awklast="rev | awk1 | rev"

alias ..="cd .." # if you're not using "." for sourcing bash
alias c=clear

# Pretty-print a JSON string given as arguments.
# Fix: pass the arguments through quoted ("$*") so JSON containing spaces or
# glob characters reaches json.tool intact; the original `echo $@` word-split
# and glob-expanded the input.
function ppjson {
  printf '%s\n' "$*" | python -m json.tool;
}
cac11c43b1e884c19da79e8767939db05e734657 | Shell | FSMaxB/lfs-me-repos | /blfs-7.5/tcl-8.6.1 | UTF-8 | 1,367 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# lfs-me build recipe for Tcl 8.6.1 (source tarball + HTML docs).
# The lfs_me_* hooks are invoked by the lfs-me framework, which supplies
# $sources_dir, $build_dir, $current_dir and $fakeroot_dir.
pkgname=tcl
pkgver=8.6.1
pkgver_postfix=
sources=( "http://downloads.sourceforge.net/${pkgname}/${pkgname}${pkgver}-src.tar.gz"
"http://downloads.sourceforge.net/${pkgname}/${pkgname}${pkgver}-html.tar.gz")
sha1sums=( '5c83d44152cc0496cc0847a2495f659502a30e40'
'92d1315504cb8f03187fc271f93e1ca5264912d9' )

# Unpack source + docs and configure (64-bit support enabled on x86_64).
lfs_me_prepare() {
	tar -xf "${sources_dir}/${pkgname}${pkgver}-src.tar.gz" -C "$build_dir"
	cd "${build_dir}/${pkgname}${pkgver}"
	tar -xf "${sources_dir}/${pkgname}${pkgver}-html.tar.gz" --strip-components=1
	cd unix
	./configure --prefix=/usr \
	--without-tzdata \
	--mandir=/usr/share/man \
	$([ $(uname -m) = x86_64 ] && echo --enable-64bit)
}

# Run the test suite, teeing the output into a per-package log.
lfs_me_check() {
	make test 2>&1 | tee "${current_dir}/${pkgname}-check-log"
}

# Install into the fakeroot, fix build paths baked into tclConfig.sh,
# and ship headers, tclsh symlink and HTML documentation.
lfs_me_install() {
	# Rewrite the build-tree paths in tclConfig.sh to their installed locations.
	sed -e "s@^\(TCL_SRC_DIR='\).*@\1/usr/include'@" \
	-e "/TCL_B/s@='\(-L\)\?.*unix@='\1/usr/lib@" \
	-i tclConfig.sh
	make DESTDIR="$fakeroot_dir" install
	make DESTDIR="$fakeroot_dir" install-private-headers
	ln -v -sf tclsh8.6 "${fakeroot_dir}/usr/bin/tclsh"
	chmod -v 755 "${fakeroot_dir}/usr/lib/libtcl8.6.so"
	mkdir -v -p "${fakeroot_dir}/usr/share/doc/${pkgname}-${pkgver}"
	cp -v -r ../html/* "${fakeroot_dir}/usr/share/doc/${pkgname}-${pkgver}"
}
| true |
c5f8fd7d1149bbf7321ec0b656e9d7a83cda5b18 | Shell | junho-falinux/docker-bsp | /install.sh | UTF-8 | 448 | 3.390625 | 3 | [] | no_license | #!/bin/sh
# Build the bsp docker image and install the runner scripts into ~/bin.
PACKAGE_NAME=bsp

echo "[*] Building docker image for ${PACKAGE_NAME}"
docker build -t ${PACKAGE_NAME} .

echo "[*] Copying runner script ~/bin"
mkdir -p ~/bin/
for RUNNER in runner/*
do
	# Fix: quote "$RUNNER" in the case selector and cp calls so file names
	# containing spaces or glob characters are handled safely (SC2086).
	case "$RUNNER" in
		runner/docker-runner*)
			# Plain copy for the generated docker-runner wrappers.
			echo " = copy $RUNNER"
			cp --remove-destination "$RUNNER" ~/bin/
			;;
		*)
			# Archive copy (-a) preserves mode/ownership/timestamps for the rest.
			echo " = copy $RUNNER"
			cp -a --remove-destination "$RUNNER" ~/bin/
			;;
	esac
done
| true |
79d00075e957e522cc218018f051411c299a4a7e | Shell | natuty/linux | /example/bash脚本/7章 条件结构/7-15.sh | GB18030 | 340 | 3.03125 | 3 | [] | no_license | #ʾű7-15.sh caseṹʹ
#! /bin/bash
echo -n һ:
read num
case $num in
1) echo ֵΪ1;;
2) echo ֵΪ2;;
3) echo ֵΪ3;;
4) echo ֵΪ4;;
5) echo ֵΪ5;;
*) echo ֵ5;;
esac
echo caseṹн
| true |
e49bb8328194cfeebd71c33318c22b6677321e90 | Shell | leohuang4977/tvb-ukbb | /bb_FS_pipeline/bb_FS_create_zip_packages.sh | UTF-8 | 720 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/bash
#set -x
mkdir -p zips
mkdir -p logs_FS_zips
# For every usable subject listed in subj.txt, emit a one-line shell command
# that zips the subject's FreeSurfer directory (minus the excluded files) and
# writes its md5. The commands accumulate in jobs_zips.txt for later batch
# submission (see the commented fsl_sub line at the bottom).
for elem in `cat subj.txt ` ; do
if [ -d $elem/FreeSurfer ] ; then
if [ ! -d $elem/FreeSurfer/unusable ] ; then
# awk -F "" splits the id into single characters (GNU awk extension):
# the first character is the visit code, characters 2-8 form the subject
# id used in the output file name.
visit=`echo $elem | awk -F "" '{print $1}'`;
newName=`echo $elem | awk -F "" '{print $2$3$4$5$6$7$8}'`;
# NOTE: the backticked column_to_row runs now (at generation time) to
# inline the -x exclusion list into the emitted zip command.
echo "cd $elem ; \
zip -r ../zips/${newName}_20263_${visit}_0.zip FreeSurfer -x `column_to_row $BB_BIN_DIR/bb_data/FS_files_to_exclude.txt` ; \
md5sum ../zips/${newName}_20263_${visit}_0.zip > ../zips/${newName}_20263_${visit}_0.md5 ; \
cd .. ";
fi
fi
done > jobs_zips.txt
#fsl_sub -l logs_FS_zips -t jobs_zips.txt
| true |
eeed56b3ba416434800dfa9a703818751d8bb5bc | Shell | noetl/hadoop-tools | /provisioning-ec2/install-spark.sh | UTF-8 | 1,551 | 3.640625 | 4 | [] | no_license | #!/bin/bash
set -e

# Install and configure Spark on an EC2 node: download the EMR Spark bundle,
# template spark-defaults.conf from the JSON config, and start the history
# server as the hadoop user.
if [ $# -ne 2 ]; then
	echo "Usage: ./install-spark.sh <json_conf_file> <master_host_name>"
	# NOTE(review): `exit -1` is non-portable and becomes status 255 in bash.
	exit -1
fi

json_conf_file=$1

DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# Exports yarn_mem, exec_mem, exec_cores, slave_cores, ... from the JSON file.
. $DIR/export-conf.sh $json_conf_file

echo "yarn.nodemanager.resource.memory-mb ${yarn_mem}"
echo "spark.executor.memory ${exec_mem}m"
echo "spark.executor.cores ${slave_cores}"

MASTER=$2
echo "MASTER: $MASTER"

echo "Downloading Spark...."
cd /usr/lib
sudo curl -O https://s3-us-west-2.amazonaws.com/noetl-provisioning-us-west-2/emr-4.7.1/spark.tar.gz
echo "Installing Spark...."
sudo tar xzf spark.tar.gz
sudo rm -rf spark.tar.gz
sudo mkdir -p /mnt/var/log/spark
sudo chown hadoop:hadoop /mnt/var/log/spark
# Create the HDFS event-log directory for the Spark history server.
sudo su - hadoop -c '/usr/lib/hadoop/bin/hadoop fs -mkdir -p /var/log/spark/apps'
sudo su - hadoop -c '/usr/lib/hadoop/bin/hadoop fs -chmod g+w /var/log/spark/apps'

echo "Configuring Spark...."
# set MASTER and other variables in template
# (the template contains literal ${MASTER}/${exec_mem}/${exec_cores} tokens)
sed -i -e "s/\${MASTER}/${MASTER}/g" $DIR/spark/conf/spark-defaults.conf
sed -i -e "s/\${exec_mem}/${exec_mem}/g" $DIR/spark/conf/spark-defaults.conf
sed -i -e "s/\${exec_cores}/${exec_cores}/g" $DIR/spark/conf/spark-defaults.conf

sudo cp -R $DIR/spark /etc/
echo "Configuring Spark done"

# Expose the Spark config and binaries in the hadoop user's shell.
sudo su - hadoop -c 'cat >> ~/.bashrc << EOL
export SPARK_CONF_DIR=/etc/spark/conf
export PATH=\$PATH:/usr/lib/spark/bin
EOL'

echo "Starting spark history server...."
sudo su - hadoop -c '/usr/lib/spark/sbin/start-history-server.sh'
echo "Starting spark history server done"
f67b1b261541451a2591ddce5408e1e13443b9d0 | Shell | alundiak/lanbash | /osx_defaults.sh | UTF-8 | 2,052 | 2.625 | 3 | [] | no_license | #!/bin/bash
#
# Script to contain useful Mac OS commands. Sometimes, I may use.
# Planned to have 2 modes: "Change all as I want" and "restore all I changed to default"
# Each active `defaults write` has its restore-to-default counterpart commented
# next to it; `killall Finder` restarts Finder so the change takes effect.
#
# http://osxdaily.com/2010/02/12/how-to-quit-the-finder/
defaults write com.apple.finder QuitMenuItem -bool true && killall Finder
# defaults write com.apple.finder QuitMenuItem -bool false && killall Finder #default
# Finder. Show All (hidden files)
defaults write com.apple.finder AppleShowAllFiles true
#defaults write com.apple.finder AppleShowAllFiles false #default
# Enable the debug menu and restart the Mac App Store.
defaults write com.apple.appstore ShowDebugMenu -bool true
# defaults write com.apple.appstore ShowDebugMenu -bool false #default
# http://apple.stackexchange.com/questions/99318/closing-applications-after-documents-are-closed-os-x-10-8 - doesn't work
# If you want to disable showing an iCloud-centric open dialog when you for example open TextEdit or Preview,
# you can either disable syncing documents and data from the iCloud preference pane, or use this unexposed preference:
# defaults write -g NSDisableAutomaticTermination -bool true
# https://developer.apple.com/library/mac/documentation/Cocoa/Reference/NSApplicationDelegate_Protocol/index.html
# MORE flags !!!
# Use Plain Text Mode as Default in TextEdit
defaults write com.apple.TextEdit RichText -int 0 # or -bool false
# defaults write com.apple.TextEdit RichText -int 1 # default
# https://www.tekrevue.com/tip/show-path-finder-title-bar/
#true is by default - was for me on MacOS Mojave v10.14.1
defaults write com.apple.finder _FXShowPosixPathInTitle -bool false
# => helped me to show short name of folder - final
# https://apple.stackexchange.com/questions/40821/how-do-i-get-finder-windows-to-reopen-on-start-up
# After Mojave installed, com.apple.finder NSQuitAlwaysKeepsWindows set/changed to false.
# So I had to set true.
defaults write com.apple.finder NSQuitAlwaysKeepsWindows -bool true
# This was helpful:
# defaults read com.apple.finder NSQuitAlwaysKeepsWindows # shows value (1/0)
62eb9401775613638d4553ffee074c2d4f5e055c | Shell | click0/jinx | /modules/20-jinx-formatting.sh | UTF-8 | 1,542 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Define colors and formats for making everything pretty.
# Each code is the empty string when JINX_IS_GRUMPY=1 (disables all color/
# formatting output); otherwise it is the literal ANSI escape text — echo is
# used without -e on purpose, the consumer expands the sequences later.
# Fix: replaced the 17 copies of the backtick-quoted grumpy check with modern
# $(...) substitution and a single shared helper.
_jinx_fmt() { [[ $JINX_IS_GRUMPY -eq 1 ]] && echo "" || echo "$1"; }

COLOR_RED=$(_jinx_fmt "\033[0;31m")
# COLOR_RED_LIGHT=$(_jinx_fmt "\033[1;31m")
COLOR_GREEN=$(_jinx_fmt "\033[0;32m")
# COLOR_GREEN_LIGHT=$(_jinx_fmt "\033[1;32m")
COLOR_ORANGE=$(_jinx_fmt "\033[0;33m")
COLOR_YELLOW=$(_jinx_fmt "\033[1;33m")
# COLOR_BLUE=$(_jinx_fmt "\033[0;34m")
# COLOR_BLUE_LIGHT=$(_jinx_fmt "\033[1;34m")
# COLOR_PURPLE=$(_jinx_fmt "\033[0;35m")
COLOR_PURPLE_LIGHT=$(_jinx_fmt "\033[1;35m")
# COLOR_CYAN=$(_jinx_fmt "\033[0;36m")
# COLOR_CYAN_LIGHT=$(_jinx_fmt "\033[1;36m")
# COLOR_GRAY=$(_jinx_fmt "\033[1;30m")
# COLOR_GRAY_LIGHT=$(_jinx_fmt "\033[0;37m")
# COLOR_BLACK=$(_jinx_fmt "\033[0;30m")
# COLOR_WHITE=$(_jinx_fmt "\033[1;37m")
FORMAT_BOLD=$(_jinx_fmt "\033[1m")
FORMAT_UNDERLINE=$(_jinx_fmt "\033[4m")
FORMAT_END=$(_jinx_fmt "\033[0m")
| true |
b8d24bc2778a9b8af11fafce23f5532870e0ea4e | Shell | xiongqi1/web | /db_apps/connection_mgr/score_connection.sh | UTF-8 | 1,058 | 3.46875 | 3 | [] | no_license | #!/bin/sh
# Score internet connectivity (0/2/3/9) by fetching a known time page over
# raw netcat, falling back to a DNS-resolution check; the score is logged
# via syslog and echoed to stdout.
WGET_CONNECTION_TIME=10
WGET_HOST="services.netcomm.com.au"
WGET_URL="http://services.netcomm.com.au/c/time.pl"
# Pattern a valid response body must match: a date, then <br/> and digits.
WGET_STR='2[0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9].*<br/>[0-9]\+'

# Returns 1 only if ping reports "bad address", i.e. name resolution failed.
# Note the redirection order: `2>&1 >/dev/null` sends stderr into the pipe
# and discards stdout, so grep sees only ping's error text.
nslookup_by_ping() {
	host="$1"
	if ping -c 1 "$host" 2>&1 >/dev/null | grep -q "bad address"; then
		return 1
	fi
	return 0
}

# Minimal HTTP/1.0 GET via nc; the response (and any errors) go to stdout.
# The pipeline's exit status is also stored in the global RESULT_WGET, which
# the scoring logic below inspects.
wget_by_nc() {
	host="$1"
	url="$2"
	logger -t "score" "echo -e \"GET $url HTTP/1.0\n\n\" | nc -w $WGET_CONNECTION_TIME \"$host\" 80 2>&1"
	echo -e "GET $url HTTP/1.0\n\n" | nc -w $WGET_CONNECTION_TIME "$host" 80 2>&1
	RESULT_WGET=$?
	return $RESULT_WGET
}

# score: 9 = valid HTTP body received, 2 = TCP connect worked but body did
# not match, 3 = connect failed but DNS resolves, 0 = nothing works.
score=0
logger -t "score" "scoring..."

TEMPFILE="/tmp/score-$$.tmp"
wget_by_nc "$WGET_HOST" "$WGET_URL" > "$TEMPFILE"
if grep -q "$WGET_STR" "$TEMPFILE"; then
	score=9
else
	test -z "$RESULT_WGET" && RESULT_WGET=0
	if [ $RESULT_WGET = 0 ]; then
		score=2
	else
		# Capture the function's exit status via the trailing `echo $?`.
		RESULT_NSLOOKUP=$(nslookup_by_ping "$WGET_HOST"; echo $?)
		if [ $RESULT_NSLOOKUP = 0 ]; then
			score=3
		fi
	fi
fi

logger -t "score" "score : $score"
echo "score : $score"

rm -f "$TEMPFILE"
| true |
1d68cf2910a10be0b24f56c6f44c6b2ec92925c2 | Shell | scaleway/docker-machine-driver-scaleway | /scripts/build-dmds-packages.sh | UTF-8 | 1,172 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -eo pipefail

# Build and package docker-machine-driver-scaleway for every target listed
# in "archives". Each entry has the form "<binary-name>/<platform>/<format>".
version=$1
if [[ -z "$version" ]]; then
  echo "usage: $0 <version>"
  exit 1
fi

package="github.com/scaleway/docker-machine-driver-scaleway"
archives=("docker-machine-driver-scaleway-linux-arm/linux_arm/tar.gz" "docker-machine-driver-scaleway-linux-amd64/linux_amd64/tar.gz" "docker-machine-driver-scaleway-linux-386/linux_386/tar.gz" "docker-machine-driver-scaleway-freebsd-arm/freebsd_arm/zip" "docker-machine-driver-scaleway-freebsd-amd64/freebsd_amd64/zip" "docker-machine-driver-scaleway-freebsd-386/freebsd_386/zip" "docker-machine-driver-scaleway-darwin-386/darwin_386/zip" "docker-machine-driver-scaleway-darwin-amd64/darwin_amd64/zip" "docker-machine-driver-scaleway-linux-amd64/amd64/deb" "docker-machine-driver-scaleway-linux-arm/armhf/deb" "docker-machine-driver-scaleway-linux-386/i386/deb")

mkdir -p "./release"
cd "./release"

for archive in "${archives[@]}"
do
  # Split "name/platform/ext" on '/', then split the name on '-': the last
  # two dash-separated fields give "<os>/<arch>" for the Go cross-build.
  # (Negative array subscripts require bash 4.3+.)
  archive_split=(${archive//\// })
  bin=${archive_split[0]}
  bin_split=(${bin//-/ })
  ../go-executable-build.sh "$package" "${bin_split[-2]}/${bin_split[-1]}" \
  && ../packages-build.sh "$package" "$version" "$archive" \
  && rm -f "$bin"
done
| true |
21a0b2198c095d0aa53f8fa1d2ebd0ad54efe9fe | Shell | cristianAmoyao/parches-de-todo-tipo | /Instalar_Flash/flash_instalar_V2.sh | UTF-8 | 851 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Under the GPL license.
# Download the Adobe Flash PPAPI plugin, unpack it in $HOME/FLASH, and copy
# the plugin files into /usr/lib/adobe-flashplugin (user-facing messages are
# intentionally kept in Spanish).
mkdir -p "$HOME/FLASH"
# Fix: cd to the absolute path and abort on failure; the original `cd FLASH`
# only worked when the script happened to be run from $HOME.
cd "$HOME/FLASH" || exit 1
wget https://fpdownload.adobe.com/pub/flashplayer/pdc/25.0.0.171/flash_player_ppapi_linux.i386.tar.gz
tar xvzf flash_player_ppapi_linux.i386.tar.gz
rm license.pdf;rm readme.txt;rm -r LGPL;rm flash_player_ppapi_linux.i386.tar.gz
#read -p "Primero libpepflashplayer.so : " flash
#read -p "Segundo manifest.json : " flash2
#echo "usted Movera los archivos $flash y $flash2 "
#echo "cp $flash /usr/lib/adobe-flashplugin;cp $flash2 /usr/lib/adobe-flashplugin;rm move.sh" > move.sh
echo
echo "============bajo licencia GPL====================="
echo "se necesitara password para mover los archivos"
# The sudo mkdir below creates the system directory (the original also ran a
# redundant unprivileged mkdir first, which always failed for normal users).
sudo mkdir -p /usr/lib/adobe-flashplugin;sudo cp -f libpepflashplayer.so /usr/lib/adobe-flashplugin;sudo cp -f manifest.json /usr/lib/adobe-flashplugin
rm -r "$HOME/FLASH"
9a28de4a4651e36040304f9ff038522888ae6e51 | Shell | wiltonruntime/wilton | /resources/builder/builder | UTF-8 | 1,249 | 3.0625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright 2020, alex at staticlibs.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
export BUILDER_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
export WILTON_DIR="$BUILDER_DIR"../../
if [ -z "$JAVA_HOME" ] ; then
echo "'JAVA_HOME' environment variable must be defined"
exit 1
fi
if [ -z "$1" ] ; then
echo "USAGE: builder <task> [args]"
exit 1
fi
"$JAVA_HOME"/bin/java \
-XX:MaxRAM=256M \
-XX:+UseSerialGC \
-XX:+TieredCompilation \
-XX:TieredStopAtLevel=1 \
-cp "$WILTON_DIR"tools/mvnrepo/org/mozilla/rhino/1.7.7.1/rhino-1.7.7.1.jar \
org.mozilla.javascript.tools.shell.Main \
-O -1 \
"$BUILDER_DIR"init/runTask.js \
"$*"
| true |
be41368e5cac1c64d202bc359d888279e0fcccbd | Shell | rmukhia/reservationapp | /reservation/server/gunicorn_start.sh | UTF-8 | 848 | 2.71875 | 3 | [] | no_license | NAME="reservation" #Name of the application (*)
DJANGODIR=/app/server/ # Django project directory (*)
SOCKFILE=/app/server/unix.sockfile # we will communicate using this unix socket (*)
DJANGO_SETTINGS_MODULE=reservation.settings # which settings file should Django use (*)
DJANGO_WSGI_MODULE=reservation.wsgi # WSGI module name (*)

export DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE
export PYTHONPATH=$DJANGODIR:$PYTHONPATH

# Start your Django Unicorn
# Programs meant to be run under supervisor should not daemonize themselves (do not use --daemon)
# NOTE: $NAME is set on the first line of this script; exec replaces this
# shell with gunicorn, so nothing after this line would ever run.
exec gunicorn ${DJANGO_WSGI_MODULE}:application \
  --name $NAME \
  --bind=unix:$SOCKFILE \
  --access-logfile /app/logs/gunicorn-access.log \
  --error-logfile /app/logs/gunicorn-error.log
| true |
2652a1bbbf34380612b586d3ce44112377dc8778 | Shell | amerlyq/airy | /xkb/xkb-xorg.gen | UTF-8 | 710 | 3 | 3 | [
"MIT"
] | permissive | #!/bin/bash -e
# Load the user's shell profile for CURR_XKB_* defaults and helper tools.
source ~/.shell/profile
# Layout stack (overridable via CURR_XKB_LANG) and overlay name, taken from
# $1, then CURR_XKB_OVER, then the default "vim".
lang=${CURR_XKB_LANG:-amer(us):1+amer(ru):2+amer(ua):3}
over=${1:-${CURR_XKB_OVER:-vim}}
# Fail fast: ${over:?} aborts if empty; r.xkb-has-overlay presumably verifies
# the overlay exists (script runs with -e, so a failure stops here).
r.xkb-has-overlay -q ${over:?}
# Generate the xorg keyboard config into a temp file, then install it.
src="${TMPDIR:?}/00-keyboard.conf"
cat> "$src" <<EOT
# vim: ft=xf86conf
# SEE: man xkeyboard-config
Section "InputClass"
Identifier "system-keyboard"
MatchIsKeyboard "on"
# Driver "kbd"
Option "XkbOptions" ""
Option "XkbModel" "pc104"
Option "XkbTypes" "complete+amer"
Option "XkbCompat" "amer"
Option "XkbLayout" "amer,amer,amer"
Option "XkbVariant" "us,ru,ua"
Option "XkbSymbols" "amer(pc)+${lang}+amer(mod)+overlay($over)"
EndSection
EOT
# Clean up the temp file on exit; linkcp copies/links it into xorg.conf.d.
trap "rm '$src'" EXIT
linkcp -ct /etc/X11/xorg.conf.d "$src"
| true |
ff542351496ce8f5f823a175627c01c77da8f3f3 | Shell | jdagilliland/eces690-jdag-jdg-final-project | /sam2bam.sh | UTF-8 | 736 | 2.625 | 3 | [] | no_license | #!/bin/bash
#$ -S /bin/bash
#$ -cwd
#$ -M jdg323@drexel.edu
#$ -P nsftuesPrj
#$ -pe openmpi_ib 1
#$ -l h_rt=24:00:00
#$ -l h_vmem=8G
##$ -pe shm 32-64 #for parallel processing
#$ -l mem_free=6G
#$ -q all.q@@amdhosts
# ---- Keep the following
. /etc/profile.d/modules.sh
module load shared
module load proteus
module load sge/univa
module load gcc/4.8.1
# ---- Keep the foregoing
PATH=/mnt/HA/groups/nsftuesGrp/.local/bin:$PATH
PATH=/mnt/HA/groups/nsftuesGrp/data/gilliland-guest/bin:$PATH
PROJPATH=/mnt/HA/groups/nsftuesGrp/data/gilliland-guest
# Set up variables to keep organized.
DATADIR="$PROJPATH/idba-mapped"
# Convert every SAM alignment in DATADIR to BAM (written next to the input as
# <name>.sam.bam, matching the previous naming).  Iterating over the glob
# directly, with every expansion quoted, is safe for file names containing
# whitespace -- unlike the previous unquoted $SAMFILES round-trip.
for f in "$DATADIR"/*.sam
do
    echo "$f"
    newf="$DATADIR/$(basename -- "$f")"
    samtools view -bS "$f" > "$newf.bam"
done
| true |
41a6bd9f01bbe38ab038182e01f0d40f7952ffd8 | Shell | bextra/txome-pxome-pipeline | /bin/run_trinity_genomeguided_isner.sh | UTF-8 | 890 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Genome-guided Trinity
# the reads were first aligned to rheMac2
# Trinity will partition according to locus, followed by de novo transcriptome assmebly at each locus
#Read alignments must be supplied as coordinate-sorted bam file.

# Abort if the data directory is missing instead of running in the wrong CWD.
cd /Chanlab/Scratch/kristen/Macaque_transcriptome/BAMs || exit 1

# Iterate over the BAMs with a glob instead of parsing `ls` output (robust for
# unusual file names).  Change the pattern to *.bam to process every sample.
for FILE in human*.bam
do
    [ -e "$FILE" ] || continue   # skip cleanly when the glob matches nothing
    echo "Sorting $FILE"
    samtools sort -f "$FILE" "sorted_$FILE"
done

export JAVA_HOME=/Chanlab/Packages/Java/jdk1.7.0_80/bin/java
export PATH="/Chanlab/Packages/Java/jdk1.7.0_80/bin/:$PATH"

# Run genome-guided Trinity on each coordinate-sorted BAM.
for FILE in human*.bam
do
    [ -e "$FILE" ] || continue
    time /Chanlab/Packages/Trinity/trinityrnaseq-2.0.6/Trinity --genome_guided_bam "/Chanlab/Scratch/kristen/Macaque_transcriptome/BAMs/sorted_$FILE" --genome_guided_max_intron 10000 -seqType fq --max_memory 70G --CPU 20 --output "./trinity_gg_$FILE" --full_cleanup > "stdout_run_$FILE"
done
| true |
79d28b0352ddd0cb327afe7cbac5f90949a97497 | Shell | rhgrant10/dotfiles | /use | UTF-8 | 618 | 4.28125 | 4 | [] | no_license | #!/usr/bin/env bash
# Basic sanity check.
old_dir="${HOME}/dotfiles.old"
if [ -d "$old_dir" ]; then
  printf "You already have a dotfile backup directory! Remove it first."
  exit 1
fi

# Make the backup dir exist.
mkdir "$old_dir"

# For each dotfile in this repo, back up any existing copy in $HOME and then
# symlink the repo's version into place.  Reading NUL-delimited find output
# handles file names containing whitespace, which the previous unquoted
# word-splitting loop did not.
find . -maxdepth 1 -type f -name ".*" -print0 |
while IFS= read -r -d '' target; do
  target_file=$(basename -- "$target")
  if [ -f "${HOME}/$target_file" ]; then
    mv "${HOME}/$target_file" "$old_dir"
  fi
  ln -s "${PWD}/$target_file" "${HOME}/$target_file"
done
| true |
cfb0f77cdc8c5c9fb587c3639492151c8d13a836 | Shell | thbzzz/CocooningFire | /cocooningfire | UTF-8 | 487 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env bash
# Locate required tools; command -v is the portable replacement for `which`.
FIREFOX=$(command -v firefox)
XDOTOOL=$(command -v xdotool)

# Exit with a failure status when a dependency is missing (the previous bare
# `exit` returned 0, hiding the error from callers).
if [ -z "$FIREFOX" ] || [ -z "$XDOTOOL" ] ; then
    echo "you should install firefox and xdotool first"
    exit 1
fi

echo "put your mouse around the center of the screen"
sleep 2

# Open the fireplace video, give the page time to load, then drive Firefox
# with xdotool: a double click followed by a single click (presumably to
# fullscreen and start playback -- confirm against the page layout).
firefox https://www.youtube.com/watch?v=RDfjXj5EGqI &
sleep 10
xdotool search --sync --onlyvisible --class "Firefox" windowactivate click --repeat 2 1
sleep 1
xdotool search --sync --onlyvisible --class "Firefox" windowactivate click 1
reset
| true |
1bf73a1df50216b3647e44ee7876aa19b6924f99 | Shell | Dmiller2599/BFOR206 | /args_lab.sh | UTF-8 | 197 | 3.34375 | 3 | [] | no_license | #! /bin/bash
# Prompt the user for two integers and report their sum and product.
read -p "Enter Your First Number:" first
read -p "Enter Your Second Number:" second

echo "Sum is: $(( $first + $second ))"
echo "Product is: $(( $first * $second ))"
| true |
5dba463116a3fa67d07be54a5267603a99000287 | Shell | PudgyPoppins/.dotfiles | /install.sh | UTF-8 | 1,231 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Install these dotfiles by symlinking them from the repo into $HOME, plus a
# handful of system-wide assets (wallpaper, lightdm, helper scripts).

# Absolute path of the repository (directory containing this script).
BASEDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# vim
ln -sf ${BASEDIR}/.vimrc ~/.vimrc
ln -sf ${BASEDIR}/.vim/ ~/.vim
# zsh
ln -sf ${BASEDIR}/.zshrc ~/.zshrc
ln -sf ${BASEDIR}/.oh-my-zsh/ ~/.oh-my-zsh
ln -sf ${BASEDIR}/.p10k.zsh/ ~/.p10k.zsh
#bash
ln -sf ${BASEDIR}/.bashrc ~/.bashrc
ln -sf ${BASEDIR}/.bash_profile ~/.bash_profile
ln -sf ${BASEDIR}/.profile ~/.profile
# git
ln -sf ${BASEDIR}/.gitconfig ~/.gitconfig
# misc
ln -sf ${BASEDIR}/.gnupg/ ~/.gnupg
ln -sf ${BASEDIR}/.config/ ~/.config
ln -sf ${BASEDIR}/.Xresources ~/.Xresources
ln -sf ${BASEDIR}/.ssh/ ~/.ssh
ln -sf ${BASEDIR}/.icons/ ~/.icons
# NOTE(review): `ln -sf repo/dir/ ~/target` on a second run places the new
# link *inside* the already-linked directory instead of replacing it --
# confirm the script is intended to be re-runnable.
# set up config
# NOTE(review): ~/.config was symlinked just above, so this mkdir/loop links
# entries back into the repo itself; likely only one approach is meant.
mkdir ~/.config
for folder in $(ls ${BASEDIR}/.config)
do
    echo "moving $folder to .config"
    ln -sf ${BASEDIR}/.config/$folder ~/.config/$folder
done
#set wallpapers
mkdir ~/Pictures
ln -sf ${BASEDIR}/Wallpapers ~/Pictures/Wallpapers
cp -f ${BASEDIR}/Wallpapers/Wallpaper_Day.png /usr/share/backgrounds/Wallpaper_Day.png
#set lightdm greeter
cd lightdm-tiny-greeter
make
sudo make install
cd ../
#set lightdm conf
yes | cp -rf ${BASEDIR}/etc/lightdm/lightdm.conf /etc/lightdm/lightdm.conf
#move scripts to /usr/local/bin
yes | cp -af ${BASEDIR}/scripts/. /usr/local/bin
| true |
6f08bf5e55af820394d9cdea4b4d34d180888843 | Shell | apple/swift-stress-tester | /SwiftEvolve/Utilities/evolve-swiftCore.sh | UTF-8 | 4,404 | 3.953125 | 4 | [
"Swift-exception",
"Apache-2.0"
] | permissive | #!/usr/bin/caffeinate -i /bin/bash
#
# Script to run resilience tests against the Swift standard library.
#
# This is meant to be a temporary driver while we're figuring out the right
# logic, but we all know that temporary things don't stay that way, so if you're
# reading this comment in 2020 feel free to laugh at me.
#
# cd up until we find the directory containing all projects.
until [ -e swift-stress-tester ]; do
  # Give up once we reach the filesystem root.
  if [[ "$(pwd)" == "/" ]]; then
    echo "FAIL: Can't find root directory"
    exit 1
  fi
  cd ..
done
# Make sure we don't have stdlib changes from a previous run.
if ! git -C swift diff --exit-code --quiet -- stdlib; then
  git -C swift status stdlib
  echo "FAIL: Unstaged changes in stdlib"
  echo " (To clear them, run 'git -C swift checkout HEAD -- stdlib')"
  exit 1
fi
# Set up a few globals.
ITERATIONS="${1-1}"   # number of evolve/build/test rounds (default 1)
BUILD_SCRIPT_ARGS="--build-subdir=buildbot_evolve-swiftCore --release"
ROOT="$(pwd)"
BUILD=$ROOT/build/buildbot_evolve-swiftCore
BUILD_SWIFT=$BUILD/swift-macosx-x86_64
EVOLVE=$ROOT/swift-stress-tester/SwiftEvolve
# Exit on the first unhandled failure from here on.
set -e
#
# HELPERS
#
# Discard any local modifications under swift/stdlib, restoring HEAD state
# (used to undo the previous iteration's evolution before the next one).
function resetStdlib() {
  git -C swift checkout HEAD -- stdlib
}
# Run a command, bracketing it with BEGIN/PASS/FAIL messages.
#   $1   - human-readable description
#   $2.. - the command and its arguments
# Aborts the whole script (exit 1) if the command fails.
function run() {
  local desc="$1"
  shift
  echo "BEGIN: $desc"
  echo '$' "$@"
  if "$@" ; then
    echo "PASS: $desc"
    return 0
  fi
  echo "FAIL: $desc"
  exit 1
}
# Run utils/build-script with the standard arguments plus any extras ($@).
# Refuses to build while lib/swift is a symlink left over from a mix step.
function buildSwift() {
  assertLibNotSymlink
  # $BUILD_SCRIPT_ARGS is intentionally unquoted so it splits into flags.
  run "Building Swift with $phase" swift/utils/build-script $BUILD_SCRIPT_ARGS "$@"
}
# Run the lit test suite against the current build; extra lit args go in $@
# (e.g. --param swift_evolve).
function testSwift() {
  run "Testing Swift with $phase" llvm/utils/lit/lit.py -sv --param swift_site_config=$BUILD_SWIFT/test-macosx-x86_64/lit.site.cfg "$@" swift/test
}
# Modify swift/stdlib source code in place using swift-evolve.
function evolveStdlib() {
  # Temporarily re-link lib/swift to lib/swiftCurrentCurrent
  linkLibs Current Current
  run "Evolving Swift source code" env SWIFT_EXEC="$BUILD_SWIFT/bin/swiftc" $BUILD/swiftevolve-macosx-x86_64/release/swift-evolve --replace --rules=$EVOLVE/Utilities/swiftCore-exclude.json $ROOT/swift/stdlib/public/core/*.swift
  # Drop the temporary symlink again so later steps see no stale lib/swift.
  rm $(libs)
}
# Generate a diff of swift/stdlib into the file named by $1.
# The redirection target is quoted: the previous unquoted >$1 broke
# (ambiguous redirect) for any path containing whitespace.
function diffStdlib() {
  git -C swift diff --minimal -- stdlib > "$1"
}
# Print the path of a built lib/swift folder.  The optional tags $1
# (interfaces: Current/Evolved) and $2 (implementations: Current/Evolved)
# are appended directly to the directory name.
function libs() {
  printf '%s\n' "$BUILD_SWIFT/lib/swift$1$2"
}
# Exit (status 2) if the lib/swift variant named by $1$2 is a symbolic link,
# which would mean the build folder was left dirty by a previous run.
function assertLibNotSymlink() {
  local candidate
  candidate=$(libs $1 $2)
  if [ -L "$candidate" ]; then
    echo "FAIL: Assertion failure: $candidate is a symlink!"
    exit 2
  fi
}
# Change lib/swift to link to the indicated lib/swift variant folder.
function linkLibs() {
  assertLibNotSymlink $1 $2
  # Replace the real lib/swift directory with a symlink to the variant.
  rm -rf $(libs)
  ln -s $(libs $1 $2) $(libs)
}
# Move lib/swift to the indicated variant folder (overwriting any old copy).
function saveLibs() {
  rm -rf $(libs $1 $2)
  # Never move a symlinked lib/swift -- that would save a dangling link.
  assertLibNotSymlink
  mv $(libs) $(libs $1 $2)
}
# Combine the interfaces from $1 with the implementations from $2.
function mixLibs() {
  rm -rf $(libs $1 $2)
  assertLibNotSymlink $1 $1
  # Start from $1's module tree, then overwrite only the dylibs with $2's
  # (the rsync filter copies *.dylib and nothing else).
  run "Copying $1 Modules to $phase" cp -Rc $(libs $1 $1) $(libs $1 $2)
  run "Copying $2 Binaries to $phase" rsync -ai --include '*/' --include '*.dylib' --exclude '*' $(libs $2 $2)/ $(libs $1 $2)
}
#
# MAIN FLOW
#
# Build and test a stock version of Swift, then keep a pristine copy of the
# resulting libraries for the mixing steps below.
phase="Current Modules, Current Binaries"
buildSwift --swiftsyntax --swiftevolve
testSwift
saveLibs 'Current' 'Current'
for iteration in $(seq $ITERATIONS); do
  phase="Evolving ($iteration)"
  # Modify the standard library.
  resetStdlib
  evolveStdlib
  diffStdlib "stdlib-$iteration.diff"
  # Build and test a version of Swift with the evolved libraries, then move them
  # to lib/swiftEvolvedEvolved.
  phase="Evolved Modules, Evolved Binaries ($iteration)"
  buildSwift
  testSwift --param swift_evolve
  saveLibs "Evolved$iteration" "Evolved$iteration"
  # Combine the Current interfaces with the Evolved implementations, then test
  # the combination.
  phase="Current Modules, Evolved Binaries ($iteration)"
  mixLibs 'Current' "Evolved$iteration"
  linkLibs 'Current' "Evolved$iteration"
  testSwift --param swift_evolve
done
| true |
cb7744f944aa14cca3e475387a507e8e8abf3846 | Shell | fishercera/petulant-avenger | /data_grooming_pipeline/fastqc_func.sh | UTF-8 | 507 | 3.34375 | 3 | [] | no_license | #!/usr/bin/bash
#fastqc command function
# USAGE: fastqcB4 $lib
# function fastqcB4 {
# lib=$1
#
# fastqc -o output/fastqcBEFORE $lib.*.fastq.gz
# }
# Run FastQC on the raw (pre-processing) reads of one library.
#   $1 - library base name; matches input/<lib>.*.fastq.gz
# Reports go to output/fastqcBEFORE.  Note: $lib is a global, also read by
# fastqcAFTER below.
function fastqcB4 {
    lib=$1
    echo "Running fastqc on $lib.*.fastq.gz"
    fastqc -o output/fastqcBEFORE input/$lib.*.fastq.gz
}
# USAGE: fastqc <lib-base-name>
# function fastqcAFTER {
#
# fastqc -o output/fastqcAFTER $lib.*.fastq.gz
# }
# Run FastQC on the processed reads in output/ for one library.
#   $1 - library base name (optional).  Previously the argument documented in
#        the USAGE comment was silently ignored and the function relied on
#        the global $lib left over from fastqcB4; ${1:-$lib} honours the
#        argument while staying compatible with existing global-based calls.
function fastqcAFTER {
    lib=${1:-$lib}
    echo "Running fastqc on $lib files"
    fastqc -o output/fastqcAFTER output/$lib.*.fastq.gz
}
0d4abbfcaa6a30ab64b9780ea7f990d022a1474d | Shell | Kyle-Helmick/Blockparty | /BlockpartyDiscordBot/install.sh | UTF-8 | 152 | 2.609375 | 3 | [] | no_license | #!/bin/bash
# Verify docker-compose is on PATH before trying to start the stack.
if ! command -v docker-compose &> /dev/null
then
    echo "docker and docker-compose must be installed"
    exit 1
fi
docker-compose up -d | true |
e3714f8f3401e66488d1560f8c6d86c7d4156720 | Shell | bigleuxenchef/ElasticOnDocker | /run_elastic_standalone.sh | UTF-8 | 723 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -x
# rumi 2018
# $1 (optional) is appended to the image name, e.g. "elasticsearch<suffix>:6.0.1".
#launch docker container with all inside the container : data included, data will be deleted as soon as the container exits
# NOTE(review): "source=elastic" is a *named* volume, which docker keeps after
# the container exits even with --rm -- confirm the data-deleted claim.
docker run --rm --mount source=elastic,target=/usr/share/elasticsearch/data --env NETWORK_HOST=_eth0_ --env ES_JAVA_OPTS="-Xms2G -Xmx2G" --env NODE_NAME=node1 -p 9200:9200 -p 9300:9300 -it elasticsearch${1}:6.0.1
# run docker after having created a directory `/usr/local/var/elasticsearch/Docker` in order to store data outside the container
#docker run --rm -v /usr/local/var/elasticsearch/Docker:/usr/share/elasticsearch/data --env NETWORK_HOST=_eth0_ --env ES_JAVA_OPTS="-Xms2G -Xmx2G" --env NODE_NAME=node2 -p 9200:9200 -p 9300:9300 -it elasticsearch${1}:6.0.1
| true |
b3d30bb27e8921c438f97a769a991048497de03d | Shell | isliulin/ZKCD | /diff.sh | UTF-8 | 779 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# @file    for_diff.sh
# @author  ymm
# @brief   Compare files of the same name between two directories and report
#          the ones that differ.
# @date    2014/2/28 16:18:10
# @History
#    1. 2014/2/28 16:18:10 author ymm: initial version
path_src=/home/user/Desktop/ArkSdk/MPlayer/MPlayer-1.3.0-20170321
path_desc=/home/user/Desktop/ArkSdk/MPlayer/MPlayer-1.3.0
# Originally only the backed-up .cpp/.h/makefile files were compared; restore
# the narrower pattern below if needed.
#files=$(ls $path_src/*.cpp $path_src/*.h $path_src/makefile)
# Iterate with a glob rather than parsing `ls`; every expansion is quoted so
# names containing spaces are handled correctly.
for file in "$path_src"/*
do
    filename=$(basename -- "$file")
    # Compare against the file of the same name in the destination tree
    # (fixes the corrupted `$(unknown)` expression -> ${filename}).
    file_desc=${path_desc}/${filename}
    diff -- "${file}" "${file_desc}" 1>/dev/null 2>&1 && result=0 || result=1
    if [ "$result" == 1 ];then
        echo "$filename is diff"
    fi
done
| true |
07031f9c1a2034440b04707cc17e228c85f8208a | Shell | sleepyfox97/ft_service | /srcs/nginx/srcs/liveness.sh | UTF-8 | 160 | 2.5625 | 3 | [] | no_license | pgrep nginx
liveness_nginx=$?
if [ $liveness_nginx -ne 0 ]; then
return 1
fi
pgrep telegraf
liveness_nginx=$?
if [ $liveness_nginx -ne 0 ]; then
return 1
fi
| true |
b8b037b5a832316a69d20490590c5adf788db82f | Shell | mk-pmb/minetest-util-pmb | /launcher/launch.sh | UTF-8 | 840 | 3.34375 | 3 | [
"ISC"
] | permissive | #!/bin/bash
# -*- coding: utf-8, tab-width: 2 -*-

# Entry point: resolves the script location, sources all helper libraries
# from ./src and the default config, then dispatches to the configured task.
function launcher_cli () {
  export LANG{,UAGE}=en_US.UTF-8 # make error messages search engine-friendly
  local SELFFILE="$(readlink -m -- "$BASH_SOURCE")"   # fully-resolved path
  local SELFPATH="$(dirname -- "$SELFFILE")"
  local SELFNAME="$(basename -- "$SELFFILE" .sh)"
  local INVOKED_AS="$(basename -- "$0" .sh)"
  local UNBUFFERED='stdbuf -i0 -o0 -e0'
  # CFG[task] names the function invoked at the end; parse_cli may change it.
  local -A CFG=(
    [task]='launch_game'
    )
  local DBGLV="${DEBUGLEVEL:-0}"
  local CLI_EXTRA_ARGS=()
  local ITEM=
  # Source each helper with a --lib argument (presumably a "library mode"
  # marker understood by those files -- confirm in src/).
  for ITEM in "$SELFPATH"/src/*.sh; do
    source -- "$ITEM" --lib || return $?
  done
  source -- "$SELFPATH"/cfg_default.rc || return $?
  default_homesyms || return $?
  [ "$*" == --symlinks-only ] && return 0
  parse_cli "$@" || return $?
  # Dynamic dispatch: run the selected task with the collected extra args.
  "${CFG[task]}" "${CLI_EXTRA_ARGS[@]}" || return $?
}
launcher_cli "$@"; exit $?
| true |
141bcf1be010c614a42a042a6f5ebf2e95235052 | Shell | truhlikfredy/thinkpadScripts | /cpu_power_switch.sh-t400 | UTF-8 | 645 | 3.1875 | 3 | [] | no_license | #!/bin/sh
# Cycle the CPUfreq governor on every core and flash a 1-second notification:
#   ondemand -> performance, performance -> powersave, powersave -> ondemand.
# Writing the sysfs files requires root.  $() replaces the legacy backticks.
status=$(cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor)
echo Switching to:
if [ "$status" = "ondemand" ]; then
    echo "performance" | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
    xmessage -timeout 1 -nearmouse -center 3-high-performance
fi
if [ "$status" = "powersave" ]; then
    echo "ondemand" | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
    xmessage -timeout 1 -nearmouse -center 2-medium-ondemand
fi
if [ "$status" = "performance" ]; then
    echo "powersave" | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
    xmessage -timeout 1 -nearmouse -center 1-low-powersave
fi
| true |
2b2551764402894c0f72bee704a409afad253773 | Shell | fmartingr/shelfzilla | /rpm/scripts/shelfzilla | UTF-8 | 2,005 | 4.125 | 4 | [] | no_license | #!/bin/sh
#
# Shefzilla Init script Adapted
## Nginx
## FactCGI
#
# chkconfig: - 85 15
#
# Source function library.
. /etc/rc.d/init.d/functions
# Print "<label>: [Ok]" or "<label>: [Error]" depending on an exit status.
#   $1 - status code to test (non-zero => Error)
#   $2 - label to print
function check ()
{
    error=`echo -e "[\e[0;31mError\e[0m]"`   # red  [Error]
    ok=`echo -e "[\e[0;32mOk\e[0m]"`         # green [Ok]
    if [ $1 != 0 ];then
        echo $2": "$error
    else
        echo $2": "$ok
    fi
}
# Define runtime paths/settings as globals and make sure the pid/log
# directories exist and are owned by the service user.
function validations() {
    ## Variables
    INSTANCE=shelfzilla
    BASE_PATH="/opt/shelfzilla"
    PID_PATH=/var/run/shelfzilla
    PID_FILE=${PID_PATH}/${INSTANCE}.pid
    P_USER="shelfzilla"
    LOG_PATH=/var/log/shelfzilla
    LOG_FILE=shelfzilla.log
    FCGI_PORT=8000
    FCGI_IP=127.0.0.1
    MOD=shelfzilla.settings.configfile
    SZ_CONFIG_FILE=/opt/shelfzilla.toml
    ## Folders
    [ -d "${PID_PATH}" ] || mkdir -p "${PID_PATH}"
    [ -d "${LOG_PATH}" ] || mkdir -p "${LOG_PATH}"
    chown -R "${P_USER}:${P_USER}" $LOG_PATH
}
# Start/stop/query nginx and the gunicorn app server.
#   $1 - "start", "stop", or anything else for a status report
# NOTE(review): `check $?` reports the status of the *last* command executed,
# which is the [ -z ... ] test when the service was already running/stopped;
# confirm that is the intended semantics.
function status(){
    if [ "$1" == "start" ];then
        [ -z "`pidof nginx`" ] && service nginx start
        [ -z "`pidof python2.7`" ] && gunicorn --daemon -b ${FCGI_IP}:${FCGI_PORT} -e DJANGO_SETTINGS_MODULE=${MOD} -e APP_CONFIGFILE=${SZ_CONFIG_FILE} --pythonpath=${BASE_PATH} shelfzilla.wsgi:application
        check $? "Gunicorn"
    elif [ "$1" == "stop" ];then
        [ -z "`pidof nginx`" ] || service nginx stop
        [ -z "`pidof python2.7`" ] || pkill gunicorn
        check $? "Gunicorn"
    else
        service nginx status
        FCGI=`pidof python2.7`
        if [ -z "${FCGI}" ];then
            echo -n "Gunicorn Server is down"
            echo ""
        else
            echo -n "Gunicorn Server is up (${FCGI})"
            echo ""
        fi
    fi
}
# Stop then start both services.
function restart() {
    status "stop"
    status "start"
}
validations
# Init-script entry point dispatch.
case "$1" in
  start)
    status "start"
    ;;
  stop)
    status "stop"
    ;;
  restart)
    restart
    ;;
  status)
    status
    ;;
  *)
    echo $"Usage: $0 {start|stop|restart|status}"
    exit 2
esac
| true |
89280984e3d1ecfb903288f14644af5178e58d29 | Shell | SeniorCtrlPlayer/Linux | /newdeploy.sh | UTF-8 | 3,800 | 3.265625 | 3 | [] | no_license | # color scheme
# ====================================================
# Copyright (C)2019 All rights reserved.
#
# Author : lwk
# Email : 510062390@qq.com
# File Name : newdeploy.sh
# Last Modified : 2019-11-30 15:02
# Describe :
#
# ====================================================
# ANSI color escape sequences used by the _<color> echo helpers below.
red='\e[91m'
green='\e[92m'
yellow='\e[93m'
magenta='\e[95m'
cyan='\e[96m'
none='\e[0m'
# Print all arguments wrapped in the given color (echo -e interprets escapes).
_red() { echo -e ${red}$*${none}; }
_green() { echo -e ${green}$*${none}; }
_yellow() { echo -e ${yellow}$*${none}; }
_magenta() { echo -e ${magenta}$*${none}; }
_cyan() { echo -e ${cyan}$*${none}; }
# example
# _red "helloworld"
# 获取系统发行版本
release=`lsb_release -a | grep "Distributor ID:"`
OS_name=${release:16}
echo "your system is $OS_name"
# Ensure each named command exists; install any missing one with yum.
#   $1 - space-separated list of command names
check() {
    # Usage examples:
    #########################
    # a=(git wget)
    # check "${a[*]}"
    # check wget
    #########################
    cmds=$1
    for cmd in ${cmds[*]}; do
        # `which` output is discarded; only its exit status is used below.
        check=`which $cmd &> /dev/null`
        if [ $? -eq 0 ];then
            echo "yes"
        else
            yum install $cmd -y
        fi
    done
}
custom_install() {
# done
# wget, aria2, lrzsz, bash-completion
yum install -y wget aria2 lrzsz bash-completion tree htop
case $OS_name in
"CentOS")
grep "aliyun" /etc/yum.repos.d/CentOS-Base.repo &> /dev/null
if [ $? -ne 0 ];then
# backup repo
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
# update repo
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
# make cache
yum makecache
else
_green "repo is already"
fi
;;
*)
_red "only support CentOS"
;;
esac
}
git_install() {
# done
# install
yum install -y git
_green "git has been installed"
# DNS to speed git
sed -i '$a\140.82.114.4 github.com' /etc/hosts
# set email and name
git config --global user.email "510062390@qq.com"
git config --global user.name "SeniorCtrlPlayer"
}
zsh_install() {
echo "zsh"
cd zsh
./powerline.sh
# oh-my-zsh
}
nvim_install() {
# realitive
yum install -y cmake gcc gcc-c++ ncurses-devel ctags
# remove vim
yum remove vim vi vim-common vim-tiny
# nvim
yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
yum install -y neovim python3-neovim
# 插件管理器
curl -fLo ~/.config/nvim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
cp nvim/init.vim ~/.config/nvim/init.vim
py_V=`python -V 2>&1|awk '{print $2}'|awk -F '.' '{print $1}'`
if [[ $py_V -ne "3" ]];then
_red "your py_version is not python3"
else
pip install pynvim -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
fi
}
hadoop_install() {
echo "hadoop"
# java, hadoop, hbase
}
while :; do
echo -e "**********自动部署脚本**********\n"
echo "1. custom"
echo "2. zsh"
echo "3. git"
echo "4. nvim"
echo "5. hadoop"
echo -e "\n**********自动部署脚本**********"
read -p "$(echo -e "请选择(q退出) ${red}1-5$none:" )" choose
case $choose in
1)
custom_install
;;
2)
git_install
;;
3)
;;
4)
nvim_install
;;
5)
;;
q)
break;;
esac
done
| true |
47462b96fb82f3dc63418ead8d3b733786b36469 | Shell | millyz/POCache | /pocache-dfs-perf/bin/dfs-perf-collect | UTF-8 | 1,132 | 3.6875 | 4 | [
"BSD-3-Clause",
"CC-BY-2.5",
"EPL-1.0",
"Classpath-exception-2.0",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"GCC-exception-3.1",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"CC-PDDC",
"CC0-1.0",
"CC-BY-3.0",
"CDDL-1.0",
"MIT",
"LicenseRef-scancode-proprietary-license",
"AGPL-3.0-only",
"BSD-2-Clause-Views",
"MPL-2.0",
"MPL-2.0-no-copyleft-exception",
"BSD-2-Clause",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.1",
"LicenseRef-scancode-jdom",
"LicenseRef-scancode-unknown"
] | permissive | #!/usr/bin/env bash
function printUsage {
  echo "Usage: dfs-perf-collect <TestCase>"
}

# Require the <TestCase> argument; otherwise print usage and fail.
if [ $# -le 0 ]; then
  printUsage
  exit 1
fi

# Directory containing this script ($() replaces the legacy backticks).
bin=$(cd "$( dirname "$0" )"; pwd)

DEFAULT_PERF_LIBEXEC_DIR="$bin"/../libexec
DFS_PERF_LIBEXEC_DIR=${DFS_PERF_LIBEXEC_DIR:-$DEFAULT_PERF_LIBEXEC_DIR}
# Shared configuration: defines DFS_PERF_CONF_DIR, DFS_PERF_OUT_DIR, JAVA, ...
. "$DFS_PERF_LIBEXEC_DIR"/dfs-perf-config.sh

NODELIST=$DFS_PERF_CONF_DIR/slaves

# Recreate the per-test output directory from scratch.  The :? guard aborts
# rather than ever running `rm -rf` on an empty path.
DFS_PERF_OUT_REPORTS_DIR=$DFS_PERF_OUT_DIR/contexts_$1
if [ -e "$DFS_PERF_OUT_REPORTS_DIR" ]; then
  rm -rf "${DFS_PERF_OUT_REPORTS_DIR:?}"
fi
mkdir -p "$DFS_PERF_OUT_REPORTS_DIR"

# Pull each slave's context file; comment and blank lines in the slaves file
# are filtered out by sed.
taskid=0
for slave in $(sort "$NODELIST" | sed "s/#.*$//;/^$/d"); do
  echo -n "Collect from $slave... "
  scp "$slave:$DFS_PERF_OUT_DIR/context$1-$taskid@$slave" "$DFS_PERF_OUT_REPORTS_DIR/context$1-$taskid@$slave"
  sleep 0.02
  taskid=$((taskid + 1))
done
wait

# Merge the collected contexts into the final report.
# ($DFS_PERF_JAVA_OPTS stays unquoted on purpose: it word-splits into flags.)
$JAVA -cp "$DFS_PERF_CONF_DIR/:$DFS_PERF_JAR" -Dpasalab.dfs.perf.home=$DFS_PERF_HOME -Dpasalab.dfs.perf.logger.type="PERF_TOOLS_LOGGER" -Dlog4j.configuration=file:$DFS_PERF_CONF_DIR/log4j.properties $DFS_PERF_JAVA_OPTS pasalab.dfs.perf.tools.DfsPerfCollector $1 $DFS_PERF_OUT_REPORTS_DIR
| true |
4cf96f8d136d57bcfc31b994f125bd150d713efe | Shell | pirkla/JamfScripts | /UpdateAppVPPToken.sh | UTF-8 | 3,591 | 3.78125 | 4 | [] | no_license | #!/bin/sh
# Migrate all apps assigned to a VPP account in Jamf Pro to another VPP account.
# If any apps do not have licenses on the new VPP account they will not be migrated, and will need to be handled manually.
# see https://github.com/pirkla/JamfScripts/blob/master/UpdateVPPByID.sh to update managed distribution and vpp tokens by id
# NOTE(review): the script uses bash features (arrays, [[ ]], +=) although the
# shebang is /bin/sh -- run it with bash.


################ USER DEFINED VARIABLES START #############################
# Enter credentials and the token id's. If hosting locally use the format https://your.url:8443
# Special characters in the user or password may cause issues with parsing the script
jssURL="https://yoururl.jamfcloud.com"
apiUser="admin"
# apiPass="password"
read -s -p "Password: " apiPass
newToken="999"
oldToken="999"
# specify the endpoint and xml node name for applications
# comment this out to switch to mac applications
endpoint="mobiledeviceapplications"
xmlEndpoint="mobile_device_application"
# comment this back in to switch to mac applications
# endpoint="macapplications"
# xmlEndpoint="mac_application"
################ USER DEFINED VARIABLES END #############################

# base64 encode user/password since curl can't handle special characters
auth=$( printf "$apiUser:$apiPass" | base64 )

# get all id's and names from the endpoints
# curl -w appends the 3-digit HTTP status to the body; it is split off below.
appResp=$(curl -w "%{http_code}" -H "content-type: application/xml" -H "authorization: Basic $auth" -ks "$jssURL/JSSResource/$endpoint" -X GET )
status=${appResp: -3}
allApps=$( echo $appResp | sed 's/...$//')
if [[ "$status" != "200" ]]; then
  echo "There was a problem: $status"
  exit 1
fi

# Build parallel arrays: app ids (space separated) and names (comma separated)
# extracted from the XML listing.
ids=$( echo "$allApps" | xpath "//id[not(ancestor::site)]" 2> /dev/null | sed s/'<id>'//g | sed s/'<\/id>'/' '/g)
IFS=', ' read -r -a allIDs <<< ${ids}
appNames=$( echo "$allApps" | xmllint --xpath '//name' - | sed s/'<name>'//g | sed s/'<\/name>'/','/g)
IFS=',' read -r -a allNames <<< "${appNames}"

# initialize variables to collect failed updates
failedName=""
failedID=""

# loop over each id
for index in ${!allIDs[@]};
do
  echo "checking ${allIDs[index]}"
  # get the VPP xml subset (status code again appended by -w)
  tokenResp=$(curl -w "%{http_code}" -H "accept: text/xml" -H "content-type: text/xml" -H "authorization: Basic $auth" -ks "$jssURL/JSSResource/$endpoint/id/${allIDs[index]}/subset/vpp" -X GET )
  tokenStatus=${tokenResp: -3}
  if [[ "$tokenStatus" != "200" ]]; then
    echo "there was an error retrieving the token: $tokenStatus"
  fi
  token=$( echo $tokenResp | sed 's/...$//' | xpath //vpp/vpp_admin_account_id/text\(\) 2> /dev/null)
  # if the old token is being used then switch tokens by PUTting a minimal
  # XML document that names the new VPP admin account id
  if [ "$token" == "$oldToken" ]; then
    update=$(curl -H "accept: text/xml" -H "content-type: text/xml" -H "authorization: Basic $auth" -ks "$jssURL/JSSResource/$endpoint/id/${allIDs[index]}" -w '%{http_code}' -X PUT -d "<${xmlEndpoint}>
<vpp>
  <vpp_admin_account_id>$newToken</vpp_admin_account_id>
</vpp>
</${xmlEndpoint}>" --output /dev/null)
    echo "******* updating ${allIDs[index]}"
    # report and gather failed updates (anything other than 201 is a failure)
    if [ "$update" != "201" ]; then
      echo "failed $update"
      failedName+="${allNames[index]}, "
      failedID+="${allIDs[index]} "
    fi
  fi
done

# Report failed token updates.
if [ "$failedName" != "" ]; then
  echo "The following apps did not migrate to the new token and will need to be managed manually"
  echo "$failedName"
  echo "The ID's of those apps are as follows"
  echo "$failedID"
else
  echo "All apps on the old token have been migrated to the new token"
fi
| true |
9bab7f621a863d6dd6c505ec5bdb8f835a6630ab | Shell | project-chip/connectedhomeip | /third_party/pigweed/update.sh | UTF-8 | 531 | 2.84375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/usr/bin/env bash
# Update the pigweed submodule and the CHIP CIPD manifest.

# Update the submodule.  Abort if the repo directory cannot be entered so the
# git/perl commands below never run against the wrong working directory.
cd "$(dirname "${BASH_SOURCE[0]}")/repo" || exit 1
git fetch origin master
git checkout origin/master

# Copy the CIPD manifest but change the Python line so we don't use CIPD
# Python on Linux.  (perl reads the file directly; no `cat |` needed.)
perl -pe 's[(infra/3pp/tools/cpython3/\$\{os)\}][$1=mac}];' \
    pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json > \
    ../../../scripts/pigweed.json

# Reformat the CIPD manifest.
cd ../../.. || exit 1
./scripts/helpers/restyle-diff.sh
| true |
027b1c2adc20fa9a2def3045a684f0c577720ed9 | Shell | cobalt-mirror/bluequartz | /BlueQuartz/5100WG/tags/OSS_1_4/ui/base.mod/glue/etc/rc.d/init.d/skeleton | UTF-8 | 422 | 3.671875 | 4 | [
"BSD-3-Clause-No-Nuclear-Warranty"
] | permissive | #!/bin/sh
#
# skeleton	Example file to build /etc/init.d scripts.
#
# Version:	@(#) /etc/init.d/skeleton 1.01 26-Oct-1993
#
# Author:	Miquel van Smoorenburg, <miquels@drinkel.nl.mugnet.org>
#

# Source function library.
. /etc/rc.d/init.d/functions

# See how we were called.
case "$1" in
  start)
	# The subsys lock file marks the service as running for the rc system.
	touch /var/lock/subsys/skeleton
	;;
  stop)
	rm -f /var/lock/subsys/skeleton
	;;
  *)
	echo "Usage: skeleton {start|stop}"
	exit 1
esac

exit 0
| true |
77826469ed5310f94b76c07f270ea48799b603b4 | Shell | tangyibo/some_script | /greenplum/vacuumtab.sh | UTF-8 | 1,067 | 3.53125 | 4 | [] | no_license | #!/bin/bash
############################################
# Function: vacuum all bloated tables in every Greenplum database
# Author: tang
# Date : 2020-12-07
#
# Usage: sh vacuumtab.sh
#
###########################################
# Stop at the first failing command (psql/vacuumdb errors abort the run).
set -e
# Emit "$1" on stdout prefixed with a human-readable timestamp.
function log() {
    TIME=$(date +"%Y-%m-%d %H:%M:%S")
    printf '%s %s\n' "$TIME" "$1"
}
# Only the gpadmin user may run this script.
# NOTE(review): the wording of this message is misleading -- it fires when the
# caller is NOT gpadmin; consider rephrasing.
if [ "$(whoami)" != 'gpadmin' ]; then
    log "[ERROR]: You have no permission to run $0 as gpadmin user."
    exit 1
fi

# Enumerate every database, then vacuum each bloated table reported by
# gp_toolkit.gp_bloat_diag (largest first).
DBNAMES=( $(psql -t -c 'SELECT sodddatname as database_name from gp_toolkit.gp_size_of_database') )
log "[INFO]:Find all database : ${DBNAMES[*]} "

for n in ${DBNAMES[@]}; do
    log "[INFO]: Vacuum database for $n."
    SQL="select quote_ident(bdinspname)|| '.' || quote_ident(bdirelname) from gp_toolkit.gp_bloat_diag order by bdirelpages desc, bdidiag;"
    # echo $SQL
    TABLES=$( psql -d $n -t -c "$SQL")
    log "[INFO]:Find bloat tables : ${TABLES[*]} "
    for t in $TABLES
    do
        # -f: FULL vacuum, -z: analyze afterwards
        vacuumdb -f -z -t "$t" "$n"
    done
done
log "[INFO]: Success to finish vacuum all bloat tables!"
| true |
153a27c708d5482638e49d7de2fbc2868a531020 | Shell | jkwmoore/hpc_intro | /sge/test4 | UTF-8 | 486 | 2.921875 | 3 | [] | no_license | #!/bin/bash
#
#$ -j y
#$ -l h_rt=00:05:00
#
# This script will run the fish-stocks simulation
# for a number of different enforcement limits.
# Modify this script so that as you repeatedly
# run the simulation ( i.e. fish )for a set of
# 'in this case 6' different input enforcement limits.
# you will send the output to a different file
# for each run.
#
# NOTE(review): as written every run appends to the single file "fishout";
# the per-limit output files described above are the exercise left undone.
limits='1500 2000 3000 4500 6000 7000'
# Feed each enforcement limit to the simulation on stdin.
for limit in $limits ;
do
( echo "$limit" | ./fish )>> fishout
done
| true |
581f74330da23b46f8ec49641d5a08004c4a54ee | Shell | uio-bmi/two_step_graph_mapper | /benchmarking/create_roc_plots.sh | UTF-8 | 1,170 | 3.328125 | 3 | [] | no_license | id=builder
# NB: `id=builder` is set on the preceding line; `threshold` appears unused.
threshold=150
mappers=$1

# Everything for this run goes into a directory named after the epoch time.
timestamp=$(date +%s)
mkdir $timestamp
echo "Results will end up in directory $timestamp"

# Combined TSV: one header row, then one row per alignment per mapper.
echo "correct mq score length unaligned known.nodes known.bp novel.nodes novel.bp is_on_rare_variant aligner" > $timestamp/results.tsv
for mapper in $(echo $mappers| tr "," "\n")
do
    echo $mapper
    # Append columns 2-11 of <mapper>.compare, tagged with the mapper name.
    cat $mapper.compare | awk -v name="${mapper}" 'BEGIN { OFS="\t"} { print $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, name}' >> $timestamp/results.tsv
done

# Split into known/novel variant rows on column 8 (novel.nodes == 0 -> known).
# NOTE(review): these outputs are gzip-compressed despite the .tsv name.
( cat $timestamp/results.tsv | head -1; cat $timestamp/results.tsv | tail -n+2 | awk '{ if ($8 == 0) print }' ) | gzip >$timestamp/results-known.tsv
( cat $timestamp/results.tsv | head -1; cat $timestamp/results.tsv | tail -n+2 | awk '{ if ($8 > 0) print }' ) | gzip >$timestamp/results-novel.tsv

# Render the three ROC plots, then build the final HTML report.
../plot-roc.R $timestamp/results.tsv $timestamp/roc-$id.png
echo rendering known ROC
../plot-roc.R $timestamp/results-known.tsv $timestamp/roc-known-$id.png
echo rendering novel ROC
../plot-roc.R $timestamp/results-novel.tsv $timestamp/roc-novel-$id.png

cd $timestamp
python3 ../../make_html_report.py $mappers > report.html
cd ..
echo "Report created at $timestamp/report.html"
| true |
62f24ac7f2117bf2838d1b611c2447298d16ddc4 | Shell | HarryTornado/HCC | /opt/scada/data/reports/bridge/sumTrack.sh | UTF-8 | 2,582 | 3.953125 | 4 | [] | no_license | #!/bin/bash
# Print the explanatory banner that heads the summary report.
# ${domain} is expected to come from the caller's environment; if it is
# unset the path below simply prints with an empty segment.
# Fixes vs. original: "sumarrised"/"define" typos, and the banner now names
# the file the script actually writes (see resultFile below) instead of the
# non-existent track_test_summary_${date}.txt.
function usage {
cat <<EOF
#=================================================================================================
#
# The track test results are summarised in results/tracks/summary_at_<hour>_on_<month-day>.txt
#
# for each of the interlockings, the summary would include the following sections:
#    1. URI not at all defined in the
#       /opt/scada/var/${domain}/TrafficSimulator/A/trafficSim.cfg.xml
#       file.
#
#    2. tracks failed to pick or drop, which usually implies URI defined in
#       the file
#       /opt/scada/var/${domain}/TrafficSimulator/A/trafficSim.cfg.xml
#       but probably not properly defined.
#
#=================================================================================================
EOF
}
# Interlocking IDs; each is expected to have a results/tracks/<ID>.txt file.
ints=(BBH BEL BLS BLY CAM CBE CFD CHL COB DNG EPP ERM FKS FSA FSB FSD FSE GRN HDB JLI KPK LAL MCD MRN MUL NME NMU NPT SDM SHM SKN SSN SSS SST STN STS SUN SYR UFD WFY WST)
# Report file named after the current 12-hour clock hour and month-day,
# e.g. results/tracks/summary_at_09_on_March-05.txt
resultFile="results/tracks/summary_at_`date +%I`_on_`date +%B-%d`.txt"
usage > $resultFile
for it in ${ints[@]}
do
	echo -e "\n=========================================================================================" >> $resultFile
	echo -e "\nTrack test summary for interlocking ${it}...." >> $resultFile
	echo -e "\n\ttracks for which URI is not defined at all:" >> $resultFile
	# check for URI errors: grab "URI null for <name>" lines, strip the
	# prefix plus all spaces/literal \t sequences, leaving bare track names
	# in the scratch file "tmp" (created/overwritten in the current dir).
	cat results/tracks/${it}.txt | grep "URI" > tmp
	sed -i 's/URI null for //g' tmp
	sed -i 's/ //g' tmp
	sed -i 's/\\t//g' tmp
	# NOTE(review): uniq only collapses adjacent duplicates; assumes
	# repeated track names appear consecutively in the log — TODO confirm.
	cat tmp | uniq | awk '{print "\t\t", $1}' >> $resultFile
	# check for tracks that do not pick or drop properly (same strip-and-list
	# treatment applied to "Error:" lines)
	echo -e "\n\ttracks which can not picked up or dropped properly:" >> $resultFile
	cat results/tracks/${it}.txt | grep "Error:" > tmp
	sed -i 's/Error: failed to pick up //g' tmp
	sed -i 's/Error: failed to drop //g' tmp
	sed -i 's/ //g' tmp
	sed -i 's/\\t//g' tmp
	cat tmp | uniq | awk '{print "\t\t", $1}' >> $resultFile
	# check to see if every track in the interlocking failed: failures exist
	# but no "Spent" (success-timing) lines means nothing worked; no
	# failures and no "Spent" lines means no tracks were found at all.
	failedCount=`cat tmp | wc -l`
	if [[ "$failedCount" != "0" ]]
	then
		okCount=`cat results/tracks/${it}.txt | grep "Spent" | wc -l`
		if [[ "$okCount" == "0" ]]
		then
			echo -e "\n\tNotice: all the tracks in the interlocking ${it} are not working\n" >> $resultFile
		fi
	else
		okCount=`cat results/tracks/${it}.txt | grep "Spent" | wc -l`
		if [[ "$okCount" == "0" ]]
		then
			echo -e "\n\tNotice: no tracks found in interlocking ${it}.\n" >> $resultFile
		fi
	fi
done
echo "track test summary was saved to $resultFile .... Cheers!"
| true |
4bcbab57d28d59a5fde9922e5b65d6ca5f65d1df | Shell | guildai/issue-resolution | /35-compare-failing-on-imported-runs/recreate | UTF-8 | 1,432 | 3.796875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -eu

# Scratch layout for the two simulated systems.
BASE=/tmp/guild-issue-35
SYS1=$BASE/sys1
SYS2=$BASE/sys2

# Refuse to run over a stale tree: the message asks the user to delete it
# first, so actually stop here — the original printed the warning and then
# fell through and kept running anyway.
if [ -e "$BASE" ]; then
    echo "$BASE exists - delete it first and rerun this script" >&2
    exit 1
fi

cat <<EOF
This script recreates Issue 35 by simulating two systems and an export
of runs from one of the systems to another. After runs are moved, the
original project file is deleted to break any references in the
exported runs to the project.
This issue can be recreated with 0.6.5 and has been resolved in 0.6.6.
EOF
# Create the scratch layout for both simulated systems plus the
# export/import staging area. mkdir -p makes this idempotent.
init-dirs() {
    local dir
    for dir in \
        "$BASE" \
        "$SYS1/guild-home" "$SYS1/project" \
        "$SYS2/guild-home" "$SYS2/project" \
        "$BASE/export-import"; do
        mkdir -p "$dir"
    done
}
# Seed system 1's project with the guild.yml shipped next to this script
# (run from the repo directory, so the relative path resolves).
init-project() {
    cp guild.yml $SYS1/project/
}
# Run the "op" operation once on system 1 (guild home isolated under $SYS1).
generate-run() {
    printf "==> Generating run on system 1\n\n"
    guild -H $SYS1/guild-home -C $SYS1/project run op -y
}
# Export system 1's runs into the shared staging directory.
export-run() {
    printf "==> Exporting run on system 1\n\n"
    guild -H $SYS1/guild-home export $BASE/export-import -y
}
# Import the staged runs into system 2's guild home.
import-run() {
    printf "\n==> Importing run on system 2\n\n"
    guild -H $SYS2/guild-home import $BASE/export-import -y
}
# Delete the original project file so the imported runs hold a dangling
# project reference — this is the condition Issue 35 is about.
delete-project() {
    printf "\n==> Deleting project on system 1 (breaks project ref)\n\n"
    rm $SYS1/project/guild.yml
}
# Compare the imported runs; with 0.6.5 this is where the failure shows.
compare-runs() {
    printf "==> Comparing runs on system 2\n\n"
    guild -H $SYS2/guild-home compare --table
}
# Reproduction sequence: build the two systems, move a run from system 1 to
# system 2, break the project reference, then compare on system 2.
init-dirs
init-project
generate-run
export-run
import-run
delete-project
compare-runs
| true |
5dccd25935cc898aacefef68d492cbf6727f4620 | Shell | apnex/path | /bash/load.model.sh | UTF-8 | 615 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# jq -c emits one compact JSON value per line, so each line of NODES/PATHS
# is a single element of the corresponding array in model.json.
NODES=$(cat model.json | jq -c '.nodes[]')
PATHS=$(cat model.json | jq -c '.paths[]')
# Create one node per line of $NODES (each line is a JSON pair like
# ["x","y"]), invoking ./cmd.nodes.create.sh "x:y" "<index>".
# Fixes vs. original: data is no longer passed as a printf *format* string
# (a '%' in the JSON would have corrupted it), the expansion is quoted so
# glob characters in the data are inert, and IFS/COUNT are made local so
# they no longer leak into the rest of the script.
function loadNodes() {
	local IFS=$'\n'
	local COUNT=0
	local NODE GRIDX GRIDY
	for NODE in ${NODES}; do
		GRIDX=$(printf '%s\n' "${NODE}" | jq -r '.[0]')
		GRIDY=$(printf '%s\n' "${NODE}" | jq -r '.[1]')
		./cmd.nodes.create.sh "${GRIDX}:${GRIDY}" "${COUNT}"
		COUNT=$((COUNT+1))
	done
}
# Create one path per line of $PATHS (each line is a JSON pair like
# ["src","dst"]), invoking ./cmd.paths.create.sh "src" "dst".
# Same fixes as loadNodes: no printf format-string injection, quoted
# expansions, and local IFS/COUNT (COUNT is kept for symmetry even though
# nothing reads it here, matching the original behavior).
function loadPaths() {
	local IFS=$'\n'
	local COUNT=0
	local LINK LINKSRC LINKDST
	for LINK in ${PATHS}; do
		LINKSRC=$(printf '%s\n' "${LINK}" | jq -r '.[0]')
		LINKDST=$(printf '%s\n' "${LINK}" | jq -r '.[1]')
		./cmd.paths.create.sh "${LINKSRC}" "${LINKDST}"
		COUNT=$((COUNT+1))
	done
}
# Entry point: materialise the model's nodes first, then the paths that
# connect them.
loadNodes
loadPaths
| true |
6c6d35f18a9099a52e4e1bb00db555b6b0d9c6f0 | Shell | jonhawkes/cfc-jenkins-build | /slave/s390x/cfc-auto-deploy-k8s | UTF-8 | 766 | 3.109375 | 3 | [] | no_license | #!/bin/bash
if [ $# != 1 ] ; then
echo "USAGE: cfc-auto-deploy-k8s \$FilePath/DirPath"
exit 255
fi
depolyfiles=$1
podnamespace=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
kubernetes_service_url="https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT_HTTPS"
token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
kubectl-1.7 config set-credentials user --token=$token || exit 255
kubectl-1.7 config set-cluster cfc --server=$kubernetes_service_url --insecure-skip-tls-verify=true || exit 255
kubectl-1.7 config set-context cfc --user=user || exit 255
kubectl-1.7 config set-context cfc --cluster=cfc || exit 255
kubectl-1.7 config use-context cfc || exit 255
kubectl-1.7 apply -f $depolyfiles --namespace=$podnamespace || exit 255
| true |
cb9ae3024a0233285728d76d520c8329a824efe7 | Shell | glennholloway/ubuntu-unattended | /init.sh | UTF-8 | 1,460 | 3.828125 | 4 | [] | no_license | #!/bin/bash
set -e

# Display a text spinner while a background process is alive.
#   $1 - PID of the background job to wait on
#   $2 - status text shown next to the spinner
# Returns once the process has exited.
spinner()
{
    local pid=$1
    local delay=0.175
    local spinstr='|/-\'
    local infotext=$2
    # Hide the cursor; ignore tput failures (e.g. no TERM when run
    # non-interactively) so `set -e` does not abort the whole script.
    tput civis 2>/dev/null || :
    # kill -0 probes for process existence without signalling it. The old
    # `ps a | awk '{print $1}' | grep $pid` also matched unrelated PIDs
    # that merely contained $pid as a substring (e.g. 42 vs 420).
    while kill -0 "$pid" 2>/dev/null; do
        local temp=${spinstr#?}
        printf " [%c] %s" "$spinstr" "$infotext"
        local spinstr=$temp${spinstr%"$temp"}
        sleep "$delay"
        printf "\b\b\b\b\b\b"
        # Erase the info text: one backspace per character (no seq needed).
        for ((i = 0; i < ${#infotext}; i++)); do
            printf "\b"
        done
    done
    printf "    \b\b\b\b"
    tput cnorm 2>/dev/null || :
}
clear
# check for root privilege — apt-get and shutdown below require it
if [ "$(id -u)" != "0" ]; then
    echo " this script must be run as root" 1>&2
    echo
    exit 1
fi
# check for interactive shell: skip stty when booted with the
# "noninteractive" kernel argument (no usable terminal)
if ! grep -q "noninteractive" /proc/cmdline ; then
    stty sane
fi
# print status message
echo " preparing your server; this may take a few minutes ..."
# update repos — each apt step runs in a background subshell while the
# spinner (defined above) tracks its PID
(apt-get -y update > /dev/null 2>&1) & spinner $! "updating apt repository ..."
echo
(apt-get -y upgrade > /dev/null 2>&1) & spinner $! "upgrade ubuntu os ..."
echo
(apt-get -y dist-upgrade > /dev/null 2>&1) & spinner $! "dist-upgrade ubuntu os ..."
echo
#(apt-get -y install openssh-server zsh git curl vim npm > /dev/null 2>&1) & spinner $! "installing extra software ..."
#echo
(apt-get -y autoremove > /dev/null 2>&1) & spinner $! "removing old kernels and packages ..."
echo
# NOTE(review): `apt-get -y purge` with no package arguments appears to be
# a no-op — presumably intended to purge the autoremoved packages; confirm.
(apt-get -y purge > /dev/null 2>&1) & spinner $! "purging removed packages ..."
echo
# remove myself to prevent any unintended changes at a later stage
# (this script is meant to run exactly once on first boot)
rm $0
# finish
echo " DONE; rebooting ... "
# reboot
shutdown -r now
| true |
dab28c8b5377e765a621e1de78600b2136231bdb | Shell | ruarfff/Scripts | /FsUtils/FSUtilsTests/WORKING/report.sh | ISO-8859-13 | 2,770 | 4.34375 | 4 | [] | no_license | #!/bin/bash
# Autho: Ruair O Brien
# 08/02/2013
# Script to iterate through all dtl files in the DTL_FILE_DIR and create seperate reports for each config file in the CONFIGS_DIR
# The reports will be stored in the REPORTS_DIR in the format REPORTS_DIR/DTL_FILE_NAME/CONFIG_FILE_NAME
# User should be sudo
# The test tooling below (chmod 777, writes outside $HOME) needs root.
if [[ $UID != 0 ]]; then
	echo "Please run this script with sudo:"
	echo "sudo $0 $*"
	exit 1
fi
# Pull in shared path variables (REPORTS_DIR, REPORT_CONFIGS_DIR,
# DTL_FILE_INPUT_DIR, FSREPORT_APP, CODE_DIR — presumably all defined in
# definitions.sh next to this script; verify there).
source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/definitions.sh
# Compile fsReport code and move the executable to the root test folder.
# Removes any stale copy first, rebuilds from $CODE_DIR/src/fsReportLinux,
# then copies and chmods the fresh binary; exits 1 if the copy failed.
function CompileAndCopyFSReport {
	if [ -f $FSREPORT_APP ]; then
		rm $FSREPORT_APP;
	fi
	# Compile fsReportLinux (pushd/popd keep the caller's working dir intact)
	echo "Compiling fsReport...";
	pushd $CODE_DIR/src/fsReportLinux;
	make clean;
	make;
	popd;
	echo "Compilation complete";
	cp $CODE_DIR/src/fsReportLinux/fsReport $FSREPORT_APP;
	chmod 777 $FSREPORT_APP;
	# Verify the copy actually landed; a failed `cp` above would otherwise
	# go unnoticed until the report step.
	if [ -f "$FSREPORT_APP" ];
	then
		echo "fsReport file copied to working dir";
	else
		echo "Error! fsReport file was not copied to working directory";
		exit 1;
	fi
}
# Iterate over every DTL file in $DTL_FILE_INPUT_DIR and create reports for
# each via GenerateReports, which receives the bare file name (no path).
# Fixes vs. original: the glob base is quoted so directories with spaces
# work, and an empty directory no longer passes the literal pattern "*"
# through to GenerateReports.
function IterateDTLFiles {
	echo "Iterating through dtl files";
	# Look for DTL files placed in the DTL inputs folder
	for f in "$DTL_FILE_INPUT_DIR"/*
	do
		# A non-matching glob leaves the pattern itself behind; skip it.
		[ -e "$f" ] || continue
		GenerateReports "${f##*/}"
	done
	echo "Finished iterating through dtl files";
}
# Iterate through all configurations in the config dir and call
# CreateAndStoreReport with (config file name, DTL file name $1).
# Fixes vs. original: quoted glob base and arguments (paths/names with
# spaces survive), and an empty config directory no longer leaks the
# literal "*" pattern into CreateAndStoreReport.
function GenerateReports {
	echo "Starting report generation for $1";
	for f in "$REPORT_CONFIGS_DIR"/*
	do
		# A non-matching glob leaves the pattern itself behind; skip it.
		[ -e "$f" ] || continue
		CreateAndStoreReport "${f##*/}" "$1"
	done
	echo "Finished generating reports";
}
# Create a report from a DTL file using a Config file and store it in the
# reports directory. $1 = config file name, $2 = DTL file name; output goes
# to $REPORTS_DIR/$2/$1.
function CreateAndStoreReport {
	# NOTE(review): if REPORTS_DIR or the arguments were ever empty this
	# rm -rf would target a much broader path — relies on definitions.sh
	# and the callers always supplying non-empty values; confirm.
	if [ -d $REPORTS_DIR/$2/$1 ]; then
		# Get rid of any old reports based on the same DTL file and Configuration
		rm -rf "$REPORTS_DIR/$2/$1";
	fi
	# Make the folder to store the new report
	mkdir -p "$REPORTS_DIR/$2/$1";
	if [ -f $FSREPORT_APP ]
	then
		chmod 777 $FSREPORT_APP;
		echo "Creating report on $2 using $1";
		$FSREPORT_APP -dtl "$DTL_FILE_INPUT_DIR/$2" -cfg "$REPORT_CONFIGS_DIR/$1" -rdir $REPORTS_DIR/$2/$1;
	else
		echo "An error has occured. Please ensure that $FSREPORT_APP exists in the working directory.";
	fi
}
# Ensure the report, config and DTL input directories exist before any
# processing starts (paths come from definitions.sh). mkdir is only issued
# for directories that are missing, exactly as before.
function CreateFolders {
	local dir
	for dir in "$REPORTS_DIR" "$REPORT_CONFIGS_DIR" "$DTL_FILE_INPUT_DIR"
	do
		if [ ! -d "$dir" ]; then
			mkdir -p "$dir";
		fi
	done
}
# Main sequence: build the reporting binary, make sure the directory layout
# exists, then walk every DTL input and generate its reports. The final
# chmod opens the generated reports to all users; the script's exit status
# is that of the chmod.
CompileAndCopyFSReport
CreateFolders
IterateDTLFiles
chmod 777 -R $REPORTS_DIR
exit $?
| true |
b15400416c5b249a55c10ae2a663ae9ff2196cd3 | Shell | NaabZer/.dotfiles | /gui/polybar/get_playtype.sh | UTF-8 | 983 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Emit a two-letter code for a polybar widget describing what is playing:
# sp/yt/ch for a playing Spotify/YouTube/Chromium player, otherwise
# pl/pa/st for the default player's Playing/Paused/Stopped state, else "".
status=$(playerctl status 2> /dev/null)
# playerctl -l lists available player names; grep picks specific ones.
youtube=$(playerctl -l | grep youtube)
spotify=$(playerctl -l | grep spotify)
chrome=$(playerctl -l | grep chromium)
# Use different statuses, to have a display order (eg. stopped spotify and playing yt)
if [ ! "$spotify" == "" ]; then
    spotstatus=$(playerctl -p "$spotify" status 2> /dev/null)
fi
if [ ! "$youtube" == "" ]; then
    ytstatus=$(playerctl -p "$youtube" status 2> /dev/null)
fi
if [ ! "$chrome" == "" ]; then
    chstatus=$(playerctl -p "$chrome" status 2> /dev/null)
fi
# TODO: Fix several players
# Priority order below: Spotify, then YouTube, then Chromium, then the
# default player's own status. Only the first match is reported.
if [ ! "$spotify" == "" ] && [ "$spotstatus" = "Playing" ]; then
    echo "sp"
elif [ ! "$youtube" == "" ] && [ "$ytstatus" = "Playing" ]; then
    echo "yt"
elif [ ! "$chrome" == "" ] && [ "$chstatus" = "Playing" ]; then
    echo "ch"
elif [ "$status" = "Playing" ]; then
    echo "pl"
elif [ "$status" = "Paused" ]; then
    echo "pa"
elif [ "$status" = "Stopped" ]; then
    echo "st"
else
    echo ""
fi
| true |
38e08aaa305f7c96a9c04502127009d088968792 | Shell | calculuswhiz/config-files | /config/.zshrc | UTF-8 | 5,021 | 2.578125 | 3 | [] | no_license | # Path to your oh-my-zsh installation.
# Force 256-color terminal reporting before oh-my-zsh/themes load.
export TERM="xterm-256color"
ZSH=/usr/share/oh-my-zsh/
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
#ZSH_THEME="robbyrussell"
#ZSH_THEME="arrow"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git archlinux)
source $ZSH/oh-my-zsh.sh
# User configuration
export PATH=/usr/bin/:$PATH:$HOME/bin:/usr/local/bin
export XILINX_PATH=/home/zach/Xilinx/14.7/ISE_DS
export EDITOR="emacs -nw"
export PYTHONPATH=$HOME/Documents/scripts/MacroManX/
export GOPATH=~/.golang
#export JAVA_HOME=/usr/java/
# Auto-completion helpers:
export FPATH=~/.zsh/completion:$FPATH
autoload -U compinit
compinit
# Prompt settings:
autoload -Uz promptinit
promptinit
prompt redhat
#PROMPT="%T
#[%{$fg_no_bold[cyan]%}%n%b%f@%m %{$fg_bold[cyan]%}%1~%b%f]%F{magenta}%(!.#.%%)%f "
#RPROMPT="%B%F{red}%(?..%? )%f%b%F%B"
# Two-line custom prompt: line 1 shows last exit status (if non-zero),
# running job count, history number, nesting marker and timestamp; line 2
# shows user@host and a truncated cwd. Overrides the `prompt redhat` above.
PROMPT="%B%F{red}%(?..%? )%f%b%F{cyan}[%j running job(s)] %f%F{green}{history#%!} %f%F{red}%(3L.+ .)%f%B%F{blue}%D{%H:%M:%S} %f%F{blue}%D{%Y-%m-%d}%f%b
%F{red}└-%B%F{blue}%n%f%b@%m %B%40<..<%~%<< %b%# "
# Select the wine prefix/architecture for subsequent wine commands:
#   "32"              -> ~/.wine32, WINEARCH=win32
#   "64" or no arg    -> ~/.wine,   WINEARCH=win64
# Any other argument leaves the environment untouched.
winemode()
{
	case "${1:-64}" in
		32)
			export WINEPREFIX=~/.wine32
			export WINEARCH=win32
			;;
		64)
			export WINEPREFIX=~/.wine
			export WINEARCH=win64
			;;
	esac
}
# Keep the screensaver from kicking in while watching video: poke
# xscreensaver every five minutes, forever. Interrupt with Ctrl-C.
moviemode()
{
	while :; do
		xscreensaver-command -deactivate
		sleep 300s
	done
}
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
#   export EDITOR='vim'
# else
#   export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# ssh
# export SSH_KEY_PATH="~/.ssh/dsa_id"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
alias ls="ls -FA --group-directories-first --color=auto"
alias emacs="emacs -nw"
# Edit this file and immediately re-source it.
alias zxconfig="emacs -nw ~/.zshrc; echo -n 'Sourcing... '; source ~/.zshrc; echo 'Done.'"
# zsh suffix alias: typing a *.pdf name opens it in zathura.
alias -s pdf="zathura"
alias adbon='sudo adb start-server'
alias adboff='adb kill-server'
alias terminal='xfce4-terminal'
#alias wlan0-illinoisnet='sudo netctl start IllinoisNet'
alias wyeFi='sudo netctl start'
alias pacman='pacman --color always'
alias DELETED!='rm'
alias clipEdit='xclip -o -selection clipboard'
alias xcf2jpg='batch_level.py 0 255'
alias remolevl='perl-rename "s/-levl//" *'
alias ethreset='sudo ip link set enp9s0 down; sudo netctl start ethernet-dhcp'
alias forgotpi='python -c "import math; print(math.pi)"'
alias yoaurt='yaourt'
# Strip newlines from the clipboard contents in place.
alias cliptrim="xclip -o -selection clipboard | tr -d '\n' | xclip -selection clipboard"
alias getflashpid="ps -U zach | grep plug | grep -oP '(?<= )\d+ '"
#alias plz='sudo'
# Decode %XX-style escapes from stdin via Python's unicode_escape codec.
alias unescape="sed -e 's/%/\\\\x/g' | python -c \"import sys, codecs; print(codecs.decode(sys.stdin.readline(),'unicode_escape'))\""
# Copy the primary selection into the clipboard selection.
alias zathcpy="xclip -o | xclip -selection clipboard"
# Anti-grep_options thing: force the real grep with colors, and drop the
# deprecated GREP_OPTIONS so it cannot interfere.
alias grep="/usr/bin/grep --color=always"
unset GREP_OPTIONS
#disable shared history.
unsetopt share_history
#Miscellaneous config stuff, just in case: set keyboard repeat rate, but
#only when an X display is actually reachable.
if xset q &>/dev/null; then
	xset r rate 225 30
fi
| true |
0b29b8beee272451cd9134162dde4a14b072d0f4 | Shell | Deedone/polybar | /common/travis/tests.sh | UTF-8 | 275 | 3.453125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/bin/bash
# Build all unit-test binaries, run each one, and exit non-zero if any
# failed. Individual results are printed in green (pass) / red (fail).
r=0
make all_unit_tests || exit $?
for test in tests/unit_test.*; do
  # Skip non-executables — this also covers the literal glob pattern left
  # behind when no unit_test.* files exist.
  [ -x "$test" ] || continue
  if $test; then
    printf "\033[1;32m%s\033[0m\n" "${test##*/} passed"
  else
    r=1
    printf "\033[1;31m%s\033[0m\n" "${test##*/} failed"
  fi
done
exit $r
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.