blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d66958286e6d8bae212d52773aa8f8a6216b23ed
|
Shell
|
lusssn/ecclesia
|
/scripts/generate-ssl-certs.sh
|
UTF-8
| 616
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a self-signed SSL certificate pair (key.pem / cert.pem) in
# ./sslcert for the local development server.
# Run from the application root:  bash scripts/generate-ssl-certs.sh
# Guard: must be run from the app root (where server.js or app.js lives).
# (POSIX 'test -a' is deprecated/ambiguous; use two tests joined by &&.)
if [ ! -e server.js ] && [ ! -e app.js ]
then
	echo "Error: could not find main application server.js file"
	echo "You should run the generate-ssl-certs.sh script from the main application root directory"
	echo "i.e: bash scripts/generate-ssl-certs.sh"
	# 'exit -1' is not a valid exit status (must be 0-255); use 1.
	exit 1
fi
echo "Generating self-signed certificates..."
mkdir -p ./sslcert
# 2048-bit RSA: 1024-bit keys are rejected by modern TLS stacks.
openssl genrsa -out ./sslcert/key.pem 2048
openssl req -new -key ./sslcert/key.pem -out ./sslcert/csr.pem
openssl x509 -req -days 9999 -in ./sslcert/csr.pem -signkey ./sslcert/key.pem -out ./sslcert/cert.pem
rm ./sslcert/csr.pem
# The private key must not be world-readable.
chmod 600 ./sslcert/key.pem ./sslcert/cert.pem
| true
|
d85a8d3e7a8a05c05e0dbb18ae93b6a61ba22c6f
|
Shell
|
stensonb/ipget
|
/sharness/t0020-ephemeral-node.sh
|
UTF-8
| 962
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Sharness test suite: verify basic ipget retrieval modes against a known
# IPFS CID — single file, renamed output via -o, and a whole directory.
test_description="test the ipget command by spawning a shell"
. ./lib/sharness/sharness.sh
# NOTE(review): these tests fetch from the live IPFS network via
# --node=spawn, so they depend on the CID still being reachable — confirm
# acceptable for CI.
test_expect_success "retrieve a known popular single file" "
ipget --node=spawn QmQ2r6iMNpky5f1m4cnm3Yqw8VSvjuKpTcK1X7dBR1LkJF/cat.gif &&
echo 'c5ea0d6cacf1e54635685803ec4edbe0d4fe8465' > expected &&
shasum cat.gif | cut -d ' ' -f 1 > actual &&
diff expected actual
"
test_expect_success "retrieve a known popular file with -o" "
ipget -o meow.gif --node=spawn QmQ2r6iMNpky5f1m4cnm3Yqw8VSvjuKpTcK1X7dBR1LkJF/cat.gif &&
echo 'c5ea0d6cacf1e54635685803ec4edbe0d4fe8465' > expected &&
shasum meow.gif | cut -d ' ' -f 1 > actual &&
diff expected actual
"
test_expect_success "retrieve a directory" "
ipget --node=spawn QmQ2r6iMNpky5f1m4cnm3Yqw8VSvjuKpTcK1X7dBR1LkJF &&
ls QmQ2r6iMNpky5f1m4cnm3Yqw8VSvjuKpTcK1X7dBR1LkJF > /dev/null &&
ls QmQ2r6iMNpky5f1m4cnm3Yqw8VSvjuKpTcK1X7dBR1LkJF/cat.gif > /dev/null
"
test_done
| true
|
f3a492beb7ac6cae5823ceae0a6c9fca9808d99e
|
Shell
|
venkatping/chilaka
|
/section16/array.sh
|
UTF-8
| 212
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrates bash arrays: a here-doc "comment block" showing the
# interactive variant, followed by a literal array printed element by
# element.
#
# The delimiter is now quoted ('mycom1') and fed to the no-op ':' command,
# so nothing inside the disabled block is expanded or executed. The
# original bare, unquoted  <<mycom1  still performed parameter and command
# expansion on the skipped text.
: <<'mycom1'
echo -n "Enter array values: "
read -a my_array
echo "the given array values is: ${my_array[@]}"
mycom1
my_array=( hi hello "how are you" )
# Quoted [@] expansion keeps "how are you" as a single element.
for i in "${my_array[@]}"
do
	echo "${i}"
done
| true
|
f41f881aafafc54da50df05bb979f82f118800f8
|
Shell
|
barlingo-app/barlingo-be
|
/scripts/deploy.sh
|
UTF-8
| 2,267
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Travis CI deploy script for barlingo-be.
# Branch "CI"      -> build with the ci profile and deploy the WAR to UAT.
# Any tagged build -> build with the prd profile and deploy the WAR to PRD.
# Expects DB and SSH credentials in Travis environment variables.
NOW=$(date +"%Y-%m-%d")
if [ "$TRAVIS_BRANCH" == "CI" ]; then
echo "Deploying $TRAVIS_BRANCH to UAT"
# Recreate the CI database from scratch before the build populates it.
mysql -u$CI_DB_USERNAME -p$CI_DB_PASSWORD -e "drop database ${CI_DB_NAME};"
mysql -u$CI_DB_USERNAME -p$CI_DB_PASSWORD -e "create database ${CI_DB_NAME};"
# Hide import.sql during the ci build so it does not re-seed data.
mv src/main/resources/import.sql src/main/resources/import.sql.bck
mvn clean install -Dspring.profiles.active=ci
mv src/main/resources/import.sql.bck src/main/resources/import.sql
# Generate a schema-diff script (CI -> UAT) and apply it to the UAT DB.
bash scripts/generate-db-update.sh $UAT_DB_URL $UAT_DB_USERNAME $UAT_DB_PASSWORD $CI_DB_URL $CI_DB_USERNAME $CI_DB_PASSWORD uat uat
mysql -h $DB_HOST -u$DB_USER -D $UAT_DB_NAME -p$DB_PASSWORD < src/main/resources/database-updates/uat/db-update-uat.sql
# Keep only the UAT properties file and promote it to application.properties.
rm src/main/resources/application.properties
rm src/main/resources/application-ci.properties
rm src/main/resources/application-prd.properties
mv src/main/resources/application-uat.properties src/main/resources/application.properties
mvn clean install -P deploy -Dmaven.test.skip=true
# NOTE(review): sshpass exposes the password on the command line (visible
# in ps) and host-key checking is disabled — consider SSH keys instead.
sshpass -p $UAT_DEPLOY_PASSWORD scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null target/barlingo-be.war $UAT_DEPLOY_USER@$UAT_DEPLOY_DOMAIN:~/barlingo-deploy-packages/barlingo-be-uat/barlingo-be.war
sshpass -p $UAT_DEPLOY_PASSWORD ssh -o StrictHostKeyChecking=no $UAT_DEPLOY_USER@$UAT_DEPLOY_DOMAIN "bash deploy-barlingo-be.sh UAT $TRAVIS_BRANCH 1> ~/barlingo-deploy-logs/barlingo-be-uat/deploy-$NOW.log 2>&1"
elif [ "$TRAVIS_TAG" != "" ]; then
echo "Deploying $TRAVIS_TAG to PRD"
# Keep only the PRD properties file and promote it to application.properties.
rm src/main/resources/application.properties
rm src/main/resources/application-ci.properties
rm src/main/resources/application-uat.properties
mv src/main/resources/application-prd.properties src/main/resources/application.properties
mvn clean install -P deploy -Dmaven.test.skip=true
sshpass -p $PRD_DEPLOY_PASSWORD scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null target/barlingo-be.war $PRD_DEPLOY_USER@$PRD_DEPLOY_DOMAIN:~/barlingo-deploy-packages/barlingo-be-prd/barlingo-be.war
sshpass -p $PRD_DEPLOY_PASSWORD ssh -o StrictHostKeyChecking=no $PRD_DEPLOY_USER@$PRD_DEPLOY_DOMAIN "bash deploy-barlingo-be.sh PRD $TRAVIS_TAG 1> ~/barlingo-deploy-logs/barlingo-be-prd/deploy-v$TRAVIS_TAG-$NOW.log 2>&1"
fi
# NOTE(review): unconditional exit 0 masks any failure above from CI —
# confirm this is intentional.
exit 0
| true
|
ff5e3bdf84870cb593c7f7ec8c1dad25fbba9551
|
Shell
|
nursix/sahana-setup
|
/prod/debian/apache-mysql-install.sh
|
UTF-8
| 10,173
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Script to install Sahana on a Debian Wheezy or Jessie box with Apache & MySQL
#
# License: MIT
#
# Execute like:
# bash apache-mysql-install.sh
#
# =============================================================================
# Configuration
# Stable 2.14.6
WEB2PY_COMMIT=cda35fd
# Which OS are we running?
# Reads /etc/debian_version up to the first '.', i.e. the major version.
read -d . DEBIAN < /etc/debian_version
if [ $DEBIAN == '8' ]; then
echo "Detected Debian 8"
DEBIAN_NAME='jessie'
# Apache 2.4
extension='.conf'
else
echo "Assuming Debian 7"
DEBIAN_NAME='wheezy'
# Apache 2.2
extension=''
fi
# =============================================================================
# Update system
echo "Updating System"
apt-get update
apt-get upgrade -y
apt-get clean
# =============================================================================
# Install Admin Tools
echo "Installing Admin Tools"
apt-get install -y unzip psmisc mlocate telnet lrzsz vim elinks-lite rcconf htop sudo
apt-get clean
# =============================================================================
# Install Git
echo "Installing Git"
apt-get install -y git-core
apt-get clean
# =============================================================================
# Email
echo "Installing Mail Server"
apt-get install -y exim4-config exim4-daemon-light
apt-get clean
# =============================================================================
# MySQL
echo "Installing MySQL"
apt-get -y install mysql-server python-mysqldb phpmyadmin mytop
# Tune for smaller RAM setups
# (shrink the stock 16M buffers to 1M, then restart to pick them up)
echo "Configuring MySQL"
sed -i 's|query_cache_size = 16M|query_cache_size = 1M|' /etc/mysql/my.cnf
sed -i 's|key_buffer = 16M|key_buffer = 1M|' /etc/mysql/my.cnf
sed -i 's|max_allowed_packet = 16M|max_allowed_packet = 1M|' /etc/mysql/my.cnf
/etc/init.d/mysql restart
# =============================================================================
# Apache
echo "Installing Apache"
apt-get -y install libapache2-mod-wsgi
echo "Activating Apache modules"
a2enmod rewrite
a2enmod deflate
a2enmod headers
a2enmod expires
echo "Configuring Apache"
# Enable Basic Authentication for WebServices
# (the sed swallows the closing tag, so it is re-appended on the next line)
sed -i 's|</IfModule>|WSGIPassAuthorization On|' /etc/apache2/mods-enabled/wsgi.conf
echo "</IfModule>" >> /etc/apache2/mods-enabled/wsgi.conf
# Prevent Memory leaks from killing servers
sed -i 's|MaxRequestsPerChild 0|MaxRequestsPerChild 300|' /etc/apache2/apache2.conf
# Tune for smaller RAM setups
sed -i 's|MinSpareServers 5|MinSpareServers 3|' /etc/apache2/apache2.conf
sed -i 's|MaxSpareServers 10|MaxSpareServers 6|' /etc/apache2/apache2.conf
apache2ctl restart
# Holding Page for Maintenance windows
echo "Creating maintenance page"
cat << EOF > "/var/www/maintenance.html"
<html><body><h1>Site Maintenance</h1>Please try again later...</body></html>
EOF
# =============================================================================
# Python
#
echo "Installing Python Libraries"
apt-get -y install libgeos-c1
apt-get -y install libgeos-dev
apt-get -y install python-dev
apt-get -y install python-lxml python-setuptools python-dateutil python-pip
apt-get -y install python-serial
if [ $DEBIAN == '7' ]; then
apt-get -y install python-imaging
else
apt-get -y install python-imaging python-reportlab
fi
# NOTE(review): python-imaging is installed again here regardless of the
# branch above — confirm whether this duplicate is intentional.
apt-get -y install python-imaging
apt-get -y install python-matplotlib
apt-get -y install python-requests
apt-get -y install python-xlwt
if [ $DEBIAN == '7' ]; then
# Need ReportLab>3.0 for percentage support (Wheezy installs only 2.5)
echo "Upgrading ReportLab"
pip install reportlab
fi
# Install latest Shapely for Simplify enhancements
# Shapely>=1.3 requires GEOS>=3.3.0 (Wheezy=3.3.3, Jessie=3.4.2)
echo "Installing Shapely"
pip install shapely
# Install latest XLRD for XLS import support
echo "Installing XLRD"
pip install xlrd
# =============================================================================
# Web2Py
apt-get -y install libodbc1
# Create user and group web2py
echo "Creating web2py user and group"
if id "web2py" >/dev/null 2>&1; then
echo "web2py user exists"
else
adduser --system --disabled-password web2py
fi
if grep -q "web2py" /etc/group; then
echo "web2py group exits"
else
addgroup web2py
fi
echo "Cloning web2py"
cd /home
# An existing install is only removed after explicit confirmation.
if [ -d "web2py/applications" ]; then
echo "WARNING: This will remove the existing web2py/Sahana installation - continue"
echo "Type 'yes' if you are certain"
read answer
case $answer in
yes)
echo "Removing existing installation..."
rm -rf web2py;;
*)
echo "Aborting..."
exit 1;;
esac
fi
git clone --recursive git://github.com/web2py/web2py.git
# Pin web2py to the stable commit configured at the top of the script.
if [ ! -z "$WEB2PY_COMMIT" ]; then
echo "Checking out web2py stable"
cd web2py
git reset --hard $WEB2PY_COMMIT
git submodule update
cd ..
fi
# Create symbolic link
ln -fs /home/web2py ~
echo "Copying WSGI Handler"
cp -f /home/web2py/handlers/wsgihandler.py /home/web2py
echo "Setting up routes"
cat << EOF > "/home/web2py/routes.py"
#!/usr/bin/python
default_application = 'eden'
default_controller = 'default'
default_function = 'index'
routes_onerror = [
('eden/400', '!'),
('eden/401', '!'),
('eden/509', '!'),
('eden/*', '/eden/errors/index'),
('*/*', '/eden/errors/index'),
]
EOF
# Configure Matplotlib
# Give the web2py user a writable config dir and force the headless backend.
mkdir /home/web2py/.matplotlib
chown web2py /home/web2py/.matplotlib
echo "os.environ['MPLCONFIGDIR'] = '/home/web2py/.matplotlib'" >> /home/web2py/wsgihandler.py
sed -i 's|TkAgg|Agg|' /etc/matplotlibrc
# =============================================================================
# Sahana
cd web2py
cd applications
echo "Cloning Sahana"
git clone git://github.com/sahana/eden.git
echo "Fixing permissions"
# Directories the web2py user must be able to write in the admin app.
declare -a admindirs=("cache" "cron" "databases" "errors" "sessions" "uploads")
chown web2py ~web2py
for i in "${admindirs[@]}"
do
if [ ! -d "$i" ]; then
mkdir -p ~web2py/applications/admin/$i
fi
chown -v web2py ~web2py/applications/admin/$i
done
# Same again for the eden app (plus its models directory).
declare -a edendirs=("cache" "cron" "databases" "models" "errors" "sessions" "uploads")
chown web2py ~web2py/applications/eden
for i in "${edendirs[@]}"
do
if [ ! -d "$i" ]; then
mkdir -p ~web2py/applications/eden/$i
fi
chown -v web2py ~web2py/applications/eden/$i
done
# Additional upload directories
mkdir -p ~web2py/applications/eden/uploads/gis_cache
mkdir -p ~web2py/applications/eden/uploads/images
mkdir -p ~web2py/applications/eden/uploads/tracks
chown web2py ~web2py/applications/eden/uploads/gis_cache
chown web2py ~web2py/applications/eden/uploads/images
chown web2py ~web2py/applications/eden/uploads/tracks
# Additional static directories
mkdir -p ~web2py/applications/eden/static/cache/chart
chown web2py ~web2py/applications/eden/static/fonts
chown web2py ~web2py/applications/eden/static/img/markers
chown web2py -R ~web2py/applications/eden/static/cache
# Create symbolic links
ln -fs /home/web2py/applications/eden ~
ln -fs /home/web2py/applications/eden /home/web2py/eden
# =============================================================================
# Management scripts
# Each helper below is generated via an unquoted here-doc: '\$' escapes keep
# those expansions for the generated script's run time, while $extension is
# baked in now (empty for Apache 2.2, '.conf' for 2.4).
echo "Installing Management Scripts"
echo "...backup"
cat << EOF > "/usr/local/bin/backup"
#!/bin/sh
NOW=\$(date +"%Y-%m-%d")
mysqldump sahana > /root/backup-\$NOW.sql
gzip -9 /root/backup-\$NOW.sql
OLD=\$(date --date='7 day ago' +"%Y-%m-%d")
rm -f /root/backup-\$OLD.sql.gz
EOF
chmod +x /usr/local/bin/backup
echo "...compile"
cat << EOF > "/usr/local/bin/compile"
#!/bin/sh
cd ~web2py
python web2py.py -S eden -M -R applications/eden/static/scripts/tools/compile.py
apache2ctl restart
EOF
chmod +x /usr/local/bin/compile
echo "...maintenance"
cat << EOF > "/usr/local/bin/maintenance"
#!/bin/sh
# Script to activate/deactivate the maintenance site
# Can provide the option 'off' to disable the maintenance site
if [ "\$1" != "off" ]; then
# Stop the Scheduler
killall python
# Deactivate the Production Site
a2dissite production$extension
# Activate the Maintenance Site
a2ensite maintenance$extension
else
# Deactivate the Maintenance Site
a2dissite maintenance$extension
# Activate the Production Site
a2ensite production$extension
# Start the Scheduler
cd ~web2py && sudo -H -u web2py nohup python web2py.py -K eden -Q >/dev/null 2>&1 &
fi
apache2ctl restart
EOF
chmod +x /usr/local/bin/maintenance
echo "...pull"
cat << EOF > "/usr/local/bin/pull"
#!/bin/sh
cd ~web2py/applications/eden
sed -i 's/settings.base.migrate = False/settings.base.migrate = True/g' models/000_config.py
git pull
/usr/local/bin/maintenance
rm -rf compiled
cd ~web2py
sudo -H -u web2py python web2py.py -S eden -M -R applications/eden/static/scripts/tools/noop.py
cd ~web2py/applications/eden
sed -i 's/settings.base.migrate = True/settings.base.migrate = False/g' models/000_config.py
/usr/local/bin/compile
/usr/local/bin/maintenance off
EOF
chmod +x /usr/local/bin/pull
# Change the value of prepopulate, if-necessary
echo "...clean"
cat << EOF > "/usr/local/bin/clean"
#!/bin/sh
/usr/local/bin/maintenance
cd ~web2py/applications/eden
rm -rf databases/*
rm -f errors/*
rm -rf sessions/*
rm -rf uploads/*
sed -i 's/settings.base.migrate = False/settings.base.migrate = True/g' models/000_config.py
sed -i 's/settings.base.prepopulate = 0/#settings.base.prepopulate = 0/g' models/000_config.py
rm -rf compiled
mysqladmin -f drop sahana
mysqladmin create sahana
cd ~web2py
sudo -H -u web2py python web2py.py -S eden -M -R applications/eden/static/scripts/tools/noop.py
cd ~web2py/applications/eden
sed -i 's/settings.base.migrate = True/settings.base.migrate = False/g' models/000_config.py
sed -i 's/#settings.base.prepopulate = 0/settings.base.prepopulate = 0/g' models/000_config.py
/usr/local/bin/maintenance off
/usr/local/bin/compile
EOF
chmod +x /usr/local/bin/clean
echo "...w2p"
cat << EOF > "/usr/local/bin/w2p"
#!/bin/sh
cd ~web2py
python web2py.py -S eden -M
EOF
chmod +x /usr/local/bin/w2p
# =============================================================================
# END
echo "Installation successful - please run configuration script"
| true
|
7cea01475111d3fe098363efe57ba06b1087f906
|
Shell
|
atkuzmanov/scripts
|
/rename_files_and_folders_script/rename_files_and_folders_script_v1.0.sh
|
UTF-8
| 6,284
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Abort immediately if any command fails.
set -e
################################
## WORKING
##
## TODO: optimise
##
## WIP: [optimisation 1:]
## Instead of each function looping and going through all files and folders
## have one function to do the loop and pass the files and folders as arguments
## to all the other functions.
## Continue developing function `rename_files_and_folders_dirs_1`.
##
## [optimisation 2:]
## Expand features to take user input.
## For example call the script with flags/arguments/parameters/options
## which invoke different functions passing different arguments
## such as tags to remove or tags to add.
## Note: This could likely make [optimisation 1:] redundant, so need to choose
## witch path to follow.
##
## References
## https://www.gnu.org/software/bash/manual/bash.html#Shell-Parameter-Expansion
## https://stackoverflow.com/questions/15012631/rename-files-and-directories-recursively-under-ubuntu-bash
## https://superuser.com/questions/213134/recursively-rename-files-change-extension-in-linux
## https://stackoverflow.com/questions/6509650/extract-directory-from-path
## https://stackoverflow.com/questions/6121091/get-file-directory-path-from-file-path/6121114
## https://stackoverflow.com/questions/13210880/replace-one-substring-for-another-string-in-shell-script
## https://tldp.org/LDP/abs/html/string-manipulation.html
## https://stackoverflow.com/questions/16623835/remove-a-fixed-prefix-suffix-from-a-string-in-bash
## https://unix.stackexchange.com/questions/311758/remove-specific-word-in-variable
## https://unix.stackexchange.com/questions/56810/adding-text-to-filename-before-extension
## https://stackoverflow.com/questions/45799657/bash-adding-a-string-to-file-name
## https://linuxize.com/post/bash-functions/
## https://bash.cyberciti.biz/guide/Pass_arguments_into_a_function
## https://stackoverflow.com/questions/6212219/passing-parameters-to-a-bash-function
## https://linuxacademy.com/blog/linux/conditions-in-bash-scripting-if-statements/
################################
declare -a STRINGS_TO_REPLACE
# Tags to strip out of file/folder names.
STRINGS_TO_REPLACE=("tag1" "tag2")
# Tag appended (before the extension) when a name does not already carry it.
STRING_TO_ADD_IF_NOT_PRESENT="tag3"
################################
# Rename the single file given as $1, removing the first tag from
# STRINGS_TO_REPLACE found in its basename.
# Intended to be driven per-path by rename_files_and_folders_dirs_1; the
# original also invoked it once at top level with no argument, which was a
# no-op and has been removed.
# Arguments: $1 - path to an existing file
# Outputs:   mv -v progress on stdout
rename_files_remove_old_tags_arguments() {
	filepathnodot="${1#.}"          # path minus leading '.' (kept for parity; unused)
	justfilenamenopath="${1##*/}"   # basename
	justpathnofile=${1%/*}          # dirname
	for current_string in "${STRINGS_TO_REPLACE[@]}"
	do
		if [[ "$justfilenamenopath" == *"$current_string"* ]]
		then
			# Only rename while the file still exists: the original ran mv
			# unconditionally and could reuse a stale $newfilename when the
			# 'test -e' guard failed.
			if [[ -e "$1" ]]; then
				newfilename=$(echo "$justfilenamenopath" | sed "s/$current_string//g")
				mv -v "$1" "$justpathnofile/$newfilename"
			fi
			break
		fi
	done
}
################################
# Strip the first matching old tag (from STRINGS_TO_REPLACE) out of the
# name of every non-hidden regular file under the current directory.
# Paths are NUL-delimited from find, so whitespace in names is safe.
rename_files_remove_old_tags() {
while IFS= read -r -d '' n; do
filepathnodot="${n#.}"
# echo "$filepathnodot"
justfilenamenopath="${n##*/}"
# echo "$justfilenamenopath"
justpathnofile=${n%/*}
# echo "$justpathnofile"
for current_string in "${STRINGS_TO_REPLACE[@]}" ;
do
if [[ "$justfilenamenopath" == *"$current_string"* ]];
then
# echo "Will rename $justfilenamenopath"
# NOTE(review): 'test -e' only guards the name computation — mv still
# runs unconditionally on the next line; verify this is intended.
test -e "$n" &&
newfilename=$(echo "$justfilenamenopath" | sed "s/$current_string//g")
mv -v "$n" "$justpathnofile/$newfilename"
break;
fi
done
done < <(find . \( -type f -name "[!.]*" \) -print0)
}
rename_files_remove_old_tags
################################
# Strip the first matching old tag out of every non-hidden directory path
# under the current directory.
# NOTE(review): renaming a parent while find is still streaming children
# can invalidate later paths; the 'test -e' check mitigates but does not
# guard the mv itself — verify on deep trees.
rename_folders_dirs_remove_old_tags() {
while IFS= read -r -d '' n; do
for current_string in "${STRINGS_TO_REPLACE[@]}" ;
do
if [[ "$n" == *"$current_string"* ]];
then
# echo "Will rename $n"
test -e "$n" &&
newfilename=$(echo "$n" | sed "s/$current_string//g")
mv -v "$n" "$newfilename"
break;
fi
done
done < <(find . \( -type d -name "[!.]*" \) -print0)
}
rename_folders_dirs_remove_old_tags
################################
# Append STRING_TO_ADD_IF_NOT_PRESENT (before the extension) to every
# non-hidden regular file whose name does not already contain it.
rename_files_add_new_tags() {
while IFS= read -r -d '' n; do
filepathnodot="${n#.}"
# echo "$filepathnodot"
justfilenamenopath="${n##*/}"
# echo "$justfilenamenopath"
justpathnofile=${n%/*}
# echo "$justpathnofile"
if [[ ! "$justfilenamenopath" == *"$STRING_TO_ADD_IF_NOT_PRESENT"* ]];
then
# echo "Will rename $justfilenamenopath"
# New name: "<stem> <tag>.<extension>".
test -e "$n" &&
newfilename="${justfilenamenopath%.*} $STRING_TO_ADD_IF_NOT_PRESENT.${justfilenamenopath##*.}"
mv -v "$n" "$justpathnofile/$newfilename"
fi
done < <(find . \( -type f -name "[!.]*" \) -print0)
}
rename_files_add_new_tags
################################
# Append STRING_TO_ADD_IF_NOT_PRESENT to every non-hidden directory name
# that lacks it (the tag goes after the full path, space-separated).
rename_folders_dirs_add_new_tags() {
while IFS= read -r -d '' n; do
filepathnodot="${n#.}"
# echo "$filepathnodot"
justpathnofile=${n%/*}
# echo "$justpathnofile"
if [[ ! "$n" == *"$STRING_TO_ADD_IF_NOT_PRESENT"* ]];
then
test -e "$n" &&
newfilename="$n $STRING_TO_ADD_IF_NOT_PRESENT"
mv -v "$n" "$newfilename"
fi
done < <(find . \( -type d -name "[!.]*" \) -print0)
}
rename_folders_dirs_add_new_tags
################################
################################
################################
# WIP driver for [optimisation 1]: walk the tree once and dispatch each
# path to the per-file/per-directory helpers.
# NOTE(review): the commented-out calls pass "$n" to functions that take
# no arguments and re-scan the whole tree themselves — they would need an
# _arguments-style signature before being enabled.
rename_files_and_folders_dirs_1 () {
while IFS= read -r -d '' n; do
if [[ -f $n ]];
then
# echo "FILE <<< $n"
rename_files_remove_old_tags_arguments "$n"
# rename_files_add_new_tags "$n"
elif [[ -d "$n" ]];
then
echo "DIR >>> $n"
# rename_folders_dirs_remove_old_tags "$n"
# rename_folders_dirs_add_new_tags "$n"
fi
done < <(find . \( -name "[!.]*" \) -print0)
}
# rename_files_and_folders_dirs_1
################################
################################
################################
| true
|
2887e171a8813f1e0c7ef5444481caf8d37f08eb
|
Shell
|
flintory5/homebridge
|
/install.sh
|
UTF-8
| 1,686
| 3.9375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# install.sh
#
#
# Created by Flint Ory on 12/13/15.
#
# Installs the toolchain homebridge needs on macOS (Homebrew, Node,
# Python) and then runs the homebridge install script.
# NOTE(review): the file originally started with '#!/bin/sh' and carried a
# stray second '#!/bin/bash' line further down; the script uses bashisms
# ([[ ]], ${BASH_SOURCE[0]}), so a single bash shebang is the fix.
NODE_VERSION=4.2.3
NPM_VERSION=2.14.4
# Save script's current directory
DIR="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
#cd "${DIR}"
#
# Check if Homebrew is installed
# (which -s is the macOS silent, status-only lookup)
#
which -s brew
if [[ $? != 0 ]] ; then
# Install Homebrew
echo "**** Installing Homebrew ****"
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
else
echo "**** Updating Homebrew ****"
brew update
fi
#
# Check if Git is installed
#
#echo "**** Checking for git and installing if necessary ****"
#which -s git || brew install git
#
# Check if Node is installed and at the right version
#
echo "Checking for Node version ${NODE_VERSION}"
node -v | grep ${NODE_VERSION}
if [[ $? != 0 ]] ; then
# Install Node
cd `brew --prefix`
# NOTE(review): 'brew versions' was removed from Homebrew — this line is
# likely a no-op on current systems; confirm before relying on it.
$(brew versions node | grep ${NODE_VERSION} | cut -c 16- -)
echo "**** Installing Node ****"
brew install node
fi
#
# Check if Node Package Manager is installed and at the right version
#
#echo "Checking for NPM version ${NPM_VERSION}"
#npm --version | grep ${NPM_VERSION}
#if [[ $? != 0 ]] ; then
#echo "Downloading npm"
#git clone git://github.com/isaacs/npm.git && cd npm
#git checkout v${NPM_VERSION}
#make install
#fi
#
# Ensure NODE_PATH is set
#
#grep NODE_PATH ~/.bash_profile > /dev/null || cat "export NODE_PATH=/usr/local/lib/node_modules" >> ~/.bash_profile && . ~/.bash_profile
#
# Check if python is installed
#
echo "Checking for Python and installing if necessary"
which -s python
if [[ $? != 0 ]] ; then
# Install Python
brew install python
fi
echo "**** Running Homebrew install script ****"
python ~/homebridge/install_homebridge.py
| true
|
b8ef860173ef0ef284fb946e94e8b675ddcb463b
|
Shell
|
mattrobenolt/prompt
|
/bench/old-prompt.sh
|
UTF-8
| 772
| 3.46875
| 3
|
[
"ISC"
] |
permissive
|
#!/usr/bin/env zsh
# Benchmark copy of the old shell prompt: prints "user@host cwd branch $ ".
# The colour escape definitions are kept commented out so timing measures
# only the plain-text code path.
#fg_green=$'\e[32m'
#fg_cyan=$'\e[36m'
#fg_red_bold=$'\e[1;31m'
#fg_white_bold=$'\e[1;97m'
#reset=$'\e[0m'
# Print the last component of the working directory.
prompt_pwd () {
#[ "$PWD" = "/home/${USER}" ] && wd='~' || wd="${PWD##*/}"
wd="${PWD##*/}"
#printf %s "${fg_cyan}${wd}${reset}"
printf %s "$wd"
}
# When inside a git repo: print ' *' for a dirty worktree, then the
# current branch (or '(detached HEAD)'). Prints nothing outside a repo.
prompt_git () {
cur_branch="$(git rev-parse --abbrev-ref HEAD 2>/dev/null)" || return
[ -z "$(git status --porcelain -uno)" ] || printf %s ' *'
[ "$cur_branch" = HEAD ] && cur_branch='(detached HEAD)'
#printf %s " ${fg_green}${cur_branch}${reset}"
printf %s " ${cur_branch}"
}
# Assemble and emit the full prompt line.
prompt () {
#echo "${fg_white_bold}${USER}${reset}@${fg_red_bold}${HOST}${reset} $(prompt_pwd)$(prompt_git) $ "
echo "${USER}@${HOST} $(prompt_pwd)$(prompt_git) $ "
}
prompt
| true
|
bd0b4da9ac3d157e6387b689cbd18b8bae01388f
|
Shell
|
comsci-uwc-isak/process-journal-Shin1203
|
/octbreakcountdown.sh.save
|
UTF-8
| 442
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Day-counting helper (recovered from a .save draft): maps month numbers
# to identifiers, reads a month from the user, and (for January only so
# far) reads a day of month into $total.
# NOTE(review): the draft ended without the closing 'fi', so it could not
# run at all — restored below.
#identifiers for each month
January=1
#Jan=0days
February=2
#Feb=32days
March=3
#March=60days
April=4
#April=91days
May=5
#May=121days
June=6
#June=152days
July=7
#July=182days
August=8
#August=213day
September=9
#September=244day
October=10
#October=274
#read month from user
echo "Enter month as number (Jan-Oct)"
read -r month
# Quoted POSIX '=' comparison: the original unquoted '==' test broke with
# empty input.
if [ "$month" = "$January" ]; then
echo "January- enter date"
read -r date
(( total = date ))
fi
| true
|
0340b7a5d523b7f99f6d9826720ad24f927796ef
|
Shell
|
andris9/sharded-mongodb-docker
|
/mongod/mongod_runextra.sh
|
UTF-8
| 614
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Container init helper: block until the local mongod answers, then
# initiate the replica set named by $REPSET_NAME.
# Wait until local MongoDB instance is up and running
until /usr/bin/mongo --port 27017 --quiet --eval 'db.getMongo()'; do
sleep 1
done
# Configure a MongoDB replica set (doesn't matter if each container attempts
# to run same action, first one wins, other attempts will then be ignored)
# The here-doc delimiter is deliberately unquoted so ${REPSET_NAME}
# expands from the container environment.
/usr/bin/mongo --port 27017 <<EOF
rs.initiate({_id: "${REPSET_NAME}", members: [
{_id: 0, host: "${REPSET_NAME}-replica0:27017"},
{_id: 1, host: "${REPSET_NAME}-replica1:27017"},
{_id: 2, host: "${REPSET_NAME}-replica2:27017"}
], settings: {electionTimeoutMillis: 2000}});
EOF
| true
|
32e35f09c281b36cb089798955e47d38f1746577
|
Shell
|
jeffski10/tvh-postprocessor
|
/TVH_PP.sh
|
UTF-8
| 2,707
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
#set -x #echo on
# TV Converter
# Processes recorded TV program, strips out adverts via Comskip and compresses
# Usage: TVH_PP.sh <recording file>  (invoked by TVHeadend post-processing)
#
# NOTE(review): shebang changed from /bin/sh to bash — the script relies on
# bash arrays, (( )) arithmetic and the 'function' keyword.
function pause(){
read -p "$*"
}
#Variables and paths
filename=$(basename "$1")
BaseFileName=${1%.*}
EDLFILE=$BaseFileName.edl
FFPROBE=/opt/Qffmpeg/bin/ffprobe
FFMPEG=/opt/Qffmpeg/bin/ffmpeg
COMSKIPPATH="/share/Recording/Tools/comskip"
COMSKIPINI="/share/Recording/Tools/comskip.ini"
#Determine if to convert or copy the video.
# Hoisted above the EDL branch: the original computed this only inside the
# "EDL exists" branch, leaving $convertvideoformat empty in the else path.
# NOTE(review): the ffprobe key is spelled 'bitrate' here; stock ffprobe
# calls it 'bit_rate' — confirm against the Qffmpeg build in use.
videoformat=$($FFPROBE -i "$1" -show_entries format=bitrate -v quiet -of csv="p=0")
convertvideoformat="-c:v libx264 -profile:v high -preset fast -x264-params crf=24"
if (( videoformat < 3000000 )); then
convertvideoformat="-vcodec copy "
fi
#Run COMSKIP first (only if no cut list exists yet)
if [ ! -f "$EDLFILE" ]; then
$COMSKIPPATH --ini="$COMSKIPINI" "$1"
fi
# If edl file now exists we have something to work with e.g. there are some adverts
if [ -f "$EDLFILE" ]; then
echo "EDL File Exists"
#Read the EDL into arrays: each row is "ad_start<TAB>ad_end<TAB>type",
#so programme segment N runs from start[N] to end[N].
edlrow=1
start[edlrow]=0
while read -r line; do
end[edlrow]="$( cut -f 1 <<<"$line" )"
start[edlrow+1]="$( cut -f 2 <<<"$line")"
edlrow=$((edlrow+1))
done < "$EDLFILE"
#Set end point of the final segment to the end of the recording
end[edlrow]=$($FFPROBE -i "$1" -show_entries format=duration -v quiet -of csv="p=0")
#Extract each programme segment to a temporary .ts file
a=1
while [ $a -le $edlrow ]
do
ffstart=$(echo ${start[$a]}|TZ=UTC awk '{print strftime("%H:%M:%S",$1,-3600)}')
ffend=$(echo ${end[$a]}|TZ=UTC awk '{print strftime("%H:%M:%S",$1,-3600)}')
# (fixed: the original had a stray '}' after ${start[$a]})
length=$(echo ${end[$a]} ${start[$a]} | awk '{ printf "%f", $1 - $2 }')
fflength=$(echo $length|TZ=UTC awk '{print strftime("%H:%M:%S", $1,-3600)}')
$FFMPEG -ss $ffstart -i "$1" -t $fflength -async 1 -vcodec copy -acodec copy -y "$BaseFileName""_Temp_"$a.ts
ffparts=$ffparts$BaseFileName"_Temp_"$a.ts"|"
a=$((a+1))
#Finish Looping
done
#Now combine it all again and output to mp4
# (useless backticks that executed ffmpeg's stdout were removed)
$FFMPEG -fflags +genpts -i "concat:$ffparts" $convertvideoformat -acodec ac3 -ac 6 -y -sn -threads 0 "$BaseFileName""_output.mp4"
# No EDL File so just convert the file
else
$FFMPEG -fflags +genpts -i "$1" $convertvideoformat -acodec ac3 -ac 6 -y -sn -threads 0 "$BaseFileName""_output.mp4"
fi
#Finally Clean Up files
rm -f "$EDLFILE"
rm -f "$BaseFileName""_Temp_"*".ts"
#cp -f "$1" /share/Recording/TV_Converted
#Now Tell TV Headend with trick renaming so TVH finds it
mv "$1" "$BaseFileName"".mp4"
cp -f "$BaseFileName""_output.mp4" "$BaseFileName"".mp4"
rm -f "$BaseFileName""_output.mp4"
#curl -G -v "http://localhost:9981/api/dvr/entry/filemoved?" --data-urlencode "src=$1" --data-urlencode "dst=$BaseFileName.mp4"
| true
|
884b4b14187d79d4bc2fcb383c7c7f76a96130a1
|
Shell
|
4charles2/SHELL
|
/exercice/variable.sh
|
UTF-8
| 4,099
| 3.828125
| 4
|
[] |
no_license
|
#! /bin/bash
# Teaching script: string concatenation and substring extraction in bash.
# (User-facing echo strings are intentionally kept in French.)
variable="bonjour"
suite=" monsieur"
strcat=$variable$suite
other=$variable_$suite # Surprise: the shell looks up a variable named $variable_ (which does not exist), so only $suite remains.
autre=${variable}_${suite} # Braces make the name boundary explicit, so the underscore is kept literally.
echo -n "Devrai afficher bonjour : "
echo $variable
echo -n "Devrai afficher bonjour monsieur : "
echo $strcat
echo -n "Devrai afficher bonjour_ monsieur : "
# Fixed typo: the original echoed the misspelled $ohter, printing nothing.
echo $other
echo -n "Devrai afficher bonjour_ monsieur: "
echo $autre
###################################################################################################################################################################################################################################
# Other ways of joining variables
###################################################################################################################################################################################################################################
my="charles"
last=" tog"
echo -n "Devrait afficher charles tog : "
echo $my.$last # Works, but the dot is printed too
echo -n "Devrait afficher charles tog : "
echo $my@$last # Works, but the @ appears in the middle, like the dot
echo -n "Devrait afficher charles tog : "
echo $my-$last # Same: the - is printed in the middle
# Summary: two variables can be glued without {} using characters that are
# invalid in variable names, but those characters appear in the output.
# Double quotes can also surround the literal text between two variables:
echo "prenom : "$my" nom "$last
###################################################################################################################################################################################################################################
# Examples of the substring-extraction operators
###################################################################################################################################################################################################################################
var=ABCDEFGHIJKLMNOPQRSTUVWXYZ
echo "Fonctionne avec bash mais avec sh renseigner bash dans shebang #!"
echo "On enleve les 5 premieres lettres et on affiche les 2 suivantes"
echo ${var:5:2} # skip the first 5 letters, show the next 2
echo "J'enleve les 20 premieres lettres :"
echo ${var:20} # drop the first 20 letters
# Another approach using the # (prefix-removal) operator
var1=AZERTYUIOPAZERTYUIOP
echo "variable utilisé "$var1
echo "Utilisation de l'opérateur #"
echo "Si la chaine commence par AZE alors on l'enleve"
echo ${var1#AZE}
echo "elimine tout jusqu'au premier T rencontrer si pas de T rien c'est * qui permet de ne pas s'arreter au premier caractere"
echo ${var1#*T}
echo ${var1#T*}
echo "Si la variable commence par un T on l'enleve sinon rien"
echo ${var1#T}
echo "elimination de prefixe a la premiere lettre contenue dans l'intervale"
echo ${var1#*[MNOP]}
echo "Utilisation de l'operateur % et %%"
echo "variable utilisé "$var1
echo ${var1%IOP*}
echo ${var1%*IOP}
echo ${var1%[X-Z]*}
| true
|
456751fb57213db2e3696706862d83b1a64648da
|
Shell
|
B-Rich/community-mirror
|
/rubber/trunk/PKGBUILD
|
UTF-8
| 1,141
| 2.5625
| 3
|
[] |
no_license
|
# $Id$
# Maintainer: Sergej Pupykin <pupykin.s+arch@gmail.com>
# Contributor: Baptiste Grenier <baptiste@bapt.name>
# Arch Linux PKGBUILD for rubber, a Python wrapper that drives LaTeX builds.
pkgname=rubber
pkgver=1.1
pkgrel=9
pkgdesc="A wrapper for LaTeX and friends"
arch=('any')
url="http://iml.univ-mrs.fr/~beffara/soft/rubber/"
license=('GPL')
depends=('python2')
makedepends=('texinfo')
source=("http://launchpad.net/rubber/trunk/$pkgver/+download/rubber-$pkgver.tar.gz"
"rubber-1.1-spaces.patch"
"md5tohashlib.patch")
md5sums=('8087cdb498f51f91c2427c7d0b253189'
'743f644122ba27c783748ac33510e4e8'
'5478bafce24f09501d8c8d6075492a77')
# Apply the two local patches (paths-with-spaces fix, md5 -> hashlib port),
# then configure. NOTE(review): --mandir/--infodir bake ${pkgdir} into the
# configure step instead of using DESTDIR at install time — looks deliberate
# here (package() passes prefix="${pkgdir}/usr"), but confirm before changing.
build() {
cd "${srcdir}/${pkgname}-${pkgver}"
patch -N -i "${srcdir}/rubber-1.1-spaces.patch" "${srcdir}/${pkgname}-${pkgver}/src/rules/latex/__init__.py"
patch -N -i "${srcdir}/md5tohashlib.patch" "${srcdir}/${pkgname}-${pkgver}/src/util.py"
./configure --prefix=/usr --mandir="${pkgdir}/usr/share/man" --infodir="${pkgdir}/usr/share/info"
make
}
# Install into the fakeroot and register a simple bash completion that
# restricts filename completion for `rubber` to *.tex files.
package() {
cd "${srcdir}/${pkgname}-${pkgver}"
make prefix="${pkgdir}/usr" install
mkdir -p $pkgdir/usr/share/bash-completion/completions/
echo "complete -f -X '!*.tex' rubber" >$pkgdir/usr/share/bash-completion/completions/rubber
}
| true
|
6e9e68a443ad91202d02a205f9919329a7f2041d
|
Shell
|
ericstone57/nurun-scripts
|
/ddms/file-rsync-pord2uat-emag3.sh
|
UTF-8
| 337
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
set -x

# Mirror the production eMag3 uploaded-files tree into the local DDMS copy.
PROD=192.168.10.21
PROD_USER=webown
PROD_PATH=/home/webown/sites/emag3/sites/magazine/files
LOCAL_PATH=/home/webown/sites/ddms-emag3/sites/magazine/

# Archive mode, delete extraneous local files; generated asset
# directories are excluded.
rsync_args=(
    -arv
    --delete
    --exclude=styles
    --exclude=js
    --exclude=css
    --exclude=less
)
rsync "${rsync_args[@]}" "$PROD_USER@$PROD:$PROD_PATH" "$LOCAL_PATH"
| true
|
a7f6d5b369d5c5646ae9770cc39db86a724d2440
|
Shell
|
conda-forge/gflags-feedstock
|
/recipe/build.sh
|
UTF-8
| 376
| 2.734375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# conda-forge build script for gflags: out-of-source CMake configure,
# build shared libraries only, test (native builds), then install.
mkdir build_release
cd build_release
# -fPIC so objects can be linked into the shared library.
# CXXFLAGS/CFLAGS/CMAKE_ARGS/PREFIX are supplied by the conda-build
# environment (presumably -- defined outside this script).
export CXXFLAGS="-fPIC ${CXXFLAGS}"
export CFLAGS="-fPIC ${CFLAGS}"
cmake ${CMAKE_ARGS} .. \
-DCMAKE_INSTALL_PREFIX=$PREFIX \
-DINSTALL_HEADERS=ON \
-DBUILD_SHARED_LIBS=ON \
-DBUILD_STATIC_LIBS=OFF \
-DBUILD_TESTING=ON
make
# Tests only run when not cross-compiling: the binaries must match the
# build host's architecture.
if [[ "${CONDA_BUILD_CROSS_COMPILATION}" != "1" ]]; then
ctest
fi
make install
| true
|
c5662a85a4c5fa175a8dc6ccec7f7e23753673c9
|
Shell
|
mintproject/MINT-WorkflowDomain
|
/WINGSWorkflowComponents/GeneralDataPreparation/pihm_cropland_to_points/run
|
UTF-8
| 405
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# WINGS component wrapper: split a table (header + data rows) so that each
# output file gets the header line plus exactly one data row.
# Exit 1 if the previous command failed.
# NOTE(review): this is only invoked once, at the very end, so it checks
# just the final `mv` of the loop below — earlier failures go undetected.
checkExitCode() {
if [ $? -ne 0 ]; then
echo "Error"
exit 1;
fi
}
BASEDIR=`dirname $0`
# io.sh parses the component's I/O contract (1 input, 0 params, 1 output
# list) — presumably setting $INPUTS1 and $OUTPUTS1; defined externally.
. $BASEDIR/io.sh 1 0 1 "$@"
# First line of the input is the header, repeated into every split file.
HEADER=`sed '1!d' $INPUTS1`
count=1
# Each remaining line becomes its own two-line file outfile-N.
# (count increments inside the pipeline's subshell; that is fine here
# because it is only used within the loop itself.)
sed 1d $INPUTS1 | while read line
do
echo $HEADER > outfile-$count
echo $line >> outfile-$count
count=$((count+1))
done
ls -lh
count=1
# Rename the numbered split files onto the declared output names.
for i in $OUTPUTS1
do
mv outfile-$count $i
count=$((count+1))
done
checkExitCode
| true
|
50eb39cad014ad174478dd1277db64fde8c882f4
|
Shell
|
BLshlomo/shell-scripting
|
/busybox_test.sh
|
UTF-8
| 935
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/sh
# Run the e2e test suite in parallel slices against the tox_lb app container.
set -x
addr=tox_lb
port=8080
t=5

# Split test file to smaller ones (10 lines each).
mkdir -p src/test/split
cd src/test/split
split -l 10 ../e2e
cd ..

# Check if the app container is running: up to 5 probes, 3s apart.
while [ $t -gt 0 ]; do
nc -z "$addr" $port && break || sleep 3
t=$((t-1))
done
[ $t -eq 0 ] && echo "App is not up, quitting" && exit 1

# Run test files simultaneously in the background.
rm -f log.log
for b in split/*; do
python e2e_test.py $addr:$port $b 0 2>>log.log &
done

# Waiting for all tests to end.
wait

# Checking for error: any Python traceback in the shared log means failure.
grep -q Traceback log.log && exit 2 || exit 0

# For longer tests, would use a loop similar to the next code.
# For alpine linux (busybox) the syntax would be different, as you dont have
# regular arrays in busybox.
# FIX: the sample below used to be live (uncommented) code. It is unreachable
# after the exit above, references an undefined bash array ${pid[@]}, and uses
# bash-only syntax under #!/bin/sh — so it is kept strictly as a comment:
#
# for p in "${pid[@]}"; do
#     wait "$p"
#     exit=$?
#     [ $exit -ne 0 -a $exit -ne 127 ] && exit $exit || echo "pid $p is over"
#     # Checking for error
#     grep -q error log && exit 2
# done
| true
|
1cf993a71f17b7e65a90d7a3cc394642b4b0b0ac
|
Shell
|
andreology/LinuxShellScripting
|
/greeting.sh
|
UTF-8
| 106
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Variable-expansion demo: greet $NAME and report their favorite color.
NAME="Bob Hank"
FAVORITE_COLOR=blue
# Quote the message so it is printed verbatim even if a value ever
# contains glob characters or extra whitespace (ShellCheck SC2086).
echo "HI $NAME, your fav color is $FAVORITE_COLOR."
| true
|
0e65228877abcfb83c45e72af5c24d3c1250993b
|
Shell
|
cheuklau/spyglass
|
/src/kafka/mvp/stop_kafka.sh
|
UTF-8
| 397
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Tear down the local Kafka/Zookeeper MVP stack and wipe its state.

# Stop Kafka first; warn but continue if the stop script fails.
if ! kafka-server-stop.sh; then
echo "WARN: Unable to stop Kafka"
fi

# Then stop Zookeeper, with the same best-effort handling.
if ! zookeeper-server-stop.sh; then
echo "WARN: Unable to stop Zookeeper"
fi

# Remove Zookeeper and Kafka data directories.
rm -rf kafka_2.12-2.0.0/data/zookeeper
rm -rf kafka_2.12-2.0.0/data/kafka

# Remove output and error files.
rm *.out *.err

exit 0
| true
|
c4239725d47f5cf2bd3ec9906998d1f918d006b3
|
Shell
|
carlosmaniero/front-end-testing-guide
|
/visual-tests/set-env.sh
|
UTF-8
| 363
| 3.203125
| 3
|
[] |
no_license
|
# Detect the host OS and export STORYBOOK_HOST for the visual tests.
# On Linux the container shares the host network, so localhost works;
# elsewhere (Docker Desktop) the host is reached via host.docker.internal.
unameOut="$(uname -s)"
case "${unameOut}" in
    Linux*)     machine=Linux;;
    Darwin*)    machine=Mac;;
    CYGWIN*)    machine=Cygwin;;
    MINGW*)     machine=MinGw;;
    *)          machine="UNKNOWN:${unameOut}"
esac

# Use '=' rather than '==': '==' inside single-bracket [ ] is a bashism
# and fails under POSIX sh; this file has no shebang and may be sourced
# from either.
if [ "$machine" = "Linux" ]; then
    export STORYBOOK_HOST="localhost:6006"
else
    export STORYBOOK_HOST="host.docker.internal:6006"
fi
| true
|
7fc2f4f5f28bba338e3da67c778931920ba6db1d
|
Shell
|
redhat-nfvpe/openshift-baremetal-ci
|
/ovn/upi/deploy.sh
|
UTF-8
| 4,940
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# End-to-end bare-metal UPI deployment of OpenShift (kni-upi-lab based):
# clones the matching branch for $OPENSHIFT_MAJOR_VERSION, prepares the
# host, deploys cluster + workers, waits for bootstrap/install, and
# patches the image-registry operator so the install can finish.
set -e
set -x
export OPENSHIFT_MAJOR_VERSION=${OPENSHIFT_MAJOR_VERSION:-"4.3"}
export NETWORK_TYPE=${NETWORK_TYPE:-"OVNKubernetes"}
# export PULL_SECRET=${PULL_SECRET:-""}
# [ -z $PULL_SECRET ] && echo "empty pull secret, exiting" && exit 1
# Runs on exit and on SIGHUP: collect logs, kill the bootstrap VM, dump
# cluster state. NOTE(review): the trailing popd assumes the pushd below
# already ran; if cleanup fires before the clone, popd fails (harmless
# only because the shell is exiting) — confirm this is intended.
trap cleanup 0 1
cleanup() {
# Gather bootstrap & master logs
./requirements/openshift-install gather bootstrap \
--dir=./ocp --bootstrap 192.168.111.10 \
--master 192.168.111.11 \
--master 192.168.111.12 \
--master 192.168.111.13 || true
# Destroy bootstrap VM
# virsh destroy dev-bootstrap || true
virsh list --name | grep bootstrap | xargs virsh destroy || true
./requirements/oc --kubeconfig ./ocp/auth/kubeconfig get nodes || true
./requirements/oc --kubeconfig ./ocp/auth/kubeconfig get co || true
./requirements/oc --kubeconfig ./ocp/auth/kubeconfig get clusterversion || true
popd
}
yum install -y git
# git clone https://github.com/redhat-nfvpe/kni-upi-lab.git
# git fetch origin pull/95/head:fix
# git checkout fix
# Pick the kni-upi-lab branch matching the requested OpenShift version;
# unrecognized versions fall through to an existing checkout on master.
if [ $OPENSHIFT_MAJOR_VERSION == '4.3' ]; then
# we want to test latest 4.3.z nightly build instead of 4.3 release
rm -rf kni-upi-lab
git clone https://github.com/zshi-redhat/kni-upi-lab.git
pushd kni-upi-lab
git checkout 4.3-nightly
elif [ $OPENSHIFT_MAJOR_VERSION == '4.4' ]; then
rm -rf kni-upi-lab
git clone https://github.com/zshi-redhat/kni-upi-lab.git
pushd kni-upi-lab
git checkout 4.4
elif [ $OPENSHIFT_MAJOR_VERSION == '4.5' ]; then
#git checkout 4.5-boot-index
rm -rf kni-upi-lab
git clone https://github.com/zshi-redhat/kni-upi-lab.git
pushd kni-upi-lab
git checkout 4.5
elif [ $OPENSHIFT_MAJOR_VERSION == '4.6' ]; then
#git checkout 4.5-boot-index
rm -rf kni-upi-lab
git clone https://github.com/zshi-redhat/kni-upi-lab.git
pushd kni-upi-lab
git checkout 4.6
else
pushd kni-upi-lab
git checkout master
fi
# Pin the RHCOS release in common.sh and drop in the site configuration.
sed -i -e "s/^OPENSHIFT_RHCOS_MAJOR_REL=.*/OPENSHIFT_RHCOS_MAJOR_REL=\"${OPENSHIFT_MAJOR_VERSION}\"/g" ./common.sh
cp -rf /root/upi-config/site-config.yaml cluster/
cp -rf /root/upi-config/install-config.yaml cluster/
cp -rf /root/upi-config/ha-lab-ipmi-creds.yaml cluster/
# update network type {OpenShiftSDN|OVNKubernetes}, default is OVNKubernetes
sed -i -e "s/networkType: .*/networkType: ${NETWORK_TYPE}/g" cluster/install-config.yaml
make clean
./prep_bm_host.sh
make all
sleep 5
make con-start
sleep 5
podman ps
sleep 2
sleep 20
./scripts/manage.sh deploy cluster
sleep 30
./scripts/manage.sh deploy workers
# Wait for extra 5min for bootstrap to complete
sleep 320
# Wait for bootstrap complete
./requirements/openshift-install --dir ./ocp wait-for bootstrap-complete --log-level debug
sleep 30
# Install the client binaries system-wide and stash the kubeconfig.
cp -rf ./requirements/oc /usr/local/bin/
cp -rf ./requirements/kubectl /usr/local/bin/
cp -rf ./requirements/openshift-install /usr/local/bin/
cp -rf ./ocp/auth/kubeconfig /root/kubeconfig
mkdir -p ~/.kube || true
# Copy kubeconfig to user root <~/.kube/config>
# Some tests detect kubeconfig automatically
cp -rf ./ocp/auth/kubeconfig ~/.kube/config
./requirements/oc --kubeconfig ./ocp/auth/kubeconfig get nodes || true
sleep 30
# Start Openshift-installer wait-for when image-registry is rendered
# This allows us to wait a few more mins for cluster to come up
while [ "$(./requirements/oc --kubeconfig ./ocp/auth/kubeconfig get configs.imageregistry.operator.openshift.io | grep cluster)" == "" ]
do
sleep 10
echo "waiting for image-registry operator to be deployed"
done
sleep 20
# Patch storage to emptyDir to workthrough warning: "Unable to apply resources: storage backend not configured"
# Comment out, this is only required for pre-4.2 releases
# Patch storage to emptyDir first, then patch
# configs.imageregistry.operator.openshift.io
# to Managed state.
./requirements/oc --kubeconfig ./ocp/auth/kubeconfig patch configs.imageregistry.operator.openshift.io cluster \
-p '{"spec":{"storage":{"emptyDir":{}}}}' --type='merge'
sleep 1
# Patch storage to 'Managed' managementState.
./requirements/oc --kubeconfig ./ocp/auth/kubeconfig patch configs.imageregistry.operator.openshift.io cluster \
-p '{"spec":{"managementState": "Managed"}}' --type='merge'
# Patch storage to 'Removed' managementState. This makes image-registry operator become Available immediately
# ./requirements/oc --kubeconfig ./ocp/auth/kubeconfig patch configs.imageregistry.operator.openshift.io cluster \
#	-p '{"spec":{"managementState": "Removed"}}' --type='merge'
# Wait for install complete
# Force exit with 0 as workers won't be ready (due to more NICs need to be inspected)
./requirements/openshift-install --dir ./ocp wait-for install-complete --log-level debug || true
# Manually approve pending CSRs from worker node that join late
./requirements/oc get csr -o name | xargs -n 1 ./requirements/oc adm certificate approve
# Wait again for install complete after approving worker CSRs
./requirements/openshift-install --dir ./ocp wait-for install-complete --log-level debug
| true
|
46098730ad35e0b6e04291892a68afa5831ee9e2
|
Shell
|
romilsrivastava/romilss
|
/employeewageuc4.sh
|
UTF-8
| 323
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Employee wage UC4: roll a random attendance value (0..2) and compute the
# daily wage for full-time or part-time presence; absent earns nothing.
p=$(( RANDOM % 3 ))
WagePerHour=20
FullDayHour=16
PartTimeHour=8

if (( p == 1 )); then
echo "employee is present for full time"
dailywage=$(( WagePerHour * FullDayHour ))
elif (( p == 2 )); then
echo "employee is present for part time"
dailywage=$(( WagePerHour * PartTimeHour ))
else
echo "employee is absent"
fi
| true
|
0584a7038c9e9b160f2d327cd4d66df7425bce55
|
Shell
|
airaup/airaup-sgi
|
/hack/encrypt_passwords.sh
|
UTF-8
| 172
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Copy the password-encryption helper into the running sgi_php container
# and execute it there with the container's PHP interpreter.
CONTAINER=sgi_php
FILE=encrypt_passwords.php
DEST_PATH=/tmp/

docker cp "$FILE" "$CONTAINER":"$DEST_PATH"
docker exec -it "$CONTAINER" php -f "$DEST_PATH"/"$FILE"
| true
|
2f1aeca4b423110343bec1edf82bea754acce600
|
Shell
|
johnsonyue/targets
|
/map.sh
|
UTF-8
| 1,076
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Annotate each stdin line with the dataset key whose IP ranges contain the
# line's third whitespace-separated field (an IPv4 address), appending the
# key (or '--' when nothing matches) after a tab.
#
# Usage: map.sh <ranges.json>
#   The JSON (presumably) maps names to lists of {ip_from, ip_to} integer
#   ranges sorted by ip_from -- the binary search below relies on that order.
test $# -lt 1 && exit
inter=$1
# The embedded Python does the work; the here-doc delimiter is quoted
# ("EOF"), so nothing inside it is expanded by the shell. hit() binary-
# searches one name's sorted range list for the integer form of the IP.
python <(
cat << "EOF"
import sys
import json
import socket
import struct
f=open(sys.argv[1])
obj=json.load(f)
f.close()
def ip_str2int(ip):
	packedIP = socket.inet_aton(ip)
	return struct.unpack("!L", packedIP)[0]
def hit(i,o):
	if i < o[0]['ip_from']:
		return False
	if i > o[-1]['ip_from']:
		return i < o[-1]['ip_to']
	l=0;r=len(o)-1;
	while True:
		m=(l+r)/2
		#print l,m,r
		#print o[l]['ip_from'],o[m]['ip_from'],o[r]['ip_from']
		if o[m]['ip_from'] > i and o[m+1]['ip_from'] > i:
			#print 'left'
			r=m
		elif o[m]['ip_from'] < i and o[m+1]['ip_from'] < i:
			#print 'right'
			l=m+1
		else:
			#print 'middle'
			break
	return (o[m]['ip_to'] > i) or (o[m+1]['ip_from'] == i)
def map(i):
	for k,v in obj.items():
		#print "hit: " + str(i) + ", " + str(k)
		if v and hit(i,v):
			return k
	return '--'
while True:
	try:
		l=raw_input().strip()
	except:
		break
	i=l.split()[2]
	#print "map: " + str(i)
	i=ip_str2int(i)
	print l + '\t' + map(i)
EOF
) $inter
| true
|
481f2b4f7a0e4ebfd93b29331d08965c93aa8c44
|
Shell
|
pabplanalp/pvmail
|
/production/copper/servlet/acstart.sh
|
UTF-8
| 633
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Changes also must be added to the other environments(Production/Test/ETC)
#
# Activate the staged vmail deployment (stage -> live) and start Tomcat
# as the tomcat user with the live resource directory on the Java options.
#parameters
JRE_HOME=/usr/java/jdk1.5.0_07
TOMCAT_HOME=/usr/local/tomcat
VMAIL_STAGE=/usr/local/vmail/stage
VMAIL_LIVE=/usr/local/vmail/live
JAVA_OPTS="-Xms768m -Xmx768m -DAppPropertyDir=$VMAIL_LIVE/resource/property -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/usr/local/tomcat/heapdumps"
# NOTE(review): VMAIL_HOME is exported but never assigned in this script
# (only VMAIL_STAGE/VMAIL_LIVE are set above) -- confirm whether it comes
# from the calling environment or whether VMAIL_LIVE was intended here.
export JRE_HOME TOMCAT_HOME VMAIL_HOME JAVA_OPTS
#activate staging: replace live wholesale with the staged copy and hand
#ownership to the tomcat user/group.
rm -rf $VMAIL_LIVE
cp -r $VMAIL_STAGE $VMAIL_LIVE
chown -R tomcat $VMAIL_LIVE
chgrp -R tomcat $VMAIL_LIVE
#run tomcat as the tomcat user
su -c $TOMCAT_HOME/bin/startup.sh tomcat
| true
|
c0709ed94e0ff35ad9a6aa8d2603eb9ef03a34ba
|
Shell
|
ErvinCs/OS
|
/Shell101PentruTarani/intros/wildcards.sh
|
UTF-8
| 352
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh
# Wildcard (glob) demo: copy selected files between ./tmp subdirectories.
echo "Current dir: " "$PWD"

# Copy every .c and every .html file from tmp/a into tmp/b.
# The leading "/" before $PWD is redundant ($PWD is already absolute;
# "//path" resolves the same), but it is harmless either way.
cp /"$PWD"/tmp/a/*.c /"$PWD"/tmp/b
cp "$PWD"/tmp/a/*.html "$PWD"/tmp/b

# Copy everything from tmp/a into tmp/c, preserving attributes (-a).
cp -a "$PWD"/tmp/a/* "$PWD"/tmp/c
| true
|
454830d4d0557668035ac4583c83fe886e32613c
|
Shell
|
timattil/trytls
|
/stubs/bash-opensslSClient/run
|
UTF-8
| 529
| 3.515625
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash
# trytls stub: probe a TLS endpoint with `openssl s_client` and report
# whether certificate verification succeeded.
#
# Usage: run <host> <port> [ca-bundle]
# Exit codes: 0 = verdict printed, 1 = connection failed, 2 = unsupported.
host=$1 #e.g. localhost
port=$2
ca_bundle=$3

if [ "$#" -lt 2 ]; then
    echo "UNSUPPORTED"
    exit 2
fi

# "Q" on stdin closes the connection right after the handshake; stderr is
# folded into the captured output so failures are visible too.
if [[ "$ca_bundle" ]]; then
    resp=$(echo "Q" | openssl s_client -CAfile "$ca_bundle" -servername "$host" -connect "$host:$port" 2>&1)
else
    resp=$(echo "Q" | openssl s_client -servername "$host" -connect "$host:$port" 2>&1)
fi

# openssl prints "Verify return code: N (text)"; code 0 means the chain
# verified against the trust store.
grepped=$(echo "$resp" | grep "Verify return code:")
if [[ "$grepped" =~ " 0 " ]]; then
    echo "VERIFY SUCCESS"
elif [[ $grepped ]]; then
    echo "VERIFY FAILURE"
else
    # No verify line at all: the connection itself failed. Quote the
    # expansion so openssl's message is not word-split or glob-expanded
    # (the original `echo $resp` mangled whitespace and could expand *).
    echo "$resp"
    exit 1
fi
exit 0
| true
|
9dc982e2cbcf632c965a242ef6ee0e310b7aa42c
|
Shell
|
n-johnson/bin
|
/docs
|
UTF-8
| 144
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# docs: page a file from ~/docs with less, or list the directory when no
# file name is given.
__track_usage "$(basename "$0")"

file=$1
if [ -z "$file" ]; then
  # No argument: just show what is available.
  ls -al "$HOME/docs"
else
  less "$HOME/docs/$file"
  exit 0
fi
| true
|
7edb70030beead9b28328524e61047e57dd4d356
|
Shell
|
luxe/unilang
|
/source/code/scratch/config/shell/bash/.bashrc_impl/prompt.sh
|
UTF-8
| 771
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
# Repeat $1 until it spans $2 columns, and assign the result to PS1.
# NOTE(review): despite the name this does not print -- it overwrites PS1
# (used by the commented "line across terminal" feature below).
printf_new() {
str=$1
num=$2
v=$(printf "%-${num}s" "$str")
PS1="${v// /$str}"
}
#include the branch detection file (provides __git_ps1)
source ~/.bashrc_impl/git/.git-prompt.sh
#PROMPT_COMMAND is run by bash before each prompt, so PS1 is rebuilt every time
PROMPT_COMMAND=set_prompt
# Build PS1: [author/repo][branch][cwd], with parts in color; the git
# segments are emitted only inside a git repository (via __git_ps1).
set_prompt () {
#uncomment to draw line across terminal"
#printf_new "═" $COLUMNS;
#PS1+="\r"
#adds the author and repo name (if inside a git repository)
PS1='$(__git_ps1 "[\[\x1b[38;5;33m\]$(git-repo-printer --author --repo)\[\x1b[0m\]]")'
#add the current branch (if inside a git repository)
PS1+='$(__git_ps1 "[\[\x1b[38;5;33m\]%s\[\x1b[0m\]]")'
#shows the path
PS1+="[\[\e[38;5;144m\]\w"
#reset colors
PS1+="\[\033[0m\]]\n"
}
| true
|
c13ffbe8af4ae637c9492bf58c7e1a44a17299c2
|
Shell
|
samsucik/prosodic-lid-globalphone
|
/egs/multi_en/s5/local/tedlium_lm_decode.sh
|
UTF-8
| 951
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2016 Allen Guo
# Apache 2.0
# This script decodes the tedlium test set using a tedlium LM, which is assumed
# to be prepared already using the tedlium recipe.
. ./cmd.sh
. ./path.sh
set -e
stage=0
lib=../../tedlium/s5
lang=data/lang_tedlium_tg
srilm_opts="-subset -prune-lowprobs -unk -tolower -order 3"
step=tri5
. utils/parse_options.sh
# Stage 0: build the decoding lang dir from the pruned cantab-TEDLIUM
# trigram LM (SRILM-formatted via format_lm_sri.sh).
# You do not need to redo this stage when changing the "step" argument
if [ $stage -le 0 ]; then
utils/format_lm_sri.sh --srilm-opts "$srilm_opts" \
data/lang $lib/db/cantab-TEDLIUM/cantab-TEDLIUM-pruned.lm3.gz \
data/local/dict/lexicon.txt $lang
fi
graph_dir=exp/multi_a/$step/graph_tedlium_tg
# Stage 1: compose the decoding graph (HCLG) for the chosen model step.
if [ $stage -le 1 ]; then
utils/mkgraph.sh $lang \
exp/multi_a/$step $graph_dir
fi
# Stage 2: fMLLR decoding of the tedlium test set; $decode_cmd comes from
# cmd.sh (presumably the cluster/queue wrapper).
if [ $stage -le 2 ]; then
steps/decode_fmllr.sh --nj 11 --cmd "$decode_cmd" --config conf/decode.config $graph_dir \
data/tedlium/test exp/multi_a/$step/decode_tedlium_tg_tedlium
fi
| true
|
44a77ba68132d450cc2586ac48f5d40f9a74b510
|
Shell
|
petrelharp/popassembly
|
/beagle.sh
|
UTF-8
| 1,012
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
#PBS -S /bin/bash
#PBS -q cmb
#PBS -l nodes=1:ppn=1
#PBS -l walltime=24:00:00
#PBS -l pmem=24gb
#PBS -l mem=24gb
#PBS -l vmem=24gb
# Run Beagle genotype phasing/IBD on a .vcf.gz file, tagging the output
# with a random run id. Works both on the USC cluster and locally.
# grr, java
# see http://stackoverflow.com/questions/31075761/java-8-reserves-minimum-1g-for-metaspace-despite-maxmetaspacesize
if [ -e /usr/usc/java/1.8.0_45/setup.sh ]
then
# on the cluster
source /usr/usc/java/1.8.0_45/setup.sh
export _JAVA_OPTIONS="-Xmx18000m -XX:MaxMetaspaceSize=1200m"
JAVA="java $_JAVA_OPTIONS"
BEAGLE="/home/rcf-40/pralph/cmb/software/beagle/beagle.jar"
else
# at home
JAVA="/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java"
BEAGLE="/home/peter/software/beagle/beagle.jar"
fi
if ! [ $# -eq 1 ]
then
echo "Usage: beagle.sh (.vcf file)"
exit 1
fi
VCF_FILE="$1"
RUN_ID=$RANDOM
echo "VCF file: $VCF_FILE"
echo "beagle: $BEAGLE"
# NOTE(review): this only tests that VCF_FILE is non-empty (guaranteed
# false to trigger only if $1 was the empty string, since the $# check
# passed); despite its message it never checks that the file or the
# Beagle jar actually exist -- consider [ -e ] tests if that was intended.
if [[ -z ${VCF_FILE:-} ]]
then
echo "Can't find $VCF_FILE or $BEAGLE"
exit 1
fi
# ${VCF_FILE%%.vcf.gz} strips the .vcf.gz suffix, so the output basename
# is <input>.<run id>.beagle.
$JAVA -jar $BEAGLE gt=$VCF_FILE ibd=true out=${VCF_FILE%%.vcf.gz}.${RUN_ID}.beagle
| true
|
4f72d66ed8242acfb77995434f9ebb9975765f25
|
Shell
|
oncoapop/data_reporting
|
/beast_scripts/check_CGs.sh
|
UTF-8
| 1,263
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# Script to check the actual CG that matched factors to the CG that we designed for
# This will look at two levels.
# Firstly, check to see if there are exact matches (ie same breakpoints)
# Secondly, check to see if there are matches in both the conjoined gene (but maybe the breakpoints differ)
# Files
wd="/home/dyap/Projects/Takeda_T3/CG"
motifs="HCT116_hTERT_CG_high_motif_dens.tsv"
ordered="CG_primers_ordered.txt"
outfile="CG-matches.txt"
cd $wd
# Exact matches
echo "==============" > $outfile
echo "Exact matches:" >> $outfile
echo "==============" >> $outfile
for i in `cat $ordered`
do
grep "$i" $motifs >> $outfile
echo $i
grep "$i" $motifs
echo "======================"
done
# Gene pair matches
echo "=================" >> $outfile
echo "Gene pair matches:" >> $outfile
echo "=================" >> $outfile
for j in `cat $ordered | sed 's/\@.*.*\:/:/' | sed 's/\@.*.$//'`
do
gene1=`echo $j | awk -F":" '{print $1}'`
gene2=`echo $j | awk -F":" '{print $2}'`
test=`grep "$gene1" $motifs | grep $gene2`
if [[ $test != "" ]]
then
grep "$gene1" $ordered | grep $gene2 >> $outfile
grep "$gene1" $motifs | grep $gene2 >> $outfile
fi
echo $j
grep "$gene1" $motifs | grep $gene2
echo "======================"
done
| true
|
13e5ede7ad421d206af7c99a954c42e12253f0e6
|
Shell
|
bithead2k/pg_bash
|
/pguri2pgpass.bash
|
UTF-8
| 1,922
| 4.03125
| 4
|
[
"PostgreSQL"
] |
permissive
|
#!/usr/bin/env bash
# FIX: portable shebang; the original pointed at one user's Homebrew bash
# (#!/Users/kroybal/homebrew/bin/bash) and broke everywhere else.
#===============================================================================
#
#          FILE: pguri2pgpass.bash
#
#         USAGE: ./pguri2pgpass.bash
#
#   DESCRIPTION: Change a URI in the form of
#                postgres://user:pw@host:port/db?options
#                to
#                host:port:user:database:pass
#
#       OPTIONS: ---
#  REQUIREMENTS: ---
#          BUGS: ---
#         NOTES: ---
#        AUTHOR: Kirk L. Roybal (DBRE), kirk@webfinish.com
#  ORGANIZATION: Private
#       CREATED: 04/19/2021 11:46:02
#      REVISION:  ---
#===============================================================================
ScriptVersion="1.0"
#===  FUNCTION  ================================================================
#         NAME:  usage
#  DESCRIPTION:  Display usage information.
#===============================================================================
function usage ()
{
cat <<- EOT
Usage :  ${0##/*/} [options] [--]
echo "postgres://this_guy:some_password@remote_host:5432/the_database" | pguri2pgpass.bash
remote_host:5432:the_database:this_guy:some_password
Options:
-f|file  [name]    Take input from a file
-h|help       Display this message
-v|version    Display script version
EOT
}    # ----------  end of function usage  ----------
#-----------------------------------------------------------------------
#  Handle command line arguments
#-----------------------------------------------------------------------
input='-' # STDIN (cat treats "-" as standard input)
while getopts ":f:hv" opt
do
case $opt in
f|file     )  input="$OPTARG";;
h|help     )  usage; exit 0   ;;
v|version  )  echo "$0 -- Version $ScriptVersion"; exit 0   ;;
\? )  echo -e "\n  Option does not exist : $OPTARG\n"
usage; exit 1   ;;
esac    # --- end of case ---
done
shift $((OPTIND-1))
# Rewrite postgres://user:pw@host:port/db as host:port:db:user:pw.
# Quote "$input" so a file name with spaces (or the literal "-") is passed
# to cat as a single argument.
cat "$input" | sed -e 's_postgres://\(.*\):\(.*\)@\(.*\):\(.*\)/\(.*\)_\3:\4:\5:\1:\2 _'
| true
|
a2ad1bd20cb52968e1047e801db8c81bcb2e2117
|
Shell
|
SerenityOS/serenity
|
/Toolchain/BuildPython.sh
|
UTF-8
| 1,518
| 3.9375
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Download, verify, and build a host Python toolchain under Toolchain/Local,
# using the version/checksum pinned by Ports/python3/version.sh.
set -e
# This file will need to be run in bash, for now.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# shellcheck source=/dev/null
. "${DIR}/../Meta/shell_include.sh"
exit_if_running_as_root "Do not run BuildPython.sh as root, parts of your Toolchain directory will become root-owned"
PREFIX_DIR="$DIR/Local/python"
BUILD_DIR="$DIR/Build/python"
TARBALLS_DIR="$DIR/Tarballs"
# Defines PYTHON_VERSION, PYTHON_ARCHIVE, PYTHON_ARCHIVE_URL and
# PYTHON_ARCHIVE_SHA256SUM used below.
# shellcheck source=/dev/null
source "$DIR/../Ports/python3/version.sh"
mkdir -p "${TARBALLS_DIR}"
pushd "${TARBALLS_DIR}"
# Download only when the tarball is not already cached.
if [ ! -e "${PYTHON_ARCHIVE}" ]; then
echo "Downloading Python from ${PYTHON_ARCHIVE_URL}..."
curl -O "${PYTHON_ARCHIVE_URL}"
else
echo "${PYTHON_ARCHIVE} already exists, not downloading archive"
fi
# Verify the checksum; a corrupted download is deleted so a rerun refetches.
if ! sha256sum --status -c <(echo "${PYTHON_ARCHIVE_SHA256SUM}" "${PYTHON_ARCHIVE}"); then
echo "Python archive SHA256 sum mismatch, please run script again"
rm -f "${PYTHON_ARCHIVE}"
exit 1
fi
if [ ! -d "Python-${PYTHON_VERSION}" ]; then
echo "Extracting ${PYTHON_ARCHIVE}..."
tar -xf "${PYTHON_ARCHIVE}"
else
echo "Python-${PYTHON_VERSION} already exists, not extracting archive"
fi
popd
# Parallelism: honor a caller-supplied MAKEJOBS, else use all CPUs.
NPROC=$(get_number_of_processing_units)
[ -z "$MAKEJOBS" ] && MAKEJOBS=${NPROC}
mkdir -p "${PREFIX_DIR}"
mkdir -p "${BUILD_DIR}"
# Out-of-tree configure + build + install into the toolchain prefix.
pushd "${BUILD_DIR}"
"${TARBALLS_DIR}"/Python-"${PYTHON_VERSION}"/configure --prefix="${PREFIX_DIR}"
make -j "${MAKEJOBS}"
make install
popd
| true
|
cc020896ab873c5ff9d52a936a215881ce54dd44
|
Shell
|
adsfgg/ModFramework
|
/setup
|
UTF-8
| 1,824
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# ModFramework installer: generate a new mod project named $1 under build/,
# copying the framework sources and rewriting the %__MODNAME__% placeholder.
# Print an error and abort.
die() {
echo "Error: $1"
exit 1
}
# Show usage and exit with status 2 (also used as the bad-args path).
usage() {
echo "Usage: setup"
echo ""
echo "./setup prjName"
echo ""
echo "Installs ModFramework"
exit 2
}
onError() {
die "Unexpected error occurred"
}
# Any command failing triggers onError via the ERR trap.
trap onError ERR
[[ $# -ne 1 ]] && usage
prjName="$1"
shift
# Reject names that would break paths or the sed rewrite below.
[[ $prjName == "" ]] && die "Project name cannot be blank"
[[ $prjName =~ " " ]] && die "Project name cannot contain spaces"
# Start from a clean build tree.
rm -rf build
echo "Creating project \"$prjName\""
mkdir -p build/docs/revisions
mkdir -p build/docugen
mkdir -p build/src/lua/entry
mkdir -p build/src/lua/$prjName/ModFramework
echo "Moving files..."
cp -R src/lua/ModFramework build/src/lua/$prjName/
cp src/lua/Config.lua build/src/lua/$prjName/
cp src/lua/Modules.lua build/src/lua/$prjName/
cp src/lua/entry/ModFramework.entry build/src/lua/entry/$prjName.entry
cp -R src/docugen build
touch build/docugen.db
cp src/docugen.py build/docugen.py
cp src/make.sh build/make.sh
cp LICENSE build/src/lua/$prjName/ModFramework/
cp README.md build/src/lua/$prjName/ModFramework/
cat <<EOL >> build/.gitignore
build
docugen/__pycache__
EOL
echo "Renaming target variables"
# Replace %__MODNAME__% with the project name in every generated file.
find build/src/lua/. build/docugen.py build/docugen/*.py -type f -iname "*" -exec sed -i "s/%__MODNAME__%/$prjName/g" {} \;
echo "Creating mod structure..."
mkdir build/src/lua/$prjName/Modules
echo "Installing example module..."
mkdir -p build/src/lua/$prjName/Modules/Example/Client
# Unquoted EOL: $prjName is expanded into the generated Lua example.
cat << EOL >> build/src/lua/$prjName/Modules/Example/Client/client.lua
local logger = $prjName:GetModule('logger')
logger:PrintInfo("Hello, world!")
EOL
cat << EOL >> build/src/lua/$prjName/Modules/Example/.docugen
Example.Test
Example of docugen!
EOL
echo "Build complete"
echo ""
echo "Copy everything inside build/ to your project."
echo "Happy modding!"
| true
|
7948082295f84446fe5ec412ba98fb6f72a11f6d
|
Shell
|
lsmolic/hangoutsrc
|
/Resources/Deployment/scripts/full-qa-deploy.sh
|
UTF-8
| 1,485
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Full QA deployment driver: prompts for the QA webserver and stateserver
# instances, then runs each component deploy script in order.
# FIX: the file carried both "#!/bin/sh" and "#!/bin/bash" shebangs; it
# uses bash-only features (select, [[ ]]), so it must run under bash.
echo " ################################# "
echo " ####### BEGINNING DEPLOY TO QA ####### "
echo " ################################# "

# Ask which QA webserver instance to target; re-prompts on invalid input.
select_webserver_instance()
{
echo "Which QA webserver instance do you wish to deploy?"
select webInstance in hangoutqa hangoutqa2; do
if [[ -n "$webInstance" ]];
then
export webserverInstance=$webInstance
break
else
echo 'invalid'
select_webserver_instance
fi
done
}

# Ask which QA stateserver instance to target; re-prompts on invalid input.
select_stateserver_instance()
{
echo "Which QA stateserver instance do you wish to deploy?"
select stateInstance in stateserver stateserver2; do
if [[ -n "$stateInstance" ]];
then
export stateserverInstance=$stateInstance
break
else
echo 'invalid'
# FIX: this previously recursed into select_webserver_instance
# (copy-paste error), re-prompting for the wrong tier on bad input.
select_stateserver_instance
fi
done
}

#echo "DID YOU RUN THE DATA COMPARE YET? "
#read dataCompare
#./StructureCompare-DB-qa.sh $1;
select_webserver_instance
./WebServices-qa.sh $1;
./WebSite-qa.sh $1;
./SecureWebSite-qa.sh $1;
select_stateserver_instance
./StateServer-qa.sh $1;
./Assets-qa.sh $1;
#no need to back up db
#Data compare can't work from here because truncate doesn't have a command line option.. PISSY PISSY PISSY
#./DataCompare-DB-qa.sh $1;
echo " "
echo " "
echo " "
echo " "
echo "#################################"
echo "#################################"
echo "~~~......## DEPLOY TO QA COMPLETE ##......~~~"
echo "#################################"
echo "#################################"
echo " "
echo " "
echo " "
echo " "
| true
|
50c3b9c8785945ae3d41d17bb0d8f49126b4fa20
|
Shell
|
Primos-tn/backend-app
|
/scripts/redis.installation.sh
|
UTF-8
| 3,672
| 3.578125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build Redis from the stable tarball and install it as a proper init-script
# managed service on a Debian/Ubuntu box (instance on port 6379).
wget http://download.redis.io/redis-stable.tar.gz
tar xvzf redis-stable.tar.gz
cd redis-stable
# FIX: the tarball was downloaded in the parent directory, so after the
# `cd` above the old `rm redis-stable.tar.gz` always failed.
rm ../redis-stable.tar.gz
# You need this
sudo apt-get install -y tcl
make
sudo cp src/redis-server /usr/local/bin/
sudo cp src/redis-cli /usr/local/bin/
#Running Redis from the command line is fine just to hack a bit with it or for development. However at some point you'll have some actual application to run on a real server. For this kind of usage you have two different choices:
# Run Redis using screen.
# Install Redis in your Linux box in a proper way using an init script, so that after a restart everything will start again properly.
#A proper install using an init script is strongly suggested. The following instructions can be used to perform a proper installation using the init script shipped with Redis 2.4 in a Debian or Ubuntu based distribution.
#We assume you already copied redis-server and redis-cli executables under /usr/local/bin.
# Create a directory where to store your Redis config files and your data:
sudo mkdir /etc/redis
sudo mkdir /var/redis
# Copy the init script that you'll find in the Redis distribution under the utils directory into /etc/init.d. We suggest calling it with the name of the port where you are running this instance of Redis. For example:
sudo cp utils/redis_init_script /etc/init.d/redis_6379
# Edit the init script (interactive step -- this opens vi).
sudo vi /etc/init.d/redis_6379
#Make sure to modify REDISPORT accordingly to the port you are using. Both the pid file path and the configuration file name depend on the port number.
# Copy the template configuration file you'll find in the root directory of the Redis distribution into /etc/redis/ using the port number as name, for instance:
sudo cp redis.conf /etc/redis/6379.conf
# Create a directory inside /var/redis that will work as data and working directory for this Redis instance:
sudo mkdir /var/redis/6379
# Edit the configuration file, making sure to perform the following changes:
# FIX: the checklist below used to be bare "* ####" lines -- not comments,
# so the shell glob-expanded "*" and tried to execute the result.
# * #### Set daemonize to yes (by default it is set to no).
# * #### Set the pidfile to /var/run/redis_6379.pid (modify the port if needed).
# * #### Change the port accordingly. In our example it is not needed as the default port is already 6379.
# * #### Set your preferred loglevel.
# * #### Set the logfile to /var/log/redis_6379.log
# * #### Set the dir to /var/redis/6379 (very important step!)
# Finally add the new Redis init script to all the default runlevels using the following command:
# * ### if doesnt' work try https://github.com/antirez/redis/issues/804
# go to redis-stable/ and run install
sudo update-rc.d redis_6379 defaults
#You are done! Now you can try running your instance with:
sudo /etc/init.d/redis_6379 start
#Make sure that everything is working as expected:
# Try pinging your instance with redis-cli.
# Do a test save with redis-cli save and check that the dump file is correctly stored into /var/redis/6379/ (you should find a file called dump.rdb).
# Check that your Redis instance is correctly logging in the log file.
#If it's a new machine where you can try it without problems make sure that after a reboot everything is still working.
cd
#Note: In the above instructions we skipped many Redis configuration parameters that you would like to change, for instance in order to use AOF persistence instead of RDB persistence, or to setup replication, and so forth. Make sure to read the example redis.conf file (that is heavily commented) and the other documentation you can find in this web site for more information.
| true
|
3187defcc69a59eda49eb5c1c7bc80a98abac2d5
|
Shell
|
JoyMichael7842/OS-lab
|
/Lab1/ex1.sh
|
UTF-8
| 138
| 3.203125
| 3
|
[] |
no_license
|
# Classify an integer read from stdin as negative, positive, or zero.
echo "enter number"
# -r: do not let backslashes in the input be interpreted as escapes.
read -r number
# Quote the expansion: unquoted, an empty reply collapses the test to
# "[ -lt 0 ]", which is a syntax error instead of a sensible failure.
if [ "$number" -lt 0 ]
then
    echo "negative"
elif [ "$number" -gt 0 ]
then
    echo "positive"
else
    echo "zero"
fi
| true
|
5cff42898620af2b4cc7b31135bf0477b9bc6941
|
Shell
|
southpawgeek/perlweeklychallenge-club
|
/challenge-151/roger-bell-west/rust/ch-1.rs
|
UTF-8
| 1,414
| 2.8125
| 3
|
[] |
no_license
|
#! /bin/sh
//usr/bin/env rustc --test $0 -o ${0}x && ./${0}x --nocapture; rm -f ${0}x ; exit
#[test]
fn test_ex1() {
assert_eq!(mindepth(str2tree("1 | 2 3 | 4 5")), 2);
}
#[test]
fn test_ex2() {
assert_eq!(mindepth(str2tree("1 | 2 3 | 4 * * 5 | * 6")), 3);
}
fn str2tree(st: &str) -> Vec<u32> {
let mut o: Vec<u32> = vec![0];
let mut d = 0;
let mut p = 0;
for e in st.split_whitespace() {
if e == "|" {
d += 1;
p = 0;
let m = (1 << (d + 1)) - 1;
if o.len() < m {
o.resize(m, 0);
}
} else {
let mut y = 0;
if e != "*" {
y = e.parse::<u32>().unwrap();
}
let i = (1 << d) - 1 + p;
o[i] = y;
p += 1;
}
}
return o;
}
// Return the 1-based depth of the shallowest leaf of a tree stored as an
// implicit heap (0 = absent slot), i.e. the minimum depth of the tree.
fn mindepth(tree: Vec<u32>) -> usize {
    let mut firstleaf = tree.len();
    // Scan in level order; the first present node with no children is the
    // shallowest leaf.
    for (i, e) in tree.iter().enumerate() {
        if *e == 0 {
            continue; // empty slot, not a real node
        } else if (i + 1) << 1 >= tree.len() {
            // Both child indices fall outside the array: necessarily a leaf.
            firstleaf = i;
            break;
        } else {
            let ni = ((i + 1) << 1) - 1; // left child index (2i + 1)
            if tree[ni] == 0 && tree[ni + 1] == 0 {
                firstleaf = i;
                break;
            }
        }
    }
    // Depth of heap index i is floor(log2(i + 1)) + 1; compute it by counting
    // right-shifts needed to empty (firstleaf + 1).
    firstleaf += 1;
    let mut d = 0;
    while firstleaf > 0 {
        firstleaf >>= 1;
        d += 1;
    }
    return d;
}
| true
|
9128a1b9791c662441ac6b2663453c43a0022e45
|
Shell
|
sonianara/CompArch
|
/lab6/compile
|
UTF-8
| 126
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the lab test-case driver; when invoked as "compile run", execute it
# immediately after a successful compile.
if gcc -o load_testcase driver.c; then
  if [[ $1 == "run" ]]; then
    ./load_testcase
  fi
fi
| true
|
1ea9446cff5129a68374db479f35d9deed732f07
|
Shell
|
mwhahaha/cloud-images
|
/centos-8.stream.virt-install-cmd
|
UTF-8
| 803
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Kickstart-driven unattended install of CentOS 8 Stream into a qcow2 image,
# then convert/compact the disk image. Requires virt-install, qemu-img and a
# reachable package mirror (MIRROR below).
set -ex
# --location="http://mirror.centos.org/centos/8-stream/BaseOS/x86_64/os"
WORK_DIR=$(pwd)
# Kickstart answer file injected into the installer initrd below.
KS_FILE=centos-8.stream.ks
# NOTE(review): hard-coded LAN mirror — presumably a local repo host; adjust
# or fall back to the commented mirror.centos.org URL above.
MIRROR="http://192.168.0.127/repos/centos/8-stream"
DISK_SIZE=10
virt-install \
    --transient \
    --name=centos-8-stream \
    --ram=4096 \
    --arch=x86_64 \
    --cpu=host \
    --vcpus=4 \
    --os-variant=rhel8.0 \
    --initrd-inject="${KS_FILE}" \
    --extra-args="inst.ks=file:/${KS_FILE} console=tty0 console=ttyS0,115200 rd_NO_PLYMOUTH" \
    --disk="${WORK_DIR}/centos-8-stream.img,size=${DISK_SIZE},sparse=true,format=qcow2" \
    --location="${MIRROR}/BaseOS/x86_64/os" \
    --serial=pty \
    --nographics
# Re-encode to drop unused blocks, then remove the raw install image.
qemu-img convert -O qcow2 "${WORK_DIR}/centos-8-stream.img" "${WORK_DIR}/centos-8-stream.qcow2"
rm -rf "${WORK_DIR}/centos-8-stream.img"
| true
|
212de926c91be93a326c59646239cf1b352be0e0
|
Shell
|
noloader/Build-Scripts
|
/build-ncurses.sh
|
UTF-8
| 12,501
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Written and placed in public domain by Jeffrey Walton
# This script builds Ncurses from sources. We do not
# build Termcap, so there is no libtinfo{w}.
# Do NOT use Ncurses 6.2. There are too many problems with the release.
# Ncurses 6.2 does not build. It ends in a compile error. Additionally,
# the patches supplied for Ncurses 6.2 do not apply.
#
# We must wait for the Ncurses 6.3 release.
#
# Linux from Scratch provides a lot of cool hints for building Ncurses.
# Also see the following for Ncurses 6.1:
# http://www.linuxfromscratch.org/lfs/view/9.0-systemd/chapter06/ncurses.html
# And for Ncurses 6.2:
# http://www.linuxfromscratch.org/lfs/view/development/chapter06/ncurses.html
# Ncurses release to build; tarball and unpack directory are derived from it.
NCURSES_VER=6.3
NCURSES_TAR="ncurses-${NCURSES_VER}.tar.gz"
NCURSES_DIR="ncurses-${NCURSES_VER}"
# Marker name used in the package cache to skip rebuilds.
PKG_NAME=ncurses
###############################################################################

# Get the environment as needed.
# setup-environ.sh exports the INSTX_* variables used throughout and sets
# SETUP_ENVIRON_DONE=yes so sibling build scripts do not re-source it.
if [[ "${SETUP_ENVIRON_DONE}" != "yes" ]]; then
    if ! source ./setup-environ.sh
    then
        echo "Failed to set environment"
        exit 1
    fi
fi

# Skip the build entirely if a previous run left its cache marker.
if [[ -e "${INSTX_PKG_CACHE}/${PKG_NAME}" ]]; then
    echo ""
    echo "$PKG_NAME is already installed."
    exit 0
fi

# The password should die when this subshell goes out of scope
if [[ "${SUDO_PASSWORD_DONE}" != "yes" ]]; then
    if ! source ./setup-password.sh
    then
        echo "Failed to process password"
        exit 1
    fi
fi
###############################################################################

# Remove old Termcap/libtinfo{w}
# This build does not produce a separate terminfo library, so stale copies
# from earlier installs must not shadow the new libncurses{w}.
if [[ -n "${SUDO_PASSWORD}" ]]; then
    printf "%s\n" "${SUDO_PASSWORD}" | sudo ${SUDO_ENV_OPT} -S find ${INSTX_LIBDIR} -name 'libtinfo*' -exec rm -f {} \;
    printf "%s\n" "${SUDO_PASSWORD}" | sudo ${SUDO_ENV_OPT} -S find ${INSTX_PREFIX}/include -name 'termcap*' -exec rm -f {} \;
else
    find ${INSTX_LIBDIR} -name 'libtinfo*' -exec rm -f {} \;
    find ${INSTX_PREFIX}/include -name 'termcap*' -exec rm -f {} \;
fi

###############################################################################

# CA certificates are needed for the HTTPS download below.
if ! ./build-cacert.sh
then
    echo "Failed to install CA Certs"
    exit 1
fi

###############################################################################

if ! ./build-pcre2.sh
then
    echo "Failed to build PCRE2"
    exit 1
fi
###############################################################################

echo ""
echo "========================================"
echo "================ Ncurses ==============="
echo "========================================"
echo ""

echo "***************************"
echo "Downloading package"
echo "***************************"

# Remove all the old shit from testing
rm -rf ncurses-6.*

if ! "${WGET}" -q -O "$NCURSES_TAR" --ca-certificate="${LETS_ENCRYPT_ROOT}" \
     "https://ftp.gnu.org/pub/gnu/ncurses/$NCURSES_TAR"
then
    echo "Failed to download Ncurses"
    exit 1
fi

rm -rf "$NCURSES_DIR" &>/dev/null
gzip -d < "$NCURSES_TAR" | tar xf -
cd "$NCURSES_DIR" || exit 1

# Don't attempt to apply patches. They don't apply. Sigh...
# Dead code kept for reference: flip 'false' to re-enable upstream dev patches.
if false; then

# https://invisible-island.net/ncurses/ncurses.faq.html#applying_patches
if "${WGET}" -q -O dev-patches.zip --ca-certificate="${LETS_ENCRYPT_ROOT}" \
   "ftp://ftp.invisible-island.net/ncurses/${NCURSES_VER}/dev-patches.zip"
then
    if unzip dev-patches.zip -d .
    then
        echo "********************************"
        echo "Applying Ncurses patches"
        echo "********************************"
        for p in ncurses-${NCURSES_VER}-*.patch.gz ;
        do
            echo "Applying ${p}"
            zcat "${p}" | patch -s -p1
        done
    else
        echo "********************************"
        echo "Failed to unpack Ncurses patches"
        echo "********************************"
        exit 1
    fi
else
    echo "**********************************"
    echo "Failed to download Ncurses patches"
    echo "**********************************"
    exit 1
fi

fi

# Patches are created with 'diff -u' from the pkg root directory.
# Local project patches (if any) live beside the build scripts.
if [[ -e ../patch/ncurses${NCURSES_VER}.patch ]]; then
    echo ""
    echo "***************************"
    echo "Patching package"
    echo "***************************"
    patch -u -p0 < ../patch/ncurses${NCURSES_VER}.patch
    echo ""
fi
# Fix sys_lib_dlsearch_path_spec
bash "${INSTX_TOPDIR}/fix-configure.sh"

echo ""
echo "***************************"
echo "Configuring package"
echo "***************************"

# We always build the wide version of Ncurses via --enable-widec.
CONFIG_OPTS=()
CONFIG_OPTS+=("--disable-leaks")
CONFIG_OPTS+=("--with-shared")
CONFIG_OPTS+=("--with-cxx-shared")
CONFIG_OPTS+=("--enable-widec")
CONFIG_OPTS+=("--without-debug")
# CONFIG_OPTS+=("--with-termlib")
CONFIG_OPTS+=("--enable-pc-files")
CONFIG_OPTS+=("--disable-root-environ")
CONFIG_OPTS+=("--with-pkg-config-libdir=${INSTX_PKGCONFIG}")

# Distros move this directory around
# Mirror whichever terminfo location the host distro uses, under our prefix.
if [[ -d "/etc/terminfo" ]]; then
    CONFIG_OPTS+=("--with-default-terminfo-dir=${INSTX_PREFIX}/etc/terminfo")
elif [[ -d "/usr/lib64/terminfo" ]]; then
    CONFIG_OPTS+=("--with-default-terminfo-dir=${INSTX_PREFIX}/lib64/terminfo")
elif [[ -d "/usr/lib/terminfo" ]]; then
    CONFIG_OPTS+=("--with-default-terminfo-dir=${INSTX_PREFIX}/lib/terminfo")
elif [[ -d "/lib64/terminfo" ]]; then
    CONFIG_OPTS+=("--with-default-terminfo-dir=${INSTX_PREFIX}/lib64/terminfo")
elif [[ -d "/lib/terminfo" ]]; then
    CONFIG_OPTS+=("--with-default-terminfo-dir=${INSTX_PREFIX}/lib/terminfo")
else
    # This is $DATADIR/terminfo
    CONFIG_OPTS+=("--with-default-terminfo-dir=${INSTX_PREFIX}/share/terminfo")
fi

# Map the build directory to the canonical source dir in debug info so
# debuggers can find sources after the build tree is deleted.
if [[ "${INSTX_DEBUG_MAP}" -eq 1 ]]; then
    ncurses_cflags="${INSTX_CFLAGS} -fdebug-prefix-map=${PWD}=${INSTX_SRCDIR}/${NCURSES_DIR}"
    ncurses_cxxflags="${INSTX_CXXFLAGS} -fdebug-prefix-map=${PWD}=${INSTX_SRCDIR}/${NCURSES_DIR}"
else
    ncurses_cflags="${INSTX_CFLAGS}"
    ncurses_cxxflags="${INSTX_CXXFLAGS}"
fi

# Ncurses use PKG_CONFIG_LIBDIR, not PKG_CONFIG_PATH???
PKG_CONFIG_LIBDIR="${INSTX_PKGCONFIG}" \
PKG_CONFIG_PATH="${INSTX_PKGCONFIG}" \
CPPFLAGS="${INSTX_CPPFLAGS}" \
ASFLAGS="${INSTX_ASFLAGS}" \
CFLAGS="${ncurses_cflags}" \
CXXFLAGS="${ncurses_cxxflags}" \
LDFLAGS="${INSTX_LDFLAGS}" \
LDLIBS="${INSTX_LDLIBS}" \
LIBS="${INSTX_LDLIBS}" \
./configure \
    --build="${AUTOCONF_BUILD}" \
    --prefix="${INSTX_PREFIX}" \
    --libdir="${INSTX_LIBDIR}" \
    "${CONFIG_OPTS[@]}"

if [[ "$?" -ne 0 ]]; then
    echo ""
    echo "***************************"
    echo "Failed to configure Ncurses"
    echo "***************************"
    bash "${INSTX_TOPDIR}/collect-logs.sh" "${PKG_NAME}"
    exit 1
fi

# Escape dollar sign for $ORIGIN in makefiles. Required so
# $ORIGIN works in both configure tests and makefiles.
bash "${INSTX_TOPDIR}/fix-makefiles.sh"
# Remove unneeded warnings
# Fix: the original wrote 'IFS= find ...', which only set IFS in find's
# environment (a no-op). The assignment belongs on 'read' so paths with
# leading/trailing whitespace are preserved intact.
find "$PWD" -name 'Makefile' -print | while IFS= read -r file
do
    # Strip GCC options some compilers reject from every generated Makefile.
    sed -e 's/ --param max-inline-insns-single=1200//g' \
        -e 's/ -no-cpp-precomp//g' \
        "$file" > "$file.fixed"
    mv "$file.fixed" "$file"
    chmod u=rw,go=r "$file"
done
echo ""
echo "***************************"
echo "Building package"
echo "***************************"

MAKE_FLAGS=("-j" "${INSTX_JOBS}")
if ! "${MAKE}" "${MAKE_FLAGS[@]}"
then
    echo ""
    echo "***************************"
    echo "Failed to build Ncurses"
    echo "***************************"
    bash "${INSTX_TOPDIR}/collect-logs.sh" "${PKG_NAME}"
    exit 1
fi

# Fix flags in *.pc files
bash "${INSTX_TOPDIR}/fix-pkgconfig.sh"

# Fix runpaths
bash "${INSTX_TOPDIR}/fix-runpath.sh"

echo ""
echo "***************************"
echo "Testing package"
echo "***************************"

MAKE_FLAGS=("test")
if ! "${MAKE}" "${MAKE_FLAGS[@]}"
then
    echo ""
    echo "***************************"
    echo "Failed to test Ncurses"
    echo "***************************"
    bash "${INSTX_TOPDIR}/collect-logs.sh" "${PKG_NAME}"
    exit 1
fi

# Fix runpaths again
bash "${INSTX_TOPDIR}/fix-runpath.sh"

echo ""
echo "***************************"
echo "Installing package"
echo "***************************"

MAKE_FLAGS=("install")
if [[ -n "${SUDO_PASSWORD}" ]]; then
    printf "%s\n" "${SUDO_PASSWORD}" | sudo ${SUDO_ENV_OPT} -S "${MAKE}" "${MAKE_FLAGS[@]}"
    printf "%s\n" "${SUDO_PASSWORD}" | sudo ${SUDO_ENV_OPT} -S bash "${INSTX_TOPDIR}/fix-permissions.sh" "${INSTX_PREFIX}"
    printf "%s\n" "${SUDO_PASSWORD}" | sudo ${SUDO_ENV_OPT} -S bash "${INSTX_TOPDIR}/copy-sources.sh" "${PWD}" "${INSTX_SRCDIR}/${NCURSES_DIR}"
else
    "${MAKE}" "${MAKE_FLAGS[@]}"
    bash "${INSTX_TOPDIR}/fix-permissions.sh" "${INSTX_PREFIX}"
    bash "${INSTX_TOPDIR}/copy-sources.sh" "${PWD}" "${INSTX_SRCDIR}/${NCURSES_DIR}"
fi
echo ""
echo "***************************"
echo "Extra commands"
echo "***************************"

# Extra commands to provide non-wide names for warez that needs them.
# Linux uses linker scripts, while non-linux uses soft links.
# http://www.linuxfromscratch.org/lfs/view/9.0-systemd/chapter06/ncurses.html
# The block below WRITES a helper script (extra-cmds.sh); nothing here runs
# until the script is executed further down, possibly under sudo.
{
    echo '#!/usr/bin/env bash'
    echo ''
    echo "include_dir=${INSTX_PREFIX}/include"
    echo "lib_dir=${INSTX_LIBDIR}"
    echo ''
    echo 'cd ${lib_dir}'
    echo ''

    # JW added ncurses++ and tinfo
    if [[ "${IS_LINUX}" -eq 1 ]]; then
        echo 'for lib in ncurses ncurses++ form panel menu ; do'
        echo '    rm -vf ${lib_dir}/lib${lib}.so'
        echo '    rm -vf ${lib_dir}/lib${lib}.so.6'
        echo '    echo "INPUT(-l${lib}w)" > ${lib_dir}/lib${lib}.so'
        echo '    ln -sfv lib${lib}.so.6 ${lib_dir}/lib${lib}.so.6'
        echo 'done'
        echo ''
        echo 'rm -vf ${lib_dir}/libcursesw.so'
        echo 'rm -vf ${lib_dir}/libcurses.so'
        echo 'echo "INPUT(-lncursesw)" > ${lib_dir}/libcursesw.so'
        echo 'ln -sfv libncurses.so ${lib_dir}/libcurses.so'
        echo ''
    elif [[ "${IS_DARWIN}" -eq 1 ]]; then
        echo 'for lib in ncurses ncurses++ form panel menu ; do'
        echo '    rm -vf ${lib_dir}/lib${lib}.dylib'
        echo '    rm -vf ${lib_dir}/lib${lib}.6.dylib'
        echo '    ln -sfv lib${lib}w.dylib ${lib_dir}/lib${lib}.dylib'
        echo '    ln -sfv lib${lib}w.6.dylib ${lib_dir}/lib${lib}.6.dylib'
        echo 'done'
        echo ''
        echo 'rm -vf ${lib_dir}/libcursesw.dylib'
        echo 'rm -vf ${lib_dir}/libcurses.dylib'
        echo 'ln -sfv libcursesw.dylib ${lib_dir}/libcurses.dylib'
        echo 'ln -sfv libncurses.dylib ${lib_dir}/libcurses.dylib'
        echo ''
    else
        echo 'for lib in ncurses ncurses++ form panel menu ; do'
        echo '    rm -vf ${lib_dir}/lib${lib}.so'
        echo '    rm -vf ${lib_dir}/lib${lib}.so.6'
        echo '    ln -sfv lib${lib}w.so ${lib_dir}/lib${lib}.so'
        echo '    ln -sfv lib${lib}w.so.6 ${lib_dir}/lib${lib}.so.6'
        echo 'done'
        echo ''
        echo 'rm -vf ${lib_dir}/libcursesw.so'
        echo 'rm -vf ${lib_dir}/libcurses.so'
        echo 'ln -sfv libcursesw.so ${lib_dir}/libcurses.so'
        echo 'ln -sfv libncurses.so ${lib_dir}/libcurses.so'
        echo ''
    fi

    echo ''
    echo 'cd ${lib_dir}/pkgconfig'
    echo ''
    echo 'for lib in ncurses ncurses++ form panel menu ; do'
    echo '    rm -vf ${lib_dir}/pkgconfig/${lib}.pc'
    echo '    ln -sfv ${lib}w.pc ${lib_dir}/pkgconfig/${lib}.pc'
    echo 'done'
    echo ''
    echo ''
    echo 'cd ${include_dir}'
    echo ''
    echo 'ln -sfv ncursesw ${include_dir}/ncurses'
} > extra-cmds.sh

# Run the extra commands...
if [[ -n "${SUDO_PASSWORD}" ]]; then
    printf "%s\n" "${SUDO_PASSWORD}" | sudo ${SUDO_ENV_OPT} -S bash extra-cmds.sh
else
    bash extra-cmds.sh
fi

# Fix permissions once
if [[ -n "${SUDO_PASSWORD}" ]]; then
    printf "%s\n" "${SUDO_PASSWORD}" | sudo ${SUDO_ENV_OPT} -S bash "${INSTX_TOPDIR}/fix-permissions.sh" "${INSTX_PREFIX}"
else
    bash "${INSTX_TOPDIR}/fix-permissions.sh" "${INSTX_PREFIX}"
fi

###############################################################################

# Record success so the next run short-circuits at the top of this script.
touch "${INSTX_PKG_CACHE}/${PKG_NAME}"

cd "${CURR_DIR}" || exit 1

###############################################################################

# Set to false to retain artifacts
if true;
then
    ARTIFACTS=("$NCURSES_TAR" "$NCURSES_DIR")
    for artifact in "${ARTIFACTS[@]}"; do
        rm -rf "$artifact"
    done
fi

exit 0
| true
|
cdaa5de41023fa6ae2f2a5d022a55edc85bc773f
|
Shell
|
ShapeShiftOS-Devices/device_oneplus_sm8250-common
|
/rootdir/bin/init.qti.chg_policy.sh
|
UTF-8
| 1,164
| 2.671875
| 3
|
[] |
no_license
|
#! /vendor/bin/sh
#
# Copyright (c) 2019-2020 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Confidential and Proprietary - Qualcomm Technologies, Inc.
#
# Copyright (c) 2019 The Linux Foundation. All rights reserved.
#
# Grants the Android 'system' user ownership of charger/power-supply sysfs
# nodes so userspace charging policy daemons can write them.

export PATH=/vendor/bin

soc_id=`getprop ro.vendor.qti.soc_id`
# On these SoCs (presumably ones without HVDCP support — confirm against the
# platform docs) skip the chown pass and disable the hvdcp_opti daemon.
if [ "$soc_id" -eq 415 ] || [ "$soc_id" -eq 439 ] || [ "$soc_id" -eq 450 ]; then
    setprop persist.vendor.hvdcp_opti.start 0
    rm -rf /mnt/vendor/persist/hvdcp_opti
    exit 0
fi

prefix="/sys/class/"
#List of folder for ownership update
arr=( "power_supply/battery/" "power_supply/usb/" "power_supply/main/" "power_supply/charge_pump_master/" "power_supply/pc_port/" "power_supply/dc/" "power_supply/bms/" "power_supply/parallel/" "usbpd/usbpd0/" "qc-vdm/" "charge_pump/" "qcom-battery/" )

for i in "${arr[@]}"
do
    for j in `ls "$prefix""$i"`
    do
        #skip directories to prevent possible security issues.
        if [[ -d "$prefix""$i""$j" ]]
        then
            continue
        else
            # -h: change the symlink itself, never follow it out of sysfs.
            chown -h system.system "$prefix""$i""$j"
        fi
    done
done

#@bsp, 2020/05/11, remove hvdcp_opti service
#setprop persist.vendor.hvdcp_opti.start 1
| true
|
e6ab68c87a78ad87a15bba5f41b15528ae7c90c0
|
Shell
|
umanathlanka/edgeapps
|
/applications/eis-experience-kit/roles/web_visualizer/deploy/templates/web_access.sh.j2
|
UTF-8
| 1,751
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2020 Intel Corporation

# Generate a self-signed CA plus server/client key pairs for WebVisualizer
# web access, then publish the server key/cert and CA cert into etcd.
# This is an Ansible/Jinja2 template: {{ ... }} placeholders are rendered
# at deploy time.

mkdir -p RootCA
mkdir -p Server
mkdir -p Client
EtcdctlPath={{ etcdctl_bin_path }}

echo "Generating RootCA Key and Cert for WebVisualizer access "
openssl ecparam -genkey -name secp384r1 -out "./RootCA/ca.key"
openssl req -key "./RootCA/ca.key" -new -x509 -days 1000 -subj "/CN=WebVisualizer" -out "./RootCA/ca.crt"

echo "Generate Server Key and Cert for WebVisualizer access"
openssl req -new -sha256 -nodes -out ./Server/server.csr -newkey rsa:2048 -keyout ./Server/server.key -subj "/CN=Server"
openssl x509 -req -in ./Server/server.csr -CA ./RootCA/ca.crt -CAkey ./RootCA/ca.key -days 1000 -out ./Server/server.cert -CAcreateserial

echo "Generate Client Key and Cert for WebVisualizer access"
openssl req -new -sha256 -nodes -out ./Client/client.csr -newkey rsa:2048 -keyout ./Client/client.key -subj "/CN=Client"
# Fix: the CSR is written to ./Client/client.csr above, but the original
# signed "client.csr" from the current directory, which is never created —
# the client certificate was therefore never produced.
openssl x509 -req -in ./Client/client.csr -CA ./RootCA/ca.crt -CAkey ./RootCA/ca.key -days 1000 -out ./Client/client.cert -CAcreateserial

# ClusterIP of the etcd service in the "eis" namespace.
etcd_ip=$(kubectl -n eis get service ia-etcd-service | grep ia-etcd-service | awk '{print $3}')

echo "Access etcd for writing WebVisualizer web access certificate"
export ETCDCTL_CACERT={{ rootca_cert }}
export ETCDCTL_CERT={{ root_client_cert }}
export ETCDCTL_KEY={{ root_client_key }}
export ETCDCTL_ENDPOINTS=https://$etcd_ip:{{ etcd_port }}

cd "$EtcdctlPath" || exit 1
./etcdctl get "/GlobalEnv/"
# Feed each PEM file to etcdctl on stdin (redirection instead of 'cat | ...').
./etcdctl put /WebVisualizer/server_key < {{ helm_chart_web_visualizer }}/secrets/Server/server.key
./etcdctl put /WebVisualizer/server_cert < {{ helm_chart_web_visualizer }}/secrets/Server/server.cert
./etcdctl put /WebVisualizer/ca_cert < {{ helm_chart_web_visualizer }}/secrets/RootCA/ca.crt
| true
|
195b63a900155ba6cbb5d690b430a311c4ccf30b
|
Shell
|
Plooms/bin
|
/easystroke-scripts
|
UTF-8
| 12,817
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Easystroke scripts all in one place
# Dispatches on $1 (gesture codes C01..C45, or "dolphin") to the matching
# desktop action further below.
SCRIPT_NAME="easystroke-scripts"
PID=$$
export LANG=en_US.UTF-8
# Source the VAR MOAS
source /usr/local/bin/VAR-SCRIPT
# Switch keyboard layout to US via the Ctrl_R+Alt_R+0 hotkey (needed before
# sending literal key strings with xte).
_CHANGE_LANG_US()
{
    xte 'keydown Control_R' 'keydown Alt_R' 'key 0' 'keyup Control_R' 'keyup Alt_R'
}
# Trigger the window-manager "move window" action bound to Ctrl+Alt+8.
_MOVE_WINDOW()
{
    xte "keydown Control_L" "keydown Alt_L" "key 8" "keyup Control_L" "keyup Alt_L"
}
# "dolphin": when several Dolphin windows exist, show a dialog menu of their
# titles and raise the chosen one.
if [ "$1" == "dolphin" ]
then
    if [ "$(wmctrl -l |grep -i dolphin | wc -l)" -gt 1 ]
    then
        # Collect window titles (columns 5+ of wmctrl -l) into an array via a
        # temp file. NOTE(review): the temp file is never removed, and the menu
        # hard-codes four entries — with 2-3 windows some entries are empty.
        win_selector="$(mktemp)"
        wmctrl -l | grep -i "dolphin" | cut -d" " -f 5- >> "$win_selector"
        while read a ; do windows+=("$a") ; done < "$win_selector"
        answer="$(dialog --stdout --menu "Choose window" 0 0 0 \
        "${windows[0]}" "" \
        "${windows[1]}" "" \
        "${windows[2]}" "" \
        "${windows[3]}" "" )"
        wmctrl -a "$answer"
        killall terminator-helper
    fi
fi
##########################################################
# Close Window
# MIN=YES asks the helper to minimize-instead-of-close; C02 closes for real.
if [ "$1" == "C01" ] ; then
    export MIN=YES ; /usr/local/bin/gesture-helper-close-window
fi
##########################################################
# Close Window Really
if [ "$1" == "C02" ] ; then
    export MIN=NO ; /usr/local/bin/gesture-helper-close-window
fi
##########################################################
# Pidgin Text
# Pattern used throughout: remember the focused window, raise the target,
# inject keys with xte, then restore focus to the remembered window.
if [ "$1" == "C03" ] ; then
    _CHANGE_LANG_US
    CURRENTWIN="$(xdotool getwindowname $(xdotool getwindowfocus))"
    if wmctrl -a "Friend "
    then wmctrl -a "Friend " ; sleep .2 ; xte "str hello" ; xte "key Return" ; sleep .1 ; wmctrl -a "$CURRENTWIN" ; notify-send "Pidgin" "Sent hello - Pidgin"
    else notify-send "Gesture" "Pidgin window Not Found"
    fi
fi
##########################################################
# Change Language
if [ "$1" == "C04" ] ; then
    xte "keydown Alt_L" "keydown Shift_L" "keyup Alt_L" "keyup Shift_L"
fi
##########################################################
# Visibility -
if [ "$1" == "C05" ] ; then
    xte "keydown Control_R" "keydown Alt_L" "str rrr" "keyup Control_R" "keyup Alt_L"
fi
##########################################################
# Visibility +
if [ "$1" == "C06" ] ; then
    xte "keydown Control_R" "keydown Alt_L" "str qqqqqqqqqqqqqqq" "keyup Control_R" "keyup Alt_L"
fi
##########################################################
# File Manager
if [ "$1" == "C07" ] ; then
    dolphin
fi
##########################################################
# Terminal
if [ "$1" == "C08" ] ; then
    terminator
fi
##########################################################
# Brightness
if [ "$1" == "C09" ] ; then
    terminator -x /usr/local/bin/br
fi
##########################################################
# Search Google
if [ "$1" == "C10" ] ; then
    wmctrl -a firefox && xte "keydown Control_R" "key t" "key k" "keyup Control_R"
fi
##########################################################
# Search Google-2
# Same as C10 but also types the current X selection and submits it.
if [ "$1" == "C11" ] ; then
    wmctrl -a firefox && xte "keydown Control_R" "key t" "key k" "keyup Control_R"
    sleep 3 ; xdotool type "$(xclip -o)" ; xdotool key Return
fi
##########################################################
# ln2tb-Movies
if [ "$1" == "C12" ] ; then
    export TERM_LAUNCH=Y ; terminator -x c.ln2tb-movie-find-single
fi
##########################################################
# Search YT - KAT - TVDB
# The sed chains normalize the X selection into URL-query form.
# NOTE(review): 's///g' with an empty pattern reuses the previous regex '('
# in sed, so ')' is never stripped from TVDB_NAME — likely meant 's/)//g'.
if [ "$1" == "C13" ] ; then
    YT_NAME="$( xclip -o | sed 's/(//g;s/)//g;s/&/and/g;s/\ /+/g' ) trailer"
    KAT_NAME="$( xclip -o | sed "s/(//g;s/)//g;s/\./\+/g;s/&/and/g;s/,/\ /g;s/'//g;s/\!//g;s/\//+/g" )"
    TVDB_NAME="$( xclip -o | sed 's/(//g;s///g;s/&/and/g;s/\ /+/g' )"
    # TVDB
    firefox "http://thetvdb.com/?string=$TVDB_NAME&searchseriesid=&tab=listseries&function=Search"
    # KAT
    firefox "https://kickass.cd/usearch/$KAT_NAME" 2>/dev/null
    # YT
    firefox "www.youtube.com/results?search_query=$YT_NAME&sm=3" 2>/dev/null
fi
##########################################################
# Search YT & KAT
if [ "$1" == "C14" ] ; then
    YT_NAME="$( xclip -o | sed 's/(//g;s/)//g;s/&/and/g;s/\ /+/g' ) trailer"
    KAT_NAME="$( xclip -o | sed "s/(//g;s/)//g;s/\./\+/g;s/&/and/g;s/,/\ /g;s/'//g;s/\!//g;s/\//+/g" )"
    # KAT
    firefox "https://kickass.cd/usearch/$KAT_NAME" 2>/dev/null
    # YT
    firefox "www.youtube.com/results?search_query=$YT_NAME&sm=3" 2>/dev/null
fi
##########################################################
# Search TVDB
if [ "$1" == "C14-1" ] ; then
    TVDB_NAME="$( xclip -o | sed 's/(//g;s///g;s/&/and/g;s/\ /+/g' )"
    # TVDB
    firefox "http://thetvdb.com/?string=$TVDB_NAME&searchseriesid=&tab=listseries&function=Search"
fi
##########################################################
# Search KAT
if [ "$1" == "C15" ] ; then
    KAT_NAME="$( xclip -o | sed "s/(//g;s/)//g;s/\./\+/g;s/&/and/g;s/,/\ /g;s/'//g;s/\!//g;s/\//+/g" )"
    # KAT
    firefox "https://kickass.cd/usearch/$KAT_NAME" 2>/dev/null
fi
##########################################################
# Move p2p
# Hand downloaded .torrent/.nzb files to the NAS shared download folder.
if [ "$1" == "C16" ] ; then
    mv ~/Downloads/*.torrent ~/NAS/Other/Downloads/shared-folder/
    mv ~/Downloads/*.nzb ~/NAS/Other/Downloads/shared-folder/
fi
##########################################################
# Pause VLC
if [ "$1" == "C17" ] ; then
    CURRENTWIN="$(xdotool getwindowname $(xdotool getwindowfocus))"
    if wmctrl -a "VLC media player"
    then sleep .1 ; xte 'key space' ; wmctrl -a "$CURRENTWIN"
    else notify-send "Gesture" "VLC Window Not Found" ; sudo beep
    fi
fi
##########################################################
# Fullscreen VLC
# NOTE(review): unlike C17/C19, CURRENTWIN is never set in this branch, so
# the final focus restore uses an empty (or stale) window name.
if [ "$1" == "C18" ] ; then
    _CHANGE_LANG_US
    if wmctrl -a "VLC media player"
    then wmctrl -a "VLC media player" ; sleep .1 ; xte 'key f' ; wmctrl -a "$CURRENTWIN"
    else notify-send "Gesture" "VLC Window Not Found" ; sudo beep
    fi
fi
##########################################################
# Fullscreen + Pause VLC
if [ "$1" == "C19" ] ; then
    _CHANGE_LANG_US
    CURRENTWIN="$(xdotool getwindowname $(xdotool getwindowfocus))"
    if wmctrl -a "VLC media player"
    then wmctrl -a "VLC media player" ; sleep .1 ; xte 'key space' 'key f' ; wmctrl -a "$CURRENTWIN"
    else notify-send "Gesture" "VLC Window Not Found" ; sudo beep
    fi
fi
##########################################################
# Window Above
# Resolve the active window's id from the root _NET_ACTIVE_WINDOW property
# and toggle its always-on-top state.
if [ "$1" == "C20" ] ; then
    WIN="$(wmctrl -lp | grep `xprop -root | grep _NET_ACTIVE_WINDOW | head -1 | awk '{print $5}' | sed 's/,//' | sed 's/^0x/0x0/'` | cut -d" " -f1)"
    wmctrl -i -r "$WIN" -b add,above
fi
##########################################################
# Window Below
if [ "$1" == "C21" ] ; then
    WIN="$(wmctrl -lp | grep `xprop -root | grep _NET_ACTIVE_WINDOW | head -1 | awk '{print $5}' | sed 's/,//' | sed 's/^0x/0x0/'` | cut -d" " -f1)"
    wmctrl -i -r "$WIN" -b remove,above
fi
##########################################################
# Firefox Focus
if [ "$1" == "C22" ] ; then
    wmctrl -a firefox || (notify-send "Gesture" "Firefox Window Not Found" ; sudo beep)
fi
##########################################################
# Dolphin Focus
# With multiple Dolphin windows, delegate to the "dolphin" menu handler above
# (run inside a terminal so the dialog TUI has a tty).
if [ "$1" == "C23" ] ; then
    if [ "$(wmctrl -l |grep -i dolphin | wc -l)" -gt 1 ]
    then
        terminator -x terminator-helper easystroke-scripts dolphin
    else
        wmctrl -a dolphin || (notify-send "Gesture" "Dolphin Window Not Found" ; sudo beep)
    fi
fi
##########################################################
# Pidgin Focus
if [ "$1" == "C24" ] ; then
    wmctrl -a "Friend " || (notify-send "Gesture" "Pidgin Window Not Found" ; sudo beep)
fi
##########################################################
# KODI Focus
if [ "$1" == "C25" ] ; then
    wmctrl -a kodi || (notify-send "Gesture" "KODI Window Not Found" ; sudo beep)
fi
##########################################################
# VLC Focus
if [ "$1" == "C26" ] ; then
    wmctrl -a "VLC media player" || (notify-send "Gesture" "VLC Window Not Found" ; sudo beep)
fi
##########################################################
# Geany Focus
if [ "$1" == "C27" ] ; then
    wmctrl -a geany || (notify-send "Gesture" "Geany Window Not Found" ; sudo beep)
fi
##########################################################
# Open Keyboard
# Toggle the florence on-screen keyboard.
if [ "$1" == "C28" ] ; then
    if pgrep florence
    then killall florence
    else florence
    fi
fi
##########################################################
# Volume UP
if [ "$1" == "C29" ] ; then
    xte "keydown Control_R" "keydown Alt_L" "str b" "keyup Control_R" "keyup Alt_L"
fi
##########################################################
# Volume Down
if [ "$1" == "C30" ] ; then
    xte "keydown Control_R" "keydown Alt_L" "str n" "keyup Control_R" "keyup Alt_L"
fi
##########################################################
# Press CTRL+SHIFT
# Hold modifiers for a few seconds so a follow-up mouse gesture can use them.
if [ "$1" == "C31" ] ; then
    xte "keydown Control_L" "keydown Shift_R"
    sleep 3
    xte "keyup Control_L" "keyup Shift_R"
    notify-send "Gesture" "keyup CTRL+SHIFT"
fi
##########################################################
# Press CTRL
if [ "$1" == "C32" ] ; then
    xte "keydown Control_L"
    sleep 1
    xte "keyup Control_L"
    notify-send "Gesture" "keyup CTRL"
fi
##########################################################
# Show-Time
if [ "$1" == "C33" ] ; then
    /usr/local/bin/show-time
fi
##########################################################
# Select Text & Delete
if [ "$1" == "C34" ] ; then
    xte "keydown Control_L" "key a" "keyup Control_L" "key BackSpace" "key BackSpace"
fi
##########################################################
# VLC Jump Forward
# VLC remote-control pattern: raise VLC, send its hotkey, restore prior focus.
if [ "$1" == "C35" ] ; then
    CURRENTWIN="$(xdotool getwindowname $(xdotool getwindowfocus))"
    if wmctrl -a "VLC media player"
    then wmctrl -a "VLC media player" ; sleep .1 ; xte "keydown Control_L" "key Right" "keyup Control_L" ; wmctrl -a "$CURRENTWIN"
    else notify-send "Gesture" "VLC Window Not Found" ; sudo beep
    fi
fi
##########################################################
# VLC Jump Back
if [ "$1" == "C36" ] ; then
    CURRENTWIN="$(xdotool getwindowname $(xdotool getwindowfocus))"
    if wmctrl -a "VLC media player"
    then wmctrl -a "VLC media player" ; sleep .1 ; xte "keydown Control_L" "key Left" "keyup Control_L" ; wmctrl -a "$CURRENTWIN"
    else notify-send "Gesture" "VLC Window Not Found" ; sudo beep
    fi
fi
##########################################################
# VLC Volume UP
if [ "$1" == "C37" ] ; then
    CURRENTWIN="$(xdotool getwindowname $(xdotool getwindowfocus))"
    if wmctrl -a "VLC media player"
    then wmctrl -a "VLC media player" ; sleep .1 ; xte "keydown Control_L" "key Up" "keyup Control_L" ; wmctrl -a "$CURRENTWIN"
    else notify-send "Gesture" "VLC Window Not Found" ; sudo beep
    fi
fi
##########################################################
# VLC Volume Down
if [ "$1" == "C38" ] ; then
    CURRENTWIN="$(xdotool getwindowname $(xdotool getwindowfocus))"
    if wmctrl -a "VLC media player"
    then wmctrl -a "VLC media player" ; sleep .1 ; xte "keydown Control_L" "key Down" "keyup Control_L" ; wmctrl -a "$CURRENTWIN"
    else notify-send "Gesture" "VLC Window Not Found" ; sudo beep
    fi
fi
##########################################################
# KODI Move
if [ "$1" == "C39" ] ; then
    if wmctrl -a "kodi"
    then wmctrl -a "kodi" ; sleep .1 ; _MOVE_WINDOW
    else notify-send "Gesture" "KODI Window Not Found" ; sudo beep
    fi
fi
##########################################################
# VLC Move
if [ "$1" == "C40" ] ; then
    if wmctrl -a "VLC media player"
    then wmctrl -a "VLC media player" ; sleep .1 ; _MOVE_WINDOW
    else notify-send "Gesture" "VLC Window Not Found" ; sudo beep
    fi
fi
##########################################################
# Brightness
if [ "$1" == "C41" ] ; then
    terminator -x /usr/local/bin/br OFF
fi
##########################################################
# VLC Sub Delay
if [ "$1" == "C42" ] ; then
    _CHANGE_LANG_US
    CURRENTWIN="$(xdotool getwindowname $(xdotool getwindowfocus))"
    if wmctrl -a "VLC media player"
    then wmctrl -a "VLC media player" && (sleep .1 ; xte 'str hhhhh' ; wmctrl -a "$CURRENTWIN")
    else notify-send "Gesture" "VLC Window Not Found" ; sudo beep
    fi
fi
##########################################################
# VLC Sub Hasten
if [ "$1" == "C43" ] ; then
    _CHANGE_LANG_US
    CURRENTWIN="$(xdotool getwindowname $(xdotool getwindowfocus))"
    if wmctrl -a "VLC media player"
    then wmctrl -a "VLC media player" && (sleep .1 ; xte 'str ggggg' ; wmctrl -a "$CURRENTWIN")
    else notify-send "Gesture" "VLC Window Not Found" ; sudo beep
    fi
fi
##########################################################
# Press ALT
if [ "$1" == "C44" ] ; then
    xte "keydown Alt_L"
    sleep 3
    xte "keyup Alt_L"
    notify-send "Gesture" "keyup ALT"
fi
##########################################################
# KRDC Focus
if [ "$1" == "C45" ] ; then
    wmctrl -a "KRDC" || (notify-send "Gesture" "KRDC Window Not Found" ; sudo beep)
fi
##########################################################
# END OF FILE
# No argument: print usage instead of silently doing nothing.
if [ -z "$1" ] ; then
    echo "USAGE: $SCRIPT_NAME C##"
    echo "Nothing left to do or no options were passed"
fi
| true
|
3021de20b6e858bb842a9afaa22c2070c164eb18
|
Shell
|
Ron-Yang/Traffic-Econometrics-MA
|
/upload_otherTexDocuments.sh
|
UTF-8
| 970
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Syntax:
#   upload_otherTexDocuments.sh <latexname ohne ext> <optional -edit oder -e>
#
# Compile <latexname>.tex with pdflatex and upload the resulting PDF to the
# public lecture-notes directory; with any second argument, open the HTML
# index for editing afterwards.
####################################################

if (( $# < 1 || $# > 2 )); then
  echo "Calling sequence:"
  echo "  upload_otherTexDocuments.sh <latexname ohne ext.> <optional -edit oder -e>"
  # NOTE(review): kept the original's exit 0 on misuse so existing callers
  # that test the exit status are not broken; a nonzero code would be cleaner.
  exit 0
fi

sourcedir="$PWD"
targetdir="$HOME/public_html/professional/Vkoek_Ma_Skript"
htmldir="$HOME/public_html/professional/Vkoek_Ma"

texname="$1"
pdfname="${texname}.pdf"

# Fix: all expansions are now quoted (paths with spaces) and cd is checked so
# pdflatex cannot run in the wrong directory.
cd "$sourcedir" || exit 1
if test -r "${texname}.tex"; then
  echo "pdflatexing ${texname} ..."
  pdflatex "${texname}" > logfile
else
  echo "error: File ${texname}.tex existiert nicht!!" >&2
fi

# upload to web page
echo "uploading to $targetdir/$pdfname and chmod o+r ..."
echo cp "$pdfname" "$targetdir"
cp "$pdfname" "$targetdir"
chmod o+r "$targetdir/$pdfname"

if (( $# == 2 )); then
  emacs "${htmldir}/index.html"
  # (setze neue Links)
else
  echo "potentially edit html file ${htmldir}/index.html"
fi
| true
|
127046703bfeaef44f96966d56e2b6b52968b249
|
Shell
|
carmenxlivia/JournalCompetition-PeerReview
|
/script.sh
|
UTF-8
| 1,076
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash -l
# Batch script to run a serial job on Legion with the upgraded
# software stack under SGE.
# The '#$' lines below are SGE directives parsed by the scheduler — they must
# stay exactly as written.

# 1. Force bash as the executing shell.
#$ -S /bin/bash

# 2. Request ten minutes of wallclock time (format hours:minutes:seconds).
#$ -l h_rt=23:59:0

# 3. Request 1 gigabyte of RAM (must be an integer)
#$ -l mem=1G

# 4. Request 15 gigabyte of TMPDIR space (default is 10 GB)
#$ -l tmpfs=15G

# 5. Set the name of the job.
#$ -N PeerReview

# 6. Set the working directory to somewhere in your scratch space.  This is
# a necessary step with the upgraded software stack as compute nodes cannot
# write to $HOME.
# Replace "<your_UCL_id>" with your UCL user ID :)
#$ -wd /home/your_UCL_id/Scratch/output

# 7. Your work *must* be done in $TMPDIR
cd $TMPDIR

# 8. Run the application.
module load python3/recommended
python /home/your_UCL_id/PeerReviewModel/run.py

# 9. Preferably, tar-up (archive) all output files onto the shared scratch area
tar zcvf $HOME/Scratch/batch/files_from_job_$JOB_ID.tar.gz $TMPDIR

# Make sure you have given enough time for the copy to complete!
| true
|
6dc5a29715c7993f39bdba454f581c50492cd48f
|
Shell
|
marcogaro/rasp
|
/permessifunzionante22082020/lauch.sh
|
UTF-8
| 2,532
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Create an LXC container ("test2") that can drive the Raspberry Pi GPIO
# sysfs tree via bind-mounted passthrough directories and a userspace
# passthrough daemon (pass1.4.py).
#if [ "$#" -ne 1 ]; then
#  echo "Usage: launch_virtual_rasp.sh <virtual_rasp_name>"
#  exit;
#fi
#lxd init
#echo nome della raspberry?
#read name
#echo "il nome della raspberry è $name !"
#name=$1
echo "Creating virtual rasp "test2"!"
lxc launch ubuntu:16.04 test2
# Host-side UID that owns the container rootfs (uid-mapped root).
MYUID=`sudo ls -l /home/ubuntu/storage/containers/test2/rootfs/ | grep root | awk '{}{print $3}{}'`
lxc exec test2 -- addgroup gpio
sleep 20
lxc exec test2 -- usermod -a -G gpio ubuntu
sleep 1
# NOTE(review): host gid for the container's gpio group is computed as
# MYUID + in-container gid — presumably relies on a contiguous id map
# starting at MYUID; verify against the container's idmap.
MYGID=$(($MYUID + `lxc exec test2 -- sed -nr "s/^gpio:x:([0-9]+):.*/\1/p" /etc/group`))
echo $MYGID $MYUID
# Host-side staging tree that mirrors the sysfs GPIO paths.
sudo mkdir -p /gpio_mnt/test2
sudo chmod 777 -R /gpio_mnt/
sudo mkdir -p /gpio_mnt/test2/sys/devices/platform/soc/3f200000.gpio
sudo mkdir -p /gpio_mnt/test2/sys/class/gpio
sudo mkdir -p /gpio_mnt/test2/sys/devices/platform/soc/soc\:firmware/soc\:firmware\:expgpio/gpio/gpiochip504/
#sudo chown "$MYUID"."$MYGID" -R /gpio_mnt/test2/sys/
# Matching mount points inside the container.
lxc exec test2 -- mkdir -p /gpio_mnt/sys/class/gpio
lxc exec test2 -- mkdir -p /gpio_mnt/sys/devices/platform/soc/3f200000.gpio
lxc exec test2 -- mkdir -p /gpio_mnt/sys/devices/platform/soc/soc\:firmware/soc\:firmware\:expgpio/gpio/gpiochip504/
sudo chmod -R 777 /gpio_mnt/
#lxc config set test2 raw.idmap "both 1000 1000000"
lxc config set test2 security.privileged true
lxc restart test2
# Bind the host staging directories into the container as disk devices.
lxc config device add test2 gpio disk source=/gpio_mnt/test2/sys/class/gpio path=/gpio_mnt/sys/class/gpio
lxc config device add test2 devices disk source=/gpio_mnt/test2/sys/devices/platform/soc/3f200000.gpio path=/gpio_mnt/sys/devices/platform/soc/3f200000.gpio
lxc config device add test2 soc disk source=/sys/devices/platform/soc/soc\:firmware/soc\:firmware\:expgpio/gpio/gpiochip504/ path=/gpio_mnt/sys/devices/platform/soc/soc\:firmware/soc\:firmware\:expgpio/gpio/gpiochip504/
sleep 5
# Fetch and start the GPIO passthrough daemons (real sysfs <-> staging tree).
wget https://raw.githubusercontent.com/marcogaro/rasp/master/provacongpiopermessi/pass1.4.py -P /tmp/passthrough/
cd /tmp/passthrough/
ls
sudo chmod -R 777 /sys/class/gpio/
sudo chmod -R 777 /sys/devices/platform/soc/
sudo chmod -R 777 /gpio_mnt/
sudo chmod -R 777 /gpio_mnt/test2/sys/
sudo groupadd gpio
chgrp gpio -R /sys/class/gpio/
sleep 10
python3 pass1.4.py /sys/devices/platform/soc/3f200000.gpio /gpio_mnt/test2/sys/devices/platform/soc/3f200000.gpio/ &
python3 pass1.4.py /sys/class/gpio/ /gpio_mnt/test2/sys/class/gpio/ &
#sudo chown "$MYUID"."$MYGID" -R /gpio_mnt/test2/sys/
cd
# Drop into a login shell inside the container.
lxc exec test2 -- su --login ubuntu -l
#lxc exec test2 bash
#adduser utente
#adduser utente gpio
#su - utente
|
6330b68d67b3b7449fce24a522b5b8cfdfd4bbe9
|
Shell
|
taoshengxu/genome_annotation_pipeline
|
/scripts/train_snap_model.sh
|
UTF-8
| 712
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Train a SNAP gene-prediction model (snap.hmm) from a MAKER datastore.
#
# Usage: train_snap_model.sh <datastore_index.log> <round> <species_name>
#   datastore_index.log - MAKER master datastore index file
#   round               - training-round number (names the output dir)
#   species_name        - label passed to hmm-assembler.pl
#
# Requires maker (gff3_merge, maker2zff) and snap (fathom, forge,
# hmm-assembler.pl) on PATH.
set -e
set -u
set -o pipefail
#module load snap
#module load maker/2.31.11
if [ $# -lt 3 ]; then
    echo "$0 datastore_index.log round specie_name"
    exit 1
fi
ds_index=$1
round=$2
specie=$3
mkdir -p snap && cd snap
mkdir -p "round${round}" && cd "round${round}"
# Merge all per-contig GFF3s referenced by the datastore index.
gff3_merge -d "$ds_index" -s > "maker_round${round}.gff"
# Export gene models (AED <= 0.25, protein length >= 100) as ZFF.
maker2zff -x 0.25 -l 100 "maker_round${round}.gff"
# QC reports run in the background; they only write log files and are
# not inputs to the training steps below.
fathom genome.ann genome.dna -gene-stats > gene-stats.log 2>&1 &
fathom genome.ann genome.dna -validate > validate.log 2>&1 &
fathom -categorize 1000 genome.ann genome.dna
fathom -export 1000 -plus uni.ann uni.dna
# fix: -p so a re-run of the same round does not abort under 'set -e'
# when params/ already exists.
mkdir -p params && cd params
forge ../export.ann ../export.dna
cd ..
hmm-assembler.pl "$specie" params > snap.hmm
# Reap the background QC jobs so their logs are complete before exit.
wait
| true
|
9d35f680298fa9a1c98522bc7400165bb23b0e75
|
Shell
|
Jyny/outliner
|
/pkg/deployer/ssh/script/deploy.sh
|
UTF-8
| 376
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
# Provision an Outline VPN server over SSH: record this deployer's PID,
# then install Docker and the Outline server via their official scripts.

install_docker() {
    # Docker's convenience installer script.
    curl -sS https://get.docker.com/ | sh
}

install_outline() {
    # Official Outline server installer from Jigsaw.
    bash -c "$(wget -qO- https://raw.githubusercontent.com/Jigsaw-Code/outline-server/master/src/server_manager/install_scripts/install_server.sh)"
}

write_PID() {
    # Expose our PID so a supervisor can track or kill this deployment.
    echo $$ > /tmp/pid
}

main() {
    write_PID
    install_docker
    install_outline
}

main
| true
|
6c128ac9ea3813949eed831bdd01fbe0047348a0
|
Shell
|
angloyna/docker-microservices
|
/bin/overrides.sh
|
UTF-8
| 246
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Load per-developer build overrides from ./overrides/set-overrides.sh.
# The file is created empty on first run so that 'source' never fails.
OVERRIDE=""
# fix: 'touch' alone fails when ./overrides does not exist yet.
mkdir -p ./overrides
touch ./overrides/set-overrides.sh
source ./overrides/set-overrides.sh
# BUILD_NAME set => headless CI build, no interactive override; otherwise
# report the (possibly empty) override chosen by set-overrides.sh.
if [[ -n "${BUILD_NAME:-}" ]];
    then OVERRIDE=""; # No override for a headless build.
    else echo -e "${LIGHTBLUE}OVERRIDE=${OVERRIDE}${NC}";
fi
| true
|
1e7745726005d566633fca74a03d29cd24ad34ac
|
Shell
|
nickkelley42/financial-tools
|
/bin/setup-pre-commit
|
UTF-8
| 223
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the repository's pre-commit hook by symlinking bin/pre-commit
# into .git/hooks. Refuses to overwrite an existing hook.
# fix: the original had a duplicated shebang line and silently continued
# when run outside a git repo; abort on any failure instead.
set -e
GITDIR="$(git rev-parse --git-dir)"
HOOKSDIR="$GITDIR/hooks"
cd "$HOOKSDIR"
if [ -e "pre-commit" ]; then
    echo "pre-commit hook already exists; aborting"
    exit 1
fi
# Relative link back to the repo's bin/ directory.
ln -s ../../bin/pre-commit
| true
|
9555392bc16a2880354439fac0fd3be5ec3966bd
|
Shell
|
groovedoggo/storage
|
/pg-backup-bash
|
UTF-8
| 336
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/sh
# Nightly PostgreSQL backup: prune old gzipped dumps from $pathB, then
# write a fresh dump named pgsql_YYYY-MM-DD.sql.gz.
PATH=/etc:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
# NOTE(review): credentials are hard-coded in the script; prefer
# ~/.pgpass or a root-only environment file.
PGPASSWORD=password
export PGPASSWORD
pathB=/backup
dbUser=dbuser
database=db
# Delete dumps older than ~61 days whose day-of-month is NOT 15
# (patterns match days 10-19 except 15, and 0x/2x/3x), keeping one
# mid-month dump per month as a long-term copy — TODO confirm intent.
find $pathB \( -name "*-1[^5].*" -o -name "*-[023]?.*" \) -ctime +61 -delete
pg_dump -U $dbUser $database | gzip > $pathB/pgsql_$(date "+%Y-%m-%d").sql.gz
unset PGPASSWORD
| true
|
bd54707c33983221c13cab3bd6e1d686f59b9c28
|
Shell
|
labittraining/elasticsearch
|
/bash_setup_scripts/webserver_setup.sh
|
UTF-8
| 2,285
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Provision an Apache web server plus Elastic Beats shippers
# (metricbeat, filebeat, auditbeat), installing only what is missing.
package_installed=(apache metricbeat filebeat auditbeat)
# add Elastic’s signing key so that the downloaded package can be verified
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
# add the repository definition to your system
echo "deb https://artifacts.elastic.co/packages/7.x/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-7.x.list
# The following code updates linux distribution and installs apache2
sudo apt-get update
for package in "${package_installed[@]}";
do
  if dpkg -l | grep "$package"
  then
    echo "$package is already installed, skipping......."
  else
    echo "$package is not installed , installing ......"
    # BUG FIX: the original wrote these tests as [ $package=apache ] —
    # a single non-empty word, which [ treats as "true", so every
    # branch matched for every package. Use an explicit '=' comparison.
    if [ "$package" = "apache" ]
    then
      echo "installaing $package"
      sudo apt-get install apache2 -y
      # Adds a simple hello-world web page to apache home
      echo '<!doctype html><html><body><h1>Hello World!</h1></body></html>' | sudo tee /var/www/html/index.html
    elif [ "$package" = "metricbeat" ]
    then
      echo "installaing $package"
      # Installs metric beat
      sudo apt install metricbeat -y
      # Configure metric beat
      ## update elasticsearch and kibana entries in metricbeat.
      echo "Configuring Metricbeat"
      sudo sed -i -e 's/#host: "localhost:5601"/host: 192.168.29.4:5601/;s/localhost:9200/192.168.29.4:9200/' /etc/metricbeat/metricbeat.yml
      ## enable apache module
      sudo metricbeat modules enable apache
      ## start metricbeat and enable it to start automatically on system boot
      sudo systemctl start metricbeat
      sudo /bin/systemctl enable metricbeat.service
    elif [ "$package" = "filebeat" ]
    then
      echo "installaing $package"
      # Installs files beat
      sudo apt install filebeat -y
    elif [ "$package" = "auditbeat" ]
    then
      echo "installaing $package"
      #Installs Auditbeat
      sudo apt install auditbeat -y
    fi
  fi;
done
| true
|
ea7757b16c5c7da4f2d167a6150096720accd54f
|
Shell
|
Freeaqingme/icinga-vagrant
|
/icinga2x-cluster/.vagrant-puppet/manifests/finalize.sh
|
UTF-8
| 475
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Vagrant provisioning finalizer for the icinga2 cluster box: fix
# icingaweb2 config ownership and remount its shared log folder.
set -e
mountIcinga2webVarLog () {
# Remount /vagrant/var/log/ with appropriate permissions since the group apache is missing initially
mount -t vboxsf -o uid=`id -u vagrant`,gid=`id -g apache`,dmode=775,fmode=664 /vagrant/icingaweb2/var/log/ /vagrant/icingaweb2/var/log/
}
fixIcingaWeb2Config () {
# Ensure the config file exists and is writable by the web server user.
touch /etc/icingaweb/config.ini
chown apache:apache /etc/icingaweb/config.ini
}
fixIcingaWeb2Config
mountIcinga2webVarLog
echo "Done."
exit 0
| true
|
29d18cc1e1580fec9595ceafe74ab2adcb038d64
|
Shell
|
jbcrail/dash-recipes
|
/dask.sh
|
UTF-8
| 2,134
| 3.640625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build a Dash/Zeal docset for Dask: clone the repo at a pinned tag,
# build the Sphinx docs in a throwaway conda env, convert them with
# doc2dash, render icons, and archive the result as Dask.tgz.
SRC=https://github.com/dask/dask
NAME=$(basename ${SRC})
# Throwaway conda env name with a random 4-char suffix to avoid clashes.
ENV=${NAME}-$(cat < /dev/urandom | base64 | head -c 4)
DOCSET=Dask
VERSION=0.19.0
REPO_DIR=$(pwd)/repos/${NAME}
DOCSET_DIR=$(pwd)/docsets/${NAME}
DOCS_DIR=${REPO_DIR}/docs
# Print a green section banner.
section()
{
GREEN='\033[0;32m'
NC='\033[0m'
printf "[%s%s%s]\\n" "${GREEN}" "${1}" "${NC}"
}
# NOTE(review): 'section "..." & { ... }' backgrounds the banner and runs
# the group in the foreground — output may interleave; '&&' was likely
# intended. Left as-is since the steps still execute in order.
section "Clone repository" & {
rm -rf "${REPO_DIR}"
git clone -q ${SRC} "${REPO_DIR}"
cd "${REPO_DIR}" || exit
git checkout ${VERSION}
}
section "Create conda environment" & {
conda create --yes --quiet -n "${ENV}" python=3
source activate "${ENV}"
}
section "Install dependencies" & {
conda install -y -q lxml=3.8.0 # required by doc2dash 2.2.0
conda install -y -q scipy
pip install --upgrade pip
pip install --quiet --no-deps -e ".[complete]"
pip install --quiet -r docs/requirements-docs.txt
pip install --quiet doc2dash
}
section "Build documentation" & {
cd "${DOCS_DIR}" || exit
make html
}
section "Build docset" & {
rm -rf "${DOCSET_DIR}"
mkdir -p "${DOCSET_DIR}"
doc2dash --quiet -n ${DOCSET} --enable-js -d "${DOCSET_DIR}" -I index.html build/html
}
section "Build icons" & {
mogrify -format png -size 16x16 -write "${DOCSET_DIR}/icon.png" source/images/dask_icon.svg
mogrify -format png -size 32x32 -write "${DOCSET_DIR}/icon@2x.png" source/images/dask_icon.svg
}
section "Write docset metadata" & {
cat <<EOF >"${DOCSET_DIR}/README.md"
${DOCSET} Dash Docset
=====
- Docset Description:
- "Versatile parallel programming with task scheduling".
- Docset Author:
- [Joseph Crail](https://github.com/jbcrail)
EOF
cat <<EOF >"${DOCSET_DIR}/docset.json"
{
"name": "${DOCSET}",
"version": "${VERSION}",
"archive": "${DOCSET}.tgz",
"author": {
"name": "Joseph Crail",
"link": "https://github.com/jbcrail"
},
"aliases": ["task-scheduling parallelism"]
}
EOF
}
section "Archive docset" & {
cd "${DOCSET_DIR}" || exit
tar --exclude='.DS_Store' -czf ${DOCSET}.tgz ${DOCSET}.docset
rm -rf ${DOCSET}.docset
}
section "Cleanup" & {
source deactivate "${ENV}"
conda env remove --yes --quiet -n "${ENV}"
}
| true
|
7b05b9b0cf712db0a6c0a201cc7bfcda756e8190
|
Shell
|
aioc/tps
|
/upgrade-scripts.sh
|
UTF-8
| 6,382
| 4.03125
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Three-way merge of TPS helper scripts into a problem directory.
# For each file, compares:
#   a = the scripts version the problem was created from (.scripts_version)
#   b = the current scripts in this source repo
#   c = the copy currently inside the problem
# and applies upstream changes, keeps local changes, or records a conflict.
# Relies on helpers from scripts/internal/util.sh: errcho, cecho,
# argument_parser, invalid_arg, are_same, recreate_dir, echo_status.
set -euo pipefail
source "scripts/internal/util.sh"
# Print CLI usage to stderr.
function usage {
errcho "Usage: upgrade-scripts.sh [options] <problem-dir>"
errcho "Options:"
errcho -e "  -h, --help"
errcho -e "  -d, --dry"
}
dry_run="false"
# Callback for argument_parser: handle one option (name in ${curr}).
function handle_option {
shifts=0
case "${curr}" in
-h|--help)
usage
exit 0
;;
-d|--dry)
dry_run="true"
;;
*)
invalid_arg "undefined option"
;;
esac
}
# Callback for argument_parser: accept exactly one positional argument,
# the problem directory.
function handle_positional_arg {
if [ -z "${problem_dir+x}" ]; then
problem_dir="${curr}"
return
fi
invalid_arg "meaningless argument"
}
argument_parser "handle_positional_arg" "handle_option" "$@"
if [ -z "${problem_dir+x}" ]; then
cecho red >&2 "Problem directory is not specified."
usage
exit 2
fi
if [ ! -d "${problem_dir}" ]; then
cecho red >&2 "Problem directory '${problem_dir}' not found"
exit 4
fi
problem_scripts="${problem_dir}/scripts"
source_scripts="scripts"
old_scripts="old"
problem_old_scripts_version_file="${problem_dir}/.scripts_version"
# Baseline version: the problem's recorded scripts version, or the very
# first commit of this repo if no record exists.
if [ -f "${problem_old_scripts_version_file}" ]; then
old_scripts_version="$(cat "${problem_old_scripts_version_file}")"
else
old_scripts_version="$(git log --pretty=format:"%H" | tail -n 1)"
fi
if ! git show -s --oneline "${old_scripts_version}" > /dev/null; then
cecho red >&2 "Problem scripts version '${old_scripts_version}' not found in this source repo"
exit 4
fi
# Return 0 iff the git repo containing $1 has no uncommitted changes.
function check_repo_is_clean {
dir="$1"
pushd "${dir}" > /dev/null 2>&1 || exit 1
ret=0
if [ -n "$(git status --porcelain)" ]; then
ret=1
fi
popd > /dev/null 2>&1
return ${ret}
}
if ! check_repo_is_clean "$(dirname ${problem_scripts})"; then
cecho red >&2 "There are uncommitted changes in problem repo"
read -p "Are you sure you want to proceed? [y/N]" res
if [ "${res}" != "y" ]; then
exit 1
else
echo
fi
fi
if ! check_repo_is_clean "$(dirname ${source_scripts})"; then
cecho red >&2 "There are uncommitted changes in source scripts repo"
read -p "Are you sure you want to proceed? [y/N]" res
if [ "${res}" != "y" ]; then
exit 1
else
echo
fi
fi
# New version label: nearest tag if one exists, otherwise current HEAD hash.
ret=0
tag="$(git describe --tags | head -1)" || ret=$?
if [ "${ret}" -eq 0 ]; then
source_current_scripts_version="${tag}"
else
source_current_scripts_version="$(git log --pretty=format:'%H' -n 1)"
fi
# List tracked regular files of the git working tree at $1.
function list_dir_files {
dir="$1"
pushd "${dir}" > /dev/null 2>&1 || exit 1
git ls-files | while read file; do
[ -f "${file}" ] && echo "${file}"
done
popd > /dev/null 2>&1
}
# Union of file paths known to the source scripts and the problem scripts.
function get_all_scripts_files {
{
list_dir_files "${source_scripts}"
list_dir_files "${problem_scripts}"
} | sort | uniq
}
has_conflicts="false"
declare -a conflict_list
conflict_index=0
# Record $1 as conflicting and mark the current row as FAIL.
function push_conflict {
status="FAIL"
has_conflicts="true"
conflict_list["${conflict_index}"]="$1"
conflict_index=$((conflict_index + 1))
}
# Run "$@" unless --dry; track per-file changed/status for the report row.
function do_action {
if ! "${dry_run}"; then
status="FAIL"
ret=0
"$@" || ret=$?
if [ ${ret} -eq 0 ]; then
changed="true"
status="OK"
fi
return ${ret}
fi
}
recreate_dir "${old_scripts}"
# Main merge loop: one report row per file in either tree.
while read file; do
c="${problem_scripts}/${file}"
b="${source_scripts}/${file}"
a="${old_scripts}/${file}"
# Materialize the baseline (a) from git; absence means the file is new.
a_exists="true"
mkdir -p "$(dirname "${a}")"
git show "${old_scripts_version}:${source_scripts}/${file}" > "${a}" 2> /dev/null || a_exists="false"
b_exists="true"
[ -f "${b}" ] || b_exists="false"
c_exists="true"
[ -f "${c}" ] || c_exists="false"
changed="false"
status="OK"
"${dry_run}" && status="SKIP"
message=""
if "${b_exists}"; then
if "${c_exists}"; then
if are_same "${b}" "${c}"; then
message="not changed"
elif "${a_exists}" && are_same "${a}" "${b}"; then
message="keeping changes in problem"
elif "${a_exists}" && are_same "${a}" "${c}"; then
message="applying changes in problem"
do_action cp "${b}" "${c}"
else
push_conflict "${file}"
message="CONFLICT! both problem and script source changed this file"
fi
else
if ! "${a_exists}"; then
message="added as new file"
do_action mkdir -p "$(dirname "${c}")"
do_action cp "${b}" "${c}"
elif are_same "${a}" "${b}"; then
message="keeping deleted"
else
push_conflict "${file}"
message="CONFLICT! deleted file in problem has been updated"
fi
fi
else
if "${c_exists}"; then
if ! "${a_exists}"; then
message="keeping extra file in problem"
elif are_same "${a}" "${c}"; then
message="file deleted because it is deleted in source"
do_action rm -f "${c}"
else
push_conflict "${file}"
message="CONFLICT! changed file in problem has been deleted"
fi
else
cecho red >&2 "Unreachable code reached!"
exit 1
fi
fi
# Report row: file name, status box, and colored explanation.
printf >&2 "%-40s" "${file}"
BOX_PADDING=10
echo_status "${status}"
if "${changed}"; then
cecho yellow >&2 "${message}"
else
errcho "${message}"
fi
done <<< "$(get_all_scripts_files)"
# Record the new scripts version unless conflicts make that undesirable.
if ! "${dry_run}"; then
errcho
do_update_version="true"
if "${has_conflicts}"; then
do_update_version="false"
read -p "There are some conflicts. Should the .scripts_version file be updated? [y/N]" res
if [ "${res}" == "y" ]; then
do_update_version="true"
fi
fi
if "${do_update_version}"; then
echo "${source_current_scripts_version}" > "${problem_old_scripts_version_file}"
cecho yellow "updated .script_version file in problem to '${source_current_scripts_version}'"
fi
fi
errcho
if "${has_conflicts}"; then
cecho red >&2 "Please resolve the following conflicts now!"
sleep 0.1
for file in "${conflict_list[@]}"; do
echo "${file}"
done
exit 10
else
cecho green >&2 "OK"
fi
| true
|
45023e608a915888d4b968f8f9b7707ea5f0cb16
|
Shell
|
rgabriana/MyStuff
|
/nagios/em_ssh_tunnel.sh
|
UTF-8
| 1,098
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Toggle the open_ssh_tunnel_to_cloud boolean for one em_instance row.
# Usage: em_ssh_tunnel.sh <em_instance.id>
if [ -z "${1:-}" ]; then
    echo "First Argument should be a valid id from the em_instance table"
    exit 1
fi
EMID=$1
# Guard: EMID is interpolated directly into SQL text below, so reject
# anything that is not a plain positive integer (SQL-injection guard).
case "$EMID" in
    ''|*[!0-9]*)
        echo "First Argument should be a valid id from the em_instance table"
        exit 1
        ;;
esac
RDB=emscloud
PSQL="/usr/bin/psql --username=DB1 --host=DB1 --dbname=${RDB} --no-align --quiet --tuples-only"
#SSHM="/usr/bin/ssh ops@cloudui.enlightedcloud.net"
SQL="SELECT count(*) from em_instance WHERE id=$EMID"
EXISTS=$($PSQL --command="${SQL}")
if [ "$EXISTS" = 1 ]; then
    SQL="SELECT open_ssh_tunnel_to_cloud from em_instance WHERE id=$EMID"
    SSHSTATUS=$($PSQL --command="${SQL}")
    # Flip the boolean: 't' -> false, 'f' -> true.
    if [ "$SSHSTATUS" = "t" ]; then
        NEWSTATUS="false"
    elif [ "$SSHSTATUS" = "f" ]; then
        NEWSTATUS="true"
    else
        echo "Could not determine ssh tunnel status of id: $EMID"
        exit 1
    fi
    SQL="UPDATE em_instance SET open_ssh_tunnel_to_cloud=${NEWSTATUS} WHERE id=${EMID}"
    $PSQL --command="${SQL}"
    # Re-read the row to confirm the update actually took effect.
    SQL="SELECT count(*) from em_instance WHERE id=$EMID AND open_ssh_tunnel_to_cloud=${NEWSTATUS}"
    RESULT=$($PSQL --command="${SQL}")
    if [ "$RESULT" = 1 ]; then
        echo "Updated EM id: $EMID open_ssh_tunnel_to_cloud to ${NEWSTATUS}"
    fi
else
    echo "Could not find EM with id: $EMID"
fi
| true
|
4dfcc7e47fb0d5ea3310bcec7d65243b1fccfd37
|
Shell
|
mlaanderson/homebridge-firebase-common
|
/build-bundle.sh
|
UTF-8
| 808
| 3.65625
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/bash
# Bundle index.js into homebridge-firebase-common.bundle.js with
# browserify, optionally post-processing with uglifyjs.
#   --no-min     skip compression
#   --no-mangle  skip identifier mangling
#   --debug      skip both (plain, readable bundle)
MINIFY=1
MANGLE=1
MINCMD="uglifyjs --mangle --compress -- -"

# Parse flags; any unknown argument prints usage and aborts.
while [[ $# -ge 1 ]]
do
    case "$1" in
        --no-min)
            MINIFY=0
            ;;
        --no-mangle)
            MANGLE=0
            ;;
        --debug)
            MINIFY=0
            MANGLE=0
            ;;
        *)
            echo Usage: $0 [--no-min][--no-mangle]
            exit 1
            ;;
    esac
    shift
done

# Pick the post-processing command matching the chosen flags; the
# default MINCMD already covers the mangle+compress case.
if (( MINIFY == 1 && MANGLE == 1 )); then
    echo "Already set" >> /dev/null
elif (( MINIFY == 1 )); then
    MINCMD="uglifyjs --compress -- -"
elif (( MANGLE == 1 )); then
    MINCMD="uglifyjs --mangle -- -"
else
    MINCMD="cat -"
fi

echo Creating the bundle
browserify -s homebridge index.js | ${MINCMD} > homebridge-firebase-common.bundle.js
echo DONE
| true
|
fffb270bebe856e3fa4eb9c886cf1e8c76e2f28e
|
Shell
|
rtpwilliams/DSCI512_RNAseqAnalyzers
|
/execute_RNAseq_pipeline.sh
|
UTF-8
| 1,209
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# SLURM submission wrapper: runs the RNA-seq analyzer pipeline on the
# Summit cluster with $SLURM_NTASKS worker threads.
#SBATCH --job-name=test_RNAseq_pipeline
#SBATCH --nodes=1
#SBATCH --ntasks=24 # modify this number to reflect how many cores you want to use (up to 24)
#SBATCH --partition=shas-testing #modify this to reflect which queue you want to use. Options are 'shas' and 'shas-testing'
#SBATCH --qos=testing # modify this to reflect which queue you want to use. Options are 'normal' and 'testing'
#SBATCH --time=0:29:00 # modify this to reflect how long to let the job go.
#SBATCH --output=log_RNAseq_pipe_%j.txt
## Source software
source /scratch/summit/<eID>@colostate.edu/activate.bashrc
######### CHANGE <eID> TO YOUR EID: ############
## execute the RNA-seq_pipeline
bash RNAseq_analyzer_181117.sh ../01_input/metadata_aceticAcid_subset.txt $SLURM_NTASKS
######### MODIFY the SECOND argument to point to YOUR metadata.file #########
## OR, you can use a python script
#python RNAseq_analyzer_181011.py ../01_input/metadata_aceticAcid_subset.txt $SLURM_NTASKS
## clean up by zipping .fastq files and deleting extra files
#bash RNAseq_cleanup_mouse_181011.sh ../04_testing/metadata_mouse.txt
######### modify the SECOND argument to point to YOUR metadata.file #########
| true
|
97e966483f70c86f26f02a86fb28a99254bfca28
|
Shell
|
petronny/aur3-mirror
|
/python-mongoengine-git/PKGBUILD
|
UTF-8
| 870
| 3.125
| 3
|
[] |
no_license
|
# Contributor: Dmitry Shapovalov <dmitry at 0fe dot ru>
# PKGBUILD for an AUR git-snapshot package of mongoengine; makepkg
# sources this file and calls build().
pkgname=python-mongoengine-git
pkgver=20130221
pkgrel=1
pkgdesc="A Python Object-Document-Mapper for working with MongoDB, git version"
arch=(any)
url="http://hmarr.com/mongoengine/"
license=('MIT')
depends=('python-pymongo')
makedepends=('setuptools' 'git')
options=(!emptydirs)
_gitroot="https://github.com/MongoEngine/mongoengine.git"
_gitname="mongoengine"
# Clone (or update) the upstream repo, copy it to a clean build dir,
# and install into $pkgdir with setuptools.
build() {
cd "$srcdir"
msg "Connecting to GIT server...."
# Reuse an existing checkout if present; otherwise clone fresh.
if [ -d $_gitname ] ; then
cd $_gitname && git pull origin
msg "The local files are updated."
else
git clone $_gitroot $_gitname
fi
msg "GIT checkout done or server timeout"
msg "Starting make..."
# Build from a pristine copy so repeated makepkg runs stay clean.
rm -rf "$srcdir/$_gitname-build"
cp -R "$srcdir/$_gitname" "$srcdir/$_gitname-build"
cd "$srcdir/$_gitname-build"
python setup.py install --root=$pkgdir || return 1
}
| true
|
c93c9b9287c96b7e6b1b3f8c0ddea21650b934a0
|
Shell
|
discoverygarden/secure_drupal_file
|
/secure_drupal_file.sh
|
UTF-8
| 2,248
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
# This script will lock down the drupal dirs and make them secure
# Sets ownership to <owner>:<apache-group> and applies the permission
# modes configured in env.sh (ROOT_*_PERM for code, FILE_*_PERM for
# sites/*/files), then clears Drupal caches.
# {{{ set defaults
if [ -f "env.sh" ]
then
source env.sh
else
echo -e "${red}Copy default.env.sh to env.sh and set accordingly${NC}"
exit 1
fi
# }}}
# {{{ setupUsers()
# Detect the web-server group (www-data or apache) and pick the file
# owner: $1 if given, otherwise root.
setupUsers()
{
# set apache user
getent passwd www-data > /dev/null
if [ $? -eq "0" ]; then
APACHE_USER="www-data"
else
getent passwd apache > /dev/null
if [ $? -eq "0" ]; then
APACHE_USER="apache"
fi
fi
#set owner
if [ -n "$1" ]; then
DRUPAL_OWNER="$1"
else
DRUPAL_OWNER=root
fi
}
# }}}
# {{{ sanityCheck()
# Abort early if DRUPAL_DIR, APACHE_USER or DRUPAL_OWNER is invalid.
sanityCheck()
{
if [ ! -d $DRUPAL_DIR ]; then
echo "DRUPAL_DIR not set properly recheck before running script"
exit 1
fi
if [ -z $APACHE_USER ]; then
echo "APACHE_USER not set properly"
exit 1
fi
getent passwd $DRUPAL_OWNER > /dev/null
if [ $? -ne "0" ]; then
echo "DRUPAL_OWNER does not exist on this server"
exit 1
fi
echo -e "${green}User set to $DRUPAL_OWNER and Group set to $APACHE_USER${NC}"
}
# }}}
# {{{ lockItDown()
# Apply ownership and permissions: code tree gets ROOT_*_PERM,
# sites/*/files gets FILE_*_PERM, settings.php becomes read-only (440),
# install.php is renamed out of the way, then Drupal caches are cleared.
lockItDown()
{
test -f $DRUPAL_DIR/install.php && mv $DRUPAL_DIR/install.php $DRUPAL_DIR/orig.install.bak
echo -e "${green}Setting ownership to $DRUPAL_OWNER:$APACHE_USER${NC}"
chown -RL $DRUPAL_OWNER:$APACHE_USER $DRUPAL_DIR
echo -e "${green}Setting base permissions to dir $ROOT_DIR_PERM and file $ROOT_FILE_PERM${NC}"
# Only touch entries whose mode is wrong; skip the user-upload trees.
find $DRUPAL_DIR -type d -not -perm $ROOT_DIR_PERM -not -path '*/sites/*/files/*' -exec chmod $ROOT_DIR_PERM '{}' \;
find $DRUPAL_DIR -type f -not -perm $ROOT_FILE_PERM -not -path '*/sites/*/files/*' -exec chmod $ROOT_FILE_PERM '{}' \;
chmod 400 $DRUPAL_DIR/orig.install.bak
find $DRUPAL_DIR/sites -type d -name files -exec chmod $FILE_DIR_PERM '{}' \;
echo -e "${green}Setting sites/*/files permissions to dir $FILE_DIR_PERM and file $FILE_FILE_PERM${NC}"
for d in $DRUPAL_DIR/sites/*/files
do
find $d -type d -not -perm $FILE_DIR_PERM -exec chmod $FILE_DIR_PERM '{}' \;
find $d -type f -not -perm $FILE_FILE_PERM -exec chmod $FILE_FILE_PERM '{}' \;
done
chmod 440 $DRUPAL_DIR/sites/*/settings.php
drush -r $DRUPAL_DIR cc all
echo -e "${green}Complete${NC}"
}
# }}}
setupUsers $1
sanityCheck
lockItDown
| true
|
250d1d090e6775758077968b9135d020dc4b72b8
|
Shell
|
THAO1999/Linux
|
/ÔN----/Linux_2017_2018/Midterm/bai1.sh
|
UTF-8
| 1,008
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Menu-driven exercise script (prompts/messages are in Vietnamese).
# y1: print working directory.
y1(){
pwd
}
# y2: list entries whose name contains the letter 'a' (two equivalent ways).
y2(){
ls *a*
# or, equivalently, with grep:
ls | grep ".*a.*"
}
# y3: count entries in the current directory.
y3(){
count=0
for fi in `ls`
do
count=$(($count + 1))
done
echo "So file la $count"
}
# y4: print lines of /etc/group containing "th".
y4() {
dir="/etc/group"
cat $dir | grep -E '.*th.*'
}
# y5: write /etc entries containing 'a' or 'b' to outputy5.txt.
y5() {
ls /etc | grep -E '.*[a|b]+.*' > outputy5.txt
}
# y6: show the first 3 lines of /etc/passwd if it exists.
y6() {
if [ -f /etc/passwd ]
then
head -n 3 /etc/passwd
else
echo "Tep khong ton tai."
fi
}
# y7: count processes.
# NOTE(review): iterating over unquoted $(ps) counts whitespace-separated
# words of the ps output, not lines/processes — result is inflated.
y7() {
count=0
for p in `ps`
do
count=$(($count +1))
done
echo "So tien trinh la: $count"
}
# y8: print the menu (option labels are in Vietnamese).
y8() {
echo "        MENU"
echo "0. thoat"
echo "1. hien thi thu muc lam viec hien tai"
echo "2. hien thi file co chua chu a"
echo "3. diem so luong tep va thu muc"
echo "4. in ra cac dong chua xau 'th' "
echo "5. tim kiem file co a hoac b it nhat 1 lan"
echo "6. kiem tra 1 file ect/passwd co ton tai khong."
echo "7. dem tong so tien trinh co trong he thong"
}
# Main loop: show menu, dispatch on user choice, 0 exits.
main(){
while true
do
y8
read -p "Chon chac nang: " a
case $a in
1)
y1;;
2)
y2;;
3)
y3;;
4)
y4;;
5)
y5;;
6)
y6;;
7)
y7;;
0)
echo "Good Bye..."; break;;
esac
done
}
main
| true
|
d9f533d2e70886c63daee602cdaa5a62ccf8108c
|
Shell
|
ro3i/dotfiles_
|
/etc/init/osx/create_private.sh
|
UTF-8
| 686
| 3.40625
| 3
|
[
"DOC",
"MIT"
] |
permissive
|
#!/bin/bash
# Link the dotfiles Karabiner-Elements configuration into
# ~/.config/karabiner (macOS only). Uses helpers (is_osx, has,
# log_fail) from the dotfiles vital library.

# Abort immediately on any error or interrupt.
trap 'echo Error: $0:$LINENO stopped; exit 1' ERR INT
set -eu

# Load vital library that is most important and
# constructed with many minimal functions
# For more information, see etc/README.md
. "$DOTPATH"/etc/lib/vital.sh

# This script is only supported with OS X
if ! is_osx; then
    log_fail "error: this script is only supported with osx"
    exit 1
fi

karabiner_bin="/Applications/Karabiner-Elements.app/Contents/MacOS/Karabiner-Elements"
conf_dir="$HOME/.config/karabiner"

# Only seed the config the first time; an existing directory is left alone.
if has "$karabiner_bin"; then
    if [ ! -d "$conf_dir" ]; then
        mkdir -p "$conf_dir"
        ln -sf "$DOTPATH/etc/gui/osx/karabiner.json" "$conf_dir/karabiner.json"
    fi
fi
| true
|
0f6dbe46e02846238d5227be577dcddd1bec20e9
|
Shell
|
dirigonaut/video-sync
|
/develop/shell/letsencrypt.sh
|
UTF-8
| 1,083
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Obtain Let's Encrypt certificates for Video-Sync.
#   $1 - domain name to certify
#   $2 - if non-empty, dry run: only hit the staging endpoint
# Flow: try staging first; on success (and not dry), request real certs,
# then symlink them into /etc/video-sync/.
DOMAIN=$1
DRY=$2
add-apt-repository ppa:certbot/certbot -y
apt-get install certbot software-properties-common -y
apt-get update
# Staging request first so rate limits on the real endpoint are not burned.
certbot certonly --staging --standalone --agree-tos --preferred-challenges http --register-unsafely-without-email --renew-by-default -d $DOMAIN
RC=$?
if [[ $RC == 0 ]]; then
if [[ -z $DRY ]]; then
echo "Staging trial worked getting offical certs now."
certbot certonly --standalone --agree-tos --preferred-challenges http --register-unsafely-without-email --renew-by-default -d $DOMAIN
certbot renew --dry-run
else
echo "Dry Run do not request certs"
fi
echo "Symlink certs for Video-Sync"
# Paths reported by certbot for the issued certificate and key.
CERT="$(sudo certbot certificates | grep 'Certificate Path:' | cut -d: -f2)"
KEY="$(sudo certbot certificates | grep 'Private Key Path:' | cut -d: -f2)"
if [[ ! -z $CERT ]]; then
# Replace any previous links so Video-Sync always sees current certs.
rm /etc/video-sync/certificate.crt
rm /etc/video-sync/certificate.key
ln -s $CERT /etc/video-sync/certificate.crt
ln -s $KEY /etc/video-sync/certificate.key
fi
else
echo "Let's encrypt failed staging call"
exit 1
fi
| true
|
ac36399e1a89ca64ca7235573c3fdb2f87d5e193
|
Shell
|
tommylutz/tmux-rc-generic
|
/tmux-rc-generic
|
UTF-8
| 4,866
| 4.40625
| 4
|
[] |
no_license
|
#!/bin/bash
######## tmux-rc-generic ##########
# This script is designed to help you set up servers running inside a shared
# tmux session, started up by the init.d system. The canonical example usage
# of this script is to manage a minecraft server.
#
# To use this script, call it from your init.d script. Export the following
# variables:
#
# Variable      Meaning
# SVC_DIR       A path that we should cd to before bringing up the server
# SVC_UP_CMD    The command used to bring up the server
# SVC_NAME      A unique string that can be used to pgrep -f the process of the server.
#               This is used to detect whether the server is up or down.
# SVC_USER      The user the server should run as
# SVC_TMUX      The name of the tmux socket in /tmp/
# SVC_STOP_CMD  Characters to send to the tmux terminal to stop the server.
#               For minecraft, this will be "save-all^Mstop"
# SVC_GROUP     Optional unix group that will have access to directly manipulate
#               the tmux session of the server using
#               $ tmux -S /tmp/$SVC_TMUX attach
#
# After exporting the above variables, the client script should invoke this script
# as follows:
#   /usr/sbin/tmux-rc-generic "$@"

# Print a message and abort.
die()
{
    echo "$@"
    exit 1
}

# Verify every required SVC_* variable is set before doing anything.
validate_env_vars()
{
    if [[ ! -d "$SVC_DIR" ]]; then
        die "Must set SVC_DIR to a valid directory"
    fi
    if [[ -z "$SVC_UP_CMD" ]]; then
        die "Must set SVC_UP_CMD to a valid command"
    fi
    if [[ -z "$SVC_NAME" ]]; then
        die "Must set SVC_NAME to a unique string for the process"
    fi
    if [[ -z "$SVC_USER" ]]; then
        die "Must set SVC_USER to a unix user"
    fi
    if [[ -z "$SVC_TMUX" ]]; then
        die "Must set SVC_TMUX to the desired socket name in /tmp"
    fi
    if [[ -z "$SVC_STOP_CMD" ]]; then
        die "Must set SVC_STOP_CMD to the key sequence needed to kill the server"
    fi
}

######################################################
ME=`whoami`
TMUX=tmux

# Run a command as $SVC_USER (directly if we already are that user).
as_user() {
    if [ "$ME" = "$SVC_USER" ] ; then
        echo "Running as me..."
        bash -c "$1"
    else
        echo "Running as su - $SVC_USER"
        su - "$SVC_USER" -c "$1"
    fi
}

#Inputs  : Username, Service name
#Returns : 1 if the service is running, 0 if not
# (note: inverted relative to normal shell success/failure conventions)
service_is_running()
{
    if pgrep -u "$1" -f "$2" > /dev/null; then
        return 1
    else
        return 0
    fi
}

#Inputs: Number of seconds (approx) to wait
#Returns: 0 if service exited, 1 if timed out waiting
wait_for_service_to_exit()
{
    typeset TIMEOUT=$1
    if [ -z "$TIMEOUT" ]; then
        TIMEOUT=10
    fi
    # Poll every 0.1s, so convert seconds to tick count.
    TIMEOUT=$((TIMEOUT * 10))
    typeset WAITING=1
    while [ $WAITING -eq 1 -a $TIMEOUT -ne 0 ]; do
        service_is_running "$SVC_USER" "$SVC_NAME"
        WAITING=$?
        if [ $WAITING -eq 1 ]; then
            sleep 0.1
        fi
        TIMEOUT=$((TIMEOUT - 1))
    done
    return $WAITING
}

# Start the server inside a detached tmux session (no-op if running).
svc_start() {
    service_is_running "$SVC_USER" "$SVC_NAME"
    if [ $? -eq 1 ]; then
        echo "Service [$SVC_NAME] as user [$SVC_USER] is already running"
        return
    else
        echo "Starting [$SVC_NAME] as user [$SVC_USER]"
        as_user "cd $SVC_DIR && $TMUX -S /tmp/$SVC_TMUX new -d \"$SVC_UP_CMD\""
        if [[ ! -z "$SVC_GROUP" ]]; then
            # Let members of SVC_GROUP attach to the tmux socket.
            as_user "chgrp $SVC_GROUP /tmp/$SVC_TMUX"
        fi
    fi
    service_is_running "$SVC_USER" "$SVC_NAME"
    if [ $? -eq 1 ]; then
        echo "Success: service [$SVC_NAME] running as user [$SVC_USER]"
        echo "Run tmux -S /tmp/$SVC_TMUX attach to interface with the server directly"
    else
        echo "Failed to run [$SVC_NAME] as user [$SVC_USER]"
    fi
}

# Ask the server to stop by sending SVC_STOP_CMD, then wait for exit.
svc_stop() {
    service_is_running "$SVC_USER" "$SVC_NAME"
    if [ $? -eq 1 ]; then
        echo "Stopping $SVC_NAME"
        svc_send_keys "$SVC_STOP_CMD"
    else
        echo "$SVC_NAME was not running."
    fi
    wait_for_service_to_exit 5
    if [ $? -eq 1 ]; then
        echo "Error! $SVC_NAME could not be stopped."
    else
        # fix: original echoed "$SERVICE", a variable that was never
        # defined anywhere, printing " is stopped."; use $SVC_NAME.
        echo "$SVC_NAME is stopped."
    fi
}

# Send literal keys (wrapped in newlines) to the server's tmux session.
svc_send_keys() {
    typeset KEYS="
$@
"
    $TMUX -S /tmp/$SVC_TMUX send-keys -l "$KEYS"
}

validate_env_vars

#Start-Stop here
case "$1" in
    start)
        svc_start
        ;;
    stop)
        svc_stop
        ;;
    restart)
        svc_stop
        svc_start
        ;;
    stat)
        service_is_running "$SVC_USER" "$SVC_NAME"
        if [ $? -eq 1 ]; then
            echo "$SVC_NAME is running."
        else
            echo "$SVC_NAME is not running."
        fi
        ;;
    command)
        if [ $# -gt 1 ]; then
            shift
            echo "Sending keys '$@'"
            svc_send_keys "$@"
        else
            echo "Must specify server command (try 'help'?)"
        fi
        ;;
    *)
        echo "Usage: $0 {start|stop|status|restart|"\
             "command \"server command\"}"
        exit 1
        ;;
esac
exit 0
| true
|
f8df88fb967a7370b50e493ca220d636048e8543
|
Shell
|
epochwolf/dotfiles
|
/zshrc
|
UTF-8
| 2,119
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
# Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
#ZSH_THEME="robbyrussell"
#ZSH_THEME="fishy"
#ZSH_THEME="nicoulaj"
ZSH_THEME="epochwolf2"
# environmental variables
#export CC=/usr/bin/gcc-4.2
export PATH="$PATH:/usr/local/bin:/usr/local/sbin:${HOME}/.bin:${HOME}/.rvm/bin"
#export JAVA_HOME=$(/usr/libexec/java_home)
# Example aliases
alias zshconfig="subl ~/.zshrc"
alias ohmyzsh="subl ~/.oh-my-zsh"
# git shortcuts: branch checkouts and rebases
alias gk="gitk 2> /dev/null"
alias gcom="git checkout master"
alias gcow="git checkout work"
alias gcot="git checkout test"
alias gcos="git checkout staging"
alias grbm="git rebase master"
alias grbw="git rebase work"
alias grb="git rebase"
# rails/rake shortcuts; noglob stops zsh expanding rake task brackets
alias rake="noglob rake"
alias rat="rake test"
alias ratu="rake test:units"
alias rati="rake test:integration"
alias ratf="rake test:functionals"
alias be="bundle exec"
alias berc="bundle exec rails console"
# Jump to the active RVM gemset's gems directory.
cdgems(){
if [[ -n "$GEM_HOME" ]] ; then
cd "$GEM_HOME/gems"
else
echo "RVM not activated."
fi
}
# alias sshk="ssh root:23817d888fnalidu@kazan.epochwolf.com"
# Set to this to use case-sensitive completion
# CASE_SENSITIVE="true"
# Comment this out to disable weekly auto-update checks
# DISABLE_AUTO_UPDATE="true"
# Uncomment following line if you want to disable colors in ls
# DISABLE_LS_COLORS="true"
# Uncomment following line if you want to disable autosetting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment following line if you want red dots to be displayed while waiting for completion
COMPLETION_WAITING_DOTS="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
#plugins=(git rvm rails3 terminalapp osx)
plugins=(git rvm rails osx)
source $ZSH/oh-my-zsh.sh
# Machine-local overrides and RVM, if present.
[[ -s "${HOME}/.zshrc-local" ]] && source "${HOME}/.zshrc-local"
[[ -s "${HOME}/.rvm/scripts/rvm" ]] && source "${HOME}/.rvm/scripts/rvm"
| true
|
0a2e5e5efebe76631c7411747da04ef243b05746
|
Shell
|
dancehair/myWork
|
/script/logCleaner.sh
|
UTF-8
| 3,120
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
#************************************************#
# log_cleaner.sh
# written by kh902.kim
# Jan 13, 2015
#************************************************#
targetDate=`date '+%Y%m%d' -d '1 month ago'`
defaultDir="/mnt/logs/"
auth ()
{
dirRm $defaultDir"tauth/"
}
proxy ()
{
dirRm $defaultDir"nginx/"
logRm $defaultDir"nginx/"
}
ts ()
{
dirRm $defaultDir"tapi/"
dirRm $defaultDir"tapi/translation/"
logRm $defaultDir"ts_log/"
}
dirRm ()
{
echo ""
echo "$1 dir clean start."
for date in `ls $1 | grep ^20`
do
if [ "$date" -lt "$targetDate" ]
then
echo $1$date"/"
rm -rf $1$date
elif [ "$date" -eq "$targetDate" ]
then
break
fi
done
}
logRm ()
{
echo ""
if [ "$1" = $defaultDir"nginx/" ]
then
echo "$1access.log clean start."
accessLogTargetDate=`date '+%Y%m%d00' -d '1 month ago'`
for date in `ls $1 | grep access.log.20 | sed 's/access.log.//g'`
do
if [ "$date" -lt "$accessLogTargetDate" ]
then
echo "access.log.$date"
rm $1"access.log.$date"
elif [ "$date" -gt "$accessLogTargetDate" ]
then
break
fi
done
elif [ "$1" = $defaultDir"ts_log/" ]
then
echo "ts log clean start"
for engine in `ls $1`
do
echo $1$engine"/"
for date in `ls $1$engine | sed -e 's/MT_//g' -e 's/.log//g'`
do
if [ "$date" -lt "$targetDate" ]
then
echo "MT_$date.log"
rm $1$engine"/MT_"$date".log"
elif [ "$date" -eq "$targetDate" ]
then
break
fi
done
done
fi
}
hostname=`hostname`
biz=`grep $hostname /home/stpusr/log_cleaner.sh | awk '{print $2}'`
[ -z "$biz" ] && { echo "Check hostname. log_cleaner is not started.";exit 0; }
echo "$biz server log clean"
case $biz in
"AUTH") auth;;
"PROXY") proxy;;
"TS") ts;;
*) echo "not AUTH, PROXY, TS.";;
esac
echo ""
echo "nagios log clean start."
sudo find /home/nagios/jnrpeagent/log/biz -name Biz.Oss_JNRPE.log.* -mtime +30 -exec rm {} \;
echo "nagios log clean end."
echo ""
echo "log_cleaner complete."
# PRD
# AUTH auth 01 ip-100-20-4-22
# AUTH auth 02 ip-100-20-5-22
# REPO chefrepo ip-100-20-1-11
# MONITOR monitor 01 ip-100-20-1-12
# PROXY proxy 01 ip-100-20-4-21
# PROXY proxy 02 ip-100-20-5-21
# PROXY cn-proxy 01 ip-100-20-4-24
# PROXY cn-proxy 02 ip-100-20-5-24
# RMC rmc 01 ip-100-20-1-13
# MEMCACHE memcached 01 ip-100-20-4-50
# MEMCACHE memcached 02 ip-100-20-5-50
# MEMCACHE cn-memcached 01 ip-100-20-4-51
# MEMCACHE cn-memcached 02 ip-100-20-5-51
# TS DE 01 ip-100-20-4-33
# TS DE 02 ip-100-20-5-33
# TS ES 01 ip-100-20-4-34
# TS ES 02 ip-100-20-5-34
# TS FR 01 ip-100-20-4-43
# TS FR 02 ip-100-20-5-43
# TS IT 01 ip-100-20-4-44
# TS IT 02 ip-100-20-5-44
# TS JPCN 01 ip-100-20-4-37
# TS JPCN 02 ip-100-20-5-37
# TS JPUS_CNUS 01 ip-100-20-4-38
# TS JPUS_CNUS 02 ip-100-20-5-38
# TS KR 01 ip-100-20-4-30
# TS KR 02 ip-100-20-5-30
# TS PT 01 ip-100-20-4-45
# TS PT 02 ip-100-20-5-45
# TS RU 01 ip-100-20-4-46
# TS RU 02 ip-100-20-5-46
# TS TRFA 01 ip-100-20-4-42
# TS TRFA 02 ip-100-20-5-42
# TS ARHI 01 ip-100-20-4-40
# TS ARHI 02 ip-100-20-5-40
| true
|
80174f749b7a8d8026dedb72b3cb8dae14762c76
|
Shell
|
ytyaru0/RaspberryPi.Home.Root.20180318143826
|
/src/script/sh/_command/mkrepo
|
UTF-8
| 1,986
| 4.0625
| 4
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
# ----------------------------------------------------------------------------
# リポジトリ作成
# リポジトリのテンプレートをコピーする。
# 出力: /tmp/work/repos/repo.yyyyMMddHHmmss/
# 作成日: 2019-02-16
# ----------------------------------------------------------------------------
# テンプレートがあるディレクトリパスを標準出力する
TplDir() {
echo "${HOME}/root/db/template/_command/mkrepo/"
}
# 出力先ディレクトリパスを標準出力する
OutDir() {
echo /tmp/work/repos/
}
# 指定ディレクトリ直下のディレクトリ一覧を標準出力する
DirList() {
echo $(cd "${1}"; ls -l | grep ^d | awk '{print $9}')
}
# 出力先(カレントディレクトリ)にテンプレートと同名のディレクトリが存在した場合はテンプレートをリネームする
#Rename() {
# [ -f "./${1}" ] && { local name="${1}_"`date +%Y%m%d%H%M%S`; mv "./${1}" "./${name}"; echo "${name}"; } || echo "${1}";
#}
SelectTemplate() {
local template=default
# local dirs=`cd /home/pi/root/_meta/res/mkrepo/templates/; ls -l | grep ^d | awk '{print $9}'`
# local dirs=$(cd $(TplDir); ls -l | grep ^d | awk '{print $9}')
local dirs=$(DirList $(TplDir))
select i in ${dirs}; do [ -n "$i" ] && { template=$i; break; }; done
echo "${template}"
}
# テンプレートをコピー&リネームする
# 出力: /tmp/work/repos/repo.yyyyMMddHHmmss/
MakeDir() {
# local tpl_name=`Rename "${1}"`
# local template=`TplDir`"${tpl_name}"
# local template="/home/pi/root/_meta/res/mkrepo/templates/${tpl_name}"
# cp -r "${template}" "./"
# mv "./${tpl_name}" "repo.`date +%Y%m%d%H%M%S`"
local tpl_name="${1}"
local template=`TplDir`"${tpl_name}"
mkdir -p "`OutDir`"
cp -r "${template}" "`OutDir`"
mv "`OutDir`${tpl_name}" "`OutDir`repo.`date +%Y%m%d%H%M%S`"
}
Run() {
local tpl_name=`SelectTemplate`
MakeDir "${tpl_name}"
}
Run
| true
|
b84db13f042b91eeee160de052b4a185fcd0b770
|
Shell
|
teto/lib_bash
|
/lib_ssh.sh
|
UTF-8
| 144
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
#
function ssh_get_client_ip()
{
if [ ! -z "$SSH_CLIENT" ]; then
local ip=$( echo "$SSH_CLIENT" | cut -d' ' -f1)
echo $ip
fi
}
| true
|
ca24364ad01d409d7c20dd421ad730d92e79633d
|
Shell
|
mlhackergz/torch-quiver
|
/configure
|
UTF-8
| 1,843
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
set -e
. ./auto/cmake
CUDA_HOME=/usr/local/cuda
CUDA_ARCHITECTURES="61" # FIXME: detect
HAVE_CUDA=0
if [ -f $CUDA_HOME/include/cuda_runtime.h ]; then
HAVE_CUDA=1
fi
ENABLE_CUDA=${HAVE_CUDA}
BUILD_TORCH_EXTENSION=1 # FIXME: disable by default
parse_args() {
for i in "$@"; do
case $i in
--enable-trace)
ENABLE_TRACE=1
;;
--enable-torch)
BUILD_TORCH_EXTENSION=1
;;
--lib)
BUILD_LIB=1
;;
--tests)
BUILD_TESTS=1
;;
--benchmarks)
BUILD_BENCHMARKS=1
;;
-v)
VERBOSE=1
;;
*)
echo "unknown argument $i"
exit 1
;;
esac
done
}
config() {
PYTHON=$(which python3)
mkdir -p srcs/cmake/generated
$PYTHON ./srcs/config/config.py >srcs/cmake/generated/torch.cmake
cat srcs/cmake/generated/torch.cmake
}
add_cmake_prefixes() {
add_cmake_prefix $HOME/local/gtest
}
add_cmake_flags() {
if [ ${ENABLE_CUDA} -eq 1 ]; then
add_cmake_flag ENABLE_CUDA ${ENABLE_CUDA}
add_cmake_flag CUDA_TOOLKIT_ROOT_DIR $CUDA_HOME
add_cmake_flag CMAKE_CUDA_COMPILER $CUDA_HOME/bin/nvcc
fi
add_cmake_flag BUILD_TORCH_EXTENSION ${BUILD_TORCH_EXTENSION}
add_cmake_flag BUILD_LIB ${BUILD_LIB}
add_cmake_flag BUILD_TESTS ${BUILD_TESTS}
add_cmake_flag BUILD_BENCHMARKS ${BUILD_TESTS}
add_cmake_flag CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH}
add_cmake_flag QUIVER_ENABLE_TRACE $ENABLE_TRACE
add_cmake_flag CMAKE_VERBOSE_MAKEFILE ${VERBOSE}
add_cmake_flag CMAKE_EXPORT_COMPILE_COMMANDS ${VERBOSE}
add_cmake_flag CUDA_ARCHITECTURES ${CUDA_ARCHITECTURES}
}
main() {
config
parse_args $@
config_cmake
}
main $@
| true
|
a96449b0ead74c5beebcf0d774226213e6ed3b61
|
Shell
|
s-shin/bach
|
/lib/_session.sh
|
UTF-8
| 427
| 3.546875
| 4
|
[] |
no_license
|
# shellcheck disable=SC2148
: ${BACH_SESSION_DIR:="${HOME}/.bach/session/$$"}
mkdir -p "$BACH_SESSION_DIR"
bach.session.set() {
key="$1"
value="${2:-}"
echo -n "$value" > "${BACH_SESSION_DIR}/${key}"
}
bach.session.get() {
key="$1"
cat "${BACH_SESSION_DIR}/${key}"
}
bach.session.isset() {
key="$1"
[[ -f "${BACH_SESSION_DIR}/${key}" ]]
}
bach.session.del() {
key="$1"
rm "${BACH_SESSION_DIR}/${key}"
}
| true
|
a691323995a43783fcc20c352186ac74234f454c
|
Shell
|
urmyfaith/sinstallation
|
/vimwreck.sh
|
UTF-8
| 847
| 3.234375
| 3
|
[] |
no_license
|
source './homebrew.sh'
function install_vimwreck {
install_or_upgrade_package git
install_or_upgrade_package ctags
current_dir="$PWD"
if [ -d $HOME/.vim/.git ]; then
cd $HOME/.vim && git pull
else
git clone https://github.com/jfelchner/vim.git $HOME/.vim
fi
rm $HOME/.vimrc 2> /dev/null
rm $HOME/.gvimrc 2> /dev/null
ln -s $HOME/.vim/vimrc $HOME/.vimrc
ln -s $HOME/.vim/gvimrc $HOME/.gvimrc
mkdir -p $HOME/.vim/bundle/vundle 2> /dev/null
if [ -d $HOME/.vim/bundle/vundle/.git ]; then
cd $HOME/.vim/bundle/vundle && git pull
else
git clone http://github.com/gmarik/vundle.git $HOME/.vim/bundle/vundle
fi
switch_shell="$SHELL"
export SHELL=/bin/sh
eval vim +BundleInstall +BundleClean +qall
export SHELL="$switch_shell"
cp $HOME/.vim/fonts/* $HOME/Library/Fonts/
cd "$current_dir"
}
| true
|
4dfb7c89145535e361341766ab81c1148d01545b
|
Shell
|
chinesedfan/jest-dash
|
/build.sh
|
UTF-8
| 1,205
| 3
| 3
|
[] |
no_license
|
# clean up previous remains, if any
rm -rf Contents/Resources
rm -rf Jest.docset
rm -rf dist
mkdir -p Contents/Resources/Documents
# create a fresh sqlite db
cd Contents/Resources
sqlite3 docSet.dsidx 'CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT)'
sqlite3 docSet.dsidx 'CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path)'
# fetch the whole doc site
# https://www.gnu.org/software/wget/manual/wget.html
cd Documents
wget -m -p -E -k -np -t 3 -T 10 \
--reject-regex '/es-ES|/ja|/pt-BR|/ro|/ru|/uk|/zh-Hans|versions|/blog' \
https://jestjs.io/
# move it around a bit
mv jestjs.io jest
cd ../../../
# create data file from base index page
mkdir dist
node src/createSectionJSON.js
# change the documentation markup layout a bit to fit dash's small window
mkdir -p dist/jest/en
mkdir -p dist/jest/docs/en
node src/modifyDocsHTML.js
# read the previously fetched doc site and parse it into sqlite
node src/index.js
# bundle up!
mkdir Jest.docset
cp -r Contents Jest.docset
cp -r dist/jest Jest.docSet/Contents/Resources/Documents/
cp src/icon* Jest.docset
# Create gzip bundle for Dash Contribution
tar --exclude='.DS_Store' -cvzf Jest.tgz Jest.docset
| true
|
dcf572dfc562903c7840e931caf6876b68a695c8
|
Shell
|
ververica/flink-ecosystem
|
/production-build.sh
|
UTF-8
| 324
| 2.625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
echo "Running docker-compose for building and launching the backend server"
docker-compose build
docker-compose down
docker-compose up -d
echo "Building frontend"
export NODE_ENV=production
export NODE_PATH=src/
npm install
npm run build
echo "Moving build results to nginx"
sudo cp -r build/* /var/www/html/
| true
|
bebd284e19aec2c1ef9cfe6b33a85497f0e7e0f6
|
Shell
|
mpecenin/wscad-2019
|
/HalideRL/run-cpu-node-6.sh
|
UTF-8
| 1,770
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#SBATCH -p 7d
#SBATCH --nodelist node6-ib
#SBATCH --sockets-per-node 4
#SBATCH --cores-per-socket 8
#SBATCH --threads-per-core 1
#SBATCH -o output/slurm-%j.out
module load compilers/gcc/6.4.0
module load libraries/openmpi/3.0.0-gcc-6.4.0
if [ -z "$1" ]; then
echo "Usage: $0 HalideEnv"
exit 1
fi
if pgrep -x "grpc-halide" > /dev/null; then
echo "Failed, Halide is already running"
exit 1
fi
BASEDIR="$(pwd)"
HLDIR="$BASEDIR/grpc-halide"
AGDIR="$BASEDIR/baselines"
export LD_LIBRARY_PATH="$BASEDIR/runtime:$LD_LIBRARY_PATH"
export PATH="$HLDIR:$HOME/.local/bin:$PATH"
export PYTHONPATH="$AGDIR:$BASEDIR/gym-halide:$PYTHONPATH"
export OMP_NUM_THREADS=8
export HL_NUM_THREADS=8
cd $HLDIR
numactl --membind=0 --cpunodebind=0 grpc-halide "localhost:50051" &
numactl --membind=1 --cpunodebind=1 grpc-halide "localhost:50052" &
numactl --membind=2 --cpunodebind=2 grpc-halide "localhost:50053" &
numactl --membind=3 --cpunodebind=3 grpc-halide "localhost:50054" &
sleep 5
cd $AGDIR
numactl --membind=0 --cpunodebind=0 python3 baselines/ppo1/run_halide.py --env $1 --seed $(od -vAn -N4 -tu4 < /dev/urandom) --num-episodes 10000 --target "localhost:50051" &
numactl --membind=1 --cpunodebind=1 python3 baselines/ppo1/run_halide.py --env $1 --seed $(od -vAn -N4 -tu4 < /dev/urandom) --num-episodes 10000 --target "localhost:50052" &
numactl --membind=2 --cpunodebind=2 python3 baselines/ppo1/run_halide.py --env $1 --seed $(od -vAn -N4 -tu4 < /dev/urandom) --num-episodes 10000 --target "localhost:50053" &
numactl --membind=3 --cpunodebind=3 python3 baselines/ppo1/run_halide.py --env $1 --seed $(od -vAn -N4 -tu4 < /dev/urandom) --num-episodes 10000 --target "localhost:50054" &
jobs -l
wait %5 %6 %7 %8
kill %1 %2 %3 %4
wait %1 %2 %3 %4
| true
|
f7a9430adbe8efb96aa6559324cc953c72ae9758
|
Shell
|
ssjokhla/Konsolidate
|
/oldKonsolidate/rabbitmqphp_example/rdownload.sh
|
UTF-8
| 588
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
#Ask for listener username and IP
#read -p 'Listener (user@IP): ' listener
#Set up watcher on /uploads directory to watch for modified, created, or deleted files
echo "Listener: $1"
inotifywait -r -e modify,create,delete --format '%f' /var/lib/mysql-files | while read FILE
do
echo "File is named $FILE"
#Sync files to the listeners directory while simultaneously deleting it from local directory
rsync -av --remove-source-files /var/lib/mysql-files/ $1:/var/www/html/downloads/
echo "Yay this worked"
done
~/IT490/Konsolidate/rabbitmqphp_example/rdownload.sh $1
| true
|
40379ac6a7fa6bdb94bfa2680e733eee471a1c1c
|
Shell
|
rahulrenjeev/terraform-todeploy-wordpress
|
/database.sh
|
UTF-8
| 1,083
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
cd /etc/yum.repos.d/
wget https://raw.githubusercontent.com/rahulrenjeev/maria-repo/main/mariadb.repo
yum makecache
yum repolist
yum install MariaDB-server MariaDB-client -y
systemctl enable --now mariadb
systemctl status mariadb
yum install expect -y
PASSWORD=abcd1234
SECURE_MYSQL=$(expect -c "
set timeout 10
spawn sudo mysql_secure_installation
expect \"Enter current password for root (enter for none):\"
send \\r\"
expect \"Change the root password?\"
send \"y\r\"
expect \"Remove anonymous users?\"
send \"n\r\"
expect \"Disallow root login remotely?\"
send \"n\r\"
expect \"Remove test database and access to it?\"
send \"n\r\"
expect \"Reload privilege tables now?\"
send \"y\r\"
expect eof
")
echo "$SECURE_MYSQL"
mysql -e "CREATE DATABASE wordpress1 /*\!40100 DEFAULT CHARACTER SET utf8 */;"
mysql -e "CREATE USER wordpress1@’localhost’ IDENTIFIED BY 'BVK3oVFPC9xAQ';"
mysql -e "GRANT ALL PRIVILEGES ON wordpress1.* to 'wordpress1'@'%' IDENTIFIED BY 'BVK3oVFPC9xAQ';"
mysql -e "FLUSH PRIVILEGES;"
| true
|
f0242c99936e856d7336999322cbdb375cdc58c7
|
Shell
|
a3213105/occlum
|
/demos/redis/run_occlum_redis_glibc.sh
|
UTF-8
| 812
| 2.9375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
redis_dir=/usr/local/redis/
occlum_glibc=/opt/occlum/glibc/lib/
set -e
# 1. Init Occlum Workspace
rm -rf occlum_instance
occlum new occlum_instance
cd occlum_instance
new_json="$(jq '.resource_limits.user_space_size = "320MB" |
.process.default_mmap_size = "256MB"' Occlum.json)" && \
echo "${new_json}" > Occlum.json
# 2. Copy files into Occlum Workspace and Build
cp $redis_dir/bin/redis* image/bin
cp /usr/local/bin/openssl* image/bin
cp /usr/local/lib/libssl* image/$occlum_glibc
cp /usr/local/lib/libcrypto* image/$occlum_glibc
cp $occlum_glibc/libdl.so.2 image/$occlum_glibc
cp $occlum_glibc/librt.so.1 image/$occlum_glibc
cp $occlum_glibc/libm.so.6 image/$occlum_glibc
#occlum build
occlum build
# 3. Run redis server
occlum run /bin/redis-server --save "" --appendonly no &
| true
|
e5219e1090c47a2c417e7d19bf44e72110a4dc12
|
Shell
|
pnandam29/new-mac-setup
|
/osx_m1_bootstrip.sh
|
UTF-8
| 7,638
| 3.421875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Bootstrap script for setting up a new OSX machine
#
# This should be idempotent so it can be run multiple times.
#
# Some apps don't have a cask and so still need to be installed by hand. These
# include:
#
# Notes:
#
# - If installing full Xcode, it's better to install that first from the app
# store before running the bootstrap script. Otherwise, Homebrew can't access
# the Xcode libraries as the agreement hasn't been accepted yet.
#
osascript -e 'tell application "System Preferences" to quit'
echo "Starting bootstrapping"
#sudo -v
echo "installing xcode"
xcode-select --install
# Keep-alive: update existing `sudo` time stamp until `.macos` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# Check for Homebrew, install if we don't have it
if test ! $(which brew); then
echo "Installing homebrew..."
##ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
fi
# Update homebrew recipes
brew install
echo "Installing rosetta..." "disable this command for non M1 Chip macbooks and run..."
softwareupdate --install-rosetta
# Install GNU core utilities (those that come with OS X are outdated)
#brew tap homebrew/dupes
brew install coreutils
brew install gnu-sed #--with-default-names
brew install gnu-tar #--with-default-names
brew install gnu-indent #--with-default-names
brew install gnu-which #--with-default-names
brew install gnu-grep #--with-default-names
brew install gnu-indent
brew install gnu-sed
brew install gnutls
brew install gnu-grep
brew install gnu-tar
brew install gawk
brew install gsed
brew install gnu-indent
brew install gnu-which
#brew install tar
# Install GNU `find`, `locate`, `updatedb`, and `xargs`, g-prefixed
brew install findutils
# Install Bash 4
brew install bash
#for PACKAGES in $(<"$PACKAGES.txt") ; do
PACKAGES=(
cask
svn
brew-pip
ack
autoconf
automake
boot2docker
ffmpeg
gettext
gifsicle
git
graphviz
hub
imagemagick
jq
libjpeg
libmemcached
lynx
markdown
memcached
mercurial
npm
pkg-config
postgresql
#python
#python3
pypy
rabbitmq
rename
ssh-copy-id
terminal-notifier
the_silver_searcher
tmux
tree
#vim
wget
)
echo "Installing packages..."
#if ! brew install ${PACKAGES[@]}
# then
# echo "Failed to install ${PACKAGES[@]}"
# continue
# fi
#
#done
brew install ${PACKAGES[@]} || :
echo "Cleaning up..."
brew cleanup
echo "Installing cask..."
brew install cask
#for CASKS in $(<"$CASKS.txt") ; do
CASKS=(
google-chrome
brackets
slack
postman
dropbox
docker
firefox
google-chrome
wireshark
iterm2
macvim
slack
1password
vscode
github
pycharm-ce
atom
tunnelblick
)
echo "Installing cask apps..."
#if ! brew install ${CASKS[@]}
# then
# echo "Failed to install ${CASKS[@]}"
# continue
# fi
#
#done
brew install --cask ${CASKS[@]} || :
#brew upgrade --cask ${CASKS[@]}
echo "Installing fonts..."
brew tap homebrew/cask-fonts
FONTS=(
font-roboto
font-clear-sans
font-fira-code
#font-inconsolidata
)
brew install ${FONTS[@]} || :
echo "Installing Python packages..."
PYTHON_PACKAGES=(
ipython
virtualenv
virtualenvwrapper
)
sudo pip3 install ${PYTHON_PACKAGES[@]} || :
echo "Installing Ruby gems"
RUBY_GEMS=(
bundler
filewatcher
cocoapods
)
sudo gem install ${RUBY_GEMS[@]}
echo "Installing global npm packages..."
npm install marked -g
echo "Configuring OSX..."
#
## Set fast key repeat rate
##defaults write NSGlobalDomain KeyRepeat -int 0
#
## Require password as soon as screensaver or sleep mode starts
defaults write com.apple.screensaver askForPassword -int 1
defaults write com.apple.screensaver askForPasswordDelay -int 0
#
## Show filename extensions by default
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
#
## Enable tap-to-click
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad Clicking -bool true
defaults -currentHost write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
#
## Disable "natural" scroll
##defaults write NSGlobalDomain com.apple.swipescrolldirection -bool false
###############################################################################
# Screen #
###############################################################################
# Require password immediately after sleep or screen saver begins
defaults write com.apple.screensaver askForPassword -int 0
defaults write com.apple.screensaver askForPasswordDelay -int 0
# Save screenshots to the desktop
defaults write com.apple.screencapture location -string "${HOME}/Documents/Screeshots"
# Save screenshots in PNG format (other options: BMP, GIF, JPG, PDF, TIFF)
defaults write com.apple.screencapture type -string "png"
# Disable shadow in screenshots
#defaults write com.apple.screencapture disable-shadow -bool true
###############################################################################
# Mac App Store #
###############################################################################
# Enable the WebKit Developer Tools in the Mac App Store
defaults write com.apple.appstore WebKitDeveloperExtras -bool true
# Enable Debug Menu in the Mac App Store
defaults write com.apple.appstore ShowDebugMenu -bool true
# Enable the automatic update check
defaults write com.apple.SoftwareUpdate AutomaticCheckEnabled -bool true
# Check for software updates daily, not just once per week
defaults write com.apple.SoftwareUpdate ScheduleFrequency -int 1
# Download newly available updates in background
defaults write com.apple.SoftwareUpdate AutomaticDownload -int 1
# Install System data files & security updates
defaults write com.apple.SoftwareUpdate CriticalUpdateInstall -int 1
# Automatically download apps purchased on other Macs
defaults write com.apple.SoftwareUpdate ConfigDataInstall -int 1
# Turn on app auto-update
defaults write com.apple.commerce AutoUpdate -bool true
# Allow the App Store to reboot machine on macOS updates
defaults write com.apple.commerce AutoUpdateRestartRequired -bool true
###############################################################################
# Login window
###############################################################################
# Disable Guest Account
defaults write /Library/Preferences/com.apple.loginwindow GuestEnabled 0
# User Auto login -- Off
defaults delete /Library/Preferences/com.apple.loginwindow autoLoginUser
# Enable Show Input menu in login window
defaults write /Library/Preferences/com.apple.loginwindow showInputMenu 1
# Enable show password hint
defaults write /Library/Preferences/com.apple.loginwindow RetriesUntilHint 1
# Enable Show sleep, restart, and shutdown buttons
defaults write /Library/Preferences/com.apple.loginwindow PowerOffDisabled 0
# disable gatekeeper.
# ref https://www.defaults-write.com/disable-gatekeeper-on-your-mac/
defaults write /Library/Preferences/com.apple.security GKAutoRearm -bool false
#Turning Gatekeeper back on on a Mac with the following command:
#defaults delete /Library/Preferences/com.apple.security GKAutoRearm
echo "Creating folder structure..."
[[ ! -d Wiki ]] && mkdir Wiki
[[ ! -d Workspace ]] && mkdir Workspace
echo "Bootstrapping complete"
| true
|
a5f6351b0cad585fabba17c07d2d4f48c5db6fea
|
Shell
|
AiriYokochi/setup_cube_petit
|
/shell_utils/udev_check.sh
|
UTF-8
| 1,031
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
echo "--------------------------------"
echo "[INFO] UDEV CHECK START"
CMD1=`udevadm info -a -n /dev/ttyUSB0 | grep idVendor |sed 's/^.*"\(.*\)".*$/\1/' | awk 'NR==1'`
if [ "${CMD1}" == "0403" ]; then
echo "[INFO] ttyUSB0 is DYNAMIXEL MOTOR:[OK]"
else
echo "[WARN] ***ttyUSB0 IS NOT DYNAMICEL MOTOR PLEASE CHECK***"
fi
CMD1=`udevadm info -a -n /dev/ttyUSB1 | grep idVendor |sed 's/^.*"\(.*\)".*$/\1/' | awk 'NR==1'`
if [ "${CMD1}" == "1a86" ]; then
echo "[INFO] ttyUSB1 is PACECAT LiDAR:[OK]"
else
echo "[WARN] ***ttyUSB1 IS NOT PACECAR LiDAR PLEASE CHECK***"
fi
#ttyDYNAMIXEL_2.4.2
#ttyPACECAT_2.4.3
CMD1=`ls /dev/ttyDYNAMIXEL*`
if [ "${CMD1}" != "" ];then
echo "[INFO] ${CMD1} :[OK]"
else
echo "[WARN] ***ttyDYNAMIXEL IS NOT FOUND***"
fi
CMD1=`ls /dev/ttyPACECAT*`
if [ "${CMD1}" != "" ];then
echo "[INFO] ${CMD1} :[OK]"
else
echo "[WARN] ***ttyPACECAT_2.4.3 IS NOT FOUND***"
fi
echo "[INFO] UDEV CHECK FINISH"
echo "---------------------------------"
| true
|
4b3925858990cffb47177227e6d5ec87ed397704
|
Shell
|
sbrisbane/shc4hpc
|
/aws/aws-module-installer.sh
|
UTF-8
| 608
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
PYMINORVERS=$(python3 --version | awk '{ print $NF }' | awk -F . '{print $NF}')
if [ 6 -gt $PYMINORVERS ]; then
VENV=pyvenv
else
VENV="python3 -m venv"
fi
BUILDDIR=../.build/aws
mkdir -p $BUILDDIR
#work around broken venv in rhel7
$VENV --without-pip $BUILDDIR/python
source $BUILDDIR/python/bin/activate
python3 get-pip-21.0.1.py
deactivate
############################
source $BUILDDIR/python/bin/activate
python3 -m pip install -r aws-requirements.txt
mkdir $BUILDDIR/ansible
cp ansible.cfg $BUILDDIR/ansible
cd $BUILDDIR/ansible
ansible-galaxy collection install amazon.aws -p ./
| true
|
95823a7017f60fa21c32f8143532a5efafcf4224
|
Shell
|
okayanyan/DockerSet_RailsEnv5_2
|
/dockerfile_boot.sh
|
UTF-8
| 248
| 2.78125
| 3
|
[] |
no_license
|
# prepare
cd file
# boot container
boot_container_count=$(docker-compose ps | grep Up | grep -c file)
if [ $boot_container_count = 0 ]; then
docker-compose up -d
fi
# login container
docker-compose exec app /bin/bash
# after operation
cd ../
| true
|
3d31208d7477469445f447862336f6175d54528a
|
Shell
|
khale/encryption-wrappers
|
/gen-rsa-key.sh
|
UTF-8
| 463
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# $1 - length (2048 is default)
#
# NOTE: if you're using this for hybrid encryption, make sure you
# use a long enough key length to accomodate whatever you're going to encrypt (e.g.
# an AES key)
LEN=2048
if [[ -n "$1" ]]; then
echo "using length: $1"
LEN="$1"
else
echo "using default length: $LEN"
fi
openssl genpkey -out private.pem -algorithm rsa -pkeyopt rsa_keygen_bits:$LEN
openssl rsa -in private.pem -pubout -out public.pem
| true
|
b9a71ea0decd668d8e63b226754e99348e275a16
|
Shell
|
oracle/weblogic-kubernetes-operator
|
/kubernetes/samples/scripts/create-weblogic-domain-on-azure-kubernetes-service/validate.sh
|
UTF-8
| 7,788
| 3.46875
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"UPL-1.0",
"bzip2-1.0.6",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Plexus",
"EPL-2.0",
"CDDL-1.0",
"MIT",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"CDDL-1.1",
"LicenseRef-scancode-generic-export-compliance",
"CC0-1.0",
"GPL-2.0-or-later",
"EPL-1.0",
"Classpath-exception-2.0",
"W3C",
"GPL-1.0-or-later",
"CPL-1.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright (c) 2018, 2022, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
# Description
# This script is to validate if resources are ready for creating a new WebLogic domain.
# It will validate the following resources:
# * Azure resource group: check if it exists
# * Azure Kubernetes Service instance: check if it is created
# * Azure storage account: check if it is created
# * Azure file share: check if it's created
# * Kubernetes secret for container registry account: check if it's created
# * Kubernetes secret for WebLogic domain: check if it's created
# * Persistent Volume Claim: check if it's mounted and verify the status and storage class
# Initialize
script="${BASH_SOURCE[0]}"
scriptDir="$( cd "$( dirname "${script}" )" && pwd )"
usage() {
echo "Arguments"
echo " --aks-name [Required] :Azure Kubernetes Service instance name. "
echo " --file-share [Required] :File share name."
echo " --resource-group -g [Required] :Resource group name."
echo " --storage-account [Required] :Storage account name."
echo " --domain-uid -d [Required] :Domain UID."
echo " --pvc-name [Required] : Persistent Volume Claim name."
echo " --secret-docker [Required] : Name of the Kubernetes secret that stores container registry account."
echo " --help -h :Help"
exit $1
}
while test $# -gt 0; do
case "$1" in
--aks-name*)
shift
if test $# -gt 0; then
export aksName=$1
else
echo "Azure Kubernetes Service instance name is required."
exit 1
fi
shift
;;
-g*|--resource-group*)
shift
if test $# -gt 0; then
export resourceGroup=$1
else
echo "Resource group is required."
exit 1
fi
shift
;;
--storage-account*)
shift
if test $# -gt 0; then
export storageAccount=$1
else
echo "Storage account is required."
exit 1
fi
shift
;;
--file-share*)
shift
if test $# -gt 0; then
export fileShare=$1
else
echo "Storage accounFile share name is required."
exit 1
fi
shift
;;
-d*|--domain-uid*)
shift
if test $# -gt 0; then
export domainUID=$1
else
echo "Domain UID is required."
exit 1
fi
shift
;;
--pvc-name*)
shift
if test $# -gt 0; then
export pvcName=$1
else
echo "Persistent Volume Claim name is required."
exit 1
fi
shift
;;
--secret-docker*)
shift
if test $# -gt 0; then
export imagePullSecret=$1
else
echo "Secret name for Container Registry Account is required."
exit 1
fi
shift
;;
-h|--help) usage 0
;;
*) usage 1
;;
esac
done
missingRequiredOption="false"
if [ -z ${aksName} ]; then
echo "${script}: --aks-name must be specified."
missingRequiredOption="true"
fi
if [ -z ${domainUID} ]; then
echo "${script}: --domain-uid or -d must be specified."
missingRequiredOption="true"
fi
if [ -z ${fileShare} ]; then
echo "${script}: --file-share must be specified."
missingRequiredOption="true"
fi
if [ -z ${pvcName} ]; then
echo "${script}: --pvc-name must be specified."
missingRequiredOption="true"
fi
if [ -z ${resourceGroup} ]; then
echo "${script}: --resource-group or -g must be specified."
missingRequiredOption="true"
fi
if [ -z ${imagePullSecret} ]; then
echo "${script}: --secret-docker must be specified."
missingRequiredOption="true"
fi
if [ -z ${storageAccount} ]; then
echo "${script}: --storage-account must be specified."
missingRequiredOption="true"
fi
if [ "${missingRequiredOption}" == "true" ]; then
usage 1
fi
#
# Function to exit and print an error message
# $1 - text of message
fail() {
echo [ERROR] $*
exit 1
}
validateAzLogin() {
az account show
if [ $? -ne 0 ]; then
fail "Please run az login to setup account."
fi
}
validateResourceGroup() {
ret=$(az group exists --name ${resourceGroup})
if [ $ret == false ];then
fail "${resourceGroup} does not exist."
fi
}
validateStorageAccount() {
ret=$(az storage account check-name --name ${storageAccount})
echo $ret
nameAvailable=$(echo "$ret" | grep "AlreadyExists")
if [ -z "$nameAvailable" ];then
fail "Storage account ${storageAccount} is unavailable."
fi
}
validateAKSName() {
ret=$(az aks list -g ${resourceGroup} | grep "${aksName}")
if [ -z "$ret" ];then
fail "AKS instance with name ${aksName} does not exist."
fi
}
validateFileShare() {
export azureStorageConnectionString=$(az storage account show-connection-string \
-n $storageAccount -g $resourceGroup -o tsv)
echo Check if file share exists
ret=$( az storage share-rm exists --name ${fileShare} --storage-account ${storageAccount} | grep "exists" | grep false)
if [ -n "$ret" ];then
fail "File share ${fileShare} is unavailable."
fi
}
connectAKS() {
az aks get-credentials --resource-group $resourceGroup --name $aksName
if [ $? -ne 0 ]; then
fail "Connect to ${aksName} failed."
fi
}
validateImagePullSecret() {
${KUBERNETES_CLI:-kubectl} get secret ${imagePullSecret}
if [ $? -ne 0 ]; then
fail "Secret:${imagePullSecret} for container registry account is not created."
fi
}
# Abort unless a secret whose name contains "weblogic-credentials" exists;
# on success export its name (first whitespace-delimited field of the
# matching line) as secretWebLogic for the summary output.
validateWebLogicDomainSecret() {
ret=$(${KUBERNETES_CLI:-kubectl} get secrets | grep "weblogic-credentials")
# $? here is the grep exit status propagated through the assignment.
if [ $? -ne 0 ]; then
fail "Secret:weblogic-credentials is not created."
fi
export secretWebLogic=$(echo ${ret%% *})
}
# Verify the Persistent Volume Claim named ${pvcName} exists and is Bound.
# Implementation detail: the whole `kubectl get pvc` output is word-split;
# with the default column layout the header contributes 8 words
# (ACCESS MODES splits in two), so word 9 is the first PVC's NAME and
# word 10 its STATUS.
# NOTE(review): this assumes exactly one PVC in the namespace and the
# default kubectl column layout — TODO confirm both hold for this setup.
validatePVC() {
ret=$(${KUBERNETES_CLI:-kubectl} get pvc)
index=0
for item in ${ret};
do
index=$((index + 1))
if [ $index -eq 9 ]; then
if [[ "$item" != "$pvcName" ]];then
fail "Persistent Volume Claim name $item does not match value $pvcName."
fi
fi
if [[ $index -eq 10 && "$item" != "Bound" ]]; then
fail "Persistent Volume Claim status is not Bound."
fi
done
}
# Abort unless a WebLogic operator pod is present and in Running state.
validateOperator() {
ret=$(${KUBERNETES_CLI:-kubectl} get pods | grep "weblogic-operator" | grep "Running")
if [ -z "${ret}" ]; then
fail "Please make sure WebLogic operator is running."
fi
}
# Abort if a Domain resource matching ${domainUID} already exists — this
# script prepares for creating a *new* domain, so an existing one must be
# deleted first (see the linked documentation in the error message).
validateDomain() {
ret=$(${KUBERNETES_CLI:-kubectl} get domain | grep "${domainUID}")
if [ -n "$ret" ]; then
fail "${domainUID} is created! Please create a new domain or follow the page to delete it https://oracle.github.io/weblogic-kubernetes-operator/samples/domains/domain-home-on-pv/#delete-the-generated-domain-home."
fi
}
# Report successful validation, listing every resource that was checked.
pass() {
cat <<EOF

PASS
You can create your domain with the following resources ready:
  Azure resource group: ${resourceGroup}
  Azure Kubernetes Service instance: ${aksName}
  Azure storage account: ${storageAccount}
  Azure file share: ${fileShare}
  Kubernetes secret for Container Registry Account: ${imagePullSecret}
  Kubernetes secret for WebLogic domain: ${secretWebLogic}
  Persistent Volume Claim: ${pvcName}
EOF
}
# Run all validations in dependency order: local az session first, then the
# Azure-side resources, then connect to the cluster and run the
# Kubernetes-side checks. Each step aborts the script via fail() on error.
validateAzLogin
validateResourceGroup
validateAKSName
validateStorageAccount
validateFileShare
connectAKS
validateImagePullSecret
validateWebLogicDomainSecret
validatePVC
validateOperator
validateDomain
pass
| true
|
50b200450078d255ac5850349b56bedad91ed507
|
Shell
|
Eddayy/dotfiles-wsl
|
/implement.sh
|
UTF-8
| 1,526
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Dotfiles manager entry point.
#   -h       show help        -T       self test
#   -i       initialize (link dotfiles, optionally configure zsh and vim)
#   -u       force-update dotfiles
#   -s NAME  force-set one component (dispatches to impl_NAME)
#   -c NAME  configure one component (dispatches to conf_NAME)
#   -x       delete all *.dotfilebak backups
# Helpers (show_help, check_repos, set_dotfiles, conf_*, impl_* and the
# FORCE_SET flag they honor) come from functions.sh next to this script.
source ./functions.sh
# Main
# Run relative to the script's own directory so helper paths resolve.
cd "$(dirname "$0")"
while getopts ":hTiuxs:c:" opt; do
  case $opt in
    h)
      show_help
      exit
      ;;
    T)
      echo "self test" >&2
      exit
      ;;
    i)
      # Full initialization; an empty reply (plain Enter) counts as "yes".
      check_repos
      echo "Initialize"
      echo "Step 1. Set dotfiles"
      set_dotfiles
      read -p "Do you want to configure zsh to your default shell [yY]?" -n 1
      if [[ $REPLY =~ ^[Yy]$ || ! $REPLY ]]; then
        conf_zsh
      fi
      read -p "Do you want to configure vim and its bundles [yY]?" -n 1
      if [[ $REPLY =~ ^[Yy]$ || ! $REPLY ]]; then
        conf_vim
      fi
      # BUG FIX: plain `echo "\nAll done!"` printed a literal backslash-n;
      # printf emits the intended leading blank line.
      printf '\nAll done!\n'
      exit
      ;;
    u)
      FORCE_SET="true"
      echo "Update dotfiles"
      set_dotfiles
      exit
      ;;
    s)
      echo "Set $OPTARG"
      FORCE_SET="true"
      # NOTE(review): eval on an option argument — fine for a personal tool,
      # but never pass untrusted values here.
      eval "impl_$OPTARG"
      exit
      ;;
    c)
      echo "Configure $OPTARG"
      eval "conf_$OPTARG"
      exit
      ;;
    x)
      echo "Delete all backup files"
      # Glob intentionally unquoted: expands to every backup file.
      rm -f "$HOME"/.*.dotfilebak
      exit
      ;;
    \?)
      echo "Invalid option: -$OPTARG"
      exit 1
      ;;
    :)
      echo "Option -$OPTARG requires an argument."
      exit 1
      ;;
  esac
done
| true
|
15af91ee3119a0fcf20e99e158fccaf519e66296
|
Shell
|
francescofiorenza/CONCEPTtaggingWFST
|
/Pv1.2/scripts/trainCONCEPT/4CONCEPTtaggerUnknownTOKENCreate.sh
|
UTF-8
| 256
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Emit FST arcs mapping the unknown token <unk> to every concept tag, each
# with the uniform negative-log probability -ln(1/N), where N is the number
# of distinct concept tags (column 2) in the training data.
traindata='../../data/NLSPARQL.train.data'
# Count distinct non-empty tags. FIX: read the file directly instead of
# `cat | cut` (useless cat) and use `sort -u` instead of `sort | uniq`.
N=$(cut -f 2 "$traindata" | sed '/^ *$/d' | sort -u | wc -l)
prob=$(echo "-l(1/$N)" | bc -l)
# One arc per concept listed in CONCEPT.counts (format: <concept> <count>).
# -r keeps any backslashes in the input literal.
while read -r concept count
do
  echo -e "0\t0\t<unk>\t$concept\t$prob"
done < ../../CONCEPT.counts
# Final state of the FST.
echo "0"
| true
|
bef3977ab0d16e03b619c09a10b7d2bd5edffdd7
|
Shell
|
Skynet260/hsro-inf-fpk.github.io
|
/scripts/2pdf.sh
|
UTF-8
| 304
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Convert a markdown lecture file to a Beamer PDF.
#   $1 - input markdown file (a preprocessed "$1.tmp" is generated, then removed)
#   $2 - output PDF path
# FIX: quote "$1"/"$2" so paths containing spaces do not word-split.
echo "-> Create tmp file"
python ../../scripts/md2beamer.py "$1"
echo "-> tmp created!"
echo "->Create pdf"
pandoc "$1.tmp" --listings -H ../../scripts/listings.tex -o "$2" -t beamer -V fontsize=9pt --pdf-engine=xelatex -V theme:metropolis -V colortheme=crane
rm "$1.tmp"
echo "->tmp deleted!"
| true
|
6fe13b0180cd6042e7a13f3841b432864139e4e9
|
Shell
|
Nicholas-Schaub/polus-plugins
|
/transforms/polus-recycle-vector-plugin/run-plugin.sh
|
UTF-8
| 636
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the polus recycle-vector plugin container against the local data tree.
version=$(<VERSION)
# Resolve ../../data to an absolute path for the bind mount.
datapath=$(readlink --canonicalize ../../data)
# Inputs
stitchDir=/data/vector
collectionDir=/data/images/MaricRatBrain2019/fovs/intensity
# NOTE(review): filepattern is not passed to the container below — unused? confirm.
filepattern=.+
# Output paths
outDir=/data/output
# Log level, must be one of ERROR, CRITICAL, WARNING, INFO, DEBUG
LOGLEVEL=INFO
# FIX: quote every expansion so a data path containing spaces cannot
# word-split the docker arguments.
docker run --mount "type=bind,source=${datapath},target=/data/" \
  --user "$(id -u):$(id -g)" \
  --env "POLUS_LOG=${LOGLEVEL}" \
  "polusai/recycle-vector-plugin:${version}" \
  --stitchDir "${stitchDir}" \
  --collectionDir "${collectionDir}" \
  --outDir "${outDir}"
| true
|
c25462c1015c6624e99794f519607fc203561842
|
Shell
|
cncf/devstats
|
/shared/top_n_companies.sh
|
UTF-8
| 372
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Shared metric: top N companies. Requires PG_DB and PG_PASS in the
# environment and the number of companies as $1.
# FIX: dropped the useless subshell around the condition and replaced
# legacy backticks with $( ) for the SQL snippet argument.
if [ -z "$PG_PASS" ] || [ -z "$PG_DB" ] || [ -z "$1" ]
then
  echo "$0: you need to set PG_DB and PG_PASS env variables and provide number of companies as an argument to use this script"
  exit 1
fi
GHA2DB_LOCAL=1 runq metrics/shared/companies_tags.sql {{lim}} "$1" ' sub.name' " string_agg(sub.name, ',')" {{exclude_bots}} "$(cat util_sql/exclude_bots.sql)"
| true
|
4e9410ae9eb40737f42a4c37ff2a30c61db1d307
|
Shell
|
mathworks-ref-arch/matlab-on-aws
|
/packer/v1/build/install-glibc-ubuntu2004.sh
|
UTF-8
| 644
| 3.125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
#
# Copyright 2023 The MathWorks Inc.
# Installs MathWorks' patched glibc packages on Ubuntu 20.04 (focal) to
# work around glibc bug BZ#19329.
# Exit on any failure, treat unset substitution variables as errors
set -euo pipefail
# Install patched glibc fix - See https://github.com/mathworks/build-glibc-bz-19329-patch
cd /tmp
mkdir glibc-packages && cd glibc-packages
wget --no-verbose https://github.com/mathworks/build-glibc-bz-19329-patch/releases/download/ubuntu-focal/all-packages.tar.gz
# Unpack everything except the source debs and debug-symbol packages,
# which are not needed for the runtime fix.
tar -x -f all-packages.tar.gz --exclude glibc-*.deb --exclude libc6-dbg*.deb
# Prerequisites for the patched packages.
sudo apt-get -qq install \
libcrypt-dev \
linux-libc-dev
# Install the extracted .deb files themselves.
sudo apt-get -qq install --no-install-recommends ./*.deb
# Clean up the extraction workspace.
cd /tmp
sudo rm -rf glibc-packages
| true
|
de59f01b861a615d7c9d2d338fb290e9b192e34c
|
Shell
|
GokGokalp/fission
|
/test/tests/test_backend_nd_update.sh
|
UTF-8
| 2,623
| 3.875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -euo pipefail
# global variables
# NOTE(review): pkg/http_status/url appear unused in this script —
# possibly leftovers from a template; confirm before removing.
pkg=""
http_status=""
url=""
# Remove the build artifacts (archive + source dir) and the scratch file,
# when present. Always succeeds so it is safe as an EXIT trap handler.
cleanup() {
    [ -e "test-deploy-pkg.zip" ] && rm -rf test-deploy-pkg.zip test_dir
    [ -e "/tmp/file" ] && rm -rf /tmp/file
    return 0
}
# Build test-deploy-pkg.zip containing one minimal Python function.
# `log` is expected to be provided by the harness that sources this script.
create_archive() {
log "Creating an archive"
mkdir test_dir
printf 'def main():\n    return "Hello, world!"' > test_dir/hello.py
# -j stores hello.py without its directory path inside the zip.
zip -jr test-deploy-pkg.zip test_dir/
}
# Create a Fission Python environment named $1 (poolsize 2, with builder).
create_env() {
log "Creating environment"
fission env create --name $1 --image fission/python-env:latest --builder fission/python-builder:latest --mincpu 40 --maxcpu 80 --minmemory 64 --maxmemory 128 --poolsize 2
}
# Create function $1 in environment $2 from the deploy archive, using the
# newdeploy executor (scale 1..4, target CPU 50%).
create_fn() {
log "Creating functiom"
fission fn create --name $1 --env $2 --deploy test-deploy-pkg.zip --entrypoint "hello.main" --executortype newdeploy --minscale 1 --maxscale 4 --targetcpu 50
}
# Expose function $1 at GET /$1, then wait for the router and the
# newdeploy deployment to come up.
create_route() {
log "Creating route"
fission route create --function $1 --url /$1 --method GET
log "Waiting for router & newdeploy deployment creation"
sleep 5
}
# Change the greeting ("world" -> "fission") and rebuild the archive.
# NOTE(review): `sed -i` without a suffix is GNU sed syntax; BSD/macOS sed
# would need `-i ''` — confirm the CI environment is Linux.
update_archive() {
log "Updating the archive"
sed -i 's/world/fission/' test_dir/hello.py
zip -jr test-deploy-pkg.zip test_dir/
}
# Push the updated archive to function $1 (env $2) and wait for rollout.
update_fn() {
log "Updating function with updated package"
fission fn update --name $1 --env $2 --deploy test-deploy-pkg.zip --entrypoint "hello.main" --executortype newdeploy --minscale 1 --maxscale 4 --targetcpu 50
log "Waiting for deployment to update"
sleep 5
}
# Poll the function's route (host from $FISSION_ROUTER) until the response
# body contains $2 (case-insensitive). Callers bound the loop with
# `timeout 60 bash -c ...`; the function is exported so those subshells,
# which do not inherit `set -e`, can call it.
test_fn() {
echo "Doing an HTTP GET on the function's route"
echo "Checking for valid response"
while true; do
response0=$(curl http://$FISSION_ROUTER/$1)
# grep both tests for the substring and echoes the matching line.
echo $response0 | grep -i $2
# $? is the grep status: 0 once the expected substring appears.
if [[ $? -eq 0 ]]; then
break
fi
sleep 1
done
}
export -f test_fn
# This test only tests one path of execution: updating package and checking results of function
# There might be potential future tests where one can test changes in:
# environment, min & max scale, secrets and configmaps etc.
# This test in summary:
# Creates a archive, env. with builder and a function and tests for response
# Then updates archive with a different word and udpates functions to check for new string in response
main() {
# trap
# Ensure artifacts are removed on any exit path.
trap cleanup EXIT
# Nanosecond suffix keeps environment names unique across runs.
env=python-$(date +%N)
fn_name="hellopython"
create_archive
create_env $env
create_fn $fn_name $env
create_route $fn_name
# The first deploy must answer with the original greeting ("world").
timeout 60 bash -c "test_fn $fn_name 'world'"
update_archive
update_fn $fn_name $env
# After the update the greeting must change to "fission".
timeout 60 bash -c "test_fn $fn_name 'fission'"
log "Update function for new deployment executor passed"
}
main
| true
|
c643855032396c910862d904a7df832bcec838bc
|
Shell
|
artisdom/_ebDev
|
/scripts/imgAndVideo/SVG2img.sh
|
UTF-8
| 3,837
| 3.796875
| 4
|
[] |
no_license
|
# DESCRIPTION
# Renders a raster image (png by default) from an .svg file passed as parameter $1.
# USAGE
# Run with these parameters:
# - $1 The svg file name to create an image from e.g. in.svg
# - $2 OPTIONAL. Longest side in pixels for rendered output image. Default 4280 if not given.
# - $3 OPTIONAL. Target file format e.g. png or jpg -- default png if not given.
# - $4 OPTIONAL. A hex color code (format ffffff, no pound/hex symbol) used to render the
#      svg background (if it has a transparent background). Eight hex digits ending in 00
#      (e.g. ffffff00) may force full transparency. If the argument is not hex digits
#      [a-f0-9], a hard-coded fallback color is used -- see the BACKGROUND COLOR OPTIONS
#      comment to change it. IF OMITTED, the background will be transparent.
# CODE
# TO DO
# - Add an rnd bg color option?
# - Or rnd background choice from a hexplt file?

# ==== START SET GLOBALS
# Require the input SVG; derive the output base name from it.
if ! [ "$1" ]; then echo "No parameter \$1. Exit."; exit; else svgFileName=$1; svgFilenameNoExtension=${svgFileName%.*}; fi
# Default longest-side render size is 4280 px.
# FIX: the original message claimed "DEFAULT 7680" while setting 4280.
if ! [ "$2" ]; then IMGsize=4280; echo "SET IMGsize to DEFAULT 4280"; else IMGsize=$2; echo "SET IMGsize to $2"; fi
# Default output format is png.
# FIX: the original comment claimed jpg was the default; the code sets png.
if ! [ "$3" ]; then IMGformat=png; echo "SET IMGformat to DEFAULT png"; else IMGformat=$3; echo "SET IMGformat to $3"; fi
# Background handling: no $4 -> transparent; otherwise validate as hex.
if ! [ "$4" ]
then
	backgroundColorParam="-background none"; echo 'SET parameter DEFAULT "-background none"'
else
	echo "background color control parameter passed; checking if parameter is a hex color code . . ."
	# grep -q: exit status only, no echoed match line.
	if echo "$4" | grep -q '[a-f0-9]\{6\}'
	then
		echo "Hex color code verified; setting bgHEXcolorCode to $4!"
		bgHEXcolorCode=$4
	else
		# BACKGROUND COLOR OPTIONS
		# Uncomment only one of the following options; comment out the others:
		# bgHEXcolorCode=ffffff # white
		# bgHEXcolorCode=000000 # black
		# bgHEXcolorCode=584560 # Darkish plum?
		bgHEXcolorCode=39383b # Medium-dark purplish-gray
		# Other potentially good black line color change options: #2fd5fe #bde4e4
		echo "$4 is not a hex color code! Background was set to default $bgHEXcolorCode!"
	fi
	# Whichever option was set, use it:
	backgroundColorParam="-background #$bgHEXcolorCode"
fi
# ==== END SET GLOBALS

# FIX: use -e (exists) instead of the deprecated/ambiguous -a, and quote
# the target path.
if [ -e "$svgFilenameNoExtension.$IMGformat" ]
then
	echo "render candidate is $svgFilenameNoExtension.$IMGformat"
	echo "target already exists; will not render."
	echo ". . ."
else
	echo "rendering target file $svgFilenameNoExtension.$IMGformat . . ."
	# GraphicsMagick (`gm convert`) breaks on some svgo-optimized SVGs
	# (see https://stackoverflow.com/a/27919097/1397555), so ImageMagick
	# is used. $backgroundColorParam is intentionally unquoted: it must
	# expand to two words ("-background" plus its value).
	magick -size "$IMGsize" $backgroundColorParam "$svgFileName" "$svgFilenameNoExtension.$IMGformat"
fi
| true
|
f626a4251f852069ba592697c761d06f424c747a
|
Shell
|
javaperson/TencentSSH
|
/to
|
UTF-8
| 459
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
# Remote-command helper: resolve a host alias, obtain a session token, and
# run a command on the host through the to_expect script.
#   -h HOST   host alias, resolved via addr() from host_addr.sh
#   -x CMD    command to execute remotely
#   -t TOKEN  auth token; fetched with qc-cvm-token when omitted
# NOTE(review): `source` is a bashism under #!/bin/sh; works where sh is
# bash, otherwise change to `. ./host_addr.sh` — confirm target systems.
source host_addr.sh
exec=''
while getopts "h:x:t:" Option
do
  case $Option in
    x) exec=$OPTARG;;
    h) host=$(addr "$OPTARG");;
    t) token=$OPTARG;;
  esac
done
shift $(($OPTIND - 1))
echo "HOST:$host"
echo "CMD:$exec"
# Fetch a token when one was not supplied on the command line.
# FIX: quoted the test (empty/whitespace-safe) and replaced backticks.
if [ -z "${token}" ]; then
  token=$(qc-cvm-token | grep '^[0-9]')
  # Re-echo collapses embedded newlines/whitespace into single spaces.
  token=$(echo ${token})
fi
echo "TOKEN:$token"
expect /usr/local/bin/to_expect "$host" "$token" "$exec"
#to.sh -h animal-w1 -x "bash -c 'echo animal'"
| true
|
0651eb149d8e5a8e65c92ca3fc14cdd61b812570
|
Shell
|
satchpx/dmcache
|
/dumpstats
|
UTF-8
| 1,192
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Append a timestamped diagnostics snapshot (Portworx volumes, dm-cache
# status, kernel disk stats) to $outfile, then upload it to slack.
outfile="/var/log/dumpstats"

# Print an 80-character horizontal rule.
hr() { printf '%80s\n' | tr ' ' -; }

# Group all sections and redirect once instead of `>> $outfile` per line.
{
  hr
  date
  # One detail section per Portworx volume.
  for vol in $(pxctl v l | grep -v NAME | awk '{print $1}'); do
    echo "======= $vol ======="
    pxctl v i "$vol"
    echo "========== done ========="
  done
  hr
  echo ""
  echo ""
  hr
  date
  dmsetup status /dev/mapper/pxd-cached
  hr
  echo ""
  echo ""
  hr
  date
  cat /proc/diskstats
  hr
  echo ""
  echo ""
} >> "$outfile"
# Prometheus scrape kept for reference (disabled in the original too):
#hr >> $outfile
#date >> $outfile
#curl -s http://127.0.0.1:9001/metrics | grep px_disk_stats_read_bytes | grep -v -e HELP -e TYPE >> $outfile
#curl -s http://127.0.0.1:9001/metrics | grep px_disk_stats_read_bytes | grep -v -e HELP -e TYPE >> $outfile
#hr >> $outfile
#echo "" >> $outfile
#echo "" >> $outfile
# upload the file to slack
curl -F file=@/var/log/dumpstats https://slackit.portworx.com/juniper-eval
| true
|
bc74700d63dd29042d11b1677b3e712e4f995bd7
|
Shell
|
kazufusa/mattermost-docker
|
/enable_japanese_search.bash
|
UTF-8
| 363
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
set -Ceux
# Rebuild the full-text index on Posts.Message with MySQL's ngram parser,
# which enables CJK (e.g. Japanese) substring search in Mattermost.
SQL1="ALTER TABLE Posts DROP INDEX idx_posts_message_txt;"
# Backticks are double-escaped: once for this shell, once for the shell
# spawned inside the container by `docker-compose exec`.
SQL2="ALTER TABLE Posts ADD FULLTEXT INDEX idx_posts_message_txt (\\\`Message\\\`) WITH PARSER ngram;"
# NOTE(review): DB credentials are hard-coded and visible to `ps` inside
# the container — consider --defaults-extra-file for real deployments.
docker-compose exec db bash -c " \
set -x;\
echo \"${SQL1}\" | mysql -u mmuser mattermost -pmmuser_password; \
echo \"${SQL2}\" | mysql -u mmuser mattermost -pmmuser_password; \
"
| true
|
96a2f9dabc1864cf76daf531381ac93ee37708d5
|
Shell
|
wn1980/docker-snowboy-build
|
/run.sh
|
UTF-8
| 509
| 3.15625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Launch the snowboy build container, selecting the image tag by CPU arch.

# Preset display resolutions (not referenced below — kept for reference).
p1080=1920x1080
p720=1280x720
p169=1600x900

# Image tag: default for x86_64, ":rpi" for aarch64.
arch=$(uname -m)
if [ "$arch" == 'x86_64' ]
then
    tag=
elif [ "$arch" == 'aarch64' ]
then
    tag=:rpi
else
    echo 'not matched platform!'
    # FIX: exit non-zero on an unsupported platform (was: exit 0).
    exit 1
fi

# FIX: the original tested `[[ -f "~/rosuser_home" ]]`, which can never match
# (tilde does not expand inside quotes, and -f tests a regular file), so the
# directory was never created. Create it when missing.
if [[ ! -d "$HOME/rosuser_home" ]]
then
    mkdir -p "$HOME/rosuser_home"
    chmod -Rf 777 "$HOME/rosuser_home"
fi

NAME=build-snowboy
# Remove any stale container of the same name, then prune leftovers.
docker rm -f "$NAME"
docker system prune -f
docker run -it --rm --name "$NAME" \
    -v "$(pwd)/workspace:/workspace:rw" \
    -v "/dev:/dev" \
    --privileged \
    wn1980/build-snowboy${tag} bash
| true
|
6dbdd0941551bb9e4cb69cc97bac747cb2a35a08
|
Shell
|
paoloyx/docker-host-gateway-setup
|
/set-docker-routing.sh
|
UTF-8
| 1,861
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Route all traffic from the Docker bridge subnet out of a specific NIC by
# installing a dedicated policy-routing table named "docker".
if [ "$#" -ne 3 ]; then
echo -e "Usage \n\n ./set-docker-routing.sh [DOCKER_CIDR_FROM] [OUTPUT_NIC_DEV] [OUTPUT_NIC_IP_ADDRESS] \n\n Example: ./set-docker-routing.sh 172.17.0.0/16 enp0s8 192.168.1.36"
exit 1
fi
DOCKER_CIDR_FROM=$1
OUTPUT_NIC_DEV=$2
OUTPUT_NIC_IP_ADDRESS=$3
# clean up previous resources
echo -e "Cleaning resources on startup"
# Loop because repeated runs may have stacked duplicate rules; delete until
# none remain.
while $(ip rule list | grep -q "$DOCKER_CIDR_FROM lookup docker"); do ip rule del from $DOCKER_CIDR_FROM lookup docker; done
echo -e "Cleaned ip rules"
# NOTE(review): on a fresh machine the "docker" table does not exist yet,
# so this del prints an error; with no `set -e` the script continues.
ip route del default via $OUTPUT_NIC_IP_ADDRESS dev $OUTPUT_NIC_DEV table docker
echo -e "Cleaned ip routes"
# Remove our table entry and any blank lines it left behind.
# NOTE(review): `sed -i` without a suffix is GNU sed syntax.
sed -i -e 's/1 docker//g' -e '/^$/d' /etc/iproute2/rt_tables
echo -e "Cleaned /etc/iproute2/rt_tables file\n"
# Create a new routing table just for docker
echo "1 docker" >> /etc/iproute2/rt_tables
echo "Added new 'docker' routing table\n"
# Add a rule stating any traffic from the docker0 bridge interface should use
# the newly added docker routing table
echo -e "All traffic from $DOCKER_CIDR_FROM will be routed through 'docker' table"
ip rule add from $DOCKER_CIDR_FROM table docker
ip rule list
echo -e "\n"
# Add a route to the newly added docker routing table that dictates all traffic
# go out the $OUTPUT_NIC_IP_ADDRESS interface on $OUTPUT_NIC_DEV
ip route add default via $OUTPUT_NIC_IP_ADDRESS dev $OUTPUT_NIC_DEV table docker
echo -e "All traffic from 'docker' table will be forwarded to $OUTPUT_NIC_DEV device (ip address: $OUTPUT_NIC_IP_ADDRESS)"
ip route list table docker
echo -e "\n"
# Flush the route cache
ip route flush cache
echo -e "Flushed route cache\n"
# Set default ip address to which docker daemon should bind to --> $OUTPUT_NIC_IP_ADDRESS
# echo -e "{\"ip\":\"$OUTPUT_NIC_IP_ADDRESS\"}" > /etc/docker/daemon.json
# echo -e "New docker daemon config:"
# cat /etc/docker/daemon.json
# echo -e "\n"
| true
|
9d0e491453a233e56cde576496af078cf14d2d55
|
Shell
|
well-zt/tutorials
|
/UBend_Channel/preProcessing.sh
|
UTF-8
| 360
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# OpenFOAM U-bend channel case: mesh generation and field initialization.
# Requires a sourced OpenFOAM environment (WM_PROJECT is set by its bashrc).
if [ -z "$WM_PROJECT" ]; then
echo "OpenFOAM environment not found, forgot to source the OpenFOAM bashrc?"
exit
fi
# pre-processing
# generate mesh
echo "Generating mesh.."
blockMesh > log.meshGeneration
# renumberMesh reduces matrix bandwidth; its log appends to the same file.
renumberMesh -overwrite >> log.meshGeneration
echo "Generating mesh.. Done!"
# copy initial and boundary condition files
cp -r 0.orig 0
|
70049a41e92cc334305dc83f55dcd518e8529209
|
Shell
|
GSA/data.gov
|
/bin/disable-egress
|
UTF-8
| 1,364
| 3.78125
| 4
|
[
"CC0-1.0",
"LicenseRef-scancode-us-govt-public-domain",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
set -e
set -o pipefail
# for debugging
# set -x
# Print usage text to stdout and abort with status 1.
# TODO: add a --space <SPACE> option.
help()
{
  cat <<EOF

$0: disables egress proxy for a given app.
Syntax: disable-egress <APP>
  <APP> must be a valid cf app in the current space with egress enabled.

To re-enable egress for an app, use enable-egress.
EOF
  exit 1
}
app="$1"
if [ -z "$app" ]; then
echo "No app provided."
help
else
echo "Checking for app $app in space.."
if cf apps | tr -s ' ' | cut -d ' ' -f 1 | grep -q -E "(^|\s)$app($|\s)"; then
echo "$app found."
echo "Unsetting environment variable proxy_url.."
cf unset-env "$app" proxy_url
echo "Checking network policy.."
read -r source dest protocol port space <<< "$( cf network-policies --source "$app" | tail -n +4 | tr -s ' ' | cut -d ' ' -f 1-5 )"
if [ -z "$dest" ] && [ -z "$protocol" ] && [ -z "$port" ] && [ -z "$space" ]; then
# network policy already empty, pass
echo "Network policy not found, continuing.."
else
cf remove-network-policy "$source" "$dest" -s "$space" --protocol "$protocol" --port "$port"
fi
echo "Restarting $app.."
cf restart "$app"
else
echo "App not found in space."
help
fi
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.