blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e8dd8c8161a2f64fd2572961c0a834dc0b1be400
|
Shell
|
reillyowencooper/daphnia_magna_metagenome
|
/sequencing_pipeline/02_assembly_and_binning.sh
|
UTF-8
| 10,019
| 3.09375
| 3
|
[] |
no_license
|
# Step 1: Trimmed, filtered reads were assembled into metagenomes using metaSPAdes.
# 7 metagenomes were created: a master coassembly of all four samples, a coassembly of the adult samples, a coassembly of the juvenile samples,
# and assemblies of all four samples individually.
# After assembly, contig files were renamed according to their assembly and moved out of assembly folder for read mapping.
cd "$WORK/metagenomes"
module load spades
# Individual assemblies, one per sample (s1/s2 = adult samples, s3/s4 = juvenile samples).
spades.py --meta -1 s1bac_r1.fastq -2 s1bac_r2.fastq -o s1_assembly/
spades.py --meta -1 s2bac_r1.fastq -2 s2bac_r2.fastq -o s2_assembly/
spades.py --meta -1 s3bac_r1.fastq -2 s3bac_r2.fastq -o s3_assembly/
spades.py --meta -1 s4bac_r1.fastq -2 s4bac_r2.fastq -o s4_assembly/
# Adult coassembly input: concatenate both adult samples' forward and reverse reads.
cat s1bac_r1.fastq s2bac_r1.fastq > adultf.fastq
cat s1bac_r2.fastq s2bac_r2.fastq > adultr.fastq
spades.py --meta -1 adultf.fastq -2 adultr.fastq -o adult_assembly/
# FIX: the juvenile coassembly previously concatenated the SAME file twice
# (s3bac_r1 + s3bac_r1 and s4bac_r2 + s4bac_r2), so juvf/juvr held mismatched,
# duplicated read sets. It must combine s3 and s4, mirroring the adult coassembly.
cat s3bac_r1.fastq s4bac_r1.fastq > juvf.fastq
cat s3bac_r2.fastq s4bac_r2.fastq > juvr.fastq
spades.py --meta -1 juvf.fastq -2 juvr.fastq -o juv_assembly/
# Master coassembly input: all four samples (adult + juvenile concatenations).
cat adultf.fastq juvf.fastq > masterf.fastq
cat adultr.fastq juvr.fastq > masterr.fastq
spades.py --meta -1 masterf.fastq -2 masterr.fastq -o master_assembly/
# Copy each assembly's contigs out under an assembly-specific name for read mapping.
cp s1_assembly/contigs.fasta s1_assembly.fasta
cp s2_assembly/contigs.fasta s2_assembly.fasta
cp s3_assembly/contigs.fasta s3_assembly.fasta
cp s4_assembly/contigs.fasta s4_assembly.fasta
cp adult_assembly/contigs.fasta adult_assembly.fasta
cp juv_assembly/contigs.fasta juv_assembly.fasta
cp master_assembly/contigs.fasta master_assembly.fasta
# Step 2: Reads from samples were mapped to specific assemblies. Juvenile reads were mapped to the juvenile coassembly, adult to the adult coassembly, and all to the master coassembly.
module load bwa samtools
# Build BWA indexes for the three coassemblies that reads will be aligned to.
bwa index master_assembly.fasta
bwa index adult_assembly.fasta
bwa index juv_assembly.fasta
# Align each sample's paired reads; output naming is <sample>_<assembly>.sam
# (e.g. adult1_master = sample s1 reads vs. the master coassembly).
bwa mem master_assembly.fasta s1bac_r1.fastq s1bac_r2.fastq > adult1_master.sam
bwa mem master_assembly.fasta s2bac_r1.fastq s2bac_r2.fastq > adult2_master.sam
bwa mem master_assembly.fasta s3bac_r1.fastq s3bac_r2.fastq > juv1_master.sam
bwa mem master_assembly.fasta s4bac_r1.fastq s4bac_r2.fastq > juv2_master.sam
bwa mem adult_assembly.fasta s1bac_r1.fastq s1bac_r2.fastq > adult1_adult.sam
bwa mem adult_assembly.fasta s2bac_r1.fastq s2bac_r2.fastq > adult2_adult.sam
bwa mem juv_assembly.fasta s3bac_r1.fastq s3bac_r2.fastq > juv1_juv.sam
bwa mem juv_assembly.fasta s4bac_r1.fastq s4bac_r2.fastq > juv2_juv.sam
# Convert SAM -> BAM, dropping unmapped reads (-F 4 excludes the "read unmapped" flag).
samtools view -F 4 -bS adult1_master.sam > adult1_master.bam
samtools view -F 4 -bS adult2_master.sam > adult2_master.bam
samtools view -F 4 -bS juv1_master.sam > juv1_master.bam
samtools view -F 4 -bS juv2_master.sam > juv2_master.bam
samtools view -F 4 -bS adult1_adult.sam > adult1_adult.bam
samtools view -F 4 -bS adult2_adult.sam > adult2_adult.bam
samtools view -F 4 -bS juv1_juv.sam > juv1_juv.bam
samtools view -F 4 -bS juv2_juv.sam > juv2_juv.bam
# Coordinate-sort each BAM (required before indexing and for Anvi'o profiling).
samtools sort adult1_master.bam > adult1_master_sorted.bam
samtools sort adult2_master.bam > adult2_master_sorted.bam
samtools sort juv1_master.bam > juv1_master_sorted.bam
samtools sort juv2_master.bam > juv2_master_sorted.bam
samtools sort adult1_adult.bam > adult1_adult_sorted.bam
samtools sort adult2_adult.bam > adult2_adult_sorted.bam
samtools sort juv1_juv.bam > juv1_juv_sorted.bam
samtools sort juv2_juv.bam > juv2_juv_sorted.bam
# Index the sorted BAMs (.bai files) so downstream tools can random-access them.
samtools index adult1_master_sorted.bam
samtools index adult2_master_sorted.bam
samtools index juv1_master_sorted.bam
samtools index juv2_master_sorted.bam
samtools index adult1_adult_sorted.bam
samtools index adult2_adult_sorted.bam
samtools index juv1_juv_sorted.bam
samtools index juv2_juv_sorted.bam
# Step 3: Move all sorted .bam files and .fasta files to a folder, then download to own computer for work in Anvi'o.
mkdir for_anvio
cp *sorted.bam for_anvio/
cp *.fasta for_anvio/
# NOTE(review): this scp pulls FROM the cluster, so it is presumably meant to be
# run on the local workstation, not as part of the cluster job -- confirm before reuse.
scp -r rcooper@crane.unl.edu:/work/cresslerlab/rcooper/metagenomes/for_anvio/* for_anvio/
# Will be prompted for password
# Will take forEVER to download
# Step 4: Anvi'o pre-processing. Here, each assembly contig file gets trimmed for contigs > 2500bp and other identification steps occur. Also make backups of contig databases!
# NOTE(review): "Dekstop" below looks like a typo for "Desktop" (and may need a
# leading ~/ or absolute path) -- confirm against the machine this was run on.
cd Dekstop/metagenomes/for_anvio
# Reformat contigs: keep only contigs >= 2500 bp (-l) and simplify deflines.
anvi-script-reformat-fasta master_assembly.fasta -l 2500 -o master.fasta
anvi-script-reformat-fasta adult_assembly.fasta -l 2500 -o ad.fasta
anvi-script-reformat-fasta juv_assembly.fasta -l 2500 -o juv.fasta
# Build one contigs database per coassembly, then annotate with HMM hits.
anvi-gen-contigs-database -f master.fasta -o master.db -n 'Master assembly contigs'
anvi-gen-contigs-database -f ad.fasta -o ad.db -n 'Adult assembly contigs'
anvi-gen-contigs-database -f juv.fasta -o juv.db -n 'Juvenile assembly contigs'
anvi-run-hmms -c master.db
anvi-run-hmms -c ad.db
anvi-run-hmms -c juv.db
# Export predicted gene sequences for later use.
anvi-get-sequences-for-gene-calls -c master.db -o master_gene_calls.fa
anvi-get-sequences-for-gene-calls -c ad.db -o ad_gene_calls.fa
anvi-get-sequences-for-gene-calls -c juv.db -o juv_gene_calls.fa
# Back up the contigs databases before profiling mutates anything.
cp master.db master_backup.db
cp ad.db ad_backup.db
cp juv.db juv_backup.db
# Here, sorted read .bams are mapped onto their respective binned assemblies.
scp -r rcooper@crane.unl.edu:/work/cresslerlab/rcooper/metagenomes/for_anvio/*.names .
# Profile each sample's sorted BAM against its matching contigs db, then merge
# the per-sample profiles into one merged profile per coassembly.
anvi-profile -i adult1_master_sorted.bam -c master.db
anvi-profile -i adult2_master_sorted.bam -c master.db
anvi-profile -i juv1_master_sorted.bam -c master.db
anvi-profile -i juv2_master_sorted.bam -c master.db
anvi-merge adult1_master_sorted.bam-ANVIO_PROFILE/PROFILE.db adult2_master_sorted.bam-ANVIO_PROFILE/PROFILE.db juv1_master_sorted.bam-ANVIO_PROFILE/PROFILE.db juv2_master_sorted.bam-ANVIO_PROFILE/PROFILE.db -o master_merged -c master.db
anvi-profile -i adult1_adult_sorted.bam -c ad.db
anvi-profile -i adult2_adult_sorted.bam -c ad.db
anvi-merge adult1_adult_sorted.bam-ANVIO_PROFILE/PROFILE.db adult2_adult_sorted.bam-ANVIO_PROFILE/PROFILE.db -o ad_merged -c ad.db
anvi-profile -i juv1_juv_sorted.bam -c juv.db
anvi-profile -i juv2_juv_sorted.bam -c juv.db
anvi-merge juv1_juv_sorted.bam-ANVIO_PROFILE/PROFILE.db juv2_juv_sorted.bam-ANVIO_PROFILE/PROFILE.db -o juv_merged -c juv.db
# Step 7: Anvi'o CONCOCT binning and refinement. Refining bins is a manual process, so it can't really be shown in code. My guidelines for refining bins are:
# Check CONCOCT-generated bins. If completeness > 90% and redundancy < 10%, that's a good bin. Leave it.
# If completeness > 90% and redundancy > 10%, refine bin. Probably will have to split bin, which will lower completeness (that's ok).
# If completeness < 90% and redundancy < 10%, try to merge with other closely related bins. If merging increases completeness by > 10%, keep merge.
# Don't even try if completeness is <20%.
anvi-interactive -p master_merged/PROFILE.db -c master.db -C CONCOCT
# Example for anvi-refine -- do this for bins you want to refine
anvi-refine -p master_merged/PROFILE.db -c master.db -C CONCOCT -b Bin_6
# Example for anvi-merge-bins -- do this for bins that you think should go together
anvi-merge-bins -p master_merged/PROFILE.db -c master.db -C CONCOCT -b Bin_1,Bin_2 -B Bin_A
anvi-interactive -p ad_merged/PROFILE.db -c ad.db -C CONCOCT
anvi-interactive -p juv_merged/PROFILE.db -c juv.db -C CONCOCT
# Step 8: Once you are satisfied with bins, summarize your results. This will generate a folder with a static .html summary of your bins as well as .fasta files of each bin.
# Use these .fasta files for GTDB-Tk.
anvi-summarize -p master_merged/PROFILE.db -c master.db -C CONCOCT --report-aa-seqs-for-gene-calls -o master_bin_summary/
anvi-summarize -p ad_merged/PROFILE.db -c ad.db -C CONCOCT --report-aa-seqs-for-gene-calls -o ad_bin_summary/
anvi-summarize -p juv_merged/PROFILE.db -c juv.db -C CONCOCT --report-aa-seqs-for-gene-calls -o juv_bin_summary/
# Rename all species bins according to assembly they came from -- i.e. species 1 from master assembly should be 'sp1_master.fa'
# Move all renamed .fa bins up one level (into assembly-specific 'bin_by_bin' folder)
mkdir for_prokka
cp master_assembly.fasta for_prokka/master_assembly.fa
cp adult_assembly.fasta for_prokka/adult_assembly.fa
cp juv_assembly.fasta for_prokka/juv_assembly.fa
cp master_bin_summary/bin_by_bin/*.fa for_prokka/
cp ad_bin_summary/bin_by_bin/*.fa for_prokka/
cp juv_bin_summary/bin_by_bin/*.fa for_prokka/
# Step 8.5: Assign taxonomy to binned genomes using GTDB-Tk. This should be done on individual species.
# Needs to be done on HCC.
scp -r for_prokka/*.fa rcooper@crane.unl.edu:/work/cresslerlab/rcooper/metagenomes/
mkdir bin_taxonomy
cp sp* bin_taxonomy/
module load gtdbtk/0.1
gtdbtk classify_wf --genome_dir named_mag_identities --extension fasta --out_dir master_mag_gtdbtk
# ...Continue for number of species binned
# Step 9: Identify potential genes using Prokka for the five identified MAGs.
# Load files onto HCC
module load prokka
prokka --compliant --centre X --outdir sp1_master_prokka/ --prefix sp1_master sp1_master.fa
# ...Continue for number of species binned
# Prokka output will contain date of job start. Take all the .faa (amino acid) files and make sure they have clear names.
# Step 10. These files will be input to GhostKOALA for K Number assignment (https://www.kegg.jp/ghostkoala/).
# Unfortunately, each job has to be run sequentially.
# Files from GhostKOALA will just be named 'user_ko.txt'. Rename to appropriate assembly or species -- i.e. 'master_ko.txt', 'sp1_ko.txt'
# Step 11. In R, clean up the annotated files so only genes with assigned K numbers occur.
# extract_kos.R
# NOTE(review): the five lines below are R code pasted into this shell file for
# reference (script 'extract_kos.R'); they will NOT run if this file is executed
# as a shell script. Run them in R instead.
sp1 <- read.csv("../sp1_ko.txt", sep = "\t", header = FALSE)
sp1$V2[sp1$V2==""] <- NA
sp1 <- subset(sp1, V2 != "NA")
sp1$V1 <- seq(1:1428)
write.table(sp1, "../sp1_ko_trimmed.txt", sep = "\t", col.names = FALSE, row.names = FALSE, quote = FALSE)
# Repeat for each assembly/species. I know this could be a for loop, but I was lazy.
# Step 12: Cleaned files (named with _trimmed) should be submitted to the KEGG Module Mapping tool (https://www.genome.jp/kegg/tool/map_module.html).
# This will show complete or nearly-complete metabolic modules in pathways.
| true
|
c3ea590bf31294642a9d8803e7bb0999dc1f6376
|
Shell
|
linkerd/linkerd2
|
/bin/shellcheck-all
|
UTF-8
| 859
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Lint every shell script in the repository with shellcheck, after first
# verifying that each *.sh file starts with a bash/sh env shebang.
set -eu

bindir=$( cd "${0%/*}" && pwd )
rootdir=$( cd "$bindir"/.. && pwd )

# Print the path of every *.sh file in the repo, skipping vendored/build trees.
scripts() {
    find "$rootdir" -name '*.sh' \
        -not -path "$rootdir/.git/*" \
        -not -path "$rootdir/target/*" \
        -not -path "$rootdir/web/app/node_modules/*"
}

# Make sure all files with the .sh extension are shellscripts and have a
# proper shebang
shebang_re='#!/usr/bin/env (bash|sh)'
while IFS= read -r script_path; do
    if ! head -1 "$script_path" | grep -qE "${shebang_re}\$"; then
        echo "ERROR: No valid '$shebang_re' shebang found in '$script_path'" >&2
        exit 1
    fi
done < <(scripts)

# For more information on shellcheck failures:
# https://github.com/koalaman/shellcheck/wiki/Checks

# We want the word splitting for the shellcheck arguments
# shellcheck disable=SC2046
"$bindir"/shellcheck -x -P "$bindir" $(scripts |xargs)
| true
|
cc3324a6e7710a0de6542c3626d7527e9b23e4a1
|
Shell
|
JuanDaw/1daw
|
/sinf/3trim/tema10/actScript2/muestra1.sh
|
UTF-8
| 325
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Display each file given as an argument with 'more', counting how many files
# were shown. Stops at the first argument that is not a regular file.
if [ $# -ne 0 ]
then
    # FIX: 'contador' was never initialized and was "incremented" with
    # contador=$contador+1, which is string concatenation in shell (it produced
    # "+1+1..." rather than a number). Initialize it and use $(( )) arithmetic.
    contador=0
    while [ "$*" ]
    do
        # Quote "$1" so filenames with spaces don't break the test.
        if [ -f "$1" ]
        then
            contador=$((contador+1))
            more "$1"
            shift
        else
            echo "El argumento $1 no es un fichero"
            break
        fi
    done
else
    echo "Debes introducir un argumento al menos"
fi
| true
|
9f015dc6b5667f00839d674d5cecbed7d6b74aff
|
Shell
|
cropleyb/pentai
|
/iOS/build-pentai.sh
|
UTF-8
| 1,569
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -x
# Cross-compile the PentAI python sources into a static library for iOS using
# the kivy-ios toolchain. Requires environment.sh to define KIVYIOSROOT,
# BUILDROOT, HOSTPYTHON, ARM_CC/ARM_CFLAGS/ARM_LDFLAGS and the helper
# functions 'try' and 'deduplicate' -- presumably so; TODO confirm.
. $(dirname $0)/environment.sh
PROJ=pente
EXTSRC=$KIVYIOSROOT/src/$PROJ
mkdir -p $EXTSRC
# Sync project sources from Dropbox into the kivy-ios source tree.
pushd ~/Dropbox/$PROJ
try ./copy_to.sh $EXTSRC
popd
pushd $EXTSRC
# TODO: cp other py files
# Save the host toolchain so it can be restored after the ARM build.
OLD_CC="$CC"
OLD_CFLAGS="$CFLAGS"
OLD_LDFLAGS="$LDFLAGS"
OLD_LDSHARED="$LDSHARED"
export CC="$ARM_CC -I$BUILDROOT/include"
export CFLAGS="$ARM_CFLAGS"
export LDFLAGS="$ARM_LDFLAGS"
export LDSHARED="$KIVYIOSROOT/tools/liblink"
# iOS cythonize
echo "PRE-CYTHONIXE"
# TODO specified .py
$KIVYIOSROOT/tools/cythonize.py pentai/*/*.pyx
echo "POST-CYTHONIXE"
# Build cython module
echo "BEFORE BUILD1"
try $HOSTPYTHON ./setup.py build_ext -g
echo "BETWEEN BUILDS"
try $HOSTPYTHON ./setup.py install -O2 --root iosbuild
echo "AFTER BUILDS"
# Look for built targets
# Strip intermediates from the staged install before copying site-packages over.
try find iosbuild | grep -E '.*\.(py|pyc|so\.o|so\.a|so\.libs)$$' | xargs rm
try rm -rdf "$BUILDROOT/python/lib/python2.7/site-packages/$PROJ*"
# Copy to python for iOS installation
#try cp -R "iosbuild/usr/local/lib/python2.7/site-packages/" "$BUILDROOT/python/lib/python2.7/site-packages"
#try find "iosbuild/usr/local/lib/python2.7/site-packages/" -name "*.so" | xargs cp "$BUILDROOT/python/lib/python2.7/site-packages"
try cp -R "iosbuild/usr/local/lib/python2.7/site-packages/pentai" "$BUILDROOT/python/lib/python2.7/site-packages"
popd
# Restore the host toolchain environment.
export CC="$OLD_CC"
export CFLAGS="$OLD_CFLAGS"
export LDFLAGS="$OLD_LDFLAGS"
export LDSHARED="$OLD_LDSHARED"
# Link all compiled objects into a single static library, then deduplicate it.
bd=$EXTSRC/build/lib.macosx-*
try $KIVYIOSROOT/tools/biglink $BUILDROOT/lib/libpente.a $bd $bd/base $bd/ai $bd/db
deduplicate $BUILDROOT/lib/libpente.a
| true
|
00e88fb207476a9b6b0f051699ec0ab9e5b23eb3
|
Shell
|
malrefai/LaravelBox
|
/box/scripts/provision.sh
|
UTF-8
| 6,905
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant provisioning script for a Laravel development box: nginx, PHP 7.1 +
# FPM, Composer, Node, MongoDB, Redis, SQLite, Supervisor and swap. Intended
# to run as root inside the guest (commands below assume root; 'sudo' appears
# only sporadically).
export DEBIAN_FRONTEND=noninteractive
# Update Package List
apt-get update
# Update System Packages
apt-get -y upgrade
# Force Locale
echo "LC_ALL=en_US.UTF-8" >> /etc/default/locale
locale-gen en_US.UTF-8
# Install Some PPAs
apt-get install -y software-properties-common curl
apt-add-repository ppa:nginx/development -y
apt-add-repository ppa:chris-lea/redis-server -y
apt-add-repository ppa:ondrej/php -y
curl --silent --location https://deb.nodesource.com/setup_6.x | bash -
# Update Package Lists
apt-get update
# Install Some Basic Packages
apt-get install -y build-essential dos2unix gcc git libmcrypt4 libpcre3-dev \
make python2.7-dev python-pip re2c supervisor unattended-upgrades whois vim libnotify-bin debconf-utils
# Set My Timezone
ln -sf /usr/share/zoneinfo/UTC /etc/localtime
# Install PHP Stuffs
apt-get install -y --force-yes php7.1-cli php7.1-dev php7.1-sqlite3 php7.1-gd \
php7.1-curl php7.1-imap php7.1-mbstring \
php7.1-xml php7.1-zip php7.1-bcmath php7.1-soap \
php7.1-intl php7.1-readline php-xdebug
# Install Composer
curl -sS https://getcomposer.org/installer | php
mv composer.phar /usr/local/bin/composer
# Add Composer Global Bin To Path
printf "\nPATH=\"$(sudo su - vagrant -c 'composer config -g home 2>/dev/null')/vendor/bin:\$PATH\"\n" | tee -a /home/vagrant/.profile
# Install Laravel Envoy & Installer
sudo su vagrant <<'EOF'
/usr/local/bin/composer global require "laravel/envoy=~1.0"
/usr/local/bin/composer global require "laravel/installer=~1.1"
EOF
# Set Some PHP CLI Settings
sudo sed -i "s/error_reporting = .*/error_reporting = E_ALL/" /etc/php/7.1/cli/php.ini
sudo sed -i "s/display_errors = .*/display_errors = On/" /etc/php/7.1/cli/php.ini
sudo sed -i "s/memory_limit = .*/memory_limit = 512M/" /etc/php/7.1/cli/php.ini
sudo sed -i "s/;date.timezone.*/date.timezone = UTC/" /etc/php/7.1/cli/php.ini
# Install Nginx & PHP-FPM
apt-get install -y --force-yes nginx php7.1-fpm
rm /etc/nginx/sites-enabled/default
rm /etc/nginx/sites-available/default
service nginx restart
# Setup Some PHP-FPM Options
echo "xdebug.remote_enable = 1" >> /etc/php/7.1/mods-available/xdebug.ini
echo "xdebug.remote_connect_back = 1" >> /etc/php/7.1/mods-available/xdebug.ini
echo "xdebug.remote_port = 9000" >> /etc/php/7.1/mods-available/xdebug.ini
echo "xdebug.max_nesting_level = 512" >> /etc/php/7.1/mods-available/xdebug.ini
sed -i "s/error_reporting = .*/error_reporting = E_ALL/" /etc/php/7.1/fpm/php.ini
sed -i "s/display_errors = .*/display_errors = On/" /etc/php/7.1/fpm/php.ini
sed -i "s/;cgi.fix_pathinfo=1/cgi.fix_pathinfo=0/" /etc/php/7.1/fpm/php.ini
sed -i "s/memory_limit = .*/memory_limit = 512M/" /etc/php/7.1/fpm/php.ini
sed -i "s/upload_max_filesize = .*/upload_max_filesize = 100M/" /etc/php/7.1/fpm/php.ini
sed -i "s/post_max_size = .*/post_max_size = 100M/" /etc/php/7.1/fpm/php.ini
sed -i "s/;date.timezone.*/date.timezone = UTC/" /etc/php/7.1/fpm/php.ini
# Disable XDebug On The CLI
sudo phpdismod -s cli xdebug
# Copy fastcgi_params to Nginx because they broke it on the PPA
# (heredoc body: backslash-escaped \$vars survive as literal nginx variables)
cat > /etc/nginx/fastcgi_params << EOF
fastcgi_param	QUERY_STRING		\$query_string;
fastcgi_param	REQUEST_METHOD		\$request_method;
fastcgi_param	CONTENT_TYPE		\$content_type;
fastcgi_param	CONTENT_LENGTH		\$content_length;
fastcgi_param	SCRIPT_FILENAME		\$request_filename;
fastcgi_param	SCRIPT_NAME		\$fastcgi_script_name;
fastcgi_param	REQUEST_URI		\$request_uri;
fastcgi_param	DOCUMENT_URI		\$document_uri;
fastcgi_param	DOCUMENT_ROOT		\$document_root;
fastcgi_param	SERVER_PROTOCOL		\$server_protocol;
fastcgi_param	GATEWAY_INTERFACE	CGI/1.1;
fastcgi_param	SERVER_SOFTWARE		nginx/\$nginx_version;
fastcgi_param	REMOTE_ADDR		\$remote_addr;
fastcgi_param	REMOTE_PORT		\$remote_port;
fastcgi_param	SERVER_ADDR		\$server_addr;
fastcgi_param	SERVER_PORT		\$server_port;
fastcgi_param	SERVER_NAME		\$server_name;
fastcgi_param	HTTPS			\$https if_not_empty;
fastcgi_param	REDIRECT_STATUS		200;
EOF
# Set The Nginx & PHP-FPM User
sed -i "s/user www-data;/user vagrant;/" /etc/nginx/nginx.conf
sed -i "s/# server_names_hash_bucket_size.*/server_names_hash_bucket_size 64;/" /etc/nginx/nginx.conf
sed -i "s/user = www-data/user = vagrant/" /etc/php/7.1/fpm/pool.d/www.conf
sed -i "s/group = www-data/group = vagrant/" /etc/php/7.1/fpm/pool.d/www.conf
sed -i "s/listen\.owner.*/listen.owner = vagrant/" /etc/php/7.1/fpm/pool.d/www.conf
sed -i "s/listen\.group.*/listen.group = vagrant/" /etc/php/7.1/fpm/pool.d/www.conf
sed -i "s/;listen\.mode.*/listen.mode = 0666/" /etc/php/7.1/fpm/pool.d/www.conf
service nginx restart
service php7.1-fpm restart
# Add Vagrant User To WWW-Data
usermod -a -G www-data vagrant
id vagrant
groups vagrant
# Install Node
apt-get install -y nodejs
/usr/bin/npm install -g gulp
/usr/bin/npm install -g bower
/usr/bin/npm install -g yarn
# Install SQLite
apt-get install -y sqlite3 libsqlite3-dev
# install mongodb
echo "Importing the public key used by the package management system";
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 0C49F3730359A14518585931BC711F9BA15703C6
echo "Creating a list file for MongoDB.";
echo "deb [ arch=amd64,arm64 ] http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.4 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.4.list;
echo "Updating the packages list";
apt-get update;
echo "Install the latest version of MongoDb";
apt-get install -y mongodb-org;
echo "Fixing the pecl errors list";
sed -i -e 's/-C -n -q/-C -q/g' `which pecl`;
echo "Installing OpenSSl Libraries";
apt-get install -y autoconf g++ make openssl libssl-dev libcurl4-openssl-dev;
apt-get install -y libcurl4-openssl-dev pkg-config;
apt-get install -y libsasl2-dev;
echo "Installing PHP7 mongoDb extension";
pecl install mongodb;
echo "adding the extension to your php.ini file";
touch /etc/php/7.1/mods-available/mongodb.ini
echo "extension=mongodb.so" >> /etc/php/7.1/mods-available/mongodb.ini
ln -s /etc/php/7.1/mods-available/mongodb.ini /etc/php/7.1/cli/conf.d/20-mongodb.ini
ln -s /etc/php/7.1/mods-available/mongodb.ini /etc/php/7.1/fpm/conf.d/20-mongodb.ini
echo "Add mongodb.service file"
cat >/etc/systemd/system/mongodb.service <<EOL
[Unit]
Description=High-performance, schema-free document-oriented database
After=network.target
[Service]
User=mongodb
ExecStart=/usr/bin/mongod --quiet --config /etc/mongod.conf
[Install]
WantedBy=multi-user.target
EOL
systemctl start mongodb
systemctl status mongodb
systemctl enable mongodb
echo "restarting The nginx server";
service nginx restart && sudo service php7.1-fpm restart
# Install A Few Other Things
apt-get install -y redis-server
# Configure Supervisor
systemctl enable supervisor.service
service supervisor start
apt-get -y autoremove
apt-get -y clean
# Enable Swap Memory
# NOTE(review): creates a 1 GiB swap file; not persisted in /etc/fstab, so it
# is lost on reboot -- presumably acceptable for a throwaway Vagrant box.
/bin/dd if=/dev/zero of=/var/swap.1 bs=1M count=1024
/sbin/mkswap /var/swap.1
/sbin/swapon /var/swap.1
| true
|
d3fbbb8f2882f747a19d84e32db67eba74e3b01f
|
Shell
|
NetBSD/pkgsrc
|
/security/password-store/patches/patch-src_password-store.sh
|
UTF-8
| 1,826
| 3.234375
| 3
|
[] |
no_license
|
$NetBSD: patch-src_password-store.sh,v 1.3 2018/06/14 16:08:39 leot Exp $
Avoid non portable mkdir(1) `-v' parameter.
--- src/password-store.sh.orig 2018-06-14 14:58:28.000000000 +0000
+++ src/password-store.sh
@@ -323,7 +323,7 @@ cmd_init() {
fi
rmdir -p "${gpg_id%/*}" 2>/dev/null
else
- mkdir -v -p "$PREFIX/$id_path"
+ mkdir -p "$PREFIX/$id_path"
printf "%s\n" "$@" > "$gpg_id"
local id_print="$(printf "%s, " "$@")"
echo "Password store initialized for ${id_print%, }${id_path:+ ($id_path)}"
@@ -432,7 +432,7 @@ cmd_insert() {
[[ $force -eq 0 && -e $passfile ]] && yesno "An entry already exists for $path. Overwrite it?"
- mkdir -p -v "$PREFIX/$(dirname -- "$path")"
+ mkdir -p "$PREFIX/$(dirname -- "$path")"
set_gpg_recipients "$(dirname -- "$path")"
if [[ $multiline -eq 1 ]]; then
@@ -466,7 +466,7 @@ cmd_edit() {
local path="${1%/}"
check_sneaky_paths "$path"
- mkdir -p -v "$PREFIX/$(dirname -- "$path")"
+ mkdir -p "$PREFIX/$(dirname -- "$path")"
set_gpg_recipients "$(dirname -- "$path")"
local passfile="$PREFIX/$path.gpg"
set_git "$passfile"
@@ -509,7 +509,7 @@ cmd_generate() {
check_sneaky_paths "$path"
[[ $length =~ ^[0-9]+$ ]] || die "Error: pass-length \"$length\" must be a number."
[[ $length -gt 0 ]] || die "Error: pass-length must be greater than zero."
- mkdir -p -v "$PREFIX/$(dirname -- "$path")"
+ mkdir -p "$PREFIX/$(dirname -- "$path")"
set_gpg_recipients "$(dirname -- "$path")"
local passfile="$PREFIX/$path.gpg"
set_git "$passfile"
@@ -598,7 +598,7 @@ cmd_copy_move() {
echo "$old_path"
[[ -e $old_path ]] || die "Error: $1 is not in the password store."
- mkdir -p -v "${new_path%/*}"
+ mkdir -p "${new_path%/*}"
[[ -d $old_path || -d $new_path || $new_path == */ ]] || new_path="${new_path}.gpg"
local interactive="-i"
| true
|
46651b748c6420d825d04ea0cf9352df57be6944
|
Shell
|
xarses/fuel-library
|
/files/fuel-ha-utils/tools/swiftcheck
|
UTF-8
| 1,879
| 4.21875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Script to make a HAProxy capable of monitoring the Swift proxy backends status.
# This script checks the given scan target(auth endpoint) and also performs a Swift
# healthcheck via the given Swift endpoint with the given connect timeout.
# Reports an HTTP 200 OK, if all of the results are OK.
# If the healthcheck result was not OK or the Swift/Auth endpoint was not reachable,
# it would report an HTTP 503 Error.
#
# Author: Bogdan Dobrelya <bdobrelia@mirantis.com>
#
if [[ $1 == '-h' || $1 == '--help' || "$#" -ne 3 ]];then
    echo "Usage: $0 <local_swift_endpoint> <scan_target> <connect_timeout>"
    exit
fi
# Set options
url=${1%/} # remove trailing slash
scan_target=${2/:/ } # convert to netcat format
connect_timeout=$3
ncat=$(type -P nc)
curl=$(type -P curl)
waiting_time=3
pause=1
result='UNDEFINED'
# Scan for the target availability
# Poll with nc -z until the port answers or ~3s elapse. ${scan_target} is
# deliberately unquoted: it was rewritten above to "host port" and must
# word-split into two nc arguments.
while !($ncat -z ${scan_target}) && [ $waiting_time -gt 0 ]; do
    sleep $pause
    (( waiting_time -= pause ))
done
# Check for the swift healthcheck report via given endpoint url
# waiting_time > 0 means the port responded before the deadline expired.
if [[ $waiting_time -gt 0 ]]; then
    result=$($curl --silent --connect-timeout ${connect_timeout} --retry 1 -XGET ${url}/healthcheck)
fi
if [[ $result == 'OK' ]]; then
    # Swift healthcheck is OK and endpoint is reachable
    # return HTTP 200. Shell return-code is 0
    echo -en "HTTP/1.1 200 OK\r\n"
    echo -en "Content-Type: text/plain\r\n"
    echo -en "Connection: close\r\n"
    echo -en "Content-Length: 5\r\n"
    echo -en "\r\n"
    echo -en "OK.\r\n"
    sleep 0.1
    exit 0
else
    # Swift healthcheck failed or endpoint was not reachable,
    # return HTTP 503. Shell return-code is 1
    echo -en "HTTP/1.1 503 Service Unavailable\r\n"
    echo -en "Content-Type: text/plain\r\n"
    echo -en "Connection: close\r\n"
    echo -en "Content-Length: 8\r\n"
    echo -en "\r\n"
    echo -en "Error.\r\n"
    sleep 0.1
    exit 1
fi
| true
|
2724c8e6d4ea5650407985a5f0d94ed9526f76b0
|
Shell
|
Jekotia/srv-saturn
|
/Dockerfiles/jekyll/copy/entrypoint
|
UTF-8
| 725
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: optionally remap the 'jekyll' user/group to the
# UID/GID supplied via JEKYLL_UID/JEKYLL_GID (so bind-mounted files keep sane
# ownership), then exec the container command.
[ "$DEBUG" = "true" ] && set -x
set -e
# --
# Default to the image's built-in jekyll UID/GID when not overridden.
: "${JEKYLL_UID:=$(id -u jekyll)}"
: "${JEKYLL_GID:=$(id -g jekyll)}"
# --
export JEKYLL_UID
export JEKYLL_GID
# --
# Users can customize our UID's to fit their own so that
# we don't have to chown constantly. Well it's not like
# we do so much of it (anymore) it's slow, but we do
# change permissions which can result in some bad
# behavior on OS X.
# --
# FIX: quote all expansions -- unquoted $JEKYLL_UID/$JEKYLL_DATA_DIR would
# word-split (and an unset JEKYLL_DATA_DIR would silently vanish from chown's
# argument list instead of failing loudly).
if [ "$JEKYLL_UID" != "0" ] && [ "$JEKYLL_UID" != "$(id -u jekyll)" ]; then
  usermod  -u "$JEKYLL_UID" jekyll
  groupmod -g "$JEKYLL_GID" jekyll

  if [ "$FULL_CHOWN" ]; then
    chown -R jekyll:jekyll \
      "$JEKYLL_DATA_DIR"
  else
    chown jekyll:jekyll \
      "$JEKYLL_DATA_DIR"
  fi
fi
# --
# Replace the shell with the requested command (PID is preserved).
exec "$@"
| true
|
6d08b5cc6c2099d8a0e7d085c3a4119c43758070
|
Shell
|
bradyshutt/dotfiles
|
/files/bin/lolcommits-addtimestamp
|
UTF-8
| 286
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Overlay the current date/time onto an image in-place (lolcommits helper).
# Usage: lolcommits-addtimestamp <image-file>

# Timestamp like "Feb 3, 1:05pm". %_d and %_I pad with a leading space, so
# collapse the resulting double spaces to single ones.
# FIX(review): the original sed pattern appeared as 's/ / /g' (a no-op);
# given the space-padded formats, the intent is clearly to squeeze "  " -> " ".
TEXT=$(date +'%b %_d, %_I:%M%P' | sed -e 's/  / /g')

# FIX: use a numeric comparison (-lt); '[[ $# < 1 ]]' compares lexicographically.
# Also exit non-zero on bad usage (the original exited 0) and quote "$1".
if [ $# -lt 1 ]; then
    echo 'Missing arg(s)'
    exit 1
else
    convert -font /home/brady/fonts/Impact/Impact.ttf \
        -fill white -stroke black -strokewidth 1.8 \
        -pointsize 36 -weight Heavy \
        -draw "text 5,35 '$TEXT'" "$1" "$1"
fi
| true
|
f3d6beb3bee615f7c9081c2d1a52b9a15ecc2fe2
|
Shell
|
joelliusczar/Czar_LFS
|
/ch6_scripts/07_glibc.sh
|
UTF-8
| 3,335
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# LFS chapter 6 step: build and install glibc-2.28 (with the FHS patch),
# nscd config/service files, a set of locales, nsswitch.conf, timezone data
# and ld.so.conf. Relies on install_help.sh for install_app_nest.
helper_path=${helper_path:-..}
. "$helper_path/install_help.sh"
install_app () {
patch -Np1 -i ../glibc-2.28-fhs-1.patch &&
ln -sfv /tools/lib/gcc /usr/lib &&
case $(uname -m) in
i?86) GCC_INCDIR=/usr/lib/$(uname -m)-pc-linux-gnu/8.2.0/include &&
ln -sfv ld-linux.so.2 /lib/ld-lsb.so.3 ||
return 1;
;;
x86_64) GCC_INCDIR=/usr/lib/gcc/x86_64-pc-linux-gnu/8.2.0/include &&
ln -sfv ../lib/ld-linux-x86-64.so.2 /lib64 &&
ln -sfv ../lib/ld-linux-x86-64.so.2 /lib64/ld-lsb-x86-64.so.3 ||
return 1;
;;
esac &&
rm -f /usr/include/limits.h &&
mkdir -v build &&
cd build &&
echo 'Begin configuring' &&
CC="gcc -isystem $GCC_INCDIR -isystem /usr/include" \
../configure --prefix=/usr \
--disable-werror \
--enable-kernel=3.2 \
--enable-stack-protector=strong \
libc_cv_slibdir=/lib &&
echo 'Done with configure' &&
unset GCC_INCDIR &&
make &&
make check test-xfail-tst-idna_name_classify='t' \
test-xfail-tst-ttyname='t' &&
touch /etc/ld.so.conf &&
sed '/test-installation/s@$(PERL)@echo not running@' -i ../Makefile &&
make install &&
cp -v ../nscd/nscd.conf /etc/nscd.conf &&
mkdir -pv /var/cache/nscd &&
install -v -Dm644 ../nscd/nscd.tmpfiles /usr/lib/tmpfiles.d/nscd.conf &&
# FIX: the unit was installed to /lib/system/, where systemd never looks;
# the LFS book installs it to /lib/systemd/system/.
install -v -Dm644 ../nscd/nscd.service /lib/systemd/system/nscd.service &&
mkdir -pv /usr/lib/locale &&
localedef -i cs_CZ -f UTF-8 cs_CZ.UTF-8 &&
# FIX: locale name was mistyped as 'de_DDE'; it must be 'de_DE'.
localedef -i de_DE -f ISO-8859-1 de_DE &&
localedef -i de_DE@euro -f ISO-8859-15 de_DE@euro &&
localedef -i de_DE -f UTF-8 de_DE.UTF-8 &&
localedef -i en_GB -f UTF-8 en_GB.UTF-8 &&
localedef -i en_HK -f ISO-8859-15 en_HK &&
localedef -i en_PH -f ISO-8859-1 en_PH &&
localedef -i en_US -f ISO-8859-1 en_US &&
localedef -i en_US -f UTF-8 en_US.UTF-8 &&
localedef -i es_MX -f ISO-8859-1 es_MX &&
localedef -i fa_IR -f UTF-8 fa_IR &&
localedef -i fr_FR -f ISO-8859-1 fr_FR &&
localedef -i fr_FR@euro -f ISO-8859-15 fr_FR@euro &&
localedef -i fr_FR -f UTF-8 fr_FR.UTF-8 &&
localedef -i it_IT -f ISO-8859-1 it_IT &&
localedef -i it_IT -f UTF-8 it_IT.UTF-8 &&
localedef -i ja_JP -f EUC-JP ja_JP &&
localedef -i ru_RU -f KOI8-R ru_RU.KOI8-R &&
localedef -i ru_RU -f UTF-8 ru_RU.UTF-8 &&
localedef -i tr_TR -f UTF-8 tr_TR.UTF-8 &&
localedef -i zh_CN -f GB18030 zh_CN.GB18030 &&
(cat > /etc/nsswitch.conf << "EOF"
# Begin /etc/nsswitch.conf
passwd: files
group: files
shadow: files
hosts: files dns
networks: files
protocols: files
services: files
ethers: files
rpc: files
#End /etc/nsswitch.conf
EOF
) &&
tar -xf ../../tzdata2018e.tar.gz &&
ZONEINFO=/usr/share/zoneinfo &&
mkdir -pv $ZONEINFO/{posix,right} &&
for tz in etcetera southamerica northamerica europe africa antarctica \
asia australasia backward pacificnew systemv; do
zic -L /dev/null -d $ZONEINFO -y "sh yearistype.sh" ${tz} &&
zic -L /dev/null -d $ZONEINFO/posix -y "sh yearistype.sh" ${tz} &&
zic -L leapseconds -d $ZONEINFO/right -y "sh yearistype.sh" ${tz} ||
{ echo "Loser is $app : timezones"; return 1; }
done &&
cp -v zone.tab zone1970.tab iso3166.tab $ZONEINFO &&
zic -d $ZONEINFO -p America/New_York &&
unset ZONEINFO &&
ln -sfv /usr/share/zoneinfo/America/New_York /etc/localtime &&
(cat > /etc/ld.so.conf << "EOF"
# Begin /etc/ld.so.conf
/usr/local/lib
/opt/lib
EOF
) &&
(cat >> /etc/ld.so.conf << "EOF"
# Add an include directory
include /etc/ld.so.conf.d/*.conf
EOF
)
}
install_app_nest 'glibc-2.28' "/sources"
| true
|
d9e0bff343c359104ea6b3a4ef7e189ca95fe25e
|
Shell
|
GongT/containers
|
/mqtt-broker/fs/usr/bin/debug
|
UTF-8
| 194
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Toggle mosquitto's debug logging: "debug" enables everything, "debug off"
# disables it again, then the broker is told to reload its configuration.
set -Eeuo pipefail

conf=/etc/mosquitto/mosquitto.conf
mode="${1:-}"

# Drop any existing log_type directive so we start from a clean slate.
sed -i '/log_type /d' "$conf"

# Unless explicitly disabled, turn on full logging.
[[ "$mode" == off ]] || echo "log_type all" >>"$conf"

# Signal PID 1 to re-read its config (presumably mosquitto runs as PID 1 in
# this container — confirm against the image entrypoint).
kill -SIGHUP 1
| true
|
8c66e88e52bbe2b932ba4708c1fd7bf72345bb81
|
Shell
|
mmmanyfold/closet-of-souls
|
/play-local.sh
|
UTF-8
| 1,399
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Start video <n> on every player Pi.
# Usage: ./play-local.sh <video number>   e.g.  ./play-local.sh 1
#
# Fix: the same ssh command was copy-pasted twelve times with only the IP
# changing; the hosts now live in one array and the command in one loop.

videoNum="${1:?usage: $0 <video number>}"

# All player Pis, in the same order the original script contacted them.
hosts=(
  192.168.29.234 192.168.29.245 192.168.29.241 192.168.29.148
  192.168.29.249 192.168.29.213 192.168.29.217 192.168.29.202
  192.168.29.122 192.168.29.243 192.168.29.185 192.168.29.127
)

for host in "${hosts[@]}"; do
  # nohup + background + exit lets the ssh session return immediately while
  # omxplayer keeps playing on the Pi.
  ssh "pi@$host" \
    "nohup omxplayer -b /home/pi/video$videoNum/*.mp4 -o local &> /dev/null & exit"
done

echo "Done starting videos"
| true
|
f06990603c65b18cbb5c3f2bbc56dd97486538ca
|
Shell
|
pramo18/usp_1BM18CS070
|
/prime.sh
|
UTF-8
| 231
| 3.1875
| 3
|
[] |
no_license
|
# Interactive primality check: read a number from stdin and report whether
# it is prime.
#
# Bug fixes over the original:
#  * the loop tested `n % 2` on every iteration instead of `n % i`, so only
#    evenness was ever detected (9 was reported prime);
#  * the bound `i < n/2` never ran for n=4, so 4 was reported prime;
#  * n < 2 (0 and 1) is now correctly reported as not prime.

# Return 0 when $1 is prime, 1 otherwise (trial division up to sqrt(n)).
is_prime() {
  local n=$1 i
  (( n < 2 )) && return 1
  for (( i = 2; i * i <= n; i++ )); do
    (( n % i == 0 )) && return 1
  done
  return 0
}

echo "enter number"
# read can hit EOF when run non-interactively; guard against an empty value.
read -r n || true
if [ -n "$n" ]; then
  if is_prime "$n"; then
    echo "number is prime"
  else
    echo "number is not prime"
  fi
fi
| true
|
95b15e66ab0336e2f5a442f172514659905d823c
|
Shell
|
rwstauner/run_control
|
/mac/install
|
UTF-8
| 628
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install Homebrew casks given on the command line; with --reinstall as the
# first argument, each cask is uninstalled first.
#
# Fix: the owner check used GNU `stat -c '%U'`, which fails on stock macOS
# (BSD stat wants `-f '%Su'`).  The native form is tried first with the GNU
# form as a fallback for coreutils-equipped setups.

# True if the given tap is already configured (exact line match).
tapped () {
  brew tap | grep -qFx "$1"
}

# Add a tap unless it is already present.
tap () {
  tapped "$1" || \
    brew tap "$1"
}

# True if the cask is already installed.
installed () {
  brew list --cask "$1" &> /dev/null
}

# Install a cask unless it is already installed; extra args pass through.
install () {
  installed "$1" || \
    brew install --cask "$@"
}

# "Package installers may write to any location; options such as --appdir are ignored."
# Make sure /usr/local/bin is owned by the current user so installers can
# write there without prompting.
owner=$(stat -f '%Su' /usr/local/bin 2>/dev/null || stat -c '%U' /usr/local/bin)
[[ "$owner" == "$USER" ]] || \
  sudo chown "$USER" /usr/local/bin

tap homebrew/cask
# tap homebrew/cask-versions

reinstall=false
if [[ "$1" == "--reinstall" ]]; then
  reinstall=true
  shift
fi

for i in "$@"; do
  if $reinstall; then
    brew uninstall --cask "$i"
  fi
  install "$i"
done
| true
|
4bb812dada9f9b811645041f4694b6ed6049b8e4
|
Shell
|
hakkika/vagrant-o3l
|
/setup-nodes.sh
|
UTF-8
| 2,029
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
# This script is run manually on the controller-1 node.
# It registers the cluster hosts with kollacli, assigns them to deployment
# groups, and sets the deployment properties (VIP, interfaces, registry,
# passwords).

# kollacli deployments must be driven by the 'kolla' user.
if [ "$USER" != "kolla" ]; then
  echo "This script must be run as user kolla"
  exit 1
fi

# Register every node with kollacli.
kollacli host add controller-1
kollacli host add controller-2
kollacli host add network
kollacli host add comp-1
kollacli host add comp-2
kollacli host list

# Push SSH credentials to each node and verify connectivity.
kollacli host setup --file /vagrant/node-passwords.yml
kollacli host check all

# Group membership: both controllers carry control/database/storage; the
# network and compute roles get dedicated nodes.
kollacli group addhost control controller-1
kollacli group addhost control controller-2
kollacli group addhost database controller-1
kollacli group addhost database controller-2
kollacli group addhost storage controller-1
kollacli group addhost storage controller-2
kollacli group addhost network network
kollacli group addhost compute comp-1
kollacli group addhost compute comp-2
kollacli group listhosts
kollacli host list

# This is the default value
# kollacli property set openstack_release 3.0.1

# Set virtual ip to 192.168.56.110 in the configuration file at /usr/share/kolla/ansible/group_vars/__GLOBAL__
# Virtual ip is also set as a kolla property
kollacli property set kolla_internal_address 192.168.56.110
kollacli property set network_interface eth1
kollacli property set tunnel_interface eth2
kollacli property set neutron_external_interface eth3

# This should be a VIP, an unused IP on your network that will float between
# the hosts running keepalived for high-availability. When running an All-In-One
# without haproxy and keepalived, this should be the first IP on your
# 'network_interface' as set in the Networking section below.
cp -a /usr/share/kolla/ansible/group_vars/__GLOBAL__ /usr/share/kolla/ansible/group_vars/__GLOBAL__.ORIG
sed -i '/^kolla_internal_vip_address: /s/10.10.10.254/192.168.56.110/' /usr/share/kolla/ansible/group_vars/__GLOBAL__

# Docker registry settings
# BUG FIX: these two lines previously used 'kollacli set property', which is
# not a kollacli verb; the correct order is 'kollacli property set', as used
# for every other property above.
kollacli property set docker_registry ""
kollacli property set docker_namespace oracle

# Setting the passwords
echo "**** Setting the passwords"
kollacli password set keystone_admin_password
kollacli password init
| true
|
bd173fb7f24c5b2903797b366fd817dc5748e565
|
Shell
|
jrahm/Boltsnap
|
/install
|
UTF-8
| 201
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the Boltsnap daemon binary and the web assets.
# DAEMON_NAME and GENCGI_RENAME may be overridden from the environment.

DAEMON_NAME="${DAEMON_NAME:-boltsnapd}"
# NOTE(review): GENCGI_RENAME is set but never referenced below — confirm
# whether the CGI rename step was meant to happen here.
GENCGI_RENAME="${GENCGI_RENAME:-boltgen.cgi}"

# Quoting added so names containing spaces do not break the copies.
sudo cp -v "$DAEMON_NAME" /usr/bin/
cp -rvf public_html "$HOME/"
| true
|
7c4031d00e9d7720371cfdbc06a85a106098c5dd
|
Shell
|
Gringox/sinatra_mvc
|
/mvc_sinatra.sh
|
UTF-8
| 1,085
| 3.171875
| 3
|
[] |
no_license
|
# Scaffold a minimal Sinatra MVC project named "$1".

# Exactly one argument (the project name) is required.
if [ "$#" -ne 1 ]; then
  echo "Usage: $0 NAME"
  exit 1
fi

# Refuse to clobber an existing file or directory of the same name.
if [ -e "$1" ]; then
  echo "$1 already exist"
  exit 1
fi

# Project skeleton: models, views, controllers and static asset folders.
mkdir "$1" "$1"/models "$1"/views "$1"/controllers "$1"/public "$1"/public/images "$1"/public/js "$1"/public/css

# Rack entry point: wires DataMapper and loads all models and controllers.
touch "$1"/config.ru
cat > "$1"/config.ru <<'EOF'
require 'sinatra'
require 'data_mapper'
# Setup database
DataMapper.setup(:default, 'sqlite3:database.db')
# Require all models
Dir['./models/*'].each {|file| require file }
# Finalize database init
DataMapper.finalize
# Require all controllers
Dir['./controllers/*'].each {|file| require file }
run Sinatra::Application
EOF

# Gem manifest.
touch "$1"/Gemfile
cat > "$1"/Gemfile <<'EOF'
source 'https://rubygems.org'
gem 'sinatra'
gem 'data_mapper'
EOF

# Shared HTML layout rendered around every view.
touch "$1"/views/layout.erb
cat > "$1"/views/layout.erb <<'EOF'
<!DOCTYPE html>
<html>
<head>
<!-- Stuff -->
</head>
<body>
<%= yield %>
</body>
</html>
EOF

# All done
echo "DONE"
| true
|
241674b668597eae9d28f73fd2275a878e786786
|
Shell
|
sorftfaricson/Compilar
|
/compiler.sh
|
UTF-8
| 21,659
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# CompilerC++ - interactive menu (Spanish UI) to create, edit, compile and
# run C++ programs from this terminal (Termux-oriented: see the pkg/apt
# install option).
#
# Review notes on this revision:
#  * The title banner was duplicated verbatim in seven places; it is now
#    printed by mostrar_banner() (the invalid-option branch previously used
#    a slightly de-colored copy — made consistent here).
#  * Every menu branch called menu_principal recursively, growing the call
#    stack without bound; a while-loop around 'select' replaces that.
#  * User-supplied file names are quoted everywhere, and g++ failures are
#    reported instead of always claiming success.

cyan=" \e[36m"
blanco="\e[0m"
BLACK='\033[30m'
RED='\033[31m'
GREEN='\033[32m'
YELLOW='\033[33m'
BLUE='\033[34m'
MAGENTA='\033[35m'

# Print the CompilerC++ title banner plus the short description.
mostrar_banner() {
  echo -e "$cyan _______"
  echo -e "$cyan ∆ | |$GREEN ©©©©© ©©©©© ©© ©© ©©©©© ©©©©© © ©©©©© ©©©©"
  echo -e "$cyan ∆ ∆ | |$GREEN © © © © © © © © © © © © ©"
  echo -e "$cyan ∆ ∆ |$YELLOW |_|$cyan |$GREEN © © © © © ©©©©© © © ©©© ©©©©"
  echo -e "$cyan ∆ ∆ | |$GREEN © © © © © © © © © © ©"
  echo -e "$cyan ∆ ∆ |$YELLOW |_|$cyan |$GREEN ©©©©© ©©©©© © © © ©©©©© ©©©©© ©©©©© © ©"
  echo -e "$cyan ∆ ∆ | | "
  echo -e "$cyan ∆ $YELLOW ®$cyan ∆ |$YELLOW |_|$cyan |$GREEN ©©©©© © ©"
  echo -e "$cyan ∆ $YELLOW ®$cyan ∆ | |$GREEN © © ©"
  echo -e "$cyan ∆ $YELLOW ®$cyan ∆ |$YELLOW |_|$cyan |$GREEN © ©©©©© ©©©©©"
  echo -e "$cyan ∆ ∆ | |$GREEN © © ©"
  echo -e "$cyan ∆ ∆ |$MAGENTA π$cyan |$GREEN ©©©©© © ©"
  echo -e " "
  echo -e "$BLUE CompilerC++ es una herramienta que te permite ajustar tus prrogramas "
  echo -e " que estan basados en C++ , por tales motivos funciona para compilar "
  echo -e " en esta misma terminal. "
  echo -e " "
  echo -e " "
  echo -e "$MAGENTA g++ -o hola_cpp hola.cpp"
  echo -e "$blanco"
}

# "TXT file" logo; $1 selects the outline colour (YELLOW when creating a
# file, GREEN when installing the toolchain).
logo_txt() {
  echo -e "$1 ooooo ooooooooooooo ooooo "
  echo -e " o ooooooo ooooooo o "
  echo -e " o o o ∆∆∆∆∆∆∆∆∆ o o"
  echo -e " o ooooooo ooooooo o "
  echo -e " ooooo o o ooooo "
  echo -e " ooooooooooooo "
  echo -e " o o"
  echo -e " o o"
  echo -e " o o"
  echo -e " o o "
  echo -e " o o "
  echo -e " o o "
  echo -e " o o$cyan by:Sorft Faricson "
  echo -e "$1 ooooooooooo$cyan & Kenarto Projects."
}

# Horizontal divider printed around directory listings.
divisoria() {
  echo " ∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆∆"
}

# Option a: create directory <name>/ containing an empty <name>.cpp.
crear_archivo() {
  local name
  clear
  echo ""
  logo_txt "$YELLOW"
  echo ""
  echo -e "$blanco ingresa el nombre de tu archivo"
  read -r name
  echo "el nombre es $name"
  sleep 2
  echo " Se esta creando una carpeta llamada $name"
  sleep 2
  mkdir "$name"
  # Guard the cd: previously a failed cd silently created the file in the
  # current directory and then wandered one level up with 'cd ..'.
  cd "$name" || return
  echo " Se esta creando el archivo"
  sleep 2
  touch "$name.cpp"
  cd ..
  echo "Completado"
  sleep 3
}

# Option b: open <file>.cpp in vim.
editar_archivo() {
  local xc
  clear
  echo -e " $MAGENTA 88888 8888 88888 88888 88888 8888 "
  echo -e " 8 8 8 8 8 8 8 8 8"
  echo -e " 888 8 8 8 8 8 8 8888 "
  echo -e " 8 8 8 8 8 8 8 8 8"
  echo -e " 88888 8888 88888 8 88888 8 8 "
  echo -e " "
  echo -e " $cyan 8 $BLUE Siempre e pensado que "
  echo -e " $cyan 88 88 $BLUE no hay nada mas divertido "
  echo -e " $cyan 8888888888 $BLUE que salir a la calle e "
  echo -e " $cyan 88 88 $BLUE ir por un helado en un "
  echo -e " $cyan 8 $BLUE dia caluroso... "
  echo -e " $cyan "
  echo -e ""
  echo -e " estos son los archivos de tu carpeta:"
  echo -e "$blanco"
  pwd
  echo ""
  divisoria
  echo ""
  ls
  echo ""
  divisoria
  echo -e " ingresa el nombre del archivo a editar (recuerda no poner la extension .cpp) :"
  read -r xc
  vim "$xc.cpp"
  echo -e "$cyan genial , suerte en tu codigo"
  sleep 3
}

# Option c: compile <file>.cpp into an executable named <file>.
compilar_archivo() {
  local arc
  clear
  echo -e " $cyan "
  echo -e " $cyan ____________________________________ "
  echo -e " $cyan | | _________ "
  echo -e " $cyan | si ella ya no te ama | | • | "
  echo -e " $cyan | ,emtonces progrmaate | | 0 | "
  echo -e " $cyan | una novia. | | 0 | "
  echo -e " $cyan | descubre como en: | | 0 | "
  echo -e " $cyan | | | |"
  echo -e " $cyan | https://github.com/sorftfaricson | | _____ |"
  echo -e " $cyan |____________________________________| | _____ | "
  echo -e " $cyan | | | | "
  echo -e " $cyan | | | [] [] |"
  echo -e " $cyan /_____\ |_________|"
  echo ""
  echo " Estos son los archivos en esta carpeta: "
  echo -e "$blanco "
  echo ""
  ls
  echo ""
  echo ""
  echo -e " ingresa el nombre del archivo a compilar (recuerda no poner la extension .cpp) :"
  read -r arc
  # Only claim success when g++ actually succeeded.
  if g++ -o "$arc" "$arc.cpp"; then
    sleep 2
    echo " Se ha compilado el archivo editable $arc.cpp "
    echo " exitosamente!!"
  else
    sleep 2
    echo " Hubo un error al compilar $arc.cpp"
  fi
  sleep 3
}

# Option d: compile <file>.cpp and, on success, run the resulting binary.
compilar_y_ejecutar() {
  local arc
  clear
  echo -e " $cyan "
  echo -e " $cyan ____________________________________ "
  echo -e " $cyan | | _________ "
  echo -e " $cyan |$blanco si ella ya no te ama$cyan | | • | "
  echo -e " $cyan |$blanco ,entonces programate$cyan | | 0 | "
  echo -e " $cyan |$blanco una novia. $cyan | | 0 | "
  echo -e " $cyan |$blanco descubre como en: $cyan | | 0 | "
  echo -e " $cyan | $cyan | | |"
  echo -e " $cyan |$blanco https://github.com/sorftfaricson$cyan | | _____ |"
  echo -e " $cyan |____________________________________| | _____ | "
  echo -e " $cyan | | | | "
  echo -e " $cyan | | | [] [] |"
  echo -e " $cyan /_____\ |_________|"
  echo -e "$GREEN"
  echo " Estos son los archivos en esta carpeta: "
  echo -e "$blanco "
  divisoria
  echo ""
  ls
  echo ""
  divisoria
  echo ""
  echo -e " ingresa el nombre del archivo a compilar (recuerda no poner la extension .cpp) :"
  read -r arc
  # Do not attempt to run the binary when the compile failed.
  if ! g++ -o "$arc" "$arc.cpp"; then
    sleep 2
    echo " Hubo un error al compilar $arc.cpp"
    sleep 3
    return
  fi
  sleep 2
  echo -e "$YELLOW Se ha compilado el archivo editable $arc.cpp "
  echo " exitosamente!!"
  sleep 3
  echo -e "$GREEN En estos momentos se va a ejecutar tu programa..."
  sleep 3
  clear
  "./$arc"
  echo ""
  echo -e "$cyan Fue un gusto poder ayudarte ,suerte programador"
  sleep 3
}

# Option e: install clang and vim via pkg (Termux) or apt.
instalar_herramientas() {
  clear
  echo ""
  logo_txt "$GREEN"
  echo -e "$blanco"
  pkg install clang -y
  apt install clang -y
  echo ""
  echo " instalaciones 1/2 Hecho"
  sleep 2
  echo -e "$YELLOW"
  echo ""
  echo ""
  pkg install vim -y
  apt install vim -y
  echo ""
  echo "instalaciones 2/2 Hecho"
  sleep 4
  clear
  echo ""
  echo ""
  echo -e "$cyan "
  echo -e "$cyan 8 88888 88888 88888 88888 "
  echo -e "$cyan 8 8 8 8 8 8"
  echo -e "$cyan 8 8 88888 8 8 8"
  echo -e "$cyan 8 8 8 8 8 8 88"
  echo -e "$cyan 88888 88888 88888 8 88888 88"
  echo -e "$cyan "
  echo -e "$MAGENTA UN POEMA:"
  echo -e " No hay amor mas puro y sincero "
  echo -e " que el amor de un ingeniero "
  echo -e " "
  sleep 6
}

# Option f: say goodbye and terminate the script.
despedirse() {
  clear
  echo ""
  echo ""
  echo ""
  echo -e "$cyan ADIOS"
  echo ""
  echo ""
  echo ""
  exit
}

# Anything else: complain with the thumbs-down picture.
opcion_invalida() {
  clear
  echo -e "$GREEN NO ES UNA OPCION VALIDA"
  echo " INTENTALO DE NUEVO "
  echo -e ""$RED
  echo " ¶¶¶ "
  echo " ¶¶ ¶¶"
  echo " ¶¶ ¶¶"
  echo " ¶¶ • ¶¶"
  echo " ¶¶ •¶• ¶¶"
  echo " ¶¶ •¶¶¶• ¶¶"
  echo " ¶¶ •¶• ¶¶"
  echo " ¶¶ • ¶¶"
  echo " ¶¶ ¶¶"
  echo " ¶¶ • ¶¶"
  echo " ¶¶ •¶• ¶¶"
  echo " ¶¶ • ¶¶"
  echo " ¶¶ ¶¶"
  echo " ¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶"
  sleep 3
}

# Main menu loop: show the options, dispatch the chosen action, then redraw
# the banner and re-prompt.  (The original re-entered itself recursively.)
menu_principal() {
  while true; do
    local eof=1
    select opcion in "$a" "$b" "$c" "$d" "$e" "$f"; do
      eof=0
      case "$opcion" in
        "$a") crear_archivo; break ;;
        "$b") editar_archivo; break ;;
        "$c") compilar_archivo; break ;;
        "$d") compilar_y_ejecutar; break ;;
        "$e") instalar_herramientas; break ;;
        "$f") despedirse ;;
        *) opcion_invalida; break ;;
      esac
    done
    # 'select' only exits its loop on EOF (Ctrl-D); stop instead of
    # re-prompting forever.
    (( eof )) && break
    clear
    mostrar_banner
  done
}

clear
mostrar_banner

a=" --> crear un archivo c++"
b=" --> editar un archivo"
c=" --> compilar un archivo"
d=" --> compilar y ejecutar un archivo"
e=" --> instalar c++ y herramientas"
f=" --> salir"
PS3=" seleccione el numero de la opcion: "

menu_principal
| true
|
8f21ed6cac111ef62b658bc531f8b3e6fd82079e
|
Shell
|
portworx/katacoda-scenarios
|
/px-k8s-sql-ha/assets/wait4-mssql.sh
|
UTF-8
| 229
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Poll every 5 seconds until kubectl reports a Running pod, then declare
# SQL Server ready.
# NOTE(review): this matches ANY pod in Running state, not specifically the
# SQL Server pod — confirm that is acceptable for this scenario.
while true; do
  if kubectl get pods | grep -q Running; then
    echo SQL Server is Ready
    break
  else
    echo Waiting for SQL Server to be Ready ...
    sleep 5
  fi
done
| true
|
d33608a926c087420da5e368731dfae7aec38c45
|
Shell
|
imacks/oneplus6
|
/src/opoverlay/misc/core/system/opoverlay_deepsleep.sh
|
UTF-8
| 234
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/tmp-mksh/tmp-mksh
# For every write-protected SCSI disk, switch its cache mode to
# 'temporary none' (presumably to allow the device to enter deep sleep —
# confirm against the OnePlus kernel docs).
#
# Rewritten: iterate with a glob instead of parsing `ls`, and test with
# grep directly instead of `cat | grep`; -qs keeps it quiet when the
# attribute file is missing.
for d in /sys/class/scsi_disk/*; do
  if grep -qs 1 "$d/write_protect" 2>/dev/null; then
    echo 'temporary none' > "$d/cache_type"
  fi
done
| true
|
5c33974d87ec90222b8c754294c7e76aef4eb8d3
|
Shell
|
nickbattam/picamon
|
/camera/simulator/scripts/change_cam_pv.sh
|
UTF-8
| 167
| 3.40625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Replace every occurrence of OLD_NAME with NEW_NAME in all files under the
# given directory.
# Usage: change_cam_pv.sh OLD_NAME NEW_NAME DIR
if [ -z "$*" ]; then
  echo "no arguments supplied"
  exit 1
fi

OLD_NAME=$1
NEW_NAME=$2

# NOTE(review): the names are spliced into the sed expression, so they must
# not contain '/', '&' or other sed metacharacters — confirm PV names are
# always plain identifiers.
# Quoting "$3" added so directory paths with spaces work.
find "$3" -type f -exec sed -i "s/$OLD_NAME/$NEW_NAME/g" {} +
| true
|
ecb4b6466547ae5a40e77b6e55ae287729ad1f66
|
Shell
|
dwrolvink/gitautomata
|
/profile/install.sh
|
UTF-8
| 3,014
| 3.984375
| 4
|
[] |
no_license
|
# Locations of the gitautomata profile sources and the install target for
# the linked scripts.
GIT_FOLDER="$HOME/git/gitautomata/profile"
GIT_FUNCTIONS_FOLDER="$GIT_FOLDER/functions"
GIT_CFG_FOLDER="$GIT_FOLDER/config"
FUNCTIONS_FOLDER="$HOME/.local/bin"

# Command-line flags:
#   -f  force / "redo all": re-apply even when a target already exists
#       (used mostly for updating)
#   -u  uninstall: remove gitautomata from the environment
REPLACE=0
UNINSTALL_ALL=0
while getopts fu opt; do
  case "$opt" in
    f) REPLACE=1 ;;
    u) UNINSTALL_ALL=1 ;;
  esac
done
# Functions
function exists {
  # True when $1 names an existing symlink or regular file.
  # Fix: $1 was unquoted, so an empty argument degenerated to '[ -L ]',
  # which is true — exists "" incorrectly reported success.
  [ -L "$1" ] || [ -f "$1" ]
}
function link_file {
  # Symlink source $1 to target $2, honouring the global REPLACE flag:
  #   REPLACE=1: remove an existing target first ("Reapplying")
  #   REPLACE=0: keep an existing target and skip ("Skipped")
  # Returns 1 when the source file is missing.  All expansions are now
  # quoted so paths with spaces work.
  local replacing=0
  [ ! -f "$1" ] && print_error "ERROR: File $1 does not exist! Skipped." && return 1
  [ "$REPLACE" -eq 1 ] && exists "$2" && rm "$2" && echo "Reapplying: $2" && replacing=1
  [ "$REPLACE" -eq 0 ] && exists "$2" && print_verbose "Skipped: File or symlink $2 already exists. Use -f to replace." && return
  # Announce "Installing" only for a target that did not exist at all.
  [ "$replacing" -eq 0 ] && [ ! -L "$2" ] && [ ! -f "$2" ] && echo "Installing: $2"
  ln -s "$1" "$2"
  return 0
}
function unlink_file {
  # Remove the target path $2 when present; otherwise just report absence.
  # NOTE(review): $1 is accepted but unused — callers (set_file) pass
  # source/target pairs for symmetry with link_file; confirm before
  # changing the signature.
  [ ! -L "$2" ] && [ ! -f "$2" ] && print_verbose "Checked absence: $2"
  exists "$2" && rm "$2" && echo "Removed: $2"
  return 0
}
function set_file {
  # set_file <enabled> <source> <target> <make-executable>
  # Dispatch helper: uninstall mode (-u) always unlinks; otherwise install
  # (enabled=1) or remove (enabled=0) the symlink.  When installing with
  # make-executable=1, the *source* file is chmod +x'ed (the symlink then
  # resolves to an executable).
  [ "$UNINSTALL_ALL" -eq 1 ] && unlink_file "$2" "$3" && return
  [ "$1" -eq 1 ] && link_file "$2" "$3" && [ "$4" -eq 1 ] && chmod +x "$2"
  [ "$1" -eq 0 ] && unlink_file "$2" "$3"
  return 0
}
function print_error {
  # Emit $1 in bright red/pink (256-colour SGR 197) and reset afterwards.
  # Note: output goes to stdout, not stderr, matching the rest of the file.
  local msg="$1"
  echo -e "\e[0;38;5;197m${msg}\e[0m"
}
function print_verbose {
  # Emit $1 in dim grey (256-colour SGR 241) and reset afterwards; used for
  # low-importance progress messages.
  local msg="$1"
  echo -e "\e[0;38;5;241m${msg}\e[0m"
}
# Create target folders if not exist
[ -d "$FUNCTIONS_FOLDER" ] || mkdir -p "$FUNCTIONS_FOLDER"

# Links programs/scripts — one set_file call per installed script (the same
# four invocations that used to be spelled out individually).
for script in reload_profile switch_to vv supload; do
  set_file 1 "$GIT_FUNCTIONS_FOLDER/$script" "$FUNCTIONS_FOLDER/$script" 1
done
#set_file 0 $GIT_FUNCTIONS_FOLDER/test $FUNCTIONS_FOLDER/test 1
# Insert shell additions to shell rc files
# ========================================
function update_additions {
  # Re-insert (or, with -u, remove) the gitautomata block in shell rc file $1.
  # skip if file does not exist
  ! exists "$1" && return
  # remove previous block if exists
  sed -i '/^# <GITAUTOMATA TOP>/,/^# <GITAUTOMATA BOTTOM>/{d}' "$1"
  # remove newline at end of file if exists
  # BUG FIX: this step used to read from ~/.zshrc regardless of which file
  # was passed in, so calling this on any other rc file would overwrite it
  # with ~/.zshrc's contents.  It now trims "$1" itself, via a mktemp file
  # instead of a predictable /tmp name.
  if [ -z "$(tail -n 1 "$1")" ]; then
    local tmp
    tmp=$(mktemp) || return
    sed '$d' < "$1" > "$tmp" && mv "$tmp" "$1"
  fi
  # exit now if -u
  [ "$UNINSTALL_ALL" -eq 1 ] && echo "Removed custom code inclusion @ $1" && return
  # add top marker
  echo -e "\n# <GITAUTOMATA TOP>\n# --------------------------" >> "$1"
  # add additions
  cat "$GIT_CFG_FOLDER/shell_additions.sh" >> "$1"
  # add bottom marker
  echo -e "\n# --------------------------\n# <GITAUTOMATA BOTTOM>" >> "$1"
  echo "(Re)applied custom code inclusion @ $1"
}

update_additions "$HOME/.zshrc"
| true
|
cfe32b8d7ee963f4a2b2a60002ad67edee374391
|
Shell
|
seeseemelk/gnome-1.0
|
/gnotepad/PKGBUILD
|
UTF-8
| 539
| 2.890625
| 3
|
[] |
no_license
|
# PKGBUILD for gnotepad+ 1.0.8, a GNOME 1.0-era editor built against the
# legacy GTK package provided by this repository.
pkgname=gnotepad
pkgver=1.0.8
pkgrel=1
# epoch intentionally left empty (no epoch bump has been needed).
epoch=
pkgdesc="GNotepad"
arch=(x86_64)
url=""
license=('GPL')
depends=('gtk-old')
# The '+' in the upstream tarball name is URL-encoded as %2B.
source=(
"https://download.gnome.org/desktop/1.0/1.0/sources/gnotepad%2B-1.0.8.tar.gz"
)
md5sums=('7366f8dafdcfea1076cea9c14ace852c')
# The tarball extracts to "gnotepad+-$pkgver".
prepare() {
cd "gnotepad+-$pkgver"
}
build() {
cd "gnotepad+-$pkgver"
# Strip -D_FORTIFY_SOURCE=2 from the default flags.
# NOTE(review): assumed to work around a build failure in this old code
# base — confirm the actual error it avoids.
CPPFLAGS=${CPPFLAGS/-D_FORTIFY_SOURCE=2}
./configure \
--prefix=/usr
make
}
check() {
cd "gnotepad+-$pkgver"
# -k: keep going so all test failures are reported in one pass.
make -k check
}
package() {
cd "gnotepad+-$pkgver"
# Installs by overriding 'prefix' rather than the conventional
# 'make DESTDIR="$pkgdir" install'.
# NOTE(review): prefix-override can bake $pkgdir paths into installed
# files — confirm this old Makefile has no DESTDIR support.
make prefix="$pkgdir/usr/" install
}
| true
|
a0d141e55ad407c7147eacb1a330032e175b80fe
|
Shell
|
the-events-calendar/action-s3-utility
|
/commands/exists.sh
|
UTF-8
| 629
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# GitHub Action command: check whether s3://$S3_BUCKET/$FILE exists and
# publish the result as the 'exists' output.
set -e

if [ -z "$FILE" ]; then
  echo "FILE is not set. Quitting."
  exit 1
fi

echo "Checking for file existence at s3://${S3_BUCKET}/${FILE} at the ${S3_ENDPOINT}"

the_command="aws ${S3_PROFILE} ${ENDPOINT_APPEND} s3api head-object --bucket ${S3_BUCKET} --key ${FILE} $*"
echo "$the_command"

# Verify file existence.
# BUG FIX: the command used to run as a bare statement before the $? test,
# so under 'set -e' a 404 aborted the whole script and the "does not exist"
# branch was unreachable.  Running it inside the 'if' keeps a non-zero
# status from triggering set -e.  (Also: '==' in '[' is a bashism; plain
# status checking needs no comparison at all.)
# XXX: we are just checking the error code, but should check the result for a 404, and raise error in other cases
if sh -c "$the_command"
then
  echo "File exists."
  echo "::set-output name=exists::true"
else
  echo "File does not exist."
  echo "::set-output name=exists::false"
fi
| true
|
f41259043f53163abe37a1bd4b40785d90a6dac3
|
Shell
|
AlekzNet/Cisco-ASA-ACL-toolkit
|
/asasearch.sh
|
UTF-8
| 458
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/ksh
# Prints the amount of matching ACLs for the IPs
# found in the source and destination
# Takes a list of IP's as an argument
# E.g. asasearch.sh 10.0.1.64/28,10.0.1.68
#
# For every */*.acl file under the current directory, prints one line:
#   <acl-file> <permit-lines matching as source> <permit-lines matching as dest>
IPS=$*
for acl in */*.acl
do
for dir in src dst
do
# eval builds the result variable name dynamically (cntsrc / cntdst).
# The awk step strips the leading whitespace some wc implementations emit.
eval cnt${dir}=`ipaclmatch.py --noany --permit --$dir -a $IPS $acl | wc -l | awk '{print $1}'`
# Alternative kept for reference: include 'any' matches as well.
# eval cnt${dir}=`ipaclmatch.py --permit --$dir -a $IPS $acl | wc -l | awk '{print $1}'`
done
echo $acl $cntsrc $cntdst
done
| true
|
f4ee0abbd93ebd76cedb1000eab752f2dd1cf469
|
Shell
|
kchenphy/dotfiles
|
/bash/bash.d/0_commons.sh
|
UTF-8
| 746
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
current_script_dir() {
  # Echo the absolute directory holding this script.  The subshell keeps
  # the working-directory change from leaking into the caller.
  local here
  here="$(dirname "${BASH_SOURCE[0]}")"
  ( cd "$here" && pwd )
}
die() {
  # Report an error (with the status code) and a stack trace on stderr.
  # Fix: the optional second argument was accepted but silently ignored —
  # the status is now propagated via 'return'.  'exit' is deliberately not
  # used: these helpers live in sourced bash.d rc files, where exiting
  # would kill the interactive shell.
  local -r message=$1 exit_code="${2:-1}"
  printf "%s (status code: %d)\n" "$message" "$exit_code" >&2
  print_stack_trace >&2
  return "$exit_code"
}
check_num_args() {
  # Complain (via die) unless the caller passed exactly $1 further
  # arguments; the message names the calling function via FUNCNAME.
  local -r expected=$1
  shift
  (( $# == expected )) || die "number of arguments for ${FUNCNAME[1]} is $# (expected $expected)"
}
# Tiny self-test helper: forwards its arguments to check_num_args with an
# expected count of 2.
test_check_num_args() {
check_num_args 2 "$@"
}
print_stack_trace() {
# Walk the bash call stack with the 'caller' builtin and print one
# "at func (file:line)" entry per frame.
local context frame=0
while true; do
# 'caller N' prints "line func file" for frame N, or nothing once N is
# past the top of the stack — that empty result ends the walk.
context=$(caller ${frame})
if [ -z "${context}" ]; then
break
fi
# Split the three caller fields: [0]=line, [1]=function, [2]=source file.
read -r -a array <<< "${context}"
printf " at %s (%s:%d)\n" "${array[1]}" "${array[2]}" "${array[0]}"
(( ++frame ))
done
}
| true
|
072a491a565e20d16d43604406d19f5c0588ebde
|
Shell
|
ThomasYeoLab/CBIG
|
/stable_projects/disorder_subtypes/Tang2020_ASDFactors/step3_analyses/bootstrapping/CBIG_ASDf_bootstrappedEst_wrapper.sh
|
UTF-8
| 1,038
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Wrapper script to run polarLDA estimate on bootstrapped samples
#
# Written by Siyi Tang and CBIG under MIT license: https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
#
# Improvement: every path expansion is now quoted so directories containing
# spaces do not break the loop.

# Take input variables
input_dir=$1   # absolute directory where the docs are saved
out_dir=$2     # output root; one subdirectory is created per resample
K=$3           # number of factors
N=$4           # number of resamples
cluster=$5     # cluster name

proj_dir="${CBIG_CODE_DIR}/stable_projects/disorder_subtypes/Tang2020_ASDFactors"
code_dir="${proj_dir}/step2_polarLDA"
model_name="${proj_dir}/data_release/files_polarLDA/final"
inf_settings="${code_dir}/CBIG_ASDf_polarLDA_infSettings.txt"

# Run one estimation per bootstrap resample, initialized from the released
# model.
for (( i=1; i<=N; i++ ))
do
    corpusDir_est="${input_dir}/resampled_${i}/dx1.dat"
    output_dir="${out_dir}/resampled_${i}"
    mkdir -p "${output_dir}"
    output_dir_step2a="${output_dir}/estimate"
    sh "${code_dir}/CBIG_ASDf_polarLDA_est_initFromModel.sh" \
    -d "${corpusDir_est}" \
    -t "${inf_settings}" \
    -k "${K}" \
    -i model \
    -m "${model_name}" \
    -p "${code_dir}" \
    -o "${output_dir_step2a}" \
    -q "${cluster}"
done
| true
|
6eb0fd0423a81a4c95e05995d8a0ce72e42c32eb
|
Shell
|
gheja/misc
|
/add_service.sh
|
UTF-8
| 458
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Create, enable and start a simple systemd service running <command> as
# <user>.
# Usage: add_service.sh <name> <user> <command>

if [ $# != 3 ]; then
  echo "$0 <name> <user> <command>"
  exit 1
fi

name="$1"
user="$2"
command="$3"

file="/etc/systemd/system/$name.service"

# Never overwrite an existing unit.
if [ -e "$file" ]; then
  echo "$file: exists, exiting."
  exit 1
fi

# Unquoted EOF on purpose: $name/$user/$command expand into the unit file.
cat > "$file" <<EOF
[Unit]
Description=$name
After=network.target

[Service]
Type=simple
User=$user
ExecStart=$command
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Make the freshly written unit visible to systemd before enabling it.
systemctl daemon-reload
systemctl enable "$name"
systemctl start "$name"
| true
|
741bfd7a2ec392aad32dd8f45ffc4187fd887cc6
|
Shell
|
bensonnjonjo/phoenix-codedeploy
|
/deploy/after-install.sh
|
UTF-8
| 1,372
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# CodeDeploy AfterInstall hook for the Phoenix app: picks the target
# environment from $APPLICATION_NAME, pulls secrets from S3, installs the
# upstart job, then fetches deps, compiles and migrates.
set -e
# Find out from CodeDeploy what environment we are in by parsing the APPLICATION_NAME
ENVIRONMENT=`echo "$APPLICATION_NAME" | cut -d '-' -f 1 | tr A-Z a-z`
# Put the environment in the log
echo "Customizing environment: $ENVIRONMENT"
# Setup variables depending on what environment
case $ENVIRONMENT in
blue)
S3=monica-blue
export MIX_ENV=blue
;;
red)
S3=monica-red
export MIX_ENV=red
;;
production)
S3=monica-production
export MIX_ENV=prod
;;
*)
# Unknown environment: fail the deployment loudly.
echo "Error: undefined environment: $ENVIRONMENT"
exit 1
;;
esac
# NOTE(review): SOURCE_DIR is set but never used below — confirm it can go.
SOURCE_DIR=/opt/phoenix-codedeploy/deploy
# Move into the app directory
cd /opt/phoenix-codedeploy
# Pull in secrets from S3 Bucket
aws --region=us-east-1 s3 cp s3://$S3/$ENVIRONMENT.secret.exs /opt/phoenix-codedeploy/config/$MIX_ENV.secret.exs
# Copy over the upstart script and set MIX_ENV correctly
sed "s/MIX_ENV_VALUE/$MIX_ENV/" /opt/phoenix-codedeploy/deploy/monica-app-upstart.conf >/etc/init/monica-app.conf
export HOME=/root
mix local.hex --force
# 'yes | head -n 1000' feeds a bounded stream of "y" answers to any
# interactive mix prompts; head caps the stream so the pipe terminates.
yes | head -n 1000 | mix deps.get
yes | head -n 1000 | mix deps.compile
yes | head -n 1000 | mix compile
yes | head -n 1000 | mix ecto.migrate
yes | head -n 1000 | mix phoenix_codedeploy.insert_seeds
mix phoenix.digest -o _build/prod/lib/phoenix_codedeploy/priv/static/ web/static
| true
|
529d359f82ea414789aae7b0af8ece279e6bad53
|
Shell
|
dell/wsl
|
/viwsl
|
UTF-8
| 248
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# viwsl - open the per-user WSL configuration file (~/$MYCONFIG) in vi.

# source function library (defines MYCONFIG and the WSCOLOR* helpers —
# confirm against wsl-functions).  Path is quoted so a script location
# containing spaces still resolves.
. "${0%/*}/wsl-functions"

case "$1" in
  -h|--help|help)
    $WSCOLORERR
    echo -e "\n$(basename "$0") - Utility to create and edit ${MYCONFIG} file\n"
    $WSCOLORNORM
    exit 1
    ;;
esac

vi "$HOME/${MYCONFIG}"
| true
|
89095f6dd71342ce0bee541a2978c45ca65b9221
|
Shell
|
noelnamai/kent
|
/src/hg/makeDb/doc/hg38/reg.txt
|
UTF-8
| 20,219
| 2.5625
| 3
|
[] |
no_license
|
# for emacs: -*- mode: sh; -*-
# Regulation tracks for hg38 / GRCh38
#############################################################################
# Building UW DNAse I ENCODE2 tracks (In progress 2014-09-03 Jim Kent)
#These tracks contain the results of DNAse I hypersensitivity experiments from the
#John Stamatoyannopoulos lab at the University of Washington done for the ENCODE Project
#phase 2.
#The data was processed according to the July 2014 version of the ENCODE 3 DNAse
#processing pipeline. At a high level this means pooling aligning the reads
#with the bwa program against hg38 with the 'sponge' sequence, removing multiple
#mapping reads, and reads that aligned to the sponge or mitochondria, pooling
#the results for all replicates, and running the hotspot program. The bigWig output
#was normalized so that the average value genome-wide is 1.
#The bam files were created by the encode analysis pipeline on each replicate separately
#and the process for doing this won't be described here. It is a bit complex, and
#really will just need to be reworked into something simpler now that we've no longer
#are working directly on that contract. This build assumes that the relevant bam
#files are in the /hive/groups/encode/3/eap/cach directory.
# To do the mapping again you'd start with fastq files and use the script
# eap_run_bwa_se on an index that included the sponge as well as hg38
# chromosomes (but not alternative haplotypes). Bwa itself is run in a
# rather vanilla mode, with no options beyond -t 4 to parallelize the
# first pass of the alignment in 4 threads.
# The first section of methods here are to create a hub with peaks, hotspots, and signal
# from pooled replicates.
#The detailed instructions after the bam files are available are:
##In more detail. First
mkdir /hive/data/genomes/hg38/bed/uwDnase1
## Run program to generate most of parasol batches
ssh encode-02
cd /hive/data/genomes/hg38/bed/uwDnase1
dnaseHg38Batch batchDir
## By hand edit split batchDir into pooled and single replicate versions in directories
## run_pooled and run_replicates (sorry for the hand work)
## Do parasol runs on pooled
ssh ku
cd /hive/data/genomes/hg38/bed/uwDnase1/run_pooled
para make
para time
#Completed: 95 of 95 jobs
#CPU time in finished jobs: 2908517s 48475.28m 807.92h 33.66d 0.092 y
#IO & Wait Time: 0s 0.00m 0.00h 0.00d 0.000 y
#Average job time: 27838s 463.96m 7.73h 0.32d
#Longest finished job: 128043s 2134.05m 35.57h 1.48d
#Submission to last job: 128747s 2145.78m 35.76h 1.49d
#Estimated complete: 0s 0.00m 0.00h 0.00d
## Do parasol runs on replicates (these are not actually currently used)
ssh ku
cd /hive/data/genomes/hg38/bed/uwDnase1/run_replicates
para make
para time
#completed: 189 of 189 jobs
#CPU time in finished jobs: 4025020s 67083.66m 1118.06h 46.59d 0.128 y
#IO & Wait Time: 0s 0.00m 0.00h 0.00d 0.000 y
#Average job time: 20115s 335.25m 5.59h 0.23d
#Longest finished job: 110245s 1837.42m 30.62h 1.28d
#Submission to last job: 111410s 1856.83m 30.95h 1.29d
#Estimated complete: 0s 0.00m 0.00h 0.00d
#Note that one of the experiments only has replicate 2. It's because both
#iterations of replicate 1 were deprecated.
## Augment metadata
ssh hgwdev
cd /hive/data/genomes/hg38/bed/uwDnase1
dnaseHg38AddTreatments batchDir/meta.tab meta.tab
## Do correlations between all pooled experiments
ssh ku
cd /hive/data/genomes/hg38/bed/uwDnase1
mkdir run_correlations
cd run_correlations
# Create little script to make tab separated output out of bigWigCorrelate results
cat << '_EOF_' > corr2
#!/bin/tcsh -efx
echo -n "$1\t$2\t" > $3
bigWigCorrelate $1 $2 >> $3
'_EOF_'
# << happy emacs
# Create gensub2 input
cat << '_EOF_' > gsub
#LOOP
corr2 $(path1) $(path2) out/$(root1)_vs_$(root2)
#ENDLOOP
'_EOF_'
# << happy emacs
# Run gensub2 with brand new selfPair method on all pooled files
ls -1 /hive/data/genomes/hg38/bed/uwDnase1/run_pooled/*.bigWig > fileList
gensub2 fileList selfPair gsub jobList
# The parasol run using just 10 CPUs because we are i/o heavy
para create jobList
para push -maxJob=10
para time
#Completed: 4465 of 4465 jobs
#CPU time in finished jobs: 349724s 5828.74m 97.15h 4.05d 0.011 y
#IO & Wait Time: 58019s 966.98m 16.12h 0.67d 0.002 y
#Average job time: 91s 1.52m 0.03h 0.00d
#Longest finished job: 701s 11.68m 0.19h 0.01d
#Submission to last job: 47080s 784.67m 13.08h 0.54d
#Estimated complete: 0s 0.00m 0.00h 0.00d
# Concatenate results
cat out/* > ../correlation.tab
# Set up inputs for clustering run to choose colors and make tree
cd /hive/data/genomes/hg38/bed/uwDnase1
ls -1 /hive/data/genomes/hg38/bed/uwDnase1/run_pooled/*.bigWig > pooled.lst
grep -v '^#' meta.tab | cut -f 6 > foo
paste pooled.lst foo > pooled.labels
# Run clustering program, which takes about 20 hours
mkdir /hive/data/genomes/hg38/bed/uwDnase1/calcGraph
cd /hive/data/genomes/hg38/bed/uwDnase1/calcGraph
mkdir -p /scratch/kent/tmpDir
bigWigCluster ../pooled.lst /hive/data/genomes/hg38/chrom.sizes uwDnase1.json uwDnase1.tab -precalc=../correlation.tab -threads=10 -tmpDir=/scratch/kent/tmpDir -labels=../pooled.labels
## Make normalized versions of wigs (Might be able to encorperate this into
# the pooled job maker in the future
ssh ku
cd /hive/data/genomes/hg38/bed/uwDnase1
mkdir run_normalized
ls -1 /hive/data/genomes/hg38/bed/uwDnase1/run_pooled/*.bigWig | \
sed 's/\.pooled\.bigWig//' > run_normalized/fileList
cd run_normalized
mkdir out
# Make normalization script
cat << '_EOF_' > norm1
#!/bin/tcsh -efx
set m = `bigWigInfo $1 | awk '/mean/ {print 1.0/$2}'`
bigWigToBedGraph $1 stdout | colTransform 4 stdin 0 $m tmp.bedGraph
bedGraphToBigWig tmp.bedGraph /hive/data/genomes/hg38/chrom.sizes tmp.bw
rm tmp.bedGraph
mv tmp.bw $2
'_EOF_'
# << happy emacs
# Create gensub2 input
cat << '_EOF_' > gsub
#LOOP
edwCdJob /hive/data/genomes/hg38/bed/uwDnase1/run_normalized/norm1 $(path1).pooled.bigWig /hive/data/genomes/hg38/bed/uwDnase1/run_normalized/out/$(root1).norm.bw
#ENDLOOP
'_EOF_'
# << happy emacs
# Do parasol run
gensub2 fileList single gsub jobList
para make jobList -maxJob=20
para time
#Completed: 95 of 95 jobs
#CPU time in finished jobs: 20273s 337.88m 5.63h 0.23d 0.001 y
#IO & Wait Time: 0s 0.00m 0.00h 0.00d 0.000 y
#Average job time: 189s 3.15m 0.05h 0.00d
#Longest finished job: 364s 6.07m 0.10h 0.00d
#Submission to last job: 2006s 33.43m 0.56h 0.02d
#Estimated complete: 0s 0.00m 0.00h 0.00d
# Link results into pooled directory
ln -s /hive/data/genomes/hg38/bed/uwDnase1/run_normalized/out/*.bw /hive/data/genomes/hg38/bed/uwDnase1/run_pooled/
sed 's/pooled.bigWig/norm.bw/' < calcGraph/uwDnase1.tab > colors.tab
# Run program to generate trackDb file. The source is in /hg/makeDb/outside/uwDnaseTrackHub
cd /hive/data/genomes/hg38/bed/uwDnase1
uwDnaseTrackHub meta.tab run_pooled colors.tab hub
##########################################################
# Create DNase tracks from the hub files (In progress 2014-12-09 Kate)
# There are 3 tracks (Redmine #14353):
# 1) Composite with peaks, hotspots, and signal (as on hub)
# 2) Multiwig of signal, colored by similarity (as on hub)
# 3) Clusters (as on hg19)
# The hub data file dir is: /hive/data/genomes/hg38/bed/uwDnase1/run_pooled
# (and normalized signal files are linked into that dir from :
# /hive/data/genomes/hg38/bed/uwDnase1/run_normalized/out
# The hub trackDb file is: /hive/data/genomes/hg38/bed/uwDnase1/hub/hg38/trackDb.txt
#################################
# DNAse peaks, hotspots, and signal track, and the multiwig
# Add scores to bigBeds (use signalValue)
cd /hive/data/genomes/hg38/bed/uwDnase1/run_pooled
mkdir -p scored/in scored/out
foreach f (*.narrowPeak *.broadPeak)
bigBedToBed $f scored/in/$f
end
cd scored/in
bedScore -uniform -method=reg -col=7 *.narrowPeak ../out >&! score.out
bedScore -uniform -method=reg -col=7 *.broadPeak ../out >>&! score.out &
cd ../out
foreach f (*.broadPeak)
echo $f
bedToBigBed -type=bed6+3 -as=$HOME/kent/src/hg/lib/encode/broadPeak.as \
$f /hive/data/genomes/hg38/chrom.sizes ../$f.bb
end
# Link data files into gbdb and create bbi tables
mkdir /hive/data/gbdb/hg38/bbi/uwDnase
cd /hive/data/genomes/hg38/bed/uwDnase1/run_pooled/scored
set bbi = /gbdb/hg38/bbi/uwDnase/scored
mkdir $bbi
foreach f (*.broadPeak.bb)
echo $f
ln -s `pwd`/$f $bbi
set exp = `echo $f | sed 's/wgEncodeEH\([0-9]*\).*/WgEncodeEH\1/'`
hgBbiDbLink hg38 uwEnc2DnaseHot${exp} $bbi/$f
end
foreach f (*.narrowPeak.bb)
echo $f
ln -s `pwd`/$f $bbi
set exp = `echo $f | sed 's/wgEncodeEH\([0-9]*\).*/WgEncodeEH\1/'`
hgBbiDbLink hg38 uwEnc2DnasePeaks${exp} $bbi/$f
end
cd /hive/data/genomes/hg38/bed/uwDnase1/run_normalized/out
foreach f (*.bw)
ln -s `pwd`/$f $bbi/$f
set exp = `echo $f | sed 's/wgEncodeEH\([0-9]*\).*/WgEncodeEH\1/'`
hgBbiDbLink hg38 uwEnc2DnaseSignal${exp} $bbi/$f
hgBbiDbLink hg38 uwEnc2DnaseWig${exp} $bbi/$f
end
# Load peaks into database (needed by hgc. we may be able to drop these with code changes)
cd /hive/data/genomes/hg38/bed/uwDnase1/run_pooled/scored/out
foreach f (*.narrowPeak)
echo $f
set exp = `echo $f | sed 's/wgEncodeEH\([0-9]*\).*/WgEncodeEH\1/'`
#hgLoadBed -fillInScore=signalValue -trimSqlTable -sqlTable=$HOME/kent/src/hg/lib/encode/narrowPeak.sql -renameSqlTable -as=$HOME/kent/src/hg/lib/encode/narrowPeak.as hg38 uwEnc2DnaseBedPeaks${exp} $f
hgLoadBed -trimSqlTable -sqlTable=$HOME/kent/src/hg/lib/encode/narrowPeak.sql -renameSqlTable -as=$HOME/kent/src/hg/lib/encode/narrowPeak.as hg38 uwEnc2DnaseBedPeaks${exp} $f
end
# Use cell curation to make more informative long labels
cd /hive/data/genomes/hg38/bed/uwDnase1
hgsql hgFixed -Ne 'select * from wgEncodeCell' > cells/cellInfo.tab
uwDnaseTrackHub -cellFile=cells/cellInfo.tab meta.tab run_pooled colors.tab kateHub4
# Convert trackDb from hub to native
cd /hive/data/genomes/hg38/bed/uwDnase1/
mkdir tracks
cd tracks
cp ../kateHub4/hg38/trackDb.txt .
sed -e 's/type bigBed/type bigBed 6 +/' -e '/bigDataUrl/d' trackDb.txt > trackDb.ra
cp trackDb.ra ~/kent/src/hg/makeDb/trackDb/human/hg38/uwDnase.ra
#################################
# DNase clusters track
cd /hive/data/genomes/hg38/bed/uwDnase1
mkdir clusters
# cd run_pooled/scored/out
cd run_pooled_hotspot5.1/scored/out
ls *.pooled.narrowPeak > ../../../clusters/peak.lst
# calculate normalization factor
regClusterMakeTableOfTables -verbose=3 eapDnase01Hg38 \
../../../clusters/peak.lst ../../../clusters/peak.table >&! ../../../clusters/regTable.out &
# cluster
regCluster -bedSources ../../../clusters/peak.table /dev/null ../../../clusters/peak.bed \
>&! ../../../clusters/regCluster.out &
2011652 singly-linked clusters, 2076756 clusters in 96 chromosomes
# NOTE: more clusters (2.2M) in hg19 (which included Duke data)
# filter out low scoring
cd ../../../clusters
awk '$5 >= 100' peak.bed > peak.filtered.bed
wc -l peak.filtered.bed
# 1330766 peak.filtered.bed
# retained 66% vs 83% in hg19 (seems low ??)
# --> keep them all for now, filter with UI
# format to BED5+floatscore+sources for hgBedSources
# which will extract, uniquify, and assign ID's to sources
awk 'BEGIN {OFS="\t"}{print $1, $2, $3, $4, $5, 0, $7;}' peak.bed > peak.bed6
hgBedSources peak.bed6 regDnase
mv regDnaseSources.tab uwEnc2DnaseSources.tab
# hand edit to fix curation -- add RA treatments where needed
# NHBE_RA -> NHBE_RA+RA
# SK-N-SH_RA -> SK-N-SH_RA+RA
# load sources table
autoSql $HOME/kent/src/hg/lib/idName.as idName
hgLoadSqlTab hg38 uwEnc2DnaseSources idName.sql uwEnc2DnaseSources.tab
# merge files and format to BED5+sourceCount+sourceIds+sourceVals
awk '{print $8}' peak.bed > peak.vals
awk 'BEGIN {OFS="\t"}{print $1, $2, $3, $4, $5, $7, $8;}' regDnase.bed | \
paste - peak.vals > uwEnc2DnaseClustered.bed
hgLoadBed hg38 uwEnc2DnaseClustered -sqlTable=$HOME/kent/src/hg/lib/bed5SourceVals.sql \
-renameSqlTable -as=$HOME/kent/src/hg/lib/bed5SourceVals.as uwEnc2DnaseClustered.bed
# create inputs file to display metadata on details page
# NOTE: this can probably be jettisoned in favor of new code, since source info
# is now in the BED file
cat > makeInputs.csh << 'EOF'
set tables = `hgsql hg38 -Ne "show tables like 'uwEnc2DnasePeaks%'"`
foreach t ($tables)
set exp = `echo $t | sed 's/uwEnc2DnasePeaksWgEncode/wgEncode/'`
set t = `echo $t | sed 's/Peaks/BedPeaks/'`
set cell = `encodeExp show $exp cellType`
set treatment = `encodeExp show $exp treatment`
echo "$t\t$cell\t$treatment"
end
'EOF'
csh makeInputs.csh > inputs.tab
hgLoadSqlTab hg38 uwEnc2DnaseInputs ~/kent/src/hg/lib/clusterInputEapDnase.sql inputs.tab
# try bigBed version
sed 's/BedPeaks/Peaks/' inputs.tab > bigInputs.tab
hgsql hg38 -e 'alter table uwEnc2DnaseInputs rename to uwEnc2DnaseInputs_old'
hgLoadSqlTab hg38 uwEnc2DnaseInputs ~/kent/src/hg/lib/clusterInputEapDnase.sql bigInputs.tab
#Hmm, hgc peakClusters doesn't appear to work with bigBed peak files...
# Revert trackDb to BED peak files
#################
# Rename everything -- replace wgEncodeEH* with cell and treatment, to help users. Change prefix
# to match hg19
cd /hive/data/genomes/hg38/bed/uwDnase1
mkdir rename
cat > expToFile.csh << 'EOF'
#!/bin/csh -ef
set exp = `echo $1 | sed 's/wgEncodeEH\([0-9]*\).*/\1/'`
set cell = `encodeExp show $exp cellType | sed -e 's/[-+_]//g' -e 's/\(.\)\(.*\)/\U\1\L\2/'`
set treatment = `encodeExp show $exp treatment | sed -e 's/[-+_]//g' -e 's/\(.\)\(.*\)/\U\1\L\2/'`
echo ${cell}${treatment}
'EOF'
cat > renameWig.csh << 'EOF'
#!/bin/csh -ef
set bbi = /gbdb/hg38/bbi/wgEncodeRegDnase
mkdir -p $bbi
set path = (`pwd` $path)
set build = /hive/data/genomes/hg38/bed/wgEncodeRegDnase
set data = run_normalized/out
set wd = `pwd`
pushd $build/$data
rm $wd/edit.csh
foreach f (*.norm.bw)
echo "${f} "
set exp = $f:r:r
set vars = `expToFile.csh $exp`
set t = wgEncodeRegDnaseUw${vars}Signal
echo "-e s/uwEnc2DnaseSignal${exp}/$t/ \" >> $wd/edit.csh
ln -s $build/$data/$f $bbi/$t.bw
hgBbiDbLink hg38 $t $bbi/$t.bw
end
popd
'EOF'
# Manually tweak edit.csh and run to replace table and track names in trackDb.ra
# Do same for broadPeak bigBeds, and narrowPeak tables
csh edit5.csh ../clusters/bigInputs.tab > ../clusters/wgEncodeRegDnaseClusteredInputs.tab
cd ../clusters
hgLoadSqlTab hg38 wgEncodeRegDnaseClusteredInputs \
~/kent/src/hg/lib/clusterInputEapDnase.sql wgEncodeRegDnaseClusteredInputs.tab
hgsql hg38 -e "alter table uwEnc2DnaseClustered rename to wgEncodeRegDnaseClustered"
hgsql hg38 -e "alter table uwEnc2DnaseSources rename to wgEncodeRegDnasedClusteredSource"
#################
# Cell table (use for cell metadata, instead of metaDb)
cd /hive/data/genomes/hg38/bed/uwDnase1
mkdir cells
cd cells
# collect cell info from ENCODE2 and ENCODE3
~/kent/src/hg/encode3/cellsFromEncode3.py > cells.tsv
# load to google spreadsheet and clean
#https://docs.google.com/a/soe.ucsc.edu/spreadsheets/d/10EWdr-JTtDvfLKKLPvP3T2ft6MZ5KVdKbBzY76SRaug/edit#gid=1783710206
# extract useful columns to file and load
tail -n +2 wgEncodeCell.tab | \
nl -v 0 | \
hgLoadSqlTab hgFixed wgEncodeCell ~/kent/src/hg/lib/encode/wgEncodeCell.sql stdin
# add order URL's
#https://docs.google.com/spreadsheets/d/14HvZfqJdClt6mfcwf2w0xRPvdhk5o7bgV77LMc1qTcU/edit?usp=sharing
tail -n +2 wgEncodeCellUrl.tsv | \
nl -v 0 | \
hgLoadSqlTab hgFixed wgEncodeCell ~/kent/src/hg/lib/encode/wgEncodeCell.sql stdin
# to verify links
checkUrlsInTable hgFixed wgEncodeCell > errs.txt
# Update table when cron of above reports errors
cat > badUrls.txt << 'EOF'
http://www.sciencellonline.com/site/productInformation.php?keyword=1830 302
http://www.sciencellonline.com/site/productInformation.php?keyword=1820 302
http://www.sciencellonline.com/site/productInformation.php?keyword=7110 302
http://www.sciencellonline.com/site/productInformation.php?keyword=1810 302
http://www.sciencellonline.com/site/productInformation.php?keyword=1000 302
http://www.sciencellonline.com/site/productInformation.php?keyword=1100 302
http://www.sciencellonline.com/site/productInformation.php?keyword=6300 302
http://www.sciencellonline.com/site/productInformation.php?keyword=6320 302
http://www.sciencellonline.com/site/productInformation.php?keyword=200 302
http://www.sciencellonline.com/site/productInformation.php?keyword=1310 302
http://www.sciencellonline.com/site/productInformation.php?keyword=6570 302
http://www.sciencellonline.com/site/productInformation.php?keyword=2720 302
http://www.sciencellonline.com/site/productInformation.php?keyword=2620 302
http://www.sciencellonline.com/site/productInformation.php?keyword=6560 302
http://www.sciencellonline.com/site/productInformation.php?keyword=7630 302
http://www.sciencellonline.com/site/productInformation.php?keyword=6580 302
http://www.sciencellonline.com/site/productInformation.php?keyword=3120 302
http://www.sciencellonline.com/site/productInformation.php?keyword=3300 302
http://www.sciencellonline.com/site/productInformation.php?keyword=4000 302
http://www.sciencellonline.com/site/productInformation.php?keyword=6540 302
http://www.sciencellonline.com/site/productInformation.php?keyword=7130 302
'EOF'
# NOTE: site query string has changed to:
# http://sciencellonline.com/catalogsearch/result/?q=1820
hgsql hgFixed -e 'select * from wgEncodeCell' > wgEncodeCell.2015-05-06.tab
sed -e 's^www.sciencellonline.com/site/productInformation.php^sciencellonline.com/catalogsearch/result/^' -e 's^keyword=^q=^' wgEncodeCell.2015-05-06.tab > wgEncodeCell.2015-05-07.tab
ln -s wgEncodeCell.2015-05-07.tab wgEncodeCell.latest.tab
# save old table for now
hgsql hgFixed -e "alter table wgEncodeCell rename to wgEncodeCell_old"
hgLoadSqlTab hgFixed wgEncodeCell ~/kent/src/hg/lib/encode/wgEncodeCell.sql wgEncodeCell.latest.tab
# check, then remove old table
hgsql hgFixed -e "drop table wgEncodeCell_old"
checkUrlsInTable hgFixed wgEncodeCell > errs.txt
# Treatment table
# TBD
# create term/description tab sep file (currently just treatments in UW DNase)
#cd /hive/data/genomes/hg38/bed/uwDnase1
#cd cells
#tail -n +2 treatments.tab | \
#nl -v 0 | \
#hgLoadSqlTab hgFixed wgEncodeTreatment ~/kent/src/hg/lib/encode/wgEncodeTreatment.sql stdin
# Comparison to lifted track (Chris Eisenhart)
# Visually, new track is appears noisy. This is not unexpected as it excludes Duke data.
# There are more elements in new track: 2076756 vs 1867194
# Coverage is similar:
featureBits hg38 wgEncodeRegDnaseClustered
# 451551920 bases of 3049335806 (14.808%) in intersection
featureBits hg38 wgEncodeRegDnaseClusteredLifted
# 477271764 bases of 3049335806 (15.652%) in intersection
# Comparing chr1:
featureBits hg38 -chrom=chr1 wgEncodeRegDnaseClustered
#41505780 bases of 230486321 (18.008%) in intersection
featureBits hg38 -chrom=chr1 wgEncodeRegDnaseClusteredLifted
#46543116 bases of 230486321 (20.193%) in intersection
# Greater number of elements must be due to mappings on new alt chroms
# (96 chroms in new track, 38 in old)
| true
|
3de2268473124c69be9c1f146d00ba4da030924a
|
Shell
|
saga-project/saga-cpp-legacy-adaptors
|
/aws/trunk/internal/bin/saga_context_init
|
UTF-8
| 756
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
# Prepare an EC2 environment for SAGA: open SSH (port 22) in the
# 'default' security group and ensure a keypair file exists.
# Relies on EC2_HOME being set in the environment.
# NOTE(review): this file uses `return` at top level, so it is meant to
# be *sourced*, not executed -- confirm with callers.
# allow connections to port 22 in the 'default' instance group
auth_return=`$EC2_HOME/bin/ec2-authorize default -p 22 2>&1`
# grep -v exits 1 only when every input line matches "has already been
# authorized"; exit 0 plus non-empty output means a real failure.
echo $auth_return | grep -v 'has already been authorized' > /dev/null
if test "x$?" != "x1"; then
if test "x$auth_return" != "x"; then
echo "could not enable ssh"
return 1
fi
fi
# key location
EC2_GSG_KEY=$HOME/.ec2/saga.gsg_private.pm
export EC2_GSG_KEY
# create an access keypair. No support for concurrent sessions at the moment.
if ! test -f $EC2_GSG_KEY; then
mkdir -p $HOME/.ec2
# grep drops the "KEYPAIR ..." summary line from the delete output
$EC2_HOME/bin/ec2-delete-keypair saga-gsg-keypair | grep -ve '^KEYPAIR.*saga-gsg-keypair$'
$EC2_HOME/bin/ec2-add-keypair saga-gsg-keypair > $EC2_GSG_KEY
chmod 0600 $EC2_GSG_KEY
fi
# print the key path for the caller to capture
echo $EC2_GSG_KEY
return 0
| true
|
09ef634a4a3d37093a4961c015296675bb105a35
|
Shell
|
kosugi/plurk4ji
|
/plurk
|
UTF-8
| 315
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Dispatch wrapper: runs cmd_<name>.py located next to this script with
# the project's Python interpreter (pyenv shim when PYENV_ROOT is set).
#
# Usage: plurk <command> [args...]
if [ -n "$PYENV_ROOT" ]; then
  PYTHON="$PYENV_ROOT/shims/python"
else
  PYTHON=python
fi
# Fix: quote the cd target and abort on failure, so we never run
# "ls cmd_*.py" (or the command itself) from the wrong directory.
cd "$(dirname "$0")" || exit 1
CMD="cmd_$1.py"
if [ ! -f "$CMD" ]; then
  echo "usage: $0 command [args...]"
  echo "commands: $(ls cmd_*.py | sed -e 's/^cmd_\(.*\)\.py$/\1/')"
  exit 1
fi
shift
# Fix: quote $PYTHON and $CMD so paths containing spaces survive.
"$PYTHON" "$CMD" "$@"
| true
|
7ba628c613cf7bc4ff69fc471e31b5b88a01b0b2
|
Shell
|
ariofrio/cs160-simple
|
/tests/run.sh
|
UTF-8
| 3,114
| 4.15625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Test harness for the "simple" compiler.  All script arguments are
# forwarded to every ./simple invocation.
# NOTE(review): "$@" inside a double-quoted scalar assignment joins the
# args with spaces (same as $*); later unquoted $SIMPLE uses rely on
# that re-splitting -- confirm no argument contains spaces.
SIMPLE="./simple $@"
indent() {
  # Prefix every line of stdin; BSD sed needs -l and GNU sed -u for
  # unbuffered, line-at-a-time output (keeps streaming interleaved).
  local expr='s/^/ /'
  if [ "$(uname)" = Darwin ]; then
    sed -l "$expr"
  else
    sed -u "$expr"
  fi
}
report_incorrect() {
  # Record a failing test: bump the counter, print a red ✘ plus the
  # test description, then any captured error output (indented).
  # Reads globals: test, error.  Writes: incorrect.
  incorrect=$((incorrect + 1))
  tput setaf 1; tput bold
  echo -n '✘ '
  tput sgr0; tput setaf 1
  # Fix: quote $test so spaces are preserved and glob characters in the
  # command line (e.g. "*.simple") are not expanded by the shell.
  echo "$test"
  tput sgr0
  [ "$error" ] && echo "$error" | indent
  return 0
}
report_skipped() {
  # Record a skipped test: bump the counter, print a yellow dash plus
  # the test description, then any captured error output (indented).
  # Reads globals: test, error.  Writes: skipped.
  skipped=$((skipped + 1))
  tput setaf 3; tput bold
  echo -n '- '
  tput sgr0; tput setaf 3
  # Fix: quote $test to avoid word splitting and glob expansion.
  echo "$test"
  tput sgr0
  [ "$error" ] && echo "$error" | indent
  return 0
}
report_correct() {
  # Record a passing test: bump the counter, print "✓" and the test
  # description in green, then any captured error output (indented).
  # Reads globals: test, error.  Writes: correct.
  correct=$((correct + 1))
  echo -n '✓ '
  tput setaf 2;
  # Fix: quote $test to avoid word splitting and glob expansion.
  echo "$test"
  tput sgr0
  [ "$error" ] && echo "$error" | indent
  return 0
}
section() {
  # Print a blank line, then the heading in grey (color 8).
  printf '\n'
  tput setaf 8
  printf '%s\n' "$*"
  tput sgr0
}
# Counters updated by the report_* helpers; summarized at the end.
incorrect=0
correct=0
skipped=0
section "Parser rejects invalid programs"
# A bad-input test passes when the compiler emits *some* error output.
for test in $(find tests/bad -name '*.simple' | sort); do
error=$(bash -c "$SIMPLE < $test" 2>&1 1>/dev/null)
test="$SIMPLE < $test"
[ "$error" ] && report_correct || report_incorrect
done > >(indent)
section "Parser accepts valid programs"
# A good-input test passes when the compiler emits *no* error output.
for test in $(find tests/good -name '*.simple' | sort); do
error=$(bash -c "$SIMPLE < $test" 2>&1 1>/dev/null)
test="$SIMPLE < $test"
[ "$error" ] && report_incorrect || report_correct
done > >(indent)
# Compile $test to assembly, assemble it together with the C runtime
# stub, and link a 32-bit executable into $runfile.  Uses the
# test/sfile/ofile/runfile variables set by the caller.
function simplec() {
bash -c "./simple < $test > $sfile" &&
gcc -m32 -c -o $ofile $sfile &&
gcc -m32 -c -o tests/good/start.o tests/good/start.c &&
gcc -m32 -o $runfile tests/good/start.o $ofile
}
section "Valid programs compile"
for test in $(find tests/good -name '*.simple' | sort); do
# Derived artifact paths for this test case.
runfile=${test%.simple}.run
ofile=${test%.simple}.o
sfile=${test%.simple}.s
outfile=${test%.simple}.out
error=$(simplec 2>&1 > /dev/null)
test="$SIMPLE < $test > $sfile"
if [ "$error" ]; then
report_incorrect
else
report_correct
fi
done > >(indent)
section "Valid programs run correctly"
for test in $(find tests/good -name '*.simple' | sort); do
runfile=${test%.simple}.run
ofile=${test%.simple}.o
sfile=${test%.simple}.s
outfile=${test%.simple}.out
# Only tests that have a recorded expected-output (.out) file run.
if [ -f $outfile ]; then
error=$(simplec 2>&1 > /dev/null) #$tempfile)
test="$SIMPLE < $test > $sfile"
if [ "$error" ]; then
# Compilation failed, so the run test cannot be performed.
report_skipped
else
tempfile=$(mktemp)
error=$(bash -c "$runfile > $tempfile" 2>&1)
if [ "$error" ]; then
report_incorrect
else
# Compare actual program output with the expected .out file.
diff $tempfile $outfile &> /dev/null && report_correct || report_incorrect
colordiff $tempfile $outfile | indent
fi
rm $tempfile
fi
fi
done > >(indent)
echo
# Final summary line: red FAIL when anything failed or was skipped,
# green OK otherwise, followed by the non-zero counters in bold.
if [ $incorrect -gt 0 ] || [ $skipped -gt 0 ]; then
tput setaf 1; tput bold
echo -n '✘ '
tput sgr0; tput setaf 1
echo -n 'FAIL'
tput sgr0
else
echo -n '✓ '
tput setaf 2
echo -n 'OK'
tput sgr0
fi
echo -n ' » '
if [ $correct -gt 0 ]; then
tput bold
echo -n $correct
tput sgr0
echo -n ' correct '
fi
if [ $incorrect -gt 0 ]; then
tput bold
echo -n $incorrect
tput sgr0
echo -n ' incorrect '
fi
if [ $skipped -gt 0 ]; then
tput bold
echo -n $skipped
tput sgr0
echo -n ' skipped '
fi
echo
| true
|
4833449807caa69b6c93bff32bb68528da837241
|
Shell
|
caichang01/louplus-linux
|
/11/challenges/02/ddos.sh
|
UTF-8
| 746
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Simple anti-DDoS helper: find remote IPs with more ESTABLISHED
# connections than LIMIT and rate-limit new connections via iptables.
# Per-IP concurrent connection limit.
LIMIT=21
# Collect offending IPs (count > LIMIT), excluding private/loopback
# prefixes, into /var/log/rejectip.
# Fix: escape the dots so "192.168"/"127.0" match literally instead of
# matching any character in those positions.
netstat -na | awk '/ESTABLISHED/{split($5, T, ":"); print T[1]}' | sort | grep -v -E '192\.168|127\.0' | uniq -c | sort -rn | awk -v limit=$LIMIT '{if($2 != null && $1 > limit) {print $2}}' >/var/log/rejectip
for i in $(cat /var/log/rejectip)
do
# Skip whitelisted IPs.
# Fix: -F treats the IP as a literal (dots not regex), -q keeps the
# matched line off stdout, quoting "$i" avoids word splitting, and --
# protects against a value starting with '-'.
if grep -qF -- "$i" /home/shiyanlou/goodip
then
continue
fi
# Only add a rule once per IP.
rep=$(iptables-save | grep -F -- "$i")
if [[ -z $rep ]]
then
# Allow at most 5 new connections per minute from this IP.
# NOTE(review): an ACCEPT-with-limit rule only throttles if a later
# rule drops the excess traffic -- confirm the chain's policy.
/sbin/iptables -A INPUT -s "$i" -m limit --limit 5/m -j ACCEPT
echo "$i limit at $(date)" >>/var/log/ddos-ip
fi
done
| true
|
c12f89d4de89c09d195fcd19c9fd3458bae01bbb
|
Shell
|
ValHayot/sparkpilot
|
/example/hpc_default.sh
|
UTF-8
| 1,139
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH --account=def-glatard
#SBATCH --time=00:05:00
#SBATCH --nodes=5
#SBATCH --mem=20G
#SBATCH --cpus-per-task=1
#SBATCH --ntasks-per-node=1
# Launch a standalone Spark cluster inside a SLURM allocation and run a
# benchmark PySpark script, recording wall-clock start/end timestamps.
logdir=/scratch/vhayots/sparkpilot/logs
mstr_bench=$logdir/hpc_def_${SLURM_JOB_ID}_benchmarks.out
spscript="/scratch/vhayots/sparkpilot/example/dummyprogram.py /scratch/vhayots/sparkpilot/dummy-1.out -p 12 -c /scratch/vhayots/sparkpilot/checkpoints"
echo start $(date +%s.%N) > $mstr_bench
module load spark/2.3.0
#module load python
export SPARK_IDENT_STRING=$SLURM_JOBID
export SPARK_WORKER_DIR=$SLURM_TMPDIR
start-master.sh
# Poll the master's JSON endpoint until it reports its spark:// URL.
while [ -z "$MASTER_URL" ]
do
MASTER_URL=$(curl -s http://localhost:8080/json/ | jq -r ".url")
echo "master not found"
sleep 5
done
# One task is reserved for the driver; the rest become workers.
NWORKERS=$((SLURM_NTASKS - 1))
SPARK_NO_DAEMONIZE=1 srun -n ${NWORKERS} -N ${NWORKERS} --label --output=$SPARK_LOG_DIR/spark-%j-workers.out start-slave.sh -m ${SLURM_MEM_PER_NODE}M -c ${SLURM_CPUS_PER_TASK} ${MASTER_URL} &
slaves_pid=$!
srun -n 1 -N 1 spark-submit --master=${MASTER_URL} --executor-memory=${SLURM_MEM_PER_NODE}M $spscript
# Tear down the workers and master once the job finishes.
kill $slaves_pid
stop-master.sh
echo end $(date +%s.%N) >> $mstr_bench
| true
|
cf19b323db16bef7698b7fe89b523955308487c6
|
Shell
|
gvsurenderreddy/magos-linux
|
/make_MagOS/2_install
|
UTF-8
| 4,951
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# License: GPL last version . Лицензия : GPL последней версии
# Written: Mikhail Zaripov . Написано: Михаил Зарипов
# Last modified: ___ . Исправлено _____
# Stage 2 of the MagOS build: install all packages into work/.../rootfs
# via rpm --root; requires root because of chroot/mount operations.
RPMIOPT=" --ignoresize --nodigest --nosignature --nosuggest "
#RPMIOPT=" --ignoresize --nosignature --nosuggest "
# Root is required (message below explains, in Russian, that rpm will
# be run with --root rootfs and scriptlets chroot'ed -- harmless to the
# host system).
if [ "`id -u`" != "0" ] ;then
echo Для установки пакетов в каталог rootfs нужны права root, команда rpm будет запущена \
с ключом --root rootfs, установочные скрипты от пакетов будут запущены с помощью chroot. \
это не повредит вашей операционной системе, из под которой запускается этот скрипт.
exit 1
fi
# Load build configuration (MYPATH, FLASHNAME, VERREL, ...).
if [ -f .config ] ;then
. .config
else
echo "Не вижу файла .config" ; exit 1
fi
cd "$MYPATH"
# Repeatedly unmount every mount point whose /proc/mounts entry matches
# the pattern in $1, looping until none remain (handles stacked mounts).
umountbranches()
{
while grep -q "$1" /proc/mounts ;do
# Unmount each matching mount point (field 2); umount errors are
# ignored because the outer loop retries anything still mounted.
grep "$1" /proc/mounts | awk '{print $2}' | while read mp ;do
echo umount "$mp"
umount "$mp" 2>/dev/null
done
done
}
# Install every rpm listed in file $1 into the target rootfs, passing
# extra rpm options via $2 (e.g. --noscripts).  Non-warning errors are
# appended to install_errors.log.
function install_rpms()
{
for a in `cat $1` ;do
echo $a
# Some package scriptlets clobber /dev/null inside the chroot;
# recreate it as an empty file before each install.
rm -f rootfs/dev/null 2>/dev/null
echo -ne > rootfs/dev/null
LC_ALL=C rpm $RPMIOPT $2 --nodeps --root "$MYPATH/work/$FLASHNAME-$VERREL/rootfs" -i "$MYPATH/loaded/$FLASHNAME-$VERREL/rpms/$a" 2>error.tmp
# Drop warning lines; anything left in error.tmp is a real error.
sed -i /"arning"/d error.tmp
[ "`grep -v ^[[:space:]]*$ error.tmp | nl | head -1`" = "" ] && continue
echo $a >>install_errors.log
cat error.tmp >>install_errors.log
done
}
# Build the package list, bootstrap a minimal rootfs, then install all
# packages into it with rpm --root and set up kernel symlinks.
mkdir -p work/$FLASHNAME-$VERREL || exit 1
cd work/$FLASHNAME-$VERREL || exit 1
echo "Создаём список пакетов для установки"
mkcd -d --buildhdlist ../../loaded/$FLASHNAME-$VERREL/rpms >/dev/null 2>&1 || exit 1
echo "Обработка списка пакетов"
rm -f depslist.ordered.* *hdlist1.cz compss provides 2>/dev/null
# Split the dependency-ordered list into packages with install scripts
# (depslist.ordered.2) and without (depslist.ordered.1).
awk '{print $1}' depslist.ordered | awk -F: '{print $1}' | grep -v ^dkms-[a-ln-z] | while read a ;do
grep -m1 "^$a" ../../loaded/rpmscripts_$VERREL/scripts >>depslist.ordered.2 && continue
grep -m1 "^$a" ../../loaded/rpmscripts_$VERREL/noscripts >>depslist.ordered.1 && continue
echo Not found $a ;exit 1
done
echo "Подготовка к установке пакетов"
# Make sure nothing is still mounted under rootfs before wiping it.
umountbranches "$(pwd)/rootfs"
rm -fr rootfs install_errors.log 2>/dev/null
mkdir rootfs || exit 1
echo "Установка базовых пакетов"
# Bootstrap: unpack the base packages with rpm2cpio (no scriptlets)
# so that rpm itself can run against the rootfs afterwards.
cd rootfs
for a in `cat $MYPATH/files/rpm_names/$FLASHNAME-$VERREL/base` ;do
cat ../depslist.ordered.1 ../depslist.ordered.2 | grep $a | while read b ;do
echo $b
rpm2cpio $MYPATH/loaded/$FLASHNAME-$VERREL/rpms/$b | cpio -i -d || exit 1
done
done
cd ..
# Pseudo-filesystems needed by package scriptlets; the rpm database
# lives on tmpfs during the install for speed.
mkdir -p rootfs/dev rootfs/proc rootfs/sys rootfs/var/lib/rpm || exit 1
mount --bind /proc rootfs/proc || exit 1
mount --bind /sys rootfs/sys || exit 1
mount -t tmpfs tmpfs rootfs/var/lib/rpm || exit 1
echo "Импортирование ключей"
for a in ../../loaded/$FLASHNAME-$VERREL/pubkeys/*.pubkey ;do
# rpm --root "`pwd`/rootfs" --import $a 2>/dev/null || exit 1
rpm --root "`pwd`/rootfs" --import $a || exit 1
done
echo "Установка пакетов"
install_rpms depslist.ordered.1 --noscripts
install_rpms depslist.ordered.2
rm -f error.tmp
# Preserve the rpm db from tmpfs before unmounting it.
cp -pr rootfs/var/lib/rpm rootfs/var/lib/rpm_base || exit 1
umount rootfs/sys/fs/fuse/connections 2>/dev/null
umount rootfs/proc rootfs/sys rootfs/var/lib/rpm || exit 1
rm -fr rootfs/var/lib/rpm || exit 1
mv rootfs/var/lib/rpm_base rootfs/var/lib/rpm || exit 1
mkdir -p rootfs/var/lib/rpm/pubkeys
cp -p ../../loaded/$FLASHNAME-$VERREL/pubkeys/*.pubkey rootfs/var/lib/rpm/pubkeys || exit 1
echo "Создание ссылок на исходники ядра"
# Locate kernel-devel tree, module dir and kernel image inside rootfs,
# then create the conventional vmlinuz/build/source symlinks.
KERNDEVEL=$(find rootfs/usr/src | grep -m1 /include/linux/aufs_type.h | sed s=/include/linux/aufs_type.h== | sed s=rootfs==)
KERNMODULES=$(find rootfs/lib/modules | grep -m1 modules.dep | sed s=/modules.de.*==)
KERNLINUZ=$(ls -1 rootfs/boot/vmlinuz-* | sed 's|.*/||' | tail -1)
[ -z "$KERNLINUZ" ] || ln -sf $KERNLINUZ rootfs/boot/vmlinuz
[ -z "$KERNLINUZ" ] || ln -sf $KERNLINUZ rootfs/boot/vmlinuz-desktop
[ -z "$KERNDEVEL" ] || [ -h $KERNMODULES/build ] || ln -s $KERNDEVEL $KERNMODULES/build
[ -z "$KERNDEVEL" ] || [ -h $KERNMODULES/source ] || ln -s $KERNDEVEL $KERNMODULES/source
# Report outcome based on the kinds of errors seen during install.
if egrep -q 'cpio|rpmDigestAlgo|conflict|output error' install_errors.log ;then
echo "Обнаружены серьёзные ошибки при установке пакетов, см. файл work/$FLASHNAME-$VERREL/install_errors.log"
else
echo "Проверьте файл work/$FLASHNAME-$VERREL/install_errors.log, как правило там некритичные ошибки"
fi
echo "Работа скрипта завершена, в work/$FLASHNAME-$VERREL/rootfs установлен наш дистрибутив"
| true
|
1aa7bfa767a620cbbaca5e36a19790e29f8512f7
|
Shell
|
michael-behrendt/docker
|
/ambari.sh
|
UTF-8
| 3,864
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Install Ambari server+agent and deploy a single-node HDP 2.6 cluster
# via the Ambari blueprint REST API.
# script was inspired by
# https://community.hortonworks.com/articles/47170/automate-hdp-installation-using-ambari-blueprints.html
if [ -f /etc/yum.repos.d/ambari.repo ]; then
echo "$0 skipped, because files already exists"
exit 1
fi
if [ "$(id -u)" != "0" ]; then
echo "$0 has to be run as root, probably sudo missing?"
exit 1
fi
# hack to avoid forwarding local requests to t-sys proxy
cat - >> /etc/profile.d/proxy.sh << EOF
export NO_PROXY="localhost,127.0.0.1,faslabdev"
export no_proxy="localhost,127.0.0.1,faslabdev"
EOF
. /etc/profile.d/proxy.sh
(cd /etc/yum.repos.d/; curl -O --silent http://public-repo-1.hortonworks.com/ambari/centos7/2.x/updates/2.5.2.0/ambari.repo)
yum install ambari-agent -y
ambari-agent start
yum install ambari-server -y
ambari-server setup --silent --verbose
# NOTE(review): Java's proxy settings normally split host and port into
# http.proxyHost / http.proxyPort -- confirm this combined value works.
echo 'export AMBARI_JVM_ARGS="$AMBARI_JVM_ARGS -Dhttp.proxyHost=10.175.249.97:8080"' >> /var/lib/ambari-server/ambari-env.sh
ambari-server start
# update ambari repository and link 2.6 to 2.6.2.0 Version
# use curl -u admin:admin http://localhost:8080/api/v1/stacks/HDP/versions/2.6/operating_systems/redhat7/repositories/HDP-2.6
# to read previous settings
curl -H "X-Requested-By: ambari" -X PUT -u admin:admin http://localhost:8080/api/v1/stacks/HDP/versions/2.6/operating_systems/redhat7/repositories/HDP-2.6 -d @- << EOF
{
"Repositories" : {
"base_url" : "http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.6.2.0",
"verify_base_url" : false
}
}
EOF
# create blueprint using 2.6
# for list of possible components names go to
# https://github.com/apache/ambari/tree/branch-2.5/ambari-server/src/main/resources/common-services
# and have a look into metainfo.xml of each subfolder
# NOTE(review): the blueprint is posted under the name "bluetest1" but
# the payload names it "single-node-hdp-cluster" -- verify which one
# Ambari registers.
curl -H "X-Requested-By: ambari" -X POST -u admin:admin http://localhost:8080/api/v1/blueprints/bluetest1 -d @- << EOF
{
"configurations" : [ ],
"host_groups" : [
{
"name" : "host_group_1",
"components" : [
{
"name" : "NAMENODE"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "DATANODE"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "ZOOKEEPER_CLIENT"
},
{ "name" : "MYSQL_SERVER" },
{ "name" : "WEBHCAT_SERVER" },
{ "name" : "HIVE_CLIENT" },
{ "name" : "HIVE_SERVER" },
{ "name" : "HIVE_METASTORE" },
{ "name" : "SPARK2_CLIENT" },
{ "name" : "SPARK2_THRIFTSERVER" },
{ "name" : "SPARK2_JOBHISTORYSERVER" }
],
"cardinality" : "1"
}
],
"Blueprints" : {
"blueprint_name" : "single-node-hdp-cluster",
"stack_name" : "HDP",
"stack_version" : "2.6"
}
}
EOF
# commit blueprint installation
curl -H "X-Requested-By: ambari" -X POST -u admin:admin http://localhost:8080/api/v1/clusters/cluster1 -d @- << EOF
{
"blueprint" : "single-node-hdp-cluster",
"default_password" : "admin",
"host_groups" :[
{
"name" : "host_group_1",
"hosts" : [
{
"fqdn" : "faslabdev"
}
]
}
]
}
EOF
echo "http://localhost:8080 User: admin Pass: admin"
|
c9d009997bec540f39a5a11b72483a6d132f8573
|
Shell
|
falenn/k8sPlayground
|
/examples/storage/weka/genSecret.sh
|
UTF-8
| 664
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Generate a Kubernetes Secret manifest (secret.yml) with base64-encoded
# credentials for the WekaFS CSI driver.
# Fix: the original shebang "#!/bin/env bash" fails on most systems
# because env lives in /usr/bin; use the portable env lookup.
USERNAME="apiaccess"
# NOTE(review): credentials are hard-coded; consider reading them from
# the environment instead of committing them to version control.
PASSWORD="mjT8_hw2wV3s:Xm"
# default org if only one org
ORGANIZATION="Root"
ENDPOINTS="192.168.2.111:14000,192.168.2.112:14000,192.168.2.113:14000"
SCHEME="http"
# printf '%s' cannot mis-handle escapes or a leading '-', unlike echo;
# $(...) replaces the legacy backtick substitutions.
USERB64=$(printf '%s' "${USERNAME}" | base64)
PWDB64=$(printf '%s' "${PASSWORD}" | base64)
ORGB64=$(printf '%s' "${ORGANIZATION}" | base64)
ENDB64=$(printf '%s' "${ENDPOINTS}" | base64)
SCHEMEB64=$(printf '%s' "${SCHEME}" | base64)
cat << EOF > secret.yml
apiVersion: v1
kind: Secret
metadata:
name: csi-wekafs-creds
namespace: csi-wekafs
type: Opaque
data:
username: ${USERB64}
password: ${PWDB64}
organization: ${ORGB64}
endpoints: ${ENDB64}
scheme: ${SCHEMEB64}
EOF
| true
|
0e8acecd9d5964e12993317fab35b427e9e5f77f
|
Shell
|
bobbae/kubeinfra
|
/vagrant-k8s-consul/shared/install-k8s-master.sh
|
UTF-8
| 1,171
| 3.1875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# must run as root
# on the master
# Bootstrap a single-master Kubernetes cluster plus a one-node Consul
# server used to share the worker join command.
# Args: <interface-name> <master-ip> <my-ip>
if [ $# -lt 3 ]; then
echo "Insufficient arguments"
echo "Need interface-name master-ip my-ip"
exit 1
fi
INTF=$1
MASTERIP=$2
MYIP=$3
echo "Interface $INTF"
echo "Master IP $MASTERIP"
echo "My IP Address: $MYIP"
# Single-server Consul agent bound to this host's IP.
nohup consul agent -server -bootstrap-expect=1 -data-dir=/tmp/consul -node=`hostname` -bind=$MYIP -syslog -config-dir=/etc/consul/conf.d &
# Pod CIDR 10.244.0.0/16 matches the Canal/flannel default below.
kubeadm init --apiserver-advertise-address=$MYIP --pod-network-cidr=10.244.0.0/16
#export KUBECONFIG=/etc/kubernetes/admin.conf
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Install the Canal CNI plugin.
kubectl apply -f https://raw.githubusercontent.com/projectcalico/canal/master/k8s-install/1.7/rbac.yaml
kubectl apply -f https://raw.githubusercontent.com/projectcalico/canal/master/k8s-install/1.7/canal.yaml
kubectl get pods --all-namespaces
# Wait until the master node leaves the NotReady state.
kubectl get nodes | grep NotReady
while [ $? -eq 0 ] ; do
echo Waiting for master to be Ready
sleep 7
kubectl get nodes | grep NotReady
done
kubectl get nodes
# Publish the worker join command via the Consul KV store.
kubeadm token create --print-join-command > /tmp/k8s-join-cmd
consul kv put join-cmd "`cat /tmp/k8s-join-cmd`"
|
be811398d8e1b0db6405e2f075111ae973cf4ff5
|
Shell
|
nlarosa/Scripting
|
/Bash/rsh.sh
|
UTF-8
| 2,387
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# rsh.sh — a restricted menu shell: shows a numbered command menu, reads
# the user's selection in a loop, and dispatches to the matching helper.

# Clear the screen and display the menu of available commands.
introduction() # Displays the command options
{
clear
echo ""
echo "Command Action: Command number:"
echo "Disk Free 1"
echo "Disk Usage 2"
echo "List Files 3"
echo "Process Status 4"
echo "Examine Volume 5"
echo "Shell Escape 6"
echo "Current Date 7"
echo "Exit 8"
}
# Emit a blank line followed by the selection prompt (no trailing newline,
# so the cursor waits on the same line as the prompt text).
prompt()
{
    printf '\n'
    printf '%s' "Choose your command: "
}
# Prompt the user for a filespec and report free disk space on it (df -k).
# Reads one line from stdin; echoes the command being run before its output.
#
# FIX: "read" now uses -r so backslashes in paths are not mangled, and the
# filespec is quoted so paths containing spaces reach df as one argument.
Disk_Free() # requires one prompt - filespec
{
    echo ""
    printf '%s' "Please provide the filespec of interest: "
    read -r fileSpec
    echo ""
    echo "Command: df -k $fileSpec"
    echo "Command output: "
    echo ""
    df -k "$fileSpec"
}
# Prompt the user for a filespec and report its total disk usage in KiB
# (du -s -k). Reads one line from stdin, echoes the command, then runs it.
Disk_Usage()
{
    local target
    echo ""
    printf '%s' "Please provide the filespec of interest: "
    read target
    echo ""
    echo "Command: du -s -k $target"
    echo "Command output: "
    echo ""
    du -s -k $target
}
# List the current directory's entries with type indicators (ls -F).
List_Files()
{
    printf '\n%s\n' "Command: ls -F ."
    printf '%s\n\n' "Command output: "
    ls -F .
}
# Show running processes. NOTE: top runs full-screen/interactively and only
# returns to the menu when the user quits it.
Process_Status() # no extra prompt necessary
{
echo ""
echo "Command: top"
echo "Command output: "
echo ""
top
}
# Prompt for an AFS volume ID and examine the caller's volume named
# "$USER.<id>" via vos(1). Reads one line from stdin.
Examine_Volume()
{
    local volid
    echo ""
    printf '%s' "Please provide the AFS ID of interest: "
    read volid
    echo ""
    echo "Command: vos examine $USER.$volid"
    echo "Command output: "
    echo ""
    vos examine $USER.$volid
}
# Drop the user into a full (unrestricted) /bin/bash; control returns to the
# menu loop when that shell exits.
Shell_Escape() # no extra prompt necessary
{
echo ""
echo "Command: /bin/bash"
echo "Command output: "
echo ""
/bin/bash
}
# Print the current date/time as "Day Mon Year hh:mm:ss AM/PM"
# (date format "%a %b %Y %r").
#
# FIX: the original stored the command in a string and ran it through eval —
# needless indirection and an eval habit best avoided; call date directly.
# The printed output is byte-identical.
Current_Date() # no extra prompt necessary
{
    echo ""
    echo "Command: date +\"%a %b %Y %r\""
    echo "Command output:"
    echo ""
    date +"%a %b %Y %r"
}
# Announce shutdown and terminate the restricted shell successfully.
Exit_rsh()
{
    printf '\n%s\n\n' "Exiting..."
    exit 0 # we have exited successfully
}
# SIGINT handler: instead of dying on CTRL-C, tell the user to leave via
# menu option 8.
control_c()
{
    printf '\n%s\n\n' "Select option 8 to exit. Please try again."
}
# --- main ---------------------------------------------------------------
# Install the CTRL-C trap, show the menu once, then dispatch selections
# forever; only option 8 (Exit_rsh) leaves the loop.
trap control_c SIGINT # traps the CTRL^C calls, notifies user of command
introduction # displays menu of commands
while true
do
prompt
read input;
case $input in
1) Disk_Free
;;
2) Disk_Usage
;;
3) List_Files
;;
4) Process_Status
;;
5) Examine_Volume
;;
6) Shell_Escape
;;
7) Current_Date
;;
8) Exit_rsh
;;
*) echo "Not a valid command. Please try again." # if anything else is entered by the user
continue
;;
esac
done
| true
|
d9e5436608c91e07aad6afc6446221c69d31f1c5
|
Shell
|
Dakota628/vpnfailsafe
|
/extras/pkill_hup_openvpn
|
UTF-8
| 527
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Save as /etc/NetworkManager/dispatcher.d/pkill_hup_openvpn and make
# executable to send SIGHUP to OpenVPN upon reconnection to the underlying
# network.
# Only tested with OpenVPN started manually, or via systemd. For VPN
# connections configured via NetworkManager something else may be needed.
set -eo pipefail
# NetworkManager dispatcher invokes this with $1 = interface, $2 = state.
readonly device="$1"
readonly state="$2"
# Interface carrying the default route (5th field of "ip route list 0/0").
readonly main_device="$(ip route list 0/0|cut -d' ' -f5)"
# Only react when the default-route interface itself came up.
if [[ "$device" == "$main_device" && "$state" == up ]]; then
pkill -HUP openvpn
fi
| true
|
9a36261ebb5367e3e48a4f0d7fc367ee3ef6ac1a
|
Shell
|
search-future/miyou.tv
|
/miyoutv-agent/tools/chscan.sh
|
UTF-8
| 2,034
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Copyright 2016-2023 Brazil Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# chscan.sh — for every channel in a list file, run a signal-check command
# (checksignal / recdvbchksig) for TIME seconds, print a one-line summary,
# and wait SPAN seconds before the next channel.
CMD=$0
TIME=10
SPAN=5
usage() {
echo "Usage:"
echo "$CMD --list <channellist> [--time <checktime>] [--span <checkspan>] checksignal [<checksignaloptions>]"
echo "$CMD --list <channellist> [--time <checktime>] [--span <checkspan>] recdvbchksig [<recdvbchksigoptions>]"
echo
echo "Options:"
echo "-l, --list <channellist>: Set channel list file"
echo "-t, --time <checktime>: Set check time in seconds(default: 10)"
echo "-s, --span <checkspan>: Set check span in seconds(default: 5)"
echo "-h, --help: Show this help"
}
# NOTE(review): iterating "for OPT in "$@"" while calling shift inside the
# body is fragile — the for-list is fixed at loop entry, so OPT can lag the
# shifted positionals. It works for the supported "--opt value" sequences,
# but confirm before extending the option set.
for OPT in "$@"
do
case $OPT in
'-h'|'--help')
usage
exit 1
;;
'-t'|'--time')
TIME=$2
shift 2
;;
'-s'|'--span')
SPAN=$2
shift 2
;;
'-l'|'--list')
LIST=$2
shift 2
;;
*)
# First non-option argument: the remainder is the command to run.
if echo "$1" | grep -q -v '^-.*'
then
break
fi
;;
esac
done
if [ -z "$1" ]
then
usage
exit 1
fi
# For each channel: start the checker in the background, give it TIME
# seconds, terminate it, and keep the second-to-last meaningful output line.
while read -r LINE
do
RESULT=$(
"$@" "$LINE" 2>&1 &
PID=$!
sleep "$TIME"
kill -TERM "$PID" > /dev/null 2>&1
)
echo "$LINE: $(echo "$RESULT" | tr "\r" "\n" | grep -v -e 'SIGTERM' -v -e '^\s*$'| tail -n2 | head -n1)"
sleep "$SPAN"
done < "$LIST"
| true
|
94acd18069a5bab4cee5578be1a0dbb0c3b2be6d
|
Shell
|
DrexelCyberDragons/CCDC2019
|
/Linux/RHEL/start-fedora.sh
|
UTF-8
| 1,945
| 2.765625
| 3
|
[] |
no_license
|
# CCDC hardening script (RHEL/Fedora): creates the team's admin accounts,
# scrambles every other user's password, strips unauthorized wheel members,
# locks root, and replaces all other users' shells with a nologin stub.
# sshd is stopped while accounts are changed and restarted at the end.
systemctl stop sshd
#rm -rf /etc/skel
#mv ../config/skel /etc/
# Team accounts with pre-hashed (sha512crypt) passwords.
useradd alfonzo -m -p '$6$rounds=4096$P65PHjtBjJ6el$r/GIe1OktX/1MpvEXFBazHH0vrN0TpN9xndOkKKd5vRRq4bXNSIT/3BwqLU/16WuE8raX1hq2VlbF8UulLiz31'
useradd sam -m -p '$6$rounds=4096$0ut.Q36mgUsrL$dymsL91iTnadEetup04SFXfnLWiOPuhAhxvqueZvLQ2.cCVRy/4kTusY6Cs23u0S.DBljKU1dQITZmtRN31HL1'
useradd matt -m -p '$6$rounds=4096$sLTudDG7wg1h$jxDHi1eJXk.z2cAeSRnNtJWvIGWfoJCPr3x0ReQhmCcf1i1eMZPw22g1cc1ybjjYXZmVD5IvwAvflA1TIlR8a0'
usermod -aG wheel alfonzo
usermod -aG wheel sam
usermod -aG wheel matt
usermod -aG wheel root
passwd -l root
### Password Changes
# Every account with a password (P) or no password (NP), except the team,
# root and the scoring engine, gets the same replacement hash.
for i in $( passwd -aS | grep ' P \| NP ' | cut -d' ' -f1 | sort | uniq ); do
if [ "$i" = "alfonzo" ] ; then
continue
elif [ "$i" = "sam" ] ; then
continue
elif [ "$i" = "matt" ] ; then
continue
elif [ "$i" = "root" ] ; then
continue
elif [ "$i" = "scorebot" ] ; then
continue
else
usermod $i -p '$6$rounds=4096$arm0aqVICE$qZGlom8InzFtu5jOQMhQN/JTkcVxMNigNeZse5yPmxxoQRIH6hpHC.GpoEBbUB15FUi8xACK7jLM7UqKGutuJ/'
fi
done
### Wheel Changes
# Back up the wheel membership, then remove everyone but the team and root.
grep '^wheel:.*$' /etc/group | cut -d: -f4 | sed "s/,/\n/g" > wheel.bk
for i in $(grep '^wheel:.*$' /etc/group | cut -d: -f4 | sed "s/,/\n/g"); do
if [ "$i" = "alfonzo" ] ; then
continue
elif [ "$i" = "sam" ] ; then
continue
elif [ "$i" = "matt" ] ; then
continue
elif [ "$i" = "root" ] ; then
continue
else
gpasswd -d $i wheel
fi
done
# su restricted to root:wheel, setuid preserved.
chown root:wheel /bin/su
chmod 754 /bin/su
chmod u+s /bin/su
### No login
# Replace nologin with a stub (base64 of a tiny sh script that prints
# "This account is currently not available." and exits 1).
rm -f /usr/sbin/nologin
echo 'IyEvYmluL3NoCmVjaG8gIlRoaXMgYWNjb3VudCBpcyBjdXJyZW50bHkgbm90IGF2YWlsYWJsZS4iCmV4aXQgMTsK' | base64 -d > /usr/sbin/nologin
chmod 755 /usr/sbin/nologin
# Point every non-team, non-root, non-scorebot account at the nologin stub.
for i in $( cat /etc/passwd | cut -d: -f1 ); do
if [ "$i" = "alfonzo" ] ; then
continue
elif [ "$i" = "sam" ] ; then
continue
elif [ "$i" = "matt" ] ; then
continue
elif [ "$i" = "root" ] ; then
continue
elif [ "$i" = "scorebot" ] ; then
continue
else
usermod -s /usr/sbin/nologin $i
fi
done
systemctl start sshd
| true
|
a12cd175e7bb7f5e5418c137e5f7947a84c8f161
|
Shell
|
BackBayRider/zulip-archive
|
/entrypoint.sh
|
UTF-8
| 2,899
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# GitHub Action entrypoint: build/update a static Zulip archive and publish
# it to this repository's GitHub Pages branch (force-push).
set -e
# Positional inputs supplied by the action runner (action.yml order).
zulip_organization_url=$1
zulip_bot_email=$2
zulip_bot_api_key=$3
github_personal_access_token=$4
delete_history=$5
archive_branch=$6
checked_out_repo_path="$(pwd)"
html_dir_path=$checked_out_repo_path
json_dir_path="${checked_out_repo_path}/zulip_json"
img_dir_path="${checked_out_repo_path}/assets/img"
streams_config_file_path="${checked_out_repo_path}/streams.yaml"
# Remembered so a racing commit can be detected before the final push.
initial_sha="$(git rev-parse HEAD)"
if [ ! -f $streams_config_file_path ]; then
echo "Missing streams.yaml file."
exit 1
fi
# Set up an isolated Python environment with the archiver's dependencies.
cd "/zulip-archive-action"
curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py"
python3 get-pip.py
pip install virtualenv
virtualenv -p python3 .
source bin/activate
pip3 install zulip==0.6.3
pip3 install pyyaml==5.2
# GitHub pages API is in Preview mode. This might break in future.
auth_header="Authorization: Bearer ${github_personal_access_token}"
accept_header="Accept: application/vnd.github.switcheroo-preview+json"
page_api_url="https://api.github.com/repos/${GITHUB_REPOSITORY}/pages"
# Enable GitHub pages
curl -H "$auth_header" -H "$accept_header" --data "{\"source\":{\"branch\":\"${archive_branch}\"}}" "$page_api_url"
print_site_url_code="import sys, json; print(json.load(sys.stdin)['html_url'])"
github_pages_url_with_trailing_slash=$(curl -H "${auth_header}" $page_api_url | python3 -c "${print_site_url_code}")
github_pages_url=${github_pages_url_with_trailing_slash%/}
cp default_settings.py settings.py
cp $streams_config_file_path .
# Write the Zulip API credentials into the zuliprc ini file.
crudini --set zuliprc api site $zulip_organization_url
crudini --set zuliprc api key $zulip_bot_api_key
crudini --set zuliprc api email $zulip_bot_email
export PROD_ARCHIVE=true
export SITE_URL=$github_pages_url
export HTML_DIRECTORY=$html_dir_path
export JSON_DIRECTORY=$json_dir_path
export HTML_ROOT=""
export ZULIP_ICON_URL="${github_pages_url}/assets/img/zulip.svg"
# First run builds the archive from scratch (-t); later runs are
# incremental (-i). -b then generates the HTML pages.
if [ ! -d $json_dir_path ]; then
mkdir -p $json_dir_path
mkdir -p $img_dir_path
cp assets/img/* $img_dir_path
python3 archive.py -t
else
python3 archive.py -i
fi
python3 archive.py -b
cd ${checked_out_repo_path}
git checkout $archive_branch
git fetch origin
# Abort rather than clobber commits that landed while we were processing.
current_sha="$(git rev-parse origin/${archive_branch})"
if [[ "$current_sha" != "$initial_sha" ]]
then
echo "Archive update failed, commits have been added while processing"
exit 1
fi
echo "delete history: $delete_history"
# Optionally drop all git history so the archive branch stays small.
if [[ "$delete_history" == "true" ]]
then
echo "resetting"
rm -rf .git
git config --global init.defaultBranch "$archive_branch"
git init
fi
git config --global user.email "zulip-archive-bot@users.noreply.github.com"
git config --global user.name "Archive Bot"
git add -A
git commit -m "Update archive."
git remote add origin2 https://${GITHUB_ACTOR}:${github_personal_access_token}@github.com/${GITHUB_REPOSITORY}
git push origin2 HEAD:$archive_branch -f
echo "pushed"
echo "Zulip Archive published/updated in ${github_pages_url}"
| true
|
b1245a7deb061cc7e84f7dd1ea6dc72432fc8db1
|
Shell
|
Freeaqingme/fstest
|
/tests/chflags/09.t
|
UTF-8
| 2,517
| 2.8125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# $FreeBSD: src/tools/regression/fstest/tests/chflags/09.t,v 1.1 2007/01/17 01:42:08 pjd Exp $
# fstest regression case: expect/jexpect are TAP helpers from misc.sh; each
# phase repeats the same EPERM checks for a regular file, a directory, a
# fifo, and (via lchflags/lstat) a symlink.
desc="chflags returns EPERM when one of SF_IMMUTABLE, SF_APPEND, or SF_NOUNLINK is set and securelevel is greater than 0"
dir=`dirname $0`
. ${dir}/../misc.sh
require chflags
echo "1..102"
n0=`namegen`
n1=`namegen`
n2=`namegen`
# Temporarily allow chflags inside jails; restored at the end.
old=`sysctl -n security.jail.chflags_allowed`
sysctl security.jail.chflags_allowed=1 >/dev/null
expect 0 mkdir ${n0} 0755
cdir=`pwd`
cd ${n0}
# Phase 1: regular file.
expect 0 create ${n1} 0644
expect 0 chown ${n1} 65534 65534
for flag in SF_IMMUTABLE SF_APPEND SF_NOUNLINK; do
expect 0 chflags ${n1} ${flag}
jexpect 1 `pwd` EPERM chflags ${n1} UF_IMMUTABLE
expect ${flag} stat ${n1} flags
jexpect 1 `pwd` EPERM -u 65533 -g 65533 chflags ${n1} UF_IMMUTABLE
expect ${flag} stat ${n1} flags
jexpect 1 `pwd` EPERM -u 65534 -g 65534 chflags ${n1} UF_IMMUTABLE
expect ${flag} stat ${n1} flags
done
expect 0 chflags ${n1} none
expect 0 unlink ${n1}
# Phase 2: directory.
expect 0 mkdir ${n1} 0755
expect 0 chown ${n1} 65534 65534
for flag in SF_IMMUTABLE SF_APPEND SF_NOUNLINK; do
expect 0 chflags ${n1} ${flag}
jexpect 1 `pwd` EPERM chflags ${n1} UF_IMMUTABLE
expect ${flag} stat ${n1} flags
jexpect 1 `pwd` EPERM -u 65533 -g 65533 chflags ${n1} UF_IMMUTABLE
expect ${flag} stat ${n1} flags
jexpect 1 `pwd` EPERM -u 65534 -g 65534 chflags ${n1} UF_IMMUTABLE
expect ${flag} stat ${n1} flags
done
expect 0 chflags ${n1} none
expect 0 rmdir ${n1}
# Phase 3: fifo.
expect 0 mkfifo ${n1} 0644
expect 0 chown ${n1} 65534 65534
for flag in SF_IMMUTABLE SF_APPEND SF_NOUNLINK; do
expect 0 chflags ${n1} ${flag}
jexpect 1 `pwd` EPERM chflags ${n1} UF_IMMUTABLE
expect ${flag} stat ${n1} flags
jexpect 1 `pwd` EPERM -u 65533 -g 65533 chflags ${n1} UF_IMMUTABLE
expect ${flag} stat ${n1} flags
jexpect 1 `pwd` EPERM -u 65534 -g 65534 chflags ${n1} UF_IMMUTABLE
expect ${flag} stat ${n1} flags
done
expect 0 chflags ${n1} none
expect 0 unlink ${n1}
# Phase 4: symlink (lchflags/lstat operate on the link itself).
expect 0 symlink ${n2} ${n1}
expect 0 lchown ${n1} 65534 65534
for flag in SF_IMMUTABLE SF_APPEND SF_NOUNLINK; do
expect 0 lchflags ${n1} ${flag}
jexpect 1 `pwd` EPERM lchflags ${n1} UF_IMMUTABLE
expect ${flag} lstat ${n1} flags
jexpect 1 `pwd` EPERM -u 65533 -g 65533 lchflags ${n1} UF_IMMUTABLE
expect ${flag} lstat ${n1} flags
jexpect 1 `pwd` EPERM -u 65534 -g 65534 lchflags ${n1} UF_IMMUTABLE
expect ${flag} lstat ${n1} flags
done
expect 0 lchflags ${n1} none
expect 0 unlink ${n1}
# Restore the jail sysctl and clean up the scratch directory.
sysctl security.jail.chflags_allowed=${old} >/dev/null
cd "${cdir}"
expect 0 rmdir ${n0}
| true
|
a0c024bab56bc2e417c61c57aa2d9c9269e75fc9
|
Shell
|
tkq66/LLVMBasicDifferenceAnalysis
|
/run.sh
|
UTF-8
| 262
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/sh
# Run the LLVM difference analysis on one of the bundled examples.
# Usage: run.sh 1|2
#
# FIX: "$1" is now quoted with an empty default — the original unquoted $1
# made "[" fail with a syntax error ("unary operator expected") when the
# script was invoked without an argument, instead of printing the hint.
if [ "${1:-}" = "1" ]; then
    bin/DifferencePass resources/example1.ll c d
elif [ "${1:-}" = "2" ]; then
    bin/DifferencePass resources/example2.ll x y
else
    echo "Please enter the option 1 or 2 for the example file you would like to run difference analysis on."
fi
| true
|
5b89b72196023b9b60bf56cf9c7693dd4beb3539
|
Shell
|
arielorn/goalert
|
/devtools/ci/tasks/scripts/codecheck.sh
|
UTF-8
| 294
| 3.265625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# CI gate: fail the build if any non-vendored Go file is unformatted
# (gofmt -l) or if the git working tree has uncommitted changes
# (e.g. generated code that was not checked in).
set -e
NOFMT=$(gofmt -l $(find . -name '*.go' |grep -v /vendor))
if test "$NOFMT" != ""
then
echo "Found non-formatted files:"
echo "$NOFMT"
exit 1
fi
CHANGES=$(git status -s --porcelain)
if test "$CHANGES" != ""
then
echo "Found changes in git:"
echo "$CHANGES"
exit 1
fi
| true
|
464eb86824d8ddd4e3db91c7f137f163a7528e75
|
Shell
|
ShubhamGG/Anubis
|
/docs/mermaid/render.sh
|
UTF-8
| 549
| 3.234375
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# Render every Mermaid (*.mmd) diagram under this directory into ../img as
# both PNG and SVG, installing mermaid-cli locally on first run.
cd $(dirname $(realpath $0))
if [ ! -d node_modules ]; then
yarn add @mermaid-js/mermaid-cli
fi
if [ ! -d ../img ]; then
mkdir ../img
fi
# Skip diagrams that already have either rendered output.
for mmdf in $(find -name '*.mmd'); do
if [ ! -f ../img/${mmdf}.png ] && [ ! -f ../img/${mmdf}.svg ]; then
echo "mermaid rendering ${mmdf}"
node_modules/.bin/mmdc -i ${mmdf} -o ../img/${mmdf}.png -b transparent -t forest
node_modules/.bin/mmdc -i ${mmdf} -o ../img/${mmdf}.svg -b transparent -t forest
else
echo "mermaid skipping ${mmdf}"
fi
done
| true
|
811786b39dbf62b16a11f55ec7f87f556ca3a858
|
Shell
|
nanxinjin/CS252
|
/lab3/lab3-src/test-shell/test10
|
UTF-8
| 659
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
# Test10: feed the shell under test several backgrounded "ls" commands and
# verify (a) their output appears and (b) no zombie (defunct) children
# remain afterwards.
rm -f shell-out
myname=`whoami`
echo "Test10: Zombie processes"
# Baseline count of defunct processes owned by this user.
before=`/bin/ps -u $myname | grep -c defunc`
echo "ls &" > shell-in
echo "ls &" >> shell-in
echo "ls &" >> shell-in
echo "ls &" >> shell-in
echo "ls &" >> shell-in
echo "ls &" >> shell-in
echo "ls &" >> shell-in
echo "sleep 5" >> shell-in
../shell < shell-in > shell-out &
sleep 1
# The backgrounded listings should mention files containing "test".
grep test shell-out > out2 2>&1
if [ $? -ne 0 ]
then
echo "Test10 failed. Output incorrect."
exit -1
fi
# The defunct count must not have grown while the shell's children exited.
after=`/bin/ps -u $myname | grep -c defunc`
if [ $before -ne $after ]
then
echo "Test10 failed. Zombie processes still around ($before, $after)"
exit -1
fi
echo Test10 OK
exit 0
| true
|
8aafd8270574b451cbdd3d93e14ef1618a64b6e9
|
Shell
|
dinfuehr/dora
|
/bench/fannkuchredux/bench.sh
|
UTF-8
| 539
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Benchmark fannkuchredux($ARG) across runtimes: HotSpot java, dora,
# cacao, and perl — $RUNS timed runs each. Labels go to stderr so stdout
# stays clean for the timing output.
RUNS=3
ARG=12
echo "java" 1>&2
javac fannkuchredux.java
for i in $(seq 1 $RUNS); do time java fannkuchredux $ARG; done
echo
echo "dora" 1>&2
# Dora is built from the repository root two levels up.
pushd ../..
cargo build --release
for i in $(seq 1 $RUNS); do time target/release/dora bench/fannkuchredux/fannkuchredux.dora $ARG; done
popd
echo "cacao" 1>&2
# Cacao needs 1.6-compatible bytecode, so recompile with -source/-target.
javac -source 1.6 -target 1.6 fannkuchredux.java
for i in $(seq 1 $RUNS); do time /usr/local/cacao/bin/cacao fannkuchredux $ARG; done
echo
echo "perl" 1>&2
for i in $(seq 1 $RUNS); do time perl fannkuchredux.pl $ARG; done
| true
|
b7d9448b0e31e645a617512ca8df2877bcb0ce06
|
Shell
|
BristolTopGroup/DailyPythonScripts
|
/bin/x_01b_all_vars
|
UTF-8
| 498
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the ttbar normalisation step for every kinematic variable, at most
# N_JOBS python processes at a time; logs go to logs/01_<var>_*.log.
echo "This will take a while ... grab a coffee/tea/water/cocktail"
mkdir -p logs
N_JOBS=4
i=0
for var in MET HT ST WPT lepton_pt abs_lepton_eta NJets; do
echo "Getting ttbar normalisation: $var"
nohup time python dps/analysis/xsection/01_get_ttjet_normalisation.py -v $var --visiblePS &> logs/01_${var}_bgs_13TeV_fullPS.log &
let i+=1
# Throttle: after launching N_JOBS jobs, wait for the batch to finish.
if (( $i % N_JOBS == 0 ))
then
echo "Waiting on the above to finish."
wait;
fi
done
wait;
echo "All done! Time to run x_02b_all_vars."
| true
|
57c61ee42867803d3b7610d0e50e1543bb6f3e16
|
Shell
|
moskit/m-script
|
/www/bin/dash.cgi
|
UTF-8
| 4,031
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# CGI dashboard: emit the HTML status page for all monitored clusters and
# servers. HTML helpers (print_cgi_headers, open_cluster, ...) come from
# lib/dash_functions.sh. The local machine is rendered first; if it belongs
# to a configured cluster it is shown inside that cluster instead.
source "$PWD/../../lib/dash_functions.sh"
print_cgi_headers
Size="Default size" ; Image="Source Image" ; Cloud="Cloud" ; Region="Region" ; Role="Default Role"
open_cluster "Cluster"
print_cluster_inline "Size" "Image" "Cloud" "Region" "Role"
close_cluster_line
print_timeline "Server"
close_cluster
# localhost first; if it belongs to a listed cluster, that cluster will be the first
# nodes.list is pipe-delimited: field 4 = server name, 5 = cluster, 6 = cloud.
for localip in `"$PWD"/../../helpers/localips | grep -v '127.0.0.1'` ; do
localserver=`grep ^$localip\| "$PWD/../../nodes.list"`
[ -z "$localserver" ] && continue
localcluster=`echo "$localserver" | cut -d'|' -f5`
localcloud=`echo "$localserver" | cut -d'|' -f6`
localserver=`echo "$localserver" | cut -d'|' -f4`
[ -n "$localserver" ] && break
done
if [ -z "$localcluster" ]; then
echo -e "<div class=\"clustername\"><span class=\"indent\">localhost</span></div>\n<div class=\"cluster\" id=\"localhost\">\n<div class=\"server\" id=\"localhost\">\n<span class=\"servername clickable\" id=\"localhost_status\" onclick=\"showDetails('localhost_status','serverdetails')\">localhost</span>"
cat "../nodes/localhost/dash.html" 2>/dev/null || echo "No data"
echo -e "</div>\n<div class=\"details\" id=\"localhost_details\"></div>\n</div>\n</div>"
else
[ -d "../nodes/$localcloud/$localcluster/$localserver" ] || install -d "../nodes/$localcloud/$localcluster/$localserver"
fi
# One section per ../nodes/<cloud>/<cluster> directory; cluster metadata is
# looked up in conf/clusters.conf and the cloud caches under $M_TEMP.
for cluster in `find ../nodes/*/* -maxdepth 0 -type d 2>/dev/null`
do
cld=`echo "$cluster" | cut -d'/' -f3`
cls=`echo "$cluster" | cut -d'/' -f4`
clsconf=`grep "|${cld}$" "$PWD/../../conf/clusters.conf" | grep "^$cls|"`
size=`echo "$clsconf" | cut -d'|' -f5`
# Translate flavor/image ids to human-readable names when caches exist.
if [ -f "$M_TEMP/cloud/$cld/flavors.list" ]; then
sizeh=`cat "$M_TEMP/cloud/$cld/flavors.list" | grep ^$size\| | cut -d'|' -f2 | tr -d '"'`
else
sizeh=$size
fi
img=`echo "$clsconf" | cut -d'|' -f6`
if [ -f "$M_TEMP/cloud/$cld/images.list" ]; then
imgh=`cat "$M_TEMP/cloud/$cld/images.list" | grep ^$img\| | cut -d'|' -f2 | tr -d '"'`
else
imgh=$img
fi
region=`echo "$clsconf" | cut -d'|' -f3`
role=`echo "$clsconf" | cut -d'|' -f10`
open_cluster "${cld}|${cls}"
print_cluster_inline "sizeh" "imgh" "cld" "region" "role"
close_cluster_line
unset sizeh imgh region role
# If localhost belongs to this cluster, render it first within the section.
if [ "_$cls" == "_$localcluster" ] && [ "_$cld" == "_$localcloud" ]; then
node="${cld}/${cls}|${localserver}"
echo -e "<div class=\"server\" id=\"localhost\">\n<span class=\"servername clickable\" id=\"${node}_status\" onclick=\"showDetails('${node}_status','serverdetails')\" title=\"$localserver\">${localserver:0:20}</span>"
cat "../nodes/localhost/dash.html" 2>/dev/null || echo "No data"
[ -e "../nodes/localhost/notfound" ] && echo "<div class=\"chunk\"><div style=\"width:4px;height:4px;margin: 8px 3px 8px 3px;background-color: orange;\"> </div></div>"
[ -e "../nodes/localhost/stopped" ] && echo "<div class=\"chunk\"><div style=\"width:4px;height:4px;margin: 8px 3px 8px 3px;background-color: red;\"> </div></div>"
echo -e "</div>\n<div class=\"details\" id=\"${node}_details\"></div>"
fi
# Remaining servers of the cluster, sorted; orange dot = notfound marker,
# red dot = stopped marker.
for server in `find $cluster/* -maxdepth 0 -type d 2>/dev/null | grep -v "^$cluster/$localserver$" | sort`
do
node="${cld}/${cls}|${server##*/}"
serverh="${server##*/}"
echo -e "<div class=\"server\" id=\"$node\">\n<span class=\"servername clickable\" id=\"${node}_status\" onclick=\"showDetails('${node}_status','serverdetails')\" title=\"$serverh\">${serverh:0:20}</span>"
cat "../nodes/$server/dash.html" 2>/dev/null || echo "No data"
[ -e "../nodes/$server/notfound" ] && echo "<div class=\"chunk\"><div style=\"width:4px;height:4px;margin: 8px 3px 8px 3px;background-color: orange;\"> </div></div>"
[ -e "../nodes/$server/stopped" ] && echo "<div class=\"chunk\"><div style=\"width:4px;height:4px;margin: 8px 3px 8px 3px;background-color: red;\"> </div></div>"
echo -e "</div>\n<div class=\"details\" id=\"${node}_details\"></div>"
done
close_cluster
done
exit 0
| true
|
e0ae6dd4498ec1c9c49291937c5bb76e065ba221
|
Shell
|
jition1987/zabbix_install
|
/Zabbix4.4.1-install/Zabbix-install.sh
|
UTF-8
| 10,522
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
# Offline one-click installer for Zabbix 4.4.1 on CentOS/RHEL 7+.
# process() performs the entire installation; the logging harness at the
# bottom of the file captures its output into /var/log/zabbix_install_*.log.
# NOTE(review): must run as root and expects zabbix_APP_RPM/ and
# zabbix-4.4.1/ next to this script — confirm before running.
# NOTE(review): banner alignment whitespace below is best-effort; the
# original spacing was garbled in extraction.
process()
{
# Informational log-file name shown in the final summary.
install_date="zabbix_install_$(date +%Y-%m-%d_%H:%M:%S).log"
printf "
#######################################################################
#                 欢迎使用Zabbix离线一键部署脚本                      #
#             脚本适配环境CentOS7+/Radhat7+、内存1G+                  #
#        避免软件包产生冲突建议使用纯净的操作系统进行安装!            #
#               更多信息请访问 https://xxshell.com                    #
#######################################################################
"
# Prompt until a non-empty MySQL root password is supplied.
while :; do echo
read -p "设置Mysql数据库root密码(建议使用字母+数字): " Database_Password
[ -n "$Database_Password" ] && break
done
#
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在软件与编译环境 请稍等~                         #"
echo "#                                                                     #"
echo "#######################################################################"
rpm -Uvh zabbix_APP_RPM/*.rpm --force --nodeps
# rpm-install httpd, php, MariaDB, the build toolchain, etc.
# (forced, dependency checks disabled)
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在关闭SElinux策略 请稍等~                        #"
echo "#                                                                     #"
echo "#######################################################################"
setenforce 0
# Disable SELinux for the current boot...
sed -i "s/SELINUX=enforcing/SELINUX=disabled/" /etc/selinux/config
# ...and permanently across reboots.
# NOTE: the 'echo $?="..."' idiom below prints e.g. '0=<message>';
# it reports the last exit status, it does not test it.
echo $?="关闭SElinux成功"
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在配置Firewall策略 请稍等~                       #"
echo "#                                                                     #"
echo "#######################################################################"
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --zone=public --add-port=10050/tcp --permanent
firewall-cmd --zone=public --add-port=10051/tcp --permanent
firewall-cmd --reload
firewall-cmd --zone=public --list-ports
# Open TCP ports 80 (web UI), 10050 (agent), 10051 (server).
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在配置Mariadb数据库 请稍等~                      #"
echo "#                                                                     #"
echo "#######################################################################"
systemctl start mariadb
systemctl enable mariadb
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在配置PHP环境 请稍等~                            #"
echo "#                                                                     #"
echo "#######################################################################"
systemctl start php-fpm
systemctl enable php-fpm
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在配置Apache服务 请稍等~                         #"
echo "#                                                                     #"
echo "#######################################################################"
systemctl start httpd
systemctl enable httpd
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在创建Zabbix用户 请稍等~                         #"
echo "#                                                                     #"
echo "#######################################################################"
groupadd zabbix
useradd zabbix -g zabbix -s /sbin/nologin
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在编译Zabbix软件 请稍等~                         #"
echo "#                                                                     #"
echo "#######################################################################"
chmod 776 -R zabbix-4.4.1
# Work around insufficient-permission issues in the unpacked sources.
cd zabbix-4.4.1
# Enter the Zabbix source directory.
./configure \
--prefix=/usr/local/zabbix \
--enable-server \
--enable-agent \
--with-mysql=/usr/bin/mysql_config \
--with-net-snmp \
--with-libcurl \
--with-libxml2 \
--enable-java
make -j 2 && make install
# Build and install Zabbix.
echo $?="Zabbix编译完成"
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在配置Mariadb数据库 请稍等~                      #"
echo "#                                                                     #"
echo "#######################################################################"
mysqladmin -u root password "$Database_Password"
echo "---mysqladmin -u root password "$Database_Password""
# Set the MySQL root password.
mysql -uroot -p$Database_Password -e "CREATE DATABASE zabbix CHARACTER SET utf8 COLLATE utf8_general_ci;"
echo $?="正在创建zabbix数据库"
# Create the zabbix database.
mysql -uroot -p$Database_Password -e "use zabbix;"
echo $?="对zabbix数据库进行操作"
# Select the zabbix database.
mysql -uroot -p$Database_Password zabbix < database/mysql/schema.sql
mysql -uroot -p$Database_Password zabbix < database/mysql/images.sql
mysql -uroot -p$Database_Password zabbix < database/mysql/data.sql
echo $?="对zabbix数据库进行操作"
# Import the Zabbix schema, images and seed data.
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在配置Zabbix软件 请稍等~                         #"
echo "#                                                                     #"
echo "#######################################################################"
cp misc/init.d/fedora/core/* /etc/init.d/
# Copy the init scripts into /etc/init.d/.
echo $?="拷贝启动文件到/etc/init.d/下"
sed -i "s#BASEDIR=/usr/local#BASEDIR=/usr/local/zabbix#" /etc/init.d/zabbix_server
sed -i "s#BASEDIR=/usr/local#BASEDIR=/usr/local/zabbix#" /etc/init.d/zabbix_agentd
# Point the init scripts at the install prefix.
echo $?="编辑启动模块"
sed -i "s|# DBHost=localhost|DBHost=localhost|" /usr/local/zabbix/etc/zabbix_server.conf
sed -i "s|DBUser=zabbix|DBUser=root|" /usr/local/zabbix/etc/zabbix_server.conf
sed -i "s|# DBPassword=|DBPassword=$Database_Password|" /usr/local/zabbix/etc/zabbix_server.conf
# Configure the DB settings in zabbix_server.conf.
echo $?="编辑Zabbix配置配置文件"
/etc/init.d/zabbix_server restart
/etc/init.d/zabbix_agentd restart
# Start the Zabbix services.
systemctl restart zabbix_server
systemctl restart zabbix_agentd
# Restart to verify the services.
# Check with "netstat -an | grep LIS" that ports 10050/10051 are listening;
# if not, the database or the config file is likely at fault.
systemctl enable zabbix_server
systemctl enable zabbix_agentd
echo $?="配置Zabbix完成"
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在配置PHP.ini 请稍等~                            #"
echo "#                                                                     #"
echo "#######################################################################"
sed -i "s/post_max_size = 8M/post_max_size = 32M/" /etc/php.ini
sed -i "s/max_execution_time = 30/max_execution_time = 600/" /etc/php.ini
sed -i "s/max_input_time = 60/max_input_time = 600/" /etc/php.ini
sed -i "s#;date.timezone =#date.timezone = Asia/Shanghai#" /etc/php.ini
# Tune the PHP settings required by the Zabbix frontend.
echo $?="PHP.inin配置完成完成"
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在配置Zabbix前台文件 请稍等~                     #"
echo "#                                                                     #"
echo "#######################################################################"
rm -rf /var/www/html/*
# Empty the web root.
cp -r frontends/php/* /var/www/html/
# Copy the Zabbix frontend into the web root.
chown -R apache:apache /var/www/html/
chmod -R 777 /var/www/html/conf/
# Give Apache ownership of the web root.
echo $?="网页文件拷贝完成"
echo "#######################################################################"
echo "#                                                                     #"
echo "#                  正在重启服务 请稍等~                               #"
echo "#                                                                     #"
echo "#######################################################################"
systemctl restart php-fpm httpd mariadb zabbix_server zabbix_agentd
echo $?="服务启动完成"
echo "--------------------------- 安装已完成 ---------------------------"
echo " 数据库名 :zabbix"
echo " 数据库用户名 :root"
echo " 数据库密码 :"$Database_Password
echo " 网站目录 :/var/www/html"
echo " Zabbix登录 :http://主机IP"
echo " 安装日志文件 :/var/log/"$install_date
echo "------------------------------------------------------------------"
echo " 如果安装有问题请反馈安装日志文件。"
echo " 使用有问题请在这里寻求帮助:https://www.xxshell.com"
echo " 电子邮箱:admin@xxshell.com"
echo "------------------------------------------------------------------"
}
# --- Logging harness -------------------------------------------------------
# All of process()'s output is redirected into a timestamped log file while
# a background "tail -f" mirrors that log to the console in real time.
LOGFILE=/var/log/"zabbix_install_$(date +%Y-%m-%d_%H:%M:%S).log"
touch "$LOGFILE"
tail -f "$LOGFILE" &
pid=$!
exec 3>&1                 # save stdout on fd 3
exec 4>&2                 # save stderr on fd 4
exec &>"$LOGFILE"         # route all script output into the log
process
ret=$?
exec 1>&3 3>&-            # restore stdout
exec 2>&4 4>&-            # restore stderr
# FIX: the tail -f mirror was never terminated, leaving a stray process
# running after the installer finished.
kill "$pid" 2>/dev/null
# FIX: $ret was captured but never used; propagate process()'s status as
# the script's exit code.
exit "$ret"
| true
|
c9aada85a72e3992d2ebb3ee5100ed3a0a29218b
|
Shell
|
AnthonyAmanse/snap-and-translate
|
/scripts/build.sh
|
UTF-8
| 2,152
| 3.734375
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-dco-1.1"
] |
permissive
|
#!/bin/bash
echo -e "Build environment variables:"
echo "REGISTRY_URL=${REGISTRY_URL}"
echo "REGISTRY_NAMESPACE=${REGISTRY_NAMESPACE}"
echo "IMAGE_NAME=${IMAGE_NAME}"
echo "BUILD_NUMBER=${BUILD_NUMBER}"
# Learn more about the available environment variables at:
# https://console.bluemix.net/docs/services/ContinuousDelivery/pipeline_deploy_var.html#deliverypipeline_environment
# To review or change build options use:
# bx cr build --help
echo -e "Checking for Dockerfile at the repository root"
if [ -f server/Dockerfile ]; then
echo "Dockerfile found"
else
echo "Dockerfile not found"
exit 1
fi
# move env.sample file // .env is required in dockerfile
mv server/env.sample server/.env
# handle if user has entered api keys
# if [ -z "${WATSON_NLU}" && -z "${WATSON_TRANSLATOR}"]; then
# echo "User didn't specify watson services"
# else
# echo "copying credentials to .env"
# sed -i "s##${WATSON_NLU}#" .env
# sed -i "s##${WATSON_TRANSLATOR}#" .env
# // sed urls as well
# WATSON_API_KEYS_WERE_PROVIDED=true
# enter this in build.properties later so
# deploy.sh can remove bindings in watson-lang-trans.yml file
# fi
echo -e "Building container image"
set -x
bx cr build -t $REGISTRY_URL/$REGISTRY_NAMESPACE/$IMAGE_NAME:$GIT_COMMIT server
set +x
# devops pipeline specific
# using build.properties to pass env variables
echo "Checking archive dir presence"
if [ -z "${ARCHIVE_DIR}" ]; then
echo -e "Build archive directory contains entire working directory."
else
echo -e "Copying working dir into build archive directory: ${ARCHIVE_DIR} "
mkdir -p ${ARCHIVE_DIR}
find . -mindepth 1 -maxdepth 1 -not -path "./$ARCHIVE_DIR" -exec cp -R '{}' "${ARCHIVE_DIR}/" ';'
fi
# If already defined build.properties from prior build job, append to it.
cp build.properties $ARCHIVE_DIR/ || :
# TEST_NODEJS_IMAGE_NAME name from build.properties will be used in deploy script
WATSON_TESSERACT_IMAGE=$REGISTRY_URL/$REGISTRY_NAMESPACE/$IMAGE_NAME:$GIT_COMMIT
# write to build.properties
echo "WATSON_TESSERACT_IMAGE=${WATSON_TESSERACT_IMAGE}" >> $ARCHIVE_DIR/build.properties
cat $ARCHIVE_DIR/build.properties
| true
|
80f9de8da4779335bad08bcd1e9e4c91794723e9
|
Shell
|
mrakitin/DataCrunching
|
/ProcessingScripts/report_missing.sh
|
UTF-8
| 690
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# This script takes 2 argument:
#
# - $1 the file with SMILES strings and IDs
# - $2 an optional filename prefix
#
#. /software/anaconda3/etc/profile.d/conda.sh
#conda activate py3
#
# Conversion of SMILES string to MOL2:
#
# - Done one at a time as OpenBabel might crash attempting this
# and if that happens only 1 molecule is lost this way
#
if [ $# -eq 2 ]
then
prefix=$2
else
prefix=""
fi
declare -a fields
num=-1
# Read "<smiles> <id>" lines and print the IDs that have neither a
# <prefix><id>.dlg nor a compressed <prefix><id>.dlg.bz2 result file.
while IFS= read -r line
do
((num++))
fields=($line)
smiles=${fields[0]}
id=${fields[1]}
if [ -f $prefix$id.dlg ]
then
continue
fi
if [ -f $prefix$id.dlg.bz2 ]
then
continue
fi
# Missing result: ID goes to stdout, the line number to stderr (progress).
echo $id
echo $num > /dev/stderr
done < $1
| true
|
7e385b2a88817c48c307ea90026e12eb4513a765
|
Shell
|
lab85-ru/ti-dvsdk_omapl138-evm_04_03_00_06
|
/setup.sh
|
UTF-8
| 833
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/sh
# Interactive host setup for TI DVSDK development: runs each setup helper
# in sequence, stopping on failure via check_status.
echo
echo "--------------------------------------------------------------------------------"
echo "TISDK setup script"
echo
echo "This script will set up your development host for dvsdk development."
echo "Parts of this script require administrator priviliges (sudo access)."
echo "--------------------------------------------------------------------------------"
cwd=`dirname $0`
# check_status is presumably defined in bin/common.sh — confirm there.
. $cwd/bin/common.sh
$cwd/bin/setup-host-check.sh
check_status
$cwd/bin/setup-package-install.sh
check_status
$cwd/bin/setup-targetfs-nfs.sh
check_status
$cwd/bin/setup-tftp.sh
check_status
$cwd/bin/setup-minicom.sh
check_status
$cwd/bin/setup-uboot-env.sh
check_status
echo
echo "TISDK setup completed!"
echo "Please continue reading the Software Developer's Guide for more information on"
echo "how to develop software on the EVM"
|
61f4ff5c4a7170f13e107c2de49f9a1238cc3c49
|
Shell
|
hpanike/Scripts
|
/Bash/rsyc_incremental.sh
|
UTF-8
| 1,773
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Incremental rsync backup with four rotating hard-linked snapshots
# (<backup_dir>/0 = newest ... 3 = oldest); progress logged to
# /var/log/rsync.log.
#Variables
# FIX: was "PATH=$PATH:/bin/usr/bin" — a missing colon appended the
# nonexistent directory /bin/usr/bin instead of /bin and /usr/bin.
PATH=$PATH:/bin:/usr/bin
DATE=$(date '+%F')          # e.g. 2024-01-31; names the new snapshot dir
DIR="<backup_dir>"          # placeholder: set to the backup root
DATE2=$(date '+%F %T')      # timestamp used in the log header
#Functions
# Render a duration in seconds as "Dd Hh Mm Ss".
# $1 - duration in seconds
function show_time {
local total=$1
local sec=0 min=0 hour=0 day=0
sec=$((total % 60))
if ((total >= 60)); then
min=$(( (total / 60) % 60 ))
if ((total >= 3600)); then
hour=$(( (total / 3600) % 24 ))
day=$(( total / 86400 ))
fi
fi
echo "$day"d "$hour"h "$min"m "$sec"s
}
# Get time as a UNIX timestamp (seconds elapsed since Jan 1, 1970 0:00 UTC)
T="$(date +%s)"
echo "[$DATE2:backup started]" >> /var/log/rsync.log 2>&1
# Generation rotation: "3" is the oldest snapshot (deleted via the
# directory its symlink points at), then 2->3, 1->2, 0->1.
if [ -e ${DIR}/3 ]; then
TOKILL=`readlink -n ${DIR}/3`
echo "deleting: ${DIR}/$TOKILL"
rm -rf ${DIR}/$TOKILL && rm -rf ${DIR}/3
fi
if [ -e ${DIR}/2 ]; then
mv ${DIR}/2 ${DIR}/3
fi
if [ -e ${DIR}/1 ]; then
mv ${DIR}/1 ${DIR}/2
fi
if [ -e ${DIR}/0 ]; then
mv ${DIR}/0 ${DIR}/1
fi
mkdir -p ${DIR}/$DATE
# Incremental copy: unchanged files are hard-linked against the previous
# generation (--link-dest), so only changed files consume space.
# NOTE(review): the first rsync reads ./excludes.txt (relative) while the
# second reads /git/backups/excludes.txt -- confirm both paths are intended.
rsync -axhr --stats --safe-links --exclude-from=excludes.txt --exclude 'Trash' -z --delete \
--link-dest=${DIR}/1/ \
<rsync_primary_location> ${DIR}/${DATE}/ >> /var/log/rsync.log 2>&1 || echo "failed on users backup"
rsync -axhr --stats --safe-links --exclude-from=/git/backups/excludes.txt --exclude 'Trash' -z --delete \
--link-dest=${DIR}/1/ \
<rsync_secondary_location> ${DIR}/${DATE}/ >> /var/log/rsync.log 2>&1 || echo "failed on users backup"
# "0" is a symlink to the newest snapshot directory.
ln -s $DATE ${DIR}/0
# Elapsed wall-clock seconds for the whole run.
T="$(($(date +%s)-T))"
echo "Backup started on: ${DATE}" >> /var/log/rsync.log 2>&1
echo "Backup ran in: " >> /var/log/rsync.log 2>&1
show_time ${T} >> /var/log/rsync.log 2>&1
printf "\n" >> /var/log/rsync.log 2>&1
printf "\n" >> /var/log/rsync.log 2>&1
logger "Backup ran in: ${T}. Started on ${DATE} "
| true
|
68d7677f1e132515496515e9f61179b463720496
|
Shell
|
ChinaXing/appcheck
|
/plugins/check_cpu
|
UTF-8
| 953
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
#####################################################
# Author : yunxing - yunxing.cyx
# Last modified : 2011-12-28 16:53
# Function : show cpu info
# Usage :
# check_cpu [-h] option
# option : like idle,nice,sys,iowait,irq and so on..
#####################################################
# Force C locale so mpstat's column headers are predictable.
LANG=C
#04:47:47 PM CPU %user %nice %sys %iowait %irq %soft %steal %idle intr/s
#04:47:19 PM all 2.18 0.00 2.97 0.21 0.02 0.10 0.00 94.53 257.23
# No arguments: print a usage hint on stderr and exit.
if [ $# -lt 1 ]
then
echo -n "check_cpu -idle" 1>&2
exit 0
fi
if [ "x$1" = "x-h" ]
then
echo "cpu -idle"
exit 0
fi
# Find the header column whose name matches $1, then read the value in
# the same column from the data row below it (getline advances to it).
vlu=$(mpstat | tail -2 |awk '{for(i=1;i<=NF;i++){if ( $i ~ /'"$1"'/ ){ getline ; print $i ; }}}')
# With a threshold ($2): report whether the sampled value exceeds it
# (bc handles the floating-point comparison).
if [ $# -lt 2 ]
then
echo "$vlu"
else
ruslt=$(echo "$vlu > ${2}"| bc)
if [ $ruslt -eq 1 ]
then
echo "[big than ${2}: $vlu]"
else
echo "[ok]"
fi
fi
| true
|
ec4908ad1c71b19d50759f9a2ea1e682410f49fd
|
Shell
|
haibinyang/JarWrapper
|
/commonDeamon.sh
|
UTF-8
| 3,775
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# jarWrapper Startup script for the RunDeck Launcher install
# paramaters:
# - env vars: []
# - standard RDECK_PORT values: [http: 4440, https: 4443]
# Base directory for all deployments.
BASE_DIR="/home/yanghaibin/deployWorkspace"
# Check that the base directory is writable.
[ -w $BASE_DIR ] || {
echo "BASE_DIR dir not writable: $BASE_DIR"
exit 1;
}
# Second-level directories: upload and instance.
UPLOAD_DIR="$BASE_DIR/upload"
INSTANCE_DIR="$BASE_DIR/instance"
mkdir -p $UPLOAD_DIR
mkdir -p $INSTANCE_DIR
# Read the command-line arguments.
action=$1
serviceName=$2
if [ -z $action ]; then
echo "action is unset";
exit 1
fi
if [ -z $serviceName ]; then
echo "serviceName is unset";
exit 1
fi
echo "action: $action"
echo "serviceName: $serviceName"
# Create the per-service subdirectories.
JAR_DIR=$INSTANCE_DIR/$serviceName/jar
PID_DIR=$INSTANCE_DIR/$serviceName/pid
LOK_DIR=$INSTANCE_DIR/$serviceName/lock
LOG_DIR=$INSTANCE_DIR/$serviceName/log
mkdir -p $JAR_DIR
mkdir -p $PID_DIR
mkdir -p $LOK_DIR
mkdir -p $LOG_DIR
# Every action uses the pid and lock files.
PID_FILE=$PID_DIR/pid
LOK_FILE=$LOK_DIR/lock
echo "PID_FILE: $PID_FILE"
echo "LOK_FILE: $LOK_FILE"
# TODO
# NOTE(review): $jarFileName is not set at this point, so this resolves
# to "$UPLOAD_DIR/" -- confirm where the jar name should come from.
uploadJarFilePath=$UPLOAD_DIR/$jarFileName
# "head" symlink
#JAR_FILE=$JAR_DIR/${serviceName}.jar
# only needed by init and replaceJar
# normally the jar pointed at by "head" is used
JAR_FILE=$JAR_DIR/head # symlink pointing at the real jar
echo "JAR_FILE: $JAR_FILE"
# Misc.
RETVAL=0
DATE=`/bin/date +%Y%m%d-%H%M%S`
# Print the success tag; always returns 0.
echo_success() {
printf '%s\n' "[OK]"
return 0
}
# Print the failure tag; always returns 1.
echo_failure() {
printf '%s\n' "[FAILED]"
return 1
}
# Placeholder for instance initialisation; currently only reports itself.
# Leaves RETVAL at 0 for the dispatcher's final "exit $RETVAL".
init() {
echo "init"
RETVAL=0
return $RETVAL
}
# Record a newly uploaded jar (work in progress).
# $3 - file name of the uploaded jar
# Returns 0 on success, 1 when no jar name was given.
replacejar() {
RETVAL=0
echo "replacejar"
jarFileName=$3
# Fixed: the guard used to test $jarFullPath, which is never set at this
# point, so the function failed unconditionally (and the message named
# the wrong variable).
if [ -z "$jarFileName" ]; then
echo "jarFileName is unset";
return 1
fi
echo "jarFileName: $jarFileName"
# TODO: create the symlink to the uploaded jar (left unimplemented upstream)
return $RETVAL
}
# Start the service jar in the background (nohup) unless the pid and lock
# files indicate it is already running; records the pid and creates the
# lock file on success.
start() {
jarFullPath=$JAR_DIR/head
# Check that it exists.
# TODO(review): the existence check mentioned above is not implemented.
rundeckd="${JAVA_HOME}/bin/java -jar ${jarFullPath}"
LOG_FILE=$LOG_DIR/$DATE.log
echo "LOG_FILE: $LOG_FILE"
RETVAL=0
printf "%s" "Starting $serviceName: "
[ -f $LOK_FILE -a -f $PID_FILE ] && {
echo_success; #already running
return $RETVAL
}
nohup $rundeckd >>$LOG_FILE 2>&1 &
# NOTE(review): this captures the status of *backgrounding*, not of the
# java process itself, so startup failures may still report [OK].
RETVAL=$?
PID=$!
echo $PID > $PID_FILE
if [ $RETVAL -eq 0 ]; then
touch $LOK_FILE
echo_success
else
echo_failure
fi
return $RETVAL
}
# Stop the service: read the pid file, kill the process if still alive,
# then remove the pid and lock files.
stop() {
RETVAL=0
printf "%s" "Stopping $serviceName: "
[ ! -f $PID_FILE ] && {
echo_success; #already stopped
return $RETVAL
}
PID=`cat $PID_FILE`
RETVAL=$?
# NOTE(review): when the pid file is empty, RETVAL is still 0 here (cat
# succeeded), so this failure path returns success to the caller.
[ -z "$PID" ] && {
echo_failure; #empty pid value"
return $RETVAL;
}
# Only signal the process when it is actually running.
ps -p "$PID" >/dev/null 2>&1
if [ $? -eq 0 ]; then
kill $PID >/dev/null 2>&1
RETVAL=$?
[ $RETVAL -eq 0 ] || {
echo_failure; # could not kill process
return $RETVAL
}
fi
rm -f $PID_FILE; # Remove control files
rm -f $LOK_FILE
echo_success
return $RETVAL
}
# Report whether the service is running, based on the pid file and ps.
# Returns 0 when running (or when the pid file is stale), 3 when stopped
# (LSB convention).
status() {
RETVAL=0
printf "%s" "Status $serviceName: "
test -f "$PID_FILE"
RETVAL=$?
[ $RETVAL -eq 0 ] || {
echo "$serviceName is stopped";
return 3;
}
PID=`cat $PID_FILE`
ps -p "$PID" >/dev/null
RETVAL=$?
# NOTE(review): $RDECK_PORT is never set in this script -- confirm it is
# exported by the environment.
[ $RETVAL -eq 0 ] && {
echo "$serviceName is running (pid=$PID, port=$RDECK_PORT)"
} || {
echo "$serviceName dead but pid file exists"
}
return $RETVAL
}
# Dispatch the requested action to its handler.
case "$action" in
init)
init
;;
replacejar)
# Fixed: this arm previously called stop, leaving replacejar() dead
# code; pass the script arguments through so $3 (jar name) is visible.
replacejar "$@"
;;
start)
start
;;
stop)
stop
;;
restart)
stop
start
;;
condrestart)
# Restart only when the service is currently running (lock file present).
if [ -f $LOK_FILE ]; then
stop
start
fi
;;
status)
#status $rundeckd # temporarily removed
status
RETVAL=$?
;;
*)
echo $"Usage: $0 {start|stop|restart|condrestart|status}"
RETVAL=1
esac
exit $RETVAL
| true
|
b15d3d9fb3c26b9d0a915429f28dc5f2bf804ff4
|
Shell
|
feup-infolab/ckan-install-script
|
/filestore_install.sh
|
UTF-8
| 472
| 2.546875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Configure the CKAN filestore: create the storage directories and point
# ckan.storage_path at them, then restart Jetty.
sudo mkdir -p /var/lib/ckan/default
#add line in ini file
# NOTE(review): this opens an interactive editor -- the script blocks
# here until vim is closed; consider sed/crudini for unattended installs.
sudo vim /etc/ckan/default/development.ini
#add after [app:main]
#ckan.storage_path = /var/lib/ckan/default
#set permissions on the uploads folder
#create storage files
sudo mkdir -p /var/lib/ckan/resources
sudo chmod u+rwx /var/lib/ckan/resources
# (repeats the first mkdir; harmless because of -p)
sudo mkdir -p /var/lib/ckan/default
sudo chmod u+rwx /var/lib/ckan/default
sudo chown -R ckan /var/lib/ckan/
sudo service jetty8 restart
| true
|
d6f2b9be13af1c5896e9709c0b316af3775450d0
|
Shell
|
peterhellberg/dotfiles
|
/.bashrc
|
UTF-8
| 2,809
| 2.828125
| 3
|
[] |
no_license
|
# Interactive bash configuration (macOS-oriented): history, locale,
# toolchain PATH entries and a couple of helper functions.
ulimit -n 4096
# Have ctrl-s perform i-search (search forward, complements ctrl-r)
stty -ixon
# don't put duplicate lines or lines starting with space in the history.
# also erase all duplicates
# See bash(1) for more options
HISTCONTROL=ignoreboth:erasedups
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1500
HISTFILESIZE=3500
# Bash
export EDITOR='vim'
# Setting for the new UTF-8 terminal support in Lion
export LC_CTYPE=en_US.UTF-8
export LC_ALL=en_US.UTF-8
export LC_LANG=en_US.UTF-8
export LANG=en_US.UTF-8
# NeoVIM
export NVIM_TUI_ENABLE_CURSOR_SHAPE=true
# ooc
# export OOC_LIBS=$HOME/Work/ooc
# export PATH=$PATH:$OOC_LIBS/sam:$OOC_LIBS/rock/bin
# Dokku
export DOKKU_HOST=dokku.c7.se
# Go
export GOPATH=$HOME/Go
export CDPATH=.:$GOPATH/src/github.com:$GOPATH/src/golang.org:$GOPATH/src:$CDPATH
export PATH=$PATH:$GOPATH/bin
export GOGC=400
# Load Homebrew-provided completions when brew is installed.
if [ -f /usr/local/bin/brew ]; then
if [ -f `/usr/local/bin/brew --prefix`/etc/bash_completion.d/go_completion.sh ]; then
source `/usr/local/bin/brew --prefix`/etc/bash_completion.d/go_completion.sh
fi
if [ -f `/usr/local/bin/brew --prefix`/etc/bash_completion.d/git-completion.bash ]; then
source `/usr/local/bin/brew --prefix`/etc/bash_completion.d/git-completion.bash
fi
fi
# Git
export GIT_EDITOR='vim'
# Postgres
export PATH=$PATH:/Applications/Postgres.app/Contents/Versions/latest/bin
# Rust
export PATH=$PATH:$HOME/.cargo/bin/
# Coreutils gnubin
export PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
if [ "$TERM" != "dumb" ]; then
export LS_OPTIONS='--color=auto'
eval `dircolors ~/.dircolors`
fi
# Functions
# Print "(branch) " for the current git branch, empty outside a repo;
# used for prompt decoration.
function parse_git_branch {
ref=$(git symbolic-ref HEAD 2> /dev/null) || return
echo "("${ref#refs/heads/}") "
}
# function randomgif {
# giphy random "$@" | xargs curl -s -o '/tmp/giphy.gif' && imgcat '/tmp/giphy.gif'
# }
# Show the oldest commits since ref $1 (relies on a "git l" alias).
function since {
echo "$(git l $1..HEAD)" | tac | tail
}
# NPM
export PATH=$PATH:/usr/local/share/npm/bin
# Homebrew
export PATH=/usr/local/sbin:/usr/local/bin:$PATH
# Ruby
export RUBY_GC_HEAP_INIT_SLOTS=1800000 # (10000)
export RUBY_HEAP_FREE_MIN=20000 # (4096)
export RUBY_HEAP_SLOTS_INCREMENT=300000 # (10000)
export RUBY_HEAP_SLOTS_GROWTH_FACTOR=1 # (1.8)
export RUBY_GC_MALLOC_LIMIT=85000000 # (8000000)
# Colors!
export CLICOLOR=1
export LSCOLORS=HxbxfxdxCxegedafahacad
# NOTE(review): GREP_OPTIONS is deprecated in GNU grep >= 2.21 and
# prints a warning there -- consider a grep alias instead.
export GREP_OPTIONS='--color=auto'
export GREP_COLOR='1;35;40'
# Load aliases
[[ -s "$HOME/.aliases" ]] && source "$HOME/.aliases"
### Added by the Heroku Toolbelt
export PATH="/usr/local/heroku/bin:$PATH"
PATH=$PATH:$HOME/.rvm/bin # Add RVM to PATH for scripting
# added by travis gem
[ -f /Users/peter/.travis/travis.sh ] && source /Users/peter/.travis/travis.sh
| true
|
d95d9f626b543dda861d0917a3c831fd7270da3c
|
Shell
|
flakyhermit/shell
|
/init
|
UTF-8
| 303
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Shared shell init: load the environment file and define aliases.
source $HOME/.shell/env
# Custom functions
# setxkbmap -option 'caps:escape'
# Aliases
# Git for dotfiles
# Bare-repo trick: run git against ~/.local/dotfiles with $HOME worktree.
alias dots='/usr/bin/git --git-dir=$HOME/.local/dotfiles --work-tree=$HOME'
# Start fish
# if [[ $(ps --no-header --pid=$PPID --format=cmd) != "fish" ]]
# then
# exec fish
# fi
| true
|
bd099c96f1bc662b8c9ac96c4bef0103e9d12ab7
|
Shell
|
ebindavis17/Shell
|
/etc.sh
|
UTF-8
| 296
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Parse a Debian package file name into package / version / architecture.
package=dbus-x11_1.2.24-4_amd64.deb
# Fixed: the original echoed ${BATCH_MATCH[...]}, which is not a bash
# variable -- capture groups live in BASH_REMATCH -- and the capturing
# regex (left commented out upstream) must actually be used.
if [[ $package =~ (.+)_(.*)_(.*)\.deb ]]; then
echo " Package ${BASH_REMATCH[1]} Version ${BASH_REMATCH[2]} "\
"is for the ${BASH_REMATCH[3]} architecture "
else
echo " File \"$package\" does not exist"
fi
| true
|
396d22b869ac196204914f3c4739a3b70cdc2666
|
Shell
|
hapo-anhht/lemp-server
|
/setup.sh
|
UTF-8
| 2,484
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Prompt for the MySQL database name and credentials; the answers are
# stored in the globals DB_DATABASE, DB_USERNAME and DB_PASSWORD.
function set_mysql {
printf 'DB_DATABASE :\n'
read DB_DATABASE
printf 'DB_USERNAME :\n'
read DB_USERNAME
printf 'DB_PASSWORD :\n'
read DB_PASSWORD
}
# Gather inputs, install the LEMP stack, create the database and deploy
# the Laravel project from $GIT_LINK.
set_mysql
echo "Link git repository :";
read GIT_LINK;
echo "Project name: "
read PRO_NAME;
# Update
# apt-get update
# Install cURL & ZIP/UNZIP & git
apt-get install -y curl
apt-get install -y zip unzip
apt-get install -y git
echo "Git Installed Successfully!"
git config --global user.name "Your Name"
git config --global user.email "youremail@domain.com"
# Install Nginx
chmod 777 Nginx.sh
./Nginx.sh
# Install MySQL only when it is not present yet.
# Fixed: the original guard "[ -f `which mysql` ]" evaluated true both
# when mysql existed and (as a one-argument test) when it did not.
if ! command -v mysql > /dev/null 2>&1; then
apt-get -y install mysql-server mysql-client
fi
# Keep prompting until the requested database name is unused.
# Fixed: DBEXISTS is now re-evaluated inside the loop; the original
# computed it once before the loop, which spun forever on a duplicate.
while : ; do
DBEXISTS=$(mysql --batch --skip-column-names -e "SHOW DATABASES LIKE '"$DB_DATABASE"';" | grep "$DB_DATABASE" > /dev/null; echo "$?")
if [ $DBEXISTS -eq 0 ];then
echo "A database with the name $DB_DATABASE already exists. Please re-enter."
set_mysql;
else
break;
fi
done
service mysql start;
# With /root/.my.cnf, root credentials are picked up automatically.
if [ -f /root/.my.cnf ]; then
mysql -e "CREATE DATABASE ${DB_DATABASE} /*\!40100 DEFAULT CHARACTER SET utf8 */;"
mysql -e "CREATE USER ${DB_USERNAME}@localhost IDENTIFIED BY '${DB_PASSWORD}';"
mysql -e "GRANT ALL PRIVILEGES ON ${DB_DATABASE}.* TO '${DB_USERNAME}'@'localhost';"
mysql -e "FLUSH PRIVILEGES;"
# If /root/.my.cnf doesn't exist then it'll ask for root password
else
mysql -uroot -e "CREATE DATABASE ${DB_DATABASE} /*\!40100 DEFAULT CHARACTER SET utf8 */;"
mysql -uroot -e "CREATE USER ${DB_USERNAME}@localhost IDENTIFIED BY '${DB_PASSWORD}';"
mysql -uroot -e "GRANT ALL PRIVILEGES ON ${DB_DATABASE}.* TO '${DB_USERNAME}'@'localhost';"
mysql -uroot -e "FLUSH PRIVILEGES;"
fi
# Install PHP
chmod 777 php-install.sh
./php-install.sh
# # Git Clone your Site
export COMPOSER_ALLOW_SUPERUSER=1
git clone $GIT_LINK /var/www/$PRO_NAME
cd /var/www/$PRO_NAME
echo "APP_ENV=local
APP_DEBUG=true
APP_KEY=base64:Y6CZKraJe9eBR1OxuiVBCHHNHNl9fh1r8UwCw+64OyM=
APP_URL=http://localhost/
DB_CONNECTION=mysql
DB_HOST=127.0.0.1
DB_PORT=3306
DB_DATABASE= $DB_DATABASE
DB_USERNAME= $DB_USERNAME
DB_PASSWORD= $DB_PASSWORD
CACHE_DRIVER=file
SESSION_DRIVER=file
QUEUE_DRIVER=sync
REDIS_HOST=127.0.0.1
REDIS_PASSWORD=null
REDIS_PORT=6379
MAIL_DRIVER=log
MAIL_HOST=mailtrap.io
MAIL_PORT=2525
MAIL_USERNAME=null
MAIL_PASSWORD=null
MAIL_ENCRYPTION=null"> .env
composer install
composer update
php artisan key:generate
php artisan migrate
php artisan db:seed
php artisan serve
| true
|
6750a1828d28b79773e2a9e5853a7583e331600a
|
Shell
|
Bartor/akiso
|
/3/1.sh
|
UTF-8
| 3,743
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Live terminal graph of eth0 traffic.
# $1 - graph width in samples, $2 - graph height in rows, $3 - horizontal scale
width=$1
height=$2
scale=$3
# Pre-fill both sample buffers with zeros so shifts/maxima are well-defined.
i=0
while [ $i -lt $width ]; do
networkInArray+=(0)
networkOutArray+=(0)
let i=i+1
done
# Pretty-print a duration given in seconds, e.g. "1 days 2 hours ...".
# Zero-valued units are omitted, except the trailing seconds part.
function displaytime {
local total=$1
local days=$((total / 86400))
local hours=$((total / 3600 % 24))
local mins=$((total / 60 % 60))
local secs=$((total % 60))
(( days > 0 )) && printf '%d days ' "$days"
(( hours > 0 )) && printf '%d hours ' "$hours"
(( mins > 0 )) && printf '%d minutes ' "$mins"
(( days > 0 || hours > 0 || mins > 0 )) && printf 'and '
printf '%d seconds\n' "$secs"
}
# Print a byte count using the largest fitting unit (B, KB or MB).
function convertBytes {
local bytes=$1
local kib=$((bytes / 1024))
local mib=$((kib / 1024))
if (( mib > 0 )); then
printf '%d MB' "$mib"
elif (( kib > 0 )); then
printf '%d KB' "$kib"
else
printf '%d B' "$bytes"
fi
}
# Drop the oldest sample and append $1 to networkInArray (length $width).
function shiftIn {
local newest=$1
local idx
for (( idx=0; idx<$width; idx++ )); do
networkInArray[$idx]=${networkInArray[$((idx+1))]}
done
networkInArray[-1]=$newest
}
# Drop the oldest sample and append $1 to networkOutArray (length $width).
function shiftOut {
local newest=$1
local idx
for (( idx=0; idx<$width; idx++ )); do
networkOutArray[$idx]=${networkOutArray[$((idx+1))]}
done
networkOutArray[-1]=$newest
}
# Echo the largest sample currently held in networkInArray.
function maxIn {
local best=0
local idx
for (( idx=0; idx<$width; idx++ )); do
if [ "${networkInArray[$idx]}" -gt "$best" ]; then
best=${networkInArray[$idx]}
fi
done
echo $best
}
# Echo the largest sample currently held in networkOutArray.
function maxOut {
local best=0
local idx
for (( idx=0; idx<$width; idx++ )); do
if [ "${networkOutArray[$idx]}" -gt "$best" ]; then
best=${networkOutArray[$idx]}
fi
done
echo $best
}
# Render the inbound-traffic bar graph: a box $width*$scale columns wide
# and $height rows tall; each row's byte threshold is printed at its right.
# Bars are drawn in blue (\e[34m). Reads networkInArray and the globals
# width/height/scale; uses bc for the per-row threshold arithmetic.
function drawIn {
local max=$(maxIn)
# Height of one row in bytes (integer division via bc).
local levelHeight=$(echo "$max/$height" | bc)
local line="╔"
for (( i=1; i<$(($width*$scale+1)); i++ )); do
line+="═"
done
line+="╗"
echo $line
# Rows from top (highest threshold) to bottom.
for (( i=$height; i>0; i-- )); do
local result="║\e[34m"
local currentThreshold=$(echo "$levelHeight*$i" | bc)
for (( j=0; j<$width; j++ )); do
local currentHeight=${networkInArray[$j]}
if [ $currentHeight -ge $currentThreshold ]; then
for (( k=0; k<$scale; k++ )); do
result+="█"
done
else
for (( k=0; k<$scale; k++ )); do
result+=" "
done
fi
done
echo -e "$result\e[39m║$(convertBytes $currentThreshold)"
done
line="╚"
for (( i=1; i<$(($width*$scale+1)); i++ )); do
line+="═"
done
line+="╝"
echo $line
}
# Render the outbound-traffic bar graph; identical to drawIn except it
# reads networkOutArray and draws the bars in green (\e[32m).
function drawOut {
local max=$(maxOut)
# Height of one row in bytes (integer division via bc).
local levelHeight=$(echo "$max/$height" | bc)
local line="╔"
for (( i=1; i<$(($width*$scale+1)); i++ )); do
line+="═"
done
line+="╗"
echo $line
# Rows from top (highest threshold) to bottom.
for (( i=$height; i>0; i-- )); do
local result="║\e[32m"
local currentThreshold=$(echo "$levelHeight*$i" | bc)
for (( j=0; j<$width; j++ )); do
local currentHeight=${networkOutArray[$j]}
if [ $currentHeight -ge $currentThreshold ]; then
for (( k=0; k<$scale; k++ )); do
result+="█"
done
else
for (( k=0; k<$scale; k++ )); do
result+=" "
done
fi
done
echo -e "$result\e[39m║$(convertBytes $currentThreshold)"
done
line="╚"
for (( i=1; i<$(($width*$scale+1)); i++ )); do
line+="═"
done
line+="╝"
echo $line
}
clear
# Seed the byte counters from /proc/net/dev. After squeezing whitespace,
# the eth0 row yields RX bytes in column 1 and TX bytes in column 9.
networkString=$(cat /proc/net/dev | grep 'eth0' | sed 's/ */ /g')
networkArray=($networkString)
oldBytesIn=${networkArray[1]}
oldBytesOut=${networkArray[9]}
# Refresh loop: once per second sample the counters, push the per-second
# deltas into the graphs and redraw in place. Runs until interrupted.
while :
do
sleep 1
# Home the cursor instead of clearing, to avoid flicker.
tput cup 0 0
networkString=$(cat /proc/net/dev | grep 'eth0' | sed 's/ */ /g')
uptimeSeconds=$(cat /proc/uptime | sed 's/\..*$//')
loadString=$(cat /proc/loadavg | sed 's/ .*$//')
loadArray=($loadString)
networkArray=($networkString)
bytesIn=${networkArray[1]}
bytesOut=${networkArray[9]}
shiftIn $((bytesIn-oldBytesIn))
echo Input graph
drawIn
shiftOut $((bytesOut-oldBytesOut))
echo Output graph
drawOut
echo ⇓ `convertBytes $((bytesIn-oldBytesIn))` ⇑ `convertBytes $((bytesOut-oldBytesOut)) `
echo `displaytime $uptimeSeconds `
echo Load ${loadArray[0]}
oldBytesIn=$bytesIn
oldBytesOut=$bytesOut
done
| true
|
4542e02031facddd4305fd21b5d60ad9a66dc7f3
|
Shell
|
Cyb10101/docker_backup-tools
|
/rootfs/opt/docker/bin/functions.sh
|
UTF-8
| 1,817
| 4.25
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e

# Source every *.sh file found directly inside directory $1 into the
# current shell, announcing each file before it is loaded. Silently a
# no-op when $1 is not a directory.
includeScriptDir() {
    if [ -d "${1}" ]; then
        local entry
        for entry in "${1}"/*.sh; do
            echo "-> Executing ${entry}"
            . "${entry}"
        done
    fi
}
# Log message $1 prefixed with a timestamp and the calling script's name.
cronLog() {
    local stamp script_name
    stamp=$(date '+%F %H:%M')
    script_name=$(basename ${0})
    echo "$stamp Cron '$script_name': ${1}"
}
# Ensure directory $1 exists; abort the whole script when it cannot be
# created (the backup must not proceed without its target directory).
createFolder() {
    # Fixed: ${1} was unquoted, so paths containing spaces were split
    # into several mkdir arguments.
    if [ ! -d "${1}" ]; then
        mkdir -p "${1}"
    fi
    if [ ! -d "${1}" ]; then
        echo "ERROR: Can not create folder '${1}'!"
        exit 1
    fi
}
# Dump a remote MySQL database (over $SSH_CONNECTION) into
# /root/backup/databases/<filename>.sql, prepending CREATE DATABASE/USE
# statements so the dump can be restored standalone.
# $1 host, $2 user, $3 password, $4 database, $5 optional output name
# (defaults to the database name).
backupDatabase() {
    dbHost="${1}"
    dbUsername="${2}"
    dbPassword="${3}"
    dbDatabase="${4}"
    filename="${5}"
    # Fixed: the original contained corrupted "$(unknown)" tokens where
    # ${filename} belongs (empty-check fallback and output path).
    if [ -z "${filename}" ]; then
        filename="${dbDatabase}"
    fi
    cronLog 'Backup database...'
    createFolder '/root/backup/databases'
    ssh ${SSH_CONNECTION} "mysqldump --opt --skip-comments --host='${dbHost}' --user='${dbUsername}' --password='${dbPassword}' '${dbDatabase}'" \
        | (echo "CREATE DATABASE IF NOT EXISTS \`${dbDatabase}\`;USE \`${dbDatabase}\`;" && cat) \
        > /root/backup/databases/${filename}.sql
}
# Incremental filesystem backup of $1 into $2 using rdiff-backup.
# Uses the global array rdiffExcludes for exclusion options.
backupFilesystemRdiffBackup() {
source="${1}"
destination="${2}"
cronLog 'Backup filesystem with rdiff-backup...'
rdiff-backup -bv0 ${rdiffExcludes[@]} ${source}/ ${destination}/
}
# Snapshot $1 into $2 with rsync; when a previous generation "$2.1"
# exists, unchanged files are hard-linked against it (cp -al pre-seeds
# the destination, rsync --link-dest keeps it in sync).
# Uses the global array rsyncExcludes for exclusion options.
backupFilesystemHardLinks() {
source="${1}"
destination="${2}"
cronLog 'Backup filesystem with hard links...'
linkDestination=""
if [ -d "${destination}.1" ]; then
linkDestination="--link-dest=${destination}.1/"
# Prepare old folder
cp -al "${destination}.1/" "${destination}/"
fi
# Sync folder
rsync -a --delete ${rsyncExcludes[@]} ${linkDestination} "${source}/" "${destination}/"
}
# Rotate a file or folder with the external /usr/local/bin/rotate helper.
# $1 - path to rotate (silently skipped when it does not exist)
# $2 - optional maximum number of generations (positive integer)
rotateFileOrFolder() {
    if [ -d ${1} ] || [ -f ${1} ]; then
        binRotate='/usr/local/bin/rotate'
        # Fixed: "[[ ${2} > 0 ]]" compared strings lexicographically;
        # use an arithmetic test for the numeric bound.
        if [[ ${2} =~ ^-?[0-9]+$ ]] && (( ${2} > 0 )); then
            ${binRotate} -max ${2} "${1}"
        else
            ${binRotate} "${1}"
        fi
    fi
}
| true
|
7c0f11b95beb781dc5c0e285508cf3d7d5399fa3
|
Shell
|
bpm-rocks/is
|
/bench-test/is-function
|
UTF-8
| 891
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
. bpm
bpm::include bench
bpm::include string
# Generate four benchmark cases for the predicate named in $1: probing a
# known function, a variable name, an external command and an unknown
# name. The bench::test::* functions are created via an eval'd template.
# NOTE(review): "BASH_VERION" below looks like a typo for BASH_VERSION,
# but as a deliberately-nonexistent name it may be intentional -- confirm.
genTests() {
eval "
bench::test::$1-function() {
$1 isFunctionDeclare
}
bench::test::$1-variable() {
$1 BASH_VERION
}
bench::test::$1-command() {
$1 ls
}
bench::test::$1-unknown() {
$1 asdfasdfasdfasdfasdfasdfasdfasdf
}
"
}
# Return 0 when $1 names a declared shell function, by scanning the
# output of `declare -F` (each line is "declare -f <name>").
isFunctionDeclare() {
    local candidate names
    names=$(declare -F)
    for candidate in ${names//declare -f /}; do
        if [[ "$candidate" == "$1" ]]; then
            return 0
        fi
    done
    return 1
}
genTests isFunctionDeclare
# Return 0 when `type -t` classifies $1 as a shell function.
isFunctionType() {
    local kind
    kind=$(type -t "$1" || :)
    [[ "$kind" == "function" ]]
}
genTests isFunctionType
bench::auto
| true
|
3ce4bfec7e2c876c1ac38be22790816fdcb7785e
|
Shell
|
gotswoop/pc-ors_reports
|
/createReport.sh
|
UTF-8
| 339
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh
# Create a skeleton directory for a new report.
# $1 - name of the new report
#
# Fixed: the original had several shell syntax/runtime errors --
# "[-z $1 ]" (missing space), "REPORT = $1" and "RET = `...`" (spaces
# around =), "![ ... ]" (invalid negation), missing exits after the
# error branches, and an unterminated final string.
if [ -z "$1" ]; then
echo "No report specified. You must enter the name of the new report."
exit 1
fi
REPORT=$1
echo "Create new report named '$REPORT'..."
if ! mkdir "$REPORT"; then
echo "Error: Report directory already exists."
exit 1
fi
echo "Done. Now edit the following:"
echo "\treports/$REPORT/fields.csv"
|
0839f03bd609e8ff037ee80ea45e38133342d3a7
|
Shell
|
dbeato/scripts
|
/Linux/Install-XOA-From-Source.sh
|
UTF-8
| 1,037
| 2.53125
| 3
|
[] |
no_license
|
#This script is written to be installed on Debian.
#Sources on https://xen-orchestra.com/docs/from_the_sources.html
# Installs Xen Orchestra from source: node toolchain + build deps, clone,
# yarn build, then an optional forever-service registration.
sudo apt-get install -y cifs-utils nfs-common
sudo apt-get install -y curl software-properties-common
# NOTE(review): piping a remote script straight into a root shell runs
# unreviewed code -- verify the nodesource URL before use.
curl -sL https://deb.nodesource.com/setup_8.x | sudo bash -
sudo apt-get install -y nodejs
sudo npm install yarn -g
sudo apt-get install build-essential redis-server libpng-dev git python-minimal libvhdi-utils lvm2
git clone -b master http://github.com/vatesfr/xen-orchestra
cd xen-orchestra
sudo yarn
sudo yarn build
cd packages/xo-server
cp sample.config.yaml .xo-server.yaml
#Edit the .xo-server.yaml and enable this part for xo-web (mounts: '/': '../xo-web/dist/)
sudo yarn start
#Optional to add as a service
sudo yarn global add forever
sudo yarn global add forever-service
# Be sure to edit the path below to where your install is located!
cd /home/username/xen-orchestra/packages/xo-server/bin/
# Change the username below to the user owning XO
sudo forever-service install orchestra -r username -s xo-server
| true
|
dac321c8e6c6122d7b759c4f3fed63388725d606
|
Shell
|
samrocketman/jenkins-bootstrap-shared
|
/scripts/jenkins_wait_job.sh
|
UTF-8
| 1,032
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#Created by Sam Gleske (https://github.com/samrocketman/home)
#Ubuntu 16.04.2 LTS
#Linux 4.4.0-72-generic x86_64
#Python 2.7.12
# Extract field $1 from the JSON object on stdin and print it lowercased.
# Fixed: "print str(...)" is Python-2-only syntax; print(...) parses on
# both Python 2 and Python 3 interpreters.
function json() {
  python -c "import sys,json;print(str(json.load(sys.stdin)[\"${1}\"]).lower())"
}
# NOTE(review): MESSAGE is assigned here and below but never printed.
MESSAGE='Jobb success.'
PIPELINE_INPUT=false
count=0
# Poll the Jenkins build at $1 until it stops building; every third
# iteration (roughly every 15s) also scan the console log tail for a
# pipeline "Input requested" prompt and stop waiting if found.
while true; do
[ "$("${SCRIPT_LIBRARY_PATH}"/jenkins-call-url ${1%/}/api/json | json building)" = 'false' ] && break
if [ "$count" -eq "0" ]; then
if ( "${SCRIPT_LIBRARY_PATH}"/jenkins-call-url ${1%/}/consoleText | tail | grep 'Input requested' ); then
PIPELINE_INPUT=true
break
fi
fi
echo "building..."
#every 15 seconds check consoleText
((count++, count = count%3)) || true
sleep 5
done
# Pipeline input counts as success; otherwise read the build result.
if ${PIPELINE_INPUT}; then
RESULT=SUCCESS
MESSAGE='Pipeline input requested.'
else
RESULT=$("${SCRIPT_LIBRARY_PATH}"/jenkins-call-url ${1%/}/api/json | json result | tr 'a-z' 'A-Z')
fi
# Dump the full console log for the caller.
"${SCRIPT_LIBRARY_PATH}"/jenkins-call-url ${1%/}/consoleText
#script exit code is last command
[ "${RESULT}" = 'SUCCESS' ]
| true
|
ad180cc68948318a234518780e56d8f31960fc96
|
Shell
|
tiechui1994/linux-tools
|
/web/axel.sh
|
UTF-8
| 4,188
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash

#----------------------------------------------------
# File: axel.sh
# Contents: axel is a multi-threaded download accelerator for fast file downloads.
# Date: 19-1-18
#----------------------------------------------------

declare -r version=2.16.1
declare -r workdir=$(pwd)
# Shared exit codes used by the functions below.
declare -r success=0
declare -r failure=1

# log
# Print an error message, "[E] <args>", in bold red.
log_error(){
    echo -e "\033[31;1m[E] $@\033[0m"
}
# Print a warning message, "[W] <args>", in bold yellow.
log_warn(){
    echo -e "\033[33;1m[W] $@\033[0m"
}
# Print an informational message, "[I] <args>", in bold green.
log_info() {
    echo -e "\033[32;1m[I] $@\033[0m"
}
# Download $2 to file $1 (resuming when possible) and, when $4 is
# non-empty and $1 is a recognised tar archive, unpack it into
# ./<basename> with the first path component stripped.
# $3 selects the downloader ("axel" when installed, curl otherwise).
# Returns ${success} on success, ${failure} on download/unpack errors.
download() {
    name=$1
    url=$2
    cmd=$3
    decompress=$4

    # MIME types used to verify the archive before unpacking.
    declare -A extends=(
        ["tar"]="application/x-tar"
        ["tgz"]="application/gzip"
        ["tar.gz"]="application/gzip"
        ["tar.bz2"]="application/x-bzip2"
        ["tar.xz"]="application/x-xz"
    )

    extend="${name##*.}"     # last extension
    filename="${name%%.*}"   # base name = extraction directory
    temp=${name%.*}
    if [[ ${temp##*.} = "tar" ]]; then
        extend="${temp##*.}.${extend}"
        filename="${temp%%.*}"
    fi

    # Already downloaded: only unpack. (Fixed: the original contained
    # corrupted "$(unknown)" tokens where ${filename} belongs.)
    if [[ -f "$name" ]]; then
        if [[ ${decompress} && ${extends[$extend]} && $(file -i "$name") =~ ${extends[$extend]} ]]; then
            rm -rf ${filename} && mkdir ${filename}
            tar -xf ${name} -C ${filename} --strip-components 1
            if [[ $? -ne 0 ]]; then
                log_error "$name decopress failed"
                rm -rf ${filename} && rm -rf ${name}
                return ${failure}
            fi
        fi
        return ${success} #2
    fi

    # download
    log_info "$name url: $url"
    log_info "begin to donwload $name ...."
    rm -rf ${name}

    command -v "$cmd" > /dev/null 2>&1
    if [[ $? -eq 0 && "$cmd" == "axel" ]]; then
        # Fixed: axel's option is --quiet (was misspelled "--quite",
        # which axel rejects as an unknown option).
        axel -n 10 --insecure --quiet -o ${name} ${url}
    else
        curl -C - --insecure --silent --location -o ${name} ${url}
    fi
    if [[ $? -ne 0 ]]; then
        log_error "download file $name failed !!"
        rm -rf ${name}
        return ${failure}
    fi

    log_info "success to download $name"

    # uncompress file
    if [[ ${decompress} && ${extends[$extend]} && $(file -i "$name") =~ ${extends[$extend]} ]]; then
        rm -rf ${filename} && mkdir ${filename}
        tar -xf ${name} -C ${filename} --strip-components 1
        if [[ $? -ne 0 ]]; then
            log_error "$name decopress failed"
            rm -rf ${filename} && rm -rf ${name}
            return ${failure}
        fi
        return ${success} #2
    fi
}
# Abort unless running as root, or when axel is already installed.
check_param() {
    if [[ "$(whoami)" != "root" ]]; then
        log_warn "Please use root privileges to execute"
        exit
    fi

    command -v "axel" > /dev/null 2>&1
    if [[ $? -eq 0 ]]; then
        log_warn "The "axel" command appears to already exist on this system"
        exit
    fi
}
# Fetch the axel source tarball from the GitHub releases page (via curl)
# and unpack it into ./axel through download().
download_axel() {
    local base="https://github.com/axel-download-accelerator/axel/releases/download"
    download "axel.tar.gz" "${base}/v${version}/axel-${version}.tar.gz" curl 1
    return $?
}
# Build and install axel from the sources unpacked into ${workdir}/axel.
build() {
    # install depend
    # NOTE(review): autoreconf runs here, *before* the cd into the source
    # directory below -- it likely needs to run inside ${workdir}/axel.
    apt-get update && \
    apt-get install autoconf pkg-config gettext autopoint libssl-dev && \
    autoreconf -fiv

    # build
    cd ${workdir}/axel && ./configure
    if [[ $? -ne 0 ]]; then
        log_error "configure fail"
        return ${failure}
    fi

    # Parallelise the build across all logical CPUs.
    cpu=$(cat /proc/cpuinfo | grep 'processor' | wc -l)
    make -j ${cpu}
    if [[ $? -ne 0 ]]; then
        log_error "build fail"
        return ${failure}
    fi

    make install
    if [[ $? -ne 0 ]]; then
        log_error "install failed"
        return ${failure}
    fi

    # check
    command -v "axel" > /dev/null 2>&1
    if [[ $? -eq 0 ]]; then
        log_info "the axel install successfully"
        return ${success}
    else
        log_error "the axel install failed"
        return ${failure}
    fi
}
# Remove the working copies created by download_axel: the tarball is
# saved as axel.tar.gz and unpacked into ./axel.
# Fixed: the original removed "axel-${version}*", which never matches
# the names actually created by download().
clean() {
    cd ${workdir} && rm -rf axel axel.tar.gz
}
# Entry point: validate the environment, fetch the sources, build and
# install, then remove the build artefacts.
do_install() {
    check_param

    download_axel
    if [[ $? -ne ${success} ]]; then
        return
    fi

    build
    if [[ $? -ne ${success} ]]; then
        return
    fi

    clean
}

do_install
| true
|
3c4a10c785589d1e536639ec85405e1846fd9d0e
|
Shell
|
vasanthk8640/dotfiles
|
/system/ubuntu/scripts/install-ruby.sh
|
UTF-8
| 1,447
| 2.90625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash

# Some loose inspiration: https://google.github.io/styleguide/shell.xml
# Installs chruby, chruby-fish and ruby-install from GitHub release
# tarballs, then builds the requested Ruby and updates base gems.

ruby_install_version=0.5.0
ruby_version=2.2.3
gem_version=2.2.0
chruby_version=0.3.9
chruby_fish_version=0.6.0

mkdir -p "${HOME}/tmp"
pushd "${HOME}/tmp"

# Fixed: every wget below used a single quoted string containing both
# the output file name and the URL, so wget received one bogus argument
# and never downloaded anything.
wget -O "chruby-${chruby_version}.tar.gz" "https://github.com/postmodern/chruby/archive/v${chruby_version}.tar.gz"
tar -xzvf "chruby-${chruby_version}.tar.gz"
cd "chruby-${chruby_version}/"
sudo make install
cd ..
rm -rf "chruby-${chruby_version}"*

wget -O "chruby-fish-${chruby_fish_version}.tar.gz" "https://github.com/JeanMertz/chruby-fish/archive/v${chruby_fish_version}.tar.gz"
tar -xzvf "chruby-fish-${chruby_fish_version}.tar.gz"
cd "chruby-fish-${chruby_fish_version}/"
sudo make install
cd ..
rm -rf "chruby-fish-${chruby_fish_version}"*

wget -O "ruby-install-${ruby_install_version}.tar.gz" "https://github.com/postmodern/ruby-install/archive/v${ruby_install_version}.tar.gz"
tar -xzvf "ruby-install-${ruby_install_version}.tar.gz"
cd "ruby-install-${ruby_install_version}/"
sudo make install
cd ..
rm -rf "ruby-install-${ruby_install_version}"*

ruby-install ruby "${ruby_version}" -- --disable-install-rdoc
rm -rf "${HOME}/src/"

source /usr/local/share/chruby/chruby.sh
chruby ruby

gem update --system --no-document
gem install --no-document -f bundler rake

popd

mkdir -p "${HOME}/.rubies/ruby-${ruby_version}/lib/ruby/gems/${gem_version}/bin"
mkdir -p "${HOME}/.gem/ruby/${ruby_version}/bin"
| true
|
c597cee5babf589affefa65919690a260475ac61
|
Shell
|
luisalima/dotfiles
|
/install/install_brew.sh
|
UTF-8
| 359
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Homebrew (and the cask tap) when missing, then update/upgrade.
# Relies on fancy_echo and append_to_shell_config_files being defined by
# the surrounding dotfiles framework before this script is sourced.
fancy_echo "checking for brew and cask..."
if [ -z `which brew` ]; then
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew tap caskroom/cask
append_to_shell_config_files 'export PATH="/usr/local/bin:$PATH"' 1
export PATH="/usr/local/bin:$PATH"
fi
brew update && brew upgrade
| true
|
e9ea3d7e11f8f39f12678d7b9ffbd8f1120c83d2
|
Shell
|
pcamach2/rsp
|
/scripts/maskCat
|
UTF-8
| 400
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# Merge all Freesurfer ROI images under $1/rois into a single mask.
# Input should be the "freeMask" directory within the data folder
# generated by the Freesurfer_ROIs script.
ROIS="$1/rois"
# First ROI file seeds the mask (the "rois" directory entry itself is
# filtered out). Fixed: expansions are now quoted so paths containing
# spaces survive, and backticks were replaced with $(...).
init=$(find "$ROIS" | grep -v 'rois$' | head -1)
mask="$(dirname "$(dirname "$init")")/mask.nii.gz"
cp "$init" "$mask"
# Accumulate every remaining ROI into the mask.
for ROI in $(find "$ROIS" | grep -v 'rois$' | grep -v "$init") ; do
fslmaths "$mask" -add "$ROI" "$mask" ;
done
| true
|
6754f42cabe5d189c78496720255f35e9b4f6ac3
|
Shell
|
hungtd9/spark-emr
|
/build-spark-0.7.2-hadoop103.sh
|
UTF-8
| 785
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash -ev
# Build Spark 0.7.2 against Hadoop 1.0.3 and package the result.
mkdir -p target/spark-0.7.2-hadoop103
cd target/spark-0.7.2-hadoop103
# Fetch and unpack the sources only once.
# Fixed: the guard used "[ ! -f spark ]", which is always true for the
# *directory* created by the mv below, so every run re-extracted the
# tarball; -d tests for the directory.
if [ ! -d spark ]; then
  if [ ! -f spark-0.7.2-sources.tgz ]; then
    wget http://spark-project.org/files/spark-0.7.2-sources.tgz
    # Mac OS X: curl http://spark-project.org/files/spark-0.7.2-sources.tgz > spark-0.7.2-sources.tgz
  fi
  tar -xvzf spark-0.7.2-sources.tgz
  mv spark-0.7.2 spark
fi
cd spark
# We want to build against Hadoop 1.0.3
sed -i 's/val HADOOP_VERSION = "1.0.4"/val HADOOP_VERSION = "1.0.3"/g' project/SparkBuild.scala
# Mac OS X: sed 's/val HADOOP_VERSION = "1.0.4"/val HADOOP_VERSION = "1.0.3"/g' project/SparkBuild.scala > tmp && mv tmp project/SparkBuild.scala
./sbt/sbt package
cd ..
rm -fr spark-0.7.2-hadoop103.tgz
tar -cvzf spark-0.7.2-hadoop103.tgz spark
| true
|
34cc79ce630cf7ab2145f1f1108d0c2b67f51ed7
|
Shell
|
mhcerri/configs
|
/home/bin/mutt-check-patch
|
UTF-8
| 205
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the kernel's checkpatch.pl over a patch read from stdin (intended
# as a mutt pipe command) and page the colourised result.
KERNEL_DIR="$HOME/workspace/linux/torvalds-linux"
# Remove the temp file on exit (trap on signal 0 = EXIT).
trap '[ "$temp" ] && rm -f "$temp"' 0
temp=$(mktemp)
# Capture the piped-in patch.
cat > "$temp"
"$KERNEL_DIR/scripts/checkpatch.pl" --color=always "$@" "$temp" | less -R
| true
|
b276d19dc9067aa141700d5045778d9fb3cffb62
|
Shell
|
ashish493/Security_Tools
|
/mac_changer.sh
|
UTF-8
| 626
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
# Network interface whose MAC address is shifted up/down (macOS naming).
INTERFACE="en0"
# Add an integer delta to a colon-separated MAC address and print the
# adjusted MAC. $1 - MAC address, $2 - delta (may be negative).
function mac_add_delta() {
  local mac="0x${1//\:/}"
  local delta="$2"
  # Fixed: pad to 12 hex digits; plain "%x" dropped leading zeros and
  # produced a malformed MAC for addresses starting with 00.
  local new_mac="$(printf "%012x" $((mac + delta)))" # no 0x
  echo "${new_mac:0:2}:${new_mac:2:2}:${new_mac:4:2}:${new_mac:6:2}:${new_mac:8:2}:${new_mac:10:2}"
}
# -d decrements the MAC by one; any other invocation increments it.
if [[ $1 == "-d" ]]; then
DELTA="-1"
else
DELTA="+1"
fi
# Extract the current MAC from ifconfig output (macOS/BSD "ether" line).
MAC=$(ifconfig "$INTERFACE" | grep ether | tr -d ' \t' | cut -c 6-42)
NEW_MAC=$(mac_add_delta "$MAC" "$DELTA")
echo "$INTERFACE old mac: $MAC"
echo "$INTERFACE new mac: $NEW_MAC"
echo "Applying setting with sudo"
sudo ifconfig "$INTERFACE" ether "$NEW_MAC"
| true
|
ef8273bd26ab8e7ebfba5b25cf9dfa9065b6f1e8
|
Shell
|
jeffknupp/crunchy-containers
|
/tests/openshift/test-watch.sh
|
UTF-8
| 1,839
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2016 Crunchy Data Solutions, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Integration test: delete the Postgres master pod and verify the slave
# is promoted (writable) by the watch container. Requires $BUILDBASE.
echo BUILDBASE is $BUILDBASE
# Tear down both example deployments and give OpenShift time to settle.
cleanup() {
$BUILDBASE/examples/openshift/master-slave-dc/delete.sh
$BUILDBASE/examples/openshift/watchtest/delete.sh
echo "sleeping while cleaning up any leftovers..."
sleep 30
}
#
# test setup
#
cleanup
## create container
$BUILDBASE/examples/openshift/master-slave-dc/run.sh
$BUILDBASE/examples/openshift/watchtest/run.sh
echo "sleep for 60 while the container starts up..."
sleep 60
echo "deleting the master which triggers the failover..."
oc delete pod pg-master-rc-dc
sleep 60
# Locate the promoted pod and its IP address.
PODNAME=`oc get pod -l name=pg-slave-rc-dc --no-headers | cut -f1 -d' '`
echo $PODNAME " is the new master pod name"
export IP=`oc describe pod $PODNAME | grep IP | cut -f2 -d':' `
echo $IP " is the new master IP address"
export PGPASSFILE=/tmp/master-slave-pgpass
echo "using pgpassfile from master-slave test case...."
echo "should be able to insert into original slave after failover..."
echo "wait for the slave to restart as a new master...."
sleep 30
# The insert only succeeds when the slave was promoted to read-write.
psql -h $IP -U testuser userdb -c "insert into testtable values ('watch','fromwatch', now())"
rc=$?
echo $rc is the rc
if [ 0 -eq $rc ]; then
echo "test watch passed"
else
echo "test watch FAILED"
exit $rc
fi
echo "performing cleanup..."
cleanup
exit 0
| true
|
a230525a9179d4a57d209a9cd9be5aec753e8e09
|
Shell
|
adkim1190/Summer-SIR
|
/myscripts/backwardsmyloop.sh
|
UTF-8
| 372
| 2.875
| 3
|
[] |
no_license
|
"""for i in {0..100..-2};
do
mkdir $i;
cd $i;
echo "I am in directory: $PWD";
cd ../;
done
echo "completed""""i
"""for i in {100..0..2};
do
mkdir $i;
cd $i;
echo "I am in directory: $PWD";
cd ../;
done
echo "completed""""
for i in {0..100..2};
do
mkdir $((100-$i));
cd $((100-$i));
echo "I am in directory: $PWD";
cd ../;
done
echo "completed"
| true
|
33fb17f276656d5412a3436c94eb3b68da2f3079
|
Shell
|
binakot/My-Student-Projects
|
/cp1251-utf8-encode.sh
|
UTF-8
| 141
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Re-encode every Pascal/C++/C# source under the current directory to UTF-8
# in place, letting enca detect the original (Russian, e.g. CP1251) encoding.
# Using find -exec handles arbitrary file names (spaces, backslashes, newlines)
# safely; the original `| while read file` loop word-mangled backslashes
# (read without -r) and broke on names with leading/trailing whitespace.
find . -type f \( -name "*.pas" -o -name "*.cpp" -o -name "*.cs" \) -exec enca -L russian -x UTF-8 {} \;
| true
|
023c44c264bf11f5417ca1e0b1be2410977e124b
|
Shell
|
eCONBiL/frontendWebUI
|
/eCONBiL/startserver.sh
|
UTF-8
| 1,441
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
#Autor: Malte Garmhausen
arg=$1

# Dispatch on the single verb argument; a case statement keeps the
# help-flag synonyms together and makes the fallthrough explicit.
case "$arg" in
up)
# Launch the Angular dev server in the background under forever.
forever start --uid "serve" node_modules/@angular/cli/bin/ng serve --host 192.168.23.229 --public-host http://econbil01.abatgroup.de --port 8002 --prod
;;
down)
# Stop the background server and remove its process log.
forever stop serve
rm /home/econ/.forever/serve.log
;;
-h|h|help|--help)
echo "+-------------------------------------------------------------------------------------------------------------+"
echo " small script to bring up the eCONBiL-Angular web-project "
echo " Options are: "
echo " up -- start the angular application with forever and ng serve - needs some time after startup... "
echo " check on default domain or hosts ip with port 8002 to see the result "
echo " down -- stop the angular web project and delete process log "
echo " help -- show this info message "
echo "+-------------------------------------------------------------------------------------------------------------+"
;;
*)
echo "wrong argument input - options are: up, down, help"
;;
esac
| true
|
073a49e7f66264f7e9e8e1064fc5c881b0e1535e
|
Shell
|
devanshdalal/Ceph-Setup-Guide
|
/scripts/helper.sh
|
UTF-8
| 232
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the bundled .deb packages and verify that ceph ended up installed.
echo "Entered"
# BUGFIX: the original did not check the cd; if ./bundle was missing,
# dpkg would have run against *.deb in the wrong directory.
cd ./bundle || exit 1
sudo apt-get update
sudo dpkg -i *.deb
# sudo apt-get -y install -f
# Check whether ceph is installed: test the command directly instead of
# the fragile `cmd; if [ $? = 0 ]` pattern.
if ceph --version; then
    echo "ceph installed!!"
else
    echo "FAILED!!!"
fi
| true
|
c50fc073577660504310cd85ae45ed537d49284f
|
Shell
|
broesler/bash_scripts
|
/run_case.sh
|
UTF-8
| 1,313
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#===============================================================================
# File: run_case.sh
# Created: 05/30/2017, 14:50
# Author: Bernie Roesler
#
# Description: Run VLM data case and store output
# Usage: ./run_case matlab_file.m
#
#===============================================================================
# Derive the MATLAB entry point from the first argument (extension stripped),
# falling back to 'hello' when no argument is given.
if [ $# -eq 1 ]; then
    fileroot=${1%.*}   # Strip extension
else
    fileroot='hello'
fi

if [ -f "${fileroot}.m" ]; then
    # Prepend date and time to the diary filename so each run is unique.
    date_str=$(date +'%y%m%d_%H%M')
    diary_name="${date_str}_${fileroot}_diary.txt"
    matlab_cmd="diary('${diary_name}'); ${fileroot}; exit"

    rm -f "${diary_name}"   # clean slate
    matlab -nosplash -nodisplay -r "${matlab_cmd}"
    # Capture MATLAB's exit status in a named variable right away so no
    # later command can clobber $? before the success check.
    matlab_rc=$?

    # E-mail me upon completion
    if [ "$matlab_rc" -eq 0 ]; then
        email_string="Your code $fileroot has finished successfully!"
    else
        email_string="Your code $fileroot has exited with some errors!"
    fi
    mail -s "$(hostname) simulation complete!" bernard.roesler@gmail.com <<-EOF
	${email_string}
	EOF
else
    printf "Usage: ./run_case [valid-matlab-code-file.m]\n" 1>&2
    exit 1
fi

echo "done."
exit 0
#===============================================================================
#===============================================================================
| true
|
583e7b8f8deab36f936900dba2ccf67f01fe13fb
|
Shell
|
adamzwickey/demo-env
|
/old/scripts/tmc-namespace.sh
|
UTF-8
| 1,387
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate per-cluster TMC manifests, then create the workspace and namespace.
# BUGFIX: the original first line was '# bin/bash' — a plain comment, not a
# shebang — so the script ran under whatever shell happened to invoke it.
: ${TMC_WORKSPACE?"Need to set TMC_WORKSPACE environment variable"}
: ${VMWARE_ID?"Need to set VMWARE_ID environment variable"}
# BUGFIX: TMC_CLUSTER_GROUP is used below to derive the cluster name but was
# never validated; fail fast instead of writing manifests with empty names.
: ${TMC_CLUSTER_GROUP?"Need to set TMC_CLUSTER_GROUP environment variable"}

if [ ! $# -eq 1 ]; then
    echo "Must supply cluster name as args"
    exit 1
fi

CLUSTER_NAME=$1

# Copy the manifest templates into a per-cluster working area.
mkdir -p "generated/$CLUSTER_NAME/tmc"
cp -r tkg-lab/tmc/config/ "generated/$CLUSTER_NAME/tmc/"

# tkg-mgmt-acme-fitness.yaml
yq write -d0 "generated/$CLUSTER_NAME/tmc/namespace/tkg-mgmt-acme-fitness.yaml" -i "fullName.clusterName" "$TMC_CLUSTER_GROUP-$CLUSTER_NAME"
yq write -d0 "generated/$CLUSTER_NAME/tmc/namespace/tkg-mgmt-acme-fitness.yaml" -i "objectMeta.labels.origin" "$VMWARE_ID"
yq write -d0 "generated/$CLUSTER_NAME/tmc/namespace/tkg-mgmt-acme-fitness.yaml" -i "spec.workspaceName" "$TMC_WORKSPACE"

# acme-fitness-dev.yaml
yq write -d0 "generated/$CLUSTER_NAME/tmc/workspace/acme-fitness-dev.yaml" -i "fullName.name" "$TMC_WORKSPACE"
yq write -d0 "generated/$CLUSTER_NAME/tmc/workspace/acme-fitness-dev.yaml" -i "objectMeta.labels.origin" "$VMWARE_ID"

tmc workspace create -f "generated/$CLUSTER_NAME/tmc/workspace/acme-fitness-dev.yaml"
tmc workspace iam add-binding "$TMC_WORKSPACE" --role workspace.edit --groups acme-fitness-devs

#tmc cluster namespace create -f generated/$CLUSTER_NAME/tmc/namespace/tkg-mgmt-acme-fitness.yaml
tmc cluster namespace create --cluster-name "$TMC_CLUSTER_GROUP-$CLUSTER_NAME" \
    --name acme-fitness --workspace-name "$TMC_WORKSPACE"
| true
|
45b614b777dac476a90c9d1841f483448d7d3060
|
Shell
|
ErinCGallagher/Quinterac-Course-Project
|
/frontEndScript.sh
|
UTF-8
| 1,543
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
for i in testCases/*.txt
do
#move the valid accounts file for the required test case to the in directory
cp testAccount/$(basename $i .txt)_validAccounts.txt in/validAccounts.txt
for j in $i
do
echo $j
done | python ../frontEnd.py <$i \ >out/log/$(basename $i .txt).txt.log
#moves the transaction summary file to the logSum folder for storing
cp out/transactionSummary.txt out/logSum/$(basename $i .txt)_transactionSummary.txt
#compare the terminal output file to the expectedOutput file
diff -c -b -B out/log/$(basename $i .txt).txt.log expOut/testCases/$(basename $i .txt).txt.log >> out/failLog.txt
#compares the transaction summary file outputed to the expect transaction summary file
diff -c -b -B out/logSum/$(basename $i .txt)_transactionSummary.txt expOut/transactionSummary/$(basename $i .txt)_transactionSummary.txt >> out/failLog.txt
done
# we had 5 directories
#expOut contain 2 directories :
#testCases --> stored the expected terminal output
#transactionSummary --> stored the expected transaction summary file
#testAccount --> stored the required input test Account files
#testCases --> store the required terminal input for each test cases as txt files
#in --> the script movedthe reuqired validAccounts file in here and renamed it so the program could access it as input
#out contained 2 directories:
#log --> stored the outputted terminal logs from the program
#logSum --> stored the outputted transactionSummary files from the program
# the failLog was stored in the out directory
| true
|
fb4d7e2dbbd071bb8f4ecc96e9322c15ac088da8
|
Shell
|
canonw/ansible-provision-ubuntu
|
/scripts/compile-default-vars.sh
|
UTF-8
| 482
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Update role variables to group_vars/all.yml
set -o xtrace

# Work from the repository root (parent of this script's directory).
cd "$(cd "$(dirname "$0")"; pwd -P)/../"

# Keep a backup of any existing (non-empty) all.yml before regenerating it.
[[ -s "group_vars/all.yml" ]] && mv -f "group_vars/all.yml" "group_vars/all.yml~"

# Start the file with a YAML document marker, then the group variables with
# document separators and blank lines stripped.
{
  echo -e "---\n"
  grep -v '^---$' group_vars/groups.yml | grep -v '^[[:space:]]*$'
} > group_vars/all.yml

# Append every role's default variables, filtered the same way.
{
  echo -e "\n# cat from roles defaults"
  cat roles/*/defaults/main.yml | grep -v '^---$' | grep -v '^[[:space:]]*$'
} >> group_vars/all.yml
| true
|
3637dfbfa1789281b57e19dc51ee68f00a9da70a
|
Shell
|
ailsa-ww/CephEvolution
|
/Journaling_Tests/install_fs_hdd.sh
|
UTF-8
| 659
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# WARNING: destructive — repartitions and reformats /dev/sdb.
# Lays out three GPT partitions on the test disk and mounts two of them.
WAL=/dev/sdb
# Filesystem type to create, e.g. xfs or ext4 (used as mkfs.$FS).
FS=${1}
# Unmount the whole device and any of its partitions from a previous run;
# failures here are harmless if nothing was mounted.
sudo umount ${WAL}
sudo umount ${WAL}1
sudo umount ${WAL}2
sudo umount ${WAL}3
# Fresh GPT label, then three partitions: 0-30GB, 30-60GB, 60-120GB.
sudo parted ${WAL} mklabel gpt
sudo parted ${WAL} unit GB mkpart ${FS} 0 30
sudo parted ${WAL} unit GB mkpart ${FS} 30 60
sudo parted ${WAL} unit GB mkpart ${FS} 60 120
# Give the kernel a moment to re-read the partition table.
sleep 3
# Partition 1: database filesystem mounted at /media/dbhdd.
sudo mkdir /media/dbhdd
sudo wipefs -a ${WAL}1
sudo mkfs.${FS} ${WAL}1
sudo mount -t auto -v ${WAL}1 /media/dbhdd
sudo chown -R $(whoami) /media/dbhdd
# Partition 2 is wiped but NOT formatted or mounted — presumably used as a
# raw WAL/journal device; the chown targets the device node itself.
# NOTE(review): confirm leaving ${WAL}2 without a filesystem is intentional.
sudo wipefs -a ${WAL}2
sudo chown -R $(whoami) ${WAL}2
# Partition 3: file-store filesystem mounted at /media/filehdd.
sudo mkdir /media/filehdd
sudo wipefs -a ${WAL}3
sudo mkfs.${FS} ${WAL}3
sudo mount -t auto -v ${WAL}3 /media/filehdd
sudo chown -R $(whoami) /media/filehdd
| true
|
35a1e5a3aa0ecd62d070eda7cf0f9dc2c46053ba
|
Shell
|
agutierrezrodriguez/restapi
|
/proxysql/clustercheckcron
|
UTF-8
| 815
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Periodically sync the PXC cluster membership from the etcd discovery
# service into ProxySQL's mysql_servers table (hostgroup 0).
ipaddr=$(hostname -i | awk ' { print $1 } ')
while true
do
sleep 5
# Accumulates a quoted, comma-separated list of currently-known hosts;
# seeded with '' so the SQL NOT IN (...) list below is never empty.
nowhosts="''"
# Each etcd key under pxc-cluster/$CLUSTER_NAME is a node; the awk strips
# the key path down to the node address.
for i in $(curl http://$DISCOVERY_SERVICE/v2/keys/pxc-cluster/$CLUSTER_NAME/ | jq -r '.node.nodes[]?.key' | awk -F'/' '{print $(NF)}')
do
echo $i
# Grant the ProxySQL monitoring user access on the node itself.
mysql -h $i -uroot -p$MYSQL_ROOT_PASSWORD -e "GRANT ALL ON *.* TO '$MYSQL_PROXY_USER'@'$ipaddr' IDENTIFIED BY '$MYSQL_PROXY_PASSWORD'"
# Register the node with ProxySQL's admin interface (port 6032).
mysql -h 127.0.0.1 -P6032 -uadmin -padmin -e "INSERT INTO mysql_servers (hostgroup_id, hostname, port, max_replication_lag) VALUES (0, '$i', 3306, 20);"
nowhosts="'$i',$nowhosts"
done
# Drop servers that disappeared from discovery, then activate and persist.
mysql -h 127.0.0.1 -P6032 -uadmin -padmin -e "DELETE FROM mysql_servers WHERE hostname not in ($nowhosts) and hostgroup_id = 0;"
mysql -h 127.0.0.1 -P6032 -uadmin -padmin -e "LOAD MYSQL SERVERS TO RUNTIME; SAVE MYSQL SERVERS TO DISK;"
done
| true
|
ac1c97a2ba2f3a7d6dc6cd8a2465de8716132c0f
|
Shell
|
jdmoore0883/Checkpoint-Connectivity-Debug
|
/connCheck.sh
|
UTF-8
| 18,674
| 3.203125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#! /bin/bash
# Written by: Jon Moore - jdmoore0883@gmail.com
# - http://jonmoore.duckdns.org
#
# This script is provided for debugging purposes only with no warranty, implied or otherwise.
#
# version 8 - Jan. 3, 2017 - changed 'route' command to 'netstat -nr'
# - created a function for the detail gathering
# - changed the detail output format
# - added a SecureXL re-activation check in the 'Ctrl+c' check
# - removed the output redirect from the cleanup 'tar' command to allow for errors to the user
# - added a "debug" output file
# - updated some of the true/false logic checks
# - moved the SecureXL reactiviation to it's own function
# - added 'fwaccel stats' to the detail gathering
# - added 'routed_messages*' files to the logs gathered
# - cleaned up commented code
# - added a time calculation to see the total time the script took
# - changed the 'arp' command to 'arp -en'
# - added 'hostname' to the details gathered
# - explicitly set the TCPDump snaplength to '68' bytes
# - set the TCPDump to any interface
# - added a filter, not net 127.0.0.0/8 (loopback)
# - set TCPDump to run a a nice level of 5
# - set TCPDump to rotate the output file at 100MB
# - list the interfaces tcpdump will gather on
# - added 'ifconfig' to the details gathered
# version 7 - Nov. 29, 2016 - removed the 'disown' commands
# - added default action for running a CPInfo and turning off SecureXL
# - created a "cleanup" function to compress and delete files
# - created a "kill_caps" function to take care of killing the backgrounded processes
# - created a trap to catch a user's Ctrl+c
# - changed the CPInfo's nice level to 5; 15 and 10 are too low, CPInfo takes too long
# - added CPInfo progress indicator
# - changed the working directory to /var/log/tmp/connCheck
# - changed the compressed file output directory to the user's preset directory
# version 6 - Feb. 24, 2016 - set the CPInfo to run a a nice level of 15 (lower priority)
# - changed the warning text
# - changed the zdebug drop to an actual debug with timestamps
# - changed the netstat to include timestamps
# - added date/time stamps to command output files
# version 5 - Jan. 11, 2016 - added kernel debugs
# version 4 - May 1, 2015 - added extra screen outputs to remind user to attempt the problem traffic
# version 3 - April 28, 2015 - added usage instructions at the end as comments
# version 2 - March 19, 2015 - bugfix on the fw monitor syntax
# version 1 - February 13, 2015 - initial release
#DEFAULTS
CPINFO=false # Do we run a cpinfo?
FWACCEL=false # Do we turn off SecureXL?
# Set some variables
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
PRESENT_DIRECTORY=$(pwd)
MAIN_DIRECTORY=/var/log/tmp/connCheck
# Per-run scratch area; removed again by cleanup().
WORKING_DIRECTORY=$MAIN_DIRECTORY/$TIMESTAMP
# Final archive is written next to wherever the user launched the script.
OUTPUT_ARCHIVE=$PRESENT_DIRECTORY/connCheck_$TIMESTAMP.tgz
CPINFO_OUTPUT=$WORKING_DIRECTORY/cpinfo.out
DEBUG_OUTPUT=$WORKING_DIRECTORY/dbg.txt
SCRIPT_START=$(date +%s.%N)
DATE=$(date)
# Each *_PID file stores the PID of one backgrounded collector so that
# kill_caps() can stop them later.
TCPDUMP_PID=$WORKING_DIRECTORY/dump.pid
MONITOR_PID=$WORKING_DIRECTORY/monitor.pid
CPINFO_PID=$WORKING_DIRECTORY/cpinfo.pid
DROPS_PID=$WORKING_DIRECTORY/drops.pid
NETSTATS_PID=$WORKING_DIRECTORY/netstats.pid
###################
# Cleanup function
###################
function cleanup {
# Compress everything gathered into the final archive, delete the scratch
# directory, and restore the user's original ~/.toprc (if one was backed up).
# Compress the outputs to 1 file
echo "Compressing all files..."
cd $WORKING_DIRECTORY
tar -C $WORKING_DIRECTORY -czf $OUTPUT_ARCHIVE *
echo "Compression complete!"
rm -r $WORKING_DIRECTORY
# The script wrote its own ~/.toprc earlier; remove it and put back the backup.
rm ~/.toprc
if [ -e ~/.toprc_ORIGINAL ]
then
mv ~/.toprc_ORIGINAL ~/.toprc
fi
echo ""
echo ""
echo "Diags are now complete!"
echo ""
echo "Please upload the file:" $OUTPUT_ARCHIVE "to the case."
echo "Please also update the case with specific details on what addresses and services/ports are affected."
echo " Note: More specific details are best. Specific IP Addresses are ideal, though subnets can be just as effective."
echo ""
}
###################
# Cleanup done
###################
# Stop every backgrounded collector started by the main script. Each *_PID
# file holds one capture's PID; a missing file (e.g. when CPInfo was disabled)
# just produces a harmless error that lands in the debug log.
# BUGFIX: the original redirected each kill with '>' to the same debug file,
# truncating it on every command (and the function-level '>' truncated it on
# every call), so earlier debug output was silently lost. A single appending
# redirect on the function keeps all of it.
function kill_caps {
kill "$(cat "$CPINFO_PID")"
kill "$(cat "$TCPDUMP_PID")"
kill "$(cat "$MONITOR_PID")"
kill "$(cat "$DROPS_PID")"
kill "$(cat "$NETSTATS_PID")"
# Turn the kernel drop-debug back off.
fw ctl debug 0
} >> "$DEBUG_OUTPUT" 2>&1
# Re-enable SecureXL if — and only if — this script disabled it earlier.
# BUGFIX: FWACCEL_REACTIVATE is only ever assigned when the FWACCEL option is
# enabled; with it unset, the original `if $FWACCEL_REACTIVATE` expanded to an
# empty command (exit status 0), so the then-branch ran and the script wrongly
# executed `fwaccel on` even though it never touched SecureXL. Defaulting the
# variable to "false" and comparing strings fixes that.
function SecureXL {
echo "Checking SecureXL status..."
if [ "${FWACCEL_REACTIVATE:-false}" = "true" ]
then
echo "SecureXL was deactivated. Turning it back on."
fwaccel on
FWACCEL_REACTIVATE=false
else
echo "SecureXL was not on to begin with, doing nothing"
fi
}
# SIGINT handler: if the user aborts mid-run, still stop the background
# captures, archive what was gathered, and restore SecureXL before exiting.
function trap_ctrlc {
# Catch Ctrl+C being pressed
echo "Ctrl-C caught...ending background process and performing clean up"
# STOP THE BACKGROUNDED PCAPS
kill_caps
# Clean Up
cleanup
# SecureXL re-activation check
SecureXL
# exit shell script, if omitted, shell script will continue execution
exit
}
# Snapshot system/firewall state into $WORKING_DIRECTORY/Details-<tag>.
#   $1 - optional tag for the output directory; defaults to the current time.
# The original repeated the same banner/echo/command boilerplate for every
# output file; the capture/stamp helpers below produce byte-identical files
# without the duplication.
function Details () {
if [ -z "$1" ]
then
OUT_DIR=$WORKING_DIRECTORY/Details-$(date +"%H-%M-%S")
else
OUT_DIR=$WORKING_DIRECTORY/Details-$1
fi
mkdir -p $OUT_DIR
echo "Gathering command outputs '"$1"'..."

# capture <file> <command...> - append a banner naming the command, then the
# command's stdout, to $OUT_DIR/<file>.
capture () {
local out=$1
shift
{
echo "*****************************************"
echo " $*"
echo "*****************************************"
"$@"
} >> $OUT_DIR/$out
}

# stamp <file> <label> - append a banner plus a high-resolution timestamp.
stamp () {
local out=$1
local label=$2
{
echo "*****************************************"
echo " $label"
echo "*****************************************"
date +"%a %b %d %T.%4N %Y"
} >> $OUT_DIR/$out
}

stamp timestamp-01.txt "DATE/TIME STAMP before gathering details"
capture top.txt top -b -n 1
capture free_mem.txt free -tk
capture pstat.txt fw ctl pstat
capture routes.txt netstat -nr
capture arp.txt arp -en
capture cphaprob_stat.txt cphaprob stat
capture cphaprob_list.txt cphaprob -ia list
capture cphaprob_if.txt cphaprob -a if
capture cpwd_admin.txt cpwd_admin list
capture fwaccel_stat.txt fwaccel stat
capture fwaccel_stats.txt fwaccel stats
capture fwaccel_stats-s.txt fwaccel stats -s
capture ifconfig.txt ifconfig
# Full connections table dump — no banner, kept machine-readable.
fw tab -t connections -u > $OUT_DIR/connTable.txt
stamp timestamp-02.txt "DATE/TIME STAMP after gathering details"
echo "Outputs gathered!"
}
###################
# MAIN START
###################
# On Ctrl+C (SIGINT), stop captures, archive, and restore SecureXL.
trap 'trap_ctrlc' 2
# The captures and SecureXL toggles all require root.
if [ "$(id -u)" != "0" ]
then
echo "You need Admin/Root privileges!"
exit
fi
clear
# Advise what the script will gather
# and that it is for Gaia only
echo ""
echo "This diagnostic script will gather several outputs and files."
echo "This script is meant to help troubleshoot connectivity problems,"
echo " and is provided for debugging purposes only with no warranty, implied or otherwise."
echo "Please run this while the issue is occurring."
echo " Otherwise, if the problem is not occurring, we will not see the problem."
echo "Details gathered includes (but not limited to) the following:"
if $CPINFO
then
echo " CPInfo"
fi
echo " tcpdump"
echo " ifconfig"
echo " fw monitor"
echo " fw ctl zdebug drop"
echo " top"
echo " free -k"
echo " cphaprob"
echo " cpwd_admin list"
echo " /var/log/messages* files"
echo " complete connections table dump"
#echo " all *.elg debug files"
echo "For complete details, please take a look at the compressed archive afterwards."
echo ""
echo "*********** WARNING ***********"
echo "*********** WARNING ***********"
echo ""
echo "This script is for a Check Point Gaia Gateway ONLY."
echo "It has not been tested on anything else."
echo ""
# Do we turn SecureXL on and off again?
if $FWACCEL
then
echo "SecureXL will need to be turned off."
echo " (This script will turn it off and back on again)"
echo ""
fi
# Do we run a CPInfo?
if $CPINFO
then
echo "This script will gather a CPInfo at a low priority."
echo " (This will use all availble CPU, but at a low priority)"
echo " (This may cause 100% CPU Usage warnings)"
echo " (But should not affect traffic)"
echo ""
fi
echo "*********** WARNING ***********"
echo "*********** WARNING ***********"
echo ""
# Offer the option to exit the script
# Single-keypress confirmation; anything but y/Y aborts.
read -p "Do you wish to proceed? " -n 1 -r
echo "" # (optional) move to a new line
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
exit 1
fi
# Ensure the test directory exists
if [ ! -e $WORKING_DIRECTORY ]
then
mkdir -p $WORKING_DIRECTORY
fi
echo $DATE > $WORKING_DIRECTORY/TimeStamp.txt
hostname > $WORKING_DIRECTORY/hostname.txt
# create a .toprc for top outputs
# if one exists already, back it up
if [ -e ~/.toprc ]
then
cp ~/.toprc ~/.toprc_ORIGINAL
fi
# Write a fixed .toprc so top's batch output has a known field layout.
# NOTE(review): the unescaped inner quotes on the next line mean the generated
# file's first line reads 'RCfile for top with windows' (quotes stripped by the
# shell) — confirm top still accepts it.
echo "RCfile for "top with windows"
Id:a, Mode_altscr=0, Mode_irixps=1, Delay_time=3.000, Curwin=0
Def fieldscur=AEHIOQTWKNMbcdfgjplrsuvyzX
winflags=30009, sortindx=10, maxtasks=0
summclr=1, msgsclr=1, headclr=3, taskclr=1
Job fieldscur=ABcefgjlrstuvyzMKNHIWOPQDX
winflags=62777, sortindx=0, maxtasks=0
summclr=6, msgsclr=6, headclr=7, taskclr=6
Mem fieldscur=ANOPQRSTUVbcdefgjlmyzWHIKX
winflags=62777, sortindx=13, maxtasks=0
summclr=5, msgsclr=5, headclr=4, taskclr=5
Usr fieldscur=ABDECGfhijlopqrstuvyzMKNWX
winflags=62777, sortindx=4, maxtasks=0
summclr=3, msgsclr=3, headclr=2, taskclr=3
" > ~/.toprc
###################
# Gather FIRST_SET
###################
Details 01
###################
# FIRST_SET Gathered
###################
###################
# Check SecureXL
# Do we need to turn it off?
###################
if $FWACCEL
then
echo "Check SecureXL Status..."
FWACCEL_STATUS=$(fwaccel stat | head -n 1)
FWACCEL_ON="Accelerator Status : on"
FWACCEL_OFF="Accelerator Status : off"
FWACCEL_REACTIVATE=false
if [ "$FWACCEL_STATUS" == "$FWACCEL_ON" ]
then
# SecureXL is on
echo "SecureXL is turned on. Turning it off for packet captures."
echo " SecureXL will be turned back on when script is completed."
fwaccel off
FWACCEL_REACTIVATE=true
else
# SecureXL is off
echo "SecureXL is not turned on."
fi
fi
###################
# SecureXL Check complete
###################
###################
# Start the packet captures
###################
echo "Starting Packet Captures..."
# List the interfaces tcpdump will gather on
tcpdump -D > "$WORKING_DIRECTORY/dump_interfaces.txt"
# Start a TCPDump: all interfaces, 68-byte snaplen, loopback excluded,
# 100MB file rotation, low priority (nice 5).
nice -n 5 tcpdump -enni any not net 127.0.0.0/8 -s 68 -Z $USER -C 100 -w "$WORKING_DIRECTORY/dump.cap" > "$DEBUG_OUTPUT" 2>&1 &
PID=$!
echo $PID > "$TCPDUMP_PID"
# Start an FWMonitor
fw monitor -i -o "$WORKING_DIRECTORY/monitor.cap" > "$DEBUG_OUTPUT" 2>&1 &
PID=$!
echo $PID > "$MONITOR_PID"
# Kernel debug of dropped packets with timestamps (replaces the older
# 'fw ctl zdebug drop' one-liner kept below for reference).
#fw ctl zdebug drop > $WORKING_DIRECTORY/drops.txt &
fw ctl debug 0
fw ctl debug -buf 32000
fw ctl debug + drop
# BUGFIX: the original used a Unicode en-dash ('–T') instead of ASCII '-T',
# which 'fw ctl kdebug' rejects as an unknown argument, so the drop capture
# never actually started.
fw ctl kdebug -T -f > "$WORKING_DIRECTORY/drops.txt" &
PID=$!
echo $PID > "$DROPS_PID"
# start netstats: timestamped interface counters once per second
touch "$WORKING_DIRECTORY/netstats.txt"
while true; do date +"%D-%T.%4N" >> "$WORKING_DIRECTORY/netstats.txt"; netstat -i >> "$WORKING_DIRECTORY/netstats.txt"; echo "" >> "$WORKING_DIRECTORY/netstats.txt"; sleep 1; done &
PID=$!
echo $PID > "$NETSTATS_PID"
###################
# Start a CPInfo
###################
if $CPINFO
then
echo "Starting CPInfo..."
echo "*********** WARNING ***********"
echo "*********** WARNING ***********"
echo "Ensure the relevant problem traffic is being attempted at this time!"
echo "*********** WARNING ***********"
echo "*********** WARNING ***********"
# 'yes no' answers cpinfo's upload prompts; nice 5 keeps it low priority.
yes no | nice -n 5 cpinfo -z -o $WORKING_DIRECTORY/cpinfo > $CPINFO_OUTPUT 2>&1 &
#dd if=/dev/urandom of=$WORKING_DIRECTORY/cpinfo count=1 bs=64M > $CPINFO_OUTPUT 2>&1 & #testing command to create a test CPInfo file a bit more quickly
PID=$!
echo $PID > $CPINFO_PID
fi
###################
# Gather additional files
###################
echo "Gathering Log Files..."
# messages files
mkdir $WORKING_DIRECTORY/messages
cp /var/log/messages* $WORKING_DIRECTORY/messages > $DEBUG_OUTPUT 2>&1
# routed_messages
mkdir $WORKING_DIRECTORY/routed_messages
cp /var/log/routed_messages* $WORKING_DIRECTORY/routed_messages > $DEBUG_OUTPUT 2>&1
# ALL *.elg* files
#mkdir $WORKING_DIRECTORY/elg_files
#find / ! -path "/home/*" ! -path $WORKING_DIRECTORY/ -name *.elg* -exec cp '{}' $WORKING_DIRECTORY/elg_files/ \; > $DEBUG_OUTPUT 2>&1
echo "Log files gathered!"
###################
# Watch the CPINFO process until completed
###################
# $WORKING_DIRECTORY
# Gather top outputs during the CPInfo
echo "******************************************************************************************************" > $WORKING_DIRECTORY/top_during.txt
date >> $WORKING_DIRECTORY/top_during.txt
echo "******************************************************************************************************" >> $WORKING_DIRECTORY/top_during.txt
top -b -n 1 >> $WORKING_DIRECTORY/top_during.txt
# If we run a CPInfo, watch the process until complete
if $CPINFO
then
ACTIVE=true
while $ACTIVE
do
sleep 5
# Poll the CPInfo PID: still alive means keep nagging the user and sampling top.
if ps `cat $CPINFO_PID` > /dev/null
then
# This means the CPInfo is still running
echo "CPInfo still running..."
echo "*********** WARNING ***********"
echo "*********** WARNING ***********"
echo "Ensure the relevant problem traffic is being attempted at this time!"
echo "*********** WARNING ***********"
echo "*********** WARNING ***********"
# gather another top output
echo "******************************************************************************************************" >> $WORKING_DIRECTORY/top_during.txt
date >> $WORKING_DIRECTORY/top_during.txt
echo "******************************************************************************************************" >> $WORKING_DIRECTORY/top_during.txt
echo "CPInfo status:"
tail -n 1 $CPINFO_OUTPUT
echo ""
top -b -n 1 >> $WORKING_DIRECTORY/top_during.txt
else
# CPInfo has Stopped
echo "CPInfo complete!"
ACTIVE=false
#rm $CPINFO_PID
fi
done
# CPInfo done
# If NO CPInfo, then we wait for user input
else
while true
do
# 5-second window for a single keypress; timeout means keep looping.
read -t 5 -n 1
if [ $? = 0 ]
then
break
else
#echo "Packet Captures running, waiting for Ctrl+c to end..."
echo "*********** WARNING ***********"
echo "*********** WARNING ***********"
echo "Ensure the relevant problem traffic is being attempted at this time!"
echo "*********** WARNING ***********"
echo "*********** WARNING ***********"
# gather another top output
echo "******************************************************************************************************" >> $WORKING_DIRECTORY/top_during.txt
date >> $WORKING_DIRECTORY/top_during.txt
echo "******************************************************************************************************" >> $WORKING_DIRECTORY/top_during.txt
top -b -n 1 >> $WORKING_DIRECTORY/top_during.txt
echo "Press any key to stop the packet captures"
fi
done
fi
# Kill all captures
kill_caps
echo "Packet captures done!"
###################
# SecureXL Check
###################
SecureXL
###################
# SecureXL Check complete
###################
###################
# Gather AFTER_SET
###################
Details 02
###################
# AFTER_SET Gathered
###################
SCRIPT_END=$(date +%s.%N)
DIFF=$(echo "$SCRIPT_END - $SCRIPT_START" | bc)
echo "Total time taken for script (in seconds):
$DIFF" > $WORKING_DIRECTORY/script_time.txt
###################
# Cleanup
###################
cleanup
###################
# Cleanup done
###################
echo "Total time taken for script (in seconds):
Before compression: $DIFF"
SCRIPT_END=$(date +%s.%N)
DIFF=$(echo "$SCRIPT_END - $SCRIPT_START" | bc)
echo " After compression: $DIFF"
#############################
# connCheck.sh USAGE DETAILS
#############################
# 1. Get the script to the appliance.
# 2. Ensure it is Linux formatted and executable:
# dos2unix connCheck.sh;chmod +x connCheck.sh
# 3. Run the script:
# connCheck.sh
#############################
| true
|
76a5052bdb47296cb92570f3257b685927573cea
|
Shell
|
Tejaswini-1502/FileSecuritySystem
|
/decrypt.sh
|
UTF-8
| 535
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Decrypt a file that was encrypted with a Caesar-style shift whose key is
# the file's own line count. Control characters (ASCII < 32) pass through;
# every other character is shifted by |ascii - key|. The result replaces the
# input file in place.
path="$1"
file=""
# BUGFIX: the original used a fixed 'temp.txt' in the CWD and only touch'ed
# it, so stale content from a previous run was appended to, and concurrent
# runs collided. mktemp gives a fresh private file.
tmp=$(mktemp)
# Key = number of lines in the input.
key=$(wc -l < "$path")
#echo $key
while IFS= read -r line
do
    for (( i=0; i<${#line}; i++ ));
    do
        char=${line:$i:1}
        # Numeric code point of the character ("'x" printf idiom).
        ascii=$(printf "%d" "'$char")
        if [ "$ascii" -ge 0 ] && [ "$ascii" -lt 32 ]
        then
            # Control characters are copied through unshifted.
            file+=$char
            continue
        else
            # Absolute difference, without spawning the legacy 'expr'.
            if [ "$ascii" -lt "$key" ]
            then
                sub=$((key - ascii))
            else
                sub=$((ascii - key))
            fi
            ch=$(printf "\x$(printf %x "$sub")")
            file+="$ch"
        fi
    done
    # BUGFIX: the original's unquoted `echo $file` collapsed runs of spaces
    # and expanded glob characters in the decrypted text.
    printf '%s\n' "$file" >> "$tmp"
    file=""
done < "$path"
chmod u+w "$path"
mv "$tmp" "$path"
| true
|
bde1f2c3d149a67a8633b47922e701f6a4eaf5e0
|
Shell
|
phoukeo/demo-hipster
|
/hybrid-hipster-deploy.sh
|
UTF-8
| 2,986
| 3.15625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "### "
echo "### Deploying hipster app on Cloud and OnPrem clusters"
echo "### "
# Set vars for DIRs
export HIPSTER_DIR="${PWD}/hipster"
# Set namespace to desired name
export NAMESPACE="hipster"
# Get Istio ingress gateway Ip addresses from both cloud and onprem clusters
export GWIP_CLOUD=$(kubectl --context ${CONTEXT_NAME_1} get -n istio-system service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
export GWIP_ONPREM=$(kubectl --context ${CONTEXT_NAME_2} get -n istio-system service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
# Prepare cloud cluster hipster manifests
# change context to ${CONTEXT_NAME_1} cluster
kubectx ${CONTEXT_NAME_1}
# Prepare the service-entries yaml to add the onprem cluster istio ingress gateway IP
# for all services running in the ${CONTEXT_NAME_2} cluster
export pattern='.*- address:.*'
export replace=" - address: "$GWIP_ONPREM""
# sed -r -i "s|$pattern|$replace|g" ${HIPSTER_DIR}/cloud/service-entries.yaml
sed -i '' "s/${pattern}/${replace}/" ${HIPSTER_DIR}/cloud/service-entries.yaml
echo "### "
echo "### Deploying hipster app on ${CONTEXT_NAME_1} cluster"
echo "### "
# Create ${NAMESPACE} namespace and enable istioInjection on the namespace
kubectl create namespace ${NAMESPACE}
kubectl label namespace ${NAMESPACE} istio-injection=enabled
# Deploy part of hipster app on ${CONTEXT_NAME_1} cluster in the namespace ${NAMESPACE}
kubectl apply -n ${NAMESPACE} -f ${HIPSTER_DIR}/cloud
# Prepare onprem cluster hipster manifests
# change context to ${CONTEXT_NAME_2} cluster
kubectx ${CONTEXT_NAME_2}
# Prepare the service-entries yaml to add the onprem cluster istio ingress gateway IP
# for all services running in the ${CONTEXT_NAME_2} cluster
export pattern='.*- address:.*'
export replace=" - address: "$GWIP_CLOUD""
# sed -r -i "s|$pattern|$replace|g" ${HIPSTER_DIR}/onprem/service-entries.yaml
sed -i '' "s/${pattern}/${replace}/" ${HIPSTER_DIR}/onprem/service-entries.yaml
echo "### "
echo "### Deploying hipster app on onprem cluster"
echo "### "
# Create ${NAMESPACE} namespace and enable istioInjection on the namespace
kubectl create namespace ${NAMESPACE}
kubectl label namespace ${NAMESPACE} istio-injection=enabled
# Deploy part of hipster app on ${CONTEXT_NAME_2} cluster in the namespace ${NAMESPACE}
kubectl apply -n ${NAMESPACE} -f ${HIPSTER_DIR}/onprem
| true
|
02333aa539d6e42e1eb18e2eb8aec3b71ffb9073
|
Shell
|
xpotronix/xpotronix-core
|
/projects/examples/xpotronize-all.sh
|
UTF-8
| 124
| 2.625
| 3
|
[] |
no_license
|
# Transform every non-hidden application directory in the current directory.
# Bug fix: the original iterated over backticked `find` output with a for
# loop, which word-splits (and glob-expands) directory names containing
# spaces. A NUL-delimited find/read pipeline handles any name safely.
find . -maxdepth 1 -type d \( ! -iname ".*" \) -print0 |
while IFS= read -r -d '' app_dir; do
    echo "transformando aplicacion $app_dir ..."
    xpotronize "$app_dir" -f
done
| true
|
eec1a43f94ef556a0678339b584dff5248cd8ae0
|
Shell
|
wangfuli217/ld_note
|
/cheatsheet/团队管理的心得.sh
|
UTF-8
| 1,466
| 2.734375
| 3
|
[] |
no_license
|
团队管理的心得
1. 团队的流程建设;人才的梯队建设
2. 鼓励向上沟通。多进行内部沟通。
3. 安排任务的方法,总结的挺好:
第一,讲清楚目标与意义,我的期望与衡量标准;
第二,讲清楚做这件事对其个人能力成长有什么帮助;
第三,关于做事情的方法和思路,如何去做,做情景管理,因人而异;
总结:
1,完美之道,不在无可增加,而在于无可删减。在管理上,感觉很忙,琐事很多,而没有成就感,重要的事情没有做,
是不是因为自己“做”得太多?确实要反思自己是不是管的太多,做得太多。关键还没有做到点上。所以要对自己的
事情进行统计,看那些事情可以授权给别人完成,信任别人完成,给别人机会,不是把所有事情都抓在手里,这是
没有安全感的表现。
2,关于及早发布,及早试错,对我们行业不适合。我们行业是企业市场,一个单子做不好可能丢掉的不是一个客户,
而是一个行业。容不得我们犯错。
管理者的目标是提高整体的效率,而非自己解决问题
1. "当我编码的时候,是对公司贡献最小的时候。"
2. 研发负责人更重要的任务是提升研发整体的资源利用率。打造一个高效的研发团队。
3. 即时启动新的项目,明确项目目标,提升人力资源使用率,加强技术评审。
| true
|
8008e1c4db111592ed9154475af4b36824c9fc45
|
Shell
|
YodaEmbedding/frece
|
/examples/dir_update.sh
|
UTF-8
| 415
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Rebuild the frece directory-frecency database from a fresh filesystem scan
# of the paths given on the command line, skipping hidden directories.
data_dir="${XDG_DATA_HOME:-$HOME/.local/share}/frece"
db_file="$data_dir/dir.db"
entries_file="/tmp/frece_dir_entries.txt"

# Ensure the data directory exists (mkdir -p is a no-op if it already does).
mkdir -p "$data_dir"

# Collect every visible directory, pruning hidden subtrees, sorted for frece.
find "$@" -path '*/\.*' -prune -o -not -name '.*' -type d -print \
    | sort > "$entries_file"

# Update the database in place if one exists; otherwise initialise it.
if [ -f "$db_file" ]; then
    frece update "$db_file" "$entries_file" --purge-old
else
    frece init "$db_file" "$entries_file"
fi
| true
|
e2a875398ab83bfe5e100eddb06846a34c67196b
|
Shell
|
mjuric/ipython_eups
|
/bin/ipython-eups
|
UTF-8
| 4,088
| 3.9375
| 4
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
#!/bin/bash
# ==========================================================================
#
# Copyright 2015 Mario Juric <mjuric@astro.washington.edu> and others.
#
# See the COPYRIGHT file at the top-level directory of this distribution and
# at http://github.com/mjuric/ipython_eups/COPYRIGHT
#
# This file is part of ipython_eups. It is subject to the license terms in
# the LICENSE file found in the top-level directory of this distribution and
# at http://github.com/mjuric/ipython_eups/LICENSE. No part of
# ipython_eups, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE file.
#
# IPython executable name (because it could be ipython-2.7, for example...),
# but ignoring any -eups suffix (i.e. ipython-eups -> ipython).
IPY_NAME=$(basename "${0%-eups}")
SELFMAGIC="__ipython_eups_trampoline_script_file__"

# Find the real IPython executable: walk every $IPY_NAME on PATH and keep the
# first one that is neither this script itself nor another packed copy of it.
# Fix: `read -r` so backslashes in unusual install paths are not mangled.
while IFS='' read -r IPYTHON; do
    # Skip over self
    cmp -s "$0" "$IPYTHON" && continue
    # Skip over any file that looks like an older (or packed) version of self
    grep -q "$SELFMAGIC" "$IPYTHON" && continue
    # Found acceptable IPython
    break
done < <(which -a "$IPY_NAME")

# What's the variable that controls the dynamic linker lookup path?
# (macOS uses DYLD_LIBRARY_PATH; everything else uses LD_LIBRARY_PATH.)
case $(uname) in
    Darwin)
        LDLP_NAME='DYLD_LIBRARY_PATH'
        ;;
    *)
        LDLP_NAME='LD_LIBRARY_PATH'
        ;;
esac
cleanup_temp_linkfarm()
{
    # Cautiously dismantle the temporary linkfarm rooted at
    # $IPYTHON_EUPS_LIB_LINKFARM: empty and remove the real directory behind
    # the lib/ symlink, drop the extracted Python module, then remove the
    # linkfarm root itself.
    local libdir="$IPYTHON_EUPS_LIB_LINKFARM/lib"
    local pydir="$IPYTHON_EUPS_LIB_LINKFARM/python"
    local resolved_dir

    if [[ -L "$libdir" ]]; then
        # Remove directory contents (expecting only symlinks!)
        find "$libdir/" -type l -delete
        # Bug fix: the original ran `$(cd $LIBDIR/; pwd -P)` — unquoted, and
        # with ';' a failed cd would silently resolve to the *current*
        # directory, which would then be rmdir'ed. Quote and use '&&'.
        resolved_dir=$(cd "$libdir/" && pwd -P)
        rmdir "$resolved_dir"/          # Remove the directory itself
        rm -f "$libdir"                 # Remove the symlink
    fi

    if [[ -d "$pydir" ]]; then
        # Python directory for the extracted eups_magic module
        rm -f "$pydir"/eups_magic.py*
        rmdir "$pydir"
    fi

    # Remove the linkfarm root directory
    rmdir "$IPYTHON_EUPS_LIB_LINKFARM"
}
# ---------------------------------------------------------------------------
# Linkfarm setup: create (once) a temp dir whose lib/ subdirectory is placed
# at the front of the dynamic-linker search path, so shared libraries can be
# symlinked in while IPython runs. A child re-invocation inherits the env var
# and skips re-creation — only the creating process registers the cleanup.
# ---------------------------------------------------------------------------
# Create the linkfarm dir, prepend it to LD_LIBRARY_PATH
if [[ -z $IPYTHON_EUPS_LIB_LINKFARM ]]; then
# Create a temporary linkfarm directory. The linkfarm will be in the lib/
# subdirectory of this directory.
export IPYTHON_EUPS_LIB_LINKFARM="$(mktemp -d -t ipython-eups-linkfarm.XXXXX)"
# Cleanup on exit
trap "{ cleanup_temp_linkfarm; }" EXIT
fi
#echo IPYTHON_EUPS_LIB_LINKFARM=$IPYTHON_EUPS_LIB_LINKFARM
# Indirect assignment: $LDLP_NAME holds the *name* of the linker-path variable
# (LD_LIBRARY_PATH or DYLD_LIBRARY_PATH, chosen above), and ${!LDLP_NAME}
# reads that variable's current value.
export $LDLP_NAME="$IPYTHON_EUPS_LIB_LINKFARM/lib:${!LDLP_NAME}"
#echo $LDLP_NAME=${!LDLP_NAME}
# Check if the eups_magics.py module has been pasted to the end of ourselves
# NOTE(review): EXTRACTPY is assigned but never read in this section —
# possibly vestigial; confirm against the full script before removing.
EXTRACTPY=0
# The marker is split into two concatenated literals so the grep below does
# not match this very assignment line.
MAGIC="EMBEDDED""_PYTHON_MODULE----------"
if grep -q "$MAGIC" "$0"; then
# Extract the Python module into $IPYTHON_EUPS_LIB_LINKFARM/python
PYDIR="$IPYTHON_EUPS_LIB_LINKFARM/python"
mkdir -p "$PYDIR"
# Do the extraction;
# locate the first marker line in $0, then copy everything after it.
STARTLINE=$(grep -n "$MAGIC" "$0" | head -n 1 | cut -d : -f 1)
(( STARTLINE += 1 ))
tail -n +"$STARTLINE" "$0" > "$PYDIR/eups_magic.py"
# Add PYDIR to PYTHONPATH
export PYTHONPATH="$PYDIR:$PYTHONPATH"
fi
# We'll trap and disable SIGINT (CTRL-C handling). IPython is typically
# exited by hitting CTRL-C twice (or more...). The second CTRL-C gets
# passed down to us. If this is a packed script, this CTRL-C (or one of
# the subsequent ones) may interrupt the 'exit' that separated the
# body of the bash script and the embedded python code (see
# distrib/make_distrib.sh for details). When that happens, bash attempts
# to execute Python as bash and reports errors to the terminal. These are
# potentially confusing to the user.
#
# To avoid that, we trap CTRL-C at this point. We cannot disable it,
# however, as child processess inherid signal handlers, and if started
# with SIGINT disabled they cannot it re-enable. We therefore trap it
# into a code snippet that disables it the first time it's caught (and
# it can only get caught once we've exited IPython, **or** if the user
# hits CTRL-C in the short interval between setting the trap and
# executing IPython (which is a race condition we'll have to live with).
trap '{ trap "" SIGINT; }' SIGINT
# Run the real IPython (found earlier on PATH), forwarding all arguments.
"$IPYTHON" "$@"
| true
|
da97fc7da2bf07c72f91c831acfbcff5ab853a9f
|
Shell
|
fab-du/dotfiles
|
/install/neovim
|
UTF-8
| 190
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Neovim from the neovim-ppa/unstable PPA unless it is already present.
#
# Bug fixes vs. the original:
#   * `[[ $ret=="0" ]]` (no spaces) is a single non-empty word, so the test
#     was always true regardless of whether nvim existed; and
#   * `... && echo "..."; exit 0` runs `exit 0` unconditionally, so the
#     script always exited before ever installing anything.
if command -v nvim >/dev/null 2>&1; then
    echo "Neovim already installed"
    exit 0
fi

sudo add-apt-repository ppa:neovim-ppa/unstable
sudo apt-get update
sudo apt-get install neovim -y
| true
|
9641043ed5b623715185af066d34f6fb8ba729ac
|
Shell
|
nardstorm/lab_2
|
/lab2_scripts.sh
|
UTF-8
| 881
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Authors : Joseph McSoud
# Date: 2/1/2019
#Problem 1 Code:
#Make sure to document how you are solving each problem!

# Prompt for a user-supplied pattern and file, then grep with them.
echo "Enter regular expression, and then enter file name: "
read -r regex
echo "now file name:"
read -r file
# Bug fix: the original ran `grep regex file`, searching for the literal
# string "regex" in a file literally named "file" instead of using the
# values just read. Quote both and use -- so a pattern beginning with '-'
# is not mistaken for an option.
grep -- "$regex" "$file"
echo
echo "Phone number occurences in regex_practice.txt: "
grep -c '[0-9]\{3\}-[0-9]\{3\}-[0-9]\{4\}' regex_practice.txt
echo
echo "Email occurences in regex_practice.txt: "
grep -c '@[[:alnum:]].[[:alnum:]]' regex_practice.txt
echo
echo "Now writing phone numbers to file"
grep '303-[0-9]\{3\}-[0-9]\{4\}' regex_practice.txt > phone_results.txt
echo
echo "Now writing @geocities.com emails to file"
grep '[[:alnum:]]@geocities.com' regex_practice.txt > email_results.txt
echo
echo "Now searching for command line argument in regex_practice.txt"
# Quote "$1" so a pattern with spaces or glob characters reaches grep intact.
grep -- "$1" regex_practice.txt > command_results.txt
echo
echo "results written to command_results.txt"
| true
|
c422efa93fd2c082dc6f7e2f511b41d8e68a526e
|
Shell
|
rkswlrbduf/GloballyDynamic
|
/.circleci/bootstrap_env
|
UTF-8
| 830
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Append environment-bootstrap commands to CircleCI's ${BASH_ENV} so every
# subsequent step gets the secrets dirs, PATH entry, and decoded credentials.
set -e

# Resolve the directory containing this script so helper binaries that live
# next to it can be added to PATH. Quoted: the path may contain spaces.
script=$(realpath "$0")
script_dir=$(dirname "${script}")

# Fail fast with a clear message if BASH_ENV is missing; an empty value would
# otherwise surface only as an "ambiguous redirect" error below.
: "${BASH_ENV:?BASH_ENV must be set}"

# Group all the appended lines under a single redirection.
{
  echo "mkdir -p \${HOME}/secrets"
  echo "mkdir -p \${HOME}/data"
  echo "export PATH=${script_dir}:\$PATH"
  echo "export GCLOUD_CREDENTIALS_FILE=\${HOME}/secrets/gcloud_credentials.json"
  echo "export GLOBALLY_DYNAMIC_DEVICE_PATH=\${HOME}/data/device_spec.json"
  echo "export GLOBALLY_DYNAMIC_KEYSTORE_PATH=\${HOME}/secrets/globallydynamic.keystore"
  echo "echo \"\${GOOGLE_CLOUD_KEY}\" > \"\${GCLOUD_CREDENTIALS_FILE}\""
  echo "echo \"\${GLOBALLY_DYNAMIC_DEVICE_SPEC}\" > \"\${GLOBALLY_DYNAMIC_DEVICE_PATH}\""
  echo "echo \"\${GLOBALLY_DYNAMIC_KEYSTORE_VALUE}\" | base64 -d > \"\${GLOBALLY_DYNAMIC_KEYSTORE_PATH}\""
} >> "${BASH_ENV}"
| true
|
5d359b7b1751d6b1d52c90a71555bd8c0f1a8786
|
Shell
|
loudej/blueprints
|
/utils/testutils/krmt_helpers.sh
|
UTF-8
| 4,311
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Abort on any command failure, including failures inside pipelines.
set -o errexit
set -o pipefail
# krmt_run_all runs all tests
# (serially: -p=1, with no per-test timeout)
# NOTE(review): pushd has no matching popd — the shell stays in
# test/integration after this returns; confirm callers rely on that.
krmt_run_all(){
pushd test/integration
go test ./... -v -timeout 0 -p=1
}
# krmt prepares the container for test execution by generating a kubeconfig
# and fetching Go dependencies.
# Arguments: $1 (optional) - project id; falls back to $PROJECT_ID env var.
# Requires:  BILLING_ACCOUNT and FOLDER_ID env vars; CLUSTER_NAME optional.
krmt(){
  # prereqs
  PROJECT_ID=${1:-${PROJECT_ID}}
  if [[ -z "${PROJECT_ID}" ]]; then
    echo "PROJECT_ID flag nor env var not set"
    exit 1
  elif [[ -z "${BILLING_ACCOUNT}" ]]; then
    echo "BILLING_ACCOUNT env var is not set"
    exit 1
  elif [[ -z "${FOLDER_ID}" ]]; then
    echo "FOLDER_ID env var is not set"
    exit 1
  fi

  if [[ -z "${CLUSTER_NAME}" ]]; then
    # Bug fix: the message previously said "CC_NAME env var not set" although
    # the variable actually consulted here is CLUSTER_NAME.
    echo "CLUSTER_NAME env var not set. Finding first available cluster in ${PROJECT_ID}"
    # Resource names look like .../locations/us-central1/krmApiHosts/NAME;
    # cut field 6 extracts NAME.
    CLUSTER_NAME=$(gcloud alpha anthos config controller list --location=us-central1 --project="${PROJECT_ID}" --format="value(name)" --limit 1 --quiet | cut -d/ -f6)
    if [[ -z "${CLUSTER_NAME}" ]]; then
      echo "Unable to find cluster in ${PROJECT_ID}"
      exit 1
    else
      echo "Found ${CLUSTER_NAME}, generating credentials"
      gcloud alpha anthos config controller get-credentials "${CLUSTER_NAME}" --project="${PROJECT_ID}" --location us-central1
    fi
  fi

  # jump to test dir
  pushd test/integration
  # godeps
  go get -t ./...
  # git identity used by tests that commit to scratch repos
  git config --global user.email "blueprints-ci-test@google.com"
  git config --global user.name "BlueprintsTest"
  popd
}
# create_project creates a GCP project (with a default network) under
# $FOLDER_ID and links it to $BILLING_ACCOUNT.
# Uses $CREATE_PROJECT_ID if set; otherwise generates a random project id.
# Sets PROJECT_ID for subsequent helpers.
create_project(){
  if [[ -z "${BILLING_ACCOUNT}" ]]; then
    echo "BILLING_ACCOUNT env var is not set"
    exit 1
  elif [[ -z "${FOLDER_ID}" ]]; then
    echo "FOLDER_ID env var is not set"
    exit 1
  fi

  if [[ -z "${CREATE_PROJECT_ID}" ]]; then
    PROJECT_ID=ci-blueprints-${RANDOM}
    echo "CREATE_PROJECT_ID env var is not set, creating random project-id ${PROJECT_ID}"
  else
    echo "Creating ${CREATE_PROJECT_ID}"
    PROJECT_ID=${CREATE_PROJECT_ID}
  fi

  # Fix: quote ${PROJECT_ID} (and friends) consistently — the original mixed
  # quoted and unquoted expansions, risking word-splitting on malformed ids.
  gcloud projects create "${PROJECT_ID}" --folder="${FOLDER_ID}" --quiet
  gcloud beta billing projects link "${PROJECT_ID}" --billing-account="${BILLING_ACCOUNT}" --quiet
  gcloud services enable compute.googleapis.com --project="${PROJECT_ID}" --quiet

  HAS_DEFAULT_NETWORK=$(gcloud compute networks list --filter="name=default" --format="value(name)" --project="${PROJECT_ID}")
  if [[ -z "${HAS_DEFAULT_NETWORK}" ]]; then
    echo "Default network not found. Creating default network."
    gcloud compute networks create default --subnet-mode=auto --project="${PROJECT_ID}" --quiet
  fi
}
# create_cc creates a Config Controller cluster in us-central1 and grants its
# Config Connector service account owner on the project.
# Arguments: $1 (optional) - project id; falls back to $PROJECT_ID env var.
create_cc(){
  PROJECT_ID=${1:-${PROJECT_ID}}
  if [[ -z "${PROJECT_ID}" ]]; then
    echo "PROJECT_ID flag nor env var not set"
    exit 1
  fi
  CC_NAME="${CC_NAME:-testcc}"
  echo "Creating Config Controller ${CC_NAME} in ${PROJECT_ID}"

  echo "Enabling services"
  gcloud services enable krmapihosting.googleapis.com container.googleapis.com cloudresourcemanager.googleapis.com --project="${PROJECT_ID}"

  echo "Creating CC"
  # Fix: quote ${CC_NAME}/${PROJECT_ID} consistently (original left them bare).
  gcloud alpha anthos config controller create "${CC_NAME}" --location=us-central1 --project="${PROJECT_ID}"
  gcloud alpha anthos config controller get-credentials "${CC_NAME}" --location us-central1 --project="${PROJECT_ID}"

  echo "Configuring CC"
  # ConfigConnectorContext names the Google service account that Config
  # Connector acts as; it needs owner to manage resources in the project.
  export SA_EMAIL="$(kubectl get ConfigConnectorContext -n config-control \
    -o jsonpath='{.items[0].spec.googleServiceAccount}' 2> /dev/null)"
  gcloud projects add-iam-policy-binding "${PROJECT_ID}" \
    --member "serviceAccount:${SA_EMAIL}" \
    --role "roles/owner" \
    --project "${PROJECT_ID}"
}
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.