blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
623718d602cb5115d3f4b96b695800f37cc18d73 | Shell | susannvorberg/contact_prediction | /contact_prediction/run/run_infer_hyperparamters_for_coupling_prior.sh | UTF-8 | 4,647 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env bash
#-------------------------------------------------------------------------------
# load modules
#-------------------------------------------------------------------------------
# Environment-modules setup: Python 2.7 conda env plus the C libraries and
# the contact_prediction package needed by the training script below.
module load anaconda/2
source activate py27
module load C/msgpack
module load C/armadillo
module load contactprediction/contact_prediction
#------------------------------------------------------------------------------
# set up OpenMP with only one thread to make sure that is does not use more
#------------------------------------------------------------------------------
# NOTE(review): despite the header above mentioning "one thread", 16 OpenMP
# threads are requested - this matches "bsub -n 16" / "--nr_threads 16"
# further down, but confirm the comment vs. the value.
export OMP_NUM_THREADS=16
echo "using " $OMP_NUM_THREADS "threads for omp parallelization"
#------------------------------------------------------------------------------
# command line arguments
#------------------------------------------------------------------------------
# $1 - coupling method (e.g. ccmpred-pll-centerv)
# $2 - number of mixture components for the coupling prior
# $3 - sigma / covariance model (e.g. diagonal)
method=$1
nr_components=$2
sigma=$3
# $DATA and $PLOTS are expected to be exported by the environment (loaded
# via the contactprediction module).
echo "data dir: "$DATA
echo "plot dir: "$PLOTS
#------------------------------------------------------------------------------
# example call
#------------------------------------------------------------------------------
#bash ~/opt/contactprediction/contact_prediction/run/run_infer_hyperparamters_for_coupling_prior.sh ccmpred-pll-centerv 3 diagonal
#bash ~/opt/contactprediction/contact_prediction/run/run_infer_hyperparamters_for_coupling_prior.sh ccmpredpy_cd_gd 3 diagonal
#------------------------------------------------------------------------------
# start script
#------------------------------------------------------------------------------
# For each training-set size: create the output directories, assemble the
# argument list for infer_hyperparameters_for_coupling_prior.py and submit
# a chain of 7 dependent LSF jobs (each restart resumes after the previous
# job is killed by the queue's runtime limit, exit code 140).
for nrcontacts in 10000 100000 300000; # 500000; # 500000 300000; #10000 50000 100000 200000;
do
PARAM_DIR=$DATA"/bayesian_framework/mle_for_couplingPrior_cath4.1/"$method"/"$nr_components"/reg_prec100_mu01/"$sigma"_"$nrcontacts"_nrcomponents"$nr_components"_noncontactthr25/"
PLOT_DIR=$PLOTS"/bayesian_framework/mle_for_couplingPrior_cath4.1/"$method"/"$nr_components"/reg_prec100_mu01/"$sigma"_"$nrcontacts"_nrcomponents"$nr_components"_noncontactthr25/"
# Quoted mkdir arguments keep directory creation safe should the paths
# ever contain whitespace.
if [ ! -d "$PARAM_DIR" ]; then
mkdir -p "$PARAM_DIR"
fi
if [ ! -d "$PLOT_DIR" ]; then
mkdir -p "$PLOT_DIR"
fi
echo "method: "$method
echo "sigma: "$sigma
echo "nr contacts: "$nrcontacts
echo "param dir : "$PARAM_DIR
echo "plot dir : "$PLOT_DIR
###careful!
# Skip settings that already finished successfully.  The -f guard keeps
# grep from printing "No such file or directory" on a first run, and
# printf replaces the old `echo "...\n"` which printed a literal
# backslash-n instead of the intended blank line.
if [[ -f "$PARAM_DIR/parameters.settings" ]] && grep -q "success" "$PARAM_DIR/parameters.settings"; then
printf 'Method %s already successfully finished!\n\n' "$method"
continue
fi
#------- paths to data
settings="-o "$PLOT_DIR
settings=$settings" -p "$PARAM_DIR
settings=$settings" -b $DATA/benchmarkset_cathV4.1/contact_prediction/$method/braw/"
settings=$settings" -q $DATA/benchmarkset_cathV4.1/contact_prediction/$method/qij/"
settings=$settings" -a $DATA/benchmarkset_cathV4.1/psicov/"
settings=$settings" -s $DATA/benchmarkset_cathV4.1/pdb_renum_combs/"
#------- data
settings=$settings" --nr_crossval_pairs 10000"
settings=$settings" --nr_training_pairs "$nrcontacts
settings=$settings" --max_gap_percentage 0.5"
settings=$settings" --filter_gap_columns"
settings=$settings" --filter_pairs_by_Nij"
settings=$settings" --maxcontacts_per_protein 500"
settings=$settings" --maxnoncontacts_per_protein 1000"
settings=$settings" --diversity_thr 0.3"
settings=$settings" --non_contact_thr 25"
settings=$settings" --balance 1"
#------- model
settings=$settings" --sigma "$sigma
settings=$settings" --fixed_parameters weight_bg_0,weight_contact_0,mu_0"
settings=$settings" --nr_components "$nr_components
settings=$settings" --reg_coeff_mu 0.1"
settings=$settings" --reg_coeff_diagPrec 100"
#------- general settings
settings=$settings" --debug_mode 0"
settings=$settings" --nr_threads 16"
#start job 7 times after another, once it is finished because exceeding runtime limit (exit code 140)
# $settings is intentionally unquoted below so it word-splits into the
# individual command-line arguments.
jobname_prev=couplingprior.$method.$nrcontacts.$sigma.nrcomp$nr_components.0
bsub -W 96:00 -q mpi-long -m "mpi mpi2 mpi3_all hh sa" -n 16 -R span[hosts=1] -a openmp -J $jobname_prev -o job-$jobname_prev-%J.out python $CONTACT_PREDICTION_PATH/coupling_prior/infer_hyperparameters_for_coupling_prior.py $settings
# Chain six follow-up jobs; each is released only once its predecessor
# has exited (bsub -w "exit(...)"), resuming after the runtime limit.
for n in $(seq 1 6); do
jobname=couplingprior.$method.$nrcontacts.$sigma.nrcomp$nr_components.$n
bsub -W 96:00 -q mpi-long -w "exit('$jobname_prev')" -m "mpi mpi2 mpi3_all hh sa" -n 16 -R span[hosts=1] -a openmp -J $jobname -o job-$jobname-%J.out python $CONTACT_PREDICTION_PATH/coupling_prior/infer_hyperparameters_for_coupling_prior.py $settings
jobname_prev=$jobname
done
done
| true |
dc606ff698a12b43c2da1945eab34a851ce74a62 | Shell | ShoupingShan/Shell | /train7/exam_文件的格式化输出.sh | UTF-8 | 1,341 | 3.8125 | 4 | [] | no_license | #! /bin/bash
# The :<<BLOCK sections below are heredoc-style block comments (the text is
# fed to the no-op ":" command).  Originals are in Chinese; English
# translations are added above each block.

# BLOCK (translation): Using UNIX tab characters - tabs align text in
# vertical columns without needing an actual table.
:<<BLOCK
使用UNIX制表符,制表符的功能是在不适用表格的情况下,在垂直方向上按列对齐文本.
BLOCK
# Nested double loop printing a multiplication table (example, disabled).
# for((i=1;i<10;i++))
# do
# for((j=1;j<i;j++))
# do
# echo -n -e "$i*$j\t"
# done
# echo ""
# done
# BLOCK (translation): Format lines with fold - lines longer than the given
# width are wrapped onto the next line.  fold [options] [file..]
#   -b  count width in bytes (by default fold counts columns)
#   -s  break lines at spaces
#   -w  set the width (default: 80 columns)
:<<BLOCK
使用fold命令格式化行,将超过指定宽度的文本行进行折叠处理,使得超过指定宽度的
字符转到下一行输出. fold [options] [file..]
-b : 按字节计算宽度.默认情况下,fold命令按照列来计算宽度
-s : 在空格处折断行
-w : 指定宽度,默认值是80列
BLOCK
# fold -w 100 demo.txt
# fold -s -w 100 demo.txt>formatdtext.txt
# BLOCK (translation): Format paragraphs with fmt; a file name of "-" makes
# fmt read text from the keyboard.  fmt [-width] [option]... [file]...
:<<BLOCK
使用fmt命令格式化段落
fmt [-width] [option]... [file]...
如果指定的文件名为-,则表示fmt将会从键盘上读取文本
BLOCK
# Specify the line length (example, disabled).
# str=`fmt -s -c -w 80 harry.txt`
# echo "$str"
# BLOCK (translation): Reverse the character order with rev (per line).
:<<BLOCK
使用rev命令反转字符顺序(按行反转)
rev [file...]
BLOCK
# str=`rev hello.txt`
# echo "$str"
# BLOCK (translation): Format text pages with pr.  pr [option]... [file]...
:<<BLOCK
使用pr命令格式化文本页
pr [option]... [file]...
BLOCK
# Format a text page, splitting the file content into columns.
# NOTE(review): the original comment said "four columns", but "pr -2"
# produces two.
# str=`pr -2 hello.txt`
# echo "$str"
# Custom page header (-h), columns laid out horizontally (-a), and a form
# feed (-f) instead of newlines between pages.
str=`pr -h "List of countries" -a -f -2 hello.txt`
echo "$str"
echo
| true |
cde1eec9b3807e24597e5e02a19a4f4ab7efabcb | Shell | Ventero/FRESteamWorks | /config_example.sh | UTF-8 | 853 | 3.15625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive"
] | permissive | # To set up your config on OS X or Linux, copy this file to config.sh and
# change the values below accordingly. All paths must be absolute paths!
# On Linux, remember to wrap all variables in ${}, so that they can
# be used in the Makefile.
# (This file is sourced by the build scripts, so plain VAR="value" shell
# assignments are expected here - no "export" is needed.)
# Path to the main AIR SDK directory, so that $AIR_SDK/bin/adt exists.
AIR_SDK="${HOME}/Coding/AdobeAIRSDK"
# Path to the Flex SDK, so that $FLEX_SDK/bin/compc exists.
# Might be identical to AIR_SDK if you use an overlayed SDK.
FLEX_SDK="${HOME}/Coding/FlexSDK"
# Path to the Steam SDK, so that $STEAM_SDK/redistributable_bin exists.
STEAM_SDK="${HOME}/Coding/SteamSDK"
# FTP path to upload the built binaries to when running builds/build.sh.
# Version number gets appended to the path. Binaries will not be uploaded
# if left commented or set to an empty value.
#UPLOAD_URL="ftp://example.org/FRESteamWorks"
| true |
5282d88308d9b0bd6ee1f1c55a8e254524b3b58c | Shell | komi-jangra/A_TEST | /Source/Oams/TestStub/Psscm/UT/Scripts/Psscm_Ut/takeLogs.sh | UTF-8 | 403 | 2.703125 | 3 | [] | no_license | rm -rf $TESTSTUBS_PATH/PRC_LOG/DUMPER.txt
# Collect PSSCM unit-test logs: snapshot DUMPER.txt into PRC_LOG, archive
# PRC_LOG as LOG_PSSCM_UT_TC_<tc>_<run>.tgz under $C_PATH/<date>/, and
# append the test result to the cumulative verdict file.
#
# Arguments: $1 - test case id, $2 - run/iteration id
# Requires:  $TESTSTUBS_PATH and $C_PATH exported by the caller.
cp -rf "$TESTSTUBS_PATH/DUMPER.txt" "$TESTSTUBS_PATH/PRC_LOG/"
cd "$C_PATH" || exit 1
# One archive folder per day.  (The old unused DATE=%F%H%M variable was
# dropped; only the date-only stamp was ever used.)
DATE1=$(date +%F)
# -p: do not fail when the day's folder already exists from an earlier run.
mkdir -p "./$DATE1"
cd "$TESTSTUBS_PATH" || exit 1
archive="LOG_PSSCM_UT_TC_$1_$2"
cp -R PRC_LOG "$archive"
tar -cvzf "$archive.tgz" "$archive"
rm -fr "$archive"
mv "$archive.tgz" "$C_PATH/$DATE1/"
cd "$C_PATH" || exit 1
cat result.txt >> Verdict.txt
| true |
d9d4d71ef194cf0090d6efa1abdcc86e07dc6e46 | Shell | sagarbirla/packer | /create_ami.sh | UTF-8 | 940 | 3.921875 | 4 | [] | no_license | #!/bin/bash
#
# Build a test AMI with Packer.
#
# Usage: ./create_ami.sh [subnet_id] [sg_id]
#
# TODO
# get a specific AMI id or assume the latest as default
# ask used to provide a path of the packer conf file
# pass the remaining args to packer
export PATH="$PATH:."
ME=$(basename "$0")

# Accept at most the two optional networking arguments.
# NOTE(review): the original guard rejected *any* argument yet read $1/$2
# below, so subnet_id/sg_id could never actually be supplied.
if (( $# > 2 )) ; then
  echo "Usage: ./${ME} [subnet_id] [sg_id]"
  exit 1
fi

# All three CLI tools must be present before doing any work.
which aws > /dev/null || HAS_PREREQS="false"
which packer > /dev/null || HAS_PREREQS="false"
which jq > /dev/null || HAS_PREREQS="false"

if [[ $HAS_PREREQS == "false" ]] ; then
  echo "Could not find one or more of awscli/packer/jq in the PATH."
  echo "Please ensure that you have all of these in your path."
  exit 1
else
  echo "Found awscli and packer in the PATH."
fi

subnet_id=$1
sg_id=$2

# Build the packer invocation as an argv array and pass PACKER_LOG as a
# real environment assignment.  The old string form
#   PACKER_CMD="PACKER_LOG=1 packer build ..." ; $PACKER_CMD
# made the shell look for a command literally named "PACKER_LOG=1".
PACKER_CMD=(packer build create_test_ami.packer)
[[ -n $subnet_id ]] && PACKER_CMD+=("$subnet_id")
[[ -n $sg_id ]] && PACKER_CMD+=("$sg_id")
echo "Building AMI"
echo "PACKER_LOG=1 ${PACKER_CMD[*]}"
PACKER_LOG=1 "${PACKER_CMD[@]}"
if [[ $? != 0 ]] ; then
  echo "Packer command failed. Exiting."
  exit 1
fi
| true |
fee1c1cb96b8cd8f4827667826fb7a26ded5f626 | Shell | ralfmaxxx/capi_php_sdk | /vagrant/config/php.sh | UTF-8 | 343 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env bash
# Install the CLI, XML and mbstring packages for every supported PHP version.
install_multiple_php_versions() {
    local packages=()
    local version
    for version in 5.5 5.6 7.0; do
        packages+=("php${version}-cli" "php${version}-xml" "php${version}-mbstring")
    done
    apt-get install -y "${packages[@]}"
}
# Download the Composer installer, run it through PHP, then move the
# resulting composer.phar onto the PATH so "composer" works system-wide.
install_composer_globally() {
    local installer_url='https://getcomposer.org/installer'
    curl -sS "$installer_url" | php
    mv composer.phar /usr/local/bin/composer
}
| true |
3a290ffa8223dfb09da8162fc71bf520f7297040 | Shell | ildarf/scripts | /focus_intellij | UTF-8 | 622 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# i3 helper: toggle focus between IntelliJ IDEA and the previously focused
# window.  Bound to a key (e.g. mod4+b): if IDEA currently has focus, jump
# back to the window whose id was stashed in ~/.config/i3/previous_id;
# otherwise remember the current window id and focus IDEA by class.

# Active window id, e.g. "0x1c00003" (second tab-separated field of xprop).
current_id=$(xprop -root 32x '\t$0' _NET_ACTIVE_WINDOW | cut -f 2)
# Window title, extracted from: WM_NAME(STRING) = "some title"
current_name=$(xprop -id "$current_id" WM_NAME | awk -F'"' '{print $2}')

# IDEA window titles look like "<project> - IntelliJ IDEA <version>".
if [[ "$current_name" =~ .*\ -\ IntelliJ\ IDEA\ .* ]]
then
	# IDEA is focused: jump back to the saved window.
	# NOTE(review): if the state file does not exist yet, previous_id is
	# empty and i3-msg gets "[id=]" - same (harmless) behaviour as before.
	previous_id=$(cat ~/.config/i3/previous_id)
	i3-msg "[id=$previous_id]" focus
else
	# Something else is focused: remember it, then focus IDEA.
	printf '%s\n' "$current_id" > ~/.config/i3/previous_id
	i3-msg '[class=jetbrains-idea-ce]' focus
fi
| true |
a84acf67e914de8468075972cd967737ee56a332 | Shell | huoyu820125/idstar | /id-star-service/bin/node1/stop.sh | UTF-8 | 195 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Stop the id-star service listening on port 8225.
# Find the PID(s) of the running jar; "grep -v grep" drops the grep process
# itself from the ps listing.
pid=$(ps aux | grep './id-star-service-0.0.1-SNAPSHOT.jar --server.port=8225' | grep -v grep | awk '{print $2}')
# -n "$pid": the old unquoted `[ $pid ]` broke with "[: too many arguments"
# whenever more than one matching process was found.
if [ -n "$pid" ]
then
echo "stop id-region $pid"
# $pid is intentionally unquoted: multiple whitespace-separated PIDs must
# become separate kill arguments.
kill $pid
fi
| true |
60f76092e6e06a0c753b4306ab371a6020e3e9f6 | Shell | ueokande/shvm | /test/main/definition_test.sh | UTF-8 | 141 | 2.5625 | 3 | [
"MIT"
] | permissive | source $(dirname $BASH_SOURCE)/../test_helper.sh
# Verify that "shvm" is exposed as a shell function once the test helper
# has sourced the implementation.
# Uses the DSL from ../test_helper.sh:
#   subject      - runs a command and captures its output into $stdout
#   assert_match - fails the test case unless $stdout matches the pattern
testcase_shvm_is_function() {
  subject type -t shvm
  assert_match 'function' "$stdout"
}
| true |
a4c06ad4c080da14e34ac4709f2568454b088439 | Shell | KaOSx/buildsystem | /scripts/enter-chroot.sh | UTF-8 | 5,298 | 3.890625 | 4 | [] | no_license | #!/bin/bash
#
# Enter-Chroot Script
# to handle mounting/unmounting special directories
# and extra sanity checks
#
# version
VER="1.1"
# global vars
# user.conf provides per-packager settings (e.g. $_packer used below).
source _buildscripts/user.conf
# Requested repo/chroot name from $1, with any slashes stripped.
_chroot=$(echo $1 | sed -e 's,/,,g')
# Candidate chroot dirs in the cwd: names containing "86" (i.e. *-i686 /
# *-x86_64).  NOTE(review): this parses `ls` output - acceptable for the
# well-formed names used here, but a glob would be more robust.
_chroots=$(ls -1 $(pwd) | grep -e 86 | sed 's,/,,g')
# Repo name without the architecture suffix, e.g. "apps-x86_64" -> "apps".
_chroot_branch=$(echo ${_chroot} | sed "s/-i686//g" | sed "s/-x86_64//g")
_user=$(whoami)
# Architecture tag used for the package-cache dirs: x32 unless the chroot
# name matches *x*64* (e.g. x86_64).
_carch="x32"
[[ ${_chroot} = *x*64* ]] && _carch="x64"
# formated output functions
# Print an error message in red, formatted like " :: <message>".
# $1 is passed as a printf *argument* (not embedded in the format string),
# so messages containing % or backslash sequences print verbatim.
error() {
    printf "\033[1;31m ::\033[1;0m\033[1;0m %s\033[1;0m\n" "$1"
}
# Print a status message in green, formatted like " :: <message>".
# $1 is passed as a printf argument rather than interpolated into the
# format string, preventing %-sequence injection.
msg() {
    printf "\033[1;32m ::\033[1;0m\033[1;0m %s\033[1;0m\n" "$1"
}
# ---------------------------------------------------------------------------
# Sanity check: a repository name is mandatory.
# ---------------------------------------------------------------------------
if [ "$_chroot" == "" ] ; then
echo " "
error "you should specify a repository!"
error "available repos:\n\n${_chroots}"
exit
fi
# don't forget which chroot you are entering.. ;)
clear
msg "Packager's Enter Chroot Script v$VER"
msg "Entering chroot..."
sleep 1
msg "Repository: ${_chroot} (${_chroot_branch})" # Example: apps-x86_64 (apps)
sleep 1
msg "User: ${_user}"
sleep 2
if [ -d ${_chroot} ] ; then
# Rewrite the per-repo makepkg.conf: enable PKGDEST/SRCDEST/PACKAGER and
# point them at this branch's build tree and the configured packager.
sed -i -e s,#PKGDEST,PKGDEST,g _buildscripts/${_chroot}-makepkg.conf
sed -i -e s,#SRCDEST,SRCDEST,g _buildscripts/${_chroot}-makepkg.conf
sed -i -e s,#PACKAGER,PACKAGER,g _buildscripts/${_chroot}-makepkg.conf
sed -i -e s,SRCDEST.*,SRCDEST=\"/buildsys/${_chroot_branch}/_sources\",g _buildscripts/${_chroot}-makepkg.conf
sed -i -e s,PACKAGER.*,PACKAGER="\"$_packer\"",g _buildscripts/${_chroot}-makepkg.conf
sed -i -e s#_build_work.*#_build_work=\"/buildsys/${_chroot_branch}/\"#g _buildscripts/${_chroot}-cfg.conf
# Keep the in-chroot ~/.bashrc pointing at the current branch.
sed -i -e s,"_chroot_branch=".*,"_chroot_branch=\"${_chroot_branch}\"",g ${_chroot}/chroot/home/${_user}/.bashrc
sed -i -e s,"cd /buildsys/".*,"cd /buildsys/${_chroot_branch}/",g ${_chroot}/chroot/home/${_user}/.bashrc
# "bundles" repos collect packages in a shared _temp dir; everything else
# builds into the user's home directory.
if [[ "${_chroot}" = bundles* ]] ; then
sed -i -e s,PKGDEST.*,PKGDEST=\"/buildsys/${_chroot_branch}/_temp\",g _buildscripts/${_chroot}-makepkg.conf
else
sed -i -e s,PKGDEST.*,PKGDEST=\"/home/${_user}/build\",g _buildscripts/${_chroot}-makepkg.conf
fi
source _buildscripts/${_chroot}-cfg.conf
echo " "
echo " "
# Bind-mount the special filesystems into the chroot.  Each mount point
# follows the same pattern: mount if absent, otherwise remount (umount
# then mount) to get a fresh bind.
if [ "$(mount | grep ${_chroot}/chroot/dev)" == "" ] ; then
sudo mount -v /dev ${_chroot}/chroot/dev --bind &>/dev/null
else
sudo umount -v ${_chroot}/chroot/dev &>/dev/null
sudo mount -v /dev ${_chroot}/chroot/dev --bind &>/dev/null
fi
if [ "$(mount | grep ${_chroot}/chroot/sys)" == "" ] ; then
sudo mount -v /sys ${_chroot}/chroot/sys --bind &>/dev/null
else
sudo umount -v ${_chroot}/chroot/sys &> /dev/null
sudo mount -v /sys ${_chroot}/chroot/sys --bind &>/dev/null
fi
if [ "$(mount | grep ${_chroot}/chroot/proc)" == "" ] ; then
sudo mount -v /proc ${_chroot}/chroot/proc --bind &>/dev/null
else
sudo umount -v ${_chroot}/chroot/proc &> /dev/null
sudo mount -v /proc ${_chroot}/chroot/proc --bind &>/dev/null
fi
if [ "$(mount | grep ${_chroot}/chroot/var/cache/pacman/pkg)" == "" ] ; then
sudo mount -v _cache-${_carch} ${_chroot}/chroot/var/cache/pacman/pkg --bind &>/dev/null
else
sudo umount -v ${_chroot}/chroot/var/cache/pacman/pkg &> /dev/null
sudo mount -v _cache-${_carch} ${_chroot}/chroot/var/cache/pacman/pkg --bind &>/dev/null
fi
if [ "$(mount | grep ${_chroot}/chroot/dev/pts)" == "" ] ; then
sudo mount -v /dev/pts ${_chroot}/chroot/dev/pts --bind &>/dev/null
else
sudo umount -v ${_chroot}/chroot/dev/pts &>/dev/null
sudo mount -v /dev/pts ${_chroot}/chroot/dev/pts --bind &>/dev/null
fi
if [ "$(mount | grep ${_chroot}/chroot/dev/shm)" == "" ] ; then
sudo mount -v /dev/shm ${_chroot}/chroot/dev/shm --bind &>/dev/null
else
sudo umount -v ${_chroot}/chroot/dev/shm &>/dev/null
sudo mount -v /dev/shm ${_chroot}/chroot/dev/shm --bind &>/dev/null
fi
# Expose the host build tree (scripts, sources, staging repos) inside the
# chroot, plus a working mtab and DNS configuration.
sudo mount _buildscripts/ ${_chroot}/chroot/buildsys/${_chroot_branch}/_buildscripts --bind &>/dev/null
sudo mount _sources/ ${_chroot}/chroot/buildsys/${_chroot_branch}/_sources --bind &>/dev/null
sudo mount _testing-${_carch}/ ${_chroot}/chroot/buildsys/${_chroot_branch}/_testing-${_carch} --bind &>/dev/null
sudo mount _unstable-${_carch}/ ${_chroot}/chroot/buildsys/${_chroot_branch}/_unstable-${_carch} --bind &>/dev/null
sudo cp -f /etc/mtab ${_chroot}/chroot/etc/mtab &>/dev/null
sudo cp -f /etc/resolv.conf ${_chroot}/chroot/etc/resolv.conf &>/dev/null
# actual chroot call (blocking, until exit())
sudo chroot ${_chroot}/chroot su - ${_user}
#/// exit() called, unmount all
# Tear down the bind mounts for *every* known chroot, not just the one
# entered, so stale mounts from earlier sessions are cleaned up too.
for __chroot in ${_chroots}; do
__chroot_name=`echo ${__chroot} | sed "s/-i686//g" | sed "s/-x86_64//g"`
sudo umount -v ${__chroot}/chroot/{dev/shm,dev/pts,dev,sys,proc,var/cache/pacman/pkg} &>/dev/null
sudo umount -v ${__chroot}/chroot/buildsys/${__chroot_name}/{_buildscripts,_sources} &>/dev/null
sudo umount -v ${__chroot}/chroot/buildsys/${__chroot_name}/_testing-${_carch} &>/dev/null
sudo umount -v ${__chroot}/chroot/buildsys/${__chroot_name}/_unstable-${_carch} &>/dev/null
done
else
echo " "
error "the repository ${_chroot} does not exist!"
error "available repos:\n\n${_chroots}"
echo " "
exit 1
fi
| true |
6ce31ba7e2beba04a31e781946e1044ef68de6cc | Shell | karunmatharu/Android-4.4-Pay-by-Data | /build/tools/fixlinebreaks.sh | UTF-8 | 345 | 3.609375 | 4 | [
"MIT",
"Apache-2.0"
] | permissive | #!/bin/sh
#
# Convert EOL convention on source files from CRLF to LF.
#
# Files are streamed from find line-by-line; the old `for file in $FILES`
# form word-split the file list and broke on any path containing spaces.
echo "Scanning..."
echo "Converting..."
find . \( -iname '*.c' -o -iname '*.cpp' -o -iname '*.h' -o -iname '*.mk' -o -iname '*.html' -o -iname '*.css' \) -print |
while IFS= read -r file; do
    echo "$file"
    tr -d '\r' < "$file" > _temp_file
    mv _temp_file "$file"
done
exit 0
| true |
dfb74ab18ec49da512e0f16501e6de001ce77a21 | Shell | minefold/team-fortress-2.funpack | /bin/bootstrap | UTF-8 | 584 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env bash
# this runs once per machine. We use it to download TF2
# the cache-dir is for temporary storage
# the build-dir will be made available to the run command
# ## Usage
#
# $ bin/bootstrap <shared-dir>
# set -ex

# Create the shared directory *before* resolving it to an absolute path.
# (The old order ran `cd $1` first: if the directory did not exist yet the
# subshell's cd failed and `pwd` silently returned the current directory.)
mkdir -p "$1"
SHARED_DIR=$(cd "$1" && pwd) || exit 1
cd "$SHARED_DIR" || exit 1

# Fetch the SteamCMD bootstrap once; later runs reuse the unpacked copy.
if [ ! -f steamcmd.sh ]
then
  curl --silent http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar xz
fi

# App 232250 = Team Fortress 2 dedicated server; retry until the download
# completes, since Steam transfers are frequently interrupted.
until ./steamcmd.sh +login anonymous +force_install_dir ./tf2 +app_update 232250 validate +quit; do
  echo "Transfer disrupted, retrying in 2 seconds..."
  sleep 2
done
| true |
1955836fd5a06f2e255b5155a061fc6aefeb2b61 | Shell | dutchme333/mobile-toolkit | /ios/iinstall | UTF-8 | 437 | 3.5 | 4 | [
"MIT",
"LicenseRef-scancode-other-permissive"
] | permissive | #!/bin/bash
LOCATION=$(dirname "$0")
source "$LOCATION/../common_tools"

# Require the .ipa path as the first argument.
if [ -z "$1" ] ; then
    echo "🤷 What should I install?"
    exit
fi

# Check the path as given, so both relative and absolute paths work.
# (The old test looked at "$PWD/$1" plus an undefined $FILE variable,
# which rejected perfectly valid absolute paths.)
if [ ! -f "$1" ]; then
    echo "🤷 File not found!"
    exit
fi

if [[ "$1" != *".ipa" ]]; then
    echo "🤷 Unsupported file!"
    exit
fi

# ios_choose_device (from common_tools) sets $SELECTED_DEVICE.
ios_choose_device
echo "⌛️ Installing..."
ideviceinstaller -u "$SELECTED_DEVICE" -i "$1" &> /dev/null
echo "✅ Done!"
| true |
1e6b8aabf20bb1e4a61f57dc82b208c57e86c8b4 | Shell | codeborne/ghostdriver | /tools/export_ghostdriver.sh | UTF-8 | 5,381 | 3.796875 | 4 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | #!/bin/bash
#
# This file is part of the GhostDriver by Ivan De Marino <http://ivandemarino.me>.
#
# Copyright (c) 2012-2014, Ivan De Marino <http://ivandemarino.me>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Builds all the atoms that will later be imported in GhostDriver
#
# Here we have a mix of:
#
# * Atoms from the default WebDriver Atoms directory
# * Atoms that were not exposed by the default build configuration of Selenium
# * Atoms purposely built for GhostDriver, still based on the default WebDriver Atoms
#
# Print command-line usage for this script (one blank line, the Usage
# header, the invocation synopsis, one blank line).
usage() {
    printf '%s\n' \
        '' \
        'Usage:' \
        ' export_ghostdriver.sh <PATH_TO_PHANTOMJS_REPO>' \
        ''
}
# Print a highlighted (bold green) progress message: "*** <message>".
# $1 is passed as a printf argument rather than embedded in the format
# string, so messages containing % or backslashes print verbatim.
info() {
    printf "\033[1;32m*** %s\033[0m\n" "$1"
}
# NOTE(review): inside [[ ]], "<" is a *string* comparison; it happens to
# behave correctly for this "$# < 1" check but (( $# < 1 )) would be the
# arithmetic idiom.
if [[ $# < 1 ]]
then
usage
exit
fi
################################################################################
# Resolve all source/destination paths relative to this script and to the
# PhantomJS repo given as $1.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PHANTOMJS_REPO_PATH=$1
DESTINATION_PATH="${PHANTOMJS_REPO_PATH}/src/ghostdriver"
DESTINATION_QRC_FILE="ghostdriver.qrc"
LASTUPDATE_FILE="${DESTINATION_PATH}/lastupdate"
README_FILE="${DESTINATION_PATH}/README.md"
GHOSTDRIVER_SOURCE_PATH="${SCRIPT_DIR}/../src"
TEST_SOURCE_PATH="${SCRIPT_DIR}/../test"
TEST_DESTINATION_PATH="${PHANTOMJS_REPO_PATH}/test/ghostdriver-test"
#1. Delete the Destination Directory, if any
if [ -d $DESTINATION_PATH ]; then
info "Deleting current GhostDriver exported in local PhantomJS source (path: '${DESTINATION_PATH}')"
rm -rf $DESTINATION_PATH
fi
#2. Create the Destination Directory again
info "Creating directory to export GhostDriver into local PhantomJS source (path: '${DESTINATION_PATH}')"
mkdir -p $DESTINATION_PATH
#3. Copy all the content of the SOURCE_DIR in there
info "Copying GhostDriver over ('${GHOSTDRIVER_SOURCE_PATH}/*' => '${DESTINATION_PATH}')"
cp -r $GHOSTDRIVER_SOURCE_PATH/* $DESTINATION_PATH
#4. Generate the .qrc file
# Every exported file (except .qrc files themselves) is listed as a Qt
# resource under the "ghostdriver/" prefix.
info "Generating Qt Resource File to import GhostDriver into local PhantomJS (path: '${DESTINATION_PATH}/${DESTINATION_QRC_FILE}')"
pushd $DESTINATION_PATH
# Initiate the .qrc destination file
echo "<RCC>" > $DESTINATION_QRC_FILE
echo " <qresource prefix=\"ghostdriver/\">" >> $DESTINATION_QRC_FILE
for FILE in `find . -type f | sed "s/.\///"`
do
if [[ $FILE != "." && $FILE != *.qrc ]]; then
echo " <file>${FILE}</file>" >> $DESTINATION_QRC_FILE
fi
done
# Finish the .qrc destination file
echo " </qresource>" >> $DESTINATION_QRC_FILE
echo "</RCC>" >> $DESTINATION_QRC_FILE
popd
#5. Save the Timestamp and Git repo hash to the "lastupdate" file
info "Saving Timestamp and Git repo hash into '${LASTUPDATE_FILE}'"
date +"%Y-%m-%d %H:%M:%S" > $LASTUPDATE_FILE
echo "" >> $LASTUPDATE_FILE
git log -n 1 --decorate=full >> $LASTUPDATE_FILE
#6. Create README file
info "Creating '${README_FILE}'"
cat > $README_FILE <<README_FILE_CONTENT
# PLEASE DON'T CHANGE THIS FILE
This file is auto-generated by export scripts **from GhostDriver to PhantomJS**.
If you want to make changes to GhostDriver source,
please refer to that project instead: \`https://github.com/detro/ghostdriver\`.
Thanks,
[Ivan De Marino](http://ivandemarino.me)
README_FILE_CONTENT
#7. Delete the Test Destination Directory, if any
if [ -d $TEST_DESTINATION_PATH ]; then
info "Deleting current GhostDriver Tests exported in local PhantomJS source (path: '${TEST_DESTINATION_PATH}')"
rm -rf $TEST_DESTINATION_PATH/fixtures
rm -rf $TEST_DESTINATION_PATH/java
fi
#8. Copy all the content of the Test Directory in there
info "Copying GhostDriver Tests over ('${TEST_SOURCE_PATH}' => '${TEST_DESTINATION_PATH}')"
mkdir -p $TEST_DESTINATION_PATH
cp -r $TEST_SOURCE_PATH/fixtures $TEST_DESTINATION_PATH/fixtures
cp -r $TEST_SOURCE_PATH/java $TEST_DESTINATION_PATH/java
#9. Delete all files from Test Destination Directory that are of no use there
# IDE project files, logs and Gradle caches never belong in the export.
info "Delete files from GhostDriver Tests that have no use there"
rm -rf $TEST_DESTINATION_PATH/java/*.iml $TEST_DESTINATION_PATH/java/*.iws $TEST_DESTINATION_PATH/java/*.ipr $TEST_DESTINATION_PATH/java/*.log $TEST_DESTINATION_PATH/java/out $TEST_DESTINATION_PATH/java/.gradle
info "DONE!"
| true |
a31c1fb98d81b4bbbd196d87c5495abb1433b0e7 | Shell | FNNDSC/scripts | /lnchd | UTF-8 | 3,223 | 4.21875 | 4 | [] | no_license | #!/bin/bash
# Built-in help/man text, printed verbatim on -h, on a bad option, or when
# no arguments are given.
G_SYNOPSIS="
NAME
lnchd - change destination of a link.
SYNOPSIS
lnchd [-s <dirSubString>] <newBaseDir> <link1> <link2> ... <linkN>
DESCRIPTION
'lnchd' preserves the relationship between a created link and its
original destination, but replaces the original destination
directory with <newBaseDir>.
It is useful when the original source file(s) have been moved (leaving
the original links to these files dead) or when links need to be
switched to a <newBaseDir> but keep the original end target of the
original link.
'lnchd' preserves the original link name (which differ from the
actual target of the link).
ARGUMENTS
-s <dirSubString>
A sed-friendly regex that will applied to each <link> to create the
new link target string.
EXAMPLES
$>cd /some/dir
$>lnchd /some/new/dir ln1 ln2 ... lnN
In this snippet, /some/dir contains symbolic links along the lines of
ln1 -> /original/dir/target1
ln2 -> /original/dir/target2
...
lnN -> /original/dir/targetN
Assume that the targets (target1, target2, ..., targetN) have all been
moved to a new location, /some/new/dir. All the original links are now
dead. Alternatively, it could be that a copy of the targets now exist
in /some/new/dir and the links need to re-assigned to this new copy.
By running the above lnchd, the links are re-assigned to
ln1 -> /some/new/dir/target1
ln2 -> /some/new/dir/target2
...
lnN -> /some/new/dir/targetN
The link will only be restored if /some/new/dir/targetN is itself
valid.
"
# No arguments at all (${#1} is 0): print the help text and abort.
if (( ! ${#1} )) ; then
echo "$G_SYNOPSIS"
exit 1
fi
# Option flags (0/1 booleans).
let b_verbose=0
let b_regex=0
# -s <regex>  derive the new target directory with a sed expression
# -v          verbose: print old and new link targets
# -h / other  print the synopsis
while getopts "s:vh" option ; do
case "$option"
in
s) REGEX=$OPTARG
b_regex=1 ;;
v) b_verbose=1 ;;
h) echo "$G_SYNOPSIS" ;;
\?) echo "$G_SYNOPSIS" ;;
esac
done
shift $(($OPTIND - 1))
# Without -s, $1 is the new base dir and the remaining args are the links;
# with -s, every positional argument is a link (NEWDIR is derived per link).
if (( !b_regex )) ; then
NEWDIR=$1
LNTARGETS=$(echo $* | awk '{for(i=2; i<=NF; i++) printf("%s ", $i);}')
else
LNTARGETS="$*"
fi
for TARGET in $LNTARGETS ; do
if [[ -h $TARGET ]] ; then
# Current link destination (last field of ls -l) and its basename.
# NOTE(review): this parses `ls -l` output and word-splits $LNTARGETS,
# so link names containing whitespace are not supported.
LINK=$(/bin/ls -l $TARGET | awk '{print $NF}')
ORIGTARGET=$(basename $LINK)
ORIGNAME=$(/bin/ls -l $TARGET | grep $LINK | awk '{print $9}')
if (( b_regex )) ; then
# Apply the user's sed regex to the old target to get the new directory.
CMD="echo $LINK | sed '$REGEX'"
NEWDIR=$(eval "$CMD")
NEWDIR=$(dirname $NEWDIR)
fi
# Only relink when the target actually exists in the new directory.
if [[ -d ${NEWDIR}/$ORIGTARGET || -f ${NEWDIR}/$ORIGTARGET ]] ; then
rm $TARGET
if (( b_verbose )) ; then
printf "OLD: %20s -> %30s\n" "$ORIGNAME" "$LINK"
printf "NEW: %20s -> %30s\n\n" "$ORIGNAME" "${NEWDIR}/$ORIGTARGET"
fi
ln -s ${NEWDIR}/$ORIGTARGET $ORIGNAME
else
printf "%60s%20s\n" "<$ORIGTARGET> not found in target" "[ skipping ]"
fi
else
printf "\n\t'$TARGET' is not a symbolic link.\n"
printf "\tNo action performed.\n"
# exit 2
fi
done
| true |
0b698cc4440c6017f940d54dba60450e3ad74f8c | Shell | rogrwhitakr/northern-lights | /script/rename-youtube-dls.bash | UTF-8 | 3,069 | 3.8125 | 4 | [] | no_license | #! /usr/bin/env bash
# ######################################################################################
# BASH SCRIPT TEMPLATE
# HISTORY:
# 2018-09-10 Script initially created
# 2018-10-30 moved to systemd logging - using systemd-cat
# reworked the choice function
# 2018-12-04 systemd-cat has another function, really
# just regurgitate to stout / sterr, that will be in logging
# added an amount output
# made script less verbose
#
# ######################################################################################
# VERSION
version="1.0.1"
source "/home/admin/MyScripts/script/helpers/init.bash"
source "/home/admin/MyScripts/script/helpers/log.bash"
strict=1
debug=0
script_init
flags_init
# set directory videos sit in
video_dir="/mnt/backup/video"
cd "${video_dir}"
regexp_rename_spec() {
if ([[ -f "${1}" ]] && [[ ! -z "${2}" ]]); then
local file="${1}"
local qualifier="${2}"
local src="${file}"
local ext="${file##*.}"
print "BEFORE: ${file}"
# remove extension
file="${file//.${ext}/}"
print "remove extension: ${file}"
# remove youtube specifier
if [[ "${qualifier}" == true ]]; then
file="${file%%-*}"
elif [[ "${qualifier}" == false ]]; then
file="${file%-*}"
fi
print "removing youtube specifier: ${file}"
# removing everything that is NOT [A-Za-z0-9]"
file="${file//[^A-Za-z0-9]/_}"
print "removing everything that is NOT [A-Za-z0-9]: ${file}"
# removing doubles and triples and so forth
file="${file//______/_}"
file="${file//_____/_}"
file="${file//____/_}"
file="${file//___/_}"
file="${file//__/_}"
print "removing doubles and triples and so forth: ${file}"
# removing any leftover underscores from end of string
#file="${file%_*}"
#print "removing any leftover underscores from end of string: ${file}"
# reappending extension
file="${file}.${ext}"
if [[ "${src}" != "${file}" ]]; then
mv "${src}" "${file}"
fi
print "AFTER: ${file}"
fi
}
# Walk every entry of the current directory and rename those that match a
# youtube-dl naming pattern ("<title>-<11-char id>.<ext>").
main() {
	amount="$(find . -mindepth 1 -maxdepth 1 -type f | wc | column --table | cut -d' ' -f1)"
	print LOGLINE
	print GREEN "Start of script ${0}"
	print "There are currently ${amount} files in directory $(pwd)"
	for file in *; do
		yt_chars=11                 # length of a youtube video id
		ac="${file%%-*}"            # name up to the FIRST dash (aggressive)
		nac="${file%-*}"            # name up to the LAST dash (non-aggressive)
		ext="${file##*.}"           # extension
		# trailing "-<id>.<ext>" candidate (id + dash + dot + extension)
		ye="${file:$((${#file} - ${yt_chars} - 2 - ${#ext}))}"
		if [[ "$((${#file} - ${#ac} - ${#ye}))" == 0 ]]; then
			regexp_rename_spec "${file}" true
		elif [[ "$((${#file} - ${#nac} - ${#ye}))" == 0 ]]; then
			regexp_rename_spec "${file}" false
		else
			# Not a youtube-dl style name - leave it untouched.
			# NOTE(review): the original else-branch contained only
			# comments, which is a bash syntax error; ':' makes it a
			# valid no-op.
			:
		fi
	done
	print LOGLINE
	print GREEN "all finished :: ${0}"
}
# Entry point: forward all CLI arguments to main.
main "${@}"
| true |
d25734a8ca9dd8cb1076c9b3928319081ce03867 | Shell | imsarllc/drivers | /version.sh | UTF-8 | 317 | 3.34375 | 3 | [] | no_license | #!/bin/sh -e
# Generate ${1:-.}/version.h containing the current `git describe` string
# and build date, then echo the generated file.
# $BUILD_NUMBER is expected from the CI environment (e.g. Jenkins).
FILE=${1:-.}/version.h
GIT_DESCRIBE=$(git describe --long --dirty --always --tags)
BUILD_DATE=$(date +"%F_%T")
cat <<EOF > "${FILE}"
#ifndef _VERSION_H_
#define _VERSION_H_
#define GIT_DESCRIBE "${GIT_DESCRIBE}-b${BUILD_NUMBER}"
#define BUILD_DATE "${BUILD_DATE}"
#endif //_VERSION_H_
EOF
cat "${FILE}"
| true |
eeeb35806000ef2f9d92981265c95a8f0de455a0 | Shell | coreos/fedora-coreos-config | /tests/kola/networking/nameserver | UTF-8 | 1,274 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
## kola:
## # appendKernelArgs is only supported on QEMU
## platforms: qemu
## appendKernelArgs: "nameserver=8.8.8.8 nameserver=1.1.1.1"
## description: Verify that we config multiple nameservers via kernel
## arguments work well.
# RHCOS: need to check /etc/resolv.conf and nmconnection.
# FCOS: using systemd-resolved which needs to run resolvectl to check.
# See https://bugzilla.redhat.com/show_bug.cgi?id=1763341
set -xeuo pipefail
# shellcheck disable=SC1091
. "$KOLA_EXT_DATA/commonlib.sh"
# is_fcos/is_rhcos, fatal and ok come from commonlib.sh.
if is_fcos; then
# run resolvectl
# Both kernel-argument nameservers must show up in systemd-resolved.
dns=$(resolvectl dns)
if ! ([[ "$dns" =~ "8.8.8.8" ]] && [[ "$dns" =~ "1.1.1.1" ]]); then
fatal "Error: can not find nameserver via resolvectl"
fi
elif is_rhcos; then
# check nameserver in /etc/resolv.conf
resolv=/etc/resolv.conf
cat ${resolv}
if ! (grep -q "nameserver 8.8.8.8" ${resolv} && \
grep -q "nameserver 1.1.1.1" ${resolv}); then
fatal "Error: can not find nameserver in ${resolv}"
fi
fi
# check nameserver in config file
# Regardless of variant, NetworkManager must have written both servers
# into the generated default connection profile.
conf=/etc/NetworkManager/system-connections/default_connection.nmconnection
cat ${conf}
if ! grep -q "dns=8.8.8.8;1.1.1.1;" ${conf}; then
fatal "Error: can not find nameserver in ${conf}"
fi
ok "multiple nameserver"
| true |
7a1de50b03de0a73ab53c2ca83cd67e7b83f5f6c | Shell | erdemkeren/mmacenv_installer | /scripts/install_atom.sh | UTF-8 | 410 | 3.515625 | 4 | [
"Unlicense"
] | permissive | #!/usr/bin/env bash
# Pull in the installationStarted/installationSucceed progress helpers.
source scripts/helpers.sh
# Install the Atom editor via Homebrew cask, reporting progress through the
# helper functions sourced from scripts/helpers.sh.
function install_atom() {
    ## Name reported by the progress helpers.
    toolName="Atom"
    installationStarted "$toolName"
    # The old manual download/unzip flow is kept for reference:
    # mkdir _tmp_
    # wget https://atom.io/download/mac -O_tmp_/atom-mac.zip
    # unzip -q -d_tmp_ _tmp_/atom-mac.zip
    # mv _tmp_/Atom.app /Applications/Atom.app
    brew install --cask atom
    installationSucceed "$toolName"
}
| true |
4b223be406cb092011233cf69dddbc60a3996b0a | Shell | jalpedersen/android-utilities | /take_screenshot.sh | UTF-8 | 233 | 3.140625 | 3 | [] | no_license | #!/bin/sh
# Capture a device screenshot via adb into the file given as $1.
# Set NO_CROP=1 to keep the full frame; otherwise the top CROP_TOP pixels
# (default 70, e.g. the status bar) are chopped off with ImageMagick.
if [ -z "$1" ]; then
    echo "missing output"
    exit 1
fi
# "=" instead of "==": the latter is a bashism that fails under dash even
# though the shebang says /bin/sh.  "$1" is quoted so paths with spaces
# work.
if [ "$NO_CROP" = "1" ]; then
    adb shell screencap -p > "$1"
else
    top=${CROP_TOP:-70}
    adb shell screencap -p | convert -gravity North -chop "0x$top" - "$1"
fi
| true |
67f81a400be0d9174083b3402de8c77ae31f8899 | Shell | fransixles/admin-scripts | /md5.sh | UTF-8 | 122 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Print the MD5 hex digest of the string given as $1.
if [[ -z "$1" ]]; then
    echo "Usage: $0 <string>" >&2
    exit 1
fi
# printf instead of `echo -n`: echo would swallow inputs that look like
# its own options (e.g. a literal "-e" or "-n") instead of hashing them.
printf '%s' "$1" | md5sum | awk '{print $1}'
| true |
985c633b4087c566a6c09a2ffc07915dc82c4845 | Shell | farshi/opops | /setup/scripts/tools-checker.sh | UTF-8 | 2,339 | 3.875 | 4 | [] | no_license | #!/bin/sh
set -o pipefail
# set -o errexit
# set -o nounset
# set -o xtrace
# Force a capable terminal so the tput calls below emit real escape codes.
# NOTE(review): the shebang says /bin/sh but the functions below use
# bashisms ([[ ]], local, the "function" keyword) - run this with bash.
export TERM="xterm-256color"
# ANSI color escape sequences used by the status output.
red=`tput setaf 1`
green=`tput setaf 2`
yellow=`tput setaf 3`
blue=`tput setaf 4`
white=`tput setaf 7`
reset=`tput sgr0`
function printToolStatus(){
    # Print one status line for a tool check.
    #   $1 - tool name
    #   $2 - exit status of the check (0 = installed)
    #   $3 - detected version string (may be empty when not installed)
    tool=$1
    status=$2
    version=$3
    if [[ "$status" -eq 0 ]] ; then
        # Bug fix: this previously printed the global $Version instead of
        # the $3 parameter captured above; use the parameter it was given.
        printf "$green \xE2\x9C\x94 $yellow $tool\t: $version \n"
    else
        printf "$red \xE2\x9C\x96 $yellow $tool \n"
    fi
}
function checkToolsInstalled(){
  # Detect whether a single tool is installed (and, where implemented, its
  # version). Sets the globals:
  #   Version     - detected version string (read by printToolStatus)
  #   tool_status - 0/1 check result, or the sentinel "x" when the check
  #                 for this tool is not implemented
  tool=$1
  tool_status=x
  case "$tool" in
     # NOTE(review): "pyhton2"/"pyhton3" look like typos for python2/python3,
     # but they are the strings callers must pass — renaming them would
     # change the script's interface, so they are left as-is.
     pyhton2)
      #python 2
        tool_path=`which python`
        if [[ -n "$tool_path" ]] ; then
          Version=`python -c 'import sys; version=sys.version_info[:3]; \
    print("{0}.{1}.{2}".format(*version))'`
          echo $Version | grep -q "2."
          tool_status=$?
        fi
          ;;
     pyhton3)
      #python 3
      tool_path=`which python`
      if [[ -n "$tool_path" ]] ; then
        Version=`python -c 'import sys; version=sys.version_info[:3]; \
    print("{0}.{1}.{2}".format(*version))'`
        echo $Version | grep -q "3."
        tool_status=$?
      fi
        ;;
     java8)
        # java: parse `java -version` (which prints on stderr) and accept
        # only a 1.8.0 release.
        tool_path=`which java`
        if [[ -n "$tool_path" ]] ; then
          Version=$($tool_path -version 2>&1 >/dev/null | grep 'java version' | awk '{print $3}' | tr -d '"')
          echo $Version | grep -q "1.8.0"
          tool_status=$?
        fi
        ;;
     docker)
       # check docker: needs a reachable daemon for the Server version.
       tool_path=`which docker`
       if [[ -n "$tool_path" ]] ; then
         Version=$($tool_path version --format '{{.Server.Version}}')
         tool_status=$?
       fi
       ;;
     ruby)
       # check ruby (not implemented; falls through to the "x" warning)
       ;;
     packer)
      # check packer
      tool_path=`which packer`
      if [[ -n "$tool_path" ]] ; then
        Version=$($tool_path -v)
        tool_status=$?
      fi
      ;;
     ansible)
       # check ansible (not implemented)
       ;;
     awscli)
       # (not implemented)
       ;;
     cfndsl)
       # check cfndsl (not implemented)
       ;;
     saml2aws)
       # check saml2aws (not implemented)
       ;;
     *)
        # Anything else (is there anything else?)
        echo "*** This tool checker not implemented , Volunteer?!" >&2
        exit 0
        ;;
  esac

  if [[ "$tool_status" = "x" ]]; then
    # NOTE(review): under bash, echo "\n" prints a literal backslash-n
    # (only dash-style sh echo interprets it) — confirm intended output.
    echo "\n"
    printf "$red \xE2\x9C\x96"
    echo " Warning: $tool checker not implemented yet! Volunteer?"
    exit 0;
  else
    printToolStatus $tool $tool_status $Version
  fi
}
# Entry point.
# $1 is a comma separated tools list, e.g.: docker,java8,awscli
tools_list=$1

# printf gives a deterministic blank line after the header (the original
# `echo "...\n"` printed a literal "\n" under bash).
printf 'Checking for tools: %s\n\n' "$tools_list"

# The command substitution is deliberately unquoted: the commas have been
# turned into spaces and word-splitting yields one tool per iteration.
for tool in $(echo "$tools_list" | sed "s/,/ /g")
do
    checkToolsInstalled "$tool"
done

# --- Finished
exit 0
| true |
dc1184786f543977bd24a57d502dc5fa9f6145f5 | Shell | MinaToma/verible | /verilog/tools/kythe/verification_test.sh | UTF-8 | 5,568 | 3.78125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright 2020 Google LLC.
# SPDX-License-Identifier: Apache-2.0
#
# Extract Kythe indexing facts from SystemVerilog code and check the Kythe
# verification expectations from the annotations.
# --- begin runfiles.bash initialization ---
# Copy-pasted from Bazel's Bash runfiles library (tools/bash/runfiles/runfiles.bash).
set -euo pipefail
if [[ ! -d "${RUNFILES_DIR:-/dev/null}" && ! -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
if [[ -f "$TEST_SRCDIR/MANIFEST" ]]; then
export RUNFILES_MANIFEST_FILE="$TEST_SRCDIR/MANIFEST"
elif [[ -f "$0.runfiles/MANIFEST" ]]; then
export RUNFILES_MANIFEST_FILE="$0.runfiles/MANIFEST"
elif [[ -f "$TEST_SRCDIR/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
export RUNFILES_DIR="$TEST_SRCDIR"
fi
fi
if [[ -f "${RUNFILES_DIR:-/dev/null}/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
source "${RUNFILES_DIR}/bazel_tools/tools/bash/runfiles/runfiles.bash"
elif [[ -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
source "$(grep -m1 "^bazel_tools/tools/bash/runfiles/runfiles.bash " \
"$RUNFILES_MANIFEST_FILE" | cut -d ' ' -f 2-)"
else
echo >&2 "ERROR: cannot find @bazel_tools//tools/bash/runfiles:runfiles.bash"
exit 1
fi
# --- end runfiles.bash initialization ---
source "$(rlocation "io_bazel/src/test/shell/unittest.bash")" ||
{
echo "unittest.bash not found!" >&2
exit 1
}
TESTS_DIR="$(rlocation "com_google_verible/verilog/tools/kythe/testdata")" ||
{
echo "Can't load the test data!" >&2
exit 1
}
VERIBLE_EXTRACTOR_BIN="$(rlocation "com_google_verible/verilog/tools/kythe/verible-verilog-kythe-extractor")" ||
{
echo "Can't load the extractor binary!" >&2
exit 1
}
KYTHE_VERIFER_BIN="$(rlocation "io_kythe/kythe/cxx/verifier/verifier")" ||
{
echo "Can't load the verifier binary!" >&2
exit 1
}
function test_single_files() {
  # Run the Kythe extractor + verifier over every standalone test file
  # directly under TESTS_DIR (subdirectories are multi-file cases and are
  # handled by the other test functions).
  test_count=0
  # Iterate with a glob instead of parsing `ls` output, so file names with
  # whitespace survive intact.
  for verilog_file in "${TESTS_DIR}"/*; do
    if [[ -d "${verilog_file}" ]]; then
      continue
    fi

    test_filename="$(basename "${verilog_file}")"
    test_dir="${TEST_TMPDIR}/${test_filename%.*}"
    mkdir "${test_dir}"
    cp "${verilog_file}" "${test_dir}"

    filelist_path="${test_dir}/filelist"
    echo "${test_filename}" > "${filelist_path}"

    echo "Running Kythe verification test for ${test_filename}" >> "$TEST_log"
    "${VERIBLE_EXTRACTOR_BIN}" --file_list_path "${filelist_path}" --file_list_root "${test_dir}" --print_kythe_facts proto > "${test_dir}/entries" ||
      fail "Failed to extract Kythe facts"
    # Feed the extracted entries straight to the verifier (no `cat |` needed).
    "${KYTHE_VERIFER_BIN}" --nocheck_for_singletons "${test_dir}/${test_filename}" < "${test_dir}/entries" >> "$TEST_log" ||
      fail "Verification failed for ${test_filename}"

    test_count=$((${test_count} + 1))
  done
  [[ ${test_count} -gt 0 ]] || fail "No tests are executed!"
}
function test_multi_files() {
  # Extract facts from the multi-file testcase and verify them as one unit.
  test_case_dir="${TESTS_DIR}/multi_file_test"
  test_name="$(basename "${test_case_dir}")"
  test_dir="${TEST_TMPDIR}/${test_name}"
  mkdir "${test_dir}"
  cp "${test_case_dir}"/* "${test_dir}/"

  filelist_path="${test_dir}/filelist"
  # The file list is simply the bare file names of the testcase directory.
  ls "${test_case_dir}" > "${filelist_path}"

  echo "Running Kythe verification multi file test for ${test_name}" >> "$TEST_log"
  "${VERIBLE_EXTRACTOR_BIN}" --file_list_path "${filelist_path}" --file_list_root "${test_dir}" --print_kythe_facts proto > "${test_dir}/entries" ||
    fail "Failed to extract Kythe facts"
  # Feed the extracted entries straight to the verifier (no `cat |` needed).
  "${KYTHE_VERIFER_BIN}" "${test_dir}"/*.* < "${test_dir}/entries" >> "$TEST_log" ||
    fail "Verification failed for ${test_name}"
}
function test_multi_files_with_include() {
  # Like test_multi_files, but the testcase uses `include directives, so
  # the extractor is pointed at the test dir via --include_dir_paths and
  # the testcase ships its own file_list.txt.
  test_case_dir="${TESTS_DIR}/include_file_test"
  test_name="$(basename "${test_case_dir}")"
  test_dir="${TEST_TMPDIR}/${test_name}"
  mkdir "${test_dir}"
  cp "${test_case_dir}"/* "${test_dir}/"
  filelist_path="${test_dir}/file_list.txt"

  echo "Running Kythe verification multi file test for ${test_name}" >> "$TEST_log"
  "${VERIBLE_EXTRACTOR_BIN}" --include_dir_paths "${test_dir}" --file_list_path "${filelist_path}" --file_list_root "${test_dir}" --print_kythe_facts proto > "${test_dir}/entries" ||
    fail "Failed to extract Kythe facts"
  # Only the .sv/.svh sources carry verification annotations.
  cat "${test_dir}/entries" | "${KYTHE_VERIFER_BIN}" "${test_dir}"/*.sv* >> "$TEST_log" ||
    fail "Verification failed for ${test_name}"
}
function test_multi_files_with_include_dir() {
  # Testcase whose `include files live in two separate include directories:
  # the include_file_test case reused from above, plus the testcase's own
  # include_dir/ subdirectory.
  test_case_dir="${TESTS_DIR}/include_with_dir_test"
  test_name="$(basename "${test_case_dir}")"
  test_dir="${TEST_TMPDIR}/${test_name}"
  mkdir "${test_dir}"
  cp -r "${test_case_dir}"/* "${test_dir}/"
  filelist_path="${test_dir}/file_list.txt"

  # First include dir: a copy of the include_file_test testcase.
  first_included="${TESTS_DIR}/include_file_test"
  first_included_name="$(basename "${first_included}")"
  first_include_dir="${TEST_TMPDIR}/${first_included_name}"
  mkdir "${first_include_dir}"
  cp "${first_included}"/* "${first_include_dir}/"

  second_include_dir="${test_dir}/include_dir"

  # NOTE: this variable is a space-separated list of glob patterns and is
  # expanded *unquoted* at the verifier call below on purpose, so the
  # shell word-splits it and expands the globs there.
  VERILOG_INCLUDE_DIR_TEST_FILES="${test_dir}/*.sv ${first_include_dir}/A.svh ${first_include_dir}/B.svh ${second_include_dir}/*.svh"

  echo "Running Kythe verification multi file test for ${test_name}" >> "$TEST_log"
  "${VERIBLE_EXTRACTOR_BIN}" --include_dir_paths "${first_include_dir},${second_include_dir}" --file_list_path "${filelist_path}" --file_list_root "${test_dir}" --print_kythe_facts proto > "${test_dir}/entries" ||
    fail "Failed to extract Kythe facts"
  cat "${test_dir}/entries" | "${KYTHE_VERIFER_BIN}" ${VERILOG_INCLUDE_DIR_TEST_FILES} >> "$TEST_log" ||
    fail "Verification failed for ${test_name}"
}
run_suite "kythe verification tests"
| true |
e9b7241580204ac323a99656effd4aebd6ab7b9e | Shell | samsucik/prosodic-lid-globalphone | /egs/swbd/s5b/local/nnet2/run_6a_gpu.sh | UTF-8 | 2,045 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# This runs on the 100 hour subset; it's another neural-net training
# after the nnet5a setup, but after realignment. We're just seeing
# whether realigning and then re-training the system is helpful.
#
# e.g. of usage:
# local/nnet2/run_6a_gpu.sh --temp-dir /export/gpu-03/dpovey/kaldi-dan2/egs/swbd/s5b
temp_dir=
train_stage=-10
. ./cmd.sh
. ./path.sh
! cuda-compiled && cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
. utils/parse_options.sh
parallel_opts="--gpu 1" # This is suitable for the CLSP network, you'll likely have to change it.
alidir=exp/nnet5a_ali_100k_nodup
if [ ! -f $alidir/.done ]; then
nj=`cat exp/tri4a/num_jobs`
steps/nnet2/align.sh --cmd "$decode_cmd" --nj $nj --transform-dir exp/tri4a \
data/train_100k_nodup data/lang exp/nnet5a_gpu $alidir || exit 1;
touch $alidir/.done
fi
if [ ! -f exp/nnet6a_gpu/final.mdl ]; then
if [ ! -z "$temp_dir" ] && [ ! -e exp/nnet6a_gpu/egs ]; then
mkdir -p exp/nnet6a_gpu
mkdir -p $temp_dir/nnet6a_gpu/egs
ln -s $temp_dir/nnet6a_gpu/egs exp/nnet6a_gpu/
fi
# TODO: add transform-dir option to train_tanh.sh
steps/nnet2/train_tanh.sh --stage $train_stage \
--num-jobs-nnet 8 --num-threads 1 --max-change 40.0 \
--minibatch-size 512 --parallel-opts "$parallel_opts" \
--mix-up 8000 \
--initial-learning-rate 0.01 --final-learning-rate 0.001 \
--num-hidden-layers 4 \
--hidden-layer-dim 1024 \
--cmd "$decode_cmd" \
--egs-opts "--transform-dir exp/tri4a" \
data/train_100k_nodup data/lang $alidir exp/nnet6a_gpu || exit 1;
fi
for lm_suffix in tg fsh_tgpr; do
steps/nnet2/decode.sh --cmd "$decode_cmd" --nj 30 \
--config conf/decode.config --transform-dir exp/tri4a/decode_eval2000_sw1_${lm_suffix} \
exp/tri4a/graph_sw1_${lm_suffix} data/eval2000 exp/nnet6a_gpu/decode_eval2000_sw1_${lm_suffix} &
done
| true |
989d9aa169168f63dd73a02019d80ce4b7f1a2a5 | Shell | fenengl/Probes | /paper_edition/PTetraProbes/scancel_all.sh | UTF-8 | 78 | 2.59375 | 3 | [] | no_license | #!/bin/bash
for j in `seq 4458116 4458151` ; do
scancel $j
echo $j
done
| true |
b323b661b1bda6b2e0ad1c078bad2f0f921227ac | Shell | Arquanite/words | /generate.sh | UTF-8 | 243 | 2.90625 | 3 | [] | no_license | #!/bin/bash
while true
do
A=$(shuf -n 1 ./przymiotniki)
B=$(shuf -n 1 ./rzeczowniki)
C=$(shuf -n 1 ./te_trzecie)
X=$(echo $B | rev | cut -c -1)
if [ $X = 'a' ]
then
Y='a'
elif [ $X = 'o' ]
then
Y='e'
else
Y='y'
fi
read -p "$A$Y $B $C"
done
| true |
82e883361711c896527d13358eb0ad32e4ebfa3d | Shell | alloc/react-native-macos | /scripts/run-android-emulator.sh | UTF-8 | 584 | 3.359375 | 3 | [
"MIT",
"CC-BY-4.0"
] | permissive | #!/bin/bash
# Runs an Android emulator locally.
# If there already is a running emulator, this just uses that.
# The only reason to use this config is that it represents a known-good
# virtual device configuration.
# This is useful for running integration tests on a local machine.
# TODO: make continuous integration use the precise same setup
STATE=`adb get-state`
if [ -n "$STATE" ]; then
echo "An emulator is already running."
exit 1
fi
echo "Creating virtual device..."
echo no | android create avd -n testAVD -f -t android-23 --abi default/x86
emulator -avd testAVD
| true |
281098a9c6b794e6736912b508ad1b0d30bce909 | Shell | ohmybash/oh-my-bash | /themes/brunton/brunton.theme.sh | UTF-8 | 1,428 | 2.859375 | 3 | [
"MIT"
] | permissive | #! bash oh-my-bash.module
# SCM segment decoration: red ✗ when the working tree is dirty, green ✓
# when clean; per-SCM glyphs for git (±), svn (⑆) and mercurial (☿).
SCM_THEME_PROMPT_PREFIX=""
SCM_THEME_PROMPT_SUFFIX=""
SCM_THEME_PROMPT_DIRTY=" ${_omb_prompt_bold_brown}✗${_omb_prompt_normal}"
SCM_THEME_PROMPT_CLEAN=" ${_omb_prompt_bold_green}✓${_omb_prompt_normal}"
SCM_GIT_CHAR="${_omb_prompt_bold_green}±${_omb_prompt_normal}"
SCM_SVN_CHAR="${_omb_prompt_bold_teal}⑆${_omb_prompt_normal}"
SCM_HG_CHAR="${_omb_prompt_bold_brown}☿${_omb_prompt_normal}"
function is_vim_shell {
  # Tag the prompt with "[vim shell]" when running inside a Vim :shell
  # session (Vim exports VIMRUNTIME to its subshells); print nothing
  # otherwise.
  [ -n "$VIMRUNTIME" ] || return 0
  echo "[${_omb_prompt_teal}vim shell${_omb_prompt_normal}]"
}
function scm_prompt {
  # Emit " <char> (<scm info>)" for the current repo, or nothing when not
  # inside any SCM checkout.
  CHAR=$(scm_char)
  # Quoted comparison: the char values embed escape sequences, and the
  # original unquoted test could word-split them into a syntax error.
  if [ "$CHAR" = "$SCM_NONE_CHAR" ]
  then
    return
  fi
  echo " $(scm_char) (${_omb_prompt_white}$(scm_prompt_info)${_omb_prompt_normal})"
}
function _omb_theme_PROMPT_COMMAND {
  # Assemble the three-line prompt:
  #   user@host <clock>             <battery>
  #   <cwd> <scm info> [vim shell]
  #   >
  PS1="${_omb_prompt_white}${_omb_prompt_background_navy} \u${_omb_prompt_normal}"
  PS1+="${_omb_prompt_background_navy}@${_omb_prompt_brown}${_omb_prompt_background_navy}\h $(clock_prompt) ${_omb_prompt_reset_color}"
  PS1+="${_omb_prompt_normal} $(battery_charge)\n"
  PS1+="${_omb_prompt_bold_black}${_omb_prompt_background_white} \w "
  PS1+="${_omb_prompt_normal}$(scm_prompt)$(is_vim_shell)\n"
  PS1+="${_omb_prompt_white}>${_omb_prompt_normal} "
}

# Clock defaults, overridable by the user before the theme loads.
THEME_CLOCK_COLOR=${THEME_CLOCK_COLOR:-"$_omb_prompt_navy$_omb_prompt_background_white"}
THEME_CLOCK_FORMAT=${THEME_CLOCK_FORMAT:-" %H:%M:%S"}

# Register the prompt builder with oh-my-bash.
_omb_util_add_prompt_command _omb_theme_PROMPT_COMMAND
| true |
b686c569a46fe86217b3af5e7a6ff760a0bd3702 | Shell | jamespcole/bash-framework-2 | /modules/web/module.base.sh | UTF-8 | 2,905 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env bash
# This is an example of inclduing the params module.
# Place require module calls at the top of the file.
import.require 'params'
web_base.init() {
	# Module initialiser: declares module-level state and defines the
	# web_base.* functions. The inner definitions become available once
	# the module framework invokes this initialiser.

	web_base.__init() {
		# Per-host cookie-jar temp-file paths, keyed by hostname.
		declare -g -A __web_COOKIES
		import.useModule 'params'
	}

	web_base.login() {
		# POST a username/password login form and keep the session cookies.
		#   --username / --username-field   credential and its form field name
		#   --pwd / --pwd-field             password and its form field name
		#   --url                           login endpoint
		local -A __params
		__params['username']=''
		__params['username-field']='username'
		__params['pwd']=''
		__params['pwd-field']='password'
		__params['url']=''
		params.get "$@"

		local __formdata="${__params['username-field']}=${__params['username']}"
		__formdata="${__formdata}&${__params['pwd-field']}=${__params['pwd']}"

		local -A __url_bits
		# NOTE(review): called as "web.parseUrl" while defined below as
		# "web_base.parseUrl" — presumably the module framework aliases
		# web_base.* to web.*; confirm, otherwise this call fails.
		web.parseUrl --url "${__params['url']}" \
					 --return-arr __url_bits

		local __hostname="${__url_bits['host']}"
		web_base.initCookie --host "$__hostname"
		local __cookie_path="${__web_COOKIES["${__hostname}"]}"

		# Bug fix: the write jar (-c) previously used $COOKIE_PATH, which
		# is not defined anywhere in this module; both the read (-b) and
		# write (-c) jars now use the one just initialised for this host.
		curl -s -L -k -b "$__cookie_path" -c "$__cookie_path" -X POST -d "$__formdata" "${__params['url']}" || return 1
	}

	web_base.initCookie() {
		# Ensure a cookie-jar temp file exists for --host.
		local -A __params
		__params['host']=''
		params.get "$@"

		local __host="${__params['host']}"
		if [[ ! "${__web_COOKIES["${__host}"]+exists}" ]]; then
			__web_COOKIES["${__host}"]="$(mktemp)"
		fi
	}

	web_base.parseUrl() {
		# Split --url into its components and store them in the associative
		# array named by --return-arr (keys: proto, url, userpass, user,
		# pass, hostport, host, port, path).
		local -A __params
		__params['url']=''
		__params['return-arr']=''
		params.get "$@"

		local -n __parts="${__params['return-arr']}"

		# extract the protocol
		__parts['proto']="`echo ${__params['url']} | grep '://' | sed -e's,^\(.*://\).*,\1,g'`"
		# remove the protocol
		__parts['url']=`echo ${__params['url']} | sed -e s,${__parts['proto']},,g`
		# extract the user and password (if any)
		__parts['userpass']="`echo "${__parts['url']}" | grep @ | cut -d@ -f1`"
		__parts['pass']=`echo ${__parts['userpass']} | grep : | cut -d: -f2`
		if [ -n "${__parts['pass']}" ]; then
			__parts['user']=`echo ${__parts['userpass']} | grep : | cut -d: -f1`
		else
			__parts['user']=${__parts['userpass']}
		fi

		# extract the host and port
		__parts['hostport']=`echo "${__parts['url']}" | sed -e s,${__parts['userpass']}@,,g | cut -d/ -f1`
		__parts['port']=`echo ${__parts['hostport']} | grep : | cut -d: -f2`
		if [ -n "${__parts['port']}" ]; then
			# Bug fix: this previously read
			#   ${__parts['host']}=`echo ${__parts['port']} | ...`
			# which is not an assignment at all (it expands into a command),
			# and it tried to cut the host out of the *port* string.
			__parts['host']=`echo ${__parts['hostport']} | grep : | cut -d: -f1`
		else
			__parts['host']=${__parts['hostport']}
		fi

		# extract the path (if any)
		__parts['path']="`echo "${__parts['url']}" | grep / | cut -d/ -f2-`"
	}
}
0a6666486e5ad08c3a6cf1fe754169e3d61fa97b | Shell | keqingyuan/flow-agent-x | /build.sh | UTF-8 | 410 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
version=$1
if [[ -n $version ]]; then
VersionTag="-t flowci/agent:$version"
fi
# build within golang docker
docker run -it --rm \
-v "$PWD":/go/src/flow-agent-x \
-w /go/src/flow-agent-x golang:1.12 \
/bin/bash -c "GO111MODULE=on go build -o bin/flow-agent-x -v"
docker build -f ./Dockerfile -t flowci/agent:latest $VersionTag .
# docker rmi -f $(docker images -f 'dangling=true' -q) | true |
f6572b1941aa8d5f67f6eaed4ef08552b1e2dc93 | Shell | philipbo/dotfiles | /bash/.bash_profile | UTF-8 | 708 | 2.9375 | 3 | [] | no_license |
export BASH_SILENCE_DEPRECATION_WARNING=1
source ~/.profile
[ -f /usr/local/etc/bash_completion ] && . /usr/local/etc/bash_completion
if [ -f /usr/local/etc/bash_completion.d/git-prompt.sh ]; then
. /usr/local/etc/bash_completion.d/git-prompt.sh
#git prompt
#source ~/.git-prompt.sh
GIT_PS1_SHOWCOLORHINTS=true
GIT_PS1_SHOWDIRTYSTATE=true
GIT_PS1_SHOWSTASHSTATE=true
GIT_PS1_SHOWUNTRACKEDFILES=true
#GIT_PS1_SHOWUPSTREAM="auto"
#export PS1='\h:\W \u$(__git_ps1)\$ '
#PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'
yellow=$'\[\e[0;33m\]'
normal=$'\[\e[m\]'
PS1="\h:\W \u$yellow\$(__git_ps1)$normal\$ "
fi
#python
if command -v pyenv 1>/dev/null 2>&1; then
eval "$(pyenv init -)"
fi
| true |
db1f30df23cc1957efff6222cbb35bd15f8e1afb | Shell | RREE/build-avr-ada-toolchain | /bin/download.sh | UTF-8 | 699 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# download the necessary source files
#
source bin/config.inc
source bin/versions.inc
source bin/utilities.inc
function download_package()
{
    # Download one source archive into $DOWNLOAD unless already present.
    #   $1 - config variable prefix, e.g. "GCC": reads $GCC_TAR (archive
    #        file name) and $GCC_MIRROR (base URL) via indirect expansion.
    # Uses the display/check_return_code helpers from bin/utilities.inc.
    local PKG="$1_TAR"
    local PKG_MIRROR="$1_MIRROR"

    if [ ! -f "$DOWNLOAD/${!PKG}" ]; then
        display "  >> Downloading ${!PKG}..."
        wget --continue --directory-prefix="$DOWNLOAD" "${!PKG_MIRROR}/${!PKG}"
        check_return_code
    else
        display "  (x) Already have ${!PKG}"
    fi
}
header "downloading archives"
download_package "BINUTILS"
download_package "GCC"
download_package "GMP"
download_package "MPC"
download_package "MPFR"
download_package "LIBC"
download_package "AVRDUDE"
download_package "AVRADA" | true |
3833fedee5e1a59e11ba458219ece2ec65e9137f | Shell | numeny/happy | /ihome.spider/envsetup.sh | UTF-8 | 204 | 2.640625 | 3 | [] | no_license | #!/bin/bash
export IHOME_SCRAPY_PATH=`pwd`
alias cdfang="cd ${IHOME_SCRAPY_PATH}/fangtianxia_mobile/ihome/"
function sfang() {
    # Jump into the fangtianxia spider project and run the "fang" crawler.
    # Abort if the cd fails so the crawler never runs in the wrong directory.
    cd "${IHOME_SCRAPY_PATH}/fangtianxia_mobile/ihome/" || return
    scrapy crawl fang
}
| true |
8220fa65ddb0951e4d52b35a5ced448a316661c7 | Shell | Sherlock-Holo/repo | /archlinuxcn/skaffold/PKGBUILD | UTF-8 | 1,445 | 2.53125 | 3 | [] | no_license | # Maintainer: Fredy García <frealgagu at gmail dot com>
# Maintainer: Maxim Baz <${pkgname} at maximbaz dot com>
# Contributor: Stefan Cocora <stefan dot cocora at gmail dot com>
pkgname=skaffold
pkgver=0.23.0
pkgrel=1
pkgdesc="A command line tool that facilitates continuous development for Kubernetes applications"
arch=("x86_64")
url="https://github.com/GoogleContainerTools/${pkgname}"
license=("Apache")
depends=("docker" "kubectl-bin")
makedepends=("go-pie")
optdepends=("google-cloud-sdk: To use GKE"
"minikube: To use Minikube")
source=("${pkgname}-${pkgver}.tar.gz::https://github.com/GoogleContainerTools/${pkgname}/archive/v${pkgver}.tar.gz"
"build_info.patch")
sha256sums=("eaeaf6fb76e9d43d0cc996cd102d76a5dd343d9403cd7b930f469b99a65eebf7"
"39b1e127a29979ef559e0a92cd721b23d6eac4251c703befd882b8667ac9789e")
_commit="2590e9d480ffb63e9d954fd1e317b93d5b3d3b9b"
prepare() {
cd "${srcdir}/${pkgname}-${pkgver}"
patch -Np1 -i "${srcdir}/build_info.patch"
rm -rf "${srcdir}/gopath"
mkdir -p "${srcdir}/gopath/src/github.com/GoogleContainerTools"
ln -rTsf "${srcdir}/${pkgname}-${pkgver}" "${srcdir}/gopath/src/github.com/GoogleContainerTools/${pkgname}"
}
build() {
cd "${srcdir}/gopath/src/github.com/GoogleContainerTools/${pkgname}"
GOPATH="${srcdir}/gopath" PATH="${PATH}:${GOPATH}/bin" VERSION="v${pkgver}" COMMIT="${_commit}" TREE_STATE="clean" make install
}
package() {
install -Dm755 "${srcdir}/gopath/bin/${pkgname}" "${pkgdir}/usr/bin/${pkgname}"
}
| true |
771c3536e56462d08b9085f7b112c6b52d6e919b | Shell | sesame-street/bert_playground | /train_eval_steps/step001.copy_bert_model.sh | UTF-8 | 1,102 | 3.265625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -u
set -e
set -o pipefail
INCONFIG=$1
OUTCONFIG=$2
eval $(srs-config -config ${INCONFIG} -config default.config -dumpsh "cfg_")
local_bert_dir="SRS-GO/data/scratch_local/bert_dir"
if [ -d ${local_bert_dir} ]; then
echo "Removing the existing local_bert_dir"
rm -rf ${local_bert_dir}
fi
echo "Copies the bert repo"
cp -r ${cfg_bert_dir} ${local_bert_dir}
echo "Copies the bert model"
cp ${cfg_bert_model_path} "SRS-GO/data/scratch_local/"
bert_model_dir="SRS-GO/data/scratch_local/${cfg_bert_basename}"
if [ -d ${bert_model_dir} ]; then
echo "Removing the existing bert_model_dir"
rm -rf ${bert_model_dir}
fi
echo "Unzipping the bert model"
unzip "SRS-GO/data/scratch_local/${cfg_bert_basename}.zip" -d "SRS-GO/data/scratch_local"
squad_model_dir="SRS-GO/data/scratch_local/squad_dir"
tar -xzvf ${cfg_squad_model_tgz} -C "SRS-GO/data/scratch_local"
echo "bert_repo_dir ${local_bert_dir}" > ${OUTCONFIG}
echo "bert_model_dir ${bert_model_dir}" >> ${OUTCONFIG}
echo "squad_model_dir ${squad_model_dir}" >> ${OUTCONFIG}
echo INCLUDE ${INCONFIG} >> ${OUTCONFIG}
| true |
4d7a79f08f463d253476b9d00c8cdf47f68a18fe | Shell | betacloud-archive/docker-nfs-server | /files/run.sh | UTF-8 | 461 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -x
# Available environment variables
#
# n/a
# generate /etc/exports file
for export in $(find /exports -mindepth 1 -maxdepth 1 -type d); do
echo "$export *(rw,insecure,sync,no_subtree_check,fsid=0,no_root_squash)" >> /etc/exports
done
# https://github.com/AJNOURI/nfs-server/blob/master/nfs-run.sh
. /etc/default/nfs-kernel-server
. /etc/default/nfs-common
rpcbind
rpc.statd -d
rpc.nfsd
rpc.mountd $RPCMOUNTDOPTS --foreground
| true |
f0003d52d683e9aff405d3f10b8545fdd6f2b238 | Shell | manandbytes/bento | /packer/scripts/common/chef.sh | UTF-8 | 635 | 3.84375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh -eux
# Set $CHEF_VERSION inside Packer's template. Valid options are:
# 'provisionerless' -- build a box without Chef
# 'x.y.z' -- build a box with version x.y.z of Chef
# 'latest' -- build a box with the latest version of Chef
if [ $CHEF_VERSION != 'provisionerless' ]; then
if [ $CHEF_VERSION == 'latest' ]; then
echo "Installing latest Chef version"
sh <(curl -L https://www.opscode.com/chef/install.sh)
else
echo "Installing Chef version $CHEF_VERSION"
sh <(curl -L https://www.opscode.com/chef/install.sh) -v $CHEF_VERSION
fi
else
echo "Building a box without Chef"
fi
| true |
f01e0e8d57b89edfe9c6d70c1143dc610b4469a4 | Shell | turnkeylinux/inithooks | /turnkey-sudoadmin | UTF-8 | 7,000 | 3.875 | 4 | [] | no_license | #!/bin/bash -e
#
# Copyright (c) 2015 Alon Swartz <alon@turnkeylinux.org>
#
# This file is part of InitHooks.
#
# InitHooks is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
SUDOADMIN_STATE=/etc/sudoadmin/state
# Logging helpers: info/warn report on stdout; fatal reports on stderr and
# aborts the script with status 1. Each prefixes the script's base name.
fatal() {
    echo "FATAL [$(basename $0)]: $*" 1>&2
    exit 1
}
warn() {
    echo "WARN [$(basename $0)]: $*"
}
info() {
    echo "INFO [$(basename $0)]: $*"
}
usage() {
cat<<EOF
Syntax: $(basename $0) on|off|status [--disable-setpass]
Configure system to use admin user with sudo, or root
On:
- installs sudo package
- creates admin user and sets random password
- configures passwordless sudo for admin user
- configures inithooks and init-fence for admin user
- updates confconsole services for admin user
- merges root .ssh/authorized_keys with admin
- locks the root user
- disables root ssh access
- sets root ssh login banner
- restarts ssh daemon, if it is running
- sets admin password interactively (unless --disable-setpass)
Off:
- configures inithooks and init-fence for root
- updates confconsole services for root
- merges admin .ssh/authorized_keys with root
- enables root ssh access
- locks admin user
- unsets root ssh login banner
- restarts ssh daemon, if it is running
- sets root password interactively (unless --disable-setpass)
Status:
- checks turnkey-sudoadmin status, returns one of the following:
- unconfigured
- on
- off
EOF
exit 1
}
install_sudo() {
    # Install the sudo package unless it is already available.
    info $FUNCNAME $@
    # command -v is the POSIX replacement for the external `which`.
    command -v sudo >/dev/null && return
    apt-get update
    DEBIAN_FRONTEND=noninteractive apt-get -y install sudo
}

create_user() {
    # Create user $1 with a home directory and a random throwaway password
    # (the real password is set later, interactively), unless the account
    # already exists in /etc/passwd.
    info $FUNCNAME $@
    username=$1
    grep -q "^${username}:" /etc/passwd && return
    useradd --create-home --skel /etc/skel --shell /bin/bash "${username}"
    echo "${username}:$(mcookie)" | chpasswd
}
passwordless_sudo() {
    # Grant user $1 passwordless sudo via a dedicated sudoers.d snippet,
    # idempotently (the line is only appended once).
    info $FUNCNAME $@
    username=$1
    cfg="/etc/sudoers.d/99_${username}"
    str="${username} ALL=(ALL) NOPASSWD:ALL"

    touch "$cfg"
    grep -q "^${str}$" "$cfg" && return
    echo "$str" >> "$cfg"
    chmod 0440 "$cfg"
}

inithooks_sudoadmin() {
    # Set SUDOADMIN=<true|false> in /etc/default/inithooks (no-op when the
    # config file does not exist).
    info $FUNCNAME $@
    val=$1
    key="SUDOADMIN"
    cfg="/etc/default/inithooks"
    [ -e "$cfg" ] || return 0
    # Bug fix: the original used `grep || (echo >> cfg; return)` — the
    # `return` only left the (subshell), so the sed below still ran after
    # appending. Append-or-edit is now an explicit if/else.
    if ! grep -q "^${key}=" "$cfg"; then
        echo "$key=$val" >> "$cfg"
        return
    fi
    sed -i "s/^${key}=.*/$key=$val/" "$cfg"
}
setup_initfence() {
    # Copy root's turnkey-init-fence profile snippet to $1's profile.d,
    # prefixing each command with sudo and rewriting /root paths to the
    # user's home. No-op when the root snippet is absent or $1 is root.
    info $FUNCNAME $@
    username=$1
    root_profiled=/root/.profile.d/turnkey-init-fence
    user_profiled=/home/${username}/.profile.d/turnkey-init-fence
    if [ -e $root_profiled ]; then
        if [ $username != "root" ]; then
            mkdir -p $(dirname $user_profiled)
            cp $root_profiled $user_profiled
            sed -i "s|^|sudo |" $user_profiled
            sed -i "s|/root|/home\/${username}|g" $user_profiled
            chown -R $username:$username $(dirname $user_profiled)
        fi
    fi
    return 0
}

update_confconsole_services() {
    # Rewrite confconsole's advertised "user@host" strings from $2 to $1.
    info $FUNCNAME $@
    new_uname=$1
    old_uname=$2
    cfg="/etc/confconsole/services.txt"
    [ -e $cfg ] || return 0
    sed -i "s|${old_uname}@|${new_uname}@|g" $cfg
}

ssh_authorizedkeys_inithook() {
    # Point the ec2-sshkeys firstboot inithook at user $1.
    info $FUNCNAME $@
    username=$1
    sshkeys_inithook=/usr/lib/inithooks/firstboot.d/40ec2-sshkeys
    [ -e $sshkeys_inithook ] || return 0
    sed -i "s|^USERNAME.*|USERNAME = \'${username}\'|" $sshkeys_inithook
}

ssh_authorizedkeys_merge() {
    # Merge the authorized_keys of users $1 and $2: both files end up with
    # the sorted union of their keys. No-op unless both users exist.
    info $FUNCNAME $@
    user1=$1
    user2=$2

    grep -q ^${user1}: /etc/passwd || return 0
    grep -q ^${user2}: /etc/passwd || return 0

    # NOTE(review): `eval printf ~$user` forces a second round of tilde
    # expansion to resolve the user's home directory — fragile if the
    # username ever contains shell metacharacters; confirm usernames are
    # restricted before reusing this pattern.
    auth1="$(eval printf ~$user1)/.ssh/authorized_keys"
    mkdir -p $(dirname $auth1)
    chmod 0700 $(dirname $auth1)
    touch $auth1

    auth2="$(eval printf ~$user2)/.ssh/authorized_keys"
    mkdir -p $(dirname $auth2)
    chmod 0700 $(dirname $auth2)
    touch $auth2

    # Union the two key files, then copy the merged result over both.
    cat $auth1 $auth2 | sort | uniq > $auth1.new
    echo $auth1 $auth2 | xargs -n 1 cp $auth1.new
    rm $auth1.new

    chown -R $user1:$user1 $(dirname $auth1)
    chown -R $user2:$user2 $(dirname $auth2)
}
permitrootlogin_ssh() {
    # Set "PermitRootLogin <yes|no>" in sshd_config, appending the
    # directive when it is missing.
    info $FUNCNAME $@
    val=$1
    key="PermitRootLogin"
    cfg="/etc/ssh/sshd_config"
    # Bug fix: the original used `grep || (echo >> cfg; return)` — the
    # `return` only left the (subshell), so the sed still ran afterwards.
    if ! grep -q "^${key}" "$cfg"; then
        echo "$key $val" >> "$cfg"
        return
    fi
    sed -i "s/^${key} .*/$key $val/" "$cfg"
}

set_sshrootbanner() {
    # Write $1 as root's ssh login banner; an empty string clears it.
    info $FUNCNAME $@
    content=$1
    banner_path="/root/.ssh/banner"
    mkdir -p "$(dirname "$banner_path")"
    # Quoted so the banner text is written verbatim (no word-splitting
    # or glob expansion of the message).
    echo "$content" > "$banner_path"
}
user_state() {
    # Lock or unlock the password of user $1 ($2: "lock"|"unlock").
    # Non-root accounts additionally get their expiry date set (lock) or
    # cleared (unlock) so ssh-key logins are blocked too.
    info $FUNCNAME $@
    username=$1
    action=$2
    grep -q ^${username}: /etc/passwd || return 0
    passwd --${action} $username
    if [ $username != "root" ]; then
        [ $action == "lock" ] && usermod --expiredate "1" $username
        [ $action == "unlock" ] && usermod --expiredate "" $username
    fi
    return 0
}

restart_sshd() {
    # Restart sshd only if it is currently running (pid file exists).
    info $FUNCNAME $@
    if [ -e /var/run/sshd.pid ]; then
        /etc/init.d/ssh restart
    fi
}

setpass() {
    # Interactively set the password for user $1, preferring the inithooks
    # dialog script when available and falling back to plain passwd.
    info $FUNCNAME $@
    username=$1
    script=/usr/lib/inithooks/bin/setpass.py
    if [ -x $script ]; then
        $script $username
    else
        echo "Set password for $username"
        passwd $username
    fi
}
set_status() {
    # Persist the sudoadmin state ($1: "on"|"off") to $SUDOADMIN_STATE,
    # creating the containing directory when needed.
    mkdir -p "$(dirname "$SUDOADMIN_STATE")"
    cat > "$SUDOADMIN_STATE" <<EOF
# This is an automatically generated file, do not edit manually
state=$1
EOF
}
check_status() {
    # Report the recorded sudoadmin state ("on"/"off"), or "unconfigured"
    # when no state file has been written yet.
    if [ -f "$SUDOADMIN_STATE" ]; then
        sed -rn 's/state=(.*)/\1/p' "$SUDOADMIN_STATE"
    else
        echo 'unconfigured'
    fi
}
case $1 in
on)
[ "$(id -u)" != "0" ] && fatal "must be run with root permissions"
install_sudo;
create_user "admin";
passwordless_sudo "admin";
inithooks_sudoadmin "true";
setup_initfence "admin";
update_confconsole_services "admin" "root";
ssh_authorizedkeys_inithook "admin";
ssh_authorizedkeys_merge "admin" "root";
user_state "admin" "unlock";
user_state "root" "lock";
permitrootlogin_ssh "no";
set_sshrootbanner 'Please login as user "admin" rather than user "root".';
restart_sshd;
set_status "on"
[ "$2" == "--disable-setpass" ] || setpass "admin";
;;
off)
[ "$(id -u)" != "0" ] && fatal "must be run with root permissions"
inithooks_sudoadmin "false";
setup_initfence "root";
update_confconsole_services "root" "admin";
ssh_authorizedkeys_inithook "root";
ssh_authorizedkeys_merge "root" "admin";
permitrootlogin_ssh "yes";
user_state "root" "unlock";
user_state "admin" "lock";
set_sshrootbanner "";
restart_sshd;
set_status "off"
[ "$2" == "--disable-setpass" ] || setpass "root";
;;
status)
check_status;
;;
*)
usage;;
esac
| true |
beeeef2101ab0bb0e17966425b93cfce600e91fe | Shell | sarah-n-wright/gwas_pipeline | /single/e2_LD_prune.sh | UTF-8 | 1,233 | 2.78125 | 3 | [] | no_license | source ${script_path}Configs/$1 $2
echo "----------------->Prune for linkage disequilibrium<---------------------"
srun -l plink --bfile ${outDir}${outName}.LD_in \
--hwe 0.001 --geno 0.98 --make-bed \
--out ${outDir}${outName}.LD_zero
srun -l plink --bfile ${outDir}${outName}.LD_zero \
--indep-pairwise $LD_window $LD_shift $LD_r2 --make-bed \
--allow-no-sex --out ${outDir}${outName}.LD_one
echo "---------------Done indep-pairwise-------------------------"
## Getting stuck here???
srun -l plink --bfile ${outDir}${outName}.LD_in \
--extract ${outDir}${outName}.LD_one.prune.in --make-bed \
--allow-no-sex --out ${outDir}${outName}.LD_two
merge_file=${outDir}${outName}merge_list.txt
> $merge_file
chromosomes=(1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22)
for chr in ${chromosomes[@]}
do
echo "----------CHR "$chr"---------------------"
srun -l plink --bfile ${outDir}${baseName}chr${chr}.variant \
--extract ${outDir}${outName}.LD_one.prune.in --make-bed \
--allow-no-sex --out ${outDir}${baseName}chr${chr}.LD_pruned
echo ${outDir}${baseName}chr${chr}.LD_pruned >> $merge_file
done
srun -l plink --merge-list $merge_file --allow-no-sex \
--make-bed --out ${outDir}${outName}.LD_pruned
| true |
22e757ac73b8674425ab633f890fd4e1ab729f93 | Shell | dflook/python-minifier | /docker/build-images.sh | UTF-8 | 2,240 | 2.625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
DATE=$(date -I)
docker pull fedora:28
docker build --tag danielflook/python-minifier-build:python3.3-$DATE -f Dockerfile-fedora28 --target python3.3 .
docker pull fedora:30
docker build --tag danielflook/python-minifier-build:python2.7-$DATE -f Dockerfile-fedora30 --target python2.7 .
docker build --tag danielflook/python-minifier-build:python3.4-$DATE -f Dockerfile-fedora30 --target python3.4 .
docker build --tag danielflook/python-minifier-build:python3.5-$DATE -f Dockerfile-fedora30 --target python3.5 .
docker build --tag danielflook/python-minifier-build:python3.6-$DATE -f Dockerfile-fedora30 --target python3.6 .
docker build --tag danielflook/python-minifier-build:python3.7-$DATE -f Dockerfile-fedora30 --target python3.7 .
docker build --tag danielflook/python-minifier-build:python3.8-$DATE -f Dockerfile-fedora30 --target python3.8 .
docker build --tag danielflook/python-minifier-build:pypy-$DATE -f Dockerfile-fedora30 --target pypy .
docker build --tag danielflook/python-minifier-build:pypy3-$DATE -f Dockerfile-fedora30 --target pypy3 .
docker pull fedora:32
docker build --tag danielflook/python-minifier-build:python3.9-$DATE -f Dockerfile-fedora32 --target python3.9 .
docker pull fedora:34
docker build --tag danielflook/python-minifier-build:python3.10-$DATE -f Dockerfile-fedora34 --target python3.10 .
docker pull fedora:36
docker build --tag danielflook/python-minifier-build:python3.11-$DATE -f Dockerfile-fedora36 --target python3.11 .
docker push danielflook/python-minifier-build:python3.3-$DATE
docker push danielflook/python-minifier-build:python2.7-$DATE
docker push danielflook/python-minifier-build:python3.4-$DATE
docker push danielflook/python-minifier-build:python3.5-$DATE
docker push danielflook/python-minifier-build:python3.6-$DATE
docker push danielflook/python-minifier-build:python3.7-$DATE
docker push danielflook/python-minifier-build:python3.8-$DATE
docker push danielflook/python-minifier-build:python3.9-$DATE
docker push danielflook/python-minifier-build:python3.10-$DATE
docker push danielflook/python-minifier-build:python3.11-$DATE
docker push danielflook/python-minifier-build:pypy-$DATE
docker push danielflook/python-minifier-build:pypy3-$DATE
| true |
771977f0994da8f6a9a2f51da958eff8a1686acd | Shell | ldiamand/linux_embedded | /scripts/qemu_glibc/kernel-test.sh | UTF-8 | 689 | 2.578125 | 3 | [] | no_license | #!/bin/sh
# No va otra variable definida en el script principal
export DATA=${PROJECT_ROOT}/kernel/data
export KERNEL_IMAGE=${DATA}/zImage
export DTB_IMAGE=${DATA}/nova.dtb
QEMU_AUDIO_DRV=none qemu-system-arm -M ${QEMU_MACHINE} -m 128M \
-kernel ${KERNEL_IMAGE} -dtb ${DTB_IMAGE} -nographic
# -append "console=ttyAMA0 console=tty0" -serial pty
#QEMU_AUDIO_DRV=none qemu-system-arm -M vexpress-a15 -m 128M \
# -kernel ${KERNEL_IMAGE} -dtb ${DTB_IMAGE} \
# -append "console=tty0"
# -append "console=ttyAMA0 root=/dev/mmcblk0 rootfstype=squashfs" \
# -net nic -net tap,ifname=tap0,script=no,downscript=no \
# -drive file=images/rootfs.sqfs,if=sd,format=raw \
# -serial pty -s -S
| true |
ed811a6523c0df6328560dc8623eedacd19283b2 | Shell | jeromeglacken/dotfiles | /bash/.bash_profile | UTF-8 | 600 | 3.09375 | 3 | [] | no_license | # Load ~/.extra, ~/.bash_prompt, ~/.exports, ~/.aliases and ~/.functions
# ~/.extra can be used for settings you don’t want to commit
# Source each dotfile fragment that exists and is readable; brace expansion
# yields .bash_prompt, .exports and .aliases under ~/bin/dotfiles/bash/.
for file in ~/bin/dotfiles/bash/.{bash_prompt,exports,aliases}; do
	[ -r "$file" ] && source "$file"
done
unset file

# init rvm
[[ -s "$HOME/.rvm/scripts/rvm" ]] && . "$HOME/.rvm/scripts/rvm" # Load RVM function

# Enable git completion
if [ -f /usr/local/etc/bash_completion.d/git-completion.bash ]; then
	source /usr/local/etc/bash_completion.d/git-completion.bash
fi

# Prefer US English and use UTF-8
# NOTE(review): LANG lacks the ".UTF-8" suffix while LC_ALL has it — confirm
# this asymmetry is intended.
export LC_ALL="en_US.UTF-8"
export LANG="en_US"
| true |
68bfaba8a6ed5e963023cf071b8569e09ee5949b | Shell | bentappin/dotfiles | /bash/aliases.bash | UTF-8 | 888 | 3.140625 | 3 | [
"MIT"
] | permissive | # Alises and bash functions.
# Shorthand aliases; `ls -Gp` is the BSD/macOS form (colorized, dirs suffixed
# with "/") — the -G flag means something different on GNU ls.
alias g=git
alias v=vagrant
alias ls="ls -Gp"
# Print a sample of every ANSI color escape used by the prompt/theme, each
# labelled with its COLOR_* name. Relies on bash's `echo -e` interpreting
# \033 (ESC) and \t; output is byte-exact escape sequences, so this block is
# intentionally left untouched apart from comments.
colors() {
    echo -e "\033[0mCOLOR_NC (No color)"
    echo -e "\033[1;37mCOLOR_WHITE\t\033[0;30mCOLOR_BLACK"
    echo -e "\033[0;34mCOLOR_BLUE\t\033[1;34mCOLOR_LIGHT_BLUE"
    echo -e "\033[0;32mCOLOR_GREEN\t\033[1;32mCOLOR_LIGHT_GREEN"
    echo -e "\033[0;36mCOLOR_CYAN\t\033[1;36mCOLOR_LIGHT_CYAN"
    echo -e "\033[0;31mCOLOR_RED\t\033[1;31mCOLOR_LIGHT_RED"
    echo -e "\033[0;35mCOLOR_PURPLE\t\033[1;35mCOLOR_LIGHT_PURPLE"
    echo -e "\033[0;33mCOLOR_YELLOW\t\033[1;33mCOLOR_LIGHT_YELLOW"
    echo -e "\033[1;30mCOLOR_GRAY\t\033[0;37mCOLOR_LIGHT_GRAY"
}
# Print the numeric (byte) code of the first character of the argument.
# The "'c" form is POSIX printf: a leading single-quote makes %d emit the
# character's code; LC_CTYPE=C pins the result to plain byte semantics.
ord() {
  local char=$1
  LC_CTYPE=C printf '%d' "'$char"
}
# Look up a Wikipedia summary via the wp.dg.cx DNS TXT gateway (network I/O).
wiki() { dig +short txt "$*".wp.dg.cx; }
# Render a man page as PostScript and open it in macOS Preview.
pman() { man -t "$@" | open -f -a Preview; }
# Print "heads" or "tails" with equal probability, driven by bash's $RANDOM.
flipcoin() {
  if (( RANDOM % 2 == 0 )); then
    echo heads
  else
    echo tails
  fi
}
# Source the given file if it exists; silently do nothing otherwise.
# Fix: "$1" is now quoted — the original unquoted $1 word-split and glob-
# expanded, so any path containing spaces (or glob chars) broke the test
# and the source.
source_file() {
  if [ -f "$1" ]; then
    source "$1"
  fi
}
| true |
a2c51a7fd490360e2d05d7404ebad413c604c529 | Shell | xsh-lib/aws | /functions/gist/ec2/linux/installer/supervisor.sh | UTF-8 | 1,962 | 3.890625 | 4 | [
"MIT"
] | permissive | #? Description:
#? Install supervisor with pip.
#? Run this script under root on Linux.
#?
#? Usage:
#? @supervisor [-i] [-o] [-s] [-v VERSION]
#?
#? Options:
#? [-i]
#?
#? Generate initd script, to enable `service supervisord <start|stop>`.
#?
#? [-o]
#?
#? Enable the service to start at system boot time.
#? It's a wrapper of `chkconfig <NAME> on`.
#? This option is valid only while the `-i` is specified.
#?
#? [-s]
#?
#? Start the service after successfully installed the package.
#? It's a wrapper of `service <NAME> start`.
#? This option is valid only while the `-i` is specified.
#?
#? [-v VERSION]
#?
#? Install a specific version, default is the latest version in pip.
#?
#? @xsh /trap/err -eE
#? @subshell
#?
function supervisor () {
    # Install supervisor from pip and write a default /etc/supervisord.conf
    # whose [include] section pulls /etc/supervisor/conf.d/*.ini.
    # Flags: -i install an init.d script, -o enable at boot (chkconfig),
    # -s start the service now, -v VERSION pin a pip version.
    local pkg=supervisor
    local want_initd=0 boot_enable=0 start_now=0
    local OPTIND OPTARG opt

    while getopts iosv: opt; do
        case $opt in
            i) want_initd=1 ;;
            o) boot_enable=1 ;;
            s) start_now=1 ;;
            v) pkg=supervisor==$OPTARG ;;
            *) return 255 ;;
        esac
    done

    pip install "$pkg"

    mkdir -p /etc/supervisor/conf.d
    # Uncomment [include] and point it at the conf.d drop-in directory.
    echo_supervisord_conf \
        | sed -e 's/;\[include]/[include]/' \
              -e 's|;files = .*|files = /etc/supervisor/conf.d/*.ini|' \
        > /etc/supervisord.conf

    if [[ $want_initd -eq 1 ]]; then
        # reference: https://github.com/alexzhangs/supervisord
        curl -Lfsv https://raw.githubusercontent.com/alexzhangs/supervisord/master/supervisord \
             -o /etc/init.d/supervisord
        chmod 755 /etc/init.d/supervisord
    fi

    if [[ $boot_enable -eq 1 ]]; then
        chkconfig supervisord on
    fi

    if [[ $start_now -eq 1 ]]; then
        service supervisord start
    fi
}
| true |
58276286a912cc6775fec0a638feadc87c125bcd | Shell | negibokken/sandbox | /atcoder/abc/abc098/questionC/test.sh | UTF-8 | 551 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Run ./main with the captured stdin and compare its output to the expected
# value given as arguments; print "input => output" on success, a diagnostic
# and exit 1 on mismatch.
#
# Fixes over the original:
#  * expected="$*" — assigning "$@" to a scalar is ill-defined; "$*" is the
#    well-defined "join all args" form.
#  * stdin is forwarded with printf '%s\n' "$input" instead of the unquoted
#    `echo $input`, which word-split the captured text and collapsed its
#    newlines, breaking any solution that reads input line by line.
try() {
  expected="$*"
  input=$(cat -)
  actual=$(printf '%s\n' "$input" | ./main)
  if [ "$actual" = "$expected" ]; then
    echo "$input => $actual"
  else
    echo "==="
    echo "$input => $expected expected, but got $actual"
    exit 1
  fi
}
# Each case pipes "N" and the W/E string into ./main via `try`; the argument
# to `try` is the expected answer. (Labels renumbered — the original repeated
# "test case 2" for cases 3 through 6.)
## test case 1
cat << EOF | try 1
5
WEEWW
EOF
## test case 2
cat << EOF | try 4
12
WEWEWEEEWWWE
EOF
## test case 3
cat << EOF | try 3
8
WWWWWEEE
EOF
## test case 4
cat << EOF | try 0
8
EWWWWWWW
EOF
## test case 5
cat << EOF | try 0
8
EEEEEEEW
EOF
## test case 6
cat << EOF | try 1
4
WEEW
EOF
| true |
9f3e7c163bd9e65f899116c81e87db70fd544cf9 | Shell | chiehting/lab | /bash/speed-site-curl.sh | UTF-8 | 630 | 3.28125 | 3 | [] | no_license | #/bin/sh
# Probe a health-check endpoint `count` times, printing curl's per-phase
# timing breakdown (DNS, connect, TLS, transfer) for each request.

# Run from the script's own directory; fail fast if it cannot be entered.
# (Fix: the original unquoted `cd `dirname $0`` broke on paths with spaces
# and silently continued in the wrong directory on failure.)
cd "$(dirname "$0")" || exit 1

count=3
# POSIX arithmetic expansion instead of the archaic external `expr`.
interval=$((9 / count))
domain=https://stage-api.example.com/api/v1/ping
echo "$domain"

for i in $(seq 1 "$count")
do
    # Fix: pass $(date) as a %s argument — data must never be part of a
    # printf format string.
    printf '##### starting_time: %s\n' "$(date)"
    # The quoted here-doc ('EOF') feeds curl's -w format via @- (stdin), so
    # the %{...} variables reach curl without shell expansion.
    curl -w @- -s -o /dev/null "$domain" \
<<'EOF'
    \n
            time_namelookup: %{time_namelookup}\n
               time_connect: %{time_connect}\n
            time_appconnect: %{time_appconnect}\n
              time_redirect: %{time_redirect}\n
           time_pretransfer: %{time_pretransfer}\n
         time_starttransfer: %{time_starttransfer}\n
    ---------------------\n
          time_total: %{time_total} seconds\n
EOF
    sleep "$interval"
done
| true |
31715cc93c02ea566dcae3306381952a5c3ce56e | Shell | tompave/simpledb_exercise | /test/hello_world.bash | UTF-8 | 744 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env bash
set -euo pipefail
source "$(dirname "$0")/_setup.bash"
# Verify the program under test handles the basic "hello" command.
# Depends on _setup.bash (sourced by the caller) for $PROGRAM, _fail, _pass
# and the _escape_* formatting variables — TODO confirm against _setup.bash.
function test_hello_world() {
    local expected_output="Hello World!"
    # NOTE(review): `local x=$(cmd)` masks $PROGRAM's exit status (SC2155);
    # under the script's `set -e` that appears deliberate, so a non-zero exit
    # still falls through to the output comparison below — confirm intended.
    local actual_output=$($PROGRAM hello)
    if [[ "$actual_output" != "$expected_output" ]]; then
        _fail "Hello World test failed. The program should handle the basic 'Hello World!' command."
        # Multi-line diagnostic; the embedded expansions are part of the
        # user-facing string and must not be altered.
        echo -e "
    ${_escape_bold}For input:${_escape_normal}
    hello
    ${_escape_bold}the program should have produced this output:${_escape_normal}
    $expected_output
    ${_escape_bold}but instead it was:${_escape_normal}
    $actual_output
    ---------------------------------------------
    "
        return 1
    fi;
    _pass "The program and the tests are working! 🎉"
}
test_hello_world
| true |
b6965bb81ac9aac0172f2aa0ebb56dae92361fca | Shell | dahburj/SNC | /deps.sh | UTF-8 | 428 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
MACH_ARCH=`uname -m`
sudo apt-get update
sudo apt-get -y upgrade
sudo apt-get install -y qt5-default qtcreator libasound2-dev build-essential libopencv-dev
sudo apt-get install -y libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libgstreamer-plugins-bad1.0-dev gstreamer1.0-libav libssl-dev openssl
if [ $MACH_ARCH = 'armv7l' ] ; then
sudo pip3 install --upgrade pip3
sudo pip3 install --upgrade bme680
fi
| true |
9b2df1e7a1cbc9205760b087ed0a4e47978d94fa | Shell | vlki/spacewalk-xmlrpc-tests | /tests/RHNSatellite/FrontendAPI/auth/login/runtest.sh | UTF-8 | 812 | 2.796875 | 3 | [] | no_license | #!/bin/bash
#
# Copyright (c) 2011, Jan Vlcek
# All rights reserved.
#
# For further information see enclosed LICENSE file.
#
#
# The test of auth.login frontend call
#
# Author: Jan Vlcek <xvlcek03@stud.fit.vutbr.cz>
#
# Include the common setup
. ./../../../setup.sh
rlJournalStart
# ===================================================================
# Do the testing
# ===================================================================
if rlSpacewalkVersionIs "1.2"; then
rlPhaseStartTest "Testing auth.login of default administrator"
rlSatelliteSaveTomcat6Log
rlSatelliteXmlRpcFrontendRun "auth.login.py"
# Expect the session key of length 36
rlAssertGrep "[a-z0-9]\{36\}" "$rlRun_LOG"
rlRun "rm -f $rlRun_LOG"
rlSatelliteAssertTomcat6LogNotDiffer
rlPhaseEnd
fi
rlJournalEnd
rlJournalPrintText
| true |
f84237bb0c938f829f270a97f6cbf1fa775255f0 | Shell | dcangarthK/test22222 | /deploy-namespace.sh | UTF-8 | 566 | 3.265625 | 3 | [] | no_license | #!/bin/bash
BIN_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )
RSRC_DIR="${BIN_DIR}/resources"
if [[ -z "$NAMESPACE" ]]; then
echo
echo -e "\033[33;5mError\033[0m"
echo
echo "Must provide NAMESPACE in environment" 1>&2
echo "This is the namespace we will be deploying elasticsearch to"
echo
echo "For example..."
echo "export NAMESPACE=test-elastic"
echo
exit 1
fi
oc new-project $NAMESPACE
oc patch namespace $NAMESPACE --patch '{ "metadata":{"annotations": {"openshift.io/node-selector": "" }}}'
| true |
1aa26c133fd90c393955c9b3d26382c6cdc0214a | Shell | umd-lhcb/MiniDAQ-config | /config/home/admin/.bashrc | UTF-8 | 338 | 2.546875 | 3 | [] | no_license | # .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# Source DIM env variables
if [ -f /etc/sysconfig/dim ]; then
. /etc/sysconfig/dim
fi
# Quartus related
export QUARTUSPATH=$HOME/opt/intelFPGA_lite/18.1
export PATH=$PATH:${QUARTUSPATH}/quartus/bin
export PATH=$PATH:${QUARTUSPATH}/nios2eds/bin
| true |
fb3073986bec58938dc834d499bccdb98943ae6e | Shell | guilt/OpenStack | /openstack.sh | UTF-8 | 21,355 | 3.203125 | 3 | [] | no_license | #!/bin/sh
set -e

# Primary IP of eth0 (first inet address); used as the default bind/endpoint
# address for every OpenStack service configured below.
PRIVATE_IP=$(ip addr show eth0 | grep 'inet ' | awk '{print $2}' | cut -f1 -d'/' | head -n 1 | awk '{print $1}')
# Best-effort public IP lookup. Fix: `|| true` keeps `set -e` from aborting
# the whole script when there is no outbound connectivity (curl exits
# non-zero on timeout, which previously killed the run before the fallback).
PUBLIC_IP=$(curl --silent ident.me --connect-timeout 2 2>/dev/null || true)
# Fall back to the private address when the lookup returned nothing.
# Fix: rewritten as a plain `if` — the original `[ -z $X ] && X=...` AND-list
# returns 1 whenever the test is false, which aborts under `set -e`.
if [ -z "${PUBLIC_IP}" ]; then
    PUBLIC_IP=${PRIVATE_IP}
fi

# --- Service credentials (override any of these via ./openstackbootrc) -----
MYSQL_USER=root
MYSQL_PASSWORD=password
MQ_USER=openstack
MQ_PASSWORD=password
KS_PASSWORD=password
GLANCE_PASSWORD=password
NOVA_PASSWORD=password
NEUTRON_PASSWORD=password
CINDER_PASSWORD=password

# --- Service endpoints: everything on this host by default -----------------
MYSQL_IP=${PRIVATE_IP}
MQ_IP=${PRIVATE_IP}
KS_IP=${PRIVATE_IP}
GLANCE_IP=${PRIVATE_IP}
NOVA_IP=${PRIVATE_IP}
NEUTRON_IP=${PRIVATE_IP}
CINDER_IP=${PRIVATE_IP}

USER_PASSWORD=password

# Physical NICs bridged into Neutron's integration/external OVS bridges.
NEUTRON_BR_INT_PORT=eth1
NEUTRON_BR_EX_PORT=eth2

DNS_SERVERS=8.8.8.8

# Optional local overrides for any of the settings above.
# Fix: plain `if` instead of `[ -f ... ] && .` for the same `set -e` reason.
if [ -f openstackbootrc ]; then
    . ./openstackbootrc
fi

# Fix: "$USER" quoted so an unset/odd USER value cannot break the test.
if [ "$USER" != root ]; then
    echo 'Please run as root.'
    exit 1
fi

# NOTE(review): /etc/os-release exists on most modern distros, not only
# Ubuntu/Debian — this check is weaker than the message suggests.
if [ -f /etc/os-release ]; then
    echo 'Ubuntu/Debian ... Proceeding.'
else
    echo 'Require Ubuntu/Debian.'
    exit 1
fi

# One-shot system upgrade + base packages; the .00-upgrade marker file makes
# this step idempotent across re-runs.
[ -f .00-upgrade ] || {
    echo 'Upgrading'
    apt-get update && apt-get -y dist-upgrade
    add-apt-repository cloud-archive:liberty
    apt-get update && apt-get -y dist-upgrade
    echo 'Installing NTP'
    apt-get install -y ntp
}
touch .00-upgrade
#Run on MYSQL_IP
[ -f .01-db ] || {
echo 'Setting Password for MySQL'
echo "mysql-server mysql-server/root_password password ${MYSQL_PASSWORD}" | sudo debconf-set-selections
echo "mysql-server mysql-server/root_password_again password ${MYSQL_PASSWORD}" | sudo debconf-set-selections
echo 'Installing MySQL Server'
apt-get install -y mysql-server
apt-get install -y --force-yes python-pymysql
echo 'Setting MySQLD Configuration'
cat > /etc/mysql/conf.d/mysqld_openstack.cnf <<EOF
[mysqld]
bind-address = 0.0.0.0
default-storage-engine = innodb
innodb_file_per_table
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
EOF
}
touch .01-db
#Run on MQ_IP
[ -f .02-mq ] || {
echo 'Installing RabbitMQ'
apt-get install -y rabbitmq-server
echo 'Setting Password for RabbitMQ'
rabbitmqctl add_user ${MQ_USER} ${MQ_PASSWORD} || echo "User Exists."
rabbitmqctl set_permissions ${MQ_USER} ".*" ".*" ".*"
}
touch .02-mq
#Run on MYSQL_IP
[ -f .03-ospass ] || {
echo 'Setting Password for MySQL Openstack Processes'
{
cat <<EOF
CREATE DATABASE IF NOT EXISTS keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'MYSQL_PASSWORD';
CREATE DATABASE IF NOT EXISTS glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'MYSQL_PASSWORD';
CREATE DATABASE IF NOT EXISTS nova;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'MYSQL_PASSWORD';
CREATE DATABASE IF NOT EXISTS neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'MYSQL_PASSWORD';
CREATE DATABASE IF NOT EXISTS cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'MYSQL_PASSWORD';
FLUSH PRIVILEGES;
FLUSH TABLES;
EOF
} | sed s/MYSQL_PASSWORD/${MYSQL_PASSWORD}/g | mysql -u${MYSQL_USER} -p${MYSQL_PASSWORD}
}
touch .03-ospass
#Run on All Accordingly.
[ -f .04-ospkg ] || {
echo 'Installing Openstack Clients'
apt-get install --force-yes -y python-openstackclient python-glanceclient python-novaclient python-cinderclient
echo 'Installing Keystone Servers'
apt-get install --force-yes -y keystone apache2 libapache2-mod-wsgi memcached python-memcache
echo 'Installing Glance Servers'
apt-get install --force-yes -y glance
echo 'Installing Nova Servers'
apt-get install --force-yes -y nova-api nova-cert nova-conductor nova-consoleauth nova-scheduler nova-console
echo 'Installing Nova Agents'
apt-get install --force-yes -y nova-compute qemu-kvm sysfsutils nova-novncproxy
echo 'Installing Neutron Servers'
apt-get install --force-yes -y neutron-server neutron-common neutron-dhcp-agent neutron-l3-agent neutron-metadata-agent
echo 'Installing Neutron Agents'
apt-get install neutron-common neutron-plugin-openvswitch-agent openvswitch-switch neutron-plugin-ml2
#neutron-plugin-openvswitch is Deprecated.
echo 'Installing Cinder Servers'
apt-get install --force-yes -y cinder-api cinder-scheduler open-iscsi open-iscsi-utils
#open-iscsi-utils is Deprecated
echo 'Installing Cinder Agents'
apt-get install --force-yes -y cinder-volume lvm2 sysfsutils iscsitarget
echo 'Installing Dashboard'
apt-get install --force-yes -y openstack-dashboard
}
touch .04-ospkg
#Run on KS_IP
[ -f .05-oskscfg ] || {
echo 'Set up Keystone'
{
cat <<EOF
[DEFAULT]
admin_token = KS_PASSWORD
log_dir = /var/log/keystone
[database]
connection = mysql://keystone:MYSQL_PASSWORD@MYSQL_IP/keystone
[revoke]
driver = sql
[token]
provider = uuid
driver = memcache
EOF
} | sed s/MYSQL_PASSWORD/${MYSQL_PASSWORD}/g | sed s/MYSQL_IP/${MYSQL_IP}/g | sed s/KS_PASSWORD/${KS_PASSWORD}/g > /etc/keystone/keystone.conf
{
cat <<EOF
manual
EOF
} > /etc/init.d/keystone.override
{
cat <<EOF
Listen 5000
Listen 35357
<VirtualHost *:5000>
WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-public
WSGIScriptAlias / /usr/bin/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/keystone.log
CustomLog /var/log/apache2/keystone_access.log combined
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
<VirtualHost *:35357>
WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/keystone.log
CustomLog /var/log/apache2/keystone_access.log combined
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
EOF
} > /etc/apache2/sites-available/001-wsgi-keystone.conf
ln -sf /etc/apache2/sites-available/001-wsgi-keystone.conf /etc/apache2/sites-enabled/001-wsgi-keystone.conf
keystone-manage db_sync
service keystone stop || echo "Unable to stop Keystone."
service apache2 restart
echo 'Set up Keystone Credentials'
export OS_TOKEN=${KS_PASSWORD}
export OS_URL=http://${KS_IP}:35357/v3
export OS_IDENTITY_API_VERSION=3
openstack project create --domain default --description "Openstack Project" admin
openstack project create --domain default --description "Openstack Service" service
openstack user create --domain default --password $USER_PASSWORD admin
openstack role create admin
openstack role add --project admin --user admin admin
openstack service create --name service-keystone --description "Openstack Identity Service" identity
openstack endpoint create --region region-one identity public http://${KS_IP}:5000/v3
openstack endpoint create --region region-one identity internal http://${PRIVATE_IP}:5000/v3
openstack endpoint create --region region-one identity admin http://127.0.0.1:35357/v3
unset OS_TOKEN OS_URL OS_IDENTITY_API_VERSION
}
touch .05-oskscfg
#Run on All Clients
echo 'Set up Openstack Credentials'
[ -f openstackrc ] || {
cat <<EOF
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=admin
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=USER_PASSWORD
export OS_AUTH_URL=http://KS_IP:35357/v3
export OS_IDENTITY_API_VERSION=3
EOF
} | sed s/USER_PASSWORD/${USER_PASSWORD}/g | sed s/KS_IP/${KS_IP}/g > openstackrc
[ -f openstackrc ] || {
echo "Unable to load Openstack Config."
exit 1
}
. ./openstackrc
#Run on GLANCE_IP
[ -f .06-osglcfg ] || {
echo 'Set up Glance'
echo 'Set up Glance Credentials'
openstack user create --domain default --password ${GLANCE_PASSWORD} glance
openstack role add --project service --user glance admin
openstack service create --name service-glance --description "Openstack Image Service" image
openstack endpoint create --region region-one image public http://${GLANCE_IP}:9292
openstack endpoint create --region region-one image internal http://${PRIVATE_IP}:9292
openstack endpoint create --region region-one image admin http://127.0.0.1:9292
{
cat <<EOF
[DEFAULT]
[database]
connection = mysql://glance:MYSQL_PASSWORD@MYSQL_IP/glance
[keystone_authtoken]
auth_uri = http://KS_IP:5000/v3
auth_url = http://KS_IP:35357/v3
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = GLANCE_PASSWORD
[paste_deploy]
flavor = keystone
[glance_store]
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
EOF
} | sed s/MYSQL_PASSWORD/${MYSQL_PASSWORD}/g | sed s/MYSQL_IP/${MYSQL_IP}/g | sed s/GLANCE_PASSWORD/${GLANCE_PASSWORD}/g | sed s/KS_IP/${KS_IP}/g > /etc/glance/glance-api.conf
{
cat <<EOF
[DEFAULT]
[database]
connection = mysql://glance:MYSQL_PASSWORD@MYSQL_IP/glance
[keystone_authtoken]
auth_uri = http://KS_IP:5000/v3
auth_url = http://KS_IP:35357/v3
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = GLANCE_PASSWORD
[paste_deploy]
flavor = keystone
EOF
} | sed s/MYSQL_PASSWORD/${MYSQL_PASSWORD}/g | sed s/MYSQL_IP/${MYSQL_IP}/g | sed s/GLANCE_PASSWORD/${GLANCE_PASSWORD}/g | sed s/KS_IP/${KS_IP}/g > /etc/glance/glance-registry.conf
glance-manage db_sync
service glance-api restart
service glance-registry restart
echo 'Fetching Glance Image'
wget -c http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img -O bootstrap.img
echo 'Uploading Glance Image'
glance image-create --name "cirros" --file bootstrap.img --disk-format qcow2 --container-format bare --visibility public --progress
echo 'Cleaning Glance Image'
rm -f bootstrap.img
}
touch .06-osglcfg
#Run on NOVA_IP
[ -f .07-osnvcfg ] || {
echo 'Set up Nova'
echo 'Set up Nova Credentials'
openstack user create --domain default --password ${NOVA_PASSWORD} nova
openstack role add --project service --user nova admin
openstack service create --name service-nova --description "Openstack Compute Service" compute
openstack endpoint create --region region-one compute public http://${NOVA_IP}:8774/v2/%\(tenant_id\)s
openstack endpoint create --region region-one compute internal http://${PRIVATE_IP}:8774/v2/%\(tenant_id\)s
openstack endpoint create --region region-one compute admin http://127.0.0.1:8774/v2/%\(tenant_id\)s
{
cat <<EOF
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
log_dir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
libvirt_use_virtio_for_bridges=True
verbose=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
enabled_apis=ec2,osapi_compute,metadata
rpc_backend = rabbit
auth_strategy = keystone
my_ip = PRIVATE_IP
vnc_enabled = True
vncserver_listen = 127.0.0.1
vncserver_proxyclient_address = 127.0.0.1
novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
scheduler_default_filters=AllHostsFilter
[database]
connection = mysql://nova:MYSQL_PASSWORD@MYSQL_IP/nova
[oslo_messaging_rabbit]
rabbit_host = MQ_IP
rabbit_userid = MQ_USER
rabbit_password = MQ_PASSWORD
[keystone_authtoken]
auth_uri = http://KS_IP:5000/v3
auth_url = http://KS_IP:35357/v3
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = NOVA_PASSWORD
[glance]
host = PRIVATE_IP
[oslo_concurrency]
lock_path = /var/lock/nova
[neutron]
service_metadata_proxy = True
metadata_proxy_shared_secret = openstack
url = http://NEUTRON_IP:9696
auth_strategy = keystone
admin_auth_url = http://KS_IP:35357/v3
admin_tenant_name = service
admin_username = neutron
admin_password = NEUTRON_PASSWORD
[cinder]
os_region_name = region-one
EOF
} | sed s/NEUTRON_PASSWORD/${NEUTRON_PASSWORD}/g | sed s/NOVA_PASSWORD/${NOVA_PASSWORD}/g | sed s/MQ_PASSWORD/${MQ_PASSWORD}/g | sed s/MQ_IP/${MQ_IP}/g | sed s/MQ_USER/${MQ_USER}/g | sed s/MYSQL_PASSWORD/${MYSQL_PASSWORD}/g | sed s/MYSQL_IP/${MYSQL_IP}/g | sed s/KS_IP/${KS_IP}/g | sed s/NEUTRON_IP/${NEUTRON_IP}/g > /etc/nova/nova.conf
{
cat <<EOF
[DEFAULT]
compute_driver=libvirt.LibvirtDriver
[libvirt]
virt_type=qemu
EOF
} > /etc/nova/nova-compute.conf
nova-manage db sync
service nova-api restart
service nova-cert restart
service nova-console restart
service nova-consoleauth restart
service nova-scheduler restart
service nova-conductor restart
service nova-compute restart
service nova-novncproxy restart
#The following requires root.
nova-manage service list
}
touch .07-osnvcfg
#Run on Neutron Client
[ -f .08-sysctl ] || {
echo 'Setting Sysctl'
cat > /etc/sysctl.d/50-openstack.conf <<EOF
net.ipv4.ip_forward = 1
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
EOF
sysctl -p
service procps restart
}
touch .08-sysctl
#Run on Neutron IP
[ -f .08-osneucfg ] || {
echo 'Set up Neutron Credentials'
echo 'Setting Password for Neutron'
openstack user create --domain default --password ${NEUTRON_PASSWORD} neutron
openstack role add --project service --user neutron admin
openstack service create --name service-neutron --description "Openstack Network Service" network
openstack endpoint create --region region-one network public http://${PRIVATE_IP}:9696
openstack endpoint create --region region-one network internal http://${PRIVATE_IP}:9696
openstack endpoint create --region region-one network admin http://127.0.0.1:9696
echo 'Set up Neutron'
{
cat <<EOF
[DEFAULT]
core_plugin = ml2
service_plugins = router
rpc_backend = rabbit
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://NOVA_IP:8774/v2
[agent]
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
[keystone_authtoken]
auth_uri = http://KS_IP:35357/v3
identity_uri = http://KS_IP:5000/v3
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = NEUTRON_PASSWORD
[database]
connection = mysql://neutron:MYSQL_PASSWORD@MYSQL_IP/neutron
[nova]
auth_url = http://KS_IP:35357/v3
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = region-one
project_name = service
username = nova
password = NOVA_PASSWORD
[oslo_concurrency]
lock_path = /var/lib/nova/lock
[oslo_messaging_rabbit]
rabbit_host = MQ_IP
rabbit_userid = MQ_USER
rabbit_password = MQ_PASSWORD
EOF
} | sed s/NOVA_IP/${NOVA_IP}/g | sed s/KS_IP/${KS_IP}/g | sed s/NEUTRON_PASSWORD/${NEUTRON_PASSWORD}/g | sed s/MYSQL_PASSWORD/${MYSQL_PASSWORD}/g | sed s/MYSQL_IP/${MYSQL_IP}/g | sed s/NOVA_PASSWORD/${NOVA_PASSWORD}/g | sed s/MQ_IP/${MQ_IP}/g | sed s/MQ_USER/${MQ_USER}/g | sed s/MQ_PASSWORD/${MQ_PASSWORD}/g > /etc/neutron/neutron.conf
{
cat <<EOF
[ml2]
type_drivers=flat,vlan
tenant_network_types=vlan,flat
mechanism_drivers=openvswitch
[ml2_type_flat]
flat_networks=External
[ml2_type_vlan]
network_vlan_ranges=Internal:100:200
[securitygroup]
firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group=True
[ovs]
bridge_mappings=External:br-ex,Internal:br-int
EOF
} > /etc/neutron/plugins/ml2/ml2_conf.ini
ovs-vsctl add-br br-int || echo "Unable to add-br Internal"
ovs-vsctl add-br br-ex || echo "Unable to add-br External"
ovs-vsctl add-port br-int ${NEUTRON_BR_INT_PORT} || echo "Unable to add-port to Internal"
ovs-vsctl add-port br-ex ${NEUTRON_BR_EX_PORT} || echo "Unable to add-port to External"
BR_EX_ADDRESS=`/sbin/ifconfig ${NEUTRON_BR_EX_PORT} | awk '/inet addr/ {print $2}' | cut -f2 -d ":" `
[ -z "$BR_EX_ADDRESS" ] || {
sed -i "/${NEUTRON_BR_EX_PORT}/,\$d" /etc/network/interfaces
{
cat << EOF
auto eth2
iface eth2 inet manual
up ifconfig \$IFACE 0.0.0.0 up
up ip link set \$IFACE promisc on
down ip link set \$IFACE promisc off
down ifconfig \$IFACE down
# OpenVSwitch Managed
auto br-ex
iface br-ex inet static
address BR_EX_ADDRESS
netmask 255.255.255.0
up ip link set \$IFACE promisc on
down ip link set \$IFACE promisc off
EOF
} | sed s/BR_EX_ADDRESS/${BR_EX_ADDRESS}/g >> /etc/network/interfaces
}
{
cat <<EOF
[DEFAULT]
auth_uri = http://KS_IP:5000/v3
auth_url = http://KS_IP:35357/v3
auth_region = region-one
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = NEUTRON_PASSWORD
nova_metadata_ip = NOVA_IP
metadata_proxy_shared_secret = openstack
[AGENT]
EOF
} | sed s/KS_IP/${KS_IP}/g | sed s/NOVA_IP/${NOVA_IP}/g | sed s/NEUTRON_PASSWORD/${NEUTRON_PASSWORD}/g > /etc/neutron/metadata_agent.ini
{
cat <<EOF
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
use_namespaces = True
dnsmasq_dns_servers = 8.8.8.8
[AGENT]
EOF
} | sed s/DNS_SERVERS/${DNS_SERVERS}/g > /etc/neutron/dhcp_agent.ini
{
cat <<EOF
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
use_namespaces = True
[AGENT]
EOF
} > /etc/neutron/l3_agent.ini
neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade liberty
service neutron-server restart
service neutron-plugin-openvswitch-agent restart
service neutron-metadata-agent restart
service neutron-dhcp-agent restart
service neutron-l3-agent restart
neutron agent-list
}
touch .08-osneucfg
#Run on Cinder IP
[ -f .09-oscincfg ] || {
echo 'Set up Cinder Credentials'
echo 'Setting Password for Cinder'
openstack user create --domain default --password ${CINDER_PASSWORD} cinder
openstack role add --project service --user cinder admin
openstack service create --name service-cinder --description "Openstack Block Storage v1 Service" volume
openstack service create --name service-cinderv2 --description "Openstack Block Storage v2 Service" volumev2
openstack endpoint create --region region-one volume public http://${CINDER_IP}:8776/v1/%\(tenant_id\)s
openstack endpoint create --region region-one volume internal http://${PRIVATE_IP}:8776/v1/%\(tenant_id\)s
openstack endpoint create --region region-one volume admin http://127.0.0.1:8776/v1/%\(tenant_id\)s
openstack endpoint create --region region-one volumev2 public http://${CINDER_IP}:8776/v2/%\(tenant_id\)s
openstack endpoint create --region region-one volumev2 internal http://${PRIVATE_IP}:8776/v2/%\(tenant_id\)s
openstack endpoint create --region region-one volumev2 admin http://127.0.0.1:8776/v2/%\(tenant_id\)s
echo 'Set up Cinder'
{
cat <<EOF
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_confg = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes
rpc_backend = rabbit
auth_strategy = keystone
my_ip = PRIVATE_IP
enabled_backends = lvm
glance_host = GLANCE_IP
[database]
connection = mysql+pymysql://cinder:MYSQL_PASSWORD@MYSQL_IP/cinder
[oslo_messaging_rabbit]
rabbit_host = MQ_IP
rabbit_userid = MQ_USER
rabbit_password = MQ_PASSWORD
[keystone_authtoken]
auth_uri = http://KS_IP:5000/v3
auth_url = http://KS_IP:35357/v3
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = CINDER_PASSWORD
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = tgtadm
EOF
} | sed s/GLANCE_IP/${GLANCE_IP}/g | sed s/PRIVATE_IP/${PRIVATE_IP}/g | sed s/KS_IP/${KS_IP}/g | sed s/CINDER_PASSWORD/${CINDER_PASSWORD}/g | sed s/MYSQL_PASSWORD/${MYSQL_PASSWORD}/g | sed s/MYSQL_IP/${MYSQL_IP}/g | sed s/MQ_IP/${MQ_IP}/g | sed s/MQ_USER/${MQ_USER}/g | sed s/MQ_PASSWORD/${MQ_PASSWORD}/g > /etc/cinder/cinder.conf
cinder-manage db sync
pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb
service cinder-scheduler restart
service cinder-api restart
service cinder-volume restart
service tgt restart
}
touch .09-oscincfg
[ -f .10-createvm ] || {
echo 'Set up Test VM'
cinder create --display-name myVolume 1
cinder list
neutron subnet-create --name sn1 n1 10.10.10.0/24
neutron net-create en1 --router:external=True --shared --provider:network_type flat --provider:physical_network External
neutron subnet-create --name sen1 --allocation-pool start=192.168.57.100,end=192.168.57.105 en1 192.168.57.0/24
neutron router-create r1
neutron router-gateway-set r1 en1
neutron router-interface-add r1 sn1
neutron security-group-create sg1
neutron security-group-rule-create --protocol icmp sg1
neutron security-group-rule-create --protocol tcp --port-range-min 22 --port-range-max 22 sg1
NET_ID=`nova net-list | awk -v n=4 'n == NR' | cut -d '|' -f 1`
nova boot --flavor m1.tiny --image cirros --security-groups sg1 --nic net-id=${NET_ID} instance100
nova floating-ip-create en1
nova floating-ip-associate --fixed-address 10.10.10.3 instance100 192.168.57.101
}
touch .10-createvm
ssh cirros@192.168.57.101
echo 'Done' | true |
9c5f56cf62e7ea55cbf795748f40eaae1b88f015 | Shell | airmack/gemeaux-docker | /gemeaux/start.sh | UTF-8 | 1,217 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Create certificates if not available and expose them via the volume.
if [ ! -f "/var/capsule/conf/cert.pem" ]; then
    cd /var/capsule/conf/ && /bin/sh -c /git/gemeaux/cert.sh
    cp -v /git/gemeaux/cert.sh /var/capsule/conf/cert.sh
fi
# Create the basic example app from gemeaux and expose the python script to the volume.
if [ ! -f "/var/capsule/bin/gemini.py" ]; then
    cp -riv /git/gemeaux/example_app.py /var/capsule/bin/gemini.py
fi
# Seed the content directory with the bundled examples only when it is empty.
if [ -z "$(ls -A /var/capsule/gmi/)" ]; then
    cp -riv /git/gemeaux/examples/* /var/capsule/gmi/
fi
# Not really a clean solution: we copy the key and hand its ownership from
# root to the gemini user because there is no easy way to drop privileges;
# the copy is deleted 10 s later (see the backgrounded subshell below).
cp /var/capsule/conf/key.pem /tmp/
chown gemini:nogroup /tmp/key.pem
# Pre-create the log files so the unprivileged gemini user can write them.
for i in access.log error.log gemeaux.log hall_of_shame.log RateLimiter.log
do
    touch /var/log/gemeaux/$i
    chown gemini:nogroup /var/log/gemeaux/$i
done
# Delete the temporary key after 10 seconds (server reads it at startup).
(sleep 10; rm -fv /tmp/key.pem; echo 'Erased key') &
runuser -u gemini -- /var/capsule/bin/gemini.py --keyfile /tmp/key.pem --certfile /var/capsule/conf/cert.pem --ip ""
| true |
0b461f45c678219a6d1ca3180dd47eb26e304d9a | Shell | malston/spring-doge-microservice | /misc/doge_client.sh | UTF-8 | 317 | 2.984375 | 3 | [] | no_license | #!/bin/sh
set -e

##
## Simple client that doge-ifies images using the REST service.
## Usage: doge_client.sh <doge-id> <image-file>
##
uri=http://127.0.0.1:8089/doges/$1/photos
# POST the image as a multipart upload; resp holds the JSON reply.
resp=`curl -F "file=@$2" $uri`
echo response is $resp
# NOTE(review): the unquoted `echo $resp` deliberately(?) collapses the JSON
# onto one line before the head/tail/cut parsing — the extraction appears
# tuned to that collapsed form, so do not "fix" the quoting without retesting.
id=`echo $resp| head -n2 | tail -n1 | cut -f2 -d: |cut -f2 -d\" `
echo id is $id
# Fetch the processed (doge-ified) photo back from the service.
uri=$uri/$id/photo
echo result from $uri
wget $uri
| true |
9ac6f5f23f8cef49c939c45b9d2ec59c952ee742 | Shell | beegee-tokyo/ganbarou_tools | /tools/builds4.sh | UTF-8 | 13,302 | 2.875 | 3 | [] | no_license | ./prebuilts/misc/linux-x86/ccache/ccache -M 10G
# builds4.sh - test build of the "Amai" ROM for the GT-I9505 (jflte).
# Runs a full "otapackage" build and, on success, repacks the OTA zip with
# device-specific branding, Goo Manager / romstats properties and extra app
# payloads, signs it with the AOSP test keys, and moves the result (plus an
# md5 checksum) to the build top directory.
# NOTE(review): the positional parameters $1-$6 are read but immediately
# overwritten by the fixed GT-I9505 test values below - this is the "test"
# variant of the script.
export USE_CCACHE=1
. build/envsetup.sh
lunch full_jflte-userdebug
make otapackage
if [ $? -eq 0 ]; then
echo -e $CL_MAG"=============================================="$CL_RST
echo -e $CL_GRN"Build for GT-I9505 successfull"$CL_RST
echo -e $CL_MAG"=============================================="$CL_RST
# $NEW_DEVICE = GT-P7500 or GT-P7510 or GT-N7000 or GT-I9300 or GT-I9505
# $NEW_DEVICE1 = GT-P7501 or GT-P7511
# $NEW_DEVICE2 = 1 for P7501/P7511 and 0 for N7000 or GT-I9300 or GT-I9505
# $OLD_DEVICE = p4 or p4wifi or n7000 or jflte or i9300
# KERNEL_VERSION = infamous or pershoot
# TEST_BUILD = 1 if quick build only for P7500 else it is 0
NEW_DEVICE=$1
NEW_DEVICE1=$2
NEW_DEVICE2=$3
OLD_DEVICE=$4
KERNEL_VERSION=$5
TEST_BUILD=$6
# set these variables fix for the tests
NEW_DEVICE=GT-I9505
NEW_DEVICE1=GT-I9505
NEW_DEVICE2=0
OLD_DEVICE=jflte
KERNEL_VERSION=cm
TEST_BUILD=1
# other global variables that usually come from makeit.sh but are missing for the tests
gooversion_t="1"
goobuild_t="20"
gooversion_build_t="120"
export USEROLD=`whoami`;
# build with colors!
CL_RED="\033[31m"
CL_GRN="\033[32m"
CL_YLW="\033[33m"
CL_BLU="\033[34m"
CL_MAG="\033[35m"
CL_CYN="\033[36m"
CL_RST="\033[0m"
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Building :"$CL_RST
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_MAG"NEW_DEVICE = "$CL_YLW"$NEW_DEVICE"$CL_RST
echo -e $CL_MAG"NEW_DEVICE1 = "$CL_YLW"$NEW_DEVICE1"$CL_RST
echo -e $CL_MAG"NEW_DEVICE2 = "$CL_YLW"$NEW_DEVICE2"$CL_RST
echo -e $CL_MAG"OLD_DEVICE = "$CL_YLW"$OLD_DEVICE"$CL_RST
echo -e $CL_MAG"KERNEL_VERSION = "$CL_YLW"$KERNEL_VERSION"$CL_RST
echo -e $CL_MAG"TEST_BUILD = "$CL_YLW"$TEST_BUILD"$CL_RST
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Amai patches - Set common variables"$CL_RST
echo -e $CL_GRN"============================================"$CL_RST
# NOTE(review): assumes the checkout lives directly under /home/$USEROLD and
# that the script is started from the build top - confirm before reuse.
ANDROID_BUILD_TOP="/home/$USEROLD/${PWD##*/}"
echo $ANDROID_BUILD_TOP
SECURITYDIR="$ANDROID_BUILD_TOP/build/target/product/security"
QUIET="-q"
NOW=$(date +"%Y-%m-%d")
NOWORG=$(date +"%Y%m%d")
SED=sed
MD5=md5sum
OUT_TARGET_HOST="linux-x86"
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Unpack $NEW_DEVICE for Amai patches"
echo -e $CL_GRN"============================================"$CL_RST
OUT="$ANDROID_BUILD_TOP/out/target/product/$OLD_DEVICE"
REPACK="$OUT/repack.d"
OUTFILE="$OUT/Amai-$NEW_DEVICE-v$gooversion_t.$goobuild_t-$NOW.zip"
DEVICE_VERSION="Version $gooversion_t.$goobuild_t"
DEVICE_INFO="for $NEW_DEVICE"
OTAPACKAGE="$OUT/full_$OLD_DEVICE-ota-eng.$USEROLD.zip"
mkdir $REPACK
mkdir $REPACK/ota
cd $REPACK/ota
printf "Unpacking $OTAPACKAGE..."
unzip $QUIET $OTAPACKAGE
echo $DEVICE_VERSION
echo $DEVICE_INFO
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Add Amai info into updater-script"
echo -e $CL_GRN"============================================"$CL_RST
# Replace the placeholder tokens baked into the recovery updater-script.
$SED -i \
-e "s:REPLACE_WITH_VERSION:${DEVICE_VERSION}:" \
$REPACK/ota/META-INF/com/google/android/updater-script
$SED -i \
-e "s:REPLACE_WITH_DEVICE:${DEVICE_INFO}:" \
$REPACK/ota/META-INF/com/google/android/updater-script
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Change $OLD_DEVICE to $NEW_DEVICE in build.prop"
echo -e $CL_GRN"But keep ro.product.device for Google Play"
echo -e $CL_GRN"============================================"$CL_RST
# Rename every device occurrence, then revert ro.product.device (which the
# first substitution also rewrote) back to the original codename.
$SED -i \
-e "s:${OLD_DEVICE}:${NEW_DEVICE}:" \
$REPACK/ota/system/build.prop
OLD_PROD_DEV="ro.product.device=$NEW_DEVICE"
NEW_PROD_DEV="ro.product.device=$OLD_DEVICE"
$SED -i \
-e "s:${OLD_PROD_DEV}:${NEW_PROD_DEV}:" \
$REPACK/ota/system/build.prop
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Add Goo & romstats info to build.prop"
echo -e $CL_GRN"============================================"$CL_RST
#echo " " >> $REPACK/ota/system/build.prop
#echo "#" >> $REPACK/ota/system/build.prop
#echo "# BeeGee_Tokyo tweaks" >> $REPACK/ota/system/build.prop
#echo "#" >> $REPACK/ota/system/build.prop
#echo "ro.com.google.clientidbase=android-google" >> $REPACK/ota/system/build.prop
#echo "keyguard.no_require_sim=true" >> $REPACK/ota/system/build.prop
#echo "ro.url.legal=http://www.google.com/intl/%s/mobile/android/basic/phone-legal.html" >> $REPACK/ota/system/build.prop
#echo "ro.url.legal.android_privacy=http://www.google.com/intl/%s/mobile/android/basic/privacy.html" >> $REPACK/ota/system/build.prop
#echo "ro.com.android.wifi-watchlist=GoogleGuest" >> $REPACK/ota/system/build.prop
#echo "ro.setupwizard.enterprise_mode=1" >> $REPACK/ota/system/build.prop
#echo "ro.com.android.dataroaming=false" >> $REPACK/ota/system/build.prop
#echo "persist.sys.root_access=1" >> $REPACK/ota/system/build.prop
#echo "ro.HOME_APP_ADJ=1" >> $REPACK/ota/system/build.prop
#echo "debug.sf.hw=1" >> $REPACK/ota/system/build.prop
#echo "windowsmgr.max_events_per_sec=240" >> $REPACK/ota/system/build.prop
#echo "ro.telephony.call_ring.delay=0" >> $REPACK/ota/system/build.prop
#echo "wifi.supplicant_scan_interval=180" >> $REPACK/ota/system/build.prop
#echo "pm.sleep_mode=1" >> $REPACK/ota/system/build.prop
#echo "ro.ril.disable.power.collapse=0" >> $REPACK/ota/system/build.prop
#echo "mot.proximity.delay=25" >> $REPACK/ota/system/build.prop
#echo "debug.performance.tuning=1" >> $REPACK/ota/system/build.prop
#echo "video.accelerate.hw=1" >> $REPACK/ota/system/build.prop
#echo "ro.media.enc.jpeg.quality=100" >> $REPACK/ota/system/build.prop
#echo "persist.sys.purgeable_assets=1" >> $REPACK/ota/system/build.prop
#echo "ro.tether.denied=false" >> $REPACK/ota/system/build.prop
#echo "ro.secure=0" >> $REPACK/ota/system/build.prop
#echo "debug.performance.tuning=1" >> $REPACK/ota/system/build.prop
#echo "ro.max.fling_velocity=12000" >> $REPACK/ota/system/build.prop
#echo "ro.min.fling_velocity=8000" >> $REPACK/ota/system/build.prop
#echo "dalvik.vm.dexopt-flags=m=v,o=y" >> $REPACK/ota/system/build.prop
#echo "net.tcp.buffersize.default=4096,87380,256960,4096,16384,256960" >> $REPACK/ota/system/build.prop
#echo "net.tcp.buffersize.wifi=4096,87380,256960,4096,16384,256960" >> $REPACK/ota/system/build.prop
#echo "net.tcp.buffersize.umts=4096,87380,256960,4096,16384,256960" >> $REPACK/ota/system/build.prop
#echo "net.tcp.buffersize.gprs=4096,87380,256960,4096,16384,256960" >> $REPACK/ota/system/build.prop
#echo "net.tcp.buffersize.edge=4096,87380,256960,4096,16384,256960" >> $REPACK/ota/system/build.prop
#echo " " >> $REPACK/ota/system/build.prop
echo "#" >> $REPACK/ota/system/build.prop
echo "# Goo Manager info" >> $REPACK/ota/system/build.prop
echo "#" >> $REPACK/ota/system/build.prop
echo "ro.goo.developerid=beegee_tokyo" >> $REPACK/ota/system/build.prop
RO_GOO_BOARD="ro.goo.board=$NEW_DEVICE"
echo $RO_GOO_BOARD >> $REPACK/ota/system/build.prop
RO_GOO_ROM="ro.goo.rom=Amai_$NEW_DEVICE"
echo $RO_GOO_ROM >> $REPACK/ota/system/build.prop
GOO_BUILD_VERSION="ro.goo.version=$gooversion_build_t"
echo $GOO_BUILD_VERSION >> $REPACK/ota/system/build.prop
echo " " >> $REPACK/ota/system/build.prop
echo "#" >> $REPACK/ota/system/build.prop
echo "# ROM Statistics and ROM Identification" >> $REPACK/ota/system/build.prop
echo "#" >> $REPACK/ota/system/build.prop
echo "ro.romstats.url=http://www.desire.giesecke.tk/romstats/" >> $REPACK/ota/system/build.prop
ROMSTATS_NAME="ro.romstats.name=Amai_$NEW_DEVICE"
echo $ROMSTATS_NAME >> $REPACK/ota/system/build.prop
ROMSTATS_VERSION="ro.romstats.version=V$gooversion_t.$goobuild_t"
echo $ROMSTATS_VERSION >> $REPACK/ota/system/build.prop
echo "ro.romstats.tframe=7" >> $REPACK/ota/system/build.prop
#echo -e $CL_GRN"============================================"$CL_RST
#echo -e $CL_GRN"Enable ADB and MTP"
#echo -e $CL_GRN"============================================"$CL_RST
#$SED -i \
# -e 's:persist.sys.usb.config=mtp:persist.sys.usb.config=mtp,adb': \
# $REPACK/ota/system/build.prop
#$SED -i \
# '/^$/d' \
# $REPACK/ota/system/build.prop
#echo -e $CL_GRN"============================================"$CL_RST
#echo -e $CL_GRN"Add SuperUser files"
#echo -e $CL_GRN"============================================"$CL_RST
#rm -f $REPACK/ota/system/bin/su
#rm -f $REPACK/ota/system/xbin/su
#rm -f $REPACK/ota/system/xbin/daemonsu
#rm -f $REPACK/ota/system/bin/.ext/.su
#rm -f $REPACK/ota/system/etc/install-recovery.sh
#rm -f $REPACK/ota/system/etc/init.d/99SuperSUDaemon
#rm -f $REPACK/ota/system/etc/.installed_su_daemon
#rm -f $REPACK/ota/system/app/Superuser.apk
#rm -f $REPACK/ota/system/app/Superuser.odex
#rm -f $REPACK/ota/system/app/SuperUser.apk
#rm -f $REPACK/ota/system/app/SuperUser.odex
#rm -f $REPACK/ota/system/app/superuser.apk
#rm -f $REPACK/ota/system/app/superuser.odex
#rm -f $REPACK/ota/system/app/Supersu.apk
#rm -f $REPACK/ota/system/app/Supersu.odex
#rm -f $REPACK/ota/system/app/SuperSU.apk
#rm -f $REPACK/ota/system/app/SuperSU.odex
#rm -f $REPACK/ota/system/app/supersu.apk
#rm -f $REPACK/ota/system/app/supersu.odex
#rm -f $REPACK/ota/data/dalvik-cache/*com.noshufou.android.su*
#rm -f $REPACK/ota/data/dalvik-cache/*com.koushikdutta.superuser*
#rm -f $REPACK/ota/data/dalvik-cache/*com.mgyun.shua.su*
#rm -f $REPACK/ota/data/dalvik-cache/*Superuser.apk*
#rm -f $REPACK/ota/data/dalvik-cache/*SuperUser.apk*
#rm -f $REPACK/ota/data/dalvik-cache/*superuser.apk*
#rm -f $REPACK/ota/data/dalvik-cache/*eu.chainfire.supersu*
#rm -f $REPACK/ota/data/dalvik-cache/*Supersu.apk*
#rm -f $REPACK/ota/data/dalvik-cache/*SuperSU.apk*
#rm -f $REPACK/ota/data/dalvik-cache/*supersu.apk*
#rm -f $REPACK/ota/data/dalvik-cache/*.oat
#rm -f $REPACK/ota/data/app/com.noshufou.android.su-*
#rm -f $REPACK/ota/data/app/com.koushikdutta.superuser-*
#rm -f $REPACK/ota/data/app/com.mgyun.shua.su-*
#rm -f $REPACK/ota/data/app/eu.chainfire.supersu-*
#cp -r -f -v $ANDROID_BUILD_TOP/ganbarou_tools/patches/superuser/system/. $REPACK/ota/system/.
#echo 1 > $REPACK/ota/system/etc/.installed_su_daemon
#echo 1 > $REPACK/ota/system/etc/.has_su_daemon
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Add Amai specific data files"
echo -e $CL_GRN"============================================"$CL_RST
mkdir $REPACK/ota/data
mkdir $REPACK/ota/data/app
cp -r -f -v $ANDROID_BUILD_TOP/ganbarou_tools/patches/data/app/. $REPACK/ota/data/app/.
cp -r -f -v $ANDROID_BUILD_TOP/ganbarou_tools/patches/datap/app/. $REPACK/ota/data/app/.
cp -r -f -v $ANDROID_BUILD_TOP/ganbarou_tools/patches/systemapp-p/. $REPACK/ota/system/app/.
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Copy hosts, 98ganbarou and gps.conf"
echo -e $CL_GRN"============================================"$CL_RST
#cp -r -f -v $ANDROID_BUILD_TOP/ganbarou_tools/patches/ganbarou/etc/gps.conf $REPACK/ota/system/etc/gps.conf
cp -r -f -v $ANDROID_BUILD_TOP/ganbarou_tools/patches/ganbarou/etc/hosts $REPACK/ota/system/etc/hosts
#cp -r -f -v $ANDROID_BUILD_TOP/ganbarou_tools/patches/ganbarou/etc/init.d/98Ganbarou $REPACK/ota/system/etc/init.d/97Ganbarou
#echo -e $CL_GRN"============================================"$CL_RST
#echo -e $CL_GRN"Add Japan APNs"
#echo -e $CL_GRN"============================================"$CL_RST
#$SED -i \
# -e 's:</apns>:': \
# $REPACK/ota/system/etc/apns-conf.xml
#cat $ANDROID_BUILD_TOP/ganbarou_tools/patches/japan.apns >> $REPACK/ota/system/etc/apns-conf.xml
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Zipping Amai $NEW_DEVICE ROM"
echo -e $CL_GRN"============================================"$CL_RST
# Zip the repacked tree from inside the ota directory so archive paths are
# relative to the package root.
( cd $REPACK/ota; zip $QUIET -r $REPACK/update.zip . )
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Sign Amai $NEW_DEVICE ROM"
echo -e $CL_GRN"============================================"$CL_RST
SECURITYDIR=$ANDROID_BUILD_TOP/build/target/product/security
java -Xmx1024m \
-jar $ANDROID_BUILD_TOP/out/host/$OUT_TARGET_HOST/framework/signapk.jar \
-w $SECURITYDIR/testkey.x509.pem $SECURITYDIR/testkey.pk8 \
$REPACK/update.zip $OUTFILE
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Cleanup temporary folders"
echo -e $CL_GRN"============================================"$CL_RST
rm -rf $REPACK
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Create a md5 checksum image of the repacked $NEW_DEVICE package"
echo -e $CL_GRN"============================================"$CL_RST
(
img=`basename $OUTFILE`
cd `dirname $OUTFILE`
$MD5 $img >$img.md5sum
)
echo -e $CL_MAG"============================================"$CL_RST
echo -e $CL_MAG"Package complete: $OUTFILE"
echo -e $CL_MAG"============================================"$CL_RST
echo -e $CL_GRN"============================================"$CL_RST
echo -e $CL_GRN"Copy to shared directory and cleanup"
echo -e $CL_GRN"============================================"$CL_RST
mv $OUTFILE $ANDROID_BUILD_TOP/
mv $OUTFILE.md5sum $ANDROID_BUILD_TOP/
else
echo -e $CL_MAG"=============================================="$CL_RST
echo -e $CL_RED"Build for GT-I9505 failed"$CL_RST
echo -e $CL_MAG"=============================================="$CL_RST
fi
| true |
f4ac85b8e817e7b44c8204e608e813d7f48ac1e8 | Shell | bohachu/cameo-motion-1213-1704 | /sh/install-js-obfuscator.sh | UTF-8 | 461 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# for ubuntu/debia/kali
# initial folder = ~/$PRJ_DIR_NAME/sh
#
# Installs npm and the javascript-obfuscator package, then links the
# obfuscator binary into /usr/local/bin so it is available on the PATH.

# .env is expected to provide PRJ_DIR_NAME; it must be sourced before we cd
# away from the sh directory.
source .env
cd ~
echo "安裝npm"
sudo apt install npm
echo "javascript-obfuscator"
# NOTE(review): this installs into ~/node_modules via sudo, leaving the files
# root-owned - confirm that is intended.
sudo npm install --save-dev javascript-obfuscator
if [[ ! -f /usr/local/bin/javascript-obfuscator ]]; then
    sudo ln -s ~/node_modules/javascript-obfuscator/bin/javascript-obfuscator /usr/local/bin
fi
echo "Excample: javascript-obfuscator 檔案名稱.js"
source ~/.bashrc
cd ~/$PRJ_DIR_NAME/sh
| true |
04e7201cee3a16dbb09c32719382cb884af51f0c | Shell | jensomato/dotfiles | /.local/bin/xsane2tess3.sh | UTF-8 | 3,962 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# xsane2tess3 - tesseractOCR directly from xsane
# Copyright (C) 2012-2017 Heinrich Schwietering
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#
##############################################################################
#
# xsane2tess3 0.2
#
# *** tesseract made simple ***
#
#
##############################################################################
#
# xsane2tess is a TesseractOCR wrapper to be able to use tesseract with xsane
#
#
#
TEMP_DIR=/tmp/ # folder for temporary files (all files)
ERRORLOG="xsane2tess3.log" # file where STDERR goes
#TEST="testoutput.txt"
# With no arguments at all, print the usage text and quit.
if [[ -z "$1" ]]
then
echo "Usage: $0 [OPTIONS]
xsane2tess3 scans images with TesseractOCR
and outputs the text in a file or as hocr/html document
OPTIONS:
-i <file1> define input file (any image-format supported)
-o <file2> define output-file (*.txt/hOCR)
-l <lang> define language-data tesseract should use
-e <config> filename for tesseract
-f </path/to/Final> name and path fot multiscan document
Progress- & error-messages will be stored in this logfile:
$TEMP_DIR$ERRORLOG
xsane2tess depends on
- XSane, http://www.xsane.org/
- TesseractOCR, http://code.google.com/p/tesseract-ocr/
Some coding was stolen from 'ocube'
http://www.geocities.com/thierryguy/ocube.html
This adaption is based on xsane2tess
http://doc.ubuntu-fr.org/xsane2tess,
Hints always welcome! heinrich (dot) schwietering (at) gmx (dot) de
"
exit
fi
# get options...
# NOTE(review): the usage text above advertises "-e <config>" but getopts
# actually accepts "-c" for the tesseract config file - confirm which one is
# intended.
while getopts ":i:o:l:c:f:" OPTION
do
case $OPTION in
i ) # input filename (with path)
FILE_PATH="$OPTARG"
;;
o ) # output filename
FILE_OUT="$OPTARG"
;;
l ) # Language-selection
# NOTE(review): this clobbers the LANG locale environment variable for the
# rest of the script - harmless for the commands used here, but worth noting.
LANG="$OPTARG"
;;
c ) # use hocr configfile
CONFILE="$OPTARG"
;;
f ) # final name for multiscan ocr file
FINAL="$OPTARG"
;;
esac
done
# redirect STDOUT to FILE_OUT
exec 1>>$FILE_OUT
# redirect STDERR to ERRORLOG
exec 2>>$TEMP_DIR$ERRORLOG
# strip path from FILE_PATH, use filename only
# NOTE(review): "##*/.*" removes the longest prefix matching "*/.*", which for
# typical paths matches nothing, so IN_FILE usually still holds the full path
# (tesseract accepts that). A plain basename would be "${FILE_PATH##*/}" -
# confirm intent before changing.
IN_FILE="${FILE_PATH##*/.*}"
echo "~~~+++~~~~+++~~~" 1>&2
# start OCR (tesseract expands output with *.txt/.html)
tesseract "$IN_FILE" "$FILE_OUT" -l "$LANG" "$CONFILE" 1>&2
echo Tesseract used with "$LANG" "$CONFILE" 1>&2
# Multiscan mode: when -f was given, append this scan's result to the final
# document (txt when no configfile, hocr otherwise) and discard the per-scan
# output; otherwise stream the per-scan result to stdout (= FILE_OUT above).
{ if [[ "$FINAL" != '' ]]
then
{ if [[ "$CONFILE" == "" ]]
then
# check if final txt file is already existing
{ if [[ ! -a "$FINAL".txt ]]
then
# start final ocr file
cp "$FILE_OUT".txt "$FINAL".txt 1>&2
echo "$FINAL.txt started" 1>&2
else
mv "$FINAL".txt "$FINAL".new.txt
cat "$FINAL".new.txt "$FILE_OUT".txt > "$FINAL".txt
echo "$FILE_OUT.txt added to $FINAL.txt" 1>&2
rm "$FINAL".new.txt
fi }
else
# check if final hocr file is already existing
{ if [[ ! -a "$FINAL".hocr ]]
then
# start final ocr file
cp "$FILE_OUT.hocr" "$FINAL".hocr 1>&2
echo "$FINAL.hocr started" 1>&2
else
mv "$FINAL".hocr "$FINAL".new.hocr
cat "$FINAL".new.hocr "$FILE_OUT".hocr > "$FINAL".hocr
echo "$FILE_OUT.hocr added to $FINAL.hocr" 1>&2
rm "$FINAL".new.hocr
fi }
fi }
rm $FILE_OUT
else
# STDOUT scanned text => FILE_OUT
cat "$FILE_OUT".*
fi }
# remove tesseract's intermediate per-scan output files
rm $FILE_OUT.*
echo "~~~+++~~~~+++~~~"$(date +%c) 1>&2
| true |
9eb59916c46f1ba7a6a3fd4e67de5879642a6563 | Shell | robertkeizer/misusing-video | /test-images/generate.sh | UTF-8 | 174 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Generate 100 test images: each one is a QR code encoding a numbered example
# message, forced to exactly 100x100 pixels, written to <n>.png.
# (The original kept a separate "num" counter that always equalled the loop
# variable "$i"; the redundant counter is removed - output files and their
# contents are unchanged.)
for i in {1..100}; do
  echo "This is some example stuff: $i" | qrencode -o - | convert - -resize '100x100!' - > "$i.png"
done
| true |
bb26d067dacaad95677612c56ed377fa6983a556 | Shell | shannah/cn1-travis-template | /.travis/build.sh | UTF-8 | 7,257 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Travis CI build/test driver for a Codename One project.
# Driven entirely by environment variables:
#   CN1_PLATFORM                 - "android" or "ios" (selects toolchain setup)
#   CN1USER / CN1PASS            - Codename One credentials (needed for device
#                                  /emulator tests, which use the build cloud)
#   CN1_SOURCES                  - optional URL of a Codename One source zip to
#                                  build the project against
#   CN1_RUNTESTS_JAVASE          - run unit tests on the JavaSE port
#   CN1_RUNTESTS_IOS_SIMULATOR / CN1_RUNTESTS_ANDROID_EMULATOR /
#   CN1_RUNTESTS_IOS_DEVICE      - run appium tests on the respective target
#   API                          - Android API level for the emulator AVD
#   DEVICE                       - iOS platform version for device tests
# set -e so that this script will exit if any of the commands fail
set -e

function command_exists {
    #this should be a very portable way of checking if something is on the path
    #usage: "if command_exists foo; then echo it exists; fi"
    command -v "$1" &> /dev/null
}

if [ -z "${CN1USER}" ] || [ -z "${CN1PASS}" ]; then
    if [ -n "${CN1_RUNTESTS_ANDROID_EMULATOR}" ] || [ -n "${CN1_RUNTESTS_IOS_SIMULATOR}" ]; then
        echo "Running tests on android or iOS requires the CN1USER and CN1PASS environment variables to be set to your Codename One username and password"
        echo "NOTE: Running tests on iOS and Android requires an enterprise account or higher, since they rely on automated build support"
        exit 1
    fi
fi

if [ "${CN1_PLATFORM}" == "android" ]; then
    echo "Installing Node 6"
    # Need to load NVM command first
    # https://github.com/BanzaiMan/travis_production_test/blob/9c02aef/.travis.yml
    # https://github.com/travis-ci/travis-ci/issues/5999#issuecomment-217201571
    source ~/.nvm/nvm.sh
    nvm install 6
    echo `which node`
    android list targets
    echo "Creating AVD..."
    # Each supported API level pairs with the ABI image available on Travis.
    if [ "${API}" -eq "15" ]; then
        echo no | android create avd --force -n test -t android-15 --abi google_apis/armeabi-v7a
    elif [ "${API}" -eq "16" ]; then
        echo no | android create avd --force -n test -t android-16 --abi armeabi-v7a
    elif [ "${API}" -eq "17" ]; then
        echo no | android create avd --force -n test -t android-17 --abi google_apis/armeabi-v7a
    elif [ "${API}" -eq "18" ]; then
        echo no | android create avd --force -n test -t android-18 --abi google_apis/armeabi-v7a
    elif [ "${API}" -eq "19" ]; then
        echo no | android create avd --force -n test -t android-19 --abi armeabi-v7a
    elif [ "${API}" -eq "21" ]; then
        echo no | android create avd --force -n test -t android-21 --abi armeabi-v7a
    elif [ "${API}" -eq "22" ]; then
        echo no | android create avd --force -n test -t android-22 --abi armeabi-v7a
    fi
    echo "Starting Android Emulator..."
    emulator -avd test -no-window &
    EMULATOR_PID=$!
    # Travis will hang after script completion if we don't kill
    # the emulator
    function stop_emulator() {
        kill $EMULATOR_PID
    }
    trap stop_emulator EXIT
fi

if [ "${CN1_PLATFORM}" == "ios" ]; then
    if ! command_exists ant; then
        echo "Installing Ant..."
        # Install ANT and Maven. They are missing from iOS machines
        curl -L http://archive.apache.org/dist/ant/binaries/apache-ant-1.9.6-bin.tar.gz -o apache-ant-1.9.6-bin.tar.gz
        tar xfz apache-ant-1.9.6-bin.tar.gz --directory ../
        export PATH=`pwd`/../apache-ant-1.9.6/bin:$PATH
    fi
    if ! command_exists mvn; then
        echo "Installing Maven"
        curl -L https://archive.apache.org/dist/maven/maven-3/3.2.3/binaries/apache-maven-3.2.3-bin.tar.gz -o apache-maven-3.2.3-bin.tar.gz
        tar xvfz apache-maven-3.2.3-bin.tar.gz --directory ../
        export PATH=`pwd`/../apache-maven-3.2.3/bin:$PATH
    fi
fi

if [ "${CN1_PLATFORM}" == "android" ]; then
    echo "We are in android"
fi

# SET UP ENVIRONMENT
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
PROJECT_DIR=`pwd`
if [ "${CN1_PLATFORM}" == "ios" ]; then
    # On OS X we need to set JAVA_HOME for maven to work properly
    export JAVA_HOME=$(/usr/libexec/java_home)
fi

# Install Codename One CLI tools
echo "Current directory: "
echo `pwd`
cd ..
mkdir codenameone-cli
cd codenameone-cli
npm install codenameone-cli
CN1=`pwd`/node_modules/.bin/cn1
cd $PROJECT_DIR

# Install missing jar files into project
echo "$CN1 install-jars"
$CN1 install-jars || true
echo "$CN1 install-tests"
$CN1 install-tests || true

# If CN1_SOURCES environment variable is set, then we download the CN1_SOURCES
# And build against those
if [[ -n ${CN1_SOURCES} ]]; then
    echo "Building against Codename One Sources from ${CN1_SOURCES}"
    curl -L ${CN1_SOURCES} -o master.zip
    unzip master.zip -d ../
    rm master.zip
    mv ../CodenameOne-master ../cn1
    cd ../cn1
    ant all
    cp CodenameOne/dist/CodenameOne.jar $PROJECT_DIR/lib/CodenameOne.jar
    cp Ports/CLDC11/dist/CLDC11.jar $PROJECT_DIR/lib/CLDC11.jar
    cp Ports/JavaSE/dist/JavaSE.jar $PROJECT_DIR/JavaSE.jar
fi

# Build the project
cd $PROJECT_DIR
ant jar

# Run Tests Against JavaSE
if [[ -n ${CN1_RUNTESTS_JAVASE} ]]; then
    mkdir dist/testrunner
    cd dist/testrunner
    echo -e "<?xml version='1.0'?>\n<tests><test path='${PROJECT_DIR}'/></tests>" > tests.xml
    echo "tests.xml content:"
    cat tests.xml
    if [[ -n ${CN1_SOURCES} ]]; then
        $CN1 test -s -e -cn1Sources ${PROJECT_DIR}/../cn1 -skipCompileCn1Sources
    else
        $CN1 test -s -e
    fi
    cd ../..
fi

if [[ -n ${CN1_RUNTESTS_IOS_SIMULATOR} ]]; then
    echo "Running tests on IOS SIMULATOR"
    $CN1 install-appium-tests || true
    # NOTE(review): "(false && ...)" makes this branch unreachable, so appium
    # is always (re)installed locally - confirm whether that was intended.
    if (false && command -v appium 2>/dev/null); then
        pkill -f appium || true
        appium &
        APPIUM_PID=$!
    else
        echo "Appium missing. Installing appium..."
        npm install appium
        pkill -f appium || true
        ./node_modules/.bin/appium &
        APPIUM_PID=$!
    fi
    # Travis will hang after script completion if we don't kill appium
    function stop_appium() {
        kill $APPIUM_PID
    }
    trap stop_appium EXIT
    #ant -f appium.xml test-ios-appium-simulator -Dcn1.iphone.target=debug_iphone_steve -Dcn1user=${CN1USER} -Dcn1password=${CN1PASS}
    ant -f appium.xml test-ios-appium-simulator -Dcn1user=${CN1USER} -Dcn1password=${CN1PASS}
fi

if [[ -n ${CN1_RUNTESTS_ANDROID_EMULATOR} ]]; then
    echo "Running tests on Android Emulator"
    # The keystore is needed to sign the test APK sent to the build cloud.
    if [ ! -f "Keychain.ks" ]; then
        wget https://github.com/shannah/cn1-unit-tests/raw/master/Keychain.ks
    fi
    $CN1 install-appium-tests || true
    echo "Installing appium..."
    npm install appium
    ./node_modules/.bin/appium &
    APPIUM_PID=$!
    # Travis will hang after script completion if we don't kill appium
    function stop_appium() {
        kill $APPIUM_PID
    }
    trap stop_appium EXIT
    echo "Waiting for Emulator..."
    bash .travis/android-waiting-for-emulator.sh
    # Disable animations so UI tests are not timing-sensitive.
    adb shell settings put global window_animation_scale 0 &
    adb shell settings put global transition_animation_scale 0 &
    adb shell settings put global animator_duration_scale 0 &
    echo "Sleeping for 30 seconds to give emulator a chance to settle in..."
    sleep 30
    echo "Unlocking emulator"
    adb shell input keyevent 82 &
    echo "Running tests with appium in the emulator "
    ant -f appium.xml test-android-appium-emulator \
        -Dcn1user=${CN1USER} \
        -Dcn1password=${CN1PASS} \
        -Dcodename1.android.keystore="Keychain.ks" \
        -Dcodename1.android.keystoreAlias="codenameone" \
        -Dcodename1.android.keystorePassword="password"
fi

if [[ -n $CN1_RUNTESTS_IOS_DEVICE ]]; then
    echo "Running Tests on iOS Device"
    $CN1 install-appium-tests || true
    # NOTE(review): the condition/message look inverted here - "command -v
    # appium" succeeding means appium IS present, yet the "Appium missing"
    # branch runs. Compare with the simulator section above; confirm intent.
    if command -v appium 2>/dev/null; then
        echo "Appium missing. Installing appium..."
        pkill -f appium || true
        appium &
        APPIUM_PID=$!
    else
        npm install appium
        ./node_modules/.bin/appium &
        pkill -f appium || true
        APPIUM_PID=$!
    fi
    # Travis will hang after script completion if we don't kill appium
    function stop_appium() {
        kill $APPIUM_PID
    }
    trap stop_appium EXIT
    export PLATFORM_VERSION=$DEVICE
    ant -f appium.xml test-ios-appium-device -Dcn1user=${CN1USER} -Dcn1password=${CN1PASS}
fi

exit 0
| true |
56bedc54c224e627bb3807e27bb2c0e729c889b1 | Shell | ShrinathN/AndroidChrootScripts | /ent.sh | UTF-8 | 1,202 | 3.53125 | 4 | [] | no_license | #!/bin/sh
#this script will help you enter into the Debian chroot
#
# Expects a Debian image at /sdcard/debian.img. Mounts it at /debian together
# with the pseudo-filesystems a working userland needs, enters the chroot, and
# unmounts everything again once the chroot shell exits.

# checks If the user is root or not, halts execution of not
if [ "$(whoami)" != "root" ]
then
echo Please run the script as root
exit
fi
# mounts the root as RW
busybox mount -o remount,rw /
# checks if /debian exists, if not, it creates it
if [ ! -e /debian ]; then
mkdir /debian
fi
# mounts the img file in /debian
busybox mount /sdcard/debian.img /debian
# all the shit i need to access the hardware
busybox mount -o bind /dev /debian/dev
busybox mount -t proc proc /debian/proc
busybox mount -t tmpfs tmpfs /debian/tmp
busybox mount -t sysfs sys /debian/sys
busybox mount -o bind /sdcard /debian/mnt/sdcard0
# NOTE(review): /storage/2138-1704 is the UUID of one specific external SD
# card - this path must be adapted per device.
busybox mount -o bind /storage/2138-1704 /debian/mnt/sdcard1
# trivial variable setup
export PATH=$PATH:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# clear any Android-injected preload library before running Debian binaries
export LD_PRELOAD=""
export TERM="xterm"
export DISPLAY=:0
export HOME=/home
# the main part
# (blocks here until the login shell inside the chroot exits)
busybox chroot /debian /bin/bash -l
# for unmounting
busybox umount /debian/dev
busybox umount /debian/proc
busybox umount /debian/tmp
busybox umount /debian/sys
busybox umount /debian/mnt/sdcard0
busybox umount /debian/mnt/sdcard1
busybox umount /debian
| true |
2dadc0287e3cf172179ff863b5695fa04011cd47 | Shell | kendis-fit/software-requirements-front-end | /start.sh | UTF-8 | 517 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# Bail out early when the "serve" static file server is not on the PATH.
if ! command -v serve >/dev/null; then
    echo "You don't have program serve, you should download it with help command 'npm i -g serve'";
    exit 1;
fi
get_port() {
    # Port precedence: the $PORT environment variable wins, then the first
    # script argument, then the default 3000. An empty value counts as unset.
    TRUE_PORT=${PORT:-${1:-3000}}
}
get_directory() {
    # Serve the compiled production bundle in ./build when it exists,
    # otherwise fall back to the current directory.
    [ -d "./build" ] && TRUE_DIRECTORY="build" || TRUE_DIRECTORY="."
}
# Resolve TRUE_PORT (from $PORT / first argument / default) and
# TRUE_DIRECTORY (./build if present), then "serve" is started below.
get_port $1
get_directory
serve -p $TRUE_PORT -s $TRUE_DIRECTORY | true |
a2320ae449cf18b53dabb2a0bae582bdf42fd47b | Shell | pi-netes/kubernetes-config | /bootstrapping/bootstrap.sh | UTF-8 | 1,262 | 3.421875 | 3 | [] | no_license | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Interactive bootstrap for a Raspberry Pi Arch Linux ARM node: either kicks
# off the SD-card installer (when not yet booted on the Pi) or provisions the
# booted system (hostname, pacman options, packages, user profile, AUR
# packages, cluster config). Must run as root.
echo verifying user...
if [[ "${EUID}" != 0 ]] ; then
    echo "Must run as root."
    exit 1
fi
# /home/alarm is the default Arch Linux ARM user; its copy of this script only
# exists once we are booted on the Pi itself.
if [[ ! -f "/home/alarm/bootstrap.sh" ]]; then # not currently booted into pi
    echo this is only intended to be used in conjunction with the arclinux.sh install script! would you like to install archlinux on an sd card now? [Y/n]
    read SHOULD_INSTALL_ARCH_LINUX
    if [[ $SHOULD_INSTALL_ARCH_LINUX == 'n' ]]; then
        echo "nothing to do!"
        exit 0
    else
        bash $SCRIPT_DIR/installs/archlinux.sh
        exit 0
    fi
fi
echo what would you like to name this machine?
read HOSTNAME
hostnamectl set-hostname $HOSTNAME
echo setting global options...
# enable colored pacman output plus the ILoveCandy/TotalDownload extras
sed -i 's/\#Color/Color\nILoveCandy\nTotalDownload/g' /etc/pacman.conf
echo updating and installing programs with pacman...
bash $SCRIPT_DIR/installs/pacman.sh
echo installing user profile...
echo what would you like to name your user?
read USERNAME
# exported so the child install scripts can see the chosen name
export USERNAME
bash $SCRIPT_DIR/installs/userprofile.sh
echo installing packages from the aur
bash $SCRIPT_DIR/installs/aur.sh
echo configuring cluster...
sudo -u $USERNAME /home/$USERNAME/kubetools/kubetools.sh
echo 'to clean up, please run:
rm -rf /home/alarm
userdel alarm'
| true |
a4fabc73822485a206087d2ae457421c3868bde8 | Shell | gsathya/ooni-probe | /old/ooni/plugins/old_stuff_to_convert/twitter-test.sh | UTF-8 | 637 | 3.265625 | 3 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | #!/bin/bash
#
# A quick hack to (tcp)traceroute to a list of hosts
#
# Emits a connectivity report: interface/route state, TCP traceroutes to
# several ports for each Twitter IP, plain traceroutes, and a Tor check.
# Expects twitter-ip-list.txt (one IP per line) in the current directory.
echo "tcp/conntest v0.6"
date -R
echo
/sbin/ifconfig -a
echo
/sbin/route -n
echo
echo "Testing Twitter IP addresses..."
# NOTE(review): "grep 1" presumably filters out blank/comment lines by keeping
# only lines containing the digit 1 - confirm the list format.
for ip in `cat twitter-ip-list.txt|grep 1`
do
echo "Testing $ip"
# -m 6: at most 6 hops, -w 1: one-second wait; ports 80/0/123/443
tcptraceroute -m 6 -w 1 $ip 80
tcptraceroute -m 6 -w 1 $ip 0
tcptraceroute -m 6 -w 1 $ip 123
tcptraceroute -m 6 -w 1 $ip 443
done
echo "Various traceroute attempts"
for ip in `cat twitter-ip-list.txt|grep 1`
do
# -A: AS lookup; default, ICMP (-I) and UDP (-U) probe variants
traceroute -A $ip
traceroute -A -I $ip
traceroute -A -U $ip
done
wget -q -O- https://check.torproject.org|grep "IP address"
echo
date -R
| true |
6afebf20940bbca24c3997ed2b1f067880f2d061 | Shell | weilaidb/PythonExample | /regularexpress/home/weilaidb/work/kernel/linux-3.0.8/scripts/tags.sh | UTF-8 | 1,511 | 3.46875 | 3 | [] | no_license | #!/bin/sh
# Generate tags or cscope files
# Usage tags.sh <mode>
#
# mode may be any of: tags, TAGS, cscope
#
# Uses the following environment variables:
# ARCH, SUBARCH, SRCARCH, srctree, src, obj
if [ "$KBUILD_VERBOSE" = "1" ]; then
set -x
fi
# This is a duplicate of RCS_FIND_IGNORE without escaped '()'
ignore="( -name SCCS -o -name BitKeeper -o -name .svn -o \
-name CVS -o -name .pc -o -name .hg -o \
-name .git ) \
-prune -o"
# Do not use full path if we do not use O=.. builds
# Use make O=.
# to force full paths for a non-O= build
if [ "$" = "" ]; then
tree=
else
tree=$/
fi
# Find all available archs
find_all_archs()
# Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH
if [ "$" = "" ]; then
ALLSOURCE_ARCHS=$
elif [ "$" = "all" ]; then
find_all_archs
fi
# find sources in arch/$ARCH
find_arch_sources()
# find sources in arch/$1/include
find_arch_include_sources()
# find sources in include/
find_include_sources()
# find sources in rest of tree
# we could benefit from a list of dirs to search in here
find_other_sources()
find_sources()
all_sources()
all_kconfigs()
all_defconfigs()
docscope()
dogtags()
exuberant()
emacs()
xtags()
# Support um (which uses SUBARCH)
if [ "$" = "um" ]; then
if [ "$SUBARCH" = "i386" ]; then
archinclude=x86
elif [ "$SUBARCH" = "x86_64" ]; then
archinclude=x86
else
archinclude=$
fi
fi
case "$1" in
"cscope")
docscope
;;
"gtags")
dogtags
;;
"tags")
rm -f tags
xtags ctags
;;
"TAGS")
rm -f TAGS
xtags etags
;;
esac
| true |
22d9ac323d115f2b38a75955a799df68c3b43df1 | Shell | labellson/dotfiles | /bash/.bashrc | UTF-8 | 174 | 2.796875 | 3 | [] | no_license | # From arch wiki: set fish as a interactive shell only
# Hand the interactive session over to fish when:
#   1. the parent process is not already fish (prevents a loop when fish
#      itself spawns bash for POSIX scripts),
#   2. bash was not invoked with -c (BASH_EXECUTION_STRING is unset for
#      interactive shells), and
#   3. fish is installed - `command -v fish` prints nothing otherwise, making
#      the last [[ ]] word empty and therefore false.
if [[ $(ps -p $PPID -o 'command=') != "fish" && -z ${BASH_EXECUTION_STRING} && `command -v fish` ]]
then
    exec fish
fi
| true |
e6a5ff778b95e5cb45de40b4d3a4df0dcd9c1dde | Shell | M-Evans/tsh | /test/testall.sh | UTF-8 | 383 | 2.796875 | 3 | [
"MIT"
] | permissive | TMP1=`mktemp`
TMP2=`mktemp`
# Run each trace 01-16 through both shell builds ("test<N>" and "rtest<N>"
# make targets - presumably the implementation under test vs. the reference;
# confirm against the Makefile). The sdriver banner line is dropped and
# anything in parentheses (PIDs/job ids, which vary per run) is masked so the
# two outputs are directly diffable.
for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16
do
make test$i 2>&1 | grep -v sdriver | sed 's/(.*)/(XXXXX)/g' > "$TMP1"
make rtest$i 2>&1 | grep -v sdriver | sed 's/(.*)/(XXXXX)/g' > "$TMP2"
echo "----- diff $i ---------------------"
diff "$TMP1" "$TMP2"
echo '==================================='
done
rm "$TMP1" "$TMP2"
| true |
20246345b42e0f2f2c5a6bc8948fbfe87ad2ab66 | Shell | pld-linux/nagios-nsca | /nagios-nsca.submit | UTF-8 | 1,894 | 4.15625 | 4 | [] | no_license | #!/bin/sh
# Arguments for service:
#  $1 = host_name (Short name of host that the service is
#       associated with)
#  $2 = svc_description (Description of the service)
#  $3 = state_string (A string representing the status of
#       the given service - "OK", "WARNING", "CRITICAL"
#       or "UNKNOWN")
#  $4 = plugin_output (A text string that should be used
#       as the plugin output for the service checks)
#  $5 = perfdata
#
# Arguments for host:
#  $1 = host_name (Short name of host we check for status)
#  $2 = state_string (A string representing the status of
#       the given service - "OK", "DOWN", "UNREACHABLE"
#       or "UNKNOWN")
#  $3 = plugin_output (A text string that should be used
#       as the plugin output for the host checks)
#  $4 = perfdata
#
# 5 arguments = service check result, 4 arguments = host check result.
if [ "$#" = 5 ]; then
	TYPE=SERVICE
	CODE=$3
elif [ "$#" = 4 ]; then
	TYPE=HOST
	CODE=$2
else
	echo >&2 "You must specify exactly 4 or 5 arguments"
	exit 1
fi

# The first non-comment content of the config file names the central NSCA host.
CENTRAL=$(awk '!/#/ { print }' /etc/nagios/send_nsca-central)
# Fix: $CENTRAL must be quoted here - unquoted, "[ -z ... ]" breaks with a
# syntax error (or silently misbehaves) when the config expands to several
# words.
if [ -z "$CENTRAL" ]; then
	echo >&2 "You must specify nagios NSCA host in /etc/nagios/send_nsca-central"
	exit 1
fi

# Convert the state string to the corresponding return code
# (unknown strings fall through to the default of 3 = UNKNOWN; a bare digit
# 0-3 is passed through unchanged)
RETURN_CODE=3
case "$CODE" in
	OK)
		RETURN_CODE=0
		;;
	WARNING)
		RETURN_CODE=1
		;;
	DOWN)
		RETURN_CODE=1
		;;
	CRITICAL)
		RETURN_CODE=2
		;;
	UNREACHABLE)
		RETURN_CODE=2
		;;
	UNKNOWN)
		RETURN_CODE=3
		;;
	[0-3])
		RETURN_CODE="$CODE"
		;;
esac

# pipe the service check info into the send_nsca program, which
# in turn transmits the data to the nsca daemon on the central
# monitoring server
# Fix: the original used "echo -e", which under a POSIX /bin/sh such as dash
# prints "-e" literally and leaves \t unexpanded, corrupting the payload.
# printf is portable and produces the same bytes bash's "echo -e" did
# (tab-separated fields followed by a blank line).
# $CENTRAL is intentionally left unquoted so a config line that carries extra
# send_nsca arguments keeps working.
if [ "$TYPE" = "SERVICE" ]; then
	printf '%s\t%s\t%s\t%s|%s\n\n' "$1" "$2" "$RETURN_CODE" "$4" "$5" | /usr/sbin/send_nsca $CENTRAL -c /etc/nagios/send_nsca.cfg
elif [ "$TYPE" = "HOST" ]; then
	printf '%s\t%s\t%s|%s\n\n' "$1" "$RETURN_CODE" "$3" "$4" | /usr/sbin/send_nsca $CENTRAL -c /etc/nagios/send_nsca.cfg
else
	echo >&2 "This cannot happen"
	exit 1
fi
| true |
3d942755946f946906df284a4e6dba4668f47586 | Shell | th3architect/k8s_tf_demo | /tf_deploy.sh | UTF-8 | 1,109 | 3.25 | 3 | [] | no_license | #!/usr/bin/env bash
# Deploys Tungsten Fabric (Contrail) onto a Kubernetes master.
#   $1 - private IP of the k8s master (substituted into the manifest)
#   $2 - gateway of the underlay network (becomes VROUTER_GATEWAY)
#   $3 - optional container image tag (defaults to "latest")
if [ -z "$1" ]
then
echo "Missing 'master' private IP as first argument..."
exit 1
fi
if [ -z "$2" ]
then
echo "Missing 'gateway' of the underlay network as second argument..."
exit 1
fi
BUILD_TAG="latest"
if [ -n "$3" ]
then
BUILD_TAG="$3"
fi
K8S_MASTER_IP=$1
sudo mkdir -pm 777 /var/lib/contrail/kafka-logs
# Scrape the deployment YAML out of the <pre><code> block of the wiki page and
# substitute the master-IP template placeholder.
# NOTE(review): ">>" appends, so re-running the script duplicates the manifest
# inside tf.yaml - confirm whether ">" was intended.
curl https://github.com/Juniper/contrail-controller/wiki/contrail.yml | awk '/<pre><code>/{flag=1;next}/<\/pre>/{flag=0}flag' | sed "s/{{ K8S_MASTER_IP }}/$K8S_MASTER_IP/g" >> tf.yaml
## change the `VROUTER_GATEWAY` to the underly gateway or network connectivity to the master will be lost
sed -i "s/VROUTER_GATEWAY: $1/VROUTER_GATEWAY: $2/g" tf.yaml
## define the build we want to deploy
sed -i "s/:latest/:$BUILD_TAG/g" tf.yaml
## uncomment to change the default `pod` and `service` networks
#sed -i 's|KUBERNETES_API_SECURE_PORT: "6443"|KUBERNETES_API_SECURE_PORT: "6443"\n KUBERNETES_POD_SUBNETS: 10.48.0.0/12\n KUBERNETES_SERVICE_SUBNETS: 10.112.0.0/12\n KUBERNETES_IP_FABRIC_SUBNETS: 10.80.0.0/12|g' tf.yaml
kubectl apply -f tf.yaml | true |
bb72072f2f8f89df5fe69655910447ae168a37a0 | Shell | wincus/inf-provisioning | /view.d/10-view.sh | UTF-8 | 219 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# Attach a virt-viewer to every libvirt domain whose name contains
# "<MY_HOSTNAME>-<MY_ENV>" (both env vars are expected to be set by the caller).
# NOTE(review): `virsh list` output is split on whitespace, so table header and
# status words are also iterated; they simply fail the name match below.
for domain in $(virsh list --all)
do
    if printf '%s\n' "$domain" | grep -qiE "${MY_HOSTNAME}-${MY_ENV}"; then
        virt-viewer --connect "${VIRSH_DEFAULT_CONNECT_URI}" "$domain" &
        sleep 1   # stagger viewer start-up
    fi
done
exit 0
| true |
68a30ced19dd7e2c4065997eb8e6b2645c076788 | Shell | tonight-halfmoon/freebsd-on-T530 | /system_daemon/erlang/fakewriter/_usr/local/fakwriter/runner | UTF-8 | 354 | 3.140625 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# Start the Erlang fakewriter in the background, append its output to the
# log, and record the background PID into the file named by the global
# $pidfile (set by the caller before invoking main).
main()
{
    # truncate the log
    echo "" > /var/log/fakewriter
    _input=8
    /usr/local/bin/erl -pa /usr/local/fakewriter/ebin/ -boot start_clean -noshell -smp +S 1 \
    -s fakewriter process "$_input" >> /var/log/fakewriter & echo $! > "$pidfile"
}
# Print invocation help and abort with a failure status.
usage()
{
    printf 'usage: %s <path/to/pidfile>\n' "$0"
    exit 1
}
# Require the pidfile path as the first argument.
[ "$#" -eq 0 ] && usage
pidfile=$1
main "$pidfile"
| true |
718556a4ba0dced430b3b92e3d7266240a07f15b | Shell | SanyaBoriskin/Geometry | /scripts/copy_ims_kb.sh | UTF-8 | 1,473 | 2.546875 | 3 | [] | no_license | #!/bin/bash
export LD_LIBRARY_PATH=../sc-machine/bin

# Recreate ../ims.ostis.kb_copy as an empty directory.
if [ ! -d "../ims.ostis.kb_copy" ]; then
    mkdir ../ims.ostis.kb_copy
else
    rm -rf ../ims.ostis.kb_copy/*
fi

cd ../

# Common path prefixes for the sections we copy.
doc=ims.ostis.kb/knowledge_base_IMS/doc_technology_ostis
lib=$doc/section_library_OSTIS
proc=$lib/section_library_of_reusable_components_processing_machinery_knowledge

# Copy the selected knowledge-base sections into the working copy.
for src in \
    "$doc/section_unificated_semantic_network_and_representation/" \
    "$doc/section_unificated_models/" \
    "$doc/section_basic_model_of_the_unified_processing_of_semantic_networks/" \
    "$lib/section_library_of_reusable_components_interfaces/lib_ui_menu/" \
    "$proc/lib_c_agents/" \
    "$proc/lib_scp_agents/" \
    "$proc/section_library_of_reusable_programs_for_sc_text_processing/lib_scp_program/" \
    "ims.ostis.kb/to_check/" \
    "ims.ostis.kb/ui/"
do
    cp -a "$src" ims.ostis.kb_copy/
done

# The menu subtree must not be part of the copy.
rm -rf ims.ostis.kb_copy/ui/menu
cd -
| true |
f2eb7d65705d6b660b44cf07899b269a3633aa78 | Shell | dorota-alina/atm | /login.sh | UTF-8 | 562 | 3.15625 | 3 | [] | no_license | clear
# ATM-style login screen: prompts for username/password and compares them
# against $username / $password sourced from logs/username.txt and
# logs/password.txt. On failure it re-execs itself (./login.sh).
echo 'Loading...'
sleep 3
clear
echo "Welcome to the Bank of No Hope! "
echo
echo "Username: "
read -r USERNAME
#
source logs/username.txt
# Spaces added inside [[ ]]: the original "[[($X == ...)]]" form relies on
# fragile tokenisation and can fail to parse.
if [[ $USERNAME == "$username" ]];
then
echo
echo "Password: "
read -r PASSWORD
#
else
clear
echo "Incorrect username"
sleep 2
clear
# NOTE(review): after this child login returns, execution falls through to
# the password check below with an empty $PASSWORD — preserved as-is.
./login.sh
fi
#
source logs/password.txt
if [[ $PASSWORD == "$password" ]];
then
clear
echo "$username, you have sucessfully logged in."
sleep 2
./menu.sh
exit
#
elif [[ $PASSWORD != "$password" ]]; then
clear
echo "Incorrect password"
sleep 2
clear
./login.sh
else
exit 0
fi
clear
exit 0 | true |
5338391b8789f7b918bcda314a50b8965a53fc2c | Shell | manish-1498/Arithmetic_Operations | /descending.sh | UTF-8 | 316 | 3.109375 | 3 | [] | no_license | #!/bin/bash -x
# Read three integers a, b, c from stdin, evaluate four arithmetic
# expressions over them, and print the results in descending order.

# Bubble-sort the four derived values in descending order and print them.
# Fix: the original swap wrote into an unused `array` variable instead of
# `sort`, so the values were printed unsorted.
sort_desc() {
    local a=$1 b=$2 c=$3
    local -a vals
    vals[0]=$(( a + b * c ))
    vals[1]=$(( a * b + c ))
    vals[2]=$(( c + a / b ))   # integer division, as in the original
    vals[3]=$(( a % b + c ))
    local i j temp
    for (( i = 0; i < 3; i++ ))
    do
        for (( j = 0; j < 3; j++ ))
        do
            if (( vals[j] < vals[j+1] ))
            then
                temp=${vals[j]}
                vals[j]=${vals[j+1]}
                vals[j+1]=$temp
            fi
        done
    done
    echo "${vals[@]}"
}

read -r a
read -r b
read -r c
sort_desc "$a" "$b" "$c"
| true |
d8276c7b4f43674c857d75ed7a9053368dbd558b | Shell | jsantiago/spring-boot-example | /environment | UTF-8 | 1,086 | 3.890625 | 4 | [] | no_license | #!/bin/bash
set -e

# Directory this script lives in; lets the vagrant targets work no matter
# which directory the caller invokes the script from.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Remove build artifacts.
clean() {
    ./gradlew clean
}

# Compile and package the application.
build() {
    ./gradlew build
}

# Alias: reload the running environment.
run() {
    restart
}

# Show docker VM status.
status() {
    cd "$SCRIPT_DIR/docker" && vagrant status
}

# Build the app, then bring the docker-provider VM up.
start() {
    build
    cd "$SCRIPT_DIR/docker" && vagrant up --provider=docker
}

# Reload the docker VM.
restart() {
    cd "$SCRIPT_DIR/docker" && vagrant reload
}

# Halt the docker VM, then the top-level VM.
stop() {
    cd "$SCRIPT_DIR/docker" && vagrant halt
    cd "$SCRIPT_DIR" && vagrant halt
}

# Destroy the docker VM.
destroy() {
    cd "$SCRIPT_DIR/docker" && vagrant destroy
}
# Show the supported sub-commands.
usage() {
    printf 'Usage: %s [ clean | build | run | status | start | restart | stop | destroy ]\n' "$0"
}
# Dispatch each command-line argument to the function of the same name;
# any unknown argument prints usage and aborts.
if [[ $# == 0 ]]; then
    usage
    exit 1
fi

while [[ -n "$1" ]]; do
    case "$1" in
        clean|build|run|status|start|restart|stop|destroy)
            "$1"
            ;;
        *)
            usage
            exit 1
            ;;
    esac
    shift
done
| true |
020f7b672cbe50b15054c97deffa1aa7273abc98 | Shell | A-N-Other/pedestal | /subsample | UTF-8 | 484 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
version="GY170731"
usage="\
subsample ($version): subsampling of streamed FASTQ data
subsample [-h] <proportion>"
while getopts :h option; do
case "$option" in
h)
echo "$usage"
exit
;;
\?)
echo "$usage"
exit 1
;;
esac
done
if (( $# == 1 )) ; then
paste - - - - \
| awk "BEGIN {srand()} rand() < $1" \
| tr "\t" "\n"
else
echo "$usage"
exit 1
fi
| true |
6f98e2df36dfcf39e9cb564ab6b60fc6777161de | Shell | Chaosdragonian/hal_fuzz | /test_stm32_tcp_echo.sh | UTF-8 | 517 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Wall-clock timing around a single harness run of the STM32 TCP echo test.
start_ns=$(date +%s%N)

BINARY=./tests/stm32_tcp_echo/stm32_tcp_echo_server.yml
INPUTS=./tests/stm32_tcp_echo/inputs
OUTPUTS=./tests/stm32_tcp_echo/output/

HARNESS="python -m hal_fuzz.harness -c $BINARY"

# Fuzzing invocation, kept for reference:
#./afl-fuzz -U -m none -i $INPUTS -o $OUTPUTS -- $HARNESS @@
$HARNESS $INPUTS/TCP_Echo_Server_Client.pcapng.input

end_ns=$(date +%s%N)
# Nanoseconds -> whole milliseconds -> seconds rendered with six decimals
# (same rounding as the old bc/awk pipeline).
elapsed_ms=$(( (end_ns - start_ns) / 1000000 ))
elapsedSec=$(awk -v ms="$elapsed_ms" 'BEGIN { printf "%.6f", ms / 1000 }')
echo TOTAL: $elapsedSec sec | true |
9735f26dd7fa7929bdbead8ac4da01b181c9b657 | Shell | TimBao/Material | /Utility/build_ucar_iphone.sh | UTF-8 | 2,617 | 3.484375 | 3 | [] | no_license | #!/bin/sh
#parse input to build debug or relase
echo ""
echo "usage: build_ucar_iphone.sh release or build_ucar_iphone debug default is debug"
echo ""

# Quoted "$1" so the test cannot mis-parse when the argument is omitted;
# `=` instead of `==` keeps this valid under a strict /bin/sh.
if [ "$1" = "release" ]; then
BUILD_TYPE=Release
else
BUILD_TYPE=Distribution
fi

CURDIR=$PWD
PROJECT_DIR=$CURDIR/../ucar
APPLICATION_NAME=UCar
#DEVELOPER_NAME=iPhone Distribution: Beijing XinDaoHaoDa Technology Co., Ltd.
PROFILE_NAME=$(head -n 1 provisoning_profile.txt)
PROVISONING_PROFILE=${PROJECT_DIR}/UCar/cer/${PROFILE_NAME}
CPU_ARCHITECTURE="armv7 arm64"
SIMULATOR_OR_IOS_SDK=iphoneos
DEVELOPMENT_TARGET=7.0
SDK_VERSION=8.1
OUTPUT=$CURDIR/output

# Start from a clean output directory for this build type.
if [ -d "${OUTPUT}/${BUILD_TYPE}-${SIMULATOR_OR_IOS_SDK}" ]
then
rm -rf "${OUTPUT}/${BUILD_TYPE}-${SIMULATOR_OR_IOS_SDK}"
else
mkdir "${OUTPUT}"
fi

##clean
cd "${PROJECT_DIR}"
echo "====>> clean output"
# was `2>/dev/null 2>&1`: the second redirection overrode the first, so
# nothing was silenced; send both streams to /dev/null as clearly intended.
xcodebuild clean > /dev/null 2>&1
echo "====>> compile project"

if [ "$2" = "online" ]; then
echo "==>ios build type"
echo ${BUILD_TYPE}
echo "current path="
echo $PWD
#xcodebuild -project ${PROJECT_DIR}/${APPLICATION_NAME}.xcodeproj -configuration ${BUILD_TYPE} -target ${APPLICATION_NAME} -sdk ${SIMULATOR_OR_IOS_SDK}${SDK_VERSION} ARCHS='armv7 arm64' IOS_DEVELOPMENT_TARGET=${DEVELOPMENT_TARGET}
xcodebuild -scheme ${APPLICATION_NAME} -archivePath "${OUTPUT}/${APPLICATION_NAME}.xcarchive" archive
xcodebuild -exportArchive -exportFormat ipa -archivePath "${OUTPUT}/${APPLICATION_NAME}.xcarchive" -exportPath "${OUTPUT}/${APPLICATION_NAME}.ipa"
else
echo "==>---->ios build type"
echo ${BUILD_TYPE}
xcodebuild -project "${PROJECT_DIR}/${APPLICATION_NAME}.xcodeproj" -configuration ${BUILD_TYPE} -target ${APPLICATION_NAME} -sdk ${SIMULATOR_OR_IOS_SDK}${SDK_VERSION} ARCHS='armv7 arm64' IOS_DEVELOPMENT_TARGET=${DEVELOPMENT_TARGET} GCC_PREPROCESSOR_DEFINITIONS="DEBUG_TEST_STATUS"
#fi
#copy app to release folder
echo "====>> copy app to output folder"
cp -r "${PROJECT_DIR}/build/${BUILD_TYPE}-${SIMULATOR_OR_IOS_SDK}" "${OUTPUT}"
#if [ ${BUILD_TYPE} == "Release" ]; then
echo "=====>> archiving app to ipa "
#package app to ipa
xcrun -sdk ${SIMULATOR_OR_IOS_SDK}${SDK_VERSION} PackageApplication -v "${OUTPUT}/${BUILD_TYPE}-${SIMULATOR_OR_IOS_SDK}/${APPLICATION_NAME}.app" -o "${OUTPUT}/${BUILD_TYPE}-${SIMULATOR_OR_IOS_SDK}/${APPLICATION_NAME}.ipa"
#xcrun -sdk ${SIMULATOR_OR_IOS_SDK}${SDK_VERSION} PackageApplication -v "${OUTPUT}/${BUILD_TYPE}-${SIMULATOR_OR_IOS_SDK}/${APPLICATION_NAME}.app" -o "${OUTPUT}/${BUILD_TYPE}-${SIMULATOR_OR_IOS_SDK}/${APPLICATION_NAME}.ipa" --embed "${PROVISONING_PROFILE}"
#fi
fi

cd "${CURDIR}"
| true |
c45a22acae3c84a48e80eb0409f6eaaf60ab032d | Shell | ryan-williams/screen-helpers | /screens | UTF-8 | 197 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env bash
# Reattach each detached screen session in turn; after every N sessions
# (default 10, override with $1) ask the user whether to continue.
prompt_every_n="${1:-10}"
i=0
for s in $(screen-list-detached); do
    screen -r "$s"
    i=$((i + 1))
    if [ "$i" -eq "$prompt_every_n" ]; then
        prompt "Continue"   # external helper; waits for user confirmation
        i=0
    fi
done
| true |
610077d6d2cfbaa33f120892489165c896749e34 | Shell | zyndor/xrhodes | /tools/bash/update-changes.sh | UTF-8 | 562 | 3.984375 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Prepend a new version section (bullet list of commits since the RELEASE
# tag) to the CHANGES file and commit the result.
if [ -z "$1" ]
then
    echo "Usage: ./update-changes.sh [version]"
    exit 1
fi

changes=$(dirname "$0")/../../CHANGES

# paste version to new changes file
echo "$1" > "$changes.new"
# append commits since RELEASE as bullets
# (cut -c 9- assumes the default 7-char abbreviated hash plus a space)
git log --oneline --reverse --no-decorate RELEASE.. | cut -c 9- | sed "s/^/- /g" >> "$changes.new"
echo >> "$changes.new"
# paste old changes to new changes file
cat "$changes" >> "$changes.new"
# atomically replace old changes file with new (mv overwrites; the
# separate rm of the original was unnecessary)
mv -- "$changes.new" "$changes"
# stage & commit
git add "$changes"
git commit -m "Changelog update for $1."
| true |
c9ccae1c8c8534b1e6b3379d2550b242fbdac724 | Shell | tjweir/unfog-cli | /install.sh | UTF-8 | 714 | 3.546875 | 4 | [] | permissive | #!/bin/sh
# Map $OSTYPE (set by bash/zsh) to the release-asset platform name.
# Prints one of: linux, osx, windows. Returns non-zero for unknown systems.
# Rewritten as a POSIX `case` because the original used [[ ]] under
# #!/bin/sh, and `return -1` is not a valid sh return status (use 1).
get_os () {
  case "$OSTYPE" in
    linux-gnu) echo "linux" ;;
    darwin*)   echo "osx" ;;
    cygwin)    echo "windows" ;;
    msys)      echo "windows" ;;
    win32)     echo "windows" ;;
    freebsd*)  echo "linux" ;;
    *)         return 1 ;;
  esac
}
cd /tmp || exit 1

# Abort if the platform cannot be determined; otherwise we would try to
# download a nonexistent "unfog-.tar.gz" asset.
os=$(get_os) || { echo "Unsupported OS type: $OSTYPE" >&2; exit 1; }

echo "Downloading latest ${os} release..."
curl -sLo unfog.tar.gz "https://github.com/unfog-io/unfog-cli/releases/latest/download/unfog-${os}.tar.gz"
echo "Installing binaries..."
tar xzf unfog.tar.gz
rm unfog.tar.gz
chmod u+x unfog*
sudo mv unfog* /usr/local/bin/
echo "Unfog installed!"
| true |
b695734743152882003963fa37cee5e9a9cff6ff | Shell | professorbeautiful/DUE | /vignettes/ready-for-wordpress.sh | UTF-8 | 1,141 | 2.9375 | 3 | [] | no_license | ## Upload the files from vignettes/figures to the blog media page.
## No equal signs "=" or "," in figure file names!
## Check the year/month setting that the files were uploaded.
yearmonth=2019/11/
## Unfortunately, if I add more figures next month, this whole system fails!
## AHA! I turned off the option to organize by month and year.
## Next, run DUE_vignette-no-base64.html
# Deliberately cleared: uploads are no longer organised by date, so the blog
# URL has no year/month component.
yearmonth=
## Next, run this file.
# Most recently rendered copy of the vignette in RStudio's temp tree.
tempfile=$(ls -t /private/var/folders/*/*/T/*/*/DUE_vignette-no-base64.html | head -1)
echo "$tempfile"
## Check $tempfile against the R Markdown tab
#For example tempfile=/private/var/folders/6v/y_cp6z4d22ggbgrqdk0g73v80000gv/T/RtmpxbKzxS/preview-112e64d8af0c1.dir/DUE_vignette-no-base64.html
localfolderstring=/Users/Roger/Google%20Drive/_HOME/DUE/vignettes/figures/
localfolder=/Users/Roger/GoogleDrive/_HOME/DUE/vignettes/figures/
## Gee, thanks, Google! Sheesh. Computer science 101: no spaces in file names.
blogfolder=http://www.professorbeautiful.org/IveBeenThinkin/wp-content/uploads/$yearmonth
# Rewrite local figure paths to the blog upload URL, save a copy next to the
# vignettes folder, and put the result on the clipboard.
sed "s,$localfolderstring,$blogfolder," "$tempfile" | tee "$localfolder/../DUE_vignette-no-base64.html" | pbcopy
## Finally, paste into the html editor at IveBeenThinkin | true |
c20ed8a16dcb3a25c1ee41be0c5baf4a4db90bcc | Shell | luszczynski/scripts | /util/find_java_class.sh~ | UTF-8 | 593 | 4.1875 | 4 | [] | no_license | #!/bin/bash
usage="Uso: findJavaClass directory ClassName "
IFS='
'
if [ $# -lt 2 ] ; then
echo $usage
exit 1
fi
if [ ! -d "$1" ] ; then
echo "Diretorio nao existe"
exit 1
fi
find "${1}" -type f -name \*.jar | while read jar_file ;
do
found_class=`unzip -l $jar_file | awk '{print $4}' | grep $2`
num_classes=`echo $found_class | wc -c`
if [ $num_classes -gt 1 ] ; then
echo ""
echo "Arquivo:"
echo " $jar_file"
echo "Classes:"
echo $found_class | sed 's/\ /\n/g' | sed 's/^.*/\ \ \ \ &/g'
fi
done
| true |
4ec2a97507f944331892b1d0e06f674b81867bb8 | Shell | pld-linux/libteam | /teamd-lvl1-service-generator | UTF-8 | 651 | 3.453125 | 3 | [] | no_license | #!/bin/sh
# Generate teamd@<iface>.service symlinks for level-1 team interfaces.
# $1: destination unit directory (defaults to /tmp).
destunitdir=${1:-/tmp}

# Do nothing if target is disabled
[ -e /etc/systemd/system/network.target.wants/teamd-lvl1.target ] || exit 0

# Provides $LVL1TEAMIFS, a whitespace-separated list of interfaces.
[ -f /etc/sysconfig/teamd ] && . /etc/sysconfig/teamd

if [ -d "$destunitdir/teamd-lvl1.target.wants" ]; then
	rm -f "$destunitdir"/teamd-lvl1.target.wants/teamd@*.service
else
	mkdir -p "$destunitdir/teamd-lvl1.target.wants"
fi

# $LVL1TEAMIFS intentionally unquoted: it is a word list.
for teamif in $LVL1TEAMIFS; do
	[ -L "$destunitdir/teamd-lvl1.target.wants/teamd@$teamif.service" ] && \
		continue
	ln -s /lib/systemd/system/teamd@.service \
		"$destunitdir/teamd-lvl1.target.wants/teamd@$teamif.service"
done
| true |
7e37ceaf96031baca27401d9439b89313a0182be | Shell | bioconda/bioconda-recipes | /recipes/phylip/build.sh | UTF-8 | 1,909 | 3.46875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
## The build file is taken from the biobuilds conda recipe by Cheng
## H. Lee, adjusted to fit the bioconda format.

# Configure: conda may leave these as the literal string "<UNDEFINED>";
# normalise them to empty / sane defaults.
[ "$BB_ARCH_FLAGS" == "<UNDEFINED>" ] && BB_ARCH_FLAGS=
[ "$BB_OPT_FLAGS" == "<UNDEFINED>" ] && BB_OPT_FLAGS=
[ "$BB_MAKE_JOBS" == "<UNDEFINED>" ] && BB_MAKE_JOBS=1

CFLAGS="${CFLAGS} ${BB_ARCH_FLAGS} ${BB_OPT_FLAGS}"
# Additional flags suggested by the phylip makefile
CFLAGS="${CFLAGS} -fomit-frame-pointer -DUNX"

BUILD_ARCH=$(uname -m)
BUILD_OS=$(uname -s)

if [ "$BUILD_ARCH" == "ppc64le" ]; then
    # Just in case; make the same assumptions about plain "char" declarations
    # on little-endian POWER8 as we do on x86_64.
    CFLAGS="${CFLAGS} -fsigned-char"
fi

# Build
cd src
sed -i.bak "s:@@prefix@@:${PREFIX}:" phylip.h
if [ "$BUILD_OS" == "Darwin" ]; then
    # Tweak a few things for building shared libraries on OS X.
    sed -i.bak 's/-Wl,-soname/-Wl,-install_name/g' Makefile.unx
    sed -i.bak 's/\.so/.dylib/g' Makefile.unx
fi
make -f Makefile.unx CFLAGS="$CFLAGS" install

# Install
cd ..
SHARE_DIR="${PREFIX}/share/${PKG_NAME}-$PKG_VERSION-$PKG_BUILDNUM"
for d in fonts java exe; do
    install -d "${SHARE_DIR}/${d}"
    install ${d}/* "${SHARE_DIR}/${d}"
done
pushd "${SHARE_DIR}/java"
rm -f *.unx
popd

# Install wrapper scripts (variables now quoted so paths with spaces work;
# the trailing globs stay outside the quotes so they still expand)
[ -d "${PREFIX}/bin" ] || mkdir -p "${PREFIX}/bin"
ln -s "$SHARE_DIR"/exe/* "$PREFIX/bin/"
# phylip's java interface can't find its dylibs (libtreegram.so,
# libdrawtree.dylib) Easiest, but oh so inelegant, solution was to
# link them to $PREFIX/bin
ln -s "$SHARE_DIR"/java/* "$PREFIX/bin/"
cp "$RECIPE_DIR/phylip.py" "$PREFIX/bin/phylip"
cp "$RECIPE_DIR/drawtree.py" "$SHARE_DIR/drawtree_gui"
ln -s "$SHARE_DIR/drawtree_gui" "$PREFIX/bin"
cp "$RECIPE_DIR/drawgram.py" "$SHARE_DIR/drawgram_gui"
ln -s "$SHARE_DIR/drawgram_gui" "$PREFIX/bin"
cd "${PREFIX}/bin"
chmod 0755 phylip drawgram drawtree drawgram_gui drawtree_gui
ls "$PREFIX/bin"
| true |
26ed7a19c0bfd5562952dde7a2cde99e55cc72a2 | Shell | proninkv/ac_generate_config | /AC_Config_TXT_to_XML_v1.0.sh | UTF-8 | 5,387 | 3.78125 | 4 | [] | no_license | #!/bin/bash
#--------------------------------------------------------------------------------------------
# Program name: AC_Config_Txt_to_XML_v1.0.sh
# Version: 1.0 Author: Pronin Konstantin
# Date: 2018/10/30
#
# 1. This program is used to transfer config-template files from TXT format into XML format.
# 2. Before execute this program, please put the TXT and CSV file at the same path,
#    then execute the .sh with three passed parameters $1 $2 $3:
#    $1 is the location of the TXT file with a '/' at the end, as '/home/user/work/'
#    $2 is the full name of the TXT file, as 'xxxx.txt'
#    $3 is the full name of the CSV file, as 'XXXX.csv'
# 3. It will generate a different XML file per MAC address (named cfgMAC.xml) from the
#    same TXT config-template; output goes to the same location as the TXT file.
#--------------------------------------------------------------------------------------------
#check if the user have inputed the $1 $2 $3, if not, quit and give a hint.
if [ ! -n "$1" ] || [ ! -n "$2" ] || [ ! -n "$3" ] ; then
echo " you have not input the \$1 or \$2 or \$3"
echo " Please input \$1 as the path of the txt, end with '/'"
echo " Please input \$2 as the name of the txt, end with '.txt'"
echo " Please input \$3 as the name of the csv, end with '.csv'"
exit 1
else
# A literal carriage-return byte, used below to strip DOS line endings
# instead of embedding a raw ^M control character inside the sed programs.
cr=$(printf '\r')
#after commond inputed correctly, create a tmp file.
touch $1tmp1.xml
#add the CFG declaration
echo "; Config file for AudioCodes SIP Phone" >> $1tmp1.xml
#read the content of txt into the tmp file
cat $1$2 >> $1tmp1.xml
#delete the Unicode BOM as the <feff> at the txt, if there is any.
sed 's/^\xEF\xBB\xBF//' $1tmp1.xml >> $1tmp2.xml
#comment all the line start with # and -, delete all the ^M sign caused by the txt editor.
sed -e 's/^#\(.*$\)/;\1/' -e 's/^-\(.*$\)/;\1/' -e "s/$cr//g" $1tmp2.xml >> $1tmp3.xml
#convert xml skip characters into character entity references, then add <> </> to every configuration line.
sed -e '/^P/s:&:\&amp;:g' -e '/^P/s:<:\&lt;:g' -e '/^P/s:>:\&gt;:g' -e "/^P/s:':\&apos;:g" -e '/^P/s:":\&quot;:g' -e '/^P/s: ::g' -e '/^P/s:=: :' -e '/^P/s:\(.*\) \(.*\):<\1>\2</\1>:g' $1tmp3.xml >> $1tmp4.xml
#add the end of XML declaration
#echo " </config>" >> $1tmp4.xml
#echo "</gs_provision>" >>$1tmp4.xml
#prepare the CSV file, delete all the blankspace and delete the line with blank MAC value
sed -e 's: ::g' -e '/^,/d' -e '/^$/d' $1$3 > $1tmp5.csv
#read the CSV file and generate all the XML files by the information in the CSV file.
while IFS=, read -r mac IP_Addr NET_Mask NET_GW user DisplayName password Registrar Provisioning_Protocol Provisioning_IP; do
#check if the MAC address matches the prefix of AC devices
if [[ $mac == *"000B82"* ]] || [[ $mac == *"000b82"* ]]; then
#check the P value of Sip User ID in the txt file, and change the value in different conditions.
# NOTE(review): $authid is never populated from the CSV columns, so the
# <P36>/<P4090>/<P3090> values are empty — confirm whether DisplayName was meant.
if grep -q "<P35>" $1tmp4.xml ; then
sed -e "/^<mac>/s:.*:<mac>$mac</mac>:g" -e "/^<P35>/s:.*:<P35>$user</P35>:g" -e "/^<P34>/s:.*:<P34>$password</P34>:g" -e "/^<P36>/s:.*:<P36>$authid</P36>:g" -e "/^<P/s:<P: <P:" -e "s/$cr//g" < $1tmp4.xml > "$1cfg$mac.xml"
elif grep -q "<P4060>" $1tmp4.xml ; then
sed -e "/^<mac>/s:.*:<mac>$mac</mac>:g" -e "/^<P4060>/s:.*:<P4060>$user</P4060>:g" -e "/^<P4120>/s:.*:<P4120>$password</P4120>:g" -e "/^<P4090>/s:.*:<P4090>$authid</P4090>:g" -e "/^<P/s:<P: <P:" -e "s/$cr//g" < $1tmp4.xml > "$1cfg$mac.xml"
elif grep -q "<P3060>" $1tmp4.xml ; then
sed -e "/^<mac>/s:.*:<mac>$mac</mac>:g" -e "/^<P3060>/s:.*:<P3060>$user</P3060>:g" -e "/^<P3120>/s:.*:<P3120>$password</P3120>:g" -e "/^<P3090>/s:.*:<P3090>$authid</P3090>:g" -e "/^<P/s:<P: <P:" -e "s/$cr//g" < $1tmp4.xml > "$1cfg$mac.xml"
else
sed -e "/^<mac>/s:.*:<mac>$mac</mac>:g" -e "/^<P/s:<P: <P:" -e "s/$cr//g" < $1tmp4.xml > "$1cfg$mac.xml"
fi
elif [[ $mac == *"00908F"* ]] || [[ $mac == *"00908f"* ]]; then
sed -e "/^network\/lan\/fixed_ip\/gateway=/s:.*:network/lan/fixed_ip/gateway=$NET_GW:g" -e "/^network\/lan\/fixed_ip\/ip_address=/s:.*:network/lan/fixed_ip/ip_address=$IP_Addr:g" \
-e "/^network\/lan\/fixed_ip\/netmask=/s:.*:network/lan/fixed_ip/netmask=$NET_Mask:g" -e "/^voip\/line\/0\/description=/s:.*:voip/line/0/description=$user:g" \
-e "/^voip\/line\/0\/auth_name=/s:.*:voip/line/0/auth_name=$user:g" -e "/^voip\/line\/0\/id=/s:.*:voip/line/0/id=$user:g" \
-e "/^voip\/line\/0\/auth_password=/s:.*:voip/line/0/auth_password=$password:g" -e "/^voip\/signalling\/sip\/proxy_address=/s:.*:voip/signalling/sip/proxy_address=$Registrar:g" \
-e "/^provisioning\/configuration\/url=/s:.*:provisioning/configuration/url=$Provisioning_Protocol\://$Provisioning_IP/configfiles/:g" \
-e "/^ems_server\/provisioning\/url=http\:\/\//s:.*:ems_server/provisioning/url=http\://$Provisioning_IP/ipprest/:g" \
-e "/^provisioning\/firmware\/url=/s:.*:provisioning/firmware/url=$Provisioning_Protocol\://$Provisioning_IP/firmwarefiles/:g" < $1tmp4.xml > "$1$mac.cfg"
else
echo " Notice: The Mac \"$mac\" is invalid, please check the CSV file"
fi
done <$1tmp5.csv
#delete all the tmp files
rm $1tmp1.xml $1tmp2.xml $1tmp3.xml $1tmp4.xml $1tmp5.csv
#end of if-statement
fi
#give a hint where the XML is outputed
echo " Thank you for using this program!"
echo " Your XML files will be generated at: $1"
| true |
e6cfc084b1ef934d48619e70ed3d95e0c53f3195 | Shell | hsnks100/cmake-examples | /01-basic/K-imported-targets/run_test.sh | UTF-8 | 333 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Make sure we have the minimum cmake version (>= 3.5); on older cmake,
# or when cmake is missing, skip the example quietly (exit 0), matching
# the old behaviour.
cmake_version=$(cmake --version | grep version | cut -d" " -f3)
# Numeric major/minor comparison: the previous regex ([3-9].[5-9.].[0-9])
# wrongly rejected valid versions with two-digit minors such as 3.10+.
major=${cmake_version%%.*}
minor=${cmake_version#*.}
minor=${minor%%.*}
case "${major}${minor}" in
    ''|*[!0-9]*) exit 0 ;;   # cmake missing or unparsable version output
esac
if [ "$major" -lt 3 ] || { [ "$major" -eq 3 ] && [ "$minor" -lt 5 ]; }; then
    exit 0
fi
echo "correct version of cmake"
mkdir -p build && cd build && cmake .. && make
if [ $? -ne 0 ]; then
    echo "Error running example"
    exit 1
fi
| true |
533c10c0a687c6e31b8d3fd7ccdaba401f5ceac2 | Shell | martinmCGG/diplomka | /dockers/data_conversion/pnet_data/run.sh | UTF-8 | 807 | 2.984375 | 3 | [] | no_license | ##########################################################################################################
# Set required variables
name="pnet"
dataset_path="/local/krabec/ShapeNet/ShapeNet"
output_dir="/local/krabec/ShapeNet"
docker_hidden=t
##########################################################################################################
image_name="pointcloud"
output_dir="$output_dir/$name"
mkdir -m 777 $output_dir
docker build -t "$image_name" .
docker kill "$image_name"
docker rm "$image_name"
docker run --rm -id --name "$image_name" -v "$dataset_path":/dataset -v "$output_dir":/data "$image_name"
docker exec -i -"$docker_hidden" "$image_name" sh -c "python3 pnet_data.py"
##########################################################################################################
| true |
ecf6857f24a989b4bd1269ddcfc8ec665984cf75 | Shell | dmonroe85/linux_install | /dns.sh | UTF-8 | 1,682 | 3.453125 | 3 | [] | no_license | [200~#!/bin/bash
set -e
# Raspberry Pi dnsmasq script
# Stephen Wood
# www.heystephenwood.com
#
# Usage: $ sudo ./raspberrypi_dnsmasq
#
# Net install:
#   $ curl https://raw.github.com/stephendotexe/raspberrypi/master/roles/dnsmasq_server | sudo sh

# Must be run as root
if [[ $(whoami) != "root" ]]
then
    echo "This install must be run as root or with sudo."
    exit 1   # was plain `exit`, which reported success despite aborting
fi

apt-get install -y dnsmasq dnsutils

# Set Up Main Conf
cat - > /etc/dnsmasq.conf <<DNSMASQCONF
# Set up your local domain here
domain-needed
bogus-priv
expand-hosts
domain=bu.net
local=/bu.net/
resolv-file=/etc/resolv.dnsmasq
# Public DNS Servers
server=8.8.8.8
server=8.8.4.4
# Max cache size dnsmasq can give us, and we want all of it!
cache-size=10000
DNSMASQCONF

# Set Up Resolver Conf
cat - > /etc/resolv.dnsmasq <<RESOLVERCONF
nameserver 127.0.0.1
RESOLVERCONF

service dnsmasq restart

echo "Testing dns performance with random urls"
# We'll generate a list of urls that we're moderately certain doesn't exist in our cache to get a good base line for speed increases.
URLS=$(for i in {1..50}; do echo www.$RANDOM.com;done)
# Make the requests in parallel, four passes. Fix: the original passed the
# stray words "grep time" to dig instead of the xargs placeholder "^", so
# the generated URLs were never actually queried.
for pass in 1 2 3 4; do
    echo $URLS | xargs -I^ -P50 dig @127.0.0.1 ^ | awk '/time/{sum+=$4} END { print "average response = ", sum/NR,"ms"}'
done

echo 'Installation complete. Enjoy!'
d8e45e57cf117c5fd2859ecf1a7c28f8bcc86520 | Shell | kaiana/deprecated | /bigcontrolcenter-system-repair/usr/share/bigcontrolcenter/categories/system/repair/partition.sh.htm | UTF-8 | 6,496 | 3.296875 | 3 | [] | no_license | #!/bin/bash
#Panel for BigLinux
#
#Authors:
# Bruno Goncalves Araujo <www.biglinux.com.br>
#
#License: GPLv2 or later
#################################################
. /usr/share/bigcontrolcenter/default/theme-categories.sh.htm
#Translation
export TEXTDOMAINDIR="/usr/share/locale-langpack"
export TEXTDOMAIN=bigcontrolcenter-system-repair
echo '
<script language="JavaScript">
window.resizeTo(640,480);
function NoResize(){
window.resizeTo(640,480);
}
</script>
'
title=$"Limpar o sistema"
apply=$"Aplicar"
close=$"Sair"
tip_kernel=$"1 - Ao atualizar o sistema podem ficar instaladas diferentes versoes de kernel. Essas versoes sao apresentadas para escolha logo apos ligar o computador.<br><br>2 - No momento o sistema esta utilizando o Kernel $(uname -r). Por questoes de seguranca o mesmo nao esta disponivel na lista para remocao.<br><br>3 - Manter duas versoes de Kernel instaladas pode trazer um pouco mais de confianca no sistema, pois se um for danificado tera outro disponivel, porem possuir uma quantidade maior costuma somente utilizar espaco em excesso."
tip_apt=$"Arquivos de instalacao e atualizacao que ja foram utilizados e podem ser removidos com seguranca."
tip_tmp=$"Arquivos temporarios. Nao e recomendado apaga-los, mas se estiver utilizando espaco em excesso pode ser conveniente."
tip_cache=$"Pasta com arquivos de Cache de diversos programas. Esvaziar essa pasta pode liberar um bom espaco em disco e tambem melhorar o desempenho de alguns programas."
tip_thumbnail=$"Pasta com miniaturas geradas pelo gerenciador de arquivos."
tip_firefox=$"Remove somente a cache, ou seja, mantem historico, favoritos e todas as configuracoes do Firefox."
tip_trash=$"Esvazia a lixeira de todos usuarios."
tip_nepomuk=$"Remove a cache, porem e recomendado remover apenas se nao for utilizar mais o Nepomuk/Strigi. Caso contrario sera necessario bastante tempo de processamento para gerar novamente."
#title
echo "<title>$title</title><body onResize=\"NoResize();\">"
#header
open_header
echo "$title" '</font><br>' $"Libere espaco no HD."
close_header
#O uso do echo pode ser separado em diversos pedacoes dentro do mesmo comando
#O uso de aspas simples faz o sistema exibir exatamente o que esta escrito
#Quando se utiliza aspas duplas o sistema executa algumas partes do codigo antes de passar a resposta, entre essas execucoes inclui a interpretacao de variaveis
#Para incluir textos a serem exibidos ao usuario utilize aspas duplas com $ antes, para que funcione o sistema de tradução, exemplo: $"texto"
#No exemplo abaixo o memso comando echo foi aberto e fechado diversas vezes de acordo com a necessidade, aspas simples, duplas e duplas com traducao.
echo '<center><table width=95%><tr><td>'
echo '<form action=limpar_root_submit.sh.htm method=get>'
echo "<table width=95%><tr><td align=center>" "<b>" $"Remover" "</td><td align=center>" "<b>" $"Espaco utilizado" "</td><tr>"
for i in $(dpkg --get-selections | grep linux-image | grep "[0-9]" | grep -v "deinstall" | sed "s/linux-image-$(uname -r)//g;s/\t.*//g;$,/^$/d")
do
echo "<tr><td><INPUT TYPE=checkbox ID=$i NAME=kernel VALUE=\"$i\" $google_check $mouseover_open $tip_google $mouseover_close><label for=$i $mouseover_open $tip_kernel $mouseover_close>" "$(echo "$i" | sed 's/linux-image-/Kernel /g')" "</td><td align=right>" "Em media 150M"'</label></td></tr>'
done
echo "<tr><td><INPUT TYPE=checkbox ID=apt NAME=apt VALUE=yes $apt_check $mouseover_open $tip_apt $mouseover_close><label for=apt $mouseover_open $tip_apt $mouseover_close>" $"Pasta /var/cache/apt/archives" "</td><td align=right>" "$(du -ch /var/cache/apt/archives/ | sed '$!d;s/\t.*//g')"'</label></td></tr>'
echo "<tr><td><INPUT TYPE=checkbox ID=tmp NAME=tmp VALUE=yes $tmp_check $mouseover_open $tip_tmp $mouseover_close><label for=tmp $mouseover_open $tip_tmp $mouseover_close>" $"Pasta /tmp" "</td><td align=right>" "$(du -ch /tmp | sed '$!d;s/\t.*//g')"'</label></td></tr>'
echo "<tr><td><INPUT TYPE=checkbox ID=cache NAME=cache VALUE=yes $cache_check $mouseover_open $tip_cache $mouseover_close><label for=cache $mouseover_open $tip_cache $mouseover_close>" $"Pasta .cache de todos os usuarios" "</td><td align=right>" "$(du -ch --block-size=M /home/*/.cache /root/.cache | sed '$!d;s/\t.*//g' 2> /dev/null)"'</label></td></tr>'
echo "<tr><td><INPUT TYPE=checkbox ID=thumbnail NAME=thumbnail VALUE=yes $thumbnail_check $mouseover_open $tip_thumbnail $mouseover_close><label for=thumbnail $mouseover_open $tip_thumbnail $mouseover_close>" $"Pasta .thumbnails de todos os usuarios" "</td><td align=right>" "$(du -ch --block-size=M /home/*/.thumbnails /root/.thumbnails | sed '$!d;s/\t.*//g' 2> /dev/null)"'</label></td></tr>'
echo "<tr><td><INPUT TYPE=checkbox ID=nepomuk NAME=nepomuk VALUE=yes $nepomuk_check $mouseover_open $tip_nepomuk $mouseover_close><label for=nepomuk $mouseover_open $tip_nepomuk $mouseover_close>" $"Cache do Nepomuk/Strigi de todos os usuarios" "</td><td align=right>" "$(du -ch --block-size=M /home/*/.kdesktop/share/apps/nepomuk/ /home/*/.kde/share/apps/nepomuk/ /root/.kde/share/apps/nepomuk/ /root/.kdesktop/share/apps/nepomuk/ | sed '$!d;s/\t.*//g' 2> /dev/null)"'</label></td></tr>'
echo "<tr><td><INPUT TYPE=checkbox ID=firefox NAME=firefox VALUE=yes $firefox_check $mouseover_open $tip_firefox $mouseover_close><label for=firefox $mouseover_open $tip_firefox $mouseover_close>" $"Cache do Firefox de todos os usuarios" "</td><td align=right>" "$(du -ch --block-size=M /home/*/.mozilla/firefox/*default/Cache /root/.mozilla/firefox/*default/Cache | sed '$!d;s/\t.*//g' 2> /dev/null)"'</label></td></tr>'
echo "<tr><td><INPUT TYPE=checkbox ID=trash NAME=trash VALUE=yes $trash_check $mouseover_open $tip_trash $mouseover_close><label for=trash $mouseover_open $tip_trash $mouseover_close>" $"Arquivos da lixeira de todos os usuarios" "</td><td align=right>" "$(du -ch --block-size=M /home/*/.local/share/Trash/files /root/.local/share/Trash/files | sed '$!d;s/\t.*//g' 2> /dev/null)"'</label></td></tr>'
echo "</table>"
echo "</div><div id=\"rodape\"><button type=submit value= $apply > <img src=\"/usr/share/bigcontrolcenter/default/images/ok.png\" style=\"vertical-align:top\" /> $apply </button> <button type=button value= $close onClick= parent.location='/usr/share/bigbashview/exemplo2.sh.htm?close=True'> <img src=\"/usr/share/bigcontrolcenter/default/images/cancel.png\" style=\"vertical-align:top\" /> $close </button></div>"
| true |
c92381e099758ae81886c1567775fed0fa133137 | Shell | baosi1234/custom-monitor-scripts | /Linux/Tomcat/current_busy_threads.sh | UTF-8 | 712 | 2.6875 | 3 | [] | no_license | #!/bin/bash -
#===============================================================================
#
# FILE: current_busy_threads.sh
#
# USAGE: ./current_busy_threads.sh
#
# DESCRIPTION:
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: YOUR NAME (),
# ORGANIZATION:
# CREATED: 06/29/2016 09:25
# REVISION: ---
#===============================================================================
set -o nounset    # Treat unset variables as an error
# Query Tomcat's JMX ThreadPool MBean for the "currentThreadsBusy"
# attribute of the http-nio-8080 connector via cmdline-jmxclient.
# The bare "-" is the tool's placeholder for "no user:password".
# 2>&1 is needed because cmdline-jmxclient writes its result to stderr;
# awk prints only the last field, i.e. the numeric value.
# NOTE(review): assumes the JMX remote port is 8090 on localhost and
# that the jar sits in the current working directory — confirm.
java -jar cmdline-jmxclient-0.10.3.jar - localhost:8090 Catalina:name=\"http-nio-8080\",type=ThreadPool currentThreadsBusy 2>&1 | awk '{print $NF}'
| true |
131c3b08678a7bb73bac844874d38bdb5aa0cee8 | Shell | Sidnioulz/PolicyAnalysis | /fix-iso-output-files.sh | UTF-8 | 204 | 3.21875 | 3 | [] | no_license | #!/bin/sh
# Re-encode every ISO-8859-1 text file under the current directory to
# UTF-8, in place.  Detection relies on file(1)'s description output.
#
# Fixes over the original:
#  * -type f: do not feed directories (or sockets, etc.) to file/iconv.
#  * IFS= read -r: do not mangle leading/trailing blanks or backslashes
#    in file names.
#  * file -b: print only the description, so a *filename* containing
#    "ISO" can no longer trigger a false positive; grep -q replaces the
#    temporary $encoding variable.
#  * mv only runs if iconv succeeded, so a failed conversion can no
#    longer truncate/clobber the original file.
find . -type f | while IFS= read -r file; do
	if file -b "$file" | grep -q ISO; then
		iconv -f ISO-8859-1 -t UTF-8 "$file" > "$file.new" &&
			mv -- "$file.new" "$file"
	fi
done
| true |
c94062208b036bf046117a164c1b66f56dbdd716 | Shell | EdwardColon-NOAA/NOAA_3drtma | /scripts/GSI/exrtma3d_obsprep_lghtn.sh | UTF-8 | 8,574 | 2.96875 | 3 | [] | no_license | #!/bin/sh --login
############################################################################
set -x
# working directory (provided by the calling J-job environment)
workdir=${DATA}
cd ${workdir}
# export MV2_ON_DEMAND_THRESHOLD=256 # if load module mvapich2 ?
# sub-cycle minute of the analysis (e.g. "00", "15", ...)
mm=$subcyc
subhtime=$subcyc
${ECHO} $PDY $cyc $mm
# START_TIME="${PDY}${cyc}" # YYYYMMDDHH
# BUG FIX: the fallback default was written as "{PDY} ${cyc}" (missing
# the "$"), which produced the literal string "{PDY} <hh>" and then
# failed the format validation below whenever START_TIME was unset.
START_TIME=${START_TIME:-"${PDY} ${cyc}"} # YYYYMMDD HH
# START_TIME="${PDY} ${cyc} ${subcyc} minutes" # YYYYMMDD HH MN
${ECHO} "${START_TIME}"
echo `echo "${START_TIME}" | ${AWK} '/^[[:digit:]]{10}$/'`
# Accept either "yyyymmddhh" (split into "yyyymmdd hh") or an already
# split "yyyymmdd hh"; anything else is fatal.
if [ `echo "${START_TIME}" | ${AWK} '/^[[:digit:]]{10}$/'` ]; then
START_TIME=`echo "${START_TIME}" | ${SED} 's/\([[:digit:]]\{2\}\)$/ \1/'`
elif [ ! "`echo "${START_TIME}" | ${AWK} '/^[[:digit:]]{8}[[:blank:]]{1}[[:digit:]]{2}$/'`" ]; then
echo "FATAL ERROR: start time, '${START_TIME}', is not in 'yyyymmddhh' or 'yyyymmdd hh' format"
err_exit 1
fi
# Shift the analysis time forward by the sub-cycle minutes.
START_TIME=`${DATE} -d "${START_TIME} ${subhtime} minutes"`
echo $START_TIME
# Compute date & time components for the analysis time
YYYYJJJHH00=`${DATE} +"%Y%j%H00" -d "${START_TIME}"`
YYYYMMDDHH=`${DATE} +"%Y%m%d%H" -d "${START_TIME}"`
YYYY=`${DATE} +"%Y" -d "${START_TIME}"`
MM=`${DATE} +"%m" -d "${START_TIME}"`
DD=`${DATE} +"%d" -d "${START_TIME}"`
HH=`${DATE} +"%H" -d "${START_TIME}"`
# Julian Day in format YYJJJHH (4-digit-year form; overwritten with the
# 2-digit-year form further below)
YYJJJHH=`${DATE} +"%Y%j%H" -d "${START_TIME}"`
# Previous cycle time, supplied by the environment as yyyymmddhh and
# validated/shifted exactly like START_TIME above.
PREVCYC_TIME=${PDYHH_cycm1}
${ECHO} "${PREVCYC_TIME}"
if [ `echo "${PREVCYC_TIME}" | ${AWK} '/^[[:digit:]]{10}$/'` ]; then
PREVCYC_TIME=`echo "${PREVCYC_TIME}" | ${SED} 's/\([[:digit:]]\{2\}\)$/ \1/'`
elif [ ! "`echo "${PREVCYC_TIME}" | ${AWK} '/^[[:digit:]]{8}[[:blank:]]{1}[[:digit:]]{2}$/'`" ]; then
echo "FATAL ERROR: previous cycle time, '${PREVCYC_TIME}', is not in 'yyyymmddhh' or 'yyyymmdd hh' format"
err_exit 1
fi
PREVCYC_TIME=`${DATE} -d "${PREVCYC_TIME} ${subhtime} minutes"`
echo $PREVCYC_TIME
# Julian Day in format YYJJJHH (two-digits year, day of year, hour.)
YYJJJHH=`${DATE} +"%y%j%H" -d "${START_TIME}"`
PREYYJJJHH=`${DATE} +"%y%j%H" -d "${PREVCYC_TIME}"`
# typeset -Z2 mm mmp1 mmp2 mmp3 # <<-- "-Z2" only work for K-Shell
mm=`${DATE} +"%M" -d "${START_TIME}"`
# BUG FIX: date +"%M" zero-pads (e.g. "08"), and bash treats a leading
# zero in arithmetic as octal, so $((08+1)) aborted with "value too
# great for base" for minutes 08 and 09 (printf %i had the same
# problem).  Force base-10 with the 10# prefix.
mmp1=$((10#${mm}+1))
mmp2=$((10#${mm}+2))
mmp3=$((10#${mm}+3))
# Re-pad all four values to two digits.
mm=`printf "%2.2i\n" $((10#${mm}))`
mmp1=`printf "%2.2i\n" $mmp1`
mmp2=`printf "%2.2i\n" $mmp2`
mmp3=`printf "%2.2i\n" $mmp3`
ymd=`${DATE} +"%Y%m%d" -d "${START_TIME}"`
ymdh=${YYYYMMDDHH}
hh=$HH
# BUFR Table including the description for HREF
${CP} -p ${FIX_GSI}/prepobs_prep_RAP.bufrtable ./prepobs_prep.bufrtable
# WPS GEO_GRID Data
${LN} -s ${PARM_WRF}/hrrr_geo_em.d01.nc ./geo_em.d01.nc
#
#--- Pre-process the lightning observations.  obsprep_lghtn selects the
#    input source: 1 = NCEP BUFR, 2 = ENTLN netcdf, 3 = Vaisala netcdf.
#
if [ ${obsprep_lghtn} -eq 1 ] ; then
${ECHO} " processing NCEP BUFR Lightning Data"
# copy the executable that processes RAP BUFR format lightning data
export pgm=${LGHTN_EXE:-"process_Lightning_bufr"}
${CP} ${LGHTN_EXEDIR}/${LGHTN_EXE} ./${pgm}
# find lightning bufr file (primary COM directory first, then backup)
if [ -s $COMINrap/rap.t${cyc}z.lghtng.tm00.bufr_d ] ; then
cp $COMINrap/rap.t${cyc}z.lghtng.tm00.bufr_d ./rap.t${cyc}z.lghtng.tm00.bufr_d
elif [ -s $COMINrap_e/rap.t${cyc}z.lghtng.tm00.bufr_d ] ; then
cp $COMINrap_e/rap.t${cyc}z.lghtng.tm00.bufr_d ./rap.t${cyc}z.lghtng.tm00.bufr_d
else
echo 'No bufr file found for lightning processing'
fi
# The executable reads the data through this fixed link name.
ln -s rap.t${cyc}z.lghtng.tm00.bufr_d lghtngbufr
echo ${PDY}${cyc} > ./lightning_cycle_date
YYYYMMDDHH=${PDY}${cyc}
minutetime=$subcyc
# Build the namelist on-the-fly (15-minute window ending at analysis time)
rm -f ./lightning_bufr.namelist
cat << EOF > lightning_bufr.namelist
 &SETUP
  analysis_time = ${YYYYMMDDHH},
  minute=${minutetime},
  trange_start=-15.0,
  trange_end=0.0,
 /
EOF
elif [ ${obsprep_lghtn} -eq 2 ] || [ ${obsprep_lghtn} -eq 3 ]; then
# processing ENTLN or Vaisala netcdf lightning data
if [ ${obsprep_lghtn} -eq 2 ] ; then
${ECHO} " processing ENTLN NETCDF Lightning Data"
# copy the executable that processes ENTLN lightning data
export pgm=${LGHTN_EXE:-"process_Lightning_entln"}
${CP} ${LGHTN_EXEDIR}/${LGHTN_EXE} ./${pgm}
# LIGHTNING_FILE=${LIGHTNING_ROOT}/nldn/netcdf
# LIGHTNING_FILE=${LIGHTNING_ROOT}/gld360/netcdf
LIGHTNING_FILE=${COMINlightning}/entln.t${cyc}z
elif [ ${obsprep_lghtn} -eq 3 ] ; then
${ECHO} " processing NETCDF(Vaisala) Lightning Data"
# copy the executable that processes NETCDF (Vaisala) lightning data
export pgm=${LGHTN_EXE:-"process_Lightning"}
${CP} ${LGHTN_EXEDIR}/${LGHTN_EXE} ./${pgm}
# LIGHTNING_FILE=${LIGHTNING_ROOT}/nldn/netcdf
# LIGHTNING_FILE=${LIGHTNING_ROOT}/gld360/netcdf
LIGHTNING_FILE=${COMINlightning}/vaisala.t${cyc}z
else
echo "Wrong set up for \$obsprep_lghtn. Exit"
err_exit 1
fi
# Link to the NLDN data: what look like the :05/:00 files of the current
# hour and the :55/:50 files of the previous hour (filename = YYJJJHH +
# minute + "0005r"); filenum counts how many were actually found.
filenum=0
if [ -r "${LIGHTNING_FILE}/${YYJJJHH}050005r" ]; then
((filenum += 1 ))
${LN} -sf ${LIGHTNING_FILE}/${YYJJJHH}050005r ./NLDN_lightning_${filenum}
else
${ECHO} " ${LIGHTNING_FILE}/${YYJJJHH}050005r does not exist"
fi
if [ -r "${LIGHTNING_FILE}/${YYJJJHH}000005r" ]; then
((filenum += 1 ))
${LN} -sf ${LIGHTNING_FILE}/${YYJJJHH}000005r ./NLDN_lightning_${filenum}
else
${ECHO} " ${LIGHTNING_FILE}/${YYJJJHH}000005r does not exist"
fi
if [ -r "${LIGHTNING_FILE}/${PREYYJJJHH}550005r" ]; then
((filenum += 1 ))
${LN} -sf ${LIGHTNING_FILE}/${PREYYJJJHH}550005r ./NLDN_lightning_${filenum}
else
${ECHO} " ${LIGHTNING_FILE}/${PREYYJJJHH}550005r does not exist"
fi
if [ -r "${LIGHTNING_FILE}/${PREYYJJJHH}500005r" ]; then
((filenum += 1 ))
ls ${LIGHTNING_FILE}/${PREYYJJJHH}500005r
${LN} -sf ${LIGHTNING_FILE}/${PREYYJJJHH}500005r ./NLDN_lightning_${filenum}
else
${ECHO} " ${LIGHTNING_FILE}/${PREYYJJJHH}500005r does not exist"
fi
# wider time window of lightning obs data
# NOTE(review): "[ ! 0 ]" is always false ("0" is a non-empty string),
# so this extra-window block never runs; it looks intentionally
# disabled — confirm before "fixing" it.
if [ ! 0 ] ; then
if [ -r "${LIGHTNING_FILE}/${PREYYJJJHH}450005r" ]; then
((filenum += 1 ))
${LN} -sf ${LIGHTNING_FILE}/${PREYYJJJHH}450005r ./NLDN_lightning_${filenum}
else
${ECHO} " ${LIGHTNING_FILE}/${PREYYJJJHH}450005r does not exist"
fi
if [ -r "${LIGHTNING_FILE}/${PREYYJJJHH}400005r" ]; then
((filenum += 1 ))
${LN} -sf ${LIGHTNING_FILE}/${PREYYJJJHH}400005r ./NLDN_lightning_${filenum}
else
${ECHO} " ${LIGHTNING_FILE}/${PREYYJJJHH}400005r does not exist"
fi
if [ -r "${LIGHTNING_FILE}/${PREYYJJJHH}350005r" ]; then
((filenum += 1 ))
${LN} -sf ${LIGHTNING_FILE}/${PREYYJJJHH}350005r ./NLDN_lightning_${filenum}
else
${ECHO} " ${LIGHTNING_FILE}/${PREYYJJJHH}350005r does not exist"
fi
fi
echo "found GLD360 files: ${filenum}"
#
# Alaska lightning data (ascii; try the :00:00 file, then :00:01)
#
ifalaska=false
if [ -r "${COMINlghtn}/alaska/ascii/${YYYYMMDDHH}0100" ]; then
${LN} -sf ${COMINlghtn}/alaska/ascii/${YYYYMMDDHH}0100 ./ALSKA_lightning
ifalaska=true
else
if [ -r "${COMINlghtn}/alaska/ascii/${YYYYMMDDHH}0101" ]; then
${LN} -sf ${COMINlghtn}/alaska/ascii/${YYYYMMDDHH}0101 ./ALSKA_lightning
ifalaska=true
fi
fi
# List of all linked NLDN files, consumed by the executable.
rm -f ./filelist_lightning
ls ./NLDN_lightning_* > ./filelist_lightning
# Build the namelist on-the-fly (fed to the executable on stdin below)
rm -f ./lightning.namelist
cat << EOF > lightning.namelist
 &SETUP
  analysis_time = ${YYYYMMDDHH},
  NLDN_filenum = ${filenum},
  IfAlaska = ${ifalaska},
 /
EOF
fi
# Run process lightning
# Remove any stale stderr capture from a previous run.
if [ -f errfile ] ; then
rm -f errfile
fi
# prep_step / startmsg / postmsg / err_chk / cpreq are NCO production
# utilities provided by the calling job environment (not defined here).
. prep_step
startmsg
msg="***********************************************************"
postmsg "$jlogfile" "$msg"
if [ $obsprep_lghtn -eq 1 ] ; then
msg=" begin pre-processing NCEP BUFR lightning data"
elif [ $obsprep_lghtn -eq 2 ] ; then
msg=" begin pre-processing ENTLN netcdf lightning data"
elif [ $obsprep_lghtn -eq 3 ] ; then
msg=" begin pre-processing netcdf (Vaisala) lightning data"
fi
postmsg "$jlogfile" "$msg"
msg="***********************************************************"
postmsg "$jlogfile" "$msg"
# Run Processing lightning under MPI.  The netcdf variants take the
# namelist on stdin; the BUFR variant presumably reads its own
# ./lightning_bufr.namelist (written above) — confirm.
runline="${MPIRUN} -np ${np} ./${pgm}"
if [ ${obsprep_lghtn} -eq 1 ] ; then
$runline > ${pgmout} 2>errfile
else
$runline < lightning.namelist > ${pgmout} 2>errfile
fi
export err=$?; err_chk
msg="JOB $job FOR $RUN HAS COMPLETED NORMALLY"
postmsg "$jlogfile" "$msg"
# The executable names its output differently in BUFR vs netcdf mode.
if [ $obsprep_lghtn -eq 1 ] ; then
lghtng_bufr="LightningInGSI_bufr.bufr"
else
lghtng_bufr="LightningInGSI.bufr"
fi
# Deliver the product to COM; a missing output file is a hard failure.
if [ -f ${DATA}/${lghtng_bufr} ] ; then
cpreq ${DATA}/${lghtng_bufr} ${COMINobsproc_rtma3d}/${RUN}.t${cyc}z.${lghtng_bufr}
else
msg="WARNING $pgm terminated normally but ${DATA}/${lghtng_bufr} does NOT exist."
${ECHO} "$msg"
postmsg "$jlogfile" "$msg"
exit 1
fi
exit 0
| true |
6e8dfd8c441ba8e7d4bc014bb89c3a85d2806d1c | Shell | CroixDuSud/Autom | /MODULE 2/mesScripts/Livrables_MiniProjet_Antoine/ScriptJenkins.sh | UTF-8 | 4,041 | 3.34375 | 3 | [] | no_license | ########################################
# #
# Type : Mini-projet déploiement #
# #
# Créateur : Antoine Bianchi-Bourgeois #
# #
########################################
#!/bin/bash
# ,,))))))));,
# __)))))))))))))),
# \|/ -\(((((''''((((((((.
# -*-==//////(('' . `)))))),
# /|\ ))| o ;-. '((((( ,(,
# ( `| / ) ;))))' ,_))^;(~
# | | | ,))((((_ _____------~~~-. %,;(;(>';'~
# o_); ; )))(((` ~---~ `:: \ %%~~)(v;(`('~
# ; ''''```` `: `:::|\,__,%% );`'; ~
# | _ ) / `:|`----' `-'
# ______/\/~ | / /
# /~;;.____/;;' / ___--,-( `;;;/
# / // _;______;'------~~~~~ /;;/\ /#
# // | | / ; \;;,#\
# (<_ | ; /',/-----' #_>
# \_| ||_ //~;~~~~~~~~#~
# `\_| (,~~
# \~#\
# ~#~######
# NOTE(review): the "#!/bin/bash" line above sits *after* the banner
# comments, so it is not an effective shebang (a shebang must be the
# very first line of the file) — consider moving it to line 1.
# Variables
nom_fichier="apache-tomcat-7.0.75" # basename of the Tomcat archive
nom_archive=${nom_fichier}".tar.gz" # archive file name
# --- Start of script ---
# Helper: append everything piped into it to the run log while still
# echoing it to stdout.
logs(){
tee -a $HOME/logs_script_Tomcat_Jenkins.txt
}
# Initialise the log file for this run.
echo "------------------------------------------------------------" >> $HOME/logs_script_Tomcat_Jenkins.txt
echo "Initialisation des logs du script d'installation et de lancement Tomcat & Jenkins." >> $HOME/logs_script_Tomcat_Jenkins.txt
echo "Le tout supervisé par une licorne !" >> $HOME/logs_script_Tomcat_Jenkins.txt
echo "" >> $HOME/logs_script_Tomcat_Jenkins.txt
# Proceed only if both the Tomcat archive and the Jenkins WAR are in $HOME.
if [ -e $HOME/$nom_archive ] && [ -e $HOME/jenkins45.war ]
then
# Unpack the archive unless the extracted directory already exists.
if [ -d $HOME/$nom_fichier ] # check for a previous extraction
then
echo "Le dossier décompressé existe déjà." | logs
else
# Extract the archive.
# NOTE(review): "A && B | logs || C | logs" also runs C when the
# logging pipeline itself fails, not only when A fails — kept as-is.
tar xvf $nom_archive && echo "Le dossier $nom_fichier n'existait pas et a été créé." | logs || echo "Erreur : Problème lors de la décompression du fichier." | logs
fi
# Install Tomcat under /usr/share/tomcat (move, not copy).
if [ -d /usr/share/tomcat ] # already installed?
then
echo "Le dossier tomcat existe déjà." | logs
else
# Move the extracted directory to /usr/share.
sudo mv $nom_fichier /usr/share/tomcat && echo "Le dossier tomcat n'existait pas et a été créé." | logs || echo "Erreur : Problème lors du déplacement du fichier" | logs
fi
if [ -e /usr/share/tomcat/webapps/jenkins45.war ] # WAR already deployed?
then
echo "Le fichier jenkins45.war existe." | logs
else
# Copy the Jenkins WAR into Tomcat's webapps directory.
sudo cp jenkins45.war /usr/share/tomcat/webapps && echo "Le fichier jenkins45.war n'existait pas et a été copié." | logs || echo "Erreur : Problème lors de la copie du fichier jenkins45.war" | logs
fi
cd /usr/share/tomcat/bin # enter the installed Tomcat's bin directory
./startup.sh && echo "Lancement de Tomcat effectué." | logs || echo "Erreur : Problème lors du lancement du Tomcat." | logs
cd # back to the user's home directory
echo ""
echo "Script terminé."
echo "Merci de vérifier que Jenkins est correctement lancé."
echo "Ouvrez un navigateur et aller à l'adresse suivante : {adresse_serveur}:8080/jenkins45"
echo "Changer le port si vous avez modifié le paramétrage de Tomcat."
echo "Fin de l'exécution du script." | logs
echo "" >> $HOME/logs_script_Tomcat_Jenkins.txt
else
echo "Erreur : Les fichiers requis pour l'exécution du script ne sont pas présent." | logs
echo "" >> $HOME/logs_script_Tomcat_Jenkins.txt
fi
| true |
ab90c9c9a35fa3ba4e7f176bfd170b21ec4c34d9 | Shell | pengdan01/spider | /crawler/crawler/crawler_control.sh | UTF-8 | 3,397 | 3.390625 | 3 | [] | no_license | #!/bin/bash
set -u
# Driver for one full crawl round.  Stages (each individually
# switchable via run_* flags defined in the conf file given as $1):
#   1. selector        - generate the links to be crawled
#   2. dispatcher      - split the crawl tasks across download workers
#   3. download worker - run the workers on all crawler hosts via pssh,
#                        then poll until every worker reports done
#   4. link_merge + offline_analyze - post-processing, run in parallel
if [ $# -ne 1 ]; then
  echo "Error, Usage: $0 conf_file"
  exit 1
fi
conf_file=$1
if [ ! -e "${conf_file}" ]; then
  echo "Error, conf file[${conf_file}] not found"
  exit 1
fi
# Defines run_selector/run_dispatcher/run_download_worker/
# run_link_merge/run_offline_analyze and pssh_home_dir.
source "${conf_file}"

## First, start the Selector to generate the links that need crawling.
# BUG FIX (applies to every test below): the run_* flags were unquoted;
# with an empty flag, "[ = y -o = Y ]" made test error out and the
# stage was silently skipped.  All expansions are now quoted.
if [ "${run_selector}" = "y" -o "${run_selector}" = "Y" ]; then
  bash -x crawler/selector/crawler_selector.sh crawler/selector/crawler_selector.conf \
    </dev/null &> selector.log&
  wait $!
  if [ $? -ne 0 ]; then
    echo "Error, crawler_selector.sh fail"
    exit 1
  fi
else
  echo "INFO, the process crawler_selector is skipped."
fi

## Second, dispatch all the crawl tasks to the download workers.
if [ "${run_dispatcher}" = "y" -o "${run_dispatcher}" = "Y" ]; then
  bash -x crawler/dispatcher/crawler_dispatcher.sh crawler/dispatcher/crawler_dispatcher.conf \
    </dev/null &> dispatcher.log&
  wait $!
  if [ $? -ne 0 ]; then
    echo "Error, crawler_dispatcher.sh fail"
    exit 1
  fi
else
  echo "INFO, the process crawler_dispatcher is skipped."
fi

## Third, start all the download workers to crawl the web.
## pssh runs the worker command on every crawler host concurrently.
if [ "${run_download_worker}" = "y" -o "${run_download_worker}" = "Y" ]; then
  cmd="cd ~/wly_crawler_online && bash -x crawler/task_distribute.sh crawler/dispatcher/crawler_deploy.conf"
  #${pssh_home_dir}/pssh -A -i -h hosts_info/crawler_host.txt -p 128 -O StrictHostKeyChecking=no -t 0 "${cmd}"
  ${pssh_home_dir}/pssh -i -h hosts_info/crawler_host.txt -p 128 -O StrictHostKeyChecking=no -t 0 "${cmd}"
  if [ $? -ne 0 ]; then
    echo "Error, pssh[${cmd}] fail, program exit with 1"
    exit 1
  fi
else
  echo "INFO, the process crawler_download_workder is skipped."
fi

## Now wait for all the downloaders to finish their tasks, polling the
## status collector once a minute.
while [ true ]
do
  bash crawler/tools/info_collect.sh hosts_info/crawler_host.txt >tmp_info_collect 2>tmp_info_collect.err
  if [ $? -ne 0 ]; then
    echo "Error, collect status info from the downloader fail, you need to handle "
    exit 1
  fi
  total_num=`cat tmp_info_collect | wc -l`
  # Exactly 3 status lines signals completion — presumably the
  # collector's fixed "all done" output; confirm against info_collect.sh.
  if [ $total_num -eq 3 ]; then
    echo "OK ,all the loaders has finished there tasks"
    break
  else
    echo "Get Info line: $total_num, still some downloaders not finished, sleep and wait..."
    sleep 60
  fi
done
rm -f tmp_info_collect*

## Fourth, start the offline analysis.
## Fifth, start link_merge.
## The two processes can run simultaneously; each is backgrounded and
## waited on afterwards.
if [ "${run_link_merge}" = "y" -o "${run_link_merge}" = "Y" ]; then
  bash -x crawler/link_merge/link_merge.sh crawler/link_merge/link_merge.conf \
    </dev/null &>merge.log&
  pid1=$!
else
  echo "INFO, the process crawler_link_merge is skipped."
fi
if [ "${run_offline_analyze}" = "y" -o "${run_offline_analyze}" = "Y" ]; then
  bash -x crawler/offline_analyze/offline_analyze.sh crawler/offline_analyze/offline_analyze.conf \
    </dev/null &> analyze.log&
  pid2=$!
else
  echo "INFO, the process crawler_offline_analyze is skipped."
fi
if [ "${run_link_merge}" = "y" -o "${run_link_merge}" = "Y" ]; then
  wait ${pid1}
  if [ $? -ne 0 ]; then
    echo "Error, link_merge fail"
    exit 1
  fi
fi
if [ "${run_offline_analyze}" = "y" -o "${run_offline_analyze}" = "Y" ]; then
  wait ${pid2}
  if [ $? -ne 0 ]; then
    echo "Error, offline_analyze fail"
    exit 1
  fi
fi

echo "Done!"
exit 0
| true |
8954b852f67a7d4c1b3b219b5ecc2507acf33923 | Shell | stalinkay/sh-config | /.shlogin | UTF-8 | 975 | 2.578125 | 3 | [] | no_license | # -*- mode: sh; -*-
# Filename: .shlogin
# Authors: Vincent Demeester
# License: This file is licensed under the GPL v2.
# --------------------------------------------------------------------------- #
# This file is sourced only for login shells. It should contain commands that
# should be executed in login shells. This is the common one, should be called
# from .zlogin or .bashrc (?)
#
# --------------------------------------------------------------------------- #
# Allow disabling of entire environment suite
test -n "$INHERIT_ENV" && return 0
# Idempotence guard: bail out if this file was already sourced.
test -n "$shlogin_loaded" && return 0

# Run the pre-hook scripts (ZDOT_RUN_HOOKS / sh_load_status come from
# the surrounding shell-config suite, not defined here).
. $ZDOT_RUN_HOOKS .sh/hook/shlogin.pre

# {{{ Keychain -------------------------------------------------------------- #
# Start or reuse a keychain-managed agent, but only if keychain is
# installed; --eval prints the env assignments that eval applies here.
command -v keychain >/dev/null && {
    eval $(keychain --eval --inherit any-once --quick --quiet)
}
# }}}

sh_load_status .shlogin
. $ZDOT_RUN_HOOKS .sh/hook/shlogin.post

# Mark this file as loaded for the guard at the top.
shlogin_loaded=y
| true |
fb2b8e24237ffdb0196460270507d38f9d6e632d | Shell | guidanoli/cptk | /libexec/call-routine | UTF-8 | 351 | 3.828125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#
# Summary: Make a call to a routine
#
# Usage: libexec/call-routine <name> [<arguments>]

# Bring in the routine helpers (presumably defines what
# libexec/get-routine relies on — confirm against libexec/routines).
source libexec/routines

# Exit on failure
set -e

# If <name> is present, resolve it to a routine path and invoke it
# with the remaining arguments.
if [[ $# -gt 0 ]]
then
    routine=$(libexec/get-routine "$1")
    shift
    "$routine" "$@"
else
    # else, exit: report the missing <name> argument.
    # argname is read by the sourced error script.
    argname='<name>'
    source libexec/err/missing-argument
fi
| true |
add641d8204d2ca94f6dd261d7cc3ede1bc24cbc | Shell | rverma-jm/geodesic | /rootfs/etc/profile.d/use-profile.sh | UTF-8 | 138 | 2.890625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | if [ -n "${AWS_PROFILE}" ] && [ -f "${AWS_CONFIG_FILE}" ] && [ -f "${AWS_SHARED_CREDENTIALS_FILE}" ]; then
assume-role ${AWS_PROFILE}
fi
| true |
dbf066ae087e88b0c168509b30d5682abeb02793 | Shell | zhengchangsulab/pcrm2 | /convert_site_bed_fa.sh | UTF-8 | 206 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Usage: convert_site_bed_fa.sh <reference_fasta>
# For each *.bed file in the current directory, extract its site
# sequences with bedtools into <bed>.fa, then prefix every FASTA header
# with the motif name (the bed filename with ".sites.bed" stripped).
#
# Fixes over the original: the glob is iterated directly instead of
# being stored unquoted in a scalar; all expansions are quoted so paths
# with spaces work; and when no .bed files exist the literal "*.bed"
# pattern is skipped instead of being passed to bedtools.
ref=$1
for bed in *.bed
do
    [ -e "$bed" ] || continue
    bedtools getfasta -fi "$ref" -bed "$bed" -fo "$bed.fa"
    motif_name=${bed/.sites.bed/}
    # NOTE(review): a motif name containing "/" would break this sed
    # expression — confirm bed filenames never contain one.
    sed -i "s/>/>${motif_name}:/" "$bed.fa"
done
| true |
8a0faa296be6b0a0f40b5cff5169a89fb0e01891 | Shell | simonsdave/cloudfeaster | /bin/install-dev-env-scripts.sh | UTF-8 | 1,826 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#
# should be run out of the spider repo - typically expect cfg4dev
# master will maintain reasonble backward compatibility
#
# curl -s -L "https://raw.githubusercontent.com/simonsdave/cloudfeaster/master/bin/install-dev-env-scripts.sh" | bash -s --
#
#
# Fetch bin/<script> from the cloudfeaster repo at tag v<version> and
# install it, user-executable, into the active virtualenv's bin dir.
#   $1 - cloudfeaster version (without the leading "v")
#   $2 - script filename
#
download_script() {
    # Use locals instead of clobbering the caller's globals
    # (CLF_VERSION / SCRIPT / DEST_SCRIPT in the original).
    local clf_version=${1:-}
    local script=${2:-}
    local dest_script=${VIRTUAL_ENV}/bin/${script}
    # BUG FIX: without --fail, curl exits 0 on an HTTP 404/500 and the
    # error page was installed as an executable; --fail makes HTTP
    # errors return non-zero, which aborts the script under "set -e".
    curl \
        -s \
        -L \
        --fail \
        -o "${dest_script}" \
        "https://raw.githubusercontent.com/simonsdave/cloudfeaster/v${clf_version}/bin/${script}"
    chmod u+x "${dest_script}"
    return 0
}
set -e

# No positional arguments are accepted.
if [ $# != 0 ]; then
echo "usage: $(basename "$0")" >&2
exit 1
fi

# A virtualenv must be active: download_script installs into
# ${VIRTUAL_ENV}/bin.
if [ "${VIRTUAL_ENV:-}" == "" ]; then
echo "Virtual env not activated - could not find environment variable VIRTUAL_ENV" >&2
exit 2
fi

REPO_ROOT_DIR=$(git rev-parse --show-toplevel)

# Pin to the cloudfeaster version declared in the repo's setup.py
# (strips everything around the "cloudfeaster==<version>" token).
CLF_VERSION=$(grep cloudfeaster== "${REPO_ROOT_DIR}/setup.py" | sed -e "s|^[[:space:]]*['\"]cloudfeaster==||g" | sed -e "s|['\"].*$||g")

# Derive the dev-env version from the image tag in that cloudfeaster
# release's CircleCI config; "latest" maps to the master branch.
DEV_ENV_VERSION=$(curl -s -L "https://raw.githubusercontent.com/simonsdave/cloudfeaster/v${CLF_VERSION}/.circleci/config.yml" | grep 'image:' | head -1 | sed -e 's|[[:space:]]*$||g' | sed -e 's|^.*dev-env:||g')
if [ "${DEV_ENV_VERSION}" == "latest" ]; then DEV_ENV_VERSION=master; fi

# Download and run the dev-env installer via a temp file
# (mktemp -t DAS is the BSD/macOS fallback form).
INSTALL_DEV_ENV=$(mktemp 2> /dev/null || mktemp -t DAS)
curl -s -L https://raw.githubusercontent.com/simonsdave/dev-env/${DEV_ENV_VERSION}/bin/install-dev-env.sh -o "${INSTALL_DEV_ENV}"
chmod a+x "${INSTALL_DEV_ENV}"
"${INSTALL_DEV_ENV}" --dev-env-version "${DEV_ENV_VERSION:-}"
rm "${INSTALL_DEV_ENV}"

# Install the cloudfeaster helper scripts into the virtualenv.
download_script "${CLF_VERSION}" "run-all-spiders.sh"
download_script "${CLF_VERSION}" "run-spider.sh"
download_script "${CLF_VERSION}" "check-circleci-config.sh"
download_script "${CLF_VERSION}" "generate-circleci-config.py"

exit 0
| true |
d3a02a1c8f20a6ddf72cb7247bb0e16c67b43565 | Shell | showoowohs/imx6_download_tool | /utp_com/flash.sh | UTF-8 | 4,018 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Block device exposed by the board's UTP mass-storage gadget; filled
# in by the detection loop below.
DEVICE=
# Tool and image locations (hard-coded to this workstation's layout).
IMX_USB_PATH=/home/benson/imx6_download_tool/imx_usb_loader
UTP_COM_PATH=/home/benson/imx6_download_tool/utp_com/
FLASH_IMAGE_DIR=/home/benson/imx6_download_tool/images/files/android
MKSDCARD_DIR=/home/benson/imx6_download_tool/images
# Flash flashing os: push U-Boot + kernel to the board over USB with
# imx_usb and fail fast on the two known error signatures.
echo "Loading U-boot and Kernel."
cd $IMX_USB_PATH
IMX_USB_PRINT=`./imx_usb 2>&1`
# NOTE(review): `...grep -q...` inside `if` relies on a bash quirk —
# the substitution produces no output, and bash then uses the status of
# the last command substitution as the condition.  It works, but
# `if echo ... | grep -q ...` would be the conventional form.
if `echo "$IMX_USB_PRINT" | grep -q "Could not open device"`; then
echo "imx_usb returned error: Could not open device"
exit 1
fi
if `echo "$IMX_USB_PRINT" | grep -q "err=-"`; then
echo "imx_usb returned error:"
echo $IMX_USB_PRINT
exit 1
fi
echo "Loading Initramfs."
# Find the correct device: poll for up to 30 seconds while the board's
# mass-storage gadget enumerates, looking for the block device with
# major 8, minor 32 (i.e. /dev/sdc) in "ls -l" output.
# NOTE(review): parsing "ls -l" means that if several devices match,
# $DEVICE holds multiple words; the quoted tests below then reject it
# cleanly instead of flashing an arbitrary disk.
for i in {1..30}
do
	#DEVICE=/dev/sdc
	DEVICE=`ls -l /dev/sd* | grep "8,\s*32" | sed "s/^.*\/d/\/d/"`
	if [ -n "$DEVICE" ]; then
		break
	fi
	sleep 1
done
if [ "x$DEVICE" = "x" ]; then
	echo "Device $DEVICE not found" 1>&2
	exit 1
elif [ ! -e "$DEVICE" ]; then
	# BUG FIX: $DEVICE was unquoted here; a multi-word value made
	# "test" error out with "binary operator expected" instead of
	# taking this branch.
	echo "Device $DEVICE not found" 1>&2
	exit 1
fi
# Flash sequence, driven through utp_com:
#  * -c "$ <cmd>"  runs <cmd> in the shell on the device side;
#  * -c "send" -f  uploads a file; the literal string $FILE (escaped as
#    \$FILE here) in a later device command refers to that upload —
#    per utp_com usage, confirm against its docs;
#  * -c "frf"      flushes the device-side memory to storage.
# Two progress messages were wrong and are fixed below (marked FIX):
# "Formatting data partition" while *sending* the encryptable-data
# shell archive, and "Formatting system partition" while formatting
# the *cache* partition p6.
cd $UTP_COM_PATH
echo "clean up u-boot parameter"
# clean up u-boot parameter
./utp_com -d $DEVICE -c "$ dd if=/dev/zero of=/dev/mmcblk0 bs=512 seek=1536 count=16"
echo "access boot partition 1"
# access boot partition 1
./utp_com -d $DEVICE -c "$ echo 1 > /sys/devices/platform/sdhci-esdhc-imx.3/mmc_host/mmc0/mmc0:0001/boot_config"
# Sending U-Boot
echo "Sending U-Boot"
./utp_com -d $DEVICE -c "send" -f ${FLASH_IMAGE_DIR}/u-boot-6q.bin
# write U-Boot to sd card
echo "write U-Boot to sd card"
./utp_com -d $DEVICE -c "$ dd if=\$FILE of=/dev/mmcblk0 bs=512 seek=2 skip=2"
# access user partition and enable boot partion 1 to boot
echo "access user partition and enable boot partion 1 to boot"
./utp_com -d $DEVICE -c "$ echo 8 > /sys/devices/platform/sdhci-esdhc-imx.3/mmc_host/mmc0/mmc0:0001/boot_config"
# Sending partition shell
echo "Sending partition shell"
./utp_com -d $DEVICE -c "send" -f ${MKSDCARD_DIR}/mksdcard-android.sh.tar
# Partitioning...
echo "Partitioning..."
./utp_com -d $DEVICE -c "$ tar xf \$FILE "
./utp_com -d $DEVICE -c "$ sh mksdcard-android.sh /dev/mmcblk0"
# Formatting sd partition
echo "Formatting sd partition"
./utp_com -d $DEVICE -c "$ ls -l /dev/mmc* "
# Sending kernel uImage
echo "Sending kernel uImage"
./utp_com -d $DEVICE -c "send" -f ${FLASH_IMAGE_DIR}/boot.img
# write boot.img
echo "write boot.img"
./utp_com -d $DEVICE -c "$ dd if=\$FILE of=/dev/mmcblk0p1"
# flush the memory.
echo "flush the memory."
./utp_com -d $DEVICE -c "frf"
# Formatting data partition
echo "Formatting data partition"
./utp_com -d $DEVICE -c "$ mkfs.ext4 -b 4096 -m 0 /dev/mmcblk0p4"
# Sending data partition shell
echo "Sending data partition shell"
# ^ FIX: previously echoed "Formatting data partition" here.
./utp_com -d $DEVICE -c "send" -f ${MKSDCARD_DIR}/mk-encryptable-data-android.sh.tar
# Extracting data partition shell
echo "Extracting data partition shell"
./utp_com -d $DEVICE -c "$ tar xf \$FILE "
# Making data encryptable
echo "Making data encryptable"
./utp_com -d $DEVICE -c "$ sh mk-encryptable-data-android.sh /dev/mmcblk0 /dev/mmcblk0p4"
# Formatting system partition
echo "Formatting system partition"
./utp_com -d $DEVICE -c "$ mkfs.ext4 /dev/mmcblk0p5"
# Formatting cache partition
echo "Formatting cache partition"
# ^ FIX: previously echoed "Formatting system partition" here.
./utp_com -d $DEVICE -c "$ mkfs.ext4 /dev/mmcblk0p6"
# flush the memory.
echo "flush the memory."
./utp_com -d $DEVICE -c "frf"
# Formatting device partition
echo "Formatting device partition"
./utp_com -d $DEVICE -c "$ mkfs.ext4 /dev/mmcblk0p7"
# Sending and writting system.img
echo "Sending and writting system.img"
./utp_com -d $DEVICE -c "pipe dd of=/dev/mmcblk0p5 bs=512" -f ${FLASH_IMAGE_DIR}/system.img
# flush the memory.
echo "flush the memory."
./utp_com -d $DEVICE -c "frf"
# Sending and writting recovery.img
echo "Sending and writting recovery.img"
./utp_com -d $DEVICE -c "pipe dd of=/dev/mmcblk0p2 bs=512" -f ${FLASH_IMAGE_DIR}/recovery.img
sleep 1
# Finishing rootfs write
echo "Finishing rootfs write"
./utp_com -d $DEVICE -c "frf"
# Done
echo "Done"
./utp_com -d $DEVICE -c "$ echo Update Complete!"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.