blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
5760e41bb752aebc86f96e0313ec9c84fa32a76d | Shell | MichaelBunker/php | /exercises/concept/bootstrap.sh | UTF-8 | 2,121 | 3.53125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -xeuo pipefail

# Scaffold a new PHP concept exercise for the Exercism track.
# Usage: bootstrap.sh <new-slug> <NewClassName> <author-github-handle>
# Must be run from the exercises/concept directory; creates
# <slug>/{.docs,.meta} with empty stubs, a jo-generated config.json,
# and PHP solution/test skeletons.
new_slug="${1}"
new_classname="${2}"
author="${3}"

# All paths below are relative to the concept directory — refuse to run elsewhere.
if [[ "$(basename "$(pwd)")" != "concept" ]]; then
  echo "run from the concept dir"
  exit 1
fi

base_dir="${new_slug}"
mkdir "${base_dir}"
docs_dir=".docs"
mkdir -p "${base_dir}/${docs_dir}"
meta_dir=".meta"
mkdir -p "${base_dir}/${meta_dir}"

solution_file="${new_classname}.php"
test_file="${new_classname}Test.php"
touch "${base_dir}/${solution_file}"
touch "${base_dir}/${test_file}"

hints_doc="${docs_dir}/hints.md"
instructions_doc="${docs_dir}/instructions.md"
introduction_doc="${docs_dir}/introduction.md"
introduction_doc_tpl="${docs_dir}/introduction.md.tpl"
touch "${base_dir}/${hints_doc}"
touch "${base_dir}/${instructions_doc}"
touch "${base_dir}/${introduction_doc}"
touch "${base_dir}/${introduction_doc_tpl}"

config_json="${meta_dir}/config.json"
design_doc="${meta_dir}/design.md"
exemplar_file="${meta_dir}/exemplar.php"
touch "${base_dir}/${config_json}"
touch "${base_dir}/${design_doc}"
touch "${base_dir}/${exemplar_file}"

# Generate .meta/config.json.
# BUG FIX: each key=value pair handed to jo is now quoted as a single word.
# jo emits JSON arrays that can contain spaces (e.g. an author name like
# "Jane Doe"); the previous unquoted $(jo ...) substitutions word-split that
# output into several broken arguments.
jo -p \
  "authors=$(jo -a "${author}")" \
  "contributors=$(jo -a < /dev/null)" \
  "files=$(jo \
    "solution=$(jo -a "${solution_file}")" \
    "test=$(jo -a "${test_file}")" \
    "exemplar=$(jo -a "${exemplar_file}")")" \
  language_version=">=8.1" \
  blurb="Learn about ____" \
  icon="" \
  "forked_from=$(jo -a "track/exercise")" >> "${base_dir}/${config_json}"

# Solution stub (heredoc: ${new_classname} expands, \$ stays literal for PHP).
cat <<- PHP_STUB >> "${base_dir}/${solution_file}"
<?php
class ${new_classname}
{
    public function stub()
    {
        throw new \BadFunctionCallException("Implement the function");
    }
}
PHP_STUB

# PHPUnit test stub.
cat <<- PHP_STUB >> "${base_dir}/${test_file}"
<?php
class ${new_classname}Test extends PHPUnit\Framework\TestCase
{
    public static function setUpBeforeClass(): void
    {
        require_once '${new_classname}.php';
    }
    /**
     * @testdox some test stub
     * @task_id 1
     */
    public function testStub()
    {
        \$class = new ${new_classname}();
        \$actual = \$class->method();
        \$expected = null;
        \$this->assertEquals(\$expected, \$actual);
    }
}
PHP_STUB
| true |
ae280e941335fc6311741cc7a2c48b43b0c0027e | Shell | SpragueLab/preproc_shFiles | /make_grayplots.sh | UTF-8 | 1,271 | 3.921875 | 4 | [] | no_license | #!/bin/bash
# make_grayplots.sh
#
# Runs AFNI's 3dGrayplot function on all run??.nii.gz files within specified $PROJECT/$SUBJ/$SESS directory and saves pngs in specified $PROJECT/$SUBJ/align_QC/$SUBJ_$SESS_run??_grayplot.png
#
# Assumes typical (~2019-2021) Sprague lab directory structure - PROJECT/SUBJ/SESS/
#
# INPUTS:
# - PROJECT: project directory
# - SUBJ: subject ID
# - SESS: session name
#
# OUTPUTS:
# - saves a .png file as produced in AFNI's 3dGrayplot function as: $DATAROOT/$PROJECT/$SUBJ/align_QC/$SUBJ_$SESS_run??_grayplot.png
#
# assumes you have a DATAROOT environment variable that says where project directories live
#
# TC Sprague 11/7/2021
# Positional arguments: project dir, subject ID, session name.
EXPTDIR=$1
SUBJ=$2
SESS=$3
# size of image to be saved
XDIM=2560
YDIM=1440
# Expand the run??.nii.gz glob into an array up front so file names are
# handled as discrete items (the old unquoted scalar relied on
# word-splitting and would break on paths containing spaces).
NIIFILES=( "$DATAROOT/$EXPTDIR/$SUBJ/$SESS"/run??.nii.gz )
# where to save files
OUTDIR=$DATAROOT/$EXPTDIR/$SUBJ/align_QC
OUTSUF=_grayplot.png
for thisfile in "${NIIFILES[@]}"
do
  echo "Making grayplot of: $thisfile"
  # strip the directory and the .nii.gz extension to get e.g. "run01"
  tmprun="${thisfile##*/}"
  tmprunnum="${tmprun%%.*}"
  3dGrayplot -prefix "$OUTDIR/${SUBJ}_${SESS}_${tmprunnum}$OUTSUF" -dimen "$XDIM" "$YDIM" -input "$thisfile"
done
| true |
21aa206d59581f4f5bb9780bf77297431339e431 | Shell | llorllale/k8s | /sidetree-mock/scripts/seed_sidetree_mock.sh | UTF-8 | 1,052 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#
# Copyright SecureKey Technologies Inc. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
set -e

echo "Seeding Sidetree ..."

# BUG FIX: this script runs under #!/bin/sh, and '==' inside [ ] is a
# bashism — on dash/ash the test fails with "unexpected operator" and,
# combined with set -e, kills the script even when the variable is set.
# Use the POSIX -z test instead (':-' guards against an unset variable).
if [ -z "${SIDETREE_OUTPUT_DIR:-}" ]; then
    echo "SIDETREE_OUTPUT_DIR env not set" >&2
    exit 1
fi

mkdir -p "${SIDETREE_OUTPUT_DIR}"

# Static JWK for stakeholder-one (test fixture — not a real secret).
cat > "${SIDETREE_OUTPUT_DIR}/stakeholder-one_jwk.json" <<EOF
{
  "kty": "OKP",
  "kid": "key1",
  "d": "CSLczqR1ly2lpyBcWne9gFKnsjaKJw0dKfoSQu7lNvg",
  "crv": "Ed25519",
  "x": "bWRCy8DtNhRO3HdKTFB2eEG5Ac1J00D0DQPffOwtAD0"
}
EOF

# Consortium config; ${DOMAIN} is expanded from the environment.
cat > "${SIDETREE_OUTPUT_DIR}/testnet.json" <<EOF
{
  "consortiumData": {
    "domain": "testnet.${DOMAIN}",
    "genesisBlock": "6e2f978e16b59df1d6a1dfbacb92e7d3eddeb8b3fd825e573138b3fd77d77264",
    "policy": {
      "cache": {
        "maxAge": 600
      },
      "numQueries": 1
    }
  },
  "membersData": [
    {
      "domain": "stakeholder-one.${DOMAIN}",
      "policy": {
        "cache": {
          "maxAge": 300
        }
      },
      "endpoints": [
        "https://sidetree-mock.${DOMAIN}/sidetree/v1"
      ],
      "privateKeyJwkPath": "/config-data/stakeholder-one_jwk.json"
    }
  ]
}
EOF
| true |
6077daad1634b974c94f1b48c38030fcd16261c0 | Shell | thiagohersan/FofoquemeVoxPopuli | /bashrc_cmds.sh | UTF-8 | 388 | 2.671875 | 3 | [] | no_license | # run VoxPopuli python
# Start the VoxPopuli python app (plus a static http-server on :8666)
# unless a root-owned python process is already running, then restart
# the app every ~10 minutes forever.
isrun=$(ps -u root | grep python | wc -l)
if [ "$isrun" -lt 1 ]
then
  cd /home/pi/Dev/FofoquemeVoxPopuli/Python/VoxPopuli || exit 1
  http-server -p 8666 &
  # The original spelled this "while [ 1 -le 20 ]" — an always-true test;
  # 'while true' makes the intended infinite loop explicit.
  while true
  do
    sudo python VoxPopuli.py &
    killsudopid=$!   # PID of the sudo wrapper ($! is unaffected by the foreground sleep)
    sleep 1
    # PID(s) of the root-owned python child(ren); intentionally left unquoted
    # in the kill below so multiple PIDs expand to separate arguments.
    killpythonpid=$(ps -u root | awk '/python/{print $1}')
    sleep 600
    sudo kill -9 $killpythonpid
    sudo kill -9 "$killsudopid"
  done
fi
| true |
acb984124449e10a148de47a76dd51e9b8129c55 | Shell | carpy1918/toolkit | /nodeStats/.svn/text-base/nodeStatsCommand.sh.svn-base | UTF-8 | 344 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Deploy nodeStats-html.pl to each GFS server, run it there via sudo,
# and pull the generated HTML pages back into the local web root.
home='/home/curt/'
webroot='/srv/www/htdocs/nodeStats'
server=( gfs1.carpy.net gfs2.carpy.net )
# mkdir -p is a no-op when the directory already exists, so the previous
# "[ ! -d ... ]" guard was redundant.
mkdir -p "$webroot"
for i in "${server[@]}"
do
  echo "server: $i"
  scp nodeStats-html.pl "$i:$home"
  ssh -t "$i" "chmod 755 nodeStats-html.pl;sudo $home/nodeStats-html.pl"
  # The glob in the remote path is expanded by the remote shell; quoting it
  # locally just prevents accidental local expansion/word-splitting.
  scp "$i:$home/$i*.html" "$webroot/"
done
| true |
c40e5e6c27a8bfdc63c3f29bfc1de6f5e98a62bc | Shell | woodie/gmaps | /build.sh | UTF-8 | 820 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Fetch the signed gMaps midlet (once), then rebuild it as an unsigned
# jar/jad pair in dist/ with a replacement icon.
SOURCE=http://boostapps.com/files

mkdir -p downloads
cd downloads || exit 1
if [ -f gMapsSigned.jad ]; then
  echo "The source jad file exists."
else
  echo "Downloading jad file."
  curl -O "$SOURCE/gMapsSigned.jad"
fi
if [ -f gMapsSigned.jar ]; then
  echo "The source jar file exists."
else
  echo "Downloading jar file."
  curl -O "$SOURCE/gMapsSigned.jar"
fi
cd .. || exit 1

cp downloads/gMapsSigned.jar dist/gMapsUnsigned.jar
cd res || exit 1 # Add appropriate icon
jar uf ../dist/gMapsUnsigned.jar icon72x72.png
cd .. || exit 1

# Rewrite the jad: point at the unsigned jar, swap the icon entry, and
# drop the RSA-SHA1 signature lines.
# BUG FIX: IFS=/read -r preserves whitespace and backslashes, and the
# quoted printf keeps each line intact (the old unquoted `echo $line`
# collapsed internal whitespace and mangled backslashes).
while IFS= read -r line; do
  if [[ "$line" =~ "MIDlet-Jar-URL" ]]; then
    echo "MIDlet-Jar-URL: gMapsUnsigned.jar"
  elif [[ "$line" =~ "MIDlet-Icon" ]]; then
    echo "MIDlet-Icon: icon72x72.png"
  elif [[ ! "$line" =~ "RSA-SHA1" ]]; then
    printf '%s\n' "$line"
  fi
done < downloads/gMapsSigned.jad > dist/gMapsUnsigned.jad
| true |
494a76d839f22b5779798217be9eec1e0acbfdc1 | Shell | cloudera/director-scripts | /faster-bootstrap/build-ami.sh | UTF-8 | 7,895 | 4.15625 | 4 | [
"EPL-1.0",
"Classpath-exception-2.0",
"LGPL-2.0-or-later",
"W3C",
"GPL-1.0-or-later",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"MPL-2.0",
"LicenseRef-scancode-json-pd",
"LicenseRef-scancode-protobuf",
"MPL-2.0-no-copyleft-exception",
"CC0-1.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-warranty-disclaimer",
"ISC",
"CDDL-1.1",
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"OFL-1.1",
"Apache-2.0",
"AGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"Unlicense",
"MPL-1.1",
"BSD-2-Clause",
"LGPL-2.1-only",
"LicenseRef-scancode-oracle-bcl-java-platform-2013",
"LGPL-2.1-or-later",
"Plexus",
"MITNFA",
"WTFPL",
"CDDL-1.0",
"MIT"
] | permissive | #!/usr/bin/env bash
#
# (c) Copyright 2015 Cloudera, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Bash 4+ required — the base-AMI lookup reads ${BASE_AMIS[$os]} from the
# sourced base_amis_*.sh files, which needs bash 4 associative arrays.
if (( ${BASH_VERSION%%.*} < 4 )); then
  echo "bash 4 or higher is required. The current version is ${BASH_VERSION}."
  exit 1
fi
# Prints out a usage statement
# Print the full help text to stdout; does not exit (callers exit themselves).
usage()
{
cat << EOF
This script will create a new AMI and preload it with CDH parcels to speed up
bootstrapping time for Cloudera Altus Director.
You must ensure AWS credentials are available in the environment for this to
work properly. Please refer to Packer documentation here:
https://www.packer.io/docs/builders/amazon-ebs.html.
Extra packer options can be provided in the PACKER_VARS environment variable
prior to executing this script.
Usage: $0 [options] <aws-region> <os> [<name>] [<parcel-url>] [<repository-url>]
<aws-region>: The AWS region that you want the new AMI to be housed on.
<os>: The OS that you want to use as a base.
      Valid choices: rhel6x, rhel7x, centos6x, centos7x (x = minor version)
[<name>]: An optional descriptive name for the new AMI.
          Default is calculated dynamically (specified by "AUTO")
[<parcel-url>]: Optional parcel URL to use for preloading.
                Default https://archive.cloudera.com/cdh6/6.3/parcels/
[<repository-url>]: Optional Cloudera Manager yum repo to use for preloading.
                    Default https://archive.cloudera.com/cm6/6.3/redhat7/yum/
[<repository-key-url>]: Optional URL for Cloudera Manager yum repo GPG key.
                        Required only if repository-url is not at archive.cloudera.com
Be sure to specify <repository-url> for operating systems other than RHEL 7 or
CentOS 7.
OPTIONS:
   -h
      Show this help message
   -a <ami-info>
      Use a specific base AMI
   -d
      Run packer in debug mode
   -j <version>
      Install a specific Java version
      Valid choices: 1.7, 1.8 (default)
   -J <jdk-repository>
      Yum repo to use for JDK RPM
      Valid choices: Director (default), CM
   -p
      Pre-extract CDH parcels
   -P
      Associate public IP address
   -6
      Configure image for CDH 6
For the -a option, specify for <ami-info> a quoted string with the following
elements, separated by spaces:
  ami-id "pv"|"hvm" ssh-username root-device-name
Example: -a "ami-00000000 hvm centos72 /dev/sda1"
EOF
}
# Finds the recommended AMI for a region and OS
# Finds the recommended AMI for a region and OS.
# Arguments: $1 - OS key (e.g. centos72), $2 - AWS region
# Outputs (stdout): the space-separated AMI info string from the BASE_AMIS
#   associative array declared in scripts/building/base_amis_<region>.sh,
#   "unsupported_region" if no such file exists, or the empty string when
#   the OS key is not present in the array.
find_base_ami_info()
{
  local os="$1"
  local region="$2"

  if [[ ! -f "scripts/building/base_amis_${region}.sh" ]]; then
    echo "unsupported_region"
  else
    # Side effect: sources the region file into the current shell (defines BASE_AMIS).
    source "scripts/building/base_amis_${region}.sh"
    echo "${BASE_AMIS[$os]}"
  fi
}
# Parses the parcel for an OS from the list of parcels at the supplied URL.
get_parcel_url()
{
local cdh_url="$1"
local os="$2"
case $os in
centos6* | rhel6*)
echo "${cdh_url}$(curl -L -s "${cdh_url}" | grep "el6.parcel<" | sed -E "s/.*>(.*parcel)<\/a.*/\1/" 2>/dev/null)"
;;
centos7* | rhel7*)
echo "${cdh_url}$(curl -L -s "${cdh_url}" | grep "el7.parcel<" | sed -E "s/.*>(.*parcel)<\/a.*/\1/" 2>/dev/null)"
;;
*)
echo ""
;;
esac
}
# Return the Cloudera Altus Director yum repository URL matching the OS
# family of $1 (EL6 for centos6*/rhel6*, EL7 for centos7*/rhel7*).
# Prints the empty string for any other OS.
get_director_yum_url() {
  local os="$1"
  local url=""

  case $os in
    centos6* | rhel6*) url="https://archive.cloudera.com/director/redhat/6/x86_64/director/2.7/" ;;
    centos7* | rhel7*) url="https://archive.cloudera.com/director/redhat/7/x86_64/director/2.7/" ;;
  esac

  echo "$url"
}
# ---- Option parsing: defaults, then getopts flags (see usage above). ----
AMI_OPT=
C6=
DEBUG=
JAVA_VERSION=1.8
JDK_REPO=Director
PRE_EXTRACT=
PUBLIC_IP=
while getopts "a:dj:J:pP6h" opt; do
  case $opt in
    a)
      AMI_OPT="$OPTARG"
      ;;
    d)
      DEBUG=1
      ;;
    j)
      JAVA_VERSION="$OPTARG"
      ;;
    J)
      JDK_REPO="$OPTARG"
      ;;
    p)
      PRE_EXTRACT=1
      ;;
    P)
      PUBLIC_IP=1
      ;;
    6)
      C6=1
      ;;
    h)
      usage
      exit
      ;;
    ?)
      usage
      exit
      ;;
  esac
done
shift $((OPTIND - 1))
# Between 2 and 6 positional args: region, os[, name, parcel-url, repo-url, key-url]
if [ $# -lt 2 ] || [ $# -gt 6 ]; then
  usage
  exit 1
fi
if ! hash packer 2> /dev/null; then
  echo "Packer is not installed or is not on the path. Please correct this before continuing."
  exit 2
else
  echo "Found packer version: $(packer version)"
fi
DEFAULT_CDH_URL=https://archive.cloudera.com/cdh6/6.3/parcels/
# Gather arguments into variables
AWS_REGION=$1
OS=$2
NAME=${3-AUTO}
CDH_URL=${4-${DEFAULT_CDH_URL}}
CM_REPO_URL=${5-"https://archive.cloudera.com/cm6/6.3/redhat7/yum/"}
CM_GPG_KEY_URL=$6
# Validate OS TBD
# Validate CM_GPG_KEY_URL: a GPG key URL is mandatory for non-Cloudera repos.
if [[ -z $CM_GPG_KEY_URL && ! $CM_REPO_URL =~ ^https?://archive.cloudera.com ]]; then
  echo "The URL for the RPM GPG key must be supplied for a custom Cloudera Manager repository"
  exit 3
fi
# Look up AMI if necessary. AMI_INFO is intentionally built via unquoted
# expansion: the info string is four space-separated fields
# (ami-id, pv|hvm, ssh-username, root-device-name).
if [[ -z $AMI_OPT ]]; then
  AMI_INFO=( $(find_base_ami_info "$OS" "$AWS_REGION") )
  if [[ ${AMI_INFO[0]} == "unsupported_region" ]]; then
    echo "Base AMIs for region $AWS_REGION are not recorded, use -a to specify an AMI"
    exit 3
  elif [[ -z "${AMI_INFO[*]}" ]]; then
    echo "A base AMI is not recorded for OS $OS in $AWS_REGION, use -a to specify an AMI"
    exit 3
  fi
else
  AMI_INFO=( $AMI_OPT )
fi
echo "Using AMI ${AMI_INFO[0]} for OS $OS"
AMI=${AMI_INFO[0]}
VIRTUALIZATION=${AMI_INFO[1]}
if [[ $VIRTUALIZATION != "pv" && $VIRTUALIZATION != "hvm" ]]; then
  echo "Invalid AMI virtualization type $VIRTUALIZATION"
  usage
  exit 3
fi
USERNAME=${AMI_INFO[2]}
ROOT_DEVICE_NAME=${AMI_INFO[3]}
# Compute name if necessary
if [[ -z $NAME || $NAME == "AUTO" ]]; then
  NAME="$OS CM/CDH preload"
fi
# Get the appropriate parcel file
PARCEL_URL="$(get_parcel_url "$CDH_URL" "$OS")"
if [[ -z $PARCEL_URL ]]; then
  echo "No parcels available for OS $OS"
  exit 4
fi
# Validate the Java version
VALID_JAVA_VERSIONS=("1.7" "1.8")
for v in "${VALID_JAVA_VERSIONS[@]}"; do
  if [[ "$JAVA_VERSION" == "$v" ]]; then
    JAVA_VERSION_VALID=1
    break
  fi
done
if [[ -z $JAVA_VERSION_VALID ]]; then
  echo "Invalid Java version $JAVA_VERSION"
  exit 5
fi
# Validate JDK repo, set JDK repo URL
case $JDK_REPO in
  CM)
    JDK_REPO_URL="$CM_REPO_URL"
    ;;
  Director)
    JDK_REPO_URL=$(get_director_yum_url "$OS")
    if [[ -z $JDK_REPO_URL ]]; then
      echo "Cloudera Altus Director yum repo is not available for OS $OS"
      exit 6
    fi
    ;;
  *)
    echo "Invalid choice for JDK repo: $JDK_REPO"
    usage
    exit 6
esac
# Set up packer variables. PACKER_VARS comes from the caller's environment
# and is deliberately word-split into individual packer arguments.
PACKER_VARS_ARRAY=( $PACKER_VARS )
PACKER_VARS_ARRAY+=(-var "region=$AWS_REGION" -var "parcel_url=$PARCEL_URL" -var "cm_repository_url=$CM_REPO_URL")
if [[ -n $CM_GPG_KEY_URL ]]; then
  PACKER_VARS_ARRAY+=(-var "cm_gpg_key_url=$CM_GPG_KEY_URL")
fi
PACKER_VARS_ARRAY+=(-var "jdk_repository_url=$JDK_REPO_URL")
PACKER_VARS_ARRAY+=(-var "ami=$AMI" -var "ami_virtualization_type=$VIRTUALIZATION" -var "ssh_username=$USERNAME" -var "root_device_name=$ROOT_DEVICE_NAME")
PACKER_VARS_ARRAY+=(-var "ami_prefix=$NAME")
PACKER_VARS_ARRAY+=(-var "os=$OS")
PACKER_VARS_ARRAY+=(-var "java_version=$JAVA_VERSION")
if [[ -n $PRE_EXTRACT ]]; then
  PACKER_VARS_ARRAY+=(-var "preextract_parcel=true")
fi
if [[ -n $PUBLIC_IP ]]; then
  PACKER_VARS_ARRAY+=(-var "associate_public_ip_address=true")
fi
if [[ -z $C6 ]]; then
  PACKER_VARS_ARRAY+=(-var "c6=false")
fi
# Set up other packer options
PACKER_OPTS=()
if [[ -n $DEBUG ]]; then
  PACKER_OPTS+=(-debug)
fi
JSON=rhel.json
packer build "${PACKER_VARS_ARRAY[@]}" "${PACKER_OPTS[@]}" packer-json/"$JSON"
| true |
e782e4a0bff60c42318e08d39496a53a987bae58 | Shell | goodheart/ShellRecord | /081702.sh | UTF-8 | 297 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Demo of the shell's built-in test command: `test expr` and `[ expr ]`
# are equivalent, and the brackets must be surrounded by spaces.
# (Translated from the original Chinese comment.)
x=5;y=10
#test $x -gt $y
#[ $x -gt $y ]
#name=Tom
#[ $name = [Tt]?? ]
#[[ $name = [Tt]?? ]]
#name=Tom;
#[ -z $name ];
#name2=Andy;
#[ $name != $name2 ];
int1=1;
int2=2;
# BUG FIX: the original wrote `[ int1 -eq 1 ]` (missing $), which compares
# the literal string "int1" with -eq — test errors out with
# "integer expression expected" and returns status 2 instead of 0.
[ "$int1" -eq 1 ]
echo $?
21e911b58fad39da051f4df68842a74feda54690 | Shell | acastello/catplot | /test.sh | UTF-8 | 153 | 2.6875 | 3 | [] | no_license | if [ "$#" -gt 0 ] ; then
    # target host was given on the command line
    dom="$1"
else
    # no argument: fall back to the default gateway address
    dom="192.168.1.1"
fi
# Stream one latency value (ms) per line for downstream plotting:
# sed -u keeps output unbuffered, and the grep drops the initial
# "PING ..." header line. Runs until interrupted.
ping -i 0.3 "$dom" | sed -u 's/.*time=\(.*\)ms/\1/g' | grep -v --line-buffered PING
| true |
1000e33eb07246005b9b081fec424a7c3a4cfa55 | Shell | LSanti94/doti3 | /bini3/,volume | UTF-8 | 2,025 | 3.9375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# You can call this script like this:
# $./volume.sh up
# $./volume.sh down
# $./volume.sh mute
# Print the current Master volume as a bare percentage number (no '%').
function get_volume {
  amixer -D pulse get Master | grep '%' | head -n 1 | cut -d '[' -f 2 | cut -d '%' -f 1
}
# Exit status 0 when Master is muted (amixer reports "[off]"), non-zero otherwise.
function is_mute {
  amixer -D pulse get Master | grep '%' | grep -oE '[^ ]+$' | grep off > /dev/null
}
# Repeat the character $2 exactly $1 times on stdout (no trailing newline).
function rchar {
  local remaining=$1 out=""
  while [ "$remaining" -gt 0 ]; do
    out="$out$2"
    remaining=$((remaining - 1))
  done
  printf '%s' "$out"
}
# Show the current volume as a 20-segment bar via dunstify.
# Icon name is "av-<level>"; -r 2593 replaces the previous notification
# instead of stacking new ones.
function send_notification {
  volume=`get_volume`
  # Make the bar with the special character ─ (it's not dash -)
  # https://en.wikipedia.org/wiki/Box-drawing_character
  # bar=$(seq -s "█" $(($volume / 5)) | sed 's/[0-9]//g')
  # barrest=$(seq -s "░" $((100 / 5 - $volume / 5)) | sed 's/[0-9]//g')
  bar=$(rchar $(($volume / 5)) "█")
  barrest=$(rchar $((20 - $volume / 5)) "░")
  # Map the numeric volume to an icon suffix.
  # NOTE(review): 0 -> "muted" and 100 -> "muted-blocking" look odd for a
  # volume-level icon; presumably these match the installed icon theme — confirm.
  case $(($volume)) in
    (0) s="muted";; # 0
    ([0-9]|[1][0-9]) s="low";; # <20
    ([2-3][0-9]) s="low";; # 20>=x>40
    ([4-5][0-9]) s="medium";; # 40>=x>60
    ([6-7][0-9]) s="medium";; # 60>=x>80
    ([8-9][0-9]) s="high";; # 80>=x>100
    (100) s="muted-blocking";; # 100
  esac
  # Send the notification
  dunstify -i "av-$s" -r 2593 -u normal "$(printf "$bar$barrest [%03d]" $volume)" -a volume
}
# flog ,volume "$1"
# Dispatch on the requested action: notify | up | down | mute.
case $1 in
  notify)
    send_notification
    ;;
  up)
    # Set the volume on (if it was muted)
    amixer -D pulse set Master on > /dev/null
    # Up the volume (+ 5%)
    amixer -D pulse sset Master 10%+ > /dev/null
    send_notification
    ;;
  down)
    amixer -D pulse set Master on > /dev/null
    amixer -D pulse sset Master 10%- > /dev/null
    send_notification
    ;;
  mute)
    send_notification
    # Toggle mute
    amixer -D pulse set Master 1+ toggle > /dev/null
    if is_mute ; then
      dunstify -i audio-volume-muted -t 8 -r 2593 -u normal "Mute"
    else
      send_notification
    fi
    ;;
esac
# Print the resulting volume and nudge i3blocks to refresh its volume block.
echo `get_volume`
pkill -SIGRTMIN+11 i3blocks
218af94109e6074442f6826acca4e52e07a1affc | Shell | kettlewell/packer-template | /scripts/gen-metadata.sh | UTF-8 | 524 | 3.875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
OUTPUT=$1
if [ -z "$OUTPUT" ]; then
echo "Usage: $0 <output file>"
exit 1
fi
GIT_REVISION=$(git rev-parse HEAD)
DATE=$(date -R)
if hash packer.io 2>/dev/null; then
PACKER_VERSION=$(packer.io --version)
elif hash packer 2>/dev/null; then
PACKER_VERSION=$(packer --version)
else
PACKER_VERSION='packer not found'
fi
echo "# Packer Template Info" > $OUTPUT
echo "git revision: $GIT_REVISION" >> $OUTPUT
echo "build date: $DATE" >> $OUTPUT
echo "packer version: $PACKER_VERSION" >> $OUTPUT
| true |
54baf9a227aa7a0f3da9db5e3f3a24577a2e62ca | Shell | avinetworks/openstack-installer | /k8s/k8s-install.sh | UTF-8 | 2,533 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Locale pinned for predictable tool output parsing.
export LC_ALL=C
# Admin credentials for the image/flavor/network setup below.
source ~/admin-openrc.sh
# Download image
wget http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
# Upload image to glance
glance image-create --name xenial \
  --disk-format qcow2 \
  --container-format bare \
  --file ./xenial-server-cloudimg-amd64-disk1.img \
  --visibility public
# Create flavors
openstack flavor create m1.k8smaster --id auto --public --ram 2048 --disk 16 --vcpus 4
openstack flavor create m1.k8snode --id auto --public --ram 1024 --disk 10 --vcpus 2
# Create network
./k8s-nw.sh
# Switch to the demo tenant for keys, security groups and VMs.
source ~/demo-openrc.sh
# Create keypair
mkdir ~/k8skeys
ssh-keygen -t rsa -N "" -f ~/k8skeys/id_rsa
openstack keypair create --public-key ~/k8skeys/id_rsa.pub k8skey
# Create security groups: allow everything in from anywhere and between
# master/node groups (lab setup — intentionally wide open).
openstack security group create sg-k8s-master
openstack security group create sg-k8s-node
openstack security group rule create sg-k8s-master --ingress --ethertype 'IPv4' --protocol any --remote-ip 0.0.0.0/0
openstack security group rule create sg-k8s-master --ingress --ethertype 'IPv6' --protocol any --remote-ip ::/0
openstack security group rule create sg-k8s-master --ingress --ethertype 'IPv4' --protocol any --remote-group sg-k8s-master
openstack security group rule create sg-k8s-master --ingress --ethertype 'IPv6' --protocol any --remote-group sg-k8s-master
openstack security group rule create sg-k8s-master --ingress --ethertype 'IPv4' --protocol any --remote-group sg-k8s-node
openstack security group rule create sg-k8s-master --ingress --ethertype 'IPv6' --protocol any --remote-group sg-k8s-node
openstack security group rule create sg-k8s-node --ingress --ethertype 'IPv4' --protocol any --remote-ip 0.0.0.0/0
openstack security group rule create sg-k8s-node --ingress --ethertype 'IPv6' --protocol any --remote-ip ::/0
openstack security group rule create sg-k8s-node --ingress --ethertype 'IPv4' --protocol any --remote-group sg-k8s-node
openstack security group rule create sg-k8s-node --ingress --ethertype 'IPv6' --protocol any --remote-group sg-k8s-node
openstack security group rule create sg-k8s-node --ingress --ethertype 'IPv4' --protocol any --remote-group sg-k8s-master
openstack security group rule create sg-k8s-node --ingress --ethertype 'IPv6' --protocol any --remote-group sg-k8s-master
# Launch VMs
./k8s-create-vms.sh
./k8s-fip.sh
# k8s-fip.sh writes the master's floating IP to this file.
mfip=`cat /root/k8smaster-fip`
# Install kubeadm init
scp -i ~/k8skeys/id_rsa k8s-setup.sh ubuntu@$mfip:/tmp
# NOTE(review): hardcoded root password in plain text — acceptable only for
# a throwaway lab; consider key-based auth instead.
sshpass -p "avi123" ssh -o StrictHostKeyChecking=no root@$mfip /tmp/k8s-setup.sh
| true |
3064de1e5871d9664557f77f20981ecf24ce024d | Shell | foobar66/googlewiz | /system/etc/init.d/googlewiz.sh | UTF-8 | 1,666 | 3.453125 | 3 | [] | no_license | BB="/system/xbin/busybox"
# Absolute paths/wrappers for the Android system tools used below, so the
# script does not depend on PATH at boot time.
TB="/system/bin/toybox"
TEE="$TB tee -a"
CMD=/system/bin/cmd
SETPROP="$TB setprop"
# note that getprop is going from toolbox, not toybox !!!
GETPROP="/system/bin/toolbox getprop"
RESETPROP="/sbin/resetprop"
DUMPSYS="/system/bin/dumpsys"
PM=/system/bin/pm
SETTINGSPUT="$CMD settings put"
XARGS="$TB xargs"
RM=/system/bin/rm
FIND=/system/xbin/find
SQLITE3=/system/xbin/sqlite3
MAGISKHIDE=/sbin/magiskhide
# Block until /sdcard/Android exists (i.e. the user data partition is
# mounted), polling once per second.
# Arguments: $1 - log file appended to via $TEE.
# Note: the "weird" warning fires only once, exactly at the 20-second mark.
waitforsdcard () {
  local CNT=0
  while [ ! -d /sdcard/Android ]; do
    CNT=$(( $CNT + 1 ))
    sleep 1
    if [ "$CNT" -eq 20 ]; then
      echo "** weird, slept for > $CNT seconds waiting for /sdcard/Android" | $TEE $1
    fi
  done
  echo "-- slept for $CNT seconds waiting for /sdcard/Android" | $TEE $1
}
# Terse logging by default; touch /data/.noshortlog to get verbose sysset logs.
SHORT=true
[ -e /data/.noshortlog ] && SHORT=false
# Write a value into a sysfs/procfs tunable file and log old -> new value.
# Arguments: $1 - value to write, $2 - target file path.
# Globals: SHORT (log verbosity), TEE, LOGFILE.
# Logs a failure line if the file is missing or the write fails.
sysset () {
  if [ -e "$2" ]; then
    OLDVAL=$(cat $2)
    echo "$1" > "$2"
    if [ $? -ne 0 ]; then
      echo "** sysset $1 $2 [FAILED, non-zero return status]" | $TEE $LOGFILE
    else
      # Re-read to confirm what the kernel actually accepted.
      NEWVAL=$(cat $2)
      if [ "$SHORT" = true ]; then
        echo "   $2 [$OLDVAL => $NEWVAL]" | $TEE $LOGFILE
      else
        echo "-- cat $2 => " $OLDVAL | $TEE $LOGFILE
        echo "   echo $1 > $2" | $TEE $LOGFILE
        echo "   (new) cat $2 => " $NEWVAL | $TEE $LOGFILE
      fi
    fi
  else
    echo "** $2 does not exist (script: $0)" | $TEE $LOGFILE
  fi
}
# Apply a value to every file named $2 found under /sys/ (via sysset),
# skipping anything whose path mentions L2TP.
# Arguments: $1 - value to write, $2 - file name to search for.
set_debug () {
  echo "   setting $2 in /sys/ to $1" | $TEE $LOGFILE
  for i in $(find /sys/ -name $2 -type f 2>/dev/null | grep -v L2TP); do
    sysset "$1" "$i"
  done
}
| true |
6af123f12c4b74fc072728585fece741e20587de | Shell | araymund83/CASFRI | /conversion/sh/load_qc02.sh | UTF-8 | 1,268 | 3.265625 | 3 | [] | no_license | #!/bin/bash -x
# This script loads the Quebec (QC02) into PostgreSQL
# There is a updated version of this dataset: QC/SourceDataset/v.00.04/PEE_ORI_PROV.gdb
# However here we load the original to match the CASFRI04 loading script
# The format of the source dataset is a geodatabase
# The year of photography is in a photo year shapefile that needs to loaded separately
# Load into a target table in the schema defined in the config file.
# If the table already exists, it can be overwritten by setting the "overwriteFRI" variable
# in the configuration file.
######################################## Set variables #######################################
# common.sh supplies friDir, targetFRISchema, gdalFolder, pg_connection_string,
# layer_creation_options, other_options and overwrite_tab.
source ./common.sh
inventoryID=QC02
srcFileName=DDE_20K_PEU_ECOFOR_ORI_VUE_SE
srcFullPath="$friDir/QC/$inventoryID/data/inventory/PEE_ORI_PROV.gdb"
fullTargetTableName=$targetFRISchema.qc02
########################################## Process ######################################
# Run ogr2ogr: copy the geodatabase layer into PostgreSQL, tagging every row
# with its source file name and inventory id.
"$gdalFolder/ogr2ogr" \
-f PostgreSQL "$pg_connection_string" "$srcFullPath" \
-nln $fullTargetTableName $layer_creation_options $other_options \
-sql "SELECT *, '$srcFileName' AS src_filename, '$inventoryID' AS inventory_id FROM '$srcFileName'" \
-progress $overwrite_tab

source ./common_postprocessing.sh
fd02652b668842d5daa5b81df8e1ebebab1843aa | Shell | fpcardoso/utils | /scripts/github/clone | UTF-8 | 308 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# Clone a GitHub repository. Pass -f anywhere on the command line to
# preselect the user "fpcardoso"; otherwise the user name is prompted for.
usuario=""
for argumento in "$@"
do
  if [ "$argumento" = "-f" ]; then
    usuario="fpcardoso"
  fi
done

if [ -z "$usuario" ]; then
  echo "Digite usuário"
  # -r keeps backslashes in the input literal
  read -r usuario
fi

echo "Digite repositório que deseja clonar"
read -r repositorio

# Quoted URL so unusual names cannot word-split or glob.
git clone "https://www.github.com/$usuario/$repositorio"
| true |
537fee78872f6b878aaa919e0bc472f79b0a3942 | Shell | PaulTaykalo/icfp-2017 | /view | UTF-8 | 273 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Wrap the map-data file named by $1 into an HTML viewer page
# (viewer/<name>.html, based on viewer/index.html) and open it.
FILENAME="$1"

# BUG FIX: $FILENAME is now quoted everywhere — names with spaces previously
# word-split both the sed program and the redirection targets.
# NOTE: the name is still interpolated into the sed replacement, so names
# containing '/' or '&' would need escaping; fine for simple map names.
sed 's/Map Viewer/'"$FILENAME"'/g' viewer/index.html > "viewer/$FILENAME.html"
{
  echo '<div id="data">'
  cat -- "$FILENAME"
  echo '</div>'
} >> "viewer/$FILENAME.html"
open "viewer/$FILENAME.html"
| true |
defbf34dcba08fcf790a9887395826e4dae5fe72 | Shell | minscof/karotz-openkarotz-firmware | /firmware/usr/yaffs_start.sh | UTF-8 | 2,442 | 3.25 | 3 | [] | no_license | #!/bin/bash
#----------------------------------------------------------------------------
# OPEN KAROTZ SYSTEM
# ---------------------------------------------------------------------------
. /karotz/scripts/update_functions.sh
. /karotz/scripts/led_functions.sh
. /usr/scripts/yaffs_start_functions.sh
# Solid violet LED: network connectivity confirmed.
function led_internet_ok {
  led_fixe $VIOLET
}
# Solid green LED: update check finished.
function led_check_update_ok {
  led_fixe $GREEN
}
# ---------------------------------------------------------------------------
# KILLALL_KAROTZ_SYSTEM
# ---------------------------------------------------------------------------
# Something failed. Let's kill the remaining parts of the system, if any.
# return : nothing
# NOTE(review): not invoked anywhere in the visible startup flow below —
# presumably kept for manual/failure use; confirm before removing.
# ---------------------------------------------------------------------------
function killall_karotz_system {
  logger -s "[START] killall karotz system."
  /bin/killall immortaldog > /dev/null
}
# ---- Boot sequence: LED feedback, optional startup sound, network wait,
# ---- update check, then OpenKarotz services (usb, time, cron, daemon). ----
logger -s "[START] starting yaffs."
led_no_conf
# Play the startup jingle in the background if sounds are enabled.
if [ -f /etc/conf/playsounds ] ; then
  madplay /usr/karotz/res/sounds/Karotz_lumiere_bleuCiel.mp3 &
  logger -s "[START] playsounds TRUE"
else
  logger -s "[START] playsounds FALSE"
fi
# Blocks until the network is up; exit status 0 == connected.
/usr/bin/python /usr/scripts/wait_until_connected.py
if [ $? -eq 0 ]; then
  start_dbus
  #dbus_led_internet
  led_internet_ok
  [ "$AUTO_UPDATE" = "yes" ] && check_updates
  led_check_update_ok
  /bin/killall led > /dev/null
  # ----------------------------------------
  # Open Karotz modification
  # (stock Karotz bricks/controller deliberately disabled)
  # ----------------------------------------
  #start_karotz_bricks
  #/usr/karotz/bin/immortaldog /var/run/karotz/controller.pid /usr/karotz/bin/controller
  # ----------------------------------------
  killall madplay
  if [ -f /etc/conf/playsounds ] ; then
    madplay /usr/karotz/res/sounds/karotz_allume.mp3
    logger -s "[START] playsounds TRUE"
  else
    logger -s "[START] playsounds FALSE"
  fi
else
  logger -s "[START] karotz not connected"
fi
# ----------------------------------------
# Open Karotz modification
# ----------------------------------------
logger -s "[START] Mount usb key (if Present)"
/bin/mount /dev/uba1 /mnt/usbkey
logger -s "[START] Adjusting time"
ntpd -q -p pool.ntp.org
logger -s "[START] Starting scheduler"
/sbin/crond -c /usr/spool/cron/crontabs
logger -s "[START] OpenKarotz Daemon"
/usr/www/cgi-bin/start_ok
logger -s "[START] Restarting Inetd"
killall inetd
| true |
40a7d88af6d3285481ca4f0417070d6120c7eb07 | Shell | benscarlson/IndGrinNiche | /wf_scripts/wf_main.sh | UTF-8 | 7,937 | 2.6875 | 3 | [] | no_license |
#---- Start script init ----#
#-- parameters
datName=huj_eobs
#-- paths
srcP=~/projects/ms1/src
reportP=$srcP/reports
scriptsP=$srcP/scripts
reportOutP=~/projects/ms1/analysis/$datName/reports
msfigP=~/projects/ms1/docs/ms/v14/figs
akde=ctmm/akde_contour/contours.shp
#-- parameters used in workflow
hvjob=5axes2000pts1 #name of the hypervolume result set
mod=mod4 #The name of the rsf model to use. See models.csv
#working directory should be the specific results folder
#mkdir -p ~/projects/whitestork/results/stpp_models/$datName && cd "$_"
cd ~/projects/whitestork/results/stpp_models/$datName
#---- End Script init ----#
#-- annotation
#TODO: need to test to make sure these paths still work
dat=data/obsbg.csv #used in call to annotation script
gcsimport=gs://mol-playground/benc/ingest_ee/$datName
gcsanno=gs://mol-playground/benc/annotated/$datName
asset=users/benscarlson/annotate/tracks/$datName
#----
#---- Prep workflow ----#
#----
#set up niches.csv file using /src/scripts/rsf/niches.r. TODO: move this to rsf workflow
#set up settings.yml file. use ~/projects/rsf/settings_template.yml
#----
#---- Segment migration ----#
#----
#fine-tune phen2 table based on reports
#TODO: uses early version of runcopy/.p system
Rscript $whitestorkP/reports/report_finetune_mig/report_finetune_mig_mult.r \
~/projects/whitestork/results/seg_mig/$datName
Rscript $scriptsP/report_runcopy2.r \
$whitestorkP/reports/report_finetune_mig/time_queries.rnw \
~/projects/whitestork/results/seg_mig/$datName/time_queries.pdf
#----
#---- Set up data ----#
#----
#TODO: check to make sure this can't overwrite an existing database
$scriptsP/database/create_db.sh
#extract telemetry data from shay's dataset and save to working folder
$scriptsP/extract_inds.r
$scriptsP/outliers.r
$scriptsP/obs.r
$scriptsP/trim_dates.r data/obs.csv data/obs_trim.csv
#--- set up hpc for analyses
ssh grace "mkdir -p ~/results/$datName" #create directory on hpc
ssh grace "mkdir -p ~/results/$datName/data" #create directory on hpc
scp niches.csv grace:~/results/$datName
scp data/obs_trim.csv grace:~/results/$datName/data
#---- variogram analysis ----#
#-- run on grace
ssh grace "cd ~/results/$datName && sbatch ~/projects/rsf/config/variograms_mpi.sh" #run this on hpc, from appropriate results directory
#-- get results
scp -r grace:~/results/$datName/ctmm .
#---- akde ----#
#-- run on grace
scp niches.csv grace:~/results/$datName
# NOTE(review): this section is a command journal / runbook, not a script
# meant to run top-to-bottom: it mixes local commands, commands intended to
# be pasted on the "grace" HPC host, and bare paths kept as notes. It relies
# on variables ($datName, $scriptsP, $srcP, $hvjob, $mod, $akde, $dat,
# $gcsimport, $gcsanno, $asset, $msfigP) defined earlier in the file or in
# the interactive environment -- confirm they are set before running a line.
ssh grace "cd ~/results/$datName && sbatch ~/projects/rsf/config/akde_mpi.sh" #run this on hpc, from appropriate results directory
scp -r grace:~/results/$datName/ctmm/akde ctmm/.
#-- save akde contours as shapefiles
$scriptsP/akde2shape.r ctmm/akde ctmm/akde_contour/contours.shp
$scriptsP/background_steps.r data/obs_trim.csv data/obsbg.csv
#-----------------------------#
#---- Download GEE Layers ----#
#-----------------------------#
geeDatName=${datName}
#Run GEE script: layers/export_image.js
mkdir layers
gsutil -m cp -r gs://benc/layer_export/$geeDatName/* layers
#Masked pixels are set to a particular integer value (e.g. 255) in ee using unmask(255).
#But, 255 is not yet set as the nodata value on the tif
gdalinfo -mm layers/pct_tree_30m.tif #note max is 255
gdalinfo -mm layers/pct_tree_30m.tif #does not seem to have masked values
#So, need to define the nodata value
gdal_edit.py -a_nodata 255 layers/pct_tree_30m.tif
gdal_edit.py -a_nodata 255 layers/pct_bare_30m.tif
#------------------------------#
#---- GEE based annotation ----#
#------------------------------#
annoprep $dat
annoimport -d $dat -g $gcsimport -a $asset
#annotate in playground: annotate/annotate_by_static_or_ts.js
annoprocess -d $dat -g $gcsanno
annocleanup -d $dat -i $gcsimport -g $gcsanno
#---------------------------------------------------------------#
#---- do another annotation on an already-annotated dataset ----#
#---------------------------------------------------------------#
#annotate in playground: annotate/annotate_by_static_or_ts.js
annoprocess -d $dat -g $gcsanno
annocleanup -d $dat -i $gcsimport -g $gcsanno
#------------------------------#
#---- Local annotation ----#
#------------------------------#
#param1: relative path to datafile to be annotated.
Rscript $scriptsP/anno_local.r data/obsbg_amt_anno.csv
# Sanity check: compare row counts of the annotated file and its backup.
cat data/obsbg_anno.csv | wc -l
cat data/sav/obsbg_anno.csv | wc -l
rm data/sav/obsbg_anno.csv #clean up - remove backup file
#------------------#
#---- Modeling ----#
#------------------#
#see model_amt.r
#------------------------#
#---- niche analysis ----#
#------------------------#
#prep hpc
ssh grace "mkdir -p ~/results/$datName" #create directory on hpc
ssh grace "mkdir -p ~/results/$datName/data" #create directory on hpc
scp niches.csv grace:~/results/$datName
scp data/obsbg_anno.csv grace:~/results/$datName/data
#----
#---- create individual hypervolumes on the hpc
#----
#Make sure to open scripts for parameter settings
scp grace:~/projects/rsf/config/* ~/projects/ms1/src/hpc
sbatch ~/projects/rsf/config/hvs_indiv_mpi.sh #run this on hpc, from appropriate results directory
scp -r grace:~/results/$datName/hvs/$hvjob hvs/$hvjob #get results from hpc
#----
#---- create population hypervolumes
#----
Rscript $scriptsP/hv/hvs_niche_set.r hvs/$hvjob -s hvs/$hvjob/hvs_niche_set_df3 -d 3 -p mc -c 6
#----
#---- Niche metrics
#----
Rscript $srcP/scripts/hv_pairwise.r $hvjob -i 1 -u 3 -p mc -c 6
$srcP/database/load_pairwise.r $hvjob
#Calculate nestedness metric and store in database
$srcP/scripts/nestedness.r $hvjob
#----
#---- nmds analysis of niche sets
#----
#run on hpc
scp niche_sets.csv grace:~/results/$datName
ssh grace "cd ~/results/$datName && sbatch ~/projects/rsf/config/nmds_hv_mpi.sh" #run this on hpc, from appropriate results directory
#get results
scp -r grace:~/results/$datName/hvs/5axes2000pts1/nmds hvs/5axes2000pts1
scp -r grace:~/results/$datName/drom13 hvs/5axes2000pts1/nmds #get dromling 2013 which I had to run seperately
scp -r grace:~/results/$datName/hvs/5axes2000pts1/nmds1000 hvs/5axes2000pts1
#----
#---- Repeatability
#----
$srcP/workflow/repeatability.r $hvjob $mod
#----
#---- Figures
#----
$srcP/figs/indiv_pal.r figs/ms/indiv_pal.csv #Create individual-level palette
#--------------------------------------#
#---- Final figures for manuscript ----#
#--------------------------------------#
year=2015
#---- Figure 2. SSF Plots
$srcP/figs/rsf.r loburg $year $mod $msfigP/2_rsf_loburg_${year}.png
$srcP/figs/rsf.r loburg $year $mod $msfigP/2_rsf_loburg_${year}.pdf
#---- Figure 3. AKDE, NMDS & Metrics
$srcP/figs/niches.r $hvjob $akde $year 11,10,10 $msfigP/3_niches_${year}.png
#---- Figure 4. Metrics over time
$srcP/figs/metrics_time.r $hvjob $msfigP/4_metrics_time.png
#--------------------------------------#
#---- Final figures for supplement ----#
#--------------------------------------#
msfigP=~/projects/ms1/docs/ms/submission_natcom/submission_2/v24
#---- Figure S1. SSF Plots
outP=$msfigP/supp_figs/S1_sub
mkdir -p $outP
pops=("beuster" "dromling" "loburg")
years=("2013" "2014" "2015" "2016")
# Render one supplementary SSF figure per population x year combination.
for pop in "${pops[@]}"
do
for year in "${years[@]}"
do
out=$outP/S1_rsf_${pop}_${year}.pdf
$srcP/figs/rsf.r $pop $year $mod $out --mode=supp
done
done
# NOTE(review): the next two lines are bare paths, not commands -- executing
# them as-is would fail; they appear to be notes kept for copy/paste.
/Users/benc/projects/ms1/docs/ms/submission_natcom/submission_2/v24/supp_figs/S1_sub
/Users/benc/projects/ms1/docs/ms/submission_natcom/submission_2/v24/supp_figs/S1_sub/S1_rsf_*.pdf
ls /Users/benc/projects/ms1/docs/ms/submission_natcom/submission_2/v24/supp_figs/S1_sub
# Concatenate the per-figure PDFs into a single supplement file with cpdf.
files=`ls $outP/S1_rsf_*.pdf`
cpdf $files -o $msfigP/supp_figs/Fig_S1.pdf
#---- Figure S2. AKDE, NMDS & Metrics
outP=$msfigP/supp/s2_niches
mkdir -p $outP
years=("2013" "2014" "2016") #
for year in "${years[@]}"
do
out=${outP}/S2_${year}.pdf
$srcP/figs/niches.r $hvjob $akde $year 11,10,10 $out --mode=supp
done
#Combine all years into single report
files=`ls $outP/S2_*.pdf`
cpdf $files -o $msfigP/S2_niches.pdf
| true |
b86f0375417ce99a3f4357be56c869b223ae7e40 | Shell | mherman09/Hdef | /test/test_triutil.sh | UTF-8 | 9,322 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Regression tests for the triutil tool: each test writes station and
# triangle-source input files, runs triutil, and compares its output against
# hard-coded reference values via test_values.sh (which presumably applies a
# numeric tolerance -- confirm in that script). All *.tmp files are removed
# by the trap below.
#####
# SET PATH TO TEST_VALUES SCRIPT
#####
TEST_BIN_DIR=$(echo $0 | xargs dirname)
#####
# SET PATH TO HDEF EXECUTABLE
#####
# Check for o92util
$TEST_BIN_DIR/test_find_hdef_exec.sh triutil || { echo "$0: could not find triutil; exiting" 1>&2; exit 1; }
BIN_DIR=$(cat hdefexec.tmp | xargs dirname)
#####
# RUN TEST
#####
# Clean up all temporary files on normal exit and on common termination
# signals (HUP, INT, QUIT, FPE).
trap "rm -f *.tmp" 0 1 2 3 8 9
echo "----------------------------------------------------------"
echo "Test #1: Displacement, single source, single station"
echo "----------"
# sta.tmp: lon lat depth; tri.tmp: three vertices (lon lat dep) + slip vector.
echo "-123 40.2 0" > sta.tmp
echo "-122.93930377 40.34037337 10.6704 -122.88118251 40.34818434 19.3296 -122.86074437 40.25961846 19.3296 -6.73205e-06 10 0" > tri.tmp
$BIN_DIR/triutil -flt tri.tmp -sta sta.tmp -disp disp.tmp || exit 1
echo "-123.00000000000000 40.200000000000003 0.0000000000000000 9.6569791024380125E-003 2.0009010635221869E-004 -6.2046465664675754E-003" > answer.tmp
$TEST_BIN_DIR/test_values.sh disp.tmp answer.tmp 6 "triutil: disp" || exit 1
echo "----------------------------------------------------------"
echo "Test #2: Displacement, 2 sources, 2 stations"
echo "----------"
cat > sta.tmp << EOF
-123 40.2 0.0
-121 40.6 1.0
EOF
cat > tri.tmp << EOF
-122.93930377 40.34037337 10.6704 -122.88118251 40.34818434 19.3296 -122.86074437 40.25961846 19.3296 -6.73205e-06 10 0
-122.93930377 40.34037337 10.6704 -122.86074437 40.25961846 19.3296 -122.91878948 40.25180749 10.6704 -6.73205e-06 10 0
EOF
$BIN_DIR/triutil -flt tri.tmp -sta sta.tmp -disp disp.tmp || exit 1
cat > answer.tmp << EOF
-123.00000000000000 40.200000000000003 0.0000000000000000 2.9966961388728952E-002 6.2629065689802088E-003 -2.2918691003340319E-002
-121.00000000000000 40.600000000000001 1.0000000000000000 -5.4671675253995140E-003 -1.1078757892770959E-003 5.6537610980703784E-004
EOF
$TEST_BIN_DIR/test_values.sh disp.tmp answer.tmp 6 "triutil: disp, two outputs" || exit 1
echo "----------------------------------------------------------"
echo "Test #3: Strain tensor"
echo "----------"
cat > sta.tmp << EOF
-123 40.2 0.0
-121 40.6 1.0
EOF
cat > tri.tmp << EOF
-122.93930377 40.34037337 10.6704 -122.88118251 40.34818434 19.3296 -122.86074437 40.25961846 19.3296 -1 10 0
-122.93930377 40.34037337 10.6704 -122.86074437 40.25961846 19.3296 -122.91878948 40.25180749 10.6704 -2 10 0
EOF
#triutil -flt tri.tmp -sta sta.tmp -strain strain.tmp
$BIN_DIR/triutil -flt tri.tmp -sta sta.tmp -strain strain.tmp || exit 1
cat > answer.tmp << EOF
-123.00000000000000 40.200000000000003 0.0000000000000000 -2.0278065772745314E-005 -2.4945319983837185E-006 7.5908659237095965E-006 -1.2602271373136892E-005 -3.0443371261343393E-020 -5.9655329947237400E-021
-121.00000000000000 40.600000000000001 1.0000000000000000 6.0244145766807270E-008 -1.8766841507615213E-008 -1.3823728862643908E-008 1.5015079437210632E-008 -5.9673505651432135E-010 -9.8567652500763217E-011
EOF
$TEST_BIN_DIR/test_values.sh strain.tmp answer.tmp 9 "triutil: strain" || exit 1
echo "----------------------------------------------------------"
echo "Test #4: Stress tensor"
echo "----------"
cat > sta.tmp << EOF
-123 40.2 0.0
-121 40.6 1.0
EOF
cat > tri.tmp << EOF
-122.93930377 40.34037337 10.6704 -122.88118251 40.34818434 19.3296 -122.86074437 40.25961846 19.3296 -1 10 0
-122.93930377 40.34037337 10.6704 -122.86074437 40.25961846 19.3296 -122.91878948 40.25180749 10.6704 -2 10 0
EOF
#triutil -flt tri.tmp -sta sta.tmp -stress stress.tmp
$BIN_DIR/triutil -flt tri.tmp -sta sta.tmp -stress stress.tmp || exit 1
cat > answer.tmp << EOF
-123.00000000000000 40.200000000000003 0.0000000000000000 -2229514.5357164023 -806831.83376747486 -9.6624717116355896E-009 -1008181.7098509513 -2.4354697009074713E-009 -4.7724263957789922E-010
-121.00000000000000 40.600000000000001 1.0000000000000000 5925.6746772065080 -395.20430474729096 0.24470685041342222 1201.2063549768504 -47.738804521145710 -7.8854122000610571
EOF
$TEST_BIN_DIR/test_values.sh stress.tmp answer.tmp 9 "triutil: stress" || exit 1
echo "----------------------------------------------------------"
echo "Test #5: Maximum shear stress"
echo "----------"
cat > sta.tmp << EOF
-123 40.2 0.0
-121 40.6 1.0
EOF
cat > tri.tmp << EOF
-122.93930377 40.34037337 10.6704 -122.88118251 40.34818434 19.3296 -122.86074437 40.25961846 19.3296 -1 10 0
-122.93930377 40.34037337 10.6704 -122.86074437 40.25961846 19.3296 -122.91878948 40.25180749 10.6704 -2 10 0
EOF
#triutil -flt tri.tmp -sta sta.tmp -estress estress.tmp
$BIN_DIR/triutil -flt tri.tmp -sta sta.tmp -estress estress.tmp || exit 1
cat > answer.tmp << EOF
-123.00000000000000 40.200000000000003 0.0000000000000000 1513512.5208073591
-121.00000000000000 40.600000000000001 1.0000000000000000 3739.2521231165501
EOF
$TEST_BIN_DIR/test_values.sh estress.tmp answer.tmp 4 "triutil: max shear stress" || exit 1
echo "----------------------------------------------------------"
echo "Test #6: Normal tractions"
echo "----------"
cat > sta.tmp << EOF
-123 40.2 0.0
-121 40.6 1.0
EOF
cat > tri.tmp << EOF
-122.93930377 40.34037337 10.6704 -122.88118251 40.34818434 19.3296 -122.86074437 40.25961846 19.3296 -1 10 0
-122.93930377 40.34037337 10.6704 -122.86074437 40.25961846 19.3296 -122.91878948 40.25180749 10.6704 -2 10 0
EOF
# trg.tmp defines the target fault geometry (strike dip rake friction --
# presumably; confirm against triutil's documentation).
echo 15 80 -6 0.5 > trg.tmp
$BIN_DIR/triutil -flt tri.tmp -sta sta.tmp -trg trg.tmp -normal normal.tmp || exit 1
cat > answer.tmp << EOF
-123.00000000000000 40.200000000000003 0.0000000000000000 -1580967.8173957772
-121.00000000000000 40.600000000000001 1.0000000000000000 4738.7840988880153
EOF
$TEST_BIN_DIR/test_values.sh normal.tmp answer.tmp 4 "triutil: normal" || exit 1
echo "----------------------------------------------------------"
echo "Test #7: Shear tractions"
echo "----------"
cat > sta.tmp << EOF
-123 40.2 0.0
-121 40.6 1.0
EOF
cat > tri.tmp << EOF
-122.93930377 40.34037337 10.6704 -122.88118251 40.34818434 19.3296 -122.86074437 40.25961846 19.3296 -1 10 0
-122.93930377 40.34037337 10.6704 -122.86074437 40.25961846 19.3296 -122.91878948 40.25180749 10.6704 -2 10 0
EOF
$BIN_DIR/triutil -flt tri.tmp -sta sta.tmp -trg trg.tmp -shear shear.tmp || exit 1
cat > answer.tmp << EOF
-123.00000000000000 40.200000000000003 0.0000000000000000 -1232623.6797110836 1241807.6939554971
-121.00000000000000 40.600000000000001 1.0000000000000000 2655.0409356889468 2723.1861172609219
EOF
$TEST_BIN_DIR/test_values.sh shear.tmp answer.tmp 5 "triutil: shear stress" || exit 1
echo "----------------------------------------------------------"
echo "Test #8: Coulomb stresses"
echo "----------"
cat > sta.tmp << EOF
-123 40.2 0.0
-121 40.6 1.0
EOF
cat > tri.tmp << EOF
-122.93930377 40.34037337 10.6704 -122.88118251 40.34818434 19.3296 -122.86074437 40.25961846 19.3296 -1 10 0
-122.93930377 40.34037337 10.6704 -122.86074437 40.25961846 19.3296 -122.91878948 40.25180749 10.6704 -2 10 0
EOF
$BIN_DIR/triutil -flt tri.tmp -sta sta.tmp -trg trg.tmp -coul coul.tmp || exit 1
cat > answer.tmp << EOF
-123.00000000000000 40.200000000000003 0.0000000000000000 -2023107.5884089721
-121.00000000000000 40.600000000000001 1.0000000000000000 5024.4329851329549
EOF
$TEST_BIN_DIR/test_values.sh coul.tmp answer.tmp 4 "triutil: coulomb" || exit 1
echo "----------------------------------------------------------"
echo "Test #9: Coulomb stresses, changing half-space parameters"
echo "----------"
# Override the default elastic half-space parameters via -haf.
echo "shear 60e9 lame 40e9" > haf.tmp
$BIN_DIR/triutil -flt tri.tmp -sta sta.tmp -trg trg.tmp -haf haf.tmp -coul coul.tmp || exit 1
cat > answer.tmp << EOF
-123.00000000000000 40.200000000000003 0.0000000000000000 -2910810.4521128507
-121.00000000000000 40.600000000000001 1.0000000000000000 7004.0166289097569
EOF
$TEST_BIN_DIR/test_values.sh coul.tmp answer.tmp 4 "triutil: coulomb, half-space parameters" || exit 1
echo "----------------------------------------------------------"
echo "Test #10: Coulomb stresses, target fault for each station"
echo "----------"
cat > trg.tmp << EOF
15 80 -6 0.5
-5 55 -93 0.4
EOF
$BIN_DIR/triutil -flt tri.tmp -sta sta.tmp -trg trg.tmp -coul coul.tmp -prog || exit 1
cat > answer.tmp << EOF
-123.00000000000000 40.200000000000003 0.0000000000000000 -2023107.5884089721
-121.00000000000000 40.600000000000001 1.0000000000000000 4460.4409590834784
EOF
$TEST_BIN_DIR/test_values.sh coul.tmp answer.tmp 4 "triutil: coulomb" || exit 1
echo "----------------------------------------------------------"
echo "Test #11: Cartesian coordinates"
echo "----------"
echo "0.5 0.6 -3" > sta.tmp
echo "0 0 6 1 0 5 1 1 7 -1 2 0" > tri.tmp
$BIN_DIR/triutil -flt tri.tmp -sta sta.tmp -disp disp.tmp -xy || exit 1
echo 0.50000000000000000 0.59999999999999998 -3.0000000000000000 -3.0663748564409593E-002 -6.0112932472348501E-002 -0.15985669393942395 > answer.tmp
$TEST_BIN_DIR/test_values.sh disp.tmp answer.tmp 6 "triutil: disp, cartesian coordinates" || exit 1
| true |
8b48c88c0677eb8c711ff7e3d120b867d4aa339d | Shell | frey-norden/unix-workbench | /dog_args.sh | UTF-8 | 282 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env zsh
# File: dog_args.sh
# Demonstrates positional parameters: echoes the raw argument list ($@),
# the first three arguments, and the argument count, then feeds "$# * $1"
# to bc as a small arithmetic demo.
echo "Script args qty -> $@"
echo "How many of them args u done added to tha cmd line: $@"
printf '%s\n' "fers arg: $1; Sekun arg: $2; Dirty 3rd arg: $3"
printf '%s\n' "Number of args: $#"
v1=$#
printf '%s\n' "Now here is some fancy math fo ya"
printf '%s * %s\n' "$v1" "$1" | bc -l
| true |
e8bb43eda4d9657c8f1778bbcd271e6089b5502f | Shell | ozmeka/auto-ozmeka | /vagrant/provisioning/roles/solr/templates/solr-undertow-init.sh.j2 | UTF-8 | 2,448 | 3.484375 | 3 | [] | no_license | #!/bin/sh
### BEGIN INIT INFO
# Provides: solr-undertow
# Required-Start: $local_fs $remote_fs $network $syslog
# Required-Stop: $local_fs $remote_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start/Stop solr-undertow AS v7.0.0
### END INIT INFO
# Modified from https://gist.github.com/magicdude4eva/3b5fec150fbcaafdc34c
# . ${HOME}/config/undertow.environment

# Init script (Jinja2 template) for the solr-undertow service.
# The script is run by /bin/sh, so only POSIX constructs are used below
# (the previous version used the bash-only [[ ]] and (( )) forms, which
# break under dash).
#
# Collect the PID(s) of any running solr-undertow JVM. "grep -v grep"
# drops this pipeline's own grep process from the ps listing. $PID may
# hold several whitespace-separated PIDs, so it is deliberately left
# unquoted wherever splitting into one argument per PID is wanted.
PID=$(ps aux | grep -v grep | grep org.bremeld.solr.undertow.UndertowPackage | awk '{print $2}')

case "$1" in
start)
    # Only start a new instance when none is already running.
    if [ -z "$PID" ]; then
        echo "**** Starting Solr server"
        # $APP_HOME/undertow/bin/solr-undertow $APP_HOME/undertow/conf/solr.conf > /dev/null 2>&1 &
        /usr/local/solr-undertow-{{ undertow_version }}-with-solr-{{ solr_version }}/bin/solr-undertow /usr/local/solr-undertow-{{ undertow_version }}-with-solr-{{ solr_version }}/omeka/omeka.conf > /dev/null 2>&1 &
        echo "**** Solr server started - check log files for startup"
    else
        echo "**** Solr Server is running with pid=$PID"
    fi
    ;;
stop)
    if [ -n "$PID" ]; then
        echo "Stopping Solr Server with pid=$PID"
        # First try a graceful shutdown through the undertow shutdown
        # endpoint; --write-out makes curl print only the HTTP status code.
        SHUTDOWN_RESPONSE=$(curl --write-out %{http_code} --silent --output /dev/null 'http://localhost:8081?password={{ solr_shutdown_password }}')
        if [ "$SHUTDOWN_RESPONSE" -eq 200 ]; then
            echo "Graceful Solr server shutdown successful"
            exit
        elif [ "$SHUTDOWN_RESPONSE" -eq 401 ]; then
            echo "Graceful Solr server shutdown unauthorised"
            exit
        else
            echo "Graceful Solr server shutdown failed with status=$SHUTDOWN_RESPONSE, attempting kill"
        fi
        # Number of seconds to wait before using "kill -9"
        WAIT_SECONDS=10
        # Counter to keep count of how many seconds have passed
        count=0
        # Send SIGTERM and poll once a second until the process dies or the
        # timeout elapses, then fall back to SIGKILL.
        while kill $PID > /dev/null
        do
            sleep 1
            count=$((count + 1))
            # Has the process been killed? If so, exit the loop.
            if ! ps -p $PID > /dev/null ; then
                break
            fi
            # Have we exceeded $WAIT_SECONDS? If so, kill the process with
            # "kill -9" and exit the loop.
            if [ "$count" -gt "$WAIT_SECONDS" ]; then
                kill -9 $PID
                break
            fi
        done
        echo "Process has been killed after $count seconds."
    else
        echo "Solr Server is not running"
    fi
    ;;
*)
    echo "Usage: $0 start|stop" >&2
    exit 3
    ;;
esac | true |
2ce4df415877e5797463106fc9ea9123721440b4 | Shell | zarlo/gameservers | /scripts/2-build.sh | UTF-8 | 6,523 | 3.859375 | 4 | [
"GPL-3.0-only",
"MIT"
] | permissive | #!/usr/bin/env bash
# by invaderctf
# Builds the SourceMod plugins: compiles every *.sp without a compiled
# *.smx counterpart (and, when a git reference is given, every *.sp changed
# since that reference), then removes obsolete *.smx files.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
# Helper functions
# NOTE(review): helpers.sh appears to provide the debug/info/ok/warn/error/
# important logging functions used below -- confirm; it is not visible here.
source ${SCRIPT_DIR}/helpers.sh
# Variable initialisation
WORKING_DIR="tf/addons/sourcemod"
SPCOMP_PATH="scripting/spcomp64"
SCRIPTS_DIR="scripting"
COMPILED_DIR="plugins"
# Exclusion lists, use /dir/ for directories and /file_ for file_*.sp
EXCLUDE_COMPILE="/stac/ /include/ /disabled/ /external/ /economy/ /discord/ /tf2-comp-fixes/"
# Each list is rewritten into a complete "grep -v -e pat1 -e pat2 ..."
# command line; it is later expanded *unquoted* so that word splitting
# rebuilds the argument vector.
EXCLUDE_COMPILE="grep -v -e ${EXCLUDE_COMPILE// / -e }"
EXCLUDE_CLEANUP="/external/ /disabled/"
EXCLUDE_CLEANUP="grep -v -e ${EXCLUDE_CLEANUP// / -e }"
# Temporary files
UNCOMPILED_LIST=$(mktemp)
UPDATED_LIST=$(mktemp)
# TODO: I am pretty sure this needs to be single quoted with double quotes around the vars
# (Both temp-file variables are already set at this point, so the early
# double-quoted expansion happens to be harmless; note popd fails quietly
# if the script exits before the pushd further down.)
trap "rm -f ${UNCOMPILED_LIST} ${UPDATED_LIST}; popd >/dev/null" EXIT
# Print a short description and the invocation synopsis, then abort the
# script with status 1.
usage()
{
    printf '%s\n' \
        "This script looks for all uncompiled .sp files" \
        "and if a reference is given, those that were updated" \
        "Then it compiles everything" \
        "Usage: ./2-build.sh <reference>"
    exit 1
}
# Just checking that the git reference is valid
# Validate that the given git reference exists; on success the global
# GIT_REF is set to it, otherwise the script aborts with status 2.
# $1 - candidate git reference (branch, tag, or commit)
reference_validation()
{
    GIT_REF="${1}"
    if ! git rev-parse --verify --quiet "${GIT_REF}" > /dev/null; then
        error "Reference ${GIT_REF} does not exist"
        exit 2
    fi
    debug "Comparing against ${GIT_REF}"
}
# Find all changed *.sp files inside ${WORKING_DIR}
# Write the full list to a file
# Remove all the *.smx counterparts that exist
# Uses the GIT_REF global set by reference_validation; appends relative
# paths to the UPDATED_LIST temp file. Returns 0 when at least one updated
# plugin was recorded, 1 when the diff contained none.
list_updated()
{
# ${EXCLUDE_COMPILE} expands unquoted into a "grep -v -e ..." filter.
UPDATED=$(git diff --name-only "${GIT_REF}" HEAD . | grep "\.sp$" | ${EXCLUDE_COMPILE})
# skip compile if there's nothing *to* compile
if [[ -z $UPDATED ]]; then
ok "No updated files in diff"
return 1
fi
debug "Generating list of updated plugins"
while IFS= read -r line; do
# git diff reports the full path, we need it relative to ${WORKING_DIR}
echo "${line/${WORKING_DIR}\//}" >> "${UPDATED_LIST}"
# Drop the stale compiled artifact so the compile step rebuilds it.
rm -f "${COMPILED_DIR}/$(basename "${line/.sp/.smx}")"
done <<< "${UPDATED}"
return 0
}
# Find all *.sp files inside ${WORKING_DIR}
# Select those that do not have a *.smx counterpart
# And write resulting list to a file
# Scans SCRIPTS_DIR for *.sp sources lacking a compiled counterpart in
# COMPILED_DIR and appends them to the UNCOMPILED_LIST temp file.
# Returns 0 when something needs compiling, 1 otherwise.
list_uncompiled()
{
# this may need to be quoted
# (${EXCLUDE_COMPILE} itself must stay unquoted: it is a stored command.)
UNCOMPILED=$(find "${SCRIPTS_DIR}" -iname "*.sp" | ${EXCLUDE_COMPILE})
debug "Generating list of uncompiled plugins"
# while loop, read from our uncompiled list we just got
while IFS= read -r line; do
# if file doesnt exist at compiled dir
if [[ ! -f "${COMPILED_DIR}/$(basename "${line/.sp/.smx}")" ]]; then
# then tack it on to the end of the temp file we made
echo "${line}" >> "${UNCOMPILED_LIST}"
fi;
done <<< "${UNCOMPILED}"
# skip compile if there's nothing *to* compile
if [[ $(wc -l < "$UNCOMPILED_LIST") == 0 ]]; then
ok "No uncompiled .sp files"
return 1
fi
return 0
}
# Iterate over a list of files (the file named by $1, one entry per line)
# and compile each *.sp it names.
# Output will be ${COMPILED_DIR}/plugin_name.smx
# If an error is found the function prints the warnings to stdout and kills the
# job after it compiled every plugin
compile()
{
    # Compile every plugin listed (one path per line) in the file named by $1.
    # $1 - path to a list of *.sp source files
    # All plugins are attempted even after a failure; failing ones are
    # reported via error(), and the script then exits with status 1.
    local failed=0
    info "Compiling $(wc -l < "${1}") files"
    # IFS= keeps leading/trailing whitespace in file names intact.
    while IFS= read -r plugin; do
        info "Compiling ${plugin}"
        # compiler path, plugin name, output file (replacing .sp with .smx);
        # flags: -v=2 verbose, -z=9 max compression, -O=2 max optimisation,
        # -;=+ require semicolons, -E warnings treated as errors (the old
        # comment had -E reversed -- confirm against spcomp's usage text).
        if ! ./${SPCOMP_PATH} "${plugin}" -o "${COMPILED_DIR}/$(basename "${plugin/.sp/.smx}")" \
            -v=2 -z=9 -O=2 -\;=+ -E; then
            error "spcomp error while compiling ${plugin}"
            failed=1
        fi
    done < "${1}"
    if [[ ${failed} -ne 0 ]]; then
        exit 1
    fi
    return 0
}
# Auxiliary function to catch errors on spcomp64
# Reports the failing plugin ($1) via error() and aborts with status 255.
# NOTE(review): appears unused -- compile() reports failures inline;
# confirm before removing.
compile_error()
{
error "spcomp64 error while compiling ${1}"
exit 255
}
# Find all *.smx files inside ${COMPILED_DIR}
# Select those that do not have a *.sp counterpart
# And remove them
cleanup_plugins()
{
    # Delete compiled plugins (*.smx) whose *.sp source no longer exists,
    # plus those whose source lives under a disabled/ or external/
    # directory. Reads the COMPILED_DIR/SCRIPTS_DIR/EXCLUDE_CLEANUP globals.
    local COMPILED SP_FILE SP_FILE_COUNT line
    debug "Generating list of compiled plugins"
    # ${EXCLUDE_CLEANUP} expands unquoted into a "grep -v -e ..." filter.
    COMPILED=$(find "${COMPILED_DIR}" -iname "*.smx" | ${EXCLUDE_CLEANUP})
    while IFS= read -r line; do
        # An empty COMPILED list still yields one empty here-string line;
        # skip it instead of running find/rm with an empty name.
        [[ -n ${line} ]] || continue
        debug "Looking for $(basename "${line/.smx/.sp}")"
        # Look for a *.sp counterpart anywhere under the scripts tree.
        SP_FILE=$(find "${SCRIPTS_DIR}" -iname "$(basename "${line/.smx/.sp}")")
        SP_FILE_COUNT=$(wc -l <<< "${SP_FILE}")
        if [[ -z ${SP_FILE} ]]; then
            # No *.sp counterpart found: delete the orphaned *.smx file.
            # Targets are quoted (and protected with --) so names containing
            # spaces or leading dashes are removed safely.
            important "Deleting orphan ${line} file"
            rm -fv -- "${line}"
        elif [[ ${SP_FILE_COUNT} -eq 1 ]]; then
            # Exactly one counterpart was found: keep it unless excluded.
            debug "${line} -> ${SP_FILE}"
            # If the source lives on the exclusion list, delete the artifact.
            if [[ ${SP_FILE} == */disabled/* ]] || [[ ${SP_FILE} == */external/* ]]; then
                important "Plugin is disabled or external, deleting the compiled file"
                rm -fv -- "${line}"
            fi
        else
            # Several counterparts: ambiguous, warn so a human can clean up.
            warn "${line} -> ${SP_FILE//$'\n'/ - }"
        fi
    done <<< "${COMPILED}"
    return 0
}
###
# Script begins here ↓
# Flow: (optionally) compile plugins changed since a git reference, then
# compile anything lacking a compiled counterpart, then prune obsolete
# *.smx files. The EXIT trap pops back out of WORKING_DIR.
pushd ${WORKING_DIR} >/dev/null || exit
# Make sure the bundled compiler is executable.
[[ ! -x ${SPCOMP_PATH} ]] && chmod u+x ${SPCOMP_PATH}
# Compile all scripts that have been updated
if [[ -n ${1} ]]; then
reference_validation "${1}"
debug "Looking for all .sp files that have been updated"
list_updated
# only compile if we found something to compile
# ($? below is list_updated's return status; 1 means "nothing to do")
if [[ $? -eq 0 ]]; then
debug "Compiling updated plugins"
compile "${UPDATED_LIST}"
fi
fi
# Compile all scripts that have not been compiled
debug "Looking for all .sp files in ${WORKING_DIR}/${SCRIPTS_DIR}"
list_uncompiled
# only compile if we found something to compile
if [[ $? -eq 0 ]]; then
debug "Compiling uncompiled plugins"
compile "${UNCOMPILED_LIST}"
fi
ok "All plugins compiled successfully !"
cleanup_plugins
ok "Obsolete plugins deleted !"
exit 0
| true |
d20a9a75fffb9c7edc0dbf072004e5bd348682a7 | Shell | Luzgar/Steganography | /revelation/build.sh | UTF-8 | 260 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Remove any previously built .exe; -f keeps the script quiet (and exits 0)
# on the first run, when no .exe exists yet.
rm -f -- *.exe
# Move into the src/ directory; abort if it is missing so make does not run
# in the wrong place.
cd src || exit 1
# Run the Makefile
make
# Move the produced executable up one level. The Makefile may emit either
# reveal.exe or Reveal.exe, so both moves are attempted; exactly one is
# expected to succeed.
mv reveal.exe ..
mv Reveal.exe ..
cd ..
mv Reveal.exe reveal.exe | true |
0d0cabf0bb430925395b9d2675722eff0ef58509 | Shell | draptik/arch-package-builds | /mobsh/PKGBUILD | UTF-8 | 1,177 | 2.59375 | 3 | [] | no_license | # Maintainer: Patrick Drechsler <socialcoding at pdrechsler dot de>
# Arch Linux PKGBUILD metadata for "mob" (https://mob.sh), packaged as
# "mobsh" to avoid a name clash with the existing "mob" package.
pkgname=mobsh
pkgver=1.3.0
pkgrel=1
pkgdesc="Smooth git handover during remote mob programming"
arch=('x86_64')
url="https://github.com/remotemobprogramming/mob"
license=('MIT')
depends=("git")
# Optional speech-synthesis packages (see the optdepends descriptions).
optdepends=('espeak-ng-espeak: Multi-lingual software speech synthesizer'
'mbrola-voices-us1: An American English female voice for the MBROLA synthesizer')
makedepends=('go')
provides=('mobsh')
conflicts=('mobsh' 'mob')
source=("$url/archive/v${pkgver}.tar.gz")
sha256sums=('4dc140a2697a83bbf9ff3209827685e1836e2438d75df62980d915c010ac21ad')
# Create the build output directory inside the unpacked source tree.
prepare(){
cd "mob-$pkgver"
mkdir -p build/
}
# Compile the mob binary. The packager's C/C++/linker flags are forwarded
# into cgo, and GOFLAGS requests a PIE, path-trimmed, externally linked
# build with a read-only module cache (reproducible-build friendly).
build() {
cd "mob-$pkgver"
export CGO_CPPFLAGS="${CPPFLAGS}"
export CGO_CFLAGS="${CFLAGS}"
export CGO_CXXFLAGS="${CXXFLAGS}"
export CGO_LDFLAGS="${LDFLAGS}"
export GOFLAGS="-buildmode=pie -trimpath -ldflags=-linkmode=external -mod=readonly -modcacherw"
go build -o build mob.go
}
# check() {
# cd "mob-$pkgver"
# ./create-testbed
# go test
# }
# Install the MIT license file and the compiled binary (build/mob) into the
# package staging directory ($pkgdir).
package() {
cd "mob-$pkgver"
install -Dm644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
install -Dm755 build/mob "$pkgdir"/usr/bin/mob
}
| true |
c5bdb5e53283765a24cf0b08666a148685c0b51a | Shell | malev/raspberrypi-dotfiles | /raspberry-sunzi/sunzi-recipes/debian/dotdeb.sh | UTF-8 | 302 | 3.015625 | 3 | [] | no_license | # Dotdeb
# Add the Dotdeb package repository (Debian squeeze) to APT exactly once,
# then import its signing key. Must run as root: it appends to
# /etc/apt/sources.list and calls apt-key.
if grep -Fq "dotdeb" /etc/apt/sources.list; then
echo 'dotdeb entry already exists'
else
echo 'Adding dotdeb to the apt source list'
echo 'deb http://packages.dotdeb.org/ squeeze all' >> /etc/apt/sources.list
# NOTE(review): apt-key is deprecated on modern Debian releases -- confirm
# the target release before reusing this snippet.
wget http://www.dotdeb.org/dotdeb.gpg -O - 2> /dev/null | apt-key add -
fi
| true |
fd9add10ab2e52550544f95d09eae465c8d1d879 | Shell | xfrocks/docker-xenforo | /tests/run.sh | UTF-8 | 2,425 | 3.625 | 4 | [] | no_license | #!/bin/sh
set -e

# Integration-test driver: brings up mysql + redis containers on a private
# docker network, builds a throwaway image containing this tests/ directory,
# runs every */test.php inside it, then tears everything down.
#
# This file runs under /bin/sh, so the bash-only "&>" redirection has been
# replaced with the portable "> /dev/null 2>&1" (under dash, "cmd &> f" is
# parsed as "cmd & > f" and silently backgrounds the command).
DIR=$( pwd )
NETWORK="docker-xenforo-test"

# Create the test network; ignore "already exists" failures quietly.
{
    docker network create "$NETWORK" || true
} > /dev/null 2>&1

# --- mysql ---------------------------------------------------------------
CONTAINER_ID_MYSQL="$( docker ps -qf "name=$NETWORK-mysql" )"
if [ -z "$CONTAINER_ID_MYSQL" ]; then
    # Either create a fresh container or restart a stopped one; keep the
    # combined output so it can be shown if startup ultimately fails.
    MYSQL_ATTEMPT="$(
        docker run --network "$NETWORK" \
            --name "$NETWORK-mysql" \
            -e MYSQL_RANDOM_ROOT_PASSWORD=1 \
            -e MYSQL_USER=user \
            -e MYSQL_PASSWORD=password \
            -e MYSQL_DATABASE=db \
            -d mysql:5.7 2>&1 \
        || docker restart "$NETWORK-mysql" \
    )"
    echo 'Waiting for mysql...'
    sleep 5
    CONTAINER_ID_MYSQL="$( docker ps -qf name="$NETWORK-mysql" )"
    if [ -z "$CONTAINER_ID_MYSQL" ]; then
        echo "$MYSQL_ATTEMPT"
        echo 'Cannot start mysql container'
        exit 1
    fi
fi
CONTAINER_HOSTNAME_MYSQL="$( docker inspect --format '{{.Config.Hostname}}' "$CONTAINER_ID_MYSQL" )"

# --- redis ---------------------------------------------------------------
CONTAINER_ID_REDIS="$( docker ps -qf "name=$NETWORK-redis" )"
if [ -z "$CONTAINER_ID_REDIS" ]; then
    REDIS_ATTEMPT="$(
        docker run --network "$NETWORK" \
            --name "$NETWORK-redis" \
            -d redis 2>&1 \
        || docker restart "$NETWORK-redis"
    )"
    CONTAINER_ID_REDIS="$( docker ps -qf name="$NETWORK-redis" )"
    if [ -z "$CONTAINER_ID_REDIS" ]; then
        echo "$REDIS_ATTEMPT"
        echo 'Cannot start redis container'
        exit 1
    fi
fi
CONTAINER_HOSTNAME_REDIS="$( docker inspect --format '{{.Config.Hostname}}' "$CONTAINER_ID_REDIS" )"

# --- target image + test run --------------------------------------------
cd "$DIR"
# Derive a temporary image from the image under test, with tests baked in.
{
    echo "FROM $IMAGE_TAG_FOR_TESTING"
    echo "COPY . /tests"
} > Dockerfile
IMAGE_TAG_TMP="$IMAGE_TAG_FOR_TESTING-tmp"
docker build -t "$IMAGE_TAG_TMP" .
CONTAINER_ID_TARGET="$( docker run \
    --network "$NETWORK" \
    -e IMAGE_TAG_FOR_TESTING="$IMAGE_TAG_FOR_TESTING" \
    -e MYSQL="$CONTAINER_HOSTNAME_MYSQL" \
    -e REDIS="$CONTAINER_HOSTNAME_REDIS" \
    -d "$IMAGE_TAG_TMP"
)"
docker exec "$CONTAINER_ID_TARGET" ls -al

# Execute every test suite directory that provides a test.php entry point.
for d in */ ; do
    if [ -f "$DIR/$d/test.php" ]; then
        echo "Testing $d..."
        docker exec "$CONTAINER_ID_TARGET" php -c /tests/php.ini "/tests/${d}test.php"
    fi
done

echo 'Cleaning up...'
{
    docker stop "$CONTAINER_ID_MYSQL" "$CONTAINER_ID_REDIS" "$CONTAINER_ID_TARGET"
    docker rm -f "$CONTAINER_ID_MYSQL" "$CONTAINER_ID_REDIS" "$CONTAINER_ID_TARGET"
    docker network rm "$NETWORK" || true
} > /dev/null 2>&1
echo 'All done!'
| true |
297720bbf92d8701ec46b320c7f9ebb4ebd17432 | Shell | mbc1990/please-test-me | /bootstrap_tcl.sh | UTF-8 | 642 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# This is kind of hacky but you can thank python dependency management for that.
# Basically, this is the script invoked by the text editor to run tests for the
# current line. It activates the please test me virtualenv, and invokes the
# test_current_line.py script which reads from the map data to find the tests
# that cover this line.
# THEN, please_test_me.py uses subprocess.popen to invokes run_tcl.sh, which
# activates the virtualenv of the *project* directory and then runs the relevant
# tests in the project dir, with the project virtualenv
#
# $1 - source file path, $2 - line number (forwarded to test_current_line.py)
# Abort if the install directory is missing instead of running from the
# wrong cwd with the wrong interpreter.
cd ~/.please-test-me || exit 1
. venv/bin/activate
# The arguments are quoted so file paths containing spaces survive intact.
python test_current_line.py "$1" "$2"
| true |
71ee2b78675629d8387c950d5efa354dbbf99950 | Shell | Heckfer/dotfiles | /install.sh | UTF-8 | 1,870 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env bash
set -o errexit
set -o pipefail
set -o nounset
#set -o xtrace

# Dotfiles bootstrap: prints a manual install/config checklist, then
# symlinks this repository's dotfiles into $HOME.
echo "== Install =="
echo "- Docker - https://download.docker.com/mac/stable/Docker.dmg"
echo "- 1Password - https://c.1password.com/dist/1P/mac4"
echo "- Datagrip - https://download-cf.jetbrains.com/datagrip"
echo "- Charles - https://www.charlesproxy.com"
echo "- Sublime - https://download.sublimetext.com"
echo "- VSCode"
echo "- Android Studio"
echo "- Chrome"
echo "- VLC"
echo "- oh-my-zsh"
echo "- ASDF"
echo "- Homebrew"
echo "== Configuration =="
echo "- Add sublime to path - ln -s /Applications/Sublime\ Text.app/Contents/SharedSupport/bin/subl /usr/local/bin/sublime"
echo "- Sublime plugins (MarkdownPreview, Pretty JSON)"
echo "- VSCode plugins (code spell checker, docker, editorconfig for vs code, elixirls, elm, env, gitlens, php-cs-fixer, prettier, sass )"
echo "oh-my-zsh..."
echo "[ -d ~/.oh-my-zsh ] || git clone https://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh"
echo "[ -d ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting ] || git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting"
echo "[ -d ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions ] || git clone git@github.com:zsh-users/zsh-autosuggestions.git ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions"
echo "[ -d ~/.oh-my-zsh/themes/dracula.zsh-theme ] || git clone git@github.com:dracula/zsh.git ~/.oh-my-zsh/themes/dracula.zsh-theme"
echo "ASDF (elixir, elm, erlang, haskell, java, kotlin, nodejs, python, ruby)"
echo "Symlinking dotfiles..."
# $(pwd) is quoted so a checkout path containing spaces still links correctly.
ln -sf "$(pwd)/vim/.vimrc" ~/.vimrc
ln -sf "$(pwd)/git/.gitconfig" ~/.gitconfig
ln -sf "$(pwd)/git/.gitignore_global" ~/.gitignore_global
ln -sf "$(pwd)/zsh/.zshrc" ~/.zshrc
ln -sf "$(pwd)/zsh/.zshenv" ~/.zshenv
# -p makes the script idempotent: without it a second run aborts here under
# errexit because the directory already exists, skipping the steps below.
mkdir -p ~/.heck
ln -sf "$(pwd)/sh/.utility_functions" ~/.heck/.utility_functions
chsh -s $(which zsh) | true |
3b02976b859a3fc049e8a571dcba7e2a92dcb7e1 | Shell | matzew/kn-box | /03-strimzi-auth.sh | UTF-8 | 4,324 | 3.375 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
# Turn colors in this script off by setting the NO_COLOR variable in your
# environment to any value:
#
# $ NO_COLOR=1 test.sh
NO_COLOR=${NO_COLOR:-""}
if [ -z "$NO_COLOR" ]; then
header=$'\e[1;33m'
reset=$'\e[0m'
else
header=''
reset=''
fi
# Resolve the latest Strimzi release tag by fetching the GitHub "latest"
# release page and scraping the tag out of the response.
# NOTE(review): curl runs without -s/-f and the 2>/dev/null applies only to
# the final awk; a failed download leaves strimzi_version empty and the
# download URLs below would then 404 -- confirm that is acceptable.
strimzi_version=`curl https://github.com/strimzi/strimzi-kafka-operator/releases/latest | awk -F 'tag/' '{print $2}' | awk -F '"' '{print $1}' 2>/dev/null`
# Print all arguments as one highlighted line (yellow, unless NO_COLOR
# emptied the color variables above).
header_text() {
    printf '%s\n' "${header}$*${reset}"
}
header_text "Using Strimzi Version: ${strimzi_version}"
header_text "Strimzi install"
# Namespace may already exist on re-runs; ignore that failure.
kubectl create namespace kafka || true
# Install the CRDs first, then the rest of the operator manifests with the
# namespace rewritten to "kafka".
kubectl -n kafka create --selector strimzi.io/crd-install=true -f https://github.com/strimzi/strimzi-kafka-operator/releases/download/${strimzi_version}/strimzi-cluster-operator-${strimzi_version}.yaml
curl -L "https://github.com/strimzi/strimzi-kafka-operator/releases/download/${strimzi_version}/strimzi-cluster-operator-${strimzi_version}.yaml" \
| sed 's/namespace: .*/namespace: kafka/' \
| kubectl -n kafka apply --selector strimzi.io/crd-install!=true -f -
# Wait for the CRD we need to actually be active
kubectl wait crd --timeout=-1s kafkas.kafka.strimzi.io --for=condition=Established
header_text "Applying Strimzi Cluster file"
# Kafka cluster CR: plain (9092), TLS client-auth (9093) and SCRAM-SHA-512
# (9094) listeners. The <<-EOF form expands ${...} but there is nothing to
# expand in the document itself.
cat <<-EOF | kubectl -n kafka apply -f -
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
name: my-cluster
spec:
kafka:
version: 2.7.0
replicas: 3
listeners:
- name: plain
port: 9092
type: internal
tls: false
- name: tls
port: 9093
type: internal
tls: true
authentication:
type: tls
- name: sasl
port: 9094
type: internal
tls: true
authentication:
type: scram-sha-512
config:
offsets.topic.replication.factor: 3
transaction.state.log.replication.factor: 3
transaction.state.log.min.isr: 2
log.message.format.version: "2.7"
auto.create.topics.enable: "false"
storage:
type: jbod
volumes:
- id: 0
type: persistent-claim
size: 100Gi
deleteClaim: false
zookeeper:
replicas: 3
storage:
type: persistent-claim
size: 100Gi
deleteClaim: false
entityOperator:
topicOperator: {}
userOperator: {}
EOF
header_text "Waiting for Strimzi to become ready"
kubectl wait kafka --all --timeout=-1s --for=condition=Ready -n kafka
header_text "Applying Strimzi TLS Admin User"
# KafkaUser with TLS client-certificate authentication.
cat <<-EOF | kubectl -n kafka apply -f -
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaUser
metadata:
name: my-tls-user
labels:
strimzi.io/cluster: my-cluster
spec:
authentication:
type: tls
EOF
header_text "Applying Strimzi SASL Admin User"
# KafkaUser with SCRAM-SHA-512 (password) authentication.
cat <<-EOF | kubectl -n kafka apply -f -
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaUser
metadata:
name: my-sasl-user
labels:
strimzi.io/cluster: my-cluster
spec:
authentication:
type: scram-sha-512
EOF
header_text "Waiting for Strimzi Users to become ready"
# Use kubectl here for consistency -- every other command in this script
# uses kubectl, and "oc wait" would fail on machines without the OpenShift
# CLI installed.
kubectl wait kafkauser --all --timeout=-1s --for=condition=Ready -n kafka
header_text "Deleting existing KafkaUser secrets"
# Remove stale copies in the default namespace; ignore "not found" errors.
kubectl delete secret --namespace default my-tls-secret || true
kubectl delete secret --namespace default my-sasl-secret || true
header_text "Creating a Secret, containing TLS from Strimzi"
# Copy the cluster CA plus the TLS user's cert/key out of the kafka
# namespace into a client-consumable secret in the default namespace.
STRIMZI_CRT=$(kubectl -n kafka get secret my-cluster-cluster-ca-cert --template='{{index .data "ca.crt"}}' | base64 --decode )
TLSUSER_CRT=$(kubectl -n kafka get secret my-tls-user --template='{{index .data "user.crt"}}' | base64 --decode )
TLSUSER_KEY=$(kubectl -n kafka get secret my-tls-user --template='{{index .data "user.key"}}' | base64 --decode )
kubectl create secret --namespace default generic my-tls-secret \
--from-literal=ca.crt="$STRIMZI_CRT" \
--from-literal=user.crt="$TLSUSER_CRT" \
--from-literal=user.key="$TLSUSER_KEY"
header_text "Creating a Secret, containing SASL from Strimzi"
# Same idea for the SCRAM-SHA-512 user: password + CA + connection metadata.
SASL_PASSWD=$(kubectl -n kafka get secret my-sasl-user --template='{{index .data "password"}}' | base64 --decode )
kubectl create secret --namespace default generic my-sasl-secret \
--from-literal=password="$SASL_PASSWD" \
--from-literal=ca.crt="$STRIMZI_CRT" \
--from-literal=saslType="SCRAM-SHA-512" \
--from-literal=user="my-sasl-user"
| true |
e97f396b7bab68d70d8af2c074f8e080a684d619 | Shell | rwfilice/DIGITS | /digits-lint | UTF-8 | 391 | 2.84375 | 3 | [
"LicenseRef-scancode-generic-cla"
] | permissive | #!/bin/bash
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.

# Run the Python (flake8) and JavaScript (gjslint) linters over the tree;
# set -e aborts on the first lint failure.
set -e

echo "=== Checking for Python lint ..."
# Prefer an installed flake8 entry point, otherwise fall back to running the
# module. command -v replaces the deprecated/non-portable `which`, and the
# substitution is quoted so an unusual install path cannot word-split.
if command -v flake8 >/dev/null 2>&1; then
    python2 "$(command -v flake8)" --exclude ./digits/jobs .
else
    python2 -m flake8 --exclude ./digits/jobs .
fi

echo "=== Checking for JavaScript lint ..."
gjslint --flagfile .gjslintrc --recurse .
echo "=== No lint found."
| true |
a5d1e117c615bc179d3481195c5e41f289a17c50 | Shell | sylverpyro/scripts | /system/legacy/xboxdrv-init.sh | UTF-8 | 932 | 3.75 | 4 | [] | no_license | #!/bin/bash
# Daemon-level xboxdrv arguments: daemonise, detach, disable D-Bus, pidfile.
# Expanded unquoted later so the options split into separate arguments.
BINARGS="--daemon --detach --dbus disabled --pid-file /var/run/xboxdrv.pid"
# Per-controller arguments: 25% stick deadzone, triggers report as buttons.
CTRLARGS="--deadzone 25% --trigger-as-button"
# Print a one-line usage summary for this script.
usage() {
  printf '%s\n' "Usage: $0 [1|2|3|4|stop]"
}
# Print the number of running "xboxdrv --daemon" processes on stdout.
# The [x] bracket-expression keeps this grep from matching its own command
# line, replacing the classic "grep ... | grep -v grep | wc -l" chain;
# grep -c emits the count directly. (Note: grep exits non-zero when the
# count is 0, but callers only consume the printed count.)
function check_running {
  ps -ef | grep -c '[x]boxdrv --daemon'
}
# The in-kernel xpad driver grabs the controller first; unload it so the
# userspace driver can bind (lsmod | grep -c counts loaded matches).
if [ ! `lsmod | grep -c xpad` -eq 0 ]; then
  sudo rmmod xpad
fi
# No argument: start a single-controller daemon unless one already runs.
if [ $# -eq 0 ]; then
  if [ `check_running` -eq 0 ]; then
    sudo xboxdrv $BINARGS $CTRLARGS
  else
    echo "Error: xbox driver already running. Run $0 stop first"
  fi
elif [ "$1" = "stop" ]; then
  sudo killall xboxdrv
else
  # Numeric argument: one daemon handling that many controllers, with one
  # extra --next-controller block per additional pad.
  case $1 in
    1) sudo xboxdrv $BINARGS $CTRLARGS & ;;
    2) sudo xboxdrv $BINARGS $CTRLARGS --next-controller $CTRLARGS & ;;
    3) sudo xboxdrv $BINARGS $CTRLARGS --next-controller $CTRLARGS --next-controller $CTRLARGS & ;;
    4) sudo xboxdrv $BINARGS $CTRLARGS --next-controller $CTRLARGS --next-controller $CTRLARGS --next-controller $CTRLARGS & ;;
    *) usage ;;
  esac
fi
| true |
9a852ce5a5ea840bac465bc04d9430689cf34581 | Shell | ytong3/PPSE | /start_local_slave.sh | UTF-8 | 562 | 3.125 | 3 | [] | no_license | #!/usr/local/bin/bash
# first argument is the port_number
# second argument is the public key file
# Starts a local RMI registry and the PPSE slave RMI server, both in the
# background; their PIDs are exported so a caller can stop them later.
USAGE="start_local_slave port_num public_key_file"
# Exactly two arguments are required: registry port and public key file.
if [[ $# -ne 2 ]];then
	echo "$USAGE"
	# Exit non-zero on a usage error (the bare 'exit' here returned 0).
	exit 1
fi
echo "Starting rmiresigstry at port $1"
# rmiregistry must run from bin/ so it can resolve the compiled classes;
# abort instead of launching it from the wrong directory.
cd bin || exit 1
rmiregistry "$1" &
export rmiregistry_PID="$!"
echo "rmiregistry backgrounded process $!"
cd ..
echo "Register the local RMI server at the register"
# Slave server: registry port in $1, public key file in $2; stdout/stderr
# are appended to log files for later inspection.
java -cp ./bin/:jscience.jar utk.security.PPSE.slave.RMIServer "$1" "$2" 1>>test.log 2>>error.log &
export RMIServer_PID=$!
echo "Java RMIServer backgrounded process $!"
| true |
ee2abe511bdd5424da8bc89226466401b2bd78db | Shell | Lannutti-Lab/hello-rust | /build-image.sh | UTF-8 | 973 | 3.265625 | 3 | [] | no_license | #!/bin/sh
PROJECT_NAME='hello-rust'
# Version string comes from executing the repo's VERSION script.
VERSION="$(./VERSION)"

# Working container based on the Rust build image.
build_ctr=$(buildah from docker.io/rust:1-alpine)
# Quote the inner substitution so a path containing spaces cannot be split.
local_src_prefix=$(dirname "$(readlink -f "$0")")
container_prefix="/usr/local"
container_bin_prefix="${container_prefix}/bin"
container_src_prefix="${container_prefix}/src/${PROJECT_NAME}"

# Build from source
buildah config --workingdir "${container_src_prefix}" "${build_ctr}"
buildah copy "${build_ctr}" "${local_src_prefix}" "${container_src_prefix}"
buildah run "${build_ctr}" /bin/sh -c "apk add --no-cache git musl-dev && git clean -xfd && cargo build --release"

# Copy executable from build stage to production stage
production_ctr=$(buildah from docker.io/alpine)
buildah copy --from "${build_ctr}" "${production_ctr}" "${container_src_prefix}/target/release/${PROJECT_NAME}" "${container_bin_prefix}"
buildah config --cmd "${PROJECT_NAME}" "${production_ctr}"

# Save the production image
buildah commit "${production_ctr}" "${PROJECT_NAME}:${VERSION}"
| true |
0e61038e09acc29b1c4ffdd02725ba4a0f39ce4c | Shell | amovsesy/taxSite | /bin/filesNeededToBeTranslated.sh | UTF-8 | 221 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Regenerate the "TBT:" (to-be-translated) markers, then report how many
# remain per file under ../locale.
./createTBTS.sh
cd ../locale || exit 1
# Collect entries whose path contains ".php", skipping Subversion metadata;
# awk keeps the name directly below the current directory.
files=$(find . | grep ".php" | grep -v "svn" | awk -F "/" '{print $2}')
for f in $files
do
    # grep -c replaces the old "grep | wc -l" pipeline; quote the file name
    # so it cannot be word-split.
    num=$(grep -c "TBT:" "$f")
    if [ "$num" -ne 0 ]; then
        echo "$f": $num
    fi
done
| true |
4fef4dda237aedb897abf0bd6536b92ad992d638 | Shell | wolfbox/packages | /perl-xml-parser/PKGBUILD | UTF-8 | 602 | 2.609375 | 3 | [] | no_license | pkgname=perl-xml-parser
# Package metadata consumed by makepkg.
pkgver=2.41
pkgrel=1
pkgdesc="XML parser for Perl"
arch=("x86_64")
url="http://search.cpan.org/dist/XML-Parser"
license=('GPL' 'PerlArtistic')
depends=('perl' 'expat')
source=(http://search.cpan.org/CPAN/authors/id/T/TO/TODDR/XML-Parser-${pkgver}.tar.gz)
sha256sums=(b48197cd2265a26c5f016489f11a7b450d8833cb8b3d6a46ee15975740894de9)
# Configure with vendor install dirs and compile (srcdir is set by makepkg).
build() {
  cd "${srcdir}/XML-Parser-${pkgver}"
  perl Makefile.PL INSTALLDIRS=vendor
  make
}
# Install into the staging root (pkgdir) rather than the live system.
package() {
  cd "${srcdir}/XML-Parser-${pkgver}"
  make install DESTDIR="${pkgdir}"
}
# Run the upstream test suite.
check() {
  cd "${srcdir}/XML-Parser-${pkgver}"
  make test
}
| true |
ad756ee646f5e36755eaacadc70c169f17bd0d42 | Shell | quyenweb/steps | /steps/kafka/describe-topics/describe.sh | UTF-8 | 168 | 2.59375 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | if [[ -z "${TOPIC_NAME}" ]] ; then
    # No topic requested: leave the filter empty so every topic is described.
    topic_arg=""
else
    # Restrict the describe call to the requested topic.
    topic_arg="--topic ${TOPIC_NAME}"
fi
/usr/bin/kafka-topics --zookeeper ${ZOOKEPER_HOST} --describe ${topic_arg} | true |
88731a80639368f957a9a93eafccca4a4327fb46 | Shell | duyet/strongloop-vagrant | /strongloop-initial.sh | UTF-8 | 1,039 | 3.5 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# root su yourself
# NOTE(review): 'sudo su' spawns an *interactive* root shell; the commands
# below only run after that shell exits, and not as root. Presumably this
# script is already run privileged by the Vagrant provisioner - TODO confirm.
sudo su
export DEBIAN_FRONTEND=noninteractive
# way of checking if you we need to install everything
if [ ! -e "/var/node-app" ]; then
  # Add mongo to apt
  apt-key adv --keyserver keyserver.ubuntu.com --recv 7F0CEB10 &> /dev/null
  echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' \
    > /etc/apt/sources.list.d/mongodb.list
  echo "Installing dev tools.."
  apt-get install -y vim git curl build-essential
  echo "Installing mongodb..."
  apt-get install -y mongodb-org
  # Install Node
  echo "Installing node..."
  # NodeSource setup script registers the Node.js 5.x apt repository.
  curl -sL https://deb.nodesource.com/setup_5.x | bash -
  apt-get install -y nodejs
  echo "Installing bower..."
  npm i -g bower
  echo "done."
  # Install StrongLoop
  echo "Installing strongloop..."
  npm install -g strongloop
  echo "done."
  # Symlink our host node-apps to the guest /var/node-apps folder
  ln -s /vagrant/node-app /var/node-app
  echo " 'slc run /var/node-app/myApp/app.js' to run the strong node node app in node-app/myApp"
fi | true |
66a551e71e0d9afc0b0e902a0e67381983d92ee1 | Shell | vkoukoutsas/corda-docker | /config-gen.sh | UTF-8 | 1,903 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env bash
# Corda node settings, all overridable from the environment; H2 database
# defaults apply when nothing is injected.
CORDA_DB_USER=${CORDA_DB_USER:-sa}
CORDA_DB_PASS=${CORDA_DB_PASS:-dbpass}
CORDA_DB_DRIVER=${CORDA_DB_DRIVER:-org.h2.jdbcx.JdbcDataSource}
CORDA_H2_PORT=${CORDA_H2_PORT:-9090}
# File-backed H2 database under PERSISTENCE_FOLDER (expected from the
# container environment - not defined in this script).
CORDA_DB_URL="jdbc:h2:file:${PERSISTENCE_FOLDER}/persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=100;AUTO_SERVER_PORT=${CORDA_H2_PORT}"
CORDA_DETECT_IP=${CORDA_DETECT_IP:-false}
CORDA_P2P_ADDRESS=${CORDA_P2P_ADDRESS:-$MY_PUBLIC_ADDRESS:$MY_P2P_PORT}
CORDA_RPC_ADDRESS=${CORDA_RPC_ADDRESS:-0.0.0.0:$MY_RPC_PORT}
CORDA_RPC_ADMIN_ADDRESS=${CORDA_RPC_ADMIN_ADDRESS:-0.0.0.0:$MY_RPC_ADMIN_PORT}
# Write node.conf from scratch; the heredoc below expands the values above.
cat > ${CONFIG_FOLDER}/node.conf <<EOL
emailAddress="${MY_EMAIL_ADDRESS}"
myLegalName="${MY_LEGAL_NAME}"
keyStorePassword: "${CORDA_KEY_STORE_PASSWORD}"
dataSourceProperties: {
dataSourceClassName:"${CORDA_DB_DRIVER}"
dataSource.url:"${CORDA_DB_URL}"
dataSource.user:"${CORDA_DB_USER}"
dataSource.password:"${CORDA_DB_PASS}"
}
networkServices {
doormanURL="${NETWORKMAP_URL}"
networkMapURL="${NETWORKMAP_URL}"
}
trustStorePassword:"${NETWORK_TRUST_PASSWORD}"
p2pAddress="${CORDA_P2P_ADDRESS}"
detectPublicIp=${CORDA_DETECT_IP}
rpcSettings {
address="${CORDA_RPC_ADDRESS}"
adminAddress="${CORDA_RPC_ADMIN_ADDRESS}"
}
security {
authService {
dataSource {
type=INMEMORY
users=[
{
password="${CORDA_RPC_PASS}"
permissions=[
ALL
]
username="${CORDA_RPC_USER}"
}
]
}
}
}
EOL
# When CORDA_NOTARY is set, append a notary section. Only the two known
# values map to a flag; any other value leaves NOTARY_VAL unset and would
# emit "validating=" with no value - NOTE(review): worth validating input.
if [ ! -z "$CORDA_NOTARY" ]; then
  if [ "$CORDA_NOTARY" == "non-validating" ]; then
    NOTARY_VAL=false
  fi
  if [ "$CORDA_NOTARY" == "validating" ]; then
    NOTARY_VAL=true
  fi
cat >> ${CONFIG_FOLDER}/node.conf <<EOL
notary {
validating=${NOTARY_VAL}
}
EOL
fi
| true |
b059f0a7a5f8092c77f6173d8b3d3dfd698f56bd | Shell | petronny/aur3-mirror | /frozen-synapse/PKGBUILD | UTF-8 | 1,954 | 3.0625 | 3 | [] | no_license | # Maintainer: Sid Karunaratne <sid at karunaratne dot net>
pkgname=frozen-synapse
pkgver=1.0.32
pkgrel=1
pkgdesc="brings the simultaneous turn-based strategy genre bang up-to-date and lets you give detailed, accurate orders to your squad"
arch=('i686' 'x86_64')
url="http://www.frozensynapse.com/"
license=('custom')
# The proprietary installer is NOT downloaded; the user must supply it
# (see build() below). Its expected name and sha256 are pinned here.
_gamepkg="FS_Linux_Latest.run"
_gamesum="eab8e416aa08161fa7475dfe6f85fb98f954c722985a1fe2a3115f944d3ff8a3"
source=("${pkgname}.desktop" ${pkgname})
sha256sums=("cdcfda5e8c79bb762cf10342ecb9c818c919b1b499e857f29bf7ccc0707dc966" "037d5fdf78914310a549a11465ea352eb3e082b4904fac59c0a3e9f5985f2662")
# The game ships 32-bit binaries, so x86_64 needs the lib32 compat libs.
if [[ $CARCH = 'i686' ]]; then
  depends=(sdl gcc-libs bash)
elif [[ $CARCH = 'x86_64' ]]; then
  depends=(lib32-sdl lib32-gcc-libs bash)
fi
# Locate the user-supplied game installer (in startdir, or a path typed in
# interactively), verify its sha256, and stage it into srcdir.
# msg/error and startdir/srcdir are provided by makepkg.
build() {
  pkgpath=${startdir}
  msg "You need a full copy of this game in order to install it"
  msg "Searching for \"${_gamepkg}\" in dir: $(readlink -f ${pkgpath})"
  if [[ ! ( -f "${pkgpath}/${_gamepkg}" ) ]]; then
    msg "Game package not found, please type the absolute path to game setup package (/home/joe):"
    read pkgpath
    if [[ ! ( -f "${pkgpath}/${_gamepkg}" ) ]] ; then
      error "Unable to find game package." && return 1
    fi
  fi
  # Check the checksum
  # sha256sum --quiet -c prints nothing on success, so any captured output
  # (the non-empty [[ ... ]] test below) means the check failed.
  echo "${_gamesum}  ${_gamepkg}" > sums
  cp "${pkgpath}/${_gamepkg}" "${srcdir}/"
  if [[ $(sha256sum --quiet -c sums 2> /dev/null) ]]; then
    error "${_gamepkg} FAILED the checksum"
    return 1
  fi
  msg "Found game package, installing..."
  cd $srcdir
  chmod +x $_gamepkg
}
# Run the vendor installer unattended into the package root, then add the
# launcher script, icon, and desktop entry.
package() {
  cd $srcdir
  ./FS_Linux_Latest.run --mode unattended --prefix ${pkgdir}/opt/${pkgname}
  install -Dm755 "${srcdir}/${pkgname}" "${pkgdir}/usr/bin/${pkgname}"
  install -Dm644 "${pkgdir}/opt/${pkgname}/fs_icon.png" "${pkgdir}/usr/share/pixmaps/${pkgname}.png"
  install -Dm644 ${srcdir}/${pkgname}.desktop "${pkgdir}/usr/share/applications/${pkgname}.desktop"
}
| true |
78b05e24ff668544e9dcc2a47abdc6b0d1f28125 | Shell | hoangvvo/next-connect | /bench/run.sh | UTF-8 | 190 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Benchmark one server implementation: $1 is the server script to launch.
server="$1"
# NOTE(review): $2 is captured but never used below - the wrk URL is
# hard-coded; confirm whether it was meant to replace it.
url="$2"
NODE_ENV=production node $server &
pid=$!
# NOTE(review): $server is used as the printf *format* string; fine for
# plain paths, but '%' characters in it would be interpreted.
printf "$server\n"
sleep 2
# 12 threads, 400 connections, 30 seconds against the local server.
wrk -t12 -c400 -d30s http://localhost:3000/user/123
printf "\n"
kill $pid | true |
5c823f4134e9196a96a6cee0f34581ee91ab3229 | Shell | kml74123/week13 | /script12 and 13 | UTF-8 | 259 | 2.796875 | 3 | [] | no_license | #!/bin/bash
echo 'make dir'
mkdir file1 file2
# NOTE(review): these rsync calls operate on directories; only the -a runs
# recurse. Looks like command practice rather than a real sync - confirm.
rsync file1 file2
rsync -a --delete file1
rsync -a file1
echo 'make user'
# NOTE(review): useradd -e expects an EXPIRE date, not a path; this was
# probably meant to be -d (home dir) or -s (shell). $5 is the script's
# fifth argument - confirm the caller passes a user name there.
useradd -e /usr/sbin/user $5
echo 'make password'
passwd $5
echo 'delete file1 file2'
rmdir file1 file2
echo 'check if it gone'
ls -l
| true |
1952acc580ab6342671b52cb67d1335ba2380f6b | Shell | ekamperi/pcca-tests | /utilities/sh/t_negpipe.sh | UTF-8 | 2,024 | 3.453125 | 3 | [] | no_license | #!/bin/sh
#
# Copyright (c) 2009, Stathis Kamperis
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
# The name of the script (i know, but it should work ok for our use).
SCRIPTNAME=$(basename "$0")
# This is the default exit status. Zero (0) signifies 'pass'.
FAIL=0
# Print a failure message prefixed with the script name and latch the
# global FAIL flag (assignments inside a function touch globals unless
# 'local' is used, so the caller sees this).
echof()
{
	printf '%s: %s\n' "$SCRIPTNAME" "$1"
	FAIL=1
}
# See also: http://www.freebsd.org/cgi/query-pr.cgi?pr=130298
# Each check exercises '!' applied to a pipeline: the negation must apply
# to the whole pipeline's exit status, so every condition below is false
# on a conforming shell and echof should never fire.
if true && ! true | true; then
	echof 'failed'
fi
if true && ! true | true | true; then
	echof 'failed'
fi
# Here '!' also prefixes an individual pipeline component.
if true && ! true | ! false | true; then
	echof 'failed'
fi
# Done
[ $FAIL -eq 0 ] && echo "$SCRIPTNAME: passed"
| true |
dfe994f9eb5ebe168c261314dfe3f50784566276 | Shell | zulip/zulip-mobile | /tools/checkout-keystore | UTF-8 | 1,916 | 4.21875 | 4 | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -e
## CLI PARSING
usage() {
cat >&2 <<EOF
usage: tools/checkout-keystore
Temporarily decrypt the keystore file for signing an Android release.
By using this, a release manager can ensure that the secrets are only
accessible for short periods when they're explicitly thinking about a
release.
This primarily guards against accidents: no slip of the keyboard,
no sharp-edged tool with surprising behavior in an edge case, and
no outright buggy tool can cause a validly-signed release build
unless the release manager has decrypted the key.
Setup:
* You'll need the \`sq\` command installed, the CLI for Sequoia PGP,
which is a modern implementation of OpenPGP.
* The decryption key is expected by default at a path
~/.config/sq/KEYNAME.key.pgp , where KEYNAME defaults to \$USER.
KEYNAME can be set with \`git config zulip.keystoreKeyName\`.
The directory ~/.config can be overridden by \$XDG_CONFIG_HOME.
* See also setup instructions in docs/howto/release.md .
EOF
exit 2
}
# This tool takes no arguments; any argument at all is a usage error.
if (( ${#} )); then
    usage
fi
## EXECUTION
# All paths are anchored at the git worktree root so the script works from
# any cwd inside the repo.
rootdir=$(git rev-parse --show-toplevel)
cleartext="${rootdir}"/android/release.keystore
cryptotext="${rootdir}"/android/release.keystore.pgp
# Key name: per-repo git config, falling back to the login name.
key_name=$(git config zulip.keystoreKeyName || echo "${USER}")
sq_user_dir="${XDG_CONFIG_HOME:-${HOME}/.config}"/sq
recipient_key="${sq_user_dir}"/"${key_name}".key.pgp
# `sq decrypt` prints obnoxious, confusing output about the key file
# to stderr. Filter that out (as a normal pipe-style filter, from
# stdin to stdout.)
# Pass stdin through to stdout, dropping sq's informational
# "Compressed ..." / "Encrypted ..." chatter while keeping everything else.
filter_sq_stderr()
{
    grep -Ev -e '^(Compressed|Encrypted) '
}
sq decrypt --recipient-key "${recipient_key}" \
-o "${cleartext}" "${cryptotext}" \
2> >(filter_sq_stderr >&2)
wait_min=15
# Background janitor: delete the decrypted keystore again after wait_min
# minutes so the secret is only briefly on disk.
(
  sleep "$(( ${wait_min} * 60 ))"
  rm -f "${cleartext}"
) &
cat >&2 <<EOF
Wrote decrypted file: ${cleartext}
This file will be deleted in ${wait_min} minutes.
EOF
| true |
eace8faf7f67fd9d436b8e2af8a1768a5581770d | Shell | loopduck0/OverTheWire | /scripts/remote_nc_file.sh | UTF-8 | 1,044 | 3.859375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# This was made when doing bandit 14 -> bandit 15 because I didn't
# realize the path to the password was in bandit 13 -> bandit 14 so
# I thought I needed to send the whole ssh key to localhost 30000.
# Also, I couldn't copy the ssh key with scp because of filesystem
# permissions. This script is the result.
#
# https://unix.stackexchange.com/questions/590265/running-a-script-on-a-remote-system-without-copying-the-script-there
#
# Usage:
# cat path/to/scripts/remote_nc_file.sh | ssh -i [bandit14 ssh key] bandit14@bandit.labs.overthewire.org -p 2220
#
# Read to EOF to avoid dealing with annoying character issues
#https://stackoverflow.com/questions/1167746/how-to-assign-a-heredoc-value-to-a-variable-in-bash
read -r -d '' SECRET_TO_NC <<'EOF'
<<< Insert password before running >>>
<<< Originally tried using this to send ssh key >>>
EOF
# Sanity check
echo "Running on $(hostname)"
# Send secret to localhost 30000
# Be sure to use quotes around the variable
# The trailing '-' keeps our own stdin attached after the secret is sent,
# so the nc session stays interactive.
cat <(echo "$SECRET_TO_NC") - | nc localhost 30000
| true |
e62a5fb52b8242c53f439b290d65ffc16b5074f8 | Shell | dlnichols/dotfiles | /zprofile | UTF-8 | 662 | 3.265625 | 3 | [
"MIT"
] | permissive | export EDITOR="nvim"
# Android SDK/NDK locations used by build tooling.
export ANDROID_HOME="$HOME/Android/Sdk"
export ANDROID_NDK="/opt/android-ndk/r15c"
# Decrypt a Kramer firmware file: <name>.kfw -> <name>.tgz (same directory).
# Returns 1 without running signer when the argument is not a .kfw file or
# does not exist.
function kdec() {
  echo ${1%.kfw}
  if [[ ! ${1} =~ ".kfw"$ ]]; then
    echo "Must select a KFW for decryption."
    return 1
  fi
  if [[ ! -f ${1} ]]; then
    echo "File ${1} does not exist."
    # Bug fix: previously fell through and ran signer on a missing file.
    return 1
  fi
  signer -o decrypt -r ~/.ssh/kramer/kfw.pri.pem -e ${1} -c ${1%.kfw}.tgz
}
# Encrypt a tarball into Kramer firmware: <name>.tgz -> <name>.kfw.
# Returns 1 without running signer when the argument is not a .tgz file or
# does not exist.
function kenc() {
  # Bug fix: the original stripped ".kfw" here (copied from kdec); the
  # input is a .tgz, so strip that suffix when echoing the base name.
  echo ${1%.tgz}
  if [[ ! ${1} =~ ".tgz"$ ]]; then
    echo "Must select a TGZ for encryption."
    return 1
  fi
  if [[ ! -f ${1} ]]; then
    echo "File ${1} does not exist."
    # Bug fix: previously fell through and ran signer on a missing file.
    return 1
  fi
  signer -o encrypt -u ~/.ssh/kramer/kfw.pub.pem -c ${1} -e ${1%.tgz}.kfw
}
| true |
d1c9b8511141beadf38ed4be4e588aea5c257390 | Shell | zloeber/MakeDevOps | /deploy/scripts/k8s/tunnel-kubernator-access.sh | UTF-8 | 1,134 | 3.328125 | 3 | [] | no_license |
#!/bin/bash
# Usage: Assuming a vagrant based kubernetes run this script in the same folder of the Vagrantfile
# * Then insert the password (by default: kubernetes)
# * Browse localhost:PROXYPORT
# Vagrant VM name and the local port that will proxy into the cluster.
MASTERNODE='master'
PROXYPORT='8001'
#vagrant ssh $MASTERNODE -c "if [ ! -d /home/$USERNAME ]; then sudo useradd $USERNAME -m -s /bin/bash && echo '$USERNAME:$PASSWORD' | sudo chpasswd; fi"
# Cluster API address scraped from kubectl cluster-info on the master.
# NOTE(review): KUBE_HOST is captured but not used below - confirm intent.
KUBE_HOST=$(vagrant ssh $MASTERNODE -c "kubectl cluster-info | head -n 1 | grep -o -E '([0-9]+\.){3}[0-9]+'")
# Endpoint of the kubernator service, with leading whitespace trimmed.
TARGET=$(vagrant ssh $MASTERNODE -c "kubectl describe services kubernator --namespace=kubernator | grep Endpoints | cut -d':' -f2")
TARGET=$(echo $TARGET | sed -e 's/^[ \t]*//')
# SSH connection details for the master VM, read from vagrant ssh-config.
VAGRANTUSER=$(vagrant ssh-config $MASTERNODE | grep 'User ' | awk '{print $NF}')
SSHKEYFILE=$(vagrant ssh-config $MASTERNODE | grep IdentityFile | awk '{print $NF}')
SSHPORT=$(vagrant ssh-config $MASTERNODE | grep Port | awk '{print $NF}')
echo ''
echo "Access kubernator - http://localhost:${PROXYPORT}/api/v1/namespaces/kubernator/services/kubernator/proxy/"
# Forward localhost:PROXYPORT into the VM so the proxy URL above works.
ssh -L $PROXYPORT:127.0.0.1:$PROXYPORT ${VAGRANTUSER}@127.0.0.1 -p $SSHPORT -i $SSHKEYFILE
| true |
db4dc573586c0eca3c9c58d2e485312f286c0251 | Shell | stemkit-collection/stemkit-util | /scripts/install-stemkit-cpp | UTF-8 | 1,597 | 4.3125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Copy the stemkit C++ headers and prebuilt libraries from a build tree
# into an install prefix.
# $1: source path, $2: target path, remaining args: optional ABI names.
main()
{
  [ "${#}" -lt 2 ] && {
    echo "USAGE: ${0} <source-path> <target-path> [<abi> ...]"
    exit 2
  }
  source="${1}"
  target="${2}"
  shift
  shift
  install_includes "${source}" "${target}"
  # No ABI arguments: install the default lib set; otherwise one pass per ABI.
  if [ "${#}" -eq 0 ]; then
    install_libs "${source}" "${target}"
  else
    for abi in "${@}"; do
      install_libs "${source}" "${target}" "${abi}"
    done
  fi
}
# Mirror $1/src/include into $2/include, replacing whatever was there.
install_includes()
{
  include_source="${1}/src/include"
  include_target="${2}/include"
  mkdir -p "${include_target}"
  # Canonicalize the target path; an empty result means the cd failed.
  include_target=`cd "${include_target}" >/dev/null 2>&1 && pwd`
  [ "${include_target:+set}" != set ] && {
    echo "Cannot figure target folder for header files"
    exit 3
  }
  # Start from a clean target, then copy every regular file (preserving
  # directory layout) while pruning Subversion metadata.
  rm -rf "${include_target}"
  mkdir -p "${include_target}"
  (
    cd "${include_source}" && {
      find . -name .svn -prune -o -type f -print | cpio -vdump "${include_target}"
    }
  )
}
# Install prebuilt libraries. With no ABI ($3 unset/empty) copy from
# $1/bin/lib into $2/lib; with an ABI prefer $1/bin/<abi>/lib -> $2/lib<abi>,
# falling back to the default source dir and a lib-<abi> target name.
install_libs()
{
  if [ "${3:+set}" != "set" ]; then
    library_source="${1}/bin/lib"
    library_target="${2}/lib"
  else
    library_source="${1}/bin/${3}/lib"
    if [ -d "${library_source}" ]; then
      library_target="${2}/lib${3}"
    else
      library_source="${1}/bin/lib"
      library_target="${2}/lib-${3}"
    fi
  fi
  rm -rf "${library_target}"
  mkdir -p "${library_target}"
  # Copy every library artifact (static, shared, dll, jar) flat into the
  # target; xargs -t echoes each cp, -i substitutes the file name for {}.
  find "${library_source}" \( -name "*.lib" -o -name "*.dll" -o -name "*.jar" -o -name "*.s[ol].*" \) -print | {
    xargs -t -i cp {} "${library_target}"
  }
}
main "${@}"
| true |
3430dddac4fa81bb01bdb881f3292c59ad1651ab | Shell | Allenhe123/myCaffe | /allen1/start_train.sh | UTF-8 | 325 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env sh
# Create the training output directory if needed, then launch Caffe training.
set -e
MY=/home/allen/mycaffe/caffe/examples/allen1
prefix=$MY/allen_caffenet_train
# Quote the path in tests and commands so word splitting cannot break them.
if [ ! -d "$prefix/" ];then
    echo "mkdir:"
    sudo mkdir "$prefix"
else
    echo "dir: $prefix already exist"
fi
echo "start to train..."
sudo ../../build/tools/caffe train -solver $MY/solver.prototxt
echo "train end" | true |
4afc8a198b34103d4ae88a849a12bb80f88c21e1 | Shell | CREOLEFAMILY/Debian-sh-Autoinstall_LastKernel | /install-kernel.sh | UTF-8 | 545 | 3.421875 | 3 | [] | no_license | #!/bin/bash
# Must run as root: installs kernel packages system-wide.
if [ "$(id -u)" != "0" ]; then
echo "This script must be run as root" 1>&2
exit 1
fi
mkdir tmp && cd tmp
# Ubuntu mainline daily kernel builds; package links are scraped from the
# directory-listing HTML fetched below.
url="http://kernel.ubuntu.com/~kernel-ppa/mainline/daily/current"
# $1 selects the flavour of .deb to fetch; i386 builds are skipped.
# NOTE(review): $1 is used unquoted inside a grep pattern - a value with
# regex metacharacters would change the match; confirm callers.
files="$(wget -c "$url" -q -O - | grep $1.deb | grep -v 'i386' | sed -e 's,.*<a href="\(.*\.deb\)">.*,\1,')"
for f in $files ; do
wget -c "$url/$f"
done
# The architecture-independent "all.deb" headers package is also needed.
files="$(wget -c "$url" -q -O - | grep all.deb | sed -e 's,.*<a href="\(.*\.deb\)">.*,\1,')"
for f in $files ; do
wget -c "$url/$f"
done
dpkg -i *.deb
cd .. && rm -rf tmp
echo "Done ..."
| true |
2c29f44824582fc65432b365dbd55f7fe0ae1cc9 | Shell | Clapperclaws/Mad-Move | /experiments_scripts/run_experiment.sh | UTF-8 | 1,860 | 2.71875 | 3 | [] | no_license | sudo pkill tcpdump
# Kill any leftover capture/daemon processes from a previous run.
sudo pkill wpa_supplicant
sudo pkill carfi-supplicant
sudo pkill carfi-roamingd
# Archive artifacts of earlier experiments before starting a new one.
mv in_progress/1* "test_dir"
# Epoch-seconds timestamp names this experiment's output directory.
timestamp=$(date +"%s")
#create directory with current timestamp
mkdir "in_progress/"${timestamp}
#Start tcpdump on wireless interface
#sudo tcpdump -i wlo1 -w ${timestamp}/wifi_${timestamp}.pcap &
sudo tcpdump -i wlo1 > "in_progress/"${timestamp}/wifi_${timestamp}.txt &
#Start tcpdump on lte interface
#en_name="$(ifconfig | grep 4163 | grep -v wlo1 | awk -F ":" '{print $1}')"
en_name="$(ifconfig -s | grep BMRU | grep -v wlo1 | awk '{print $1}')" ### add "| grep enp0 |" if needed
en_name="enp0s20u3u3"
#sudo tcpdump -i ${en_name} -w ${timestamp}/lte_${timestamp}.pcap &
sudo tcpdump -i ${en_name} > "in_progress/"${timestamp}/lte_${timestamp}.txt &
#######google-chrome "file:///media/ubuntu/79bf333c-7b40-4765-9e0a-c16fa2b41163/muse/dash.js/samples/dash-if-reference-player/index.html" &
#sudo wpa_supplicant -i wlo1 -c /etc/wpa_supplicant/wpa_supplicant_eap-sim.conf -t > ${timestamp}/wpa_supplicant_${timestamp}.log
#sudo ~/Desktop/carfi/carfi-supplicant -i wlo1 -d -c /etc/wpa_supplicant/madmove.conf -t > ${timestamp}/wpa_supplicant_${timestamp}.log &
#sudo ~/Desktop/carfi/carfi-roamingd wlo1 &
sudo ~/Desktop/carfi/carfi-supplicant -Dnl80211 -i wlo1 -c /etc/wpa_supplicant/madmove.conf -dt > "in_progress/"${timestamp}/wpa_supplicant_${timestamp}.log &
#sudo ~/Desktop/carfi/carfi-roamingd wlo1 > ${timestamp}/wpa_supplicant_${timestamp}.log &
sudo ~/Desktop/carfi/carfi-roamingd wlo1 |& ts '%.s:' |& tee "in_progress/"${timestamp}/roamingd_${timestamp}.log
#@watch 'echo -n `date +"%s"`" " >> test.log; ip tcp_metrics show 35.180.29.183 >> test.log'
#watch 'echo -n "time: "`date +"%s"`" " >> test.log; ip tcp_metrics show | grep '35.180.29.183' >> test.log' &
##ping and wget
| true |
63effebbe223a9665403488ceafa134876333bbb | Shell | StudyDeepLearningAI/supervised-translation | /run.sh | UTF-8 | 634 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Run training (default) or evaluation; data/model paths are anchored at
# this script's directory and may be overridden by $2/$3.
RUN_MODE=${1:-"train"}
DATA_DIR=${2:-$(dirname "$0")/data}
DOWNLOAD_DIR=${3:-$DATA_DIR}
MODEL_DIR=$(dirname "$0")/model
mkdir -p "$MODEL_DIR"
mkdir -p "$DATA_DIR"
mkdir -p "$DOWNLOAD_DIR"
if [ "$RUN_MODE" == "train" ]
then
    python "$(dirname "$0")/src/train.py" --model_dir "$MODEL_DIR" \
        --data_dir "$DATA_DIR" \
        --download_dir "$DOWNLOAD_DIR"
elif [ "$RUN_MODE" == "eval" ]
then
    python "$(dirname "$0")/src/eval.py" --model_dir "$MODEL_DIR" \
        --data_dir "$DATA_DIR"
else
    # Bug fix: the original message wrapped $RUN_MODE in backticks inside
    # double quotes, which *executed* the value as a command; plain quotes
    # just display it.
    echo "Invalid run mode '$RUN_MODE'."
fi
| true |
8ab95d8e3a819f19f5729556787f1b9b05c148f2 | Shell | zizu1985/vagrant | /docker/scripts/builds.sh | UTF-8 | 1,483 | 2.890625 | 3 | [] | no_license | # This is an optional file used for my setup.
# Set up environment for demos.
# Persist Java and SQLcl shortcuts for future login shells.
echo "export JAVA_HOME=/home/docker_user/java/latest" >> ~/.bash_profile
echo "alias sql=\"/home/docker_user/sqlcl/bin/sql\"" >> ~/.bash_profile
cd ~
unzip -oq /vagrant/software/sqlcl-18.3.0.259.2029.zip
mkdir ~/java
cd ~/java
tar -xf /vagrant/software/openjdk-11.0.1_linux-x64_bin.tar.gz
ln -s ./j* ./latest
cd ~
unzip -oq /vagrant/software/autorest_demo.zip
# Copy ORDS software and do build.
cd ~/dockerfiles/ords/ol7_ords/software
cp /vagrant/software/apex_18.2_en.zip .
cp /vagrant/software/apache-tomcat-9.0.13.tar.gz .
cp /vagrant/software/ords-18.3.0.270.1456.zip .
cp /vagrant/software/sqlcl-18.3.0.259.2029.zip .
cp /vagrant/software/openjdk-11.0.1_linux-x64_bin.tar.gz .
cd ~/dockerfiles/ords/ol7_ords
docker build --squash -t ol7_ords:latest .
# Copy database software and do build.
cd ~/dockerfiles/database/ol7_183/software
cp /vagrant/software/LINUX.X64_180000_db_home.zip .
cp /vagrant/software/apex_18.2_en.zip .
cd ~/dockerfiles/database/ol7_183
docker build --squash -t ol7_183:latest .
# Copy database software and don't do build.
cd ~/dockerfiles/database/ol7_122/software
cp /vagrant/software/linuxx64_12201_database.zip .
cp /vagrant/software/apex_18.2_en.zip .
cd ~/dockerfiles/database/ol7_122
#docker build --squash -t ol7_122:latest .
# Start application.
cd ~/dockerfiles/compose/ol7_183_ords
docker-compose up
#cd ~/dockerfiles/compose/ol7_122_ords
#docker-compose up
| true |
88a7703f212448559bf5932bdc3abd39aa69947a | Shell | JonShelley/azure | /benchmarking/NDv4/cc-slurm-ngc/util_scripts/check-vm-gpu-bw.sh | UTF-8 | 1,023 | 3.25 | 3 | [] | no_license | #!/bin/bash
## Note: Before you can run this script you need to build gpu-bwtest and copy the binary to BASE_DIR
## To build it you need to go to a NDv[2+] VM and if you are using a HPC marketplace image go then
## cd /usr/local/cuda/samples/1_Utilities/bandwidthTest/ and run "sudo make".
## Then copy bandwidthTest to $BASE_DIR/gpu-bwtest
##
BASE_DIR=/shared/data/azure/benchmarking/NDv4/cc-slurm-ngc/util_scripts
# Identify this VM via the Azure Instance Metadata Service (IMDS).
vmId=`curl -H Metadata:true "http://169.254.169.254/metadata/instance?api-version=2019-06-04" 2>/dev/null | jq '.compute.vmId'`
echo "VM ID: $vmId"
# Table header: per-GPU device-to-host and host-to-device bandwidth.
echo "Device: DtoH : HtoD"
for device in {0..7}
do
dtoh=`CUDA_VISIBLE_DEVICES=$device numactl -N$(( device / 2 )) -m$(( device / 2 )) ${BASE_DIR}/gpu-bwtest -dtoh |
grep 32000000 | awk '{print $2}'`
htod=`CUDA_VISIBLE_DEVICES=$device numactl -N$(( device / 2 )) -m$(( device / 2 )) ${BASE_DIR}/gpu-bwtest -htod | grep 32000000 | awk '{print $2}'`
echo "${device} : ${dtoh} : ${htod}"
done
| true |
63a73264a4e199d3c689da2739c49f9865c1c465 | Shell | diu-uf-bordeaux/bloc1 | /prog/data/data/guess/f6 | UTF-8 | 81 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Hex-escaped bytes spelling the answer; decoded one at a time below.
S="\x4C \x45 \x4E \x41"
for L in $S
do
	printf '%b' "$L"
done
# Final newline after the decoded string.
echo
| true |
99d9c9943e8b047d1575da981d28924feb06c3ce | Shell | ms705/ciel | /scripts/old/setup-testmachine.sh | UTF-8 | 372 | 2.859375 | 3 | [
"ISC",
"LicenseRef-scancode-other-permissive"
] | permissive | #!/bin/bash
# Provision every host listed on stdin (one per line): copy the setup
# script and the software tree, run setup, then start the FIRST host as
# the master and every subsequent host as a worker.
SWBASE="../run/mercator.hg/"
KEY="/home/ms705/.ssh/id_rsa"
I=0
while read -r line
do
    scp -i $KEY setupcache-local.sh $line:
    scp -i $KEY -r $SWBASE $line:
    ssh -i $KEY $line '/root/setup-local.sh'
    if [ $I -eq 0 ]; then
        ssh -i $KEY $line '/root/scripts/run_master.sh'
    else
        ssh -i $KEY $line '/root/scripts/run_worker.sh'
    fi
    # Bug fix: I was never incremented, so *every* host was started as a
    # master; count iterations so only the first one is.
    I=$((I+1))
done
exit 0 | true |
736b47e33f0f956407d2015c64c66a6c5aea0f20 | Shell | shopkeep/deis | /logger/bin/boot | UTF-8 | 1,473 | 4.0625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# This script is designed to be run inside the container
#
# fail hard and fast even on pipelines
set -eo pipefail
# set debug based on envvar
[[ $DEBUG ]] && set -x
# configure etcd
export ETCD_PORT=${ETCD_PORT:-4001}
export ETCD="$HOST:$ETCD_PORT"
export ETCD_PATH=${ETCD_PATH:-/deis/logs}
export ETCD_TTL=${ETCD_TTL:-10}
# fix perms on log directory
chmod 755 /var/log/deis
# spawn the service in the background
/go/bin/syslogd &
SERVICE_PID=$!
# smart shutdown on SIGINT and SIGTERM
function on_exit() {
  # Ask syslogd to terminate, reap it (stderr hidden in case it already
  # exited), and exit successfully so the container stops cleanly.
  kill -TERM $SERVICE_PID
  wait $SERVICE_PID 2>/dev/null
  exit 0
}
trap on_exit INT TERM
echo deis-logger running...
# publish the service to etcd using the injected EXTERNAL_PORT
if [[ ! -z $EXTERNAL_PORT ]]; then
# configure service discovery
PORT=${PORT:-514}
PROTO=${PROTO:-udp}
set +e
# wait for the service to become available on PORT
sleep 1 && while [[ -z $(netstat -lnu | awk "\$4 ~ \".$PORT\" && \$1 ~ \"$PROTO.?\"") ]] ; do sleep 1; done
# while the port is listening, publish to etcd
while [[ ! -z $(netstat -lnu | awk "\$4 ~ \".$PORT\" && \$1 ~ \"$PROTO.?\"") ]] ; do
etcdctl -C $ETCD set --ttl $ETCD_TTL $ETCD_PATH/host $HOST --no-sync >/dev/null
etcdctl -C $ETCD set --ttl $ETCD_TTL $ETCD_PATH/port $EXTERNAL_PORT --no-sync >/dev/null
sleep $(($ETCD_TTL/2)) # sleep for half the TTL
done
# if the loop quits, something went wrong
exit 1
fi
wait
| true |
ca0e46d777ce972e666fd47ef53720771a0f871c | Shell | smittal6/cs251 | /ass1/search.sh | UTF-8 | 622 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Usage: search.sh <file> <pattern1> <pattern2>
# Prints the lines of <file> matching both patterns; exits 1 otherwise.
#Arithmetic operators
if (($#!=3));
then echo "Number of arguments is not correct"
exit 1
fi
#Checking if file or not
if [[ ! -f "$1" ]];
then echo "Not a valid file"
exit 1
fi
# First grep narrows to lines containing $2; the second counts how many of
# those also contain $3. Patterns and file name are quoted (and patterns
# preceded by --) so spaces, globs, or leading dashes cannot break them.
number=$( grep -- "$2" "$1" | grep -c -- "$3")
if [[ "$number" == 0 ]];
then echo "No match."
exit 1
else
grep -- "$2" "$1" | grep -- "$3"
fi
| true |
cbed50838f1ad65782d9482e140f2e4564015367 | Shell | GustavAndreasson/flowermap | /translations/find_phrases.sh | UTF-8 | 238 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env bash
# For each unique string passed to the PHP "->__()" translation helper under
# the parent directory, machine-translate it from English into the language
# code given as $1 and print "original=translation" lines.
find .. -name '*.ph*' -exec grep -e "\->__(" {} + \
    | sed 's/.*__("\(.*\)").*/\1/' \
    | sort -u \
    | while read -r word; do
    # Quoting $word preserves its exact spacing both in the output line and
    # in the text sent to the translator (sort -u replaces "sort | uniq").
    echo "$word=$(echo "$word" | translate-bin -s google -f en -t "$1" | sed 's/^.*>\(.*\)/\1/g')"
done
| true |
d2af1e5119259663956b0bf58cdcc9b3e6cfc7ac | Shell | getbread/telepresence | /packaging/homebrew-package.sh | UTF-8 | 663 | 3.203125 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | #!/bin/bash
set -e
# Clone blackbird-homebrew:
BUILD_HOMEBREW_DIR=$(mktemp -d)
echo "Cloning into ${BUILD_HOMEBREW_DIR}..."
git clone git@github.com:datawire/homebrew-blackbird.git "${BUILD_HOMEBREW_DIR}"
FORMULA="${BUILD_HOMEBREW_DIR}/Formula/telepresence.rb"
# Update recipe
# TELEPRESENCE_VERSION is expected in the environment (set by CI).
cp dist/homebrew-formula.rb "$FORMULA"
sed "s/__NEW_VERSION__/${TELEPRESENCE_VERSION}/g" -i "$FORMULA"
# Pin the release tarball's sha256 into the formula.
TARBALL_HASH=$(sha256sum dist/telepresence-${TELEPRESENCE_VERSION}.tar.gz | cut -f 1 -d " ")
sed "s/__TARBALL_HASH__/${TARBALL_HASH}/g" -i "$FORMULA"
chmod 644 "$FORMULA"
# Commit and publish the updated formula.
cd "${BUILD_HOMEBREW_DIR}"
git add "$FORMULA"
git commit -m "Release ${TELEPRESENCE_VERSION}"
git push origin master
| true |
91a8657f354d9f041ff1969ba3b3fcff7ddc6fbd | Shell | sverrewl/pba-tools | /scripts/convert.sh | UTF-8 | 1,559 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# $1: directory of raw game assets, $2: output directory for converted files.
inPath=$1
outPath=$2
# Converter binaries (formatXXXX) are expected in the current directory.
BIN_DIR=.
# Exit in case of failure
set -e
# Handle empty lists
shopt -s nullglob
#mkdir -p $outPath
# WIP Stuff:
# DX11 models?!
for filename in $inPath/*.008E; do
echo "$filename"
$BIN_DIR/format0024 "$filename" "$outPath/$(basename "$filename" .008E).obj"
done
# PC11 Sound
for filename in $inPath/*.008F; do
echo "$filename"
#$BIN_DIR/format008F "$filename" "$outPath/$(basename "$filename" .008F).wav"
done
# Mostly working:
# Data [CSV or roms]
for filename in $inPath/*.0001; do
echo "$filename"
$BIN_DIR/format0001 "$filename" "$outPath/$(basename "$filename" .0001).csv"
done
# Textures
for filename in $inPath/*.0022; do
echo "$filename"
$BIN_DIR/format0022 "$filename" "$outPath/$(basename "$filename" .0022).tga"
done
# Models
for filename in $inPath/*.0024; do
echo "$filename"
$BIN_DIR/format0024 "$filename" "$outPath/$(basename "$filename" .0024).obj"
done
# Sounds
for filename in $inPath/*.0025; do
echo "$filename"
$BIN_DIR/format0025 "$filename" "$outPath/$(basename "$filename" .0025).wav"
done
for filename in $inPath/*.002A; do
echo "$filename"
$BIN_DIR/format002A "$filename" "$outPath/$(basename "$filename" .002A).json"
done
# Collisions
for filename in $inPath/*.002C; do
echo "$filename"
$BIN_DIR/format002C "$filename" "$outPath/$(basename "$filename" .002C).obj"
done
# PC11 Textures
for filename in $inPath/*.008D; do
echo "$filename"
$BIN_DIR/format008D "$filename" "$outPath/$(basename "$filename" .008D).jpg"
done
echo "Success!"
| true |
5a81d5330031ef3b5cad11ff7cf9295cbddf30d7 | Shell | Skyeward/Tonic | /gitsteps.sh | UTF-8 | 171 | 3.1875 | 3 | [] | no_license | #!/bin/bash -eu
# Stage and commit everything in the repo with the given message, then
# optionally push.
# $1: 1 to push after committing; anything else skips the push
# $2: commit message
push=${1}
commitnote=${2}
git add .
git commit -m "$commitnote"
echo "added and commmitted"
if [[ $push -eq 1 ]]
then
    # Bug fix: this branch ran 'git commit' a second time even though the
    # message claims a push happened; actually push.
    git push; echo "and pushed"
fi
| true |
257151ee1ad85843bd59ea6035d2567d3b85d38d | Shell | hitchiker42/dotfiles | /backup.sh | UTF-8 | 853 | 3.71875 | 4 | [] | no_license | #!/bin/sh
# Full-system rsync backup to a destination directory, with a timestamped
# summary file written alongside. Must run as root to read everything.
# (Bug fix: the root-check 'exit' previously returned status 0.)
[[ $EUID -ne 0 ]] && echo "Run as root" && exit 1
if [ $# -lt 1 ]; then
    echo "No destination defined. Usage: $0 destination [logfile]" >&2
    exit 1
elif [ $# -eq 1 ]; then
    DEST="$1"
    LOGFILE="$PWD/rsync.log"
elif [ $# -eq 2 ]; then
    DEST="$1"
    LOGFILE="$2"
elif [ $# -gt 2 ]; then
    echo "Too many arguments. Usage: $0 destination [logfile]" >&2
    exit 1
fi
START=$(date +%s)
# Mirror the filesystem, skipping pseudo-filesystems and caches; output is
# shown live and captured to LOGFILE.
rsync -aAXv /* "$DEST" --exclude={/dev/*,/proc/*,/sys/*,/tmp/*,/run/*,/mnt/*,/media/*,/lost+found,/var/lib/pacman/sync/*} 2>&1 | tee "$LOGFILE"
FINISH=$(date +%s)
TOTALTIME="total time: $(( ($FINISH-$START) / 60 )) minutes, $(( ($FINISH-$START) % 60 )) seconds"
echo "$TOTALTIME"
# Bug fix: the date format string was missing its leading '+', so date(1)
# rejected it and the backup summary file got no timestamp suffix.
BACKUP="$DEST"/"Backup_$(date +%d-%m-%y_%s)"
echo "Backup from $(date '+%A, %d %B %Y, %T')" > "$BACKUP"
echo "$TOTALTIME" >>"$BACKUP"
cat "$LOGFILE" >> "$BACKUP"
exit | true |
282026d5a65f0cc8618b6ac539a8c00f865b9f46 | Shell | yanzhirun/wrapfs-encode | /auto_run/module_gen.sh | UTF-8 | 605 | 2.84375 | 3 | [] | no_license | #!/bin/bash
# Unload any previously inserted wrapfs module before rebuilding.
if lsmod | grep wrapfs &>/dev/null ; then
    echo "There is already insmod wrapfs module!"
    sudo rmmod wrapfs
fi
# Build the out-of-tree wrapfs module against the 4.2.5 source tree using
# the distro header package's Module.symvers, then stage the .ko for insmod.
sudo cp /usr/src/linux-headers-4.2.0-27-generic/Module.symvers $HOME/Desktop/wrapfs-bata1/wrapfs/
sudo make -C /usr/src/linux-4.2.5 M=$HOME/Desktop/wrapfs-bata1/wrapfs/ modules
sudo cp $HOME/Desktop/wrapfs-bata1/wrapfs/wrapfs.ko $HOME/Desktop/wrapfs-bata1/auto_run/
#sudo insmod $HOME/Desktop/wrapfs-bata1/auto_run/wrapfs.ko user_name="$USER" pwd='aaa!@#123'
#if lsmod | grep wrapfs &>/dev/null ; then
# echo "insmod wrapfs OK!"
#else
# echo "insmod wrapfs err!"
#fi
| true |
e0ef39261a499e3354317d06e087bba28dfac561 | Shell | sergioguerrato/grafana-zabbix | /.circleci/deploy-docs.sh | UTF-8 | 701 | 3.28125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Exit script if you try to use an uninitialized variable.
set -o nounset
# Exit script if a statement returns a non-true return value.
set -o errexit
# Use the error status of the first failure, rather than that of the last item in a pipeline.
set -o pipefail
echo "current dir: $(pwd)"
# Setup git env
# CI_GIT_EMAIL / CI_GIT_USER / GH_PAGES_BRANCH / CIRCLE_* are expected in
# the CI environment (nounset above makes missing ones fail loudly).
git config --global user.email "$CI_GIT_EMAIL"
git config --global user.name "$CI_GIT_USER"
echo "git user is $CI_GIT_USER ($CI_GIT_EMAIL)"
# Create the pages branch from the current HEAD, replace the entire
# worktree with the freshly built site, and force-push it.
git checkout -b "$GH_PAGES_BRANCH"
# '|| true' tolerates rm failing (e.g. nothing to delete).
rm -rf ./* || true
mv ../gh-pages/docs/site/* ./
# --force stages files that may be covered by .gitignore.
git add --force .
git commit -m "build docs from commit ${CIRCLE_SHA1:0:7} (branch $CIRCLE_BRANCH)"
git log -n 3
git push origin "$GH_PAGES_BRANCH" --force
| true |
899348e7d3f4c1fb436580ccfe14411347912114 | Shell | novaextent/opensignal | /setup-env.sh | UTF-8 | 1,268 | 2.875 | 3 | [] | no_license | #!/usr/bin/env bash
# Generate per-deployment secrets and write them into the dotenv/config
# files consumed by the docker-compose services.
rm -rf ./.env
# Random 28-char alphanumeric password and two hex API key pairs.
PASSWORD=$(LC_ALL=C </dev/urandom tr -dc A-Za-z0-9 | head -c 28)
ACCESS_KEY=$(openssl rand -hex 16)
SECRET_KEY=$(openssl rand -hex 16)
# Copy the templates, then substitute the placeholders in place.
#cat postgresql/accountdb.env > ./.accountdb.env
#cat postgresql/messagedb.env > ./.messagedb.env
cat postgresql/postgres.env > ./.postgres.env
cat letsencrypt/letsencrypt.env > ./.letsencrypt.env
cat minio/minio.env > ./.minio.env
# BUG FIX: these previously used 'sed -ie ...'; GNU sed parses that as
# -i with backup suffix 'e', littering stray '*.enve' backup files.
# '-i -e' is the intended in-place edit without backups.
sed -i -e "s/CHANGE_ME/${PASSWORD}/g" ./.postgres.env
#sed -i -e "s/CHANGE_ME/${PASSWORD}/g" ./.accountdb.env
#sed -i -e "s/CHANGE_ME/${PASSWORD}/g" ./.messagedb.env
sed -i -e "s/CHANGE_ACCESS_KEY/${ACCESS_KEY}/g" ./.minio.env
sed -i -e "s/CHANGE_SECRET_KEY/${SECRET_KEY}/g" ./.minio.env
sed -i -e "s/SQS_ACCESS_KEY/${ACCESS_KEY}/g" ./conf/sqs/config_local.json
sed -i -e "s/SQS_SECRET_KEY/${SECRET_KEY}/g" ./conf/sqs/config_local.json
cat ./conf/signal/Signal.yml.tmpl > ./conf/signal/Signal.yml
sed -i -e "s/SQS_ACCESS_KEY/${ACCESS_KEY}/g" ./conf/signal/Signal.yml
sed -i -e "s/SQS_SECRET_KEY/${SECRET_KEY}/g" ./conf/signal/Signal.yml
sed -i -e "s/MINIO_ACCESS_KEY/${ACCESS_KEY}/g" ./conf/signal/Signal.yml
sed -i -e "s/MINIO_SECRET_KEY/${SECRET_KEY}/g" ./conf/signal/Signal.yml
sed -i -e "s/CHANGE_ME/${PASSWORD}/g" ./conf/signal/Signal.yml
echo "CHANGE_ME=${PASSWORD}" > ./.env
| true |
adffbfc4dd17292b64d67423e70764ebec9858a7 | Shell | camilodiazsal/SlackBuilds | /shorewall/shorewall.SlackBuild | UTF-8 | 890 | 3.1875 | 3 | [
"BSD-2-Clause-Views",
"BSD-2-Clause"
] | permissive | #!/bin/sh -e
# SlackBuild for shorewall: download the release tarball, run its
# installer into a staging tree, and package it with makepkg.
# (shebang runs with -e, so any failing step aborts the build)
PRGNAM='shorewall'
VERSION='5.2.3.3'
TAG='1_x'
APP_URL='http://canada.shorewall.net/pub/shorewall/5.2/shorewall-5.2.3/shorewall-5.2.3.3.tgz'
# Unique staging directory per run.
BUILD_DIR="/tmp/SlackBuild-${PRGNAM}-$(date +%s)"
# Download
mkdir -v -p ${BUILD_DIR}/install ${BUILD_DIR}/src
# Reuse an already-downloaded tarball in the current directory if present.
if ! [ -f ${PRGNAM}-${VERSION}.tgz ]; then
	wget ${APP_URL}
fi
tar -v -x -z -C ${BUILD_DIR}/src -f ./${PRGNAM}-${VERSION}.tgz
cp -v -f ./slack-desc ${BUILD_DIR}/install/slack-desc
cp -v -f ./doinst.sh ${BUILD_DIR}/install/doinst.sh
# Build
cd ${BUILD_DIR}/src/${PRGNAM}-${VERSION}
# Upstream installer; DESTDIR redirects it into the staging tree.
DESTDIR=${BUILD_DIR} ./install.sh ./shorewallrc.slackware
cd ${BUILD_DIR}
# Rename config files to *.new so installpkg won't clobber live configs
# (doinst.sh presumably handles the *.new files -- see that script).
find ./etc/shorewall -type f -exec mv {} {}.new \;
mv -v -f ./etc/logrotate.d/shorewall ./etc/logrotate.d/shorewall.new
rm -v -r -f ${BUILD_DIR}/src
# Create package
/sbin/makepkg -l y -c n /tmp/${PRGNAM}-${VERSION}-$(uname -m)-${TAG}.txz
rm -v -r -f ${BUILD_DIR}
| true |
e0629734f00b20843753035fac8f7a0c760bbd97 | Shell | stramazzo/oaudscript | /driversinstall_script.sh | UTF-8 | 415 | 2.5625 | 3 | [] | no_license | #!/bin/bash
#This script should install the needed drivers for the dfrobot display on a raspberry system with unsupported kernel.
#clone pimoroni rp_usbdisplay repository
git clone https://github.com/pimoroni/rp_usbdisplay
#launch rp_usbdisplay driver install for unsupported kernel
# Guard the cd: if the clone failed and the directory is missing, we must
# not fall through and run dpkg/reboot from the wrong directory.
cd rp_usbdisplay/dkms || exit 1
# -y keeps apt from prompting when this runs unattended.
sudo apt install -y dkms raspberrypi-kernel-headers
sudo dpkg -i rp-usbdisplay-dkms_1.0_all.deb
sudo reboot
| true |
beda1a2486338275786d7aa3d59538bbf65220e3 | Shell | kinvolk/benchmark-containers | /helpers/download_dashboard.sh | UTF-8 | 846 | 3.90625 | 4 | [] | no_license | #!/bin/bash
# Download (back up) a Grafana dashboard as JSON, writing it to stdout.
[ $# -lt 2 ] && {
  echo
  echo "$0 - download (backup) a Grafana dashboard, write to STDOUT"
  echo "Usage: $0 <grafana-API-key> <dashboard-id> [<hostname-and-port>] [>dashboard-backup.json]"
  echo "  Hostname/port is assumed to be localhost:3000 if not provided."
  echo "  (use"
  echo "     kubectl port-forward -n monitoring svc/prometheus-operator-grafana 3000:80"
  echo "   for lokomotive kubernetes)"
  exit 1
}
# NOTE(review): despite the usage text, $1 is read with cat, i.e. it is a
# *file containing* the API key, not the key itself -- confirm intent.
apikey="$(cat "$1")"
dashboard_uid="$2"
if [ $# -ge 3 ]; then
  host="$3"
else
  host="localhost:3000"
fi
# Progress goes to stderr so stdout stays clean JSON for redirection.
echo "Downloading dashboard UID $dashboard_uid from $host" >&2
# sed nulls the dashboard uid and strips the "url" field so the backup
# can be re-imported; jq pretty-prints the result.
curl -sH "Authorization: Bearer $apikey" \
     http://$host/api/dashboards/uid/$dashboard_uid \
     | sed -e "s/\"$dashboard_uid\"/null/" -e 's/,"url":"[^"]\+",/,/' | jq
| true |
3bf9dfa070f7343733d91a5fd337ac53a97648fb | Shell | whlzdy/firmware | /OneCloud/script/server/admin/cgi-bin/modifyServerInfo.sh | UTF-8 | 2,966 | 2.875 | 3 | [] | no_license | #!/bin/sh
# CGI handler: emit the "Modify Server Info" HTML page (with client-side
# form validation) when the user is logged in, otherwise emit the login
# index page. All output is generated through unquoted heredocs, so the
# $(...) substitutions inside them expand at runtime.
TOOL_BASE="/opt/onecloud/script/server/appdata"
TOOL_FUNC="${TOOL_BASE}/bin/funcs.sh"
# Source the shared helpers; presumably defines getUserLoginStatus and
# modifyServerInfo used below -- see funcs.sh.
. ${TOOL_FUNC}
state="$(getUserLoginStatus)"
if [ "${state}" == "true" ]
then
# Logged in: emit the form page. $(modifyServerInfo) near the bottom is
# expanded by the shell before the page is sent.
cat << EOF
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Modify Server Info</title>
<style type=text/css>
span {
	display: inline-block;
	width: 150px;
	text-align: left;
	font-weight:bold;
}
</style>
<script type="text/javascript">
	function checkSubmit() {
		var server1ip = login.server1ip.value;
		var server1mask = login.server1mask.value;
		var server1gateway = login.server1gateway.value;
		var server1mac = login.server1mac.value;
		var server1catagory = login.server1catagory.value;
		var server2ip = login.server2ip.value;
		var server2mask = login.server2mask.value;
		var server2gateway = login.server2gateway.value;
		var server2mac = login.server2mac.value;
		var server2catagory = login.server2catagory.value;
		if(server1ip.length == 0 || server1mask.length == 0 || server1gateway.length ==0 || server1mac.length == 0 || server1catagory.length == 0 || server2ip.length == 0 || server2mask.length == 0 || server2gateway.length ==0 || server2mac.length == 0 || server2catagory.length == 0) {
			alert("please input all fields!")
			return false;
		}
		if (!ValidateIPaddress(server1ip) || !ValidateIPaddress(server1mask) || !ValidateIPaddress(server1gateway) || !ValidateIPaddress(server2ip) || !ValidateIPaddress(server2mask) || !ValidateIPaddress(server2gateway)) {
			alert("please make sure all ip related fields are valid!")
			return false;
		}
		if (!ValidateMac(server1mac) || !ValidateMac(server2mac)) {
			alert("please make sure all mac related fileds are valid")
			return false;
		}
		if(! /^[0-9a-zA-Z]*$/.test(server1catagory) || ! /^[0-9a-zA-Z]*$/.test(server2catagory)){
			alert("catagory field only include digits[0-9] and characters[a-zA-Z]!")
			return false;
		}
		return true;
	}
	function ValidateIPaddress(ipaddress) {
		if (/^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/.test(ipaddress)) {
			return (true)
		}
		return (false)
	}
	function ValidateMac(mac) {
		if (/^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$/.test(mac) ) {
			return (true)
		}
		return (false)
	}
</script>
</head>
<body>
$(modifyServerInfo)
</body>
</html>
EOF
else
# Not logged in: serve the static login/index page instead.
cat << EOF
$(cat ${TOOL_BASE}/index.html)
EOF
fi
| true |
dab9fc76533caea030cbb3625d042de4d1c6bebe | Shell | gabijap/NLP-Enchanced-Search | /data/wikipedia/parallel_clean.sh | UTF-8 | 790 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Run data cleaning (remove out-of-vocabulary words, one sentence per
# line) in parallel on multiple CPU cores, then derive the train/test
# and reduced-size corpus variants.
mkdir -p ./cleaned
rm -f ./cleaned/*
# Guard every cd: if one failed, all following commands (including the
# rm/cat above and the concatenation below) would run in the wrong place.
cd ./grouped || exit 1
# One clean_data.py invocation per split chunk (x??), one per core.
parallel python ../clean_data.py ::: x??
cd ../cleaned || exit 1
cat * > ../wikipedia_all_cleaned-voc400000-ln160M.txt
cd ..
# Create variations (head/tail line counts select train/test/half/quarter).
head -n 149000000 wikipedia_all_cleaned-voc400000-ln160M.txt > wikipedia_all_cleaned-voc400000-ln160M-train.txt
tail -n 11685048 wikipedia_all_cleaned-voc400000-ln160M.txt > wikipedia_all_cleaned-voc400000-ln160M-test.txt
head -n 75000000 wikipedia_all_cleaned-voc400000-ln160M.txt > wikipedia_all_cleaned-voc400000-ln160M-train-half.txt
head -n 37500000 wikipedia_all_cleaned-voc400000-ln160M.txt > wikipedia_all_cleaned-voc400000-ln160M-train-quarter.txt
| true |
773359ee5b0ff573ea09bbf0fbb684e9410b7a6f | Shell | WilliamDrewAeroNomos/vulcan | /cluster/minikube/istio-minikube-config.sh | UTF-8 | 775 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Note: This script requires the installation of minikube, istioctl and kubectl
#
# Installation Instructions for:
#
# Minikube can be found at https://kubernetes.io/docs/tasks/tools/install-minikube/
#
# Istio and istioctl can be found at https://istio.io/latest/docs/setup/getting-started/#download
#
# Kubectl can be found at https://kubernetes.io/docs/tasks/tools/install-kubectl/
#
# If the installation process that you choose does not already do so, add the executables to your path.
#
# Tear down any existing minikube cluster before recreating it.
minikube stop
minikube delete
# --memory is in MiB: 16384 MiB = 16 GB (the old message said "16MB").
echo "Starting minikube with 16GB and 4 CPUs with version 1.18.3..."
minikube start --memory 16384 --cpus=4 --kubernetes-version=v1.18.3 --disk-size=128GB
echo "Installing with demo profile..."
istioctl install --set profile=demo
| true |
69d034c7e982b323ead891099f99b4e35af048f9 | Shell | ShelbyTV/BigData | /scripts/cleanupOldFiles.sh | UTF-8 | 550 | 4.03125 | 4 | [] | no_license | #!/bin/bash
#
# This script cleans up old folders of exported files from the
# updateVideoGraph.sh script
# cleanup old export folders
# $date may be preset by the caller; defaults to today (YYYY-MM-DD).
date=${date-$(date +%Y-%m-%d)}
# Only top-level directories whose names look like YYYY-MM-DD.
for dir in $(find . -maxdepth 1 -name '[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]' -type d)
do
   echo "Checking if $dir is a folder that needs to be removed"
   # only remove export files from earlier dates
   # '\<' is a lexicographic compare; it works as a date compare here
   # because the names are zero-padded YYYY-MM-DD.
   if [ $(basename $dir) \< $date ]
   then
      echo "Removing old export folder $dir"
      # subshell with set -x so the rm command is echoed before running
      bash -c "set -x; rm -rf $dir"
   else
      echo "Nope, $dir isn't old enough yet"
   fi
done
63cb02e10f1eb910632e997c49bfb85c04415dca | Shell | 64studio/plegger | /plegger-recorder | UTF-8 | 909 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env bash
# Plegger recorder: wait for sound on the ALSA input, record it to a
# timestamped MP3 until silence, then re-exec itself to wait again.
# sox and libsox-fmt-mp3 are dependencies
# get the path to this script
SCRIPT_PATH=$(readlink -f "$0")
# set path for recording
REC_PATH="/home/pi/recordings"
# set the silence threshold as a percentage
THRESHOLD=1%
# set the duration of sound before recording starts
START_DELAY=0.001
# set the duration of silence before recording ends
END_DELAY=5.0
# set ALSA as the recording mode for sox
export AUDIODRIVER=alsa
# set a large buffer to avoid recording glitches
export SOX_OPTS="--buffer 128000"
# tell sox which ALSA device to use
export AUDIODEV=hw:0
echo "-------------------"
echo "Hello from Plegger Recorder!"
# One file per take, named by start timestamp.
FILE=$REC_PATH/$(date +"%Y-%m-%d_%H-%M-%S")
INPUT_FILE=$FILE.mp3
echo "Recording $INPUT_FILE"
# sox 'silence' effect: start after $START_DELAY of sound above
# $THRESHOLD, stop after $END_DELAY of silence; abort script on failure.
rec -r 44100 -C 192 -c 2 $INPUT_FILE silence 1 $START_DELAY $THRESHOLD 1 $END_DELAY $THRESHOLD || exit 1
echo "I quit!"
# restart the script
exec "$SCRIPT_PATH"
| true |
29ec3b8c772dc263f3033aa176687bfef231b470 | Shell | abelnation/statsy-docker | /bin/start | UTF-8 | 540 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/bash
E_ERROR=1

# Refuse to run if the image has not been built yet (IMAGE_NAME is
# expected to be exported by ./bin/build).
if [ -z "${IMAGE_NAME}" ]; then
    echo "Image is not built. Run ./bin/build first"
    # BUG FIX: was 'exit $E_ERROR=1', which expands to 'exit 1=1' and is
    # rejected by the shell ("numeric argument required").
    exit "$E_ERROR"
fi

CONTAINER_NAME="${IMAGE_NAME}-container"

# Kill and remove any existing container with the same name.
if [ "$(docker ps -a | grep -c "$CONTAINER_NAME")" -gt 0 ]; then
    docker kill "$CONTAINER_NAME"
    docker rm "$CONTAINER_NAME"
fi

# Start from an empty log directory so supervisor logs are fresh.
if [ -d "./logs" ]; then
    rm -fr ./logs/*
fi

# Run detached, mounting ./logs and publishing the web (80), graphite
# (2003/2004), statsd (8125/8126) and supervisor (9001) ports.
# NOTE(review): the image is referenced by the literal name 'statsy',
# not "$IMAGE_NAME" -- confirm the two always match.
docker run -d \
    -v "$(pwd)/logs:/var/log/supervisor" \
    -p 80:80 \
    -p 2003:2003 \
    -p 2004:2004 \
    -p 8125:8125 \
    -p 8126:8126 \
    -p 9001:9001 \
    --name "$CONTAINER_NAME" \
    statsy
4e80c13f1d88ec7c519b174e3412475d007b1b37 | Shell | daspete/vagrant-lamp | /scripts/bootstrap.sh | UTF-8 | 2,431 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env bash
# Vagrant LAMP provisioner.
# Usage: bootstrap.sh <vmname> <hostname> <dbname> <dbuser> <dbpasswd>
VMNAME=$1
HOSTNAME=$2
DBNAME=$3
DBUSER=$4
DBPASSWD=$5

# Base packages and guest additions.
apt-get install -y software-properties-common
apt-get install -y python-software-properties
apt-get update
# -y added: a bare 'apt-get upgrade' prompts and stalls unattended runs.
apt-get upgrade -y
apt-get install -y dkms curl build-essential netbase wget git
apt-get install -y virtualbox-guest-x11

# Preseed the MySQL root password so the install is non-interactive,
# then create the application database and user.
echo "mysql-server mysql-server/root_password password $DBPASSWD" | debconf-set-selections
echo "mysql-server mysql-server/root_password_again password $DBPASSWD" | debconf-set-selections
apt-get -y install mysql-server
mysql -uroot -p"$DBPASSWD" -e "CREATE DATABASE $DBNAME"
mysql -uroot -p"$DBPASSWD" -e "grant all privileges on $DBNAME.* to '$DBUSER'@'localhost' identified by '$DBPASSWD'"
# Listen on all interfaces so the host machine can reach MySQL.
# BUG FIX: was 'sed -ie ...' -- GNU sed reads that as -i with backup
# suffix 'e' and leaves a stray /etc/mysql/my.cnfe file behind.
sed -i -e 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
mysql -uroot -p"$DBPASSWD" -e "grant all privileges on $DBNAME.* to '$DBUSER'@'%' identified by '$DBPASSWD'"

# Preseed and install phpMyAdmin plus common PHP extensions.
echo "phpmyadmin phpmyadmin/dbconfig-install boolean true" | debconf-set-selections
echo "phpmyadmin phpmyadmin/app-password-confirm password $DBPASSWD" | debconf-set-selections
echo "phpmyadmin phpmyadmin/mysql/admin-pass password $DBPASSWD" | debconf-set-selections
echo "phpmyadmin phpmyadmin/mysql/app-pass password $DBPASSWD" | debconf-set-selections
echo "phpmyadmin phpmyadmin/reconfigure-webserver multiselect none" | debconf-set-selections
apt-get install -y phpmyadmin
apt-get install -y php-gettext php5-mcrypt php5-curl

# Run Apache as the vagrant user so the shared folder is writable,
# allow .htaccess overrides, and surface PHP errors for development.
sed -i "s/User .*/User vagrant/" /etc/apache2/apache2.conf
sed -i "s/Group .*/Group vagrant/" /etc/apache2/apache2.conf
sed -i "s/AllowOverride None/AllowOverride All/g" /etc/apache2/apache2.conf
sed -i "s/error_reporting = .*/error_reporting = E_ALL/" /etc/php5/apache2/php.ini
sed -i "s/display_errors = .*/display_errors = On/" /etc/php5/apache2/php.ini
sed -i "s/max_upload_size = .*/max_upload_size = 512M/" /etc/php5/apache2/php.ini

# Point /var/www at the shared /vagrant/www folder (idempotent).
if ! [ -L /var/www ]; then
    rm -rf /var/www
    ln -fs /vagrant/www /var/www
fi

# Virtual host for the project; \${APACHE_LOG_DIR} is escaped so Apache,
# not this script, expands it.
cat > /etc/apache2/sites-available/$VMNAME.conf <<EOF
<VirtualHost *:80>
    DocumentRoot /var/www/public
    ErrorLog \${APACHE_LOG_DIR}/error.log
    CustomLog \${APACHE_LOG_DIR}/access.log combined
    ServerName $HOSTNAME
</VirtualHost>
EOF

a2ensite "$VMNAME"
a2enmod rewrite
a2enmod headers
service apache2 restart

# Project-level setup runs as the vagrant user; it is expected to leave
# composer.phar in the vagrant home directory.
su -c "/vagrant/scripts/postinstall.sh" -s /bin/bash vagrant
mv /home/vagrant/composer.phar /usr/bin/composer
ifconfig
a155b2923ea1b4f11181bfe73914f95859671c15 | Shell | jewettaij/visfd_tutorials | /prokaryotes/Hylemonella_gracilis/STEP_4_segment_membranes.sh | UTF-8 | 21,527 | 3.546875 | 4 | [
"CC0-1.0"
] | permissive | #!/usr/bin/env bash
# WARNING: In these instructions, it is assumed that the voxel width
# is 19.6 Angstroms. If not, replace this number everywhere.
# ---- Goal: Segment the inner membrane of the cell and its contents ----
#
# Terminology: I use the phrase "3-D image" and "tomogram" interchangeably.
#
# When detecting the features we care about inside a cell, it will makes our
# life much easier if we can ignore the region outside the cell.
# So the next step is to figure out which voxels lie inside the cytoplasm,
# and which voxels lie outside.
#
# To do that, we will attempt to segment the inner membrane of a cell,
# generating a PLY file (containing surface polyhedral mesh geometry),
# on the boundary of the cytoplasm, as well as a new 3-D image file (REC file)
# indicating which voxels lie inside the cytoplasm.
# We will use that REC file as a mask in later calculations.
#
# PREREQUISITES
#
# 1) A 3-D image file (tomogram) containing the cell you want to segment.
# It is a -VERY- good idea to crop this tomogram beforehand so that it
# only includes the cell you want to segment. (You can do this using
# "3dmod", "trimvol", or the "crop_mrc" program distributed with visfd.)
# Otherwise the algorithm will be extremely slow, and your computer is also
# likely to run out of memory (freezing up and becomming unresponsive).
#
# The tomogram in this example is named "orig_crop.rec".
#
# 2) If the tomogram contains extremely dark objects, such as gold fiducial
# beads or ice contamination, then it is also a GOOD idea to remove this debris
# from the tomogram. To do that create a mask file (eg. "mask_blobs.rec").
# This file has brightness 0 in regions from the image containing these
# objects that we want to exclude from consideration.
#
# To do that, follow the instructions in the "STEP_1" file.
# That will create a 3-D image file named "fiducial_blobs.rec".
# The bright regions in that image contain any gold fiducial beads
# which might be present outside the cell. In the steps below, we
# a "mask" 3D image file whose voxel brightnesses are 1 everywhere we want to
# consder, and 0 everwhere we want to ignore (such as the gold beads).
# So, I invert the brightnesses of the voxels in the "fiducial_blobs.rec"
# file using this command:
filter_mrc -in fiducial_blobs.rec -out mask_membranes.rec -thresh2 1 0
# Later we will use this mask_membranes.rec file when detecting other objects.
# Suggestion: Use EMAN2
#
# You can use programs like "EMAN2" to generate an image that only shows
# the voxels that belong to the surface of the membrane.
# Any kind of preprocessing you can do the image to enhance the features
# you want to detect (such as membranes), will improve the detection sensitivity
# and make the following steps easier. In this tutorial, I did not use EMAN2.
# --------- Detect all membranes in the image (width ~= 80 Angstroms) --------
# Detect all dark membrane-like surfaces (~80 A wide) with tensor voting;
# -save-progress caches the result so later steps can -load-progress it.
filter_mrc -in orig_crop.rec -w 19.6 \
           -mask mask_membranes.rec \
           -out orig_crop_mem80.rec \
           -membrane minima 80.0 \
           -tv 5 -tv-angle-exponent 4 -tv-best 0.1 \
           -save-progress temporary_file
# Details: (Feel free to skip.)
#
# -The "-w 19.6" argument specifies the voxel width in Angstroms.
# -The "80.0" parameter indicates the approximate width parameter for
# membrane detection (in Angstroms). It should be approximately equal to the
# membrane width but it can be a little bit larger. Values of 60-85 Angstroms
# seem to work well.
# -The "-save-progress" argument enables us to save time. In the future, we
# skip the calculation we just ran by using the "-load-progress" argument.
# -We use "-membrane" with the "minima" argument because we are
# searching for a dark membrane-like surface on a brighter background.
# -The "-tv 5" and "-tv-angle-exponent" "-tv-best" parameters
# can usually be left alone. (They are used for tensor-voting, which is a
# popular strategy to improve signal-to-noise when detecting membranes.)
# The command above detects all of the membrane-like surfaces in the 3-D image.
# However these membranes are incomplete. They are often have large missing
# regions. For example, in electron tomography:
# Membrane surfaces in the XZ plane are often faint (compared to the YZ plane).
# Membrane surfaces in the XY plane are invisble due to the missing wedge
# artifact. These missing regions sometims break a smooth closed membrane
# into multiple disconnected fragments.
#
# The process of creating closed surfaces from one or more incomplete
# membrane fragments is called "surface reconstruction".
# We will use several tools to do that below.
# ----------------- segment the inner membrane ----------------------
#
# Now open the newly created tomogram "orig_crop_mem80.rec"
# with visualizer software (such as IMOD's "3dmod" program).
#
# OPTIONAL:
# If any of the membranes are faint or difficult to see, then try reducing
# the brightness of the brightest voxels. Unfortunately this is hard to do
# in IMOD/3dmod. So alternatively, you can create a new 3-D image file
# ("orig_crop_mem80_cl0.4.rec") whose voxel brightnesses have been clipped.
#
# filter_mrc -in orig_crop_mem80.rec \
# -w 19.6 \
# -mask mask_membranes.rec \
# -out orig_crop_mem80_cl0.4.rec \
# -cl -0.4 0.4
#
# Hopefully the membranes should be clearly visible in the new file.
# You can try experimenting with the clipping parameter ("0.4").
# (See the "filter_mrc" documentation concerning the "-cl" argument for details)
# If the new image is clear, then replace the old file with the new version:
#
# mv -f orig_crop_mem80_cl0.4.rec orig_crop_mem80.rec
#
# Membranes in EM tomograms are often faint or invisible in certain directions.
# So the membrane detector can usually only detect small disconnected fragments
# in the membrane surface. You can see these when you view "orig_crop_mem80.rec"
#
# In the next step, we will try to fuse these fragments into a closed surface.
# This process is mostly automatic. However it usually does require some
# manual intervention. If the membranes we care about appear to be broken into
# disconnected fragments (as you can see in this example when viewing the
# "orig_crop_mem80.rec" file we just created),
# ...then we must manually let our software know which of these small fragments
# belong to the larger closed membrane surface that we want to segment.
#
# So view the membrane 3-D image file we just created ("orig_crop_mem80.rec")
# using IMOD/3dmod. Then click on places along the membrane you want
# to segment, selecting least one point from each likely fragment.
# After each click, press the "F" key to print out the XYZ coordinates of
# that point to 3dmod's control window. Copy these XYZ coordinates
# (in units of voxels) to a text file ("links_membrane.txt"), and surround
# them with parenthesis. (See example below.)
#
# (You can create the "links_membrane.txt" file with a text editor. However,
# here I use the unix command "cat" together with "<<" and ">" redirection
# to copy the following lines of text into the "links_membrane.txt" file.
cat << EOF > links_membrane.txt
# inner membrane
# (X, Y, Z)
(315, 69, 68)
(164.5, 86, 68)
(133, 55, 68)
(54, 2, 68)
(212, 173, 68)
(245, 217, 68)
# (Note: Blank lines are used to separate different connected surfaces)
# outer membrane
# (X, Y, Z)
(313, 50, 68)
(168, 66, 68)
(154, 54, 68)
(30, 3, 68)
(213, 190, 68)
(221, 221, 68)
EOF
# Feel free to open up the "links_membrane.txt" file with a text editor to see
# what it looks like. (Lines beginning with "#" are comments and are ignored.)
# Now use the information in the "links_membrane.txt" file to help
# filter_mrc connect small membrane patches into a larger membrane.
# Merge membrane fragments into clusters using the manually supplied
# must-link points; keep the 2nd-largest cluster (the inner membrane)
# and export its oriented point cloud for surface reconstruction.
filter_mrc -in orig_crop.rec \
           -w 19.6 \
           -load-progress temporary_file \
           -out membrane_clusters.rec \
           -connect 0.2 -connect-angle 15 \
           -must-link links_membrane.txt \
           -select-cluster 2 \
           -normals-file membrane_inner_pointcloud.ply
# In this calculation, we asked "filter_mrc" to generate a PLY
# file containing the surface geometry of the second-largest surface
# in the image. Usually this will be the inner membrane.
# (The outer membrane is usually the largest surface in the tomogram.)
#
# All of these parameters make reasonably good defaults for membrane
# detection EXCEPT the "-connect" parameter ("0.2" in the example).
# It must be chosen carefully because it will vary from image to image.
# As of 2022-2-08, strategies for choosing this parameter are discussed here:
# https://github.com/jewettaij/visfd/blob/master/doc/doc_filter_mrc.md#determining-the--connect-threshold-parameter
#
# So repeat the step above with different parameters until the resulting
# "membranes_clusters.rec" shows that the various surface fragments
# that belong to the surface you care about have been merged and have
# the same ID-number.
# (Verify this by opening the file in 3dmod, clicking on different portions
# of the dark surface that you care about, pressing the "F" key each time,
# and checking if the resulting ID numbers match.)
# If they do, then it is time to view the PLY file we just created
# in 3-D to verify it has the shape we expect.
# You can use the "meshlab" program to do this:
#
# meshlab membrane_inner_pointcloud.ply
#
# Does it look correct?
#
# (Don't worry too much about the huge missing regions at the top
# and bottom of the cell. These are due to the missing wedge.
# However if there is something undesirable and unexpected attached
# the the membrane, or connecting two different membranes together,
# then we should probably eliminate this problematic part of the image
# and try again before proceeding. This is typically done using the
# "-mask", "mask-rect-sutract", or "mask-sphere-sutract" arguments.
# See "filter_mrc" documentation for details.)
#
# If everything is okay, then proceed with the next step (using "PoissonRecon").
# Now use "SSDRecon". This program will attempt to create a closed
# surface which passes through these points in a reasonable way.
# "SSDRecon" is distributed along with "PoissonRecon", which is available at:
#
# https://github.com/mkazhdan/PoissonRecon
#
# (I first tried using "PoissonRecon", but it failed to produce a single
# closed connected surface. The "SSDRecon" software performed better
# for this example, so I am using that instead. The two programs
# use nearly identical arguments.)
# Reconstruct a closed surface through the oriented point cloud;
# --scale 2.0 enlarges the bounding region to help close large holes.
SSDRecon --in membrane_inner_pointcloud.ply \
         --out membrane_inner_rough.ply --depth 12 --scale 2.0
# Now see if the reconstruction process worked.
meshlab membrane_inner_rough.ply
# If it doesn't look good, it could be for many reasons.
#
#
# Possible catastrophic failures:
#
#
# If the surface is not a closed surface, try running
# "SSDRecon" again with a larger "--scale" parameter.
# (If the surface volume is missing large regions,
# you might try using a smaller "--scale" parameter.)
#
# If the surface looks terrible (as if part of it is inside-out), then follow
# these instructions:
#
# https://github.com/jewettaij/visfd/blob/b9564ca3bb7b3d52ab2d38fbef15330012accdcd/doc/doc_filter_mrc.md#manual-specification-of-fragment-orientation
#
# If that fails, try using "PoissonRecon" instead of "SSDRecon".
# (Both programs accept the same arguments and are distributed together.
# Instructions for installing both programs should be in this directory.)
#
# The SSDRecon and PoissonRecon programs change all the time.
# If both "SSDRecon" and "PoinssonRecon" fail, then try downloading the
# version I used when preparing this tutorial. To do that, enter:
# git clone https://github.com/jewettaij/PoissonRecon ~/PoissonRecon_jewettaij
# and follow the same installation instructions you used earlier to compile
# that version of the code.
#
# Minor failures:
#
# If the surface becomes accidentally fused with something else
# (such as another membrane, or some other object), you can use one or more
# "-mask-sphere-subtract" and/or "-mask-rect-subtract" arguments
# to ignore the region or regions where these two objects touch each other.
# This will be demonstrated later.
#
# The surface will likely have some defects, typically located near
# the edge of the detectable portion of the original membrane surface
# where the signal is weak and noise dominates. Depending upon
# how serious those defects are, you may want to re-run the membrane
# connection step again (combining the "-connect" argument with the
# "-mask-sphere-subtract" and/or "-mask-rect-subtract" arguments
# to omit problematic regions of the 3-D image from consideration.
#
# These kinds of problems can almost always be resolved this way.
# (See the documentation for "filter_mrc" for details.)
#
# Fortunately, for this membrane, these steps weren't really necessary.
#
# Finally, the interpolation process often has difficulty filling large holes
# on the surface, such as the top and bottom holes created by the missing wedge.
# The surface may appear be bulged out in these regions. This is normal.
# This problem can be ameliorated by smoothing. (See the instructions
# below for smoothing the membrane using meshlab.)
# Now we are ready to apply a smoothing filter to the closed surface
# we created in the previous step "membrane_inner_rough.ply".
#
# 1) Open the "membrane_inner_rough.ply" file in meshlab, and select the
# "Filters"->"Smoothing, Fairing and Decomposition"->"HC Laplacian Smooth"
# menu option. Click the "Apply" button until the surface looks reasonable.
# (I ended up pressing the "Apply" button about 30 times for this example to
# eliminate the subtle bulge in the Z direction. Some tomograms may require
# more smoothing.)
#
# 2) Optional: Reduce the number of polygons in the surface. (Warning: This step
# sometimes causes tears in the surface which cause "voxelize_mesh.py" to fail.
# If "voxelize_mesh.py" crashes later on, then skip this step.) Select the
# "Filters"->"Remeshing, Simplification and Reconstruction"->
# ->"Simplification: Quadratic Edge Collapse Decimation" menu option.
# Reduce the number of polygons in the mesh down to about 30000 or less.
#
# 3) Then select the "File"->"Export Mesh" menu option and give the file a
# new name (eg "membrane_inner.ply"). (Optional: Uncheck the
# box named "binary encoding" box before clicking on the "Save" button.
# Not all mesh analysis software can read PLY files with a binary encoding.)
# Now find all of the voxels which lie inside the closed surface we
# just detected using the "voxelize_mesh.py" program.
# This program will create a new image file "membrane_inner.rec" whose voxels
# have brightness = 1 if they lie within the membrane, and 0 otherwise.
# Rasterize the closed mesh: output voxels are 1 inside the surface,
# 0 outside (used later as a cytoplasm mask).
voxelize_mesh.py \
    -w 19.6 \
    -m membrane_inner.ply \
    -i orig_crop.rec \
    -o membrane_inner.rec
# --- WARNING: This program uses a lot of RAM and can crash your computer. ----
# The command below uses 2.9 GB of (free) RAM, and requires 2-3 minutes
# to complete. But this is a small tomogram (397x318x154).
# But the memory and time required scale proportionally to the tomogram's size.
# So a standard-size 1024x1024x500 size tomomogram would require 78 GB of RAM
# and would require at least an hour of time (using the "voxelize_mesh.py"
# script included with VISFD, assuming it was not cropped beforehand.)
# As of 2022-2, this exceeds the memory of most desktop computers.
# But servers often have 128 GB of ram or more. (If it helps, I included
# a slurm script in the directory named "slurm_scripts_for_voxelization"
# which can be used on a shared server that has SLURM installed.)
# (Hopefully voxelize_mesh.py will eventually be replaced by a more
# effiecient program.)
# -----------------------------------------------------------------------------
# ...The newly created file "membrane_inner.rec" is a 3-D image file whose
# voxels have brightness 1 if they are inside the closed membrane surface, and
# 0 outside. Later on, we will use this image to decide what voxels to consider
# when trying to segment ribosomes and other contents within the cytoplasm.
# Unfortunately, since the membrane itself is usually several voxels in
# thickness, some of those membrane voxels will be located within this volume,
# since it lies right on the boundary. The membrane is darker than its
# surroudings.
#
# We don't want the detection software to notice or get distracted
# by these little dark membrane fragments on the cytoplasm boundary
# when trying to detect objects that are supposed to be inside it.
# So I create a new 3-D image containing a slightly thinner, contracted
# version of the previous image we just created ("cytoplasm.rec").
# (For details, see the "filter_mrc" documentation for "-erode-gauss".)
# ---- Initial attempt (commenting out) ----
# filter_mrc -in membrane_inner.rec -w 19.6 \
# -out cytoplasm.rec \
# -erode-gauss 40 # Contract the interior region by a distance of 40 Angstroms
# # (This is approximately half the membrane thickness.)
# Later, I decided 40 wasn't enough, and raised it to 90 Angstroms
# ------------------------------------------
# Shrink the inner-membrane interior so dark membrane voxels on the
# boundary are excluded from the cytoplasm mask.
filter_mrc -in membrane_inner.rec -w 19.6 \
           -out cytoplasm.rec \
           -erode-gauss 90 # Contract the interior region by a distance of 90 Angstroms
# This might throw away a few voxels near the edge of the cytoplasm
# which don't contain any membrane, but that's okay. We just want to
# make sure the remaining voxels in the new image ("cytoplasm.rec")
# are entirely wthin the cytoplasmic volume of the cell.
# ----------------- segmenting the outer membrane ----------------------
#
# Now try to connect the outer membrane surface fragments together.
#
# We will use "-select-cluster 1" this time, because we want to segment
# the largest surface. (Previously we used "-select-cluster 2".)
# Same clustering as for the inner membrane, but keep the largest
# cluster (-select-cluster 1): the outer membrane.
filter_mrc -in orig_crop.rec \
           -w 19.6 \
           -load-progress temporary_file \
           -out membrane_clusters.rec \
           -connect 0.2 -connect-angle 15 \
           -must-link links_membrane.txt \
           -select-cluster 1 \
           -normals-file membrane_outer_pointcloud.ply
# Open the "membrane_clusters.rec" using 3dmod to see if the membranes are
# still fused. If they are still fused, did you put the
# sphere(s) in the right place? The sphere you ignored should be visible in
# the "membrane_clusters.rec" file (as a dark region with brightness=0).
# Then proceed as we did before, using meshlab, SSDRecon, and voxelize_mesh.py
# to segment the outer membrane and create a "membrane_outer.rec file.
# Closed-surface reconstruction of the outer membrane (same parameters
# as for the inner membrane).
SSDRecon --in membrane_outer_pointcloud.ply \
         --out membrane_outer_rough.ply --depth 12 --scale 2.0
# Now see if the reconstruction process worked.
meshlab membrane_outer_rough.ply
# If it worked, then, as we did earlier, smooth the mesh,
# and export a new mesh (named "membrane_outer.ply").
# Rasterize the (smoothed, exported) outer-membrane mesh into a 0/1
# interior mask, matching the original tomogram's dimensions.
voxelize_mesh.py \
    -w 19.6 \
    -m membrane_outer.ply \
    -i orig_crop.rec \
    -o membrane_outer.rec
# ------ Maintianing a fixed distance between concentric membranes ------
#
# A new problem may arise:
#
# A large portion of the inner and outer membranes are not visible in the
# original tomogram. The "SSDRecon" program attempts to infer where
# they are. But it does this separately and indepenently for each surface.
# As a result, there is no way to guarantee that the inner membrane will
# lie within the outer membrane.
#
# You might want to make make sure that the two surfaces never get too
# close together. For example, suppose you want to make sure the that
# the inner membrane (represented by the "membrane_inner.rec" file) is
# separated from the outer membrane (in "membrane_outer.rec") by
# *at least* 220 Angstroms. To do that, use this procedure:
# Shrink the outer-membrane interior by 220 A, then intersect (voxelwise
# multiply) with the inner-membrane interior so the inner surface is
# forced to stay at least 220 A inside the outer one.
filter_mrc -in membrane_outer.rec \
           -w 19.6 \
           -out membrane_outer_erode220.rec \
           -erode-gauss 220
combine_mrc membrane_inner.rec "*" membrane_outer_erode220.rec \
            membrane_inner_opt.rec
# The new file ("membrane_inner_opt.rec") is an alternate ("optimized"?)
# version of the original "membrane_inner.rec" file which has been
# contracted in places where after SSDRecon and smoothing,
# the (invisible portion of the) inner membrane lies closer
# than 220 Angstroms to the outer membrane.
#
# (Note: The distance between the inner and outer membranes in
# gram negative bacteria is usually a little bit larger than this.
# You can play with this number.)
# Optional: The "cytoplasm.rec" file we created earlier depends on
# the "membrane_inner.rec" file. Since we updated that file, we can
# create a new "cytoplasm.rec" file, this time using the using
# the updated "membrane_inner_opt.rec" file.
# Regenerate the cytoplasm mask from the distance-corrected inner
# membrane, again contracting to exclude boundary membrane voxels.
filter_mrc -in membrane_inner_opt.rec -w 19.6 \
           -out cytoplasm.rec \
           -erode-gauss 90 # Contract the interior region by a distance of 90 Angstroms
# If you want to segment the periplasm (the space between the inner and outer
# membranes, you can use the "combine_mrc" program to subtract the volume
# of the inner membrane from the volume of the outer membrane. You can do this
# by multiplying the brightness of the two files this way:
# Periplasm = outer-membrane interior minus inner-membrane interior:
# the ',1,0' suffix inverts the inner mask before the voxelwise multiply.
combine_mrc membrane_inner_opt.rec "*" membrane_outer.rec,1,0 periplasm.rec
# Finally, after you are done detecting membranes, it is a good idea to delete
# all of the temporary files that we created earlier (which are quite large).
rm -f temporary_file*
| true |
97b358f85c4fdf3759cab6b403d9bf722a40533b | Shell | thorsten-klein/CommonAPI-generation | /E01HelloWorld/clean.sh | UTF-8 | 514 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# NOTE(review): this guard re-runs the script through /bin/bash if the
# previous command's status is non-zero -- presumably a workaround for
# being launched by a non-bash shell; confirm it is still needed.
if [ $? -ne 0 ]; then /bin/bash -c "$0"; exit; fi
# Remove generated build artifacts for the CommonAPI example.
echo "removing folder ./build ..."
rm -rf ./build
echo "removing folder ./src-gen ..."
rm -rf ./src-gen
echo "removing CodeBench output folder(s) ..."
# The per-configuration output folders below are intentionally disabled.
#rm -rf ./Client_Debug/
#rm -rf ./Client_Release/
#rm -rf ./Server_Debug/
#rm -rf ./Server_Release/
#rm -rf ./VM_Linux_Client_Debug/
#rm -rf ./VM_Linux_Client_Release/
#rm -rf ./VM_Linux_Server_Debug/
#rm -rf ./VM_Linux_Server_Release/
# Pause so the window stays open until a key is pressed.
read -n1 -r -p "-------------------------------------" key
| true |
a508329073af100156b85bfe5c7f2e47345719d6 | Shell | andycmaj/AspNetCore.ApplicationBlocks | /build.sh | UTF-8 | 357 | 3.703125 | 4 | [] | no_license | #!/usr/bin/env bash
# Define default arguments.
SCRIPT="build.cake"
CAKE_ARGUMENTS=()

# Parse arguments. Everything after "--" is handed to Cake verbatim.
for i in "$@"; do
    case "$1" in
        -s|--script) SCRIPT="$2"; shift ;;
        --) shift; CAKE_ARGUMENTS+=("$@"); break ;;
        *) CAKE_ARGUMENTS+=("$1") ;;
    esac
    shift
done

# Start Cake. "$SCRIPT" is quoted so script paths containing spaces survive.
exec dotnet cake "$SCRIPT" "${CAKE_ARGUMENTS[@]}"
ff2816ec232fd71c44f5516bca0f17d30228e288 | Shell | gffhcks/dotfiles | /.bash/PS1.sh | UTF-8 | 3,583 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# PS1
# Custom command prompt; the current git branch is rendered by __git_ps1.
[[ -s "$HOME/.bash/git-prompt.sh" ]] && source ~/.bash/git-prompt.sh

function __define_prompt()
{
	# Bash prompt escapes used below (see PROMPTING in bash(1)):
	#   \u user, \H full hostname, \w cwd, \t 24h time, \! history number,
	#   \$ '#' for root / '$' otherwise, \[ \] wrap non-printing sequences.
	local user="\u"
	local longhost="\H"
	local time24="\t"
	local dir="\w"
	local history="\!"
	local root="\$"
	# Escaped so the substitution is re-evaluated every time the prompt draws.
	local branch="\$(__git_ps1)"

	# Color sequences (ESC [ 3x m = foreground; 0 resets all attributes).
	local reset="\[\e[0m\]"
	local cyan="\[\e[36m\]"
	local yellow="\[\e[33m\]"

	# Terminal title bar: ESC ] 2 ; <text> BEL
	local titletext="$user@$longhost: $dir"
	local titlebar="\[\e]2;$titletext\a\]"

	# Assemble: title, cyan history number, time, cwd, yellow branch, $/#.
	local PS1="$titlebar$cyan$history$reset $time24 $dir$yellow$branch$reset$root "
	echo "$PS1"
}

PS1=$(__define_prompt)
| true |
91745dadda340150c91d57d309bfd412305190d3 | Shell | fabiolac19/munin-iops-emc-y-hp | /munin-iops-hp.sh | UTF-8 | 10,510 | 3.828125 | 4 | [] | no_license | #!/bin/bash
#!/bin/bash
# Munin configuration generator: builds aggregate IOPS graphs for the HP
# storage backend out of the Xen virtual-machine definitions kept in SVN.
#
# Usage:
#   ./munin-iops-hp.sh output_file
#
# Part 1: fetch the VM definitions and extract "<vm> <disk> <lv-name>" rows.
#
# Validate the input: the first argument is the output file name (the file
# itself is created later). Report an error and stop when it is missing.
if [ -n "$1" ]; then
	salida=$1
else
	echo "******************************************************************************"
	echo "[ERROR] Ingrese correctamente el nombre del directorio o del archivo de salida"
	echo "******************************************************************************"
	exit 1
fi

# Work inside a fresh temporary directory; abort if we cannot enter it so
# the checkout and cleanup below never run against the wrong location.
directory=$(mktemp -d)
cd "$directory" || exit 1

# Check out the repository holding the VM definitions and drop the folders
# that must not be scanned (suspended/old/error machines, templates, .svn).
svn co http://noc-svn.psi.unc.edu.ar/servicios/xen
rm -rf "$directory/xen/suspendidos" "$directory/xen/.svn" "$directory/xen/old" "$directory/xen/error" "$directory/xen/templates"

# Scratch files used below (truncated if they already exist).
> result.txt
> vm.txt
> escrituras.txt
> lecturas.txt
> total_escrituras.txt
> total_lecturas.txt
> total.txt

# For every VM definition file, copy it line by line into vm.txt.
for file in "$directory"/xen/* ; do
	while read -r line; do
		echo "$line" >> vm.txt
	done < "$file"
	# Keep only lines mentioning "dev " or "dev/mapper/rogelio", excluding
	# xen/description/(name/device noise; survivors go to parser.txt.
	# egrep "dev|mapper" vm.txt | egrep -v "rogelio|device|args|description|file|\(name" > parser.txt
	grep -E "dev |dev/mapper/rogelio" vm.txt | grep -Ev "xen|description|\(name|device" > parser.txt
	# Number of candidate lines.
	i=$(wc -l < parser.txt)
	# Walk parser.txt in pairs of consecutive lines (lineaN0 / lineaN1).
	for (( c=1; c<=i; c++ ))
	do
		lineaN0=$(sed -n -e ${c}p parser.txt)
		(( d=c+1 ))
		lineaN1=$(sed -n -e ${d}p parser.txt)
		# A valid pair is "<disk info>" followed by "<vm name>". When the
		# first five characters match (two disk lines in a row) or the
		# second line is empty, the pair is skipped.
		if [[ "$(echo $lineaN0 | cut -c1-5)" = "$(echo $lineaN1 | cut -c1-5)" || -z "$lineaN1" ]]; then
			: # duplicated pair, or lineaN1 is empty - nothing to record
		else
			# Strip "(dev ", parentheses, ":disk ", "uname",
			# "phy:/dev/mapper/" and everything before the last "/",
			# then append "<file> <disk> <vm>" to result.txt.
			# NOTE: $file/$lineaN0/$lineaN1 are intentionally unquoted so
			# word splitting collapses the fields to single spaces.
			echo $file $lineaN0 $lineaN1 | sed 's/(dev //g;s/(//g;s/)//g;s/\:disk //g;s/uname//g;s/phy\:\/dev\/mapper\///g;s/.*\///g' >> result.txt
			# Both lines of the pair were consumed; skip the next index.
			(( c=c+1 ))
		fi
	done
	> vm.txt
done

# Part 2: turn each "<vm> <disk> <lv>" row into munin data-source entries.
while read -r line; do
	# Split the row into its three space-separated fields.
	vm=$(echo $line | cut -f1 -d' ')
	disk=$(echo $line | cut -f2 -d' ')
	res=$(echo $line | cut -f3 -d' ')
	# Look up the munin node path for this VM in munin.conf: take the first
	# matching "[node]" header and strip the brackets.
	pline=$(echo $(grep -E "$vm\.|$vm\]" /etc/munin/munin.conf | head -n1 | sed 's/^.*\[//g;s/\]//g'))
	# Skip VMs that are not present in the munin configuration.
	if [ -z "$pline" ]; then
		:
	else
		# Writes
		echo "$res=$pline:diskstats_iops.$disk.wrio" >> escrituras.txt
		# Reads
		echo "$res=$pline:diskstats_iops.$disk.rdio" >> lecturas.txt
		# Total writes
		echo "$pline:diskstats_iops.$disk.wrio" >> total_escrituras.txt
		# Total reads
		echo "$pline:diskstats_iops.$disk.rdio" >> total_lecturas.txt
		# Grand total (reads + writes)
		echo "$pline:diskstats_iops.$disk.wrio" >> total.txt
		echo "$pline:diskstats_iops.$disk.rdio" >> total.txt
	fi
done < result.txt

# Part 3: assemble the munin-conf.d fragment.

# Remove every backslash from the last line appended to "$salida" (closing
# the continued graph_order list) and terminate the section.
finish_section() {
	local last
	last=$(tail -n1 "$salida" | sed 's/\\//g')
	sed '$ d' "$salida" > temp.txt
	cat temp.txt > "$salida"
	echo "$last" >> "$salida"
	echo " " >> "$salida"
}

# Append every line of file $1 to "$salida" as a continued list entry.
append_entries() {
	local entry
	while read -r entry; do
		echo " $entry \\" >> "$salida"
	done < "$1"
}

# Start from an empty output file and write the munin group header.
> "$salida"
echo "[UNC;PSI;NOC;Infraestructura;Storage;HP]" >> "$salida"
echo " update no" >> "$salida"

echo " diskstats_iops.update no" >> "$salida"
echo " diskstats_iops.graph_title IOPS Lecturas" >> "$salida"
echo " diskstats_iops.graph_category IOPS" >> "$salida"
echo " diskstats_iops.graph_args --base 1000" >> "$salida"
echo " diskstats_iops.graph_vlabel IOs/sec" >> "$salida"
echo " diskstats_iops.graph_order \\" >> "$salida"
append_entries lecturas.txt
finish_section

echo " diskstats_iops_1.update no" >> "$salida"
echo " diskstats_iops_1.graph_title IOPS Escrituras" >> "$salida"
echo " diskstats_iops_1.graph_category IOPS" >> "$salida"
echo " diskstats_iops_1.graph_args --base 1000" >> "$salida"
echo " diskstats_iops_1.graph_vlabel IOs/sec" >> "$salida"
echo " diskstats_iops_1.graph_order \\" >> "$salida"
append_entries escrituras.txt
finish_section

echo " diskstats_iops_2.update no" >> "$salida"
echo " diskstats_iops_2.graph_title IOPS Lecturas Total" >> "$salida"
echo " diskstats_iops_2.graph_category IOPS" >> "$salida"
echo " diskstats_iops_2.graph_args --base 1000" >> "$salida"
echo " diskstats_iops_2.graph_vlabel IOs/sec" >> "$salida"
echo " diskstats_iops_2.total_iops_1.label IOPS Lecturas Total" >> "$salida"
echo " diskstats_iops_2.total_iops_1.sum \\" >> "$salida"
append_entries total_lecturas.txt
finish_section

echo " diskstats_iops_3.update no" >> "$salida"
echo " diskstats_iops_3.graph_title IOPS Escrituras Total" >> "$salida"
echo " diskstats_iops_3.graph_category IOPS" >> "$salida"
echo " diskstats_iops_3.graph_args --base 1000" >> "$salida"
echo " diskstats_iops_3.graph_vlabel IOs/sec" >> "$salida"
echo " diskstats_iops_3.total_iops_2.label IOPS Escrituras Total" >> "$salida"
echo " diskstats_iops_3.total_iops_2.sum \\" >> "$salida"
append_entries total_escrituras.txt
finish_section

echo " diskstats_iops_4.update no" >> "$salida"
echo " diskstats_iops_4.graph_title IOPS Total" >> "$salida"
echo " diskstats_iops_4.graph_category IOPS" >> "$salida"
echo " diskstats_iops_4.graph_args --base 1000" >> "$salida"
echo " diskstats_iops_4.graph_vlabel IOs/sec" >> "$salida"
echo " diskstats_iops_4.total_iops_3.label IOPS Total" >> "$salida"
echo " diskstats_iops_4.total_iops_3.sum \\" >> "$salida"
append_entries total.txt
finish_section

# Install the fragment into munin-conf.d and clean up the working directory.
mv "$salida" "/etc/munin/munin-conf.d/$salida"
rm -rf "$directory"
| true |
47c6fb5d87f204ff92e422e01b1be7718d46209c | Shell | siemens/fluffi | /core/dependencies/libuuid/make_dep.sh | UTF-8 | 4,073 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Copyright 2017-2020 Siemens AG
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author(s): Thomas Riedmaier, Pascal Eckmann, Roman Bendt

# Core count (informational; compilation below parallelizes via job control).
THREADS=$(nproc)
# Architecture tag, e.g. "x86-64", derived from the host's /bin/bash binary.
ARCH=$(file /bin/bash | awk -F',' '{print $2}' | tr -d ' ')

# Start from clean output directories.
rm -rf "lib/$ARCH"
rm -rf include
mkdir -p "lib/$ARCH"
mkdir -p include

# Getting util-linux (contains libuuid), pinned to a known-good commit.
rm -rf util-linux
git clone https://github.com/karelzak/util-linux.git
cd util-linux
git checkout e3bb9bfb76c17b1d05814436ced62c05c4011f48

# Compiling libuuid manually (don't want to get autotools to run in our build environment)
mkdir -p "build$ARCH"
cd "build$ARCH"
DEFINES="-DHAVE_ERR -DHAVE_ERRX -DHAVE_ERR_H -DHAVE_GETRANDOM -DHAVE_JRAND48 -DHAVE_LANGINFO_ALTMON -DHAVE_LANGINFO_H -DHAVE_LANGINFO_NL_ABALTMON -DHAVE_LOCALE_H -DHAVE_LOFF_T -DHAVE_MEMPCPY -DHAVE_NANOSLEEP -DHAVE_NETINET_IN_H -DHAVE_NET_IF_H -DHAVE_PROGRAM_INVOCATION_SHORT_NAME -DHAVE_SRANDOM -DHAVE_STDLIB_H -DHAVE_STRNDUP -DHAVE_STRNLEN -DHAVE_SYS_FILE_H -DHAVE_SYS_IOCTL_H -DHAVE_SYS_SOCKET_H -DHAVE_SYS_SYSCALL_H -DHAVE_SYS_SYSMACROS_H -DHAVE_SYS_TIME_H -DHAVE_SYS_UN_H -DHAVE_TLS -DHAVE_UNISTD_H -DHAVE_USLEEP -DHAVE_UUIDD -DHAVE_WARN -DHAVE_WARNX -DHAVE___PROGNAME -DSTDC_HEADERS -D_GNU_SOURCE -D_PATH_RUNSTATEDIR=\"/usr/var/run\" "

# Compile every translation unit in the background, then wait for all jobs.
# $DEFINES is intentionally unquoted: each -D flag must be its own word.
uuid_sources="clear compare copy gen_uuid isnull pack parse predefined unpack unparse uuid_time"
lib_sources="randutils md5 sha1"
for src in $uuid_sources; do
	(gcc -std=c99 $DEFINES -c -fPIC -I ../include "../libuuid/src/$src.c" -o "$src.o")&
done
for src in $lib_sources; do
	(gcc -std=c99 $DEFINES -c -fPIC -I ../include "../lib/$src.c" -o "$src.o")&
done
wait

# NOTE(review): clear.o is compiled but, exactly as in the original script,
# not archived — confirm whether uuid_clear is meant to be excluded.
ar rcs libuuid.a compare.o copy.o gen_uuid.o isnull.o pack.o parse.o predefined.o unpack.o unparse.o uuid_time.o randutils.o md5.o sha1.o

cd ../..
cp "util-linux/build${ARCH}/libuuid.a" "lib/${ARCH}/libuuid.a"
cp util-linux/libuuid/src/uuid.h include/uuid.h
rm -rf util-linux
| true |
15735505f2711d7ea29cb17adb41a793ebcb97e1 | Shell | jinyun1tang/netcdf-c | /nc_test/test_byterange.sh | UTF-8 | 4,231 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# Resolve the test source directory and pull in the shared test helpers.
if [ "x$srcdir" = x ]; then srcdir=$(pwd); fi
. ../test_common.sh

set -e

# Note: thredds-test is currently down and the URLs need to be replaced
# Test Urls
if [ "x$FEATURE_THREDDSTEST" = x1 ]; then
    URL3="https://thredds-test.unidata.ucar.edu/thredds/fileServer/pointData/cf_dsg/example/point.nc#mode=bytes"
    URL4b="https://thredds-test.unidata.ucar.edu/thredds/fileServer/irma/metar/files/METAR_20170910_0000.nc#bytes"
fi
if [ "x$FEATURE_S3TESTS" != xno ]; then
    URL4a="https://s3.us-east-1.amazonaws.com/noaa-goes16/ABI-L1b-RadC/2017/059/03/OR_ABI-L1b-RadC-M3C13_G16_s20170590337505_e20170590340289_c20170590340316.nc#mode=bytes"
    URL4c="s3://noaa-goes16/ABI-L1b-RadC/2017/059/03/OR_ABI-L1b-RadC-M3C13_G16_s20170590337505_e20170590340289_c20170590340316.nc#mode=bytes"
    # Test alternate URL with no specified region
    URL4e="http://noaa-goes16.s3.amazonaws.com/ABI-L1b-RadF/2022/001/18/OR_ABI-L1b-RadF-M6C01_G16_s20220011800205_e20220011809513_c20220011809562.nc#mode=bytes,s3"
fi
if [ "x$FEATURE_S3TESTS" = xyes ]; then
    # Requires auth
    URL3b="s3://unidata-zarr-test-data/byterangefiles/upload3.nc#bytes"
    # Requires auth
    URL4d="s3://unidata-zarr-test-data/byterangefiles/upload4.nc#bytes&aws.profile=unidata"
fi
URL4f="https://crudata.uea.ac.uk/cru/data/temperature/HadCRUT.4.6.0.0.median.nc#mode=bytes"
if [ "x$FEATURE_S3TESTS" = xyes ]; then
    # Do not use unless we know it has some permanence (note the segment 'testing' in the URL);
    URL4x="https://s3.us-west-2.amazonaws.com/coawst-public/testing/HadCRUT.4.6.0.0.median.nc#mode=bytes,&aws.profile=none"
fi

echo ""
testsetup() {
    # Generate the NetCDF-3 (and, when HDF5 is enabled, NetCDF-4) test files
    # and upload them to the S3 endpoint given in $1.
    U=$1
    # Create and upload test files
    rm -f upload4.nc upload3.nc
    ${execdir}/../nczarr_test/s3util -u ${U} -k /byterangefiles clear
    ${NCGEN} -lb -3 ${srcdir}/nc_enddef.cdl
    mv nc_enddef.nc upload3.nc
    ${execdir}/../nczarr_test/s3util -u ${U} -k /byterangefiles/upload3.nc -f upload3.nc upload
    if [ "x$FEATURE_HDF5" = xyes ]; then
        ${NCGEN} -lb -4 ${srcdir}/nc_enddef.cdl
        mv nc_enddef.nc upload4.nc
        ${execdir}/../nczarr_test/s3util -u ${U} -k /byterangefiles/upload4.nc -f upload4.nc upload
    fi
    rm -f tst_http_nc3.cdl tst_http_nc4?.cdl
}
testcleanup() {
    # Drop the local artifacts and clear the remote byterangefiles prefix
    # on the S3 endpoint given in $1.
    U=$1
    rm -f upload4.nc upload3.nc
    ${execdir}/../nczarr_test/s3util -u ${U} -k /byterangefiles clear
}
testbytes() {
    # $1 short tag for the output files, $2 expected format keyword, $3 URL.
    TAG="$1"
    EXPECTED="$2"
    U="$3"
    # Verify the dataset kind reported by "ncdump -k".
    K=$(${NCDUMP} -k "$U" | tr -d '\r\n')
    if [ "x$K" != "x$EXPECTED" ]; then
        echo "test_http: -k flag mismatch: expected=$EXPECTED have=$K"
        exit 1
    fi
    rm -f tst_http_$TAG.cdl
    # Now test the reading of at least the metadata
    ${NCDUMP} -h "$U" >tst_http_$TAG.cdl
    # compare
    diff -wb tst_http_$TAG.cdl ${srcdir}/ref_tst_http_$TAG.cdl
}
tests3auth() {
    # Same as testbytes, but for authenticated S3 URLs: the dump is renamed
    # to nc_enddef and compared against the original CDL source.
    TAG="$1"
    EXPECTED="$2"
    U="$3"
    K=$(${NCDUMP} -k "$U" | tr -d '\r\n')
    if [ "x$K" != "x$EXPECTED" ]; then
        echo "test_http: -k flag mismatch: expected=$EXPECTED have=$K"
        exit 1
    fi
    rm -f tmp_${TAG}.cdl
    # Now test the reading of at least the metadata
    ${NCDUMP} -n nc_enddef "$U" >tmp_${TAG}.cdl
    # compare
    diff -wb tmp_$TAG.cdl ${srcdir}/nc_enddef.cdl
}
if [ "x$FEATURE_S3TESTS" = xyes ]; then
    testsetup https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data
fi

echo "*** Testing reading NetCDF-3 file with http"
if [ "x$FEATURE_THREDDSTEST" = x1 ]; then
    echo "***Test remote classic file"
    testbytes nc3 classic "$URL3"
fi

if [ "x$FEATURE_HDF5" = xyes ]; then
    echo "***Test remote netcdf-4 files: non-s3"
    if [ "x$FEATURE_THREDDSTEST" = x1 ]; then
        testbytes nc4b netCDF-4 "$URL4b"
    fi
    testbytes nc4f netCDF-4 "$URL4f"
fi

# Fixed: this guard previously tested $URL3B (wrong case) while the variable
# is URL3b, so the s3-auth classic-file test could never run.
if [ "x$URL3b" != x ]; then
    echo "***Test remote netcdf-3 file: s3 auth"
    tests3auth nc3b classic "$URL3b"
fi
if [ "x$URL4a" != x ]; then
    echo "***Test remote netdf-4 file: s3"
    testbytes nc4a netCDF-4 "$URL4a"
fi
if [ "x$URL4c" != x ]; then
    echo "***Test remote netcdf-4 file: s3"
    testbytes nc4c netCDF-4 "$URL4c"
fi
if [ "x$URL4d" != x ]; then
    echo "***Test remote netcdf-4 file: s3 auth"
    tests3auth nc4d netCDF-4 "$URL4d"
fi
if [ "x$URL4e" != x ]; then
    echo "***Test remote netcdf-4 file: s3 noauth"
    testbytes nc4e netCDF-4 "$URL4e"
fi

# Cleanup
if [ "x$FEATURE_S3TESTS" = xyes ]; then
    testcleanup https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data
fi

exit
| true |
5c4493ff0f2dbfa31c2ab3e5a3ec52d9af2ee783 | Shell | Smile0007-Sec/bucines | /Bucin.sh | UTF-8 | 6,072 | 2.515625 | 3 | [] | no_license | #Coded by Smile0007
# Do not re-publish ("recode") this tool as your own work.
# Ask the author for permission first: 083138046547.

# ANSI color palette used by the quotes below.
green="\033[32;1m"
yellow="\033[33;1m"
indigo="\033[34;1m"
red="\033[35;1m"
purple="\033[37;1m"
cyan="\033[36;1m"
white="\033[39;1m"
# NOTE(review): $aqua and $pink below were never defined (they expand to
# nothing); the intended colors are unknown, so they are left untouched.

clear
echo $yellow"==================================================================="
toilet -f future Tools Bucin | lolcat
figlet -f future Kumpulan kata kata | lolcat
echo $aqua"*" $yellow"Author : Smile0007"
echo $red"*" $yellow"Team : Indonesia Security Sistem"
echo $indigo"*" $yellow"Contact : 083138046547"
echo $pink"=+#=+#=+#=+#================================================+#=+#=+#"
echo
echo $cyan "1. Bucin by. Alpinskay "
echo $cyan "2. Bucin by. Shakira Azhar"
echo $green "3. Bucin by. TUAN NIGHTMARE"
echo $green "4. Bucin by. Dipa"
echo $indigo "5. Bucin by. Mr.Selow"
# Fixed: used the undefined "$indigi" (typo for $indigo).
echo $indigo " 6. Bucin by. LCI-WTT-K15UD"
echo $red "7. Bucin by. Ngarhack"
echo $red "8. Bucin by. Zeus"
echo $yellow "9. Bucin by. TRH"
echo $yellow "10. Bucin by. Mr.Quiin Ucup"
echo
echo $indigo"╭─Pilih Mau Bucin Siapa?? "
# -r keeps backslashes in the answer literal; "$pil" is quoted in every test
# below so an empty answer no longer triggers "[: unary operator" errors.
read -r -p "╰─•> " pil

if [ "$pil" = '1' ]
then
    echo
    echo
    clear
    echo
    toilet -f future Bucin by Alpinskay | lolcat
    echo
    echo
    echo
    sleep 0.1
    echo $green"jujur, berat rasanya menahan kerinduan ini.."
    sleep 2
    echo $red"sungguh menjengkelkan, tapi aku berusaha bersabar :) "
    sleep 2
    echo $green"menahan rinduku padamu"
    sleep 2
    echo $red"karena hanya sabar dan.."
    sleep 2
    echo $green"mendoakan yang terbaik.."
    sleep 2
    echo $red"untuk kamu disana"
    sleep 2
    echo $green"jika kamu rindu, sabar ya.."
    sleep 2
    echo $red"AKU DISINI JUGA MERINDUKANMU... :')"
    sleep 3
    clear
    echo
    figlet -f future Bucin by Smile0007 | lolcat
    echo
    echo
    echo
    sleep 0.1
    echo $red"Baper?"
    echo
    echo
    sleep 2
    echo $cyan"Jadi orang kok baperan banget??"
    sleep 2
    echo $red"ya iyalah.. namanya juga manusia!"
    sleep 2
    echo $cyan"punya otak, punya hati."
    sleep 1
    echo $red"beda sama lu yg ngomongnya suka ceplas-ceplos!"
    sleep 2
    echo $cyan"hati mati, otak gak berfungsi"
    sleep 2
    echo $red"haha.."
    sleep 2
    echo $cyan"ya.. lain kali bisalah.."
    sleep 2
    echo $red"ngomong. tapi mikir.. :)"
    sleep 3
fi
if [ "$pil" = '2' ]
then
    echo
    echo
    clear
    echo
    toilet -f future Bucin by Shakira Azhar | lolcat
    echo
    echo
    sleep 0.1
    echo $green"Jika wanita baik, hanya untuk pria baik.."
    sleep 2
    echo $red"maka bolehkah aku meminta dirimu.."
    sleep 2
    echo $green"untuk merubahku menjadi lebih baik?"
    sleep 3
fi
if [ "$pil" = '3' ]
then
    clear
    echo
    toilet -f future Bucin By Smile0007 | lolcat
    echo
    echo
    sleep 0.1
    echo $purple"Biarpun kamu disana bersamanya.."
    sleep 2
    echo $green"aku tetap menunggumu sampai.."
    sleep 2
    echo $purple"kau bisa memahami perasaanku.. :')"
    sleep 2
    echo $cyan"yang hampa ini kepadamu.."
    sleep 2
    echo $purple"biarpun sayang tetapi kita...."
    echo
    echo
    sleep 3
    figlet -f future Cuma Temenan | lolcat
    sleep 3
fi
if [ "$pil" = '4' ]
then
    clear
    echo
    toilet -f future Bucin by Smile0007 | lolcat
    echo
    echo
    sleep 0.1
    echo $indigo"Bukan kamu yg terlalu sibuk cuma aku aja yg berharap lebih ke kamu"
    echo
    echo
    sleep 10
    echo $red"ku tidak pingin kamu pergi meninggalkan ku sendiri berbarengan bayangmu memanglah kita tlah jauh rasanya memanglah kita telah tidak bersama bila memanglah kita ditakdirkan tuk berbarengan selamanya cinta takkan kemana.."
    echo
    echo
    sleep 10
    echo $indigo"Aku mungkin sering membuatmu kesal, tapi percayalah aku tak akan membuatmu menyesal telah bersama ku , Good Morning"
    echo
    echo
    sleep 10
    echo $red"Setiap pertemuan pasti ada perpisahan,namun entah kenapa perpisahan selalu saja terasa berat,apalagi untuk seseorang yang selalu ada dalam hidup ini.."
    sleep 3
fi
if [ "$pil" = '5' ]
then
    clear
    echo
    toilet -f future Bucin by Mr.Selow | lolcat
    echo
    echo
    sleep 0.1
    echo $green"Terkadang aku bodoh seperti nobita Memperjuangkan seseorang yang dia tidak tahu hatinya untuk siapa"
    sleep 3
fi
if [ "$pil" = '6' ]
then
    clear
    echo
    toilet -f future Smile0007 | lolcat
    echo
    echo
    sleep 0.1
    echo $white"Hargai disaat aku bertahan"
    sleep 2
    echo $green"jika aku sudah pergi"
    sleep 2
    echo $white"bakal susah diajak balikan.."
    sleep 3
fi
if [ "$pil" = '7' ]
then
    clear
    echo
    toilet -f future Bucin by. Smile0007 | lolcat
    echo
    echo
    sleep 0.1
    echo $red"kalau menunggu itu membosankan?"
    sleep 1
    echo $purple"lalu? kenapa kau suruh aku menunggu?"
    sleep 3
fi
if [ "$pil" = '8' ]
then
    clear
    echo
    toilet -f future Bucin by Smile0007 | lolcat
    echo
    echo
    sleep 0.1
    echo $yellow"Tidak peduli apa yang telah terjadi"
    sleep 2
    echo $white"Tidak peduli apa yang telah kamu lakukan"
    sleep 2
    echo $yellow"Aku akan selalu mencintaimu"
    sleep 2
    # Fixed: used the undefined "$whitw" (typo for $white).
    echo $white"Aku bersumpah untuk itu"
    sleep 4
    clear
    echo
    toilet -f future bucin by. zeus | lolcat
    echo
    echo
    sleep 0.1
    echo $indigo"Aku melihat kesempurnaan pada dirimu, lalu aku jatuh cinta padamu"
    sleep 2
    echo $indigo"Kemudian, aku melihat ketidaksempurnaanmu, akupun semakin jatuh cinta padamu"
    sleep 4
    clear
    echo
    toilet -f future Bucin by Smile0007 | lolcat
    echo
    echo
    sleep 0.1
    echo $red"Aku mencintai mu, dan aku akan tetap mencintaimu sampai mati"
    sleep 2
    echo $white"Jika ada kehidupan lagi setelah itu, maka aku akan mencintai mu lagi :)"
    sleep 3
fi
if [ "$pil" = '9' ]
then
    clear
    echo
    toilet -f future bucin by Smile0007 | lolcat
    echo
    echo
    sleep 0.1
    echo $green"Tidak ada yang setia di dunia ini bahkan bayanganmu sendiripun akan pergi menghilang dalam kegelapan"
    sleep 2
    echo
    echo $indigo"Apa kah aku harus mengganti namaku menjadi aamiin,agar selalu kau sebut dalam doa mu:)"
    sleep 2
    echo
    echo $green"Bukan kebahagian yg membuat kita bersyukur,tetapi bersyukurlah yg membuat kita bahagia:)"
    sleep 3
fi
if [ "$pil" = '10' ]
then
    clear
    echo
    figlet -f future bucin by. Smile0007 | lolcat
    echo
    echo
    sleep 0.1
    echo $green"Cinta itu dari hati yang terikat di dalam fikiran. Jika hati sudah saling bisa memahami dan meyakininya itu baru dnamakan cinta"
    echo
    sleep 2
    echo $red"Jika kamu mencintai dua orang dalam satu waktu pilihlah yang pertama karena jika kamu benar-benar mencintai yang pertama tidak akan pernah ada yg kedua"
    sleep 3
fi

# Thanks for using the tool — learn coding.
# Ask permission before re-releasing: 083138046547.
| true |
e28920b374f2188e0c2f6d1a0dc7ca666769b5d3 | Shell | cs-au-dk/streamliner | /misc/jmh.sh | UTF-8 | 735 | 3.484375 | 3 | [] | no_license | #!/bin/bash
set -euo pipefail

# Locate the project root: must be started from the project dir or misc/.
if [ ! -f "pom.xml" ]; then
	cd ..
	if [ ! -f "pom.xml" ]; then
		echo "Run jmh.sh from project or misc directory"
		exit 1
	fi
fi

JAVAH=${JAVAH:-/usr/lib/jvm/java-8-openjdk-amd64}
JAVA=$JAVAH/bin/java
echo "Using ${JAVA} ($($JAVA -version 2>&1 | head -n 1)) for tests"

# Collect benchmark arguments in an array so quoted arguments with spaces
# survive; the old ARGS="$@" string flattened and re-split them.
if [ "$#" -eq 0 ]; then
	args=("dk.casa.streamliner.jmh.Test.*")
else
	args=("$@")
fi

# Skip the (slow) compile + transform phase when only help was requested.
if [[ "${args[*]}" != *"-h"* ]]; then
	mvn compile
	env JAVA_HOME="$JAVAH" mvn exec:java -Dexec.mainClass=dk.casa.streamliner.asm.TransformASM
fi

CP=$(mvn -q exec:exec -Dexec.executable=echo -Dexec.args="%classpath" 2> /dev/null)
exec "$JAVA" -Dfile.encoding=UTF-8 -classpath "out/asm/:$CP" org.openjdk.jmh.Main -rf JSON -rff misc/out.json "${args[@]}"
| true |
8afc8016b77a9dd92903b6be2d6affd39dc4c68c | Shell | kpsunkara/pmdarima | /build_tools/azure/test_version_tagging.sh | UTF-8 | 247 | 2.875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e

# 'pathlib' is a backport needed only on very old Pythons; harmless elsewhere.
pip install pathlib

# Simulate an Azure Pipelines tag build; get_tag.py should write the VERSION
# file based on the simulated branch. Paths are quoted in case
# BUILD_SOURCESDIRECTORY contains spaces.
BUILD_SOURCEBRANCH=refs/tags/v0.99.999 python "${BUILD_SOURCESDIRECTORY}/build_tools/get_tag.py"

if [[ ! -f "${BUILD_SOURCESDIRECTORY}/pmdarima/VERSION" ]]; then
    echo "Expected VERSION file"
    exit 4
fi
| true |
59c9ecaecaffc05f461fc725f26a83f77e29b799 | Shell | ainmosni/dotfiles-old | /misc/.bin/suspend | UTF-8 | 274 | 2.734375 | 3 | [] | no_license | #!/bin/sh
# Only suspend when not on power.
# TODO: Detect if we're on a battery for this.
on_battery() {
	acpi | grep -q Discharging
}
acpi_unsupported() {
	acpi 2>&1 | grep -q 'No support'
}

if on_battery; then
	echo "suspending" > /tmp/susp
	/usr/bin/systemctl suspend
fi
# Machines where acpi reports no support (e.g. desktops) suspend directly.
if acpi_unsupported; then
	/usr/bin/systemctl suspend
fi
| true |
83e55e7fa0bb6a0bf7bfa4c4d5fad9b849eceb85 | Shell | anthraxx/ant-zsh | /lib/S05.function.zsh | UTF-8 | 550 | 3.234375 | 3 | [] | no_license | function zsh_stats() {
    # Print the ten most frequently used commands from the shell history.
    history | awk '{print $2}' | sort | uniq -c | sort -rn | head
}
function take() {
    # Create the given directory (including parents) and change into it.
    local target="$1"
    mkdir -p "$target"
    cd "$target"
}
function mkdircd() {
    # Alias of take(), kept for muscle memory.
    take "$1"
}
function zc () {
    # Quick calculator: evaluate each argument as a zsh arithmetic
    # expression and print "expr = result" (zsh-only: $argv, print).
    for exp in $argv; do
        print "$exp = $(( exp ))";
    done
}
function psgrep() {
    # Grep a snapshot of `ps aux` for $1; =(...) is zsh process
    # substitution backed by a temporary file.
    grep "$1" =(ps aux)
}
function run-with-sudo() {
    # ZLE widget: prepend "sudo " to the current interactive command line.
    LBUFFER="sudo $LBUFFER"
}
# Register the widget so it can be bound to a key.
zle -N run-with-sudo
function zsh_update() {
    # Update the framework checkout and its antigen-managed bundles.
    # Bail out if the checkout directory is missing: previously a failed
    # pushd let git/antigen run in whatever directory we happened to be in.
    pushd "${ZSH}" || return
    git pull --rebase origin master
    git submodule update --init --recursive --rebase
    antigen reset
    antigen update
    popd 2>/dev/null
}
| true |
bb65da08a04acdb524e76eff605ba54668b67313 | Shell | delkyd/alfheim_linux-PKGBUILDS | /bibledit/PKGBUILD | UTF-8 | 827 | 2.625 | 3 | [] | no_license | # Maintainer: Caleb Maclennan <caleb@alerque.com>
pkgname='bibledit'
pkgdesc='Linux desktop client release of bibledit for translating the Bible'
pkgver='1.0.348'
# NOTE(review): pkgrel is conventionally an integer — confirm '0.1'.
pkgrel='0.1'
# NOTE(review): this package compiles native code; arch=('any') is normally
# reserved for architecture-independent packages — confirm vs ('x86_64').
arch=('any')
url='http://bibledit.org/'
# license must be an array per PKGBUILD(5).
license=('GPL3')
source=("http://bibledit.org/linux/${pkgname}-${pkgver}.tar.gz")
sha256sums=('43367f64a3c14f2e51227758ee63b2bf6ffe68a8fb3e8a731acf018a1cfbe997')
depends=('sword' 'sqlite' 'gtk3' 'cairo' 'at-spi2-atk')
build () {
  # Configure the desktop client (with Paratext support) and compile it.
  cd "${pkgname}-${pkgver}"
  ./configure --enable-client --enable-paratext
  make -j8
}
package () {
  # Install the compiled tree into the packaging root.
  cd "${pkgname}-${pkgver}"
  make install DESTDIR="${pkgdir}/"
  # Documentation and examples
  for file in README NEWS; do
    install -Dm644 "${file}" "${pkgdir}/usr/share/doc/${pkgname}/${file}"
  done
  # License
  install -Dm644 COPYING "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
| true |
82c2067577946fc37848c628ebe00546c996317d | Shell | puwei0000/eventuate-chaos | /src/scenarios/simple-partitions.sh | UTF-8 | 1,175 | 3.515625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -eu
# Cassandra container names managed by blockade.
CASSANDRAS="c1 c2 c3"
# Seconds to let the cluster converge after a partition change;
# overridable from the environment.
SETTLE_DELAY=${SETTLE_DELAY:-40}
function settle() {
    # Give the cluster time to converge after a topology change;
    # the delay is configured via SETTLE_DELAY at the top of the script.
    echo "waiting ${SETTLE_DELAY} seconds for cluster to settle..."
    sleep ${SETTLE_DELAY}
}
function wait_till_healthy() {
    # Poll the chaos-test driver until it can reach the cluster.
    echo "waiting till cluster is up and running..."
    until ./interact.py >/dev/null 2>&1; do
        sleep 2
    done
}
wait_till_healthy

echo "*** checking working persistence with 1 node partition each"
for cassandra in $CASSANDRAS; do
    echo "partition of cassandra '$cassandra'"
    sudo blockade partition $cassandra
    settle
    echo "checking persistence..."
    ./interact.py >/dev/null
done

echo "*** checking non-working persistence with 2 node partitions"
for cassandra in $CASSANDRAS; do
    echo "partition of chaos application + '$cassandra'"
    sudo blockade partition location-1,$cassandra
    settle
    echo "checking persistence..."
    # A successful write during this partition is a test failure. A brace
    # group (not the original subshell) guarantees 'exit 1' terminates the
    # whole script even when the script is run without the -eu shebang flags.
    ./interact.py >/dev/null 2>&1 && { echo "persistence working when it shouldn't"; exit 1; }

    # reconnect cluster for next partition
    sudo blockade join
    wait_till_healthy
done

echo "*** checking final reconnect"
./interact.py >/dev/null
echo "test successfully finished"
| true |
ebd79f1b45480ad54b2de5b03c2bdbccb6579c4c | Shell | basho/data_platform_core | /priv/extras_templates/cache-proxy/test/read_through_test.sh | UTF-8 | 4,087 | 3.609375 | 4 | [] | no_license | #! /usr/bin/env bash
# Positional parameters; see usage() below for their meaning.
NC_PORT=$1
RIAK_HTTP_PORT=$2
# Optional host and bucket fall back to sensible defaults.
RIAK_HTTP_HOST=${3:-localhost}
RIAK_TEST_BUCKET=${4:-test}
function usage {
echo "$(cat <<EOF
Usage: $0 «NC_PORT» «RIAK_HTTP_PORT» [RIAK_HTTP_HOST] [RIAK_TEST_BUCKET]
Arguments:
NC_PORT - Cache Proxy port, speaking the redis protocol
RIAK_HTTP_PORT - Riak HTTP port
RIAK_HTTP_HOST - Riak HTTP host name or ip address, default: localhost
RIAK_TEST_BUCKET - Riak test bucket, default: test
Example:
$0 11211 8091 127.0.0.1 test
Output:
riak get after delete
nc[test:foo]= , riak[foo]=
cache proxy get after delete
nc[test:foo]= , riak[foo]=
cache proxy get after put
nc[test:foo]=bar , riak[foo]=bar
EOF
)"
}
if [[ $NC_PORT == "" || $RIAK_HTTP_PORT == "" || $RIAK_HTTP_HOST == "" || $RIAK_TEST_BUCKET == "" ]]; then
usage && exit 1
fi
# Ensure redis-cli is on PATH, falling back to the repo-local redis build
# under ../../redis/bin relative to this script. Exits 1 if still missing.
function ensure_redis_cli {
    # 'command -v' is the portable, builtin replacement for 'which'.
    if [[ $(command -v redis-cli) == "" ]]; then
        echo "adding local redis to path"
        # Split declaration from assignment so the substitution's status
        # is not masked by 'local'.
        local DIR
        DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
        PATH=$PATH:$DIR/../../redis/bin
    fi

    if [[ $(command -v redis-cli) == "" ]]; then
        echo "unable to locate redis-cli, ensure it is in the PATH" >&2
        exit 1
    fi
}

# Ensure curl is available; exits 1 otherwise.
function ensure_curl {
    if [[ $(command -v curl) == "" ]]; then
        echo "unable to locate curl, ensure it is in the PATH" >&2
        exit 1
    fi
}

ensure_redis_cli
ensure_curl
# In-memory mirrors of the last values written/read, keyed by Riak key.
# These MUST be associative arrays (-A): keys are strings like "foo", and
# with 'declare -a' every non-numeric subscript is evaluated arithmetically
# to 0, so all keys silently collide on the same slot.
declare -A RIAK_VALUES
declare -A NC_VALUES

# Record $2 as the locally-known Riak value for key $1; also sets RIAK_VALUE.
function set_local_riak_value {
    local KEY=$1
    local VALUE=$2
    RIAK_VALUES[$KEY]=$VALUE
    RIAK_VALUE=${RIAK_VALUES[$KEY]}
}

# Load the locally-known Riak value for key $1 into RIAK_VALUE.
function get_local_riak_value {
    local KEY=$1
    RIAK_VALUE=${RIAK_VALUES[$KEY]}
}

# Record $2 as the locally-known cache-proxy value for key $1; sets NC_VALUE.
function set_local_nc_value {
    local KEY=$1
    local VALUE=$2
    NC_VALUES[$KEY]=$VALUE
    # Bug fix: the original wrote 'NC_VALUE=$NC_VALUES[$1]' (missing braces),
    # which expands the first array element followed by the literal "[key]".
    NC_VALUE=${NC_VALUES[$KEY]}
}

# Load the locally-known cache-proxy value for key $1 into NC_VALUE.
function get_local_nc_value {
    local KEY=$1
    NC_VALUE=${NC_VALUES[$KEY]}
}
# PUT value $5 under key $4 in bucket $3 via the Riak HTTP API at $1:$2,
# then mirror it locally.
function riak_put {
    local RIAK_HTTP_HOST=$1
    local RIAK_HTTP_PORT=$2
    local RIAK_TEST_BUCKET=$3
    local KEY=$4
    local VALUE=$5
    curl -s -X PUT -d "$VALUE" "http://$RIAK_HTTP_HOST:$RIAK_HTTP_PORT/buckets/$RIAK_TEST_BUCKET/keys/$KEY" 1>/dev/null 2>&1
    set_local_riak_value "$KEY" "$VALUE"
}

# GET key $4 from bucket $3 via the Riak HTTP API; "not found" is normalised
# to the empty string. Mirrors the result locally (sets RIAK_VALUE).
function riak_get {
    local RIAK_HTTP_HOST=$1
    local RIAK_HTTP_PORT=$2
    local RIAK_TEST_BUCKET=$3
    local KEY=$4
    # Split declaration from assignment so curl's exit status is not masked.
    local VALUE
    VALUE=$(curl -s "http://$RIAK_HTTP_HOST:$RIAK_HTTP_PORT/buckets/$RIAK_TEST_BUCKET/keys/$KEY")
    if [[ "$VALUE" == "not found" ]]; then
        VALUE=""
    fi
    set_local_riak_value "$KEY" "$VALUE"
}

# DELETE key $4 from bucket $3 via the Riak HTTP API and clear the mirror.
function riak_del {
    local RIAK_HTTP_HOST=$1
    local RIAK_HTTP_PORT=$2
    local RIAK_TEST_BUCKET=$3
    local KEY=$4
    curl -s -X DELETE "http://$RIAK_HTTP_HOST:$RIAK_HTTP_PORT/buckets/$RIAK_TEST_BUCKET/keys/$KEY" 1>/dev/null 2>&1
    set_local_riak_value "$KEY" ""
}
# SET key $2 (namespaced as "test:$2") to value $3 through the cache proxy
# on port $1, then mirror it locally.
function nc_put {
    local NC_PORT=$1
    local KEY=$2
    local NC_KEY="test:$KEY"
    local VALUE=$3
    # Quote the value: the original passed it unquoted, so values containing
    # whitespace would be split into multiple redis-cli arguments.
    redis-cli -p "$NC_PORT" set "$NC_KEY" "$VALUE"
    set_local_nc_value "$KEY" "$VALUE"
}

# GET key $2 through the cache proxy on port $1 and mirror it (sets NC_VALUE).
function nc_get {
    local NC_PORT=$1
    local KEY=$2
    local NC_KEY="test:$KEY"
    # Split declaration from assignment so redis-cli's status is not masked.
    local VALUE
    VALUE=$(redis-cli -p "$NC_PORT" get "$NC_KEY")
    set_local_nc_value "$KEY" "$VALUE"
}

# DEL key $2 through the cache proxy on port $1 and clear the mirror.
function nc_del {
    local NC_PORT=$1
    local KEY=$2
    local NC_KEY="test:$KEY"
    redis-cli -p "$NC_PORT" del "$NC_KEY" >/dev/null 2>&1
    set_local_nc_value "$KEY" ""
}
# Print "nc[test:KEY]=<val> , riak[KEY]=<val>" for key $1, preceded by the
# optional message $2 on its own line. Reads the local mirrors via
# get_local_riak_value / get_local_nc_value (sets RIAK_VALUE / NC_VALUE).
function debug_val {
    local KEY=$1
    local MSG=${2:-""}
    local NC_KEY="test:$KEY"
    local PROMPT=""
    if [[ "$MSG" != "" ]]; then
        # $'\n' keeps the message on its own line ahead of the value dump.
        PROMPT="$MSG"$'\n'
    fi
    get_local_riak_value "$KEY"
    get_local_nc_value "$KEY"
    echo "$PROMPT nc[$NC_KEY]=$NC_VALUE , riak[$KEY]=$RIAK_VALUE"
}
# Scenario 1: after a Riak-side delete, a direct Riak read must be empty.
riak_del $RIAK_HTTP_HOST $RIAK_HTTP_PORT $RIAK_TEST_BUCKET foo
riak_get $RIAK_HTTP_HOST $RIAK_HTTP_PORT $RIAK_TEST_BUCKET foo
debug_val foo "riak get after delete"

# Scenario 2: with both the backend and the cache cleared, a read through
# the cache proxy must also come back empty (no stale cache entry).
riak_del $RIAK_HTTP_HOST $RIAK_HTTP_PORT $RIAK_TEST_BUCKET foo
nc_del $NC_PORT foo
nc_get $NC_PORT foo
debug_val foo "cache proxy get after delete"

# Scenario 3: after writing directly to Riak, a read through the cache
# proxy must fetch the backend value (read-through behaviour).
riak_put $RIAK_HTTP_HOST $RIAK_HTTP_PORT $RIAK_TEST_BUCKET foo bar
riak_get $RIAK_HTTP_HOST $RIAK_HTTP_PORT $RIAK_TEST_BUCKET foo
nc_get $NC_PORT foo
debug_val foo "cache proxy get after put"
| true |
936db2751cf8040195fad79617be1960d8b7f6c0 | Shell | martinbaillie/rightscripts | /install_and_run_rancher.bash | UTF-8 | 2,365 | 3.734375 | 4 | [] | no_license | #! /bin/bash -e
#---------------------------------------------------------------------------------------------------------------------------
# Variables
#---------------------------------------------------------------------------------------------------------------------------
# Configuration, overridable via the environment; ${V:-default} assigns the
# default whenever the variable is unset or empty.
RANCHER_HOST_PORT=${RANCHER_HOST_PORT:-8080}                  # host port to publish
RANCHER_CONTAINER_PORT=${RANCHER_CONTAINER_PORT:-8080}        # port inside the container
RANCHER_CONTAINER_TIMEZONE=${RANCHER_CONTAINER_TIMEZONE:-}    # e.g. Europe/London
RANCHER_DOCKER_OPTS=${RANCHER_DOCKER_OPTS:-}                  # extra 'docker run' options
RANCHER_DOCKER_CMD=${RANCHER_DOCKER_CMD:-}                    # command appended after the image
RANCHER_TAG=${RANCHER_TAG:-stable}                            # rancher/server image tag

# Forward the requested timezone into the container through TZ.
if [ -n "$RANCHER_CONTAINER_TIMEZONE" ]; then
    RANCHER_DOCKER_OPTS="$RANCHER_DOCKER_OPTS -e TZ='$RANCHER_CONTAINER_TIMEZONE'"
fi
#---------------------------------------------------------------------------------------------------------------------------
# FUNCTIONS
#---------------------------------------------------------------------------------------------------------------------------
# Pull the requested rancher/server image tag.
function pull_rancher()
{
    sudo docker pull "rancher/server:$RANCHER_TAG"
}

# Start rancher/server against an external MySQL database.
# Expects the CATTLE_DB_CATTLE_* variables to be set in the environment.
function run_rancher_with_external_db()
{
    # $RANCHER_DOCKER_OPTS and $RANCHER_DOCKER_CMD are deliberately unquoted:
    # they may contain several words, or be empty and expand to nothing.
    # The original quoted "$RANCHER_DOCKER_CMD" here (unlike the all-in-one
    # path), which handed docker an empty-string command argument whenever no
    # command was configured. The -e values are now quoted so credentials with
    # spaces survive intact.
    sudo docker run $RANCHER_DOCKER_OPTS -d --restart=always -p "$RANCHER_HOST_PORT:$RANCHER_CONTAINER_PORT" \
         -e CATTLE_DB_CATTLE_MYSQL_HOST="$CATTLE_DB_CATTLE_MYSQL_HOST" \
         -e CATTLE_DB_CATTLE_MYSQL_PORT="$CATTLE_DB_CATTLE_MYSQL_PORT" \
         -e CATTLE_DB_CATTLE_MYSQL_NAME="$CATTLE_DB_CATTLE_MYSQL_NAME" \
         -e CATTLE_DB_CATTLE_USERNAME="$CATTLE_DB_CATTLE_USERNAME" \
         -e CATTLE_DB_CATTLE_PASSWORD="$CATTLE_DB_CATTLE_PASSWORD" \
         "rancher/server:$RANCHER_TAG" $RANCHER_DOCKER_CMD
}

# Start rancher/server with its embedded database.
function run_rancher_all_in_one()
{
    sudo docker run $RANCHER_DOCKER_OPTS -d --restart=always -p "$RANCHER_HOST_PORT:$RANCHER_CONTAINER_PORT" "rancher/server:$RANCHER_TAG" $RANCHER_DOCKER_CMD
}

# Dispatch on RANCHER_EXTERNAL_DB: "true" selects the external-DB variant.
function run_rancher()
{
    if [ "$RANCHER_EXTERNAL_DB" = "true" ]; then
        run_rancher_with_external_db
    else
        run_rancher_all_in_one
    fi
}
#---------------------------------------------------------------------------------------------------------------------------
# MAIN
#---------------------------------------------------------------------------------------------------------------------------
# possible race condition with dockerd on boot
sleep 5
if ! sudo docker ps | grep 'rancher/server'; then
echo 'Running rancher container...'
set -x
pull_rancher
run_rancher
{ set +x; } 2>/dev/null
else
echo 'rancher/server already running, skipping.'
fi
echo 'Done.'
| true |
247c02608a9c22f7e574260c5d2b2cdeea9ec221 | Shell | jasonbrooks/fedora-coreos-config | /overlay.d/05core/usr/lib/dracut/modules.d/20live/module-setup.sh | UTF-8 | 1,086 | 2.890625 | 3 | [
"MIT"
] | permissive | install_and_enable_unit() {
unit="$1"; shift
target="$1"; shift
inst_simple "$moddir/$unit" "$systemdsystemunitdir/$unit"
mkdir -p "$initdir/$systemdsystemunitdir/$target.requires"
ln_r "../$unit" "$systemdsystemunitdir/$target.requires/$unit"
}
install() {
inst_script "$moddir/is-live-image.sh" \
"/usr/bin/is-live-image"
inst_script "$moddir/ostree-cmdline.sh" \
"/usr/sbin/ostree-cmdline"
inst_simple "$moddir/live-generator" \
"$systemdutildir/system-generators/live-generator"
inst_simple "$moddir/coreos-populate-writable.service" \
"$systemdsystemunitdir/coreos-populate-writable.service"
inst_simple "$moddir/coreos-live-unmount-tmpfs-var.sh" \
"/usr/sbin/coreos-live-unmount-tmpfs-var"
install_and_enable_unit "coreos-live-unmount-tmpfs-var.service" \
"initrd-switch-root.target"
install_and_enable_unit "coreos-live-clear-sssd-cache.service" \
"ignition-complete.target"
inst_simple "$moddir/writable.mount" \
"$systemdsystemunitdir/writable.mount"
}
| true |
bf56145b1649e481560da3790dfae69ffb6d888b | Shell | jhkim105/deploy | /kill.sh | UTF-8 | 1,048 | 4.125 | 4 | [] | no_license | #!/bin/bash
if [ "$1" == "" ]
then
echo "Usage: $0 <name>"
exit 1
fi
SERVICE_NAME=$1
PID_DIR=/var/run/$SERVICE_NAME
PID_FILE=$SERVICE_NAME.pid
IDENTIFIER=$SERVICE_NAME.jar
function kill_process() {
echo "$SERVICE_NAME destroy started..."
PID=`cat $PID_DIR/$PID_FILE 2>/dev/null`
if [ -z $PID ]; then
echo "Not running (pidfile not found)"
else
echo "shutting down $SERVICE_NAME. pid=$PID."
# kill -9 $PID
fi
process_count=`ps aux | grep $IDENTIFIER | grep -v grep | wc -l`
if [ ! $process_count -eq 0 ]; then
echo "shuttind down $SERVICE_NAME. identifier=$IDENTIFIER."
ps aux | grep $IDENTIFIER | grep -v grep | awk '{print $2}' | xargs kill -9
fi
process_count=`ps aux | grep $IDENTIFIER | grep -v grep | wc -l`
if [ $process_count -eq 0 ]; then
echo "$SERVICE_NAME destroy completed."
else
echo "$SERVICE_NAME destroy failed."
fi
}
function clear_pid() {
if [ -f $PID_DIR/$PID_FILE ]; then
echo "rm $PID_DIR/$PID_FILE."
rm $PID_DIR/$PID_FILE
fi
}
kill_process
clear_pid | true |
6a3223d86732d5a0aa2f85dda39544b4f03567a4 | Shell | davidaayers/dotfiles | /.zshrc | UTF-8 | 2,573 | 2.5625 | 3 | [] | no_license | # Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="kphoen"
export EDITOR='subl -w'
export LESS="R"
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
alias gs='git status -sb'
alias gfo="git fetch origin"
alias gprune='git remote prune origin && git branch --merged | grep -v "\*" | xargs -n 1 git branch -d'
__git_files () {
_wanted files expl 'local files' _files
}
# Set to this to use case-sensitive completion
# CASE_SENSITIVE="true"
# Comment this out to disable bi-weekly auto-update checks
# DISABLE_AUTO_UPDATE="true"
# Uncomment to change how often before auto-updates occur? (in days)
# export UPDATE_ZSH_DAYS=13
# Uncomment following line if you want to disable colors in ls
# DISABLE_LS_COLORS="true"
# Uncomment following line if you want to disable autosetting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment following line if you want to disable command autocorrection
# DISABLE_CORRECTION="true"
# Uncomment following line if you want red dots to be displayed while waiting for completion
# COMPLETION_WAITING_DOTS="true"
# Uncomment following line if you want to disable marking untracked files under
# VCS as dirty. This makes repository status check for large repositories much,
# much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
plugins=(alias-finder git git-extras vi-mode copydir copyfile docker docker-compose npm)
source $ZSH/oh-my-zsh.sh
export HOMEBREW_GITHUB_API_TOKEN=$(cat ~/.dotfiles/homebrew_api_token)
export NPM_AUTH_TOKEN=$(cat ~/.dotfiles/npm_token)
export NVM_DIR="$HOME/.nvm"
[ -s "/usr/local/opt/nvm/nvm.sh" ] && . "/usr/local/opt/nvm/nvm.sh" # This loads nvm
[ -s "/usr/local/opt/nvm/etc/bash_completion.d/nvm" ] && . "/usr/local/opt/nvm/etc/bash_completion.d/nvm" # This loads nvm bash_completion
source /opt/homebrew/share/zsh-history-substring-search/zsh-history-substring-search.zsh
source /opt/homebrew/share/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
bindkey '^[OA' history-substring-search-up
bindkey '^[OB' history-substring-search-down
. $HOME/.shellrc.load
export VOLTA_HOME="$HOME/.volta"
export PATH="$VOLTA_HOME/bin:$PATH"
| true |
2b0bd26ee08777fb6c2ec0d1cd5723354bb8110f | Shell | mtalexan/bash-profile | /prompt-ubuntu.bash | UTF-8 | 413 | 3.265625 | 3 | [] | no_license | if [ -n "$USE_POWERLINE" ] && [ $USE_POWERLINE -eq 1 ] ; then
source "${LOCAL_PROFILES_DIR}/powerline.bash"
else
# add the git settings to the command prompt
if [ "${color_prompt}" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;34m\]\w\[\033[00m\]\[\033[1;33m\]$(__git_ps1)\[\033[00m\] \$ '
else
PS1='${debian_chroot:+($debian_chroot)}\w$(__git_ps1) \$ '
fi
fi
| true |
a4393c39e5663a6c6b1951453f25187d52916983 | Shell | PeterXu/ztools | /shell/.zero/uinit.sh | UTF-8 | 1,497 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env bash
#
# Author: peter@uskee.org
# Created: 2015-11-18
#
## ===================================================
## ---------------------------------------------------
## global init scripts:
## To call functions with prefix of "__init_xxx".
_g_init_init() {
local kShName="sh"
local shname=$(ps -p $$ -o comm | grep -iv comm)
if [[ "$shname" =~ "zsh" ]]; then
kShName="zsh"
setopt shwordsplit
#if type emulate >/dev/null 2>/dev/null; then emulate bash; fi
tmp="/usr/local/share/zsh"
if [ -f "$tmp" ]; then
chmod -R 755 $tmp || echo "[FAILED]: chmod -R 755 $tmp"
fi
autoload -U +X compinit && compinit
elif [[ "$shname" =~ "bash" ]]; then
kShName="bash"
fi
_SHNAME="$kShName"
_INIT_PREFIX="__init_"
_SH_LIST="ucommon.sh ubase.sh umisc.sh umark.sh udocker.sh ugit.sh udocs.sh srcin.sh"
local kUname=$(uname)
[[ "$kUname" =~ "MINGW" || "$kUname" =~ "mingw" ]] && kUname="MINGW"
_UNAME="$kUname"
[ "$_UNAME" = "MINGW" ] && _SH_LIST="ucommon.sh ubase.sh umark.sh"
[ "$_UNAME" = "Darwin" ] && _SH_LIST="$_SH_LIST umacosx.sh"
local item
for item in $_SH_LIST; do
item="$HOME/.zero/$item"
[ ! -f "$item" ] && continue
source $item
local func_list=$(cat $item | grep "^${_INIT_PREFIX}[a-z_]\+() " | awk -F"(" '{print $1}')
for fn in $func_list; do
eval $fn
done
done
}
_g_init_init
| true |
f4639100c107007feaea039006bb8a156545ffb1 | Shell | influxdata/flux-lsp | /release.sh | UTF-8 | 2,578 | 4.25 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# This script will check the state of the main branch of flux-lsp for
# conditions that would allow for a release to occur. If those conditions
# are met, a signed tag is created and *pushed to github* where the CI
# will take over and publish the extension.
#
# WARNING: This script will *push directly to master*. Please make sure
# you understand the contents of this script and the consequences of its
# execution before running it.
set -e
if [[ "${DEBUG:-0}" == "1" ]]; then
set -x
fi
# Controls how the version is bumped.
# Set INCREMENT to one of: major, minor or patch
# Defaults to patch if unset.
INCREMENT=${INCREMENT:-patch}
if [[ ! $INCREMENT =~ (patch)|(minor)|(major) ]]
then
echo "Increment must be one of major, minor or patch"
exit 1
fi
if [[ ! $(command -v hub) ]]; then
echo "Please install the hub tool and re-run."
exit 1
fi
if [[ ! -f $HOME/.config/hub && "${GITHUB_TOKEN}" == "" ]]; then
echo "Please authenticate your hub command. See https://github.com/github/hub/issues/2655#issuecomment-735836048"
exit 1
fi
if [[ ! $(cargo bump -v) ]]; then
echo "Please install cargo bump and re-run: `cargo install cargo-bump`"
exit 1
fi
TEMPDIR=$(mktemp -d -t flux-release.XXXX)
echo "Using fresh install in $TEMPDIR"
cd $TEMPDIR
if [[ $(ssh -T git@github.com 2>&1 > /dev/null) ]]; then
git clone git@github.com:influxdata/flux-lsp.git > /dev/null 2>&1
else
git clone https://github.com/influxdata/flux-lsp.git > /dev/null 2>&1
fi
cd $TEMPDIR/flux-lsp
if [[ ! $(hub ci-status HEAD) ]]; then
echo "Build status on master is either incomplete or failing. Please try ag ain after build status is complete."
exit 1
fi
# Bump version
cargo bump $INCREMENT
cargo check
new_version=$(cargo pkgid | cut -d# -f2 | cut -d: -f2)
# Commit and tag release
git add Cargo.toml
git add Cargo.lock
git commit -m "release: $new_version"
# Note: Using an annotated tag (-a) is important so that we can reliably find
# the previous version tag.
git tag -a -m "$new_version" "$new_version"
git push
previous_version=`git tag --sort=-creatordate | sed -n '2 p'`
# The tail step here ignores the commit that is the release, so we don't have a changelog that also
# contains, e.g. "release: 0.10.55". We already know it's a release, that's why we're constructing release
# notes.
commits=`git log --pretty=oneline ${previous_version}..${new_version} | tail -n +2 | awk '{$1="-"; print }'`
hub release create $new_version -m "Release $new_version
${commits}"
echo "$new_version tagged and released"
rm -rf $TEMPDIR
| true |
a4d3bf28c416fc8eb26b85dc5c4f6af1fd4cf6ce | Shell | Josverl/jostatest | /scripts/ensurepws | UTF-8 | 118 | 2.71875 | 3 | [] | no_license | #!/bin/bash
#try
pwsh --version
if [ $? -eq 0 ]
then
echo "pwsh already installed"
else
# npm install pwsh -g
fi
| true |
45faca0a54364a73bcb6622974f14348adce0c49 | Shell | duanwujie/helpscript | /threadname | UTF-8 | 377 | 3.703125 | 4 | [] | no_license | #!/bin/bash
if [[ $# != 1 ]] ; then
echo "Usage: threadname <process_name>"
exit -1
fi
NAME=$1
APP_PID=`ps aux | grep $NAME | grep -v grep | grep -v threadname | awk '{printf("%s\n",$2)}' `
for i in `ps -T $APP_PID | awk '{ if(NR>1) {printf("%s ",$2)}}'`
do
cat /proc/$i/stat | awk -F ")" '{print $1}' | awk -F "(" '{printf("thread_id:%d thread_name:%s\n",$1,$2)}'
done
| true |
e2821a199e93f46c1cae02eeb89016487948112e | Shell | h-castle/loggi | /entrypoint.sh | UTF-8 | 563 | 2.734375 | 3 | [] | no_license | #!/bin/sh
if [ "$LOGGI_TLS" != "" ]
then
TLS=--tls
fi
if [ "$LOGGI_TLS_NO_VERIFY" != "" ]
then
TLS_NO_VERIFY=--tls-no-verify
fi
if [ "$LOGGI_RECONNECTION_INTERVAL" != "" ]
then
RECONNECTION_INTERVAL="--reconnection-interval $LOGGI_RECONNECTION_INTERVAL"
fi
if [ "$LOGGI_NICK" != "" ]
then
NICK="-n $LOGGI_NICK"
fi
if [ "$LOGGI_REALNAME" != "" ]
then
REALNAME="-r $LOGGI_REALNAME"
fi
loggi.py -s $LOGGI_SERVER -p $LOGGI_PORT -u $LOGGI_USER -k $LOGGI_KEY -c $LOGGI_CHANNELS \
$TLS $TLS_NO_VERIFY $RECONNECTION_INTERVAL $NICK $REALNAME
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.