blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b4d9cc8f5376eb5447a6a9f8f2428f072e9d386e
|
Shell
|
qwertie/js-astcompressor-prototype
|
/test-roundtrip.sh
|
UTF-8
| 1,186
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
# Round-trip test: compress the input with gzip/lzham for size comparison,
# encode it to the webasm AST format, decode it back, and diff the AST JSON
# before and after. With `set -e`, a non-empty diff fails the script.
#
# Usage: ./test-roundtrip.sh [dir file]   (default input: ./ast-encoder.js)
if [ -z "$1" ]; then
  FILE_PREFIX=ast-encoder.js
  INFILE=./ast-encoder.js
else
  FILE_PREFIX=$2
  INFILE="$1/$2"
fi
echo "// input"
gzip -9 -f -k "$INFILE"
third_party/lzhamtest c "$INFILE" "$INFILE.lzham" > /dev/null
ls -la "$INFILE" "$INFILE.gz" "$INFILE.lzham"
echo "// encoding"
node encode.js "$INFILE" "Test/$FILE_PREFIX.webasm" "Test/$FILE_PREFIX.ast.json" "Test/$FILE_PREFIX.expected.js"
gzip -9 -f -k "Test/$FILE_PREFIX.webasm"
third_party/lzhamtest c "Test/$FILE_PREFIX.webasm" "Test/$FILE_PREFIX.webasm.lzham" > /dev/null
echo "// encoded sizes"
ls -la "Test/$FILE_PREFIX.webasm" "Test/$FILE_PREFIX.webasm.gz" "Test/$FILE_PREFIX.webasm.lzham" "Test/$FILE_PREFIX.ast.json"
echo "// read ast json"
# Time a plain JSON.parse of the AST and report heap usage for comparison.
node --expose-gc -e "var json = require('fs').readFileSync('Test/$FILE_PREFIX.ast.json', { encoding: 'utf8' }); console.time('JSON.parse'); var tree = JSON.parse(json); console.timeEnd('JSON.parse'); json = null; global.gc(); console.log('heapUsed ' + process.memoryUsage().heapUsed);"
echo "// decoding"
node --expose-gc decode.js "Test/$FILE_PREFIX.webasm" "Test/$FILE_PREFIX.decoded.js" "Test/$FILE_PREFIX.ast.decoded.json"
echo "// diff follows"
diff "Test/$FILE_PREFIX.ast.json" "Test/$FILE_PREFIX.ast.decoded.json"
| true
|
1a2a836477fdee4d818dc6dc1bc71dfd5ec703a4
|
Shell
|
loggar/ng
|
/ng-build-deploy/ng-webpack-router-loader/update_docs.sh
|
UTF-8
| 434
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Regenerate the docs on master and publish them to the gh-pages branch.
git checkout master
npm run docs 2> /dev/null
git add docs/docs_dist
# Test the commit's exit status directly instead of inspecting $? afterwards;
# the commit only succeeds when the generated docs actually changed.
if git commit -m "generate docs"
then
  # Stash the generated docs, switch branches, and swap in the new files.
  cp -r ./docs/docs_dist .docs
  git checkout gh-pages
  rm -rf ./assets
  rm -rf ./interfaces
  rm ./index.html
  rm ./globals.html
  mv .docs/* ./
  rm -rf .docs
  git add .
  git commit -m "update docs"
  git push origin gh-pages
  git checkout master
else
  echo "Compilation failed" >&2
fi
| true
|
ff4cb072c014ee534d858a68bd6a2d758c20935f
|
Shell
|
dorukgezici/SpaceHunt
|
/deploy.sh
|
UTF-8
| 520
| 2.859375
| 3
|
[] |
no_license
|
git checkout master
git push -f github master:master # force push origin's master to github
FILE="build/CNAME"
# Create the CNAME file only when it is missing. The previous one-liner
# (`[[ -f $FILE ]] || echo ... && git add ... && ...`) ran the add/commit
# chain even when the file already existed, because || and && have equal
# precedence and group left-to-right.
if [[ ! -f "$FILE" ]]; then
    echo "spacehunt.tk" >> "$FILE"
    git add "$FILE"
    git commit -m "Added CNAME file."
    echo "Added CNAME file"
fi
git subtree split --prefix build -b gh-pages # create a local gh-pages branch containing the splitted build folder
git push -f github gh-pages:gh-pages # force the push of the gh-pages branch to the remote gh-pages branch
git branch -D gh-pages # delete the local gh-pages branch
| true
|
cac12e12387c35c8b7d741d570acf2bcd0d9d0bf
|
Shell
|
inderpal2406/my_bash_scripts
|
/scripts/census.sh
|
UTF-8
| 2,144
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
###############################################################################
# File : census.sh
# Author : Inderpal Singh Saini <inderpal2406@gmail.com>
# Created : 23 Dec, 2019
# Updated : 23 Dec, 2019
# Description : Record user data for census purpose.
# : The $datafile has data arranged in fields: "name:gender:number:insurance:education:rights"
# : Few fields are assigned default values according to gender.
# : The $datafile should have write permissions for all, as everyone will be writing to it.
################################################################################
# Initiate variable to hold data file's path
datafile="/var/tmp/inderpal2406/datafile"
# Clear screen
clear
# Display message about what this script will do and ask for confirmation to proceed.
echo "Hello $USER ... This script will record your data for census purpose."
echo "Press ENTER to proceed."
read -r
echo
# Ask for name (-r keeps backslashes literal in all reads below)
read -rp "Enter your name and press [ENTER]: " name
# Check if name is already present in datafile; use grep's exit status
# directly (-q suppresses output) instead of redirecting and testing $?.
if grep -iq "$name" "$datafile"
then
echo "$name has already entered data."
echo "Below are the details,"
grep -i "$name" "$datafile"
echo "Exiting script..."
exit 1
fi
# If contact has already not entered data, then ask for further details
read -rep "Enter your gender [m|f|t]: " -n 1 gender
read -rp "Enter your contact number with country code [+91 8898****22] and press [ENTER]: " number
# Ask for further details based on gender and initialise other not-applicable [NA] details based on gender
if [ "$gender" == "m" ]
then
read -rep "Enter if you're medically insured [Y|N] " -n1 insurance
education="NA"
rights="NA"
elif [ "$gender" == "f" ]
then
read -rp "Enter your most recent education [Upto SSC|SSC|HSC|Graduate|Post-graduate] and press [ENTER]: " education
insurance="NA"
rights="NA"
else
read -rep "Are you aware of your rights? [Y|N] " -n1 rights
insurance="NA"
education="NA"
fi
# Enter details into address book and notify user
echo "$name:$gender:$number:$insurance:$education:$rights" >> "$datafile"
echo "Your details have been successfully saved."
| true
|
4b300c775995dd24ecc2882874800b58d25c3ff1
|
Shell
|
dictcp/jump.sh
|
/auto-completion.sh
|
UTF-8
| 359
| 2.9375
| 3
|
[] |
no_license
|
# Shell completion setup for jump.sh.
# Under zsh, load bash-compatible programmable completion first.
if [[ -n "$ZSH_VERSION" ]]; then
  autoload bashcompinit
  bashcompinit
fi

# Complete jump.sh arguments with host names reported by find-hosts.sh.
# Fixes: `names` was leaking into the interactive shell (now local),
# the unused `opts` local is gone, and ${cur} is quoted.
_jumpsh()
{
    local cur names
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    # First tab-separated field of each find-hosts.sh line is the host name.
    names="$(~/.jumphost/bin/find-hosts.sh | cut -f1)"
    COMPREPLY=( $(compgen -W "${names}" -- "${cur}") )
    return 0
}
complete -F _jumpsh jump.sh
export PATH="$HOME/.jumphost/bin:$PATH"
| true
|
11ef67944e134245c1e01fa0290eee86cb218f7f
|
Shell
|
shenyongxing/Sh
|
/获取文件后缀名.sh
|
UTF-8
| 234
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the suffix (extension) of the file name given as $1.
# author shenxing
if [[ -n $1 ]]; then
	# ${1##*.} strips the longest prefix up to (and including) the last
	# '.', leaving only the suffix.
	suffix=${1##*.}
	echo "$suffix"
else
	echo "请在shell文件名后面输入文件名称"
fi
| true
|
331404d5d22bc92b49d4d7f3e9acb8057fe48fe7
|
Shell
|
dawnbreaks/taomee
|
/monster/doc/db/create_stuff.sh
|
UTF-8
| 640
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/sh
# Create the sharded "stuff" tables: 100 t_stuff_* tables in each of the
# 10 db_monster_* databases (1000 tables in total).
host=127.0.0.1
port=3306
# NOTE(review): $port is never passed to mysql below — confirm the server
# listens on the default port, or add -P$port to the mysql call.
user=monster
password=monster@24
for i in `seq 0 9`
do
for j in `seq 0 99`
do
# One SQL batch per table; the trailing backslashes continue the string.
tb_sentence="set names utf8;use db_monster_$i;\
create table t_stuff_$j(\
user_id int not null,\
stuff_id int not null default 0,\
stuff_num int not null default 0,\
used_num int not null default 0,\
primary key(user_id, stuff_id)
)engine=InnoDB default charset=utf8;"
mysql -h$host -u$user -p$password -e "$tb_sentence"
done
done
| true
|
d628d8d4d6d4787844abcdfab711834040a4c26e
|
Shell
|
winterfroststrom/rip_classical_project
|
/Fast-Downward-8ea549f76262/new-scripts/build-opt-ipc-experiments
|
UTF-8
| 674
| 3.390625
| 3
|
[] |
no_license
|
#! /bin/bash
set -e
# Build a gkigrid experiment for every optimization-track configuration
# archive given on the command line, then rewrite absolute paths so the
# experiments can run from TARGET_DIR on the machine where qsub is invoked.
# Set TARGET_DIR to the directory in which the experiment directories
# will be located on the machine where qsub is run.
TARGET_DIR=/home/helmert/tmp/downward-ipc/new-scripts
DIRNAME="$(pwd)"
for ARCHIVE in "$@"; do
echo "found optimization track config: $ARCHIVE"
# Experiment name is derived from the archive name minus ".tar.gz".
CONFIGNAME=$(basename $ARCHIVE .tar.gz)
EXPNAME=pkg-$CONFIGNAME
echo "creating experiment $EXPNAME..."
./ipc_experiment.py -e gkigrid --queue opteron_core.q -p $ARCHIVE -s STRIPS_IPC12345,IPC08_OPT_STRIPS $EXPNAME
echo "patching directory names in experiment $EXPNAME..."
# Replace the local build dir with the target dir in the queue file and
# in every generated run script ('|' as the sed delimiter avoids clashes
# with the slashes in the paths).
sed -i -e "s|$DIRNAME|$TARGET_DIR|g" $EXPNAME/$EXPNAME.q $EXPNAME/runs-*/*/run
done
| true
|
2240099bf657e29dd62331fbc40616b23829694a
|
Shell
|
Ditti4/webserverscriptcollection
|
/disabledomain
|
UTF-8
| 442
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
#Simple script to disable an entire domain (including all vhosts).
server="apache2"
domain=$1
# Remove every enabled-site link for the domain from apache2...
# NOTE(review): this parses `ls` output with $domain unquoted — it relies
# on well-behaved file names and on $domain naming a directory (or glob)
# under sites-enabled; consider a shell glob instead of `ls`.
for link in `ls /etc/apache2/sites-enabled/${domain}`; do
unlink /etc/apache2/sites-enabled/${domain}/${link}
done
# ...and the same for nginx.
for link in `ls /etc/nginx/sites-enabled/${domain}`; do
unlink /etc/nginx/sites-enabled/${domain}/${link}
done
rm /etc/apache2/sites-enabled/${domain}
rm /etc/nginx/sites-enabled/${domain}
# NOTE(review): only ${server} (apache2) is reloaded even though nginx
# links are removed as well — confirm whether nginx needs a reload too.
service ${server} reload
| true
|
e25aea27e9166c936f61bf4934539f042767d662
|
Shell
|
kepeng/docker_image_download
|
/auto_download_images.sh
|
UTF-8
| 1,043
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Batch-mirror images listed in images_list.txt: for each image, commit a
# one-line Dockerfile and push a release tag (triggering a remote build on
# the Aliyun registry), wait for the build, then pull the built image and
# re-push it to the private harbor registry.
# (Fixed shebang: was "#!bin/bash", missing the leading slash.)
for image in `cat ./images_list.txt`
do
echo "begin download $image"
# Strip the registry prefix (everything up to the first '/').
tmp_image=${image#*\/}
upload_image=harbor.chargerhere.net/gcr.io/${tmp_image}
echo ${tmp_image}
echo ${upload_image}
# Tag-safe version string: replace every ':' and '/' with '-'.
image_version=${tmp_image//[\:\/]/-}
echo ${image_version}
git checkout .
git pull --force
echo "From ${image}" > Dockerfile
# Show the Dockerfile content. The original line echoed the literal words
# "cat Dockerfile" instead of running the command.
echo ">>>>>>>>>>>>>>>>>Dockerfile 已经变为 :" "$(cat Dockerfile)"
echo -e ">>>>>>>>>>>>>>>>>开始提交代码,下载镜像为: "${image}"\n"
git add .
git commit -m "add images '${image}'"
git tag release-v${image_version}
git push --tags
echo -e ">>>>>>>>>>>>>>>>>提交代码成功!\n"
echo "等待60S"
sleep 1m
echo "开始下载镜像到本地"
# NOTE(review): the repository name here is the literal "image" — confirm
# this matches the Aliyun build's target repository naming scheme.
docker pull registry.cn-hangzhou.aliyuncs.com/kp_gcr_io/image:${image_version}
echo "上传镜像到本地私库"
docker tag registry.cn-hangzhou.aliyuncs.com/kp_gcr_io/image:${image_version} ${upload_image}
docker rmi registry.cn-hangzhou.aliyuncs.com/kp_gcr_io/image:${image_version}
docker push ${upload_image}
done
| true
|
deb00547e36d775a9da448a021debe9f5fab6f99
|
Shell
|
sorf/cpp-libraries-env
|
/clang_6.0_release.sh
|
UTF-8
| 384
| 2.671875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Enter a clang 6.0 release build environment: export the compiler
# commands used by the build scripts, then hand off to env_linux.sh,
# which ends by exec'ing a configured interactive bash.
export CLANGXX_COMMAND="clang++-6.0 -fPIC"
export CLANG_COMMAND="clang-6.0 -fPIC"
# We need gcc as well for Boost.Build.
export GCC_COMMAND="/usr/bin/gcc"
# Prompt marks the active environment in the spawned shell.
export PS1="[clang_6.0-release]\u@\h:\w>"
export CC=clang
export CXX=clang++
# Absolute directory of this script, independent of the caller's cwd.
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. ${script_dir}/env_linux.sh clang_6.0 clang release && exec bash -norc
| true
|
e9d56af6147557865de87debd329abbaca01d29c
|
Shell
|
10gic/dotfiles
|
/bin/kill_ipcs
|
UTF-8
| 908
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# This tool remove ALL System V IPC resources belong to current user (or specified user).
if [ ${#@} -ne 1 ] && [ ${#@} -ne 0 ]; then
echo 'This tool remove ALL System V IPC resources belong to specified user.' >&2;
echo 'Usage: kill_ipcs [user_name]' >&2;
exit 1
fi
user="$1"
if [ ! "$1" ]; then
# Default, whoami and "id -un" is not available in Solaris.
# Extract the name between '(' and ')' from `id` output, e.g. uid=1000(bob).
user=$(id | sed s"/) .*//" | sed "s/.*(//") # current user.
fi
# Interactive confirmation; anything other than y/yes aborts.
read -r -p "Are you sure to kill all System V IPC resources belong to ${user}? [y/N] " response
case "$response" in
[yY][eE][sS]|[yY])
;;
*)
exit 1;
;;
esac
# -q: message queues, -s: semaphores, -m: shared memory segments.
for opt in -q -s -m
do
# NOTE: Use grep to find matched user, false negative errors are POSSIBLE.
# In output of ipcs, the msqid/shmid/semid is the second column (awk $2).
# awk emits one "ipcrm <opt> <id>" command per matching line, which is
# then executed (and echoed, via -x) by sh.
ipcs $opt | grep " ${user} " | awk '{print "ipcrm ""'$opt'",$2}' | sh -x
done
| true
|
99f5c5f14b52b517f45ea453faa8c1396cc42810
|
Shell
|
venoodkhatuva12/scripts
|
/generate_radius_user_report.sh
|
UTF-8
| 1,121
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Purpose : Extract Data of last one month from radius logs
# Writes one CSV row (date,user,ip) per successful login found in last
# month's logs, plus a final unique-user count, to $OutPut.
Log_Path="/var/log/radius/"
# Abbreviated name of the previous month; anchoring on the 15th avoids
# end-of-month date arithmetic surprises.
Last_Month=$( date --date="$(date +%Y-%m-15) -1 month" +%b )
OutPut="/tmp/radius_users_$(date +%F).csv"
temp1=$(mktemp)
cd $Log_Path || exit 1
for log in $(find . -type f -iname "*log*" )
do
# Compressed logs are read with zcat, plain ones with cat.
[[ $log =~ '.gz' ]] && CAT=zcat || CAT=cat
$CAT $log | while read -a line
do
# Stop at the first line that does not mention last month.
# NOTE(review): the `break` assumes last month's entries are contiguous
# at the top of each file — confirm the log ordering.
[[ ${line[@]} =~ $Last_Month ]] || break
if [[ ${line[@]} =~ "Login OK" ]]
then
# Word layout: first five words are the timestamp, the 10th is the
# user, and the last word is "(ip)" — %\) strips the trailing ")".
Date="${line[@]::5}"
User=${line[9]}
IP=${line[${#line[@]} - 1 ]%\)}
printf "%s,%s,%s\n" "${Date}" $User $IP | tee -a ${OutPut}
echo "$User" >> $temp1
fi
done
done
Total_Uniq_User=$(sort $temp1 | uniq | wc -l)
printf "%s,%s\n" "Total_Uniq_User" $Total_Uniq_User | tee -a ${OutPut}
echo "Report Generated at \"${OutPut}\""
rm ${temp1}
| true
|
b139d83ba3deb6e552ee1dac60b9383dd5ad8fb1
|
Shell
|
LizRuelas/lambdachat
|
/publish-presentation.sh
|
UTF-8
| 1,134
| 2.859375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2015 CloudNative, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Publish the presentation/ directory to S3 and print its website URL.
#
# Stop execution if something goes wrong
set -e
# Read configuration details and set them as environment variables
echo "Loading configuration file: ./config.yml"
source ./yaml2shell.sh
# parse_yaml (defined by yaml2shell.sh) turns config.yml keys into the
# shell variables used below: profile, region, s3_bucket.
eval $(parse_yaml ./config.yml)
echo "Configuration loaded"
echo "Uploading the presentation files to S3..."
# Sync everything except paths matching *public*, without following symlinks.
aws s3 sync --profile ${profile} --region ${region} --exclude "*public*" --no-follow-symlinks presentation/ s3://${s3_bucket}/diapositivas/
echo "-- DONE --"
echo "Go to: http://${s3_bucket}.s3-website-${region}.amazonaws.com/diapositivas/"
| true
|
0f27c527455e747ada311d1187b1ae5ac6d2d707
|
Shell
|
phaag/nfdump
|
/src/lib/gen_version.sh
|
UTF-8
| 2,219
| 3.203125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2023, Peter Haag
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Generate vcs_track.h, recording which VCS state this tree was built from:
# the short git hash and commit date inside a git clone, or a "git"/"release"
# marker when git (or the .git directory) is unavailable.
echo Creating vcs_track.h
f="vcs_track.h"
date=$(date +'%c')
if [ -d ../../.git ]; then
    # Inside a git clone; use git metadata when the git binary is present.
    if [ -x "$(command -v git)" ]; then
        hash=$(git rev-parse --short HEAD)
        date=$(git show -s --format=%ci)
    else
        # .git exists but no git command is installed.
        hash="git"
    fi
else
    # No .git directory - most likely a release tarball.
    hash="release"
fi
# Emit the header in one redirected group instead of per-line escaped echos.
{
    echo "#ifndef __VCS_TRACK_H__"
    echo "#define __VCS_TRACK_H__"
    echo "//THIS FILE IS AUTO GENERATED"
    echo "//DO NOT TRACK THIS FILE WITH THE VCS"
    echo "#define VCS_TRACK_DATE \"$date\""
    echo "#define VCS_TRACK_HASH \"$hash\""
    echo "#endif"
} > "$f"
| true
|
94376a8ef477f3a772ee87a51ce838c0754f84d0
|
Shell
|
libdist-rs/libchatter-rs
|
/scripts/aws/throughput-vs-latency/clean-data.sh
|
UTF-8
| 478
| 3.1875
| 3
|
[] |
no_license
|
# This script converts all raw .log files in a folder ($1) into clean files:
# it keeps only "DP[...]:" lines, drops the [Start]/[End] marker lines, and
# then parses each cleaned log into a CSV next to it.
#
# Usage: clean-data.sh <data-dir>
# Guard against a missing argument — with $1 empty the glob below would
# expand to /* and walk the filesystem root.
[ -n "$1" ] || { echo "usage: $0 <data-dir>" >&2; exit 1; }
for file in "$1"/* ; do
	fname=$(basename "$file" .log)
	# Start from a fresh cleaned file on every run.
	rm -f -- "$1/$fname-cleaned.log"
	grep "DP\[.*\]:" "$file" >> "$1/$fname-cleaned.log"
	sed -i "/\[Start\]/d" "$1/$fname-cleaned.log"
	sed -i "/\[End\]/d" "$1/$fname-cleaned.log"
	python scripts/throughput-vs-latency/vary-b/parse-exp.py "$1/$fname-cleaned.log" "$1/$fname-cleaned.csv"
done
| true
|
a5b8c9b9e58abaa4cccb4eb9f514b0cfda92e777
|
Shell
|
LPD-EPFL/ASCYLIB-Cpp
|
/scripts/run_qu.sh
|
UTF-8
| 1,575
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the queue (qu) throughput benchmark: build the test binary, then sweep
# the configured algorithms / put-percentages / core configurations and
# record throughput into data/*.dat files.
# ($cores, $duration, $repetitions, $keep, $fixed_file_dat come from the
# sourced run.config / cores.config files.)
ds=qu;
. ./scripts/run.config
mkdir -p data
make clean
make "SET_CPU := 0"
# Queue algorithm identifiers passed through to the test binary.
algos=( Q_MS_LF Q_MS_LB Q_MS_H );
# params_initial=( 128 512 2048 4096 8192 );
# params_update=( 100 50 20 10 1 );
param_initial=65534;
params_put=( 40 50 60 );
params_nc=( 10 );
num_params=${#params_put[*]};
. ./scripts/cores.config
# Estimate the total wall-clock time of the sweep and print it up front.
num_tests_cores=$(echo "$cores" | wc -w);
duration_secs=$(echo "$duration/1000" | bc -l);
num_algorithms=${#algos[@]};
dur_tot=$( echo "$num_algorithms*$num_params*$num_tests_cores*$repetitions*$duration_secs" | bc -l );
printf "#> $num_algorithms algos, $num_params params, $num_tests_cores cores configurations, $repetitions reps of %.2f sec = %.2f sec\n" $duration_secs $dur_tot;
printf "#> = %.2f hours\n" $( echo "$dur_tot/3600" | bc -l );
#printf " Continue? [Y/n] ";
#read cont;
#if [ "$cont" = "n" ]; then
# exit;
#fi;
#cores=$cores_backup;
for ((i=0; i < $num_params; i++))
do
initial=${param_initial};
put=${params_put[$i]};
range=$((2*$initial));
#algos_w=( "${algos[@]/%/_$workload}" )
#algos_progs=( "${algos[@]/#/./bin/test -a}" )
#algos_str="${algos_w[@]}";
algos_str="${algos[@]}";
# Output file: per-host name unless fixed_file_dat requests a fixed name.
if [ $fixed_file_dat -ne 1 ] ; then
out="$(hostname).${ds}.thr.p${put}.dat"
else
out="data.${ds}.thr.p${put}.dat"
fi;
echo "### params -i$initial -p$put / keep $keep of reps $repetitions of dur $duration" | tee data/$out;
./scripts/scalability_rep_simple.sh ./bin/test_stackqueue "$cores" $repetitions $keep "$algos_str" -d$duration -i$initial -p$put \
| tee -a data/$out;
done;
| true
|
a4e8e2e76e584d13daae5ab65045cf817b13d234
|
Shell
|
hhnwong/MYH7nick
|
/cross_validation/testsnp/snp_split_test.sh
|
UTF-8
| 1,701
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build 10-fold cross-validation splits from snp_final.csv: drop the header,
# shuffle the rows, split into ten 46-line chunks (snp_00..snp_09), then
# assemble train/test pairs where fold k is the test set and the other nine
# folds are concatenated into the training set.
tail -n +2 snp_final.csv > data2.csv
# Shuffle IN PLACE. The original ran plain `shuf data2.csv`, which only
# printed the shuffled rows to stdout, so `split` saw unshuffled data.
shuf data2.csv -o data2.csv
split -l 46 -d data2.csv snp_
# Using '>' (not '>>') so re-runs do not append onto stale output files.
#for training set 1, testing set 1
cat snp_02 snp_03 snp_04 snp_05 snp_06 snp_07 snp_08 snp_09 snp_00 > snp_train1.csv
cat ./snp_01 > snp_test1.csv
#for training set 2, testing set 2
cat snp_03 snp_04 snp_05 snp_06 snp_07 snp_08 snp_09 snp_00 snp_01 > snp_train2.csv
cat ./snp_02 > snp_test2.csv
#for training set 3, testing set 3
cat snp_02 snp_01 snp_04 snp_05 snp_06 snp_07 snp_08 snp_09 snp_00 > snp_train3.csv
cat ./snp_03 > snp_test3.csv
#for training set 4, testing set 4
cat snp_02 snp_03 snp_01 snp_05 snp_06 snp_07 snp_08 snp_09 snp_00 > snp_train4.csv
cat ./snp_04 > snp_test4.csv
#for training set 5, testing set 5
cat snp_02 snp_03 snp_04 snp_01 snp_06 snp_07 snp_08 snp_09 snp_00 > snp_train5.csv
cat ./snp_05 > snp_test5.csv
#for training set 6, testing set 6
cat snp_02 snp_03 snp_04 snp_05 snp_01 snp_07 snp_08 snp_09 snp_00 > snp_train6.csv
cat ./snp_06 > snp_test6.csv
#for training set 7, testing set 7
cat snp_02 snp_03 snp_04 snp_05 snp_06 snp_01 snp_08 snp_09 snp_00 > snp_train7.csv
cat ./snp_07 > snp_test7.csv
#for training set 8, testing set 8
cat snp_02 snp_03 snp_04 snp_05 snp_06 snp_07 snp_01 snp_09 snp_00 > snp_train8.csv
cat ./snp_08 > snp_test8.csv
#for training set 9, testing set 9
cat snp_02 snp_03 snp_04 snp_05 snp_06 snp_07 snp_08 snp_01 snp_00 > snp_train9.csv
cat ./snp_09 > snp_test9.csv
#for training set 10, testing set 10
cat snp_02 snp_03 snp_04 snp_05 snp_06 snp_07 snp_08 snp_09 snp_01 > snp_train10.csv
cat ./snp_00 > snp_test10.csv
rm data2.csv snp_0*
| true
|
bceca8e88d3909003a462c8cfe344cf1021c41b5
|
Shell
|
holtrop/files
|
/bash_aliases.d/git
|
UTF-8
| 2,407
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Git-related shell aliases and helper functions.

# Apply the author's preferred global git configuration: identity, colors,
# pager, push behavior, and a collection of diff/log/archive/merge aliases.
function git-config-joshs()
{
git config --global user.name 'Josh Holtrop'
git config --global color.ui true
git config --global color.diff.meta yellow
git config --global core.excludesfile ${HOME}/.gitignore
git config --global core.pager 'less -FRXi'
git config --global alias.dc 'diff --cached'
git config --global alias.lg 'log --graph --abbrev-commit --decorate --date=format:"%a %Y-%m-%d %-I:%M%P" --format=format:"%C(yellow)%h%C(reset) %C(magenta)%an%C(reset) %C(cyan)%ad%C(reset) %C(green)(%ar)%C(reset)%C(red)%d%C(reset)%n %C(white)%s%C(reset)" --all'
git config --global alias.lg1 'log --graph --abbrev-commit --decorate --date=format:"%a %Y-%m-%d %-I:%M%P" --format=format:"%C(yellow)%h%C(reset) %C(magenta)%an%C(reset) %C(cyan)%ad%C(reset) %C(green)(%ar)%C(reset)%C(red)%d%C(reset)%n %C(white)%s%C(reset)"'
git config --global alias.mergef 'merge FETCH_HEAD'
git config --global alias.gdiff 'difftool -y -t gvimdiff'
git config --global alias.gdiffc 'difftool -y -t gvimdiff --cached'
git config --global alias.wdiff 'diff --word-diff=color'
git config --global alias.mktar '!function f { name="$1"; pos="$2"; if [ "$pos" == "" ]; then pos=HEAD; fi; git archive --prefix="$name"/ "$pos" | bzip2 > ../"$name".tar.bz2; }; f'
git config --global alias.mktarxz '!function f { name="$1"; pos="$2"; if [ "$pos" == "" ]; then pos=HEAD; fi; git archive --prefix="$name"/ "$pos" | xz > ../"$name".tar.xz; }; f'
git config --global alias.amd 'am --committer-date-is-author-date'
git config --global push.default upstream
git config --global alias.bcdiff 'difftool -y -t bc'
git config --global alias.bcdiffc 'difftool -y -t bc --cached'
git config --global difftool.bc.cmd 'git_bcdiff "$LOCAL" "$REMOTE" "$MERGED"'
git config --global alias.bcmerge 'mergetool -y -t bc'
git config --global mergetool.bc.cmd \
'git_bcmerge "$LOCAL" "$REMOTE" "$BASE" "$MERGED"'
git config --global alias.authors '!git log --pretty="%an" | sort | uniq -c | sort -n'
git config --global init.defaultBranch master
}
# Set the repo-local (not global) personal email address.
function git-config-local-personal()
{
local domain='gmail.com'
git config user.email 'jholtrop@'${domain}
}
alias git-find-lost-commit='git fsck --lost-found'
# Well-known SHA-1 of git's empty tree object (useful for diffing a commit
# against an empty state, despite the "commit" in the variable name).
git_empty_commit='4b825dc642cb6eb9a060e54bf8d69288fbee4904'
# Run a git command ($2...) with both the author and committer dates forced
# to the timestamp given as $1.
function gitat()
{
local timestamp="$1"
shift
GIT_AUTHOR_DATE="$timestamp" GIT_COMMITTER_DATE="$timestamp" git "$@"
}
| true
|
85778c9a54f9f28ad11a5ad776c7f8673ded50e2
|
Shell
|
tnakaicode/jburkardt
|
/hypersphere_properties/hypersphere_properties_prb.sh
|
UTF-8
| 668
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Compile the test program against the hypersphere_properties library,
# link it, run it, and capture its output; intermediates are removed.
# NOTE(review): each `exit` below runs after `[ $? -ne 0 ]` succeeded, so
# the script exits with status 0 even on failure — consider `exit 1`.
g++ -c -I/$HOME/include hypersphere_properties_prb.cpp
if [ $? -ne 0 ]; then
echo "Errors compiling hypersphere_properties_prb.cpp"
exit
fi
#
g++ hypersphere_properties_prb.o /$HOME/libcpp/$ARCH/hypersphere_properties.o -lm
if [ $? -ne 0 ]; then
echo "Errors linking and loading hypersphere_properties_prb.o."
exit
fi
#
rm hypersphere_properties_prb.o
#
mv a.out hypersphere_properties_prb
./hypersphere_properties_prb > hypersphere_properties_prb_output.txt
if [ $? -ne 0 ]; then
echo "Errors running hypersphere_properties_prb."
exit
fi
rm hypersphere_properties_prb
#
echo "Program output written to hypersphere_properties_prb_output.txt"
| true
|
b9c5038ffdab59140e103dff0d24a65dcd93d740
|
Shell
|
Elrohil44/ComputerVisionBowLegs
|
/release.sh
|
UTF-8
| 230
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
# Push the module's docker images (both :latest and the version read from
# ./version) to Docker Hub. Usage: release.sh <module>
# Credentials come from the DOCKER_USERNAME / DOCKER_PASSWORD environment
# variables; --password-stdin keeps the password out of argv.
VERSION=`cat ./version`
MODULE=$1
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
docker push elrohil/bow-legs-${MODULE}:latest
docker push elrohil/bow-legs-${MODULE}:${VERSION}
| true
|
18bb73890f8cd4a178467aaa5738f5d0685ce12e
|
Shell
|
suriyadeepan/scripts_and_such
|
/if_test.sh
|
UTF-8
| 52
| 2.640625
| 3
|
[] |
no_license
|
# Minimal demonstration of a shell if/else numeric comparison.
x=2
if [[ "$x" -eq 2 ]]; then
  echo cool
else
  echo no
fi
| true
|
d99dd2eca3ff938c995a2eca32b3d8f0ffb7540a
|
Shell
|
mlupton/openshift-internal-roadshow
|
/scripts/setup_env.sh
|
UTF-8
| 2,821
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
# Provision an OpenShift roadshow environment: create htpasswd users, put
# them in a group, create the demo project, and deploy/expose a smoke-test
# application.
# Global variables
NUM_USERS=30
USER_PREFIX=
USER_BASE=user
PASSWORD=redhat
OSE_GROUP=roadshow-users
OSE_ROLE=view
OSE_PROJECT=ose-roadshow-demo
OSE_DOMAIN=ose.example.com
OSE_APP_SUBDOMAIN=apps
PASSWD_FILE=/etc/origin/openshift-passwd
# Show script usage
usage() {
echo "
Usage: $0 [options]
Options:
--user-base=<user base> : Base user name (Default: user)
--user-prefix=<user prefix> : User prefix
--num-users=<num users> : Number of Users to provision (Default: 30)
--group=<group> : Name of the group to create (Default: roadshow-users)
--role=<role> : Name of the role to give to the newly created group for the demo project (Default: view)
--project=<project> : Name of the demo project to create (Default: ose-roadshow-demo)
--domain=<domain> : Domain name for smoke test route (Default: ose.example.com)
--app-subdomain=<app subdomain> : Subdomain name for smoke test route (Default: apps)
--passwd-file=<passwd file> : OpenShift htpasswd file (Default: /etc/origin/openshift-passwd)
"
}
# Process input: --opt=value arguments; ${i#*=} strips everything up to
# and including the '='.
for i in "$@"
do
case $i in
--user-base=*)
USER_BASE="${i#*=}"
shift;;
--user-prefix=*)
USER_PREFIX="${i#*=}"
shift;;
--num-users=*)
NUM_USERS="${i#*=}"
shift;;
--group=*)
OSE_GROUP="${i#*=}"
shift;;
--role=*)
OSE_ROLE="${i#*=}"
shift;;
--project=*)
OSE_PROJECT="${i#*=}"
shift;;
--domain=*)
OSE_DOMAIN="${i#*=}"
shift;;
--app-subdomain=*)
OSE_APP_SUBDOMAIN="${i#*=}"
shift;;
--passwd-file=*)
PASSWD_FILE="${i#*=}"
shift;;
*)
echo "Invalid Option: ${i#*=}"
exit 1;
;;
esac
done
users=
# NOTE(review): seq counts from 0 through NUM_USERS inclusive, so this
# provisions NUM_USERS+1 accounts (e.g. user00..user30) — confirm intended.
for i in $(seq -f "%02g" 0 $NUM_USERS)
do
username=${USER_PREFIX}${USER_BASE}${i}
# Create new Users
htpasswd -b ${PASSWD_FILE} $username ${PASSWORD}
# Create Comma Separated List for Groups
users+="\"${username}\","
done
# Hold current project name to switch back into
current_project=$(oc project --short)
oc project default &>/dev/null
# ${users%?} drops the trailing comma before embedding the list in JSON.
echo "{ \"kind\": \"Group\", \"apiVersion\": \"v1\", \"metadata\": { \"name\": \"${OSE_GROUP}\", \"creationTimestamp\": null }, \"users\": [ ${users%?} ] }" | oc create -f -
oc new-project ${OSE_PROJECT} --display-name="OpenShift Roadshow Demo" --description="OpenShift Roadshow Demo Project"
oadm policy add-role-to-group ${OSE_ROLE} ${OSE_GROUP} -n ${OSE_PROJECT}
# Deploy, scale, and expose the smoke-test app, then restore the project.
oc new-app https://github.com/gshipley/smoke -n ${OSE_PROJECT}
oc scale dc smoke --replicas=2 -n ${OSE_PROJECT} &>/dev/null
oc expose service smoke --hostname=smoketest.${OSE_APP_SUBDOMAIN}.${OSE_DOMAIN} &>/dev/null
oc project $current_project &>/dev/null
| true
|
17b4098e4759ebbfaba3b6f2ea688bd1718e338a
|
Shell
|
grussdorian/ARM-Assembly
|
/assembler/compile.sh
|
UTF-8
| 693
| 2.859375
| 3
|
[] |
no_license
|
#compile -a hello.asm -o hello.o -e hello
# ld -o hello hello.o \
# -lSystem \
# -syslibroot `xcrun -sdk macosx --show-sdk-path` \
# -e _start \
# -arch arm64
while getopts a:o:e: flag
do
case "${flag}" in
a) asmfile=${OPTARG};;
o) object=${OPTARG};;
e) executable=${OPTARG};;
esac
done
# echo $asmfile
# echo $object
# echo $executable
as $asmfile -o $object
#ld -o $executable $object -syslibroot `xcrun -sdk macosx --show-sdk-path` -e _start -arch arm64
ld -o $executable $object -lSystem \
-syslibroot `xcrun -sdk macosx --show-sdk-path` \
-e _start \
-arch arm64
| true
|
cdfa8bf3a3cf123f5698557d77d2633dee42e718
|
Shell
|
kylebailey94/ipoddisk
|
/src/iPodDisk.app/Contents/MacOS/iPodDisk
|
UTF-8
| 2,091
| 4
| 4
|
[] |
no_license
|
#!/bin/sh
# Launcher for iPodDisk: sanity-checks the environment (OS X version,
# MacFUSE, a mounted iPod), mounts the ipoddiskfuse volume, and opens it
# in Finder.

# Show a Finder warning dialog (single "Quit" button) via AppleScript.
display_warning_dialog () {
# If no arguments were passed, return 1
if [ -z "$1" ]
then
return 1
else
osascript <<WARNINGDIALOG
tell application "Finder"
activate
display dialog "$1" buttons {"Quit"} default button "Quit" with icon caution
end tell
WARNINGDIALOG
fi
}
# check against OS X version (accept only 10.4.x / 10.5.x prefixes)
osxver=`sw_vers -productVersion`
echo $osxver | grep '^10\.[45]'
if [ $? -ne 0 ]
then
display_warning_dialog "Sorry, iPodDisk requires OS X 10.4 or later"
exit 1
fi
# check MacFUSE
if [ ! -f /usr/local/lib/libfuse.dylib ]
then
display_warning_dialog "Please install MacFUSE first."
open 'http://code.google.com/p/macfuse/downloads/detail?name=MacFUSE-Core-0.2.4.dmg&can=2&q='
exit 1
fi
# find the bundle contents dir
# note: when Finder invokes this script, it uses absolute path. Thus, we can 'cd'
# to other directory, and don't have to worry about $macosdir being a relative
# path name.
macosdir=`/usr/bin/dirname "$0"`
contentsdir=`/usr/bin/dirname "$macosdir"`
# quit if iPodDisk volume still mounted — just re-open it in Finder.
ipoddisk=`mount | grep '^iPodDisk' | awk '{print $3;}'`
if [ ! -z "$ipoddisk" ]
then
open "$ipoddisk"
exit 0
fi
# Find an iPod mount point: the first volume containing an iTunesDB.
for mp in /Volumes/*; do
if [ -f "$mp/iPod_Control/iTunes/iTunesDB" ]
then
ipodmp=$mp
break
fi
done
if [ -z "$ipodmp" ]
then
display_warning_dialog "No iPod found. Is disk mode on?"
exit 1
fi
# create mount point
mount_point=/Volumes/iPodDisk/
mkdir $mount_point
# filenames must be double-quoted because $macosdir may contain spaces
"$macosdir/ipoddiskfuse" -oping_diskarb,subtype=1,volname=iPodDisk,fsname=iPodDisk $mount_point
errcode=$?
if [ $errcode -ne 0 ]
then
display_warning_dialog "iPodDisk failed to start. Please report this with the outputs of running /Applications/iPodDisk.app/Contents/MacOS/iPodDisk from the Terminal, thanks"
exit 1
fi
# give the FUSE mount a moment to appear before looking it up
sleep 1
# open iPodDisk volume
ipoddisk=`mount | grep '^iPodDisk' | awk '{print $3;}'`
if [ -z "$ipoddisk" ]
then
display_warning_dialog "Failed to find iPodDisk volume."
exit 1
else
open "$ipoddisk"
fi
exit 0
| true
|
bcd873a26cfee5a9dc2909a04959738c8b92dc8c
|
Shell
|
KazAoyama/KaigoSystem
|
/E-LIFE/KEIRI/CGI/KOJINATESEIKYUSHO_SAKUSEI.KEISAN.NORMAL_NOTHIWARI
|
UTF-8
| 5,062
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# KOJINATESEIKYUSHO_SAKUSEI.KEISAN.NORMAL_NOTHIWARI  CGI for the facility-billing
# calculation step: handles the charge lines that are NOT prorated by day.
#
# Written by S.Otsubo

# Load environment and job configuration
source /home/hands/.bashrc &> /dev/null
source /home/hands/E-LIFE/KEIRI/CGI/KOJINATESEIKYUSHO_SAKUSEI.INI &> /dev/null

# Trace log: one directory per day; stderr is redirected there and xtrace enabled
# (${today}/${current_time} are presumably set by the INI file — TODO confirm)
[ ! -e /home/hands/E-LIFE/KEIRI/TRACE_LOG/${today} ] && mkdir /home/hands/E-LIFE/KEIRI/TRACE_LOG/${today}
exec 2> /home/hands/E-LIFE/KEIRI/TRACE_LOG/${today}/LOG.$(basename ${0}).${HOSTNAME}.${current_time}; set -xv

#---------------------------------------------------------------
#---------------------------------------------------------------
# Print the given message and abort the script with status 1.
function error_exit {
message="${1}"
echo "${message}"
exit 1
}
#---------------------------------------------------------------
#---------------------------------------------------------------
# Arguments
namefile=${1}
# Variables — name-source expands the name-file into shell assignments
# (presumably sets $year, $month, $tmp; verify against caller)
eval $(name-source ${namefile})
seikyu_syori_month="${year}${month}"
seikyu_syori_next_month="$(mdate ${seikyu_syori_month}m/+1)"
seikyu_syori_two_next_month="$(mdate ${seikyu_syori_month}m/+2)"
seikyu_syori_last_month="$(mdate ${seikyu_syori_month}m/-1)"
seikyu_syori_two_last_month="$(mdate ${seikyu_syori_month}m/-2)"
#---------------------------------------------------------------
#---------------------------------------------------------------
# Nothing to do if the input file for this step is missing or empty
[ ! -s ${tmp}-tujyou_hiwari_taisyougai ] && exit 0
#---------------------------------------------------------------
#---------------------------------------------------------------
### Non-prorated processing
# -
# ${tmp}-tujyou_hiwari_taisyougai
# 1:resident ID 2:billing month 3:contract ID 4:fee-type ID 5:contract sub-no.
# 6:fee name 7:tax category 8:payment category 9:cost-calculation method 10:accrual start date
# 11:accrual end date 12:amount 13:special-sale ID 14:discount amount 15:amount after discount
# 16:special-sale FROM 17:special-sale TO 18:accrual start month 19:accrual end month 20:record creation date
# Decide whether the special-sale period overlaps the billing month; if it does,
# adopt the special-sale (discounted) amount instead of the normal amount.
# Also append a special-processing flag field so the layout matches the prorated branch.
# (self 16.1.6 / 17.1.6 presumably take chars 1-6 = YYYYMM of those dates — TODO confirm Tukubai semantics)
self 1/NF 16.1.6 17.1.6 ${tmp}-tujyou_hiwari_taisyougai |
# 1:resident ID 2:billing month 3:contract ID 4:fee-type ID 5:contract sub-no.
# 6:fee name 7:tax category 8:payment category 9:cost-calculation method 10:accrual start date
# 11:accrual end date 12:amount 13:special-sale ID 14:discount amount 15:amount after discount
# 16:special-sale FROM 17:special-sale TO 18:accrual start month 19:accrual end month 20:record creation date
# 21:special-sale FROM month 22:special-sale TO month
awk '{if($13!="_" && $21<=$2 && $22>=$2)
{zeimae_kakutei=$15}
else
{zeimae_kakutei=$12};
print $0,zeimae_kakutei,"0"}' |
# 1:resident ID 2:billing month 3:contract ID 4:fee-type ID 5:contract sub-no.
# 6:fee name 7:tax category 8:payment category 9:cost-calculation method 10:accrual start date
# 11:accrual end date 12:amount 13:special-sale ID 14:discount amount 15:amount after discount
# 16:special-sale FROM 17:special-sale TO 18:accrual start month 19:accrual end month 20:record creation date
# 21:special-sale FROM month 22:special-sale TO month 23:pre-tax confirmed amount 24:special-processing flag
# Drop fields 21/22 so the record layout matches the prorated branch
delf 21/22 > ${tmp}-tujyou_hiwari_taisyougai_kakutei
# 1:resident ID 2:billing month 3:contract ID 4:fee-type ID 5:contract sub-no.
# 6:fee name 7:tax category 8:payment category 9:cost-calculation method 10:accrual start date
# 11:accrual end date 12:amount 13:special-sale ID 14:discount amount 15:amount after discount
# 16:special-sale FROM 17:special-sale TO 18:accrual start month 19:accrual end month 20:record creation date
# 21:pre-tax confirmed amount 22:special-processing flag
#---------------------------------------------------------------
# Abort if any stage of the pipeline above failed
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || error_exit "処理中にエラーが発生しました(NORMAL_NOTHIWARI)"
#---------------------------------------------------------------
#---------------------------------------------------------------
# Column/row sanity checks: 22 fields, consistent across every record
[ "$(retu ${tmp}-tujyou_hiwari_taisyougai_kakutei)" != "22" ] && error_exit "処理中にエラーが発生しました(NORMAL_NOTHIWARI)"
[ "$(retu ${tmp}-tujyou_hiwari_taisyougai_kakutei | gyo)" != "1" ] && error_exit "処理中にエラーが発生しました(NORMAL_NOTHIWARI)"
[ "$(awk 'NF!="22"' ${tmp}-tujyou_hiwari_taisyougai_kakutei | gyo)" != "0" ] && error_exit "処理中にエラーが発生しました(NORMAL_NOTHIWARI)"
#---------------------------------------------------------------
### End of non-prorated processing
#---------------------------------------------------------------
exit 0
| true
|
0a54fc228928bc48d09e0adbc8f7c8f9eca03453
|
Shell
|
pisecurity/opencv-manager
|
/setup.sh
|
UTF-8
| 900
| 2.578125
| 3
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Install the system packages required to build OpenCV from source.
# FriendlyELEC boards are not supported, so bail out early on those.
if [ -f /etc/friendlyelec-release ]; then
    echo "OpenCV is not supported on this platform"
    exit 0
fi

# Small wrapper so each dependency group reads as one call; apt-get
# receives exactly the same arguments as before.
install_pkgs() {
    apt-get install "$@"
}

# Build tools:
install_pkgs build-essential cmake make gcc g++ ccache yasm unzip wget
# Media I/O:
install_pkgs zlib1g-dev libjpeg-dev libwebp-dev libpng-dev libtiff5-dev libjasper-dev libopenexr-dev libgdal-dev libgphoto2-dev
# Video I/O:
install_pkgs libdc1394-22-dev libavcodec-dev libavformat-dev libavresample-dev libswscale-dev libtheora-dev libvorbis-dev libxvidcore-dev libx264-dev libopencore-amrnb-dev libopencore-amrwb-dev libv4l-dev
# Parallelism and linear algebra libraries:
install_pkgs libtbb-dev libeigen3-dev liblapacke-dev libatlas-base-dev
# OCR and fonts support:
install_pkgs tesseract-ocr tesseract-ocr-pol libtesseract-dev libharfbuzz-dev libfreetype6-dev
# Python interface:
install_pkgs python-dev python-numpy python3-dev python3-numpy
| true
|
d4c6c0121157cec3dab8703e38a27e4c224fa2c9
|
Shell
|
tnshibu/daytrader
|
/aws-resources/api-gatewayv2/api-gatewayv2-linux.sh
|
UTF-8
| 3,299
| 3.25
| 3
|
[] |
no_license
|
#These have the same commands as the readme/commands text file.
#It has been simplified and made into series of shell commands
#====================================================================================================
# Shared settings for every call below.
profile=shibu_admin
lambda_arn="arn:aws:lambda:us-east-2:560773393352:function:sspoc_daytrader_kubernetes_deploy"

# Get list of existing API gateways and delete them.
# BUG FIX: get-apis returns ALL ids whitespace-separated; deleting them as one
# argument failed whenever more than one API existed — delete each id in turn.
apiid=$(aws apigatewayv2 --profile "$profile" get-apis --query "Items[].ApiId" --output text)
echo "$apiid"
for id in $apiid; do
    echo "deleting api = $id"
    aws apigatewayv2 --profile "$profile" delete-api --api-id "$id"
done
#====================================================================================================
#Create API gateway for Lambda function
echo create api...
aws apigatewayv2 --profile "$profile" create-api --name shibu_ff --protocol-type HTTP --target "$lambda_arn"
apiid=$(aws apigatewayv2 --profile "$profile" get-apis --query "Items[].ApiId" --output text)
echo "$apiid"
#====================================================================================================
aws apigatewayv2 --profile "$profile" get-integrations --api-id "$apiid"
#Note down Integration ID
integrationId=$(aws apigatewayv2 --profile "$profile" get-integrations --api-id "$apiid" --query "Items[].IntegrationId" --output text)
echo "$integrationId"
#====================================================================================================
aws apigatewayv2 --profile "$profile" get-routes --api-id "$apiid"
routeId=$(aws apigatewayv2 --profile "$profile" get-routes --api-id "$apiid" --query "Items[].RouteId" --output text)
echo "$routeId"
#====================================================================================================
# Route every GET on / to the integration.
aws apigatewayv2 --profile "$profile" update-route --api-id "$apiid" --route-id "$routeId" --route-key "GET /"
#====================================================================================================
#Update HTTP method to GET
aws apigatewayv2 --profile "$profile" update-integration --api-id "$apiid" --integration-id "$integrationId" --integration-method GET
#====================================================================================================
#Allow API gateway to invoke Lambda
# bash: random 32-char hex statement id so repeated runs do not collide
randomId=$(hexdump -n 16 -e '4/4 "%08X" 1 "\n"' /dev/urandom)
aws lambda add-permission \
    --profile "$profile" \
    --statement-id "$randomId" \
    --action lambda:InvokeFunction \
    --function-name "$lambda_arn" \
    --principal apigateway.amazonaws.com \
    --source-arn "arn:aws:execute-api:us-east-2:560773393352:$apiid/*/*/"
#====================================================================================================
#invoke from command line
echo sleep for some time and then trigger the lambda url
sleep 10
echo curl -H "X-Amz-Invocation-Type: Event" "https://$apiid.execute-api.us-east-2.amazonaws.com/"
curl -H "X-Amz-Invocation-Type: Event" "https://$apiid.execute-api.us-east-2.amazonaws.com/"
#====================================================================================================
#====================================================================================================
| true
|
142fcee3793493f063b08d4d4e1ea1f18e9d4e47
|
Shell
|
190300451/Menu-interactivo-
|
/Menu interactivo (5 laboratorios)
|
UTF-8
| 959
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive menu: repeatedly asks which lab script to run and executes it.

# Print the menu of available options.
vis(){
echo
echo Menu Interactivo
echo
echo 1.-lab1
echo
echo 2.-lab2
echo
echo 3.-lab3
echo
echo 4.-lab4
echo
echo 5.-lab5
echo
echo 6.-salida
echo
echo
echo
}

echo "Bienvenido Inge, Ingrese su nombre: "
read -r nombre
echo "Hola inge $nombre"

# Directory holding the lab scripts (unused vars editor/passwd/colors removed).
labdir=/home/leonardo/laboratorios

while true
do
vis
echo "Seleccione el numero de script que desea ejecutar"
# BUG FIX: stop cleanly on EOF (Ctrl-D) instead of looping forever.
read -r n || break
case "$n" in
1) cd "$labdir" && ./lab1.sh && sleep 2 && clear;;
2) cd "$labdir" && ./lab2.sh && sleep 2 && clear;;
3) cd "$labdir" && ./lab3.sh && sleep 2 && clear;;
4) cd "$labdir" && ./lab4.sh && sleep 2 && clear;;
5) cd "$labdir" && ./lab5.sh && sleep 2 && clear;;
# BUG FIX: option 6 ("salida") was shown in the menu but never handled,
# so the loop could not be exited.
6) exit 0;;
*) echo "Opcion invalida";;
esac
done
| true
|
b08ec27b78a805a471ff17e7a36107d240eb0944
|
Shell
|
zalari/vagrant-ubuntu-java-docker
|
/make_box.sh
|
UTF-8
| 638
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#this script builds a vagrant box and exports it to vagrant-java-docker.box
#furthermore it fixes the current VirtualBox tools, because docker exchanges the kernel..."
# Remove any previous artifact and make sure the guest-additions plugin exists.
rm -f ubuntu-java-docker.box
vagrant plugin install vagrant-vbguest
echo "Remove old base box and build new one..."
vagrant destroy -f
vagrant up
vagrant halt
# Reinstall the guest additions against the (docker-replaced) kernel;
# a mount error during this phase is expected.
echo "Updating virtualbox tools, mount error is expected behaviour..."
vagrant up
vagrant vbguest --do install
vagrant halt
# Run the in-VM cleanup script, then export the box.
echo "Do cleanup..."
vagrant up
vagrant ssh -c "sudo /vagrant/bootstrap/cleanup.sh"
echo "Start packaging base box..."
vagrant package --output ubuntu-java-docker.box
| true
|
b9aace2cbb7e415d9716d6c23fed0fb251843653
|
Shell
|
cms-analysis/HiggsAnalysis-HZZ4l_Combination
|
/RunLimits/scripts/make_ML.sh
|
UTF-8
| 1,141
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run combine's MaxLikelihoodFit for a given Higgs mass point.
# Usage: make_ML.sh [-s] [-1] <mass-dir> [datacard.root]
#   -s  strict: tighten the minimizer tolerance
#   -1  use the Minuit minimizer algorithm (only used by the commented OPTIONS line)
STRICT=0; if [[ "$1" == "-s" ]]; then STRICT=1; shift; fi;
MINIM=0; if [[ "$1" == "-1" ]]; then MINIM="--minimizerAlgo=Minuit"; shift; fi;
# The mass argument doubles as the working directory name.
if test -d $1; then MASS=$1; else echo "Usage: $0 mass [what ]"; exit 1; fi;
cd $MASS
WHAT="ML"
MATCH=$2;
OPTIONS="--X-rtd TMCSO_AdaptivePseudoAsimov --optimizeSim=1 --minimizerStrategy=2"
#OPTIONS=" --optimizeSim=1 $MINIM "
if [[ "$STRICT" == 1 ]]; then
OPTIONS="$OPTIONS --minimizerTolerance=0.00001"
fi
# run WHAT datacard.root [extra combine args]
#   Derives a label from the datacard name (comb_hzz4l.root -> HZZ4L) and runs
#   the fit, teeing output to <datacard>.log.<WHAT>. Skipped when MATCH is set
#   and does not equal this datacard.
function run {
WHAT=$1; shift
NAM=$(echo $1 | sed -e s/comb_// -e s/.root// | tr [a-z] [A-Z])
if [[ "$MATCH" == "" || "$MATCH" == "$1" ]]; then
if test -f $1; then
echo "combine -M MaxLikelihoodFit $* -n ${NAM}_${WHAT} -m $MASS $OPTIONS --out"
combine -M MaxLikelihoodFit $* -n ${NAM}_${WHAT} -m $MASS $OPTIONS --out . 2>&1 | tee ${1/.root/.log.$WHAT}
fi;
fi;
}
# With no MATCH argument, run the default channel list (others kept commented).
if [[ "$MATCH" == "" ]]; then
# run $WHAT comb_hgg.root
# run $WHAT comb_hww.root
# run $WHAT comb_htt.root
run $WHAT comb_hzz4l.root
# run $WHAT comb_hzz2l2nu.root
# run $WHAT comb_hzz2l2q.root
# run $WHAT comb.root
else
run $WHAT $MATCH
fi
| true
|
b98f867a6fcada0ef0a60e626ebfdec3a862309b
|
Shell
|
alan-turing-institute/data-safe-haven
|
/tests/srd_smoke_tests/test_databases.mustache.sh
|
UTF-8
| 1,116
| 3.65625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#! /bin/bash
# Smoke test: run the language-specific database test against whichever
# database server (MSSQL or PostgreSQL) is deployed in this SRE.
# Usage: test_databases.sh -d <mssql|postgres> -l <python|R>
# NOTE: {{...}} placeholders are filled in by mustache before deployment.
db_type=""
language=""
db_name=""
port=""
server_name=""
while getopts d:l: flag; do
    case "${flag}" in
        d) db_type=${OPTARG};;
        l) language=${OPTARG};;
        *) echo "Invalid option ${OPTARG}"; exit 1;;
    esac
done

if [ "$db_type" == "mssql" ]; then
    db_name="master"
    port="{{SmokeTests.MSSQLPort}}"
    server_name="{{SmokeTests.MSSQLVMName}}.{{shm.domain.fqdn}}"
elif [ "$db_type" == "postgres" ]; then
    db_name="postgres"
    port="{{SmokeTests.PostgreSQLPort}}"
    server_name="{{SmokeTests.PostgreSQLVMName}}.{{shm.domain.fqdn}}"
else
    echo "Did not recognise database type '$db_type'"
fi

# An empty port means this database was not deployed in the SRE.
# BUG FIX: the original `[ $port == "" ]` left $port unquoted (and possibly
# unset), which is a test-syntax error rather than a string comparison.
if [ -z "$port" ]; then
    echo "Database type '$db_type' is not part of this SRE"
    echo "All database tests passed"
else
    script_path=$(dirname "$(readlink -f "$0")")
    if [ "$language" == "python" ]; then
        python "${script_path}"/test_databases_python.py --db-type "$db_type" --db-name "$db_name" --port "$port" --server-name "$server_name"
    elif [ "$language" == "R" ]; then
        Rscript "${script_path}"/test_databases_R.R "$db_type" "$db_name" "$port" "$server_name"
    fi
fi
| true
|
276a079a4cb3df9417fa2dbaea744b5c010a6ddc
|
Shell
|
mansurovs/DMI__
|
/a_VS_b_C03.sh
|
UTF-8
| 1,443
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
#echo "Input a:"
#read a
#echo "Input b:"
#read b
#if [ $a -eq $b ] #ja a ir vienada ar b
#then
#echo "a ($a) ir vienads ar b ($b)"
#else
#echo "a ($a) nav vienads ar b ($b)"
#fi
#echo "Input a"
#read a
#echo "Input b"
#read b
#if [ $a -gt $b ]
#then
#echo "a ($a) ir lielaks par b ($b)"
#else
#echo "a ($a) ir mazaks par b ($b)"
#fi
#lt - menshe less then
#echo "Input a"
#read a
#echo "Input b"
#read b
#echo "Input c"
#read c
#if [ $a -eq $b ]
#then
#echo "a = b"
#elif [ $a -gt $b ]
#then
#echo "a > b"
#else
#echo "a < b"
#fi
#if [ $a -gt $b -a $a -gt $c ]
#then
#echo "$a >>>> alll"
#elif [ $b -gt $a -a $b -gt $c ]
#then
#echo "$b >>>> alll"
#elif [ $c -gt $a -a $c -gt $b ]
#then
#echo "$c >>> alll"
#fi
#for i in {2..100..1}
#do
# if [ `expr $i % $i` -eq 0 ]
# then
# for j in {2..100..1}
# do
# if [ $j -ne $i -a `expr $i % $j` -eq 0 ]
# then
# break
# fi
# done
# fi
#done
# Print every prime in 2..100, one per line (the trailing space after each
# number is kept for compatibility with the original output format).
numberz () {
    local i j is_prime
    for i in {2..100}
    do
        is_prime=1
        for j in {2..100}
        do
            # A divisor other than the number itself means it is composite.
            # (arithmetic replaces the original per-iteration `expr` forks)
            if (( j != i && i % j == 0 ))
            then
                is_prime=0
                break
            fi
        done
        if (( is_prime == 1 ))
        then
            # Fixed format string: the original used printf "$i \n", which
            # treats the data as the format.
            printf '%s \n' "$i"
        fi
    done
}
# Print the maximum, minimum and integer average of the numeric arguments,
# in the same "\n max = M \n min = m \n avg = A \n" format as before.
average () {
    local max min sum n
    # BUG FIX: with no arguments the original divided by zero ($# == 0);
    # fail loudly instead.
    if (( $# == 0 ))
    then
        printf 'average: no arguments\n' >&2
        return 1
    fi
    max=$1
    min=$1
    # BUG FIX: the accumulator was never initialised; the original only
    # worked because GNU `expr + N` happens to return N.
    sum=0
    for n in "$@"
    do
        (( n > max )) && max=$n
        (( n < min )) && min=$n
        sum=$(( sum + n ))
    done
    printf '\n max = %s \n min = %s \n avg = %s \n' "$max" "$min" "$(( sum / $# ))"
}

# Forward the script's arguments verbatim ("$@" instead of the word-splitting $*).
average "$@"
#Made by RTUhacker
| true
|
8d3b34b58ccce1bec8cffd29dc22aec44a50bcba
|
Shell
|
lovesegfault/dotfiles-1
|
/bin/emacs_install.sh
|
UTF-8
| 734
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
# Install Doom Emacs from scratch, or upgrade an existing installation.
set -e

# If doom is already on PATH, just upgrade and rebuild.
# BUG FIX: the original used `command -v doom 2>&2` — a no-op redirect
# (stderr to stderr) that still printed the binary path to stdout. The
# intent was clearly to run the check silently.
if command -v doom >/dev/null 2>&1; then
    doom upgrade
    doom build
    exit
fi

# install in XDG_CONFIG_HOME, but link to HOME nonetheless, as
# XDG_CONFIG_HOME might not be set when launching Emacs from the GUI
if [ -z "$XDG_CONFIG_HOME" ]; then
    export XDG_CONFIG_HOME="$HOME/.config"
fi
export DOOMDIR="$XDG_CONFIG_HOME/doom"

emacs_config="$XDG_CONFIG_HOME/emacs"
emacs_d="$HOME/.emacs.d"
doom_d="$HOME/.doom.d"

# Start from a clean slate: remove any previous installation and links.
rm -rf "$emacs_config"
rm -rf "$emacs_d"
rm -rf "$DOOMDIR"
rm -rf "$doom_d"

mkdir -p "$XDG_CONFIG_HOME"
git clone https://github.com/hlissner/doom-emacs "$emacs_config"
ln -s "$emacs_config" "$emacs_d"
"$emacs_config"/bin/doom install
ln -s "$DOOMDIR" "$doom_d"
ln -sft ~/.local/bin "$emacs_config"/bin/doom
| true
|
e8462af52924ae298d1474ed815bab221df1002c
|
Shell
|
DhruvAggarwal98/ECE364
|
/Prelab 09/getMoreUsedComponent.bash
|
UTF-8
| 671
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# ######################################################
# Author : < Your Full Name >
# email : < Your Email >
# ID : < Your course ID , e . g . ee364j20 >
# Date : < Start Date >
# ######################################################
# Print whichever of the two component IDs appears in more circuit files
# (ties go to the second ID, as in the original comparison).
# Usage: getMoreUsedComponent.bash <id1> <id2>

DataPath=~ee364/DataFolder/Prelab09

id1=$1
id2=$2

# count_matches ID DIR
#   Echo the number of regular files in DIR whose contents match the extended
#   regex ID. Replaces the original copy-pasted `for ... in $(ls ...)` loops,
#   which break on unusual file names and unquoted patterns.
count_matches() {
    local id=$1 dir=$2 count=0 f
    for f in "$dir"/*; do
        [ -f "$f" ] || continue
        if grep --quiet -E -- "$id" "$f"; then
            count=$((count + 1))
        fi
    done
    echo "$count"
}

count1=$(count_matches "$id1" "$DataPath/circuits")
count2=$(count_matches "$id2" "$DataPath/circuits")

if [ "$count1" -gt "$count2" ]
then
    echo "$id1"
else
    echo "$id2"
fi
| true
|
38a4c3c94692dba595ff2c46c17ba37ce6505da2
|
Shell
|
CommandPost/CommandPost
|
/scripts/build_commandpost_release_retry_notorise.sh
|
UTF-8
| 805
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# COMMANDPOST BUILD RELEASE SCRIPT:
# Re-runs only the notarization step for an already-built CommandPost DMG.
#
# Abort on any error, unset variable, or pipeline failure.
set -eu
set -o pipefail

#
# Define Variables:
#
export SENTRY_ORG=commandpost
export SENTRY_PROJECT=commandpost
export SENTRY_LOG_LEVEL=debug
# greadlink (GNU readlink from coreutils) resolves this script's real location.
export SCRIPT_HOME ; SCRIPT_HOME="$(dirname "$(greadlink -f "$0")")"
export COMMANDPOST_HOME ; COMMANDPOST_HOME="$(greadlink -f "${SCRIPT_HOME}/../")"
# VERSION = most recent git tag of the sibling CommandPost-App checkout.
export VERSION ; VERSION=$(cd "${COMMANDPOST_HOME}/../CommandPost-App/" || fail "Unable to enter ${COMMANDPOST_HOME}/../CommandPost-App/" ; git describe --abbrev=0)

echo " * Moving to CommandPost-App Directory..."
cd "${COMMANDPOST_HOME}/../CommandPost-App/"

# Notarize the already-packaged DMG for this version.
echo " * Notorizing DMG..."
./scripts/build.sh notarize -z "${COMMANDPOST_HOME}/../CommandPost-Releases/${VERSION}/CommandPost_${VERSION}.dmg"

echo " * CommandPost has been successfully built!"
| true
|
ee457002155bfb078777244de56a0cfbacb25826
|
Shell
|
d405f16/chawk
|
/make.sh
|
UTF-8
| 316
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Fetch, build and set up the chawk compiler and its JS frontend.
# BUG FIX: every `cd` failure now aborts the script (set -e + subshells);
# previously a failed clone left git/gradle running in the wrong directory.
set -e

echo "Downloading compiler..."
if [ ! -d "tmp/chawk" ]; then
    git clone https://github.com/d405f16/chawk-compiler.git tmp/chawk >/dev/null
fi
# Update the checkout without leaking the directory change.
(
    cd tmp/chawk
    git pull >/dev/null
)

echo "Building compiler..."
(
    cd tmp/chawk
    gradle fatJar >/dev/null
)

# Install the frontend's node dependencies.
(
    cd src
    npm install >/dev/null
)
| true
|
0525284c8dadcb6ade5fb63fac013d250a037494
|
Shell
|
mrbian/Other-intel-iot
|
/build/run_demo
|
UTF-8
| 1,537
| 3.109375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Launch one component of the demo: the HTTP broker, the sample center, or
# one of the hub variants (each hub differs only in its config file).

if [ "x$1" == "x" ]; then
    echo "HELP: run task, task could be:"
    echo "    broker start the http broker"
    echo "    center start the sample center"
    echo "    hub start the sample hub"
    echo "    edison start the sample hub in edison (it includes real things)"
    echo "    skartkit start the startkit hub in edison"
    echo "    grove start the hub for wio-link"
    echo "    lattepanda start the hub for lattepanda"
    echo "    orion start the hub for orion + edison"
    echo "    tank start the hub for Official Edison Arduino + tank"
    echo "    tank_romeo start the hub for Edison Romeo + tank"
    exit 1
fi

case "$1" in
    broker)
        node ./node_modules/hope-http-broker/bin/start_broker 16666
        ;;
    center)
        ./center ./node_modules/hope-demo/center/config.json
        ;;
    hub)
        ./hub ./node_modules/hope-demo/mockup/hub_a/config.json
        ;;
    hub_b)
        ./hub ./node_modules/hope-demo/mockup/hub_b/config.json
        ;;
    edison)
        ./hub ./node_modules/hope-demo/edison/config.json
        ;;
    startkit)
        ./hub ./node_modules/hope-demo/startkit/config.json
        ;;
    grove)
        ./hub ./node_modules/hope-demo/grove_auto/config.json
        ;;
    lattepanda)
        ./hub ./node_modules/hope-demo/lattepanda/config.json
        ;;
    orion)
        ./hub ./node_modules/hope-demo/makeblock/config.json
        ;;
    tank)
        ./hub ./node_modules/hope-demo/tank/config.json
        ;;
    tank_romeo)
        ./hub ./node_modules/hope-demo/tank_romeo/config.json
        ;;
    *)
        # BUG FIX: unknown tasks previously fell through silently with exit 0.
        echo "Unknown task: $1" >&2
        exit 1
        ;;
esac
| true
|
19b574fba98f65645a27be3263ef8f0683cb959f
|
Shell
|
jonepl/Docker-WordPress-Dev-Env
|
/dev/Scripts/Local/package-wordpress.sh
|
UTF-8
| 1,048
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Package a local WordPress site for migration: source tree + database dump.
# Usage: package-wordpress.sh <sitename>

SITENAME=$1
WPDBUSER=root
# Pull the DB name and root password out of docker-compose.yml.
# (UUOC fix: grep reads the file directly instead of `cat | grep`)
WPDBNAME=$(grep MYSQL_DATABASE ../docker-compose.yml | cut -d \: -f 2 | sed -e 's/^[[:space:]]*//')
WPDBPASS=$(grep MYSQL_ROOT_PASSWORD ../docker-compose.yml | cut -d \: -f 2 | sed -e 's/^[[:space:]]*//')

# Quoted -z tests: the originals were unquoted and silently degenerated
# to `[ -z ]` when a value was empty.
if [ -z "$SITENAME" ] || [ -z "$WPDBNAME" ] || [ -z "$WPDBPASS" ]; then
    echo "$0: Invalid site name, db name or db password."
    exit 1
fi

#TODO: Cleaup directory before packaging
rm -rf "Backups/${SITENAME}/Local"/*.sql
rm -rf "Backups/${SITENAME}/Local"/*.gz

echo "Starting Docker..."
docker-compose up -d

# Packages source code
echo "$0: Executing docker backup script for src changes."
docker exec wordpress sh /wp-dev/Scripts/Local/docker-wp-backup.sh "${SITENAME}"

# Packages database code
echo "$0: Executing docker backup script for src changes."
docker exec mysql sh /wp-dev/Scripts/Local/docker-db-backup.sh "${SITENAME}" "${WPDBUSER}" "${WPDBNAME}" "${WPDBPASS}"

cp Scripts/Sql/2-migrate-to-server.sql "Backups/${SITENAME}/Local/"
echo "$0: Completed packaging of Local WordPress site."
exit 0
| true
|
6fd032dc57ceef261cfc36ab24cef69e7d064679
|
Shell
|
harlantwood/dev_env
|
/shell_ext_any_box.sh
|
UTF-8
| 4,507
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
# Designed to be used on both dev and production servers
# Read and understand all of this that you use!
#
# As the license says:
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ###############################################################################
# # Prompt
# ###############################################################################

# Only customise the prompt when running inside a nix-shell.
# BUG FIX: `[ -n $IN_NIX_SHELL ]` with the variable unset expands to `[ -n ]`,
# which tests the literal string "-n" and is therefore ALWAYS true, so this
# block used to run in every shell.
if [ -n "${IN_NIX_SHELL:-}" ]; then
  # Emit " [branch]" for the current git branch, or nothing outside a repo.
  parse_git_branch() {
    git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ [\1]/'
  }
  export PS1="\[\e[1;36m\]NIX \[\e[0m\]\w\[\e[01;32m\]\$(parse_git_branch)\[\e[0m\] $ \[\e[0m\]"
fi
###############################################################################
# Basics
###############################################################################
alias ll='ls -lFh'
alias lla='ls -lFah'
alias llt='ls -lhFart'
alias a=alias
alias br='bin/run'

# eg "ga rake" to see all rake-related aliases
function ga {
  alias | grep -- "$1" | grep -v grep
}

alias pg='ping www.google.com'

# eg "psg mysql" to see all mysql processes
function psg {
  ps wwwaux | egrep "($1|%CPU)" | grep -v grep
}

# eg "port 3000" to see what is running there
function port {
  lsof -i -n -P | grep TCP | grep -- "$1"
}

# Display folder and its content as a tree
# (quoted "${1:-.}" so paths with spaces work)
function tree {
  find "${1:-.}" -print | sed -e 's;[^/]*/;|____;g;s;____|; |;g'
}

if [ "$OSTYPE" = 'linux-gnu' ]; then
  # command -v is the portable replacement for `which`
  EDITOR=$(command -v nano)
  export EDITOR
fi

# GIT
alias g=git
alias wip='git commit -m WIP'
export GIT_DUET_CO_AUTHORED_BY=1

# rsync
alias rsink='rsync --archive --verbose --progress --human-readable'
#alias rsink='rsync --archive --compress --verbose --progress --human-readable'
alias rsinkd='rsink --delete'

alias hk=heroku

alias ebp='$EDITOR ~/.zshrc'
alias ezsh='$EDITOR ~/.zshrc'
alias ebpe='$EDITOR ~/.dev_env'
alias ezshe='$EDITOR ~/.dev_env'
alias szsh='. ~/.zshrc'
alias sbp='. ~/.zshrc'
###############################################################################
# Yarn
###############################################################################
alias yup='yarn upgrade --force --latest'
alias y='yarn'
alias yt='yarn test'
alias yr='yarn run'

# Quoting fix: "$@" forwards each argument verbatim; the original $* re-split
# any argument containing spaces.
function yi {
  yarn add "$@"
}
function ya {
  yarn add "$@"
}
function yad {
  yarn add "$@" --dev
}

###############################################################################
# NPM
###############################################################################
alias nbump='npm version patch'
alias npub='npm version patch && git push --tags origin HEAD && npm publish'
alias nup='ncu --upgrade && npm update && npm prune' # if this fails: `npm upgrade -g npm-check-updates`
alias n='npm'
alias nt='npm test'
alias nr='npm run'
alias ne='npm exec'
alias links='ll node_modules | grep \\-\>'

# Install a single package, then prune extraneous modules.
function ni {
  npm install "$1" && npm prune
}
function nis {
  npm install --save "$1" && npm prune
}
function nid {
  npm install --save-dev "$1" && npm prune
}
# Show all published versions of a package.
function nv {
  npm show "$1" versions
}
###############################################################################
# Ruby
###############################################################################
# Bundler
alias be='bundle exec'
alias bi='bundle install'
alias biq='bi --quiet'
alias biw='bi --without=development:test'
alias bid='biw --deployment'
alias bis='gemrat --no-version' # implements missing `bundle install --save` -- requires you first `gem install gemrat`
# # Foreman
# alias frun='be foreman run'
# alias fcon='be foreman run irb'
# alias fser='biq && be rerun foreman start'
# Rails
# sp/sn/sf: rspec with progress / documentation / Fuubar formatters
alias sp='bin/rspec --color'
alias sn='sp --format documentation'
alias sf='sp --require fuubar --format Fuubar'
alias r='bin/rails'
alias rs='biq && be foreman run "rails server"'
# Database helpers (rdr/rdm/rtp/rds: rebuild / migrate / test-prepare / seed)
alias rdr='br db:rebuild'
alias rdm='be rake db:migrate'
alias rtp='br db:test:prepare'
alias rds='br db:seed'
# Delayed-job worker and spring
alias work='br jobs:work'
alias ss='spring stop'
| true
|
4ebb42bf572f05f5afa946ada1f912e47ec75136
|
Shell
|
yjs1210/robinhood-services
|
/admin/entrypoint.sh
|
UTF-8
| 686
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint: build a small bash init file (colours, prompt, locale)
# and replace this process with an interactive admin shell.
# shellcheck disable=SC2034
# Colour escape codes kept for use by sourced/derived scripts (hence SC2034 off).
TEXT_RESET_="\033[0m"
TEXT_BOLD_="\033[1m"
TEXT_RED_="\033[31m"
TEXT_GREEN_="\033[32m"
TEXT_YELLOW_="\033[33m"

# BASH Init Steps
# Write the aliases/prompt/locale setup that the interactive shell will source.
{
    echo 'alias ls="ls --color"'
    echo 'export PS1="\[\033[1;34m\]\!\[\033[0m\] \[\033[1;35m\]\u\[\033[0m\]:\[\033[1;35m\]\W\[\033[0m\] $ "'
    echo 'export LC_ALL=C.UTF-8 && export LANG=C.UTF-8'
} > ~/.initfile

# Banner shown when the container starts (PROJECT_NAME comes from the environment).
echo
cat << EOF
******************************************************************************************
*   ${PROJECT_NAME} Administration
******************************************************************************************
EOF

# exec replaces this script so bash becomes the container's main process.
exec bash --init-file ~/.initfile
| true
|
976b8dd5e5189b4e33dd70b778f725fd87208d5f
|
Shell
|
fooblic/steem-up
|
/steem_follow.sh
|
UTF-8
| 872
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# /usr/local/bin/steem_follow.sh
# init-style control script for the steem_follow.py background service.

PIDFILE=/var/run/steem/steem_follow.pid

function start {
    echo "Steem Follow: starting service"
    python3 /usr/local/bin/steem_follow.py &
    sleep 5
    echo "PID is $(cat "$PIDFILE")"
}

function stop {
    echo "Steem Follow: stopping Service (PID = $(cat "$PIDFILE") )"
    kill "$(cat "$PIDFILE")"
    rm "$PIDFILE"
}

function status {
    ps -ef | grep steem_follow.py | grep -v grep
    echo "PID indication file $(cat "$PIDFILE" 2> /dev/null) "
}

# Some Things That run always
touch /var/lock/steem_follow.lock

# Management instructions of the service
case "$1" in
    start )
        start
        ;;
    stop )
        stop
        ;;
    reload )
        stop
        sleep 1
        start
        ;;
    status )
        status
        ;;
    * )
        # BUG FIX: this was `Echo` (capital E), which is not a command, so
        # the usage message was never printed on bad arguments.
        echo "Usage: $0 {start | stop | reload | status}"
        exit 1
        ;;
esac

exit 0
| true
|
9210681df062f3947d3759cd8388ec0216972e97
|
Shell
|
scherzhaft/roles
|
/sysupdate/files/opt/unix_ops/STIG/bin/enableaudit.sh
|
UTF-8
| 8,036
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash -x
##GEN002860 ##GEN000000-SOL00040 ##RHEL-06-000313 ##RHEL-06-000311
##RHEL-06-000159 ##RHEL-06-000161 ##RHEL-06-000160 ##RHEL-06-000005 ##RHEL-06-000136 ##RHEL-06-000137
##RHEL-06-000510 ##RHEL-06-000511 ##RHEL-06-000154 ##RHEL-06-000148 ##RHEL-06-000145 ##RHEL-06-000383 ##RHEL-06-000384 ##RHEL-06-000522 ##RHEL-06-000385 ##RHEL-06-000522
. /etc/default/SYSconstants || exit 4
GREP='grep'
test "X${__OSNAME}" != "XLinux" && {
/usr/sfw/bin/ggrep -V || repo install.SUNWggrp
/usr/sfw/bin/ggrep -V || exit 6
GREP='/usr/sfw/bin/ggrep'
}
test -f /export/home/steckers/.bash_login && . /export/home/steckers/.bash_login
HOSTNAME=`/bin/hostname|awk -F. {'print $1'}` ; test "X${HOSTNAME}" = "X" && exit
SELFDIR=`dirname $0`
export PATH=/sbin:/usr/sbin:${PATH}
FSTAB=/etc/fstab
test "X${1}" != "X" && FSTAB="$1"
test -f ${FSTAB} || FSTAB=/etc/vfstab
test -f ${FSTAB} || exit
TSTAMP=`perl -e 'print int(time)'` ; test "X${TSTAMP}" != "X" || exit
OSNAME=`uname -s` ; test "X${OSNAME}" != X || exit
AUDITVOL="/vol/audit"
MOUNTPOINT="/audit"
mkdir -p ${MOUNTPOINT}
MNTINUSE=`df -h ${MOUNTPOINT} 2>/dev/null|tail -1|awk {'print $NF'}`
test "X${MNTINUSE}" != "X/" && exit
LinuxFSTAB='${AUDITNAS}:${AUDITVOL} ${MOUNTPOINT} nfs rw,bg,hard,rsize=32768,wsize=32768,vers=3,nosuid,nointr,tcp,timeo=600,noexec,addr=${MYADDR} 0 0'
SunOSFSTAB='${AUDITNAS}:${AUDITVOL} - ${MOUNTPOINT} nfs - yes proto=tcp,hard,nosuid'
MYIPS=`ifconfig -a|grep '.'|${GREP} "\W.*inet"|perl -p -e "s| addr:| |g"|awk {'print $2'}|grep "^[0-9].*\.[0-9].*\.[0-9].*\.[0-9].*"|sort -u|grep -v "^127.0.0.1$"`
MYBKUPIP=`echo "${MYIPS}"|grep "^[0-9].*\.[0-9].*\.99\.[0-9].*$"|head -1`
MYPRODIP=`echo "${MYIPS}"|grep "^[0-9].*\.[0-9].*\.98\.[0-9].*$"|head -1`
MYDMZIP=`echo "${MYIPS}"|grep "^[0-9].*\.[0-9].*\.96\.[0-9].*$"|head -1`
# Resolve the audit NAS over the backup network. If "b_foo" is not in DNS,
# derive its backup-net IP from the production host's address (third octet
# forced to .99), append it to /etc/hosts, and bounce nscd until the name
# resolves. Leaves AUDITNAS set for the caller.
trybkup()
{
echo "I have a backup-net interface"
AUDITNAS="b_foo"
getent hosts ${AUDITNAS} >/dev/null
if [ "X${?}" != "X0" ] ; then
# Strip the b_ prefix to get the production hostname, then its IP.
PRODNAS=`echo "${AUDITNAS}"|sed -e "s|^b_||"`
test "X${PRODNAS}" != "X" && PRODNASIP=`getent hosts ${PRODNAS}|awk {'print $1'}`
# Backup-net convention: same address with the third octet replaced by 99.
test "X${PRODNASIP}" != "X" && BKUPNASIP=`echo "${PRODNASIP}"|awk -F. {'print $1"."$2".99."$4'}`
if [ "X${BKUPNASIP}" != "X" ] ; then
echo "${BKUPNASIP} ${AUDITNAS}" >> /etc/hosts
/etc/init.d/nscd stop
/etc/init.d/nscd start
sleep 4
start=1
# Poll until the cache daemon serves the new entry (up to ~140s).
for i in {1..70} ; do
getent hosts ${AUDITNAS} && break
sleep 2
start=`expr 1 + ${start}`
echo again
done
# Record how many polls it took, for later diagnosis.
echo "${start}" > /tmp/nscd_hit_count.${TSTAMP}
fi
fi
}
# Fall back to the production network: reset AUDITNAS/MYADDR, resolve the
# production NAS name, and set MYADDR to this host's prod-net IP only if the
# NAS answers pings. On DNS failure MYADDR is left empty for the caller.
tryprod()
{
AUDITNAS=''
MYADDR=''
echo "WARNING: backup-net interface problematic or doesn't exist...trying prod interface"
AUDITNAS="foo"
getent hosts ${AUDITNAS} >/dev/null
if [ "X${?}" != "X0" ] ; then
echo "Error: This system is unable to resolve ${AUDITNAS}...exiting"
return
fi
sleep 2
# Only adopt the prod address if the NAS is actually reachable.
if ping -c11 ${AUDITNAS} ; then
MYADDR="${MYPRODIP}"
fi
}
# Add the NFS audit-volume entry to the fstab/vfstab and mount it.
# Expands the per-OS fstab template (LinuxFSTAB / SunOSFSTAB), rewrites the
# fstab without any previous /audit line, appends the new entry, saves a
# backup plus a diff, then mounts. Exits the whole script if the mount did
# not take effect. Uses globals: OSNAME FSTAB TSTAMP MOUNTPOINT AUDITNAS
# AUDITVOL MYADDR.
setupmount()
{
# Two-stage eval: pick the ${OSNAME}FSTAB template, then expand its variables.
MYFSTAB_formula=`eval echo \"\\$${OSNAME}FSTAB\"`
MYFSTAB=`eval echo "\"${MYFSTAB_formula}\""`
##MYFILTER=`echo "${MYFSTAB}"|perl -p -e "s|[ \t]+|\W.*|g"|perl -p -e "s|b_||"`
##MYFILTER=`echo "${MYFSTAB}"|perl -p -e "s|[ \t]+|\\\\\\\\W.*|g"|perl -p -e "s|b_||"`
cp "${FSTAB}" "${FSTAB}.preaudit.${TSTAMP}"
# Drop any existing /audit mount line before appending the fresh one.
grep -v "[[:space:]].*/audit[[:space:]].*" "${FSTAB}.preaudit.${TSTAMP}" > "${FSTAB}.new.${TSTAMP}"
if [ -s "${FSTAB}.new.${TSTAMP}" ] ; then
cat "${FSTAB}.new.${TSTAMP}" > ${FSTAB}
echo "${MYFSTAB}" >> ${FSTAB}
fi
# Keep an audit trail of what changed, and only mount when something did.
test -f "${FSTAB}.preaudit.${TSTAMP}" && CHANGE=`diff -u "${FSTAB}.preaudit.${TSTAMP}" ${FSTAB}|grep -v "^No differences encountered$"`
test "X${CHANGE}" != "X" && diff -u "${FSTAB}.preaudit.${TSTAMP}" ${FSTAB} > "${FSTAB}.auditpatch.${TSTAMP}"
test -f "${FSTAB}.auditpatch.${TSTAMP}" && cat "${FSTAB}.auditpatch.${TSTAMP}"
test "X${CHANGE}" != "X" && mount ${MOUNTPOINT}
sleep 1
# Bail out of the whole script if the audit volume is not mounted.
MNTINUSE=`df -h ${MOUNTPOINT} 2>/dev/null|tail -1|awk {'print $6'}`
test "X${MNTINUSE}" != "X${MOUNTPOINT}" && exit
}
if [ "X${MYBKUPIP}" != "X" ] ; then
trybkup
ping -c11 ${AUDITNAS}
sleep 2
if ping -c11 ${AUDITNAS} ; then
MYADDR="${MYBKUPIP}"
else
tryprod
fi
else
tryprod
fi
if [ "X${MYADDR}" != "X" ] ; then
setupmount
mkdir -p "${MOUNTPOINT}/${HOSTNAME}"
else
mkdir -p "${MOUNTPOINT}"
ln -s /var/log/audit "/audit/${HOSTNAME}"
fi
case "${OSNAME}" in
Linux)
CRON=/etc/cron.allow
ENABLEROOTCRON=`printf "\`cat ${CRON}\`\nroot\n"|sort -ru`
HOME=/home/garfield
cp /etc/audit/auditd.conf /etc/audit/auditd.conf.bak.${TSTAMP}
echo 'log_file = %NASAUDIT%/audit.log
log_format = RAW
priority_boost = 3
flush = INCREMENTAL
freq = 20
num_logs = 5
#dispatcher = /sbin/audispd
max_log_file = 100
##max_log_file_action = keep_logs
max_log_file_action = rotate
space_left = 75
#space_left_action = SYSLOG
space_left_action = email
action_mail_acct = root
admin_space_left = 50
admin_space_left_action = SYSLOG
disk_full_action = SYSLOG
disk_error_action = SYSLOG'|perl -p -e "s|%NASAUDIT%|${MOUNTPOINT}/${HOSTNAME}|g" > /etc/audit/auditd.conf
sleep 1
echo
/sbin/chkconfig --level 0126 auditd off
/sbin/chkconfig --level 345 auditd on
/sbin/chkconfig --level 345 nfs on
/sbin/chkconfig --level 345 nfslock on
/sbin/chkconfig --level 345 portmap on
/sbin/chkconfig --level 345 netfs on
rm -f /etc/rc?.d/S??auditd /etc/rc?.d/K??auditd
sleep 1
ln -sf /etc/init.d/auditd /etc/rc3.d/S99auditd
ln -sf /etc/init.d/auditd /etc/rc4.d/S99auditd
ln -sf /etc/init.d/auditd /etc/rc5.d/S99auditd
ln -sf /etc/init.d/auditd /etc/rc0.d/K01auditd
ln -sf /etc/init.d/auditd /etc/rc1.d/K01auditd
ln -sf /etc/init.d/auditd /etc/rc2.d/K01auditd
ln -sf /etc/init.d/auditd /etc/rc6.d/K01auditd
sleep 1
ls -ld /etc/rc?.d/S??auditd /etc/rc?.d/K??auditd
/etc/init.d/auditd stop
sleep 3
/etc/init.d/auditd start
sleep 2
;;
SunOS)
CRON=/etc/cron.d/cron.allow
ENABLEROOTCRON=`printf "\`cat ${CRON} /etc/cron.allow \`\nroot\n"|sort -ru`
sleep 1
ln -sf ${CRON} /etc/cron.allow
HOME=/export/home/garfield
cp /etc/security/audit_control /etc/security/audit_control.bak.${TSTAMP}
echo '#
# Copyright (c) 1988 by Sun Microsystems, Inc.
#
# ident "@(#)audit_control.txt 1.4 00/07/17 SMI"
#
dir:%NASAUDIT%
flags:lo,fr,fd,am,fm,as
naflags:lo,am,-ex,-nt,-fw,-fa,-fm,-fd,-nt,-pc,as
minfree:20'|perl -p -e "s|%NASAUDIT%|${MOUNTPOINT}/${HOSTNAME}|g" > /etc/security/audit_control
sleep 1
/usr/sbin/svcadm -v enable -s svc:/network/rpc/bind:default
/usr/sbin/svcadm -v enable -s svc:/network/nfs/status:default
/usr/sbin/svcadm -v enable -s svc:/network/nfs/nlockmgr:default
/usr/sbin/svcadm -v enable -s svc:/network/nfs/mapid:default
/usr/sbin/svcadm -v enable -s svc:/network/nfs/cbd:default
/usr/sbin/svcadm -v enable -s svc:/network/nfs/rquota:default
/usr/sbin/svcadm -v enable -s svc:/network/nfs/client:default
sleep 1
/usr/sbin/svcadm disable svc:/system/auditd:default
sleep 3
/usr/sbin/svcadm enable svc:/system/auditd:default
sleep 2
esac
find /var/audit "${MOUNTPOINT}/${HOSTNAME}" -type f -exec chown root:root {} \;
find /var/audit "${MOUNTPOINT}/${HOSTNAME}" -type f -exec chmod 0640 {} \;
chmod go-w /var/audit "${MOUNTPOINT}/${HOSTNAME}"
chmod 640 /etc/security/audit_*
chown root:sys /etc/security/audit_user
chmod 0750 /usr/sbin/auditd /usr/sbin/audit /usr/sbin/bsmrecord /usr/sbin/auditreduce /usr/sbin/praudit /usr/sbin/auditconfig
test "X${HOME}" = "X" && exit
test "X${CRON}" = "X" && exit
test "X${ENABLEROOTCRON}" != "X" && echo "${ENABLEROOTCRON}" > ${CRON}
mkdir -p "${HOME}"
test -f "${SELFDIR}/compressaudit.sh" && cp "${SELFDIR}/compressaudit.sh" "${HOME}"
test -f "${HOME}/compressaudit.sh" && {
chmod 755 "${HOME}/compressaudit.sh"
crontab -l|grep -v '/compressaudit\.sh' > /tmp/$$cronedit
echo '22 3,15 * * *'" ${HOME}/compressaudit.sh"' > /dev/null 2>&1' >> /tmp/$$cronedit
crontab /tmp/$$cronedit
}
| true
|
8b57f625faa52d022ccd3336f314eea1be8b6cb5
|
Shell
|
tjdevries/config_manager
|
/install/deprecated/install_zsh.sh
|
UTF-8
| 401
| 2.515625
| 3
|
[] |
no_license
|
# Install zsh, make it the login shell, and set up oh-my-zsh with the
# spaceship prompt theme.
sudo apt install zsh
# Fix: use POSIX `command -v` instead of the deprecated `which`, and quote
# the substitution so an unexpected path cannot word-split.
chsh -s "$(command -v zsh)"
# Install oh-my-zsh
# NOTE(review): the installer may exec a new zsh and stop this script here;
# the theme setup below assumes it returns -- confirm on a fresh machine.
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
# Install spaceship theme ($ZSH_CUSTOM is exported by oh-my-zsh)
git clone https://github.com/denysdovhan/spaceship-prompt.git "$ZSH_CUSTOM/themes/spaceship-prompt"
ln -s "$ZSH_CUSTOM/themes/spaceship-prompt/spaceship.zsh-theme" "$ZSH_CUSTOM/themes/spaceship.zsh-theme"
| true
|
95223afb99261c02958856c65edf3ea900f999ca
|
Shell
|
petermilne/TOMO
|
/bin/load.AFHBA404
|
UTF-8
| 232
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Load the AFHBA404 driver (idempotent: exits early if already loaded).
# Must run as root because it inserts a kernel module.
if [ "$EUID" -ne 0 ]
  then echo "Please run as root"
  exit
fi
# Idiom fix: use grep's exit status directly instead of testing $? after.
if grep -q afhba /proc/modules; then
	echo afhba loaded already
	exit
fi
cd /home/dt100/PROJECTS/AFHBA404;
# Fix: "$@" preserves argument word boundaries; $* would re-split them.
exec ./scripts/brotto.loadNIRQ "$@"
| true
|
2b1da40ba3125f6c95d5c93be03592076c3ab64a
|
Shell
|
Verumex/heroku-buildpack-libvips
|
/bin/buildscript.sh
|
UTF-8
| 2,317
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Heroku-style buildpack helper: download, compile, install and cache a
# libvips binary.  Expects TMP_DIR, VIPS_PATH, CACHE_DIR, BUILD_DIR,
# BP_DIR and ENV_DIR to be set by the caller.
# Prefix each stdin line with seven spaces (build-log indentation).
indent() {
  sed 's/^/       /'
}
# Prefix each stdin line with the buildpack step marker.
arrow() {
  sed 's/^/-----> /'
}
# Create the working, install and cache directories.
ensure_dirs() {
  mkdir -p $TMP_DIR
  mkdir -p $VIPS_PATH
  mkdir -p $CACHE_DIR
}
# Remove the temporary build tree.
cleanup_build() {
  rm -Rf $TMP_DIR
}
# Install the buildpack's profile.d scripts into the app slug.
export_profile() {
  mkdir -p $BUILD_DIR/.profile.d
  cp $BP_DIR/.profile.d/* $BUILD_DIR/.profile.d/
}
# Entry point: reuse a cached build for the resolved version, else build.
install_libvips() {
  detect_libvips_version
  if [[ -d "$CACHE_DIR/$LIBVIPS_VERSION" ]]; then
    restore_cached_build
  else
    build_libvips
  fi
}
# Resolve LIBVIPS_VERSION from the Heroku env dir, falling back to the
# latest GitHub release.  Exits the script when ENV_DIR is missing.
detect_libvips_version() {
  [[ ! -d $ENV_DIR ]] && exit 1
  if [[ -r "$ENV_DIR/LIBVIPS_VERSION" ]]; then
    export LIBVIPS_VERSION=$(cat "$ENV_DIR/LIBVIPS_VERSION")
  else
    echo "Checking for latest libvips version" | indent
    export LIBVIPS_VERSION=$(detect_latest_version)
  fi
}
# Scrape the latest release number (x.y.z) from the GitHub releases API.
detect_latest_version() {
  curl -s https://api.github.com/repos/libvips/libvips/releases/latest \
    | grep "browser_download_url.*tar.gz" \
    | head -1 \
    | cut -d : -f 2,3 \
    | tr -d \" \
    | grep "[0-9]*\.[0-9]*\.[0-9]" -o \
    | head -1
}
# Copy a previously cached build into the install prefix.
restore_cached_build() {
  echo "Restoring cached libvips build" | indent
  cp -R "$CACHE_DIR/$LIBVIPS_VERSION/." $VIPS_PATH
}
# Full pipeline: download, unpack, configure, compile, install, cache.
build_libvips() {
  echo "Building libvips binary..." | arrow
  download_libvips \
    && unpack_source_archive \
    && cd $TMP_DIR \
    && configure_and_compile \
    && make -s install > /dev/null 2>& 1 \
    && cd ~ \
    && cache_build
}
# Fetch the source tarball for LIBVIPS_VERSION (cache is cleared first).
download_libvips() {
  rm -Rf $CACHE_DIR/*
  local download_path="$TMP_DIR/libvips.tar.gz"
  echo "Downloading libvips ${LIBVIPS_VERSION} source archive" | indent
  curl -sL "https://github.com/libvips/libvips/releases/download/v${LIBVIPS_VERSION}/vips-${LIBVIPS_VERSION}.tar.gz" -o $download_path
}
# Unpack the tarball into TMP_DIR, dropping the top-level directory.
unpack_source_archive() {
  echo "Unpacking libvips source archive" | indent \
    && tar xf "$TMP_DIR/libvips.tar.gz" -C $TMP_DIR --strip 1
}
# Configure a slim shared-only build (optional deps disabled) and compile.
configure_and_compile() {
  echo "Compiling libvips" | indent \
    && ./configure --prefix $VIPS_PATH --enable-shared --disable-static \
       --disable-dependency-tracking --disable-debug --disable-introspection \
       --without-fftw --without-pangoft2 --without-ppm \
       --without-analyze --without-radiance > /dev/null 2>& 1 \
    && make -s > /dev/null 2>& 1
}
# Store the freshly installed tree in the cache keyed by version.
cache_build() {
  echo "Caching binaries" | indent
  cp -R "$VIPS_PATH/." "$CACHE_DIR/$LIBVIPS_VERSION"
}
| true
|
019ac22a909ca0b15b546f0b60fbca2b05a3382a
|
Shell
|
mmgc84/bash_scripting
|
/more_on_bash/testbash_error.sh
|
UTF-8
| 99
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Sum the integers 1..4 and report the result.
sum=0
for i in 1 2 3 4
do
    # Bug fix: the original wrote `sum=(($sum+$i))`, which assigns a bash
    # *array* containing the literal string "(0+1)" rather than doing
    # arithmetic.  Arithmetic expansion requires `$(( ... ))`.
    sum=$((sum + i))
done
echo "the sum of "$i" numbers is $sum"
| true
|
65be7c81695e3839131245cf014f4d4ee26d738f
|
Shell
|
sajjadwasim84/asimov
|
/quick_check._liveserver.bash
|
UTF-8
| 890
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Quick health check: report any processes not owned by a known system
# account and any NFS mounts outside the expected set.
#ps axuf | egrep -v ^root | tr -s " " | cut -f1,11- -d' ' | column -c2
# Column 1 (owner) padded to 9 chars, then the command line (fields 11+)
# padded to 70; known daemon owners are filtered out up front.
processes=$(ps axu | egrep -v '(^root|^syslog|^statd|^ntp|^dnsmasq|^daemon|^ganglia|^munge|^postfix|^systemd|^nobody)' | tail -n+2 | awk '{ printf"%-9s", $1; $1=$2=$3=$4=$5=$6=$7=$8=$9=$10=""; printf"%-70s\n", $0}')
# Drop dbus "message+" entries; `|| echo none` gives a sentinel when
# nothing survives the filter.
all_clear=$(echo -e "${processes}" | egrep -v 'message+' || echo 'none')
if [[ ${all_clear:-} == 'none' ]]; then
	echo -e " : no unknown processes : "
else
	echo -e "....processes...."
	echo -e "${all_clear}" | sort
fi
sync
# NFS/NFS4 mounts formatted as "device  mountpoint"; the egrep removes
# the expected cluster shares, leaving only surprises.
mounts=$(df --portability --type=nfs --type=nfs4 | tail -n+2 | awk '{ printf"%-35s", $1; printf"%-35s\n", $6}' | egrep -v "(asimov/admin/config|asimov/tools|asimov/admin/scripts|io-.-ib:/work)" || echo 'none')
if [[ ${mounts:-} == 'none' ]]; then
	echo -e " : no unknown mounts : "
else
	echo -e "....mounts...."
	echo -e "${mounts}" | sort
fi
| true
|
42ac423dfead1ee65f89d077df23f542c87428cf
|
Shell
|
axykon/dotfiles
|
/install/helm
|
UTF-8
| 382
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the helm CLI into ~/.local/bin and generate its bash completion.
set -e

version="v3.9.0"

# Extract only the `helm` binary, stripping the leading linux-amd64/
# path component; quote expansions so odd $HOME values cannot split.
curl -sL "https://get.helm.sh/helm-$version-linux-amd64.tar.gz" \
	| tar -C "$HOME/.local/bin" -xz --strip-components=1 --mode=0755 \
	linux-amd64/helm

# mkdir -p is already a no-op when the directory exists, so the original
# `[[ -d ]] ||` guard was redundant.
bashcompdir="$HOME/.local/share/bash-completion/completions"
mkdir -p "$bashcompdir"
bashcomp="$bashcompdir/_helm"
helm completion bash > "$bashcomp"
chmod 0644 "$bashcomp"
| true
|
50c777fa9f193a8a5413b137e1edc9cd623ba77d
|
Shell
|
yanakievv/OS
|
/Scripting/05-b-7500.sh
|
UTF-8
| 549
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Number-guessing game: pick a random number in [$1, $2) and let the
# player guess until correct, hinting "smaller"/"bigger" each round.
if [ $# -ne 2 ] || [[ ! "${1}" =~ ^[0-9]+$ ]] || [[ ! "${2}" =~ ^[0-9]+$ ]]; then
	printf "The two arguments must be positive integers!\n"
	exit 1
fi

lower="${1}"
# Arithmetic expansion instead of spawning the external `expr` utility.
upper=$(( ${2} - ${1} ))

# Robustness fix: a zero or negative range made `RANDOM % upper` below
# divide by zero.  Require the second bound to exceed the first.
if [ "${upper}" -le 0 ]; then
	printf "The two arguments must be positive integers!\n"
	exit 1
fi

counter=1
number=$(( (RANDOM % upper) + lower ))
# Bug fix: the original `echo $number` here printed the secret number
# (debug leftover), defeating the purpose of the game.

while true; do
	read -p "Guess? " ans
	if [ "${ans}" -eq "${number}" ]; then
		printf "RIGHT! Guessed in ${counter} tries!\n"
		exit 0
	elif [ "${ans}" -gt "${number}" ]; then
		printf "... smaller!\n"
	else
		printf "... bigger!\n"
	fi
	counter=$(( counter + 1 ))
done
| true
|
9e1fcdb6f7dc8e6d1c2fd132dc4e055583adbe85
|
Shell
|
ihiji/hudson-dynamic-branch-manager
|
/remove_job.sh
|
UTF-8
| 407
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Delete the Jenkins job named <project>_<branch> via the CLI jar.
set -e
set -u
#the env var CI_TYPE is expected to be hudson or jenkins, and controls location and name of the cli jar.
# NOTE(review): despite the comment above, the jar path below is
# hard-coded to jenkins -- confirm whether CI_TYPE is still used.
project_name=$1 #e.g. pachweb
simple_name=$2 #this should be a simple name (like hotfix-1.3.1 or rc-1.2, never prefixed with origin/)
job_name=${project_name}_$simple_name
# Fix: quote the job name so branch names with shell metacharacters or
# spaces cannot split into multiple CLI arguments.
java -jar /var/cache/jenkins/war/WEB-INF/jenkins-cli.jar -s http://localhost:8080 delete-job "$job_name"
| true
|
cc53dd3b76877bab37c089a28feec9b907f8e2c9
|
Shell
|
google/kf
|
/operator/scripts/ci-build.sh
|
UTF-8
| 1,784
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

export GO111MODULE=on
export GOFLAGS="-mod=vendor"

REPO_ROOT=$(dirname "${BASH_SOURCE}")/..

# Ensure all go files are formatted
go_changes=$(git status --porcelain | grep .go || true)
if [ -n "${go_changes}" ]; then
  # Bug fix: this message was double-quoted with backticks around
  # `gofmt -w -s`, which *executed* gofmt via command substitution while
  # printing the error.  Single quotes keep the backticks literal.
  echo 'ERROR: This CL contains misformatted golang files. To fix this error, run `gofmt -w -s` on the affected files and add the updated files to this CL.'
  echo "stale files:"
  # Bug fix: the file list was passed as the printf *format* string; any
  # '%' or backslash in a filename would corrupt the output.
  printf '%s\n' "${go_changes}"
  echo "git diff:"
  git --no-pager diff
  exit 1
fi
# Run golint over every package pattern given as an argument; print the
# accumulated findings and return 1 if any were produced, else return 0.
function run_golint_check() {
  local findings="" pkg
  for pkg in "$@"; do
    findings="${findings}$(golint "$pkg")"
  done
  # No output from golint means a clean pass.
  if [ -z "$findings" ]; then
    return 0
  fi
  echo "ERROR: This CL contains golint errors."
  echo "$findings"
  return 1
}
# Ensure no golint errors.
run_golint_check "cmd/..." \
  "pkg/apis/..." \
  "pkg/manifestival/..." \
  "pkg/operand/..." \
  "pkg/operator-cleanup/..." \
  "pkg/reconciler/..." \
  "pkg/release/..." \
  "pkg/testing/..." \
  "pkg/transformer/..." \
  "version/..."

# Build in a throwaway copy of the repo so the build cannot dirty the
# checkout that the formatting check above depends on.
TMP="$(mktemp -d)"
cp -r ${REPO_ROOT}/* ${TMP}/
pushd "${TMP}"

# Perform go build
go build -v ./...

popd
| true
|
834f01bddb2111ea662d86ae7f598ba315ca7ed4
|
Shell
|
yitsushi/taskwarrior-docker
|
/taskd-create-user
|
UTF-8
| 572
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env ash
# Create a taskd user inside an organisation, generate a client
# certificate for it, and bundle the credentials into /data/certs.
org="$1"
username="$2"

# Bug fixes: (1) `return` is only valid inside a function or a sourced
# script -- at script top level it is an error, so use `exit`;
# (2) `[[ ]]` is a bashism not guaranteed under ash, so use POSIX `[`.
if [ -z "$org" ]; then
  echo "$0 <org> <user>"
  exit 1
fi

if [ -z "$username" ]; then
  echo "$0 <org> <user>"
  exit 1
fi

export SERVER=$(hostname -f)
export CN=$SERVER

echo "creating $org/$username"
taskd add user "$org" "$username"

# Fix: abort if the PKI directory is missing instead of generating
# certificates in whatever directory we happen to be in.
cd "$TASKDDATA/pki" || exit 1
# Lower-case the user name and squash every non-letter run to '_' to get
# a filesystem-safe certificate basename.
uname=$(echo "$username" | tr '[:upper:]' '[:lower:]' | sed -Ee 's/[^a-z]/_/g;s/_+/_/g')
./generate.client "$uname" > /dev/null 2>&1
tar zcf "/data/certs/$uname.tar.gz" "$uname.cert.pem" "$uname.key.pem" ca.cert.pem
echo "/data/certs/$uname.tar.gz created"
| true
|
5527925dc6a747196fca76e4faf0febfff281320
|
Shell
|
jpl169/POPL21-artifact
|
/runOverheadTest.sh
|
UTF-8
| 1,242
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Benchmark driver: builds the math libraries, times the bfloat16,
# posit16 and float variants, and renders the paper's overhead figures.
# Resolve the directory this script lives in; results land beside it.
ResultPath="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
B16_ResultPath="${ResultPath}/B16OvhdRslt.txt"
P16_ResultPath="${ResultPath}/P16OvhdRslt.txt"
F32_ResultPath="${ResultPath}/F32OvhdRslt.txt"
echo -e "\033[1mBuilding math libraries\033[0m"
make -s clean
make -s
# --- bfloat16: run benchmarks, then plot Figures 11(a)/(b) ---
echo -e "\033[1mMeasuring bfloat16 math library speed\033[0m"
cd overheadtest/bfloat16
make -s clean
make -s
./runAll.sh > $B16_ResultPath
make -s clean
cd ../..
python3 bfloatOverheadFull.py
python3 bfloatOverheadInternal.py
echo -e "Output \033[1mbfloatOverheadFull.pdf\033[0m corresponds to Figure 11(a)"
echo -e "Output \033[1mbfloatOverheadInternal.pdf\033[0m corresponds to Figure 11(b)"
echo -e ""
# --- posit16: run benchmarks, then plot Figure 12 ---
echo -e "\033[1mMeasuring posit16 math library speed\033[0m"
cd overheadtest/posit16
make -s clean
make -s
./runAll.sh > $P16_ResultPath
make -s clean
cd ../..
python3 positOverheadInternal.py
echo -e "Output \033[1mpositOverheadInternal.pdf\033[0m corresponds to Figure 12"
echo -e ""
# --- float: long-running raw timing, results left in a text file ---
echo -e "\033[1mMeasuring float math library speed\033[0m"
echo -e "This make 10-20 minutes to complete"
cd overheadtest/float
make -s clean
make -s
./runAll.sh > $F32_ResultPath
make -s clean
echo -e "Output result in \033[1mF32OvhdRslt.txt\033[0m"
cd ../..
| true
|
ace48a958ed8c4a7db199a5628faa3bf7890fe29
|
Shell
|
josanabr/articulo_ds_2021_I
|
/codigo/NPB/script_instalacion.sh
|
UTF-8
| 569
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# This script invokes the individual steps (scripts) that are normally
# required to install a package: prerequisites, compilation, install.
#
# Author: John Sanabria - john.sanabria@correounivalle.edu.co
# Date: 2021-05-05
#
# When invoked under the "delete" name, delegate to the removal script.
if [ "${0}" == "./script_borrado.sh" ]; then
	./borrar.sh && exit 0
fi
if [ "$1" == "" ]; then
	echo "Indicar el directorio donde se encuentran los scripts para la instalacion del paquete"
	exit 1
fi
CURRENT_DIR=$(pwd)
# Bug fix: the cd into $1 was left commented out because of an operator
# precedence bug (`cd ${1} || echo ... && exit 1` exits even on success).
# Grouping the failure branch makes the script actually use its argument,
# as the usage message promises.
cd "${1}" || { echo "Directorio no encontrado"; exit 1; }
./prerrequisitos.sh
./compilacion.sh
./instalacion.sh
cd "${CURRENT_DIR}"
| true
|
6caeb80bdd9ffa7113ae367a5015912a44a6da99
|
Shell
|
lonkamikaze/bsda2
|
/tests/bsda_fifo.sh
|
UTF-8
| 1,234
| 3.421875
| 3
|
[
"ISC"
] |
permissive
|
# Test suite for bsda:fifo:Fifo -- exercises creation, send/receive,
# parallel producers, cleanup and file-descriptor accounting.
. ../src/bsda_fifo.sh
. ../src/bsda_test.sh
. ../src/bsda_util.sh
NL=$'\n'
# Record number of file descriptors
IFS=,
bsda:util:count fdcount0 $bsda_obj_desc
# Create a Fifo instance
bsda:fifo:Fifo fifo
# Check if file descriptors were consumed
bsda:util:count fdcount1 $bsda_obj_desc
test $fdcount1 -le $fdcount0
# Fifos are not serialisable or copyable
bsda:test:isNone $fifo.serialise
bsda:test:isNone $fifo.copy
# Send something through the fifo
$fifo.sink echo foobar
$fifo.source read -r str
test "$str" = foobar
# Send a big thing many times in parallel
doc="$(cat ../ref/bsda_obj.md)$NL"
IFS="$NL"
threads=8
pids=
# Each background sink writes the whole document once.
for i in $(jot $threads); do
	$fifo.sink 'echo -n "$doc"' &
	pids="${pids}$!${NL}"
done
# Read until one full copy of the document has been reassembled, once
# per producer; read -t5 bounds each wait so a stall fails the test.
for i in $(jot $threads); do
	recv=
	while [ "$recv" != "$doc" ]; do
		$fifo.source read -rt5 line
		recv="$recv$line$NL"
	done
done
wait $pids
# Clean up
$fifo.delete
IFS=,
bsda:util:count fdcount2 $bsda_obj_desc
# Descriptor count must return to the initial value after delete.
test $fdcount2 -eq $fdcount0
# Creating fifos must eventually fail if file descriptors are consumed
if [ $((fdcount0)) -lt $((fdcount1)) ]; then
	fdcount_last=$fdcount0
	while bsda:fifo:Fifo fifo; do
		bsda:util:count fdcount $bsda_obj_desc
		test $fdcount -lt $fdcount_last
		fdcount_last=$fdcount
	done
fi
| true
|
bbf82c4b68ef319025191eb480f19f53f7122edc
|
Shell
|
wwone/AUTHOR
|
/examples/article_lisbon/createPDF.sh
|
UTF-8
| 1,441
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#
# Render the FOP input produced by AUTHOR into an interactive,
# distributable PDF.
#
# Expected working-directory contents:
#
#   article.fo   -- the combined FOP file (e.g. `cat book*.fopdf > article.fo`)
#   Arial*.ttf   -- fonts referenced (and generally embedded) by the build;
#                   see the configuration file "fop99.xconf"
#   fop99.xconf  -- FOP configuration, notably the font setup
#
# SCRIPT points at the installed Apache FOP launcher shell script; adjust
# it to match your local FOP installation.  A working `java` on the PATH
# is assumed.  FOP_OPTS is the only FOP-related environment variable
# this build sets (JVM heap sizing).
#
SCRIPT=/media/bob/DATA/work/fop/fop-2.2/fop/fop
CONF=fop99.xconf
FOP_OPTS="-Xms400m -Xmx400m"
export FOP_OPTS

# Echo the settings and the exact command before running it, so the
# build log shows what was executed.
echo $FOP_OPTS
echo sh $SCRIPT -nocs -x -c $CONF article.fo article_pdf.pdf
sh $SCRIPT -nocs -x -c $CONF article.fo article_pdf.pdf
| true
|
57b1f1711655e559ba0414f2819d5149fd0e4c23
|
Shell
|
zhangwei217245/HDF5Meta
|
/scripts/test_mongo.sh
|
UTF-8
| 973
| 2.9375
| 3
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
#!/bin/bash
# Launch the HDF5->MongoDB ingest in the background, then poll its log,
# sampling MongoDB data/index sizes each time another file is parsed.
# NOTE(review): credentials are hard-coded below -- consider moving them
# to an environment variable or a protected file.
nohup ./bin/hdf5_set_2_mongo /global/cscratch1/sd/houhun/h5boss_v1 100 16 > mongotest_100_16.txt &
#nohup ./sim_mongo.sh > mongotest_100_16.txt &
fakeidx=1
old_num_file=0
# Bug fix: num_file was tested below before it was ever assigned, making
# the first `[ $num_file -eq 100 ]` an invalid empty comparison.
num_file=0
echo ""> mongo_mem.txt
while [ $fakeidx -eq 1 ];do
	if [ $num_file -eq 100 ];then
		break
	fi
	# Bug fix: "[PARSEFILE]" is a bracket expression matching any one of
	# those letters, not the literal tag; -F makes it a fixed string.
	num_file=`grep -cF "[PARSEFILE]" mongotest_100_16.txt`
	if [ $num_file -gt $old_num_file ];then
		mongo mongodb03.nersc.gov/HDF5MetadataTest -u HDF5MetadataTest_admin -p ekekek19294jdwss2k --eval 'db.runCommand({dbStats:1, scale:1024})' | egrep "(indexSize|dataSize)" | xargs echo >> mongo_mem.txt
		old_num_file=$num_file
	fi
	sleep 3s
done
#for i in $(seq 0 16); do
#    ./bin/hdf5_set_2_mongo /global/cscratch1/sd/houhun/h5boss_v1 8 $i
#    mongo mongodb03.nersc.gov/HDF5MetadataTest -u HDF5MetadataTest_admin -p ekekek19294jdwss2k --eval 'db.runCommand({dbStats:1, scale:1024})' | egrep "(indexSize|dataSize)" | xargs echo
#done
| true
|
4e54260aca64c443962013ddc16b4cb1802d57fc
|
Shell
|
lwilletts/Legions-on-Unix
|
/legionsoverdrive
|
UTF-8
| 4,060
| 4.40625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# wildefyr - 2016 (c) MIT
# run script for Legions: Overdrive on Linux.
# NOTE(review): ARGS="$@" flattens the argument vector into one string;
# paths containing spaces will mis-parse in the option loop at the
# bottom of the file -- an array would be needed to fix this.
ARGS="$@"
WINEPRE="$HOME/.wine-legions"
DIRECTORY="$HOME/.legions"
LAUNCHER="http://www.legionsoverdrive.com/launcher/Legions.exe"
# Print usage; exits with the caller-supplied status when one is given.
usage() {
    cat << EOF
Usage: $(basename $0) [-qhV] [-p path] [-w path]
    -p | --path:    Set path to Legions: Overdrive folder. [default: ${DIRECTORY}]
    -w | --wine:    Set path of the wineprefix. [default: ${WINEPRE}]
    -q | --quiet:   Turn off $(basename $0)'s output.
    -V | --version: Print version information.
    -h | --help:    Display this help.
EOF
    test $# -eq 0 || exit $1
}
# Print the release banner and terminate the script successfully.
getVersion() {
    local banner="Legions-on-Linux Release 1.2 (c) wildefyr"
    printf '%s\n' "$banner"
    exit 0
}
# Verify every external tool the script needs is installed; exit with
# status 2 and an install hint on the first missing dependency.
# Bug fix: the original used `type X 2>&1 > /dev/null`, which redirects
# stderr to the *old* stdout (the terminal) before silencing stdout, so
# error noise still appeared.  `> /dev/null 2>&1` silences both streams.
validateDeps() {
    type wine > /dev/null 2>&1 || {
        printf '%s\n' "wine has not been found on your \$PATH." >&2
        printf '%s\n' "Please install it using your distribution's package manager." >&2
        exit 2
    }
    type winetricks > /dev/null 2>&1 || {
        printf '%s\n' "winetricks has not been found on your \$PATH." >&2
        printf '%s\n' "Please install it using your distribution's package manager." >&2
        exit 2
    }
    type cabextract > /dev/null 2>&1 || {
        printf '%s\n' "cabextract has not been found on your \$PATH." >&2
        printf '%s\n' "Please install it using your distribution's package manager." >&2
        exit 2
    }
    type wget > /dev/null 2>&1 || {
        printf '%s\n' "wget has not been found on your \$PATH." >&2
        printf '%s\n' "Please install it using your distribution's package manager." >&2
        exit 2
    }
}
# Decide whether to launch the installed game or download it first.
setupEnvironment() {
    test -d "$DIRECTORY" && {
        printf '%s\n' "$DIRECTORY found."
        test -d "$DIRECTORY/Legions" && {
            printf '%s\n' "Loading Legions: Overdrive."
            playLegions
        } || {
            printf '%s\n' "$DIRECTORY/Legions not found."
            downloadLegions
        }
    } || {
        printf '%s\n' "$DIRECTORY not found."
        downloadLegions
    }
}
# Launch the game through wine in its dedicated 32-bit prefix, then exit.
playLegions() {
    cd $DIRECTORY/Legions
    # NOTE(review): `2>&1 &> /dev/null` is redundant -- the trailing &>
    # already redirects both streams; behaviour is "silence everything".
    WINEDEBUG=-all WINEARCH=win32 WINEPREFIX=$WINEPRE wine launcher.exe \
        2>&1 &> /dev/null
    exit 0
}
# Create the install directory, confirm with the user, fetch the
# launcher and run its self-extractor, then hand off to installLegions.
downloadLegions() {
    mkdir -v -p $DIRECTORY && {
        cd $DIRECTORY
    } || {
        printf '%s\n' "${DIRECTORY} is not valid. Exiting."
        exit 3
    }
    printf '%s' "Download Legions: Overdrive to ${DIRECTORY}? [y/n]: "; \
    while read -r confirm; do
        case "$confirm" in
            "y"|"yes")
                printf '%s\n' "Downloading launcher.exe:"
                break
                ;;
            "n"|"no")
                printf '%s\n' "Exiting..."
                exit 0
                ;;
            *)
                printf '%s' "Download Legions: Overdrive to ${DIRECTORY}? [y/n]: "
                continue
                ;;
        esac
    done
    wget -nc "$LAUNCHER"
    printf '%s\n' "Extract files to Z:${DIRECTORY}."
    WINEDEBUG=-all WINEARCH=win32 WINEPREFIX=$WINEPRE wine Legions.exe
    rm Legions.exe
    installLegions
}
# Install the winetricks runtime packages the game needs, then launch it.
installLegions() {
    cd Legions
    # get winetricks packages
    printf '%s\n' "Downloading winetricks packages."
    PACKAGES="corefonts d3dcompiler_43 d3dx9 xinput"
    for package in $PACKAGES; do
        WINEDEBUG=-all WINEARCH=win32 WINEPREFIX=$WINEPRE winetricks ${package}
    done
    printf '%s\n' "Launching Legions: Overdrive."
    WINEDEBUG=-all WINEARCH=win32 WINEPREFIX=$WINEPRE wine launcher.exe \
        2>&1 &> /dev/null
}
# Entry point: check dependencies, then set up / launch the game.
main() {
    validateDeps
    setupEnvironment
}
# Parse command-line flags.  A -p/-w flag arms PATHFLAG/WINEFLAG so the
# *next* argument is captured as its value on the following iteration.
for arg in $ARGS; do
    test "$WINEFLAG" = "true" && { WINEPRE="$arg"; WINEFLAG=false; }
    test "$PATHFLAG" = "true" && { DIRECTORY="$arg"; PATHFLAG=false; }
    case "$arg" in
        -h|--help|h|help) usage 0 ;;
        -V|--version) getVersion ;;
        -q|--quiet) QUIETFLAG=true ;;
        -p|--path) PATHFLAG=true ;;
        -w|--wine) WINEFLAG=true ;;
    esac
done

# Bug fixes: (1) `main 2>&1 > /dev/null` only redirected the old stdout,
# so stderr still reached the terminal in quiet mode -- the order must be
# `> /dev/null 2>&1`; (2) the `test && { … } || { … }` chain would have
# run main a second time if the quiet invocation returned non-zero.
if test "$QUIETFLAG" = "true"; then
    main > /dev/null 2>&1
else
    main
fi
| true
|
24b0f6a67881ddbf6688f8927c79347007ee9b5f
|
Shell
|
sierdzio/tbus
|
/src/tbus-config
|
UTF-8
| 393
| 3.140625
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# tbus-config reads and modifies the global/local configuration of tbus.
. tbus-global

# Print the help text for this subcommand.
help () {
    printf '%s\n' "tbus-config is used to read and modify the global/ local configuration
of tbus.
To get general information about tbus, use tbus info."
}

# Show help and stop when asked; otherwise fall through to the (still
# unimplemented) configuration handling below.
case "${1}" in
    -h|--help)
        help
        exit
        ;;
esac

# TODO:
# - upload limit
# - download limit
# - autorun
| true
|
3c50b7f641451698da8b97b2a88f15c4be7b5e05
|
Shell
|
joechen88/scripts
|
/diskinfo.sh
|
UTF-8
| 1,748
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# ESXi disk reporter: lists either all disks ("ad") or the disks that
# belong to vSAN disk groups ("dg"), joining vdq and esxcfg-scsidevs data.
usage()
{
cat << EOF
Usage: sh diskinfo.sh <ad|dg>
    ad = list all disks
    dg = list disks from vsan diskgroup
example:
   sh diskinfo.sh dg
EOF
}
#
#
# replace carriage return w/ a TAB when issuing a grep -> awk 'ORS=NR%2?" ":"\n"'
# - get each drive,
#
# Collate per-device details for one device identifier ($1): vdq state,
# SSD flag, capacity flag, plus model/size and the vmhba adapter name
# from esxcfg-scsidevs.  The awk 'ORS=...' idiom joins label/value pairs
# onto single lines.
driveInfo() {
vdq -qH | grep -iE "$1" | awk 'ORS=NR%2?" ":"\n"' ; \
vdq -qH | grep -A 5 -iE "$1" | grep -iE "State" | awk 'ORS=NR%2?" ":"\n"' ; \
vdq -qH | grep -A 5 -iE "$1" | grep -iE "IsSSD" | awk 'ORS=NR%2?" ":"\n"' ; \
vdq -qH | grep -A 5 -iE "$1" | grep -iE "iscapacity" | awk 'ORS=NR%2?" ":"\n"' ; \
esxcfg-scsidevs -l | grep -A 7 -iE "$1" | grep -iE "Model" | awk 'ORS=NR%2?" ":"\n"' ;
esxcfg-scsidevs -l | grep -A 7 -iE "$1" | grep -iE "size" | awk 'ORS=NR%2?" ":"\n"' ;
esxcfg-scsidevs -A | grep -iE "$1" | awk '{print $1}'
}
if [[ $# != 1 ]]; then
        usage
        exit 0
else
        IFS=$'\n'
        # "ad": every mpx/naa/t10 device known to vdq.
        if [ $1 == "ad" ]; then
                echo ""
                echo "**********"
                esxcfg-scsidevs -a
                echo -e "**********\n"
                driveInfoArray=$(vdq -qH | grep -iE "mpx|naa|t10" | awk '{print $2}')
        fi
        # "dg": only devices that appear in vSAN disk-group output.
        if [ $1 == "dg" ]; then
                echo ""
                echo "**********"
                esxcfg-scsidevs -a
                echo -e "**********\n"
                vdq -iH
                driveInfoArray=$(vdq -iH | grep -iE "ssd|md" | awk '{print $2}')
        fi
        #echo $driveInfoArray
        # Iterate the selected device IDs, printing a numbered report each.
        IFS=$'\n'
        j=1
        for i in `echo "$driveInfoArray"`
        do
                echo "--------------------" ;
                echo $j ;
                driveInfo $i ;
                echo "--------------------" ;
                let j++ ;
        done
fi
| true
|
b3a8341c9662379da064654d2bfde5530cc7fea1
|
Shell
|
getupcloud/origin-server
|
/cartridges/openshift-origin-cartridge-cron/bin/install
|
UTF-8
| 974
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# OpenShift cron cartridge install hook: prints usage guidance to the
# client, but only from the application's primary gear.
source $OPENSHIFT_CARTRIDGE_SDK_BASH

# only print this message on the "main" gear, not any scaled gears (for scalable apps)
# or secondary haproxy gears (for ha applications)
if [ "$OPENSHIFT_GEAR_DNS" == "$OPENSHIFT_APP_DNS" ]; then
  # Join the supported frequencies (one per line in the config file) into
  # a comma-separated list; ${frequencies%?} drops the trailing comma.
  frequencies=$(cat $OPENSHIFT_CRON_DIR/configuration/frequencies | tr '\n' ',')

  client_result ""
  client_result "To schedule your scripts to run on a periodic basis, add the scripts to "
  client_result "your application's .openshift/cron/{${frequencies%?}}/"
  client_result "directories (and commit and redeploy your application)."
  client_result ""
  client_result "Example: A script .openshift/cron/hourly/crony added to your application"
  client_result "         will be executed once every hour."
  client_result "         Similarly, a script .openshift/cron/weekly/chronograph added"
  client_result "         to your application will be executed once every week."
  client_result ""
fi
| true
|
158b75d2cecc3feb4ea4d97cb64f158fe43e8e5e
|
Shell
|
CBIIT/NBIA-TCIA
|
/software/nbia-search/src/assets/set_api_url.sh~
|
UTF-8
| 1,604
| 4
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Rewrite the API_SERVER_URL entry in properties.ts (and any built
# main* bundles) to point the nbia client at the given server.
OS=`uname`
if [ "$OS" == "Darwin" ]
then
    echo "Does not run on Mac (incompatible sed)"
    exit 1
fi

if [[ $1 == *"help"* ]]
then
    echo
    echo "Call from within nbia-client/assets/ directory"
    echo "Ex: set_api_url.sh https://public-dev.cancerimagingarchive.net"
    echo "Ex: set_api_url.sh http://192.168.1.14:8080"
    echo
    exit 0
fi

# Refuse to run outside nbia-client/assets so the sed edits below cannot
# hit the wrong files.
CORRECT_PATH="nbia-client/assets"
MATCH_COUNT=`pwd | grep -c ${CORRECT_PATH}$`
# echo MATCH_COUNT: ${MATCH_COUNT}
if [ ${MATCH_COUNT} -lt 1 ]
then
    echo "This utility must be launched from within ${CORRECT_PATH}." >&2
    exit 1
fi

# Make sure we got an API url as a parameter
if [ $# != 1 ]
then
    echo "Error: Need API url" >&2
    echo "Ex: set_api_url.sh https://public-dev.cancerimagingarchive.net" >&2
    # Consistency fix: this example line was the only one in the error
    # block not redirected to stderr.
    echo "Ex: set_api_url.sh http://192.168.1.14:8080" >&2
    exit 1
fi

PROP_FILE=properties.ts
API_SERVER_URL=$1

# Make sure the property file is there.
if [ ! -f ${PROP_FILE} ]
then
    echo "Error: I could not find ${PROP_FILE} in the current directory." >&2
    exit 1
fi

# \x27 is a single quote; the URL is spliced in between two of them.
sed -i 's@^\s*API_SERVER_URL:.*,@API_SERVER_URL: \x27'"${API_SERVER_URL}"'\x27,@g' ${PROP_FILE}

# Patch any compiled bundles too (minified form uses double quotes).
for f in `ls ../ | grep main`
do
   sed -i 's@API_SERVER_URL:"[^"]*",@API_SERVER_URL:\"'"${API_SERVER_URL}"'\",@g' ../${f}
done

# Make sure it worked.
MATCH_COUNT=`grep -c API_SERVER_URL:\ \'${API_SERVER_URL}\', ${PROP_FILE}`
if [ ${MATCH_COUNT} -lt 1 ]
then
    echo "Error: I did not find any API Server url entries in ${PROP_FILE}" >&2
    exit 1
fi

# echo Found ${MATCH_COUNT}
echo "Set API server url to API_SERVER_URL: '${API_SERVER_URL}'"
| true
|
3c1a23cbdec0e6ba71ecbe268bf229c465ec9d0d
|
Shell
|
petronny/aur3-mirror
|
/trebuchet/PKGBUILD
|
UTF-8
| 1,335
| 2.734375
| 3
|
[] |
no_license
|
# PKGBUILD for trebuchet: installs the Tcl/Tk MUD client under /opt and
# wires up a launcher symlink, desktop entry and icon.
# This is an example PKGBUILD file. Use this as a start to creating your own,
# and remove these comments. For more information, see 'man PKGBUILD'.
# NOTE: Please fill out the license field for your package! If it is unknown,
# then please put 'unknown'.

# Maintainer: Glen Oakley <goakley123@gmail.com>
pkgname=trebuchet
pkgver=1.075
pkgrel=1
epoch=
pkgdesc="A client program for connecting to TinyMUD style social MUD servers"
arch=('any')
url="http://www.belfry.com/fuzzball/trebuchet/"
license=('GPL')
groups=()
depends=('tk')
makedepends=()
checkdepends=()
optdepends=()
provides=('treb')
conflicts=()
replaces=()
backup=()
options=()
install=
changelog=
source=($pkgname-$pkgver.tar.gz treb.desktop treb.png)
noextract=()
md5sums=('f0e48e2a370ef196747be0043f28a6ca'
         '3527b0aeb1b8140541664f00fa7ab892'
         '9584f7c9d7144f78f1ea203a12631a19') #generate with 'makepkg -g'

# Nothing to compile: the package ships Tcl scripts as-is.
build() {
	cd "$srcdir/$pkgname-$pkgver"
}

# Copy the script tree to /opt and install launcher, .desktop and icon.
package() {
	mkdir -p "$pkgdir/opt/"
	cp -r "$srcdir/$pkgname-$pkgver" "$pkgdir/opt/$pkgname"
	mkdir -p "$pkgdir/usr/bin/"
	ln -s "/opt/$pkgname/Trebuchet.tcl" "$pkgdir/usr/bin/treb"
	mkdir -p "$pkgdir/usr/share/applications"
	cp "$srcdir/treb.desktop" "$pkgdir/usr/share/applications/"
	mkdir -p "$pkgdir/usr/share/icons/hicolor/192x192/apps"
	cp "$srcdir/treb.png" "$pkgdir/usr/share/icons/hicolor/192x192/apps/"
}
| true
|
6c9dedac08b984063c12e32683c5b1095821dfe8
|
Shell
|
Ali-Neyestan/dws-dev-006-bash1
|
/try.sh
|
UTF-8
| 985
| 4.3125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Retry wrapper: run COMMAND up to -n times, sleeping -i seconds between
# attempts.  Usage: try.sh -i <interval> -n <numbers> command...
count=1

### called when the arguments are malformed
function usage {
    echo "Usage: $0 -i [interval] -n [numbers] command"
    exit 1;
}

### everything from the 5th argument on is the command to run
### (e.g. "ping 4.2.2.4" spans two words)
command=${*:5}

### Bug fix: the original wrote `[ -e $1 ] & [ -e $3 ]`, which (a) used a
### single `&`, backgrounding the first test so its result was ignored,
### and (b) tested *file existence* rather than argument presence.
### Check that both option slots were actually supplied.
if [ -z "${1:-}" ] || [ -z "${3:-}" ]; then
    usage
fi

### option parsing
case $1 in
    -i)
        interval=$2
        ;;
    *)
        usage
        ;;
esac

case $3 in
    -n)
        number=$4
        ;;
    *)
        usage
        ;;
esac

### the command itself must not be empty (usage already exits)
if [ -z "$command" ]; then
    usage
fi

while true; do
    eval $command
    rc=$?
    ### Bug fix: the original ran `echo "exit $?"` and then `exit $?`,
    ### so it exited with echo's status (always 0) instead of the
    ### command's.  Capture the status before using it.
    if [[ $rc -eq 0 ]]; then
        echo "exit $rc"
        exit $rc;
    fi
    if [[ $count -ge $number ]]; then
        echo "Command Failed after retrying $number times" > /dev/stderr
        exit 1;
    else
        echo "Command Failed retrying in $interval"
        sleep $interval
        let "count+=1"
    fi
done
| true
|
86eb967ec9baf1d2014d8d91ce429c71bb16b621
|
Shell
|
voyage13/factomd
|
/scripts/performance/pulldata.sh
|
UTF-8
| 118
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
# Copy the shared timing output into a per-test result file.
# Usage: ./pulldata.sh <test name>
if [ -z "$1" ]; then
	echo "Usage:"
	echo "  ./pulldata.sh <test name>"
else
	# Fix: quote the destination so test names containing spaces work.
	cp ../../time.out "./$1.out"
fi
| true
|
40d9ee9d20d10208afe3812f596d962d2559eecc
|
Shell
|
sonnenscheinchen/emu-scripts
|
/deluaem.sh
|
UTF-8
| 1,191
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Delete .uaem metadata files created by fs-uae inside an Amiga hard
# drive directory, optionally restoring each host file's mtime first.
if [ ! -d "$2" ]; then
	echo "Delete .uaem files created by fs-uae"
	echo "Usage: $0 [-u | -a] /path/to/harddrive_dir [-m]"
	echo "Options:"
	echo "-u    delete (possibly) unwanted .uaem files"
	echo "-a    delete all .uaem files"
	echo "-m    also change atime and mtime of host filesystem's files (not a good idea!)"
	exit 0
fi

HDD="$2"
FILELIST=$(mktemp)

# -a: every .uaem file.  -u: only 33-byte files whose stored Amiga
# protection bits match the default pattern (i.e. carry no extra info).
if [ "$1" == "-a" ]; then
	find "$HDD" -type f -name "*.uaem" > "$FILELIST"
elif [ "$1" == "-u" ]; then
	find "$HDD" -type f -size 33c -name "*.uaem" -print0 | \
	xargs -0 grep -lEe "^----rw[e-]d" > "$FILELIST"
else
	echo "Bad option: $1"
	rm "$FILELIST"
	exit 1
fi

if [ ! -s "$FILELIST" ]; then
	echo "No files to delete."
	rm "$FILELIST"
	exit 0
fi

COUNT=0
while read UAEMFILE; do
	# With -m, restore the companion file's timestamp from the .uaem
	# metadata (characters 10-31 hold the stored modification time).
	if [ "$3" == "-m" ]; then
		REALFILE="${UAEMFILE%*.uaem}"
		MTIME=$(cat "$UAEMFILE" | cut -c10-31)
		echo "Mod: $REALFILE --> $MTIME"
		touch -d "$MTIME" "$REALFILE"
	fi
	echo "Del: $UAEMFILE"
	#echo -n "Del: $UAEMFILE "; cat "$UAEMFILE"
	rm "$UAEMFILE"
	((COUNT++))
done < "$FILELIST"

rm "$FILELIST"
# Each .uaem file is 33 bytes of payload; report space reclaimed under
# typical filesystem block sizes too.
echo -e "\nDeleted $COUNT files, saved $(($COUNT*33))Bytes, $(($COUNT*4))kb @ 4k blocksize, $(($COUNT*32))kb @ 32k blocksize."
|
5b2bf2228b867f63c5496d5a9f797819ebf92493
|
Shell
|
afolarin/chip_gt
|
/opticall/opticall_parsers/sge_opticall_chunker.sh
|
UTF-8
| 1,233
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
#$-S /bin/bash
#$-cwd
#$-V
#########################################################################
# -- Author: Amos Folarin                                               #
# -- Organisation: KCL/SLaM                                             #
# -- Email: amosfolarin@gmail.com                                       #
#########################################################################
## Create chromosome chunks from the Genome Studio file with the format
## Name<tab>Chr<tab>Position<tab>Sample1.GType<tab>Sample1.X<tab>Sample1.Y
# USAGE: opticall_chunker.sh <genome-studio-report_file>
# OUTPUT: 1 file per chromosome
# NOTE: `declare -a` below is a bashism; the SGE directive #$-S above
# requests bash, which is why it works despite the /bin/sh shebang.
# ARGS:
inFile=$1
outFile=$2

#------------------------------------------------------------------------
# Create chunk files by chromosome
#------------------------------------------------------------------------
declare -a chrs=("1" "2" "3" "4" "5" "6" "7" "8" "9" "10" "11" "12" "13" "14" "15" "16" "17" "18" "19" "20" "21" "22" "X" "Y" "XY" "MT")

# Fix: quote file-path and array expansions so paths containing spaces
# (or an accidentally empty argument) cannot word-split or glob.
for i in "${chrs[@]}"
do
    #take the header
    awk '(NR == 1) {print $0}' "${inFile}" > "${outFile}_Chr_${i}"
    #print the whole row where Chr field ($2) == chr i
    awk -v chromosome="${i}" '($2 == chromosome) {print $0}' "${inFile}" >> "${outFile}_Chr_${i}"
done
| true
|
70591f021426b0c8e624512ef7b16e2dc7ffe6cc
|
Shell
|
adyang/dotfiles
|
/lib/await.sh
|
UTF-8
| 330
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Retry a command until it succeeds or a timeout elapses.
#
# Usage:   await <command> [args...]
# Env:     AWAIT_TIMEOUT_SECS - seconds before giving up (default: 15)
# Returns: 0 once the command succeeds, 124 on timeout (mirrors coreutils
#          `timeout`'s exit code).
await() {
  local commands=("$@")
  # Timeout is now overridable via the environment; 15s kept as the default.
  local timeoutSecs="${AWAIT_TIMEOUT_SECS:-15}"
  local current expiry
  current="$(date +%s)"
  expiry="$(( current + timeoutSecs ))"
  # NOTE: busy-waits between attempts; add a short `sleep` here if the
  # probed command is expensive to run.
  until "${commands[@]}"; do
    current="$(date +%s)"
    if (( current > expiry )); then
      # ${commands[*]} joins the words for display (avoids SC2145);
      # diagnostics go to stderr so stdout stays clean for callers.
      echo "Timeout retrying: '${commands[*]}'" >&2
      return 124
    fi
  done
}
| true
|
c6fed42de212eb10b3c15cda5f8346829194a279
|
Shell
|
hdmsoftware/lightcoder-v1
|
/modules/common/bashscripts/script.sh
|
UTF-8
| 1,162
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# - This is the main script that is used to compile/interpret the source code
# - The script takes 3 arguments
# 1. The compiler that is to compile the source file.
# 2. The source file that is to be compiled/interpreted
# 3. Additional argument only needed for compilers, to execute the object code
#
compiler=$1
file=$2
output=$3
addtionalArg=$4
# Redirect all stdout to logfile.txt and all stderr to "errors" for the
# remainder of the script. NOTE(review): $"..." is bash locale-translation
# quoting; with no translation catalog it behaves like a plain string.
exec 1> $"/mountfolder/logfile.txt"
exec 2> $"/mountfolder/errors"
#3>&1 4>&2 >
START=$(date +%s.%2N)
#Branch 1
# No $output given: $compiler is an interpreter, run the source directly.
if [ "$output" = "" ]; then
# NOTE(review): "-<" parses as a literal "-" argument plus stdin redirection
# from inputFile — confirm the interpreter expects the "-" argument.
$compiler /mountfolder/$file -< $"/mountfolder/inputFile" #| tee /mountfolder/output.txt
#Branch 2
else
#In case of compile errors, redirect them to a file
$compiler /mountfolder/$file $addtionalArg #&> /mountfolder/errors.txt
#Branch 2a
# Compilation succeeded: run the produced executable with inputFile on stdin.
if [ $? -eq 0 ]; then
$output -< $"/mountfolder/inputFile" #| tee /mountfolder/output.txt
#Branch 2b
else
echo "Compilation Failed"
#if compilation fails, display the output file
#cat /mountfolder/errors.txt
fi
fi
END=$(date +%s.%2N)
runtime=$(echo "$END - $START" | bc)
# Sentinel marker the host application scans for, followed by the runtime.
echo "*-COMPILEBOX::ENDOFOUTPUT-*" $runtime
# Moving the log into /mountfolder/completed signals that the run finished.
mv /mountfolder/logfile.txt /mountfolder/completed
| true
|
8ee414bfcb3d77648412a7b46259a7776b69642d
|
Shell
|
andrius-k/CERNTasks
|
/task3/aggregate_campaigns
|
UTF-8
| 1,730
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/sh
# Run the CMSSpark campaign-aggregation job on HDFS for a given date and
# download/flatten the resulting CSV parts into three local files.
# NOTE(review): uses [[ ]] and ${#...} bashisms under a /bin/sh shebang —
# works only where /bin/sh is bash; confirm or change the shebang.
# Parse date argument
date=20170228
if [[ -n $1 ]]
then
date=$1
fi
# Dates must be exactly 8 characters (YYYYMMDD).
date_length=${#date}
if [[ $date_length != 8 ]]
then
echo 'Invalid date. Example: 20170228'
exit
fi
echo 'Aggregating for date: '$date
# Chnage username in location value
location=/cms/users/$USER/campaigns
hdir=hdfs://$location
# Copy script file that will be ran
cp aggregate_campaigns.py ../CMSSpark/src/python/CMSSpark/aggregate_campaigns.py
# Remove previous data first
hadoop fs -rm -r $location
PYTHONPATH=$(pwd)/../CMSSpark/src/python ../CMSSpark/bin/run_spark aggregate_campaigns.py --fout=$hdir --yarn --verbose --date=$date
# `hadoop fs -test -e` returns 0 when the output path exists.
hadoop fs -test -e $hdir
exists=$?
# Download results and recreate csv files only if results exist in hdfs
if [[ $exists -eq 0 ]]
then
# Delete previously downloaded directory and download new one
basename $hdir | xargs rm -rf
hadoop fs -get $hdir .
# Extract PhEDEx header
head -1 campaigns/phedex/part-00000 > campaigns_phedex_df.csv
# Concatenate all PhEDEx parts except header
# NOTE(review): $header is expanded unquoted into grep — safe only while the
# CSV header contains no whitespace or glob characters; consider "$header".
header=`cat campaigns_phedex_df.csv`
cat campaigns/phedex/part* | grep -v $header >> campaigns_phedex_df.csv
# Extract DBS header
head -1 campaigns/dbs/part-00000 > campaigns_dbs_df.csv
# Concatenate all DBS parts except header
header=`cat campaigns_dbs_df.csv`
cat campaigns/dbs/part* | grep -v $header >> campaigns_dbs_df.csv
# Extract site - campaign count header
head -1 campaigns/site_campaign_count/part-00000 > site_campaign_count_df.csv
# Concatenate all site - campaign count parts except header
header=`cat site_campaign_count_df.csv`
cat campaigns/site_campaign_count/part* | grep -v $header >> site_campaign_count_df.csv
fi
| true
|
453dfa51eb7ca98b951606915239e5ebd686c8e7
|
Shell
|
avh4/burndown-charts
|
/optimize-screenshots.sh
|
UTF-8
| 900
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -euo pipefail
# make animated gifs
# For every screenshot sequence <name>-1.png .. <name>-N.png, build an
# optimized looping GIF <name>.gif, then pngcrush all remaining PNGs.
colors=64
for i in *-1.png; do
	name="${i%-1.png}"
	# Highest frame number for this sequence (numeric sort of the -N suffixes).
	lastframe=$(ls "$name"-*.png | sed -e "s/^${name}-//;s/\\.png$//" | sort -n | tail -n1)
	# Canvas size taken from the last frame, e.g. "640x480".
	size=$(identify -format '%wx%h' "$name"-"$lastframe".png)
	# make a palette that is forced to include #f8f8f8 (for the gridlines)
	# (reserve one slot: quantize to colors-1, then extend with the background.)
	convert "$name"-"$lastframe".png -colors $(($colors - 1)) -unique-colors _palette.png
	convert _palette.png -background '#f8f8f8' -extent "$colors"x1 _p2.png
	mv _p2.png _palette.png
	# create the gif
	# -delay 80 = 0.8s/frame; ?.png then ??.png keeps frames in numeric order.
	convert -delay 80 -loop 0 +dither -remap _palette.png -extent "$size" "$name"-?.png "$name"-??.png _.gif
	# optimize the gif
	gifsicle --optimize=3 _.gif -o "$name".gif
	# remove temp files
	rm "$name"-*.png _palette.png _.gif
done
# optimize pngs
for i in *.png; do
	pngcrush -brute -reduce -ow "$i"
done
ls -lh *.png *.gif
| true
|
cd8b60fb5ae7c7424546a79c421ac01a0d0d2d96
|
Shell
|
jan-kaspar/elastic_analysis.ctpps.2018.september
|
/export_candidates
|
UTF-8
| 197
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy the candidates*.txt files of every DS-xangle-* dataset directory into
# the public export area, preserving the per-dataset directory layout.
export_dir="/afs/cern.ch/user/j/jkaspar/public/for_frici/ctpps-2018-september"
for dir in DS-xangle-*
do
	# Without nullglob an unmatched pattern passes through literally and would
	# create a bogus "DS-xangle-*" export directory; skip non-directories.
	[ -d "$dir" ] || continue
	mkdir -p "$export_dir/$dir"
	# Quote "$dir" but leave the glob unquoted so it still expands.
	cp "$dir"/candidates*.txt "$export_dir/$dir"
done
| true
|
51400cf072c6e21d37a0daf7dac2e2ca02aab5dd
|
Shell
|
droiter/lxc-for-Android-7.1.2
|
/envsetup_x86_64.sh
|
UTF-8
| 1,215
| 2.90625
| 3
|
[] |
no_license
|
# Cross-compilation environment for Android x86_64 (standalone toolchain,
# GCC 4.9 sysroot, android-24 per the toolchain path). Intended to be sourced.
export ANDROID_SDK_HOME=/home/sting/local/android-sdk-linux
export ANDROID_NDK_HOME=/home/sting/local/android-ndk-r15b
export ANDROID_STANDALONE_TOOLCHAIN_HOME=/opt/toolchain/android-toolchain-x86_64-4.9-android-24
export SYSROOT=$ANDROID_STANDALONE_TOOLCHAIN_HOME/sysroot
# User specific environment and startup programs
# PATH is assembled piecewise (NDK first, then SDK tools, toolchain, system
# dirs) and exported once below. NOTE(review): this REPLACES the inherited
# PATH rather than prepending to it — confirm that is intended.
PATH=${ANDROID_NDK_HOME}
PATH=$PATH:${ANDROID_SDK_HOME}/tools:${ANDROID_SDK_HOME}/platform-tools
PATH=$PATH:${ANDROID_STANDALONE_TOOLCHAIN_HOME}/bin:/usr/local/sbin:/usr/local/bin
PATH=$PATH:/usr/sbin:/usr/bin:/sbin:/bin
export PATH=$PATH
# Tell configure what tools to use.
export BUILD_TARGET_HOST=x86_64-linux-android
export AR=$BUILD_TARGET_HOST-ar
export AS=$BUILD_TARGET_HOST-clang
export CC=$BUILD_TARGET_HOST-clang
export CXX=$BUILD_TARGET_HOST-clang++
export LD=$BUILD_TARGET_HOST-ld
export STRIP=$BUILD_TARGET_HOST-strip
export RANLIB=$BUILD_TARGET_HOST-ranlib
# Tell configure what flags Android requires.
# -fPIE/-pie: Android requires position-independent executables.
export CFLAGS="-fPIE -fPIC --sysroot=$SYSROOT"
export LDFLAGS="-pie"
# SELinux specifics
# NOTE(review): BASEDIR is the *current working directory* at source time,
# not this script's location — source this file from the project root.
BASEDIR=$(pwd)
export ANDROID_LIBS="$BASEDIR/../android-libs/x86_64"
export CFLAGS="$CFLAGS -I$ANDROID_LIBS/include"
export LDFLAGS="$LDFLAGS -L$ANDROID_LIBS/lib"
export ODMDIR=/odm
|
e9eed6a3ab735e206e16cdf9400d4e67d913a815
|
Shell
|
rospix/gazebo_rad_obstacle
|
/gazebo_files/build.sh
|
UTF-8
| 293
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Regenerate templated Gazebo model files: for every *.erb file found one
# directory level below, render it with erb into a file of the same name
# minus the ".erb" suffix (via ${f:0:-4}).
./cleanup.sh
for filename in *; do
	if [[ -d "$filename" ]]; then
		# Guard the cd: on failure the old code would have processed the
		# parent directory's files and then escaped it with "cd ..".
		cd "$filename" || continue
		for f in *; do
			echo "$f"
			# NOTE(review): matches ".erb" anywhere in the name, not only as a
			# suffix — confirm that is intended.
			if [[ $f == *".erb"* ]]; then
				# NOTE(review): "$(unknown)" looks like a placeholder left in
				# the source; it expands to empty output plus a
				# command-not-found error. Kept as-is.
				echo "Generating $(unknown)/${f:0:-4}"
				# Quoted redirect target so names with spaces work.
				erb "$f" > "${f:0:-4}"
			fi
		done
		cd ..
	fi
done
echo "Done"
| true
|
e38d651f5bfbb5204bbd68b49e6223168160ae89
|
Shell
|
uday745/DEVOPS
|
/shell scripts/file_handling.sh.txt
|
UTF-8
| 659
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Variant 1: pipe a file into a while-read loop.
# NOTE(review): the loop body runs in a subshell (right side of the pipe),
# so variables assigned inside it are lost after "done".
cat uday.log | while read LINE
do
echo $LINE
echo "Hello world !"
done
---------------------
#!/bin/bash
# Variant 2: same loop fed by input redirection — the loop runs in the
# current shell, so there is no subshell pitfall.
while read LINE
do
echo $LINE
echo "Hello world !"
done < uday.log
----------------------------
#!/bin/bash
# Variant 3: demonstrates the subshell pitfall. The counter incremented
# inside the piped loop is discarded, so the final echo always reports the
# initial value (2).
myfile="uday.log"
counter=2
cat $myfile | while read LINE
do
echo $LINE
counter=$((counter+1))
echo $counter
done
echo "Filename $myfile has $counter lines"
-----------------------------------------
#!/bin/bash
# Variant 4: redirection version — the counter survives the loop.
# NOTE(review): $myfile is never assigned in this variant, so the final
# message prints an empty filename; also counter starts at 2, not 0.
counter=2
while read LINE
do
echo $LINE
counter=$((counter+1))
echo $counter
done < uday.log
echo "Filename $myfile has $counter lines"
----------------------------------
| true
|
6030c29c6cef97a164600cf504420964637a1557
|
Shell
|
dybekm/gws
|
/src/gws
|
UTF-8
| 38,380
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
################################################################################
# Author: Fabien Dubosson <fabien.dubosson@gmail.com> #
# OS: (Probably?) All Linux distributions #
# Requirements: git, bash > 4.0 #
# License: MIT (See below) #
# Version: 0.2.0 #
# #
# 'gws' is the abbreviation of 'Git WorkSpace'. #
# This is a helper to manage workspaces composed of git repositories. #
################################################################################
#-------------------------------------------------------------------------------
# License
#-------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2015 Fabien Dubosson <fabien.dubosson@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#-------------------------------------------------------------------------------
# Bash options
#-------------------------------------------------------------------------------
# Uncomment for Debug.
# set -x
# Propagate fail in pipes.
set -o pipefail
# Unset $CDPATH to avoid paths being printed and breaking functions.
# See: https://github.com/StreakyCobra/gws/pull/18
CDPATH=""
#-------------------------------------------------------------------------------
# Parameters
#-------------------------------------------------------------------------------
# Version number.
VERSION="0.2.0"
# Starting directory.
START_PWD="$(pwd)"
# Name of the file containing the projects list.
PROJECTS_FILE=".projects.gws"
# Name of the file containing the ignored patterns.
IGNORE_FILE=".ignore.gws"
# Name of the file containing the cache.
CACHE_FILE=".cache.gws"
# Name of the file overriding common colors definition
# (searched in order; the first existing file wins — see read_theme).
THEME_FILE_PATH=(
    ".git/theme.gws"
    "${HOME}/.theme.gws"
    "${HOME}/.config/gws/theme"
)
# Field separator in the projects list.
FIELD_SEP='|'
# Array lines separator.
ARRAY_LINE_SEP=', '
# Separator between the URL and its name in config file.
URL_NAME_SEP=' '
# Git name of the origin branch of repositories.
GIT_ORIGIN="origin"
# Git name of the upstream branch of repositories.
GIT_UPSTREAM="upstream"
# Git folder name. Used to detect unlisted git repositories.
GIT_FOLDER=".git"
# Indentation for status display.
INDENT="    "
# Indentation including a * for current branch status display.
INDENT_CURRENT="  * "
# Max length of branch names. Used to align information about branches in
# status.
MBL=25
# Command to run when displaying the status.
# (Modes passed as $1 to cmd_status: S_NONE only reads, S_FETCH also
# fetches from origin, S_FAST_FORWARD also fast-forwards local branches.)
S_NONE=0
S_FETCH=1
S_FAST_FORWARD=2
# Default colors
# (ANSI escape sequences, interpreted by echo -e / printf; blanked out by
# read_theme when stdout is not a terminal.)
C_ERROR="\e[91m"
C_NOT_SYNC="\e[91m"
C_OPERATION="\e[91m"
C_VERSION="\e[91m"
C_HELP_PROGNAME="\e[91m"
C_CLEAN="\e[92m"
C_HELP_DIR="\e[92m"
C_NO_REMOTE="\e[93m"
C_REPO="\e[94m"
C_BRANCH="\e[95m"
C_LOGS="\e[96m"
C_OFF="\e[0m"
# Load color overrides. When stdout is a terminal, source the first existing
# theme file from THEME_FILE_PATH; otherwise blank out every color code so
# piped/redirected output contains no escape sequences.
function read_theme() {
    local theme_file
    # Printing in color.
    if [[ -t 1 ]]; then
        for theme_file in "${THEME_FILE_PATH[@]}"; do
            if [[ -e "$theme_file" ]]; then
                source "$theme_file"
                break
            fi
        done
    else
        # Disable colors if standard out is not a terminal
        C_ERROR=""
        C_NOT_SYNC=""
        # BUG FIX: C_OPERATION was missing from this reset, so the
        # "[merge in progress]" annotations leaked escape codes into
        # non-terminal output.
        C_OPERATION=""
        C_VERSION=""
        C_HELP_PROGNAME=""
        C_CLEAN=""
        C_HELP_DIR=""
        C_NO_REMOTE=""
        C_REPO=""
        C_BRANCH=""
        C_LOGS=""
        C_OFF=""
    fi
}
#-------------------------------------------------------------------------------
# Variable declarations
#-------------------------------------------------------------------------------
# Associative array containing projects' information, associated by the key
# available in `projects_indexes`.
declare -A projects
# Array containing projects' names, sorted.
declare -a projects_indexes
# Array containing ignored patterns.
declare -a ignored_patterns
# Array used to transmit the list of branches.
# (Out-parameter filled by git_branches for its callers.)
declare -a branches
# Default values for command line options
# ("true"/"false" strings, evaluated as commands via `$option_only_changes`;
# presumably toggled by argument parsing further down — not visible here.)
option_only_changes=false
#-------------------------------------------------------------------------------
# General functions
#-------------------------------------------------------------------------------
# Return 0 when the first argument equals any of the remaining arguments,
# 1 otherwise.
function array_contains()
{
    local needle=$1; shift
    local candidate
    for candidate in "$@"; do
        [[ "$candidate" == "$needle" ]] && return 0
    done
    return 1
}
# Print the elements of the first array (passed by name as "name[@]") that do
# not match any regex pattern in the second array. Output is space-separated
# with a trailing space, matching the original format.
function remove_matching()
{
    local item pattern keep
    # Reconstruct the input arrays from their indirect references.
    declare -a items=( "${!1}" )
    declare -a patterns=( "${!2}" )

    for item in "${items[@]}"
    do
        keep=1
        for pattern in "${patterns[@]}"
        do
            # First matching pattern disqualifies the element.
            if [[ $item =~ $pattern ]]; then
                keep=0
                break
            fi
        done
        [[ $keep -eq 1 ]] && echo -n "$item "
    done
    return 0
}
# Remove from a list all elements that have another element of the same list
# as a strict prefix (used to drop sub-repositories, e.g. "foo/bar/" when
# "foo/" is present). Prints the survivors space-separated, trailing space.
# IMPORTANT: the input (passed by name as "name[@]") must be sorted.
function remove_prefixed()
{
    local candidate other prefixed
    # Reconstruct the input array from its indirect reference.
    declare -a entries=( "${!1}" )

    for candidate in "${entries[@]}"
    do
        prefixed=0
        for other in "${entries[@]}"
        do
            # "other" is a strict prefix of "candidate" → drop candidate.
            if [[ $candidate =~ ^$other.+ ]]; then
                prefixed=1
                break
            fi
            # Input is sorted: anything past "candidate" cannot prefix it.
            [[ "$other" > "$candidate" ]] && break
        done
        [[ $prefixed -eq 0 ]] && echo -n "$candidate "
    done
    return 0
}
# Keep projects that are prefixed by the given directory.
# Reads globals START_PWD and projects_indexes; prints the matching project
# paths space-separated (trailing space). Returns 1 if $1 does not exist.
function keep_prefixed_projects()
{
    local limit_to dir current
    # First check if the folder exists
    [[ ! -d "${START_PWD}/$1" ]] && return 1
    # Get the full path to limit_to in regexp form
    # (canonicalized via cd+pwd, with a trailing slash so "foo" does not
    # accidentally match "foobar").
    limit_to=$(cd "${START_PWD}/$1" && pwd )/
    # Iterate over each project
    for dir in "${projects_indexes[@]}"
    do
        # Get its full path
        current=$(cd "${PWD}/${dir}/" && pwd )/
        # If it matches, add it to the output
        [[ $current =~ ^$limit_to ]] && echo -n "$dir "
    done
    # Everything is right
    return 0
}
#-------------------------------------------------------------------------------
# Projects functions
#-------------------------------------------------------------------------------
# Is the current directory the root of a workspace?
# Returns 0 when the projects file is present, 1 otherwise; aborts the whole
# program when the filesystem root is reached without finding one.
function is_project_root()
{
    # A projects file in the current directory marks the workspace root.
    if ls "$PROJECTS_FILE" 1>/dev/null 2>&1; then
        return 0
    fi
    # Reached "/" without a projects file: hard error.
    if [[ $(pwd) = "/" ]]; then
        echo "Not in a workspace"
        exit 1
    fi
    return 1
}
# Add a project to the list of projects.
# $1 is the project path, $2 its serialized remotes list; stores the pair in
# the global `projects` associative array.
function add_project()
{
    projects["$1"]=$2
    return 0
}
# Check if the project exists in the list of projects
# (membership test over the global sorted index; array_contains inlined).
function exists_project()
{
    local known
    for known in "${projects_indexes[@]}"; do
        [[ "$known" == "$1" ]] && return 0
    done
    return 1
}
# Read the list of projects from the projects list file
# Parses PROJECTS_FILE ("dir | url [name] | url [name] ..."), populates the
# globals `projects` and `projects_indexes`, and refreshes CACHE_FILE with
# the file hash plus the serialized arrays.
function read_projects()
{
    # Replace the hash of PROJECTS_FILE in the cache
    CACHED_PROJECTS_HASH=$(md5sum "${PROJECTS_FILE}" 2>/dev/null || echo NONE)
    sed -i -e '/^declare -- CACHED_PROJECTS_HASH=/d' "${CACHE_FILE}"
    declare -p CACHED_PROJECTS_HASH >> "${CACHE_FILE}"
    # Remove arrays from the cache
    sed -i -e '/^declare -A projects=/d' "${CACHE_FILE}"
    sed -i -e '/^declare -a projects_indexes=/d' "${CACHE_FILE}"
    projects=()
    projects_indexes=()
    local line dir remotes count repo remotes_list
    # Read line by line (discard comments and empty lines)
    # (Fed by process substitution, so the loop runs in the current shell and
    # the `exit 1` below terminates the whole program, not a subshell.)
    while read -r line
    do
        # Remove inline comments
        line=$(sed -e 's/#.*$//' <<< "$line")
        # Get the directory
        dir=$(cut -d${FIELD_SEP} -f1 <<< "$line" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')
        # Get the rest of the configuration line containing remotes
        remotes=$(sed -E -e "s/^[^${FIELD_SEP}]*\\${FIELD_SEP}?//" <<< "$line")
        # Iterate over all the remotes
        count=0
        remotes_list=""
        while [ -n "$remotes" ];
        do
            count=$((count + 1))
            # Get the first remote defined in the "remotes" variable
            remote=$(cut -d${FIELD_SEP} -f1 <<< "$remotes" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/[[:space:]]\+/ /g')
            # Remove the current remote from the line for next iteration
            remotes=$(sed -E -e "s/^[^${FIELD_SEP}]*\\${FIELD_SEP}?//" <<< "$remotes")
            # Get its url
            remote_url=$(cut -d"${URL_NAME_SEP}" -f1 <<< "$remote" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')
            # Get its name, if any
            remote_name=$(cut -d"${URL_NAME_SEP}" -f2 -s <<< "$remote")
            # If name is not set we infer it as:
            # 1st: origin
            # 2nd: upstream
            # Else crash
            if [[ -z "$remote_name" ]]; then
                if [[ $count == 1 ]]; then
                    remote_name=$GIT_ORIGIN
                elif [[ $count == 2 ]]; then
                    remote_name=$GIT_UPSTREAM
                else
                    error_msg="${C_ERROR}The URL at position ${count} for \"${dir}\" is missing a name.${C_OFF}"
                    echo -e "$error_msg"
                    exit 1
                fi
            fi
            # Store the current remote in the list of remotes
            # (serialized as "name|url, " pairs; parsed back by get_repo_url)
            remotes_list+="${remote_name}${FIELD_SEP}${remote_url}${ARRAY_LINE_SEP}"
        done
        # Skip if the dir is empty
        [ -z "${dir}" ] && continue
        # Otherwise add the project to the list
        add_project "${dir}" "${remotes_list}"
    done < <(grep -v "^#\|^$" $PROJECTS_FILE)
    # Extract sorted index of projects
    readarray -t projects_indexes < <(for a in "${!projects[@]}"; do echo "$a"; done | sort)
    # Save the result in the cache
    if [[ ! ${#projects[@]} -eq 0 ]]; then
        declare -p projects >> "${CACHE_FILE}"
        declare -p projects_indexes >> "${CACHE_FILE}"
    fi
    return 0
}
# Read the list of ignored patterns from the file
# Populates the global `ignored_patterns` from IGNORE_FILE and refreshes the
# corresponding cache entries in CACHE_FILE.
function read_ignored()
{
    # Replace the hash of IGNORE_FILE in the cache
    CACHED_IGNORE_HASH=$(md5sum "${IGNORE_FILE}" 2>/dev/null || echo NONE)
    sed -i -e '/^declare -- CACHED_IGNORE_HASH=/d' "${CACHE_FILE}"
    declare -p CACHED_IGNORE_HASH >> "${CACHE_FILE}"
    # Remove array from the cache
    sed -i -e '/^declare -a ignored_patterns=/d' "${CACHE_FILE}"
    ignored_patterns=()
    # If ignore file is empty, skip the rest
    [[ -e "$IGNORE_FILE" ]] || return 0
    local pattern
    # Read line by line
    while read -r pattern
    do
        # Remove inline comments
        pattern=$(sed -e 's/#.*$//' <<< "$pattern")
        # Go to next pattern if this pattern is empty
        [[ -z $pattern ]] && continue
        # Escape regex characters
        # (only '/' and '&' are escaped — enough for the sed expressions the
        # patterns are later embedded in)
        pattern=$(sed -e 's/[/&]/\\&/g' <<< "$pattern")
        # Add it to the list of ignored patterns
        ignored_patterns+=( "$pattern" )
    done < <(grep -v "^#\|^$" $IGNORE_FILE)
    # Save the result in the cache
    if [[ ! ${#ignored_patterns[@]} -eq 0 ]]; then
        declare -p ignored_patterns >> "${CACHE_FILE}"
    fi
    return 0
}
# Extract the origin URL from a project's serialized remotes list
# ("name|url, " pairs, as produced by read_projects).
# Prints the URL and returns 0; returns 1 when no origin remote is defined.
function get_repo_url()
{
    local entry name
    declare -A url_by_name

    # Split the serialized list on the array-line separator.
    IFS=${ARRAY_LINE_SEP} read -a array <<< "$1"

    # Index the URLs by remote name.
    for entry in "${array[@]}";
    do
        name=$(cut -d${FIELD_SEP} -f1 <<< "${entry}")
        url_by_name["${name}"]=$(cut -d${FIELD_SEP} -f2 <<< "${entry}")
    done

    # Bail out when no origin remote was recorded.
    [ "${url_by_name[${GIT_ORIGIN}]+isset}" ] || return 1

    # Print the origin URL.
    cut -d${FIELD_SEP} -f2 <<< "${url_by_name[${GIT_ORIGIN}]}"
    return 0
}
#-------------------------------------------------------------------------------
# Git functions
#-------------------------------------------------------------------------------
# Clone the repository at URL $1 into directory $2.
# Prints git's combined output only on failure; returns git's status.
function git_clone()
{
    if output=$(git clone "$1" "$2" 2>&1); then
        return 0
    fi
    echo "$output"
    return 1
}
# Fetch in repository $1. Succeeds only when the fetch both worked and
# printed something, i.e. new objects/refs actually arrived.
function git_fetch()
{
    output=$(cd "$1" && git fetch 2>&1) || return 1
    [ -n "$output" ] || return 1
    return 0
}
# Fetch branch $2 of repository $1 from origin, updating the local ref in
# place (refspec "$2:$2"). Succeeds only when the fetch produced output,
# i.e. the ref actually moved.
function git_fetch_update()
{
    output=$(cd "$1" && git fetch "${GIT_ORIGIN}" "$2:$2" 2>&1) || return 1
    [ -n "$output" ] || return 1
    return 0
}
# Fast-forward pull in repository $1. Succeeds only when something was
# actually pulled (git did not report "Already up-to-date.").
function git_fast_forward()
{
    output=$(cd "$1" && git pull --ff-only 2>&1) || return 1
    [ "$output" = "Already up-to-date." ] && return 1
    return 0
}
# In repository $1, register a remote named $2 pointing at URL $3.
# Prints git's output only on failure; returns git's status.
function git_add_remote()
{
    if output=$(cd "$1" && git remote add "$2" "$3"); then
        return 0
    fi
    echo "$output"
    return 1
}
# Get a remote url
# Prints the URL of the first remote of repo $1 whose line matches $2.
# NOTE(review): the first cut's delimiter may have been a literal tab in the
# original (`git remote -v` separates name and URL with a tab) — confirm the
# exact delimiter characters before touching this pipeline.
function git_remote_url()
{
    local cmd
    # Git command to execute
    cmd=( "git" "remote" "-v" )
    # Run the command and print the output
    (cd "$1" && "${cmd[@]}" | grep "$2" | head -n 1 | cut -d' ' -f1 | cut -d' ' -f 2 | tr -d ' ')
    return 0
}
# Print the names of the remotes configured in repository $1, one per line.
function git_remotes()
{
    ( cd "$1" && git remote )
    return 0
}
# Succeed when repository $1 defines a remote named exactly $2.
function git_remote_exists()
{
    ( cd "$1" && git remote | grep "^$2\$" ) > /dev/null 2>&1
    return $?
}
# Print the name of the branch currently checked out in repository $1
# (the line `git branch` marks with "*", stripped of decoration).
function git_branch()
{
    ( cd "$1" && git branch --no-color | grep "*" | cut -d'*' -f 2 | tr -d ' ' )
    return 0
}
# Fill the global `branches` array with every branch name of repository $1.
# The result is passed via the global because bash functions cannot return
# arrays directly.
function git_branches()
{
    local listing
    listing=$(cd "$1" && git branch --no-color | cut -d'*' -f 2 | tr -d ' ')
    # Intentional word-splitting: one array element per branch name.
    branches=( $listing )
    return 0
}
# Succeed when the working tree of repository $1 has no unstaged changes
# (i.e. `git diff --exit-code` is clean).
function git_check_uncached_uncommitted()
{
    ( cd "$1" && git diff --exit-code 1>/dev/null 2>&1 ) && return 0
    return 1
}
# Succeed when repository $1 has nothing staged but not yet committed
# (i.e. `git diff --cached --exit-code` is clean).
function git_check_cached_uncommitted()
{
    ( cd "$1" && git diff --cached --exit-code 1>/dev/null 2>&1 ) && return 0
    return 1
}
# Succeed when repository $1 contains no untracked files
# (no "??" entries in the porcelain status).
function git_check_untracked()
{
    local untracked
    untracked=$(cd "$1" && git status --porcelain 2>/dev/null | grep -c "^??")
    [[ $untracked -eq 0 ]] && return 0
    return 1
}
# Compare local branch $2 of repository $1 against origin/$2.
# Returns: 0 when both point at the same commit, 1 when they differ,
#          2 when the remote branch does not exist, 3 when even the local
#          hash cannot be resolved (should not happen).
function git_check_branch_origin()
{
    local here there
    here=$(cd "$1"; git rev-parse --verify "$2" 2>/dev/null) || return 3
    there=$(cd "$1"; git rev-parse --verify "${GIT_ORIGIN}/$2" 2>/dev/null) || return 2
    [ "$here" == "$there" ] && return 0
    return 1
}
# Succeed when repository $1 has a merge in progress
# (a MERGE_HEAD file present under .git).
function git_check_for_merge_head()
{
    if [[ -f "$1/.git/MERGE_HEAD" ]]; then
        return 0
    fi
    return 1
}
# Succeed when repository $1 has a cherry-pick in progress
# (a CHERRY_PICK_HEAD file present under .git).
function git_check_for_cherry_pick_head()
{
    if [[ -f "$1/.git/CHERRY_PICK_HEAD" ]]; then
        return 0
    fi
    return 1
}
#-------------------------------------------------------------------------------
# Command functions
#-------------------------------------------------------------------------------
# Init command
# Scans the current workspace for git repositories and writes PROJECTS_FILE
# ("dir | origin-url [| other-url name]..."). Fails when already a workspace
# or when no repository is found.
function cmd_init()
{
    # Go back to start directory
    # NOTE(review): the `exit 1` runs inside a ( ) subshell, so it does NOT
    # abort the script when the cd fails — only the subshell exits.
    cd "$START_PWD" || (echo "Initial folder ${START_PWD} doesn't exist any longer" && exit 1)
    # Check if already a workspace
    [[ -f ${PROJECTS_FILE} ]] && echo -e "${C_NOT_SYNC}Already a workspace.${C_OFF}" && return 1
    local found remote
    declare -a found
    # Prepare the list of all existing projects, sorted
    # (directories containing a .git folder, with sub-repositories removed)
    found=( $(find ./* -type d -name "$GIT_FOLDER" | sed -e "s#/${GIT_FOLDER}\$#/#" | cut -c 3- | sort) )
    found=( $(remove_prefixed found[@]) )
    # Create the list of repositories
    output=$(for dir in "${found[@]}"
    do
        dir="${dir%/}"
        echo -n "$dir | $(git_remote_url "$dir" "${GIT_ORIGIN}")"
        for remote in $(git_remotes "$dir");
        do
            [[ "$remote" != "${GIT_ORIGIN}" ]] && echo -n " | $(git_remote_url "$dir" "$remote") $remote"
        done
        echo
    done)
    # Write the file if it is not empty
    [[ ! -z "$output" ]] && (echo "$output" > ${PROJECTS_FILE}) && echo -e "${C_CLEAN}Workspace file «${PROJECTS_FILE}» created.${C_OFF}" && return 0
    echo -e "${C_NO_REMOTE}No repository found.${C_OFF}"
    return 1
}
# Selective clone command
# Clones each project directory named in "$@" from its origin URL (when it
# is missing on disk) and creates any remotes from the projects list that
# the repository does not yet define. Returns 1 on usage error or failed
# clone, 0 otherwise.
function cmd_clone()
{
    local dir repo remote remote_name remote_url
    if [[ -z "$1" ]]; then
        echo -e "Usage: ${C_HELP_PROGNAME}$(basename "$0")${C_OFF} ${C_REPO}clone${C_OFF} ${C_HELP_DIR}<directory>...${C_OFF}"
        return 1
    fi
    # For all projects
    for dir in "$@"
    do
        # Get information about the current project
        # (repo is empty for local-only projects with no origin URL)
        repo=$(get_repo_url "${projects[$dir]}")
        # Print the repository
        local project_name_printed=0
        local after=""
        local skip_clone=0
        # Print information for local only repositories
        if [[ -z $repo ]]; then
            print_project_name_unless_done_already "$dir" $project_name_printed
            project_name_printed=1
            after="${C_REPO}[Local only repository]${C_OFF}"
            skip_clone=1
        fi
        # Check if repository already exists, and continue if it is the case
        if [ -d "$dir" ]; then
            if ! $option_only_changes || [[ -n "$after" ]]; then
                # Print the information
                print_project_name_unless_done_already "$dir" $project_name_printed
                project_name_printed=1
                printf "${INDENT}%-${MBL}s${C_CLEAN} %s${C_OFF} " " " "Already exists"
                skip_clone=1
            fi
        elif [[ -z $repo ]]; then
            # Print the information
            print_project_name_unless_done_already "$dir" $project_name_printed
            project_name_printed=1
            printf "${INDENT}%-${MBL}s${C_NOT_SYNC} %s${C_OFF} " " " "No URL defined"
            skip_clone=1
        fi
        if [[ $project_name_printed -eq 1 ]] || [[ -n "$after" ]]; then
            printf "%s\n" "$after"
        fi
        if [[ $skip_clone -eq 1 ]]; then
            continue
        fi
        # Clone repository if missing
        if [[ ! -d "$dir" ]]; then
            print_project_name_unless_done_already "$dir" $project_name_printed
            project_name_printed=1
            printf "${INDENT}%-${MBL}s${C_LOGS} %s${C_OFF}\n" " " "Cloning…"
            # Clone the repository
            if ! git_clone "$repo" "$dir"; then
                printf "${INDENT}%-${MBL}s${C_ERROR} %s${C_OFF}\n" " " "Error"
                return 1
            fi
            printf "${INDENT}%-${MBL}s${C_CLEAN} %s${C_OFF}\n" " " "Cloned"
        fi
        # Create any missing remotes
        # (re-parse the serialized "name|url, " list stored by read_projects)
        IFS=${ARRAY_LINE_SEP} read -a array <<< "${projects[$dir]}"
        for remote in "${array[@]}"
        do
            remote_name=$(cut -d${FIELD_SEP} -f1 <<< "${remote}")
            remote_url=$(cut -d${FIELD_SEP} -f2 <<< "${remote}")
            if ! git_remote_exists "${dir}" "${remote_name}"; then
                git_add_remote "${dir}" "${remote_name}" "${remote_url}"
            fi
        done
    done
    return 0
}
# Update command: clone every project from the list that is missing on disk
# (delegates the per-project work, including remote creation, to cmd_clone).
function cmd_update()
{
    local project
    for project in "${projects_indexes[@]}"
    do
        cmd_clone "$project"
    done
    return 0
}
# Print "<project>:" (in the repo color) exactly once per project:
# $1 is the project name, $2 is non-zero when it was already printed.
function print_project_name_unless_done_already() {
    local name=$1
    local already=$2
    if [[ "$already" -eq 0 ]]; then
        echo -e "${C_REPO}$name${C_OFF}:"
    fi
}
# Status command
# Prints a per-project, per-branch status report. $1 selects the mode:
# S_NONE (report only), S_FETCH (also fetch from origin) or S_FAST_FORWARD
# (also fast-forward local branches). Exits the whole program with status 1
# when anything is missing, dirty or out of sync; returns 0 otherwise.
# NOTE(review): current/display_current/after/printed etc. are assigned
# without `local`, so they leak into the global scope.
function cmd_status()
{
    local dir repo branch branch_done rc uptodate printed branch_annotation
    uptodate=1
    # For each project
    for dir in "${projects_indexes[@]}"
    do
        # Get information about the current project
        repo=$(get_repo_url "${projects[$dir]}")
        # Project name has not been printed yet
        project_name_printed=0
        # Check if repository already exists, and continue if it is not the case
        if [ ! -d "$dir" ]; then
            if ! $option_only_changes; then
                print_project_name_unless_done_already "$dir" $project_name_printed
                printf "${INDENT}%-${MBL}s${C_NO_REMOTE} %s${C_OFF} " " " "Missing repository"
                [[ -z $repo ]] && echo -e "${C_REPO}[Local only repository]${C_OFF}"
                printf "\n"
                project_name_printed=1
            fi
            uptodate=0
            continue
        fi
        # Get the current branch name
        current=$(git_branch "$dir")
        # Cut branch name
        # (truncate to MBL-3 chars with an ellipsis so columns stay aligned)
        if [ ${#current} -gt $((MBL - 3)) ]; then
            display_current="${current:0:$((MBL - 3))}… :"
        else
            display_current="$current :"
        fi
        branch_done=0
        # If there is no "origin" URL defined, don't print branch information (useless)
        [[ -z $repo ]] && display_current=" "
        # Nothing is printed yet
        printed=0
        # Define current branch annotation
        # Add special information about merge in progress
        if git_check_for_merge_head "$dir"; then
            branch_annotation="${C_OPERATION}[merge in progress]${C_OFF} "
        elif git_check_for_cherry_pick_head "$dir"; then
            # Add special information about cherry-pick in progress
            branch_annotation="${C_OPERATION}[cherry-pick in progress]${C_OFF} "
        else
            branch_annotation=""
        fi
        # Check for uncached and uncommitted changes
        if ! git_check_uncached_uncommitted "$dir"; then
            print_project_name_unless_done_already "$dir" $project_name_printed
            printf "${INDENT_CURRENT}${C_BRANCH}%-${MBL}s${C_OFF} " "$display_current"
            echo -ne "${C_NOT_SYNC}Dirty (Uncached changes)${C_OFF} "
            branch_done=1
            uptodate=0
            printed=1
            project_name_printed=1
        # Check for cached but uncommitted changes
        elif ! git_check_cached_uncommitted "$dir"; then
            print_project_name_unless_done_already "$dir" $project_name_printed
            printf "${INDENT_CURRENT}${C_BRANCH}%-${MBL}s${C_OFF} " "$display_current"
            echo -ne "${C_NOT_SYNC}Dirty (Uncommitted changes${STATE_FLAG})${C_OFF} "
            branch_done=1
            uptodate=0
            printed=1
            project_name_printed=1
        # Check for untracked files
        elif ! git_check_untracked "$dir"; then
            print_project_name_unless_done_already "$dir" $project_name_printed
            printf "${INDENT_CURRENT}${C_BRANCH}%-${MBL}s${C_OFF} " "$display_current"
            echo -ne "${C_NOT_SYNC}Dirty (Untracked files)${C_OFF} "
            branch_done=1
            uptodate=0
            printed=1
            project_name_printed=1
        # If the "origin" URL is not defined in the project list, then no need
        # to check for synchronization. It is clean if there is no untracked,
        # uncached or uncommitted changes.
        elif [[ -z $repo ]]; then
            print_project_name_unless_done_already "$dir" $project_name_printed
            printf "${INDENT_CURRENT}${C_BRANCH}%-${MBL}s${C_OFF} " "$display_current"
            echo -ne "${C_CLEAN}Clean${C_OFF} "
            printed=1
            project_name_printed=1
        fi
        # Add special information for local only repositories
        if [[ -z $repo ]]; then
            print_project_name_unless_done_already "$dir" $project_name_printed
            echo -e "${C_REPO}[Local only repository]${C_OFF}"
            project_name_printed=1
            continue
        fi
        # If something was printed, finish the line
        [[ $printed -eq 1 ]] && echo -e "$branch_annotation"
        # List branches of current repository
        # (fills the global `branches` array)
        git_branches "$dir"
        # If no branches
        if [[ 0 -eq ${#branches[@]} ]]; then
            print_project_name_unless_done_already "$dir" $project_name_printed
            printf "${INDENT}%-${MBL}s${C_NO_REMOTE} %s${C_OFF}\n" " " "Empty repository"
            project_name_printed=1
        fi
        # Fetch origin
        if [[ $1 -eq $S_FETCH ]]; then
            print_project_name_unless_done_already "$dir" $project_name_printed
            git_fetch "$dir" && printf "${INDENT}%-${MBL}s${C_LOGS} %s${C_OFF}\n" " " "Fetched from origin"
            project_name_printed=1
        fi
        # Check for difference with origin
        for branch in "${branches[@]}"
        do
            # Text to display after branch
            after="\n"
            # Cut branch name
            if [ ${#branch} -gt $((MBL - 3)) ]; then
                display_branch="${branch:0:$((MBL - 3))}…"
            else
                display_branch="$branch"
            fi
            # If the branch is already done, skip it
            # (its dirty state was already reported above)
            if [[ $branch_done -eq 1 ]] && [ "$branch" = "$current" ]; then
                continue
            fi
            # Fast forward to origin
            if [[ $1 -eq $S_FAST_FORWARD ]]; then
                # Pull fast forward for current branch
                if [ "$branch" = "$current" ]; then
                    git_fast_forward "$dir" && after=" ${C_LOGS}(fast-forwarded)${C_OFF}${after}"
                # Fetch update for others
                else
                    git_fetch_update "$dir" "$branch" && after=" ${C_LOGS}(fast-forwarded)${C_OFF}${after}"
                fi
            fi
            if [ "$branch" = "$current" ]; then
                branch_indent="${INDENT_CURRENT}"
                after_note="${branch_annotation}"
            else
                branch_indent="${INDENT}"
                after_note=""
            fi
            printed=0
            # Check for diverged branches
            git_check_branch_origin "$dir" "$branch";
            # Get the return code
            rc=$?
            # If the hashes are different
            if [[ "$rc" -eq 1 ]]; then
                print_project_name_unless_done_already "$dir" $project_name_printed
                printf "${branch_indent}${C_BRANCH}%-${MBL}s${C_OFF} " "$display_branch :"
                echo -en "${C_NOT_SYNC}Not in sync with ${GIT_ORIGIN}/$branch${C_OFF}"
                uptodate=0
                printed=1
                project_name_printed=1
            # If the remote doesn't exist
            elif [[ "$rc" -eq 2 ]]; then
                print_project_name_unless_done_already "$dir" $project_name_printed
                printf "${branch_indent}${C_BRANCH}%-${MBL}s${C_OFF} " "$display_branch :"
                echo -en "${C_NO_REMOTE}No remote branch ${GIT_ORIGIN}/$branch${C_OFF}"
                uptodate=0
                printed=1
                project_name_printed=1
            # If there is no local hash (must never happen... but who knows?)
            elif [[ "$rc" -eq 3 ]]; then
                print_project_name_unless_done_already "$dir" $project_name_printed
                printf "${branch_indent}${C_BRANCH}%-${MBL}s${C_OFF} " "$display_branch :"
                echo -en "${C_ERROR}Internal error${C_OFF}"
                uptodate=0
                printed=1
                project_name_printed=1
            # Otherwise it's clean
            else
                if ! $option_only_changes || [[ $printed -eq 1 ]] || [[ $project_name_printed -eq 1 ]] || [[ "$after" != "\n" ]]; then
                    print_project_name_unless_done_already "$dir" $project_name_printed
                    printf "${branch_indent}${C_BRANCH}%-${MBL}s${C_OFF} " "$display_branch :"
                    echo -en "${C_CLEAN}Clean${C_OFF}"
                    printed=1
                    project_name_printed=1
                fi
            fi
            # Print any additional info
            if [[ $printed -eq 1 ]]; then
                if [[ $uptodate -eq 0 ]]; then
                    echo -en " ${after_note}"
                fi
                echo -en "${after}"
            fi
        done
    done
    if [[ $uptodate -eq 0 ]]; then
        exit 1
    fi
    return 0
}
# Verify command
# Cross-checks the filesystem against the projects list: every repository on
# disk and every listed project is reported as Ignored, Missing, Known or
# Unknown. Assumes `projects` still holds all projects while
# `projects_indexes` holds only the non-ignored ones.
function cmd_check()
{
    local found all repo dir
    declare -a projects_all_indexes
    declare -a projects_ignored
    declare -a found
    declare -a all
    # Create the list of all projects, including ignored ones
    readarray -t projects_all_indexes < <(for a in "${!projects[@]}"; do echo "$a"; done | sort)
    # Create the list of ignored projects only
    # (comm -23: entries present in all-projects but absent from the index)
    readarray -t projects_ignored < <(comm -23 <(for a in "${projects_all_indexes[@]}"; do echo "$a"; done | sort) <(for a in "${projects_indexes[@]}"; do echo "$a"; done | sort))
    # Prepare list of all projects, existing or missing, sorted with no duplicates
    found=( $(find ./* -type d -name "$GIT_FOLDER" | sed -e "s#/${GIT_FOLDER}\$##" | cut -c 3- | sort) )
    all=( "${found[@]}" "${projects_all_indexes[@]}" )
    readarray -t all < <(for a in "${all[@]}"; do echo "$a"; done | sort -u)
    # For each repository
    for dir in "${all[@]}"
    do
        # Print the repository name
        echo -e "${C_REPO}$dir${C_OFF}:"
        # Check if the directory is ignored
        if array_contains "$dir" "${projects_ignored[@]}"; then
            printf "${INDENT}%-${MBL}s${C_LOGS} %s${C_OFF}\n" " " "Ignored"
            continue
        fi
        # Check if the directory exists
        if [ ! -d "$dir" ]; then
            printf "${INDENT}%-${MBL}s${C_NO_REMOTE} %s${C_OFF}\n" " " "Missing"
            continue
        fi
        # Check if it is listed as a project and print result
        if exists_project "$dir"; then
            printf "${INDENT}%-${MBL}s${C_CLEAN} %s${C_OFF}\n" " " "Known"
        else
            printf "${INDENT}%-${MBL}s${C_NOT_SYNC} %s${C_OFF}\n" " " "Unknown"
        fi
    done
    return 0
}
# Display usage help.
# Prints the complete help text (commands, options, directory argument) to
# stdout using the configured theme colors.
function usage()
{
echo -e "gws is a helper to manage workspaces which contain git repositories.
Usages: ${C_HELP_PROGNAME}$(basename "$0")${C_OFF} ${C_REPO}<command>${C_OFF} [${C_HELP_DIR}<directory>${C_OFF}]
${C_HELP_PROGNAME}$(basename "$0")${C_OFF} [${C_HELP_DIR}<directory>${C_OFF}]
where ${C_REPO}<command>${C_OFF} is:
${C_REPO}init${C_OFF} - Detect repositories and create the projects list
${C_REPO}update${C_OFF} - Clone any repositories in the projects list that are missing in the workspace
${C_REPO}clone${C_OFF} - Selectively clone specific repositories from projects list
${C_REPO}status${C_OFF} - Print status for all repositories in the workspace
${C_REPO}fetch${C_OFF} - Print status for all repositories in the workspace, but fetch the origin first
${C_REPO}ff${C_OFF} - Print status for all repositories in the workspace, but fast forward to origin first
${C_REPO}check${C_OFF} - Print difference between projects list and workspace (known/unknown/missing)
where ${C_HELP_DIR}<directory>${C_OFF} can be a path to limit the scope of the commands to a specific subfolder
of the workspace.
If no ${C_REPO}<command>${C_OFF} is specified, the command ${C_REPO}status${C_OFF} is assumed.
The commands ${C_REPO}status${C_OFF}, ${C_REPO}fetch${C_OFF} and ${C_REPO}ff${C_OFF} accept the option
--only-changes before the ${C_HELP_DIR}<directory>${C_OFF}. If given, only repositories with changes will be shown.
"
}
# --- Top-level dispatch: parse argv, load the cache, run the chosen command ---
command=status
implicit_command=false
# Identify the desired command
case $1 in
init|clone|update|status|fetch|ff|check)
command="$1"
shift
;;
--version|-v)
command=version
shift
;;
--help|-h)
command=help
shift
;;
*)
# No subcommand given: default to 'status'; $1 (if any) is then treated as a
# directory filter rather than a command.
command=status
implicit_command=true
esac
# Consume option flags that may appear before the positional <directory>.
while [[ "$1" =~ ^- ]]; do
case "$1" in
--only-changes)
option_only_changes=true
;;
*)
echo -e "${C_ERROR}Unknown option: $1${C_OFF}"
exit 1
;;
esac
shift
done
# Except for the special case of "init" in which there is no projects file
if [[ "$command" != "init" ]] && [[ "$command" != "help" ]]; then
# First move to the first parent directory containing a projects file
while ! is_project_root
do
cd ..
done
read_theme
# Read the cache if present, otherwise create it
# NOTE(review): the -e guard on the next line is redundant immediately after touch.
touch "${CACHE_FILE}"
[[ -e "${CACHE_FILE}" ]] && source "${CACHE_FILE}"
# If cache is not up to date, read the projects/ignore files again
if [[ "$CACHED_PROJECTS_HASH" != "$(md5sum ${PROJECTS_FILE} 2>/dev/null || echo NONE)" ]] ||
[[ "$CACHED_IGNORE_HASH" != "$(md5sum ${IGNORE_FILE} 2>/dev/null || echo NONE)" ]]; then
read_projects
read_ignored
projects_indexes=( $(remove_matching projects_indexes[@] ignored_patterns[@]) )
# Replace the cached projects_indexes declaration with the fresh one.
sed -i -e '/^declare -a projects_indexes=/d' "${CACHE_FILE}"
declare -p projects_indexes >> "${CACHE_FILE}"
fi
fi
# Implicit 'status' with an argument: the argument must be a known directory.
if $implicit_command; then
if [[ -n "$1" ]]; then
error_msg="${C_ERROR}The directory '$1' does not exist and is not a recognized command.${C_OFF}"
# The (echo && exit 1) runs in a subshell, so its exit only fails the ||
# chain; the trailing || exit 1 is what actually terminates the script.
projects_list=$(keep_prefixed_projects "$1") || (echo -e "$error_msg" && exit 1) || exit 1
projects_indexes=( ${projects_list} )
fi
fi
# If a path is specified as positional argument, limit projects to the ones matching
# the path
if [[ -n "$1" ]]; then
# But don't error out in the case of "clone", because the directory will probably not exist
if [[ "$command" != "clone" ]]; then
error_msg="${C_ERROR}The directory '$1' does not exist.${C_OFF}"
projects_list=$(keep_prefixed_projects "$1") || (echo -e "$error_msg" && exit 1) || exit 1
projects_indexes=( ${projects_list} )
fi
fi
# Finally execute the selected command
case $command in
init)
cmd_init
;;
clone)
cmd_clone "$@"
;;
update)
cmd_update
;;
status)
cmd_status $S_NONE
;;
fetch)
cmd_status $S_FETCH
;;
ff)
cmd_status $S_FAST_FORWARD
;;
check)
cmd_check
;;
version)
echo -e "gws version ${C_VERSION}$VERSION${C_OFF}"
;;
help)
usage
;;
esac
# vim: fdm=marker ts=4 sts=4 sw=4 et
| true
|
86ca602227459b5e565fced640159980b677dc6d
|
Shell
|
sufyanadam/ansible-minecraft
|
/scripts/ci.sh
|
UTF-8
| 1,525
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# CI driver: provision a container for the target OS, apply the Ansible role,
# and run the serverspec suite (see main() below).
set -euxo pipefail
IFS=$'\n\t'
# Parameters come from positional args, each falling back to a same-named env var.
declare -r OS=${1:-${OS}}
declare -r PROCESS_CONTROL=${2:-${PROCESS_CONTROL}}
declare -r SERVER=${3:-${SERVER}}
# Directory inside the container where the role is copied.
declare -r WORKSPACE=${WORKSPACE:-/tmp/ansible-minecraft}
function cleanup() {
  # Tear down the container for the selected OS and discard its state.
  local service="${OS}"
  docker-compose stop "${service}"
  docker-compose rm -f "${service}"
}
function debug() {
  # Drop into an interactive shell inside the running container (installed as
  # the ERR trap when not running under CI).
  local container
  # Declare and assign separately: `local v="$(cmd)"` returns local's status,
  # masking a docker-compose failure from `set -e` (ShellCheck SC2155).
  container="$(docker-compose ps -q "${OS}")"
  docker exec -it "${container}" /bin/bash
}
function main() {
  # Bring up the target OS container, install the role into it, syntax-check
  # and run the playbook, wait for the server to boot, then run serverspec.
  docker-compose up -d "${OS}"
  local container
  # Declare and assign separately so a docker-compose failure is not masked by
  # `local` and is caught by `set -e` (ShellCheck SC2155).
  container="$(docker-compose ps -q "${OS}")"
  # Install role.
  docker cp . "${container}:${WORKSPACE}"
  # Validate syntax
  docker exec -t "${container}" ansible-playbook \
    -i "${WORKSPACE}/tests/inventory" \
    --syntax-check \
    -v \
    --extra-vars="minecraft_process_control=${PROCESS_CONTROL} minecraft_server=${SERVER}" \
    "${WORKSPACE}/tests/${SERVER}.yml"
  # Install Minecraft.
  docker exec -t "${container}" ansible-playbook \
    -i "${WORKSPACE}/tests/inventory" \
    -c local \
    -v \
    --extra-vars="minecraft_process_control=${PROCESS_CONTROL} minecraft_server=${SERVER}" \
    "${WORKSPACE}/tests/${SERVER}.yml"
  # Sleep to allow Minecraft to boot.
  # FIXME: A retry loop checking if it has launched yet would be better.
  sleep 30
  # Run tests.
  docker exec -t "${container}" rspec "${WORKSPACE}/tests/spec/${SERVER}_spec.rb"
}
# Outside CI (CI env var unset/empty), open a shell in the container on error.
[[ -z "${CI:-}" ]] && trap debug ERR
# Always remove the container on exit, success or failure.
trap cleanup EXIT
main "${@}"
| true
|
0c47bc2d67d8c381566e0158a374beeaeabbfffa
|
Shell
|
shlurbee/dmrs-text-generation-naacl2019
|
/scripts/run_silver.sh
|
UTF-8
| 17,387
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
###
# Helpers to prepare different combinations of gold and silver data and train.
# Please run from repo root.
###
# NOTE(review): the usage text below names run_experiments.sh, but this file
# lives at scripts/run_silver.sh — confirm which name is current.
echo "Usage: From root dir run:"
echo "sh scripts/run_experiments.sh"
preprocess_silver() {
# Binarize the Gigaword ("silver") corpus for OpenNMT training, using the
# gold dev set for validation. Writes shards under data_gw/opennmt*.
DATA_DIR=data_gw
mkdir -p $DATA_DIR
# preprocess gigaword data files to create silver data (use gold data for validation)
python OpenNMT-py/preprocess.py -train_src $DATA_DIR/train/train-src.txt \
-train_tgt $DATA_DIR/train/train-tgt.txt \
-valid_src data/dev/dev-src.txt \
-valid_tgt data/dev/dev-tgt.txt \
-src_seq_length 400 -tgt_seq_length 400 \
-shuffle 1 -data_type text \
-save_data $DATA_DIR/opennmt -report_every 1000
}
preprocess_gold() {
  # Binarize the gold training data for OpenNMT, validating on the gold dev
  # set. Output shards are written under data/opennmt*.
  local -a opts=(
    -train_src data/train/train-src.txt
    -train_tgt data/train/train-tgt.txt
    -valid_src data/dev/dev-src.txt
    -valid_tgt data/dev/dev-tgt.txt
    -src_seq_length 400 -tgt_seq_length 400
    -shuffle 1 -data_type text
    -save_data data/opennmt -report_every 1000
  )
  python OpenNMT-py/preprocess.py "${opts[@]}"
}
preprocess_anon_map() {
# Merge the anonymization maps of the gold and silver training sets into a
# single replacements file used at generation time.
python replacements.py \
--infiles data/train/train-anon.txt data_gw/train/train-anon.txt \
--outfile data/anon-replacements.json
}
preprocess_v15() {
    # Build data_v15: 600K silver examples plus NUM_COPIES (=10) copies of the
    # ~60K gold examples, i.e. roughly a 1:1 silver/gold mix, then binarize
    # for OpenNMT (validation uses the gold dev set).
    NUM_COPIES=10
    DATA_DIR=data_v15
    mkdir -p "$DATA_DIR"
    # Seed the training files with the first 600K silver examples.
    head -n 600000 data_gw/train/train-tgt.txt > "$DATA_DIR/train-tgt.txt"
    head -n 600000 data_gw/train/train-src.txt > "$DATA_DIR/train-src.txt"
    head -n 600000 data_gw/train/train-orig.txt > "$DATA_DIR/train-orig.txt"
    head -n 600000 data_gw/train/train-anon.txt > "$DATA_DIR/train-anon.txt"
    # Append NUM_COPIES copies of the gold data (OpenNMT shuffles later).
    # Arithmetic for-loop replaces the old `expr`-based counter (same behavior,
    # no external process per iteration).
    for (( i = 0; i < NUM_COPIES; i++ )); do
        echo "$i"
        cat data/train/train-tgt.txt >> "$DATA_DIR/train-tgt.txt"
        cat data/train/train-src.txt >> "$DATA_DIR/train-src.txt"
        cat data/train/train-orig.txt >> "$DATA_DIR/train-orig.txt"
        cat data/train/train-anon.txt >> "$DATA_DIR/train-anon.txt"
    done
    # Binarize for OpenNMT in the usual way.
    python OpenNMT-py/preprocess.py -train_src "$DATA_DIR/train-src.txt" \
        -train_tgt "$DATA_DIR/train-tgt.txt" \
        -valid_src data/dev/dev-src.txt \
        -valid_tgt data/dev/dev-tgt.txt \
        -src_seq_length 400 -tgt_seq_length 400 \
        -shuffle 1 -data_type text \
        -save_data "$DATA_DIR/opennmt" -report_every 1000
}
preprocess_v17() {
    # Build data_v17: 60K silver examples plus one copy of all gold examples,
    # then binarize for OpenNMT (validation uses the gold dev set).
    NUM_COPIES=1
    DATA_DIR=data_v17
    mkdir -p "$DATA_DIR"
    # Seed the training files with the first 60K silver examples.
    head -n 60000 data_gw/train/train-tgt.txt > "$DATA_DIR/train-tgt.txt"
    head -n 60000 data_gw/train/train-src.txt > "$DATA_DIR/train-src.txt"
    head -n 60000 data_gw/train/train-orig.txt > "$DATA_DIR/train-orig.txt"
    head -n 60000 data_gw/train/train-anon.txt > "$DATA_DIR/train-anon.txt"
    # Append NUM_COPIES copies of the gold data (OpenNMT shuffles later).
    # BUG FIX: the previous `for i in {1..$NUM_COPIES}` never worked — brace
    # expansion happens before parameter expansion, so the loop iterated once
    # over the literal string "{1..1}" regardless of NUM_COPIES. It only gave
    # the right result because NUM_COPIES happened to be 1. Use the same
    # counted loop style as the other preprocess_* functions.
    for (( i = 0; i < NUM_COPIES; i++ )); do
        echo "$i"
        cat data/train/train-tgt.txt >> "$DATA_DIR/train-tgt.txt"
        cat data/train/train-src.txt >> "$DATA_DIR/train-src.txt"
        cat data/train/train-orig.txt >> "$DATA_DIR/train-orig.txt"
        cat data/train/train-anon.txt >> "$DATA_DIR/train-anon.txt"
    done
    # Binarize for OpenNMT in the usual way.
    python OpenNMT-py/preprocess.py -train_src "$DATA_DIR/train-src.txt" \
        -train_tgt "$DATA_DIR/train-tgt.txt" \
        -valid_src data/dev/dev-src.txt \
        -valid_tgt data/dev/dev-tgt.txt \
        -src_seq_length 400 -tgt_seq_length 400 \
        -shuffle 1 -data_type text \
        -save_data "$DATA_DIR/opennmt" -report_every 1000
}
preprocess_v19() {
# same as v15 but increases max vocab size from default of 50K to 100K (no min freq)
# combines 600K silver data examples with 60K x 10 = 600K gold examples
NUM_COPIES=10
DATA_DIR=data_v19
mkdir -p $DATA_DIR
# copy silver data to directory
head -n 600000 data_gw/train/train-tgt.txt > $DATA_DIR/train-tgt.txt
head -n 600000 data_gw/train/train-src.txt > $DATA_DIR/train-src.txt
head -n 600000 data_gw/train/train-orig.txt > $DATA_DIR/train-orig.txt
head -n 600000 data_gw/train/train-anon.txt > $DATA_DIR/train-anon.txt
# append gold data. (assumes opennmt will shuffle)
i=0
while [ $i -lt $NUM_COPIES ]; do
echo $i
cat data/train/train-tgt.txt >> $DATA_DIR/train-tgt.txt
cat data/train/train-src.txt >> $DATA_DIR/train-src.txt
cat data/train/train-orig.txt >> $DATA_DIR/train-orig.txt
cat data/train/train-anon.txt >> $DATA_DIR/train-anon.txt
i=`expr $i + 1`
done
# preprocess data files, increasing max vocab size (100K source and target)
python OpenNMT-py/preprocess.py -train_src $DATA_DIR/train-src.txt \
-train_tgt $DATA_DIR/train-tgt.txt \
-valid_src data/dev/dev-src.txt \
-valid_tgt data/dev/dev-tgt.txt \
-src_vocab_size 100000 \
-tgt_vocab_size 100000 \
-src_seq_length 400 -tgt_seq_length 400 \
-shuffle 1 -data_type text \
-save_data $DATA_DIR/opennmt -report_every 1000
}
preprocess_v20() {
# same as v15 but increases max vocab size from default of 50K to 100K (no min freq)
# combines 60K silver data examples with 60K gold
NUM_COPIES=1
DATA_DIR=data_v20
mkdir -p $DATA_DIR
# copy silver data to directory
head -n 60000 data_gw/train/train-tgt.txt > $DATA_DIR/train-tgt.txt
head -n 60000 data_gw/train/train-src.txt > $DATA_DIR/train-src.txt
head -n 60000 data_gw/train/train-orig.txt > $DATA_DIR/train-orig.txt
head -n 60000 data_gw/train/train-anon.txt > $DATA_DIR/train-anon.txt
# append gold data. (assumes opennmt will shuffle)
i=0
while [ $i -lt $NUM_COPIES ]; do
echo $i
cat data/train/train-tgt.txt >> $DATA_DIR/train-tgt.txt
cat data/train/train-src.txt >> $DATA_DIR/train-src.txt
cat data/train/train-orig.txt >> $DATA_DIR/train-orig.txt
cat data/train/train-anon.txt >> $DATA_DIR/train-anon.txt
i=`expr $i + 1`
done
# preprocess data files, increasing max vocab size (100K source and target)
python OpenNMT-py/preprocess.py -train_src $DATA_DIR/train-src.txt \
-train_tgt $DATA_DIR/train-tgt.txt \
-valid_src data/dev/dev-src.txt \
-valid_tgt data/dev/dev-tgt.txt \
-src_vocab_size 100000 \
-tgt_vocab_size 100000 \
-src_seq_length 400 -tgt_seq_length 400 \
-shuffle 1 -data_type text \
-save_data $DATA_DIR/opennmt -report_every 1000
}
preprocess_v21() {
# Same as v19 (100K max vocab) but additionally requires a minimum token
# frequency of 2 for vocabulary inclusion.
# combines 600K silver data examples with 60K x 10 = 600K gold examples
NUM_COPIES=10
DATA_DIR=data_v21
mkdir -p $DATA_DIR
# copy silver data to directory
head -n 600000 data_gw/train/train-tgt.txt > $DATA_DIR/train-tgt.txt
head -n 600000 data_gw/train/train-src.txt > $DATA_DIR/train-src.txt
head -n 600000 data_gw/train/train-orig.txt > $DATA_DIR/train-orig.txt
head -n 600000 data_gw/train/train-anon.txt > $DATA_DIR/train-anon.txt
# append gold data. (assumes opennmt will shuffle)
i=0
while [ $i -lt $NUM_COPIES ]; do
echo $i
cat data/train/train-tgt.txt >> $DATA_DIR/train-tgt.txt
cat data/train/train-src.txt >> $DATA_DIR/train-src.txt
cat data/train/train-orig.txt >> $DATA_DIR/train-orig.txt
cat data/train/train-anon.txt >> $DATA_DIR/train-anon.txt
i=`expr $i + 1`
done
# preprocess data files, increasing max vocab size and requiring min freq 2
python OpenNMT-py/preprocess.py -train_src $DATA_DIR/train-src.txt \
-train_tgt $DATA_DIR/train-tgt.txt \
-valid_src data/dev/dev-src.txt \
-valid_tgt data/dev/dev-tgt.txt \
-src_vocab_size 100000 \
-tgt_vocab_size 100000 \
-src_words_min_frequency 2 \
-tgt_words_min_frequency 2 \
-src_seq_length 400 -tgt_seq_length 400 \
-shuffle 1 -data_type text \
-save_data $DATA_DIR/opennmt -report_every 1000
}
preprocess_v23() {
# same as v19 but replaces rare unknowns in the training data to reduce vocab size
# combines 600K silver data examples with 60K x 10 = 600K gold examples
NUM_COPIES=10
DATA_DIR=data_v23
mkdir -p $DATA_DIR
# copy silver data to directory
head -n 600000 data_gw/train/train-tgt.txt > $DATA_DIR/train-tgt.txt
head -n 600000 data_gw/train/train-src.txt > $DATA_DIR/train-src.txt
head -n 600000 data_gw/train/train-orig.txt > $DATA_DIR/train-orig.txt
head -n 600000 data_gw/train/train-anon.txt > $DATA_DIR/train-anon.txt
# append gold data. (assumes opennmt will shuffle)
i=0
while [ $i -lt $NUM_COPIES ]; do
echo $i
cat data/train/train-tgt.txt >> $DATA_DIR/train-tgt.txt
cat data/train/train-src.txt >> $DATA_DIR/train-src.txt
cat data/train/train-orig.txt >> $DATA_DIR/train-orig.txt
cat data/train/train-anon.txt >> $DATA_DIR/train-anon.txt
i=`expr $i + 1`
done
# replace rare unknown placeholders with _UNK0 to reduce vocab size
# (in-place rewrite of the data_v23/train-* files via preprocessing.py)
python -c "import preprocessing; preprocessing.replace_rare_tokens('data_v23/train', 'data/vocab.txt', min_word_freq=2)"
echo 'Replaced rare unknown placeholders with _UNK0'
# preprocess data files
python OpenNMT-py/preprocess.py -train_src $DATA_DIR/train-src.txt \
-train_tgt $DATA_DIR/train-tgt.txt \
-valid_src data/dev/dev-src.txt \
-valid_tgt data/dev/dev-tgt.txt \
-src_vocab_size 100000 \
-tgt_vocab_size 100000 \
-src_seq_length 400 -tgt_seq_length 400 \
-shuffle 1 -data_type text \
-save_data $DATA_DIR/opennmt -report_every 1000
}
train_v14() {
    # Train model v14 on silver (Gigaword) data only; the gold dev set was
    # baked into data_gw/opennmt as validation at preprocessing time.
    # $1: GPU id to train on.
    MODEL_VERSION=v14 # used in naming model files
    MODEL_PREFIX=models/$MODEL_VERSION
    GPU_ID=$1
    # Remove checkpoints from any previous run; -f keeps the first run quiet
    # (previously a bare `rm` printed an error when no checkpoints existed).
    rm -f "$MODEL_PREFIX"*
    python OpenNMT-py/train.py -data data_gw/opennmt \
        -layers 2 -dropout 0.5 \
        -word_vec_size 500 -batch_type sents -max_grad_norm 5 -param_init_glorot \
        -encoder_type brnn -decoder_type rnn -rnn_type LSTM -rnn_size 800 \
        -save_model "$MODEL_PREFIX" \
        -learning_rate 0.001 -start_decay_at 100 -opt adam -epochs 40 -gpuid "$GPU_ID" > "logs/train_$MODEL_VERSION.log"
}
train_v15() {
    # Train model v15 on the mixed silver+gold set prepared by preprocess_v15.
    # $1: GPU id to train on.
    MODEL_VERSION=v15 # used in naming model files
    MODEL_PREFIX=models/$MODEL_VERSION
    GPU_ID=$1
    # Remove checkpoints from any previous run; -f keeps the first run quiet.
    rm -f "$MODEL_PREFIX"*
    python OpenNMT-py/train.py -data data_v15/opennmt \
        -layers 2 -dropout 0.5 \
        -word_vec_size 500 -batch_type sents -max_grad_norm 5 -param_init_glorot \
        -encoder_type brnn -decoder_type rnn -rnn_type LSTM -rnn_size 800 \
        -save_model "$MODEL_PREFIX" \
        -learning_rate 0.001 -start_decay_at 100 -opt adam -epochs 35 -gpuid "$GPU_ID" > "logs/train_$MODEL_VERSION.log"
}
train_v16() {
    # Train model v16: fine-tune the silver-pretrained v14 checkpoint on gold
    # data. NOTE(review): the -train_from path hard-codes a specific v14
    # checkpoint filename (accuracy/ppl in the name) — it must exist.
    # $1: GPU id to train on.
    MODEL_VERSION=v16
    MODEL_PREFIX=models/$MODEL_VERSION
    GPU_ID=$1
    # Remove checkpoints from any previous run; -f keeps the first run quiet.
    rm -f "$MODEL_PREFIX"*
    python OpenNMT-py/train.py -data data_gw/opennmt \
        -layers 2 -dropout 0.5 \
        -train_from models/v14_acc_69.44_ppl_5.96_e40.pt \
        -word_vec_size 500 -batch_type sents -max_grad_norm 5 -param_init_glorot \
        -encoder_type brnn -decoder_type rnn -rnn_type LSTM -rnn_size 800 \
        -save_model "$MODEL_PREFIX" \
        -learning_rate 0.001 -start_decay_at 100 -opt adam -epochs 60 -gpuid "$GPU_ID" > "logs/train_$MODEL_VERSION.log"
}
train_v17() {
    # Train model v17 on the mixed silver+gold set prepared by preprocess_v17.
    # $1: GPU id to train on.
    MODEL_VERSION=v17
    MODEL_PREFIX=models/$MODEL_VERSION
    GPU_ID=$1
    # Remove checkpoints from any previous run; -f keeps the first run quiet.
    rm -f "$MODEL_PREFIX"*
    python OpenNMT-py/train.py -data data_v17/opennmt \
        -layers 2 -dropout 0.5 \
        -word_vec_size 500 -batch_type sents -max_grad_norm 5 -param_init_glorot \
        -encoder_type brnn -decoder_type rnn -rnn_type LSTM -rnn_size 800 \
        -save_model "$MODEL_PREFIX" \
        -learning_rate 0.001 -start_decay_at 100 -opt adam -epochs 35 -gpuid "$GPU_ID" > "logs/train_$MODEL_VERSION.log"
}
train_v18() {
    # Train model v18 on gold data only, as a baseline.
    # $1: GPU id to train on.
    MODEL_VERSION=v18
    MODEL_PREFIX=models/$MODEL_VERSION
    GPU_ID=$1
    # Remove checkpoints from any previous run; -f keeps the first run quiet.
    rm -f "$MODEL_PREFIX"*
    python OpenNMT-py/train.py -data data/opennmt \
        -layers 2 -dropout 0.5 \
        -word_vec_size 500 -batch_type sents -max_grad_norm 5 -param_init_glorot \
        -encoder_type brnn -decoder_type rnn -rnn_type LSTM -rnn_size 800 \
        -save_model "$MODEL_PREFIX" \
        -learning_rate 0.001 -start_decay_at 100 -opt adam -epochs 35 -gpuid "$GPU_ID" > "logs/train_$MODEL_VERSION.log"
}
train_v19() {
    # Train model v19 on the 100K-vocab silver+gold set from preprocess_v19.
    # $1: GPU id to train on.
    MODEL_VERSION=v19 # used in naming model files
    MODEL_PREFIX=models/$MODEL_VERSION
    GPU_ID=$1
    # Remove checkpoints from any previous run; -f keeps the first run quiet.
    rm -f "$MODEL_PREFIX"*
    python OpenNMT-py/train.py -data data_v19/opennmt \
        -layers 2 -dropout 0.5 \
        -word_vec_size 500 -batch_type sents -max_grad_norm 5 -param_init_glorot \
        -encoder_type brnn -decoder_type rnn -rnn_type LSTM -rnn_size 800 \
        -save_model "$MODEL_PREFIX" \
        -learning_rate 0.001 -start_decay_at 100 -opt adam -epochs 35 -gpuid "$GPU_ID" > "logs/train_$MODEL_VERSION.log"
}
train_v20() {
    # Train model v20 on the 100K-vocab 60K+60K set from preprocess_v20;
    # checkpointing starts at epoch 10.
    # $1: GPU id to train on.
    MODEL_VERSION=v20 # used in naming model files
    MODEL_PREFIX=models/$MODEL_VERSION
    GPU_ID=$1
    # Remove checkpoints from any previous run; -f keeps the first run quiet.
    rm -f "$MODEL_PREFIX"*
    python OpenNMT-py/train.py -data data_v20/opennmt \
        -layers 2 -dropout 0.5 \
        -word_vec_size 500 -batch_type sents -max_grad_norm 5 -param_init_glorot \
        -encoder_type brnn -decoder_type rnn -rnn_type LSTM -rnn_size 800 \
        -save_model "$MODEL_PREFIX" -start_checkpoint_at 10 \
        -learning_rate 0.001 -start_decay_at 100 -opt adam -epochs 35 -gpuid "$GPU_ID" > "logs/train_$MODEL_VERSION.log"
}
train_v21() {
    # Train model v21.
    # NOTE(review): this trains on data_v19/opennmt even though preprocess_v21
    # writes data_v21/opennmt (min-freq-2 vocab). Unlike train_v22, there is no
    # comment saying v19 data is intentional — confirm whether this should be
    # data_v21/opennmt; behavior is preserved here pending that confirmation.
    # $1: GPU id to train on.
    MODEL_VERSION=v21 # used in naming model files
    MODEL_PREFIX=models/$MODEL_VERSION
    GPU_ID=$1
    # Remove checkpoints from any previous run; -f keeps the first run quiet.
    rm -f "$MODEL_PREFIX"*
    python OpenNMT-py/train.py -data data_v19/opennmt \
        -layers 2 -dropout 0.5 \
        -word_vec_size 500 -batch_type sents -max_grad_norm 5 -param_init_glorot \
        -encoder_type brnn -decoder_type rnn -rnn_type LSTM -rnn_size 800 \
        -save_model "$MODEL_PREFIX" \
        -learning_rate 0.001 -start_decay_at 100 -opt adam -epochs 35 -gpuid "$GPU_ID" > "logs/train_$MODEL_VERSION.log"
}
train_v22() {
    # Train model v22: same data as v19 but a larger model
    # (800-dim embeddings, 1000-unit RNN).
    # $1: GPU id to train on.
    MODEL_VERSION=v22 # used in naming model files
    MODEL_PREFIX=models/$MODEL_VERSION
    GPU_ID=$1
    # Remove checkpoints from any previous run; -f keeps the first run quiet.
    rm -f "$MODEL_PREFIX"*
    python OpenNMT-py/train.py -data data_v19/opennmt \
        -layers 2 -dropout 0.5 \
        -word_vec_size 800 -batch_type sents -max_grad_norm 5 -param_init_glorot \
        -encoder_type brnn -decoder_type rnn -rnn_type LSTM -rnn_size 1000 \
        -save_model "$MODEL_PREFIX" \
        -learning_rate 0.001 -start_decay_at 100 -opt adam -epochs 35 -gpuid "$GPU_ID" > "logs/train_$MODEL_VERSION.log"
}
train_v23() {
    # Train model v23 on the rare-token-collapsed set from preprocess_v23.
    # $1: GPU id to train on.
    MODEL_VERSION=v23 # used in naming model files
    MODEL_PREFIX=models/$MODEL_VERSION
    GPU_ID=$1
    # Remove checkpoints from any previous run; -f keeps the first run quiet.
    rm -f "$MODEL_PREFIX"*
    python OpenNMT-py/train.py -data data_v23/opennmt \
        -layers 2 -dropout 0.5 \
        -word_vec_size 500 -batch_type sents -max_grad_norm 5 -param_init_glorot \
        -encoder_type brnn -decoder_type rnn -rnn_type LSTM -rnn_size 800 \
        -save_model "$MODEL_PREFIX" \
        -learning_rate 0.001 -start_decay_at 100 -opt adam -epochs 35 -gpuid "$GPU_ID" > "logs/train_$MODEL_VERSION.log"
}
run_data_checks() {
    # Sanity-check every prepared dataset by printing its line count; the
    # expected approximate size is echoed before each count.
    # `wc -l < file` replaces the useless `cat file | wc -l` (same output,
    # no extra process, no filename in the output).
    echo "Num lines in data (gold data) (should be ~60K)"
    wc -l < data/train/train-src.txt
    echo "Num lines in data_gw (used for train_v14) (should be ~800K)"
    wc -l < data_gw/train/train-src.txt
    echo "Num lines in data_v15 (should be ~1.2M)"
    wc -l < data_v15/train-src.txt
    echo "Num lines in data_v17 (should be ~120K)"
    wc -l < data_v17/train-src.txt
    echo "Num lines in data_v19 (should be ~1.2M)"
    wc -l < data_v19/train-src.txt
    echo "Num lines in data_v20 (should be ~120K)"
    wc -l < data_v20/train-src.txt
    echo "Num lines in data_v21 (should be ~1.2M)"
    wc -l < data_v21/train-src.txt
    echo "Num lines in data_v23 (should be ~1.2M)"
    wc -l < data_v23/train-src.txt
}
#preprocess_silver
#preprocess_gold
#preprocess_anon_map
#preprocess_v15
#preprocess_v17
#preprocess_v19
#preprocess_v20
#preprocess_v21
#preprocess_v23
#run_data_checks
#train_v14 2
#train_v15 0
#train_v16 2
#train_v17 2 &
#train_v18 1
#train_v19 2
#train_v20 2
#train_v21 1
#train_v22 0
#train_v23 2
| true
|
67a5f4b52f47121cad6b78527d1fe8111b53d165
|
Shell
|
KeenS/sheltar
|
/test/testSheltar.sh
|
UTF-8
| 4,146
| 3.546875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2015, Sunrin SHIMURA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Sunrin SHIMURA BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Paths used throughout the suite: the sheltar binary under test, the backup
# destination, the list file naming what to back up, and the fixture files.
SHELTAR="../sheltar"
BACKUP_DIR=backup
BACKUP_LIST_FILE=list.txt
BACKUP_FILES="a.txt b/1.txt b/2.txt c/1.txt c/2.txt d/e/f.txt"
testBaseBackup(){
# First run: exactly one archive is created and it contains every listed file
# plus a 'c/' directory entry (since 'c' is listed without a trailing slash).
${SHELTAR} backup "${BACKUP_DIR}" "${BACKUP_LIST_FILE}"
assertEquals "Backup file should be created" \
1 "$(ls "${BACKUP_DIR}" | wc -l | tr -d '[ \t\n]' )"
assertEquals "Backup file should contain all the listed file" \
"$({ echo "${BACKUP_FILES}"; echo c/; } | tr \ \\n | sort)" "$(tar tJf ${BACKUP_DIR}/$(ls ${BACKUP_DIR}) | sed -e 's|//*|/|g' | sort)"
}
testIncrementalBackup(){
# Modify one file; the next backup must be incremental: a second archive
# containing only the modified file.
# need to sleep to shift mtime (backup selection appears mtime-based)
sleep 1
echo aaa >> a.txt
${SHELTAR} backup "${BACKUP_DIR}" "${BACKUP_LIST_FILE}"
assertEquals "New backup file should be created" \
2 "$(ls "${BACKUP_DIR}" | wc -l | tr -d '[ \t\n]' )"
assertEquals "Incremental backup file should contain only newly modified files" \
a.txt "$(tar tJf ${BACKUP_DIR}/$(ls -t ${BACKUP_DIR} | head -n 1))"
}
testIncrementalBackupDir(){
# Directory semantics: entries ending in '/' in the list are tracked per-file,
# entries without '/' are tracked as one unit. Only b/1.txt should land in the
# new incremental archive.
# need to sleep to shift mtime
sleep 1
echo bbb >> b/1.txt
echo ccc >> c/1.txt
${SHELTAR} backup "${BACKUP_DIR}" "${BACKUP_LIST_FILE}"
assertEquals "New backup file should be created" \
3 "$(ls "${BACKUP_DIR}" | wc -l | tr -d '[ \t\n]' )"
# NOTE(review): unlike testBaseBackup, the sed/sort here are piped after
# `head -n 1` (i.e. applied to the archive *name*, not the member list) —
# confirm whether they belong after `tar tJf` instead.
assertEquals "Incremental backup file should contain only newly modified files.
Concerning directory, files under directory which ends with '/' in list file should separately managed.
directory which ends without '/' in list file should be managed as one dir
" \
"b/1.txt" "$(tar tJf ${BACKUP_DIR}/$(ls -t ${BACKUP_DIR} | head -n 1 | sed -e 's|//*|/|g' | sort ))"
}
testExtractOne(){
# Delete every fixture, then restore a single file; its newest backed-up
# content ("aaa" appended in testIncrementalBackup) must come back.
rm -rf $BACKUP_FILES
${SHELTAR} extract "${BACKUP_DIR}" a.txt
assertTrue "a.txt should be restored" "[ -s a.txt ]"
assertEquals "The content of a.txt should be 'aaa'" \
"aaa" "$(cat a.txt)"
}
testExtractAll(){
# Delete every fixture, then a full extract must restore the entire tree.
rm -rf $BACKUP_FILES
sleep 1
${SHELTAR} extract "${BACKUP_DIR}"
assertTrue "a.txt should exist" "[ -s a.txt ]"
assertTrue "b/1.txt should exist" "[ -e b/1.txt ]"
assertTrue "b/2.txt should exist" "[ -e b/2.txt ]"
assertTrue "c/1.txt should exist" "[ -e c/1.txt ]"
assertTrue "c/2.txt should exist" "[ -e c/2.txt ]"
assertTrue "d/e/f.txt should exist" "[ -e d/e/f.txt ]"
}
oneTimeSetUp(){
# Build the fixture tree (b/, c/, d/e/) with empty files, and write the
# backup list: 'b/' and 'd/' end with a slash (per-file tracking) while
# 'c' does not (tracked as one directory).
mkdir -p b c d/e ${BACKUP_DIR}
for FILE in $BACKUP_FILES
do
touch "${FILE}"
done
cat <<EOF > "${BACKUP_LIST_FILE}"
a.txt
b/
c
d/
EOF
}
oneTimeTearDown(){
# Remove every artifact the suite created so reruns start from a clean slate.
rm -rf ${BACKUP_DIR} ${BACKUP_FILES} ${BACKUP_LIST_FILE}
rm -rf b c d
}
# Load shunit2, which discovers and runs all test* functions defined above.
. shunit2-2.1.6/src/shunit2
| true
|
5830e774a5b993e2f916b603b29cb2f3c6136993
|
Shell
|
Elbandi/lighttpd
|
/debian/lighttpd.postinst
|
UTF-8
| 508
| 2.734375
| 3
|
[
"OML",
"RSA-MD",
"BSD-3-Clause"
] |
permissive
|
#! /bin/sh -e
# postinst script for lighttpd
# Invoked by dpkg after unpack; $1 is the maintainer-script action
# (see deb-postinst(5)). Only the "configure" action needs work here.
if [ "$1" = "configure" ]; then
# Install a placeholder index page, but never overwrite an existing one.
if [ ! -r /var/www/index.lighttpd.html ];
then
cp /usr/share/lighttpd/index.html /var/www/index.lighttpd.html
fi
# Runtime directory for the daemon; all output silenced (e.g. already exists).
mkdir -p /var/run/lighttpd > /dev/null 2> /dev/null
# Hand the log/run/cache directories to the www-data service user.
chown www-data:www-data /var/log/lighttpd /var/run/lighttpd
chown www-data:www-data /var/cache/lighttpd /var/cache/lighttpd/compress /var/cache/lighttpd/uploads
chmod 0750 /var/log/lighttpd /var/run/lighttpd
fi
#DEBHELPER#
exit 0
| true
|
4c1efc3e8b27ed0dafdb875b4dcf1d8ff70c6bf8
|
Shell
|
DavidGamba/bin
|
/explorer
|
UTF-8
| 214
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Open a WSL file/directory in Windows Explorer by translating the Linux path
# into a \\wsl$\<distro>\... UNC path.
WSL_DISTRO=Ubuntu-20.04
file=$1
shift
# Make the path absolute so the UNC translation below is valid.
if [[ $file != /* ]]; then
file="$PWD/$file"
fi
# Prefix \\wsl$\<distro>, then replace every / in the path with \.
# (\\\\ -> \\, \$ -> literal $, ${file//\//\\} -> global / -> \ substitution)
location="\\\\wsl\$\\$WSL_DISTRO${file//\//\\}"
echo "$location"
#sed -e 's#/#\\#g'
explorer.exe "$location"
| true
|
5768bc45c612712b578cf55ce7a53c50da5b2ce8
|
Shell
|
openstack/requirements
|
/tools/cruft.sh
|
UTF-8
| 4,029
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash -ex
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script, when run from the root directory of this repository, will
# search the default and feature branches of all projects listed in the
# projects.txt file for declared dependencies, then output a list of any
# entries in the global-requirements.txt file which are not actual
# dependencies of those projects. Old dependencies which were removed
# from projects or which were used only for projects which have since
# been removed should be cleaned up, but many entries likely represent
# recent additions which still have pending changes to add them to one
# or more projects. In most cases, git pickaxe will yield the answer.
# Remove the raw list if a copy already exists, since we're going to
# append to it in this loop.
rm -f raw-requirements.txt
for PROJECT in $(cat projects.txt); do
# Reuse existing clones in case this is being rerun.
if [ ! -d $PROJECT ]; then
mkdir -p $PROJECT
# In case this makes it into a CI job, use local copies.
if [ -d /opt/git/$PROJECT/.git ]; then
git clone file:///opt/git/$PROJECT $PROJECT
else
git clone https://git.openstack.org/$PROJECT.git $PROJECT
fi
fi
pushd $PROJECT
git remote update
# Loop over the default (HEAD) and any feature branches.
for BRANCH in $(
git branch -a \
| grep '^ remotes/origin/\(feature/\|HEAD \)' \
| cut -d' ' -f3
); do
git checkout $BRANCH
# These are files which are considered by the update.py script,
# so check them all for the sake of completeness.
for FILE in \
requirements-py2.txt \
requirements-py3.txt \
requirements.txt \
test-requirements-py2.txt \
test-requirements-py3.txt \
test-requirements.txt \
tools/pip-requires \
tools/test-requires \
doc/requirements.txt
do
if [ -f $FILE ]; then
# Add diagnostic comments to aid debugging.
echo -e "\n# -----\n# $PROJECT $BRANCH $FILE\n# -----" \
>> ${OLDPWD}/raw-requirements.txt
cat $FILE >> ${OLDPWD}/raw-requirements.txt
fi
done
done
popd
done
# Generate a unique set of package names from the raw list of all
# project requirements filtered for the same lines ignored by the
# update.py script, lower-cased with hyphens normalized to underscores.
sed -e '/^\($\|#\|http:\/\/tarballs.openstack.org\/\|-e\|-f\)/d' \
-e 's/^\([^<>=! ]*\).*/\L\1/' -e s/-/_/g raw-requirements.txt \
| sort -u > all-requirements.txt
# From here on, xtrace gets uselessly noisy.
set +x
# Loop over the set of package names from the global requirements list.
for CANDIDATE in $(
sed -e '/^\($\|#\)/d' -e 's/^\([^<>=!; ]*\).*/\1/' global-requirements.txt
); do
# Search for the package name in the set of project requirements,
# normalizing hyphens to underscores, and output the package name if
# not found.
grep -iq ^$(echo $CANDIDATE | sed s/-/_/g)$ all-requirements.txt \
|| echo $CANDIDATE
done | sort > cruft-requirements.txt
# Provide a helpful summary of the results.
if [ -s cruft-requirements.txt ] ; then
echo -e "\nCruft entries found in global-requirements.txt:\n"
cat cruft-requirements.txt
else
echo -e "\nSomething must be wrong--I found no cruft!!!"
fi
| true
|
066fcdf464bc54fa320fedf001da1d85c1c68460
|
Shell
|
prashantpok/Cloud-Computing
|
/hello.sh
|
UTF-8
| 395
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Beginner demo: echoes, a here-document used as a multi-line "comment",
# shell arithmetic, and summing two numbers read from stdin.
echo "welcome to shell scripting":
echo "welcome to linux world"
echo "current user $ whoami"
# A here-doc redirected to no command: everything up to the delimiter 'c'
# is read and discarded — effectively a block comment.
<< c
echo "$(whoami)!!!!!"
echo " hostname $(hostname)"
echo "username $(uname)"
echo "added new line"
c
((sum=23+36))
echo "sum of 23 and 36= $sum"
# Read two numbers from stdin; the prompts are commented out, so input is
# expected silently in order: first number, then second number.
read number1
#echo "enter a no"
read number2
#echo "enter second no"
((sum = number1 + number2))
echo "sum =$sum"
#ls >>"history"
| true
|
b1d237ce17d5e0c941dfec523febdca07ceb2391
|
Shell
|
radez/vTNG
|
/clean.sh
|
UTF-8
| 555
| 2.609375
| 3
|
[] |
no_license
|
# Tear down the virtual test network: destroy and undefine every libvirt
# domain built from the host lists in vars.sh, then remove any leftover
# qcow2 disk images.  Expects SPINES, LEAVES, SWITCHES, NODES and LV_DIR
# to be defined by vars.sh.
source ./vars.sh

# delete Virtual machines
# Switch VMs are named <spine-or-leaf>-<switch>.
for l in $SPINES $LEAVES; do
for x in $SWITCHES; do
virsh destroy $l-$x
virsh undefine $l-$x --remove-all-storage
done
done
# Compute-node VMs are named <leaf>-<node> and hang off leaves only.
for l in $LEAVES; do
for x in $NODES; do
virsh destroy $l-$x
virsh undefine $l-$x --remove-all-storage
done
done

# ensure deleted Disk images
# --remove-all-storage above should have done this; belt-and-braces pass
# for images virsh no longer knows about.
for l in $SPINES $LEAVES; do
for x in $SWITCHES; do
rm -f $LV_DIR/$l-$x.qcow2
done
done
for l in $LEAVES; do
for n in $NODES; do
rm -f $LV_DIR/$l-$n.qcow2
done
done
| true
|
18f062efc124af81d3b8d9cf0828308bca3b5c8d
|
Shell
|
NetBSD/pkgsrc
|
/mail/courier-maildir/files/sharedindexupdate.sh
|
UTF-8
| 945
| 3.3125
| 3
|
[] |
no_license
|
#!@SH@
#
# $NetBSD: sharedindexupdate.sh,v 1.4 2007/10/15 15:37:09 jlam Exp $
#
# This script updates the index of shared folders on the system and is
# only needed if Courier-IMAP or SqWebMail used to serve mail and shared
# folders are used.  If so, then this script should be run regularly as a
# system process to ensure the shared folder index is kept up-to-date.
#
# The courier-authlib package must be separately installed to provide the
# "authenumerate" program used in this script.
#
# @TOKEN@ placeholders (@SH@, @RM@, @MKDIR@, @TEST@, @PKG_SYSCONFDIR@,
# @PREFIX@) are substituted with concrete paths at pkgsrc build time.

sysconfdir="@PKG_SYSCONFDIR@"
sbindir="@PREFIX@/sbin"

# Build the new index in a scratch directory so a failure never clobbers
# the live index; sharedindexinstall swaps it in at the end.
@RM@ -rf $sysconfdir/shared.tmp
@MKDIR@ $sysconfdir/shared.tmp || exit 1

# split on the first character of the username
@TEST@ -x $sbindir/authenumerate || exit 1
$sbindir/authenumerate -s > $sysconfdir/shared.tmp/.tmplist || exit 1
$sbindir/sharedindexsplit $sysconfdir/shared.tmp 1 < $sysconfdir/shared.tmp/.tmplist || exit 1
@RM@ -f $sysconfdir/shared.tmp/.tmplist

$sbindir/sharedindexinstall
| true
|
1a7a87751e5f8726de4d48f53aabb1ebd0c7b0d6
|
Shell
|
ayshabaij/xml_parsing
|
/elasticsearch-2.3.1/postinst
|
UTF-8
| 3,332
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/sh

set -e

#
# This script is executed in the post-installation phase
#
#   On Debian,
#       $1=configure : is set to 'configure' and if $2 is set, it is an upgrade
#
#   On RedHat,
#       $1=1 : indicates a fresh install
#       $1=2 : indicates an upgrade

# Sets the default values for elasticsearch variables used in this script
ES_USER="elasticsearch"
ES_GROUP="elasticsearch"

# Source the default env file
ES_ENV_FILE="/etc/default/elasticsearch"
if [ -f "$ES_ENV_FILE" ]; then
    . "$ES_ENV_FILE"
fi

IS_UPGRADE=false

case "$1" in

    # Debian ####################################################
    configure)

        # If $1=configure and $2 is set, this is an upgrade.
        # NB: "$2" must be quoted -- an unquoted empty $2 collapses the
        # test to [ -n ], which is always true (one-argument test), so
        # fresh installs were wrongly treated as upgrades and the
        # service-enable hints below were never printed.
        if [ -n "$2" ]; then
            IS_UPGRADE=true
        fi
    ;;
    abort-upgrade|abort-remove|abort-deconfigure)
    ;;

    # RedHat ####################################################
    1)
        # If $1=1 this is an install
        IS_UPGRADE=false
    ;;
    2)
        # If $1=2 this is an upgrade
        IS_UPGRADE=true
    ;;

    *)
        echo "post install script called with unknown argument \`$1'" >&2
        exit 1
    ;;
esac

# Fresh install: never auto-start, just print per-init-system enable hints.
# Upgrade: restart only when the admin opted in via RESTART_ON_UPGRADE.
if [ "x$IS_UPGRADE" != "xtrue" ]; then
    if command -v systemctl >/dev/null; then
        echo "### NOT starting on installation, please execute the following statements to configure elasticsearch service to start automatically using systemd"
        echo " sudo systemctl daemon-reload"
        echo " sudo systemctl enable elasticsearch.service"
        echo "### You can start elasticsearch service by executing"
        echo " sudo systemctl start elasticsearch.service"

    elif command -v chkconfig >/dev/null; then
        echo "### NOT starting on installation, please execute the following statements to configure elasticsearch service to start automatically using chkconfig"
        echo " sudo chkconfig --add elasticsearch"
        echo "### You can start elasticsearch service by executing"
        echo " sudo service elasticsearch start"

    elif command -v update-rc.d >/dev/null; then
        echo "### NOT starting on installation, please execute the following statements to configure elasticsearch service to start automatically using chkconfig"
        echo " sudo update-rc.d elasticsearch defaults 95 10"
        echo "### You can start elasticsearch service by executing"
        echo " sudo /etc/init.d/elasticsearch start"
    fi
elif [ "$RESTART_ON_UPGRADE" = "true" ]; then

    echo -n "Restarting elasticsearch service..."
    if command -v systemctl >/dev/null; then
        systemctl daemon-reload
        systemctl restart elasticsearch.service || true

    elif [ -x /etc/init.d/elasticsearch ]; then
        if command -v invoke-rc.d >/dev/null; then
            invoke-rc.d elasticsearch stop || true
            invoke-rc.d elasticsearch start || true
        else
            /etc/init.d/elasticsearch restart || true
        fi

    # older suse linux distributions do not ship with systemd
    # but do not have an /etc/init.d/ directory
    # this tries to start the elasticsearch service on these
    # as well without failing this script
    elif [ -x /etc/rc.d/init.d/elasticsearch ] ; then
        /etc/rc.d/init.d/elasticsearch restart || true
    fi
    echo " OK"
fi

exit 0

# Built for Distribution: Deb-2.3.1 (deb)
| true
|
a3b7e28e70de418ca38297470d26f8481d8c6fc2
|
Shell
|
tomaashcroft/Grafana-Tasks
|
/getstats-io.sh
|
UTF-8
| 2,032
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
""" Copyright (c) 2015, Genome Research Ltd
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Grafana-Tasks nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."""
# Gets disk stats and plonk them into rrdtool.
#
# distributred via cfengine
#
# Location of the database
DATABASE=/nfs/ssg_group/lustre-monitoring/rrds/${HOSTNAME}io.rrd
while [ 1 ] ; do
readn=0
writen=0
readt=0
writet=0
# read stats
while read line ; do
set -- $line
((readn=$readn+$4))
((readt=$readt+$7))
((writen=$writen+$8))
((writet=$writet+${11}))
done < <(cat /proc/diskstats | grep sd)
timestamp=`/bin/date +%s`
/usr/bin/rrdtool update $DATABASE $timestamp:$readn:$writen:$readt:$writet
sleep 30
done
| true
|
e281a7c6d31007d46af9cafd585fa259fc3fa7e9
|
Shell
|
anlaneg/tools
|
/build-ovs/build_pktgen.sh
|
UTF-8
| 778
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download, extract and build pktgen-dpdk against a prebuilt DPDK tree,
# then symlink the resulting binary as ./dpdk-pktgen.

export DPDK_BASE_DIR=$(pwd)
export DPDK_VERSION=dpdk-16.11
export DPDK_TARGET=x86_64-native-linuxapp-gcc
#export DPDK_BUILD=$DPDK_BASE_DIR/$DPDK_VERSION/$DPDK_TARGET
export PKTGEN=pktgen-dpdk-pktgen-3.0.17
# Path of the already-built DPDK SDK the pktgen Makefile links against.
export RTE_SDK='/home/dpdk'

echo "Cleaning.."
if [ -d "$DPDK_BASE_DIR/$PKTGEN" ]; then
	rm -rf "$DPDK_BASE_DIR/$PKTGEN"
fi

# Fetch the tarball only if it is not cached from a previous run.
if [ ! -e "$DPDK_BASE_DIR/$PKTGEN.tar.gz" ]; then
	echo "Downloading.."
	wget http://dpdk.org/browse/apps/pktgen-dpdk/snapshot/$PKTGEN.tar.gz --directory-prefix="$DPDK_BASE_DIR"
fi

echo "Extracting.."
tar xf "$DPDK_BASE_DIR/$PKTGEN.tar.gz" -C "$DPDK_BASE_DIR"
cd "$DPDK_BASE_DIR/$PKTGEN"
make RTE_SDK=$RTE_SDK RTE_TARGET=$DPDK_TARGET
ln -s "$DPDK_BASE_DIR/$PKTGEN/app/app/x86_64-native-linuxapp-gcc/pktgen" "$DPDK_BASE_DIR/dpdk-pktgen"
# NOTE: a stray trailing "~" line (a vim artifact) used to be executed as a
# command here and has been removed.
| true
|
7e48177c46288208f458763e3e74f4d988a3f744
|
Shell
|
x3ro/tinyos-legacy
|
/tinyos-2.x-contrib/diku/common/apps/TTXDemo/PC_display/start_meter.sh
|
ISO-8859-15
| 1,052
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch one "trend" live-plot window per accelerometer FIFO, tiling the
# windows vertically so they exactly fill the current X display.

TREND=trend-20050723/trend
FIFOPATH=/tmp/

#   D X  Bl #117bff
#   D Y  Rd
#   D Z  Grn
#   A X  Sort
#   A Y  Gul

# Screen size as "WIDTHxHEIGHT" from the X server.
GEOMETRY=$(xdpyinfo | grep dimensions | cut -d ':' -f 2 | tr -s ' ' | cut -d ' ' -f 2)
HEIGHT=$(echo $GEOMETRY | cut -d 'x' -f 2)
WIDTH=$(echo $GEOMETRY | cut -d 'x' -f 1)

echo "Screen is ${WIDTH}x${HEIGHT}"

# One window per line: "<fifo> <hex colour> <window title...>".
WINDOWS="$FIFOPATH/accel_a_x ffffff Analog X-axis
$FIFOPATH/accel_a_y ffff00 Analog Y-axis
$FIFOPATH/accel_a_z ff0000 Analog Z-axis"
#                $FIFOPATH/accel_d_y 117bf Digital Y-axis
#                $FIFOPATH/accel_d_z 00ff00 Digital Z-axis"

WINNO=$(echo "$WINDOWS" | wc -l)
echo "We are going to create ${WINNO} windows"

# Divide the screen height evenly; $(( )) replaces the deprecated $[ ] form.
WINHEIGHT=$(( HEIGHT / WINNO ))
WINPOS=0

# WINPOS is only read inside this pipeline subshell, so the usual
# "assignments in a piped while-loop are lost" pitfall does not apply.
echo "$WINDOWS" | while read -r i; do
	FILENAME=$(echo $i | cut -d ' ' -f 1)
	COLOR=$(echo $i | cut -d ' ' -f 2)
	TITLE=$(echo $i | cut -d ' ' -f 3-)
	$TREND -geometry ${WIDTH}x${WINHEIGHT}+0+${WINPOS} -F \
		-I \#${COLOR} \
		-t "${TITLE}" -s -d -v -f d -- ${FILENAME} 300 0 8200 &
#		-t "${TITLE}" -s -d -v -f d -- ${FILENAME} 300 -5 5 &
	WINPOS=$(( WINPOS + WINHEIGHT ))
done;
| true
|
98b5bf4bd6b5d11f6ee52cd7049eead7d247f514
|
Shell
|
kinsamanka/mksocfpga
|
/Scripts/SD-Image-Gen/gen_rootfs-stretch.sh
|
UTF-8
| 18,825
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
#------------------------------------------------------------------------------------------------------
# Variables
#------------------------------------------------------------------------------------------------------
set -e
CURRENT_DIR=`pwd`
# Positional parameters (see gen_install_in-img):
#   $1 scratch/work directory, $2 rootfs target directory,
#   $3 optional SD-card image file, $4 root partition suffix, $5 distro name.
WORK_DIR=$1
ROOTFS_DIR=$2
SD_IMG=$3
IMG_ROOT_PART=$4
distro=$5
MOUNT_DIR=
# Mount point and loop device used when installing into an image file.
ROOTFS_MNT=/mnt/rootfs
ROOTFS_IMG=${WORK_DIR}/rootfs.img
DRIVE=/dev/mapper/loop0
DEFGROUPS="sudo,kmem,adm,dialout,machinekit,video,plugdev"
#------------------------------------------------------------------------------------------------------
# build armhf Debian qemu-debootstrap chroot port
#------------------------------------------------------------------------------------------------------
install_dep() {
    # Install host-side prerequisites for building an armhf chroot with
    # qemu-debootstrap, then show that binfmt handlers are registered.
    sudo apt-get -y install qemu binfmt-support qemu-user-static schroot debootstrap libc6
    # sudo dpkg --add-architecture armhf
    sudo apt update
    # NOTE(review): --force-yes is deprecated (removed in newer apt) -- confirm.
    sudo apt -y --force-yes upgrade
    sudo update-binfmts --display | grep interpreter
}
##,rpcbind
##,ntpdate,avahi-discover
## ntpdate,dhcpcd5,isc-dhcp-client,
# run_bootstrap() {
# qoutput1='sudo qemu-debootstrap --foreign --arch=armhf --variant=buildd --keyring /usr/share/keyrings/debian-archive-keyring.gpg --include=sudo,locales,nano,adduser,apt-utils,libssh2-1,openssh-client,openssh-server,openssl,kmod,dbus,dbus-x11,xorg,xserver-xorg-video-dummy,upower,rsyslog,libpam-systemd,systemd-sysv,net-tools,lsof,less,accountsservice,iputils-ping,python,ifupdown2,iproute2,isc-dhcp-client,dhcpcd5,avahi-daemon,uuid-runtime,avahi-discover,libnss-mdns,debianutils,traceroute,strace ${distro} ${ROOTFS_DIR} http://ftp.debian.org/debian'
# echo " "
# echo "Note: Eval.Start.."
# eval $qoutput1
# echo " "
# echo "Note: Eval..Done ."
#
# }
function run_jessie_bootstrap {
    # First-stage qemu-debootstrap of an armhf Debian jessie rootfs into
    # $ROOTFS_DIR with the full package set baked in; the second stage runs
    # under qemu-user emulation inside the target tree.
    sudo qemu-debootstrap --foreign --arch=armhf --variant=buildd --keyring /usr/share/keyrings/debian-archive-keyring.gpg --include=sudo,locales,nano,adduser,apt-utils,libssh2-1,openssh-client,openssh-server,openssl,kmod,dbus,dbus-x11,xorg,xserver-xorg-video-dummy,upower,rsyslog,udev,libpam-systemd,systemd-sysv,net-tools,lsof,less,accountsservice,iputils-ping,python,ifupdown,iproute2,dhcpcd5,avahi-daemon,uuid-runtime,avahi-discover,libnss-mdns,debianutils,traceroute,strace,cgroupfs-mount,ntp,autofs ${distro} ${ROOTFS_DIR} http://ftp.debian.org/debian
}
## ifupdown2,
function run_stretch_bootstrap {
    # Same as run_jessie_bootstrap but for Debian stretch; kept separate so
    # the per-release package lists can diverge.
    sudo qemu-debootstrap --foreign --arch=armhf --variant=buildd --keyring /usr/share/keyrings/debian-archive-keyring.gpg --include=sudo,locales,nano,adduser,apt-utils,libssh2-1,openssh-client,openssh-server,openssl,kmod,dbus,dbus-x11,xorg,xserver-xorg-video-dummy,upower,rsyslog,udev,libpam-systemd,systemd-sysv,net-tools,lsof,less,accountsservice,iputils-ping,python,ifupdown,iproute2,dhcpcd5,avahi-daemon,uuid-runtime,avahi-discover,libnss-mdns,debianutils,traceroute,strace,cgroupfs-mount,ntp,autofs ${distro} ${ROOTFS_DIR} http://ftp.debian.org/debian
}
#run_jessie-host_bootstrap() {
#sudo qemu-debootstrap --arch=armhf --variant=buildd --keyring /usr/share/keyrings/debian-archive-keyring.gpg $distro $ROOTFS_DIR http://ftp.debian.org/debian/
#}
gen_policy_rc_d() {
    # Install a policy-rc.d into the target rootfs that denies all service
    # starts/stops during package installation (exit 101 = "action denied").
    # invoke-rc.d only honours this file when it is an executable script, so
    # give it a shebang and the exec bit -- the original wrote a bare text
    # file that was silently ignored.
    sudo sh -c 'cat <<EOT > '$ROOTFS_DIR'/usr/sbin/policy-rc.d
#!/bin/sh
echo "************************************" >&2
echo "All rc.d operations denied by policy" >&2
echo "************************************" >&2
exit 101
EOT'
    sudo chmod 0755 $ROOTFS_DIR/usr/sbin/policy-rc.d
}
gen_sudoers() {
    # Write the target's /etc/sudoers: standard Debian defaults plus
    # passwordless sudo for the machinekit user and group.
    sudo sh -c 'cat <<EOT > '$ROOTFS_DIR'/etc/sudoers
#
# This file MUST be edited with the 'visudo' command as root.
#
# Please consider adding local content in /etc/sudoers.d/ instead of
# directly modifying this file.
#
# See the man page for details on how to write a sudoers file.
#
Defaults	env_reset
Defaults	mail_badpass
Defaults	secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

# Host alias specification

# User alias specification

# Cmnd alias specification

# User privilege specification
root	ALL=(ALL:ALL) ALL
machinekit	ALL=(ALL:ALL) ALL

# Allow members of group sudo to execute any command
%sudo	ALL=(ALL:ALL) ALL

# See sudoers(5) for more information on "#include" directives:

#includedir /etc/sudoers.d
machinekit ALL=(ALL:ALL) NOPASSWD: ALL
%machinekit ALL=(ALL:ALL) NOPASSWD: ALL
EOT'
}
gen_sources_list() {
    # Write the target's APT sources: main/contrib/non-free for the chosen
    # release plus its -updates and security suites (Danish mirror).
    sudo sh -c 'cat <<EOT > '$ROOTFS_DIR'/etc/apt/sources.list
deb http://ftp.dk.debian.org/debian '$distro' main contrib non-free
deb-src http://ftp.dk.debian.org/debian '$distro' main contrib non-free

deb http://ftp.dk.debian.org/debian '$distro'-updates main contrib non-free
deb-src http://ftp.dk.debian.org/debian/ '$distro'-updates main contrib non-free

deb http://security.debian.org/ '$distro'/updates main contrib non-free
deb-src http://security.debian.org '$distro'/updates main contrib non-free
EOT'
}
gen_fstab(){
    # Write the target's /etc/fstab: ext3 root plus tmpfs for /tmp and shm.
    sudo sh -c 'cat <<EOT > '$ROOTFS_DIR'/etc/fstab
# /etc/fstab: static file system information.
#
# <file system>	<mount point>	<type>	<options>	<dump>	<pass>
/dev/root	/	ext3	noatime,errors=remount-ro	0	1
tmpfs	/tmp	tmpfs	defaults	0	0
none	/dev/shm	tmpfs	rw,nosuid,nodev,noexec	0	0
EOT'
}
# 127.0.0.1 localhost.localdomain localhost mksocfpga
# ::1 localhost.localdomain localhost mksocfpga
# ff02::1 ip6-allnodes
# ff02::2 ip6-allrouters
# ::1 localhost ip6-localhost ip6-loopback
# ff02::1 ip6-allnodes
# ff02::2 ip6-allrouters
gen_hosts() {
    # Write the target's /etc/hosts.  Fixed: the static entry used a comma
    # instead of a dot in the IP address ("192.168.2,9"), which made the
    # line invalid.
    #echo -e "127.0.1.1\tmksocfpga" | sudo tee -a $ROOTFS_DIR/etc/hosts
    sudo sh -c 'cat <<EOT > '$ROOTFS_DIR'/etc/hosts
127.0.0.1	localhost.localdomain localhost mksocfpga
192.168.2.9	mksocfpga.holotronic.lan mksocfpga
EOT'
}
gen_wired_network() {
    # Write a systemd-networkd unit that brings up eth0 with DHCPv4.
    # The commented-out variant below kept IPv6/UseDomains tweaks for
    # reference.
# sudo sh -c 'cat <<EOT > '$ROOTFS_DIR'/etc/systemd/network/20-dhcp.network
# [Match]
# Name=eth0
#
# [Network]
# DHCP=ipv4
# #IPv6PrivacyExtensions=true
# #IPv6AcceptRouterAdvertisements=False
# IPv6AcceptRouterAdvertisements=kernel
#
# [DHCP]
# UseDomains=true
#
# EOT'
    sudo sh -c 'cat <<EOT > '$ROOTFS_DIR'/etc/systemd/network/10-wired.network
[Match]
Name=eth0

[Network]
DHCP=ipv4
EOT'
}
gen_locale_gen() {
    # Write the target's /etc/locale.gen.
    #
    # Only the two locales this image actually builds are listed.  The
    # previous version embedded the stock ~450-line template with every
    # other locale commented out; locale-gen ignores comment lines, so the
    # generated configuration is functionally identical and far easier to
    # maintain.  The full list of valid locales lives in
    # /usr/share/i18n/SUPPORTED on the target.
    sudo sh -c 'cat <<EOT > '$ROOTFS_DIR'/etc/locale.gen
# This file lists locales that you wish to have built. You can find a list
# of valid supported locales at /usr/share/i18n/SUPPORTED, and you can add
# user defined locales to /usr/local/share/i18n/SUPPORTED. If you change
# this file, you need to rerun locale-gen.
en_GB.UTF-8 UTF-8
en_US.UTF-8 UTF-8
EOT'
}
setup_configfiles() {
    # Generate every target-side configuration file: sudoers, APT sources,
    # fstab, hostname, hosts, networkd unit, inittab serial console,
    # locale settings and a dummy-video Xorg config.
    echo "Setting up config files "
    gen_policy_rc_d
    gen_sudoers
    gen_sources_list
    gen_fstab
    sudo sh -c 'echo mksocfpga > '$ROOTFS_DIR'/etc/hostname'
    gen_hosts
    sudo mkdir -p $ROOTFS_DIR/etc/systemd/network
    gen_wired_network

#sudo sh -c 'cat <<EOT >> '$ROOTFS_DIR'/etc/network/interfaces
#auto lo eth0
#iface lo inet loopback
#allow-hotplug eth0
# iface eth0 inet dhcp
#EOT'

    # Serial console on ttyS0.  Fixed: the getty path was the relative
    # "rootfs/sbin/getty" (a build-host leftover); it must be /sbin/getty
    # on the target.
    sudo sh -c 'echo T0:2345:respawn:/sbin/getty -L ttyS0 115200 vt100 >> '$ROOTFS_DIR'/etc/inittab'

    gen_locale_gen

    # Fixed: locale.conf takes bare locale names; the trailing " UTF-8"
    # charset column belongs only in locale.gen.
    sudo sh -c 'cat <<EOT > '$ROOTFS_DIR'/etc/locale.conf
LANG=en_US.UTF-8
LC_COLLATE=C
LC_TIME=en_GB.UTF-8
EOT'

    # Headless X server: dummy video driver, no monitor probing.
    # Fixed: "tDevice" typo made the Screen section reference nothing.
    sudo sh -c 'cat <<EOT > '$ROOTFS_DIR'/etc/X11/xorg.conf
Section "Screen"
    Identifier "Default Screen"
    Device "Dummy"
    DefaultDepth 24
EndSection

Section "Device"
    Identifier "Dummy"
    Driver "dummy"
    Option "IgnoreEDID" "true"
    Option "NoDDC" "true"
EndSection
EOT'

    echo "Config files genetated"
}
run_func() {
    # Install host dependencies, run the release-specific debootstrap into
    # $ROOTFS_DIR, then generate all target config files.
    install_dep

# output=$( run_bootstrap )
# if [ $? -eq 0 ]; then
# echo ""
# echo "ECHO_Good: qoutput1 value is = ${output}"
# echo ""
# else
# echo ""
# echo "ECHO_err: run_debootstrap output = nonzero:Error"
# echo "ECHO_err: qoutput value is = ${output}"
# echo ""
# sudo sh -c 'sed -i.bak s/"set -e"/"set -x"/g '$ROOTFS_MNT'/debootstrap/debootstrap'
# echo ""
# echo "qemu stage2 mod applied "
# echo " "
# echo "Runnung stage 2 manually -----!"
# sudo chroot $ROOTFS_MNT /debootstrap/debootstrap --second-stage
# echo "stage manual run done -----!"
# echo ""
# fi

    # Dispatch on the requested release; abort on anything unexpected.
    if [ "$distro" = "jessie" ]; then
        echo "MSG: running bootstrap for jessie os"
        run_jessie_bootstrap
    elif [ "$distro" = "stretch" ]; then
        echo "MSG: running bootstrap for stretch os"
        run_stretch_bootstrap
    else
        echo "MSG: Dist detect failure distro = $distro"
        exit 1
    fi
    echo "will now run setup_configfiles "
    setup_configfiles
}
gen_install_in-img() {
    # If an SD image was given, map its partitions with kpartx, mount the
    # root partition and build the rootfs inside it; otherwise build the
    # rootfs directly into $ROOTFS_DIR on the host.
    if [ ! -z "$SD_IMG" ]; then
# DRIVE=`bash -c 'sudo losetup --show -f '$SD_IMG''`
        # -a add mappings, -s wait for udev, -v verbose.  NOTE(review):
        # $DRIVE is hard-coded to /dev/mapper/loop0, so this assumes loop0
        # is free -- confirm on hosts with other loop devices in use.
        sudo kpartx -a -s -v ${SD_IMG}
# sudo partprobe $DRIVE
        sudo mkdir -p $ROOTFS_MNT
        sudo mount ${DRIVE}$IMG_ROOT_PART $ROOTFS_MNT
        echo "ECHO: ""chroot is mounted in: ${ROOTFS_MNT}"
        # Redirect all generators at the mounted image partition.
        ROOTFS_DIR=$ROOTFS_MNT
        echo "ECHO: "'rootfs_dir ='$ROOTFS_DIR
        run_func
        sudo umount $ROOTFS_MNT
        echo "ECHO: ""chroot was unounted "
        echo "ECHO: ""rootfs is now installed in imagefile:"$SD_IMG
# sudo losetup -D
        sudo kpartx -d -s -v ${SD_IMG}
        sync
    else
        echo "ECHO: ""no Imagefile parameter given chroot will only be made in current local folder:"
        echo "ECHO: "'rootfs_dir ='$ROOTFS_DIR
        run_func
    fi
}
#----------------------- Run functions ----------------------------#
# Entry point: everything above only defines functions; the actual work
# happens in gen_install_in-img.
echo "#---------------------------------------------------------------------------------- "
echo "#--------------------+++ gen-rootfs.sh Start +++--------------------------- "
echo "#---------------------------------------------------------------------------------- "
set -e
gen_install_in-img
echo "#---------------------------------------------------------------------------------- "
echo "#--------------------+++ gen-rootfs.sh End +++--------------------------- "
echo "#---------------------------------------------------------------------------------- "
| true
|
235a53ef29c7d0321beef41925d44edf0bf78091
|
Shell
|
memorius/policy-auction
|
/conf/cassandra-dev-dirs.sh
|
UTF-8
| 1,066
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Config for cassandra install / start scripts in parent dir
replace_data_and_log_dir_placeholders () {
  # Regenerate each named config file from its ".template" sibling,
  # substituting the %%cassandra_log_dir%% and %%cassandra_data_dir%%
  # placeholders with the supplied directories.
  local target_data_dir="$1"
  local target_log_dir="$2"
  shift 2
  local conf_file
  for conf_file in "$@"; do
    rm -f "$conf_file"
    cp "$conf_file.template" "$conf_file"
    sed -i -r \
      -e "s!%%cassandra_log_dir%%!${target_log_dir}!" \
      -e "s!%%cassandra_data_dir%%!${target_data_dir}!" \
      "$conf_file"
  done
}
replace_data_dir_placeholders () {
  # Like replace_data_and_log_dir_placeholders, but fills in only the
  # %%cassandra_data_dir%% placeholder.
  local target_data_dir="$1"
  shift
  local conf_file
  for conf_file in "$@"; do
    rm -f "$conf_file"
    cp "$conf_file.template" "$conf_file"
    sed -i -r "s!%%cassandra_data_dir%%!${target_data_dir}!" "$conf_file"
  done
}
platform_readlink () {
    # Canonicalize a path with GNU readlink semantics, preferring
    # 'greadlink' (coreutils name on macOS) over the system 'readlink'.
    # Relies on exit_with_error being defined by the sourcing script.
    local readlink_bin="$(which greadlink || which readlink)"
    if [ -z "$readlink_bin" ]; then
        exit_with_error "Cannot execute 'readlink' or 'greadlink'"
    fi
    # -m canonicalize allowing missing components, -n no trailing newline,
    # -q suppress most error messages.
    "$readlink_bin" -mnq "$@"
}
# Relative to repos root dir
# Layout consumed by the install/start scripts in the parent directory:
# binaries, logs, data, per-environment config and unit-test scratch data.
cassandra_bin='cassandra/cassandra-bin'
cassandra_log='cassandra/cassandra-log'
cassandra_data='cassandra/cassandra-data'
cassandra_conf='conf/cassandra-dev'
cassandra_unittest_data='target/cassandra-unit-test-data'
| true
|
bdaebe051e51afd1edf98c38182c1e5a9c3cb670
|
Shell
|
yuelicn/docker-kong
|
/run.sh
|
UTF-8
| 922
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# run.sh
# Copyright (C) 2019 yueli <yueli@tusdao.com>
#
# Distributed under terms of the GPL license.
#
# Bootstrap the Kong database schema ("init") or run Kong ("run") in Docker.

image=yueli/kong:1.0.3

# Database connection settings shared by both commands.  The unquoted
# expansion of this string below is intentional: each "-e NAME=VALUE" pair
# word-splits into its own docker argument (no value contains whitespace).
# Fixed: "ONG_DATABASE" was a typo -- Kong reads KONG_DATABASE, so the
# database backend selection was silently ignored.
enviroments="
-e KONG_PREFIX=/usr/local/kong
-e KONG_DATABASE=postgres
-e KONG_PG_HOST=192.168.64.58
-e KONG_PG_PORT=5432
-e KONG_PG_USER=kong
-e KONG_PG_PASSWORD=kong-dev
-e KONG_PG_DATABASE=kong
"

# Initialize the database (one-off schema migration).
# Note: plain "name() {" definitions keep this script POSIX sh compatible;
# the bash-only "function" keyword was dropped to match the #!/bin/sh shebang.
init(){
    docker run -it --rm $enviroments \
        -e KONG_PROXY_LISTEN='0.0.0.0:8000, 0.0.0.0:8443 ssl' \
        -e KONG_ADMIN_LISTEN='0.0.0.0:8001, 0.0.0.0:8444 ssl' \
        $image kong migrations bootstrap
}

# Start Kong with proxy and admin ports published.
run(){
    docker run -it --rm $enviroments \
        -e KONG_PROXY_LISTEN='0.0.0.0:8000, 0.0.0.0:8443 ssl' \
        -e KONG_ADMIN_LISTEN='0.0.0.0:8001, 0.0.0.0:8444 ssl' \
        -p 8000:8000 -p 8443:8443 -p 8001:8001 -p 8444:8444 \
        $image kong docker-start
}

case $1 in
    "init")
        init;;
    "run")
        run;;
    * )
        echo "run/init"
esac
| true
|
8fe31c7b859c25b66291827a3a68546212a0732c
|
Shell
|
LoveRfy/kubernetes-vagrant-debian-cluster
|
/prepare.sh
|
UTF-8
| 2,032
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a Debian Vagrant box for a Kubernetes cluster: enable root SSH
# login, install shell tooling (zsh/oh-my-zsh), configure a Docker HTTP
# proxy, then either initialise the control plane or join an existing one.
#
#   $1 : "1" to init the first (control-plane) node, anything else to join
#   $2 : forwarded to /vagrant/scripts/init.k8s.sh (node address)

echo root:vagrant|chpasswd
apt-get install expect -y

# Exercise a root login once.  The whole expect program must reach expect
# as a single argument; the original nested double quotes split it apart,
# so it is single-quoted here.
expect -c '
set timeout 20
spawn su - root
expect "Password:"
send "vagrant\r"
interact
'
id

# Allow password and root logins over SSH (Vagrant dev box only).
cat >> /etc/ssh/sshd_config <<EOF
UseDNS no
PasswordAuthentication yes
PermitRootLogin yes
EOF

# Self-authorise root's own key for local loopback SSH.
ssh-keygen -t rsa -f /root/.ssh/id_rsa -P ""
sshkey=$(cat ~/.ssh/id_rsa.pub)
echo "${sshkey}" > /root/.ssh/authorized_keys
chmod 600 /root/.ssh/authorized_keys
echo 'Welcome to Vagrant-built virtual machine. -.-' > /etc/motd
systemctl restart sshd

apt-get update
apt-get install -y apt-transport-https ca-certificates procps curl net-tools iproute2 htop git zsh
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
who am i
id

# Extra zsh plugins.
pushd /root/.oh-my-zsh/plugins
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git
git clone https://github.com/zsh-users/zsh-autosuggestions
popd

# Custom prompt theme; \$ escapes keep the expansions literal in the file.
cat > /root/.oh-my-zsh/themes/robbyrussell.zsh-theme <<EOF
nodename=\$(hostname -f)
local ret_status="%(?:%{\$fg_bold[green]%} \${nodename} ➜ :%{\$fg_bold[red]%}➜ )"
PROMPT='\${ret_status} %{\$fg[cyan]%}%c%{\$reset_color%} \$(git_prompt_info)'

ZSH_THEME_GIT_PROMPT_PREFIX="%{\$fg_bold[blue]%}git:(%{\$fg[red]%}"
ZSH_THEME_GIT_PROMPT_SUFFIX="%{\$reset_color%} "
ZSH_THEME_GIT_PROMPT_DIRTY="%{\$fg[blue]%}) %{\$fg[yellow]%}✗"
ZSH_THEME_GIT_PROMPT_CLEAN="%{\$fg[blue]%})"
EOF

# Route Docker pulls through the host-side proxy.
mkdir -p /etc/systemd/system/docker.service.d
cat > /etc/systemd/system/docker.service.d/http-proxy.conf <<EOF
[Service]
Environment="HTTP_PROXY=http://192.168.100.1:1087" "HTTPS_PROXY=http://192.168.100.1:1087"
EOF

[ -f "/vagrant/scripts/init.k8s.sh" ] || exit 1

if [ "$1" == 1 ];then
    echo "start init node"
    # Fresh init: discard any stale join token, then record the new one for
    # the worker nodes.
    [ -f "/vagrant/install.token" ] && rm -rf /vagrant/install.token
    bash -x /vagrant/scripts/init.k8s.sh $2
    token=$(cat /tmp/join)
    cat > /vagrant/install.token <<EOF
${token} --ignore-preflight-errors=Swap
EOF
else
    echo "start join node"
    bash -x /vagrant/scripts/init.k8s.sh $2 join
    bash -x /vagrant/install.token
fi
| true
|
2e9fb85dc3e8fdc5c0afcec1bc93aad45c3ed8f6
|
Shell
|
ilyak93/inheritance_templates_practice
|
/MoreAVLWithPythonAux/ex1/auto-test-generation/run_test
|
UTF-8
| 1,572
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Generate a random test case, build the project, run it under valgrind
# and diff the output against the expected result.  Repeats for a fixed
# ladder of input sizes.

function run_test {
    # $1 = number of input lines, $2 = test id suffix.
    if (( $# != 2 )); then
        echo "Usage: ./run_test <number_of_lines> <test_id>"
        exit
    fi
    num_of_lines=$1
    test_name="randtest"$2
    out_file=${test_name}.res

    if (( num_of_lines <= 0 )); then
        echo "<number_of_lines> must be positive"
        exit
    fi

    echo "generating a random test... ${test_name}.in ${test_name}.out"
    python2 testcreate.py $num_of_lines $test_name

    echo
    # Fixed: the message used to advertise "-DNDEBUG -Wall" while the
    # actual compile ran plain "g++ -g"; the message now matches reality.
    echo "compiling... g++ -g *.cpp -o ${test_name}.exe"
    g++ -g *.cpp -o ${test_name}.exe

    echo "running... ./${test_name}.exe < $test_name.in > $out_file"
    echo "running valgrind..."
    valgrind --log-file=valgrind.out --leak-check=full ./${test_name}.exe < $test_name.in > $out_file
    # valgrind prints "0 bytes in 0 blocks" exactly once when nothing leaked.
    if (( `grep -c "0 bytes in 0 blocks" valgrind.out` == 1 )); then
        echo "MEMORY CHECK PASSED"
    else
        echo "MEMORY CHECK FAILED"
    fi

    log_name=${test_name}.log
    echo "diff result: wanted_output VS my_output" > $log_name
    diff ${test_name}.out $out_file >> $log_name
    diff_ret=$?
    if (( diff_ret == 0 )); then
        echo "TEST PASSED"
        rm $out_file $log_name
    else
        echo "TEST FAILED"
        python2 testcreate.py $test_name
        echo "the differences are stored in ${log_name} "
    fi
    rm ${test_name}.exe
    rm "valgrind.out"
}

test_num=0
lines_num=(1000 1000 5000 5000 10000 10000 20000 20000 30000 30000 50000 50000 100000)
for num_of_lines in ${lines_num[@]} ; do
    run_test "$num_of_lines" "$test_num"
    # POSIX arithmetic instead of the legacy 'let' builtin.
    test_num=$((test_num + 1))
done
| true
|
7b7318ef80d64209e2baecdc0a0c506aa89620c9
|
Shell
|
SergLih/bash_project_guessing
|
/guessinggame.sh
|
UTF-8
| 511
| 4.21875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash

# Interactive game: the player guesses how many (non-hidden) entries the
# current directory contains, with high/low hints after each wrong guess.
function guessinggame {
  # Count non-hidden entries with a glob instead of parsing `ls | wc -l`,
  # which miscounts filenames that contain newlines.
  local restore_nullglob=0
  shopt -q nullglob && restore_nullglob=1
  shopt -s nullglob
  local entries=(*)
  (( restore_nullglob )) || shopt -u nullglob
  local cur_dir_count_files=${#entries[@]}

  echo "Hi, how many files are there in the current directory? Enter the number:"
  local ans_usr
  while read -r ans_usr; do
    # Reject non-numeric input explicitly; the old `[[ ... -lt ... ]]` test
    # arithmetic-evaluated arbitrary text (typically as 0) and answered "low".
    if ! [[ "$ans_usr" =~ ^[0-9]+$ ]]; then
      echo "Please enter a non-negative whole number."
      continue
    fi
    if (( ans_usr > cur_dir_count_files )); then
      echo "Your guess is high, try to guess again."
    elif (( ans_usr < cur_dir_count_files )); then
      echo "Your guess is low, try to guess again."
    else
      echo "I congratulate you, you guessed right! Bye"
      break
    fi
  done
}

guessinggame
| true
|
d68ac198273f36b8e844845960b6a1bbf66df211
|
Shell
|
hsandt/LD45
|
/build_itest.sh
|
UTF-8
| 1,496
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build a PICO-8 cartridge for the integration tests.
# This is essentially a proxy script for pico-boots/scripts/build_cartridge.sh
# with the right parameters. (The old header named build_game.sh, which is not
# what is invoked below.)
# Extra options are passed to build_cartridge.sh (with "$@").
# This is useful in particular for --symbols.

# Configuration: paths
picoboots_scripts_path="$(dirname "$0")/pico-boots/scripts"
game_src_path="$(dirname "$0")/src"
data_path="$(dirname "$0")/data"
build_dir_path="$(dirname "$0")/build"

# Configuration: cartridge
version=$(< "$data_path/version.txt")  # $(< file) replaces the old backtick cat
author="komehara"
title="wit fighter itests (all) v$version"
cartridge_stem="wit_fighter_itest_all"
config='itest'
# for now, we don't set `cheat` symbol to make it lighter, but it's still possible
# to test cheats in headless itests as busted preserves all (non-#pico8) code
symbols='assert,log,itest'

# Define build output folder from config
build_output_path="${build_dir_path}/v${version}_${config}"

# Build from itest main for all itests.
# "$@" forwards extra CLI options as the header documents; the old script
# claimed to do this but never actually passed them through.
"$picoboots_scripts_path/build_cartridge.sh" \
  "$game_src_path" itest_main.lua itests \
  -d "$data_path/data.p8" -M "$data_path/metadata.p8" \
  -a "$author" -t "$title" \
  -p "$build_output_path" \
  -o "${cartridge_stem}_v${version}" \
  -c "$config" \
  -s "$symbols" \
  --minify-level 3 \
  --unify '' \
  "$@"
| true
|
f69e229f26f753a136e60d4d76332cddbeac6565
|
Shell
|
bouk/nixpkgs-1
|
/config/bash/bashrc.sh
|
UTF-8
| 3,068
| 3.078125
| 3
|
[] |
no_license
|
# If not running interactively, don't do anything
[ -z "$PS1" ] && return

# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# allow recursive ** globs
shopt -s globstar

# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"

# Machine-local overrides, if present.
# [ -e ] replaces the obsolete unary [ -a ] (same behavior, portable form).
if [ -e ~/.local_bashrc ]; then
  source ~/.local_bashrc
fi

export NIX_PROFILE="$HOME/.nix-profile"

# Some aliases
. "$NIX_PROFILE/config/bash/aliases.sh"
# Set env vars
. "$NIX_PROFILE/config/bash/env.sh"
# Log every executed command to the full-history log via add-hist.
prompt() {
  # https://www.jefftk.com/p/you-should-be-logging-shell-history
  # I'm using a refined version from the one shown in the post.
  # It uses logfmt to disambiguate spaces, and also logs the timezone.
  # I also hooked ctrl-r up to read from this log.
  # `history 1` is the most recent history entry; add-hist appends it to the log.
  $NIX_PROFILE/bin/add-hist "$(history 1)"
}
# Runs before each prompt is shown, so the previous command is logged
# as soon as it finishes.
PROMPT_COMMAND=prompt
set -o vi # Use vi-mode editing on the command line.

source "$NIX_PROFILE/etc/profile.d/bash_completion.sh"
# source bash completions (every file in the profile's completion dir;
# path expansions are now quoted per SC2086)
for completion in "$NIX_PROFILE"/etc/bash_completion.d/*; do
  . "$completion"
done
# git tab completion with 'g' alias
source "$NIX_PROFILE/share/bash-completion/completions/git"
__git_complete g __git_main

# Add timestamps to bash history.
export HISTTIMEFORMAT="%F %T "

# Set color-related env variables according to the current color scheme
eval "$(colorscheme restore-colors)"

. "$NIX_PROFILE/share/fzf/key-bindings.bash"
# Ctrl-R history search, backed by the full-history log instead of bash history.
__fzf_history__() {
  # This overrides the __fzf_history__ implementation from key-bindings.bash.
  # It reads from ~/.full_history.logfmt rather than ~/.bash_history.
  local output
  output=$($NIX_PROFILE/bin/fzf-hist |
    FZF_DEFAULT_OPTS="--height ${FZF_TMUX_HEIGHT:-40%} $FZF_DEFAULT_OPTS --tac --sync --tiebreak=index --bind=ctrl-r:toggle-sort $FZF_CTRL_R_OPTS +m" $(__fzfcmd)
  ) || return
  # Keep only the text after the first tab in the selected entry.
  READLINE_LINE=${output#*$'\t'}
  if [ -z "$READLINE_POINT" ]; then
    # Not invoked from a readline key binding: just print the command.
    echo "$READLINE_LINE"
  else
    # Large offset effectively places the cursor at the end of the line.
    READLINE_POINT=0x7fffffff
  fi
}
# Make fzf's default file listing use ripgrep, skipping .git and node_modules.
export FZF_DEFAULT_COMMAND='rg --files --no-ignore --hidden --follow -g "!{.git,node_modules}/*" 2> /dev/null'
export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
# Ctrl-P: fuzzy-pick a file with fzf and open it in vim.
bind -x '"\C-p": vim $(fzf);'
# http://owen.cymru/sf-a-quick-way-to-search-for-some-thing-in-bash-and-edit-it-with-vim-2/#ampshare=http://owen.cymru/sf-a-quick-way-to-search-for-some-thing-in-bash-and-edit-it-with-vim-2/
# sf: ripgrep for a literal string across common source-file extensions,
# pick matches interactively with fzf, then open the selections in $EDITOR.
sf() {
  if [ "$#" -lt 1 ]; then echo "Supply string to search for!"; return 1; fi
  # %q-quote the joined arguments so the term survives the eval below intact.
  printf -v search "%q" "$*"
  include="yml,js,json,php,md,styl,pug,jade,html,config,py,cpp,c,go,hs,rb,conf,fa,lst"
  exclude=".config,.git,node_modules,vendor,build,yarn.lock,*.sty,*.bst,*.coffee,dist"
  rg_command='rg --column --line-number --no-heading --fixed-strings --ignore-case --no-ignore --hidden --follow --color "always" -g "*.{'$include'}" -g "!{'$exclude'}/*"'
  # NOTE(review): eval re-parses the quotes embedded in rg_command; $search is
  # safe to interpolate because it was %q-escaped above.
  # awk keeps the first three ':'-separated fields of each selected match.
  files=`eval $rg_command $search | fzf --ansi --multi --reverse | awk -F ':' '{print $1":"$2":"$3}'`
  # $files is intentionally unquoted so multiple selections word-split into
  # separate arguments for the editor.
  [[ -n "$files" ]] && ${EDITOR:-vim} $files
}
# Shell integration hooks: direnv (per-directory environments) and starship.
eval "$(direnv hook bash)"
eval "$(starship init bash)"
| true
|
d1e5e58abf63ddbdc5fc422d37b978c276b99a73
|
Shell
|
yukikokubo/system_software_lecture
|
/kadai3.sh
|
UTF-8
| 171
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
# Print the greatest common divisor of the two integers given as arguments,
# computed with the Euclidean algorithm. Exits 1 (with a message on stderr)
# unless exactly two arguments are supplied.
if [ $# -ne 2 ]; then
  echo "input 2 arguments" 1>&2
  exit 1
fi
a=$1
b=$2
# Euclid: repeatedly replace (a, b) with (b, a mod b); when b reaches 0,
# a holds the GCD.
while [ "$b" -ne 0 ]; do
  remainder=$((a % b))
  a=$b
  b=$remainder
done
echo "$a"
| true
|
224c632a3078280f8d2e906ebfe5e4302349b228
|
Shell
|
kitimark/bashscript
|
/quiz1/600610773/04.sh
|
UTF-8
| 165
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# For every file named $1 found under directory $2: print a header built from
# the third '/'-separated component of the file's path, the file's contents,
# then a blank line.
#
# Usage: ./04.sh <filename> <search_root>

print_matches() {
  local path header
  # NUL-delimited find output survives whitespace in paths, unlike the old
  # `for dir in \`find ...\`` loop, which word-split every result. The old
  # .dir_file temp file in the cwd is gone, and so is the dead `shift`
  # (find's output was expanded once before the loop, so shifting $1/$2
  # had no effect).
  find "$2" -name "$1" -print0 |
    while IFS= read -r -d '' path; do
      # Third '/'-separated component, exactly as `cut -d'/' -f3` produced.
      header=$(printf '%s\n' "$path" | cut -d'/' -f3)
      echo "--- $header ---"
      cat "$path"
      echo
    done
}

print_matches "$@"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.