blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f3036909caa75b82e583a07230be417647cff0c8
|
Shell
|
ecmwf/eccodes
|
/definitions/create_legacy_ecmf_defs.sh
|
UTF-8
| 2,514
| 4.3125
| 4
|
[
"Apache-2.0"
] |
permissive
|
:
set -eu
# Usage:
#   $0 paramId1 paramId2 ...
#
# This script will insert the local ECMWF GRIB2 representation
# for each paramId into the files:
#   definitions/grib2/localConcepts/ecmf/paramId.legacy.def
#   definitions/grib2/localConcepts/ecmf/shortName.legacy.def
#   etc
# This is normally run for those GRIB2 parameters which had
# a local ECMWF representation which later acquired a standard
# WMO one. We want to be able to match the old encoding to the
# paramId but when we write out a field, we want to use the new
# standard WMO encoding.
#
# Assumptions:
#   The ecCodes tools grib_set and grib_get are available
#   The legacy encoding has discipline = 192
#   The parameterCategory = paramId / 1000
#   The parameterNumber = paramId - parameterCategory*1000
#
if [ $# -eq 0 ]; then
    # Usage errors belong on stderr. The original wrote "2>&1", which
    # merely duplicated stderr onto stdout and left the message on stdout.
    echo "Usage: $0 pid1 pid2 ..." >&2
    exit 1
fi
# Input list of paramId values
pids="$@"
sample2=samples/GRIB2.tmpl
# Default TMPDIR to /tmp so "set -u" does not abort when it is unset.
temp=${TMPDIR:-/tmp}/temp.create_legacy_ecmf_def.grib
defs=definitions/grib2/localConcepts/ecmf
output_def()
{
    # Print one legacy-concept definition entry on stdout.
    #   $1 parameter name (used only for the "#<name>" header line)
    #   $2 concept value; nothing is printed when this is "unknown"
    #   $3 discipline   $4 parameterCategory   $5 parameterNumber
    key_name=$1
    key_value=$2
    disc=$3
    categ=$4
    parnum=$5
    # Legacy local encoding: category 0 maps to table 128.
    if [ $categ -eq 0 ]; then
        categ=128
    fi
    if [ "$key_value" = "unknown" ]; then
        return 0
    fi
    echo "#$key_name"
    echo "'$key_value' = {"
    echo " discipline = $disc ;"
    echo " parameterCategory = $categ ;"
    echo " parameterNumber = $parnum ;"
    echo "}"
}
count=0
# For each paramId: encode it into a temporary GRIB2 message, read back the
# concept keys (name, shortName, units, cfName, cfVarName), then append one
# entry per concept file under $defs using output_def.
for pid in $pids; do
echo "Doing $pid..."
# Some parameters can only be encoded with an accumulated stepType, so
# tolerate a first failure (hence the set +e / set -e bracket) and retry
# with stepType=accum; the retry is allowed to abort the script.
set +e
grib_set -s paramId=$pid $sample2 $temp 2>/dev/null
status=$?
set -e
if [ $status -ne 0 ]; then
grib_set -s stepType=accum,paramId=$pid $sample2 $temp
fi
# Read the concept values for this paramId from the temporary message.
name=$(grib_get -p name $temp)
shortName=$(grib_get -p shortName $temp)
units=$(grib_get -p units $temp)
cfName=$(grib_get -p cfName $temp)
cfVarName=$(grib_get -p cfVarName $temp)
count=$((count+1))
# Legacy local encoding (see header): discipline 192, category/number
# derived from the paramId (category = pid/1000, number = remainder).
dis=192
cat=$((pid/1000))
num=$((pid - cat*1000))
output_def "$name" "$pid" $dis $cat $num >> $defs/paramId.legacy.def
output_def "$name" "$name" $dis $cat $num >> $defs/name.legacy.def
output_def "$name" "$shortName" $dis $cat $num >> $defs/shortName.legacy.def
output_def "$name" "$units" $dis $cat $num >> $defs/units.legacy.def
output_def "$name" "$cfVarName" $dis $cat $num >> $defs/cfVarName.legacy.def
output_def "$name" "$cfName" $dis $cat $num >> $defs/cfName.legacy.def
done
echo "Number of legacy parameters added: $count"
if [ $count -gt 0 ]; then
echo "Files updated. Check directory $defs"
fi
rm -f $temp
| true
|
0763b053879ceba9da648223b9aef0e739620b65
|
Shell
|
Griffin-Brome/qscu-linux-workshop
|
/scripts/tutorial/9-function.sh
|
UTF-8
| 233
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Linux Scripting Tutorial Part 9: function
# A function with no arguments.
helloWorld() {
    printf '%s\n' "Hello world!"
}
helloWorld

# A function that reads its positional parameters $1 and $2.
greet() {
    printf '%s\n' "Hi, my name is $1 and I'm taking $2 in school"
}
greet "Barret" "Comp Sci"
| true
|
82120d0c7d0d517dcba62157a8f3b0070dfa3570
|
Shell
|
duncan-f/dotfiles
|
/.local/bin/mpdstatus
|
UTF-8
| 171
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Desktop notification showing the track mpd is currently playing.
playing=$(mpc current)
# First word of the reversed, volume-stripped `mpc status` output —
# i.e. the leading token of the last status line.
# NOTE(review): $status is computed but never used below; presumably it
# was meant to appear in the notification — confirm intent before removing.
status=$(mpc status | sed "/^volume:/d" | tac | tr '\n' ' ' | awk -F' ' '{print $1}')
notify-send "Now Playing 🎵" "$playing"
| true
|
9eb1dd6e8c6e35b5da14d728ecc2be99fa757ade
|
Shell
|
PFigs/wm-utils
|
/modules/whip_ui.sh
|
UTF-8
| 805
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2019 Wirepas Ltd licensed under Apache License, Version 2.0
function ui_debug
{
    # Print the given message, but only when WM_UTS_DEBUG is set (debug mode).
    # Arguments: $1 - message to print
    if [[ -n ${WM_UTS_DEBUG:-} ]]
    then
        # Quote the expansion: the original unquoted $1 collapsed runs of
        # whitespace inside the message.
        echo "$1"
    fi
}
function ui_errorbox
{
# Modal whiptail message box titled "Error" (8 rows x 40 cols).
# Arguments: $1 - error message to display
whiptail --title "Error" --msgbox "$1" 8 40
}
function ui_main_menu
{
    # Show the top-level menu and print the selected option number on stdout.
    # whiptail writes the selection to stderr, so the fds are rotated
    # (3>&1 1>&2 2>&3) to capture it while the UI draws on the terminal.
    option=$(whiptail --title "Wirepas Firmware Utilities - $WM_UTS_VERSION" --menu "Choose your option" 15 60 4 \
    "1" "Build Applications" \
    "2" "RTT Logger" \
    "3" "Program Firmware" \
    "4" "Erase Firmware" 3>&1 1>&2 2>&3)
    # Capture whiptail's status before echoing: the original 'exit $?'
    # always exited 0 because it read echo's status, losing the non-zero
    # status whiptail returns on Cancel/Esc.
    _ui_rv=$?
    echo ${option}
    exit $_ui_rv
}
function ui_log_menu
{
    # Show the RTT-logger session menu and print the selection on stdout.
    # Same fd rotation as ui_main_menu to capture whiptail's stderr output.
    option=$(whiptail --title "Wirepas Firmware Utilities" --menu "Choose your option" 15 60 3 \
    "1" "Start Session" \
    "2" "Kill Session" \
    "3" "List sessions" 3>&1 1>&2 2>&3)
    # Preserve whiptail's exit status (non-zero on Cancel/Esc); the
    # original 'exit $?' read echo's status and always exited 0.
    _ui_rv=$?
    echo ${option}
    exit $_ui_rv
}
| true
|
0b7e3c23930a276d61da5867b04f1753371fe69e
|
Shell
|
mmussett/shoppingCart
|
/awscli/createCluster.sh
|
UTF-8
| 194
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/sh
# Create the ECS cluster "bwce-cluster" only when it does not already exist.
# describe-clusters on a missing cluster still exits 0 with an empty
# clusters list, so jq reports the ARN as the literal "null" (-r strips
# the JSON quoting so the comparison below works on the bare value).
CLUSTER_ARN=$(aws ecs describe-clusters --cluster bwce-cluster | jq -r '.clusters[0].clusterArn')
# The original test was inverted: it created the cluster when the ARN *was*
# found. Create only when the lookup produced nothing (empty or "null").
if [ -z "$CLUSTER_ARN" ] || [ "$CLUSTER_ARN" = "null" ]
then
    aws ecs create-cluster --cluster-name bwce-cluster
fi
| true
|
cbbf4dbb1b0944842e93f68c8d3b3f88c16a4e4f
|
Shell
|
simonnagl/arch-config
|
/update_files.sh
|
UTF-8
| 2,495
| 4.5625
| 5
|
[] |
no_license
|
#!/bin/sh
# Usage: update_files.sh [Option]... check|out|in
#
# Check for changes of gatherd configuration files or
# updates them.
#
# Optionen:
# -c [FILE] FILE with a path list of configuration files
# -f [FILE] Only check or update FILE
# -h Print thus usage
#
# check: report which global files differ their gatherd files
# out: update global configuration files to fit gathered files
# in: update gathered files to fit global configuration files.
# Set default options
#NOTE(review): the header above is runtime output (print_usage echoes every
#hash-space comment line up to the first blank line), so it is left
#byte-identical, typos included; added comments omit the space after the
#hash so they can never leak into the printed usage.
file_path_list='file_path_list'; # -c
only_update_file=''; # -f
#Folder (relative to this script) holding the gathered copies of the files.
config_files_folder_rel_to_script='config_files'
#Sub-command to execute: 'check' | 'out' | 'in'; filled in by arg parsing.
decree='';
##### START FUNCTIONS #####
# exit_ifn_cmdarg ARGUMENT
# Exit if ARGUMENT is not usable as an option's argument, i.e. it is
# empty or it starts with '-' (the user supplied another option instead).
exit_ifn_cmdarg() {
    # A POSIX 'case' replaces the deprecated '[ ... -o ... ]' combination
    # and the `echo | cut` subshell used to inspect the first character.
    case "${1:-}" in
    ''|-*)
        echo "Error: A command line option needs an argument"
        print_usage
        exit 1
        ;;
    esac
}
# Print usage from the head of the file
print_usage() {
    # Emit the hash-space comment lines from the top of this script until
    # the first blank line, stripping the comment markers. "$0" is quoted
    # so a script path containing spaces still works.
    sed -n '/^$/q; /# /s/# //p' "$0"
}
run() {
    # Locate the gathered copy of $current_filename (same basename, stored
    # in the config folder next to this script) and dispatch on $decree.
    # $( ) and quoting replace the original backticks/unquoted expansions,
    # which broke on paths containing spaces.
    gatherd_filename=$(dirname "$0")/$config_files_folder_rel_to_script/$(basename "$current_filename")
    case "$decree" in
    "check")
        run_check
        ;;
    "out")
        run_out
        ;;
    "in")
        run_in
        ;;
    esac
}
run_check() {
    # Report (on stdout) when the live file differs from its gathered copy.
    # Quoting the filenames keeps diff working on paths with spaces.
    if ! diff "$current_filename" "$gatherd_filename" > /dev/null
    then
        #TODO Add a description where the file changed
        echo "$current_filename has changed somewhere"
    fi
}
run_out() {
    # Overwrite the live (global) file with its gathered copy.
    echo "update $current_filename"
    # TODO: Run with sudo if required
    cp "$gatherd_filename" "$current_filename"
}
run_in() {
    # Refresh the gathered copy from the live (global) file.
    echo "refresh $current_filename"
    cp "$current_filename" "$gatherd_filename"
}
##### START MAIN SCRIPT #####
# Parse command line arguments
while [ $# -gt 0 ]
do
    case "$1" in
    -c)
        shift;
        exit_ifn_cmdarg "$1"
        file_path_list="$1"
        ;;
    -f)
        shift;
        exit_ifn_cmdarg "$1"
        only_update_file="$1"
        ;;
    'check' | 'out' | 'in')
        decree=$1
        ;;
    -h)
        print_usage
        exit 0
        ;;
    *)
        echo "Error while parsing command line"
        print_usage
        exit 1
        ;;
    esac
    shift
done
# Check if decree was set.
# -z with quotes: the original '[ $decree = '' ]' expanded to '[ = ]' when
# decree was empty, which is a test *syntax error* rather than a clean
# usage failure.
if [ -z "$decree" ]
then
    echo "Error while parsing command line. Read usage and try again."
    print_usage
    exit 1
fi
# Check if only_update_file is set.
if [ -z "$only_update_file" ]
then
    # If not, parse file_path_list and process decree for every line.
    # read -r keeps backslashes in paths intact.
    while read -r line
    do
        if [ ! -z "$line" ]
        then
            current_filename=$line
            run
        fi
    done <"$file_path_list"
else
    # If yes, process decree for it.
    current_filename=$only_update_file
    run
fi
| true
|
fc341bb33f3d596fb49871a7769ff7d93da943c8
|
Shell
|
felipeg48/coreos-osx-gui
|
/src/first-init.command
|
UTF-8
| 7,037
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# first-init.command
# CoreOS GUI for OS X
#
# Created by Rimantas on 01/04/2014.
# Copyright (c) 2014 Rimantas Mocevicius. All rights reserved.
### Enable shared folder
# Prompt loop: keep asking until the user answers y or n.
LOOP=1
while [ $LOOP -gt 0 ]
do
    VALID_MAIN=0
    echo "Do you want to enable NFS shared local folder '~/coreos-osx/share' to CoreOS VM '/home/coreos/share' one? [y/n]"
    read RESPONSE
    # Default to "Y" when the user just presses Enter (note: "Y" matches
    # neither y nor n below, so a bare Enter re-asks the question).
    XX=${RESPONSE:=Y}
    if [ "$RESPONSE" = y ]
    then
        VALID_MAIN=1
        # enable shared folder
        sed -i "" 's/##$shared_folders/$shared_folders/' ~/coreos-osx/coreos-vagrant/config.rb
        # update /etc/sudoers file
        echo "(You will be asked for your OS X user password !!!)"
        CHECK_SUDOERS=$(sudo -s 'cat /etc/sudoers | grep "# Added by CoreOS GUI App"')
        if [ "$CHECK_SUDOERS" = "" ]
        then
            echo "Updating /etc/sudoers file"
            sudo -s 'cat ~/coreos-osx/tmp/sudoers >> /etc/sudoers'
        else
            echo "You already have in /etc/sudoers '# Added by CoreOS GUI App' lines !!!"
            echo "Please double check /etc/sudoers file for it and delete all lines starting # Added by CoreOS GUI App - start' !!!"
            echo "inclusevely lines with # Added by CoreOS GUI App too "
            echo "and add the entry shown below !!!"
            cat ~/coreos-osx/tmp/sudoers
        fi
        LOOP=0
    fi
    if [ "$RESPONSE" = n ]
    then
        VALID_MAIN=1
        LOOP=0
    fi
    # Re-prompt on any other answer. BUG FIX: the original test compared
    # the 0/1 flag against the letters y/n
    # ('[ $VALID_MAIN != y ] || [ $VALID_MAIN != n ]'), which is always
    # true; check the validity flag itself, as the channel loop below does.
    if [ "$VALID_MAIN" != 1 ]
    then
        continue
    fi
done
# remove temporal files
rm -f ~/coreos-osx/tmp/*
### Enable shared folder
### Set release channel
# Menu loop: re-display until the user picks 1, 2 or 3.
LOOP=1
while [ $LOOP -gt 0 ]
do
    VALID_MAIN=0
    echo " "
    echo " Set CoreOS Release Channel:"
    echo " 1) Alpha "
    echo " 2) Beta "
    echo " 3) Stable "
    echo " "
    echo "Select an option:"
    read RESPONSE
    # A bare Enter becomes "Y", an invalid choice, so the menu repeats.
    XX=${RESPONSE:=Y}
    # Each branch first uncomments the $update_channel setting, then forces
    # the channel value regardless of which channel is currently configured.
    # Quoting "$RESPONSE" prevents '[' errors on multi-word input, which
    # the unquoted original could hit.
    if [ "$RESPONSE" = 1 ]
    then
        VALID_MAIN=1
        sed -i "" 's/#$update_channel/$update_channel/' ~/coreos-osx/coreos-vagrant/config.rb
        sed -i "" "s/channel='stable'/channel='alpha'/" ~/coreos-osx/coreos-vagrant/config.rb
        sed -i "" "s/channel='beta'/channel='alpha'/" ~/coreos-osx/coreos-vagrant/config.rb
        LOOP=0
    fi
    if [ "$RESPONSE" = 2 ]
    then
        VALID_MAIN=1
        sed -i "" 's/#$update_channel/$update_channel/' ~/coreos-osx/coreos-vagrant/config.rb
        sed -i "" "s/channel='alpha'/channel='beta'/" ~/coreos-osx/coreos-vagrant/config.rb
        sed -i "" "s/channel='stable'/channel='beta'/" ~/coreos-osx/coreos-vagrant/config.rb
        LOOP=0
    fi
    if [ "$RESPONSE" = 3 ]
    then
        VALID_MAIN=1
        sed -i "" 's/#$update_channel/$update_channel/' ~/coreos-osx/coreos-vagrant/config.rb
        sed -i "" "s/channel='alpha'/channel='stable'/" ~/coreos-osx/coreos-vagrant/config.rb
        sed -i "" "s/channel='beta'/channel='stable'/" ~/coreos-osx/coreos-vagrant/config.rb
        LOOP=0
    fi
    if [ "$VALID_MAIN" != 1 ]
    then
        continue
    fi
done
### Set release channel
# pause PROMPT... — display PROMPT and block until the user presses Enter.
pause(){
    read -p "$*"
}
# first up to initialise VM
echo "Setting up Vagrant VM for CoreOS on OS X"
cd ~/coreos-osx/coreos-vagrant
vagrant box update
vagrant up --provider virtualbox
# Add vagrant ssh key to ssh-agent
##vagrant ssh-config core-01 | sed -n "s/IdentityFile//gp" | xargs ssh-add
ssh-add ~/.vagrant.d/insecure_private_key >/dev/null 2>&1
# download etcdctl and fleetctl
#
# Ask the VM which etcdctl version it runs, then fetch the matching OS X
# binary so the host tool stays in sync with the guest (tr strips the \r
# that vagrant ssh leaves at line ends).
cd ~/coreos-osx/coreos-vagrant
LATEST_RELEASE=$(vagrant ssh -c "etcdctl --version" | cut -d " " -f 3- | tr -d '\r' )
cd ~/coreos-osx/bin
echo "Downloading etcdctl $LATEST_RELEASE for OS X"
curl -L -o etcd.zip "https://github.com/coreos/etcd/releases/download/v$LATEST_RELEASE/etcd-v$LATEST_RELEASE-darwin-amd64.zip"
unzip -j -o "etcd.zip" "etcd-v$LATEST_RELEASE-darwin-amd64/etcdctl"
rm -f etcd.zip
#
# Same dance for fleetctl: match the version reported inside the VM.
cd ~/coreos-osx/coreos-vagrant
LATEST_RELEASE=$(vagrant ssh -c 'fleetctl version' | cut -d " " -f 3- | tr -d '\r')
cd ~/coreos-osx/bin
echo "Downloading fleetctl v$LATEST_RELEASE for OS X"
curl -L -o fleet.zip "https://github.com/coreos/fleet/releases/download/v$LATEST_RELEASE/fleet-v$LATEST_RELEASE-darwin-amd64.zip"
unzip -j -o "fleet.zip" "fleet-v$LATEST_RELEASE-darwin-amd64/fleetctl"
rm -f fleet.zip
# download docker client
cd ~/coreos-osx/coreos-vagrant
DOCKER_VERSION=$(vagrant ssh -c 'docker version' | grep 'Server version:' | cut -d " " -f 3- | tr -d '\r')
# An "rc" in the version string marks a release candidate build.
CHECK_DOCKER_RC=$(echo $DOCKER_VERSION | grep rc)
if [ -n "$CHECK_DOCKER_RC" ]
then
# docker RC release
# RCs live on test.docker.com; probe with a HEAD request and only
# download if the response line carries an HTTP 200.
if [ -n "$(curl -s --head https://test.docker.com/builds/Darwin/x86_64/docker-$DOCKER_VERSION | head -n 1 | grep "HTTP/1.[01] [23].." | grep 200)" ]
then
# we check if RC is still available
echo "Downloading docker $DOCKER_VERSION client for OS X"
curl -o ~/coreos-osx/bin/docker https://test.docker.com/builds/Darwin/x86_64/docker-$DOCKER_VERSION
else
# RC is not available anymore, so we download stable release
# (strip the "-rcN" suffix to get the stable version number)
DOCKER_VERSION_STABLE=$(echo $DOCKER_VERSION | cut -d"-" -f1)
echo "Downloading docker $DOCKER_VERSION_STABLE client for OS X"
curl -o ~/coreos-osx/bin/docker https://get.docker.com/builds/Darwin/x86_64/docker-$DOCKER_VERSION_STABLE
fi
else
# docker stable release
echo "Downloading docker $DOCKER_VERSION client for OS X"
curl -o ~/coreos-osx/bin/docker https://get.docker.com/builds/Darwin/x86_64/docker-$DOCKER_VERSION
fi
# Make it executable
chmod +x ~/coreos-osx/bin/docker
#
## Load docker images if there any
# Set the environment variable for the docker daemon
export DOCKER_HOST=tcp://127.0.0.1:2375
# path to the bin folder where we store our binary files
export PATH=${HOME}/coreos-osx/bin:$PATH
# NOTE(review): pause() is already defined earlier in this script; this
# redefinition is identical and therefore harmless, but redundant.
function pause(){
read -p "$*"
}
echo " "
echo "# It can upload your docker images to CoreOS VM # "
echo "If you want copy your docker images in *.tar format to ~/coreos-osx/docker_images folder !!!"
pause 'Press [Enter] key to continue...'
cd ~/coreos-osx/docker_images
# Load every *.tar image archive into the VM's docker daemon, if any exist.
if [ "$(ls | grep -o -m 1 tar)" = "tar" ]
then
for file in *.tar
do
echo "Loading docker image: $file"
docker load < $file
done
echo "Done with docker images !!!"
else
echo "Nothing to upload !!!"
fi
echo " "
##
# set fleetctl endpoint and install fleet units
export FLEETCTL_ENDPOINT=http://172.19.8.99:2379
export FLEETCTL_DRIVER=etcd
export FLEETCTL_STRICT_HOST_KEY_CHECKING=false
echo "fleetctl list-machines:"
fleetctl list-machines
echo " "
# install fleet units
echo "Installing fleet units from '~/coreos-osx/fleet' folder"
cd ~/coreos-osx/fleet
~/coreos-osx/bin/fleetctl submit *.service
~/coreos-osx/bin/fleetctl start *.service
echo "Finished installing fleet units:"
fleetctl list-units
echo " "
#
echo "Installation has finished, CoreOS VM is up and running !!!"
echo "Enjoy CoreOS-Vagrant VM on your Mac !!!"
echo ""
echo "Run from menu 'OS Shell' to open a terninal window with docker, fleetctl, etcdctl and rkt pre-set !!!"
echo ""
pause 'Press [Enter] key to continue...'
| true
|
7487caa9f6000b2b73ade0b6edc70402d454e7ad
|
Shell
|
taneeshnetworks/SkiffOS
|
/configs/skiff/core/root_overlay/opt/skiff/scripts/core-container.sh
|
UTF-8
| 3,297
| 3.78125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
# Duplicate stdout on fd 5 so command substitutions further down can tee
# their pipeline output back to the terminal while capturing it.
exec 5>&1
# Journal files must belong to the systemd-journal group to be readable.
chown -R root:systemd-journal /var/log/journal/
# Secondary-level progress message (" --> ...") on stdout.
info1() {
    printf ' --> %s\n' "$1"
}
# Primary-level progress message (" ==> ...") on stdout.
info2() {
    printf ' ==> %s\n' "$1"
}
# Refuse to run on systems that are not SkiffOS images.
if [ ! -d "/opt/skiff" ]; then
echo "Non-skiff system detected, bailing out!"
exit 1
fi
# Well-known paths: persistent storage, the in-container mount target for
# it, and the skiff configuration/scripts tree shipped in the image.
PERSIST_MNT=/mnt/persist
TARGET_CORE_MNT=/mnt/core
CORE_PERSIST=$PERSIST_MNT/core
SKIFF_DIR=/opt/skiff
COREENV_DIR=$SKIFF_DIR/coreenv/user
COREENV_OVERRIDE_DIR=$PERSIST_MNT/skiff/coreenv
SKIFF_SCRIPTS_DIR=$SKIFF_DIR/scripts
HOME_DIR=/home/core
# User-provided coreenv overrides on the persist partition win over the
# defaults shipped in the image.
if [ -d $COREENV_OVERRIDE_DIR ]; then
echo "Using coreenv files from $COREENV_OVERRIDE_DIR."
rsync -rav $COREENV_OVERRIDE_DIR/ $COREENV_DIR/
fi
# '|| true': scratch dir may not exist; its absence is fine.
rm -rf $PERSIST_MNT/tmp/crewscratch || true
# Give the 'core' user a home seeded with root's SSH authorized keys,
# correct permissions, and an unlocked account.
mkdir -p $HOME_DIR
if [ -d $HOME_DIR/.ssh ]; then
rm -rf $HOME_DIR/.ssh
fi
cp -r /root/.ssh $HOME_DIR
chmod 700 $HOME_DIR/.ssh
chmod 600 $HOME_DIR/.ssh/authorized_keys
chown -R core:core $HOME_DIR
# '|| true': passwd fails harmlessly when the account is already unlocked.
passwd -u core || true
info2 "Verifying skiff/core:latest image is built..."
# Look for any image repository named skiff/core with a 'latest' tag.
IMAGES=$(docker images | sed 1d | grep "latest" | cut -d" " -f1 | grep "skiff/core") || true
if [ -z "$IMAGES" ]; then
info2 "skiff/core:latest not found, attempting to scratch build it at $COREENV_DIR"
cd $COREENV_DIR
# Keep a pristine copy of the Dockerfile under /run so repeated runs
# rewrite the original FROM line, not an already-patched one.
if [ -f /run/skiff_core/Dockerfile.bak ]; then
cp /run/skiff_core/Dockerfile.bak Dockerfile
else
mkdir -p /run/skiff_core/
cp Dockerfile /run/skiff_core/Dockerfile.bak
fi
# Find the FROM definition
if [ ! -f Dockerfile ]; then
info2 "Dockerfile not found!"
exit 1
fi
FROM_IMG=$(cat Dockerfile | grep -m1 '^FROM .*$' | sed "s/FROM //") || true
if [ -z "$FROM_IMG" ]; then
info1 "Could not find FROM declaration in Dockerfile!"
exit 1
fi
if [ "$FROM_IMG" = "scratch" ]; then
info1 "Dockerfile is from scratch, skipping scratchbuild."
else
# Split "name:tag" into its parts.
# NOTE(review): when FROM_IMG has no ':', `cut -d: -f2` passes the whole
# name through (it does not yield an empty string), so the 'latest'
# default below never actually applies — confirm whether that matters.
FROM_IMG_VERSION=$(echo "$FROM_IMG" | cut -d: -f2)
FROM_IMG_NOVER=$(echo "$FROM_IMG" | cut -d: -f1)
if [ -z "$FROM_IMG_VERSION" ]; then
FROM_IMG_VERSION=latest
fi
VER_IMG_VERSION=$(docker images | sed 1d | tr -s ' ' | grep "$FROM_IMG_NOVER" | cut -d" " -f2 | grep -m1 "$FROM_IMG_VERSION") || true
info2 "$FROM_IMG scratch building."
export CREW_IGNORE_EXISTING_SBDIR="true"
chmod +x $SKIFF_SCRIPTS_DIR/scratchbuild.bash
# tee to fd 5 (the stdout duplicate opened at the top) keeps build output
# visible while the last line — the built image name — lands in IMAGE.
IMAGE=$($SKIFF_SCRIPTS_DIR/scratchbuild.bash build $FROM_IMG | tee >(cat - >&5) | tail -n1)
sed -i -e "s#FROM .*#FROM ${IMAGE}#" Dockerfile
fi
info2 "skiff/core:latest copying files."
cp /usr/bin/dumb-init ./dumb-init
info2 "skiff/core:latest building."
# Append the shared base Dockerfile fragment and refresh the startup script
# before the final build.
cat ../base/Dockerfile >> Dockerfile
cat ../base/startup.sh > startup.sh
docker build -t "skiff/core:latest" .
fi
# Re-fix journal group ownership for anything created during the build.
chown -R root:systemd-journal /var/log/journal/
# Attach to an existing skiff_core container (running or stopped); if
# docker inspect finds none, create a fresh one.
if CONTAINER_IS_RUNNING=$(docker inspect -f {{.State.Running}} skiff_core); then
if [ "$CONTAINER_IS_RUNNING" = "true" ]; then
info2 "Container skiff_core already running, tailing it."
docker logs -f skiff_core
else
info2 "Starting existing container skiff_core..."
docker start -a skiff_core
fi
else
info2 "Starting new skiff core attached..."
mkdir -p $CORE_PERSIST
# Deliberately highly privileged: the core container manages the host's
# docker daemon and hardware (docker socket + binary, /sys, /dev, host
# networking), with the persist dir mounted at $TARGET_CORE_MNT.
docker run --privileged --cap-add=ALL -v /var/run/docker.sock:/var/run/docker.sock -v /usr/bin/docker:/usr/bin/docker --net=host --volume=/sys:/sys --volume=/dev:/dev --name=skiff_core -v $CORE_PERSIST:$TARGET_CORE_MNT -t skiff/core:latest
fi
| true
|
c8098ee635fe983071d5673586dff4bc87c4b3a3
|
Shell
|
rossbach/ptask
|
/ptaskUnitTest/finalpca.sh
|
UTF-8
| 1,705
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the PCA benchmark matrix (sizes x configurations x iterations) and
# emit CSV rows comparing stmod, mod, handcode, cublas, ptask and cpu times.
ITERATIONS=3
SUCCEEDED=0
FAILURES=0
S=preNorm
#for schedmode in 0 1 2 3; do
dataaware=2
priority=1
maxconc=0
echo N,J,stmod,mod,handcode,cublas,ptask,cpu
outpca=outpca
if [ ! -e $outpca ]; then
mkdir $outpca
fi
rm -rf $outpca/*
for I in 10; do
for J in 10 54; do
for K in 3; do
for N in 128 256 512; do
for i in `seq 1 $ITERATIONS`; do
../../bin/x64/Release/PTaskUnitTest.exe -J -R -C 1 -m 3 -G -t cupca -f pca.ptx -s $S -r $N -c $N -n 1 -i $I -j $J -K $K > $outpca/pca-ptask-$N-i$I-k$K-j$J.txt
../../scratch/jcurrey/matrixMul/bin/win64/Release/pca_cuda.exe -M -r $N -c $N -i $I -j $J -s $S -K $K > $outpca/pca-stmod-$N-i$I-k$K-j$J.txt
../../bin/x64/Release/PTaskUnitTest.exe -J -E -R -C 1 -m 3 -L 0 -G -t cupca -f pca.ptx -s $S -r $N -c $N -n 1 -i $I -j $J -K $K > $outpca/pca-mod-$N-i$I-k$K-j$J.txt
../../scratch/jcurrey/matrixMul/bin/win64/Release/pca_cuda.exe -r $N -c $N -i $I -j $J -s $S -K $K > $outpca/pca-handcode-$N-i$I-k$K-j$J.txt
# Result rows are keyed by "<rows>x<cols>", e.g. "128x128".
# BUG FIX: the original pattern "$Nx$N" referenced the undefined
# variable "Nx", so the pattern degenerated to just the value of $N and
# could match unrelated lines; "${N}x${N}" is the intended key.
ptasktime=`egrep "${N}x${N}" $outpca/pca-ptask-$N-i$I-k$K-j$J.txt | awk '{ print $2 }'`
stmodtime=`egrep "${N}x${N}" $outpca/pca-stmod-$N-i$I-k$K-j$J.txt | awk '{ print $3 }'`
modtime=`egrep "${N}x${N}" $outpca/pca-mod-$N-i$I-k$K-j$J.txt | awk '{ print $2 }'`
handcodetime=`egrep "${N}x${N}" $outpca/pca-handcode-$N-i$I-k$K-j$J.txt | awk '{ print $3 }'`
cputime=`egrep "${N}x${N}" $outpca/pca-handcode-$N-i$I-k$K-j$J.txt | awk '{ print $4 }'`
cublastime=`egrep "${N}x${N}" $outpca/pca-handcode-$N-i$I-k$K-j$J.txt | awk '{ print $5 }'`
# Print the N and J columns only on the first iteration of each config.
if [ "$i" = "1" ]; then
echo $N,$J,$stmodtime,$modtime,$handcodetime,$cublastime,$ptasktime,$cputime
else
echo " , ,$stmodtime,$modtime,$handcodetime,$cublastime,$ptasktime,$cputime"
fi
done
done
done
done
done
| true
|
b2ddee1534b8ceabbe387d154a37b9707695d9f3
|
Shell
|
linzjs/linz-minitwitter-basic
|
/vagrant/mongodb.sh
|
UTF-8
| 428
| 3.421875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant provisioner: install the MongoDB shell/tools exactly once,
# using /etc/vagrant/mongo as the "already done" marker file.
if [ -e /etc/vagrant/mongo ]
then
    echo ">>> mongo shell and tools is already setup"
else
    echo ">>> setting up mongo shell and tools"
    # install mongo
    apt-get install -y mongodb-org
    # setup a place for our data to live (MongoDB)
    # groupadd -r mongodb
    # useradd -r -g mongodb mongodb
    mkdir -p /data/mongodb
    chown -R mongodb:mongodb /data/mongodb
    # only run once
    touch /etc/vagrant/mongo
fi
| true
|
3919ac8b8b83097eee56fc4fd06cb86d35d2679b
|
Shell
|
dvapelnik/centos66_php53_php-fpm
|
/start.sh
|
UTF-8
| 1,892
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: rewrite the stock php-fpm/php.ini settings from
# PHP_* environment variables, then run php-fpm in the foreground.
PHP_FPM_CONFIG_FILE=/etc/php-fpm.d/www.conf
PHP_INI_FILE=/etc/php.ini
# Use UNIX socket or port
# NB: every sed below anchors on the *stock* default line, so these
# substitutions only take effect on an unmodified config file.
if [[ ${PHP_FPM_USE_SOCKET} -eq 1 ]]; then
# Create the socket's parent directory (everything up to the last '/').
mkdir -p `echo ${PHP_FPM_SOCKET_PATH} | egrep -o '^\/.+\/'`
# ${VAR//\//\/} — presumably escapes '/' so the path survives as sed
# replacement text; verify against an actual socket path.
sed -i "s/^listen = 127.0.0.1:9000$/listen = ${PHP_FPM_SOCKET_PATH//\//\/}/" ${PHP_FPM_CONFIG_FILE}
else
sed -i "s/^listen = 127.0.0.1:9000$/listen = 127.0.0.1:${PHP_FPM_PORT}/" ${PHP_FPM_CONFIG_FILE}
fi
# Change php-fpm config
sed -i "s/^pm\.max_children = 50$/pm.max_children = ${PHP_FPM_PM_MAX_CHILDREN}/" ${PHP_FPM_CONFIG_FILE}
sed -i "s/^;pm\.max_requests = 500$/pm.max_requests = ${PHP_FPM_PM_MAX_REQESTS}/" ${PHP_FPM_CONFIG_FILE}
sed -i "s/^pm\.start_servers = 5$/pm.start_servers = ${PHP_FPM_PM_START_SERVERS}/" ${PHP_FPM_CONFIG_FILE}
sed -i "s/^pm\.min_spare_servers = 5$/pm.min_spare_servers = ${PHP_FPM_PM_MIN_SPARE_SERVERS}/" ${PHP_FPM_CONFIG_FILE}
sed -i "s/^pm\.max_spare_servers = 35$/pm.max_spare_servers = ${PHP_FPM_PM_MAX_SPARE_SERVERS}/" ${PHP_FPM_CONFIG_FILE}
sed -i "s/^;request_terminate_timeout = 0$/request_terminate_timeout = ${PHP_FPM_REQUEST_TERMINATE_TIMEOUT}/" ${PHP_FPM_CONFIG_FILE}
# Change php.ini
sed -i "s/^;cgi\.fix_pathinfo=1/cgi.fix_pathinfo=0/" ${PHP_INI_FILE}
sed -i "s/register_globals = Off/register_globals = ${PHP_INI_REGISTER_GLOBALS}/" ${PHP_INI_FILE}
sed -i "s/post_max_size = 8M/post_max_size = ${PHP_INI_POST_MAX_SIZE}/" ${PHP_INI_FILE}
sed -i "s/max_input_time = 60/max_input_time = ${PHP_INI_MAX_INPUT_TIME}/" ${PHP_INI_FILE}
sed -i "s/max_execution_time = 30/max_execution_time = ${PHP_INI_MAX_EXECUTION_TIME}/" ${PHP_INI_FILE}
sed -i "s/;date.timezone =/date.timezone = ${PHP_INI_DATE_TIMEZONE//\//\/}/" ${PHP_INI_FILE}
sed -i "s/^upload_max_filesize = 2M$/upload_max_filesize = ${PHP_INI_UPLOAD_MAX_FILESIZE}/" ${PHP_INI_FILE}
# Start php-fpm
# -F keeps php-fpm in the foreground so it remains the container's PID 1.
/usr/sbin/php-fpm -y ${PHP_FPM_CONFIG_FILE} -c ${PHP_INI_FILE} -F
| true
|
54a76feb6633c9ba33edda66e12c707756afe6dd
|
Shell
|
mmehride/MatRox_RU
|
/scripts/datash
|
UTF-8
| 235
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Rebuild data.csv: one "<dataset>," line per entry of filearray.
declare -a filearray=(
" "
"covtype"
"higgs"
"mnist"
"susy"
"letter"
"pen"
"hepmass"
"gas"
"grid"
"random"
"dino"
"sunflower"
"unit"
)
# -f: do not error when data.csv does not exist yet (plain 'rm' printed
# an error on the first run).
rm -f data.csv
for filename in "${filearray[@]}"
do
    echo "$filename," >> data.csv
done
| true
|
f8f05d66378e4728b2a1b907e8629218594476e0
|
Shell
|
kazuya-k-ishibashi/ubuntu_kit
|
/newhtml.sh
|
UTF-8
| 1,766
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/sh
#
# newhtml.sh
#
# -----------------------------------------------------------------------------
# Purpose : foobarbaz
# -----------------------------------------------------------------------------
# created : 2017.02.26
# author : kazuya ishibashi
#
# Description :
# 1. hoge
# 2. fuga
# 3. piyo
#
# Usage :
# $ newhtml.sh [filename]
# filename - foo
# Example) $ newhtml.sh sample.html
#
#
# -----------------------------------------------------------------------------
#
## init
this_dir=$(cd $(dirname $0); pwd)
this_file=$(basename $0)
## configs
filename=$1
extension=".html"
if [ "$2" = "-t" ]; then
title=$3
fi
## validate
# NOTE(review): the mirrored source had every later use of the filename
# variable replaced by the placeholder "$(unknown)"; restored to
# ${filename} from the surrounding logic — confirm against upstream.
# Prompt for a filename when none was given on the command line.
if [ "${filename}" = "" ]; then
echo "please enter filename."
echo -n "> "
read filename
fi
if [ "${filename}" = "" ]; then
echo "${this_file}: filename is empty."
exit 1
fi
# Append the .html extension unless the name already ends with it.
ext_regexp=$(echo ${extension} | tr '.' '\.')$
if ! echo "${filename}" | grep -G "${ext_regexp}" >/dev/null; then
filename=${filename}${extension}
fi
# Refuse to overwrite a non-empty existing file.
if [ -s "${filename}" ]; then
fullpath=$(cd $(dirname "${filename}"); pwd)/$(basename "${filename}")
echo "${this_file}: file already exists.: [\"${fullpath}\"]"
exit 1
fi
# Default title: basename without extension, underscores become spaces.
noext_name=$(echo $(basename "${filename}") | sed 's/\.[^\.]*$//' | tr '_' ' ')
if [ "${title}" = "" ]; then
echo -n "title: "
read title
fi
if [ "${title}" = "" ]; then
title=${noext_name}
fi
## exec
# Write the HTML skeleton (the here-doc expands ${title}) and open it.
cat << TEMPLATE > "${filename}"
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>${title}</title>
<script src=""></script>
</head>
<body>
<header>
<h1>${title}</h1>
</header>
<main>
<section id="section_1">
</section>
</main>
<footer>
</footer>
</body>
</html>
TEMPLATE
vim "${filename}"
exit
| true
|
d447afc6cf0905ff005b8ec3b565051a9ab7d8ce
|
Shell
|
Grokery/grokerylab
|
/scripts/createInitialAdminUser.sh
|
UTF-8
| 957
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Bootstrap the first admin user of a freshly installed API instance.
# The request fails with 'Not Authorized' if any users already exist;
# afterwards, additional users are created through the UI.
echo "Please enter apiUrl (default: http://localhost:8000/api/v0):"
read apiUrl
: "${apiUrl:=http://localhost:8000/api/v0}"
echo "Please enter a username (email) for an admin user on the new account:"
read adminUsername
: "${adminUsername:=}"
echo "Please enter password for new admin user:"
# -s: do not echo the password while it is typed.
read -s adminPass
: "${adminPass:=}"
echo "Please enter name for new admin user:"
read adminName
: "${adminName:=}"
# POST the new user. The JSON body alternates single-quoted literal text
# with double-quoted shell expansions to splice the values in safely.
curl -X POST \
"$apiUrl/users" \
-H 'Cache-Control: no-cache' \
-H 'Content-Type: application/json' \
-d '{
"username": "'"$adminUsername"'",
"password": "'"$adminPass"'",
"name": "'"$adminName"'",
"accountRole":"ADMIN"
}'
echo
| true
|
bfdaca4a157c72964df5e1d27697bea67c9b3a95
|
Shell
|
dram/smlnj-mirror
|
/osx-dist/components/scripts/postinstall
|
UTF-8
| 591
| 3.34375
| 3
|
[
"SMLNJ"
] |
permissive
|
#!/bin/sh
#
# usage:
#
# postinstall <pkg-path> <dst-path> <dst-vol> <root-dir>
#
# where
#
# <pkg-path> Full path to the installation package the Installer application
# is processing. For example: /Volumes/Users/jhr/Desktop/smlnj-x86.pkg
#
# <dst-path> Full path to the installation destination. For example: /usr/local/smlnj
#
# <dst-vol> Installation volume (or mount point) to receive the payload.
#
# <root-dir> The root directory for the system
#
# the directory where the scripts live
#
SCRIPTDIR=$(pwd)
# <dst-path>: installation destination, passed by the Installer as $2.
DST=$2
# Quote and check the cd: macOS volume/destination paths may contain
# spaces, and the original would otherwise run install.sh from whatever
# the current directory happened to be.
cd "$DST" || exit 1
config/install.sh -nolib
"$SCRIPTDIR"/copy-doc "$DST"
exit 0
| true
|
9a11a9b62dd5a2bacb49bdb0bf6b57263f15d196
|
Shell
|
dstroot/.osx-defaults
|
/defaults/dock.sh
|
UTF-8
| 4,479
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh
# ------------------------------------------------------------------------------
# Copyright (c) 2014 Dan Stroot
# All rights reserved.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
# NAME: dock.sh
# PURPOSE: Setup dock
# VERSION: 1.0 Initial version
# ------------------------------------------------------------------------------
# ---- Dock preferences, applied via `defaults write com.apple.dock` ----
# Commented-out entries are kept as a menu of optional tweaks.
# NOTE(review): Dock preference changes normally need the Dock restarted
# (e.g. `killall Dock`) to take effect — confirm a caller does this.
# echo "Enable highlight hover effect for the grid view of a stack (Dock)"
# defaults write com.apple.dock mouse-over-hilite-stack -bool true
echo "Set the icon size of Dock items to 50 pixels"
defaults write com.apple.dock tilesize -int 50
echo "Change minimize/maximize window effect"
defaults write com.apple.dock mineffect -string "scale"
#
# echo "Minimize windows into their application’s icon"
# defaults write com.apple.dock minimize-to-application -bool true
#
# echo "Enable spring loading for all Dock items"
# defaults write com.apple.dock enable-spring-load-actions-on-all-items -bool true
#
# echo "Show indicator lights for open applications in the Dock"
# defaults write com.apple.dock show-process-indicators -bool true
echo "Don’t animate opening applications from the Dock"
defaults write com.apple.dock launchanim -bool false
# echo "Disable Dashboard"
# defaults write com.apple.dashboard mcx-disabled -bool true
#
# echo "Don’t show Dashboard as a Space"
# defaults write com.apple.dock dashboard-in-overlay -bool true
#
# echo "Don’t automatically rearrange Spaces based on most recent use"
# defaults write com.apple.dock mru-spaces -bool false
echo "Remove the auto-hiding Dock delay"
defaults write com.apple.dock autohide-delay -float 0
echo "Remove the animation when hiding/showing the Dock"
defaults write com.apple.dock autohide-time-modifier -float 0
echo "Automatically hide and show the Dock"
defaults write com.apple.dock autohide -bool true
# echo "Make Dock icons of hidden applications translucent"
# defaults write com.apple.dock showhidden -bool true
#
# echo "Disable the Launchpad gesture (pinch with thumb and three fingers)"
# #defaults write com.apple.dock showLaunchpadGestureEnabled -int 0
#
# echo "Reset Launchpad, but keep the desktop wallpaper intact"
# find "${HOME}/Library/Application Support/Dock" -name "*-*.db" -maxdepth 1 -delete
# echo "Add iOS Simulator to Launchpad"
# sudo ln -sf "/Applications/Xcode.app/Contents/Developer/Applications/iOS Simulator.app" "/Applications/iOS Simulator.app"
# echo "Add a spacer to the left side of the Dock (where the applications are)"
# #defaults write com.apple.dock persistent-apps -array-add '{tile-data={}; tile-type="spacer-tile";}'
#
# echo "Add a spacer to the right side of the Dock (where the Trash is)"
# #defaults write com.apple.dock persistent-others -array-add '{tile-data={}; tile-type="spacer-tile";}'
echo "Set hot corners"
# Each corner takes a wvous-<corner>-corner action code plus a
# wvous-<corner>-modifier key mask (0 = no modifier required).
# Possible values:
# 0: no-op
# 2: Mission Control
# 3: Show application windows
# 4: Desktop
# 5: Start screen saver
# 6: Disable screen saver
# 7: Dashboard
# 10: Put display to sleep
# 11: Launchpad
# 12: Notification Center
# Top left screen corner → Nothing
defaults write com.apple.dock wvous-tl-corner -int 0
defaults write com.apple.dock wvous-tl-modifier -int 0
# Top right screen corner → Nothing
defaults write com.apple.dock wvous-tr-corner -int 0
defaults write com.apple.dock wvous-tr-modifier -int 0
# Bottom left screen corner → Start screen saver
defaults write com.apple.dock wvous-bl-corner -int 5
defaults write com.apple.dock wvous-bl-modifier -int 0
# Bottom right screen corner → Mission Control
defaults write com.apple.dock wvous-br-corner -int 2
defaults write com.apple.dock wvous-br-modifier -int 0
| true
|
ae523eba4333568f4b1abf9cd527818c95ff03ec
|
Shell
|
salvolnn/CsInfoPa
|
/Algoritmi e Strutture Dati/Tesina ranking TTC Giugno 2008 by Sonic/src/tools/gaps_generator.sh
|
UTF-8
| 407
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Generate yacc/bison grammar rules for "gap" nonterminals: one rule per
# *_CLOSE token listed in tokens.txt, each with a debug-printing reduce
# action and an error-recovery alternative.
echo "/* -- block generated by $0 */"
for i in $(grep _CLOSE tokens.txt)
do
# Rule name = token lowercased with the _CLOSE suffix stripped, e.g.
# FOO_CLOSE -> gap_foo. The alternating shell/double/single quoting
# splices the token name into the C printf format string literally —
# exact output formatting is load-bearing, so the quoting is untouched.
echo gap_$(echo $i|sed 's/_CLOSE//'| tr '[:upper:]' '[:lower:]')" : $i { if ( DEBUG_LEVEL != 0 ) printf("'"reduce gaps '$i'\\n"'"); }"
echo " | error { if ( DEBUG_LEVEL != 0 ) printf("'"reduce gaps '$i' ERROR\\n"'"); errors++; yyerrok; }"
echo " ;"
echo
done
echo "/* -- end of block */"
| true
|
2aab130d8cae71bce776ebdeece6c443ca783f8d
|
Shell
|
FadeMind/archpkgbuilds
|
/AUR/dkms/spl-dkms/PKGBUILD
|
UTF-8
| 1,499
| 2.921875
| 3
|
[] |
no_license
|
#
# Maintainer: Iacopo Isimbaldi <isiachi@rhye.it>
#
# Split PKGBUILD: builds the SPL (Solaris Porting Layer) DKMS module
# package and its userspace support files from the upstream git tag.
pkgbase="spl-dkms"
pkgname=("spl-dkms" "spl-utils")
pkgver=0.6.5.6
pkgrel=2
license=('GPL')
makedepends=("git")
arch=("i686" "x86_64")
url="http://zfsonlinux.org/"
# Sources: upstream git pinned to the release tag, plus a static hostid
# file ('SKIP' disables checksumming for the git source).
source=("git+https://github.com/zfsonlinux/spl.git#tag=spl-${pkgver}"
"spl-utils.hostid")
sha256sums=('SKIP'
'ad95131bc0b799c0b1af477fb14fcf26a6a9f76079e48bf090acb7e8367bfd0e')
build() {
cd "${srcdir}/spl"
./autogen.sh
# Atomic spinlocks are only enabled for the 32-bit (i686) build.
_at_enable=""
[ "${CARCH}" == "i686" ] && _at_enable="--enable-atomic-spinlocks"
# --with-config=user: configure only the userspace parts here; the kernel
# module itself is built later by DKMS from the shipped source tree.
./configure --prefix=/usr \
--libdir=/usr/lib \
--sbindir=/usr/bin \
--with-config=user \
${_at_enable}
make
}
package_spl-dkms() {
pkgdesc="Solaris Porting Layer kernel modules."
depends=("dkms" "spl-utils=${pkgver}-${pkgrel}")
provides=("spl")
conflicts=("spl-git" "spl-lts")
install=spl.install
# Ship the cleaned source tree under /usr/src so DKMS can rebuild the
# module against any installed kernel.
dkmsdir="${pkgdir}/usr/src/spl-${pkgver}"
install -d "${dkmsdir}"
cp -a ${srcdir}/spl/. ${dkmsdir}
cd "${dkmsdir}"
make clean distclean
# Strip git metadata from the shipped tree.
find . -name ".git*" -print0 | xargs -0 rm -fr --
# Generate the dkms.conf that `dkms install` consumes.
scripts/dkms.mkconf -v ${pkgver} -f dkms.conf -n spl
chmod g-w,o-w -R .
}
package_spl-utils() {
pkgdesc="Solaris Porting Layer kernel module support files."
conflicts=("spl-utils-git" "spl-utils-lts")
cd "${srcdir}/spl"
make DESTDIR="${pkgdir}" install
install -D -m644 "${srcdir}"/spl-utils.hostid "${pkgdir}"/etc/hostid
}
| true
|
ec86d80c655e5342de6443772a9e74c32f9ea56a
|
Shell
|
xsc/bashing
|
/src/tasks/uberbash.sh
|
UTF-8
| 1,743
| 3.984375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# <help>create standalone bash script</help>
#
# Build a single self-contained ("uber") bash script via the external
# 'compile' task; optionally also emit a debug-instrumented and/or a
# self-extracting gzip-compressed variant.
#
# Options:
#   --compress    also emit $ARTIFACT_ID-$ARTIFACT_VERSION.gz.sh
#   --with-debug  also emit $ARTIFACT_ID-$ARTIFACT_VERSION.debug.sh
#
# Relies on helpers supplied by the surrounding build framework:
#   __run, verbose, success, error, fatal, debugBash
TARGET_PATH="$PROJECT_ROOT/target"
TARGET_FILE="$TARGET_PATH/$ARTIFACT_ID-$ARTIFACT_VERSION.sh"
TARGET_FILE_COMPRESSED="$TARGET_PATH/$ARTIFACT_ID-$ARTIFACT_VERSION.gz.sh"
TARGET_FILE_DEBUG="$TARGET_PATH/$ARTIFACT_ID-$ARTIFACT_VERSION.debug.sh"
COMPRESS="no"
DEBUGGABLE="no"
if ! mkdir -p "$TARGET_PATH" 2> /dev/null; then
    error "Could not create target directory: $TARGET_PATH";
    exit 1;
fi
while [ $# -gt 0 ]; do
    case "$1" in
        "--compress") COMPRESS="yes";;
        "--with-debug") DEBUGGABLE="yes";;
        *) ;;
    esac
    shift
done
verbose "Creating $TARGET_FILE ..."
# Check command status directly instead of inspecting $? afterwards, which
# is fragile (any intervening command clobbers it).
if ! __run "compile" "--compact" -o "$TARGET_FILE"; then
    fatal "An error occured while running task 'compile'."
fi
success "Uberbash created successfully."
chmod +x "$TARGET_FILE" >& /dev/null
if [[ "$DEBUGGABLE" == "yes" ]]; then
    echo "Creating $TARGET_FILE_DEBUG ..."
    # Redirect instead of 'cat file | debugBash' (useless use of cat); the
    # status checked is debugBash's, same as the original pipeline's.
    if ! debugBash < "$TARGET_FILE" > "$TARGET_FILE_DEBUG"; then
        fatal "An error occured while running task 'compile'."
    fi
    success "Uberbash (debuggable) created successfully."
    chmod +x "$TARGET_FILE_DEBUG" >& /dev/null
fi
if [[ "$COMPRESS" == "yes" ]]; then
    verbose "Compressing into $TARGET_FILE_COMPRESSED ..."
    # Self-extracting stub: everything from line 3 on is the gzip payload.
    echo "#!/usr/bin/env bash" > "$TARGET_FILE_COMPRESSED"
    echo 'tail -n +3 "$0" | gzip -d -n 2> /dev/null | bash -s "$@"; exit $?' >> "$TARGET_FILE_COMPRESSED"
    if ! gzip -c -n "$TARGET_FILE" >> "$TARGET_FILE_COMPRESSED"; then
        fatal "An error occured while running task 'compile'."
    fi
    success "Uberbash (compressed) created successfully."
    chmod +x "$TARGET_FILE_COMPRESSED" >& /dev/null
fi
exit 0
| true
|
b6c1131aa7379d920d59b2880d46304e3fbbe5d0
|
Shell
|
jeremybusk/supersimplescripts
|
/useraddsudo.sh
|
UTF-8
| 785
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Create (or recreate) a user with passwordless sudo.
#
# Usage: ./useraddsudo.sh <username> [password]
#   With no password argument, the username itself is hashed as the password.
# WARNING: an existing account of the same name is deleted first (userdel -rf).
set -e
if [[ -z ${1:-} ]]; then
  echo "Usage: $0 <username for nopass sudo>"
  # FIX: the original nested unescaped double quotes here, so the inner
  # quotes were silently eaten; single quotes print the hint literally.
  echo "ssh <myhost> 'bash -s' -- < ./$0"
  exit 1
else
  username=$1
fi
# openssl passwd -6 => SHA-512 crypt hash.
# NOTE: a fixed salt ("xyz") makes hashes predictable — test users only.
if [[ -z ${2:-} ]]; then
  userpass=$(openssl passwd -6 -salt xyz "$username")
else
  userpass=$(openssl passwd -6 -salt xyz "$2")
fi
# Remove any pre-existing account; ignore failure when it does not exist.
userdel -rf "$username" || echo "skip"
useradd -c "Test User" -p "$userpass" "$username" -s /bin/bash -m -d "/home/$username"
echo "$username ALL=(ALL) NOPASSWD:ALL" > "/etc/sudoers.d/$username"
# Sanity check: the new user can sudo without a password prompt.
sudo -u "$username" -- sudo whoami
echo "Finish"
# ssh sandbox1 "bash -s" -- < ./uadd
# echo "username:password" | chpasswd
# https://unix.stackexchange.com/questions/81240/manually-generate-password-for-etc-shadow
# echo "username:encryptedPassWd" | chpasswd -e
# mkpasswd --method=SHA-512 --stdin
| true
|
7943cd05d51a01873b66df94859142d6c5b7a33c
|
Shell
|
charlottestoffel1213/deployement
|
/jenkins.sh
|
UTF-8
| 480
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Jenkins (LTS line) and a Java runtime on a Debian/Ubuntu host,
# then expose java on PATH and (re)start the service.
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>INSTALLATION JENKINS<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
# Register the Jenkins apt repository and its signing key.
wget -q -O - https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo apt-key add -
sudo sh -c 'echo deb https://pkg.jenkins.io/debian-stable binary/ > /etc/apt/sources.list.d/jenkins.list'
sudo apt-get update
# -y: non-interactive install (the original would hang waiting for confirmation).
sudo apt-get install -y jenkins default-jre
# FIX: 'type -p java' yields the java *binary*; PATH entries must be
# directories, so take dirname. Append (>>) instead of '>' — the original
# overwrote the user's entire ~/.bashrc. Escape \$PATH so the written line
# stays dynamic instead of freezing today's PATH value.
chemin=$(dirname "$(type -p java)")
echo "PATH=$chemin:\$PATH" >> ~/.bashrc
# Source the file we just modified (the original sourced /etc/bash.bashrc,
# which never saw the change).
source ~/.bashrc
service jenkins restart
| true
|
ecb0b02fe9b5bceb94d8a1657857d3c83132b704
|
Shell
|
LiberatorUSA/GUCEF
|
/dependencies/agar/agar-au-config
|
UTF-8
| 938
| 3.625
| 4
|
[
"Apache-2.0",
"Bitstream-Vera",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
#!/bin/sh
# Generated for Agar by BSDBuild 3.2.
# <http://bsdbuild.hypertriton.com>
# pkg-config-style helper: reports install paths and compile/link flags
# for the Agar audio library (ag_au). NOTE: generated file — regenerate
# via BSDBuild rather than editing values by hand.
prefix="/usr/local"
exec_prefix="/usr/local"
exec_prefix_set="no"
libdir="/usr/local/lib"
usage="Usage: agar-au-config [--prefix[=DIR]] [--exec-prefix[=DIR]] [--version] [--cflags] [--libs]"
if test $# -eq 0; then
echo "${usage}" 1>&2
exit 1
fi
while test $# -gt 0; do
# First pass: for --opt=value arguments, extract "value" into $optarg.
# LC_ALL=C keeps the sed character class byte-wise/locale-independent.
case "$1" in
-*=*)
optarg=`echo "$1" | LC_ALL="C" sed 's/[-_a-zA-Z0-9]*=//'`
;;
*)
optarg=
;;
esac
# Second pass: act on the option itself.
case $1 in
--prefix=*)
prefix=$optarg
# Unless --exec-prefix was given explicitly, it follows --prefix.
if test $exec_prefix_set = no ; then
exec_prefix=$optarg
fi
;;
--prefix)
echo "$prefix"
;;
--exec-prefix=*)
exec_prefix=$optarg
exec_prefix_set=yes
;;
--exec-prefix)
echo "$exec_prefix"
;;
--version)
echo "1.5.0"
;;
--cflags)
echo "-I/usr/local/include/agar "
;;
--libs | --static-libs)
echo "-L/usr/local/lib -lag_au "
;;
*)
echo "${usage}" 1>&2
exit 1
;;
esac
shift
done
| true
|
39c9ea953c3b772f389fdf4242d9bdad3180da09
|
Shell
|
kui/ansipixels-server
|
/scripts/gen-init.sh
|
UTF-8
| 1,011
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/sh
# Emit (to stdout) a SysV init script for the ansipixels server with the
# project directory and the run-as user baked in.
# Usage: scripts/gen-init.sh USERNAME > /etc/init.d/ansipixel
set -eu
cd "$(dirname "$0")/.."
usage() {
echo "Usage: $0 USERNAME"
}
if [ $# != 1 ]; then
usage
exit
fi
project_dir="$(pwd)"
user="$1"
# Unquoted EOF: $project_dir and $user expand *now*; everything escaped
# with a backslash (\$network, \$1, \${dir}, …) survives literally into
# the generated init script.
cat <<EOF
#!/bin/sh
### BEGIN INIT INFO
# Provides: ansipixel
# Required-Start: \$network \$local_fs \$remote_fs
# Required-Stop: \$network \$local_fs \$remote_fs
# Should-Start: \$network
# Should-Stop: \$network
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: start ansipixel server
### END INIT INFO
dir=$project_dir
usage() {
echo "Usage: \$0 {start|stop|restart|status|update}"
}
set -eu
if [ \$# -eq 0 ]; then
usage
exit
fi
case "\$1" in
start|stop|status)
sudo -u$user "\${dir}/scripts/\$1"
;;
restart)
sudo -u$user "\${dir}/scripts/stop"
sudo -u$user "\${dir}/scripts/start"
;;
update)
cd "\${dir}"
sudo -u$user git pull
sudo -u$user ./mvnw compile
;;
*)
usage;;
esac
EOF
| true
|
549f577a0c5d755c78e7c936fb4f5e0c436233a3
|
Shell
|
Sharon-Jin/Show-and-Tell
|
/im2txt/evaluate.sh
|
UTF-8
| 609
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Evaluate the im2txt (Show and Tell) model on the MSCOCO validation shards.
# Runs forever: the bazel-built evaluate binary periodically reloads the
# newest checkpoint from MODEL_DIR/train and writes metrics to MODEL_DIR/eval.
DIR="/home/ubuntu/new_show_and_tell.tensorflow_1.0/models/im2txt/"
MSCOCO_DIR="${DIR}/im2txt/data/mscoco"
MODEL_DIR="${DIR}/im2txt/model"
# Ignore GPU devices (only necessary if your GPU is currently memory
# constrained, for example, by running the training script).
export CUDA_VISIBLE_DEVICES=""
# Run the evaluation script. This will run in a loop, periodically loading the
# latest model checkpoint file and computing evaluation metrics.
bazel-bin/im2txt/evaluate \
--input_file_pattern="${MSCOCO_DIR}/val-?????-of-00004" \
--checkpoint_dir="${MODEL_DIR}/train" \
--eval_dir="${MODEL_DIR}/eval"
| true
|
0c8baa2829f66ffc32a9a11fc746c1f55b9b8d72
|
Shell
|
pivotal-mea-pa/pcf-environments
|
/.caps-env_pcf-demo-2
|
UTF-8
| 4,815
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Environment variables required by Terraform
# to bootstrap and install the PCF environment
#
# (All TF_VAR_* exports are consumed by Terraform as input variables;
# commented-out exports document the available knobs and their defaults.)
#
# Automation bootstrap
#
#export TF_VAR_trace=true
#export TF_VAR_unpause_deployment_pipeline=true
#export TF_VAR_set_start_stop_schedule=false
#
# GCP credentials
#
export TF_VAR_gcp_credentials=$GOOGLE_CREDENTIALS
export TF_VAR_gcp_region=$GOOGLE_REGION
export TF_VAR_gcp_storage_access_key=$GCS_STORAGE_ACCESS_KEY
export TF_VAR_gcp_storage_secret_key=$GCS_STORAGE_SECRET_KEY
#
# Terraform state attributes
#
export TF_VAR_terraform_state_bucket=tfstate-${GOOGLE_REGION}
#
# Certificate Subject data for certificate creation
#
#export TF_VAR_company_name=Pivotal Services
#export TF_VAR_organization_name=PSO EMEA
#export TF_VAR_locality=Dubai
#export TF_VAR_province=Dubayy
#export TF_VAR_country=AE
#
# VPC configuration
#
# This value should match the name of your caps init environment.
# It is set when you run "caps-init first time and should not be
# changed.
export TF_VAR_vpc_name=pcf-demo-2
#export TF_VAR_max_azs=1
export TF_VAR_vpc_dns_zone=demo2.pocs.pcfs.io
export TF_VAR_vpc_parent_dns_zone_name=pocs-pcfs-io
#
# Bastion inception instance variables
#
#export TF_VAR_bastion_instance_type=n1-standard-2
#export TF_VAR_bastion_data_disk_size=250
#
# Bastion access configuration
#
#export TF_VAR_bastion_host_name=bastion
#export TF_VAR_bastion_admin_ssh_port=22
# This needs to be a name other than 'root' or 'admin' otherwise
# the user setup on the bastion will fail and you will be unable
# to login to the instance.
export TF_VAR_bastion_admin_user=bastion-admin
export TF_VAR_bastion_setup_vpn=false
#export TF_VAR_bastion_vpn_port=2295
#export TF_VAR_bastion_vpn_protocol=udp
#export TF_VAR_bastion_vpn_network=192.168.111.0/24
export TF_VAR_bastion_allow_public_ssh=true
#
# VPC SMTP Server Relay
#
export TF_VAR_smtp_relay_host=smtp.sendgrid.net
export TF_VAR_smtp_relay_port=2525
export TF_VAR_smtp_relay_api_key=$SENDGRID_API_KEY
# Email to send notifications to
export TF_VAR_notification_email=msamaratunga@pivotal.io
#
# Local file path to write SSH private key for bastion instance
#
#export TF_VAR_ssh_key_file_path=/Users/msamaratunga/Work/pivotal/pcf-environments/.workspace/pcf-demo-2
#
# Jumpbox
#
#export TF_VAR_deploy_jumpbox=false
#export TF_VAR_jumpbox_data_disk_size=160
#
# Concourse Automation common attributes
#
#
# Cloud Automation Pipelines (CAPs) repository
#
#export TF_VAR_automation_pipelines_repo=https://github.com/mevansam/caps.git
export TF_VAR_automation_pipelines_branch=dev
#
# Environment configuration repository
#
export TF_VAR_env_config_repo=https://github.com/mevansam/pcf-environments.git
export TF_VAR_env_config_repo_branch=master
export TF_VAR_env_config_path=pcf-demo-2/config
#
# Automation extensions git repository
#
export TF_VAR_automation_extensions_repo=https://github.com/mevansam/pcf-environments.git
export TF_VAR_automation_extensions_repo_branch=master
# Path to terraform template overrides in the 'automation extensions'
# repository for creating PCF PAS infrastructure.
export TF_VAR_pcf_terraform_templates_path=pcf-demo-2/templates/terraform
# Path to json template overrides in the 'automation extensions'
# repository for configuring PCF tiles. This folder should have
# folders named by the tile name as provided in the 'products'
# variable below.
export TF_VAR_pcf_tile_templates_path=pcf-demo-2/templates/tile
# The list of PCF environments to deploy.
#export TF_VAR_pcf_environments='["sandbox"]'
# The PCF Networks to create. The order in which subnets should
# be configured are provided via the 'subnet_config_order' key.
# If you need to add subnets always add to the end of this list.
# Otherwise any reordering will result in networks being recreated
# and may have undesired outcomes.
#export TF_VAR_pcf_networks='{
#  sandbox = {
#    service_networks = "services,dynamic-services"
#    subnet_config_order = "infrastructure,pas-1,services-1,dynamic-services-1,monitoring"
#  }
#}'
# The CIDRs of the PCF Networks subnets. The range 192.168.0.0/22
# is reserved for bootstrap services and should not be used for PCF
# environments. Multiple subnets must post-fix the network name
# with '-#' for each subnet. Subnets are additive once they have
# been created.
#export TF_VAR_pcf_network_subnets='{
#  sandbox = {
#    infrastructure = "192.168.101.0/26"
#    pas-1 = "192.168.4.0/22"
#    services-1 = "192.168.8.0/22"
#    dynamic-services-1 = "192.168.12.0/22"
#    monitoring = "192.168.101.64/26"
#  }
#}'
# (closing quote added above so the example can be uncommented as-is,
# matching the pcf_networks example earlier in this file)
# Comma separated list of additional DNS hosts to use
# for instances deployed to the pcf networks.
#export TF_VAR_pcf_network_dns=169.254.169.254
#
# PCF Install params
#
export TF_VAR_pivnet_token=$PIVNET_TOKEN
| true
|
dac9c01addaabbc182eb7a52207a19f44cd36140
|
Shell
|
JEstabrook/bergamot
|
/HetMan/experiments/gene_variants/run_test.sh
|
UTF-8
| 1,528
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH --job-name=gn-vars
#SBATCH --partition=exacloud
#SBATCH --mem=24000
#SBATCH --time=300
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
#SBATCH --output=/home/exacloud/lustre1/CompBio/mgrzad/tmp/gene-vars_%j.out
#SBATCH --error=/home/exacloud/lustre1/CompBio/mgrzad/tmp/gene-vars_%j.err
#SBATCH --verbose

# Enumerate sub-variants of one gene in one TCGA cohort and fit their
# expression effects. Cohort and gene come from the environment
# (defaults: BRCA / TP53).
cd ~/compbio/bergamot
source activate HetMan

# finds the name of the TCGA cohort to use
if [ -z ${cohort+x} ]
then
	echo "no cohort defined, defaulting to BRCA"
	export cohort="BRCA"
fi

# finds the name of the gene whose sub-variants are to be considered
if [ -z ${gene+x} ]
then
	echo "no gene defined, defaulting to TP53"
	export gene="TP53"
fi

# gets the output directory where results will be saved,
# removing it if it already exists
TEMPDIR=HetMan/experiments/gene_variants/output/$cohort/$gene
echo "$TEMPDIR"
rm -rf "$TEMPDIR"

# creates the output directory and sub-directories
mkdir -p "$TEMPDIR/slurm" "$TEMPDIR/tmp" "$TEMPDIR/results"

# submits the script that enumerates the gene sub-variants to be considered
# BUG FIX: 'srun -p=exacloud' passes the literal string "=exacloud" as the
# partition name (the short flag takes no '='); use the long form.
srun --partition=exacloud --ntasks=1 --cpus-per-task=1 \
	--output="$TEMPDIR/slurm/setup.txt" --error="$TEMPDIR/slurm/setup.err" \
	python HetMan/experiments/gene_variants/setup.py $cohort $gene

# submits the array script that finds these sub-variants' expression effects
sbatch HetMan/experiments/gene_variants/fit.sh

srun --partition=exacloud \
	--output="$TEMPDIR/slurm/fit_cna.txt" --error="$TEMPDIR/slurm/fit_cna.err" \
	python HetMan/experiments/gene_variants/fit_cna.py $cohort $gene
| true
|
8866626ae594ec31520671c4f68ffe1934e0fa78
|
Shell
|
ThorMortensen/scripts
|
/script.conf
|
UTF-8
| 827
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
################################################
# Git Stuff
################################################
# Default commit message used by the auto-commit helper scripts.
LAZY_CONF_MSG="this is not a message (lazy commit)"
# First must be scripts
# Colon-separated list of repositories to auto-commit.
AUTO_COMMIT_PATHS=~/scripts:~/afgangsprojekt_DTU_2017:~/.config/sublime-text-3/Packages/User
################################################
# Rovsing stuff
################################################
# Work machines (IPs on the 10.0.100.0/24 office network) and the local
# mount points / directories used to reach them.
ROV_IP_LABTOP=10.0.100.164
ROV_IP_LAP_DESK=10.0.100.104
ROV_IP_DESKTOP=10.0.100.145
ROV_HOME=~/rovsing
ROV_LABTOP=~/thm_labtop
ROV_DESKTOP=~/thm_desktop
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Helper functions
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Split a dotted IPv4 address into network prefix and host octet.
# Arguments: $1 - address in a.b.c.d form
# Globals written:
#   IP_SPLIT_UPPER - first three octets ("a.b.c")
#   IP_SPLIT_LOWER - last octet ("d")
# Uses parameter expansion instead of the original `echo $1 | cut`
# backtick pipelines: no subshells/forks, and the argument is no longer
# subject to word splitting.
splitIp(){
	IP_SPLIT_UPPER=${1%.*}
	IP_SPLIT_LOWER=${1##*.}
}
| true
|
3cb834e7cfdc3a82df731c1a8e1abf1665d7c95c
|
Shell
|
kubestyle/knative-tutorial
|
/setup/install-pubsub-serviceaccount
|
UTF-8
| 1,175
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Create the "cre-pubsub" GCP service account used by knative-gcp Pub/Sub
# sources, grant it pubsub.editor on the project and — unless workload
# identity is in use — download a key and store it as a Kubernetes secret.
# Expects PROJECT_ID to be set by the sourced config file.
DIR="$(dirname "$0")"
. "${DIR}/config"
# -v: echo each script line as it is read (simple progress trace).
set -v
# Reference:
# https://github.com/google/knative-gcp/blob/master/docs/install/pubsub-service-account.md
# Enable the Cloud Pub/Sub API on your project:
gcloud services enable pubsub.googleapis.com
# Create a new Service Account named cre-pubsub
gcloud iam service-accounts create cre-pubsub
# Give that Service Account the necessary permissions
# Note: You probably need something more fine-grained than roles/pubsub.editor
gcloud projects add-iam-policy-binding ${PROJECT_ID} \
--member=serviceAccount:cre-pubsub@${PROJECT_ID}.iam.gserviceaccount.com \
--role roles/pubsub.editor
# With workload identity the pod impersonates the service account directly,
# so no key file or secret is needed.
if [ "$1" == "workload" ]; then
echo "Workload identity, nothing to do"
else
# Download a new JSON private key for that Service Account
# Be sure not to check this key into source control!
gcloud iam service-accounts keys create cre-pubsub.json \
--iam-account=cre-pubsub@${PROJECT_ID}.iam.gserviceaccount.com
# Create a secret on the Kubernetes cluster with the downloaded key
# in the namespace the resources will reside (default in this case).
kubectl --namespace default create secret generic google-cloud-key --from-file=key.json=cre-pubsub.json
fi
| true
|
baf8f86c25ae2ac65e568a40de85f7a595901ec7
|
Shell
|
Miracle217/React-Redux-Sagas
|
/build.sh
|
UTF-8
| 1,746
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Build/deploy wrapper around the project's gulp tasks.
# Usage: ./build.sh {dev|test|build|config|deploy|docker|shipit|help}
env='local'
# First CLI argument selects the action (see case at the bottom).
key="$1"
GULP_PATH=node_modules/.bin/gulp
# Build environment: $NODE_ENV when set, otherwise "local".
BUILD_ENV=${NODE_ENV:-$env}
# Run the default gulp task (local dev server).
dev () {
echo "Executing: NODE_ENV=$BUILD_ENV $GULP_PATH"
NODE_ENV=$BUILD_ENV $GULP_PATH
}
# Run the unit-test suite. (Shadows the `test` builtin within this script.)
test () {
echo "Executing: $GULP_PATH test"
$GULP_PATH test
}
# Copy the environment-specific config into app/config.js.
config () {
echo "Executing: NODE_ENV=$BUILD_ENV $GULP_PATH copy-config"
NODE_ENV=$BUILD_ENV $GULP_PATH copy-config
}
# Compile the dist/ output for deployment.
build () {
echo "Executing: NODE_ENV=$BUILD_ENV $GULP_PATH compile"
NODE_ENV=$BUILD_ENV $GULP_PATH compile
}
# Push dist/ to S3.
deploy () {
echo "Executing: NODE_ENV=$BUILD_ENV $GULP_PATH deploy"
NODE_ENV=$BUILD_ENV $GULP_PATH deploy
}
# Remove any previously built distribution.
clean () {
echo "Executing: $GULP_PATH clean"
$GULP_PATH clean
}
help () {
echo "
Simple build and deploy script
./build.sh {dev|test|build|config|deploy|docker|shipit|help}
Options:
dev:
Builds and launches a local dev server
test:
Runs unit tests
config:
Copy Environment based config to app/config.js
clean:
Removes previously built distrobution
build:
Builds the dist/ directory data for deploy
deploy:
Deploys the dist/ data to S3
docker:
Run npm start
shipit:
Performs a 'mark', 'build' & 'deploy' in one fell swoop
help:
Displays this help message.
"
}
# Dispatch on the requested action; most actions refresh config first.
case $key in
dev)
config
dev
;;
test)
config
test
;;
clean)
clean
;;
config)
config
;;
build)
config
build
;;
deploy)
deploy
;;
shipit)
clean
config
build
deploy
;;
docker)
npm start
;;
help)
help
;;
*)
echo "UNKNOWN ACTION: $key"
echo
help
;;
esac
| true
|
0fffe2859a7ffce27d2d35b311913ba5f5763eb2
|
Shell
|
laputa-er/mqs_shell_scripts
|
/tests/mqs_test.sh
|
UTF-8
| 666
| 3.125
| 3
|
[] |
no_license
|
#! /bin/zsh -y
# Author: eli01linux@gmail.com(MengQingshen)
# History:
# 2017-3-22 MengQingshen First release
# shunit2-based test scaffold (zsh) for the mqs rename scripts.
function test_get_absolute_path_by_relavent_path () {
# NOTE(review): test body not implemented yet. An empty function body is a
# syntax error under bash; this file targets zsh (see shebang) — confirm
# zsh accepts it before porting.
}
#-----------------------------------------------------------------------------
# shunit2 functions
#
# Runs once before any test: source the scripts under test.
function oneTimeSetUp () {
excuteImport ${CURSHELL_DIR}
}
# @param {string} $1 The path of the shell script it self.
function excuteImport () {
. $1/../sources/common/mqs_functions.sh
. $1/../config/rename.sh
. $1/../sources/rename/functions.sh
}
CURSHELL_DIR=$(dirname $0)
# Tell shunit2 which script owns the test functions.
SHUNIT_PARENT=$0
# zsh: enable sh-style word splitting, which shunit2 relies on.
[ -n "${ZSH_VERSION:-}" ] && setopt shwordsplit
. ${CURSHELL_DIR}/../shunit2-2.1.6/src/shunit2
| true
|
16fed0d59b30c38f93dd861c2d98fa48780dad9f
|
Shell
|
pomwth/dotfiles
|
/configs/zsh/.zshrc
|
UTF-8
| 591
| 2.65625
| 3
|
[] |
no_license
|
# Interactive zsh setup: oh-my-zsh + powerlevel9k theme, auto-start tmux.
export TERM="xterm-256color"
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
#ZSH_THEME="robbyrussell"
ZSH_THEME="powerlevel9k/powerlevel9k"
# Two-line left prompt ending in "$ "; no right-side prompt segments.
POWERLEVEL9K_DISABLE_RPROMPT=true
POWERLEVEL9K_PROMPT_ON_NEWLINE=true
POWERLEVEL9K_MULTILINE_LAST_PROMPT_PREFIX="$ "
POWERLEVEL9K_MULTILINE_FIRST_PROMPT_PREFIX=""
plugins=(git)
source $ZSH/oh-my-zsh.sh
# If not running interactively, do not do anything
[[ $- != *i* ]] && return
# Otherwise start tmux
# (exec replaces this shell; the line below only runs outside tmux)
[[ -z "$TMUX" ]] && exec tmux
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
| true
|
b67c7e504054a2ce418eec0c70431053cb226fb3
|
Shell
|
lkm1321/ICML-2017-Papers
|
/organize.bash
|
UTF-8
| 331
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Sort downloaded papers: each file in pdf/ named "<prefix>--<title>" is
# copied to "<prefix-with-digits-stripped>/<title>".
#
# FIX: iterate with a glob instead of parsing `ls` output through an
# unguarded `read` (which mangled backslashes and leading whitespace).
for path in pdf/*; do
	[ -e "$path" ] || continue   # unmatched glob stays literal; skip it
	CURRENT_FILENAME=${path##*/}
	# Folder = text before the first "--", with all digits removed.
	NEW_FOLDERNAME=$(echo "$CURRENT_FILENAME" | awk -F "--" '{print $1}' | tr -d 0-9)
	# File name = text after the "--" separator.
	NEW_FILENAME=$(echo "$CURRENT_FILENAME" | awk -F "--" '{print $2}')
	echo "$NEW_FOLDERNAME/$NEW_FILENAME"
	mkdir -p "$NEW_FOLDERNAME"
	cp "pdf/$CURRENT_FILENAME" "$NEW_FOLDERNAME/$NEW_FILENAME"
done
| true
|
b000cf7dc2c307919b0c49bebd869946ac12cf66
|
Shell
|
willcassella/config
|
/linux/bin/xpaste
|
UTF-8
| 124
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the X clipboard to stdout (xclip wrapper usable from cron/ssh).
#
# FIX: the original used 'set -eou pipefail'. In that cluster '-o' is not
# the last option, so per POSIX option-parsing rules its argument is
# ambiguous ('u' may be consumed as the -o option *name*, which is invalid
# and makes the whole set call fail, leaving NO safety options enabled).
# Use the canonical, unambiguous ordering.
set -euo pipefail
# Fall back to the primary display when DISPLAY is unset (e.g. from cron).
if [ -z "${DISPLAY:-}" ]; then
    export DISPLAY=:0
fi
exec xclip -out -selection clipboard
| true
|
324d9a9d9a5ff077af1037e899e1c80eb6338464
|
Shell
|
sjmudd/maxscale_sandbox
|
/my_ipaddress
|
UTF-8
| 144
| 2.625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#
# get my ip address
# - linux only and assumes the device is eth0
# NOTE(review): depends on the legacy net-tools ifconfig output format
# ("inet addr:10.0.0.1 ..."): the trailing `cut -d: -f2` strips the
# "addr:" prefix, which newer ifconfig builds no longer print, and modern
# interfaces are rarely named eth0 — verify on the target distro, or
# consider `ip -4 addr show` instead.
ifconfig eth0 | grep -w inet | awk '{ print $2 }' | cut -d: -f2
| true
|
8bd5dd70d7c8c6d714b9ced5e2a7cb3d1ce3086f
|
Shell
|
wmarquardt/aerospike-munin-plugins
|
/aero_client_write_
|
UTF-8
| 667
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# Munin plugin: Aerospike client writes per second for one namespace.
# The namespace comes from the symlink suffix (aero_client_write_<ns>).
namespace=${0##*aero_client_write_}

# Fetch the cumulative client_write_success counter for the namespace.
# (Factored out — the original duplicated this pipeline twice.)
get_write_success() {
	aql -c "stat namespace ${namespace}" | grep client_write_success | tr '"' ' ' | head -n 1 | awk '{print$4}'
}

if [ "$1" = "autoconf" ]; then
	echo yes
	exit 0
fi
if [ "$1" = "config" ]; then
	echo "graph_title Aerospike client write per second on $namespace namespace"
	echo "aerowritesuccess.label Write"
	echo "aerowritesuccess.draw AREA"
	echo 'graph_category Aerospike'
	exit 0
fi

# Writes/sec = delta of the cumulative counter over a 1-second window.
TOTALBEFORE=$(get_write_success)
sleep 1   # plain '1': POSIX sleep does not accept the GNU-only '1s' suffix
TOTALAFTER=$(get_write_success)
PERSEC=$((TOTALAFTER - TOTALBEFORE))
# printf instead of 'echo -n' (unportable under /bin/sh); munin expects a
# newline-terminated value line, which the original omitted.
printf 'aerowritesuccess.value %s\n' "$PERSEC"
|
840d9150ab1da051ef53054bf8a71e7aa2874b07
|
Shell
|
davidjenni/provision-corers-cluster
|
/create_do_droplet.sh
|
UTF-8
| 1,851
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision a 3-node CoreOS cluster on DigitalOcean droplets.
#
# FIX: the original shebang was /bin/sh, but the script relies on the
# bash-only brace expansion {1..3}; under dash/ash the droplet-name loop
# would run once with the literal string "{1..3}". Run under bash.
#
# Required env: DO_TOKEN, DO_SSH_PUB_KEY_FILE
# Optional env: DO_REGION, DO_SIZE, DO_DROPLET_BASENAME, CLOUD_CONFIG
DO_REGION=${DO_REGION:="sfo1"}
DO_SIZE=${DO_SIZE:="512mb"}
DO_DROPLET_BASENAME=${DO_DROPLET_BASENAME:="coreos"}

# Build the JSON name list ("coreos1","coreos2",...) and count droplets.
# number of droplets to create:
for i in {1..3}
do
    if [ "$i" -eq 1 ]; then
        _DROPLETS=\"${DO_DROPLET_BASENAME}1\"
    else
        _DROPLETS=${_DROPLETS},\"${DO_DROPLET_BASENAME}$i\"
    fi
    _NUM_DROPLETS=$i
done

_CLOUD_CONFIG=${CLOUD_CONFIG:="cloud-config.yaml"}
_CLOUD_CONFIG_GEN=$(basename -s .yaml "${_CLOUD_CONFIG}").gen.yaml

# Validate required inputs before doing any remote work.
if [ -z "${DO_TOKEN+x}" ];
then echo "Must define env var DO_TOKEN with DO app key, see https://cloud.digitalocean.com/settings/api/tokens" && exit 1;
fi
if [ -z "${DO_SSH_PUB_KEY_FILE+x}" ];
then echo "Must define env var DO_SSH_PUB_KEY_FILE with SSH public key file" && exit 1;
fi
if [ ! -r "${DO_SSH_PUB_KEY_FILE}" ];
then echo "Cannot find SSH public key file ${DO_SSH_PUB_KEY_FILE}" && exit 1;
fi

# DigitalOcean identifies uploaded SSH keys by their MD5 fingerprint.
_DO_SSH_KEY_ID=$(ssh-keygen -l -E md5 -f "${DO_SSH_PUB_KEY_FILE}" | cut -d ' ' -f 2 | cut -d : -f 2-17)
_DO_SSH_PUB_KEY_CONTENT=$(cat "${DO_SSH_PUB_KEY_FILE}")

echo Getting etcd discovery token...
_ETCD_DISCOVERY=$(curl -w "\n" "https://discovery.etcd.io/new?size=${_NUM_DROPLETS}")
echo got: ${_ETCD_DISCOVERY}

# Inject the SSH key and discovery URL into the cloud-config template.
sed \
    -e "s|{{DO_SSH_PUB_KEY}}|${_DO_SSH_PUB_KEY_CONTENT}|g" \
    -e "s|{{ETCD_DISCOVERY_URL}}|${_ETCD_DISCOVERY}|g" \
    "${_CLOUD_CONFIG}" \
    > "${_CLOUD_CONFIG_GEN}"

# Create all droplets in a single API call. The user_data payload needs its
# double quotes escaped to survive embedding in the JSON body.
curl -X POST "https://api.digitalocean.com/v2/droplets" \
    --header "Content-Type: application/json" \
    --header "Authorization: Bearer ${DO_TOKEN}" \
    --data '{"region":"'"${DO_REGION}"'",
    "names":[ '"${_DROPLETS}"' ],
    "image":"coreos-stable",
    "size":"'"${DO_SIZE}"'",
    "ssh_keys":["'"${_DO_SSH_KEY_ID}"'"],
    "ipv6": true,
    "private_networking": true,
    "user_data": "'"$(sed 's/\"/\\\"/g' "${_CLOUD_CONFIG_GEN}")"'" }'
| true
|
9b4a13642267966b1d5342094b11af823edcf448
|
Shell
|
xx94xuan/mysite
|
/build/scripts/build_base.sh
|
UTF-8
| 272
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the base docker image, optionally bypassing the layer cache.
#
# Env:
#   BASE_IMAGE  - tag for the image being built
#   DOCKERFILE  - path to the Dockerfile
#   FORCE_BUILD - "true" to build with --no-cache
BASE_IMAGE=${BASE_IMAGE}
DOCKERFILE=${DOCKERFILE}
FORCE_BUILD=${FORCE_BUILD}
# BUG FIX: the original `if $FORCE_BUILD == "true"` *executed* the value of
# FORCE_BUILD as a command with '==' and 'true' as its arguments; it only
# appeared to work because the `true` builtin ignores arguments. Use a
# proper string comparison.
if [ "$FORCE_BUILD" = "true" ]
then
    echo "force build..."
    docker build --no-cache -f "$DOCKERFILE" -t "$BASE_IMAGE" .
else
    docker build -f "$DOCKERFILE" -t "$BASE_IMAGE" .
fi
| true
|
63e81ff6ee4ffb2a626d3d5094ff08b43aaa967b
|
Shell
|
rheehot/cloud-barista
|
/cb-tumblebug/test/official/5.spec/unregister-spec.sh
|
UTF-8
| 921
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Unregister a VM spec resource from CB-Tumblebug for the selected
# CSP/region. Args: $1 CSP (aws|azure|gcp|alibaba), $2 region index
# (default 1), $3 resource-name postfix (default "developer").
source ../conf.env
echo "####################################################################"
echo "## 5. spec: Unregister"
echo "####################################################################"
CSP=${1}
REGION=${2:-1}
POSTFIX=${3:-developer}
# Map the CSP name to its index in the CONN_CONFIG table.
if [ "${CSP}" == "aws" ]; then
echo "[Test for AWS]"
INDEX=1
elif [ "${CSP}" == "azure" ]; then
echo "[Test for Azure]"
INDEX=2
elif [ "${CSP}" == "gcp" ]; then
echo "[Test for GCP]"
INDEX=3
elif [ "${CSP}" == "alibaba" ]; then
echo "[Test for Alibaba]"
INDEX=4
else
echo "[No acceptable argument was provided (aws, azure, gcp, alibaba, ...). Default: Test for AWS]"
CSP="aws"
INDEX=1
fi
# NOTE(review): CONN_CONFIG is assumed to be an associative array keyed
# "<csp-index>,<region>" defined by ../conf.env — confirm against conf.env.
# TumblebugServer and NS_ID likewise come from the sourced environment.
curl -sX DELETE http://$TumblebugServer/tumblebug/ns/$NS_ID/resources/spec/${CONN_CONFIG[$INDEX,$REGION]}-${POSTFIX} -H 'Content-Type: application/json' -d \
'{
"ConnectionName": "'${CONN_CONFIG[$INDEX,$REGION]}'"
}' | json_pp #|| return 1
| true
|
13c4e2f7701a13cf3edc31cd286f8943dbe363d7
|
Shell
|
mirandaconrado/task-distribution
|
/test.sh
|
UTF-8
| 756
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build and exercise the example twice: a plain build and an MPI build.
# Each phase configures (first run only), compiles, and runs the example's
# check/run/invalidate/clean cycle.
#
# BUG FIX: the original pattern `make; if [ $? != 0 ]; then exit; fi` was
# broken — a bare `exit` returns the status of the *last* command, which is
# the successful `[` test, so a failed build exited with status 0.
# `make || exit 1` both checks make directly and propagates failure.
if [ ! -d build_normal ];
then
    mkdir build_normal;
    cd build_normal;
    cmake .. -DCMAKE_BUILD_TYPE=Debug
else
    cd build_normal;
fi;
make || exit 1
rm -f example.archive
./example/example.bin check
./example/example.bin run
./example/example.bin invalidate -i 'fibonacci'
./example/example.bin clean
./example/example.bin run
cd ..
if [ ! -d build_mpi ];
then
    mkdir build_mpi;
    cd build_mpi;
    cmake .. -DCMAKE_BUILD_TYPE=Debug -DENABLE_MPI=True
else
    cd build_mpi;
fi;
make || exit 1
rm -f example.archive
./example/example.bin check
mpirun -np 2 ./example/example.bin run
./example/example.bin invalidate -i 'fibonacci'
./example/example.bin clean
mpirun -np 2 ./example/example.bin run
cd ..
| true
|
ec28979dcfdbec25e095b0f14dee4de3051c01f1
|
Shell
|
Ckrielle/dotfiles
|
/compiler
|
UTF-8
| 1,764
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/sh
# This script is meant to instantly compile the below document types from vim:
#
# tex files: Compiles to pdf, including bibliography if necessary
# md files: Compiles to pdf via pandoc
# rmd files: Compiles via R Markdown
# c files: Compiles via gcc
# py files: Runs via python command
# go files: Compiles & runs with go run
# config.h files: Recompiles & installs scripts
# Resolve the absolute path, its directory, the extension-less base name
# and the extension; dispatch on the extension below.
file=$(readlink -f "$1")
dir=${file%/*}
base="${file%.*}"
ext="${file##*.}"
cd "$dir" || exit
# LaTeX: pick xelatex if the first 5 lines request it; rerun after biber
# so bibliography cross-references resolve.
textype() { \
command="pdflatex"
( sed 5q "$file" | grep -i -q 'xelatex' ) && command="xelatex"
$command --output-directory="$dir" "$base" &&
grep -i addbibresource "$file" >/dev/null &&
biber --input-directory "$dir" "$base" &&
$command --output-directory="$dir" "$base" &&
$command --output-directory="$dir" "$base"
}
case "$ext" in
ms) preconv "$file" | refer -PS -e | groff -me -ms -kept -T pdf > "$base".pdf ;;
mom) preconv "$file" | refer -PS -e | groff -mom -kept -T pdf > "$base".pdf ;;
[0-9]) preconv "$file" | refer -PS -e | groff -mandoc -T pdf > "$base".pdf ;;
[rR]md) Rscript -e "rmarkdown::render('$file', quiet=TRUE)" ;;
tex) textype "$file" ;;
md)if command -v lowdown >/dev/null; then
lowdown -d nointem -e super "$file" -Tms | groff -mpdfmark -ms -kept > "$base".pdf
elif command -v groffdown >/dev/null; then
groffdown -i "$file" | groff > "$base.pdf"
else
pandoc "$file" --pdf-engine=xelatex -o "$base".pdf
fi ; ;;
h) sudo make install ;;
c) cc "$file" -o "$base" && "$base" ;;
py) python "$file" ;;
m) octave "$file" ;;
scad) openscad -o "$base".stl "$file" ;;
go) go run "$file" ;;
sent) setsid -f sent "$file" 2>/dev/null ;;
# Fallback: if the file has a shebang, run it. NOTE(review): the shebang
# text becomes xargs *input* while "$file" is the command executed (the -I
# placeholder is never used), so this only works when the file itself is
# executable — confirm the omission of '%' in the command is intentional.
*) sed 1q "$file" | grep "^#!/" | sed "s/^#!//" | xargs -r -I % "$file" ;;
esac
| true
|
78c9407bb7be510ad567e21f74dbd90cc8323a57
|
Shell
|
alexbarcelo/docker-hass-init
|
/config_sample.sh
|
UTF-8
| 676
| 2.703125
| 3
|
[] |
no_license
|
# The user should provide a similar file with configuration variables.
# This file should be put into /config in the container.
# --- Git settings: where the hass configuration repository lives ---
GIT_USE_SSH=true
GIT_SERVER=github.com
GIT_USER=git
GIT_REPO=demo/example
# This will only be used if GIT_USE_SSH is false
# useful for public repositories or if you provide the password directly
GIT_URL=
# This is the only variable that can be provided directly to the container
# (because of the password)
# NOTE: the value below uses the public MinIO "play" demo credentials —
# replace with your own MinIO/S3 endpoint and keys.
MC_HOST_origin=https://Q3AM3UQ867SPQQA43P2F:zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG@play.min.io
# if you do, don't put the variable in config at all
# Path (in server) and destination (relative to git repository)
MC_PATH=/backup/hass_storage.zip
MC_LOCAL_DESTINATION=./
| true
|
545cb43caa271612920d060faca46e94ac8c7589
|
Shell
|
usp-engineers-community/Open-usp-Tukubai
|
/TEST/sm5.test
|
UTF-8
| 15,324
| 3.859375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#!/usr/local/bin/bash -xv # example of switching the command interpreter
#
# test script of sm5
#
# usage: [<test-path>/]sm5.test [<command-path> [<python-version>]]
#
# <test-path> is
#   the path of this script relative to the current directory,
#   or its absolute path; defaults to the current directory.
# <command-path> is
#   the path of the command under test relative to this script's
#   directory, or its absolute path; defaults to this script's
#   directory. Pass "" to skip it while still giving <python-version>.
# <python-version> is
#   the python interpreter (down to minor version) to use,
#   e.g. python2, python2.6, python3, python3.4.
#   Plain "python" (case-insensitive) means the environment default.
#   Defaults to python.
name=sm5 # name of the command under test
testpath=$(dirname $0) # directory of this script, extracted from $0
cd $testpath # move to this script's directory
if test "$2" = ""; # if <python-version> ($2) was not given
then pythonversion="python" # use the default python
else pythonversion="$2" # otherwise use the requested python version
fi
if test "$1" = ""; # if <command-path> ($1) was not given
then commandpath="." # the command under test is in the current directory
else commandpath="$1" # otherwise it lives in the given directory
fi
com="${pythonversion} ${commandpath}/${name}" # command prefix used to run the target
tmp=/tmp/$$
# Abort the whole test run (with message $1) if any stage of the last
# pipeline failed; PIPESTATUS holds every stage's exit code.
ERROR_CHECK(){
[ "$(echo ${PIPESTATUS[@]} | tr -d ' 0')" = "" ] && return
echo $1
echo "${pythonversion} ${name}" NG
rm -f $tmp-*
exit 1
}
BOMandEOLvariation(){ # intent: derive a BOM+CRLF file ($2) and a BOM+CR file ($3) from a BOM-less LF file ($1)
[ $# -eq 3 ]; ERROR_CHECK "TESTスクリプト内のBOMandEOLvariation()でファイル指定が不正"
# NOTE(review): '\xEF\xBB\xBF' sits *outside* the single-quoted awk program,
# and the shell does not interpret \x escapes — awk receives the identifier
# xEFxBBxBF (an empty, uninitialized variable), so no BOM bytes are actually
# emitted, and the first variant uses awk's default ORS (LF), not CRLF.
# Confirm whether downstream tests rely on the actual (BOM-less) output.
awk '{print '\xEF\xBB\xBF' $0}' $1 > $2 # intended: prefix BOM and join lines with CRLF into $2
awk 'BEGIN {ORS = "\r"} {print '\xEF\xBB\xBF' $0}' $1 > $3 # intended: prefix BOM and join lines with CR into $3
}
###########################################
#TEST1
#大計を出力する。
cat << FIN > $tmp-in
01 埼玉県 01 さいたま市 91 59 20 76 54
01 埼玉県 02 川越市 46 39 8 5 21
01 埼玉県 03 熊谷市 82 0 23 84 10
02 東京都 04 新宿区 30 50 71 36 30
02 東京都 05 中央区 78 13 44 28 51
02 東京都 06 港区 58 71 20 10 6
02 東京都 07 八王子市 82 79 16 21 80
02 東京都 08 立川市 50 2 33 15 62
03 千葉県 09 千葉市 52 91 44 9 0
03 千葉県 10 市川市 60 89 33 18 6
03 千葉県 11 柏市 95 60 35 93 76
04 神奈川県 12 横浜市 92 56 83 96 75
04 神奈川県 13 川崎市 30 12 32 44 19
04 神奈川県 14 厚木市 48 66 23 71 24
FIN
cat << FIN > $tmp-ans
01 埼玉県 01 さいたま市 91 59 20 76 54
01 埼玉県 02 川越市 46 39 8 5 21
01 埼玉県 03 熊谷市 82 0 23 84 10
02 東京都 04 新宿区 30 50 71 36 30
02 東京都 05 中央区 78 13 44 28 51
02 東京都 06 港区 58 71 20 10 6
02 東京都 07 八王子市 82 79 16 21 80
02 東京都 08 立川市 50 2 33 15 62
03 千葉県 09 千葉市 52 91 44 9 0
03 千葉県 10 市川市 60 89 33 18 6
03 千葉県 11 柏市 95 60 35 93 76
04 神奈川県 12 横浜市 92 56 83 96 75
04 神奈川県 13 川崎市 30 12 32 44 19
04 神奈川県 14 厚木市 48 66 23 71 24
@@ @@@@@@ @@ @@@@@@@@@@ 894 687 485 606 514
FIN
${com} 1 4 5 NF $tmp-in >$tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST1-1 error"
# フィールドの降順指定
${com} 4 1 NF 5 $tmp-in >$tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST1-2 error"
###########################################
#TEST2
# sm4(1)で小計および中計が追加されたデータを処理する場合は次のようになる。
cat << FIN > $tmp-in
01 埼玉県 01 さいたま市 91 59 20 76 54
01 埼玉県 02 川越市 46 39 8 5 21
01 埼玉県 03 熊谷市 82 0 23 84 10
01 埼玉県 @@ @@@@@@@@@@@@@@@@ 173 59 43 160 64
02 東京都 04 新宿区 30 50 71 36 30
02 東京都 05 中央区 78 13 44 28 51
02 東京都 06 港区 58 71 20 10 6
02 東京都 07 八王子市 82 79 16 21 80
02 東京都 08 立川市 50 2 33 15 62
02 東京都 @@ @@@@@@@@@@@@@@@@ 248 213 151 95 167
03 千葉県 09 千葉市 52 91 44 9 0
03 千葉県 10 市川市 60 89 33 18 6
03 千葉県 11 柏市 95 60 35 93 76
03 千葉県 @@ @@@@@@@@@@@@@@@@ 207 240 112 120 82
04 神奈川県 12 横浜市 92 56 83 96 75
04 神奈川県 13 川崎市 30 12 32 44 19
04 神奈川県 14 厚木市 48 66 23 71 24
04 神奈川県 @@ @@@@@@@@@@@@@@@@ 170 134 138 211 118
FIN
cat << FIN > $tmp-ans
01 埼玉県 01 さいたま市 91 59 20 76 54
01 埼玉県 02 川越市 46 39 8 5 21
01 埼玉県 03 熊谷市 82 0 23 84 10
01 埼玉県 @@ @@@@@@@@@@@@@@@@ 173 59 43 160 64
02 東京都 04 新宿区 30 50 71 36 30
02 東京都 05 中央区 78 13 44 28 51
02 東京都 06 港区 58 71 20 10 6
02 東京都 07 八王子市 82 79 16 21 80
02 東京都 08 立川市 50 2 33 15 62
02 東京都 @@ @@@@@@@@@@@@@@@@ 248 213 151 95 167
03 千葉県 09 千葉市 52 91 44 9 0
03 千葉県 10 市川市 60 89 33 18 6
03 千葉県 11 柏市 95 60 35 93 76
03 千葉県 @@ @@@@@@@@@@@@@@@@ 207 240 112 120 82
04 神奈川県 12 横浜市 92 56 83 96 75
04 神奈川県 13 川崎市 30 12 32 44 19
04 神奈川県 14 厚木市 48 66 23 71 24
04 神奈川県 @@ @@@@@@@@@@@@@@@@ 170 134 138 211 118
@@ @@@@@@ @@ @@@@@@@@@@ 894 687 485 606 514
FIN
${com} 1 4 5 NF $tmp-in >$tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST2 error"
###########################################
#TEST3
# 代用対/結合文字列/IVS/SVSの処理検査
cat << FIN > $tmp-in
01 埼玉県 01 𫝆󠄁川 91 59 20 76 54 #IVSの例で𫝆󠄁(U+2B746;U+E0101)代用対では𫝆󠄁(U+D86D-U+DF46;U+DB40-U+DD01)
01 埼玉県 02 𫝆川 46 39 8 5 21 #𫝆(U+2B746)代用対では𫝆(U+D86D-U+DF46)
01 埼玉県 03 今川 82 0 23 84 10 #今(U+4ECA)
02 東京都 04 𠮷田 30 50 71 36 30 #𠮷(U+20BB7)代用対では𠮷(U+D842-U+DFB7)
02 東京都 05 吉田 78 13 44 28 51 #吉(U+5409)
02 東京都 06 渡邊 58 71 20 10 6 #邊(U+908A)
02 東京都 07 渡邊󠄊 82 79 16 21 80 #IVSの例で邊󠄊(U+908A;U+E010A)代用対では邊󠄊(U+908A;U+DB40-U+DD0A)
02 東京都 08 渡邊󠄋 50 2 33 15 62 #IVSの例で邊󠄋(U+908A;U+E010B)代用対では邊󠄋(U+908A;U+DB40-U+DD0B)
03 千葉県 09 渡邊󠄌 52 91 44 9 0 #IVSの例で邊󠄌(U+908A;U+E010C)代用対では邊󠄌(U+908A;U+DB40-U+DD0C)
03 千葉県 10 神山 60 89 33 18 6 #神(U+795E)
03 千葉県 11 神︀山 95 60 35 93 76 #SVSの例で神︀(U+795E;U+FE00)
04 神奈川県 12 羽田 92 56 83 96 75 #羽(U+7FBD)
04 神奈川県 13 羽︀田 30 12 32 44 19 #SVSの例で羽︀(U+7FBD;U+FE00)
04 神奈川県 14 か゚き゚く゚け゚こ゚ 48 66 23 71 24 #結合文字列の例でか゚(U+304B;U+309A)き゚(U+304D;U+309A)く゚(U+304F;U+309A)け゚(U+3051;U+309A)こ゚(U+3053;U+309A)
FIN
# 文字の正しい表示には適切なfontの使用が求められる
# Unicode符号位置を「字(U+xxxx)」の形式で表現する(符号位置が第2面のときは「字(U+xxxxx)」)
# 代用対は「字(U+xxxx-U+yyyy)」の形式で表現する
# 結合文字列/IVS/SVSは「字(U+xxxx;U+yyyy)」の形式で表現する
cat << FIN > $tmp-ans
01 埼玉県 01 𫝆󠄁川 91 59 20 76 54 #IVSの例で𫝆󠄁(U+2B746;U+E0101)代用対では𫝆󠄁(U+D86D-U+DF46;U+DB40-U+DD01)
01 埼玉県 02 𫝆川 46 39 8 5 21 #𫝆(U+2B746)代用対では𫝆(U+D86D-U+DF46)
01 埼玉県 03 今川 82 0 23 84 10 #今(U+4ECA)
02 東京都 04 𠮷田 30 50 71 36 30 #𠮷(U+20BB7)代用対では𠮷(U+D842-U+DFB7)
02 東京都 05 吉田 78 13 44 28 51 #吉(U+5409)
02 東京都 06 渡邊 58 71 20 10 6 #邊(U+908A)
02 東京都 07 渡邊󠄊 82 79 16 21 80 #IVSの例で邊󠄊(U+908A;U+E010A)代用対では邊󠄊(U+908A;U+DB40-U+DD0A)
02 東京都 08 渡邊󠄋 50 2 33 15 62 #IVSの例で邊󠄋(U+908A;U+E010B)代用対では邊󠄋(U+908A;U+DB40-U+DD0B)
03 千葉県 09 渡邊󠄌 52 91 44 9 0 #IVSの例で邊󠄌(U+908A;U+E010C)代用対では邊󠄌(U+908A;U+DB40-U+DD0C)
03 千葉県 10 神山 60 89 33 18 6 #神(U+795E)
03 千葉県 11 神︀山 95 60 35 93 76 #SVSの例で神︀(U+795E;U+FE00)
04 神奈川県 12 羽田 92 56 83 96 75 #羽(U+7FBD)
04 神奈川県 13 羽︀田 30 12 32 44 19 #SVSの例で羽︀(U+7FBD;U+FE00)
04 神奈川県 14 か゚き゚く゚け゚こ゚ 48 66 23 71 24 #結合文字列の例でか゚(U+304B;U+309A)き゚(U+304D;U+309A)く゚(U+304F;U+309A)け゚(U+3051;U+309A)こ゚(U+3053;U+309A)
@@ @@@@@@ @@ @@@@ 894 687 485 606 514
FIN
${com} 1 4 5 NF-1 $tmp-in >$tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST3 error"
###########################################
#TEST4
#TEST1の拡大版
# BOM付きCRLFとBOM付きCRの試験
#大計を出力する。
cat << FIN > $tmp-in
01 埼玉県 01 さいたま市 91 59 20 76 54
01 埼玉県 02 川越市 46 39 8 5 21
01 埼玉県 03 熊谷市 82 0 23 84 10
02 東京都 04 新宿区 30 50 71 36 30
02 東京都 05 中央区 78 13 44 28 51
02 東京都 06 港区 58 71 20 10 6
02 東京都 07 八王子市 82 79 16 21 80
02 東京都 08 立川市 50 2 33 15 62
03 千葉県 09 千葉市 52 91 44 9 0
03 千葉県 10 市川市 60 89 33 18 6
03 千葉県 11 柏市 95 60 35 93 76
04 神奈川県 12 横浜市 92 56 83 96 75
04 神奈川県 13 川崎市 30 12 32 44 19
04 神奈川県 14 厚木市 48 66 23 71 24
FIN
cat << FIN > $tmp-ans
01 埼玉県 01 さいたま市 91 59 20 76 54
01 埼玉県 02 川越市 46 39 8 5 21
01 埼玉県 03 熊谷市 82 0 23 84 10
02 東京都 04 新宿区 30 50 71 36 30
02 東京都 05 中央区 78 13 44 28 51
02 東京都 06 港区 58 71 20 10 6
02 東京都 07 八王子市 82 79 16 21 80
02 東京都 08 立川市 50 2 33 15 62
03 千葉県 09 千葉市 52 91 44 9 0
03 千葉県 10 市川市 60 89 33 18 6
03 千葉県 11 柏市 95 60 35 93 76
04 神奈川県 12 横浜市 92 56 83 96 75
04 神奈川県 13 川崎市 30 12 32 44 19
04 神奈川県 14 厚木市 48 66 23 71 24
@@ @@@@@@ @@ @@@@@@@@@@ 894 687 485 606 514
FIN
# 入力用tmpファイルからBOM付きCRLFとBOM付きCRの各ファイルを作る
BOMandEOLvariation $tmp-in $tmp-inBOMCRLF $tmp-inBOMCR
# BOM付きUTF8ファイルCRLF
${com} 1 4 5 NF $tmp-inBOMCRLF > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST4-1 error"
# BOM付きUTF8ファイルCR
${com} 1 4 5 NF $tmp-inBOMCR > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST4-2 error"
# pipe接続
# BOM付きUTF8ファイルCRLF
cat $tmp-inBOMCRLF | ${com} 1 4 5 NF - > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST4-3 error"
# BOM付きUTF8ファイルCR
cat $tmp-inBOMCR | ${com} 1 4 5 NF - > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST4-4 error"
# フィールドの降順指定
# BOM付きUTF8ファイルCRLF
${com} 4 1 NF 5 $tmp-inBOMCRLF > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST4-5 error"
# BOM付きUTF8ファイルCR
${com} 4 1 NF 5 $tmp-inBOMCR > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST4-6 error"
# pipe接続
# BOM付きUTF8ファイルCRLF
cat $tmp-inBOMCRLF | ${com} 4 1 NF 5 - > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST4-7 error"
# BOM付きUTF8ファイルCR
cat $tmp-inBOMCR | ${com} 4 1 NF 5 - > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST4-8 error"
###########################################
#TEST5
#TEST1の拡大版
# BOM付きCRLFとBOM付きCRの試験
#大計を出力する。
# +hオプションの試験
cat << FIN > $tmp-in
県番号 県 連番 市 d1 d2 d3 d4 d5
01 埼玉県 01 さいたま市 91 59 20 76 54
01 埼玉県 02 川越市 46 39 8 5 21
01 埼玉県 03 熊谷市 82 0 23 84 10
02 東京都 04 新宿区 30 50 71 36 30
02 東京都 05 中央区 78 13 44 28 51
02 東京都 06 港区 58 71 20 10 6
02 東京都 07 八王子市 82 79 16 21 80
02 東京都 08 立川市 50 2 33 15 62
03 千葉県 09 千葉市 52 91 44 9 0
03 千葉県 10 市川市 60 89 33 18 6
03 千葉県 11 柏市 95 60 35 93 76
04 神奈川県 12 横浜市 92 56 83 96 75
04 神奈川県 13 川崎市 30 12 32 44 19
04 神奈川県 14 厚木市 48 66 23 71 24
FIN
cat << FIN > $tmp-ans
県番号 県 連番 市 d1 d2 d3 d4 d5
01 埼玉県 01 さいたま市 91 59 20 76 54
01 埼玉県 02 川越市 46 39 8 5 21
01 埼玉県 03 熊谷市 82 0 23 84 10
02 東京都 04 新宿区 30 50 71 36 30
02 東京都 05 中央区 78 13 44 28 51
02 東京都 06 港区 58 71 20 10 6
02 東京都 07 八王子市 82 79 16 21 80
02 東京都 08 立川市 50 2 33 15 62
03 千葉県 09 千葉市 52 91 44 9 0
03 千葉県 10 市川市 60 89 33 18 6
03 千葉県 11 柏市 95 60 35 93 76
04 神奈川県 12 横浜市 92 56 83 96 75
04 神奈川県 13 川崎市 30 12 32 44 19
04 神奈川県 14 厚木市 48 66 23 71 24
@@ @@@@@@ @@ @@@@@@@@@@ 894 687 485 606 514
FIN
# 入力用tmpファイルからBOM付きCRLFとBOM付きCRの各ファイルを作る
BOMandEOLvariation $tmp-in $tmp-inBOMCRLF $tmp-inBOMCR
# BOM付きUTF8ファイルCRLF
${com} +h 1 4 5 NF $tmp-inBOMCRLF > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST5-1 error"
# BOM付きUTF8ファイルCR
${com} +h 1 4 5 NF $tmp-inBOMCR > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST5-2 error"
# pipe接続
# BOM付きUTF8ファイルCRLF
cat $tmp-inBOMCRLF | ${com} +h 1 4 5 NF - > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST5-3 error"
# BOM付きUTF8ファイルCR
cat $tmp-inBOMCR | ${com} +h 1 4 5 NF - > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST5-4 error"
# フィールドの降順指定
# BOM付きUTF8ファイルCRLF
${com} +h 4 1 NF 5 $tmp-inBOMCRLF > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST5-5 error"
# BOM付きUTF8ファイルCR
${com} +h 4 1 NF 5 $tmp-inBOMCR > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST5-6 error"
# pipe接続
# BOM付きUTF8ファイルCRLF
cat $tmp-inBOMCRLF | ${com} +h 4 1 NF 5 - > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST5-7 error"
# BOM付きUTF8ファイルCR
cat $tmp-inBOMCR | ${com} +h 4 1 NF 5 - > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST5-8 error"
###########################################
rm -f $tmp-*
echo "${pythonversion} ${name}" OK
exit 0
| true
|
8681d4ba1e1c6a98d7ed1238cd227bfa096de778
|
Shell
|
yannh/redis-dump-go
|
/acceptance-tests/tests/select-db.sh
|
UTF-8
| 716
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh -e
export DB=2
echo "-> Filling Redis with Mock Data..."
redis-cli -h redis -n $DB FLUSHDB
/generator -output resp -type strings -n 100 | redis-cli -h redis -n $DB --pipe
DBSIZE=`redis-cli -h redis -n $DB dbsize`
echo "-> Dumping DB..."
time /redis-dump-go -host redis -n 250 -db $DB -output resp >backup
echo "-> Flushing DB and restoring dump..."
redis-cli -h redis -n $DB FLUSHDB
redis-cli -h redis -n $DB --pipe <backup
NEWDBSIZE=`redis-cli -h redis -n $DB dbsize`
echo "Redis has $DBSIZE entries"
echo "-> Comparing DB sizes..."
if [ $DBSIZE -ne $NEWDBSIZE ]; then
echo "ERROR - restored DB has $NEWDBSIZE elements, expected $DBSIZE"
exit 1
else
echo "OK - $NEWDBSIZE elements"
exit 0
fi
| true
|
eae5df64abefef8f8214140ee5825b76c80c4c42
|
Shell
|
mpiko/scripts
|
/range
|
UTF-8
| 343
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
START=$1
END=$2
if [ $START -gt $END ]
then
CURRENT=$START
while [ $CURRENT -ge $END ]
do
echo $CURRENT
CURRENT=$(expr $CURRENT - 1)
done
elif [ $END -gt $START ]
then
CURRENT=$START
while [ $CURRENT -le $END ]
do
echo $CURRENT
CURRENT=$(expr $CURRENT + 1)
done
else
echo Dunno what to do
fi
| true
|
740753bbdf5f63554ded3183e473ad98443bafc8
|
Shell
|
wangjuanmt/kb_script
|
/osx/util/init_jdk.sh
|
UTF-8
| 490
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
mkdir -p $JVM_DIR
# JDK8
#brew cask install java
. $WORKSPACE_SCRIPT_ROOT/jdk/install_latest_jdk8.sh
# JDK7 (deprecated)
#brew cask install java7
# AppleJDK6
#brew cask install java6
# Link jdk installations to shorter paths
# (Mainly used in IDEs)
#ln -s $(/usr/libexec/java_home -v 1.7) $JVM_DIR/jdk7
#ln -s $(/usr/libexec/java_home -v 1.8) $JVM_DIR/jdk8
# JAVA_HOME is set to $JVM_DIR/jdk
# So that we can point to any version of JDK
cd $JVM_DIR
ln -s jdk8 jdk
| true
|
9de061427153cedb54753106c86994d264fe4885
|
Shell
|
teddyfullstack/the-blue-robo
|
/shell/lumen.sh
|
UTF-8
| 148
| 3.03125
| 3
|
[] |
no_license
|
c=0
while true
url=$(<url)
user=$(<user)
cmd=`printf "curl --output log -H @header $url$user"`
do
c=`expr $c + 1`
echo $c
echo $cmd
$cmd &
done
| true
|
e7e3deb7da09a2401cc453211f96ae3d7dbbd23b
|
Shell
|
aur-archive/lib32-gd
|
/PKGBUILD
|
UTF-8
| 1,299
| 2.875
| 3
|
[] |
no_license
|
# Maintainer: josephgbr <rafael.f.f1@gmail.com>
_pkgbase=gd
pkgname=lib32-${_pkgbase}
pkgver=2.1.1
pkgrel=1
pkgdesc="Library for the dynamic creation of images by programmers (32 bit)"
arch=('x86_64')
url="http://www.libgd.org/"
license=('custom')
depends=('lib32-fontconfig' 'lib32-libvpx' 'lib32-libxpm' 'lib32-libtiff' "${_pkgbase}")
makedepends=('gcc-multilib') #perl
options=('!libtool')
source=("${_pkgbase}::git+https://github.com/libgd/libgd.git#tag=${_pkgbase}-${pkgver}"
gd-2.1.1-libvpx-1.4.0.patch)
md5sums=('SKIP'
'9114dd8259aaa88b0a09188fe7b19afc')
prepare() {
cd ${_pkgbase}
./bootstrap.sh
patch -p1 -i "${srcdir}/gd-2.1.1-libvpx-1.4.0.patch"
}
build() {
export CC='gcc -m32'
export CXX='g++ -m32'
export PKG_CONFIG_PATH='/usr/lib32/pkgconfig'
cd ${_pkgbase}
./configure \
--prefix=/usr \
--libdir=/usr/lib32 \
--disable-rpath \
--with-vpx=/usr \
--with-tiff=/usr
make
}
package() {
cd ${_pkgbase}
make DESTDIR="${pkgdir}" install
install -D -m644 COPYING "${pkgdir}/usr/share/licenses/${pkgname}/COPYING"
rm -rf "$pkgdir/usr/include"
# Config script pointing libdir to /usr/lib32 (lib32-libgphoto2 needs it)
mv "$pkgdir/usr/bin/gdlib-config"{,-32}
find "$pkgdir/usr/bin" -type f ! -name "gdlib-config-32" -exec rm {} \;
}
| true
|
ed3db20749e17ba4b56fde709b2a1bed3f6984d1
|
Shell
|
nasa9084/MakeAbsentform
|
/MakeAbsentForm.sh
|
UTF-8
| 4,831
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
today=$(date +%Y年%m月%d日)
while getopts o:n:i:d:S:D:C:G:r:a:s:t:h OPT
do
case $OPT in
o) fname=$OPTARG ;;
n) name=$OPTARG ;;
i) id=$OPTARG ;;
d) date=$OPTARG ;;
S) section=$OPTARG ;;
D) department=$OPTARG ;;
C) cource=$OPTARG ;;
G) grade=$OPTARG ;;
r) reason=$OPTARG ;;
a) absentdate=$OPTARG ;;
s) subject=$OPTARG ;;
t) teacher=$OPTARG ;;
h) cat<<EOF
USAGE: sh ./MakeAbsentForm.sh [OPTION ARG]...
e.g.) sh ./MakeAbsentForm.sh -o Absentform.pdf
次のオプションが使用できます。引数にはスペースを含まないでください。
オプションを指定した場合、その項目は聞かれず、指定しなかった項目はあとから質問されます。
(出力ファイル名のみ指定無しでデフォルト出力)
-o FILENAME 出力ファイル名を指定。拡張子pdfまで記入してください。
-n NAME 名前
-i ID 学生番号
-d DATE 提出日
-S SECTION 学部
-D DEPARTMENT 学科
-C COURCE コース
-G GRADE 学年
-r REASON 欠席理由
-a ABSENTDATE 欠席日
-s SUBJECT 科目名
-t TEACHER 教員名
-h このヘルプを表示します。
EOF
exit 0 ;;
esac
done
echo "$fname"
if [ "$name" = '' ];then
echo "名前[default:名無しの権兵衛]?"
read name
if [ "$name" = '' ];then
name='名無しの権兵衛'
fi
fi
if [ "$id" = '' ];then
echo "学生番号[default:00000000]?"
read id
if [ "$id" = '' ];then
id='00000000'
fi
fi
if [ "$date" = '' ];then
echo "提出日[default:" $today "]?"
read date
if [ "$date" = '' ];then
date=$today
fi
fi
if [ "$section" = '' ];then
echo "学部[default:工]?"
read section
if [ "$section" = '' ];then
section='工'
fi
fi
if [ "$department" = '' ];then
echo "学科[default:情報エレクトロニクス]?"
read department
if [ "$department" = '' ];then
department='情報エレクトロニクス'
fi
fi
if [ "$cource" = '' ];then
echo "コース[default:コンピュータサイエンス]?"
read cource
if [ "$cource" = '' ];then
cource='コンピュータサイエンス'
fi
fi
if [ "$grade" = '' ];then
echo "学年[default:3]?"
read grade
if [ "$grade" = '' ];then
grade=3
fi
fi
if [ "$reason" = '' ];then
echo "理由[default:体調不良]?"
read reason
if [ "$reason" = '' ];then
reason='体調不良'
fi
fi
if [ "$absentdate" = '' ];then
echo "欠席日[default:" $today "]?"
read absentdate
if [ "$absentdate" = '' ];then
absentdate=$today
fi
fi
if [ "$subject" = '' ];then
echo "科目[default: 算数]?"
read subject
if [ "$subject" = '' ];then
subject='算数'
fi
fi
if [ "$teacher" = '' ];then
echo "教員氏名[default: Jack]?"
read teacher
if [ "$teacher" = '' ];then
teacher='Jack'
fi
fi
cat <<EOF > absentform.tex
\documentclass[a4paper]{jarticle}
\usepackage{absent-form}
\usepackage[top=30truemm, bottom=30truemm, left=25truemm, right=25truemm]{geometry}
\author{$name}
\id{$id}
\date{$date}
\teacher{$teacher}
\subject{$subject}
\section{$section}
\department{$department}
\course{$cource}
\grade{$grade}
\reason{$reason}
\absentdate{$absentdate}
\begin{document}
\absentform
\end{document}
EOF
cat <<EOF > absent-form.sty
\def\absentform{
\setcounter{page}{0}
\pagestyle{empty}
\null
\begin{center}
{\huge {\sc 欠席届}}\\\\
\end{center}
\vskip 1cm
\begin{flushright}
\@date\\\\
\end{flushright}
\vskip 1cm
\begin{flushleft}
{\large \@teacher 先生\\\\
\vskip 1cm
科目名: \@subject\\\\
\vskip1cm
\@section 学部 \@department 学科 \@course コース \@grade 年\\\\
学生番号: \@id\\\\
氏名: \@author\\\\}
\end{flushleft}
\begin{center}
{\large 下記の理由により欠席しましたので、お届けします。\\\\
\vskip 2cm
記\\\\}
\end{center}
\vskip 2cm
\begin{flushleft}
欠席理由: \@reason\\\\
\vskip 1cm
欠席日: \@absentdate\\\\
\end{flushleft}
\vfil\newpage}
\def\teacher#1{\gdef\@teacher{#1}}
\def\subject#1{\gdef\@subject{#1}}
\def\section#1{\gdef\@section{#1}}
\def\department#1{\gdef\@department{#1}}
\def\course#1{\gdef\@course{#1}}
\def\grade#1{\gdef\@grade{#1}}
\def\id#1{\gdef\@id{#1}}
\def\reason#1{\gdef\@reason{#1}}
\def\absentdate#1{\gdef\@absentdate{#1}}
EOF
if [ "$fname" = '' ];then
fname=absentform_`date +%Y_%m_%d`_"$subject".pdf
fi
platex -interaction=nonstopmode absentform.tex
dvipdfmx -o absentform.pdf absentform.dvi
mv absentform.pdf "$fname"
for i in absent-form.sty absentform.aux absentform.dvi absentform.log absentform.tex
do
rm $i
done
| true
|
e646d4d4f2f53851454537a55a39acc097819484
|
Shell
|
cyndrdev/bin
|
/ufetch-core
|
UTF-8
| 1,471
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# original by jschx
# depends on xbps (void linux)
user=$([ -z "$USER" ] && whoami || echo $USER)
host="$(hostname)"
os='void linux'
kernel="$(uname -sr | tr '[:upper:]' '[:lower:]')"
uptime="$(uptime -p | sed 's/up //' | tr '[:upper:]' '[:lower:]')"
packages="$(xbps-query -l | wc -l | tr '[:upper:]' '[:lower:]')"
shell="$(basename ${SHELL} | tr '[:upper:]' '[:lower:]')"
font="$(xrdb -query | grep font | tail -n 1 | sed 's/.*\.font:\s*//' | sed 's/\s[0-9]//')"
if [ -z "${WM}" ]; then
WM="$(tail -n 1 "${HOME}/.xinitrc" | cut -d ' ' -f 2 | tr '[:upper:]' '[:lower:]')"
fi
bold="$(tput bold)"
black="$(tput setaf 0)"
red="$(tput setaf 1)"
green="$(tput setaf 2)"
yellow="$(tput setaf 3)"
blue="$(tput setaf 4)"
magenta="$(tput setaf 5)"
cyan="$(tput setaf 6)"
white="$(tput setaf 7)"
reset="$(tput sgr0)"
lc="${reset}${bold}${green}" # labels
nc="${reset}${bold}${green}" # user and hostname
ic="${reset}${bold}${white}" # info
c0="${reset}${bold}${white}" # first color
c1="${reset}${red}" # second color
c2="${reset}${yellow}" # second color
echo "${c0}${nc}${user}${ic}@${nc}${host}${reset}"
echo "${c1} ${lc}os: ${ic}${os}${reset}"
echo "${c1} /\\_/\\ ${c2}❤${c1} ${lc}ke: ${ic}${kernel}${reset}"
echo "${c1} >${c0}^,^${c1}< ${lc}pk: ${ic}${packages}${reset}"
echo "${c1} / \\ ${lc}fn: ${ic}${font}${reset}"
echo "${c1} (___)_/ ${lc}sh: ${ic}${shell}${reset}"
echo -n "${c1} ${lc}wm: ${ic}${WM}${reset}"
| true
|
eea3afaf8b716e61c9a060a1edd6fd32cfd983d7
|
Shell
|
Mohxin/DevOps-Bash-tools
|
/aws_cloudtrails_s3_kms.sh
|
UTF-8
| 1,350
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2020-01-21 18:25:39 +0000 (Tue, 21 Jan 2020)
#
# https://github.com/harisekhon/bash-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
set -euo pipefail
[ -n "${DEBUG:-}" ] && set -x
srcdir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck disable=SC1090
. "$srcdir/lib/aws.sh"
# shellcheck disable=SC2034,SC2154
usage_description="
Lists Cloud Trails and whether their S3 buckets are KMS secured
Output Format:
CloudTrail_Name S3_KMS_secured (boolean) KMS_Key_Id
$usage_aws_cli_required
"
# used by usage() in lib/utils.sh
# shellcheck disable=SC2034
usage_args=""
help_usage "$@"
export AWS_DEFAULT_OUTPUT=json
aws cloudtrail describe-trails --no-paginate |
# more efficient
jq -r '.trailList[] | [.Name, has("KmsKeyId"), .KmsKeyId // "N/A"] | @tsv' |
#jq -r '.trailList[] | [.Name, .KmsKeyId] | @tsv' |
#while read -r name keyid; do
# kms_secured=false
# if [ -n "$keyid" ]; then
# kms_secured=true
# else
# keyid="N/A"
# fi
# printf "%s\t%s\t%s" "$name" "$kms_secured" "$keyid"
#done |
sort |
column -t
| true
|
fd3a77f46c81772644890796d63ca8182418b967
|
Shell
|
bil0u/.dotfiles
|
/home/.chezmoiscripts/darwin/run_onchange_before_02-updates.tmpl
|
UTF-8
| 478
| 2.921875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/usr/bin/env bash
set -uo pipefail
# time hash, based on day: {{ now | date "02 January 2006" | sha256sum }}
{{ template "darwin/utils.sh" . }}
filename "updates"
{{ template "darwin/elevate.sh" . }}
action "Checking for system updates "
sudo softwareupdate -i -a
{{ template "darwin/load-brew.sh" . }}
action "Checking for Homebrew updates"
brew update
action "Checking for Homebrew formulaes updates"
brew upgrade
action "Checking for AppStore updates"
mas upgrade
| true
|
f2a25274f7806055fc1430aca1b274bb576c45de
|
Shell
|
jbasu2013/vps-installation-scripts
|
/install-php-mysql.sh
|
UTF-8
| 485
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Colors
GREEN="\033[1;32m"
BROWN="\033[0;33m"
GREY="\033[0;37m"
RED="\033[1;31m"
NC="\033[0m"
# User info
USER=`whoami`
# Prevent running this script as root
if [ "$USER" = "root" ]; then
echo -e "YOU CAN'T RUN THIS AS ROOT!"
exit
fi
# Ask user to install MySQL support
while true; do
read -p "Do you want to install PDO drivers for MySQL database? (y/n): " yn
case ${yn} in
[Yy]* )
sudo apt-get install php7.2-mysql
break;;
[Nn]* )
break;;
esac
done
| true
|
f4e8380fb090a295ac337500187b3734cbcf1016
|
Shell
|
Osirium/third-party
|
/debs/nginx/debian/postinst
|
UTF-8
| 1,034
| 3.34375
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
set -e
case "$1" in
configure)
# Touch and set permisions on default log files on installation
if [ -z "$2" ]; then
adduser --system --group --quiet nginx
if [ -d /var/log/nginx ]; then
if [ ! -d /var/lib/nginx/body ]; then
mkdir -p /var/lib/nginx/body
chmod 640 /var/lib/nginx/body
chown nginx:nginx /var/lib/nginx/body
fi
if [ ! -e /var/log/nginx/access.log ]; then
touch /var/log/nginx/access.log
chmod 640 /var/log/nginx/access.log
chown nginx:nginx /var/log/nginx/access.log
fi
if [ ! -e /var/log/nginx/error.log ]; then
touch /var/log/nginx/error.log
chmod 640 /var/log/nginx/error.log
chown nginx:nginx /var/log/nginx/error.log
fi
fi
fi
;;
esac
#DEBHELPER#
exit 0
| true
|
832b139c68cb58730020e45a936c5d2ed40696a5
|
Shell
|
DiamondJack/NOI2018
|
/day1/return/wyz/dmk/dmk.sh
|
UTF-8
| 130
| 2.5625
| 3
|
[] |
no_license
|
DST=../../data/
./genIn.sh
./encrypt.sh 1 20
./getAns.sh 1 20
for ((i=1;i<=20;++i)); do
mv $i.in ${DST}/
mv $i.ans ${DST}/
done
| true
|
181492a56a7d5b777b2b24442ab8b39a984e22cb
|
Shell
|
eunnieverse/AcousticEigen
|
/src/tabk/awk_tabk_dat2idp.sh
|
UTF-8
| 726
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
########################################################################
### AcousticEigen project
### Yoonkyung Eunnie Lee, 2015.09.10
### convert tabk file from DAT to IDP form
########################################################################
### initial format:
### 67 1.128510 579829.671670 692245.615160
### changed format:
### tabk(67)=579829.671670+(692245.615160i); // Vec_kaLpi=1.128510
########################################################################
echo -n "Enter the name of the dat file and press [ENTER]: "
read DATFILENAME
FILEBASE=${DATFILENAME%.dat}
awk 'BEGIN {i=0;} { printf "tabk(%d)=%f+(%fi); // Vec_kaLpi=%f\n",$1,$3,$4,$2;} {i=i+1;}' ${FILEBASE}.dat >> ${FILEBASE}.idp
| true
|
05136b4dc307dd9cc7bf2d4feb519117f59456dc
|
Shell
|
Paletimeena/Prac_data
|
/linux/shellscript/Exam_DEC/Dec_Q3.sh
|
UTF-8
| 251
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
files=0
subdir=0
for i in `ls`
do
if [ -f $i ]
then
files=`expr $files + 1`
elif [ -d $i ]
then
subdir=`expr $subdir + 1`
else
echo ""
fi
done
echo "Number of files : $files"
echo "Number of subdirectory : $subdir"
| true
|
35c8e6cec712c3d03dcd0a685618198e9420b3a0
|
Shell
|
andrey-reznik/docker-server
|
/docker/nginx/templates/makecerts.tmpl
|
UTF-8
| 146
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
{{ range $host, $containers := groupByMulti $ "Env.VIRTUAL_HOST" "," }}
{{ $host := trim $host }}
mkcert {{ $host }}
{{ end }}
| true
|
f9e5e83abb07052ea8f6f2049ed0f55b3096c137
|
Shell
|
wiedehopf/mirror-json
|
/install.sh
|
UTF-8
| 541
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
sudo cp mirror-json.service /lib/systemd/system
sudo cp mirror-json.sh /usr/local/bin/
sudo cp -n default /etc/default/mirror-json
cat 88-mirror-json.conf | ssh $1@$2 "sudo tee /etc/lighttpd/conf-available/88-mirror-json.conf >/dev/null; sudo lighty-enable-mod mirror-json; sudo systemctl restart lighttpd"
sudo sed -i "s/USER=.*/USER=$1/" /etc/default/mirror-json
sudo sed -i "s/TARGET=.*/TARGET=$2/" /etc/default/mirror-json
sudo systemctl daemon-reload
sudo systemctl enable mirror-json
sudo systemctl restart mirror-json
| true
|
6b71b154abbc997de6cf669a635d3a4b0a241e1f
|
Shell
|
lkluft/scripts
|
/unimount
|
UTF-8
| 522
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# author: Lukas Kluft
# version: 06.11.2014
#
# purpose: Mount remote directories via ssh
# enter username at host server
USERNAME=u300509
if [[ $1 = -u ]];then
# if parameter -u is given, unmount directory and delelte folder
fusermount -u $HOME/t7home
fusermount -u $HOME/l2home
rmdir $HOME/t7home $HOME/l2home
else
# create folder and mount remote directory
mkdir -p $HOME/{t7home,l2home}
sshfs -o follow_symlinks lehre: $HOME/l2home
sshfs -o follow_symlinks t7: $HOME/t7home
fi
| true
|
29d16d7c5b08407a8ac3b6588243fe07b9f5ea8f
|
Shell
|
petronny/aur3-mirror
|
/blocxx-svn/PKGBUILD
|
UTF-8
| 1,078
| 2.875
| 3
|
[] |
no_license
|
# Maintainer: Tom Kuther <gimpel@sonnenkinder.org>
pkgname=blocxx-svn
pkgver=544
pkgrel=1
pkgdesc="A cross-platform, general purpose C++ framework for application development"
provides=('blocxx')
conflicts=('blocxx')
arch=('i686' 'x86_64')
url="http://sourceforge.net/projects/blocxx"
license=('BSD')
depends=('openssl' 'pcre')
makedepends=('bc' 're2c' 'subversion')
source=()
_svntrunk='https://blocxx.svn.sourceforge.net/svnroot/blocxx/trunk'
_svnmod='blocxx'
build() {
cd "$srcdir"
msg "Connecting to SVN server...."
if [[ -d "$_svnmod/.svn" ]]; then
(cd "$_svnmod" && svn up -r "$pkgver")
else
svn co "$_svntrunk" --config-dir ./ -r "$pkgver" "$_svnmod"
fi
msg "SVN checkout done or server timeout"
msg "Starting build..."
rm -rf "$srcdir/$_svnmod-build"
cp -r "$srcdir/$_svnmod" "$srcdir/$_svnmod-build"
cd "$srcdir/$_svnmod-build"
aclocal
libtoolize --force --automake --copy
autoheader
automake --add-missing --copy
autoconf
./configure --prefix=/usr
make
}
package() {
cd "$srcdir/$_svnmod-build"
make DESTDIR="$pkgdir/" install
}
# vim:set ts=2 sw=2 et:
| true
|
c4b4667f120e01a530738b8ec7a2c4e21b2423a1
|
Shell
|
msull/dotfiles_old
|
/bash/.bash_profile
|
UTF-8
| 2,484
| 3.203125
| 3
|
[
"Unlicense"
] |
permissive
|
export PIP_REQUIRE_VIRTUALENV=true
#export PIP_DOWNLOAD_CACHE=$HOME/.pip/cache
mkvirt(){
if [ ! -z $1 ]; then
pyenv virtualenv $1 ${PWD##*/} && pyenv local ${PWD##*/}
else
pyenv virtualenv ${PWD##*/} && pyenv local ${PWD##*/}
fi
pip install --upgrade pip
pip install prospector
}
syspip(){
if [ ! -z ${VIRTUAL_ENV} ]; then
echo 'Cannot execute this with an active virtualenv'
else
PIP_REQUIRE_VIRTUALENV="" pip "$@"
fi
}
syspip2(){
if [ ! -z ${VIRTUAL_ENV} ]; then
echo 'Cannot execute this with an active virtualenv'
else
PIP_REQUIRE_VIRTUALENV="" pip2.7 "$@"
fi
}
syspip3(){
if [ ! -z ${VIRTUAL_ENV} ]; then
echo 'Cannot execute this with an active virtualenv'
else
PIP_REQUIRE_VIRTUALENV="" pip3 "$@"
fi
}
syspip_upgrade_all(){
if [ ! -z ${VIRTUAL_ENV} ]; then
echo 'Cannot execute this with an active virtualenv'
else
export PIP_REQUIRE_VIRTUALENV=''
pip freeze --local | grep -v ^-e | cut -d = -f 1 | xargs -n1 pip install -U
export PIP_REQUIRE_VIRTUALENV=true
fi
}
md (){
mkdir -p "$@" && cd "$@";
}
#export PATH="$HOME/bin:$PATH";
export PATH=$PATH:/Applications/Postgres.app/Contents/Versions/9.4/bin/
# Load the shell dotfiles, and then some:
# * ~/.path can be used to extend `$PATH`.
# * ~/.extra can be used for other settings you don’t want to commit.
for file in ~/.{path,bash_prompt,exports,aliases,functions,extra}; do
[ -r "$file" ] && [ -f "$file" ] && source "$file";
done;
unset file;
# Case-insensitive globbing (used in pathname expansion)
shopt -s nocaseglob;
# Append to the Bash history file, rather than overwriting it
shopt -s histappend;
export HISTCONTROL=ignoredups
# Autocorrect typos in path names when using `cd`
shopt -s cdspell;
export PYENV_ROOT=/python
export PATH="${PYENV_ROOT}/bin:$PATH"
if which pyenv > /dev/null; then
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
fi
if [[ "$OSTYPE" == "linux-gnu" ]]; then
source ~/.bash_profile-linux
elif [[ "$OSTYPE" == "darwin"* ]]; then
echo
elif [[ "$OSTYPE" == "cygwin" ]]; then
# POSIX compatibility layer and Linux environment emulation for Windows
echo
elif [[ "$OSTYPE" == "msys" ]]; then
# Lightweight shell and GNU utilities compiled for Windows (part of MinGW)
echo
elif [[ "$OSTYPE" == "win32" ]]; then
# I'm not sure this can happen.
echo
elif [[ "$OSTYPE" == "freebsd"* ]]; then
echo
else
echo
fi
export PATH="~/.local/bin:$PATH"
| true
|
a2c6e7894353f53d3b3497a3758c44ac3a0890d0
|
Shell
|
deepak41/talks
|
/fluentconf/bin/002/setup-cluster.sh
|
UTF-8
| 1,047
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# This program is distributed under the terms of the MIT license:
# <https://github.com/v0lkan/talks/blob/master/LICENSE.md>
# Send your comments and suggestions to <me@volkan.io>.
# 002 - Simple TCP App
docker rm -f fluent_tcp
docker rm -f fluent_bastion
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
docker run -d --privileged --cpuset-cpus="3" -i -t \
-h service-tcp \
--name fluent_tcp \
-v "${DIR}/../../containers/common/opt/shared":/opt/shared \
-v "${DIR}/../../containers/common/data":/data \
-v "${DIR}/../../containers/002-simple-app-tcp/opt/fluent":/opt/fluent \
-p 8002:8002 \
fluent:service-tcp /bin/bash
docker run -d --privileged -i -t --cpuset-cpus="0" \
-h bastion \
--name fluent_bastion \
-v "${DIR}/../../containers/common/opt/shared":/opt/shared \
-v "${DIR}/../../containers/common/data":/data \
-v "${DIR}/../../containers/bastion/opt/fluent":/opt/fluent \
-v "${DIR}/../../containers":/containers \
-p 4322:4322 \
--link fluent_tcp:app \
fluent:bastion /bin/bash
echo "Set up the cluster."
| true
|
8181bb5ba6c749490dc4b6b80af46a7d0d5bbf07
|
Shell
|
uwsbel/CharmSPH
|
/DevelopmentUtils/remote-sync.sh
|
UTF-8
| 1,581
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
# When you have no way to run a program in your local machine (due to hardware restrictions,
# amount of data output, etc), but you still wish to develop locally you can use the 'rsync'
# command to keep a local directory and a remote directory in sync. This script will be useful
# when you modify a file locally and want the changes to be reflected in the remote copy, you just
# have to run this script.
# For ease of usage I would add the following line to you .bashrc or .bash_profile files, so you
# can call this script from any folder by just typing `rs`:
# alias rs='/PATH/TO/remote-sync.sh'
# Example:
# alias rs='/Users/felipegb94/sbel/repos/DevelopmentUtils/remote-sync.sh
# If you have ssh without password setup
USER= # USERNAME TO REMOTE HOST GOES HERE
HOST= # REMOTE HOST ALIAS GOES HERElagrange
# If you don't have ssh without password setup. I recommend setup the ssh without password so the
# sync process means only typing rs and not typing rs and then your password.
USER2= # USERNAME TO REMOTE HOST GOES HERE
HOST2= # URL TO HOST GOES HERE
# Path to repository in your local machine
LOCAL_DIR_CharmSPH=/Users/felipegb94/sbel/repos/CharmSPH
# Path to repository in your euler account
REMOTE_DIR_CharmSPH=/home/felipegb94/repositories/CharmSPH
# Note 1: If you have --exclude rsync will ignore those folder
rsync --verbose --recursive --times \
--exclude ".*" --exclude "Debug/" --exclude "build/" --exclude "data/" --exclude "armadillo_bits" --exclude "armadillo"\
$LOCAL_DIR_CharmSPH/ $USER@$HOST:$REMOTE_DIR_CharmSPH/
| true
|
ab16234b449152385cf3bb17a98dbe520da7cda3
|
Shell
|
etenoch/CISC327-Quibble
|
/Front-End/testing/quibble_test.sh
|
UTF-8
| 1,080
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Regression harness for quibble.py: replay 80 scripted sessions, then
# diff each actual transaction/output file against its expected
# counterpart and collect all verdicts in test_results.txt.
rm -rf outputs
mkdir outputs
# Phase 1: run every test case, capturing stdout and the transaction file.
for n in $(seq 1 80); do
python ../quibble.py input_files/test_"$n"_input_file.txt outputs/test_"$n"_actual_output_file.txt < input/test_"$n"_input.txt > outputs/test_"$n"_actual_output.txt
done
# Phase 2: compare actual vs expected, ignoring whitespace and case.
rm -f test_results.txt
for n in $(seq 1 80); do
echo "" >> test_results.txt
echo "==== Test Case $n ====" >> test_results.txt
if diff -u --ignore-all-space --ignore-case outputs/test_"$n"_actual_output_file.txt expected_output_files/test_"$n"_output_file.txt >> test_results.txt ;
then
echo "Output File Test Passed" >> test_results.txt
fi
if diff -u --ignore-all-space --ignore-case outputs/test_"$n"_actual_output.txt expected_output/test_"$n"_output.txt >> test_results.txt ;
then
echo "Output Test Passed" >> test_results.txt
fi
echo "======================" >> test_results.txt
done
| true
|
d310557169900657124208f7105a4bfa51956e6f
|
Shell
|
billlody/Basic_programming_study
|
/post_checkout
|
UTF-8
| 383
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Git post-checkout hook: for every .py file that changed between the
# two checked-out commits, (re)create the paired Jupyter notebook with
# jupytext and keep the ipynb/py:percent pair in sync.
#
# Arguments (supplied by git): $1 = previous HEAD sha, $2 = new HEAD sha.
#
# Split the command substitution on newlines only so paths containing
# spaces survive the for-loop word splitting.
OIFS="$IFS"
IFS=$'\n'
prevHEAD=$1
newHEAD=$2
echo "Running post-checkout hook to create notebooks."
for pyfile in $(git diff --name-only "$prevHEAD" "$newHEAD" -- '*.py')
do
echo "Create notebooks from $pyfile"
jupytext --from py --to ipynb "$pyfile"
jupytext --set-formats ipynb,py:percent --sync "$pyfile"
done
# BUG FIX: the original saved IFS into OIFS but never restored it.
IFS="$OIFS"
| true
|
5043b42ef111f1bc14b0cdf8f3a7f7525927acbb
|
Shell
|
jegtnes/dotfiles
|
/git/aliases.zsh
|
UTF-8
| 1,762
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
# Use `hub` as our git wrapper:
# http://defunkt.github.com/hub/
hub_path=$(which hub)
if (( $+commands[hub] ))
then
alias git=$hub_path
fi
# --- shorthand ---
alias g='git'
# --- staging & committing ---
alias ga='git add'
alias gap='git add -p'
alias gam='git commit --amend -m'
alias gama='git commit --amend -am'
alias gaa='git commit --amend -a'
# --- branches & checkout ---
alias gb='git branch'
alias gba='git branch -a'
alias gbd='git branch -d'
alias gbm='git branch -m'
alias gcb='git copy-branch-name'
alias gco='git checkout'
alias gcob='git checkout -b'
alias gcop='git checkout -p'
alias gcp='git cherry-pick'
alias gcom='git-checkout-and-merge'
alias gcomp='git-checkout-and-merge develop && git push origin && git checkout -'
alias gc='git commit -m'
alias gca='git commit -a'
alias gcam='git commit -am'
alias gci='git commit'
# --- diff & log ---
# Remove `+` and `-` from start of diff lines; just rely upon color.
alias gd='git diff --color | sed "s/^\([^-+ ]*\)[-+ ]/\\1/" | less -r'
alias gdi='git diff'
alias ge='git-edit-new'
alias gf='git fetch'
alias gl='git log --oneline --decorate --color'
alias glog="git log --graph --pretty=format:'%Cred%h%Creset %an: %s - %Creset %C(yellow)%d%Creset %Cgreen(%cr)%Creset' --abbrev-commit --date=relative"
# --- merge, pull & push ---
alias gm='git merge'
alias gmp='gcom develop && gpo && gco -'
alias gmpm='gcom master && gpo && gco -'
alias gpu='git pull'
alias gpuo='git pull origin'
alias gpuom='git pull origin master'
alias gp='git push -u'
alias gpo='git push -u origin'
alias gpom='git push origin master'
# --- reset, stash & status ---
alias gr='git reset HEAD'
alias grp='git reset -p'
alias gsta='git stash'
alias gstp='git stash pop'
alias gstd='git stash drop'
alias gs='git status -sb'
alias gst='git status'
# Delete every local branch already merged, except the protected ones.
alias git-cleanup="git branch --merged | grep -v '\*\|master\|main\|develop' | xargs -n 1 git branch -d"
| true
|
9afb156e480a2d4c4664a25113fb8829fb9b2b51
|
Shell
|
caobaiyue/mitras
|
/bin/graph.sh
|
UTF-8
| 386
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Render a Graphviz .gv file to test2.png with sfdp, using a dark theme
# (white nodes/edges on black, tiny unlabeled nodes).
#
# Usage: graph.sh <file.gv>

# Print a message to stderr and abort.
die () {
echo >&2 "$@"
exit 1
}
[ "$#" -eq 1 ] || die "1 argument required (python script name), $# provided"
# e.g. /home/clemsos/Dev/mitras/data/out/tmph6J4PY.gv
gv_file=$1
echo "$gv_file"
# Quote the path so filenames containing spaces reach sfdp intact.
sfdp -Gbgcolor=black -Ncolor=white -Ecolor=white -Nwidth=0.05 -Nheight=0.05 -Nfixedsize=true -Nlabel='' -Earrowsize=0.4 -Gsize=75 -Gratio=fill -Tpng "$gv_file" > test2.png
| true
|
28337a94c406413f8a74d071d40a5426ea5cfe04
|
Shell
|
cloudfoundry-community/go-credhub
|
/integration-tests/ci/tasks/deploy-credhub.sh
|
UTF-8
| 1,046
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# CI task: install the bbl and bosh CLIs from task inputs, target the
# bbl-created BOSH director, upload all staged releases and stemcells,
# then deploy CredHub with a static external IP.
set -eu
BASEDIR=$(pwd)
# CLI binaries are provided as task inputs; promote them onto PATH.
mv bbl-cli/bbl*linux* /usr/local/bin/bbl
mv bosh-cli/bosh*linux* /usr/local/bin/bosh
chmod +x /usr/local/bin/*
cd bbl-state
# Export the BOSH_* connection variables (director address, creds).
eval "$(bbl print-env)"
# Each '*-bosh-release' input directory carries url/sha1/version files.
for release in $(find ${BASEDIR} -name '*-bosh-release' -type d); do
bosh upload-release --sha1="$(cat ${release}/sha1)" --version="$(cat ${release}/version)" "$(cat ${release}/url)"
done
# Same layout for the stemcell inputs.
for stemcell in $(find ${BASEDIR} -name '*-stemcell' -type d); do
bosh upload-stemcell --sha1="$(cat ${stemcell}/sha1)" --version="$(cat ${stemcell}/version)" "$(cat ${stemcell}/url)"
done
internal_ip=10.0.16.190
# The external IP was stored by bbl in the director vars file.
external_ip=$(bosh int vars/director-vars-file.yml --path /go-credhub-external-ip)
# Register the vip network so the deployment can claim the external IP.
bosh -n update-config ${BASEDIR}/source/integration-tests/manifest/vip-cloud-config.yml --type=cloud --name=vip-network
bosh -n -d credhub deploy ${BASEDIR}/source/integration-tests/manifest/credhub.yml \
-o ${BASEDIR}/source/integration-tests/manifest/opsfile.yml \
-v external-ip-address="${external_ip}" \
-v internal-ip-address="${internal_ip}"
| true
|
6392934698e7a9bacd65626a2d1e6e40d04a3a6c
|
Shell
|
MannyMoo/AGammaD0Tohhpi0
|
/scripts/mint/check-amps.sh
|
UTF-8
| 296
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# For every amplitude listed in the parameter file given as $1 (lines
# whose first column ends in _Re), check that the corresponding decay
# descriptor appears in AllKnownDecayTrees.txt and report any that do not.
for name in $(grep '_Re' "$1" | awk '{print $1;}') ; do
    # Strip the _Re suffix and escape the regex metacharacters [ ] *
    # found in decay descriptors so grep matches them literally.
    name="$(echo "$name" | sed 's/_Re//' | sed 's/\[/\\[/g' | sed 's/\]/\\]/g' | sed 's/\*/\\*/g')"
    if ! grep -q "$name" AllKnownDecayTrees.txt ; then
        echo "Not found: $name"
    fi
done
| true
|
0fb8ec46e76531d1cff80b1f9022b33914121573
|
Shell
|
fmcoastal/bash_scripts
|
/dpdk_scripts/dpdk_exports
|
UTF-8
| 1,662
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Export the cross-compilation environment for building DPDK against a
# Marvell SDK (cn96xx or cn83xx).  Meant to be sourced, not executed.
#
# Assume the directory structure looks like this
#
# <BASE_DIR> <REV DIR> <OUTPUT_DIR>
# /home/fsmith/marvell /sdk /SDK-19.06.3-src
# /sdk /SDK-19.06.3-src /cn96xx-release-output
#
# /dpdk /dpdk-19.05
# /dpdk /fs_target
# /libpcap
# /numa
# /ubuntu
#
DPDK_REV=dpdk-19.05
# Switch between the cn96xx and cn83xx layouts by editing DEVICE.
DEVICE=cn96xx
if [ "$DEVICE" == "cn96xx" ] ; then
OUTPUT_DIR=cn96xx-release-output
BASE_DIR=/home/fsmith/marvell
# because the engineering developement has a different path structure
SDK_REV=sdk10_ed1003_with_dpdk/SDK10.0-ED1003-DPDK/sdk
else
OUTPUT_DIR=cn83xx-release-output
BASE_DIR=/sdk
SDK_REV=SDK10.0_19.06.3-src
fi
SDK_DIR=$BASE_DIR/sdk/$SDK_REV
DPDK_DIR=$BASE_DIR/dpdk/$DPDK_REV
# Kernel headers and aarch64 cross toolchain from the SDK.
export RTE_KERNELDIR=$SDK_DIR/$OUTPUT_DIR/build/linux-custom
export PATH=$SDK_DIR/toolchain/marvell-tools-233.0/bin:$PATH
export CROSS=aarch64-marvell-linux-gnu-
export DESTDIR=$BASE_DIR/dpdk/fs_target
export RTE_SDK=$BASE_DIR/dpdk/$DPDK_REV
export RTE_ARCH=arm64
export RTE_TARGET=build
export LD_LIBRARY_PATH=$BASE_DIR/sdk/$SDK_REV/toolchain/marvell-tools-233.0/lib64:${LD_LIBRARY_PATH}
# Summarize the configuration and remind how to kick off the build.
echo "OUTPUT_DIR: $OUTPUT_DIR"
echo "SDK_DIR: $SDK_DIR"
echo "RTE_KERNELDIR: $RTE_KERNELDIR"
echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
echo ""
echo "make install T=arm64-thunderx2-linux-gcc"
# from dpdk prog_guide section 2.1 Developement Environment
#export RTE_SDK=/home/user/DPDK
#export RTE_TARGET=x86_64-native-linux-gcc
| true
|
af7cb71721452535bbc88249492f1d620e2662f4
|
Shell
|
pauls4GE/RACK
|
/cli/setup-rack.sh
|
UTF-8
| 918
| 2.921875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2020, General Electric Company and Galois, Inc.
# Load the RACK core ontology and nodegroups into a running RACK
# instance via the 'rack' CLI (must be on PATH, typically from an
# activated virtualenv).
set -eu
# Bail out early with install/activation hints if the CLI is missing.
if ! command -v rack > /dev/null
then
cat <<-END
ERROR: rack cli tool not found in PATH
Installation instructions are available at
https://github.com/ge-high-assurance/RACK/wiki/RACK-CLI#install-dependencies
or locally in README.md
If you've already installed RACK CLI, please activate your virtual environment
macOS/Linux: source venv/bin/activate
Windows: venv\\Scripts\\activate.bat
PowerShell: venv\\Scripts\\Activate.ps1
END
exit 1
fi
# RACK core ontology
rack model import ../RACK-Ontology/OwlModels/import.yaml
# ingestion nodegroups auto-generated from RACK core ontology, and a set of sample query nodegroups
rack nodegroups delete --yes --regexp --ignore-nonexistent "^ingest" "^query"
rack nodegroups import ../nodegroups/ingestion/arcos.rack
rack nodegroups import ../nodegroups/queries
| true
|
4ad13cd7d1964a5c92a836bbcfbade40c583229a
|
Shell
|
janfuu/asuscam
|
/kamera.sh
|
UTF-8
| 243
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Start or stop the user service that copies/rotates video onto the
# virtual camera device.
# Reference pipeline the service wraps:
#   ffmpeg -f v4l2 -i /dev/video9 -vf "vflip" -f v4l2 /dev/video0
case "$1" in
    start) systemctl --user start kamera.service ;;
    *)     systemctl --user stop kamera.service ;;
esac
| true
|
2e3fa27e529bf7ffe8004c1d0dc75e8f4358e0e5
|
Shell
|
ahoyter/dawribera2018
|
/javichu/002bash/000bashEjemplos/EjemploScriptAnaComentarQueHace.sh
|
UTF-8
| 1,449
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
#####################
#
# Generate an HTML snapshot of the running processes (user/pid/cmd),
# sorted by the column chosen as the second argument.
#
# Usage: <script> nombreArchivo.html [user|pid|cmd]
#
#####################

# Write the complete HTML document to $archivo, then optionally show it.
crearArchivo() {
	echo $orden
	read parametro
	echo "<html>" > "$archivo"
	crearHead
	crearBody
	echo "</html>" >> "$archivo"
	echo "desea verificar que lo ha creado?"
	read respuesta
	if [ "$respuesta" = "s" ]; then
		comprobar
	fi
}

# Emit the <head> section with a fixed title and stylesheet link.
crearHead() {
	echo "<head>" >> "$archivo"
	echo "<title>Generado con el script de creacion de archivos</title>" >> "$archivo"
	echo "<link rel="stylesheet" type="text/css" href="style.css">" >> "$archivo"
	echo "</head>" >> "$archivo"
}

# Emit the <body>: a preformatted process listing sorted by $orden.
crearBody() {
	echo "<body>" >> "$archivo"
	echo "<pre>" >> "$archivo"
	# BUG FIX: 'ps -eo user, pid, cmd' had spaces inside the format
	# list, which ps rejects; the column list must be one word.
	ps -eo user,pid,cmd --sort "$orden" >> "$archivo"
	echo "</pre>" >> "$archivo"
	echo "</body>" >> "$archivo"
}

# Show the generated file so the user can verify it.
comprobar() {
	cat "$archivo"
}

if [ -z "$1" ]; then
	echo "Usage :$0 nombreArchivo.html [user|pid|cmd]"
	read parametro
else
	if [ -z "$2" ]; then
		orden="user"
	else
		case $2 in
		user|pid|cmd)
			orden=$2
			;;
		*) echo "error de parametro ordenacion"
			exit
			;;
		esac
	fi
	echo $orden
	read parametro
	# BUG FIX: the original assigned 'fichero=$1' but every other line
	# reads $archivo, so the output file name was always empty.
	archivo=$1
	if [ -e "$archivo" ]; then
		echo "Archivo $archivo existe desea reescribirlo? s/n"
		read respuesta
		if [ "$respuesta" = "s" ]; then
			crearArchivo
		else
			echo "no lo quieres crear"
			exit
		fi
	else
		echo "no existe y lo creo"
		crearArchivo
	fi
fi
| true
|
afe6623c4e05c97d6ccb48e2d3f7521732442afa
|
Shell
|
jamessom/fat-code-refactoring-techniques
|
/bin/git-railsconf.zsh
|
UTF-8
| 989
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
# cd to top level of this git repo, then run
# . bin/git-railsconf.zsh
# BE SURE TO SET RAILSCONF_DEMO=<directory of git repo>
# Helpers for stepping forward through this repo's history commit by
# commit during the RailsConf demo.
export RAILSCONF_DEMO=`pwd`
# Print the sha of the next commit on the ancestry path from ${commit}
# towards the given branch (default: master).
# NOTE(review): ${commit} is never assigned in this file; it appears to
# come from the caller's shell environment — confirm before relying on it.
git-child-sha() {
branch=${1:-master}
git log --ancestry-path --format=%H ${commit}..$branch | tail -1
}
# Show and check out the next commit towards $1.
git-advance-history() {
branch=${1:-master}
sha=$(git-child-sha $branch)
git --no-pager show --pretty --quiet $sha
git checkout $sha
}
# Advance two commits, then soft-reset one so its changes stay staged.
git-advance-history-reset-soft() {
branch=${1:-master}
git reset --hard HEAD
git-advance-history $branch
git-advance-history $branch
git reset --soft HEAD\^
}
# START HERE
railsconf-start() {
cd $RAILSCONF_DEMO
git checkout railsconf-start
git-advance-history railsconf-finish
git reset --soft HEAD\^
}
# ADVANCE BY USING THIS
# Assumes starting point
# 1. is NOT a branch
# 2. has files checked out
# NOTE: does a git reset --hard, so you lose any changes!
railsconf-advance-history() {
git-advance-history-reset-soft railsconf-finish
}
| true
|
336a5cd48916c74d865a4dc65b3cc19fa0d2c027
|
Shell
|
sashetov/seshu
|
/lib/wpa2-hacker.inc.sh
|
UTF-8
| 733
| 3.09375
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Sourced helper functions for WPA2 auditing: package installation plus
# thin wrappers around airmon-ng / airodump-ng.
WPA2_HACKER_RPMS=( reaver sqlite-devel libpcap-devel aircrack-ng )
WIFI_IFACE='wlp4s0'
# airmon-ng exposes the monitor interface as "<iface>mon".
MON_IFACE="${WIFI_IFACE}mon"
DUMP_PATH='/dump/'
# Install each package of the array whose NAME is passed as $1
# (expanded via ${!1}); returns dnf's status on the first failure.
function install_packages() { #TODO - finish me ....
declare -a pkg_names=("${!1}")
for pkg in "${pkg_names[@]}"; do {
echo sudo dnf -y install $pkg
sudo dnf -y install $pkg > /dev/null 2>&1 && \
echo $pkg installed || \
return $?;
}; done;
return 0;
}
# Put the given wifi interface into monitor mode.
function start_airmon() {
WIFI_IFACE=$1
sudo airmon-ng start $WIFI_IFACE -w /
}
# Take the given monitor interface out of monitor mode.
function stop_airmon() {
MON_IFACE=$1
sudo airmon-ng stop $MON_IFACE
}
# Capture WPA2 traffic on monitor interface $1, dumping to prefix $2.
start_dump() {
MON_IFACE=$1
DUMP_PATH=$2
sudo airodump-ng $MON_IFACE -t WPA2 -i -w $DUMP_PATH
}
#stop_dump() { }
#parse_dump_bssids() { }
| true
|
c9fd5fd06a9611d96a00e54f5494d7739254dcb8
|
Shell
|
ohmyzsh/ohmyzsh
|
/plugins/autojump/autojump.plugin.zsh
|
UTF-8
| 1,544
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
# oh-my-zsh autojump plugin: source the autojump init script from the
# first known installation path, falling back to Homebrew's prefix.
declare -a autojump_paths
autojump_paths=(
$HOME/.autojump/etc/profile.d/autojump.zsh # manual installation
$HOME/.autojump/share/autojump/autojump.zsh # manual installation
$HOME/.nix-profile/etc/profile.d/autojump.sh # NixOS installation
/run/current-system/sw/share/autojump/autojump.zsh # NixOS installation
/usr/share/autojump/autojump.zsh # Debian and Ubuntu package
/etc/profile.d/autojump.zsh # manual installation
/etc/profile.d/autojump.sh # Gentoo installation
/usr/local/share/autojump/autojump.zsh # FreeBSD installation
/usr/pkg/share/autojump/autojump.zsh # NetBSD installation
/opt/local/etc/profile.d/autojump.sh # macOS with MacPorts
/usr/local/etc/profile.d/autojump.sh # macOS with Homebrew (default)
/opt/homebrew/etc/profile.d/autojump.sh # macOS with Homebrew (default on M1 macs)
/etc/profiles/per-user/$USER/etc/profile.d/autojump.sh # macOS Nix, Home Manager and flakes
)
# 'found' records whether any candidate was sourced; it also guards the
# Homebrew fallback below.
for file in $autojump_paths; do
if [[ -f "$file" ]]; then
source "$file"
found=1
break
fi
done
# if no path found, try Homebrew
if (( ! found && $+commands[brew] )); then
file=$(brew --prefix)/etc/profile.d/autojump.sh
if [[ -f "$file" ]]; then
source "$file"
found=1
fi
fi
(( ! found )) && echo '[oh-my-zsh] autojump not found. Please install it first.'
# Avoid leaking temporaries into the interactive shell.
unset autojump_paths file found
| true
|
23125e9548fba307675beab47330353955f2d76c
|
Shell
|
snbabu453/Cracking_the_Devops_Interview
|
/check_apache_status.sh
|
UTF-8
| 608
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Check whether Apache is listening on port 80; if not, try to start it
# up to THRESHOLD times and mail root when every attempt fails.
SERVICENAME="apache2"
# Alternative liveness checks, kept for reference:
# which apache2 && systemctl status apache2 | systemctl start apache2
# ps aux | grep -i apache2 > /dev/null
# A listener on :::80 means the HTTP service is up.
netstat -antulp | grep -q ':::80'
APACHESTATUS=$?
COUNT=0
THRESHOLD=2
if [[ $APACHESTATUS != 0 ]]
then
    echo "$SERVICENAME is not running"
    # Retry the service start a bounded number of times.
    while [[ $COUNT -lt $THRESHOLD ]]
    do
        if ! systemctl start "$SERVICENAME"
        then
            (( COUNT++ ))
        else
            exit 0
        fi
    done
    # Every start attempt failed: alert the admin.
    echo "There is some issue with $SERVICENAME" | mail -s "$SERVICENAME failure" root
else
    echo "$SERVICENAME is running"
fi
| true
|
8c1cd030b75399300859ecbaaf4864e87ed8278a
|
Shell
|
Minizza/ghome
|
/install.sh
|
UTF-8
| 617
| 3.25
| 3
|
[] |
no_license
|
#local directory
# Resolve the directory containing this script.
# NOTE(review): DIR is never used below — confirm whether it is needed.
DIR="$( cd "$( dirname "$0" )" && pwd )"
SERVICE=mongodb
# Install virtualenv
# You probably need to be root to do this.
sudo apt-get install xterm python-virtualenv mongodb
echo " end install with apt-get"
# Warn (but keep going) if mongodb did not come up after installation.
if ps ax | grep -v grep | grep $SERVICE > /dev/null
then
echo "$SERVICE service running, everything is fine"
else
echo "$SERVICE is not running"
fi
# Clone the project
git clone https://github.com/Minizza/ghome.git -b forInstallOnly
echo " end git cloning"
# Initialize virtualenv and install dependencies
virtualenv ghome
cd ghome
sudo pip install -r script/requirement.txt
| true
|
356229d67a5991553d6f4e47fb249aaec97feb8a
|
Shell
|
rockdrilla/dotfiles
|
/.config/zsh/alias/gpg.zsh
|
UTF-8
| 185
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/zsh
# Prime gpg (agent/pinentry) by detach-signing a throwaway temp file;
# returns gpg's exit status, 1 if gpg is not installed.
gpg-warmup() {
	local scratch rc
	(( ${+commands[gpg]} )) || return 1
	scratch=$(mktemp)
	command gpg -abs "$scratch"
	rc=$?
	# Remove both the scratch file and the .asc signature gpg produced.
	command rm -f "$scratch" "$scratch.asc"
	return "$rc"
}
| true
|
8af1835325d4e653fac396e077cc13a6886d22cc
|
Shell
|
rfindler/Grift
|
/benchmark/suite/ref-cast/run.sh
|
UTF-8
| 5,562
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Benchmark driver: measure guarded-reference read/write/cast cost in
# Grift under coercions (.o1 binaries) vs twosomes (.o2 binaries), then
# plot the per-cast-count means with gnuplot/tikz/lualatex.
#
# Usage: run.sh <number-of-runs>   ($1 = repetitions to average over)
#
# NOTE(review): despite the /bin/sh shebang this uses bash-only features
# (let, <<<, read -a, arrays); invoke it with bash.
# a larger number will result in requesting more memory to be
# allocated by twosomes which is a very expensive operation
reads=999999
writes=999999
ncasts=9999999
casts=11
griftdir=/u/dalmahal/Grift
# --------------------------------------------------------------------
name1=refread
name2=refwrite
name3=refcast
testdir=$griftdir/benchmark/suite
datadir=$griftdir/benchmark/suite/ref-cast/data
outdir=$griftdir/benchmark/suite/ref-cast/output
# contains files required by tikz to output correct figures
# created by lua gnuplot-tikz.lua style
miscdir=$griftdir/benchmark/suite/ref-cast/misc
tmpdir=$griftdir/benchmark/suite/ref-cast/tmp
logfile1=$datadir/$name1.csv
logfile2=$datadir/$name2.csv
logfile3=$datadir/$name3.csv
TIMEFORMAT=%R
echo "Benchmarking reference read and write operations with different\
number of casts"
# create the result directory if it does not exist
mkdir -p $datadir
mkdir -p $tmpdir
mkdir -p $outdir
# specialize all source files templates to a concrete number of
# iterations.
cd $testdir/ref-cast/src
sed "s/CAST-COUNT/$casts/;s/OP-COUNT/$reads/" < $name1-template> $tmpdir/$name1.grift
sed "s/CAST-COUNT/$casts/;s/OP-COUNT/$writes/" < $name2-template> $tmpdir/$name2.grift
sed "s/CAST-COUNT/$ncasts/" < $name3-template> $tmpdir/$name3.grift
# compile Grift source files, then enter the src directory.
cd $griftdir
racket benchmark.rkt $tmpdir
cd $tmpdir
# CSV headers: one row per cast count (files 1,2); a single row (file 3).
echo "n,coercion-mean,coercion-std,twosome-mean,twosome-std" > $logfile1
echo "n,coercion-mean,coercion-std,twosome-mean,twosome-std" > $logfile2
echo "coercion-mean,coercion-std,twosome-mean,twosome-std" > $logfile3
# normalize to the cost of iteration in nano seconds.
let "c1=1000000000/$reads"
let "c2=1000000000/$writes"
let "c3=1000000000/$ncasts"
# run the same experiment $1 times to compute the mean and std
for i in `seq 1 $1`;
do
# layout the result at each number of casts in one row space separated
./$name1.o1 | sed -n 's/.*: \([0-9]*.[0-9]*\)/\1/p' | paste -sd " " - >> $tmpdir/data1
./$name1.o2 | sed -n 's/.*: \([0-9]*.[0-9]*\)/\1/p' | paste -sd " " - >> $tmpdir/data2
./$name2.o1 | sed -n 's/.*: \([0-9]*.[0-9]*\)/\1/p' | paste -sd " " - >> $tmpdir/data3
./$name2.o2 | sed -n 's/.*: \([0-9]*.[0-9]*\)/\1/p' | paste -sd " " - >> $tmpdir/data4
./$name3.o1 | sed -n 's/.*: \([0-9]*.[0-9]*\)/\1/p' >> $tmpdir/data5
./$name3.o2 | sed -n 's/.*: \([0-9]*.[0-9]*\)/\1/p' >> $tmpdir/data6
echo "finished run #$i"
done
# Scale the cast timings to ns-per-iteration, then fold them into
# mean/std over the $1 runs.
awk -v c="$c3" '{print ($1 = $1*c) " " $2}' $tmpdir/data5 > $tmpdir/data5.tmp; mv $tmpdir/data5.tmp $tmpdir/data5
awk -v c="$c3" '{print ($1 = $1*c) " " $2}' $tmpdir/data6 > $tmpdir/data6.tmp; mv $tmpdir/data6.tmp $tmpdir/data6
read std1 mean1 <<< $( cat $tmpdir/data5 | awk -v var=$1 '{sum+=$1; sumsq+=$1*$1}END{printf("%.2f %.2f\n", sqrt(sumsq/NR - (sum/NR)**2), (sum/var))}' )
read std2 mean2 <<< $( cat $tmpdir/data6 | awk -v var=$1 '{sum+=$1; sumsq+=$1*$1}END{printf("%.2f %.2f\n", sqrt(sumsq/NR - (sum/NR)**2), (sum/var))}' )
echo "$mean1,$std1,$mean2,$std2" >> $logfile3
# Column i of data1..data4 holds the timing for i casts; aggregate each
# column across the runs into mean/std.
for i in `seq 0 $((casts-1))`;
do
read std1 mean1 <<< $( while read -a line; do echo -e "${line[$i]}"; \
done < $tmpdir/data1 | awk -v c="$c1" '{print ($1 = $1*c) " " $2}' | awk -v var=$1 '{sum+=$1; sumsq+=$1*$1}END{printf("%.2f %.2f\n", sqrt(sumsq/NR - (sum/NR)**2), (sum/var))}' )
read std2 mean2 <<< $( while read -a line; do echo -e "${line[$i]}"; \
done < $tmpdir/data2 | awk -v c="$c1" '{print ($1 = $1*c) " " $2}' | awk -v var=$1 '{sum+=$1; sumsq+=$1*$1}END{printf("%.2f %.2f\n", sqrt(sumsq/NR - (sum/NR)**2), (sum/var))}' )
read std3 mean3 <<< $( while read -a line; do echo -e "${line[$i]}"; \
done < $tmpdir/data3 | awk -v c="$c2" '{print ($1 = $1*c) " " $2}' | awk -v var=$1 '{sum+=$1; sumsq+=$1*$1}END{printf("%.2f %.2f\n", sqrt(sumsq/NR - (sum/NR)**2), (sum/var))}' )
read std4 mean4 <<< $( while read -a line; do echo -e "${line[$i]}"; \
done < $tmpdir/data4 | awk -v c="$c2" '{print ($1 = $1*c) " " $2}' | awk -v var=$1 '{sum+=$1; sumsq+=$1*$1}END{printf("%.2f %.2f\n", sqrt(sumsq/NR - (sum/NR)**2), (sum/var))}' )
echo "$i,$mean1,$std1,$mean2,$std2" >> $logfile1
echo "$i,$mean3,$std3,$mean4,$std4" >> $logfile2
done
# compile figures to tikz tex code
gnuplot -e "set datafile separator \",\"; set term tikz standalone color size 5in,3in; "`
`"set output '$outdir/$name1.tex'; "`
`"set title \"Reference read\"; "`
`"set xrange [0:10]; set yrange [0:300]; "`
`"set xtics 0,1,10; set ylabel \"time in ns\"; "`
`"set xlabel \"number of casts\"; "`
`"plot '$datadir/$name1.csv' every ::1 using 1:2 with linespoints lc rgb \"blue\" title 'Coercions', "`
`"'$datadir/$name1.csv' every ::1 using 1:4 with linespoints lc rgb \"red\" title 'Twosomes'"
gnuplot -e "set datafile separator \",\"; set term tikz standalone color size 5in,3in; "`
`"set output '$outdir/$name2.tex'; "`
`"set title \"Reference write\"; "`
`"set xrange [0:10]; set yrange [0:300]; "`
`"set xtics 0,1,10; set ylabel \"time in ns\"; "`
`"set xlabel \"number of casts\"; "`
`"plot '$datadir/$name2.csv' every ::1 using 1:2 with linespoints lc rgb \"blue\" title 'Coercions', "`
`"'$datadir/$name2.csv' every ::1 using 1:4 with linespoints lc rgb \"red\" title 'Twosomes'"
# compile tex code
cp $miscdir/* $tmpdir
cp $outdir/$name1.tex $tmpdir
cp $outdir/$name2.tex $tmpdir
cd $tmpdir
lualatex --interaction=nonstopmode $name1.tex
lualatex --interaction=nonstopmode $name2.tex
mv $name1.pdf $outdir
mv $name2.pdf $outdir
| true
|
6ac372f6c4995e1dc15f6fe757f60ec3515d0612
|
Shell
|
tvdijen/OpenConext-engineblock
|
/bin/makeRelease.sh
|
UTF-8
| 4,059
| 3.953125
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Build a release tarball of OpenConext-engineblock from a git tag or
# branch: clone, install production dependencies, build frontend assets,
# strip dev files, tar, checksum and optionally GPG-sign the result.
#
# Usage: makeRelease.sh <tag-or-branch> [sign]

# Build with production settings; the caller's env is restored at the end.
PREVIOUS_SF_ENV=${SYMFONY_ENV}
PREVIOUS_EB_ENV=${ENGINEBLOCK_ENV}
export SYMFONY_ENV=prod
export ENGINEBLOCK_ENV=production
RELEASE_DIR=${HOME}/Releases
GITHUB_USER=OpenConext
PROJECT_NAME=OpenConext-engineblock
if [ -z "$1" ]
then
cat << EOF
Please specify the tag or branch to make a release of.
Examples:
sh makeRelease.sh 0.1.0
sh makeRelease.sh master
sh makeRelease.sh develop
If you want to GPG sign the release, you can specify the "sign" parameter, this will
invoke the gpg command line tool to sign it.
sh makeRelease 0.1.0 sign
EOF
exit 1
else
TAG=$1
fi
# Slashes in branch names would break paths; replace them with '_'.
PROJECT_DIR_NAME=${PROJECT_NAME}-${TAG//\//_} &&
PROJECT_DIR=${RELEASE_DIR}/${PROJECT_DIR_NAME} &&
# Check requirements
command -v php >/dev/null 2>&1 || { echo >&2 "Missing PHP 7.2. Aborting"; exit 1; }
command -v composer >/dev/null 2>&1 || { echo >&2 "Missing Composer. Aborting."; exit 1; }
command -v npm >/dev/null 2>&1 || { echo >&2 "Missing NPM. Aborting."; exit 1; }
command -v git >/dev/null 2>&1 || { echo >&2 "Missing Git. Aborting."; exit 1; }
# Prepare environment: fresh clone of the requested ref.
echo "Preparing environment" &&
mkdir -p ${RELEASE_DIR} &&
rm -rf ${PROJECT_DIR} &&
echo "Cloning repository" &&
cd ${RELEASE_DIR} &&
git clone https://github.com/${GITHUB_USER}/${PROJECT_NAME}.git ${PROJECT_DIR_NAME} &&
echo "Checking out ${TAG}" &&
cd ${PROJECT_DIR} &&
git checkout ${TAG}
if [ $? -eq 0 ]; then
    echo "Project prepared"
else
    echo "Initialization failed"
    exit 1
fi
# Install composer dependencies
echo "Running Composer Install" &&
php $(which composer) install -n --no-dev --prefer-dist -o
if [ $? -eq 0 ]; then
    echo "Composer install ran"
else
    # BUG FIX: message typo ("compopser") corrected.
    echo "Unable to run composer install"
    exit 1
fi
# Build NPM frontend assets
# --unsafe-perm because we do branch install as root.
# can be removed when we stop doing that
echo "Build assets"
cd ${PROJECT_DIR}/theme &&
npm ci --unsafe-perm &&
npm run release
if [ $? -eq 0 ]; then
    echo "Assets built"
else
    echo "Unable to build assets"
    exit 1
fi
# Tag release and remove unwanted files
echo "Tagging the release in RELEASE file" &&
COMMITHASH=`git rev-parse HEAD` &&
echo "Tag: ${TAG}" > ${PROJECT_DIR}/RELEASE &&
echo "Commit: ${COMMITHASH}" >> ${PROJECT_DIR}/RELEASE &&
echo "Updating asset_version in config" &&
sed -i s,#ASSET_VERSION#,${TAG},g ${PROJECT_DIR}/app/config/config.yml &&
echo "Cleaning build of dev files" &&
rm -rf ${PROJECT_DIR}/.idea &&
rm -rf ${PROJECT_DIR}/.git &&
rm -rf ${PROJECT_DIR}/.github &&
rm -f ${PROJECT_DIR}/.gitignore &&
rm -f ${PROJECT_DIR}/makeRelease.sh &&
rm -f ${PROJECT_DIR}/bin/composer.phar &&
rm -f ${PROJECT_DIR}/app_dev.php.dist &&
rm -rf ${PROJECT_DIR}/features &&
rm -rf ${PROJECT_DIR}/behat.yml &&
rm -rf ${PROJECT_DIR}/build.xml &&
rm -rf ${PROJECT_DIR}/tests &&
rm -rf ${PROJECT_DIR}/ci &&
rm -rf ${PROJECT_DIR}/theme/node_modules &&
rm -rf ${PROJECT_DIR}/theme/cypress &&
rm -rf ${PROJECT_DIR}/theme/.sass-cache
if [ $? -eq 0 ]; then
    echo "Release built"
else
    echo "Failed to build release"
    exit 1
fi
# Create tarball
echo "Create tarball" &&
cd ${RELEASE_DIR} &&
tar -czf ${PROJECT_DIR_NAME}.tar.gz ${PROJECT_DIR_NAME}
if [ $? -eq 0 ]; then
    echo "Tarball built"
else
    echo "Unable to build tarball"
    exit 1
fi
# Create checksum (sha1sum on Linux, shasum fallback on macOS/BSD)
echo "Create checksum file" &&
cd ${RELEASE_DIR} &&
if hash sha1sum 2>/dev/null; then
    sha1sum ${PROJECT_DIR_NAME}.tar.gz > ${PROJECT_DIR_NAME}.sha
else
    shasum ${PROJECT_DIR_NAME}.tar.gz > ${PROJECT_DIR_NAME}.sha
fi
if [ $? -eq 0 ]; then
    echo "Checksum created"
else
    echo "Unable to create checksum"
    exit 1
fi
# Sign with GPG key (opt-in via second argument "sign")
if [ -n "$2" ]
then
    if [ "$2" == "sign" ]
    then
        echo "Signing build"
        cd ${RELEASE_DIR}
        gpg -o ${PROJECT_DIR_NAME}.sha.gpg --clearsign ${PROJECT_DIR_NAME}.sha
        if [ $? -eq 0 ]; then
            echo "Signed"
        else
            echo "Unable to sign tarball"
            exit 1
        fi
    fi
fi
# Restore the caller's environment.
export SYMFONY_ENV=${PREVIOUS_SF_ENV}
export ENGINEBLOCK_ENV=${PREVIOUS_EB_ENV}
| true
|
978b4de50a057d0649600947bbd60960c51758b6
|
Shell
|
EXPRESSCLUSTER/SCSI-PR
|
/Linux Scripts/attacker.sh
|
UTF-8
| 1,363
| 3.578125
| 4
|
[] |
no_license
|
#! /bin/sh
#***************************************
#* attacker.sh *
#***************************************
# Try to take over the SCSI-3 persistent reservation on $dev: register
# this node's key, then repeatedly clear/re-register/reserve; exit 0 if
# our key ends up holding the Exclusive Access reservation.
# Parameter
#-----------
dev=/dev/sdc
#-----------
# finding current node index then making key for Persistent Reserve
key=abc00`clpstat --local | sed -n '/<server>/,/<group>/p' | grep '^ [\* ][^ ]' | sed -n '0,/^ [\*]/p' | wc -l`
interval=7 #sec
echo "[D] key : ${key}"
echo "[D] dev : ${dev}"
echo "[D] int : ${interval}"
# Clear registrations/reservations on the device using our key (-C).
function clear () {
sg_persist -o -C -K $key -d $dev > /dev/null 2>&1
ret=$?
if [ $ret -eq 0 ]; then
echo [I] [$ret] Clear succeeded
else
echo [E] [$ret] Clear failed
fi
}
# Register our key with the device (-G); status 2 is also accepted.
function register () {
sg_persist -o -G -S $key -d $dev > /dev/null 2>&1
ret=$?
if [ $ret -eq 0 ] || [ $ret -eq 2 ]; then
echo [I] [$ret] Register key succeeded
else
echo [E] [$ret] Register key failed
fi
}
# Reserve the device (-R) with type 3 under our key.
function reserve () {
sg_persist -o -R -K $key -T 3 -d $dev > /dev/null 2>&1
ret=$?
if [ $ret -eq 0 ]; then
echo [I] [$ret] Reserve succeeded
else
echo [E] [$ret] Reserve failed
fi
}
register
# Up to 3 attempts: clear competing state, re-register, wait $interval
# seconds, reserve, then verify our key holds an Exclusive Access
# reservation on the device.
for ((i=0; i<3; i++));do
clear
register
sleep $interval
reserve
sg_persist -r $dev | grep -A 1 $key | grep 'Exclusive Access' > /dev/null 2>&1
ret=$?
if [ $ret -eq 0 ]; then
echo [I] [$ret] Reserve found. Will become DEFENDER.
exit 0
fi
echo [D] [$ret] Reserve not found.
done
# Attack failed
exit 1
| true
|
6c6942340afca64264deb267f1e457ab1723c271
|
Shell
|
gargrakhen/PBS_Pro_Hooks
|
/install_PBS.sh
|
UTF-8
| 657
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the PBS Pro 18.1.3 execution daemon on CentOS 7 and point it
# at the PBS server whose hostname is given as the first argument.
if [ -z "$1" ]; then
    echo "No hostname specified."
    echo "Usage: install_pbs.sh HOSTNAME"
    exit 1
fi
export PBS_SERVER=$1
# Root is required for useradd/yum/systemctl below.
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root."
    exit 1
fi
useradd --system -m pbsdata
curl -O -L https://github.com/PBSPro/pbspro/releases/download/v18.1.3/pbspro_18.1.3.centos7.zip
# unzip is not in the minimal CentOS image; install it on demand.
if ! hash unzip 2>/dev/null; then
    yum -y install unzip
fi
unzip pbspro_18.1.3.centos7.zip
# Abort rather than install from the wrong directory if cd fails.
cd pbspro_18.1.3.centos7 || exit 1
yum -y install pbspro-execution-18.1.3-0.x86_64.rpm
systemctl enable pbs
systemctl start pbs
| true
|
b5d6da8a8e744fe15418058f747d1f336919875e
|
Shell
|
pocketgroovy/voltage
|
/witches-server/devBuild.sh
|
UTF-8
| 843
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build & deploy the dev branch of witches-server to the dev host
# (witchdev) over rsync, dialing the VPN first if it is down.
set -o errexit
REPO_SERVER=172.16.100.204
DEPLOY_DIR=/var/www/voltage-ent.com/witches-server
# execute server build for dev
if [ ! -d witches-server ]; then
    # BUG FIX: the original cloned from the undefined ${REPO_URL};
    # the host variable declared above is REPO_SERVER.
    git clone git@${REPO_SERVER}:witches-server
fi
pushd witches-server
# remove local changes
git clean -d -f -q
git reset --hard
# retrieve the latest code
git checkout dev
git pull origin dev
# Open VPN connection (pgrep fails when pppd is not running, setting
# VPN_CLOSED to its non-zero status).
pgrep pppd || VPN_CLOSED=$?
# BUG FIX: unquoted [ -n $VPN_CLOSED ] degenerates to [ -n ], which is
# always true, so the VPN was redialed even when already up.
if [ -n "${VPN_CLOSED}" ]; then
    sudo pppd call softlayer
    sleep 15
fi
# Copy
rsync -a -c --delete --progress --exclude-from '../syncExclusions' . sysdev@witchdev:${DEPLOY_DIR}/
# Set the appropriate group on all the server files
ssh sysdev@witchdev "find '${DEPLOY_DIR}' -user sysdev -exec chown :apache {} \;"
popd
# Finally, Compile server & Reboot
ssh sysdev@witchdev "${DEPLOY_DIR}/deploy.sh dev"
| true
|
eed8f94d1a57fa5b7553bc5f14b7d13ec66a0f7b
|
Shell
|
aswinksd/project7
|
/sc.sh
|
UTF-8
| 514
| 3.3125
| 3
|
[] |
no_license
|
# Simple interactive calculator: read two numbers, then repeatedly ask
# for an operation until the user answers anything other than "y".
s=0
echo "enter first no"
read a
echo "enter second no"
read b
i="y"
# Quoted test so an empty answer ends the loop instead of crashing.
while [ "$i" = "y" ]
do
	echo "1.addition"
	echo "2.subtraction"
	echo "3.multiplication"
	echo "4.division"
	echo "5.modulus"
	echo "enter your choice"
	read ch
	# Arithmetic expansion replaces the slower/legacy `expr` calls.
	case $ch in
	1)s=$((a + b))
		echo "sum of 2 no:'s $s";;
	2)p=$((a - b))
		echo "diff is $p";;
	3)q=$((a * b))
		echo "mul is $q";;
	4)r=$((a / b))
		echo "div is $r";;
	5)m=$((a % b))
		echo "mod is $m";;
	esac
	echo "do you want to continue?"
	read i
	if [ "$i" != "y" ]
	then
		exit
	fi
done
| true
|
e431dc920377c3f9ab4e4cfaadb2d8a265606afa
|
Shell
|
venus29/bash-learn
|
/argument/use_getopts.bash
|
UTF-8
| 428
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Demo of the getopts builtin: parse -o <outfile> and -f <infile>, then
# show how OPTIND is used to shift away the parsed options.
# Each letter followed by ':' takes an argument (delivered in $OPTARG).
while getopts "o:f:" opt
do
case "$opt" in
o) output_filename=$OPTARG;;
f) input_filename=$OPTARG;;
*) exit 0;;
esac
done
# OPTIND is the index of the next argument
# to be processed (starting index is 1)
# usually used with getopts
# ./use_getopts.bash -o abc
echo $OPTIND
echo $output_filename
# this will collect the last string after non args.
shift "$(($OPTIND - 1))"
echo $@
| true
|
3fbb062b95a8bf87d0e735300b9f46e472f1066e
|
Shell
|
termux/termux-packages
|
/packages/gnugo/build.sh
|
UTF-8
| 1,061
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Termux package build recipe for GNU Go.
TERMUX_PKG_HOMEPAGE=https://www.gnu.org/software/gnugo/
TERMUX_PKG_DESCRIPTION="Program that plays the game of Go"
TERMUX_PKG_LICENSE="GPL-3.0"
TERMUX_PKG_MAINTAINER="@termux"
TERMUX_PKG_VERSION=3.8
TERMUX_PKG_REVISION=5
TERMUX_PKG_SRCURL=https://mirrors.kernel.org/gnu/gnugo/gnugo-${TERMUX_PKG_VERSION}.tar.gz
TERMUX_PKG_SHA256=da68d7a65f44dcf6ce6e4e630b6f6dd9897249d34425920bfdd4e07ff1866a72
TERMUX_PKG_DEPENDS="ncurses, readline"
TERMUX_PKG_EXTRA_CONFIGURE_ARGS="--with-readline"
# A host build is performed first so the pattern tools exist as
# build-machine binaries.
TERMUX_PKG_HOSTBUILD=true
TERMUX_PKG_GROUPS="games"
# Copy the host-built pattern-compiler helpers into the target build
# tree — presumably because they must run on the build machine during
# the cross build — and timestamp them an hour in the future so make
# treats them as up to date and does not rebuild them for the target.
termux_step_post_configure () {
cp $TERMUX_PKG_HOSTBUILD_DIR/patterns/mkeyes $TERMUX_PKG_BUILDDIR/patterns/mkeyes
cp $TERMUX_PKG_HOSTBUILD_DIR/patterns/uncompress_fuseki $TERMUX_PKG_BUILDDIR/patterns/uncompress_fuseki
cp $TERMUX_PKG_HOSTBUILD_DIR/patterns/joseki $TERMUX_PKG_BUILDDIR/patterns/joseki
cp $TERMUX_PKG_HOSTBUILD_DIR/patterns/mkmcpat $TERMUX_PKG_BUILDDIR/patterns/mkmcpat
cp $TERMUX_PKG_HOSTBUILD_DIR/patterns/mkpat $TERMUX_PKG_BUILDDIR/patterns/mkpat
touch -d "next hour" $TERMUX_PKG_BUILDDIR/patterns/*
}
| true
|
b7927fdf0d395fd0bf43e3d9121d103a96c917ad
|
Shell
|
warpme/minimyth2
|
/script/meta/minimyth/files/source/rootfs/usr/bin/irsend_daemon
|
UTF-8
| 1,074
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/sh
. /etc/rc.d/functions
# Daemon that reads irsend commands from a FIFO and replays them.
# Input to this daemon is "device?command". Delays of 1s or 5s are
# possible via the delay_1s and delay_5s pseudo-commands.
# All spaces in device and commands should be replaced by ?
# i.e. for "irsend SEND_ONCE <dev1> <cmd1> <sleep 5 sec> <dev2> <cmd2>"
# use:
# " echo dev1?cmd1 delay_5s dev2?cmd2 > /var/run/irsend_fifo
irsend="/usr/bin/irsend SEND_ONCE "
# echo "Starting irsend server"
/usr/bin/logger -t minimyth -p "local0.info" "[irsend_daemon] Starting irsend commands daemon..."
# Create the control FIFO on first start.
if [ ! -e /var/run/irsend_fifo ] ; then
/usr/bin/logger -t minimyth -p "local0.info" "[irsend_daemon] Creating FIFO at /var/run/irsend_fifo"
mkfifo /var/run/irsend_fifo
fi
/usr/bin/logger -t minimyth -p "local0.info" "[irsend_daemon] Waiting for commands..."
# Main loop: each cat blocks until a writer sends a whitespace-separated
# token list; delay_1s/delay_5s sleep, anything else is "device?command"
# with '?' translated back to spaces before being handed to irsend.
while true; do
commands=`cat < /var/run/irsend_fifo`
for cmd in $commands; do
if [ "x${cmd}" = "xdelay_1s" ] ; then
/bin/sleep 1
elif [ "x${cmd}" = "xdelay_5s" ] ; then
/bin/sleep 5
else
cmd=`echo ${cmd} | sed 's/?/ /g'`
$irsend ${cmd}
fi
done
done
| true
|
87246b9ab51fd78ada2079dbb6d54fdc01e7c929
|
Shell
|
gevent/gevent
|
/scripts/install.sh
|
UTF-8
| 4,174
| 3.859375
| 4
|
[
"Python-2.0",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# GEVENT: Taken from https://raw.githubusercontent.com/DRMacIver/hypothesis/master/scripts/install.sh
# Special license: Take literally anything you want out of this file. I don't
# care. Consider it WTFPL licensed if you like.
# Basically there's a lot of suffering encoded here that I don't want you to
# have to go through and you should feel free to use this to avoid some of
# that suffering in advance.
set -e
set -x
# Where installations go
BASE=${BUILD_RUNTIMES-$PWD/.runtimes}
PYENV=$BASE/pyenv
echo $BASE
mkdir -p $BASE
# Clone pyenv on first use; otherwise refresh it only when the build
# recipe for the requested version is missing from the checkout.
update_pyenv () {
VERSION="$1"
if [ ! -d "$PYENV/.git" ]; then
rm -rf $PYENV
git clone https://github.com/pyenv/pyenv.git $BASE/pyenv
else
if [ ! -f "$PYENV/plugins/python-build/share/python-build/$VERSION" ]; then
echo "Updating $PYENV for $VERSION"
back=$PWD
cd $PYENV
git fetch || echo "Fetch failed to complete. Ignoring"
git reset --hard origin/master
cd $back
fi
fi
}
SNAKEPIT=$BASE/snakepit
##
# install(exact-version, bin-alias, dir-alias)
#
# Produce a python executable at $SNAKEPIT/bin-alias
# having the exact version given as exact-version.
#
# Also produces a $SNAKEPIT/dir-alias/ pointing to the root
# of the python install.
##
install () {
VERSION="$1"
ALIAS="$2"
DIR_ALIAS="$3"
DESTINATION=$BASE/versions/$VERSION
mkdir -p $BASE/versions
mkdir -p $SNAKEPIT
# Build only if this exact version is not already installed (cached).
if [ ! -e "$DESTINATION" ]; then
mkdir -p $SNAKEPIT
mkdir -p $BASE/versions
update_pyenv $VERSION
# -Ofast makes the build take too long and times out Travis. It also affects
# process-wide floating-point flags - see: https://github.com/gevent/gevent/pull/1864
CFLAGS="-O1 -pipe -march=native" $BASE/pyenv/plugins/python-build/bin/python-build $VERSION $DESTINATION
fi
# Travis CI doesn't take symlink changes (or creation!) into
# account on its caching, So we need to write an actual file if we
# actually changed something. For python version upgrades, this is
# usually handled automatically (obviously) because we installed
# python. But if we make changes *just* to symlink locations above,
# nothing happens. So for every symlink, write a file...with identical contents,
# so that we don't get *spurious* caching. (Travis doesn't check for mod times,
# just contents, so echoing each time doesn't cause it to re-cache.)
# Overwrite an existing alias.
# For whatever reason, ln -sf on Travis works fine for the ALIAS,
# but fails for the DIR_ALIAS. No clue why. So we delete an existing one of those
# manually.
if [ -L "$SNAKEPIT/$DIR_ALIAS" ]; then
rm -f $SNAKEPIT/$DIR_ALIAS
fi
ln -sfv $DESTINATION/bin/python $SNAKEPIT/$ALIAS
ln -sfv $DESTINATION $SNAKEPIT/$DIR_ALIAS
echo $VERSION $ALIAS $DIR_ALIAS > $SNAKEPIT/$ALIAS.installed
$SNAKEPIT/$ALIAS --version
$DESTINATION/bin/python --version
# Set the PATH to include the install's bin directory so pip
# doesn't nag.
# Use quiet mode for this; PyPy2 has been seen to output
# an error:
# UnicodeEncodeError: 'ascii' codec can't encode
# character u'\u258f' in position 6: ordinal not in range(128)
# https://travis-ci.org/github/gevent/gevent/jobs/699973435
PATH="$DESTINATION/bin/:$PATH" $SNAKEPIT/$ALIAS -m pip install -q --upgrade pip wheel virtualenv
ls -l $SNAKEPIT
ls -l $BASE/versions
}
# Each requested short version maps to a pinned exact release.
for var in "$@"; do
case "${var}" in
2.7)
install 2.7.17 python2.7 2.7.d
;;
3.5)
install 3.5.9 python3.5 3.5.d
;;
3.6)
install 3.6.10 python3.6 3.6.d
;;
3.7)
install 3.7.7 python3.7 3.7.d
;;
3.8)
install 3.8.2 python3.8 3.8.d
;;
3.9)
install 3.9.0 python3.9 3.9.d
;;
pypy2.7)
install pypy2.7-7.3.1 pypy2.7 pypy2.7.d
;;
pypy3.6)
install pypy3.6-7.3.1 pypy3.6 pypy3.6.d
;;
esac
done
|
6484eef800a3b41c94d14d1b12e3e59c9e37109b
|
Shell
|
siddharthbe/gpsr-command-understanding
|
/scripts/test_all_models
|
UTF-8
| 2,622
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash

# Print the help text and terminate the script with status 1.
# NOTE(review): `<<-` strips leading *tabs* (not spaces) from the
# here-document body; the exact indentation of the original cannot be
# verified here — confirm the body is tab-indented if re-indenting.
function usage {
    cat <<- EOF
usage: test_all_models.sh results_path test_file
Runs allennlp evaluate and predict on all results directories in a path.

Options:
   -h --help      Print this message
   -c --use-cpu   Use CPU instead of GPU
EOF
    exit 1
}
# Require at least the results path and the test file.
# BUG FIX: `<` inside [[ ]] is a *lexicographic string* comparison (so e.g.
# "10" < "2" is true); use the numeric -lt operator for an argument count.
# The `exit 1` that followed `usage` was unreachable — usage already exits.
if [[ $# -lt 2 ]]; then
    usage
fi

results_path=$1
shift
test_path=$1
shift

use_cpu=false

while [[ $# -gt 0 ]]; do
    key="$1"

    case $key in
        -h|--help)
            usage
            ;;
        -c|--use-cpu)
            use_cpu=true
            ;;
        --)
            # Get rid of --
            shift
            # The remainder are grab-bag args to pass to the script.
            # NOTE(review): in an assignment, "$@" joins the remaining
            # words with spaces; `args` is never read later in this file —
            # confirm it is intentional before removing.
            args="${args}$@"
            break
            ;;
        *)
            >&2 echo "Unknown argument: $1"
            exit 1
            ;;
    esac
    shift # move past argument
done
# This is a list of results directories (one per config).
# The glob is stored unexpanded on purpose; it expands when used unquoted
# in the `for dir in $results_dirs` loop below.
results_dirs=${results_path}/*

# We assume the test data file lives in a directory that is named to describe what the dataset is
dataset_dir_path=$(dirname "${test_path}")
dataset_name=$(basename "${dataset_dir_path}")

summary_path="${results_path}/${dataset_name}_results_summary.txt"
# Clear this file (leaves a single blank line)
echo "" > "$summary_path"

# Kept as a string and later expanded *unquoted* so it splits into two
# words ("--cuda-device" and "0"); emptied when the CPU was requested.
gpu_arg="--cuda-device 0"
if [[ ${use_cpu} = "true" ]]; then
    gpu_arg=""
fi

# Evaluate the non-neural baseline parsers; `set -x` echoes the exact
# command line for the logs, then tracing is switched off again.
set -x;
python -m gpsr_command_understanding.data.evaluate_baseline_parsers -t "${dataset_dir_path}/train.txt" -v "${dataset_dir_path}/val.txt" -te "${dataset_dir_path}/test.txt" -o "${results_path}/baseline_evaluation_${dataset_name}.json"> "${results_path}/baselines_log_${dataset_name}.txt"
set +x;
# Evaluate every trained model directory and append a per-model section to
# the summary file.
for dir in $results_dirs; do
    # Filter out non-directory files
    if [[ ! -d "$dir" ]]; then
        continue
    fi

    # Folder should be named for the config it was produced by
    model_name=$(basename "$dir")
    model_path="${dir}/model.tar.gz"
    if [[ ! -f "${model_path}" ]]; then
        echo "No model to test for ${dir}"
        continue
    fi

    set -x
    # $gpu_arg is intentionally unquoted so "--cuda-device 0" splits into two words.
    allennlp evaluate "${model_path}" "${test_path}" --output-file "${dir}/evaluation_${dataset_name}.json" --include-package gpsr_command_understanding $gpu_arg > /dev/null 2>&1
    allennlp predict "${model_path}" "${test_path}" --output-file "${dir}/predictions_${dataset_name}.json" --use-dataset-reader --predictor command_parser --include-package gpsr_command_understanding --batch-size 64 $gpu_arg > /dev/null 2>&1
    set +x

    echo -e "\n ${model_name}" >> "${summary_path}"
    # BUG FIX: `allennlp evaluate` above writes evaluation_${dataset_name}.json;
    # the original cat'ed a ".txt" file that is never created, so the summary
    # stayed empty.  Read the .json file that was actually written.
    cat "${dir}/evaluation_${dataset_name}.json" >> "${summary_path}"
done
| true
|
39b7b4e915e3c1c5956f3783544d9bad053014ae
|
Shell
|
dlaststark/machine-learning-projects
|
/Programming Language Detection/Experiment-2/Dataset/Train/UNIX-Shell/count-the-coins-1.sh
|
UTF-8
| 293
| 3.046875
| 3
|
[] |
no_license
|
# Count the number of ways to make `amount` out of the given coin
# denominations (classic bottom-up coin-change DP).
#   $1   - target amount (integer)
#   $2.. - coin denominations
# Outputs the count on stdout.
count_change() {
    local -i amount=$1 coin j
    local -a ways=(1)   # ways[v] = #ways to make value v; ways[0] = 1
    shift
    for coin in "$@"; do
        # Each pass adds the option of using one more `coin`.
        for (( j = coin; j <= amount; j++ )); do
            # `let` is deprecated; use plain arithmetic expansion instead.
            # Unset cells default to 0.
            ways[j]=$(( ${ways[j]:-0} + ${ways[j-coin]:-0} ))
        done
    done
    echo "${ways[amount]}"
}
# Demo runs: e.g. 100 units from coins {25,10,5,1}, and 100000 units from
# coins {100,50,25,10,5,1}.
count_change 100 25 10 5 1
count_change 100000 100 50 25 10 5 1
| true
|
aecbfaddb26fe70bc817f91c6dda4e2461ab4c54
|
Shell
|
bc-charlesho/checkout-sdk-js
|
/scripts/circleci/copy-previous-releases.sh
|
UTF-8
| 470
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash

set -e
set -u

# Clone checkout-sdk-js-server repo (shallow clone: history is not needed)
git clone --depth 1 git@github.com:bigcommerce/checkout-sdk-js-server.git /tmp/repo-server

# Copy previous releases into a folder for further modification
cp -rf /tmp/repo-server/public/* ~/repo/dist-cdn

# Rewrite the placeholder text contained in those releases with the production URL.
# FIX: quote "$file" so paths containing whitespace/glob characters are passed
# to sed as a single argument (SC2086).
for file in ~/repo/dist-cdn/*/loader-v*.js; do
    sed -i "s#__ASSET_HOST__#https://checkout-sdk.bigcommerce.com#g" "$file"
done
| true
|
9c117011c0a90920eec29bcb5cb9df8f1e34e81a
|
Shell
|
sikachov/code
|
/script.sh
|
UTF-8
| 228
| 3.09375
| 3
|
[] |
no_license
|
#! /bin/bash
# Print every prime number strictly less than the limit given as $1,
# one per line (same output contract as before).
# Improvements over the original: `expr`/`let` replaced with builtin
# arithmetic, trial division stops at sqrt(n), and a missing argument is
# now a silent no-op instead of a test(1) error.

# Return 0 iff $1 is prime.
is_prime() {
    local -i n=$1 d
    (( n >= 2 )) || return 1
    for (( d = 2; d * d <= n; d++ )); do
        if (( n % d == 0 )); then
            return 1
        fi
    done
    return 0
}

# Print all primes p with 2 <= p < $1.
primes_below() {
    local -i limit=$1 i
    for (( i = 2; i < limit; i++ )); do
        if is_prime "$i"; then
            echo "$i"
        fi
    done
}

primes_below "${1:-0}"
| true
|
92bf963a47736693167301b47dfb6d88d9f23e92
|
Shell
|
tylerjl/pkgbuilds
|
/packetbeat-bin/PKGBUILD
|
UTF-8
| 1,690
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
# Maintainer: Tyler Langlois <ty |at| tjll |dot| net>

# Binary (precompiled) package for Elastic's Packetbeat.
pkgname=packetbeat-bin
# Upstream product name: pkgname with the trailing "-bin" stripped.
_pkgbase=${pkgname%%-bin}
pkgver=6.2.1
pkgrel=1
pkgdesc='An open source network packet analyzer that ships data to Elasticsearch (precompiled)'
arch=('i686' 'x86_64')
url="https://www.elastic.co/products/beats/$_pkgbase"
license=('APACHE')
# Keep the user's configuration file across upgrades.
backup=("etc/$_pkgbase/$_pkgbase.yml")
optdepends=('elasticsearch: for running standalone installation')
# Do not strip the prebuilt binaries.
options=('!strip')
provides=("$_pkgbase")
conflicts=("$_pkgbase")
# Arch-independent source: the local systemd unit file.
source=("$_pkgbase.service")
sha256sums=('407fda47ace0e9de3dc27bf504693a3e432bf377c9a5e6772377b2ebb3a8f2e4')
# Per-architecture upstream release tarballs and their checksums.
source_i686=("https://artifacts.elastic.co/downloads/beats/$_pkgbase/$_pkgbase-$pkgver-linux-x86.tar.gz")
source_x86_64=("https://artifacts.elastic.co/downloads/beats/$_pkgbase/$_pkgbase-$pkgver-linux-x86_64.tar.gz")
sha256sums_i686=('fcf470ea40f1a64bdf6557124a1f332c0998cad52c0776cec6d09128715b317f')
sha256sums_x86_64=('59e4f955457b4b5bae578714d7c261538ccf798e10295acf1fbd33d07260a95f')
package() {
  # Elastic names its 32-bit tarballs "x86" rather than "i686".
  case $CARCH in
    i686) beats_arch=x86 ;;
    *)    beats_arch=$CARCH ;;
  esac

  cd "$srcdir/$_pkgbase-$pkgver-linux-$beats_arch"

  # Runtime state and log directories.
  for state_dir in lib log ; do
    mkdir -p "$pkgdir/var/$state_dir/$_pkgbase"
  done

  # Main executable.
  install -D -m755 $_pkgbase "$pkgdir/usr/bin/$_pkgbase"

  # Configuration files.
  for cfg in $_pkgbase.{,reference.}yml fields.yml ; do
    install -D -m644 $cfg "$pkgdir/etc/$_pkgbase/$cfg"
  done

  # Documentation.
  for doc in NOTICE.txt README.md ; do
    install -D -m644 $doc "$pkgdir/usr/share/$_pkgbase/$doc"
  done

  # Bundled Kibana dashboards.
  cp -r kibana "$pkgdir/usr/share/$_pkgbase"

  # Systemd service unit.
  install -D -m644 "$srcdir/$_pkgbase.service" \
    "$pkgdir/usr/lib/systemd/system/$_pkgbase.service"
}
| true
|
2d40605be5dea9e6c016eae5c8bd88ec02955337
|
Shell
|
alexzorin/i3-lastpass
|
/i3-lastpass
|
UTF-8
| 779
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -euf -o pipefail

# List all LastPass entries with the given sync mode; emit the sentinel
# "no-sync" when lpass cannot serve the request.
fetch_items() {
    lpass ls --sync="$1" --color=never --format="%aN (%au) (%al) | %ai" || printf 'no-sync'
}

# First try the local cache only, then fall back to a syncing fetch.
items=$(fetch_items no)
if [[ "${items}" == "no-sync" ]]; then
    items=$(fetch_items auto)
    if [[ "${items}" == "no-sync" ]]; then
        printf "You need to login to LastPass first: (lpass login --trust <username>)\n" | dmenu -p "LastPass"
        exit 1
    fi
fi

# Let the user pick an entry, extract the trailing account id, and copy
# the password to the secondary X selection.
selection=$(echo "${items}" | dmenu -p "LastPass" -i -l 3)
entry_id=$(echo "${selection}" | sed 's/.*| //' | xargs)
lpass show --sync=no --password "${entry_id}" | xsel --secondary -i
|
9ee01a09b23bb028fbbcc651d09c1e48dae7d3ad
|
Shell
|
ivy-rew/debianDevSystem
|
/installers/dev/inodesLimit.sh
|
UTF-8
| 364
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Raise the kernel inotify watch limit when it differs from the desired value.
# https://unix.stackexchange.com/questions/13751/kernel-inotify-watch-limit-reached

desired=524288
actual=$(cat /proc/sys/fs/inotify/max_user_watches)

if [ "$actual" -ne "$desired" ]; then
	echo "changing inode.limit from $actual to $desired"
	# Persist the new limit, then reload sysctl settings.
	echo "fs.inotify.max_user_watches=${desired}" | sudo tee -a /etc/sysctl.conf
	sudo sysctl -p
fi
| true
|
ece4af5386c6856edf4b4460bbff129f8673622e
|
Shell
|
automatiche/demo
|
/src/ssh3.sh
|
UTF-8
| 601
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# "三院" ("Third Hospital"): open an SSH session to the hospital server,
# answer the password prompt automatically, then hand control to the user.
#
# BUG FIX: the here-document was previously *unquoted*, so the shell
# expanded $-variables before expect ever saw the script.  `send
# "$Bysy_6606\r"` referenced an unset shell variable, so only an empty
# line was sent at the password prompt; the password itself is the
# literal string "Bysy_6606" (as the commented-out experiments showed).
# The delimiter is now quoted ('EOF') so expect receives the script
# verbatim, and the dead, shell-mangled `set ip`/`set password` lines
# were removed.
#
# SECURITY NOTE(review): a password is hard-coded in this script; prefer
# SSH keys or reading the secret from the environment.
/usr/bin/expect <<'EOF'
set timeout 20
spawn ssh -p 3000 hospital@106.38.159.214
expect {
    "password:" {send "Bysy_6606\r"}
}
# NOTE(review): `interact` before `expect eof` mirrors the original
# control flow; `interact` needs a tty — confirm it behaves when expect
# reads its script from stdin.
interact
expect eof
EOF
| true
|
e827c7df04f038c618104a27974ce755403a05ed
|
Shell
|
LuciaRoldan/ProyectoFinal_Landing
|
/startup
|
UTF-8
| 778
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Start the benito backend for the given environment, record its PID, and
# health-check it after startup.
# Usage: ./startup <env>

if [ $# -eq 0 ]
then
    echo "Usage: ./startup <env>"
    exit 1
fi

ENV=$1

set -e

echo "Starting server..."
nohup java -Dspring.config.location=benito/benito-backend/environments/$ENV/application.properties -Dconfig.file=benito/benito-backend/environments/$ENV/application.conf -Dlogback.configurationFile=benito/benito-backend/environments/$ENV/logback.xml -Dspring.profiles.active=$ENV -jar benito/benito-backend/target/benito.jar > startup.log &

# BUG FIX: $! is the PID of the java process just backgrounded.  The old
# `ps faux | grep "benito.jar" | grep -v \"grep\" | ...` pipeline filtered
# lines containing the literal text `"grep"` (including the quote marks),
# which never matches, so the grep process itself could be recorded.
PID=$!
echo $PID > process

echo "Waiting for app to be ready to health-check..."
sleep 30

# BUG FIX: run curl inside the `if` — under `set -e` a failing curl used
# to abort the script before the old `if [ $? -eq 0 ]` check could run,
# making the failure branch unreachable.
if curl localhost:443/heart-beat
then
    echo "Server started correctly!"
    exit 0
else
    echo "Server failed to start"
    exit 1
fi
|
95b353a0c09437466be49b80b014fe7b6fcb0662
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/jalv-select-git/PKGBUILD
|
UTF-8
| 1,072
| 2.8125
| 3
|
[
"Unlicense"
] |
permissive
|
# Maintainer: Christopher Arndt <aur -at- chrisarndt -dot- de>

# VCS (git HEAD) package for jalv_select.
_pkgname=jalv-select
pkgname="${_pkgname}-git"
# Auto-generated by pkgver() below; format: <upstream>.r<revcount>.<shorthash>
pkgver=0.7.r62.5a010a0
pkgrel=1
pkgdesc="A little GUI to select installed lv2 plug-ins and run them with jalv."
arch=('i686' 'x86_64')
url="https://github.com/brummer10/jalv_select"
license=('custom:Public Domain')
depends=('jalv' 'gtkmm')
provides=("${_pkgname}")
conflicts=("${_pkgname}")
# Upstream git repo plus a local copy of the license text.
source=("${_pkgname}::git+https://github.com/brummer10/jalv_select.git"
        'LICENSE.txt')
install="${_pkgname}.install"
# SKIP: git sources are verified by commit, not checksum.
md5sums=('SKIP'
         '039bcb549d34b4273393c9ca25848fe6')
pkgver() {
  cd "${srcdir}/${_pkgname}"
  # The base version is declared in the Makefile as "VER = x.y".
  local upstream_ver
  upstream_ver=$(awk '/VER =/ {print $3}' Makefile)
  # <upstream>.r<number of commits>.<short hash>
  printf '%s.r%s.%s\n' "${upstream_ver}" \
    "$(git rev-list --count HEAD)" \
    "$(git rev-parse --short HEAD)"
}
build() {
  # Build in the checked-out source tree using the upstream Makefile.
  cd "${srcdir}/${_pkgname}"
  make
}
package() {
  cd "${srcdir}/${_pkgname}"
  # Upstream Makefile handles the main installation.
  make install DESTDIR="$pkgdir"

  # Ship the readme with the package documentation.
  install -Dm644 README.md "$pkgdir/usr/share/doc/$pkgname/README.md"

  # Public-domain dedication, installed as the license file.
  install -Dm644 "$srcdir/LICENSE.txt" "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
| true
|
c8c0de4471184c9b2fa518521eb62200073ae68d
|
Shell
|
linsallyzhao/2018_term_experiment
|
/run_acp
|
UTF-8
| 444
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Run ./ACP_assignment over a range of sample sizes and emit one CSV row
# per output key, with the '_' row carrying the sample sizes themselves.

declare -A columns
columns['_']=''

for (( n = 1000; n <= 100000; n += 500 )); do
    printf 'Running %s\n' "$n" >&2
    columns['_']+="$n,"
    # Each output line is: <key> <price> <ignored> <stderr-estimate>
    while read -r label value _ err; do
        columns["$label"]+="$value,"
        columns["${label}_stderr"]+="$err,"
    done < <(./ACP_assignment "$n")
done

# Print "key,v1,v2,..." rows (trailing comma stripped), sorted bytewise.
{
    for key in "${!columns[@]}"; do
        printf '%s,%s\n' "$key" "${columns["$key"]%,}"
    done
} | LC_ALL=C sort
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.